HelenOS sources: occurrences of the symbol cpu

cpu               125 abi/include/abi/sysinfo.h 	unsigned int cpu;       /**< Associated CPU ID (if on_cpu is true) */
cpu                57 kernel/arch/abs32le/src/cpu/cpu.c void cpu_print_report(cpu_t *cpu)
cpu               106 kernel/arch/arm32/src/cpu/cpu.c static void arch_cpu_identify(cpu_arch_t *cpu)
cpu               110 kernel/arch/arm32/src/cpu/cpu.c 	cpu->imp_num = (ident >> MIDR_IMPLEMENTER_SHIFT) & MIDR_IMPLEMENTER_MASK;
cpu               111 kernel/arch/arm32/src/cpu/cpu.c 	cpu->variant_num = (ident >> MIDR_VARIANT_SHIFT) & MIDR_VARIANT_MASK;
cpu               112 kernel/arch/arm32/src/cpu/cpu.c 	cpu->arch_num = (ident >> MIDR_ARCHITECTURE_SHIFT) & MIDR_ARCHITECTURE_MASK;
cpu               113 kernel/arch/arm32/src/cpu/cpu.c 	cpu->prim_part_num = (ident >> MIDR_PART_NUMBER_SHIFT) & MIDR_PART_NUMBER_MASK;
cpu               114 kernel/arch/arm32/src/cpu/cpu.c 	cpu->rev_num = (ident >> MIDR_REVISION_SHIFT) & MIDR_REVISION_MASK;
cpu               117 kernel/arch/arm32/src/cpu/cpu.c 	cpu->dcache_levels = dcache_levels();
cpu               119 kernel/arch/arm32/src/cpu/cpu.c 	for (unsigned i = 0; i < cpu->dcache_levels; ++i) {
cpu               120 kernel/arch/arm32/src/cpu/cpu.c 		cpu->dcache[i].ways = dcache_ways(i);
cpu               121 kernel/arch/arm32/src/cpu/cpu.c 		cpu->dcache[i].sets = dcache_sets(i);
cpu               122 kernel/arch/arm32/src/cpu/cpu.c 		cpu->dcache[i].way_shift = 31 - log2(cpu->dcache[i].ways);
cpu               123 kernel/arch/arm32/src/cpu/cpu.c 		cpu->dcache[i].set_shift = dcache_linesize_log(i);
cpu               124 kernel/arch/arm32/src/cpu/cpu.c 		cpu->dcache[i].line_size = 1 << dcache_linesize_log(i);
cpu               126 kernel/arch/arm32/src/cpu/cpu.c 		    "(shifts: w%u, s%u)\n", i + 1, cpu->dcache[i].ways,
cpu               127 kernel/arch/arm32/src/cpu/cpu.c 		    cpu->dcache[i].sets, cpu->dcache[i].line_size,
cpu               128 kernel/arch/arm32/src/cpu/cpu.c 		    cpu->dcache[i].way_shift, cpu->dcache[i].set_shift);
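
The arm32 fragments above split the MIDR identification register into implementer, variant, architecture, part number, and revision fields. The following standalone sketch shows that decoding with the standard ARM MIDR layout filled in for the MIDR_*_SHIFT/MASK macros; the concrete values are an assumption about what the HelenOS headers define, not taken from this listing:

#include <stdint.h>
#include <stdio.h>

/* Standard ARM MIDR bitfield layout; assumed to match the HelenOS
 * MIDR_*_SHIFT/MASK macros used in arch_cpu_identify(). */
#define MIDR_IMPLEMENTER_SHIFT   24
#define MIDR_IMPLEMENTER_MASK    0xffU
#define MIDR_VARIANT_SHIFT       20
#define MIDR_VARIANT_MASK        0xfU
#define MIDR_ARCHITECTURE_SHIFT  16
#define MIDR_ARCHITECTURE_MASK   0xfU
#define MIDR_PART_NUMBER_SHIFT   4
#define MIDR_PART_NUMBER_MASK    0xfffU
#define MIDR_REVISION_SHIFT      0
#define MIDR_REVISION_MASK       0xfU

int main(void)
{
	uint32_t ident = 0x410fc075;  /* Cortex-A7 r0p5: implementer 0x41 (ARM), part 0xc07 */

	printf("impl=0x%02x variant=%u arch=%u part=0x%03x rev=%u\n",
	    (ident >> MIDR_IMPLEMENTER_SHIFT) & MIDR_IMPLEMENTER_MASK,
	    (ident >> MIDR_VARIANT_SHIFT) & MIDR_VARIANT_MASK,
	    (ident >> MIDR_ARCHITECTURE_SHIFT) & MIDR_ARCHITECTURE_MASK,
	    (ident >> MIDR_PART_NUMBER_SHIFT) & MIDR_PART_NUMBER_MASK,
	    (ident >> MIDR_REVISION_SHIFT) & MIDR_REVISION_MASK);
	return 0;
}
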
cpu               147 kernel/arch/ia32/src/cpu/cpu.c void cpu_print_report(cpu_t *cpu)
cpu               150 kernel/arch/ia32/src/cpu/cpu.c 	    " MHz\n", cpu->id, vendor_str[cpu->arch.vendor], cpu->arch.family,
cpu               151 kernel/arch/ia32/src/cpu/cpu.c 	    cpu->arch.model, cpu->arch.stepping, cpu->arch.id, cpu->frequency_mhz);
cpu                53 kernel/arch/ppc32/src/cpu/cpu.c void cpu_print_report(cpu_t *cpu)
cpu                57 kernel/arch/ppc32/src/cpu/cpu.c 	switch (cpu->arch.version) {
cpu                74 kernel/arch/ppc32/src/cpu/cpu.c 	printf("cpu%u: version=%" PRIu16 " (%s), revision=%" PRIu16 "\n", cpu->id,
cpu                75 kernel/arch/ppc32/src/cpu/cpu.c 	    cpu->arch.version, name, cpu->arch.revision);
cpu                56 kernel/arch/riscv64/src/cpu/cpu.c void cpu_print_report(cpu_t *cpu)
cpu                49 kernel/arch/sparc64/include/arch/sun4v/cpu.h struct cpu;
cpu                55 kernel/arch/sparc64/include/arch/sun4v/cpu.h 	struct cpu *cpus[MAX_CORE_STRANDS];
cpu                65 kernel/generic/include/arch.h struct cpu;
cpu                80 kernel/generic/include/arch.h 	struct cpu *cpu;        /**< Executing CPU. */
cpu                46 kernel/generic/include/cpu.h #define CPU                  (CURRENT->cpu)
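
The generic-kernel hits show the convention the rest of the listing relies on: CPU expands to CURRENT->cpu, the cpu_t of the processor executing the current code path. A minimal sketch of that convention follows; the real CURRENT is derived from the kernel stack, so the plain global standing in for it here is an assumption for illustration only:

#include <stdio.h>

typedef struct cpu {
	unsigned int id;
} cpu_t;

/* Simplified stand-in for the per-thread structure from arch.h; the
 * real one also carries thread, task, as and a magic value (see the
 * panic.c hit further down). */
typedef struct {
	cpu_t *cpu;    /* Executing CPU. */
} current_t;

static current_t stub_current;      /* hypothetical stand-in */
#define CURRENT  (&stub_current)
#define CPU      (CURRENT->cpu)     /* same shape as the cpu.h hit above */

int main(void)
{
	cpu_t boot_cpu = { .id = 0 };
	CURRENT->cpu = &boot_cpu;
	printf("executing on cpu%u\n", CPU->id);
	return 0;
}
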
cpu               202 kernel/generic/include/proc/thread.h 	_Atomic(cpu_t *) cpu;
cpu               163 kernel/generic/include/synch/spinlock.h 	_Atomic(struct cpu *) owner;  /**< Which cpu currently owns this lock */
cpu                57 kernel/generic/include/time/timeout.h 	cpu_t *cpu;
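
Several of the fields above (thread.h's cpu, the spinlock owner, and the fpu_owner used later in thread.c) are _Atomic pointers accessed with atomic_get_unordered()/atomic_set_unordered(). A plausible reading, consistent with the explicit memory_order_relaxed store visible in the irq_spinlock.c hit below, is that these are relaxed atomic accesses: atomic only so that readers never see a torn pointer, with no ordering guarantees. The definitions below are an assumption, not copied from HelenOS:

#include <stdatomic.h>
#include <stdio.h>

/* Assumed equivalents of HelenOS's unordered accessors: relaxed atomic
 * load/store, sufficient for fields guarded by other synchronization or
 * used only for diagnostics. */
#define atomic_get_unordered(ptr) \
	atomic_load_explicit((ptr), memory_order_relaxed)
#define atomic_set_unordered(ptr, val) \
	atomic_store_explicit((ptr), (val), memory_order_relaxed)

int main(void)
{
	int value = 42;
	_Atomic(int *) owner = NULL;

	atomic_set_unordered(&owner, &value);
	printf("*owner = %d\n", *atomic_get_unordered(&owner));
	return 0;
}
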
cpu               100 kernel/generic/src/debug/panic.c 		    CURRENT->thread, CURRENT->task, CURRENT->cpu, CURRENT->as, CURRENT->magic);
cpu                95 kernel/generic/src/mm/tlb.c 		cpu_t *cpu = &cpus[i];
cpu                97 kernel/generic/src/mm/tlb.c 		irq_spinlock_lock(&cpu->tlb_lock, false);
cpu                98 kernel/generic/src/mm/tlb.c 		if (cpu->tlb_messages_count == TLB_MESSAGE_QUEUE_LEN) {
cpu               103 kernel/generic/src/mm/tlb.c 			cpu->tlb_messages_count = 1;
cpu               104 kernel/generic/src/mm/tlb.c 			cpu->tlb_messages[0].type = TLB_INVL_ALL;
cpu               105 kernel/generic/src/mm/tlb.c 			cpu->tlb_messages[0].asid = ASID_INVALID;
cpu               106 kernel/generic/src/mm/tlb.c 			cpu->tlb_messages[0].page = 0;
cpu               107 kernel/generic/src/mm/tlb.c 			cpu->tlb_messages[0].count = 0;
cpu               112 kernel/generic/src/mm/tlb.c 			size_t idx = cpu->tlb_messages_count++;
cpu               113 kernel/generic/src/mm/tlb.c 			cpu->tlb_messages[idx].type = type;
cpu               114 kernel/generic/src/mm/tlb.c 			cpu->tlb_messages[idx].asid = asid;
cpu               115 kernel/generic/src/mm/tlb.c 			cpu->tlb_messages[idx].page = page;
cpu               116 kernel/generic/src/mm/tlb.c 			cpu->tlb_messages[idx].count = count;
cpu               118 kernel/generic/src/mm/tlb.c 		irq_spinlock_unlock(&cpu->tlb_lock, false);
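
The tlb.c fragments show the core of the SMP TLB shootdown path: every CPU has a fixed-length queue of invalidation messages, and when the queue fills up it is collapsed into a single TLB_INVL_ALL entry, which subsumes every pending request. A compilable sketch of that enqueue policy; the queue depth, the enum names beyond TLB_INVL_ALL, and the omission of cpu->tlb_lock are simplifications and assumptions:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define TLB_MESSAGE_QUEUE_LEN  10   /* assumed depth */
#define ASID_INVALID           0

typedef enum {
	TLB_INVL_ALL,      /* flush everything */
	TLB_INVL_PAGES     /* flush a range of pages */
} tlb_invalidate_type_t;

typedef struct {
	tlb_invalidate_type_t type;
	unsigned int asid;
	uintptr_t page;
	size_t count;
} tlb_msg_t;

typedef struct {
	size_t tlb_messages_count;
	tlb_msg_t tlb_messages[TLB_MESSAGE_QUEUE_LEN];
} cpu_t;

/* Queue one invalidation request; on overflow, degrade to a single
 * "invalidate all" message. The real code does this under cpu->tlb_lock. */
static void tlb_enqueue(cpu_t *cpu, tlb_invalidate_type_t type,
    unsigned int asid, uintptr_t page, size_t count)
{
	if (cpu->tlb_messages_count == TLB_MESSAGE_QUEUE_LEN) {
		cpu->tlb_messages_count = 1;
		cpu->tlb_messages[0].type = TLB_INVL_ALL;
		cpu->tlb_messages[0].asid = ASID_INVALID;
		cpu->tlb_messages[0].page = 0;
		cpu->tlb_messages[0].count = 0;
	} else {
		size_t idx = cpu->tlb_messages_count++;
		cpu->tlb_messages[idx].type = type;
		cpu->tlb_messages[idx].asid = asid;
		cpu->tlb_messages[idx].page = page;
		cpu->tlb_messages[idx].count = count;
	}
}

int main(void)
{
	cpu_t cpu = { 0 };

	for (int i = 0; i < 12; i++)    /* overflow the 10-entry queue */
		tlb_enqueue(&cpu, TLB_INVL_PAGES, 1, 0x1000 * i, 1);

	printf("count=%zu type=%d\n", cpu.tlb_messages_count,
	    cpu.tlb_messages[0].type);  /* count=2, type=TLB_INVL_ALL */
	return 0;
}
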
cpu                58 kernel/generic/src/proc/current.c 	the->cpu = NULL;
cpu               312 kernel/generic/src/proc/scheduler.c 	assert(atomic_get_unordered(&THREAD->cpu) == CPU);
cpu               367 kernel/generic/src/proc/scheduler.c static void add_to_rq(thread_t *thread, cpu_t *cpu, int i)
cpu               370 kernel/generic/src/proc/scheduler.c 	runq_t *rq = &cpu->rq[i];
cpu               378 kernel/generic/src/proc/scheduler.c 	atomic_inc(&cpu->nrdy);
cpu               387 kernel/generic/src/proc/scheduler.c 	assert(atomic_get_unordered(&thread->cpu) == CPU);
cpu               411 kernel/generic/src/proc/scheduler.c 	cpu_t *cpu = atomic_get_unordered(&thread->cpu);
cpu               413 kernel/generic/src/proc/scheduler.c 	if (!cpu) {
cpu               414 kernel/generic/src/proc/scheduler.c 		cpu = CPU;
cpu               415 kernel/generic/src/proc/scheduler.c 		atomic_set_unordered(&thread->cpu, CPU);
cpu               418 kernel/generic/src/proc/scheduler.c 	add_to_rq(thread, cpu, 0);
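
The scheduler fragments around thread_requeue() show the supporting data structure: each cpu_t owns an array of per-priority run queues plus an atomic nrdy counter, and a thread that has no home CPU yet (thread->cpu == NULL) is adopted by the current CPU and enqueued at priority 0. Below is a simplified, compilable model of add_to_rq(), using a singly linked list instead of HelenOS's list_t, with no locking and an assumed queue count:

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

#define RQ_COUNT  16   /* number of priority levels; assumed */

typedef struct thread {
	struct thread *next;    /* simplified link (HelenOS uses list_t) */
} thread_t;

typedef struct {
	thread_t *head;         /* simplified run queue */
	size_t n;               /* threads currently queued here */
} runq_t;

typedef struct cpu {
	runq_t rq[RQ_COUNT];    /* rq[0] is the highest priority */
	atomic_size_t nrdy;     /* total ready threads on this CPU */
} cpu_t;

/* Mirror of add_to_rq() from the listing: push the thread onto queue i
 * and bump the CPU's ready count (rq[i].lock is omitted here). */
static void add_to_rq(thread_t *thread, cpu_t *cpu, int i)
{
	runq_t *rq = &cpu->rq[i];

	thread->next = rq->head;
	rq->head = thread;
	rq->n++;
	atomic_fetch_add(&cpu->nrdy, 1);   /* assumed atomic_inc() meaning */
}

int main(void)
{
	static cpu_t cpu;
	static thread_t thread;

	add_to_rq(&thread, &cpu, 0);       /* fresh threads go to priority 0 */
	printf("nrdy=%zu rq0.n=%zu\n", atomic_load(&cpu.nrdy), cpu.rq[0].n);
	return 0;
}
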
cpu               647 kernel/generic/src/proc/scheduler.c 		atomic_set_unordered(&thread->cpu, CPU);
cpu               726 kernel/generic/src/proc/scheduler.c 			cpu_t *cpu = &cpus[acpu];
cpu               734 kernel/generic/src/proc/scheduler.c 			if (CPU == cpu)
cpu               737 kernel/generic/src/proc/scheduler.c 			if (atomic_load(&cpu->nrdy) <= average)
cpu               740 kernel/generic/src/proc/scheduler.c 			if (steal_thread_from(cpu, rq) && --count == 0)
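
The load-balancing loop at scheduler.c lines 726-740 encodes the stealing policy: skip the current CPU, skip any CPU whose ready count is at or below the average, and stop once enough threads have been stolen. Here is a compilable rendering of that loop; the average computation, the stub steal_thread_from(), and the initial loads are illustrative assumptions:

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define CPU_COUNT  4

typedef struct cpu {
	atomic_size_t nrdy;    /* ready threads queued on this CPU */
} cpu_t;

static cpu_t cpus[CPU_COUNT];

/* Stub: pretend one ready thread moved from the victim to the thief. */
static bool steal_thread_from(cpu_t *self, cpu_t *victim)
{
	atomic_fetch_sub(&victim->nrdy, 1);
	atomic_fetch_add(&self->nrdy, 1);
	return true;
}

/* Steal up to `count` threads from CPUs loaded above `average`,
 * following the skip conditions in the fragments above. */
static void balance(cpu_t *self, size_t average, size_t count)
{
	for (size_t i = 0; i < CPU_COUNT; i++) {
		cpu_t *cpu = &cpus[i];

		if (cpu == self)
			continue;           /* never steal from ourselves */

		if (atomic_load(&cpu->nrdy) <= average)
			continue;           /* victim is not overloaded */

		if (steal_thread_from(self, cpu) && --count == 0)
			break;              /* stole as much as we wanted */
	}
}

int main(void)
{
	for (size_t i = 0; i < CPU_COUNT; i++)
		atomic_init(&cpus[i].nrdy, i * 2);  /* uneven load: 0 2 4 6 */

	balance(&cpus[0], 3, 2);
	printf("cpu0 nrdy after balancing: %zu\n", atomic_load(&cpus[0].nrdy));
	return 0;
}
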
cpu               772 kernel/generic/src/proc/scheduler.c 	size_t cpu;
cpu               773 kernel/generic/src/proc/scheduler.c 	for (cpu = 0; cpu < config.cpu_count; cpu++) {
cpu               774 kernel/generic/src/proc/scheduler.c 		if (!cpus[cpu].active)
cpu               778 kernel/generic/src/proc/scheduler.c 		    cpus[cpu].id, &cpus[cpu], atomic_load(&cpus[cpu].nrdy));
cpu               782 kernel/generic/src/proc/scheduler.c 			irq_spinlock_lock(&(cpus[cpu].rq[i].lock), false);
cpu               783 kernel/generic/src/proc/scheduler.c 			if (cpus[cpu].rq[i].n == 0) {
cpu               784 kernel/generic/src/proc/scheduler.c 				irq_spinlock_unlock(&(cpus[cpu].rq[i].lock), false);
cpu               789 kernel/generic/src/proc/scheduler.c 			list_foreach(cpus[cpu].rq[i].rq, rq_link, thread_t,
cpu               796 kernel/generic/src/proc/scheduler.c 			irq_spinlock_unlock(&(cpus[cpu].rq[i].lock), false);
cpu               196 kernel/generic/src/proc/thread.c void thread_wire(thread_t *thread, cpu_t *cpu)
cpu               199 kernel/generic/src/proc/thread.c 	atomic_set_unordered(&thread->cpu, cpu);
cpu               264 kernel/generic/src/proc/thread.c 	atomic_init(&thread->cpu, NULL);
cpu               344 kernel/generic/src/proc/thread.c 	cpu_t *cpu = atomic_get_unordered(&thread->cpu);
cpu               345 kernel/generic/src/proc/thread.c 	if (cpu) {
cpu               351 kernel/generic/src/proc/thread.c 		irq_spinlock_lock(&cpu->fpu_lock, false);
cpu               353 kernel/generic/src/proc/thread.c 		if (atomic_get_unordered(&cpu->fpu_owner) == thread)
cpu               354 kernel/generic/src/proc/thread.c 			atomic_set_unordered(&cpu->fpu_owner, NULL);
cpu               356 kernel/generic/src/proc/thread.c 		irq_spinlock_unlock(&cpu->fpu_lock, false);
cpu               706 kernel/generic/src/proc/thread.c 		cpu_t *cpu = atomic_get_unordered(&thread->cpu);
cpu               707 kernel/generic/src/proc/thread.c 		if (cpu)
cpu               708 kernel/generic/src/proc/thread.c 			printf("%-5u", cpu->id);
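
thread.c lines 344-356 are the cleanup half of lazy FPU switching: each cpu_t remembers which thread's FPU state currently occupies its registers (fpu_owner), and a dying thread must drop that claim under cpu->fpu_lock so a stale pointer is never consulted later. A minimal sketch of the disown step; the types are assumptions and the irq_spinlock is reduced to comments:

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

typedef struct thread {
	int dummy;
} thread_t;

typedef struct cpu {
	/* Thread whose FPU state is loaded in this CPU's registers, or
	 * NULL; written under fpu_lock in the real code. */
	_Atomic(thread_t *) fpu_owner;
} cpu_t;

/* If the dying thread still owns the CPU's FPU state, forget it. */
static void fpu_disown(cpu_t *cpu, thread_t *thread)
{
	/* irq_spinlock_lock(&cpu->fpu_lock, false); */
	if (atomic_load_explicit(&cpu->fpu_owner, memory_order_relaxed) == thread)
		atomic_store_explicit(&cpu->fpu_owner, NULL, memory_order_relaxed);
	/* irq_spinlock_unlock(&cpu->fpu_lock, false); */
}

int main(void)
{
	static cpu_t cpu;
	static thread_t thread;

	atomic_store(&cpu.fpu_owner, &thread);
	fpu_disown(&cpu, &thread);
	printf("owner=%p\n", (void *) atomic_load(&cpu.fpu_owner));
	return 0;
}
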
cpu                60 kernel/generic/src/synch/irq_spinlock.c 	cpu_t *cpu = CPU_OWNER;
cpu                61 kernel/generic/src/synch/irq_spinlock.c 	atomic_store_explicit(&lock->owner, cpu, memory_order_relaxed);
cpu               309 kernel/generic/src/sysinfo/stats.c 	cpu_t *cpu = atomic_get_unordered(&thread->cpu);
cpu               311 kernel/generic/src/sysinfo/stats.c 	if (cpu != NULL) {
cpu               313 kernel/generic/src/sysinfo/stats.c 		stats_thread->cpu = cpu->id;
cpu                70 kernel/generic/src/time/timeout.c 	timeout->cpu = NULL;
cpu                88 kernel/generic/src/time/timeout.c 		.cpu = CPU,
cpu               155 kernel/generic/src/time/timeout.c 	assert(timeout->cpu);
cpu               157 kernel/generic/src/time/timeout.c 	irq_spinlock_lock(&timeout->cpu->timeoutlock, true);
cpu               164 kernel/generic/src/time/timeout.c 	irq_spinlock_unlock(&timeout->cpu->timeoutlock, true);
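
The timeout.c hits outline an ownership handshake: timeout_initialize() leaves timeout->cpu NULL, registration pins the timeout to the registering CPU, and unregistration asserts the field is set so it can take that CPU's timeoutlock. A sketch of the pattern; the types are assumptions and the queue manipulation is elided:

#include <assert.h>
#include <stddef.h>

typedef struct cpu {
	int timeoutlock;   /* stand-in for the per-CPU irq_spinlock */
} cpu_t;

typedef struct {
	cpu_t *cpu;        /* CPU whose queue holds this timeout, or NULL */
} timeout_t;

void timeout_initialize(timeout_t *t)
{
	t->cpu = NULL;                 /* not registered anywhere yet */
}

void timeout_register(timeout_t *t, cpu_t *current_cpu)
{
	t->cpu = current_cpu;          /* pin to the registering CPU */
	/* ... insert into current_cpu's timeout queue under timeoutlock ... */
}

void timeout_unregister(timeout_t *t)
{
	assert(t->cpu);                /* must have been registered first */
	/* irq_spinlock_lock(&t->cpu->timeoutlock, true); */
	/* ... remove from that CPU's queue ... */
	/* irq_spinlock_unlock(&t->cpu->timeoutlock, true); */
}

int main(void)
{
	static cpu_t cpu;
	static timeout_t t;

	timeout_initialize(&t);
	timeout_register(&t, &cpu);
	timeout_unregister(&t);
	return 0;
}
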
cpu               131 uspace/app/stats/stats.c 				printf("%6u ", stats_threads[i].cpu);