HelenOS sources
This source file includes the following definitions:
- get_stats_cpus
- get_task_virtmem
- get_task_resmem
- produce_stats_task
- get_stats_tasks
- produce_stats_thread
- get_stats_threads
- produce_stats_ipcc_cb
- get_stats_ipccs
- get_stats_task
- get_stats_thread
- get_stats_exceptions
- get_stats_exception
- get_stats_physmem
- get_stats_load
- load_calc
- kload
- stats_init
#include <assert.h>
#include <typedefs.h>
#include <abi/sysinfo.h>
#include <sysinfo/stats.h>
#include <sysinfo/sysinfo.h>
#include <synch/spinlock.h>
#include <synch/mutex.h>
#include <time/clock.h>
#include <mm/frame.h>
#include <proc/task.h>
#include <proc/thread.h>
#include <interrupt.h>
#include <stdbool.h>
#include <str.h>
#include <errno.h>
#include <cpu.h>
#include <arch.h>
#include <stdlib.h>
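/*
 * Fixed-point parameters of the load average calculation: LOAD_FIXED_1
 * (1 << LOAD_FIXED_SHIFT) represents 1.0 in the kernel's load
 * representation, LOAD_KERNEL_SHIFT is the shift applied when exporting
 * the load to user space and LOAD_INTERVAL is the sampling period in
 * seconds.
 */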
#define LOAD_FIXED_SHIFT 11
#define LOAD_USPACE_SHIFT 6
#define LOAD_KERNEL_SHIFT (LOAD_FIXED_SHIFT - LOAD_USPACE_SHIFT)
#define LOAD_FIXED_1 (1 << LOAD_FIXED_SHIFT)
#define LOAD_INTERVAL 5
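/*
 * State for walking IPC phone capabilities: the first pass only counts
 * connected phones, the second pass fills the preallocated data array.
 */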
typedef struct {
bool counting;
size_t count;
size_t i;
stats_ipcc_t *data;
} ipccs_state_t;
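/*
 * Pre-computed exponential decay factors (fixed-point, LOAD_FIXED_1 == 1.0)
 * for the 1, 5 and 15 minute load averages, sampled every LOAD_INTERVAL
 * seconds. avenrdy[] holds the current averages and is protected by
 * load_lock.
 */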
static load_t load_exp[LOAD_STEPS] = { 1884, 2014, 2037 };
static load_t avenrdy[LOAD_STEPS] = { 0, 0, 0 };
static mutex_t load_lock;
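/*
 * Sysinfo generator for "system.cpus": returns a snapshot of per-CPU
 * statistics. As with the other generators below, a dry run only reports
 * the required buffer size in *size and returns NULL.
 */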
static void *get_stats_cpus(struct sysinfo_item *item, size_t *size,
bool dry_run, void *data)
{
*size = sizeof(stats_cpu_t) * config.cpu_count;
if (dry_run)
return NULL;
stats_cpu_t *stats_cpus = (stats_cpu_t *) malloc(*size);
if (stats_cpus == NULL) {
*size = 0;
return NULL;
}
size_t i;
for (i = 0; i < config.cpu_count; i++) {
stats_cpus[i].id = cpus[i].id;
stats_cpus[i].active = cpus[i].active;
stats_cpus[i].frequency_mhz = cpus[i].frequency_mhz;
stats_cpus[i].busy_cycles = atomic_time_read(&cpus[i].busy_cycles);
stats_cpus[i].idle_cycles = atomic_time_read(&cpus[i].idle_cycles);
}
return ((void *) stats_cpus);
}
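/*
 * Sum the size (in bytes) of all address space areas of a task. Because
 * the callers hold spinlocks and must not block, the address space and
 * area mutexes are only taken conditionally with mutex_trylock(), so the
 * result may be inexact (0 if the address space could not be locked).
 */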
static size_t get_task_virtmem(as_t *as)
{
if (mutex_trylock(&as->lock) != EOK)
return 0;
size_t pages = 0;
as_area_t *area = as_area_first(as);
while (area != NULL) {
if (mutex_trylock(&area->lock) != EOK)
continue;
pages += area->pages;
mutex_unlock(&area->lock);
area = as_area_next(area);
}
mutex_unlock(&as->lock);
return (pages << PAGE_WIDTH);
}
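/* Like get_task_virtmem(), but counts only the resident pages of each area. */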
static size_t get_task_resmem(as_t *as)
{
if (mutex_trylock(&as->lock) != EOK)
return 0;
size_t pages = 0;
as_area_t *area = as_area_first(as);
while (area != NULL) {
if (mutex_trylock(&area->lock) != EOK)
continue;
pages += area->used_space.pages;
mutex_unlock(&area->lock);
area = as_area_next(area);
}
mutex_unlock(&as->lock);
return (pages << PAGE_WIDTH);
}
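/*
 * Produce task statistics into *stats_task. Assumes interrupts are
 * disabled and task->lock is held.
 */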
static void produce_stats_task(task_t *task, stats_task_t *stats_task)
{
assert(interrupts_disabled());
assert(irq_spinlock_locked(&task->lock));
stats_task->task_id = task->taskid;
str_cpy(stats_task->name, TASK_NAME_BUFLEN, task->name);
stats_task->virtmem = get_task_virtmem(task->as);
stats_task->resmem = get_task_resmem(task->as);
stats_task->threads = atomic_load(&task->lifecount);
task_get_accounting(task, &(stats_task->ucycles),
&(stats_task->kcycles));
stats_task->ipc_info = task->ipc_info;
}
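/* Sysinfo generator for "system.tasks": statistics of all tasks. */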
static void *get_stats_tasks(struct sysinfo_item *item, size_t *size,
bool dry_run, void *data)
{
irq_spinlock_lock(&tasks_lock, true);
size_t count = task_count();
if (count == 0) {
irq_spinlock_unlock(&tasks_lock, true);
*size = 0;
return NULL;
}
*size = sizeof(stats_task_t) * count;
if (dry_run) {
irq_spinlock_unlock(&tasks_lock, true);
return NULL;
}
stats_task_t *stats_tasks = (stats_task_t *) malloc(*size);
if (stats_tasks == NULL) {
irq_spinlock_unlock(&tasks_lock, true);
*size = 0;
return NULL;
}
size_t i = 0;
task_t *task = task_first();
while (task != NULL) {
irq_spinlock_lock(&(task->lock), false);
produce_stats_task(task, &stats_tasks[i]);
i++;
irq_spinlock_unlock(&(task->lock), false);
task = task_next(task);
}
irq_spinlock_unlock(&tasks_lock, true);
return ((void *) stats_tasks);
}
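/* Produce thread statistics into *stats_thread. Assumes interrupts are disabled. */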
static void produce_stats_thread(thread_t *thread, stats_thread_t *stats_thread)
{
assert(interrupts_disabled());
stats_thread->thread_id = thread->tid;
stats_thread->task_id = thread->task->taskid;
stats_thread->state = atomic_get_unordered(&thread->state);
stats_thread->priority = atomic_get_unordered(&thread->priority);
stats_thread->ucycles = atomic_time_read(&thread->ucycles);
stats_thread->kcycles = atomic_time_read(&thread->kcycles);
cpu_t *cpu = atomic_get_unordered(&thread->cpu);
if (cpu != NULL) {
stats_thread->on_cpu = true;
stats_thread->cpu = cpu->id;
} else
stats_thread->on_cpu = false;
}
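/* Sysinfo generator for "system.threads": statistics of all threads. */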
static void *get_stats_threads(struct sysinfo_item *item, size_t *size,
bool dry_run, void *data)
{
irq_spinlock_lock(&threads_lock, true);
size_t count = thread_count();
if (count == 0) {
irq_spinlock_unlock(&threads_lock, true);
*size = 0;
return NULL;
}
*size = sizeof(stats_thread_t) * count;
if (dry_run) {
irq_spinlock_unlock(&threads_lock, true);
return NULL;
}
stats_thread_t *stats_threads = (stats_thread_t *) malloc(*size);
if (stats_threads == NULL) {
irq_spinlock_unlock(&threads_lock, true);
*size = 0;
return NULL;
}
size_t i = 0;
thread_t *thread = thread_first();
while (thread != NULL) {
produce_stats_thread(thread, &stats_threads[i]);
i++;
thread = thread_next(thread);
}
irq_spinlock_unlock(&threads_lock, true);
return ((void *) stats_threads);
}
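/*
 * Phone capability callback: in the counting pass only increment the
 * connection count; in the filling pass record the caller and callee
 * task IDs of each connected phone.
 */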
static bool produce_stats_ipcc_cb(cap_t *cap, void *arg)
{
phone_t *phone = cap->kobject->phone;
ipccs_state_t *state = (ipccs_state_t *) arg;
if (state->counting) {
state->count++;
return true;
}
if ((state->data == NULL) || (state->i >= state->count)) {
return true;
}
mutex_lock(&phone->lock);
if (phone->state == IPC_PHONE_CONNECTED) {
state->data[state->i].caller = phone->caller->taskid;
state->data[state->i].callee = phone->callee->task->taskid;
state->i++;
}
mutex_unlock(&phone->lock);
return true;
}
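/*
 * Sysinfo generator for "system.ipccs": walks the phone capabilities of
 * all tasks twice, first to count connections and size the buffer, then
 * to fill it. Connections established or torn down between the two
 * passes may be missed or dropped.
 */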
static void *get_stats_ipccs(struct sysinfo_item *item, size_t *size,
bool dry_run, void *data)
{
irq_spinlock_lock(&tasks_lock, true);
ipccs_state_t state = {
.counting = true,
.count = 0,
.i = 0,
.data = NULL
};
task_t *task = task_first();
while (task != NULL) {
task_hold(task);
irq_spinlock_unlock(&tasks_lock, true);
caps_apply_to_kobject_type(task, KOBJECT_TYPE_PHONE,
produce_stats_ipcc_cb, &state);
irq_spinlock_lock(&tasks_lock, true);
task = task_next(task);
}
state.counting = false;
*size = sizeof(stats_ipcc_t) * state.count;
if (!dry_run)
state.data = (stats_ipcc_t *) malloc(*size);
task = task_first();
while (task != NULL) {
irq_spinlock_unlock(&tasks_lock, true);
caps_apply_to_kobject_type(task, KOBJECT_TYPE_PHONE,
produce_stats_ipcc_cb, &state);
irq_spinlock_lock(&tasks_lock, true);
task_t *prev_task = task;
task = task_next(prev_task);
task_release(prev_task);
}
irq_spinlock_unlock(&tasks_lock, true);
return ((void *) state.data);
}
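/* Sysinfo subtree handler for "system.tasks.<task ID>". */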
static sysinfo_return_t get_stats_task(const char *name, bool dry_run,
void *data)
{
sysinfo_return_t ret = {
.tag = SYSINFO_VAL_UNDEFINED,
};
task_id_t task_id;
if (str_uint64_t(name, NULL, 0, true, &task_id) != EOK)
return ret;
task_t *task = task_find_by_id(task_id);
if (!task)
return ret;
if (dry_run) {
ret.tag = SYSINFO_VAL_FUNCTION_DATA;
ret.data.data = NULL;
ret.data.size = sizeof(stats_task_t);
} else {
stats_task_t *stats_task = malloc(sizeof(stats_task_t));
if (stats_task != NULL) {
ret.tag = SYSINFO_VAL_FUNCTION_DATA;
ret.data.data = stats_task;
ret.data.size = sizeof(stats_task_t);
irq_spinlock_lock(&task->lock, true);
produce_stats_task(task, stats_task);
irq_spinlock_unlock(&task->lock, true);
}
}
task_release(task);
return ret;
}
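/* Sysinfo subtree handler for "system.threads.<thread ID>". */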
static sysinfo_return_t get_stats_thread(const char *name, bool dry_run,
void *data)
{
sysinfo_return_t ret;
ret.tag = SYSINFO_VAL_UNDEFINED;
thread_id_t thread_id;
if (str_uint64_t(name, NULL, 0, true, &thread_id) != EOK)
return ret;
irq_spinlock_lock(&threads_lock, true);
thread_t *thread = thread_find_by_id(thread_id);
if (thread == NULL) {
irq_spinlock_unlock(&threads_lock, true);
return ret;
}
if (dry_run) {
ret.tag = SYSINFO_VAL_FUNCTION_DATA;
ret.data.data = NULL;
ret.data.size = sizeof(stats_thread_t);
irq_spinlock_unlock(&threads_lock, true);
} else {
stats_thread_t *stats_thread =
(stats_thread_t *) malloc(sizeof(stats_thread_t));
if (stats_thread == NULL) {
irq_spinlock_unlock(&threads_lock, true);
return ret;
}
ret.tag = SYSINFO_VAL_FUNCTION_DATA;
ret.data.data = (void *) stats_thread;
ret.data.size = sizeof(stats_thread_t);
produce_stats_thread(thread, stats_thread);
irq_spinlock_unlock(&threads_lock, true);
}
return ret;
}
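/* Sysinfo generator for "system.exceptions": snapshot of the exception table. */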
static void *get_stats_exceptions(struct sysinfo_item *item, size_t *size,
bool dry_run, void *data)
{
*size = sizeof(stats_exc_t) * IVT_ITEMS;
if ((dry_run) || (IVT_ITEMS == 0))
return NULL;
stats_exc_t *stats_exceptions =
(stats_exc_t *) malloc(*size);
if (stats_exceptions == NULL) {
*size = 0;
return NULL;
}
#if (IVT_ITEMS > 0)
irq_spinlock_lock(&exctbl_lock, true);
unsigned int i;
for (i = 0; i < IVT_ITEMS; i++) {
stats_exceptions[i].id = i + IVT_FIRST;
str_cpy(stats_exceptions[i].desc, EXC_NAME_BUFLEN, exc_table[i].name);
stats_exceptions[i].hot = exc_table[i].hot;
stats_exceptions[i].cycles = exc_table[i].cycles;
stats_exceptions[i].count = exc_table[i].count;
}
irq_spinlock_unlock(&exctbl_lock, true);
#endif
return ((void *) stats_exceptions);
}
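/* Sysinfo subtree handler for "system.exceptions.<exception number>". */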
static sysinfo_return_t get_stats_exception(const char *name, bool dry_run,
void *data)
{
sysinfo_return_t ret;
ret.tag = SYSINFO_VAL_UNDEFINED;
uint64_t excn;
if (str_uint64_t(name, NULL, 0, true, &excn) != EOK)
return ret;
#if (IVT_FIRST > 0)
if (excn < IVT_FIRST)
return ret;
#endif
#if (IVT_ITEMS + IVT_FIRST == 0)
return ret;
#else
if (excn >= IVT_ITEMS + IVT_FIRST)
return ret;
#endif
if (dry_run) {
ret.tag = SYSINFO_VAL_FUNCTION_DATA;
ret.data.data = NULL;
ret.data.size = sizeof(stats_exc_t);
} else {
excn -= IVT_FIRST;
stats_exc_t *stats_exception =
(stats_exc_t *) malloc(sizeof(stats_exc_t));
if (stats_exception == NULL)
return ret;
irq_spinlock_lock(&exctbl_lock, true);
ret.tag = SYSINFO_VAL_FUNCTION_DATA;
ret.data.data = (void *) stats_exception;
ret.data.size = sizeof(stats_exc_t);
stats_exception->id = excn;
str_cpy(stats_exception->desc, EXC_NAME_BUFLEN, exc_table[excn].name);
stats_exception->hot = exc_table[excn].hot;
stats_exception->cycles = exc_table[excn].cycles;
stats_exception->count = exc_table[excn].count;
irq_spinlock_unlock(&exctbl_lock, true);
}
return ret;
}
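/* Sysinfo generator for "system.physmem": physical memory zone statistics. */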
static void *get_stats_physmem(struct sysinfo_item *item, size_t *size,
bool dry_run, void *data)
{
*size = sizeof(stats_physmem_t);
if (dry_run)
return NULL;
stats_physmem_t *stats_physmem =
(stats_physmem_t *) malloc(*size);
if (stats_physmem == NULL) {
*size = 0;
return NULL;
}
zones_stats(&(stats_physmem->total), &(stats_physmem->unavail),
&(stats_physmem->used), &(stats_physmem->free));
return ((void *) stats_physmem);
}
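/*
 * Sysinfo generator for "system.load": copy of avenrdy[], shifted to the
 * fixed-point precision expected by user space.
 */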
static void *get_stats_load(struct sysinfo_item *item, size_t *size,
bool dry_run, void *data)
{
*size = sizeof(load_t) * LOAD_STEPS;
if (dry_run)
return NULL;
load_t *stats_load = (load_t *) malloc(*size);
if (stats_load == NULL) {
*size = 0;
return NULL;
}
mutex_lock(&load_lock);
unsigned int i;
for (i = 0; i < LOAD_STEPS; i++)
stats_load[i] = avenrdy[i] << LOAD_KERNEL_SHIFT;
mutex_unlock(&load_lock);
return ((void *) stats_load);
}
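/*
 * One step of the exponentially weighted moving average:
 * load' = (load * exp + (ready << LOAD_FIXED_SHIFT) * (LOAD_FIXED_1 - exp))
 *     >> LOAD_FIXED_SHIFT
 */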
static inline load_t load_calc(load_t load, load_t exp, size_t ready)
{
load *= exp;
load += (ready << LOAD_FIXED_SHIFT) * (LOAD_FIXED_1 - exp);
return (load >> LOAD_FIXED_SHIFT);
}
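/* Kernel thread recomputing the load averages every LOAD_INTERVAL seconds. */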
void kload(void *arg)
{
while (true) {
size_t ready = atomic_load(&nrdy);
mutex_lock(&load_lock);
unsigned int i;
for (i = 0; i < LOAD_STEPS; i++)
avenrdy[i] = load_calc(avenrdy[i], load_exp[i], ready);
mutex_unlock(&load_lock);
thread_sleep(LOAD_INTERVAL);
}
}
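/* Register the statistics items and subtrees with the sysinfo subsystem. */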
void stats_init(void)
{
mutex_initialize(&load_lock, MUTEX_PASSIVE);
sysinfo_set_item_gen_data("system.cpus", NULL, get_stats_cpus, NULL);
sysinfo_set_item_gen_data("system.physmem", NULL, get_stats_physmem, NULL);
sysinfo_set_item_gen_data("system.load", NULL, get_stats_load, NULL);
sysinfo_set_item_gen_data("system.tasks", NULL, get_stats_tasks, NULL);
sysinfo_set_item_gen_data("system.threads", NULL, get_stats_threads, NULL);
sysinfo_set_item_gen_data("system.ipccs", NULL, get_stats_ipccs, NULL);
sysinfo_set_item_gen_data("system.exceptions", NULL, get_stats_exceptions, NULL);
sysinfo_set_subtree_fn("system.tasks", NULL, get_stats_task, NULL);
sysinfo_set_subtree_fn("system.threads", NULL, get_stats_thread, NULL);
sysinfo_set_subtree_fn("system.exceptions", NULL, get_stats_exception, NULL);
}