HelenOS sources
This source file includes the following definitions:
- elf_nonanon_pages_get
- elf_orig_page
- elf_create
- elf_resize
- elf_share
- elf_destroy
- elf_is_resizable
- elf_is_shareable
- elf_page_fault
- elf_frame_free
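
/*
 * Backend for address space areas backed by an ELF image.
 */
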
#include <lib/elf.h>
#include <assert.h>
#include <typedefs.h>
#include <mm/as.h>
#include <mm/frame.h>
#include <mm/slab.h>
#include <mm/page.h>
#include <mm/reserve.h>
#include <mm/km.h>
#include <genarch/mm/page_pt.h>
#include <genarch/mm/page_ht.h>
#include <align.h>
#include <memw.h>
#include <macros.h>
#include <arch.h>
#include <barrier.h>
static bool elf_create(as_area_t *);
static bool elf_resize(as_area_t *, size_t);
static void elf_share(as_area_t *);
static void elf_destroy(as_area_t *);
static bool elf_is_resizable(as_area_t *);
static bool elf_is_shareable(as_area_t *);
static int elf_page_fault(as_area_t *, uintptr_t, pf_access_t);
static void elf_frame_free(as_area_t *, uintptr_t, uintptr_t);
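
/*
 * The operations vector of this backend. Generic address space code
 * invokes the ELF backend through these entry points.
 */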
mem_backend_t elf_backend = {
    .create = elf_create,
    .resize = elf_resize,
    .share = elf_share,
    .destroy = elf_destroy,
    .is_resizable = elf_is_resizable,
    .is_shareable = elf_is_shareable,
    .page_fault = elf_page_fault,
    .frame_free = elf_frame_free,
    .create_shared_data = NULL,
    .destroy_shared_data = NULL
};
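
For context, a loader attaches this backend when it creates an address space area for a loadable segment. The sketch below is illustrative only: the as_area_create() signature is assumed from kernel/generic/src/mm/as.c, and elf, entry, as, flags and base stand for values the caller would already hold.

/* Illustrative sketch; not part of this file. */
mem_backend_data_t backend_data;
backend_data.elf = elf;                 /* mapped ELF image header */
backend_data.segment = entry;           /* the PT_LOAD program header */
backend_data.elf_base = entry->p_vaddr; /* original segment base (assumed) */

as_area_t *area = as_area_create(as, flags, entry->p_memsz,
    AS_AREA_ATTR_NONE, &elf_backend, &backend_data, &base,
    entry->p_vaddr);
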
/*
 * Compute the number of pages that are backed directly by the
 * read-only ELF image and therefore need no memory reservation.
 * Writable segments are always backed by copied frames, so for
 * them the result is zero.
 */
static size_t elf_nonanon_pages_get(as_area_t *area)
{
    elf_segment_header_t *entry = area->backend_data.segment;
    uintptr_t first = ALIGN_UP(entry->p_vaddr, PAGE_SIZE);
    uintptr_t last = ALIGN_DOWN(entry->p_vaddr + entry->p_filesz,
        PAGE_SIZE);

    if (entry->p_flags & PF_W)
        return 0;

    if (last < first)
        return 0;

    /* Convert the byte difference to a page count. */
    return (last - first) >> PAGE_WIDTH;
}
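
/*
 * Translate a page address within the area to the matching virtual
 * address in the original ELF segment. The two differ when the area
 * is mapped at a base other than the segment's load address.
 */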
static uintptr_t elf_orig_page(as_area_t *area, uintptr_t page)
{
    return page - area->base + area->backend_data.elf_base;
}
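
/*
 * Called when the area is created. Reserve memory for the anonymous
 * portion of the area; pages backed directly by the ELF image need
 * no reservation.
 */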
bool elf_create(as_area_t *area)
{
    size_t nonanon_pages = elf_nonanon_pages_get(area);

    if (area->pages <= nonanon_pages)
        return true;

    return reserve_try_alloc(area->pages - nonanon_pages);
}
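
/*
 * Called when the area is resized. Grow or shrink the memory
 * reservation to track the anonymous portion of the area.
 */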
bool elf_resize(as_area_t *area, size_t new_pages)
{
    size_t nonanon_pages = elf_nonanon_pages_get(area);

    if (new_pages > area->pages) {
        /* The area is growing. */
        if (area->pages >= nonanon_pages)
            return reserve_try_alloc(new_pages - area->pages);
        else if (new_pages > nonanon_pages)
            return reserve_try_alloc(new_pages - nonanon_pages);
    } else if (new_pages < area->pages) {
        /* The area is shrinking. */
        if (new_pages >= nonanon_pages)
            reserve_free(area->pages - new_pages);
        else if (area->pages > nonanon_pages)
            reserve_free(area->pages - nonanon_pages);
    }

    return true;
}
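
/*
 * Share the area by recording its mappings in the sh_info pagemap.
 * For writable areas every mapped page is duplicated there; for
 * read-only areas only the portions not backed by the ELF image are.
 * The address space and the area must be locked prior to the call.
 */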
void elf_share(as_area_t *area)
{
    elf_segment_header_t *entry = area->backend_data.segment;
    used_space_ival_t *start;
    used_space_ival_t *cur;
    uintptr_t start_anon = entry->p_vaddr + entry->p_filesz;

    assert(mutex_locked(&area->as->lock));
    assert(mutex_locked(&area->lock));

    /* Find the interval in which to start the linear search. */
    if (area->flags & AS_AREA_WRITE) {
        start = used_space_first(&area->used_space);
    } else {
        start = used_space_find_gteq(&area->used_space, start_anon);
    }

    /* Copy used anonymous portions of the area to the pagemap. */
    mutex_lock(&area->sh_info->lock);
    cur = start;
    while (cur != NULL) {
        uintptr_t base = cur->page;
        size_t count = cur->count;
        unsigned int i;

        /*
         * Skip read-only intervals that are backed entirely by the
         * ELF image. Advance to the next interval explicitly, lest
         * the loop never terminates.
         */
        if (!(area->flags & AS_AREA_WRITE))
            if (base >= entry->p_vaddr &&
                base + P2SZ(count) <= start_anon) {
                cur = used_space_next(cur);
                continue;
            }

        for (i = 0; i < count; i++) {
            pte_t pte;
            bool found;

            /* Skip read-only pages backed by the ELF image. */
            if (!(area->flags & AS_AREA_WRITE))
                if (base >= entry->p_vaddr &&
                    base + P2SZ(i + 1) <= start_anon)
                    continue;

            page_table_lock(area->as, false);
            found = page_mapping_find(area->as,
                base + P2SZ(i), false, &pte);

            (void) found;
            assert(found);
            assert(PTE_VALID(&pte));
            assert(PTE_PRESENT(&pte));

            as_pagemap_insert(&area->sh_info->pagemap,
                (base + P2SZ(i)) - area->base,
                PTE_GET_FRAME(&pte));
            page_table_unlock(area->as, false);

            pfn_t pfn = ADDR2PFN(PTE_GET_FRAME(&pte));
            frame_reference_add(pfn);
        }
        cur = used_space_next(cur);
    }
    mutex_unlock(&area->sh_info->lock);
}
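
/*
 * Called when the area is destroyed. Release the reservation held
 * for the anonymous portion of the area.
 */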
void elf_destroy(as_area_t *area)
{
    size_t nonanon_pages = elf_nonanon_pages_get(area);

    if (area->pages > nonanon_pages)
        reserve_free(area->pages - nonanon_pages);
}

bool elf_is_resizable(as_area_t *area)
{
    return true;
}

bool elf_is_shareable(as_area_t *area)
{
    return true;
}
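
/*
 * Service a page fault in an ELF-backed address space area. The
 * faulting address space area and its page tables must be locked
 * on call. Returns AS_PF_OK on success, AS_PF_FAULT otherwise.
 */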
int elf_page_fault(as_area_t *area, uintptr_t upage, pf_access_t access)
{
    elf_header_t *elf = area->backend_data.elf;
    elf_segment_header_t *entry = area->backend_data.segment;
    uintptr_t base;
    uintptr_t frame;
    uintptr_t kpage;
    uintptr_t start_anon;
    uintptr_t elfpage;
    size_t i;
    bool dirty = false;

    assert(page_table_locked(AS));
    assert(mutex_locked(&area->lock));
    assert(IS_ALIGNED(upage, PAGE_SIZE));

    elfpage = elf_orig_page(area, upage);

    if (!as_area_check_access(area, access))
        return AS_PF_FAULT;

    if (elfpage < ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE))
        return AS_PF_FAULT;

    if (elfpage >= entry->p_vaddr + entry->p_memsz)
        return AS_PF_FAULT;

    /* Index of the faulting page within the segment. */
    i = (elfpage - ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE)) >>
        PAGE_WIDTH;
    base = (uintptr_t)
        (((void *) elf) + ALIGN_DOWN(entry->p_offset, PAGE_SIZE));

    /* Virtual address of the end of the initialized part of the segment. */
    start_anon = entry->p_vaddr + entry->p_filesz;

    mutex_lock(&area->sh_info->lock);
    if (area->sh_info->shared) {
        /*
         * The address space area is shared. Try to find the frame
         * in the pagemap first.
         */
        errno_t rc = as_pagemap_find(&area->sh_info->pagemap,
            upage - area->base, &frame);
        if (rc == EOK) {
            frame_reference_add(ADDR2PFN(frame));
            page_mapping_insert(AS, upage, frame,
                as_area_get_flags(area));
            if (!used_space_insert(&area->used_space, upage, 1))
                panic("Cannot insert used space.");
            mutex_unlock(&area->sh_info->lock);
            return AS_PF_OK;
        }
    }

    /*
     * The area is either not shared or the pagemap does not contain
     * the mapping.
     */
    if (elfpage >= entry->p_vaddr && elfpage + PAGE_SIZE <= start_anon) {
        /*
         * Initialized portion of the segment, backed directly by the
         * ELF image. Pages of writable segments are copied; read-only
         * pages are mapped in place.
         */
        if (entry->p_flags & PF_W) {
            kpage = km_temporary_page_get(&frame, FRAME_NO_RESERVE);
            memcpy((void *) kpage, (void *) (base + i * PAGE_SIZE),
                PAGE_SIZE);
            if (entry->p_flags & PF_X) {
                smc_coherence((void *) kpage, PAGE_SIZE);
            }
            km_temporary_page_put(kpage);
            dirty = true;
        } else {
            pte_t pte;
            bool found;

            found = page_mapping_find(AS_KERNEL,
                base + i * FRAME_SIZE, true, &pte);

            (void) found;
            assert(found);
            assert(PTE_PRESENT(&pte));

            frame = PTE_GET_FRAME(&pte);
        }
    } else if (elfpage >= start_anon) {
        /*
         * Uninitialized portion of the segment; allocate a new frame
         * and clear it.
         */
        kpage = km_temporary_page_get(&frame, FRAME_NO_RESERVE);
        memsetb((void *) kpage, PAGE_SIZE, 0);
        km_temporary_page_put(kpage);
        dirty = true;
    } else {
        size_t pad_lo, pad_hi;
        /*
         * The mixed case: the middle of the page is backed by the
         * ELF image while the lower and upper parts are anonymous
         * memory.
         */
        if (upage < entry->p_vaddr)
            pad_lo = entry->p_vaddr - upage;
        else
            pad_lo = 0;

        if (start_anon < upage + PAGE_SIZE)
            pad_hi = upage + PAGE_SIZE - start_anon;
        else
            pad_hi = 0;

        kpage = km_temporary_page_get(&frame, FRAME_NO_RESERVE);
        memcpy((void *) (kpage + pad_lo),
            (void *) (base + i * PAGE_SIZE + pad_lo),
            PAGE_SIZE - pad_lo - pad_hi);
        if (entry->p_flags & PF_X) {
            smc_coherence((void *) (kpage + pad_lo),
                PAGE_SIZE - pad_lo - pad_hi);
        }
        memsetb((void *) kpage, pad_lo, 0);
        memsetb((void *) (kpage + PAGE_SIZE - pad_hi), pad_hi, 0);
        km_temporary_page_put(kpage);
        dirty = true;
    }

    if (dirty && area->sh_info->shared) {
        /* Newly created frames of shared areas go to the pagemap. */
        frame_reference_add(ADDR2PFN(frame));
        as_pagemap_insert(&area->sh_info->pagemap, upage - area->base,
            frame);
    }
    mutex_unlock(&area->sh_info->lock);

    page_mapping_insert(AS, upage, frame, as_area_get_flags(area));
    if (!used_space_insert(&area->used_space, upage, 1))
        panic("Cannot insert used space.");

    return AS_PF_OK;
}
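
/*
 * Free a frame belonging to the area. Frames that map the read-only
 * ELF image itself are shared with other instances of the image and
 * are never freed here.
 */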
void elf_frame_free(as_area_t *area, uintptr_t page, uintptr_t frame)
{
    elf_segment_header_t *entry = area->backend_data.segment;
    uintptr_t start_anon;
    uintptr_t elfpage;

    assert(page_table_locked(area->as));
    assert(mutex_locked(&area->lock));

    elfpage = elf_orig_page(area, page);

    assert(elfpage >= ALIGN_DOWN(entry->p_vaddr, PAGE_SIZE));
    assert(elfpage < entry->p_vaddr + entry->p_memsz);

    start_anon = entry->p_vaddr + entry->p_filesz;

    if (elfpage >= entry->p_vaddr && elfpage + PAGE_SIZE <= start_anon) {
        if (entry->p_flags & PF_W) {
            /*
             * Free the frame holding the private copy of the
             * writable segment data.
             */
            frame_free_noreserve(frame, 1);
        }
        /* Read-only frames backed by the image are not freed. */
    } else {
        /* The anonymous portion: always free the frame. */
        frame_free_noreserve(frame, 1);
    }
}
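
For completeness, the generic fault path reaches these hooks through the elf_backend operations table above. The following is a paraphrased sketch of that dispatch; dispatch_page_fault() is a hypothetical name, not the literal code in kernel/generic/src/mm/as.c.

/* Hypothetical helper illustrating the dispatch; names are not from as.c. */
static int dispatch_page_fault(as_area_t *area, uintptr_t fault_addr,
    pf_access_t access)
{
    if (area->backend == NULL || area->backend->page_fault == NULL)
        return AS_PF_FAULT;

    /* For ELF-backed areas this invokes elf_page_fault() above. */
    return area->backend->page_fault(area,
        ALIGN_DOWN(fault_addr, PAGE_SIZE), access);
}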