HelenOS sources
This source file includes the following definitions.
- tlb_invalidate_all
- tlb_invalidate_asid
- tlb_invalidate_pages
- dtc_mapping_insert
- itc_mapping_insert
- tc_mapping_insert
- itr_mapping_insert
- dtr_mapping_insert
- tr_mapping_insert
- dtlb_kernel_mapping_insert
- dtr_purge
- dtc_pte_copy
- itc_pte_copy
- is_kernel_fault
- alternate_instruction_tlb_fault
- is_io_page_accessible
- try_memmap_io_insertion
- alternate_data_tlb_fault
- data_nested_tlb_fault
- data_dirty_bit_fault
- instruction_access_bit_fault
- data_access_bit_fault
- data_access_rights_fault
- page_not_present
- tlb_arch_init
- tlb_print
#include <mm/tlb.h>
#include <mm/asid.h>
#include <mm/page.h>
#include <mm/as.h>
#include <arch/mm/tlb.h>
#include <arch/mm/page.h>
#include <arch/mm/vhpt.h>
#include <barrier.h>
#include <arch/interrupt.h>
#include <arch/pal/pal.h>
#include <arch/asm.h>
#include <assert.h>
#include <panic.h>
#include <arch.h>
#include <interrupt.h>
#include <arch/legacyio.h>
/** Invalidate all TLB entries on the local processor.
 *
 * The purge-loop geometry (base address, two nested loop counts and
 * strides) is obtained from PAL via the PTCE info calls; each ptc.e
 * instruction purges one chunk of the translation cache.
 */
void tlb_invalidate_all(void)
{
	ipl_t ipl;
	uintptr_t adr;
	uint32_t count1, count2, stride1, stride2;
	unsigned int i, j;

	/* Geometry of the ptc.e purge loop, as published by PAL. */
	adr = PAL_PTCE_INFO_BASE();
	count1 = PAL_PTCE_INFO_COUNT1();
	count2 = PAL_PTCE_INFO_COUNT2();
	stride1 = PAL_PTCE_INFO_STRIDE1();
	stride2 = PAL_PTCE_INFO_STRIDE2();

	/* Run the whole purge sequence with interrupts disabled. */
	ipl = interrupts_disable();

	for (i = 0; i < count1; i++) {
		for (j = 0; j < count2; j++) {
			asm volatile (
			    "ptc.e %[adr] ;;"
			    :: [adr] "r" (adr)
			);
			adr += stride2;
		}
		adr += stride1;
	}

	interrupts_restore(ipl);

	/* Serialize the data and instruction streams after the purge. */
	srlz_d();
	srlz_i();

#ifdef CONFIG_VHPT
	/* The VHPT mirrors translations; flush it as well. */
	vhpt_invalidate_all();
#endif
}
/** Invalidate TLB entries belonging to one address space.
 *
 * The current implementation is coarse-grained: it simply flushes
 * every translation on this CPU instead of purging by ASID.
 *
 * @param asid Address space identifier (presently unused).
 */
void tlb_invalidate_asid(asid_t asid)
{
	tlb_invalidate_all();
}
/** Invalidate TLB entries covering a range of pages.
 *
 * @param asid Address space identifier the pages belong to.
 * @param page Virtual address of the first page in the range.
 * @param cnt  Number of pages to invalidate.
 */
void tlb_invalidate_pages(asid_t asid, uintptr_t page, size_t cnt)
{
	region_register_t rr;
	bool restore_rr = false;
	int b = 0;
	int c = cnt;
	uintptr_t va;

	va = page;

	/*
	 * ptc.l purges by the RID currently programmed in the region
	 * register; temporarily reprogram it if it does not already
	 * correspond to asid, and remember to restore it afterwards.
	 */
	rr.word = rr_read(VA2VRN(page));
	if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(page))))) {
		region_register_t rr0;

		rr0 = rr;
		rr0.map.rid = ASID2RID(asid, VA2VRN(page));
		rr_write(VA2VRN(page), rr0.word);
		srlz_d();
		srlz_i();
	}

	/* b = floor(log2(cnt)) / 2: selects a purge granularity class. */
	while (c >>= 1)
		b++;
	b >>= 1;

	uint64_t ps;

	/*
	 * Map the granularity class to a supported purge size (2^ps
	 * bytes) and align va down to that size so each ptc.l covers
	 * a whole naturally-aligned chunk.
	 */
	switch (b) {
	case 0: /* cnt 1 - 3 */
		ps = PAGE_WIDTH;
		break;
	case 1: /* cnt 4 - 15 */
		ps = PAGE_WIDTH + 2;
		va &= ~((1UL << ps) - 1);
		break;
	case 2: /* cnt 16 - 63 */
		ps = PAGE_WIDTH + 4;
		va &= ~((1UL << ps) - 1);
		break;
	case 3: /* cnt 64 - 255 */
		ps = PAGE_WIDTH + 6;
		va &= ~((1UL << ps) - 1);
		break;
	case 4: /* cnt 256 - 1023 */
		ps = PAGE_WIDTH + 8;
		va &= ~((1UL << ps) - 1);
		break;
	case 5: /* cnt 1024 - 4095 */
		ps = PAGE_WIDTH + 10;
		va &= ~((1UL << ps) - 1);
		break;
	case 6: /* cnt 4096 - 16383 */
		ps = PAGE_WIDTH + 12;
		va &= ~((1UL << ps) - 1);
		break;
	case 7:
	case 8:
		ps = PAGE_WIDTH + 14;
		va &= ~((1UL << ps) - 1);
		break;
	default:
		ps = PAGE_WIDTH + 18;
		va &= ~((1UL << ps) - 1);
		break;
	}

	/* Purge the range in 2^ps-byte steps; ps is encoded in bits 7:2. */
	for (; va < (page + cnt * PAGE_SIZE); va += (1UL << ps))
		asm volatile (
		    "ptc.l %[va], %[ps] ;;"
		    :: [va] "r" (va),
		       [ps] "r" (ps << 2)
		);

	srlz_d();
	srlz_i();

	if (restore_rr) {
		/* Put the original RID back into the region register. */
		rr_write(VA2VRN(page), rr.word);
		srlz_d();
		srlz_i();
	}
}
/** Insert an entry into the data translation cache.
 *
 * @param va    Virtual page address.
 * @param asid  Address space identifier.
 * @param entry Prepared TLB entry to insert.
 */
void dtc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry)
{
	/* Common helper; true selects the data TC. */
	tc_mapping_insert(va, asid, entry, true);
}
/** Insert an entry into the instruction translation cache.
 *
 * @param va    Virtual page address.
 * @param asid  Address space identifier.
 * @param entry Prepared TLB entry to insert.
 */
void itc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry)
{
	/* Common helper; false selects the instruction TC. */
	tc_mapping_insert(va, asid, entry, false);
}
/** Insert an entry into the instruction or data translation cache.
 *
 * @param va    Virtual page address.
 * @param asid  Address space identifier.
 * @param entry Prepared TLB entry to insert.
 * @param dtc   If true, insert into the data TC; otherwise into the
 *              instruction TC.
 */
void tc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, bool dtc)
{
	region_register_t rr;
	bool restore_rr = false;

	/*
	 * The insertion is tagged with the RID programmed in the region
	 * register; temporarily switch it if it does not match asid.
	 */
	rr.word = rr_read(VA2VRN(va));
	if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
		region_register_t rr0;

		rr0 = rr;
		rr0.map.rid = ASID2RID(asid, VA2VRN(va));
		rr_write(VA2VRN(va), rr0.word);
		srlz_d();
		srlz_i();
	}

	/*
	 * With PSR.ic cleared (rsm), load cr.ifa with the address and
	 * cr.itir with the page-size/key word, then execute the
	 * predicated itc.i/itc.d depending on dtc.  The original PSR
	 * is stashed in r8 and restored afterwards.
	 */
	asm volatile (
	    "mov r8 = psr ;;\n"
	    "rsm %[mask] ;;\n"
	    "srlz.d ;;\n"
	    "srlz.i ;;\n"
	    "mov cr.ifa = %[va]\n"
	    "mov cr.itir = %[word1] ;;\n"
	    "cmp.eq p6, p7 = %[dtc], r0 ;;\n"
	    "(p6) itc.i %[word0] ;;\n"
	    "(p7) itc.d %[word0] ;;\n"
	    "mov psr.l = r8 ;;\n"
	    "srlz.d ;;\n"
	    :: [mask] "i" (PSR_IC_MASK),
	       [va] "r" (va),
	       [word0] "r" (entry.word[0]),
	       [word1] "r" (entry.word[1]),
	       [dtc] "r" (dtc)
	    : "p6", "p7", "r8"
	);

	if (restore_rr) {
		/* Restore the original region register contents. */
		rr_write(VA2VRN(va), rr.word);
		srlz_d();
		srlz_i();
	}
}
/** Insert an entry into an instruction translation register.
 *
 * @param va    Virtual page address.
 * @param asid  Address space identifier.
 * @param entry Prepared TLB entry to insert.
 * @param tr    Translation register slot to use.
 */
void itr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, size_t tr)
{
	/* Common helper; false selects the instruction TRs. */
	tr_mapping_insert(va, asid, entry, false, tr);
}
/** Insert an entry into a data translation register.
 *
 * @param va    Virtual page address.
 * @param asid  Address space identifier.
 * @param entry Prepared TLB entry to insert.
 * @param tr    Translation register slot to use.
 */
void dtr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, size_t tr)
{
	/* Common helper; true selects the data TRs. */
	tr_mapping_insert(va, asid, entry, true, tr);
}
/** Insert an entry into an instruction or data translation register.
 *
 * Translation registers are pinned entries that survive TC purges.
 *
 * @param va    Virtual page address.
 * @param asid  Address space identifier.
 * @param entry Prepared TLB entry to insert.
 * @param dtr   If true, use a data TR; otherwise an instruction TR.
 * @param tr    Translation register slot to use.
 */
void tr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, bool dtr,
    size_t tr)
{
	region_register_t rr;
	bool restore_rr = false;

	/*
	 * The insertion is tagged with the RID programmed in the region
	 * register; temporarily switch it if it does not match asid.
	 */
	rr.word = rr_read(VA2VRN(va));
	if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
		region_register_t rr0;

		rr0 = rr;
		rr0.map.rid = ASID2RID(asid, VA2VRN(va));
		rr_write(VA2VRN(va), rr0.word);
		srlz_d();
		srlz_i();
	}

	/*
	 * Same dance as tc_mapping_insert, but the predicated
	 * instructions are itr.i/itr.d targeting TR slot tr.
	 */
	asm volatile (
	    "mov r8 = psr ;;\n"
	    "rsm %[mask] ;;\n"
	    "srlz.d ;;\n"
	    "srlz.i ;;\n"
	    "mov cr.ifa = %[va]\n"
	    "mov cr.itir = %[word1] ;;\n"
	    "cmp.eq p6, p7 = %[dtr], r0 ;;\n"
	    "(p6) itr.i itr[%[tr]] = %[word0] ;;\n"
	    "(p7) itr.d dtr[%[tr]] = %[word0] ;;\n"
	    "mov psr.l = r8 ;;\n"
	    "srlz.d ;;\n"
	    :: [mask] "i" (PSR_IC_MASK),
	       [va] "r" (va),
	       [word1] "r" (entry.word[1]),
	       [word0] "r" (entry.word[0]),
	       [tr] "r" (tr),
	       [dtr] "r" (dtr)
	    : "p6", "p7", "r8"
	);

	if (restore_rr) {
		/* Restore the original region register contents. */
		rr_write(VA2VRN(va), rr.word);
		srlz_d();
		srlz_i();
	}
}
/** Insert a kernel mapping into the data TLB.
 *
 * Builds a present, accessed, dirty, writeback, kernel read/write
 * entry for one page and inserts it either into a pinned translation
 * register or into the translation cache.
 *
 * @param page  Virtual page address.
 * @param frame Physical frame address.
 * @param dtr   If true, install the mapping into data TR slot tr;
 *              otherwise insert a plain DTC entry.
 * @param tr    Translation register slot (used only when dtr is true).
 */
void dtlb_kernel_mapping_insert(uintptr_t page, uintptr_t frame, bool dtr,
    size_t tr)
{
	tlb_entry_t e;

	/* Clear both words, then fill in the relevant fields. */
	e.word[0] = 0;
	e.word[1] = 0;

	e.p = true;
	e.a = true;
	e.d = true;
	e.pl = PL_KERNEL;
	e.ar = AR_READ | AR_WRITE;
	e.ma = MA_WRITEBACK;
	e.ppn = frame >> PPN_SHIFT;
	e.ps = PAGE_WIDTH;

	if (dtr)
		dtr_mapping_insert(page, ASID_KERNEL, e, tr);
	else
		dtc_mapping_insert(page, ASID_KERNEL, e);
}
/** Purge a data translation register covering the given area.
 *
 * @param page  Virtual page address.
 * @param width Log2 of the size of the purged area in bytes; encoded
 *              shifted left by two, matching the ps encoding used by
 *              the ptc.l path above.
 */
void dtr_purge(uintptr_t page, size_t width)
{
	asm volatile (
	    "ptr.d %[page], %[width]\n"
	    :: [page] "r" (page),
	       [width] "r" (width << 2)
	);
}
/** Copy the contents of a software PTE into the data translation cache.
 *
 * @param t Software page table entry to mirror in the DTC.
 */
void dtc_pte_copy(pte_t *t)
{
	tlb_entry_t e;

	e.word[0] = 0;
	e.word[1] = 0;

	/* Translate the software PTE flags into hardware entry fields. */
	e.p = t->p;
	e.a = t->a;
	e.d = t->d;
	e.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE;
	e.pl = t->k ? PL_KERNEL : PL_USER;
	e.ar = t->w ? AR_WRITE : AR_READ;
	e.ppn = t->frame >> PPN_SHIFT;
	e.ps = PAGE_WIDTH;

	dtc_mapping_insert(t->page, t->as->asid, e);

#ifdef CONFIG_VHPT
	vhpt_mapping_insert(t->page, t->as->asid, e);
#endif
}
/** Copy the contents of a software PTE into the instruction TC.
 *
 * @param t Software page table entry to mirror in the ITC; must be
 *          executable.
 */
void itc_pte_copy(pte_t *t)
{
	/* Only executable pages belong in the instruction TC. */
	assert(t->x);

	tlb_entry_t e;

	e.word[0] = 0;
	e.word[1] = 0;

	/* Translate the software PTE flags into hardware entry fields. */
	e.p = t->p;
	e.a = t->a;
	e.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE;
	e.pl = t->k ? PL_KERNEL : PL_USER;
	e.ar = t->x ? (AR_EXECUTE | AR_READ) : AR_READ;
	e.ppn = t->frame >> PPN_SHIFT;
	e.ps = PAGE_WIDTH;

	itc_mapping_insert(t->page, t->as->asid, e);

#ifdef CONFIG_VHPT
	vhpt_mapping_insert(t->page, t->as->asid, e);
#endif
}
/** Decide whether a fault at va is a kernel-space fault.
 *
 * @param istate Interrupted state.
 * @param va     Faulting virtual address.
 * @return true iff the fault did not come from userspace and va lies
 *         in the kernel region tagged with the kernel ASID.
 */
static bool is_kernel_fault(istate_t *istate, uintptr_t va)
{
	if (istate_from_uspace(istate))
		return false;

	region_register_t rr;
	rr.word = rr_read(VA2VRN(va));

	if (RID2ASID(rr.map.rid) != ASID_KERNEL)
		return false;

	return VA2VRN(va) == VRN_KERNEL;
}
/** Handle an alternate instruction TLB fault (ITC miss).
 *
 * @param n      Interruption vector.
 * @param istate Interrupted state; must originate in userspace.
 */
void alternate_instruction_tlb_fault(unsigned int n, istate_t *istate)
{
	assert(istate_from_uspace(istate));

	uintptr_t va = istate->cr_ifa;

	pte_t t;
	if (!page_mapping_find(AS, va, true, &t)) {
		/* No mapping yet: let the address space layer resolve it. */
		as_page_fault(va, PF_ACCESS_EXEC, istate);
		return;
	}

	/* The mapping exists; install it into the instruction TC. */
	assert(t.p);
	itc_pte_copy(&t);
}
/** Check whether the current task may access the given legacy I/O page.
 *
 * @param page Index of the I/O page in the task's I/O bitmap.
 * @return Non-zero if the task has an I/O bitmap and the bit for page
 *         is set; zero otherwise.
 */
static int is_io_page_accessible(int page)
{
	return TASK->arch.iomap ? bitmap_get(TASK->arch.iomap, page) : 0;
}
/** Try to resolve a data fault as a legacy I/O access.
 *
 * If va falls within the user-visible legacy I/O window and the
 * current task's I/O bitmap permits the access, an uncacheable DTC
 * mapping for the corresponding physical I/O page is inserted.
 *
 * @param va     Faulting virtual address.
 * @param istate Interrupted state (used for fault reporting).
 * @return 1 if a mapping was inserted, 0 otherwise.
 */
static int try_memmap_io_insertion(uintptr_t va, istate_t *istate)
{
	/* Outside the legacy I/O window: not our business. */
	if ((va < LEGACYIO_USER_BASE) ||
	    (va >= LEGACYIO_USER_BASE + (1 << LEGACYIO_PAGE_WIDTH)))
		return 0;

	if (!TASK)
		return 0;

	uint64_t io_page = (va & ((1 << LEGACYIO_PAGE_WIDTH) - 1)) >>
	    LEGACYIO_SINGLE_PAGE_WIDTH;

	if (!is_io_page_accessible(io_page)) {
		fault_if_from_uspace(istate,
		    "IO access fault at %p.", (void *) va);
		return 0;
	}

	uint64_t page = LEGACYIO_USER_BASE +
	    (1 << LEGACYIO_SINGLE_PAGE_WIDTH) * io_page;
	uint64_t frame = LEGACYIO_PHYS_BASE +
	    (1 << LEGACYIO_SINGLE_PAGE_WIDTH) * io_page;

	/* Uncacheable user read/write mapping for the I/O page. */
	tlb_entry_t e;

	e.word[0] = 0;
	e.word[1] = 0;

	e.p = true;
	e.a = true;
	e.d = true;
	e.ma = MA_UNCACHEABLE;
	e.pl = PL_USER;
	e.ar = AR_READ | AR_WRITE;
	e.ppn = frame >> PPN_SHIFT;
	e.ps = LEGACYIO_SINGLE_PAGE_WIDTH;

	dtc_mapping_insert(page, TASK->as->asid, e);
	return 1;
}
/** Handle an alternate data TLB fault (DTC miss).
 *
 * @param n      Interruption vector.
 * @param istate Interrupted state.
 */
void alternate_data_tlb_fault(unsigned int n, istate_t *istate)
{
	/*
	 * Speculative access (cr_isr.sp): set the exception-deferral
	 * bit and let the consumer of the speculative value deal with
	 * the fault.
	 */
	if (istate->cr_isr.sp) {
		istate->cr_ipsr.ed = true;
		return;
	}

	uintptr_t va = istate->cr_ifa;
	as_t *as = AS;

	if (is_kernel_fault(istate, va)) {
		if (va < end_of_identity) {
			/* Identity-mapped kernel address. */
			dtlb_kernel_mapping_insert(va, KA2PA(va), false, 0);
			return;
		}
		/* Kernel non-identity address: look up in AS_KERNEL. */
		as = AS_KERNEL;
	}

	pte_t t;
	if (page_mapping_find(as, va, true, &t)) {
		/* Mapping found; install it into the data TC. */
		assert(t.p);
		dtc_pte_copy(&t);
		return;
	}

	/* Maybe a legacy I/O access; otherwise raise a page fault. */
	if (!try_memmap_io_insertion(va, istate))
		as_page_fault(va, PF_ACCESS_READ, istate);
}
/** Handle a data nested TLB fault.
 *
 * This condition is not expected to occur in this implementation.
 *
 * @param n      Interruption vector.
 * @param istate Interrupted state.
 */
void data_nested_tlb_fault(unsigned int n, istate_t *istate)
{
	assert(false);
}
/** Handle a data dirty-bit fault (write to a page with D clear).
 *
 * @param n      Interruption vector.
 * @param istate Interrupted state.
 */
void data_dirty_bit_fault(unsigned int n, istate_t *istate)
{
	as_t *as = AS;
	uintptr_t va = istate->cr_ifa;

	if (is_kernel_fault(istate, va))
		as = AS_KERNEL;

	pte_t t;
	bool found = page_mapping_find(as, va, true, &t);

	assert(found);
	assert(t.p);

	if (found && t.p && t.w) {
		/* Record the write in the PTE and refresh the DTC entry. */
		t.d = true;
		dtc_pte_copy(&t);
		page_mapping_update(as, va, true, &t);
	} else {
		as_page_fault(va, PF_ACCESS_WRITE, istate);
	}
}
/** Handle an instruction access-bit fault (fetch with A clear).
 *
 * @param n      Interruption vector.
 * @param istate Interrupted state; must originate in userspace.
 */
void instruction_access_bit_fault(unsigned int n, istate_t *istate)
{
	assert(istate_from_uspace(istate));

	uintptr_t va = istate->cr_ifa;

	pte_t t;
	bool found = page_mapping_find(AS, va, true, &t);

	assert(found);
	assert(t.p);

	if (found && t.p && t.x) {
		/* Mark the page accessed and refresh the ITC entry. */
		t.a = true;
		itc_pte_copy(&t);
		page_mapping_update(AS, va, true, &t);
	} else {
		as_page_fault(va, PF_ACCESS_EXEC, istate);
	}
}
/** Handle a data access-bit fault (access to a page with A clear).
 *
 * @param n      Interruption vector.
 * @param istate Interrupted state.
 */
void data_access_bit_fault(unsigned int n, istate_t *istate)
{
	as_t *as = AS;
	uintptr_t va = istate->cr_ifa;

	if (is_kernel_fault(istate, va))
		as = AS_KERNEL;

	pte_t t;
	bool found = page_mapping_find(as, va, true, &t);

	assert(found);
	assert(t.p);

	if (found && t.p) {
		/* Mark the page accessed and refresh the DTC entry. */
		t.a = true;
		dtc_pte_copy(&t);
		page_mapping_update(as, va, true, &t);
	} else {
		if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
			fault_if_from_uspace(istate, "Page fault at %p.",
			    (void *) va);
			panic_memtrap(istate, PF_ACCESS_UNKNOWN, va, NULL);
		}
	}
}
/** Handle a data access-rights fault (write to a read-only page).
 *
 * @param n      Interruption vector.
 * @param istate Interrupted state; must originate in userspace.
 */
void data_access_rights_fault(unsigned int n, istate_t *istate)
{
	assert(istate_from_uspace(istate));

	uintptr_t va = istate->cr_ifa;

	pte_t t;
	bool found = page_mapping_find(AS, va, true, &t);

	/* Only a present, non-writable mapping can trigger this fault. */
	assert(found);
	assert(t.p);
	assert(!t.w);

	as_page_fault(va, PF_ACCESS_WRITE, istate);
}
/** Handle a page-not-present fault.
 *
 * @param n      Interruption vector.
 * @param istate Interrupted state; must originate in userspace.
 */
void page_not_present(unsigned int n, istate_t *istate)
{
	assert(istate_from_uspace(istate));

	uintptr_t va = istate->cr_ifa;

	pte_t t;
	bool found = page_mapping_find(AS, va, true, &t);
	assert(found);

	if (!t.p) {
		as_page_fault(va, PF_ACCESS_READ, istate);
		return;
	}

	/* Present in the page tables: install into the proper TC. */
	if (t.x)
		itc_pte_copy(&t);
	else
		dtc_pte_copy(&t);
}
/** Architecture-specific TLB initialization (nothing to do here). */
void tlb_arch_init(void)
{
}
/** Print the TLB contents (not implemented on this architecture). */
void tlb_print(void)
{
}
HelenOS homepage, sources at GitHub