diff --git a/arch/x86/pagetables.c b/arch/x86/pagetables.c
index 14d76deb..f388c1a4 100644
--- a/arch/x86/pagetables.c
+++ b/arch/x86/pagetables.c
@@ -37,6 +37,16 @@ static pgentry_t *_tmp_mapping_entry;
 cr3_t __aligned(PAGE_SIZE) cr3;
 cr3_t user_cr3;
 
+/* Used by lower-level vmap() functions - must not be taken before mmap_lock */
+static spinlock_t vmap_lock = SPINLOCK_INIT;
+
+static inline void *tmp_map_mfn(mfn_t mfn) {
+    BUG_ON(mfn_invalid(mfn));
+    set_pgentry(_tmp_mapping_entry, mfn, L1_PROT);
+    invlpg(_tmp_mapping);
+    return _tmp_mapping;
+}
+
 static inline const char *dump_pte_flags(char *buf, size_t size, pte_t pte) {
     /* clang-format off */
     snprintf(buf, size, "%c %c%c%c%c%c%c%c%c%c",
@@ -55,60 +65,130 @@ static inline const char *dump_pte_flags(char *buf, size_t size, pte_t pte) {
     return buf;
 }
 
-static inline void dump_page_table(void *table, int level) {
-    char flags[16];
-    int entries;
-    pte_t *pt = table;
-
+static inline int level_to_entries(int level) {
     switch (level) {
     case 4:
-        entries = L4_PT_ENTRIES;
-        break;
+        return L4_PT_ENTRIES;
     case 3:
-        entries = L3_PT_ENTRIES;
-        break;
+        return L3_PT_ENTRIES;
     case 2:
-        entries = L2_PT_ENTRIES;
-        break;
+        return L2_PT_ENTRIES;
     case 1:
-        entries = L1_PT_ENTRIES;
-        break;
+        return L1_PT_ENTRIES;
     default:
-        return;
-    };
+        return 0;
+    }
+}
 
-    for (int i = 0; i < entries; i++) {
+static inline void dump_pte(void *entry, mfn_t table, int level, int index) {
+    pte_t *pte = entry;
+    paddr_t pt_paddr = mfn_to_paddr(table) + index * sizeof(pgentry_t);
+    paddr_t paddr = mfn_to_paddr(pte->mfn);
+    int indent = (4 - level) * 2;
+    char flags[16];
+
+    dump_pte_flags(flags, sizeof(flags), *pte);
+    printk("[0x%lx] %*s%d[%03u] paddr: 0x%016lx flags: %s\n", pt_paddr, indent, "L",
+           level, index, paddr, flags);
+}
+
+static void dump_pagetable(mfn_t table, int level) {
+    pte_t *pt;
+
+    BUG_ON(mfn_invalid(table));
+    pt = tmp_map_mfn(table);
+    BUG_ON(!pt);
+
+    for (int i = 0; i < level_to_entries(level); i++) {
         if (!pt[i].P)
             continue;
 
-        dump_pte_flags(flags, sizeof(flags), pt[i]);
-        paddr_t paddr = mfn_to_paddr(pt[i].mfn);
-        printk("[%p] %*s%d[%03u] paddr: 0x%016lx flags: %s\n", _ptr(virt_to_paddr(pt)),
-               (4 - level) * 2, "L", level, i, paddr, flags);
+        dump_pte(&pt[i], table, level, i);
 
         if (level == 2 && ((pde_t *) pt)[i].PS)
             continue;
         if (level == 3 && ((pdpe_t *) pt)[i].PS)
             continue;
-        dump_page_table(paddr_to_virt_kern(paddr), level - 1);
+        dump_pagetable(pt[i].mfn, level - 1);
+        pt = tmp_map_mfn(table);
+    }
+}
+
+void dump_pagetables(cr3_t *cr3_ptr) {
+#if defined(__x86_64__)
+    int level = 4;
+#else
+    int level = 3;
+#endif
+    ASSERT(cr3_ptr);
+    if (mfn_invalid(cr3_ptr->mfn)) {
+        warning("CR3: 0x%lx is invalid", cr3_ptr->paddr);
+        return;
     }
+
+    printk("Page Tables: CR3 paddr: 0x%lx\n", cr3_ptr->paddr);
+    spin_lock(&vmap_lock);
+    dump_pagetable(cr3_ptr->mfn, level);
+    spin_unlock(&vmap_lock);
 }
 
-void dump_pagetables(cr3_t cr3) {
-    printk("\nPage Tables:\n");
+static void dump_pagetable_va(cr3_t *cr3_ptr, void *va) {
+    paddr_t tab_paddr;
+    pgentry_t *tab;
+#if defined(__x86_64__)
+    int level = 4;
+#else
+    int level = 3;
+#endif
+
+    ASSERT(cr3_ptr);
+    if (mfn_invalid(cr3_ptr->mfn)) {
+        warning("CR3: 0x%lx is invalid", cr3_ptr->paddr);
+        return;
+    }
+
+    spin_lock(&vmap_lock);
+
+    tab = tmp_map_mfn(cr3_ptr->mfn);
+#if defined(__x86_64__)
+    pml4_t *l4e = l4_table_entry((pml4_t *) tab, va);
+    dump_pte(l4e, cr3_ptr->mfn, level--, l4_table_index(va));
+
+    if (mfn_invalid(l4e->mfn))
+        goto unlock;
+
+    tab_paddr = mfn_to_paddr(l4e->mfn);
+    tab = tmp_map_mfn(l4e->mfn);
+#endif
+    pdpe_t *l3e = l3_table_entry((pdpe_t *) tab, va);
+    dump_pte(l3e, tab_paddr, level--, l3_table_index(va));
+
+    if (mfn_invalid(l3e->mfn) || l3e->PS)
+        goto unlock;
+
+    tab_paddr = mfn_to_paddr(l3e->mfn);
+    tab = tmp_map_mfn(l3e->mfn);
+    pde_t *l2e = l2_table_entry((pde_t *) tab, va);
+    dump_pte(l2e, tab_paddr, level--, l2_table_index(va));
 
-    /* Map all used frames to be able to parse page tables */
-    map_used_memory();
+    if (mfn_invalid(l2e->mfn) || l2e->PS)
+        goto unlock;
 
-    printk("CR3: paddr: 0x%lx\n", cr3.paddr);
-    dump_page_table(paddr_to_virt_kern(cr3.paddr), 4);
+    tab_paddr = mfn_to_paddr(l2e->mfn);
+    tab = tmp_map_mfn(l2e->mfn);
+    pte_t *l1e = l1_table_entry((pte_t *) tab, va);
+    dump_pte(l1e, tab_paddr, level--, l1_table_index(va));
+
+unlock:
+    spin_unlock(&vmap_lock);
 }
 
-static inline void *tmp_map_mfn(mfn_t mfn) {
-    BUG_ON(mfn_invalid(mfn));
-    set_pgentry(_tmp_mapping_entry, mfn, L1_PROT);
-    invlpg(_tmp_mapping);
-    return _tmp_mapping;
+void dump_kern_pagetable_va(void *va) {
+    dump_pagetable_va(&cr3, va);
+}
+
+void dump_user_pagetable_va(void *va) {
+    dump_pagetable_va(&user_cr3, va);
 }
 
 static mfn_t get_cr3_mfn(cr3_t *cr3_entry) {
@@ -179,16 +259,13 @@ static void *_vmap(cr3_t *cr3_ptr, void *va, mfn_t mfn, unsigned int order,
 #endif
                    unsigned long l3_flags, unsigned long l2_flags,
                    unsigned long l1_flags) {
-    static spinlock_t lock = SPINLOCK_INIT;
     mfn_t l1t_mfn, l2t_mfn, l3t_mfn;
     pgentry_t *tab, *entry;
 
     if (!va || (_ul(va) & ~PAGE_ORDER_TO_MASK(order)))
         return NULL;
 
-    dprintk("%s: va: %p mfn: 0x%lx (order: %u)\n", __func__, va, mfn, order);
-
-    spin_lock(&lock);
+    dprintk("%s: va: 0x%p mfn: 0x%lx (order: %u)\n", __func__, va, mfn, order);
 
 #if defined(__x86_64__)
     l3t_mfn = get_pgentry_mfn(get_cr3_mfn(cr3_ptr), l4_table_index(va), l4_flags);
@@ -222,7 +299,6 @@ static void *_vmap(cr3_t *cr3_ptr, void *va, mfn_t mfn, unsigned int order,
     invlpg(va);
 
 done:
-    spin_unlock(&lock);
     return va;
 }
 
@@ -233,7 +309,10 @@ void *vmap_kern(void *va, mfn_t mfn, unsigned int order,
                 unsigned long l3_flags, unsigned long l2_flags,
                 unsigned long l1_flags) {
     unsigned long _va = _ul(va) & PAGE_ORDER_TO_MASK(order);
-    return _vmap(&cr3, _ptr(_va), mfn, order, l4_flags, l3_flags, l2_flags, l1_flags);
+    spin_lock(&vmap_lock);
+    va = _vmap(&cr3, _ptr(_va), mfn, order, l4_flags, l3_flags, l2_flags, l1_flags);
+    spin_unlock(&vmap_lock);
+    return va;
 }
 
 void *vmap_user(void *va, mfn_t mfn, unsigned int order,
@@ -243,8 +322,10 @@ void *vmap_user(void *va, mfn_t mfn, unsigned int order,
                 unsigned long l3_flags, unsigned long l2_flags,
                 unsigned long l1_flags) {
     unsigned long _va = _ul(va) & PAGE_ORDER_TO_MASK(order);
-    return _vmap(&user_cr3, _ptr(_va), mfn, order, l4_flags, l3_flags, l2_flags,
-                 l1_flags);
+    spin_lock(&vmap_lock);
+    va = _vmap(&user_cr3, _ptr(_va), mfn, order, l4_flags, l3_flags, l2_flags, l1_flags);
+    spin_unlock(&vmap_lock);
+    return va;
 }
 
 static inline void init_tmp_mapping(void) {
diff --git a/common/setup.c b/common/setup.c
index ae0ec70e..5b06aa30 100644
--- a/common/setup.c
+++ b/common/setup.c
@@ -227,7 +227,7 @@ void __noreturn __text_init kernel_start(uint32_t multiboot_magic, unsigned long
     WRITE_SP(get_free_pages_top(PAGE_ORDER_2M, GFP_KERNEL_MAP));
 
     if (opt_debug)
-        dump_pagetables(cr3);
+        dump_pagetables(&cr3);
 
     map_bios_area();
 
diff --git a/include/arch/x86/pagetable.h b/include/arch/x86/pagetable.h
index b67ab009..301c68d3 100644
--- a/include/arch/x86/pagetable.h
+++ b/include/arch/x86/pagetable.h
@@ -287,7 +287,9 @@ extern pml4_t l4_pt_entries[L4_PT_ENTRIES];
 #endif
 
 extern void init_pagetables(void);
-extern void dump_pagetables(cr3_t cr3);
+extern void dump_pagetables(cr3_t *cr3_ptr);
+extern void dump_kern_pagetable_va(void *va);
+extern void dump_user_pagetable_va(void *va);
 
 #endif /* __ASSEMBLY__ */
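
For illustration only (not part of the patch): a minimal sketch of how a caller might exercise the reworked dump API on an x86_64 build. vmap_kern(), dump_kern_pagetable_va() and dump_pagetables() are the functions touched above; PAGE_ORDER_4K and the L4_PROT..L1_PROT flag macros are assumed to be the usual KTF paging constants, and the chosen VA/MFN plus the helper name are hypothetical.

/* Hypothetical usage sketch - the VA, MFN and surrounding scaffolding are
 * illustrative assumptions, not taken from this patch. */
static void dump_mapping_example(void *va, mfn_t mfn) {
    /* Map a single 4K frame at 'va' in the kernel address space. */
    if (!vmap_kern(va, mfn, PAGE_ORDER_4K, L4_PROT, L3_PROT, L2_PROT, L1_PROT))
        return;

    /* New with this patch: print only the entries that translate 'va'. */
    dump_kern_pagetable_va(va);

    /* Full hierarchy dump, as kernel_start() does when opt_debug is set. */
    dump_pagetables(&cr3);
}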