@@ -84,10 +84,12 @@ lpae_t boot_third[LPAE_ENTRIES] __attribute__((__aligned__(4096)));
*/
#ifdef CONFIG_ARM_64
+#define HYP_PT_ROOT_LEVEL 0
lpae_t xen_pgtable[LPAE_ENTRIES] __attribute__((__aligned__(4096)));
lpae_t xen_first[LPAE_ENTRIES] __attribute__((__aligned__(4096)));
#define THIS_CPU_PGTABLE xen_pgtable
#else
+#define HYP_PT_ROOT_LEVEL 1
/* Per-CPU pagetable pages */
/* xen_pgtable == root of the trie (zeroeth level on 64-bit, first on 32-bit) */
static DEFINE_PER_CPU(lpae_t *, xen_pgtable);
@@ -165,34 +167,49 @@ static inline void check_memory_layout_alignment_constraints(void) {
#endif
}
-void dump_pt_walk(lpae_t *first, paddr_t addr)
+void dump_pt_walk(lpae_t *root, paddr_t addr,
+ unsigned int root_level)
{
- lpae_t *second = NULL, *third = NULL;
+ static const char *level_strs[4] = { "0TH", "1ST", "2ND", "3RD" };
+ const unsigned int offsets[4] = {
+ zeroeth_table_offset(addr),
+ first_table_offset(addr),
+ second_table_offset(addr),
+ third_table_offset(addr)
+ };
+ lpae_t pte, *mappings[4] = { 0, };
+ unsigned int level;
+
+ BUG_ON(!root);
+#ifdef CONFIG_ARM_32
+ BUG_ON(root_level < 1);
+#endif
+ BUG_ON(root_level > 3);
- if ( first_table_offset(addr) >= LPAE_ENTRIES )
- return;
+ mappings[root_level] = root;
+
+ for ( level = root_level; ; level++ )
+ {
+ if ( offsets[level] > LPAE_ENTRIES )
+ break;
- printk("1ST[0x%x] = 0x%"PRIpaddr"\n", first_table_offset(addr),
- first[first_table_offset(addr)].bits);
- if ( !first[first_table_offset(addr)].walk.valid ||
- !first[first_table_offset(addr)].walk.table )
- goto done;
+ pte = mappings[level][offsets[level]];
- second = map_domain_page(first[first_table_offset(addr)].walk.base);
- printk("2ND[0x%x] = 0x%"PRIpaddr"\n", second_table_offset(addr),
- second[second_table_offset(addr)].bits);
- if ( !second[second_table_offset(addr)].walk.valid ||
- !second[second_table_offset(addr)].walk.table )
- goto done;
+ printk("%s[0x%x] = 0x%"PRIpaddr"\n",
+ level_strs[level], offsets[level], pte.bits);
- third = map_domain_page(second[second_table_offset(addr)].walk.base);
- printk("3RD[0x%x] = 0x%"PRIpaddr"\n", third_table_offset(addr),
- third[third_table_offset(addr)].bits);
+ if ( level == 3 || !pte.walk.valid || !pte.walk.table )
+ break;
-done:
- if (third) unmap_domain_page(third);
- if (second) unmap_domain_page(second);
+ mappings[level+1] = map_domain_page(pte.walk.base);
+ }
+ /* mappings[root_level] is provided by the caller so don't unmap that */
+ do
+ {
+ unmap_domain_page(mappings[level]);
+ }
+ while ( level-- > root_level );
}
void dump_hyp_walk(vaddr_t addr)
@@ -208,7 +225,7 @@ void dump_hyp_walk(vaddr_t addr)
BUG_ON( (lpae_t *)(unsigned long)(ttbr - phys_offset) != pgtable );
else
BUG_ON( virt_to_maddr(pgtable) != ttbr );
- dump_pt_walk(pgtable, addr);
+ dump_pt_walk(pgtable, addr, HYP_PT_ROOT_LEVEL);
}
/* Map a 4k page in a fixmap entry */
@@ -11,6 +11,8 @@
#include <asm/hardirq.h>
#include <asm/page.h>
+#define P2M_ROOT_LEVEL 1
+
/* First level P2M is 2 consecutive pages */
#define P2M_ROOT_ORDER 1
#define P2M_ROOT_ENTRIES (LPAE_ENTRIES<<P2M_ROOT_ORDER)
@@ -68,7 +70,7 @@ void dump_p2m_lookup(struct domain *d, paddr_t addr)
p2m->root, page_to_mfn(p2m->root));
first = __map_domain_page(p2m->root);
- dump_pt_walk(first, addr);
+ dump_pt_walk(first, addr, P2M_ROOT_LEVEL);
unmap_domain_page(first);
}
@@ -352,7 +352,7 @@ static inline void flush_xen_data_tlb_range_va(unsigned long va,
void flush_page_to_ram(unsigned long mfn);
/* Print a walk of an arbitrary page table */
-void dump_pt_walk(lpae_t *table, paddr_t addr);
+void dump_pt_walk(lpae_t *table, paddr_t addr, unsigned int root_level);
/* Print a walk of the hypervisor's page tables for a virtual addr. */
extern void dump_hyp_walk(vaddr_t addr);
This allows us to correctly dump 64-bit hypervisor addresses, which use a
4 level table. It also paves the way for boot-time selection of the number
of levels to use in the p2m, which is required to support both 40-bit and
48-bit systems.

To support multiple levels it is convenient to recast the page table walk
as a loop over the levels instead of the current open coding.

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
---
v3: - coding style nit
    - validate input root_level is sensible.
v2: - map_domain_page cannot fail, so don't check
    - don't map an extra page for a valid L3 entry
    - avoid unmapping levels which we didn't map.
    - root_level argument is unsigned int.
    - fold in spurious whitespace change from next patch
---
 xen/arch/arm/mm.c          | 61 ++++++++++++++++++++++++++++----------------
 xen/arch/arm/p2m.c         |  4 ++-
 xen/include/asm-arm/page.h |  2 +-
 3 files changed, 43 insertions(+), 24 deletions(-)