Message ID | 20190917181244.30027-7-julien.grall@arm.com
---|---
State      | New
Series     | xen/arm: Rework head.S to make it more compliant with the Arm Arm
On Tue, 17 Sep 2019, Julien Grall wrote:
> At the moment the function create_page_tables() will use a 1GB/2MB
> mapping for the identity mapping. As we don't know what is present
> before and after Xen in memory, we may end up mapping
> device/reserved-memory with cacheable memory. This may result in
> mismatched attributes, as other users may access the same region
> differently.
>
> To prevent any issues, we should only map the strict minimum in the
> 1:1 mapping. A check in xen.lds.S already guarantees that anything
> necessary for turning on the MMU fits in a page (at the moment 4K).
>
> As only one page will be mapped for the 1:1 mapping, it is necessary
> to pre-allocate a page for the 3rd level table.
>
> Signed-off-by: Julien Grall <julien.grall@arm.com>

Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>

> ---
> Changes in v4:
>     - Don't pre-link the page-tables for the 1:1 mapping. Instead
>       only link what's necessary.
>
> Changes in v3:
>     - Patch added
> ---
>  xen/arch/arm/arm64/head.S | 166 ++++++++++++++++++----------------------------
>  xen/arch/arm/mm.c         |   2 +
>  2 files changed, 68 insertions(+), 100 deletions(-)
> [...]
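To make the control flow of the reworked create_page_tables() easier to follow, here is a rough C rendering of the level-by-level slot comparison the assembly performs before linking in the boot_*_id tables. This is an illustrative sketch, not Xen code: the shift values are the standard LPAE 4KB-granule ones, XEN_VIRT_START is passed in as a plain parameter, and the helper name identity_link_level() is made up for the example.

/* Illustrative sketch only -- mirrors the 1:1 mapping decisions in head.S. */
#include <stdint.h>
#include <stdio.h>

#define ZEROETH_SHIFT   39
#define FIRST_SHIFT     30
#define SECOND_SHIFT    21
#define THIRD_SHIFT     12
#define LPAE_ENTRY_MASK 0x1ffULL   /* 512 entries per level */

/* At which level do the identity page-tables diverge from Xen's own? */
static const char *identity_link_level(uint64_t paddr_start,
                                       uint64_t xen_virt_start)
{
    if ( paddr_start == xen_virt_start )
        return "none: Xen is loaded at XEN_VIRT_START, the virtual mapping suffices";

    /* Zeroeth slot differs: link boot_first_id from boot_pgtable. */
    if ( (paddr_start >> ZEROETH_SHIFT) != (xen_virt_start >> ZEROETH_SHIFT) )
        return "zeroeth level: boot_first_id, boot_second_id and boot_third_id";

    /* First slot differs: link boot_second_id from the shared boot_first. */
    if ( ((paddr_start >> FIRST_SHIFT) & LPAE_ENTRY_MASK) !=
         ((xen_virt_start >> FIRST_SHIFT) & LPAE_ENTRY_MASK) )
        return "first level: boot_second_id and boot_third_id";

    /* Same second slot as Xen cannot be handled yet: virtphys_clash. */
    if ( ((paddr_start >> SECOND_SHIFT) & LPAE_ENTRY_MASK) ==
         ((xen_virt_start >> SECOND_SHIFT) & LPAE_ENTRY_MASK) )
        return "clash: virt and phys share the second-level slot";

    /* Otherwise only a dedicated third-level table is needed. */
    return "second level: boot_third_id only";
}

int main(void)
{
    /* Example: a made-up load address, with XEN_VIRT_START at 2MB. */
    printf("%s\n", identity_link_level(0x80200000ULL, 0x200000ULL));
    return 0;
}

With Xen linked at 2MB (as it was at the time of this series), a load address such as 0x80200000 shares the zeroeth slot but not the first slot, so only boot_second_id and boot_third_id end up linked in, from the shared boot_first.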
diff --git a/xen/arch/arm/arm64/head.S b/xen/arch/arm/arm64/head.S
index 2cce342217..e5015f93a2 100644
--- a/xen/arch/arm/arm64/head.S
+++ b/xen/arch/arm/arm64/head.S
@@ -566,100 +566,17 @@ ENDPROC(cpu_init)
  * x19: paddr(start)
  * x20: phys offset
  *
- * Clobbers x0 - x4, x25
- *
- * Register usage within this function:
- *  x25: Identity map in place
+ * Clobbers x0 - x4
  */
 create_page_tables:
-        /*
-         * If Xen is loaded at exactly XEN_VIRT_START then we don't
-         * need an additional 1:1 mapping, the virtual mapping will
-         * suffice.
-         */
-        cmp   x19, #XEN_VIRT_START
-        cset  x25, eq                /* x25 := identity map in place, or not */
-
-        load_paddr x4, boot_pgtable
-
-        /* Setup boot_pgtable: */
-        load_paddr x1, boot_first
-
-        /* ... map boot_first in boot_pgtable[0] */
-        mov   x3, #PT_PT             /* x2 := table map of boot_first */
-        orr   x2, x1, x3             /*       + rights for linear PT */
-        str   x2, [x4, #0]           /* Map it in slot 0 */
-
-        /* ... map of paddr(start) in boot_pgtable+boot_first_id */
-        lsr   x1, x19, #ZEROETH_SHIFT/* Offset of base paddr in boot_pgtable */
-        cbz   x1, 1f                 /* It's in slot 0, map in boot_first
-                                      * or boot_second later on */
+        /* Prepare the page-tables for mapping Xen */
+        ldr   x0, =XEN_VIRT_START
+        create_table_entry boot_pgtable, boot_first, x0, ZEROETH_SHIFT, x1, x2, x3
+        create_table_entry boot_first, boot_second, x0, FIRST_SHIFT, x1, x2, x3
+        create_table_entry boot_second, boot_third, x0, SECOND_SHIFT, x1, x2, x3
 
-        /*
-         * Level zero does not support superpage mappings, so we have
-         * to use an extra first level page in which we create a 1GB mapping.
-         */
-        load_paddr x2, boot_first_id
-
-        mov   x3, #PT_PT             /* x2 := table map of boot_first_id */
-        orr   x2, x2, x3             /*       + rights for linear PT */
-        str   x2, [x4, x1, lsl #3]
-
-        load_paddr x4, boot_first_id
-
-        lsr   x1, x19, #FIRST_SHIFT  /* x1 := Offset of base paddr in boot_first_id */
-        lsl   x2, x1, #FIRST_SHIFT   /* x2 := Base address for 1GB mapping */
-        mov   x3, #PT_MEM            /* x2 := Section map */
-        orr   x2, x2, x3
-        and   x1, x1, #LPAE_ENTRY_MASK  /* x1 := Slot offset */
-        str   x2, [x4, x1, lsl #3]   /* Mapping of paddr(start) */
-        mov   x25, #1                /* x25 := identity map now in place */
-
-1:      /* Setup boot_first: */
-        load_paddr x4, boot_first    /* Next level into boot_first */
-
-        /* ... map boot_second in boot_first[0] */
-        load_paddr x1, boot_second
-        mov   x3, #PT_PT             /* x2 := table map of boot_second */
-        orr   x2, x1, x3             /*       + rights for linear PT */
-        str   x2, [x4, #0]           /* Map it in slot 0 */
-
-        /* ... map of paddr(start) in boot_first */
-        cbnz  x25, 1f                /* x25 is set if already created */
-        lsr   x2, x19, #FIRST_SHIFT  /* x2 := Offset of base paddr in boot_first */
-        and   x1, x2, #LPAE_ENTRY_MASK  /* x1 := Slot to use */
-        cbz   x1, 1f                 /* It's in slot 0, map in boot_second */
-
-        lsl   x2, x2, #FIRST_SHIFT   /* Base address for 1GB mapping */
-        mov   x3, #PT_MEM            /* x2 := Section map */
-        orr   x2, x2, x3
-        str   x2, [x4, x1, lsl #3]   /* Create mapping of paddr(start)*/
-        mov   x25, #1                /* x25 := identity map now in place */
-
-1:      /* Setup boot_second: */
-        load_paddr x4, boot_second
-
-        /* ... map boot_third in boot_second[1] */
-        load_paddr x1, boot_third
-        mov   x3, #PT_PT             /* x2 := table map of boot_third */
-        orr   x2, x1, x3             /*       + rights for linear PT */
-        str   x2, [x4, #8]           /* Map it in slot 1 */
-
-        /* ... map of paddr(start) in boot_second */
-        cbnz  x25, 1f                /* x25 is set if already created */
-        lsr   x2, x19, #SECOND_SHIFT /* x2 := Offset of base paddr in boot_second */
-        and   x1, x2, #LPAE_ENTRY_MASK  /* x1 := Slot to use */
-        cmp   x1, #1
-        b.eq  virtphys_clash         /* It's in slot 1, which we cannot handle */
-
-        lsl   x2, x2, #SECOND_SHIFT  /* Base address for 2MB mapping */
-        mov   x3, #PT_MEM            /* x2 := Section map */
-        orr   x2, x2, x3
-        str   x2, [x4, x1, lsl #3]   /* Create mapping of paddr(start)*/
-        mov   x25, #1                /* x25 := identity map now in place */
-
-1:      /* Setup boot_third: */
-        load_paddr x4, boot_third
+        /* Map Xen */
+        adr_l x4, boot_third
 
         lsr   x2, x19, #THIRD_SHIFT  /* Base address for 4K mapping */
         lsl   x2, x2, #THIRD_SHIFT
@@ -674,21 +591,70 @@ create_page_tables:
         cmp   x1, #(LPAE_ENTRIES<<3) /* 512 entries per page */
         b.lt  1b
 
-        /* Defer fixmap and dtb mapping until after paging enabled, to
-         * avoid them clashing with the 1:1 mapping. */
+        /*
+         * If Xen is loaded at exactly XEN_VIRT_START then we don't
+         * need an additional 1:1 mapping, the virtual mapping will
+         * suffice.
+         */
+        cmp   x19, #XEN_VIRT_START
+        bne   1f
+        ret
+1:
+        /*
+         * Setup the 1:1 mapping so we can turn the MMU on. Note that
+         * only the first page of Xen will be part of the 1:1 mapping.
+         */
+
+        /*
+         * Find the zeroeth slot used. If the slot is not
+         * XEN_ZEROETH_SLOT, then the 1:1 mapping will use its own set of
+         * page-tables from the first level.
+         */
+        lsr   x0, x19, #ZEROETH_SHIFT   /* x0 := zeroeth slot */
+        cmp   x0, #XEN_ZEROETH_SLOT
+        beq   1f
+        create_table_entry boot_pgtable, boot_first_id, x19, ZEROETH_SHIFT, x0, x1, x2
+        b     link_from_first_id
+
+1:
+        /*
+         * Find the first slot used. If the slot is not XEN_FIRST_SLOT,
+         * then the 1:1 mapping will use its own set of page-tables from
+         * the second level.
+         */
+        lsr   x0, x19, #FIRST_SHIFT
+        and   x0, x0, #LPAE_ENTRY_MASK  /* x0 := first slot */
+        cmp   x0, #XEN_FIRST_SLOT
+        beq   1f
+        create_table_entry boot_first, boot_second_id, x19, FIRST_SHIFT, x0, x1, x2
+        b     link_from_second_id
 
-        /* boot pagetable setup complete */
+1:
+        /*
+         * Find the second slot used. If the slot is XEN_SECOND_SLOT, then the
+         * 1:1 mapping will use its own set of page-tables from the
+         * third level. For slot XEN_SECOND_SLOT, Xen is not yet able to handle
+         * it.
+         */
+        lsr   x0, x19, #SECOND_SHIFT
+        and   x0, x0, #LPAE_ENTRY_MASK  /* x0 := first slot */
+        cmp   x0, #XEN_SECOND_SLOT
+        beq   virtphys_clash
+        create_table_entry boot_second, boot_third_id, x19, SECOND_SHIFT, x0, x1, x2
+        b     link_from_third_id
+
+link_from_first_id:
+        create_table_entry boot_first_id, boot_second_id, x19, FIRST_SHIFT, x0, x1, x2
+link_from_second_id:
+        create_table_entry boot_second_id, boot_third_id, x19, SECOND_SHIFT, x0, x1, x2
+link_from_third_id:
+        create_mapping_entry boot_third_id, x19, x19, x0, x1, x2
+        ret
 
-        cbnz  x25, 1f                /* Did we manage to create an identity mapping ? */
-        PRINT("Unable to build boot page tables - Failed to identity map Xen.\r\n")
-        b     fail
 virtphys_clash:
         /* Identity map clashes with boot_third, which we cannot handle yet */
         PRINT("- Unable to build boot page tables - virt and phys addresses clash. -\r\n")
         b     fail
-
-1:
-        ret
 ENDPROC(create_page_tables)
 
 /*
diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
index 65552da4ba..72ffea7472 100644
--- a/xen/arch/arm/mm.c
+++ b/xen/arch/arm/mm.c
@@ -105,6 +105,8 @@ DEFINE_BOOT_PAGE_TABLE(boot_pgtable);
 #ifdef CONFIG_ARM_64
 DEFINE_BOOT_PAGE_TABLE(boot_first);
 DEFINE_BOOT_PAGE_TABLE(boot_first_id);
+DEFINE_BOOT_PAGE_TABLE(boot_second_id);
+DEFINE_BOOT_PAGE_TABLE(boot_third_id);
 #endif
 DEFINE_BOOT_PAGE_TABLE(boot_second);
 DEFINE_BOOT_PAGE_TABLE(boot_third);
At the moment the function create_page_tables() will use a 1GB/2MB
mapping for the identity mapping. As we don't know what is present
before and after Xen in memory, we may end up mapping
device/reserved-memory with cacheable memory. This may result in
mismatched attributes, as other users may access the same region
differently.

To prevent any issues, we should only map the strict minimum in the
1:1 mapping. A check in xen.lds.S already guarantees that anything
necessary for turning on the MMU fits in a page (at the moment 4K).

As only one page will be mapped for the 1:1 mapping, it is necessary
to pre-allocate a page for the 3rd level table.

Signed-off-by: Julien Grall <julien.grall@arm.com>
---
Changes in v4:
    - Don't pre-link the page-tables for the 1:1 mapping. Instead
      only link what's necessary.

Changes in v3:
    - Patch added
---
 xen/arch/arm/arm64/head.S | 166 ++++++++++++++++++----------------------------
 xen/arch/arm/mm.c         |   2 +
 2 files changed, 68 insertions(+), 100 deletions(-)
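As a back-of-the-envelope check of the "only one page" argument above: with the 4KB LPAE granule each level resolves 9 bits of the address (512 entries per table), so a single entry per level is enough to reach one 4KB page. The snippet below is illustrative only; the load address 0x80200000 and the slot() helper are made up for the example, not taken from the patch.

#include <stdint.h>
#include <stdio.h>

#define LPAE_ENTRY_MASK 0x1ffULL   /* 512 entries per table */

/* Slot index used at a given translation level for a physical address. */
static uint64_t slot(uint64_t pa, unsigned int shift)
{
    return (pa >> shift) & LPAE_ENTRY_MASK;
}

int main(void)
{
    uint64_t start = 0x80200000ULL;  /* hypothetical paddr(start) */

    /* One entry per level is enough to reach a single 4KB page. */
    printf("zeroeth slot: %llu\n", (unsigned long long)slot(start, 39)); /* 0 */
    printf("first slot:   %llu\n", (unsigned long long)slot(start, 30)); /* 2 */
    printf("second slot:  %llu\n", (unsigned long long)slot(start, 21)); /* 1 */
    printf("third slot:   %llu\n", (unsigned long long)slot(start, 12)); /* 0 */
    return 0;
}

For that address the slots come out as 0, 2, 1 and 0 respectively, so the 1:1 mapping touches exactly one entry per level and a single 4KB entry in boot_third_id.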