Message ID | 20190722213958.5761-25-julien.grall@arm.com |
---|---|
State | Superseded |
Series | xen/arm: Rework head.S to make it more compliant with the Arm Arm |
On Mon, 22 Jul 2019, Julien Grall wrote:
> The boot code is currently quite difficult to go through because of the
> lack of documentation and the number of indirections used to avoid
> executing some paths on either the boot CPU or the secondary CPUs.
>
> In an attempt to make the boot code easier to follow, each part of the
> boot is now in a separate function. Furthermore, the paths for the boot
> CPU and the secondary CPUs are now distinct and, for now, will call each
> function.
>
> Follow-ups will remove unnecessary calls and make further improvements
> (such as adding documentation and reshuffling).
>
> Note that the switch from using the ID mapping to the runtime mapping
> is duplicated for each path. This is because in the future we will need
> to stay longer in the ID mapping on the boot CPU.
>
> Lastly, it is now required to save lr in cpu_init() because the
> function will call other functions and therefore clobber lr.
>
> Signed-off-by: Julien Grall <julien.grall@arm.com>
>
> ---
>     Changes in v2:
>         - Patch added
> ---
>  xen/arch/arm/arm32/head.S | 64 +++++++++++++++++++++++++++++++++++++++--------
>  1 file changed, 53 insertions(+), 11 deletions(-)
>
> diff --git a/xen/arch/arm/arm32/head.S b/xen/arch/arm/arm32/head.S
> index bbcfdcd351..13793e85d8 100644
> --- a/xen/arch/arm/arm32/head.S
> +++ b/xen/arch/arm/arm32/head.S
> @@ -148,7 +148,19 @@ past_zImage:
>
>          mov   r12, #0                /* r12 := is_secondary_cpu */
>
> -        b     common_start
> +        bl    check_cpu_mode
> +        bl    zero_bss
> +        bl    cpu_init
> +        bl    create_page_tables
> +        bl    enable_mmu
> +
> +        /* We are still in the ID map. Jump to the runtime Virtual Address */

The arm64 patch has been switched to use "1:1", it would be good to be
consistent. In the commit message too.


> +        ldr   r0, =primary_switched
> +        mov   pc, r0
> +primary_switched:
> +        bl    setup_fixmap
> +        b     launch
> +ENDPROC(start)
>
>  GLOBAL(init_secondary)
>          cpsid aif                    /* Disable all interrupts */
> @@ -179,8 +191,21 @@ GLOBAL(init_secondary)
>          print_reg r7
>          PRINT(" booting -\r\n")
>  #endif
> -
> -common_start:
> +        bl    check_cpu_mode
> +        bl    zero_bss
> +        bl    cpu_init
> +        bl    create_page_tables
> +        bl    enable_mmu
> +
> +        /* We are still in the ID map. Jump to the runtime Virtual Address. */

Same here.


> +        ldr   r0, =secondary_switched
> +        mov   pc, r0
> +secondary_switched:
> +        bl    setup_fixmap
> +        b     launch
> +ENDPROC(init_secondary)
> +
> +check_cpu_mode:
>          /* Check that this CPU has Hyp mode */
>          mrc   CP32(r0, ID_PFR1)
>          and   r0, r0, #0xf000        /* Bits 12-15 define virt extensions */
> @@ -202,7 +227,10 @@ common_start:
>          b     fail
>
>  hyp:    PRINT("- Xen starting in Hyp mode -\r\n")
> +        mov   pc, lr
> +ENDPROC(check_cpu_mode)
>
> +zero_bss:
>          /* Zero BSS On the boot CPU to avoid nasty surprises */
>          teq   r12, #0
>          bne   skip_bss
> @@ -219,8 +247,14 @@ hyp:    PRINT("- Xen starting in Hyp mode -\r\n")
>          blo   1b
>
>  skip_bss:
> +        mov   pc, lr
> +ENDPROC(zero_bss)
> +
> +cpu_init:
>          PRINT("- Setting up control registers -\r\n")
>
> +        mov   r5, lr                 /* r5 := return address */

Please align the comment with the others in this proc.

Other than these minor comments the patch looks fine. Have you had a
chance to test it on real hardware?

> +
>          /* Get processor specific proc info into r1 */
>          bl    __lookup_processor_type
>          teq   r1, #0
> @@ -231,7 +265,6 @@ skip_bss:
>          PRINT(" -\r\n")
>          b     fail
>  1:
> -
>          /* Jump to cpu_init */
>          ldr   r1, [r1, #PROCINFO_cpu_init]   /* r1 := vaddr(init func) */
>          adr   lr, cpu_init_done             /* Save return address */
> @@ -256,6 +289,10 @@ cpu_init_done:
>          ldr   r0, =HSCTLR_SET
>          mcr   CP32(r0, HSCTLR)
>
> +        mov   pc, r5                 /* Return address is in r5 */
> +ENDPROC(cpu_init)
> +
> +create_page_tables:
>          /*
>           * Rebuild the boot pagetable's first-level entries. The structure
>           * is described in mm.c.
> @@ -359,15 +396,16 @@ cpu_init_done:
>          /* boot pagetable setup complete */
>
>          cmp   r6, #1                 /* Did we manage to create an identity mapping ? */
> -        beq   1f
> +        moveq pc, lr
>          PRINT("Unable to build boot page tables - Failed to identity map Xen.\r\n")
>          b     fail
>  virtphys_clash:
>          /* Identity map clashes with boot_third, which we cannot handle yet */
>          PRINT("- Unable to build boot page tables - virt and phys addresses clash. -\r\n")
>          b     fail
> +ENDPROC(create_page_tables)
>
> -1:
> +enable_mmu:
>          PRINT("- Turning on paging -\r\n")
>
>          /*
> @@ -377,16 +415,16 @@ virtphys_clash:
>          mcr   CP32(r0, TLBIALLH)     /* Flush hypervisor TLBs */
>          dsb   nsh
>
> -        ldr   r1, =paging            /* Explicit vaddr, not RIP-relative */
>          mrc   CP32(r0, HSCTLR)
>          /* Enable MMU and D-cache */
>          orr   r0, r0, #(SCTLR_Axx_ELx_M|SCTLR_Axx_ELx_C)
>          dsb                          /* Flush PTE writes and finish reads */
>          mcr   CP32(r0, HSCTLR)       /* now paging is enabled */
>          isb                          /* Now, flush the icache */
> -        mov   pc, r1                 /* Get a proper vaddr into PC */
> -paging:
> +        mov   pc, lr
> +ENDPROC(enable_mmu)
>
> +setup_fixmap:
>          /*
>           * Now we can install the fixmap and dtb mappings, since we
>           * don't need the 1:1 map any more
> @@ -436,12 +474,15 @@ paging:
>          mcr   CP32(r0, TLBIALLH)     /* Flush hypervisor TLB */
>          dsb                          /* Ensure completion of TLB flush */
>          isb
> +        mov   pc, lr
> +ENDPROC(setup_fixmap)
>
> +launch:
>          PRINT("- Ready -\r\n")
>
>          /* The boot CPU should go straight into C now */
>          teq   r12, #0
> -        beq   launch
> +        beq   1f
>
>          /*
>           * Non-boot CPUs need to move on to the proper pagetables, which were
> @@ -460,7 +501,7 @@ paging:
>          dsb                          /* Ensure completion of TLB+BP flush */
>          isb
>
> -launch:
> +1:
>          ldr   r0, =init_data
>          add   r0, #INITINFO_stack    /* Find the boot-time stack */
>          ldr   sp, [r0]
> @@ -471,6 +512,7 @@ launch:
>          moveq r1, r8                 /* - DTB address */
>          beq   start_xen              /* and disappear into the land of C */
>          b     start_secondary        /* (to the appropriate entry point) */
> +ENDPROC(launch)
>
>  /* Fail-stop */
>  fail:   PRINT("- Boot failed -\r\n")
> --
> 2.11.0
>
Hi Stefano,

On 7/30/19 9:07 PM, Stefano Stabellini wrote:
> On Mon, 22 Jul 2019, Julien Grall wrote:
>> The boot code is currently quite difficult to go through because of the
>> lack of documentation and the number of indirections used to avoid
>> executing some paths on either the boot CPU or the secondary CPUs.
>>
>> In an attempt to make the boot code easier to follow, each part of the
>> boot is now in a separate function. Furthermore, the paths for the boot
>> CPU and the secondary CPUs are now distinct and, for now, will call each
>> function.
>>
>> Follow-ups will remove unnecessary calls and make further improvements
>> (such as adding documentation and reshuffling).
>>
>> Note that the switch from using the ID mapping to the runtime mapping
>> is duplicated for each path. This is because in the future we will need
>> to stay longer in the ID mapping on the boot CPU.
>>
>> Lastly, it is now required to save lr in cpu_init() because the
>> function will call other functions and therefore clobber lr.
>>
>> Signed-off-by: Julien Grall <julien.grall@arm.com>
>>
>> ---
>>     Changes in v2:
>>         - Patch added
>> ---
>>  xen/arch/arm/arm32/head.S | 64 +++++++++++++++++++++++++++++++++++++++--------
>>  1 file changed, 53 insertions(+), 11 deletions(-)
>>
>> diff --git a/xen/arch/arm/arm32/head.S b/xen/arch/arm/arm32/head.S
>> index bbcfdcd351..13793e85d8 100644
>> --- a/xen/arch/arm/arm32/head.S
>> +++ b/xen/arch/arm/arm32/head.S
>> @@ -148,7 +148,19 @@ past_zImage:
>>
>>          mov   r12, #0                /* r12 := is_secondary_cpu */
>>
>> -        b     common_start
>> +        bl    check_cpu_mode
>> +        bl    zero_bss
>> +        bl    cpu_init
>> +        bl    create_page_tables
>> +        bl    enable_mmu
>> +
>> +        /* We are still in the ID map. Jump to the runtime Virtual Address */
>
> The arm64 patch has been switched to use "1:1", it would be good to be
> consistent. In the commit message too.
>
>
>> +        ldr   r0, =primary_switched
>> +        mov   pc, r0
>> +primary_switched:
>> +        bl    setup_fixmap
>> +        b     launch
>> +ENDPROC(start)
>>
>>  GLOBAL(init_secondary)
>>          cpsid aif                    /* Disable all interrupts */
>> @@ -179,8 +191,21 @@ GLOBAL(init_secondary)
>>          print_reg r7
>>          PRINT(" booting -\r\n")
>>  #endif
>> -
>> -common_start:
>> +        bl    check_cpu_mode
>> +        bl    zero_bss
>> +        bl    cpu_init
>> +        bl    create_page_tables
>> +        bl    enable_mmu
>> +
>> +        /* We are still in the ID map. Jump to the runtime Virtual Address. */
>
> Same here.
>
>
>> +        ldr   r0, =secondary_switched
>> +        mov   pc, r0
>> +secondary_switched:
>> +        bl    setup_fixmap
>> +        b     launch
>> +ENDPROC(init_secondary)
>> +
>> +check_cpu_mode:
>>          /* Check that this CPU has Hyp mode */
>>          mrc   CP32(r0, ID_PFR1)
>>          and   r0, r0, #0xf000        /* Bits 12-15 define virt extensions */
>> @@ -202,7 +227,10 @@ common_start:
>>          b     fail
>>
>>  hyp:    PRINT("- Xen starting in Hyp mode -\r\n")
>> +        mov   pc, lr
>> +ENDPROC(check_cpu_mode)
>>
>> +zero_bss:
>>          /* Zero BSS On the boot CPU to avoid nasty surprises */
>>          teq   r12, #0
>>          bne   skip_bss
>> @@ -219,8 +247,14 @@ hyp:    PRINT("- Xen starting in Hyp mode -\r\n")
>>          blo   1b
>>
>>  skip_bss:
>> +        mov   pc, lr
>> +ENDPROC(zero_bss)
>> +
>> +cpu_init:
>>          PRINT("- Setting up control registers -\r\n")
>>
>> +        mov   r5, lr                 /* r5 := return address */
>
> Please align the comment with the others in this proc.

It looks like the line contained some hard tabs. I have replaced them
with soft tabs.

>
> Other than these minor comments the patch looks fine. Have you had a
> chance to test it on real hardware?

I actually didn't test the SMP path on the model until I sent it,
because I had some issues getting the secondary CPUs up.

I wanted to get some feedback first. Hence the "lightly tested" in the
cover letter :).

This series fully boots on the model, and I still need to test it on real
hardware. I have a TC2 (Cortex-A15 + Cortex-A7) on my desk, so I will
give it a try there first.

Anyway, I probably need to resend this patch to replace all the
"mov pc, lr" with "bx lr".

Cheers,
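As an aside, here is a minimal sketch of the return idiom being discussed (illustrative only; the labels and bodies are invented for the example and are not part of the patch). `bx lr` branches back to the address held in lr and supports ARM/Thumb interworking, which is why it is the form the resend would switch to, whereas `mov pc, lr` simply copies lr into the program counter:

        /* Illustrative sketch only -- not part of the patch. */
example_leaf:                        /* hypothetical label */
        mov   r0, #1                 /* do some trivial work */
        bx    lr                     /* interworking return to the caller */

        /* The form the series currently uses and intends to replace: */
old_style_leaf:                      /* hypothetical label */
        mov   r0, #1
        mov   pc, lr                 /* plain copy of lr into pc */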
diff --git a/xen/arch/arm/arm32/head.S b/xen/arch/arm/arm32/head.S
index bbcfdcd351..13793e85d8 100644
--- a/xen/arch/arm/arm32/head.S
+++ b/xen/arch/arm/arm32/head.S
@@ -148,7 +148,19 @@ past_zImage:
 
         mov   r12, #0                /* r12 := is_secondary_cpu */
 
-        b     common_start
+        bl    check_cpu_mode
+        bl    zero_bss
+        bl    cpu_init
+        bl    create_page_tables
+        bl    enable_mmu
+
+        /* We are still in the ID map. Jump to the runtime Virtual Address */
+        ldr   r0, =primary_switched
+        mov   pc, r0
+primary_switched:
+        bl    setup_fixmap
+        b     launch
+ENDPROC(start)
 
 GLOBAL(init_secondary)
         cpsid aif                    /* Disable all interrupts */
@@ -179,8 +191,21 @@ GLOBAL(init_secondary)
         print_reg r7
         PRINT(" booting -\r\n")
 #endif
-
-common_start:
+        bl    check_cpu_mode
+        bl    zero_bss
+        bl    cpu_init
+        bl    create_page_tables
+        bl    enable_mmu
+
+        /* We are still in the ID map. Jump to the runtime Virtual Address. */
+        ldr   r0, =secondary_switched
+        mov   pc, r0
+secondary_switched:
+        bl    setup_fixmap
+        b     launch
+ENDPROC(init_secondary)
+
+check_cpu_mode:
         /* Check that this CPU has Hyp mode */
         mrc   CP32(r0, ID_PFR1)
         and   r0, r0, #0xf000        /* Bits 12-15 define virt extensions */
@@ -202,7 +227,10 @@ common_start:
         b     fail
 
 hyp:    PRINT("- Xen starting in Hyp mode -\r\n")
+        mov   pc, lr
+ENDPROC(check_cpu_mode)
 
+zero_bss:
         /* Zero BSS On the boot CPU to avoid nasty surprises */
         teq   r12, #0
         bne   skip_bss
@@ -219,8 +247,14 @@ hyp:    PRINT("- Xen starting in Hyp mode -\r\n")
         blo   1b
 
 skip_bss:
+        mov   pc, lr
+ENDPROC(zero_bss)
+
+cpu_init:
         PRINT("- Setting up control registers -\r\n")
 
+        mov   r5, lr                 /* r5 := return address */
+
         /* Get processor specific proc info into r1 */
         bl    __lookup_processor_type
         teq   r1, #0
@@ -231,7 +265,6 @@ skip_bss:
         PRINT(" -\r\n")
         b     fail
 1:
-
         /* Jump to cpu_init */
         ldr   r1, [r1, #PROCINFO_cpu_init]   /* r1 := vaddr(init func) */
         adr   lr, cpu_init_done             /* Save return address */
@@ -256,6 +289,10 @@ cpu_init_done:
         ldr   r0, =HSCTLR_SET
         mcr   CP32(r0, HSCTLR)
 
+        mov   pc, r5                 /* Return address is in r5 */
+ENDPROC(cpu_init)
+
+create_page_tables:
         /*
          * Rebuild the boot pagetable's first-level entries. The structure
          * is described in mm.c.
@@ -359,15 +396,16 @@ cpu_init_done:
         /* boot pagetable setup complete */
 
         cmp   r6, #1                 /* Did we manage to create an identity mapping ? */
-        beq   1f
+        moveq pc, lr
         PRINT("Unable to build boot page tables - Failed to identity map Xen.\r\n")
         b     fail
 virtphys_clash:
         /* Identity map clashes with boot_third, which we cannot handle yet */
         PRINT("- Unable to build boot page tables - virt and phys addresses clash. -\r\n")
         b     fail
+ENDPROC(create_page_tables)
 
-1:
+enable_mmu:
         PRINT("- Turning on paging -\r\n")
 
         /*
@@ -377,16 +415,16 @@ virtphys_clash:
         mcr   CP32(r0, TLBIALLH)     /* Flush hypervisor TLBs */
         dsb   nsh
 
-        ldr   r1, =paging            /* Explicit vaddr, not RIP-relative */
         mrc   CP32(r0, HSCTLR)
         /* Enable MMU and D-cache */
         orr   r0, r0, #(SCTLR_Axx_ELx_M|SCTLR_Axx_ELx_C)
         dsb                          /* Flush PTE writes and finish reads */
         mcr   CP32(r0, HSCTLR)       /* now paging is enabled */
         isb                          /* Now, flush the icache */
-        mov   pc, r1                 /* Get a proper vaddr into PC */
-paging:
+        mov   pc, lr
+ENDPROC(enable_mmu)
 
+setup_fixmap:
         /*
          * Now we can install the fixmap and dtb mappings, since we
          * don't need the 1:1 map any more
@@ -436,12 +474,15 @@ paging:
         mcr   CP32(r0, TLBIALLH)     /* Flush hypervisor TLB */
         dsb                          /* Ensure completion of TLB flush */
         isb
+        mov   pc, lr
+ENDPROC(setup_fixmap)
 
+launch:
         PRINT("- Ready -\r\n")
 
         /* The boot CPU should go straight into C now */
         teq   r12, #0
-        beq   launch
+        beq   1f
 
         /*
          * Non-boot CPUs need to move on to the proper pagetables, which were
@@ -460,7 +501,7 @@ paging:
         dsb                          /* Ensure completion of TLB+BP flush */
         isb
 
-launch:
+1:
         ldr   r0, =init_data
         add   r0, #INITINFO_stack    /* Find the boot-time stack */
         ldr   sp, [r0]
@@ -471,6 +512,7 @@ launch:
         moveq r1, r8                 /* - DTB address */
         beq   start_xen              /* and disappear into the land of C */
         b     start_secondary        /* (to the appropriate entry point) */
+ENDPROC(launch)
 
 /* Fail-stop */
 fail:   PRINT("- Boot failed -\r\n")
The boot code is currently quite difficult to go through because of the
lack of documentation and the number of indirections used to avoid
executing some paths on either the boot CPU or the secondary CPUs.

In an attempt to make the boot code easier to follow, each part of the
boot is now in a separate function. Furthermore, the paths for the boot
CPU and the secondary CPUs are now distinct and, for now, will call each
function.

Follow-ups will remove unnecessary calls and make further improvements
(such as adding documentation and reshuffling).

Note that the switch from using the ID mapping to the runtime mapping
is duplicated for each path. This is because in the future we will need
to stay longer in the ID mapping on the boot CPU.

Lastly, it is now required to save lr in cpu_init() because the
function will call other functions and therefore clobber lr.

Signed-off-by: Julien Grall <julien.grall@arm.com>

---
    Changes in v2:
        - Patch added
---
 xen/arch/arm/arm32/head.S | 64 +++++++++++++++++++++++++++++++++++++++--------
 1 file changed, 53 insertions(+), 11 deletions(-)
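For readers less familiar with the convention used here, below is a rough sketch of the lr-saving pattern described in the last paragraph (a sketch only; the register choice mirrors the patch, but the labels are made up for illustration). A routine that itself issues `bl` must stash lr in a scratch register first, because every `bl` overwrites lr with its own return address:

        /* Illustrative sketch: preserving lr across a nested bl.
         * "demo_init" and "demo_helper" are hypothetical labels. */
demo_init:
        mov   r5, lr                 /* r5 := caller's return address */
        bl    demo_helper            /* bl clobbers lr ... */
        mov   pc, r5                 /* ... so return via the saved copy */

demo_helper:
        mov   pc, lr                 /* leaf routine: lr is still valid */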