@@ -182,8 +182,9 @@ SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL)
xorl %r15d, %r15d
/* Derive the runtime physical address of init_top_pgt[] */
- movq phys_base(%rip), %rax
- addq $(init_top_pgt - __START_KERNEL_map), %rax
+ leaq init_top_pgt(%rip), %rax
+ subq $__START_KERNEL_map, %rax
+ addq phys_base(%rip), %rax
/*
* Retrieve the modifier (SME encryption mask if SME is active) to be
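
Both the removed and the added sequence evaluate phys_base + (init_top_pgt - __START_KERNEL_map); the difference is that the new one obtains init_top_pgt's address with a RIP-relative leaq at run time instead of baking the link-time constant into an immediate. A minimal user-space sketch of that arithmetic; the values for __START_KERNEL_map, phys_base and the symbol address below are made up for illustration:

#include <stdint.h>
#include <stdio.h>

#define START_KERNEL_MAP 0xffffffff80000000ULL  /* stand-in for __START_KERNEL_map */

int main(void)
{
    uint64_t phys_base    = 0x0000000001000000ULL;  /* hypothetical load offset      */
    uint64_t init_top_pgt = 0xffffffff82c00000ULL;  /* hypothetical VA of the symbol */

    /* old: movq phys_base(%rip),%rax; addq $(init_top_pgt - __START_KERNEL_map),%rax */
    uint64_t old_way = phys_base + (init_top_pgt - START_KERNEL_MAP);

    /* new: leaq init_top_pgt(%rip),%rax; subq $__START_KERNEL_map,%rax; addq phys_base(%rip),%rax */
    uint64_t new_way = init_top_pgt - START_KERNEL_MAP + phys_base;

    printf("physical address: old=%#llx new=%#llx\n",
           (unsigned long long)old_way, (unsigned long long)new_way);
    return 0;
}
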
@@ -314,7 +315,8 @@ SYM_INNER_LABEL(common_startup_64, SYM_L_LOCAL)
.Lsetup_cpu:
/* Get the per cpu offset for the given CPU# which is in ECX */
- movq __per_cpu_offset(,%rcx,8), %rdx
+ leaq __per_cpu_offset(%rip), %rdx
+ movq (%rdx,%rcx,8), %rdx
#else
xorl %edx, %edx /* zero-extended to clear all of RDX */
#endif /* CONFIG_SMP */
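
The per-CPU offset lookup gets the same treatment: instead of the absolute-addressed __per_cpu_offset(,%rcx,8) form, the table base is taken RIP-relatively and then indexed. A minimal C sketch of the lookup; the offsets in the table below are invented:

#include <stdint.h>
#include <stdio.h>

/* made-up per-CPU offsets, only the indexing matches the asm */
static uint64_t per_cpu_offset[8] = { 0x0, 0x1000, 0x2000, 0x3000 };

int main(void)
{
    unsigned int cpu = 3;                /* the CPU# the asm has in ECX */

    uint64_t *base = per_cpu_offset;     /* leaq __per_cpu_offset(%rip), %rdx */
    uint64_t offset = base[cpu];         /* movq (%rdx,%rcx,8), %rdx          */

    printf("per-CPU offset for CPU %u: %#llx\n", cpu, (unsigned long long)offset);
    return 0;
}
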
@@ -325,7 +327,8 @@ SYM_INNER_LABEL(common_startup_64, SYM_L_LOCAL)
*
* RDX contains the per-cpu offset
*/
- movq pcpu_hot + X86_current_task(%rdx), %rax
+ leaq pcpu_hot + X86_current_task(%rip), %rax
+ movq (%rax,%rdx), %rax
movq TASK_threadsp(%rax), %rsp
/*
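
Same pattern for reading the current task out of pcpu_hot: the address of the variable's generic copy is taken RIP-relatively, the per-CPU offset in RDX is added, and the result is dereferenced. A rough user-space model of that per-CPU access; the struct layout, field names and values below are stand-ins, not the kernel's real pcpu_hot:

#include <stdint.h>
#include <stdio.h>

struct task { uint64_t threadsp; };
struct hot  { struct task *current_task; };

/* one copy of the "per-CPU" area per CPU; layout is illustrative only */
static struct hot cpu_area[4];

int main(void)
{
    static struct task t = { .threadsp = 0xffffc90000013f58ULL };
    unsigned int cpu = 2;

    /* per-CPU offset: distance from CPU 0's copy to this CPU's copy */
    uintptr_t offset = (uintptr_t)&cpu_area[cpu] - (uintptr_t)&cpu_area[0];

    cpu_area[cpu].current_task = &t;

    /* leaq pcpu_hot+X86_current_task(%rip),%rax; movq (%rax,%rdx),%rax */
    struct task *cur = *(struct task **)((uintptr_t)&cpu_area[0].current_task + offset);

    /* movq TASK_threadsp(%rax),%rsp would load this into the stack pointer */
    printf("current task stack: %#llx\n", (unsigned long long)cur->threadsp);
    return 0;
}
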
@@ -346,7 +349,8 @@ SYM_INNER_LABEL(common_startup_64, SYM_L_LOCAL)
*/
subq $16, %rsp
movw $(GDT_SIZE-1), (%rsp)
- leaq gdt_page(%rdx), %rax
+ leaq gdt_page(%rip), %rax
+ addq %rdx, %rax
movq %rax, 2(%rsp)
lgdt (%rsp)
addq $16, %rsp
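
What is being assembled on the stack here is the 10-byte lgdt pseudo-descriptor: a 16-bit limit followed by a 64-bit base, with the base now computed as the RIP-relative address of gdt_page plus the per-CPU offset in RDX. A small C sketch of that operand layout; GDT_SIZE and both addresses below are placeholders:

#include <stdint.h>
#include <stdio.h>

#define GDT_SIZE 128  /* placeholder for the real GDT_SIZE */

/* 10-byte operand for lgdt: 16-bit limit, then 64-bit linear base */
struct gdt_desc {
    uint16_t limit;   /* movw $(GDT_SIZE-1), (%rsp) */
    uint64_t base;    /* movq %rax, 2(%rsp)         */
} __attribute__((packed));

int main(void)
{
    uint64_t gdt_page   = 0xffffffff82d0a000ULL;  /* hypothetical address of gdt_page  */
    uint64_t percpu_off = 0x2000;                 /* hypothetical per-CPU offset (RDX) */

    struct gdt_desc d = {
        .limit = GDT_SIZE - 1,
        .base  = gdt_page + percpu_off,  /* leaq gdt_page(%rip),%rax; addq %rdx,%rax */
    };

    printf("lgdt operand: limit=%#x base=%#llx size=%zu\n",
           d.limit, (unsigned long long)d.base, sizeof(d));
    return 0;
}
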
@@ -106,6 +106,9 @@ SYM_CODE_START_NOALIGN(relocate_kernel)
/* setup a new stack at the end of the physical control page */
lea PAGE_SIZE(%r8), %rsp
+ /* save the virtual address of virtual_mapped() before jumping to the identity mapped page */
+ leaq virtual_mapped(%rip), %r14
+
/* jump to identity mapped page */
addq $(identity_mapped - relocate_kernel), %r8
pushq %r8
@@ -225,8 +228,7 @@ SYM_CODE_START_LOCAL_NOALIGN(identity_mapped)
movq %rax, %cr3
lea PAGE_SIZE(%r8), %rsp
call swap_pages
- movq $virtual_mapped, %rax
- pushq %rax
+ pushq %r14
ANNOTATE_UNRET_SAFE
ret
int3
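
Together with the hunk in relocate_kernel() above, this replaces the absolute movq $virtual_mapped with an address captured RIP-relatively into %r14 while the code still runs from its normal mapping, then consumed by the push/ret after the switch to the identity-mapped control page. A loose C analogue of the pattern (capture the target early, call through the saved pointer later); all names below are invented:

#include <stdio.h>

static void virtual_mapped_stage(void)
{
    puts("back in the virtual mapping");
}

/* stands in for the identity-mapped copy of the code: it must not resolve
 * the target symbol itself, it only uses the pointer captured earlier */
static void identity_mapped_stage(void (*resume)(void))
{
    /* ... swap_pages() and the rest of the identity-mapped work ... */
    resume();  /* the C analogue of pushq %r14; ret */
}

int main(void)
{
    /* like leaq virtual_mapped(%rip), %r14 while still in the normal mapping */
    void (*saved)(void) = virtual_mapped_stage;

    identity_mapped_stage(saved);
    return 0;
}
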