@@ -15,15 +15,18 @@
*/
#include <linux/linkage.h>
-#include <asm/asm-offsets.h>
#include <asm/msr.h>
#include <asm/page_types.h>
+#include <asm/pgtable_types.h>
#include <asm/processor-flags.h>
#include <asm/segment.h>
-#include <asm/setup.h>
.code64
.text
+ .balign 8
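+/*
+ * Minimal GDT used for the switch into long mode: a null descriptor, an
+ * unused slot and a 64-bit code segment whose selector matches __KERNEL_CS.
+ */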
+SYM_DATA_LOCAL(gdt, .quad 0x0, 0x0, 0x00af9a000000ffff) /* __KERNEL_CS */
+ .set gdt_size, . - gdt
+
/*
* When booting in 64-bit mode on 32-bit EFI firmware, startup_64_mixed_mode()
* is the first thing that runs after switching to long mode. Depending on
@@ -35,30 +38,34 @@
* pointer is used to disambiguate.
*
  *                                                             +--------------+
- *  +------------------+     +------------+            +------>| efi_pe_entry |
- *  | efi32_pe_entry   |---->|            |            |       +-----------+--+
- *  +------------------+     |            |     +------+----------------+  |
- *                           | startup_32 |---->| startup_64_mixed_mode |  |
- *  +------------------+     |            |     +------+----------------+  |
- *  | efi32_stub_entry |---->|            |            |                   |
- *  +------------------+     +------------+            |                   |
+ *  +------------------+    +-------------+            +------>| efi_pe_entry |
+ *  | efi32_pe_entry   |--->|             |            |       +-----------+--+
+ *  +------------------+    |             |      +------+----------------+  |
+ *                          | efi32_entry |----->| startup_64_mixed_mode |  |
+ *  +------------------+    |             |      +------+----------------+  |
+ *  | efi32_stub_entry |--->|             |            |                   |
+ *  +------------------+    +-------------+            |                   |
  *                                                     V                   |
- *                           +------------+     +----------------+         |
- *                           | startup_64 |<----| efi_stub_entry |<--------+
- *                           +------------+     +----------------+
+ *                          +-------------+     +----------------+         |
+ *                          | vmlinux     |<----| efi_stub_entry |<--------+
+ *                          +-------------+     +----------------+
*/
-SYM_FUNC_START(startup_64_mixed_mode)
- lea efi32_boot_args(%rip), %rdx
- mov 0(%rdx), %edi
- mov 4(%rdx), %esi
+SYM_FUNC_START_LOCAL_NOALIGN(startup_64_mixed_mode)
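+ /*
+ * The firmware's segment selectors are stale under the GDT loaded by
+ * efi32_entry(), so reload the data segment registers with null
+ * selectors, which are legal in long mode.
+ */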
+ xorl %eax, %eax
+ movl %eax, %ds
+ movl %eax, %es
+ movl %eax, %ss
+ movl %eax, %fs
+ movl %eax, %gs
- /* Switch to the firmware's stack */
- movl efi32_boot_sp(%rip), %esp
- andl $~7, %esp
+ movl 0(%rsp), %ecx // image handle (MS calling convention)
+ movl 4(%rsp), %edx // EFI system table pointer
#ifdef CONFIG_EFI_HANDOVER_PROTOCOL
- mov 8(%rdx), %edx // saved bootparams pointer
- test %edx, %edx
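+ /*
+ * Move the arguments into the SysV argument registers expected by
+ * efi_stub_entry(): %edi = image handle, %esi = system table and
+ * %edx = struct boot_params. MOV and CMOV leave the flags from the
+ * TEST intact, so the JNZ below still acts on the original %edi.
+ */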
+ test %edi, %edi // struct boot_params provided?
+ movl %edx, %esi // SysV calling convention
+ cmovnzl %edi, %edx
+ movl %ecx, %edi
jnz efi_stub_entry
#endif
/*
@@ -69,8 +76,6 @@ SYM_FUNC_START(startup_64_mixed_mode)
* the correct stack alignment for entry.
*/
sub $40, %rsp
- mov %rdi, %rcx // MS calling convention
- mov %rsi, %rdx
jmp efi_pe_entry
SYM_FUNC_END(startup_64_mixed_mode)
@@ -151,7 +156,6 @@ SYM_FUNC_END(__efi64_thunk)
SYM_FUNC_START(efi32_stub_entry)
call 1f
1: popl %ecx
- leal (efi32_boot_args - 1b)(%ecx), %ebx
/* Clear BSS */
xorl %eax, %eax
@@ -163,10 +167,7 @@ SYM_FUNC_START(efi32_stub_entry)
rep stosl
add $0x4, %esp /* Discard return address */
- popl %ecx
- popl %edx
- popl %esi
- movl %esi, 8(%ebx)
+ movl 8(%esp), %edi /* struct boot_params pointer */
jmp efi32_entry
SYM_FUNC_END(efi32_stub_entry)
#endif
@@ -241,8 +242,9 @@ SYM_FUNC_END(efi_enter32)
/*
* This is the common EFI stub entry point for mixed mode.
*
- * Arguments: %ecx image handle
- * %edx EFI system table pointer
+ * Arguments: 0(%esp) image handle
+ * 4(%esp) EFI system table pointer
+ * %edi struct boot_params pointer (or NULL)
*
* Since this is the point of no return for ordinary execution, no registers
* are considered live except for the function parameters. [Note that the EFI
@@ -261,31 +263,58 @@ SYM_FUNC_START_LOCAL(efi32_entry)
/* Store firmware IDT descriptor */
sidtl (efi32_boot_idt - 1b)(%ebx)
- /* Store firmware stack pointer */
- movl %esp, (efi32_boot_sp - 1b)(%ebx)
-
- /* Store boot arguments */
- leal (efi32_boot_args - 1b)(%ebx), %ebx
- movl %ecx, 0(%ebx)
- movl %edx, 4(%ebx)
- movb $0x0, 12(%ebx) // efi_is64
-
- /*
- * Allocate some memory for a temporary struct boot_params, which only
- * needs the minimal pieces that startup_32() relies on.
- */
- subl $PARAM_SIZE, %esp
- movl %esp, %esi
- movl $PAGE_SIZE, BP_kernel_alignment(%esi)
- movl $_end - 1b, BP_init_size(%esi)
- subl $startup_32 - 1b, BP_init_size(%esi)
+ /* Record that the firmware is 32-bit (mixed mode) */
+ movb $0x0, (efi_is64 - 1b)(%ebx)
/* Disable paging */
movl %cr0, %eax
btrl $X86_CR0_PG_BIT, %eax
movl %eax, %cr0
- jmp startup_32
+ /* Set up 1:1 mapping */
+ leal (pte - 1b)(%ebx), %eax
+ movl $_PAGE_PRESENT | _PAGE_RW | _PAGE_PSE, %ecx
+ leal (_PAGE_PRESENT | _PAGE_RW)(%eax), %edx
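+ /*
+ * Fill the first four pages with 2 MiB page entries, creating four
+ * page directories that map the low 4 GiB of memory 1:1. The loop
+ * ends when the physical address in %ecx wraps around 4 GiB.
+ */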
+2: movl %ecx, (%eax)
+ addl $8, %eax
+ addl $PMD_SIZE, %ecx
+ jnc 2b
+
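+ /*
+ * %eax now points at the fifth page, which becomes the PDPT: point its
+ * first four entries at the page directories created above.
+ */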
+ xor %ecx, %ecx
+ movl $PAGE_SIZE, %esi
+3: movl %edx, (%eax,%ecx,8)
+ addl %esi, %edx
+ inc %ecx
+ cmp $4, %ecx
+ jl 3b
+
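+ /*
+ * The sixth page becomes the PML4, with a single entry pointing at the
+ * PDPT. Load it into CR3.
+ */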
+ addl %esi, %eax
+ movl %edx, (%eax)
+ movl %eax, %cr3
+
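+ /* Enable PAE, a prerequisite for long mode paging */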
+ movl %cr4, %eax
+ orl $X86_CR4_PAE, %eax
+ movl %eax, %cr4
+
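+ /* Set EFER.LME to enable long mode */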
+ movl $MSR_EFER, %ecx
+ rdmsr
+ btsl $_EFER_LME, %eax
+ wrmsr
+
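+ /* Build a 6-byte GDT descriptor (limit, base) on the stack and load it */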
+ leal (gdt - 1b)(%ebx), %ecx
+ pushl %ecx
+ pushw $gdt_size - 1
+ lgdtl (%esp)
+ leal 6(%esp), %esp // discard the temporary GDT descriptor
+
+ /* Enable paging and jump to long mode */
+ leal (startup_64_mixed_mode - 1b)(%ebx), %ecx
+ pushl $__KERNEL_CS
+ pushl %ecx
+ movl %cr0, %eax
+ btsl $X86_CR0_PG_BIT, %eax
+ movl %eax, %cr0
+ lret
SYM_FUNC_END(efi32_entry)
/*
@@ -301,10 +330,8 @@ SYM_FUNC_START(efi32_pe_entry)
btl $29, %edx // check long mode bit
jnc 1f
leal 8(%esp), %esp // preserve stack alignment
- movl (%esp), %ecx // image_handle
- movl 4(%esp), %edx // sys_table
- jmp efi32_entry // pass %ecx, %edx
- // no other registers remain live
+ xor %edi, %edi // no struct boot_params in EDI
+ jmp efi32_entry // only ESP and EDI remain live
1: movl $0x80000003, %eax // EFI_UNSUPPORTED
popl %ebx
RET
@@ -318,8 +345,10 @@ SYM_FUNC_START_NOALIGN(efi64_stub_entry)
SYM_FUNC_END(efi64_stub_entry)
#endif
- .data
- .balign 8
+ .bss
+ .balign PAGE_SIZE
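+/* Page tables for the 1:1 mapping: four page directories, a PDPT and a PML4 */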
+SYM_DATA_LOCAL(pte, .fill 6 * PAGE_SIZE, 1, 0)
+
SYM_DATA_START_LOCAL(efi32_boot_gdt)
.word 0
.quad 0
@@ -330,8 +359,8 @@ SYM_DATA_START_LOCAL(efi32_boot_idt)
.quad 0
SYM_DATA_END(efi32_boot_idt)
+ .data
+ .balign 4
SYM_DATA_LOCAL(efi32_boot_cs, .word 0)
SYM_DATA_LOCAL(efi32_boot_ds, .word 0)
-SYM_DATA_LOCAL(efi32_boot_sp, .long 0)
-SYM_DATA_LOCAL(efi32_boot_args, .long 0, 0, 0)
SYM_DATA(efi_is64, .byte 1)
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -263,13 +263,6 @@ SYM_FUNC_START(startup_32)
* used to perform that far jump.
*/
leal rva(startup_64)(%ebp), %eax
-#ifdef CONFIG_EFI_MIXED
- cmpb $1, rva(efi_is64)(%ebp)
- je 1f
- leal rva(startup_64_mixed_mode)(%ebp), %eax
-1:
-#endif
-
pushl $__KERNEL_CS
pushl %eax