--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -180,12 +180,12 @@ SYM_FUNC_START(startup_32)
*/
/*
* If SEV is active then set the encryption mask in the page tables.
- * This will insure that when the kernel is copied and decompressed
+ * This will ensure that when the kernel is copied and decompressed
* it will be done so encrypted.
*/
+#ifdef CONFIG_AMD_MEM_ENCRYPT
call get_sev_encryption_bit
xorl %edx, %edx
-#ifdef CONFIG_AMD_MEM_ENCRYPT
testl %eax, %eax
jz 1f
subl $32, %eax /* Encryption bit is always above bit 31 */
@@ -199,6 +199,8 @@ SYM_FUNC_START(startup_32)
*/
movl $1, rva(sev_status)(%ebp)
1:
+#else
+ xorl %edx, %edx
#endif
/* Initialize Page tables to 0 */
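Why %edx needs a well-defined value in both branches: the page-table build that follows in startup_32 adds the upper half of the encryption mask, held in %edx, into the high dword of each 64-bit page-table entry. As a rough, illustrative sketch of that existing code (paraphrased from head_64.S, not part of this patch):

	/*
	 * Sketch only, paraphrasing the top-level page-table build in
	 * head_64.S.  %edx holds bits 63:32 of the SEV encryption mask,
	 * or 0 when SEV is not active, so the addl below either sets the
	 * C-bit in the entry's high dword or leaves it untouched.
	 */
	leal	rva(pgtable + 0)(%ebx), %edi	/* first entry of the top-level table */
	leal	0x1007(%edi), %eax		/* low dword: next table address plus flags */
	movl	%eax, 0(%edi)
	addl	%edx, 4(%edi)			/* high dword: add the encryption mask */

With the new #else branch zeroing %edx, that addition is a no-op when CONFIG_AMD_MEM_ENCRYPT is not set, and the page tables are built without the encryption bit.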
--- a/arch/x86/boot/compressed/mem_encrypt.S
+++ b/arch/x86/boot/compressed/mem_encrypt.S
@@ -21,12 +21,7 @@
.code32
SYM_FUNC_START(get_sev_encryption_bit)
- xor %eax, %eax
-
-#ifdef CONFIG_AMD_MEM_ENCRYPT
push %ebx
- push %ecx
- push %edx
movl $0x80000000, %eax /* CPUID to check the highest leaf */
cpuid
@@ -57,12 +52,7 @@ SYM_FUNC_START(get_sev_encryption_bit)
xor %eax, %eax
.Lsev_exit:
- pop %edx
- pop %ecx
pop %ebx
-
-#endif /* CONFIG_AMD_MEM_ENCRYPT */
-
RET
SYM_FUNC_END(get_sev_encryption_bit)
Make get_sev_encryption_bit() follow the ordinary x86 calling convention,
and only call it if CONFIG_AMD_MEM_ENCRYPT is actually enabled. This
clarifies the calling code, and makes it more maintainable.

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
---
 arch/x86/boot/compressed/head_64.S     |  6 ++++--
 arch/x86/boot/compressed/mem_encrypt.S | 10 ----------
 2 files changed, 4 insertions(+), 12 deletions(-)
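As background on "the ordinary x86 calling convention" referenced above: in 32-bit code, %eax, %ecx and %edx are call-clobbered, while %ebx, %esi, %edi and %ebp must be preserved by the callee. CPUID clobbers %ebx, so that is the only register get_sev_encryption_bit() still needs to save, and the caller has to assume %ecx and %edx are lost across the call, which is consistent with startup_32 zeroing %edx after the call. A minimal standalone sketch of that contract, with a made-up helper name (not code from this patch):

#include <linux/linkage.h>

	.code32
SYM_FUNC_START(example_cpuid_helper)
	push	%ebx			/* callee-saved, and clobbered by CPUID */
	movl	$0x80000000, %eax	/* request the highest extended CPUID leaf */
	cpuid				/* clobbers %eax, %ebx, %ecx, %edx */
	pop	%ebx			/* restore the callee-saved register */
	RET
SYM_FUNC_END(example_cpuid_helper)

Since the helper is now only called when CONFIG_AMD_MEM_ENCRYPT is enabled, the #ifdef guard that used to make it return 0 in the disabled configuration is no longer needed, which is where most of the 10 deleted lines in mem_encrypt.S come from.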