
x86/boot/sev: Support memory acceptance in the EFI stub under SVSM

Message ID 20250428174322.2780170-2-ardb+git@google.com
State New
Series x86/boot/sev: Support memory acceptance in the EFI stub under SVSM

Commit Message

Ard Biesheuvel April 28, 2025, 5:43 p.m. UTC
From: Ard Biesheuvel <ardb@kernel.org>

Commit

  d54d610243a4 ("x86/boot/sev: Avoid shared GHCB page for early memory acceptance")

provided a fix for SEV-SNP memory acceptance from the EFI stub when
running at VMPL #0. However, that fix was insufficient for SVSM SEV-SNP
guests running at VMPL >0, as those rely on an SVSM calling area: a
shared buffer whose address is programmed into a SEV-SNP MSR, and which
the SEV init code only sets up much later during boot.

Given that booting via the EFI stub at VMPL >0 implies that the firmware
has configured this calling area already, reuse it for performing memory
acceptance in the EFI stub.
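
For reference, locating the calling area in the decompressor only
involves checking CPUID Fn8000_001F_EAX[28] for SVSM support and
reading the calling area address back from the CAA MSR; the sequence
below is condensed from the patch (same helpers and variables, hunk
context omitted):

	if (sev_get_status() & MSR_AMD64_SEV_SNP_ENABLED) {
		unsigned int eax = 0x8000001f, ebx, ecx = 0, edx;
		struct msr m;

		/* CPUID Fn8000_001F_EAX[28] - SVSM support */
		native_cpuid(&eax, &ebx, &ecx, &edx);
		if (eax & BIT(28)) {
			/* Obtain the address of the calling area to use */
			boot_rdmsr(MSR_SVSM_CAA, &m);
			boot_svsm_caa = (void *)m.q;
			boot_svsm_caa_pa = m.q;
		}
	}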

Cc: Borislav Petkov <bp@alien8.de>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Dionna Amalie Glaze <dionnaglaze@google.com>
Cc: Kevin Loughlin <kevinloughlin@google.com>
Cc: <stable@vger.kernel.org>
Fixes: fcd042e86422 ("x86/sev: Perform PVALIDATE using the SVSM when not at VMPL0")
Co-developed-by: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
---
Tom,

Please confirm that this works as you intended.

Thanks,

 arch/x86/boot/compressed/mem.c |  5 +--
 arch/x86/boot/compressed/sev.c | 40 ++++++++++++++++++++
 arch/x86/boot/compressed/sev.h |  2 +
 3 files changed, 43 insertions(+), 4 deletions(-)


base-commit: b4432656b36e5cc1d50a1f2dc15357543add530e

Comments

Tom Lendacky May 1, 2025, 6:05 p.m. UTC | #1
On 4/28/25 12:43, Ard Biesheuvel wrote:
> From: Ard Biesheuvel <ardb@kernel.org>
> 
> Commit
> 
>   d54d610243a4 ("x86/boot/sev: Avoid shared GHCB page for early memory acceptance")
> 
> provided a fix for SEV-SNP memory acceptance from the EFI stub when
> running at VMPL #0. However, that fix was insufficient for SVSM SEV-SNP
> guests running at VMPL >0, as those rely on an SVSM calling area: a
> shared buffer whose address is programmed into a SEV-SNP MSR, and which
> the SEV init code only sets up much later during boot.
> 
> Given that booting via the EFI stub at VMPL >0 implies that the firmware
> has configured this calling area already, reuse it for performing memory
> acceptance in the EFI stub.

This looks to be working for SNP guest boot and kexec. SNP guest boot with
an SVSM is also working, but kexec isn't. But the kexec failure of an SVSM
SNP guest is unrelated to this patch, I'll send a fix for that separately.

Thanks,
Tom

> 
> Cc: Borislav Petkov <bp@alien8.de>
> Cc: Ingo Molnar <mingo@kernel.org>
> Cc: Dionna Amalie Glaze <dionnaglaze@google.com>
> Cc: Kevin Loughlin <kevinloughlin@google.com>
> Cc: <stable@vger.kernel.org>
> Fixes: fcd042e86422 ("x86/sev: Perform PVALIDATE using the SVSM when not at VMPL0")
> Co-developed-by: Tom Lendacky <thomas.lendacky@amd.com>
> Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
> Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
> ---
> Tom,
> 
> Please confirm that this works as you intended.
> 
> Thanks,
> 
>  arch/x86/boot/compressed/mem.c |  5 +--
>  arch/x86/boot/compressed/sev.c | 40 ++++++++++++++++++++
>  arch/x86/boot/compressed/sev.h |  2 +
>  3 files changed, 43 insertions(+), 4 deletions(-)
> 
> diff --git a/arch/x86/boot/compressed/mem.c b/arch/x86/boot/compressed/mem.c
> index f676156d9f3d..0e9f84ab4bdc 100644
> --- a/arch/x86/boot/compressed/mem.c
> +++ b/arch/x86/boot/compressed/mem.c
> @@ -34,14 +34,11 @@ static bool early_is_tdx_guest(void)
>  
>  void arch_accept_memory(phys_addr_t start, phys_addr_t end)
>  {
> -	static bool sevsnp;
> -
>  	/* Platform-specific memory-acceptance call goes here */
>  	if (early_is_tdx_guest()) {
>  		if (!tdx_accept_memory(start, end))
>  			panic("TDX: Failed to accept memory\n");
> -	} else if (sevsnp || (sev_get_status() & MSR_AMD64_SEV_SNP_ENABLED)) {
> -		sevsnp = true;
> +	} else if (early_is_sevsnp_guest()) {
>  		snp_accept_memory(start, end);
>  	} else {
>  		error("Cannot accept memory: unknown platform\n");
> diff --git a/arch/x86/boot/compressed/sev.c b/arch/x86/boot/compressed/sev.c
> index 89ba168f4f0f..0003e4416efd 100644
> --- a/arch/x86/boot/compressed/sev.c
> +++ b/arch/x86/boot/compressed/sev.c
> @@ -645,3 +645,43 @@ void sev_prep_identity_maps(unsigned long top_level_pgt)
>  
>  	sev_verify_cbit(top_level_pgt);
>  }
> +
> +bool early_is_sevsnp_guest(void)
> +{
> +	static bool sevsnp;
> +
> +	if (sevsnp)
> +		return true;
> +
> +	if (!(sev_get_status() & MSR_AMD64_SEV_SNP_ENABLED))
> +		return false;
> +
> +	sevsnp = true;
> +
> +	if (!snp_vmpl) {
> +		unsigned int eax, ebx, ecx, edx;
> +
> +		/*
> +		 * CPUID Fn8000_001F_EAX[28] - SVSM support
> +		 */
> +		eax = 0x8000001f;
> +		ecx = 0;
> +		native_cpuid(&eax, &ebx, &ecx, &edx);
> +		if (eax & BIT(28)) {
> +			struct msr m;
> +
> +			/* Obtain the address of the calling area to use */
> +			boot_rdmsr(MSR_SVSM_CAA, &m);
> +			boot_svsm_caa = (void *)m.q;
> +			boot_svsm_caa_pa = m.q;
> +
> +			/*
> +			 * The real VMPL level cannot be discovered, but the
> +			 * memory acceptance routines make no use of that so
> +			 * any non-zero value suffices here.
> +			 */
> +			snp_vmpl = U8_MAX;
> +		}
> +	}
> +	return true;
> +}
> diff --git a/arch/x86/boot/compressed/sev.h b/arch/x86/boot/compressed/sev.h
> index 4e463f33186d..d3900384b8ab 100644
> --- a/arch/x86/boot/compressed/sev.h
> +++ b/arch/x86/boot/compressed/sev.h
> @@ -13,12 +13,14 @@
>  bool sev_snp_enabled(void);
>  void snp_accept_memory(phys_addr_t start, phys_addr_t end);
>  u64 sev_get_status(void);
> +bool early_is_sevsnp_guest(void);
>  
>  #else
>  
>  static inline bool sev_snp_enabled(void) { return false; }
>  static inline void snp_accept_memory(phys_addr_t start, phys_addr_t end) { }
>  static inline u64 sev_get_status(void) { return 0; }
> +static inline bool early_is_sevsnp_guest(void) { return false; }
>  
>  #endif
>  
> 
> base-commit: b4432656b36e5cc1d50a1f2dc15357543add530e
Ard Biesheuvel May 2, 2025, 1:11 p.m. UTC | #2
On Thu, 1 May 2025 at 20:05, Tom Lendacky <thomas.lendacky@amd.com> wrote:
>
> On 4/28/25 12:43, Ard Biesheuvel wrote:
> > From: Ard Biesheuvel <ardb@kernel.org>
> >
> > Commit
> >
> >   d54d610243a4 ("x86/boot/sev: Avoid shared GHCB page for early memory acceptance")
> >
> > provided a fix for SEV-SNP memory acceptance from the EFI stub when
> > running at VMPL #0. However, that fix was insufficient for SVSM SEV-SNP
> > guests running at VMPL >0, as those rely on an SVSM calling area: a
> > shared buffer whose address is programmed into a SEV-SNP MSR, and which
> > the SEV init code only sets up much later during boot.
> >
> > Given that booting via the EFI stub at VMPL >0 implies that the firmware
> > has configured this calling area already, reuse it for performing memory
> > acceptance in the EFI stub.
>
> This looks to be working for SNP guest boot and kexec. SNP guest boot with
> an SVSM is also working, but kexec isn't. But the kexec failure of an SVSM
> SNP guest is unrelated to this patch, I'll send a fix for that separately.
>

Thanks for confirming.

Ingo, Boris, can we get this queued as a fix, please, and merge it
back into x86/boot as was done before?


> Thanks,
> Tom
>
> >
> > Cc: Borislav Petkov <bp@alien8.de>
> > Cc: Ingo Molnar <mingo@kernel.org>
> > Cc: Dionna Amalie Glaze <dionnaglaze@google.com>
> > Cc: Kevin Loughlin <kevinloughlin@google.com>
> > Cc: <stable@vger.kernel.org>
> > Fixes: fcd042e86422 ("x86/sev: Perform PVALIDATE using the SVSM when not at VMPL0")
> > Co-developed-by: Tom Lendacky <thomas.lendacky@amd.com>
> > Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
> > Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
> > ---
> > Tom,
> >
> > Please confirm that this works as you intended.
> >
> > Thanks,
> >
Ingo Molnar May 4, 2025, 7:38 a.m. UTC | #3
* Ard Biesheuvel <ardb@kernel.org> wrote:

> On Thu, 1 May 2025 at 20:05, Tom Lendacky <thomas.lendacky@amd.com> wrote:
> >
> > On 4/28/25 12:43, Ard Biesheuvel wrote:
> > > From: Ard Biesheuvel <ardb@kernel.org>
> > >
> > > Commit
> > >
> > >   d54d610243a4 ("x86/boot/sev: Avoid shared GHCB page for early memory acceptance")
> > >
> > > provided a fix for SEV-SNP memory acceptance from the EFI stub when
> > > running at VMPL #0. However, that fix was insufficient for SVSM SEV-SNP
> > > guests running at VMPL >0, as those rely on an SVSM calling area: a
> > > shared buffer whose address is programmed into a SEV-SNP MSR, and which
> > > the SEV init code only sets up much later during boot.
> > >
> > > Given that booting via the EFI stub at VMPL >0 implies that the firmware
> > > has configured this calling area already, reuse it for performing memory
> > > acceptance in the EFI stub.
> >
> > This looks to be working for SNP guest boot and kexec. SNP guest boot with
> > an SVSM is also working, but kexec isn't. But the kexec failure of an SVSM
> > SNP guest is unrelated to this patch, I'll send a fix for that separately.
> >
> 
> Thanks for confirming.
> 
> Ingo, Boris, can we get this queued as a fix, please, and merge it
> back into x86/boot as was done before?

Just to clarify, memory acceptance through the EFI stub from VMPL >0 
SEV-SNP guests was broken last summer via fcd042e86422, and it hasn't 
worked since then?

Thanks,

	Ingo
Ard Biesheuvel May 4, 2025, 7:42 a.m. UTC | #4
On Sun, 4 May 2025 at 09:38, Ingo Molnar <mingo@kernel.org> wrote:
>
>
> * Ard Biesheuvel <ardb@kernel.org> wrote:
>
> > On Thu, 1 May 2025 at 20:05, Tom Lendacky <thomas.lendacky@amd.com> wrote:
> > >
> > > On 4/28/25 12:43, Ard Biesheuvel wrote:
> > > > From: Ard Biesheuvel <ardb@kernel.org>
> > > >
> > > > Commit
> > > >
> > > >   d54d610243a4 ("x86/boot/sev: Avoid shared GHCB page for early memory acceptance")
> > > >
> > > > provided a fix for SEV-SNP memory acceptance from the EFI stub when
> > > > running at VMPL #0. However, that fix was insufficient for SVSM SEV-SNP
> > > > guests running at VMPL >0, as those rely on an SVSM calling area: a
> > > > shared buffer whose address is programmed into a SEV-SNP MSR, and which
> > > > the SEV init code only sets up much later during boot.
> > > >
> > > > Given that booting via the EFI stub at VMPL >0 implies that the firmware
> > > > has configured this calling area already, reuse it for performing memory
> > > > acceptance in the EFI stub.
> > >
> > > This looks to be working for SNP guest boot and kexec. SNP guest boot with
> > > an SVSM is also working, but kexec isn't. But the kexec failure of an SVSM
> > > SNP guest is unrelated to this patch, I'll send a fix for that separately.
> > >
> >
> > Thanks for confirming.
> >
> > Ingo, Boris, can we get this queued as a fix, please, and merge it
> > back into x86/boot as was done before?
>
> Just to clarify, memory acceptance through the EFI stub from VMPL >0
> SEV-SNP guests was broken last summer via fcd042e86422, and it hasn't
> worked since then?
>

It never worked correctly at all for SEV-SNP, since it was enabled in
d54d610243a4.

We never noticed because the SEV-SNP firmware rarely seems to expose
EFI_UNACCEPTED regions in chunks that are not 2M aligned. The EFI stub
only accepts the misaligned bits itself, so that it can accurately
populate the unaccepted_memory table, which tracks unaccepted memory at
2M granularity.
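
Roughly, the stub handles an EFI_UNACCEPTED region like this (a
simplified sketch, not the exact libstub code, with start/end being the
region's bounds; mark_unaccepted_range() is a stand-in for the real
bitmap update):

	/* Only the misaligned head/tail get accepted right away */
	phys_addr_t astart = ALIGN(start, SZ_2M);
	phys_addr_t aend   = ALIGN_DOWN(end, SZ_2M);

	if (astart >= aend) {
		/* Too small to track at 2M granularity: accept it all now */
		arch_accept_memory(start, end);
	} else {
		if (start != astart)
			arch_accept_memory(start, astart);
		if (aend != end)
			arch_accept_memory(aend, end);
		/* Aligned middle is deferred via the unaccepted_memory table */
		mark_unaccepted_range(astart, aend);
	}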

Patch

diff --git a/arch/x86/boot/compressed/mem.c b/arch/x86/boot/compressed/mem.c
index f676156d9f3d..0e9f84ab4bdc 100644
--- a/arch/x86/boot/compressed/mem.c
+++ b/arch/x86/boot/compressed/mem.c
@@ -34,14 +34,11 @@ static bool early_is_tdx_guest(void)
 
 void arch_accept_memory(phys_addr_t start, phys_addr_t end)
 {
-	static bool sevsnp;
-
 	/* Platform-specific memory-acceptance call goes here */
 	if (early_is_tdx_guest()) {
 		if (!tdx_accept_memory(start, end))
 			panic("TDX: Failed to accept memory\n");
-	} else if (sevsnp || (sev_get_status() & MSR_AMD64_SEV_SNP_ENABLED)) {
-		sevsnp = true;
+	} else if (early_is_sevsnp_guest()) {
 		snp_accept_memory(start, end);
 	} else {
 		error("Cannot accept memory: unknown platform\n");
diff --git a/arch/x86/boot/compressed/sev.c b/arch/x86/boot/compressed/sev.c
index 89ba168f4f0f..0003e4416efd 100644
--- a/arch/x86/boot/compressed/sev.c
+++ b/arch/x86/boot/compressed/sev.c
@@ -645,3 +645,43 @@ void sev_prep_identity_maps(unsigned long top_level_pgt)
 
 	sev_verify_cbit(top_level_pgt);
 }
+
+bool early_is_sevsnp_guest(void)
+{
+	static bool sevsnp;
+
+	if (sevsnp)
+		return true;
+
+	if (!(sev_get_status() & MSR_AMD64_SEV_SNP_ENABLED))
+		return false;
+
+	sevsnp = true;
+
+	if (!snp_vmpl) {
+		unsigned int eax, ebx, ecx, edx;
+
+		/*
+		 * CPUID Fn8000_001F_EAX[28] - SVSM support
+		 */
+		eax = 0x8000001f;
+		ecx = 0;
+		native_cpuid(&eax, &ebx, &ecx, &edx);
+		if (eax & BIT(28)) {
+			struct msr m;
+
+			/* Obtain the address of the calling area to use */
+			boot_rdmsr(MSR_SVSM_CAA, &m);
+			boot_svsm_caa = (void *)m.q;
+			boot_svsm_caa_pa = m.q;
+
+			/*
+			 * The real VMPL level cannot be discovered, but the
+			 * memory acceptance routines make no use of that so
+			 * any non-zero value suffices here.
+			 */
+			snp_vmpl = U8_MAX;
+		}
+	}
+	return true;
+}
diff --git a/arch/x86/boot/compressed/sev.h b/arch/x86/boot/compressed/sev.h
index 4e463f33186d..d3900384b8ab 100644
--- a/arch/x86/boot/compressed/sev.h
+++ b/arch/x86/boot/compressed/sev.h
@@ -13,12 +13,14 @@ 
 bool sev_snp_enabled(void);
 void snp_accept_memory(phys_addr_t start, phys_addr_t end);
 u64 sev_get_status(void);
+bool early_is_sevsnp_guest(void);
 
 #else
 
 static inline bool sev_snp_enabled(void) { return false; }
 static inline void snp_accept_memory(phys_addr_t start, phys_addr_t end) { }
 static inline u64 sev_get_status(void) { return 0; }
+static inline bool early_is_sevsnp_guest(void) { return false; }
 
 #endif