diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -2378,6 +2378,28 @@ static int sev_handle_vmgexit_msr_protocol(struct vcpu_svm *svm)
GHCB_MSR_INFO_POS);
break;
}
+ case GHCB_MSR_GHCB_GPA_REGISTER_REQ: {
+ kvm_pfn_t pfn;
+ u64 gfn;
+
+ gfn = get_ghcb_msr_bits(svm,
+ GHCB_MSR_GHCB_GPA_REGISTER_VALUE_MASK,
+ GHCB_MSR_GHCB_GPA_REGISTER_VALUE_POS);
+
+ pfn = kvm_vcpu_gfn_to_pfn(vcpu, gfn);
+ if (is_error_noslot_pfn(pfn))
+ gfn = GHCB_MSR_GHCB_GPA_REGISTER_ERROR;
+ else
+ svm->ghcb_registered_gpa = gfn_to_gpa(gfn);
+
+ set_ghcb_msr_bits(svm, gfn,
+ GHCB_MSR_GHCB_GPA_REGISTER_VALUE_MASK,
+ GHCB_MSR_GHCB_GPA_REGISTER_VALUE_POS);
+ set_ghcb_msr_bits(svm, GHCB_MSR_GHCB_GPA_REGISTER_RESP,
+ GHCB_MSR_INFO_MASK,
+ GHCB_MSR_INFO_POS);
+ break;
+ }
case GHCB_MSR_TERM_REQ: {
u64 reason_set, reason_code;
@@ -2418,6 +2440,12 @@ int sev_handle_vmgexit(struct vcpu_svm *svm)
return -EINVAL;
}
+ /* SEV-SNP guests require that the GHCB GPA be registered */
+ if (sev_snp_guest(svm->vcpu.kvm) && !ghcb_gpa_is_registered(svm, ghcb_gpa)) {
+ vcpu_unimpl(&svm->vcpu, "vmgexit: GHCB GPA [%#llx] is not registered.\n", ghcb_gpa);
+ return -EINVAL;
+ }
+
if (kvm_vcpu_map(&svm->vcpu, ghcb_gpa >> PAGE_SHIFT, &svm->ghcb_map)) {
/* Unable to map GHCB from guest */
vcpu_unimpl(&svm->vcpu, "vmgexit: error mapping GHCB [%#llx] from guest\n",
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -194,6 +194,8 @@ struct vcpu_svm {
u64 ghcb_sa_len;
bool ghcb_sa_sync;
bool ghcb_sa_free;
+
+ u64 ghcb_registered_gpa;
};
struct svm_cpu_data {
@@ -254,6 +256,13 @@ static inline bool sev_snp_guest(struct kvm *kvm)
#endif
}
+#define GHCB_GPA_INVALID 0xffffffffffffffff
+
+static inline bool ghcb_gpa_is_registered(struct vcpu_svm *svm, u64 val)
+{
+ return svm->ghcb_registered_gpa == val;
+}
+
static inline void vmcb_mark_all_dirty(struct vmcb *vmcb)
{
vmcb->control.clean = 0;
@@ -574,6 +583,12 @@ void svm_vcpu_unblocking(struct kvm_vcpu *vcpu);
#define GHCB_MSR_CPUID_REG_POS 30
#define GHCB_MSR_CPUID_REG_MASK 0x3
+#define GHCB_MSR_GHCB_GPA_REGISTER_REQ 0x012
+#define GHCB_MSR_GHCB_GPA_REGISTER_VALUE_POS 12
+#define GHCB_MSR_GHCB_GPA_REGISTER_VALUE_MASK 0xfffffffffffff
+#define GHCB_MSR_GHCB_GPA_REGISTER_RESP 0x013
+#define GHCB_MSR_GHCB_GPA_REGISTER_ERROR 0xfffffffffffff
+
#define GHCB_MSR_TERM_REQ 0x100
#define GHCB_MSR_TERM_REASON_SET_POS 12
#define GHCB_MSR_TERM_REASON_SET_MASK 0xf
SEV-SNP guests are required to perform GHCB GPA registration (see section 2.5.2 of the GHCB specification). Before using a GHCB GPA for a vCPU for the first time, the guest must register that GPA. If the hypervisor can work with the guest-requested GPA, it must respond with the same GPA; otherwise it returns -1. On every VMGEXIT, verify that the GHCB GPA matches the registered value. If a mismatch is detected, abort the guest.

Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Joerg Roedel <jroedel@suse.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: "Peter Zijlstra (Intel)" <peterz@infradead.org>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Tom Lendacky <thomas.lendacky@amd.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Sean Christopherson <seanjc@google.com>
Cc: Vitaly Kuznetsov <vkuznets@redhat.com>
Cc: Wanpeng Li <wanpengli@tencent.com>
Cc: Jim Mattson <jmattson@google.com>
Cc: x86@kernel.org
Cc: kvm@vger.kernel.org
Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
---
 arch/x86/kvm/svm/sev.c | 28 ++++++++++++++++++++++++++++
 arch/x86/kvm/svm/svm.h | 15 +++++++++++++++
 2 files changed, 43 insertions(+)
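For reference, the guest side of this handshake (not part of this patch) would look roughly like the sketch below. The function name snp_register_ghcb_gpa() and the helpers sev_es_wr_ghcb_msr(), sev_es_rd_ghcb_msr() and VMGEXIT() are assumptions standing in for the guest's usual GHCB MSR accessors and VMGEXIT wrapper:

/*
 * Sketch only: register a per-vCPU GHCB GPA with the hypervisor via the
 * GHCB MSR protocol (GHCB spec section 2.5.2). The encoding matches the
 * GHCB_MSR_GHCB_GPA_REGISTER_* defines above: bits 11:0 carry the info
 * code (0x012 request, 0x013 response), bits 63:12 carry the GHCB GFN.
 */
static bool snp_register_ghcb_gpa(u64 ghcb_gpa)
{
	u64 val;

	sev_es_wr_ghcb_msr((ghcb_gpa & ~0xfffULL) | 0x012);
	VMGEXIT();

	val = sev_es_rd_ghcb_msr();

	if ((val & 0xfffULL) != 0x013)
		return false;

	/* The hypervisor must echo the same GPA back; all-ones means failure. */
	return (val & ~0xfffULL) == (ghcb_gpa & ~0xfffULL);
}

On the KVM side this pairs with the GHCB_MSR_GHCB_GPA_REGISTER_REQ handling above: the requested GFN is recorded via gfn_to_gpa() in svm->ghcb_registered_gpa and echoed back (or replaced with the all-ones error value if the GFN has no backing memslot), and every later sev_handle_vmgexit() rejects a GHCB whose GPA does not match the registered one.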
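One note on GHCB_GPA_INVALID: it is introduced here but not referenced in the hunks shown, so the vCPU setup path (outside this diff) presumably seeds the new field with it. A minimal sketch of that assumption:

	/*
	 * Assumption, not part of the hunks above: mark the GHCB GPA as
	 * unregistered until the guest sends a registration request, so a
	 * zero-initialized field cannot spuriously match a GHCB at GPA 0.
	 */
	svm->ghcb_registered_gpa = GHCB_GPA_INVALID;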