Message ID: 20221103141351.50662-4-mlevitsk@redhat.com
State: Superseded
Series: nSVM: Security and correctness fixes
On 03/11/2022 14:13, Maxim Levitsky wrote:
> add kvm_leave_nested which wraps a call to nested_ops->leave_nested
> into a function.
>
> Cc: stable@vger.kernel.org
> Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>

Reviewed-by: Liam Merwick <liam.merwick@oracle.com>

> ---
>  arch/x86/kvm/svm/nested.c | 3 ---
>  arch/x86/kvm/vmx/nested.c | 3 ---
>  arch/x86/kvm/x86.c        | 8 +++++++-
>  3 files changed, 7 insertions(+), 7 deletions(-)
>
> diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
> index b74da40c1fc40c..bcc4f6620f8aec 100644
> --- a/arch/x86/kvm/svm/nested.c
> +++ b/arch/x86/kvm/svm/nested.c
> @@ -1147,9 +1147,6 @@ void svm_free_nested(struct vcpu_svm *svm)
>  	svm->nested.initialized = false;
>  }
>
> -/*
> - * Forcibly leave nested mode in order to be able to reset the VCPU later on.
> - */
>  void svm_leave_nested(struct kvm_vcpu *vcpu)
>  {
>  	struct vcpu_svm *svm = to_svm(vcpu);
> diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
> index 61a2e551640a08..1ebe141a0a015f 100644
> --- a/arch/x86/kvm/vmx/nested.c
> +++ b/arch/x86/kvm/vmx/nested.c
> @@ -6441,9 +6441,6 @@ static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
>  	return kvm_state.size;
>  }
>
> -/*
> - * Forcibly leave nested mode in order to be able to reset the VCPU later on.
> - */
>  void vmx_leave_nested(struct kvm_vcpu *vcpu)
>  {
>  	if (is_guest_mode(vcpu)) {
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index cd9eb13e2ed7fc..316ab1d5317f92 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -627,6 +627,12 @@ static void kvm_queue_exception_vmexit(struct kvm_vcpu *vcpu, unsigned int vecto
>  	ex->payload = payload;
>  }
>
> +/* Forcibly leave the nested mode in cases like a vCPU reset */
> +static void kvm_leave_nested(struct kvm_vcpu *vcpu)
> +{
> +	kvm_x86_ops.nested_ops->leave_nested(vcpu);
> +}
> +
>  static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
>  				   unsigned nr, bool has_error, u32 error_code,
>  				   bool has_payload, unsigned long payload, bool reinject)
> @@ -5193,7 +5199,7 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
>  	if (events->flags & KVM_VCPUEVENT_VALID_SMM) {
>  #ifdef CONFIG_KVM_SMM
>  		if (!!(vcpu->arch.hflags & HF_SMM_MASK) != events->smi.smm) {
> -			kvm_x86_ops.nested_ops->leave_nested(vcpu);
> +			kvm_leave_nested(vcpu);
>  			kvm_smm_changed(vcpu, events->smi.smm);
>  		}
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index b74da40c1fc40c..bcc4f6620f8aec 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -1147,9 +1147,6 @@ void svm_free_nested(struct vcpu_svm *svm)
 	svm->nested.initialized = false;
 }

-/*
- * Forcibly leave nested mode in order to be able to reset the VCPU later on.
- */
 void svm_leave_nested(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 61a2e551640a08..1ebe141a0a015f 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -6441,9 +6441,6 @@ static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
 	return kvm_state.size;
 }

-/*
- * Forcibly leave nested mode in order to be able to reset the VCPU later on.
- */
 void vmx_leave_nested(struct kvm_vcpu *vcpu)
 {
 	if (is_guest_mode(vcpu)) {
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index cd9eb13e2ed7fc..316ab1d5317f92 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -627,6 +627,12 @@ static void kvm_queue_exception_vmexit(struct kvm_vcpu *vcpu, unsigned int vecto
 	ex->payload = payload;
 }

+/* Forcibly leave the nested mode in cases like a vCPU reset */
+static void kvm_leave_nested(struct kvm_vcpu *vcpu)
+{
+	kvm_x86_ops.nested_ops->leave_nested(vcpu);
+}
+
 static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
 				   unsigned nr, bool has_error, u32 error_code,
 				   bool has_payload, unsigned long payload, bool reinject)
@@ -5193,7 +5199,7 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
 	if (events->flags & KVM_VCPUEVENT_VALID_SMM) {
 #ifdef CONFIG_KVM_SMM
 		if (!!(vcpu->arch.hflags & HF_SMM_MASK) != events->smi.smm) {
-			kvm_x86_ops.nested_ops->leave_nested(vcpu);
+			kvm_leave_nested(vcpu);
 			kvm_smm_changed(vcpu, events->smi.smm);
 		}
add kvm_leave_nested which wraps a call to nested_ops->leave_nested
into a function.

Cc: stable@vger.kernel.org
Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
---
 arch/x86/kvm/svm/nested.c | 3 ---
 arch/x86/kvm/vmx/nested.c | 3 ---
 arch/x86/kvm/x86.c        | 8 +++++++-
 3 files changed, 7 insertions(+), 7 deletions(-)
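For readers less familiar with KVM's vendor-ops indirection, the sketch
below is a minimal, self-contained userspace model of the pattern the
patch relies on: vendor code (SVM or VMX) publishes its leave_nested
implementation through an ops table, and the new kvm_leave_nested()
wrapper becomes the single x86-common call site. Everything here other
than the kvm_leave_nested()/leave_nested names is a simplified stand-in,
not the kernel's real definitions.

/*
 * Userspace sketch of the ops-table wrapper pattern (illustrative only;
 * the struct layouts and vendor_leave_nested() are invented stand-ins).
 */
#include <stdio.h>
#include <stdbool.h>

struct kvm_vcpu {
	bool guest_mode;	/* stand-in for is_guest_mode(vcpu) */
};

struct kvm_x86_nested_ops {
	void (*leave_nested)(struct kvm_vcpu *vcpu);
};

/* Roughly what svm_leave_nested()/vmx_leave_nested() do, much simplified. */
static void vendor_leave_nested(struct kvm_vcpu *vcpu)
{
	if (vcpu->guest_mode) {
		vcpu->guest_mode = false;
		printf("forced vCPU out of nested mode\n");
	}
}

static const struct kvm_x86_nested_ops nested_ops = {
	.leave_nested = vendor_leave_nested,
};

/* Forcibly leave nested mode, e.g. when userspace resets the vCPU. */
static void kvm_leave_nested(struct kvm_vcpu *vcpu)
{
	nested_ops.leave_nested(vcpu);
}

int main(void)
{
	struct kvm_vcpu vcpu = { .guest_mode = true };

	kvm_leave_nested(&vcpu);
	return 0;
}

The wrapper adds no behaviour of its own in this patch; its value is that
any common work that must happen whenever nested mode is forcibly exited
now has exactly one place to live, instead of open-coded
kvm_x86_ops.nested_ops->leave_nested() calls scattered through x86.c.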