@@ -5695,7 +5695,7 @@ static void kvm_mmu_zap_all_fast(struct kvm *kvm)
if (is_tdp_mmu_enabled(kvm)) {
read_lock(&kvm->mmu_lock);
- kvm_tdp_mmu_zap_invalidated_roots(kvm);
+ kvm_tdp_mmu_zap_invalidated_roots(kvm, true);
read_unlock(&kvm->mmu_lock);
}
}
@@ -821,14 +821,18 @@ static struct kvm_mmu_page *next_invalidated_root(struct kvm *kvm,
* only has to do a trivial amount of work. Since the roots are invalid,
* no new SPTEs should be created under them.
*/
-void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm)
+void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm, bool shared)
{
struct kvm_mmu_page *next_root;
struct kvm_mmu_page *root;
bool flush = false;
- lockdep_assert_held_read(&kvm->mmu_lock);
+ kvm_lockdep_assert_mmu_lock_held(kvm, shared);
+ /*
+ * rcu_read_lock is only needed for shared == true, but we
+ * always take it for simplicity.
+ */
rcu_read_lock();
root = next_invalidated_root(kvm, NULL);
@@ -838,13 +842,10 @@ void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm)
rcu_read_unlock();
- flush = zap_gfn_range(kvm, root, 0, -1ull, true, flush, true);
+ flush = zap_gfn_range(kvm, root, 0, -1ull, true, flush, shared);
- /*
- * Put the reference acquired in
- * kvm_tdp_mmu_invalidate_roots
- */
- kvm_tdp_mmu_put_root(kvm, root, true);
+ /* Put the reference acquired in kvm_tdp_mmu_invalidate_roots. */
+ kvm_tdp_mmu_put_root(kvm, root, shared);
root = next_root;
@@ -46,7 +46,7 @@ static inline bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
void kvm_tdp_mmu_zap_all(struct kvm *kvm);
void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm);
-void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm);
+void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm, bool shared);
int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);
Zapping within a write-side critical section is more efficient, so it is
desirable if we know that no vCPU is running (such as within the .release
MMU notifier callback).  Prepare for reusing
kvm_tdp_mmu_zap_invalidated_roots in such scenarios.

Fixes: b7cccd397f31 ("KVM: x86/mmu: Fast invalidation for TDP MMU")
Cc: stable@vger.kernel.org
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 arch/x86/kvm/mmu/mmu.c     |  2 +-
 arch/x86/kvm/mmu/tdp_mmu.c | 17 +++++++++--------
 arch/x86/kvm/mmu/tdp_mmu.h |  2 +-
 3 files changed, 11 insertions(+), 10 deletions(-)