
[v8,06/13] KVM: x86: Generalize private fault lookups to guest_memfd fault lookups

Message ID 20250430165655.605595-7-tabba@google.com
State New
Series KVM: Mapping guest_memfd backed memory at the host for software protected VMs

Commit Message

Fuad Tabba April 30, 2025, 4:56 p.m. UTC
Until now, faults to private memory backed by guest_memfd have always
been consumed from guest_memfd, whereas faults to shared memory are
consumed from anonymous memory. Subsequent patches will allow
guest_memfd-backed memory to be shared in place and mapped by the host.
Faults to such in-place shared memory should be consumed from
guest_memfd as well.

To facilitate that, generalize the fault lookups: rename the
private-only helpers to gmem-wide ones and route the decision through a
new kvm_mem_from_gmem() helper. Since only private memory is currently
consumed from guest_memfd, this patch does not change any behavior.

Co-developed-by: David Hildenbrand <david@redhat.com>
Signed-off-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Fuad Tabba <tabba@google.com>
---
 arch/x86/kvm/mmu/mmu.c   | 19 +++++++++----------
 include/linux/kvm_host.h |  6 ++++++
 2 files changed, 15 insertions(+), 10 deletions(-)
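
For orientation before reading the diff: below is a simplified sketch of
the fault dispatch this patch generalizes, assembled from the mmu.c hunks
that follow. Error and slow-path handling are elided, and the trailing
arguments of __kvm_faultin_pfn() are taken from the upstream mmu.c context
the hunk truncates, so treat this as a reading aid rather than the literal
post-patch function.

static int __kvm_mmu_faultin_pfn(struct kvm_vcpu *vcpu,
				 struct kvm_page_fault *fault)
{
	unsigned int foll = fault->write ? FOLL_WRITE : 0;

	/* guest_memfd-backed faults (today: private memory only). */
	if (fault->is_private)
		return kvm_mmu_faultin_pfn_gmem(vcpu, fault);

	/* Shared faults still come from the host userspace mapping. */
	foll |= FOLL_NOWAIT;
	fault->pfn = __kvm_faultin_pfn(fault->slot, fault->gfn, foll,
				       &fault->map_writable,
				       &fault->refcounted_page);
	/* ... remainder of the shared path elided ... */
	return RET_PF_CONTINUE;
}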

Patch

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 6d5dd869c890..08eebd24a0e1 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3258,7 +3258,7 @@  static int host_pfn_mapping_level(struct kvm *kvm, gfn_t gfn,
 
 static int __kvm_mmu_max_mapping_level(struct kvm *kvm,
 				       const struct kvm_memory_slot *slot,
-				       gfn_t gfn, int max_level, bool is_private)
+				       gfn_t gfn, int max_level, bool is_gmem)
 {
 	struct kvm_lpage_info *linfo;
 	int host_level;
@@ -3270,7 +3270,7 @@  static int __kvm_mmu_max_mapping_level(struct kvm *kvm,
 			break;
 	}
 
-	if (is_private)
+	if (is_gmem)
 		return max_level;
 
 	if (max_level == PG_LEVEL_4K)
@@ -3283,10 +3283,9 @@  static int __kvm_mmu_max_mapping_level(struct kvm *kvm,
 int kvm_mmu_max_mapping_level(struct kvm *kvm,
 			      const struct kvm_memory_slot *slot, gfn_t gfn)
 {
-	bool is_private = kvm_slot_has_gmem(slot) &&
-			  kvm_mem_is_private(kvm, gfn);
+	bool is_gmem = kvm_slot_has_gmem(slot) && kvm_mem_from_gmem(kvm, gfn);
 
-	return __kvm_mmu_max_mapping_level(kvm, slot, gfn, PG_LEVEL_NUM, is_private);
+	return __kvm_mmu_max_mapping_level(kvm, slot, gfn, PG_LEVEL_NUM, is_gmem);
 }
 
 void kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
@@ -4465,7 +4464,7 @@  static inline u8 kvm_max_level_for_order(int order)
 	return PG_LEVEL_4K;
 }
 
-static u8 kvm_max_private_mapping_level(struct kvm *kvm, kvm_pfn_t pfn,
+static u8 kvm_max_gmem_mapping_level(struct kvm *kvm, kvm_pfn_t pfn,
 					u8 max_level, int gmem_order)
 {
 	u8 req_max_level;
@@ -4491,7 +4490,7 @@  static void kvm_mmu_finish_page_fault(struct kvm_vcpu *vcpu,
 				 r == RET_PF_RETRY, fault->map_writable);
 }
 
-static int kvm_mmu_faultin_pfn_private(struct kvm_vcpu *vcpu,
+static int kvm_mmu_faultin_pfn_gmem(struct kvm_vcpu *vcpu,
 				       struct kvm_page_fault *fault)
 {
 	int max_order, r;
@@ -4509,8 +4508,8 @@  static int kvm_mmu_faultin_pfn_private(struct kvm_vcpu *vcpu,
 	}
 
 	fault->map_writable = !(fault->slot->flags & KVM_MEM_READONLY);
-	fault->max_level = kvm_max_private_mapping_level(vcpu->kvm, fault->pfn,
-							 fault->max_level, max_order);
+	fault->max_level = kvm_max_gmem_mapping_level(vcpu->kvm, fault->pfn,
+						      fault->max_level, max_order);
 
 	return RET_PF_CONTINUE;
 }
@@ -4521,7 +4520,7 @@  static int __kvm_mmu_faultin_pfn(struct kvm_vcpu *vcpu,
 	unsigned int foll = fault->write ? FOLL_WRITE : 0;
 
 	if (fault->is_private)
-		return kvm_mmu_faultin_pfn_private(vcpu, fault);
+		return kvm_mmu_faultin_pfn_gmem(vcpu, fault);
 
 	foll |= FOLL_NOWAIT;
 	fault->pfn = __kvm_faultin_pfn(fault->slot, fault->gfn, foll,
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index d9616ee6acc7..cdcd7ac091b5 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -2514,6 +2514,12 @@  static inline bool kvm_mem_is_private(struct kvm *kvm, gfn_t gfn)
 }
 #endif /* CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES */
 
+static inline bool kvm_mem_from_gmem(struct kvm *kvm, gfn_t gfn)
+{
+	/* For now, only private memory gets consumed from guest_memfd. */
+	return kvm_mem_is_private(kvm, gfn);
+}
+
 #ifdef CONFIG_KVM_GMEM
 int kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot,
 		     gfn_t gfn, kvm_pfn_t *pfn, struct page **page,
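
The hunks above show only the head and tail of the renamed
kvm_mmu_faultin_pfn_gmem(); for reference, its full body looks roughly
like the sketch below, reconstructed from the visible context lines plus
the upstream kvm_mmu_faultin_pfn_private() it renames. It is a sketch,
not part of this patch.

static int kvm_mmu_faultin_pfn_gmem(struct kvm_vcpu *vcpu,
				    struct kvm_page_fault *fault)
{
	int max_order, r;

	if (!kvm_slot_has_gmem(fault->slot)) {
		kvm_mmu_prepare_memory_fault_exit(vcpu, fault);
		return -EFAULT;
	}

	/* Consume the pfn directly from guest_memfd. */
	r = kvm_gmem_get_pfn(vcpu->kvm, fault->slot, fault->gfn,
			     &fault->pfn, &fault->page, &max_order);
	if (r) {
		kvm_mmu_prepare_memory_fault_exit(vcpu, fault);
		return r;
	}

	fault->map_writable = !(fault->slot->flags & KVM_MEM_READONLY);
	fault->max_level = kvm_max_gmem_mapping_level(vcpu->kvm, fault->pfn,
						      fault->max_level, max_order);

	return RET_PF_CONTINUE;
}

The kvm_gmem_get_pfn() call matches the declaration visible in the
kvm_host.h hunk above; on failure, kvm_mmu_prepare_memory_fault_exit()
reflects the fault back to userspace as a memory-fault exit.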