@@ -3258,7 +3258,7 @@ static int host_pfn_mapping_level(struct kvm *kvm, gfn_t gfn,
 
 static int __kvm_mmu_max_mapping_level(struct kvm *kvm,
                                        const struct kvm_memory_slot *slot,
-                                       gfn_t gfn, int max_level, bool is_private)
+                                       gfn_t gfn, int max_level, bool is_gmem)
 {
         struct kvm_lpage_info *linfo;
         int host_level;
@@ -3270,7 +3270,7 @@ static int __kvm_mmu_max_mapping_level(struct kvm *kvm,
                         break;
         }
 
-        if (is_private)
+        if (is_gmem)
                 return max_level;
 
         if (max_level == PG_LEVEL_4K)
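For reference: in the hunk above, a gmem-backed gfn returns as soon as the
lpage_info walk has clamped max_level, and the host-mapping-level walk
further down is skipped, since there is no authoritative userspace mapping
to mirror. A minimal standalone sketch of that decision, assuming x86's
PG_LEVEL_* values; host_level_for() is a hypothetical stand-in for
host_pfn_mapping_level():

#include <stdbool.h>
#include <stdio.h>

enum { PG_LEVEL_4K = 1, PG_LEVEL_2M = 2, PG_LEVEL_1G = 3 };

/* Hypothetical stand-in: pretend the host maps this range with 4K PTEs. */
static int host_level_for(void)
{
        return PG_LEVEL_4K;
}

static int max_mapping_level(int max_level, bool is_gmem)
{
        if (is_gmem)
                return max_level;       /* gmem decides; skip the host walk */
        if (max_level == PG_LEVEL_4K)
                return PG_LEVEL_4K;
        return host_level_for() < max_level ? host_level_for() : max_level;
}

int main(void)
{
        printf("gmem-backed: %d\n", max_mapping_level(PG_LEVEL_1G, true));  /* 3 */
        printf("host-backed: %d\n", max_mapping_level(PG_LEVEL_1G, false)); /* 1 */
        return 0;
}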
@@ -3283,10 +3283,9 @@ static int __kvm_mmu_max_mapping_level(struct kvm *kvm,
 int kvm_mmu_max_mapping_level(struct kvm *kvm,
                               const struct kvm_memory_slot *slot, gfn_t gfn)
 {
-        bool is_private = kvm_slot_has_gmem(slot) &&
-                          kvm_mem_is_private(kvm, gfn);
+        bool is_gmem = kvm_slot_has_gmem(slot) && kvm_mem_from_gmem(kvm, gfn);
 
-        return __kvm_mmu_max_mapping_level(kvm, slot, gfn, PG_LEVEL_NUM, is_private);
+        return __kvm_mmu_max_mapping_level(kvm, slot, gfn, PG_LEVEL_NUM, is_gmem);
 }
 
 void kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
@@ -4465,7 +4464,7 @@ static inline u8 kvm_max_level_for_order(int order)
         return PG_LEVEL_4K;
 }
 
-static u8 kvm_max_private_mapping_level(struct kvm *kvm, kvm_pfn_t pfn,
+static u8 kvm_max_gmem_mapping_level(struct kvm *kvm, kvm_pfn_t pfn,
                                         u8 max_level, int gmem_order)
 {
         u8 req_max_level;
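For reference, the renamed helper (its body is not shown in this hunk)
clamps the fault's max_level by the order of the folio guest_memfd handed
back. A standalone sketch of that clamp, assuming a 4 KiB base page so
order 9 corresponds to a 2M mapping and order 18 to 1G; level_for_order()
is a hypothetical mirror of kvm_max_level_for_order():

#include <stdio.h>

enum { PG_LEVEL_4K = 1, PG_LEVEL_2M = 2, PG_LEVEL_1G = 3 };

/* Hypothetical mirror of kvm_max_level_for_order() for a 4 KiB base page:
 * a 2M page spans 2^9 base pages, a 1G page spans 2^18. */
static int level_for_order(int order)
{
        if (order >= 18)
                return PG_LEVEL_1G;
        if (order >= 9)
                return PG_LEVEL_2M;
        return PG_LEVEL_4K;
}

static int gmem_mapping_level(int max_level, int gmem_order)
{
        int lvl = level_for_order(gmem_order);

        return lvl < max_level ? lvl : max_level;
}

int main(void)
{
        /* A 2M gmem folio caps an otherwise-1G-capable fault at 2M. */
        printf("%d\n", gmem_mapping_level(PG_LEVEL_1G, 9));  /* 2 */
        printf("%d\n", gmem_mapping_level(PG_LEVEL_2M, 18)); /* 2 */
        return 0;
}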
@@ -4491,7 +4490,7 @@ static void kvm_mmu_finish_page_fault(struct kvm_vcpu *vcpu,
                                  r == RET_PF_RETRY, fault->map_writable);
 }
 
-static int kvm_mmu_faultin_pfn_private(struct kvm_vcpu *vcpu,
+static int kvm_mmu_faultin_pfn_gmem(struct kvm_vcpu *vcpu,
                                        struct kvm_page_fault *fault)
 {
         int max_order, r;
@@ -4509,8 +4508,8 @@ static int kvm_mmu_faultin_pfn_private(struct kvm_vcpu *vcpu,
         }
 
         fault->map_writable = !(fault->slot->flags & KVM_MEM_READONLY);
-        fault->max_level = kvm_max_private_mapping_level(vcpu->kvm, fault->pfn,
-                                                         fault->max_level, max_order);
+        fault->max_level = kvm_max_gmem_mapping_level(vcpu->kvm, fault->pfn,
+                                                      fault->max_level, max_order);
 
         return RET_PF_CONTINUE;
 }
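One detail worth noting in the hunk above: for gmem-backed faults,
writability comes from the memslot flags alone, not from a host PTE. A
trivial standalone illustration, with KVM_MEM_READONLY taken from the KVM
UAPI and struct slot a pared-down stand-in for struct kvm_memory_slot:

#include <stdbool.h>
#include <stdio.h>

#define KVM_MEM_READONLY (1UL << 1)     /* from <linux/kvm.h> */

/* Pared-down stand-in for struct kvm_memory_slot. */
struct slot {
        unsigned long flags;
};

static bool map_writable(const struct slot *s)
{
        return !(s->flags & KVM_MEM_READONLY);
}

int main(void)
{
        struct slot rw = { .flags = 0 };
        struct slot ro = { .flags = KVM_MEM_READONLY };

        printf("rw slot writable: %d\n", map_writable(&rw)); /* 1 */
        printf("ro slot writable: %d\n", map_writable(&ro)); /* 0 */
        return 0;
}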
@@ -4521,7 +4520,7 @@ static int __kvm_mmu_faultin_pfn(struct kvm_vcpu *vcpu,
         unsigned int foll = fault->write ? FOLL_WRITE : 0;
 
         if (fault->is_private)
-                return kvm_mmu_faultin_pfn_private(vcpu, fault);
+                return kvm_mmu_faultin_pfn_gmem(vcpu, fault);
 
         foll |= FOLL_NOWAIT;
         fault->pfn = __kvm_faultin_pfn(fault->slot, fault->gfn, foll,
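The dispatch above still keys off fault->is_private rather than a new
is_gmem flag, which matches the helper added below: for now, private
faults are exactly the ones served from guest_memfd. A standalone sketch
of the dispatch shape, with all names hypothetical stand-ins:

#include <stdio.h>

/* Pared-down stand-in for struct kvm_page_fault; only the field the
 * dispatch actually looks at. */
struct fault {
        int is_private;
};

static const char *faultin_gmem(struct fault *f)   { (void)f; return "gmem"; }
static const char *faultin_shared(struct fault *f) { (void)f; return "gup";  }

/* Mirrors the dispatch in the hunk: private faults take the gmem path,
 * everything else still goes through the regular user-mapping faultin. */
static const char *faultin(struct fault *f)
{
        if (f->is_private)
                return faultin_gmem(f);
        return faultin_shared(f);
}

int main(void)
{
        struct fault priv = { .is_private = 1 };
        struct fault shared = { .is_private = 0 };

        printf("private -> %s\n", faultin(&priv));   /* gmem */
        printf("shared  -> %s\n", faultin(&shared)); /* gup  */
        return 0;
}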
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -2514,6 +2514,12 @@ static inline bool kvm_mem_is_private(struct kvm *kvm, gfn_t gfn)
 }
 #endif /* CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES */
 
+static inline bool kvm_mem_from_gmem(struct kvm *kvm, gfn_t gfn)
+{
+        /* For now, only private memory gets consumed from guest_memfd. */
+        return kvm_mem_is_private(kvm, gfn);
+}
+
 #ifdef CONFIG_KVM_GMEM
 int kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot,
                      gfn_t gfn, kvm_pfn_t *pfn, struct page **page,
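To make the intent of kvm_mem_from_gmem() concrete: today it is a pure
alias for kvm_mem_is_private(), but the name leaves room for
configurations where shared memory is also consumed from guest_memfd. A
standalone sketch of the current equivalence, with mem_is_private() a
hypothetical stand-in for the real attribute lookup:

#include <stdbool.h>
#include <stdio.h>

typedef unsigned long gfn_t;

/* Hypothetical stand-in for kvm_mem_is_private(): treat gfns below
 * 0x100 as private for the purpose of this sketch. */
static bool mem_is_private(gfn_t gfn)
{
        return gfn < 0x100;
}

/* Mirrors the new helper: "from gmem" currently means "private". */
static bool mem_from_gmem(gfn_t gfn)
{
        return mem_is_private(gfn);
}

int main(void)
{
        printf("gfn 0x80:  from gmem? %d\n", mem_from_gmem(0x80));  /* 1 */
        printf("gfn 0x200: from gmem? %d\n", mem_from_gmem(0x200)); /* 0 */
        return 0;
}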