@@ -2254,9 +2254,9 @@ void kvm_configure_mmu(bool enable_tdp, int tdp_forced_root_level,
#ifdef CONFIG_KVM_GMEM
-#define kvm_arch_has_private_mem(kvm) ((kvm)->arch.has_private_mem)
+#define kvm_arch_supports_gmem(kvm) ((kvm)->arch.has_private_mem)
#else
-#define kvm_arch_has_private_mem(kvm) false
+#define kvm_arch_supports_gmem(kvm) false
#endif
#define kvm_arch_has_readonly_mem(kvm) (!(kvm)->arch.has_protected_state)
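
Note on the hunk above: only the predicate's name changes; the x86 implementation still reads kvm->arch.has_private_mem when CONFIG_KVM_GMEM is set and is constant-false otherwise. A standalone sketch of that conditional-macro pattern (the config toggle and the structs below are simplified stand-ins, not the kernel's definitions):

    #include <stdbool.h>
    #include <stdio.h>

    #define CONFIG_KVM_GMEM 1  /* toggle to 0 to exercise the stub branch */

    struct kvm_arch { bool has_private_mem; };
    struct kvm { struct kvm_arch arch; };

    #if CONFIG_KVM_GMEM
    #define kvm_arch_supports_gmem(kvm) ((kvm)->arch.has_private_mem)
    #else
    #define kvm_arch_supports_gmem(kvm) false
    #endif

    int main(void)
    {
        struct kvm vm = { .arch = { .has_private_mem = true } };
        printf("supports gmem: %d\n", kvm_arch_supports_gmem(&vm));
        return 0;
    }

Because the disabled variant expands to a bare false, any branch guarded by the predicate compiles away entirely when the feature is configured out.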
@@ -2309,8 +2309,8 @@ enum {
#define HF_SMM_INSIDE_NMI_MASK (1 << 2)
# define KVM_MAX_NR_ADDRESS_SPACES 2
-/* SMM is currently unsupported for guests with private memory. */
-# define kvm_arch_nr_memslot_as_ids(kvm) (kvm_arch_has_private_mem(kvm) ? 1 : 2)
+/* SMM is currently unsupported for guests with guest_memfd (especially private) memory. */
+# define kvm_arch_nr_memslot_as_ids(kvm) (kvm_arch_supports_gmem(kvm) ? 1 : 2)
# define kvm_arch_vcpu_memslots_id(vcpu) ((vcpu)->arch.hflags & HF_SMM_MASK ? 1 : 0)
# define kvm_memslots_for_spte_role(kvm, role) __kvm_memslots(kvm, (role).smm)
#else
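
The address-space count above interacts with the per-vCPU memslot id below it: x86 only models a second memslot address space for SMM, and gmem-capable VMs cannot use SMM, so they get a single one. A simplified, runnable sketch of the selection logic (HF_SMM_MASK's value is assumed from the neighbouring HF_SMM_INSIDE_NMI_MASK definition; the structs are stand-ins):

    #include <stdbool.h>
    #include <stdio.h>

    #define HF_SMM_MASK (1u << 1)  /* assumed bit position */

    struct vcpu { unsigned int hflags; };

    static bool supports_gmem;  /* kvm_arch_supports_gmem() stand-in */

    /* gmem VMs cannot enter SMM, so they need only one address space */
    static int nr_memslot_as_ids(void)
    {
        return supports_gmem ? 1 : 2;
    }

    /* a vCPU currently in SMM sees the second (index 1) set of memslots */
    static int vcpu_memslots_id(const struct vcpu *v)
    {
        return (v->hflags & HF_SMM_MASK) ? 1 : 0;
    }

    int main(void)
    {
        struct vcpu v = { .hflags = HF_SMM_MASK };
        printf("address spaces: %d, SMM vCPU uses id %d\n",
               nr_memslot_as_ids(), vcpu_memslots_id(&v));
        return 0;
    }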
@@ -4917,7 +4917,7 @@ long kvm_arch_vcpu_pre_fault_memory(struct kvm_vcpu *vcpu,
if (r)
return r;
- if (kvm_arch_has_private_mem(vcpu->kvm) &&
+ if (kvm_arch_supports_gmem(vcpu->kvm) &&
kvm_mem_is_private(vcpu->kvm, gpa_to_gfn(range->gpa)))
error_code |= PFERR_PRIVATE_ACCESS;
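
In the pre-fault path the predicate gates the private-GFN lookup before folding PFERR_PRIVATE_ACCESS into the synthesized error code. A toy sketch of that composition (the bit position and the privacy predicate below are illustrative, not the kernel's):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PFERR_PRIVATE_ACCESS (1ull << 49)  /* assumed bit position */

    static bool supports_gmem = true;  /* kvm_arch_supports_gmem() stand-in */

    /* toy stand-in for kvm_mem_is_private(): odd GFNs are "private" here */
    static bool gfn_is_private(uint64_t gfn)
    {
        return gfn & 1;
    }

    int main(void)
    {
        uint64_t error_code = 0;

        /* only gmem-capable VMs can have private GFNs, so gate the lookup */
        if (supports_gmem && gfn_is_private(0x1001))
            error_code |= PFERR_PRIVATE_ACCESS;

        printf("error_code = %#llx\n", (unsigned long long)error_code);
        return 0;
    }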
@@ -7683,7 +7683,7 @@ bool kvm_arch_pre_set_memory_attributes(struct kvm *kvm,
* Zapping SPTEs in this case ensures KVM will reassess whether or not
* a hugepage can be used for affected ranges.
*/
- if (WARN_ON_ONCE(!kvm_arch_has_private_mem(kvm)))
+ if (WARN_ON_ONCE(!kvm_arch_supports_gmem(kvm)))
return false;
/* Unmap the old attribute page. */
@@ -7746,7 +7746,7 @@ bool kvm_arch_post_set_memory_attributes(struct kvm *kvm,
* a range that has PRIVATE GFNs, and conversely converting a range to
* SHARED may now allow hugepages.
*/
- if (WARN_ON_ONCE(!kvm_arch_has_private_mem(kvm)))
+ if (WARN_ON_ONCE(!kvm_arch_supports_gmem(kvm)))
return false;
/*
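
This hunk and the previous one keep the same defensive shape: the memory-attributes paths should be unreachable on VMs without gmem, so hitting the check signals a KVM bug, and the function warns once and bails. A standalone sketch of the warn-once guard (warn_on_once() below is a toy, GNU-C statement-expression imitation of the kernel macro, not the kernel's WARN_ON_ONCE()):

    #include <stdbool.h>
    #include <stdio.h>

    #define warn_on_once(cond) ({                          \
            static bool warned;                            \
            bool hit = (cond);                             \
            if (hit && !warned) {                          \
                    warned = true;                         \
                    fprintf(stderr, "WARN: %s\n", #cond);  \
            }                                              \
            hit;                                           \
    })

    static bool supports_gmem;

    static bool set_memory_attributes(void)
    {
        /* unreachable on non-gmem VMs; warn once and bail if hit anyway */
        if (warn_on_once(!supports_gmem))
            return false;
        return true;
    }

    int main(void)
    {
        set_memory_attributes();
        set_memory_attributes();  /* still fails, but no second warning */
        return 0;
    }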
@@ -7802,7 +7802,7 @@ void kvm_mmu_init_memslot_memory_attributes(struct kvm *kvm,
{
int level;
- if (!kvm_arch_has_private_mem(kvm))
+ if (!kvm_arch_supports_gmem(kvm))
return;
for (level = PG_LEVEL_2M; level <= KVM_MAX_HUGEPAGE_LEVEL; level++) {
@@ -719,11 +719,11 @@ static inline int kvm_arch_vcpu_memslots_id(struct kvm_vcpu *vcpu)
#endif
/*
- * Arch code must define kvm_arch_has_private_mem if support for private memory
+ * Arch code must define kvm_arch_supports_gmem if support for guest_memfd
* is enabled.
*/
-#if !defined(kvm_arch_has_private_mem) && !IS_ENABLED(CONFIG_KVM_GMEM)
-static inline bool kvm_arch_has_private_mem(struct kvm *kvm)
+#if !defined(kvm_arch_supports_gmem) && !IS_ENABLED(CONFIG_KVM_GMEM)
+static inline bool kvm_arch_supports_gmem(struct kvm *kvm)
{
return false;
}
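
This is the generic include/linux/kvm_host.h side of the rename: an architecture that supports guest_memfd defines kvm_arch_supports_gmem as a macro (as the x86 hunk above does), and only when no such macro exists, and CONFIG_KVM_GMEM is off, does the header supply a constant-false inline stub. A minimal sketch of the macro-overrides-stub idiom (the IS_ENABLED() half of the condition is omitted here for brevity):

    #include <stdbool.h>
    #include <stdio.h>

    struct kvm { int unused; };

    /*
     * An arch header that supports gmem would define the macro before
     * this point; only when no macro exists is the stub compiled in.
     */
    #if !defined(kvm_arch_supports_gmem)
    static inline bool kvm_arch_supports_gmem(struct kvm *kvm)
    {
        (void)kvm;
        return false;  /* default: no guest_memfd support */
    }
    #endif

    int main(void)
    {
        struct kvm vm = { 0 };
        printf("supports gmem: %d\n", kvm_arch_supports_gmem(&vm));
        return 0;
    }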
@@ -1531,7 +1531,7 @@ static int check_memory_region_flags(struct kvm *kvm,
{
u32 valid_flags = KVM_MEM_LOG_DIRTY_PAGES;
- if (kvm_arch_has_private_mem(kvm))
+ if (kvm_arch_supports_gmem(kvm))
valid_flags |= KVM_MEM_GUEST_MEMFD;
/* Dirty logging private memory is not currently supported. */
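
check_memory_region_flags() builds up the set of acceptable memslot flags and rejects anything outside it, so KVM_MEM_GUEST_MEMFD is only accepted on gmem-capable VMs. A sketch of that mask check under assumed UAPI bit values:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define KVM_MEM_LOG_DIRTY_PAGES (1u << 0)  /* assumed UAPI values */
    #define KVM_MEM_GUEST_MEMFD     (1u << 2)

    static bool supports_gmem;  /* kvm_arch_supports_gmem() stand-in */

    static int check_memory_region_flags(uint32_t flags)
    {
        uint32_t valid_flags = KVM_MEM_LOG_DIRTY_PAGES;

        if (supports_gmem)
            valid_flags |= KVM_MEM_GUEST_MEMFD;

        /* any bit outside the accepted set invalidates the request */
        return (flags & ~valid_flags) ? -22 /* -EINVAL */ : 0;
    }

    int main(void)
    {
        printf("gmem slot on a non-gmem VM: %d\n",
               check_memory_region_flags(KVM_MEM_GUEST_MEMFD));
        return 0;
    }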
@@ -2362,7 +2362,7 @@ static int kvm_vm_ioctl_clear_dirty_log(struct kvm *kvm,
#ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
static u64 kvm_supported_mem_attributes(struct kvm *kvm)
{
- if (!kvm || kvm_arch_has_private_mem(kvm))
+ if (!kvm || kvm_arch_supports_gmem(kvm))
return KVM_MEMORY_ATTRIBUTE_PRIVATE;
return 0;
@@ -4844,7 +4844,7 @@ static int kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg)
#endif
#ifdef CONFIG_KVM_GMEM
case KVM_CAP_GUEST_MEMFD:
- return !kvm || kvm_arch_has_private_mem(kvm);
+ return !kvm || kvm_arch_supports_gmem(kvm);
#endif
default:
break;
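
The last two hunks share the "!kvm || ..." idiom: a NULL kvm means the query arrived on /dev/kvm before any VM exists, in which case the generic capability or attribute is advertised; on a VM fd the answer is per-VM. A small illustrative sketch (the struct and helper below are stand-ins, not kernel code):

    #include <stdbool.h>
    #include <stdio.h>

    struct kvm { bool supports_gmem; };

    /*
     * kvm == NULL: system-wide query, report that the capability exists.
     * kvm != NULL: report whether this particular VM can use it.
     */
    static int check_guest_memfd_cap(struct kvm *kvm)
    {
        return !kvm || kvm->supports_gmem;
    }

    int main(void)
    {
        struct kvm vm = { .supports_gmem = false };
        printf("system-wide: %d, this VM: %d\n",
               check_guest_memfd_cap(NULL), check_guest_memfd_cap(&vm));
        return 0;
    }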