--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1331,7 +1331,7 @@ struct kvm_arch {
 	unsigned int indirect_shadow_pages;
 	u8 mmu_valid_gen;
 	u8 vm_type;
-	bool has_private_mem;
+	bool supports_gmem;
 	bool has_protected_state;
 	bool pre_fault_allowed;
 	struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
@@ -2254,7 +2254,7 @@ void kvm_configure_mmu(bool enable_tdp, int tdp_forced_root_level,
 		       int tdp_max_root_level, int tdp_huge_page_level);
 
 #ifdef CONFIG_KVM_GMEM
-#define kvm_arch_supports_gmem(kvm) ((kvm)->arch.has_private_mem)
+#define kvm_arch_supports_gmem(kvm) ((kvm)->arch.supports_gmem)
 #else
 #define kvm_arch_supports_gmem(kvm) false
 #endif
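Generic code keys guest_memfd availability off this per-arch predicate (for instance when advertising KVM_CAP_GUEST_MEMFD). A minimal sketch of such a caller follows; the function name is made up purely for illustration, the real call sites live under virt/kvm/:

static int example_gmem_precheck(struct kvm *kvm)
{
	/* Refuse guest_memfd operations on VMs whose arch opted out. */
	if (!kvm_arch_supports_gmem(kvm))
		return -EINVAL;
	return 0;
}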
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3486,7 +3486,7 @@ static bool page_fault_can_be_fast(struct kvm *kvm, struct kvm_page_fault *fault
 	 * on RET_PF_SPURIOUS until the update completes, or an actual spurious
 	 * case might go down the slow path. Either case will resolve itself.
 	 */
-	if (kvm->arch.has_private_mem &&
+	if (kvm->arch.supports_gmem &&
 	    fault->is_private != kvm_mem_is_private(kvm, fault->gfn))
 		return false;
 
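The mismatch test above compares the fault's view of privateness against the generic per-gfn attribute state. For reference, kvm_mem_is_private() reduces to an attribute lookup; a sketch modeled on include/linux/kvm_host.h, with the config guards elided (they vary across this series):

static inline bool kvm_mem_is_private(struct kvm *kvm, gfn_t gfn)
{
	/* Attributes are updated via the KVM_SET_MEMORY_ATTRIBUTES ioctl. */
	return kvm_get_memory_attributes(kvm, gfn) & KVM_MEMORY_ATTRIBUTE_PRIVATE;
}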
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -5048,8 +5048,8 @@ static int svm_vm_init(struct kvm *kvm)
 			(type == KVM_X86_SEV_ES_VM || type == KVM_X86_SNP_VM);
 		to_kvm_sev_info(kvm)->need_init = true;
 
-		kvm->arch.has_private_mem = (type == KVM_X86_SNP_VM);
-		kvm->arch.pre_fault_allowed = !kvm->arch.has_private_mem;
+		kvm->arch.supports_gmem = (type == KVM_X86_SNP_VM);
+		kvm->arch.pre_fault_allowed = !kvm->arch.supports_gmem;
 	}
 
 	if (!pause_filter_count || !pause_filter_thresh)
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -12716,8 +12716,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 		return -EINVAL;
 
 	kvm->arch.vm_type = type;
-	kvm->arch.has_private_mem =
-		(type == KVM_X86_SW_PROTECTED_VM);
+	kvm->arch.supports_gmem = (type == KVM_X86_SW_PROTECTED_VM);
 	/* Decided by the vendor code for other VM types. */
 	kvm->arch.pre_fault_allowed =
 		type == KVM_X86_DEFAULT_VM || type == KVM_X86_SW_PROTECTED_VM;
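End to end, the flag set in kvm_arch_init_vm() above is what later lets userspace back the VM with guest_memfd. A hedged userspace sketch (error handling elided; the ioctls, struct, and VM type are existing UAPI, only the helper name is invented):

#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int create_sw_protected_vm_with_gmem(void)
{
	int kvm_fd = open("/dev/kvm", O_RDWR);
	/* This VM type makes kvm_arch_init_vm() set arch.supports_gmem. */
	int vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, KVM_X86_SW_PROTECTED_VM);
	struct kvm_create_guest_memfd gmem = {
		.size = 2ULL << 20,	/* 2 MiB */
	};
	/* Binding the returned fd into a memslot (KVM_MEM_GUEST_MEMFD) is
	 * accepted only when kvm_arch_supports_gmem() is true for this VM. */
	return ioctl(vm_fd, KVM_CREATE_GUEST_MEMFD, &gmem);
}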