@@ -4465,21 +4465,25 @@ static inline u8 kvm_max_level_for_order(int order)
return PG_LEVEL_4K;
}
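+/*
+ * Cap the mapping level at the order of the gmem allocation backing the
+ * fault; for private faults, further cap it at whatever maximum the vendor
+ * module reports for the pfn (a return of 0 means no vendor-imposed limit).
+ */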
-static u8 kvm_max_private_mapping_level(struct kvm *kvm, kvm_pfn_t pfn,
- u8 max_level, int gmem_order)
+static u8 kvm_max_level_for_fault_and_order(struct kvm *kvm,
+ struct kvm_page_fault *fault,
+ int order)
{
- u8 req_max_level;
+ u8 max_level = fault->max_level;
if (max_level == PG_LEVEL_4K)
return PG_LEVEL_4K;
- max_level = min(kvm_max_level_for_order(gmem_order), max_level);
+ max_level = min(kvm_max_level_for_order(order), max_level);
if (max_level == PG_LEVEL_4K)
return PG_LEVEL_4K;
- req_max_level = kvm_x86_call(private_max_mapping_level)(kvm, pfn);
- if (req_max_level)
- max_level = min(max_level, req_max_level);
+ if (fault->is_private) {
+ u8 level = kvm_x86_call(private_max_mapping_level)(kvm, fault->pfn);
+
+ if (level)
+ max_level = min(max_level, level);
+ }
return max_level;
}
@@ -4491,10 +4495,10 @@ static void kvm_mmu_finish_page_fault(struct kvm_vcpu *vcpu,
r == RET_PF_RETRY, fault->map_writable);
}
-static int kvm_mmu_faultin_pfn_private(struct kvm_vcpu *vcpu,
- struct kvm_page_fault *fault)
+static int kvm_mmu_faultin_pfn_gmem(struct kvm_vcpu *vcpu,
+ struct kvm_page_fault *fault)
{
- int max_order, r;
+ int gmem_order, r;
if (!kvm_slot_has_gmem(fault->slot)) {
kvm_mmu_prepare_memory_fault_exit(vcpu, fault);
@@ -4502,15 +4506,14 @@ static int kvm_mmu_faultin_pfn_private(struct kvm_vcpu *vcpu,
}
r = kvm_gmem_get_pfn(vcpu->kvm, fault->slot, fault->gfn, &fault->pfn,
- &fault->refcounted_page, &max_order);
+ &fault->refcounted_page, &gmem_order);
if (r) {
kvm_mmu_prepare_memory_fault_exit(vcpu, fault);
return r;
}
fault->map_writable = !(fault->slot->flags & KVM_MEM_READONLY);
- fault->max_level = kvm_max_private_mapping_level(vcpu->kvm, fault->pfn,
- fault->max_level, max_order);
+ fault->max_level = kvm_max_level_for_fault_and_order(vcpu->kvm, fault,
+ gmem_order);
return RET_PF_CONTINUE;
}
@@ -4520,8 +4523,8 @@ static int __kvm_mmu_faultin_pfn(struct kvm_vcpu *vcpu,
{
unsigned int foll = fault->write ? FOLL_WRITE : 0;
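+ /*
+ * Serve the fault from guest_memfd whenever it backs the access: always
+ * for private faults, and for shared faults when the memslot's
+ * guest_memfd supports shared memory.
+ */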
- if (fault->is_private)
- return kvm_mmu_faultin_pfn_private(vcpu, fault);
+ if (fault->is_private || kvm_gmem_memslot_supports_shared(fault->slot))
+ return kvm_mmu_faultin_pfn_gmem(vcpu, fault);
foll |= FOLL_NOWAIT;
fault->pfn = __kvm_faultin_pfn(fault->slot, fault->gfn, foll,
@@ -2502,6 +2502,15 @@ static inline void kvm_prepare_memory_fault_exit(struct kvm_vcpu *vcpu,
vcpu->run->memory_fault.flags |= KVM_MEMORY_EXIT_FLAG_PRIVATE;
}
+#ifdef CONFIG_KVM_GMEM_SHARED_MEM
+bool kvm_gmem_memslot_supports_shared(const struct kvm_memory_slot *slot);
+#else
+static inline bool kvm_gmem_memslot_supports_shared(const struct kvm_memory_slot *slot)
+{
+ return false;
+}
+#endif /* CONFIG_KVM_GMEM_SHARED_MEM */
+
#ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
static inline unsigned long kvm_get_memory_attributes(struct kvm *kvm, gfn_t gfn)
{
@@ -2515,10 +2524,30 @@ bool kvm_arch_pre_set_memory_attributes(struct kvm *kvm,
bool kvm_arch_post_set_memory_attributes(struct kvm *kvm,
struct kvm_gfn_range *range);
+/*
+ * Returns true if the given gfn's private/shared status (in the CoCo sense) is
+ * private.
+ *
+ * A return value of false indicates that the gfn is explicitly or implicitly
+ * shared (e.g., for non-CoCo VMs).
+ */
static inline bool kvm_mem_is_private(struct kvm *kvm, gfn_t gfn)
{
- return IS_ENABLED(CONFIG_KVM_GMEM) &&
- kvm_get_memory_attributes(kvm, gfn) & KVM_MEMORY_ATTRIBUTE_PRIVATE;
+ struct kvm_memory_slot *slot;
+
+ if (!IS_ENABLED(CONFIG_KVM_GMEM))
+ return false;
+
+ slot = gfn_to_memslot(kvm, gfn);
+ if (slot && kvm_slot_has_gmem(slot) &&
+     kvm_gmem_memslot_supports_shared(slot)) {
+ /*
+ * For now, memslots only support in-place shared memory if the
+ * host is allowed to mmap memory (i.e., non-CoCo VMs).
+ */
+ return false;
+ }
+
+ return kvm_get_memory_attributes(kvm, gfn) & KVM_MEMORY_ATTRIBUTE_PRIVATE;
}
#else
static inline bool kvm_mem_is_private(struct kvm *kvm, gfn_t gfn)
@@ -388,6 +388,23 @@ static int kvm_gmem_mmap(struct file *file, struct vm_area_struct *vma)
return 0;
}
+
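+/*
+ * Returns true if the memslot's guest_memfd supports shared memory. Takes
+ * a reference on the gmem file so the inode can be safely inspected, and
+ * returns false if the slot's gmem file is already gone.
+ */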
+bool kvm_gmem_memslot_supports_shared(const struct kvm_memory_slot *slot)
+{
+ struct file *file;
+ bool ret;
+
+ file = kvm_gmem_get_file((struct kvm_memory_slot *)slot);
+ if (!file)
+ return false;
+
+ ret = kvm_gmem_supports_shared(file_inode(file));
+
+ fput(file);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(kvm_gmem_memslot_supports_shared);
+
#else
#define kvm_gmem_mmap NULL
#endif /* CONFIG_KVM_GMEM_SHARED_MEM */