diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -229,6 +229,10 @@ struct kvm_s2_mmu {
};
struct kvm_arch_memory_slot {
+#ifdef CONFIG_GUNYAH
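+	/* Pages longterm-pinned to back this slot (see gunyah_pin_user_memory()) */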
+ struct page **pages;
+#endif
};
/**
diff --git a/arch/arm64/kvm/gunyah.c b/arch/arm64/kvm/gunyah.c
--- a/arch/arm64/kvm/gunyah.c
+++ b/arch/arm64/kvm/gunyah.c
@@ -660,11 +660,49 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
{
}
-void kvm_arch_commit_memory_region(struct kvm *kvm,
- struct kvm_memory_slot *old,
- const struct kvm_memory_slot *new,
- enum kvm_mr_change change)
+static int gunyah_pin_user_memory(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
+ unsigned int gup_flags = FOLL_WRITE | FOLL_LONGTERM;
+ unsigned long start = memslot->userspace_addr;
+ struct vm_area_struct *vma;
+ struct page **pages;
+ int ret;
+
+ if (!memslot->npages)
+ return 0;
+
+	/* Only pin readable, writable VMA-backed regions; otherwise leave the slot unpinned */
+ mmap_read_lock(current->mm);
+ vma = find_vma(current->mm, start);
+ if (!vma || start < vma->vm_start) {
+ mmap_read_unlock(current->mm);
+ return 0;
+ }
+ if (!(vma->vm_flags & VM_READ) || !(vma->vm_flags & VM_WRITE)) {
+ mmap_read_unlock(current->mm);
+ return 0;
+ }
+ mmap_read_unlock(current->mm);
+
+ pages = kvcalloc(memslot->npages, sizeof(*pages), GFP_KERNEL);
+ if (!pages)
+ return -ENOMEM;
+
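+	/* The guest may hold the pages indefinitely: FOLL_LONGTERM keeps them off ZONE_MOVABLE/CMA */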
+	ret = pin_user_pages_fast(start, memslot->npages, gup_flags, pages);
+	if (ret < 0)
+		goto err;
+	if (ret != memslot->npages) {
+		/* Partial pin: release the pages we did get before failing */
+		unpin_user_pages(pages, ret);
+		ret = -EIO;
+		goto err;
+	}
+	memslot->arch.pages = pages;
+	return 0;
+err:
+	kvfree(pages);
+	return ret;
}
int kvm_arch_prepare_memory_region(struct kvm *kvm,
@@ -672,11 +710,35 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
struct kvm_memory_slot *new,
enum kvm_mr_change change)
{
- return 0;
+ int ret;
+
+ switch (change) {
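+	/* Pin only on slot creation; kvm_arch_free_memslot() unpins on deletion */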
+ case KVM_MR_CREATE:
+ ret = gunyah_pin_user_memory(kvm, new);
+ break;
+ default:
+ return 0;
+ }
+ return ret;
+}
+
+void kvm_arch_commit_memory_region(struct kvm *kvm,
+ struct kvm_memory_slot *old,
+ const struct kvm_memory_slot *new,
+ enum kvm_mr_change change)
+{
}
void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
{
+ if (!slot->arch.pages)
+ return;
+
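+	/* Drop the longterm pins taken at KVM_MR_CREATE time */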
+ unpin_user_pages(slot->arch.pages, slot->npages);
+
+ kvfree(slot->arch.pages);
}
void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
Qualcomm's Gunyah hypervisor supports protected VMs, in which private
memory given to the guest is no longer accessible to the host (an
access violation is raised if the host tries to read or write that
memory). For protected VMs (aka confidential computing), the consensus
is to manage this memory via guest_memfd, and we would like this port
to be based on guest_memfd. For this RFC, however, the port allocates
anonymous pages, which are subject to migration and swap-out; either
one triggers a violation if the memory is private, which is the case
for most of the guest's main memory.

Since the memory is allocated and given to the guest for a possibly
unbounded amount of time, we longterm-pin the pages (FOLL_LONGTERM) to
prevent the kernel from touching, swapping out or migrating them.

In upcoming versions of this port, we intend to move to guest_memfd.

Signed-off-by: Karim Manaouil <karim.manaouil@linaro.org>
---
 arch/arm64/include/asm/kvm_host.h |  4 ++
 arch/arm64/kvm/gunyah.c           | 72 ++++++++++++++++++++++++++++---
 2 files changed, 71 insertions(+), 5 deletions(-)
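For reviewers who want to poke at this path, below is a minimal,
illustrative VMM-side sketch (not part of the patch) of how a memslot
reaches gunyah_pin_user_memory(): the VMM mmaps anonymous memory and
registers it with the standard KVM_SET_USER_MEMORY_REGION ioctl, which
lands in kvm_arch_prepare_memory_region() with KVM_MR_CREATE. The guest
physical address, size and slot number are arbitrary; only the ioctl
sequence is standard KVM ABI.

#include <fcntl.h>
#include <linux/kvm.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

int main(void)
{
	/* Standard KVM setup: open /dev/kvm and create a VM */
	int kvm = open("/dev/kvm", O_RDWR);
	if (kvm < 0) {
		perror("open /dev/kvm");
		return 1;
	}
	int vm = ioctl(kvm, KVM_CREATE_VM, 0);
	if (vm < 0) {
		perror("KVM_CREATE_VM");
		return 1;
	}

	/* Anonymous pages: exactly the case this patch longterm-pins */
	size_t size = 16 * 1024 * 1024;
	void *mem = mmap(NULL, size, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (mem == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	struct kvm_userspace_memory_region region = {
		.slot = 0,
		.guest_phys_addr = 0x80000000,	/* arbitrary guest PA */
		.memory_size = size,
		.userspace_addr = (unsigned long)mem,
	};

	/*
	 * KVM_MR_CREATE: kvm_arch_prepare_memory_region() calls
	 * gunyah_pin_user_memory(), which pins every page of 'mem'
	 * with FOLL_WRITE | FOLL_LONGTERM before the slot goes live.
	 */
	if (ioctl(vm, KVM_SET_USER_MEMORY_REGION, &region) < 0) {
		perror("KVM_SET_USER_MEMORY_REGION");
		return 1;
	}
	printf("memslot registered; backing pages are longterm-pinned\n");
	return 0;
}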