diff --git a/MAINTAINERS b/MAINTAINERS
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -14888,6 +14888,7 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
T: quilt git://git.kernel.org/pub/scm/linux/kernel/git/akpm/25-new
F: include/linux/gfp.h
F: include/linux/gfp_types.h
+F: include/linux/guestmem.h
F: include/linux/memfd.h
F: include/linux/memory.h
F: include/linux/memory_hotplug.h
@@ -14903,6 +14904,7 @@ F: include/linux/pagewalk.h
F: include/linux/rmap.h
F: include/trace/events/ksm.h
F: mm/
+F: mm/guestmem.c
F: tools/mm/
F: tools/testing/selftests/mm/
N: include/linux/page[-_]*
diff --git a/include/linux/guestmem.h b/include/linux/guestmem.h
new file mode 100644
--- /dev/null
+++ b/include/linux/guestmem.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_GUESTMEM_H
+#define _LINUX_GUESTMEM_H
+
+struct address_space;
+struct list_head;
+
+/**
+ * struct guestmem_ops - Hypervisor-specific maintenance operations on folios
+ * @release_folio: Attempt to return the folio to being fully owned by Linux,
+ *                 e.g. because the folio is about to be freed [optional]
+ * @invalidate_begin: Begin invalidating mappings between start and end offsets
+ * @invalidate_end: Paired with ->invalidate_begin() [optional]
+ */
+struct guestmem_ops {
+ bool (*release_folio)(struct list_head *entry, struct folio *folio);
+ int (*invalidate_begin)(struct list_head *entry, pgoff_t start,
+ pgoff_t end);
+ void (*invalidate_end)(struct list_head *entry, pgoff_t start,
+ pgoff_t end);
+};
+
+int guestmem_attach_mapping(struct address_space *mapping,
+ const struct guestmem_ops *const ops,
+ struct list_head *data);
+void guestmem_detach_mapping(struct address_space *mapping,
+ struct list_head *data);
+
+struct folio *guestmem_grab_folio(struct address_space *mapping, pgoff_t index);
+int guestmem_punch_hole(struct address_space *mapping, loff_t offset,
+ loff_t len);
+
+#endif /* _LINUX_GUESTMEM_H */
diff --git a/mm/Kconfig b/mm/Kconfig
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -1190,6 +1190,9 @@ config SECRETMEM
memory areas visible only in the context of the owning process and
not mapped to other processes and other kernel page tables.
+config GUESTMEM
+ bool
+
config ANON_VMA_NAME
bool "Anonymous VMA name support"
depends on PROC_FS && ADVISE_SYSCALLS && MMU
diff --git a/mm/Makefile b/mm/Makefile
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -136,6 +136,7 @@ obj-$(CONFIG_PERCPU_STATS) += percpu-stats.o
obj-$(CONFIG_ZONE_DEVICE) += memremap.o
obj-$(CONFIG_HMM_MIRROR) += hmm.o
obj-$(CONFIG_MEMFD_CREATE) += memfd.o
+obj-$(CONFIG_GUESTMEM) += guestmem.o
obj-$(CONFIG_MAPPING_DIRTY_HELPERS) += mapping_dirty_helpers.o
obj-$(CONFIG_PTDUMP_CORE) += ptdump.o
obj-$(CONFIG_PAGE_REPORTING) += page_reporting.o
diff --git a/mm/guestmem.c b/mm/guestmem.c
new file mode 100644
--- /dev/null
+++ b/mm/guestmem.c
@@ -0,0 +1,232 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * guestmem library
+ *
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/fs.h>
+#include <linux/guestmem.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+
+struct guestmem {
+ const struct guestmem_ops *ops;
+};
+
+static inline struct guestmem *folio_to_guestmem(struct folio *folio)
+{
+ struct address_space *const mapping = folio->mapping;
+
+ return mapping->i_private_data;
+}
+
+static inline bool __guestmem_release_folio(struct address_space *const mapping,
+ struct folio *folio)
+{
+ struct guestmem *gmem = mapping->i_private_data;
+ struct list_head *entry;
+
+ if (gmem->ops->release_folio) {
+ list_for_each(entry, &mapping->i_private_list) {
+ if (!gmem->ops->release_folio(entry, folio))
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static inline int
+__guestmem_invalidate_begin(struct address_space *const mapping, pgoff_t start,
+ pgoff_t end)
+{
+ struct guestmem *gmem = mapping->i_private_data;
+ struct list_head *entry;
+ int ret = 0;
+
+ list_for_each(entry, &mapping->i_private_list) {
+ ret = gmem->ops->invalidate_begin(entry, start, end);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static inline void
+__guestmem_invalidate_end(struct address_space *const mapping, pgoff_t start,
+ pgoff_t end)
+{
+ struct guestmem *gmem = mapping->i_private_data;
+ struct list_head *entry;
+
+ if (gmem->ops->invalidate_end) {
+ list_for_each(entry, &mapping->i_private_list)
+ gmem->ops->invalidate_end(entry, start, end);
+ }
+}
+
+static bool guestmem_release_folio(struct folio *folio, gfp_t gfp)
+{
+ return __guestmem_release_folio(folio->mapping, folio);
+}
+
+static void guestmem_invalidate_folio(struct folio *folio, size_t offset,
+ size_t len)
+{
+ WARN_ON_ONCE(offset != 0);
+ WARN_ON_ONCE(len != folio_size(folio));
+
+ if (offset == 0 && len == folio_size(folio))
+ WARN_ON_ONCE(!filemap_release_folio(folio, 0));
+}
+
+static int guestmem_error_folio(struct address_space *mapping,
+ struct folio *folio)
+{
+ pgoff_t start, end;
+ int ret;
+
+ filemap_invalidate_lock_shared(mapping);
+
+ start = folio->index;
+ end = start + folio_nr_pages(folio);
+
+ ret = __guestmem_invalidate_begin(mapping, start, end);
+ if (ret)
+ goto out;
+
+ /*
+ * Do not truncate the range, what action is taken in response to the
+ * error is userspace's decision (assuming the architecture supports
+ * gracefully handling memory errors). If/when the guest attempts to
+ * access a poisoned page, kvm_gmem_get_pfn() will return -EHWPOISON,
+ * at which point KVM can either terminate the VM or propagate the
+ * error to userspace.
+ */
+
+ __guestmem_invalidate_end(mapping, start, end);
+
+out:
+ filemap_invalidate_unlock_shared(mapping);
+ return ret ? MF_FAILED : MF_DELAYED;
+}
+
+static int guestmem_migrate_folio(struct address_space *mapping,
+ struct folio *dst, struct folio *src,
+ enum migrate_mode mode)
+{
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+}
+
+static const struct address_space_operations guestmem_aops = {
+ .dirty_folio = noop_dirty_folio,
+ .release_folio = guestmem_release_folio,
+ .invalidate_folio = guestmem_invalidate_folio,
+ .error_remove_folio = guestmem_error_folio,
+ .migrate_folio = guestmem_migrate_folio,
+};
+
+/**
+ * guestmem_attach_mapping() - Attach/create a guestmem mapping
+ * @mapping: The address space to attach to
+ * @ops: The guestmem operations to use
+ * @data: Private data to pass to the ops functions
+ */
+int guestmem_attach_mapping(struct address_space *mapping,
+ const struct guestmem_ops *const ops,
+ struct list_head *data)
+{
+ struct guestmem *gmem;
+
+ if (mapping->a_ops == &guestmem_aops) {
+ gmem = mapping->i_private_data;
+ if (gmem->ops != ops)
+ return -EINVAL;
+
+ goto add;
+ }
+
+ gmem = kzalloc(sizeof(*gmem), GFP_KERNEL);
+ if (!gmem)
+ return -ENOMEM;
+
+ gmem->ops = ops;
+
+ mapping->a_ops = &guestmem_aops;
+ mapping->i_private_data = gmem;
+
+ mapping_set_gfp_mask(mapping, GFP_HIGHUSER);
+ mapping_set_inaccessible(mapping);
+ /* Unmovable mappings are supposed to be marked unevictable as well. */
+ WARN_ON_ONCE(!mapping_unevictable(mapping));
+
+add:
+ list_add(data, &mapping->i_private_list);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(guestmem_attach_mapping);
+
+/**
+ * guestmem_detach_mapping() - Detach a guestmem mapping
+ * @mapping: The address space to detach
+ * @data: Private data to detach
+ */
+void guestmem_detach_mapping(struct address_space *mapping,
+ struct list_head *data)
+{
+ list_del(data);
+
+ if (list_empty(&mapping->i_private_list)) {
+ kfree(mapping->i_private_data);
+ mapping->i_private_data = NULL;
+ mapping->a_ops = &empty_aops;
+ }
+}
+EXPORT_SYMBOL_GPL(guestmem_detach_mapping);
+
+/**
+ * guestmem_grab_folio() - Grab a folio from a guestmem mapping
+ * @mapping: The address space to grab from
+ * @index: The index of the folio to grab
+ *
+ * Return: The grabbed folio, or ERR_PTR() on failure.
+ */
+struct folio *guestmem_grab_folio(struct address_space *mapping, pgoff_t index)
+{
+ /* TODO: Support huge pages. */
+ return filemap_grab_folio(mapping, index);
+}
+EXPORT_SYMBOL_GPL(guestmem_grab_folio);
+
+/**
+ * guestmem_punch_hole() - Punch a hole in a guestmem mapping
+ * @mapping: The address space to punch a hole in
+ * @offset: The offset to punch a hole at
+ * @len: The length of the hole to punch
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+int guestmem_punch_hole(struct address_space *mapping, loff_t offset,
+ loff_t len)
+{
+ pgoff_t start = offset >> PAGE_SHIFT;
+ pgoff_t end = (offset + len) >> PAGE_SHIFT;
+ int ret;
+
+ filemap_invalidate_lock(mapping);
+ ret = __guestmem_invalidate_begin(mapping, start, end);
+ if (ret)
+ goto out;
+
+ truncate_inode_pages_range(mapping, offset, offset + len - 1);
+
+ __guestmem_invalidate_end(mapping, start, end);
+
+out:
+ filemap_invalidate_unlock(mapping);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(guestmem_punch_hole);
diff --git a/virt/kvm/Kconfig b/virt/kvm/Kconfig
--- a/virt/kvm/Kconfig
+++ b/virt/kvm/Kconfig
@@ -106,6 +106,7 @@ config KVM_GENERIC_MEMORY_ATTRIBUTES
config KVM_PRIVATE_MEM
select XARRAY_MULTI
+ select GUESTMEM
bool
config KVM_GENERIC_PRIVATE_MEM
diff --git a/virt/kvm/guest_memfd.c b/virt/kvm/guest_memfd.c
--- a/virt/kvm/guest_memfd.c
+++ b/virt/kvm/guest_memfd.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/backing-dev.h>
#include <linux/falloc.h>
+#include <linux/guestmem.h>
#include <linux/kvm_host.h>
#include <linux/pagemap.h>
#include <linux/anon_inodes.h>
@@ -98,8 +99,7 @@ static int kvm_gmem_prepare_folio(struct kvm *kvm, struct kvm_memory_slot *slot,
*/
static struct folio *kvm_gmem_get_folio(struct inode *inode, pgoff_t index)
{
- /* TODO: Support huge pages. */
- return filemap_grab_folio(inode->i_mapping, index);
+ return guestmem_grab_folio(inode->i_mapping, index);
}
static void kvm_gmem_invalidate_begin(struct kvm_gmem *gmem, pgoff_t start,
@@ -151,28 +151,7 @@ static void kvm_gmem_invalidate_end(struct kvm_gmem *gmem, pgoff_t start,
static long kvm_gmem_punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
- struct list_head *gmem_list = &inode->i_mapping->i_private_list;
- pgoff_t start = offset >> PAGE_SHIFT;
- pgoff_t end = (offset + len) >> PAGE_SHIFT;
- struct kvm_gmem *gmem;
-
- /*
- * Bindings must be stable across invalidation to ensure the start+end
- * are balanced.
- */
- filemap_invalidate_lock(inode->i_mapping);
-
- list_for_each_entry(gmem, gmem_list, entry)
- kvm_gmem_invalidate_begin(gmem, start, end);
-
- truncate_inode_pages_range(inode->i_mapping, offset, offset + len - 1);
-
- list_for_each_entry(gmem, gmem_list, entry)
- kvm_gmem_invalidate_end(gmem, start, end);
-
- filemap_invalidate_unlock(inode->i_mapping);
-
- return 0;
+ return guestmem_punch_hole(inode->i_mapping, offset, len);
}
static long kvm_gmem_allocate(struct inode *inode, loff_t offset, loff_t len)
@@ -277,7 +256,7 @@ static int kvm_gmem_release(struct inode *inode, struct file *file)
kvm_gmem_invalidate_begin(gmem, 0, -1ul);
kvm_gmem_invalidate_end(gmem, 0, -1ul);
- list_del(&gmem->entry);
+ guestmem_detach_mapping(inode->i_mapping, &gmem->entry);
filemap_invalidate_unlock(inode->i_mapping);
@@ -318,47 +297,8 @@ void kvm_gmem_init(struct module *module)
kvm_gmem_fops.owner = module;
}
-static int kvm_gmem_migrate_folio(struct address_space *mapping,
- struct folio *dst, struct folio *src,
- enum migrate_mode mode)
-{
- WARN_ON_ONCE(1);
- return -EINVAL;
-}
-
-static int kvm_gmem_error_folio(struct address_space *mapping, struct folio *folio)
-{
- struct list_head *gmem_list = &mapping->i_private_list;
- struct kvm_gmem *gmem;
- pgoff_t start, end;
-
- filemap_invalidate_lock_shared(mapping);
-
- start = folio->index;
- end = start + folio_nr_pages(folio);
-
- list_for_each_entry(gmem, gmem_list, entry)
- kvm_gmem_invalidate_begin(gmem, start, end);
-
- /*
- * Do not truncate the range, what action is taken in response to the
- * error is userspace's decision (assuming the architecture supports
- * gracefully handling memory errors). If/when the guest attempts to
- * access a poisoned page, kvm_gmem_get_pfn() will return -EHWPOISON,
- * at which point KVM can either terminate the VM or propagate the
- * error to userspace.
- */
-
- list_for_each_entry(gmem, gmem_list, entry)
- kvm_gmem_invalidate_end(gmem, start, end);
-
- filemap_invalidate_unlock_shared(mapping);
-
- return MF_DELAYED;
-}
-
#ifdef CONFIG_HAVE_KVM_ARCH_GMEM_INVALIDATE
-static bool kvm_gmem_release_folio(struct folio *folio, gfp_t gfp)
+static bool kvm_gmem_release_folio(struct list_head *entry, struct folio *folio)
{
struct page *page = folio_page(folio, 0);
kvm_pfn_t pfn = page_to_pfn(page);
@@ -368,25 +308,31 @@ static bool kvm_gmem_release_folio(struct folio *folio, gfp_t gfp)
return true;
}
+#endif
-static void kvm_gmem_invalidate_folio(struct folio *folio, size_t offset,
- size_t len)
+static int kvm_guestmem_invalidate_begin(struct list_head *entry, pgoff_t start,
+ pgoff_t end)
{
- WARN_ON_ONCE(offset != 0);
- WARN_ON_ONCE(len != folio_size(folio));
+ struct kvm_gmem *gmem = container_of(entry, struct kvm_gmem, entry);
+
+ kvm_gmem_invalidate_begin(gmem, start, end);
- if (offset == 0 && len == folio_size(folio))
- filemap_release_folio(folio, 0);
+ return 0;
}
-#endif
-static const struct address_space_operations kvm_gmem_aops = {
- .dirty_folio = noop_dirty_folio,
- .migrate_folio = kvm_gmem_migrate_folio,
- .error_remove_folio = kvm_gmem_error_folio,
+static void kvm_guestmem_invalidate_end(struct list_head *entry, pgoff_t start,
+ pgoff_t end)
+{
+ struct kvm_gmem *gmem = container_of(entry, struct kvm_gmem, entry);
+
+ kvm_gmem_invalidate_end(gmem, start, end);
+}
+
+static const struct guestmem_ops kvm_guestmem_ops = {
+ .invalidate_begin = kvm_guestmem_invalidate_begin,
+ .invalidate_end = kvm_guestmem_invalidate_end,
#ifdef CONFIG_HAVE_KVM_ARCH_GMEM_INVALIDATE
.release_folio = kvm_gmem_release_folio,
- .invalidate_folio = kvm_gmem_invalidate_folio,
#endif
};
@@ -442,22 +388,22 @@ static int __kvm_gmem_create(struct kvm *kvm, loff_t size, u64 flags)
inode->i_private = (void *)(unsigned long)flags;
inode->i_op = &kvm_gmem_iops;
- inode->i_mapping->a_ops = &kvm_gmem_aops;
inode->i_mode |= S_IFREG;
inode->i_size = size;
- mapping_set_gfp_mask(inode->i_mapping, GFP_HIGHUSER);
- mapping_set_inaccessible(inode->i_mapping);
- /* Unmovable mappings are supposed to be marked unevictable as well. */
- WARN_ON_ONCE(!mapping_unevictable(inode->i_mapping));
+ err = guestmem_attach_mapping(inode->i_mapping, &kvm_guestmem_ops,
+ &gmem->entry);
+ if (err)
+ goto err_putfile;
kvm_get_kvm(kvm);
gmem->kvm = kvm;
xa_init(&gmem->bindings);
- list_add(&gmem->entry, &inode->i_mapping->i_private_list);
fd_install(fd, file);
return fd;
+err_putfile:
+ fput(file);
err_gmem:
kfree(gmem);
err_fd:
A few near-term features are coming to guest_memfd which make sense to create a built-in library. - pKVM will introduce MMU-based protection for guests and allow guest memory to be switched between "guest-private" and "accessible to host". Additional tracking is needed to manage the state of pages as accessing "guest-private" pages crashes the host. - Introduction of large folios requires tracking since guests will not have awareness whether the memory backing a page is huge or not. Guests may wish to share only a partial page. - Gunyah hypervisor support will be added and also make use of guestmem for its MMU-based protection. The address_space is targeted for the guestmem library. KVM still "owns" the inode and file. MAINTAINERS is updated with explicit references to guestmem files else the stm maintainers are automatically added. Tested with: run_kselftest.sh -t kvm:guest_memfd_test -t kvm:set_memory_region_test Signed-off-by: Elliot Berman <quic_eberman@quicinc.com> --- MAINTAINERS | 2 + include/linux/guestmem.h | 33 +++++++ mm/Kconfig | 3 + mm/Makefile | 1 + mm/guestmem.c | 232 +++++++++++++++++++++++++++++++++++++++++++++++ virt/kvm/Kconfig | 1 + virt/kvm/guest_memfd.c | 112 ++++++----------------- 7 files changed, 301 insertions(+), 83 deletions(-)