@@ -117,6 +117,16 @@ static struct folio *kvm_gmem_get_folio(struct inode *inode, pgoff_t index)
return filemap_grab_folio(inode->i_mapping, index);
}
+#ifdef CONFIG_KVM_GMEM_SHARED_MEM
+static void kvm_gmem_offset_range_invalidate_shared(struct inode *inode,
+ pgoff_t start, pgoff_t end);
+#else
+static inline void kvm_gmem_offset_range_invalidate_shared(struct inode *inode,
+ pgoff_t start, pgoff_t end)
+{
+}
+#endif
+
static void kvm_gmem_invalidate_begin(struct kvm_gmem *gmem, pgoff_t start,
pgoff_t end)
{
@@ -126,6 +136,7 @@ static void kvm_gmem_invalidate_begin(struct kvm_gmem *gmem, pgoff_t start,
unsigned long index;
xa_for_each_range(&gmem->bindings, index, slot, start, end - 1) {
+ struct file *file = READ_ONCE(slot->gmem.file);
pgoff_t pgoff = slot->gmem.pgoff;
struct kvm_gfn_range gfn_range = {
@@ -145,6 +156,16 @@ static void kvm_gmem_invalidate_begin(struct kvm_gmem *gmem, pgoff_t start,
}
flush |= kvm_mmu_unmap_gfn_range(kvm, &gfn_range);
+
+ /*
+ * A NULL file means this was called after kvm_gmem_unbind(): all
+ * in-flight operations are gone and the file has been closed.
+ */
+ if (file) {
+ kvm_gmem_offset_range_invalidate_shared(file_inode(file),
+ gfn_range.start,
+ gfn_range.end);
+ }
}
if (flush)
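
For context on the READ_ONCE() snapshot taken at the top of this loop:
it is assumed to pair with a WRITE_ONCE() that clears slot->gmem.file
on unbind, so each iteration dereferences only its local copy. A
minimal sketch of that pattern (the sketch_* helpers are illustrative,
not part of this series):

static void sketch_unbind(struct kvm_memory_slot *slot)
{
	WRITE_ONCE(slot->gmem.file, NULL);	/* publish "unbound" */
}

static void sketch_invalidate(struct kvm_memory_slot *slot,
			      pgoff_t start, pgoff_t end)
{
	struct file *file = READ_ONCE(slot->gmem.file);	/* snapshot once */

	if (file)	/* NULL: unbind already ran, nothing to update */
		kvm_gmem_offset_range_invalidate_shared(file_inode(file),
							start, end);
}
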
@@ -509,6 +530,41 @@ static int kvm_gmem_offset_clear_shared(struct inode *inode, pgoff_t index)
return r;
}
+/*
+ * Callback when invalidating memory that is potentially shared.
+ *
+ * Must be called with the filemap (inode->i_mapping) invalidate_lock held.
+ */
+static void kvm_gmem_offset_range_invalidate_shared(struct inode *inode,
+ pgoff_t start, pgoff_t end)
+{
+ struct xarray *shared_offsets = &kvm_gmem_private(inode)->shared_offsets;
+ pgoff_t i;
+
+ rwsem_assert_held_write_nolockdep(&inode->i_mapping->invalidate_lock);
+
+ for (i = start; i < end; i++) {
+ /*
+ * If the folio is NONE_SHARED, it indicates that it is
+ * transitioning to private (GUEST_SHARED). Transition it to
+ * shared (ALL_SHARED) and remove the callback.
+ */
+ if (xa_to_value(xa_load(shared_offsets, i)) == KVM_GMEM_NONE_SHARED) {
+ struct folio *folio = filemap_lock_folio(inode->i_mapping, i);
+
+ if (!WARN_ON_ONCE(IS_ERR(folio))) {
+ if (folio_test_guestmem(folio))
+ kvm_gmem_restore_pending_folio(folio, inode);
+
+ folio_unlock(folio);
+ folio_put(folio);
+ }
+ }
+
+ xa_erase(shared_offsets, i);
+ }
+}
+
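
The rwsem_assert_held_write_nolockdep() above documents the calling
convention rather than enforcing it through lockdep: invalidation runs
with the mapping's invalidate_lock held for write. A simplified sketch
of the assumed caller shape, modeled on the punch-hole path (not
verbatim from the file; lstart/lend are byte offsets while start/end
are page offsets):

	filemap_invalidate_lock(inode->i_mapping);

	kvm_gmem_invalidate_begin(gmem, start, end);
	truncate_inode_pages_range(inode->i_mapping, lstart, lend);
	kvm_gmem_invalidate_end(gmem, start, end);

	filemap_invalidate_unlock(inode->i_mapping);
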
/*
* Marks the range [start, end) as not shared with the host. If the host doesn't
* have any references to a particular folio, then that folio is marked as
When guest_memfd backed memory is invalidated, e.g., on punching holes
or on release, ensure that the sharing states are updated and that any
folios in a transient state are restored to an appropriate state.

Signed-off-by: Fuad Tabba <tabba@google.com>
---
 virt/kvm/guest_memfd.c | 56 ++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 56 insertions(+)
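
For testing, this path is reachable from userspace by punching a hole
in a guest_memfd. A rough sketch (error handling trimmed; creating the
VM fd is assumed, and the size and punched range are only examples):

#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>
#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <unistd.h>

/* vm_fd: an existing KVM VM file descriptor. */
static int punch_gmem_hole(int vm_fd)
{
	struct kvm_create_guest_memfd gmem = {
		.size = 4ULL << 20,	/* 4 MiB of guest_memfd backing */
	};
	int fd = ioctl(vm_fd, KVM_CREATE_GUEST_MEMFD, &gmem);

	if (fd < 0)
		return -1;

	/*
	 * Discarding a range drives kvm_gmem_invalidate_begin() in the
	 * kernel, which with this patch also clears the range's sharing
	 * state via kvm_gmem_offset_range_invalidate_shared().
	 */
	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
		      0, 4096) < 0) {
		close(fd);
		return -1;
	}

	close(fd);
	return 0;
}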