--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -1961,6 +1961,16 @@ int userfaultfd_wp_async(struct vm_area_struct *vma)
 	return (ctx && (ctx->features & UFFD_FEATURE_WP_ASYNC));
 }
 
+int wp_range_async(struct vm_area_struct *vma, unsigned long start, unsigned long len)
+{
+	struct userfaultfd_ctx *ctx = vma->vm_userfaultfd_ctx.ctx;
+
+	if (!ctx)
+		return -1;
+
+	return __mwriteprotect_range(ctx->mm, start, len, true, &ctx->mmap_changing);
+}
+
 static inline unsigned int uffd_ctx_features(__u64 user_features)
 {
 	/*
--- a/include/linux/userfaultfd_k.h
+++ b/include/linux/userfaultfd_k.h
@@ -73,6 +73,9 @@ extern ssize_t mcopy_continue(struct mm_struct *dst_mm, unsigned long dst_start,
 extern int mwriteprotect_range(struct mm_struct *dst_mm,
 			       unsigned long start, unsigned long len,
 			       bool enable_wp, atomic_t *mmap_changing);
+extern int __mwriteprotect_range(struct mm_struct *dst_mm,
+				 unsigned long start, unsigned long len,
+				 bool enable_wp, atomic_t *mmap_changing);
 extern void uffd_wp_range(struct mm_struct *dst_mm, struct vm_area_struct *vma,
 			  unsigned long start, unsigned long len, bool enable_wp);
 
@@ -180,6 +183,8 @@ extern int userfaultfd_unmap_prep(struct mm_struct *mm, unsigned long start,
 extern void userfaultfd_unmap_complete(struct mm_struct *mm,
 				       struct list_head *uf);
 extern int userfaultfd_wp_async(struct vm_area_struct *vma);
+extern int wp_range_async(struct vm_area_struct *vma, unsigned long start,
+			  unsigned long len);
 
 #else /* CONFIG_USERFAULTFD */
 
@@ -280,6 +285,11 @@ int userfaultfd_wp_async(struct vm_area_struct *vma)
 	return false;
 }
 
+int wp_range_async(struct vm_area_struct *vma, unsigned long start, unsigned long len)
+{
+	return -1;
+}
+
 #endif /* CONFIG_USERFAULTFD */
 
 static inline bool pte_marker_entry_uffd_wp(swp_entry_t entry)
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -734,25 +734,13 @@ void uffd_wp_range(struct mm_struct *dst_mm, struct vm_area_struct *dst_vma,
 	tlb_finish_mmu(&tlb);
 }
 
-int mwriteprotect_range(struct mm_struct *dst_mm, unsigned long start,
-			unsigned long len, bool enable_wp,
-			atomic_t *mmap_changing)
+int __mwriteprotect_range(struct mm_struct *dst_mm, unsigned long start,
+			  unsigned long len, bool enable_wp,
+			  atomic_t *mmap_changing)
 {
 	struct vm_area_struct *dst_vma;
 	unsigned long page_mask;
 	int err;
-
-	/*
-	 * Sanitize the command parameters:
-	 */
-	BUG_ON(start & ~PAGE_MASK);
-	BUG_ON(len & ~PAGE_MASK);
-
-	/* Does the address range wrap, or is the span zero-sized? */
-	BUG_ON(start + len <= start);
-
-	mmap_read_lock(dst_mm);
-
 	/*
 	 * If memory mappings are changing because of non-cooperative
 	 * operation (e.g. mremap) running in parallel, bail out and
@@ -783,6 +771,28 @@ int mwriteprotect_range(struct mm_struct *dst_mm, unsigned long start,
 	err = 0;
 out_unlock:
+	return err;
+}
+
+int mwriteprotect_range(struct mm_struct *dst_mm, unsigned long start,
+			unsigned long len, bool enable_wp,
+			atomic_t *mmap_changing)
+{
+	int err;
+
+	/*
+	 * Sanitize the command parameters:
+	 */
+	BUG_ON(start & ~PAGE_MASK);
+	BUG_ON(len & ~PAGE_MASK);
+
+	/* Does the address range wrap, or is the span zero-sized? */
+	BUG_ON(start + len <= start);
+
+	mmap_read_lock(dst_mm);
+
+	err = __mwriteprotect_range(dst_mm, start, len, enable_wp, mmap_changing);
+
 	mmap_read_unlock(dst_mm);
 	return err;
 }
Split mwriteprotect_range() to create an unlocked version. This will be
used in the next patch to write-protect a memory area. Add a helper
function, wp_range_async(), as well.

Signed-off-by: Muhammad Usama Anjum <usama.anjum@collabora.com>
---
Changes in v7:
- Remove async being set in the PAGEMAP_IOCTL
---
 fs/userfaultfd.c              | 10 +++++++++
 include/linux/userfaultfd_k.h | 10 +++++++++
 mm/userfaultfd.c              | 40 ++++++++++++++++++++++-------------
 3 files changed, 45 insertions(+), 15 deletions(-)
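
For context, wp_range_async() funnels into the unlocked
__mwriteprotect_range(), so the caller is expected to already hold
mmap_read_lock() on the mm (the locked mwriteprotect_range() wrapper takes
it itself). Below is a rough usage sketch for a future caller such as the
pagemap ioctl mentioned above; the function name pagemap_wp_vma_range()
and the -EINVAL error choice are illustrative assumptions, not part of
this patch:

	/*
	 * Hypothetical caller, with mmap_read_lock(vma->vm_mm) already
	 * held: asynchronously write-protect [start, start + len) of a
	 * VMA registered with UFFD_FEATURE_WP_ASYNC.
	 */
	static int pagemap_wp_vma_range(struct vm_area_struct *vma,
					unsigned long start, unsigned long len)
	{
		/* Only async-WP userfaultfd VMAs qualify. */
		if (!userfaultfd_wp_async(vma))
			return -EINVAL;

		/* Returns -1 when the VMA has no userfaultfd context. */
		return wp_range_async(vma, start, len);
	}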