@@ -143,29 +143,18 @@ int fb_deferred_io_fsync(struct file *file, loff_t start, loff_t end, int datasy
}
EXPORT_SYMBOL_GPL(fb_deferred_io_fsync);
-/* vm_ops->page_mkwrite handler */
-static vm_fault_t fb_deferred_io_mkwrite(struct vm_fault *vmf)
+/*
+ * Adds a page to the dirty list. Requires caller to hold
+ * struct fb_deferred_io.lock. Call this from struct
+ * vm_operations_struct.page_mkwrite.
+ */
+static vm_fault_t __fb_deferred_io_track_page(struct fb_info *info, unsigned long offset,
+ struct page *page)
{
- struct page *page = vmf->page;
- struct fb_info *info = vmf->vma->vm_private_data;
struct fb_deferred_io *fbdefio = info->fbdefio;
struct fb_deferred_io_pageref *pageref;
- unsigned long offset;
vm_fault_t ret;
- offset = (vmf->address - vmf->vma->vm_start);
-
- /* this is a callback we get when userspace first tries to
- write to the page. we schedule a workqueue. that workqueue
- will eventually mkclean the touched pages and execute the
- deferred framebuffer IO. then if userspace touches a page
- again, we repeat the same scheme */
-
- file_update_time(vmf->vma->vm_file);
-
- /* protect against the workqueue changing the page list */
- mutex_lock(&fbdefio->lock);
-
/* first write in this cycle, notify the driver */
if (fbdefio->first_io && list_empty(&fbdefio->pagelist))
fbdefio->first_io(info);
@@ -186,8 +175,6 @@ static vm_fault_t fb_deferred_io_mkwrite(struct vm_fault *vmf)
*/
lock_page(pageref->page);
- mutex_unlock(&fbdefio->lock);
-
/* come back after delay to process the deferred IO */
schedule_delayed_work(&info->deferred_work, fbdefio->delay);
return VM_FAULT_LOCKED;
@@ -197,6 +184,48 @@ static vm_fault_t fb_deferred_io_mkwrite(struct vm_fault *vmf)
return ret;
}
+/**
+ * fb_deferred_io_page_mkwrite - Mark a page as written for deferred I/O
+ * @info: The fbdev info structure
+ * @vmf: The VM fault
+ *
+ * This is a callback we get when userspace first tries to
+ * write to the page. We schedule a workqueue. That workqueue
+ * will eventually mkclean the touched pages and execute the
+ * deferred framebuffer IO. Then if userspace touches a page
+ * again, we repeat the same scheme.
+ *
+ * Returns:
+ * VM_FAULT_LOCKED on success, or a VM_FAULT error otherwise.
+ */
+vm_fault_t fb_deferred_io_page_mkwrite(struct fb_info *info, struct vm_fault *vmf)
+{
+ struct page *page = vmf->page;
+ struct fb_deferred_io *fbdefio = info->fbdefio;
+ unsigned long offset;
+ vm_fault_t ret;
+
+ offset = (vmf->address - vmf->vma->vm_start);
+
+ file_update_time(vmf->vma->vm_file);
+
+ /* protect against the workqueue changing the page list */
+ mutex_lock(&fbdefio->lock);
+ ret = __fb_deferred_io_track_page(info, offset, page);
+ mutex_unlock(&fbdefio->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(fb_deferred_io_page_mkwrite);
+
+/* vm_ops->page_mkwrite handler */
+static vm_fault_t fb_deferred_io_mkwrite(struct vm_fault *vmf)
+{
+ struct fb_info *info = vmf->vma->vm_private_data;
+
+ return fb_deferred_io_page_mkwrite(info, vmf);
+}
+
static const struct vm_operations_struct fb_deferred_io_vm_ops = {
.fault = fb_deferred_io_fault,
.page_mkwrite = fb_deferred_io_mkwrite,
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -670,6 +670,7 @@ static inline void __fb_pad_aligned_buffer(u8 *dst, u32 d_pitch,
}
/* drivers/video/fb_defio.c */
+vm_fault_t fb_deferred_io_page_mkwrite(struct fb_info *info, struct vm_fault *vmf);
int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma);
extern int fb_deferred_io_init(struct fb_info *info);
extern void fb_deferred_io_open(struct fb_info *info,
Refactor the page-write handler and export it as helper function
fb_deferred_io_page_mkwrite(). Drivers that implement struct
vm_operations_struct.page_mkwrite for deferred I/O should use the
function to let fbdev track written pages of mmap'ed framebuffer
memory.

Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
---
 drivers/video/fbdev/core/fb_defio.c | 69 ++++++++++++++++++++---------
 include/linux/fb.h                  |  1 +
 2 files changed, 50 insertions(+), 20 deletions(-)
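
For illustration only (not part of the patch): a minimal sketch of how a
driver that installs its own vm_operations_struct might forward page_mkwrite
to the exported helper. The driver and handler names are hypothetical; only
fb_deferred_io_page_mkwrite() and the vm_private_data convention come from
the code above.

  #include <linux/fb.h>
  #include <linux/mm.h>

  /* Hypothetical driver handler: let fbdev track the written page. */
  static vm_fault_t mydrv_fb_page_mkwrite(struct vm_fault *vmf)
  {
          /* fbdev's deferred-I/O mmap code stores the fb_info here */
          struct fb_info *info = vmf->vma->vm_private_data;

          /* driver-specific bookkeeping before the write could go here */

          return fb_deferred_io_page_mkwrite(info, vmf);
  }

The handler would then be installed as the .page_mkwrite member of the
driver's vm_operations_struct, alongside the driver's own fault handling.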