Refactor the page-write handler for deferred I/O. Drivers use the
function to let fbdev track written pages of mmap'ed framebuffer
memory.

v2:
	* don't export the helper until we have an external caller

Signed-off-by: Thomas Zimmermann <tzimmermann@xxxxxxx>
Reviewed-by: Javier Martinez Canillas <javierm@xxxxxxxxxx>
---
 drivers/video/fbdev/core/fb_defio.c | 68 ++++++++++++++++++++---------
 1 file changed, 48 insertions(+), 20 deletions(-)

diff --git a/drivers/video/fbdev/core/fb_defio.c b/drivers/video/fbdev/core/fb_defio.c
index a03b9c64fc61..214581ce5840 100644
--- a/drivers/video/fbdev/core/fb_defio.c
+++ b/drivers/video/fbdev/core/fb_defio.c
@@ -143,29 +143,18 @@ int fb_deferred_io_fsync(struct file *file, loff_t start, loff_t end, int datasy
 }
 EXPORT_SYMBOL_GPL(fb_deferred_io_fsync);
 
-/* vm_ops->page_mkwrite handler */
-static vm_fault_t fb_deferred_io_mkwrite(struct vm_fault *vmf)
+/*
+ * Adds a page to the dirty list. Requires caller to hold
+ * struct fb_deferred_io.lock. Call this from struct
+ * vm_operations_struct.page_mkwrite.
+ */
+static vm_fault_t __fb_deferred_io_track_page(struct fb_info *info, unsigned long offset,
+					      struct page *page)
 {
-	struct page *page = vmf->page;
-	struct fb_info *info = vmf->vma->vm_private_data;
 	struct fb_deferred_io *fbdefio = info->fbdefio;
 	struct fb_deferred_io_pageref *pageref;
-	unsigned long offset;
 	vm_fault_t ret;
 
-	offset = (vmf->address - vmf->vma->vm_start);
-
-	/* this is a callback we get when userspace first tries to
-	write to the page. we schedule a workqueue. that workqueue
-	will eventually mkclean the touched pages and execute the
-	deferred framebuffer IO. then if userspace touches a page
-	again, we repeat the same scheme */
-
-	file_update_time(vmf->vma->vm_file);
-
-	/* protect against the workqueue changing the page list */
-	mutex_lock(&fbdefio->lock);
-
 	/* first write in this cycle, notify the driver */
 	if (fbdefio->first_io && list_empty(&fbdefio->pagelist))
 		fbdefio->first_io(info);
@@ -186,8 +175,6 @@ static vm_fault_t fb_deferred_io_mkwrite(struct vm_fault *vmf)
 	 */
 	lock_page(pageref->page);
 
-	mutex_unlock(&fbdefio->lock);
-
 	/* come back after delay to process the deferred IO */
 	schedule_delayed_work(&info->deferred_work, fbdefio->delay);
 	return VM_FAULT_LOCKED;
@@ -197,6 +184,47 @@ static vm_fault_t fb_deferred_io_mkwrite(struct vm_fault *vmf)
 	return ret;
 }
 
+/*
+ * fb_deferred_io_page_mkwrite - Mark a page as written for deferred I/O
+ * @fb_info: The fbdev info structure
+ * @vmf: The VM fault
+ *
+ * This is a callback we get when userspace first tries to
+ * write to the page. We schedule a workqueue. That workqueue
+ * will eventually mkclean the touched pages and execute the
+ * deferred framebuffer IO. Then if userspace touches a page
+ * again, we repeat the same scheme.
+ *
+ * Returns:
+ * VM_FAULT_LOCKED on success, or a VM_FAULT error otherwise.
+ */
+static vm_fault_t fb_deferred_io_page_mkwrite(struct fb_info *info, struct vm_fault *vmf)
+{
+	struct page *page = vmf->page;
+	struct fb_deferred_io *fbdefio = info->fbdefio;
+	unsigned long offset;
+	vm_fault_t ret;
+
+	offset = (vmf->address - vmf->vma->vm_start);
+
+	file_update_time(vmf->vma->vm_file);
+
+	/* protect against the workqueue changing the page list */
+	mutex_lock(&fbdefio->lock);
+	ret = __fb_deferred_io_track_page(info, offset, page);
+	mutex_unlock(&fbdefio->lock);
+
+	return ret;
+}
+
+/* vm_ops->page_mkwrite handler */
+static vm_fault_t fb_deferred_io_mkwrite(struct vm_fault *vmf)
+{
+	struct fb_info *info = vmf->vma->vm_private_data;
+
+	return fb_deferred_io_page_mkwrite(info, vmf);
+}
+
 static const struct vm_operations_struct fb_deferred_io_vm_ops = {
 	.fault		= fb_deferred_io_fault,
 	.page_mkwrite	= fb_deferred_io_mkwrite,
-- 
2.36.0
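
Usage sketch only (the helper deliberately stays static and unexported in
v2): if fb_deferred_io_page_mkwrite() were exported later, an external
caller in a driver's own vm_operations_struct could forward page writes
to it roughly as below. The foo_* name is hypothetical and not part of
this patch.

static vm_fault_t foo_fb_vm_page_mkwrite(struct vm_fault *vmf)
{
	struct fb_info *info = vmf->vma->vm_private_data;

	/* hand the written page to fbdev's deferred-I/O tracking */
	return fb_deferred_io_page_mkwrite(info, vmf);
}

This mirrors the in-tree fb_deferred_io_mkwrite() wrapper above: the
helper keeps file_update_time() and the fbdefio->lock handling in one
place, so a caller only resolves its fb_info and forwards the fault.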