Convert to use vm_map_pages() to map a range of kernel memory to a user vma.

Signed-off-by: Souptick Joarder <jrdr.linux@xxxxxxxxx>
Reviewed-by: Oleksandr Andrushchenko <oleksandr_andrushchenko@xxxxxxxx>
---
 drivers/gpu/drm/xen/xen_drm_front_gem.c | 18 +++++-------------
 1 file changed, 5 insertions(+), 13 deletions(-)

diff --git a/drivers/gpu/drm/xen/xen_drm_front_gem.c b/drivers/gpu/drm/xen/xen_drm_front_gem.c
index 28bc501..dd0602d 100644
--- a/drivers/gpu/drm/xen/xen_drm_front_gem.c
+++ b/drivers/gpu/drm/xen/xen_drm_front_gem.c
@@ -224,8 +224,7 @@ struct drm_gem_object *
 static int gem_mmap_obj(struct xen_gem_object *xen_obj,
 			struct vm_area_struct *vma)
 {
-	unsigned long addr = vma->vm_start;
-	int i;
+	int ret;
 
 	/*
 	 * clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
@@ -246,18 +245,11 @@ static int gem_mmap_obj(struct xen_gem_object *xen_obj,
 	 * FIXME: as we insert all the pages now then no .fault handler must
 	 * be called, so don't provide one
 	 */
-	for (i = 0; i < xen_obj->num_pages; i++) {
-		int ret;
-
-		ret = vm_insert_page(vma, addr, xen_obj->pages[i]);
-		if (ret < 0) {
-			DRM_ERROR("Failed to insert pages into vma: %d\n", ret);
-			return ret;
-		}
+	ret = vm_map_pages(vma, xen_obj->pages, xen_obj->num_pages);
+	if (ret < 0)
+		DRM_ERROR("Failed to map pages into vma: %d\n", ret);
 
-		addr += PAGE_SIZE;
-	}
-	return 0;
+	return ret;
 }
 
 int xen_drm_front_gem_mmap(struct file *filp, struct vm_area_struct *vma)
-- 
1.9.1
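
For readers unfamiliar with the helper, below is a minimal sketch (not part of the patch) of the before/after pattern the conversion applies. The struct my_buf and the two function names are hypothetical and exist only for illustration; vm_insert_page() and vm_map_pages() are the real mm helpers.

/*
 * Illustrative sketch only: a hypothetical driver buffer with a pinned
 * page array, mapped into a user vma first page-by-page, then with a
 * single vm_map_pages() call.
 */
#include <linux/mm.h>

struct my_buf {
	struct page **pages;		/* pinned kernel pages backing the buffer */
	unsigned long num_pages;	/* number of entries in pages[] */
};

/* Old pattern: insert each page at an explicitly advanced user address. */
static int my_buf_mmap_loop(struct my_buf *buf, struct vm_area_struct *vma)
{
	unsigned long addr = vma->vm_start;
	unsigned long i;
	int ret;

	for (i = 0; i < buf->num_pages; i++) {
		ret = vm_insert_page(vma, addr, buf->pages[i]);
		if (ret < 0)
			return ret;
		addr += PAGE_SIZE;
	}
	return 0;
}

/* New pattern: map the whole page array in one call. */
static int my_buf_mmap(struct my_buf *buf, struct vm_area_struct *vma)
{
	return vm_map_pages(vma, buf->pages, buf->num_pages);
}

Besides dropping the per-page bookkeeping, vm_map_pages() treats vma->vm_pgoff as an index into the page array and rejects mappings that do not fit within it; drivers that want the offset ignored can use vm_map_pages_zero() instead.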