The patch titled
     Subject: drm/i915: use vmap in shmem_pin_map
has been added to the -mm tree.  Its filename is
     drm-i915-use-vmap-in-shmem_pin_map.patch

This patch should soon appear at
    https://ozlabs.org/~akpm/mmots/broken-out/drm-i915-use-vmap-in-shmem_pin_map.patch
and later at
    https://ozlabs.org/~akpm/mmotm/broken-out/drm-i915-use-vmap-in-shmem_pin_map.patch

Before you just go and hit "reply", please:
   a) Consider who else should be cc'ed
   b) Prefer to cc a suitable mailing list as well
   c) Ideally: find the original patch on the mailing list and do a
      reply-to-all to that, adding suitable additional cc's

*** Remember to use Documentation/process/submit-checklist.rst when
    testing your code ***

The -mm tree is included into linux-next and is updated
there every 3-4 working days

------------------------------------------------------
From: Christoph Hellwig <hch@xxxxxx>
Subject: drm/i915: use vmap in shmem_pin_map

shmem_pin_map somewhat awkwardly reimplements vmap using alloc_vm_area
and manual pte setup.  The only practical difference is that
alloc_vm_area prefaults the vmalloc area PTEs, which doesn't seem to be
required here (and could be added to vmap using a flag if actually
required).

Link: https://lkml.kernel.org/r/20200918163724.2511-4-hch@xxxxxx
Signed-off-by: Christoph Hellwig <hch@xxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 drivers/gpu/drm/i915/gt/shmem_utils.c |   90 ++++++++++--------------
 1 file changed, 38 insertions(+), 52 deletions(-)
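
As a quick illustration of what the conversion boils down to, here is a
minimal before/after sketch.  It is not part of the patch: the helper
names are made up, the pages array is assumed to be pre-filled, and the
sketch is only a simplified rendering of the two APIs the changelog
contrasts.

/*
 * Minimal sketch, not part of the patch: both helpers map a pre-filled
 * array of struct page pointers into contiguous kernel virtual address
 * space.  The function names are hypothetical.
 */
#include <linux/mm.h>
#include <linux/pgtable.h>
#include <linux/vmalloc.h>

/* Old style: alloc_vm_area() reserves the area and prefaults its page
 * tables, handing back one pte_t pointer per page for the caller to
 * fill by hand with mk_pte(). */
static void *map_pages_via_alloc_vm_area(struct page **pages,
					 size_t n_pages)
{
	struct vm_struct *area;
	pte_t **ptes;
	size_t i;

	ptes = kvmalloc_array(n_pages, sizeof(*ptes), GFP_KERNEL);
	if (!ptes)
		return NULL;
	area = alloc_vm_area(n_pages << PAGE_SHIFT, ptes);
	if (!area) {
		kvfree(ptes);
		return NULL;
	}
	for (i = 0; i < n_pages; i++)
		*ptes[i] = mk_pte(pages[i], PAGE_KERNEL);
	kvfree(ptes);
	return area->addr;		/* undo with vunmap() */
}

/* New style: vmap() installs the same mappings in a single call; per
 * the changelog, the only practical difference is the missing PTE
 * prefaulting, which this caller does not need. */
static void *map_pages_via_vmap(struct page **pages, size_t n_pages)
{
	return vmap(pages, n_pages, 0, PAGE_KERNEL);	/* undo with vunmap() */
}
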
--- a/drivers/gpu/drm/i915/gt/shmem_utils.c~drm-i915-use-vmap-in-shmem_pin_map
+++ a/drivers/gpu/drm/i915/gt/shmem_utils.c
@@ -49,80 +49,66 @@ struct file *shmem_create_from_object(st
 	return file;
 }
 
-static size_t shmem_npte(struct file *file)
+static size_t shmem_npages(struct file *file)
 {
 	return file->f_mapping->host->i_size >> PAGE_SHIFT;
 }
 
-static void __shmem_unpin_map(struct file *file, void *ptr, size_t n_pte)
-{
-	unsigned long pfn;
-
-	vunmap(ptr);
-
-	for (pfn = 0; pfn < n_pte; pfn++) {
-		struct page *page;
-
-		page = shmem_read_mapping_page_gfp(file->f_mapping, pfn,
-						   GFP_KERNEL);
-		if (!WARN_ON(IS_ERR(page))) {
-			put_page(page);
-			put_page(page);
-		}
-	}
-}
-
 void *shmem_pin_map(struct file *file)
 {
-	const size_t n_pte = shmem_npte(file);
-	pte_t *stack[32], **ptes, **mem;
-	struct vm_struct *area;
-	unsigned long pfn;
-
-	mem = stack;
-	if (n_pte > ARRAY_SIZE(stack)) {
-		mem = kvmalloc_array(n_pte, sizeof(*mem), GFP_KERNEL);
-		if (!mem)
+	const size_t n_pages = shmem_npages(file);
+	struct page **pages, *stack[32];
+	void *vaddr;
+	long i;
+
+	pages = stack;
+	if (n_pages > ARRAY_SIZE(stack)) {
+		pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
+		if (!pages)
 			return NULL;
 	}
 
-	area = alloc_vm_area(n_pte << PAGE_SHIFT, mem);
-	if (!area) {
-		if (mem != stack)
-			kvfree(mem);
-		return NULL;
-	}
-
-	ptes = mem;
-	for (pfn = 0; pfn < n_pte; pfn++) {
-		struct page *page;
-
-		page = shmem_read_mapping_page_gfp(file->f_mapping, pfn,
-						   GFP_KERNEL);
-		if (IS_ERR(page))
+	for (i = 0; i < n_pages; i++) {
+		pages[i] = shmem_read_mapping_page_gfp(file->f_mapping, i,
+						       GFP_KERNEL);
+		if (IS_ERR(pages[i]))
 			goto err_page;
-
-		**ptes++ = mk_pte(page, PAGE_KERNEL);
 	}
 
-	if (mem != stack)
-		kvfree(mem);
+	vaddr = vmap(pages, n_pages, 0, PAGE_KERNEL);
+	if (!vaddr)
+		goto err_page;
+	if (pages != stack)
+		kvfree(pages);
 	mapping_set_unevictable(file->f_mapping);
-	return area->addr;
+	return vaddr;
 
 err_page:
-	if (mem != stack)
-		kvfree(mem);
-
-	__shmem_unpin_map(file, area->addr, pfn);
+	while (--i >= 0)
+		put_page(pages[i]);
+	if (pages != stack)
+		kvfree(pages);
 	return NULL;
 }
 
 void shmem_unpin_map(struct file *file, void *ptr)
 {
+	long i = shmem_npages(file);
+
 	mapping_clear_unevictable(file->f_mapping);
-	__shmem_unpin_map(file, ptr, shmem_npte(file));
+	vunmap(ptr);
+
+	for (i = 0; i < shmem_npages(file); i++) {
+		struct page *page;
+
+		page = shmem_read_mapping_page_gfp(file->f_mapping, i,
+						   GFP_KERNEL);
+		if (!WARN_ON(IS_ERR(page))) {
+			put_page(page);
+			put_page(page);
+		}
+	}
 }
 
 static int __shmem_rw(struct file *file, loff_t off,
_

Patches currently in -mm which might be from hch@xxxxxx are

zsmalloc-switch-from-alloc_vm_area-to-get_vm_area.patch
mm-add-a-vmap_pfn-function.patch
drm-i915-use-vmap-in-shmem_pin_map.patch
drm-i915-use-vmap-in-i915_gem_object_map.patch
xen-xenbus-use-apply_to_page_range-directly-in-xenbus_map_ring_pv.patch
x86-xen-open-code-alloc_vm_area-in-arch_gnttab_valloc.patch