The gem drivers use shmemfs to allocate backing storage for gem
objects. On the Samsung Chromebook Plus, the drm/rockchip driver may
call rockchip_gem_get_pages -> drm_gem_get_pages ->
shmem_read_mapping_page to pin a large number of pages, breaking the
page reclaim mechanism and triggering the oom-killer.

E.g. when the size of a zone is 3.9 GiB, the inactive_ratio is 5. If
active_anon / inactive_anon < 5 and all pages on the inactive_anon lru
are pinned, page reclaim keeps scanning the inactive_anon lru without
reclaiming any memory. The rockchip driver can therefore break page
reclaim by pinning only about 1/6 of the anon lru pages.

Mark these pinned pages as unevictable to avoid the premature
oom-killer invocation. See the similar patch for the i915 driver [1].

[1]: https://patchwork.freedesktop.org/patch/msgid/20181106132324.17390-1-chris@xxxxxxxxxxxxxxxxxx

Signed-off-by: Kuo-Hsin Yang <vovoy@xxxxxxxxxxxx>
Cc: Daniel Vetter <daniel@xxxxxxxx>
Cc: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
---
 drivers/gpu/drm/drm_gem.c | 36 +++++++++++++++++++++++++++++++++---
 1 file changed, 33 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 8b55ece97967f..2896ff60552f5 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -37,6 +37,7 @@
 #include <linux/shmem_fs.h>
 #include <linux/dma-buf.h>
 #include <linux/mem_encrypt.h>
+#include <linux/pagevec.h>
 #include <drm/drmP.h>
 #include <drm/drm_vma_manager.h>
 #include <drm/drm_gem.h>
@@ -526,6 +527,17 @@ int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
 }
 EXPORT_SYMBOL(drm_gem_create_mmap_offset);
 
+/*
+ * Move pages to appropriate lru and release the pagevec, decrementing the
+ * ref count of those pages.
+ */
+static void drm_gem_check_release_pagevec(struct pagevec *pvec)
+{
+	check_move_unevictable_pages(pvec);
+	__pagevec_release(pvec);
+	cond_resched();
+}
+
 /**
  * drm_gem_get_pages - helper to allocate backing pages for a GEM object
  * from shmem
@@ -551,6 +563,7 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj)
 {
 	struct address_space *mapping;
 	struct page *p, **pages;
+	struct pagevec pvec;
 	int i, npages;
 
 	/* This is the shared memory object that backs the GEM resource */
@@ -568,6 +581,8 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj)
 	if (pages == NULL)
 		return ERR_PTR(-ENOMEM);
 
+	mapping_set_unevictable(mapping);
+
 	for (i = 0; i < npages; i++) {
 		p = shmem_read_mapping_page(mapping, i);
 		if (IS_ERR(p))
@@ -586,8 +601,14 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj)
 	return pages;
 
 fail:
-	while (i--)
-		put_page(pages[i]);
+	mapping_clear_unevictable(mapping);
+	pagevec_init(&pvec);
+	while (i--) {
+		if (!pagevec_add(&pvec, pages[i]))
+			drm_gem_check_release_pagevec(&pvec);
+	}
+	if (pagevec_count(&pvec))
+		drm_gem_check_release_pagevec(&pvec);
 
 	kvfree(pages);
 	return ERR_CAST(p);
@@ -605,6 +626,11 @@ void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
 		bool dirty, bool accessed)
 {
 	int i, npages;
+	struct address_space *mapping;
+	struct pagevec pvec;
+
+	mapping = file_inode(obj->filp)->i_mapping;
+	mapping_clear_unevictable(mapping);
 
 	/* We already BUG_ON() for non-page-aligned sizes in
 	 * drm_gem_object_init(), so we should never hit this unless
@@ -614,6 +640,7 @@ void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
 
 	npages = obj->size >> PAGE_SHIFT;
 
+	pagevec_init(&pvec);
 	for (i = 0; i < npages; i++) {
 		if (dirty)
 			set_page_dirty(pages[i]);
@@ -622,8 +649,11 @@ void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
 			mark_page_accessed(pages[i]);
 
 		/* Undo the reference we took when populating the table */
-		put_page(pages[i]);
+		if (!pagevec_add(&pvec, pages[i]))
+			drm_gem_check_release_pagevec(&pvec);
 	}
+	if (pagevec_count(&pvec))
+		drm_gem_check_release_pagevec(&pvec);
 
 	kvfree(pages);
 }
-- 
2.20.0.405.gbc1bbc6f85-goog
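
For reference, the inactive_ratio figure quoted above comes from the
inactive_list_is_low() heuristic in mm/vmscan.c, which uses
int_sqrt(10 * gb) on the LRU size in GiB (as of the v4.20 era; the
exact form varies across kernel versions). A minimal userspace sketch
of that arithmetic, under those assumptions, not the kernel's literal
code:

#include <math.h>
#include <stdio.h>

/*
 * Approximation of the inactive_ratio heuristic in mm/vmscan.c
 * (inactive_list_is_low(), circa v4.20): reclaim only deactivates
 * pages from the active lru while active > inactive * ratio, so if
 * every page on the inactive lru is pinned, scanning it frees
 * nothing and reclaim makes no forward progress.
 */
static unsigned long inactive_ratio(unsigned long lru_size_gb)
{
	/* The kernel computes int_sqrt(10 * gb), with a floor of 1. */
	unsigned long ratio = (unsigned long)sqrt(10.0 * (double)lru_size_gb);

	return ratio ? ratio : 1;
}

int main(void)
{
	/* A 3.9 GiB zone truncates to 3 GiB of lru pages: sqrt(30) ~= 5 */
	printf("inactive_ratio = %lu\n", inactive_ratio(3));
	return 0;
}

With a ratio of 5, the inactive_anon lru is kept near 1 part in 6 of
the anon total (1 part inactive to 5 parts active), which is where the
1/6 figure in the commit message comes from: pinning just that slice
is enough to stall reclaim.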