On 9/17/19 5:53 PM, Vitaly Wool wrote:
> Currently there is a leak in init_z3fold_page() -- it allocates
> handles from kmem cache even for headless pages, but then they are
> never used and never freed, so eventually kmem cache may get
> exhausted. This patch provides a fix for that.
>
> Reported-by: Markus Linnala <markus.linnala@xxxxxxxxx>
> Signed-off-by: Vitaly Wool <vitalywool@xxxxxxxxx>

Can the commit that introduced this leak be pinpointed with a Fixes: tag, and Cc: stable added?

> ---
> mm/z3fold.c | 15 +++++++++------
> 1 file changed, 9 insertions(+), 6 deletions(-)
>
> diff --git a/mm/z3fold.c b/mm/z3fold.c
> index 6397725b5ec6..7dffef2599c3 100644
> --- a/mm/z3fold.c
> +++ b/mm/z3fold.c
> @@ -301,14 +301,11 @@ static void z3fold_unregister_migration(struct z3fold_pool *pool)
> }
>
> /* Initializes the z3fold header of a newly allocated z3fold page */
> -static struct z3fold_header *init_z3fold_page(struct page *page,
> +static struct z3fold_header *init_z3fold_page(struct page *page, bool headless,
> struct z3fold_pool *pool, gfp_t gfp)
> {
> struct z3fold_header *zhdr = page_address(page);
> - struct z3fold_buddy_slots *slots = alloc_slots(pool, gfp);
> -
> - if (!slots)
> - return NULL;
> + struct z3fold_buddy_slots *slots;
>
> INIT_LIST_HEAD(&page->lru);
> clear_bit(PAGE_HEADLESS, &page->private);
> @@ -316,6 +313,12 @@ static struct z3fold_header *init_z3fold_page(struct page *page,
> clear_bit(NEEDS_COMPACTING, &page->private);
> clear_bit(PAGE_STALE, &page->private);
> clear_bit(PAGE_CLAIMED, &page->private);
> + if (headless)
> + return zhdr;
> +
> + slots = alloc_slots(pool, gfp);
> + if (!slots)
> + return NULL;
>
> spin_lock_init(&zhdr->page_lock);
> kref_init(&zhdr->refcount);
> @@ -962,7 +965,7 @@ static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
> if (!page)
> return -ENOMEM;
>
> - zhdr = init_z3fold_page(page, pool, gfp);
> + zhdr = init_z3fold_page(page, bud == HEADLESS, pool, gfp);
> if (!zhdr) {
> __free_page(page);
> return -ENOMEM;
>