Move the code that frees a zspage object out of the zs_free() function
and into its own obj_free() function.  This is required by zsmalloc
shrinking, which will also need to free objects during zspage
reclaiming.

Signed-off-by: Dan Streetman <ddstreet@xxxxxxxx>
Cc: Minchan Kim <minchan@xxxxxxxxxx>
---
 mm/zsmalloc.c | 24 ++++++++++++++++--------
 1 file changed, 16 insertions(+), 8 deletions(-)

diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 51db622..cff8935 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -584,6 +584,21 @@ static unsigned long obj_idx_to_offset(struct page *page,
 	return off + obj_idx * class_size;
 }
 
+static void obj_free(unsigned long obj, struct page *page, unsigned long offset)
+{
+	struct page *first_page = get_first_page(page);
+	struct link_free *link;
+
+	/* Insert this object in containing zspage's freelist */
+	link = (struct link_free *)((unsigned char *)kmap_atomic(page)
+							+ offset);
+	link->next = first_page->freelist;
+	kunmap_atomic(link);
+	first_page->freelist = (void *)obj;
+
+	first_page->inuse--;
+}
+
 static void reset_page(struct page *page)
 {
 	clear_bit(PG_private, &page->flags);
@@ -1049,7 +1064,6 @@ EXPORT_SYMBOL_GPL(zs_malloc);
 
 void zs_free(struct zs_pool *pool, unsigned long obj)
 {
-	struct link_free *link;
 	struct page *first_page, *f_page;
 	unsigned long f_objidx, f_offset;
 
@@ -1069,14 +1083,8 @@ void zs_free(struct zs_pool *pool, unsigned long obj)
 
 	spin_lock(&class->lock);
 
-	/* Insert this object in containing zspage's freelist */
-	link = (struct link_free *)((unsigned char *)kmap_atomic(f_page)
-							+ f_offset);
-	link->next = first_page->freelist;
-	kunmap_atomic(link);
-	first_page->freelist = (void *)obj;
+	obj_free(obj, f_page, f_offset);
 
-	first_page->inuse--;
 	fullness = fix_fullness_group(pool, first_page);
 
 	spin_unlock(&class->lock);
-- 
1.8.3.1
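
For reviewers, a minimal sketch of how the zspage-reclaim path motivating
this refactor might call the new helper.  reclaim_obj() is a hypothetical
name, not part of this patch or series; obj_handle_to_location() is assumed
to behave as the lookup zs_free() uses, and obj_idx_to_offset(),
get_first_page() and fix_fullness_group() are the existing helpers visible
in the diff above.

	/*
	 * Hypothetical reclaim-side caller (sketch only): resolve the
	 * object handle to its page and in-page offset, then return it
	 * to the zspage freelist under the class lock, exactly as
	 * zs_free() now does via obj_free().
	 */
	static void reclaim_obj(struct zs_pool *pool, struct size_class *class,
				unsigned long obj)
	{
		struct page *f_page;
		unsigned long f_objidx, f_offset;

		/* assumed handle->location lookup, as in zs_free() */
		obj_handle_to_location(obj, &f_page, &f_objidx);
		f_offset = obj_idx_to_offset(f_page, f_objidx, class->size);

		spin_lock(&class->lock);
		obj_free(obj, f_page, f_offset);
		/* re-evaluate the zspage's fullness group after the free */
		fix_fullness_group(pool, get_first_page(f_page));
		spin_unlock(&class->lock);
	}

A real reclaim path would additionally need to handle a now-empty zspage
(freeing it and updating pool accounting), as the tail of zs_free() does.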