On (05/09/16 11:20), Minchan Kim wrote:
> Currently, putback_zspage frees the zspage under class->lock
> if its fullness group becomes ZS_EMPTY, but that gets in the way of
> the locking scheme for the upcoming zspage migration.
> So, this patch separates free_zspage from putback_zspage and frees
> the zspage outside of class->lock, as preparation for zspage
> migration.
>
> Cc: Sergey Senozhatsky <sergey.senozhatsky@xxxxxxxxx>
> Signed-off-by: Minchan Kim <minchan@xxxxxxxxxx>

Reviewed-by: Sergey Senozhatsky <sergey.senozhatsky@xxxxxxxxx>

	-ss

> ---
>  mm/zsmalloc.c | 27 +++++++++++----------------
>  1 file changed, 11 insertions(+), 16 deletions(-)
>
> diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
> index 162a598a417a..5ccd83732a14 100644
> --- a/mm/zsmalloc.c
> +++ b/mm/zsmalloc.c
> @@ -1685,14 +1685,12 @@ static struct zspage *isolate_zspage(struct size_class *class, bool source)
>
>  /*
>   * putback_zspage - add @zspage into right class's fullness list
> - * @pool: target pool
>   * @class: destination class
>   * @zspage: target page
>   *
>   * Return @zspage's fullness_group
>   */
> -static enum fullness_group putback_zspage(struct zs_pool *pool,
> -				struct size_class *class,
> +static enum fullness_group putback_zspage(struct size_class *class,
>  				struct zspage *zspage)
>  {
>  	enum fullness_group fullness;
>
> @@ -1701,15 +1699,6 @@ static enum fullness_group putback_zspage(struct zs_pool *pool,
>  	insert_zspage(class, zspage, fullness);
>  	set_zspage_mapping(zspage, class->index, fullness);
>
> -	if (fullness == ZS_EMPTY) {
> -		zs_stat_dec(class, OBJ_ALLOCATED, get_maxobj_per_zspage(
> -				class->size, class->pages_per_zspage));
> -		atomic_long_sub(class->pages_per_zspage,
> -				&pool->pages_allocated);
> -
> -		free_zspage(pool, zspage);
> -	}
> -
>  	return fullness;
>  }
>
> @@ -1755,23 +1744,29 @@ static void __zs_compact(struct zs_pool *pool, struct size_class *class)
>  			if (!migrate_zspage(pool, class, &cc))
>  				break;
>
> -			putback_zspage(pool, class, dst_zspage);
> +			putback_zspage(class, dst_zspage);
>  		}
>
>  		/* Stop if we couldn't find slot */
>  		if (dst_zspage == NULL)
>  			break;
>
> -		putback_zspage(pool, class, dst_zspage);
> -		if (putback_zspage(pool, class, src_zspage) == ZS_EMPTY)
> +		putback_zspage(class, dst_zspage);
> +		if (putback_zspage(class, src_zspage) == ZS_EMPTY) {
> +			zs_stat_dec(class, OBJ_ALLOCATED, get_maxobj_per_zspage(
> +					class->size, class->pages_per_zspage));
> +			atomic_long_sub(class->pages_per_zspage,
> +					&pool->pages_allocated);
> +			free_zspage(pool, src_zspage);
 			pool->stats.pages_compacted += class->pages_per_zspage;
> +		}
>  		spin_unlock(&class->lock);
>  		cond_resched();
>  		spin_lock(&class->lock);
>  	}
>
>  	if (src_zspage)
> -		putback_zspage(pool, class, src_zspage);
> +		putback_zspage(class, src_zspage);
>
>  	spin_unlock(&class->lock);
>  }
> --
> 1.9.1
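
To restate the new contract for anyone skimming the thread: after this patch
putback_zspage() only re-inserts the zspage and reports its fullness group,
and the ZS_EMPTY handling (the stat updates plus free_zspage()) is the
caller's job. A condensed, not literal, sketch of the resulting pattern in
__zs_compact(), pulled from the hunks above with the migration loop elided:

	spin_lock(&class->lock);
	...
	putback_zspage(class, dst_zspage);
	if (putback_zspage(class, src_zspage) == ZS_EMPTY) {
		/* the caller, not putback_zspage(), drops the stats and frees */
		zs_stat_dec(class, OBJ_ALLOCATED, get_maxobj_per_zspage(
				class->size, class->pages_per_zspage));
		atomic_long_sub(class->pages_per_zspage,
				&pool->pages_allocated);
		free_zspage(pool, src_zspage);
		pool->stats.pages_compacted += class->pages_per_zspage;
	}
	spin_unlock(&class->lock);

In other words putback_zspage() no longer touches pool-wide state at all,
which is the part the upcoming migration locking work cares about.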