It makes no sense for us to recompress the object if it will land in the same size class: we don't get any memory gain, but we do pay a CPU time overhead for inserting this object into a zspage and later decompressing it. Signed-off-by: Alexey Romanov <avromanov@xxxxxxxxxxxxxx> --- drivers/block/zram/zram_drv.c | 5 +++++ include/linux/zsmalloc.h | 2 ++ mm/zsmalloc.c | 20 ++++++++++++++++++++ 3 files changed, 27 insertions(+) diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index 364323713393..bf610cf6a09c 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -1632,6 +1632,8 @@ static int zram_recompress(struct zram *zram, u32 index, struct page *page, unsigned long handle_next; unsigned int comp_len_next; unsigned int comp_len_prev; + unsigned int class_size_next; + unsigned int class_size_prev; struct zcomp_strm *zstrm; void *src, *dst; int ret; @@ -1656,6 +1658,8 @@ static int zram_recompress(struct zram *zram, u32 index, struct page *page, ret = zcomp_compress(zstrm, src, &comp_len_next); kunmap_atomic(src); + class_size_prev = zs_get_class_size(zram->mem_pool, comp_len_prev); + class_size_next = zs_get_class_size(zram->mem_pool, comp_len_next); /* * Either a compression error or we failed to compressed the object * in a way that will save us memory. 
Mark the object so that we @@ -1663,6 +1667,7 @@ static int zram_recompress(struct zram *zram, u32 index, struct page *page, */ if (comp_len_next >= huge_class_size || comp_len_next >= comp_len_prev || + class_size_next == class_size_prev || ret) { zram_set_flag(zram, index, ZRAM_RECOMP_SKIP); zram_clear_flag(zram, index, ZRAM_IDLE); diff --git a/include/linux/zsmalloc.h b/include/linux/zsmalloc.h index 2a430e713ce5..75dcbafd5f36 100644 --- a/include/linux/zsmalloc.h +++ b/include/linux/zsmalloc.h @@ -56,4 +56,6 @@ unsigned long zs_get_total_pages(struct zs_pool *pool); unsigned long zs_compact(struct zs_pool *pool); void zs_pool_stats(struct zs_pool *pool, struct zs_pool_stats *stats); + +unsigned int zs_get_class_size(struct zs_pool *pool, unsigned int size); #endif diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index d03941cace2c..148451385445 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -1205,6 +1205,26 @@ static bool zspage_full(struct size_class *class, struct zspage *zspage) return get_zspage_inuse(zspage) == class->objs_per_zspage; } +/** + * zs_get_class_size() - Return the size (in bytes) of the + * zsmalloc &size_class into which an object of the specified + * size will be (or already is) inserted. + * + * @pool: zsmalloc pool to use + * @size: object size in bytes + * + * Context: Any context. + * + * Return: the size (in bytes) of the zsmalloc &size_class into which + * an object of the specified size will be inserted. + */ +unsigned int zs_get_class_size(struct zs_pool *pool, unsigned int size) +{ + struct size_class *class = pool->size_class[get_size_class_index(size)]; + + return class->size; +} +EXPORT_SYMBOL_GPL(zs_get_class_size); + unsigned long zs_get_total_pages(struct zs_pool *pool) { return atomic_long_read(&pool->pages_allocated); -- 2.25.1