On (24/03/28 10:48), Barry Song wrote:

[..]

> +/*
> + * Use a temporary buffer to decompress the page, as the decompressor
> + * always expects a full page for the output.
> + */
> +static int zram_bvec_read_multi_pages_partial(struct zram *zram, struct bio_vec *bvec,
> +				u32 index, int offset)
> +{
> +	struct page *page = alloc_pages(GFP_NOIO | __GFP_COMP, ZCOMP_MULTI_PAGES_ORDER);
> +	int ret;
> +
> +	if (!page)
> +		return -ENOMEM;
> +	ret = zram_read_multi_pages(zram, page, index, NULL);
> +	if (likely(!ret)) {
> +		atomic64_inc(&zram->stats.zram_bio_read_multi_pages_partial_count);
> +		void *dst = kmap_local_page(bvec->bv_page);
> +		void *src = kmap_local_page(page);
> +
> +		memcpy(dst + bvec->bv_offset, src + offset, bvec->bv_len);
> +		kunmap_local(src);
> +		kunmap_local(dst);
> +	}
> +	__free_pages(page, ZCOMP_MULTI_PAGES_ORDER);
> +	return ret;
> +}

[..]

> +static int zram_bvec_write_multi_pages_partial(struct zram *zram, struct bio_vec *bvec,
> +				u32 index, int offset, struct bio *bio)
> +{
> +	struct page *page = alloc_pages(GFP_NOIO | __GFP_COMP, ZCOMP_MULTI_PAGES_ORDER);
> +	int ret;
> +	void *src, *dst;
> +
> +	if (!page)
> +		return -ENOMEM;
> +
> +	ret = zram_read_multi_pages(zram, page, index, bio);
> +	if (!ret) {
> +		src = kmap_local_page(bvec->bv_page);
> +		dst = kmap_local_page(page);
> +		memcpy(dst + offset, src + bvec->bv_offset, bvec->bv_len);
> +		kunmap_local(dst);
> +		kunmap_local(src);
> +
> +		atomic64_inc(&zram->stats.zram_bio_write_multi_pages_partial_count);
> +		ret = zram_write_page(zram, page, index);
> +	}
> +	__free_pages(page, ZCOMP_MULTI_PAGES_ORDER);
> +	return ret;
> +}

What type of testing did you run on it? How often do you see partial
reads and writes? Because this looks concerning - zsmalloc memory usage
reduction is one metric, but it can also be achieved via recompression,
writeback, or even a different compression algorithm, whereas higher
CPU/power usage and higher requirements for physically contiguous pages
cannot be offset easily.

(Another corner case: assume we have partial read requests on every CPU
simultaneously.)
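To put a rough number on that corner case: every in-flight partial IO holds
one physically contiguous buffer of PAGE_SIZE << ZCOMP_MULTI_PAGES_ORDER,
allocated with GFP_NOIO. Below is a userspace-only back-of-the-envelope
sketch; the order value is an assumption (the actual ZCOMP_MULTI_PAGES_ORDER
comes from the patch and may differ), and it ignores anything beyond the
temporary buffers themselves.

/*
 * Rough worst-case estimate of the temporary buffer footprint when one
 * partial read/write is in flight on every CPU at the same time.
 * ASSUMED_MULTI_PAGES_ORDER is hypothetical; substitute the patch's
 * ZCOMP_MULTI_PAGES_ORDER.
 */
#include <stdio.h>
#include <unistd.h>

#define ASSUMED_MULTI_PAGES_ORDER 2

int main(void)
{
	long page_size = sysconf(_SC_PAGESIZE);
	long nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
	long per_io = page_size << ASSUMED_MULTI_PAGES_ORDER;

	printf("per partial IO: %ld KB physically contiguous (GFP_NOIO)\n",
	       per_io / 1024);
	printf("one partial IO per CPU (%ld CPUs): %ld KB\n",
	       nr_cpus, nr_cpus * per_io / 1024);
	return 0;
}

Each of those allocations also has to be satisfied from a higher-order
free area, which is exactly the "physically contig pages" requirement
mentioned above.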