read_from_bdev always reads a whole page, so pass a page to it instead of
the bvec and remove the now pointless zram_bvec_read_from_bdev wrapper.

Signed-off-by: Christoph Hellwig <hch@xxxxxx>
Reviewed-by: Sergey Senozhatsky <senozhatsky@xxxxxxxxxxxx>
---
 drivers/block/zram/zram_drv.c | 46 +++++++++++++-----------------------------
 1 file changed, 17 insertions(+), 29 deletions(-)

diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 1bd6160a49b25c..9d066c6a4f8265 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -587,7 +587,7 @@ static void zram_page_end_io(struct bio *bio)
 /*
  * Returns 1 if the submission is successful.
  */
-static int read_from_bdev_async(struct zram *zram, struct bio_vec *bvec,
+static int read_from_bdev_async(struct zram *zram, struct page *page,
 			unsigned long entry, struct bio *parent)
 {
 	struct bio *bio;
@@ -598,7 +598,7 @@ static int read_from_bdev_async(struct zram *zram, struct bio_vec *bvec,
 		return -ENOMEM;
 
 	bio->bi_iter.bi_sector = entry * (PAGE_SIZE >> 9);
-	if (!bio_add_page(bio, bvec->bv_page, bvec->bv_len, bvec->bv_offset)) {
+	if (!bio_add_page(bio, page, PAGE_SIZE, 0)) {
 		bio_put(bio);
 		return -EIO;
 	}
@@ -794,7 +794,7 @@ struct zram_work {
 	struct zram *zram;
 	unsigned long entry;
 	struct bio *bio;
-	struct bio_vec bvec;
+	struct page *page;
 };
 
 #if PAGE_SIZE != 4096
@@ -805,7 +805,7 @@ static void zram_sync_read(struct work_struct *work)
 	unsigned long entry = zw->entry;
 	struct bio *bio = zw->bio;
 
-	read_from_bdev_async(zram, &zw->bvec, entry, bio);
+	read_from_bdev_async(zram, zw->page, entry, bio);
 }
 
 /*
@@ -813,12 +813,12 @@ static void zram_sync_read(struct work_struct *work)
  * chained IO with parent IO in same context, it's a deadlock. To avoid that,
  * use a worker thread context.
  */
-static int read_from_bdev_sync(struct zram *zram, struct bio_vec *bvec,
+static int read_from_bdev_sync(struct zram *zram, struct page *page,
 				unsigned long entry, struct bio *bio)
 {
 	struct zram_work work;
 
-	work.bvec = *bvec;
+	work.page = page;
 	work.zram = zram;
 	work.entry = entry;
 	work.bio = bio;
@@ -831,7 +831,7 @@ static int read_from_bdev_sync(struct zram *zram, struct bio_vec *bvec,
 	return 1;
 }
 #else
-static int read_from_bdev_sync(struct zram *zram, struct bio_vec *bvec,
+static int read_from_bdev_sync(struct zram *zram, struct page *page,
 				unsigned long entry, struct bio *bio)
 {
 	WARN_ON(1);
@@ -839,18 +839,17 @@ static int read_from_bdev_sync(struct zram *zram, struct bio_vec *bvec,
 }
 #endif
 
-static int read_from_bdev(struct zram *zram, struct bio_vec *bvec,
+static int read_from_bdev(struct zram *zram, struct page *page,
 			unsigned long entry, struct bio *parent, bool sync)
 {
 	atomic64_inc(&zram->stats.bd_reads);
 	if (sync)
-		return read_from_bdev_sync(zram, bvec, entry, parent);
-	else
-		return read_from_bdev_async(zram, bvec, entry, parent);
+		return read_from_bdev_sync(zram, page, entry, parent);
+	return read_from_bdev_async(zram, page, entry, parent);
 }
 #else
 static inline void reset_bdev(struct zram *zram) {};
-static int read_from_bdev(struct zram *zram, struct bio_vec *bvec,
+static int read_from_bdev(struct zram *zram, struct page *page,
 			unsigned long entry, struct bio *parent, bool sync)
 {
 	return -EIO;
@@ -1334,20 +1333,6 @@ static void zram_free_page(struct zram *zram, size_t index)
 		     ~(1UL << ZRAM_LOCK | 1UL << ZRAM_UNDER_WB));
 }
 
-/*
- * Reads a page from the writeback devices. Corresponding ZRAM slot
- * should be unlocked.
- */
-static int zram_bvec_read_from_bdev(struct zram *zram, struct page *page,
-				    u32 index, struct bio *bio, bool partial_io)
-{
-	struct bio_vec bvec;
-
-	bvec_set_page(&bvec, page, PAGE_SIZE, 0);
-	return read_from_bdev(zram, &bvec, zram_get_element(zram, index), bio,
-			      partial_io);
-}
-
 /*
  * Reads (decompresses if needed) a page from zspool (zsmalloc).
  * Corresponding ZRAM slot should be locked.
@@ -1408,11 +1393,14 @@ static int zram_read_page(struct zram *zram, struct page *page, u32 index,
 		ret = zram_read_from_zspool(zram, page, index);
 		zram_slot_unlock(zram, index);
 	} else {
-		/* Slot should be unlocked before the function call */
+		/*
+		 * The slot should be unlocked before reading from the backing
+		 * device.
+		 */
 		zram_slot_unlock(zram, index);
 
-		ret = zram_bvec_read_from_bdev(zram, page, index, bio,
-					       partial_io);
+		ret = read_from_bdev(zram, page, zram_get_element(zram, index),
+				     bio, partial_io);
 	}
 
 	/* Should NEVER happen. Return bio error if it does. */
-- 
2.39.2