These changes allow DM core to make full use of BIOSET_PERCPU_CACHE for
REQ_POLLED bios:

Factor out bio_alloc_percpu_cache() from bio_alloc_kiocb() to allow use
by bio_alloc_clone() too.

Update bioset_init_from_src() to set BIOSET_PERCPU_CACHE if src->cache
is not NULL.

Move bio_clear_polled() to include/linux/bio.h to allow users outside
of block core.

Signed-off-by: Mike Snitzer <snitzer@xxxxxxxxxx>
---
 block/bio.c         | 56 +++++++++++++++++++++++++++++++++--------------------
 block/blk.h         |  7 -------
 include/linux/bio.h |  7 +++++++
 3 files changed, 42 insertions(+), 28 deletions(-)

diff --git a/block/bio.c b/block/bio.c
index b15f5466ce08..a7633aa82d7d 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -420,6 +420,33 @@ static void punt_bios_to_rescuer(struct bio_set *bs)
 	queue_work(bs->rescue_workqueue, &bs->rescue_work);
 }
 
+static struct bio *bio_alloc_percpu_cache(struct block_device *bdev,
+		unsigned short nr_vecs, unsigned int opf, gfp_t gfp,
+		struct bio_set *bs)
+{
+	struct bio_alloc_cache *cache;
+	struct bio *bio;
+
+	cache = per_cpu_ptr(bs->cache, get_cpu());
+	if (cache->free_list) {
+		bio = cache->free_list;
+		cache->free_list = bio->bi_next;
+		cache->nr--;
+		put_cpu();
+		bio_init(bio, bdev, nr_vecs ? bio->bi_inline_vecs : NULL,
+			 nr_vecs, opf);
+		bio->bi_pool = bs;
+		bio_set_flag(bio, BIO_PERCPU_CACHE);
+		return bio;
+	}
+	put_cpu();
+	bio = bio_alloc_bioset(bdev, nr_vecs, opf, gfp, bs);
+	if (!bio)
+		return NULL;
+	bio_set_flag(bio, BIO_PERCPU_CACHE);
+	return bio;
+}
+
 /**
  * bio_alloc_bioset - allocate a bio for I/O
  * @bdev:	block device to allocate the bio for (can be %NULL)
@@ -768,7 +795,10 @@ struct bio *bio_alloc_clone(struct block_device *bdev, struct bio *bio_src,
 {
 	struct bio *bio;
 
-	bio = bio_alloc_bioset(bdev, 0, bio_src->bi_opf, gfp, bs);
+	if (bs->cache && bio_src->bi_opf & REQ_POLLED)
+		bio = bio_alloc_percpu_cache(bdev, 0, bio_src->bi_opf, gfp, bs);
+	else
+		bio = bio_alloc_bioset(bdev, 0, bio_src->bi_opf, gfp, bs);
 	if (!bio)
 		return NULL;
 
@@ -1736,6 +1766,8 @@ int bioset_init_from_src(struct bio_set *bs, struct bio_set *src)
 		flags |= BIOSET_NEED_BVECS;
 	if (src->rescue_workqueue)
 		flags |= BIOSET_NEED_RESCUER;
+	if (src->cache)
+		flags |= BIOSET_PERCPU_CACHE;
 
 	return bioset_init(bs, src->bio_pool.min_nr, src->front_pad, flags);
 }
@@ -1753,35 +1785,17 @@ EXPORT_SYMBOL(bioset_init_from_src);
  * Like @bio_alloc_bioset, but pass in the kiocb. The kiocb is only
  * used to check if we should dip into the per-cpu bio_set allocation
  * cache. The allocation uses GFP_KERNEL internally. On return, the
- * bio is marked BIO_PERCPU_CACHEABLE, and the final put of the bio
+ * bio is marked BIO_PERCPU_CACHE, and the final put of the bio
  * MUST be done from process context, not hard/soft IRQ.
  *
  */
 struct bio *bio_alloc_kiocb(struct kiocb *kiocb, struct block_device *bdev,
 		unsigned short nr_vecs, unsigned int opf, struct bio_set *bs)
 {
-	struct bio_alloc_cache *cache;
-	struct bio *bio;
-
 	if (!(kiocb->ki_flags & IOCB_ALLOC_CACHE) || nr_vecs > BIO_INLINE_VECS)
 		return bio_alloc_bioset(bdev, nr_vecs, opf, GFP_KERNEL, bs);
 
-	cache = per_cpu_ptr(bs->cache, get_cpu());
-	if (cache->free_list) {
-		bio = cache->free_list;
-		cache->free_list = bio->bi_next;
-		cache->nr--;
-		put_cpu();
-		bio_init(bio, bdev, nr_vecs ? bio->bi_inline_vecs : NULL,
-			 nr_vecs, opf);
-		bio->bi_pool = bs;
-		bio_set_flag(bio, BIO_PERCPU_CACHE);
-		return bio;
-	}
-	put_cpu();
-	bio = bio_alloc_bioset(bdev, nr_vecs, opf, GFP_KERNEL, bs);
-	bio_set_flag(bio, BIO_PERCPU_CACHE);
-	return bio;
+	return bio_alloc_percpu_cache(bdev, nr_vecs, opf, GFP_KERNEL, bs);
 }
 EXPORT_SYMBOL_GPL(bio_alloc_kiocb);
 
diff --git a/block/blk.h b/block/blk.h
index ebaa59ca46ca..8e338e76d303 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -451,13 +451,6 @@ extern struct device_attribute dev_attr_events;
 extern struct device_attribute dev_attr_events_async;
 extern struct device_attribute dev_attr_events_poll_msecs;
 
-static inline void bio_clear_polled(struct bio *bio)
-{
-	/* can't support alloc cache if we turn off polling */
-	bio_clear_flag(bio, BIO_PERCPU_CACHE);
-	bio->bi_opf &= ~REQ_POLLED;
-}
-
 long blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg);
 long compat_blkdev_ioctl(struct file *file, unsigned cmd,
 		unsigned long arg);
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 7523aba4ddf7..709663ae757a 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -787,6 +787,13 @@ static inline void bio_set_polled(struct bio *bio, struct kiocb *kiocb)
 		bio->bi_opf |= REQ_NOWAIT;
 }
 
+static inline void bio_clear_polled(struct bio *bio)
+{
+	/* can't support alloc cache if we turn off polling */
+	bio_clear_flag(bio, BIO_PERCPU_CACHE);
+	bio->bi_opf &= ~REQ_POLLED;
+}
+
 struct bio *blk_next_bio(struct bio *bio, struct block_device *bdev,
 		unsigned int nr_pages, unsigned int opf, gfp_t gfp);
 
-- 
2.15.0
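
For reviewers, a minimal usage sketch (not part of this patch) of how a
stacking driver could opt in once this lands: the driver's bio_set is
created with BIOSET_PERCPU_CACHE, so bio_alloc_clone() serves REQ_POLLED
clones from the per-cpu cache and falls back to bio_alloc_bioset()
otherwise. The my_stacking_dev/my_dev_* names and the pool size are
hypothetical; bioset_init(), bio_alloc_clone(), bio_io_error() and
submit_bio_noacct() are the existing block-layer APIs touched above.

/* Hypothetical stacking-driver sketch; only the bioset/bio calls are real. */
#include <linux/bio.h>
#include <linux/blkdev.h>

struct my_stacking_dev {
	struct block_device *lower_bdev;	/* device we remap to */
	struct bio_set bs;			/* per-device clone bio_set */
};

static int my_dev_init_bioset(struct my_stacking_dev *d)
{
	/*
	 * BIOSET_PERCPU_CACHE makes bs->cache non-NULL, which is what
	 * bio_alloc_clone() checks before taking the per-cpu fast path
	 * (and what bioset_init_from_src() now propagates as well).
	 */
	return bioset_init(&d->bs, BIO_POOL_SIZE, 0,
			   BIOSET_NEED_BVECS | BIOSET_PERCPU_CACHE);
}

static void my_dev_remap_and_submit(struct my_stacking_dev *d, struct bio *bio)
{
	struct bio *clone;

	/* REQ_POLLED bios are cloned from the per-cpu cache. */
	clone = bio_alloc_clone(d->lower_bdev, bio, GFP_NOIO, &d->bs);
	if (!clone) {
		bio_io_error(bio);
		return;
	}

	/* sector remapping and clone->bi_end_io wiring elided */
	submit_bio_noacct(clone);
}

Since such clones carry BIO_PERCPU_CACHE, their final bio_put() must
happen from process context, matching the bio_alloc_kiocb() kerneldoc
above.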