On 8/12/21 9:18 AM, Christoph Hellwig wrote:
> On Thu, Aug 12, 2021 at 09:08:29AM -0600, Jens Axboe wrote:
>>>> +        cache = per_cpu_ptr(bs->cache, get_cpu());
>>>> +        bio = bio_list_pop(&cache->free_list);
>>>> +        if (bio) {
>>>> +                cache->nr--;
>>>> +                put_cpu();
>>>> +                bio_init(bio, nr_vecs ? bio->bi_inline_vecs : NULL, nr_vecs);
>>>> +                bio_set_flag(bio, BIO_PERCPU_CACHE);
>>>> +                return bio;
>>>> +        }
>>>> +        put_cpu();
>>>> +normal_alloc:
>>>> +        bio = bio_alloc_bioset(gfp, nr_vecs, bs);
>>>> +        if (cache)
>>>> +                bio_set_flag(bio, BIO_PERCPU_CACHE);
>>>> +        return bio;
>>>
>>> The goto here is pretty obfuscating and adds an extra branch to the
>>> fast path.
>>
>> I don't agree, and it's not the fast path - the fast path is popping
>> a bio off the list, not hitting the allocator.
>
> Oh, I see you special case the list pop return now. Still seems much
> easier to follow to avoid the goto, the cache initialization and the
> conditional in the case where no bio is found in the list (see patch
> below).
>
> diff --git a/block/bio.c b/block/bio.c
> index 689335c00937..b42621cecbef 100644
> --- a/block/bio.c
> +++ b/block/bio.c
> @@ -1707,11 +1707,11 @@ EXPORT_SYMBOL(bioset_init_from_src);
>  struct bio *bio_alloc_kiocb(struct kiocb *kiocb, gfp_t gfp,
>                  unsigned short nr_vecs, struct bio_set *bs)
>  {
> -        struct bio_alloc_cache *cache = NULL;
> +        struct bio_alloc_cache *cache;
>          struct bio *bio;
> 
>          if (!(kiocb->ki_flags & IOCB_ALLOC_CACHE) || nr_vecs > BIO_INLINE_VECS)
> -                goto normal_alloc;
> +                return bio_alloc_bioset(gfp, nr_vecs, bs);
> 
>          cache = per_cpu_ptr(bs->cache, get_cpu());
>          bio = bio_list_pop(&cache->free_list);
> @@ -1723,10 +1723,8 @@ struct bio *bio_alloc_kiocb(struct kiocb *kiocb, gfp_t gfp,
>                  return bio;
>          }
>          put_cpu();
> -normal_alloc:
>          bio = bio_alloc_bioset(gfp, nr_vecs, bs);
> -        if (cache)
> -                bio_set_flag(bio, BIO_PERCPU_CACHE);
> +        bio_set_flag(bio, BIO_PERCPU_CACHE);
>          return bio;
>  }

Sure, if that'll shut it down, I'll make the edit :-)

-- 
Jens Axboe
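
[Editor's note: for readers following the thread, below is a minimal,
hypothetical caller sketch, not part of the patch series, assuming the
bio_alloc_kiocb() signature shown in the diff above. It illustrates how a
submission path would opt in to the per-CPU bio cache by setting
IOCB_ALLOC_CACHE on its kiocb; requests without the flag, or needing more
than BIO_INLINE_VECS vectors, fall back to bio_alloc_bioset().]

/*
 * Hypothetical caller sketch (not from the series): opt this request in to
 * the per-CPU bio cache, then allocate the bio through bio_alloc_kiocb().
 */
static struct bio *example_prep_bio(struct kiocb *kiocb,
                                    unsigned short nr_vecs,
                                    struct bio_set *bs)
{
        /* Opt in to the per-CPU bio cache for this request. */
        kiocb->ki_flags |= IOCB_ALLOC_CACHE;

        /*
         * When the flag is set and nr_vecs fits inline, bio_alloc_kiocb()
         * pops a bio from the per-CPU free list; otherwise it falls back
         * to the regular bio_alloc_bioset() path.
         */
        return bio_alloc_kiocb(kiocb, GFP_KERNEL, nr_vecs, bs);
}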