Re: [PATCH 2/6] block: change the blk_queue_bounce calling convention

On 7/27/22 03:30, Christoph Hellwig wrote:
> The double indirection on the bio leads to somewhat suboptimal code
> generation. Instead, return the (original or split) bio, and make sure
> the request_queue argument to the lower level helpers is passed after
> the bio to avoid constant reshuffling of the argument passing registers.
> 
> Signed-off-by: Christoph Hellwig <hch@xxxxxx>

Looks OK to me.

Reviewed-by: Damien Le Moal <damien.lemoal@xxxxxxxxxxxxxxxxxx>
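
For anyone skimming the thread, the code generation win comes from
dropping the struct bio ** double indirection: with the old convention
the compiler has to assume the callee stored through the pointer, so the
caller spills bio to the stack and reloads it after every call, whereas
returning the bio keeps it in a register. A minimal sketch of the two
conventions (the _old/_new names are illustrative only, not the actual
kernel symbols):

  struct bio;
  struct request_queue;
  void submit_bio_noacct(struct bio *bio);

  /* Old convention: the callee may store through the pointer, so the
   * caller must reload bio from memory after the call. */
  void blk_queue_bounce_old(struct request_queue *q, struct bio **bio);

  /* New convention: the (original or bounced) bio comes back in the
   * return register. */
  struct bio *blk_queue_bounce_new(struct bio *bio, struct request_queue *q);

  void caller_old(struct request_queue *q, struct bio *bio)
  {
          blk_queue_bounce_old(q, &bio);  /* &bio forces a stack slot */
          submit_bio_noacct(bio);         /* bio reloaded from memory */
  }

  void caller_new(struct request_queue *q, struct bio *bio)
  {
          bio = blk_queue_bounce_new(bio, q);
          submit_bio_noacct(bio);         /* bio never left registers */
  }

Putting the bio first also means it is already sitting in the first
argument register along the submit path, which is what the commit
message means by avoiding reshuffling.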

> ---
>  block/blk-mq.c |  2 +-
>  block/blk.h    | 10 ++++++----
>  block/bounce.c | 26 +++++++++++++-------------
>  3 files changed, 20 insertions(+), 18 deletions(-)
> 
> diff --git a/block/blk-mq.c b/block/blk-mq.c
> index 790f55453f1b1..7adba3eeba1c6 100644
> --- a/block/blk-mq.c
> +++ b/block/blk-mq.c
> @@ -2815,7 +2815,7 @@ void blk_mq_submit_bio(struct bio *bio)
>  	unsigned int nr_segs = 1;
>  	blk_status_t ret;
>  
> -	blk_queue_bounce(q, &bio);
> +	bio = blk_queue_bounce(bio, q);
>  	if (bio_may_exceed_limits(bio, q))
>  		bio = __bio_split_to_limits(bio, q, &nr_segs);
>  
> diff --git a/block/blk.h b/block/blk.h
> index 623be4c2e60c1..f50c8fcded99e 100644
> --- a/block/blk.h
> +++ b/block/blk.h
> @@ -378,7 +378,7 @@ static inline void blk_throtl_bio_endio(struct bio *bio) { }
>  static inline void blk_throtl_stat_add(struct request *rq, u64 time) { }
>  #endif
>  
> -void __blk_queue_bounce(struct request_queue *q, struct bio **bio);
> +struct bio *__blk_queue_bounce(struct bio *bio, struct request_queue *q);
>  
>  static inline bool blk_queue_may_bounce(struct request_queue *q)
>  {
> @@ -387,10 +387,12 @@ static inline bool blk_queue_may_bounce(struct request_queue *q)
>  		max_low_pfn >= max_pfn;
>  }
>  
> -static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
> +static inline struct bio *blk_queue_bounce(struct bio *bio,
> +		struct request_queue *q)
>  {
> -	if (unlikely(blk_queue_may_bounce(q) && bio_has_data(*bio)))
> -		__blk_queue_bounce(q, bio);	
> +	if (unlikely(blk_queue_may_bounce(q) && bio_has_data(bio)))
> +		return __blk_queue_bounce(bio, q);
> +	return bio;
>  }
>  
>  #ifdef CONFIG_BLK_CGROUP_IOLATENCY
> diff --git a/block/bounce.c b/block/bounce.c
> index c8f487af7be37..7cfcb242f9a11 100644
> --- a/block/bounce.c
> +++ b/block/bounce.c
> @@ -199,24 +199,24 @@ static struct bio *bounce_clone_bio(struct bio *bio_src)
>  	return NULL;
>  }
>  
> -void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
> +struct bio *__blk_queue_bounce(struct bio *bio_orig, struct request_queue *q)
>  {
>  	struct bio *bio;
> -	int rw = bio_data_dir(*bio_orig);
> +	int rw = bio_data_dir(bio_orig);
>  	struct bio_vec *to, from;
>  	struct bvec_iter iter;
>  	unsigned i = 0, bytes = 0;
>  	bool bounce = false;
>  	int sectors;
>  
> -	bio_for_each_segment(from, *bio_orig, iter) {
> +	bio_for_each_segment(from, bio_orig, iter) {
>  		if (i++ < BIO_MAX_VECS)
>  			bytes += from.bv_len;
>  		if (PageHighMem(from.bv_page))
>  			bounce = true;
>  	}
>  	if (!bounce)
> -		return;
> +		return bio_orig;
>  
>  	/*
>  	 * Individual bvecs might not be logical block aligned. Round down
> @@ -225,13 +225,13 @@ void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
>  	 */
>  	sectors = ALIGN_DOWN(bytes, queue_logical_block_size(q)) >>
>  			SECTOR_SHIFT;
> -	if (sectors < bio_sectors(*bio_orig)) {
> -		bio = bio_split(*bio_orig, sectors, GFP_NOIO, &bounce_bio_split);
> -		bio_chain(bio, *bio_orig);
> -		submit_bio_noacct(*bio_orig);
> -		*bio_orig = bio;
> +	if (sectors < bio_sectors(bio_orig)) {
> +		bio = bio_split(bio_orig, sectors, GFP_NOIO, &bounce_bio_split);
> +		bio_chain(bio, bio_orig);
> +		submit_bio_noacct(bio_orig);
> +		bio_orig = bio;
>  	}
> -	bio = bounce_clone_bio(*bio_orig);
> +	bio = bounce_clone_bio(bio_orig);
>  
>  	/*
>  	 * Bvec table can't be updated by bio_for_each_segment_all(),
> @@ -254,7 +254,7 @@ void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
>  		to->bv_page = bounce_page;
>  	}
>  
> -	trace_block_bio_bounce(*bio_orig);
> +	trace_block_bio_bounce(bio_orig);
>  
>  	bio->bi_flags |= (1 << BIO_BOUNCED);
>  
> @@ -263,6 +263,6 @@ void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
>  	else
>  		bio->bi_end_io = bounce_end_io_write;
>  
> -	bio->bi_private = *bio_orig;
> -	*bio_orig = bio;
> +	bio->bi_private = bio_orig;
> +	return bio;
>  }
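
As an aside for readers following the split path above: the round-down
works out as, say, bytes = 4608 accumulated over the first BIO_MAX_VECS
segments with a 4096-byte logical block size, so
ALIGN_DOWN(4608, 4096) = 4096 and sectors = 4096 >> SECTOR_SHIFT = 8;
if the original bio is larger than 8 sectors, the first 8 are split off
for bouncing and the remainder is chained back and resubmitted.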


-- 
Damien Le Moal
Western Digital Research