Re: [PATCHv2 1/3] block/bio: remove duplicate append pages code

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



On Thu, May 19, 2022 at 09:32:56AM +0200, Christoph Hellwig wrote:
> On Wed, May 18, 2022 at 10:11:29AM -0700, Keith Busch wrote:
> > From: Keith Busch <kbusch@xxxxxxxxxx>
> > 
> > The setup for getting pages is identical for zone append and normal IO.
> > Use common code for each.
> 
> How about using even more common code and avoiding churn at the same
> time?  Something like:

Yes, I'll fold this in.
 
> diff --git a/block/bio.c b/block/bio.c
> index a3893d80dccc9..15da722ed26d1 100644
> --- a/block/bio.c
> +++ b/block/bio.c
> @@ -1158,6 +1158,37 @@ static void bio_put_pages(struct page **pages, size_t size, size_t off)
>  		put_page(pages[i]);
>  }
>  
> +static int bio_iov_add_page(struct bio *bio, struct page *page,
> +		unsigned int len, unsigned int offset)
> +{
> +	bool same_page = false;
> +
> +	if (!__bio_try_merge_page(bio, page, len, offset, &same_page)) {
> +		if (WARN_ON_ONCE(bio_full(bio, len)))
> +			return -EINVAL;
> +		__bio_add_page(bio, page, len, offset);
> +		return 0;
> +	}
> +
> +	if (same_page)
> +		put_page(page);
> +	return 0;
> +}
> +
> +static int bio_iov_add_zone_append_page(struct bio *bio, struct page *page,
> +		unsigned int len, unsigned int offset)
> +{
> +	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
> +	bool same_page = false;
> +
> +	if (bio_add_hw_page(q, bio, page, len, offset,
> +			queue_max_zone_append_sectors(q), &same_page) != len)
> +		return -EINVAL;
> +	if (same_page)
> +		put_page(page);
> +	return 0;
> +}
> +
>  #define PAGE_PTRS_PER_BVEC     (sizeof(struct bio_vec) / sizeof(struct page *))
>  
>  /**
> @@ -1176,7 +1207,6 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
>  	unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt;
>  	struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
>  	struct page **pages = (struct page **)bv;
> -	bool same_page = false;
>  	ssize_t size, left;
>  	unsigned len, i;
>  	size_t offset;
> @@ -1195,18 +1225,18 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
>  
>  	for (left = size, i = 0; left > 0; left -= len, i++) {
>  		struct page *page = pages[i];
> +		int ret;
>  
>  		len = min_t(size_t, PAGE_SIZE - offset, left);
> +		if (bio_op(bio) == REQ_OP_ZONE_APPEND)	
> +			ret = bio_iov_add_zone_append_page(bio, page, len,
> +					offset);
> +		else
> +			ret = bio_iov_add_page(bio, page, len, offset);
>  
> -		if (__bio_try_merge_page(bio, page, len, offset, &same_page)) {
> -			if (same_page)
> -				put_page(page);
> -		} else {
> -			if (WARN_ON_ONCE(bio_full(bio, len))) {
> -				bio_put_pages(pages + i, left, offset);
> -				return -EINVAL;
> -			}
> -			__bio_add_page(bio, page, len, offset);
> +		if (ret) {
> +			bio_put_pages(pages + i, left, offset);
> +			return ret;
>  		}
>  		offset = 0;
>  	}
> @@ -1215,54 +1245,6 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
>  	return 0;
>  }
>  
> -static int __bio_iov_append_get_pages(struct bio *bio, struct iov_iter *iter)
> -{
> -	unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt;
> -	unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt;
> -	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
> -	unsigned int max_append_sectors = queue_max_zone_append_sectors(q);
> -	struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
> -	struct page **pages = (struct page **)bv;
> -	ssize_t size, left;
> -	unsigned len, i;
> -	size_t offset;
> -	int ret = 0;
> -
> -	if (WARN_ON_ONCE(!max_append_sectors))
> -		return 0;
> -
> -	/*
> -	 * Move page array up in the allocated memory for the bio vecs as far as
> -	 * possible so that we can start filling biovecs from the beginning
> -	 * without overwriting the temporary page array.
> -	 */
> -	BUILD_BUG_ON(PAGE_PTRS_PER_BVEC < 2);
> -	pages += entries_left * (PAGE_PTRS_PER_BVEC - 1);
> -
> -	size = iov_iter_get_pages(iter, pages, LONG_MAX, nr_pages, &offset);
> -	if (unlikely(size <= 0))
> -		return size ? size : -EFAULT;
> -
> -	for (left = size, i = 0; left > 0; left -= len, i++) {
> -		struct page *page = pages[i];
> -		bool same_page = false;
> -
> -		len = min_t(size_t, PAGE_SIZE - offset, left);
> -		if (bio_add_hw_page(q, bio, page, len, offset,
> -				max_append_sectors, &same_page) != len) {
> -			bio_put_pages(pages + i, left, offset);
> -			ret = -EINVAL;
> -			break;
> -		}
> -		if (same_page)
> -			put_page(page);
> -		offset = 0;
> -	}
> -
> -	iov_iter_advance(iter, size - left);
> -	return ret;
> -}
> -
>  /**
>   * bio_iov_iter_get_pages - add user or kernel pages to a bio
>   * @bio: bio to add pages to
> @@ -1297,10 +1279,7 @@ int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
>  	}
>  
>  	do {
> -		if (bio_op(bio) == REQ_OP_ZONE_APPEND)
> -			ret = __bio_iov_append_get_pages(bio, iter);
> -		else
> -			ret = __bio_iov_iter_get_pages(bio, iter);
> +		ret = __bio_iov_iter_get_pages(bio, iter);
>  	} while (!ret && iov_iter_count(iter) && !bio_full(bio, 0));
>  
>  	/* don't account direct I/O as memory stall */



[Index of Archives]     [Linux RAID]     [Linux SCSI]     [Linux ATA RAID]     [IDE]     [Linux Wireless]     [Linux Kernel]     [ATH6KL]     [Linux Bluetooth]     [Linux Netdev]     [Kernel Newbies]     [Security]     [Git]     [Netfilter]     [Bugtraq]     [Yosemite News]     [MIPS Linux]     [ARM Linux]     [Linux Security]     [Device Mapper]

  Powered by Linux