Get callers out of poking into bvec internals a bit more.  Not a huge win
right now, but with the proposed new DMA mapping API we might end up with
a lot more of this otherwise.

Signed-off-by: Christoph Hellwig <hch@xxxxxx>
---
 arch/m68k/emu/nfblock.c |  2 +-
 block/bio.c             |  2 +-
 block/blk-merge.c       |  6 ++----
 block/blk.h             |  4 ++--
 include/linux/bvec.h    | 15 +++++++++++++++
 5 files changed, 21 insertions(+), 8 deletions(-)

diff --git a/arch/m68k/emu/nfblock.c b/arch/m68k/emu/nfblock.c
index 8eea7ef9115146..874fe958877388 100644
--- a/arch/m68k/emu/nfblock.c
+++ b/arch/m68k/emu/nfblock.c
@@ -71,7 +71,7 @@ static void nfhd_submit_bio(struct bio *bio)
 		len = bvec.bv_len;
 		len >>= 9;
 		nfhd_read_write(dev->id, 0, dir, sec >> shift, len >> shift,
-				page_to_phys(bvec.bv_page) + bvec.bv_offset);
+				bvec_phys(&bvec));
 		sec += len;
 	}
 	bio_endio(bio);
diff --git a/block/bio.c b/block/bio.c
index e9e809a63c5975..a3b1b2266c50be 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -953,7 +953,7 @@ bool bvec_try_merge_hw_page(struct request_queue *q, struct bio_vec *bv,
 		bool *same_page)
 {
 	unsigned long mask = queue_segment_boundary(q);
-	phys_addr_t addr1 = page_to_phys(bv->bv_page) + bv->bv_offset;
+	phys_addr_t addr1 = bvec_phys(bv);
 	phys_addr_t addr2 = page_to_phys(page) + offset + len - 1;
 
 	if ((addr1 | mask) != (addr2 | mask))
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 515173342eb757..93779fc6dfb1c3 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -255,8 +255,7 @@ static bool bvec_split_segs(const struct queue_limits *lim,
 	unsigned seg_size = 0;
 
 	while (len && *nsegs < max_segs) {
-		seg_size = get_max_segment_size(lim, page_to_phys(bv->bv_page) +
-				bv->bv_offset + total_len);
+		seg_size = get_max_segment_size(lim, bvec_phys(bv) + total_len);
 		seg_size = min(seg_size, len);
 
 		(*nsegs)++;
@@ -492,8 +491,7 @@ static unsigned blk_bvec_map_sg(struct request_queue *q,
 	while (nbytes > 0) {
 		unsigned offset = bvec->bv_offset + total;
 		unsigned len = min(get_max_segment_size(&q->limits,
-				page_to_phys(bvec->bv_page) + offset),
-				nbytes);
+				bvec_phys(bvec) + total), nbytes);
 		struct page *page = bvec->bv_page;
 
 		/*
diff --git a/block/blk.h b/block/blk.h
index 47dadd2439b1ca..8e8936e97307c6 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -98,8 +98,8 @@ static inline bool biovec_phys_mergeable(struct request_queue *q,
 		struct bio_vec *vec1, struct bio_vec *vec2)
 {
 	unsigned long mask = queue_segment_boundary(q);
-	phys_addr_t addr1 = page_to_phys(vec1->bv_page) + vec1->bv_offset;
-	phys_addr_t addr2 = page_to_phys(vec2->bv_page) + vec2->bv_offset;
+	phys_addr_t addr1 = bvec_phys(vec1);
+	phys_addr_t addr2 = bvec_phys(vec2);
 
 	/*
 	 * Merging adjacent physical pages may not work correctly under KMSAN
diff --git a/include/linux/bvec.h b/include/linux/bvec.h
index bd1e361b351c5a..0b9a56fc0faaf5 100644
--- a/include/linux/bvec.h
+++ b/include/linux/bvec.h
@@ -280,4 +280,19 @@ static inline void *bvec_virt(struct bio_vec *bvec)
 	return page_address(bvec->bv_page) + bvec->bv_offset;
 }
 
+/**
+ * bvec_phys - return the physical address for a bvec
+ * @bvec: bvec to return the physical address for
+ */
+static inline phys_addr_t bvec_phys(const struct bio_vec *bvec)
+{
+	/*
+	 * Note this open codes page_to_phys because page_to_phys is defined in
+	 * <asm/io.h>, which we don't want to pull in here.  If it ever moves to
+	 * a sensible place we should start using it.
+	 */
+	return ((phys_addr_t)page_to_pfn(bvec->bv_page) << PAGE_SHIFT) +
+		bvec->bv_offset;
+}
+
 #endif /* __LINUX_BVEC_H */
-- 
2.43.0
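
P.S. for anyone converting a driver to the new helper, a minimal sketch of a
caller.  The bio walk below is existing kernel API, but example_program_dma()
and the surrounding driver context are made up for illustration and are not
part of this patch:

#include <linux/bio.h>
#include <linux/bvec.h>

/* Hypothetical driver hook that programs one DMA segment. */
extern void example_program_dma(phys_addr_t addr, unsigned int len);

static void example_map_bio(struct bio *bio)
{
	struct bio_vec bv;
	struct bvec_iter iter;

	bio_for_each_segment(bv, bio, iter) {
		/*
		 * One physical segment per bvec; no open-coded
		 * page_to_phys(bv.bv_page) + bv.bv_offset needed.
		 */
		phys_addr_t paddr = bvec_phys(&bv);

		example_program_dma(paddr, bv.bv_len);
	}
}

The same substitution applies anywhere a driver currently derives a physical
address from bv_page and bv_offset by hand.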