On Fri, Apr 19, 2024 at 02:47:21PM +0530, Kundan Kumar wrote: > @@ -1289,16 +1291,33 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter) > > for (left = size, i = 0; left > 0; left -= len, i++) { > struct page *page = pages[i]; > + folio = page_folio(page); > + > + if (!folio_test_large(folio) || > + (bio_op(bio) == REQ_OP_ZONE_APPEND)) { > + len = min_t(size_t, PAGE_SIZE - offset, left); > + if (bio_op(bio) == REQ_OP_ZONE_APPEND) { > + ret = bio_iov_add_zone_append_page(bio, page, > + len, offset); > + if (ret) > + break; > + } else > + bio_iov_add_page(bio, page, len, offset); > + } else { > + /* See the offset of folio and the size */ > + folio_offset = (folio_page_idx(folio, page) > + << PAGE_SHIFT) + offset; > + size_folio = folio_size(folio); > > - len = min_t(size_t, PAGE_SIZE - offset, left); > - if (bio_op(bio) == REQ_OP_ZONE_APPEND) { > - ret = bio_iov_add_zone_append_page(bio, page, len, > - offset); > - if (ret) > - break; > - } else > - bio_iov_add_page(bio, page, len, offset); > + /* Calculate the length of folio to be added */ > + len = min_t(size_t, (size_folio - folio_offset), left); > + > + num_pages = DIV_ROUND_UP(offset + len, PAGE_SIZE); > > + bio_iov_add_page(bio, page, len, offset); I think there's another optimization to be had here. You only need one reference on the folio for all its pages, so I believe you can safely drop (num_pages - 1) references right here. Then __bio_release_pages() can be further simplified by removing the 'do{...}while()' loop releasing individual pages. I tested this atop your patch, and it looks okay so far. 
This could be more efficient if we had access to gup_put_folio() since we already know all the pages are part of the same folio (unpin_user_pages() recalculates that). --- diff --git a/block/bio.c b/block/bio.c index 469606494f8f7..9829c79494108 100644 --- a/block/bio.c +++ b/block/bio.c @@ -1155,7 +1155,6 @@ void __bio_release_pages(struct bio *bio, bool mark_dirty) bio_for_each_folio_all(fi, bio) { struct page *page; - size_t nr_pages; if (mark_dirty) { folio_lock(fi.folio); @@ -1163,11 +1162,7 @@ void __bio_release_pages(struct bio *bio, bool mark_dirty) folio_unlock(fi.folio); } page = folio_page(fi.folio, fi.offset / PAGE_SIZE); - nr_pages = (fi.offset + fi.length - 1) / PAGE_SIZE - - fi.offset / PAGE_SIZE + 1; - do { - bio_release_page(bio, page++); - } while (--nr_pages != 0); + bio_release_page(bio, page); } } EXPORT_SYMBOL_GPL(__bio_release_pages); @@ -1315,6 +1310,8 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter) num_pages = DIV_ROUND_UP(offset + len, PAGE_SIZE); bio_iov_add_page(bio, page, len, offset); + if (bio_flagged(bio, BIO_PAGE_PINNED) && num_pages > 1) + unpin_user_pages(pages + i, num_pages - 1); /* Skip the pages which got added */ i = i + (num_pages - 1); } --