> 		nr_pages = (fi.offset + fi.length - 1) / PAGE_SIZE -
> 			   fi.offset / PAGE_SIZE + 1;
> +		bio_release_folio(bio, fi.folio, nr_pages);
> 	}
> }
> EXPORT_SYMBOL_GPL(__bio_release_pages);
> diff --git a/block/blk.h b/block/blk.h
> index 0c8857fe4079..18520b05c6ce 100644
> --- a/block/blk.h
> +++ b/block/blk.h
> @@ -548,6 +548,13 @@ static inline void bio_release_page(struct bio *bio, struct page *page)
> 		unpin_user_page(page);
> }
>
> +static inline void bio_release_folio(struct bio *bio, struct folio *folio,
> +		unsigned long npages)
> +{
> +	if (bio_flagged(bio, BIO_PAGE_PINNED))
> +		unpin_user_folio(folio, npages);
> +}

This is only used in __bio_release_pages, and given that __bio_release_pages
is only called when BIO_PAGE_PINNED is set, there is no need to check the
flag inside the loop again.

Moreover, having the BIO_PAGE_PINNED knowledge in bio_release_pages means we
can skip the entire loop for !BIO_PAGE_PINNED bios, so that we can avoid the
npages calculation for the !BIO_PAGE_PINNED case. Also, this means we know
the loop doesn't do anything if mark_dirty is false, which is another trivial
check that can move into bio_release_pages. As this optimization already
applies as-is, I'll send a prep patch for it.

> +/**
> + * unpin_user_folio() - release pages of a folio
> + * @folio:	pointer to folio to be released
> + * @npages:	number of pages of same folio
> + *
> + * Release npages of the folio
> + */
> +void unpin_user_folio(struct folio *folio, unsigned long npages)
> +{
> +	gup_put_folio(folio, npages, FOLL_PIN);
> +}
> +EXPORT_SYMBOL(unpin_user_folio);

Please don't hide a new MM API inside a block patch, but split it out into a
separate patch with a mm prefix.
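
To make the suggestion concrete, the combined end state could look something
like the sketch below. This is an untested illustration, not the actual prep
patch: the loop body around the quoted context lines is assumed from the
current __bio_release_pages, and the mark_dirty hoisting mentioned above is
left out for brevity:

static inline void bio_release_pages(struct bio *bio, bool mark_dirty)
{
	/* check the flag once here instead of once per folio in the loop */
	if (bio_flagged(bio, BIO_PAGE_PINNED))
		__bio_release_pages(bio, mark_dirty);
}

void __bio_release_pages(struct bio *bio, bool mark_dirty)
{
	struct folio_iter fi;

	bio_for_each_folio_all(fi, bio) {
		size_t nr_pages;

		if (mark_dirty) {
			folio_lock(fi.folio);
			folio_mark_dirty(fi.folio);
			folio_unlock(fi.folio);
		}
		nr_pages = (fi.offset + fi.length - 1) / PAGE_SIZE -
			   fi.offset / PAGE_SIZE + 1;
		/*
		 * The caller guarantees BIO_PAGE_PINNED, so unpin
		 * unconditionally; no bio_release_folio wrapper needed.
		 */
		unpin_user_folio(fi.folio, nr_pages);
	}
}

With this shape the new unpin_user_folio (submitted as its own mm-prefixed
patch) is the only per-folio release call left in the loop.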