This is the incremental patch.  It doesn't do the FOLL_PIN to bool
conversion for the extra helper yet, and still needs to be folded into
the original patches.

diff --git a/block/bio.c b/block/bio.c
index 6dc54bf3ed27d4..bd8433f3644fd7 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -1170,14 +1170,20 @@ bool bio_add_folio(struct bio *bio, struct folio *folio, size_t len,
 void __bio_release_pages(struct bio *bio, bool mark_dirty)
 {
-	unsigned int gup_flags = bio_to_gup_flags(bio);
+	bool pinned = bio_flagged(bio, BIO_PAGE_PINNED);
+	bool reffed = bio_flagged(bio, BIO_PAGE_REFFED);
 	struct bvec_iter_all iter_all;
 	struct bio_vec *bvec;
 
 	bio_for_each_segment_all(bvec, bio, iter_all) {
 		if (mark_dirty && !PageCompound(bvec->bv_page))
 			set_page_dirty_lock(bvec->bv_page);
-		page_put_unpin(bvec->bv_page, gup_flags);
+
+		if (pinned)
+			unpin_user_page(bvec->bv_page);
+		/* this can go away once direct-io.c is converted: */
+		else if (reffed)
+			put_page(bvec->bv_page);
 	}
 }
 EXPORT_SYMBOL_GPL(__bio_release_pages);
diff --git a/block/blk.h b/block/blk.h
index 294044d696e09f..a16d4425d2751c 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -430,27 +430,19 @@ int bio_add_hw_page(struct request_queue *q, struct bio *bio,
  */
 static inline void bio_set_cleanup_mode(struct bio *bio, struct iov_iter *iter)
 {
-	unsigned int cleanup_mode = iov_iter_extract_mode(iter);
-
-	if (cleanup_mode & FOLL_GET)
-		bio_set_flag(bio, BIO_PAGE_REFFED);
-	if (cleanup_mode & FOLL_PIN)
+	if (iov_iter_extract_mode(iter) & FOLL_PIN)
 		bio_set_flag(bio, BIO_PAGE_PINNED);
 }
 
-static inline unsigned int bio_to_gup_flags(struct bio *bio)
-{
-	return (bio_flagged(bio, BIO_PAGE_REFFED) ? FOLL_GET : 0) |
-	       (bio_flagged(bio, BIO_PAGE_PINNED) ? FOLL_PIN : 0);
-}
-
 /*
  * Clean up a page appropriately, where the page may be pinned, may have a
  * ref taken on it or neither.
  */
 static inline void bio_release_page(struct bio *bio, struct page *page)
 {
-	page_put_unpin(page, bio_to_gup_flags(bio));
+	WARN_ON_ONCE(bio_flagged(bio, BIO_PAGE_REFFED));
+	if (bio_flagged(bio, BIO_PAGE_PINNED))
+		unpin_user_page(page);
 }
 
 struct request_queue *blk_alloc_queue(int node_id);