> +		size = iov_iter_extract_pages(iter, &pages,
> +				UINT_MAX - bio->bi_iter.bi_size,
> +				nr_pages, gup_flags, &offset);
> 		if (unlikely(size <= 0))
> +		bio_set_cleanup_mode(bio, iter, gup_flags);

This should move out to bio_iov_iter_get_pages and only be called once.

> +++ b/block/blk-map.c
> @@ -285,24 +285,24 @@ static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
> 		gup_flags |= FOLL_PCI_P2PDMA;
>
> 	while (iov_iter_count(iter)) {
> -		struct page **pages, *stack_pages[UIO_FASTIOV];
> +		struct page *stack_pages[UIO_FASTIOV];
> +		struct page **pages = stack_pages;
> 		ssize_t bytes;
> 		size_t offs;
> 		int npages;
>
> -		if (nr_vecs <= ARRAY_SIZE(stack_pages)) {
> -			pages = stack_pages;
> -			bytes = iov_iter_get_pages(iter, pages, LONG_MAX,
> -					nr_vecs, &offs, gup_flags);
> -		} else {
> -			bytes = iov_iter_get_pages_alloc(iter, &pages,
> -					LONG_MAX, &offs, gup_flags);
> -		}
> +		if (nr_vecs > ARRAY_SIZE(stack_pages))
> +			pages = NULL;
> +
> +		bytes = iov_iter_extract_pages(iter, &pages, LONG_MAX,
> +				nr_vecs, gup_flags, &offs);
> 		if (unlikely(bytes <= 0)) {
> 			ret = bytes ? bytes : -EFAULT;
> 			goto out_unmap;
> 		}
>
> +		bio_set_cleanup_mode(bio, iter, gup_flags);

Same here - one call outside of the loop.

> +static inline void bio_set_cleanup_mode(struct bio *bio, struct iov_iter *iter,
> +		unsigned int gup_flags)
> +{
> +	unsigned int cleanup_mode;
> +
> +	bio_clear_flag(bio, BIO_PAGE_REFFED);

... and this should not be needed. Instead:

> +	cleanup_mode = iov_iter_extract_mode(iter, gup_flags);
> +	if (cleanup_mode & FOLL_GET)
> +		bio_set_flag(bio, BIO_PAGE_REFFED);
> +	if (cleanup_mode & FOLL_PIN)
> +		bio_set_flag(bio, BIO_PAGE_PINNED);

We could warn if a non-matching flag is set here, if we really care.