On 24/03/2021 12:05, Christoph Hellwig wrote: > On Wed, Mar 24, 2021 at 10:09:32AM +0000, Johannes Thumshirn wrote: >> Stupid question, but wouldn't it be sufficient if I did (this can still be >> simplified): > > No, this loses data if the iter is bigger than what you truncate it to. > Just with your last patch you probably did not test with large > enough iters to be beyond the zone append limit. Hmm I did a fio job with bs=128k (and my nullblock device has 127k zone_append_max_bytes) and verify=md5. Anyhow, I like your solution more. Fio looks good (bs=1M this time, to be sure), xfstests still pending. > >> diff --git a/block/bio.c b/block/bio.c >> index 26b7f721cda8..9c529b2db8fa 100644 >> --- a/block/bio.c >> +++ b/block/bio.c >> @@ -964,6 +964,16 @@ static int bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter) >> return 0; >> } >> >> +static int bio_iov_append_bvec_set(struct bio *bio, struct iov_iter *iter) >> +{ >> + struct request_queue *q = bio->bi_bdev->bd_disk->queue; >> + unsigned int max_append = queue_max_zone_append_sectors(q) << 9; >> + >> + iov_iter_truncate(iter, max_append); >> + >> + return bio_iov_bvec_set(bio, iter); > > OTOH if you copy the iter by value to a local one first and then > make sure the original iter is advanced it should work. We don't > really need the iter advance for the original one, though. 
Something like: > > diff --git a/block/bio.c b/block/bio.c > index a1c4d2900c7a83..7d9e01580f2ab1 100644 > --- a/block/bio.c > +++ b/block/bio.c > @@ -949,7 +949,7 @@ void bio_release_pages(struct bio *bio, bool mark_dirty) > } > EXPORT_SYMBOL_GPL(bio_release_pages); > > -static int bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter) > +static void __bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter) > { > WARN_ON_ONCE(bio->bi_max_vecs); > > @@ -959,7 +959,11 @@ static int bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter) > bio->bi_iter.bi_size = iter->count; > bio_set_flag(bio, BIO_NO_PAGE_REF); > bio_set_flag(bio, BIO_CLONED); > +} > > +static int bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter) > +{ > + __bio_iov_bvec_set(bio, iter); > iov_iter_advance(iter, iter->count); > return 0; > } > @@ -1019,6 +1023,17 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter) > return 0; > } > > +static int bio_iov_bvec_set_append(struct bio *bio, struct iov_iter *iter) > +{ > + struct request_queue *q = bio->bi_bdev->bd_disk->queue; > + struct iov_iter i = *iter; > + > + iov_iter_truncate(&i, queue_max_zone_append_sectors(q) << 9); > + __bio_iov_bvec_set(bio, &i); > + iov_iter_advance(iter, i.count); > + return 0; > +} > + > static int __bio_iov_append_get_pages(struct bio *bio, struct iov_iter *iter) > { > unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt; > @@ -1094,8 +1109,8 @@ int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter) > int ret = 0; > > if (iov_iter_is_bvec(iter)) { > - if (WARN_ON_ONCE(bio_op(bio) == REQ_OP_ZONE_APPEND)) > - return -EINVAL; > + if (bio_op(bio) == REQ_OP_ZONE_APPEND) > + return bio_iov_bvec_set_append(bio, iter); > return bio_iov_bvec_set(bio, iter); > } > >