On Wed, Dec 11, 2024 at 09:53:45AM +0100, Christoph Hellwig wrote:
> struct iomap_ioend currently tracks outstanding buffered writes and has
> some really nice code in core iomap and XFS to merge contiguous I/Os
> and defer them to userspace for completion in a very efficient way.
>
> For zoned writes we'll also need a per-bio user context completion to
> record the written blocks, and the infrastructure for that would look
> basically like the ioend handling for buffered I/O.
>
> So instead of reinventing the wheel, reuse the existing infrastructure.
>
> Signed-off-by: Christoph Hellwig <hch@xxxxxx>
> ---
>  fs/iomap/buffered-io.c | 3 +++
>  fs/iomap/direct-io.c   | 50 +++++++++++++++++++++++++++++++++++++++++-
>  fs/iomap/internal.h    |  7 ++++++
>  include/linux/iomap.h  |  4 +++-
>  4 files changed, 62 insertions(+), 2 deletions(-)
>  create mode 100644 fs/iomap/internal.h
>
...
> diff --git a/fs/iomap/direct-io.c b/fs/iomap/direct-io.c
> index b521eb15759e..b5466361cafe 100644
> --- a/fs/iomap/direct-io.c
> +++ b/fs/iomap/direct-io.c
...
> @@ -163,6 +166,51 @@ static inline void iomap_dio_set_error(struct iomap_dio *dio, int ret)
>          cmpxchg(&dio->error, 0, ret);
>  }
>
> +u32 iomap_finish_ioend_direct(struct iomap_ioend *ioend)
> +{
> +        struct iomap_dio *dio = ioend->io_bio.bi_private;
> +        bool should_dirty = (dio->flags & IOMAP_DIO_DIRTY);
> +        struct kiocb *iocb = dio->iocb;
> +        u32 vec_count = ioend->io_bio.bi_vcnt;
> +
> +        if (ioend->io_error)
> +                iomap_dio_set_error(dio, ioend->io_error);
> +
> +        if (atomic_dec_and_test(&dio->ref)) {
> +                struct inode *inode = file_inode(iocb->ki_filp);
> +
> +                if (dio->wait_for_completion) {
> +                        struct task_struct *waiter = dio->submit.waiter;
> +
> +                        WRITE_ONCE(dio->submit.waiter, NULL);
> +                        blk_wake_io_task(waiter);
> +                } else if (!inode->i_mapping->nrpages) {
> +                        WRITE_ONCE(iocb->private, NULL);
> +
> +                        /*
> +                         * We must never invalidate pages from this thread to
> +                         * avoid deadlocks with buffered I/O completions.
> +                         * Tough luck if you hit the tiny race with someone
> +                         * dirtying the range now.
> +                         */
> +                        dio->flags |= IOMAP_DIO_NO_INVALIDATE;
> +                        iomap_dio_complete_work(&dio->aio.work);
> +                } else {
> +                        INIT_WORK(&dio->aio.work, iomap_dio_complete_work);
> +                        queue_work(inode->i_sb->s_dio_done_wq, &dio->aio.work);
> +                }
> +        }
> +
> +        if (should_dirty) {
> +                bio_check_pages_dirty(&ioend->io_bio);
> +        } else {
> +                bio_release_pages(&ioend->io_bio, false);
> +                bio_put(&ioend->io_bio);
> +        }
> +

Not that it matters all that much, but I'm a little curious about the
reasoning for using vec_count here. AFAICS this correlates to per-folio
writeback completions for buffered I/O, but that doesn't seem to apply
to direct I/O. Is there a reason to have the caller throttle based on
vec_count, or are we just pulling some non-zero value for consistency's
sake?
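(For reference, and going from memory here so the details may be off:
the consumer of that return value I'm aware of is the ioend chain
completion loop in fs/iomap/buffered-io.c, where iomap_finish_ioends()
uses the accumulated count to decide when to cond_resched() between
merged ioends, roughly:

        completions = iomap_finish_ioend(ioend, error);
        while (!list_empty(&tmp)) {
                if (completions > IOEND_BATCH_SIZE * 8) {
                        cond_resched();
                        completions = 0;
                }
                ioend = list_first_entry(&tmp, struct iomap_ioend, io_list);
                list_del_init(&ioend->io_list);
                completions += iomap_finish_ioend(ioend, error);
        }

So AFAICT the value returned for direct I/O only matters if dio ioends
can end up in that same loop, in which case bi_vcnt is at best a rough
proxy for the amount of completion work done.)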
Brian

> +        return vec_count;
> +}
> +
>  void iomap_dio_bio_end_io(struct bio *bio)
>  {
>          struct iomap_dio *dio = bio->bi_private;
> diff --git a/fs/iomap/internal.h b/fs/iomap/internal.h
> new file mode 100644
> index 000000000000..20cccfc3bb13
> --- /dev/null
> +++ b/fs/iomap/internal.h
> @@ -0,0 +1,7 @@
> +/* SPDX-License-Identifier: GPL-2.0 */
> +#ifndef _IOMAP_INTERNAL_H
> +#define _IOMAP_INTERNAL_H 1
> +
> +u32 iomap_finish_ioend_direct(struct iomap_ioend *ioend);
> +
> +#endif /* _IOMAP_INTERNAL_H */
> diff --git a/include/linux/iomap.h b/include/linux/iomap.h
> index eaa8cb9083eb..f6943c80e5fd 100644
> --- a/include/linux/iomap.h
> +++ b/include/linux/iomap.h
> @@ -343,9 +343,11 @@ sector_t iomap_bmap(struct address_space *mapping, sector_t bno,
>  #define IOMAP_IOEND_UNWRITTEN           (1U << 1)
>  /* don't merge into previous ioend */
>  #define IOMAP_IOEND_BOUNDARY            (1U << 2)
> +/* is direct I/O */
> +#define IOMAP_IOEND_DIRECT              (1U << 3)
>
>  #define IOMAP_IOEND_NOMERGE_FLAGS \
> -        (IOMAP_IOEND_SHARED | IOMAP_IOEND_UNWRITTEN)
> +        (IOMAP_IOEND_SHARED | IOMAP_IOEND_UNWRITTEN | IOMAP_IOEND_DIRECT)
>
>  /*
>   * Structure for writeback I/O completions.
> --
> 2.45.2