We don't really need to cache this, let's reclaim 8 bytes from struct
io_mapped_ubuf and just calculate it when we need it. The only hot path
here is io_import_fixed().

Signed-off-by: Jens Axboe <axboe@xxxxxxxxx>
---
 io_uring/rsrc.c | 9 +++------
 io_uring/rsrc.h | 1 -
 2 files changed, 3 insertions(+), 7 deletions(-)

diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c
index 9264e555ae59..2477995e2d65 100644
--- a/io_uring/rsrc.c
+++ b/io_uring/rsrc.c
@@ -988,13 +988,10 @@ static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
 	imu->ubuf_end = imu->ubuf + iov->iov_len;
 	imu->nr_bvecs = nr_pages;
 	imu->folio_shift = PAGE_SHIFT;
-	imu->folio_mask = PAGE_MASK;
-	if (coalesced) {
+	if (coalesced)
 		imu->folio_shift = data.folio_shift;
-		imu->folio_mask = ~((1UL << data.folio_shift) - 1);
-	}
 	refcount_set(&imu->refs, 1);
-	off = (unsigned long) iov->iov_base & ~imu->folio_mask;
+	off = (unsigned long) iov->iov_base & ((1UL << imu->folio_shift) - 1);
 	*pimu = imu;
 	ret = 0;
 
@@ -1132,7 +1129,7 @@ int io_import_fixed(int ddir, struct iov_iter *iter,
 			iter->bvec = bvec + seg_skip;
 			iter->nr_segs -= seg_skip;
 			iter->count -= bvec->bv_len + offset;
-			iter->iov_offset = offset & ~imu->folio_mask;
+			iter->iov_offset = offset & ((1UL << imu->folio_shift) - 1);
 		}
 	}
 
diff --git a/io_uring/rsrc.h b/io_uring/rsrc.h
index eb4803e473b0..e290d2be3285 100644
--- a/io_uring/rsrc.h
+++ b/io_uring/rsrc.h
@@ -46,7 +46,6 @@ struct io_mapped_ubuf {
 	unsigned int	nr_bvecs;
 	unsigned int	folio_shift;
 	unsigned long	acct_pages;
-	unsigned long	folio_mask;
 	refcount_t	refs;
 	struct bio_vec	bvec[] __counted_by(nr_bvecs);
 };
-- 
2.45.2
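
[Not part of the patch; a minimal standalone sketch, placed after the signature so it does not affect applying the patch. It demonstrates the identity the change relies on: masking an address with ((1UL << folio_shift) - 1) yields the same in-folio offset that the removed cached folio_mask produced via "addr & ~folio_mask". The address value and shift range below are arbitrary illustration choices.]

/* build with: cc -o mask_demo mask_demo.c */
#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned long addr = 0x7f3a12345678UL;	/* arbitrary user address */

	for (unsigned int shift = 12; shift <= 21; shift++) {
		/* old scheme: cache the folio mask, then invert it for the offset */
		unsigned long folio_mask = ~((1UL << shift) - 1);
		unsigned long off_old = addr & ~folio_mask;

		/* new scheme: derive the low-bits mask directly from the shift */
		unsigned long off_new = addr & ((1UL << shift) - 1);

		assert(off_old == off_new);
		printf("shift=%u off=%#lx\n", shift, off_new);
	}
	return 0;
}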