This patch introduces helper functions that check whether a buffer can
be coalesced, and that gather folio data for later use. Coalescing
reduces the time and space consumed by mapping and storing
multi-hugepage fixed buffers.

To be coalescable, a multi-hugepage buffer must fully cover its folios
(except possibly the first and last one), and all of its folios must
have the same size. These requirements simplify later processing;
same-sized chunks are also needed in io_import_fixed for fast iov_iter
adjustment.

Signed-off-by: Chenliang Li <cliang01.li@xxxxxxxxxxx>
---
 io_uring/rsrc.c | 78 +++++++++++++++++++++++++++++++++++++++++++++++++
 io_uring/rsrc.h | 10 +++++++
 2 files changed, 88 insertions(+)

diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c
index 65417c9553b1..d08224c0c5b0 100644
--- a/io_uring/rsrc.c
+++ b/io_uring/rsrc.c
@@ -871,6 +871,84 @@ static int io_buffer_account_pin(struct io_ring_ctx *ctx, struct page **pages,
 	return ret;
 }
 
+static bool __io_sqe_buffer_try_coalesce(struct page **pages, int nr_pages,
+					 struct io_imu_folio_data *data)
+{
+	struct folio *folio = page_folio(pages[0]);
+	unsigned int count = 1;
+	int i;
+
+	data->nr_pages_mid = folio_nr_pages(folio);
+	if (data->nr_pages_mid == 1)
+		return false;
+
+	data->folio_shift = folio_shift(folio);
+	data->folio_size = folio_size(folio);
+	data->nr_folios = 1;
+	/*
+	 * Check that pages are contiguous inside each folio, and that all
+	 * folios have the same page count except for the head and tail.
+	 */
+	for (i = 1; i < nr_pages; i++) {
+		if (page_folio(pages[i]) == folio &&
+		    pages[i] == pages[i-1] + 1) {
+			count++;
+			continue;
+		}
+
+		if (data->nr_folios == 1)
+			data->nr_pages_head = count;
+		else if (count != data->nr_pages_mid)
+			return false;
+
+		folio = page_folio(pages[i]);
+		if (folio_size(folio) != data->folio_size)
+			return false;
+
+		count = 1;
+		data->nr_folios++;
+	}
+	if (data->nr_folios == 1)
+		data->nr_pages_head = count;
+
+	return true;
+}
+
+static bool io_sqe_buffer_try_coalesce(struct page **pages, int nr_pages,
+				       struct io_imu_folio_data *data)
+{
+	int i, j;
+
+	if (nr_pages <= 1 ||
+	    !__io_sqe_buffer_try_coalesce(pages, nr_pages, data))
+		return false;
+
+	/*
+	 * The pages are bound to the folio; this doesn't
+	 * actually unpin them but drops all but one reference,
+	 * which is usually put down by io_buffer_unmap().
+	 * Note, needs a better helper.
+	 */
+	if (data->nr_pages_head > 1)
+		unpin_user_pages(&pages[1], data->nr_pages_head - 1);
+
+	j = data->nr_pages_head;
+	nr_pages -= data->nr_pages_head;
+	for (i = 1; i < data->nr_folios; i++) {
+		unsigned int nr_unpin;
+
+		nr_unpin = min_t(unsigned int, nr_pages - 1,
+				 data->nr_pages_mid - 1);
+		if (nr_unpin == 0)
+			break;
+		unpin_user_pages(&pages[j+1], nr_unpin);
+		j += data->nr_pages_mid;
+		nr_pages -= data->nr_pages_mid;
+	}
+
+	return true;
+}
+
 static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
 				  struct io_mapped_ubuf **pimu,
 				  struct page **last_hpage)
diff --git a/io_uring/rsrc.h b/io_uring/rsrc.h
index c032ca3436ca..b2a9d66b76dd 100644
--- a/io_uring/rsrc.h
+++ b/io_uring/rsrc.h
@@ -50,6 +50,16 @@ struct io_mapped_ubuf {
 	struct bio_vec bvec[] __counted_by(nr_bvecs);
 };
 
+struct io_imu_folio_data {
+	/* Head folio can be partially included in the fixed buf */
+	unsigned int	nr_pages_head;
+	/* For non-head/tail folios, has to be fully included */
+	unsigned int	nr_pages_mid;
+	unsigned int	nr_folios;
+	unsigned int	folio_shift;
+	size_t		folio_size;
+};
+
 void io_rsrc_node_ref_zero(struct io_rsrc_node *node);
 void io_rsrc_node_destroy(struct io_ring_ctx *ctx, struct io_rsrc_node *ref_node);
 struct io_rsrc_node *io_rsrc_node_alloc(struct io_ring_ctx *ctx);
-- 
2.34.1
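
For reference, below is a minimal userspace sketch (not part of the
patch) of a buffer that meets the coalescing requirements above: a
fixed buffer backed by two fully covered 2MB hugetlb folios, registered
through liburing. With this series, such a buffer would be described by
io_imu_folio_data as nr_folios = 2 and nr_pages_head = nr_pages_mid =
512 (4KB pages per 2MB folio). The liburing calls are standard; the
sizes and error handling are illustrative assumptions only.

/*
 * Illustrative sketch only -- not part of this patch.
 * Requires preallocated hugepages (e.g. sysctl vm.nr_hugepages=2)
 * and liburing; build with -luring.
 */
#include <liburing.h>
#include <stdio.h>
#include <sys/mman.h>

#define BUF_SIZE	(4UL * 1024 * 1024)	/* two 2MB hugetlb folios */

int main(void)
{
	struct io_uring ring;
	struct iovec iov;
	void *buf;
	int ret;

	/* One mapping backed by two 2MB folios, both fully covered. */
	buf = mmap(NULL, BUF_SIZE, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
	if (buf == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	ret = io_uring_queue_init(8, &ring, 0);
	if (ret < 0) {
		fprintf(stderr, "queue_init: %d\n", ret);
		return 1;
	}

	iov.iov_base = buf;
	iov.iov_len = BUF_SIZE;
	/*
	 * Registration pins the buffer; with coalescing the kernel can
	 * store one bvec per folio instead of one per 4KB page.
	 */
	ret = io_uring_register_buffers(&ring, &iov, 1);
	if (ret)
		fprintf(stderr, "register_buffers: %d\n", ret);

	io_uring_queue_exit(&ring);
	munmap(buf, BUF_SIZE);
	return 0;
}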