On Mon, May 24, 2021 at 7:51 PM Pavel Begunkov <asml.silence@xxxxxxxxx> wrote: > > Some parts like fixed file table use 2 level tables, factor out helpers > for allocating/deallocating them as more users are to come. > > Signed-off-by: Pavel Begunkov <asml.silence@xxxxxxxxx> > --- > fs/io_uring.c | 73 ++++++++++++++++++++++++++++++--------------------- > 1 file changed, 43 insertions(+), 30 deletions(-) > > diff --git a/fs/io_uring.c b/fs/io_uring.c > index 40b70c34c1b2..1cc2d16637ff 100644 > --- a/fs/io_uring.c > +++ b/fs/io_uring.c > @@ -7054,14 +7054,36 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events, > return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0; > } > > -static void io_free_file_tables(struct io_file_table *table, unsigned nr_files) > +static void io_free_page_table(void **table, size_t size) > { > - unsigned i, nr_tables = DIV_ROUND_UP(nr_files, IORING_MAX_FILES_TABLE); > + unsigned i, nr_tables = DIV_ROUND_UP(size, PAGE_SIZE); > > for (i = 0; i < nr_tables; i++) > - kfree(table->files[i]); > - kfree(table->files); > - table->files = NULL; > + kfree(table[i]); > + kfree(table); > +} > + > +static void **io_alloc_page_table(size_t size) > +{ > + unsigned i, nr_tables = DIV_ROUND_UP(size, PAGE_SIZE); > + size_t init_size = size; > + void **table; > + > + table = kcalloc(nr_tables, sizeof(*table), GFP_KERNEL); > + if (!table) > + return NULL; > + > + for (i = 0; i < nr_tables; i++) { > + unsigned int this_size = min(size, PAGE_SIZE); > + > + table[i] = kzalloc(this_size, GFP_KERNEL); > + if (!table[i]) { > + io_free_page_table(table, init_size); > + return NULL; Unless zalloc returns non-NULL for size == 0, you are guaranteed to do this for size <= PAGE_SIZE * (nr_tables - 1). Possibly worth calculating early?
If you calculate early you could then make the loop: for (i = 0; i < nr_tables - 1; i++) { table[i] = kzalloc(PAGE_SIZE, GFP_KERNEL); if (!table[i]) { io_free_page_table(table, init_size); return NULL; } } table[i] = kzalloc(size - (nr_tables - 1) * PAGE_SIZE, GFP_KERNEL); if (!table[i]) { io_free_page_table(table, init_size); return NULL; } Which is almost certainly faster. > + } > + size -= this_size; > + } > + return table; > } > > static inline void io_rsrc_ref_lock(struct io_ring_ctx *ctx) > @@ -7190,6 +7212,22 @@ static int io_rsrc_data_alloc(struct io_ring_ctx *ctx, rsrc_put_fn *do_put, > return 0; > } > > +static bool io_alloc_file_tables(struct io_file_table *table, unsigned nr_files) > +{ > + size_t size = nr_files * sizeof(struct io_fixed_file); > + > + table->files = (struct io_fixed_file **)io_alloc_page_table(size); > + return !!table->files; > +} > + > +static void io_free_file_tables(struct io_file_table *table, unsigned nr_files) > +{ > + size_t size = nr_files * sizeof(struct io_fixed_file); > + > + io_free_page_table((void **)table->files, size); > + table->files = NULL; > +} > + > static void __io_sqe_files_unregister(struct io_ring_ctx *ctx) > { > #if defined(CONFIG_UNIX) > @@ -7451,31 +7489,6 @@ static int io_sqe_files_scm(struct io_ring_ctx *ctx) > } > #endif > > -static bool io_alloc_file_tables(struct io_file_table *table, unsigned nr_files) > -{ > - unsigned i, nr_tables = DIV_ROUND_UP(nr_files, IORING_MAX_FILES_TABLE); > - > - table->files = kcalloc(nr_tables, sizeof(*table->files), GFP_KERNEL); > - if (!table->files) > - return false; > - > - for (i = 0; i < nr_tables; i++) { > - unsigned int this_files = min(nr_files, IORING_MAX_FILES_TABLE); > - > - table->files[i] = kcalloc(this_files, sizeof(*table->files[i]), > - GFP_KERNEL); > - if (!table->files[i]) > - break; > - nr_files -= this_files; > - } > - > - if (i == nr_tables) > - return true; > - > - io_free_file_tables(table, nr_tables * IORING_MAX_FILES_TABLE); > - return false; > -} 
> - > static void io_rsrc_file_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc) > { > struct file *file = prsrc->file; > -- > 2.31.1 >