Re: [PATCH v2 08/12] io_uring: overflow processing for CQE32

On 4/21/22 7:15 PM, Kanchan Joshi wrote:
> On Thu, Apr 21, 2022 at 1:37 PM Stefan Roesch <shr@xxxxxx> wrote:
>>
>> This adds the overflow processing for large CQEs.
>>
>> This adds two parameters to the io_cqring_event_overflow function and
>> uses them to initialize the large CQE fields.
>>
>> Allocate enough space for large CQEs in the overflow structure. If no
>> large CQEs are used, the size of the allocation is unchanged.
>>
>> The cqe field can have a different size depending on whether it is a
>> large CQE or not. To be able to allocate different sizes, the two
>> fields in the structure are re-ordered.
>>
>> Co-developed-by: Jens Axboe <axboe@xxxxxxxxx>
>> Signed-off-by: Stefan Roesch <shr@xxxxxx>
>> Signed-off-by: Jens Axboe <axboe@xxxxxxxxx>
>> ---
>>  fs/io_uring.c | 26 +++++++++++++++++++-------
>>  1 file changed, 19 insertions(+), 7 deletions(-)
>>
>> diff --git a/fs/io_uring.c b/fs/io_uring.c
>> index ff6229b6df16..50efced63ec9 100644
>> --- a/fs/io_uring.c
>> +++ b/fs/io_uring.c
>> @@ -220,8 +220,8 @@ struct io_mapped_ubuf {
>>  struct io_ring_ctx;
>>
>>  struct io_overflow_cqe {
>> -       struct io_uring_cqe cqe;
>>         struct list_head list;
>> +       struct io_uring_cqe cqe;
>>  };
>>
>>  struct io_fixed_file {
>> @@ -2016,13 +2016,17 @@ static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
>>         while (!list_empty(&ctx->cq_overflow_list)) {
>>                 struct io_uring_cqe *cqe = io_get_cqe(ctx);
>>                 struct io_overflow_cqe *ocqe;
>> +               size_t cqe_size = sizeof(struct io_uring_cqe);
>> +
>> +               if (ctx->flags & IORING_SETUP_CQE32)
>> +                       cqe_size <<= 1;
>>
>>                 if (!cqe && !force)
>>                         break;
>>                 ocqe = list_first_entry(&ctx->cq_overflow_list,
>>                                         struct io_overflow_cqe, list);
>>                 if (cqe)
>> -                       memcpy(cqe, &ocqe->cqe, sizeof(*cqe));
>> +                       memcpy(cqe, &ocqe->cqe, cqe_size);
>>                 else
>>                         io_account_cq_overflow(ctx);
>>
>> @@ -2111,11 +2115,15 @@ static __cold void io_uring_drop_tctx_refs(struct task_struct *task)
>>  }
>>
>>  static bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data,
>> -                                    s32 res, u32 cflags)
>> +                                    s32 res, u32 cflags, u64 extra1, u64 extra2)
>>  {
>>         struct io_overflow_cqe *ocqe;
>> +       size_t ocq_size = sizeof(struct io_overflow_cqe);
>>
>> -       ocqe = kmalloc(sizeof(*ocqe), GFP_ATOMIC | __GFP_ACCOUNT);
>> +       if (ctx->flags & IORING_SETUP_CQE32)
> 
> This check can go into a bool variable, as it is repeated in this
> function.

V3 will have this change.
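
For reference, a minimal sketch of what the hoisted check might look like
in io_cqring_event_overflow() (the is_cqe32 name and the size adjustment
under the if are illustrative, since the hunk quoted above is trimmed):

	static bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data,
					     s32 res, u32 cflags, u64 extra1, u64 extra2)
	{
		struct io_overflow_cqe *ocqe;
		size_t ocq_size = sizeof(struct io_overflow_cqe);
		/* test the flag once and reuse the result below */
		bool is_cqe32 = (ctx->flags & IORING_SETUP_CQE32);

		if (is_cqe32)
			ocq_size += sizeof(struct io_uring_cqe);

		ocqe = kmalloc(ocq_size, GFP_ATOMIC | __GFP_ACCOUNT);
		/*
		 * ... remainder of the function unchanged, with the second
		 * IORING_SETUP_CQE32 test replaced by is_cqe32 ...
		 */
	}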


