Re: [PATCH for-next] rdma_rxe: address an issue with hardened user copy

On 8/26/20 8:21 PM, Zhu Yanjun wrote:
> On 8/26/2020 12:58 AM, Bob Pearson wrote:
>> Change rxe pools to use kzalloc instead of kmem_cache to allocate
> 
> Why do you use kzalloc instead of kmem_cache? For performance or some bugs?
> 
> Zhu Yanjun
> 
>> memory for rxe objects.
>>
>> Signed-off-by: Bob Pearson <rpearson@xxxxxxx>
>> ---
>>   drivers/infiniband/sw/rxe/rxe.c      |  8 ----
>>   drivers/infiniband/sw/rxe/rxe_pool.c | 60 +---------------------------
>>   drivers/infiniband/sw/rxe/rxe_pool.h |  7 ----
>>   3 files changed, 2 insertions(+), 73 deletions(-)
>>
>> diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c
>> index cc395da13eff..a1ff70e0b1f8 100644
>> --- a/drivers/infiniband/sw/rxe/rxe.c
>> +++ b/drivers/infiniband/sw/rxe/rxe.c
>> @@ -277,13 +277,6 @@ static int __init rxe_module_init(void)
>>   {
>>       int err;
>>
>> -    /* initialize slab caches for managed objects */
>> -    err = rxe_cache_init();
>> -    if (err) {
>> -        pr_err("unable to init object pools\n");
>> -        return err;
>> -    }
>> -
>>       err = rxe_net_init();
>>       if (err)
>>           return err;
>> @@ -298,7 +291,6 @@ static void __exit rxe_module_exit(void)
>>       rdma_link_unregister(&rxe_link_ops);
>>       ib_unregister_driver(RDMA_DRIVER_RXE);
>>       rxe_net_exit();
>> -    rxe_cache_exit();
>>
>>       pr_info("unloaded\n");
>>   }
>> diff --git a/drivers/infiniband/sw/rxe/rxe_pool.c b/drivers/infiniband/sw/rxe/rxe_pool.c
>> index c0fab4a65f9e..70fc9f7a25b6 100644
>> --- a/drivers/infiniband/sw/rxe/rxe_pool.c
>> +++ b/drivers/infiniband/sw/rxe/rxe_pool.c
>> @@ -84,62 +84,6 @@ static inline const char *pool_name(struct rxe_pool *pool)
>>       return rxe_type_info[pool->type].name;
>>   }
>>
>> -static inline struct kmem_cache *pool_cache(struct rxe_pool *pool)
>> -{
>> -    return rxe_type_info[pool->type].cache;
>> -}
>> -
>> -static void rxe_cache_clean(size_t cnt)
>> -{
>> -    int i;
>> -    struct rxe_type_info *type;
>> -
>> -    for (i = 0; i < cnt; i++) {
>> -        type = &rxe_type_info[i];
>> -        if (!(type->flags & RXE_POOL_NO_ALLOC)) {
>> -            kmem_cache_destroy(type->cache);
>> -            type->cache = NULL;
>> -        }
>> -    }
>> -}
>> -
>> -int rxe_cache_init(void)
>> -{
>> -    int err;
>> -    int i;
>> -    size_t size;
>> -    struct rxe_type_info *type;
>> -
>> -    for (i = 0; i < RXE_NUM_TYPES; i++) {
>> -        type = &rxe_type_info[i];
>> -        size = ALIGN(type->size, RXE_POOL_ALIGN);
>> -        if (!(type->flags & RXE_POOL_NO_ALLOC)) {
>> -            type->cache =
>> -                kmem_cache_create(type->name, size,
>> -                          RXE_POOL_ALIGN,
>> -                          RXE_POOL_CACHE_FLAGS, NULL);
>> -            if (!type->cache) {
>> -                pr_err("Unable to init kmem cache for %s\n",
>> -                       type->name);
>> -                err = -ENOMEM;
>> -                goto err1;
>> -            }
>> -        }
>> -    }
>> -
>> -    return 0;
>> -
>> -err1:
>> -    rxe_cache_clean(i);
>> -
>> -    return err;
>> -}
>> -
>> -void rxe_cache_exit(void)
>> -{
>> -    rxe_cache_clean(RXE_NUM_TYPES);
>> -}
>> -
>>   static int rxe_pool_init_index(struct rxe_pool *pool, u32 max, u32 min)
>>   {
>>       int err = 0;
>> @@ -381,7 +325,7 @@ void *rxe_alloc(struct rxe_pool *pool)
>>       if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
>>           goto out_cnt;
>>
>> -    elem = kmem_cache_zalloc(pool_cache(pool),
>> +    elem = kzalloc(rxe_type_info[pool->type].size,
>>                    (pool->flags & RXE_POOL_ATOMIC) ?
>>                    GFP_ATOMIC : GFP_KERNEL);
>>       if (!elem)
>> @@ -443,7 +387,7 @@ void rxe_elem_release(struct kref *kref)
>>           pool->cleanup(elem);
>>
>>       if (!(pool->flags & RXE_POOL_NO_ALLOC))
>> -        kmem_cache_free(pool_cache(pool), elem);
>> +        kfree(elem);
>>       atomic_dec(&pool->num_elem);
>>       ib_device_put(&pool->rxe->ib_dev);
>>       rxe_pool_put(pool);
>> diff --git a/drivers/infiniband/sw/rxe/rxe_pool.h b/drivers/infiniband/sw/rxe/rxe_pool.h
>> index 64d92be3f060..3d722aae5f15 100644
>> --- a/drivers/infiniband/sw/rxe/rxe_pool.h
>> +++ b/drivers/infiniband/sw/rxe/rxe_pool.h
>> @@ -42,7 +42,6 @@ struct rxe_type_info {
>>       u32            min_index;
>>       size_t            key_offset;
>>       size_t            key_size;
>> -    struct kmem_cache    *cache;
>>   };
>>
>>   extern struct rxe_type_info rxe_type_info[];
>> @@ -96,12 +95,6 @@ struct rxe_pool {
>>       } key;
>>   };
>>
>> -/* initialize slab caches for managed objects */
>> -int rxe_cache_init(void);
>> -
>> -/* cleanup slab caches for managed objects */
>> -void rxe_cache_exit(void);
>> -
>>   /* initialize a pool of objects with given limit on
>>    * number of elements. gets parameters from rxe_type_info
>>    * pool elements will be allocated out of a slab cache
> 
> 
There is a regression in rxe caused by the hardened usercopy patches. It leads to a kernel warning the first time a QP is created after each boot. The origin has been discussed in several emails between Leon, myself, and the list. There are a lot of ways to eliminate the warning, but so far there has been resistance to any of these fixes.

As far as I can tell there is no performance hit from moving from kmem_cache to kzalloc. (kzalloc and kmalloc just use predefined caches with objects that are powers of 2 in size.) I am waiting for Leon to express an opinion on this solution. He also has a proposal to allocate QP objects in the core (like PD, etc.).
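
For reference, a rough sketch of one of those alternative fixes: keep the per-type slab caches but whitelist the user-copied region with kmem_cache_create_usercopy(), so hardened usercopy stops warning. The rxe_cache_create() helper name and the useroffset/usersize values below are placeholders, not the actual rxe layout:

        /* Sketch only: whitelists the whole object for user copies;
         * a real fix would restrict this to the part of each object
         * that is actually copied to/from user space.
         */
        static struct kmem_cache *rxe_cache_create(struct rxe_type_info *type)
        {
                size_t size = ALIGN(type->size, RXE_POOL_ALIGN);

                return kmem_cache_create_usercopy(type->name, size,
                                                  RXE_POOL_ALIGN,
                                                  RXE_POOL_CACHE_FLAGS,
                                                  0,    /* useroffset (placeholder) */
                                                  size, /* usersize (placeholder) */
                                                  NULL);
        }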

Bob


