[PATCH for-next 4/7] RDMA/rxe: Replace pool_lock by xa_lock

In rxe_pool.c, xa_alloc_bh(), xa_erase_bh() and their variants already take
the xarray's internal spinlock; each one expands to

	spin_lock_bh()
	__xa_alloc()
	spin_unlock_bh()

so also holding pool_lock around them locks twice per operation. Replace
pool_lock with xa_lock: take xa_lock_bh() in all the places previously
protected by pool_lock, and call the __xa_*() variants inside those critical
sections so each operation is locked exactly once. Dropping the double
locking is a performance improvement.

Signed-off-by: Bob Pearson <rpearsonhpe@xxxxxxxxx>
---
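Note for reviewers: a condensed before/after sketch of the allocation path,
pieced together from the hunks below (not meant to be applied; the names are
the ones already in rxe_pool.c). xa_alloc_cyclic_bh() is the stock
<linux/xarray.h> helper that takes xa_lock_bh() internally; the __xa_*()
variants expect the caller to already hold xa_lock.

	/* before: pool_lock wraps xa_alloc_cyclic_bh(), which itself
	 * takes xa_lock_bh() internally -- two locks per allocation
	 */
	write_lock_bh(&pool->pool_lock);
	err = xa_alloc_cyclic_bh(&pool->xarray.xa, &elem->index, elem,
				 pool->xarray.limit,
				 &pool->xarray.next, GFP_KERNEL);
	write_unlock_bh(&pool->pool_lock);

	/* after: xa_lock_bh() is the only lock taken; __xa_alloc_cyclic()
	 * runs under it without re-acquiring
	 */
	xa_lock_bh(&pool->xarray.xa);
	err = __xa_alloc_cyclic(&pool->xarray.xa, &elem->index, elem,
				pool->xarray.limit,
				&pool->xarray.next, GFP_KERNEL);
	xa_unlock_bh(&pool->xarray.xa);

The same single-lock pattern replaces the read_lock_bh()/write_lock_bh()
pairs throughout the file, and xa_erase() becomes __xa_erase() since
__rxe_fini() now runs with xa_lock held.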
 drivers/infiniband/sw/rxe/rxe_pool.c | 54 ++++++++++++++--------------
 1 file changed, 26 insertions(+), 28 deletions(-)

diff --git a/drivers/infiniband/sw/rxe/rxe_pool.c b/drivers/infiniband/sw/rxe/rxe_pool.c
index ba5c600fa9e8..1b7269dd6d9e 100644
--- a/drivers/infiniband/sw/rxe/rxe_pool.c
+++ b/drivers/infiniband/sw/rxe/rxe_pool.c
@@ -133,8 +133,6 @@ int rxe_pool_init(
 
 	atomic_set(&pool->num_elem, 0);
 
-	rwlock_init(&pool->pool_lock);
-
 	if (info->flags & RXE_POOL_XARRAY) {
 		xa_init_flags(&pool->xarray.xa, XA_FLAGS_ALLOC);
 		pool->xarray.limit.max = info->max_index;
@@ -292,9 +290,9 @@ static void *__rxe_alloc_locked(struct rxe_pool *pool)
 	elem->obj = obj;
 
 	if (pool->flags & RXE_POOL_XARRAY) {
-		err = xa_alloc_cyclic_bh(&pool->xarray.xa, &elem->index, elem,
-					 pool->xarray.limit,
-					 &pool->xarray.next, GFP_KERNEL);
+		err = __xa_alloc_cyclic(&pool->xarray.xa, &elem->index, elem,
+					pool->xarray.limit,
+					&pool->xarray.next, GFP_KERNEL);
 		if (err)
 			goto err;
 	}
@@ -359,9 +357,9 @@ void *rxe_alloc(struct rxe_pool *pool)
 {
 	void *obj;
 
-	write_lock_bh(&pool->pool_lock);
+	xa_lock_bh(&pool->xarray.xa);
 	obj = rxe_alloc_locked(pool);
-	write_unlock_bh(&pool->pool_lock);
+	xa_unlock_bh(&pool->xarray.xa);
 
 	return obj;
 }
@@ -370,9 +368,9 @@ void *rxe_alloc_with_key(struct rxe_pool *pool, void *key)
 {
 	void *obj;
 
-	write_lock_bh(&pool->pool_lock);
+	xa_lock_bh(&pool->xarray.xa);
 	obj = rxe_alloc_with_key_locked(pool, key);
-	write_unlock_bh(&pool->pool_lock);
+	xa_unlock_bh(&pool->xarray.xa);
 
 	return obj;
 }
@@ -381,7 +379,7 @@ int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_elem *elem)
 {
 	int err;
 
-	write_lock_bh(&pool->pool_lock);
+	xa_lock_bh(&pool->xarray.xa);
 	if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
 		goto err;
 
@@ -389,9 +387,9 @@ int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_elem *elem)
 	elem->obj = (u8 *)elem - pool->elem_offset;
 
 	if (pool->flags & RXE_POOL_XARRAY) {
-		err = xa_alloc_cyclic_bh(&pool->xarray.xa, &elem->index, elem,
-					 pool->xarray.limit,
-					 &pool->xarray.next, GFP_KERNEL);
+		err = __xa_alloc_cyclic(&pool->xarray.xa, &elem->index, elem,
+					pool->xarray.limit,
+					&pool->xarray.next, GFP_KERNEL);
 		if (err)
 			goto err;
 	}
@@ -403,13 +401,13 @@ int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_elem *elem)
 	}
 
 	refcount_set(&elem->refcnt, 1);
-	write_unlock_bh(&pool->pool_lock);
+	xa_unlock_bh(&pool->xarray.xa);
 
 	return 0;
 
 err:
 	atomic_dec(&pool->num_elem);
-	write_unlock_bh(&pool->pool_lock);
+	xa_unlock_bh(&pool->xarray.xa);
 	return -EINVAL;
 }
 
@@ -442,9 +440,9 @@ static void *__rxe_get_index(struct rxe_pool *pool, u32 index)
 {
 	void *obj;
 
-	read_lock_bh(&pool->pool_lock);
+	xa_lock_bh(&pool->xarray.xa);
 	obj = __rxe_get_index_locked(pool, index);
-	read_unlock_bh(&pool->pool_lock);
+	xa_unlock_bh(&pool->xarray.xa);
 
 	return obj;
 }
@@ -465,9 +463,9 @@ static void *__rxe_get_xarray(struct rxe_pool *pool, u32 index)
 {
 	void *obj;
 
-	read_lock_bh(&pool->pool_lock);
+	xa_lock_bh(&pool->xarray.xa);
 	obj = __rxe_get_xarray_locked(pool, index);
-	read_unlock_bh(&pool->pool_lock);
+	xa_unlock_bh(&pool->xarray.xa);
 
 	return obj;
 }
@@ -523,9 +521,9 @@ void *rxe_pool_get_key(struct rxe_pool *pool, void *key)
 {
 	void *obj;
 
-	read_lock_bh(&pool->pool_lock);
+	xa_lock_bh(&pool->xarray.xa);
 	obj = rxe_pool_get_key_locked(pool, key);
-	read_unlock_bh(&pool->pool_lock);
+	xa_unlock_bh(&pool->xarray.xa);
 
 	return obj;
 }
@@ -546,9 +544,9 @@ int __rxe_add_ref(struct rxe_pool_elem *elem)
 	struct rxe_pool *pool = elem->pool;
 	int ret;
 
-	read_lock_bh(&pool->pool_lock);
+	xa_lock_bh(&pool->xarray.xa);
 	ret = __rxe_add_ref_locked(elem);
-	read_unlock_bh(&pool->pool_lock);
+	xa_unlock_bh(&pool->xarray.xa);
 
 	return ret;
 }
@@ -569,9 +567,9 @@ int __rxe_drop_ref(struct rxe_pool_elem *elem)
 	struct rxe_pool *pool = elem->pool;
 	int ret;
 
-	read_lock_bh(&pool->pool_lock);
+	xa_lock_bh(&pool->xarray.xa);
 	ret = __rxe_drop_ref_locked(elem);
-	read_unlock_bh(&pool->pool_lock);
+	xa_unlock_bh(&pool->xarray.xa);
 
 	return ret;
 }
@@ -584,7 +582,7 @@ static int __rxe_fini(struct rxe_pool_elem *elem)
 	done = refcount_dec_if_one(&elem->refcnt);
 	if (done) {
 		if (pool->flags & RXE_POOL_XARRAY)
-			xa_erase(&pool->xarray.xa, elem->index);
+			__xa_erase(&pool->xarray.xa, elem->index);
 		if (pool->flags & RXE_POOL_INDEX)
 			rxe_drop_index(elem);
 		if (pool->flags & RXE_POOL_KEY)
@@ -621,9 +619,9 @@ int __rxe_fini_ref(struct rxe_pool_elem *elem)
 	struct rxe_pool *pool = elem->pool;
 	int ret;
 
-	read_lock_bh(&pool->pool_lock);
+	xa_lock_bh(&pool->xarray.xa);
 	ret = __rxe_fini(elem);
-	read_unlock_bh(&pool->pool_lock);
+	xa_unlock_bh(&pool->xarray.xa);
 
 	if (!ret) {
 		if (pool->cleanup)
-- 
2.30.2



