Currently the rxe driver uses a spin_lock_irqsave call to protect the
producer end of a completion queue from multiple QPs which may be
sharing the cq. Since all accesses to the cq are from tasklets, this is
overkill and a simple spin_lock will be sufficient.

Signed-off-by: Bob Pearson <rpearsonhpe@xxxxxxxxx>
---
 drivers/infiniband/sw/rxe/rxe_cq.c | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/drivers/infiniband/sw/rxe/rxe_cq.c b/drivers/infiniband/sw/rxe/rxe_cq.c
index 76534bc66cb6..408ffe2e6f14 100644
--- a/drivers/infiniband/sw/rxe/rxe_cq.c
+++ b/drivers/infiniband/sw/rxe/rxe_cq.c
@@ -104,13 +104,12 @@ int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited)
 	struct ib_event ev;
 	int full;
 	void *addr;
-	unsigned long flags;
 
-	spin_lock_irqsave(&cq->cq_lock, flags);
+	spin_lock(&cq->cq_lock);
 
 	full = queue_full(cq->queue, QUEUE_TYPE_TO_CLIENT);
 	if (unlikely(full)) {
-		spin_unlock_irqrestore(&cq->cq_lock, flags);
+		spin_unlock(&cq->cq_lock);
 		if (cq->ibcq.event_handler) {
 			ev.device = cq->ibcq.device;
 			ev.element.cq = &cq->ibcq;
@@ -126,7 +125,7 @@ int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited)
 
 	queue_advance_producer(cq->queue, QUEUE_TYPE_TO_CLIENT);
 
-	spin_unlock_irqrestore(&cq->cq_lock, flags);
+	spin_unlock(&cq->cq_lock);
 
 	if ((cq->notify == IB_CQ_NEXT_COMP) ||
 	    (cq->notify == IB_CQ_SOLICITED && solicited)) {

base-commit: cbdae01d8b517b81ed271981395fee8ebd08ba7d
-- 
2.34.1
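
For illustration only, and not part of the patch above: when every contended
access to a lock happens in tasklet (softirq) context, no hard-IRQ path can
ever take the same lock, so a plain spin_lock()/spin_unlock() pair is enough
and the irqsave/irqrestore variants only add overhead. The sketch below uses
hypothetical names (demo_cq, demo_post_tasklet, demo_tasklet); only the
locking pattern mirrors what rxe_cq_post does after this change.

/*
 * Illustrative sketch, assuming the producer side is only ever touched
 * from tasklet (softirq) context, as the commit message states for rxe.
 */
#include <linux/spinlock.h>
#include <linux/interrupt.h>

struct demo_cq {
	spinlock_t lock;
	unsigned int producer_index;
};

static struct demo_cq demo_cq = {
	.lock = __SPIN_LOCK_UNLOCKED(demo_cq.lock),
};

/*
 * Runs in softirq context; only other tasklets ever contend for the lock,
 * so there is no need to disable interrupts while holding it.
 */
static void demo_post_tasklet(struct tasklet_struct *t)
{
	spin_lock(&demo_cq.lock);
	demo_cq.producer_index++;
	spin_unlock(&demo_cq.lock);
}

static DECLARE_TASKLET(demo_tasklet, demo_post_tasklet);

Scheduling the tasklet (tasklet_schedule(&demo_tasklet)) queues
demo_post_tasklet to run in softirq context. If some caller could also take
the lock from hard-IRQ or process context, the irqsave or _bh variants would
still be required on those paths.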