This is a note to let you know that I've just added the patch titled

    RDMA/rxe: Remove __rxe_do_task()

to the 6.2-stable tree which can be found at:

    http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=summary

The filename of the patch is:
     rdma-rxe-remove-__rxe_do_task.patch
and it can be found in the queue-6.2 subdirectory.

If you, or anyone else, feels it should not be added to the stable
tree, please let <stable@xxxxxxxxxxxxxxx> know about it.


commit 33266522a29a06dcec4b50b834bd07443433f907
Author: Bob Pearson <rpearsonhpe@xxxxxxxxx>
Date:   Sat Mar 4 11:45:32 2023 -0600

    RDMA/rxe: Remove __rxe_do_task()

    [ Upstream commit 960ebe97e5238565d15063c8f4d1b2108efe2e65 ]

    The subroutine __rxe_do_task is not thread safe and it has no way to
    guarantee that the tasks, which are designed with the assumption that
    they are non-reentrant, are not reentered. All of its uses are
    non-performance critical.

    This patch replaces calls to __rxe_do_task with calls to
    rxe_sched_task. It also removes irrelevant or unneeded if tests.

    Instead of calling the task machinery, a single call to the tasklet
    function (rxe_requester, etc.) is sufficient to drain the queues if
    task execution has been disabled or stopped.

    Together these changes allow the removal of __rxe_do_task.

    Link: https://lore.kernel.org/r/20230304174533.11296-7-rpearsonhpe@xxxxxxxxx
    Signed-off-by: Ian Ziemba <ian.ziemba@xxxxxxx>
    Signed-off-by: Bob Pearson <rpearsonhpe@xxxxxxxxx>
    Signed-off-by: Jason Gunthorpe <jgg@xxxxxxxxxx>
    Stable-dep-of: b2b1ddc45745 ("RDMA/rxe: Fix the error "trying to register non-static key in rxe_cleanup_task"")
    Signed-off-by: Sasha Levin <sashal@xxxxxxxxxx>

diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
index c954dd9394baf..49891f8ed4e61 100644
--- a/drivers/infiniband/sw/rxe/rxe_qp.c
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -473,29 +473,23 @@ static void rxe_qp_reset(struct rxe_qp *qp)
 {
         /* stop tasks from running */
         rxe_disable_task(&qp->resp.task);
-
-        /* stop request/comp */
-        if (qp->sq.queue) {
-                if (qp_type(qp) == IB_QPT_RC)
-                        rxe_disable_task(&qp->comp.task);
-                rxe_disable_task(&qp->req.task);
-        }
+        rxe_disable_task(&qp->comp.task);
+        rxe_disable_task(&qp->req.task);
 
         /* move qp to the reset state */
         qp->req.state = QP_STATE_RESET;
         qp->comp.state = QP_STATE_RESET;
         qp->resp.state = QP_STATE_RESET;
 
-        /* let state machines reset themselves drain work and packet queues
-         * etc.
-         */
-        __rxe_do_task(&qp->resp.task);
+        /* drain work and packet queues */
+        rxe_requester(qp);
+        rxe_completer(qp);
+        rxe_responder(qp);
 
-        if (qp->sq.queue) {
-                __rxe_do_task(&qp->comp.task);
-                __rxe_do_task(&qp->req.task);
+        if (qp->rq.queue)
+                rxe_queue_reset(qp->rq.queue);
+        if (qp->sq.queue)
                 rxe_queue_reset(qp->sq.queue);
-        }
 
         /* cleanup attributes */
         atomic_set(&qp->ssn, 0);
@@ -518,13 +512,8 @@ static void rxe_qp_reset(struct rxe_qp *qp)
 
         /* reenable tasks */
         rxe_enable_task(&qp->resp.task);
-
-        if (qp->sq.queue) {
-                if (qp_type(qp) == IB_QPT_RC)
-                        rxe_enable_task(&qp->comp.task);
-
-                rxe_enable_task(&qp->req.task);
-        }
+        rxe_enable_task(&qp->comp.task);
+        rxe_enable_task(&qp->req.task);
 }
 
 /* drain the send queue */
@@ -533,10 +522,7 @@ static void rxe_qp_drain(struct rxe_qp *qp)
         if (qp->sq.queue) {
                 if (qp->req.state != QP_STATE_DRAINED) {
                         qp->req.state = QP_STATE_DRAIN;
-                        if (qp_type(qp) == IB_QPT_RC)
-                                rxe_sched_task(&qp->comp.task);
-                        else
-                                __rxe_do_task(&qp->comp.task);
+                        rxe_sched_task(&qp->comp.task);
                         rxe_sched_task(&qp->req.task);
                 }
         }
@@ -552,11 +538,7 @@ void rxe_qp_error(struct rxe_qp *qp)
 
         /* drain work and packet queues */
         rxe_sched_task(&qp->resp.task);
-
-        if (qp_type(qp) == IB_QPT_RC)
-                rxe_sched_task(&qp->comp.task);
-        else
-                __rxe_do_task(&qp->comp.task);
+        rxe_sched_task(&qp->comp.task);
         rxe_sched_task(&qp->req.task);
 }
 
@@ -773,24 +755,20 @@ static void rxe_qp_do_cleanup(struct work_struct *work)
         qp->valid = 0;
         qp->qp_timeout_jiffies = 0;
-        rxe_cleanup_task(&qp->resp.task);
 
         if (qp_type(qp) == IB_QPT_RC) {
                 del_timer_sync(&qp->retrans_timer);
                 del_timer_sync(&qp->rnr_nak_timer);
         }
 
+        rxe_cleanup_task(&qp->resp.task);
         rxe_cleanup_task(&qp->req.task);
         rxe_cleanup_task(&qp->comp.task);
 
         /* flush out any receive wr's or pending requests */
-        if (qp->req.task.func)
-                __rxe_do_task(&qp->req.task);
-
-        if (qp->sq.queue) {
-                __rxe_do_task(&qp->comp.task);
-                __rxe_do_task(&qp->req.task);
-        }
+        rxe_requester(qp);
+        rxe_completer(qp);
+        rxe_responder(qp);
 
         if (qp->sq.queue)
                 rxe_queue_cleanup(qp->sq.queue);
 
diff --git a/drivers/infiniband/sw/rxe/rxe_task.c b/drivers/infiniband/sw/rxe/rxe_task.c
index 959cc6229a34e..a67f485454436 100644
--- a/drivers/infiniband/sw/rxe/rxe_task.c
+++ b/drivers/infiniband/sw/rxe/rxe_task.c
@@ -6,19 +6,6 @@
 
 #include "rxe.h"
 
-int __rxe_do_task(struct rxe_task *task)
-
-{
-        int ret;
-
-        while ((ret = task->func(task->qp)) == 0)
-                ;
-
-        task->ret = ret;
-
-        return ret;
-}
-
 /*
  * this locking is due to a potential race where
  * a second caller finds the task already running
diff --git a/drivers/infiniband/sw/rxe/rxe_task.h b/drivers/infiniband/sw/rxe/rxe_task.h
index 41efd5fd49b03..99585e40cef92 100644
--- a/drivers/infiniband/sw/rxe/rxe_task.h
+++ b/drivers/infiniband/sw/rxe/rxe_task.h
@@ -39,12 +39,6 @@ int rxe_init_task(struct rxe_task *task, struct rxe_qp *qp,
 /* cleanup task */
 void rxe_cleanup_task(struct rxe_task *task);
 
-/*
- * raw call to func in loop without any checking
- * can call when tasklets are disabled
- */
-int __rxe_do_task(struct rxe_task *task);
-
 void rxe_run_task(struct rxe_task *task);
 
 void rxe_sched_task(struct rxe_task *task);
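

For anyone reviewing the backport, the hazard the patch removes is easiest to
see outside the kernel. The sketch below is illustrative only: a userspace
pthreads model, not the rxe code, and every name in it (do_task,
bad_direct_call, demo_work, the task_state values) is invented for the
example. It contrasts the unchecked call-in-a-loop pattern of the removed
__rxe_do_task with the kind of busy/armed state machine that
rxe_run_task/rxe_sched_task rely on, where a second caller that finds the
task running only re-arms it, so a non-reentrant work function is never
entered twice.

/* Illustrative model only -- build with: cc -pthread example.c
 * Not kernel code; a simplified analogue of the serialization that the
 * rxe task machinery provides and that __rxe_do_task bypassed.
 */
#include <pthread.h>
#include <stdio.h>

enum task_state { STATE_IDLE, STATE_BUSY, STATE_ARMED };

struct task {
        pthread_mutex_t lock;
        enum task_state state;
        int (*func)(void *arg);  /* 0 means "more work", nonzero when drained */
        void *arg;
};

/* Serialized dispatch: a second caller that finds the task busy only
 * arms it and returns; the thread already inside the loop then runs one
 * more pass, so func() is never entered twice concurrently.
 */
static void do_task(struct task *task)
{
        int again;

        pthread_mutex_lock(&task->lock);
        if (task->state != STATE_IDLE) {
                task->state = STATE_ARMED;  /* ask the runner for another pass */
                pthread_mutex_unlock(&task->lock);
                return;
        }
        task->state = STATE_BUSY;
        pthread_mutex_unlock(&task->lock);

        do {
                while (task->func(task->arg) == 0)
                        ;
                pthread_mutex_lock(&task->lock);
                again = (task->state == STATE_ARMED);
                task->state = again ? STATE_BUSY : STATE_IDLE;
                pthread_mutex_unlock(&task->lock);
        } while (again);
}

/* What the removed helper amounted to: loop on func() with no state
 * check at all, so two threads calling this would reenter func().
 */
static void bad_direct_call(struct task *task)
{
        while (task->func(task->arg) == 0)
                ;
}

static int demo_work(void *arg)
{
        int *budget = arg;

        return (*budget)-- > 0 ? 0 : -1;
}

int main(void)
{
        int budget = 3;
        struct task t = { .state = STATE_IDLE, .func = demo_work, .arg = &budget };

        pthread_mutex_init(&t.lock, NULL);
        do_task(&t);             /* drains safely via the state machine */
        budget = 3;
        bad_direct_call(&t);     /* also drains, but nothing excludes a second thread */
        printf("final state=%d budget=%d\n", t.state, budget);
        pthread_mutex_destroy(&t.lock);
        return 0;
}

Note the design point this models: arming an already-busy task collapses any
number of concurrent wakeups into a single extra pass of the work loop, which
is why a plain rxe_sched_task call can replace every non-performance-critical
__rxe_do_task call site in the patch above.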