Jinpu,

I updated the changes as below, and tested them for a thousand rounds.

From d441c0e2496c1795b5af2b6b8ae4672203d6af3c Mon Sep 17 00:00:00 2001
From: Li Zhijian <lizhijian@xxxxxxxxxxx>
Date: Thu, 20 Apr 2023 17:28:28 +0800
Subject: [PATCH] RDMA/rtrs: Fix rxe_dealloc_pd warning

In the current design:
1. The PD and clt_path->s.dev are shared among connections.
2. Every con[n]'s cleanup phase calls destroy_con_cq_qp().
3. clt_path->s.dev is always decreased in destroy_con_cq_qp(), and when
   clt_path->s.dev becomes zero, it destroys the PD.
4. When con[1] fails to be created, con[1] does not take clt_path->s.dev,
   but it still tries to decrease clt_path->s.dev.

So, in case create_cm(con[0]) succeeds but create_cm(con[1]) fails,
destroy_con_cq_qp(con[1]) will be called first, which will destroy the PD
while this PD is still taken by con[0].

Here, we refactor the error paths of create_cm() and init_conns(), so that
the cleanup is done in the order the resources were created.

Signed-off-by: Li Zhijian <lizhijian@xxxxxxxxxxx>
---
 drivers/infiniband/ulp/rtrs/rtrs-clt.c | 47 +++++++++++---------------
 1 file changed, 19 insertions(+), 28 deletions(-)

diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
index 80abf45a197a..5faf0ecb726b 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
@@ -2040,6 +2040,7 @@ static int rtrs_clt_rdma_cm_handler(struct rdma_cm_id *cm_id,
 	return 0;
 }
 
+/* The caller should do the cleanup in case of error */
 static int create_cm(struct rtrs_clt_con *con)
 {
 	struct rtrs_path *s = con->c.path;
@@ -2062,14 +2063,14 @@ static int create_cm(struct rtrs_clt_con *con)
 	err = rdma_set_reuseaddr(cm_id, 1);
 	if (err != 0) {
 		rtrs_err(s, "Set address reuse failed, err: %d\n", err);
-		goto destroy_cm;
+		return err;
 	}
 	err = rdma_resolve_addr(cm_id, (struct sockaddr *)&clt_path->s.src_addr,
 				(struct sockaddr *)&clt_path->s.dst_addr,
 				RTRS_CONNECT_TIMEOUT_MS);
 	if (err) {
 		rtrs_err(s, "Failed to resolve address, err: %d\n", err);
-		goto destroy_cm;
+		return err;
 	}
 	/*
 	 * Combine connection status and session events. This is needed
@@ -2084,29 +2085,17 @@ static int create_cm(struct rtrs_clt_con *con)
 		if (err == 0)
 			err = -ETIMEDOUT;
 		/* Timedout or interrupted */
-		goto errr;
+		return err;
 	}
 	if (con->cm_err < 0) {
-		err = con->cm_err;
-		goto errr;
+		return con->cm_err;
 	}
 	if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTING) {
 		/* Device removal */
-		err = -ECONNABORTED;
-		goto errr;
+		return -ECONNABORTED;
 	}
 
 	return 0;
-
-errr:
-	stop_cm(con);
-	mutex_lock(&con->con_mutex);
-	destroy_con_cq_qp(con);
-	mutex_unlock(&con->con_mutex);
-destroy_cm:
-	destroy_cm(con);
-
-	return err;
 }
 
 static void rtrs_clt_path_up(struct rtrs_clt_path *clt_path)
@@ -2334,7 +2323,7 @@ static void rtrs_clt_close_work(struct work_struct *work)
 static int init_conns(struct rtrs_clt_path *clt_path)
 {
 	unsigned int cid;
-	int err;
+	int err, i;
 
 	/*
 	 * On every new session connections increase reconnect counter
@@ -2350,10 +2339,8 @@ static int init_conns(struct rtrs_clt_path *clt_path)
 			goto destroy;
 
 		err = create_cm(to_clt_con(clt_path->s.con[cid]));
-		if (err) {
-			destroy_con(to_clt_con(clt_path->s.con[cid]));
+		if (err)
 			goto destroy;
-		}
 	}
 	err = alloc_path_reqs(clt_path);
 	if (err)
@@ -2364,15 +2351,19 @@
 	return 0;
 
 destroy:
-	while (cid--) {
+	/* Make sure we do the cleanup in the order they are created */
+	for (i = 0; i <= cid; i++) {
 		struct rtrs_clt_con *con = to_clt_con(clt_path->s.con[cid]);
 
-		stop_cm(con);
-
-		mutex_lock(&con->con_mutex);
-		destroy_con_cq_qp(con);
-		mutex_unlock(&con->con_mutex);
-		destroy_cm(con);
+		if (!con)
+			break;
+		if (con->c.cm_id) {
+			stop_cm(con);
+			mutex_lock(&con->con_mutex);
+			destroy_con_cq_qp(con);
+			mutex_unlock(&con->con_mutex);
+			destroy_cm(con);
+		}
 		destroy_con(con);
 	}
 	/*
-- 
2.29.2


On 20/04/2023 10:00, Li Zhijian wrote:
> On 19/04/2023 21:20, Jinpu Wang wrote:
>> On Wed, Apr 19, 2023 at 11:53 AM Zhijian Li (Fujitsu)
>> <lizhijian@xxxxxxxxxxx> wrote:
>>>
>>> Leon, Guoqing
>>>
>>>
>>> On 18/04/2023 15:57, Leon Romanovsky wrote:
>>>>>>> Currently, without this patch:
>>>>>>> 1. The PD and clt_path->s.dev are shared among connections.
>>>>>>> 2. Every con[n]'s cleanup phase calls destroy_con_cq_qp().
>>>>>>> 3. clt_path->s.dev is always decreased in destroy_con_cq_qp(), and when
>>>>>>>    clt_path->s.dev becomes zero, it destroys the PD.
>>>>>>> 4. When con[1] fails to be created, con[1] will not take clt_path->s.dev, but it still tries to decrease clt_path->s.dev <<< it's wrong to do that.
>>>>>> So please fix it by making sure that failure to create con[1] will
>>>>>> release resources which were allocated. If con[1] didn't increase
>>>>>> s.dev_ref, it shouldn't decrease it either.
>>>>> You are right, the current patch does exactly that.
>>>>> It introduced a con-owning flag 'has_dev' to indicate whether this con has taken s.dev,
>>>>> so that its cleanup phase will only decrease s.dev properly.
>>>> The has_dev flag is a workaround and not a solution. In a proper error unwind
>>>> sequence, you won't need an extra flag.
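
For readers skimming the thread, here is a minimal, self-contained sketch of the unwinding order the patch switches to, written as plain userspace C rather than kernel code; the fake_con / fake_create_con / fake_create_cm names and the NUM_CONS constant are made up for illustration only. It mirrors the new destroy: loop in init_conns(): walk the connections from the first one created, stop at the first slot that was never allocated, and only tear down CM state for connections that actually completed that step.

```c
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define NUM_CONS 4

struct fake_con {
	bool has_cm;	/* stands in for con->c.cm_id being set */
};

static struct fake_con *cons[NUM_CONS];

static int fake_create_con(unsigned int cid)
{
	cons[cid] = calloc(1, sizeof(*cons[cid]));
	return cons[cid] ? 0 : -1;
}

/* Simulate create_cm() failing for the second connection. */
static int fake_create_cm(unsigned int cid)
{
	if (cid == 1)
		return -1;
	cons[cid]->has_cm = true;
	return 0;
}

static int init_all(void)
{
	unsigned int cid;
	int err = 0, i;

	for (cid = 0; cid < NUM_CONS; cid++) {
		err = fake_create_con(cid);
		if (err)
			goto destroy;
		err = fake_create_cm(cid);
		if (err)
			goto destroy;
	}
	return 0;

destroy:
	/* Clean up in the order the connections were created. */
	for (i = 0; i <= (int)cid; i++) {
		struct fake_con *con = cons[i];

		if (!con)
			break;
		if (con->has_cm)
			printf("con[%d]: tearing down cm/cq/qp\n", i);
		printf("con[%d]: freeing con\n", i);
		free(con);
		cons[i] = NULL;
	}
	return err;
}

int main(void)
{
	return init_all() ? 1 : 0;
}
```

Because each cleanup step is guarded by what the connection actually acquired, no extra ownership flag like 'has_dev' is needed.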