3.14-stable review patch.  If anyone has any objections, please let me know.

------------------

From: Sagi Grimberg <sagig@xxxxxxxxxxxx>

commit 19e2090fb246ca21b3e569ead51a6a7a1748eadd upstream.

Take the isert_conn pointer from cm_id->qp->qp_context. This will
allow us to know that the cm_id context is always the network
portal. This will make the cm_id event check (connection or
network portal) more reliable.

In order to avoid a NULL dereference in cma_id->qp->qp_context,
we destroy the qp after we destroy the cm_id (and make the
dereference safe). Session establishment/teardown sequences can
happen in parallel, so we should take into account that
connected_handler might race with the connection teardown flow.

Also, protect the isert_conn->conn_device->active_qps decrement
within the error path during QP creation failure and the normal
teardown path in isert_connect_release().

Squashed:

iser-target: Decrement completion context active_qps in error flow

Signed-off-by: Sagi Grimberg <sagig@xxxxxxxxxxxx>
Signed-off-by: Nicholas Bellinger <nab@xxxxxxxxxxxxxxx>
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>

---
 drivers/infiniband/ulp/isert/ib_isert.c |   31 +++++++++++++++++++------------
 1 file changed, 19 insertions(+), 12 deletions(-)

--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -136,12 +136,18 @@ isert_conn_setup_qp(struct isert_conn *i
 	ret = rdma_create_qp(cma_id, isert_conn->conn_pd, &attr);
 	if (ret) {
 		pr_err("rdma_create_qp failed for cma_id %d\n", ret);
-		return ret;
+		goto err;
 	}
 	isert_conn->conn_qp = cma_id->qp;
 	pr_debug("rdma_create_qp() returned success >>>>>>>>>>>>>>>>>>>>>>>>>.\n");
 
 	return 0;
+err:
+	mutex_lock(&device_list_mutex);
+	device->cq_active_qps[min_index]--;
+	mutex_unlock(&device_list_mutex);
+
+	return ret;
 }
 
 static void
@@ -527,7 +533,6 @@ isert_connect_request(struct rdma_cm_id
 	spin_lock_init(&isert_conn->conn_lock);
 	INIT_LIST_HEAD(&isert_conn->conn_fr_pool);
 
-	cma_id->context = isert_conn;
 	isert_conn->conn_cm_id = cma_id;
 	isert_conn->responder_resources = event->param.conn.responder_resources;
 	isert_conn->initiator_depth = event->param.conn.initiator_depth;
@@ -649,18 +654,20 @@ isert_connect_release(struct isert_conn
 	if (device && device->use_fastreg)
 		isert_conn_free_fastreg_pool(isert_conn);
 
+	isert_free_rx_descriptors(isert_conn);
+	rdma_destroy_id(isert_conn->conn_cm_id);
+
 	if (isert_conn->conn_qp) {
 		cq_index = ((struct isert_cq_desc *)
 			isert_conn->conn_qp->recv_cq->cq_context)->cq_index;
 		pr_debug("isert_connect_release: cq_index: %d\n", cq_index);
+		mutex_lock(&device_list_mutex);
 		isert_conn->conn_device->cq_active_qps[cq_index]--;
+		mutex_unlock(&device_list_mutex);
 
-		rdma_destroy_qp(isert_conn->conn_cm_id);
+		ib_destroy_qp(isert_conn->conn_qp);
 	}
 
-	isert_free_rx_descriptors(isert_conn);
-	rdma_destroy_id(isert_conn->conn_cm_id);
-
 	ib_dereg_mr(isert_conn->conn_mr);
 	ib_dealloc_pd(isert_conn->conn_pd);
 
@@ -683,7 +690,7 @@ isert_connect_release(struct isert_conn
 static void
 isert_connected_handler(struct rdma_cm_id *cma_id)
 {
-	struct isert_conn *isert_conn = cma_id->context;
+	struct isert_conn *isert_conn = cma_id->qp->qp_context;
 
 	pr_info("conn %p\n", isert_conn);
 
@@ -761,16 +768,16 @@ isert_conn_terminate(struct isert_conn *
 static int
 isert_disconnected_handler(struct rdma_cm_id *cma_id)
 {
+	struct iscsi_np *np = cma_id->context;
+	struct isert_np *isert_np = np->np_context;
 	struct isert_conn *isert_conn;
 
-	if (!cma_id->qp) {
-		struct isert_np *isert_np = cma_id->context;
-
+	if (isert_np->np_cm_id == cma_id) {
 		isert_np->np_cm_id = NULL;
 		return -1;
 	}
 
-	isert_conn = (struct isert_conn *)cma_id->context;
+	isert_conn = cma_id->qp->qp_context;
 
 	mutex_lock(&isert_conn->conn_mutex);
 	isert_conn_terminate(isert_conn);
@@ -785,7 +792,7 @@ isert_disconnected_handler(struct rdma_c
 static void
 isert_connect_error(struct rdma_cm_id *cma_id)
 {
-	struct isert_conn *isert_conn = (struct isert_conn *)cma_id->context;
+	struct isert_conn *isert_conn = cma_id->qp->qp_context;
 
 	isert_put_conn(isert_conn);
 }
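
As a side note for review, the core convention this patch establishes is:
cm_id->context always points at the network portal, and per-connection state
is reached through cm_id->qp->qp_context. The stand-alone C sketch below
(hypothetical struct and variable names, plain userspace code, not the kernel
sources) models why this makes the listener-vs-connection event check
reliable: the listening cm_id is identified by pointer comparison against the
portal's np_cm_id instead of by testing cma_id->qp for NULL.

    #include <stdio.h>

    /* Hypothetical stand-ins for the RDMA-CM objects involved. */
    struct qp     { void *qp_context; };              /* per-connection state */
    struct cm_id  { void *context; struct qp *qp; };  /* context: the portal  */
    struct portal { struct cm_id *np_cm_id; };        /* listening cm_id      */
    struct conn   { const char *name; };

    /*
     * Models isert_disconnected_handler() after the patch: the portal is
     * always recovered from cm_id->context; if the event did not hit the
     * listener, the connection is recovered from cm_id->qp->qp_context.
     */
    static int disconnected(struct cm_id *id)
    {
            struct portal *np = id->context;

            if (np->np_cm_id == id) {       /* event on the listening cm_id */
                    np->np_cm_id = NULL;
                    return -1;
            }

            struct conn *c = id->qp->qp_context;
            printf("terminating %s\n", c->name);
            return 0;
    }

    int main(void)
    {
            struct portal np;
            struct cm_id listen_id = { .context = &np, .qp = NULL };
            struct conn c = { .name = "conn0" };
            struct qp qp = { .qp_context = &c };
            struct cm_id conn_id = { .context = &np, .qp = &qp };

            np.np_cm_id = &listen_id;
            disconnected(&conn_id);     /* prints "terminating conn0"      */
            disconnected(&listen_id);   /* clears np.np_cm_id, returns -1  */
            return 0;
    }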
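The other two rules the patch enforces, destroy the cm_id before the QP (so
no CM event handler can still run and dereference cm_id->qp->qp_context) and
decrement the shared cq_active_qps counter only under device_list_mutex, can
be sketched the same way. Again hypothetical names, with a pthread mutex
standing in for the kernel mutex:

    #include <pthread.h>
    #include <stdio.h>

    #define MAX_CQ 4

    /* Hypothetical model: cq_active_qps[] is shared device state, so the
     * QP-creation error path and the release path must both decrement it
     * under the same device_list_mutex. */
    static pthread_mutex_t device_list_mutex = PTHREAD_MUTEX_INITIALIZER;
    static int cq_active_qps[MAX_CQ];

    struct conn {
            int has_cm_id;   /* stands in for isert_conn->conn_cm_id */
            int has_qp;      /* stands in for isert_conn->conn_qp    */
            int cq_index;
    };

    static void drop_cq_slot(int cq_index)
    {
            pthread_mutex_lock(&device_list_mutex);
            cq_active_qps[cq_index]--;
            pthread_mutex_unlock(&device_list_mutex);
    }

    /* Mirrors the ordering isert_connect_release() adopts: tear down the
     * cm_id first, then account for and destroy the QP. */
    static void connect_release(struct conn *c)
    {
            c->has_cm_id = 0;                   /* rdma_destroy_id() */
            if (c->has_qp) {
                    drop_cq_slot(c->cq_index);
                    c->has_qp = 0;              /* ib_destroy_qp()   */
            }
    }

    int main(void)
    {
            struct conn c = { .has_cm_id = 1, .has_qp = 1, .cq_index = 0 };

            cq_active_qps[0] = 1;               /* taken at QP creation */
            connect_release(&c);
            printf("cq 0 active qps: %d\n", cq_active_qps[0]);  /* 0 */
            return 0;
    }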