In situations such as bond failover, the new session establishment
implicitly invokes the termination of the old connection. So we don't
want to wait for the old connection wait_conn to completely terminate
before we accept the new connection and post a login response.

The solution is to defer the comp_wait completion and the conn_put to
a work item, so wait_conn will effectively be non-blocking (flush
errors are assumed to come very fast). We allocate isert_release_wq
with WQ_UNBOUND and WQ_UNBOUND_MAX_ACTIVE to spread the concurrency
of release work items.

Cc: <stable@xxxxxxxxxxxxxxx> # 3.10+
Reported-by: Slava Shwartsman <valyushash@xxxxxxxxx>
Signed-off-by: Sagi Grimberg <sagig@xxxxxxxxxxxx>
---
 drivers/infiniband/ulp/isert/ib_isert.c | 42 +++++++++++++++++++++++++------
 drivers/infiniband/ulp/isert/ib_isert.h |  1 +
 2 files changed, 35 insertions(+), 8 deletions(-)
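Patch notes (not for the commit message): a minimal self-contained
sketch of the deferred-release pattern this patch applies -- all
demo_* names are illustrative placeholders, not isert code. The
caller arms a work item and returns without blocking; the work item
does the blocking wait on an unbound workqueue:

#include <linux/module.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_release_wq;

struct demo_conn {
	struct completion done;		/* fires when the old conn drains */
	struct work_struct release_work;
};

static struct demo_conn *demo_conn_alloc(void)
{
	struct demo_conn *conn = kzalloc(sizeof(*conn), GFP_KERNEL);

	/* the teardown path (flush-error analogue) calls complete(&conn->done) */
	if (conn)
		init_completion(&conn->done);
	return conn;
}

static void demo_release_work(struct work_struct *work)
{
	struct demo_conn *conn = container_of(work, struct demo_conn,
					      release_work);

	/* The blocking wait now happens here, off the caller's context */
	wait_for_completion(&conn->done);
	kfree(conn);
}

/* Caller path: arm the work item and return immediately */
static void demo_wait_conn(struct demo_conn *conn)
{
	INIT_WORK(&conn->release_work, demo_release_work);
	queue_work(demo_release_wq, &conn->release_work);
}

static int __init demo_init(void)
{
	/*
	 * WQ_UNBOUND with WQ_UNBOUND_MAX_ACTIVE: release works can pile
	 * up during mass failover, so let them run concurrently instead
	 * of serializing on a single kworker.
	 */
	demo_release_wq = alloc_workqueue("demo_release_wq", WQ_UNBOUND,
					  WQ_UNBOUND_MAX_ACTIVE);
	return demo_release_wq ? 0 : -ENOMEM;
}

static void __exit demo_exit(void)
{
	destroy_workqueue(demo_release_wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");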
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index ef97197..13dd729 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -41,6 +41,7 @@ static DEFINE_MUTEX(device_list_mutex);
 static LIST_HEAD(device_list);
 static struct workqueue_struct *isert_rx_wq;
 static struct workqueue_struct *isert_comp_wq;
+static struct workqueue_struct *isert_release_wq;
 
 static void
 isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
@@ -3318,6 +3319,24 @@ isert_free_np(struct iscsi_np *np)
 	kfree(isert_np);
 }
 
+static void isert_release_work(struct work_struct *work)
+{
+	struct isert_conn *isert_conn = container_of(work,
+						     struct isert_conn,
+						     release_work);
+
+	pr_info("Starting release conn %p\n", isert_conn);
+
+	wait_for_completion(&isert_conn->conn_wait);
+
+	mutex_lock(&isert_conn->conn_mutex);
+	isert_conn->state = ISER_CONN_DOWN;
+	mutex_unlock(&isert_conn->conn_mutex);
+
+	pr_info("Destroying conn %p\n", isert_conn);
+	isert_put_conn(isert_conn);
+}
+
 static void isert_wait_conn(struct iscsi_conn *conn)
 {
 	struct isert_conn *isert_conn = conn->context;
@@ -3337,14 +3356,9 @@ static void isert_wait_conn(struct iscsi_conn *conn)
 	mutex_unlock(&isert_conn->conn_mutex);
 
 	wait_for_completion(&isert_conn->conn_wait_comp_err);
-	wait_for_completion(&isert_conn->conn_wait);
-
-	mutex_lock(&isert_conn->conn_mutex);
-	isert_conn->state = ISER_CONN_DOWN;
-	mutex_unlock(&isert_conn->conn_mutex);
 
-	pr_info("Destroying conn %p\n", isert_conn);
-	isert_put_conn(isert_conn);
+	INIT_WORK(&isert_conn->release_work, isert_release_work);
+	queue_work(isert_release_wq, &isert_conn->release_work);
 }
 
 static void isert_free_conn(struct iscsi_conn *conn)
@@ -3392,10 +3406,21 @@ static int __init isert_init(void)
 		goto destroy_rx_wq;
 	}
 
+	isert_release_wq = alloc_workqueue("isert_release_wq", WQ_UNBOUND,
+					   WQ_UNBOUND_MAX_ACTIVE);
+	if (!isert_release_wq) {
+		pr_err("Unable to allocate isert_release_wq\n");
+		ret = -ENOMEM;
+		goto destroy_comp_wq;
+	}
+
 	iscsit_register_transport(&iser_target_transport);
-	pr_debug("iSER_TARGET[0] - Loaded iser_target_transport\n");
+	pr_info("iSER_TARGET[0] - Loaded iser_target_transport\n");
+
 	return 0;
 
+destroy_comp_wq:
+	destroy_workqueue(isert_comp_wq);
 destroy_rx_wq:
 	destroy_workqueue(isert_rx_wq);
 	return ret;
@@ -3404,6 +3429,7 @@ destroy_rx_wq:
 static void __exit isert_exit(void)
 {
 	flush_scheduled_work();
+	destroy_workqueue(isert_release_wq);
 	destroy_workqueue(isert_comp_wq);
 	destroy_workqueue(isert_rx_wq);
 	iscsit_unregister_transport(&iser_target_transport);
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
index 5e9148d..f8b4208 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.h
+++ b/drivers/infiniband/ulp/isert/ib_isert.h
@@ -150,6 +150,7 @@ struct isert_conn {
 	int			conn_fr_pool_size;
 	/* lock to protect fastreg pool */
 	spinlock_t		conn_lock;
+	struct work_struct	release_work;
 #define ISERT_COMP_BATCH_COUNT 8
 	int			conn_comp_batch;
 	struct llist_head	conn_comp_llist;
-- 
1.7.1