[PATCH 1/3] qla2xxx/tcm_qla2xxx: Add active I/O session shutdown logic

From: Nicholas Bellinger <nab@xxxxxxxxxxxxxxx>

This patch adds active I/O shutdown logic into qla_target.c and tcm_qla2xxx
starting with a new qla_tgt_sess->sess_cmd_list.  Each qla_tgt_cmd descriptor
is added to the list within the incoming tcm_qla2xxx_handle_cmd() path, and
removed within the tcm_qla2xxx_release_cmd() descriptor release path.  This
includes checks within the incoming ATIO path in qla_tgt_handle_cmd_for_atio()
and qla24xx_atio_pkt() to determine if qla_tgt_sess->tearing_down or
qla_tgt->tgt_stop has been set to signal endpoint or session shutdown.
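
As a rough sketch of the tracking this introduces (same names as in the diff
below; the real release path also consults the cmd_free/tearing_down
accounting flags), commands enter and leave the per-session list under
sess->sess_cmd_lock:

	/* incoming path, tcm_qla2xxx_handle_cmd(), after transport_init_se_cmd() */
	spin_lock_irqsave(&sess->sess_cmd_lock, flags);
	list_add_tail(&cmd->cmd_list, &sess->sess_cmd_list);
	spin_unlock_irqrestore(&sess->sess_cmd_lock, flags);

	/* normal release path, tcm_qla2xxx_release_cmd(), before qla_tgt_free_cmd() */
	spin_lock_irqsave(&sess->sess_cmd_lock, flags);
	list_del(&cmd->cmd_list);
	spin_unlock_irqrestore(&sess->sess_cmd_lock, flags);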

This patch also adds qla_target.c:qla_tgt_wait_for_cmds() to splice the
list and perform accounting for the descriptors that are currently outstanding
and active.  This includes checkpoints in the tcm_qla2xxx_free_cmd() and
tcm_qla2xxx_release_cmd() release paths to signal when individual descriptors
have been accounted for, and are ready to be released via qla_tgt_free_cmd()
directly in qla_tgt_wait_for_cmds().
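
In simplified form (eliding the cmd_free/cmd_stop_free accounting and the
optional transport_wait_for_tasks() call shown in the full diff), the wait
loop splices the session list and blocks on each descriptor's completion:

	LIST_HEAD(tmp_list);

	spin_lock_irqsave(&sess->sess_cmd_lock, flags);
	list_splice_init(&sess->sess_cmd_list, &tmp_list);
	spin_unlock_irqrestore(&sess->sess_cmd_lock, flags);

	while (!list_empty(&tmp_list)) {
		cmd = list_entry(tmp_list.next, struct qla_tgt_cmd, cmd_list);
		list_del(&cmd->cmd_list);
		/* release path signals this once the descriptor is accounted for */
		wait_for_completion(&cmd->cmd_free_comp);
		qla_tgt_free_cmd(cmd);
	}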

The qla_tgt_wait_for_cmds() logic is called from within qla_tgt_free_session_done()
without hardware_lock held, and can be invoked from either configfs shutdown or
qla_tgt_del_sess_work_fn() context.  Finally, the qla_tgt_clear_tgt_db() call
within qla_tgt_stop_phase1() has been moved until after qla_tgt->sess_del_work
and qla_tgt->sess_works_list have been drained, in order to allow outstanding
session work to complete before the teardown.
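
The corresponding call site in qla_tgt_free_session_done() drops hardware_lock
around the wait, since wait_for_completion() may sleep:

	/* entered with ha->hardware_lock held */
	sess->tearing_down = 1;
	spin_unlock_irq(&ha->hardware_lock);
	qla_tgt_wait_for_cmds(sess);
	spin_lock_irq(&ha->hardware_lock);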

Reported-by: Roland Dreier <roland@xxxxxxxxxxxxxxx>
Cc: Madhuranath Iyengar <mni@xxxxxxxxxxxxxxxxxxxxx>
Signed-off-by: Nicholas Bellinger <nab@xxxxxxxxxxxxxxxxxxxxx>
---
 drivers/scsi/qla2xxx/qla_target.c               |  121 ++++++++++++++++++++---
 drivers/scsi/qla2xxx/qla_target.h               |    8 ++
 drivers/target/tcm_qla2xxx/tcm_qla2xxx_fabric.c |   49 +++++++++-
 3 files changed, 165 insertions(+), 13 deletions(-)

diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 2d8a883..08ec48f 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -369,6 +369,70 @@ void qla_tgt_response_pkt_all_vps(struct scsi_qla_host *vha, response_t *pkt)
 
 }
 
+static void qla_tgt_wait_for_cmds(struct qla_tgt_sess *sess)
+{
+	LIST_HEAD(tmp_list);
+	struct qla_tgt_cmd *cmd;
+	struct se_cmd *se_cmd;
+	unsigned long flags;
+	int cmd_free;
+
+	spin_lock_irqsave(&sess->sess_cmd_lock, flags);
+	list_splice_init(&sess->sess_cmd_list, &tmp_list);
+	spin_unlock_irqrestore(&sess->sess_cmd_lock, flags);
+
+	while (!list_empty(&tmp_list)) {
+
+		cmd = list_entry(tmp_list.next, struct qla_tgt_cmd, cmd_list);
+		DEBUG22(qla_printk(KERN_INFO, sess->vha->hw, "Waiting for cmd:"
+			" %p\n", cmd));
+
+		if ((atomic_read(&cmd->cmd_free) != 0) ||
+		    (atomic_read(&cmd->cmd_stop_free) != 0))	
+			cmd_free = 1;
+		else {
+			cmd_free = 0;
+			atomic_set(&cmd->cmd_free_comp_set, 1);
+			smp_mb__after_atomic_dec();
+		}
+		list_del(&cmd->cmd_list);
+
+		DEBUG22(qla_printk(KERN_INFO, sess->vha->hw, "Waiting for cmd: %p,"
+				" cmd_free: %d\n", cmd, cmd_free));
+
+		se_cmd = &cmd->se_cmd;
+
+		if (!cmd_free) {
+			if (se_cmd->transport_wait_for_tasks) {
+				DEBUG22(qla_printk(KERN_INFO, sess->vha->hw, "Before"
+					" se_cmd->transport_wait_for_tasks cmd:"
+					" %p, se_cmd: %p\n", cmd, se_cmd));
+				se_cmd->transport_wait_for_tasks(se_cmd, 0, 0);
+
+				DEBUG22(qla_printk(KERN_INFO, sess->vha->hw, "After"
+					" se_cmd->transport_wait_for_tasks ----------->\n"));
+			}
+		}
+		
+		DEBUG22(qla_printk(KERN_INFO, sess->vha->hw, "Before"
+			" wait_for_completion(&cmd->cmd_free_comp); cmd: %p,"
+			" se_cmd: %p\n", cmd, se_cmd));
+		wait_for_completion(&cmd->cmd_free_comp);
+		DEBUG22(qla_printk(KERN_INFO, sess->vha->hw, "After"
+			" wait_for_completion(&cmd->cmd_free_comp); cmd: %p,"
+			" se_cmd: %p\n", cmd, se_cmd));
+
+		atomic_set(&cmd->cmd_free, 0);
+		smp_mb__after_atomic_dec();
+
+		qla_tgt_free_cmd(cmd);
+
+		DEBUG22(qla_printk(KERN_INFO, sess->vha->hw, "After"
+			" qla_tgt_free_cmd --------------------->\n"));
+	}
+
+}
+
 
 /* ha->hardware_lock supposed to be held on entry */
 static void qla_tgt_free_session_done(struct qla_tgt_sess *sess)
@@ -378,6 +442,12 @@ static void qla_tgt_free_session_done(struct qla_tgt_sess *sess)
 	struct qla_hw_data *ha = vha->hw;
 
 	tgt = sess->tgt;
+
+	sess->tearing_down = 1;
+	spin_unlock_irq(&ha->hardware_lock);
+	qla_tgt_wait_for_cmds(sess);
+	spin_lock_irq(&ha->hardware_lock);
+
 	/*
 	 * Release the target session for FC Nexus from fabric module code.
 	 */
@@ -547,10 +617,11 @@ static void qla_tgt_schedule_sess_for_deletion(struct qla_tgt_sess *sess)
 /* ha->hardware_lock supposed to be held on entry */
 static void qla_tgt_clear_tgt_db(struct qla_tgt *tgt, bool local_only)
 {
-	struct qla_tgt_sess *sess, *sess_tmp;
+	struct qla_tgt_sess *sess;
 
-	list_for_each_entry_safe(sess, sess_tmp, &tgt->sess_list,
-					sess_list_entry) {
+	while (!list_empty(&tgt->sess_list)) {
+		sess = list_first_entry(&tgt->sess_list, struct qla_tgt_sess,
+					sess_list_entry);
 		if (local_only) {
 			if (!sess->local)
 				continue;
@@ -860,6 +931,9 @@ static struct qla_tgt_sess *qla_tgt_create_sess(
 	sess->loop_id = fcport->loop_id;
 	sess->local = local;
 
+	INIT_LIST_HEAD(&sess->sess_cmd_list);
+	spin_lock_init(&sess->sess_cmd_lock);	
+
 	DEBUG22(qla_printk(KERN_INFO, ha, "Adding sess %p to tgt %p via"
 		" ->check_initiator_node_acl()\n", sess, ha->qla_tgt));
 
@@ -1042,7 +1116,6 @@ void qla_tgt_stop_phase1(struct qla_tgt *tgt)
 	mutex_lock(&ha->tgt_mutex);
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 	tgt->tgt_stop = 1;
-	qla_tgt_clear_tgt_db(tgt, false);
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 	mutex_unlock(&ha->tgt_mutex);
 
@@ -1057,6 +1130,12 @@ void qla_tgt_stop_phase1(struct qla_tgt *tgt)
 	}
 	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
 
+	mutex_lock(&ha->tgt_mutex);
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	qla_tgt_clear_tgt_db(tgt, false);
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+	mutex_unlock(&ha->tgt_mutex);
+
 	DEBUG22(qla_printk(KERN_INFO, ha, "Waiting for tgt %p: list_empty(sess_list)=%d "
 		"sess_count=%d\n", tgt, list_empty(&tgt->sess_list),
 		tgt->sess_count));
@@ -3213,6 +3292,9 @@ static int qla_tgt_handle_cmd_for_atio(struct scsi_qla_host *vha, atio_t *atio)
 		return -ENOMEM;
 	}
 
+	INIT_LIST_HEAD(&cmd->cmd_list);
+	init_completion(&cmd->cmd_free_comp);
+
 	memcpy(&cmd->atio.atio2x, atio, sizeof(*atio));
 	cmd->state = QLA_TGT_STATE_NEW;
 	cmd->locked_rsp = 1;
@@ -3242,6 +3324,9 @@ static int qla_tgt_handle_cmd_for_atio(struct scsi_qla_host *vha, atio_t *atio)
 		}
 	}
 
+	if (sess->tearing_down)
+		goto out_free_cmd;
+
 	res = qla_tgt_send_cmd_to_target(vha, cmd, sess);
 	if (unlikely(res != 0))
 		goto out_free_cmd;
@@ -4323,10 +4408,15 @@ static void qla24xx_atio_pkt(struct scsi_qla_host *vha, atio7_entry_t *atio)
 				qla24xx_send_term_exchange(vha, NULL, atio, 1);
 #endif
 			} else {
-				printk(KERN_INFO "qla_target(%d): Unable to send "
-				   "command to target, sending BUSY status\n",
-				   vha->vp_idx);
-				qla24xx_send_busy(vha, atio, SAM_STAT_BUSY);
+				if (tgt->tgt_stop) {
+					printk(KERN_INFO "qla_target: Unable to send "
+					"command to target for req, ignoring \n");
+				} else {
+					printk(KERN_INFO "qla_target(%d): Unable to send "
+					   "command to target, sending BUSY status\n",
+					   vha->vp_idx);
+					qla24xx_send_busy(vha, atio, SAM_STAT_BUSY);
+				}
 			}
 		}
 		break;
@@ -4420,10 +4510,17 @@ static void qla_tgt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
 				qla2xxx_send_term_exchange(vha, NULL, atio, 1);
 #endif
 			} else {
-				printk(KERN_INFO "qla_target(%d): Unable to send "
-					"command to target, sending BUSY status\n",
-					vha->vp_idx);
-				qla2xxx_send_busy(vha, atio);
+				if (tgt->tgt_stop) {
+					printk(KERN_INFO "qla_target: Unable to send "
+						"command to target, sending TERM EXCHANGE"
+						" for rsp\n");
+					qla2xxx_send_term_exchange(vha, NULL, atio, 1);
+				} else {
+					printk(KERN_INFO "qla_target(%d): Unable to send "
+						"command to target, sending BUSY status\n",
+						vha->vp_idx);
+					qla2xxx_send_busy(vha, atio);
+				}
 			}
 		}
 	}
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index 3f9a6ed..42d5479 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -876,6 +876,7 @@ struct qla_tgt_sess {
 	unsigned int conf_compl_supported:1;
 	unsigned int deleted:1;
 	unsigned int local:1;
+	unsigned int tearing_down:1;
 
 	struct se_session *se_sess;
 	struct scsi_qla_host *vha;
@@ -887,6 +888,9 @@ struct qla_tgt_sess {
 	unsigned long expires;
 	struct list_head del_list_entry;
 
+	struct list_head sess_cmd_list;
+	spinlock_t sess_cmd_lock;
+
 	uint8_t port_name[WWN_SIZE];
 };
 
@@ -895,6 +899,9 @@ struct qla_tgt_cmd {
 	int state;
 	int locked_rsp;
 	atomic_t cmd_stop_free;
+	atomic_t cmd_free;
+	atomic_t cmd_free_comp_set;
+	struct completion cmd_free_comp;
 	struct se_cmd se_cmd;
 	/* Sense buffer that will be mapped into outgoing status */
 	unsigned char sense_buffer[TRANSPORT_SENSE_BUFFER];
@@ -917,6 +924,7 @@ struct qla_tgt_cmd {
 	uint16_t loop_id;		    /* to save extra sess dereferences */
 	struct qla_tgt *tgt;		    /* to save extra sess dereferences */
 	struct scsi_qla_host *vha;
+	struct list_head cmd_list;
 
 	union {
 		atio7_entry_t atio7;
diff --git a/drivers/target/tcm_qla2xxx/tcm_qla2xxx_fabric.c b/drivers/target/tcm_qla2xxx/tcm_qla2xxx_fabric.c
index 22d4051..ff39978 100644
--- a/drivers/target/tcm_qla2xxx/tcm_qla2xxx_fabric.c
+++ b/drivers/target/tcm_qla2xxx/tcm_qla2xxx_fabric.c
@@ -401,6 +401,23 @@ void tcm_qla2xxx_free_cmd(struct qla_tgt_cmd *cmd)
 		return;
 	}
 
+	if (cmd->tgt->tgt_stop) {
+		pr_warn("tcm_qla2xxx_free_cmd: Detected tgt_stop"
+			" for cmd: %p !!!!!!\n", cmd);
+		complete(&cmd->cmd_free_comp);
+		return;
+	}
+
+	if (cmd->sess->tearing_down) {
+		pr_warn("tcm_qla2xxx_free_cmd: Detected tearing_down"
+			" for cmd: %p !!!!!!\n", cmd);
+		complete(&cmd->cmd_free_comp);
+		return;
+	}
+
+	atomic_set(&cmd->cmd_free, 1);
+	smp_mb__after_atomic_dec();
+
 	transport_generic_free_cmd_intr(&cmd->se_cmd);
 }
 
@@ -432,17 +449,41 @@ void tcm_qla2xxx_check_stop_free(struct se_cmd *se_cmd)
  */
 void tcm_qla2xxx_release_cmd(struct se_cmd *se_cmd)
 {
-	struct qla_tgt_cmd *cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
+	struct qla_tgt_cmd *cmd;
+	struct qla_tgt_sess *sess;
+	unsigned long flags;
 
 	if (se_cmd->se_tmr_req != NULL)
 		return;
 
+	cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
+	sess = cmd->sess;
+
+	if (!sess)
+		BUG();
+
 	while (atomic_read(&cmd->cmd_stop_free) != 1) {
 		pr_warn("Hit atomic_read(&cmd->cmd_stop_free)=1"
 				" in tcm_qla2xxx_release_cmd\n");
 		cpu_relax();
 	}
 
+
+	spin_lock_irqsave(&sess->sess_cmd_lock, flags);
+	if (cmd->tgt->tgt_stop || sess->tearing_down) {
+		if (atomic_read(&cmd->cmd_free_comp_set) ||
+		    atomic_read(&cmd->cmd_free)) {
+			pr_warn("Detected shutdown, calling complete("
+				"&cmd->cmd_free_comp): cmd: %p\n", cmd);
+			spin_unlock_irqrestore(&sess->sess_cmd_lock, flags);
+			complete(&cmd->cmd_free_comp);
+			return;
+		}
+	}
+	if (atomic_read(&cmd->cmd_free))
+		list_del(&cmd->cmd_list);
+	spin_unlock_irqrestore(&sess->sess_cmd_lock, flags);
+
 	qla_tgt_free_cmd(cmd);
 }
 
@@ -599,6 +640,7 @@ int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd,
 	struct se_session *se_sess;
 	struct se_portal_group *se_tpg;
 	struct qla_tgt_sess *sess;
+	unsigned long flags;
 
 	sess = cmd->sess;
 	if (!sess) {
@@ -619,6 +661,11 @@ int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd,
 	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
 			data_length, data_dir,
 			fcp_task_attr, &cmd->sense_buffer[0]);
+
+	spin_lock_irqsave(&sess->sess_cmd_lock, flags);
+	list_add_tail(&cmd->cmd_list, &sess->sess_cmd_list);
+	spin_unlock_irqrestore(&sess->sess_cmd_lock, flags);
+
 	/*
 	 * Signal BIDI usage with T_TASK(cmd)->t_tasks_bidi
 	 */
-- 
1.7.2.5
