[PATCH 06/21] target: Rework abort and LUN reset handling

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



Instead of invoking target driver callback functions from the
context that handles an abort or LUN reset task management function,
only set the abort flag from that context and perform the actual
abort handling from the context of the regular command processing
flow. This approach has the following advantages:
- The task management function code becomes much easier to read and
  to verify since the number of potential race conditions against
  the command processing flow is strongly reduced.
- It is no longer needed to store the command state into the command
  itself since that information is no longer needed from the context
  where a task management function is processed.

Note: the target_remove_from_state_list() in transport_cmd_check_stop()
has been moved up such that it is called without t_state_lock held.
This is necessary to avoid triggering lock inversion against
core_tmr_drain_state_list(), which now locks execute_task_lock and
t_state_lock in that order.

Signed-off-by: Bart Van Assche <bart.vanassche@xxxxxxxxxxx>
---
 drivers/infiniband/ulp/srpt/ib_srpt.c  |   5 -
 drivers/target/target_core_internal.h  |   3 -
 drivers/target/target_core_sbc.c       |   2 +-
 drivers/target/target_core_tmr.c       | 225 ++++++++++++++++--------------
 drivers/target/target_core_transport.c | 242 +++++++++------------------------
 include/target/target_core_base.h      |  11 +-
 6 files changed, 192 insertions(+), 296 deletions(-)

diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 8b6c5d7..93edd69 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -1352,11 +1352,6 @@ static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
 		break;
 	case SRPT_STATE_NEED_DATA:
 		/* DMA_TO_DEVICE (write) - RDMA read error. */
-
-		/* XXX(hch): this is a horrible layering violation.. */
-		spin_lock_irqsave(&ioctx->cmd.t_state_lock, flags);
-		ioctx->cmd.transport_state &= ~CMD_T_ACTIVE;
-		spin_unlock_irqrestore(&ioctx->cmd.t_state_lock, flags);
 		break;
 	case SRPT_STATE_CMD_RSP_SENT:
 		/*
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
index 3351287..f264b46 100644
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -98,7 +98,6 @@ int	init_se_kmem_caches(void);
 void	release_se_kmem_caches(void);
 u32	scsi_get_new_index(scsi_index_t);
 void	transport_subsystem_check_init(void);
-void	transport_cmd_finish_abort(struct se_cmd *, int);
 unsigned char *transport_dump_cmd_direction(struct se_cmd *);
 void	transport_dump_dev_state(struct se_device *, char *, int *);
 void	transport_dump_dev_info(struct se_device *, struct se_lun *,
@@ -107,9 +106,7 @@ void	transport_dump_vpd_proto_id(struct t10_vpd *, unsigned char *, int);
 int	transport_dump_vpd_assoc(struct t10_vpd *, unsigned char *, int);
 int	transport_dump_vpd_ident_type(struct t10_vpd *, unsigned char *, int);
 int	transport_dump_vpd_ident(struct t10_vpd *, unsigned char *, int);
-bool	target_stop_cmd(struct se_cmd *cmd, unsigned long *flags);
 void	transport_clear_lun_ref(struct se_lun *);
-void	transport_send_task_abort(struct se_cmd *);
 sense_reason_t	target_cmd_size_check(struct se_cmd *cmd, unsigned int size);
 void	target_qf_do_work(struct work_struct *work);
 bool	target_check_wce(struct se_device *dev);
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index f871bde..2b947c1 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -534,7 +534,7 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool succes
 
 	spin_lock_irq(&cmd->t_state_lock);
 	cmd->t_state = TRANSPORT_PROCESSING;
-	cmd->transport_state |= CMD_T_ACTIVE|CMD_T_BUSY|CMD_T_SENT;
+	cmd->transport_state |= CMD_T_SENT;
 	spin_unlock_irq(&cmd->t_state_lock);
 
 	__target_execute_cmd(cmd);
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index 288d551..f9b5497 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -77,23 +77,6 @@ void core_tmr_release_req(struct se_tmr_req *tmr)
 	kfree(tmr);
 }
 
-static void core_tmr_handle_tas_abort(
-	struct se_node_acl *tmr_nacl,
-	struct se_cmd *cmd,
-	int tas)
-{
-	bool remove = true;
-	/*
-	 * TASK ABORTED status (TAS) bit support
-	 */
-	if ((tmr_nacl && (tmr_nacl != cmd->se_sess->se_node_acl)) && tas) {
-		remove = false;
-		transport_send_task_abort(cmd);
-	}
-
-	transport_cmd_finish_abort(cmd, remove);
-}
-
 static int target_check_cdb_and_preempt(struct list_head *list,
 		struct se_cmd *cmd)
 {
@@ -109,15 +92,100 @@ static int target_check_cdb_and_preempt(struct list_head *list,
 	return 1;
 }
 
+/**
+ * __target_abort_cmd - set abort flag if it has not yet been set
+ * @cmd:	Command to be aborted.
+ * @tmr_nacl:	Node ACL through which the task management function had been
+ *		received. NULL if the TASK ABORTED status (TAS) bit must not be set.
+ * @uncond_cb:  Whether to call the callback function upon command completion
+ *              if the abort flag had already been set.
+ * @done:	Callback function to be called upon command completion.
+ * @done_arg:	Argument to be passed to the function @done.
+ */
+static bool __target_abort_cmd(struct se_cmd *cmd, struct se_node_acl *tmr_nacl,
+			       bool uncond_cb, void (*done)(void *),
+			       void *done_arg)
+{
+	bool ret = false;
+
+	lockdep_assert_held(&cmd->t_state_lock);
+
+	if (!(cmd->transport_state & CMD_T_ABORTED)) {
+		cmd->transport_state |= CMD_T_ABORTED;
+		cmd->send_abort_response = tmr_nacl &&
+				tmr_nacl != cmd->se_sess->se_node_acl;
+		__target_add_cmd_cb(cmd, done, done_arg);
+		ret = true;
+	} else if (uncond_cb) {
+		__target_add_cmd_cb(cmd, done, done_arg);
+	}
+
+	return ret;
+}
+
+static bool target_abort_cmd(struct se_cmd *cmd, struct se_node_acl *tmr_nacl,
+			     bool uncond_cb, void (*done)(void *),
+			     void *done_arg)
+{
+	unsigned long flags;
+	bool ret;
+
+	spin_lock_irqsave(&cmd->t_state_lock, flags);
+	ret = __target_abort_cmd(cmd, tmr_nacl, uncond_cb, done, done_arg);
+	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+
+	return ret;
+}
+
+struct cmd_abort_ctx {
+	atomic_t		cmd_count;
+	struct completion	done;
+};
+
+static void target_init_cmd_abort_ctx(struct cmd_abort_ctx *ctx)
+{
+	atomic_set(&ctx->cmd_count, 1);
+	init_completion(&ctx->done);
+}
+
+static void target_cmd_abort_done(void *arg)
+{
+	struct cmd_abort_ctx *ctx = arg;
+
+	if (atomic_dec_return(&ctx->cmd_count) == 0)
+		complete(&ctx->done);
+}
+
+static void target_finish_abort(struct cmd_abort_ctx *ctx, const char *action,
+				void (*show)(void *), void *arg)
+{
+	target_cmd_abort_done(ctx);
+	while (wait_for_completion_interruptible_timeout(&ctx->done, 30 * HZ)
+	       <= 0) {
+		pr_info("%s: waiting for %d commands to finish\n", action,
+			atomic_read(&ctx->cmd_count));
+		if (show)
+			show(arg);
+	}
+}
+
 void core_tmr_abort_task(
 	struct se_device *dev,
 	struct se_tmr_req *tmr,
 	struct se_session *se_sess)
 {
+	struct se_node_acl *tmr_nacl = NULL;
+	struct cmd_abort_ctx abort_ctx;
 	struct se_cmd *se_cmd;
 	unsigned long flags;
 	u64 ref_tag;
 
+	if (dev->dev_attrib.emulate_tas && tmr && tmr->task_cmd &&
+	    tmr->task_cmd->se_sess)
+		tmr_nacl = tmr->task_cmd->se_sess->se_node_acl;
+
+	target_init_cmd_abort_ctx(&abort_ctx);
+
 	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
 	list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list) {
 
@@ -135,32 +203,13 @@ void core_tmr_abort_task(
 		printk("ABORT_TASK: Found referenced %s task_tag: %llu\n",
 			se_cmd->se_tfo->get_fabric_name(), ref_tag);
 
-		spin_lock(&se_cmd->t_state_lock);
-		if (se_cmd->transport_state & CMD_T_COMPLETE) {
-			printk("ABORT_TASK: ref_tag: %llu already complete,"
-			       " skipping\n", ref_tag);
-			spin_unlock(&se_cmd->t_state_lock);
-			spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
-			goto out;
-		}
-		se_cmd->transport_state |= CMD_T_ABORTED;
-		spin_unlock(&se_cmd->t_state_lock);
-
-		list_del_init(&se_cmd->se_cmd_list);
-		kref_get(&se_cmd->cmd_kref);
-		spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
-
-		cancel_work_sync(&se_cmd->work);
-		transport_wait_for_tasks(se_cmd);
-
-		target_put_sess_cmd(se_cmd);
-		transport_cmd_finish_abort(se_cmd, true);
-
-		goto out;
+		if (target_abort_cmd(se_cmd, tmr_nacl, false,
+				     target_cmd_abort_done, &abort_ctx))
+			atomic_inc(&abort_ctx.cmd_count);
 	}
 	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
 
-out:
+	target_finish_abort(&abort_ctx, "ABORT TASK", NULL, NULL);
 	pr_info("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for ref_tag %llu\n",
 		tmr->ref_task_tag);
 	tmr->response = TMR_FUNCTION_COMPLETE;
@@ -171,10 +220,18 @@ static void core_tmr_drain_tmr_list(
 	struct se_tmr_req *tmr,
 	struct list_head *preempt_and_abort_list)
 {
-	LIST_HEAD(drain_tmr_list);
+	struct se_node_acl *tmr_nacl = NULL;
+	struct cmd_abort_ctx abort_ctx;
 	struct se_tmr_req *tmr_p, *tmr_pp;
 	struct se_cmd *cmd;
 	unsigned long flags;
+
+	if (dev->dev_attrib.emulate_tas && tmr && tmr->task_cmd &&
+	    tmr->task_cmd->se_sess)
+		tmr_nacl = tmr->task_cmd->se_sess->se_node_acl;
+
+	target_init_cmd_abort_ctx(&abort_ctx);
+
 	/*
 	 * Release all pending and outgoing TMRs aside from the received
 	 * LUN_RESET tmr..
@@ -200,32 +257,27 @@ static void core_tmr_drain_tmr_list(
 		if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd))
 			continue;
 
-		spin_lock(&cmd->t_state_lock);
-		if (!(cmd->transport_state & CMD_T_ACTIVE)) {
-			spin_unlock(&cmd->t_state_lock);
-			continue;
-		}
-		if (cmd->t_state == TRANSPORT_ISTATE_PROCESSING) {
-			spin_unlock(&cmd->t_state_lock);
-			continue;
-		}
-		spin_unlock(&cmd->t_state_lock);
-
-		list_move_tail(&tmr_p->tmr_list, &drain_tmr_list);
+		target_abort_cmd(cmd, tmr_nacl, true, target_cmd_abort_done,
+				 &abort_ctx);
+		atomic_inc(&abort_ctx.cmd_count);
 	}
 	spin_unlock_irqrestore(&dev->se_tmr_lock, flags);
 
-	list_for_each_entry_safe(tmr_p, tmr_pp, &drain_tmr_list, tmr_list) {
-		list_del_init(&tmr_p->tmr_list);
-		cmd = tmr_p->task_cmd;
+	target_finish_abort(&abort_ctx, "LUN RESET (task management)",
+			    NULL, NULL);
+}
 
-		pr_debug("LUN_RESET: %s releasing TMR %p Function: 0x%02x,"
-			" Response: 0x%02x, t_state: %d\n",
-			(preempt_and_abort_list) ? "Preempt" : "", tmr_p,
-			tmr_p->function, tmr_p->response, cmd->t_state);
+static void target_show_dev_cmds(struct se_device *dev)
+{
+	struct se_cmd *cmd;
+	unsigned long flags;
 
-		transport_cmd_finish_abort(cmd, 1);
+	spin_lock_irqsave(&dev->execute_task_lock, flags);
+	list_for_each_entry(cmd, &dev->state_list, state_list_entry) {
+		pr_info("cmd %p: tag %lld; state %d; flags %#x\n", cmd,
+			cmd->tag, cmd->t_state, cmd->transport_state);
 	}
+	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
 }
 
 static void core_tmr_drain_state_list(
@@ -235,10 +287,12 @@ static void core_tmr_drain_state_list(
 	int tas,
 	struct list_head *preempt_and_abort_list)
 {
-	LIST_HEAD(drain_task_list);
+	struct cmd_abort_ctx abort_ctx;
 	struct se_cmd *cmd, *next;
 	unsigned long flags;
 
+	target_init_cmd_abort_ctx(&abort_ctx);
+
 	/*
 	 * Complete outstanding commands with TASK_ABORTED SAM status.
 	 *
@@ -277,49 +331,14 @@ static void core_tmr_drain_state_list(
 		if (prout_cmd == cmd)
 			continue;
 
-		list_move_tail(&cmd->state_list_entry, &drain_task_list);
-		cmd->state_active = false;
+		target_abort_cmd(cmd, tmr_nacl, true, target_cmd_abort_done,
+				 &abort_ctx);
+		atomic_inc(&abort_ctx.cmd_count);
 	}
 	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
 
-	while (!list_empty(&drain_task_list)) {
-		cmd = list_entry(drain_task_list.next, struct se_cmd,
-				 state_list_entry);
-		list_del(&cmd->state_list_entry);
-
-		pr_debug("LUN_RESET: %s cmd: %p"
-			" ITT/CmdSN: 0x%08llx/0x%08x, i_state: %d, t_state: %d"
-			"cdb: 0x%02x\n",
-			(preempt_and_abort_list) ? "Preempt" : "", cmd,
-			cmd->tag, 0,
-			cmd->se_tfo->get_cmd_state(cmd), cmd->t_state,
-			cmd->t_task_cdb[0]);
-		pr_debug("LUN_RESET: ITT[0x%08llx] - pr_res_key: 0x%016Lx"
-			" -- CMD_T_ACTIVE: %d"
-			" CMD_T_STOP: %d CMD_T_SENT: %d\n",
-			cmd->tag, cmd->pr_res_key,
-			(cmd->transport_state & CMD_T_ACTIVE) != 0,
-			(cmd->transport_state & CMD_T_STOP) != 0,
-			(cmd->transport_state & CMD_T_SENT) != 0);
-
-		/*
-		 * If the command may be queued onto a workqueue cancel it now.
-		 *
-		 * This is equivalent to removal from the execute queue in the
-		 * loop above, but we do it down here given that
-		 * cancel_work_sync may block.
-		 */
-		if (cmd->t_state == TRANSPORT_COMPLETE)
-			cancel_work_sync(&cmd->work);
-
-		spin_lock_irqsave(&cmd->t_state_lock, flags);
-		target_stop_cmd(cmd, &flags);
-
-		cmd->transport_state |= CMD_T_ABORTED;
-		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-
-		core_tmr_handle_tas_abort(tmr_nacl, cmd, tas);
-	}
+	target_finish_abort(&abort_ctx, "LUN RESET (all cmds)",
+			    (void *)target_show_dev_cmds, dev);
 }
 
 int core_tmr_lun_reset(
@@ -347,7 +366,7 @@ int core_tmr_lun_reset(
 	 * Determine if this se_tmr is coming from a $FABRIC_MOD
 	 * or struct se_device passthrough..
 	 */
-	if (tmr && tmr->task_cmd && tmr->task_cmd->se_sess) {
+	if (tas && tmr && tmr->task_cmd && tmr->task_cmd->se_sess) {
 		tmr_nacl = tmr->task_cmd->se_sess->se_node_acl;
 		tmr_tpg = tmr->task_cmd->se_sess->se_tpg;
 		if (tmr_nacl && tmr_tpg) {
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 3538a36..3ab737b 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -559,14 +559,8 @@ static void target_remove_from_state_list(struct se_cmd *cmd)
 	if (!dev)
 		return;
 
-	if (cmd->transport_state & CMD_T_BUSY)
-		return;
-
 	spin_lock_irqsave(&dev->execute_task_lock, flags);
-	if (cmd->state_active) {
-		list_del(&cmd->state_list_entry);
-		cmd->state_active = false;
-	}
+	list_del_init(&cmd->state_list_entry);
 	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
 }
 
@@ -575,10 +569,6 @@ static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists,
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&cmd->t_state_lock, flags);
-	if (write_pending)
-		cmd->t_state = TRANSPORT_WRITE_PENDING;
-
 	if (remove_from_lists) {
 		target_remove_from_state_list(cmd);
 
@@ -588,6 +578,11 @@ static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists,
 		cmd->se_lun = NULL;
 	}
 
+	spin_lock_irqsave(&cmd->t_state_lock, flags);
+	if (write_pending)
+		cmd->t_state = TRANSPORT_WRITE_PENDING;
+	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+
 	/*
 	 * Determine if frontend context caller is requesting the stopping of
 	 * this command for frontend exceptions.
@@ -595,30 +590,19 @@ static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists,
 	if (cmd->transport_state & CMD_T_STOP) {
 		pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n",
 			__func__, __LINE__, cmd->tag);
-
-		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-
 		return 1;
 	}
 
-	cmd->transport_state &= ~CMD_T_ACTIVE;
-	if (remove_from_lists) {
-		/*
-		 * Some fabric modules like tcm_loop can release
-		 * their internally allocated I/O reference now and
-		 * struct se_cmd now.
-		 *
-		 * Fabric modules are expected to return '1' here if the
-		 * se_cmd being passed is released at this point,
-		 * or zero if not being released.
-		 */
-		if (cmd->se_tfo->check_stop_free != NULL) {
-			spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-			return cmd->se_tfo->check_stop_free(cmd);
-		}
-	}
+	/*
+	 * Some fabric modules like tcm_loop can release their internally
+	 * allocated I/O reference and struct se_cmd now.
+	 *
+	 * Fabric modules are expected to return '1' here if the se_cmd being
+	 * passed is released at this point, or zero if not being released.
+	 */
+	if (remove_from_lists && cmd->se_tfo->check_stop_free)
+		return cmd->se_tfo->check_stop_free(cmd);
 
-	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 	return 0;
 }
 
@@ -638,23 +622,6 @@ static void transport_lun_remove_cmd(struct se_cmd *cmd)
 		percpu_ref_put(&lun->lun_ref);
 }
 
-void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
-{
-	if (cmd->se_cmd_flags & SCF_SE_LUN_CMD)
-		transport_lun_remove_cmd(cmd);
-	/*
-	 * Allow the fabric driver to unmap any resources before
-	 * releasing the descriptor via TFO->release_cmd()
-	 */
-	if (remove)
-		cmd->se_tfo->aborted_task(cmd);
-
-	if (transport_cmd_check_stop_to_fabric(cmd))
-		return;
-	if (remove)
-		transport_put_cmd(cmd);
-}
-
 static void target_complete_failure_work(struct work_struct *work)
 {
 	struct se_cmd *cmd = container_of(work, struct se_cmd, work);
@@ -686,17 +653,45 @@ static unsigned char *transport_get_sense_buffer(struct se_cmd *cmd)
 	return cmd->sense_buffer;
 }
 
+static void transport_handle_abort(struct se_cmd *cmd)
+{
+	transport_lun_remove_cmd(cmd);
+
+	if (cmd->send_abort_response) {
+		cmd->scsi_status = SAM_STAT_TASK_ABORTED;
+		pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x, ITT: 0x%08llx\n",
+			 cmd->t_task_cdb[0], cmd->tag);
+		trace_target_cmd_complete(cmd);
+		cmd->se_tfo->queue_status(cmd);
+		transport_cmd_check_stop_to_fabric(cmd);
+	} else {
+		/*
+		 * Allow the fabric driver to unmap any resources before
+		 * releasing the descriptor via TFO->release_cmd()
+		 */
+		cmd->se_tfo->aborted_task(cmd);
+		/*
+		 * To do: establish a unit attention condition on the I_T
+		 * nexus associated with cmd. See also the paragraph "Aborting
+		 * commands" in SAM.
+		 */
+		if (transport_cmd_check_stop_to_fabric(cmd) == 0)
+			transport_put_cmd(cmd);
+	}
+}
+
 void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
 {
 	struct se_device *dev = cmd->se_dev;
 	int success = scsi_status == GOOD;
 	unsigned long flags;
 
-	cmd->scsi_status = scsi_status;
-
+	if (cmd->transport_state & CMD_T_ABORTED) {
+		transport_handle_abort(cmd);
+		return;
+	}
 
-	spin_lock_irqsave(&cmd->t_state_lock, flags);
-	cmd->transport_state &= ~CMD_T_BUSY;
+	cmd->scsi_status = scsi_status;
 
 	if (dev && dev->transport->transport_complete) {
 		dev->transport->transport_complete(cmd,
@@ -706,33 +701,17 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
 			success = 1;
 	}
 
-	/*
-	 * See if we are waiting to complete for an exception condition.
-	 */
-	if (cmd->transport_state & CMD_T_REQUEST_STOP) {
-		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-		complete(&cmd->task_stop_comp);
-		return;
-	}
+	spin_lock_irqsave(&cmd->t_state_lock, flags);
+	cmd->t_state = TRANSPORT_COMPLETE;
+	cmd->transport_state |= CMD_T_COMPLETE;
+	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
-	/*
-	 * Check for case where an explicit ABORT_TASK has been received
-	 * and transport_wait_for_tasks() will be waiting for completion..
-	 */
-	if (cmd->transport_state & CMD_T_ABORTED &&
-	    cmd->transport_state & CMD_T_STOP) {
-		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-		return;
-	} else if (!success) {
-		INIT_WORK(&cmd->work, target_complete_failure_work);
-	} else {
+	if (success) {
 		INIT_WORK(&cmd->work, target_complete_ok_work);
+	} else {
+		INIT_WORK(&cmd->work, target_complete_failure_work);
 	}
 
-	cmd->t_state = TRANSPORT_COMPLETE;
-	cmd->transport_state |= (CMD_T_COMPLETE | CMD_T_ACTIVE);
-	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-
 	queue_work(target_completion_wq, &cmd->work);
 }
 EXPORT_SYMBOL(target_complete_cmd);
@@ -760,10 +739,7 @@ static void target_add_to_state_list(struct se_cmd *cmd)
 	unsigned long flags;
 
 	spin_lock_irqsave(&dev->execute_task_lock, flags);
-	if (!cmd->state_active) {
-		list_add_tail(&cmd->state_list_entry, &dev->state_list);
-		cmd->state_active = true;
-	}
+	list_add_tail(&cmd->state_list_entry, &dev->state_list);
 	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
 }
 
@@ -1163,7 +1139,6 @@ void transport_init_se_cmd(
 	INIT_LIST_HEAD(&cmd->state_list_entry);
 	init_completion(&cmd->t_transport_stop_comp);
 	init_completion(&cmd->cmd_wait_comp);
-	init_completion(&cmd->task_stop_comp);
 	spin_lock_init(&cmd->t_state_lock);
 	kref_init(&cmd->cmd_kref);
 	cmd->transport_state = CMD_T_DEV_ACTIVE;
@@ -1176,7 +1151,6 @@ void transport_init_se_cmd(
 	cmd->sense_buffer = sense_buffer;
 
 	cmd->cb_count = 0;
-	cmd->state_active = false;
 }
 EXPORT_SYMBOL(transport_init_se_cmd);
 
@@ -1321,7 +1295,7 @@ int transport_handle_cdb_direct(
 		return -EINVAL;
 	}
 	/*
-	 * Set TRANSPORT_NEW_CMD state and CMD_T_ACTIVE to ensure that
+	 * Set TRANSPORT_NEW_CMD state to ensure that
 	 * outstanding descriptors are handled correctly during shutdown via
 	 * transport_wait_for_tasks()
 	 *
@@ -1329,7 +1303,6 @@ int transport_handle_cdb_direct(
 	 * this to be called for initial descriptor submission.
 	 */
 	cmd->t_state = TRANSPORT_NEW_CMD;
-	cmd->transport_state |= CMD_T_ACTIVE;
 
 	/*
 	 * transport_generic_new_cmd() is already handling QUEUE_FULL,
@@ -1619,33 +1592,6 @@ int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
 EXPORT_SYMBOL(target_submit_tmr);
 
 /*
- * If the cmd is active, request it to be stopped and sleep until it
- * has completed.
- */
-bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags)
-	__releases(&cmd->t_state_lock)
-	__acquires(&cmd->t_state_lock)
-{
-	bool was_active = false;
-
-	if (cmd->transport_state & CMD_T_BUSY) {
-		cmd->transport_state |= CMD_T_REQUEST_STOP;
-		spin_unlock_irqrestore(&cmd->t_state_lock, *flags);
-
-		pr_debug("cmd %p waiting to complete\n", cmd);
-		wait_for_completion(&cmd->task_stop_comp);
-		pr_debug("cmd %p stopped successfully\n", cmd);
-
-		spin_lock_irqsave(&cmd->t_state_lock, *flags);
-		cmd->transport_state &= ~CMD_T_REQUEST_STOP;
-		cmd->transport_state &= ~CMD_T_BUSY;
-		was_active = true;
-	}
-
-	return was_active;
-}
-
-/*
  * Handle SAM-esque emulation for generic transport request failures.
  */
 void transport_generic_request_failure(struct se_cmd *cmd,
@@ -1658,8 +1604,7 @@ void transport_generic_request_failure(struct se_cmd *cmd,
 	pr_debug("-----[ i_state: %d t_state: %d sense_reason: %d\n",
 		cmd->se_tfo->get_cmd_state(cmd),
 		cmd->t_state, sense_reason);
-	pr_debug("-----[ CMD_T_ACTIVE: %d CMD_T_STOP: %d CMD_T_SENT: %d\n",
-		(cmd->transport_state & CMD_T_ACTIVE) != 0,
+	pr_debug("-----[ CMD_T_STOP: %d CMD_T_SENT: %d\n",
 		(cmd->transport_state & CMD_T_STOP) != 0,
 		(cmd->transport_state & CMD_T_SENT) != 0);
 
@@ -1752,7 +1697,7 @@ void __target_execute_cmd(struct se_cmd *cmd)
 		ret = cmd->execute_cmd(cmd);
 		if (ret) {
 			spin_lock_irq(&cmd->t_state_lock);
-			cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT);
+			cmd->transport_state &= ~CMD_T_SENT;
 			spin_unlock_irq(&cmd->t_state_lock);
 
 			transport_generic_request_failure(cmd, ret);
@@ -1782,7 +1727,7 @@ static int target_write_prot_action(struct se_cmd *cmd)
 					     sectors, 0, cmd->t_prot_sg, 0);
 		if (unlikely(cmd->pi_err)) {
 			spin_lock_irq(&cmd->t_state_lock);
-			cmd->transport_state &= ~(CMD_T_BUSY | CMD_T_SENT);
+			cmd->transport_state &= ~CMD_T_SENT;
 			spin_unlock_irq(&cmd->t_state_lock);
 			transport_generic_request_failure(cmd, cmd->pi_err);
 			return -1;
@@ -1870,7 +1815,7 @@ void target_execute_cmd(struct se_cmd *cmd)
 	}
 
 	cmd->t_state = TRANSPORT_PROCESSING;
-	cmd->transport_state |= CMD_T_ACTIVE|CMD_T_BUSY|CMD_T_SENT;
+	cmd->transport_state |= CMD_T_SENT;
 	spin_unlock_irq(&cmd->t_state_lock);
 
 	if (target_write_prot_action(cmd))
@@ -1878,7 +1823,7 @@ void target_execute_cmd(struct se_cmd *cmd)
 
 	if (target_handle_task_attr(cmd)) {
 		spin_lock_irq(&cmd->t_state_lock);
-		cmd->transport_state &= ~(CMD_T_BUSY | CMD_T_SENT);
+		cmd->transport_state &= ~CMD_T_SENT;
 		spin_unlock_irq(&cmd->t_state_lock);
 		return;
 	}
@@ -2453,7 +2398,6 @@ static void transport_write_pending_qf(struct se_cmd *cmd)
 
 int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
 {
-	unsigned long flags;
 	int ret = 0;
 
 	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) {
@@ -2469,11 +2413,10 @@ int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
 		 * has already added se_cmd to state_list, but fabric has
 		 * failed command before I/O submission.
 		 */
-		if (cmd->state_active) {
-			spin_lock_irqsave(&cmd->t_state_lock, flags);
-			target_remove_from_state_list(cmd);
-			spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-		}
+		target_remove_from_state_list(cmd);
+
+		if (cmd->se_cmd_flags & SCF_SE_LUN_CMD)
+			transport_lun_remove_cmd(cmd);
 
 		if (cmd->se_lun)
 			transport_lun_remove_cmd(cmd);
@@ -2658,11 +2601,6 @@ bool transport_wait_for_tasks(struct se_cmd *cmd)
 		return false;
 	}
 
-	if (!(cmd->transport_state & CMD_T_ACTIVE)) {
-		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-		return false;
-	}
-
 	cmd->transport_state |= CMD_T_STOP;
 
 	pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08llx i_state: %d, t_state: %d, CMD_T_STOP\n",
@@ -2674,7 +2612,7 @@ bool transport_wait_for_tasks(struct se_cmd *cmd)
 	wait_for_completion(&cmd_done);
 
 	spin_lock_irqsave(&cmd->t_state_lock, flags);
-	cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP);
+	cmd->transport_state &= ~CMD_T_STOP;
 
 	pr_debug("wait_for_tasks: Stopped wait_for_completion(&cmd->t_transport_stop_comp) for ITT: 0x%08llx\n",
 		cmd->tag);
@@ -2972,17 +2910,12 @@ int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
 	if (!(cmd->transport_state & CMD_T_ABORTED))
 		return 0;
 
-	/*
-	 * If cmd has been aborted but either no status is to be sent or it has
-	 * already been sent, just return
-	 */
-	if (!send_status || !(cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS))
+	if (!send_status)
 		return 1;
 
 	pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB: 0x%02x ITT: 0x%08llx\n",
 		 cmd->t_task_cdb[0], cmd->tag);
 
-	cmd->se_cmd_flags &= ~SCF_SEND_DELAYED_TAS;
 	cmd->scsi_status = SAM_STAT_TASK_ABORTED;
 	trace_target_cmd_complete(cmd);
 	cmd->se_tfo->queue_status(cmd);
@@ -2991,41 +2924,6 @@ int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
 }
 EXPORT_SYMBOL(transport_check_aborted_status);
 
-void transport_send_task_abort(struct se_cmd *cmd)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&cmd->t_state_lock, flags);
-	if (cmd->se_cmd_flags & (SCF_SENT_CHECK_CONDITION)) {
-		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-		return;
-	}
-	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-
-	/*
-	 * If there are still expected incoming fabric WRITEs, we wait
-	 * until until they have completed before sending a TASK_ABORTED
-	 * response.  This response with TASK_ABORTED status will be
-	 * queued back to fabric module by transport_check_aborted_status().
-	 */
-	if (cmd->data_direction == DMA_TO_DEVICE) {
-		if (cmd->se_tfo->write_pending_status(cmd) != 0) {
-			cmd->transport_state |= CMD_T_ABORTED;
-			cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS;
-			return;
-		}
-	}
-	cmd->scsi_status = SAM_STAT_TASK_ABORTED;
-
-	transport_lun_remove_cmd(cmd);
-
-	pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x, ITT: 0x%08llx\n",
-		 cmd->t_task_cdb[0], cmd->tag);
-
-	trace_target_cmd_complete(cmd);
-	cmd->se_tfo->queue_status(cmd);
-}
-
 static void target_tmr_work(struct work_struct *work)
 {
 	struct se_cmd *cmd = container_of(work, struct se_cmd, work);
@@ -3069,12 +2967,6 @@ static void target_tmr_work(struct work_struct *work)
 int transport_generic_handle_tmr(
 	struct se_cmd *cmd)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&cmd->t_state_lock, flags);
-	cmd->transport_state |= CMD_T_ACTIVE;
-	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-
 	INIT_WORK(&cmd->work, target_tmr_work);
 	queue_work(cmd->se_dev->tmr_wq, &cmd->work);
 	return 0;
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 65f7258..7da733e 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -150,7 +150,6 @@ enum se_cmd_flags_table {
 	SCF_SENT_CHECK_CONDITION	= 0x00000800,
 	SCF_OVERFLOW_BIT		= 0x00001000,
 	SCF_UNDERFLOW_BIT		= 0x00002000,
-	SCF_SEND_DELAYED_TAS		= 0x00004000,
 	SCF_ALUA_NON_OPTIMIZED		= 0x00008000,
 	SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC = 0x00020000,
 	SCF_COMPARE_AND_WRITE		= 0x00080000,
@@ -495,6 +494,7 @@ struct se_cmd {
 	enum transport_state_table t_state;
 	unsigned		cmd_wait_set:1;
 	unsigned		unknown_data_length:1;
+	unsigned		send_abort_response:1;
 	/* See se_cmd_flags_table */
 	u32			se_cmd_flags;
 	u32			se_ordered_id;
@@ -528,17 +528,14 @@ struct se_cmd {
 	unsigned int		t_task_nolb;
 	unsigned int		transport_state;
 #define CMD_T_ABORTED		(1 << 0)
-#define CMD_T_ACTIVE		(1 << 1)
 #define CMD_T_COMPLETE		(1 << 2)
 #define CMD_T_SENT		(1 << 4)
 #define CMD_T_STOP		(1 << 5)
 #define CMD_T_DEV_ACTIVE	(1 << 7)
-#define CMD_T_REQUEST_STOP	(1 << 8)
-#define CMD_T_BUSY		(1 << 9)
 	spinlock_t		t_state_lock;
 	struct completion	t_transport_stop_comp;
 	unsigned int		cb_count;   /* protected by t_state_lock */
-	struct se_cmd_done_cb	done_cb[1]; /* protected by t_state_lock */
+	struct se_cmd_done_cb	done_cb[3]; /* protected by t_state_lock */
 
 	struct work_struct	work;
 
@@ -551,10 +548,6 @@ struct se_cmd {
 	unsigned int		t_bidi_data_nents;
 
 	struct list_head	state_list_entry;
-	bool			state_active;
-
-	/* old task stop completion, consider merging with some of the above */
-	struct completion	task_stop_comp;
 
 	/* backend private data */
 	void			*priv;
-- 
2.1.4

--
To unsubscribe from this list: send the line "unsubscribe target-devel" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at  http://vger.kernel.org/majordomo-info.html




[Index of Archives]     [Linux SCSI]     [Kernel Newbies]     [Linux SCSI Target Infrastructure]     [Share Photos]     [IDE]     [Security]     [Git]     [Netfilter]     [Bugtraq]     [Yosemite News]     [MIPS Linux]     [ARM Linux]     [Linux Security]     [Linux RAID]     [Linux ATA RAID]     [Linux IIO]     [Device Mapper]

  Powered by Linux