[PATCH, RFC]: move full command queue handling into fabric drivers

Currently there is a lot of code in the target core to deal with the fact
that qla2xxx might return errors from ->queue_data_in and ->queue_status
because its internal command queues are full.  The core adds commands that
failed this way onto a per-device list, then immediately schedules a work
struct that re-adds them to the per-device command queue, which then
retries the action.  In short it's a very complex busy loop.  This patch
moves the busy looping into qla2xxx to get rid of all that code.  While
this still isn't optimal it removes a lot of useless core complexity, and
makes it possible to add a waitqueue to the qla2xxx host queue to actually
allow targeted wakeups.
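
For reference, the round trip being removed boils down to the following
(condensed from the code deleted below; debug output and the
dev_qf_count accounting are dropped):

static void transport_handle_queue_full(struct se_cmd *cmd,
		struct se_device *dev)
{
	spin_lock_irq(&dev->qf_cmd_lock);
	list_add_tail(&cmd->se_qf_node, &dev->qf_cmd_list);
	spin_unlock_irq(&dev->qf_cmd_lock);

	schedule_work(&dev->qf_work_queue);	/* runs target_qf_do_work() */
}

static void target_qf_do_work(struct work_struct *work)
{
	struct se_device *dev = container_of(work, struct se_device,
			qf_work_queue);
	struct se_cmd *cmd, *tmp;
	LIST_HEAD(qf_cmd_list);

	spin_lock_irq(&dev->qf_cmd_lock);
	list_splice_init(&dev->qf_cmd_list, &qf_cmd_list);
	spin_unlock_irq(&dev->qf_cmd_lock);

	list_for_each_entry_safe(cmd, tmp, &qf_cmd_list, se_qf_node) {
		list_del(&cmd->se_qf_node);
		/*
		 * Requeue at the head; the processing thread retries the
		 * completion and may fail straight back into this path.
		 */
		transport_add_cmd_to_queue(cmd, cmd->t_state, true);
	}
}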

I briefly looked into implementing these proper wakeups, but I failed to
understand the related code in qla2xxx - someone more familiar with the
hardware really should look into waking up callers once we actually have
free command slots.
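
Roughly what I have in mind is the sketch below - note that this is
untested and purely illustrative: the slot_wq field and the
qla_tgt_slots_available() helper don't exist, and implementing the
latter is exactly the part that needs someone who understands the
firmware request queue accounting:

#include <linux/wait.h>

int qla_tgt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
		uint8_t scsi_status)
{
	struct qla_tgt *tgt = cmd->tgt;
	int error;

	for (;;) {
		error = IS_FWI2_CAPABLE(tgt->ha) ?
			__qla_tgt_24xx_xmit_response(cmd, xmit_type, scsi_status) :
			__qla_tgt_2xxx_xmit_response(cmd, xmit_type, scsi_status);
		if (error != -EAGAIN && error != -ENOMEM)
			return error;
		/*
		 * The completion path would do wake_up(&tgt->slot_wq)
		 * whenever it frees a command slot (hypothetical field).
		 */
		wait_event(tgt->slot_wq, qla_tgt_slots_available(tgt));
	}
}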

Signed-off-by: Christoph Hellwig <hch@xxxxxx>

---
 drivers/scsi/qla2xxx/qla_target.c               |   19 ++
 drivers/target/target_core_transport.c          |  185 +-----------------------
 drivers/target/tcm_qla2xxx/tcm_qla2xxx_fabric.c |    3 
 include/target/target_core_base.h               |    7 
 4 files changed, 32 insertions(+), 182 deletions(-)

Index: lio-core/drivers/target/target_core_transport.c
===================================================================
--- lio-core.orig/drivers/target/target_core_transport.c	2011-11-29 08:54:56.738146308 +0100
+++ lio-core/drivers/target/target_core_transport.c	2011-11-29 09:03:21.275412994 +0100
@@ -69,8 +69,6 @@ static int transport_generic_write_pendi
 static int transport_processing_thread(void *param);
 static int __transport_execute_tasks(struct se_device *dev);
 static void transport_complete_task_attr(struct se_cmd *cmd);
-static void transport_handle_queue_full(struct se_cmd *cmd,
-		struct se_device *dev);
 static void transport_free_dev_tasks(struct se_cmd *cmd);
 static int transport_generic_get_mem(struct se_cmd *cmd);
 static void transport_put_cmd(struct se_cmd *cmd);
@@ -576,8 +574,7 @@ void transport_cmd_finish_abort(struct s
 	}
 }
 
-static void transport_add_cmd_to_queue(struct se_cmd *cmd, int t_state,
-		bool at_head)
+static void transport_add_cmd_to_queue(struct se_cmd *cmd, int t_state)
 {
 	struct se_device *dev = cmd->se_dev;
 	struct se_queue_obj *qobj = &dev->dev_queue_obj;
@@ -598,10 +595,7 @@ static void transport_add_cmd_to_queue(s
 	else
 		atomic_inc(&qobj->queue_cnt);
 
-	if (at_head)
-		list_add(&cmd->se_queue_node, &qobj->qobj_list);
-	else
-		list_add_tail(&cmd->se_queue_node, &qobj->qobj_list);
+	list_add_tail(&cmd->se_queue_node, &qobj->qobj_list);
 	atomic_set(&cmd->t_transport_queue_active, 1);
 	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
 
@@ -899,36 +893,6 @@ static void transport_remove_task_from_e
 	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
 }
 
-/*
- * Handle QUEUE_FULL / -EAGAIN and -ENOMEM status
- */
-
-static void target_qf_do_work(struct work_struct *work)
-{
-	struct se_device *dev = container_of(work, struct se_device,
-					qf_work_queue);
-	LIST_HEAD(qf_cmd_list);
-	struct se_cmd *cmd, *cmd_tmp;
-
-	spin_lock_irq(&dev->qf_cmd_lock);
-	list_splice_init(&dev->qf_cmd_list, &qf_cmd_list);
-	spin_unlock_irq(&dev->qf_cmd_lock);
-
-	list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) {
-		list_del(&cmd->se_qf_node);
-		atomic_dec(&dev->dev_qf_count);
-		smp_mb__after_atomic_dec();
-
-		pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue"
-			" context: %s\n", cmd->se_tfo->get_fabric_name(), cmd,
-			(cmd->t_state == TRANSPORT_COMPLETE_QF_OK) ? "COMPLETE_OK" :
-			(cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING"
-			: "UNKNOWN");
-
-		transport_add_cmd_to_queue(cmd, cmd->t_state, true);
-	}
-}
-
 unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd)
 {
 	switch (cmd->data_direction) {
@@ -1321,14 +1285,12 @@ struct se_device *transport_add_device_t
 	INIT_LIST_HEAD(&dev->execute_task_list);
 	INIT_LIST_HEAD(&dev->delayed_cmd_list);
 	INIT_LIST_HEAD(&dev->state_task_list);
-	INIT_LIST_HEAD(&dev->qf_cmd_list);
 	spin_lock_init(&dev->execute_task_lock);
 	spin_lock_init(&dev->delayed_cmd_lock);
 	spin_lock_init(&dev->dev_reservation_lock);
 	spin_lock_init(&dev->dev_status_lock);
 	spin_lock_init(&dev->se_port_lock);
 	spin_lock_init(&dev->se_tmr_lock);
-	spin_lock_init(&dev->qf_cmd_lock);
 
 	dev->queue_depth	= dev_limits->queue_depth;
 	atomic_set(&dev->depth_left, dev->queue_depth);
@@ -1372,10 +1334,7 @@ struct se_device *transport_add_device_t
 			dev->transport->name);
 		goto out;
 	}
-	/*
-	 * Setup work_queue for QUEUE_FULL
-	 */
-	INIT_WORK(&dev->qf_work_queue, target_qf_do_work);
+
 	/*
 	 * Preload the initial INQUIRY const values if we are doing
 	 * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
@@ -1481,7 +1440,6 @@ void transport_init_se_cmd(
 {
 	INIT_LIST_HEAD(&cmd->se_lun_node);
 	INIT_LIST_HEAD(&cmd->se_delayed_node);
-	INIT_LIST_HEAD(&cmd->se_qf_node);
 	INIT_LIST_HEAD(&cmd->se_queue_node);
 	INIT_LIST_HEAD(&cmd->se_cmd_list);
 	INIT_LIST_HEAD(&cmd->t_task_list);
@@ -1657,7 +1615,7 @@ int transport_generic_handle_cdb_map(
 		return -EINVAL;
 	}
 
-	transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD_MAP, false);
+	transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD_MAP);
 	return 0;
 }
 EXPORT_SYMBOL(transport_generic_handle_cdb_map);
@@ -1687,7 +1645,7 @@ int transport_generic_handle_data(
 	if (transport_check_aborted_status(cmd, 1) != 0)
 		return 0;
 
-	transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_WRITE, false);
+	transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_WRITE);
 	return 0;
 }
 EXPORT_SYMBOL(transport_generic_handle_data);
@@ -1699,7 +1657,7 @@ EXPORT_SYMBOL(transport_generic_handle_d
 int transport_generic_handle_tmr(
 	struct se_cmd *cmd)
 {
-	transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_TMR, false);
+	transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_TMR);
 	return 0;
 }
 EXPORT_SYMBOL(transport_generic_handle_tmr);
@@ -1776,8 +1734,6 @@ static int transport_stop_tasks_for_cmd(
  */
 static void transport_generic_request_failure(struct se_cmd *cmd)
 {
-	int ret = 0;
-
 	pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x"
 		" CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd),
 		cmd->t_task_cdb[0]);
@@ -1834,9 +1790,7 @@ static void transport_generic_request_fa
 				cmd->orig_fe_lun, 0x2C,
 				ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
 
-		ret = cmd->se_tfo->queue_status(cmd);
-		if (ret == -EAGAIN || ret == -ENOMEM)
-			goto queue_full;
+		cmd->se_tfo->queue_status(cmd);
 		goto check_stop;
 	default:
 		pr_err("Unknown transport error for CDB 0x%02x: %d\n",
@@ -1851,20 +1805,12 @@ static void transport_generic_request_fa
 	 * transport_send_check_condition_and_sense() after handling
 	 * possible unsoliticied write data payloads.
 	 */
-	ret = transport_send_check_condition_and_sense(cmd,
+	transport_send_check_condition_and_sense(cmd,
 			cmd->scsi_sense_reason, 0);
-	if (ret == -EAGAIN || ret == -ENOMEM)
-		goto queue_full;
 
 check_stop:
 	transport_lun_remove_cmd(cmd);
-	if (!transport_cmd_check_stop_to_fabric(cmd))
-		;
-	return;
-
-queue_full:
-	cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
-	transport_handle_queue_full(cmd, cmd->se_dev);
+	transport_cmd_check_stop_to_fabric(cmd);
 }
 
 static inline u32 transport_lba_21(unsigned char *cdb)
@@ -3124,63 +3070,10 @@ static void transport_complete_task_attr
 		wake_up_interruptible(&dev->dev_queue_obj.thread_wq);
 }
 
-static void transport_complete_qf(struct se_cmd *cmd)
-{
-	int ret = 0;
-
-	if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
-		transport_complete_task_attr(cmd);
-
-	if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
-		ret = cmd->se_tfo->queue_status(cmd);
-		if (ret)
-			goto out;
-	}
-
-	switch (cmd->data_direction) {
-	case DMA_FROM_DEVICE:
-		ret = cmd->se_tfo->queue_data_in(cmd);
-		break;
-	case DMA_TO_DEVICE:
-		if (cmd->t_bidi_data_sg) {
-			ret = cmd->se_tfo->queue_data_in(cmd);
-			if (ret < 0)
-				break;
-		}
-		/* Fall through for DMA_TO_DEVICE */
-	case DMA_NONE:
-		ret = cmd->se_tfo->queue_status(cmd);
-		break;
-	default:
-		break;
-	}
-
-out:
-	if (ret < 0) {
-		transport_handle_queue_full(cmd, cmd->se_dev);
-		return;
-	}
-	transport_lun_remove_cmd(cmd);
-	transport_cmd_check_stop_to_fabric(cmd);
-}
-
-static void transport_handle_queue_full(
-	struct se_cmd *cmd,
-	struct se_device *dev)
-{
-	spin_lock_irq(&dev->qf_cmd_lock);
-	list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list);
-	atomic_inc(&dev->dev_qf_count);
-	smp_mb__after_atomic_inc();
-	spin_unlock_irq(&cmd->se_dev->qf_cmd_lock);
-
-	schedule_work(&cmd->se_dev->qf_work_queue);
-}	
-
 static void target_complete_ok_work(struct work_struct *work)
 {
 	struct se_cmd *cmd = container_of(work, struct se_cmd, work);
-	int reason = 0, ret;
+	int reason = 0;
 
 	/*
 	 * Check if we need to move delayed/dormant tasks from cmds on the
@@ -3189,12 +3082,6 @@ static void target_complete_ok_work(stru
 	 */
 	if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
 		transport_complete_task_attr(cmd);
-	/*
-	 * Check to schedule QUEUE_FULL work, or execute an existing
-	 * cmd->transport_qf_callback()
-	 */
-	if (atomic_read(&cmd->se_dev->dev_qf_count) != 0)
-		schedule_work(&cmd->se_dev->qf_work_queue);
 
 	/*
 	 * Check if we need to retrieve a sense buffer from
@@ -3209,10 +3096,8 @@ static void target_complete_ok_work(stru
 		 * a non GOOD status.
 		 */
 		if (cmd->scsi_status) {
-			ret = transport_send_check_condition_and_sense(
+			transport_send_check_condition_and_sense(
 					cmd, reason, 1);
-			if (ret == -EAGAIN || ret == -ENOMEM)
-				goto queue_full;
 
 			transport_lun_remove_cmd(cmd);
 			transport_cmd_check_stop_to_fabric(cmd);
@@ -3235,9 +3120,7 @@ static void target_complete_ok_work(stru
 		}
 		spin_unlock(&cmd->se_lun->lun_sep_lock);
 
-		ret = cmd->se_tfo->queue_data_in(cmd);
-		if (ret == -EAGAIN || ret == -ENOMEM)
-			goto queue_full;
+		cmd->se_tfo->queue_data_in(cmd);
 		break;
 	case DMA_TO_DEVICE:
 		spin_lock(&cmd->se_lun->lun_sep_lock);
@@ -3256,16 +3139,12 @@ static void target_complete_ok_work(stru
 					cmd->data_length;
 			}
 			spin_unlock(&cmd->se_lun->lun_sep_lock);
-			ret = cmd->se_tfo->queue_data_in(cmd);
-			if (ret == -EAGAIN || ret == -ENOMEM)
-				goto queue_full;
+			cmd->se_tfo->queue_data_in(cmd);
 			break;
 		}
 		/* Fall through for DMA_TO_DEVICE */
 	case DMA_NONE:
-		ret = cmd->se_tfo->queue_status(cmd);
-		if (ret == -EAGAIN || ret == -ENOMEM)
-			goto queue_full;
+		cmd->se_tfo->queue_status(cmd);
 		break;
 	default:
 		break;
@@ -3273,13 +3152,6 @@ static void target_complete_ok_work(stru
 
 	transport_lun_remove_cmd(cmd);
 	transport_cmd_check_stop_to_fabric(cmd);
-	return;
-
-queue_full:
-	pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p,"
-		" data_direction: %d\n", cmd, cmd->data_direction);
-	cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
-	transport_handle_queue_full(cmd, cmd->se_dev);
 }
 
 static void transport_free_dev_tasks(struct se_cmd *cmd)
@@ -3836,18 +3708,6 @@ void transport_generic_process_write(str
 }
 EXPORT_SYMBOL(transport_generic_process_write);
 
-static void transport_write_pending_qf(struct se_cmd *cmd)
-{
-	int ret;
-
-	ret = cmd->se_tfo->write_pending(cmd);
-	if (ret == -EAGAIN || ret == -ENOMEM) {
-		pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n",
-			 cmd);
-		transport_handle_queue_full(cmd, cmd->se_dev);
-	}
-}
-
 static int transport_generic_write_pending(struct se_cmd *cmd)
 {
 	unsigned long flags;
@@ -3871,18 +3731,9 @@ static int transport_generic_write_pendi
 	 * frontend know that WRITE buffers are ready.
 	 */
 	ret = cmd->se_tfo->write_pending(cmd);
-	if (ret == -EAGAIN || ret == -ENOMEM)
-		goto queue_full;
-	else if (ret < 0)
+	if (ret < 0)
 		return ret;
-
 	return 1;
-
-queue_full:
-	pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd);
-	cmd->t_state = TRANSPORT_COMPLETE_QF_WP;
-	transport_handle_queue_full(cmd, cmd->se_dev);
-	return 0;
 }
 
 void transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
@@ -4624,12 +4475,6 @@ get_cmd:
 		case TRANSPORT_PROCESS_TMR:
 			transport_generic_do_tmr(cmd);
 			break;
-		case TRANSPORT_COMPLETE_QF_WP:
-			transport_write_pending_qf(cmd);
-			break;
-		case TRANSPORT_COMPLETE_QF_OK:
-			transport_complete_qf(cmd);
-			break;
 		default:
 			pr_err("Unknown t_state: %d  for ITT: 0x%08x "
 				"i_state: %d on SE LUN: %u\n",
Index: lio-core/include/target/target_core_base.h
===================================================================
--- lio-core.orig/include/target/target_core_base.h	2011-11-29 08:54:56.000000000 +0100
+++ lio-core/include/target/target_core_base.h	2011-11-29 08:55:32.444619536 +0100
@@ -157,8 +157,6 @@ enum transport_state_table {
 	TRANSPORT_PROCESS_TMR	= 9,
 	TRANSPORT_ISTATE_PROCESSING = 11,
 	TRANSPORT_NEW_CMD_MAP	= 16,
-	TRANSPORT_COMPLETE_QF_WP = 18,
-	TRANSPORT_COMPLETE_QF_OK = 19,
 };
 
 /* Used for struct se_cmd->se_cmd_flags */
@@ -531,7 +529,6 @@ struct se_cmd {
 	void			*sense_buffer;
 	struct list_head	se_delayed_node;
 	struct list_head	se_lun_node;
-	struct list_head	se_qf_node;
 	struct se_device      *se_dev;
 	struct se_dev_entry   *se_deve;
 	struct se_lun		*se_lun;
@@ -797,7 +794,6 @@ struct se_device {
 	atomic_t		dev_ordered_id;
 	atomic_t		execute_tasks;
 	atomic_t		dev_ordered_sync;
-	atomic_t		dev_qf_count;
 	struct se_obj		dev_obj;
 	struct se_obj		dev_access_obj;
 	struct se_obj		dev_export_obj;
@@ -808,7 +804,6 @@ struct se_device {
 	spinlock_t		dev_status_lock;
 	spinlock_t		se_port_lock;
 	spinlock_t		se_tmr_lock;
-	spinlock_t		qf_cmd_lock;
 	/* Used for legacy SPC-2 reservationsa */
 	struct se_node_acl	*dev_reserved_node_acl;
 	/* Used for ALUA Logical Unit Group membership */
@@ -819,11 +814,9 @@ struct se_device {
 	struct list_head	dev_tmr_list;
 	/* Pointer to descriptor for processing thread */
 	struct task_struct	*process_thread;
-	struct work_struct	qf_work_queue;
 	struct list_head	delayed_cmd_list;
 	struct list_head	execute_task_list;
 	struct list_head	state_task_list;
-	struct list_head	qf_cmd_list;
 	/* Pointer to associated SE HBA */
 	struct se_hba		*se_hba;
 	struct se_subsystem_dev *se_sub_dev;
Index: lio-core/drivers/scsi/qla2xxx/qla_target.c
===================================================================
--- lio-core.orig/drivers/scsi/qla2xxx/qla_target.c	2011-11-29 08:57:01.607469832 +0100
+++ lio-core/drivers/scsi/qla2xxx/qla_target.c	2011-11-29 08:59:27.223347631 +0100
@@ -2317,15 +2317,28 @@ static inline void qla_tgt_check_srr_deb
 int qla_tgt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, uint8_t scsi_status)
 {
+	int error;
 	qla_tgt_check_srr_debug(cmd, &xmit_type);
 
+again:
 	ql_dbg(ql_dbg_tgt, cmd->vha, 0xe017, "is_send_status=%d,"
 		" cmd->bufflen=%d, cmd->sg_cnt=%d, cmd->dma_data_direction=%d",
 		(xmit_type & QLA_TGT_XMIT_STATUS) ? 1 : 0, cmd->bufflen,
 		cmd->sg_cnt, cmd->dma_data_direction);
 
-	return (IS_FWI2_CAPABLE(cmd->tgt->ha)) ?
-		__qla_tgt_24xx_xmit_response(cmd, xmit_type, scsi_status) :
-		__qla_tgt_2xxx_xmit_response(cmd, xmit_type, scsi_status);
+	if (IS_FWI2_CAPABLE(cmd->tgt->ha))
+		error = __qla_tgt_24xx_xmit_response(cmd, xmit_type, scsi_status);
+	else
+		error = __qla_tgt_2xxx_xmit_response(cmd, xmit_type, scsi_status);
+
+	if (error == -EAGAIN || error == -ENOMEM) {
+		/*
+		 * XXX: we should use a waitqueue for the actual resource here.
+		 */
+		msleep(1);
+		goto again;
+	}
+
+	return error;
 }
 EXPORT_SYMBOL(qla_tgt_xmit_response);
 
Index: lio-core/drivers/target/tcm_qla2xxx/tcm_qla2xxx_fabric.c
===================================================================
--- lio-core.orig/drivers/target/tcm_qla2xxx/tcm_qla2xxx_fabric.c	2011-11-29 08:57:01.587469940 +0100
+++ lio-core/drivers/target/tcm_qla2xxx/tcm_qla2xxx_fabric.c	2011-11-29 09:02:20.609074986 +0100
@@ -626,8 +626,7 @@ int tcm_qla2xxx_write_pending_status(str
 	 * CTIO aborts to be posted via hardware in tcm_qla2xxx_handle_data().
 	 */
 	spin_lock_irqsave(&se_cmd->t_state_lock, flags);
-	if (se_cmd->t_state == TRANSPORT_WRITE_PENDING ||
-	    se_cmd->t_state == TRANSPORT_COMPLETE_QF_WP) {
+	if (se_cmd->t_state == TRANSPORT_WRITE_PENDING) {
 		spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
 		wait_for_completion_timeout(&se_cmd->t_transport_stop_comp, 3000);
 		return 0;