[PATCH 1/2] target: do not use work items for QUEUE FULL handling

Instead of adding commands that hit a QUEUE FULL condition to a list and then
using a work item to process that list, add them directly to the front of the
execution list, avoiding the overhead of an additional list and a context
switch.  Also remove the SCF_EMULATE_QUEUE_FULL flag by adding a
__transport_add_cmd_to_queue helper that can insert at the head of the queue.

Signed-off-by: Christoph Hellwig <hch@xxxxxx>
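
For reviewers who want the head/tail distinction spelled out: list_add()
inserts immediately after the list head and list_add_tail() immediately
before it, so the at_head case makes a requeued QUEUE FULL command the
next one the processing thread picks up.  A minimal userspace sketch of
those semantics follows; the structures and helpers re-implement the
relevant subset of include/linux/list.h for illustration only and are
not the kernel code:

	#include <stdio.h>
	#include <stddef.h>

	/* Standalone doubly-linked list mirroring the semantics of the
	 * kernel's struct list_head, list_add() and list_add_tail(). */
	struct list_head {
		struct list_head *next, *prev;
	};

	static void INIT_LIST_HEAD(struct list_head *h)
	{
		h->next = h->prev = h;
	}

	static void __list_add(struct list_head *new,
			struct list_head *prev, struct list_head *next)
	{
		next->prev = new;
		new->next = next;
		new->prev = prev;
		prev->next = new;
	}

	/* list_add(): insert right after the head -- what
	 * __transport_add_cmd_to_queue() does for at_head == true, so a
	 * requeued QUEUE FULL command runs before pending ones. */
	static void list_add(struct list_head *new, struct list_head *head)
	{
		__list_add(new, head, head->next);
	}

	/* list_add_tail(): insert before the head, i.e. at the end of
	 * the queue -- the normal path for newly submitted commands. */
	static void list_add_tail(struct list_head *new, struct list_head *head)
	{
		__list_add(new, head->prev, head);
	}

	struct cmd {
		int id;
		struct list_head node;
	};

	int main(void)
	{
		struct list_head queue;
		struct cmd a = { .id = 1 }, b = { .id = 2 }, c = { .id = 3 };
		struct list_head *p;

		INIT_LIST_HEAD(&queue);
		list_add_tail(&a.node, &queue);	/* normal submission */
		list_add_tail(&b.node, &queue);	/* normal submission */
		list_add(&c.node, &queue);	/* head requeue, like QUEUE FULL */

		/* Prints 3 1 2: the requeued command comes out first. */
		for (p = queue.next; p != &queue; p = p->next)
			printf("%d ", ((struct cmd *)((char *)p -
					offsetof(struct cmd, node)))->id);
		printf("\n");
		return 0;
	}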

Index: lio-core/drivers/target/target_core_transport.c
===================================================================
--- lio-core.orig/drivers/target/target_core_transport.c	2011-10-12 17:03:42.221772520 +0200
+++ lio-core/drivers/target/target_core_transport.c	2011-10-12 17:05:49.549271375 +0200
@@ -600,9 +600,8 @@ void transport_cmd_finish_abort(struct s
 	}
 }
 
-static void transport_add_cmd_to_queue(
-	struct se_cmd *cmd,
-	int t_state)
+static void __transport_add_cmd_to_queue(struct se_cmd *cmd, int t_state,
+		bool at_head)
 {
 	struct se_device *dev = cmd->se_dev;
 	struct se_queue_obj *qobj = &dev->dev_queue_obj;
@@ -623,10 +622,9 @@ static void transport_add_cmd_to_queue(
 	else
 		atomic_inc(&qobj->queue_cnt);
 
-	if (cmd->se_cmd_flags & SCF_EMULATE_QUEUE_FULL) {
-		cmd->se_cmd_flags &= ~SCF_EMULATE_QUEUE_FULL;
+	if (at_head)
 		list_add(&cmd->se_queue_node, &qobj->qobj_list);
-	} else
+	else
 		list_add_tail(&cmd->se_queue_node, &qobj->qobj_list);
 	atomic_set(&cmd->t_transport_queue_active, 1);
 	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
@@ -634,6 +632,11 @@ static void transport_add_cmd_to_queue(
 	wake_up_interruptible(&qobj->thread_wq);
 }
 
+static void transport_add_cmd_to_queue(struct se_cmd *cmd, int t_state)
+{
+	__transport_add_cmd_to_queue(cmd, t_state, false);
+}
+
 static struct se_cmd *
 transport_get_cmd_from_queue(struct se_queue_obj *qobj)
 {
@@ -950,39 +953,6 @@ void transport_remove_task_from_execute_
 	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
 }
 
-/*
- * Handle QUEUE_FULL / -EAGAIN status
- */
-
-static void target_qf_do_work(struct work_struct *work)
-{
-	struct se_device *dev = container_of(work, struct se_device,
-					qf_work_queue);
-	LIST_HEAD(qf_cmd_list);
-	struct se_cmd *cmd, *cmd_tmp;
-
-	spin_lock_irq(&dev->qf_cmd_lock);
-	list_splice_init(&dev->qf_cmd_list, &qf_cmd_list);
-	spin_unlock_irq(&dev->qf_cmd_lock);
-
-	list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) {
-		list_del(&cmd->se_qf_node);
-		atomic_dec(&dev->dev_qf_count);
-		smp_mb__after_atomic_dec();
-
-		pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue"
-			" context: %s\n", cmd->se_tfo->get_fabric_name(), cmd,
-			(cmd->t_state == TRANSPORT_COMPLETE_OK) ? "COMPLETE_OK" :
-			(cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING"
-			: "UNKNOWN");
-		/*
-		 * The SCF_EMULATE_QUEUE_FULL flag will be cleared once se_cmd
-		 * has been added to head of queue
-		 */
-		transport_add_cmd_to_queue(cmd, cmd->t_state);
-	}
-}
-
 unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd)
 {
 	switch (cmd->data_direction) {
@@ -1377,7 +1347,6 @@ struct se_device *transport_add_device_t
 	INIT_LIST_HEAD(&dev->delayed_cmd_list);
 	INIT_LIST_HEAD(&dev->ordered_cmd_list);
 	INIT_LIST_HEAD(&dev->state_task_list);
-	INIT_LIST_HEAD(&dev->qf_cmd_list);
 	spin_lock_init(&dev->execute_task_lock);
 	spin_lock_init(&dev->delayed_cmd_lock);
 	spin_lock_init(&dev->ordered_cmd_lock);
@@ -1432,10 +1401,7 @@ struct se_device *transport_add_device_t
 			dev->transport->name);
 		goto out;
 	}
-	/*
-	 * Setup work_queue for QUEUE_FULL
-	 */
-	INIT_WORK(&dev->qf_work_queue, target_qf_do_work);
+
 	/*
 	 * Preload the initial INQUIRY const values if we are doing
 	 * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
@@ -1542,7 +1508,6 @@ void transport_init_se_cmd(
 	INIT_LIST_HEAD(&cmd->se_lun_node);
 	INIT_LIST_HEAD(&cmd->se_delayed_node);
 	INIT_LIST_HEAD(&cmd->se_ordered_node);
-	INIT_LIST_HEAD(&cmd->se_qf_node);
 	INIT_LIST_HEAD(&cmd->se_queue_node);
 	INIT_LIST_HEAD(&cmd->t_task_list);
 	init_completion(&cmd->transport_lun_fe_stop_comp);
@@ -3466,15 +3431,17 @@ static void transport_handle_queue_full(
 	struct se_device *dev,
 	int (*qf_callback)(struct se_cmd *))
 {
-	spin_lock_irq(&dev->qf_cmd_lock);
-	cmd->se_cmd_flags |= SCF_EMULATE_QUEUE_FULL;
+	pr_debug("Processing %s cmd: %p QUEUE_FULL %s",
+		 cmd->se_tfo->get_fabric_name(), cmd,
+		 (cmd->t_state == TRANSPORT_COMPLETE_OK) ? "COMPLETE_OK" :
+		 (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING"
+		 : "UNKNOWN");
+
+	spin_lock_irq(&cmd->t_state_lock);
 	cmd->transport_qf_callback = qf_callback;
-	list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list);
-	atomic_inc(&dev->dev_qf_count);
-	smp_mb__after_atomic_inc();
-	spin_unlock_irq(&cmd->se_dev->qf_cmd_lock);
+	spin_unlock_irq(&cmd->t_state_lock);
 
-	schedule_work(&cmd->se_dev->qf_work_queue);
+	__transport_add_cmd_to_queue(cmd, cmd->t_state, true);
 }	
 
 static void transport_generic_complete_ok(struct se_cmd *cmd)
@@ -3487,12 +3454,6 @@ static void transport_generic_complete_o
 	 */
 	if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
 		transport_complete_task_attr(cmd);
-	/*
-	 * Check to schedule QUEUE_FULL work, or execute an existing
-	 * cmd->transport_qf_callback()
-	 */
-	if (atomic_read(&cmd->se_dev->dev_qf_count) != 0)
-		schedule_work(&cmd->se_dev->qf_work_queue);
 
 	if (cmd->transport_qf_callback) {
 		ret = cmd->transport_qf_callback(cmd);
Index: lio-core/include/target/target_core_base.h
===================================================================
--- lio-core.orig/include/target/target_core_base.h	2011-10-12 17:03:42.225772748 +0200
+++ lio-core/include/target/target_core_base.h	2011-10-12 17:05:49.549271375 +0200
@@ -126,7 +126,6 @@ enum se_cmd_flags_table {
 	SCF_UNUSED			= 0x00100000,
 	SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC = 0x00400000,
 	SCF_EMULATE_CDB_ASYNC		= 0x01000000,
-	SCF_EMULATE_QUEUE_FULL		= 0x02000000,
 };
 
 /* struct se_dev_entry->lun_flags and struct se_lun->lun_access */
@@ -458,7 +457,6 @@ struct se_cmd {
 	struct list_head	se_delayed_node;
 	struct list_head	se_ordered_node;
 	struct list_head	se_lun_node;
-	struct list_head	se_qf_node;
 	struct se_device      *se_dev;
 	struct se_dev_entry   *se_deve;
 	struct se_device	*se_obj_ptr;
@@ -739,7 +737,6 @@ struct se_device {
 	atomic_t		dev_status_thr_count;
 	atomic_t		dev_hoq_count;
 	atomic_t		dev_ordered_sync;
-	atomic_t		dev_qf_count;
 	struct se_obj		dev_obj;
 	struct se_obj		dev_access_obj;
 	struct se_obj		dev_export_obj;
@@ -769,12 +766,10 @@ struct se_device {
 	struct task_struct	*process_thread;
 	pid_t			process_thread_pid;
 	struct task_struct		*dev_mgmt_thread;
-	struct work_struct	qf_work_queue;
 	struct list_head	delayed_cmd_list;
 	struct list_head	ordered_cmd_list;
 	struct list_head	execute_task_list;
 	struct list_head	state_task_list;
-	struct list_head	qf_cmd_list;
 	/* Pointer to associated SE HBA */
 	struct se_hba		*se_hba;
 	struct se_subsystem_dev *se_sub_dev;
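
For contrast, the deferral pattern this patch deletes (park the command
on a side list under qf_cmd_lock, then splice the list and requeue from
work-queue context) reduces to the userspace sketch below.  A pthread
mutex stands in for the spinlock, and the qf_cmd type and helper names
are invented for illustration:

	#include <pthread.h>
	#include <stdio.h>

	/* Hypothetical stand-in for the se_cmd entries that used to sit
	 * on dev->qf_cmd_list; only the list linkage matters here. */
	struct qf_cmd {
		int id;
		struct qf_cmd *next;
	};

	static pthread_mutex_t qf_lock = PTHREAD_MUTEX_INITIALIZER;
	static struct qf_cmd *qf_list;
	static struct qf_cmd **qf_tail = &qf_list;

	/* QUEUE FULL path before the patch: append the command to a side
	 * list under the lock (list_add_tail() in the original) and kick
	 * a work item to deal with it later. */
	static void qf_defer(struct qf_cmd *cmd)
	{
		pthread_mutex_lock(&qf_lock);
		cmd->next = NULL;
		*qf_tail = cmd;
		qf_tail = &cmd->next;
		pthread_mutex_unlock(&qf_lock);
		/* schedule_work(&dev->qf_work_queue) went here */
	}

	/* Work-item body: detach the whole list while holding the lock
	 * (the list_splice_init() step in target_qf_do_work()), then
	 * requeue each entry with the lock dropped. */
	static void qf_do_work(void)
	{
		struct qf_cmd *cmd, *next;

		pthread_mutex_lock(&qf_lock);
		cmd = qf_list;
		qf_list = NULL;
		qf_tail = &qf_list;
		pthread_mutex_unlock(&qf_lock);

		for (; cmd; cmd = next) {
			next = cmd->next;
			printf("requeueing cmd %d at head of device queue\n",
			       cmd->id);
		}
	}

	int main(void)
	{
		struct qf_cmd a = { .id = 7 }, b = { .id = 8 };

		qf_defer(&a);
		qf_defer(&b);
		qf_do_work();	/* ran in workqueue context in the old code */
		return 0;
	}

After the patch this round trip disappears: transport_handle_queue_full()
records the callback under t_state_lock and __transport_add_cmd_to_queue()
puts the command straight at the head of the device queue, waking the
processing thread without a second list or a work-queue context switch.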
