From: Nicholas Bellinger <nab@xxxxxxxxxxxxxxx>

Historically, pSCSI devices have been the ones that required target-core
to enforce a per se_device->depth_left.  This patch changes target-core
to no longer (by default) enforce a per se_device->depth_left or sleep
in transport_tcq_window_closed() when we run out of queue slots.

It adds an se_device->enforce_dev_tcq bitfield plus an
se_subsystem_dev->su_dev_flags SDF_ENFORCE_DEVICE_TCQ flag to signal
the legacy usage in target-core with pSCSI.  So now, aside from this
special case, IBLOCK, FILEIO and RAMDISK run with ->enforce_dev_tcq=0
by default.

It also moves all depth_left access under se_device->execute_task_lock
for the legacy case, and converts ->depth_left from an atomic_t into a
plain u32.

Cc: Christoph Hellwig <hch@xxxxxx>
Cc: Roland Dreier <roland@xxxxxxxxxxxxxxx>
Cc: Joern Engel <joern@xxxxxxxxx>
Signed-off-by: Nicholas Bellinger <nab@xxxxxxxxxxxxxxx>
---
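To make the dispatch/completion pattern this patch converges on
concrete, here is a minimal standalone sketch.  Everything below is
illustrative only: my_dev, dispatch_one_task() and complete_one_task()
are made-up stand-ins for se_device, __transport_execute_tasks() and
transport_complete_task(), and a pthread mutex stands in for
execute_task_lock so the sketch compiles on its own.  Only the
enforce_dev_tcq/depth_left handling mirrors the patch:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct my_dev {
	pthread_mutex_t lock;       /* stands in for dev->execute_task_lock */
	unsigned int enforce_tcq:1; /* 1 only for pSCSI-style backends */
	unsigned int depth_left;    /* plain integer, only touched under ->lock */
	unsigned int pending;       /* stand-in for the execute_task_list */
};

/* Dispatch path: returns false when the legacy TCQ window is closed. */
static bool dispatch_one_task(struct my_dev *dev)
{
	pthread_mutex_lock(&dev->lock);
	if (dev->enforce_tcq && !dev->depth_left) {
		/* Legacy pSCSI case only: out of slots, caller may back off. */
		pthread_mutex_unlock(&dev->lock);
		return false;
	}
	if (!dev->pending) {
		pthread_mutex_unlock(&dev->lock);
		return true;
	}
	dev->pending--;
	if (dev->enforce_tcq)
		dev->depth_left--;  /* consume a slot under the same lock */
	pthread_mutex_unlock(&dev->lock);
	/* ... hand the task to the backend driver here ... */
	return true;
}

/* Completion path: return the slot, again under the same lock. */
static void complete_one_task(struct my_dev *dev)
{
	if (dev->enforce_tcq) {
		pthread_mutex_lock(&dev->lock);
		dev->depth_left++;
		pthread_mutex_unlock(&dev->lock);
	}
}

int main(void)
{
	struct my_dev dev = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.enforce_tcq = 1,  /* pSCSI-like; IBLOCK/FILEIO/RAMDISK get 0 */
		.depth_left = 1,
		.pending = 2,
	};

	printf("first dispatch:  %d\n", dispatch_one_task(&dev)); /* 1: slot consumed */
	printf("second dispatch: %d\n", dispatch_one_task(&dev)); /* 0: window closed */
	complete_one_task(&dev);
	printf("third dispatch:  %d\n", dispatch_one_task(&dev)); /* 1: slot returned */
	return 0;
}

With ->enforce_tcq=0 the depth accounting drops out of the fast path
entirely, which is the point of this change for the non-pSCSI backends.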
 drivers/target/target_core_device.c    |    6 ++-
 drivers/target/target_core_pscsi.c     |    4 ++
 drivers/target/target_core_transport.c |   57 +++++++++++++++++++------------
 include/target/target_core_base.h      |    3 +-
 include/target/target_core_transport.h |    1 +
 5 files changed, 46 insertions(+), 25 deletions(-)

diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 5e10beb..ed06be4 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -1164,10 +1164,12 @@ int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
 	}
 
 	dev->se_sub_dev->se_dev_attrib.queue_depth = dev->queue_depth = queue_depth;
+	spin_lock_irq(&dev->execute_task_lock);
 	if (queue_depth > orig_queue_depth)
-		atomic_add(queue_depth - orig_queue_depth, &dev->depth_left);
+		dev->depth_left += (queue_depth - orig_queue_depth);
 	else if (queue_depth < orig_queue_depth)
-		atomic_sub(orig_queue_depth - queue_depth, &dev->depth_left);
+		dev->depth_left -= (orig_queue_depth - queue_depth);
+	spin_unlock_irq(&dev->execute_task_lock);
 
 	pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n",
 			dev, queue_depth);
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 53060ed..be5b6dd 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -350,6 +350,10 @@ static struct se_device *pscsi_add_device_to_list(
 	 * scsi_device_put() and the pdv->pdv_sd cleared.
 	 */
 	pdv->pdv_sd = sd;
+	/*
+	 * Make pSCSI enforce per se_device TCQ per scsi_device->queue_depth
+	 */
+	se_dev->su_dev_flags |= SDF_ENFORCE_DEVICE_TCQ;
 
 	dev = transport_add_device_to_core_hba(hba, &pscsi_template,
 				se_dev, dev_flags, pdv,
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 8bff7e71..bfe625b 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -708,12 +708,12 @@ void transport_complete_task(struct se_task *task, int success)
 	struct se_cmd *cmd = task->task_se_cmd;
 	struct se_device *dev = cmd->se_dev;
 	unsigned long flags;
-#if 0
-	pr_debug("task: %p CDB: 0x%02x obj_ptr: %p\n", task,
-		cmd->t_task_cdb[0], dev);
-#endif
-	if (dev)
-		atomic_inc(&dev->depth_left);
+
+	if (dev->enforce_dev_tcq) {
+		spin_lock_irq(&dev->execute_task_lock);
+		dev->depth_left++;
+		spin_unlock_irq(&dev->execute_task_lock);
+	}
 
 	spin_lock_irqsave(&cmd->t_state_lock, flags);
 	task->task_flags &= ~TF_ACTIVE;
@@ -996,7 +996,7 @@ void transport_dump_dev_state(
 	}
 
 	*bl += sprintf(b + *bl, "  Execute/Left/Max Queue Depth: %d/%d/%d",
-		atomic_read(&dev->execute_tasks), atomic_read(&dev->depth_left),
+		atomic_read(&dev->execute_tasks), dev->depth_left,
 		dev->queue_depth);
 	*bl += sprintf(b + *bl, "  SectorSize: %u  MaxSectors: %u\n",
 		dev->se_sub_dev->se_dev_attrib.block_size, dev->se_sub_dev->se_dev_attrib.max_sectors);
@@ -1358,9 +1358,16 @@ struct se_device *transport_add_device_to_core_hba(
 	spin_lock_init(&dev->se_port_lock);
 	spin_lock_init(&dev->se_tmr_lock);
 	spin_lock_init(&dev->qf_cmd_lock);
-
-	dev->queue_depth = dev_limits->queue_depth;
-	atomic_set(&dev->depth_left, dev->queue_depth);
+	/*
+	 * Only setup and enforce the se_device queue depth for
+	 * pSCSI backends using SDF_ENFORCE_DEVICE_TCQ.
+	 */
+	if (se_dev->su_dev_flags & SDF_ENFORCE_DEVICE_TCQ) {
+		dev->queue_depth = dev->depth_left = dev_limits->queue_depth;
+		dev->enforce_dev_tcq = 1;
+		pr_debug("Using dev->enforce_dev_tcq: 1 for %s backend"
+			" device: %p\n", transport->name, dev);
+	}
 
 	atomic_set(&dev->dev_ordered_id, 0);
 	se_dev_set_default_attribs(dev, dev_limits);
@@ -2163,18 +2170,21 @@ static int __transport_execute_tasks(struct se_device *dev)
 	struct se_cmd *cmd = NULL;
 	struct se_task *task = NULL;
 	unsigned long flags;
-
 	/*
-	 * Check if there is enough room in the device and HBA queue to send
-	 * struct se_tasks to the selected transport.
+	 * By default, enforce_dev_tcq is only enabled for pSCSI devices
+	 * All other device backends (including IBLOCK and FILEIO)
+	 * set enforce_dev_tcq=0, and dispatch new tasks as soon as
+	 * possible.
 	 */
 check_depth:
-	if (!atomic_read(&dev->depth_left))
-		return transport_tcq_window_closed(dev);
-
-	dev->dev_tcq_window_closed = 0;
-
 	spin_lock_irq(&dev->execute_task_lock);
+	if (dev->enforce_dev_tcq) {
+		if (!dev->depth_left) {
+			spin_unlock_irq(&dev->execute_task_lock);
+			return transport_tcq_window_closed(dev);
+		}
+		dev->dev_tcq_window_closed = 0;
+	}
 	if (list_empty(&dev->execute_task_list)) {
 		spin_unlock_irq(&dev->execute_task_lock);
 		return 0;
@@ -2182,12 +2192,11 @@ check_depth:
 	task = list_first_entry(&dev->execute_task_list,
 				struct se_task, t_execute_list);
 	__transport_remove_task_from_execute_queue(task, dev);
+	if (dev->enforce_dev_tcq)
+		dev->depth_left--;
 	spin_unlock_irq(&dev->execute_task_lock);
 
-	atomic_dec(&dev->depth_left);
-
 	cmd = task->task_se_cmd;
-
 	spin_lock_irqsave(&cmd->t_state_lock, flags);
 	task->task_flags |= (TF_ACTIVE | TF_SENT);
 	atomic_inc(&cmd->t_task_cdbs_sent);
@@ -2208,7 +2217,11 @@ check_depth:
 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 		atomic_set(&cmd->t_transport_sent, 0);
 		transport_stop_tasks_for_cmd(cmd);
-		atomic_inc(&dev->depth_left);
+		if (dev->enforce_dev_tcq) {
+			spin_lock_irq(&dev->execute_task_lock);
+			dev->depth_left++;
+			spin_unlock_irq(&dev->execute_task_lock);
+		}
 		transport_generic_request_failure(cmd);
 	}
 
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 81bd34c..0fe8183 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -710,11 +710,13 @@ struct se_device {
 	u32			dev_tcq_window_closed;
 	/* Physical device queue depth */
 	u32			queue_depth;
+	u32			depth_left;
 	/* Used for SPC-2 reservations enforce of ISIDs */
 	u64			dev_res_bin_isid;
 	t10_task_attr_index_t	dev_task_attr_type;
 	/* Pointer to transport specific device structure */
 	void			*dev_ptr;
+	unsigned int		enforce_dev_tcq:1;
 	u32			dev_index;
 	u64			creation_time;
 	u32			num_resets;
@@ -725,7 +727,6 @@ struct se_device {
 	/* Active commands on this virtual SE device */
 	atomic_t		active_cmds;
 	atomic_t		simple_cmds;
-	atomic_t		depth_left;
 	atomic_t		dev_ordered_id;
 	atomic_t		dev_tur_active;
 	atomic_t		execute_tasks;
diff --git a/include/target/target_core_transport.h b/include/target/target_core_transport.h
index b3e80e7..6d2f329 100644
--- a/include/target/target_core_transport.h
+++ b/include/target/target_core_transport.h
@@ -21,6 +21,7 @@
 #define SDF_EMULATED_VPD_UNIT_SERIAL	0x00000002
 #define SDF_USING_UDEV_PATH		0x00000004
 #define SDF_USING_ALIAS			0x00000008
+#define SDF_ENFORCE_DEVICE_TCQ		0x00000010
 
 /*
  * struct se_device->dev_flags
-- 
1.7.2.5