Re: [PATCH 02/11] target: add workqueue cmd submission helper

On 2/4/21 5:13 PM, Chaitanya Kulkarni wrote:
On 2/4/21 03:41, Mike Christie wrote:
loop and vhost-scsi do their target cmd submission from driver
workqueues. This allows them to avoid an issue where the backend may
block waiting for resources like tags/requests, memory or locks, which
ends up blocking their entire submission path, and in the case of
vhost-scsi both the submission and completion paths.

This patch adds a helper that these drivers can use to submit from the
lio workqueue. This code will then be extended in the next patches to
fix the plugging of backend devices.

Signed-off-by: Mike Christie <michael.christie@xxxxxxxxxx>
---
  drivers/target/target_core_transport.c | 102 ++++++++++++++++++++++++-
  include/target/target_core_base.h      |  10 ++-
  include/target/target_core_fabric.h    |   3 +
  3 files changed, 111 insertions(+), 4 deletions(-)

diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 7c5d37bac561..dec89e911348 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -41,6 +41,7 @@
  #include <trace/events/target.h>
  static struct workqueue_struct *target_completion_wq;
+static struct workqueue_struct *target_submission_wq;
  static struct kmem_cache *se_sess_cache;
  struct kmem_cache *se_ua_cache;
  struct kmem_cache *t10_pr_reg_cache;
@@ -129,8 +130,15 @@ int init_se_kmem_caches(void)
  	if (!target_completion_wq)
  		goto out_free_lba_map_mem_cache;
+	target_submission_wq = alloc_workqueue("target_submission",
+					       WQ_MEM_RECLAIM, 0);
+	if (!target_submission_wq)
+		goto out_free_completion_wq;
+
  	return 0;
+out_free_completion_wq:
+	destroy_workqueue(target_completion_wq);
  out_free_lba_map_mem_cache:
  	kmem_cache_destroy(t10_alua_lba_map_mem_cache);
  out_free_lba_map_cache:
@@ -153,6 +161,7 @@ int init_se_kmem_caches(void)
  void release_se_kmem_caches(void)
  {
+	destroy_workqueue(target_submission_wq);
  	destroy_workqueue(target_completion_wq);
  	kmem_cache_destroy(se_sess_cache);
  	kmem_cache_destroy(se_ua_cache);
@@ -218,6 +227,69 @@ static void target_release_sess_cmd_refcnt(struct percpu_ref *ref)
  	wake_up(&sess->cmd_count_wq);
  }
+static void target_queued_submit_work(struct work_struct *work)
+{
+	struct se_sess_cmd_queue *sq =
+				container_of(work, struct se_sess_cmd_queue,
+					     work);
+	struct se_session *se_sess = sq->se_sess;
+	struct se_cmd *se_cmd, *next_cmd;
+	struct llist_node *cmd_list;
+
+	cmd_list = llist_del_all(&sq->cmd_list);
+	if (!cmd_list)
+		/* Previous call took what we were queued to submit */
+		return;
+
+	cmd_list = llist_reverse_order(cmd_list);
+	llist_for_each_entry_safe(se_cmd, next_cmd, cmd_list, se_cmd_list)
+		se_sess->tfo->submit_queued_cmd(se_cmd);
+}
+
+static void target_queue_cmd_work(struct se_sess_cmd_queue *q,
+				  struct se_cmd *se_cmd, int cpu)
+{
+	llist_add(&se_cmd->se_cmd_list, &q->cmd_list);
+	queue_work_on(cpu, target_submission_wq, &q->work);
+}
+
+/**
+ * target_queue_cmd_submit - queue a se_cmd to be executed from the lio wq
+ * @se_sess: cmd's session
+ * @se_cmd: cmd to queue
+ */
+void target_queue_cmd_submit(struct se_session *se_sess, struct se_cmd *se_cmd)
+{
+	int cpu = smp_processor_id();
+
+	target_queue_cmd_work(&se_sess->sq[cpu], se_cmd, cpu);
+}
+EXPORT_SYMBOL_GPL(target_queue_cmd_submit);
+
+static void target_flush_queued_cmds(struct se_session *se_sess)
+{
+	int i;
+
+	if (!se_sess->sq)
+		return;
+
+	for (i = 0; i < se_sess->q_cnt; i++)
+		cancel_work_sync(&se_sess->sq[i].work);
+}
+
+static void target_init_sess_cmd_queues(struct se_session *se_sess,
+					struct se_sess_cmd_queue *q,
+					void (*work_fn)(struct work_struct *work))
+{
+	int i;
+
+	for (i = 0; i < se_sess->q_cnt; i++) {
+		init_llist_head(&q[i].cmd_list);
+		INIT_WORK(&q[i].work, work_fn);
+		q[i].se_sess = se_sess;
+	}
+}
+
Can we open-code the above function if there is only one caller?
Unless there is a specific reason to have it on its own, which I failed
to understand.

Patch 10 also calls it. I tried to say that at the end of the patch description, but now that I read it again it was not very clear.

I couldn't decide if I should do it now or later. I selected now since it made the 10th patch smaller.
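
For anyone following along, here is a rough sketch of how a converted
fabric driver would be expected to use the new helper. The demo_* names
and the callback body are illustrative assumptions, not code from this
series:

/*
 * Minimal sketch, assuming a driver-private demo_cmd that embeds the
 * se_cmd, and that demo_driver_submit() stands in for the driver's
 * existing submission routine.
 */
static void demo_submit_queued_cmd(struct se_cmd *se_cmd)
{
	/*
	 * Runs from the lio submission workqueue, so it can block on
	 * backend tags, memory or locks without stalling the driver's
	 * own submission (or, for vhost-scsi, completion) context.
	 */
	struct demo_cmd *cmd = container_of(se_cmd, struct demo_cmd,
					    se_cmd);

	demo_driver_submit(cmd);
}

static const struct target_core_fabric_ops demo_ops = {
	/* ... other fabric ops ... */
	.submit_queued_cmd	= demo_submit_queued_cmd,
};

/*
 * Fast path: push the cmd onto this CPU's llist and kick the work
 * instead of calling into the backend inline.
 */
static void demo_queuecommand(struct se_session *se_sess,
			      struct demo_cmd *cmd)
{
	target_queue_cmd_submit(se_sess, &cmd->se_cmd);
}

One detail worth noting: target_queue_cmd_work() uses llist_add(),
which pushes onto the head of the lock-free list, so
target_queued_submit_work() calls llist_reverse_order() after
llist_del_all() to dispatch the cmds in submission order.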


