Re: [PATCH 14/17] blk-mq: move the srcu_struct used for quiescing to the tagset

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 





On 10/26/22 11:48, Chao Leng wrote:


On 2022/10/25 22:40, Christoph Hellwig wrote:
All I/O submissions have fairly similar latencies, and a tagset-wide
quiesce is a fairly common operation.

Signed-off-by: Christoph Hellwig <hch@xxxxxx>
Reviewed-by: Keith Busch <kbusch@xxxxxxxxxx>
Reviewed-by: Ming Lei <ming.lei@xxxxxxxxxx>
Reviewed-by: Chao Leng <lengchao@xxxxxxxxxx>
Reviewed-by: Sagi Grimberg <sagi@xxxxxxxxxxx>
Reviewed-by: Hannes Reinecke <hare@xxxxxxx>
---
  block/blk-core.c       | 27 +++++----------------------
  block/blk-mq.c         | 33 +++++++++++++++++++++++++--------
  block/blk-mq.h         | 14 +++++++-------
  block/blk-sysfs.c      |  9 ++-------
  block/blk.h            |  9 +--------
  block/genhd.c          |  2 +-
  include/linux/blk-mq.h |  4 ++++
  include/linux/blkdev.h |  9 ---------
  8 files changed, 45 insertions(+), 62 deletions(-)

diff --git a/block/blk-core.c b/block/blk-core.c
index 17667159482e0..3a2ed8dadf738 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -65,7 +65,6 @@ DEFINE_IDA(blk_queue_ida);
   * For queue allocation
   */
  struct kmem_cache *blk_requestq_cachep;
-struct kmem_cache *blk_requestq_srcu_cachep;
  /*
   * Controlling structure to kblockd
@@ -373,26 +372,20 @@ static void blk_timeout_work(struct work_struct *work)
  {
  }
-struct request_queue *blk_alloc_queue(int node_id, bool alloc_srcu)
+struct request_queue *blk_alloc_queue(int node_id)
  {
      struct request_queue *q;
-    q = kmem_cache_alloc_node(blk_get_queue_kmem_cache(alloc_srcu),
-            GFP_KERNEL | __GFP_ZERO, node_id);
+    q = kmem_cache_alloc_node(blk_requestq_cachep, GFP_KERNEL | __GFP_ZERO,
+                  node_id);
      if (!q)
          return NULL;
-    if (alloc_srcu) {
-        blk_queue_flag_set(QUEUE_FLAG_HAS_SRCU, q);
-        if (init_srcu_struct(q->srcu) != 0)
-            goto fail_q;
-    }
-
      q->last_merge = NULL;
      q->id = ida_alloc(&blk_queue_ida, GFP_KERNEL);
      if (q->id < 0)
-        goto fail_srcu;
+        goto fail_q;
      q->stats = blk_alloc_queue_stats();
      if (!q->stats)
@@ -435,11 +428,8 @@ struct request_queue *blk_alloc_queue(int node_id, bool alloc_srcu)
      blk_free_queue_stats(q->stats);
  fail_id:
      ida_free(&blk_queue_ida, q->id);
-fail_srcu:
-    if (alloc_srcu)
-        cleanup_srcu_struct(q->srcu);
  fail_q:
-    kmem_cache_free(blk_get_queue_kmem_cache(alloc_srcu), q);
+    kmem_cache_free(blk_requestq_cachep, q);
      return NULL;
  }
@@ -1184,9 +1174,6 @@ int __init blk_dev_init(void)
              sizeof_field(struct request, cmd_flags));
      BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
              sizeof_field(struct bio, bi_opf));
-    BUILD_BUG_ON(ALIGN(offsetof(struct request_queue, srcu),
-               __alignof__(struct request_queue)) !=
-             sizeof(struct request_queue));
      /* used for unplugging and affects IO latency/throughput - HIGHPRI */
      kblockd_workqueue = alloc_workqueue("kblockd",
@@ -1197,10 +1184,6 @@ int __init blk_dev_init(void)
      blk_requestq_cachep = kmem_cache_create("request_queue",
              sizeof(struct request_queue), 0, SLAB_PANIC, NULL);
-    blk_requestq_srcu_cachep = kmem_cache_create("request_queue_srcu",
-            sizeof(struct request_queue) +
-            sizeof(struct srcu_struct), 0, SLAB_PANIC, NULL);
-
      blk_debugfs_root = debugfs_create_dir("block", NULL);
      return 0;
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 802fdd3d737e3..6cbf34921e33f 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -261,8 +261,8 @@ EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue_nowait);
   */
  void blk_mq_wait_quiesce_done(struct request_queue *q)
  {
-    if (blk_queue_has_srcu(q))
-        synchronize_srcu(q->srcu);
+    if (q->tag_set->flags & BLK_MQ_F_BLOCKING)
+        synchronize_srcu(q->tag_set->srcu);
      else
          synchronize_rcu();
  }
@@ -3971,7 +3971,7 @@ static struct request_queue *blk_mq_init_queue_data(struct blk_mq_tag_set *set,
      struct request_queue *q;
      int ret;
-    q = blk_alloc_queue(set->numa_node, set->flags & BLK_MQ_F_BLOCKING);
+    q = blk_alloc_queue(set->numa_node);
      if (!q)
          return ERR_PTR(-ENOMEM);
      q->queuedata = queuedata;
@@ -4138,9 +4138,6 @@ static void blk_mq_update_poll_flag(struct request_queue *q)
  int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
          struct request_queue *q)
  {
-    WARN_ON_ONCE(blk_queue_has_srcu(q) !=
-            !!(set->flags & BLK_MQ_F_BLOCKING));
-
      /* mark the queue as mq asap */
      q->mq_ops = set->ops;
@@ -4398,9 +4395,19 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
       */
      if (set->nr_maps == 1 && set->nr_hw_queues > nr_cpu_ids)
          set->nr_hw_queues = nr_cpu_ids;
+
+    if (set->flags & BLK_MQ_F_BLOCKING) {
+        set->srcu = kmalloc(sizeof(*set->srcu), GFP_KERNEL);
Physically contiguous memory is not required here, so kvmalloc() may be a better choice,
since sizeof(*set->srcu) is somewhat large.
kvmalloc() is friendlier in memory-constrained scenarios and on systems that have been running for a long time (where physical memory may be fragmented).

Huh?

(gdb) p sizeof(struct srcu_struct)
$1 = 392




[Index of Archives]     [Linux RAID]     [Linux SCSI]     [Linux ATA RAID]     [IDE]     [Linux Wireless]     [Linux Kernel]     [ATH6KL]     [Linux Bluetooth]     [Linux Netdev]     [Kernel Newbies]     [Security]     [Git]     [Netfilter]     [Bugtraq]     [Yosemite News]     [MIPS Linux]     [ARM Linux]     [Linux Security]     [Device Mapper]

  Powered by Linux