RE: [PATCH RFC v7 00/12] blk-mq/scsi: Provide hostwide shared tags for SCSI HBAs

>
> On Mon, Jun 22, 2020 at 08:24:39AM +0200, Hannes Reinecke wrote:
> > On 6/17/20 1:26 PM, Kashyap Desai wrote:
> > > >
> > > > ->queued is increased only and not decreased, just for debug
> > > > purpose so far, so it can't be relied on for this purpose.
> > >
> > > Thanks. I overlooked that it is only an incremental counter.
> > >
> > > >
> > > > One approach is to add one similar counter, and maintain it by the
> > > > scheduler's insert/dispatch callbacks.
> > >
> > > I tried the change below and I see performance is in the expected range.
> > >
> > > diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
> > > index fdcc2c1..ea201d0 100644
> > > --- a/block/blk-mq-sched.c
> > > +++ b/block/blk-mq-sched.c
> > > @@ -485,6 +485,7 @@ void blk_mq_sched_insert_request(struct request *rq, bool at_head,
> > >
> > >                  list_add(&rq->queuelist, &list);
> > >                  e->type->ops.insert_requests(hctx, &list, at_head);
> > > +               atomic_inc(&hctx->elevator_queued);
> > >          } else {
> > >                  spin_lock(&ctx->lock);
> > >                  __blk_mq_insert_request(hctx, rq, at_head);
> > > @@ -511,8 +512,10 @@ void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
> > >          percpu_ref_get(&q->q_usage_counter);
> > >
> > >          e = hctx->queue->elevator;
> > > -       if (e && e->type->ops.insert_requests)
> > > +       if (e && e->type->ops.insert_requests) {
> > >                  e->type->ops.insert_requests(hctx, list, false);
> > > +               atomic_inc(&hctx->elevator_queued);
> > > +       }
> > >          else {
> > >                  /*
> > >                   * try to issue requests directly if the hw queue isn't
> > > diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h
> > > index 126021f..946b47a 100644
> > > --- a/block/blk-mq-sched.h
> > > +++ b/block/blk-mq-sched.h
> > > @@ -74,6 +74,13 @@ static inline bool blk_mq_sched_has_work(struct blk_mq_hw_ctx *hctx)
> > >  {
> > >          struct elevator_queue *e = hctx->queue->elevator;
> > >
> > > +       /* If current hctx has not queued any request, there is no need to run.
> > > +        * blk_mq_run_hw_queue() on hctx which has queued IO will handle
> > > +        * running specific hctx.
> > > +        */
> > > +       if (!atomic_read(&hctx->elevator_queued))
> > > +               return false;
> > > +
> > >          if (e && e->type->ops.has_work)
> > >                  return e->type->ops.has_work(hctx);
> > >
> > > diff --git a/block/blk-mq.c b/block/blk-mq.c
> > > index f73a2f9..48f1824 100644
> > > --- a/block/blk-mq.c
> > > +++ b/block/blk-mq.c
> > > @@ -517,8 +517,10 @@ void blk_mq_free_request(struct request *rq)
> > >          struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
> > >
> > >          if (rq->rq_flags & RQF_ELVPRIV) {
> > > -               if (e && e->type->ops.finish_request)
> > > +               if (e && e->type->ops.finish_request) {
> > >                          e->type->ops.finish_request(rq);
> > > +                       atomic_dec(&hctx->elevator_queued);
> > > +               }
> > >                  if (rq->elv.icq) {
> > >                          put_io_context(rq->elv.icq->ioc);
> > >                          rq->elv.icq = NULL;
> > > @@ -2571,6 +2573,7 @@ blk_mq_alloc_hctx(struct request_queue *q, struct blk_mq_tag_set *set,
> > >                  goto free_hctx;
> > >
> > >          atomic_set(&hctx->nr_active, 0);
> > > +       atomic_set(&hctx->elevator_queued, 0);
> > >          if (node == NUMA_NO_NODE)
> > >                  node = set->numa_node;
> > >          hctx->numa_node = node;
> > > diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
> > > index 66711c7..ea1ddb1 100644
> > > --- a/include/linux/blk-mq.h
> > > +++ b/include/linux/blk-mq.h
> > > @@ -139,6 +139,10 @@ struct blk_mq_hw_ctx {
> > >           * shared across request queues.
> > >           */
> > >          atomic_t                nr_active;
> > > +       /**
> > > +        * @elevator_queued: Number of queued requests on hctx.
> > > +        */
> > > +       atomic_t                elevator_queued;
> > >
> > >          /** @cpuhp_online: List to store request if CPU is going to die */
> > >          struct hlist_node       cpuhp_online;
> > >
> > >
> > >
> > Would it make sense to move it into the elevator itself?

I am not sure where exactly I should add this counter, since I need a
counter per hctx and the elevator data is per request object.
Please suggest.
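
For illustration only, here is one possible reading of "moving it into the
elevator itself": keep a per-hctx atomic array in the scheduler's private
data, allocated in ->init_sched() and indexed by hctx->queue_num. This is a
rough sketch, not a tested patch, and the field name "queued_per_hctx" is
made up for the example (mq-deadline shown):

struct deadline_data {
	/* ... existing fields ... */
	atomic_t *queued_per_hctx;	/* one counter per hw queue */
};

/* ->insert_requests() would atomic_inc() the hctx->queue_num slot for each
 * inserted request, and ->finish_request() would atomic_dec() it again.
 */

static bool dd_has_work(struct blk_mq_hw_ctx *hctx)
{
	struct deadline_data *dd = hctx->queue->elevator->elevator_data;

	/* nothing was queued on this hctx, so no need to run it */
	if (!atomic_read(&dd->queued_per_hctx[hctx->queue_num]))
		return false;

	return !list_empty_careful(&dd->dispatch) ||
		!list_empty_careful(&dd->fifo_list[0]) ||
		!list_empty_careful(&dd->fifo_list[1]);
}

The updated patch below takes the other route: the atomic stays in struct
blk_mq_hw_ctx, but it is only incremented/decremented from the bfq and
mq-deadline callbacks, so other elevators (and the none case) do not pay
the extra atomic cost.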

>
> That is my initial suggestion, and the counter is just done for bfq &
> mq-deadline, so we needn't pay the cost for others.

I have updated the patch:

diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index a1123d4..3e0005c 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -4640,6 +4640,12 @@ static bool bfq_has_work(struct blk_mq_hw_ctx *hctx)
 {
        struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;

+       /* If current hctx has not queued any request, there is no need to run.
+        * blk_mq_run_hw_queue() on hctx which has queued IO will handle
+        * running specific hctx.
+        */
+       if (!atomic_read(&hctx->elevator_queued))
+               return false;
        /*
         * Avoiding lock: a race on bfqd->busy_queues should cause at
         * most a call to dispatch for nothing
@@ -5554,6 +5561,7 @@ static void bfq_insert_requests(struct blk_mq_hw_ctx *hctx,
                rq = list_first_entry(list, struct request, queuelist);
                list_del_init(&rq->queuelist);
                bfq_insert_request(hctx, rq, at_head);
+               atomic_inc(&hctx->elevator_queued);
        }
 }

@@ -5925,6 +5933,7 @@ static void bfq_finish_requeue_request(struct request *rq)

        if (likely(rq->rq_flags & RQF_STARTED)) {
                unsigned long flags;
+               struct blk_mq_hw_ctx *mq_hctx = rq->mq_hctx;

                spin_lock_irqsave(&bfqd->lock, flags);

@@ -5934,6 +5943,7 @@ static void bfq_finish_requeue_request(struct request *rq)
                bfq_completed_request(bfqq, bfqd);
                bfq_finish_requeue_request_body(bfqq);

+               atomic_dec(&mq_hctx->elevator_queued);
                spin_unlock_irqrestore(&bfqd->lock, flags);
        } else {
                /*
diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h
index 126021f..946b47a 100644
--- a/block/blk-mq-sched.h
+++ b/block/blk-mq-sched.h
@@ -74,6 +74,13 @@ static inline bool blk_mq_sched_has_work(struct blk_mq_hw_ctx *hctx)
 {
        struct elevator_queue *e = hctx->queue->elevator;

+       /* If current hctx has not queued any request, there is no need to run.
+        * blk_mq_run_hw_queue() on hctx which has queued IO will handle
+        * running specific hctx.
+        */
+       if (!atomic_read(&hctx->elevator_queued))
+               return false;
+
        if (e && e->type->ops.has_work)
                return e->type->ops.has_work(hctx);

diff --git a/block/blk-mq.c b/block/blk-mq.c
index f73a2f9..82dd152 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2571,6 +2571,7 @@ blk_mq_alloc_hctx(struct request_queue *q, struct blk_mq_tag_set *set,
                goto free_hctx;

        atomic_set(&hctx->nr_active, 0);
+       atomic_set(&hctx->elevator_queued, 0);
        if (node == NUMA_NO_NODE)
                node = set->numa_node;
        hctx->numa_node = node;
diff --git a/block/mq-deadline.c b/block/mq-deadline.c
index b57470e..703ac55 100644
--- a/block/mq-deadline.c
+++ b/block/mq-deadline.c
@@ -533,6 +533,7 @@ static void dd_insert_requests(struct blk_mq_hw_ctx *hctx,
                rq = list_first_entry(list, struct request, queuelist);
                list_del_init(&rq->queuelist);
                dd_insert_request(hctx, rq, at_head);
+               atomic_inc(&hctx->elevator_queued);
        }
        spin_unlock(&dd->lock);
 }
@@ -562,6 +563,7 @@ static void dd_prepare_request(struct request *rq)
 static void dd_finish_request(struct request *rq)
 {
        struct request_queue *q = rq->q;
+       struct blk_mq_hw_ctx *hctx = rq->mq_hctx;

        if (blk_queue_is_zoned(q)) {
                struct deadline_data *dd = q->elevator->elevator_data;
@@ -570,15 +572,23 @@ static void dd_finish_request(struct request *rq)
                spin_lock_irqsave(&dd->zone_lock, flags);
                blk_req_zone_write_unlock(rq);
                if (!list_empty(&dd->fifo_list[WRITE]))
-                       blk_mq_sched_mark_restart_hctx(rq->mq_hctx);
+                       blk_mq_sched_mark_restart_hctx(hctx);
                spin_unlock_irqrestore(&dd->zone_lock, flags);
        }
+       atomic_dec(&hctx->elevator_queued);
 }

 static bool dd_has_work(struct blk_mq_hw_ctx *hctx)
 {
        struct deadline_data *dd = hctx->queue->elevator->elevator_data;

+       /* If current hctx has not queued any request, there is no need to run.
+        * blk_mq_run_hw_queue() on hctx which has queued IO will handle
+        * running specific hctx.
+        */
+       if (!atomic_read(&hctx->elevator_queued))
+               return false;
+
        return !list_empty_careful(&dd->dispatch) ||
                !list_empty_careful(&dd->fifo_list[0]) ||
                !list_empty_careful(&dd->fifo_list[1]);
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 66711c7..ea1ddb1 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -139,6 +139,10 @@ struct blk_mq_hw_ctx {
         * shared across request queues.
         */
        atomic_t                nr_active;
+       /**
+        * @elevator_queued: Number of queued requests on hctx.
+        */
+       atomic_t                elevator_queued;

        /** @cpuhp_online: List to store request if CPU is going to die */
        struct hlist_node       cpuhp_online;
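
For context (paraphrased, not part of the patch): the elevator's internal
lists live in per-request-queue data, so once anything is queued every hctx
tends to report pending work, and a completion that runs all hw queues can
send each of them through the dispatch path. Whether a run does anything is
decided by roughly this check in blk-mq.c of this era:

/* Paraphrased for context; not a new change. */
static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
{
	return !list_empty_careful(&hctx->dispatch) ||	/* requeued requests */
		sbitmap_any_bit_set(&hctx->ctx_map) ||	/* per-cpu sw queues */
		blk_mq_sched_has_work(hctx);		/* elevator queues */
}

With elevator_queued at zero, ->has_work() now returns false up front, so
blk_mq_run_hw_queue() on an idle hctx becomes a near no-op instead of
contending on dd->lock or bfqd->lock in the dispatch callbacks.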


>
> Thanks,
> Ming


