Vivek Goyal wrote:
...
> }
> +#ifdef CONFIG_GROUP_IOSCHED
> +static ssize_t queue_group_requests_show(struct request_queue *q, char *page)
> +{
> +	return queue_var_show(q->nr_group_requests, (page));
> +}
> +
> +static ssize_t
> +queue_group_requests_store(struct request_queue *q, const char *page,
> +			   size_t count)
> +{
> +	unsigned long nr;
> +	int ret = queue_var_store(&nr, page, count);
> +	if (nr < BLKDEV_MIN_RQ)
> +		nr = BLKDEV_MIN_RQ;
> +
> +	spin_lock_irq(q->queue_lock);
> +	q->nr_group_requests = nr;
> +	spin_unlock_irq(q->queue_lock);
> +	return ret;
> +}
> +#endif

Hi Vivek,

Do we need to update the congestion thresholds for the allocated io groups
when nr_group_requests is changed? How about the following patch?

Signed-off-by: Gui Jianfeng <guijianfeng@xxxxxxxxxxxxxx>
---
 block/blk-sysfs.c |   15 +++++++++++++++
 1 files changed, 15 insertions(+), 0 deletions(-)

diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 577ed42..92b9f25 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -83,17 +83,32 @@ static ssize_t queue_group_requests_show(struct request_queue *q, char *page)
 	return queue_var_show(q->nr_group_requests, (page));
 }
 
+extern void elv_io_group_congestion_threshold(struct request_queue *q,
+					      struct io_group *iog);
+
 static ssize_t
 queue_group_requests_store(struct request_queue *q, const char *page,
 			   size_t count)
 {
+	struct hlist_node *n;
+	struct io_group *iog;
+	struct elv_fq_data *efqd;
 	unsigned long nr;
 	int ret = queue_var_store(&nr, page, count);
+
 	if (nr < BLKDEV_MIN_RQ)
 		nr = BLKDEV_MIN_RQ;
 
 	spin_lock_irq(q->queue_lock);
+
 	q->nr_group_requests = nr;
+
+	efqd = &q->elevator->efqd;
+
+	hlist_for_each_entry(iog, n, &efqd->group_list, elv_data_node) {
+		elv_io_group_congestion_threshold(q, iog);
+	}
+
 	spin_unlock_irq(q->queue_lock);
 	return ret;
 }
-- 
1.5.4.rc3

--
dm-devel mailing list
dm-devel@xxxxxxxxxx
https://www.redhat.com/mailman/listinfo/dm-devel
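
For reference, the helper called in the loop above is expected to recompute a
group's congestion marks from the new q->nr_group_requests value, much as
blk_queue_congestion_threshold() in block/blk-core.c derives
q->nr_congestion_on/off from q->nr_requests. A rough sketch of what such a
helper could look like, assuming hypothetical per-group fields
congestion_on_threshold and congestion_off_threshold (the field names are
illustrative only, not taken from the actual io-controller patchset):

/*
 * Sketch only: recompute a group's congestion thresholds after
 * q->nr_group_requests changes.  Modeled on blk_queue_congestion_threshold();
 * iog field names are hypothetical.
 */
void elv_io_group_congestion_threshold(struct request_queue *q,
				       struct io_group *iog)
{
	int nr;

	/* Mark the group congested once it gets close to its request limit. */
	nr = q->nr_group_requests - (q->nr_group_requests / 8) + 1;
	if (nr > q->nr_group_requests)
		nr = q->nr_group_requests;
	iog->congestion_on_threshold = nr;

	/* Clear congestion only once it drops comfortably below the limit. */
	nr = q->nr_group_requests - (q->nr_group_requests / 8)
		- (q->nr_group_requests / 16) - 1;
	if (nr < 1)
		nr = 1;
	iog->congestion_off_threshold = nr;
}

Calling something along these lines for every group on efqd->group_list while
holding queue_lock, as the patch above does, would keep each group's
thresholds consistent with the newly written limit.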