From: Wang Jianchao <wangjianchao@xxxxxxxxxxxx>

Make blk-iolatency pluggable and modular. It can then be opened or
closed at runtime through /sys/block/xxx/queue/qos, and the module can
be removed with rmmod when it is not needed, which releases one blkcg
policy slot.

Signed-off-by: Wang Jianchao <wangjianchao@xxxxxxxxxxxx>
---
 block/Kconfig          |  2 +-
 block/Makefile         |  4 +++-
 block/blk-cgroup.c     |  6 ------
 block/blk-iolatency.c  | 39 +++++++++++++++++++++++++++++++--------
 block/blk-mq-debugfs.c |  2 --
 block/blk-rq-qos.h     |  6 ------
 block/blk.h            |  6 ------
 7 files changed, 35 insertions(+), 30 deletions(-)

diff --git a/block/Kconfig b/block/Kconfig
index c6ce41a5e5b2..1c0d05df2aec 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -111,7 +111,7 @@ config BLK_WBT_MQ
 	  Enable writeback throttling by default for request-based block devices.
 
 config BLK_CGROUP_IOLATENCY
-	bool "Enable support for latency based cgroup IO protection"
+	tristate "Enable support for latency based cgroup IO protection"
 	depends on BLK_CGROUP
 	help
 	  Enabling this option enables the .latency interface for IO throttling.
diff --git a/block/Makefile b/block/Makefile
index 44df57e562bf..ccf61c57e1d4 100644
--- a/block/Makefile
+++ b/block/Makefile
@@ -18,7 +18,9 @@ obj-$(CONFIG_BLK_CGROUP)	+= blk-cgroup.o
 obj-$(CONFIG_BLK_CGROUP_RWSTAT)	+= blk-cgroup-rwstat.o
 obj-$(CONFIG_BLK_DEV_THROTTLING)	+= blk-throttle.o
 obj-$(CONFIG_BLK_CGROUP_IOPRIO)	+= blk-ioprio.o
-obj-$(CONFIG_BLK_CGROUP_IOLATENCY)	+= blk-iolatency.o
+iolat-y				:= blk-iolatency.o
+obj-$(CONFIG_BLK_CGROUP_IOLATENCY)	+= iolat.o
+
 obj-$(CONFIG_BLK_CGROUP_IOCOST)	+= blk-iocost.o
 obj-$(CONFIG_MQ_IOSCHED_DEADLINE)	+= mq-deadline.o
 obj-$(CONFIG_MQ_IOSCHED_KYBER)	+= kyber-iosched.o
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index fb56d74f1c8e..fd874dfd38ed 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -1212,12 +1212,6 @@ int blkcg_init_queue(struct request_queue *q)
 	if (ret)
 		goto err_destroy_all;
 
-	ret = blk_iolatency_init(q);
-	if (ret) {
-		blk_throtl_exit(q);
-		goto err_destroy_all;
-	}
-
 	return 0;
 
 err_destroy_all:
diff --git a/block/blk-iolatency.c b/block/blk-iolatency.c
index 6593c7123b97..6aaf0775e484 100644
--- a/block/blk-iolatency.c
+++ b/block/blk-iolatency.c
@@ -90,6 +90,12 @@ struct blk_iolatency {
 	atomic_t enabled;
 };
 
+static struct rq_qos_ops blkcg_iolatency_ops;
+static inline struct rq_qos *blkcg_rq_qos(struct request_queue *q)
+{
+	return rq_qos_by_id(q, blkcg_iolatency_ops.id);
+}
+
 static inline struct blk_iolatency *BLKIOLATENCY(struct rq_qos *rqos)
 {
 	return container_of(rqos, struct blk_iolatency, rqos);
@@ -646,13 +652,21 @@ static void blkcg_iolatency_exit(struct rq_qos *rqos)
 
 	del_timer_sync(&blkiolat->timer);
 	blkcg_deactivate_policy(rqos->q, &blkcg_policy_iolatency);
+	rq_qos_deactivate(rqos);
 	kfree(blkiolat);
 }
 
+static int blk_iolatency_init(struct request_queue *q);
 static struct rq_qos_ops blkcg_iolatency_ops = {
+#if IS_MODULE(CONFIG_BLK_CGROUP_IOLATENCY)
+	.owner = THIS_MODULE,
+#endif
+	.name = "iolat",
+	.flags = RQOS_FLAG_CGRP_POL,
 	.throttle = blkcg_iolatency_throttle,
 	.done_bio = blkcg_iolatency_done_bio,
 	.exit = blkcg_iolatency_exit,
+	.init = blk_iolatency_init,
 };
 
 static void blkiolatency_timer_fn(struct timer_list *t)
@@ -727,15 +741,10 @@ int blk_iolatency_init(struct request_queue *q)
 		return -ENOMEM;
 
 	rqos = &blkiolat->rqos;
-	rqos->id = RQ_QOS_LATENCY;
-	rqos->ops = &blkcg_iolatency_ops;
-	rqos->q = q;
-
-	rq_qos_add(q, rqos);
-
+	rq_qos_activate(q, rqos, &blkcg_iolatency_ops);
 	ret = blkcg_activate_policy(q, &blkcg_policy_iolatency);
 	if (ret) {
-		rq_qos_del(q, rqos);
+		rq_qos_deactivate(rqos);
 		kfree(blkiolat);
 		return ret;
 	}
@@ -1046,13 +1055,27 @@ static struct blkcg_policy blkcg_policy_iolatency = {
 
 static int __init iolatency_init(void)
 {
-	return blkcg_policy_register(&blkcg_policy_iolatency);
+	int ret;
+
+	ret = rq_qos_register(&blkcg_iolatency_ops);
+	if (ret)
+		return ret;
+
+	ret = blkcg_policy_register(&blkcg_policy_iolatency);
+	if (ret)
+		rq_qos_unregister(&blkcg_iolatency_ops);
+
+	return ret;
 }
 
 static void __exit iolatency_exit(void)
 {
 	blkcg_policy_unregister(&blkcg_policy_iolatency);
+	rq_qos_unregister(&blkcg_iolatency_ops);
 }
 
 module_init(iolatency_init);
 module_exit(iolatency_exit);
+MODULE_AUTHOR("Josef Bacik");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Latency based cgroup IO protection");
diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
index 9c786b63c847..57c33f4730f2 100644
--- a/block/blk-mq-debugfs.c
+++ b/block/blk-mq-debugfs.c
@@ -820,8 +820,6 @@ void blk_mq_debugfs_unregister_sched(struct request_queue *q)
 static const char *rq_qos_id_to_name(enum rq_qos_id id)
 {
 	switch (id) {
-	case RQ_QOS_LATENCY:
-		return "latency";
 	case RQ_QOS_COST:
 		return "cost";
 	case RQ_QOS_IOPRIO:
diff --git a/block/blk-rq-qos.h b/block/blk-rq-qos.h
index de82eb951bdd..6ca46c69e325 100644
--- a/block/blk-rq-qos.h
+++ b/block/blk-rq-qos.h
@@ -14,7 +14,6 @@ struct blk_mq_debugfs_attr;
 
 enum rq_qos_id {
-	RQ_QOS_LATENCY,
 	RQ_QOS_COST,
 	RQ_QOS_IOPRIO,
 };
 
@@ -85,11 +84,6 @@ static inline struct rq_qos *rq_qos_by_id(struct request_queue *q, int id)
 	return rqos;
 }
 
-static inline struct rq_qos *blkcg_rq_qos(struct request_queue *q)
-{
-	return rq_qos_by_id(q, RQ_QOS_LATENCY);
-}
-
 static inline void rq_wait_init(struct rq_wait *rq_wait)
 {
 	atomic_set(&rq_wait->inflight, 0);
diff --git a/block/blk.h b/block/blk.h
index ccde6e6f1736..e2e4fbb9a58d 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -436,12 +436,6 @@ static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
 		__blk_queue_bounce(q, bio);
 }
 
-#ifdef CONFIG_BLK_CGROUP_IOLATENCY
-extern int blk_iolatency_init(struct request_queue *q);
-#else
-static inline int blk_iolatency_init(struct request_queue *q) { return 0; }
-#endif
-
 struct bio *blk_next_bio(struct bio *bio, unsigned int nr_pages, gfp_t gfp);
 
 #ifdef CONFIG_BLK_DEV_ZONED
-- 
2.17.1
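
For reference, a quick usage sketch. It assumes the
/sys/block/<dev>/queue/qos interface added earlier in this series
accepts "+<name>"/"-<name>" writes to open and close a policy; the
policy name "iolat" comes from blkcg_iolatency_ops.name above, and sda
is just an example device:

	modprobe iolat                            # CONFIG_BLK_CGROUP_IOLATENCY=m
	echo "+iolat" > /sys/block/sda/queue/qos  # open blk-iolatency on sda
	echo "-iolat" > /sys/block/sda/queue/qos  # close it again
	rmmod iolat                               # releases the blkcg policy slot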