All of the blk-rq-qos policies have been changed to use the new
interfaces, so we can now export the sysfs interface, namely
/sys/block/xxx/queue/qos, and get rid of the now-unused interfaces.

Signed-off-by: Wang Jianchao (Kuaishou) <jianchao.wan9@xxxxxxxxx>
---
 block/blk-mq-debugfs.c | 10 +------
 block/blk-rq-qos.h     | 63 +-----------------------------------------
 block/blk-sysfs.c      |  2 ++
 3 files changed, 4 insertions(+), 71 deletions(-)

diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
index cbbd668029a1..3defd5cb1cea 100644
--- a/block/blk-mq-debugfs.c
+++ b/block/blk-mq-debugfs.c
@@ -823,11 +823,6 @@ void blk_mq_debugfs_unregister_sched(struct request_queue *q)
 	q->sched_debugfs_dir = NULL;
 }
 
-static const char *rq_qos_id_to_name(enum rq_qos_id id)
-{
-	return "unknown";
-}
-
 void blk_mq_debugfs_unregister_rqos(struct rq_qos *rqos)
 {
 	debugfs_remove_recursive(rqos->debugfs_dir);
@@ -837,9 +832,6 @@ void blk_mq_debugfs_unregister_rqos(struct rq_qos *rqos)
 void blk_mq_debugfs_register_rqos(struct rq_qos *rqos)
 {
 	struct request_queue *q = rqos->q;
-	const char *dir_name;
-
-	dir_name = rqos->ops->name ? rqos->ops->name : rq_qos_id_to_name(rqos->id);
 
 	if (rqos->debugfs_dir || !rqos->ops->debugfs_attrs)
 		return;
@@ -848,7 +840,7 @@ void blk_mq_debugfs_register_rqos(struct rq_qos *rqos)
 		q->rqos_debugfs_dir = debugfs_create_dir("rqos",
 							 q->debugfs_dir);
 
-	rqos->debugfs_dir = debugfs_create_dir(dir_name,
+	rqos->debugfs_dir = debugfs_create_dir(rqos->ops->name,
 					       rqos->q->rqos_debugfs_dir);
 	debugfs_create_files(rqos->debugfs_dir, rqos,
 			     rqos->ops->debugfs_attrs);
diff --git a/block/blk-rq-qos.h b/block/blk-rq-qos.h
index bba829bbb461..262d221794f5 100644
--- a/block/blk-rq-qos.h
+++ b/block/blk-rq-qos.h
@@ -13,10 +13,6 @@
 
 struct blk_mq_debugfs_attr;
 
-enum rq_qos_id {
-	RQ_QOS_UNUSED,
-};
-
 struct rq_wait {
 	wait_queue_head_t wait;
 	atomic_t inflight;
@@ -25,7 +21,7 @@ struct rq_wait {
 struct rq_qos {
 	const struct rq_qos_ops *ops;
 	struct request_queue *q;
-	enum rq_qos_id id;
+	int id;
 	refcount_t ref;
 	wait_queue_head_t waitq;
 	bool dying;
@@ -69,17 +65,6 @@ struct rq_depth {
 	unsigned int default_depth;
 };
 
-static inline struct rq_qos *rq_qos_id(struct request_queue *q,
-				       enum rq_qos_id id)
-{
-	struct rq_qos *rqos;
-	for (rqos = q->rq_qos; rqos; rqos = rqos->next) {
-		if (rqos->id == id)
-			break;
-	}
-	return rqos;
-}
-
 static inline struct rq_qos *rq_qos_by_id(struct request_queue *q, int id)
 {
 	struct rq_qos *rqos;
@@ -99,52 +84,6 @@ static inline void rq_wait_init(struct rq_wait *rq_wait)
 	init_waitqueue_head(&rq_wait->wait);
 }
 
-static inline void rq_qos_add(struct request_queue *q, struct rq_qos *rqos)
-{
-	/*
-	 * No IO can be in-flight when adding rqos, so freeze queue, which
-	 * is fine since we only support rq_qos for blk-mq queue.
-	 *
-	 * Reuse ->queue_lock for protecting against other concurrent
-	 * rq_qos adding/deleting
-	 */
-	blk_mq_freeze_queue(q);
-
-	spin_lock_irq(&q->queue_lock);
-	rqos->next = q->rq_qos;
-	q->rq_qos = rqos;
-	spin_unlock_irq(&q->queue_lock);
-
-	blk_mq_unfreeze_queue(q);
-
-	if (rqos->ops->debugfs_attrs)
-		blk_mq_debugfs_register_rqos(rqos);
-}
-
-static inline void rq_qos_del(struct request_queue *q, struct rq_qos *rqos)
-{
-	struct rq_qos **cur;
-
-	/*
-	 * See comment in rq_qos_add() about freezing queue & using
-	 * ->queue_lock.
-	 */
-	blk_mq_freeze_queue(q);
-
-	spin_lock_irq(&q->queue_lock);
-	for (cur = &q->rq_qos; *cur; cur = &(*cur)->next) {
-		if (*cur == rqos) {
-			*cur = rqos->next;
-			break;
-		}
-	}
-	spin_unlock_irq(&q->queue_lock);
-
-	blk_mq_unfreeze_queue(q);
-
-	blk_mq_debugfs_unregister_rqos(rqos);
-}
-
 int rq_qos_register(struct rq_qos_ops *ops);
 void rq_qos_unregister(struct rq_qos_ops *ops);
 void rq_qos_activate(struct request_queue *q,
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 9f32882ceb2f..c02747db4e3b 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -574,6 +574,7 @@ QUEUE_RO_ENTRY(queue_max_segments, "max_segments");
 QUEUE_RO_ENTRY(queue_max_integrity_segments, "max_integrity_segments");
 QUEUE_RO_ENTRY(queue_max_segment_size, "max_segment_size");
 QUEUE_RW_ENTRY(elv_iosched, "scheduler");
+QUEUE_RW_ENTRY(queue_qos, "qos");
 
 QUEUE_RO_ENTRY(queue_logical_block_size, "logical_block_size");
 QUEUE_RO_ENTRY(queue_physical_block_size, "physical_block_size");
@@ -633,6 +634,7 @@ static struct attribute *queue_attrs[] = {
 	&queue_max_integrity_segments_entry.attr,
 	&queue_max_segment_size_entry.attr,
 	&elv_iosched_entry.attr,
+	&queue_qos_entry.attr,
 	&queue_hw_sector_size_entry.attr,
 	&queue_logical_block_size_entry.attr,
 	&queue_physical_block_size_entry.attr,
-- 
2.17.1
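For illustration only, a minimal sketch of how a policy could hook into the
new pluggable interface. The only things taken from the patch above are the
rq_qos_register()/rq_qos_unregister() declarations and the ->name /
->debugfs_attrs members of rq_qos_ops; the sample_* names and the module
boilerplate are hypothetical.

/*
 * Hypothetical sketch, not part of this patch: a minimal policy that
 * registers itself through the new pluggable rq_qos interface.  Only
 * rq_qos_register()/rq_qos_unregister() and the ->name / ->debugfs_attrs
 * members are taken from the declarations above; everything else
 * (names, includes, module glue) is assumed.
 */
#include <linux/module.h>
#include "blk-rq-qos.h"			/* private block-layer header */

static struct rq_qos_ops sample_rqos_ops = {
	.name		= "sample",	/* used for the debugfs "rqos/" dir name */
	.debugfs_attrs	= NULL,		/* optional per-policy debugfs files */
};

static int __init sample_rqos_init(void)
{
	/* Make the policy known to the block layer. */
	return rq_qos_register(&sample_rqos_ops);
}

static void __exit sample_rqos_exit(void)
{
	rq_qos_unregister(&sample_rqos_ops);
}

module_init(sample_rqos_init);
module_exit(sample_rqos_exit);
MODULE_LICENSE("GPL");

With something like this registered, the policy name would presumably become
visible through the queue/qos attribute added in blk-sysfs.c above.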