From: Zhaoyang Huang <zhaoyang.huang@xxxxxxxxxx> In the previous version, the block layer would set the bdi as congested when get_request failed, which could throttle direct_reclaim. Move this behavior back under the current blk-mq design. Signed-off-by: Zhaoyang Huang <zhaoyang.huang@xxxxxxxxxx> --- v2: simplify the code --- --- block/blk-mq-tag.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c index 995336a..0cbc445 100644 --- a/block/blk-mq-tag.c +++ b/block/blk-mq-tag.c @@ -11,6 +11,7 @@ #include <linux/blk-mq.h> #include <linux/delay.h> +#include <linux/backing-dev.h> #include "blk.h" #include "blk-mq.h" #include "blk-mq-sched.h" @@ -126,6 +127,8 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data) if (tag != BLK_MQ_NO_TAG) goto found_tag; + set_bdi_congested(data->q->disk->bdi,BLK_RW_SYNC); + set_bdi_congested(data->q->disk->bdi,BLK_RW_ASYNC); if (data->flags & BLK_MQ_REQ_NOWAIT) return BLK_MQ_NO_TAG; @@ -190,6 +193,8 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data) blk_mq_put_tag(tags, data->ctx, tag + tag_offset); return BLK_MQ_NO_TAG; } + clear_bdi_congested(data->q->disk->bdi,BLK_RW_SYNC); + clear_bdi_congested(data->q->disk->bdi,BLK_RW_ASYNC); return tag + tag_offset; } -- 1.9.1