From: Artur Paszkiewicz <artur.paszkiewicz@xxxxxxxxx>

Provide a callback for polling the mddev, which in turn polls the active
member devices in a non-spinning manner. Enable it only if all members
support polling.

Signed-off-by: Artur Paszkiewicz <artur.paszkiewicz@xxxxxxxxx>
Signed-off-by: Andrzej Jakowski <andrzej.jakowski@xxxxxxxxxxxxxxx>
---
 drivers/md/md.c | 40 ++++++++++++++++++++++++++++++++++++----
 1 file changed, 36 insertions(+), 4 deletions(-)

diff --git a/drivers/md/md.c b/drivers/md/md.c
index 469f551863be..849d22a2108f 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -5564,6 +5564,28 @@ int mddev_init_writes_pending(struct mddev *mddev)
 }
 EXPORT_SYMBOL_GPL(mddev_init_writes_pending);
 
+static int md_poll(struct request_queue *q, blk_qc_t cookie, bool spin)
+{
+	struct mddev *mddev = q->queuedata;
+	struct md_rdev *rdev;
+	int ret = 0;
+	int rv;
+
+	rdev_for_each(rdev, mddev) {
+		if (rdev->raid_disk < 0 || test_bit(Faulty, &rdev->flags))
+			continue;
+
+		rv = blk_poll(bdev_get_queue(rdev->bdev), cookie, false);
+		if (rv < 0) {
+			ret = rv;
+			break;
+		}
+		ret += rv;
+	}
+
+	return ret;
+}
+
 static int md_alloc(dev_t dev, char *name)
 {
 	/*
@@ -5628,6 +5650,7 @@ static int md_alloc(dev_t dev, char *name)
 
 	blk_queue_make_request(mddev->queue, md_make_request);
 	blk_set_stacking_limits(&mddev->queue->limits);
+	mddev->queue->poll_fn = md_poll;
 
 	disk = alloc_disk(1 << shift);
 	if (!disk) {
@@ -5932,12 +5955,17 @@ int md_run(struct mddev *mddev)
 
 	if (mddev->queue) {
 		bool nonrot = true;
+		bool poll = true;
 
 		rdev_for_each(rdev, mddev) {
-			if (rdev->raid_disk >= 0 &&
-			    !blk_queue_nonrot(bdev_get_queue(rdev->bdev))) {
-				nonrot = false;
-				break;
+			if (rdev->raid_disk >= 0) {
+				struct request_queue *q;
+
+				q = bdev_get_queue(rdev->bdev);
+				if (!blk_queue_nonrot(q))
+					nonrot = false;
+				if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
+					poll = false;
 			}
 		}
 		if (mddev->degraded)
@@ -5946,6 +5974,10 @@ int md_run(struct mddev *mddev)
 			blk_queue_flag_set(QUEUE_FLAG_NONROT, mddev->queue);
 		else
 			blk_queue_flag_clear(QUEUE_FLAG_NONROT, mddev->queue);
+		if (poll)
+			blk_queue_flag_set(QUEUE_FLAG_POLL, mddev->queue);
+		else
+			blk_queue_flag_clear(QUEUE_FLAG_POLL, mddev->queue);
 		mddev->queue->backing_dev_info->congested_data = mddev;
 		mddev->queue->backing_dev_info->congested_fn = md_congested;
 	}
-- 
2.20.1
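
For reference, a minimal user-space sketch of how the polled completion
path can be exercised once QUEUE_FLAG_POLL is set on the array queue: a
high-priority direct read (RWF_HIPRI) against the md block device. The
device path /dev/md0 and the 4 KiB I/O size are assumptions for the
example, not part of the patch; with polling enabled, such a read should
be driven through the md_poll callback added above rather than waiting
for an interrupt.

/* Sketch: issue a polled (RWF_HIPRI) direct read against an md array.
 * Assumes an array at /dev/md0; requires Linux >= 4.6 and glibc >= 2.26
 * for preadv2()/RWF_HIPRI.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/md0", O_RDONLY | O_DIRECT);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	void *buf;
	if (posix_memalign(&buf, 4096, 4096)) {	/* O_DIRECT needs an aligned buffer */
		close(fd);
		return 1;
	}

	struct iovec iov = { .iov_base = buf, .iov_len = 4096 };

	/* RWF_HIPRI asks the block layer to poll for completion instead of
	 * sleeping on an interrupt; on a queue with QUEUE_FLAG_POLL set this
	 * should end up in the queue's poll callback (md_poll here). */
	ssize_t ret = preadv2(fd, &iov, 1, 0, RWF_HIPRI);
	if (ret < 0)
		perror("preadv2");
	else
		printf("read %zd bytes\n", ret);

	free(buf);
	close(fd);
	return ret < 0 ? 1 : 0;
}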