Instead of running the request queue of each device associated with a host
every 3 ms (BLK_MQ_RESOURCE_DELAY) while host error handling is in progress,
run the request queue after error handling has finished.

Cc: Christoph Hellwig <hch@xxxxxx>
Cc: Ming Lei <ming.lei@xxxxxxxxxx>
Cc: Hannes Reinecke <hare@xxxxxxx>
Cc: John Garry <john.g.garry@xxxxxxxxxx>
Cc: Mike Christie <michael.christie@xxxxxxxxxx>
Signed-off-by: Bart Van Assche <bvanassche@xxxxxxx>
---
 drivers/scsi/scsi_lib.c | 18 +++++++++++++-----
 1 file changed, 13 insertions(+), 5 deletions(-)

diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index e59eb0cbfc83..a34390d35f1d 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -122,11 +122,9 @@ static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd, unsigned long msecs)
 		WARN_ON_ONCE(true);
 	}
 
-	if (msecs) {
-		blk_mq_requeue_request(rq, false);
+	blk_mq_requeue_request(rq, false);
+	if (!scsi_host_in_recovery(cmd->device->host))
 		blk_mq_delay_kick_requeue_list(rq->q, msecs);
-	} else
-		blk_mq_requeue_request(rq, true);
 }
 
 /**
@@ -165,7 +163,8 @@ static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, bool unbusy)
 	 */
 	cmd->result = 0;
 
-	blk_mq_requeue_request(scsi_cmd_to_rq(cmd), true);
+	blk_mq_requeue_request(scsi_cmd_to_rq(cmd),
+			       !scsi_host_in_recovery(cmd->device->host));
 }
 
 /**
@@ -462,10 +461,16 @@ void scsi_requeue_run_queue(struct work_struct *work)
 	scsi_run_queue(q);
 }
 
+/*
+ * Transfer requests from the requeue_list to where these can be dispatched
+ * and run the request queues.
+ */
 void scsi_run_host_queues(struct Scsi_Host *shost)
 {
 	struct scsi_device *sdev;
 
+	shost_for_each_device(sdev, shost)
+		blk_mq_kick_requeue_list(sdev->request_queue);
 	shost_for_each_device(sdev, shost)
 		scsi_run_queue(sdev->request_queue);
 }
@@ -499,6 +504,9 @@ static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd)
 
 static void scsi_run_queue_async(struct scsi_device *sdev)
 {
+	if (scsi_host_in_recovery(sdev->host))
+		return;
+
 	if (scsi_target(sdev)->single_lun ||
 	    !list_empty(&sdev->host->starved_list)) {
 		kblockd_schedule_work(&sdev->requeue_work);
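
Note for reviewers: requests parked on a requeue_list while the host is in
recovery still have to be dispatched once recovery completes. My reading of
the existing error-handling path (not something this patch changes) is that
the error handler finishes via scsi_restart_operations(), which calls
scsi_run_host_queues(); with this patch that function also kicks each
device's requeue list, which is what replaces the periodic 3 ms kicks. A
minimal sketch of the resulting function, assembled from the hunk above:

	/*
	 * scsi_run_host_queues() as it looks with this patch applied.
	 * Called from scsi_restart_operations() once error handling has
	 * finished.
	 */
	void scsi_run_host_queues(struct Scsi_Host *shost)
	{
		struct scsi_device *sdev;

		/* Move requeued requests back to where they can be dispatched... */
		shost_for_each_device(sdev, shost)
			blk_mq_kick_requeue_list(sdev->request_queue);
		/* ...and then run the request queues. */
		shost_for_each_device(sdev, shost)
			scsi_run_queue(sdev->request_queue);
	}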