On 2020-07-02 22:30, Hannes Reinecke wrote:
> And it's called from srp_reconnect_rport() and __rport_fail_io_fast(),
> so we have this call chain:
>
> srp_reconnect_rport()
> - scsi_target_block()
> -> __rport_fail_io_fast()
> - scsi_target_block()

How about the (untested) patch below?

diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
index d4d1104fac99..bfb240675f06 100644
--- a/drivers/scsi/scsi_transport_srp.c
+++ b/drivers/scsi/scsi_transport_srp.c
@@ -402,13 +402,9 @@ static void __rport_fail_io_fast(struct srp_rport *rport)
 
 	lockdep_assert_held(&rport->mutex);
 
+	WARN_ON_ONCE(rport->state != SRP_RPORT_BLOCKED);
 	if (srp_rport_set_state(rport, SRP_RPORT_FAIL_FAST))
 		return;
-	/*
-	 * Call scsi_target_block() to wait for ongoing shost->queuecommand()
-	 * calls before invoking i->f->terminate_rport_io().
-	 */
-	scsi_target_block(rport->dev.parent);
 	scsi_target_unblock(rport->dev.parent, SDEV_TRANSPORT_OFFLINE);
 
 	/* Involve the LLD if possible to terminate all I/O on the rport. */
@@ -569,9 +565,9 @@ int srp_reconnect_rport(struct srp_rport *rport)
 		 * and dev_loss off. Mark the port as failed and start the TL
 		 * failure timers if these had not yet been started.
 		 */
+		WARN_ON_ONCE(srp_rport_set_state(rport, SRP_RPORT_BLOCKED));
+		scsi_target_block(rport->dev.parent);
 		__rport_fail_io_fast(rport);
-		scsi_target_unblock(&shost->shost_gendev,
-				    SDEV_TRANSPORT_OFFLINE);
 		__srp_start_tl_fail_timers(rport);
 	} else if (rport->state != SRP_RPORT_BLOCKED) {
 		scsi_target_unblock(&shost->shost_gendev,
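
To spell out the effect on the call chain you quoted: with this patch, scsi_target_block() is only invoked by the caller, and __rport_fail_io_fast() merely asserts that this has already happened. Roughly (a sketch of the intended flow, not the literal code):

srp_reconnect_rport()
- srp_rport_set_state(rport, SRP_RPORT_BLOCKED)
- scsi_target_block()
-> __rport_fail_io_fast()
  - WARN_ON_ONCE(rport->state != SRP_RPORT_BLOCKED)  /* instead of a second scsi_target_block() */
  - scsi_target_unblock()

So the wait for ongoing shost->queuecommand() calls that the deleted comment describes should still happen before i->f->terminate_rport_io(), just one call frame up, and the target is blocked only once.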