Signed-off-by: Hannes Reinecke <hare@xxxxxxxx>
---
 drivers/scsi/libfc/fc_exch.c  | 18 +++++++++++++++---
 drivers/scsi/libfc/fc_fcp.c  | 39 +++++++++++++++++++++++++++++++++------
 drivers/scsi/libfc/fc_rport.c | 14 +++++++++++---
 3 files changed, 59 insertions(+), 12 deletions(-)

diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index e72673b..dc078d3 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -362,8 +362,10 @@ static inline void fc_exch_timer_set_locked(struct fc_exch *ep,
 
 	fc_exch_hold(ep);		/* hold for timer */
 	if (!queue_delayed_work(fc_exch_workqueue, &ep->timeout_work,
-				msecs_to_jiffies(timer_msec)))
+				msecs_to_jiffies(timer_msec))) {
+		FC_EXCH_DBG(ep, "Exchange already queued\n");
 		fc_exch_release(ep);
+	}
 }
 
 /**
@@ -632,9 +634,13 @@ static int fc_exch_abort_locked(struct fc_exch *ep,
 	struct fc_frame *fp;
 	int error;
 
+	FC_EXCH_DBG(ep, "exch: abort, time %d msecs\n", timer_msec);
 	if (ep->esb_stat & (ESB_ST_COMPLETE | ESB_ST_ABNORMAL) ||
-	    ep->state & (FC_EX_DONE | FC_EX_RST_CLEANUP))
+	    ep->state & (FC_EX_DONE | FC_EX_RST_CLEANUP)) {
+		FC_EXCH_DBG(ep, "exch: already completed esb %x state %x\n",
+			    ep->esb_stat, ep->state);
 		return -ENXIO;
+	}
 
 	/*
 	 * Send the abort on a new sequence if possible.
@@ -758,7 +764,7 @@ static void fc_exch_timeout(struct work_struct *work)
 	u32 e_stat;
 	int rc = 1;
 
-	FC_EXCH_DBG(ep, "Exchange timed out\n");
+	FC_EXCH_DBG(ep, "Exchange timed out state %x\n", ep->state);
 
 	spin_lock_bh(&ep->ex_lock);
 	if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE))
@@ -1383,6 +1389,7 @@ static void fc_exch_recv_abts(struct fc_exch *ep, struct fc_frame *rx_fp)
 	if (!ep)
 		goto reject;
 
+	FC_EXCH_DBG(ep, "exch: ABTS received\n");
 	fp = fc_frame_alloc(ep->lp, sizeof(*ap));
 	if (!fp)
 		goto free;
@@ -1978,6 +1985,8 @@ static void fc_exch_els_rec(struct fc_frame *rfp)
 	explan = ELS_EXPL_OXID_RXID;
 	if (!ep)
 		goto reject;
+	FC_EXCH_DBG(ep, "REC request from %x: rxid %x oxid %x\n",
+		    sid, rxid, oxid);
 	if (ep->oid != sid || oxid != ep->oxid)
 		goto rel;
 	if (rxid != FC_XID_UNKNOWN && rxid != ep->rxid)
@@ -2177,6 +2186,7 @@ static void fc_exch_rrq(struct fc_exch *ep)
 		return;
 
 retry:
+	FC_EXCH_DBG(ep, "exch: RRQ send failed\n");
 	spin_lock_bh(&ep->ex_lock);
 	if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE)) {
 		spin_unlock_bh(&ep->ex_lock);
@@ -2219,6 +2229,8 @@ static void fc_exch_els_rrq(struct fc_frame *fp)
 	if (!ep)
 		goto reject;
 	spin_lock_bh(&ep->ex_lock);
+	FC_EXCH_DBG(ep, "RRQ request from %x: xid %x rxid %x oxid %x\n",
+		    sid, xid, ntohs(rp->rrq_rx_id), ntohs(rp->rrq_ox_id));
 	if (ep->oxid != ntohs(rp->rrq_ox_id))
 		goto unlock_reject;
 	if (ep->rxid != ntohs(rp->rrq_rx_id) &&
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index 5121272..bd4bdbf 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -764,8 +764,11 @@ static void fc_fcp_recv(struct fc_seq *seq, struct fc_frame *fp, void *arg)
 	fh = fc_frame_header_get(fp);
 	r_ctl = fh->fh_r_ctl;
 
-	if (lport->state != LPORT_ST_READY)
+	if (lport->state != LPORT_ST_READY) {
+		FC_FCP_DBG(fsp, "lport state %d, ignoring r_ctl %x\n",
+			   lport->state, r_ctl);
 		goto out;
+	}
 
 	if (fc_fcp_lock_pkt(fsp))
 		goto out;
@@ -774,8 +777,10 @@ static void fc_fcp_recv(struct fc_seq *seq, struct fc_frame *fp, void *arg)
 		goto unlock;
 	}
 
-	if (fsp->state & (FC_SRB_ABORTED | FC_SRB_ABORT_PENDING))
+	if (fsp->state & (FC_SRB_ABORTED | FC_SRB_ABORT_PENDING)) {
+		FC_FCP_DBG(fsp, "command aborted, ignoring r_ctl %x\n", r_ctl);
 		goto unlock;
+	}
 
 	if (r_ctl == FC_RCTL_DD_DATA_DESC) {
 		/*
@@ -910,6 +915,10 @@ static void fc_fcp_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
 			 * Wait a at least one jiffy to see if it is delivered.
 			 * If this expires without data, we may do SRR.
 			 */
+			FC_FCP_DBG(fsp, "tgt %6.6x xfer len %zx data underrun "
+				   "len %x, data len %x\n",
+				   fsp->rport->port_id,
+				   fsp->xfer_len, expected_len, fsp->data_len);
 			fc_fcp_timer_set(fsp, 2);
 			return;
 		}
@@ -959,8 +968,12 @@ static void fc_fcp_complete_locked(struct fc_fcp_pkt *fsp)
 		if (fsp->cdb_status == SAM_STAT_GOOD &&
 		    fsp->xfer_len < fsp->data_len && !fsp->io_status &&
 		    (!(fsp->scsi_comp_flags & FCP_RESID_UNDER) ||
-		     fsp->xfer_len < fsp->data_len - fsp->scsi_resid))
+		     fsp->xfer_len < fsp->data_len - fsp->scsi_resid)) {
+			FC_FCP_DBG(fsp, "data underrun, "
+				   "xfer_len %zx data_len %x\n",
+				   fsp->xfer_len, fsp->data_len);
 			fsp->status_code = FC_DATA_UNDRUN;
+		}
 	}
 
 	seq = fsp->seq_ptr;
@@ -1222,8 +1235,11 @@ static int fc_fcp_pkt_abort(struct fc_fcp_pkt *fsp)
 	int rc = FAILED;
 	unsigned long ticks_left;
 
-	if (fc_fcp_send_abort(fsp))
+	FC_FCP_DBG(fsp, "pkt abort state %x\n", fsp->state);
+	if (fc_fcp_send_abort(fsp)) {
+		FC_FCP_DBG(fsp, "failed to send abort\n");
 		return FAILED;
+	}
 
 	init_completion(&fsp->tm_done);
 	fsp->wait_for_comp = 1;
@@ -1394,6 +1410,8 @@ static void fc_fcp_timeout(unsigned long data)
 	if (fsp->cdb_cmd.fc_tm_flags)
 		goto unlock;
 
+	FC_FCP_DBG(fsp, "fcp timeout, flags %x state %x\n",
+		   rpriv->flags, fsp->state);
 	fsp->state |= FC_SRB_FCP_PROCESSING_TMO;
 
 	if (rpriv->flags & FC_RP_FLAGS_REC_SUPPORTED)
@@ -1503,6 +1521,10 @@ static void fc_fcp_rec_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
 			break;
 		case ELS_RJT_LOGIC:
 		case ELS_RJT_UNAB:
+			FC_FCP_DBG(fsp, "device %x REC reject "
+				   "reason %d expl %d\n",
+				   fsp->rport->port_id, rjt->er_reason,
+				   rjt->er_explan);
 			/*
 			 * If no data transfer, the command frame got dropped
 			 * so we just retry. If data was transferred, we
@@ -1608,6 +1630,8 @@ static void fc_fcp_rec_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
 
 	switch (error) {
 	case -FC_EX_CLOSED:
+		FC_FCP_DBG(fsp, "REC %p fid %6.6x exchange closed\n",
+			   fsp, fsp->rport->port_id);
 		fc_fcp_retry_cmd(fsp);
 		break;
 
@@ -1622,8 +1646,8 @@ static void fc_fcp_rec_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
 		 * Assume REC or LS_ACC was lost.
 		 * The exchange manager will have aborted REC, so retry.
 		 */
-		FC_FCP_DBG(fsp, "REC fid %6.6x error error %d retry %d/%d\n",
-			   fsp->rport->port_id, error, fsp->recov_retry,
+		FC_FCP_DBG(fsp, "REC %p fid %6.6x exchange timeout retry %d/%d\n",
+			   fsp, fsp->rport->port_id, fsp->recov_retry,
 			   FC_MAX_RECOV_RETRY);
 		if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
 			fc_fcp_rec(fsp);
@@ -1642,6 +1666,7 @@ out:
  */
static void fc_fcp_recovery(struct fc_fcp_pkt *fsp, u8 code)
 {
+	FC_FCP_DBG(fsp, "start recovery code %x\n", code);
 	fsp->status_code = code;
 	fsp->cdb_status = 0;
 	fsp->io_status = 0;
@@ -1768,12 +1793,14 @@ static void fc_fcp_srr_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
 		goto out;
 	switch (PTR_ERR(fp)) {
 	case -FC_EX_TIMEOUT:
+		FC_FCP_DBG(fsp, "SRR timeout, retries %d\n", fsp->recov_retry);
 		if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
 			fc_fcp_rec(fsp);
 		else
 			fc_fcp_recovery(fsp, FC_TIMED_OUT);
 		break;
 	case -FC_EX_CLOSED:	/* e.g., link failure */
+		FC_FCP_DBG(fsp, "SRR error, exchange closed\n");
 		/* fall through */
 	default:
 		fc_fcp_retry_cmd(fsp);
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index 6a98bb8..afc1f9b 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -387,8 +387,10 @@ static void fc_rport_work(struct work_struct *work)
 			 * Re-open for events. Reissue READY event if ready.
 			 */
 			rdata->event = RPORT_EV_NONE;
-			if (rdata->rp_state == RPORT_ST_READY)
+			if (rdata->rp_state == RPORT_ST_READY) {
+				FC_RPORT_DBG(rdata, "work reopen\n");
 				fc_rport_enter_ready(rdata);
+			}
 			mutex_unlock(&rdata->rp_mutex);
 		}
 		break;
@@ -1052,6 +1054,7 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
 		struct fc_els_spp spp;
 	} *pp;
 	struct fc_els_spp temp_spp;
+	struct fc_els_ls_rjt *rjt;
 	struct fc4_prov *prov;
 	u32 roles = FC_RPORT_ROLE_UNKNOWN;
 	u32 fcp_parm = 0;
@@ -1121,8 +1124,13 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
 			fc_rport_enter_rtv(rdata);
 
 	} else {
-		FC_RPORT_DBG(rdata, "Bad ELS response for PRLI command\n");
-		fc_rport_error_retry(rdata, fp);
+		rjt = fc_frame_payload_get(fp, sizeof(*rjt));
+		if (!rjt)
+			FC_RPORT_DBG(rdata, "PRLI bad response\n");
+		else
+			FC_RPORT_DBG(rdata, "PRLI ELS rejected, reason %x expl %x\n",
+				     rjt->er_reason, rjt->er_explan);
+		fc_rport_error_retry(rdata, NULL);
 	}
 
 out:
-- 
1.8.5.6