[PATCH RESEND 04/11] bnx2fc: REC/SRR link service request and response handling


Build REC and SRR link service requests and handle the completions.
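
Not part of the patch itself: the small user-space sketch below only models
the decision bnx2fc_rec_compl() makes on a REC LS_ACC once the exchange
status shows sequence initiative is with the initiator.  If the reported
offset (REC offset for a WRITE, rx_buf_off from the error entry for a READ)
already covers the whole transfer, only the FCP_RSP frame was lost and an
SRR for the response (offset 0) is enough; otherwise data or XFER_RDY was
lost, so the sequence is cleaned up and retransmission is requested from
that offset.  All names and types in the sketch are illustrative stand-ins,
not driver code.

#include <stdio.h>

enum rec_action {
	REC_SRR_FOR_FCP_RSP,		/* only the response frame was lost */
	REC_SEQ_CLEANUP_AND_RETRANSMIT	/* data lost; restart from offset   */
};

/* Mirrors the offset check made in both the WRITE and READ branches. */
static enum rec_action classify_rec_acc(unsigned int offset,
					unsigned int data_xfer_len)
{
	if (offset == data_xfer_len)
		return REC_SRR_FOR_FCP_RSP;
	return REC_SEQ_CLEANUP_AND_RETRANSMIT;
}

int main(void)
{
	/* 4 KiB transfer fully seen by the other side: FCP_RSP lost. */
	printf("%d\n", (int)classify_rec_acc(4096, 4096));
	/* Only 1 KiB seen: data lost, retransmit from offset 1024. */
	printf("%d\n", (int)classify_rec_acc(1024, 4096));
	return 0;
}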

Signed-off-by: Bhanu Prakash Gollapudi <bprakash@xxxxxxxxxxxx>
---
 drivers/scsi/bnx2fc/bnx2fc.h     |   16 ++
 drivers/scsi/bnx2fc/bnx2fc_els.c |  416 +++++++++++++++++++++++++++++++++++++-
 drivers/scsi/bnx2fc/bnx2fc_io.c  |    9 +-
 3 files changed, 435 insertions(+), 6 deletions(-)

diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h
index c16fa78..cd506c0 100644
--- a/drivers/scsi/bnx2fc/bnx2fc.h
+++ b/drivers/scsi/bnx2fc/bnx2fc.h
@@ -141,6 +141,9 @@
 
 #define BNX2FC_RNID_HBA			0x7
 
+#define SRR_RETRY_COUNT			5
+#define REC_RETRY_COUNT			1
+
 /* bnx2fc driver uses only one instance of fcoe_percpu_s */
 extern struct fcoe_percpu_s bnx2fc_global;
 
@@ -357,6 +360,8 @@ struct bnx2fc_els_cb_arg {
 	struct bnx2fc_cmd *aborted_io_req;
 	struct bnx2fc_cmd *io_req;
 	u16 l2_oxid;
+	u32 offset;
+	enum fc_rctl r_ctl;
 };
 
 /* bnx2fc command structure */
@@ -384,6 +389,7 @@ struct bnx2fc_cmd {
 	struct completion tm_done;
 	int wait_for_comp;
 	u16 xid;
+	struct fcoe_err_report_entry err_entry;
 	struct fcoe_task_ctx_entry *task;
 	struct io_bdt *bd_tbl;
 	struct fcp_rsp *rsp;
@@ -400,6 +406,12 @@ struct bnx2fc_cmd {
 #define BNX2FC_FLAG_IO_COMPL		0x9
 #define BNX2FC_FLAG_ELS_DONE		0xa
 #define BNX2FC_FLAG_ELS_TIMEOUT		0xb
+#define BNX2FC_FLAG_CMD_LOST		0xc
+#define BNX2FC_FLAG_SRR_SENT		0xd
+	u8 rec_retry;
+	u8 srr_retry;
+	u32 srr_offset;
+	u8 srr_rctl;
 	u32 fcp_resid;
 	u32 fcp_rsp_len;
 	u32 fcp_sns_len;
@@ -430,6 +442,7 @@ struct bnx2fc_unsol_els {
 
 
 
+struct bnx2fc_cmd *bnx2fc_cmd_alloc(struct bnx2fc_rport *tgt);
 struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type);
 void bnx2fc_cmd_release(struct kref *ref);
 int bnx2fc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc_cmd);
@@ -520,6 +533,9 @@ void bnx2fc_process_l2_frame_compl(struct bnx2fc_rport *tgt,
 				   unsigned char *buf,
 				   u32 frame_len, u16 l2_oxid);
 int bnx2fc_send_stat_req(struct bnx2fc_hba *hba);
+int bnx2fc_post_io_req(struct bnx2fc_rport *tgt, struct bnx2fc_cmd *io_req);
+int bnx2fc_send_rec(struct bnx2fc_cmd *orig_io_req);
+int bnx2fc_send_srr(struct bnx2fc_cmd *orig_io_req, u32 offset, u8 r_ctl);
 void bnx2fc_process_seq_cleanup_compl(struct bnx2fc_cmd *seq_clnup_req,
 				      struct fcoe_task_ctx_entry *task,
 				      u8 rx_state);
diff --git a/drivers/scsi/bnx2fc/bnx2fc_els.c b/drivers/scsi/bnx2fc/bnx2fc_els.c
index 5d7baa2..2795ede 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_els.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_els.c
@@ -253,6 +253,411 @@ int bnx2fc_send_rls(struct bnx2fc_rport *tgt, struct fc_frame *fp)
 	return rc;
 }
 
+void bnx2fc_srr_compl(struct bnx2fc_els_cb_arg *cb_arg)
+{
+	struct bnx2fc_mp_req *mp_req;
+	struct fc_frame_header *fc_hdr, *fh;
+	struct bnx2fc_cmd *srr_req;
+	struct bnx2fc_cmd *orig_io_req;
+	struct fc_frame *fp;
+	unsigned char *buf;
+	void *resp_buf;
+	u32 resp_len, hdr_len;
+	u8 opcode;
+	int rc = 0;
+
+	orig_io_req = cb_arg->aborted_io_req;
+	srr_req = cb_arg->io_req;
+	if (test_bit(BNX2FC_FLAG_IO_COMPL, &orig_io_req->req_flags)) {
+		BNX2FC_IO_DBG(srr_req, "srr_compl: xid - 0x%x completed",
+			orig_io_req->xid);
+		goto srr_compl_done;
+	}
+	if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &orig_io_req->req_flags)) {
+		BNX2FC_IO_DBG(srr_req, "abts in prog "
+		       "orig_io - 0x%x\n",
+			orig_io_req->xid);
+		goto srr_compl_done;
+	}
+	if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &srr_req->req_flags)) {
+		/* SRR timed out */
+		BNX2FC_IO_DBG(srr_req, "srr timed out, abort "
+		       "orig_io - 0x%x\n",
+			orig_io_req->xid);
+		rc = bnx2fc_initiate_abts(srr_req);
+		if (rc != SUCCESS) {
+			BNX2FC_IO_DBG(srr_req, "srr_compl: initiate_abts "
+				"failed. issue cleanup\n");
+			bnx2fc_initiate_cleanup(srr_req);
+		}
+		orig_io_req->srr_retry++;
+		if (orig_io_req->srr_retry <= SRR_RETRY_COUNT) {
+			struct bnx2fc_rport *tgt = orig_io_req->tgt;
+			spin_unlock_bh(&tgt->tgt_lock);
+			rc = bnx2fc_send_srr(orig_io_req,
+					     orig_io_req->srr_offset,
+					     orig_io_req->srr_rctl);
+			spin_lock_bh(&tgt->tgt_lock);
+			if (!rc)
+				goto srr_compl_done;
+		}
+
+		rc = bnx2fc_initiate_abts(orig_io_req);
+		if (rc != SUCCESS) {
+			BNX2FC_IO_DBG(srr_req, "srr_compl: initiate_abts "
+				"failed xid = 0x%x. issue cleanup\n",
+				orig_io_req->xid);
+			bnx2fc_initiate_cleanup(orig_io_req);
+		}
+		goto srr_compl_done;
+	}
+	mp_req = &(srr_req->mp_req);
+	fc_hdr = &(mp_req->resp_fc_hdr);
+	resp_len = mp_req->resp_len;
+	resp_buf = mp_req->resp_buf;
+
+	hdr_len = sizeof(*fc_hdr);
+	buf = kzalloc(PAGE_SIZE, GFP_ATOMIC);
+	if (!buf) {
+		printk(KERN_ERR PFX "srr buf: mem alloc failure\n");
+		goto srr_compl_done;
+	}
+	memcpy(buf, fc_hdr, hdr_len);
+	memcpy(buf + hdr_len, resp_buf, resp_len);
+
+	fp = fc_frame_alloc(NULL, resp_len);
+	if (!fp) {
+		printk(KERN_ERR PFX "fc_frame_alloc failure\n");
+		goto free_buf;
+	}
+
+	fh = (struct fc_frame_header *) fc_frame_header_get(fp);
+	/* Copy FC Frame header and payload into the frame */
+	memcpy(fh, buf, hdr_len + resp_len);
+
+	opcode = fc_frame_payload_op(fp);
+	switch (opcode) {
+	case ELS_LS_ACC:
+		BNX2FC_IO_DBG(srr_req, "SRR success\n");
+		break;
+	case ELS_LS_RJT:
+		BNX2FC_IO_DBG(srr_req, "SRR rejected\n");
+		rc = bnx2fc_initiate_abts(orig_io_req);
+		if (rc != SUCCESS) {
+			BNX2FC_IO_DBG(srr_req, "srr_compl: initiate_abts "
+				"failed xid = 0x%x. issue cleanup\n",
+				orig_io_req->xid);
+			bnx2fc_initiate_cleanup(orig_io_req);
+		}
+		break;
+	default:
+		BNX2FC_IO_DBG(srr_req, "srr compl - invalid opcode = %d\n",
+			opcode);
+		break;
+	}
+	fc_frame_free(fp);
+free_buf:
+	kfree(buf);
+srr_compl_done:
+	kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
+}
+
+void bnx2fc_rec_compl(struct bnx2fc_els_cb_arg *cb_arg)
+{
+	struct bnx2fc_cmd *orig_io_req, *new_io_req;
+	struct bnx2fc_cmd *rec_req;
+	struct bnx2fc_mp_req *mp_req;
+	struct fc_frame_header *fc_hdr, *fh;
+	struct fc_els_ls_rjt *rjt;
+	struct fc_els_rec_acc *acc;
+	struct bnx2fc_rport *tgt;
+	struct fcoe_err_report_entry *err_entry;
+	struct scsi_cmnd *sc_cmd;
+	enum fc_rctl r_ctl;
+	unsigned char *buf;
+	void *resp_buf;
+	struct fc_frame *fp;
+	u8 opcode;
+	u32 offset;
+	u32 e_stat;
+	u32 resp_len, hdr_len;
+	int rc = 0;
+	bool send_seq_clnp = false;
+	bool abort_io = false;
+
+	BNX2FC_MISC_DBG("Entered rec_compl callback\n");
+	rec_req = cb_arg->io_req;
+	orig_io_req = cb_arg->aborted_io_req;
+	BNX2FC_IO_DBG(rec_req, "rec_compl: orig xid = 0x%x", orig_io_req->xid);
+	tgt = orig_io_req->tgt;
+
+	if (test_bit(BNX2FC_FLAG_IO_COMPL, &orig_io_req->req_flags)) {
+		BNX2FC_IO_DBG(rec_req, "completed "
+		       "orig_io - 0x%x\n",
+			orig_io_req->xid);
+		goto rec_compl_done;
+	}
+	if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &orig_io_req->req_flags)) {
+		BNX2FC_IO_DBG(rec_req, "abts in prog "
+		       "orig_io - 0x%x\n",
+			orig_io_req->xid);
+		goto rec_compl_done;
+	}
+	/* Handle REC timeout case */
+	if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &rec_req->req_flags)) {
+		BNX2FC_IO_DBG(rec_req, "timed out, abort "
+		       "orig_io - 0x%x\n",
+			orig_io_req->xid);
+		/* els req timed out. send abts for els */
+		rc = bnx2fc_initiate_abts(rec_req);
+		if (rc != SUCCESS) {
+			BNX2FC_IO_DBG(rec_req, "rec_compl: initiate_abts "
+				"failed. issue cleanup\n");
+			bnx2fc_initiate_cleanup(rec_req);
+		}
+		orig_io_req->rec_retry++;
+		/* REC timed out. retry REC; if retries are exhausted, ABTS the orig IO req */
+		if (orig_io_req->rec_retry <= REC_RETRY_COUNT) {
+			spin_unlock_bh(&tgt->tgt_lock);
+			rc = bnx2fc_send_rec(orig_io_req);
+			spin_lock_bh(&tgt->tgt_lock);
+			if (!rc)
+				goto rec_compl_done;
+		}
+		rc = bnx2fc_initiate_abts(orig_io_req);
+		if (rc != SUCCESS) {
+			BNX2FC_IO_DBG(rec_req, "rec_compl: initiate_abts "
+				"failed xid = 0x%x. issue cleanup\n",
+				orig_io_req->xid);
+			bnx2fc_initiate_cleanup(orig_io_req);
+		}
+		goto rec_compl_done;
+	}
+	mp_req = &(rec_req->mp_req);
+	fc_hdr = &(mp_req->resp_fc_hdr);
+	resp_len = mp_req->resp_len;
+	acc = resp_buf = mp_req->resp_buf;
+
+	hdr_len = sizeof(*fc_hdr);
+
+	buf = kzalloc(PAGE_SIZE, GFP_ATOMIC);
+	if (!buf) {
+		printk(KERN_ERR PFX "rec buf: mem alloc failure\n");
+		goto rec_compl_done;
+	}
+	memcpy(buf, fc_hdr, hdr_len);
+	memcpy(buf + hdr_len, resp_buf, resp_len);
+
+	fp = fc_frame_alloc(NULL, resp_len);
+	if (!fp) {
+		printk(KERN_ERR PFX "fc_frame_alloc failure\n");
+		goto free_buf;
+	}
+
+	fh = (struct fc_frame_header *) fc_frame_header_get(fp);
+	/* Copy FC Frame header and payload into the frame */
+	memcpy(fh, buf, hdr_len + resp_len);
+
+	opcode = fc_frame_payload_op(fp);
+	if (opcode == ELS_LS_RJT) {
+		BNX2FC_IO_DBG(rec_req, "opcode is RJT\n");
+		rjt = fc_frame_payload_get(fp, sizeof(*rjt));
+		if ((rjt->er_reason == ELS_RJT_LOGIC ||
+		    rjt->er_reason == ELS_RJT_UNAB) &&
+		    rjt->er_explan == ELS_EXPL_OXID_RXID) {
+			BNX2FC_IO_DBG(rec_req, "handle CMD LOST case\n");
+			new_io_req = bnx2fc_cmd_alloc(tgt);
+			if (!new_io_req)
+				goto abort_io;
+			new_io_req->sc_cmd = orig_io_req->sc_cmd;
+			/* cleanup orig_io_req that is with the FW */
+			set_bit(BNX2FC_FLAG_CMD_LOST,
+				&orig_io_req->req_flags);
+			bnx2fc_initiate_cleanup(orig_io_req);
+			/* Post a new IO req with the same sc_cmd */
+			BNX2FC_IO_DBG(rec_req, "Post IO request again\n");
+			spin_unlock_bh(&tgt->tgt_lock);
+			rc = bnx2fc_post_io_req(tgt, new_io_req);
+			spin_lock_bh(&tgt->tgt_lock);
+			if (!rc)
+				goto free_frame;
+			BNX2FC_IO_DBG(rec_req, "REC: io post err\n");
+		}
+abort_io:
+		rc = bnx2fc_initiate_abts(orig_io_req);
+		if (rc != SUCCESS) {
+			BNX2FC_IO_DBG(rec_req, "rec_compl: initiate_abts "
+				"failed. issue cleanup\n");
+			bnx2fc_initiate_cleanup(orig_io_req);
+		}
+	} else if (opcode == ELS_LS_ACC) {
+		/* REVISIT: Check if the exchange is already aborted */
+		offset = ntohl(acc->reca_fc4value);
+		e_stat = ntohl(acc->reca_e_stat);
+		if (e_stat & ESB_ST_SEQ_INIT)  {
+			BNX2FC_IO_DBG(rec_req, "target has the seq init\n");
+			goto free_frame;
+		}
+		BNX2FC_IO_DBG(rec_req, "e_stat = 0x%x, offset = 0x%x\n",
+			e_stat, offset);
+		/* Seq initiative is with us */
+		err_entry = (struct fcoe_err_report_entry *)
+			     &orig_io_req->err_entry;
+		sc_cmd = orig_io_req->sc_cmd;
+		if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
+			/* SCSI WRITE command */
+			if (offset == orig_io_req->data_xfer_len) {
+				BNX2FC_IO_DBG(rec_req, "WRITE - resp lost\n");
+				/* FCP_RSP lost */
+				r_ctl = FC_RCTL_DD_CMD_STATUS;
+				offset = 0;
+			} else  {
+				/* start transmitting from offset */
+				BNX2FC_IO_DBG(rec_req, "XFER_RDY/DATA lost\n");
+				send_seq_clnp = true;
+				r_ctl = FC_RCTL_DD_DATA_DESC;
+				if (bnx2fc_initiate_seq_cleanup(orig_io_req,
+								offset, r_ctl))
+					abort_io = true;
+				/* XFER_RDY */
+			}
+		} else {
+			/* SCSI READ command */
+			if (err_entry->data.rx_buf_off ==
+					orig_io_req->data_xfer_len) {
+				/* FCP_RSP lost */
+				BNX2FC_IO_DBG(rec_req, "READ - resp lost\n");
+				r_ctl = FC_RCTL_DD_CMD_STATUS;
+				offset = 0;
+			} else  {
+				/* request retransmission from this offset */
+				send_seq_clnp = true;
+				offset = err_entry->data.rx_buf_off;
+				BNX2FC_IO_DBG(rec_req, "RD DATA lost\n");
+				/* FCP_DATA lost */
+				r_ctl = FC_RCTL_DD_SOL_DATA;
+				if (bnx2fc_initiate_seq_cleanup(orig_io_req,
+								offset, r_ctl))
+					abort_io = true;
+			}
+		}
+		if (abort_io) {
+			rc = bnx2fc_initiate_abts(orig_io_req);
+			if (rc != SUCCESS) {
+				BNX2FC_IO_DBG(rec_req, "rec_compl:initiate_abts"
+					      " failed. issue cleanup\n");
+				bnx2fc_initiate_cleanup(orig_io_req);
+			}
+		} else if (!send_seq_clnp) {
+			BNX2FC_IO_DBG(rec_req, "Send SRR - FCP_RSP\n");
+			spin_unlock_bh(&tgt->tgt_lock);
+			rc = bnx2fc_send_srr(orig_io_req, offset, r_ctl);
+			spin_lock_bh(&tgt->tgt_lock);
+
+			if (rc) {
+				BNX2FC_IO_DBG(rec_req, "Unable to send SRR"
+					" IO will abort\n");
+			}
+		}
+	}
+free_frame:
+	fc_frame_free(fp);
+free_buf:
+	kfree(buf);
+rec_compl_done:
+	kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
+	kfree(cb_arg);
+}
+
+int bnx2fc_send_rec(struct bnx2fc_cmd *orig_io_req)
+{
+	struct fc_els_rec rec;
+	struct bnx2fc_rport *tgt = orig_io_req->tgt;
+	struct fc_lport *lport = tgt->rdata->local_port;
+	struct bnx2fc_els_cb_arg *cb_arg = NULL;
+	u32 sid = tgt->sid;
+	u32 r_a_tov = lport->r_a_tov;
+	int rc;
+
+	BNX2FC_IO_DBG(orig_io_req, "Sending REC\n");
+	memset(&rec, 0, sizeof(rec));
+
+	cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
+	if (!cb_arg) {
+		printk(KERN_ERR PFX "Unable to allocate cb_arg for REC\n");
+		rc = -ENOMEM;
+		goto rec_err;
+	}
+	kref_get(&orig_io_req->refcount);
+
+	cb_arg->aborted_io_req = orig_io_req;
+
+	rec.rec_cmd = ELS_REC;
+	hton24(rec.rec_s_id, sid);
+	rec.rec_ox_id = htons(orig_io_req->xid);
+	rec.rec_rx_id = htons(orig_io_req->task->rxwr_txrd.var_ctx.rx_id);
+
+	rc = bnx2fc_initiate_els(tgt, ELS_REC, &rec, sizeof(rec),
+				 bnx2fc_rec_compl, cb_arg,
+				 r_a_tov);
+rec_err:
+	if (rc) {
+		BNX2FC_IO_DBG(orig_io_req, "REC failed - release\n");
+		spin_lock_bh(&tgt->tgt_lock);
+		kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
+		spin_unlock_bh(&tgt->tgt_lock);
+		kfree(cb_arg);
+	}
+	return rc;
+}
+
+int bnx2fc_send_srr(struct bnx2fc_cmd *orig_io_req, u32 offset, u8 r_ctl)
+{
+	struct fcp_srr srr;
+	struct bnx2fc_rport *tgt = orig_io_req->tgt;
+	struct fc_lport *lport = tgt->rdata->local_port;
+	struct bnx2fc_els_cb_arg *cb_arg = NULL;
+	u32 r_a_tov = lport->r_a_tov;
+	int rc;
+
+	BNX2FC_IO_DBG(orig_io_req, "Sending SRR\n");
+	memset(&srr, 0, sizeof(srr));
+
+	cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
+	if (!cb_arg) {
+		printk(KERN_ERR PFX "Unable to allocate cb_arg for SRR\n");
+		rc = -ENOMEM;
+		goto srr_err;
+	}
+	kref_get(&orig_io_req->refcount);
+
+	cb_arg->aborted_io_req = orig_io_req;
+
+	srr.srr_op = ELS_SRR;
+	srr.srr_ox_id = htons(orig_io_req->xid);
+	srr.srr_rx_id = htons(orig_io_req->task->rxwr_txrd.var_ctx.rx_id);
+	srr.srr_rel_off = htonl(offset);
+	srr.srr_r_ctl = r_ctl;
+	orig_io_req->srr_offset = offset;
+	orig_io_req->srr_rctl = r_ctl;
+
+	rc = bnx2fc_initiate_els(tgt, ELS_SRR, &srr, sizeof(srr),
+				 bnx2fc_srr_compl, cb_arg,
+				 r_a_tov);
+srr_err:
+	if (rc) {
+		BNX2FC_IO_DBG(orig_io_req, "SRR failed - release\n");
+		spin_lock_bh(&tgt->tgt_lock);
+		kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
+		spin_unlock_bh(&tgt->tgt_lock);
+		kfree(cb_arg);
+	} else
+		set_bit(BNX2FC_FLAG_SRR_SENT, &orig_io_req->req_flags);
+
+	return rc;
+}
+
+
 static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op,
 			void *data, u32 data_len,
 			void (*cb_func)(struct bnx2fc_els_cb_arg *cb_arg),
@@ -342,9 +747,14 @@ static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op,
 	did = tgt->rport->port_id;
 	sid = tgt->sid;
 
-	__fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS_REQ, did, sid,
-			   FC_TYPE_ELS, FC_FC_FIRST_SEQ | FC_FC_END_SEQ |
-			   FC_FC_SEQ_INIT, 0);
+	if (op == ELS_SRR)
+		__fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS4_REQ, did, sid,
+				   FC_TYPE_FCP, FC_FC_FIRST_SEQ |
+				   FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
+	else
+		__fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS_REQ, did, sid,
+				   FC_TYPE_ELS, FC_FC_FIRST_SEQ |
+				   FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
 
 	/* Obtain exchange id */
 	xid = els_req->xid;
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index 9820d30..797b005 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -18,8 +18,6 @@ static int bnx2fc_split_bd(struct bnx2fc_cmd *io_req, u64 addr, int sg_len,
 			   int bd_index);
 static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req);
 static void bnx2fc_build_bd_list_from_sg(struct bnx2fc_cmd *io_req);
-static int bnx2fc_post_io_req(struct bnx2fc_rport *tgt,
-			       struct bnx2fc_cmd *io_req);
 static void bnx2fc_unmap_sg_list(struct bnx2fc_cmd *io_req);
 static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req);
 static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
@@ -218,6 +216,11 @@ static void bnx2fc_scsi_done(struct bnx2fc_cmd *io_req, int err_code)
 		return;
 
 	BNX2FC_IO_DBG(io_req, "scsi_done. err_code = 0x%x\n", err_code);
+	if (test_bit(BNX2FC_FLAG_CMD_LOST, &io_req->req_flags)) {
+		/* Do not call scsi done for this IO */
+		return;
+	}
+
 	bnx2fc_unmap_sg_list(io_req);
 	io_req->sc_cmd = NULL;
 	if (!sc_cmd) {
@@ -1902,7 +1905,7 @@ void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
 	kref_put(&io_req->refcount, bnx2fc_cmd_release);
 }
 
-static int bnx2fc_post_io_req(struct bnx2fc_rport *tgt,
+int bnx2fc_post_io_req(struct bnx2fc_rport *tgt,
 			       struct bnx2fc_cmd *io_req)
 {
 	struct fcoe_task_ctx_entry *task;
-- 
1.7.0.6

