[PATCH 2/3] [RFC] libfc: a modular software Fibre Channel implementation

Signed-off-by: Robert Love <robert.w.love@xxxxxxxxx>
Signed-off-by: Chris Leech <christopher.leech@xxxxxxxxx>
Signed-off-by: Vasu Dev <vasu.dev@xxxxxxxxx>
Signed-off-by: Yi Zou <yi.zou@xxxxxxxxx>
Signed-off-by: Steve Ma <steve.ma@xxxxxxxxx>
---
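
A note for reviewers: the sketch below shows how a lower-level driver
(LLD) might wire itself up to this library. It is illustrative only --
my_lld_setup(), my_frame_send() and the MY_*_XID bounds are
hypothetical -- while fc_exch_mgr_alloc(), fc_exch_init() and
fc_attr_init() are entry points exported by this patch.

	/* Illustration only -- not part of this patch. */
	static int my_lld_setup(struct fc_lport *lp)
	{
		/* one exchange manager for this lport, class 3 service */
		lp->emp = fc_exch_mgr_alloc(lp, FC_CLASS_3,
					    MY_MIN_XID, MY_MAX_XID, 0);
		if (!lp->emp)
			return -ENOMEM;

		lp->tt.frame_send = my_frame_send;	/* LLD transmit hook */
		fc_attr_init(lp);	/* seed fc_host attributes from the lport */
		return fc_exch_init(lp);	/* fill in unset tt.* handlers */
	}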

 drivers/scsi/Kconfig          |    6 
 drivers/scsi/Makefile         |    1 
 drivers/scsi/libfc/Makefile   |   12 
 drivers/scsi/libfc/fc_attr.c  |  129 ++
 drivers/scsi/libfc/fc_exch.c  | 1902 +++++++++++++++++++++++++++++++++++++
 drivers/scsi/libfc/fc_fcp.c   | 2121 +++++++++++++++++++++++++++++++++++++++++
 drivers/scsi/libfc/fc_frame.c |   88 ++
 drivers/scsi/libfc/fc_lport.c |  914 ++++++++++++++++++
 drivers/scsi/libfc/fc_ns.c    | 1229 ++++++++++++++++++++++++
 drivers/scsi/libfc/fc_rport.c | 1265 ++++++++++++++++++++++++
 include/scsi/libfc/fc_frame.h |  236 +++++
 include/scsi/libfc/libfc.h    |  737 ++++++++++++++
 12 files changed, 8640 insertions(+), 0 deletions(-)

diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index c7f0629..ae5e574 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -328,6 +328,12 @@ menuconfig SCSI_LOWLEVEL
 
 if SCSI_LOWLEVEL && SCSI
 
+config LIBFC
+	tristate "LibFC module"
+	depends on SCSI && SCSI_FC_ATTRS
+	---help---
+	  Fibre Channel library providing protocol handling (exchanges,
+	  sequences and FCP) for software Fibre Channel implementations.
+
 config ISCSI_TCP
 	tristate "iSCSI Initiator over TCP/IP"
 	depends on SCSI && INET
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 72fd504..9158dc6 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -36,6 +36,7 @@ obj-$(CONFIG_SCSI_SAS_LIBSAS)	+= libsas/
 obj-$(CONFIG_SCSI_SRP_ATTRS)	+= scsi_transport_srp.o
 obj-$(CONFIG_SCSI_DH)		+= device_handler/
 
+obj-$(CONFIG_LIBFC)		+= libfc/
 obj-$(CONFIG_ISCSI_TCP) 	+= libiscsi.o	iscsi_tcp.o
 obj-$(CONFIG_INFINIBAND_ISER) 	+= libiscsi.o
 obj-$(CONFIG_SCSI_A4000T)	+= 53c700.o	a4000t.o
diff --git a/drivers/scsi/libfc/Makefile b/drivers/scsi/libfc/Makefile
new file mode 100644
index 0000000..0a31ca2
--- /dev/null
+++ b/drivers/scsi/libfc/Makefile
@@ -0,0 +1,12 @@
+obj-$(CONFIG_LIBFC) += libfc.o
+
+libfc-objs := \
+	fc_ns.o \
+	fc_exch.o \
+	fc_frame.o \
+	fc_lport.o \
+	fc_rport.o \
+	fc_attr.o \
+	fc_fcp.o
diff --git a/drivers/scsi/libfc/fc_attr.c b/drivers/scsi/libfc/fc_attr.c
new file mode 100644
index 0000000..d73f39e
--- /dev/null
+++ b/drivers/scsi/libfc/fc_attr.c
@@ -0,0 +1,129 @@
+/*
+ * Copyright(c) 2007 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+#include <scsi/scsi_host.h>
+
+#include <scsi/libfc/libfc.h>
+
+MODULE_AUTHOR("Open-FCoE.org");
+MODULE_DESCRIPTION("libfc");
+MODULE_LICENSE("GPL");
+
+void fc_get_host_port_id(struct Scsi_Host *shost)
+{
+	struct fc_lport *lp = shost_priv(shost);
+
+	fc_host_port_id(shost) = fc_lport_get_fid(lp);
+}
+EXPORT_SYMBOL(fc_get_host_port_id);
+
+void fc_get_host_speed(struct Scsi_Host *shost)
+{
+	/*
+	 * This should be obtained from the DEC or Ethernet driver.
+	 */
+	fc_host_speed(shost) = 1;	/* hard-coded to 1 Gbit for now */
+}
+EXPORT_SYMBOL(fc_get_host_speed);
+
+void fc_get_host_port_type(struct Scsi_Host *shost)
+{
+	fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
+}
+EXPORT_SYMBOL(fc_get_host_port_type);
+
+void fc_get_host_fabric_name(struct Scsi_Host *shost)
+{
+	struct fc_lport *lp = shost_priv(shost);
+
+	fc_host_fabric_name(shost) = lp->wwnn;
+}
+EXPORT_SYMBOL(fc_get_host_fabric_name);
+
+void fc_attr_init(struct fc_lport *lp)
+{
+	fc_host_node_name(lp->host) = lp->wwnn;
+	fc_host_port_name(lp->host) = lp->wwpn;
+	fc_host_supported_classes(lp->host) = FC_COS_CLASS3;
+	memset(fc_host_supported_fc4s(lp->host), 0,
+	       sizeof(fc_host_supported_fc4s(lp->host)));
+	fc_host_supported_fc4s(lp->host)[2] = 1;
+	fc_host_supported_fc4s(lp->host)[7] = 1;
+	/* This value is also unchanging */
+	memset(fc_host_active_fc4s(lp->host), 0,
+	       sizeof(fc_host_active_fc4s(lp->host)));
+	fc_host_active_fc4s(lp->host)[2] = 1;
+	fc_host_active_fc4s(lp->host)[7] = 1;
+	fc_host_maxframe_size(lp->host) = lp->mfs;
+}
+EXPORT_SYMBOL(fc_attr_init);
+
+void fc_set_rport_loss_tmo(struct fc_rport *rport, u32 timeout)
+{
+	if (timeout)
+		rport->dev_loss_tmo = timeout + 5;
+	else
+		rport->dev_loss_tmo = 30;
+}
+EXPORT_SYMBOL(fc_set_rport_loss_tmo);
+
+struct fc_host_statistics *fc_get_host_stats(struct Scsi_Host *shost)
+{
+	int i;
+	struct fc_host_statistics *fcoe_stats;
+	struct fc_lport *lp = shost_priv(shost);
+	struct timespec v0, v1;
+
+	fcoe_stats = &lp->host_stats;
+	memset(fcoe_stats, 0, sizeof(struct fc_host_statistics));
+
+	jiffies_to_timespec(jiffies, &v0);
+	jiffies_to_timespec(lp->boot_time, &v1);
+	fcoe_stats->seconds_since_last_reset = (v0.tv_sec - v1.tv_sec);
+
+	for_each_online_cpu(i) {
+		struct fcoe_dev_stats *stats = lp->dev_stats[i];
+		if (stats == NULL)
+			continue;
+		fcoe_stats->tx_frames += stats->TxFrames;
+		fcoe_stats->tx_words += stats->TxWords;
+		fcoe_stats->rx_frames += stats->RxFrames;
+		fcoe_stats->rx_words += stats->RxWords;
+		fcoe_stats->error_frames += stats->ErrorFrames;
+		fcoe_stats->invalid_crc_count += stats->InvalidCRCCount;
+		fcoe_stats->fcp_input_requests += stats->InputRequests;
+		fcoe_stats->fcp_output_requests += stats->OutputRequests;
+		fcoe_stats->fcp_control_requests += stats->ControlRequests;
+		fcoe_stats->fcp_input_megabytes += stats->InputMegabytes;
+		fcoe_stats->fcp_output_megabytes += stats->OutputMegabytes;
+		fcoe_stats->link_failure_count += stats->LinkFailureCount;
+	}
+	fcoe_stats->lip_count = -1;
+	fcoe_stats->nos_count = -1;
+	fcoe_stats->loss_of_sync_count = -1;
+	fcoe_stats->loss_of_signal_count = -1;
+	fcoe_stats->prim_seq_protocol_err_count = -1;
+	fcoe_stats->dumped_frames = -1;
+	return fcoe_stats;
+}
+EXPORT_SYMBOL(fc_get_host_stats);
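
For context on fc_attr.c above: these helpers are shaped to slot
directly into an fc_function_template from scsi_transport_fc. A
minimal sketch of how an LLD might reference them follows; the
template object itself belongs to the LLD and is not part of this
patch (the .show_* fields merely request sysfs visibility).

	/* Illustration only -- not part of this patch. */
	static struct fc_function_template my_fc_transport_fns = {
		.get_host_port_id = fc_get_host_port_id,
		.show_host_port_id = 1,
		.get_host_speed = fc_get_host_speed,
		.show_host_speed = 1,
		.get_host_port_type = fc_get_host_port_type,
		.show_host_port_type = 1,
		.get_host_fabric_name = fc_get_host_fabric_name,
		.show_host_fabric_name = 1,
		.get_fc_host_stats = fc_get_host_stats,
		.set_rport_dev_loss_tmo = fc_set_rport_loss_tmo,
		.show_rport_dev_loss_tmo = 1,
	};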
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
new file mode 100644
index 0000000..4e552c0
--- /dev/null
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -0,0 +1,1902 @@
+/*
+ * Copyright(c) 2007 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+/*
+ * Fibre Channel exchange and sequence handling.
+ */
+
+#include <linux/kernel.h>
+#include <linux/timer.h>
+#include <linux/gfp.h>
+#include <linux/err.h>
+
+#include <scsi/fc/fc_fc2.h>
+
+#include <scsi/libfc/libfc.h>
+
+#define	  FC_DEF_R_A_TOV      (10 * 1000) /* resource allocation timeout */
+
+/*
+ * fc_exch_debug can be set in debugger or at compile time to get more logs.
+ */
+static int fc_exch_debug;
+
+/*
+ * Structure and function definitions for managing Fibre Channel Exchanges
+ * and Sequences.
+ *
+ * The three primary structures used here are fc_exch_mgr, fc_exch, and fc_seq.
+ *
+ * fc_exch_mgr holds the exchange state for an N port
+ *
+ * fc_exch holds state for one exchange and links to its active sequence.
+ *
+ * fc_seq holds the state for an individual sequence.
+ */
+
+/*
+ * Sequence.
+ */
+struct fc_seq {
+	u8	id;		/* seq ID */
+	u16	ssb_stat;	/* status flags for sequence status block */
+	u16	cnt;		/* frames sent so far on sequence */
+	u32	f_ctl;		/* F_CTL flags for frames */
+	u32	rec_data;	/* FC-4 value for REC */
+};
+
+/*
+ * Exchange.
+ *
+ * Locking notes: The ex_lock protects changes to the following fields:
+ *	esb_stat, f_ctl, seq.ssb_stat, seq.f_ctl.
+ *	seq_id
+ *	sequence allocation
+ */
+struct fc_exch {
+	struct fc_exch_mgr *em;		/* exchange manager */
+	u16		xid;		/* our exchange ID */
+	struct list_head	ex_list;	/* free or busy list linkage */
+	spinlock_t	ex_lock;	/* lock covering exchange state */
+	atomic_t	ex_refcnt;	/* reference counter */
+	struct timer_list ex_timer;	/* timer for upper level protocols */
+	struct fc_lport	*lp;		/* fc device instance */
+	u16		oxid;		/* originator's exchange ID */
+	u16		rxid;		/* responder's exchange ID */
+	u32		oid;		/* originator's FCID */
+	u32		sid;		/* source FCID */
+	u32		did;		/* destination FCID */
+	u32		esb_stat;	/* exchange status for ESB */
+	u32		r_a_tov;	/* r_a_tov from rport (msec) */
+	u8		seq_id;		/* next sequence ID to use */
+	u32		f_ctl;		/* F_CTL flags for sequences */
+	enum fc_class	class;		/* class of service */
+	struct fc_seq	seq;		/* single sequence */
+
+	/*
+	 * Handler for responses to this current exchange.
+	 */
+	void		(*resp)(struct fc_seq *, struct fc_frame *, void *);
+	void		*resp_arg;	/* 3rd arg for exchange resp handler */
+};
+
+/*
+ * Exchange manager.
+ *
+ * This structure is the center for creating exchanges and sequences.
+ * It manages the allocation of exchange IDs.
+ */
+struct fc_exch_mgr {
+	enum fc_class	class;		/* default class for sequences */
+	spinlock_t	em_lock;	/* exchange manager lock */
+	u16		last_xid;	/* last allocated exchange ID */
+	u16		min_xid;	/* min exchange ID */
+	u16		max_xid;	/* max exchange ID */
+	char em_cache_name[20];		/* cache name string */
+	struct	kmem_cache	*em_cache;	/* cache for exchanges */
+	u32	total_exches;		/* total allocated exchanges */
+	struct list_head	ex_list;	/* allocated exchanges list */
+	struct fc_lport	*lp;		/* fc device instance */
+
+	/*
+	 * Currently the exchange mgr stats are updated but not used.
+	 * They could either be exposed via sysfs or removed
+	 * altogether if they remain unused. XXX
+	 */
+	struct {
+		atomic_t no_free_exch;
+		atomic_t no_free_exch_xid;
+		atomic_t xid_not_found;
+		atomic_t xid_busy;
+		atomic_t seq_not_found;
+		atomic_t non_bls_resp;
+	} stats;
+	struct fc_exch **exches;	/* for exch pointers indexed by xid */
+};
+
+#define	fc_seq_exch(sp) container_of(sp, struct fc_exch, seq)
+#define	fc_exch_next_xid(mp, id) \
+	(((id) == (mp)->max_xid) ? (mp)->min_xid : (id) + 1)
+
+static void fc_exch_rrq(struct fc_exch *);
+static void fc_seq_ls_acc(struct fc_seq *);
+static void fc_seq_ls_rjt(struct fc_seq *, enum fc_els_rjt_reason,
+		  enum fc_els_rjt_explan);
+static void fc_exch_els_rec(struct fc_seq *, struct fc_frame *);
+static void fc_exch_els_rrq(struct fc_seq *, struct fc_frame *);
+
+/*
+ * Internal implementation notes.
+ *
+ * By default libfc uses a single exchange manager, but an LLD may choose
+ * to have one per CPU. There is one sequence manager per exchange manager,
+ * and the two are currently never separated.
+ *
+ * Section 9.8 in FC-FS-2 specifies:  "The SEQ_ID is a one-byte field
+ * assigned by the Sequence Initiator that shall be unique for a specific
+ * D_ID and S_ID pair while the Sequence is open."   Note that it isn't
+ * qualified by exchange ID, which one might think it would be.
+ * In practice this limits the number of open sequences and exchanges to 256
+ * per session.	 For most targets we could treat this limit as per exchange.
+ *
+ * The exchange and its sequence are freed when the last sequence is received.
+ * It's possible for the remote port to leave an exchange open without
+ * sending any sequences.
+ *
+ * Notes on reference counts:
+ *
+ * Exchanges are reference counted; an exchange is freed when its
+ * reference count drops to zero.
+ *
+ * Timeouts:
+ * Sequences are timed out for E_D_TOV and R_A_TOV.
+ *
+ * Sequence event handling:
+ *
+ * The following events may occur on initiator sequences:
+ *
+ *	Send.
+ *	    For now, the whole thing is sent.
+ *	Receive ACK
+ *	    This applies only to class F.
+ *	    The sequence is marked complete.
+ *	ULP completion.
+ *	    The upper layer calls fc_exch_done() when done
+ *	    with exchange and sequence tuple.
+ *	RX-inferred completion.
+ *	    When we receive the next sequence on the same exchange, we can
+ *	    retire the previous sequence ID.  (XXX not implemented).
+ *	Timeout.
+ *	    R_A_TOV frees the sequence ID.  If we're waiting for ACK,
+ *	    E_D_TOV causes abort and calls upper layer response handler
+ *	    with FC_EX_TIMEOUT error.
+ *	Receive RJT
+ *	    XXX defer.
+ *	Send ABTS
+ *	    On timeout.
+ *
+ * The following events may occur on recipient sequences:
+ *
+ *	Receive
+ *	    Allocate sequence for first frame received.
+ *	    Hold during receive handler.
+ *	    Release when final frame received.
+ *	    Keep status of last N of these for the ELS RES command.  XXX TBD.
+ *	Receive ABTS
+ *	    Deallocate sequence
+ *	Send RJT
+ *	    Deallocate
+ *
+ * For now, we neglect conditions where only part of a sequence was
+ * received or transmitted, or where out-of-order receipt is detected.
+ */
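
To make the originator-side flow above concrete, here is a hedged
sketch of issuing a request and handling the completion events just
described. my_req_resp(), my_handle_error(), my_consume_payload(),
my_arg and did are hypothetical; the fc_exch_seq_send() call and the
ERR_PTR(-FC_EX_TIMEOUT) / ERR_PTR(-FC_EX_CLOSED) error convention come
from this file.

	/* Illustration only -- not part of this patch. */
	static void my_req_resp(struct fc_seq *sp, struct fc_frame *fp,
				void *arg)
	{
		if (IS_ERR(fp)) {
			/* -FC_EX_TIMEOUT after E_D_TOV, or -FC_EX_CLOSED */
			my_handle_error(arg, PTR_ERR(fp));
			return;
		}
		my_consume_payload(arg, fp);
		fc_frame_free(fp);	/* the response handler owns the frame */
	}

	/* ... in the requesting path ... */
	sp = fc_exch_seq_send(lp, fp, my_req_resp, my_arg, lp->e_d_tov,
			      lp->fid, did, FC_FC_SEQ_INIT | FC_FC_END_SEQ);
	if (!sp)
		my_handle_error(my_arg, -ENOMEM);	/* fp was freed for us */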
+
+/*
+ * Locking notes:
+ *
+ * The EM code runs in a per-CPU worker thread.
+ *
+ * To protect against concurrency between the worker thread code and timers,
+ * sequence allocation and deallocation must be locked.
+ *  - the exchange refcnt can be manipulated atomically, without locks.
+ *  - sequence allocation must be protected by the exch lock.
+ */
+
+/*
+ * opcode names for debugging.
+ */
+static char *fc_exch_rctl_names[] = FC_RCTL_NAMES_INIT;
+
+static inline const char *fc_exch_name_lookup(unsigned int op, char **table,
+					      unsigned int max_index)
+{
+	const char *name = NULL;
+
+	if (op < max_index)
+		name = table[op];
+	if (!name)
+		name = "unknown";
+	return name;
+}
+
+static const char *fc_exch_rctl_name(unsigned int op)
+{
+	return fc_exch_name_lookup(op, fc_exch_rctl_names,
+				   ARRAY_SIZE(fc_exch_rctl_names));
+}
+
+/*
+ * Hold an exchange - keep it from being freed.
+ */
+static void fc_exch_hold(struct fc_exch *ep)
+{
+	atomic_inc(&ep->ex_refcnt);
+}
+
+/*
+ * Fill in frame header.
+ *
+ * The following fields are the responsibility of this routine:
+ *	d_id, s_id, df_ctl, oxid, rxid, cs_ctl, seq_id
+ *
+ * The following fields are handled by the caller.
+ *	r_ctl, type, f_ctl, seq_cnt, parm_offset
+ *
+ * That should be a complete list.
+ *
+ * We may be the originator or responder to the sequence.
+ */
+static void fc_seq_fill_hdr(struct fc_seq *sp, struct fc_frame *fp)
+{
+	struct fc_frame_header *fh = fc_frame_header_get(fp);
+	struct fc_exch *ep;
+
+	ep = fc_seq_exch(sp);
+
+	hton24(fh->fh_s_id, ep->sid);
+	hton24(fh->fh_d_id, ep->did);
+	fh->fh_ox_id = htons(ep->oxid);
+	fh->fh_rx_id = htons(ep->rxid);
+	fh->fh_seq_id = sp->id;
+	fh->fh_cs_ctl = 0;
+	fh->fh_df_ctl = 0;
+}
+
+/*
+ * Release a reference to an exchange.
+ * If the refcnt goes to zero and the exchange is complete, it is freed.
+ */
+static void fc_exch_release(struct fc_exch *ep)
+{
+	struct fc_exch_mgr *mp;
+
+	if (atomic_dec_and_test(&ep->ex_refcnt)) {
+		WARN_ON(!(ep->esb_stat & ESB_ST_COMPLETE));
+		del_timer(&ep->ex_timer);
+		mp = ep->em;
+		if (ep->lp->tt.exch_put)
+			ep->lp->tt.exch_put(ep->lp, mp, ep->xid);
+		spin_lock_bh(&mp->em_lock);
+		WARN_ON(mp->total_exches <= 0);
+		mp->total_exches--;
+		mp->exches[ep->xid - mp->min_xid] = NULL;
+		list_del(&ep->ex_list);
+		spin_unlock_bh(&mp->em_lock);
+		kmem_cache_free(mp->em_cache, ep);
+	}
+}
+
+/*
+ * Internal version of fc_exch_timer_set - used with lock held.
+ */
+static void fc_exch_timer_set_locked(struct fc_exch *ep,
+				     unsigned int timer_msec)
+{
+	if (!timer_pending(&ep->ex_timer))
+		fc_exch_hold(ep);		/* hold for timer */
+	mod_timer(&ep->ex_timer, jiffies + msecs_to_jiffies(timer_msec));
+}
+
+/*
+ * Set timer for an exchange.
+ * The time is a minimum delay in milliseconds until the timer fires.
+ * Used for upper level protocols to time out the exchange.
+ * The timer is cancelled when it fires or when the exchange completes.
+ */
+static void fc_exch_timer_set(struct fc_exch *ep, unsigned int timer_msec)
+{
+	spin_lock_bh(&ep->ex_lock);
+	fc_exch_timer_set_locked(ep, timer_msec);
+	spin_unlock_bh(&ep->ex_lock);
+}
+
+/*
+ * Abort the exchange for a sequence due to timeout or an upper-level abort.
+ * Called without the exchange manager em_lock held.
+ * Returns zero on success, or non-zero if a sequence or frame
+ * could not be allocated.
+ */
+int fc_seq_exch_abort(const struct fc_seq *req_sp)
+{
+	struct fc_seq *sp;
+	struct fc_exch *ep;
+	struct fc_frame *fp;
+	int error;
+
+	ep = fc_seq_exch(req_sp);
+
+	/*
+	 * Send the abort on a new sequence if possible.
+	 */
+	error = ENOMEM;
+	sp = fc_seq_start_next(&ep->seq);
+	if (sp) {
+		spin_lock_bh(&ep->ex_lock);
+		sp->f_ctl |= FC_FC_SEQ_INIT;
+		ep->esb_stat |= ESB_ST_SEQ_INIT | ESB_ST_ABNORMAL;
+		fc_exch_timer_set_locked(ep, ep->r_a_tov);
+		spin_unlock_bh(&ep->ex_lock);
+
+		/*
+		 * If not logged into the fabric, don't send ABTS but leave
+		 * sequence active until next timeout.
+		 */
+		if (!ep->sid)
+			return 0;
+
+		/*
+		 * Send an abort for the sequence that timed out.
+		 */
+		fp = fc_frame_alloc(ep->lp, 0);
+		if (fp) {
+			fc_frame_setup(fp, FC_RCTL_BA_ABTS, FC_TYPE_BLS);
+			error = fc_seq_send(ep->lp, sp, fp, FC_FC_END_SEQ);
+		} else {
+			error = ENOBUFS;
+		}
+	}
+	return error;
+}
+EXPORT_SYMBOL(fc_seq_exch_abort);
+
+/*
+ * Exchange timeout - handle exchange timer expiration.
+ * The timer will have been cancelled before this is called.
+ */
+static void fc_exch_timeout(unsigned long ep_arg)
+{
+	struct fc_exch *ep = (struct fc_exch *)ep_arg;
+	struct fc_seq *sp = &ep->seq;
+	void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg);
+	void *arg;
+	u32 e_stat;
+
+	spin_lock_bh(&ep->ex_lock);
+	e_stat = ep->esb_stat;
+	if (e_stat & ESB_ST_COMPLETE) {
+		ep->esb_stat = e_stat & ~ESB_ST_REC_QUAL;
+		spin_unlock_bh(&ep->ex_lock);
+		if (e_stat & ESB_ST_REC_QUAL)
+			fc_exch_rrq(ep);
+	} else if (e_stat & ESB_ST_ABNORMAL) {
+		ep->esb_stat |= ESB_ST_COMPLETE;
+		spin_unlock_bh(&ep->ex_lock);
+	} else {
+		fc_exch_hold(ep);
+		resp = ep->resp;
+		ep->resp = NULL;
+		arg = ep->resp_arg;
+		spin_unlock_bh(&ep->ex_lock);
+		fc_seq_exch_abort(sp);
+		fc_exch_release(ep);
+
+		if (resp)
+			resp(sp, ERR_PTR(-FC_EX_TIMEOUT), arg);
+	}
+
+	/*
+	 * This release matches the hold taken when the timer was set.
+	 */
+	fc_exch_release(ep);
+}
+
+/*
+ * Allocate a sequence.
+ *
+ * We don't support multiple originated sequences on the same exchange.
+ * By implication, any previously originated sequence on this exchange
+ * is complete, and we reallocate the same sequence.
+ */
+static struct fc_seq *fc_seq_alloc(struct fc_exch *ep, u8 seq_id)
+{
+	struct fc_seq *sp;
+
+	sp = &ep->seq;
+	sp->ssb_stat = 0;
+	sp->f_ctl = 0;
+	sp->cnt = 0;
+	sp->id = seq_id;
+	return sp;
+}
+
+/*
+ * Allocate an exchange.
+ *
+ * If the supplied xid is zero, assign the next free exchange ID
+ * from the exchange manager; otherwise use the supplied xid.
+ */
+struct fc_exch *fc_exch_alloc(struct fc_exch_mgr *mp, u16 xid)
+{
+	struct fc_exch *ep = NULL;
+	u16 min_xid, max_xid;
+
+	min_xid = mp->min_xid;
+	max_xid = mp->max_xid;
+	/*
+	 * If an xid is supplied, verify that it is within range.
+	 */
+	if (xid) {
+		if (unlikely((xid < min_xid) || (xid > max_xid))) {
+			FC_DBG("Invalid xid 0x%x\n", xid);
+			goto out;
+		}
+		if (unlikely(mp->exches[xid - min_xid] != NULL)) {
+			FC_DBG("xid 0x%x is already in use\n", xid);
+			goto out;
+		}
+	}
+
+	/*
+	 * Allocate new exchange
+	 */
+	ep = kmem_cache_zalloc(mp->em_cache, GFP_ATOMIC);
+	if (!ep) {
+		atomic_inc(&mp->stats.no_free_exch);
+		goto out;
+	}
+
+	spin_lock_bh(&mp->em_lock);
+
+	/*
+	 * if xid is zero then assign next free exchange ID
+	 */
+	if (!xid) {
+		xid = fc_exch_next_xid(mp, mp->last_xid);
+		/*
+		 * find next free xid using linear search
+		 */
+		while (mp->exches[xid - min_xid] != NULL) {
+			if (xid == mp->last_xid)
+				break;
+			xid = fc_exch_next_xid(mp, xid);
+		}
+
+		if (likely(mp->exches[xid - min_xid] == NULL)) {
+			mp->exches[xid - min_xid] = ep;
+			mp->last_xid = xid;
+		} else {
+			spin_unlock_bh(&mp->em_lock);
+			atomic_inc(&mp->stats.no_free_exch_xid);
+			kmem_cache_free(mp->em_cache, ep);
+			goto out;
+		}
+	}
+
+	list_add_tail(&ep->ex_list, &mp->ex_list);
+	fc_seq_alloc(ep, ep->seq_id++);
+	mp->total_exches++;
+	spin_unlock_bh(&mp->em_lock);
+
+	/*
+	 *  update exchange
+	 */
+	ep->oxid = ep->xid = xid;
+	ep->em = mp;
+	ep->lp = mp->lp;
+	ep->f_ctl = FC_FC_FIRST_SEQ;	/* next seq is first seq */
+	ep->rxid = FC_XID_UNKNOWN;
+	ep->class = mp->class;
+
+	spin_lock_init(&ep->ex_lock);
+	setup_timer(&ep->ex_timer, fc_exch_timeout, (unsigned long)ep);
+
+	fc_exch_hold(ep);	/* hold for caller */
+out:
+	return ep;
+}
+EXPORT_SYMBOL(fc_exch_alloc);
+
+/*
+ * Lookup and hold an exchange.
+ */
+static struct fc_exch *fc_exch_find(struct fc_exch_mgr *mp, u16 xid)
+{
+	struct fc_exch *ep = NULL;
+
+	if ((xid >= mp->min_xid) && (xid <= mp->max_xid)) {
+		spin_lock_bh(&mp->em_lock);
+		ep = mp->exches[xid - mp->min_xid];
+		if (ep) {
+			fc_exch_hold(ep);
+			WARN_ON(ep->xid != xid);
+		}
+		spin_unlock_bh(&mp->em_lock);
+	}
+	return ep;
+}
+
+/*
+ * Mark exchange complete - internal version called with ex_lock held.
+ */
+static void fc_exch_complete_locked(struct fc_exch *ep)
+{
+	ep->esb_stat |= ESB_ST_COMPLETE;
+	ep->resp = NULL;
+
+	/*
+	 * Assuming in-order delivery, the timeout for RRQ is 0, not R_A_TOV.
+	 * Here, we allow a short time for frames which may have been
+	 * re-ordered in various kernel queues or due to interrupt balancing.
+	 * Also, using a timer here allows us to issue the RRQ after the
+	 * exchange lock is dropped.
+	 */
+	if (unlikely(ep->esb_stat & ESB_ST_REC_QUAL)) {
+		fc_exch_timer_set_locked(ep, 10);
+	} else {
+		if (timer_pending(&ep->ex_timer)) {
+			del_timer(&ep->ex_timer);
+			/*
+			 * drop hold for timer
+			 */
+			atomic_dec(&ep->ex_refcnt);
+		}
+		atomic_dec(&ep->ex_refcnt);
+	}
+}
+
+/*
+ * Mark exchange complete.
+ * The state may be available for ILS Read Exchange Status (RES) for a time.
+ * The caller doesn't necessarily hold the exchange.
+ */
+static void fc_exch_complete(struct fc_exch *ep)
+{
+	spin_lock_bh(&ep->ex_lock);
+	fc_exch_complete_locked(ep);
+	spin_unlock_bh(&ep->ex_lock);
+}
+
+/*
+ * Allocate a new exchange as responder.
+ * Sets the responder ID in the frame header.
+ */
+static struct fc_exch *fc_exch_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
+{
+	struct fc_exch *ep;
+	struct fc_frame_header *fh;
+	u16 rxid;
+
+	ep = mp->lp->tt.exch_get(mp->lp, fp);
+	if (ep) {
+		ep->class = fc_frame_class(fp);
+
+		/*
+		 * Set EX_CTX indicating we're responding on this exchange.
+		 */
+		ep->f_ctl |= FC_FC_EX_CTX;	/* we're responding */
+		ep->f_ctl &= ~FC_FC_FIRST_SEQ;	/* not new */
+		fh = fc_frame_header_get(fp);
+		ep->sid = ntoh24(fh->fh_d_id);
+		ep->did = ntoh24(fh->fh_s_id);
+		ep->oid = ep->did;
+
+		/*
+		 * Allocated exchange has placed the XID in the
+		 * originator field. Move it to the responder field,
+		 * and set the originator XID from the frame.
+		 */
+		ep->rxid = ep->xid;
+		ep->oxid = ntohs(fh->fh_ox_id);
+		ep->esb_stat |= ESB_ST_RESP | ESB_ST_SEQ_INIT;
+		if ((ntoh24(fh->fh_f_ctl) & FC_FC_SEQ_INIT) == 0)
+			ep->esb_stat &= ~ESB_ST_SEQ_INIT;
+
+		/*
+		 * Set the responder ID in the frame header.
+		 * The old one should've been 0xffff.
+		 * If it isn't, don't assign one.
+		 * Incoming basic link service frames may specify
+		 * a referenced RX_ID.
+		 */
+		if (fh->fh_type != FC_TYPE_BLS) {
+			rxid = ntohs(fh->fh_rx_id);
+			WARN_ON(rxid != FC_XID_UNKNOWN);
+			fh->fh_rx_id = htons(ep->rxid);
+		}
+	}
+	return ep;
+}
+
+/*
+ * Find a sequence for receive where the other end is originating the sequence.
+ */
+static enum fc_pf_rjt_reason
+fc_seq_lookup_recip(struct fc_exch_mgr *mp, struct fc_frame *fp)
+{
+	struct fc_frame_header *fh = fc_frame_header_get(fp);
+	struct fc_exch *ep = NULL;
+	struct fc_seq *sp = NULL;
+	enum fc_pf_rjt_reason reject = FC_RJT_NONE;
+	u32 f_ctl;
+	u16 xid;
+
+	f_ctl = ntoh24(fh->fh_f_ctl);
+	WARN_ON((f_ctl & FC_FC_SEQ_CTX) != 0);
+
+	/*
+	 * Lookup or create the exchange if we will be creating the sequence.
+	 */
+	if (f_ctl & FC_FC_EX_CTX) {
+		xid = ntohs(fh->fh_ox_id);	/* we originated exch */
+		ep = fc_exch_find(mp, xid);
+		if (!ep) {
+			atomic_inc(&mp->stats.xid_not_found);
+			reject = FC_RJT_OX_ID;
+			goto out;
+		}
+		if (ep->rxid == FC_XID_UNKNOWN)
+			ep->rxid = ntohs(fh->fh_rx_id);
+		else if (ep->rxid != ntohs(fh->fh_rx_id)) {
+			fc_exch_release(ep);
+			reject = FC_RJT_OX_ID;
+			goto out;
+		}
+	} else {
+		xid = ntohs(fh->fh_rx_id);	/* we are the responder */
+
+		/*
+		 * Special case for MDS issuing an ELS TEST with a
+		 * bad rxid of 0.
+		 * XXX take this out once we do the proper reject.
+		 */
+		if (xid == 0 && fh->fh_r_ctl == FC_RCTL_ELS_REQ &&
+		    fc_frame_payload_op(fp) == ELS_TEST) {
+			fh->fh_rx_id = htons(FC_XID_UNKNOWN);
+			xid = FC_XID_UNKNOWN;
+		}
+
+		/*
+		 * new sequence - find the exchange
+		 */
+		ep = fc_exch_find(mp, xid);
+		if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
+			if (ep) {
+				fc_exch_release(ep);
+				atomic_inc(&mp->stats.xid_busy);
+				reject = FC_RJT_RX_ID;
+				goto out;
+			}
+			ep = fc_exch_resp(mp, fp);
+			if (!ep) {
+				reject = FC_RJT_EXCH_EST;	/* XXX */
+				goto out;
+			}
+			xid = ep->xid;	/* get our XID */
+		} else if (!ep) {
+			atomic_inc(&mp->stats.xid_not_found);
+			reject = FC_RJT_RX_ID;	/* XID not found */
+			goto out;
+		}
+	}
+
+	/*
+	 * At this point, we have the exchange held.
+	 * Find or create the sequence.
+	 */
+	if (fc_sof_is_init(fr_sof(fp))) {
+		sp = fc_seq_start_next(&ep->seq);
+		if (!sp) {
+			reject = FC_RJT_SEQ_XS;	/* exchange shortage */
+			goto out;
+		}
+		sp->id = fh->fh_seq_id;
+		sp->ssb_stat |= SSB_ST_RESP;
+	} else {
+		sp = &ep->seq;
+		if (sp->id != fh->fh_seq_id) {
+			atomic_inc(&mp->stats.seq_not_found);
+			reject = FC_RJT_SEQ_ID;	/* sequence/exch should exist */
+			goto out;
+		}
+	}
+	WARN_ON(ep != fc_seq_exch(sp));
+
+	if (f_ctl & FC_FC_SEQ_INIT)
+		ep->esb_stat |= ESB_ST_SEQ_INIT;
+
+	fr_seq(fp) = sp;
+out:
+	return reject;
+}
+
+/*
+ * Find the sequence for a frame being received.
+ * We originated the sequence, so it should be found.
+ * We may or may not have originated the exchange.
+ * Does not hold the sequence for the caller.
+ */
+static struct fc_seq *fc_seq_lookup_orig(struct fc_exch_mgr *mp,
+					 struct fc_frame *fp)
+{
+	struct fc_frame_header *fh = fc_frame_header_get(fp);
+	struct fc_exch *ep;
+	struct fc_seq *sp = NULL;
+	u32 f_ctl;
+	u16 xid;
+
+	f_ctl = ntoh24(fh->fh_f_ctl);
+	WARN_ON((f_ctl & FC_FC_SEQ_CTX) != FC_FC_SEQ_CTX);
+	xid = ntohs((f_ctl & FC_FC_EX_CTX) ? fh->fh_ox_id : fh->fh_rx_id);
+	ep = fc_exch_find(mp, xid);
+	if (!ep)
+		return NULL;
+	if (ep->seq.id == fh->fh_seq_id) {
+		/*
+		 * Save the RX_ID if we didn't previously know it.
+		 */
+		sp = &ep->seq;
+		if ((f_ctl & FC_FC_EX_CTX) != 0 &&
+		    ep->rxid == FC_XID_UNKNOWN) {
+			ep->rxid = ntohs(fh->fh_rx_id);
+		}
+	}
+	fc_exch_release(ep);
+	return sp;
+}
+
+/*
+ * Set addresses for an exchange.
+ * Note this must be done before the first sequence of the exchange is sent.
+ */
+static void fc_exch_set_addr(struct fc_exch *ep,
+			     u32 orig_id, u32 resp_id)
+{
+	ep->oid = orig_id;
+	if (ep->esb_stat & ESB_ST_RESP) {
+		ep->sid = resp_id;
+		ep->did = orig_id;
+	} else {
+		ep->sid = orig_id;
+		ep->did = resp_id;
+	}
+}
+
+/*
+ * Allocate a new sequence on the same exchange as the supplied sequence.
+ * This will never return NULL.
+ */
+struct fc_seq *fc_seq_start_next(struct fc_seq *sp)
+{
+	struct fc_exch *ep = fc_seq_exch(sp);
+
+	spin_lock_bh(&ep->ex_lock);
+	WARN_ON((ep->esb_stat & ESB_ST_COMPLETE) != 0);
+
+	sp = fc_seq_alloc(ep, ep->seq_id++);
+
+	if (fc_exch_debug)
+		FC_DBG("exch %4x f_ctl %6x seq %2x f_ctl %6x\n",
+		       ep->xid, ep->f_ctl, sp->id, sp->f_ctl);
+	spin_unlock_bh(&ep->ex_lock);
+	return sp;
+}
+EXPORT_SYMBOL(fc_seq_start_next);
+
+int fc_seq_send(struct fc_lport *lp, struct fc_seq *sp,
+		struct fc_frame *fp, u32 f_ctl)
+{
+	struct fc_exch *ep;
+	struct fc_frame_header *fh;
+	enum fc_class class;
+	u16 fill = 0;
+	int error;
+
+	ep = fc_seq_exch(sp);
+	WARN_ON((ep->esb_stat & ESB_ST_SEQ_INIT) != ESB_ST_SEQ_INIT);
+
+	fc_seq_fill_hdr(sp, fp);
+	fh = fc_frame_header_get(fp);
+	class = ep->class;
+	fr_sof(fp) = class;
+	if (sp->cnt)
+		fr_sof(fp) = fc_sof_normal(class);
+
+	if (f_ctl & FC_FC_END_SEQ) {
+		fr_eof(fp) = FC_EOF_T;
+		if (fc_sof_needs_ack(class))
+			fr_eof(fp) = FC_EOF_N;
+		/*
+		 * Form f_ctl.
+		 * The number of fill bytes to make the length a 4-byte
+		 * multiple is the low order 2-bits of the f_ctl.
+		 * The fill itself will have been cleared by the frame
+		 * allocation.
+		 * After this, the length will be even, as expected by
+		 * the transport. Don't include the fill in the f_ctl
+		 * saved in the sequence.
+		 */
+		fill = fr_len(fp) & 3;
+		if (fill) {
+			fill = 4 - fill;
+			/* TODO, this may be a problem with fragmented skb */
+			skb_put(fp_skb(fp), fill);
+		}
+		f_ctl |= sp->f_ctl | ep->f_ctl;
+	} else {
+		WARN_ON(fr_len(fp) % 4 != 0);	/* no fill on a non-last frame */
+		f_ctl |= sp->f_ctl | ep->f_ctl;
+		f_ctl &= ~FC_FC_SEQ_INIT;
+		fr_eof(fp) = FC_EOF_N;
+	}
+
+	hton24(fh->fh_f_ctl, f_ctl | fill);
+	fh->fh_seq_cnt = htons(sp->cnt++);
+
+	/*
+	 * Send the frame.
+	 */
+	error = lp->tt.frame_send(lp, fp);
+
+	/*
+	 * Update the exchange and sequence flags,
+	 * assuming all frames for the sequence have been sent.
+	 * We can only be called to send once for each sequence.
+	 */
+	spin_lock_bh(&ep->ex_lock);
+	sp->f_ctl = f_ctl;	/* save for possible abort */
+	ep->f_ctl &= ~FC_FC_FIRST_SEQ;	/* not first seq */
+	if (f_ctl & FC_FC_END_SEQ) {
+		if (f_ctl & FC_FC_SEQ_INIT)
+			ep->esb_stat &= ~ESB_ST_SEQ_INIT;
+	}
+	spin_unlock_bh(&ep->ex_lock);
+	return error;
+}
+EXPORT_SYMBOL(fc_seq_send);
+
+void fc_seq_els_rsp_send(struct fc_seq *sp, enum fc_els_cmd els_cmd,
+			 struct fc_seq_els_data *els_data)
+{
+	switch (els_cmd) {
+	case ELS_LS_RJT:
+		fc_seq_ls_rjt(sp, els_data->reason, els_data->explan);
+		break;
+	case ELS_LS_ACC:
+		fc_seq_ls_acc(sp);
+		break;
+	case ELS_RRQ:
+		fc_exch_els_rrq(sp, els_data->fp);
+		break;
+	case ELS_REC:
+		fc_exch_els_rec(sp, els_data->fp);
+		break;
+	default:
+		FC_DBG("Invalid ELS CMD:%x\n", els_cmd);
+	}
+}
+EXPORT_SYMBOL(fc_seq_els_rsp_send);
+
+/*
+ * Send a sequence, which is also the last sequence in the exchange.
+ */
+static void fc_seq_send_last(struct fc_seq *sp, struct fc_frame *fp,
+			     enum fc_rctl rctl, enum fc_fh_type fh_type)
+{
+	u32 f_ctl;
+
+	fc_frame_setup(fp, rctl, fh_type);
+	f_ctl = FC_FC_SEQ_INIT | FC_FC_LAST_SEQ | FC_FC_END_SEQ;
+	fc_seq_send(fc_seq_exch(sp)->lp, sp, fp, f_ctl);
+}
+
+/*
+ * Send ACK_1 (or equiv.) indicating we received something.
+ * The frame we're acking is supplied.
+ */
+static void fc_seq_send_ack(struct fc_seq *sp, const struct fc_frame *rx_fp)
+{
+	struct fc_frame *fp;
+	struct fc_frame_header *rx_fh;
+	struct fc_frame_header *fh;
+	struct fc_lport *lp = fc_seq_exch(sp)->lp;
+	unsigned int f_ctl;
+
+	/*
+	 * Don't send ACKs for class 3.
+	 */
+	if (fc_sof_needs_ack(fr_sof(rx_fp))) {
+		fp = fc_frame_alloc(lp, 0);
+		if (!fp)
+			return;
+
+		fc_seq_fill_hdr(sp, fp);
+		fh = fc_frame_header_get(fp);
+		fh->fh_r_ctl = FC_RCTL_ACK_1;
+		fh->fh_type = FC_TYPE_BLS;
+
+		/*
+		 * Form f_ctl by inverting EX_CTX and SEQ_CTX (bits 23, 22).
+		 * Echo FIRST_SEQ, LAST_SEQ, END_SEQ, END_CONN, SEQ_INIT.
+		 * Bits 9-8 are meaningful (retransmitted or unidirectional).
+		 * Last ACK uses bits 7-6 (continue sequence),
+		 * bits 5-4 are meaningful (what kind of ACK to use).
+		 */
+		rx_fh = fc_frame_header_get(rx_fp);
+		f_ctl = ntoh24(rx_fh->fh_f_ctl);
+		f_ctl &= FC_FC_EX_CTX | FC_FC_SEQ_CTX |
+			FC_FC_FIRST_SEQ | FC_FC_LAST_SEQ |
+			FC_FC_END_SEQ | FC_FC_END_CONN | FC_FC_SEQ_INIT |
+			FC_FC_RETX_SEQ | FC_FC_UNI_TX;
+		f_ctl ^= FC_FC_EX_CTX | FC_FC_SEQ_CTX;
+		hton24(fh->fh_f_ctl, f_ctl);
+
+		fh->fh_seq_id = rx_fh->fh_seq_id;
+		fh->fh_seq_cnt = rx_fh->fh_seq_cnt;
+		fh->fh_parm_offset = htonl(1);	/* ack single frame */
+
+		fr_sof(fp) = fr_sof(rx_fp);
+		if (f_ctl & FC_FC_END_SEQ)
+			fr_eof(fp) = FC_EOF_T;
+		else
+			fr_eof(fp) = FC_EOF_N;
+
+		(void) lp->tt.frame_send(lp, fp);
+	}
+}
+
+/*
+ * Send BLS Reject.
+ * This is for rejecting BA_ABTS only.
+ */
+static void
+fc_exch_send_ba_rjt(struct fc_frame *rx_fp, enum fc_ba_rjt_reason reason,
+		    enum fc_ba_rjt_explan explan)
+{
+	struct fc_frame *fp;
+	struct fc_frame_header *rx_fh;
+	struct fc_frame_header *fh;
+	struct fc_ba_rjt *rp;
+	struct fc_lport *lp;
+	unsigned int f_ctl;
+
+	lp = fr_dev(rx_fp);
+	fp = fc_frame_alloc(lp, sizeof(*rp));
+	if (!fp)
+		return;
+	fh = fc_frame_header_get(fp);
+	rx_fh = fc_frame_header_get(rx_fp);
+
+	memset(fh, 0, sizeof(*fh) + sizeof(*rp));
+
+	rp = fc_frame_payload_get(fp, sizeof(*rp));
+	rp->br_reason = reason;
+	rp->br_explan = explan;
+
+	/*
+	 * seq_id, cs_ctl, df_ctl and param/offset are zero.
+	 */
+	memcpy(fh->fh_s_id, rx_fh->fh_d_id, 3);
+	memcpy(fh->fh_d_id, rx_fh->fh_s_id, 3);
+	fh->fh_ox_id = rx_fh->fh_rx_id;
+	fh->fh_rx_id = rx_fh->fh_ox_id;
+	fh->fh_seq_cnt = rx_fh->fh_seq_cnt;
+	fh->fh_r_ctl = FC_RCTL_BA_RJT;
+	fh->fh_type = FC_TYPE_BLS;
+
+	/*
+	 * Form f_ctl by inverting EX_CTX and SEQ_CTX (bits 23, 22).
+	 * Echo FIRST_SEQ, LAST_SEQ, END_SEQ, END_CONN, SEQ_INIT.
+	 * Bits 9-8 are meaningful (retransmitted or unidirectional).
+	 * Last ACK uses bits 7-6 (continue sequence),
+	 * bits 5-4 are meaningful (what kind of ACK to use).
+	 * Always set LAST_SEQ, END_SEQ.
+	 */
+	f_ctl = ntoh24(rx_fh->fh_f_ctl);
+	f_ctl &= FC_FC_EX_CTX | FC_FC_SEQ_CTX |
+		FC_FC_END_CONN | FC_FC_SEQ_INIT |
+		FC_FC_RETX_SEQ | FC_FC_UNI_TX;
+	f_ctl ^= FC_FC_EX_CTX | FC_FC_SEQ_CTX;
+	f_ctl |= FC_FC_LAST_SEQ | FC_FC_END_SEQ;
+	f_ctl &= ~FC_FC_FIRST_SEQ;
+	hton24(fh->fh_f_ctl, f_ctl);
+
+	fr_sof(fp) = fc_sof_class(fr_sof(rx_fp));
+	fr_eof(fp) = FC_EOF_T;
+	if (fc_sof_needs_ack(fr_sof(fp)))
+		fr_eof(fp) = FC_EOF_N;
+
+	(void) lp->tt.frame_send(lp, fp);
+}
+
+/*
+ * Handle an incoming ABTS.  This would be for target mode usually,
+ * but could be due to lost FCP transfer ready, confirm or RRQ.
+ * We always handle this as an exchange abort, ignoring the parameter.
+ */
+static void fc_exch_recv_abts(struct fc_exch *ep, struct fc_frame *rx_fp)
+{
+	struct fc_frame *fp;
+	struct fc_ba_acc *ap;
+	struct fc_frame_header *fh;
+	struct fc_seq *sp;
+
+	if (!ep)
+		goto reject;
+	spin_lock_bh(&ep->ex_lock);
+	if (ep->esb_stat & ESB_ST_COMPLETE) {
+		spin_unlock_bh(&ep->ex_lock);
+		goto reject;
+	}
+	if (!(ep->esb_stat & ESB_ST_REC_QUAL))
+		fc_exch_hold(ep);		/* hold for REC_QUAL */
+	ep->esb_stat |= ESB_ST_ABNORMAL | ESB_ST_REC_QUAL;
+	fc_exch_timer_set_locked(ep, ep->r_a_tov);
+
+	fp = fc_frame_alloc(ep->lp, sizeof(*ap));
+	if (!fp) {
+		spin_unlock_bh(&ep->ex_lock);
+		goto free;
+	}
+	fh = fc_frame_header_get(fp);
+	ap = fc_frame_payload_get(fp, sizeof(*ap));
+	memset(ap, 0, sizeof(*ap));
+	sp = &ep->seq;
+	ap->ba_high_seq_cnt = htons(0xffff);
+	if (sp->ssb_stat & SSB_ST_RESP) {
+		ap->ba_seq_id = sp->id;
+		ap->ba_seq_id_val = FC_BA_SEQ_ID_VAL;
+		ap->ba_high_seq_cnt = fh->fh_seq_cnt;
+		ap->ba_low_seq_cnt = htons(sp->cnt);
+	}
+	sp = fc_seq_start_next(sp);
+	spin_unlock_bh(&ep->ex_lock);
+	fc_seq_send_last(sp, fp, FC_RCTL_BA_ACC, FC_TYPE_BLS);
+	fc_frame_free(rx_fp);
+	return;
+
+reject:
+	fc_exch_send_ba_rjt(rx_fp, FC_BA_RJT_UNABLE, FC_BA_RJT_INV_XID);
+free:
+	fc_frame_free(rx_fp);
+}
+
+/*
+ * Handle receive where the other end is originating the sequence.
+ */
+static void fc_exch_recv_req(struct fc_lport *lp, struct fc_exch_mgr *mp,
+			     struct fc_frame *fp)
+{
+	struct fc_frame_header *fh = fc_frame_header_get(fp);
+	struct fc_seq *sp = NULL;
+	struct fc_exch *ep = NULL;
+	enum fc_sof sof;
+	enum fc_eof eof;
+	u32 f_ctl;
+	enum fc_pf_rjt_reason reject;
+
+	fr_seq(fp) = NULL;
+	reject = fc_seq_lookup_recip(mp, fp);
+	if (reject == FC_RJT_NONE) {
+		sp = fr_seq(fp);	/* sequence will be held */
+		ep = fc_seq_exch(sp);
+		sof = fr_sof(fp);
+		eof = fr_eof(fp);
+		f_ctl = ntoh24(fh->fh_f_ctl);
+		fc_seq_send_ack(sp, fp);
+
+		/*
+		 * Call the receive function.
+		 *
+		 * The receive function may allocate a new sequence
+		 * over the old one, so we shouldn't change the
+		 * sequence after this.
+		 *
+		 * The frame will be freed by the receive function.
+		 * If new exch resp handler is valid then call that
+		 * first.
+		 */
+		if (ep->resp)
+			ep->resp(sp, fp, ep->resp_arg);
+		else
+			lp->tt.lport_recv(lp, sp, fp);
+	} else {
+		if (fc_exch_debug)
+			FC_DBG("exch/seq lookup failed: reject %x\n", reject);
+		fc_frame_free(fp);
+	}
+}
+
+/*
+ * Handle receive where the other end is originating the sequence in
+ * response to our exchange.
+ */
+static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
+{
+	struct fc_frame_header *fh = fc_frame_header_get(fp);
+	struct fc_seq *sp;
+	struct fc_exch *ep;
+	enum fc_sof sof;
+	u32 f_ctl;
+	void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg);
+	void *ex_resp_arg;
+
+	ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
+	if (!ep) {
+		atomic_inc(&mp->stats.xid_not_found);
+		goto out;
+	}
+	if (ep->rxid == FC_XID_UNKNOWN)
+		ep->rxid = ntohs(fh->fh_rx_id);
+	if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
+		atomic_inc(&mp->stats.xid_not_found);
+		goto rel;
+	}
+	if (ep->did != ntoh24(fh->fh_s_id) &&
+	    ep->did != FC_FID_FLOGI) {
+		atomic_inc(&mp->stats.xid_not_found);
+		goto rel;
+	}
+	sof = fr_sof(fp);
+	if (fc_sof_is_init(sof)) {
+		sp = fc_seq_start_next(&ep->seq);
+		sp->id = fh->fh_seq_id;
+		sp->ssb_stat |= SSB_ST_RESP;
+	} else {
+		sp = &ep->seq;
+		if (sp->id != fh->fh_seq_id) {
+			atomic_inc(&mp->stats.seq_not_found);
+			goto rel;
+		}
+	}
+	f_ctl = ntoh24(fh->fh_f_ctl);
+	fr_seq(fp) = sp;
+	if (f_ctl & FC_FC_SEQ_INIT)
+		ep->esb_stat |= ESB_ST_SEQ_INIT;
+
+	if (fc_sof_needs_ack(sof))
+		fc_seq_send_ack(sp, fp);
+	resp = ep->resp;
+	ex_resp_arg = ep->resp_arg;
+
+	if (fh->fh_type != FC_TYPE_FCP && fr_eof(fp) == FC_EOF_T &&
+	    (f_ctl & (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) ==
+	    (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) {
+		spin_lock_bh(&ep->ex_lock);
+		fc_exch_complete_locked(ep);
+		WARN_ON(fc_seq_exch(sp) != ep);
+		spin_unlock_bh(&ep->ex_lock);
+	}
+
+	/*
+	 * Call the receive function.
+	 * The sequence is held (has a refcnt) for us,
+	 * but not for the receive function.
+	 *
+	 * The receive function may allocate a new sequence
+	 * over the old one, so we shouldn't change the
+	 * sequence after this.
+	 *
+	 * The frame will be freed by the receive function.
+	 * If new exch resp handler is valid then call that
+	 * first.
+	 */
+	if (resp)
+		resp(sp, fp, ex_resp_arg);
+	else
+		fc_frame_free(fp);
+	fc_exch_release(ep);
+	return;
+rel:
+	fc_exch_release(ep);
+out:
+	fc_frame_free(fp);
+}
+
+/*
+ * Handle receive for a sequence where other end is responding to our sequence.
+ */
+static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
+{
+	struct fc_seq *sp;
+
+	sp = fc_seq_lookup_orig(mp, fp);	/* doesn't hold sequence */
+	if (!sp) {
+		atomic_inc(&mp->stats.xid_not_found);
+		if (fc_exch_debug)
+			FC_DBG("seq lookup failed\n");
+	} else {
+		atomic_inc(&mp->stats.non_bls_resp);
+		if (fc_exch_debug)
+			FC_DBG("non-BLS response to sequence\n");
+	}
+	fc_frame_free(fp);
+}
+
+/*
+ * Handle the response to an ABTS for exchange or sequence.
+ * This can be BA_ACC or BA_RJT.
+ */
+static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp)
+{
+	struct fc_frame_header *fh;
+	struct fc_ba_acc *ap;
+	u16 low;
+	u16 high;
+
+	fh = fc_frame_header_get(fp);
+	if (fc_exch_debug)
+		FC_DBG("exch: BLS rctl %x - %s\n",
+		       fh->fh_r_ctl, fc_exch_rctl_name(fh->fh_r_ctl));
+	fc_exch_hold(ep);
+	spin_lock_bh(&ep->ex_lock);
+	switch (fh->fh_r_ctl) {
+	case FC_RCTL_BA_ACC:
+		ap = fc_frame_payload_get(fp, sizeof(*ap));
+		if (!ap)
+			break;
+
+		/*
+		 * Decide whether to establish a Recovery Qualifier.
+		 * We do this if there is a non-empty SEQ_CNT range and
+		 * SEQ_ID is the same as the one we aborted.
+		 */
+		low = ntohs(ap->ba_low_seq_cnt);
+		high = ntohs(ap->ba_high_seq_cnt);
+		if ((ep->esb_stat & ESB_ST_REC_QUAL) == 0 &&
+		    (ap->ba_seq_id_val != FC_BA_SEQ_ID_VAL ||
+		     ap->ba_seq_id == ep->seq_id) && low != high) {
+			ep->esb_stat |= ESB_ST_REC_QUAL;
+			fc_exch_hold(ep);  /* hold for recovery qualifier */
+			fc_exch_timer_set_locked(ep, 2 * ep->r_a_tov);
+		}
+		break;
+	case FC_RCTL_BA_RJT:
+		break;
+	default:
+		break;
+	}
+	if (ntoh24(fh->fh_f_ctl) & FC_FC_LAST_SEQ)
+		fc_exch_complete_locked(ep);
+	spin_unlock_bh(&ep->ex_lock);
+	fc_exch_release(ep);
+	fc_frame_free(fp);
+}
+
+/*
+ * Receive BLS sequence.
+ * This is always a sequence initiated by the remote side.
+ * We may be either the originator or recipient of the exchange.
+ */
+static void fc_exch_recv_bls(struct fc_exch_mgr *mp, struct fc_frame *fp)
+{
+	struct fc_frame_header *fh;
+	struct fc_exch *ep;
+	u32 f_ctl;
+
+	fh = fc_frame_header_get(fp);
+	f_ctl = ntoh24(fh->fh_f_ctl);
+	fr_seq(fp) = NULL;
+
+	ep = fc_exch_find(mp, (f_ctl & FC_FC_EX_CTX) ?
+			  ntohs(fh->fh_ox_id) : ntohs(fh->fh_rx_id));
+	if (ep && (f_ctl & FC_FC_SEQ_INIT)) {
+		spin_lock_bh(&ep->ex_lock);
+		ep->esb_stat |= ESB_ST_SEQ_INIT;
+		spin_unlock_bh(&ep->ex_lock);
+	}
+	if (f_ctl & FC_FC_SEQ_CTX) {
+		/*
+		 * A response to a sequence we initiated.
+		 * This should only be ACKs for class 2 or F.
+		 */
+		switch (fh->fh_r_ctl) {
+		case FC_RCTL_ACK_1:
+		case FC_RCTL_ACK_0:
+			break;
+		default:
+			if (fc_exch_debug)
+				FC_DBG("BLS rctl %x - %s received\n",
+				       fh->fh_r_ctl,
+				       fc_exch_rctl_name(fh->fh_r_ctl));
+			break;
+		}
+		fc_frame_free(fp);
+	} else {
+		switch (fh->fh_r_ctl) {
+		case FC_RCTL_BA_RJT:
+		case FC_RCTL_BA_ACC:
+			if (ep)
+				fc_exch_abts_resp(ep, fp);
+			else
+				fc_frame_free(fp);
+			break;
+		case FC_RCTL_BA_ABTS:
+			fc_exch_recv_abts(ep, fp);
+			break;
+		default:			/* ignore junk */
+			fc_frame_free(fp);
+			break;
+		}
+	}
+	if (ep)
+		fc_exch_release(ep);	/* release hold taken by fc_exch_find */
+}
+
+/*
+ * Accept sequence with LS_ACC.
+ * If this fails due to allocation or transmit congestion, assume the
+ * originator will repeat the sequence.
+ */
+static void fc_seq_ls_acc(struct fc_seq *req_sp)
+{
+	struct fc_seq *sp;
+	struct fc_els_ls_acc *acc;
+	struct fc_frame *fp;
+
+	sp = fc_seq_start_next(req_sp);
+	fp = fc_frame_alloc(fc_seq_exch(sp)->lp, sizeof(*acc));
+	if (fp) {
+		acc = fc_frame_payload_get(fp, sizeof(*acc));
+		memset(acc, 0, sizeof(*acc));
+		acc->la_cmd = ELS_LS_ACC;
+		fc_seq_send_last(sp, fp, FC_RCTL_ELS_REP, FC_TYPE_ELS);
+	}
+}
+
+/*
+ * Reject sequence with ELS LS_RJT.
+ * If this fails due to allocation or transmit congestion, assume the
+ * originator will repeat the sequence.
+ */
+static void fc_seq_ls_rjt(struct fc_seq *req_sp, enum fc_els_rjt_reason reason,
+			  enum fc_els_rjt_explan explan)
+{
+	struct fc_seq *sp;
+	struct fc_els_ls_rjt *rjt;
+	struct fc_frame *fp;
+
+	sp = fc_seq_start_next(req_sp);
+	fp = fc_frame_alloc(fc_seq_exch(sp)->lp, sizeof(*rjt));
+	if (fp) {
+		rjt = fc_frame_payload_get(fp, sizeof(*rjt));
+		memset(rjt, 0, sizeof(*rjt));
+		rjt->er_cmd = ELS_LS_RJT;
+		rjt->er_reason = reason;
+		rjt->er_explan = explan;
+		fc_seq_send_last(sp, fp, FC_RCTL_ELS_REP, FC_TYPE_ELS);
+	}
+}
+
+static void fc_exch_reset(struct fc_exch *ep)
+{
+	struct fc_seq *sp;
+	void (*resp)(struct fc_seq *, struct fc_frame *, void *);
+	void *arg;
+
+	fc_exch_hold(ep);
+	spin_lock_bh(&ep->ex_lock);
+	resp = ep->resp;
+	ep->resp = NULL;
+	if (ep->esb_stat & ESB_ST_REC_QUAL)
+		atomic_dec(&ep->ex_refcnt);	/* drop hold for rec_qual */
+	ep->esb_stat &= ~ESB_ST_REC_QUAL;
+	if (ep->esb_stat & ESB_ST_COMPLETE)
+		resp = NULL;
+	arg = ep->resp_arg;
+	if (timer_pending(&ep->ex_timer)) {
+		del_timer(&ep->ex_timer);
+		atomic_dec(&ep->ex_refcnt);	/* drop hold for timer */
+	}
+	sp = &ep->seq;
+	ep->esb_stat |= ESB_ST_COMPLETE;
+	spin_unlock_bh(&ep->ex_lock);
+	if (resp)
+		resp(sp, ERR_PTR(-FC_EX_CLOSED), arg);
+	fc_exch_release(ep);
+}
+
+/*
+ * Reset an exchange manager, releasing all sequences and exchanges.
+ * If sid is non-zero, reset only exchanges we source from that FID.
+ * If did is non-zero, reset only exchanges destined to that FID.
+ */
+void fc_exch_mgr_reset(struct fc_exch_mgr *mp, u32 sid, u32 did)
+{
+	struct fc_exch *ep;
+	struct fc_exch *next;
+
+	list_for_each_entry_safe(ep, next, &mp->ex_list, ex_list) {
+		if ((sid == 0 || sid == ep->sid) &&
+		    (did == 0 || did == ep->did))
+			fc_exch_reset(ep);
+	}
+}
+EXPORT_SYMBOL(fc_exch_mgr_reset);
+
+void fc_seq_get_xids(struct fc_seq *sp, u16 *oxid, u16 *rxid)
+{
+	struct fc_exch *ep;
+
+	ep = fc_seq_exch(sp);
+	*oxid = ep->oxid;
+	*rxid = ep->rxid;
+}
+EXPORT_SYMBOL(fc_seq_get_xids);
+
+void fc_seq_set_rec_data(struct fc_seq *sp, u32 rec_data)
+{
+	sp->rec_data = rec_data;
+}
+EXPORT_SYMBOL(fc_seq_set_rec_data);
+
+/*
+ * Handle incoming ELS REC - Read Exchange Concise.
+ * Note that the requesting port may be different than the S_ID in the request.
+ */
+static void fc_exch_els_rec(struct fc_seq *sp, struct fc_frame *rfp)
+{
+	struct fc_frame *fp;
+	struct fc_exch *ep;
+	struct fc_exch_mgr *em;
+	struct fc_els_rec *rp;
+	struct fc_els_rec_acc *acc;
+	enum fc_els_rjt_reason reason = ELS_RJT_LOGIC;
+	enum fc_els_rjt_explan explan;
+	u32 sid;
+	u16 rxid;
+	u16 oxid;
+
+	rp = fc_frame_payload_get(rfp, sizeof(*rp));
+	explan = ELS_EXPL_INV_LEN;
+	if (!rp)
+		goto reject;
+	sid = ntoh24(rp->rec_s_id);
+	rxid = ntohs(rp->rec_rx_id);
+	oxid = ntohs(rp->rec_ox_id);
+
+	/*
+	 * Currently it's hard to find the local S_ID from the exchange
+	 * manager.  This will eventually be fixed, but for now it's easier
+	 * to lookup the subject exchange twice, once as if we were
+	 * the initiator, and then again if we weren't.
+	 */
+	em = fc_seq_exch(sp)->em;
+	ep = fc_exch_find(em, oxid);
+	explan = ELS_EXPL_OXID_RXID;
+	if (ep && ep->oid == sid) {
+		if (ep->rxid != FC_XID_UNKNOWN &&
+		    rxid != FC_XID_UNKNOWN &&
+		    ep->rxid != rxid)
+			goto rel;
+	} else {
+		if (ep)
+			fc_exch_release(ep);
+		ep = NULL;
+		if (rxid != FC_XID_UNKNOWN)
+			ep = fc_exch_find(em, rxid);
+		if (!ep)
+			goto reject;
+	}
+
+	fp = fc_frame_alloc(fc_seq_exch(sp)->lp, sizeof(*acc));
+	if (!fp) {
+		fc_exch_done(sp);
+		goto out;
+	}
+	sp = fc_seq_start_next(sp);
+	acc = fc_frame_payload_get(fp, sizeof(*acc));
+	memset(acc, 0, sizeof(*acc));
+	acc->reca_cmd = ELS_LS_ACC;
+	acc->reca_ox_id = rp->rec_ox_id;
+	memcpy(acc->reca_ofid, rp->rec_s_id, 3);
+	acc->reca_rx_id = htons(ep->rxid);
+	if (ep->sid == ep->oid)
+		hton24(acc->reca_rfid, ep->did);
+	else
+		hton24(acc->reca_rfid, ep->sid);
+	acc->reca_fc4value = htonl(ep->seq.rec_data);
+	acc->reca_e_stat = htonl(ep->esb_stat & (ESB_ST_RESP |
+						  ESB_ST_SEQ_INIT |
+						  ESB_ST_COMPLETE));
+	fc_seq_send_last(sp, fp, FC_RCTL_ELS_REP, FC_TYPE_ELS);
+out:
+	fc_exch_release(ep);
+	fc_frame_free(rfp);
+	return;
+
+rel:
+	fc_exch_release(ep);
+reject:
+	fc_seq_ls_rjt(sp, reason, explan);
+	fc_frame_free(rfp);
+}
+
+/*
+ * Handle response from RRQ.
+ * Not much to do here, really.
+ * Should report errors.
+ */
+static void fc_exch_rrq_resp(struct fc_seq *sp, struct fc_frame *fp, void *arg)
+{
+	unsigned int op;
+
+	if (IS_ERR(fp))
+		return;
+	op = fc_frame_payload_op(fp);
+	if (op == ELS_LS_RJT)
+		FC_DBG("LS_RJT for RRQ\n");
+	else if (op != ELS_LS_ACC)
+		FC_DBG("unexpected response op %x for RRQ\n", op);
+	fc_frame_free(fp);
+}
+
+/*
+ * Send ELS RRQ - Reinstate Recovery Qualifier.
+ * This tells the remote port to stop blocking the use of
+ * the exchange and the seq_cnt range.
+ */
+static void fc_exch_rrq(struct fc_exch *ep)
+{
+	struct fc_lport *lp;
+	struct fc_els_rrq *rrq;
+	struct fc_frame *fp;
+	u32 did;
+
+	lp = ep->lp;
+
+	fp = fc_frame_alloc(lp, sizeof(*rrq));
+	if (!fp)
+		return;
+	fc_frame_setup(fp, FC_RCTL_ELS_REQ, FC_TYPE_ELS);
+	rrq = fc_frame_payload_get(fp, sizeof(*rrq));
+	memset(rrq, 0, sizeof(*rrq));
+	rrq->rrq_cmd = ELS_RRQ;
+	hton24(rrq->rrq_s_id, ep->sid);
+	rrq->rrq_ox_id = htons(ep->oxid);
+	rrq->rrq_rx_id = htons(ep->rxid);
+
+	did = ep->did;
+	if (ep->esb_stat & ESB_ST_RESP)
+		did = ep->sid;
+	if (!fc_exch_seq_send(lp, fp, fc_exch_rrq_resp, NULL, lp->e_d_tov,
+			      lp->fid, did, FC_FC_SEQ_INIT | FC_FC_END_SEQ))
+		fc_exch_timer_set(ep, ep->r_a_tov);
+}
+
+/*
+ * Handle incoming ELS RRQ - Reinstate Recovery Qualifier.
+ */
+static void fc_exch_els_rrq(struct fc_seq *sp, struct fc_frame *fp)
+{
+	struct fc_exch *ep;		/* request or subject exchange */
+	struct fc_els_rrq *rp;
+	u32 sid;
+	u16 xid;
+	enum fc_els_rjt_explan explan;
+
+	rp = fc_frame_payload_get(fp, sizeof(*rp));
+	explan = ELS_EXPL_INV_LEN;
+	if (!rp)
+		goto reject;
+
+	/*
+	 * lookup subject exchange.
+	 */
+	ep = fc_seq_exch(sp);
+	sid = ntoh24(rp->rrq_s_id);		/* subject source */
+	xid = ep->did == sid ? ntohs(rp->rrq_ox_id) : ntohs(rp->rrq_rx_id);
+	ep = fc_exch_find(ep->em, xid);
+
+	explan = ELS_EXPL_OXID_RXID;
+	if (!ep)
+		goto reject;
+	spin_lock_bh(&ep->ex_lock);
+	if (ep->oxid != ntohs(rp->rrq_ox_id))
+		goto unlock_reject;
+	if (ep->rxid != ntohs(rp->rrq_rx_id) &&
+	    ep->rxid != FC_XID_UNKNOWN)
+		goto unlock_reject;
+	explan = ELS_EXPL_SID;
+	if (ep->sid != sid)
+		goto unlock_reject;
+
+	/*
+	 * Clear Recovery Qualifier state, and cancel timer if complete.
+	 */
+	if (ep->esb_stat & ESB_ST_REC_QUAL) {
+		ep->esb_stat &= ~ESB_ST_REC_QUAL;
+		atomic_dec(&ep->ex_refcnt);	/* drop hold for rec qual */
+	}
+	if ((ep->esb_stat & ESB_ST_COMPLETE) && timer_pending(&ep->ex_timer)) {
+		del_timer(&ep->ex_timer);
+		atomic_dec(&ep->ex_refcnt);	/* drop hold for timer */
+	}
+	spin_unlock_bh(&ep->ex_lock);
+
+	/*
+	 * Send LS_ACC.
+	 */
+	fc_seq_ls_acc(sp);
+	fc_frame_free(fp);
+	return;
+
+unlock_reject:
+	spin_unlock_bh(&ep->ex_lock);
+	fc_exch_release(ep);	/* drop hold from fc_exch_find */
+reject:
+	fc_seq_ls_rjt(sp, ELS_RJT_LOGIC, explan);
+	fc_frame_free(fp);
+}
+
+struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lp,
+				      enum fc_class class,
+				      u16 min_xid,
+				      u16 max_xid,
+				      u32 em_idx)
+{
+	struct fc_exch_mgr *mp;
+	size_t len;
+
+	if (max_xid <= min_xid || min_xid == 0 || max_xid == FC_XID_UNKNOWN) {
+		FC_DBG("Invalid min_xid 0x%x and max_xid 0x%x\n",
+		       min_xid, max_xid);
+		return NULL;
+	}
+
+	/*
+	 * Memory need for EM
+	 */
+	len = (max_xid - min_xid + 1) * (sizeof(struct fc_exch *));
+	len += sizeof(struct fc_exch_mgr);
+
+	mp = kzalloc(len, GFP_ATOMIC);
+	if (mp) {
+		mp->class = class;
+		mp->total_exches = 0;
+		mp->exches = (struct fc_exch **)(mp + 1);
+		mp->last_xid = min_xid - 1;
+		mp->min_xid = min_xid;
+		mp->max_xid = max_xid;
+		mp->lp = lp;
+		INIT_LIST_HEAD(&mp->ex_list);
+
+		spin_lock_init(&mp->em_lock);
+
+		snprintf(mp->em_cache_name, sizeof(mp->em_cache_name),
+			 "libfc-host%d-EM%d",
+			 lp->host->host_no, em_idx);
+		mp->em_cache = kmem_cache_create(mp->em_cache_name,
+					      sizeof(struct fc_exch),
+					      0, SLAB_HWCACHE_ALIGN,
+					      NULL);
+
+		if (!mp->em_cache) {
+			kfree(mp);
+			mp = NULL;
+		}
+	}
+	return mp;
+}
+EXPORT_SYMBOL(fc_exch_mgr_alloc);
+
+void fc_exch_mgr_free(struct fc_exch_mgr *mp)
+{
+	WARN_ON(!mp);
+	/*
+	 * The total exch count must be zero
+	 * before freeing exchange manager.
+	 */
+	WARN_ON(mp->total_exches != 0);
+	kmem_cache_destroy(mp->em_cache);
+	kfree(mp);
+}
+EXPORT_SYMBOL(fc_exch_mgr_free);
+
+void fc_exch_done(struct fc_seq *sp)
+{
+	struct fc_exch *ep;
+
+	ep = fc_seq_exch(sp);
+	spin_lock_bh(&ep->ex_lock);
+	ep->esb_stat |= ESB_ST_COMPLETE;
+	ep->resp = NULL;
+	if (timer_pending(&ep->ex_timer)) {
+		del_timer(&ep->ex_timer);
+		atomic_dec(&ep->ex_refcnt);	/* drop hold for timer */
+	}
+	spin_unlock_bh(&ep->ex_lock);
+	fc_exch_release(ep);
+}
+EXPORT_SYMBOL(fc_exch_done);
+
+struct fc_exch *fc_exch_get(struct fc_lport *lp, struct fc_frame *fp)
+{
+	if (!lp || !lp->emp)
+		return NULL;
+	return fc_exch_alloc(lp->emp, 0);
+}
+EXPORT_SYMBOL(fc_exch_get);
+
+struct fc_seq *fc_exch_seq_send(struct fc_lport *lp,
+				struct fc_frame *fp,
+				void (*resp)(struct fc_seq *,
+					     struct fc_frame *fp,
+					     void *arg),
+				void *resp_arg, u32 timer_msec,
+				u32 sid, u32 did, u32 f_ctl)
+{
+	struct fc_exch *ep;
+	struct fc_seq *sp = NULL;
+	struct fc_frame_header *fh;
+	u16 fill;
+
+	ep = lp->tt.exch_get(lp, fp);
+	if (!ep) {
+		fc_frame_free(fp);
+		return NULL;
+	}
+	ep->esb_stat |= ESB_ST_SEQ_INIT;
+	fc_exch_set_addr(ep, sid, did);
+	ep->resp = resp;
+	ep->resp_arg = resp_arg;
+	ep->r_a_tov = FC_DEF_R_A_TOV;
+	ep->lp = lp;
+	sp = &ep->seq;
+	WARN_ON((sp->f_ctl & FC_FC_END_SEQ) != 0);
+
+	fr_sof(fp) = ep->class;
+	if (sp->cnt)
+		fr_sof(fp) = fc_sof_normal(ep->class);
+	fr_eof(fp) = FC_EOF_T;
+	if (fc_sof_needs_ack(ep->class))
+		fr_eof(fp) = FC_EOF_N;
+
+	fc_seq_fill_hdr(sp, fp);
+	/*
+	 * Form f_ctl.
+	 * The number of fill bytes to make the length a 4-byte multiple is
+	 * the low order 2-bits of the f_ctl.  The fill itself will have been
+	 * cleared by the frame allocation.
+	 * After this, the length will be even, as expected by the transport.
+	 * Don't include the fill in the f_ctl saved in the sequence.
+	 */
+	fill = fr_len(fp) & 3;
+	if (fill) {
+		fill = 4 - fill;
+		/* TODO, this may be a problem with fragmented skb */
+		skb_put(fp_skb(fp), fill);
+	}
+	f_ctl |= ep->f_ctl;
+	fh = fc_frame_header_get(fp);
+	hton24(fh->fh_f_ctl, f_ctl | fill);
+	fh->fh_seq_cnt = htons(sp->cnt++);
+
+	if (unlikely(lp->tt.frame_send(lp, fp)))
+		goto err;
+
+	spin_lock_bh(&ep->ex_lock);
+	if (timer_msec)
+		fc_exch_timer_set_locked(ep, timer_msec);
+	sp->f_ctl = f_ctl;	/* save for possible abort */
+	ep->f_ctl &= ~FC_FC_FIRST_SEQ;	/* not first seq */
+	if (f_ctl & FC_FC_SEQ_INIT)
+		ep->esb_stat &= ~ESB_ST_SEQ_INIT;
+	spin_unlock_bh(&ep->ex_lock);
+	return sp;
+err:
+	fc_exch_complete(ep);
+	return NULL;
+}
+EXPORT_SYMBOL(fc_exch_seq_send);
+
+/*
+ * Receive a frame
+ */
+void fc_exch_recv(struct fc_lport *lp, struct fc_exch_mgr *mp,
+		  struct fc_frame *fp)
+{
+	struct fc_frame_header *fh = fc_frame_header_get(fp);
+	u32 f_ctl;
+
+	if (!lp || !mp || (lp->state == LPORT_ST_NONE)) {
+		FC_DBG("fc_lport or EM is not allocated or not configured\n");
+		fc_frame_free(fp);
+		return;
+	}
+
+	/*
+	 * If frame is marked invalid, just drop it.
+	 */
+	f_ctl = ntoh24(fh->fh_f_ctl);
+	switch (fr_eof(fp)) {
+	case FC_EOF_T:
+		if (f_ctl & FC_FC_END_SEQ)
+			skb_trim(fp_skb(fp), fr_len(fp) - FC_FC_FILL(f_ctl));
+		/* fall through */
+	case FC_EOF_N:
+		if (fh->fh_type == FC_TYPE_BLS)
+			fc_exch_recv_bls(mp, fp);
+		else if ((f_ctl & (FC_FC_EX_CTX | FC_FC_SEQ_CTX)) ==
+			 FC_FC_EX_CTX)
+			fc_exch_recv_seq_resp(mp, fp);
+		else if (f_ctl & FC_FC_SEQ_CTX)
+			fc_exch_recv_resp(mp, fp);
+		else
+			fc_exch_recv_req(lp, mp, fp);
+		break;
+	default:
+		FC_DBG("dropping invalid frame (eof %x)\n", fr_eof(fp));
+		fc_frame_free(fp);
+		break;
+	}
+}
+EXPORT_SYMBOL(fc_exch_recv);
+
+int fc_exch_init(struct fc_lport *lp)
+{
+	if (!lp->tt.exch_get) {
+		/*
+		 *  exch_put() should be NULL if
+		 *  exch_get() is NULL
+		 */
+		WARN_ON(lp->tt.exch_put);
+		lp->tt.exch_get = fc_exch_get;
+	}
+
+	if (!lp->tt.seq_start_next)
+		lp->tt.seq_start_next = fc_seq_start_next;
+
+	if (!lp->tt.exch_seq_send)
+		lp->tt.exch_seq_send = fc_exch_seq_send;
+
+	if (!lp->tt.seq_send)
+		lp->tt.seq_send = fc_seq_send;
+
+	if (!lp->tt.seq_els_rsp_send)
+		lp->tt.seq_els_rsp_send = fc_seq_els_rsp_send;
+
+	if (!lp->tt.exch_done)
+		lp->tt.exch_done = fc_exch_done;
+
+	if (!lp->tt.exch_mgr_reset)
+		lp->tt.exch_mgr_reset = fc_exch_mgr_reset;
+
+	if (!lp->tt.seq_exch_abort)
+		lp->tt.seq_exch_abort = fc_seq_exch_abort;
+
+	if (!lp->tt.seq_get_xids)
+		lp->tt.seq_get_xids = fc_seq_get_xids;
+
+	if (!lp->tt.seq_set_rec_data)
+		lp->tt.seq_set_rec_data = fc_seq_set_rec_data;
+	return 0;
+}
+EXPORT_SYMBOL(fc_exch_init);
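+
+/*
+ * Usage note (illustrative): an LLD that wants its own handler can assign,
+ * e.g., lp->tt.exch_get before calling fc_exch_init(); the code above only
+ * fills in template entries that are still NULL.
+ */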
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
new file mode 100644
index 0000000..a5f7aba
--- /dev/null
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -0,0 +1,2121 @@
+/*
+ * Copyright(c) 2007 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 Red Hat, Inc.  All rights reserved.
+ * Copyright(c) 2008 Mike Christie
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/scatterlist.h>
+#include <linux/err.h>
+#include <linux/crc32.h>
+
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_cmnd.h>
+
+#include <scsi/fc/fc_fc2.h>
+
+#include <scsi/libfc/libfc.h>
+
+int fc_fcp_debug;
+static struct kmem_cache *scsi_pkt_cachep;
+
+/* SRB state definitions */
+#define FC_SRB_FREE		0		/* cmd is free */
+#define FC_SRB_CMD_SENT		(1 << 0)	/* cmd has been sent */
+#define FC_SRB_RCV_STATUS	(1 << 1)	/* response has arrived */
+#define FC_SRB_ABORT_PENDING	(1 << 2)	/* cmd abort sent to device */
+#define FC_SRB_ABORTED		(1 << 3)	/* abort acknowledged */
+#define FC_SRB_DISCONTIG	(1 << 4)	/* non-sequential data recvd */
+#define FC_SRB_COMPL		(1 << 5)	/* fc_io_compl has been run */
+#define FC_SRB_FCP_PROCESSING_TMO (1 << 6)	/* timer function processing */
+
+#define FC_SRB_READ		    (1 << 1)
+#define FC_SRB_WRITE		    (1 << 0)
+
+/*
+ * scsi request structure, one per outstanding scsi command
+ */
+struct fc_fcp_pkt {
+	/*
+	 * housekeeping stuff
+	 */
+	struct fc_lport *lp;	/* handle to hba struct */
+	u16		state;		/* scsi_pkt state */
+	u16		tgt_flags;	/* target flags	 */
+	atomic_t	ref_cnt;	/* only used by REC ELS */
+	spinlock_t	scsi_pkt_lock;	/* Must be taken before the host lock
+					 * if both are held at the same time */
+	/*
+	 * SCSI I/O related stuff
+	 */
+	struct scsi_cmnd *cmd;		/* scsi command pointer. set/clear
+					 * under host lock */
+	struct list_head list;		/* tracks queued commands. access under
+					 * host lock */
+	/*
+	 * timeout related stuff
+	 */
+	struct timer_list timer;	/* command timer */
+	struct completion tm_done;
+	int	wait_for_comp;
+	unsigned long	start_time;	/* start time (jiffies) */
+	unsigned long	end_time;	/* end time (jiffies) */
+	unsigned long	last_pkt_time;	/* jiffies of last frame received */
+
+	/*
+	 * scsi cmd and data transfer information
+	 */
+	u32		data_len;
+	/*
+	 * transport-related variables
+	 */
+	struct fcp_cmnd cdb_cmd;
+	size_t		xfer_len;
+	u32		xfer_contig_end; /* offset of end of contiguous xfer */
+	u16		max_payload;	/* max payload size in bytes */
+
+	/*
+	 * scsi/fcp return status
+	 */
+	u32		io_status;	/* SCSI result upper 24 bits */
+	u8		cdb_status;
+	u8		status_code;	/* FCP I/O status */
+	/* bit 3: underrun, bit 2: overrun */
+	u8		scsi_comp_flags;
+	u32		req_flags;	/* bit 1: read, bit 0: write */
+	u32		scsi_resid;	/* residual length */
+
+	struct fc_rport	*rport;		/* remote port pointer */
+	struct fc_seq	*seq_ptr;	/* current sequence pointer */
+	/*
+	 * Error Processing
+	 */
+	u8		recov_retry;	/* count of recovery retries */
+	struct fc_seq	*recov_seq;	/* sequence for REC or SRR */
+};
+
+/*
+ * The SCp.ptr should be tested and set under the host lock. NULL indicates
+ * that the command has been returned to the scsi layer.
+ */
+#define CMD_SP(Cmnd)		    ((struct fc_fcp_pkt *)(Cmnd)->SCp.ptr)
+#define CMD_ENTRY_STATUS(Cmnd)	    ((Cmnd)->SCp.have_data_in)
+#define CMD_COMPL_STATUS(Cmnd)	    ((Cmnd)->SCp.this_residual)
+#define CMD_SCSI_STATUS(Cmnd)	    ((Cmnd)->SCp.Status)
+#define CMD_RESID_LEN(Cmnd)	    ((Cmnd)->SCp.buffers_residual)
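+
+/*
+ * Example flow (illustrative): fc_queuecommand() stores the fc_fcp_pkt
+ * pointer in CMD_SP(sc_cmd) under the host lock, and fc_io_compl()
+ * clears it there before handing the command back to the midlayer.
+ */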
+
+struct fc_fcp_internal {
+	mempool_t	*scsi_pkt_pool;
+	struct list_head scsi_pkt_queue;
+};
+
+#define fc_get_scsi_internal(x)	((struct fc_fcp_internal *)(x)->scsi_priv)
+
+/*
+ * function prototypes
+ * FC scsi I/O related functions
+ */
+static void fc_fcp_recv_data(struct fc_fcp_pkt *, struct fc_frame *);
+static void fc_fcp_recv(struct fc_seq *, struct fc_frame *, void *);
+static void fc_fcp_fcp_resp(struct fc_fcp_pkt *, struct fc_frame *);
+static void fc_fcp_complete(struct fc_fcp_pkt *);
+static void fc_tm_done(struct fc_seq *, struct fc_frame *, void *);
+static void fc_fcp_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp);
+static void fc_abort_internal(struct fc_fcp_pkt *);
+static void fc_timeout_error(struct fc_fcp_pkt *);
+static void fc_fcp_retry_cmd(struct fc_fcp_pkt *);
+static int fc_fcp_send_cmd(struct fc_fcp_pkt *);
+static void fc_fcp_timeout(unsigned long data);
+static void fc_fcp_rec(struct fc_fcp_pkt *);
+static void fc_fcp_rec_error(struct fc_fcp_pkt *, struct fc_frame *);
+static void fc_fcp_rec_resp(struct fc_seq *, struct fc_frame *, void *);
+static void fc_io_compl(struct fc_fcp_pkt *);
+
+static void fc_fcp_srr(struct fc_fcp_pkt *, enum fc_rctl, u32);
+static void fc_fcp_srr_resp(struct fc_seq *, struct fc_frame *, void *);
+static void fc_fcp_srr_error(struct fc_fcp_pkt *, struct fc_frame *);
+
+/*
+ * command status codes
+ */
+#define FC_COMPLETE		    0
+#define FC_CMD_ABORTED		    1
+#define FC_CMD_RESET		    2
+#define FC_CMD_PLOGO		    3
+#define FC_SNS_RCV		    4
+#define FC_TRANS_ERR		    5
+#define FC_DATA_OVRRUN		    6
+#define FC_DATA_UNDRUN		    7
+#define FC_ERROR		    8
+#define FC_HRD_ERROR		    9
+#define FC_CMD_TIME_OUT		   10
+
+/*
+ * Error recovery timeout values.
+ */
+#define FC_SCSI_ER_TIMEOUT	(10 * HZ)
+#define FC_SCSI_TM_TOV		(10 * HZ)
+#define FC_SCSI_REC_TOV		(2 * HZ)
+
+#define FC_MAX_ERROR_CNT  5
+#define FC_MAX_RECOV_RETRY 3
+
+#define FC_FCP_DFLT_QUEUE_DEPTH 32
+
+/**
+ * fc_fcp_pkt_alloc - allocation routine for scsi_pkt packet
+ * @lp:		fc lport struct
+ *
+ * This is used by the upper layer scsi driver.
+ * Return Value : scsi_pkt structure or NULL on allocation failure.
+ * Context	: called from process context. No locking required.
+ */
+static struct fc_fcp_pkt *fc_fcp_pkt_alloc(struct fc_lport *lp)
+{
+	struct fc_fcp_internal *si = fc_get_scsi_internal(lp);
+	struct fc_fcp_pkt *sp;
+
+	sp = mempool_alloc(si->scsi_pkt_pool, GFP_ATOMIC);
+	if (sp) {
+		memset(sp, 0, sizeof(*sp));
+		sp->lp = lp;
+		atomic_set(&sp->ref_cnt, 1);
+		init_timer(&sp->timer);
+		INIT_LIST_HEAD(&sp->list);
+	}
+	return sp;
+}
+
+/**
+ * fc_fcp_pkt_free - free routine for scsi_pkt packet
+ * @sp:		fcp packet struct
+ *
+ * This is used by the upper layer scsi driver.
+ * Context	: called from process and interrupt context.
+ *		  No locking required.
+ */
+static void fc_fcp_pkt_free(struct fc_fcp_pkt *sp)
+{
+	if (atomic_dec_and_test(&sp->ref_cnt)) {
+		struct fc_fcp_internal *si = fc_get_scsi_internal(sp->lp);
+
+		mempool_free(sp, si->scsi_pkt_pool);
+	}
+}
+
+static void fc_fcp_pkt_hold(struct fc_fcp_pkt *sp)
+{
+	atomic_inc(&sp->ref_cnt);
+}
+
+static void fc_fcp_pkt_release(struct fc_fcp_pkt *sp)
+{
+	fc_fcp_pkt_free(sp);
+}
+
+/**
+ * fc_fcp_lock_pkt - lock a packet and get a ref to it.
+ * @fsp:	fcp packet
+ *
+ * We should only return an error if we return a command to scsi-ml before
+ * getting a response. This can happen if we send an abort but do not wait
+ * for the response, so that the abort and the command pass each other on
+ * the wire or in the network layer.
+ *
+ * Note: this function locks the packet and gets a reference to allow
+ * callers to call the completion function while the lock is held and
+ * not have to worry about the packet's refcount.
+ *
+ * TODO: Maybe we should just have callers grab/release the lock and
+ * have a function that they call to verify the fsp and grab a ref if
+ * needed.
+ */
+static inline int fc_fcp_lock_pkt(struct fc_fcp_pkt *fsp)
+{
+	/*
+	 * TODO mnc: locking is not right. This can be called
+	 * from a timer context so we need to stop bottom halves from the
+	 * thread caller.
+	 *
+	 * It can also be called while sending packets, which can result
+	 * in bh's being enabled and disabled.
+	 */
+	spin_lock(&fsp->scsi_pkt_lock);
+	if (!fsp->cmd) {
+		spin_unlock(&fsp->scsi_pkt_lock);
+		FC_DBG("Invalid scsi cmd pointer on fcp packet.\n");
+		return -EINVAL;
+	}
+
+	fc_fcp_pkt_hold(fsp);
+	return 0;
+}
+
+static inline void fc_fcp_unlock_pkt(struct fc_fcp_pkt *fsp)
+{
+	spin_unlock(&fsp->scsi_pkt_lock);
+	fc_fcp_pkt_release(fsp);
+}
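+
+/*
+ * A sketch of the typical lock/unlock pairing (illustrative only):
+ *
+ *	if (fc_fcp_lock_pkt(fsp))
+ *		return;		(already completed; no reference taken)
+ *	... work on the fsp under the lock ...
+ *	fc_fcp_unlock_pkt(fsp);	(drops both the lock and the reference)
+ */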
+
+static void fc_fcp_timer_set(struct fc_fcp_pkt *fsp, unsigned long delay)
+{
+	if (!(fsp->state & FC_SRB_COMPL))
+		mod_timer(&fsp->timer, jiffies + delay);
+}
+
+/*
+ * End a request with a retry suggestion.
+ */
+static void fc_fcp_retry(struct fc_fcp_pkt *fsp)
+{
+	fsp->status_code = FC_ERROR;
+	fsp->io_status = SUGGEST_RETRY << 24;
+	fc_fcp_complete(fsp);
+}
+
+/*
+ * Receive SCSI data from target.
+ * Called after receiving solicited data.
+ */
+static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
+{
+	struct scsi_cmnd *sc = fsp->cmd;
+	struct fc_lport *lp = fsp->lp;
+	struct fcoe_dev_stats *sp;
+	struct fc_frame_header *fh;
+	size_t start_offset;
+	size_t offset;
+	u32 crc;
+	u32 copy_len = 0;
+	size_t len;
+	void *buf;
+	struct scatterlist *sg;
+	size_t remaining;
+
+	fh = fc_frame_header_get(fp);
+	offset = ntohl(fh->fh_parm_offset);
+	start_offset = offset;
+	len = fr_len(fp) - sizeof(*fh);
+	buf = fc_frame_payload_get(fp, 0);
+
+	if (offset + len > fsp->data_len) {
+		/*
+		 * this should never happen
+		 */
+		if ((fr_flags(fp) & FCPHF_CRC_UNCHECKED) &&
+		    fc_frame_crc_check(fp))
+			goto crc_err;
+		if (fc_fcp_debug) {
+			FC_DBG("data received past end.	 "
+			       "len %zx offset %zx "
+			       "data_len %x\n", len, offset, fsp->data_len);
+		}
+		fc_fcp_retry(fsp);
+		return;
+	}
+	if (offset != fsp->xfer_len)
+		fsp->state |= FC_SRB_DISCONTIG;
+
+	crc = 0;
+	if (fr_flags(fp) & FCPHF_CRC_UNCHECKED)
+		crc = crc32(~0, (u8 *) fh, sizeof(*fh));
+
+	sg = scsi_sglist(sc);
+	remaining = len;
+
+	while (remaining > 0 && sg) {
+		size_t off;
+		void *page_addr;
+		size_t sg_bytes;
+
+		if (offset >= sg->length) {
+			offset -= sg->length;
+			sg = sg_next(sg);
+			continue;
+		}
+		sg_bytes = min(remaining, sg->length - offset);
+
+		/*
+		 * The scatterlist item may be bigger than PAGE_SIZE,
+		 * but we are limited to mapping PAGE_SIZE at a time.
+		 */
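+		/*
+		 * E.g. (illustrative numbers, 4 KiB pages): off == 5000
+		 * maps page (off >> PAGE_SHIFT) == 1 of the sg entry and
+		 * copies at most PAGE_SIZE - (off & ~PAGE_MASK) ==
+		 * 4096 - 904 == 3192 bytes before remapping.
+		 */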
+		off = offset + sg->offset;
+		sg_bytes = min(sg_bytes, (size_t)
+			       (PAGE_SIZE - (off & ~PAGE_MASK)));
+		page_addr = kmap_atomic(sg_page(sg) + (off >> PAGE_SHIFT),
+					KM_SOFTIRQ0);
+		if (!page_addr)
+			break;		/* XXX panic? */
+
+		if (!(fsp->state & FC_SRB_ABORT_PENDING)) {
+			if (fr_flags(fp) & FCPHF_CRC_UNCHECKED)
+				crc = crc32(crc, buf, sg_bytes);
+			memcpy((char *)page_addr + (off & ~PAGE_MASK), buf,
+			       sg_bytes);
+		}
+		kunmap_atomic(page_addr, KM_SOFTIRQ0);
+		buf += sg_bytes;
+		offset += sg_bytes;
+		remaining -= sg_bytes;
+		copy_len += sg_bytes;
+	}
+
+	if (fr_flags(fp) & FCPHF_CRC_UNCHECKED) {
+		buf = fc_frame_payload_get(fp, 0);
+		if (len % 4) {
+			crc = crc32(crc, buf + len, 4 - (len % 4));
+			len += 4 - (len % 4);
+		}
+
+		if (~crc != le32_to_cpu(*(__le32 *)(buf + len))) {
+crc_err:
+			sp = lp->dev_stats[smp_processor_id()];
+			sp->ErrorFrames++;
+			if (sp->InvalidCRCCount++ < 5)
+				FC_DBG("CRC error on data frame\n");
+			/*
+			 * Assume the frame is total garbage.
+			 * We may have copied it over the good part
+			 * of the buffer.
+			 * If so, we need to retry the entire operation.
+			 * Otherwise, ignore it.
+			 */
+			if (fsp->state & FC_SRB_DISCONTIG)
+				fc_fcp_retry(fsp);
+			return;
+		}
+	}
+
+	if (fsp->xfer_contig_end == start_offset)
+		fsp->xfer_contig_end += copy_len;
+	fsp->xfer_len += copy_len;
+
+	/*
+	 * In the very rare event that this data arrived after the response
+	 * and completes the transfer, call the completion handler.
+	 */
+	if (unlikely(fsp->state & FC_SRB_RCV_STATUS) &&
+	    fsp->xfer_len == fsp->data_len - fsp->scsi_resid)
+		fc_fcp_complete(fsp);
+}
+
+/*
+ * Send SCSI data to target.
+ * Called after receiving a Transfer Ready data descriptor.
+ */
+static void fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *sp,
+			     size_t offset, size_t len,
+			     struct fc_frame *oldfp, int sg_supp)
+{
+	struct scsi_cmnd *sc;
+	struct scatterlist *sg;
+	struct fc_frame *fp = NULL;
+	struct fc_lport *lp = fsp->lp;
+	size_t remaining;
+	size_t mfs;
+	size_t tlen;
+	size_t sg_bytes;
+	size_t frame_offset;
+	int error;
+	void *data = NULL;
+	void *page_addr;
+	int using_sg = sg_supp;
+	u32 f_ctl;
+
+	if (unlikely(offset + len > fsp->data_len)) {
+		/*
+		 * this should never happen
+		 */
+		if (fc_fcp_debug) {
+			FC_DBG("xfer-ready past end. len %zx offset %zx\n",
+			       len, offset);
+		}
+		fc_abort_internal(fsp);
+		return;
+	} else if (offset != fsp->xfer_len) {
+		/*
+		 * Out of Order Data Request - no problem, but unexpected.
+		 */
+		if (fc_fcp_debug) {
+			FC_DBG("xfer-ready non-contiguous. "
+			       "len %zx offset %zx\n", len, offset);
+		}
+	}
+	mfs = fsp->max_payload;
+	WARN_ON(mfs > FC_MAX_PAYLOAD);
+	WARN_ON(mfs < FC_MIN_MAX_PAYLOAD);
+	if (mfs > 512)
+		mfs &= ~(512 - 1);	/* round down to block size */
+	WARN_ON(mfs < FC_MIN_MAX_PAYLOAD);	/* won't go below 256 */
+	WARN_ON(!len);		/* len is a size_t, so only zero is invalid */
+	sc = fsp->cmd;
+
+	remaining = len;
+	frame_offset = offset;
+	tlen = 0;
+	sp = lp->tt.seq_start_next(sp);
+	f_ctl = FC_FC_REL_OFF;
+	WARN_ON(!sp);
+
+	/*
+	 * If a get_page()/put_page() will fail, don't use sg lists
+	 * in the fc_frame structure.
+	 *
+	 * The put_page() may be long after the I/O has completed
+	 * in the case of FCoE, since the network driver does it
+	 * via free_skb().  See the test in free_pages_check().
+	 *
+	 * Test this case with 'dd </dev/zero >/dev/st0 bs=64k'.
+	 */
+	if (using_sg) {
+		for (sg = scsi_sglist(sc); sg; sg = sg_next(sg)) {
+			if (page_count(sg_page(sg)) == 0 ||
+			    (sg_page(sg)->flags & (1 << PG_lru |
+						   1 << PG_private |
+						   1 << PG_locked |
+						   1 << PG_active |
+						   1 << PG_slab |
+						   1 << PG_swapcache |
+						   1 << PG_writeback |
+						   1 << PG_reserved |
+						   1 << PG_buddy))) {
+				using_sg = 0;
+				break;
+			}
+		}
+	}
+	sg = scsi_sglist(sc);
+
+	while (remaining > 0 && sg) {
+		if (offset >= sg->length) {
+			offset -= sg->length;
+			sg = sg_next(sg);
+			continue;
+		}
+		if (!fp) {
+			tlen = min(mfs, remaining);
+
+			/*
+			 * TODO.  Temporary workaround.	 fc_seq_send() can't
+			 * handle odd lengths in non-linear skbs.
+			 * This will be the final fragment only.
+			 */
+			if (tlen % 4)
+				using_sg = 0;
+			if (using_sg) {
+				fp = _fc_frame_alloc(lp, 0);
+			} else {
+				fp = fc_frame_alloc(lp, tlen);
+				data = (void *)(fr_hdr(fp)) +
+					sizeof(struct fc_frame_header);
+			}
+			BUG_ON(!fp);
+			fc_frame_setup(fp, FC_RCTL_DD_SOL_DATA, FC_TYPE_FCP);
+			fc_frame_set_offset(fp, frame_offset);
+		}
+		sg_bytes = min(tlen, sg->length - offset);
+		if (using_sg) {
+			WARN_ON(skb_shinfo(fp_skb(fp))->nr_frags >
+				FC_FRAME_SG_LEN);
+			get_page(sg_page(sg));
+			skb_fill_page_desc(fp_skb(fp),
+					   skb_shinfo(fp_skb(fp))->nr_frags,
+					   sg_page(sg), sg->offset + offset,
+					   sg_bytes);
+			fp_skb(fp)->data_len += sg_bytes;
+			fr_len(fp) += sg_bytes;
+			fp_skb(fp)->truesize += PAGE_SIZE;
+		} else {
+			size_t off = offset + sg->offset;
+
+			/*
+			 * The scatterlist item may be bigger than PAGE_SIZE,
+			 * but we must not cross pages inside the kmap.
+			 */
+			sg_bytes = min(sg_bytes, (size_t) (PAGE_SIZE -
+							   (off & ~PAGE_MASK)));
+			page_addr = kmap_atomic(sg_page(sg) +
+						(off >> PAGE_SHIFT),
+						KM_SOFTIRQ0);
+			memcpy(data, (char *)page_addr + (off & ~PAGE_MASK),
+			       sg_bytes);
+			kunmap_atomic(page_addr, KM_SOFTIRQ0);
+			data += sg_bytes;
+		}
+		offset += sg_bytes;
+		frame_offset += sg_bytes;
+		tlen -= sg_bytes;
+		remaining -= sg_bytes;
+
+		if (remaining == 0) {
+			/*
+			 * Send a request sequence with
+			 * transfer sequence initiative.
+			 */
+			f_ctl |= FC_FC_SEQ_INIT | FC_FC_END_SEQ;
+			error = lp->tt.seq_send(lp, sp, fp, f_ctl);
+		} else if (tlen == 0) {
+			/*
+			 * Send a full frame of the sequence.
+			 */
+			error = lp->tt.seq_send(lp, sp, fp, f_ctl);
+		} else {
+			continue;
+		}
+		fp = NULL;
+
+		if (error) {
+			WARN_ON(1);		/* send error should be rare */
+			fc_fcp_retry(fsp);
+			return;
+		}
+	}
+	fsp->xfer_len += len;	/* premature count? */
+}
+
+/*
+ * The exchange manager calls this routine to process scsi
+ * exchanges.
+ *
+ * Return   : None
+ * Context  : called from Soft IRQ context;
+ *	      must not be called while holding the list lock
+ */
+static void fc_fcp_recv(struct fc_seq *sp, struct fc_frame *fp, void *arg)
+{
+	struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)arg;
+	struct fc_lport *lp;
+	struct fc_frame_header *fh;
+	struct fc_data_desc *dd;
+	u8 r_ctl;
+
+	if (IS_ERR(fp))
+		goto errout;
+
+	fh = fc_frame_header_get(fp);
+	r_ctl = fh->fh_r_ctl;
+	lp = fsp->lp;
+
+	if (!(lp->state & LPORT_ST_READY))
+		goto out;
+	if (fc_fcp_lock_pkt(fsp))
+		goto out;
+	fsp->last_pkt_time = jiffies;
+
+	if (r_ctl == FC_RCTL_DD_DATA_DESC) {
+		/*
+		 * received XFER RDY from the target
+		 * need to send data to the target
+		 */
+		WARN_ON(fr_flags(fp) & FCPHF_CRC_UNCHECKED);
+		dd = fc_frame_payload_get(fp, sizeof(*dd));
+		WARN_ON(!dd);
+
+		fc_fcp_send_data(fsp, sp,
+				 (size_t) ntohl(dd->dd_offset),
+				 (size_t) ntohl(dd->dd_len), fp,
+				 lp->capabilities & TRANS_C_SG);
+		lp->tt.seq_set_rec_data(sp, fsp->xfer_len);
+	} else if (r_ctl == FC_RCTL_DD_SOL_DATA) {
+		/*
+		 * received a DATA frame
+		 * next we will copy the data to the system buffer
+		 */
+		WARN_ON(fr_len(fp) < sizeof(*fh));	/* len may be 0 */
+		fc_fcp_recv_data(fsp, fp);
+		lp->tt.seq_set_rec_data(sp, fsp->xfer_contig_end);
+	} else if (r_ctl == FC_RCTL_DD_CMD_STATUS) {
+		WARN_ON(fr_flags(fp) & FCPHF_CRC_UNCHECKED);
+
+		fc_fcp_fcp_resp(fsp, fp);
+	} else {
+		FC_DBG("unexpected frame.  r_ctl %x\n", r_ctl);
+	}
+	fc_fcp_unlock_pkt(fsp);
+out:
+	fc_frame_free(fp);
+errout:
+	if (IS_ERR(fp))
+		fc_fcp_error(fsp, fp);
+}
+
+static void fc_fcp_fcp_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
+{
+	struct fc_frame_header *fh;
+	struct fcp_resp *fc_rp;
+	struct fcp_resp_ext *rp_ex;
+	struct fcp_resp_rsp_info *fc_rp_info;
+	u32 plen;
+	u32 expected_len;
+	u32 respl = 0;
+	u32 snsl = 0;
+	u8 flags = 0;
+
+	plen = fr_len(fp);
+	fh = (struct fc_frame_header *)fr_hdr(fp);
+	if (unlikely(plen < sizeof(*fh) + sizeof(*fc_rp)))
+		goto len_err;
+	plen -= sizeof(*fh);
+	fc_rp = (struct fcp_resp *)(fh + 1);
+	fsp->cdb_status = fc_rp->fr_status;
+	flags = fc_rp->fr_flags;
+	fsp->scsi_comp_flags = flags;
+	expected_len = fsp->data_len;
+
+	if (unlikely((flags & ~FCP_CONF_REQ) || fc_rp->fr_status)) {
+		rp_ex = (void *)(fc_rp + 1);
+		if (flags & (FCP_RSP_LEN_VAL | FCP_SNS_LEN_VAL)) {
+			if (plen < sizeof(*fc_rp) + sizeof(*rp_ex))
+				goto len_err;
+			fc_rp_info = (struct fcp_resp_rsp_info *)(rp_ex + 1);
+			if (flags & FCP_RSP_LEN_VAL) {
+				respl = ntohl(rp_ex->fr_rsp_len);
+				if (respl != sizeof(*fc_rp_info))
+					goto len_err;
+				if (fsp->wait_for_comp) {
+					/* Abuse cdb_status for rsp code */
+					fsp->cdb_status = fc_rp_info->rsp_code;
+					complete(&fsp->tm_done);
+					/*
+					 * tmfs will not have any scsi cmd so
+					 * exit here
+					 */
+					return;
+				} else
+					goto err;
+			}
+			if (flags & FCP_SNS_LEN_VAL) {
+				snsl = ntohl(rp_ex->fr_sns_len);
+				if (snsl > SCSI_SENSE_BUFFERSIZE)
+					snsl = SCSI_SENSE_BUFFERSIZE;
+				memcpy(fsp->cmd->sense_buffer,
+				       (char *)fc_rp_info + respl, snsl);
+			}
+		}
+		if (flags & (FCP_RESID_UNDER | FCP_RESID_OVER)) {
+			if (plen < sizeof(*fc_rp) + sizeof(rp_ex->fr_resid))
+				goto len_err;
+			if (flags & FCP_RESID_UNDER) {
+				fsp->scsi_resid = ntohl(rp_ex->fr_resid);
+				/*
+				 * The cmnd->underflow is the minimum number of
+				 * bytes that must be transferred for this
+				 * command.  Provided a sense condition is not
+				 * present, make sure the actual amount
+				 * transferred is at least the underflow value
+				 * or fail.
+				 */
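+				/*
+				 * E.g. (illustrative): bufflen 4096 and
+				 * underflow 4096 with a 512-byte resid
+				 * means only 3584 bytes arrived, so the
+				 * command is failed below.
+				 */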
+				if (!(flags & FCP_SNS_LEN_VAL) &&
+				    (fc_rp->fr_status == 0) &&
+				    (scsi_bufflen(fsp->cmd) -
+				     fsp->scsi_resid) < fsp->cmd->underflow)
+					goto err;
+				expected_len -= fsp->scsi_resid;
+			} else {
+				fsp->status_code = FC_ERROR;
+			}
+		}
+	}
+	fsp->state |= FC_SRB_RCV_STATUS;
+
+	/*
+	 * Check for missing or extra data frames.
+	 */
+	if (unlikely(fsp->xfer_len != expected_len)) {
+		if (fsp->xfer_len < expected_len) {
+			/*
+			 * Some data may be queued locally;
+			 * wait at least one jiffy to see if it is delivered.
+			 * If this expires without data, we may do SRR.
+			 */
+			fc_fcp_timer_set(fsp, 2);
+			return;
+		}
+		fsp->status_code = FC_DATA_OVRRUN;
+		FC_DBG("tgt %6x xfer len %zx greater than expected len %x. "
+		       "data len %x\n",
+		       fsp->rport->port_id,
+		       fsp->xfer_len, expected_len, fsp->data_len);
+	}
+	fc_fcp_complete(fsp);
+	return;
+
+len_err:
+	FC_DBG("short FCP response. flags 0x%x len %u respl %u snsl %u\n",
+	       flags, fr_len(fp), respl, snsl);
+err:
+	fsp->status_code = FC_ERROR;
+	fc_fcp_complete(fsp);
+}
+
+/**
+ * fc_fcp_complete - complete processing of a fcp packet
+ * @fsp:	fcp packet
+ *
+ * This function may sleep if a timer is pending. The packet lock must be
+ * held, and the host lock must not be held.
+ */
+static void fc_fcp_complete(struct fc_fcp_pkt *fsp)
+{
+	struct fc_lport *lp = fsp->lp;
+	struct fc_seq *sp;
+	u32 f_ctl;
+
+	/*
+	 * Test for transport underrun, independent of response underrun status.
+	 */
+	if (fsp->xfer_len < fsp->data_len && !fsp->io_status &&
+	    (!(fsp->scsi_comp_flags & FCP_RESID_UNDER) ||
+	     fsp->xfer_len < fsp->data_len - fsp->scsi_resid)) {
+		fsp->status_code = FC_DATA_UNDRUN;
+		fsp->io_status = SUGGEST_RETRY << 24;
+	}
+
+	sp = fsp->seq_ptr;
+	if (sp) {
+		fsp->seq_ptr = NULL;
+		if (unlikely(fsp->scsi_comp_flags & FCP_CONF_REQ)) {
+			struct fc_frame *conf_frame;
+			struct fc_seq *csp;
+
+			csp = lp->tt.seq_start_next(sp);
+			conf_frame = fc_frame_alloc(fsp->lp, 0);
+			if (conf_frame) {
+				fc_frame_setup(conf_frame,
+					       FC_RCTL_DD_SOL_CTL, FC_TYPE_FCP);
+				f_ctl = FC_FC_SEQ_INIT;
+				f_ctl |= FC_FC_LAST_SEQ | FC_FC_END_SEQ;
+				lp->tt.seq_send(lp, csp, conf_frame, f_ctl);
+			} else
+				lp->tt.exch_done(csp);
+		} else
+			lp->tt.exch_done(sp);
+	}
+	fc_io_compl(fsp);
+}
+
+/**
+ * fc_fcp_cleanup_each_cmd - run fn on each active command
+ * @lp:		logical port
+ * @id:		target id
+ * @lun:	lun
+ * @fn:		actor function
+ *
+ * If lun or id is -1, they are ignored.
+ *
+ * @fn must not call fc_io_compl on the fsp.
+ */
+static void fc_fcp_cleanup_each_cmd(struct fc_lport *lp, unsigned int id,
+				    unsigned int lun,
+				    void (*fn)(struct fc_fcp_pkt *))
+{
+	struct fc_fcp_internal *si = fc_get_scsi_internal(lp);
+	struct fc_fcp_pkt *fsp;
+	struct scsi_cmnd *sc_cmd;
+	unsigned long flags;
+
+	spin_lock_irqsave(lp->host->host_lock, flags);
+restart:
+	list_for_each_entry(fsp, &si->scsi_pkt_queue, list) {
+		sc_cmd = fsp->cmd;
+		if (id != -1 && scmd_id(sc_cmd) != id)
+			continue;
+
+		if (lun != -1 && sc_cmd->device->lun != lun)
+			continue;
+
+		fc_fcp_pkt_hold(fsp);
+		spin_unlock_irqrestore(lp->host->host_lock, flags);
+
+		if (!fc_fcp_lock_pkt(fsp)) {
+			fn(fsp);
+			fc_io_compl(fsp);
+			fc_fcp_unlock_pkt(fsp);
+		}
+
+		fc_fcp_pkt_release(fsp);
+		spin_lock_irqsave(lp->host->host_lock, flags);
+		/*
+		 * while we dropped the lock multiple pkts could
+		 * have been released, so we have to start over.
+		 */
+		goto restart;
+	}
+	spin_unlock_irqrestore(lp->host->host_lock, flags);
+}
+
+static void fc_fcp_cleanup_aborted_io(struct fc_fcp_pkt *fsp)
+{
+	struct fc_lport *lp = fsp->lp;
+
+	if (!(fsp->state & FC_SRB_RCV_STATUS)) {
+		if (fsp->seq_ptr) {
+			lp->tt.exch_done(fsp->seq_ptr);
+			fsp->seq_ptr = NULL;
+		}
+	}
+	fsp->status_code = FC_ERROR;
+	fsp->io_status = (SUGGEST_RETRY << 24);
+}
+
+static void fc_fcp_abort_io(struct fc_lport *lp)
+{
+	fc_fcp_cleanup_each_cmd(lp, -1, -1, fc_fcp_cleanup_aborted_io);
+}
+
+/**
+ * fc_fcp_pkt_send - send a fcp packet to the lower level.
+ * @lp:		fc lport
+ * @fsp:	fc packet.
+ *
+ * This is called by upper layer protocol.
+ * Return   : zero for success and -1 for failure
+ * Context  : called from queuecommand which can be called from process
+ *            or scsi soft irq.
+ * Locks    : called with the host lock and irqs disabled.
+ */
+static int fc_fcp_pkt_send(struct fc_lport *lp, struct fc_fcp_pkt *fsp)
+{
+	struct fc_fcp_internal *si = fc_get_scsi_internal(lp);
+	int rc;
+
+	fsp->cmd->SCp.ptr = (char *)fsp;
+	fsp->cdb_cmd.fc_dl = htonl(fsp->data_len);
+	fsp->cdb_cmd.fc_flags = fsp->req_flags & ~FCP_CFL_LEN_MASK;
+
+	int_to_scsilun(fsp->cmd->device->lun,
+		       (struct scsi_lun *)fsp->cdb_cmd.fc_lun);
+	memcpy(fsp->cdb_cmd.fc_cdb, fsp->cmd->cmnd, fsp->cmd->cmd_len);
+	list_add_tail(&fsp->list, &si->scsi_pkt_queue);
+
+	spin_unlock_irq(lp->host->host_lock);
+	rc = fc_fcp_send_cmd(fsp);
+	spin_lock_irq(lp->host->host_lock);
+	if (rc)
+		list_del(&fsp->list);
+
+	return rc;
+}
+
+static void fc_fcp_retry_send_cmd(unsigned long data)
+{
+	fc_fcp_send_cmd((struct fc_fcp_pkt *)data);
+}
+
+static int fc_fcp_send_cmd(struct fc_fcp_pkt *fsp)
+{
+	struct fc_lport *lp;
+	struct fc_frame *fp;
+	struct fc_seq *sp;
+	struct fc_rport *rport;
+	struct fc_rport_libfc_priv *rp;
+
+	if (fc_fcp_lock_pkt(fsp))
+		return -1;
+
+	if (fsp->state & FC_SRB_COMPL)
+		goto unlock;
+
+	lp = fsp->lp;
+	fp = fc_frame_alloc(lp, sizeof(fsp->cdb_cmd));
+	if (!fp)
+		goto retry;
+	memcpy(fc_frame_payload_get(fp, sizeof(fsp->cdb_cmd)),
+	       &fsp->cdb_cmd, sizeof(fsp->cdb_cmd));
+	fc_frame_setup(fp, FC_RCTL_DD_UNSOL_CMD, FC_TYPE_FCP);
+	fc_frame_set_offset(fp, 0);
+	rport = fsp->rport;
+	fsp->max_payload = rport->maxframe_size;
+	rp = rport->dd_data;
+	sp = lp->tt.exch_seq_send(lp, fp,
+				  fc_fcp_recv,
+				  fsp, 0,
+				  rp->local_port->fid,
+				  rport->port_id,
+				  FC_FC_SEQ_INIT | FC_FC_END_SEQ);
+	if (!sp) {
+		fc_frame_free(fp);
+		goto retry;
+	}
+	fsp->seq_ptr = sp;
+
+	setup_timer(&fsp->timer, fc_fcp_timeout, (unsigned long)fsp);
+	fc_fcp_timer_set(fsp,
+			(fsp->tgt_flags & FC_RP_FLAGS_REC_SUPPORTED) ?
+			FC_SCSI_REC_TOV : FC_SCSI_ER_TIMEOUT);
+unlock:
+	fc_fcp_unlock_pkt(fsp);
+	return 0;
+retry:
+	setup_timer(&fsp->timer, fc_fcp_retry_send_cmd, (unsigned long)fsp);
+	fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV);
+	fc_fcp_unlock_pkt(fsp);
+	return 0;
+}
+
+/*
+ * transport error handler
+ */
+static void fc_fcp_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
+{
+	struct fc_lport *lp = fsp->lp;
+
+	if (lp->state == LPORT_ST_LOGO)
+		return;
+
+	if (fc_fcp_lock_pkt(fsp))
+		return;
+
+	FC_DBG("unknown error %ld\n", PTR_ERR(fp));
+	fsp->status_code = FC_CMD_PLOGO;
+	fc_fcp_complete(fsp);
+	fc_fcp_unlock_pkt(fsp);
+}
+
+static void fc_abort_internal(struct fc_fcp_pkt *fsp)
+{
+	fsp->state |= FC_SRB_ABORT_PENDING;
+	fsp->cdb_status = -1;
+	if (fsp->lp->tt.seq_exch_abort(fsp->seq_ptr))
+		fc_fcp_complete(fsp);	/* abort couldn't be sent */
+	else
+		fsp->seq_ptr = NULL;
+}
+
+/*
+ * SCSI abort handler: sends an abort
+ * and then waits for the abort completion.
+ */
+static int fc_fcp_pkt_abort(struct fc_lport *lp, struct fc_fcp_pkt *fsp)
+{
+	int rc = FAILED;
+
+	if (!fsp->seq_ptr)
+		return rc;
+	if (lp->tt.seq_exch_abort(fsp->seq_ptr))
+		return rc;
+
+	fsp->state |= FC_SRB_ABORT_PENDING;
+
+	init_completion(&fsp->tm_done);
+	fsp->wait_for_comp = 1;
+
+	spin_unlock(&fsp->scsi_pkt_lock);
+	rc = wait_for_completion_timeout(&fsp->tm_done, FC_SCSI_TM_TOV);
+	spin_lock(&fsp->scsi_pkt_lock);
+
+	if (fsp->seq_ptr) {
+		lp->tt.exch_done(fsp->seq_ptr);
+		fsp->seq_ptr = NULL;
+	}
+
+	if (!rc) {
+		FC_DBG("target abort cmd  failed\n");
+		rc = FAILED;
+	} else if (fsp->state & FC_SRB_ABORTED) {
+		FC_DBG("target abort cmd  passed\n");
+		rc = SUCCESS;
+
+		fsp->status_code = FC_CMD_ABORTED;
+		fc_io_compl(fsp);
+	}
+
+	return rc;
+}
+
+/*
+ * Retry LUN reset after resource allocation failed.
+ */
+static void fc_lun_reset_send(unsigned long data)
+{
+	struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)data;
+	const size_t len = sizeof(fsp->cdb_cmd);
+	struct fc_lport *lp = fsp->lp;
+	struct fc_frame *fp;
+	struct fc_seq  *sp;
+	struct fc_rport *rport;
+	struct fc_rport_libfc_priv *rp;
+
+	spin_lock(&fsp->scsi_pkt_lock);
+	if (fsp->state & FC_SRB_COMPL)
+		goto unlock;
+
+	fp = fc_frame_alloc(lp, len);
+	if (!fp)
+		goto retry;
+	memcpy(fc_frame_payload_get(fp, len), &fsp->cdb_cmd, len);
+	fc_frame_setup(fp, FC_RCTL_DD_UNSOL_CMD, FC_TYPE_FCP);
+	fc_frame_set_offset(fp, 0);
+	rport = fsp->rport;
+	rp = rport->dd_data;
+	sp = lp->tt.exch_seq_send(lp, fp,
+				  fc_tm_done,
+				  fsp, 0,
+				  rp->local_port->fid,
+				  rport->port_id,
+				  FC_FC_SEQ_INIT | FC_FC_END_SEQ);
+
+	if (sp) {
+		fsp->seq_ptr = sp;
+		goto unlock;
+	}
+	/*
+	 * Exchange or frame allocation failed.  Set timer and retry.
+	 */
+	fc_frame_free(fp);
+retry:
+	setup_timer(&fsp->timer, fc_lun_reset_send, (unsigned long)fsp);
+	fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV);
+unlock:
+	spin_unlock(&fsp->scsi_pkt_lock);
+}
+
+static void fc_fcp_cleanup_lun_reset(struct fc_fcp_pkt *fsp)
+{
+	struct fc_lport *lp = fsp->lp;
+
+	fsp->status_code = FC_CMD_ABORTED;
+	if (fsp->seq_ptr) {
+		lp->tt.exch_done(fsp->seq_ptr);
+		fsp->seq_ptr = NULL;
+	}
+}
+
+/*
+ * SCSI device reset handler: sends a LUN RESET to the device
+ * and waits for the reset reply.
+ */
+static int fc_lun_reset(struct fc_lport *lp, struct fc_fcp_pkt *fsp,
+			unsigned int id, unsigned int lun)
+{
+	int rc;
+
+	fsp->cdb_cmd.fc_dl = htonl(fsp->data_len);
+	fsp->cdb_cmd.fc_tm_flags = FCP_TMF_LUN_RESET;
+	int_to_scsilun(lun, (struct scsi_lun *)fsp->cdb_cmd.fc_lun);
+
+	fsp->wait_for_comp = 1;
+	init_completion(&fsp->tm_done);
+
+	fc_lun_reset_send((unsigned long)fsp);
+
+	/*
+	 * wait for completion of reset
+	 * after that make sure all commands are terminated
+	 */
+	rc = wait_for_completion_timeout(&fsp->tm_done, FC_SCSI_TM_TOV);
+
+	spin_lock(&fsp->scsi_pkt_lock);
+	fsp->state |= FC_SRB_COMPL;
+	spin_unlock(&fsp->scsi_pkt_lock);
+
+	del_timer_sync(&fsp->timer);
+
+	spin_lock(&fsp->scsi_pkt_lock);
+	if (fsp->seq_ptr) {
+		/* TODO:
+		 * if the exch resp function is running and trying to grab
+		 * the scsi_pkt_lock, this could free the exch from under
+		 * it and it could allow the fsp to be freed from under
+		 * fc_tm_done.
+		 */
+		lp->tt.exch_done(fsp->seq_ptr);
+		fsp->seq_ptr = NULL;
+	}
+	fsp->wait_for_comp = 0;
+	spin_unlock(&fsp->scsi_pkt_lock);
+
+	if (!rc) {
+		FC_DBG("lun reset failed\n");
+		return FAILED;
+	}
+
+	/* cdb_status holds the tmf's rsp code */
+	if (fsp->cdb_status != FCP_TMF_CMPL)
+		return FAILED;
+
+	FC_DBG("lun reset to lun %u completed\n", lun);
+	fc_fcp_cleanup_each_cmd(lp, id, lun, fc_fcp_cleanup_lun_reset);
+	return SUCCESS;
+}
+
+/*
+ * Task Management response handler
+ */
+static void fc_tm_done(struct fc_seq *sp, struct fc_frame *fp, void *arg)
+{
+	struct fc_fcp_pkt *fsp = arg;
+
+	spin_lock(&fsp->scsi_pkt_lock);
+	/*
+	 * raced with eh timeout handler.
+	 *
+	 * TODO: If this happens we could be freeing the fsp right now and
+	 * would oops. Next patches will fix this race.
+	 */
+	if ((fsp->state & FC_SRB_COMPL) || !fsp->seq_ptr ||
+	    !fsp->wait_for_comp) {
+		spin_unlock(&fsp->scsi_pkt_lock);
+		return;
+	}
+
+	if (IS_ERR(fp)) {
+		/*
+		 * If there is an error just let it timeout.
+		 * scsi-eh will escalate for us.
+		 */
+		spin_unlock(&fsp->scsi_pkt_lock);
+		return;
+	}
+
+	fc_fcp_fcp_resp(fsp, fp);
+	fsp->seq_ptr = NULL;
+	fsp->lp->tt.exch_done(sp);
+	fc_frame_free(fp);
+	spin_unlock(&fsp->scsi_pkt_lock);
+}
+
+static void fc_fcp_cleanup_io(struct fc_fcp_pkt *fsp)
+{
+	fsp->status_code = FC_HRD_ERROR;
+}
+
+static void fc_fcp_cleanup(struct fc_lport *lp)
+{
+	fc_fcp_cleanup_each_cmd(lp, -1, -1, fc_fcp_cleanup_io);
+}
+
+/*
+ * fc_fcp_timeout: called by OS timer function.
+ *
+ * The timer has been deactivated and must be reactivated if desired
+ * using fc_fcp_timer_set().
+ *
+ * Algorithm:
+ *
+ * If REC is supported, just issue it, and return.  The REC exchange will
+ * complete or time out, and recovery can continue at that point.
+ *
+ * Otherwise, if the response has been received without all the data,
+ * then ER_TIMEOUT has passed since the response was received.
+ *
+ * If the response has not been received, we check whether data was
+ * received recently.  If it has been, we keep waiting; otherwise we
+ * abort the command.
+ */
+static void fc_fcp_timeout(unsigned long data)
+{
+	struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)data;
+	struct fc_rport *rport = fsp->rport;
+	struct fc_rport_libfc_priv *rp = rport->dd_data;
+
+	if (fc_fcp_lock_pkt(fsp))
+		return;
+
+	if (fsp->state & FC_SRB_COMPL)
+		goto unlock;
+	fsp->state |= FC_SRB_FCP_PROCESSING_TMO;
+
+	if (rp->flags & FC_RP_FLAGS_REC_SUPPORTED)
+		fc_fcp_rec(fsp);
+	/* TODO: change this to time_before/after */
+	else if (jiffies - fsp->last_pkt_time < FC_SCSI_ER_TIMEOUT / 2)
+		fc_fcp_timer_set(fsp, FC_SCSI_ER_TIMEOUT);
+	else if (fsp->state & FC_SRB_RCV_STATUS)
+		fc_fcp_complete(fsp);
+	else
+		fc_timeout_error(fsp);
+
+	fsp->state &= ~FC_SRB_FCP_PROCESSING_TMO;
+unlock:
+	fc_fcp_unlock_pkt(fsp);
+}
+
+/*
+ * Send a REC ELS request
+ */
+static void fc_fcp_rec(struct fc_fcp_pkt *fsp)
+{
+	struct fc_lport *lp;
+	struct fc_seq *sp;
+	struct fc_frame *fp;
+	struct fc_els_rec *rec;
+	struct fc_rport *rport;
+	struct fc_rport_libfc_priv *rp;
+	u16 ox_id;
+	u16 rx_id;
+
+	lp = fsp->lp;
+	rport = fsp->rport;
+	rp = rport->dd_data;
+	sp = fsp->seq_ptr;
+	if (!sp || rp->rp_state != RPORT_ST_READY) {
+		fsp->status_code = FC_HRD_ERROR;
+		fsp->io_status = SUGGEST_RETRY << 24;
+		fc_fcp_complete(fsp);
+		return;
+	}
+	lp->tt.seq_get_xids(sp, &ox_id, &rx_id);
+	fp = fc_frame_alloc(lp, sizeof(*rec));
+	if (!fp)
+		goto retry;
+
+	rec = fc_frame_payload_get(fp, sizeof(*rec));
+	memset(rec, 0, sizeof(*rec));
+	rec->rec_cmd = ELS_REC;
+	hton24(rec->rec_s_id, lp->fid);
+	rec->rec_ox_id = htons(ox_id);
+	rec->rec_rx_id = htons(rx_id);
+
+	fc_frame_setup(fp, FC_RCTL_ELS_REQ, FC_TYPE_ELS);
+	fc_frame_set_offset(fp, 0);
+	sp = lp->tt.exch_seq_send(lp, fp,
+				  fc_fcp_rec_resp,
+				  fsp, jiffies_to_msecs(FC_SCSI_REC_TOV),
+				  rp->local_port->fid,
+				  rport->port_id,
+				  FC_FC_SEQ_INIT | FC_FC_END_SEQ);
+
+	if (sp) {
+		fc_fcp_pkt_hold(fsp);		/* hold while REC outstanding */
+		return;
+	} else
+		fc_frame_free(fp);
+retry:
+	if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
+		fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV);
+	else
+		fc_timeout_error(fsp);
+}
+
+/*
+ * Receive handler for the REC ELS frame.
+ * If it is a reject, let the scsi layer handle the timeout.
+ * If it is an LS_ACC and the I/O is not complete, set the timer
+ * and return; otherwise complete the exchange and tell the scsi
+ * layer to restart the I/O.
+ */
+static void fc_fcp_rec_resp(struct fc_seq *sp, struct fc_frame *fp, void *arg)
+{
+	struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)arg;
+	struct fc_els_rec_acc *recp;
+	struct fc_els_ls_rjt *rjt;
+	u32 e_stat;
+	u8 opcode;
+	u32 offset;
+	enum dma_data_direction data_dir;
+	enum fc_rctl r_ctl;
+	struct fc_rport_libfc_priv *rp;
+
+	if (IS_ERR(fp)) {
+		fc_fcp_rec_error(fsp, fp);
+		return;
+	}
+
+	if (fc_fcp_lock_pkt(fsp))
+		goto out;
+
+	fsp->recov_retry = 0;
+	opcode = fc_frame_payload_op(fp);
+	if (opcode == ELS_LS_RJT) {
+		rjt = fc_frame_payload_get(fp, sizeof(*rjt));
+		switch (rjt->er_reason) {
+		default:
+			if (fc_fcp_debug)
+				FC_DBG("device %x unexpected REC reject "
+				       "reason %d expl %d\n",
+				       fsp->rport->port_id, rjt->er_reason,
+				       rjt->er_explan);
+			/* fall through */
+
+		case ELS_RJT_UNSUP:
+			if (fc_fcp_debug)
+				FC_DBG("device does not support REC\n");
+			rp = fsp->rport->dd_data;
+			rp->flags &= ~FC_RP_FLAGS_REC_SUPPORTED;
+			/* fall through */
+
+		case ELS_RJT_LOGIC:
+		case ELS_RJT_UNAB:
+			/*
+			 * If no data transfer, the command frame got dropped
+			 * so we just retry.  If data was transferred, we
+			 * lost the response but the target has no record,
+			 * so we abort and retry.
+			 */
+			if (rjt->er_explan == ELS_EXPL_OXID_RXID &&
+			    fsp->xfer_len == 0) {
+				fc_fcp_retry_cmd(fsp);
+				break;
+			}
+			fc_timeout_error(fsp);
+			break;
+		}
+	} else if (opcode == ELS_LS_ACC) {
+		if (fsp->state & FC_SRB_ABORTED)
+			goto unlock_out;
+
+		data_dir = fsp->cmd->sc_data_direction;
+		recp = fc_frame_payload_get(fp, sizeof(*recp));
+		offset = ntohl(recp->reca_fc4value);
+		e_stat = ntohl(recp->reca_e_stat);
+
+		if (e_stat & ESB_ST_COMPLETE) {
+
+			/*
+			 * The exchange is complete.
+			 *
+			 * For output, we must've lost the response.
+			 * For input, all data must've been sent.
+			 * We may have lost the response
+			 * (and a confirmation was requested) and maybe
+			 * some data.
+			 *
+			 * If all data was received, send SRR
+			 * asking for the response.  If partial data was
+			 * received, or there are gaps, SRR requests data
+			 * at the start of the gap.
+			 * Recovery via SRR relies on in-order delivery.
+			 */
+			if (data_dir == DMA_TO_DEVICE) {
+				r_ctl = FC_RCTL_DD_CMD_STATUS;
+			} else if (fsp->xfer_contig_end == offset) {
+				r_ctl = FC_RCTL_DD_CMD_STATUS;
+			} else {
+				offset = fsp->xfer_contig_end;
+				r_ctl = FC_RCTL_DD_SOL_DATA;
+			}
+			fc_fcp_srr(fsp, r_ctl, offset);
+		} else if (e_stat & ESB_ST_SEQ_INIT) {
+
+			/*
+			 * The remote port has the initiative, so just
+			 * keep waiting for it to complete.
+			 */
+			fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV);
+		} else {
+
+			/*
+			 * The exchange is incomplete, we have seq. initiative.
+			 * Lost response with requested confirmation,
+			 * lost confirmation, lost transfer ready or
+			 * lost write data.
+			 *
+			 * For output, if not all data was received, ask
+			 * for transfer ready to be repeated.
+			 *
+			 * If we received or sent all the data, send SRR to
+			 * request response.
+			 *
+			 * If we lost a response, we may have lost some read
+			 * data as well.
+			 */
+			r_ctl = FC_RCTL_DD_SOL_DATA;
+			if (data_dir == DMA_TO_DEVICE) {
+				r_ctl = FC_RCTL_DD_CMD_STATUS;
+				if (offset < fsp->data_len)
+					r_ctl = FC_RCTL_DD_DATA_DESC;
+			} else if (offset == fsp->xfer_contig_end) {
+				r_ctl = FC_RCTL_DD_CMD_STATUS;
+			} else if (fsp->xfer_contig_end < offset) {
+				offset = fsp->xfer_contig_end;
+			}
+			fc_fcp_srr(fsp, r_ctl, offset);
+		}
+	}
+unlock_out:
+	fc_fcp_unlock_pkt(fsp);
+out:
+	fc_fcp_pkt_release(fsp);	/* drop hold for outstanding REC */
+	fc_frame_free(fp);
+}
+
+/*
+ * Handle error response or timeout for REC exchange.
+ */
+static void fc_fcp_rec_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
+{
+	struct fc_lport *lp = fsp->lp;
+	int error = PTR_ERR(fp);
+
+	if (lp->state == LPORT_ST_LOGO)
+		return;
+
+	if (fc_fcp_lock_pkt(fsp))
+		goto out;
+
+	switch (error) {
+	case -FC_EX_CLOSED:
+		fc_timeout_error(fsp);
+		break;
+
+	default:
+		FC_DBG("REC %p fid %x error unexpected error %d\n",
+		       fsp, fsp->rport->port_id, error);
+		fsp->status_code = FC_CMD_PLOGO;
+		/* fall through */
+
+	case -FC_EX_TIMEOUT:
+		/*
+		 * Assume REC or LS_ACC was lost.
+		 * The exchange manager will have aborted REC, so retry.
+		 */
+		FC_DBG("REC fid %x error error %d retry %d/%d\n",
+		       fsp->rport->port_id, error, fsp->recov_retry,
+		       FC_MAX_RECOV_RETRY);
+		if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
+			fc_fcp_rec(fsp);
+		else
+			fc_timeout_error(fsp);
+		break;
+	}
+	fc_fcp_unlock_pkt(fsp);
+out:
+	fc_fcp_pkt_release(fsp);	/* drop hold for outstanding REC */
+}
+
+/*
+ * Timeout error routine:
+ * aborts the I/O, closes the exchange and
+ * sends a completion notification to the scsi layer
+ */
+static void fc_timeout_error(struct fc_fcp_pkt *fsp)
+{
+	struct fc_lport *lp = fsp->lp;
+
+	fsp->state |= FC_SRB_ABORT_PENDING;
+	if (fsp->seq_ptr)
+		lp->tt.seq_exch_abort(fsp->seq_ptr);
+
+	fsp->seq_ptr = NULL;
+	fsp->status_code = FC_CMD_TIME_OUT;
+	fsp->cdb_status = 0;
+	fsp->io_status = 0;
+
+	fc_io_compl(fsp);
+}
+
+/*
+ * Retry command.
+ * An abort isn't needed.
+ *
+ * We treat it like a timeout because the command did not complete -
+ * presumably due to cmd packet loss. We will fail the command and
+ * have scsi-ml decide if we should retry or not.
+ *
+ * TODO: Instead we could continue to retry the command until the scsi
+ * command fires, or add port level counters to determine
+ * when to mark it as failed (the latter would be useful in the class eh
+ * for lpfc and qla2xxx).
+ *
+ */
+static void fc_fcp_retry_cmd(struct fc_fcp_pkt *fsp)
+{
+	if (fsp->seq_ptr) {
+		fsp->lp->tt.exch_done(fsp->seq_ptr);
+		fsp->seq_ptr = NULL;
+	}
+
+	fsp->status_code = FC_CMD_TIME_OUT;
+	fc_fcp_complete(fsp);
+}
+
+/*
+ * Sequence retransmission request.
+ * This is called after receiving status but insufficient data, or
+ * when expecting status but the request has timed out.
+ */
+static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset)
+{
+	struct fc_lport *lp = fsp->lp;
+	struct fc_rport *rport;
+	struct fc_rport_libfc_priv *rp;
+	struct fc_seq *sp;
+	struct fcp_srr *srr;
+	struct fc_frame *fp;
+	u8 cdb_op;
+	u16 ox_id;
+	u16 rx_id;
+
+	rport = fsp->rport;
+	rp = rport->dd_data;
+	cdb_op = fsp->cdb_cmd.fc_cdb[0];
+	lp->tt.seq_get_xids(fsp->seq_ptr, &ox_id, &rx_id);
+
+	if (!(rp->flags & FC_RP_FLAGS_RETRY) || rp->rp_state != RPORT_ST_READY)
+		goto retry;			/* shouldn't happen */
+	fp = fc_frame_alloc(lp, sizeof(*srr));
+	if (!fp)
+		goto retry;
+
+	srr = fc_frame_payload_get(fp, sizeof(*srr));
+	memset(srr, 0, sizeof(*srr));
+	srr->srr_op = ELS_SRR;
+	srr->srr_ox_id = htons(ox_id);
+	srr->srr_rx_id = htons(rx_id);
+	srr->srr_r_ctl = r_ctl;
+	srr->srr_rel_off = htonl(offset);
+
+	fc_frame_setup(fp, FC_RCTL_ELS4_REQ, FC_TYPE_FCP);
+	fc_frame_set_offset(fp, 0);
+	sp = lp->tt.exch_seq_send(lp, fp,
+				  fc_fcp_srr_resp,
+				  fsp, jiffies_to_msecs(FC_SCSI_REC_TOV),
+				  rp->local_port->fid,
+				  rport->port_id,
+				  FC_FC_SEQ_INIT | FC_FC_END_SEQ);
+	if (!sp) {
+		fc_frame_free(fp);
+		goto retry;
+	}
+	fsp->recov_seq = sp;
+	fsp->xfer_len = offset;
+	fsp->xfer_contig_end = offset;
+	fsp->state &= ~FC_SRB_RCV_STATUS;
+	fc_fcp_pkt_hold(fsp);		/* hold for outstanding SRR */
+	return;
+retry:
+	fc_fcp_retry(fsp);
+}
+
+/*
+ * Handle response from SRR.
+ */
+static void fc_fcp_srr_resp(struct fc_seq *sp, struct fc_frame *fp, void *arg)
+{
+	struct fc_fcp_pkt *fsp = arg;
+	u16 ox_id;
+	u16 rx_id;
+
+	if (IS_ERR(fp)) {
+		fc_fcp_srr_error(fsp, fp);
+		return;
+	}
+
+	if (fc_fcp_lock_pkt(fsp))
+		goto out;
+
+	fsp->recov_seq = NULL;
+
+	fsp->lp->tt.seq_get_xids(fsp->seq_ptr, &ox_id, &rx_id);
+	switch (fc_frame_payload_op(fp)) {
+	case ELS_LS_ACC:
+		fsp->recov_retry = 0;
+		fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV);
+		break;
+	case ELS_LS_RJT:
+	default:
+		fc_timeout_error(fsp);
+		break;
+	}
+	fc_fcp_unlock_pkt(fsp);
+	fsp->lp->tt.exch_done(sp);
+out:
+	fc_frame_free(fp);
+	fc_fcp_pkt_release(fsp);	/* drop hold for outstanding SRR */
+}
+
+static void fc_fcp_srr_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
+{
+	if (fc_fcp_lock_pkt(fsp))
+		goto out;
+	fsp->lp->tt.exch_done(fsp->recov_seq);
+	fsp->recov_seq = NULL;
+	switch (PTR_ERR(fp)) {
+	case -FC_EX_CLOSED:			/* e.g., link failure */
+		fc_timeout_error(fsp);
+		break;
+	case -FC_EX_TIMEOUT:
+		if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
+			fc_fcp_rec(fsp);
+		else
+			fc_timeout_error(fsp);
+		break;
+	default:
+		fc_fcp_retry(fsp);
+		break;
+	}
+	fc_fcp_unlock_pkt(fsp);
+out:
+	fc_fcp_pkt_release(fsp);	/* drop hold for outstanding SRR */
+}
+
+/**
+ * fc_queuecommand - The queuecommand function of the scsi template
+ * @cmd:	struct scsi_cmnd to be executed
+ * @done:	Callback function to be called when cmd is completed
+ *
+ * This is the I/O strategy routine, called by the scsi layer.
+ * It is called with the host_lock held.
+ */
+int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *))
+{
+	struct fc_lport *lp;
+	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
+	struct fc_fcp_pkt *sp;
+	struct fc_rport_libfc_priv *rp;
+	int rval;
+	int rc = 0;
+	struct fcoe_dev_stats *stats;
+
+	lp = shost_priv(sc_cmd->device->host);
+
+	rval = fc_remote_port_chkready(rport);
+	if (rval) {
+		sc_cmd->result = rval;
+		done(sc_cmd);
+		goto out;
+	}
+
+	if (!*(struct fc_remote_port **)rport->dd_data) {
+		/*
+		 * rport is transitioning from blocked/deleted to
+		 * online
+		 */
+		sc_cmd->result = DID_IMM_RETRY << 16;
+		done(sc_cmd);
+		goto out;
+	}
+
+	rp = rport->dd_data;
+
+	if (lp->state != LPORT_ST_READY) {
+		if (lp->link_status & FC_PAUSE) {
+			rc = SCSI_MLQUEUE_HOST_BUSY;
+			goto out;
+		} else {
+			sc_cmd->result = DID_NO_CONNECT << 16;
+			done(sc_cmd);
+			goto out;
+		}
+	} else {
+		if (!(lp->link_status & FC_LINK_UP)) {
+			sc_cmd->result = DID_NO_CONNECT << 16;
+			done(sc_cmd);
+			goto out;
+		}
+	}
+
+	sp = fc_fcp_pkt_alloc(lp);
+	if (sp == NULL) {
+		rc = SCSI_MLQUEUE_HOST_BUSY;
+		goto out;
+	}
+
+	/*
+	 * build the libfc request pkt
+	 */
+	sp->cmd = sc_cmd;	/* save the cmd */
+	sp->lp = lp;		/* save the softc ptr */
+	sp->rport = rport;	/* set the remote port ptr */
+	sc_cmd->scsi_done = done;
+
+	/*
+	 * set up the transfer length
+	 */
+	sp->data_len = scsi_bufflen(sc_cmd);
+	sp->xfer_len = 0;
+
+	/*
+	 * setup the data direction
+	 */
+	stats = lp->dev_stats[smp_processor_id()];
+	if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
+		sp->req_flags = FC_SRB_READ;
+		stats->InputRequests++;
+		stats->InputMegabytes = sp->data_len;
+	} else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
+		sp->req_flags = FC_SRB_WRITE;
+		stats->OutputRequests++;
+		stats->OutputMegabytes = sp->data_len;
+	} else {
+		sp->req_flags = 0;
+		stats->ControlRequests++;
+	}
+
+	sp->tgt_flags = rp->flags;
+
+	init_timer(&sp->timer);
+	sp->timer.data = (unsigned long)sp;
+
+	/*
+	 * Send it to the lower layer.  On failure, free the packet and
+	 * return SCSI_MLQUEUE_HOST_BUSY so the midlayer requeues it.
+	 */
+	rval = fc_fcp_pkt_send(lp, sp);
+	if (rval != 0) {
+		sp->state = FC_SRB_FREE;
+		fc_fcp_pkt_free(sp);
+		rc = SCSI_MLQUEUE_HOST_BUSY;
+	}
+out:
+	return rc;
+}
+EXPORT_SYMBOL(fc_queuecommand);
+
+/**
+ * fc_io_compl - Handle responses for completed commands
+ * @sp:		scsi packet
+ *
+ * Translates the fcp packet status into a Linux SCSI result.
+ *
+ * The fcp packet lock must be held when calling.
+ */
+static void fc_io_compl(struct fc_fcp_pkt *sp)
+{
+	struct scsi_cmnd *sc_cmd;
+	struct fc_lport *lp;
+	unsigned long flags;
+
+	sp->state |= FC_SRB_COMPL;
+	if (!(sp->state & FC_SRB_FCP_PROCESSING_TMO)) {
+		spin_unlock(&sp->scsi_pkt_lock);
+		del_timer_sync(&sp->timer);
+		spin_lock(&sp->scsi_pkt_lock);
+	}
+
+	lp = sp->lp;
+	spin_lock_irqsave(lp->host->host_lock, flags);
+	if (!sp->cmd) {
+		spin_unlock_irqrestore(lp->host->host_lock, flags);
+		return;
+	}
+
+	sc_cmd = sp->cmd;
+	sp->cmd = NULL;
+
+	if (!sc_cmd->SCp.ptr) {
+		spin_unlock_irqrestore(lp->host->host_lock, flags);
+		return;
+	}
+
+	CMD_SCSI_STATUS(sc_cmd) = sp->cdb_status;
+	switch (sp->status_code) {
+	case FC_COMPLETE:
+		if (sp->cdb_status == 0) {
+			/*
+			 * good I/O status
+			 */
+			sc_cmd->result = DID_OK << 16;
+			if (sp->scsi_resid)
+				CMD_RESID_LEN(sc_cmd) = sp->scsi_resid;
+		} else if (sp->cdb_status == QUEUE_FULL) {
+			struct scsi_device *tmp_sdev;
+			struct scsi_device *sdev = sc_cmd->device;
+
+			shost_for_each_device(tmp_sdev, sdev->host) {
+				if (tmp_sdev->id != sdev->id)
+					continue;
+
+				if (tmp_sdev->queue_depth > 1) {
+					scsi_track_queue_full(tmp_sdev,
+							      tmp_sdev->
+							      queue_depth - 1);
+				}
+			}
+			sc_cmd->result = (DID_OK << 16) | sp->cdb_status;
+		} else {
+			/*
+			 * transport-level I/O was OK but scsi
+			 * has a non-zero status
+			 */
+			sc_cmd->result = (DID_OK << 16) | sp->cdb_status;
+		}
+		break;
+	case FC_ERROR:
+		if (sp->io_status & (SUGGEST_RETRY << 24))
+			sc_cmd->result = DID_IMM_RETRY << 16;
+		else
+			sc_cmd->result = (DID_ERROR << 16) | sp->io_status;
+		break;
+	case FC_DATA_UNDRUN:
+		if (sp->cdb_status == 0) {
+			/*
+			 * scsi status is good but transport level
+			 * underrun. For a read, should this be an error?
+			 */
+			sc_cmd->result = (DID_OK << 16) | sp->cdb_status;
+		} else {
+			/*
+			 * scsi got underrun, this is an error
+			 */
+			CMD_RESID_LEN(sc_cmd) = sp->scsi_resid;
+			sc_cmd->result = (DID_ERROR << 16) | sp->cdb_status;
+		}
+		break;
+	case FC_DATA_OVRRUN:
+		/*
+		 * overrun is an error
+		 */
+		sc_cmd->result = (DID_ERROR << 16) | sp->cdb_status;
+		break;
+	case FC_CMD_ABORTED:
+		sc_cmd->result = (DID_ABORT << 16) | sp->io_status;
+		break;
+	case FC_CMD_TIME_OUT:
+		sc_cmd->result = (DID_BUS_BUSY << 16) | sp->io_status;
+		break;
+	case FC_CMD_RESET:
+		sc_cmd->result = (DID_RESET << 16);
+		break;
+	case FC_HRD_ERROR:
+		sc_cmd->result = (DID_NO_CONNECT << 16);
+		break;
+	default:
+		sc_cmd->result = (DID_ERROR << 16);
+		break;
+	}
+
+	list_del(&sp->list);
+	sc_cmd->SCp.ptr = NULL;
+	sc_cmd->scsi_done(sc_cmd);
+	spin_unlock_irqrestore(lp->host->host_lock, flags);
+
+	/* release ref from initial allocation in queue command */
+	fc_fcp_pkt_release(sp);
+}
+
+/**
+ * fc_eh_abort - Abort a command, called from the scsi host template
+ * @sc_cmd:	scsi command to abort
+ *
+ * Sends an ABTS to the target device and waits for the response.
+ * sc_cmd is the pointer to the command to be aborted.
+ */
+int fc_eh_abort(struct scsi_cmnd *sc_cmd)
+{
+	struct fc_fcp_pkt *sp;
+	struct fc_lport *lp;
+	int rc = FAILED;
+	unsigned long flags;
+
+	lp = shost_priv(sc_cmd->device->host);
+	if (lp->state != LPORT_ST_READY)
+		return rc;
+	else if (!(lp->link_status & FC_LINK_UP))
+		return rc;
+
+	spin_lock_irqsave(lp->host->host_lock, flags);
+	sp = CMD_SP(sc_cmd);
+	if (!sp) {
+		/* command completed while scsi eh was setting up */
+		spin_unlock_irqrestore(lp->host->host_lock, flags);
+		return SUCCESS;
+	}
+	/* grab a ref so the sp and sc_cmd cannot be released from under us */
+	fc_fcp_pkt_hold(sp);
+	spin_unlock_irqrestore(lp->host->host_lock, flags);
+
+	if (fc_fcp_lock_pkt(sp)) {
+		/* completed while we were waiting for timer to be deleted */
+		rc = SUCCESS;
+		goto release_pkt;
+	}
+
+	sp->state |= FC_SRB_ABORT_PENDING;
+	rc = fc_fcp_pkt_abort(lp, sp);
+	fc_fcp_unlock_pkt(sp);
+
+release_pkt:
+	fc_fcp_pkt_release(sp);
+	return rc;
+}
+EXPORT_SYMBOL(fc_eh_abort);
+
+/**
+ * fc_eh_device_reset - Reset a single LUN
+ * @sc_cmd:	scsi command
+ *
+ * Set from the scsi host template to send a TM command to the target
+ * and wait for the response.
+ */
+int fc_eh_device_reset(struct scsi_cmnd *sc_cmd)
+{
+	struct fc_lport *lp;
+	struct fc_fcp_pkt *sp;
+	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
+	int rc = FAILED;
+	struct fc_rport_libfc_priv *rp;
+	int rval;
+
+	rval = fc_remote_port_chkready(rport);
+	if (rval)
+		goto out;
+
+	rp = rport->dd_data;
+	lp = shost_priv(sc_cmd->device->host);
+
+	if (lp->state != LPORT_ST_READY)
+		return rc;
+
+	sp = fc_fcp_pkt_alloc(lp);
+	if (sp == NULL) {
+		FC_DBG("could not allocate scsi_pkt\n");
+		sc_cmd->result = DID_NO_CONNECT << 16;
+		goto out;
+	}
+
+	/*
+	 * Build the libfc request pkt. Do not set the scsi cmnd, because
+	 * the sc passed in is not setup for execution like when sent
+	 * through the queuecommand callout.
+	 */
+	sp->lp = lp;		/* save the softc ptr */
+	sp->rport = rport;	/* set the remote port ptr */
+
+	/*
+	 * flush outstanding commands
+	 */
+	rc = fc_lun_reset(lp, sp, scmd_id(sc_cmd), sc_cmd->device->lun);
+	sp->state = FC_SRB_FREE;
+	fc_fcp_pkt_free(sp);
+
+out:
+	return rc;
+}
+EXPORT_SYMBOL(fc_eh_device_reset);
+
+/**
+ * fc_eh_host_reset - Host reset handler: resets the lport for this scsi host
+ * @sc_cmd:	scsi command
+ */
+int fc_eh_host_reset(struct scsi_cmnd *sc_cmd)
+{
+	struct fc_lport *lp;
+
+	lp = shost_priv(sc_cmd->device->host);
+	return lp->tt.lport_reset(lp) ? FAILED : SUCCESS;
+}
+EXPORT_SYMBOL(fc_eh_host_reset);
+
+/**
+ * fc_slave_alloc - configure queue depth
+ * @sdev:	scsi device
+ *
+ * Configures the queue depth based on the host's cmd_per_lun. If that is
+ * not set, we use the libfc default.
+ */
+int fc_slave_alloc(struct scsi_device *sdev)
+{
+	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
+	int queue_depth;
+
+	if (!rport || fc_remote_port_chkready(rport))
+		return -ENXIO;
+
+	if (sdev->tagged_supported) {
+		if (sdev->host->hostt->cmd_per_lun)
+			queue_depth = sdev->host->hostt->cmd_per_lun;
+		else
+			queue_depth = FC_FCP_DFLT_QUEUE_DEPTH;
+		scsi_activate_tcq(sdev, queue_depth);
+	}
+	return 0;
+}
+EXPORT_SYMBOL(fc_slave_alloc);
+
+int fc_change_queue_depth(struct scsi_device *sdev, int qdepth)
+{
+	scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
+	return sdev->queue_depth;
+}
+EXPORT_SYMBOL(fc_change_queue_depth);
+
+int fc_change_queue_type(struct scsi_device *sdev, int tag_type)
+{
+	if (sdev->tagged_supported) {
+		scsi_set_tag_type(sdev, tag_type);
+		if (tag_type)
+			scsi_activate_tcq(sdev, sdev->queue_depth);
+		else
+			scsi_deactivate_tcq(sdev, sdev->queue_depth);
+	} else
+		tag_type = 0;
+
+	return tag_type;
+}
+EXPORT_SYMBOL(fc_change_queue_type);
+
+void fc_fcp_destroy(struct fc_lport *lp)
+{
+	struct fc_fcp_internal *si = fc_get_scsi_internal(lp);
+
+	if (!list_empty(&si->scsi_pkt_queue))
+		printk(KERN_ERR "Leaked scsi packets.\n");
+
+	mempool_destroy(si->scsi_pkt_pool);
+	kfree(si);
+	lp->scsi_priv = NULL;
+}
+EXPORT_SYMBOL(fc_fcp_destroy);
+
+int fc_fcp_init(struct fc_lport *lp)
+{
+	int rc;
+	struct fc_fcp_internal *si;
+
+	if (!lp->tt.scsi_cleanup)
+		lp->tt.scsi_cleanup = fc_fcp_cleanup;
+
+	if (!lp->tt.scsi_abort_io)
+		lp->tt.scsi_abort_io = fc_fcp_abort_io;
+
+	si = kzalloc(sizeof(struct fc_fcp_internal), GFP_KERNEL);
+	if (!si)
+		return -ENOMEM;
+	lp->scsi_priv = si;
+	INIT_LIST_HEAD(&si->scsi_pkt_queue);
+
+	si->scsi_pkt_pool = mempool_create_slab_pool(2, scsi_pkt_cachep);
+	if (!si->scsi_pkt_pool) {
+		rc = -ENOMEM;
+		goto free_internal;
+	}
+	return 0;
+
+free_internal:
+	kfree(si);
+	return rc;
+}
+EXPORT_SYMBOL(fc_fcp_init);
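+
+/*
+ * Sketch of how an LLD might wire the exports above into its scsi host
+ * template (illustrative only; the template name and any fields not
+ * shown are the LLD's choice):
+ *
+ *	static struct scsi_host_template example_fc_sht = {
+ *		.queuecommand		 = fc_queuecommand,
+ *		.eh_abort_handler	 = fc_eh_abort,
+ *		.eh_device_reset_handler = fc_eh_device_reset,
+ *		.eh_host_reset_handler	 = fc_eh_host_reset,
+ *		.slave_alloc		 = fc_slave_alloc,
+ *		.change_queue_depth	 = fc_change_queue_depth,
+ *		.change_queue_type	 = fc_change_queue_type,
+ *	};
+ */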
+
+static int __init libfc_init(void)
+{
+	scsi_pkt_cachep = kmem_cache_create("libfc_fcp_pkt",
+					    sizeof(struct fc_fcp_pkt),
+					    0, SLAB_HWCACHE_ALIGN, NULL);
+	if (scsi_pkt_cachep == NULL) {
+		FC_DBG("Unable to allocate SRB cache...module load failed!");
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+static void __exit libfc_exit(void)
+{
+	kmem_cache_destroy(scsi_pkt_cachep);
+}
+
+module_init(libfc_init);
+module_exit(libfc_exit);
diff --git a/drivers/scsi/libfc/fc_frame.c b/drivers/scsi/libfc/fc_frame.c
new file mode 100644
index 0000000..7ba241e
--- /dev/null
+++ b/drivers/scsi/libfc/fc_frame.c
@@ -0,0 +1,88 @@
+/*
+ * Copyright(c) 2007 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+/*
+ * Frame allocation.
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/skbuff.h>
+#include <linux/crc32.h>
+
+#include <scsi/libfc/fc_frame.h>
+
+/*
+ * Check the CRC in a frame.
+ */
+u32 fc_frame_crc_check(struct fc_frame *fp)
+{
+	u32 crc;
+	u32 error;
+	const u8 *bp;
+	unsigned int len;
+
+	WARN_ON(!fc_frame_is_linear(fp));
+	fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
+	len = (fr_len(fp) + 3) & ~3;	/* round up length to include fill */
+	bp = (const u8 *) fr_hdr(fp);
+	crc = ~crc32(~0, bp, len);
+	error = crc ^ *(u32 *) (bp + len);
+	return error;
+}
+EXPORT_SYMBOL(fc_frame_crc_check);
+
+/*
+ * Allocate a frame intended to be sent via fcoe_xmit.
+ * Get an sk_buff for the frame and set the length.
+ * The requested length must be a multiple of four; room for the
+ * frame header, headroom, and tailroom is added.
+ */
+struct fc_frame *__fc_frame_alloc(size_t len)
+{
+	struct fc_frame *fp;
+	struct sk_buff *skb;
+
+	WARN_ON((len % sizeof(u32)) != 0);
+	len += sizeof(struct fc_frame_header);
+	skb = dev_alloc_skb(len + FC_FRAME_HEADROOM + FC_FRAME_TAILROOM);
+	if (!skb)
+		return NULL;
+	fp = (struct fc_frame *) skb;
+	fc_frame_init(fp);
+	skb_reserve(skb, FC_FRAME_HEADROOM);
+	skb_put(skb, len);
+	return fp;
+}
+EXPORT_SYMBOL(__fc_frame_alloc);
+
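+/*
+ * Allocate a frame for a payload that may not be a multiple of four bytes,
+ * zero the pad bytes, then trim the skb back to the payload length.
+ */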
+struct fc_frame *fc_frame_alloc_fill(struct fc_lport *lp, size_t payload_len)
+{
+	struct fc_frame *fp;
+	size_t fill;
+
+	fill = payload_len % 4;
+	if (fill != 0)
+		fill = 4 - fill;
+	fp = __fc_frame_alloc(payload_len + fill);
+	if (fp) {
+		memset((char *) fr_hdr(fp) + payload_len, 0, fill);
+		/* trim is OK, we just allocated it so there are no fragments */
+		skb_trim(fp_skb(fp), payload_len);
+	}
+	return fp;
+}
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
new file mode 100644
index 0000000..33cd556
--- /dev/null
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -0,0 +1,914 @@
+/*
+ * Copyright(c) 2007 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+/*
+ * Logical interface support.
+ */
+
+#include <linux/timer.h>
+#include <asm/unaligned.h>
+
+#include <scsi/fc/fc_gs.h>
+
+#include <scsi/libfc/libfc.h>
+
+/* Fabric IDs to use for point-to-point mode, chosen on whims. */
+#define FC_LOCAL_PTP_FID_LO   0x010101
+#define FC_LOCAL_PTP_FID_HI   0x010102
+
+#define	DNS_DELAY	      3 /* discovery delay after RSCN (in seconds) */
+
+static int fc_lport_debug;
+
+static void fc_lport_enter_flogi(struct fc_lport *);
+static void fc_lport_enter_logo(struct fc_lport *);
+
+static const char *fc_lport_state_names[] = {
+	[LPORT_ST_NONE] =     "none",
+	[LPORT_ST_FLOGI] =    "FLOGI",
+	[LPORT_ST_DNS] =      "dNS",
+	[LPORT_ST_REG_PN] =   "REG_PN",
+	[LPORT_ST_REG_FT] =   "REG_FT",
+	[LPORT_ST_SCR] =      "SCR",
+	[LPORT_ST_READY] =    "ready",
+	[LPORT_ST_DNS_STOP] = "stop",
+	[LPORT_ST_LOGO] =     "LOGO",
+	[LPORT_ST_RESET] =    "reset",
+};
+
+static int fc_frame_drop(struct fc_lport *lp, struct fc_frame *fp)
+{
+	fc_frame_free(fp);
+	return 0;
+}
+
+static const char *fc_lport_state(struct fc_lport *lp)
+{
+	const char *cp;
+
+	cp = fc_lport_state_names[lp->state];
+	if (!cp)
+		cp = "unknown";
+	return cp;
+}
+
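+/*
+ * Set up a point-to-point remote port: look up or create the rport for
+ * the remote FID and enter the READY state.
+ */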
+static void fc_lport_ptp_setup(struct fc_lport *lp,
+			       u32 remote_fid, u64 remote_wwpn,
+			       u64 remote_wwnn)
+{
+	struct fc_rport *rport;
+	struct fc_rport_identifiers ids = {
+		.port_id = remote_fid,
+		.port_name = remote_wwpn,
+		.node_name = remote_wwnn,
+	};
+
+	/*
+	 * If we have to create an rport, the FC transport class can sleep,
+	 * so we must drop the lport lock here.
+	 */
+	fc_lport_unlock(lp);
+	rport = lp->tt.rport_lookup(lp, ids.port_id); /* lookup and hold */
+	if (rport == NULL)
+		rport = lp->tt.rport_create(lp, &ids); /* create and hold */
+	fc_lport_lock(lp);
+	if (rport) {
+		if (lp->ptp_rp)
+			fc_remote_port_delete(lp->ptp_rp);
+		lp->ptp_rp = rport;
+		fc_lport_state_enter(lp, LPORT_ST_READY);
+	}
+}
+
+static void fc_lport_ptp_clear(struct fc_lport *lp)
+{
+	if (lp->ptp_rp) {
+		fc_remote_port_delete(lp->ptp_rp);
+		lp->ptp_rp = NULL;
+	}
+}
+
+/*
+ * Fill in FLOGI command for request.
+ */
+static void
+fc_lport_flogi_fill(struct fc_lport *lp,
+		    struct fc_els_flogi *flogi, unsigned int op)
+{
+	struct fc_els_csp *sp;
+	struct fc_els_cssp *cp;
+
+	memset(flogi, 0, sizeof(*flogi));
+	flogi->fl_cmd = (u8) op;
+	put_unaligned_be64(lp->wwpn, &flogi->fl_wwpn);
+	put_unaligned_be64(lp->wwnn, &flogi->fl_wwnn);
+	sp = &flogi->fl_csp;
+	sp->sp_hi_ver = 0x20;
+	sp->sp_lo_ver = 0x20;
+	sp->sp_bb_cred = htons(10);	/* this gets set by gateway */
+	sp->sp_bb_data = htons((u16) lp->mfs);
+	cp = &flogi->fl_cssp[3 - 1];	/* class 3 parameters */
+	cp->cp_class = htons(FC_CPC_VALID | FC_CPC_SEQ);
+	if (op != ELS_FLOGI) {
+		sp->sp_features = htons(FC_SP_FT_CIRO);
+		sp->sp_tot_seq = htons(255);	/* seq. we accept */
+		sp->sp_rel_off = htons(0x1f);
+		sp->sp_e_d_tov = htonl(lp->e_d_tov);
+
+		cp->cp_rdfs = htons((u16) lp->mfs);
+		cp->cp_con_seq = htons(255);
+		cp->cp_open_seq = 1;
+	}
+}
+
+/*
+ * Set the fid. This indicates that we have a new connection to the
+ * fabric so we should reset our list of fc_rports. Passing a fid of
+ * 0 will also reset the rport list regardless of the previous fid.
+ */
+static void fc_lport_set_fid(struct fc_lport *lp, u32 fid)
+{
+	if (fid != 0 && lp->fid == fid)
+		return;
+
+	if (fc_lport_debug)
+		FC_DBG("changing local port fid from %x to %x",
+		       lp->fid, fid);
+	lp->fid = fid;
+	lp->tt.rport_reset_list(lp);
+}
+
+/*
+ * Add a supported FC-4 type.
+ */
+static void fc_lport_add_fc4_type(struct fc_lport *lp, enum fc_fh_type type)
+{
+	__be32 *mp;
+
+	mp = &lp->fcts.ff_type_map[type / FC_NS_BPW];
+	*mp = htonl(ntohl(*mp) | 1UL << (type % FC_NS_BPW));
+}
+
+/*
+ * Handle received RLIR - registered link incident report.
+ */
+static void fc_lport_rlir_req(struct fc_seq *sp, struct fc_frame *fp,
+			      struct fc_lport *lp)
+{
+	lp->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);
+	fc_frame_free(fp);
+}
+
+/*
+ * Handle received ECHO.
+ */
+static void fc_lport_echo_req(struct fc_seq *sp, struct fc_frame *in_fp,
+			      struct fc_lport *lp)
+{
+	struct fc_frame *fp;
+	unsigned int len;
+	void *pp;
+	void *dp;
+	u32 f_ctl;
+
+	len = fr_len(in_fp) - sizeof(struct fc_frame_header);
+	pp = fc_frame_payload_get(in_fp, len);
+
+	if (len < sizeof(__be32))
+		len = sizeof(__be32);
+	fp = fc_frame_alloc(lp, len);
+	if (fp) {
+		dp = fc_frame_payload_get(fp, len);
+		memcpy(dp, pp, len);
+		*((u32 *)dp) = htonl(ELS_LS_ACC << 24);
+		sp = lp->tt.seq_start_next(sp);
+		f_ctl = FC_FC_LAST_SEQ | FC_FC_END_SEQ;
+		fc_frame_setup(fp, FC_RCTL_ELS_REP, FC_TYPE_ELS);
+		lp->tt.seq_send(lp, sp, fp, f_ctl);
+	}
+	fc_frame_free(in_fp);
+}
+
+/*
+ * Handle received RNID.
+ */
+static void fc_lport_rnid_req(struct fc_seq *sp, struct fc_frame *in_fp,
+			      struct fc_lport *lp)
+{
+	struct fc_frame *fp;
+	struct fc_els_rnid *req;
+	struct {
+		struct fc_els_rnid_resp rnid;
+		struct fc_els_rnid_cid cid;
+		struct fc_els_rnid_gen gen;
+	} *rp;
+	struct fc_seq_els_data rjt_data;
+	u8 fmt;
+	size_t len;
+	u32 f_ctl;
+
+	req = fc_frame_payload_get(in_fp, sizeof(*req));
+	if (!req) {
+		rjt_data.fp = NULL;
+		rjt_data.reason = ELS_RJT_LOGIC;
+		rjt_data.explan = ELS_EXPL_NONE;
+		lp->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
+	} else {
+		fmt = req->rnid_fmt;
+		len = sizeof(*rp);
+		if (fmt != ELS_RNIDF_GEN ||
+		    ntohl(lp->rnid_gen.rnid_atype) == 0) {
+			fmt = ELS_RNIDF_NONE;	/* nothing to provide */
+			len -= sizeof(rp->gen);
+		}
+		fp = fc_frame_alloc(lp, len);
+		if (fp) {
+			rp = fc_frame_payload_get(fp, len);
+			memset(rp, 0, len);
+			rp->rnid.rnid_cmd = ELS_LS_ACC;
+			rp->rnid.rnid_fmt = fmt;
+			rp->rnid.rnid_cid_len = sizeof(rp->cid);
+			rp->cid.rnid_wwpn = htonll(lp->wwpn);
+			rp->cid.rnid_wwnn = htonll(lp->wwnn);
+			if (fmt == ELS_RNIDF_GEN) {
+				rp->rnid.rnid_sid_len = sizeof(rp->gen);
+				memcpy(&rp->gen, &lp->rnid_gen,
+				       sizeof(rp->gen));
+			}
+			sp = lp->tt.seq_start_next(sp);
+			f_ctl = FC_FC_SEQ_INIT | FC_FC_LAST_SEQ | FC_FC_END_SEQ;
+			fc_frame_setup(fp, FC_RCTL_ELS_REP, FC_TYPE_ELS);
+			lp->tt.seq_send(lp, sp, fp, f_ctl);
+		}
+	}
+	fc_frame_free(in_fp);
+}
+
+/*
+ * Handle received fabric logout request.
+ */
+static void fc_lport_recv_logo_req(struct fc_seq *sp, struct fc_frame *fp,
+				   struct fc_lport *lp)
+{
+	lp->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);
+	fc_lport_enter_reset(lp);
+	fc_frame_free(fp);
+}
+
+/*
+ * Start fabric login if the local port is in its initial state.
+ */
+int fc_fabric_login(struct fc_lport *lp)
+{
+	int rc = -1;
+
+	if (lp->state == LPORT_ST_NONE) {
+		fc_lport_lock(lp);
+		fc_lport_enter_reset(lp);
+		fc_lport_unlock(lp);
+		rc = 0;
+	}
+	return rc;
+}
+EXPORT_SYMBOL(fc_fabric_login);
+
+/**
+ * fc_linkup - link up notification
+ * @lp: pointer to the fc_lport whose link came up
+ **/
+void fc_linkup(struct fc_lport *lp)
+{
+	if ((lp->link_status & FC_LINK_UP) != FC_LINK_UP) {
+		lp->link_status |= FC_LINK_UP;
+		fc_lport_lock(lp);
+		if (lp->state == LPORT_ST_RESET)
+			lp->tt.lport_login(lp);
+		fc_lport_unlock(lp);
+	}
+}
+EXPORT_SYMBOL(fc_linkup);
+
+/**
+ * fc_linkdown - link down notification
+ * @lp: pointer to the fc_lport whose link went down
+ **/
+void fc_linkdown(struct fc_lport *lp)
+{
+	if ((lp->link_status & FC_LINK_UP) == FC_LINK_UP) {
+		lp->link_status &= ~(FC_LINK_UP);
+		fc_lport_enter_reset(lp);
+		lp->tt.scsi_cleanup(lp);
+	}
+}
+EXPORT_SYMBOL(fc_linkdown);
+
+void fc_pause(struct fc_lport *lp)
+{
+	lp->link_status |= FC_PAUSE;
+}
+EXPORT_SYMBOL(fc_pause);
+
+void fc_unpause(struct fc_lport *lp)
+{
+	lp->link_status &= ~(FC_PAUSE);
+}
+EXPORT_SYMBOL(fc_unpause);
+
+int fc_fabric_logoff(struct fc_lport *lp)
+{
+	fc_lport_lock(lp);
+	switch (lp->state) {
+	case LPORT_ST_NONE:
+		break;
+	case LPORT_ST_FLOGI:
+	case LPORT_ST_LOGO:
+	case LPORT_ST_RESET:
+		fc_lport_enter_reset(lp);
+		break;
+	case LPORT_ST_DNS:
+	case LPORT_ST_DNS_STOP:
+		fc_lport_enter_logo(lp);
+		break;
+	case LPORT_ST_REG_PN:
+	case LPORT_ST_REG_FT:
+	case LPORT_ST_SCR:
+	case LPORT_ST_READY:
+		lp->tt.disc_stop(lp);
+		break;
+	}
+	fc_lport_unlock(lp);
+	lp->tt.scsi_cleanup(lp);
+
+	return 0;
+}
+EXPORT_SYMBOL(fc_fabric_logoff);
+
+/**
+ * fc_lport_destroy - unregister a fc_lport
+ * @lp: fc_lport pointer to unregister
+ *
+ * Return value:
+ *	Always zero.
+ * Note:
+ * Exit routine for an fc_lport instance: aborts outstanding I/O and
+ * stops further frame transmission so other resources can be freed.
+ **/
+int fc_lport_destroy(struct fc_lport *lp)
+{
+	fc_lport_lock(lp);
+	fc_lport_state_enter(lp, LPORT_ST_LOGO);
+	fc_lport_unlock(lp);
+
+	cancel_delayed_work_sync(&lp->ns_disc_work);
+
+	lp->tt.scsi_abort_io(lp);
+
+	lp->tt.frame_send = fc_frame_drop;
+
+	lp->tt.exch_mgr_reset(lp->emp, 0, 0);
+
+	return 0;
+}
+EXPORT_SYMBOL(fc_lport_destroy);
+
+int fc_set_mfs(struct fc_lport *lp, u32 mfs)
+{
+	unsigned int old_mfs;
+	int rc = -1;
+
+	old_mfs = lp->mfs;
+
+	if (mfs >= FC_MIN_MAX_FRAME) {
+		mfs &= ~3;
+		WARN_ON((size_t) mfs < FC_MIN_MAX_FRAME);
+		if (mfs > FC_MAX_FRAME)
+			mfs = FC_MAX_FRAME;
+		mfs -= sizeof(struct fc_frame_header);
+		lp->mfs = mfs;
+		rc = 0;
+	}
+
+	if (!rc && mfs < old_mfs) {
+		lp->ns_disc_done = 0;
+		fc_lport_enter_reset(lp);
+	}
+	return rc;
+}
+EXPORT_SYMBOL(fc_set_mfs);
+
+/*
+ * re-enter state for retrying a request after a timeout or alloc failure.
+ */
+static void fc_lport_enter_retry(struct fc_lport *lp)
+{
+	switch (lp->state) {
+	case LPORT_ST_NONE:
+	case LPORT_ST_READY:
+	case LPORT_ST_RESET:
+	case LPORT_ST_DNS:
+	case LPORT_ST_DNS_STOP:
+	case LPORT_ST_REG_PN:
+	case LPORT_ST_REG_FT:
+	case LPORT_ST_SCR:
+		WARN_ON(1);
+		break;
+	case LPORT_ST_FLOGI:
+		fc_lport_enter_flogi(lp);
+		break;
+	case LPORT_ST_LOGO:
+		fc_lport_enter_logo(lp);
+		break;
+	}
+}
+
+/*
+ * enter next state for handling an exchange reject or retry exhaustion
+ * in the current state.
+ */
+static void fc_lport_enter_reject(struct fc_lport *lp)
+{
+	switch (lp->state) {
+	case LPORT_ST_NONE:
+	case LPORT_ST_READY:
+	case LPORT_ST_RESET:
+	case LPORT_ST_REG_PN:
+	case LPORT_ST_REG_FT:
+	case LPORT_ST_SCR:
+	case LPORT_ST_DNS_STOP:
+	case LPORT_ST_DNS:
+		WARN_ON(1);
+		break;
+	case LPORT_ST_FLOGI:
+		fc_lport_enter_flogi(lp);
+		break;
+	case LPORT_ST_LOGO:
+		fc_lport_enter_reset(lp);
+		break;
+	}
+}
+
+/*
+ * Handle resource allocation problem by retrying in a bit.
+ */
+static void fc_lport_retry(struct fc_lport *lp)
+{
+	if (lp->retry_count == 0)
+		FC_DBG("local port %6x alloc failure in state %s "
+		       "- will retry", lp->fid, fc_lport_state(lp));
+	if (lp->retry_count < lp->max_retry_count) {
+		lp->retry_count++;
+		mod_timer(&lp->state_timer,
+			  jiffies + msecs_to_jiffies(lp->e_d_tov));
+	} else {
+		FC_DBG("local port %6x alloc failure in state %s "
+		       "- retries exhausted", lp->fid,
+		       fc_lport_state(lp));
+		fc_lport_enter_reject(lp);
+	}
+}
+
+/*
+ * A received FLOGI request indicates a point-to-point connection.
+ * Accept it with the common service parameters indicating our N port.
+ * Set up to do a PLOGI if we have the higher-number WWPN.
+ */
+static void fc_lport_recv_flogi_req(struct fc_seq *sp_in,
+				    struct fc_frame *rx_fp,
+				    struct fc_lport *lp)
+{
+	struct fc_frame *fp;
+	struct fc_frame_header *fh;
+	struct fc_seq *sp;
+	struct fc_els_flogi *flp;
+	struct fc_els_flogi *new_flp;
+	u64 remote_wwpn;
+	u32 remote_fid;
+	u32 local_fid;
+	u32 f_ctl;
+
+	fh = fc_frame_header_get(rx_fp);
+	remote_fid = ntoh24(fh->fh_s_id);
+	flp = fc_frame_payload_get(rx_fp, sizeof(*flp));
+	if (!flp)
+		goto out;
+	remote_wwpn = get_unaligned_be64(&flp->fl_wwpn);
+	if (remote_wwpn == lp->wwpn) {
+		FC_DBG("FLOGI from port with same WWPN %llx "
+		       "possible configuration error.", remote_wwpn);
+		goto out;
+	}
+	FC_DBG("FLOGI from port WWPN %llx ", remote_wwpn);
+	fc_lport_lock(lp);
+
+	/*
+	 * XXX what is the right thing to do for FIDs?
+	 * The originator might expect our S_ID to be 0xfffffe.
+	 * But if so, both of us could end up with the same FID.
+	 */
+	local_fid = FC_LOCAL_PTP_FID_LO;
+	if (remote_wwpn < lp->wwpn) {
+		local_fid = FC_LOCAL_PTP_FID_HI;
+		if (!remote_fid || remote_fid == local_fid)
+			remote_fid = FC_LOCAL_PTP_FID_LO;
+	} else if (!remote_fid) {
+		remote_fid = FC_LOCAL_PTP_FID_HI;
+	}
+	fc_lport_set_fid(lp, local_fid);
+
+	fp = fc_frame_alloc(lp, sizeof(*flp));
+	if (fp) {
+		sp = lp->tt.seq_start_next(fr_seq(rx_fp));
+		new_flp = fc_frame_payload_get(fp, sizeof(*flp));
+		fc_lport_flogi_fill(lp, new_flp, ELS_FLOGI);
+		new_flp->fl_cmd = (u8) ELS_LS_ACC;
+
+		/*
+		 * Send the response.  If this fails, the originator should
+		 * repeat the sequence.
+		 */
+		f_ctl = FC_FC_LAST_SEQ | FC_FC_END_SEQ;
+		fc_frame_setup(fp, FC_RCTL_ELS_REP, FC_TYPE_ELS);
+		lp->tt.seq_send(lp, sp, fp, f_ctl);
+
+	} else {
+		fc_lport_retry(lp);
+	}
+	fc_lport_ptp_setup(lp, remote_fid, remote_wwpn,
+			   get_unaligned_be64(&flp->fl_wwnn));
+	fc_lport_unlock(lp);
+	if (lp->tt.disc_start(lp))
+		FC_DBG("target discovery start error\n");
+out:
+	fc_frame_free(rx_fp);
+}
+
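+/*
+ * Receive a request frame for the local port.
+ * Handle ELS requests that need no session here; pass anything else
+ * to the remote port's request handler or reject it.
+ */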
+static void fc_lport_recv(struct fc_lport *lp, struct fc_seq *sp,
+			  struct fc_frame *fp)
+{
+	struct fc_frame_header *fh = fc_frame_header_get(fp);
+	void (*recv) (struct fc_seq *, struct fc_frame *, struct fc_lport *);
+	struct fc_rport *rport;
+	struct fc_rport_libfc_priv *rp;
+	u32 s_id;
+	u32 d_id;
+	struct fc_seq_els_data rjt_data;
+
+	/*
+	 * Handle special ELS cases like FLOGI, LOGO, and
+	 * RSCN here.  These don't require a session.
+	 * Even if we had a session, it might not be ready.
+	 */
+	if (fh->fh_type == FC_TYPE_ELS && fh->fh_r_ctl == FC_RCTL_ELS_REQ) {
+		/*
+		 * Check opcode.
+		 */
+		recv = NULL;
+		switch (fc_frame_payload_op(fp)) {
+		case ELS_FLOGI:
+			recv = fc_lport_recv_flogi_req;
+			break;
+		case ELS_LOGO:
+			fh = fc_frame_header_get(fp);
+			if (ntoh24(fh->fh_s_id) == FC_FID_FLOGI)
+				recv = fc_lport_recv_logo_req;
+			break;
+		case ELS_RSCN:
+			recv = lp->tt.disc_recv_req;
+			break;
+		case ELS_ECHO:
+			recv = fc_lport_echo_req;
+			break;
+		case ELS_RLIR:
+			recv = fc_lport_rlir_req;
+			break;
+		case ELS_RNID:
+			recv = fc_lport_rnid_req;
+			break;
+		}
+
+		if (recv)
+			recv(sp, fp, lp);
+		else {
+			/*
+			 * Find session.
+			 * If this is a new incoming PLOGI, we won't find it.
+			 */
+			s_id = ntoh24(fh->fh_s_id);
+			d_id = ntoh24(fh->fh_d_id);
+
+			rport = lp->tt.rport_lookup(lp, s_id);
+			if (rport) {
+				rp = rport->dd_data;
+				lp->tt.rport_recv_req(sp, fp, rp);
+				put_device(&rport->dev); /* hold from lookup */
+			} else {
+				rjt_data.fp = NULL;
+				rjt_data.reason = ELS_RJT_UNAB;
+				rjt_data.explan = ELS_EXPL_NONE;
+				lp->tt.seq_els_rsp_send(sp,
+							ELS_LS_RJT, &rjt_data);
+				fc_frame_free(fp);
+			}
+		}
+	} else {
+		FC_DBG("dropping invalid frame (eof %x)", fr_eof(fp));
+		fc_frame_free(fp);
+	}
+
+	/*
+	 * The common exch_done for all requests may not be good
+	 * if any request requires a longer hold on the exchange. XXX
+	 */
+	lp->tt.exch_done(sp);
+}
+
+/*
+ * Put the local port back into the initial state.  Reset all sessions.
+ * This is called after a SCSI reset or the driver is unloading
+ * or the program is exiting.
+ */
+int fc_lport_enter_reset(struct fc_lport *lp)
+{
+	if (fc_lport_debug)
+		FC_DBG("Processing RESET state");
+
+	if (lp->dns_rp) {
+		fc_remote_port_delete(lp->dns_rp);
+		lp->dns_rp = NULL;
+	}
+	fc_lport_ptp_clear(lp);
+
+	/*
+	 * Setting state RESET keeps fc_lport_error() callbacks
+	 * by exch_mgr_reset() from recursing on the lock.
+	 * It also causes fc_lport_sess_event() to ignore events.
+	 * The lock is held for the duration of the time in RESET state.
+	 */
+	fc_lport_state_enter(lp, LPORT_ST_RESET);
+	lp->tt.exch_mgr_reset(lp->emp, 0, 0);
+	fc_lport_set_fid(lp, 0);
+	if ((lp->link_status & FC_LINK_UP) == FC_LINK_UP)
+		fc_lport_enter_flogi(lp);
+	return 0;
+}
+EXPORT_SYMBOL(fc_lport_enter_reset);
+
+/*
+ * Handle errors on local port requests.
+ * Don't get locks if in RESET state.
+ * The only possible errors so far are exchange TIMEOUT and CLOSED (reset).
+ */
+static void fc_lport_error(struct fc_lport *lp, struct fc_frame *fp)
+{
+	if (lp->state == LPORT_ST_RESET)
+		return;
+
+	fc_lport_lock(lp);
+	if (PTR_ERR(fp) == -FC_EX_TIMEOUT) {
+		if (lp->retry_count < lp->max_retry_count) {
+			lp->retry_count++;
+			fc_lport_enter_retry(lp);
+		} else {
+			fc_lport_enter_reject(lp);
+		}
+	}
+	if (fc_lport_debug)
+		FC_DBG("error %ld retries %d limit %d",
+		       PTR_ERR(fp), lp->retry_count, lp->max_retry_count);
+	fc_lport_unlock(lp);
+}
+
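+/*
+ * State timer expired: retry the request pending in the current state.
+ */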
+static void fc_lport_timeout(unsigned long lp_arg)
+{
+	struct fc_lport *lp = (struct fc_lport *)lp_arg;
+
+	fc_lport_lock(lp);
+	fc_lport_enter_retry(lp);
+	fc_lport_unlock(lp);
+}
+
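+/*
+ * Handle the response to our fabric LOGO request; enter reset on success.
+ */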
+static void fc_lport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
+			       void *lp_arg)
+{
+	struct fc_lport *lp = lp_arg;
+
+	if (IS_ERR(fp))
+		fc_lport_error(lp, fp);
+	else {
+		fc_frame_free(fp);
+		fc_lport_lock(lp);
+		fc_lport_enter_reset(lp);
+		fc_lport_unlock(lp);
+	}
+}
+
+/* Logout of the FC fabric */
+static void fc_lport_enter_logo(struct fc_lport *lp)
+{
+	struct fc_frame *fp;
+	struct fc_els_logo *logo;
+
+	if (fc_lport_debug)
+		FC_DBG("Processing LOGO state");
+
+	fc_lport_state_enter(lp, LPORT_ST_LOGO);
+
+	/* DNS session should be closed so we can release it here */
+	if (lp->dns_rp) {
+		fc_remote_port_delete(lp->dns_rp);
+		lp->dns_rp = NULL;
+	}
+
+	fp = fc_frame_alloc(lp, sizeof(*logo));
+	if (!fp) {
+		FC_DBG("failed to allocate frame\n");
+		return;
+	}
+
+	logo = fc_frame_payload_get(fp, sizeof(*logo));
+	memset(logo, 0, sizeof(*logo));
+	logo->fl_cmd = ELS_LOGO;
+	hton24(logo->fl_n_port_id, lp->fid);
+	logo->fl_n_port_wwn = htonll(lp->wwpn);
+
+	fc_frame_setup(fp, FC_RCTL_ELS_REQ, FC_TYPE_ELS);
+	fc_frame_set_offset(fp, 0);
+
+	lp->tt.exch_seq_send(lp, fp,
+			      fc_lport_logo_resp,
+			      lp, lp->e_d_tov,
+			      lp->fid, FC_FID_FLOGI,
+			      FC_FC_SEQ_INIT | FC_FC_END_SEQ);
+}
+
+static int fc_lport_logout(struct fc_lport *lp)
+{
+	fc_lport_lock(lp);
+	if (lp->state != LPORT_ST_LOGO)
+		fc_lport_enter_logo(lp);
+	fc_lport_unlock(lp);
+	return 0;
+}
+
+/*
+ * Handle incoming ELS FLOGI response.
+ * Save parameters of remote switch.  Finish exchange.
+ */
+static void
+fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp, void *lp_arg)
+{
+	struct fc_lport *lp = lp_arg;
+	struct fc_frame_header *fh;
+	struct fc_els_flogi *flp;
+	u32 did;
+	u16 csp_flags;
+	unsigned int r_a_tov;
+	unsigned int e_d_tov;
+	u16 mfs;
+
+	if (IS_ERR(fp))
+		goto out;
+
+	fh = fc_frame_header_get(fp);
+	did = ntoh24(fh->fh_d_id);
+	if (fc_frame_payload_op(fp) == ELS_LS_ACC && did != 0) {
+		if (fc_lport_debug)
+			FC_DBG("assigned fid %x", did);
+		fc_lport_lock(lp);
+		fc_lport_set_fid(lp, did);
+		flp = fc_frame_payload_get(fp, sizeof(*flp));
+		if (flp) {
+			mfs = ntohs(flp->fl_csp.sp_bb_data) &
+				FC_SP_BB_DATA_MASK;
+			if (mfs >= FC_SP_MIN_MAX_PAYLOAD &&
+			    mfs < lp->mfs)
+				lp->mfs = mfs;
+			csp_flags = ntohs(flp->fl_csp.sp_features);
+			r_a_tov = ntohl(flp->fl_csp.sp_r_a_tov);
+			e_d_tov = ntohl(flp->fl_csp.sp_e_d_tov);
+			if (csp_flags & FC_SP_FT_EDTR)
+				e_d_tov /= 1000000;
+			if ((csp_flags & FC_SP_FT_FPORT) == 0) {
+				if (e_d_tov > lp->e_d_tov)
+					lp->e_d_tov = e_d_tov;
+				lp->r_a_tov = 2 * e_d_tov;
+				FC_DBG("point-to-point mode");
+				fc_lport_ptp_setup(lp, ntoh24(fh->fh_s_id),
+						   get_unaligned_be64(
+							   &flp->fl_wwpn),
+						   get_unaligned_be64(
+							   &flp->fl_wwnn));
+			} else {
+				lp->e_d_tov = e_d_tov;
+				lp->r_a_tov = r_a_tov;
+				lp->tt.disc_enter_dns(lp);
+			}
+		}
+		fc_lport_unlock(lp);
+		if (flp) {
+			csp_flags = ntohs(flp->fl_csp.sp_features);
+			if ((csp_flags & FC_SP_FT_FPORT) == 0) {
+				if (lp->tt.disc_start(lp))
+					FC_DBG("target disc start error\n");
+			}
+		}
+	} else {
+		FC_DBG("bad FLOGI response\n");
+	}
+	fc_frame_free(fp);
+	return;
+out:
+	fc_lport_error(lp, fp);
+}
+
+/*
+ * Send ELS (extended link service) FLOGI request to peer.
+ */
+static void fc_lport_flogi_send(struct fc_lport *lp)
+{
+	struct fc_frame *fp;
+	struct fc_els_flogi *flp;
+
+	fp = fc_frame_alloc(lp, sizeof(*flp));
+	if (!fp) {
+		fc_lport_retry(lp);
+		return;
+	}
+
+	flp = fc_frame_payload_get(fp, sizeof(*flp));
+	fc_lport_flogi_fill(lp, flp, ELS_FLOGI);
+
+	fc_frame_setup(fp, FC_RCTL_ELS_REQ, FC_TYPE_ELS);
+	fc_frame_set_offset(fp, 0);
+
+	if (!lp->tt.exch_seq_send(lp, fp,
+				   fc_lport_flogi_resp,
+				   lp, lp->e_d_tov,
+				   0, FC_FID_FLOGI,
+				   FC_FC_SEQ_INIT | FC_FC_END_SEQ))
+		fc_lport_retry(lp);
+}
+
+void fc_lport_enter_flogi(struct fc_lport *lp)
+{
+	if (fc_lport_debug)
+		FC_DBG("Processing FLOGI state");
+	fc_lport_state_enter(lp, LPORT_ST_FLOGI);
+	fc_lport_flogi_send(lp);
+}
+
+/* Configure a fc_lport */
+int fc_lport_config(struct fc_lport *lp)
+{
+	setup_timer(&lp->state_timer, fc_lport_timeout, (unsigned long)lp);
+	spin_lock_init(&lp->state_lock);
+
+	fc_lport_lock(lp);
+	fc_lport_state_enter(lp, LPORT_ST_NONE);
+	fc_lport_unlock(lp);
+
+	lp->ns_disc_delay = DNS_DELAY;
+
+	fc_lport_add_fc4_type(lp, FC_TYPE_FCP);
+	fc_lport_add_fc4_type(lp, FC_TYPE_CT);
+
+	return 0;
+}
+EXPORT_SYMBOL(fc_lport_config);
+
+int fc_lport_init(struct fc_lport *lp)
+{
+	if (!lp->tt.lport_recv)
+		lp->tt.lport_recv = fc_lport_recv;
+
+	if (!lp->tt.lport_login)
+		lp->tt.lport_login = fc_lport_enter_reset;
+
+	if (!lp->tt.lport_reset)
+		lp->tt.lport_reset = fc_lport_enter_reset;
+
+	if (!lp->tt.lport_logout)
+		lp->tt.lport_logout = fc_lport_logout;
+
+	return 0;
+}
+EXPORT_SYMBOL(fc_lport_init);
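+
+/*
+ * A minimal sketch of how a driver might bring up a local port using the
+ * entry points above, assuming it has already allocated "lp" and filled
+ * in the transport template (lp->tt.frame_send and friends):
+ *
+ *	fc_lport_config(lp);
+ *	fc_lport_init(lp);
+ *	fc_fabric_login(lp);
+ *	fc_linkup(lp);
+ */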
diff --git a/drivers/scsi/libfc/fc_ns.c b/drivers/scsi/libfc/fc_ns.c
new file mode 100644
index 0000000..5ac0e6f
--- /dev/null
+++ b/drivers/scsi/libfc/fc_ns.c
@@ -0,0 +1,1229 @@
+/*
+ * Copyright(c) 2007 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+/*
+ * Target Discovery
+ * Actually, this discovers all FC-4 remote ports, including FCP initiators.
+ */
+
+#include <linux/timer.h>
+#include <linux/err.h>
+#include <asm/unaligned.h>
+
+#include <scsi/fc/fc_gs.h>
+
+#include <scsi/libfc/libfc.h>
+
+#define FC_NS_RETRY_LIMIT	3	/* max retries */
+#define FC_NS_RETRY_DELAY	500UL	/* (msecs) delay */
+
+int fc_ns_debug;
+
+static void fc_ns_gpn_ft_req(struct fc_lport *);
+static void fc_ns_gpn_ft_resp(struct fc_seq *, struct fc_frame *, void *);
+static int fc_ns_new_target(struct fc_lport *, struct fc_rport *,
+			    struct fc_rport_identifiers *);
+static void fc_ns_del_target(struct fc_lport *, struct fc_rport *);
+static void fc_ns_disc_done(struct fc_lport *);
+static void fcdt_ns_error(struct fc_lport *, struct fc_frame *);
+static void fc_ns_timeout(struct work_struct *);
+
+struct fc_ns_port {
+	struct fc_lport *lp;
+	struct list_head peers;
+	struct fc_rport_identifiers ids;
+};
+
+static int fc_ns_gpn_id_req(struct fc_lport *, struct fc_ns_port *);
+static void fc_ns_gpn_id_resp(struct fc_seq *, struct fc_frame *, void *);
+static void fc_ns_gpn_id_error(struct fc_ns_port *rp, struct fc_frame *fp);
+
+static int fc_ns_gnn_id_req(struct fc_lport *, struct fc_ns_port *);
+static void fc_ns_gnn_id_resp(struct fc_seq *, struct fc_frame *, void *);
+static void fc_ns_gnn_id_error(struct fc_ns_port *, struct fc_frame *);
+static void fc_ns_enter_reg_pn(struct fc_lport *lp);
+static void fc_ns_error(struct fc_lport *lp, struct fc_frame *fp);
+static void fc_lport_fill_dns_hdr(struct fc_lport *lp, struct fc_ct_hdr *ct,
+				  unsigned int op, unsigned int req_size);
+static void fc_ns_resp(struct fc_seq *sp, struct fc_frame *fp,
+		       void *lp_arg);
+static void fc_ns_retry(struct fc_lport *lp);
+static void fc_ns_single(struct fc_lport *, struct fc_ns_port *);
+static int fc_ns_restart(struct fc_lport *);
+
+
+/*
+ * Handle received RSCN - registered state change notification.
+ */
+static void fc_ns_rscn_req(struct fc_seq *sp, struct fc_frame *fp,
+			   struct fc_lport *lp)
+{
+	struct fc_els_rscn *rp;
+	struct fc_els_rscn_page *pp;
+	struct fc_seq_els_data rjt_data;
+	unsigned int len;
+	int redisc = 0;
+	enum fc_els_rscn_ev_qual ev_qual;
+	enum fc_els_rscn_addr_fmt fmt;
+	LIST_HEAD(disc_list);
+	struct fc_ns_port *dp, *next;
+
+	rp = fc_frame_payload_get(fp, sizeof(*rp));
+
+	if (!rp || rp->rscn_page_len != sizeof(*pp))
+		goto reject;
+
+	len = ntohs(rp->rscn_plen);
+	if (len < sizeof(*rp))
+		goto reject;
+	len -= sizeof(*rp);
+
+	for (pp = (void *)(rp + 1); len; len -= sizeof(*pp), pp++) {
+		ev_qual = pp->rscn_page_flags >> ELS_RSCN_EV_QUAL_BIT;
+		ev_qual &= ELS_RSCN_EV_QUAL_MASK;
+		fmt = pp->rscn_page_flags >> ELS_RSCN_ADDR_FMT_BIT;
+		fmt &= ELS_RSCN_ADDR_FMT_MASK;
+		/*
+		 * if we get an address format other than port
+		 * (area, domain, fabric), then do a full discovery
+		 */
+		switch (fmt) {
+		case ELS_ADDR_FMT_PORT:
+			dp = kzalloc(sizeof(*dp), GFP_KERNEL);
+			if (!dp) {
+				redisc = 1;
+				break;
+			}
+			dp->lp = lp;
+			dp->ids.port_id = ntoh24(pp->rscn_fid);
+			dp->ids.port_name = -1;
+			dp->ids.node_name = -1;
+			dp->ids.roles = FC_RPORT_ROLE_UNKNOWN;
+			list_add_tail(&dp->peers, &disc_list);
+			break;
+		case ELS_ADDR_FMT_AREA:
+		case ELS_ADDR_FMT_DOM:
+		case ELS_ADDR_FMT_FAB:
+		default:
+			redisc = 1;
+			break;
+		}
+	}
+	lp->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);
+	if (redisc) {
+		if (fc_ns_debug)
+			FC_DBG("RSCN received: rediscovering");
+		list_for_each_entry_safe(dp, next, &disc_list, peers) {
+			list_del(&dp->peers);
+			kfree(dp);
+		}
+		fc_ns_restart(lp);
+	} else {
+		if (fc_ns_debug)
+			FC_DBG("RSCN received: not rediscovering. "
+				"redisc %d state %d in_prog %d",
+				redisc, lp->state, lp->ns_disc_pending);
+		list_for_each_entry_safe(dp, next, &disc_list, peers) {
+			list_del(&dp->peers);
+			fc_ns_single(lp, dp);
+		}
+	}
+	fc_frame_free(fp);
+	return;
+reject:
+	rjt_data.fp = NULL;
+	rjt_data.reason = ELS_RJT_LOGIC;
+	rjt_data.explan = ELS_EXPL_NONE;
+	lp->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
+	fc_frame_free(fp);
+}
+
+static void fc_ns_recv_req(struct fc_seq *sp, struct fc_frame *fp,
+			   struct fc_lport *lp)
+{
+	switch (fc_frame_payload_op(fp)) {
+	case ELS_RSCN:
+		fc_ns_rscn_req(sp, fp, lp);
+		break;
+	default:
+		FC_DBG("fc_ns recieved an unexpected request\n");
+		break;
+	}
+}
+
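+/*
+ * Handle the response to our SCR (State Change Registration) request.
+ */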
+static void fc_ns_scr_resp(struct fc_seq *sp, struct fc_frame *fp,
+			   void *lp_arg)
+{
+	struct fc_lport *lp = lp_arg;
+	int err;
+
+	if (IS_ERR(fp))
+		fc_ns_error(lp, fp);
+	else {
+		fc_lport_lock(lp);
+		fc_lport_state_enter(lp, LPORT_ST_READY);
+		fc_lport_unlock(lp);
+		err = lp->tt.disc_start(lp);
+		if (err)
+			FC_DBG("target discovery start error\n");
+		fc_frame_free(fp);
+	}
+}
+
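+/*
+ * Register with the fabric controller for state change notifications (SCR).
+ */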
+static void fc_ns_enter_scr(struct fc_lport *lp)
+{
+	struct fc_frame *fp;
+	struct fc_els_scr *scr;
+
+	if (fc_ns_debug)
+		FC_DBG("Processing SCR state");
+
+	fc_lport_state_enter(lp, LPORT_ST_SCR);
+
+	fp = fc_frame_alloc(lp, sizeof(*scr));
+	if (!fp) {
+		fc_ns_retry(lp);
+		return;
+	}
+	scr = fc_frame_payload_get(fp, sizeof(*scr));
+	memset(scr, 0, sizeof(*scr));
+	scr->scr_cmd = ELS_SCR;
+	scr->scr_reg_func = ELS_SCRF_FULL;
+	fc_frame_setup(fp, FC_RCTL_ELS_REQ, FC_TYPE_ELS);
+	fc_frame_set_offset(fp, 0);
+
+	lp->tt.exch_seq_send(lp, fp,
+			     fc_ns_scr_resp,
+			     lp, lp->e_d_tov,
+			     lp->fid, FC_FID_FCTRL,
+			     FC_FC_SEQ_INIT | FC_FC_END_SEQ);
+}
+
+/*
+ * Register FC4-types with name server.
+ */
+static void fc_ns_enter_reg_ft(struct fc_lport *lp)
+{
+	struct fc_frame *fp;
+	struct req {
+		struct fc_ct_hdr ct;
+		struct fc_ns_fid fid;	/* port ID object */
+		struct fc_ns_fts fts;	/* FC4-types object */
+	} *req;
+	struct fc_ns_fts *lps;
+	int i;
+
+	if (fc_ns_debug)
+		FC_DBG("Processing REG_FT state");
+
+	fc_lport_state_enter(lp, LPORT_ST_REG_FT);
+
+	lps = &lp->fcts;
+	i = sizeof(lps->ff_type_map) / sizeof(lps->ff_type_map[0]);
+	while (--i >= 0)
+		if (ntohl(lps->ff_type_map[i]) != 0)
+			break;
+	if (i >= 0) {
+		fp = fc_frame_alloc(lp, sizeof(*req));
+		if (fp) {
+			req = fc_frame_payload_get(fp, sizeof(*req));
+			fc_lport_fill_dns_hdr(lp, &req->ct,
+					      FC_NS_RFT_ID,
+					      sizeof(*req) -
+					      sizeof(struct fc_ct_hdr));
+			hton24(req->fid.fp_fid, lp->fid);
+			req->fts = *lps;
+			fc_frame_setup(fp, FC_RCTL_DD_UNSOL_CTL, FC_TYPE_CT);
+			if (!lp->tt.exch_seq_send(lp, fp,
+						  fc_ns_resp, lp,
+						  lp->e_d_tov,
+						  lp->fid,
+						  lp->dns_rp->port_id,
+						  FC_FC_SEQ_INIT |
+						  FC_FC_END_SEQ))
+				fc_ns_retry(lp);
+		} else {
+			fc_ns_retry(lp);
+		}
+	} else {
+		fc_ns_enter_scr(lp);
+	}
+}
+
+/*
+ * enter next state for handling an exchange reject or retry exhaustion
+ * in the current state.
+ */
+static void fc_ns_enter_reject(struct fc_lport *lp)
+{
+	switch (lp->state) {
+	case LPORT_ST_NONE:
+	case LPORT_ST_READY:
+	case LPORT_ST_RESET:
+	case LPORT_ST_FLOGI:
+	case LPORT_ST_LOGO:
+		WARN_ON(1);
+		break;
+	case LPORT_ST_REG_PN:
+		fc_ns_enter_reg_ft(lp);
+		break;
+	case LPORT_ST_REG_FT:
+		fc_ns_enter_scr(lp);
+		break;
+	case LPORT_ST_SCR:
+	case LPORT_ST_DNS_STOP:
+		lp->tt.disc_stop(lp);
+		break;
+	case LPORT_ST_DNS:
+		lp->tt.lport_reset(lp);
+		break;
+	}
+}
+
+static void fc_ns_enter_retry(struct fc_lport *lp)
+{
+	switch (lp->state) {
+	case LPORT_ST_NONE:
+	case LPORT_ST_RESET:
+	case LPORT_ST_READY:
+	case LPORT_ST_FLOGI:
+	case LPORT_ST_LOGO:
+		WARN_ON(1);
+		break;
+	case LPORT_ST_DNS:
+		lp->tt.disc_enter_dns(lp);
+		break;
+	case LPORT_ST_DNS_STOP:
+		lp->tt.disc_stop(lp);
+		break;
+	case LPORT_ST_REG_PN:
+		fc_ns_enter_reg_pn(lp);
+		break;
+	case LPORT_ST_REG_FT:
+		fc_ns_enter_reg_ft(lp);
+		break;
+	case LPORT_ST_SCR:
+		fc_ns_enter_scr(lp);
+		break;
+	}
+}
+
+/*
+ * Refresh target discovery, perhaps due to an RSCN.
+ * A configurable delay is introduced to collect any subsequent RSCNs.
+ */
+static int fc_ns_restart(struct fc_lport *lp)
+{
+	fc_lport_lock(lp);
+	if (!lp->ns_disc_requested && !lp->ns_disc_pending) {
+		schedule_delayed_work(&lp->ns_disc_work,
+				msecs_to_jiffies(lp->ns_disc_delay * 1000));
+	}
+	lp->ns_disc_requested = 1;
+	fc_lport_unlock(lp);
+	return 0;
+}
+
+/* unlocked variant of scsi_target_block from scsi_lib.c */
+#include "../scsi_priv.h"
+
+static void __device_block(struct scsi_device *sdev, void *data)
+{
+	scsi_internal_device_block(sdev);
+}
+
+static int __target_block(struct device *dev, void *data)
+{
+	if (scsi_is_target_device(dev))
+		__starget_for_each_device(to_scsi_target(dev),
+					  NULL, __device_block);
+	return 0;
+}
+
+static void __scsi_target_block(struct device *dev)
+{
+	if (scsi_is_target_device(dev))
+		__starget_for_each_device(to_scsi_target(dev),
+					  NULL, __device_block);
+	else
+		device_for_each_child(dev, NULL, __target_block);
+}
+
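+/*
+ * Block all online remote ports except the dNS rport and mark them
+ * DEVLOSS_PENDING before rediscovery.
+ */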
+static void fc_block_rports(struct fc_lport *lp)
+{
+	struct Scsi_Host *shost = lp->host;
+	struct fc_rport *rport;
+	unsigned long flags;
+
+	spin_lock_irqsave(shost->host_lock, flags);
+	list_for_each_entry(rport, &fc_host_rports(shost), peers) {
+		/* protect the name service remote port */
+		if (rport == lp->dns_rp)
+			continue;
+		if (rport->port_state != FC_PORTSTATE_ONLINE)
+			continue;
+		rport->port_state = FC_PORTSTATE_BLOCKED;
+		rport->flags |= FC_RPORT_DEVLOSS_PENDING;
+		__scsi_target_block(&rport->dev);
+	}
+	spin_unlock_irqrestore(shost->host_lock, flags);
+}
+
+/*
+ * Fibre Channel Target discovery.
+ *
+ * Returns non-zero if discovery cannot be started.
+ *
+ * Callback is called for each target remote port found in discovery.
+ * When discovery is complete, the callback is called with a NULL remote port.
+ * Discovery may be restarted after an RSCN is received, causing the
+ * callback to be called after discovery complete is indicated.
+ */
+int fc_ns_disc_start(struct fc_lport *lp)
+{
+	struct fc_rport *rport;
+	int error;
+	struct fc_rport_identifiers ids;
+
+	fc_lport_lock(lp);
+
+	/*
+	 * If not ready, or already running discovery, just set request flag.
+	 */
+	if (!fc_lport_test_ready(lp) || lp->ns_disc_pending) {
+		lp->ns_disc_requested = 1;
+		fc_lport_unlock(lp);
+		return 0;
+	}
+	lp->ns_disc_pending = 1;
+	lp->ns_disc_requested = 0;
+	lp->ns_disc_retry_count = 0;
+
+	/*
+	 * Handle point-to-point mode as a simple discovery
+	 * of the remote port.
+	 */
+	rport = lp->ptp_rp;
+	if (rport) {
+		ids.port_id = rport->port_id;
+		ids.port_name = rport->port_name;
+		ids.node_name = rport->node_name;
+		ids.roles = FC_RPORT_ROLE_UNKNOWN;
+		get_device(&rport->dev);
+		fc_lport_unlock(lp);
+		error = fc_ns_new_target(lp, rport, &ids);
+		put_device(&rport->dev);
+		if (!error)
+			fc_ns_disc_done(lp);
+	} else {
+		fc_lport_unlock(lp);
+		fc_block_rports(lp);
+		fc_ns_gpn_ft_req(lp);	/* get ports by FC-4 type */
+		error = 0;
+	}
+	return error;
+}
+
+/*
+ * Handle resource allocation problem by retrying in a bit.
+ */
+static void fc_ns_retry(struct fc_lport *lp)
+{
+	if (lp->retry_count == 0)
+		FC_DBG("local port %6x alloc failure "
+		       "- will retry", lp->fid);
+	if (lp->retry_count < lp->max_retry_count) {
+		lp->retry_count++;
+		mod_timer(&lp->state_timer,
+			  jiffies + msecs_to_jiffies(lp->e_d_tov));
+	} else {
+		FC_DBG("local port %6x alloc failure "
+		       "- retries exhausted", lp->fid);
+		fc_ns_enter_reject(lp);
+	}
+}
+
+/*
+ * Handle errors on local port requests.
+ * Don't get locks if in RESET state.
+ * The only possible errors so far are exchange TIMEOUT and CLOSED (reset).
+ */
+static void fc_ns_error(struct fc_lport *lp, struct fc_frame *fp)
+{
+	if (lp->state == LPORT_ST_RESET)
+		return;
+
+	fc_lport_lock(lp);
+	if (PTR_ERR(fp) == -FC_EX_TIMEOUT) {
+		if (lp->retry_count < lp->max_retry_count) {
+			lp->retry_count++;
+			fc_ns_enter_retry(lp);
+		} else {
+			fc_ns_enter_reject(lp);
+		}
+	}
+	if (fc_ns_debug)
+		FC_DBG("error %ld retries %d limit %d",
+		       PTR_ERR(fp), lp->retry_count, lp->max_retry_count);
+	fc_lport_unlock(lp);
+}
+
+/*
+ * Restart discovery after a delay due to resource shortages.
+ * If the error persists, the discovery will be abandoned.
+ */
+static void fcdt_ns_retry(struct fc_lport *lp)
+{
+	unsigned long delay = FC_NS_RETRY_DELAY;
+
+	if (!lp->ns_disc_retry_count)
+		delay /= 4;	/* timeout faster first time */
+	if (lp->ns_disc_retry_count++ < FC_NS_RETRY_LIMIT)
+		schedule_delayed_work(&lp->ns_disc_work,
+				      msecs_to_jiffies(delay));
+	else
+		fc_ns_disc_done(lp);
+}
+
+/*
+ * Test for dNS accept in response payload.
+ */
+static int fc_lport_dns_acc(struct fc_frame *fp)
+{
+	struct fc_frame_header *fh;
+	struct fc_ct_hdr *ct;
+	int rc = 0;
+
+	fh = fc_frame_header_get(fp);
+	ct = fc_frame_payload_get(fp, sizeof(*ct));
+	if (fh && ct && fh->fh_type == FC_TYPE_CT &&
+	    ct->ct_fs_type == FC_FST_DIR &&
+	    ct->ct_fs_subtype == FC_NS_SUBTYPE &&
+	    ntohs(ct->ct_cmd) == FC_FS_ACC) {
+		rc = 1;
+	}
+	return rc;
+}
+
+/*
+ * Handle response from name server.
+ */
+static void
+fc_ns_resp(struct fc_seq *sp, struct fc_frame *fp, void *lp_arg)
+{
+	struct fc_lport *lp = lp_arg;
+
+	if (!IS_ERR(fp)) {
+		fc_lport_lock(lp);
+		del_timer(&lp->state_timer);
+		if (fc_lport_dns_acc(fp)) {
+			if (lp->state == LPORT_ST_REG_PN)
+				fc_ns_enter_reg_ft(lp);
+			else
+				fc_ns_enter_scr(lp);
+
+		} else {
+			fc_ns_retry(lp);
+		}
+		fc_lport_unlock(lp);
+		fc_frame_free(fp);
+	} else
+		fc_ns_error(lp, fp);
+}
+
+/*
+ * Handle new target found by discovery.
+ * Create remote port and session if needed.
+ * Ignore returns of our own FID & WWPN.
+ *
+ * If a non-NULL rport is passed in, it is held by the caller, but not for us.
+ *
+ * Events delivered are:
+ *  FC_EV_READY, when remote port is rediscovered.
+ */
+static int fc_ns_new_target(struct fc_lport *lp,
+			    struct fc_rport *rport,
+			    struct fc_rport_identifiers *ids)
+{
+	struct fc_rport_libfc_priv *rp;
+	int error = 0;
+
+	if (rport && ids->port_name) {
+		if (rport->port_name == -1) {
+			/*
+			 * Set WWN and fall through to notify of create.
+			 */
+			fc_rport_set_name(rport, ids->port_name,
+					  rport->node_name);
+		} else if (rport->port_name != ids->port_name) {
+			/*
+			 * This is a new port with the same FCID as
+			 * a previously-discovered port.  Presumably the old
+			 * port logged out and a new port logged in and was
+			 * assigned the same FCID.  This should be rare.
+			 * Delete the old one and fall thru to re-create.
+			 */
+			fc_ns_del_target(lp, rport);
+			rport = NULL;
+		}
+	}
+	if (((ids->port_name != -1) || (ids->port_id != -1)) &&
+	    ids->port_id != lp->fid && ids->port_name != lp->wwpn) {
+		if (!rport) {
+			rport = lp->tt.rport_lookup(lp, ids->port_id);
+			if (rport == NULL)
+				rport = lp->tt.rport_create(lp, ids);
+			if (!rport)
+				error = -ENOMEM;
+		}
+		if (rport) {
+			rp = rport->dd_data;
+			rp->rp_state = RPORT_ST_INIT;
+			lp->tt.rport_login(rport);
+		}
+	}
+	return error;
+}
+
+/*
+ * Delete the remote port.
+ */
+static void fc_ns_del_target(struct fc_lport *lp, struct fc_rport *rport)
+{
+	lp->tt.rport_reset(rport);
+	fc_remote_port_delete(rport);	/* release hold from create */
+}
+
+/*
+ * Done with discovery
+ */
+static void fc_ns_disc_done(struct fc_lport *lp)
+{
+	lp->ns_disc_done = 1;
+	lp->ns_disc_pending = 0;
+	if (lp->ns_disc_requested)
+		lp->tt.disc_start(lp);
+}
+
+/*
+ * Fill in request header.
+ */
+static void fc_ns_fill_dns_hdr(struct fc_lport *lp, struct fc_ct_hdr *ct,
+			       unsigned int op, unsigned int req_size)
+{
+	memset(ct, 0, sizeof(*ct) + req_size);
+	ct->ct_rev = FC_CT_REV;
+	ct->ct_fs_type = FC_FST_DIR;
+	ct->ct_fs_subtype = FC_NS_SUBTYPE;
+	ct->ct_cmd = htons((u16) op);
+}
+
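+/*
+ * Send a Get Port Names by FC-4 type (GPN_FT) request to the name server.
+ */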
+static void fc_ns_gpn_ft_req(struct fc_lport *lp)
+{
+	struct fc_frame *fp;
+	struct fc_seq *sp = NULL;
+	struct req {
+		struct fc_ct_hdr ct;
+		struct fc_ns_gid_ft gid;
+	} *rp;
+	int error = 0;
+
+	lp->ns_disc_buf_len = 0;
+	lp->ns_disc_seq_count = 0;
+	fp = fc_frame_alloc(lp, sizeof(*rp));
+	if (fp == NULL) {
+		error = -ENOMEM;
+	} else {
+		rp = fc_frame_payload_get(fp, sizeof(*rp));
+		fc_ns_fill_dns_hdr(lp, &rp->ct, FC_NS_GPN_FT, sizeof(rp->gid));
+		rp->gid.fn_fc4_type = FC_TYPE_FCP;
+
+		WARN_ON(!fc_lport_test_ready(lp));
+
+		fc_frame_setup(fp, FC_RCTL_DD_UNSOL_CTL, FC_TYPE_CT);
+		sp = lp->tt.exch_seq_send(lp, fp,
+					  fc_ns_gpn_ft_resp,
+					  lp, lp->e_d_tov,
+					  lp->fid,
+					  lp->dns_rp->port_id,
+					  FC_FC_SEQ_INIT | FC_FC_END_SEQ);
+	}
+	if (error || sp == NULL)
+		fcdt_ns_retry(lp);
+}
+
+/*
+ * Handle error on dNS request.
+ */
+static void fcdt_ns_error(struct fc_lport *lp, struct fc_frame *fp)
+{
+	int err = PTR_ERR(fp);
+
+	switch (err) {
+	case -FC_EX_TIMEOUT:
+		if (lp->ns_disc_retry_count++ < FC_NS_RETRY_LIMIT) {
+			fc_ns_gpn_ft_req(lp);
+		} else {
+			FC_DBG("err %d - ending", err);
+			fc_ns_disc_done(lp);
+		}
+		break;
+	default:
+		FC_DBG("err %d - ending", err);
+		fc_ns_disc_done(lp);
+		break;
+	}
+}
+
+/*
+ * Parse the list of port IDs and names resulting from a discovery request.
+ */
+static int fc_ns_gpn_ft_parse(struct fc_lport *lp, void *buf, size_t len)
+{
+	struct fc_gpn_ft_resp *np;
+	char *bp;
+	size_t plen;
+	size_t tlen;
+	int error = 0;
+	struct fc_ns_port *dp;
+
+	/*
+	 * Handle partial name record left over from previous call.
+	 */
+	bp = buf;
+	plen = len;
+	np = (struct fc_gpn_ft_resp *)bp;
+	tlen = lp->ns_disc_buf_len;
+	if (tlen) {
+		WARN_ON(tlen >= sizeof(*np));
+		plen = sizeof(*np) - tlen;
+		WARN_ON(plen <= 0);
+		WARN_ON(plen >= sizeof(*np));
+		if (plen > len)
+			plen = len;
+		np = &lp->ns_disc_buf;
+		memcpy((char *)np + tlen, bp, plen);
+
+		/*
+		 * Set bp so that the loop below will advance it to the
+		 * first valid full name element.
+		 */
+		bp -= tlen;
+		len += tlen;
+		plen += tlen;
+		lp->ns_disc_buf_len = (unsigned char) plen;
+		if (plen == sizeof(*np))
+			lp->ns_disc_buf_len = 0;
+	}
+
+	/*
+	 * Handle full name records, including the one filled from above.
+	 * Normally, np == bp and plen == len, but from the partial case above,
+	 * bp, len describe the overall buffer, and np, plen describe the
+	 * partial buffer, which would usually be full by now.
+	 * After the first time through the loop, things return to "normal".
+	 */
+	while (plen >= sizeof(*np)) {
+		dp = kzalloc(sizeof(*dp), GFP_KERNEL);
+		if (!dp)
+			break;
+		dp->lp = lp;
+		dp->ids.port_id = ntoh24(np->fp_fid);
+		dp->ids.port_name = ntohll(np->fp_wwpn);
+		dp->ids.node_name = -1;
+		dp->ids.roles = FC_RPORT_ROLE_UNKNOWN;
+		error = fc_ns_gnn_id_req(lp, dp);
+		if (error)
+			break;
+		if (np->fp_flags & FC_NS_FID_LAST) {
+			fc_ns_disc_done(lp);
+			len = 0;
+			break;
+		}
+		len -= sizeof(*np);
+		bp += sizeof(*np);
+		np = (struct fc_gpn_ft_resp *)bp;
+		plen = len;
+	}
+
+	/*
+	 * Save any partial record at the end of the buffer for next time.
+	 */
+	if (error == 0 && len > 0 && len < sizeof(*np)) {
+		if (np != &lp->ns_disc_buf)
+			memcpy(&lp->ns_disc_buf, np, len);
+		lp->ns_disc_buf_len = (unsigned char) len;
+	} else {
+		lp->ns_disc_buf_len = 0;
+	}
+	return error;
+}
+
+/*
+ * Handle retry of memory allocation for remote ports.
+ */
+static void fc_ns_timeout(struct work_struct *work)
+{
+	struct fc_lport *lp;
+
+	lp = container_of(work, struct fc_lport, ns_disc_work.work);
+
+	if (lp->ns_disc_pending)
+		fc_ns_gpn_ft_req(lp);
+	else
+		lp->tt.disc_start(lp);
+}
+
+/*
+ * Handle a response frame from Get Port Names (GPN_FT).
+ * The response may be in multiple frames
+ */
+static void fc_ns_gpn_ft_resp(struct fc_seq *sp, struct fc_frame *fp,
+			      void *lp_arg)
+{
+	struct fc_lport *lp = lp_arg;
+	struct fc_ct_hdr *cp;
+	struct fc_frame_header *fh;
+	unsigned int seq_cnt;
+	void *buf = NULL;
+	unsigned int len;
+	int error;
+
+	if (IS_ERR(fp)) {
+		fcdt_ns_error(lp, fp);
+		return;
+	}
+
+	WARN_ON(!fc_frame_is_linear(fp));	/* buffer must be contiguous */
+	fh = fc_frame_header_get(fp);
+	len = fr_len(fp) - sizeof(*fh);
+	seq_cnt = ntohs(fh->fh_seq_cnt);
+	if (fr_sof(fp) == FC_SOF_I3 && seq_cnt == 0 &&
+	    lp->ns_disc_seq_count == 0) {
+		cp = fc_frame_payload_get(fp, sizeof(*cp));
+		if (cp == NULL) {
+			FC_DBG("GPN_FT response too short.  len %d",
+			       fr_len(fp));
+		} else if (ntohs(cp->ct_cmd) == FC_FS_ACC) {
+
+			/*
+			 * Accepted.  Parse response.
+			 */
+			buf = cp + 1;
+			len -= sizeof(*cp);
+		} else if (ntohs(cp->ct_cmd) == FC_FS_RJT) {
+			FC_DBG("GPN_FT rejected reason %x exp %x "
+			       "(check zoning)", cp->ct_reason, cp->ct_explan);
+			fc_ns_disc_done(lp);
+		} else {
+			FC_DBG("GPN_FT unexpected response code %x\n",
+			       ntohs(cp->ct_cmd));
+		}
+	} else if (fr_sof(fp) == FC_SOF_N3 &&
+		   seq_cnt == lp->ns_disc_seq_count) {
+		buf = fh + 1;
+	} else {
+		FC_DBG("GPN_FT unexpected frame - out of sequence? "
+		       "seq_cnt %x expected %x sof %x eof %x",
+		       seq_cnt, lp->ns_disc_seq_count, fr_sof(fp), fr_eof(fp));
+	}
+	if (buf) {
+		error = fc_ns_gpn_ft_parse(lp, buf, len);
+		if (error)
+			fcdt_ns_retry(lp);
+		else
+			lp->ns_disc_seq_count++;
+	}
+	fc_frame_free(fp);
+}
+
+/*
+ * Discover the directory information for a single target.
+ * This could be from an RSCN that reported a change for the target.
+ */
+static void fc_ns_single(struct fc_lport *lp, struct fc_ns_port *dp)
+{
+	struct fc_rport *rport;
+
+	if (dp->ids.port_id == lp->fid)
+		goto out;
+
+	rport = lp->tt.rport_lookup(lp, dp->ids.port_id);
+	if (rport) {
+		fc_ns_del_target(lp, rport);
+		put_device(&rport->dev); /* hold from lookup */
+	}
+
+	if (fc_ns_gpn_id_req(lp, dp) != 0)
+		goto error;
+	return;
+error:
+	fc_ns_restart(lp);
+out:
+	kfree(dp);
+}
+
+/*
+ * Send Get Port Name by ID (GPN_ID) request.
+ * The remote port is held by the caller for us.
+ */
+static int fc_ns_gpn_id_req(struct fc_lport *lp, struct fc_ns_port *dp)
+{
+	struct fc_frame *fp;
+	struct req {
+		struct fc_ct_hdr ct;
+		struct fc_ns_fid fid;
+	} *cp;
+	int error = 0;
+
+	fp = fc_frame_alloc(lp, sizeof(*cp));
+	if (fp == NULL)
+		return -ENOMEM;
+
+	cp = fc_frame_payload_get(fp, sizeof(*cp));
+	fc_ns_fill_dns_hdr(lp, &cp->ct, FC_NS_GPN_ID, sizeof(cp->fid));
+	hton24(cp->fid.fp_fid, dp->ids.port_id);
+
+	WARN_ON(!fc_lport_test_ready(lp));
+
+	fc_frame_setup(fp, FC_RCTL_DD_UNSOL_CTL, FC_TYPE_CT);
+	if (!lp->tt.exch_seq_send(lp, fp,
+				  fc_ns_gpn_id_resp,
+				  dp, lp->e_d_tov,
+				  lp->fid,
+				  lp->dns_rp->port_id,
+				  FC_FC_SEQ_INIT | FC_FC_END_SEQ))
+		error = -ENOMEM;
+
+	return error;
+}
+
+/*
+ * Handle a response frame from Get Port Name by ID (GPN_ID).
+ */
+static void fc_ns_gpn_id_resp(struct fc_seq *sp, struct fc_frame *fp,
+			      void *dp_arg)
+{
+	struct fc_ns_port *dp = dp_arg;
+	struct fc_lport *lp;
+	struct resp {
+		struct fc_ct_hdr ct;
+		__be64 wwn;
+	} *cp;
+	unsigned int cmd;
+
+	if (IS_ERR(fp)) {
+		fc_ns_gpn_id_error(dp, fp);
+		return;
+	}
+
+	lp = dp->lp;
+	WARN_ON(!fc_frame_is_linear(fp));	/* buffer must be contiguous */
+
+	cp = fc_frame_payload_get(fp, sizeof(cp->ct));
+	if (cp == NULL) {
+		FC_DBG("GPN_ID response too short.  len %d", fr_len(fp));
+		fc_frame_free(fp);
+		kfree(dp);
+		return;
+	}
+	cmd = ntohs(cp->ct.ct_cmd);
+	switch (cmd) {
+	case FC_FS_ACC:
+		cp = fc_frame_payload_get(fp, sizeof(*cp));
+		if (cp == NULL) {
+			FC_DBG("GPN_ID response payload too short.  len %d",
+			       fr_len(fp));
+			break;
+		}
+		dp->ids.port_name = ntohll(cp->wwn);
+		fc_ns_gnn_id_req(lp, dp);
+		break;
+	case FC_FS_RJT:
+		fc_ns_restart(lp);
+		break;
+	default:
+		FC_DBG("GPN_ID unexpected CT response cmd %x\n", cmd);
+		break;
+	}
+	fc_frame_free(fp);
+}
+
+/*
+ * Handle error from GPN_ID.
+ */
+static void fc_ns_gpn_id_error(struct fc_ns_port *dp, struct fc_frame *fp)
+{
+	struct fc_lport *lp = dp->lp;
+
+	switch (PTR_ERR(fp)) {
+	case -FC_EX_TIMEOUT:
+		fc_ns_restart(lp);
+		break;
+	case -FC_EX_CLOSED:
+	default:
+		break;
+	}
+	kfree(dp);
+}
+
+/*
+ * Setup session to dNS if not already set up.
+ */
+static void fc_ns_enter_dns(struct fc_lport *lp)
+{
+	struct fc_rport *rport;
+	struct fc_rport_libfc_priv *rp;
+	struct fc_rport_identifiers ids = {
+		.port_id = FC_FID_DIR_SERV,
+		.port_name = -1,
+		.node_name = -1,
+		.roles = FC_RPORT_ROLE_UNKNOWN,
+	};
+
+	if (fc_ns_debug)
+		FC_DBG("Processing DNS state");
+
+	fc_lport_state_enter(lp, LPORT_ST_DNS);
+
+	if (!lp->dns_rp) {
+		/*
+		 * Set up remote port to directory server.
+		 */
+
+		/*
+		 * we are called with the state_lock, but if rport_lookup_create
+		 * needs to create a rport then it will sleep.
+		 */
+		fc_lport_unlock(lp);
+		rport = lp->tt.rport_lookup(lp, ids.port_id);
+		if (rport == NULL)
+			rport = lp->tt.rport_create(lp, &ids);
+		fc_lport_lock(lp);
+		if (!rport)
+			goto err;
+		lp->dns_rp = rport;
+	}
+
+	rport = lp->dns_rp;
+	rp = rport->dd_data;
+
+	/*
+	 * If dNS session isn't ready, start its logon.
+	 */
+	if (rp->rp_state != RPORT_ST_READY) {
+		lp->tt.rport_login(rport);
+	} else {
+		del_timer(&lp->state_timer);
+		fc_ns_enter_reg_pn(lp);
+	}
+	return;
+
+	/*
+	 * Resource allocation problem (malloc).  Retry after a delay.
+	 */
+err:
+	fc_ns_retry(lp);
+}
+
+/*
+ * Logoff DNS session.
+ * We should get an event call when the session has been logged out.
+ */
+static void fc_ns_enter_dns_stop(struct fc_lport *lp)
+{
+	struct fc_rport *rport = lp->dns_rp;
+
+	if (fc_ns_debug)
+		FC_DBG("Processing DNS_STOP state");
+
+	fc_lport_state_enter(lp, LPORT_ST_DNS_STOP);
+
+	if (rport)
+		lp->tt.rport_logout(rport);
+	else
+		lp->tt.lport_logout(lp);
+}
+
+/*
+ * Fill in dNS request header.
+ */
+static void
+fc_lport_fill_dns_hdr(struct fc_lport *lp, struct fc_ct_hdr *ct,
+		      unsigned int op, unsigned int req_size)
+{
+	memset(ct, 0, sizeof(*ct) + req_size);
+	ct->ct_rev = FC_CT_REV;
+	ct->ct_fs_type = FC_FST_DIR;
+	ct->ct_fs_subtype = FC_NS_SUBTYPE;
+	ct->ct_cmd = htons(op);
+}
+
+/*
+ * Register port name with name server.
+ */
+static void fc_ns_enter_reg_pn(struct fc_lport *lp)
+{
+	struct fc_frame *fp;
+	struct req {
+		struct fc_ct_hdr ct;
+		struct fc_ns_rn_id rn;
+	} *req;
+
+	if (fc_ns_debug)
+		FC_DBG("Processing REG_PN state");
+
+	fc_lport_state_enter(lp, LPORT_ST_REG_PN);
+	fp = fc_frame_alloc(lp, sizeof(*req));
+	if (!fp) {
+		fc_ns_retry(lp);
+		return;
+	}
+	req = fc_frame_payload_get(fp, sizeof(*req));
+	memset(req, 0, sizeof(*req));
+	fc_lport_fill_dns_hdr(lp, &req->ct, FC_NS_RPN_ID, sizeof(req->rn));
+	hton24(req->rn.fr_fid.fp_fid, lp->fid);
+	put_unaligned_be64(lp->wwpn, &req->rn.fr_wwn);
+	fc_frame_setup(fp, FC_RCTL_DD_UNSOL_CTL, FC_TYPE_CT);
+	if (!lp->tt.exch_seq_send(lp, fp,
+				  fc_ns_resp, lp,
+				  lp->e_d_tov,
+				  lp->fid,
+				  lp->dns_rp->port_id,
+				  FC_FC_SEQ_INIT | FC_FC_END_SEQ))
+		fc_ns_retry(lp);
+}
+
+int fc_ns_init(struct fc_lport *lp)
+{
+	INIT_DELAYED_WORK(&lp->ns_disc_work, fc_ns_timeout);
+
+	if (!lp->tt.disc_start)
+		lp->tt.disc_start = fc_ns_disc_start;
+
+	if (!lp->tt.disc_recv_req)
+		lp->tt.disc_recv_req = fc_ns_recv_req;
+
+	if (!lp->tt.disc_enter_dns)
+		lp->tt.disc_enter_dns = fc_ns_enter_dns;
+
+	if (!lp->tt.disc_stop)
+		lp->tt.disc_stop = fc_ns_enter_dns_stop;
+
+	return 0;
+}
+EXPORT_SYMBOL(fc_ns_init);
+
+/*
+ * Send Get Node Name by ID (GNN_ID) request.
+ */
+static int fc_ns_gnn_id_req(struct fc_lport *lp, struct fc_ns_port *dp)
+{
+	struct fc_frame *fp;
+	struct req {
+		struct fc_ct_hdr ct;
+		struct fc_ns_fid fid;
+	} *cp;
+	int error = 0;
+
+	fp = fc_frame_alloc(lp, sizeof(*cp));
+	if (fp == NULL)
+		return -ENOMEM;
+
+	cp = fc_frame_payload_get(fp, sizeof(*cp));
+	fc_ns_fill_dns_hdr(lp, &cp->ct, FC_NS_GNN_ID, sizeof(cp->fid));
+	hton24(cp->fid.fp_fid, dp->ids.port_id);
+
+	WARN_ON(!fc_lport_test_ready(lp));
+
+	fc_frame_setup(fp, FC_RCTL_DD_UNSOL_CTL, FC_TYPE_CT);
+	if (!lp->tt.exch_seq_send(lp, fp,
+				  fc_ns_gnn_id_resp,
+				  dp, lp->e_d_tov,
+				  lp->fid,
+				  lp->dns_rp->port_id,
+				  FC_FC_SEQ_INIT | FC_FC_END_SEQ))
+		error = -ENOMEM;
+
+	return error;
+}
+
+/*
+ * Handle a response frame from Get Node Name by ID (GNN_ID).
+ */
+static void fc_ns_gnn_id_resp(struct fc_seq *sp, struct fc_frame *fp,
+			      void *dp_arg)
+{
+	struct fc_ns_port *dp = dp_arg;
+	struct fc_lport *lp;
+	struct resp {
+		struct fc_ct_hdr ct;
+		__be64 wwn;
+	} *cp;
+	unsigned int cmd;
+
+	if (IS_ERR(fp)) {
+		fc_ns_gnn_id_error(dp, fp);
+		return;
+	}
+
+	lp = dp->lp;
+	WARN_ON(!fc_frame_is_linear(fp));	/* buffer must be contiguous */
+
+	cp = fc_frame_payload_get(fp, sizeof(cp->ct));
+	if (cp == NULL) {
+		FC_DBG("GNN_ID response too short.  len %d", fr_len(fp));
+		fc_frame_free(fp);
+		kfree(dp);
+		return;
+	}
+	cmd = ntohs(cp->ct.ct_cmd);
+	switch (cmd) {
+	case FC_FS_ACC:
+		cp = fc_frame_payload_get(fp, sizeof(*cp));
+		if (cp == NULL) {
+			FC_DBG("GNN_ID response payload too short.  len %d",
+			       fr_len(fp));
+			break;
+		}
+		dp->ids.node_name = ntohll(cp->wwn);
+		fc_ns_new_target(lp, NULL, &dp->ids);
+		break;
+	case FC_FS_RJT:
+		fc_ns_restart(lp);
+		break;
+	default:
+		FC_DBG("GNN_ID unexpected CT response cmd %x\n", cmd);
+		break;
+	}
+	kfree(dp);
+	fc_frame_free(fp);
+}
+
+/*
+ * Handle error from GNN_ID.
+ */
+static void fc_ns_gnn_id_error(struct fc_ns_port *dp, struct fc_frame *fp)
+{
+	struct fc_lport *lp = dp->lp;
+
+	switch (PTR_ERR(fp)) {
+	case -FC_EX_TIMEOUT:
+		fc_ns_restart(lp);
+		break;
+	case -FC_EX_CLOSED:
+	default:
+		break;
+	}
+	kfree(dp);
+}
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
new file mode 100644
index 0000000..4050596
--- /dev/null
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -0,0 +1,1265 @@
+/*
+ * Copyright(c) 2007 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+/*
+ * Remote Port support.
+ *
+ * A remote port structure contains information about an N port to which we
+ * will create sessions.
+ */
+
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/rcupdate.h>
+#include <linux/timer.h>
+#include <linux/workqueue.h>
+#include <asm/unaligned.h>
+
+#include <scsi/libfc/libfc.h>
+
+static int fc_rp_debug;
+
+/*
+ * static functions.
+ */
+static void fc_rport_enter_start(struct fc_rport *);
+static void fc_rport_enter_plogi(struct fc_rport *);
+static void fc_rport_enter_prli(struct fc_rport *);
+static void fc_rport_enter_rtv(struct fc_rport *);
+static void fc_rport_enter_logo(struct fc_rport *);
+static void fc_rport_recv_plogi_req(struct fc_rport *,
+				    struct fc_seq *, struct fc_frame *);
+static void fc_rport_recv_prli_req(struct fc_rport *,
+				   struct fc_seq *, struct fc_frame *);
+static void fc_rport_recv_prlo_req(struct fc_rport *,
+				   struct fc_seq *, struct fc_frame *);
+static void fc_rport_recv_logo_req(struct fc_rport *,
+				   struct fc_seq *, struct fc_frame *);
+static void fc_rport_timeout(struct work_struct *);
+
+static struct fc_rport *fc_remote_port_create(struct fc_lport *,
+					      struct fc_rport_identifiers *);
+
+/**
+ * fc_rport_lookup - look up a remote port by FC_ID
+ * @lp: local port whose remote ports are searched
+ * @fid: FC_ID (port ID) of the remote port
+ *
+ * Returns the online remote port with a reference held, or NULL.
+ */
+struct fc_rport *fc_rport_lookup(const struct fc_lport *lp, u32 fid)
+{
+	struct Scsi_Host *shost = lp->host;
+	struct fc_rport *rport, *found;
+	unsigned long flags;
+
+	found = NULL;
+	spin_lock_irqsave(shost->host_lock, flags);
+	list_for_each_entry(rport, &fc_host_rports(shost), peers)
+		if (rport->port_id == fid &&
+		    rport->port_state == FC_PORTSTATE_ONLINE) {
+			found = rport;
+			get_device(&found->dev);
+			break;
+		}
+	spin_unlock_irqrestore(shost->host_lock, flags);
+	return found;
+}
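+
+/*
+ * Usage sketch (hypothetical caller, not part of this patch):
+ * fc_rport_lookup() takes a reference on the returned device, so the
+ * caller must drop it when done.
+ *
+ *	struct fc_rport *rport;
+ *
+ *	rport = lp->tt.rport_lookup(lp, fid);
+ *	if (rport) {
+ *		(use rport)
+ *		put_device(&rport->dev);	(balances the get_device() above)
+ *	}
+ */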
+
+/**
+ * fc_remote_port_create - create a remote port
+ * @lp:		pointer to the associated local port
+ * @ids:	identifiers (FC_ID, WWPN, WWNN, roles) for the remote port
+ *
+ * Create a new remote port, attach it to the local port, and
+ * initialize its libfc-private data.
+ */
+static struct fc_rport *fc_remote_port_create(struct fc_lport *lp,
+					      struct fc_rport_identifiers *ids)
+{
+	struct fc_rport_libfc_priv *rp;
+	struct fc_rport *rport;
+
+	rport = fc_remote_port_add(lp->host, 0, ids);
+	if (!rport)
+		return NULL;
+
+	rp = rport->dd_data;
+	rp->local_port = lp;
+
+	/* default value until service parameters are exchanged in PLOGI */
+	rport->maxframe_size = FC_MIN_MAX_PAYLOAD;
+
+	spin_lock_init(&rp->rp_lock);
+	rp->rp_state = RPORT_ST_INIT;
+	rp->e_d_tov = lp->e_d_tov;
+	rp->r_a_tov = lp->r_a_tov;
+	rp->flags = FC_RP_FLAGS_REC_SUPPORTED;
+	INIT_DELAYED_WORK(&rp->retry_work, fc_rport_timeout);
+
+	return rport;
+}
+
+/*
+ * Lock session.
+ */
+static inline void fc_rport_lock(struct fc_rport *rport)
+{
+	struct fc_rport_libfc_priv *rp = rport->dd_data;
+	spin_lock_bh(&rp->rp_lock);
+}
+
+/*
+ * Unlock session without invoking pending events.
+ */
+static inline void fc_rport_unlock(struct fc_rport *rport)
+{
+	struct fc_rport_libfc_priv *rp = rport->dd_data;
+	spin_unlock_bh(&rp->rp_lock);
+}
+
+static unsigned int
+fc_plogi_get_maxframe(struct fc_els_flogi *flp, unsigned int maxval)
+{
+	unsigned int mfs;
+
+	/*
+	 * Get max payload from the common service parameters and the
+	 * class 3 receive data field size.
+	 */
+	mfs = ntohs(flp->fl_csp.sp_bb_data) & FC_SP_BB_DATA_MASK;
+	if (mfs >= FC_SP_MIN_MAX_PAYLOAD && mfs < maxval)
+		maxval = mfs;
+	mfs = ntohs(flp->fl_cssp[3 - 1].cp_rdfs);
+	if (mfs >= FC_SP_MIN_MAX_PAYLOAD && mfs < maxval)
+		maxval = mfs;
+	return maxval;
+}
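+
+/*
+ * Worked example with illustrative numbers: if the common service
+ * parameters advertise 2112 bytes but the class 3 receive data field
+ * size is 1024 and the local mfs is 2048, the result is
+ * min(2112, 1024, 2048) = 1024.  Callers apply it as:
+ *
+ *	rport->maxframe_size = fc_plogi_get_maxframe(plp, lp->mfs);
+ */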
+
+/*
+ * Fill in PLOGI command for request.
+ */
+static void
+fc_lport_plogi_fill(struct fc_lport *lp,
+		    struct fc_els_flogi *flogi, unsigned int op)
+{
+	struct fc_els_csp *sp;
+	struct fc_els_cssp *cp;
+
+	memset(flogi, 0, sizeof(*flogi));
+	flogi->fl_cmd = (u8) op;
+	put_unaligned_be64(lp->wwpn, &flogi->fl_wwpn);
+	put_unaligned_be64(lp->wwnn, &flogi->fl_wwnn);
+
+	sp = &flogi->fl_csp;
+	sp->sp_hi_ver = 0x20;
+	sp->sp_lo_ver = 0x20;
+	sp->sp_bb_cred = htons(10);	/* this gets set by gateway */
+	sp->sp_bb_data = htons((u16) lp->mfs);
+	cp = &flogi->fl_cssp[3 - 1];	/* class 3 parameters */
+	cp->cp_class = htons(FC_CPC_VALID | FC_CPC_SEQ);
+	if (op != ELS_FLOGI) {
+		sp->sp_features = htons(FC_SP_FT_CIRO);
+		sp->sp_tot_seq = htons(255);	/* seq. we accept */
+		sp->sp_rel_off = htons(0x1f);
+		sp->sp_e_d_tov = htonl(lp->e_d_tov);
+
+		cp->cp_rdfs = htons((u16) lp->mfs);
+		cp->cp_con_seq = htons(255);
+		cp->cp_open_seq = 1;
+	}
+}
+
+static void fc_rport_state_enter(struct fc_rport *rport,
+				 enum fc_rport_state new)
+{
+	struct fc_rport_libfc_priv *rp = rport->dd_data;
+	if (rp->rp_state != new)
+		rp->retries = 0;
+	rp->rp_state = new;
+}
+
+/*
+ * Start the session login state machine.
+ * Set it to wait for the local_port to be ready if it isn't.
+ */
+int fc_rport_login(struct fc_rport *rport)
+{
+	struct fc_rport_libfc_priv *rp = rport->dd_data;
+	struct fc_lport *lp = rp->local_port;
+
+	fc_rport_lock(rport);
+	if (rp->rp_state == RPORT_ST_INIT) {
+		fc_rport_unlock(rport);
+		fc_rport_enter_start(rport);
+	} else if (rp->rp_state == RPORT_ST_ERROR) {
+		fc_rport_state_enter(rport, RPORT_ST_INIT);
+		fc_rport_unlock(rport);
+		if (fc_rp_debug)
+			FC_DBG("remote %6x closed", rport->port_id);
+
+		if (rport == lp->dns_rp &&
+		    lp->state != LPORT_ST_RESET) {
+			fc_lport_lock(lp);
+			del_timer(&lp->state_timer);
+			lp->dns_rp = NULL;
+
+			if (lp->state == LPORT_ST_DNS_STOP) {
+				fc_lport_unlock(lp);
+				lp->tt.lport_logout(lp);
+			} else {
+				lp->tt.lport_login(lp);
+				fc_lport_unlock(lp);
+			}
+			fc_remote_port_delete(rport);
+		}
+	} else
+		fc_rport_unlock(rport);
+
+	return 0;
+}
+
+/*
+ * Stop the session - log it off.
+ */
+int fc_rport_logout(struct fc_rport *rport)
+{
+	struct fc_rport_libfc_priv *rp = rport->dd_data;
+	struct fc_lport *lp = rp->local_port;
+
+	fc_rport_lock(rport);
+	switch (rp->rp_state) {
+	case RPORT_ST_PRLI:
+	case RPORT_ST_RTV:
+	case RPORT_ST_READY:
+		fc_rport_enter_logo(rport);
+		fc_rport_unlock(rport);
+		break;
+	default:
+		fc_rport_state_enter(rport, RPORT_ST_INIT);
+		fc_rport_unlock(rport);
+		if (fc_rp_debug)
+			FC_DBG("remote %6x closed", rport->port_id);
+		if (rport == lp->dns_rp &&
+		    lp->state != LPORT_ST_RESET) {
+			fc_lport_lock(lp);
+			del_timer(&lp->state_timer);
+			lp->dns_rp = NULL;
+
+			if (lp->state == LPORT_ST_DNS_STOP) {
+				fc_lport_unlock(lp);
+				lp->tt.lport_logout(lp);
+			} else {
+				lp->tt.lport_login(lp);
+				fc_lport_unlock(lp);
+			}
+
+			fc_remote_port_delete(rport);
+		}
+		break;
+	}
+
+	return 0;
+}
+
+/*
+ * Reset the session - assume it is logged off.	 Used after fabric logoff.
+ * The local port code takes care of resetting the exchange manager.
+ */
+void fc_rport_reset(struct fc_rport *rport)
+{
+	struct fc_rport_libfc_priv *rp = rport->dd_data;
+	struct fc_lport *lp;
+
+	if (fc_rp_debug)
+		FC_DBG("sess to %6x reset", rport->port_id);
+	fc_rport_lock(rport);
+
+	lp = rp->local_port;
+	fc_rport_state_enter(rport, RPORT_ST_INIT);
+	fc_rport_unlock(rport);
+
+	if (fc_rp_debug)
+		FC_DBG("remote %6x closed", rport->port_id);
+	if (rport == lp->dns_rp &&
+	    lp->state != LPORT_ST_RESET) {
+		fc_lport_lock(lp);
+		del_timer(&lp->state_timer);
+		lp->dns_rp = NULL;
+		if (lp->state == LPORT_ST_DNS_STOP) {
+			fc_lport_unlock(lp);
+			lp->tt.lport_logout(lp);
+		} else {
+			lp->tt.lport_login(lp);
+			fc_lport_unlock(lp);
+		}
+		fc_remote_port_delete(rport);
+	}
+}
+
+/*
+ * Reset all sessions on a local port's remote-port list.
+ * The shost->host_lock protects the list while it is walked.
+ */
+void fc_rport_reset_list(struct fc_lport *lp)
+{
+	struct Scsi_Host *shost = lp->host;
+	struct fc_rport *rport;
+	struct fc_rport *next;
+	unsigned long flags;
+
+	spin_lock_irqsave(shost->host_lock, flags);
+	list_for_each_entry_safe(rport, next, &fc_host_rports(shost), peers) {
+		lp->tt.rport_reset(rport);
+	}
+	spin_unlock_irqrestore(shost->host_lock, flags);
+}
+
+static void fc_rport_enter_start(struct fc_rport *rport)
+{
+	struct fc_rport_libfc_priv *rp = rport->dd_data;
+	struct fc_lport *lp = rp->local_port;
+
+	/*
+	 * If the local port is already logged in, advance to the next
+	 * state.  Otherwise, remote port login resumes once the local
+	 * port becomes ready.
+	 */
+	fc_rport_state_enter(rport, RPORT_ST_STARTED);
+
+	if (rport == lp->dns_rp || fc_lport_test_ready(lp))
+		fc_rport_enter_plogi(rport);
+}
+
+/*
+ * Handle exchange reject or retry exhaustion in various states.
+ */
+static void fc_rport_reject(struct fc_rport *rport)
+{
+	struct fc_rport_libfc_priv *rp = rport->dd_data;
+	struct fc_lport *lp = rp->local_port;
+	switch (rp->rp_state) {
+	case RPORT_ST_PLOGI:
+	case RPORT_ST_PRLI:
+		fc_rport_state_enter(rport, RPORT_ST_ERROR);
+		if (rport == lp->dns_rp &&
+		    lp->state != LPORT_ST_RESET) {
+			fc_lport_lock(lp);
+			del_timer(&lp->state_timer);
+			lp->dns_rp = NULL;
+			if (lp->state == LPORT_ST_DNS_STOP) {
+				fc_lport_unlock(lp);
+				lp->tt.lport_logout(lp);
+			} else {
+				lp->tt.lport_login(lp);
+				fc_lport_unlock(lp);
+			}
+			fc_remote_port_delete(rport);
+		}
+		break;
+	case RPORT_ST_RTV:
+		fc_rport_state_enter(rport, RPORT_ST_READY);
+		if (fc_rp_debug)
+			FC_DBG("remote %6x ready", rport->port_id);
+		if (rport == lp->dns_rp &&
+		    lp->state == LPORT_ST_DNS) {
+			fc_lport_lock(lp);
+			del_timer(&lp->state_timer);
+			lp->tt.disc_enter_dns(lp);
+			fc_lport_unlock(lp);
+		}
+		break;
+	case RPORT_ST_LOGO:
+		fc_rport_state_enter(rport, RPORT_ST_INIT);
+		if (fc_rp_debug)
+			FC_DBG("remote %6x closed", rport->port_id);
+		if (rport == lp->dns_rp &&
+		    lp->state != LPORT_ST_RESET) {
+			fc_lport_lock(lp);
+			del_timer(&lp->state_timer);
+			lp->dns_rp = NULL;
+			if (lp->state == LPORT_ST_DNS_STOP) {
+				fc_lport_unlock(lp);
+				lp->tt.lport_logout(lp);
+			} else {
+				lp->tt.lport_login(lp);
+				fc_lport_unlock(lp);
+			}
+			fc_remote_port_delete(rport);
+		}
+		break;
+	case RPORT_ST_NONE:
+	case RPORT_ST_READY:
+	case RPORT_ST_ERROR:
+	case RPORT_ST_PLOGI_RECV:
+	case RPORT_ST_STARTED:
+	case RPORT_ST_INIT:
+		BUG();
+		break;
+	}
+	return;
+}
+
+/*
+ * Timeout handler for retrying after allocation failures or exchange timeout.
+ */
+static void fc_rport_timeout(struct work_struct *work)
+{
+	struct fc_rport_libfc_priv *rp =
+		container_of(work, struct fc_rport_libfc_priv, retry_work.work);
+	/*
+	 * The FC transport class allocates dd_data immediately after
+	 * struct fc_rport, so step back to recover the rport pointer.
+	 */
+	struct fc_rport *rport = (((void *)rp) - sizeof(struct fc_rport));
+
+	switch (rp->rp_state) {
+	case RPORT_ST_PLOGI:
+		fc_rport_enter_plogi(rport);
+		break;
+	case RPORT_ST_PRLI:
+		fc_rport_enter_prli(rport);
+		break;
+	case RPORT_ST_RTV:
+		fc_rport_enter_rtv(rport);
+		break;
+	case RPORT_ST_LOGO:
+		fc_rport_enter_logo(rport);
+		break;
+	case RPORT_ST_READY:
+	case RPORT_ST_ERROR:
+	case RPORT_ST_INIT:
+		break;
+	case RPORT_ST_NONE:
+	case RPORT_ST_PLOGI_RECV:
+	case RPORT_ST_STARTED:
+		BUG();
+		break;
+	}
+	put_device(&rport->dev);
+}
+
+/*
+ * Handle retry for allocation failure via timeout.
+ */
+static void fc_rport_retry(struct fc_rport *rport)
+{
+	struct fc_rport_libfc_priv *rp = rport->dd_data;
+	struct fc_lport *lp = rp->local_port;
+
+	if (rp->retries < lp->max_retry_count) {
+		rp->retries++;
+		get_device(&rport->dev);
+		schedule_delayed_work(&rp->retry_work,
+				      msecs_to_jiffies(rp->e_d_tov));
+	} else {
+		FC_DBG("sess %6x alloc failure in state %d, retries exhausted",
+		       rport->port_id, rp->rp_state);
+		fc_rport_reject(rport);
+	}
+}
+
+/*
+ * Handle error from a sequence issued by the session state machine.
+ */
+static void fc_rport_error(struct fc_rport *rport, struct fc_frame *fp)
+{
+	struct fc_rport_libfc_priv *rp = rport->dd_data;
+	fc_rport_lock(rport);
+	if (fc_rp_debug)
+		FC_DBG("state %d error %ld retries %d\n",
+		       rp->rp_state, PTR_ERR(fp), rp->retries);
+
+	if (PTR_ERR(fp) == -FC_EX_TIMEOUT &&
+	    rp->retries++ >= rp->local_port->max_retry_count) {
+		get_device(&rport->dev);
+		schedule_delayed_work(&rp->retry_work, 0);
+	} else
+		fc_rport_reject(rport);
+
+	fc_rport_unlock(rport);
+}
+
+/*
+ * Handle incoming ELS PLOGI response.
+ * Save parameters of target.  Finish exchange.
+ */
+static void fc_rport_plogi_recv_resp(struct fc_seq *sp, struct fc_frame *fp,
+				     void *rp_arg)
+{
+	struct fc_els_ls_rjt *rjp;
+	struct fc_els_flogi *plp;
+	u64 wwpn, wwnn;
+	unsigned int tov;
+	u16 csp_seq;
+	u16 cssp_seq;
+	u8 op;
+	struct fc_rport *rport = rp_arg;
+	struct fc_rport_libfc_priv *rp = rport->dd_data;
+
+	if (!IS_ERR(fp)) {
+		op = fc_frame_payload_op(fp);
+		fc_rport_lock(rport);
+		if (op == ELS_LS_ACC &&
+		    (plp = fc_frame_payload_get(fp, sizeof(*plp))) != NULL) {
+			wwpn = get_unaligned_be64(&plp->fl_wwpn);
+			wwnn = get_unaligned_be64(&plp->fl_wwnn);
+
+			fc_rport_set_name(rport, wwpn, wwnn);
+			tov = ntohl(plp->fl_csp.sp_e_d_tov);
+			if (ntohs(plp->fl_csp.sp_features) & FC_SP_FT_EDTR)
+				tov /= 1000;
+			if (tov > rp->e_d_tov)
+				rp->e_d_tov = tov;
+			csp_seq = ntohs(plp->fl_csp.sp_tot_seq);
+			cssp_seq = ntohs(plp->fl_cssp[3 - 1].cp_con_seq);
+			if (cssp_seq < csp_seq)
+				csp_seq = cssp_seq;
+			rp->max_seq = csp_seq;
+			rport->maxframe_size =
+				fc_plogi_get_maxframe(plp, rp->local_port->mfs);
+			if (rp->rp_state == RPORT_ST_PLOGI)
+				fc_rport_enter_prli(rport);
+		} else {
+			if (fc_rp_debug)
+				FC_DBG("bad PLOGI response");
+
+			rjp = fc_frame_payload_get(fp, sizeof(*rjp));
+			if (op == ELS_LS_RJT && rjp != NULL &&
+			    rjp->er_reason == ELS_RJT_INPROG)
+				fc_rport_retry(rport);    /* try again */
+			else
+				fc_rport_reject(rport);   /* error */
+		}
+		fc_rport_unlock(rport);
+		fc_frame_free(fp);
+	} else {
+		fc_rport_error(rport, fp);
+	}
+}
+
+/*
+ * Send ELS (extended link service) PLOGI request to peer.
+ */
+static void fc_rport_enter_plogi(struct fc_rport *rport)
+{
+	struct fc_frame *fp;
+	struct fc_els_flogi *plogi;
+	struct fc_lport *lp;
+	struct fc_rport_libfc_priv *rp = rport->dd_data;
+
+	lp = rp->local_port;
+	fc_rport_state_enter(rport, RPORT_ST_PLOGI);
+	rport->maxframe_size = FC_MIN_MAX_PAYLOAD;
+	fp = fc_frame_alloc(lp, sizeof(*plogi));
+	if (!fp)
+		return fc_rport_retry(rport);
+	plogi = fc_frame_payload_get(fp, sizeof(*plogi));
+	WARN_ON(!plogi);
+	fc_lport_plogi_fill(rp->local_port, plogi, ELS_PLOGI);
+	rp->e_d_tov = lp->e_d_tov;
+	fc_frame_setup(fp, FC_RCTL_ELS_REQ, FC_TYPE_ELS);
+	if (!lp->tt.exch_seq_send(lp, fp,
+				   fc_rport_plogi_recv_resp,
+				   rport, lp->e_d_tov,
+				   rp->local_port->fid,
+				   rport->port_id,
+				   FC_FC_SEQ_INIT | FC_FC_END_SEQ))
+		fc_rport_retry(rport);
+}
+
+static void fc_rport_prli_recv_resp(struct fc_seq *sp, struct fc_frame *fp,
+				   void *rp_arg)
+{
+	struct fc_rport *rport = rp_arg;
+	struct fc_rport_libfc_priv *rp = rport->dd_data;
+	struct fc_lport *lp = rp->local_port;
+	struct {
+		struct fc_els_prli prli;
+		struct fc_els_spp spp;
+	} *pp;
+	u32 roles = FC_RPORT_ROLE_UNKNOWN;
+	u32 fcp_parm = 0;
+	u8 op;
+
+	if (IS_ERR(fp)) {
+		fc_rport_error(rport, fp);
+		return;
+	}
+
+	fc_rport_lock(rport);
+	op = fc_frame_payload_op(fp);
+	if (op == ELS_LS_ACC) {
+		pp = fc_frame_payload_get(fp, sizeof(*pp));
+		if (pp && pp->prli.prli_spp_len >= sizeof(pp->spp)) {
+			fcp_parm = ntohl(pp->spp.spp_params);
+			if (fcp_parm & FCP_SPPF_RETRY)
+				rp->flags |= FC_RP_FLAGS_RETRY;
+		}
+
+		rport->supported_classes = FC_COS_CLASS3;
+		if (fcp_parm & FCP_SPPF_INIT_FCN)
+			roles |= FC_RPORT_ROLE_FCP_INITIATOR;
+		if (fcp_parm & FCP_SPPF_TARG_FCN)
+			roles |= FC_RPORT_ROLE_FCP_TARGET;
+
+		fc_rport_enter_rtv(rport);
+		fc_rport_unlock(rport);
+		fc_remote_port_rolechg(rport, roles);
+	} else {
+		FC_DBG("bad ELS response\n");
+		fc_rport_state_enter(rport, RPORT_ST_ERROR);
+		fc_rport_unlock(rport);
+		if (rport == lp->dns_rp && lp->state != LPORT_ST_RESET) {
+			fc_lport_lock(lp);
+			del_timer(&lp->state_timer);
+			lp->dns_rp = NULL;
+			if (lp->state == LPORT_ST_DNS_STOP) {
+				fc_lport_unlock(lp);
+				lp->tt.lport_logout(lp);
+			} else {
+				lp->tt.lport_login(lp);
+				fc_lport_unlock(lp);
+			}
+			fc_remote_port_delete(rport);
+		}
+	}
+
+	fc_frame_free(fp);
+}
+
+static void fc_rport_logo_recv_resp(struct fc_seq *sp, struct fc_frame *fp,
+				   void *rp_arg)
+{
+	struct fc_rport *rport = rp_arg;
+	struct fc_rport_libfc_priv *rp = rport->dd_data;
+	struct fc_lport *lp = rp->local_port;
+	u8 op;
+
+	if (IS_ERR(fp)) {
+		fc_rport_error(rport, fp);
+		return;
+	}
+
+	fc_rport_lock(rport);
+	op = fc_frame_payload_op(fp);
+	if (op == ELS_LS_ACC) {
+		/* LOGO accepted: the remote port is logged out */
+		fc_rport_state_enter(rport, RPORT_ST_INIT);
+		fc_rport_unlock(rport);
+	} else {
+		FC_DBG("bad ELS response\n");
+		fc_rport_state_enter(rport, RPORT_ST_ERROR);
+		fc_rport_unlock(rport);
+		if (rport == lp->dns_rp && lp->state != LPORT_ST_RESET) {
+			fc_lport_lock(lp);
+			del_timer(&lp->state_timer);
+			lp->dns_rp = NULL;
+			if (lp->state == LPORT_ST_DNS_STOP) {
+				fc_lport_unlock(lp);
+				lp->tt.lport_logout(lp);
+			} else {
+				lp->tt.lport_login(lp);
+				fc_lport_unlock(lp);
+			}
+			fc_remote_port_delete(rport);
+		}
+	}
+
+	fc_frame_free(fp);
+}
+
+/*
+ * Send ELS PRLI request to target.
+ */
+static void fc_rport_enter_prli(struct fc_rport *rport)
+{
+	struct {
+		struct fc_els_prli prli;
+		struct fc_els_spp spp;
+	} *pp;
+	struct fc_frame *fp;
+	struct fc_rport_libfc_priv *rp = rport->dd_data;
+	struct fc_lport *lp = rp->local_port;
+
+	fc_rport_state_enter(rport, RPORT_ST_PRLI);
+
+	/*
+	 * Special case if session is for name server or any other
+	 * well-known address:	Skip the PRLI step.
+	 * This should be made more general, possibly moved to the FCP layer.
+	 */
+	if (rport->port_id >= FC_FID_DOM_MGR) {
+		fc_rport_state_enter(rport, RPORT_ST_READY);
+		if (fc_rp_debug)
+			FC_DBG("remote %6x ready", rport->port_id);
+		if (rport == lp->dns_rp &&
+		    lp->state == LPORT_ST_DNS) {
+			fc_lport_lock(lp);
+			del_timer(&lp->state_timer);
+			lp->tt.disc_enter_dns(lp);
+			fc_lport_unlock(lp);
+		}
+		return;
+	}
+	fp = fc_frame_alloc(lp, sizeof(*pp));
+	if (!fp)
+		return fc_rport_retry(rport);
+	pp = fc_frame_payload_get(fp, sizeof(*pp));
+	WARN_ON(!pp);
+	memset(pp, 0, sizeof(*pp));
+	pp->prli.prli_cmd = ELS_PRLI;
+	pp->prli.prli_spp_len = sizeof(struct fc_els_spp);
+	pp->prli.prli_len = htons(sizeof(*pp));
+	pp->spp.spp_type = FC_TYPE_FCP;
+	pp->spp.spp_flags = FC_SPP_EST_IMG_PAIR;
+	pp->spp.spp_params = htonl(rp->local_port->service_params);
+	fc_frame_setup(fp, FC_RCTL_ELS_REQ, FC_TYPE_ELS);
+	if (!lp->tt.exch_seq_send(lp, fp,
+				  fc_rport_prli_recv_resp,
+				  rport, lp->e_d_tov,
+				  rp->local_port->fid,
+				  rport->port_id,
+				  FC_FC_SEQ_INIT | FC_FC_END_SEQ))
+		fc_rport_retry(rport);
+}
+
+/*
+ * Handle an incoming RTV (Request Timeout Value) response.
+ * Many targets don't seem to support RTV.
+ */
+static void fc_rport_els_rtv_resp(struct fc_seq *sp, struct fc_frame *fp,
+				  void *rp_arg)
+{
+	struct fc_rport *rport = rp_arg;
+	struct fc_rport_libfc_priv *rp = rport->dd_data;
+	struct fc_lport *lp = rp->local_port;
+	u8 op;
+
+	if (IS_ERR(fp)) {
+		fc_rport_error(rport, fp);
+		return;
+	}
+
+	fc_rport_lock(rport);
+	op = fc_frame_payload_op(fp);
+	if (op == ELS_LS_ACC) {
+		struct fc_els_rtv_acc *rtv;
+		u32 toq;
+		u32 tov;
+
+		rtv = fc_frame_payload_get(fp, sizeof(*rtv));
+		if (rtv) {
+			toq = ntohl(rtv->rtv_toq);
+			tov = ntohl(rtv->rtv_r_a_tov);
+			if (tov == 0)
+				tov = 1;
+			rp->r_a_tov = tov;
+			tov = ntohl(rtv->rtv_e_d_tov);
+			if (toq & FC_ELS_RTV_EDRES)
+				tov /= 1000000;
+			if (tov == 0)
+				tov = 1;
+			rp->e_d_tov = tov;
+		}
+	}
+	fc_rport_state_enter(rport, RPORT_ST_READY);
+	fc_rport_unlock(rport);
+	if (fc_rp_debug)
+		FC_DBG("remote %6x ready", rport->port_id);
+	if (rport == lp->dns_rp &&
+	    lp->state == LPORT_ST_DNS) {
+		fc_lport_lock(lp);
+		del_timer(&lp->state_timer);
+		lp->tt.disc_enter_dns(lp);
+		fc_lport_unlock(lp);
+	}
+	fc_frame_free(fp);
+}
+
+/*
+ * Send ELS RTV (Request Timeout Value) request to remote port.
+ */
+static void fc_rport_enter_rtv(struct fc_rport *rport)
+{
+	struct fc_els_rtv *rtv;
+	struct fc_frame *fp;
+	struct fc_lport *lp;
+	struct fc_rport_libfc_priv *rp = rport->dd_data;
+
+	lp = rp->local_port;
+	fc_rport_state_enter(rport, RPORT_ST_RTV);
+
+	fp = fc_frame_alloc(lp, sizeof(*rtv));
+	if (!fp)
+		return fc_rport_retry(rport);
+	rtv = fc_frame_payload_get(fp, sizeof(*rtv));
+	WARN_ON(!rtv);
+	memset(rtv, 0, sizeof(*rtv));
+	rtv->rtv_cmd = ELS_RTV;
+	fc_frame_setup(fp, FC_RCTL_ELS_REQ, FC_TYPE_ELS);
+	if (!lp->tt.exch_seq_send(lp, fp,
+				  fc_rport_els_rtv_resp,
+				  rport, lp->e_d_tov,
+				  rp->local_port->fid,
+				  rport->port_id,
+				  FC_FC_SEQ_INIT | FC_FC_END_SEQ))
+		fc_rport_retry(rport);
+}
+
+static void fc_rport_enter_logo(struct fc_rport *rport)
+{
+	struct fc_frame *fp;
+	struct fc_els_logo *logo;
+	struct fc_lport *lp;
+	struct fc_rport_libfc_priv *rp = rport->dd_data;
+
+	fc_rport_state_enter(rport, RPORT_ST_LOGO);
+
+	lp = rp->local_port;
+	fp = fc_frame_alloc(lp, sizeof(*logo));
+	if (!fp)
+		return fc_rport_retry(rport);
+	logo = fc_frame_payload_get(fp, sizeof(*logo));
+	WARN_ON(!logo);
+	memset(logo, 0, sizeof(*logo));
+	logo->fl_cmd = ELS_LOGO;
+	hton24(logo->fl_n_port_id, lp->fid);
+	logo->fl_n_port_wwn = htonll(lp->wwpn);
+
+	fc_frame_setup(fp, FC_RCTL_ELS_REQ, FC_TYPE_ELS);
+	if (!lp->tt.exch_seq_send(lp, fp,
+				   fc_rport_logo_recv_resp,
+				   rport, lp->e_d_tov,
+				   rp->local_port->fid,
+				   rport->port_id,
+				   FC_FC_SEQ_INIT | FC_FC_END_SEQ))
+		fc_rport_retry(rport);
+}
+
+/*
+ * Handle a request received by the exchange manager for the session.
+ * This may be an entirely new session, or a PLOGI or LOGO for an existing one.
+ * This will free the frame.
+ */
+void fc_rport_recv_req(struct fc_seq *sp, struct fc_frame *fp, void *rp_arg)
+{
+	struct fc_rport *rport = rp_arg;
+	struct fc_rport_libfc_priv *rp = rport->dd_data;
+	struct fc_frame_header *fh;
+	struct fc_lport *lp = rp->local_port;
+	struct fc_seq_els_data els_data;
+	u8 op;
+
+	els_data.fp = NULL;
+	els_data.explan = ELS_EXPL_NONE;
+	els_data.reason = ELS_RJT_NONE;
+
+	fh = fc_frame_header_get(fp);
+
+	if (fh->fh_r_ctl == FC_RCTL_ELS_REQ && fh->fh_type == FC_TYPE_ELS) {
+		op = fc_frame_payload_op(fp);
+		switch (op) {
+		case ELS_PLOGI:
+			fc_rport_recv_plogi_req(rport, sp, fp);
+			break;
+		case ELS_PRLI:
+			fc_rport_recv_prli_req(rport, sp, fp);
+			break;
+		case ELS_PRLO:
+			fc_rport_recv_prlo_req(rport, sp, fp);
+			break;
+		case ELS_LOGO:
+			fc_rport_recv_logo_req(rport, sp, fp);
+			break;
+		case ELS_RRQ:
+			els_data.fp = fp;
+			lp->tt.seq_els_rsp_send(sp, ELS_RRQ, &els_data);
+			break;
+		case ELS_REC:
+			els_data.fp = fp;
+			lp->tt.seq_els_rsp_send(sp, ELS_REC, &els_data);
+			break;
+		default:
+			els_data.reason = ELS_RJT_UNSUP;
+			lp->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &els_data);
+			fc_frame_free(fp);
+			break;
+		}
+	} else {
+		fc_frame_free(fp);
+	}
+}
+
+/*
+ * Handle incoming PLOGI request.
+ */
+static void fc_rport_recv_plogi_req(struct fc_rport *rport,
+				    struct fc_seq *sp, struct fc_frame *rx_fp)
+{
+	struct fc_rport_libfc_priv *rp = rport->dd_data;
+	struct fc_frame *fp = rx_fp;
+	struct fc_frame_header *fh;
+	struct fc_lport *lp;
+	struct fc_els_flogi *pl;
+	struct fc_seq_els_data rjt_data;
+	u32 sid;
+	u64 wwpn;
+	u64 wwnn;
+	enum fc_els_rjt_reason reject = 0;
+	u32 f_ctl;
+
+	rjt_data.fp = NULL;
+	fh = fc_frame_header_get(fp);
+	sid = ntoh24(fh->fh_s_id);
+	pl = fc_frame_payload_get(fp, sizeof(*pl));
+	if (!pl) {
+		FC_DBG("incoming PLOGI from %x too short", sid);
+		WARN_ON(1);
+		/* XXX TBD: send reject? */
+		fc_frame_free(fp);
+		return;
+	}
+	wwpn = get_unaligned_be64(&pl->fl_wwpn);
+	wwnn = get_unaligned_be64(&pl->fl_wwnn);
+	fc_rport_lock(rport);
+	lp = rp->local_port;
+
+	/*
+	 * If the session was just created, possibly due to the incoming PLOGI,
+	 * set the state appropriately and accept the PLOGI.
+	 *
+	 * If we had also sent a PLOGI, and if the received PLOGI is from a
+	 * higher WWPN, we accept it, otherwise an LS_RJT is sent with reason
+	 * "command already in progress".
+	 *
+	 * XXX TBD: If the session was ready before, the PLOGI should result in
+	 * all outstanding exchanges being reset.
+	 */
+	switch (rp->rp_state) {
+	case RPORT_ST_INIT:
+		if (fc_rp_debug)
+			FC_DBG("incoming PLOGI from %6x wwpn %llx state INIT "
+			       "- reject\n", sid, wwpn);
+		reject = ELS_RJT_UNSUP;
+		break;
+	case RPORT_ST_STARTED:
+		/*
+		 * we'll only accept a login if the port name
+		 * matches or was unknown.
+		 */
+		if (rport->port_name != -1 &&
+		    rport->port_name != wwpn) {
+			FC_DBG("incoming PLOGI from name %llx expected %llx\n",
+			       wwpn, rport->port_name);
+			reject = ELS_RJT_UNAB;
+		}
+		break;
+	case RPORT_ST_PLOGI:
+		if (fc_rp_debug)
+			FC_DBG("incoming PLOGI from %x in PLOGI state %d",
+			       sid, rp->rp_state);
+		if (wwpn < lp->wwpn)
+			reject = ELS_RJT_INPROG;
+		break;
+	case RPORT_ST_PRLI:
+	case RPORT_ST_ERROR:
+	case RPORT_ST_READY:
+		if (fc_rp_debug)
+			FC_DBG("incoming PLOGI from %x in logged-in state %d "
+			       "- ignored for now", sid, rp->rp_state);
+		/* XXX TBD - should reset */
+		break;
+	case RPORT_ST_NONE:
+	default:
+		if (fc_rp_debug)
+			FC_DBG("incoming PLOGI from %x in unexpected state %d",
+			       sid, rp->rp_state);
+		break;
+	}
+
+	if (reject) {
+		rjt_data.reason = reject;
+		rjt_data.explan = ELS_EXPL_NONE;
+		lp->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
+		fc_frame_free(fp);
+	} else {
+		fp = fc_frame_alloc(lp, sizeof(*pl));
+		if (fp == NULL) {
+			fp = rx_fp;
+			rjt_data.reason = ELS_RJT_UNAB;
+			rjt_data.explan = ELS_EXPL_NONE;
+			lp->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
+			fc_frame_free(fp);
+		} else {
+			sp = lp->tt.seq_start_next(sp);
+			WARN_ON(!sp);
+			fc_rport_set_name(rport, wwpn, wwnn);
+
+			/*
+			 * Get session payload size from incoming PLOGI.
+			 */
+			rport->maxframe_size =
+				fc_plogi_get_maxframe(pl, lp->mfs);
+			fc_frame_free(rx_fp);
+			pl = fc_frame_payload_get(fp, sizeof(*pl));
+			WARN_ON(!pl);
+			fc_lport_plogi_fill(lp, pl, ELS_LS_ACC);
+
+			/*
+			 * Send LS_ACC.	 If this fails,
+			 * the originator should retry.
+			 */
+			f_ctl = FC_FC_SEQ_INIT | FC_FC_LAST_SEQ | FC_FC_END_SEQ;
+			fc_frame_setup(fp, FC_RCTL_ELS_REP, FC_TYPE_ELS);
+			lp->tt.seq_send(lp, sp, fp, f_ctl);
+			if (rp->rp_state == RPORT_ST_PLOGI)
+				fc_rport_enter_prli(rport);
+			else
+				fc_rport_state_enter(rport,
+						     RPORT_ST_PLOGI_RECV);
+		}
+	}
+	fc_rport_unlock(rport);
+}
+
+/*
+ * Handle incoming PRLI request.
+ */
+static void fc_rport_recv_prli_req(struct fc_rport *rport,
+				   struct fc_seq *sp, struct fc_frame *rx_fp)
+{
+	struct fc_rport_libfc_priv *rp = rport->dd_data;
+	struct fc_frame *fp;
+	struct fc_frame_header *fh;
+	struct fc_lport *lp;
+	struct {
+		struct fc_els_prli prli;
+		struct fc_els_spp spp;
+	} *pp;
+	struct fc_els_spp *rspp;	/* request service param page */
+	struct fc_els_spp *spp;	/* response spp */
+	unsigned int len;
+	unsigned int plen;
+	enum fc_els_rjt_reason reason = ELS_RJT_UNAB;
+	enum fc_els_rjt_explan explan = ELS_EXPL_NONE;
+	enum fc_els_spp_resp resp;
+	struct fc_seq_els_data rjt_data;
+	u32 f_ctl;
+	u32 fcp_parm;
+	u32 roles = FC_RPORT_ROLE_UNKNOWN;
+
+	rjt_data.fp = NULL;
+	fh = fc_frame_header_get(rx_fp);
+	lp = rp->local_port;
+	switch (rp->rp_state) {
+	case RPORT_ST_PLOGI_RECV:
+	case RPORT_ST_PRLI:
+	case RPORT_ST_READY:
+		reason = ELS_RJT_NONE;
+		break;
+	default:
+		break;
+	}
+	len = fr_len(rx_fp) - sizeof(*fh);
+	pp = fc_frame_payload_get(rx_fp, sizeof(*pp));
+	if (pp == NULL) {
+		reason = ELS_RJT_PROT;
+		explan = ELS_EXPL_INV_LEN;
+	} else {
+		plen = ntohs(pp->prli.prli_len);
+		if ((plen % 4) != 0 || plen > len) {
+			reason = ELS_RJT_PROT;
+			explan = ELS_EXPL_INV_LEN;
+		} else if (plen < len) {
+			len = plen;
+		}
+		plen = pp->prli.prli_spp_len;
+		if ((plen % 4) != 0 || plen < sizeof(*spp) ||
+		    plen > len || len < sizeof(*pp)) {
+			reason = ELS_RJT_PROT;
+			explan = ELS_EXPL_INV_LEN;
+		}
+		rspp = &pp->spp;
+	}
+	if (reason != ELS_RJT_NONE ||
+	    (fp = fc_frame_alloc(lp, len)) == NULL) {
+		rjt_data.reason = reason;
+		rjt_data.explan = explan;
+		lp->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
+	} else {
+		sp = lp->tt.seq_start_next(sp);
+		WARN_ON(!sp);
+		pp = fc_frame_payload_get(fp, len);
+		WARN_ON(!pp);
+		memset(pp, 0, len);
+		pp->prli.prli_cmd = ELS_LS_ACC;
+		pp->prli.prli_spp_len = plen;
+		pp->prli.prli_len = htons(len);
+		len -= sizeof(struct fc_els_prli);
+
+		/*
+		 * Go through all the service parameter pages and build
+		 * response.  If plen indicates longer SPP than standard,
+		 * use that.  The entire response has been pre-cleared above.
+		 */
+		spp = &pp->spp;
+		while (len >= plen) {
+			spp->spp_type = rspp->spp_type;
+			spp->spp_type_ext = rspp->spp_type_ext;
+			spp->spp_flags = rspp->spp_flags & FC_SPP_EST_IMG_PAIR;
+			resp = FC_SPP_RESP_ACK;
+			if (rspp->spp_flags & FC_SPP_RPA_VAL)
+				resp = FC_SPP_RESP_NO_PA;
+			switch (rspp->spp_type) {
+			case 0:	/* common to all FC-4 types */
+				break;
+			case FC_TYPE_FCP:
+				fcp_parm = ntohl(rspp->spp_params);
+				if (fcp_parm & FCP_SPPF_RETRY)
+					rp->flags |= FC_RP_FLAGS_RETRY;
+				rport->supported_classes = FC_COS_CLASS3;
+				if (fcp_parm & FCP_SPPF_INIT_FCN)
+					roles |= FC_RPORT_ROLE_FCP_INITIATOR;
+				if (fcp_parm & FCP_SPPF_TARG_FCN)
+					roles |= FC_RPORT_ROLE_FCP_TARGET;
+				fc_remote_port_rolechg(rport, roles);
+				spp->spp_params =
+					htonl(rp->local_port->service_params);
+				break;
+			default:
+				resp = FC_SPP_RESP_INVL;
+				break;
+			}
+			spp->spp_flags |= resp;
+			len -= plen;
+			rspp = (struct fc_els_spp *)((char *)rspp + plen);
+			spp = (struct fc_els_spp *)((char *)spp + plen);
+		}
+
+		/*
+		 * Send LS_ACC.	 If this fails, the originator should retry.
+		 */
+		f_ctl = FC_FC_SEQ_INIT | FC_FC_LAST_SEQ | FC_FC_END_SEQ;
+		fc_frame_setup(fp, FC_RCTL_ELS_REP, FC_TYPE_ELS);
+		lp->tt.seq_send(lp, sp, fp, f_ctl);
+
+		/*
+		 * Get lock and re-check state.
+		 */
+		fc_rport_lock(rport);
+		switch (rp->rp_state) {
+		case RPORT_ST_PLOGI_RECV:
+		case RPORT_ST_PRLI:
+			fc_rport_state_enter(rport, RPORT_ST_READY);
+			if (fc_rp_debug)
+				FC_DBG("remote %6x ready", rport->port_id);
+			if (rport == lp->dns_rp &&
+			    lp->state == LPORT_ST_DNS) {
+				fc_lport_lock(lp);
+				del_timer(&lp->state_timer);
+				lp->tt.disc_enter_dns(lp);
+				fc_lport_unlock(lp);
+			}
+			break;
+		case RPORT_ST_READY:
+			break;
+		default:
+			break;
+		}
+		fc_rport_unlock(rport);
+	}
+	fc_frame_free(rx_fp);
+}
+
+/*
+ * Handle incoming PRLO request.
+ */
+static void fc_rport_recv_prlo_req(struct fc_rport *rport, struct fc_seq *sp,
+				   struct fc_frame *fp)
+{
+	struct fc_rport_libfc_priv *rp = rport->dd_data;
+	struct fc_frame_header *fh;
+	struct fc_lport *lp = rp->local_port;
+	struct fc_seq_els_data rjt_data;
+
+	fh = fc_frame_header_get(fp);
+	FC_DBG("incoming PRLO from %x state %d",
+	       ntoh24(fh->fh_s_id), rp->rp_state);
+	rjt_data.fp = NULL;
+	rjt_data.reason = ELS_RJT_UNAB;
+	rjt_data.explan = ELS_EXPL_NONE;
+	lp->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
+	fc_frame_free(fp);
+}
+
+/*
+ * Handle incoming LOGO request.
+ */
+static void fc_rport_recv_logo_req(struct fc_rport *rport, struct fc_seq *sp,
+				   struct fc_frame *fp)
+{
+	struct fc_frame_header *fh;
+	struct fc_rport_libfc_priv *rp = rport->dd_data;
+	struct fc_lport *lp = rp->local_port;
+
+	fh = fc_frame_header_get(fp);
+	fc_rport_lock(rport);
+	fc_rport_state_enter(rport, RPORT_ST_INIT);
+	fc_rport_unlock(rport);
+	if (fc_rp_debug)
+		FC_DBG("remote %6x closed", rport->port_id);
+	if (rport == lp->dns_rp &&
+	    lp->state != LPORT_ST_RESET) {
+		fc_lport_lock(lp);
+		del_timer(&lp->state_timer);
+		lp->dns_rp = NULL;
+		if (lp->state == LPORT_ST_DNS_STOP) {
+			fc_lport_unlock(lp);
+			lp->tt.lport_logout(lp);
+		} else {
+			lp->tt.lport_login(lp);
+			fc_lport_unlock(lp);
+		}
+		fc_remote_port_delete(rport);
+	}
+	lp->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);
+	fc_frame_free(fp);
+}
+
+int fc_rport_init(struct fc_lport *lp)
+{
+	if (!lp->tt.rport_login)
+		lp->tt.rport_login = fc_rport_login;
+
+	if (!lp->tt.rport_logout)
+		lp->tt.rport_logout = fc_rport_logout;
+
+	if (!lp->tt.rport_recv_req)
+		lp->tt.rport_recv_req = fc_rport_recv_req;
+
+	if (!lp->tt.rport_create)
+		lp->tt.rport_create = fc_remote_port_create;
+
+	if (!lp->tt.rport_lookup)
+		lp->tt.rport_lookup = fc_rport_lookup;
+
+	if (!lp->tt.rport_reset)
+		lp->tt.rport_reset = fc_rport_reset;
+
+	if (!lp->tt.rport_reset_list)
+		lp->tt.rport_reset_list = fc_rport_reset_list;
+
+	return 0;
+}
+EXPORT_SYMBOL(fc_rport_init);
diff --git a/include/scsi/libfc/fc_frame.h b/include/scsi/libfc/fc_frame.h
new file mode 100644
index 0000000..c7a52bb
--- /dev/null
+++ b/include/scsi/libfc/fc_frame.h
@@ -0,0 +1,236 @@
+/*
+ * Copyright(c) 2007 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+#ifndef _FC_FRAME_H_
+#define _FC_FRAME_H_
+
+#include <linux/scatterlist.h>
+#include <linux/skbuff.h>
+
+#include <scsi/fc/fc_fs.h>
+#include <scsi/fc/fc_encaps.h>
+
+/*
+ * The fc_frame interface is used to pass frame data between functions.
+ * The frame includes the data buffer, length, and SOF / EOF delimiter types.
+ * A pointer to the port structure of the receiving port is also included.
+ */
+
+#define	FC_FRAME_HEADROOM	32	/* headroom for VLAN + FCoE headers */
+#define	FC_FRAME_TAILROOM	8	/* trailer space for FCoE */
+
+/*
+ * Information about an individual fibre channel frame received or to be sent.
+ * The buffer may be in up to 4 additional non-contiguous sections,
+ * but the linear section must hold the frame header.
+ */
+#define FC_FRAME_SG_LEN		4	/* scatter/gather list maximum length */
+
+#define fp_skb(fp)	(&((fp)->skb))
+#define fr_hdr(fp)	((fp)->skb.data)
+#define fr_len(fp)	((fp)->skb.len)
+#define fr_cb(fp)	((struct fcoe_rcv_info *)&((fp)->skb.cb[0]))
+#define fr_dev(fp)	(fr_cb(fp)->fr_dev)
+#define fr_seq(fp)	(fr_cb(fp)->fr_seq)
+#define fr_sof(fp)	(fr_cb(fp)->fr_sof)
+#define fr_eof(fp)	(fr_cb(fp)->fr_eof)
+#define fr_flags(fp)	(fr_cb(fp)->fr_flags)
+
+struct fc_frame {
+	struct sk_buff skb;
+};
+
+struct fcoe_rcv_info {
+	struct packet_type  *ptype;
+	struct fc_lport	*fr_dev;	/* transport layer private pointer */
+	struct fc_seq	*fr_seq;	/* for use with exchange manager */
+	enum fc_sof	fr_sof;		/* start of frame delimiter */
+	enum fc_eof	fr_eof;		/* end of frame delimiter */
+	u8		fr_flags;	/* flags - see below */
+};
+
+/*
+ * Get the fcoe_rcv_info for an skb that's already been imported.
+ */
+static inline struct fcoe_rcv_info *fcoe_dev_from_skb(const struct sk_buff *skb)
+{
+	BUILD_BUG_ON(sizeof(struct fcoe_rcv_info) > sizeof(skb->cb));
+	return (struct fcoe_rcv_info *) skb->cb;
+}
+
+/*
+ * fr_flags.
+ */
+#define	FCPHF_CRC_UNCHECKED	0x01	/* CRC not computed, still appended */
+
+/*
+ * Initialize a frame.
+ * We don't do a complete memset here for performance reasons.
+ * The caller must eventually set fr_hdr, fr_len, fr_sof, and fr_eof.
+ */
+static inline void fc_frame_init(struct fc_frame *fp)
+{
+	fr_dev(fp) = NULL;
+	fr_seq(fp) = NULL;
+	fr_flags(fp) = 0;
+}
+
+struct fc_frame *fc_frame_alloc_fill(struct fc_lport *, size_t payload_len);
+
+struct fc_frame *__fc_frame_alloc(size_t payload_len);
+
+/*
+ * Get frame for sending via port.
+ */
+static inline struct fc_frame *_fc_frame_alloc(struct fc_lport *dev,
+					       size_t payload_len)
+{
+	return __fc_frame_alloc(payload_len);
+}
+
+/*
+ * Allocate fc_frame structure and buffer.  Set the initial length to
+ * len + sizeof(struct fc_frame_header).
+ */
+static inline struct fc_frame *fc_frame_alloc(struct fc_lport *dev, size_t len)
+{
+	struct fc_frame *fp;
+
+	/*
+	 * Note: Since len will often be a constant multiple of 4,
+	 * this check will usually be evaluated and eliminated at compile time.
+	 */
+	if ((len % 4) != 0)
+		fp = fc_frame_alloc_fill(dev, len);
+	else
+		fp = _fc_frame_alloc(dev, len);
+	return fp;
+}
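+
+/*
+ * Transmit-side sketch (mirrors the ELS senders in fc_rport.c):
+ * allocate a frame sized for the payload, fill it in, then set
+ * R_CTL/TYPE via fc_frame_setup() before handing it to the exchange
+ * layer.  The RTV payload here is just an example.
+ *
+ *	struct fc_frame *fp;
+ *	struct fc_els_rtv *rtv;
+ *
+ *	fp = fc_frame_alloc(lp, sizeof(*rtv));
+ *	if (!fp)
+ *		return;			(caller chooses a retry policy)
+ *	rtv = fc_frame_payload_get(fp, sizeof(*rtv));
+ *	memset(rtv, 0, sizeof(*rtv));
+ *	rtv->rtv_cmd = ELS_RTV;
+ *	fc_frame_setup(fp, FC_RCTL_ELS_REQ, FC_TYPE_ELS);
+ */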
+
+/*
+ * Free the fc_frame structure and buffer.
+ */
+static inline void fc_frame_free(struct fc_frame *fp)
+{
+	kfree_skb(fp_skb(fp));
+}
+
+static inline int fc_frame_is_linear(struct fc_frame *fp)
+{
+	return !skb_is_nonlinear(fp_skb(fp));
+}
+
+/*
+ * Get frame header from message in fc_frame structure.
+ * This hides a cast and provides a place to add some checking.
+ */
+static inline
+struct fc_frame_header *fc_frame_header_get(const struct fc_frame *fp)
+{
+	WARN_ON(fr_len(fp) < sizeof(struct fc_frame_header));
+	return (struct fc_frame_header *) fr_hdr(fp);
+}
+
+/*
+ * Get frame payload from message in fc_frame structure.
+ * This hides a cast and provides a place to add some checking.
+ * The len parameter is the minimum length for the payload portion.
+ * Returns NULL if the frame is too short.
+ *
+ * This assumes the interesting part of the payload is in the first part
+ * of the buffer for received data.  This may not be appropriate to use for
+ * buffers being transmitted.
+ */
+static inline void *fc_frame_payload_get(const struct fc_frame *fp,
+					 size_t len)
+{
+	void *pp = NULL;
+
+	if (fr_len(fp) >= sizeof(struct fc_frame_header) + len)
+		pp = fc_frame_header_get(fp) + 1;
+	return pp;
+}
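+
+/*
+ * Receive-side sketch: always bounds-check through this helper before
+ * using the payload; it returns NULL for short frames.
+ *
+ *	struct fc_els_flogi *plp;
+ *
+ *	plp = fc_frame_payload_get(fp, sizeof(*plp));
+ *	if (!plp)
+ *		return;		(frame too short: reject or drop)
+ */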
+
+/*
+ * Get frame payload opcode (first byte) from message in fc_frame structure.
+ * This hides a cast and provides a place to add some checking. Return 0
+ * if the frame has no payload.
+ */
+static inline u8 fc_frame_payload_op(const struct fc_frame *fp)
+{
+	u8 *cp;
+
+	cp = fc_frame_payload_get(fp, sizeof(u8));
+	if (!cp)
+		return 0;
+	return *cp;
+}
+
+/*
+ * Get FC class from frame.
+ */
+static inline enum fc_class fc_frame_class(const struct fc_frame *fp)
+{
+	return fc_sof_class(fr_sof(fp));
+}
+
+/*
+ * Set r_ctl and type in preparation for sending frame.
+ * This also clears fh_parm_offset.
+ */
+static inline void fc_frame_setup(struct fc_frame *fp, enum fc_rctl r_ctl,
+				  enum fc_fh_type type)
+{
+	struct fc_frame_header *fh;
+
+	fh = fc_frame_header_get(fp);
+	WARN_ON(r_ctl == 0);
+	fh->fh_r_ctl = r_ctl;
+	fh->fh_type = type;
+	fh->fh_parm_offset = htonl(0);
+}
+
+/*
+ * Set offset in preparation for sending frame.
+ */
+static inline void
+fc_frame_set_offset(struct fc_frame *fp, u32 offset)
+{
+	struct fc_frame_header *fh;
+
+	fh = fc_frame_header_get(fp);
+	fh->fh_parm_offset = htonl(offset);
+}
+
+/*
+ * Check the CRC in a frame.
+ * The CRC immediately follows the last data item *AFTER* the length.
+ * The return value is zero if the CRC matches.
+ */
+u32 fc_frame_crc_check(struct fc_frame *);
+
+/*
+ * Check for leaks.
+ * Print the frame header of any currently allocated frame, assuming there
+ * should be none at this point.
+ */
+void fc_frame_leak_check(void);
+
+#endif /* _FC_FRAME_H_ */
diff --git a/include/scsi/libfc/libfc.h b/include/scsi/libfc/libfc.h
new file mode 100644
index 0000000..d3a2569
--- /dev/null
+++ b/include/scsi/libfc/libfc.h
@@ -0,0 +1,737 @@
+/*
+ * Copyright(c) 2007 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+#ifndef _LIBFC_H_
+#define _LIBFC_H_
+
+#include <linux/timer.h>
+#include <linux/if.h>
+
+#include <scsi/scsi_transport.h>
+#include <scsi/scsi_transport_fc.h>
+
+#include <scsi/fc/fc_fcp.h>
+#include <scsi/fc/fc_ns.h>
+#include <scsi/fc/fc_els.h>
+
+#include <scsi/libfc/fc_frame.h>
+
+#define LIBFC_DEBUG
+
+#ifdef LIBFC_DEBUG
+/*
+ * Log message.
+ */
+#define FC_DBG(fmt, args...)						\
+	do {								\
+		printk(KERN_INFO "%s " fmt, __func__, ##args);	\
+	} while (0)
+#else
+#define FC_DBG(fmt, args...)
+#endif
+
+/*
+ * libfc error codes
+ */
+#define	FC_NO_ERR	0	/* no error */
+#define	FC_EX_TIMEOUT	1	/* Exchange timeout */
+#define	FC_EX_CLOSED	2	/* Exchange closed */
+
+/* some helpful macros */
+
+#define ntohll(x) be64_to_cpu(x)
+#define htonll(x) cpu_to_be64(x)
+
+#define ntoh24(p)	(((p)[0] << 16) | ((p)[1] << 8) | ((p)[2]))
+
+#define hton24(p, v)	do { \
+	(p)[0] = (((v) >> 16) & 0xFF); \
+	(p)[1] = (((v) >> 8) & 0xFF); \
+	(p)[2] = ((v) & 0xFF); \
+} while (0)
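+
+/*
+ * Example: FC_IDs are 24-bit values carried as three bytes in the
+ * frame header (e.g. fh_s_id/fh_d_id).  These helpers convert to and
+ * from a host u32:
+ *
+ *	u8 buf[3];
+ *
+ *	hton24(buf, 0x010203);		(buf = { 0x01, 0x02, 0x03 })
+ *	WARN_ON(ntoh24(buf) != 0x010203);
+ */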
+
+struct fc_exch_mgr;
+
+/*
+ * tgt_flags
+ */
+#define FC_TGT_REC_SUPPORTED	    (1 << 0)
+
+/*
+ * FC HBA status
+ */
+#define FC_PAUSE		    (1 << 1)
+#define FC_LINK_UP		    (1 << 0)
+
+/* for fc_softc */
+#define FC_MAX_OUTSTANDING_COMMANDS 1024
+
+/*
+ * Transport Capabilities
+ */
+#define TRANS_C_SG		    (1 << 0)  /* Scatter gather */
+
+enum fc_lport_state {
+	LPORT_ST_NONE = 0,
+	LPORT_ST_FLOGI,
+	LPORT_ST_DNS,
+	LPORT_ST_REG_PN,
+	LPORT_ST_REG_FT,
+	LPORT_ST_SCR,
+	LPORT_ST_READY,
+	LPORT_ST_DNS_STOP,
+	LPORT_ST_LOGO,
+	LPORT_ST_RESET
+};
+
+enum fc_rport_state {
+	RPORT_ST_NONE = 0,
+	RPORT_ST_INIT,		/* initialized */
+	RPORT_ST_STARTED,	/* started */
+	RPORT_ST_PLOGI,		/* waiting for PLOGI completion */
+	RPORT_ST_PLOGI_RECV,	/* received PLOGI (as target) */
+	RPORT_ST_PRLI,		/* waiting for PRLI completion */
+	RPORT_ST_RTV,		/* waiting for RTV completion */
+	RPORT_ST_ERROR,		/* error */
+	RPORT_ST_READY,		/* ready for use */
+	RPORT_ST_LOGO,		/* port logout sent */
+};
+
+/*
+ * Fibre Channel information about remote N port.
+ */
+struct fc_rport_libfc_priv {
+	struct fc_lport		*local_port;
+	enum fc_rport_state rp_state;
+	u16			flags;
+	#define FC_RP_FLAGS_REC_SUPPORTED	(1 << 0)
+	#define FC_RP_FLAGS_RETRY		(1 << 1)
+	u16		max_seq;	/* max concurrent sequences */
+	unsigned int	retries;	/* retry count in current state */
+	unsigned int	e_d_tov;	/* negotiated e_d_tov (msec) */
+	unsigned int	r_a_tov;	/* received r_a_tov (msec) */
+	spinlock_t	rp_lock;	/* lock on state changes */
+	struct delayed_work	retry_work;
+};
+
+static inline void fc_rport_set_name(struct fc_rport *rport, u64 wwpn, u64 wwnn)
+{
+	rport->node_name = wwnn;
+	rport->port_name = wwpn;
+}
+
+/*
+ * fcoe stats structure
+ */
+struct fcoe_dev_stats {
+	u64		SecondsSinceLastReset;
+	u64		TxFrames;
+	u64		TxWords;
+	u64		RxFrames;
+	u64		RxWords;
+	u64		ErrorFrames;
+	u64		DumpedFrames;
+	u64		LinkFailureCount;
+	u64		LossOfSignalCount;
+	u64		InvalidTxWordCount;
+	u64		InvalidCRCCount;
+	u64		InputRequests;
+	u64		OutputRequests;
+	u64		ControlRequests;
+	u64		InputMegabytes;
+	u64		OutputMegabytes;
+};
+
+/*
+ * ELS data used when sending an ELS response; the response is built
+ * mainly from information held by the exchange and sequence in the
+ * EM layer.
+ */
+struct fc_seq_els_data {
+	struct fc_frame *fp;
+	enum fc_els_rjt_reason reason;
+	enum fc_els_rjt_explan explan;
+};
+
+struct libfc_function_template {
+
+	/**
+	 * Mandatory Fields
+	 *
+	 * These handlers must be implemented by the LLD.
+	 */
+
+	/*
+	 * Interface to send a FC frame
+	 */
+	int (*frame_send)(struct fc_lport *lp, struct fc_frame *fp);
+
+	/**
+	 * Optional Fields
+	 *
+	 * The LLD may choose to implement any of the following handlers.
+	 * If the LLD does not specify a handler and leaves its pointer
+	 * NULL, the default libfc function will be used for that handler.
+	 */
+
+	/**
+	 * Exchange Manager interfaces
+	 */
+
+	/*
+	 * Send the FC frame payload using a new exchange and sequence.
+	 *
+	 * Some of the frame header's fields must be filled in before
+	 * calling exch_seq_send(); those fields are:
+	 *
+	 * - routing control
+	 * - FC header type
+	 * - parameter or relative offset
+	 *
+	 * The exchange response handler is set in this routine to the
+	 * resp() function pointer. It can be called in two scenarios:
+	 * if a timeout occurs or if a response frame is received for
+	 * the exchange. On a timeout, the fc_frame pointer passed to
+	 * the response handler encodes the error, detectable via the
+	 * IS_ERR() related macros.
+	 *
+	 * The response handler argument resp_arg is passed back to the
+	 * resp handler when it is invoked by the EM layer in the two
+	 * scenarios mentioned above.
+	 *
+	 * The timeout value (in msec) for an exchange is set if a
+	 * non-zero timer_msec argument is specified. The timer is
+	 * canceled when it fires or when the exchange is done. The
+	 * exchange timeout handler is registered by the EM layer.
+	 *
+	 * The caller also needs to specify the FC SID, DID, and frame
+	 * control (f_ctl) fields.
+	 */
+	struct fc_seq *(*exch_seq_send)(struct fc_lport *lp,
+					struct fc_frame *fp,
+					void (*resp)(struct fc_seq *,
+						     struct fc_frame *fp,
+						     void *arg),
+					void *resp_arg,	unsigned int timer_msec,
+					u32 sid, u32 did, u32 f_ctl);
+
+	/*
+	 * send a frame using existing sequence and exchange.
+	 */
+	int (*seq_send)(struct fc_lport *lp, struct fc_seq *sp,
+			struct fc_frame *fp, u32 f_ctl);
+
+	/*
+	 * Send an ELS response, using mainly information held by the
+	 * exchange and sequence in the EM layer.
+	 */
+	void (*seq_els_rsp_send)(struct fc_seq *sp, enum fc_els_cmd els_cmd,
+				 struct fc_seq_els_data *els_data);
+
+	/*
+	 * Abort an exchange and sequence. Generally called because of a
+	 * timeout or an abort from the upper layer.
+	 */
+	int (*seq_exch_abort)(const struct fc_seq *req_sp);
+
+	/*
+	 * Indicate that an exchange/sequence tuple is complete and the memory
+	 * allocated for the related objects may be freed.
+	 */
+	void (*exch_done)(struct fc_seq *sp);
+
+	/*
+	 * Assigns an EM and a free XID for a new exchange and then
+	 * allocates a new exchange and sequence pair.
+	 * The fp may be used to determine the free XID.
+	 */
+	struct fc_exch *(*exch_get)(struct fc_lport *lp, struct fc_frame *fp);
+
+	/*
+	 * Release an XID previously assigned by the exch_get API.
+	 * The LLD may implement this if the XID is assigned by the
+	 * LLD in exch_get().
+	 */
+	void (*exch_put)(struct fc_lport *lp, struct fc_exch_mgr *mp,
+			 u16 ex_id);
+
+	/*
+	 * Start a new sequence on the same exchange/sequence tuple.
+	 */
+	struct fc_seq *(*seq_start_next)(struct fc_seq *sp);
+
+	/*
+	 * Reset an exchange manager, completing all sequences and exchanges.
+	 * If s_id is non-zero, reset only exchanges originating from that FID.
+	 * If d_id is non-zero, reset only exchanges sending to that FID.
+	 */
+	void (*exch_mgr_reset)(struct fc_exch_mgr *,
+			       u32 s_id, u32 d_id);
+
+	/*
+	 * Get the exchange IDs (OX_ID and RX_ID) of a sequence.
+	 */
+	void (*seq_get_xids)(struct fc_seq *sp, u16 *oxid, u16 *rxid);
+
+	/*
+	 * Set REC (Read Exchange Concise) data on a sequence.
+	 */
+	void (*seq_set_rec_data)(struct fc_seq *sp, u32 rec_data);
+
+	/**
+	 * Local Port interfaces
+	 */
+
+	/*
+	 * Receive a frame to a local port.
+	 */
+	void (*lport_recv)(struct fc_lport *lp, struct fc_seq *sp,
+			   struct fc_frame *fp);
+
+	int (*lport_login)(struct fc_lport *);
+	int (*lport_reset)(struct fc_lport *);
+	int (*lport_logout)(struct fc_lport *);
+
+	/**
+	 * Remote Port interfaces
+	 */
+
+	/*
+	 * Initiates the RP state machine. It is called from the LP module.
+	 * This function will issue the following commands to the N_Port
+	 * identified by the FC ID provided.
+	 *
+	 * - PLOGI
+	 * - PRLI
+	 * - RTV
+	 */
+	int (*rport_login)(struct fc_rport *rport);
+
+	/*
+	 * Logs the specified local port out of an N_Port identified
+	 * by the ID provided.
+	 */
+	int (*rport_logout)(struct fc_rport *rport);
+
+	void (*rport_recv_req)(struct fc_seq *, struct fc_frame *, void *);
+
+	struct fc_rport *(*rport_lookup)(const struct fc_lport *, u32);
+
+	struct fc_rport *(*rport_create)(struct fc_lport *,
+					 struct fc_rport_identifiers *);
+
+	void (*rport_reset)(struct fc_rport *);
+
+	void (*rport_reset_list)(struct fc_lport *);
+
+	/**
+	 * SCSI interfaces
+	 */
+
+	/*
+	 * Used at least during link-down and reset.
+	 */
+	void (*scsi_cleanup)(struct fc_lport *);
+
+	/*
+	 * Abort all I/O on a local port
+	 */
+	void (*scsi_abort_io)(struct fc_lport *);
+
+	/**
+	 * Discovery interfaces
+	 */
+
+	void (*disc_recv_req)(struct fc_seq *,
+			      struct fc_frame *, struct fc_lport *);
+
+	/*
+	 * Start discovery for a local port.
+	 */
+	int (*disc_start)(struct fc_lport *);
+
+	void (*disc_enter_dns)(struct fc_lport *);
+	void (*disc_stop)(struct fc_lport *);
+};
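+
+/*
+ * Minimal LLD wiring sketch (hypothetical driver, illustrative only):
+ * frame_send is the one mandatory handler; each fc_*_init() call
+ * below fills any remaining NULL handlers with the libfc defaults.
+ *
+ *	static int example_frame_send(struct fc_lport *lp,
+ *				      struct fc_frame *fp)
+ *	{
+ *		(hand fp to the hardware or encapsulation layer)
+ *		return 0;
+ *	}
+ *
+ *	static void example_setup(struct fc_lport *lp)
+ *	{
+ *		lp->tt.frame_send = example_frame_send;
+ *		fc_lport_init(lp);
+ *		fc_rport_init(lp);
+ *		fc_exch_init(lp);
+ *		fc_fcp_init(lp);
+ *		fc_ns_init(lp);
+ *	}
+ */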
+
+struct fc_lport {
+	struct list_head list;
+
+	/* Associations */
+	struct Scsi_Host	*host;
+	struct fc_exch_mgr	*emp;
+	struct fc_rport		*dns_rp;
+	struct fc_rport		*ptp_rp;
+	void			*scsi_priv;
+
+	/* Operational Information */
+	struct libfc_function_template tt;
+	u16			link_status;
+	u8			ns_disc_done;
+	enum fc_lport_state	state;
+	unsigned long		boot_time;
+
+	struct fc_host_statistics host_stats;
+	struct fcoe_dev_stats	*dev_stats[NR_CPUS];
+
+	u64			wwpn;
+	u64			wwnn;
+	u32			fid;
+	u8			retry_count;
+	unsigned char		ns_disc_retry_count;
+	unsigned char		ns_disc_delay;
+	unsigned char		ns_disc_pending;
+	unsigned char		ns_disc_requested;
+	unsigned short		ns_disc_seq_count;
+	unsigned char		ns_disc_buf_len;
+
+	/* Capabilities */
+	char			ifname[IFNAMSIZ];
+	u32			capabilities;
+	u32			mfs;	/* max FC payload size */
+	unsigned int		service_params;
+	unsigned int		e_d_tov;
+	unsigned int		r_a_tov;
+	u8			max_retry_count;
+	u16			link_speed;
+	u16			link_supported_speeds;
+	struct fc_ns_fts	fcts;	        /* FC-4 type masks */
+	struct fc_els_rnid_gen	rnid_gen;	/* RNID information */
+
+	/* Locks */
+	spinlock_t		state_lock;	/* serializes state changes */
+
+	/* Miscellaneous */
+	struct fc_gpn_ft_resp	ns_disc_buf;	/* partial name buffer */
+	struct timer_list	state_timer;	/* timer for state events */
+	struct delayed_work	ns_disc_work;
+
+	void			*drv_priv;
+};
+
+/**
+ * FC_LPORT HELPER FUNCTIONS
+ *****************************/
+
+static inline int fc_lport_test_ready(struct fc_lport *lp)
+{
+	return lp->state == LPORT_ST_READY;
+}
+
+static inline u32 fc_lport_get_fid(const struct fc_lport *lp)
+{
+	return lp->fid;
+}
+
+static inline void fc_set_wwnn(struct fc_lport *lp, u64 wwnn)
+{
+	lp->wwnn = wwnn;
+}
+
+static inline void fc_set_wwpn(struct fc_lport *lp, u64 wwpn)
+{
+	lp->wwpn = wwpn;
+}
+
+static inline int fc_lport_locked(struct fc_lport *lp)
+{
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+	return spin_is_locked(&lp->state_lock);
+#else
+	return 1;
+#endif /* CONFIG_SMP || CONFIG_DEBUG_SPINLOCK */
+}
+
+/*
+ * Locking code.
+ */
+static inline void fc_lport_lock(struct fc_lport *lp)
+{
+	spin_lock_bh(&lp->state_lock);
+}
+
+static inline void fc_lport_unlock(struct fc_lport *lp)
+{
+	spin_unlock_bh(&lp->state_lock);
+}
+
+static inline void fc_lport_state_enter(struct fc_lport *lp,
+					enum fc_lport_state state)
+{
+	WARN_ON(!fc_lport_locked(lp));
+	del_timer(&lp->state_timer);
+	if (state != lp->state)
+		lp->retry_count = 0;
+	lp->state = state;
+}
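+
+/*
+ * Sketch of the expected locking pattern: state transitions happen
+ * with the state_lock held, which fc_lport_state_enter() asserts.
+ *
+ *	fc_lport_lock(lp);
+ *	fc_lport_state_enter(lp, LPORT_ST_RESET);
+ *	fc_lport_unlock(lp);
+ */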
+
+
+/**
+ * LOCAL PORT LAYER
+ *****************************/
+int fc_lport_init(struct fc_lport *lp);
+
+/*
+ * Destroy the specified local port by finding and freeing all
+ * fc_rports associated with it and then by freeing the fc_lport
+ * itself.
+ */
+int fc_lport_destroy(struct fc_lport *lp);
+
+/*
+ * Logout the specified local port from the fabric
+ */
+int fc_fabric_logoff(struct fc_lport *lp);
+
+/*
+ * Initiate the LP state machine. This handler will use fc_host_attr
+ * to store the FLOGI service parameters, so fc_host_attr must be
+ * initialized before calling this handler.
+ */
+int fc_fabric_login(struct fc_lport *lp);
+
+/*
+ * The link is up for the given local port.
+ */
+void fc_linkup(struct fc_lport *);
+
+/*
+ * Link is down for the given local port.
+ */
+void fc_linkdown(struct fc_lport *);
+
+/*
+ * Pause and unpause traffic.
+ */
+void fc_pause(struct fc_lport *);
+void fc_unpause(struct fc_lport *);
+
+/*
+ * Configure the local port.
+ */
+int fc_lport_config(struct fc_lport *);
+
+/*
+ * Reset the local port.
+ */
+int fc_lport_enter_reset(struct fc_lport *);
+
+/*
+ * Set or reset the mfs (maximum FC payload size).
+ */
+int fc_set_mfs(struct fc_lport *lp, u32 mfs);
+
+
+/**
+ * REMOTE PORT LAYER
+ *****************************/
+int fc_rport_init(struct fc_lport *lp);
+
+
+/**
+ * DISCOVERY LAYER
+ *****************************/
+int fc_ns_init(struct fc_lport *lp);
+
+
+/**
+ * SCSI LAYER
+ *****************************/
+/*
+ * Initialize the SCSI block of libfc
+ */
+int fc_fcp_init(struct fc_lport *);
+
+/*
+ * This section provides an API which allows direct interaction
+ * with the SCSI-ml. Each of these functions satisfies a function
+ * pointer defined in Scsi_Host and therefore is always called
+ * directly from the SCSI-ml.
+ */
+int fc_queuecommand(struct scsi_cmnd *sc_cmd,
+		    void (*done)(struct scsi_cmnd *));
+
+/*
+ * Send an ABTS frame to the target device. The sc_cmd argument
+ * is a pointer to the SCSI command to be aborted.
+ */
+int fc_eh_abort(struct scsi_cmnd *sc_cmd);
+
+/*
+ * Reset a LUN by sending a task management command to the target.
+ */
+int fc_eh_device_reset(struct scsi_cmnd *sc_cmd);
+
+/*
+ * Reset the host adapter.
+ */
+int fc_eh_host_reset(struct scsi_cmnd *sc_cmd);
+
+/*
+ * Check rport status.
+ */
+int fc_slave_alloc(struct scsi_device *sdev);
+
+/*
+ * Adjust the queue depth.
+ */
+int fc_change_queue_depth(struct scsi_device *sdev, int qdepth);
+
+/*
+ * Change the tag type.
+ */
+int fc_change_queue_type(struct scsi_device *sdev, int tag_type);
+
+/*
+ * Free memory pools used by the FCP layer.
+ */
+void fc_fcp_destroy(struct fc_lport *);
+
+
+/**
+ * EXCHANGE MANAGER LAYER
+ *****************************/
+/*
+ * Initializes Exchange Manager related
+ * function pointers in struct libfc_function_template.
+ */
+int fc_exch_init(struct fc_lport *lp);
+
+/*
+ * Allocate an Exchange Manager (EM).
+ *
+ * The EM handles allocation and freeing of exchanges and
+ * allows exchange lookup for received frames.
+ *
+ * The class is used to initialize the FC class of exchanges
+ * allocated from this EM.
+ *
+ * min_xid and max_xid limit the exchange ID (XID) range
+ * assigned to new exchanges.
+ * The LLD may choose to have multiple EMs,
+ * e.g. one EM instance per CPU receive thread in the LLD.
+ * The LLD can use exch_get() of struct libfc_function_template
+ * to specify the XID for a new exchange within
+ * a specified EM instance.
+ *
+ * em_idx uniquely identifies an EM instance.
+ */
+struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lp,
+				      enum fc_class class,
+				      u16 min_xid,
+				      u16 max_xid,
+				      u32 em_idx);
+
+/*
+ * Free an exchange manager.
+ */
+void fc_exch_mgr_free(struct fc_exch_mgr *mp);
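+
+/*
+ * Setup/teardown sketch (XID range and index are illustrative): one
+ * class 3 EM serving XIDs 1 through 0xfff for the local port.
+ *
+ *	lp->emp = fc_exch_mgr_alloc(lp, FC_CLASS_3, 1, 0x0fff, 0);
+ *	if (!lp->emp)
+ *		return -ENOMEM;
+ *	(and at teardown)
+ *	fc_exch_mgr_free(lp->emp);
+ */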
+
+/*
+ * Receive a frame on specified local port and exchange manager.
+ */
+void fc_exch_recv(struct fc_lport *lp, struct fc_exch_mgr *mp,
+		  struct fc_frame *fp);
+
+/*
+ * This function is for exch_seq_send function pointer in
+ * struct libfc_function_template, see comment block on
+ * exch_seq_send for description of this function.
+ */
+struct fc_seq *fc_exch_seq_send(struct fc_lport *lp,
+				struct fc_frame *fp,
+				void (*resp)(struct fc_seq *,
+					     struct fc_frame *fp,
+					     void *arg),
+				void *resp_arg, u32 timer_msec,
+				u32 sid, u32 did, u32 f_ctl);
+
+/*
+ * send a frame using existing sequence and exchange.
+ */
+int fc_seq_send(struct fc_lport *lp, struct fc_seq *sp,
+		struct fc_frame *fp, u32 f_ctl);
+
+/*
+ * Send an ELS response, using mainly information held by the
+ * exchange and sequence in the EM layer.
+ */
+void fc_seq_els_rsp_send(struct fc_seq *sp, enum fc_els_cmd els_cmd,
+			 struct fc_seq_els_data *els_data);
+
+
+/*
+ * Abort the exchange used by the given sequence.
+ */
+int fc_seq_exch_abort(const struct fc_seq *req_sp);
+
+/*
+ * Indicate that an exchange/sequence tuple is complete and the memory
+ * allocated for the related objects may be freed.
+ */
+void fc_exch_done(struct fc_seq *sp);
+
+/*
+ * Assigns an EM and an XID for a frame and then allocates
+ * a new exchange and sequence pair.
+ * The fp may be used to determine the free XID.
+ */
+struct fc_exch *fc_exch_get(struct fc_lport *lp, struct fc_frame *fp);
+
+/*
+ * Allocate a new exchange and sequence pair.
+ * If ex_id is zero, the next free exchange ID
+ * from the specified exchange manager mp will be assigned.
+ */
+struct fc_exch *fc_exch_alloc(struct fc_exch_mgr *mp, u16 ex_id);
+
+/*
+ * Start a new sequence on the same exchange as the supplied sequence.
+ */
+struct fc_seq *fc_seq_start_next(struct fc_seq *sp);
+
+/*
+ * Reset an exchange manager, completing all sequences and exchanges.
+ * If s_id is non-zero, reset only exchanges originating from that FID.
+ * If d_id is non-zero, reset only exchanges sending to that FID.
+ */
+void fc_exch_mgr_reset(struct fc_exch_mgr *, u32 s_id, u32 d_id);
+
+/*
+ * Get the exchange IDs (OX_ID and RX_ID) of a sequence.
+ */
+void fc_seq_get_xids(struct fc_seq *sp, u16 *oxid, u16 *rxid);
+
+/*
+ * Set REC (Read Exchange Concise) data on a sequence.
+ */
+void fc_seq_set_rec_data(struct fc_seq *sp, u32 rec_data);
+
+/**
+ * fc_functions_template
+ *****************************/
+void fc_attr_init(struct fc_lport *);
+void fc_get_host_port_id(struct Scsi_Host *shost);
+void fc_get_host_speed(struct Scsi_Host *shost);
+void fc_get_host_port_type(struct Scsi_Host *shost);
+void fc_get_host_fabric_name(struct Scsi_Host *shost);
+void fc_set_rport_loss_tmo(struct fc_rport *rport, u32 timeout);
+struct fc_host_statistics *fc_get_host_stats(struct Scsi_Host *);
+
+#endif /* _LIBFC_H_ */

--
To unsubscribe from this list: send the line "unsubscribe linux-scsi" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at  http://vger.kernel.org/majordomo-info.html
