[PATCH] staging: unisys: Add s-Par visorhba

From: David Kershner <david.kershner@xxxxxxxxxx>

This driver creates a host bus adapter device when s-Par sends a
device-create message for a storage adapter on the visorbus. When
visorbus receives the message, it calls visorhba_probe(), which
creates the HBA device; the device is then managed by the visorhba
driver.

Signed-off-by: Erik Arfvidson <erik.arfvidson@xxxxxxxxxx>
Signed-off-by: Benjamin Romer <benjamin.romer@xxxxxxxxxx>
Signed-off-by: David Kershner <david.kershner@xxxxxxxxxx>
---
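Note for reviewers (placed below the "---" marker, so it is not part of
the commit message): the probe flow described above follows the usual
visorbus function-driver pattern. A trimmed sketch, assuming only the
visorbus API that this patch itself uses (struct visor_driver,
visor_channeltype_descriptor, visorbus_register_visor_driver()); the
example_* identifiers are hypothetical and not part of the patch:

	#include <linux/module.h>
	#include "visorbus.h"

	static int example_probe(struct visor_device *dev)
	{
		/* visorbus calls this when a channel matching one of the
		 * GUIDs in example_types appears, i.e. when s-Par sends a
		 * device-create message; a real driver would allocate and
		 * register its SCSI host here.
		 */
		return 0;
	}

	static void example_remove(struct visor_device *dev)
	{
		/* undo whatever example_probe() set up */
	}

	static struct visor_channeltype_descriptor example_types[] = {
		{ SPAR_VHBA_CHANNEL_PROTOCOL_UUID, "sparvhba" },
		{ NULL_UUID_LE, NULL }
	};

	static struct visor_driver example_driver = {
		.name = "example",
		.owner = THIS_MODULE,
		.channel_types = example_types,
		.probe = example_probe,
		.remove = example_remove,
	};

	static int __init example_init(void)
	{
		return visorbus_register_visor_driver(&example_driver);
	}
	module_init(example_init);

	static void __exit example_exit(void)
	{
		visorbus_unregister_visor_driver(&example_driver);
	}
	module_exit(example_exit);

	MODULE_LICENSE("GPL");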
 drivers/staging/unisys/Kconfig                  |    1 +
 drivers/staging/unisys/Makefile                 |    1 +
 drivers/staging/unisys/include/iochannel.h      |    4 +
 drivers/staging/unisys/visorhba/Kconfig         |   14 +
 drivers/staging/unisys/visorhba/Makefile        |   10 +
 drivers/staging/unisys/visorhba/visorhba_main.c | 1372 +++++++++++++++++++++++
 6 files changed, 1402 insertions(+)
 create mode 100644 drivers/staging/unisys/visorhba/Kconfig
 create mode 100644 drivers/staging/unisys/visorhba/Makefile
 create mode 100644 drivers/staging/unisys/visorhba/visorhba_main.c
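Also for reviewers: visorhba_main.c below tracks commands in flight to
the IO Service Partition in a fixed table of MAX_PENDING_REQUESTS slots
(see add_scsipending_entry()/del_scsipending_ent()). A simplified,
self-contained model of that bookkeeping, using hypothetical names and
omitting the driver's privlock spinlock for brevity (the driver also
returns SCSI_MLQUEUE_DEVICE_BUSY rather than -1 when the table is full):

	#include <stddef.h>

	#define NSLOTS 128			/* stands in for MAX_PENDING_REQUESTS */

	struct slot_table {
		void *sent[NSLOTS];		/* command pointer per slot, NULL = free */
		unsigned int nextinsert;	/* where the next search starts */
	};

	/* claim a free slot, scanning circularly from the last insert point;
	 * the returned index travels with the command and is handed back by
	 * the IOVM in its response
	 */
	static int slot_add(struct slot_table *t, void *cmd)
	{
		unsigned int i = t->nextinsert;

		while (t->sent[i]) {
			i = (i + 1) % NSLOTS;
			if (i == t->nextinsert)
				return -1;	/* every slot is busy */
		}
		t->sent[i] = cmd;
		t->nextinsert = (i + 1) % NSLOTS;
		return i;
	}

	/* release a slot and return the command that was parked there */
	static void *slot_del(struct slot_table *t, unsigned int i)
	{
		void *cmd = NULL;

		if (i < NSLOTS) {
			cmd = t->sent[i];
			t->sent[i] = NULL;
		}
		return cmd;
	}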

diff --git a/drivers/staging/unisys/Kconfig b/drivers/staging/unisys/Kconfig
index ca850a8..5cbb791 100644
--- a/drivers/staging/unisys/Kconfig
+++ b/drivers/staging/unisys/Kconfig
@@ -14,5 +14,6 @@ if UNISYSSPAR
 source "drivers/staging/unisys/visorbus/Kconfig"
 source "drivers/staging/unisys/visornic/Kconfig"
 source "drivers/staging/unisys/visorhid/Kconfig"
+source "drivers/staging/unisys/visorhba/Kconfig"
 
 endif # UNISYSSPAR
diff --git a/drivers/staging/unisys/Makefile b/drivers/staging/unisys/Makefile
index d071094..79c9036 100644
--- a/drivers/staging/unisys/Makefile
+++ b/drivers/staging/unisys/Makefile
@@ -4,3 +4,4 @@
 obj-$(CONFIG_UNISYS_VISORBUS)		+= visorbus/
 obj-$(CONFIG_UNISYS_VISORNIC)		+= visornic/
 obj-$(CONFIG_UNISYS_VISORHID)          += visorhid/
+obj-$(CONFIG_UNISYS_VISORHBA)		+= visorhba/
diff --git a/drivers/staging/unisys/include/iochannel.h b/drivers/staging/unisys/include/iochannel.h
index a559812..1cca0fb 100644
--- a/drivers/staging/unisys/include/iochannel.h
+++ b/drivers/staging/unisys/include/iochannel.h
@@ -147,6 +147,10 @@ struct phys_info {
 	u16 pi_len;
 } __packed;
 
+#define MIN_NUMSIGNALS 64
+
+/* structs with pragma pack  */
+
 struct guest_phys_info {
 	u64 address;
 	u64 length;
diff --git a/drivers/staging/unisys/visorhba/Kconfig b/drivers/staging/unisys/visorhba/Kconfig
new file mode 100644
index 0000000..241d803
--- /dev/null
+++ b/drivers/staging/unisys/visorhba/Kconfig
@@ -0,0 +1,14 @@
+#
+# Unisys visorhba configuration
+#
+
+config UNISYS_VISORHBA
+	tristate "Unisys visorhba driver"
+	depends on UNISYSSPAR && UNISYS_VISORBUS && SCSI
+	---help---
+	The Unisys visorhba driver provides support for s-Par HBA
+	devices exposed on the s-Par visorbus. When a message is sent
+	to visorbus to create an HBA device, the probe function of
+	visorhba is called to create the SCSI device.
+	If you say Y here, you will enable the Unisys visorhba driver.
+
diff --git a/drivers/staging/unisys/visorhba/Makefile b/drivers/staging/unisys/visorhba/Makefile
new file mode 100644
index 0000000..a8a8e0e
--- /dev/null
+++ b/drivers/staging/unisys/visorhba/Makefile
@@ -0,0 +1,10 @@
+#
+# Makefile for Unisys visorhba
+#
+
+obj-$(CONFIG_UNISYS_VISORHBA)	+= visorhba.o
+
+visorhba-y := visorhba_main.o
+
+ccflags-y += -Idrivers/staging/unisys/include
+
diff --git a/drivers/staging/unisys/visorhba/visorhba_main.c b/drivers/staging/unisys/visorhba/visorhba_main.c
new file mode 100644
index 0000000..04fe110
--- /dev/null
+++ b/drivers/staging/unisys/visorhba/visorhba_main.c
@@ -0,0 +1,1372 @@
+/* Copyright (c) 2012 - 2015 UNISYS CORPORATION
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT.  See the GNU General Public License for more
+ * details.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/skbuff.h>
+#include <linux/kthread.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+
+#include "visorbus.h"
+#include "iochannel.h"
+
+#define VERSION "1.0.0.0"
+
+/* The Send and Receive Buffers of the IO Queue may both be full */
+#define MAX_PENDING_REQUESTS (MIN_NUMSIGNALS * 2)
+
+#define VISORHBA_ERROR_COUNT 30
+#define IOS_ERROR_THRESHOLD 1000
+
+#define MAXDEVICES 1024
+#define VISORHBAOPENMAX 1
+
+/* MAX_BUF = 6 lines x 10 MAXVHBA x 80 characters
+ *         = 4800 bytes, rounded up to 2^13 = 8192 bytes
+ */
+#define MAX_BUF 8192
+#define MAX_NAME_LENGTH 99
+
+static LIST_HEAD(list_all_devices);
+static DEFINE_SPINLOCK(lock_all_devices);
+static spinlock_t dev_num_pool_lock;	/* Lock to modify dev_num_pool */
+static void *dev_num_pool;		/* pool to grab device numbers from */
+
+static int visorhba_queue_command_lck(struct scsi_cmnd *scsicmd,
+				      void (*visorhba_cmnd_done)
+					    (struct scsi_cmnd *));
+#ifdef DEF_SCSI_QCMD
+static DEF_SCSI_QCMD(visorhba_queue_command)
+#else
+#define visorhba_queue_command visorhba_queue_command_lck
+#endif
+static int visorhba_probe(struct visor_device *dev);
+static void visorhba_remove(struct visor_device *dev);
+static int visorhba_pause(struct visor_device *dev,
+			  visorbus_state_complete_func complete_func);
+static int visorhba_resume(struct visor_device *dev,
+			   visorbus_state_complete_func complete_func);
+
+static ssize_t info_debugfs_read(struct file *file, char __user *buf,
+				 size_t len, loff_t *offset);
+static struct dentry *visorhba_debugfs_dir;
+static const struct file_operations debugfs_info_fops = {
+	.read = info_debugfs_read,
+};
+
+/* GUIDS for HBA channel type supported by this driver */
+static struct visor_channeltype_descriptor visorhba_channel_types[] = {
+	/* Note that the only channel type we expect to be reported by the
+	 * bus driver is the SPAR_VHBA channel.
+	 */
+	{ SPAR_VHBA_CHANNEL_PROTOCOL_UUID, "sparvhba" },
+	{ NULL_UUID_LE, NULL }
+};
+
+static struct workqueue_struct *visorhba_serverdown_workqueue;
+
+/* This is used to tell the visor bus driver which types of visor devices
+ * we support, and what functions to call when a visor device that we support
+ * is attached or removed.
+ */
+static struct visor_driver visorhba_driver = {
+	.name = "visorhba",
+	.version = VERSION,
+	.vertag = NULL,
+	.owner = THIS_MODULE,
+	.channel_types = visorhba_channel_types,
+	.probe = visorhba_probe,
+	.remove = visorhba_remove,
+	.pause = visorhba_pause,
+	.resume = visorhba_resume,
+	.channel_interrupt = NULL,
+};
+
+struct visor_thread_info {
+	struct task_struct *task;
+	struct completion has_stopped;
+	int id;
+};
+
+struct visordisk_info {
+	u32 valid;
+	u32 channel, id, lun;	/* Disk Path */
+	atomic_t ios_threshold;
+	atomic_t error_count;
+	struct visordisk_info *next;
+};
+
+struct scsipending {
+	char cmdtype;		/* Type of pointer that is being stored */
+	void *sent;		/* The Data being tracked */
+};
+
+/* Work Data for dar_work_queue */
+struct diskaddremove {
+	u8 add;			/* 0-remove, 1-add */
+	struct Scsi_Host *shost; /* Scsi Host for this visorhba instance */
+	u32 channel, id, lun;	/* Disk Path */
+	struct diskaddremove *next;
+};
+
+/* Each scsi_host has a host_data area that contains this struct. */
+struct visorhba_devdata {
+	struct Scsi_Host *scsihost;
+	struct visor_device *dev;
+	struct list_head dev_info_list;
+	/* Tracks the requests that have been forwarded to
+	 * the IOVM and haven't returned yet
+	 */
+	struct scsipending pending[MAX_PENDING_REQUESTS];
+	/* Start search for next pending free slot here */
+	unsigned int nextinsert;
+	spinlock_t privlock; /* lock to protect data in devdata */
+	bool serverdown;
+	bool serverchangingstate;
+	unsigned long long acquire_failed_cnt;
+	unsigned long long interrupts_rcvd;
+	unsigned long long interrupts_notme;
+	unsigned long long interrupts_disabled;
+	struct work_struct serverdown_completion;
+	u64 __iomem *flags_addr;
+	atomic_t interrupt_rcvd;
+	wait_queue_head_t rsp_queue;
+	struct visordisk_info head;
+	unsigned int max_buff_len;
+	int devnum;
+	char name[MAX_NAME_LENGTH];
+	struct kref kref;
+	struct visor_thread_info threadinfo;
+	struct list_head list_all;
+	int thread_wait_ms;
+};
+
+struct visorhba_devices_open {
+	struct visorhba_devdata *devdata;
+};
+
+static struct visorhba_devices_open visorhbas_open[VISORHBAOPENMAX];
+
+/**
+ *	visor_thread_start - starts a thread for the device
+ *	@thrinfo: The thread to start
+ *	@threadfn: Function the thread starts
+ *	@thrcontext: Context to pass to the thread, i.e. devdata
+ *	@name: string describing name of thread
+ *
+ *	Starts a thread for the device.
+ *
+ *	Returns 0 on success, -1 on failure.
+ */
+static int visor_thread_start(struct visor_thread_info *thrinfo,
+			      int (*threadfn)(void *),
+			      void *thrcontext, char *name)
+{
+	/* used to stop the thread */
+	init_completion(&thrinfo->has_stopped);
+	thrinfo->task = kthread_run(threadfn, thrcontext, name);
+	if (IS_ERR(thrinfo->task)) {
+		thrinfo->id = 0;
+		return -1;
+	}
+	thrinfo->id = thrinfo->task->pid;
+	return 0;
+}
+
+/**
+ *	visor_thread_stop - stop a thread for the device
+ *	@thrinfo: The thread to stop
+ *
+ *	Stop the thread and wait up to one second for it to complete.
+ *	Returns void.
+ */
+static void visor_thread_stop(struct visor_thread_info *thrinfo)
+{
+	if (!thrinfo->id)
+		return;
+	kthread_stop(thrinfo->task);
+	/* wait up to 1 second for the thread to die; give up if it doesn't */
+	if (wait_for_completion_timeout(&thrinfo->has_stopped,
+					msecs_to_jiffies(1000)))
+		thrinfo->id = 0;
+}
+
+/**
+ *	add_scsipending_entry - save off io command that is pending in
+ *				Service Partition
+ *	@devdata: Pointer to devdata
+ *	@cmdtype: Specifies the type of command pending
+ *	@new:	The command to be saved
+ *
+ *	Saves off the io command that is being handled by the Service
+ *	Partition so that it can be handled when it completes.
+ *	Returns the insert_location where the entry was added, or
+ *	SCSI_MLQUEUE_DEVICE_BUSY if it can't.
+ */
+static int add_scsipending_entry(struct visorhba_devdata *devdata,
+				 char cmdtype, void *new)
+{
+	unsigned long flags;
+	int insert_location;
+
+	spin_lock_irqsave(&devdata->privlock, flags);
+	insert_location = devdata->nextinsert;
+	while (devdata->pending[insert_location].sent) {
+		insert_location = (insert_location + 1) % MAX_PENDING_REQUESTS;
+		if (insert_location == (int)devdata->nextinsert) {
+			spin_unlock_irqrestore(&devdata->privlock, flags);
+			return SCSI_MLQUEUE_DEVICE_BUSY;
+		}
+	}
+
+	devdata->pending[insert_location].cmdtype = cmdtype;
+	devdata->pending[insert_location].sent = new;
+	devdata->nextinsert = (insert_location + 1) % MAX_PENDING_REQUESTS;
+	spin_unlock_irqrestore(&devdata->privlock, flags);
+
+	return insert_location;
+}
+
+/**
+ *	del_scsipending_ent - removes an entry from the pending array
+ *	@devdata: Device holding the pending array
+ *	@del: Entry to remove
+ *
+ *	Removes the entry pointed at by del and returns the sent
+ *	pointer stored there, or NULL if del is out of range.
+ */
+static void *del_scsipending_ent(struct visorhba_devdata *devdata,
+				 uintptr_t del)
+{
+	unsigned long flags;
+	void *sent = NULL;
+
+	if (del < MAX_PENDING_REQUESTS) {
+		spin_lock_irqsave(&devdata->privlock, flags);
+		sent = devdata->pending[del].sent;
+
+		devdata->pending[del].cmdtype = 0;
+		devdata->pending[del].sent = NULL;
+		spin_unlock_irqrestore(&devdata->privlock, flags);
+	}
+
+	return sent;
+}
+
+/**
+ *	forward_taskmgmt_command - send taskmgmt command to the Service
+ *				   Partition
+ *	@tasktype: Type of taskmgmt command
+ *	@scsidev: Scsidev that issued command
+ *
+ *	Create a cmdrsp packet and send it to the Service Partition
+ *	that will service this request.
+ *	Returns whether the command was queued successfully or not.
+ */
+static int forward_taskmgmt_command(enum task_mgmt_types tasktype,
+				    struct scsi_device *scsidev)
+{
+	struct uiscmdrsp *cmdrsp;
+	struct visorhba_devdata *devdata =
+		(struct visorhba_devdata *)scsidev->host->hostdata;
+	int notifyresult = 0xffff;
+	wait_queue_head_t notifyevent;
+	int scsicmd_id = 0;
+	int err = 0;
+
+	if (devdata->serverdown || devdata->serverchangingstate)
+		return -EINVAL;
+
+	cmdrsp = kzalloc(sizeof(*cmdrsp), GFP_ATOMIC);
+	if (!cmdrsp)
+		return -ENOMEM;
+
+	init_waitqueue_head(&notifyevent);
+
+	/* issue TASK_MGMT_ABORT_TASK */
+	cmdrsp->cmdtype = CMD_SCSITASKMGMT_TYPE;
+	/* specify the event that has to be triggered when this */
+	/* cmd is complete */
+	cmdrsp->scsitaskmgmt.notify = (void *)&notifyevent;
+	cmdrsp->scsitaskmgmt.notifyresult = (void *)&notifyresult;
+
+	/* save destination */
+	cmdrsp->scsitaskmgmt.tasktype = tasktype;
+	cmdrsp->scsitaskmgmt.vdest.channel = scsidev->channel;
+	cmdrsp->scsitaskmgmt.vdest.id = scsidev->id;
+	cmdrsp->scsitaskmgmt.vdest.lun = scsidev->lun;
+	scsicmd_id = add_scsipending_entry(devdata, CMD_SCSITASKMGMT_TYPE,
+					   (void *)cmdrsp);
+	if (scsicmd_id < 0) {
+		err = scsicmd_id;
+		goto cleanup;
+	}
+	cmdrsp->scsitaskmgmt.scsicmd = (void *)(uintptr_t)scsicmd_id;
+
+	if (!visorchannel_signalinsert(devdata->dev->visorchannel,
+				       IOCHAN_TO_IOPART,
+				       cmdrsp)) {
+		err = SCSI_MLQUEUE_DEVICE_BUSY;
+		goto cleanup;
+	}
+
+	wait_event(notifyevent, notifyresult != 0xffff);
+	kfree(cmdrsp);
+	return SUCCESS;
+
+cleanup:
+	kfree(cmdrsp);
+	return err;
+}
+
+/**
+ *	visorhba_abort_handler - Send TASK_MGMT_ABORT_TASK
+ *	@scsicmd: The scsicmd that needs to be aborted
+ *
+ *	Returns SUCCESS; waits forever if the request can't be queued
+ *
+ */
+static int visorhba_abort_handler(struct scsi_cmnd *scsicmd)
+{
+	/* issue TASK_MGMT_ABORT_TASK */
+	struct scsi_device *scsidev;
+	struct visordisk_info *vdisk;
+	struct visorhba_devdata *devdata;
+
+	scsidev = scsicmd->device;
+	devdata = (struct visorhba_devdata *)scsidev->host->hostdata;
+	for (vdisk = &devdata->head; vdisk->next; vdisk = vdisk->next) {
+		if ((scsidev->channel == vdisk->channel) &&
+		    (scsidev->id == vdisk->id) &&
+		    (scsidev->lun == vdisk->lun)) {
+			if (atomic_read(&vdisk->error_count) <
+			    VISORHBA_ERROR_COUNT)
+				atomic_inc(&vdisk->error_count);
+			else
+				atomic_set(&vdisk->ios_threshold,
+					   IOS_ERROR_THRESHOLD);
+		}
+	}
+	return forward_taskmgmt_command(TASK_MGMT_ABORT_TASK, scsicmd->device);
+}
+
+/**
+ *	visorhba_device_reset_handler - Send TASK_MGMT_LUN_RESET
+ *	@scsicmd: The scsicmd that needs to be aborted
+ *
+ *	Returns SUCCESS; waits forever if the request can't be queued
+ */
+static int visorhba_device_reset_handler(struct scsi_cmnd *scsicmd)
+{
+	/* issue TASK_MGMT_LUN_RESET */
+	struct scsi_device *scsidev;
+	struct visordisk_info *vdisk;
+	struct visorhba_devdata *devdata;
+
+	scsidev = scsicmd->device;
+	devdata = (struct visorhba_devdata *)scsidev->host->hostdata;
+	for (vdisk = &devdata->head; vdisk->next; vdisk = vdisk->next) {
+		if ((scsidev->channel == vdisk->channel) &&
+		    (scsidev->id == vdisk->id) &&
+		    (scsidev->lun == vdisk->lun)) {
+			if (atomic_read(&vdisk->error_count) <
+			    VISORHBA_ERROR_COUNT) {
+				atomic_inc(&vdisk->error_count);
+			} else {
+				atomic_set(&vdisk->ios_threshold,
+					   IOS_ERROR_THRESHOLD);
+			}
+		}
+	}
+	return forward_taskmgmt_command(TASK_MGMT_LUN_RESET, scsicmd->device);
+}
+
+/**
+ *	visorhba_bus_reset_handler - Send TASK_MGMT_TARGET_RESET for each
+ *				     target on the bus
+ *	@scsicmd: The scsicmd that needs to be aborted
+ *
+ *	Returns SUCCESS
+ */
+static int visorhba_bus_reset_handler(struct scsi_cmnd *scsicmd)
+{
+	struct scsi_device *scsidev;
+	struct visordisk_info *vdisk;
+	struct visorhba_devdata *devdata;
+
+	scsidev = scsicmd->device;
+	devdata = (struct visorhba_devdata *)scsidev->host->hostdata;
+	for (vdisk = &devdata->head; vdisk->next; vdisk = vdisk->next) {
+		if ((scsidev->channel == vdisk->channel) &&
+		    (scsidev->id == vdisk->id) &&
+		    (scsidev->lun == vdisk->lun)) {
+			if (atomic_read(&vdisk->error_count) <
+			    VISORHBA_ERROR_COUNT)
+				atomic_inc(&vdisk->error_count);
+			else
+				atomic_set(&vdisk->ios_threshold,
+					   IOS_ERROR_THRESHOLD);
+		}
+	}
+	return forward_taskmgmt_command(TASK_MGMT_BUS_RESET, scsicmd->device);
+}
+
+/**
+ *	visorhba_host_reset_handler - Not supported
+ *	@scsicmd: The scsicmd that needs to be aborted
+ *
+ *	Host reset is not supported by this driver; nothing is sent.
+ *	Returns SUCCESS
+ */
+static int
+visorhba_host_reset_handler(struct scsi_cmnd *scsicmd)
+{
+	/* issue TASK_MGMT_TARGET_RESET for each target on each bus for host */
+	return SUCCESS;
+}
+
+/**
+ *	visorhba_get_info
+ *	@shp: Scsi host that is requesting information
+ *
+ *	Returns string with info for visorhba, now just version
+ */
+static const char *visorhba_get_info(struct Scsi_Host *shp)
+{
+	/* Return version string */
+	return "Version " VERSION "\n";
+}
+
+/**
+ *	visorhba_queue_command_lck - queues command to the Service Partition
+ *	@scsicmd: Command to be queued
+ *	@visorhba_cmnd_done: Done command to call when scsicmd is returned
+ *
+ *	Queues the scsicmd to the Service Partition after converting it to a
+ *	uiscmdrsp structure.
+ *
+ *	Returns success if queued to the Service Partition, otherwise
+ *	failure.
+ */
+static int
+visorhba_queue_command_lck(struct scsi_cmnd *scsicmd,
+			   void (*visorhba_cmnd_done)(struct scsi_cmnd *))
+{
+	struct scsi_device *scsidev = scsicmd->device;
+	int insert_location;
+	unsigned char op;
+	unsigned char *cdb = scsicmd->cmnd;
+	struct Scsi_Host *scsihost = scsidev->host;
+	struct uiscmdrsp *cmdrsp;
+	unsigned int i;
+	struct visorhba_devdata *devdata =
+		(struct visorhba_devdata *)scsihost->hostdata;
+	struct scatterlist *sg = NULL;
+	struct scatterlist *sglist = NULL;
+	int err = 0;
+
+	if (devdata->serverdown || devdata->serverchangingstate)
+		return SCSI_MLQUEUE_DEVICE_BUSY;
+
+	cmdrsp = kzalloc(sizeof(*cmdrsp), GFP_ATOMIC);
+	if (!cmdrsp)
+		return -ENOMEM;
+
+	/* save everything we need from the scsi_cmd into the cmdrsp
+	 * before we queue it; set the type to command - as opposed to
+	 * task mgmt
+	 */
+	cmdrsp->cmdtype = CMD_SCSI_TYPE;
+	/* save the pending insertion location. Deletion from pending
+	 * will return the scsicmd pointer for completion
+	 */
+	insert_location =
+		add_scsipending_entry(devdata, CMD_SCSI_TYPE, (void *)scsicmd);
+	if (insert_location != -1) {
+		cmdrsp->scsi.scsicmd = (void *)(uintptr_t)insert_location;
+	} else {
+		kfree(cmdrsp);
+		return SCSI_MLQUEUE_DEVICE_BUSY;
+	}
+	/* save the done function to call when the cmd is complete */
+	scsicmd->scsi_done = visorhba_cmnd_done;
+	/* save destination */
+	cmdrsp->scsi.vdest.channel = scsidev->channel;
+	cmdrsp->scsi.vdest.id = scsidev->id;
+	cmdrsp->scsi.vdest.lun = scsidev->lun;
+	/* save datadir */
+	cmdrsp->scsi.data_dir = scsicmd->sc_data_direction;
+	/* memcpy() never returns NULL, so its return value is not
+	 * worth checking; just copy the CDB into the request.
+	 */
+	memcpy(cmdrsp->scsi.cmnd, cdb, MAX_CMND_SIZE);
+
+	cmdrsp->scsi.bufflen = scsi_bufflen(scsicmd);
+
+	/* keep track of the max buffer length so far. */
+	if (cmdrsp->scsi.bufflen > devdata->max_buff_len)
+		devdata->max_buff_len = cmdrsp->scsi.bufflen;
+
+	if (scsi_sg_count(scsicmd) > MAX_PHYS_INFO) {
+		err = -EINVAL;
+		goto fail;
+	}
+
+	/* convert buffer to phys information */
+	/* a nonzero buffer length with no scatterlist is a midlayer bug */
+	BUG_ON(scsi_sg_count(scsicmd) == 0 && scsi_bufflen(scsicmd) > 0);
+	if (scsi_sg_count(scsicmd)) {
+		/* buffer is scatterlist - copy it out */
+		sglist = scsi_sglist(scsicmd);
+
+		for_each_sg(sglist, sg, scsi_sg_count(scsicmd), i) {
+			cmdrsp->scsi.gpi_list[i].address = sg_phys(sg);
+			cmdrsp->scsi.gpi_list[i].length = sg->length;
+		}
+		cmdrsp->scsi.guest_phys_entries = scsi_sg_count(scsicmd);
+	}
+
+	op = cdb[0];
+	if (!visorchannel_signalinsert(devdata->dev->visorchannel,
+				       IOCHAN_TO_IOPART,
+				       cmdrsp)) {
+		/* queue must be full and we aren't going to wait */
+		err = SCSI_MLQUEUE_DEVICE_BUSY;
+		goto fail;
+	}
+	kfree(cmdrsp);
+	return 0;
+fail:
+	del_scsipending_ent(devdata, (uintptr_t)insert_location);
+	kfree(cmdrsp);
+	return err;
+}
+
+/**
+ *	visorhba_slave_alloc - called when new disk is discovered
+ *	@scsidev: New disk
+ *
+ *	Create a new visordisk_info structure and add it to our
+ *	list of vdisks.
+ *
+ *	Returns success when created, otherwise error.
+ */
+static int visorhba_slave_alloc(struct scsi_device *scsidev)
+{
+	/* this is called by the midlayer before scan for new devices --
+	 * LLD can alloc any struct & do init if needed.
+	 */
+	struct visordisk_info *vdisk;
+	struct visordisk_info *tmpvdisk;
+	struct visorhba_devdata *devdata;
+	struct Scsi_Host *scsihost = (struct Scsi_Host *)scsidev->host;
+
+	devdata = (struct visorhba_devdata *)scsihost->hostdata;
+	if (!devdata)
+		return 0; /* even though we errored, treat as success */
+
+	for (vdisk = &devdata->head; vdisk->next; vdisk = vdisk->next) {
+		if (vdisk->next->valid &&
+		    (vdisk->next->channel == scsidev->channel) &&
+		    (vdisk->next->id == scsidev->id) &&
+		    (vdisk->next->lun == scsidev->lun))
+			return 0; /* already allocated return success */
+	}
+	tmpvdisk = kzalloc(sizeof(*tmpvdisk), GFP_ATOMIC);
+	if (!tmpvdisk)
+		return -ENOMEM;
+
+	tmpvdisk->channel = scsidev->channel;
+	tmpvdisk->id = scsidev->id;
+	tmpvdisk->lun = scsidev->lun;
+	vdisk->next = tmpvdisk;
+	return 0;
+}
+
+/**
+ *	visorhba_slave_destroy - disk is going away
+ *	@scsidev: scsi device going away
+ *
+ *	Disk is going away, clean up resources.
+ *	Returns void.
+ */
+static void visorhba_slave_destroy(struct scsi_device *scsidev)
+{
+	/* midlevel calls this after device has been quiesced and
+	 * before it is to be deleted.
+	 */
+	struct visordisk_info *vdisk, *delvdisk;
+	struct visorhba_devdata *devdata;
+	struct Scsi_Host *scsihost = (struct Scsi_Host *)scsidev->host;
+
+	devdata = (struct visorhba_devdata *)scsihost->hostdata;
+	for (vdisk = &devdata->head; vdisk->next; vdisk = vdisk->next) {
+		if (vdisk->next->valid &&
+		    (vdisk->next->channel == scsidev->channel) &&
+		    (vdisk->next->id == scsidev->id) &&
+		    (vdisk->next->lun == scsidev->lun)) {
+			delvdisk = vdisk->next;
+			vdisk->next = delvdisk->next;
+			kfree(delvdisk);
+			return;
+		}
+	}
+}
+
+static struct scsi_host_template visorhba_driver_template = {
+	.name = "Unisys Visor HBA",
+	.info = visorhba_get_info,
+	.queuecommand = visorhba_queue_command,
+	.eh_abort_handler = visorhba_abort_handler,
+	.eh_device_reset_handler = visorhba_device_reset_handler,
+	.eh_bus_reset_handler = visorhba_bus_reset_handler,
+	.eh_host_reset_handler = visorhba_host_reset_handler,
+	.shost_attrs = NULL,
+#define visorhba_MAX_CMNDS 128
+	.can_queue = visorhba_MAX_CMNDS,
+	.sg_tablesize = 64,
+	.this_id = -1,
+	.slave_alloc = visorhba_slave_alloc,
+	.slave_destroy = visorhba_slave_destroy,
+	.use_clustering = ENABLE_CLUSTERING,
+};
+
+/**
+ *	info_debugfs_read - debugfs interface to dump visorhba states
+ *	@file: Debug file
+ *	@buf: buffer to send back to user
+ *	@len: len that can be written to buf
+ *	@offset: offset into buf
+ *
+ *	Dumps information about the visorhba driver and devices
+ *	TODO: Make this per vhba
+ *	Returns bytes_read
+ */
+static ssize_t info_debugfs_read(struct file *file, char __user *buf,
+				 size_t len, loff_t *offset)
+{
+	ssize_t bytes_read = 0;
+	int str_pos = 0;
+	u64 phys_flags_addr;
+	int i;
+	struct visorhba_devdata *devdata;
+	char *vbuf;
+
+	if (len > MAX_BUF)
+		len = MAX_BUF;
+	vbuf = kzalloc(len, GFP_KERNEL);
+	if (!vbuf)
+		return -ENOMEM;
+
+	for (i = 0; i < VISORHBAOPENMAX; i++) {
+		if (!visorhbas_open[i].devdata)
+			continue;
+
+		devdata = visorhbas_open[i].devdata;
+
+		str_pos += scnprintf(vbuf + str_pos,
+				len - str_pos, "max_buff_len:%u\n",
+				devdata->max_buff_len);
+
+		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
+				"\ninterrupts_rcvd = %llu, interrupts_disabled = %llu\n",
+				devdata->interrupts_rcvd,
+				devdata->interrupts_disabled);
+		str_pos += scnprintf(vbuf + str_pos,
+				len - str_pos, "\ninterrupts_notme = %llu,\n",
+				devdata->interrupts_notme);
+		phys_flags_addr = virt_to_phys((__force  void *)
+					       devdata->flags_addr);
+		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
+				"flags_addr = %p, phys_flags_addr=0x%016llx, FeatureFlags=%llu\n",
+				devdata->flags_addr, phys_flags_addr,
+				(unsigned long long)readq(devdata->flags_addr));
+		str_pos += scnprintf(vbuf + str_pos,
+			len - str_pos, "acquire_failed_cnt:%llu\n",
+			devdata->acquire_failed_cnt);
+		str_pos += scnprintf(vbuf + str_pos, len - str_pos, "\n");
+	}
+
+	bytes_read = simple_read_from_buffer(buf, len, offset, vbuf, str_pos);
+	kfree(vbuf);
+	return bytes_read;
+}
+
+/**
+ *	devdata_initialize - Initialize a visorhba_devdata structure
+ *	@devdata: private data device created by visorhba
+ *	@dev: visor_device created by visorbus
+ *
+ *	Initializes fields in the visorhba_devdata structure
+ *	Returns a pointer to the devdata structure on SUCCESS otherwise NULL
+ */
+static struct visorhba_devdata *
+devdata_initialize(struct visorhba_devdata *devdata, struct visor_device *dev)
+{
+	int devnum = -1;
+
+	if (!devdata)
+		return NULL;
+	spin_lock(&dev_num_pool_lock);
+	devnum = find_first_zero_bit(dev_num_pool, MAXDEVICES);
+	/* don't touch the bitmap when every device number is in use */
+	if (devnum < MAXDEVICES)
+		set_bit(devnum, dev_num_pool);
+	spin_unlock(&dev_num_pool_lock);
+	if (devnum >= MAXDEVICES)
+		return NULL;
+	devdata->devnum = devnum;
+	devdata->dev = dev;
+	strncpy(devdata->name, dev_name(&dev->device), sizeof(devdata->name));
+	kref_init(&devdata->kref);
+	spin_lock(&lock_all_devices);
+	list_add_tail(&devdata->list_all, &list_all_devices);
+	spin_unlock(&lock_all_devices);
+	return devdata;
+}
+
+/**
+ *	devdata_release - called when we are done with devdata
+ *	@mykref: kref reference to visorhba_devdata
+ *
+ *	Free up devnum, remove us from the list of active devdatas
+ *	Returns void.
+ */
+static void devdata_release(struct kref *mykref)
+{
+	struct visorhba_devdata *devdata;
+
+	devdata = container_of(mykref, struct visorhba_devdata, kref);
+	spin_lock(&dev_num_pool_lock);
+	clear_bit(devdata->devnum, dev_num_pool);
+	spin_unlock(&dev_num_pool_lock);
+
+	spin_lock(&lock_all_devices);
+	list_del(&devdata->list_all);
+	spin_unlock(&lock_all_devices);
+}
+
+/**
+ *	visorhba_serverdown_complete - clean up after the IOVM has
+ *				       gone down
+ *	@work: work structure for this serverdown request
+ *
+ *	Handles the cleanup after a serverdown: stops the response
+ *	processing thread and fails all pending I/Os.
+ *	Returns void when finished cleaning up
+ */
+static void visorhba_serverdown_complete(struct work_struct *work)
+{
+	struct visorhba_devdata *devdata;
+	int i;
+	struct scsipending *pendingdel = NULL;
+	struct scsi_cmnd *scsicmd = NULL;
+	struct uiscmdrsp *cmdrsp;
+	unsigned long flags;
+
+	devdata = container_of(work, struct visorhba_devdata,
+			       serverdown_completion);
+
+	/* Stop using the IOVM response queue (queue should be drained
+	 * by the end)
+	 */
+	visor_thread_stop(&devdata->threadinfo);
+
+	/* Fail commands that weren't completed */
+	spin_lock_irqsave(&devdata->privlock, flags);
+	for (i = 0; i < MAX_PENDING_REQUESTS; i++) {
+		pendingdel = &devdata->pending[i];
+		switch (pendingdel->cmdtype) {
+		case CMD_SCSI_TYPE:
+			scsicmd = (struct scsi_cmnd *)pendingdel->sent;
+			scsicmd->result = DID_RESET << 16;
+			if (scsicmd->scsi_done)
+				scsicmd->scsi_done(scsicmd);
+			break;
+		case CMD_SCSITASKMGMT_TYPE:
+			cmdrsp = (struct uiscmdrsp *)pendingdel->sent;
+			wake_up_all((wait_queue_head_t *)
+				    cmdrsp->scsitaskmgmt.notify);
+			*(int *)cmdrsp->scsitaskmgmt.notifyresult =
+				TASK_MGMT_FAILED;
+			break;
+		case CMD_VDISKMGMT_TYPE:
+			cmdrsp = (struct uiscmdrsp *)pendingdel->sent;
+			*(int *)cmdrsp->vdiskmgmt.notifyresult =
+				VDISK_MGMT_FAILED;
+			wake_up_all((wait_queue_head_t *)
+				    cmdrsp->vdiskmgmt.notify);
+			break;
+		default:
+			break;
+		}
+		pendingdel->cmdtype = 0;
+		pendingdel->sent = NULL;
+	}
+	spin_unlock_irqrestore(&devdata->privlock, flags);
+
+	devdata->serverdown = true;
+	devdata->serverchangingstate = false;
+}
+
+/**
+ *	visorhba_serverdown - Got notified that the IOVM is down
+ *	@devdata: visorhba that is being serviced by downed IOVM.
+ *
+ *	Something happened to the IOVM; return immediately and
+ *	schedule the cleanup work.
+ *	Returns 0 on success, -EINVAL if a state change is already in progress
+ */
+static int visorhba_serverdown(struct visorhba_devdata *devdata)
+{
+	if (!devdata->serverdown && !devdata->serverchangingstate) {
+		devdata->serverchangingstate = true;
+		queue_work(visorhba_serverdown_workqueue,
+			   &devdata->serverdown_completion);
+	} else if (devdata->serverchangingstate) {
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/**
+ *	do_scsi_linuxstat - scsi command returned linuxstat
+ *	@cmdrsp: response from IOVM
+ *	@scsicmd: Command issued.
+ *
+ *	Don't log errors for disk-not-present inquiries
+ *	Returns void
+ */
+static void
+do_scsi_linuxstat(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd)
+{
+	struct visorhba_devdata *devdata;
+	struct visordisk_info *vdisk;
+	struct scsi_device *scsidev;
+	struct sense_data *sd;
+
+	scsidev = scsicmd->device;
+	memcpy(scsicmd->sense_buffer, cmdrsp->scsi.sensebuf, MAX_SENSE_SIZE);
+	sd = (struct sense_data *)scsicmd->sense_buffer;
+
+	/* Do not log errors for disk-not-present inquiries */
+	if ((cmdrsp->scsi.cmnd[0] == INQUIRY) &&
+	    (host_byte(cmdrsp->scsi.linuxstat) == DID_NO_CONNECT) &&
+	    (cmdrsp->scsi.addlstat == ADDL_SEL_TIMEOUT))
+		return;
+	/* Okay see what our error_count is here.... */
+	devdata = (struct visorhba_devdata *)scsidev->host->hostdata;
+	for (vdisk = &devdata->head; vdisk->next; vdisk = vdisk->next) {
+		if ((scsidev->channel != vdisk->channel) ||
+		    (scsidev->id != vdisk->id) ||
+		    (scsidev->lun != vdisk->lun))
+			continue;
+
+		if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT) {
+			atomic_inc(&vdisk->error_count);
+			atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
+		}
+	}
+}
+
+/**
+ *	do_scsi_nolinuxstat - scsi command didn't have linuxstat
+ *	@cmdrsp: response from IOVM
+ *	@scsicmd: Command issued.
+ *
+ *	Handle response when no linuxstat was returned
+ *	Returns void
+ */
+static void
+do_scsi_nolinuxstat(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd)
+{
+	struct scsi_device *scsidev;
+	unsigned char buf[36];
+	struct scatterlist *sg;
+	unsigned int i;
+	char *this_page;
+	char *this_page_orig;
+	int bufind = 0;
+	struct visordisk_info *vdisk;
+	struct visorhba_devdata *devdata;
+
+	scsidev = scsicmd->device;
+	if ((cmdrsp->scsi.cmnd[0] == INQUIRY) &&
+	    (cmdrsp->scsi.bufflen >= MIN_INQUIRY_RESULT_LEN)) {
+		if (cmdrsp->scsi.no_disk_result == 0)
+			return;
+
+		/* Linux scsi code wants a device at Lun 0
+		 * to issue report luns, but we don't want
+		 * a disk there so we'll present a processor
+		 * there.
+		 */
+		SET_NO_DISK_INQUIRY_RESULT(buf, cmdrsp->scsi.bufflen,
+					   scsidev->lun,
+					   DEV_DISK_CAPABLE_NOT_PRESENT,
+					   DEV_NOT_CAPABLE);
+
+		if (scsi_sg_count(scsicmd) == 0) {
+			/* data expected but no scatterlist: midlayer bug */
+			BUG_ON(scsi_bufflen(scsicmd) > 0);
+			memcpy(scsi_sglist(scsicmd), buf,
+			       cmdrsp->scsi.bufflen);
+			return;
+		}
+
+		sg = scsi_sglist(scsicmd);
+		for (i = 0; i < scsi_sg_count(scsicmd); i++) {
+			this_page_orig = kmap_atomic(sg_page(sg + i));
+			this_page = (void *)((unsigned long)this_page_orig |
+					     sg[i].offset);
+			memcpy(this_page, buf + bufind, sg[i].length);
+			kunmap_atomic(this_page_orig);
+		}
+	} else {
+		devdata = (struct visorhba_devdata *)scsidev->host->hostdata;
+		vdisk = &devdata->head;
+		for ( ; vdisk->next; vdisk = vdisk->next) {
+			if ((scsidev->channel != vdisk->channel) ||
+			    (scsidev->id != vdisk->id) ||
+			    (scsidev->lun != vdisk->lun))
+				continue;
+			if (atomic_read(&vdisk->ios_threshold) > 0) {
+				atomic_dec(&vdisk->ios_threshold);
+				if (atomic_read(&vdisk->ios_threshold) == 0)
+					atomic_set(&vdisk->error_count, 0);
+			}
+		}
+	}
+}
+
+/**
+ *	complete_scsi_command - complete a scsi command
+ *	@cmdrsp: Response from the Service Partition
+ *	@scsicmd: The scsi command
+ *
+ *	Response returned by the Service Partition, finish it and send
+ *	completion to the scsi midlayer.
+ *	Returns void.
+ */
+static void
+complete_scsi_command(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd)
+{
+	/* take what we need out of cmdrsp and complete the scsicmd */
+	scsicmd->result = cmdrsp->scsi.linuxstat;
+	if (cmdrsp->scsi.linuxstat)
+		do_scsi_linuxstat(cmdrsp, scsicmd);
+	else
+		do_scsi_nolinuxstat(cmdrsp, scsicmd);
+
+	scsicmd->scsi_done(scsicmd);
+}
+
+/* DELETE VDISK TASK MGMT COMMANDS */
+static inline void complete_vdiskmgmt_command(struct uiscmdrsp *cmdrsp)
+{
+	/* copy the result of the taskmgmt and
+	 * wake up the error handler that is waiting for this
+	 */
+	*(int *)cmdrsp->vdiskmgmt.notifyresult = cmdrsp->vdiskmgmt.result;
+	wake_up_all((wait_queue_head_t *)cmdrsp->vdiskmgmt.notify);
+}
+
+/**
+ *	complete_taskmgmt_command - complete task management
+ *	@cmdrsp: Response from the IOVM
+ *
+ *	Service Partition returned the result of the task management
+ *	command. Wake up anyone waiting for it.
+ *	Returns void
+ */
+static inline void complete_taskmgmt_command(struct uiscmdrsp *cmdrsp)
+{
+	/* copy the result of the taskmgmt and
+	 * wake up the error handler that is waiting for this
+	 */
+	*(int *)cmdrsp->scsitaskmgmt.notifyresult = cmdrsp->scsitaskmgmt.result;
+	wake_up_all((wait_queue_head_t *)cmdrsp->scsitaskmgmt.notify);
+}
+
+static struct work_struct dar_work_queue;
+static struct diskaddremove *dar_work_queue_head;
+static spinlock_t dar_work_queue_lock; /* Lock to protect dar_work_queue_head */
+static unsigned short dar_work_queue_sched;
+
+/**
+ *	queue_disk_add_remove - IOSP has sent us an add/remove request
+ *	@dar: disk add/remove request
+ *
+ *	Queue the work needed to add/remove a disk.
+ *	Returns void
+ */
+static inline void queue_disk_add_remove(struct diskaddremove *dar)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&dar_work_queue_lock, flags);
+	if (!dar_work_queue_head) {
+		dar_work_queue_head = dar;
+		dar->next = NULL;
+	} else {
+		dar->next = dar_work_queue_head;
+		dar_work_queue_head = dar;
+	}
+	if (!dar_work_queue_sched) {
+		schedule_work(&dar_work_queue);
+		dar_work_queue_sched = 1;
+	}
+	spin_unlock_irqrestore(&dar_work_queue_lock, flags);
+}
+
+/**
+ *	process_disk_notify - IOSP has sent a process disk notify event
+ *	@shost: Scsi host
+ *	@cmdrsp: Response from the IOSP
+ *
+ *	Queue it to the work queue.
+ *	Return void.
+ */
+static void process_disk_notify(struct Scsi_Host *shost,
+				struct uiscmdrsp *cmdrsp)
+{
+	struct diskaddremove *dar;
+
+	dar = kzalloc(sizeof(*dar), GFP_ATOMIC);
+	if (dar) {
+		dar->add = cmdrsp->disknotify.add;
+		dar->shost = shost;
+		dar->channel = cmdrsp->disknotify.channel;
+		dar->id = cmdrsp->disknotify.id;
+		dar->lun = cmdrsp->disknotify.lun;
+		queue_disk_add_remove(dar);
+	}
+}
+
+/**
+ *	drain_queue - pull responses out of iochannel
+ *	@cmdrsp: Response from the IOSP
+ *	@devdata: device that owns this iochannel
+ *
+ *	Pulls responses out of the iochannel and processes them.
+ *	Returns void
+ */
+static void
+drain_queue(struct uiscmdrsp *cmdrsp, struct visorhba_devdata *devdata)
+{
+	struct scsi_cmnd *scsicmd;
+	struct Scsi_Host *shost = devdata->scsihost;
+
+	while (1) {
+		if (!visorchannel_signalremove(devdata->dev->visorchannel,
+					       IOCHAN_FROM_IOPART,
+					       cmdrsp))
+			break; /* queue empty */
+
+		if (cmdrsp->cmdtype == CMD_SCSI_TYPE) {
+			/* scsicmd location is returned by the
+			 * deletion
+			 */
+			scsicmd = del_scsipending_ent(devdata,
+						      (uintptr_t)
+						      cmdrsp->scsi.scsicmd);
+			if (!scsicmd)
+				break;
+			/* complete the orig cmd */
+			complete_scsi_command(cmdrsp, scsicmd);
+		} else if (cmdrsp->cmdtype == CMD_SCSITASKMGMT_TYPE) {
+			if (!del_scsipending_ent(devdata, (uintptr_t)
+						 cmdrsp->scsitaskmgmt.scsicmd))
+				break;
+			complete_taskmgmt_command(cmdrsp);
+		} else if (cmdrsp->cmdtype == CMD_NOTIFYGUEST_TYPE) {
+			/* The vHba pointer has no meaning in a
+			 * guest partition. Let's be safe and set it
+			 * to NULL now. Do not use it here!
+			 */
+			cmdrsp->disknotify.v_hba = NULL;
+			process_disk_notify(shost, cmdrsp);
+		} else if (cmdrsp->cmdtype == CMD_VDISKMGMT_TYPE) {
+			if (!del_scsipending_ent(devdata,
+						 (uintptr_t)
+						 cmdrsp->vdiskmgmt.scsicmd))
+				break;
+			complete_vdiskmgmt_command(cmdrsp);
+		}
+		/* cmdrsp is now available for reuse */
+	}
+}
+
+/**
+ *	process_incoming_rsps - Process responses from IOSP
+ *	@v: void pointer to visorhba_devdata
+ *
+ *	Main function for the thread that processes the responses
+ *	from the IO Service Partition. When the queue is empty, wait
+ *	briefly before checking it again.
+ */
+static int process_incoming_rsps(void *v)
+{
+	struct visorhba_devdata *devdata = v;
+	struct uiscmdrsp *cmdrsp = NULL;
+	const int size = sizeof(*cmdrsp);
+
+	cmdrsp = kmalloc(size, GFP_ATOMIC);
+	if (!cmdrsp)
+		complete_and_exit(&devdata->threadinfo.has_stopped, 0);
+
+	while (1) {
+		if (kthread_should_stop())
+			break;
+		wait_event_interruptible_timeout(
+			devdata->rsp_queue, (atomic_read(
+					     &devdata->interrupt_rcvd) == 1),
+				msecs_to_jiffies(devdata->thread_wait_ms));
+		/* drain queue */
+		drain_queue(cmdrsp, devdata);
+	}
+	kfree(cmdrsp);
+	complete_and_exit(&devdata->threadinfo.has_stopped, 0);
+}
+
+/**
+ *	visorhba_pause - function to handle visorbus pause messages
+ *	@dev: device that is pausing.
+ *	@complete_func: function to call when finished
+ *
+ *	Something has happened to the IO Service Partition that is
+ *	handling this device. Quiet this device and reset commands
+ *	so that the Service Partition can be corrected.
+ *	Returns SUCCESS
+ */
+static int visorhba_pause(struct visor_device *dev,
+			  visorbus_state_complete_func complete_func)
+{
+	struct visorhba_devdata *devdata = dev_get_drvdata(&dev->device);
+
+	visorhba_serverdown(devdata);
+	complete_func(dev, 0);
+	return 0;
+}
+
+/**
+ *	visorhba_resume - function called when the IO Service Partition is back
+ *	@dev: device that is resuming.
+ *	@complete_func: function to call when finished
+ *
+ *	Yay! The IO Service Partition is back, the channel has been wiped,
+ *	so let's re-establish the connection and start processing responses.
+ *	Returns 0 on success, error on failure.
+ */
+static int visorhba_resume(struct visor_device *dev,
+			   visorbus_state_complete_func complete_func)
+{
+	struct visorhba_devdata *devdata;
+
+	devdata = dev_get_drvdata(&dev->device);
+	if (!devdata)
+		return -EINVAL;
+
+	if (devdata->serverdown && !devdata->serverchangingstate)
+		devdata->serverchangingstate = true;
+
+	visor_thread_start(&devdata->threadinfo, process_incoming_rsps,
+			   devdata, "vhba_incoming");
+
+	devdata->serverdown = false;
+	devdata->serverchangingstate = false;
+
+	return 0;
+}
+
+/**
+ *	visorhba_probe - device has been discovered, do acquire
+ *	@dev: visor_device that was discovered
+ *
+ *	A new HBA was discovered; do its initial setup and connection.
+ *	Return 0 on success, otherwise error.
+ */
+static int visorhba_probe(struct visor_device *dev)
+{
+	struct Scsi_Host *scsihost;
+	struct vhba_config_max max;
+	struct visorhba_devdata *devdata = NULL;
+	int i, err, channel_offset;
+	u64 features;
+
+	scsihost = scsi_host_alloc(&visorhba_driver_template,
+				   sizeof(*devdata));
+	if (!scsihost)
+		return -ENODEV;
+
+	channel_offset = offsetof(struct spar_io_channel_protocol,
+				  vhba.max);
+	err = visorbus_read_channel(dev, channel_offset, &max,
+				    sizeof(struct vhba_config_max));
+	if (err < 0)
+		goto fail;
+
+	scsihost->max_id = (unsigned)max.max_id;
+	scsihost->max_lun = (unsigned)max.max_lun;
+	scsihost->cmd_per_lun = (unsigned)max.cmd_per_lun;
+	scsihost->max_sectors =
+	    (unsigned short)(max.max_io_size >> 9);
+	scsihost->sg_tablesize =
+	    (unsigned short)(max.max_io_size / PAGE_SIZE);
+	if (scsihost->sg_tablesize > MAX_PHYS_INFO)
+		scsihost->sg_tablesize = MAX_PHYS_INFO;
+	err = scsi_add_host(scsihost, &dev->device);
+	if (err < 0)
+		goto fail;
+
+	err = -ENOMEM;
+	devdata = devdata_initialize((struct visorhba_devdata *)
+				     scsihost->hostdata, dev);
+	if (!devdata)
+		goto fail_host;
+	for (i = 0; i < VISORHBAOPENMAX; i++) {
+		if (!visorhbas_open[i].devdata) {
+			visorhbas_open[i].devdata = devdata;
+			break;
+		}
+	}
+
+	devdata->dev = dev;
+	dev_set_drvdata(&dev->device, devdata);
+
+	init_waitqueue_head(&devdata->rsp_queue);
+	spin_lock_init(&devdata->privlock);
+	devdata->serverdown = false;
+	devdata->serverchangingstate = false;
+	devdata->scsihost = scsihost;
+
+	INIT_WORK(&devdata->serverdown_completion,
+		  visorhba_serverdown_complete);
+
+	channel_offset = offsetof(struct spar_io_channel_protocol,
+				  channel_header.features);
+	err = visorbus_read_channel(dev, channel_offset, &features, 8);
+	if (err)
+		goto fail_host;
+	features |= ULTRA_IO_CHANNEL_IS_POLLING;
+	err = visorbus_write_channel(dev, channel_offset, &features, 8);
+	if (err)
+		goto fail_host;
+
+	devdata->thread_wait_ms = 2;
+	visor_thread_start(&devdata->threadinfo, process_incoming_rsps,
+			   devdata, "vhba_incoming");
+
+	scsi_scan_host(scsihost);
+
+	return 0;
+fail_host:
+	scsi_remove_host(scsihost);
+
+fail:
+	scsi_host_put(scsihost);
+	return err;
+}
+
+/**
+ *	visorhba_remove - remove a visorhba device
+ *	@dev: Device to remove
+ *
+ *	Removes the visorhba device.
+ *	Returns void.
+ */
+static void visorhba_remove(struct visor_device *dev)
+{
+	struct visorhba_devdata *devdata = dev_get_drvdata(&dev->device);
+	struct Scsi_Host *scsihost = NULL;
+
+	if (!devdata)
+		return;
+
+	scsihost = devdata->scsihost;
+	visor_thread_stop(&devdata->threadinfo);
+	scsi_remove_host(scsihost);
+	scsi_host_put(scsihost);
+
+	dev_set_drvdata(&dev->device, NULL);
+	/* host_side_disappeared(devdata); */
+	kref_put(&devdata->kref, devdata_release);
+}
+
+/**
+ *	visorhba_init		- driver init routine
+ *
+ *	Initialize the visorhba driver and register it with visorbus
+ *	to handle s-Par virtual host bus adapter.
+ */
+static int visorhba_init(void)
+{
+	struct dentry *ret;
+	int rc = -ENOMEM;
+
+	spin_lock_init(&dev_num_pool_lock);
+	dev_num_pool = kcalloc(BITS_TO_LONGS(MAXDEVICES), sizeof(long), GFP_KERNEL);
+	if (!dev_num_pool)
+		return rc;
+
+	visorhba_debugfs_dir = debugfs_create_dir("visorhba", NULL);
+	if (!visorhba_debugfs_dir)
+		goto cleanup_dev_num_pool;
+
+	ret = debugfs_create_file("info", S_IRUSR, visorhba_debugfs_dir, NULL,
+				  &debugfs_info_fops);
+
+	if (!ret) {
+		rc = -EIO;
+		goto cleanup_debugfs;
+	}
+
+	rc = visorbus_register_visor_driver(&visorhba_driver);
+	if (rc)
+		goto cleanup_debugfs;
+
+	return rc;
+
+cleanup_debugfs:
+	debugfs_remove_recursive(visorhba_debugfs_dir);
+
+cleanup_dev_num_pool:
+	kfree(dev_num_pool);
+	dev_num_pool = NULL;
+	return rc;
+}
+
+/**
+ *	visorhba_exit	- driver exit routine
+ *
+ *	Unregister driver from the bus and free up memory.
+ */
+static void visorhba_exit(void)
+{
+	visorbus_unregister_visor_driver(&visorhba_driver);
+	debugfs_remove_recursive(visorhba_debugfs_dir);
+	kfree(dev_num_pool);
+	dev_num_pool = NULL;
+}
+
+module_init(visorhba_init);
+module_exit(visorhba_exit);
+
+MODULE_AUTHOR("Unisys");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("s-Par HBA driver for sparlinux: ver " VERSION);
+MODULE_VERSION(VERSION);
-- 
2.1.4
