[RFC 2/2] TCM: a userspace backend

This is a userspace backend for TCM. A userspace backend allows complex data
transformations such as deduplication and compression, and the data does not
have to come from a file or block device. Its main features:
1. Zero-copy. Commands, completions and data need no copying between userspace
and the kernel. Commands and completions each have a ring shared between kernel
and userspace, and the DMA pages used by the kernel are mapped directly into
user space.
2. Two ways to communicate events between kernel and userspace: direct polling
and file I/O. Userspace can poll the command ring directly to fetch commands,
or it can communicate through a file. The file only carries events, so the
amount of data transferred is tiny, and of course the file supports poll.
3. Easy data handling in userspace. Each command carries an iovec, and both the
command and its data can be accessed directly from userspace, so an application
that does not need complex data transformation can handle the data with plain
readv/writev (see the sketch after this list).
4. Exception handling. If the userspace daemon misbehaves, the backend has a
timeout mechanism to fail outstanding commands.
5. Efficiency. Userspace/kernel switches are kept to a minimum; multiple
commands or completions need only a single switch.
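
To illustrate point 3, below is a minimal sketch of a userspace handler that
backs the device with a plain file; the backing fd and the already-located
command entry are assumptions of the example rather than anything defined by
this patch.

#include <stdint.h>
#include <unistd.h>
#include "target_core_user.h"	/* the new header; also meant for userspace */

/*
 * Sketch only: back the device with a plain file.  With a matching
 * struct iovec definition the whole loop collapses into one
 * preadv()/pwritev() call on ent->iov.
 */
static int16_t handle_rw(int backing_fd, struct tcmu_cmd_entry *ent)
{
	uint64_t off = ent->offset;
	uint16_t i;

	for (i = 0; i < ent->iov_cnt; i++) {
		void *buf = ent->iov[i].iov_base;
		size_t len = ent->iov[i].iov_len;
		ssize_t ret;

		if (ent->opcode == TCMU_OP_READ)
			/* fill the mapped data pages for the initiator */
			ret = pread(backing_fd, buf, len, off);
		else
			/* for writes the data is already in place */
			ret = pwrite(backing_fd, buf, len, off);
		if (ret != (ssize_t)len)
			return -1;	/* becomes the completion's result */
		off += len;
	}
	return 0;
}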

The setup process:
1. Userspace mmaps a virtual address range for communication with the kernel.
The first page of the region is the mailbox page, which records the ring
heads/tails and is accessed by both kernel and userspace.
2. Userspace configures TCM.
3. The kernel configures TCM, creates an anon_file and passes its fd back
through the mailbox.
4. Userspace reads/writes the file to communicate with the kernel (a setup
sketch follows below).
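
A rough userspace sketch of these steps, assuming the usual target core
configfs layout; the control-file path, region size, ring sizes and device
size are illustrative values only, and most error handling is omitted.

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include "target_core_user.h"

#define REGION_SIZE	(64UL << 20)	/* example: 64MB mailbox region */

static struct tcmu_mailbox *tcmu_setup(const char *control_path)
{
	/* step 1: one mapping holding mailbox page + rings + data pages */
	void *base = mmap(NULL, REGION_SIZE, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	struct tcmu_mailbox *mb = base;
	char buf[256];
	FILE *f;

	if (base == MAP_FAILED)
		return NULL;

	/* userspace proposes the layout; the kernel validates it */
	memset(mb, 0, sizeof(*mb));
	mb->version = TCMU_MAILBOX_VERSION;
	mb->cmdr_addr = (uintptr_t)base + 4096;		/* command ring, assumes 4K pages */
	mb->cmdr_size = 1 << 20;
	mb->cplr_addr = mb->cmdr_addr + mb->cmdr_size;	/* completion ring */
	mb->cplr_size = 1 << 20;
	mb->data_addr = mb->cplr_addr + mb->cplr_size;	/* data pages */
	mb->data_size = REGION_SIZE - (mb->data_addr - (uintptr_t)base);

	/*
	 * step 2: hand the region to TCM (e.g. via the device's configfs
	 * "control" attribute); the kernel then does step 3 and writes the
	 * anon_file fd into mb->signal_fd once the device is enabled
	 */
	snprintf(buf, sizeof(buf),
		 "mailbox_addr=%llu,mailbox_size=%lu,dev_size=%llu",
		 (unsigned long long)(uintptr_t)base, REGION_SIZE,
		 1ULL << 30 /* example: 1GB device */);
	f = fopen(control_path, "w");
	if (!f)
		return NULL;
	fputs(buf, f);
	fclose(f);

	/* step 4: talk to the kernel through mb->signal_fd from now on */
	return mb;
}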

Data handling process:
1. The kernel sets up a command in the command ring and signals userspace
through the anon_file. The command's iovec points to userspace addresses; for a
write, those addresses already hold the data.
2. Userspace gets the event and handles the command.
3. Userspace signals the kernel through the anon_file after the command
finishes.
4. The kernel gets the signal and finishes the se_cmd (see the event-loop
sketch below).
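
A rough sketch of the userspace side of this loop, reusing handle_rw() from the
earlier sketch; completion-ring-full checks, memory barriers and the
non-read/write opcodes are left out for brevity.

#include <stdint.h>
#include <unistd.h>
#include "target_core_user.h"

static void tcmu_event_loop(struct tcmu_mailbox *mb, int backing_fd)
{
	uint32_t ev;

	/* step 2: block in read() until the kernel signals an event */
	while (read(mb->signal_fd, &ev, sizeof(ev)) == sizeof(ev)) {
		if (!(ev & TCMU_EVT_KERN_CMD_PENDING))
			continue;

		/* walk the command ring from our tail to the kernel's head */
		while (mb->cmd_tail != mb->cmd_head) {
			struct tcmu_cmd_entry *ent =
				(void *)(uintptr_t)mb->cmd_tail;
			struct tcmu_cpl_entry *cpl =
				(void *)(uintptr_t)mb->cpl_head;
			uint64_t next = mb->cmd_tail;

			if (ent->opcode == TCMU_OP_PAD) {
				next += TCMU_OP_PAD_SIZE;
			} else {
				int iovs = 0;

				if (tcmu_opcode_has_sgl(ent->opcode))
					iovs = ent->opcode == TCMU_OP_WRITE_SAME ?
						1 : ent->iov_cnt;

				cpl->cmd_id = ent->cmd_id;
				/* only plain reads/writes handled here */
				cpl->result = (tcmu_opcode_has_sgl(ent->opcode) &&
					ent->opcode != TCMU_OP_WRITE_SAME) ?
					handle_rw(backing_fd, ent) : 0;

				/* post the completion, wrapping the ring */
				mb->cpl_head += sizeof(*cpl);
				if (mb->cpl_head >= mb->cplr_addr + mb->cplr_size)
					mb->cpl_head = mb->cplr_addr;

				next += sizeof(*ent) +
					iovs * sizeof(struct iovec);
			}
			if (next >= mb->cmdr_addr + mb->cmdr_size)
				next = mb->cmdr_addr;
			mb->cmd_tail = next;
		}

		/* step 3: tell the kernel completions (and ring space) are ready */
		ev = TCMU_EVT_USER_CPL_PENDING | TCMU_EVT_USER_CMDR_NOT_FULL;
		write(mb->signal_fd, &ev, sizeof(ev));
	}
}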

The exit process:
1. Userspace unconfigures TCM.
2. The kernel unconfigures TCM.
3. Userspace closes the anon_file.
4. Userspace unmaps the virtual address range.

Currently the command/completion ring memory is pinned, but the data pages are
not. If get_user_pages is not fast enough, we can easily pin the data pages too.

TODO: comprehensive tests and performance tuning.

Signed-off-by: Shaohua Li <shli@xxxxxxxxxx>
---
 drivers/target/Kconfig            |    5 
 drivers/target/Makefile           |    1 
 drivers/target/target_core_user.c | 1414 ++++++++++++++++++++++++++++++++++++++
 drivers/target/target_core_user.h |   86 ++
 4 files changed, 1506 insertions(+)

Index: linux/drivers/target/Kconfig
===================================================================
--- linux.orig/drivers/target/Kconfig	2013-11-16 09:14:01.678714415 +0800
+++ linux/drivers/target/Kconfig	2013-11-16 09:14:01.670714889 +0800
@@ -29,6 +29,11 @@ config TCM_PSCSI
 	Say Y here to enable the TCM/pSCSI subsystem plugin for non-buffered
 	passthrough access to Linux/SCSI device
 
+config TCM_USER
+	tristate "TCM/USER Subsystem Plugin for Linux"
+	help
+	Say Y here to enable the TCM/USER subsystem plugin
+
 source "drivers/target/loopback/Kconfig"
 source "drivers/target/tcm_fc/Kconfig"
 source "drivers/target/iscsi/Kconfig"
Index: linux/drivers/target/Makefile
===================================================================
--- linux.orig/drivers/target/Makefile	2013-11-16 09:14:01.678714415 +0800
+++ linux/drivers/target/Makefile	2013-11-16 09:14:01.674714678 +0800
@@ -22,6 +22,7 @@ obj-$(CONFIG_TARGET_CORE)	+= target_core
 obj-$(CONFIG_TCM_IBLOCK)	+= target_core_iblock.o
 obj-$(CONFIG_TCM_FILEIO)	+= target_core_file.o
 obj-$(CONFIG_TCM_PSCSI)		+= target_core_pscsi.o
+obj-$(CONFIG_TCM_USER)		+= target_core_user.o
 
 # Fabric modules
 obj-$(CONFIG_LOOPBACK_TARGET)	+= loopback/
Index: linux/drivers/target/target_core_user.c
===================================================================
--- /dev/null	1970-01-01 00:00:00.000000000 +0000
+++ linux/drivers/target/target_core_user.c	2013-11-16 09:14:01.674714678 +0800
@@ -0,0 +1,1414 @@
+#include <linux/spinlock.h>
+#include <linux/module.h>
+#include <linux/idr.h>
+#include <linux/poll.h>
+#include <linux/timer.h>
+#include <linux/string.h>
+#include <linux/parser.h>
+#include <linux/anon_inodes.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <target/target_core_base.h>
+#include <target/target_core_backend.h>
+#include "target_core_user.h"
+
+#define TCMU_TIME_OUT 30000 /* 30s */
+
+struct tcmu_host {
+	uint32_t dev_id_count;
+	uint32_t host_id;
+};
+
+struct tcmu_dev {
+	struct se_device se_dev;
+	uint32_t dev_id;
+	struct task_struct *tsk;
+	uint64_t mailbox_addr;
+	struct page *mailbox_page;
+	pgoff_t mailbox_pgoff;
+	uint32_t mailbox_size;
+	uint64_t dev_size;
+
+	uint64_t cmdr_addr;
+	uint32_t cmdr_size;
+	struct page **cmdr_pages;
+	uint64_t cplr_addr;
+	uint32_t cplr_size;
+	struct page **cplr_pages;
+	uint64_t data_addr;
+	uint32_t data_size;
+
+	uint32_t user_event;
+	spinlock_t user_event_lock;
+	wait_queue_head_t wait_user_event;
+
+	unsigned long *free_page_bitmap;
+	wait_queue_head_t wait_free_page;
+	spinlock_t free_page_lock;
+
+	wait_queue_head_t wait_cmdr;
+	spinlock_t cmdr_lock;
+
+	spinlock_t cplr_lock;
+
+	struct idr commands;
+	spinlock_t commands_lock;
+
+	struct timer_list timeout;
+
+	unsigned int state;
+	struct kref ref;
+};
+
+#define TCMU_DEV(_se_dev) container_of(_se_dev, struct tcmu_dev, se_dev)
+
+enum {
+	TCMU_DEV_INIT = 0,
+	TCMU_DEV_CONFIGURED,
+	TCMU_DEV_READY,
+	TCMU_DEV_EXISTING,
+	TCMU_DEV_DIED,
+};
+
+struct tcmu_cmd {
+	struct se_cmd *se_cmd;
+	struct tcmu_dev *tcmu_dev;
+	union {
+		struct {
+			struct scatterlist *sgl;
+			uint32_t sgl_nents;
+			uint64_t writesame_repeat;
+		};
+		uint64_t size;
+	};
+	uint64_t pos;
+	uint8_t opcode;
+
+	uint16_t cmd_id;
+	int16_t result;
+
+	struct list_head list;
+	unsigned long deadline;
+	unsigned long flags;
+
+	struct completion completion;
+};
+#define EXPIRED_CMD ((struct tcmu_cmd *)-1)
+enum {
+	TCMU_CMD_COMPLETED = (1 << 0),
+	TCMU_CMD_NO_TARGET = (1 << 1),
+	TCMU_CMD_NO_FREE = (1 << 2),
+};
+static struct kmem_cache *tcmu_cmd_cache;
+
+static void tcmu_send_kernel_event(struct tcmu_dev *tcmu_dev, uint32_t event)
+{
+	uint32_t old_event;
+	if (event == 0)
+		return;
+	spin_lock_irq(&tcmu_dev->user_event_lock);
+	old_event = tcmu_dev->user_event;
+	tcmu_dev->user_event |= event;
+	spin_unlock_irq(&tcmu_dev->user_event_lock);
+	if (old_event == 0)
+		wake_up(&tcmu_dev->wait_user_event);
+}
+
+static void tcmu_handle_completions(struct tcmu_dev *tcmu_dev);
+static void tcmu_handle_user_event(struct tcmu_dev *tcmu_dev, uint32_t event)
+{
+	if (event & TCMU_EVT_USER_CPL_PENDING)
+		tcmu_handle_completions(tcmu_dev);
+	if (event & TCMU_EVT_USER_CMDR_NOT_FULL)
+		wake_up(&tcmu_dev->wait_cmdr);
+}
+
+static ssize_t tcmu_file_read(struct file *file, char __user *buf, size_t count,
+			    loff_t *ppos)
+{
+	struct tcmu_dev *tcmu_dev = file->private_data;
+	DEFINE_WAIT(__wait);
+	int ret;
+	uint32_t event;
+
+	if (count < sizeof(event))
+		return -EINVAL;
+	spin_lock_irq(&tcmu_dev->user_event_lock);
+	if (!tcmu_dev->user_event && (file->f_flags & O_NONBLOCK)) {
+		ret = -EAGAIN;
+		goto out;
+	}
+
+	ret = wait_event_interruptible_lock_irq(tcmu_dev->wait_user_event,
+		tcmu_dev->user_event != 0, tcmu_dev->user_event_lock);
+	if (ret)
+		goto out;
+
+	event = tcmu_dev->user_event;
+	tcmu_dev->user_event = 0;
+
+out:
+	spin_unlock_irq(&tcmu_dev->user_event_lock);
+	if (ret)
+		return ret;
+	return put_user(event, (uint32_t __user *) buf) ? -EFAULT : sizeof(event);
+}
+
+static ssize_t tcmu_file_write(struct file *file, const char __user *buf,
+	size_t count, loff_t *ppos)
+{
+	struct tcmu_dev *tcmu_dev = file->private_data;
+	uint32_t event;
+
+	if (count < sizeof(event))
+		return -EINVAL;
+	if (copy_from_user(&event, buf, sizeof(event)))
+		return -EFAULT;
+
+	smp_wmb();
+	tcmu_handle_user_event(tcmu_dev, event);
+	return sizeof(event);
+}
+
+static unsigned int tcmu_file_poll(struct file *file, poll_table *wait)
+{
+	struct tcmu_dev *tcmu_dev = file->private_data;
+	unsigned int events = POLLOUT; /* write is always ok */
+	unsigned long flags;
+
+	poll_wait(file, &tcmu_dev->wait_user_event, wait);
+	spin_lock_irqsave(&tcmu_dev->user_event_lock, flags);
+	if (tcmu_dev->user_event)
+		events |= POLLIN;
+	spin_unlock_irqrestore(&tcmu_dev->user_event_lock, flags);
+
+	return events;
+}
+
+static void tcmu_destroy_device(struct kref *kref)
+{
+	struct tcmu_dev *tcmu_dev = container_of(kref, struct tcmu_dev, ref);
+
+	BUG_ON(tcmu_dev->state != TCMU_DEV_DIED);
+	kfree(tcmu_dev);
+}
+
+static int tcmu_file_release(struct inode *inode, struct file *file)
+{
+	struct tcmu_dev *tcmu_dev = file->private_data;
+
+	tcmu_dev->state = TCMU_DEV_DIED;
+
+	wake_up_poll(&tcmu_dev->wait_user_event, POLLHUP);
+
+	kref_put(&tcmu_dev->ref, tcmu_destroy_device);
+	return 0;
+}
+
+static const struct file_operations tcmu_fops = {
+	.owner = THIS_MODULE,
+	.read = tcmu_file_read,
+	.write = tcmu_file_write,
+	.poll = tcmu_file_poll,
+	.release = tcmu_file_release,
+	.llseek = noop_llseek,
+};
+
+static inline uint64_t tcmu_page_user_addr(struct tcmu_dev *tcmu_dev, struct page *page)
+{
+	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+	return ((pgoff - tcmu_dev->mailbox_pgoff) << PAGE_SHIFT) +
+			tcmu_dev->mailbox_addr;
+}
+
+static int tcmu_get_user_pages(struct tcmu_dev *tcmu_dev, struct page **pages, int cnt)
+{
+	int max_pages = tcmu_dev->data_size / PAGE_SIZE;
+	unsigned long index, last_index;
+	int start, i, ret;
+	unsigned long addr;
+	DEFINE_WAIT(__wait);
+	struct mm_struct *mm = tcmu_dev->tsk->mm;
+
+	spin_lock(&tcmu_dev->free_page_lock);
+	for (i = 0; i < cnt; i++) {
+		while (1) {
+			prepare_to_wait(&tcmu_dev->wait_free_page, &__wait,
+				TASK_INTERRUPTIBLE);
+			index = find_first_zero_bit(tcmu_dev->free_page_bitmap,
+				max_pages);
+
+			if (index < max_pages) {
+				set_bit(index, tcmu_dev->free_page_bitmap);
+				pages[i] = (struct page *)index;
+				break;
+			}
+			spin_unlock(&tcmu_dev->free_page_lock);
+
+			ret = schedule_timeout(msecs_to_jiffies(TCMU_TIME_OUT));
+			spin_lock(&tcmu_dev->free_page_lock);
+			if (!ret)
+				goto timedout;
+		}
+	}
+	spin_unlock(&tcmu_dev->free_page_lock);
+	finish_wait(&tcmu_dev->wait_free_page, &__wait);
+
+	start = 0;
+	last_index = ((unsigned long)pages[0]) - 1;
+	for (i = 0; i < cnt; i++) {
+		index = (unsigned long)pages[i];
+		/* Their virtual addresses are adjacent */
+		if (index == last_index + 1)
+			continue;
+
+		addr = ((unsigned long)pages[start]) * PAGE_SIZE +
+			tcmu_dev->data_addr;
+		down_read(&mm->mmap_sem);
+		ret = get_user_pages(tcmu_dev->tsk, mm, addr, i - start,
+			1, 0, &pages[start], NULL);
+		up_read(&mm->mmap_sem);
+		if (ret < i - start)
+			goto err;
+
+		last_index = index;
+		start = i;
+	}
+
+	addr = ((unsigned long)pages[start]) * PAGE_SIZE +
+			tcmu_dev->data_addr;
+	down_read(&mm->mmap_sem);
+	ret = get_user_pages(tcmu_dev->tsk, mm, addr, i - start,
+			1, 0, &pages[start], NULL);
+	up_read(&mm->mmap_sem);
+	if (ret < i - start)
+		goto err;
+
+	return 0;
+err:
+	if (ret < 0)
+		ret = 0;
+	spin_lock(&tcmu_dev->free_page_lock);
+	for (i = 0; i < start + ret; i++) {
+		addr = tcmu_page_user_addr(tcmu_dev, pages[i]);
+		put_page(pages[i]);
+
+		addr -= tcmu_dev->data_addr;
+		addr >>= PAGE_SHIFT;
+		clear_bit(addr, tcmu_dev->free_page_bitmap);
+	}
+	for (; i < cnt; i++)
+		clear_bit((unsigned long)pages[i], tcmu_dev->free_page_bitmap);
+	spin_unlock(&tcmu_dev->free_page_lock);
+	return -ENOMEM;
+timedout:
+	while (i > 0) {
+		clear_bit((unsigned long)pages[i - 1], tcmu_dev->free_page_bitmap);
+		i--;
+	}
+	spin_unlock(&tcmu_dev->free_page_lock);
+	finish_wait(&tcmu_dev->wait_free_page, &__wait);
+	return -ENOMEM;
+}
+
+static int tcmu_alloc_sgl(struct se_device *dev, struct scatterlist **sgl,
+	unsigned int *nents, uint32_t length, bool zero_page)
+{
+	struct tcmu_dev *tcmu_dev = TCMU_DEV(dev);
+	struct scatterlist *sg;
+	struct page *page;
+	unsigned int nent;
+	int i = 0;
+	struct page **pages;
+
+	nent = DIV_ROUND_UP(length, PAGE_SIZE);
+	sg = kmalloc(sizeof(struct scatterlist) * nent, GFP_KERNEL);
+	if (!sg)
+		return -ENOMEM;
+	pages = kmalloc(sizeof(struct page *) * nent, GFP_KERNEL);
+	if (!pages) {
+		kfree(sg);
+		return -ENOMEM;
+	}
+
+	sg_init_table(sg, nent);
+
+	if (tcmu_get_user_pages(tcmu_dev, pages, nent))
+		goto out;
+
+	while (length) {
+		uint32_t page_len = min_t(uint32_t, length, PAGE_SIZE);
+		page = pages[i];
+		if (zero_page) {
+			clear_highpage(page);
+			flush_dcache_page(page);
+		}
+
+		sg_set_page(&sg[i], page, page_len, 0);
+		length -= page_len;
+		i++;
+	}
+	*sgl = sg;
+	*nents = nent;
+	kfree(pages);
+	return 0;
+
+out:
+	kfree(sg);
+	kfree(pages);
+	return -ENOMEM;
+}
+
+static void tcmu_free_sgl(struct se_device *dev, struct scatterlist *sgl, int nents)
+{
+	struct tcmu_dev *tcmu_dev = TCMU_DEV(dev);
+	struct scatterlist *sg;
+	struct page *page;
+	unsigned long addr;
+	int count;
+
+	spin_lock(&tcmu_dev->free_page_lock);
+	for_each_sg(sgl, sg, nents, count) {
+		page = sg_page(sg);
+		addr = tcmu_page_user_addr(tcmu_dev, page);
+
+		put_page(page);
+
+		addr -= tcmu_dev->data_addr;
+		addr >>= PAGE_SHIFT;
+		clear_bit(addr, tcmu_dev->free_page_bitmap);
+	}
+	spin_unlock(&tcmu_dev->free_page_lock);
+
+	wake_up(&tcmu_dev->wait_free_page);
+
+	kfree(sgl);
+}
+
+static void __tcmu_complete_cmd(struct tcmu_cmd *cmd, s16 result)
+{
+	if (!(cmd->flags & TCMU_CMD_NO_TARGET)) {
+		if (result)
+			target_complete_cmd(cmd->se_cmd, SAM_STAT_CHECK_CONDITION);
+		else
+			target_complete_cmd(cmd->se_cmd, SAM_STAT_GOOD);
+	}
+	if (!(cmd->flags & TCMU_CMD_NO_FREE))
+		kmem_cache_free(tcmu_cmd_cache, cmd);
+	else
+		complete(&cmd->completion);
+}
+
+static void tcmu_complete_cmd(struct tcmu_dev *tcmu_dev, struct tcmu_cmd *cmd)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&tcmu_dev->commands_lock, flags);
+	idr_remove(&tcmu_dev->commands, cmd->cmd_id);
+	spin_unlock_irqrestore(&tcmu_dev->commands_lock, flags);
+
+	__tcmu_complete_cmd(cmd, cmd->result);
+}
+
+static void tcmu_handle_completions(struct tcmu_dev *tcmu_dev)
+{
+	struct tcmu_mailbox *mb;
+	struct page *ring_page;
+	int offset;
+	struct tcmu_cpl_entry *ent;
+	uint64_t pos, mb_head, mb_tail;
+	struct tcmu_cmd *cmd;
+	LIST_HEAD(cpl_cmds);
+	bool cplr_full;
+
+	flush_dcache_page(tcmu_dev->mailbox_page);
+	mb = kmap_atomic(tcmu_dev->mailbox_page);
+
+	spin_lock(&tcmu_dev->cplr_lock);
+	mb_head = mb->cpl_head;
+	mb_tail = mb->cpl_tail;
+
+	if (mb_head == mb_tail ||
+	    !IN_RANGE(mb_head, tcmu_dev->cplr_addr, tcmu_dev->cplr_size) ||
+	    !IN_RANGE(mb_tail, tcmu_dev->cplr_addr, tcmu_dev->cplr_size)) {
+		spin_unlock(&tcmu_dev->cplr_lock);
+		kunmap_atomic(mb);
+		return;
+	}
+
+	pos = mb_head + sizeof(struct tcmu_cpl_entry);
+	if (pos >= tcmu_dev->cplr_addr + tcmu_dev->cplr_size)
+		pos = tcmu_dev->cplr_addr;
+	cplr_full = (pos == mb_tail);
+
+	pos = mb_tail;
+	while (pos != mb_head) {
+		char *addr;
+
+		ring_page = tcmu_dev->cplr_pages[(pos - tcmu_dev->cplr_addr) >>
+			PAGE_SHIFT];
+		flush_dcache_page(ring_page);
+		addr = kmap_atomic(ring_page);
+
+		while (pos != mb_head) {
+			unsigned long flags;
+			bool do_complete = false;
+
+			offset = pos % PAGE_SIZE;
+			ent = (struct tcmu_cpl_entry *)(addr + offset);
+
+			spin_lock_irqsave(&tcmu_dev->commands_lock, flags);
+			cmd = idr_find(&tcmu_dev->commands, ent->cmd_id);
+			if (cmd == EXPIRED_CMD) {
+				idr_remove(&tcmu_dev->commands, cmd->cmd_id);
+			} else if (cmd) {
+				cmd->flags |= TCMU_CMD_COMPLETED;
+				do_complete = true;
+			}
+			spin_unlock_irqrestore(&tcmu_dev->commands_lock, flags);
+
+			/* we don't race with expiration check */
+			if (do_complete) {
+				cmd->result = ent->result;
+				list_add_tail(&cmd->list, &cpl_cmds);
+			}
+
+			pos += sizeof(struct tcmu_cpl_entry);
+			if ((pos % PAGE_SIZE) == 0)
+				break;
+		}
+		kunmap_atomic(addr);
+		if (pos >= tcmu_dev->cplr_addr + tcmu_dev->cplr_size)
+			pos = tcmu_dev->cplr_addr;
+	}
+	mb->cpl_tail = pos;
+	spin_unlock(&tcmu_dev->cplr_lock);
+
+	kunmap_atomic(mb);
+	flush_dcache_page(tcmu_dev->mailbox_page);
+
+	if (cplr_full) {
+		smp_wmb();
+		tcmu_send_kernel_event(tcmu_dev, TCMU_EVT_KERN_CPLR_NOT_FULL);
+	}
+
+	while (!list_empty(&cpl_cmds)) {
+		cmd = container_of(cpl_cmds.next, struct tcmu_cmd, list);
+		list_del(&cmd->list);
+		tcmu_complete_cmd(tcmu_dev, cmd);
+	}
+}
+
+static void tcmu_copy_to_cmd_ring(struct tcmu_dev *tcmu_dev, uint64_t head,
+	struct page **last_page, void **last_page_addr, void *data, ssize_t len)
+{
+	struct page *this_page;
+	void *addr;
+	int offset;
+
+again:
+	offset = head % PAGE_SIZE;
+	this_page = tcmu_dev->cmdr_pages[(head - tcmu_dev->cmdr_addr) >> PAGE_SHIFT];
+	if (*last_page && *last_page == this_page)
+		addr = *last_page_addr;
+	else {
+		if (*last_page) {
+			kunmap_atomic(*last_page_addr);
+			flush_dcache_page(*last_page);
+		}
+		addr = kmap_atomic(this_page);
+		*last_page = this_page;
+		*last_page_addr = addr;
+	}
+
+	if (offset + len <= PAGE_SIZE) {
+		memcpy(addr + offset, data, len);
+		return;
+	}
+	memcpy(addr + offset, data, PAGE_SIZE - offset);
+	head += PAGE_SIZE - offset;
+	data += PAGE_SIZE - offset;
+	len -= PAGE_SIZE - offset;
+	goto again;
+}
+
+static uint64_t tcmu_dispatch_cmd(struct tcmu_dev *tcmu_dev, uint64_t head, bool pad,
+	struct tcmu_cmd *cmd)
+{
+	struct scatterlist *sg;
+	uint16_t iov_cnt;
+	struct iovec iov;
+	int i, cmd_size = 0;
+	struct page *ring_page = NULL;
+	void *ring_page_addr;
+	bool new_iov;
+
+	if (pad) {
+		uint8_t opcode = TCMU_OP_PAD;
+		while (head < tcmu_dev->cmdr_addr + tcmu_dev->cmdr_size) {
+			tcmu_copy_to_cmd_ring(tcmu_dev, head, &ring_page,
+				&ring_page_addr, &opcode, sizeof(opcode));
+			head += TCMU_OP_PAD_SIZE;
+		}
+		head = tcmu_dev->cmdr_addr;
+	}
+
+	tcmu_copy_to_cmd_ring(tcmu_dev,
+		head + offsetof(struct tcmu_cmd_entry, opcode), &ring_page,
+		&ring_page_addr, &cmd->opcode, sizeof(uint8_t));
+
+	tcmu_copy_to_cmd_ring(tcmu_dev,
+		head + offsetof(struct tcmu_cmd_entry, cmd_id), &ring_page,
+		&ring_page_addr, &cmd->cmd_id, sizeof(uint16_t));
+
+	tcmu_copy_to_cmd_ring(tcmu_dev,
+		head + offsetof(struct tcmu_cmd_entry, offset), &ring_page,
+		&ring_page_addr, &cmd->pos, sizeof(uint64_t));
+
+	if (!tcmu_opcode_has_sgl(cmd->opcode)) {
+		tcmu_copy_to_cmd_ring(tcmu_dev,
+			head + offsetof(struct tcmu_cmd_entry, size), &ring_page,
+			&ring_page_addr, &cmd->size, sizeof(uint64_t));
+		cmd_size = sizeof(struct tcmu_cmd_entry);
+		goto skip_iov;
+	}
+
+	iov_cnt = 0;
+	new_iov = true;
+	for_each_sg(cmd->sgl, sg, cmd->sgl_nents, i) {
+		uint64_t page_user_addr = tcmu_page_user_addr(tcmu_dev, sg_page(sg));
+
+		if (new_iov) {
+			iov_cnt++;
+			iov.iov_len = sg->length;
+			iov.iov_base = (void *)(page_user_addr + sg->offset);
+			new_iov = false;
+			continue;
+		}
+
+		if ((uint64_t)(iov.iov_base + iov.iov_len) ==
+		    page_user_addr + sg->offset) {
+			iov.iov_len += sg->length;
+			continue;
+		}
+
+		tcmu_copy_to_cmd_ring(tcmu_dev,
+			head + offsetof(struct tcmu_cmd_entry, iov) +
+			sizeof(struct iovec) * (iov_cnt - 1),
+			&ring_page, &ring_page_addr, &iov, sizeof(iov));
+
+		iov_cnt++;
+		iov.iov_len = sg->length;
+		iov.iov_base = (void *)(page_user_addr + sg->offset);
+	}
+
+	if (iov_cnt)
+		tcmu_copy_to_cmd_ring(tcmu_dev,
+			head + offsetof(struct tcmu_cmd_entry, iov) +
+			sizeof(struct iovec) * (iov_cnt - 1),
+			&ring_page, &ring_page_addr, &iov, sizeof(iov));
+
+	cmd_size = sizeof(struct tcmu_cmd_entry) + sizeof(struct iovec) * iov_cnt;
+
+	if (cmd->opcode == TCMU_OP_WRITE_SAME) {
+		tcmu_copy_to_cmd_ring(tcmu_dev,
+			head + offsetof(struct tcmu_cmd_entry, writesame_repeat),
+			&ring_page, &ring_page_addr, &cmd->writesame_repeat,
+			sizeof(uint64_t));
+	} else
+		tcmu_copy_to_cmd_ring(tcmu_dev,
+			head + offsetof(struct tcmu_cmd_entry, iov_cnt),
+			&ring_page, &ring_page_addr, &iov_cnt, sizeof(uint16_t));
+
+skip_iov:
+	kunmap_atomic(ring_page_addr);
+	flush_dcache_page(ring_page);
+
+	head += cmd_size;
+	if (head >= tcmu_dev->cmdr_addr + tcmu_dev->cmdr_size)
+		head = tcmu_dev->cmdr_addr;
+	return head;
+}
+
+static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd, uint8_t opcode,
+	uint64_t pos, struct scatterlist *sgl, uint32_t sgl_nents,
+	uint64_t writesame_repeat, uint64_t size, unsigned long flags)
+{
+	struct se_device *se_dev = se_cmd->se_dev;
+	struct tcmu_dev *tcmu_dev = TCMU_DEV(se_dev);
+	struct tcmu_cmd *tcmu_cmd;
+	uint16_t cmd_id;
+
+	/* pending requests will time out, new requests fail directly */
+	if (tcmu_dev->state != TCMU_DEV_READY)
+		return NULL;
+
+	tcmu_cmd = kmem_cache_alloc(tcmu_cmd_cache, GFP_KERNEL);
+	if (!tcmu_cmd)
+		return NULL;
+
+	tcmu_cmd->se_cmd = se_cmd;
+	tcmu_cmd->tcmu_dev = tcmu_dev;
+	tcmu_cmd->opcode = opcode;
+	tcmu_cmd->pos = pos;
+	if (tcmu_opcode_has_sgl(opcode)) {
+		tcmu_cmd->sgl = sgl;
+		tcmu_cmd->sgl_nents = sgl_nents;
+		tcmu_cmd->writesame_repeat = writesame_repeat;
+	} else
+		tcmu_cmd->size = size;
+	INIT_LIST_HEAD(&tcmu_cmd->list);
+	tcmu_cmd->deadline = jiffies + msecs_to_jiffies(TCMU_TIME_OUT);
+	tcmu_cmd->flags = flags;
+	init_completion(&tcmu_cmd->completion);
+
+	idr_preload(GFP_KERNEL);
+	spin_lock_irq(&tcmu_dev->commands_lock);
+	cmd_id = idr_alloc(&tcmu_dev->commands, tcmu_cmd, 0,
+		USHRT_MAX, GFP_NOWAIT);
+	spin_unlock_irq(&tcmu_dev->commands_lock);
+	idr_preload_end();
+
+	if (cmd_id < 0) {
+		kmem_cache_free(tcmu_cmd_cache, tcmu_cmd);
+		return NULL;
+	}
+	tcmu_cmd->cmd_id = cmd_id;
+
+	return tcmu_cmd;
+}
+
+static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
+{
+	struct tcmu_dev *tcmu_dev = tcmu_cmd->tcmu_dev;
+	int command_size, pad_size;
+	struct tcmu_mailbox *mb;
+	uint64_t mb_head, mb_tail;
+	DEFINE_WAIT(__wait);
+	int ret = 0;
+
+	command_size = sizeof(struct tcmu_cmd_entry);
+	if (tcmu_opcode_has_sgl(tcmu_cmd->opcode))
+		command_size += sizeof(struct iovec) * tcmu_cmd->sgl_nents;
+
+	mb = kmap(tcmu_dev->mailbox_page);
+
+	spin_lock(&tcmu_dev->cmdr_lock);
+	while (true) {
+		prepare_to_wait(&tcmu_dev->wait_cmdr, &__wait, TASK_INTERRUPTIBLE);
+
+		flush_dcache_page(tcmu_dev->mailbox_page);
+		mb_head = mb->cmd_head;
+		mb_tail = mb->cmd_tail;
+		if (!IN_RANGE(mb_head, tcmu_dev->cmdr_addr, tcmu_dev->cmdr_size) ||
+		    !IN_RANGE(mb_tail, tcmu_dev->cmdr_addr, tcmu_dev->cmdr_size)) {
+			spin_unlock(&tcmu_dev->cmdr_lock);
+			ret = -EIO;
+			goto err_timedout;
+		}
+
+		if (mb_head - tcmu_dev->cmdr_addr + command_size >
+		    tcmu_dev->cmdr_size)
+			pad_size = tcmu_dev->cmdr_size +
+				tcmu_dev->cmdr_addr - mb_head;
+		else
+			pad_size = 0;
+		/* This guarantees a command occupies a contiguous virtual address range */
+		if (tcmu_dev->cmdr_size - ((mb_head - mb_tail) %
+			tcmu_dev->cmdr_size) >= command_size + pad_size)
+			break;
+		spin_unlock(&tcmu_dev->cmdr_lock);
+		ret = schedule_timeout(msecs_to_jiffies(TCMU_TIME_OUT));
+		if (!ret) {
+			ret = -EIO;
+			goto err_timedout;
+		}
+		spin_lock(&tcmu_dev->cmdr_lock);
+	}
+	finish_wait(&tcmu_dev->wait_cmdr, &__wait);
+
+	mb->cmd_head = tcmu_dispatch_cmd(tcmu_dev, mb_head,
+		!!pad_size, tcmu_cmd);
+	spin_unlock(&tcmu_dev->cmdr_lock);
+
+	kunmap(tcmu_dev->mailbox_page);
+	flush_dcache_page(tcmu_dev->mailbox_page);
+
+	smp_wmb();
+	tcmu_send_kernel_event(tcmu_dev, TCMU_EVT_KERN_CMD_PENDING);
+	return 0;
+
+err_timedout:
+	finish_wait(&tcmu_dev->wait_cmdr, &__wait);
+	kunmap(tcmu_dev->mailbox_page);
+	return ret;
+}
+
+static int tcmu_queue_rw_cmd(struct se_cmd *se_cmd, uint64_t pos,
+	struct scatterlist *sgl, uint32_t sgl_nents,
+	uint64_t writesame_repeat, uint8_t opcode)
+{
+	struct se_device *se_dev = se_cmd->se_dev;
+	struct tcmu_dev *tcmu_dev = TCMU_DEV(se_dev);
+	struct tcmu_cmd *tcmu_cmd;
+
+	tcmu_cmd = tcmu_alloc_cmd(se_cmd, opcode, pos, sgl, sgl_nents,
+				writesame_repeat, 0, 0);
+	if (!tcmu_cmd)
+		return -ENOMEM;
+
+	if (!tcmu_queue_cmd_ring(tcmu_cmd))
+		return 0;
+
+	spin_lock_irq(&tcmu_dev->commands_lock);
+	idr_remove(&tcmu_dev->commands, tcmu_cmd->cmd_id);
+	spin_unlock_irq(&tcmu_dev->commands_lock);
+
+	kmem_cache_free(tcmu_cmd_cache, tcmu_cmd);
+	return -EIO;
+}
+
+/* return value can't be used unless TCMU_CMD_NO_FREE is set in flags */
+static struct tcmu_cmd *tcmu_queue_range_cmd(struct se_cmd *se_cmd, uint8_t opcode,
+	uint64_t pos, uint64_t size, unsigned long flags)
+{
+	struct se_device *se_dev = se_cmd->se_dev;
+	struct tcmu_dev *tcmu_dev = TCMU_DEV(se_dev);
+	struct tcmu_cmd *tcmu_cmd;
+
+	tcmu_cmd = tcmu_alloc_cmd(se_cmd, opcode, pos, NULL, 0, 0, size, flags);
+	if (!tcmu_cmd)
+		return NULL;
+
+	if (!tcmu_queue_cmd_ring(tcmu_cmd))
+		return tcmu_cmd;
+
+	spin_lock_irq(&tcmu_dev->commands_lock);
+	idr_remove(&tcmu_dev->commands, tcmu_cmd->cmd_id);
+	spin_unlock_irq(&tcmu_dev->commands_lock);
+
+	kmem_cache_free(tcmu_cmd_cache, tcmu_cmd);
+	return NULL;
+}
+
+static int tcmu_check_expired_cmd(int id, void *p, void *data)
+{
+	struct tcmu_cmd *cmd = p;
+	struct tcmu_dev *tcmu_dev = cmd->tcmu_dev;
+
+	if (cmd == EXPIRED_CMD)
+		return 0;
+
+	if (time_after(cmd->deadline, jiffies))
+		return 0;
+
+	/* race with tcmu_handle_completions */
+	if (cmd->flags & TCMU_CMD_COMPLETED)
+		return 0;
+
+	/*
+	 * We shouldn't free the ID; the command can still finish after the
+	 * expiration check. That may leave some IDs stale, but we have errors anyway.
+	 */
+	idr_replace(&tcmu_dev->commands, EXPIRED_CMD, cmd->cmd_id);
+	__tcmu_complete_cmd(cmd, -EIO);
+	return 0;
+}
+
+static void tcmu_device_timedout(unsigned long data)
+{
+	struct tcmu_dev *tcmu_dev = (struct tcmu_dev *)data;
+	unsigned long flags;
+
+	spin_lock_irqsave(&tcmu_dev->commands_lock, flags);
+	idr_for_each(&tcmu_dev->commands, tcmu_check_expired_cmd, NULL);
+	spin_unlock_irqrestore(&tcmu_dev->commands_lock, flags);
+
+	mod_timer(&tcmu_dev->timeout,
+		round_jiffies_up(jiffies + msecs_to_jiffies(TCMU_TIME_OUT)));
+}
+
+static int tcmu_attach_hba(struct se_hba *hba, u32 host_id)
+{
+	struct tcmu_host *tcmu_host;
+
+	tcmu_host = kzalloc(sizeof(struct tcmu_host), GFP_KERNEL);
+	if (!tcmu_host)
+		return -ENOMEM;
+
+	tcmu_host->host_id = host_id;
+
+	hba->hba_ptr = tcmu_host;
+
+	return 0;
+}
+
+static void tcmu_detach_hba(struct se_hba *hba)
+{
+	struct tcmu_host *tcmu_host = hba->hba_ptr;
+
+	kfree(tcmu_host);
+	hba->hba_ptr = NULL;
+}
+
+static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
+{
+	struct tcmu_dev *tcmu_dev;
+
+	tcmu_dev = kzalloc(sizeof(struct tcmu_dev), GFP_KERNEL);
+	if (!tcmu_dev)
+		return NULL;
+
+	tcmu_dev->user_event = 0;
+	init_waitqueue_head(&tcmu_dev->wait_user_event);
+	spin_lock_init(&tcmu_dev->user_event_lock);
+
+	init_waitqueue_head(&tcmu_dev->wait_free_page);
+	spin_lock_init(&tcmu_dev->free_page_lock);
+
+	init_waitqueue_head(&tcmu_dev->wait_cmdr);
+	spin_lock_init(&tcmu_dev->cmdr_lock);
+
+	spin_lock_init(&tcmu_dev->cplr_lock);
+
+	idr_init(&tcmu_dev->commands);
+	spin_lock_init(&tcmu_dev->commands_lock);
+
+	setup_timer(&tcmu_dev->timeout, tcmu_device_timedout,
+		(unsigned long)tcmu_dev);
+
+	tcmu_dev->state = TCMU_DEV_INIT;
+
+	return &tcmu_dev->se_dev;
+}
+
+/* can only be called by the userspace daemon, as we need its address space */
+static int tcmu_configure_device(struct se_device *dev)
+{
+	struct tcmu_dev *tcmu_dev = TCMU_DEV(dev);
+	struct mm_struct *mm = current->mm;
+	struct vm_area_struct *vma;
+	struct tcmu_mailbox *mailbox;
+	int ret = 0;
+	int cnt;
+	int i;
+
+	if (tcmu_dev->state != TCMU_DEV_CONFIGURED)
+		return -EINVAL;
+	tcmu_dev->tsk = current;
+
+	if (!tcmu_dev->dev_size || !tcmu_dev->mailbox_addr ||
+	    !tcmu_dev->mailbox_size)
+		return -EINVAL;
+
+	if ((tcmu_dev->mailbox_addr % PAGE_SIZE) ||
+	    (tcmu_dev->mailbox_size % PAGE_SIZE))
+		return -EINVAL;
+
+	down_read(&mm->mmap_sem);
+	vma = find_vma(mm, tcmu_dev->mailbox_addr);
+	if (!vma || vma->vm_end < tcmu_dev->mailbox_size + tcmu_dev->mailbox_addr ||
+	    is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_NONLINEAR)) {
+		up_read(&mm->mmap_sem);
+		return -EINVAL;
+	}
+
+	ret = get_user_pages(tcmu_dev->tsk, mm, tcmu_dev->mailbox_addr, 1,
+			1, 0, &tcmu_dev->mailbox_page, NULL);
+	if (ret != 1) {
+		up_read(&mm->mmap_sem);
+		return -EFAULT;
+	}
+	tcmu_dev->mailbox_pgoff = tcmu_dev->mailbox_page->index <<
+		(PAGE_CACHE_SHIFT - PAGE_SHIFT);
+
+	up_read(&mm->mmap_sem);
+
+	flush_dcache_page(tcmu_dev->mailbox_page);
+	mailbox = kmap(tcmu_dev->mailbox_page);
+
+	if (mailbox->version != TCMU_MAILBOX_VERSION) {
+		ret = -EINVAL;
+		goto err_check;
+	}
+
+	if ((mailbox->cmdr_addr % PAGE_SIZE) ||
+	    (mailbox->cmdr_size % PAGE_SIZE) ||
+	    (mailbox->cplr_addr % PAGE_SIZE) ||
+	    (mailbox->cplr_size % PAGE_SIZE) ||
+	    (mailbox->data_addr % PAGE_SIZE) ||
+	    (mailbox->data_size % PAGE_SIZE) ||
+	    !mailbox->cmdr_size ||
+	    !mailbox->cplr_size ||
+	    !mailbox->data_size) {
+		ret = -EINVAL;
+		goto err_check;
+	}
+
+	if (!(mailbox->cmdr_addr >= tcmu_dev->mailbox_addr &&
+	     mailbox->cmdr_addr + mailbox->cmdr_size <=
+	      tcmu_dev->mailbox_addr + tcmu_dev->mailbox_size) ||
+	    !(mailbox->cplr_addr >= tcmu_dev->mailbox_addr &&
+	     mailbox->cplr_addr + mailbox->cplr_size <=
+	      tcmu_dev->mailbox_addr + tcmu_dev->mailbox_size) ||
+	    !(mailbox->data_addr >= tcmu_dev->mailbox_addr &&
+	     mailbox->data_addr + mailbox->data_size <=
+	      tcmu_dev->mailbox_addr + tcmu_dev->mailbox_size)) {
+		ret = -EINVAL;
+		goto err_check;
+	}
+
+	if (mailbox->cmdr_size < sizeof(struct tcmu_cmd_entry) +
+	      sizeof(struct iovec) * SCSI_MAX_SG_CHAIN_SEGMENTS ||
+	    mailbox->data_size < PAGE_SIZE * SCSI_MAX_SG_CHAIN_SEGMENTS) {
+		ret = -EINVAL;
+		goto err_check;
+	}
+
+	tcmu_dev->cmdr_addr = mailbox->cmdr_addr;
+	tcmu_dev->cmdr_size = mailbox->cmdr_size;
+	tcmu_dev->cplr_addr = mailbox->cplr_addr;
+	tcmu_dev->cplr_size = mailbox->cplr_size;
+	tcmu_dev->data_addr = mailbox->data_addr;
+	tcmu_dev->data_size = mailbox->data_size;
+
+	cnt = tcmu_dev->cmdr_size >> PAGE_SHIFT;
+	tcmu_dev->cmdr_pages = kzalloc(sizeof(struct page *) * cnt, GFP_KERNEL);
+	if (!tcmu_dev->cmdr_pages) {
+		ret = -ENOMEM;
+		goto err_check;
+	}
+	down_read(&mm->mmap_sem);
+	ret = get_user_pages(tcmu_dev->tsk, mm, tcmu_dev->cmdr_addr, cnt,
+			1, 0, tcmu_dev->cmdr_pages, NULL);
+	up_read(&mm->mmap_sem);
+	if (ret != cnt) {
+		cnt = 0;
+		if (ret > 0)
+			cnt = ret;
+		ret = -EFAULT;
+		goto err_gup_cmdr;
+	}
+
+	cnt = tcmu_dev->cplr_size >> PAGE_SHIFT;
+	tcmu_dev->cplr_pages = kzalloc(sizeof(struct page *) * cnt, GFP_KERNEL);
+	if (!tcmu_dev->cplr_pages) {
+		ret = -ENOMEM;
+		cnt = tcmu_dev->cmdr_size >> PAGE_SHIFT;
+		goto err_gup_cmdr;
+	}
+	down_read(&mm->mmap_sem);
+	ret = get_user_pages(tcmu_dev->tsk, mm, tcmu_dev->cplr_addr, cnt,
+			1, 0, tcmu_dev->cplr_pages, NULL);
+	up_read(&mm->mmap_sem);
+	if (ret != cnt) {
+		cnt = 0;
+		if (ret > 0)
+			cnt = ret;
+		ret = -EFAULT;
+		goto err_gup_cplr;
+	}
+
+	tcmu_dev->free_page_bitmap = kzalloc(sizeof(unsigned long) *
+		BITS_TO_LONGS(mailbox->data_size/PAGE_SIZE), GFP_KERNEL);
+	if (!tcmu_dev->free_page_bitmap) {
+		cnt = tcmu_dev->cplr_size >> PAGE_SHIFT;
+		ret = -ENOMEM;
+		goto err_gup_cplr;
+	}
+
+	ret = anon_inode_getfd("[tcm_user]", &tcmu_fops, tcmu_dev, O_RDWR | O_NONBLOCK);
+	if (ret < 0)
+		goto err_getfd;
+
+	mailbox->signal_fd = ret;
+	mailbox->cmd_head = tcmu_dev->cmdr_addr;
+	mailbox->cmd_tail = tcmu_dev->cmdr_addr;
+	mailbox->cpl_head = tcmu_dev->cplr_addr;
+	mailbox->cpl_tail = tcmu_dev->cplr_addr;
+
+	/* Other attributes can be configured in userspace */
+	dev->dev_attrib.hw_block_size = 512;
+	dev->dev_attrib.hw_max_sectors = 2048;
+	dev->dev_attrib.hw_queue_depth = 2048;
+
+	kunmap(tcmu_dev->mailbox_page);
+	flush_dcache_page(tcmu_dev->mailbox_page);
+
+	mod_timer(&tcmu_dev->timeout,
+		round_jiffies_up(jiffies + msecs_to_jiffies(TCMU_TIME_OUT)));
+
+	/* one for allocation, one for signal fd */
+	kref_init(&tcmu_dev->ref);
+	kref_get(&tcmu_dev->ref);
+
+	tcmu_dev->state = TCMU_DEV_READY;
+
+	tcmu_dev->dev_id = ((struct tcmu_host *)dev->se_hba->hba_ptr)->dev_id_count++;
+	return 0;
+err_getfd:
+	kfree(tcmu_dev->free_page_bitmap);
+	cnt = tcmu_dev->cplr_size >> PAGE_SHIFT;
+err_gup_cplr:
+	for (i = 0; i < cnt; i++)
+		put_page(tcmu_dev->cplr_pages[i]);
+	kfree(tcmu_dev->cplr_pages);
+	cnt = tcmu_dev->cmdr_size >> PAGE_SHIFT;
+err_gup_cmdr:
+	for (i = 0; i < cnt; i++)
+		put_page(tcmu_dev->cmdr_pages[i]);
+	kfree(tcmu_dev->cmdr_pages);
+err_check:
+	kunmap(tcmu_dev->mailbox_page);
+	put_page(tcmu_dev->mailbox_page);
+	return ret;
+}
+
+static int tcmu_check_pending_cmd(int id, void *p, void *data)
+{
+	if (p == EXPIRED_CMD)
+		return 0;
+	return -EINVAL;
+}
+
+static void tcmu_free_device(struct se_device *dev)
+{
+	int i, cnt;
+	struct tcmu_dev *tcmu_dev = TCMU_DEV(dev);
+
+	if (tcmu_dev->state == TCMU_DEV_INIT) {
+		idr_destroy(&tcmu_dev->commands);
+		kfree(tcmu_dev);
+		return;
+	}
+
+	cmpxchg(&tcmu_dev->state, TCMU_DEV_READY, TCMU_DEV_EXISTING);
+
+	del_timer_sync(&tcmu_dev->timeout);
+
+	kfree(tcmu_dev->free_page_bitmap);
+	cnt = tcmu_dev->cplr_size >> PAGE_SHIFT;
+	for (i = 0; i < cnt; i++)
+		put_page(tcmu_dev->cplr_pages[i]);
+	kfree(tcmu_dev->cplr_pages);
+	cnt = tcmu_dev->cmdr_size >> PAGE_SHIFT;
+	for (i = 0; i < cnt; i++)
+		put_page(tcmu_dev->cmdr_pages[i]);
+	kfree(tcmu_dev->cmdr_pages);
+	put_page(tcmu_dev->mailbox_page);
+
+	/* upper layer should drain all requests before calling this */
+	spin_lock_irq(&tcmu_dev->commands_lock);
+	i = idr_for_each(&tcmu_dev->commands, tcmu_check_pending_cmd, NULL);
+	idr_destroy(&tcmu_dev->commands);
+	spin_unlock_irq(&tcmu_dev->commands_lock);
+	BUG_ON(i);
+
+	kref_put(&tcmu_dev->ref, tcmu_destroy_device);
+}
+
+enum {
+	Opt_mailbox_addr, Opt_mailbox_size, Opt_dev_size, Opt_err
+};
+
+static match_table_t tokens = {
+	{Opt_mailbox_addr, "mailbox_addr=%s"},
+	{Opt_mailbox_size, "mailbox_size=%s"},
+	{Opt_dev_size, "dev_size=%s"},
+	{Opt_err, NULL}
+};
+
+static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
+		const char *page, ssize_t count)
+{
+	struct tcmu_dev *udev = TCMU_DEV(dev);
+	char *orig, *ptr, *arg_p, *opts;
+	substring_t args[MAX_OPT_ARGS];
+	int ret = 0, token;
+
+	if (udev->state != TCMU_DEV_INIT)
+		return -EINVAL;
+
+	opts = kstrdup(page, GFP_KERNEL);
+	if (!opts)
+		return -ENOMEM;
+
+	orig = opts;
+
+	while ((ptr = strsep(&opts, ",\n")) != NULL) {
+		if (!*ptr)
+			continue;
+
+		token = match_token(ptr, tokens, args);
+		switch (token) {
+		case Opt_mailbox_addr:
+			arg_p = match_strdup(&args[0]);
+			if (!arg_p) {
+				ret = -ENOMEM;
+				break;
+			}
+			ret = kstrtoull(arg_p, 0, &udev->mailbox_addr);
+			kfree(arg_p);
+			if (ret < 0)
+				goto out;
+			break;
+		case Opt_mailbox_size:
+			arg_p = match_strdup(&args[0]);
+			if (!arg_p) {
+				ret = -ENOMEM;
+				break;
+			}
+			ret = kstrtouint(arg_p, 0, &udev->mailbox_size);
+			kfree(arg_p);
+			if (ret < 0)
+				goto out;
+			break;
+		case Opt_dev_size:
+			arg_p = match_strdup(&args[0]);
+			if (!arg_p) {
+				ret = -ENOMEM;
+				break;
+			}
+			ret = kstrtoull(arg_p, 0, &udev->dev_size);
+			kfree(arg_p);
+			if (ret < 0)
+				goto out;
+			break;
+		default:
+			break;
+		}
+	}
+
+	udev->state = TCMU_DEV_CONFIGURED;
+out:
+	kfree(orig);
+	return (!ret) ? count : ret;
+}
+
+static ssize_t tcmu_show_configfs_dev_params(struct se_device *dev, char *b)
+{
+	struct tcmu_dev *udev = TCMU_DEV(dev);
+	ssize_t bl = 0;
+
+	bl = sprintf(b + bl, "TCM USER ID: %u", udev->dev_id);
+	bl += sprintf(b + bl,
+	    "      Mailbox_addr: %llu Mailbox_size: %u Dev_size: %llu\n",
+	    udev->mailbox_addr, udev->mailbox_size, udev->dev_size);
+	return bl;
+}
+
+static sector_t tcmu_get_blocks(struct se_device *dev)
+{
+	struct tcmu_dev *udev = TCMU_DEV(dev);
+
+	return div_u64(udev->dev_size, dev->dev_attrib.block_size);
+}
+
+static sense_reason_t tcmu_execute_rw(struct se_cmd *cmd,
+	struct scatterlist *sgl, u32 sgl_nents,
+	enum dma_data_direction data_direction)
+{
+	struct se_device *se_dev = cmd->se_dev;
+	int ret;
+	uint8_t opcode;
+
+	if (data_direction == DMA_FROM_DEVICE)
+		opcode = TCMU_OP_READ;
+	else if (cmd->se_cmd_flags & SCF_FUA)
+		opcode = TCMU_OP_WRITE_FUA;
+	else
+		opcode = TCMU_OP_WRITE;
+
+	ret = tcmu_queue_rw_cmd(cmd,
+		cmd->t_task_lba * se_dev->dev_attrib.block_size,
+		sgl, sgl_nents, 0, opcode);
+
+	if (ret < 0)
+		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+	return 0;
+}
+
+static sense_reason_t tcmu_execute_sync_cache(struct se_cmd *cmd)
+{
+	struct se_device *dev = cmd->se_dev;
+	int immed = (cmd->t_task_cdb[1] & 0x2);
+	loff_t start, end;
+	int ret = 0;
+
+	/*
+	 * If the Immediate bit is set, queue up the GOOD response
+	 * for this SYNCHRONIZE_CACHE op
+	 */
+	if (immed)
+		target_complete_cmd(cmd, SAM_STAT_GOOD);
+
+	/*
+	 * Determine if we will be flushing the entire device.
+	 */
+	if (cmd->t_task_lba == 0 && cmd->data_length == 0) {
+		start = 0;
+		end = LLONG_MAX;
+	} else {
+		start = cmd->t_task_lba * dev->dev_attrib.block_size;
+		if (cmd->data_length)
+			end = start + cmd->data_length;
+		else
+			end = LLONG_MAX;
+	}
+
+	if (!tcmu_queue_range_cmd(cmd, TCMU_OP_SYNC, start, end - start,
+	     immed ? TCMU_CMD_NO_TARGET : 0))
+		ret = -EIO;
+
+	if (immed)
+		return 0;
+	if (ret)
+		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+	return 0;
+}
+
+static sense_reason_t tcmu_execute_write_same(struct se_cmd *cmd)
+{
+	struct se_device *se_dev = cmd->se_dev;
+	sector_t nolb = sbc_get_write_same_sectors(cmd);
+	loff_t pos = cmd->t_task_lba * se_dev->dev_attrib.block_size;
+	struct scatterlist *sg;
+	int ret;
+
+	if (!nolb) {
+		target_complete_cmd(cmd, SAM_STAT_GOOD);
+		return 0;
+	}
+	sg = &cmd->t_data_sg[0];
+
+	if (cmd->t_data_nents > 1 ||
+	    sg->length != cmd->se_dev->dev_attrib.block_size) {
+		return TCM_INVALID_CDB_FIELD;
+	}
+
+	ret = tcmu_queue_rw_cmd(cmd, pos, cmd->t_data_sg, cmd->t_data_nents,
+		nolb, TCMU_OP_WRITE_SAME);
+
+	if (ret < 0)
+		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+	return 0;
+}
+
+static sense_reason_t tcmu_execute_write_same_unmap(struct se_cmd *cmd)
+{
+	struct se_device *se_dev = cmd->se_dev;
+	sector_t lba = cmd->t_task_lba;
+	sector_t nolb = sbc_get_write_same_sectors(cmd);
+
+	if (!nolb) {
+		target_complete_cmd(cmd, SAM_STAT_GOOD);
+		return 0;
+	}
+
+	if (!tcmu_queue_range_cmd(cmd, TCMU_OP_UNMAP,
+	    lba * se_dev->dev_attrib.block_size,
+	    nolb * se_dev->dev_attrib.block_size, 0))
+		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+	return 0;
+}
+
+static sense_reason_t tcmu_do_unmap(struct se_cmd *cmd, void *priv,
+	sector_t lba, sector_t nolb)
+{
+	struct se_device *se_dev = cmd->se_dev;
+	struct tcmu_cmd *tcmu_cmd;
+	int16_t result;
+
+	tcmu_cmd = tcmu_queue_range_cmd(cmd, TCMU_OP_UNMAP,
+		lba * se_dev->dev_attrib.block_size,
+		nolb * se_dev->dev_attrib.block_size,
+		TCMU_CMD_NO_TARGET|TCMU_CMD_NO_FREE);
+
+	if (!tcmu_cmd)
+		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+	wait_for_completion(&tcmu_cmd->completion);
+	result = tcmu_cmd->result;
+	kmem_cache_free(tcmu_cmd_cache, tcmu_cmd);
+
+	if (result)
+		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+	return 0;
+}
+
+static sense_reason_t tcmu_execute_unmap(struct se_cmd *cmd)
+{
+	struct se_device *se_dev = cmd->se_dev;
+	struct tcmu_dev *udev = TCMU_DEV(se_dev);
+
+	return sbc_execute_unmap(cmd, tcmu_do_unmap, udev);
+}
+
+static struct sbc_ops tcmu_sbc_ops = {
+	.execute_rw = tcmu_execute_rw,
+	.execute_sync_cache = tcmu_execute_sync_cache,
+	.execute_write_same = tcmu_execute_write_same,
+	.execute_write_same_unmap = tcmu_execute_write_same_unmap,
+	.execute_unmap = tcmu_execute_unmap,
+};
+
+static sense_reason_t
+tcmu_parse_cdb(struct se_cmd *cmd)
+{
+	return sbc_parse_cdb(cmd, &tcmu_sbc_ops);
+}
+
+static struct se_subsystem_api tcmu_template = {
+	.name			= "tcmu",
+	.inquiry_prod		= "TCM_USER",
+	.inquiry_rev		= TCMU_VERSION,
+	.owner			= THIS_MODULE,
+	.transport_type		= TRANSPORT_PLUGIN_VHBA_PDEV,
+	.attach_hba		= tcmu_attach_hba,
+	.detach_hba		= tcmu_detach_hba,
+	.alloc_device		= tcmu_alloc_device,
+	.configure_device	= tcmu_configure_device,
+	.free_device		= tcmu_free_device,
+	.parse_cdb		= tcmu_parse_cdb,
+	.set_configfs_dev_params = tcmu_set_configfs_dev_params,
+	.show_configfs_dev_params = tcmu_show_configfs_dev_params,
+	.get_device_type	= sbc_get_device_type,
+	.get_blocks		= tcmu_get_blocks,
+	.alloc_sgl		= tcmu_alloc_sgl,
+	.free_sgl		= tcmu_free_sgl,
+};
+
+static int __init tcmu_module_init(void)
+{
+	int ret;
+
+	BUILD_BUG_ON((sizeof(struct tcmu_cmd_entry) % TCMU_OP_PAD_SIZE) != 0);
+
+	tcmu_cmd_cache = kmem_cache_create("tcmu_cmd_cache",
+				sizeof(struct tcmu_cmd),
+				__alignof__(struct tcmu_cmd),
+				0, NULL);
+	if (!tcmu_cmd_cache)
+		return -ENOMEM;
+
+	ret = transport_subsystem_register(&tcmu_template);
+	if (ret) {
+		kmem_cache_destroy(tcmu_cmd_cache);
+		return ret;
+	}
+	return 0;
+}
+
+static void __exit tcmu_module_exit(void)
+{
+	kmem_cache_destroy(tcmu_cmd_cache);
+	transport_subsystem_release(&tcmu_template);
+}
+
+MODULE_DESCRIPTION("TCM USER subsystem plugin");
+MODULE_AUTHOR("Shaohua Li <shli@xxxxxxxxxx>");
+MODULE_LICENSE("GPL");
+
+module_init(tcmu_module_init);
+module_exit(tcmu_module_exit);
Index: linux/drivers/target/target_core_user.h
===================================================================
--- /dev/null	1970-01-01 00:00:00.000000000 +0000
+++ linux/drivers/target/target_core_user.h	2013-11-16 09:14:01.674714678 +0800
@@ -0,0 +1,86 @@
+#ifndef __TARGET_CORE_USER_H
+#define __TARGET_CORE_USER_H
+
+/* This header is also used by userspace applications */
+
+#include <linux/types.h>
+#include <linux/uio.h>
+
+#define TCMU_VERSION "1.0"
+
+struct tcmu_cmd_entry {
+	uint8_t opcode;
+	uint8_t __pad1;
+	uint16_t cmd_id;
+	uint32_t __pad2;
+	union {
+		uint16_t iov_cnt; /* Normal read/write/write_fua */
+		uint64_t size; /* sync, unmap */
+		uint64_t writesame_repeat; /* write same. Should always have one iovec */
+	};
+	uint64_t offset;
+	struct iovec iov[0];
+} __attribute__ ((__packed__));
+
+enum tcmu_opcode {
+	TCMU_OP_PAD = 0,
+	TCMU_OP_READ,
+	TCMU_OP_WRITE,
+	TCMU_OP_WRITE_FUA,
+	TCMU_OP_SYNC,
+	TCMU_OP_UNMAP,
+	TCMU_OP_WRITE_SAME,
+};
+
+#define TCMU_OP_PAD_SIZE sizeof(uint64_t)
+
+static int tcmu_opcode_has_sgl(uint8_t opcode)
+{
+	return opcode == TCMU_OP_READ || opcode == TCMU_OP_WRITE ||
+		opcode == TCMU_OP_WRITE_FUA || opcode == TCMU_OP_WRITE_SAME;
+}
+
+struct tcmu_cpl_entry {
+	uint16_t cmd_id;
+	int16_t result;
+} __attribute__ ((__packed__));
+
+#define TCMU_MAILBOX_VERSION 1
+#define ALIGN_SIZE 128 /* Should be enough for most CPUs */
+struct tcmu_mailbox {
+	uint8_t version;
+	uint64_t cmdr_addr; /* command ring */
+	uint32_t cmdr_size;
+	uint64_t cplr_addr; /* completion ring */
+	uint32_t cplr_size;
+	uint64_t data_addr; /* data pages */
+	uint32_t data_size;
+
+	int32_t signal_fd; /* updated by kernel */
+
+	/* The fields below can be changed by userspace at any time, be careful */
+	volatile uint64_t cmd_head __attribute__((__aligned__(ALIGN_SIZE))); /* updated by kernel */
+	volatile uint64_t cmd_tail; /* updated by user */
+	volatile uint64_t cpl_head __attribute__((__aligned__(ALIGN_SIZE))); /* updated by user */
+	volatile uint64_t cpl_tail; /* updated by kernel */
+
+	/* aligned(PAGE_SIZE) */
+	/* command ring virtual address space */
+	/* aligned(PAGE_SIZE) */
+	/* completion ring virtual address space */
+	/* aligned(PAGE_SIZE) */
+	/* data virtual address space */
+} __attribute__ ((__packed__));
+
+enum tcmu_event {
+	/* kernel to user event */
+	TCMU_EVT_KERN_CMD_PENDING = (1 << 0),
+	TCMU_EVT_KERN_CPLR_NOT_FULL = (1 << 1),
+	/* user to kernel event */
+	TCMU_EVT_USER_CMDR_NOT_FULL = (1 << 2),
+	TCMU_EVT_USER_CPL_PENDING = (1 << 3),
+};
+
+#define IN_RANGE(pos, start, size) (pos >= start && pos < start + size)
+
+#endif

--