Re: [PATCH 3/3] vfio-ccw: add handling for async channel instructions

On 11/22/2018 11:54 AM, Cornelia Huck wrote:
Add a region to the vfio-ccw device that can be used to submit
asynchronous I/O instructions. ssch continues to be handled by the
existing I/O region; the new region handles hsch and csch.

Interrupt status continues to be reported through the same channels
as for ssch.

Signed-off-by: Cornelia Huck <cohuck@xxxxxxxxxx>
---
  drivers/s390/cio/Makefile           |   3 +-
  drivers/s390/cio/vfio_ccw_async.c   |  88 ++++++++++++++++
  drivers/s390/cio/vfio_ccw_drv.c     |  48 ++++++---
  drivers/s390/cio/vfio_ccw_fsm.c     | 158 +++++++++++++++++++++++++++-
  drivers/s390/cio/vfio_ccw_ops.c     |  13 ++-
  drivers/s390/cio/vfio_ccw_private.h |   6 ++
  include/uapi/linux/vfio.h           |   4 +
  include/uapi/linux/vfio_ccw.h       |  12 +++
  8 files changed, 313 insertions(+), 19 deletions(-)
  create mode 100644 drivers/s390/cio/vfio_ccw_async.c
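
Just to check my understanding of how userspace is supposed to drive this: once the offset of the async command region is known (via VFIO_DEVICE_GET_REGION_INFO), issuing e.g. a halt would boil down to something like the sketch below. Untested, written against the uapi further down in this patch; issue_halt(), device_fd and region_offset are placeholder names of mine.

#include <unistd.h>
#include <linux/vfio_ccw.h>

/* device_fd: the vfio device fd
 * region_offset: offset of the async command region from its region info
 */
static int issue_halt(int device_fd, off_t region_offset)
{
	struct ccw_cmd_region cmd = {
		.command = VFIO_CCW_ASYNC_CMD_HSCH,
	};

	/* a non-zero ret_code is passed back as the write's return value,
	 * so e.g. -EBUSY from the fsm should surface as an error here */
	if (pwrite(device_fd, &cmd, sizeof(cmd), region_offset) != sizeof(cmd))
		return -1;

	return 0;
}

Reading the region back at the same offset should likewise return the current command/ret_code pair, if I read the read handler correctly.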

diff --git a/drivers/s390/cio/Makefile b/drivers/s390/cio/Makefile
index f230516abb96..f6a8db04177c 100644
--- a/drivers/s390/cio/Makefile
+++ b/drivers/s390/cio/Makefile
@@ -20,5 +20,6 @@ obj-$(CONFIG_CCWGROUP) += ccwgroup.o
  qdio-objs := qdio_main.o qdio_thinint.o qdio_debug.o qdio_setup.o
  obj-$(CONFIG_QDIO) += qdio.o
-vfio_ccw-objs += vfio_ccw_drv.o vfio_ccw_cp.o vfio_ccw_ops.o vfio_ccw_fsm.o
+vfio_ccw-objs += vfio_ccw_drv.o vfio_ccw_cp.o vfio_ccw_ops.o vfio_ccw_fsm.o \
+	vfio_ccw_async.o
  obj-$(CONFIG_VFIO_CCW) += vfio_ccw.o
diff --git a/drivers/s390/cio/vfio_ccw_async.c b/drivers/s390/cio/vfio_ccw_async.c
new file mode 100644
index 000000000000..8c7f51d17d70
--- /dev/null
+++ b/drivers/s390/cio/vfio_ccw_async.c
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Async I/O region for vfio_ccw
+ *
+ * Copyright Red Hat, Inc. 2018
+ *
+ * Author(s): Cornelia Huck <cohuck@xxxxxxxxxx>
+ */
+
+#include <linux/vfio.h>
+#include <linux/mdev.h>
+
+#include "vfio_ccw_private.h"
+
+static size_t vfio_ccw_async_region_read(struct vfio_ccw_private *private,
+					 char __user *buf, size_t count,
+					 loff_t *ppos)
+{
+	unsigned int i = VFIO_CCW_OFFSET_TO_INDEX(*ppos) - VFIO_CCW_NUM_REGIONS;
+	loff_t pos = *ppos & VFIO_CCW_OFFSET_MASK;
+	struct ccw_cmd_region *region;
+
+	if (pos + count > sizeof(*region))
+		return -EINVAL;
+
+	region = private->region[i].data;
+	if (copy_to_user(buf, (void *)region + pos, count))
+		return -EFAULT;
+
+	return count;
+
+}
+
+static size_t vfio_ccw_async_region_write(struct vfio_ccw_private *private,
+					  const char __user *buf, size_t count,
+					  loff_t *ppos)
+{
+	unsigned int i = VFIO_CCW_OFFSET_TO_INDEX(*ppos) - VFIO_CCW_NUM_REGIONS;
+	loff_t pos = *ppos & VFIO_CCW_OFFSET_MASK;
+	struct ccw_cmd_region *region;
+
+	if (pos + count > sizeof(*region))
+		return -EINVAL;
+
+	if (private->state == VFIO_CCW_STATE_NOT_OPER ||
+	    private->state == VFIO_CCW_STATE_STANDBY)
+		return -EACCES;
+
+	region = private->region[i].data;
+	if (copy_from_user((void *)region + pos, buf, count))
+		return -EFAULT;
+
+	switch (region->command) {
+	case VFIO_CCW_ASYNC_CMD_HSCH:
+		vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_HALT_REQ);
+		break;
+	case VFIO_CCW_ASYNC_CMD_CSCH:
+		vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_CLEAR_REQ);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return region->ret_code ? region->ret_code : count;
+}
+
+static void vfio_ccw_async_region_release(struct vfio_ccw_private *private,
+					  struct vfio_ccw_region *region)
+{
+
+}
+
+const struct vfio_ccw_regops vfio_ccw_async_region_ops = {
+	.read = vfio_ccw_async_region_read,
+	.write = vfio_ccw_async_region_write,
+	.release = vfio_ccw_async_region_release,
+};
+
+int vfio_ccw_register_async_dev_regions(struct vfio_ccw_private *private)
+{
+	return vfio_ccw_register_dev_region(private,
+					    VFIO_REGION_SUBTYPE_CCW_ASYNC_CMD,
+					    &vfio_ccw_async_region_ops,
+					    sizeof(struct ccw_cmd_region),
+					    VFIO_REGION_INFO_FLAG_READ |
+					    VFIO_REGION_INFO_FLAG_WRITE,
+					    private->cmd_region);
+}
diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c
index a10cec0e86eb..890c588a3a61 100644
--- a/drivers/s390/cio/vfio_ccw_drv.c
+++ b/drivers/s390/cio/vfio_ccw_drv.c
@@ -3,9 +3,11 @@
   * VFIO based Physical Subchannel device driver
   *
   * Copyright IBM Corp. 2017
+ * Copyright Red Hat, Inc. 2018
   *
   * Author(s): Dong Jia Shi <bjsdjshi@xxxxxxxxxxxxxxxxxx>
   *            Xiao Feng Ren <renxiaof@xxxxxxxxxxxxxxxxxx>
+ *            Cornelia Huck <cohuck@xxxxxxxxxx>
   */

  #include <linux/module.h>
@@ -23,6 +25,7 @@

  struct workqueue_struct *vfio_ccw_work_q;
  static struct kmem_cache *vfio_ccw_io_region;
+static struct kmem_cache *vfio_ccw_cmd_region;

  /*
   * Helpers
@@ -76,7 +79,8 @@ static void vfio_ccw_sch_io_todo(struct work_struct *work)
  	private = container_of(work, struct vfio_ccw_private, io_work);
  	irb = &private->irb;
-	if (scsw_is_solicited(&irb->scsw)) {
+	if (scsw_is_solicited(&irb->scsw) &&
+	    (scsw_fctl(&irb->scsw) & SCSW_FCTL_START_FUNC)) {
  		cp_update_scsw(&private->cp, &irb->scsw);
  		cp_free(&private->cp);
  	}

I am a little confused about this. Why do we only update the scsw.cpa when the start function control bit is set? Is it an optimization?

The Linux CIO code does accumulate the scsw.cpa without depending on the start function control bit, or did I miss something?

Maybe we could update the condition in the if statement to match what the Linux CIO layer does here: https://elixir.bootlin.com/linux/latest/source/drivers/s390/cio/device_status.c#L265?
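
To illustrate what I mean, something along these lines -- untested, and only a rough rendering from memory of the check I think that code does, so take it with a grain of salt:

	if (scsw_is_solicited(&irb->scsw) &&
	    (scsw_stctl(&irb->scsw) & SCSW_STCTL_STATUS_PEND)) {
		cp_update_scsw(&private->cp, &irb->scsw);
		cp_free(&private->cp);
	}

i.e. gate the cp update on status pending rather than on the start function control bit.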

@@ -104,7 +108,7 @@ static int vfio_ccw_sch_probe(struct subchannel *sch)
  {
  	struct pmcw *pmcw = &sch->schib.pmcw;
  	struct vfio_ccw_private *private;
-	int ret;
+	int ret = -ENOMEM;

  	if (pmcw->qf) {
  		dev_warn(&sch->dev, "vfio: ccw: does not support QDIO: %s\n",
@@ -118,10 +122,13 @@ static int vfio_ccw_sch_probe(struct subchannel *sch)

  	private->io_region = kmem_cache_zalloc(vfio_ccw_io_region,
  					       GFP_KERNEL | GFP_DMA);
-	if (!private->io_region) {
-		kfree(private);
-		return -ENOMEM;
-	}
+	if (!private->io_region)
+		goto out_free;
+
+	private->cmd_region = kmem_cache_zalloc(vfio_ccw_cmd_region,
+						GFP_KERNEL | GFP_DMA);
+	if (!private->cmd_region)
+		goto out_free;

  	private->sch = sch;
  	dev_set_drvdata(&sch->dev, private);
@@ -148,7 +155,10 @@ static int vfio_ccw_sch_probe(struct subchannel *sch)
  	cio_disable_subchannel(sch);
  out_free:
  	dev_set_drvdata(&sch->dev, NULL);
-	kmem_cache_free(vfio_ccw_io_region, private->io_region);
+	if (private->cmd_region)
+		kmem_cache_free(vfio_ccw_cmd_region, private->cmd_region);
+	if (private->io_region)
+		kmem_cache_free(vfio_ccw_io_region, private->io_region);
  	kfree(private);
  	return ret;
  }
@@ -237,7 +247,7 @@ static struct css_driver vfio_ccw_sch_driver = {

  static int __init vfio_ccw_sch_init(void)
  {
-	int ret;
+	int ret = -ENOMEM;

  	vfio_ccw_work_q = create_singlethread_workqueue("vfio-ccw");
  	if (!vfio_ccw_work_q)
@@ -247,20 +257,30 @@ static int __init vfio_ccw_sch_init(void)
  					sizeof(struct ccw_io_region), 0,
  					SLAB_ACCOUNT, 0,
  					sizeof(struct ccw_io_region), NULL);
-	if (!vfio_ccw_io_region) {
-		destroy_workqueue(vfio_ccw_work_q);
-		return -ENOMEM;
-	}
+	if (!vfio_ccw_io_region)
+		goto out_err;
+
+	vfio_ccw_cmd_region = kmem_cache_create_usercopy("vfio_ccw_cmd_region",
+					sizeof(struct ccw_cmd_region), 0,
+					SLAB_ACCOUNT, 0,
+					sizeof(struct ccw_cmd_region), NULL);
+	if (!vfio_ccw_cmd_region)
+		goto out_err;

  	isc_register(VFIO_CCW_ISC);
  	ret = css_driver_register(&vfio_ccw_sch_driver);
  	if (ret) {
  		isc_unregister(VFIO_CCW_ISC);
-		kmem_cache_destroy(vfio_ccw_io_region);
-		destroy_workqueue(vfio_ccw_work_q);
+		goto out_err;
  	}

  	return ret;
+
+out_err:
+	kmem_cache_destroy(vfio_ccw_cmd_region);
+	kmem_cache_destroy(vfio_ccw_io_region);
+	destroy_workqueue(vfio_ccw_work_q);
+	return ret;
  }

  static void __exit vfio_ccw_sch_exit(void)
diff --git a/drivers/s390/cio/vfio_ccw_fsm.c b/drivers/s390/cio/vfio_ccw_fsm.c
index f94aa01f9c36..0caf77e8f377 100644
--- a/drivers/s390/cio/vfio_ccw_fsm.c
+++ b/drivers/s390/cio/vfio_ccw_fsm.c
@@ -3,8 +3,10 @@
   * Finite state machine for vfio-ccw device handling
   *
   * Copyright IBM Corp. 2017
+ * Copyright Red Hat, Inc. 2018
   *
   * Author(s): Dong Jia Shi <bjsdjshi@xxxxxxxxxxxxxxxxxx>
+ *            Cornelia Huck <cohuck@xxxxxxxxxx>
   */

  #include <linux/vfio.h>
@@ -68,6 +70,81 @@ static int fsm_io_helper(struct vfio_ccw_private *private)
  	return ret;
  }
+static int fsm_do_halt(struct vfio_ccw_private *private)
+{
+	struct subchannel *sch;
+	unsigned long flags;
+	int ccode;
+	int ret;
+
+	sch = private->sch;
+
+	spin_lock_irqsave(sch->lock, flags);
+	private->state = VFIO_CCW_STATE_BUSY;
+
+	/* Issue "Halt Subchannel" */
+	ccode = hsch(sch->schid);
+
+	switch (ccode) {
+	case 0:
+		/*
+		 * Initialize device status information
+		 */
+		sch->schib.scsw.cmd.actl |= SCSW_ACTL_HALT_PEND;
+		ret = 0;
+		break;
+	case 1:		/* Status pending */
+	case 2:		/* Busy */
+		ret = -EBUSY;
+		break;
+	case 3:		/* Device not operational */
+	{
+		ret = -ENODEV;
+		break;
+	}
+	default:
+		ret = ccode;
+	}
+	spin_unlock_irqrestore(sch->lock, flags);
+	return ret;
+}
+
+static int fsm_do_clear(struct vfio_ccw_private *private)
+{
+	struct subchannel *sch;
+	unsigned long flags;
+	int ccode;
+	int ret;
+
+	sch = private->sch;
+
+	spin_lock_irqsave(sch->lock, flags);
+	private->state = VFIO_CCW_STATE_BUSY;
+
+	/* Issue "Clear Subchannel" */
+	ccode = csch(sch->schid);
+
+	switch (ccode) {
+	case 0:
+		/*
+		 * Initialize device status information
+		 */
+		sch->schib.scsw.cmd.actl = SCSW_ACTL_CLEAR_PEND;
+		/* TODO: check what else we might need to clear */
+		ret = 0;
+		break;
+	case 3:		/* Device not operational */
+	{
+		ret = -ENODEV;
+		break;
+	}
+	default:
+		ret = ccode;
+	}
+	spin_unlock_irqrestore(sch->lock, flags);
+	return ret;
+}
+
  static void fsm_notoper(struct vfio_ccw_private *private,
  			enum vfio_ccw_event event)
  {
@@ -102,6 +179,20 @@ static void fsm_io_busy(struct vfio_ccw_private *private,
  	private->io_region->ret_code = -EBUSY;
  }
+static void fsm_async_error(struct vfio_ccw_private *private,
+			    enum vfio_ccw_event event)
+{
+	pr_err("vfio-ccw: FSM: halt/clear request from state:%d\n",
+	       private->state);
+	private->cmd_region->ret_code = -EIO;
+}
+
+static void fsm_async_busy(struct vfio_ccw_private *private,
+			   enum vfio_ccw_event event)
+{
+	private->cmd_region->ret_code = -EBUSY;
+}
+
  static void fsm_disabled_irq(struct vfio_ccw_private *private,
  			     enum vfio_ccw_event event)
  {
@@ -166,11 +257,11 @@ static void fsm_io_request(struct vfio_ccw_private *private,
  		}
  		return;
  	} else if (scsw->cmd.fctl & SCSW_FCTL_HALT_FUNC) {
-		/* XXX: Handle halt. */
+		/* halt is handled via the async cmd region */
  		io_region->ret_code = -EOPNOTSUPP;
  		goto err_out;
  	} else if (scsw->cmd.fctl & SCSW_FCTL_CLEAR_FUNC) {
-		/* XXX: Handle clear. */
+		/* clear is handled via the async cmd region */
  		io_region->ret_code = -EOPNOTSUPP;
  		goto err_out;
  	}
@@ -181,6 +272,59 @@ static void fsm_io_request(struct vfio_ccw_private *private,
  			       io_region->ret_code, errstr);
  }
+/*
+ * Deal with a halt request from userspace.
+ */
+static void fsm_halt_request(struct vfio_ccw_private *private,
+			     enum vfio_ccw_event event)
+{
+	struct ccw_cmd_region *cmd_region = private->cmd_region;
+	int state = private->state;
+
+	private->state = VFIO_CCW_STATE_BOXED;
+
+	if (cmd_region->command != VFIO_CCW_ASYNC_CMD_HSCH) {
+		/* should not happen? */
+		cmd_region->ret_code = -EINVAL;
+		goto err_out;
+	}
+
+	cmd_region->ret_code = fsm_do_halt(private);
+	if (cmd_region->ret_code)
+		goto err_out;
+
+	return;
+
+err_out:
+	private->state = state;
+}
+
+/*
+ * Deal with a clear request from userspace.
+ */
+static void fsm_clear_request(struct vfio_ccw_private *private,
+			      enum vfio_ccw_event event)
+{
+	struct ccw_cmd_region *cmd_region = private->cmd_region;
+	int state = private->state;
+
+	private->state = VFIO_CCW_STATE_BOXED;
+
+	if (cmd_region->command != VFIO_CCW_ASYNC_CMD_CSCH) {
+		/* should not happen? */
+		cmd_region->ret_code = -EINVAL;
+		goto err_out;
+	}
+
+	cmd_region->ret_code = fsm_do_clear(private);
+	if (cmd_region->ret_code)
+		goto err_out;
+
+	return;
+
+err_out:
+	private->state = state;
+}
  /*
   * Got an interrupt for a normal io (state busy).
   */
@@ -204,26 +348,36 @@ fsm_func_t *vfio_ccw_jumptable[NR_VFIO_CCW_STATES][NR_VFIO_CCW_EVENTS] = {
  	[VFIO_CCW_STATE_NOT_OPER] = {
  		[VFIO_CCW_EVENT_NOT_OPER]	= fsm_nop,
  		[VFIO_CCW_EVENT_IO_REQ]		= fsm_io_error,
+		[VFIO_CCW_EVENT_HALT_REQ]	= fsm_async_error,
+		[VFIO_CCW_EVENT_CLEAR_REQ]	= fsm_async_error,
  		[VFIO_CCW_EVENT_INTERRUPT]	= fsm_disabled_irq,
  	},
  	[VFIO_CCW_STATE_STANDBY] = {
  		[VFIO_CCW_EVENT_NOT_OPER]	= fsm_notoper,
  		[VFIO_CCW_EVENT_IO_REQ]		= fsm_io_error,
+		[VFIO_CCW_EVENT_HALT_REQ]	= fsm_async_error,
+		[VFIO_CCW_EVENT_CLEAR_REQ]	= fsm_async_error,
  		[VFIO_CCW_EVENT_INTERRUPT]	= fsm_irq,
  	},
  	[VFIO_CCW_STATE_IDLE] = {
  		[VFIO_CCW_EVENT_NOT_OPER]	= fsm_notoper,
  		[VFIO_CCW_EVENT_IO_REQ]		= fsm_io_request,
+		[VFIO_CCW_EVENT_HALT_REQ]	= fsm_halt_request,
+		[VFIO_CCW_EVENT_CLEAR_REQ]	= fsm_clear_request,
  		[VFIO_CCW_EVENT_INTERRUPT]	= fsm_irq,
  	},
  	[VFIO_CCW_STATE_BOXED] = {
  		[VFIO_CCW_EVENT_NOT_OPER]	= fsm_notoper,
  		[VFIO_CCW_EVENT_IO_REQ]		= fsm_io_busy,
+		[VFIO_CCW_EVENT_HALT_REQ]	= fsm_async_busy,
+		[VFIO_CCW_EVENT_CLEAR_REQ]	= fsm_async_busy,
  		[VFIO_CCW_EVENT_INTERRUPT]	= fsm_irq,
  	},
  	[VFIO_CCW_STATE_BUSY] = {
  		[VFIO_CCW_EVENT_NOT_OPER]	= fsm_notoper,
  		[VFIO_CCW_EVENT_IO_REQ]		= fsm_io_busy,
+		[VFIO_CCW_EVENT_HALT_REQ]	= fsm_halt_request,
+		[VFIO_CCW_EVENT_CLEAR_REQ]	= fsm_clear_request,
  		[VFIO_CCW_EVENT_INTERRUPT]	= fsm_irq,
  	},
  };
diff --git a/drivers/s390/cio/vfio_ccw_ops.c b/drivers/s390/cio/vfio_ccw_ops.c
index a5d731ed2a39..0e1f7f7bf927 100644
--- a/drivers/s390/cio/vfio_ccw_ops.c
+++ b/drivers/s390/cio/vfio_ccw_ops.c
@@ -148,11 +148,20 @@ static int vfio_ccw_mdev_open(struct mdev_device *mdev)
  	struct vfio_ccw_private *private =
  		dev_get_drvdata(mdev_parent_dev(mdev));
  	unsigned long events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;
+	int ret;
  	private->nb.notifier_call = vfio_ccw_mdev_notifier;

-	return vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
-				      &events, &private->nb);
+	ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
+				     &events, &private->nb);
+	if (ret)
+		return ret;
+
+	ret = vfio_ccw_register_async_dev_regions(private);
+	if (ret)
+		vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
+					 &private->nb);
+	return ret;
  }

  static void vfio_ccw_mdev_release(struct mdev_device *mdev)
diff --git a/drivers/s390/cio/vfio_ccw_private.h b/drivers/s390/cio/vfio_ccw_private.h
index a6f9f84526e2..1a41a14831ae 100644
--- a/drivers/s390/cio/vfio_ccw_private.h
+++ b/drivers/s390/cio/vfio_ccw_private.h
@@ -53,6 +53,8 @@ int vfio_ccw_register_dev_region(struct vfio_ccw_private *private,
  				 const struct vfio_ccw_regops *ops,
  				 size_t size, u32 flags, void *data);
+int vfio_ccw_register_async_dev_regions(struct vfio_ccw_private *private);
+
  /**
   * struct vfio_ccw_private
   * @sch: pointer to the subchannel
@@ -62,6 +64,7 @@ int vfio_ccw_register_dev_region(struct vfio_ccw_private *private,
   * @mdev: pointer to the mediated device
   * @nb: notifier for vfio events
   * @io_region: MMIO region to input/output I/O arguments/results
+ * @cmd_region: MMIO region for asynchronous I/O commands other than START
   * @region: additional regions for other subchannel operations
   * @num_regions: number of additional regions
   * @cp: channel program for the current I/O operation
@@ -79,6 +82,7 @@ struct vfio_ccw_private {
  	struct notifier_block	nb;
  	struct ccw_io_region	*io_region;
  	struct vfio_ccw_region *region;
+	struct ccw_cmd_region	*cmd_region;
  	int num_regions;

  	struct channel_program cp;
@@ -114,6 +118,8 @@ enum vfio_ccw_event {
  	VFIO_CCW_EVENT_NOT_OPER,
  	VFIO_CCW_EVENT_IO_REQ,
  	VFIO_CCW_EVENT_INTERRUPT,
+	VFIO_CCW_EVENT_HALT_REQ,
+	VFIO_CCW_EVENT_CLEAR_REQ,
  	/* last element! */
  	NR_VFIO_CCW_EVENTS
  };
diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h
index 565669f95534..c01472ec77ea 100644
--- a/include/uapi/linux/vfio.h
+++ b/include/uapi/linux/vfio.h
@@ -304,6 +304,7 @@ struct vfio_region_info_cap_type {
  #define VFIO_REGION_SUBTYPE_INTEL_IGD_HOST_CFG	(2)
  #define VFIO_REGION_SUBTYPE_INTEL_IGD_LPC_CFG	(3)
+
  #define VFIO_REGION_TYPE_GFX                    (1)
  #define VFIO_REGION_SUBTYPE_GFX_EDID            (1)
@@ -354,6 +355,9 @@ struct vfio_region_gfx_edid {
  #define VFIO_DEVICE_GFX_LINK_STATE_DOWN  2
  };
+/* ccw sub-types */
+#define VFIO_REGION_SUBTYPE_CCW_ASYNC_CMD	(1)
+
  /*
   * The MSIX mappable capability informs that MSIX data of a BAR can be mmapped
   * which allows direct access to non-MSIX registers which happened to be within
diff --git a/include/uapi/linux/vfio_ccw.h b/include/uapi/linux/vfio_ccw.h
index 2ec5f367ff78..cbecbf0cd54f 100644
--- a/include/uapi/linux/vfio_ccw.h
+++ b/include/uapi/linux/vfio_ccw.h
@@ -12,6 +12,7 @@

  #include <linux/types.h>

+/* used for START SUBCHANNEL, always present */
  struct ccw_io_region {
  #define ORB_AREA_SIZE 12
  	__u8	orb_area[ORB_AREA_SIZE];
@@ -22,4 +23,15 @@ struct ccw_io_region {
  	__u32	ret_code;
  } __packed;
+/*
+ * used for processing commands that trigger asynchronous actions
+ * Note: this is controlled by a capability
+ */
+#define VFIO_CCW_ASYNC_CMD_HSCH (1 << 0)
+#define VFIO_CCW_ASYNC_CMD_CSCH (1 << 1)
+struct ccw_cmd_region {
+	__u32 command;
+	__u32 ret_code;
+} __packed;
+
  #endif
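
Regarding "this is controlled by a capability": I assume userspace is expected to discover the region by walking the capability chain attached to the region info, roughly as sketched below. Untested; probe_async_region() and cap_find() are names of mine, and I only match on the subtype here since this patch does not define a type value for ccw regions.

#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

/* walk the capability chain appended to a region info structure */
static struct vfio_info_cap_header *cap_find(struct vfio_region_info *info,
					     __u16 id)
{
	struct vfio_info_cap_header *hdr;
	__u32 off = info->cap_offset;

	if (!(info->flags & VFIO_REGION_INFO_FLAG_CAPS))
		return NULL;

	while (off) {
		hdr = (void *)info + off;
		if (hdr->id == id)
			return hdr;
		off = hdr->next;
	}
	return NULL;
}

/* probe one region index; returns 0 and fills *offset if this is the
 * async command region, a negative errno otherwise */
static int probe_async_region(int device_fd, __u32 index, __u64 *offset)
{
	struct vfio_region_info *info;
	struct vfio_region_info_cap_type *cap;
	__u32 argsz = sizeof(*info);
	int ret = -ENODEV;

	info = calloc(1, argsz);
	if (!info)
		return -ENOMEM;
	info->argsz = argsz;
	info->index = index;

	if (ioctl(device_fd, VFIO_DEVICE_GET_REGION_INFO, info))
		goto out;

	if (info->argsz > argsz) {
		/* the capability chain did not fit; retry with the size the
		 * kernel asked for (sloppy about realloc failure in a sketch) */
		argsz = info->argsz;
		info = realloc(info, argsz);
		memset(info, 0, argsz);
		info->argsz = argsz;
		info->index = index;
		if (ioctl(device_fd, VFIO_DEVICE_GET_REGION_INFO, info))
			goto out;
	}

	cap = (void *)cap_find(info, VFIO_REGION_INFO_CAP_TYPE);
	if (cap && cap->subtype == VFIO_REGION_SUBTYPE_CCW_ASYNC_CMD) {
		*offset = info->offset;
		ret = 0;
	}
out:
	free(info);
	return ret;
}

The caller would presumably loop over the region indices reported by VFIO_DEVICE_GET_INFO's num_regions, starting past the fixed I/O region index.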