This patch adds the VFIO_IOMMU_SET_MSI_BINDING ioctl, which
(un)registers guest MSI bindings with the host. The host can then use
those stage 1 bindings to build a nested stage binding targeting the
physical MSIs.

Signed-off-by: Eric Auger <eric.auger@xxxxxxxxxx>

---

v8 -> v9:
- merge VFIO_IOMMU_BIND_MSI/VFIO_IOMMU_UNBIND_MSI into a single
  VFIO_IOMMU_SET_MSI_BINDING ioctl
- ioctl id changed

v6 -> v7:
- removed the dev arg

v3 -> v4:
- add UNBIND
- unwind on BIND error

v2 -> v3:
- adapt to new proto of bind_guest_msi
- directly use vfio_iommu_for_each_dev

v1 -> v2:
- s/vfio_iommu_type1_guest_msi_binding/vfio_iommu_type1_bind_guest_msi
---
 drivers/vfio/vfio_iommu_type1.c | 56 +++++++++++++++++++++++++++++++++
 include/uapi/linux/vfio.h       | 20 ++++++++++++
 2 files changed, 76 insertions(+)

diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 307f059d3080..c858be878590 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -1829,6 +1829,42 @@ static int vfio_cache_inv_fn(struct device *dev, void *data)
 	return iommu_cache_invalidate(dc->domain, dev, &ustruct->info);
 }
 
+static int
+vfio_bind_msi(struct vfio_iommu *iommu,
+	      dma_addr_t giova, phys_addr_t gpa, size_t size)
+{
+	struct vfio_domain *d;
+	int ret = 0;
+
+	mutex_lock(&iommu->lock);
+
+	list_for_each_entry(d, &iommu->domain_list, next) {
+		ret = iommu_bind_guest_msi(d->domain, giova, gpa, size);
+		if (ret)
+			goto unwind;
+	}
+	goto unlock;
+unwind:
+	list_for_each_entry_continue_reverse(d, &iommu->domain_list, next) {
+		iommu_unbind_guest_msi(d->domain, giova);
+	}
+unlock:
+	mutex_unlock(&iommu->lock);
+	return ret;
+}
+
+static void
+vfio_unbind_msi(struct vfio_iommu *iommu, dma_addr_t giova)
+{
+	struct vfio_domain *d;
+
+	mutex_lock(&iommu->lock);
+	list_for_each_entry(d, &iommu->domain_list, next) {
+		iommu_unbind_guest_msi(d->domain, giova);
+	}
+	mutex_unlock(&iommu->lock);
+}
+
 static long vfio_iommu_type1_ioctl(void *iommu_data,
 				   unsigned int cmd, unsigned long arg)
 {
@@ -1936,6 +1972,26 @@ static long vfio_iommu_type1_ioctl(void *iommu_data,
 					  &ustruct);
 		mutex_unlock(&iommu->lock);
 		return ret;
+	} else if (cmd == VFIO_IOMMU_SET_MSI_BINDING) {
+		struct vfio_iommu_type1_set_msi_binding ustruct;
+
+		minsz = offsetofend(struct vfio_iommu_type1_set_msi_binding,
+				    size);
+
+		if (copy_from_user(&ustruct, (void __user *)arg, minsz))
+			return -EFAULT;
+
+		if (ustruct.argsz < minsz)
+			return -EINVAL;
+
+		if (ustruct.flags == VFIO_IOMMU_UNBIND_MSI) {
+			vfio_unbind_msi(iommu, ustruct.iova);
+			return 0;
+		}
+		if (ustruct.flags == VFIO_IOMMU_BIND_MSI)
+			return vfio_bind_msi(iommu, ustruct.iova, ustruct.gpa,
+					     ustruct.size);
+		return -EINVAL;
 	}
 
 	return -ENOTTY;
diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h
index b31c25b682c5..deadbd84f2cf 100644
--- a/include/uapi/linux/vfio.h
+++ b/include/uapi/linux/vfio.h
@@ -795,6 +795,26 @@ struct vfio_iommu_type1_cache_invalidate {
 };
 #define VFIO_IOMMU_CACHE_INVALIDATE	_IO(VFIO_TYPE, VFIO_BASE + 23)
 
+/**
+ * VFIO_IOMMU_SET_MSI_BINDING - _IOW(VFIO_TYPE, VFIO_BASE + 24,
+ *			struct vfio_iommu_type1_set_msi_binding)
+ *
+ * Pass a stage 1 MSI doorbell mapping to the host so that the host
+ * can build a nested stage 2 mapping, or conversely tear down a
+ * previously bound stage 1 MSI binding.
+ */
+struct vfio_iommu_type1_set_msi_binding {
+	__u32	argsz;
+	__u32	flags;
+#define VFIO_IOMMU_BIND_MSI	(1 << 0)
+#define VFIO_IOMMU_UNBIND_MSI	(1 << 1)
+	__u64	iova;	/* MSI guest IOVA */
+	/* Fields below are used on BIND */
+	__u64	gpa;	/* MSI guest physical address */
+	__u64	size;	/* size of stage 1 mapping (bytes) */
+};
+#define VFIO_IOMMU_SET_MSI_BINDING	_IO(VFIO_TYPE, VFIO_BASE + 24)
+
 /* -------- Additional API for SPAPR TCE (Server POWERPC) IOMMU -------- */
 
-- 
2.20.1
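
For illustration, a minimal userspace sketch (not part of the patch) of
how a VMM might drive the new ioctl. It assumes `container` is a VFIO
container fd already configured with an IOMMU type that supports nested
stage translation; the helper name and the doorbell addresses are made
up for the example.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

static int set_msi_binding(int container, uint64_t giova, uint64_t gpa,
			   uint64_t size, uint32_t flags)
{
	struct vfio_iommu_type1_set_msi_binding bind;

	/* argsz covers the whole struct; 'size' is its last field */
	memset(&bind, 0, sizeof(bind));
	bind.argsz = sizeof(bind);
	bind.flags = flags;
	bind.iova = giova;	/* guest IOVA of the MSI doorbell */
	bind.gpa = gpa;		/* guest PA it maps to (BIND only) */
	bind.size = size;	/* stage 1 mapping size (BIND only) */

	return ioctl(container, VFIO_IOMMU_SET_MSI_BINDING, &bind);
}

static int example(int container)
{
	int ret;

	/* register the guest's stage 1 MSI doorbell mapping ... */
	ret = set_msi_binding(container, 0x8000000, 0x8040000, 0x1000,
			      VFIO_IOMMU_BIND_MSI);
	if (ret)
		return ret;

	/* ... and tear it down again, e.g. when the guest unmaps it */
	return set_msi_binding(container, 0x8000000, 0, 0,
			       VFIO_IOMMU_UNBIND_MSI);
}

Merging BIND and UNBIND into one ioctl keeps the uapi surface small:
the flags field selects the direction, which is why the handler accepts
exactly one of the two flags and returns -EINVAL otherwise.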