From: "Liu, Yi L" <yi.l.liu@xxxxxxxxxxxxxxx> When the guest "owns" the stage 1 translation structures, the host IOMMU driver has no knowledge of caching structure updates unless the guest invalidation requests are trapped and passed down to the host. This patch adds the VFIO_IOMMU_CACHE_INVALIDATE ioctl with aims at propagating guest stage1 IOMMU cache invalidations to the host. Signed-off-by: Liu, Yi L <yi.l.liu@xxxxxxxxxxxxxxx> Signed-off-by: Eric Auger <eric.auger@xxxxxxxxxx> --- v2 -> v3: - introduce vfio_iommu_for_each_dev back in this patch v1 -> v2: - s/TLB/CACHE - remove vfio_iommu_task usage - commit message rewording --- drivers/vfio/vfio_iommu_type1.c | 47 +++++++++++++++++++++++++++++++++ include/uapi/linux/vfio.h | 13 +++++++++ 2 files changed, 60 insertions(+) diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c index 222e9199edbf..12a40b9db6aa 100644 --- a/drivers/vfio/vfio_iommu_type1.c +++ b/drivers/vfio/vfio_iommu_type1.c @@ -113,6 +113,26 @@ struct vfio_regions { #define IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu) \ (!list_empty(&iommu->domain_list)) +/* iommu->lock must be held */ +static int +vfio_iommu_for_each_dev(struct vfio_iommu *iommu, void *data, + int (*fn)(struct device *, void *)) +{ + struct vfio_domain *d; + struct vfio_group *g; + int ret = 0; + + list_for_each_entry(d, &iommu->domain_list, next) { + list_for_each_entry(g, &d->group_list, next) { + ret = iommu_group_for_each_dev(g->iommu_group, + data, fn); + if (ret) + break; + } + } + return ret; +} + static int put_pfn(unsigned long pfn, int prot); /* @@ -1681,6 +1701,15 @@ vfio_attach_pasid_table(struct vfio_iommu *iommu, return ret; } +static int vfio_cache_inv_fn(struct device *dev, void *data) +{ + struct vfio_iommu_type1_cache_invalidate *ustruct = + (struct vfio_iommu_type1_cache_invalidate *)data; + struct iommu_domain *d = iommu_get_domain_for_dev(dev); + + return iommu_cache_invalidate(d, dev, &ustruct->info); +} + static long vfio_iommu_type1_ioctl(void *iommu_data, unsigned int cmd, unsigned long arg) { @@ -1767,6 +1796,24 @@ static long vfio_iommu_type1_ioctl(void *iommu_data, } else if (cmd == VFIO_IOMMU_DETACH_PASID_TABLE) { vfio_detach_pasid_table(iommu); return 0; + } else if (cmd == VFIO_IOMMU_CACHE_INVALIDATE) { + struct vfio_iommu_type1_cache_invalidate ustruct; + int ret; + + minsz = offsetofend(struct vfio_iommu_type1_cache_invalidate, + info); + + if (copy_from_user(&ustruct, (void __user *)arg, minsz)) + return -EFAULT; + + if (ustruct.argsz < minsz || ustruct.flags) + return -EINVAL; + + mutex_lock(&iommu->lock); + ret = vfio_iommu_for_each_dev(iommu, &ustruct, + vfio_cache_inv_fn); + mutex_unlock(&iommu->lock); + return ret; } return -ENOTTY; diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h index 329d378565d9..29f0ef2d805d 100644 --- a/include/uapi/linux/vfio.h +++ b/include/uapi/linux/vfio.h @@ -776,6 +776,19 @@ struct vfio_iommu_type1_attach_pasid_table { #define VFIO_IOMMU_ATTACH_PASID_TABLE _IO(VFIO_TYPE, VFIO_BASE + 22) #define VFIO_IOMMU_DETACH_PASID_TABLE _IO(VFIO_TYPE, VFIO_BASE + 23) +/** + * VFIO_IOMMU_CACHE_INVALIDATE - _IOWR(VFIO_TYPE, VFIO_BASE + 24, + * struct vfio_iommu_type1_cache_invalidate) + * + * Propagate guest IOMMU cache invalidation to the host. 
+ */ +struct vfio_iommu_type1_cache_invalidate { + __u32 argsz; + __u32 flags; + struct iommu_cache_invalidate_info info; +}; +#define VFIO_IOMMU_CACHE_INVALIDATE _IO(VFIO_TYPE, VFIO_BASE + 24) + /* -------- Additional API for SPAPR TCE (Server POWERPC) IOMMU -------- */ /* -- 2.20.1
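
For reference, a minimal userspace sketch of how a VMM could drive the
new ioctl once a container is set up. This is illustrative only, not
part of the patch: the container fd setup and the exact contents of
struct iommu_cache_invalidate_info come from the companion IOMMU uapi
series and are assumed here, and forward_guest_invalidation() is a
hypothetical helper name.

#include <string.h>
#include <sys/ioctl.h>
#include <linux/iommu.h>
#include <linux/vfio.h>

/*
 * Hypothetical helper: forward one guest stage 1 invalidation request
 * to the host through the VFIO container. "container" is assumed to be
 * an already configured VFIO container fd; "inv" is assumed to have
 * been decoded from the guest request into the layout defined by the
 * companion IOMMU uapi series.
 */
static int forward_guest_invalidation(int container,
				      const struct iommu_cache_invalidate_info *inv)
{
	struct vfio_iommu_type1_cache_invalidate ustruct;

	memset(&ustruct, 0, sizeof(ustruct));
	ustruct.argsz = sizeof(ustruct);	/* >= minsz checked by the handler */
	ustruct.flags = 0;			/* must be zero, see the ioctl code */
	ustruct.info = *inv;			/* invalidation payload */

	return ioctl(container, VFIO_IOMMU_CACHE_INVALIDATE, &ustruct);
}

Since the handler walks every device in all of the container's domains,
a single call covers all devices attached to the guest's stage 1
context.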