This is a note to let you know that I've just added the patch titled

    iommu/virtio: Detach domain on endpoint release

to the 6.4-stable tree which can be found at:
    http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=summary

The filename of the patch is:
     iommu-virtio-detach-domain-on-endpoint-release.patch
and it can be found in the queue-6.4 subdirectory.

If you, or anyone else, feels it should not be added to the stable tree,
please let <stable@xxxxxxxxxxxxxxx> know about it.



commit ed0cc99a82518444e4579c309fb7d1e887d66c1d
Author: Jean-Philippe Brucker <jean-philippe@xxxxxxxxxx>
Date:   Mon May 15 12:39:48 2023 +0100

    iommu/virtio: Detach domain on endpoint release

    [ Upstream commit 809d0810e3520da669d231303608cdf5fe5c1a70 ]

    When an endpoint is released, for example a PCIe VF being destroyed or
    a function hot-unplugged, it should be detached from its domain. Send
    a DETACH request.

    Fixes: edcd69ab9a32 ("iommu: Add virtio-iommu driver")
    Reported-by: Akihiko Odaki <akihiko.odaki@xxxxxxxxxx>
    Link: https://lore.kernel.org/all/15bf1b00-3aa0-973a-3a86-3fa5c4d41d2c@xxxxxxxxxx/
    Signed-off-by: Jean-Philippe Brucker <jean-philippe@xxxxxxxxxx>
    Tested-by: Akihiko Odaki <akihiko.odaki@xxxxxxxxxx>
    Link: https://lore.kernel.org/r/20230515113946.1017624-2-jean-philippe@xxxxxxxxxx
    Signed-off-by: Joerg Roedel <jroedel@xxxxxxx>
    Signed-off-by: Sasha Levin <sashal@xxxxxxxxxx>

diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
index 5b8fe9bfa9a5b..fd316a37d7562 100644
--- a/drivers/iommu/virtio-iommu.c
+++ b/drivers/iommu/virtio-iommu.c
@@ -788,6 +788,29 @@ static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev)
 	return 0;
 }
 
+static void viommu_detach_dev(struct viommu_endpoint *vdev)
+{
+	int i;
+	struct virtio_iommu_req_detach req;
+	struct viommu_domain *vdomain = vdev->vdomain;
+	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(vdev->dev);
+
+	if (!vdomain)
+		return;
+
+	req = (struct virtio_iommu_req_detach) {
+		.head.type	= VIRTIO_IOMMU_T_DETACH,
+		.domain		= cpu_to_le32(vdomain->id),
+	};
+
+	for (i = 0; i < fwspec->num_ids; i++) {
+		req.endpoint = cpu_to_le32(fwspec->ids[i]);
+		WARN_ON(viommu_send_req_sync(vdev->viommu, &req, sizeof(req)));
+	}
+	vdomain->nr_endpoints--;
+	vdev->vdomain = NULL;
+}
+
 static int viommu_map_pages(struct iommu_domain *domain, unsigned long iova,
 			    phys_addr_t paddr, size_t pgsize, size_t pgcount,
 			    int prot, gfp_t gfp, size_t *mapped)
@@ -990,6 +1013,7 @@ static void viommu_release_device(struct device *dev)
 {
 	struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);
 
+	viommu_detach_dev(vdev);
 	iommu_put_resv_regions(dev, &vdev->resv_regions);
 	kfree(vdev);
 }