Hi Alex,

On 6/4/19 12:31 AM, Alex Williamson wrote:
> On Sun, 26 May 2019 18:10:01 +0200
> Eric Auger <eric.auger@xxxxxxxxxx> wrote:
>
>> This patch registers a fault handler which records faults in
>> a circular buffer and then signals an eventfd. This buffer is
>> exposed within the fault region.
>>
>> Signed-off-by: Eric Auger <eric.auger@xxxxxxxxxx>
>>
>> ---
>>
>> v3 -> v4:
>> - move iommu_unregister_device_fault_handler to vfio_pci_release
>> ---
>>  drivers/vfio/pci/vfio_pci.c         | 49 +++++++++++++++++++++++++++++
>>  drivers/vfio/pci/vfio_pci_private.h |  1 +
>>  2 files changed, 50 insertions(+)
>>
>> diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
>> index f75f61127277..520999994ba8 100644
>> --- a/drivers/vfio/pci/vfio_pci.c
>> +++ b/drivers/vfio/pci/vfio_pci.c
>> @@ -30,6 +30,7 @@
>>  #include <linux/vfio.h>
>>  #include <linux/vgaarb.h>
>>  #include <linux/nospec.h>
>> +#include <linux/circ_buf.h>
>>
>>  #include "vfio_pci_private.h"
>>
>> @@ -296,6 +297,46 @@ static const struct vfio_pci_regops vfio_pci_fault_prod_regops = {
>>          .add_capability = vfio_pci_fault_prod_add_capability,
>>  };
>>
>> +int vfio_pci_iommu_dev_fault_handler(struct iommu_fault_event *evt, void *data)
>> +{
>> +        struct vfio_pci_device *vdev = (struct vfio_pci_device *) data;
>> +        struct vfio_region_fault_prod *prod_region =
>> +                (struct vfio_region_fault_prod *)vdev->fault_pages;
>> +        struct vfio_region_fault_cons *cons_region =
>> +                (struct vfio_region_fault_cons *)(vdev->fault_pages + 2 * PAGE_SIZE);
>> +        struct iommu_fault *new =
>> +                (struct iommu_fault *)(vdev->fault_pages + prod_region->offset +
>> +                        prod_region->prod * prod_region->entry_size);
>> +        int prod, cons, size;
>> +
>> +        mutex_lock(&vdev->fault_queue_lock);
>> +
>> +        if (!vdev->fault_abi)
>> +                goto unlock;
>> +
>> +        prod = prod_region->prod;
>> +        cons = cons_region->cons;
>> +        size = prod_region->nb_entries;
>> +
>> +        if (CIRC_SPACE(prod, cons, size) < 1)
>> +                goto unlock;
>> +
>> +        *new = evt->fault;
>> +        prod = (prod + 1) % size;
>> +        prod_region->prod = prod;
>> +        mutex_unlock(&vdev->fault_queue_lock);
>> +
>> +        mutex_lock(&vdev->igate);
>> +        if (vdev->dma_fault_trigger)
>> +                eventfd_signal(vdev->dma_fault_trigger, 1);
>> +        mutex_unlock(&vdev->igate);
>> +        return 0;
>> +
>> +unlock:
>> +        mutex_unlock(&vdev->fault_queue_lock);
>> +        return -EINVAL;
>> +}
>> +
>>  static int vfio_pci_init_fault_region(struct vfio_pci_device *vdev)
>>  {
>>          struct vfio_region_fault_prod *header;
>> @@ -328,6 +369,13 @@ static int vfio_pci_init_fault_region(struct vfio_pci_device *vdev)
>>          header = (struct vfio_region_fault_prod *)vdev->fault_pages;
>>          header->version = -1;
>>          header->offset = PAGE_SIZE;
>> +
>> +        ret = iommu_register_device_fault_handler(&vdev->pdev->dev,
>> +                                        vfio_pci_iommu_dev_fault_handler,
>> +                                        vdev);
>> +        if (ret)
>> +                goto out;
>> +
>>          return 0;
>>  out:
>>          kfree(vdev->fault_pages);
>> @@ -570,6 +618,7 @@ static void vfio_pci_release(void *device_data)
>>          if (!(--vdev->refcnt)) {
>>                  vfio_spapr_pci_eeh_release(vdev->pdev);
>>                  vfio_pci_disable(vdev);
>> +                iommu_unregister_device_fault_handler(&vdev->pdev->dev);
>
>
> But this can fail if there are pending faults which leaves a device
> reference and then the system is broken :(

This series only features unrecoverable errors, and for those the
unregistration cannot fail. Now that only unrecoverable errors were
added, I admit this is confusing. We need to sort this out or clean up
the dependencies.
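To illustrate the point, here is only a sketch (not something this
series does today) of how the release path could at least check the
return value instead of dropping it. The warn-only policy below is just
an assumption for discussion; it makes the failure visible but does not
fix the leaked device reference you describe:

        int ret = iommu_unregister_device_fault_handler(&vdev->pdev->dev);

        /*
         * Sketch only: an error here (e.g. -EBUSY) means faults are still
         * pending for the device. Warning surfaces the problem, but a real
         * fix needs to drain or forbid pending faults before this point.
         */
        if (ret)
                dev_warn(&vdev->pdev->dev,
                         "failed to unregister IOMMU fault handler (%d)\n",
                         ret);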
Thanks

Eric
>
>>          }
>>
>>          mutex_unlock(&vdev->reflck->lock);
>> diff --git a/drivers/vfio/pci/vfio_pci_private.h b/drivers/vfio/pci/vfio_pci_private.h
>> index 8e0a55682d3f..a9276926f008 100644
>> --- a/drivers/vfio/pci/vfio_pci_private.h
>> +++ b/drivers/vfio/pci/vfio_pci_private.h
>> @@ -122,6 +122,7 @@ struct vfio_pci_device {
>>          int                     ioeventfds_nr;
>>          struct eventfd_ctx      *err_trigger;
>>          struct eventfd_ctx      *req_trigger;
>> +        struct eventfd_ctx      *dma_fault_trigger;
>>          struct mutex            fault_queue_lock;
>>          int                     fault_abi;
>>          struct list_head        dummy_resources_list;
_______________________________________________
kvmarm mailing list
kvmarm@xxxxxxxxxxxxxxxxxxxxx
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm