iommu_report_device_fault() has been converted to auto-respond to a
page fault when it fails to enqueue it, so there is no longer any need
for it to return an error code. Make it return void.

Suggested-by: Jason Gunthorpe <jgg@xxxxxxxxxx>
Signed-off-by: Lu Baolu <baolu.lu@xxxxxxxxxxxxxxx>
---
 include/linux/iommu.h                       |  5 ++---
 drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c |  4 ++--
 drivers/iommu/intel/svm.c                   | 19 +++++++------------
 drivers/iommu/io-pgfault.c                  | 26 +++++++++-----------------
 4 files changed, 20 insertions(+), 34 deletions(-)

diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index ceadbeaffd98..6ab90cb93da9 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -1545,7 +1545,7 @@ struct iopf_queue *iopf_queue_alloc(const char *name);
 void iopf_queue_free(struct iopf_queue *queue);
 int iopf_queue_discard_partial(struct iopf_queue *queue);
 void iopf_free_group(struct iopf_group *group);
-int iommu_report_device_fault(struct device *dev, struct iopf_fault *evt);
+void iommu_report_device_fault(struct device *dev, struct iopf_fault *evt);
 void iopf_group_response(struct iopf_group *group,
 			 enum iommu_page_response_code status);
 #else
@@ -1583,10 +1583,9 @@ static inline void iopf_free_group(struct iopf_group *group)
 {
 }
 
-static inline int
+static inline void
 iommu_report_device_fault(struct device *dev, struct iopf_fault *evt)
 {
-	return -ENODEV;
 }
 
 static inline void iopf_group_response(struct iopf_group *group,
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index 42eb59cb99f4..02580364acda 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -1455,7 +1455,7 @@ arm_smmu_find_master(struct arm_smmu_device *smmu, u32 sid)
 /* IRQ and event handlers */
 static int arm_smmu_handle_evt(struct arm_smmu_device *smmu, u64 *evt)
 {
-	int ret;
+	int ret = 0;
 	u32 perm = 0;
 	struct arm_smmu_master *master;
 	bool ssid_valid = evt[0] & EVTQ_0_SSV;
@@ -1511,7 +1511,7 @@ static int arm_smmu_handle_evt(struct arm_smmu_device *smmu, u64 *evt)
 		goto out_unlock;
 	}
 
-	ret = iommu_report_device_fault(master->dev, &fault_evt);
+	iommu_report_device_fault(master->dev, &fault_evt);
 out_unlock:
 	mutex_unlock(&smmu->streams_mutex);
 	return ret;
diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
index 2f8716636dbb..b644d57da841 100644
--- a/drivers/iommu/intel/svm.c
+++ b/drivers/iommu/intel/svm.c
@@ -561,14 +561,11 @@ static int prq_to_iommu_prot(struct page_req_dsc *req)
 	return prot;
 }
 
-static int intel_svm_prq_report(struct intel_iommu *iommu, struct device *dev,
-				struct page_req_dsc *desc)
+static void intel_svm_prq_report(struct intel_iommu *iommu, struct device *dev,
+				 struct page_req_dsc *desc)
 {
 	struct iopf_fault event = { };
 
-	if (!dev || !dev_is_pci(dev))
-		return -ENODEV;
-
 	/* Fill in event data for device specific processing */
 	event.fault.type = IOMMU_FAULT_PAGE_REQ;
 	event.fault.prm.addr = (u64)desc->addr << VTD_PAGE_SHIFT;
@@ -601,7 +598,7 @@ static int intel_svm_prq_report(struct intel_iommu *iommu, struct device *dev,
 		event.fault.prm.private_data[0] = ktime_to_ns(ktime_get());
 	}
 
-	return iommu_report_device_fault(dev, &event);
+	iommu_report_device_fault(dev, &event);
 }
 
 static void handle_bad_prq_event(struct intel_iommu *iommu,
@@ -704,12 +701,10 @@ static irqreturn_t prq_event_thread(int irq, void *d)
 		if (!pdev)
 			goto bad_req;
 
-		if (intel_svm_prq_report(iommu, &pdev->dev, req))
-			handle_bad_prq_event(iommu, req, QI_RESP_INVALID);
-		else
-			trace_prq_report(iommu, &pdev->dev, req->qw_0, req->qw_1,
-					 req->priv_data[0], req->priv_data[1],
-					 iommu->prq_seq_number++);
+		intel_svm_prq_report(iommu, &pdev->dev, req);
+		trace_prq_report(iommu, &pdev->dev, req->qw_0, req->qw_1,
+				 req->priv_data[0], req->priv_data[1],
+				 iommu->prq_seq_number++);
 		pci_dev_put(pdev);
 prq_advance:
 		head = (head + sizeof(*req)) & PRQ_RING_MASK;
diff --git a/drivers/iommu/io-pgfault.c b/drivers/iommu/io-pgfault.c
index e932ab5e8dc9..f70a09149b95 100644
--- a/drivers/iommu/io-pgfault.c
+++ b/drivers/iommu/io-pgfault.c
@@ -176,26 +176,23 @@ static struct iopf_group *iopf_group_alloc(struct iommu_fault_param *iopf_param,
  * freed after the device has stopped generating page faults (or the iommu
  * hardware has been set to block the page faults) and the pending page faults
  * have been flushed.
- *
- * Return: 0 on success and <0 on error.
  */
-int iommu_report_device_fault(struct device *dev, struct iopf_fault *evt)
+void iommu_report_device_fault(struct device *dev, struct iopf_fault *evt)
 {
 	struct iommu_fault *fault = &evt->fault;
 	struct iommu_fault_param *iopf_param;
 	struct iopf_group abort_group = {};
 	struct iopf_group *group;
-	int ret;
 
 	iopf_param = iopf_get_dev_fault_param(dev);
 	if (WARN_ON(!iopf_param))
-		return -ENODEV;
+		return;
 
 	if (!(fault->prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) {
-		ret = report_partial_fault(iopf_param, fault);
+		report_partial_fault(iopf_param, fault);
 		iopf_put_dev_fault_param(iopf_param);
 		/* A request that is not the last does not need to be ack'd */
-		return ret;
+		return;
 	}
 
 	/*
@@ -207,25 +204,21 @@ int iommu_report_device_fault(struct device *dev, struct iopf_fault *evt)
 	 * leaving, otherwise partial faults will be stuck.
 	 */
 	group = iopf_group_alloc(iopf_param, evt, &abort_group);
-	if (group == &abort_group) {
-		ret = -ENOMEM;
+	if (group == &abort_group)
 		goto err_abort;
-	}
 
 	group->domain = get_domain_for_iopf(dev, fault);
-	if (!group->domain) {
-		ret = -EINVAL;
+	if (!group->domain)
 		goto err_abort;
-	}
 
 	/*
 	 * On success iopf_handler must call iopf_group_response() and
 	 * iopf_free_group()
 	 */
-	ret = group->domain->iopf_handler(group);
-	if (ret)
+	if (group->domain->iopf_handler(group))
 		goto err_abort;
-	return 0;
+
+	return;
 
 err_abort:
 	iopf_group_response(group, IOMMU_PAGE_RESP_FAILURE);
@@ -233,7 +226,6 @@ int iommu_report_device_fault(struct device *dev, struct iopf_fault *evt)
 		__iopf_free_group(group);
 	else
 		iopf_free_group(group);
-	return ret;
 }
 EXPORT_SYMBOL_GPL(iommu_report_device_fault);
-- 
2.34.1
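
For illustration, this is the calling convention the change results in,
as a minimal driver-side sketch (the handler name and the event setup
below are hypothetical, not part of the patch):

	/*
	 * Hypothetical page-request handler after this change. Since
	 * iommu_report_device_fault() now responds to the fault itself
	 * (IOMMU_PAGE_RESP_FAILURE) whenever it cannot enqueue it, the
	 * caller just reports the fault and moves on; there is no error
	 * code to check or propagate.
	 */
	static void example_handle_page_request(struct device *dev, u64 addr)
	{
		struct iopf_fault evt = {};

		evt.fault.type = IOMMU_FAULT_PAGE_REQ;
		evt.fault.prm.addr = addr;
		evt.fault.prm.flags = IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE;

		iommu_report_device_fault(dev, &evt);	/* fire and forget */
	}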