From: Lu Baolu <baolu.lu@xxxxxxxxxxxxxxx>

The pasid_mutex was used to protect the set/remove_dev_pasid() paths. It
duplicates the protection already provided by iommu_sva_lock, so remove
it to avoid the redundant locking.

Reviewed-by: Jacob Pan <jacob.jun.pan@xxxxxxxxxxxxxxx>
Signed-off-by: Lu Baolu <baolu.lu@xxxxxxxxxxxxxxx>
---
 drivers/iommu/intel/svm.c | 45 +++++----------------------------------
 1 file changed, 5 insertions(+), 40 deletions(-)

diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
index e95b339e9cdc..2a82864e9d57 100644
--- a/drivers/iommu/intel/svm.c
+++ b/drivers/iommu/intel/svm.c
@@ -259,8 +259,6 @@ static const struct mmu_notifier_ops intel_mmuops = {
 	.invalidate_range = intel_invalidate_range,
 };
 
-static DEFINE_MUTEX(pasid_mutex);
-
 static int pasid_to_svm_sdev(struct device *dev, unsigned int pasid,
 			     struct intel_svm **rsvm,
 			     struct intel_svm_dev **rsdev)
@@ -268,10 +266,6 @@ static int pasid_to_svm_sdev(struct device *dev, unsigned int pasid,
 	struct intel_svm_dev *sdev = NULL;
 	struct intel_svm *svm;
 
-	/* The caller should hold the pasid_mutex lock */
-	if (WARN_ON(!mutex_is_locked(&pasid_mutex)))
-		return -EINVAL;
-
 	if (pasid == IOMMU_PASID_INVALID || pasid >= PASID_MAX)
 		return -EINVAL;
 
@@ -371,22 +365,19 @@ static int intel_svm_bind_mm(struct intel_iommu *iommu, struct device *dev,
 	return ret;
 }
 
-/* Caller must hold pasid_mutex */
-static int intel_svm_unbind_mm(struct device *dev, u32 pasid)
+void intel_svm_remove_dev_pasid(struct device *dev, u32 pasid)
 {
 	struct intel_svm_dev *sdev;
 	struct intel_iommu *iommu;
 	struct intel_svm *svm;
 	struct mm_struct *mm;
-	int ret = -EINVAL;
 
 	iommu = device_to_iommu(dev, NULL, NULL);
 	if (!iommu)
-		goto out;
+		return;
 
-	ret = pasid_to_svm_sdev(dev, pasid, &svm, &sdev);
-	if (ret)
-		goto out;
+	if (pasid_to_svm_sdev(dev, pasid, &svm, &sdev))
+		return;
 	mm = svm->mm;
 
 	if (sdev) {
@@ -418,8 +409,6 @@ static int intel_svm_unbind_mm(struct device *dev, u32 pasid)
 			kfree(svm);
 		}
 	}
-out:
-	return ret;
 }
 
 /* Page request queue descriptor */
@@ -520,19 +509,7 @@ static void intel_svm_drain_prq(struct device *dev, u32 pasid)
 		goto prq_retry;
 	}
 
-	/*
-	 * A work in IO page fault workqueue may try to lock pasid_mutex now.
-	 * Holding pasid_mutex while waiting in iopf_queue_flush_dev() for
-	 * all works in the workqueue to finish may cause deadlock.
-	 *
-	 * It's unnecessary to hold pasid_mutex in iopf_queue_flush_dev().
-	 * Unlock it to allow the works to be handled while waiting for
-	 * them to finish.
-	 */
-	lockdep_assert_held(&pasid_mutex);
-	mutex_unlock(&pasid_mutex);
 	iopf_queue_flush_dev(dev);
-	mutex_lock(&pasid_mutex);
 
 	/*
 	 * Perform steps described in VT-d spec CH7.10 to drain page
@@ -827,26 +804,14 @@ int intel_svm_page_response(struct device *dev,
 	return ret;
 }
 
-void intel_svm_remove_dev_pasid(struct device *dev, ioasid_t pasid)
-{
-	mutex_lock(&pasid_mutex);
-	intel_svm_unbind_mm(dev, pasid);
-	mutex_unlock(&pasid_mutex);
-}
-
 static int intel_svm_set_dev_pasid(struct iommu_domain *domain,
 				   struct device *dev, ioasid_t pasid)
 {
 	struct device_domain_info *info = dev_iommu_priv_get(dev);
 	struct intel_iommu *iommu = info->iommu;
 	struct mm_struct *mm = domain->mm;
-	int ret;
 
-	mutex_lock(&pasid_mutex);
-	ret = intel_svm_bind_mm(iommu, dev, mm);
-	mutex_unlock(&pasid_mutex);
-
-	return ret;
+	return intel_svm_bind_mm(iommu, dev, mm);
 }
 
 static void intel_svm_domain_free(struct iommu_domain *domain)
-- 
2.25.1
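
A note on why dropping pasid_mutex is safe: the serialization it provided
is already supplied by the IOMMU SVA core, which holds iommu_sva_lock
while it reaches the driver's set_dev_pasid()/remove_dev_pasid()
callbacks. The sketch below is a simplified illustration of that
caller-side locking, not the literal code in drivers/iommu/iommu-sva.c;
sva_domain_attach()/sva_domain_detach() are hypothetical stand-ins for
the real domain lookup, attach and detach steps.

/* Simplified sketch of the core SVA serialization (assumptions above). */
static DEFINE_MUTEX(iommu_sva_lock);

struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm)
{
	struct iommu_sva *handle;

	mutex_lock(&iommu_sva_lock);
	/*
	 * Domain lookup/allocation and the PASID attach happen under the
	 * lock; the attach path ends up in the driver's set_dev_pasid() op,
	 * i.e. intel_svm_set_dev_pasid() -> intel_svm_bind_mm() for VT-d.
	 */
	handle = sva_domain_attach(dev, mm);	/* hypothetical helper */
	mutex_unlock(&iommu_sva_lock);

	return handle;
}

void iommu_sva_unbind_device(struct iommu_sva *handle)
{
	mutex_lock(&iommu_sva_lock);
	/*
	 * The detach path ends up in the driver's remove_dev_pasid() op,
	 * i.e. intel_svm_remove_dev_pasid(), still under iommu_sva_lock.
	 */
	sva_domain_detach(handle);		/* hypothetical helper */
	mutex_unlock(&iommu_sva_lock);
}

With both callbacks entered only under iommu_sva_lock, the driver-local
pasid_mutex added nothing, and removing it also drops the unlock/relock
dance around iopf_queue_flush_dev() in intel_svm_drain_prq().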