The driver now supports domain_alloc_paging, ensuring that a valid
device pointer is provided whenever a paging domain is allocated.
Additionally, the dmar_domain attributes are set up at allocation
time.

Consistent with the established semantics in the IOMMU core, if a
domain is attached to a device and found to be incompatible with the
IOMMU hardware capabilities, the operation returns -EINVAL. This
implicitly advises the caller to allocate a new domain for the device
and attempt the attachment again.

Signed-off-by: Lu Baolu <baolu.lu@xxxxxxxxxxxxxxx>
---
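(Not part of the patch: a minimal caller-side sketch of the -EINVAL
contract described above. The helper attach_or_realloc() and its
reuse-then-reallocate policy are hypothetical; iommu_attach_group(),
iommu_paging_domain_alloc() and iommu_domain_free() are existing
IOMMU core APIs.)

#include <linux/err.h>
#include <linux/iommu.h>

/*
 * Hypothetical caller: try to reuse an existing paging domain for a
 * device's group. If the attach fails with -EINVAL, the domain is
 * incompatible with this device's IOMMU capabilities, so allocate a
 * fresh domain against the device and attach again.
 */
static struct iommu_domain *attach_or_realloc(struct device *dev,
					      struct iommu_group *group,
					      struct iommu_domain *domain)
{
	int ret;

	ret = iommu_attach_group(domain, group);
	if (ret != -EINVAL)
		return ret ? ERR_PTR(ret) : domain;

	/* Incompatible domain: allocate a new one for this device. */
	domain = iommu_paging_domain_alloc(dev);
	if (IS_ERR(domain))
		return domain;

	ret = iommu_attach_group(domain, group);
	if (ret) {
		iommu_domain_free(domain);
		return ERR_PTR(ret);
	}

	return domain;
}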
 drivers/iommu/intel/iommu.c | 39 ++++++++++++++++++-------------------
 drivers/iommu/intel/pasid.c | 28 +-------------------------
 2 files changed, 20 insertions(+), 47 deletions(-)

diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 693a6d7c79ed..e9393f5c2c50 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -3826,27 +3826,26 @@ int prepare_domain_attach_device(struct iommu_domain *domain,
 	if (domain->dirty_ops && !ssads_supported(iommu))
 		return -EINVAL;
 
-	/* check if this iommu agaw is sufficient for max mapped address */
-	addr_width = agaw_to_width(iommu->agaw);
-	if (addr_width > cap_mgaw(iommu->cap))
-		addr_width = cap_mgaw(iommu->cap);
-
-	if (dmar_domain->max_addr > (1LL << addr_width))
+	if (dmar_domain->iommu_coherency !=
+	    iommu_paging_structure_coherency(iommu))
 		return -EINVAL;
-	dmar_domain->gaw = addr_width;
-
-	/*
-	 * Knock out extra levels of page tables if necessary
-	 */
-	while (iommu->agaw < dmar_domain->agaw) {
-		struct dma_pte *pte;
-
-		pte = dmar_domain->pgd;
-		if (dma_pte_present(pte)) {
-			dmar_domain->pgd = phys_to_virt(dma_pte_addr(pte));
-			iommu_free_page(pte);
-		}
-		dmar_domain->agaw--;
+
+	if (domain->type & __IOMMU_DOMAIN_PAGING) {
+		if (dmar_domain->iommu_superpage !=
+		    iommu_superpage_capability(iommu, dmar_domain->use_first_level))
+			return -EINVAL;
+
+		if (dmar_domain->use_first_level &&
+		    (!sm_supported(iommu) || !ecap_flts(iommu->ecap)))
+			return -EINVAL;
+
+		/* check if this iommu agaw is sufficient for max mapped address */
+		addr_width = agaw_to_width(iommu->agaw);
+		if (addr_width > cap_mgaw(iommu->cap))
+			addr_width = cap_mgaw(iommu->cap);
+
+		if (dmar_domain->gaw > addr_width || dmar_domain->agaw > iommu->agaw)
+			return -EINVAL;
 	}
 
 	if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev) &&
diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c
index abce19e2ad6f..573e1b8e3cfb 100644
--- a/drivers/iommu/intel/pasid.c
+++ b/drivers/iommu/intel/pasid.c
@@ -345,25 +345,6 @@ int intel_pasid_setup_first_level(struct intel_iommu *iommu,
 	return 0;
 }
 
-/*
- * Skip top levels of page tables for iommu which has less agaw
- * than default. Unnecessary for PT mode.
- */
-static int iommu_skip_agaw(struct dmar_domain *domain,
-			   struct intel_iommu *iommu,
-			   struct dma_pte **pgd)
-{
-	int agaw;
-
-	for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) {
-		*pgd = phys_to_virt(dma_pte_addr(*pgd));
-		if (!dma_pte_present(*pgd))
-			return -EINVAL;
-	}
-
-	return agaw;
-}
-
 /*
  * Set up the scalable mode pasid entry for second only translation type.
  */
@@ -374,7 +355,6 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu,
 	struct pasid_entry *pte;
 	struct dma_pte *pgd;
 	u64 pgd_val;
-	int agaw;
 	u16 did;
 
 	/*
@@ -388,12 +368,6 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu,
 	}
 
 	pgd = domain->pgd;
-	agaw = iommu_skip_agaw(domain, iommu, &pgd);
-	if (agaw < 0) {
-		dev_err(dev, "Invalid domain page table\n");
-		return -EINVAL;
-	}
-
 	pgd_val = virt_to_phys(pgd);
 	did = domain_id_iommu(domain, iommu);
 
@@ -412,7 +386,7 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu,
 	pasid_clear_entry(pte);
 	pasid_set_domain_id(pte, did);
 	pasid_set_slptr(pte, pgd_val);
-	pasid_set_address_width(pte, agaw);
+	pasid_set_address_width(pte, domain->agaw);
 	pasid_set_translation_type(pte, PASID_ENTRY_PGTT_SL_ONLY);
 	pasid_set_fault_enable(pte);
 	pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));
-- 
2.34.1