On Mon, Nov 14, 2022 at 05:50:50AM +0000, Tian, Kevin wrote:
> > From: Jason Gunthorpe <jgg@xxxxxxxxxx>
> > Sent: Tuesday, November 8, 2022 8:49 AM
> >
> > @@ -171,7 +183,7 @@ static struct iopt_area *iopt_pages_find_domain_area(struct iopt_pages *pages,
> >   */
> >  struct pfn_batch {
> >  	unsigned long *pfns;
> > -	u16 *npfns;
> > +	u32 *npfns;
> 
> why not making it u32 and removing later FIXME directly in patch7?

Rebase error, I fixed it

> >  static bool batch_add_pfn(struct pfn_batch *batch, unsigned long pfn)
> >  {
> > -	/* FIXME: U16 is too small */
> > +	const unsigned int MAX_NPFNS = type_max(typeof(*batch->npfns));
> 
> use lowercase i.e. max_npfns.

It is customary to use caps for constants, e.g. enum values and things

> > +static void __iopt_area_unfill_domain(struct iopt_area *area,
> > +				      struct iopt_pages *pages,
> > +				      struct iommu_domain *domain,
> > +				      unsigned long last_index)
> > +{
> > +	struct interval_tree_double_span_iter span;
> > +	unsigned long start_index = iopt_area_index(area);
> > +	unsigned long unmapped_end_index = start_index;
> > +	u64 backup[BATCH_BACKUP_SIZE];
> > +	struct pfn_batch batch;
> > +
> > +	lockdep_assert_held(&pages->mutex);
> > +
> > +	batch_init_backup(&batch, last_index + 1, backup, sizeof(backup));
> > +	interval_tree_for_each_double_span(&span, &pages->domains_itree,
> > +					   &pages->access_itree, start_index,
> > +					   last_index) {
> > +		if (span.is_used) {
> > +			batch_skip_carry(&batch,
> > +					 span.last_used - span.start_used + 1);
> > +			continue;
> > +		}
> > +		iopt_area_unpin_domain(&batch, area, pages, domain,
> > +				       span.start_hole, span.last_hole,
> > +				       &unmapped_end_index, last_index);
> > +	}
> > +	if (unmapped_end_index != last_index + 1)
> > +		iopt_area_unmap_domain_range(area, domain, unmapped_end_index,
> > +					     last_index);
> 
> a comment marking that it's for the last trailing used span of which
> the pages are not contiguous to previous span.

/*
 * If the range ends in an access then we do the residual unmap without
 * any unpins.
 */

> btw it is not easy to understand how this func plus unpin_domain()
> actually work. more comments are welcomed to help readability.

/*
 * For security we must not unpin something that is still DMA mapped,
 * so this must unmap any IOVA before we go ahead and unpin the pages.
 * This creates a complexity where we need to skip over unpinning pages
 * held in the xarray, but continue to unmap from the domain.
 *
 * The domain unmap cannot stop in the middle of a contiguous range of
 * PFNs. To solve this problem the unpinning step will read ahead to the
 * end of any contiguous span, unmap that whole span, and then only
 * unpin the leading part that does not have any accesses. The residual
 * PFNs that were unmapped but not unpinned are called a "carry" in the
 * batch as they are moved to the front of the PFN list and continue on
 * to the next iteration(s).
 */

> > +/*
> > + * This can do everything and is fully coherent with what a iommu_domain would
> > + * see.
> > + */
> > +static int iopt_pages_rw_slow(struct iopt_pages *pages,
> 
> Can you elaborate what guarantees coherency in this function and how it
> becomes different in other rw variations?

/*
 * This uses the pfn_reader instead of taking a shortcut by using the mm. It can
 * do every scenario and is fully consistent with what an iommu_domain would
 * see.
 */
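Going back to the carry question above, here is the idea as a stand-alone
userspace toy you can compile. None of it is the real iommufd code;
has_access[]/mapped[]/pinned[] and cont_last() are invented stand-ins, and
pages pair up into two-page contiguous runs:

#include <stdbool.h>
#include <stdio.h>

#define NPAGES 16

static bool has_access[NPAGES];	/* page still has an in-kernel access */
static bool mapped[NPAGES];	/* page is mapped into the "domain" */
static bool pinned[NPAGES];	/* page is pinned */

/* Stand-in: last index of the contiguous run holding page i */
static unsigned long cont_last(unsigned long i)
{
	return i | 1;
}

static void unmap(unsigned long start, unsigned long last)
{
	for (unsigned long i = start; i <= last; i++)
		mapped[i] = false;
}

static void unpin(unsigned long start, unsigned long last)
{
	for (unsigned long i = start; i <= last; i++)
		pinned[i] = false;
}

int main(void)
{
	unsigned long unmapped_end = 0;

	for (unsigned long i = 0; i != NPAGES; i++) {
		has_access[i] = (i >= 6 && i <= 9) || i >= 14;
		mapped[i] = pinned[i] = true;
	}

	for (unsigned long i = 0; i != NPAGES; i++) {
		if (has_access[i]) {
			/* Keep the pin; the unmap happens via the
			 * read-ahead below or the residual unmap */
			continue;
		}
		/*
		 * We may not unpin a page that is still mapped, and the
		 * unmap cannot split a contiguous run, so read ahead and
		 * unmap whole runs. Any page in the unmapped range that
		 * still has an access is the "carry": unmapped now but
		 * only unpinned once its access is gone.
		 */
		if (unmapped_end <= i) {
			unmap(unmapped_end, cont_last(i));
			unmapped_end = cont_last(i) + 1;
		}
		unpin(i, i);
	}

	/* A trailing access span gets the residual unmap, no unpins */
	if (unmapped_end != NPAGES)
		unmap(unmapped_end, NPAGES - 1);

	for (unsigned long i = 0; i != NPAGES; i++)
		printf("page %2lu: mapped=%d pinned=%d\n", i,
		       (int)mapped[i], (int)pinned[i]);
	return 0;
}

Pages 6-9 and 14-15 finish unmapped but still pinned: the first group is
the carry, the second is the trailing span covered by the residual unmap.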
> > + * iopt_pages_remove_access() - Release an in-kernel access for PFNs
> > + * @area: The source of PFNs
> > + * @start_index: First page index
> > + * @last_index: Inclusive last page index
> > + *
> > + * Undo iopt_pages_add_access() and unpin the pages if necessary. The caller
> > + * must stop using the PFNs before calling this.
> > + */
> > +void iopt_pages_remove_access(struct iopt_area *area, unsigned long start_index,
> > +			      unsigned long last_index)
> 
> this is called iopt_pages_xxx() but the first parameter is iopt_area.
> 
> also it's not balanced with iopt_pages_add_access() which requires the
> caller to hold pages->mutex and populate area->num_accesses.

OK, see below
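Roughly, the area-level helpers now take pages->mutex and do the
area->num_accesses accounting themselves, so add and remove are balanced
from the caller's side. A toy sketch of just that shape (a pthread mutex
stands in for pages->mutex; every name here is made up, the real diff
follows):

#include <pthread.h>
#include <stdio.h>

struct pages {
	pthread_mutex_t mutex;
};

struct area {
	struct pages *pages;
	unsigned int num_accesses;
};

/* Self-locking add: takes the mutex and does the accounting itself */
static int area_add_access(struct area *area)
{
	pthread_mutex_lock(&area->pages->mutex);
	/* ... pin pages, insert the access into the interval tree ... */
	area->num_accesses++;
	pthread_mutex_unlock(&area->pages->mutex);
	return 0;
}

/* Balanced remove: same lock, same accounting, in one place */
static void area_remove_access(struct area *area)
{
	pthread_mutex_lock(&area->pages->mutex);
	area->num_accesses--;
	/* ... drop the refcount and unpin if it was the last access ... */
	pthread_mutex_unlock(&area->pages->mutex);
}

int main(void)
{
	struct pages pages;
	struct area area = { .pages = &pages, .num_accesses = 0 };

	pthread_mutex_init(&pages.mutex, NULL);
	if (!area_add_access(&area)) {
		/* caller uses the PFNs here, holding no locks */
		area_remove_access(&area);
	}
	printf("num_accesses = %u\n", area.num_accesses);
	return 0;
}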
Thanks,
Jason

--- a/drivers/iommu/iommufd/device.c
+++ b/drivers/iommu/iommufd/device.c
@@ -565,7 +565,7 @@ void iommufd_access_unpin_pages(struct iommufd_access *access,
 
 	down_read(&iopt->iova_rwsem);
 	iopt_for_each_contig_area(&iter, area, iopt, iova, last_iova)
-		iopt_pages_remove_access(
+		iopt_area_remove_access(
 			area, iopt_area_iova_to_index(area, iter.cur_iova),
 			iopt_area_iova_to_index(
 				area,
@@ -650,15 +650,10 @@ int iommufd_access_pin_pages(struct iommufd_access *access, unsigned long iova,
 			goto err_remove;
 		}
 
-		mutex_lock(&area->pages->mutex);
-		rc = iopt_pages_add_access(area->pages, index, last_index,
-					   out_pages, flags);
-		if (rc) {
-			mutex_unlock(&area->pages->mutex);
+		rc = iopt_area_add_access(area, index, last_index, out_pages,
+					  flags);
+		if (rc)
 			goto err_remove;
-		}
-		area->num_accesses++;
-		mutex_unlock(&area->pages->mutex);
 		out_pages += last_index - index + 1;
 	}
 	if (!iopt_area_contig_done(&iter)) {
@@ -673,7 +668,7 @@ int iommufd_access_pin_pages(struct iommufd_access *access, unsigned long iova,
 	if (iova < iter.cur_iova) {
 		last_iova = iter.cur_iova - 1;
 		iopt_for_each_contig_area(&iter, area, iopt, iova, last_iova)
-			iopt_pages_remove_access(
+			iopt_area_remove_access(
 				area, iopt_area_iova_to_index(area, iter.cur_iova),
 				iopt_area_iova_to_index(
diff --git a/drivers/iommu/iommufd/io_pagetable.h b/drivers/iommu/iommufd/io_pagetable.h
index 3b85fa344f6be3..68bc3957534dd7 100644
--- a/drivers/iommu/iommufd/io_pagetable.h
+++ b/drivers/iommu/iommufd/io_pagetable.h
@@ -221,10 +221,10 @@ int iopt_pages_fill_xarray(struct iopt_pages *pages, unsigned long start,
 void iopt_pages_unfill_xarray(struct iopt_pages *pages, unsigned long start,
 			      unsigned long last);
 
-int iopt_pages_add_access(struct iopt_pages *pages, unsigned long start,
-			  unsigned long last, struct page **out_pages,
-			  unsigned int flags);
-void iopt_pages_remove_access(struct iopt_area *area, unsigned long start,
+int iopt_area_add_access(struct iopt_area *area, unsigned long start,
+			 unsigned long last, struct page **out_pages,
+			 unsigned int flags);
+void iopt_area_remove_access(struct iopt_area *area, unsigned long start,
 			     unsigned long last);
 int iopt_pages_rw_access(struct iopt_pages *pages, unsigned long start_byte,
 			 void *data, unsigned long length, unsigned int flags);
diff --git a/drivers/iommu/iommufd/pages.c b/drivers/iommu/iommufd/pages.c
index 01d2447eac4ede..e5f267d9e2b491 100644
--- a/drivers/iommu/iommufd/pages.c
+++ b/drivers/iommu/iommufd/pages.c
@@ -1807,8 +1826,8 @@ iopt_pages_get_exact_access(struct iopt_pages *pages, unsigned long index,
 }
 
 /**
- * iopt_pages_add_access() - Record an in-knerel access for PFNs
- * @pages: The source of PFNs
+ * iopt_area_add_access() - Record an in-kernel access for PFNs
+ * @area: The source of PFNs
  * @start_index: First page index
  * @last_index: Inclusive last page index
  * @out_pages: Output list of struct page's representing the PFNs
@@ -1819,40 +1838,49 @@ iopt_pages_get_exact_access(struct iopt_pages *pages, unsigned long index,
  *
  * This should be undone through a matching call to iopt_pages_remove_access()
  */
-int iopt_pages_add_access(struct iopt_pages *pages, unsigned long start_index,
+int iopt_area_add_access(struct iopt_area *area, unsigned long start_index,
 			 unsigned long last_index, struct page **out_pages,
 			 unsigned int flags)
 {
+	struct iopt_pages *pages = area->pages;
 	struct iopt_pages_access *access;
 	int rc;
 
 	if ((flags & IOMMUFD_ACCESS_RW_WRITE) && !pages->writable)
 		return -EPERM;
 
+	mutex_lock(&pages->mutex);
 	access = iopt_pages_get_exact_access(pages, start_index, last_index);
 	if (access) {
 		refcount_inc(&access->refcount);
 		iopt_pages_fill_from_xarray(pages, start_index, last_index,
 					    out_pages);
+		mutex_unlock(&pages->mutex);
 		return 0;
 	}
 
 	access = kzalloc(sizeof(*access), GFP_KERNEL_ACCOUNT);
-	if (!access)
-		return -ENOMEM;
+	if (!access) {
+		rc = -ENOMEM;
+		goto err_unlock;
+	}
 
 	rc = iopt_pages_fill_xarray(pages, start_index, last_index, out_pages);
 	if (rc)
-		goto out_free;
+		goto err_free;
 
 	access->node.start = start_index;
 	access->node.last = last_index;
 	refcount_set(&access->refcount, 1);
+	area->num_accesses++;
 	interval_tree_insert(&access->node, &pages->access_itree);
+	mutex_unlock(&pages->mutex);
 	return 0;
 
-out_free:
+err_free:
 	kfree(access);
+err_unlock:
+	mutex_unlock(&pages->mutex);
 	return rc;
 }
 
@@ -1865,11 +1893,11 @@ int iopt_pages_add_access(struct iopt_pages *pages, unsigned long start_index,
  * Undo iopt_pages_add_access() and unpin the pages if necessary. The caller
  * must stop using the PFNs before calling this.
  */
-void iopt_pages_remove_access(struct iopt_area *area, unsigned long start_index,
-			      unsigned long last_index)
+void iopt_area_remove_access(struct iopt_area *area, unsigned long start_index,
+			     unsigned long last_index)
 {
-	struct iopt_pages_access *access;
 	struct iopt_pages *pages = area->pages;
+	struct iopt_pages_access *access;
 
 	mutex_lock(&pages->mutex);
 	access = iopt_pages_get_exact_access(pages, start_index, last_index);