Change the internal freelist to use struct iommu_pages_list.

AMD uses the freelist to batch-free the entire table during domain
destruction, and to replace table levels with leaves during map.

Signed-off-by: Jason Gunthorpe <jgg@xxxxxxxxxx>
---
 drivers/iommu/amd/io_pgtable.c | 26 ++++++++++----------------
 1 file changed, 10 insertions(+), 16 deletions(-)

diff --git a/drivers/iommu/amd/io_pgtable.c b/drivers/iommu/amd/io_pgtable.c
index f3399087859fd1..e375ddefdb2cde 100644
--- a/drivers/iommu/amd/io_pgtable.c
+++ b/drivers/iommu/amd/io_pgtable.c
@@ -54,14 +54,7 @@ static u64 *first_pte_l7(u64 *pte, unsigned long *page_size,
  *
  ****************************************************************************/
 
-static void free_pt_page(u64 *pt, struct list_head *freelist)
-{
-	struct page *p = virt_to_page(pt);
-
-	list_add_tail(&p->lru, freelist);
-}
-
-static void free_pt_lvl(u64 *pt, struct list_head *freelist, int lvl)
+static void free_pt_lvl(u64 *pt, struct iommu_pages_list *freelist, int lvl)
 {
 	u64 *p;
 	int i;
@@ -84,20 +77,20 @@ static void free_pt_lvl(u64 *pt, struct list_head *freelist, int lvl)
 		if (lvl > 2)
 			free_pt_lvl(p, freelist, lvl - 1);
 		else
-			free_pt_page(p, freelist);
+			iommu_pages_list_add(freelist, p);
 	}
 
-	free_pt_page(pt, freelist);
+	iommu_pages_list_add(freelist, pt);
 }
 
-static void free_sub_pt(u64 *root, int mode, struct list_head *freelist)
+static void free_sub_pt(u64 *root, int mode, struct iommu_pages_list *freelist)
 {
 	switch (mode) {
 	case PAGE_MODE_NONE:
 	case PAGE_MODE_7_LEVEL:
 		break;
 	case PAGE_MODE_1_LEVEL:
-		free_pt_page(root, freelist);
+		iommu_pages_list_add(freelist, root);
 		break;
 	case PAGE_MODE_2_LEVEL:
 	case PAGE_MODE_3_LEVEL:
@@ -306,7 +299,8 @@ static u64 *fetch_pte(struct amd_io_pgtable *pgtable,
 	return pte;
 }
 
-static void free_clear_pte(u64 *pte, u64 pteval, struct list_head *freelist)
+static void free_clear_pte(u64 *pte, u64 pteval,
+			   struct iommu_pages_list *freelist)
 {
 	u64 *pt;
 	int mode;
@@ -335,7 +329,7 @@ static int iommu_v1_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
 			      int prot, gfp_t gfp, size_t *mapped)
 {
 	struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
-	LIST_HEAD(freelist);
+	struct iommu_pages_list freelist = IOMMU_PAGES_LIST_INIT(freelist);
 	bool updated = false;
 	u64 __pte, *pte;
 	int ret, i, count;
@@ -360,7 +354,7 @@ static int iommu_v1_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
 	for (i = 0; i < count; ++i)
 		free_clear_pte(&pte[i], pte[i], &freelist);
 
-	if (!list_empty(&freelist))
+	if (!iommu_pages_list_empty(&freelist))
 		updated = true;
 
 	if (count > 1) {
@@ -531,7 +525,7 @@ static int iommu_v1_read_and_clear_dirty(struct io_pgtable_ops *ops,
 static void v1_free_pgtable(struct io_pgtable *iop)
 {
 	struct amd_io_pgtable *pgtable = container_of(iop, struct amd_io_pgtable, pgtbl);
-	LIST_HEAD(freelist);
+	struct iommu_pages_list freelist = IOMMU_PAGES_LIST_INIT(freelist);
 
 	if (pgtable->mode == PAGE_MODE_NONE)
 		return;
-- 
2.43.0
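
For reference, the conversion pattern used throughout the diff looks like
this in isolation. A minimal sketch, assuming <linux/iommu-pages.h>
declares struct iommu_pages_list, IOMMU_PAGES_LIST_INIT(),
iommu_pages_list_add() and iommu_pages_list_empty() as used in the hunks
above; the iommu_put_pages_list() call shown as the final bulk free is an
assumption about the wider iommu-pages API and does not appear in this
patch.

/*
 * Minimal sketch of the freelist pattern this patch converts to.
 * iommu_pages_list_add(), iommu_pages_list_empty() and
 * IOMMU_PAGES_LIST_INIT() are taken from the diff above;
 * iommu_put_pages_list() is assumed to be the matching bulk-free
 * entry point and is not part of this patch.
 */
#include <linux/iommu-pages.h>

static void freelist_pattern_sketch(u64 *table_a, u64 *table_b)
{
	/* Replaces LIST_HEAD(freelist) + chaining struct page via page->lru. */
	struct iommu_pages_list freelist = IOMMU_PAGES_LIST_INIT(freelist);

	/* Page-table pages are queued by virtual address directly. */
	iommu_pages_list_add(&freelist, table_a);
	iommu_pages_list_add(&freelist, table_b);

	/* Replaces list_empty() when deciding whether anything was queued. */
	if (!iommu_pages_list_empty(&freelist))
		iommu_put_pages_list(&freelist);	/* assumed bulk free */
}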