On 2023-11-28 8:49 pm, Pasha Tatashin wrote:
Convert iommu/io-pgtable-arm-v7s.c to use the new page allocation functions
provided in iommu-pages.h.
Signed-off-by: Pasha Tatashin <pasha.tatashin@xxxxxxxxxx>
---
drivers/iommu/io-pgtable-arm-v7s.c | 9 ++++++---
1 file changed, 6 insertions(+), 3 deletions(-)
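For reference, the iommu-pages.h helpers are used below with the same (gfp, order) arguments as the page allocator. A minimal sketch of what such wrappers might look like follows; since the conversion drops the explicit __GFP_ZERO, the assumption here is that the helper zeroes the pages itself, and whatever accounting it performs is omitted from the sketch:

#include <linux/gfp.h>
#include <linux/mm.h>

/*
 * Sketch only, not the actual iommu-pages.h: allocate zeroed pages for
 * an IOMMU page table and return the virtual address, mirroring the
 * (gfp, order) signature used in the diff below.
 */
static inline void *iommu_alloc_pages(gfp_t gfp, int order)
{
	struct page *page = alloc_pages(gfp | __GFP_ZERO, order);

	if (unlikely(!page))
		return NULL;
	return page_address(page);
}

/* Sketch only: free pages obtained from iommu_alloc_pages(). */
static inline void iommu_free_pages(void *virt, int order)
{
	if (!virt)
		return;
	free_pages((unsigned long)virt, order);
}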
diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c
index 75f244a3e12d..3d494ca1f671 100644
--- a/drivers/iommu/io-pgtable-arm-v7s.c
+++ b/drivers/iommu/io-pgtable-arm-v7s.c
@@ -34,6 +34,7 @@
#include <linux/types.h>
#include <asm/barrier.h>
+#include "iommu-pages.h"
/* Struct accessors */
#define io_pgtable_to_data(x) \
@@ -255,7 +256,7 @@ static void *__arm_v7s_alloc_table(int lvl, gfp_t gfp,
GFP_KERNEL : ARM_V7S_TABLE_GFP_DMA;
if (lvl == 1)
- table = (void *)__get_free_pages(gfp_l1 | __GFP_ZERO, get_order(size));
+ table = iommu_alloc_pages(gfp_l1, get_order(size));
else if (lvl == 2)
table = kmem_cache_zalloc(data->l2_tables, gfp);
Is it really meaningful to account the L1 table, which is always
allocated once at initial creation, yet not the L2 tables, which are
allocated on demand during use?
Thanks,
Robin.
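To illustrate the asymmetry being raised: the L2 path stays on the driver's kmem_cache, so it bypasses whatever accounting iommu_alloc_pages() provides. Accounting L2 tables as well would need its own hook; the sketch below is purely hypothetical, and iommu_account_table()/iommu_unaccount_table() are made-up names standing in for whatever counter iommu-pages.h maintains.

/*
 * Hypothetical sketch: account L2 tables allocated from the kmem_cache
 * the same way the page-based L1 allocation is accounted. The
 * iommu_account_table()/iommu_unaccount_table() helpers do not exist
 * in this series.
 */
static void *arm_v7s_alloc_l2(struct arm_v7s_io_pgtable *data, gfp_t gfp)
{
	void *table = kmem_cache_zalloc(data->l2_tables, gfp);

	if (table)
		iommu_account_table(table);
	return table;
}

static void arm_v7s_free_l2(struct arm_v7s_io_pgtable *data, void *table)
{
	iommu_unaccount_table(table);
	kmem_cache_free(data->l2_tables, table);
}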
@@ -283,6 +284,7 @@ static void *__arm_v7s_alloc_table(int lvl, gfp_t gfp,
}
if (lvl == 2)
kmemleak_ignore(table);
+
return table;
out_unmap:
@@ -290,7 +292,7 @@ static void *__arm_v7s_alloc_table(int lvl, gfp_t gfp,
dma_unmap_single(dev, dma, size, DMA_TO_DEVICE);
out_free:
if (lvl == 1)
- free_pages((unsigned long)table, get_order(size));
+ iommu_free_pages(table, get_order(size));
else
kmem_cache_free(data->l2_tables, table);
return NULL;
@@ -306,8 +308,9 @@ static void __arm_v7s_free_table(void *table, int lvl,
if (!cfg->coherent_walk)
dma_unmap_single(dev, __arm_v7s_dma_addr(table), size,
DMA_TO_DEVICE);
+
if (lvl == 1)
- free_pages((unsigned long)table, get_order(size));
+ iommu_free_pages(table, get_order(size));
else
kmem_cache_free(data->l2_tables, table);
}