[PATCH v2 2/2] tee: amdtee: add support for CMA buffer allocations

The amdtee driver shall use the CMA region for contiguous
buffer allocations, if CMA is available.

Since the CMA and DMA contiguous APIs are not exported,
this support is enabled only when amdtee is built
as a builtin driver.

Signed-off-by: Devaraj Rangasamy <Devaraj.Rangasamy@xxxxxxx>
Signed-off-by: SivaSangeetha SK <SivaSangeetha.SK@xxxxxxx>
Reviewed-by: Rijo Thomas <Rijo-john.Thomas@xxxxxxx>
---
v2:
* Switched to generic CMA pool.
* Replaced __get_free_pages() with alloc_pages_exact().
* Compacted the patch, keeping the simplified changes within a single file.

 drivers/tee/amdtee/shm_pool.c | 51 +++++++++++++++++++++++++++++++++--
 1 file changed, 49 insertions(+), 2 deletions(-)

diff --git a/drivers/tee/amdtee/shm_pool.c b/drivers/tee/amdtee/shm_pool.c
index 156e8a6f631f..d504d9749114 100644
--- a/drivers/tee/amdtee/shm_pool.c
+++ b/drivers/tee/amdtee/shm_pool.c
@@ -5,10 +5,50 @@
 
 #include <linux/slab.h>
 #include <linux/mm.h>
+#include <linux/dma-map-ops.h>
 #include <linux/tee_drv.h>
 #include <linux/psp.h>
 #include "amdtee_private.h"
 
+#if IS_BUILTIN(CONFIG_AMDTEE) && IS_ENABLED(CONFIG_DMA_CMA)
+static void *alloc_from_cma(size_t size)
+{
+	int nr_pages = size >> PAGE_SHIFT;
+	struct page *page;
+
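+	/* A NULL device selects the default global CMA area */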
+	page = dma_alloc_from_contiguous(NULL, nr_pages, 0, false);
+	if (page)
+		return page_to_virt(page);
+
+	return NULL;
+}
+
+static bool free_from_cma(struct tee_shm *shm)
+{
+	int nr_pages;
+	struct page *page;
+
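+	/* Without a default CMA area, the buffer cannot be from CMA */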
+	if (!dev_get_cma_area(NULL))
+		return false;
+
+	nr_pages = shm->size >> PAGE_SHIFT;
+	page = virt_to_page(shm->kaddr);
+	return dma_release_from_contiguous(NULL, page, nr_pages);
+}
+#else
+static void *alloc_from_cma(size_t size)
+{
+	return NULL;
+}
+
+static bool free_from_cma(struct tee_shm *shm)
+{
+	return false;
+}
+#endif
+
 static int pool_op_alloc(struct tee_shm_pool *pool, struct tee_shm *shm,
 			 size_t size, size_t align)
 {
@@ -17,7 +57,11 @@ static int pool_op_alloc(struct tee_shm_pool *pool, struct tee_shm *shm,
 
 	size = PAGE_ALIGN(size);
 
-	va = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
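+	/* Try CMA first; fall back to the page allocator */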
+	va = alloc_from_cma(size);
+	if (!va)
+		va = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
+
 	if (!va)
 		return -ENOMEM;
 
@@ -40,7 +84,10 @@ static void pool_op_free(struct tee_shm_pool *pool, struct tee_shm *shm)
 {
 	/* Unmap the shared memory from TEE */
 	amdtee_unmap_shmem(shm);
-	free_pages_exact(shm->kaddr, shm->size);
+
+	if (!free_from_cma(shm))
+		free_pages_exact(shm->kaddr, shm->size);
+
 	shm->kaddr = NULL;
 }
 
-- 
2.25.1