On 11/22/2023 5:47 AM, Pratyush Brahma wrote:
> From: Vijayanand Jitta <quic_vjitta@xxxxxxxxxxx>
>
> Add secure system for Pixel and Non pixel video usecases, this
> allocates from system heap and secures using qcom_scm_aasign_mem.
                                                        ^^^^^^ typo
>
> Change-Id: If0702f85bff651843c6a5c83694043364229e66b
> Signed-off-by: Vijayanand Jitta <quic_vjitta@xxxxxxxxxxx>

Please get these patches reviewed internally for basic checks before
sending them to the mailing list. You can review go/upstream when on the
Qualcomm corporate network.

Pavan mentioned the S-o-B is incorrect. The commit text should also not
carry a Change-Id.

Please be sure to send to the linux-arm-msm mailing list as well, since
this affects Qualcomm chipsets.

> ---
>  drivers/dma-buf/heaps/secure_heap.c | 163 +++++++++++++++++++++++++++-
>  1 file changed, 160 insertions(+), 3 deletions(-)
>
> diff --git a/drivers/dma-buf/heaps/secure_heap.c b/drivers/dma-buf/heaps/secure_heap.c
> index 04e2ee000e19..cdcf4b3f5333 100644
> --- a/drivers/dma-buf/heaps/secure_heap.c
> +++ b/drivers/dma-buf/heaps/secure_heap.c
> @@ -58,6 +58,11 @@ enum secure_memory_type {
>  	 * protect it, then the detail memory management also is inside the TEE.
>  	 */
>  	SECURE_MEMORY_TYPE_MTK_CM_CMA = 2,
> +	/*
> +	 * QCOM secure system heap, use system heap to alloc/free.
> +	 * and use qcom_scm_assign_mem to secure the memory.
> +	 */
> +	SECURE_MEMORY_TYPE_QCOM_SYSTEM = 3,
>  };
>
>  struct secure_buffer {
> @@ -69,6 +74,7 @@ struct secure_buffer {
>  	 */
>  	u32 sec_handle;
>  	struct page *cma_page;
> +	struct sg_table sg_table;
>  };
>
>  #define TEE_MEM_COMMAND_ID_BASE_MTK 0x10000
> @@ -329,11 +335,26 @@ static int secure_heap_qcom_secure_memory(struct secure_heap *sec_heap,
>  	next[0].vmid = data->vmid;
>  	next[0].perm = data->perm;
>
> -
> -	ret = qcom_scm_assign_mem(page_to_phys(sec_buf->cma_page),
> +	if (sec_heap->mem_type == SECURE_MEMORY_TYPE_CMA) {
> +		ret = qcom_scm_assign_mem(page_to_phys(sec_buf->cma_page),
>  				  sec_buf->size, &src_perms,
>  				  next, 1);
> +	} else if (sec_heap->mem_type == SECURE_MEMORY_TYPE_QCOM_SYSTEM) {
> +		struct sg_table *table;
> +		struct scatterlist *sg;
> +		int i = 0;
> +
> +		table = &sec_buf->sg_table;
> +		for_each_sgtable_sg(table, sg, i) {
> +			struct page *page = sg_page(sg);
> +
> +			ret = qcom_scm_assign_mem(page_to_phys(page),
> +					page_size(page), &src_perms,
> +					next, 1);
> +			if (ret)
> +				break;
> +		}
> +	}
>  	return ret;
>  }
>
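One issue in the SECURE_MEMORY_TYPE_QCOM_SYSTEM branch above: if
qcom_scm_assign_mem() fails partway through the table, the entries that
were already moved to the CP VMID stay secured, and the free path will
then hand those pages straight back to the page allocator while HLOS
still has no access to them. This probably wants an unwind before
returning the error. A rough sketch of the idea (completely untested,
and the table/sg locals would need to move out of the else branch so the
label can see them):

			if (ret)
				goto unwind;
		}
	}
	return ret;

unwind:
	{
		/* Hand the entries assigned so far back to HLOS. */
		struct qcom_scm_vmperm hlos = {
			.vmid = QCOM_SCM_VMID_HLOS,
			.perm = QCOM_SCM_PERM_RWX,
		};
		struct scatterlist *prev;
		int j;

		for_each_sgtable_sg(table, prev, j) {
			if (prev == sg)
				break;
			qcom_scm_assign_mem(page_to_phys(sg_page(prev)),
					    page_size(sg_page(prev)),
					    &src_perms, &hlos, 1);
		}
	}
	return ret;

(qcom_scm_assign_mem() updates src_perms on success, so by the time the
loop fails it already names the CP VMID as the source for the give-back
calls.)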
> @@ -347,9 +368,24 @@ static void secure_heap_qcom_unsecure_memory(struct secure_heap *sec_heap,
>  	next[0].vmid = QCOM_SCM_VMID_HLOS;
>  	next[0].perm = QCOM_SCM_PERM_RWX;
>
> -	qcom_scm_assign_mem(page_to_phys(sec_buf->cma_page),
> +	if (sec_heap->mem_type == SECURE_MEMORY_TYPE_CMA) {
> +		qcom_scm_assign_mem(page_to_phys(sec_buf->cma_page),
>  			    sec_buf->size, &src_perms,
>  			    next, 1);
> +	} else if (sec_heap->mem_type == SECURE_MEMORY_TYPE_QCOM_SYSTEM) {
> +		struct sg_table *table;
> +		struct scatterlist *sg;
> +		int i = 0;
> +
> +		table = &sec_buf->sg_table;
> +		for_each_sgtable_sg(table, sg, i) {
> +			struct page *page = sg_page(sg);
> +
> +			qcom_scm_assign_mem(page_to_phys(page),
> +					page_size(page), &src_perms,
> +					next, 1);
> +		}
> +	}
>  }
>
>  const struct secure_heap_prv_data qcom_cma_sec_mem_data = {
> @@ -361,6 +397,117 @@ const struct secure_heap_prv_data qcom_cma_sec_mem_data = {
>  	.unsecure_the_memory = secure_heap_qcom_unsecure_memory,
>  };
>
> +/* Using system heap allocator */
> +#define LOW_ORDER_GFP	(GFP_HIGHUSER | __GFP_ZERO)
> +#define HIGH_ORDER_GFP	(((GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN \
> +				| __GFP_NORETRY) & ~__GFP_RECLAIM) \
> +				| __GFP_COMP)
> +static gfp_t order_flags[] = {HIGH_ORDER_GFP, HIGH_ORDER_GFP, LOW_ORDER_GFP};
> +static const unsigned int orders[] = {8, 4, 0};
> +#define NUM_ORDERS ARRAY_SIZE(orders)
> +
> +static struct page *alloc_largest_available(unsigned long size,
> +					    unsigned int max_order)
> +{
> +	struct page *page;
> +	int i;
> +
> +	for (i = 0; i < NUM_ORDERS; i++) {
> +		if (size < (PAGE_SIZE << orders[i]))
> +			continue;
> +		if (max_order < orders[i])
> +			continue;
> +
> +		page = alloc_pages(order_flags[i], orders[i]);
> +		if (!page)
> +			continue;
> +		return page;
> +	}
> +	return NULL;
> +}
> +
> +static int qcom_system_secure_memory_allocate(struct secure_heap *sec_heap,
> +					      struct secure_buffer *sec_buf)
> +{
> +	unsigned long size_remaining = sec_buf->size;
> +	unsigned int max_order = orders[0];
> +	struct sg_table *table;
> +	struct scatterlist *sg;
> +	struct list_head pages;
> +	struct page *page, *tmp_page;
> +	int i = 0, ret = -ENOMEM;
> +
> +	INIT_LIST_HEAD(&pages);
> +	while (size_remaining > 0) {
> +		/*
> +		 * Avoid trying to allocate memory if the process
> +		 * has been killed by SIGKILL
> +		 */
> +		if (fatal_signal_pending(current)) {
> +			return -EINTR;
> +		}
> +
> +		page = alloc_largest_available(size_remaining, max_order);
> +		if (!page)
> +			goto free;
> +
> +		list_add_tail(&page->lru, &pages);
> +		size_remaining -= page_size(page);
> +		max_order = compound_order(page);
> +		i++;
> +	}
> +	table = &sec_buf->sg_table;
> +	if (sg_alloc_table(table, i, GFP_KERNEL))
> +		goto free;
> +
> +	sg = table->sgl;
> +	list_for_each_entry_safe(page, tmp_page, &pages, lru) {
> +		sg_set_page(sg, page, page_size(page), 0);
> +		sg = sg_next(sg);
> +		list_del(&page->lru);
> +	}
> +	return 0;
> +free:
> +	list_for_each_entry_safe(page, tmp_page, &pages, lru)
> +		__free_pages(page, compound_order(page));
> +
> +	return ret;
> +}
> +
> +static void qcom_system_secure_memory_free(struct secure_heap *sec_heap,
> +					   struct secure_buffer *sec_buf)
> +{
> +	struct sg_table *table;
> +	struct scatterlist *sg;
> +	int i;
> +
> +	table = &sec_buf->sg_table;
> +	for_each_sgtable_sg(table, sg, i) {
> +		struct page *page = sg_page(sg);
> +
> +		__free_pages(page, compound_order(page));
> +	}
> +	sg_free_table(table);
> +}
> +
> +const struct secure_heap_prv_data qcom_system_pixel_sec_mem_data = {
> +	.vmid = QCOM_SCM_VMID_CP_PIXEL,
> +	.perm = QCOM_SCM_PERM_RW,
> +	.memory_alloc = qcom_system_secure_memory_allocate,
> +	.memory_free = qcom_system_secure_memory_free,
> +	.secure_the_memory = secure_heap_qcom_secure_memory,
> +	.unsecure_the_memory = secure_heap_qcom_unsecure_memory,
> +};
> +
> +const struct secure_heap_prv_data qcom_system_non_pixel_sec_mem_data = {
> +	.vmid = QCOM_SCM_VMID_CP_NON_PIXEL,
> +	.perm = QCOM_SCM_PERM_RW,
> +	.memory_alloc = qcom_system_secure_memory_allocate,
> +	.memory_free = qcom_system_secure_memory_free,
> +	.secure_the_memory = secure_heap_qcom_secure_memory,
> +	.unsecure_the_memory = secure_heap_qcom_unsecure_memory,
> +};
> +
>  static int secure_heap_secure_memory_allocate(struct secure_heap *sec_heap,
>  					      struct secure_buffer *sec_buf)
>  {
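Two comments on qcom_system_secure_memory_allocate():

1. The fatal_signal_pending() check returns -EINTR directly, which leaks
any pages already collected on the local list. system_heap.c jumps to
its free path in the same situation; something like this (sketch against
the code above) would do the same here:

		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			goto free;
		}

2. alloc_largest_available(), order_flags[] and orders[] are copied
almost verbatim from drivers/dma-buf/heaps/system_heap.c. Could these be
factored into a shared helper instead of duplicating them?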
> @@ -585,6 +732,16 @@ static struct secure_heap secure_heaps[] = {
>  		.mem_type = SECURE_MEMORY_TYPE_MTK_CM_CMA,
>  		.data = &mtk_sec_mem_data_cma,
>  	},
> +	{
> +		.name = "secure_system_pixel",
> +		.mem_type = SECURE_MEMORY_TYPE_QCOM_SYSTEM,
> +		.data = &qcom_system_pixel_sec_mem_data,
> +	},
> +	{
> +		.name = "secure_system_non_pixel",
> +		.mem_type = SECURE_MEMORY_TYPE_QCOM_SYSTEM,
> +		.data = &qcom_system_non_pixel_sec_mem_data,
> +	},
>  };
>
>  static int __init secure_cma_init(struct reserved_mem *rmem)
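For testing: the two heaps should show up as
/dev/dma_heap/secure_system_pixel and
/dev/dma_heap/secure_system_non_pixel, and can be exercised with the
standard dma-heap allocation ioctl. A minimal userspace check might look
roughly like this (arbitrary 1 MiB size, error handling omitted):

	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <linux/dma-heap.h>

	int heap_fd = open("/dev/dma_heap/secure_system_pixel", O_RDONLY);
	struct dma_heap_allocation_data alloc_data = {
		.len = 1024 * 1024,
		.fd_flags = O_RDWR | O_CLOEXEC,
	};

	ioctl(heap_fd, DMA_HEAP_IOCTL_ALLOC, &alloc_data);
	/* alloc_data.fd now refers to a dma-buf backed by secured pages */

It would be good to see something along these lines wired into the
dma-buf heap selftests so the assign/unassign paths get regular
coverage.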