This patch adds support for a chunk heap that allows for buffers that are
made up of a list of fixed-size chunks taken from a CMA region. Chunk sizes
are configured when the heaps are created.

Signed-off-by: Hyesoo Yu <hyesoo.yu@xxxxxxxxxxx>
---
 drivers/dma-buf/heaps/Kconfig      |   9 ++
 drivers/dma-buf/heaps/Makefile     |   1 +
 drivers/dma-buf/heaps/chunk_heap.c | 222 +++++++++++++++++++++++++++++++++++++
 3 files changed, 232 insertions(+)
 create mode 100644 drivers/dma-buf/heaps/chunk_heap.c

diff --git a/drivers/dma-buf/heaps/Kconfig b/drivers/dma-buf/heaps/Kconfig
index a5eef06..98552fa 100644
--- a/drivers/dma-buf/heaps/Kconfig
+++ b/drivers/dma-buf/heaps/Kconfig
@@ -12,3 +12,12 @@ config DMABUF_HEAPS_CMA
 	  Choose this option to enable dma-buf CMA heap. This heap is backed
 	  by the Contiguous Memory Allocator (CMA). If your system has these
 	  regions, you should say Y here.
+
+config DMABUF_HEAPS_CHUNK
+	tristate "DMA-BUF CHUNK Heap"
+	depends on DMABUF_HEAPS && DMA_CMA
+	help
+	  Choose this option to enable dma-buf CHUNK heap. This heap is backed
+	  by the Contiguous Memory Allocator (CMA) and allocates buffers that
+	  are made up of a list of fixed-size chunks taken from CMA. Chunk
+	  sizes are configured when the heaps are created.
diff --git a/drivers/dma-buf/heaps/Makefile b/drivers/dma-buf/heaps/Makefile
index 6e54cde..3b2a0986 100644
--- a/drivers/dma-buf/heaps/Makefile
+++ b/drivers/dma-buf/heaps/Makefile
@@ -2,3 +2,4 @@
 obj-y += heap-helpers.o
 obj-$(CONFIG_DMABUF_HEAPS_SYSTEM)	+= system_heap.o
 obj-$(CONFIG_DMABUF_HEAPS_CMA)		+= cma_heap.o
+obj-$(CONFIG_DMABUF_HEAPS_CHUNK)	+= chunk_heap.o
diff --git a/drivers/dma-buf/heaps/chunk_heap.c b/drivers/dma-buf/heaps/chunk_heap.c
new file mode 100644
index 0000000..1eefaec
--- /dev/null
+++ b/drivers/dma-buf/heaps/chunk_heap.c
@@ -0,0 +1,222 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * DMA-BUF chunk heap exporter
+ *
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd.
+ * Author: <hyesoo.yu@xxxxxxxxxxx> for Samsung Electronics.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/cma.h>
+#include <linux/device.h>
+#include <linux/dma-buf.h>
+#include <linux/dma-heap.h>
+#include <linux/dma-contiguous.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/highmem.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/scatterlist.h>
+#include <linux/sched/signal.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/of.h>
+
+#include "heap-helpers.h"
+
+struct chunk_heap {
+	struct dma_heap *heap;
+	phys_addr_t base;
+	phys_addr_t size;
+	atomic_t cur_pageblock_idx;
+	unsigned int max_num_pageblocks;
+	unsigned int order;
+};
+
+static void chunk_heap_free(struct heap_helper_buffer *buffer)
+{
+	struct chunk_heap *chunk_heap = dma_heap_get_drvdata(buffer->heap);
+	pgoff_t pg;
+
+	for (pg = 0; pg < buffer->pagecount; pg++)
+		__free_pages(buffer->pages[pg], chunk_heap->order);
+	kvfree(buffer->pages);
+	kfree(buffer);
+}
+
+static inline unsigned long chunk_get_next_pfn(struct chunk_heap *chunk_heap)
+{
+	unsigned long i = atomic_inc_return(&chunk_heap->cur_pageblock_idx) %
+		chunk_heap->max_num_pageblocks;
+
+	return PHYS_PFN(chunk_heap->base) + i * pageblock_nr_pages;
+}
+
+static int chunk_alloc_pages(struct chunk_heap *chunk_heap, struct page **pages,
+			     unsigned int order, unsigned int count)
+{
+	unsigned long base;
+	int i = 0, nr_block = 0, nr_elem, ret;
+
+	while (count) {
+		/*
+		 * If every pageblock in the region has been scanned and the
+		 * request is still not satisfied, fail the allocation.
+		 */
+		if (nr_block++ == chunk_heap->max_num_pageblocks) {
+			ret = -ENOMEM;
+			goto err_bulk;
+		}
+		base = chunk_get_next_pfn(chunk_heap);
+		nr_elem = min_t(unsigned int, count, pageblock_nr_pages >> order);
+		ret = alloc_pages_bulk(base, base + pageblock_nr_pages, MIGRATE_CMA,
+				       GFP_KERNEL, order, nr_elem, pages + i);
+		if (ret < 0)
+			goto err_bulk;
+
+		i += ret;
+		count -= ret;
+	}
+
+	return 0;
+
+err_bulk:
+	while (i-- > 0)
+		__free_pages(pages[i], order);
+
+	return ret;
+}
+
+static int chunk_heap_allocate(struct dma_heap *heap, unsigned long len,
+			       unsigned long fd_flags, unsigned long heap_flags)
+{
+
+	struct chunk_heap *chunk_heap = dma_heap_get_drvdata(heap);
+	struct heap_helper_buffer *helper_buffer;
+	struct dma_buf *dmabuf;
+	unsigned int count = DIV_ROUND_UP(len, PAGE_SIZE << chunk_heap->order);
+	int ret = -ENOMEM;
+
+	helper_buffer = kzalloc(sizeof(*helper_buffer), GFP_KERNEL);
+	if (!helper_buffer)
+		return ret;
+
+	init_heap_helper_buffer(helper_buffer, chunk_heap_free);
+
+	helper_buffer->heap = heap;
+	helper_buffer->size = ALIGN(len, PAGE_SIZE << chunk_heap->order);
+	helper_buffer->pagecount = count;
+	helper_buffer->pages = kvmalloc_array(helper_buffer->pagecount,
+					      sizeof(*helper_buffer->pages), GFP_KERNEL);
+	if (!helper_buffer->pages)
+		goto err0;
+
+	ret = chunk_alloc_pages(chunk_heap, helper_buffer->pages,
+				chunk_heap->order, helper_buffer->pagecount);
+	if (ret < 0)
+		goto err1;
+
+	dmabuf = heap_helper_export_dmabuf(helper_buffer, fd_flags);
+	if (IS_ERR(dmabuf)) {
+		ret = PTR_ERR(dmabuf);
+		goto err2;
+	}
+
+	helper_buffer->dmabuf = dmabuf;
+
+	ret = dma_buf_fd(dmabuf, fd_flags);
+	if (ret < 0) {
+		dma_buf_put(dmabuf);
+		return ret;
+	}
+
+	return ret;
+
+err2:
+	while (count-- > 0)
+		__free_pages(helper_buffer->pages[count], chunk_heap->order);
+err1:
+	kvfree(helper_buffer->pages);
+err0:
+	kfree(helper_buffer);
+
+	return ret;
+}
+
+static void rmem_remove_callback(void *p)
+{
+	of_reserved_mem_device_release((struct device *)p);
+}
+
+static const struct dma_heap_ops chunk_heap_ops = {
+	.allocate = chunk_heap_allocate,
+};
+
+static int chunk_heap_probe(struct platform_device *pdev)
+{
+	struct chunk_heap *chunk_heap;
+	struct reserved_mem *rmem;
+	struct device_node *rmem_np;
+	struct dma_heap_export_info exp_info;
+	unsigned int alignment = PAGE_SIZE;
+	int ret;
+
+	ret = of_reserved_mem_device_init(&pdev->dev);
+	if (ret || !pdev->dev.cma_area) {
+		dev_err(&pdev->dev, "The CMA reserved area is not assigned (ret %d)", ret);
+		return -EINVAL;
+	}
+
+	ret = devm_add_action(&pdev->dev, rmem_remove_callback, &pdev->dev);
+	if (ret) {
+		of_reserved_mem_device_release(&pdev->dev);
+		return ret;
+	}
+
+	rmem_np = of_parse_phandle(pdev->dev.of_node, "memory-region", 0);
+	rmem = of_reserved_mem_lookup(rmem_np);
+
+	chunk_heap = devm_kzalloc(&pdev->dev, sizeof(*chunk_heap), GFP_KERNEL);
+	if (!chunk_heap)
+		return -ENOMEM;
+
+	chunk_heap->base = rmem->base;
+	chunk_heap->size = rmem->size;
+	chunk_heap->max_num_pageblocks = rmem->size >> (pageblock_order + PAGE_SHIFT);
+
+	of_property_read_u32(pdev->dev.of_node, "alignment", &alignment);
+	chunk_heap->order = get_order(alignment);
+
+	exp_info.name = rmem->name;
+	exp_info.ops = &chunk_heap_ops;
+	exp_info.priv = chunk_heap;
+
+	chunk_heap->heap = dma_heap_add(&exp_info);
+	if (IS_ERR(chunk_heap->heap))
+		return PTR_ERR(chunk_heap->heap);
+
+	return 0;
+}
+
+static const struct of_device_id chunk_heap_of_match[] = {
+	{ .compatible = "dma_heap,chunk", },
+	{ },
+};
+
+MODULE_DEVICE_TABLE(of, chunk_heap_of_match);
+
+static struct platform_driver chunk_heap_driver = {
+	.driver = {
+		.name = "chunk_heap",
+		.of_match_table = chunk_heap_of_match,
+	},
+	.probe = chunk_heap_probe,
+};
+
+static int __init chunk_heap_init(void)
+{
+	return platform_driver_register(&chunk_heap_driver);
+}
+module_init(chunk_heap_init);
+MODULE_DESCRIPTION("DMA-BUF Chunk Heap");
+MODULE_LICENSE("GPL v2");
-- 
2.7.4
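
For context, a heap registered by this driver would be consumed from userspace
through the generic dma-heap ioctl interface, just like the existing system and
CMA heaps. The sketch below is illustrative only and not part of the patch: it
assumes the reserved-memory node is named "chunk_pool", so the heap shows up as
/dev/dma_heap/chunk_pool (the actual name comes from rmem->name and is board
specific), and it requests an arbitrary 4 MiB buffer.

/*
 * Illustrative userspace sketch (not part of this patch): allocate one
 * buffer from the chunk heap through the standard dma-heap uapi. The
 * device name "chunk_pool" is an assumption; the real name is whatever
 * rmem->name resolves to for the board's reserved-memory node.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/dma-heap.h>

int main(void)
{
	struct dma_heap_allocation_data data = {
		.len = 4 * 1024 * 1024,		/* rounded up to the chunk size by the heap */
		.fd_flags = O_RDWR | O_CLOEXEC,	/* flags applied to the returned dma-buf fd */
	};
	int heap_fd, ret;

	heap_fd = open("/dev/dma_heap/chunk_pool", O_RDONLY | O_CLOEXEC);
	if (heap_fd < 0) {
		perror("open heap");
		return 1;
	}

	ret = ioctl(heap_fd, DMA_HEAP_IOCTL_ALLOC, &data);
	if (ret < 0) {
		perror("DMA_HEAP_IOCTL_ALLOC");
		close(heap_fd);
		return 1;
	}

	/* data.fd is now a dma-buf backed by fixed-size CMA chunks. */
	close(data.fd);
	close(heap_fd);
	return 0;
}

The request length is rounded up by the heap to a multiple of the chunk size
(PAGE_SIZE << order), and the resulting dma-buf fd can be mmap()ed or passed
to a device driver as usual.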