Add secure buffer control flow to mtk_drm_gem.

When user space passes the DRM_MTK_GEM_CREATE_RESTRICTED flag and a
size to create a mtk_drm_gem object, mtk_drm_gem finds a DMA buffer of
matching size from the secure dma-heap and binds it to the mtk_drm_gem
object.

TODO:
1. Drop mtk_gem_create_from_heap() after the ioctl is changed to
   DMA_HEAP_IOCTL_ALLOC.
2. Check the private data of the dmabuf instead of strncmp() on its
   exp_name.

Signed-off-by: Jason-JH.Lin <jason-jh.lin@xxxxxxxxxxxx>
Signed-off-by: Hsiao Chien Sung <shawn.sung@xxxxxxxxxxxx>
---
 drivers/gpu/drm/mediatek/mtk_drm_drv.c |  2 +-
 drivers/gpu/drm/mediatek/mtk_gem.c     | 85 +++++++++++++++++++++++++-
 drivers/gpu/drm/mediatek/mtk_gem.h     |  4 ++
 3 files changed, 89 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index 11e1555e9aa4..ff40ca5dd2a0 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -573,7 +573,7 @@ static void mtk_drm_kms_deinit(struct drm_device *drm)
 
 static const struct drm_ioctl_desc mtk_ioctls[] = {
 	DRM_IOCTL_DEF_DRV(MTK_GEM_CREATE, mtk_gem_create_ioctl,
-			  DRM_UNLOCKED | DRM_AUTH | DRM_RENDER_ALLOW),
+			  DRM_AUTH | DRM_RENDER_ALLOW),
 };
 
 DEFINE_DRM_GEM_FOPS(mtk_drm_fops);
diff --git a/drivers/gpu/drm/mediatek/mtk_gem.c b/drivers/gpu/drm/mediatek/mtk_gem.c
index 91f6cfa3f1b7..118ea7f0a71c 100644
--- a/drivers/gpu/drm/mediatek/mtk_gem.c
+++ b/drivers/gpu/drm/mediatek/mtk_gem.c
@@ -5,6 +5,8 @@
 
 #include <linux/dma-buf.h>
 #include <linux/vmalloc.h>
+#include <linux/dma-heap.h>
+#include <uapi/linux/dma-heap.h>
 
 #include <drm/mediatek_drm.h>
 #include <drm/drm.h>
@@ -103,6 +105,81 @@ struct mtk_gem_obj *mtk_gem_create(struct drm_device *dev,
 	return ERR_PTR(ret);
 }
 
+struct mtk_gem_obj *mtk_gem_create_from_heap(struct drm_device *dev,
+					     const char *heap, size_t size)
+{
+	struct mtk_drm_private *priv = dev->dev_private;
+	struct mtk_gem_obj *mtk_gem;
+	struct drm_gem_object *obj;
+	struct dma_heap *dma_heap;
+	struct dma_buf *dma_buf;
+	struct dma_buf_attachment *attach;
+	struct sg_table *sgt;
+	struct iosys_map map = {};
+	int ret;
+
+	mtk_gem = mtk_gem_init(dev, size);
+	if (IS_ERR(mtk_gem))
+		return ERR_CAST(mtk_gem);
+
+	obj = &mtk_gem->base;
+
+	dma_heap = dma_heap_find(heap);
+	if (!dma_heap) {
+		DRM_ERROR("heap find fail\n");
+		goto err_gem_free;
+	}
+	dma_buf = dma_heap_buffer_alloc(dma_heap, size,
+					O_RDWR | O_CLOEXEC, DMA_HEAP_VALID_HEAP_FLAGS);
+	if (IS_ERR(dma_buf)) {
+		DRM_ERROR("buffer alloc fail\n");
+		dma_heap_put(dma_heap);
+		goto err_gem_free;
+	}
+	dma_heap_put(dma_heap);
+
+	attach = dma_buf_attach(dma_buf, priv->dma_dev);
+	if (IS_ERR(attach)) {
+		DRM_ERROR("attach fail, return\n");
+		dma_buf_put(dma_buf);
+		goto err_gem_free;
+	}
+
+	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+	if (IS_ERR(sgt)) {
+		DRM_ERROR("map failed, detach and return\n");
+		dma_buf_detach(dma_buf, attach);
+		dma_buf_put(dma_buf);
+		goto err_gem_free;
+	}
+	obj->import_attach = attach;
+	mtk_gem->dma_addr = sg_dma_address(sgt->sgl);
+	mtk_gem->sg = sgt;
+	mtk_gem->size = dma_buf->size;
+
+	if (!strcmp(heap, "restricted_mtk_cm") || !strcmp(heap, "restricted_mtk_cma")) {
+		/* secure buffer can not be mapped */
+		mtk_gem->secure = true;
+	} else {
+		ret = dma_buf_vmap(dma_buf, &map);
+		mtk_gem->kvaddr = map.vaddr;
+		if (ret) {
+			DRM_ERROR("map failed, ret=%d\n", ret);
+			dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
+			dma_buf_detach(dma_buf, attach);
+			dma_buf_put(dma_buf);
+			mtk_gem->kvaddr = NULL;
+		}
+	}
+
+	return mtk_gem;
+
+err_gem_free:
+	drm_gem_object_release(obj);
+	kfree(mtk_gem);
+	return ERR_PTR(-ENOMEM);
+}
+
 void mtk_gem_free_object(struct drm_gem_object *obj)
 {
 	struct mtk_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
@@ -230,7 +307,9 @@ struct drm_gem_object *mtk_gem_prime_import_sg_table(struct drm_device *dev,
 	if (IS_ERR(mtk_gem))
 		return ERR_CAST(mtk_gem);
 
+	mtk_gem->secure = (!strncmp(attach->dmabuf->exp_name, "restricted", 10));
 	mtk_gem->dma_addr = sg_dma_address(sg->sgl);
+	mtk_gem->size = attach->dmabuf->size;
 	mtk_gem->sg = sg;
 
 	return &mtk_gem->base;
@@ -296,7 +375,11 @@ int mtk_gem_create_ioctl(struct drm_device *dev, void *data,
 	struct drm_mtk_gem_create *args = data;
 	int ret;
 
-	mtk_gem = mtk_gem_create(dev, args->size, false);
+	if (args->flags & DRM_MTK_GEM_CREATE_RESTRICTED)
+		mtk_gem = mtk_gem_create_from_heap(dev, "restricted_mtk_cma", args->size);
+	else
+		mtk_gem = mtk_gem_create(dev, args->size, false);
+
 	if (IS_ERR(mtk_gem))
 		return PTR_ERR(mtk_gem);
 
diff --git a/drivers/gpu/drm/mediatek/mtk_gem.h b/drivers/gpu/drm/mediatek/mtk_gem.h
index b71a7e7b405a..7f6b23b9875a 100644
--- a/drivers/gpu/drm/mediatek/mtk_gem.h
+++ b/drivers/gpu/drm/mediatek/mtk_gem.h
@@ -27,9 +27,11 @@ struct mtk_gem_obj {
 	void			*cookie;
 	void			*kvaddr;
 	dma_addr_t		dma_addr;
+	size_t			size;
 	unsigned long		dma_attrs;
 	struct sg_table		*sg;
 	struct page		**pages;
+	bool			secure;
 };
 
 #define to_mtk_gem_obj(x)	container_of(x, struct mtk_gem_obj, base)
@@ -37,6 +39,8 @@ struct mtk_gem_obj {
 void mtk_gem_free_object(struct drm_gem_object *gem);
 struct mtk_gem_obj *mtk_gem_create(struct drm_device *dev, size_t size,
 				   bool alloc_kmap);
+struct mtk_gem_obj *mtk_gem_create_from_heap(struct drm_device *dev,
+					     const char *heap, size_t size);
int mtk_gem_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
 			struct drm_mode_create_dumb *args);
 struct sg_table *mtk_gem_prime_get_sg_table(struct drm_gem_object *obj);
-- 
2.18.0
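
For reference, a minimal user-space sketch of the flow described in the
commit message. It assumes the drm_mtk_gem_create layout implied by the
ioctl handler (size and flags in, a handle out), the usual
DRM_IOCTL_MTK_GEM_CREATE wrapper name, and the /dev/dri/renderD128 node
path; none of these are confirmed by this patch, which only shows
args->size and args->flags being consumed.

/*
 * Hypothetical example only: the handle field, the DRM_IOCTL_MTK_GEM_CREATE
 * macro name and the render node path are assumptions, not taken from
 * this patch.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <drm/mediatek_drm.h>	/* uAPI header carrying the new flag */

int main(void)
{
	struct drm_mtk_gem_create arg;
	int fd = open("/dev/dri/renderD128", O_RDWR | O_CLOEXEC);

	if (fd < 0)
		return 1;

	memset(&arg, 0, sizeof(arg));
	arg.size = 4096;				/* one page from the restricted heap */
	arg.flags = DRM_MTK_GEM_CREATE_RESTRICTED;	/* request a secure buffer */

	/*
	 * mtk_gem_create_ioctl() sees the flag and calls
	 * mtk_gem_create_from_heap(dev, "restricted_mtk_cma", size).
	 */
	if (ioctl(fd, DRM_IOCTL_MTK_GEM_CREATE, &arg) == 0)
		printf("restricted GEM handle %u\n", (unsigned int)arg.handle);

	close(fd);
	return 0;
}

From user space the call looks identical to a normal MTK_GEM_CREATE; the
only difference is that the returned object is backed by the restricted
heap and, per the driver change above, is marked secure and never vmapped.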