Many embedded drm devices do not have an IOMMU and have no dedicated
memory for graphics. These devices use CMA (Contiguous Memory Allocator)
backed graphics memory. This patch provides helper functions so that
drivers are able to share the code.

Signed-off-by: Sascha Hauer <s.hauer@xxxxxxxxxxxxxx>
---
Lars-Peter, please let me know if this fits your needs or if you are
missing something.

 drivers/gpu/drm/Kconfig              |    6 +
 drivers/gpu/drm/Makefile             |    1 +
 drivers/gpu/drm/drm_gem_cma_helper.c |  321 ++++++++++++++++++++++++++++++++++
 include/drm/drm_gem_cma_helper.h     |   47 +++++
 4 files changed, 375 insertions(+)
 create mode 100644 drivers/gpu/drm/drm_gem_cma_helper.c
 create mode 100644 include/drm/drm_gem_cma_helper.h

diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index e354bc0..f62717e 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -53,6 +53,12 @@ config DRM_TTM
 	  GPU memory types. Will be enabled automatically if a device
 	  driver uses it.
 
+config DRM_GEM_CMA_HELPER
+	tristate
+	depends on DRM
+	help
+	  Choose this if you need the GEM CMA helper functions.
+
 config DRM_TDFX
 	tristate "3dfx Banshee/Voodoo3+"
 	depends on DRM && PCI
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index c20da5b..9a0d98a 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -15,6 +15,7 @@ drm-y := drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \
 		drm_trace_points.o drm_global.o drm_prime.o
 
 drm-$(CONFIG_COMPAT) += drm_ioc32.o
+drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o
 
 drm-usb-y := drm_usb.o
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
new file mode 100644
index 0000000..75534ff
--- /dev/null
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -0,0 +1,321 @@
+/*
+ * drm gem cma (contiguous memory allocator) helper functions
+ *
+ * Copyright (C) 2012 Sascha Hauer, Pengutronix
+ *
+ * Based on Samsung Exynos code
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include <drm/drm_gem_cma_helper.h>
+
+static unsigned int convert_to_vm_err_msg(int msg)
+{
+	unsigned int out_msg;
+
+	switch (msg) {
+	case 0:
+	case -ERESTARTSYS:
+	case -EINTR:
+		out_msg = VM_FAULT_NOPAGE;
+		break;
+
+	case -ENOMEM:
+		out_msg = VM_FAULT_OOM;
+		break;
+
+	default:
+		out_msg = VM_FAULT_SIGBUS;
+		break;
+	}
+
+	return out_msg;
+}
+
+static unsigned int get_gem_mmap_offset(struct drm_gem_object *obj)
+{
+	return (unsigned int)obj->map_list.hash.key << PAGE_SHIFT;
+}
+
+static void drm_gem_cma_buf_destroy(struct drm_device *drm,
+		struct drm_gem_cma_obj *cma_obj)
+{
+	dma_free_writecombine(drm->dev, cma_obj->base.size, cma_obj->vaddr,
+			cma_obj->paddr);
+}
+
+/*
+ * drm_gem_cma_create - allocate an object with the given size
+ *
+ * returns a struct drm_gem_cma_obj* on success or an ERR_PTR value
+ * on failure.
+ */
+struct drm_gem_cma_obj *drm_gem_cma_create(struct drm_device *drm,
+		unsigned int size)
+{
+	struct drm_gem_cma_obj *cma_obj;
+	struct drm_gem_object *gem_obj;
+	int ret;
+
+	size = roundup(size, PAGE_SIZE);
+
+	cma_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL);
+	if (!cma_obj)
+		return ERR_PTR(-ENOMEM);
+
+	cma_obj->vaddr = dma_alloc_writecombine(drm->dev, size,
+			(dma_addr_t *)&cma_obj->paddr,
+			GFP_KERNEL | __GFP_NOWARN);
+	if (!cma_obj->vaddr) {
+		dev_err(drm->dev, "failed to allocate buffer with size %d\n", size);
+		ret = -ENOMEM;
+		goto err_dma_alloc;
+	}
+
+	gem_obj = &cma_obj->base;
+
+	ret = drm_gem_object_init(drm, gem_obj, size);
+	if (ret)
+		goto err_obj_init;
+
+	ret = drm_gem_create_mmap_offset(gem_obj);
+	if (ret)
+		goto err_create_mmap_offset;
+
+	return cma_obj;
+
+err_create_mmap_offset:
+	drm_gem_object_release(gem_obj);
+
+err_obj_init:
+	drm_gem_cma_buf_destroy(drm, cma_obj);
+
+err_dma_alloc:
+	kfree(cma_obj);
+
+	return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(drm_gem_cma_create);
+
+/*
+ * drm_gem_cma_create_with_handle - allocate an object with the given
+ * size and create a gem handle on it
+ *
+ * returns a struct drm_gem_cma_obj* on success or an ERR_PTR value
+ * on failure.
+ */
+struct drm_gem_cma_obj *drm_gem_cma_create_with_handle(
+		struct drm_file *file_priv,
+		struct drm_device *drm, unsigned int size,
+		unsigned int *handle)
+{
+	struct drm_gem_cma_obj *cma_obj;
+	struct drm_gem_object *gem_obj;
+	int ret;
+
+	cma_obj = drm_gem_cma_create(drm, size);
+	if (IS_ERR(cma_obj))
+		return cma_obj;
+
+	gem_obj = &cma_obj->base;
+
+	/*
+	 * allocate an id from the idr table under which the obj is
+	 * registered; the handle holds the id userspace can see.
+	 */
+	ret = drm_gem_handle_create(file_priv, gem_obj, handle);
+	if (ret)
+		goto err_handle_create;
+
+	/* drop reference from allocate - handle holds it now. */
+	drm_gem_object_unreference_unlocked(gem_obj);
+
+	return cma_obj;
+
+err_handle_create:
+	drm_gem_cma_free_object(gem_obj);
+
+	return ERR_PTR(ret);
+}
+
+static int drm_gem_cma_mmap_buffer(struct file *filp,
+		struct vm_area_struct *vma)
+{
+	struct drm_gem_object *gem_obj = filp->private_data;
+	struct drm_gem_cma_obj *cma_obj = to_dma_alloc_gem_obj(gem_obj);
+	unsigned long pfn, vm_size;
+
+	vma->vm_flags |= VM_IO | VM_RESERVED;
+
+	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+	vma->vm_file = filp;
+
+	vm_size = vma->vm_end - vma->vm_start;
+
+	/* check if user-requested size is valid. */
+	if (vm_size > gem_obj->size)
+		return -EINVAL;
+
+	/*
+	 * get the page frame number of the physical memory to be
+	 * mapped to user space.
+	 */
+	pfn = cma_obj->paddr >> PAGE_SHIFT;
+
+	if (remap_pfn_range(vma, vma->vm_start, pfn, vm_size,
+			vma->vm_page_prot)) {
+		dev_err(gem_obj->dev->dev, "failed to remap pfn range.\n");
+		return -EAGAIN;
+	}
+
+	return 0;
+}
+
+static const struct file_operations drm_gem_cma_fops = {
+	.mmap = drm_gem_cma_mmap_buffer,
+};
+
+/*
+ * drm_gem_cma_free_object - (struct drm_driver)->gem_free_object callback
+ * function
+ */
+void drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
+{
+	struct drm_gem_cma_obj *cma_obj;
+
+	if (gem_obj->map_list.map)
+		drm_gem_free_mmap_offset(gem_obj);
+
+	drm_gem_object_release(gem_obj);
+
+	cma_obj = to_dma_alloc_gem_obj(gem_obj);
+
+	drm_gem_cma_buf_destroy(gem_obj->dev, cma_obj);
+
+	kfree(cma_obj);
+}
+EXPORT_SYMBOL_GPL(drm_gem_cma_free_object);
+
+/*
+ * drm_gem_cma_dumb_create - (struct drm_driver)->dumb_create callback
+ * function
+ *
+ * This aligns the pitch and size arguments to the minimum required. Wrap
+ * this into your own function if you need bigger alignment.
+ */
+int drm_gem_cma_dumb_create(struct drm_file *file_priv,
+		struct drm_device *dev, struct drm_mode_create_dumb *args)
+{
+	struct drm_gem_cma_obj *cma_obj;
+
+	if (args->pitch < args->width * args->bpp >> 3)
+		args->pitch = args->width * args->bpp >> 3;
+
+	if (args->size < args->pitch * args->height)
+		args->size = args->pitch * args->height;
+
+	cma_obj = drm_gem_cma_create_with_handle(file_priv, dev,
+			args->size, &args->handle);
+	if (IS_ERR(cma_obj))
+		return PTR_ERR(cma_obj);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_create);
+
+/*
+ * drm_gem_cma_dumb_map_offset - (struct drm_driver)->dumb_map_offset callback
+ * function
+ */
+int drm_gem_cma_dumb_map_offset(struct drm_file *file_priv,
+		struct drm_device *drm, uint32_t handle, uint64_t *offset)
+{
+	struct drm_gem_cma_obj *cma_obj;
+	struct drm_gem_object *gem_obj;
+
+	mutex_lock(&drm->struct_mutex);
+
+	gem_obj = drm_gem_object_lookup(drm, file_priv, handle);
+	if (!gem_obj) {
+		dev_err(drm->dev, "failed to lookup gem object\n");
+		mutex_unlock(&drm->struct_mutex);
+		return -EINVAL;
+	}
+
+	cma_obj = to_dma_alloc_gem_obj(gem_obj);
+
+	*offset = get_gem_mmap_offset(&cma_obj->base);
+
+	drm_gem_object_unreference(gem_obj);
+
+	mutex_unlock(&drm->struct_mutex);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_map_offset);
+
+/*
+ * drm_gem_cma_fault - (struct vm_operations_struct)->fault callback function
+ */
+int drm_gem_cma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	struct drm_gem_object *gem_obj = vma->vm_private_data;
+	struct drm_gem_cma_obj *cma_obj = to_dma_alloc_gem_obj(gem_obj);
+	struct drm_device *dev = gem_obj->dev;
+	unsigned long pfn;
+	pgoff_t page_offset;
+	int ret;
+
+	page_offset = ((unsigned long)vmf->virtual_address -
+			vma->vm_start) >> PAGE_SHIFT;
+
+	mutex_lock(&dev->struct_mutex);
+
+	pfn = (cma_obj->paddr >> PAGE_SHIFT) + page_offset;
+
+	ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);
+
+	mutex_unlock(&dev->struct_mutex);
+
+	return convert_to_vm_err_msg(ret);
+}
+EXPORT_SYMBOL_GPL(drm_gem_cma_fault);
+
+/*
+ * drm_gem_cma_mmap - (struct file_operations)->mmap callback function
+ */
+int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	int ret;
+
+	ret = drm_gem_mmap(filp, vma);
+	if (ret)
+		return ret;
+
+	vma->vm_flags &= ~VM_PFNMAP;
+	vma->vm_flags |= VM_MIXEDMAP;
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(drm_gem_cma_mmap);
+
+/*
+ * drm_gem_cma_dumb_destroy - (struct drm_driver)->dumb_destroy callback function
+ */
+int drm_gem_cma_dumb_destroy(struct drm_file *file_priv,
+		struct drm_device *drm, unsigned int handle)
+{
+	return drm_gem_handle_delete(file_priv, handle);
+}
+EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_destroy);
diff --git a/include/drm/drm_gem_cma_helper.h b/include/drm/drm_gem_cma_helper.h
new file mode 100644
index 0000000..53b007c
--- /dev/null
+++ b/include/drm/drm_gem_cma_helper.h
@@ -0,0 +1,47 @@
+#ifndef __DRM_GEM_DMA_ALLOC_HELPER_H__
+#define __DRM_GEM_DMA_ALLOC_HELPER_H__
+
+struct drm_gem_cma_obj {
+	struct drm_gem_object base;
+	dma_addr_t paddr;
+	void __iomem *vaddr;
+};
+
+#define to_dma_alloc_gem_obj(x) \
+	container_of(x, struct drm_gem_cma_obj, base)
+
+/* free gem object. */
+void drm_gem_cma_free_object(struct drm_gem_object *gem_obj);
+
+/* create memory region for drm framebuffer. */
+int drm_gem_cma_dumb_create(struct drm_file *file_priv,
+		struct drm_device *drm, struct drm_mode_create_dumb *args);
+
+/* map memory region for drm framebuffer to user space. */
+int drm_gem_cma_dumb_map_offset(struct drm_file *file_priv,
+		struct drm_device *drm, uint32_t handle, uint64_t *offset);
+
+/* page fault handler: maps the faulting (virtual) address to physical memory. */
+int drm_gem_cma_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
+
+/* set vm_flags; the vm attributes can be adjusted here. */
+int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma);
+
+/*
+ * destroy the allocated memory region.
+ * - the gem handle and the physical memory region pointed to by the
+ *   gem object are released by drm_gem_handle_delete().
+ */
+int drm_gem_cma_dumb_destroy(struct drm_file *file_priv,
+		struct drm_device *drm, unsigned int handle);
+
+/* allocate physical memory. */
+struct drm_gem_cma_obj *drm_gem_cma_create(struct drm_device *drm,
+		unsigned int size);
+
+struct drm_gem_cma_obj *drm_gem_cma_create_with_handle(
+		struct drm_file *file_priv,
+		struct drm_device *drm, unsigned int size,
+		unsigned int *handle);
+
+#endif
-- 
1.7.10
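
As a usage illustration (not part of the patch itself), a driver built on
these helpers would typically wire them into its struct drm_driver roughly
as sketched below; the foo_* identifiers are placeholders, and the exact
set of remaining callbacks is driver specific:

/*
 * Illustrative sketch only: how a driver might hook up the CMA helpers.
 * The foo_* names are hypothetical and not part of this patch.
 */
#include <drm/drmP.h>
#include <drm/drm_gem_cma_helper.h>

static const struct vm_operations_struct foo_gem_vm_ops = {
	.fault = drm_gem_cma_fault,	/* resolve faults from the CMA backing memory */
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct file_operations foo_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
	.mmap = drm_gem_cma_mmap,	/* switches the VMA to VM_MIXEDMAP */
	.poll = drm_poll,
	.read = drm_read,
};

static struct drm_driver foo_driver = {
	.driver_features = DRIVER_GEM | DRIVER_MODESET,
	.gem_free_object = drm_gem_cma_free_object,
	.gem_vm_ops = &foo_gem_vm_ops,
	.dumb_create = drm_gem_cma_dumb_create,
	.dumb_map_offset = drm_gem_cma_dumb_map_offset,
	.dumb_destroy = drm_gem_cma_dumb_destroy,
	.fops = &foo_fops,
	/* .load, .unload and the mode setting callbacks are driver specific */
};

The driver's Kconfig entry would then select DRM_GEM_CMA_HELPER so the
helper code gets built.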
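
From userspace, the paths implemented by these helpers are exercised
through the standard dumb-buffer ioctls. A minimal sketch using libdrm
(error handling trimmed, function name hypothetical):

/*
 * Illustrative userspace sketch: allocate and map a dumb buffer.
 * DRM_IOCTL_MODE_CREATE_DUMB ends up in drm_gem_cma_dumb_create(),
 * DRM_IOCTL_MODE_MAP_DUMB in drm_gem_cma_dumb_map_offset(), and the
 * mmap() call in drm_gem_cma_mmap()/drm_gem_cma_fault().
 */
#include <stdint.h>
#include <sys/mman.h>
#include <xf86drm.h>
#include <drm/drm.h>
#include <drm/drm_mode.h>

static void *map_dumb_buffer(int fd, uint32_t width, uint32_t height,
		uint32_t *pitch)
{
	struct drm_mode_create_dumb create = {
		.width = width,
		.height = height,
		.bpp = 32,
	};
	struct drm_mode_map_dumb map = { 0 };

	if (drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create))
		return NULL;

	map.handle = create.handle;
	if (drmIoctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map))
		return NULL;

	*pitch = create.pitch;

	/* map.offset is the fake mmap offset of the gem object;
	 * the caller checks the result against MAP_FAILED. */
	return mmap(NULL, create.size, PROT_READ | PROT_WRITE, MAP_SHARED,
			fd, map.offset);
}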