Hi Sascha, On Wednesday 27 June 2012 15:30:18 Sascha Hauer wrote: > Many embedded drm devices do not have a IOMMU and no dedicated > memory for graphics. These devices use CMA (Contiguous Memory > Allocator) backed graphics memory. This patch provides helper > functions to be able to share the code. The code technically does > not depend on CMA as the backend allocator, the name has been chosen > because cma makes for a nice, short but still descriptive function > prefix. > > Signed-off-by: Sascha Hauer <s.hauer@xxxxxxxxxxxxxx> > --- > > changes since v3: > - *really* use DIV_ROUND_UP(args->width * args->bpp, 8) to calculate the > pitch changes since v2: > - make drm_gem_cma_create_with_handle static > - use DIV_ROUND_UP(args->width * args->bpp, 8) to calculate the pitch > - make drm_gem_cma_vm_ops const > - add missing #include <linux/export.h> > changes since v1: > - map whole buffer at mmap time, not page by page at fault time > - several cleanups as suggested by Lars-Peter Clausen and Laurent Pinchart > > drivers/gpu/drm/Kconfig | 6 + > drivers/gpu/drm/Makefile | 1 + > drivers/gpu/drm/drm_gem_cma_helper.c | 251 +++++++++++++++++++++++++++++++ > include/drm/drm_gem_cma_helper.h | 44 ++++++ > 4 files changed, 302 insertions(+) > create mode 100644 drivers/gpu/drm/drm_gem_cma_helper.c > create mode 100644 include/drm/drm_gem_cma_helper.h > > diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig > index dc5df12..8f362ec 100644 > --- a/drivers/gpu/drm/Kconfig > +++ b/drivers/gpu/drm/Kconfig > @@ -53,6 +53,12 @@ config DRM_TTM > GPU memory types. Will be enabled automatically if a device driver > uses it. 
> > +config DRM_GEM_CMA_HELPER > + tristate > + depends on DRM > + help > + Choose this if you need the GEM CMA helper functions > + > config DRM_TDFX > tristate "3dfx Banshee/Voodoo3+" > depends on DRM && PCI > diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile > index 0487ff6..8759cda 100644 > --- a/drivers/gpu/drm/Makefile > +++ b/drivers/gpu/drm/Makefile > @@ -15,6 +15,7 @@ drm-y := drm_auth.o drm_buffer.o drm_bufs.o > drm_cache.o \ drm_trace_points.o drm_global.o drm_prime.o > > drm-$(CONFIG_COMPAT) += drm_ioc32.o > +drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o This results in a build failure for me with CONFIG_DRM=y CONFIG_DRM_KMS_HELPER=m # CONFIG_DRM_LOAD_EDID_FIRMWARE is not set CONFIG_DRM_GEM_CMA_HELPER=m CONFIG_DRM_KMS_CMA_HELPER=m drm_gem_cma_helper.o isn't compiled at all. Can you reproduce the problem? > drm-usb-y := drm_usb.o > > diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c > b/drivers/gpu/drm/drm_gem_cma_helper.c new file mode 100644 > index 0000000..1aa8fee > --- /dev/null > +++ b/drivers/gpu/drm/drm_gem_cma_helper.c > @@ -0,0 +1,251 @@ > +/* > + * drm gem CMA (contiguous memory allocator) helper functions > + * > + * Copyright (C) 2012 Sascha Hauer, Pengutronix > + * > + * Based on Samsung Exynos code > + * > + * Copyright (c) 2011 Samsung Electronics Co., Ltd. > + * > + * This program is free software; you can redistribute it and/or > + * modify it under the terms of the GNU General Public License > + * as published by the Free Software Foundation; either version 2 > + * of the License, or (at your option) any later version. > + * This program is distributed in the hope that it will be useful, > + * but WITHOUT ANY WARRANTY; without even the implied warranty of > + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the > + * GNU General Public License for more details. 
> + */ > + > +#include <linux/mm.h> > +#include <linux/slab.h> > +#include <linux/mutex.h> > +#include <linux/export.h> > +#include <linux/dma-mapping.h> > + > +#include <drm/drmP.h> > +#include <drm/drm.h> > +#include <drm/drm_gem_cma_helper.h> > + > +static unsigned int get_gem_mmap_offset(struct drm_gem_object *obj) > +{ > + return (unsigned int)obj->map_list.hash.key << PAGE_SHIFT; > +} > + > +static void drm_gem_cma_buf_destroy(struct drm_device *drm, > + struct drm_gem_cma_object *cma_obj) > +{ > + dma_free_writecombine(drm->dev, cma_obj->base.size, cma_obj->vaddr, > + cma_obj->paddr); > +} > + > +/* > + * drm_gem_cma_create - allocate an object with the given size > + * > + * returns a struct drm_gem_cma_object* on success or ERR_PTR values > + * on failure. > + */ > +struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm, > + unsigned int size) > +{ > + struct drm_gem_cma_object *cma_obj; > + struct drm_gem_object *gem_obj; > + int ret; > + > + size = round_up(size, PAGE_SIZE); > + > + cma_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL); > + if (!cma_obj) > + return ERR_PTR(-ENOMEM); > + > + cma_obj->vaddr = dma_alloc_writecombine(drm->dev, size, > + &cma_obj->paddr, GFP_KERNEL | __GFP_NOWARN); > + if (!cma_obj->vaddr) { > + dev_err(drm->dev, "failed to allocate buffer with size %d\n", size); > + ret = -ENOMEM; > + goto err_dma_alloc; > + } > + > + gem_obj = &cma_obj->base; > + > + ret = drm_gem_object_init(drm, gem_obj, size); > + if (ret) > + goto err_obj_init; > + > + ret = drm_gem_create_mmap_offset(gem_obj); > + if (ret) > + goto err_create_mmap_offset; > + > + return cma_obj; > + > +err_create_mmap_offset: > + drm_gem_object_release(gem_obj); > + > +err_obj_init: > + drm_gem_cma_buf_destroy(drm, cma_obj); > + > +err_dma_alloc: > + kfree(cma_obj); > + > + return ERR_PTR(ret); > +} > +EXPORT_SYMBOL_GPL(drm_gem_cma_create); > + > +/* > + * drm_gem_cma_create_with_handle - allocate an object with the given > + * size and create a gem handle on 
it > + * > + * returns a struct drm_gem_cma_object* on success or ERR_PTR values > + * on failure. > + */ > +static struct drm_gem_cma_object *drm_gem_cma_create_with_handle( > + struct drm_file *file_priv, > + struct drm_device *drm, unsigned int size, > + unsigned int *handle) > +{ > + struct drm_gem_cma_object *cma_obj; > + struct drm_gem_object *gem_obj; > + int ret; > + > + cma_obj = drm_gem_cma_create(drm, size); > + if (IS_ERR(cma_obj)) > + return cma_obj; > + > + gem_obj = &cma_obj->base; > + > + /* > + * allocate a id of idr table where the obj is registered > + * and handle has the id what user can see. > + */ > + ret = drm_gem_handle_create(file_priv, gem_obj, handle); > + if (ret) > + goto err_handle_create; > + > + /* drop reference from allocate - handle holds it now. */ > + drm_gem_object_unreference_unlocked(gem_obj); > + > + return cma_obj; > + > +err_handle_create: > + drm_gem_cma_free_object(gem_obj); > + > + return ERR_PTR(ret); > +} > + > +/* > + * drm_gem_cma_free_object - (struct drm_driver)->gem_free_object callback > + * function > + */ > +void drm_gem_cma_free_object(struct drm_gem_object *gem_obj) > +{ > + struct drm_gem_cma_object *cma_obj; > + > + if (gem_obj->map_list.map) > + drm_gem_free_mmap_offset(gem_obj); > + > + drm_gem_object_release(gem_obj); > + > + cma_obj = to_drm_gem_cma_obj(gem_obj); > + > + drm_gem_cma_buf_destroy(gem_obj->dev, cma_obj); > + > + kfree(cma_obj); > +} > +EXPORT_SYMBOL_GPL(drm_gem_cma_free_object); > + > +/* > + * drm_gem_cma_dumb_create - (struct drm_driver)->dumb_create callback > + * function > + * > + * This aligns the pitch and size arguments to the minimum required. wrap > + * this into your own function if you need bigger alignment. 
> + */ > +int drm_gem_cma_dumb_create(struct drm_file *file_priv, > + struct drm_device *dev, struct drm_mode_create_dumb *args) > +{ > + struct drm_gem_cma_object *cma_obj; > + int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8); > + > + if (args->pitch < min_pitch) > + args->pitch = min_pitch; > + > + if (args->size < args->pitch * args->height) > + args->size = args->pitch * args->height; > + > + cma_obj = drm_gem_cma_create_with_handle(file_priv, dev, > + args->size, &args->handle); > + if (IS_ERR(cma_obj)) > + return PTR_ERR(cma_obj); > + > + return 0; > +} > +EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_create); > + > +/* > + * drm_gem_cma_dumb_map_offset - (struct drm_driver)->dumb_map_offset > callback + * function > + */ > +int drm_gem_cma_dumb_map_offset(struct drm_file *file_priv, > + struct drm_device *drm, uint32_t handle, uint64_t *offset) > +{ > + struct drm_gem_object *gem_obj; > + > + mutex_lock(&drm->struct_mutex); > + > + gem_obj = drm_gem_object_lookup(drm, file_priv, handle); > + if (!gem_obj) { > + dev_err(drm->dev, "failed to lookup gem object\n"); > + mutex_unlock(&drm->struct_mutex); > + return -EINVAL; > + } > + > + *offset = get_gem_mmap_offset(gem_obj); > + > + drm_gem_object_unreference(gem_obj); > + > + mutex_unlock(&drm->struct_mutex); > + > + return 0; > +} > +EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_map_offset); > + > +const struct vm_operations_struct drm_gem_cma_vm_ops = { > + .open = drm_gem_vm_open, > + .close = drm_gem_vm_close, > +}; > +EXPORT_SYMBOL_GPL(drm_gem_cma_vm_ops); > + > +/* > + * drm_gem_cma_mmap - (struct file_operation)->mmap callback function > + */ > +int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma) > +{ > + struct drm_gem_object *gem_obj; > + struct drm_gem_cma_object *cma_obj; > + int ret; > + > + ret = drm_gem_mmap(filp, vma); > + if (ret) > + return ret; > + > + gem_obj = vma->vm_private_data; > + cma_obj = to_drm_gem_cma_obj(gem_obj); > + > + ret = remap_pfn_range(vma, vma->vm_start, cma_obj->paddr 
>> PAGE_SHIFT, > + vma->vm_end - vma->vm_start, vma->vm_page_prot); > + if (ret) > + drm_gem_vm_close(vma); > + > + return ret; > +} > +EXPORT_SYMBOL_GPL(drm_gem_cma_mmap); > + > +/* > + * drm_gem_cma_dumb_destroy - (struct drm_driver)->dumb_destroy callback > function + */ > +int drm_gem_cma_dumb_destroy(struct drm_file *file_priv, > + struct drm_device *drm, unsigned int handle) > +{ > + return drm_gem_handle_delete(file_priv, handle); > +} > +EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_destroy); > diff --git a/include/drm/drm_gem_cma_helper.h > b/include/drm/drm_gem_cma_helper.h new file mode 100644 > index 0000000..f0f6b1a > --- /dev/null > +++ b/include/drm/drm_gem_cma_helper.h > @@ -0,0 +1,44 @@ > +#ifndef __DRM_GEM_CMA_HELPER_H__ > +#define __DRM_GEM_CMA_HELPER_H__ > + > +struct drm_gem_cma_object { > + struct drm_gem_object base; > + dma_addr_t paddr; > + void *vaddr; > +}; > + > +static inline struct drm_gem_cma_object * > +to_drm_gem_cma_obj(struct drm_gem_object *gem_obj) > +{ > + return container_of(gem_obj, struct drm_gem_cma_object, base); > +} > + > +/* free gem object. */ > +void drm_gem_cma_free_object(struct drm_gem_object *gem_obj); > + > +/* create memory region for drm framebuffer. */ > +int drm_gem_cma_dumb_create(struct drm_file *file_priv, > + struct drm_device *drm, struct drm_mode_create_dumb *args); > + > +/* map memory region for drm framebuffer to user space. */ > +int drm_gem_cma_dumb_map_offset(struct drm_file *file_priv, > + struct drm_device *drm, uint32_t handle, uint64_t *offset); > + > +/* set vm_flags and we can change the vm attribute to other one at here. */ > +int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma); + > +/* > + * destroy memory region allocated. > + * - a gem handle and physical memory region pointed by a gem object > + * would be released by drm_gem_handle_delete(). 
> + */ > +int drm_gem_cma_dumb_destroy(struct drm_file *file_priv, > + struct drm_device *drm, unsigned int handle); > + > +/* allocate physical memory. */ > +struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm, > + unsigned int size); > + > +extern const struct vm_operations_struct drm_gem_cma_vm_ops; > + > +#endif /* __DRM_GEM_CMA_HELPER_H__ */ -- Regards, Laurent Pinchart _______________________________________________ dri-devel mailing list dri-devel@xxxxxxxxxxxxxxxxxxxxx http://lists.freedesktop.org/mailman/listinfo/dri-devel