Add helpers for framebuffers backed by the CMA allocator.

Signed-off-by: Hyun Kwon <hyun.kwon@xxxxxxxxxx>
---
 drivers/gpu/drm/xlnx/xlnx_fb.c | 467 +++++++++++++++++++++++++++++++++++++++++
 drivers/gpu/drm/xlnx/xlnx_fb.h |  30 +++
 2 files changed, 497 insertions(+)
 create mode 100644 drivers/gpu/drm/xlnx/xlnx_fb.c
 create mode 100644 drivers/gpu/drm/xlnx/xlnx_fb.h

diff --git a/drivers/gpu/drm/xlnx/xlnx_fb.c b/drivers/gpu/drm/xlnx/xlnx_fb.c
new file mode 100644
index 0000000..dbe9fbf
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/xlnx_fb.c
@@ -0,0 +1,467 @@
+/*
+ * Xilinx DRM KMS Framebuffer helper
+ *
+ * Copyright (C) 2015 - 2018 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyun.kwon@xxxxxxxxxx>
+ *
+ * Based on drm_fb_cma_helper.c
+ *
+ * Copyright (C) 2012 Analog Device Inc.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+
+#include "xlnx_fb.h"
+
+#define XLNX_MAX_PLANES	4
+
+struct xlnx_fb {
+	struct drm_framebuffer base;
+	struct drm_gem_cma_object *obj[XLNX_MAX_PLANES];
+};
+
+struct xlnx_fbdev {
+	struct drm_fb_helper fb_helper;
+	struct xlnx_fb *fb;
+	unsigned int align;
+	unsigned int vres_mult;
+};
+
+static inline struct xlnx_fbdev *to_fbdev(struct drm_fb_helper *fb_helper)
+{
+	return container_of(fb_helper, struct xlnx_fbdev, fb_helper);
+}
+
+static inline struct xlnx_fb *to_fb(struct drm_framebuffer *base_fb)
+{
+	return container_of(base_fb, struct xlnx_fb, base);
+}
+
+static void xlnx_fb_destroy(struct drm_framebuffer *base_fb)
+{
+	struct xlnx_fb *fb = to_fb(base_fb);
+	int i;
+
+	for (i = 0; i < XLNX_MAX_PLANES; i++)
+		if (fb->obj[i])
+			drm_gem_object_unreference_unlocked(&fb->obj[i]->base);
+
+	drm_framebuffer_cleanup(base_fb);
+	kfree(fb);
+}
+
+static int xlnx_fb_create_handle(struct drm_framebuffer *base_fb,
+				 struct drm_file *file_priv,
+				 unsigned int *handle)
+{
+	struct xlnx_fb *fb = to_fb(base_fb);
+
+	return drm_gem_handle_create(file_priv, &fb->obj[0]->base, handle);
+}
+
+static struct drm_framebuffer_funcs xlnx_fb_funcs = {
+	.destroy = xlnx_fb_destroy,
+	.create_handle = xlnx_fb_create_handle,
+};
+
+/**
+ * xlnx_fb_alloc - Allocate a xlnx_fb
+ * @drm: DRM object
+ * @mode_cmd: drm_mode_fb_cmd2 struct
+ * @obj: pointers to the backing drm_gem_cma_objects
+ * @num_planes: number of planes to be allocated
+ *
+ * This function is based on drm_fb_cma_alloc().
+ *
+ * Return: a xlnx_fb object, or ERR_PTR.
+ */
+static struct xlnx_fb *
+xlnx_fb_alloc(struct drm_device *drm,
+	      const struct drm_mode_fb_cmd2 *mode_cmd,
+	      struct drm_gem_cma_object **obj, unsigned int num_planes)
+{
+	struct xlnx_fb *fb;
+	int ret;
+	int i;
+
+	fb = kzalloc(sizeof(*fb), GFP_KERNEL);
+	if (!fb)
+		return ERR_PTR(-ENOMEM);
+
+	drm_helper_mode_fill_fb_struct(drm, &fb->base, mode_cmd);
+
+	for (i = 0; i < num_planes; i++)
+		fb->obj[i] = obj[i];
+
+	ret = drm_framebuffer_init(drm, &fb->base, &xlnx_fb_funcs);
+	if (ret) {
+		dev_err(drm->dev, "Failed to initialize fb: %d\n", ret);
+		kfree(fb);
+		return ERR_PTR(ret);
+	}
+
+	return fb;
+}
+
+/**
+ * xlnx_fb_get_paddr - Get physical address of framebuffer
+ * @base_fb: the framebuffer
+ * @plane: which plane
+ *
+ * This function is based on drm_fb_cma_get_gem_obj().
+ *
+ * Return: a physical address of the plane, or 0
+ */
+dma_addr_t
+xlnx_fb_get_paddr(struct drm_framebuffer *base_fb, unsigned int plane)
+{
+	struct xlnx_fb *fb = to_fb(base_fb);
+
+	if (plane >= XLNX_MAX_PLANES)
+		return 0;
+
+	return fb->obj[plane]->paddr;
+}
+EXPORT_SYMBOL_GPL(xlnx_fb_get_paddr);
+
+static int
+xlnx_fb_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg)
+{
+	struct drm_fb_helper *fb_helper = info->par;
+	unsigned int i;
+	int ret = 0;
+
+	switch (cmd) {
+	case FBIO_WAITFORVSYNC:
+		for (i = 0; i < fb_helper->crtc_count; i++) {
+			struct drm_mode_set *mode_set;
+			struct drm_crtc *crtc;
+
+			mode_set = &fb_helper->crtc_info[i].mode_set;
+			crtc = mode_set->crtc;
+			ret = drm_crtc_vblank_get(crtc);
+			if (!ret) {
+				drm_crtc_wait_one_vblank(crtc);
+				drm_crtc_vblank_put(crtc);
+			}
+		}
+		return ret;
+	default:
+		return -ENOTTY;
+	}
+
+	return 0;
+}
+
+static struct fb_ops xlnx_fbdev_ops = {
+	.owner = THIS_MODULE,
+	.fb_fillrect = sys_fillrect,
+	.fb_copyarea = sys_copyarea,
+	.fb_imageblit = sys_imageblit,
+	.fb_check_var = drm_fb_helper_check_var,
+	.fb_set_par = drm_fb_helper_set_par,
+	.fb_blank = drm_fb_helper_blank,
+	.fb_pan_display = drm_fb_helper_pan_display,
+	.fb_setcmap = drm_fb_helper_setcmap,
+	.fb_ioctl = xlnx_fb_ioctl,
+};
+
+/**
+ * xlnx_fbdev_create - Create the fbdev with a framebuffer
+ * @fb_helper: fb helper structure
+ * @size: framebuffer size info
+ *
+ * This function is based on drm_fbdev_cma_create().
+ *
+ * Return: 0 if successful, or the error code.
+ */
+static int xlnx_fbdev_create(struct drm_fb_helper *fb_helper,
+			     struct drm_fb_helper_surface_size *size)
+{
+	struct xlnx_fbdev *fbdev = to_fbdev(fb_helper);
+	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
+	struct drm_device *drm = fb_helper->dev;
+	struct drm_gem_cma_object *obj;
+	struct drm_framebuffer *base_fb;
+	unsigned int bytes_per_pixel;
+	unsigned long offset;
+	struct fb_info *fbi;
+	size_t bytes;
+	int ret;
+
+	dev_dbg(drm->dev, "surface width(%d), height(%d) and bpp(%d)\n",
+		size->surface_width, size->surface_height, size->surface_bpp);
+
+	bytes_per_pixel = DIV_ROUND_UP(size->surface_bpp, 8);
+
+	mode_cmd.width = size->surface_width;
+	mode_cmd.height = size->surface_height;
+	mode_cmd.pitches[0] = ALIGN(size->surface_width * bytes_per_pixel,
+				    fbdev->align);
+	mode_cmd.pixel_format = xlnx_get_format(drm);
+
+	mode_cmd.height *= fbdev->vres_mult;
+	bytes = mode_cmd.pitches[0] * mode_cmd.height;
+	obj = drm_gem_cma_create(drm, bytes);
+	if (IS_ERR(obj))
+		return PTR_ERR(obj);
+
+	fbi = framebuffer_alloc(0, drm->dev);
+	if (!fbi) {
+		dev_err(drm->dev, "Failed to allocate framebuffer info.\n");
+		ret = -ENOMEM;
+		goto err_drm_gem_cma_free_object;
+	}
+
+	fbdev->fb = xlnx_fb_alloc(drm, &mode_cmd, &obj, 1);
+	if (IS_ERR(fbdev->fb)) {
+		dev_err(drm->dev, "Failed to allocate DRM framebuffer.\n");
+		ret = PTR_ERR(fbdev->fb);
+		goto err_framebuffer_release;
+	}
+
+	base_fb = &fbdev->fb->base;
+	fb_helper->fb = base_fb;
+	fb_helper->fbdev = fbi;
+	fbi->par = fb_helper;
+	fbi->flags = FBINFO_FLAG_DEFAULT;
+	fbi->fbops = &xlnx_fbdev_ops;
+
+	ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
+	if (ret) {
+		dev_err(drm->dev, "Failed to allocate color map.\n");
+		goto err_xlnx_fb_destroy;
+	}
+
+	drm_fb_helper_fill_fix(fbi, base_fb->pitches[0],
+			       base_fb->format->depth);
+	drm_fb_helper_fill_var(fbi, fb_helper, base_fb->width, base_fb->height);
+	fbi->var.yres = base_fb->height / fbdev->vres_mult;
+
+	offset = fbi->var.xoffset * bytes_per_pixel;
+	offset += fbi->var.yoffset * base_fb->pitches[0];
+
+	drm->mode_config.fb_base = (resource_size_t)obj->paddr;
+	fbi->screen_base = (char __iomem *)(obj->vaddr + offset);
+	fbi->fix.smem_start = (unsigned long)(obj->paddr + offset);
+	fbi->screen_size = bytes;
+	fbi->fix.smem_len = bytes;
+
+	return 0;
+
+err_xlnx_fb_destroy:
+	drm_framebuffer_unregister_private(base_fb);
+	xlnx_fb_destroy(base_fb);
+err_framebuffer_release:
+	framebuffer_release(fbi);
+err_drm_gem_cma_free_object:
+	drm_gem_cma_free_object(&obj->base);
+	return ret;
+}
+
+static struct drm_fb_helper_funcs xlnx_fb_helper_funcs = {
+	.fb_probe = xlnx_fbdev_create,
+};
+
+/**
+ * xlnx_fb_init - Allocate and initialize the Xilinx framebuffer
+ * @drm: DRM device
+ * @preferred_bpp: preferred bits per pixel for the device
+ * @max_conn_count: maximum number of connectors
+ * @align: alignment value for pitch
+ * @vres_mult: multiplier for virtual resolution
+ *
+ * This function is based on drm_fbdev_cma_init().
+ *
+ * Return: a newly allocated drm_fb_helper struct or an ERR_PTR.
+ */
+struct drm_fb_helper *
+xlnx_fb_init(struct drm_device *drm, int preferred_bpp,
+	     unsigned int max_conn_count, unsigned int align,
+	     unsigned int vres_mult)
+{
+	struct xlnx_fbdev *fbdev;
+	struct drm_fb_helper *fb_helper;
+	int ret;
+
+	fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL);
+	if (!fbdev)
+		return ERR_PTR(-ENOMEM);
+
+	fbdev->vres_mult = vres_mult;
+	fbdev->align = align;
+	fb_helper = &fbdev->fb_helper;
+	drm_fb_helper_prepare(drm, fb_helper, &xlnx_fb_helper_funcs);
+
+	ret = drm_fb_helper_init(drm, fb_helper, max_conn_count);
+	if (ret < 0) {
+		dev_err(drm->dev, "Failed to initialize drm fb helper.\n");
+		goto err_free;
+	}
+
+	ret = drm_fb_helper_single_add_all_connectors(fb_helper);
+	if (ret < 0) {
+		dev_err(drm->dev, "Failed to add connectors.\n");
+		goto err_drm_fb_helper_fini;
+	}
+
+	ret = drm_fb_helper_initial_config(fb_helper, preferred_bpp);
+	if (ret < 0) {
+		dev_err(drm->dev, "Failed to set initial hw configuration.\n");
+		goto err_drm_fb_helper_fini;
+	}
+
+	return fb_helper;
+
+err_drm_fb_helper_fini:
+	drm_fb_helper_fini(fb_helper);
+err_free:
+	kfree(fbdev);
+	return ERR_PTR(ret);
+}
+
+/**
+ * xlnx_fbdev_defio_fini - Free the defio fb
+ * @fbi: fb_info struct
+ *
+ * This function is based on drm_fbdev_cma_defio_fini().
+ */
+static void xlnx_fbdev_defio_fini(struct fb_info *fbi)
+{
+	if (!fbi->fbdefio)
+		return;
+
+	fb_deferred_io_cleanup(fbi);
+	kfree(fbi->fbdefio);
+	kfree(fbi->fbops);
+}
+
+/**
+ * xlnx_fb_fini - Free the Xilinx framebuffer
+ * @fb_helper: drm_fb_helper struct
+ *
+ * This function is based on drm_fbdev_cma_fini().
+ */
+void xlnx_fb_fini(struct drm_fb_helper *fb_helper)
+{
+	struct xlnx_fbdev *fbdev = to_fbdev(fb_helper);
+
+	drm_fb_helper_unregister_fbi(&fbdev->fb_helper);
+	if (fbdev->fb_helper.fbdev)
+		xlnx_fbdev_defio_fini(fbdev->fb_helper.fbdev);
+
+	if (fbdev->fb_helper.fb)
+		drm_framebuffer_remove(fbdev->fb_helper.fb);
+
+	drm_fb_helper_fini(&fbdev->fb_helper);
+	kfree(fbdev);
+}
+
+/**
+ * xlnx_fb_restore_mode - Restores initial framebuffer mode
+ * @fb_helper: drm_fb_helper struct, may be NULL
+ *
+ * This function is based on drm_fbdev_cma_restore_mode() and usually called
+ * from the Xilinx DRM driver's lastclose callback.
+ */
+void xlnx_fb_restore_mode(struct drm_fb_helper *fb_helper)
+{
+	if (!fb_helper)
+		return;
+	drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper);
+}
+
+/**
+ * xlnx_fb_create - (struct drm_mode_config_funcs *)->fb_create callback
+ * @drm: DRM device
+ * @file_priv: drm file private data
+ * @mode_cmd: mode command for fb creation
+ *
+ * This function creates a drm_framebuffer for the given mode @mode_cmd. This
+ * function is intended to be used for the fb_create callback function of
+ * drm_mode_config_funcs.
+ *
+ * Return: a drm_framebuffer object if successful, or ERR_PTR.
+ */
+struct drm_framebuffer *
+xlnx_fb_create(struct drm_device *drm, struct drm_file *file_priv,
+	       const struct drm_mode_fb_cmd2 *mode_cmd)
+{
+	struct xlnx_fb *fb;
+	struct drm_gem_cma_object *objs[XLNX_MAX_PLANES];
+	struct drm_gem_object *obj;
+	const struct drm_format_info *info;
+	struct drm_format_name_buf format_name;
+	int ret;
+	int i;
+
+	info = drm_format_info(mode_cmd->pixel_format);
+	if (!info) {
+		dev_err(drm->dev, "unsupported framebuffer format %s\n",
+			drm_get_format_name(mode_cmd->pixel_format,
+					    &format_name));
+		ret = -EINVAL;
+		goto err_out;
+	}
+
+	for (i = 0; i < info->num_planes; i++) {
+		unsigned int width = mode_cmd->width / (i ? info->hsub : 1);
+		unsigned int height = mode_cmd->height / (i ? info->vsub : 1);
+		unsigned int min_size;
+
+		obj = drm_gem_object_lookup(file_priv,
+					    mode_cmd->handles[i]);
+		if (!obj) {
+			dev_err(drm->dev, "Failed to lookup GEM object\n");
+			ret = -ENXIO;
+			goto err_gem_object_unreference;
+		}
+
+		min_size = (height - 1) * mode_cmd->pitches[i] + width *
+			   info->cpp[i] + mode_cmd->offsets[i];
+
+		if (obj->size < min_size) {
+			drm_gem_object_unreference_unlocked(obj);
+			ret = -EINVAL;
+			goto err_gem_object_unreference;
+		}
+		objs[i] = to_drm_gem_cma_obj(obj);
+	}
+
+	fb = xlnx_fb_alloc(drm, mode_cmd, objs, i);
+	if (IS_ERR(fb)) {
+		ret = PTR_ERR(fb);
+		goto err_gem_object_unreference;
+	}
+
+	fb->base.format = info;
+
+	return &fb->base;
+
+err_gem_object_unreference:
+	for (i--; i >= 0; i--)
+		drm_gem_object_unreference_unlocked(&objs[i]->base);
+err_out:
+	return ERR_PTR(ret);
+}
+
+/**
+ * xlnx_fb_hotplug_event - Poll for hotplug events
+ * @fb_helper: drm_fb_helper struct, may be NULL
+ *
+ * This function is based on drm_fbdev_cma_hotplug_event() and usually called
+ * from the Xilinx DRM driver's output_poll_changed callback.
+ */
+void xlnx_fb_hotplug_event(struct drm_fb_helper *fb_helper)
+{
+	if (!fb_helper)
+		return;
+	drm_fb_helper_hotplug_event(fb_helper);
+}
diff --git a/drivers/gpu/drm/xlnx/xlnx_fb.h b/drivers/gpu/drm/xlnx/xlnx_fb.h
new file mode 100644
index 0000000..3f7e962
--- /dev/null
+++ b/drivers/gpu/drm/xlnx/xlnx_fb.h
@@ -0,0 +1,30 @@
+/*
+ * Xilinx DRM KMS Framebuffer helper header
+ *
+ * Copyright (C) 2015 - 2018 Xilinx, Inc.
+ *
+ * Author: Hyun Woo Kwon <hyun.kwon@xxxxxxxxxx>
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ */
+
+#ifndef _XLNX_FB_H_
+#define _XLNX_FB_H_
+
+struct drm_fb_helper;
+
+dma_addr_t
+xlnx_fb_get_paddr(struct drm_framebuffer *base_fb, unsigned int plane);
+
+void xlnx_fb_restore_mode(struct drm_fb_helper *fb_helper);
+struct drm_framebuffer *
+xlnx_fb_create(struct drm_device *drm, struct drm_file *file_priv,
+	       const struct drm_mode_fb_cmd2 *mode_cmd);
+void xlnx_fb_hotplug_event(struct drm_fb_helper *fb_helper);
+struct drm_fb_helper *
+xlnx_fb_init(struct drm_device *drm, int preferred_bpp,
+	     unsigned int max_conn_count, unsigned int align,
+	     unsigned int vres_mult);
+void xlnx_fb_fini(struct drm_fb_helper *fb_helper);
+
+#endif /* _XLNX_FB_H_ */
-- 
2.7.4
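
As a usage reference only, not part of the patch: a minimal sketch of how a KMS
pipeline driver could hook these helpers up. The driver-private struct xlnx_drm,
its fb member, and the bpp/connector-count/alignment/vres_mult values passed to
xlnx_fb_init() are hypothetical placeholders, not anything mandated by the
helpers.

#include <linux/err.h>

#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fb_helper.h>

#include "xlnx_fb.h"

/* Hypothetical driver-private state; only the fb_helper pointer matters here. */
struct xlnx_drm {
	struct drm_device *drm;
	struct drm_fb_helper *fb;
};

/* Would be wired up as the drm_driver lastclose callback. */
static void xlnx_drm_lastclose(struct drm_device *drm)
{
	struct xlnx_drm *xdrm = drm->dev_private;

	xlnx_fb_restore_mode(xdrm->fb);
}

/* Wired up as output_poll_changed in the mode_config funcs below. */
static void xlnx_drm_output_poll_changed(struct drm_device *drm)
{
	struct xlnx_drm *xdrm = drm->dev_private;

	xlnx_fb_hotplug_event(xdrm->fb);
}

static const struct drm_mode_config_funcs xlnx_drm_mode_config_funcs = {
	.fb_create = xlnx_fb_create,
	.output_poll_changed = xlnx_drm_output_poll_changed,
	.atomic_check = drm_atomic_helper_check,
	.atomic_commit = drm_atomic_helper_commit,
};

/* Called once CRTCs, planes and connectors have been registered. */
static int xlnx_drm_fbdev_setup(struct xlnx_drm *xdrm)
{
	/* 32 bpp, 1 connector, 64-byte pitch alignment, vres_mult of 1 */
	xdrm->fb = xlnx_fb_init(xdrm->drm, 32, 1, 64, 1);
	return PTR_ERR_OR_ZERO(xdrm->fb);
}

static void xlnx_drm_fbdev_teardown(struct xlnx_drm *xdrm)
{
	if (!IS_ERR_OR_NULL(xdrm->fb))
		xlnx_fb_fini(xdrm->fb);
}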