Bring in dmabuf sharing by implementing the prime_import_sg_table
callback. This helps validate userspace conformance in prime
configurations without using any actual hardware (e.g. in the cloud).

Cc: Rodrigo Siqueira <rodrigosiqueiramelo@xxxxxxxxx>
Cc: Haneen Mohammed <hamohammed.sa@xxxxxxxxx>
Cc: Daniel Vetter <daniel@xxxxxxxx>
Signed-off-by: Oleg Vasilev <oleg.vasilev@xxxxxxxxx>
---
 drivers/gpu/drm/vkms/vkms_drv.c |  6 +++++
 drivers/gpu/drm/vkms/vkms_drv.h |  9 +++++++
 drivers/gpu/drm/vkms/vkms_gem.c | 46 +++++++++++++++++++++++++++++++++
 3 files changed, 61 insertions(+)
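For illustration only (not part of the patch): a minimal userspace sketch
of the kind of prime conformance check this enables. The device paths and
the exporter are assumptions; vkms only gains the import direction here,
so the dmabuf has to come from a device that can export dumb buffers.

/* prime-import-sketch.c: cc prime-import-sketch.c $(pkg-config --cflags --libs libdrm) */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <xf86drm.h>

int main(void)
{
	/* Assumed layout: card0 is a driver that can export, card1 is vkms. */
	int exporter = open("/dev/dri/card0", O_RDWR | O_CLOEXEC);
	int importer = open("/dev/dri/card1", O_RDWR | O_CLOEXEC);
	struct drm_mode_create_dumb create = {
		.width = 64, .height = 64, .bpp = 32,
	};
	uint32_t handle;
	int prime_fd;

	if (exporter < 0 || importer < 0)
		return 1;

	/* Allocate a dumb buffer on the exporting device. */
	if (drmIoctl(exporter, DRM_IOCTL_MODE_CREATE_DUMB, &create))
		return 1;

	/* Wrap the GEM handle in a dmabuf fd on the exporter... */
	if (drmPrimeHandleToFD(exporter, create.handle, DRM_CLOEXEC, &prime_fd))
		return 1;

	/*
	 * ...and import it into vkms. This goes through
	 * drm_gem_prime_fd_to_handle() and ends up in the new
	 * vkms_prime_import_sg_table() callback.
	 */
	if (drmPrimeFDToHandle(importer, prime_fd, &handle))
		return 1;

	printf("dmabuf imported into vkms as GEM handle %u\n", handle);
	return 0;
}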
diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c
index cc53ef88a331..b71c16d9ca09 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.c
+++ b/drivers/gpu/drm/vkms/vkms_drv.c
@@ -10,6 +10,7 @@
  */
 
 #include <linux/module.h>
+#include <linux/dma-buf.h>
 #include <drm/drm_gem.h>
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
@@ -96,6 +97,8 @@ static struct drm_driver vkms_driver = {
 	.gem_vm_ops		= &vkms_gem_vm_ops,
 	.gem_free_object_unlocked = vkms_gem_free_object,
 	.get_vblank_timestamp	= vkms_get_vblank_timestamp,
+	.prime_fd_to_handle	= drm_gem_prime_fd_to_handle,
+	.gem_prime_import_sg_table = vkms_prime_import_sg_table,
 
 	.name			= DRIVER_NAME,
 	.desc			= DRIVER_DESC,
@@ -147,6 +150,9 @@ static int __init vkms_init(void)
 
 	ret = drm_dev_init(&vkms_device->drm, &vkms_driver,
 			   &vkms_device->platform->dev);
 	if (ret)
 		goto out_unregister;
+
+	dma_coerce_mask_and_coherent(vkms_device->drm.dev,
+				     DMA_BIT_MASK(64));
 
diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h
index 12b4db7ac641..fb15101c8f3e 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.h
+++ b/drivers/gpu/drm/vkms/vkms_drv.h
@@ -126,6 +126,9 @@ struct drm_gem_object *vkms_gem_create(struct drm_device *dev,
 				       u32 *handle,
 				       u64 size);
 
+struct vkms_gem_object *vkms_gem_create_private(struct drm_device *dev,
+						u64 size);
+
 vm_fault_t vkms_gem_fault(struct vm_fault *vmf);
 
 int vkms_dumb_create(struct drm_file *file, struct drm_device *dev,
@@ -137,6 +140,12 @@ int vkms_gem_vmap(struct drm_gem_object *obj);
 
 void vkms_gem_vunmap(struct drm_gem_object *obj);
 
+/* Prime */
+struct drm_gem_object *
+vkms_prime_import_sg_table(struct drm_device *dev,
+			   struct dma_buf_attachment *attach,
+			   struct sg_table *sg);
+
 /* CRC Support */
 const char *const *vkms_get_crc_sources(struct drm_crtc *crtc,
 					size_t *count);
diff --git a/drivers/gpu/drm/vkms/vkms_gem.c b/drivers/gpu/drm/vkms/vkms_gem.c
index 69048e73377d..a1b837460f63 100644
--- a/drivers/gpu/drm/vkms/vkms_gem.c
+++ b/drivers/gpu/drm/vkms/vkms_gem.c
@@ -1,5 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0+
 
+#include <linux/dma-buf.h>
 #include <linux/shmem_fs.h>
 
 #include "vkms_drv.h"
@@ -117,6 +118,25 @@ struct drm_gem_object *vkms_gem_create(struct drm_device *dev,
 	return &obj->gem;
 }
 
+struct vkms_gem_object *vkms_gem_create_private(struct drm_device *dev,
+						u64 size)
+{
+	struct vkms_gem_object *obj;
+
+	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+
+	if (!obj)
+		return ERR_PTR(-ENOMEM);
+
+	size = roundup(size, PAGE_SIZE);
+
+	drm_gem_private_object_init(dev, &obj->gem, size);
+
+	mutex_init(&obj->pages_lock);
+
+	return obj;
+}
+
 int vkms_dumb_create(struct drm_file *file, struct drm_device *dev,
 		     struct drm_mode_create_dumb *args)
 {
@@ -217,3 +237,29 @@ int vkms_gem_vmap(struct drm_gem_object *obj)
 	mutex_unlock(&vkms_obj->pages_lock);
 	return ret;
 }
+
+struct drm_gem_object *
+vkms_prime_import_sg_table(struct drm_device *dev,
+			   struct dma_buf_attachment *attach,
+			   struct sg_table *sg)
+{
+	struct vkms_gem_object *obj;
+	int npages;
+
+	obj = vkms_gem_create_private(dev, attach->dmabuf->size);
+	if (IS_ERR(obj))
+		return ERR_CAST(obj);
+
+	npages = PAGE_ALIGN(attach->dmabuf->size) / PAGE_SIZE;
+	DRM_DEBUG_PRIME("Importing %d pages\n", npages);
+
+	obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
+	if (!obj->pages) {
+		vkms_gem_free_object(&obj->gem);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	drm_prime_sg_to_page_addr_arrays(sg, obj->pages, NULL,
+					 npages);
+	return &obj->gem;
+}
-- 
2.22.0