Once userptr becomes part of client API, it is almost a certainty that eventually someone will try to create a new object from a mapping of another client object, e.g. new = vaImport(vaMap(old, &size), size); (using a hypothetical API, not meaning to pick on anyone!) Since this is actually fairly safe to implement and to allow (since it is within a single process space and the memory access passes the standard permissions test) let us not limit the Client possibilities. Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx> Cc: Gwenole Beauchesne <gwenole.beauchesne@xxxxxxxxx> Cc: Michał Winiarski <michal.winiarski@xxxxxxxxx> Cc: Tvrtko Ursulin <tvrtko.ursulin@xxxxxxxxx> --- drivers/gpu/drm/i915/i915_gem_userptr.c | 46 ++++++++++++++++++++++++++++++--- 1 file changed, 43 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c index d96276caab49..8031ebe424fd 100644 --- a/drivers/gpu/drm/i915/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/i915_gem_userptr.c @@ -750,6 +750,35 @@ static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = { .release = i915_gem_userptr_release, }; +static struct drm_i915_gem_object * +find_object_from_vma(struct drm_device *dev, + struct drm_i915_gem_userptr *args) +{ + struct drm_i915_gem_object *obj = NULL; + struct vm_area_struct *vma; + + down_read(&current->mm->mmap_sem); + vma = find_vma(current->mm, args->user_ptr); + if (vma == NULL) + goto out; + + if (vma->vm_ops != dev->driver->gem_vm_ops) + goto out; + + if (vma->vm_start != args->user_ptr || + vma->vm_end != args->user_ptr + args->user_size) { + obj = ERR_PTR(-EINVAL); + goto out; + } + + obj = to_intel_bo(vma->vm_private_data); + drm_gem_object_reference(obj); + +out: + up_read(&current->mm->mmap_sem); + return obj; +} + /** * Creates a new mm object that wraps some normal memory from the process * context - user memory. 
@@ -757,8 +786,11 @@ static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = { * We impose several restrictions upon the memory being mapped * into the GPU. * 1. It must be page aligned (both start/end addresses, i.e ptr and size). - * 2. It must be normal system memory, not a pointer into another map of IO - * space (e.g. it must not be a GTT mmapping of another object). + * 2. It must either be: + * a) normal system memory, not a pointer into another map of IO + * space (e.g. it must not be part of a GTT mmapping of another object). + * b) a pointer to the complete GTT mmap of another object in your + * address space. * 3. We only allow a bo as large as we could in theory map into the GTT, * that is we limit the size to the total size of the GTT. * 4. The bo is marked as being snoopable. The backing pages are left @@ -812,6 +844,14 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file return -ENODEV; } + obj = find_object_from_vma(dev, args); + if (obj) { + if (IS_ERR(obj)) + return PTR_ERR(obj); + else + goto out; + } + obj = i915_gem_object_alloc(dev); if (obj == NULL) return -ENOMEM; @@ -833,7 +873,7 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file if (ret == 0) ret = i915_gem_userptr_init__mmu_notifier(obj, args->flags); if (ret == 0) - ret = drm_gem_handle_create(file, &obj->base, &handle); +out: ret = drm_gem_handle_create(file, &obj->base, &handle); /* drop reference from allocate - handle holds it now */ drm_gem_object_unreference_unlocked(&obj->base); -- 2.1.4 _______________________________________________ Intel-gfx mailing list Intel-gfx@xxxxxxxxxxxxxxxxxxxxx http://lists.freedesktop.org/mailman/listinfo/intel-gfx