Re: [PATCH v2 16/37] drm/i915/lmem: support pread

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



Quoting Matthew Auld (2019-06-27 21:56:12)
> We need to add support for pread'ing an LMEM object.
> 
> Signed-off-by: Matthew Auld <matthew.auld@xxxxxxxxx>
> Cc: Joonas Lahtinen <joonas.lahtinen@xxxxxxxxxxxxxxx>
> Cc: Abdiel Janulgue <abdiel.janulgue@xxxxxxxxxxxxxxx>
> ---
>  .../gpu/drm/i915/gem/i915_gem_object_types.h  |  2 +
>  drivers/gpu/drm/i915/i915_gem.c               |  6 ++
>  drivers/gpu/drm/i915/intel_region_lmem.c      | 76 +++++++++++++++++++
>  3 files changed, 84 insertions(+)
> 
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
> index 80ff5ad9bc07..8cdee185251a 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
> @@ -52,6 +52,8 @@ struct drm_i915_gem_object_ops {
>         void (*truncate)(struct drm_i915_gem_object *obj);
>         void (*writeback)(struct drm_i915_gem_object *obj);
>  
> +       int (*pread)(struct drm_i915_gem_object *,
> +                    const struct drm_i915_gem_pread *arg);
>         int (*pwrite)(struct drm_i915_gem_object *obj,
>                       const struct drm_i915_gem_pwrite *arg);
>  
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index 85677ae89849..4ba386ab35e7 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -463,6 +463,12 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
>  
>         trace_i915_gem_object_pread(obj, args->offset, args->size);
>  
> +       ret = -ENODEV;
> +       if (obj->ops->pread)
> +               ret = obj->ops->pread(obj, args);
> +       if (ret != -ENODEV)
> +               goto out;
> +
>         ret = i915_gem_object_wait(obj,
>                                    I915_WAIT_INTERRUPTIBLE,
>                                    MAX_SCHEDULE_TIMEOUT);
> diff --git a/drivers/gpu/drm/i915/intel_region_lmem.c b/drivers/gpu/drm/i915/intel_region_lmem.c
> index 701bcac3479e..54b2c7bf177d 100644
> --- a/drivers/gpu/drm/i915/intel_region_lmem.c
> +++ b/drivers/gpu/drm/i915/intel_region_lmem.c
> @@ -7,10 +7,86 @@
>  #include "intel_memory_region.h"
>  #include "intel_region_lmem.h"
>  
> +static int lmem_pread(struct drm_i915_gem_object *obj,
> +                     const struct drm_i915_gem_pread *arg)
> +{
> +       struct drm_i915_private *i915 = to_i915(obj->base.dev);
> +       struct intel_runtime_pm *rpm = &i915->runtime_pm;
> +       intel_wakeref_t wakeref;
> +       struct dma_fence *fence;
> +       char __user *user_data;
> +       unsigned int offset;
> +       unsigned long idx;
> +       u64 remain;
> +       int ret;
> +
> +       ret = i915_gem_object_pin_pages(obj);
> +       if (ret)
> +               return ret;
> +
> +       i915_gem_object_lock(obj);
> +       ret = i915_gem_object_set_to_wc_domain(obj, false);

You chose to opt out of the unlocked wait before the locked wait?


> +       if (ret) {
> +               i915_gem_object_unlock(obj);
> +               goto out_unpin;
> +       }
> +
> +       fence = i915_gem_object_lock_fence(obj);
> +       i915_gem_object_unlock(obj);
> +       if (!fence) {
> +               ret = -ENOMEM;
> +               goto out_unpin;
> +       }
> +
> +       wakeref = intel_runtime_pm_get(rpm);

Something not mentioned so far is the story for the interaction between mm (memory management) and rpm (runtime power management) — i.e. who guarantees the device is awake whenever lmem is touched.

> +       remain = arg->size;
> +       user_data = u64_to_user_ptr(arg->data_ptr);
> +       offset = offset_in_page(arg->offset);
> +       for (idx = arg->offset >> PAGE_SHIFT; remain; idx++) {
> +               unsigned long unwritten;
> +               void __iomem *vaddr;
> +               int length;
> +
> +               length = remain;
> +               if (offset + length > PAGE_SIZE)
> +                       length = PAGE_SIZE - offset;
> +
> +               vaddr = i915_gem_object_lmem_io_map_page(obj, idx);
> +               if (!vaddr) {
> +                       ret = -ENOMEM;
> +                       goto out_put;
> +               }
> +
> +               unwritten = copy_to_user(user_data,

Except this is a secret atomic section!!! The mapping was taken with the atomic io_mapping API (note the io_mapping_unmap_atomic() below), yet copy_to_user() may fault and sleep — that is not allowed while an atomic mapping is held.

> +                                        (void __force *)vaddr + offset,
> +                                        length);
> +               io_mapping_unmap_atomic(vaddr);
> +               if (unwritten) {
> +                       ret = -EFAULT;
> +                       goto out_put;
> +               }
> +
> +               remain -= length;
> +               user_data += length;
> +               offset = 0;
> +       }
> +
> +out_put:
> +       intel_runtime_pm_put(rpm, wakeref);
> +       i915_gem_object_unlock_fence(obj, fence);
> +out_unpin:
> +       i915_gem_object_unpin_pages(obj);
> +
> +       return ret;
> +}
> +
>  static const struct drm_i915_gem_object_ops region_lmem_obj_ops = {
>         .get_pages = i915_memory_region_get_pages_buddy,
>         .put_pages = i915_memory_region_put_pages_buddy,
>         .release = i915_gem_object_release_memory_region,
> +
> +       .pread = lmem_pread,
>  };
>  
>  static struct drm_i915_gem_object *
> -- 
> 2.20.1
> 
> _______________________________________________
> Intel-gfx mailing list
> Intel-gfx@xxxxxxxxxxxxxxxxxxxxx
> https://lists.freedesktop.org/mailman/listinfo/intel-gfx
_______________________________________________
Intel-gfx mailing list
Intel-gfx@xxxxxxxxxxxxxxxxxxxxx
https://lists.freedesktop.org/mailman/listinfo/intel-gfx




[Index of Archives]     [AMD Graphics]     [Linux USB Devel]     [Linux Audio Users]     [Yosemite News]     [Linux Kernel]     [Linux SCSI]

  Powered by Linux