Re: [PATCH -fixes] drm/vmwgfx: Protect from excessive execbuf kernel memory allocations

On Wed, Dec 12, 2018 at 12:26 PM Thomas Hellstrom <thomas@xxxxxxxxxxxx> wrote:
>
> On 12/12/18 12:06 PM, Thomas Hellstrom wrote:
> > With the new validation code, a malicious user-space app could
> > potentially submit command streams with enough buffer-object and resource
> > references in them to have the resulting allocated validation nodes and
> > relocations make the kernel run out of GFP_KERNEL memory.
> >
> > Protect from this by having the validation code reserve TTM graphics
> > memory when allocating.
> >
> > Signed-off-by: Thomas Hellstrom <thellstrom@xxxxxxxxxx>

For protecting GFP_KERNEL memory ... isn't this what cgroups are for?
The ioctl will fail with -ENOMEM well before the kernel has actually
run out of memory, with the added benefit that it's cross-driver and
configurable.

I know that the ttm memory reservations are older than cgroups, so
there are good reasons we have them. It just feels like there are
better tools available now, which would allow us to provide a better
(at least more consistent across vendors) interface to userspace for
controlling the limits.
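
To make that concrete, here is a rough userspace sketch (not part of the
patch; the cgroup v1 paths, the "gfx" group name and the 64 MiB limit are
purely illustrative, and kernel-side allocations only get charged to the
memcg when the allocation site opts in, e.g. with __GFP_ACCOUNT):

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>

static int write_file(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0)
		return -1;
	if (write(fd, val, strlen(val)) < 0) {
		close(fd);
		return -1;
	}
	return close(fd);
}

int main(void)
{
	char buf[32];

	/* Create a memcg group and cap its kernel memory at 64 MiB. */
	if (mkdir("/sys/fs/cgroup/memory/gfx", 0755) && errno != EEXIST)
		return 1;
	if (write_file("/sys/fs/cgroup/memory/gfx/memory.kmem.limit_in_bytes",
		       "67108864"))
		return 1;

	/*
	 * Move the current task into the group. Accounted kernel
	 * allocations made on its behalf are then charged against the
	 * limit and fail with -ENOMEM once it is exceeded, instead of
	 * eating into global kernel memory.
	 */
	snprintf(buf, sizeof(buf), "%d", (int)getpid());
	return write_file("/sys/fs/cgroup/memory/gfx/tasks", buf) ? 1 : 0;
}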

Cheers, Daniel

> > ---
> >   drivers/gpu/drm/vmwgfx/vmwgfx_drv.c        |  4 ++-
> >   drivers/gpu/drm/vmwgfx/vmwgfx_drv.h        |  5 +++
> >   drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c    |  2 ++
> >   drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c   | 38 ++++++++++++++++++++++
> >   drivers/gpu/drm/vmwgfx/vmwgfx_validation.c | 18 +++++++++-
> >   drivers/gpu/drm/vmwgfx/vmwgfx_validation.h | 37 +++++++++++++++++++++
> >   6 files changed, 102 insertions(+), 2 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
> > index 61a84b958d67..d7a2dfb8ee9b 100644
> > --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
> > +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
> > @@ -49,6 +49,8 @@
> >
> >   #define VMWGFX_REPO "In Tree"
> >
> > +#define VMWGFX_VALIDATION_MEM_GRAN (16*PAGE_SIZE)
> > +
> >
> >   /**
> >    * Fully encoded drm commands. Might move to vmw_drm.h
> > @@ -918,7 +920,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
> >               spin_unlock(&dev_priv->cap_lock);
> >       }
> >
> > -
> > +     vmw_validation_mem_init_ttm(dev_priv, VMWGFX_VALIDATION_MEM_GRAN);
> >       ret = vmw_kms_init(dev_priv);
> >       if (unlikely(ret != 0))
> >               goto out_no_kms;
> > diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
> > index 59f614225bcd..aca974b14b55 100644
> > --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
> > +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
> > @@ -606,6 +606,9 @@ struct vmw_private {
> >
> >       struct vmw_cmdbuf_man *cman;
> >       DECLARE_BITMAP(irqthread_pending, VMW_IRQTHREAD_MAX);
> > +
> > +     /* Validation memory reservation */
> > +     struct vmw_validation_mem vvm;
> >   };
> >
> >   static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
> > @@ -846,6 +849,8 @@ extern int vmw_ttm_global_init(struct vmw_private *dev_priv);
> >   extern void vmw_ttm_global_release(struct vmw_private *dev_priv);
> >   extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);
> >
> > +extern void vmw_validation_mem_init_ttm(struct vmw_private *dev_priv,
> > +                                     size_t gran);
> >   /**
> >    * TTM buffer object driver - vmwgfx_ttm_buffer.c
> >    */
> > diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
> > index 260650bb5560..f2d13a72c05d 100644
> > --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
> > +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
> > @@ -3835,6 +3835,8 @@ int vmw_execbuf_process(struct drm_file *file_priv,
> >       struct sync_file *sync_file = NULL;
> >       DECLARE_VAL_CONTEXT(val_ctx, &sw_context->res_ht, 1);
> >
> > +     vmw_validation_set_val_mem(&val_ctx, &dev_priv->vvm);
> > +
> >       if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
> >               out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
> >               if (out_fence_fd < 0) {
> > diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
> > index 7b1e5a5cbd2c..15fa9d491186 100644
> > --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
> > +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
> > @@ -96,3 +96,41 @@ void vmw_ttm_global_release(struct vmw_private *dev_priv)
> >       drm_global_item_unref(&dev_priv->bo_global_ref.ref);
> >       drm_global_item_unref(&dev_priv->mem_global_ref);
> >   }
> > +
> > +/* struct vmw_validation_mem callback */
> > +static int vmw_vmt_reserve(struct vmw_validation_mem *m, size_t size)
> > +{
> > +     static struct ttm_operation_ctx ctx = {.interruptible = false,
> > +                                            .no_wait_gpu = false};
> > +     struct vmw_private *dev_priv = container_of(m, struct vmw_private, vvm);
> > +
> > +     DRM_INFO("Reserving %llu\n", (unsigned long long) size);
>
> Oops. Leftover-debugging printouts. Will be fixed in v2.
>
>
> > +     return ttm_mem_global_alloc(vmw_mem_glob(dev_priv), size, &ctx);
> > +}
> > +
> > +/* struct vmw_validation_mem callback */
> > +static void vmw_vmt_unreserve(struct vmw_validation_mem *m, size_t size)
> > +{
> > +     struct vmw_private *dev_priv = container_of(m, struct vmw_private, vvm);
> > +
> > +     DRM_INFO("Unreserving %llu\n", (unsigned long long) size);
> > +     return ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
> > +}
> > +
> > +/**
> > + * vmw_validation_mem_init_ttm - Interface the validation memory tracker
> > + * to ttm.
> > + * @dev_priv: Pointer to struct vmw_private. The reason we choose a vmw private
> > + * rather than a struct vmw_validation_mem is to make sure the assumption in the
> > + * callbacks that struct vmw_private derives from struct vmw_validation_mem
> > + * holds true.
> > + * @gran: The recommended allocation granularity
> > + */
> > +void vmw_validation_mem_init_ttm(struct vmw_private *dev_priv, size_t gran)
> > +{
> > +     struct vmw_validation_mem *vvm = &dev_priv->vvm;
> > +
> > +     vvm->reserve_mem = vmw_vmt_reserve;
> > +     vvm->unreserve_mem = vmw_vmt_unreserve;
> > +     vvm->gran = gran;
> > +}
> > diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
> > index 184025fa938e..1f83b90fd259 100644
> > --- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
> > +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
> > @@ -104,11 +104,25 @@ void *vmw_validation_mem_alloc(struct vmw_validation_context *ctx,
> >               return NULL;
> >
> >       if (ctx->mem_size_left < size) {
> > -             struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);
> > +             struct page *page;
> >
> > +             if (ctx->vm && ctx->vm_size_left < PAGE_SIZE) {
> > +                     int ret = ctx->vm->reserve_mem(ctx->vm, ctx->vm->gran);
> > +
> > +                     if (ret)
> > +                             return NULL;
> > +
> > +                     ctx->vm_size_left += ctx->vm->gran;
> > +                     ctx->total_mem += ctx->vm->gran;
> > +             }
> > +
> > +             page = alloc_page(GFP_KERNEL | __GFP_ZERO);
> >               if (!page)
> >                       return NULL;
> >
> > +             if (ctx->vm)
> > +                     ctx->vm_size_left -= PAGE_SIZE;
> > +
> >               list_add_tail(&page->lru, &ctx->page_list);
> >               ctx->page_address = page_address(page);
> >               ctx->mem_size_left = PAGE_SIZE;
> > @@ -138,6 +152,8 @@ static void vmw_validation_mem_free(struct vmw_validation_context *ctx)
> >       }
> >
> >       ctx->mem_size_left = 0;
> > +     if (ctx->vm && ctx->total_mem)
> > +             ctx->vm->unreserve_mem(ctx->vm, ctx->total_mem);
> >   }
> >
> >   /**
> > diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
> > index b57e3292c386..3b396fea40d7 100644
> > --- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
> > +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
> > @@ -33,6 +33,21 @@
> >   #include <linux/ww_mutex.h>
> >   #include <drm/ttm/ttm_execbuf_util.h>
> >
> > +/**
> > + * struct vmw_validation_mem - Custom interface to provide memory reservations
> > + * for the validation code.
> > + * @reserve_mem: Callback to reserve memory
> > + * @unreserve_mem: Callback to unreserve memory
> > + * @gran: Reservation granularity. Contains a hint as to how much memory
> > + * should be reserved in each call to @reserve_mem(). A slow implementation
> > + * may want reservations to be done in large batches.
> > + */
> > +struct vmw_validation_mem {
> > +     int (*reserve_mem)(struct vmw_validation_mem *m, size_t size);
> > +     void (*unreserve_mem)(struct vmw_validation_mem *m, size_t size);
> > +     size_t gran;
> > +};
> > +
> >   /**
> >    * struct vmw_validation_context - Per command submission validation context
> >    * @ht: Hash table used to find resource- or buffer object duplicates
> > @@ -47,6 +62,10 @@
> >    * buffer objects
> >    * @mem_size_left: Free memory left in the last page in @page_list
> >    * @page_address: Kernel virtual address of the last page in @page_list
> > + * @vm: A pointer to the memory reservation interface or NULL if no
> > + * memory reservation is needed.
> > + * @vm_size_left: Amount of reserved memory that so far has not been allocated.
> > + * @total_mem: Amount of reserved memory.
> >    */
> >   struct vmw_validation_context {
> >       struct drm_open_hash *ht;
> > @@ -59,6 +78,9 @@ struct vmw_validation_context {
> >       unsigned int merge_dups;
> >       unsigned int mem_size_left;
> >       u8 *page_address;
> > +     struct vmw_validation_mem *vm;
> > +     size_t vm_size_left;
> > +     size_t total_mem;
> >   };
> >
> >   struct vmw_buffer_object;
> > @@ -101,6 +123,21 @@ vmw_validation_has_bos(struct vmw_validation_context *ctx)
> >       return !list_empty(&ctx->bo_list);
> >   }
> >
> > +/**
> > + * vmw_validation_set_val_mem - Register a validation mem object for
> > + * validation memory reservation
> > + * @ctx: The validation context
> > + * @vm: Pointer to a struct vmw_validation_mem
> > + *
> > + * Must be set before the first attempt to allocate validation memory.
> > + */
> > +static inline void
> > +vmw_validation_set_val_mem(struct vmw_validation_context *ctx,
> > +                        struct vmw_validation_mem *vm)
> > +{
> > +     ctx->vm = vm;
> > +}
> > +
> >   /**
> >    * vmw_validation_set_ht - Register a hash table for duplicate finding
> >    * @ctx: The validation context
>
>



-- 
Daniel Vetter
Software Engineer, Intel Corporation
+41 (0) 79 365 57 48 - http://blog.ffwll.ch