[PATCH] drm/amdgpu: add flag for high priority contexts

On Tue, Jan 3, 2017 at 5:54 PM, Andres Rodriguez <andresx7 at gmail.com> wrote:
> Add a new context creation flag, AMDGPU_CTX_FLAG_HIGHPRIORITY. This flag
> results in the allocated context receiving a higher scheduler priority
> than other contexts system-wide.
>
> Signed-off-by: Andres Rodriguez <andresx7 at gmail.com>
> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c       | 24 ++++++++++++++++++------
>  drivers/gpu/drm/amd/scheduler/gpu_scheduler.h |  1 +
>  include/uapi/drm/amdgpu_drm.h                 |  3 ++-
>  3 files changed, 21 insertions(+), 7 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
> index 400c66b..ce470e2 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
> @@ -25,11 +25,17 @@
>  #include <drm/drmP.h>
>  #include "amdgpu.h"
>
> -static int amdgpu_ctx_init(struct amdgpu_device *adev, struct amdgpu_ctx *ctx)
> +static int amdgpu_ctx_init(struct amdgpu_device *adev, int priority, struct amdgpu_ctx *ctx)
>  {
>         unsigned i, j;
>         int r;
>
> +       if (priority < 0 || priority >= AMD_SCHED_MAX_PRIORITY)
> +               return -EINVAL;
> +
> +       if (priority == AMD_SCHED_PRIORITY_HIGH && !capable(CAP_SYS_ADMIN))
> +               return -EACCES;
> +
>         memset(ctx, 0, sizeof(*ctx));
>         ctx->adev = adev;
>         kref_init(&ctx->refcount);
> @@ -51,7 +57,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, struct amdgpu_ctx *ctx)
>                 struct amdgpu_ring *ring = adev->rings[i];
>                 struct amd_sched_rq *rq;
>
> -               rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
> +               rq = &ring->sched.sched_rq[priority];
>                 r = amd_sched_entity_init(&ring->sched, &ctx->rings[i].entity,
>                                           rq, amdgpu_sched_jobs);
>                 if (r)
> @@ -90,11 +96,15 @@ static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
>
>  static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
>                             struct amdgpu_fpriv *fpriv,
> +                           int flags,
>                             uint32_t *id)
>  {
>         struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
>         struct amdgpu_ctx *ctx;
> -       int r;
> +       int r, priority;
> +
> +       priority = flags & AMDGPU_CTX_FLAG_HIGHPRIORITY ?
> +               AMD_SCHED_PRIORITY_HIGH : AMD_SCHED_PRIORITY_NORMAL;
>
>         ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
>         if (!ctx)
> @@ -107,8 +117,9 @@ static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
>                 kfree(ctx);
>                 return r;
>         }
> +
>         *id = (uint32_t)r;
> -       r = amdgpu_ctx_init(adev, ctx);
> +       r = amdgpu_ctx_init(adev, priority, ctx);
>         if (r) {
>                 idr_remove(&mgr->ctx_handles, *id);
>                 *id = 0;
> @@ -186,7 +197,7 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
>                      struct drm_file *filp)
>  {
>         int r;
> -       uint32_t id;
> +       uint32_t id, flags;
>
>         union drm_amdgpu_ctx *args = data;
>         struct amdgpu_device *adev = dev->dev_private;
> @@ -194,10 +205,11 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
>
>         r = 0;
>         id = args->in.ctx_id;
> +       flags = args->in.flags;
>
>         switch (args->in.op) {
>         case AMDGPU_CTX_OP_ALLOC_CTX:
> -               r = amdgpu_ctx_alloc(adev, fpriv, &id);
> +               r = amdgpu_ctx_alloc(adev, fpriv, flags, &id);
>                 args->out.alloc.ctx_id = id;
>                 break;
>         case AMDGPU_CTX_OP_FREE_CTX:
> diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
> index d8dc681..2e458de 100644
> --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
> +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
> @@ -108,6 +108,7 @@ struct amd_sched_backend_ops {
>
>  enum amd_sched_priority {
>         AMD_SCHED_PRIORITY_KERNEL = 0,
> +       AMD_SCHED_PRIORITY_HIGH,
>         AMD_SCHED_PRIORITY_NORMAL,
>         AMD_SCHED_MAX_PRIORITY
>  };
> diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
> index 3961836..702332e 100644
> --- a/include/uapi/drm/amdgpu_drm.h
> +++ b/include/uapi/drm/amdgpu_drm.h
> @@ -160,10 +160,11 @@ union drm_amdgpu_bo_list {
>  /* unknown cause */
>  #define AMDGPU_CTX_UNKNOWN_RESET       3
>
> +#define AMDGPU_CTX_FLAG_HIGHPRIORITY   (1 << 0)

Would it be better to expose a priority level rather than a single
flag?  That would leave room to expose more than just normal/high in
the future, for example.
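
Something along these lines, purely as an illustration (the
AMDGPU_CTX_PRIORITY_* names and the reuse of the _pad slot below are
hypothetical, not part of this patch):

#define AMDGPU_CTX_PRIORITY_NORMAL	0
#define AMDGPU_CTX_PRIORITY_HIGH	1
/* leaves room for e.g. a LOW level or finer gradations later */

struct drm_amdgpu_ctx_in {
	/** AMDGPU_CTX_OP_* */
	__u32	op;
	/** For future use, no flags defined so far */
	__u32	flags;
	__u32	ctx_id;
	/** AMDGPU_CTX_PRIORITY_*, hypothetically taking the _pad slot */
	__u32	priority;
};

The kernel side would then translate the requested level into an
amd_sched_priority instead of testing a single bit.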

Alex

>  struct drm_amdgpu_ctx_in {
>         /** AMDGPU_CTX_OP_* */
>         __u32   op;
> -       /** For future use, no flags defined so far */
> +       /** AMDGPU_CTX_FLAG_* */
>         __u32   flags;
>         __u32   ctx_id;
>         __u32   _pad;
> --
> 2.7.4
>
> _______________________________________________
> amd-gfx mailing list
> amd-gfx at lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
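
For reference, a minimal userspace sketch of allocating a context with
the proposed flag through the existing DRM_AMDGPU_CTX ioctl.  The
alloc_high_prio_ctx helper name is made up here; fd is assumed to be an
open amdgpu render node, the include paths assume libdrm's usual
layout, and error handling is kept minimal:

#include <string.h>
#include <stdint.h>
#include <xf86drm.h>
#include <amdgpu_drm.h>

static int alloc_high_prio_ctx(int fd, uint32_t *ctx_id)
{
	union drm_amdgpu_ctx args;
	int r;

	memset(&args, 0, sizeof(args));
	args.in.op = AMDGPU_CTX_OP_ALLOC_CTX;
	args.in.flags = AMDGPU_CTX_FLAG_HIGHPRIORITY;	/* new flag from this patch */

	/* DRM_AMDGPU_CTX is a write/read command: the kernel fills out.alloc */
	r = drmCommandWriteRead(fd, DRM_AMDGPU_CTX, &args, sizeof(args));
	if (r)
		return r;	/* e.g. -EACCES without CAP_SYS_ADMIN, per the patch */

	*ctx_id = args.out.alloc.ctx_id;
	return 0;
}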

