RE: [RFC PATCH V2] drm/xe/guc: Use exec queue hints for GT frequency

> -----Original Message-----
> From: dri-devel <dri-devel-bounces@xxxxxxxxxxxxxxxxxxxxx> On
> Behalf Of Tejas Upadhyay
> Sent: January 9, 2025 7:07 AM
> To: intel-xe@xxxxxxxxxxxxxxxxxxxxx
> Cc: dri-devel@xxxxxxxxxxxxxxxxxxxxx; Nilawar, Badal
> <badal.nilawar@xxxxxxxxx>; Belgaumkar, Vinay
> <vinay.belgaumkar@xxxxxxxxx>; Mrozek, Michal
> <michal.mrozek@xxxxxxxxx>; Morek, Szymon
> <szymon.morek@xxxxxxxxx>; Souza, Jose <jose.souza@xxxxxxxxx>;
> De Marchi, Lucas <lucas.demarchi@xxxxxxxxx>; Upadhyay, Tejas
> <tejas.upadhyay@xxxxxxxxx>
> Subject: [RFC PATCH V2] drm/xe/guc: Use exec queue hints for GT
> frequency
> 
> Allow user to provide a low latency hint per exec queue. When set,
> KMD sends a hint to GuC which results in special handling for this
> exec queue. SLPC will ramp the GT frequency aggressively every time
> it switches to this exec queue.
> 
> We need to enable the use of SLPC Compute strategy during init, but
> it will apply only to exec queues that set this bit during exec queue
> creation.
> 
> Improvement with this approach is shown below:
> 
> Before,
> 
> :~$ NEOReadDebugKeys=1 EnableDirectSubmission=0 clpeak --kernel-latency
> Platform: Intel(R) OpenCL Graphics
>   Device: Intel(R) Graphics [0xe20b]
>     Driver version  : 24.52.0 (Linux x64)
>     Compute units   : 160
>     Clock frequency : 2850 MHz
>     Kernel launch latency : 283.16 us
> 
> After,
> 
> :~$ NEOReadDebugKeys=1 EnableDirectSubmission=0 clpeak --kernel-latency
> Platform: Intel(R) OpenCL Graphics
>   Device: Intel(R) Graphics [0xe20b]
>     Driver version  : 24.52.0 (Linux x64)
>     Compute units   : 160
>     Clock frequency : 2850 MHz
> 
>     Kernel launch latency : 63.38 us
> 
> UMD will indicate the low latency hint with a flag as shown below,
> 
> *     struct drm_xe_exec_queue_create exec_queue_create = {
> *          .flags = DRM_XE_EXEC_QUEUE_LOW_LATENCY_HINT or 0
> *          .extensions = 0,
> *          .vm_id = vm,
> *          .num_bb_per_exec = 1,
> *          .num_eng_per_bb = 1,
> *          .instances = to_user_pointer(&instance),
> *     };
> *     ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &exec_queue_create);
> 
> Link to UMD PR : https://github.com/intel/compute-runtime/pull/794
> 
> Note: There is an outstanding issue on the GuC side where it is not able
> to switch to max frequency as per the strategy indicated by KMD, so for
> experiment/test results a hardcoded approach was taken and passed to GuC
> as policy. Debugging effort on the GuC side is going on in parallel.
> 
> V2:
>   - DRM_XE_EXEC_QUEUE_LOW_LATENCY_HINT 1 is already planned for
>     another hint (Szymon)
>   - Add motivation to description (Lucas)
> 
> Cc: dri-devel@xxxxxxxxxxxxxxxxxxxxx
> Cc: vinay.belgaumkar@xxxxxxxxx
> Cc: Michal Mrozek <michal.mrozek@xxxxxxxxx>
> Cc: Szymon Morek <szymon.morek@xxxxxxxxx>
> Cc: José Roberto de Souza <jose.souza@xxxxxxxxx>
> Signed-off-by: Tejas Upadhyay <tejas.upadhyay@xxxxxxxxx>
> ---
>  drivers/gpu/drm/xe/abi/guc_actions_slpc_abi.h |  3 +++
>  drivers/gpu/drm/xe/xe_exec_queue.c            |  7 ++++---
>  drivers/gpu/drm/xe/xe_guc_pc.c                | 16 ++++++++++++++++
>  drivers/gpu/drm/xe/xe_guc_submit.c            |  7 +++++++
>  include/uapi/drm/xe_drm.h                     |  3 ++-
>  5 files changed, 32 insertions(+), 4 deletions(-)
> 
> diff --git a/drivers/gpu/drm/xe/abi/guc_actions_slpc_abi.h b/drivers/gpu/drm/xe/abi/guc_actions_slpc_abi.h
> index 85abe4f09ae2..c50075b8270f 100644
> --- a/drivers/gpu/drm/xe/abi/guc_actions_slpc_abi.h
> +++ b/drivers/gpu/drm/xe/abi/guc_actions_slpc_abi.h
> @@ -174,6 +174,9 @@ struct slpc_task_state_data {
>  	};
>  } __packed;
> 
> +#define SLPC_EXEC_QUEUE_FREQ_REQ_IS_COMPUTE	REG_BIT(28)
> +#define SLPC_OPTIMIZED_STRATEGY_COMPUTE	REG_BIT(0)
> +
>  struct slpc_shared_data_header {
>  	/* Total size in bytes of this shared buffer. */
>  	u32 size;
> diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c
> index 8948f50ee58f..7747ba6c4bb8 100644
> --- a/drivers/gpu/drm/xe/xe_exec_queue.c
> +++ b/drivers/gpu/drm/xe/xe_exec_queue.c
> @@ -553,7 +553,8 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
>  	u32 len;
>  	int err;
> 
> -	if (XE_IOCTL_DBG(xe, args->flags) ||
> +	if (XE_IOCTL_DBG(xe, args->flags &&
> +			 !(args->flags & DRM_XE_EXEC_QUEUE_LOW_LATENCY_HINT)) ||
>  	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
>  		return -EINVAL;
> 
> @@ -578,7 +579,7 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
> 
>  		for_each_tile(tile, xe, id) {
>  			struct xe_exec_queue *new;
> -			u32 flags = EXEC_QUEUE_FLAG_VM;
> +			u32 flags = args->flags | EXEC_QUEUE_FLAG_VM;


You are mixing internal and external flags here. args->flags carries the
external (uAPI) definitions, while this variable holds internal
EXEC_QUEUE_FLAG_* bits. Note that the current value of
DRM_XE_EXEC_QUEUE_LOW_LATENCY_HINT conflicts with the internal definition:

#define EXEC_QUEUE_FLAG_PERMANENT		BIT(1)

I think the better way to do it is to define an internal value for this
purpose, such as:

#define EXEC_QUEUE_FLAG_LOW_LATENCY		BIT(5)

Then write:

	if (args->flags & DRM_XE_EXEC_QUEUE_LOW_LATENCY_HINT)
		flags |= EXEC_QUEUE_FLAG_LOW_LATENCY;
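In the context of the quoted hunk that would look roughly like this (untested
sketch; BIT(5) is only a suggestion, use whichever internal bit is still free):

	/* internal-only flag, next to EXEC_QUEUE_FLAG_PERMANENT and friends */
	#define EXEC_QUEUE_FLAG_LOW_LATENCY		BIT(5)

	/* xe_exec_queue_create_ioctl(), VM bind engine path */
	for_each_tile(tile, xe, id) {
		struct xe_exec_queue *new;
		u32 flags = EXEC_QUEUE_FLAG_VM;

		/* translate the uAPI hint into the internal flag */
		if (args->flags & DRM_XE_EXEC_QUEUE_LOW_LATENCY_HINT)
			flags |= EXEC_QUEUE_FLAG_LOW_LATENCY;
		...
	}

That keeps uAPI bit values out of the internal flags word entirely.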


> 
>  			if (id)
> 				flags |= EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD;
> @@ -626,7 +627,7 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
>  		}
> 
>  		q = xe_exec_queue_create(xe, vm, logical_mask,
> -					 args->width, hwe, 0,
> +					 args->width, hwe, args->flags,

Use the internal flag here as well if you follow the suggestion above.
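
i.e. something like this (again an untested sketch on top of the suggestion
above; the local flags variable is just for illustration):

	u32 flags = 0;

	/* translate the uAPI hint before handing flags to the internal API */
	if (args->flags & DRM_XE_EXEC_QUEUE_LOW_LATENCY_HINT)
		flags |= EXEC_QUEUE_FLAG_LOW_LATENCY;

	q = xe_exec_queue_create(xe, vm, logical_mask,
				 args->width, hwe, flags,
				 args->extensions);

so only internal bits end up in q->flags.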


>  					 args->extensions);
>  		up_read(&vm->lock);
>  		xe_vm_put(vm);
> diff --git a/drivers/gpu/drm/xe/xe_guc_pc.c b/drivers/gpu/drm/xe/xe_guc_pc.c
> index df7f130fb663..ff0b98ccf1a7 100644
> --- a/drivers/gpu/drm/xe/xe_guc_pc.c
> +++ b/drivers/gpu/drm/xe/xe_guc_pc.c
> @@ -992,6 +992,19 @@ static int pc_init_freqs(struct xe_guc_pc *pc)
>  	return ret;
>  }
> 
> +static int xe_guc_pc_set_strategy(struct xe_guc_pc *pc, u32 val)
> +{
> +	int ret = 0;
> +
> +	xe_pm_runtime_get(pc_to_xe(pc));
> +	ret = pc_action_set_param(pc,
> +				  SLPC_PARAM_STRATEGIES,
> +				  val);
> +	xe_pm_runtime_put(pc_to_xe(pc));
> +
> +	return ret;
> +}
> +
>  /**
>   * xe_guc_pc_start - Start GuC's Power Conservation component
>   * @pc: Xe_GuC_PC instance
> @@ -1052,6 +1065,9 @@ int xe_guc_pc_start(struct xe_guc_pc *pc)
> 
> 	ret = pc_action_setup_gucrc(pc, GUCRC_FIRMWARE_CONTROL);
> 
> +	/* Enable SLPC Optimized Strategy for compute */
> +	xe_guc_pc_set_strategy(pc, SLPC_OPTIMIZED_STRATEGY_COMPUTE);
> +
>  out:
>  	xe_force_wake_put(gt_to_fw(gt), fw_ref);
>  	return ret;
> diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
> index 9c36329fe857..88a1987ac360 100644
> --- a/drivers/gpu/drm/xe/xe_guc_submit.c
> +++ b/drivers/gpu/drm/xe/xe_guc_submit.c
> @@ -15,6 +15,7 @@
>  #include <drm/drm_managed.h>
> 
>  #include "abi/guc_actions_abi.h"
> +#include "abi/guc_actions_slpc_abi.h"
>  #include "abi/guc_klvs_abi.h"
>  #include "regs/xe_lrc_layout.h"
>  #include "xe_assert.h"
> @@ -400,6 +401,7 @@ static void __guc_exec_queue_policy_add_##func(struct exec_queue_policy *policy,
>  MAKE_EXEC_QUEUE_POLICY_ADD(execution_quantum, EXECUTION_QUANTUM)
>  MAKE_EXEC_QUEUE_POLICY_ADD(preemption_timeout, PREEMPTION_TIMEOUT)
>  MAKE_EXEC_QUEUE_POLICY_ADD(priority, SCHEDULING_PRIORITY)
> +MAKE_EXEC_QUEUE_POLICY_ADD(slpc_ctx_freq_req, SLPM_GT_FREQUENCY)
>  #undef MAKE_EXEC_QUEUE_POLICY_ADD
> 
>  static const int xe_exec_queue_prio_to_guc[] = {
> @@ -414,14 +416,19 @@ static void init_policies(struct xe_guc *guc, struct xe_exec_queue *q)
>  	struct exec_queue_policy policy;
>  	enum xe_exec_queue_priority prio = q->sched_props.priority;
>  	u32 timeslice_us = q->sched_props.timeslice_us;
> +	u32 slpc_ctx_freq_req = 0;
>  	u32 preempt_timeout_us = q->sched_props.preempt_timeout_us;
> 
>  	xe_gt_assert(guc_to_gt(guc), exec_queue_registered(q));
> 
> +	if (q->flags & DRM_XE_EXEC_QUEUE_LOW_LATENCY_HINT)

Use the internal definition here as well.
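
i.e. (sketch, assuming the internal EXEC_QUEUE_FLAG_LOW_LATENCY bit proposed
in the first comment):

	/* q->flags only carries internal bits at this point */
	if (q->flags & EXEC_QUEUE_FLAG_LOW_LATENCY)
		slpc_ctx_freq_req |= SLPC_EXEC_QUEUE_FREQ_REQ_IS_COMPUTE;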

> +		slpc_ctx_freq_req |= SLPC_EXEC_QUEUE_FREQ_REQ_IS_COMPUTE;

