+ */
+ u64 runtime[XE_ENGINE_CLASS_MAX];
+
/** @client: drm client */
struct xe_drm_client *client;
};
diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c
index 395de93579fa..b7b6256cb96a 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue.c
+++ b/drivers/gpu/drm/xe/xe_exec_queue.c
@@ -214,6 +214,8 @@ void xe_exec_queue_fini(struct xe_exec_queue *q)
{
int i;

+ xe_exec_queue_update_runtime(q);
+
for (i = 0; i < q->width; ++i)
xe_lrc_finish(q->lrc + i);
if (!(q->flags & EXEC_QUEUE_FLAG_PERMANENT) && (q->flags & EXEC_QUEUE_FLAG_VM || !q->vm))
@@ -769,6 +771,41 @@ bool xe_exec_queue_is_idle(struct xe_exec_queue *q)
q->lrc[0].fence_ctx.next_seqno - 1;
}
+/**
+ * xe_exec_queue_update_runtime() - Update runtime for this exec queue from hw
+ * @q: The exec queue
+ *
+ * Update the timestamp saved by HW for this exec queue and save runtime
+ * calculated by using the delta from last update. On multi-lrc case, only the
+ * first is considered.
+ */
+void xe_exec_queue_update_runtime(struct xe_exec_queue *q)
+{
+ struct xe_file *xef;
+ struct xe_lrc *lrc;
+ u32 old_ts, new_ts;
+
+ /*
+ * Jobs that are run during driver load may use an exec_queue, but are
+ * not associated with a user xe file, so avoid accumulating busyness
+ * for kernel specific work.
+ */
+ if (!q->vm || !q->vm->xef)
+ return;
+
+ xef = q->vm->xef;
+ lrc = &q->lrc[0];
+
+ new_ts = xe_lrc_update_timestamp(lrc, &old_ts);
+
+ /*
+ * Special case the very first timestamp: we don't want the
+ * initial delta to be a huge value
+ */
+ if (old_ts)
+ xef->runtime[q->class] += new_ts - old_ts;
+}
+
void xe_exec_queue_kill(struct xe_exec_queue *q)
{
struct xe_exec_queue *eq = q, *next;
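
To illustrate the accumulation scheme used by xe_exec_queue_update_runtime() above, here is a standalone sketch (not part of the patch; the names accumulate(), saved_ts and runtime_ticks are hypothetical). The hardware timestamp is read back as a u32, so the per-sample delta is computed in 32-bit unsigned arithmetic, which stays correct across a counter wraparound as long as less than one full wrap elapses between samples; the deltas are then summed into the 64-bit per-class counter added to struct xe_file:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the per-class u64 counter in struct xe_file. */
static uint64_t runtime_ticks;
/* Last timestamp sampled from the context image (0 = no sample yet). */
static uint32_t saved_ts;

/* Mirrors the delta accumulation done in xe_exec_queue_update_runtime(). */
static void accumulate(uint32_t new_ts)
{
	uint32_t old_ts = saved_ts;

	saved_ts = new_ts;

	/* Skip the very first sample so the initial delta is not huge. */
	if (old_ts)
		runtime_ticks += new_ts - old_ts; /* u32 math: wrap-safe */
}

int main(void)
{
	accumulate(0x00001000); /* first sample: only records the baseline */
	accumulate(0x00002000); /* adds 0x1000 */
	accumulate(0xffffff00); /* adds 0xffffdf00 */
	accumulate(0x00000100); /* counter wrapped: still adds 0x200 */

	printf("accumulated: 0x%llx ticks\n",
	       (unsigned long long)runtime_ticks);
	return 0;
}

Because the running total lives in struct xe_file rather than in the exec queue, it survives queue destruction; that is why the patch takes one final sample in xe_exec_queue_fini() before xe_lrc_finish() tears down the LRCs.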
diff --git a/drivers/gpu/drm/xe/xe_exec_queue.h b/drivers/gpu/drm/xe/xe_exec_queue.h
index 02ce8d204622..45b72daa2db3 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue.h
+++ b/drivers/gpu/drm/xe/xe_exec_queue.h
@@ -66,5 +66,6 @@ struct dma_fence *xe_exec_queue_last_fence_get(struct xe_exec_queue *e,
struct xe_vm *vm);
void xe_exec_queue_last_fence_set(struct xe_exec_queue *e, struct xe_vm *vm,
struct dma_fence *fence);
+void xe_exec_queue_update_runtime(struct xe_exec_queue *q);
#endif
diff --git a/drivers/gpu/drm/xe/xe_sched_job.c b/drivers/gpu/drm/xe/xe_sched_job.c
index cd8a2fba5438..6a081a4fa190 100644
--- a/drivers/gpu/drm/xe/xe_sched_job.c
+++ b/drivers/gpu/drm/xe/xe_sched_job.c
@@ -242,6 +242,8 @@ bool xe_sched_job_completed(struct xe_sched_job *job)
{
struct xe_lrc *lrc = job->q->lrc;

+ xe_exec_queue_update_runtime(job->q);
+
/*
* Can safely check just LRC[0] seqno as that is last seqno written when
* parallel handshake is done.