From: Tvrtko Ursulin <tvrtko.ursulin@xxxxxxxxx> Expose per-client and per-engine busyness under the previously added sysfs client root. The new file is named 'busy' and contains one line for each engine, each a monotonically increasing nanosecond-resolution total of the time the client's jobs were executing on the GPU. $ cat /sys/class/drm/card0/clients/5/busy 32516602 0 0 0 This data can serve as an interface to implement a top-like utility for GPU jobs. For instance, I have prototyped a tool in IGT which produces periodic output like: neverball[ 6011]: rcs0: 41.01% bcs0: 0.00% vcs0: 0.00% vecs0: 0.00% Xorg[ 5664]: rcs0: 31.16% bcs0: 0.00% vcs0: 0.00% vecs0: 0.00% xfwm4[ 5727]: rcs0: 0.00% bcs0: 0.00% vcs0: 0.00% vecs0: 0.00% This tool can also be extended to use the i915 PMU and show overall engine busyness, and engine loads using the queue depth metric. Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@xxxxxxxxx> --- drivers/gpu/drm/i915/i915_drv.h | 1 + drivers/gpu/drm/i915/i915_gem.c | 65 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 66 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index cc98c643f42b..d74111b397b4 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -631,6 +631,7 @@ struct drm_i915_file_private { struct { struct device_attribute pid; struct device_attribute name; + struct device_attribute busy; } attr; }; diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index e8a30c1ceb4f..7a0b96c04666 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -5251,6 +5251,56 @@ show_client_pid(struct device *kdev, struct device_attribute *attr, char *buf) return snprintf(buf, PAGE_SIZE, "%u", file_priv->client_pid); } +struct busy_ctx { + u64 total[I915_NUM_ENGINES]; +}; + +static int busy_add(int _id, void *p, void *data) +{ + struct i915_gem_context *ctx = p; + struct busy_ctx *bc = data; + struct intel_engine_cs 
*engine; + enum intel_engine_id id; + + for_each_engine(engine, ctx->i915, id) + bc->total[id] += ctx->engine[id].stats.total; + + return 0; +} + +static ssize_t +show_client_busy(struct device *kdev, struct device_attribute *attr, char *buf) +{ + struct drm_i915_file_private *file_priv = + container_of(attr, struct drm_i915_file_private, attr.busy); + struct drm_i915_private *i915 = file_priv->dev_priv; + unsigned int len = PAGE_SIZE; + struct busy_ctx bc = { }; + ssize_t res = 0; + struct intel_engine_cs *engine; + enum intel_engine_id id; + ssize_t ret; + + ret = i915_mutex_lock_interruptible(&i915->drm); + if (ret) + return ret; + + idr_for_each(&file_priv->context_idr, busy_add, &bc); + + mutex_unlock(&i915->drm.struct_mutex); + + for_each_engine(engine, i915, id) { + ret = scnprintf(buf, len, "%llu\n", bc.total[id]); /* scnprintf never returns more than len, so buf/len stay within the PAGE_SIZE sysfs buffer */ + if (ret <= 0) /* 0 means the buffer is exhausted */ + break; + res += ret; + len -= ret; + buf += ret; + } + + return res; +} + int i915_gem_add_client(struct drm_i915_private *i915, struct drm_i915_file_private *file_priv, @@ -5291,10 +5341,23 @@ i915_gem_add_client(struct drm_i915_private *i915, if (ret) goto err_attr_pid; + attr = &file_priv->attr.busy; + attr->attr.name = "busy"; + attr->attr.mode = 0444; + attr->show = show_client_busy; + + ret = sysfs_create_file(file_priv->client_root, + (struct attribute *)attr); + if (ret) + goto err_attr_busy; + file_priv->client_pid = pid_nr(get_task_pid(task, PIDTYPE_PID)); return 0; +err_attr_busy: + sysfs_remove_file(file_priv->client_root, + (struct attribute *)&file_priv->attr.pid); err_attr_pid: sysfs_remove_file(file_priv->client_root, (struct attribute *)&file_priv->attr.name); @@ -5309,6 +5372,8 @@ i915_gem_add_client(struct drm_i915_private *i915, void i915_gem_remove_client(struct drm_i915_file_private *file_priv) { sysfs_remove_file(file_priv->client_root, + (struct attribute *)&file_priv->attr.busy); + sysfs_remove_file(file_priv->client_root, (struct attribute *)&file_priv->attr.pid); 
sysfs_remove_file(file_priv->client_root, (struct attribute *)&file_priv->attr.name); -- 2.9.5 _______________________________________________ Intel-gfx mailing list Intel-gfx@xxxxxxxxxxxxxxxxxxxxx https://lists.freedesktop.org/mailman/listinfo/intel-gfx