Hi Tvrtko,

Thank you for your valuable comments. We have gone through all of them and
will submit a revised patch set after incorporating your review comments.
A few follow-up notes, with rough sketches of the fixes we have in mind,
are below the quote.

> On 21/09/2018 10:13, kedar.j.karanje@xxxxxxxxx wrote:
> > From: Praveen Diwakar <praveen.diwakar@xxxxxxxxx>
> >
> > High resoluton timer is used for this purpose.
> >
> > Debugfs is provided to enable/disable/update timer configuration
> >
> > Change-Id: I35d692c5afe962fcad4573185bc6f744487711d0
> > Signed-off-by: Praveen Diwakar <praveen.diwakar@xxxxxxxxx>
> > Signed-off-by: Yogesh Marathe <yogesh.marathe@xxxxxxxxx>
> > Signed-off-by: Aravindan Muthukumar <aravindan.muthukumar@xxxxxxxxx>
> > Signed-off-by: Kedar J Karanje <kedar.j.karanje@xxxxxxxxx>
> > Signed-off-by: Ankit Navik <ankit.p.navik@xxxxxxxxx>
> > ---
> >  drivers/gpu/drm/i915/i915_debugfs.c | 94 ++++++++++++++++++++++++++++++++++++-
> >  drivers/gpu/drm/i915/i915_drv.h     |  1 +
> >  2 files changed, 94 insertions(+), 1 deletion(-)
> >
> > diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
> > index f9ce35d..81ba509 100644
> > --- a/drivers/gpu/drm/i915/i915_debugfs.c
> > +++ b/drivers/gpu/drm/i915/i915_debugfs.c
> > @@ -4740,6 +4740,97 @@ static const struct drm_info_list i915_debugfs_list[] = {
> >  	{"i915_drrs_status", i915_drrs_status, 0},
> >  	{"i915_rps_boost_info", i915_rps_boost_info, 0},
> >  };
> > +
> > +#define POLL_PERIOD_MS	(1000 * 1000)
> > +#define PENDING_REQ_0	0 /* No active request pending*/
> > +#define PENDING_REQ_3	3 /* Threshold value of 3 active request pending*/
> > +			  /* Anything above this is considered as HIGH load
> > +			   * context
> > +			   */
> > +			  /* And less is considered as LOW load*/
> > +			  /* And equal is considered as mediaum load */
>
> Wonky comments and some typos up to here.
>
> > +
> > +static int predictive_load_enable;
> > +static int predictive_load_timer_init;
> > +
> > +static enum hrtimer_restart predictive_load_cb(struct hrtimer *hrtimer)
> > +{
> > +	struct drm_i915_private *dev_priv =
> > +		container_of(hrtimer, typeof(*dev_priv), pred_timer);
> > +	enum intel_engine_id id;
> > +	struct intel_engine_cs *engine;
>
> Some unused's.
>
> > +	struct i915_gem_context *ctx;
> > +	u64 req_pending;
>
> unsigned long, and also please try to order declarations so the right edge
> of text is moving in one direction only.
>
> > +
> > +	list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
> > +
> > +		if (!ctx->name)
> > +			continue;
>
> What is this?
>
> > +
> > +		mutex_lock(&dev_priv->pred_mutex);
>
> Here the mutex bites you since you cannot sleep in the timer callback.
> atomic_t would solve it. Or so would a native unsigned int/long, since a
> lock is not needed to read a native word on x86.
>
> > +		req_pending = ctx->req_cnt;
> > +		mutex_unlock(&dev_priv->pred_mutex);
> > +
> > +		if (req_pending == PENDING_REQ_0)
> > +			continue;
> > +
> > +		if (req_pending > PENDING_REQ_3)
> > +			ctx->load_type = LOAD_TYPE_HIGH;
> > +		else if (req_pending == PENDING_REQ_3)
> > +			ctx->load_type = LOAD_TYPE_MEDIUM;
> > +		else if (req_pending < PENDING_REQ_3)
>
> Must be smaller if not greater or equal, but maybe the compiler does that
> for you.
>
> > +			ctx->load_type = LOAD_TYPE_LOW;
> > +
> > +		i915_set_optimum_config(ctx->load_type, ctx, KABYLAKE_GT3);
>
> Only KBL? Idea to put the table in dev_priv FTW! :)
>
> ctx->load_type used only as a temporary uncovered here? :)
>
> > +	}
> > +
> > +	hrtimer_forward_now(hrtimer,
> > +			ns_to_ktime(predictive_load_enable*POLL_PERIOD_MS));
>
> Or HRTIMER_NORESTART if disabled? Hard to call it, details..
>
> > +
> > +	return HRTIMER_RESTART;
> > +}
> > +
> > +static int
> > +i915_predictive_load_get(void *data, u64 *val)
> > +{
> > +	struct drm_i915_private *dev_priv = data;
> > +
> > +	*val = predictive_load_enable;
> > +	return 0;
> > +}
> > +
> > +static int
> > +i915_predictive_load_set(void *data, u64 val)
> > +{
> > +	struct drm_i915_private *dev_priv = data;
> > +	struct intel_device_info *info;
> > +
> > +	info = mkwrite_device_info(dev_priv);
>
> Unused, why?
>
> > +
> > +	predictive_load_enable = val;
> > +
> > +	if (predictive_load_enable) {
> > +		if (!predictive_load_timer_init) {
> > +			hrtimer_init(&dev_priv->pred_timer, CLOCK_MONOTONIC,
> > +					HRTIMER_MODE_REL);
> > +			dev_priv->pred_timer.function = predictive_load_cb;
> > +			predictive_load_timer_init = 1;
>
> Move timer init to dev_priv setup.
>
> > +		}
> > +		hrtimer_start(&dev_priv->pred_timer,
> > +			ns_to_ktime(predictive_load_enable*POLL_PERIOD_MS),
> > +			HRTIMER_MODE_REL_PINNED);
>
> Two threads can race to here.
>
> Also you can give some slack to the timer since precision is not critical
> to you and can save you some CPU power.
>
> > +	} else {
> > +		hrtimer_cancel(&dev_priv->pred_timer);
> > +	}
> > +
> > +	return 0;
> > +}
> > +
> > +DEFINE_SIMPLE_ATTRIBUTE(i915_predictive_load_ctl,
> > +			i915_predictive_load_get, i915_predictive_load_set,
> > +			"%llu\n");
> > +
> >  #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
> >
> >  static const struct i915_debugfs_files {
> > @@ -4769,7 +4860,8 @@ static const struct i915_debugfs_files {
> >  	{"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
> >  	{"i915_ipc_status", &i915_ipc_status_fops},
> >  	{"i915_drrs_ctl", &i915_drrs_ctl_fops},
> > -	{"i915_edp_psr_debug", &i915_edp_psr_debug_fops}
> > +	{"i915_edp_psr_debug", &i915_edp_psr_debug_fops},
> > +	{"i915_predictive_load_ctl", &i915_predictive_load_ctl}
>
> And if the feature is to become real one day, given its nature, the knob
> would probably have to go to sysfs, not debugfs.
>
> >  };
> >
> >  int i915_debugfs_register(struct drm_i915_private *dev_priv)
> > diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> > index 137ec33..0505c47 100644
> > --- a/drivers/gpu/drm/i915/i915_drv.h
> > +++ b/drivers/gpu/drm/i915/i915_drv.h
> > @@ -1552,6 +1552,7 @@ struct intel_cdclk_state {
> >  };
> >
> >  struct drm_i915_private {
> > +	struct hrtimer pred_timer;
>
> Surely not the first member! :)
>
> Regards,
>
> Tvrtko
>
> >  	struct drm_device drm;
> >
> >  	struct kmem_cache *objects;
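To expand on how we plan to address the main points before sending v2:

For the timer callback, the plan is to drop the unused engine/id locals,
drop pred_mutex from the callback entirely, and make ctx->req_cnt a native
word that is updated with WRITE_ONCE() on the request path and read here
with READ_ONCE(), as you suggest. We would also return HRTIMER_NORESTART
once the knob is cleared. A rough sketch of the reworked callback; note
that dev_priv->gt_config below is a hypothetical field standing in for the
per-platform table you suggested moving into dev_priv (replacing the
hard-coded KABYLAKE_GT3):

static enum hrtimer_restart predictive_load_cb(struct hrtimer *hrtimer)
{
	struct drm_i915_private *dev_priv =
		container_of(hrtimer, typeof(*dev_priv), pred_timer);
	struct i915_gem_context *ctx;
	unsigned long req_pending;

	list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
		/* Lockless read of a native word; the writer side
		 * becomes WRITE_ONCE(ctx->req_cnt, ...).
		 */
		req_pending = READ_ONCE(ctx->req_cnt);

		if (req_pending == PENDING_REQ_0)
			continue;

		if (req_pending > PENDING_REQ_3)
			ctx->load_type = LOAD_TYPE_HIGH;
		else if (req_pending == PENDING_REQ_3)
			ctx->load_type = LOAD_TYPE_MEDIUM;
		else
			ctx->load_type = LOAD_TYPE_LOW;

		/* Hypothetical per-platform table in dev_priv instead
		 * of the hard-coded KABYLAKE_GT3.
		 */
		i915_set_optimum_config(ctx->load_type, ctx,
					dev_priv->gt_config);
	}

	/* Do not rearm once the debugfs knob has been cleared. */
	if (!READ_ONCE(predictive_load_enable))
		return HRTIMER_NORESTART;

	hrtimer_forward_now(hrtimer,
			ns_to_ktime(predictive_load_enable * POLL_PERIOD_MS));

	return HRTIMER_RESTART;
}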
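For i915_predictive_load_set(), we will drop the unused
mkwrite_device_info() call, move the one-time hrtimer_init() into driver
init so the predictive_load_timer_init flag goes away, take pred_mutex
around the enable/disable transition to close the race between two writers
(safe now that the callback no longer takes it), and use
hrtimer_start_range_ns() to give the timer some slack. Again only a
sketch; the 1 ms slack value is a placeholder:

/* One-time setup, moved to driver init alongside the other dev_priv
 * initialisation:
 *
 *	hrtimer_init(&dev_priv->pred_timer, CLOCK_MONOTONIC,
 *		     HRTIMER_MODE_REL);
 *	dev_priv->pred_timer.function = predictive_load_cb;
 */

static int
i915_predictive_load_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;

	/* Serialize writers so two threads cannot race the
	 * start/cancel transitions.
	 */
	mutex_lock(&dev_priv->pred_mutex);

	predictive_load_enable = val;

	if (val) {
		/* Allow 1 ms of slack since timing precision is not
		 * critical here, which saves some CPU power.
		 */
		hrtimer_start_range_ns(&dev_priv->pred_timer,
				ns_to_ktime(val * POLL_PERIOD_MS),
				NSEC_PER_MSEC,
				HRTIMER_MODE_REL_PINNED);
	} else {
		hrtimer_cancel(&dev_priv->pred_timer);
	}

	mutex_unlock(&dev_priv->pred_mutex);

	return 0;
}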
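On the remaining points: we will move pred_timer out of the first position
in drm_i915_private, re-check whether ctx->load_type really needs to live
in the context or can become a local, and either explain or drop the
!ctx->name check in v2. Agreed as well that the knob should move from
debugfs to sysfs if the feature graduates from an experiment.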
Regards,
Ankit
_______________________________________________
Intel-gfx mailing list
Intel-gfx@xxxxxxxxxxxxxxxxxxxxx
https://lists.freedesktop.org/mailman/listinfo/intel-gfx