Quoting Andi Shyti (2020-02-05 22:49:49)
> +int live_rc6_busy(void *arg)
> +{
> +	struct intel_gt *gt = arg;
> +	struct intel_rc6 *rc6 = &gt->rc6;
> +	struct intel_engine_cs *engine;
> +	struct igt_spinner spin;
> +	intel_wakeref_t wakeref;
> +	enum intel_engine_id id;
> +	int err;
> +
> +	if (!rc6->supported)
> +		return 0;
> +
> +	err = igt_spinner_init(&spin, gt);
> +	if (err)
> +		return err;
> +
> +	wakeref = intel_runtime_pm_get(gt->uncore->rpm);
> +	for_each_engine(engine, gt, id) {
> +		struct i915_request *rq;
> +
> +		rq = igt_spinner_create_request(&spin,
> +						engine->kernel_context,
> +						MI_NOOP);
> +		if (IS_ERR(rq)) {
> +			err = PTR_ERR(rq);
> +			break;
> +		}
> +
> +		i915_request_get(rq);
> +		i915_request_add(rq);
> +
> +		igt_wait_for_spinner(&spin, rq); /* it's enough waiting */
> +
> +		/* gpu is busy, we shouldn't be in rc6 */
> +		if (is_rc6_active(rc6)) {
> +			pr_err("%s: never busy enough for having a nap\n",
> +			       engine->name);
> +			err = -EINVAL;
> +		}
> +
> +		igt_spinner_end(&spin);
> +		if (i915_request_wait(rq, 0, HZ / 5) < 0)
> +			err = -ETIME;
> +		i915_request_put(rq);
> +		if (err)
> +			break;
> +
> +		intel_gt_wait_for_idle(gt, HZ / 5);
> +		intel_gt_pm_wait_for_idle(gt);
> +
> +		/* gpu is idle, we should be in rc6 */
> +		if (!is_rc6_active(rc6)) {
> +			pr_err("%s is idle but doesn't go in rc6\n",
> +			       engine->name);
> +			err = -EINVAL;
> +			break;
> +		}
> +	}
> +	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
> +
> +	igt_spinner_fini(&spin);
> +	return err;

I'm afraid I think we should split the patch one more time, and push ahead with live_rc6_busy() as that should be working regardless of our discovery process around the thresholds.
-Chris
_______________________________________________
Intel-gfx mailing list
Intel-gfx@xxxxxxxxxxxxxxxxxxxxx
https://lists.freedesktop.org/mailman/listinfo/intel-gfx