Requests are run from the gt and are tied into the gt runtime power
management, so pull the runtime request management under gt/

Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
---
 drivers/gpu/drm/i915/Makefile                 |   1 +
 drivers/gpu/drm/i915/gem/i915_gem_mman.c      |   6 +-
 drivers/gpu/drm/i915/gem/i915_gem_pm.c        |  22 +---
 .../drm/i915/gem/selftests/i915_gem_context.c |   5 +-
 .../drm/i915/gem/selftests/i915_gem_mman.c    |   2 +-
 drivers/gpu/drm/i915/gt/intel_gt.c            |   2 +
 drivers/gpu/drm/i915/gt/intel_gt_pm.c         |   3 +
 drivers/gpu/drm/i915/gt/intel_gt_requests.c   | 123 ++++++++++++++++++
 drivers/gpu/drm/i915/gt/intel_gt_requests.h   |  24 ++++
 drivers/gpu/drm/i915/gt/intel_gt_types.h      |  11 ++
 drivers/gpu/drm/i915/gt/selftest_timeline.c   |   8 +-
 drivers/gpu/drm/i915/i915_debugfs.c           |  17 +--
 drivers/gpu/drm/i915/i915_drv.h               |  10 --
 drivers/gpu/drm/i915/i915_gem.c               |  17 ---
 drivers/gpu/drm/i915/i915_gem_evict.c         |  14 +-
 drivers/gpu/drm/i915/i915_gem_gtt.c           |   5 +-
 drivers/gpu/drm/i915/i915_request.c           |  64 +--------
 drivers/gpu/drm/i915/i915_request.h           |   7 +-
 .../gpu/drm/i915/selftests/igt_flush_test.c   |   9 +-
 .../gpu/drm/i915/selftests/igt_live_test.c    |   5 +-
 .../gpu/drm/i915/selftests/mock_gem_device.c  |  10 +-
 21 files changed, 211 insertions(+), 154 deletions(-)
 create mode 100644 drivers/gpu/drm/i915/gt/intel_gt_requests.c
 create mode 100644 drivers/gpu/drm/i915/gt/intel_gt_requests.h

diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 6313e7b4bd78..cba19470feb5 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -83,6 +83,7 @@ gt-y += \
 	gt/intel_gt_irq.o \
 	gt/intel_gt_pm.o \
 	gt/intel_gt_pm_irq.o \
+	gt/intel_gt_requests.o \
 	gt/intel_hangcheck.o \
 	gt/intel_lrc.o \
 	gt/intel_renderstate.o \
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index 9cb59fe1a736..96c09d40bb7a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -8,6 +8,7 @@
 #include <linux/sizes.h>
 #include "gt/intel_gt.h"
+#include "gt/intel_gt_requests.h"
 #include "i915_drv.h"
 #include "i915_gem_gtt.h"
@@ -423,6 +424,7 @@ void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
 static int create_mmap_offset(struct drm_i915_gem_object *obj)
 {
 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
+	struct intel_gt *gt = &i915->gt;
 	int err;
 	err = drm_gem_create_mmap_offset(&obj->base);
@@ -431,7 +433,7 @@ static int create_mmap_offset(struct drm_i915_gem_object *obj)
 	/* Attempt to reap some mmap space from dead objects */
 	do {
-		err = i915_gem_wait_for_idle(i915, MAX_SCHEDULE_TIMEOUT);
+		err = intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
 		if (err)
 			break;
@@ -440,7 +442,7 @@ static int create_mmap_offset(struct drm_i915_gem_object *obj)
 		if (!err)
 			break;
-	} while (flush_delayed_work(&i915->gem.retire_work));
+	} while (flush_delayed_work(&gt->requests.retire_work));
 	return err;
 }
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
index 04a9a27d659d..1e18045ee982 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
@@ -7,31 +7,18 @@
 #include "gem/i915_gem_pm.h"
 #include "gt/intel_gt.h"
 #include "gt/intel_gt_pm.h"
+#include "gt/intel_gt_requests.h"
 #include "i915_drv.h"
 #include "i915_globals.h"
 static void i915_gem_park(struct drm_i915_private *i915)
 {
-	cancel_delayed_work(&i915->gem.retire_work);
-
 	i915_vma_parked(i915);
 	i915_globals_park();
 }
-static void retire_work_handler(struct work_struct *work)
-{
-	struct drm_i915_private *i915 =
-		container_of(work, typeof(*i915), gem.retire_work.work);
-
-	i915_retire_requests(i915);
-
-	queue_delayed_work(i915->wq,
-			   &i915->gem.retire_work,
-			   round_jiffies_up_relative(HZ));
-}
-
 static int pm_notifier(struct notifier_block *nb,
 		       unsigned long action,
 		       void *data)
@@ -42,9 +29,6 @@ static int pm_notifier(struct notifier_block *nb,
 	switch (action) {
 	case INTEL_GT_UNPARK:
 		i915_globals_unpark();
-		queue_delayed_work(i915->wq,
-				   &i915->gem.retire_work,
-				   round_jiffies_up_relative(HZ));
 		break;
 	case INTEL_GT_PARK:
@@ -59,7 +43,7 @@ static bool switch_to_kernel_context_sync(struct intel_gt *gt)
 {
 	bool result = !intel_gt_is_wedged(gt);
-	if (i915_gem_wait_for_idle(gt->i915, I915_GEM_IDLE_TIMEOUT) == -ETIME) {
+	if (intel_gt_wait_for_idle(gt, I915_GEM_IDLE_TIMEOUT) == -ETIME) {
 		/* XXX hide warning from gem_eio */
 		if (i915_modparams.reset) {
 			dev_err(gt->i915->drm.dev,
@@ -239,8 +223,6 @@ void i915_gem_resume(struct drm_i915_private *i915)
 void i915_gem_init__pm(struct drm_i915_private *i915)
 {
-	INIT_DELAYED_WORK(&i915->gem.retire_work, retire_work_handler);
-
 	i915->gem.pm_notifier.notifier_call = pm_notifier;
 	blocking_notifier_chain_register(&i915->gt.pm_notifications,
 					 &i915->gem.pm_notifier);
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
index b26f550f8430..b39224ed3817 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
@@ -8,6 +8,7 @@
 #include "gem/i915_gem_pm.h"
 #include "gt/intel_gt.h"
+#include "gt/intel_gt_requests.h"
 #include "gt/intel_reset.h"
 #include "i915_selftest.h"
@@ -304,7 +305,7 @@ create_test_object(struct i915_address_space *vm,
 	int err;
 	/* Keep in GEM's good graces */
-	i915_retire_requests(vm->i915);
+	intel_gt_retire_requests(vm->gt);
 	size = min(vm->total / 2, 1024ull * DW_PER_PAGE * PAGE_SIZE);
 	size = round_down(size, DW_PER_PAGE * PAGE_SIZE);
@@ -922,7 +923,7 @@ __sseu_finish(const char *name,
 		igt_spinner_end(spin);
 	if ((flags & TEST_IDLE) && ret == 0) {
-		ret = i915_gem_wait_for_idle(ce->engine->i915,
+		ret = intel_gt_wait_for_idle(ce->engine->gt,
 					     MAX_SCHEDULE_TIMEOUT);
 		if (ret)
 			return ret;
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index 4ba6ed5c8313..1cd25cfd0246 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
@@ -573,7 +573,7 @@ static void disable_retire_worker(struct drm_i915_private *i915)
 {
 	i915_gem_driver_unregister__shrinker(i915);
 	intel_gt_pm_get(&i915->gt);
-	cancel_delayed_work_sync(&i915->gem.retire_work);
+	cancel_delayed_work_sync(&i915->gt.requests.retire_work);
 }
 static void restore_retire_worker(struct drm_i915_private *i915)
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
index 95e67db52668..39fb69609be4 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -6,6 +6,7 @@
 #include "i915_drv.h"
 #include "intel_gt.h"
 #include "intel_gt_pm.h"
+#include "intel_gt_requests.h"
 #include "intel_mocs.h"
 #include "intel_uncore.h"
 #include "intel_pm.h"
@@ -22,6 +23,7 @@ void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915)
 	intel_gt_init_hangcheck(gt);
 	intel_gt_init_reset(gt);
+	intel_gt_init_requests(gt);
 	intel_gt_pm_init_early(gt);
 	intel_uc_init_early(&gt->uc);
 }
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
index 2ccf8cacaa0a..7b12e9dc6c2f 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
@@ -10,6 +10,7 @@
 #include "intel_engine_pm.h"
 #include "intel_gt.h"
 #include "intel_gt_pm.h"
+#include "intel_gt_requests.h"
 #include "intel_pm.h"
 #include "intel_wakeref.h"
@@ -48,6 +49,7 @@ static int __gt_unpark(struct intel_wakeref *wf)
 	i915_pmu_gt_unparked(i915);
 	intel_gt_queue_hangcheck(gt);
+	intel_gt_unpark_requests(gt);
 	pm_notify(gt, INTEL_GT_UNPARK);
@@ -63,6 +65,7 @@ static int __gt_park(struct intel_wakeref *wf)
 	GEM_TRACE("\n");
 	pm_notify(gt, INTEL_GT_PARK);
+	intel_gt_park_requests(gt);
 	i915_pmu_gt_parked(i915);
 	if (INTEL_GEN(i915) >= 6)
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_requests.c b/drivers/gpu/drm/i915/gt/intel_gt_requests.c
new file mode 100644
index 000000000000..8aed89fd2cdc
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_requests.c
@@ -0,0 +1,123 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "i915_request.h"
+#include "intel_gt.h"
+#include "intel_gt_pm.h"
+#include "intel_gt_requests.h"
+#include "intel_timeline.h"
+
+static void retire_requests(struct intel_timeline *tl)
+{
+	struct i915_request *rq, *rn;
+
+	list_for_each_entry_safe(rq, rn, &tl->requests, link)
+		if (!i915_request_retire(rq))
+			break;
+}
+
+long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
+{
+	struct intel_gt_timelines *timelines = &gt->timelines;
+	struct intel_timeline *tl, *tn;
+	unsigned long active_count = 0;
+	unsigned long flags;
+	bool interruptible;
+	LIST_HEAD(free);
+
+	interruptible = true;
+	if (unlikely(timeout < 0))
+		timeout = -timeout, interruptible = false;
+
+	spin_lock_irqsave(&timelines->lock, flags);
+	list_for_each_entry_safe(tl, tn, &timelines->active_list, link) {
+		if (!mutex_trylock(&tl->mutex))
+			continue;
+
+		intel_timeline_get(tl);
+		GEM_BUG_ON(!tl->active_count);
+		tl->active_count++; /* pin the list element */
+		spin_unlock_irqrestore(&timelines->lock, flags);
+
+		if (timeout > 0) {
+			struct dma_fence *fence;
+
+			fence = i915_active_fence_get(&tl->last_request);
+			if (fence) {
+				timeout = dma_fence_wait_timeout(fence,
+								 interruptible,
+								 timeout);
+				dma_fence_put(fence);
+			}
+		}
+
+		retire_requests(tl);
+
+		spin_lock_irqsave(&timelines->lock, flags);
+
+		/* Resume iteration after dropping lock */
+		list_safe_reset_next(tl, tn, link);
+		if (--tl->active_count)
+			active_count += !!rcu_access_pointer(tl->last_request.fence);
+		else
+			list_del(&tl->link);
+
+		mutex_unlock(&tl->mutex);
+
+		/* Defer the final release to after the spinlock */
+		if (refcount_dec_and_test(&tl->kref.refcount)) {
+			GEM_BUG_ON(tl->active_count);
+			list_add(&tl->link, &free);
+		}
+	}
+	spin_unlock_irqrestore(&timelines->lock, flags);
+
+	list_for_each_entry_safe(tl, tn, &free, link)
+		__intel_timeline_free(&tl->kref);
+
+	return active_count ? timeout : 0;
+}
+
+int intel_gt_wait_for_idle(struct intel_gt *gt, long timeout)
+{
+	/* If the device is asleep, we have no requests outstanding */
+	if (!intel_gt_pm_is_awake(gt))
+		return 0;
+
+	while ((timeout = intel_gt_retire_requests_timeout(gt, timeout)) > 0) {
+		cond_resched();
+		if (signal_pending(current))
+			return -EINTR;
+	}
+
+	return timeout;
+}
+
+static void retire_work_handler(struct work_struct *work)
+{
+	struct intel_gt *gt =
+		container_of(work, typeof(*gt), requests.retire_work.work);
+
+	intel_gt_retire_requests(gt);
+	schedule_delayed_work(&gt->requests.retire_work,
+			      round_jiffies_up_relative(HZ));
+}
+
+void intel_gt_init_requests(struct intel_gt *gt)
+{
+	INIT_DELAYED_WORK(&gt->requests.retire_work, retire_work_handler);
+}
+
+void intel_gt_park_requests(struct intel_gt *gt)
+{
+	cancel_delayed_work(&gt->requests.retire_work);
+}
+
+void intel_gt_unpark_requests(struct intel_gt *gt)
+{
+	schedule_delayed_work(&gt->requests.retire_work,
+			      round_jiffies_up_relative(HZ));
+}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_requests.h b/drivers/gpu/drm/i915/gt/intel_gt_requests.h
new file mode 100644
index 000000000000..bd31cbce47e0
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_requests.h
@@ -0,0 +1,24 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_GT_REQUESTS_H
+#define INTEL_GT_REQUESTS_H
+
+struct intel_gt;
+
+long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout);
+static inline void intel_gt_retire_requests(struct intel_gt *gt)
+{
+	intel_gt_retire_requests_timeout(gt, 0);
+}
+
+int intel_gt_wait_for_idle(struct intel_gt *gt, long timeout);
+
+void intel_gt_init_requests(struct intel_gt *gt);
+void intel_gt_park_requests(struct intel_gt *gt);
+void intel_gt_unpark_requests(struct intel_gt *gt);
+
+#endif /* INTEL_GT_REQUESTS_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h
index 3039cef64b11..f524942c82aa 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h
@@ -49,6 +49,17 @@ struct intel_gt {
 		struct list_head hwsp_free_list;
 	} timelines;
+	struct intel_gt_requests {
+		/**
+		 * We leave the user IRQ off as much as possible,
+		 * but this means that requests will finish and never
+		 * be retired once the system goes idle. Set a timer to
+		 * fire periodically while the ring is running. When it
+		 * fires, go retire requests.
+		 */
+		struct delayed_work retire_work;
+	} requests;
+
 	struct intel_wakeref wakeref;
 	atomic_t user_wakeref;
diff --git a/drivers/gpu/drm/i915/gt/selftest_timeline.c b/drivers/gpu/drm/i915/gt/selftest_timeline.c
index 16abfabf08c7..d6df40cdc8a6 100644
--- a/drivers/gpu/drm/i915/gt/selftest_timeline.c
+++ b/drivers/gpu/drm/i915/gt/selftest_timeline.c
@@ -8,6 +8,7 @@
 #include "intel_engine_pm.h"
 #include "intel_gt.h"
+#include "intel_gt_requests.h"
 #include "../selftests/i915_random.h"
 #include "../i915_selftest.h"
@@ -641,6 +642,7 @@ static int live_hwsp_alternate(void *arg)
 static int live_hwsp_wrap(void *arg)
 {
 	struct drm_i915_private *i915 = arg;
+	struct intel_gt *gt = &i915->gt;
 	struct intel_engine_cs *engine;
 	struct intel_timeline *tl;
 	enum intel_engine_id id;
@@ -651,7 +653,7 @@ static int live_hwsp_wrap(void *arg)
 	 * foreign GPU references.
 	 */
-	tl = intel_timeline_create(&i915->gt, NULL);
+	tl = intel_timeline_create(gt, NULL);
 	if (IS_ERR(tl))
 		return PTR_ERR(tl);
@@ -662,7 +664,7 @@ static int live_hwsp_wrap(void *arg)
 	if (err)
 		goto out_free;
-	for_each_engine(engine, i915, id) {
+	for_each_engine(engine, gt->i915, id) {
 		const u32 *hwsp_seqno[2];
 		struct i915_request *rq;
 		u32 seqno[2];
@@ -734,7 +736,7 @@ static int live_hwsp_wrap(void *arg)
 			goto out;
 		}
-		i915_retire_requests(i915); /* recycle HWSP */
+		intel_gt_retire_requests(gt); /* recycle HWSP */
 	}
 out:
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 033764326601..5f9703ca3104 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -41,6 +41,7 @@
 #include "gem/i915_gem_context.h"
 #include "gt/intel_gt_pm.h"
+#include "gt/intel_gt_requests.h"
 #include "gt/intel_reset.h"
 #include "gt/uc/intel_guc_submission.h"
@@ -3618,33 +3619,33 @@ static int i915_drop_caches_set(void *data, u64 val)
 {
 	struct drm_i915_private *i915 = data;
+	struct intel_gt *gt = &i915->gt;
 	int ret;
 	DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
 		  val, val & DROP_ALL);
 	if (val & DROP_RESET_ACTIVE &&
-	    wait_for(intel_engines_are_idle(&i915->gt),
-		     I915_IDLE_ENGINES_TIMEOUT))
-		intel_gt_set_wedged(&i915->gt);
+	    wait_for(intel_engines_are_idle(gt), I915_IDLE_ENGINES_TIMEOUT))
+		intel_gt_set_wedged(gt);
 	if (val & DROP_RETIRE)
-		i915_retire_requests(i915);
+		intel_gt_retire_requests(gt);
 	if (val & (DROP_IDLE | DROP_ACTIVE)) {
-		ret = i915_gem_wait_for_idle(i915, MAX_SCHEDULE_TIMEOUT);
+		ret = intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
 		if (ret)
 			return ret;
 	}
 	if (val & DROP_IDLE) {
-		ret = intel_gt_pm_wait_for_idle(&i915->gt);
+		ret = intel_gt_pm_wait_for_idle(gt);
 		if (ret)
 			return ret;
 	}
-	if (val & DROP_RESET_ACTIVE && intel_gt_terminally_wedged(&i915->gt))
-		intel_gt_handle_error(&i915->gt, ALL_ENGINES, 0, NULL);
+	if (val & DROP_RESET_ACTIVE && intel_gt_terminally_wedged(gt))
+		intel_gt_handle_error(gt, ALL_ENGINES, 0, NULL);
 	fs_reclaim_acquire(GFP_KERNEL);
 	if (val & DROP_BOUND)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 6e5d5e4ca91d..4e0c4ba2d257 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1717,15 +1717,6 @@ struct drm_i915_private {
 	struct {
 		struct notifier_block pm_notifier;
-
-		/**
-		 * We leave the user IRQ off as much as possible,
-		 * but this means that requests will finish and never
-		 * be retired once the system goes idle. Set a timer to
-		 * fire periodically while the ring is running. When it
-		 * fires, go retire requests.
-		 */
-		struct delayed_work retire_work;
 	} gem;
 	/* For i945gm vblank irq vs. C3 workaround */
@@ -2328,7 +2319,6 @@ void i915_gem_driver_register(struct drm_i915_private *i915);
 void i915_gem_driver_unregister(struct drm_i915_private *i915);
 void i915_gem_driver_remove(struct drm_i915_private *dev_priv);
 void i915_gem_driver_release(struct drm_i915_private *dev_priv);
-int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv, long timeout);
 void i915_gem_suspend(struct drm_i915_private *dev_priv);
 void i915_gem_suspend_late(struct drm_i915_private *dev_priv);
 void i915_gem_resume(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index ab23944571fa..9c2877441540 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -883,23 +883,6 @@ void i915_gem_runtime_suspend(struct drm_i915_private *i915)
 	}
 }
-int i915_gem_wait_for_idle(struct drm_i915_private *i915, long timeout)
-{
-	struct intel_gt *gt = &i915->gt;
-
-	/* If the device is asleep, we have no requests outstanding */
-	if (!intel_gt_pm_is_awake(gt))
-		return 0;
-
-	while ((timeout = i915_retire_requests_timeout(i915, timeout)) > 0) {
-		cond_resched();
-		if (signal_pending(current))
-			return -EINTR;
-	}
-
-	return timeout;
-}
-
 struct i915_vma *
 i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
 			 const struct i915_ggtt_view *view,
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 0a412f6d01d7..7e62c310290f 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -29,6 +29,7 @@
 #include <drm/i915_drm.h>
 #include "gem/i915_gem_context.h"
+#include "gt/intel_gt_requests.h"
 #include "i915_drv.h"
 #include "i915_trace.h"
@@ -37,7 +38,7 @@ I915_SELFTEST_DECLARE(static struct igt_evict_ctl {
 	bool fail_if_busy:1;
 } igt_evict_ctl;)
-static int ggtt_flush(struct drm_i915_private *i915)
+static int ggtt_flush(struct intel_gt *gt)
 {
 	/*
 	 * Not everything in the GGTT is tracked via vma (otherwise we
@@ -46,7 +47,7 @@ static int ggtt_flush(struct drm_i915_private *i915)
 	 * the hopes that we can then remove contexts and the like only
 	 * bound by their active reference.
 	 */
-	return i915_gem_wait_for_idle(i915, MAX_SCHEDULE_TIMEOUT);
+	return intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
 }
 static bool
@@ -92,7 +93,6 @@ i915_gem_evict_something(struct i915_address_space *vm,
 			 u64 start, u64 end,
 			 unsigned flags)
 {
-	struct drm_i915_private *dev_priv = vm->i915;
 	struct drm_mm_scan scan;
 	struct list_head eviction_list;
 	struct i915_vma *vma, *next;
@@ -124,7 +124,7 @@ i915_gem_evict_something(struct i915_address_space *vm,
 				    min_size, alignment, color,
 				    start, end, mode);
-	i915_retire_requests(vm->i915);
+	intel_gt_retire_requests(vm->gt);
 search_again:
 	active = NULL;
@@ -197,7 +197,7 @@ i915_gem_evict_something(struct i915_address_space *vm,
 	if (I915_SELFTEST_ONLY(igt_evict_ctl.fail_if_busy))
 		return -EBUSY;
-	ret = ggtt_flush(dev_priv);
+	ret = ggtt_flush(vm->gt);
 	if (ret)
 		return ret;
@@ -270,7 +270,7 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
 	 * a stray pin (preventing eviction) that can only be resolved by
 	 * retiring.
 	 */
-	i915_retire_requests(vm->i915);
+	intel_gt_retire_requests(vm->gt);
 	if (i915_vm_has_cache_coloring(vm)) {
 		/* Expand search to cover neighbouring guard pages (or lack!) */
@@ -372,7 +372,7 @@ int i915_gem_evict_vm(struct i915_address_space *vm)
 	 * switch otherwise is ineffective.
 	 */
 	if (i915_is_ggtt(vm)) {
-		ret = ggtt_flush(vm->i915);
+		ret = ggtt_flush(vm->gt);
 		if (ret)
 			return ret;
 	}
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 30ee016ac2ba..0d040517787b 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -38,6 +38,7 @@
 #include "display/intel_frontbuffer.h"
 #include "gt/intel_gt.h"
+#include "gt/intel_gt_requests.h"
 #include "i915_drv.h"
 #include "i915_scatterlist.h"
@@ -2529,8 +2530,8 @@ void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
 	if (unlikely(ggtt->do_idle_maps)) {
 		/* XXX This does not prevent more requests being submitted! */
-		if (i915_retire_requests_timeout(dev_priv,
-						 -MAX_SCHEDULE_TIMEOUT)) {
+		if (intel_gt_retire_requests_timeout(ggtt->vm.gt,
+						     -MAX_SCHEDULE_TIMEOUT)) {
 			DRM_ERROR("Failed to wait for idle; VT'd may hang.\n");
 			/* Wait a bit, in hopes it avoids the hang */
 			udelay(10);
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 52f7c4e5b644..437f9fc6282e 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -216,7 +216,7 @@ static void remove_from_engine(struct i915_request *rq)
 	spin_unlock(&locked->active.lock);
 }
-static bool i915_request_retire(struct i915_request *rq)
+bool i915_request_retire(struct i915_request *rq)
 {
 	if (!i915_request_completed(rq))
 		return false;
@@ -1508,68 +1508,6 @@ long i915_request_wait(struct i915_request *rq,
 	return timeout;
 }
-long i915_retire_requests_timeout(struct drm_i915_private *i915, long timeout)
-{
-	struct intel_gt_timelines *timelines = &i915->gt.timelines;
-	struct intel_timeline *tl, *tn;
-	unsigned long active_count = 0;
-	unsigned long flags;
-	bool interruptible;
-	LIST_HEAD(free);
-
-	interruptible = true;
-	if (timeout < 0)
-		timeout = -timeout, interruptible = false;
-
-	spin_lock_irqsave(&timelines->lock, flags);
-	list_for_each_entry_safe(tl, tn, &timelines->active_list, link) {
-		if (!mutex_trylock(&tl->mutex))
-			continue;
-
-		intel_timeline_get(tl);
-		GEM_BUG_ON(!tl->active_count);
-		tl->active_count++; /* pin the list element */
-		spin_unlock_irqrestore(&timelines->lock, flags);
-
-		if (timeout > 0) {
-			struct dma_fence *fence;
-
-			fence = i915_active_fence_get(&tl->last_request);
-			if (fence) {
-				timeout = dma_fence_wait_timeout(fence,
-								 interruptible,
-								 timeout);
-				dma_fence_put(fence);
-			}
-		}
-
-		retire_requests(tl);
-
-		spin_lock_irqsave(&timelines->lock, flags);
-
-		/* Resume iteration after dropping lock */
-		list_safe_reset_next(tl, tn, link);
-		if (--tl->active_count)
-			active_count += !!rcu_access_pointer(tl->last_request.fence);
-		else
-			list_del(&tl->link);
-
-		mutex_unlock(&tl->mutex);
-
-		/* Defer the final release to after the spinlock */
-		if (refcount_dec_and_test(&tl->kref.refcount)) {
-			GEM_BUG_ON(tl->active_count);
-			list_add(&tl->link, &free);
-		}
-	}
-	spin_unlock_irqrestore(&timelines->lock, flags);
-
-	list_for_each_entry_safe(tl, tn, &free, link)
-		__intel_timeline_free(&tl->kref);
-
-	return active_count ? timeout : 0;
-}
-
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
 #include "selftests/mock_request.c"
 #include "selftests/i915_request.c"
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index 256b0715180f..6a95242b280d 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -250,6 +250,7 @@ struct i915_request *__i915_request_commit(struct i915_request *request);
 void __i915_request_queue(struct i915_request *rq,
 			  const struct i915_sched_attr *attr);
+bool i915_request_retire(struct i915_request *rq);
 void i915_request_retire_upto(struct i915_request *rq);
 static inline struct i915_request *
@@ -459,10 +460,4 @@ i915_request_active_timeline(struct i915_request *rq)
 			 lockdep_is_held(&rq->engine->active.lock));
 }
-long i915_retire_requests_timeout(struct drm_i915_private *i915, long timeout);
-static inline void i915_retire_requests(struct drm_i915_private *i915)
-{
-	i915_retire_requests_timeout(i915, 0);
-}
-
 #endif /* I915_REQUEST_H */
diff --git a/drivers/gpu/drm/i915/selftests/igt_flush_test.c b/drivers/gpu/drm/i915/selftests/igt_flush_test.c
index ed496bd6d84f..7b0939e3f007 100644
--- a/drivers/gpu/drm/i915/selftests/igt_flush_test.c
+++ b/drivers/gpu/drm/i915/selftests/igt_flush_test.c
@@ -4,8 +4,8 @@
  * Copyright © 2018 Intel Corporation
  */
-#include "gem/i915_gem_context.h"
 #include "gt/intel_gt.h"
+#include "gt/intel_gt_requests.h"
 #include "i915_drv.h"
 #include "i915_selftest.h"
@@ -14,11 +14,12 @@ int igt_flush_test(struct drm_i915_private *i915)
 {
-	int ret = intel_gt_is_wedged(&i915->gt) ? -EIO : 0;
+	struct intel_gt *gt = &i915->gt;
+	int ret = intel_gt_is_wedged(gt) ? -EIO : 0;
 	cond_resched();
-	if (i915_gem_wait_for_idle(i915, HZ / 5) == -ETIME) {
+	if (intel_gt_wait_for_idle(gt, HZ / 5) == -ETIME) {
 		pr_err("%pS timed out, cancelling all further testing.\n",
 		       __builtin_return_address(0));
@@ -26,7 +27,7 @@ int igt_flush_test(struct drm_i915_private *i915)
 			  __builtin_return_address(0));
 		GEM_TRACE_DUMP();
-		intel_gt_set_wedged(&i915->gt);
+		intel_gt_set_wedged(gt);
 		ret = -EIO;
 	}
diff --git a/drivers/gpu/drm/i915/selftests/igt_live_test.c b/drivers/gpu/drm/i915/selftests/igt_live_test.c
index eae90f97df6c..810b60100c2c 100644
--- a/drivers/gpu/drm/i915/selftests/igt_live_test.c
+++ b/drivers/gpu/drm/i915/selftests/igt_live_test.c
@@ -4,7 +4,8 @@
  * Copyright © 2018 Intel Corporation
  */
-#include "../i915_drv.h"
+#include "i915_drv.h"
+#include "gt/intel_gt_requests.h"
 #include "../i915_selftest.h"
 #include "igt_flush_test.h"
@@ -23,7 +24,7 @@ int igt_live_test_begin(struct igt_live_test *t,
 	t->func = func;
 	t->name = name;
-	err = i915_gem_wait_for_idle(i915, MAX_SCHEDULE_TIMEOUT);
+	err = intel_gt_wait_for_idle(&i915->gt, MAX_SCHEDULE_TIMEOUT);
 	if (err) {
 		pr_err("%s(%s): failed to idle before, with err=%d!",
 		       func, name, err);
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 87d34167bf89..efe40c29fcf2 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -26,6 +26,7 @@
 #include <linux/pm_runtime.h>
 #include "gt/intel_gt.h"
+#include "gt/intel_gt_requests.h"
 #include "gt/mock_engine.h"
 #include "mock_request.h"
@@ -44,7 +45,8 @@ void mock_device_flush(struct drm_i915_private *i915)
 	do {
 		for_each_engine(engine, i915, id)
 			mock_engine_flush(engine);
-	} while (i915_retire_requests_timeout(i915, MAX_SCHEDULE_TIMEOUT));
+	} while (intel_gt_retire_requests_timeout(&i915->gt,
+						  MAX_SCHEDULE_TIMEOUT));
 }
 static void mock_device_release(struct drm_device *dev)
@@ -98,10 +100,6 @@ static void release_dev(struct device *dev)
 	kfree(pdev);
 }
-static void mock_retire_work_handler(struct work_struct *work)
-{
-}
-
 static int pm_domain_resume(struct device *dev)
 {
 	return pm_generic_runtime_resume(dev);
 }
@@ -180,8 +178,6 @@ struct drm_i915_private *mock_gem_device(void)
 	mock_init_contexts(i915);
-	INIT_DELAYED_WORK(&i915->gem.retire_work, mock_retire_work_handler);
-
 	i915->gt.awake = -1;
 	intel_timelines_init(i915);
-- 
2.23.0
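
As an illustration of the call-site conversion above (not part of the patch): a caller that previously retired and idled through the drm_i915_private is expected to go through the struct intel_gt instead. The sketch below uses only the interfaces added in this series; example_flush() is a hypothetical caller, not code from the tree.

	/* Hypothetical caller, sketch only -- assumes the i915 tree plus
	 * the gt/intel_gt_requests.h header introduced above.
	 */
	#include "i915_drv.h"
	#include "gt/intel_gt_requests.h"

	static int example_flush(struct drm_i915_private *i915)
	{
		struct intel_gt *gt = &i915->gt;

		/* was: i915_retire_requests(i915); */
		intel_gt_retire_requests(gt);

		/* was: i915_gem_wait_for_idle(i915, MAX_SCHEDULE_TIMEOUT); */
		return intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
	}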