Chris Wilson <chris@xxxxxxxxxxxxxxxxxx> writes:

> As intel_runtime_pm_get/_put may be called from any blockable context,
> we need to avoid allowing reclaim from our mallocs, as we need to
> avoid tainting any mutexes held by the callers (as they may themselves
> not allow for allocations as they are taken in the shrinker).
>
> <4> [435.339331] WARNING: possible circular locking dependency detected
> <4> [435.339364] 5.1.0-rc4-CI-Trybot_4116+ #1 Tainted: G U
> <4> [435.339395] ------------------------------------------------------
> <4> [435.339426] gem_caching/1334 is trying to acquire lock:
> <4> [435.339456] 000000004505c39b (wakeref#3){+.+.}, at: intel_engine_pm_put+0x1b/0x40 [i915]
> <4> [435.339788]
> but task is already holding lock:
> <4> [435.339819] 00000000ee77b4ed (fs_reclaim){+.+.}, at: fs_reclaim_acquire.part.24+0x0/0x30
> <4> [435.339879]
> which lock already depends on the new lock.
>
> <4> [435.339918]
> the existing dependency chain (in reverse order) is:
> <4> [435.339952]
> -> #1 (fs_reclaim){+.+.}:
> <4> [435.339998] fs_reclaim_acquire.part.24+0x24/0x30
> <4> [435.340035] kmem_cache_alloc_trace+0x2a/0x290
> <4> [435.340311] __print_intel_runtime_pm_wakeref+0x24/0x160 [i915]
> <4> [435.340590] untrack_intel_runtime_pm_wakeref+0x16e/0x1d0 [i915]
> <4> [435.340869] intel_runtime_pm_put_unchecked+0xd/0x30 [i915]
> <4> [435.341147] __intel_wakeref_put_once+0x22/0x40 [i915]
> <4> [435.341508] i915_request_retire+0x477/0xaf0 [i915]
> <4> [435.341871] ring_retire_requests+0x86/0x160 [i915]
> <4> [435.342226] i915_retire_requests+0x58/0xc0 [i915]
> <4> [435.342576] retire_work_handler+0x5b/0x70 [i915]
> <4> [435.342615] process_one_work+0x245/0x610
> <4> [435.342646] worker_thread+0x37/0x380
> <4> [435.342679] kthread+0x119/0x130
> <4> [435.342714] ret_from_fork+0x3a/0x50
> <4> [435.342739]
> -> #0 (wakeref#3){+.+.}:
> <4> [435.342788] lock_acquire+0xa6/0x1c0
> <4> [435.342822] __mutex_lock+0x8c/0x960
> <4> [435.342853] atomic_dec_and_mutex_lock+0x33/0x50
> <4> [435.343151] intel_engine_pm_put+0x1b/0x40 [i915]
> <4> [435.343501] i915_request_retire+0x477/0xaf0 [i915]
> <4> [435.343851] ring_retire_requests+0x86/0x160 [i915]
> <4> [435.344202] i915_retire_requests+0x58/0xc0 [i915]
> <4> [435.344543] i915_gem_shrink+0xd8/0x5b0 [i915]
> <4> [435.344835] i915_drop_caches_set+0x17b/0x250 [i915]
> <4> [435.344877] simple_attr_write+0xb0/0xd0
> <4> [435.344911] full_proxy_write+0x51/0x80
> <4> [435.344943] vfs_write+0xbd/0x1b0
> <4> [435.344972] ksys_write+0x55/0xe0
> <4> [435.345002] do_syscall_64+0x55/0x190
> <4> [435.345040] entry_SYSCALL_64_after_hwframe+0x49/0xbe
>
> Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
> ---
>  drivers/gpu/drm/i915/intel_runtime_pm.c | 8 +++++---
>  1 file changed, 5 insertions(+), 3 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
> index e6d1e592225b..3107a742d8ad 100644
> --- a/drivers/gpu/drm/i915/intel_runtime_pm.c
> +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
> @@ -162,7 +162,7 @@ static void cancel_intel_runtime_pm_wakeref(struct drm_i915_private *i915,
>  		      rpm->debug.count, atomic_read(&rpm->wakeref_count))) {
>  		char *buf;
>
> -		buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
> +		buf = kmalloc(PAGE_SIZE, GFP_NOWAIT | __GFP_NOWARN);
>  		if (!buf)
>  			return;

OK, so we then just give up on printing the stack, and no harm is done
even if we increase our chances of failing the allocation. And there
will be a log entry a priori to indicate the mismatch regardless.
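To spell out the trade-off being accepted: the debug dump becomes
strictly best-effort. The pattern reduces to something like the sketch
below (a hypothetical stand-in, not the actual i915 function; only the
allocation flags and the early return are the point). GFP_NOWAIT never
enters direct reclaim, so lockdep no longer sees a path from the
wakeref mutex into fs_reclaim:

  #include <linux/gfp.h>
  #include <linux/slab.h>
  #include <drm/drm_print.h>

  /* Hypothetical sketch of the pattern under review. */
  static void print_wakeref_debug(struct drm_printer *p)
  {
  	char *buf;

  	/*
  	 * May be called with fs_reclaim already held, so do not enter
  	 * reclaim (GFP_NOWAIT) and stay quiet on failure (__GFP_NOWARN).
  	 */
  	buf = kmalloc(PAGE_SIZE, GFP_NOWAIT | __GFP_NOWARN);
  	if (!buf)
  		return; /* best effort only; skip the decorated dump */

  	/* ... format the tracked wakeref stacks into buf ... */
  	drm_printf(p, "%s", buf);
  	kfree(buf);
  }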
Reviewed-by: Mika Kuoppala <mika.kuoppala@xxxxxxxxxxxxxxx>

>
> @@ -198,7 +198,7 @@ __print_intel_runtime_pm_wakeref(struct drm_printer *p,
>  	unsigned long i;
>  	char *buf;
>
> -	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
> +	buf = kmalloc(PAGE_SIZE, GFP_NOWAIT | __GFP_NOWARN);
>  	if (!buf)
>  		return;
>
> @@ -282,7 +282,9 @@ void print_intel_runtime_pm_wakeref(struct drm_i915_private *i915,
>  		if (dbg.count <= alloc)
>  			break;
>
> -		s = krealloc(dbg.owners, dbg.count * sizeof(*s), GFP_KERNEL);
> +		s = krealloc(dbg.owners,
> +			     dbg.count * sizeof(*s),
> +			     GFP_NOWAIT | __GFP_NOWARN);
>  		if (!s)
>  			goto out;
>
> --
> 2.20.1