From: Rob Clark <robdclark@xxxxxxxxxxxx>

If we are under enough memory pressure, we should stall waiting for
active buffers to become idle in order to evict.

Signed-off-by: Rob Clark <robdclark@xxxxxxxxxxxx>
---
 drivers/gpu/drm/msm/msm_gem_shrinker.c | 68 +++++++++++++++++++++-----
 drivers/gpu/drm/msm/msm_gpu_trace.h    | 16 +++---
 2 files changed, 66 insertions(+), 18 deletions(-)

diff --git a/drivers/gpu/drm/msm/msm_gem_shrinker.c b/drivers/gpu/drm/msm/msm_gem_shrinker.c
index 5cc05d669a08..b0bee040432a 100644
--- a/drivers/gpu/drm/msm/msm_gem_shrinker.c
+++ b/drivers/gpu/drm/msm/msm_gem_shrinker.c
@@ -24,6 +24,11 @@ static bool can_swap(void)
 	return enable_eviction && get_nr_swap_pages() > 0;
 }
 
+static bool can_block(struct shrink_control *sc)
+{
+	return current_is_kswapd() || (sc->gfp_mask & __GFP_RECLAIM);
+}
+
 static unsigned long
 msm_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
 {
@@ -65,26 +70,65 @@ evict(struct drm_gem_object *obj)
 	return true;
 }
 
+static bool
+wait_for_idle(struct drm_gem_object *obj)
+{
+	enum dma_resv_usage usage = dma_resv_usage_rw(true);
+	return dma_resv_wait_timeout(obj->resv, usage, false, 1000) > 0;
+}
+
+static bool
+active_purge(struct drm_gem_object *obj)
+{
+	if (!wait_for_idle(obj))
+		return false;
+
+	return purge(obj);
+}
+
+static bool
+active_evict(struct drm_gem_object *obj)
+{
+	if (!wait_for_idle(obj))
+		return false;
+
+	return evict(obj);
+}
+
 static unsigned long
 msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
 {
 	struct msm_drm_private *priv =
 		container_of(shrinker, struct msm_drm_private, shrinker);
+	struct {
+		struct drm_gem_lru *lru;
+		bool (*shrink)(struct drm_gem_object *obj);
+		bool cond;
+		unsigned long freed;
+	} stages[] = {
+		/* Stages of progressively more aggressive/expensive reclaim: */
+		{ &priv->lru.dontneed, purge,        true },
+		{ &priv->lru.willneed, evict,        can_swap() },
+		{ &priv->lru.dontneed, active_purge, can_block(sc) },
+		{ &priv->lru.willneed, active_evict, can_swap() && can_block(sc) },
+	};
 	long nr = sc->nr_to_scan;
-	unsigned long freed, purged, evicted = 0;
-
-	purged = drm_gem_lru_scan(&priv->lru.dontneed, nr, purge);
-	nr -= purged;
-
-	if (can_swap() && nr > 0) {
-		evicted = drm_gem_lru_scan(&priv->lru.willneed, nr, evict);
-		nr -= evicted;
+	unsigned long freed = 0;
+
+	for (unsigned i = 0; (nr > 0) && (i < ARRAY_SIZE(stages)); i++) {
+		if (!stages[i].cond)
+			continue;
+		stages[i].freed =
+			drm_gem_lru_scan(stages[i].lru, nr, stages[i].shrink);
+		nr -= stages[i].freed;
+		freed += stages[i].freed;
 	}
 
-	freed = purged + evicted;
-
-	if (freed)
-		trace_msm_gem_shrink(sc->nr_to_scan, purged, evicted);
+	if (freed) {
+		trace_msm_gem_shrink(sc->nr_to_scan, stages[0].freed,
+				     stages[1].freed, stages[2].freed,
+				     stages[3].freed);
+	}
 
 	return (freed > 0) ? freed : SHRINK_STOP;
 }
diff --git a/drivers/gpu/drm/msm/msm_gpu_trace.h b/drivers/gpu/drm/msm/msm_gpu_trace.h
index 8867fa0a0306..ac40d857bc45 100644
--- a/drivers/gpu/drm/msm/msm_gpu_trace.h
+++ b/drivers/gpu/drm/msm/msm_gpu_trace.h
@@ -116,22 +116,26 @@ TRACE_EVENT(msm_gmu_freq_change,
 
 TRACE_EVENT(msm_gem_shrink,
-		TP_PROTO(u32 nr_to_scan, u32 purged, u32 evicted),
-		TP_ARGS(nr_to_scan, purged, evicted),
+		TP_PROTO(u32 nr_to_scan, u32 purged, u32 evicted,
+			 u32 active_purged, u32 active_evicted),
+		TP_ARGS(nr_to_scan, purged, evicted, active_purged, active_evicted),
 
 		TP_STRUCT__entry(
 			__field(u32, nr_to_scan)
 			__field(u32, purged)
 			__field(u32, evicted)
+			__field(u32, active_purged)
+			__field(u32, active_evicted)
 			),
 
 		TP_fast_assign(
 			__entry->nr_to_scan = nr_to_scan;
 			__entry->purged = purged;
 			__entry->evicted = evicted;
+			__entry->active_purged = active_purged;
+			__entry->active_evicted = active_evicted;
 			),
 
-		TP_printk("nr_to_scan=%u pages, purged=%u pages, evicted=%u pages",
-			  __entry->nr_to_scan,
-			  __entry->purged,
-			  __entry->evicted)
+		TP_printk("nr_to_scan=%u pg, purged=%u pg, evicted=%u pg, active_purged=%u pg, active_evicted=%u pg",
+			  __entry->nr_to_scan, __entry->purged, __entry->evicted,
+			  __entry->active_purged, __entry->active_evicted)
 );
-- 
2.36.1
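
For readers who want to play with the control flow outside the kernel, the
standalone C sketch below mirrors the table-driven reclaim loop the patch adds
to msm_gem_shrinker_scan(): each stage pairs an LRU with a gate condition,
later stages only run when their gate allows it, and the loop stops as soon as
the requested number of pages has been freed. Everything here (fake_lru,
scan(), the page counts) is a made-up stand-in for illustration, not a kernel
API, and the dma_resv wait on active buffers is deliberately omitted.

/* Simplified userspace illustration of staged, progressively more
 * aggressive reclaim.  All types and helpers are hypothetical. */
#include <stdbool.h>
#include <stdio.h>

struct fake_lru { unsigned long reclaimable; };

/* Stand-in for drm_gem_lru_scan(): frees up to nr pages from the LRU
 * and reports how many were actually freed. */
static unsigned long scan(struct fake_lru *lru, unsigned long nr)
{
	unsigned long freed = lru->reclaimable < nr ? lru->reclaimable : nr;
	lru->reclaimable -= freed;
	return freed;
}

int main(void)
{
	struct fake_lru dontneed = { .reclaimable = 32 };
	struct fake_lru willneed = { .reclaimable = 512 };
	bool can_swap = true;   /* swap space available */
	bool can_block = true;  /* kswapd, or gfp_mask allows direct reclaim */

	struct {
		struct fake_lru *lru;
		bool cond;
	} stages[] = {
		/* progressively more aggressive/expensive: */
		{ &dontneed, true },                  /* purge        */
		{ &willneed, can_swap },              /* evict        */
		{ &dontneed, can_block },             /* active_purge */
		{ &willneed, can_swap && can_block }, /* active_evict */
	};

	unsigned long nr = 128, freed = 0;

	for (unsigned int i = 0;
	     nr > 0 && i < sizeof(stages) / sizeof(stages[0]); i++) {
		if (!stages[i].cond)
			continue;
		unsigned long got = scan(stages[i].lru, nr);
		nr -= got;
		freed += got;
	}

	printf("freed %lu pages, %lu still requested\n", freed, nr);
	return 0;
}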