With the introduction of the scheduler, most of the code related to
timeout detection, recovery, and submission retirement is no longer
needed in the msm driver. This patch removes the now-unused code.

Signed-off-by: Sharat Masetty <smasetty@xxxxxxxxxxxxxx>
---
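A note for reviewers on where these duties land (a rough sketch, not the
exact code from the msm_sched patch earlier in this series): the scheduler
feeds the ring from its run_job() backend op, arms a per-job timeout that
takes over for the driver-private hangcheck timer and fires timedout_job()
-- the path that ends up in msm_sched_gpu_recovery() -- and retires
completed jobs from free_job() once the hw fence signals. run_job,
timedout_job and free_job are the real drm_sched_backend_ops hooks; the
msm-side helpers and the sched_job field below are illustrative
assumptions only:

#include <drm/gpu_scheduler.h>

/* Illustrative only: assumes msm_gem_submit embeds a drm_sched_job in a
 * field named sched_job. */
static inline struct msm_gem_submit *to_msm_submit(struct drm_sched_job *job)
{
	return container_of(job, struct msm_gem_submit, sched_job);
}

static struct dma_fence *msm_sched_run_job(struct drm_sched_job *job)
{
	struct msm_gem_submit *submit = to_msm_submit(job);

	/* feed the ring; the scheduler treats the returned fence as the
	 * job's hardware completion */
	submit->gpu->funcs->submit(submit->gpu, submit, NULL);
	return dma_fence_get(submit->hw_fence);
}

static void msm_sched_timedout_job(struct drm_sched_job *job)
{
	struct msm_gem_submit *submit = to_msm_submit(job);

	/* per-job timeout fired: hand off to recovery, much like
	 * recover_worker() below now does */
	msm_sched_gpu_recovery(submit->gpu, submit);
}

static void msm_sched_free_job(struct drm_sched_job *job)
{
	/* retirement analogue of retire_submit(): drop the submit once
	 * its hw fence has signaled */
	msm_gem_submit_free(to_msm_submit(job));
}

static const struct drm_sched_backend_ops msm_sched_ops = {
	/* .dependency hook for fence dependencies elided */
	.run_job = msm_sched_run_job,
	.timedout_job = msm_sched_timedout_job,
	.free_job = msm_sched_free_job,
};

The timeout passed to drm_sched_init() when a ring's scheduler is created
takes over the role of DRM_MSM_HANGCHECK_PERIOD, which is why that define
goes away below.
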
 drivers/gpu/drm/msm/adreno/a5xx_gpu.c |   3 -
 drivers/gpu/drm/msm/msm_drv.h         |   2 -
 drivers/gpu/drm/msm/msm_fence.c       |  22 ----
 drivers/gpu/drm/msm/msm_gem.c         |  36 ------
 drivers/gpu/drm/msm/msm_gpu.c         | 204 ----------------------------------
 drivers/gpu/drm/msm/msm_gpu.h         |   6 -
 6 files changed, 273 deletions(-)

diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index d39400e..6f5a4c5 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -1034,9 +1034,6 @@ static void a5xx_fault_detect_irq(struct msm_gpu *gpu)
 		gpu_read64(gpu, REG_A5XX_CP_IB2_BASE, REG_A5XX_CP_IB2_BASE_HI),
 		gpu_read(gpu, REG_A5XX_CP_IB2_BUFSZ));
 
-	/* Turn off the hangcheck timer to keep it from bothering us */
-	del_timer(&gpu->hangcheck_timer);
-
 	queue_work(priv->wq, &gpu->recover_work);
 }
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index e461a9c..9004738 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -211,8 +211,6 @@ struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
 void *msm_gem_get_vaddr_active(struct drm_gem_object *obj);
 void msm_gem_put_vaddr(struct drm_gem_object *obj);
 int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv);
-int msm_gem_sync_object(struct drm_gem_object *obj,
-		struct msm_fence_context *fctx, bool exclusive);
 void msm_gem_move_to_active(struct drm_gem_object *obj, struct msm_gpu *gpu);
 void msm_gem_move_to_inactive(struct drm_gem_object *obj);
 int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout);
diff --git a/drivers/gpu/drm/msm/msm_fence.c b/drivers/gpu/drm/msm/msm_fence.c
index 0e7912b..d5bba25 100644
--- a/drivers/gpu/drm/msm/msm_fence.c
+++ b/drivers/gpu/drm/msm/msm_fence.c
@@ -44,11 +44,6 @@ void msm_fence_context_free(struct msm_fence_context *fctx)
 	kfree(fctx);
 }
 
-static inline bool fence_completed(struct msm_fence_context *fctx, uint32_t fence)
-{
-	return (int32_t)(fctx->completed_fence - fence) >= 0;
-}
-
 /* legacy path for WAIT_FENCE ioctl: */
 int msm_wait_fence(struct msm_ringbuffer *ring, uint32_t fence_id,
 		ktime_t *timeout)
@@ -86,16 +81,6 @@ int msm_wait_fence(struct msm_ringbuffer *ring, uint32_t fence_id,
 	return ret;
 }
 
-/* called from workqueue */
-void msm_update_fence(struct msm_fence_context *fctx, uint32_t fence)
-{
-	spin_lock(&fctx->spinlock);
-	fctx->completed_fence = max(fence, fctx->completed_fence);
-	spin_unlock(&fctx->spinlock);
-
-	wake_up_all(&fctx->event);
-}
-
 struct msm_fence {
 	struct dma_fence base;
 	struct msm_fence_context *fctx;
@@ -122,17 +107,10 @@ static bool msm_fence_enable_signaling(struct dma_fence *fence)
 	return true;
 }
 
-static bool msm_fence_signaled(struct dma_fence *fence)
-{
-	struct msm_fence *f = to_msm_fence(fence);
-	return fence_completed(f->fctx, f->base.seqno);
-}
-
 static const struct dma_fence_ops msm_fence_ops = {
 	.get_driver_name = msm_fence_get_driver_name,
 	.get_timeline_name = msm_fence_get_timeline_name,
 	.enable_signaling = msm_fence_enable_signaling,
-	.signaled = msm_fence_signaled,
 	.wait = dma_fence_default_wait,
 	.release = dma_fence_free,
 };
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 7a12f30..e916c00 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -627,42 +627,6 @@ void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass)
 	mutex_unlock(&msm_obj->lock);
 }
 
-/* must be called before _move_to_active().. */
-int msm_gem_sync_object(struct drm_gem_object *obj,
-		struct msm_fence_context *fctx, bool exclusive)
-{
-	struct msm_gem_object *msm_obj = to_msm_bo(obj);
-	struct reservation_object_list *fobj;
-	struct dma_fence *fence;
-	int i, ret;
-
-	fobj = reservation_object_get_list(msm_obj->resv);
-	if (!fobj || (fobj->shared_count == 0)) {
-		fence = reservation_object_get_excl(msm_obj->resv);
-		/* don't need to wait on our own fences, since ring is fifo */
-		if (fence && (fence->context != fctx->context)) {
-			ret = dma_fence_wait(fence, true);
-			if (ret)
-				return ret;
-		}
-	}
-
-	if (!exclusive || !fobj)
-		return 0;
-
-	for (i = 0; i < fobj->shared_count; i++) {
-		fence = rcu_dereference_protected(fobj->shared[i],
-				reservation_object_held(msm_obj->resv));
-		if (fence->context != fctx->context) {
-			ret = dma_fence_wait(fence, true);
-			if (ret)
-				return ret;
-		}
-	}
-
-	return 0;
-}
-
 void msm_gem_move_to_active(struct drm_gem_object *obj, struct msm_gpu *gpu)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 481a55c..1cc8745 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -20,7 +20,6 @@
 #include "msm_mmu.h"
 #include "msm_fence.h"
 
-#include <linux/string_helpers.h>
 #include <linux/pm_opp.h>
 #include <linux/devfreq.h>
 
@@ -273,24 +272,6 @@ int msm_gpu_hw_init(struct msm_gpu *gpu)
 	return ret;
 }
 
-/*
- * Hangcheck detection for locked gpu:
- */
-
-static void update_fences(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
-		uint32_t fence)
-{
-	struct msm_gem_submit *submit;
-
-	list_for_each_entry(submit, &ring->submits, node) {
-		if (submit->seqno > fence)
-			break;
-
-		msm_update_fence(submit->ring->fctx,
-			submit->hw_fence->seqno);
-	}
-}
-
 static struct msm_gem_submit *
 find_submit(struct msm_ringbuffer *ring, uint32_t fence)
 {
@@ -310,146 +291,14 @@ static void update_fences(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
 	return NULL;
 }
 
-static void retire_submits(struct msm_gpu *gpu);
-
 static void recover_worker(struct work_struct *work)
 {
 	struct msm_gpu *gpu = container_of(work, struct msm_gpu, recover_work);
-	struct drm_device *dev = gpu->dev;
-	struct msm_drm_private *priv = dev->dev_private;
 	struct msm_gem_submit *submit;
 	struct msm_ringbuffer *cur_ring = gpu->funcs->active_ring(gpu);
-	int i;
 
 	submit = find_submit(cur_ring, cur_ring->memptrs->fence + 1);
 	return msm_sched_gpu_recovery(gpu, submit);
-
-	/*
-	 * The unused code below will be removed in a subsequent patch
-	 */
-	mutex_lock(&dev->struct_mutex);
-
-	dev_err(dev->dev, "%s: hangcheck recover!\n", gpu->name);
-
-	submit = find_submit(cur_ring, cur_ring->memptrs->fence + 1);
-	if (submit) {
-		struct task_struct *task;
-
-		rcu_read_lock();
-		task = pid_task(submit->pid, PIDTYPE_PID);
-		if (task) {
-			char *cmd;
-
-			/*
-			 * So slightly annoying, in other paths like
-			 * mmap'ing gem buffers, mmap_sem is acquired
-			 * before struct_mutex, which means we can't
-			 * hold struct_mutex across the call to
-			 * get_cmdline(). But submits are retired
-			 * from the same in-order workqueue, so we can
-			 * safely drop the lock here without worrying
-			 * about the submit going away.
-			 */
-			mutex_unlock(&dev->struct_mutex);
-			cmd = kstrdup_quotable_cmdline(task, GFP_KERNEL);
-			mutex_lock(&dev->struct_mutex);
-
-			dev_err(dev->dev, "%s: offending task: %s (%s)\n",
-				gpu->name, task->comm, cmd);
-
-			msm_rd_dump_submit(priv->hangrd, submit,
-				"offending task: %s (%s)", task->comm, cmd);
-
-			kfree(cmd);
-		} else {
-			msm_rd_dump_submit(priv->hangrd, submit, NULL);
-		}
-		rcu_read_unlock();
-	}
-
-
-	/*
-	 * Update all the rings with the latest and greatest fence.. this
-	 * needs to happen after msm_rd_dump_submit() to ensure that the
-	 * bo's referenced by the offending submit are still around.
-	 */
-	for (i = 0; i < gpu->nr_rings; i++) {
-		struct msm_ringbuffer *ring = gpu->rb[i];
-
-		uint32_t fence = ring->memptrs->fence;
-
-		/*
-		 * For the current (faulting?) ring/submit advance the fence by
-		 * one more to clear the faulting submit
-		 */
-		if (ring == cur_ring)
-			fence++;
-
-		update_fences(gpu, ring, fence);
-	}
-
-	if (msm_gpu_active(gpu)) {
-		/* retire completed submits, plus the one that hung: */
-		retire_submits(gpu);
-
-		pm_runtime_get_sync(&gpu->pdev->dev);
-		gpu->funcs->recover(gpu);
-		pm_runtime_put_sync(&gpu->pdev->dev);
-
-		/*
-		 * Replay all remaining submits starting with highest priority
-		 * ring
-		 */
-		for (i = 0; i < gpu->nr_rings; i++) {
-			struct msm_ringbuffer *ring = gpu->rb[i];
-
-			list_for_each_entry(submit, &ring->submits, node)
-				gpu->funcs->submit(gpu, submit, NULL);
-		}
-	}
-
-	mutex_unlock(&dev->struct_mutex);
-
-	msm_gpu_retire(gpu);
-}
-
-static void hangcheck_timer_reset(struct msm_gpu *gpu)
-{
-	DBG("%s", gpu->name);
-	mod_timer(&gpu->hangcheck_timer,
-			round_jiffies_up(jiffies + DRM_MSM_HANGCHECK_JIFFIES));
-}
-
-static void hangcheck_handler(struct timer_list *t)
-{
-	struct msm_gpu *gpu = from_timer(gpu, t, hangcheck_timer);
-	struct drm_device *dev = gpu->dev;
-	struct msm_drm_private *priv = dev->dev_private;
-	struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);
-	uint32_t fence = ring->memptrs->fence;
-
-	if (fence != ring->hangcheck_fence) {
-		/* some progress has been made.. ya! */
-		ring->hangcheck_fence = fence;
-	} else if (fence < ring->seqno) {
-		/* no progress and not done.. hung! */
-		ring->hangcheck_fence = fence;
-		dev_err(dev->dev, "%s: hangcheck detected gpu lockup rb %d!\n",
-				gpu->name, ring->id);
-		dev_err(dev->dev, "%s: completed fence: %u\n",
-				gpu->name, fence);
-		dev_err(dev->dev, "%s: submitted fence: %u\n",
-				gpu->name, ring->seqno);
-
-		queue_work(priv->wq, &gpu->recover_work);
-	}
-
-	/* if still more pending work, reset the hangcheck timer: */
-	if (ring->seqno > ring->hangcheck_fence)
-		hangcheck_timer_reset(gpu);
-
-	/* workaround for missing irq: */
-	queue_work(priv->wq, &gpu->retire_work);
 }
 
 /*
@@ -553,55 +402,6 @@ int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime,
 /*
  * Cmdstream submission/retirement:
  */
-
-static void retire_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
-{
-	int i;
-
-	for (i = 0; i < submit->nr_bos; i++) {
-		struct msm_gem_object *msm_obj = submit->bos[i].obj;
-		/* move to inactive: */
-		msm_gem_move_to_inactive(&msm_obj->base);
-	}
-
-	pm_runtime_mark_last_busy(&gpu->pdev->dev);
-	pm_runtime_put_autosuspend(&gpu->pdev->dev);
-	msm_gem_submit_free(submit);
-}
-
-static void retire_submits(struct msm_gpu *gpu)
-{
-	struct drm_device *dev = gpu->dev;
-	struct msm_gem_submit *submit, *tmp;
-	int i;
-
-	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-
-	/* Retire the commits starting with highest priority */
-	for (i = 0; i < gpu->nr_rings; i++) {
-		struct msm_ringbuffer *ring = gpu->rb[i];
-
-		list_for_each_entry_safe(submit, tmp, &ring->submits, node) {
-			if (dma_fence_is_signaled(submit->hw_fence))
-				retire_submit(gpu, submit);
-		}
-	}
-}
-
-static void retire_worker(struct work_struct *work)
-{
-	struct msm_gpu *gpu = container_of(work, struct msm_gpu, retire_work);
-	struct drm_device *dev = gpu->dev;
-	int i;
-
-	for (i = 0; i < gpu->nr_rings; i++)
-		update_fences(gpu, gpu->rb[i], gpu->rb[i]->memptrs->fence);
-
-	mutex_lock(&dev->struct_mutex);
-	retire_submits(gpu);
-	mutex_unlock(&dev->struct_mutex);
-}
-
 static void signal_hw_fences(struct msm_ringbuffer *ring)
 {
 	unsigned long flags;
@@ -791,12 +591,8 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 	gpu->name = name;
 
 	INIT_LIST_HEAD(&gpu->active_list);
-	INIT_WORK(&gpu->retire_work, retire_worker);
 	INIT_WORK(&gpu->recover_work, recover_worker);
-
-	timer_setup(&gpu->hangcheck_timer, hangcheck_handler, 0);
-
 	spin_lock_init(&gpu->perf_lock);
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index 3bd1920..6296758 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -98,9 +98,6 @@ struct msm_gpu {
 	/* does gpu need hw_init? */
 	bool needs_hw_init;
 
-	/* worker for handling active-list retiring: */
-	struct work_struct retire_work;
-
 	void __iomem *mmio;
 	int irq;
 
@@ -117,9 +114,6 @@ struct msm_gpu {
 	 */
#define DRM_MSM_INACTIVE_PERIOD 66 /* in ms (roughly four frames) */
 
-#define DRM_MSM_HANGCHECK_PERIOD 500 /* in ms */
-#define DRM_MSM_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_MSM_HANGCHECK_PERIOD)
-	struct timer_list hangcheck_timer;
 	struct work_struct recover_work;
 
 	struct drm_gem_object *memptrs_bo;
-- 
1.9.1