Quoting Matthew Auld (2019-05-21 21:42:35)
> +static void clear_pages_signal_irq_worker(struct irq_work *work)
> +{
> +        struct clear_pages_work *w = container_of(work, typeof(*w), irq_work);
> +
> +        dma_fence_signal(&w->dma);
> +        dma_fence_put(&w->dma);
> +}
> +
> +static void clear_pages_dma_fence_cb(struct dma_fence *fence,
> +                                     struct dma_fence_cb *cb)
> +{
> +        struct clear_pages_work *w = container_of(cb, typeof(*w), cb);

        if (fence->error)
                dma_fence_set_error(&w->dma, fence->error);

> +
> +        /*
> +         * Push the signalling of the fence into yet another worker to avoid
> +         * the nightmare locking around the fence spinlock.
> +         */
> +        irq_work_queue(&w->irq_work);
> +}
> +
> +static void clear_pages_worker(struct work_struct *work)
> +{
> +        struct clear_pages_work *w = container_of(work, typeof(*w), work);
> +        struct drm_i915_private *i915 = w->ce->gem_context->i915;
> +        struct drm_i915_gem_object *obj = w->sleeve->obj;
> +        struct i915_vma *vma = w->sleeve->vma;
> +        struct i915_request *rq;
> +        int err = w->dma.error;
> +
> +        if (unlikely(err))
> +                goto out_signal;
> +
> +        if (obj->cache_dirty) {
> +                obj->write_domain = 0;
> +                if (i915_gem_object_has_struct_page(obj))
> +                        drm_clflush_sg(w->sleeve->pages);
> +                obj->cache_dirty = false;
> +        }
> +
> +        mutex_lock(&i915->drm.struct_mutex);
> +        err = i915_vma_pin(vma, 0, 0, PIN_USER);
> +        if (unlikely(err))
> +                goto out_unlock;
> +
> +        rq = i915_request_create(w->ce);
> +        if (IS_ERR(rq)) {
> +                err = PTR_ERR(rq);
> +                goto out_unpin;
> +        }
> +
> +        /* There's no way the fence has signalled */
> +        if (dma_fence_add_callback(&rq->fence, &w->cb,
> +                                   clear_pages_dma_fence_cb))
> +                GEM_BUG_ON(1);
> +
> +        if (w->ce->engine->emit_init_breadcrumb) {
> +                err = w->ce->engine->emit_init_breadcrumb(rq);
> +                if (unlikely(err))
> +                        goto out_request;
> +        }
> +
> +        err = intel_emit_vma_fill_blt(rq, vma, w->value);
> +        if (unlikely(err))
> +                goto out_request;
> +
> +        err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
> +out_request:
> +        if (unlikely(err)) {
> +                dma_fence_set_error(&w->dma, err);

Propagation should happen via rq signaling.

> +                i915_request_skip(rq, err);
> +                err = 0;
> +        }

So much work to be done to unravel these locks.
-Chris
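
For reference, a minimal sketch of the callback with the suggested error
forwarding folded in. It assumes the clear_pages_work layout quoted
above; dma_fence_set_error() is only valid before a fence is signalled,
which holds here because the signal itself is deferred to the irq_work:

static void clear_pages_dma_fence_cb(struct dma_fence *fence,
                                     struct dma_fence_cb *cb)
{
        struct clear_pages_work *w = container_of(cb, typeof(*w), cb);

        /* Forward any error from the request's fence to our fence. */
        if (fence->error)
                dma_fence_set_error(&w->dma, fence->error);

        /*
         * dma_fence_signal(&w->dma) is deferred to irq_work: this
         * callback runs under the signalling fence's spinlock, and
         * taking w->dma.lock here invites lock-order trouble.
         */
        irq_work_queue(&w->irq_work);
}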
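
The deferral itself is a reusable pattern. A self-contained sketch with
hypothetical names (deferred_signal, deferred_signal_run,
deferred_signal_init; not from the patch): the fence callback only
queues an irq_work, and the irq_work, which runs after the signalling
fence's spinlock has been dropped, performs the dma_fence_signal() and
drops the reference:

#include <linux/dma-fence.h>
#include <linux/irq_work.h>

struct deferred_signal {
        struct dma_fence fence;         /* initialised elsewhere with dma_fence_init() */
        struct irq_work irq_work;       /* runs the deferred signal */
};

static void deferred_signal_run(struct irq_work *work)
{
        struct deferred_signal *ds = container_of(work, typeof(*ds), irq_work);

        /*
         * By the time this runs, the spinlock of the fence whose
         * callback queued us has been released, so taking
         * ds->fence.lock inside dma_fence_signal() is safe.
         */
        dma_fence_signal(&ds->fence);
        dma_fence_put(&ds->fence);      /* drop the ref held for the irq_work */
}

static void deferred_signal_init(struct deferred_signal *ds)
{
        init_irq_work(&ds->irq_work, deferred_signal_run);
}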