Currently dma_fence_signal() tries to avoid the spinlock and only takes
it if absolutely required to walk the callback list. However, to allow
for some users to surreptitiously insert lazy signal callbacks that do
not depend on enabling the signaling mechanism around every fence, we
always need to notify the callbacks on signaling. As such, we will
always need to take the spinlock and dma_fence_signal() effectively
becomes a clone of dma_fence_signal_locked().

Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
Cc: Christian König <christian.koenig@xxxxxxx>
Cc: Daniel Vetter <daniel.vetter@xxxxxxxx>
---
 drivers/dma-buf/dma-fence.c | 19 +++++--------------
 1 file changed, 5 insertions(+), 14 deletions(-)

diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c
index ff0cd6eae766..f23eb9f19b4e 100644
--- a/drivers/dma-buf/dma-fence.c
+++ b/drivers/dma-buf/dma-fence.c
@@ -176,6 +176,7 @@ EXPORT_SYMBOL(dma_fence_signal_locked);
 int dma_fence_signal(struct dma_fence *fence)
 {
 	unsigned long flags;
+	int ret;
 
 	if (!fence)
 		return -EINVAL;
@@ -183,21 +184,11 @@ int dma_fence_signal(struct dma_fence *fence)
 	if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
 		return -EINVAL;
 
-	fence->timestamp = ktime_get();
-	set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);
-	trace_dma_fence_signaled(fence);
-
-	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags)) {
-		struct dma_fence_cb *cur, *tmp;
+	spin_lock_irqsave(fence->lock, flags);
+	ret = dma_fence_signal_locked(fence);
+	spin_unlock_irqrestore(fence->lock, flags);
 
-		spin_lock_irqsave(fence->lock, flags);
-		list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
-			list_del_init(&cur->node);
-			cur->func(fence, cur);
-		}
-		spin_unlock_irqrestore(fence->lock, flags);
-	}
-	return 0;
+	return ret;
 }
 EXPORT_SYMBOL(dma_fence_signal);
-- 
2.23.0.rc1
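
For readers following along, here is a minimal, hypothetical sketch (not part of
the patch) of the kind of "lazy" callback user the commit message alludes to. The
helper name lazy_add_callback() is made up for illustration; it chains a
dma_fence_cb straight onto fence->cb_list under fence->lock without ever calling
->ops->enable_signaling(), and so it only works if dma_fence_signal()
unconditionally walks the callback list, as it does after this change:

#include <linux/dma-fence.h>
#include <linux/list.h>
#include <linux/spinlock.h>

/*
 * Hypothetical driver-internal helper: register a callback without
 * asking the fence to enable signaling.  Mirrors the list handling of
 * dma_fence_add_callback(), but deliberately skips the
 * DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT / ->ops->enable_signaling() step.
 */
static int lazy_add_callback(struct dma_fence *fence,
			     struct dma_fence_cb *cb,
			     dma_fence_func_t func)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(fence->lock, flags);
	if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
		cb->func = func;
		/* no enable_signaling(): rely on the signaler walking cb_list */
		list_add_tail(&cb->node, &fence->cb_list);
	} else {
		/* already signaled; behave like dma_fence_add_callback() */
		INIT_LIST_HEAD(&cb->node);
		ret = -ENOENT;
	}
	spin_unlock_irqrestore(fence->lock, flags);

	return ret;
}

With the old dma_fence_signal(), such a callback would only ever run if some other
path happened to set DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT first; with this patch the
signaler always takes fence->lock and runs the list via dma_fence_signal_locked(),
so the callback is guaranteed to fire.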