On Thu, Jul 27, 2023 at 08:35:18AM +0300, Jouni Högander wrote: > Take into account dma fences in dirtyfb callback. If there is no > unsignaled dma fences perform flush immediately. If there are > unsignaled dma fences perform invalidate and add callback which will > queue flush when the fence gets signaled. > > v2: Use dma_resv_get_singleton > > Signed-off-by: Jouni Högander <jouni.hogander@xxxxxxxxx> > --- > drivers/gpu/drm/i915/display/intel_fb.c | 57 +++++++++++++++++++++++-- > 1 file changed, 54 insertions(+), 3 deletions(-) > > diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c > index 446bbf7986b6..56a21377680d 100644 > --- a/drivers/gpu/drm/i915/display/intel_fb.c > +++ b/drivers/gpu/drm/i915/display/intel_fb.c > @@ -7,6 +7,9 @@ > #include <drm/drm_framebuffer.h> > #include <drm/drm_modeset_helper.h> > > +#include <linux/dma-fence.h> > +#include <linux/dma-resv.h> > + > #include "i915_drv.h" > #include "intel_display.h" > #include "intel_display_types.h" > @@ -1896,6 +1899,21 @@ static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb, > return drm_gem_handle_create(file, &obj->base, handle); > } > > +struct frontbuffer_fence_cb { > + struct dma_fence_cb base; > + struct intel_frontbuffer *front; > +}; > + > +static void intel_user_framebuffer_fence_wake(struct dma_fence *dma, > + struct dma_fence_cb *data) > +{ > + struct frontbuffer_fence_cb *cb = container_of(data, typeof(*cb), base); > + > + intel_frontbuffer_queue_flush(cb->front); > + kfree(cb); > + dma_fence_put(dma); > +} > + > static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb, > struct drm_file *file, > unsigned int flags, unsigned int color, > @@ -1903,11 +1921,44 @@ static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb, > unsigned int num_clips) > { > struct drm_i915_gem_object *obj = intel_fb_obj(fb); > + struct intel_frontbuffer *front = to_intel_frontbuffer(fb); > + struct dma_fence *fence; > + struct 
frontbuffer_fence_cb *cb; > + int ret = 0; > + > + if (dma_resv_test_signaled(obj->base.resv, dma_resv_usage_rw(false))) > + goto flush; > + > + intel_frontbuffer_invalidate(front, ORIGIN_DIRTYFB); Looks like this could be deferred until dma_fence_add_callback() has informed us whether the fence has signalled in the meantime. That way we only do the invalidate if we successfully add the callback. Could in theory drop the dma_resv_test_signaled() as well, but maybe it's worth keeping that around to avoid the kmalloc()/etc. whenever possible. Apart from that and the few comments I gave to the other patch it all looks pretty reasonable to me. > + > + ret = dma_resv_get_singleton(obj->base.resv, dma_resv_usage_rw(false), > + &fence); > + if (ret || !fence) > + goto flush; > + > + cb = kmalloc(sizeof(*cb), GFP_KERNEL); > + if (!cb) { > + dma_fence_put(fence); > + ret = -ENOMEM; > + goto flush; > + } > > - i915_gem_object_flush_if_display(obj); > - intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB); > + cb->front = front; > > - return 0; > + ret = dma_fence_add_callback(fence, &cb->base, > + intel_user_framebuffer_fence_wake); > + if (ret) { > + intel_user_framebuffer_fence_wake(fence, &cb->base); > + if (ret == -ENOENT) > + ret = 0; > + } > + > + return ret; > + > +flush: > + i915_gem_object_flush_if_display(obj); > + intel_frontbuffer_flush(front, ORIGIN_DIRTYFB); > + return ret; > } > > static const struct drm_framebuffer_funcs intel_fb_funcs = { > -- > 2.34.1 -- Ville Syrjälä Intel