Handle user fences the same way as fences with the DMA_RESV_USAGE_WRITE flag when the driver sets the DRIVER_USER_FENCE feature flag. Signed-off-by: Christian König <christian.koenig@xxxxxxx> --- drivers/gpu/drm/drm_gem_atomic_helper.c | 68 ++++++++++++++++--------- 1 file changed, 45 insertions(+), 23 deletions(-) diff --git a/drivers/gpu/drm/drm_gem_atomic_helper.c b/drivers/gpu/drm/drm_gem_atomic_helper.c index a5026f617739..75d04333ff2e 100644 --- a/drivers/gpu/drm/drm_gem_atomic_helper.c +++ b/drivers/gpu/drm/drm_gem_atomic_helper.c @@ -5,6 +5,7 @@ #include <drm/drm_atomic_state_helper.h> #include <drm/drm_atomic_uapi.h> +#include <drm/drm_drv.h> #include <drm/drm_gem.h> #include <drm/drm_gem_atomic_helper.h> #include <drm/drm_gem_framebuffer_helper.h> @@ -121,6 +122,40 @@ * Plane Helpers */ +static int chain_fb_fences(struct drm_framebuffer *fb, + enum dma_resv_usage usage, + struct dma_fence **fence) +{ + size_t i; + int ret; + + for (i = 0; i < fb->format->num_planes; ++i) { + struct drm_gem_object *obj = drm_gem_fb_get_obj(fb, i); + struct dma_fence *new; + + if (WARN_ON_ONCE(!obj)) + continue; + + ret = dma_resv_get_singleton(obj->resv, usage, &new); + if (ret) + return ret; + + if (new && *fence) { + struct dma_fence_chain *chain = dma_fence_chain_alloc(); + + if (!chain) + return -ENOMEM; + + dma_fence_chain_init(chain, *fence, new, 1); + *fence = &chain->base; + + } else if (new) { + *fence = new; + } + } + return 0; +} + /** * drm_gem_plane_helper_prepare_fb() - Prepare a GEM backed framebuffer * @plane: Plane @@ -143,8 +178,6 @@ int drm_gem_plane_helper_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state) { struct dma_fence *fence = dma_fence_get(state->fence); - enum dma_resv_usage usage; - size_t i; int ret; if (!state->fb) @@ -163,32 +196,21 @@ int drm_gem_plane_helper_prepare_fb(struct drm_plane *plane, * obeys both implicit and explicit fences for plane updates, then it * will break all the benefits of explicit fencing. */ - usage = fence ? 
DMA_RESV_USAGE_KERNEL : DMA_RESV_USAGE_WRITE; - - for (i = 0; i < state->fb->format->num_planes; ++i) { - struct drm_gem_object *obj = drm_gem_fb_get_obj(state->fb, i); - struct dma_fence *new; - - if (WARN_ON_ONCE(!obj)) - continue; - - ret = dma_resv_get_singleton(obj->resv, usage, &new); + if (fence) { + ret = chain_fb_fences(state->fb, DMA_RESV_USAGE_KERNEL, &fence); if (ret) goto error; - if (new && fence) { - struct dma_fence_chain *chain = dma_fence_chain_alloc(); + } else { + ret = chain_fb_fences(state->fb, DMA_RESV_USAGE_WRITE, &fence); + if (ret) + goto error; - if (!chain) { - ret = -ENOMEM; + if (drm_core_check_feature(plane->dev, DRIVER_USER_FENCE)) { + ret = chain_fb_fences(state->fb, DMA_RESV_USAGE_USER, + &fence); + if (ret) goto error; - } - - dma_fence_chain_init(chain, fence, new, 1); - fence = &chain->base; - - } else if (new) { - fence = new; } } -- 2.25.1