Before we return control of the SHM Pixmap to the client (that is, prior
to the next XReply), we ensure that the original SHM buffer is up to date
with any changes made on the GPU. We must flush the GPU writes back to the
CPU and so cannot allow ourselves to keep a dirty cached CPU bo.

Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
Cc: Alexei Podtelezhnikov <apodtele@xxxxxxxxx>
---
 src/sna/sna_accel.c | 12 ++++++++++--
 1 file changed, 10 insertions(+), 2 deletions(-)

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index edbfe5a4a..6e14cf7b4 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -2119,6 +2119,11 @@ static inline bool operate_inplace(struct sna_pixmap *priv, unsigned flags)
 	if (!USE_INPLACE)
 		return false;
 
+	if (flags & __MOVE_FORCE) {
+		DBG(("%s: no, inplace operation denied by force\n", __FUNCTION__));
+		return false;
+	}
+
 	if ((flags & MOVE_INPLACE_HINT) == 0) {
 		DBG(("%s: no, inplace operation not suitable\n", __FUNCTION__));
 		return false;
@@ -2327,7 +2332,7 @@ skip_inplace_map:
 	sna_pixmap_unmap(pixmap, priv);
 
-	if (USE_INPLACE &&
+	if (USE_INPLACE && !(flags & __MOVE_FORCE) &&
 	    (flags & MOVE_WRITE ? (void *)priv->gpu_bo : (void *)priv->gpu_damage) &&
 	    priv->cpu_damage == NULL &&
 	    priv->gpu_bo->tiling == I915_TILING_NONE &&
 	    (flags & MOVE_READ || kgem_bo_can_map__cpu(&sna->kgem, priv->gpu_bo, flags & MOVE_WRITE)) &&
@@ -17409,7 +17414,10 @@ void sna_accel_flush(struct sna *sna)
 		     priv->pixmap->refcnt));
 		assert(!priv->flush);
 		ret = sna_pixmap_move_to_cpu(priv->pixmap,
-					     MOVE_READ | MOVE_WRITE);
+					     MOVE_READ |
+					     MOVE_WRITE |
+					     MOVE_WHOLE_HINT |
+					     __MOVE_FORCE);
 		assert(!ret || priv->gpu_bo == NULL);
 		if (priv->pixmap->refcnt == 0) {
 			sna_damage_destroy(&priv->cpu_damage);
-- 
2.26.0
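
[Not part of the patch above; a minimal sketch of the ordering the commit
message describes.  The helper shm_pixmap_finish() and the reply callback
send_reply_to_client() are hypothetical names for illustration only; just
sna_pixmap_move_to_cpu() and the MOVE_*/__MOVE_FORCE flags come from the
driver.]

/* Force any GPU rendering back into the client's SHM segment and drop the
 * driver's dirty cached CPU bo before the reply goes out, so the bytes the
 * client reads after the next XReply are the bytes the GPU actually wrote. */
static void shm_pixmap_finish(struct sna *sna, PixmapPtr pixmap)
{
	bool ok;

	ok = sna_pixmap_move_to_cpu(pixmap,
				    MOVE_READ | MOVE_WRITE |
				    MOVE_WHOLE_HINT | __MOVE_FORCE);
	assert(ok);
	(void)ok;

	/* Only now is it safe to tell the client the pixmap is theirs again. */
	send_reply_to_client(sna);
}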