Re: [RFC PATCH 2/3] drm/virtio: new fence for every plane update

On 8/17/2023 7:21 PM, Dmitry Osipenko wrote:
...
+static struct
+drm_plane_state *virtio_gpu_plane_duplicate_state(struct drm_plane *plane)
+{
+	struct virtio_gpu_plane_state *new;
+
+	if (WARN_ON(!plane->state))
+		return NULL;
When can plane->state be NULL?

Honestly, this error check was taken from another drm driver. I am not sure whether there is *any* case where it would actually hit, but wouldn't it be safer to make sure plane->state is not NULL here?

+	new = kzalloc(sizeof(*new), GFP_KERNEL);
+	if (!new)
+		return NULL;
+
+	__drm_atomic_helper_plane_duplicate_state(plane, &new->base);
+
+	return &new->base;
+}
+
+static void virtio_gpu_plane_destroy_state(struct drm_plane *plane,
+					   struct drm_plane_state *state)
+{
+	__drm_atomic_helper_plane_destroy_state(state);
+	kfree(to_virtio_gpu_plane_state(state));
+}
+
  static const struct drm_plane_funcs virtio_gpu_plane_funcs = {
  	.update_plane		= drm_atomic_helper_update_plane,
  	.disable_plane		= drm_atomic_helper_disable_plane,
  	.reset			= drm_atomic_helper_plane_reset,
-	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
-	.atomic_destroy_state	= drm_atomic_helper_plane_destroy_state,
+	.atomic_duplicate_state = virtio_gpu_plane_duplicate_state,
+	.atomic_destroy_state	= virtio_gpu_plane_destroy_state,
Similar to the other email, please see how container_of() works. There
is no need to change .atomic_destroy_state.

Thanks again for pointing this out. I will fix this.
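
For my own reference, here is how I read the container_of() point (an illustrative sketch only, assuming the wrapper struct keeps its drm_plane_state base as the first member; this is not quoted from the series):

  /* Illustrative layout, not the exact patch 1/3 code. */
  struct virtio_gpu_plane_state {
  	struct drm_plane_state base;	/* kept as the first member on purpose */
  	struct virtio_gpu_fence *fence;
  };

  #define to_virtio_gpu_plane_state(x) \
  	container_of(x, struct virtio_gpu_plane_state, base)

  /*
   * Because base is the first member, &vgplane_st->base and vgplane_st are
   * the same address, so the kfree() done by the stock
   * drm_atomic_helper_plane_destroy_state() already frees the whole wrapper
   * and a custom .atomic_destroy_state is not required.
   */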


...
@@ -237,41 +262,29 @@ static int virtio_gpu_plane_prepare_fb(struct drm_plane *plane,
  	struct drm_device *dev = plane->dev;
  	struct virtio_gpu_device *vgdev = dev->dev_private;
  	struct virtio_gpu_framebuffer *vgfb;
+	struct virtio_gpu_plane_state *vgplane_st;
  	struct virtio_gpu_object *bo;

  	if (!new_state->fb)
  		return 0;

  	vgfb = to_virtio_gpu_framebuffer(new_state->fb);
+	vgplane_st = to_virtio_gpu_plane_state(new_state);
  	bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
  	if (!bo || (plane->type == DRM_PLANE_TYPE_PRIMARY && !bo->guest_blob))
  		return 0;

-	if (bo->dumb && (plane->state->fb != new_state->fb)) {
-		vgfb->fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context,
+	if (bo->dumb) {
Why "&& (plane->state->fb != new_state->fb)" disappeared?

Because the same FB could be flushed again, and a fence is needed for synchronous operation in that case.


+		vgplane_st->fence = virtio_gpu_fence_alloc(vgdev,
+						     vgdev->fence_drv.context,
  						     0);
-		if (!vgfb->fence)
+		if (!vgplane_st->fence)
  			return -ENOMEM;
  	}

  	return 0;
  }

-static void virtio_gpu_plane_cleanup_fb(struct drm_plane *plane,
-					struct drm_plane_state *state)
-{
-	struct virtio_gpu_framebuffer *vgfb;
-
-	if (!state->fb)
-		return;
-
-	vgfb = to_virtio_gpu_framebuffer(state->fb);
-	if (vgfb->fence) {
-		dma_fence_put(&vgfb->fence->f);
-		vgfb->fence = NULL;
-	}
-}
How come virtio_gpu_plane_cleanup_fb() isn't needed anymore? You
created the fence in prepare_fb(); you must release it in cleanup_fb()
if the fence is still present.

This fence is put during the virtio-gpu dequeuing process (i.e. when the response
is received) and is eventually released after the resource flush is done:

      virtio_gpu_notify(vgdev);
      dma_fence_wait_timeout(&vgplane_st->fence->f, true,
                             msecs_to_jiffies(50));
      dma_fence_put(&vgplane_st->fence->f);

But even so, your point about the fence possibly still being present makes sense.
I will check this again.
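
If it turns out a fence can still be pending there, my plan would be to keep a cleanup_fb() along these lines (a minimal sketch based on the removed hunk above, just switched from the framebuffer to the plane state; treat it as a draft, not final code):

  static void virtio_gpu_plane_cleanup_fb(struct drm_plane *plane,
  					struct drm_plane_state *state)
  {
  	struct virtio_gpu_plane_state *vgplane_st;

  	if (!state->fb)
  		return;

  	vgplane_st = to_virtio_gpu_plane_state(state);
  	if (vgplane_st->fence) {
  		/* Drop the reference taken in prepare_fb() if it was never consumed. */
  		dma_fence_put(&vgplane_st->fence->f);
  		vgplane_st->fence = NULL;
  	}
  }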




