Initialize objects with TTM_PL_FLAG_SYSTEM placement, move them over to
TTM_PL_FLAG_TT after they are created (i.e. registered in the host).

That way the ttm backend will handle attach/detach just fine and we
don't need the extra virtio_gpu_object_attach() calls.

TODO: check ioctl fence.

Signed-off-by: Gerd Hoffmann <kraxel@xxxxxxxxxx>
---
 drivers/gpu/drm/virtio/virtgpu_drv.h    |  1 +
 drivers/gpu/drm/virtio/virtgpu_fb.c     |  5 ++---
 drivers/gpu/drm/virtio/virtgpu_gem.c    |  4 ++--
 drivers/gpu/drm/virtio/virtgpu_ioctl.c  | 13 +++++++++---
 drivers/gpu/drm/virtio/virtgpu_object.c | 35 ++++++++++++++++++++++++++++++++-
 5 files changed, 49 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 1c321e484d..554d887c6d 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -360,6 +360,7 @@ void virtio_gpu_fence_event_process(struct virtio_gpu_device *vdev,
 int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
 			     unsigned long size, bool kernel, bool pinned,
 			     struct virtio_gpu_object **bo_ptr);
+int virtio_gpu_object_move(struct virtio_gpu_object *vgbo);
 void virtio_gpu_object_kunmap(struct virtio_gpu_object *bo);
 int virtio_gpu_object_kmap(struct virtio_gpu_object *bo);
 int virtio_gpu_object_get_sg_table(struct virtio_gpu_device *qdev,
diff --git a/drivers/gpu/drm/virtio/virtgpu_fb.c b/drivers/gpu/drm/virtio/virtgpu_fb.c
index fb1cc8b2f1..4dd01520be 100644
--- a/drivers/gpu/drm/virtio/virtgpu_fb.c
+++ b/drivers/gpu/drm/virtio/virtgpu_fb.c
@@ -240,8 +240,8 @@ static int virtio_gpufb_create(struct drm_fb_helper *helper,
 		goto err_obj_vmap;
 	}
 
-	/* attach the object to the resource */
-	ret = virtio_gpu_object_attach(vgdev, obj, NULL);
+	/* attach the object to the resource (via ttm backend) */
+	ret = virtio_gpu_object_move(obj);
 	if (ret)
 		goto err_obj_attach;
 
@@ -277,7 +277,6 @@ static int virtio_gpufb_create(struct drm_fb_helper *helper,
 	return 0;
 
 err_fb_alloc:
-	virtio_gpu_object_detach(vgdev, obj);
 err_obj_attach:
 err_obj_vmap:
 	virtio_gpu_gem_free_object(&obj->gem_base);
diff --git a/drivers/gpu/drm/virtio/virtgpu_gem.c b/drivers/gpu/drm/virtio/virtgpu_gem.c
index f065863939..e3f0c46cbf 100644
--- a/drivers/gpu/drm/virtio/virtgpu_gem.c
+++ b/drivers/gpu/drm/virtio/virtgpu_gem.c
@@ -106,8 +106,8 @@ int virtio_gpu_mode_dumb_create(struct drm_file *file_priv,
 	virtio_gpu_cmd_create_resource(vgdev, obj, format,
 				       args->width, args->height);
 
-	/* attach the object to the resource */
-	ret = virtio_gpu_object_attach(vgdev, obj, NULL);
+	/* attach the object to the resource (via ttm backend) */
+	ret = virtio_gpu_object_move(obj);
 	if (ret)
 		goto fail;
 
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index 681edd9c92..55baad8e90 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -258,7 +258,7 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
 		virtio_gpu_cmd_create_resource(vgdev, qobj, rc->format,
 					       rc->width, rc->height);
 
-		ret = virtio_gpu_object_attach(vgdev, qobj, NULL);
+		ret = virtio_gpu_object_move(qobj);
 	} else {
 		/* use a gem reference since unref list undoes them */
 		drm_gem_object_get(&qobj->gem_base);
@@ -285,12 +285,17 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
 
 		virtio_gpu_cmd_resource_create_3d(vgdev, &rc_3d, NULL);
 		qobj->created = true;
+#if 1
+		ret = virtio_gpu_object_move(qobj);
+		ttm_eu_backoff_reservation(&ticket, &validate_list);
+#else
 		ret = virtio_gpu_object_attach(vgdev, qobj, &fence);
 		if (ret) {
 			ttm_eu_backoff_reservation(&ticket, &validate_list);
 			goto fail_unref;
 		}
 		ttm_eu_fence_buffer_objects(&ticket, &validate_list, &fence->f);
+#endif
 	}
 
 	ret = drm_gem_handle_create(file_priv, obj, &handle);
@@ -299,7 +304,8 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
 		drm_gem_object_release(obj);
 		if (vgdev->has_virgl_3d) {
 			virtio_gpu_unref_list(&validate_list);
-			dma_fence_put(&fence->f);
+			if (fence)
+				dma_fence_put(&fence->f);
 		}
 		return ret;
 	}
@@ -310,7 +316,8 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
 
 	if (vgdev->has_virgl_3d) {
 		virtio_gpu_unref_list(&validate_list);
-		dma_fence_put(&fence->f);
+		if (fence)
+			dma_fence_put(&fence->f);
 	}
 	return 0;
 fail_unref:
diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
index 8bd1ebe13b..0c2fc10859 100644
--- a/drivers/gpu/drm/virtio/virtgpu_object.c
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c
@@ -75,12 +75,45 @@ static void virtio_gpu_init_ttm_placement(struct virtio_gpu_object *vgbo,
 	vgbo->placement_code.fpfn = 0;
 	vgbo->placement_code.lpfn = 0;
 	vgbo->placement_code.flags =
-		TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT | pflag;
+		TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM | pflag;
 	vgbo->placement.num_placement = c;
 	vgbo->placement.num_busy_placement = c;
 }
 
+int virtio_gpu_object_move(struct virtio_gpu_object *vgbo)
+{
+	struct ttm_operation_ctx ctx = { true, true };
+	struct ttm_place placement_memtype = {
+		.fpfn = 0,
+		.lpfn = 0,
+		.flags = vgbo->placement_code.flags,
+	};
+	struct ttm_placement placement;
+	struct ttm_mem_reg new_reg = { 0 };
+	int ret;
+
+	placement_memtype.flags &= ~TTM_PL_MASK_MEM;
+	placement_memtype.flags |= TTM_PL_FLAG_TT;
+
+	placement.num_placement = placement.num_busy_placement = 1;
+	placement.placement = placement.busy_placement = &placement_memtype;
+
+	ret = ttm_bo_mem_space(&vgbo->tbo, &placement, &new_reg, &ctx);
+	if (ret) {
+		DRM_INFO("%s: ttm_bo_mem_space: %d\n", __func__, ret);
+		return ret;
+	}
+
+	ret = ttm_bo_move_ttm(&vgbo->tbo, &ctx, &new_reg);
+	if (ret) {
+		DRM_INFO("%s: ttm_bo_move_ttm: %d\n", __func__, ret);
+		ttm_bo_mem_put(&vgbo->tbo, &new_reg);
+	}
+
+	return ret;
+}
+
 int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
 			     unsigned long size, bool kernel, bool pinned,
 			     struct virtio_gpu_object **bo_ptr)
-- 
2.9.3

_______________________________________________
dri-devel mailing list
dri-devel@xxxxxxxxxxxxxxxxxxxxx
https://lists.freedesktop.org/mailman/listinfo/dri-devel
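
The commit message leans on the TTM backend's bind/unbind hooks to perform the
host-side attach/detach once a buffer is moved into TTM_PL_FLAG_TT. For readers
unfamiliar with that path, below is a minimal sketch of what such hooks look
like; the wrapper struct and its field names (virtio_gpu_ttm_tt, gtt->vgdev,
gtt->obj) are illustrative assumptions, not code from this patch.

/*
 * Illustrative sketch only (not part of the patch above): the kind of
 * ttm backend hooks the commit message assumes exist in virtgpu_ttm.c.
 * The wrapper struct and its field names are hypothetical.
 */
struct virtio_gpu_ttm_tt {
	struct ttm_dma_tt ttm;              /* must stay first */
	struct virtio_gpu_device *vgdev;
	struct virtio_gpu_object *obj;
};

/*
 * Runs when ttm binds the buffer to TTM_PL_FLAG_TT, e.g. via the
 * ttm_bo_move_ttm() issued from virtio_gpu_object_move() above, so the
 * host-side attach happens as part of the placement change.
 */
static int virtio_gpu_ttm_backend_bind(struct ttm_tt *ttm,
					struct ttm_mem_reg *bo_mem)
{
	struct virtio_gpu_ttm_tt *gtt = (void *)ttm;

	return virtio_gpu_object_attach(gtt->vgdev, gtt->obj, NULL);
}

/* Runs when ttm unbinds the buffer (eviction or destruction). */
static int virtio_gpu_ttm_backend_unbind(struct ttm_tt *ttm)
{
	struct virtio_gpu_ttm_tt *gtt = (void *)ttm;

	virtio_gpu_object_detach(gtt->vgdev, gtt->obj);
	return 0;
}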