Move the BO allocation into sw_init, so that the IB tests no longer need
to allocate memory at runtime.

Signed-off-by: xinhui pan <xinhui.pan@xxxxxxx>
---
Changes from v1:
Only use the pre-allocated BO for direct IB submission, and take its
reservation lock to avoid any potential race (see the sketch after the
patch). Better safe than sorry.
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 103 +++++++++++++++++-------
 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h |   1 +
 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c   |   8 +-
 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c   |   8 +-
 4 files changed, 78 insertions(+), 42 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index d451c359606a..04a6f7a18d0a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -134,6 +134,51 @@ MODULE_FIRMWARE(FIRMWARE_VEGA12);
 MODULE_FIRMWARE(FIRMWARE_VEGA20);
 
 static void amdgpu_uvd_idle_work_handler(struct work_struct *work);
+static void amdgpu_uvd_force_into_uvd_segment(struct amdgpu_bo *abo);
+
+static int amdgpu_uvd_create_msg_bo_helper(struct amdgpu_device *adev,
+					   uint32_t size,
+					   struct amdgpu_bo **bo_ptr)
+{
+	struct ttm_operation_ctx ctx = { true, false };
+	struct amdgpu_bo *bo = NULL;
+	void *addr;
+	int r;
+
+	r = amdgpu_bo_create_reserved(adev, size, PAGE_SIZE,
+				      AMDGPU_GEM_DOMAIN_GTT,
+				      &bo, NULL, &addr);
+	if (r)
+		return r;
+
+	if (adev->uvd.address_64_bit) {
+		*bo_ptr = bo;
+		return 0;
+	}
+
+	amdgpu_bo_kunmap(bo);
+	amdgpu_bo_unpin(bo);
+	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
+	amdgpu_uvd_force_into_uvd_segment(bo);
+	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+	if (r)
+		goto err;
+	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_VRAM);
+	if (r)
+		goto err_pin;
+	r = amdgpu_bo_kmap(bo, &addr);
+	if (r)
+		goto err_kmap;
+	*bo_ptr = bo;
+	return 0;
+err_kmap:
+	amdgpu_bo_unpin(bo);
+err_pin:
+err:
+	amdgpu_bo_unreserve(bo);
+	amdgpu_bo_unref(&bo);
+	return r;
+}
 
 int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
 {
@@ -302,6 +347,11 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
 	if (!amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_UVD, 5, 0))
 		adev->uvd.address_64_bit = true;
 
+	r = amdgpu_uvd_create_msg_bo_helper(adev, 128 << 10, &adev->uvd.ib_bo);
+	if (r)
+		return r;
+	amdgpu_bo_unreserve(adev->uvd.ib_bo);
+
 	switch (adev->asic_type) {
 	case CHIP_TONGA:
 		adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_65_10;
@@ -324,6 +374,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
 
 int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
 {
+	void *addr = amdgpu_bo_kptr(adev->uvd.ib_bo);
 	int i, j;
 
 	drm_sched_entity_destroy(&adev->uvd.entity);
@@ -342,6 +393,7 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
 		for (i = 0; i < AMDGPU_MAX_UVD_ENC_RINGS; ++i)
 			amdgpu_ring_fini(&adev->uvd.inst[j].ring_enc[i]);
 	}
+	amdgpu_bo_free_kernel(&adev->uvd.ib_bo, NULL, &addr);
 	release_firmware(adev->uvd.fw);
 
 	return 0;
@@ -1080,23 +1132,10 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
 	unsigned offset_idx = 0;
 	unsigned offset[3] = { UVD_BASE_SI, 0, 0 };
 
-	amdgpu_bo_kunmap(bo);
-	amdgpu_bo_unpin(bo);
-
-	if (!ring->adev->uvd.address_64_bit) {
-		struct ttm_operation_ctx ctx = { true, false };
-
-		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
-		amdgpu_uvd_force_into_uvd_segment(bo);
-		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
-		if (r)
-			goto err;
-	}
-
 	r = amdgpu_job_alloc_with_ib(adev, 64, direct ?
				     AMDGPU_IB_POOL_DIRECT : AMDGPU_IB_POOL_DELAYED, &job);
 	if (r)
-		goto err;
+		return r;
 
 	if (adev->asic_type >= CHIP_VEGA10) {
 		offset_idx = 1 + ring->me;
@@ -1148,8 +1187,6 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
 	}
 
 	amdgpu_bo_fence(bo, f, false);
-	amdgpu_bo_unreserve(bo);
-	amdgpu_bo_unref(&bo);
 
 	if (fence)
 		*fence = dma_fence_get(f);
@@ -1159,10 +1196,6 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
 
 err_free:
 	amdgpu_job_free(job);
-
-err:
-	amdgpu_bo_unreserve(bo);
-	amdgpu_bo_unref(&bo);
 	return r;
 }
 
@@ -1173,16 +1206,15 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
 			       struct dma_fence **fence)
 {
 	struct amdgpu_device *adev = ring->adev;
-	struct amdgpu_bo *bo = NULL;
+	struct amdgpu_bo *bo = adev->uvd.ib_bo;
 	uint32_t *msg;
 	int r, i;
 
-	r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
-				      AMDGPU_GEM_DOMAIN_GTT,
-				      &bo, NULL, (void **)&msg);
+	r = amdgpu_bo_reserve(bo, false);
 	if (r)
 		return r;
 
+	msg = amdgpu_bo_kptr(bo);
 	/* stitch together an UVD create msg */
 	msg[0] = cpu_to_le32(0x00000de4);
 	msg[1] = cpu_to_le32(0x00000000);
@@ -1198,23 +1230,28 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
 	for (i = 11; i < 1024; ++i)
 		msg[i] = cpu_to_le32(0x0);
 
-	return amdgpu_uvd_send_msg(ring, bo, true, fence);
+	r = amdgpu_uvd_send_msg(ring, bo, true, fence);
+
+	amdgpu_bo_unreserve(bo);
+	return r;
 }
 
 int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
 			       bool direct, struct dma_fence **fence)
 {
 	struct amdgpu_device *adev = ring->adev;
-	struct amdgpu_bo *bo = NULL;
+	struct amdgpu_bo *bo = adev->uvd.ib_bo;
 	uint32_t *msg;
 	int r, i;
 
-	r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
-				      AMDGPU_GEM_DOMAIN_GTT,
-				      &bo, NULL, (void **)&msg);
+	if (direct)
+		r = amdgpu_bo_reserve(bo, false);
+	else
+		r = amdgpu_uvd_create_msg_bo_helper(adev, 4096, &bo);
 	if (r)
 		return r;
 
+	msg = amdgpu_bo_kptr(bo);
 	/* stitch together an UVD destroy msg */
 	msg[0] = cpu_to_le32(0x00000de4);
 	msg[1] = cpu_to_le32(0x00000002);
@@ -1223,7 +1260,13 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
 	for (i = 4; i < 1024; ++i)
 		msg[i] = cpu_to_le32(0x0);
 
-	return amdgpu_uvd_send_msg(ring, bo, direct, fence);
+	r = amdgpu_uvd_send_msg(ring, bo, direct, fence);
+
+	amdgpu_bo_unreserve(bo);
+	if (!direct)
+		amdgpu_bo_free_kernel(&bo, NULL, (void **)&msg);
+
+	return r;
 }
 
 static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
index edbb8194ee81..76ac9699885d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
@@ -68,6 +68,7 @@ struct amdgpu_uvd {
 	/* store image width to adjust nb memory state */
 	unsigned decode_image_width;
 	uint32_t keyselect;
+	struct amdgpu_bo *ib_bo;
 };
 
 int amdgpu_uvd_sw_init(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index bc571833632e..4ab62ef6f04a 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -332,12 +332,10 @@ static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring,
 static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 {
 	struct dma_fence *fence = NULL;
-	struct amdgpu_bo *bo = NULL;
+	struct amdgpu_bo *bo = ring->adev->uvd.ib_bo;
 	long r;
 
-	r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE,
-				      AMDGPU_GEM_DOMAIN_VRAM,
-				      &bo, NULL, NULL);
+	r = amdgpu_bo_reserve(bo, false);
 	if (r)
 		return r;
 
@@ -357,9 +355,7 @@ static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 
 error:
 	dma_fence_put(fence);
-	amdgpu_bo_unpin(bo);
 	amdgpu_bo_unreserve(bo);
-	amdgpu_bo_unref(&bo);
 	return r;
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index b6e82d75561f..85ad5fb358c8 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -338,12 +338,10 @@ static int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handl
 static int uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 {
 	struct dma_fence *fence = NULL;
-	struct amdgpu_bo *bo = NULL;
+	struct amdgpu_bo *bo = ring->adev->uvd.ib_bo;
 	long r;
 
-	r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE,
-				      AMDGPU_GEM_DOMAIN_VRAM,
-				      &bo, NULL, NULL);
+	r = amdgpu_bo_reserve(bo, false);
 	if (r)
 		return r;
 
@@ -363,9 +361,7 @@ static int uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 
 error:
 	dma_fence_put(fence);
-	amdgpu_bo_unpin(bo);
 	amdgpu_bo_unreserve(bo);
-	amdgpu_bo_unref(&bo);
 	return r;
 }
-- 
2.25.1
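
The sketch referenced in the v1 notes: since the message BO is now shared
and allocated once in sw_init, every direct submission path takes the BO's
reservation lock, writes the message through the kernel mapping kept from
creation time, submits, and only then unreserves. The sketch below is
illustrative only and not part of the patch (the function name
uvd_ib_test_msg_sketch is hypothetical, error paths are trimmed), but it
uses only helpers that appear in the diff above.

/* Hypothetical sketch: hold the pre-allocated BO's reservation lock
 * across the whole fill-and-submit sequence so that concurrent IB
 * tests cannot scribble over the shared buffer.
 */
static int uvd_ib_test_msg_sketch(struct amdgpu_ring *ring,
				  struct dma_fence **fence)
{
	struct amdgpu_bo *bo = ring->adev->uvd.ib_bo; /* created in sw_init */
	uint32_t *msg;
	int r;

	r = amdgpu_bo_reserve(bo, false);	/* serializes users of ib_bo */
	if (r)
		return r;

	msg = amdgpu_bo_kptr(bo);		/* CPU mapping made at creation */
	msg[0] = cpu_to_le32(0x00000de4);	/* message header, as in the patch */
	/* ... rest of the UVD message ... */

	r = amdgpu_uvd_send_msg(ring, bo, true /* direct */, fence);

	amdgpu_bo_unreserve(bo);	/* safe: send_msg already fenced the BO */
	return r;
}

As the v1 notes say, holding the reservation lock across both the message
fill and the submission closes the window in which two IB tests could
write the shared BO at the same time.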