Change-Id: I76ddeb3212c96d87a2d15a608ae8c0771e2d94ed
Signed-off-by: Monk Liu <Monk.Liu at amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 89 +++++++++++++++++++++++++++++++
 1 file changed, 89 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index a6bce36..5b15483 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -42,3 +42,92 @@ static void amdgpu_deallocate_static_csa(struct amdgpu_device *adev)
 {
 	amdgpu_bo_free_kernel(&adev->virt.csa_obj, &adev->virt.csa_vmid0_addr, NULL);
 }
+
+/*
+ * amdgpu_map_static_csa should be called during amdgpu_vm_init.
+ * It maps the virtual address "AMDGPU_VA_RESERVED_SIZE - adev->virt.csa_size"
+ * into this VM, and each GFX command submission should use this virtual
+ * address in its META_DATA init package to support SRIOV gfx preemption.
+ */
+
+static int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm)
+{
+	int r;
+	uint64_t vaddr;
+	struct amdgpu_bo_va *bo_va;
+	struct ww_acquire_ctx ticket;
+	struct list_head list, duplicates;
+	struct amdgpu_bo_list_entry pd;
+
+	INIT_LIST_HEAD(&list);
+	INIT_LIST_HEAD(&duplicates);
+	INIT_LIST_HEAD(&vm->vm_virt.csa_tv.head);
+	vm->vm_virt.csa_tv.bo = &adev->virt.csa_obj->tbo;
+	vm->vm_virt.csa_tv.shared = true;
+
+	list_add(&vm->vm_virt.csa_tv.head, &list);
+	amdgpu_vm_get_pd_bo(vm, &list, &pd);
+
+	vaddr = AMDGPU_VA_RESERVED_SIZE - adev->virt.csa_size;
+	r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
+	if (r) {
+		DRM_ERROR("failed to reserve CSA, PD BOs: err=%d\n", r);
+		return r;
+	}
+
+	bo_va = amdgpu_vm_bo_add(adev, vm, adev->virt.csa_obj);
+	if (!bo_va) {
+		ttm_eu_backoff_reservation(&ticket, &list);
+		DRM_ERROR("failed to create bo_va for static CSA\n");
+		return -ENOMEM;
+	}
+
+	r = amdgpu_vm_bo_map(adev, bo_va, vaddr, 0, adev->virt.csa_size,
+			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
+			     AMDGPU_PTE_EXECUTABLE);
+
+	if (r) {
+		DRM_ERROR("failed to do bo_map on static CSA, err=%d\n", r);
+		/* amdgpu_vm_bo_rmv() already frees the bo_va */
+		amdgpu_vm_bo_rmv(adev, bo_va);
+		ttm_eu_backoff_reservation(&ticket, &list);
+		return r;
+	}
+
+	ttm_eu_backoff_reservation(&ticket, &list);
+
+	vm->vm_virt.csa_bo_va = bo_va;
+	vm->vm_virt.vm_csa_addr = vaddr;
+	vm->vm_virt.vm_gds_addr = vaddr + PAGE_SIZE;
+	return 0;
+}
+
+static void amdgpu_unmap_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm)
+{
+	int r;
+	struct ww_acquire_ctx ticket;
+	struct list_head list, duplicate;
+	struct amdgpu_bo_list_entry pd;
+
+	if (vm->vm_virt.csa_bo_va) {
+		INIT_LIST_HEAD(&list);
+		INIT_LIST_HEAD(&duplicate);
+		list_add(&vm->vm_virt.csa_tv.head, &list);
+		amdgpu_vm_get_pd_bo(vm, &list, &pd);
+
+		r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicate);
+		if (r) {
+			DRM_ERROR("failed to reserve static CSA buffer: err=%d\n", r);
+			return;
+		}
+
+		amdgpu_vm_bo_rmv(adev, vm->vm_virt.csa_bo_va);
+		/* maybe no need to do real clearing since this vm will die soon */
+		r = amdgpu_vm_clear_freed(adev, vm);
+		if (r)
+			DRM_ERROR("failed to clear static CSA bo: err=%d\n", r);
+
+		ttm_eu_backoff_reservation(&ticket, &list);
+		vm->vm_virt.csa_bo_va = NULL;
+		vm->vm_virt.vm_csa_addr = vm->vm_virt.vm_gds_addr = 0;
+	}
+}
-- 
2.7.4
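
For reference, below is a rough sketch (not taken from this patch) of how the two
helpers could be wired into the VM lifecycle that the leading comment refers to.
It assumes the helpers are made non-static and declared in amdgpu_virt.h, that
adev->virt.csa_obj and adev->virt.csa_size have already been set up by the
existing amdgpu_allocate_static_csa(), and that amdgpu_sriov_vf(adev) gates the
SRIOV-only path; the real bodies of amdgpu_vm_init()/amdgpu_vm_fini() are elided.

/* Hypothetical call sites -- illustrative sketch only, not part of this patch. */

int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
	int r;

	/* ... existing page directory and scheduler entity setup ... */

	if (amdgpu_sriov_vf(adev)) {
		/* Map the static CSA into this VM so GFX submissions can
		 * reference it from their META_DATA init packages. */
		r = amdgpu_map_static_csa(adev, vm);
		if (r)
			return r;
	}

	return 0;
}

void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
	if (amdgpu_sriov_vf(adev))
		amdgpu_unmap_static_csa(adev, vm);

	/* ... existing teardown of BO mappings and page tables ... */
}

The sketch unmaps at the top of amdgpu_vm_fini() so that amdgpu_vm_clear_freed()
still sees valid page tables; if the clearing is skipped entirely (as the comment
in amdgpu_unmap_static_csa() suggests it might be), the ordering matters less.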