From: Philip Yang <Philip.Yang@xxxxxxx>

Add a mem_id_plus1 parameter to amdgpu_gem_object_create and pass it to
amdgpu_bo_create. For dGPU mode allocations, mem_id is used by the VRAM
manager to get the memory partition fpfn, lpfn from the xcp manager. For
APU native mode allocations, mem_id is used to get the NUMA node id from
the xcp manager, which is then passed to TTM as the NUMA pool id to
allocate memory from that specific NUMA node. mem_id -1 means the entire
VRAM or any NUMA node.

Signed-off-by: Philip Yang <Philip.Yang@xxxxxxx>
Reviewed-by: Felix Kuehling <Felix.Kuehling@xxxxxxx>
Signed-off-by: Alex Deucher <alexander.deucher@xxxxxxx>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 4 ++--
 drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c      | 2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c          | 9 +++++----
 drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h          | 3 +--
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c       | 3 +++
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.h       | 5 +++++
 6 files changed, 17 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 12149b317b88..6d0c25e34af1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -289,7 +289,7 @@ create_dmamap_sg_bo(struct amdgpu_device *adev,
 
 	ret = amdgpu_gem_object_create(adev, mem->bo->tbo.base.size, 1,
 			AMDGPU_GEM_DOMAIN_CPU, AMDGPU_GEM_CREATE_PREEMPTIBLE | flags,
-			ttm_bo_type_sg, mem->bo->tbo.base.resv, &gem_obj);
+			ttm_bo_type_sg, mem->bo->tbo.base.resv, &gem_obj, 0);
 
 	amdgpu_bo_unreserve(mem->bo);
 
@@ -1720,7 +1720,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
 		 va, (*mem)->aql_queue ? size << 1 : size, domain_string(alloc_domain));
 
 	ret = amdgpu_gem_object_create(adev, aligned_size, 1, alloc_domain, alloc_flags,
-				       bo_type, NULL, &gobj);
+				       bo_type, NULL, &gobj, 0);
 	if (ret) {
 		pr_debug("Failed to create BO on domain %s. ret %d\n",
ret %d\n", domain_string(alloc_domain), ret); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c index e97b1eef2c9d..8b162f05d1fd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c @@ -335,7 +335,7 @@ amdgpu_dma_buf_create_obj(struct drm_device *dev, struct dma_buf *dma_buf) ret = amdgpu_gem_object_create(adev, dma_buf->size, PAGE_SIZE, AMDGPU_GEM_DOMAIN_CPU, flags, - ttm_bo_type_sg, resv, &gobj); + ttm_bo_type_sg, resv, &gobj, 0); if (ret) goto error; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 6936cd63df42..01029b495f5a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -97,7 +97,7 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size, int alignment, u32 initial_domain, u64 flags, enum ttm_bo_type type, struct dma_resv *resv, - struct drm_gem_object **obj) + struct drm_gem_object **obj, int8_t mem_id_plus1) { struct amdgpu_bo *bo; struct amdgpu_bo_user *ubo; @@ -115,6 +115,7 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size, bp.flags = flags; bp.domain = initial_domain; bp.bo_ptr_size = sizeof(struct amdgpu_bo); + bp.mem_id_plus1 = mem_id_plus1; r = amdgpu_bo_create_user(adev, &bp, &ubo); if (r) @@ -335,7 +336,7 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data, retry: r = amdgpu_gem_object_create(adev, size, args->in.alignment, initial_domain, - flags, ttm_bo_type_device, resv, &gobj); + flags, ttm_bo_type_device, resv, &gobj, 0); if (r && r != -ERESTARTSYS) { if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) { flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; @@ -404,7 +405,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data, /* create a gem object to contain this object in */ r = amdgpu_gem_object_create(adev, args->size, 0, AMDGPU_GEM_DOMAIN_CPU, - 0, ttm_bo_type_device, NULL, &gobj); + 0, ttm_bo_type_device, NULL, &gobj, 0); if (r) return r; @@ -930,7 +931,7 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv, domain = amdgpu_bo_get_preferred_domain(adev, amdgpu_display_supported_domains(adev, flags)); r = amdgpu_gem_object_create(adev, args->size, 0, domain, flags, - ttm_bo_type_device, NULL, &gobj); + ttm_bo_type_device, NULL, &gobj, 0); if (r) return -ENOMEM; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h index 637bf51dbf06..646c4fcc8e40 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h @@ -43,8 +43,7 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size, int alignment, u32 initial_domain, u64 flags, enum ttm_bo_type type, struct dma_resv *resv, - struct drm_gem_object **obj); - + struct drm_gem_object **obj, int8_t mem_id_plus1); int amdgpu_mode_dumb_create(struct drm_file *file_priv, struct drm_device *dev, struct drm_mode_create_dumb *args); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index c6214db42bda..155b62971a33 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -574,6 +574,9 @@ int amdgpu_bo_create(struct amdgpu_device *adev, bo->flags = bp->flags; + /* bo->mem_id -1 means any partition */ + bo->mem_id = bp->mem_id_plus1 - 1; + if (!amdgpu_bo_support_uswc(bo->flags)) bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC; diff --git 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 8fdfa739a4f2..521a432348a0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -56,6 +56,8 @@ struct amdgpu_bo_param {
 	bool				no_wait_gpu;
 	struct dma_resv			*resv;
 	void				(*destroy)(struct ttm_buffer_object *bo);
+	/* memory partition number plus 1, 0 means any partition */
+	int8_t				mem_id_plus1;
 };
 
 /* bo virtual addresses in a vm */
@@ -108,6 +110,9 @@ struct amdgpu_bo {
 	struct mmu_interval_notifier	notifier;
 #endif
 	struct kgd_mem			*kfd_bo;
+
+	/* memory partition number, -1 means any partition */
+	int8_t				mem_id;
 };
 
 struct amdgpu_bo_user {
-- 
2.40.1
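
For reference, a caller that wants a BO placed on a specific memory partition
encodes the partition as mem_id + 1, and passes 0 when any partition (or any
NUMA node) is acceptable. A minimal sketch of such a caller follows;
alloc_bo_on_partition() is a hypothetical helper invented for illustration, and
only the amdgpu_gem_object_create() signature and the mem_id_plus1 convention
come from the patch above.

#include "amdgpu.h"	/* assumed amdgpu driver-internal build context */

/* Hypothetical helper, not part of this patch: allocate a VRAM BO on a
 * given memory partition. mem_id < 0 requests any partition and is
 * encoded as mem_id_plus1 == 0, matching the convention used by
 * amdgpu_bo_create() above.
 */
static int alloc_bo_on_partition(struct amdgpu_device *adev,
				 unsigned long size, int mem_id,
				 struct drm_gem_object **gobj)
{
	int8_t mem_id_plus1 = mem_id < 0 ? 0 : mem_id + 1;

	return amdgpu_gem_object_create(adev, size, PAGE_SIZE,
					AMDGPU_GEM_DOMAIN_VRAM,
					AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
					ttm_bo_type_device, NULL, gobj,
					mem_id_plus1);
}

The plus-1 encoding presumably keeps existing callers unchanged in behaviour: a
zero-initialized amdgpu_bo_param, or an explicit 0 argument as in the hunks
above, still means "any partition", since amdgpu_bo_create() stores
mem_id_plus1 - 1 and -1 is the "any" value.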