On 24.04.24 09:13, Arunpravin Paneer Selvam wrote:
Now we have two flags for contiguous VRAM buffer allocation.
If the application requests AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, the
buffer's placement function sets the TTM place flag
TTM_PL_FLAG_CONTIGUOUS.
This patch changes the default behaviour of the two flags.
When we set AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS
- This means contiguous is not mandatory.
- We will try to allocate a contiguous buffer; if that allocation
fails, we fall back to allocating individual pages (see the userspace
sketch below).
When we set TTM_PL_FLAG_CONTIGUOUS
- This means contiguous allocation is mandatory.
- We set this in amdgpu_bo_pin_restricted() before BO validation and
check the flag in the VRAM manager.
- If this flag is set, the buffer pages must be allocated contiguously;
if the allocation fails, we return -ENOSPC.
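
For illustration only (not part of this patch), a best-effort userspace
request could look roughly like the sketch below; it uses libdrm's
amdgpu_bo_alloc(), and the size, alignment and helper name are just
placeholders for the example:

#include <amdgpu.h>
#include <amdgpu_drm.h>

/* dev is an already initialized amdgpu_device_handle; device setup
 * and error handling are omitted for brevity.
 */
static int alloc_vram_prefer_contig(amdgpu_device_handle dev,
				    amdgpu_bo_handle *out)
{
	struct amdgpu_bo_alloc_request req = {
		.alloc_size = 8ULL << 20,	/* 8 MiB, arbitrary */
		.phys_alignment = 1ULL << 20,
		.preferred_heap = AMDGPU_GEM_DOMAIN_VRAM,
		.flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
	};

	/* With this patch a successful return no longer guarantees
	 * physically contiguous VRAM; the kernel may have fallen back
	 * to scattered pages.
	 */
	return amdgpu_bo_alloc(dev, &req, out);
}
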
v2:
- keep the mem_flags and bo->flags check as is (Christian)
- place the TTM_PL_FLAG_CONTIGUOUS flag setting into the
amdgpu_bo_pin_restricted function placement range iteration
loop (Christian)
- rename find_pages to amdgpu_vram_mgr_calculate_pages_per_block
(Christian)
- Keep the kernel BO allocation as is (Christian)
- If the BO pin VRAM allocation fails, we need to return -ENOSPC as
RDMA cannot work with scattered VRAM pages (Philip)
v3(Christian):
- keep contiguous flag handling outside of pages_per_block
calculation
- remove the hacky implementation in contiguous flag error
handling code
Signed-off-by: Arunpravin Paneer Selvam <Arunpravin.PaneerSelvam@xxxxxxx>
Suggested-by: Christian König <christian.koenig@xxxxxxx>
---
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 8 +-
drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | 83 ++++++++++++++------
2 files changed, 65 insertions(+), 26 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 492aebc44e51..c594d2a5978e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -154,8 +154,10 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
else
places[c].flags |= TTM_PL_FLAG_TOPDOWN;
- if (flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
+ if (abo->tbo.type == ttm_bo_type_kernel &&
+ flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
places[c].flags |= TTM_PL_FLAG_CONTIGUOUS;
+
c++;
}
@@ -965,6 +967,10 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
if (!bo->placements[i].lpfn ||
(lpfn && lpfn < bo->placements[i].lpfn))
bo->placements[i].lpfn = lpfn;
+
+ if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS &&
+ bo->placements[i].mem_type == TTM_PL_VRAM)
+ bo->placements[i].flags |= TTM_PL_FLAG_CONTIGUOUS;
}
r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index e494f5bf136a..17c5d9ce9927 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -88,6 +88,23 @@ static inline u64 amdgpu_vram_mgr_blocks_size(struct list_head *head)
return size;
}
+static inline void amdgpu_vram_mgr_limit_min_block_size(unsigned long pages_per_block,
+ u64 size,
+ u64 *min_block_size,
+ bool contiguous_enabled)
+{
+ if (contiguous_enabled)
+ return;
+
+ /*
+ * if size >= 2MiB, limit the min_block_size to 2MiB
+ * for better TLB usage.
+ */
+ if ((size >= (u64)pages_per_block << PAGE_SHIFT) &&
+ !(size & (((u64)pages_per_block << PAGE_SHIFT) - 1)))
+ *min_block_size = (u64)pages_per_block << PAGE_SHIFT;
+}
+
/**
* DOC: mem_info_vram_total
*
@@ -452,11 +469,12 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
struct amdgpu_device *adev = to_amdgpu_device(mgr);
u64 vis_usage = 0, max_bytes, min_block_size;
+ struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
struct amdgpu_vram_mgr_resource *vres;
u64 size, remaining_size, lpfn, fpfn;
struct drm_buddy *mm = &mgr->mm;
- struct drm_buddy_block *block;
unsigned long pages_per_block;
+ struct drm_buddy_block *block;
int r;
lpfn = (u64)place->lpfn << PAGE_SHIFT;
@@ -469,18 +487,14 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
if (tbo->type != ttm_bo_type_kernel)
max_bytes -= AMDGPU_VM_RESERVED_VRAM;
- if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
- pages_per_block = ~0ul;
- } else {
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
pages_per_block = HPAGE_PMD_NR;
That won't work like this.
HPAGE_PMD_NR is only guaranteed to be defined when
CONFIG_TRANSPARENT_HUGEPAGE is enabled.
So you will run into some compile errors on platforms where that isn't
available.
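
Keeping it under the existing guard, i.e. roughly the #ifdef/#else
block this hunk removes, is one option that builds everywhere (just a
sketch of that shape):

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	pages_per_block = HPAGE_PMD_NR;
#else
	/* default to 2MiB when THP is not configured */
	pages_per_block = 2UL << (20UL - PAGE_SHIFT);
#endif
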
-#else
- /* default to 2MB */
+ else
+ /* default to 2MiB */
pages_per_block = 2UL << (20UL - PAGE_SHIFT);
-#endif
- pages_per_block = max_t(uint32_t, pages_per_block,
- tbo->page_alignment);
- }
+
+ pages_per_block = max_t(uint32_t, pages_per_block,
+ tbo->page_alignment);
vres = kzalloc(sizeof(*vres), GFP_KERNEL);
if (!vres)
@@ -499,7 +513,7 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
if (place->flags & TTM_PL_FLAG_TOPDOWN)
vres->flags |= DRM_BUDDY_TOPDOWN_ALLOCATION;
- if (place->flags & TTM_PL_FLAG_CONTIGUOUS)
+ if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
vres->flags |= DRM_BUDDY_CONTIGUOUS_ALLOCATION;
if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CLEARED)
@@ -509,23 +523,23 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
/* Allocate blocks in desired range */
vres->flags |= DRM_BUDDY_RANGE_ALLOCATION;
+ if (tbo->page_alignment)
+ min_block_size = (u64)tbo->page_alignment << PAGE_SHIFT;
+ else
+ min_block_size = mgr->default_page_size;
+
remaining_size = (u64)vres->base.size;
mutex_lock(&mgr->lock);
while (remaining_size) {
- if (tbo->page_alignment)
- min_block_size = (u64)tbo->page_alignment << PAGE_SHIFT;
- else
- min_block_size = mgr->default_page_size;
-
- BUG_ON(min_block_size < mm->chunk_size);
-
/* Limit maximum size to 2GiB due to SG table limitations */
size = min(remaining_size, 2ULL << 30);
-
- if ((size >= (u64)pages_per_block << PAGE_SHIFT) &&
- !(size & (((u64)pages_per_block << PAGE_SHIFT) - 1)))
- min_block_size = (u64)pages_per_block << PAGE_SHIFT;
+ amdgpu_vram_mgr_limit_min_block_size(pages_per_block,
+ size,
+ &min_block_size,
+ bo->flags &
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS);
+ BUG_ON(min_block_size < mm->chunk_size);
r = drm_buddy_alloc_blocks(mm, fpfn,
lpfn,
@@ -533,8 +547,27 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
min_block_size,
&vres->blocks,
vres->flags);
- if (unlikely(r))
- goto error_free_blocks;
+ if (unlikely(r)) {
+ if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS &&
+ !(place->flags & TTM_PL_FLAG_CONTIGUOUS)) {
Better to check the variable you modify, in this case min_block_size,
and then restart the loop. And check for the explicit return value.
In other words, something like this:
if (unlikely(r == -ENOSPC) && min_block_size >= ... &&
    !(place->flags & TTM_PL_FLAG_CONTIGUOUS)) {
	min_block_size = ...;
	continue;
}

if (unlikely(r))
	goto error_free_blocks;
Regards,
Christian.
+ /* Fallback to non-contiguous allocation */
+ amdgpu_vram_mgr_limit_min_block_size(pages_per_block,
+ size,
+ &min_block_size,
+ !(bo->flags &
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS));
+ r = drm_buddy_alloc_blocks(mm, fpfn,
+ lpfn,
+ size,
+ min_block_size,
+ &vres->blocks,
+ vres->flags);
+ if (unlikely(r))
+ goto error_free_blocks;
+ } else {
+ goto error_free_blocks;
+ }
+ }
if (size > remaining_size)
remaining_size = 0;