Re: [PATCH 2/2] drm/amdgpu: Enable clear page functionality

On 07.12.23 at 16:11, Arunpravin Paneer Selvam wrote:
Add clear page support for the VRAM memory region.

The first patch looks good, but this one still needs quite some work.


Signed-off-by: Arunpravin Paneer Selvam <Arunpravin.PaneerSelvam@xxxxxxx>
---
  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c    | 13 +++--
  .../gpu/drm/amd/amdgpu/amdgpu_res_cursor.h    | 25 ++++++++++
  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c       | 50 +++++++++++++++++++
  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h       |  4 ++
  drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c  | 14 +++++-
  drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h  |  5 ++
  6 files changed, 105 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index cef920a93924..bc4ea87f8b5e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -39,6 +39,7 @@
  #include "amdgpu.h"
  #include "amdgpu_trace.h"
  #include "amdgpu_amdkfd.h"
+#include "amdgpu_vram_mgr.h"
  /**
   * DOC: amdgpu_object
@@ -629,15 +630,17 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
  	if (bp->flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
  	    bo->tbo.resource->mem_type == TTM_PL_VRAM) {
-		struct dma_fence *fence;
+		struct dma_fence *fence = NULL;
-		r = amdgpu_fill_buffer(bo, 0, bo->tbo.base.resv, &fence, true);
+		r = amdgpu_clear_buffer(bo, bo->tbo.base.resv, &fence, true);
  		if (unlikely(r))
  			goto fail_unreserve;
-		dma_resv_add_fence(bo->tbo.base.resv, fence,
-				   DMA_RESV_USAGE_KERNEL);
-		dma_fence_put(fence);
+		if (fence) {
+			dma_resv_add_fence(bo->tbo.base.resv, fence,
+					   DMA_RESV_USAGE_KERNEL);
+			dma_fence_put(fence);
+		}
  	}
  	if (!bp->resv)
  		amdgpu_bo_unreserve(bo);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
index 381101d2bf05..50fcd86e1033 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
@@ -164,4 +164,29 @@ static inline void amdgpu_res_next(struct amdgpu_res_cursor *cur, uint64_t size)
  	}
  }
+/**
+ * amdgpu_res_cleared - check if blocks are cleared
+ *
+ * @cur: the cursor to extract the block
+ *
+ * Check if the @cur block is cleared
+ */
+static inline bool amdgpu_res_cleared(struct amdgpu_res_cursor *cur)
+{
+	struct drm_buddy_block *block;
+
+	switch (cur->mem_type) {
+	case TTM_PL_VRAM:
+		block = cur->node;
+
+		if (!amdgpu_vram_mgr_is_cleared(block))
+			return false;
+		break;
+	default:
+		return false;
+	}
+
+	return true;
+}
+
  #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 05991c5c8ddb..6d7514e8f40c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -2222,6 +2222,56 @@ static int amdgpu_ttm_fill_mem(struct amdgpu_ring *ring, uint32_t src_data,
  	return 0;
  }
+int amdgpu_clear_buffer(struct amdgpu_bo *bo,
+			struct dma_resv *resv,
+			struct dma_fence **fence,
+			bool delayed)

Drop the delayed parameter; it doesn't make any sense here.

And as Alex said, please use an amdgpu_ttm_ prefix for the function name.
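
To make that concrete, a minimal sketch of the prototype being asked for -- the amdgpu_ttm_clear_buffer name is only illustrative here, not something that already exists in the tree:

int amdgpu_ttm_clear_buffer(struct amdgpu_bo *bo,
			    struct dma_resv *resv,
			    struct dma_fence **fence);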

+{
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
+	struct amdgpu_res_cursor cursor;
+	struct dma_fence *f = NULL;
+	u64 addr;
+	int r;
+
+	if (!adev->mman.buffer_funcs_enabled)
+		return -EINVAL;
+
+	amdgpu_res_first(bo->tbo.resource, 0, amdgpu_bo_size(bo), &cursor);
+
+	mutex_lock(&adev->mman.gtt_window_lock);
+	while (cursor.remaining) {
+		struct dma_fence *next = NULL;
+		u64 size;
+
+		/* Never clear more than 256MiB at once to avoid timeouts */
+		size = min(cursor.size, 256ULL << 20);
+
+		if (!amdgpu_res_cleared(&cursor)) {

This check needs to come before the min(cursor.size, ...) directly above. I suggest handling it like this:

if (amdgpu_res_cleared(&cursor)) {
	amdgpu_res_next(&cursor, cursor.size);
	continue;
}

size = min(cursor.size, 256ULL << 20);
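
For reference, a sketch of how the whole loop could then read with the check hoisted. It is derived from the hunk quoted here; the only changes are the reordering and dropping the delayed argument, so treat it as an illustration rather than the final code:

	while (cursor.remaining) {
		struct dma_fence *next = NULL;
		u64 size;

		/* Blocks the buddy allocator already reports as cleared can
		 * simply be skipped. */
		if (amdgpu_res_cleared(&cursor)) {
			amdgpu_res_next(&cursor, cursor.size);
			continue;
		}

		/* Never clear more than 256MiB at once to avoid timeouts */
		size = min(cursor.size, 256ULL << 20);

		r = amdgpu_ttm_map_buffer(&bo->tbo, bo->tbo.resource, &cursor,
					  1, ring, false, &size, &addr);
		if (r)
			goto err;

		r = amdgpu_ttm_fill_mem(ring, 0, addr, size, resv,
					&next, true, false);
		if (r)
			goto err;

		dma_fence_put(f);
		f = next;
		amdgpu_res_next(&cursor, size);
	}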

+			r = amdgpu_ttm_map_buffer(&bo->tbo, bo->tbo.resource, &cursor,
+						  1, ring, false, &size, &addr);
+			if (r)
+				goto err;
+
+			r = amdgpu_ttm_fill_mem(ring, 0, addr, size, resv,
+						&next, true, delayed);
+			if (r)
+				goto err;
+		}
+		dma_fence_put(f);
+		f = next;
+
+		amdgpu_res_next(&cursor, size);
+	}
+err:
+	mutex_unlock(&adev->mman.gtt_window_lock);
+	if (fence)
+		*fence = dma_fence_get(f);
+	dma_fence_put(f);
+
+	return r;
+}
+
  int amdgpu_fill_buffer(struct amdgpu_bo *bo,
  			uint32_t src_data,
  			struct dma_resv *resv,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index 65ec82141a8e..838251166883 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -155,6 +155,10 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
  			       uint64_t size, bool tmz,
  			       struct dma_resv *resv,
  			       struct dma_fence **f);
+int amdgpu_clear_buffer(struct amdgpu_bo *bo,
+			struct dma_resv *resv,
+			struct dma_fence **fence,
+			bool delayed);
  int amdgpu_fill_buffer(struct amdgpu_bo *bo,
  			uint32_t src_data,
  			struct dma_resv *resv,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index d0e199cc8f17..ff74c324b5b5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -435,6 +435,7 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
  {
  	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
  	struct amdgpu_device *adev = to_amdgpu_device(mgr);
+	struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
  	u64 vis_usage = 0, max_bytes, min_block_size;
  	struct amdgpu_vram_mgr_resource *vres;
  	u64 size, remaining_size, lpfn, fpfn;
@@ -486,6 +487,9 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
  	if (place->flags & TTM_PL_FLAG_CONTIGUOUS)
  		vres->flags |= DRM_BUDDY_CONTIGUOUS_ALLOCATION;
+	if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CLEARED)
+		vres->flags |= DRM_BUDDY_CLEAR_ALLOCATION;
+
  	if (fpfn || lpfn != mgr->mm.size)
  		/* Allocate blocks in desired range */
  		vres->flags |= DRM_BUDDY_RANGE_ALLOCATION;
@@ -579,7 +583,9 @@ static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
  	struct amdgpu_vram_mgr_resource *vres = to_amdgpu_vram_mgr_resource(res);
  	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
  	struct amdgpu_device *adev = to_amdgpu_device(mgr);
+	struct amdgpu_bo *bo = ttm_to_amdgpu_bo(res->bo);
  	struct drm_buddy *mm = &mgr->mm;
+	struct dma_fence *fence = NULL;
  	struct drm_buddy_block *block;
  	uint64_t vis_usage = 0;
@@ -589,7 +595,13 @@ static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
   	amdgpu_vram_mgr_do_reserve(man);
 
-	drm_buddy_free_list(mm, &vres->blocks, 0);
+	/* Clear all the blocks in free path */
+	if (!amdgpu_fill_buffer(bo, 0, NULL, &fence, true)) {
+		vres->flags |= DRM_BUDDY_CLEARED;
+		dma_fence_put(fence);
+	}
+

That's a pretty clear no-go. This is the backend, while CS is done from the front end; we can't properly wait for the fence here, for example.

Instead use the AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE flag for this.

IIRC we already always set this flag when RAS is enabled, so just make it mandatory for now.
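
A minimal sketch of what making it mandatory could look like at BO creation time -- the exact placement is an assumption; the point is only that the clear then goes through the existing wipe-on-release handling in the front end:

	/* Assumption/sketch: always request a wipe on release for VRAM BOs so
	 * the clear is issued from the release path instead of from the
	 * vram_mgr backend. */
	if (bp->domain & AMDGPU_GEM_DOMAIN_VRAM)
		bp->flags |= AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;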

+	drm_buddy_free_list(mm, &vres->blocks, vres->flags);
  	mutex_unlock(&mgr->lock);
  	atomic64_sub(vis_usage, &mgr->vis_usage);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h
index 0e04e42cf809..8478522d7366 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h
@@ -53,6 +53,11 @@ static inline u64 amdgpu_vram_mgr_block_size(struct drm_buddy_block *block)
  	return (u64)PAGE_SIZE << drm_buddy_block_order(block);
  }
+static inline bool amdgpu_vram_mgr_is_cleared(struct drm_buddy_block *block)
+{
+	return drm_buddy_block_is_clear(block);
+}
+

You also need functionality which resets all cleared blocks back to uncleared after suspend/resume.

No idea how to do this offhand; maybe Alex knows.
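
Just to illustrate where such a reset could live, a purely hypothetical helper -- it assumes drm_buddy grows an interface along the lines of a drm_buddy_reset_clear() call, which nothing in this series provides:

static void amdgpu_vram_mgr_reset_cleared(struct amdgpu_vram_mgr *mgr)
{
	/* Hypothetical: VRAM contents are lost over suspend/resume, so drop
	 * the cleared state from every block again. */
	mutex_lock(&mgr->lock);
	drm_buddy_reset_clear(&mgr->mm, false);
	mutex_unlock(&mgr->lock);
}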

Regards,
Christian.

  static inline struct amdgpu_vram_mgr_resource *
  to_amdgpu_vram_mgr_resource(struct ttm_resource *res)
  {



