Re: [PATCH 3/3] drm/ttm: Change the meaning of the fields in the drm_mm_nodes structure from pfn to bytes v2

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



Am 14.02.23 um 11:31 schrieb Matthew Auld:
On Tue, 14 Feb 2023 at 07:43, Christian König
<ckoenig.leichtzumerken@xxxxxxxxx> wrote:
From: Somalapuram Amaranath <Amaranath.Somalapuram@xxxxxxx>

Change the ttm_range_man_alloc() allocation from pages to size in bytes.
Fix the dependent drm_mm_node start and size fields to be in bytes
instead of pages.

v2 (chk): Change the drm_mm_node usage in amdgpu as well. re-order the
           patch to be independent of the resource->start change.

Signed-off-by: Somalapuram Amaranath <Amaranath.Somalapuram@xxxxxxx>
Reviewed-by: Christian König <christian.koenig@xxxxxxx>
Signed-off-by: Christian König <christian.koenig@xxxxxxx>
---
  drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c    | 15 ++++++++-------
  drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h |  8 ++++----
  drivers/gpu/drm/i915/i915_scatterlist.c        |  6 +++---
  drivers/gpu/drm/ttm/ttm_range_manager.c        | 17 ++++++++---------
  4 files changed, 23 insertions(+), 23 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
index 44367f03316f..c90423cd1292 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -116,7 +116,6 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
                               struct ttm_resource **res)
  {
         struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man);
-       uint32_t num_pages = PFN_UP(tbo->base.size);
         struct ttm_range_mgr_node *node;
         int r;

@@ -134,17 +133,19 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
         if (place->lpfn) {
                 spin_lock(&mgr->lock);
                 r = drm_mm_insert_node_in_range(&mgr->mm, &node->mm_nodes[0],
-                                               num_pages, tbo->page_alignment,
-                                               0, place->fpfn, place->lpfn,
+                                               tbo->base.size,
+                                               tbo->page_alignment << PAGE_SHIFT, 0,
+                                               place->fpfn << PAGE_SHIFT,
+                                               place->lpfn << PAGE_SHIFT,
                                                 DRM_MM_INSERT_BEST);
                 spin_unlock(&mgr->lock);
                 if (unlikely(r))
                         goto err_free;

-               node->base.start = node->mm_nodes[0].start;
+               node->base.start = node->mm_nodes[0].start >> PAGE_SHIFT;
         } else {
                 node->mm_nodes[0].start = 0;
-               node->mm_nodes[0].size = PFN_UP(node->base.size);
+               node->mm_nodes[0].size = node->base.size;
                 node->base.start = AMDGPU_BO_INVALID_OFFSET;
         }

@@ -285,8 +286,8 @@ int amdgpu_gtt_mgr_init(struct amdgpu_device *adev, uint64_t gtt_size)

         ttm_resource_manager_init(man, &adev->mman.bdev, gtt_size);

-       start = AMDGPU_GTT_MAX_TRANSFER_SIZE * AMDGPU_GTT_NUM_TRANSFER_WINDOWS;
-       size = (adev->gmc.gart_size >> PAGE_SHIFT) - start;
+       start = (AMDGPU_GTT_MAX_TRANSFER_SIZE * AMDGPU_GTT_NUM_TRANSFER_WINDOWS) << PAGE_SHIFT;
+       size = adev->gmc.gart_size - start;
         drm_mm_init(&mgr->mm, start, size);
         spin_lock_init(&mgr->lock);

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
index 5c4f93ee0c57..5c78f0b09351 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
@@ -94,8 +94,8 @@ static inline void amdgpu_res_first(struct ttm_resource *res,
                 while (start >= node->size << PAGE_SHIFT)
                         start -= node++->size << PAGE_SHIFT;

-               cur->start = (node->start << PAGE_SHIFT) + start;
-               cur->size = min((node->size << PAGE_SHIFT) - start, size);
+               cur->start = node->start + start;
+               cur->size = min(node->size - start, size);
                 cur->remaining = size;
                 cur->node = node;
                 break;
@@ -155,8 +155,8 @@ static inline void amdgpu_res_next(struct amdgpu_res_cursor *cur, uint64_t size)
                 node = cur->node;

                 cur->node = ++node;
-               cur->start = node->start << PAGE_SHIFT;
-               cur->size = min(node->size << PAGE_SHIFT, cur->remaining);
+               cur->start = node->start;
+               cur->size = min(node->size, cur->remaining);
                 break;
         default:
                 return;
diff --git a/drivers/gpu/drm/i915/i915_scatterlist.c b/drivers/gpu/drm/i915/i915_scatterlist.c
index 756289e43dff..7defda1219d0 100644
--- a/drivers/gpu/drm/i915/i915_scatterlist.c
+++ b/drivers/gpu/drm/i915/i915_scatterlist.c
@@ -94,7 +94,7 @@ struct i915_refct_sgt *i915_rsgt_from_mm_node(const struct drm_mm_node *node,
         if (!rsgt)
                 return ERR_PTR(-ENOMEM);

-       i915_refct_sgt_init(rsgt, node->size << PAGE_SHIFT);
+       i915_refct_sgt_init(rsgt, node->size);
         st = &rsgt->table;
         /* restricted by sg_alloc_table */
         if (WARN_ON(overflows_type(DIV_ROUND_UP_ULL(node->size, segment_pages),
@@ -110,8 +110,8 @@ struct i915_refct_sgt *i915_rsgt_from_mm_node(const struct drm_mm_node *node,
         sg = st->sgl;
         st->nents = 0;
         prev_end = (resource_size_t)-1;
-       block_size = node->size << PAGE_SHIFT;
-       offset = node->start << PAGE_SHIFT;
+       block_size = node->size;
+       offset = node->start;

         while (block_size) {
                 u64 len;
diff --git a/drivers/gpu/drm/ttm/ttm_range_manager.c b/drivers/gpu/drm/ttm/ttm_range_manager.c
index 62fddcc59f02..9da6054f2955 100644
--- a/drivers/gpu/drm/ttm/ttm_range_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_range_manager.c
@@ -83,9 +83,10 @@ static int ttm_range_man_alloc(struct ttm_resource_manager *man,

         spin_lock(&rman->lock);
         ret = drm_mm_insert_node_in_range(mm, &node->mm_nodes[0],
-                                         PFN_UP(node->base.size),
-                                         bo->page_alignment, 0,
-                                         place->fpfn, lpfn, mode);
+                                         node->base.size,
+                                         bo->page_alignment << PAGE_SHIFT, 0,
+                                         place->fpfn << PAGE_SHIFT,
place->fpfn is only u32, so the shift can overflow. I assume we need a (u64) cast or similar here?

Good point. I've already found a couple of those, but it looks like I missed that one.

Going to fix that.

Thanks,
Christian.


+                                         lpfn << PAGE_SHIFT, mode);
         spin_unlock(&rman->lock);

         if (unlikely(ret)) {
@@ -94,7 +95,7 @@ static int ttm_range_man_alloc(struct ttm_resource_manager *man,
                 return ret;
         }

-       node->base.start = node->mm_nodes[0].start;
+       node->base.start = node->mm_nodes[0].start >> PAGE_SHIFT;
         *res = &node->base;
         return 0;
  }
@@ -119,11 +120,10 @@ static bool ttm_range_man_intersects(struct ttm_resource_manager *man,
                                      size_t size)
  {
         struct drm_mm_node *node = &to_ttm_range_mgr_node(res)->mm_nodes[0];
-       u32 num_pages = PFN_UP(size);

         /* Don't evict BOs outside of the requested placement range */
-       if (place->fpfn >= (node->start + num_pages) ||
-           (place->lpfn && place->lpfn <= node->start))
+       if ((place->fpfn << PAGE_SHIFT) >= (node->start + size) ||
+           (place->lpfn && (place->lpfn << PAGE_SHIFT) <= node->start))
Same here.

                 return false;

         return true;
@@ -135,10 +135,9 @@ static bool ttm_range_man_compatible(struct ttm_resource_manager *man,
                                      size_t size)
  {
         struct drm_mm_node *node = &to_ttm_range_mgr_node(res)->mm_nodes[0];
-       u32 num_pages = PFN_UP(size);

         if (node->start < place->fpfn ||
-           (place->lpfn && (node->start + num_pages) > place->lpfn))
+           (place->lpfn && (node->start + size) > place->lpfn << PAGE_SHIFT))
And here.

                 return false;

         return true;
--
2.34.1





[Index of Archives]     [AMD Graphics]     [Linux USB Devel]     [Linux Audio Users]     [Yosemite News]     [Linux Kernel]     [Linux SCSI]

  Powered by Linux