Re: [PATCH] drm/amdkfd: use resource cursor in svm_migrate_copy_to_vram

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 




On 2021-05-23 1:10 p.m., Christian König wrote:
Am 21.05.21 um 21:28 schrieb philip yang:

This simplifies the logic; several comments inline.

Thanks,

Philip

On 2021-05-21 9:52 a.m., Christian König wrote:
Access to the mm_node is now forbidden. So instead of hand wiring that
use the cursor functionality.

Signed-off-by: Christian König <christian.koenig@xxxxxxx>
---
  drivers/gpu/drm/amd/amdkfd/kfd_migrate.c | 76 +++---------------------
  1 file changed, 9 insertions(+), 67 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index fd8f544f0de2..cb28d1e660af 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -29,6 +29,7 @@
  #include "amdgpu_object.h"
  #include "amdgpu_vm.h"
  #include "amdgpu_mn.h"
+#include "amdgpu_res_cursor.h"
  #include "kfd_priv.h"
  #include "kfd_svm.h"
  #include "kfd_migrate.h"
@@ -205,34 +206,6 @@ svm_migrate_copy_done(struct amdgpu_device *adev, struct dma_fence *mfence)
      return r;
  }
  -static uint64_t
-svm_migrate_node_physical_addr(struct amdgpu_device *adev,
-                   struct drm_mm_node **mm_node, uint64_t *offset)
-{
-    struct drm_mm_node *node = *mm_node;
-    uint64_t pos = *offset;
-
-    if (node->start == AMDGPU_BO_INVALID_OFFSET) {
-        pr_debug("drm node is not validated\n");
-        return 0;
-    }
-
-    pr_debug("vram node start 0x%llx npages 0x%llx\n", node->start,
-         node->size);
-
-    if (pos >= node->size) {
-        do  {
-            pos -= node->size;
-            node++;
-        } while (pos >= node->size);
-
-        *mm_node = node;
-        *offset = pos;
-    }
-
-    return (node->start + pos) << PAGE_SHIFT;
-}
-
  unsigned long
  svm_migrate_addr_to_pfn(struct amdgpu_device *adev, unsigned long addr)
  {
@@ -297,11 +270,9 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
  {
      uint64_t npages = migrate->cpages;
      struct device *dev = adev->dev;
-    struct drm_mm_node *node;
+    struct amdgpu_res_cursor cursor;
      dma_addr_t *src;
      uint64_t *dst;
-    uint64_t vram_addr;
-    uint64_t offset;
      uint64_t i, j;
      int r;
  @@ -317,19 +288,12 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
          goto out;
      }
  -    node = prange->ttm_res->mm_node;
-    offset = prange->offset;
-    vram_addr = svm_migrate_node_physical_addr(adev, &node, &offset);
-    if (!vram_addr) {
The prange->ttm_res valid check is not needed because we already check svm_range_vram_node_new return value
-        WARN_ONCE(1, "vram node address is 0\n");
-        r = -ENOMEM;
-        goto out;
-    }
-
+    amdgpu_res_first(prange->ttm_res, prange->offset, npages << PAGE_SHIFT,

prange->offset << PAGE_SHIFT

amdgpu_res_first takes start and size in bytes, but prange->offset is a page-aligned offset in pages, so it must be shifted.


Ah, yes good point.


+             &cursor);
      for (i = j = 0; i < npages; i++) {
          struct page *spage;
  -        dst[i] = vram_addr + (j << PAGE_SHIFT);
+        dst[i] = cursor.start + (j << PAGE_SHIFT);
          migrate->dst[i] = svm_migrate_addr_to_pfn(adev, dst[i]);
          svm_migrate_get_vram_page(prange, migrate->dst[i]);
  @@ -354,18 +318,10 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
                          mfence);
                  if (r)
                      goto out_free_vram_pages;
-                offset += j;
-                vram_addr = (node->start + offset) << PAGE_SHIFT;
+                amdgpu_res_next(&cursor, j << PAGE_SHIFT);
                  j = 0;
              } else {
-                offset++;
-                vram_addr += PAGE_SIZE;
-            }
-            if (offset >= node->size) {
-                node++;
-                pr_debug("next node size 0x%llx\n", node->size);
-                vram_addr = node->start << PAGE_SHIFT;
-                offset = 0;
+                amdgpu_res_next(&cursor, PAGE_SIZE);
              }
              continue;
          }
@@ -373,22 +329,8 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
          pr_debug("dma mapping src to 0x%llx, page_to_pfn 0x%lx\n",
               src[i] >> PAGE_SHIFT, page_to_pfn(spage));
  -        if (j + offset >= node->size - 1 && i < npages - 1) {
-            r = svm_migrate_copy_memory_gart(adev, src + i - j,
-                             dst + i - j, j + 1,
-                             FROM_RAM_TO_VRAM,
-                             mfence);
-            if (r)
-                goto out_free_vram_pages;
-
-            node++;
-            pr_debug("next node size 0x%llx\n", node->size);
-            vram_addr = node->start << PAGE_SHIFT;
-            offset = 0;
-            j = 0;
-        } else {
-            j++;
-        }
+        amdgpu_res_next(&cursor, PAGE_SIZE);
+        j++;
Here to handle cross mm_node case.

if (j >= cursor.size - 1 && i < npages - 1) {
    r = svm_migrate_copy_memory_gart(adev, src + i - j,
                                     dst + i - j, j + 1,
                                     FROM_RAM_TO_VRAM,
                                     mfence);
    if (r)
        goto out_free_vram_pages;
    amdgpu_res_next(&cursor, (j + 1) * PAGE_SIZE);
    j = 0;
} else {
    j++;
}

Yeah, that was the point I couldn't understand. Why would we want that anyway?

svm_migrate_copy_memory_gart uses SDMA to copy from system memory to vram. System memory is GART-mapped paged memory, while vram is direct-mapped, physically contiguous memory. We have to call svm_migrate_copy_memory_gart to set up a new SDMA copy in two cases:

1. for the system memory pages array, if a src page is not MIGRATE_PFN_VALID

2. if the dst vram pages cross mm_nodes, because then the physical vram address is not contiguous.

This if is for case 2.

Regards,

Philip


Regards,
Christian.



      }
        r = svm_migrate_copy_memory_gart(adev, src + i - j, dst + i - j, j,

_______________________________________________
amd-gfx mailing list
amd-gfx@xxxxxxxxxxxxxxxxxxxxx
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

[Index of Archives]     [Linux USB Devel]     [Linux Audio Users]     [Yosemite News]     [Linux Kernel]     [Linux SCSI]

  Powered by Linux