On 2023-07-27 19:43, Alex Sierra wrote:
The DMA address references within svm_ranges should be unmapped only
after the memory has been released from the system. In case of range
splitting, the DMA address information should be copied to the
corresponding new range after the split, while leaving the DMA mapping
itself intact.
Signed-off-by: Alex Sierra <alex.sierra@xxxxxxx>
---
drivers/gpu/drm/amd/amdkfd/kfd_migrate.c | 2 +-
drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 67 ++++++++++++++++++------
drivers/gpu/drm/amd/amdkfd/kfd_svm.h | 2 +-
3 files changed, 52 insertions(+), 19 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index 709ac885ca6d..2586ac070190 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -461,7 +461,7 @@ svm_migrate_vma_to_vram(struct kfd_node *node, struct svm_range *prange,
0, node->id, trigger);
svm_range_dma_unmap(adev->dev, scratch, 0, npages);
- svm_range_free_dma_mappings(prange);
+ svm_range_free_dma_mappings(prange, true);
Do we even need to call svm_range_dma_unmap just before? Looks like
that's done inside svm_range_free_dma_mappings anyway.
Maybe this should also be moved to svm_migrate_ram_to_vram because it
affects the entire prange and not just one VMA. So you only need to do
it once per prange. Let's clean that up in a follow-up change.
out_free:
kvfree(buf);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 1b50eae051a4..d1ff1c7e96d0 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -241,7 +241,7 @@ void svm_range_dma_unmap(struct device *dev, dma_addr_t *dma_addr,
}
}
-void svm_range_free_dma_mappings(struct svm_range *prange)
+void svm_range_free_dma_mappings(struct svm_range *prange, bool unmap_dma)
{
struct kfd_process_device *pdd;
dma_addr_t *dma_addr;
@@ -262,7 +262,8 @@ void svm_range_free_dma_mappings(struct svm_range *prange)
continue;
}
dev = &pdd->dev->adev->pdev->dev;
- svm_range_dma_unmap(dev, dma_addr, 0, prange->npages);
+ if (unmap_dma)
+ svm_range_dma_unmap(dev, dma_addr, 0, prange->npages);
kvfree(dma_addr);
prange->dma_addr[gpuidx] = NULL;
}
@@ -277,7 +278,7 @@ static void svm_range_free(struct svm_range *prange, bool update_mem_usage)
I'd rename the update_mem_usage parameter to better represent what it
means. Maybe something like "do_unmap".
prange->start, prange->last);
svm_range_vram_node_free(prange);
- svm_range_free_dma_mappings(prange);
+ svm_range_free_dma_mappings(prange, update_mem_usage);
if (update_mem_usage && !p->xnack_enabled) {
pr_debug("unreserve prange 0x%p size: 0x%llx\n", prange, size);
@@ -851,12 +852,46 @@ static void svm_range_debug_dump(struct svm_range_list *svms)
}
}
+static int
+svm_range_copy_array(void *ppdst, void *ppsrc, size_t size,
ppdst and ppsrc should be defined as void ** to avoid some ugly pointer
type casts below. I'm not sure why ppsrc is a pointer to a pointer in
the first place. I think it should just be a pointer because you don't
need to update the caller's pointer.
It may also be cleaner if you return the destination pointer as return
value instead and return NULL if allocation failed.
+ uint64_t num_elements, uint64_t offset)
+{
+ unsigned char *dst, *psrc;
+
+ psrc = *(unsigned char **)ppsrc;
+ dst = kvmalloc_array(num_elements, size, GFP_KERNEL);
+ if (!dst)
+ return -ENOMEM;
+ memcpy(dst, psrc + offset, num_elements * size);
+ *(void **)ppdst = dst;
+
+ return 0;
+}
+
+static int
+svm_range_copy_dma_addrs(struct svm_range *dst, struct svm_range *src)
+{
+ int i, r;
+
+ for (i = 0; i < MAX_GPU_INSTANCE; i++) {
+ if (!src->dma_addr[i])
+ continue;
+ r = svm_range_copy_array(&dst->dma_addr[i], &src->dma_addr[i],
+ sizeof(*src->dma_addr[i]), src->npages, 0);
+ if (r)
+ return r;
+ }
+
+ return 0;
+}
+
static int
svm_range_split_array(void *ppnew, void *ppold, size_t size,
uint64_t old_start, uint64_t old_n,
uint64_t new_start, uint64_t new_n)
{
unsigned char *new, *old, *pold;
+ int r;
uint64_t d;
if (!ppold)
@@ -865,22 +900,16 @@ svm_range_split_array(void *ppnew, void *ppold, size_t size,
if (!pold)
return 0;
- new = kvmalloc_array(new_n, size, GFP_KERNEL);
- if (!new)
- return -ENOMEM;
-
d = (new_start - old_start) * size;
- memcpy(new, pold + d, new_n * size);
-
- old = kvmalloc_array(old_n, size, GFP_KERNEL);
- if (!old) {
+ r = svm_range_copy_array(&new, ppold, size, new_n, d);
+ if (r)
+ return r;
+ d = (new_start == old_start) ? new_n * size : 0;
+ r = svm_range_copy_array(&old, ppold, size, old_n, d);
+ if (r) {
kvfree(new);
- return -ENOMEM;
+ return r;
}
-
- d = (new_start == old_start) ? new_n * size : 0;
- memcpy(old, pold + d, old_n * size);
-
kvfree(pold);
*(void **)ppold = old;
*(void **)ppnew = new;
@@ -2075,7 +2104,11 @@ svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size,
r = -ENOMEM;
goto out;
}
-
+ r = svm_range_copy_dma_addrs(prange, old);
This should be in svm_range_clone instead.
Regards,
Felix
+ if (r) {
+ svm_range_free(prange, false);
+ goto out;
+ }
list_add(&old->update_list, remove_list);
list_add(&prange->list, insert_list);
list_add(&prange->update_list, update_list);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
index 21b14510882b..9e668eeefb32 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
@@ -183,7 +183,7 @@ void svm_range_add_list_work(struct svm_range_list *svms,
void schedule_deferred_list_work(struct svm_range_list *svms);
void svm_range_dma_unmap(struct device *dev, dma_addr_t *dma_addr,
unsigned long offset, unsigned long npages);
-void svm_range_free_dma_mappings(struct svm_range *prange);
+void svm_range_free_dma_mappings(struct svm_range *prange, bool unmap_dma);
int svm_range_get_info(struct kfd_process *p, uint32_t *num_svm_ranges,
uint64_t *svm_priv_data_size);
int kfd_criu_checkpoint_svm(struct kfd_process *p,