Patch "drm/etnaviv: Map and unmap GPUVA range with respect to the GPUVA size" has been added to the 6.12-stable tree

This is a note to let you know that I've just added the patch titled

    drm/etnaviv: Map and unmap GPUVA range with respect to the GPUVA size

to the 6.12-stable tree which can be found at:
    http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=summary

The filename of the patch is:
     drm-etnaviv-map-and-unmap-gpuva-range-with-respect-t.patch
and it can be found in the queue-6.12 subdirectory.

If you, or anyone else, feels it should not be added to the stable tree,
please let <stable@xxxxxxxxxxxxxxx> know about it.



commit bc12edee127f8517535382a81bfdf28bf27ca462
Author: Sui Jingfeng <sui.jingfeng@xxxxxxxxx>
Date:   Sat Oct 26 04:43:55 2024 +0800

    drm/etnaviv: Map and unmap GPUVA range with respect to the GPUVA size
    
    [ Upstream commit 68786b7f49873c69ec332a045a9bf4337d71ec20 ]
    
    Etnaviv assumes that the GPU page size is 4KiB; however, GPUVA ranges
    collide when using softpin-capable GPUs on a configuration where the
    CPU page size is not 4KiB. The root cause is that a kernel-side BO
    occupies a larger address range than userspace expects, because the
    backing memory of a GEM buffer object has to be aligned to the CPU
    PAGE_SIZE. As a result, a GPUVA range allocated by userspace may fail
    to be inserted into the specified hole exactly.

    To solve this problem, record the GPU-visible size of a BO first, then
    map and unmap the SG entries strictly with respect to the total GPUVA
    size.
    
    Signed-off-by: Sui Jingfeng <sui.jingfeng@xxxxxxxxx>
    Signed-off-by: Lucas Stach <l.stach@xxxxxxxxxxxxxx>
    Stable-dep-of: 9aad03e7f5db ("drm/etnaviv: Drop the offset in page manipulation")
    Signed-off-by: Sasha Levin <sashal@xxxxxxxxxx>
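
For illustration only, the size mismatch described in the commit message can
be reproduced with a small userspace sketch (the names, sizes and helper
below are assumptions made for the example, not driver code): on a kernel
built with a 16KiB PAGE_SIZE, a buffer for which userspace reserves a 4KiB
GPUVA hole is backed by 16KiB of memory, so sizing the GPUVA node from the
CPU-aligned backing size overshoots the hole userspace picked.

#include <stdio.h>

#define GPU_PAGE_SIZE 4096u          /* etnaviv GPU page size (4KiB)      */
#define CPU_PAGE_SIZE (16u * 1024u)  /* e.g. a 16KiB PAGE_SIZE kernel     */

/* Round up to a boundary, like the kernel's ALIGN()/PAGE_ALIGN(). */
static unsigned int align_up(unsigned int x, unsigned int a)
{
	return (x + a - 1) & ~(a - 1);
}

int main(void)
{
	unsigned int user_size = 4096;                                   /* what userspace softpins */
	unsigned int gpu_va_size = align_up(user_size, GPU_PAGE_SIZE);   /* 4KiB  */
	unsigned int backing_size = align_up(user_size, CPU_PAGE_SIZE);  /* 16KiB */

	/* Before the patch the GPUVA node was sized from the CPU-aligned
	 * backing size, which no longer fits the 4KiB hole userspace chose. */
	printf("userspace reserves %u bytes, kernel tried to map %u bytes\n",
	       gpu_va_size, backing_size);
	return 0;
}
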

diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
index 1661d589bf3e7..a382920ae2be0 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
@@ -69,9 +69,11 @@ static int etnaviv_context_map(struct etnaviv_iommu_context *context,
 	return ret;
 }
 
-static int etnaviv_iommu_map(struct etnaviv_iommu_context *context, u32 iova,
+static int etnaviv_iommu_map(struct etnaviv_iommu_context *context,
+			     u32 iova, unsigned int va_len,
 			     struct sg_table *sgt, int prot)
-{	struct scatterlist *sg;
+{
+	struct scatterlist *sg;
 	unsigned int da = iova;
 	unsigned int i;
 	int ret;
@@ -81,14 +83,16 @@ static int etnaviv_iommu_map(struct etnaviv_iommu_context *context, u32 iova,
 
 	for_each_sgtable_dma_sg(sgt, sg, i) {
 		phys_addr_t pa = sg_dma_address(sg) - sg->offset;
-		size_t bytes = sg_dma_len(sg) + sg->offset;
+		unsigned int da_len = sg_dma_len(sg) + sg->offset;
+		unsigned int bytes = min_t(unsigned int, da_len, va_len);
 
-		VERB("map[%d]: %08x %pap(%zx)", i, iova, &pa, bytes);
+		VERB("map[%d]: %08x %pap(%x)", i, iova, &pa, bytes);
 
 		ret = etnaviv_context_map(context, da, pa, bytes, prot);
 		if (ret)
 			goto fail;
 
+		va_len -= bytes;
 		da += bytes;
 	}
 
@@ -104,21 +108,7 @@ static int etnaviv_iommu_map(struct etnaviv_iommu_context *context, u32 iova,
 static void etnaviv_iommu_unmap(struct etnaviv_iommu_context *context, u32 iova,
 				struct sg_table *sgt, unsigned len)
 {
-	struct scatterlist *sg;
-	unsigned int da = iova;
-	int i;
-
-	for_each_sgtable_dma_sg(sgt, sg, i) {
-		size_t bytes = sg_dma_len(sg) + sg->offset;
-
-		etnaviv_context_unmap(context, da, bytes);
-
-		VERB("unmap[%d]: %08x(%zx)", i, iova, bytes);
-
-		BUG_ON(!PAGE_ALIGNED(bytes));
-
-		da += bytes;
-	}
+	etnaviv_context_unmap(context, iova, len);
 
 	context->flush_seq++;
 }
@@ -131,7 +121,7 @@ static void etnaviv_iommu_remove_mapping(struct etnaviv_iommu_context *context,
 	lockdep_assert_held(&context->lock);
 
 	etnaviv_iommu_unmap(context, mapping->vram_node.start,
-			    etnaviv_obj->sgt, etnaviv_obj->base.size);
+			    etnaviv_obj->sgt, etnaviv_obj->size);
 	drm_mm_remove_node(&mapping->vram_node);
 }
 
@@ -305,16 +295,14 @@ int etnaviv_iommu_map_gem(struct etnaviv_iommu_context *context,
 	node = &mapping->vram_node;
 
 	if (va)
-		ret = etnaviv_iommu_insert_exact(context, node,
-						 etnaviv_obj->base.size, va);
+		ret = etnaviv_iommu_insert_exact(context, node, etnaviv_obj->size, va);
 	else
-		ret = etnaviv_iommu_find_iova(context, node,
-					      etnaviv_obj->base.size);
+		ret = etnaviv_iommu_find_iova(context, node, etnaviv_obj->size);
 	if (ret < 0)
 		goto unlock;
 
 	mapping->iova = node->start;
-	ret = etnaviv_iommu_map(context, node->start, sgt,
+	ret = etnaviv_iommu_map(context, node->start, etnaviv_obj->size, sgt,
 				ETNAVIV_PROT_READ | ETNAVIV_PROT_WRITE);
 
 	if (ret < 0) {



