We have been discussing for quite a while now whether that is really the
right approach, but digging deeper into a bug report on arm showed that
this is actually horribly broken right now.

The reason for this is that vmf_insert_mixed_prot() always tries to grab a
reference to the underlying page on architectures without
ARCH_HAS_PTE_SPECIAL and, as far as I can see, also enables GUP.
So nuke VM_MIXEDMAP here and use VM_PFNMAP instead.
Also make sure to reject mappings without VM_SHARED.
v2: reject COW mappings, merge the function with its only caller
Signed-off-by: Christian König <christian.koenig@xxxxxxx>
Bugs: https://gitlab.freedesktop.org/drm/amd/-/issues/1606#note_936174
---
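For context, the VM_SHARED/COW rejection boils down to a check like the
sketch below. This is illustrative only: the function name
example_mmap_obj_setup and the exact flag set are assumptions made for the
sketch, not the literal patch content. It relies on the kernel's
is_cow_mapping() helper from include/linux/mm.h:

/* Illustrative sketch only -- not the literal content of this patch. */
static int example_mmap_obj_setup(struct vm_area_struct *vma,
				  struct ttm_buffer_object *bo)
{
	/* is_cow_mapping() is true for private mappings that may be
	 * written to, i.e. the cases where a write would fork an
	 * anonymous copy instead of hitting the BO memory. Reject
	 * those outright.
	 */
	if (is_cow_mapping(vma->vm_flags))
		return -EINVAL;

	/* Use VM_PFNMAP so the core MM never takes page references or
	 * allows GUP on these PTEs, unlike VM_MIXEDMAP on architectures
	 * without ARCH_HAS_PTE_SPECIAL.
	 */
	vma->vm_flags |= VM_PFNMAP | VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_ops = &ttm_bo_vm_ops;
	vma->vm_private_data = bo;
	return 0;
}

The actual change is folded into ttm_bo_mmap_obj() in the last hunk of the
diff; the sketch just spells out the intent of the VM_SHARED/COW check.
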
drivers/gpu/drm/ttm/ttm_bo_vm.c | 44 +++++++++++----------------------
1 file changed, 14 insertions(+), 30 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 61828488ae2b..c9edb75626d9 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -359,12 +359,7 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
 	 * at arbitrary times while the data is mmap'ed.
 	 * See vmf_insert_mixed_prot() for a discussion.
 	 */
-	if (vma->vm_flags & VM_MIXEDMAP)
-		ret = vmf_insert_mixed_prot(vma, address,
-					    __pfn_to_pfn_t(pfn, PFN_DEV),
-					    prot);
-	else
-		ret = vmf_insert_pfn_prot(vma, address, pfn, prot);
+	ret = vmf_insert_pfn_prot(vma, address, pfn, prot);
 
 	/* Never error on prefaulted PTEs */
 	if (unlikely((ret & VM_FAULT_ERROR))) {
@@ -411,15 +406,9 @@ vm_fault_t ttm_bo_vm_dummy_page(struct vm_fault *vmf, pgprot_t prot)
 	pfn = page_to_pfn(page);
 
 	/* Prefault the entire VMA range right away to avoid further faults */
-	for (address = vma->vm_start; address < vma->vm_end; address += PAGE_SIZE) {
-
-		if (vma->vm_flags & VM_MIXEDMAP)
-			ret = vmf_insert_mixed_prot(vma, address,
-						    __pfn_to_pfn_t(pfn, PFN_DEV),
-						    prot);
-		else
-			ret = vmf_insert_pfn_prot(vma, address, pfn, prot);
-	}
+	for (address = vma->vm_start; address < vma->vm_end;
+	     address += PAGE_SIZE)
+		ret = vmf_insert_pfn_prot(vma, address, pfn, prot);
 
 	return ret;
 }
@@ -560,8 +549,16 @@ static const struct vm_operations_struct ttm_bo_vm_ops = {
 	.access = ttm_bo_vm_access,
 };
 
-static void ttm_bo_mmap_vma_setup(struct ttm_buffer_object *bo, struct vm_area_struct *vma)
+int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
 {
+	/* Enforce VM_SHARED here since without it we would have really strange
+	 * behavior on COW.
+	 */