RE: [PATCH] drivers: hv: dxgkrnl: Allow user to specify CPU VA for device allocation

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



[Public]

ping

>-----Original Message-----
>From: Yu, Lang <Lang.Yu@xxxxxxx>
>Sent: Wednesday, December 27, 2023 11:50 AM
>To: Iouri Tarassov <iourit@xxxxxxxxxxxxxxxxxxx>
>Cc: linux-hyperv@xxxxxxxxxxxxxxx; Yu, Lang <Lang.Yu@xxxxxxx>
>Subject: [PATCH] drivers: hv: dxgkrnl: Allow user to specify CPU VA for device
>allocation
>
>The motivation is we want to unify CPU VA and GPU VA.
>
>Signed-off-by: Lang Yu <Lang.Yu@xxxxxxx>
>---
> drivers/hv/dxgkrnl/dxgvmbus.c | 24 ++++++++++++++----------
> 1 file changed, 14 insertions(+), 10 deletions(-)
>
>diff --git a/drivers/hv/dxgkrnl/dxgvmbus.c b/drivers/hv/dxgkrnl/dxgvmbus.c
>index 9320bede3a0a..a4bca27a7cc8 100644
>--- a/drivers/hv/dxgkrnl/dxgvmbus.c
>+++ b/drivers/hv/dxgkrnl/dxgvmbus.c
>@@ -580,7 +580,7 @@ int dxg_unmap_iospace(void *va, u32 size)
>       return 0;
> }
>
>-static u8 *dxg_map_iospace(u64 iospace_address, u32 size,
>+static u8 *dxg_map_iospace(u64 iospace_address, u64 user_va, u32 size,
>                          unsigned long protection, bool cached)
> {
>       struct vm_area_struct *vma;
>@@ -594,7 +594,12 @@ static u8 *dxg_map_iospace(u64 iospace_address, u32 size,
>               return NULL;
>       }
>
>-      va = vm_mmap(NULL, 0, size, protection, MAP_SHARED | MAP_ANONYMOUS, 0);
>+      if (user_va)
>+              va = vm_mmap(untagged_addr(user_va), 0, size, protection,
>+                           MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, 0);
>+      else
>+              va = vm_mmap(NULL, 0, size, protection,
>+                           MAP_SHARED | MAP_ANONYMOUS, 0);
>       if ((long)va <= 0) {
>               DXG_ERR("vm_mmap failed %lx %d", va, size);
>               return NULL;
>@@ -789,9 +794,8 @@ int dxgvmb_send_open_sync_object_nt(struct dxgprocess *process,
>
>       args->sync_object = result.sync_object;
>       if (syncobj->monitored_fence) {
>-              void *va = dxg_map_iospace(result.guest_cpu_physical_address,
>-                                         PAGE_SIZE, PROT_READ | PROT_WRITE,
>-                                         true);
>+              void *va = dxg_map_iospace(result.guest_cpu_physical_address, 0,
>+                                         PAGE_SIZE, PROT_READ | PROT_WRITE, true);
>               if (va == NULL) {
>                       ret = -ENOMEM;
>                       goto cleanup;
>@@ -1315,8 +1319,8 @@ int dxgvmb_send_create_paging_queue(struct dxgprocess *process,
>       args->paging_queue = result.paging_queue;
>       args->sync_object = result.sync_object;
>       args->fence_cpu_virtual_address =
>-          dxg_map_iospace(result.fence_storage_physical_address, PAGE_SIZE,
>-                          PROT_READ | PROT_WRITE, true);
>+          dxg_map_iospace(result.fence_storage_physical_address, 0,
>+                          PAGE_SIZE, PROT_READ | PROT_WRITE, true);
>       if (args->fence_cpu_virtual_address == NULL) {
>               ret = -ENOMEM;
>               goto cleanup;
>@@ -2867,7 +2871,7 @@ dxgvmb_send_create_sync_object(struct dxgprocess *process,
>       }
>
>       if (syncobj->monitored_fence) {
>-              va = dxg_map_iospace(result.fence_storage_address, PAGE_SIZE,
>+              va = dxg_map_iospace(result.fence_storage_address, 0, PAGE_SIZE,
>                                    PROT_READ | PROT_WRITE, true);
>               if (va == NULL) {
>                       ret = -ENOMEM;
>@@ -3156,7 +3160,7 @@ int dxgvmb_send_lock2(struct dxgprocess *process,
>               } else {
>                       u64 offset = (u64)result.cpu_visible_buffer_offset;
>
>-                      args->data = dxg_map_iospace(offset,
>+                      args->data = dxg_map_iospace(offset, args->data,
>                                       alloc->num_pages << PAGE_SHIFT,
>                                       PROT_READ | PROT_WRITE, alloc->cached);
>                       if (args->data) {
>@@ -3712,7 +3716,7 @@ int dxgvmb_send_create_hwqueue(struct dxgprocess *process,
>       }
>
>       hwqueue->progress_fence_mapped_address =
>-              dxg_map_iospace((u64)command->hwqueue_progress_fence_cpuva,
>+              dxg_map_iospace((u64)command->hwqueue_progress_fence_cpuva, 0,
>                               PAGE_SIZE, PROT_READ | PROT_WRITE, true);
>       if (hwqueue->progress_fence_mapped_address == NULL) {
>               ret = -ENOMEM;
>--
>2.25.1






[Index of Archives]     [Linux Samsung SoC]     [Linux Rockchip SoC]     [Linux Actions SoC]     [Linux for Synopsys ARC Processors]     [Linux NFS]     [Linux NILFS]     [Linux USB Devel]     [Video for Linux]     [Linux Audio Users]     [Yosemite News]     [Linux Kernel]     [Linux SCSI]


  Powered by Linux