Support pinning of vmas using XE_VM_BIND_FLAG_PIN, initially for userptr only. Pinned memory becomes accounted against RLIMIT_MEMLOCK and processes with CAP_IPC_LOCK will not apply the limit. This is pretty similar to mlock()'ing userptr memory with the added benefit that the driver is aware and can ignore some actions in the MMU invalidation notifier. This will initially become useful for compute VMs on hardware without mid-thread-preemption capability since with pinned pages, the MMU invalidation notifier never tries to preempt a running compute kernel. If that were the only usage we could restrict this to a flag that always pins userptr VMAs on compute VMs on such hardware, but there are indications that this may become needed in other situations as well. From a more general point of view, the usage pattern of a system may be such that in most cases it only ever runs a single workload per system and then the sysadmin would want to configure the system to allow extensive pinning for performance reasons. Hence we might want to extend the pinning capability to bo-backed VMAs as well. How that pinning will be accounted remains an open question, but building on the current drm CGROUP work would be an option. 
Signed-off-by: Thomas Hellström <thomas.hellstrom@xxxxxxxxxxxxxxx> Reviewed-by: Matthew Brost <matthew.brost@xxxxxxxxx> --- drivers/gpu/drm/xe/xe_vm.c | 33 +++++++++++++++++++++++++------- drivers/gpu/drm/xe/xe_vm_types.h | 2 ++ include/uapi/drm/xe_drm.h | 18 +++++++++++++++++ 3 files changed, 46 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c index a645cfa131ca..fdfe5a411386 100644 --- a/drivers/gpu/drm/xe/xe_vm.c +++ b/drivers/gpu/drm/xe/xe_vm.c @@ -950,6 +950,7 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm, u64 start, u64 end, bool read_only, bool is_null, + bool pin, u8 tile_mask) { struct xe_vma *vma; @@ -981,6 +982,8 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm, vma->gpuva.flags |= XE_VMA_READ_ONLY; if (is_null) vma->gpuva.flags |= DRM_GPUVA_SPARSE; + if (pin) + vma->gpuva.flags |= XE_VMA_PINNED; if (tile_mask) { vma->tile_mask = tile_mask; @@ -2382,6 +2385,7 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo, op->map.read_only = operation & XE_VM_BIND_FLAG_READONLY; op->map.is_null = operation & XE_VM_BIND_FLAG_NULL; + op->map.pin = operation & XE_VM_BIND_FLAG_PIN; } break; case XE_VM_BIND_OP_UNMAP: @@ -2446,7 +2450,8 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo, } static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op, - u8 tile_mask, bool read_only, bool is_null) + u8 tile_mask, bool read_only, bool is_null, + bool pin) { struct xe_bo *bo = op->gem.obj ? 
gem_to_xe_bo(op->gem.obj) : NULL; struct xe_vma *vma; @@ -2462,7 +2467,7 @@ static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op, } vma = xe_vma_create(vm, bo, op->gem.offset, op->va.addr, op->va.addr + - op->va.range - 1, read_only, is_null, + op->va.range - 1, read_only, is_null, pin, tile_mask); if (bo) xe_bo_unlock(bo, &ww); @@ -2577,7 +2582,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q, vma = new_vma(vm, &op->base.map, op->tile_mask, op->map.read_only, - op->map.is_null); + op->map.is_null, op->map.pin); if (IS_ERR(vma)) { err = PTR_ERR(vma); goto free_fence; @@ -2602,10 +2607,13 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q, bool is_null = op->base.remap.unmap->va->flags & DRM_GPUVA_SPARSE; + bool pin = + op->base.remap.unmap->va->flags & + XE_VMA_PINNED; vma = new_vma(vm, op->base.remap.prev, op->tile_mask, read_only, - is_null); + is_null, pin); if (IS_ERR(vma)) { err = PTR_ERR(vma); goto free_fence; @@ -2638,10 +2646,13 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q, bool is_null = op->base.remap.unmap->va->flags & DRM_GPUVA_SPARSE; + bool pin = + op->base.remap.unmap->va->flags & + XE_VMA_PINNED; vma = new_vma(vm, op->base.remap.next, op->tile_mask, read_only, - is_null); + is_null, pin); if (IS_ERR(vma)) { err = PTR_ERR(vma); goto free_fence; @@ -3146,11 +3157,12 @@ static void vm_bind_ioctl_ops_unwind(struct xe_vm *vm, #define SUPPORTED_FLAGS \ (FORCE_ASYNC_OP_ERROR | XE_VM_BIND_FLAG_ASYNC | \ XE_VM_BIND_FLAG_READONLY | XE_VM_BIND_FLAG_IMMEDIATE | \ - XE_VM_BIND_FLAG_NULL | 0xffff) + XE_VM_BIND_FLAG_NULL | XE_VM_BIND_FLAG_PIN | 0xffff) #else #define SUPPORTED_FLAGS \ (XE_VM_BIND_FLAG_ASYNC | XE_VM_BIND_FLAG_READONLY | \ - XE_VM_BIND_FLAG_IMMEDIATE | XE_VM_BIND_FLAG_NULL | 0xffff) + XE_VM_BIND_FLAG_IMMEDIATE | XE_VM_BIND_FLAG_NULL | \ + XE_VM_BIND_FLAG_PIN | 0xffff) #endif #define XE_64K_PAGE_MASK 0xffffull @@ -3220,6 +3232,13 @@ 
static int vm_bind_ioctl_check_args(struct xe_device *xe, goto free_bind_ops; } + /* TODO: Support OP_PREFETCH, OP_MAP */ + if (XE_IOCTL_DBG(xe, (op & XE_VM_BIND_FLAG_PIN) && + VM_BIND_OP(op) != XE_VM_BIND_OP_MAP_USERPTR)) { + err = -EINVAL; + goto free_bind_ops; + } + if (XE_IOCTL_DBG(xe, VM_BIND_OP(op) > XE_VM_BIND_OP_PREFETCH) || XE_IOCTL_DBG(xe, op & ~SUPPORTED_FLAGS) || diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h index 9b90e649cd69..024ccabadd12 100644 --- a/drivers/gpu/drm/xe/xe_vm_types.h +++ b/drivers/gpu/drm/xe/xe_vm_types.h @@ -360,6 +360,8 @@ struct xe_vma_op_map { bool read_only; /** @is_null: is NULL binding */ bool is_null; + /** @pin: pin underlying memory */ + bool pin; }; /** struct xe_vma_op_remap - VMA remap operation */ diff --git a/include/uapi/drm/xe_drm.h b/include/uapi/drm/xe_drm.h index 86f16d50e9cc..fc3d9cd4f8d0 100644 --- a/include/uapi/drm/xe_drm.h +++ b/include/uapi/drm/xe_drm.h @@ -631,6 +631,24 @@ struct drm_xe_vm_bind_op { * intended to implement VK sparse bindings. */ #define XE_VM_BIND_FLAG_NULL (0x1 << 19) + /* + * When the PIN flag is set, the user requests the underlying + * backing store of the vma to be pinned, that is, it will be + * resident while bound and the underlying physical memory + * will not change. For userptr VMAs this means that if the + * user performs an operation that changes the underlying + * pages of the CPU virtual space, the corresponding pinned + * GPU virtual space will not pick up the new memory unless + * an OP_UNMAP followed by a OP_MAP_USERPTR is performed. + * Pinned userptr memory is accounted in the same way as + * mlock(2), and if pinning fails the following error codes + * may be returned: + * -EINVAL: The memory region does not support pinning. + * -EPERM: The process is not permitted to pin. + * -ENOMEM: The pinning limit does not allow pinning. + * For userptr memory, CAP_IPC_LOCK will bypass the limit checking. 
+ */ +#define XE_VM_BIND_FLAG_PIN (0x1 << 20) /** @op: Operation to perform (lower 16 bits) and flags (upper 16 bits) */ __u32 op; -- 2.41.0