+ struct pvr_gem_object *pvr_gem = gem_to_pvr_gem(op->remap.unmap->va->gem.obj);
+
+ drm_gpuva_unlink(op->remap.unmap->va);
+ kfree(op->remap.unmap->va);
+
+ pvr_gem_object_put(pvr_gem);
+ }
+
+ return 0;
+}
+
+/*
+ * Public API
+ *
+ * For an overview of these functions, see *DOC: Public API* in "pvr_vm.h".
+ */
+
+/**
+ * pvr_device_addr_is_valid() - Tests whether a device-virtual address
+ * is valid.
+ * @device_addr: Virtual device address to test.
+ *
+ * Return:
+ * * %true if @device_addr is within the valid range for a device page
+ * table and is aligned to the device page size, or
+ * * %false otherwise.
+ */
+bool
+pvr_device_addr_is_valid(u64 device_addr)
+{
+ return (device_addr & ~PVR_PAGE_TABLE_ADDR_MASK) == 0 &&
+ (device_addr & ~PVR_DEVICE_PAGE_MASK) == 0;
+}
+
+/**
+ * pvr_device_addr_and_size_are_valid() - Tests whether a device-virtual
+ * address and associated size are both valid.
+ * @device_addr: Virtual device address to test.
+ * @size: Size of the range based at @device_addr to test.
+ *
+ * Calling pvr_device_addr_is_valid() twice (once on @device_addr, and again
+ * on @device_addr + @size) to verify a device-virtual address range initially
+ * seems intuitive, but it produces a false-negative when the address range
+ * is right at the end of device-virtual address space.
+ *
+ * This function catches that corner case, as well as checking that
+ * @size is non-zero.
+ *
+ * Return:
+ * * %true if @device_addr is device page aligned; @size is device page
+ * aligned; the range specified by @device_addr and @size is within the
+ * bounds of the device-virtual address space, and @size is non-zero, or
+ * * %false otherwise.
+ */
+bool
+pvr_device_addr_and_size_are_valid(u64 device_addr, u64 size)
+{
+ u64 end;
+
+ return pvr_device_addr_is_valid(device_addr) &&
+        size != 0 && (size & ~PVR_DEVICE_PAGE_MASK) == 0 &&
+        !check_add_overflow(device_addr, size, &end) &&
+        end <= PVR_PAGE_TABLE_ADDR_SPACE_SIZE;
+}
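+
+/*
+ * Worked example (illustrative only; PVR_DEVICE_PAGE_SIZE is assumed to be
+ * the size companion of PVR_DEVICE_PAGE_MASK): a range covering the final
+ * device page of the address space,
+ *
+ *   device_addr = PVR_PAGE_TABLE_ADDR_SPACE_SIZE - PVR_DEVICE_PAGE_SIZE;
+ *   size = PVR_DEVICE_PAGE_SIZE;
+ *
+ * satisfies device_addr + size <= PVR_PAGE_TABLE_ADDR_SPACE_SIZE and is
+ * accepted here, whereas pvr_device_addr_is_valid(device_addr + size)
+ * would reject it: the exclusive end address lies outside
+ * PVR_PAGE_TABLE_ADDR_MASK even though no byte of the range does.
+ */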
+
+static const struct drm_gpuva_fn_ops pvr_vm_gpuva_ops = {
+ .sm_step_map = pvr_vm_gpuva_map,
+ .sm_step_remap = pvr_vm_gpuva_remap,
+ .sm_step_unmap = pvr_vm_gpuva_unmap,
+};
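+
+/*
+ * Note on the callbacks above: drm_gpuva_sm_map() and drm_gpuva_sm_unmap()
+ * (called under vm_ctx->lock below) compute the split/merge steps for a
+ * requested range and call back into the driver: sm_step_map for a brand
+ * new mapping, sm_step_remap for an existing mapping that is only partially
+ * covered, and sm_step_unmap for a mapping that is removed entirely.
+ */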
+
+/**
+ * pvr_vm_create_context() - Create a new VM context.
+ * @pvr_dev: Target PowerVR device.
+ * @is_userspace_context: %true if this context is for userspace. This will
+ * create a firmware memory context for the VM context
+ * and disable warnings when tearing down mappings.
+ *
+ * Return:
+ * * A handle to the newly-minted VM context on success,
+ * * -%EINVAL if the feature "virtual address space bits" on @pvr_dev is
+ * missing or has an unsupported value,
+ * * -%ENOMEM if allocation of the structure behind the opaque handle fails,
+ * or
+ * * Any error encountered while setting up internal structures.
+ */
+struct pvr_vm_context *
+pvr_vm_create_context(struct pvr_device *pvr_dev, bool is_userspace_context)
+{
+ struct drm_device *drm_dev = from_pvr_device(pvr_dev);
+
+ struct pvr_vm_context *vm_ctx;
+ u16 device_addr_bits;
+
+ int err;
+
+ err = PVR_FEATURE_VALUE(pvr_dev, virtual_address_space_bits,
+ &device_addr_bits);
+ if (err) {
+ drm_err(drm_dev,
+ "Failed to get device virtual address space bits\n");
+ return ERR_PTR(err);
+ }
+
+ if (device_addr_bits != PVR_PAGE_TABLE_ADDR_BITS) {
+ drm_err(drm_dev,
+ "Device has unsupported virtual address space size\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ vm_ctx = kzalloc(sizeof(*vm_ctx), GFP_KERNEL);
+ if (!vm_ctx)
+ return ERR_PTR(-ENOMEM);
+
+ vm_ctx->pvr_dev = pvr_dev;
+ kref_init(&vm_ctx->ref_count);
+ mutex_init(&vm_ctx->lock);
+
+ drm_gpuva_manager_init(&vm_ctx->gpuva_mgr,
+ is_userspace_context ? "PowerVR-user-VM" : "PowerVR-FW-VM",
+ 0, 1ULL << device_addr_bits, 0, 0, &pvr_vm_gpuva_ops);
+
+ vm_ctx->mmu_ctx = pvr_mmu_context_create(pvr_dev);
+ err = PTR_ERR_OR_ZERO(vm_ctx->mmu_ctx);
+ if (err) {
+ vm_ctx->mmu_ctx = NULL;
+ goto err_put_ctx;
+ }
+
+ if (is_userspace_context) {
+ /* TODO: Create FW mem context */
+ err = -ENODEV;
+ goto err_put_ctx;
+ }
+
+ return vm_ctx;
+
+err_put_ctx:
+ pvr_vm_context_put(vm_ctx);
+
+ return ERR_PTR(err);
+}
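+
+/*
+ * Illustrative create/teardown pairing (a sketch, not code from this
+ * driver):
+ *
+ *   struct pvr_vm_context *vm_ctx = pvr_vm_create_context(pvr_dev, false);
+ *
+ *   if (IS_ERR(vm_ctx))
+ *           return PTR_ERR(vm_ctx);
+ *
+ *   ...use vm_ctx...
+ *
+ *   pvr_vm_context_put(vm_ctx);
+ */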
+
+/**
+ * pvr_vm_context_release() - Teardown a VM context.
+ * @ref_count: Pointer to reference counter of the VM context.
+ *
+ * This function ensures that no mappings are left dangling by unmapping them
+ * all in order of ascending device-virtual address.
+ */
+static void
+pvr_vm_context_release(struct kref *ref_count)
+{
+ struct pvr_vm_context *vm_ctx =
+ container_of(ref_count, struct pvr_vm_context, ref_count);
+
+ /* TODO: Destroy FW mem context */
+ WARN_ON(vm_ctx->fw_mem_ctx_obj);
+
+ WARN_ON(pvr_vm_unmap(vm_ctx, vm_ctx->gpuva_mgr.mm_start,
+ vm_ctx->gpuva_mgr.mm_range));
+
+ drm_gpuva_manager_destroy(&vm_ctx->gpuva_mgr);
+ pvr_mmu_context_destroy(vm_ctx->mmu_ctx);
+ mutex_destroy(&vm_ctx->lock);
+
+ kfree(vm_ctx);
+}
+
+/**
+ * pvr_vm_context_lookup() - Look up VM context from handle
+ * @pvr_file: Pointer to pvr_file structure.
+ * @handle: Object handle.
+ *
+ * Takes reference on VM context object. Call pvr_vm_context_put() to release.
+ *
+ * Return:
+ * * The requested VM context on success, or
+ * * %NULL on failure (object does not exist in the file's handle table, or
+ *   is not a VM context).
+ */
+struct pvr_vm_context *
+pvr_vm_context_lookup(struct pvr_file *pvr_file, u32 handle)
+{
+ struct pvr_vm_context *vm_ctx;
+
+ xa_lock(&pvr_file->vm_ctx_handles);
+ vm_ctx = xa_load(&pvr_file->vm_ctx_handles, handle);
+ if (vm_ctx)
+ kref_get(&vm_ctx->ref_count);
+
+ xa_unlock(&pvr_file->vm_ctx_handles);
+
+ return vm_ctx;
+}
+
+/**
+ * pvr_vm_context_put() - Release a reference on a VM context
+ * @vm_ctx: Target VM context.
+ *
+ * Return:
+ * * %true if the VM context was destroyed, or
+ * * %false if there are any references still remaining.
+ */
+bool
+pvr_vm_context_put(struct pvr_vm_context *vm_ctx)
+{
+ WARN_ON(!vm_ctx);
+
+ if (vm_ctx)
+ return kref_put(&vm_ctx->ref_count, pvr_vm_context_release);
+
+ return true;
+}
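+
+/*
+ * Typical ioctl-path pairing with pvr_vm_context_lookup() (sketch; the
+ * field name args->vm_context_handle is hypothetical):
+ *
+ *   struct pvr_vm_context *vm_ctx =
+ *           pvr_vm_context_lookup(pvr_file, args->vm_context_handle);
+ *
+ *   if (!vm_ctx)
+ *           return -EINVAL;
+ *
+ *   ...operate on vm_ctx...
+ *
+ *   pvr_vm_context_put(vm_ctx);
+ */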
+
+/**
+ * pvr_destroy_vm_contexts_for_file() - Destroy any VM contexts associated
+ * with the given file.
+ * @pvr_file: Pointer to pvr_file structure.
+ *
+ * Removes all vm_contexts associated with @pvr_file from the device VM context
+ * list and drops initial references. vm_contexts will then be destroyed once
+ * all outstanding references are dropped.
+ */
+void pvr_destroy_vm_contexts_for_file(struct pvr_file *pvr_file)
+{
+ struct pvr_vm_context *vm_ctx;
+ unsigned long handle;
+
+ xa_for_each(&pvr_file->vm_ctx_handles, handle, vm_ctx) {
+ /* Put the entry returned by xa_erase(), not vm_ctx, to avoid racing
+  * with a concurrent erase of the same handle.
+  */
+ pvr_vm_context_put(xa_erase(&pvr_file->vm_ctx_handles, handle));
+ }
+}
+
+/**
+ * pvr_vm_map() - Map a section of physical memory into a section of device-virtual memory.
+ * @vm_ctx: Target VM context.
+ * @pvr_obj: Target PowerVR memory object.
+ * @pvr_obj_offset: Offset into @pvr_obj to map from.
+ * @device_addr: Virtual device address at the start of the requested mapping.
+ * @size: Size of the requested mapping.
+ *
+ * No handle is returned to represent the mapping. Instead, callers should
+ * remember @device_addr and use that as a handle.
+ *
+ * Return:
+ * * 0 on success,
+ * * -%EINVAL if @device_addr is not a valid page-aligned device-virtual
+ * address; the region specified by @pvr_obj_offset and @size does not fall
+ * entirely within @pvr_obj, or any part of the specified region of @pvr_obj
+ * is not device-virtual page-aligned,
+ * Any error encountered while performing internal operations required to
+ * create the mapping (returned from pvr_vm_gpuva_map or
+ * pvr_vm_gpuva_remap).
+ */
+int
+pvr_vm_map(struct pvr_vm_context *vm_ctx,
+ struct pvr_gem_object *pvr_obj, u64 pvr_obj_offset,
+ u64 device_addr, u64 size)
+{
+ const size_t pvr_obj_size = pvr_gem_object_size(pvr_obj);
+ struct pvr_vm_gpuva_op_ctx op_ctx = { .vm_ctx = vm_ctx };
+ struct sg_table *sgt;
+ int err;
+
+ if (!pvr_device_addr_and_size_are_valid(device_addr, size) ||
+ pvr_obj_offset & ~PAGE_MASK || size & ~PAGE_MASK ||
+ pvr_obj_offset + size > pvr_obj_size ||
+ pvr_obj_offset > pvr_obj_size) {
+ return -EINVAL;
+ }
+
+ op_ctx.new_va = kzalloc(sizeof(*op_ctx.new_va), GFP_KERNEL);
+ op_ctx.prev_va = kzalloc(sizeof(*op_ctx.prev_va), GFP_KERNEL);
+ op_ctx.next_va = kzalloc(sizeof(*op_ctx.next_va), GFP_KERNEL);
+ if (!op_ctx.new_va || !op_ctx.prev_va || !op_ctx.next_va) {
+ err = -ENOMEM;
+ goto out_free;
+ }
+
+ sgt = pvr_gem_object_get_pages_sgt(pvr_obj);
+ err = PTR_ERR_OR_ZERO(sgt);
+ if (err)
+ goto out_free;
+
+ op_ctx.mmu_op_ctx = pvr_mmu_op_context_create(vm_ctx->mmu_ctx, sgt,
+ pvr_obj_offset, size);
+ err = PTR_ERR_OR_ZERO(op_ctx.mmu_op_ctx);
+ if (err) {
+ op_ctx.mmu_op_ctx = NULL;
+ goto out_mmu_op_ctx_destroy;
+ }
+
+ mutex_lock(&vm_ctx->lock);
+ err = drm_gpuva_sm_map(&vm_ctx->gpuva_mgr, &op_ctx, device_addr, size,
+ gem_from_pvr_gem(pvr_obj), pvr_obj_offset);
+ mutex_unlock(&vm_ctx->lock);
+
+out_mmu_op_ctx_destroy:
+ pvr_mmu_op_context_destroy(op_ctx.mmu_op_ctx);
+
+out_free:
+ kfree(op_ctx.next_va);
+ kfree(op_ctx.prev_va);
+ kfree(op_ctx.new_va);
+
+ return err;
+}
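+
+/*
+ * Since pvr_vm_map() returns no mapping handle, callers key the eventual
+ * unmap on the same device_addr/size pair (sketch, error handling elided):
+ *
+ *   err = pvr_vm_map(vm_ctx, pvr_obj, 0, device_addr, size);
+ *   ...
+ *   err = pvr_vm_unmap(vm_ctx, device_addr, size);
+ */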
+
+/**
+ * pvr_vm_unmap() - Unmap an already mapped section of device-virtual memory.
+ * @vm_ctx: Target VM context.
+ * @device_addr: Virtual device address at the start of the target mapping.
+ * @size: Size of the target mapping.
+ *
+ * Return:
+ * * 0 on success,
+ * -%EINVAL if @device_addr is not a valid page-aligned device-virtual
+ * address, or if @size is zero or not device page-aligned,
+ * * Any error encountered while performing internal operations required to
+ * destroy the mapping (returned from pvr_vm_gpuva_unmap or
+ * pvr_vm_gpuva_remap).
+ */
+int
+pvr_vm_unmap(struct pvr_vm_context *vm_ctx, u64 device_addr, u64 size)
+{
+ struct pvr_vm_gpuva_op_ctx op_ctx = { .vm_ctx = vm_ctx };
+ int err;
+
+ if (!pvr_device_addr_and_size_are_valid(device_addr, size))
+ return -EINVAL;
+
+ op_ctx.prev_va = kzalloc(sizeof(*op_ctx.prev_va), GFP_KERNEL);
+ op_ctx.next_va = kzalloc(sizeof(*op_ctx.next_va), GFP_KERNEL);
+ if (!op_ctx.prev_va || !op_ctx.next_va) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ op_ctx.mmu_op_ctx =
+ pvr_mmu_op_context_create(vm_ctx->mmu_ctx, NULL, 0, 0);
+ err = PTR_ERR_OR_ZERO(op_ctx.mmu_op_ctx);
+ if (err) {
+ op_ctx.mmu_op_ctx = NULL;
+ goto out;
+ }
+
+ mutex_lock(&vm_ctx->lock);
+ err = drm_gpuva_sm_unmap(&vm_ctx->gpuva_mgr, &op_ctx, device_addr, size);
+ mutex_unlock(&vm_ctx->lock);
+
+out:
+ pvr_mmu_op_context_destroy(op_ctx.mmu_op_ctx);
+ kfree(op_ctx.next_va);
+ kfree(op_ctx.prev_va);
+
+ return err;
+}
+
+/*
+ * Static data areas are determined by firmware.
+ *
+ * When adding a new static data area you will also need to update the reserved_size field for the
+ * heap in pvr_heaps[].
+ */
+static const struct drm_pvr_static_data_area static_data_areas[] = {
+ {
+ .area_usage = DRM_PVR_STATIC_DATA_AREA_FENCE,
+ .location_heap_id = DRM_PVR_HEAP_GENERAL,
+ .offset = 0,
+ .size = 128,
+ },
+ {
+ .area_usage = DRM_PVR_STATIC_DATA_AREA_YUV_CSC,
+ .location_heap_id = DRM_PVR_HEAP_GENERAL,
+ .offset = 128,
+ .size = 1024,
+ },
+ {
+ .area_usage = DRM_PVR_STATIC_DATA_AREA_VDM_SYNC,
+ .location_heap_id = DRM_PVR_HEAP_PDS_CODE_DATA,
+ .offset = 0,
+ .size = 128,
+ },
+ {
+ .area_usage = DRM_PVR_STATIC_DATA_AREA_EOT,
+ .location_heap_id = DRM_PVR_HEAP_PDS_CODE_DATA,
+ .offset = 128,
+ .size = 128,
+ },
+ {
+ .area_usage = DRM_PVR_STATIC_DATA_AREA_VDM_SYNC,
+ .location_heap_id = DRM_PVR_HEAP_USC_CODE,
+ .offset = 0,
+ .size = 128,
+ },
+};
+
+#define GET_RESERVED_SIZE(last_offset, last_size) round_up((last_offset) + (last_size), PAGE_SIZE)
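+
+/*
+ * For example, the last DRM_PVR_HEAP_GENERAL area above is YUV_CSC at
+ * offset 128 with size 1024, so that heap's reserved size is
+ * GET_RESERVED_SIZE(128, 1024) == round_up(1152, PAGE_SIZE), i.e. 4096
+ * bytes with 4KiB pages.
+ */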
+
+/*
+ * The values given to GET_RESERVED_SIZE() are taken from the last entry in the corresponding
+ * static data area for each heap.
+ */
+static const struct drm_pvr_heap pvr_heaps[] = {
+ [DRM_PVR_HEAP_GENERAL] = {
+ .base = ROGUE_GENERAL_HEAP_BASE,
+ .size = ROGUE_GENERAL_HEAP_SIZE,
+ .flags = 0,
+ .page_size_log2 = PVR_DEVICE_PAGE_SHIFT,
+ },
+ [DRM_PVR_HEAP_PDS_CODE_DATA] = {
+ .base = ROGUE_PDSCODEDATA_HEAP_BASE,
+ .size = ROGUE_PDSCODEDATA_HEAP_SIZE,
+ .flags = 0,
+ .page_size_log2 = PVR_DEVICE_PAGE_SHIFT,
+ },
+ [DRM_PVR_HEAP_USC_CODE] = {
+ .base = ROGUE_USCCODE_HEAP_BASE,
+ .size = ROGUE_USCCODE_HEAP_SIZE,
+ .flags = 0,
+ .page_size_log2 = PVR_DEVICE_PAGE_SHIFT,
+ },
+ [DRM_PVR_HEAP_RGNHDR] = {
+ .base = ROGUE_RGNHDR_HEAP_BASE,
+ .size = ROGUE_RGNHDR_HEAP_SIZE,
+ .flags = 0,
+ .page_size_log2 = PVR_DEVICE_PAGE_SHIFT,
+ },
+ [DRM_PVR_HEAP_VIS_TEST] = {
+ .base = ROGUE_VISTEST_HEAP_BASE,
+ .size = ROGUE_VISTEST_HEAP_SIZE,
+ .flags = 0,
+ .page_size_log2 = PVR_DEVICE_PAGE_SHIFT,
+ },
+ [DRM_PVR_HEAP_TRANSFER_FRAG] = {
+ .base = ROGUE_TRANSFER_FRAG_HEAP_BASE,
+ .size = ROGUE_TRANSFER_FRAG_HEAP_SIZE,
+ .flags = 0,
+ .page_size_log2 = PVR_DEVICE_PAGE_SHIFT,
+ },
+};
+
+int
+pvr_static_data_areas_get(const struct pvr_device *pvr_dev,
+ struct drm_pvr_ioctl_dev_query_args *args)
+{
+ struct drm_pvr_dev_query_static_data_areas query = {0};
+ int err;
+
+ if (!args->pointer) {
+ args->size = sizeof(struct drm_pvr_dev_query_static_data_areas);
+ return 0;
+ }
+
+ err = PVR_UOBJ_GET(query, args->size, args->pointer);
+ if (err < 0)
+ return err;
+
+ if (!query.static_data_areas.array) {
+ query.static_data_areas.count = ARRAY_SIZE(static_data_areas);
+ query.static_data_areas.stride = sizeof(struct drm_pvr_static_data_area);
+ goto copy_out;
+ }
+
+ if (query.static_data_areas.count > ARRAY_SIZE(static_data_areas))
+ query.static_data_areas.count = ARRAY_SIZE(static_data_areas);
+
+ err = PVR_UOBJ_SET_ARRAY(&query.static_data_areas, static_data_areas);
+ if (err < 0)
+ return err;
+
+copy_out:
+ err = PVR_UOBJ_SET(args->pointer, args->size, query);
+ if (err < 0)
+ return err;
+
+ args->size = sizeof(query);
+ return 0;
+}
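+
+/*
+ * Both dev_query helpers in this file follow the same three-step protocol
+ * from the caller's side (sketch): first call with args->pointer set to
+ * NULL to learn the size of the query struct; then call with the embedded
+ * array pointer set to NULL to learn count and stride; finally allocate
+ * count * stride bytes and call once more to receive the data.
+ */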
+
+int
+pvr_heap_info_get(const struct pvr_device *pvr_dev,
+ struct drm_pvr_ioctl_dev_query_args *args)
+{
+ struct drm_pvr_dev_query_heap_info query = {0};
+ u64 dest;
+ int err;
+
+ if (!args->pointer) {
+ args->size = sizeof(struct drm_pvr_dev_query_heap_info);
+ return 0;
+ }
+
+ err = PVR_UOBJ_GET(query, args->size, args->pointer);
+ if (err < 0)
+ return err;
+
+ if (!query.heaps.array) {
+ query.heaps.count = ARRAY_SIZE(pvr_heaps);
+ query.heaps.stride = sizeof(struct drm_pvr_heap);
+ goto copy_out;
+ }
+
+ if (query.heaps.count > ARRAY_SIZE(pvr_heaps))
+ query.heaps.count = ARRAY_SIZE(pvr_heaps);
+
+ /* Region header heap is only present if BRN63142 is present. */
+ dest = query.heaps.array;
+ for (size_t i = 0; i < query.heaps.count; i++) {
+ struct drm_pvr_heap heap = pvr_heaps[i];
+
+ if (i == DRM_PVR_HEAP_RGNHDR && !PVR_HAS_QUIRK(pvr_dev, 63142))
+ heap.size = 0;
+
+ err = PVR_UOBJ_SET(dest, query.heaps.stride, heap);
+ if (err < 0)
+ return err;
+
+ dest += query.heaps.stride;
+ }
+
+copy_out:
+ err = PVR_UOBJ_SET(args->pointer, args->size, query);
+ if (err < 0)
+ return err;
+
+ args->size = sizeof(query);
+ return 0;
+}
+
+/**
+ * pvr_heap_contains_range() - Determine if a given heap contains the specified
+ * device-virtual address range.
+ * @pvr_heap: Target heap.
+ * @start: Inclusive start of the target range.
+ * @end: Inclusive end of the target range.
+ *
+ * It is an error to call this function with values of @start and @end that do
+ * not satisfy the condition @start <= @end.
+ *
+ * Return:
+ * * %true if the heap contains the range [@start, @end], or
+ * * %false otherwise.
+ */
+static __always_inline bool
+pvr_heap_contains_range(const struct drm_pvr_heap *pvr_heap, u64 start, u64 end)
+{
+ return pvr_heap->base <= start && end < pvr_heap->base + pvr_heap->size;
+}
+
+/**
+ * pvr_find_heap_containing() - Find a heap which contains the specified
+ * device-virtual address range.
+ * @pvr_dev: Target PowerVR device.
+ * @start: Start of the target range.
+ * @size: Size of the target range.
+ *
+ * Return:
+ * * A pointer to a constant instance of struct drm_pvr_heap representing the
+ * heap containing the entire range specified by @start and @size on
+ * success, or
+ * * %NULL if no such heap exists.
+ */
+const struct drm_pvr_heap *
+pvr_find_heap_containing(struct pvr_device *pvr_dev, u64 start, u64 size)
+{
+ u64 end;
+
+ if (check_add_overflow(start, size - 1, &end))
+ return NULL;
+
+ /*
+ * There are no guarantees about the order of address ranges in
+ * &pvr_heaps, so iterate over the entire array for a heap whose
+ * range completely encompasses the given range.
+ */
+ for (u32 heap_id = 0; heap_id < ARRAY_SIZE(pvr_heaps); heap_id++) {
+ /* Filter heaps that present only with an associated quirk */
+ if (heap_id == DRM_PVR_HEAP_RGNHDR &&
+ !PVR_HAS_QUIRK(pvr_dev, 63142)) {
+ continue;
+ }
+
+ if (pvr_heap_contains_range(&pvr_heaps[heap_id], start, end))
+ return &pvr_heaps[heap_id];
+ }
+
+ return NULL;
+}
+
+/**
+ * pvr_vm_find_gem_object() - Look up a buffer object from a given
+ * device-virtual address.
+ * @vm_ctx: [IN] Target VM context.
+ * @device_addr: [IN] Virtual device address at the start of the required
+ * object.
+ * @mapped_offset_out: [OUT] Pointer to location to write offset of the start
+ * of the mapped region within the buffer object. May be
+ * %NULL if this information is not required.
+ * @mapped_size_out: [OUT] Pointer to location to write size of the mapped
+ * region. May be %NULL if this information is not required.
+ *
+ * If successful, a reference will be taken on the buffer object. The caller
+ * must drop the reference with pvr_gem_object_put().
+ *
+ * Return:
+ * * The PowerVR buffer object mapped at @device_addr if one exists, or
+ * * %NULL otherwise.
+ */
+struct pvr_gem_object *
+pvr_vm_find_gem_object(struct pvr_vm_context *vm_ctx, u64 device_addr,
+ u64 *mapped_offset_out, u64 *mapped_size_out)
+{
+ struct pvr_gem_object *pvr_obj;
+ struct drm_gpuva *va;
+
+ mutex_lock(&vm_ctx->lock);
+
+ va = drm_gpuva_find_first(&vm_ctx->gpuva_mgr, device_addr, 1);
+ if (!va)
+ goto err_unlock;
+
+ pvr_obj = gem_to_pvr_gem(va->gem.obj);
+ pvr_gem_object_get(pvr_obj);
+
+ if (mapped_offset_out)
+ *mapped_offset_out = va->gem.offset;
+ if (mapped_size_out)
+ *mapped_size_out = va->va.range;
+
+ mutex_unlock(&vm_ctx->lock);
+
+ return pvr_obj;
+
+err_unlock:
+ mutex_unlock(&vm_ctx->lock);
+
+ return NULL;
+}
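+
+/*
+ * Illustrative caller (sketch, error handling elided); the reference taken
+ * on the returned object must be dropped with pvr_gem_object_put():
+ *
+ *   struct pvr_gem_object *pvr_obj =
+ *           pvr_vm_find_gem_object(vm_ctx, device_addr, NULL, NULL);
+ *
+ *   if (!pvr_obj)
+ *           return -ENOENT;
+ *
+ *   ...use pvr_obj...
+ *
+ *   pvr_gem_object_put(pvr_obj);
+ */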
+
+/**
+ * pvr_vm_get_fw_mem_context() - Get object representing firmware memory context
+ * @vm_ctx: Target VM context.
+ *
+ * Return:
+ * * FW object representing firmware memory context, or
+ * * %NULL if this VM context does not have a firmware memory context.
+ */
+struct pvr_fw_object *
+pvr_vm_get_fw_mem_context(struct pvr_vm_context *vm_ctx)
+{
+ return vm_ctx->fw_mem_ctx_obj;
+}
diff --git a/drivers/gpu/drm/imagination/pvr_vm.h b/drivers/gpu/drm/imagination/pvr_vm.h
new file mode 100644
index 000000000000..b98bc3981807
--- /dev/null
+++ b/drivers/gpu/drm/imagination/pvr_vm.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_VM_H
+#define PVR_VM_H
+
+#include "pvr_rogue_mmu_defs.h"
+
+#include <uapi/drm/pvr_drm.h>
+
+#include <linux/types.h>
+
+/* Forward declaration from "pvr_device.h" */
+struct pvr_device;
+struct pvr_file;
+
+/* Forward declaration from "pvr_gem.h" */
+struct pvr_gem_object;
+
+/* Forward declaration from "pvr_vm.c" */
+struct pvr_vm_context;
+
+/* Forward declaration from <uapi/drm/pvr_drm.h> */
+struct drm_pvr_ioctl_dev_query_args;
+
+/* Functions defined in pvr_vm.c */
+
+bool pvr_device_addr_is_valid(u64 device_addr);
+bool pvr_device_addr_and_size_are_valid(u64 device_addr, u64 size);
+
+struct pvr_vm_context *pvr_vm_create_context(struct pvr_device *pvr_dev,
+ bool is_userspace_context);
+
+int pvr_vm_map(struct pvr_vm_context *vm_ctx,
+ struct pvr_gem_object *pvr_obj, u64 pvr_obj_offset,
+ u64 device_addr, u64 size);
+int pvr_vm_unmap(struct pvr_vm_context *vm_ctx, u64 device_addr, u64 size);
+
+dma_addr_t pvr_vm_get_page_table_root_addr(struct pvr_vm_context *vm_ctx);
+
+int pvr_static_data_areas_get(const struct pvr_device *pvr_dev,
+ struct drm_pvr_ioctl_dev_query_args *args);
+int pvr_heap_info_get(const struct pvr_device *pvr_dev,
+ struct drm_pvr_ioctl_dev_query_args *args);
+const struct drm_pvr_heap *pvr_find_heap_containing(struct pvr_device *pvr_dev,
+ u64 addr, u64 size);
+
+struct pvr_gem_object *pvr_vm_find_gem_object(struct pvr_vm_context *vm_ctx,
+ u64 device_addr,
+ u64 *mapped_offset_out,
+ u64 *mapped_size_out);
+
+struct pvr_fw_object *
+pvr_vm_get_fw_mem_context(struct pvr_vm_context *vm_ctx);
+
+struct pvr_vm_context *pvr_vm_context_lookup(struct pvr_file *pvr_file, u32 handle);
+bool pvr_vm_context_put(struct pvr_vm_context *vm_ctx);
+void pvr_destroy_vm_contexts_for_file(struct pvr_file *pvr_file);
+
+#endif /* PVR_VM_H */
--
2.41.0