The VMA offset manager uses a device-global address space, so any user can
currently map any offset node they want; they only need to guess the right
offset. If we wanted per open-file offset spaces, we'd either need
VM_NONLINEAR mappings or multiple "struct address_space" trees. As neither
really scales, we implement access management in the VMA manager itself.

We use an rb-tree to store open-files for each VMA node. On each mmap call,
GEM, TTM or the drivers must check whether the current user is allowed to
map this file.

We add a separate lock for each node as there is no generic lock available
for the caller to protect the node easily.

Signed-off-by: David Herrmann <dh.herrmann@xxxxxxxxx>
---
 drivers/gpu/drm/drm_vma_manager.c | 148 ++++++++++++++++++++++++++++++++++++++
 include/drm/drm_vma_manager.h     |  16 +++++
 2 files changed, 164 insertions(+)

diff --git a/drivers/gpu/drm/drm_vma_manager.c b/drivers/gpu/drm/drm_vma_manager.c
index d07247e..781f7da 100644
--- a/drivers/gpu/drm/drm_vma_manager.c
+++ b/drivers/gpu/drm/drm_vma_manager.c
@@ -25,6 +25,7 @@
 #include <drm/drmP.h>
 #include <drm/drm_mm.h>
 #include <drm/drm_vma_manager.h>
+#include <linux/fs.h>
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/rbtree.h>
@@ -58,6 +59,13 @@
  * must always be page-aligned (as usual).
  * If you want to get a valid byte-based user-space address for a given offset,
  * please see drm_vma_node_offset_addr().
+ *
+ * In addition to offset management, the vma offset manager also handles access
+ * management. For every open-file context that is allowed to access a given
+ * node, you must call drm_vma_node_allow(). Otherwise, an mmap() call on this
+ * open-file with the offset of the node will fail with -EACCES. To revoke
+ * access again, use drm_vma_node_revoke(). However, the caller is responsible
+ * for destroying already existing mappings, if required.
  */
 
 /**
@@ -258,3 +266,143 @@ void drm_vma_offset_remove(struct drm_vma_offset_manager *mgr,
 	write_unlock(&mgr->vm_lock);
 }
 EXPORT_SYMBOL(drm_vma_offset_remove);
+
+/**
+ * drm_vma_node_allow - Add open-file to list of allowed users
+ * @node: Node to modify
+ * @filp: Open file to add
+ *
+ * Add @filp to the list of allowed open-files for this node. If @filp is
+ * already on this list, the ref-count is incremented.
+ *
+ * The list of allowed users is preserved across drm_vma_offset_add() and
+ * drm_vma_offset_remove() calls. You may even call it if the node is currently
+ * not added to any offset-manager.
+ *
+ * You must remove all open-files the same number of times as you added them
+ * before destroying the node. Otherwise, you will leak memory.
+ *
+ * This is locked against concurrent access internally.
+ *
+ * RETURNS:
+ * 0 on success, negative error code on internal failure (out-of-mem)
+ */
+int drm_vma_node_allow(struct drm_vma_offset_node *node, struct file *filp)
+{
+	struct rb_node **iter;
+	struct rb_node *parent = NULL;
+	struct drm_vma_offset_file *entry;
+	int ret = 0;
+
+	write_lock(&node->vm_lock);
+
+	iter = &node->vm_files.rb_node;
+
+	while (likely(*iter)) {
+		parent = *iter;
+		entry = rb_entry(*iter, struct drm_vma_offset_file, vm_rb);
+
+		if (filp == entry->vm_filp) {
+			entry->vm_count++;
+			goto unlock;
+		} else if (filp > entry->vm_filp) {
+			iter = &(*iter)->rb_right;
+		} else {
+			iter = &(*iter)->rb_left;
+		}
+	}
+
+	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+	if (!entry) {
+		ret = -ENOMEM;
+		goto unlock;
+	}
+
+	entry->vm_filp = filp;
+	entry->vm_count = 1;
+	rb_link_node(&entry->vm_rb, parent, iter);
+	rb_insert_color(&entry->vm_rb, &node->vm_files);
+
+unlock:
+	write_unlock(&node->vm_lock);
+	return ret;
+}
+EXPORT_SYMBOL(drm_vma_node_allow);
+
+/**
+ * drm_vma_node_revoke - Remove open-file from list of allowed users
+ * @node: Node to modify
+ * @filp: Open file to remove
+ *
+ * Decrement the ref-count of @filp in the list of allowed open-files on @node.
+ * If the ref-count drops to zero, remove @filp from the list. You must call
+ * this once for every drm_vma_node_allow() on @filp.
+ *
+ * This is locked against concurrent access internally.
+ *
+ * If @filp is not on the list, nothing is done.
+ */
+void drm_vma_node_revoke(struct drm_vma_offset_node *node, struct file *filp)
+{
+	struct drm_vma_offset_file *entry;
+	struct rb_node *iter;
+
+	write_lock(&node->vm_lock);
+
+	iter = node->vm_files.rb_node;
+	while (likely(iter)) {
+		entry = rb_entry(iter, struct drm_vma_offset_file, vm_rb);
+		if (filp == entry->vm_filp) {
+			if (!--entry->vm_count) {
+				rb_erase(&entry->vm_rb, &node->vm_files);
+				kfree(entry);
+			}
+			break;
+		} else if (filp > entry->vm_filp) {
+			iter = iter->rb_right;
+		} else {
+			iter = iter->rb_left;
+		}
+	}
+
+	write_unlock(&node->vm_lock);
+}
+EXPORT_SYMBOL(drm_vma_node_revoke);
+
+/**
+ * drm_vma_node_is_allowed - Check whether an open-file is granted access
+ * @node: Node to check
+ * @filp: Open-file to check for
+ *
+ * Search @node to see whether @filp is currently on the list of allowed
+ * open-files (see drm_vma_node_allow()).
+ *
+ * This is locked against concurrent access internally.
+ *
+ * RETURNS:
+ * true iff @filp is on the list
+ */
+bool drm_vma_node_is_allowed(struct drm_vma_offset_node *node,
+			     struct file *filp)
+{
+	struct drm_vma_offset_file *entry;
+	struct rb_node *iter;
+
+	read_lock(&node->vm_lock);
+
+	iter = node->vm_files.rb_node;
+	while (likely(iter)) {
+		entry = rb_entry(iter, struct drm_vma_offset_file, vm_rb);
+		if (filp == entry->vm_filp)
+			break;
+		else if (filp > entry->vm_filp)
+			iter = iter->rb_right;
+		else
+			iter = iter->rb_left;
+	}
+
+	read_unlock(&node->vm_lock);
+
+	return iter;
+}
+EXPORT_SYMBOL(drm_vma_node_is_allowed);
diff --git a/include/drm/drm_vma_manager.h b/include/drm/drm_vma_manager.h
index 355a0e5..52bb189 100644
--- a/include/drm/drm_vma_manager.h
+++ b/include/drm/drm_vma_manager.h
@@ -24,16 +24,25 @@
  */
 
 #include <drm/drm_mm.h>
+#include <linux/fs.h>
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/rbtree.h>
 #include <linux/spinlock.h>
 #include <linux/types.h>
 
+struct drm_vma_offset_file {
+	struct rb_node vm_rb;
+	struct file *vm_filp;
+	unsigned long vm_count;
+};
+
 struct drm_vma_offset_node {
+	rwlock_t vm_lock;
 	struct drm_mm_node vm_node;
 	struct rb_node vm_rb;
 	unsigned long vm_pages;
+	struct rb_root vm_files;
 };
 
 struct drm_vma_offset_manager {
@@ -54,6 +63,11 @@ int drm_vma_offset_add(struct drm_vma_offset_manager *mgr,
 void drm_vma_offset_remove(struct drm_vma_offset_manager *mgr,
 			   struct drm_vma_offset_node *node);
 
+int drm_vma_node_allow(struct drm_vma_offset_node *node, struct file *filp);
+void drm_vma_node_revoke(struct drm_vma_offset_node *node, struct file *filp);
+bool drm_vma_node_is_allowed(struct drm_vma_offset_node *node,
+			     struct file *filp);
+
 /**
  * drm_vma_offset_exact_lookup() - Look up node by exact address
  * @mgr: Manager object
@@ -85,6 +99,8 @@ drm_vma_offset_exact_lookup(struct drm_vma_offset_manager *mgr,
 static inline void drm_vma_node_reset(struct drm_vma_offset_node *node)
 {
 	memset(node, 0, sizeof(*node));
+	node->vm_files = RB_ROOT;
+	rwlock_init(&node->vm_lock);
 }
 
 /**
-- 
1.8.3.2
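
For illustration (not part of the patch), a driver could wire these helpers
up roughly as sketched below. Everything named my_* is a hypothetical
placeholder; only drm_vma_node_allow(), drm_vma_node_revoke() and
drm_vma_node_is_allowed() are the interfaces added above. The idea is to
grant access when an open-file gains a handle to the object, check the grant
in the mmap path, and drop it again when the handle is closed:

#include <drm/drm_vma_manager.h>
#include <linux/fs.h>

/* Hypothetical driver-private object embedding an offset node. */
struct my_obj {
	struct drm_vma_offset_node vma_node;
	/* ... other driver state ... */
};

/* Called when @filp obtains a handle to @obj: grant mmap access. */
static int my_obj_open(struct my_obj *obj, struct file *filp)
{
	/* May fail with -ENOMEM; ref-counted if called repeatedly. */
	return drm_vma_node_allow(&obj->vma_node, filp);
}

/* Called when @filp drops its handle: revoke the grant again. */
static void my_obj_close(struct my_obj *obj, struct file *filp)
{
	drm_vma_node_revoke(&obj->vma_node, filp);
}

/* In the driver's mmap path: reject callers without a grant. */
static int my_obj_mmap_check(struct my_obj *obj, struct file *filp)
{
	if (!drm_vma_node_is_allowed(&obj->vma_node, filp))
		return -EACCES;
	return 0;
}

Note that drm_vma_node_revoke() only blocks future mmap() calls; as
documented in the kdoc above, tearing down mappings that already exist
remains the caller's responsibility.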