Assume that pages may be pinned in a background task and use a
completion event to synchronise with callers that must access the
pages immediately.

Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
---
 drivers/gpu/drm/i915/gem/i915_gem_object.c    |  1 +
 drivers/gpu/drm/i915/gem/i915_gem_object.h    |  7 +--
 .../gpu/drm/i915/gem/i915_gem_object_types.h  |  3 ++
 drivers/gpu/drm/i915/gem/i915_gem_pages.c     | 53 +++++++++++++++----
 4 files changed, 52 insertions(+), 12 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index 2702e060102e..2c5a02274170 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -79,6 +79,7 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
 	obj->mm.madv = I915_MADV_WILLNEED;
 	INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN);
 	mutex_init(&obj->mm.get_page.lock);
+	init_completion(&obj->mm.completion);
 }
 
 /**
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index 7cb1871d7128..194e4fb6a259 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -240,7 +240,7 @@ int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
 int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
 
 static inline int __must_check
-i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
+i915_gem_object_pin_pages_async(struct drm_i915_gem_object *obj)
 {
 	might_lock(&obj->mm.lock);
 
@@ -250,6 +250,9 @@ i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
 	return __i915_gem_object_get_pages(obj);
 }
 
+int __must_check
+i915_gem_object_pin_pages(struct drm_i915_gem_object *obj);
+
 static inline bool
 i915_gem_object_has_pages(struct drm_i915_gem_object *obj)
 {
@@ -273,9 +276,7 @@ i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj)
 static inline void
 __i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
 {
-	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
 	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
-
 	atomic_dec(&obj->mm.pages_pin_count);
 }
 
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index 41d2e7c8e332..615a59b927d6 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -7,6 +7,7 @@
 #ifndef __I915_GEM_OBJECT_TYPES_H__
 #define __I915_GEM_OBJECT_TYPES_H__
 
+#include <linux/completion.h>
 #include <linux/reservation.h>
 
 #include <drm/drm_gem.h>
@@ -211,6 +212,8 @@ struct drm_i915_gem_object {
 	 */
 	struct list_head link;
 
+	struct completion completion;
+
 	/**
 	 * Advice: are the backing pages purgeable?
 	 */
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index 7868dd48d931..68262231f56f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -72,21 +72,18 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
 		spin_unlock(&i915->mm.obj_lock);
 	}
+
+	complete_all(&obj->mm.completion);
 }
 
 int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
 {
-	int err;
-
 	if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
 		DRM_DEBUG("Attempting to obtain a purgeable object\n");
 		return -EFAULT;
 	}
 
-	err = obj->ops->get_pages(obj);
-	GEM_BUG_ON(!err && !i915_gem_object_has_pages(obj));
-
-	return err;
+	return obj->ops->get_pages(obj);
 }
 
 /* Ensure that the associated pages are gathered from the backing storage
@@ -104,7 +101,7 @@ int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
 	if (err)
 		return err;
 
-	if (unlikely(!i915_gem_object_has_pages(obj))) {
+	if (!obj->mm.pages) {
 		GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
 
 		err = ____i915_gem_object_get_pages(obj);
@@ -120,6 +117,32 @@ int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
 	return err;
 }
 
+int i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
+{
+	int err;
+
+	err = i915_gem_object_pin_pages_async(obj);
+	if (err)
+		return err;
+
+	err = wait_for_completion_interruptible(&obj->mm.completion);
+	if (err)
+		goto err_unpin;
+
+	if (IS_ERR(obj->mm.pages)) {
+		err = PTR_ERR(obj->mm.pages);
+		goto err_unpin;
+	}
+
+	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
+	return 0;
+
+err_unpin:
+	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+	atomic_dec(&obj->mm.pages_pin_count);
+	return err;
+}
+
 /* Immediately discard the backing storage */
 void i915_gem_object_truncate(struct drm_i915_gem_object *obj)
 {
@@ -196,6 +219,9 @@ int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
 
 	GEM_BUG_ON(atomic_read(&obj->bind_count));
 
+	if (obj->mm.pages == ERR_PTR(-EAGAIN))
+		wait_for_completion(&obj->mm.completion);
+
 	/* May be called by shrinker from within get_pages() (on another bo) */
 	mutex_lock_nested(&obj->mm.lock, subclass);
 	if (unlikely(atomic_read(&obj->mm.pages_pin_count))) {
@@ -222,6 +248,7 @@ int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
 	if (!IS_ERR(pages))
 		obj->ops->put_pages(obj, pages);
 
+	reinit_completion(&obj->mm.completion);
 	err = 0;
 unlock:
 	mutex_unlock(&obj->mm.lock);
@@ -299,7 +326,7 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
 	type &= ~I915_MAP_OVERRIDE;
 
 	if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
-		if (unlikely(!i915_gem_object_has_pages(obj))) {
+		if (!obj->mm.pages) {
 			GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
 
 			err = ____i915_gem_object_get_pages(obj);
@@ -311,7 +338,6 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
 		atomic_inc(&obj->mm.pages_pin_count);
 		pinned = false;
 	}
-	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
 
 	ptr = page_unpack_bits(obj->mm.mapping, &has_type);
 	if (ptr && has_type != type) {
@@ -329,6 +355,15 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
 	}
 
 	if (!ptr) {
+		err = wait_for_completion_interruptible(&obj->mm.completion);
+		if (err)
+			goto err_unpin;
+
+		if (IS_ERR(obj->mm.pages)) {
+			err = PTR_ERR(obj->mm.pages);
+			goto err_unpin;
+		}
+
 		ptr = i915_gem_object_map(obj, type);
 		if (!ptr) {
 			err = -ENOMEM;
-- 
2.20.1
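
For readers less familiar with the pattern the patch relies on, here is a rough
userspace model of the synchronisation it introduces: a background task
populates the pages and signals a completion, while synchronous callers wait on
that completion and then check whether the result is an error pointer before
proceeding. This is only an illustrative sketch built on pthreads (a condition
variable standing in for the kernel completion); every name below (fake_obj,
get_pages_worker, pin_pages_sync) is made up and is not i915 code.

  #include <errno.h>
  #include <pthread.h>
  #include <stdio.h>
  #include <stdlib.h>

  /* Userspace stand-in for the object's page state and completion. */
  struct fake_obj {
  	pthread_mutex_t lock;
  	pthread_cond_t done;	/* plays the role of obj->mm.completion */
  	int completed;		/* set once, like complete_all() */
  	void *pages;		/* backing store, filled in by the worker */
  	int pages_err;		/* stand-in for IS_ERR(obj->mm.pages) */
  	int pin_count;		/* stand-in for obj->mm.pages_pin_count */
  };

  /* Background task: gather the "pages", then wake every waiter. */
  static void *get_pages_worker(void *arg)
  {
  	struct fake_obj *obj = arg;

  	pthread_mutex_lock(&obj->lock);
  	obj->pages = malloc(4096);		/* pretend page allocation */
  	obj->pages_err = obj->pages ? 0 : -ENOMEM;
  	obj->completed = 1;			/* complete_all() */
  	pthread_cond_broadcast(&obj->done);
  	pthread_mutex_unlock(&obj->lock);
  	return NULL;
  }

  /* Synchronous pin: take the pin, wait for the result, unpin on error. */
  static int pin_pages_sync(struct fake_obj *obj)
  {
  	int err = 0;

  	pthread_mutex_lock(&obj->lock);
  	obj->pin_count++;			/* the async pin */
  	while (!obj->completed)			/* wait_for_completion() */
  		pthread_cond_wait(&obj->done, &obj->lock);
  	if (obj->pages_err) {			/* IS_ERR(obj->mm.pages) */
  		obj->pin_count--;		/* drop the pin we just took */
  		err = obj->pages_err;
  	}
  	pthread_mutex_unlock(&obj->lock);
  	return err;
  }

  int main(void)
  {
  	struct fake_obj obj = {
  		.lock = PTHREAD_MUTEX_INITIALIZER,
  		.done = PTHREAD_COND_INITIALIZER,
  	};
  	pthread_t worker;

  	pthread_create(&worker, NULL, get_pages_worker, &obj);
  	printf("pin_pages_sync() = %d\n", pin_pages_sync(&obj));
  	pthread_join(&worker, NULL);
  	free(obj.pages);
  	return 0;
  }

The points the model tries to capture are the ones the patch depends on:
complete_all() wakes every waiter at once, so any number of callers can block
on the same in-flight get_pages, and reinit_completion() in put_pages rearms
the event so a later repopulation can be waited on again.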