On 29/02/16 12:36, Tvrtko Ursulin wrote:
On 29/02/16 11:13, Dave Gordon wrote:
This is essentially Chris Wilson's patch of a similar name, reworked on
top of Alex Dai's recent patch:
drm/i915: Add i915_gem_object_vmap to map GEM object to virtual space
Chris' original commentary said:
We now have two implementations for vmapping a whole object, one for
dma-buf and one for the ringbuffer. If we couple the vmapping into
the obj->pages lifetime, then we can reuse an obj->vmapping for both
and at the same time couple it into the shrinker.
v2: Mark the failable kmalloc() as __GFP_NOWARN (vsyrjala)
v3: Call unpin_vmap from the right dmabuf unmapper
v4: Reimplemented the same functionality, but now as wrappers around the
recently-introduced i915_gem_object_vmap_range() from Alex's patch
mentioned above.
v5: separated from two minor but unrelated changes [Tvrtko Ursulin];
this is the third and most substantial portion.
Decided not to hold onto vmappings after the pin count goes to zero.
This may reduce the benefit of Chris' scheme a bit, but does avoid
any increased risk of exhausting kernel vmap space on 32-bit kernels
[Tvrtko Ursulin]. Potentially, the vunmap() could be deferred until
the put_pages() stage if a suitable notifier were written, but we're
not doing that here. Nonetheless, the simplification of both dmabuf
and ringbuffer code makes it worthwhile in its own right.
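(For illustration only: the deferred alternative mentioned above would
keep obj->vmapping across pin/unpin cycles and tear it down in the
put_pages() path instead, roughly as sketched below. This is NOT what
this patch does, and it would still need a shrinker notifier so idle
mappings could be reclaimed under memory pressure.)

	/* Hypothetical sketch of the deferred scheme, not this patch:
	 * only drop the kernel mapping when the backing pages go away.
	 */
	static void release_vmapping(struct drm_i915_gem_object *obj)
	{
		if (obj->vmapping) {
			vunmap(obj->vmapping);
			obj->vmapping = NULL;
		}
	}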
With this change, we have only one vmap() in the whole driver :)
After the split, the above line no longer applies to this patch. :)
Yes, actually that comment now applies to patch [3/7], which unified the
various vmap() callers into i915_gem_object_vmap_range(); this patch then
wraps that and bundles it with get_pages() and pin_pages() ... there will
still be one more respin, so I'll tweak the message a bit more :)
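(For reference, the caller-side pattern the new API pair gives us,
sketched from the hunks below; assumes struct_mutex is already held,
as in both callers:)

	void *addr;

	addr = i915_gem_object_pin_vmap(obj);
	if (IS_ERR(addr))
		return PTR_ERR(addr);

	/* ... use the contiguous kernel mapping at addr ... */

	i915_gem_object_unpin_vmap(obj);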
Signed-off-by: Dave Gordon <david.s.gordon@xxxxxxxxx>
Cc: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
Cc: Alex Dai <yu.dai@xxxxxxxxx>
Cc: Tvrtko Ursulin <tvrtko.ursulin@xxxxxxxxx>
---
 drivers/gpu/drm/i915/i915_drv.h         | 22 +++++++++++++++-----
 drivers/gpu/drm/i915/i915_gem.c         | 36 ++++++++++++++++++++++++++++++++
 drivers/gpu/drm/i915/i915_gem_dmabuf.c  | 37 ++++-----------------------------
 drivers/gpu/drm/i915/intel_ringbuffer.c | 10 ++++-----
 4 files changed, 62 insertions(+), 43 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 918b0bb..7facdb5 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2172,10 +2172,7 @@ struct drm_i915_gem_object {
struct scatterlist *sg;
int last;
} get_page;
-
- /* prime dma-buf support */
- void *dma_buf_vmapping;
- int vmapping_count;
+ void *vmapping;
/** Breadcrumb of last rendering to the buffer.
* There can only be one writer, but we allow for multiple readers.
@@ -2980,7 +2977,22 @@ static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
BUG_ON(obj->pages_pin_count == 0);
- obj->pages_pin_count--;
+ if (--obj->pages_pin_count == 0 && obj->vmapping) {
+ /*
+ * Releasing the vmapping here may yield less benefit than
+ * if we kept it until put_pages(), but on the other hand
+ * avoids issues of exhausting kernel vmappable address
+ * space on 32-bit kernels.
+ */
+ vunmap(obj->vmapping);
+ obj->vmapping = NULL;
+ }
+}
+
+void *__must_check i915_gem_object_pin_vmap(struct drm_i915_gem_object *obj);
+static inline void i915_gem_object_unpin_vmap(struct drm_i915_gem_object *obj)
+{
+ i915_gem_object_unpin_pages(obj);
}
void *__must_check i915_gem_object_vmap_range(struct drm_i915_gem_object *obj,
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index c126211..79d3d5b 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2235,6 +2235,8 @@ static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
ops->put_pages(obj);
obj->pages = NULL;
+ BUG_ON(obj->vmapping);
+
Do we know for certain the kernel will crash, rather than just maybe leak
a vmapping? I think not, so it would be better to just WARN.
Maybe even WARN_ON and return -EBUSY higher up, as the function already
does for obj->pages_pin_count?
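Something along these lines, say (sketch only; mirroring the existing
pages_pin_count check in i915_gem_object_put_pages()):

	if (obj->pages_pin_count)
		return -EBUSY;

	/* suggested: warn and refuse, instead of BUG_ON */
	if (WARN_ON(obj->vmapping))
		return -EBUSY;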
This BUG_ON marks the point where Chris originally had the vunmap(),
before we realised it would need a new notifier and decided to drop the
mapping in i915_gem_object_unpin_pages() instead. It was really just
there for me to check that you couldn't reach put_pages() without having
gone through that function (e.g. playing with pages_pin_count).
Hmm ... actually there *is* a path where pages_pin_count gets set to
zero other than by i915_gem_object_unpin_pages() :(
If you try to free a pinned object, you get a WARNING, but then
i915_gem_free_object() will force pages_pin_count to 0 anyway, and then
call i915_gem_object_put_pages(), which would definitely trigger the
BUG_ON().
So I think I'll make it a WARN_ON() and then release the mapping anyway.
Then if anyone wants to extend the vmap lifetime back to what Chris
originally had, it's just a matter of removing the WARNing here and the
early-unmap code in i915_gem_object_unpin_pages().
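i.e. roughly this in put_pages() (sketch of the planned respin, not yet
posted):

	/* Any vmapping should have been dropped by the final
	 * i915_gem_object_unpin_pages(); the forced-unpin path in
	 * i915_gem_free_object() can still get here with one live,
	 * so warn but clean up rather than BUG.
	 */
	if (WARN_ON(obj->vmapping)) {
		vunmap(obj->vmapping);
		obj->vmapping = NULL;
	}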
.Dave.
i915_gem_object_invalidate(obj);
return 0;
@@ -2452,6 +2454,40 @@ void *i915_gem_object_vmap_range(struct drm_i915_gem_object *obj,
return addr;
}
+/**
+ * i915_gem_object_pin_vmap - pin a GEM object and map it into kernel space
+ * @obj: the GEM object to be mapped
+ *
+ * Combines the functions of get_pages(), pin_pages() and vmap_range() on
+ * the whole object. The caller should release the mapping by calling
+ * i915_gem_object_unpin_vmap() when it is no longer required.
+ *
+ * Returns the address at which the object has been mapped, or an ERR_PTR
+ * on failure.
+ */
+void *i915_gem_object_pin_vmap(struct drm_i915_gem_object *obj)
+{
+ int ret;
+
+ ret = i915_gem_object_get_pages(obj);
+ if (ret)
+ return ERR_PTR(ret);
+
+ i915_gem_object_pin_pages(obj);
+
+ if (obj->vmapping == NULL) {
+ obj->vmapping = i915_gem_object_vmap_range(obj, 0,
+ obj->base.size >> PAGE_SHIFT);
+
+ if (obj->vmapping == NULL) {
+ i915_gem_object_unpin_pages(obj);
+ return ERR_PTR(-ENOMEM);
+ }
+ }
+
+ return obj->vmapping;
+}
+
void i915_vma_move_to_active(struct i915_vma *vma,
struct drm_i915_gem_request *req)
{
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 68e21d1..adc7b5e 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -108,41 +108,17 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
struct drm_device *dev = obj->base.dev;
+ void *addr;
int ret;
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ERR_PTR(ret);
- if (obj->dma_buf_vmapping) {
- obj->vmapping_count++;
- goto out_unlock;
- }
-
- ret = i915_gem_object_get_pages(obj);
- if (ret)
- goto err;
-
- i915_gem_object_pin_pages(obj);
-
- ret = -ENOMEM;
-
- obj->dma_buf_vmapping = i915_gem_object_vmap_range(obj, 0,
- dma_buf->size >> PAGE_SHIFT);
-
- if (!obj->dma_buf_vmapping)
- goto err_unpin;
-
- obj->vmapping_count = 1;
-out_unlock:
+ addr = i915_gem_object_pin_vmap(obj);
mutex_unlock(&dev->struct_mutex);
- return obj->dma_buf_vmapping;
-err_unpin:
- i915_gem_object_unpin_pages(obj);
-err:
- mutex_unlock(&dev->struct_mutex);
- return ERR_PTR(ret);
+ return addr;
}
static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
@@ -151,12 +127,7 @@ static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
struct drm_device *dev = obj->base.dev;
mutex_lock(&dev->struct_mutex);
- if (--obj->vmapping_count == 0) {
- vunmap(obj->dma_buf_vmapping);
- obj->dma_buf_vmapping = NULL;
-
- i915_gem_object_unpin_pages(obj);
- }
+ i915_gem_object_unpin_vmap(obj);
mutex_unlock(&dev->struct_mutex);
}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 15e2d29..47f186e 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -2056,7 +2056,7 @@ static int init_phys_status_page(struct intel_engine_cs *ring)
void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
{
if (HAS_LLC(ringbuf->obj->base.dev) && !ringbuf->obj->stolen)
- vunmap(ringbuf->virtual_start);
+ i915_gem_object_unpin_vmap(ringbuf->obj);
else
iounmap(ringbuf->virtual_start);
ringbuf->virtual_start = NULL;
@@ -2080,10 +2080,10 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
if (ret)
goto unpin;
- ringbuf->virtual_start = i915_gem_object_vmap_range(obj, 0,
- ringbuf->size >> PAGE_SHIFT);
- if (ringbuf->virtual_start == NULL) {
- ret = -ENOMEM;
+ ringbuf->virtual_start = i915_gem_object_pin_vmap(obj);
+ if (IS_ERR(ringbuf->virtual_start)) {
+ ret = PTR_ERR(ringbuf->virtual_start);
+ ringbuf->virtual_start = NULL;
goto unpin;
}
} else {
Looks OK to me. Even the BUG_ON is not critical, since it can't happen
when using the API, although I don't like to see BUG_ONs in general. Either way:
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@xxxxxxxxx>
Regards,
Tvrtko