Hi Christian,
Thanks for your reply, and sorry I didn't make it clear.
On Mon 8/8 0:52, Christian König wrote:
On 03.08.22 at 10:32, Jeffy Chen wrote:
Currently we assume a one-to-one mapping between dmabuf and handle
when releasing GEM handles.
But that is not always true, since we may create extra handles for a
GEM object in cases like gem_open() and getfb{,2}().
A similar issue was reported at:
https://lore.kernel.org/all/20211105083308.392156-1-jay.xu@xxxxxxxxxxxxxx/
Another problem is that drm_gem_remove_prime_handles() now only
removes the handle to the exported dmabuf (gem_obj->dma_buf), so the
imported ones leak:
WARNING: CPU: 2 PID: 236 at drivers/gpu/drm/drm_prime.c:228 drm_prime_destroy_file_private+0x18/0x24
Let's fix these by using the handle to find the exact mapping to remove.
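For context, that warning is the leftover-entry check in
drm_prime_destroy_file_private() (drivers/gpu/drm/drm_prime.c), which
fires when a dma-buf <-> handle mapping is still sitting in the rbtrees
at file-release time; it looks roughly like:

void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
{
	/* by now drm_gem_release should've made sure the list is empty */
	WARN_ON(!RB_EMPTY_ROOT(&prime_fpriv->dmabufs));
}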
Well, we are clearly missing something here. As far as I can see the
current code is correct.
Creating multiple GEM handles for the same DMA-buf is possible, but
illegal.
In other words, when a GEM handle is exported as a DMA-buf and imported
again, you should intentionally always get the same handle.
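A minimal userspace sketch of that round trip, assuming libdrm and an
already-created BO handle bo_handle (helper name is illustrative, error
handling omitted):

#include <stdint.h>
#include <xf86drm.h>

/* Export a GEM handle as a dma-buf fd, then import that fd again on
 * the same DRM fd; PRIME is expected to return the original handle. */
static int same_handle_roundtrip(int fd, uint32_t bo_handle)
{
	int prime_fd;
	uint32_t imported;

	drmPrimeHandleToFD(fd, bo_handle, DRM_CLOEXEC, &prime_fd);
	drmPrimeFDToHandle(fd, prime_fd, &imported);

	return imported == bo_handle;	/* expected: true */
}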
These issues are not about importing an exported dma-buf; they are
about having multiple handles to a GEM object (which means multiple
handles to one dma-buf).
I know drm-prime tries to keep the dma-buf and handle mapping one to
one, but drm-gem allows creating extra handles for a GEM object, for
example (a sketch follows the list below):
drm_gem_open_ioctl -> drm_gem_handle_create_tail
drm_mode_getfb2_ioctl -> drm_gem_handle_create
drm_mode_getfb -> fb->funcs->create_handle
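A hypothetical userspace sequence that takes the drm_gem_open_ioctl()
path and ends up with a second handle to the same GEM object, sketched
with the legacy flink/open ioctls (helper name is illustrative, error
handling omitted):

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

/* FLINK gives the BO a global name; OPEN-ing that name on the same fd
 * goes through drm_gem_handle_create_tail() and allocates a new handle
 * that refers to the same GEM object. */
static uint32_t open_second_handle(int fd, uint32_t bo_handle)
{
	struct drm_gem_flink flink = { .handle = bo_handle };
	struct drm_gem_open open_arg = { 0 };

	ioctl(fd, DRM_IOCTL_GEM_FLINK, &flink);
	open_arg.name = flink.name;
	ioctl(fd, DRM_IOCTL_GEM_OPEN, &open_arg);

	return open_arg.handle;	/* new handle, same GEM object */
}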
So we allow a GEM object to have multiple handles, and a GEM object can
have at most one dma-buf; doesn't that mean a dma-buf could map to
multiple handles?
Or should we rewrite the GEM framework to limit each GEM object to a unique handle?
The other issue is that we leak the dma-buf <-> handle mapping for
imported dma-bufs, since drm_gem_remove_prime_handles() doesn't take
care of obj->import_attach->dmabuf.
But of course this can be fixed another way:
+++ b/drivers/gpu/drm/drm_gem.c
@@ -180,6 +180,9 @@ drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
 		drm_prime_remove_buf_handle_locked(&filp->prime,
 						   obj->dma_buf);
 	}
+	if (obj->import_attach)
+		drm_prime_remove_buf_handle_locked(&filp->prime,
+						   obj->import_attach->dmabuf);
 	mutex_unlock(&filp->prime.lock);
 }
 
So this is pretty much a clear NAK to this patch, since it either
shouldn't be necessary or something is seriously broken somewhere else.
Regards,
Christian.
Signed-off-by: Jeffy Chen <jeffy.chen@xxxxxxxxxxxxxx>
---
Changes in v2:
Fix a typo of rbtree.
drivers/gpu/drm/drm_gem.c | 17 +----------------
drivers/gpu/drm/drm_internal.h | 4 ++--
drivers/gpu/drm/drm_prime.c | 20 ++++++++++++--------
3 files changed, 15 insertions(+), 26 deletions(-)
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index eb0c2d041f13..ed39da383570 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -168,21 +168,6 @@ void drm_gem_private_object_init(struct drm_device *dev,
 }
 EXPORT_SYMBOL(drm_gem_private_object_init);
 
-static void
-drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
-{
-	/*
-	 * Note: obj->dma_buf can't disappear as long as we still hold a
-	 * handle reference in obj->handle_count.
-	 */
-	mutex_lock(&filp->prime.lock);
-	if (obj->dma_buf) {
-		drm_prime_remove_buf_handle_locked(&filp->prime,
-						   obj->dma_buf);
-	}
-	mutex_unlock(&filp->prime.lock);
-}
-
 /**
  * drm_gem_object_handle_free - release resources bound to userspace handles
  * @obj: GEM object to clean up.
@@ -253,7 +238,7 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
 	if (obj->funcs->close)
 		obj->funcs->close(obj, file_priv);
 
-	drm_gem_remove_prime_handles(obj, file_priv);
+	drm_prime_remove_buf_handle(&file_priv->prime, id);
 	drm_vma_node_revoke(&obj->vma_node, file_priv);
 
 	drm_gem_object_handle_put_unlocked(obj);
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index 1fbbc19f1ac0..7bb98e6a446d 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -74,8 +74,8 @@ int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
 
 void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv);
 void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv);
-void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
-					struct dma_buf *dma_buf);
+void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv,
+				 uint32_t handle);
 
 /* drm_drv.c */
 struct drm_minor *drm_minor_acquire(unsigned int minor_id);
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index e3f09f18110c..bd5366b16381 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -190,29 +190,33 @@ static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv,
 	return -ENOENT;
 }
 
-void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
-					struct dma_buf *dma_buf)
+void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv,
+				 uint32_t handle)
 {
 	struct rb_node *rb;
 
-	rb = prime_fpriv->dmabufs.rb_node;
+	mutex_lock(&prime_fpriv->lock);
+
+	rb = prime_fpriv->handles.rb_node;
 	while (rb) {
 		struct drm_prime_member *member;
 
-		member = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
-		if (member->dma_buf == dma_buf) {
+		member = rb_entry(rb, struct drm_prime_member, handle_rb);
+		if (member->handle == handle) {
 			rb_erase(&member->handle_rb, &prime_fpriv->handles);
 			rb_erase(&member->dmabuf_rb, &prime_fpriv->dmabufs);
 
-			dma_buf_put(dma_buf);
+			dma_buf_put(member->dma_buf);
 			kfree(member);
-			return;
-		} else if (member->dma_buf < dma_buf) {
+			break;
+		} else if (member->handle < handle) {
 			rb = rb->rb_right;
 		} else {
 			rb = rb->rb_left;
 		}
 	}
+
+	mutex_unlock(&prime_fpriv->lock);
 }
 
 void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)