From: Jiasheng Jiang <jiasheng@xxxxxxxxxxx>

Add drmm_alloc_workqueue() and drmm_alloc_ordered_workqueue(), the
helpers that provide managed workqueue cleanup. The workqueue will be
destroyed with the final reference of the DRM device.

Signed-off-by: Jiasheng Jiang <jiasheng@xxxxxxxxxxx>
Reviewed-by: Daniel Vetter <daniel.vetter@xxxxxxxx>
[jhugo: fix API to return the alloc'd workqueue]
Signed-off-by: Jeffrey Hugo <quic_jhugo@xxxxxxxxxxx>
Reviewed-by: Carl Vanderlip <quic_carlv@xxxxxxxxxxx>
Reviewed-by: Pranjal Ramajor Asha Kanojiya <quic_pkanojiy@xxxxxxxxxxx>
---
 drivers/gpu/drm/drm_managed.c | 82 +++++++++++++++++++++++++++++++++++
 include/drm/drm_managed.h     |  8 ++++
 2 files changed, 90 insertions(+)

diff --git a/drivers/gpu/drm/drm_managed.c b/drivers/gpu/drm/drm_managed.c
index 7646f67bda4e..d5135a471ff2 100644
--- a/drivers/gpu/drm/drm_managed.c
+++ b/drivers/gpu/drm/drm_managed.c
@@ -310,3 +310,85 @@ void __drmm_mutex_release(struct drm_device *dev, void *res)
 {
 	struct mutex *lock = res;
 
 	mutex_destroy(lock);
 }
 EXPORT_SYMBOL(__drmm_mutex_release);
+
+static void drmm_destroy_workqueue(struct drm_device *dev, void *res)
+{
+	struct workqueue_struct *wq = res;
+
+	destroy_workqueue(wq);
+}
+
+/**
+ * drmm_alloc_workqueue - &drm_device-managed alloc_workqueue()
+ * @dev: DRM device
+ * @fmt: printf-style format string for the workqueue name
+ * @flags: WQ_* flags (see linux/workqueue.h)
+ * @max_active: maximum number of in-flight work items, 0 for the default
+ *
+ * This is a &drm_device-managed version of alloc_workqueue(). The
+ * workqueue is automatically destroyed on the final drm_dev_put().
+ *
+ * Returns: valid pointer on success, NULL on error.
+ */
+struct workqueue_struct *drmm_alloc_workqueue(struct drm_device *dev,
+					      const char *fmt, unsigned int flags,
+					      int max_active, ...)
+{
+	struct workqueue_struct *wq;
+	char name[64];
+	va_list args;
+
+	/*
+	 * alloc_workqueue() is variadic, so a va_list must not be handed to
+	 * it as if it were a single vararg; format the name here (truncating
+	 * if necessary) and pass it through "%s" instead.
+	 */
+	va_start(args, max_active);
+	vsnprintf(name, sizeof(name), fmt, args);
+	va_end(args);
+
+	wq = alloc_workqueue("%s", flags, max_active, name);
+	if (!wq)
+		return NULL;
+
+	/*
+	 * On failure drmm_add_action_or_reset() already invokes the release
+	 * action, which destroys the workqueue; don't destroy it again here.
+	 */
+	if (drmm_add_action_or_reset(dev, drmm_destroy_workqueue, wq))
+		return NULL;
+
+	return wq;
+}
+EXPORT_SYMBOL(drmm_alloc_workqueue);
+
+/**
+ * drmm_alloc_ordered_workqueue - &drm_device-managed
+ * alloc_ordered_workqueue()
+ * @dev: DRM device
+ * @fmt: printf-style format string for the workqueue name
+ * @flags: WQ_* flags (see linux/workqueue.h)
+ *
+ * This is a &drm_device-managed version of alloc_ordered_workqueue(). The
+ * workqueue is automatically destroyed on the final drm_dev_put().
+ *
+ * Returns: valid pointer on success, NULL on error.
+ */
+struct workqueue_struct *drmm_alloc_ordered_workqueue(struct drm_device *dev,
+						      const char *fmt,
+						      unsigned int flags, ...)
+{
+	struct workqueue_struct *wq;
+	char name[64];
+	va_list args;
+
+	/* Same va_list pitfall as in drmm_alloc_workqueue(): format first. */
+	va_start(args, flags);
+	vsnprintf(name, sizeof(name), fmt, args);
+	va_end(args);
+
+	wq = alloc_ordered_workqueue("%s", flags, name);
+	if (!wq)
+		return NULL;
+
+	/* drmm_add_action_or_reset() destroys the workqueue on failure. */
+	if (drmm_add_action_or_reset(dev, drmm_destroy_workqueue, wq))
+		return NULL;
+
+	return wq;
+}
+EXPORT_SYMBOL(drmm_alloc_ordered_workqueue);

diff --git a/include/drm/drm_managed.h b/include/drm/drm_managed.h
index f547b09ca023..cb42fb252648 100644
--- a/include/drm/drm_managed.h
+++ b/include/drm/drm_managed.h
@@ -127,4 +127,12 @@
 	drmm_add_action_or_reset(dev, __drmm_mutex_release, lock);	\
 })	\
 
+struct workqueue_struct *drmm_alloc_workqueue(struct drm_device *dev,
+					      const char *fmt, unsigned int flags,
+					      int max_active, ...);
+
+struct workqueue_struct *drmm_alloc_ordered_workqueue(struct drm_device *dev,
+						      const char *fmt,
+						      unsigned int flags, ...);
+
 #endif
-- 
2.34.1