> ---
> drivers/gpu/drm/msm/msm_atomic.c | 120 +------------------------------
> drivers/gpu/drm/msm/msm_drv.c | 7 +-
> drivers/gpu/drm/msm/msm_drv.h | 3 +-
> 3 files changed, 8 insertions(+), 122 deletions(-)
>
> diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
> index 204c66cbfd31..91ffded576d8 100644
> --- a/drivers/gpu/drm/msm/msm_atomic.c
> +++ b/drivers/gpu/drm/msm/msm_atomic.c
> @@ -18,8 +18,6 @@
>
> #include "msm_drv.h"
> #include "msm_kms.h"
> -#include "msm_gem.h"
> -#include "msm_fence.h"
>
> static void msm_atomic_wait_for_commit_done(
> struct drm_device *dev,
> @@ -39,7 +37,7 @@ static void msm_atomic_wait_for_commit_done(
> }
> }
>
> -static void msm_atomic_commit_tail(struct drm_atomic_state *state)
> +void msm_atomic_commit_tail(struct drm_atomic_state *state)
> {
> struct drm_device *dev = state->dev;
> struct msm_drm_private *priv = dev->dev_private;
> @@ -81,119 +79,3 @@ static void msm_atomic_commit_tail(struct drm_atomic_state *state)
>
> kms->funcs->complete_commit(kms, state);
> }
> -
> -/* The (potentially) asynchronous part of the commit. At this point
> - * nothing can fail short of armageddon.
> - */
> -static void commit_tail(struct drm_atomic_state *state)
> -{
> - drm_atomic_helper_wait_for_fences(state->dev, state, false);
> -
> - drm_atomic_helper_wait_for_dependencies(state);
> -
> - msm_atomic_commit_tail(state);
> -
> - drm_atomic_helper_commit_cleanup_done(state);
> -
> - drm_atomic_state_put(state);
> -}
> -
> -static void commit_work(struct work_struct *work)
> -{
> - struct drm_atomic_state *state = container_of(work,
> - struct drm_atomic_state,
> - commit_work);
> - commit_tail(state);
> -}
> -
> -/**
> - * drm_atomic_helper_commit - commit validated state object
> - * @dev: DRM device
> - * @state: the driver state object
> - * @nonblock: nonblocking commit
> - *
> - * This function commits a with drm_atomic_helper_check() pre-validated state
> - * object. This can still fail when e.g. the framebuffer reservation fails.
> - *
> - * RETURNS
> - * Zero for success or -errno.
> - */
> -int msm_atomic_commit(struct drm_device *dev,
> - struct drm_atomic_state *state, bool nonblock)
> -{
> - struct msm_drm_private *priv = dev->dev_private;
> - struct drm_crtc *crtc;
> - struct drm_crtc_state *crtc_state;
> - struct drm_plane *plane;
> - struct drm_plane_state *old_plane_state, *new_plane_state;
> - int i, ret;
> -
> - /*
> - * Note that plane->atomic_async_check() should fail if we need
> - * to re-assign hwpipe or anything that touches global atomic
> - * state, so we'll never go down the async update path in those
> - * cases.
> - */
> - if (state->async_update) {
> - ret = drm_atomic_helper_prepare_planes(dev, state);
> - if (ret)
> - return ret;
> -
> - drm_atomic_helper_async_commit(dev, state);
> - drm_atomic_helper_cleanup_planes(dev, state);
> - return 0;
> - }
> -
> - ret = drm_atomic_helper_setup_commit(state, nonblock);
> - if (ret)
> - return ret;
> -
> - INIT_WORK(&state->commit_work, commit_work);
> -
> - ret = drm_atomic_helper_prepare_planes(dev, state);
> - if (ret)
> - return ret;
> -
> - if (!nonblock) {
> - ret = drm_atomic_helper_wait_for_fences(dev, state, true);
> - if (ret)
> - goto error;
> - }
> -
> - /*
> - * This is the point of no return - everything below never fails except
> - * when the hw goes bonghits. Which means we can commit the new state on
> - * the software side now.
> - *
> - * swap driver private state while still holding state_lock
> - */
> - BUG_ON(drm_atomic_helper_swap_state(state, false) < 0);
> -
> - /*
> - * Everything below can be run asynchronously without the need to grab
> - * any modeset locks at all under one conditions: It must be guaranteed
> - * that the asynchronous work has either been cancelled (if the driver
> - * supports it, which at least requires that the framebuffers get
> - * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
> - * before the new state gets committed on the software side with
> - * drm_atomic_helper_swap_state().
> - *
> - * This scheme allows new atomic state updates to be prepared and
> - * checked in parallel to the asynchronous completion of the previous
> - * update. Which is important since compositors need to figure out the
> - * composition of the next frame right after having submitted the
> - * current layout.
> - */
> -
> - drm_atomic_state_get(state);
> - if (nonblock)
> - queue_work(system_unbound_wq, &state->commit_work);
> - else
> - commit_tail(state);
> -
> - return 0;
> -
> -error:
> - drm_atomic_helper_cleanup_planes(dev, state);
> - return ret;
> -}
> diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
> index e582d5889b66..fd9a7d0825cd 100644
> --- a/drivers/gpu/drm/msm/msm_drv.c
> +++ b/drivers/gpu/drm/msm/msm_drv.c
> @@ -120,7 +120,11 @@ static const struct drm_mode_config_funcs mode_config_funcs = {
> .fb_create = msm_framebuffer_create,
> .output_poll_changed = msm_fb_output_poll_changed,
> .atomic_check = msm_atomic_check,
> - .atomic_commit = msm_atomic_commit,
> + .atomic_commit = drm_atomic_helper_commit,
> +};
> +
> +static const struct drm_mode_config_helper_funcs mode_config_helper_funcs = {
> + .atomic_commit_tail = msm_atomic_commit_tail,
> };
>
> static inline
> @@ -694,6 +698,7 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
> * an unset value during call to drm_drv_uses_atomic_modeset()
> */
> ddev->mode_config.funcs = &mode_config_funcs;
> + ddev->mode_config.helper_private = &mode_config_helper_funcs;
>
> if (kms) {
> ret = kms->funcs->hw_init(kms);
> diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
> index e92376acbcfe..657ea052a1ee 100644
> --- a/drivers/gpu/drm/msm/msm_drv.h
> +++ b/drivers/gpu/drm/msm/msm_drv.h
> @@ -593,8 +593,7 @@ struct msm_format {
> uint32_t pixel_format;
> };
>
> -int msm_atomic_commit(struct drm_device *dev,
> - struct drm_atomic_state *state, bool nonblock);
> +void msm_atomic_commit_tail(struct drm_atomic_state *state);
> struct msm_kms_state *msm_kms_get_state(struct drm_atomic_state *state);
>
> void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,