The QMP mailbox expects to be notified of the ACD (Adaptive Clock
Distribution) state. Get a handle to the mailbox at probe time and poke
it at GMU resume. Since we don't fully support ACD yet, hardcode the
message to "val: 0" (state = disabled).

Tested-by: Neil Armstrong <neil.armstrong@xxxxxxxxxx> # on SM8550-QRD
Tested-by: Dmitry Baryshkov <dmitry.baryshkov@xxxxxxxxxx> # sm8450
Signed-off-by: Konrad Dybcio <konrad.dybcio@xxxxxxxxxx>
---
 drivers/gpu/drm/msm/adreno/a6xx_gmu.c | 21 +++++++++++++++++++++
 drivers/gpu/drm/msm/adreno/a6xx_gmu.h |  3 +++
 2 files changed, 24 insertions(+)

diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index 75984260898e..17e1e72f5d7d 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -980,11 +980,13 @@ static void a6xx_gmu_set_initial_bw(struct msm_gpu *gpu, struct a6xx_gmu *gmu)
 	dev_pm_opp_put(gpu_opp);
 }
 
+#define GMU_ACD_STATE_MSG_LEN	36
 int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
 {
 	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
 	struct msm_gpu *gpu = &adreno_gpu->base;
 	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+	char buf[GMU_ACD_STATE_MSG_LEN];
 	int status, ret;
 
 	if (WARN(!gmu->initialized, "The GMU is not set up yet\n"))
@@ -992,6 +994,18 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
 
 	gmu->hung = false;
 
+	/* Notify AOSS about the ACD state (unimplemented for now => disable it) */
+	if (!IS_ERR(gmu->qmp)) {
+		ret = snprintf(buf, sizeof(buf),
+			       "{class: gpu, res: acd, val: %d}",
+			       0 /* Hardcode ACD to be disabled for now */);
+		WARN_ON(ret >= GMU_ACD_STATE_MSG_LEN);
+
+		ret = qmp_send(gmu->qmp, buf, sizeof(buf));
+		if (ret)
+			dev_err(gmu->dev, "failed to send GPU ACD state\n");
+	}
+
 	/* Turn on the resources */
 	pm_runtime_get_sync(gmu->dev);
 
@@ -1744,6 +1758,10 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
 		goto detach_cxpd;
 	}
 
+	gmu->qmp = qmp_get(gmu->dev);
+	if (IS_ERR(gmu->qmp) && adreno_is_a7xx(adreno_gpu))
+		return PTR_ERR(gmu->qmp);
+
 	init_completion(&gmu->pd_gate);
 	complete_all(&gmu->pd_gate);
 	gmu->pd_nb.notifier_call = cxpd_notifier_cb;
@@ -1767,6 +1785,9 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
 
 	return 0;
 
+	if (!IS_ERR_OR_NULL(gmu->qmp))
+		qmp_put(gmu->qmp);
+
 detach_cxpd:
 	dev_pm_domain_detach(gmu->cxpd, false);
 
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
index 236f81a43caa..592b296aab22 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
@@ -8,6 +8,7 @@
 #include <linux/iopoll.h>
 #include <linux/interrupt.h>
 #include <linux/notifier.h>
+#include <linux/soc/qcom/qcom_aoss.h>
 
 #include "msm_drv.h"
 #include "a6xx_hfi.h"
@@ -96,6 +97,8 @@ struct a6xx_gmu {
 	/* For power domain callback */
 	struct notifier_block pd_nb;
 	struct completion pd_gate;
+
+	struct qmp *qmp;
 };
 
 static inline u32 gmu_read(struct a6xx_gmu *gmu, u32 offset)
-- 
2.42.0
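
For readers unfamiliar with the AOSS QMP interface used above: a consumer
resolves its mailbox handle with qmp_get(), sends a short fixed-size
key/value message with qmp_send(), and releases the handle with qmp_put().
The sketch below illustrates that flow in isolation. It assumes the
three-argument qmp_send() variant the patch relies on; my_notify_acd()
and its message length are hypothetical and not part of the driver.

/*
 * Minimal sketch of a QMP consumer (illustrative only).
 */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/soc/qcom/qcom_aoss.h>

#define MY_ACD_MSG_LEN	36	/* fixed-size message buffer, as in the patch */

static int my_notify_acd(struct device *dev, int acd_state)
{
	char buf[MY_ACD_MSG_LEN];
	struct qmp *qmp;
	int ret;

	/* Resolve the AOSS mailbox via the device's "qcom,qmp" DT phandle */
	qmp = qmp_get(dev);
	if (IS_ERR(qmp))
		return PTR_ERR(qmp);

	snprintf(buf, sizeof(buf), "{class: gpu, res: acd, val: %d}", acd_state);

	/* Send the whole padded buffer, mirroring the call in the patch */
	ret = qmp_send(qmp, buf, sizeof(buf));

	qmp_put(qmp);
	return ret;
}

Unlike this one-shot sketch, the GMU code keeps the handle for the lifetime
of the device: qmp_get() is called once in a6xx_gmu_init() and qmp_put() on
the teardown path, so resume only has to format and send the message.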