From: Qingqing Zhuo <qingqing.zhuo@xxxxxxx>

[Why]
With the addition of the dc_lock acquisition before
dc_link_handle_hpd_rx_irq, a deadlock can occur: commit state, holding
dc_lock, sends a request for MST payload allocation and waits for HPD
RX handling to process the DOWN_REP reply, while HPD RX handling in
turn waits for dc_lock.

[How]
Move the MST message handling forward in handle_hpd_rx_irq so that it
no longer relies on the call to dc_link_handle_hpd_rx_irq and therefore
does not need dc_lock.

Signed-off-by: Qingqing Zhuo <qingqing.zhuo@xxxxxxx>
Reviewed-by: Harry Wentland <Harry.Wentland@xxxxxxx>
Acked-by: Eryk Brol <eryk.brol@xxxxxxx>
---
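For illustration only, not part of the patch: a minimal userspace
sketch of the ordering issue described in [Why]/[How] above. This is
not DC code; the thread names, the down_rep semaphore and the
simplified dc_lock are stand-ins. It models the fixed ordering, in
which the MST reply is delivered before dc_lock is taken; with the old
ordering, hpd_rx_thread would block on dc_lock before it could deliver
DOWN_REP, while commit_thread holds dc_lock waiting for exactly that
reply.

	/* Build with: gcc -pthread sketch.c */
	#include <pthread.h>
	#include <semaphore.h>
	#include <stdio.h>

	static pthread_mutex_t dc_lock = PTHREAD_MUTEX_INITIALIZER;
	static sem_t down_rep;		/* "DOWN_REP received" event */

	static void *commit_thread(void *arg)
	{
		(void)arg;
		/* Commit state holds dc_lock while waiting for the MST reply. */
		pthread_mutex_lock(&dc_lock);
		printf("commit: payload request sent, waiting for DOWN_REP\n");
		sem_wait(&down_rep);
		printf("commit: DOWN_REP processed, payload allocated\n");
		pthread_mutex_unlock(&dc_lock);
		return NULL;
	}

	static void *hpd_rx_thread(void *arg)
	{
		(void)arg;
		/* Fixed ordering: deliver the MST reply before taking dc_lock. */
		printf("hpd_rx: delivering DOWN_REP without dc_lock\n");
		sem_post(&down_rep);

		/* The remaining (non-MST) HPD RX work still runs under dc_lock. */
		pthread_mutex_lock(&dc_lock);
		printf("hpd_rx: rest of the IRQ handling under dc_lock\n");
		pthread_mutex_unlock(&dc_lock);
		return NULL;
	}

	int main(void)
	{
		pthread_t commit, hpd_rx;

		sem_init(&down_rep, 0, 0);
		pthread_create(&commit, NULL, commit_thread, NULL);
		pthread_create(&hpd_rx, NULL, hpd_rx_thread, NULL);
		pthread_join(commit, NULL);
		pthread_join(hpd_rx, NULL);
		return 0;
	}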
 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 21 ++++++++++++++-----
 .../gpu/drm/amd/display/dc/core/dc_link_dp.c |  2 +-
 drivers/gpu/drm/amd/display/dc/dc_link.h     |  4 ++++
 3 files changed, 21 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 2d2c8a3c809d..59f738008734 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -2566,11 +2566,9 @@ static void handle_hpd_rx_irq(void *param)
 	bool result = false;
 	enum dc_connection_type new_connection_type = dc_connection_none;
 	struct amdgpu_device *adev = drm_to_adev(dev);
-#ifdef CONFIG_DRM_AMD_DC_HDCP
 	union hpd_irq_data hpd_irq_data;

 	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
-#endif

 	/*
 	 * TODO:Temporary add mutex to protect hpd interrupt not have a gpio
@@ -2580,6 +2578,21 @@ static void handle_hpd_rx_irq(void *param)
 	if (dc_link->type != dc_connection_mst_branch)
 		mutex_lock(&aconnector->hpd_lock);

+	read_hpd_rx_irq_data(dc_link, &hpd_irq_data);
+
+	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
+		(dc_link->type == dc_connection_mst_branch)) {
+		if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) {
+			result = true;
+			dm_handle_hpd_rx_irq(aconnector);
+			goto out;
+		} else if (hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
+			result = false;
+			dm_handle_hpd_rx_irq(aconnector);
+			goto out;
+		}
+	}
+
 	mutex_lock(&adev->dm.dc_lock);
 #ifdef CONFIG_DRM_AMD_DC_HDCP
 	result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL);
@@ -2588,6 +2601,7 @@ static void handle_hpd_rx_irq(void *param)
 #endif
 	mutex_unlock(&adev->dm.dc_lock);

+out:
 	if (result && !is_mst_root_connector) {
 		/* Downstream Port status changed. */
 		if (!dc_link_detect_sink(dc_link, &new_connection_type))
@@ -2628,9 +2642,6 @@ static void handle_hpd_rx_irq(void *param)
 			hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
 	}
 #endif
-	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
-		(dc_link->type == dc_connection_mst_branch))
-		dm_handle_hpd_rx_irq(aconnector);

 	if (dc_link->type != dc_connection_mst_branch) {
 		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index dbbc0ec0b699..6b11d4af54af 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -1874,7 +1874,7 @@ static struct dc_link_settings get_max_link_cap(struct dc_link *link)
 	return max_link_cap;
 }

-static enum dc_status read_hpd_rx_irq_data(
+enum dc_status read_hpd_rx_irq_data(
 	struct dc_link *link,
 	union hpd_irq_data *irq_data)
 {
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
index 66445e34fd37..6d9a60c9dcc0 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
@@ -259,6 +259,10 @@ enum dc_status dc_link_reallocate_mst_payload(struct dc_link *link);
 bool dc_link_handle_hpd_rx_irq(struct dc_link *dc_link,
 		union hpd_irq_data *hpd_irq_dpcd_data, bool *out_link_loss);

+enum dc_status read_hpd_rx_irq_data(
+	struct dc_link *link,
+	union hpd_irq_data *irq_data);
+
 struct dc_sink_init_data;

 struct dc_sink *dc_link_add_remote_sink(
--
2.25.1

_______________________________________________
amd-gfx mailing list
amd-gfx@xxxxxxxxxxxxxxxxxxxxx
https://lists.freedesktop.org/mailman/listinfo/amd-gfx