On Wed, 2017-07-19 at 16:46 +0300, Imre Deak wrote:
> Currently we may process up/down message transactions containing
> uninitialized data. This can happen if there was an error during the
> reception of any message in the transaction, but we happened to receive
> the last message correctly with the end-of-message flag set.
>
> To avoid this abort the reception of the transaction when the first
> error is detected, rejecting any messages until a message with the
> start-of-message flag is received (which will start a new transaction).
> This is also what the DP 1.4 spec 2.11.8.2 calls for in this case.
>
> In addtion this also prevents receiving bogus transactions without the

s/addtion/addition/

With the one small spelling fix:

Reviewed-by: Lyude <lyude@xxxxxxxxxx>

> first message with the start-of-message flag set.
>
> v2:
> - unchanged
> v3:
> - git add the part that actually skips messages after an error in
>   drm_dp_sideband_msg_build()
>
> Cc: Dave Airlie <airlied@xxxxxxxxxx>
> Cc: Lyude <lyude@xxxxxxxxxx>
> Cc: Daniel Vetter <daniel.vetter@xxxxxxxx>
> Signed-off-by: Imre Deak <imre.deak@xxxxxxxxx>
> ---
>  drivers/gpu/drm/drm_dp_mst_topology.c | 31 ++++++++++++++++++++++++-------
>  1 file changed, 24 insertions(+), 7 deletions(-)
>
> diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
> index 78e9a7d58794..41b492f99955 100644
> --- a/drivers/gpu/drm/drm_dp_mst_topology.c
> +++ b/drivers/gpu/drm/drm_dp_mst_topology.c
> @@ -332,6 +332,13 @@ static bool drm_dp_sideband_msg_build(struct drm_dp_sideband_msg_rx *msg,
>                  return false;
>          }
>
> +        /*
> +         * ignore out-of-order messages or messages that are part of a
> +         * failed transaction
> +         */
> +        if (!recv_hdr.somt && !msg->have_somt)
> +                return false;
> +
>          /* get length contained in this portion */
>          msg->curchunk_len = recv_hdr.msg_len;
>          msg->curchunk_hdrlen = hdrlen;
> @@ -2168,7 +2175,7 @@ int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
>  }
>  EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);
>
> -static void drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
> +static bool drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
>  {
>          int len;
>          u8 replyblock[32];
> @@ -2183,12 +2190,12 @@ static void drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
>                                 replyblock, len);
>          if (ret != len) {
>                  DRM_DEBUG_KMS("failed to read DPCD down rep %d %d\n", len, ret);
> -                return;
> +                return false;
>          }
>          ret = drm_dp_sideband_msg_build(msg, replyblock, len, true);
>          if (!ret) {
>                  DRM_DEBUG_KMS("sideband msg build failed %d\n", replyblock[0]);
> -                return;
> +                return false;
>          }
>          replylen = msg->curchunk_len + msg->curchunk_hdrlen;
>
> @@ -2202,25 +2209,30 @@ static void drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
>                  if (ret != len) {
>                          DRM_DEBUG_KMS("failed to read a chunk (len %d, ret %d)\n",
>                                        len, ret);
> -                        return;
> +                        return false;
>                  }
>
>                  ret = drm_dp_sideband_msg_build(msg, replyblock, len, false);
>                  if (!ret) {
>                          DRM_DEBUG_KMS("failed to build sideband msg\n");
> -                        return;
> +                        return false;
>                  }
>
>                  curreply += len;
>                  replylen -= len;
>          }
> +        return true;
>  }
>
>  static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
>  {
>          int ret = 0;
>
> -        drm_dp_get_one_sb_msg(mgr, false);
> +        if (!drm_dp_get_one_sb_msg(mgr, false)) {
> +                memset(&mgr->down_rep_recv, 0,
> +                       sizeof(struct drm_dp_sideband_msg_rx));
> +                return 0;
> +        }
>
>          if (mgr->down_rep_recv.have_eomt) {
>                  struct drm_dp_sideband_msg_tx *txmsg;
> @@ -2276,7 +2288,12 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
>  static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
>  {
>          int ret = 0;
> -        drm_dp_get_one_sb_msg(mgr, true);
> +
> +        if (!drm_dp_get_one_sb_msg(mgr, true)) {
> +                memset(&mgr->up_req_recv, 0,
> +                       sizeof(struct drm_dp_sideband_msg_rx));
> +                return 0;
> +        }
>
>          if (mgr->up_req_recv.have_eomt) {
>                  struct drm_dp_sideband_msg_req_body msg;

_______________________________________________
dri-devel mailing list
dri-devel@xxxxxxxxxxxxxxxxxxxxx
https://lists.freedesktop.org/mailman/listinfo/dri-devel
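For illustration only (not part of the mail or the kernel patch above): a minimal, self-contained C sketch of the skip-until-SOMT rule the commit message describes, i.e. once a chunk of a sideband message transaction is lost or bad, drop everything until a chunk carrying the start-of-message-transaction flag opens a new transaction. The chunk/rx_state/rx_chunk names are invented for this example.

```c
#include <stdbool.h>

struct chunk {
        bool somt;      /* start-of-message-transaction flag */
        bool eomt;      /* end-of-message-transaction flag */
        bool bad;       /* reception error detected for this chunk */
};

struct rx_state {
        bool in_transaction;    /* currently assembling a transaction */
};

/* Returns true once a complete, error-free transaction has been assembled. */
bool rx_chunk(struct rx_state *st, const struct chunk *c)
{
        if (c->somt)
                st->in_transaction = true;      /* a SOMT chunk always (re)starts assembly */
        else if (!st->in_transaction)
                return false;                   /* out of order, or follows a failed transaction: skip */

        if (c->bad) {
                st->in_transaction = false;     /* abort; ignore chunks until the next SOMT */
                return false;
        }

        /* ... append the chunk payload to the transaction buffer here ... */

        if (c->eomt) {
                st->in_transaction = false;     /* full message assembled, hand it to the caller */
                return true;
        }
        return false;
}
```

In the patch itself the same effect is achieved in two places: drm_dp_sideband_msg_build() rejects non-SOMT chunks when no transaction is open, and drm_dp_get_one_sb_msg() now reports failures so the callers can reset their receive buffers and wait for the next transaction.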