Going through the currently programmed payloads isn't safe without
holding mgr->payload_lock, so actually do that and warn if anyone tries
calling nv50_msto_payload() in the future without grabbing the right
locks.

Signed-off-by: Lyude Paul <lyude@xxxxxxxxxx>
Cc: Daniel Vetter <daniel@xxxxxxxx>
Cc: David Airlie <airlied@xxxxxxxxxx>
Cc: Jerry Zuo <Jerry.Zuo@xxxxxxx>
Cc: Harry Wentland <harry.wentland@xxxxxxx>
Cc: Juston Li <juston.li@xxxxxxxxx>
---
 drivers/gpu/drm/nouveau/dispnv50/disp.c | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index 6f8a54e81727..8044ebba56a4 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -679,6 +679,8 @@ nv50_msto_payload(struct nv50_msto *msto)
 	struct nv50_mstm *mstm = mstc->mstm;
 	int vcpi = mstc->port->vcpi.vcpi, i;
 
+	WARN_ON(!mutex_is_locked(&mstm->mgr.payload_lock));
+
 	NV_ATOMIC(drm, "%s: vcpi %d\n", msto->encoder.name, vcpi);
 	for (i = 0; i < mstm->mgr.max_payloads; i++) {
 		struct drm_dp_payload *payload = &mstm->mgr.payloads[i];
@@ -732,6 +734,8 @@ nv50_msto_prepare(struct nv50_msto *msto)
 			      (0x0100 << msto->head->base.index),
 	};
 
+	mutex_lock(&mstm->mgr.payload_lock);
+
 	NV_ATOMIC(drm, "%s: msto prepare\n", msto->encoder.name);
 	if (mstc->port->vcpi.vcpi > 0) {
 		struct drm_dp_payload *payload = nv50_msto_payload(msto);
@@ -747,7 +751,9 @@ nv50_msto_prepare(struct nv50_msto *msto)
 		  msto->encoder.name, msto->head->base.base.name,
 		  args.vcpi.start_slot, args.vcpi.num_slots,
 		  args.vcpi.pbn, args.vcpi.aligned_pbn);
+
 	nvif_mthd(&drm->display->disp.object, 0, &args, sizeof(args));
+	mutex_unlock(&mstm->mgr.payload_lock);
 }
 
 static int
-- 
2.20.1
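
As a rough illustration of the locking rule the patch enforces -- the caller
takes the payload lock before the helper walks the shared payload table, and
the helper loudly warns if it is ever called without the lock -- here is a
minimal user-space sketch using pthreads instead of the kernel mutex API.
The names payload_lock, payload_table and lookup_payload() are invented for
the example, and lock_is_held() only approximates mutex_is_locked(): like the
kernel helper, it can tell that *someone* holds the lock, not that the
current thread does.

/* Minimal sketch of "caller must hold the lock" with a WARN-style check. */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t payload_lock = PTHREAD_MUTEX_INITIALIZER;
static int payload_table[4] = { 1, 5, 0, 0 };  /* stand-in for mgr->payloads[] */

/* Rough analogue of mutex_is_locked(): trylock failing means it is held. */
static int lock_is_held(pthread_mutex_t *lock)
{
	if (pthread_mutex_trylock(lock) == EBUSY)
		return 1;
	pthread_mutex_unlock(lock);	/* we grabbed it by accident; give it back */
	return 0;
}

/* Must be called with payload_lock held, like nv50_msto_payload(). */
static int lookup_payload(int vcpi)
{
	int i;

	if (!lock_is_held(&payload_lock))
		fprintf(stderr, "WARN: payload_lock not held\n");

	for (i = 0; i < 4; i++)
		if (payload_table[i] == vcpi)
			return i;
	return -1;
}

int main(void)
{
	/* Correct usage: hold the lock around the lookup, as the patch does. */
	pthread_mutex_lock(&payload_lock);
	printf("vcpi 5 -> slot %d\n", lookup_payload(5));
	pthread_mutex_unlock(&payload_lock);
	return 0;
}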