author    Lyude Paul <lyude@redhat.com>  2020-04-23 12:42:24 -0400
committer Lyude Paul <lyude@redhat.com>  2020-04-23 13:18:17 -0400
commit    973a5909e99d0301a8832ca0cf07f9be01c4e97a
tree      06db975e571fd4d5edeaecee89374a7ecd3db479
parent    776d58823a60c689816972b51100cb322a0834ce
Revert "drm/dp_mst: Remove single tx msg restriction."
This reverts commit 6bb0942e8f46863a745489cce27efe5be2a3885e.

Unfortunately it would appear that the rumors we've heard of sideband
message interleaving not being very well supported are true. On the
Lenovo ThinkPad Thunderbolt 3 dock that I have, interleaved messages
appear to just get dropped:

  [drm:drm_dp_mst_wait_tx_reply [drm_kms_helper]] timedout msg send
  00000000571ddfd0 2 1
  [dp_mst] txmsg cur_offset=2 cur_len=2 seqno=1 state=SENT path_msg=1
  dst=00
  [dp_mst] type=ENUM_PATH_RESOURCES contents:
  [dp_mst]   port=2

DP descriptor for this hub:
  OUI 90-cc-24 dev-ID SYNA3 HW-rev 1.0 SW-rev 3.12 quirks 0x0008

It would seem that this is a somewhat well-known issue in the field as
well. From section 5.4.2 of the DisplayPort 2.0 specification:

  There are MST Sink/Branch devices in the field that do not handle
  interleaved message transactions. To facilitate message transaction
  handling by downstream devices, an MST Source device shall generate
  message transactions in an atomic manner (i.e., the MST Source
  device shall not concurrently interleave multiple message
  transactions). Therefore, an MST Source device shall clear the
  Message_Sequence_No value in the Sideband_MSG_Header to 0.

  MST Source devices that support field policy updates by way of
  software should update the policy to forego the generation of
  interleaved message transactions.

This is a bit disappointing, as features like HDCP require that we
send a sideband request every ~2 seconds for each active stream.
However, there isn't really anything in the specification that allows
us to accurately probe for interleaved messages.

If it ends up being that we -really- need this in the future, we might
be able to whitelist hubs where interleaving is known to work, or
maybe try some sort of heuristics. But for now, let's just play it
safe and not use it.

Signed-off-by: Lyude Paul <lyude@redhat.com>
Fixes: 6bb0942e8f46 ("drm/dp_mst: Remove single tx msg restriction.")
Cc: Wayne Lin <Wayne.Lin@amd.com>
Cc: Sean Paul <seanpaul@chromium.org>
Link: https://patchwork.freedesktop.org/patch/msgid/20200423164225.680178-1-lyude@redhat.com
Reviewed-by: Sean Paul <sean@poorly.run>
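As context for the policy this revert restores, below is a minimal
userspace sketch assuming nothing beyond the spec text quoted above:
at most one sideband transaction is generated at a time,
Message_Sequence_No is fixed at 0, and the next queued request is sent
only after the current reply retires the in-flight one. All names in
it (sb_mgr, sb_txmsg, sb_try_send, sb_handle_reply) are hypothetical
illustrations, not the kernel's API.

/*
 * A minimal userspace sketch, not kernel code: it models the atomic
 * transaction policy described in DP 2.0 section 5.4.2. At most one
 * sideband transaction is in flight, Message_Sequence_No is fixed at
 * 0, and the next queued request is sent only when the current reply
 * retires the in-flight one. All names here are hypothetical.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct sb_txmsg {
	const char *name;
	struct sb_txmsg *next;		/* singly linked down queue */
};

struct sb_mgr {
	struct sb_txmsg *downq;		/* pending down requests */
	bool waiting_for_reply;		/* one transaction outstanding */
};

/* Send the head-of-queue message only if nothing is outstanding. */
static void sb_try_send(struct sb_mgr *mgr)
{
	if (mgr->waiting_for_reply || !mgr->downq)
		return;

	/* An atomic source always clears Message_Sequence_No to 0. */
	printf("send %s (seqno=0)\n", mgr->downq->name);
	mgr->waiting_for_reply = true;
}

/* A down reply retires the in-flight transaction; kick the queue. */
static void sb_handle_reply(struct sb_mgr *mgr)
{
	if (!mgr->waiting_for_reply)
		return;

	printf("reply for %s\n", mgr->downq->name);
	mgr->downq = mgr->downq->next;
	mgr->waiting_for_reply = false;
	sb_try_send(mgr);	/* analogous to kicking the tx work */
}

int main(void)
{
	struct sb_txmsg epr = { "ENUM_PATH_RESOURCES port=2", NULL };
	struct sb_txmsg la = { "LINK_ADDRESS", &epr };
	struct sb_mgr mgr = { &la, false };

	sb_try_send(&mgr);	/* sends LINK_ADDRESS */
	sb_try_send(&mgr);	/* no-op: a transaction is in flight */
	sb_handle_reply(&mgr);	/* retires it and sends the next one */
	sb_handle_reply(&mgr);
	return 0;
}

Interleaving would instead tag concurrent transactions with distinct
sequence numbers; the spec text above is exactly why the driver
forgoes that.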
Diffstat (limited to 'drivers/gpu/drm/drm_dp_mst_topology.c')
 drivers/gpu/drm/drm_dp_mst_topology.c | 14 ++++++++++++--
 1 file changed, 12 insertions(+), 2 deletions(-)
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 13213c4b77d1..d2c19791b2b6 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -1205,6 +1205,8 @@ static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
 		    txmsg->state == DRM_DP_SIDEBAND_TX_SENT) {
 			mstb->tx_slots[txmsg->seqno] = NULL;
 		}
+		mgr->is_waiting_for_dwn_reply = false;
+
 	}
 out:
 	if (unlikely(ret == -EIO) && drm_debug_enabled(DRM_UT_DP)) {
@@ -1214,6 +1216,7 @@ out:
 	}
 	mutex_unlock(&mgr->qlock);
 
+	drm_dp_mst_kick_tx(mgr);
 	return ret;
 }
 
@@ -2789,9 +2792,11 @@ static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
 	ret = process_single_tx_qlock(mgr, txmsg, false);
 	if (ret == 1) {
 		/* txmsg is sent it should be in the slots now */
+		mgr->is_waiting_for_dwn_reply = true;
 		list_del(&txmsg->next);
 	} else if (ret) {
 		DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
+		mgr->is_waiting_for_dwn_reply = false;
 		list_del(&txmsg->next);
 		if (txmsg->seqno != -1)
 			txmsg->dst->tx_slots[txmsg->seqno] = NULL;
@@ -2831,7 +2836,8 @@ static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
 		drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
 	}
 
-	if (list_is_singular(&mgr->tx_msg_downq))
+	if (list_is_singular(&mgr->tx_msg_downq) &&
+	    !mgr->is_waiting_for_dwn_reply)
 		process_single_down_tx_qlock(mgr);
 	mutex_unlock(&mgr->qlock);
 }
@@ -3823,6 +3829,7 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
 	mutex_lock(&mgr->qlock);
 	txmsg->state = DRM_DP_SIDEBAND_TX_RX;
 	mstb->tx_slots[seqno] = NULL;
+	mgr->is_waiting_for_dwn_reply = false;
 	mutex_unlock(&mgr->qlock);
 
 	wake_up_all(&mgr->tx_waitq);
@@ -3830,6 +3837,9 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
 	return 0;
 
 out_clear_reply:
+	mutex_lock(&mgr->qlock);
+	mgr->is_waiting_for_dwn_reply = false;
+	mutex_unlock(&mgr->qlock);
 	if (msg)
 		memset(msg, 0, sizeof(struct drm_dp_sideband_msg_rx));
 out:
@@ -4682,7 +4692,7 @@ static void drm_dp_tx_work(struct work_struct *work)
 	struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work);
 
 	mutex_lock(&mgr->qlock);
-	if (!list_empty(&mgr->tx_msg_downq))
+	if (!list_empty(&mgr->tx_msg_downq) && !mgr->is_waiting_for_dwn_reply)
 		process_single_down_tx_qlock(mgr);
 	mutex_unlock(&mgr->qlock);
 }
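One detail worth noting in the hunks above: once only a single
transaction may be outstanding, is_waiting_for_dwn_reply must be
cleared on every path that retires it (reply received, send failure,
wait timeout) and the tx queue must then be kicked, or sideband
traffic stalls permanently. Below is a standalone sketch of that
invariant, using hypothetical names (struct mgr, handle_down_rep,
kick_tx) rather than the kernel API.

/*
 * Illustrative only, with hypothetical names rather than the kernel
 * API: the invariant the hunks above restore. Every path that retires
 * a transaction must clear the in-flight flag under the queue lock and
 * then kick the queue, or no further down request is ever sent.
 */
#include <errno.h>
#include <pthread.h>
#include <stdbool.h>

struct mgr {
	pthread_mutex_t qlock;
	bool waiting_for_reply;
};

static void kick_tx(struct mgr *mgr)
{
	(void)mgr;	/* would requeue the tx work here */
}

static int handle_down_rep(struct mgr *mgr, bool reply_ok)
{
	int ret = 0;

	if (!reply_ok) {
		ret = -EIO;
		goto out_clear;
	}
	/* ... deliver the reply to the waiting txmsg ... */
out_clear:
	pthread_mutex_lock(&mgr->qlock);
	mgr->waiting_for_reply = false;	/* never leave this set */
	pthread_mutex_unlock(&mgr->qlock);
	kick_tx(mgr);	/* let the next queued message go out */
	return ret;
}

int main(void)
{
	struct mgr mgr = { PTHREAD_MUTEX_INITIALIZER, true };

	return handle_down_rep(&mgr, true);
}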