Diffstat (limited to 'drivers/gpu/drm/display/drm_dp_mst_topology.c')
| -rw-r--r-- | drivers/gpu/drm/display/drm_dp_mst_topology.c | 504 |
1 file changed, 273 insertions(+), 231 deletions(-)
diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c index 7f8e1cfbe19d..64e5c176d5cc 100644 --- a/drivers/gpu/drm/display/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c @@ -23,13 +23,13 @@ #include <linux/bitfield.h> #include <linux/delay.h> #include <linux/errno.h> +#include <linux/export.h> #include <linux/i2c.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/random.h> #include <linux/sched.h> #include <linux/seq_file.h> -#include <linux/iopoll.h> #if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS) #include <linux/stacktrace.h> @@ -68,9 +68,6 @@ static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr, static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port); -static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr, - int id, u8 start_slot, u8 num_slots); - static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, int offset, int size, u8 *bytes); @@ -89,7 +86,7 @@ static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_branch *mstb, struct drm_dp_mst_port *port); static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr, - u8 *guid); + guid_t *guid); static int drm_dp_mst_register_i2c_bus(struct drm_dp_mst_port *port); static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_mst_port *port); @@ -175,18 +172,30 @@ static const char *drm_dp_mst_sideband_tx_state_str(int state) return sideband_reason_str[state]; } +static inline u8 +drm_dp_mst_get_ufp_num_at_lct_from_rad(u8 lct, const u8 *rad) +{ + int idx = (lct / 2) - 1; + int shift = (lct % 2) ? 0 : 4; + u8 ufp_num; + + /* mst_primary, it's rad is unset*/ + if (lct == 1) + return 0; + + ufp_num = (rad[idx] >> shift) & 0xf; + + return ufp_num; +} + static int drm_dp_mst_rad_to_str(const u8 rad[8], u8 lct, char *out, size_t len) { int i; - u8 unpacked_rad[16]; + u8 unpacked_rad[16] = {}; - for (i = 0; i < lct; i++) { - if (i % 2) - unpacked_rad[i] = rad[i / 2] >> 4; - else - unpacked_rad[i] = rad[i / 2] & BIT_MASK(4); - } + for (i = 0; i < lct; i++) + unpacked_rad[i] = drm_dp_mst_get_ufp_num_at_lct_from_rad(i + 1, rad); /* TODO: Eventually add something to printk so we can format the rad * like this: 1.2.3 @@ -320,6 +329,9 @@ static bool drm_dp_decode_sideband_msg_hdr(const struct drm_dp_mst_topology_mgr hdr->broadcast = (buf[idx] >> 7) & 0x1; hdr->path_msg = (buf[idx] >> 6) & 0x1; hdr->msg_len = buf[idx] & 0x3f; + if (hdr->msg_len < 1) /* min space for body CRC */ + return false; + idx++; hdr->somt = (buf[idx] >> 7) & 0x1; hdr->eomt = (buf[idx] >> 6) & 0x1; @@ -801,7 +813,7 @@ static bool drm_dp_sideband_parse_link_address(const struct drm_dp_mst_topology_ int idx = 1; int i; - memcpy(repmsg->u.link_addr.guid, &raw->msg[idx], 16); + import_guid(&repmsg->u.link_addr.guid, &raw->msg[idx]); idx += 16; repmsg->u.link_addr.nports = raw->msg[idx] & 0xf; idx++; @@ -829,7 +841,7 @@ static bool drm_dp_sideband_parse_link_address(const struct drm_dp_mst_topology_ idx++; if (idx > raw->curlen) goto fail_len; - memcpy(repmsg->u.link_addr.ports[i].peer_guid, &raw->msg[idx], 16); + import_guid(&repmsg->u.link_addr.ports[i].peer_guid, &raw->msg[idx]); idx += 16; if (idx > raw->curlen) goto fail_len; @@ -1029,7 +1041,7 @@ static bool drm_dp_sideband_parse_reply(const struct drm_dp_mst_topology_mgr *mg msg->req_type = (raw->msg[0] & 0x7f); if (msg->reply_type == DP_SIDEBAND_REPLY_NAK) { - memcpy(msg->u.nak.guid, 
&raw->msg[1], 16); + import_guid(&msg->u.nak.guid, &raw->msg[1]); msg->u.nak.reason = raw->msg[17]; msg->u.nak.nak_data = raw->msg[18]; return false; @@ -1078,7 +1090,7 @@ drm_dp_sideband_parse_connection_status_notify(const struct drm_dp_mst_topology_ if (idx > raw->curlen) goto fail_len; - memcpy(msg->u.conn_stat.guid, &raw->msg[idx], 16); + import_guid(&msg->u.conn_stat.guid, &raw->msg[idx]); idx += 16; if (idx > raw->curlen) goto fail_len; @@ -1107,7 +1119,7 @@ static bool drm_dp_sideband_parse_resource_status_notify(const struct drm_dp_mst if (idx > raw->curlen) goto fail_len; - memcpy(msg->u.resource_stat.guid, &raw->msg[idx], 16); + import_guid(&msg->u.resource_stat.guid, &raw->msg[idx]); idx += 16; if (idx > raw->curlen) goto fail_len; @@ -2174,27 +2186,27 @@ ssize_t drm_dp_mst_dpcd_write(struct drm_dp_aux *aux, offset, size, buffer); } -static int drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, u8 *guid) +static int drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, guid_t *guid) { int ret = 0; - memcpy(mstb->guid, guid, 16); + guid_copy(&mstb->guid, guid); - if (!drm_dp_validate_guid(mstb->mgr, mstb->guid)) { - if (mstb->port_parent) { - ret = drm_dp_send_dpcd_write(mstb->mgr, - mstb->port_parent, - DP_GUID, 16, mstb->guid); - } else { - ret = drm_dp_dpcd_write(mstb->mgr->aux, - DP_GUID, mstb->guid, 16); - } - } + if (!drm_dp_validate_guid(mstb->mgr, &mstb->guid)) { + struct drm_dp_aux *aux; + u8 buf[UUID_SIZE]; - if (ret < 16 && ret > 0) - return -EPROTO; + export_guid(buf, &mstb->guid); - return ret == 16 ? 0 : ret; + if (mstb->port_parent) + aux = &mstb->port_parent->aux; + else + aux = mstb->mgr->aux; + + ret = drm_dp_dpcd_write_data(aux, DP_GUID, buf, sizeof(buf)); + } + + return ret; } static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb, @@ -2278,7 +2290,7 @@ drm_dp_mst_port_add_connector(struct drm_dp_mst_branch *mstb, port->cached_edid = drm_edid_read_ddc(port->connector, &port->aux.ddc); - drm_connector_register(port->connector); + drm_connector_dynamic_register(port->connector); return; error: @@ -2339,7 +2351,7 @@ drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb, { struct drm_dp_mst_topology_mgr *mgr = mstb->mgr; struct drm_dp_mst_port *port; - int old_ddps = 0, ret; + int ret; u8 new_pdt = DP_PEER_DEVICE_NONE; bool new_mcs = 0; bool created = false, send_link_addr = false, changed = false; @@ -2372,7 +2384,6 @@ drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb, */ drm_modeset_lock(&mgr->base.lock, NULL); - old_ddps = port->ddps; changed = port->ddps != port_msg->ddps || (port->ddps && (port->ldps != port_msg->legacy_device_plug_status || @@ -2407,15 +2418,13 @@ drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb, * Reprobe PBN caps on both hotplug, and when re-probing the link * for our parent mstb */ - if (old_ddps != port->ddps || !created) { - if (port->ddps && !port->input) { - ret = drm_dp_send_enum_path_resources(mgr, mstb, - port); - if (ret == 1) - changed = true; - } else { - port->full_pbn = 0; - } + if (port->ddps && !port->input) { + ret = drm_dp_send_enum_path_resources(mgr, mstb, + port); + if (ret == 1) + changed = true; + } else { + port->full_pbn = 0; } ret = drm_dp_port_set_pdt(port, new_pdt, new_mcs); @@ -2544,9 +2553,8 @@ static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_ if (!mstb) goto out; - for (i = 0; i < lct - 1; i++) { - int shift = (i % 2) ? 
0 : 4; - int port_num = (rad[i / 2] >> shift) & 0xf; + for (i = 1; i < lct; i++) { + int port_num = drm_dp_mst_get_ufp_num_at_lct_from_rad(i + 1, rad); list_for_each_entry(port, &mstb->ports, next) { if (port->port_num == port_num) { @@ -2570,9 +2578,9 @@ out: return mstb; } -static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper( - struct drm_dp_mst_branch *mstb, - const uint8_t *guid) +static struct drm_dp_mst_branch * +get_mst_branch_device_by_guid_helper(struct drm_dp_mst_branch *mstb, + const guid_t *guid) { struct drm_dp_mst_branch *found_mstb; struct drm_dp_mst_port *port; @@ -2580,10 +2588,9 @@ static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper( if (!mstb) return NULL; - if (memcmp(mstb->guid, guid, 16) == 0) + if (guid_equal(&mstb->guid, guid)) return mstb; - list_for_each_entry(port, &mstb->ports, next) { found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid); @@ -2596,7 +2603,7 @@ static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper( static struct drm_dp_mst_branch * drm_dp_get_mst_branch_device_by_guid(struct drm_dp_mst_topology_mgr *mgr, - const uint8_t *guid) + const guid_t *guid) { struct drm_dp_mst_branch *mstb; int ret; @@ -2692,18 +2699,18 @@ static void drm_dp_mst_link_probe_work(struct work_struct *work) drm_kms_helper_hotplug_event(dev); } -static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr, - u8 *guid) +static void drm_dp_mst_queue_probe_work(struct drm_dp_mst_topology_mgr *mgr) { - u64 salt; + queue_work(system_long_wq, &mgr->work); +} - if (memchr_inv(guid, 0, 16)) +static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr, + guid_t *guid) +{ + if (!guid_is_null(guid)) return true; - salt = get_jiffies_64(); - - memcpy(&guid[0], &salt, sizeof(u64)); - memcpy(&guid[8], &salt, sizeof(u64)); + guid_gen(guid); return false; } @@ -2734,14 +2741,13 @@ retry: do { tosend = min3(mgr->max_dpcd_transaction_bytes, 16, total); - ret = drm_dp_dpcd_write(mgr->aux, regbase + offset, - &msg[offset], - tosend); - if (ret != tosend) { - if (ret == -EIO && retries < 5) { - retries++; - goto retry; - } + ret = drm_dp_dpcd_write_data(mgr->aux, regbase + offset, + &msg[offset], + tosend); + if (ret == -EIO && retries < 5) { + retries++; + goto retry; + } else if (ret < 0) { drm_dbg_kms(mgr->dev, "failed to dpcd write %d %d\n", tosend, ret); return -EIO; @@ -2929,7 +2935,7 @@ static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, /* FIXME: Actually do some real error handling here */ ret = drm_dp_mst_wait_tx_reply(mstb, txmsg); - if (ret <= 0) { + if (ret < 0) { drm_err(mgr->dev, "Sending link address failed with %d\n", ret); goto out; } @@ -2943,7 +2949,7 @@ static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, drm_dbg_kms(mgr->dev, "link address reply: %d\n", reply->nports); drm_dp_dump_link_address(mgr, reply); - ret = drm_dp_check_mstb_guid(mstb, reply->guid); + ret = drm_dp_check_mstb_guid(mstb, &reply->guid); if (ret) { char buf[64]; @@ -2981,7 +2987,7 @@ static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, mutex_unlock(&mgr->lock); out: - if (ret <= 0) + if (ret < 0) mstb->link_address_sent = false; kfree(txmsg); return ret < 0 ? 
ret : changed; @@ -3264,7 +3270,7 @@ EXPORT_SYMBOL(drm_dp_send_query_stream_enc_status); static int drm_dp_create_payload_at_dfp(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_atomic_payload *payload) { - return drm_dp_dpcd_write_payload(mgr, payload->vcpi, payload->vc_start_slot, + return drm_dp_dpcd_write_payload(mgr->aux, payload->vcpi, payload->vc_start_slot, payload->time_slots); } @@ -3295,7 +3301,7 @@ static void drm_dp_destroy_payload_at_remote_and_dfp(struct drm_dp_mst_topology_ } if (payload->payload_allocation_status == DRM_DP_MST_PAYLOAD_ALLOCATION_DFP) - drm_dp_dpcd_write_payload(mgr, payload->vcpi, payload->vc_start_slot, 0); + drm_dp_dpcd_write_payload(mgr->aux, payload->vcpi, payload->vc_start_slot, 0); } /** @@ -3573,8 +3579,7 @@ static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr, } /** - * drm_dp_get_vc_payload_bw - get the VC payload BW for an MST link - * @mgr: The &drm_dp_mst_topology_mgr to use + * drm_dp_get_vc_payload_bw - get the VC payload BW for an MTP link * @link_rate: link rate in 10kbits/s units * @link_lane_count: lane count * @@ -3585,17 +3590,12 @@ static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr, * * Returns the BW / timeslot value in 20.12 fixed point format. */ -fixed20_12 drm_dp_get_vc_payload_bw(const struct drm_dp_mst_topology_mgr *mgr, - int link_rate, int link_lane_count) +fixed20_12 drm_dp_get_vc_payload_bw(int link_rate, int link_lane_count) { int ch_coding_efficiency = drm_dp_bw_channel_coding_efficiency(drm_dp_is_uhbr_rate(link_rate)); fixed20_12 ret; - if (link_rate == 0 || link_lane_count == 0) - drm_dbg_kms(mgr->dev, "invalid link rate/lane count: (%d / %d)\n", - link_rate, link_lane_count); - /* See DP v2.0 2.6.4.2, 2.7.6.3 VCPayload_Bandwidth_for_OneTimeSlotPer_MTP_Allocation */ ret.full = DIV_ROUND_DOWN_ULL(mul_u32_u32(link_rate * link_lane_count, ch_coding_efficiency), @@ -3620,7 +3620,7 @@ enum drm_dp_mst_mode drm_dp_read_mst_cap(struct drm_dp_aux *aux, if (dpcd[DP_DPCD_REV] < DP_DPCD_REV_12) return DRM_DP_SST; - if (drm_dp_dpcd_readb(aux, DP_MSTM_CAP, &mstm_cap) != 1) + if (drm_dp_dpcd_read_byte(aux, DP_MSTM_CAP, &mstm_cap) < 0) return DRM_DP_SST; if (mstm_cap & DP_MST_CAP) @@ -3675,17 +3675,17 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms mgr->mst_primary = mstb; drm_dp_mst_topology_get_mstb(mgr->mst_primary); - ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, - DP_MST_EN | - DP_UP_REQ_EN | - DP_UPSTREAM_IS_SRC); + ret = drm_dp_dpcd_write_byte(mgr->aux, DP_MSTM_CTRL, + DP_MST_EN | + DP_UP_REQ_EN | + DP_UPSTREAM_IS_SRC); if (ret < 0) goto out_unlock; /* Write reset payload */ - drm_dp_dpcd_write_payload(mgr, 0, 0, 0x3f); + drm_dp_dpcd_clear_payload(mgr->aux); - queue_work(system_long_wq, &mgr->work); + drm_dp_mst_queue_probe_work(mgr); ret = 0; } else { @@ -3693,12 +3693,11 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms mstb = mgr->mst_primary; mgr->mst_primary = NULL; /* this can fail if the device is gone */ - drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0); + drm_dp_dpcd_write_byte(mgr->aux, DP_MSTM_CTRL, 0); ret = 0; mgr->payload_id_table_cleared = false; - memset(&mgr->down_rep_recv, 0, sizeof(mgr->down_rep_recv)); - memset(&mgr->up_req_recv, 0, sizeof(mgr->up_req_recv)); + mgr->reset_rx_state = true; } out_unlock: @@ -3724,6 +3723,33 @@ drm_dp_mst_topology_mgr_invalidate_mstb(struct drm_dp_mst_branch *mstb) } /** + * drm_dp_mst_topology_queue_probe - Queue a topology probe + * @mgr: manager to probe + * + * 
Queue a work to probe the MST topology. Driver's should call this only to + * sync the topology's HW->SW state after the MST link's parameters have + * changed in a way the state could've become out-of-sync. This is the case + * for instance when the link rate between the source and first downstream + * branch device has switched between UHBR and non-UHBR rates. Except of those + * cases - for instance when a sink gets plugged/unplugged to a port - the SW + * state will get updated automatically via MST UP message notifications. + */ +void drm_dp_mst_topology_queue_probe(struct drm_dp_mst_topology_mgr *mgr) +{ + mutex_lock(&mgr->lock); + + if (drm_WARN_ON(mgr->dev, !mgr->mst_state || !mgr->mst_primary)) + goto out_unlock; + + drm_dp_mst_topology_mgr_invalidate_mstb(mgr->mst_primary); + drm_dp_mst_queue_probe_work(mgr); + +out_unlock: + mutex_unlock(&mgr->lock); +} +EXPORT_SYMBOL(drm_dp_mst_topology_queue_probe); + +/** * drm_dp_mst_topology_mgr_suspend() - suspend the MST manager * @mgr: manager to suspend * @@ -3733,8 +3759,8 @@ drm_dp_mst_topology_mgr_invalidate_mstb(struct drm_dp_mst_branch *mstb) void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr) { mutex_lock(&mgr->lock); - drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, - DP_MST_EN | DP_UPSTREAM_IS_SRC); + drm_dp_dpcd_write_byte(mgr->aux, DP_MSTM_CTRL, + DP_MST_EN | DP_UPSTREAM_IS_SRC); mutex_unlock(&mgr->lock); flush_work(&mgr->up_req_work); flush_work(&mgr->work); @@ -3770,8 +3796,9 @@ EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend); int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr, bool sync) { + u8 buf[UUID_SIZE]; + guid_t guid; int ret; - u8 guid[16]; mutex_lock(&mgr->lock); if (!mgr->mst_primary) @@ -3782,23 +3809,25 @@ int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr, goto out_fail; } - ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, - DP_MST_EN | - DP_UP_REQ_EN | - DP_UPSTREAM_IS_SRC); + ret = drm_dp_dpcd_write_byte(mgr->aux, DP_MSTM_CTRL, + DP_MST_EN | + DP_UP_REQ_EN | + DP_UPSTREAM_IS_SRC); if (ret < 0) { drm_dbg_kms(mgr->dev, "mst write failed - undocked during suspend?\n"); goto out_fail; } /* Some hubs forget their guids after they resume */ - ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16); - if (ret != 16) { + ret = drm_dp_dpcd_read_data(mgr->aux, DP_GUID, buf, sizeof(buf)); + if (ret < 0) { drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n"); goto out_fail; } - ret = drm_dp_check_mstb_guid(mgr->mst_primary, guid); + import_guid(&guid, buf); + + ret = drm_dp_check_mstb_guid(mgr->mst_primary, &guid); if (ret) { drm_dbg_kms(mgr->dev, "check mstb failed - undocked during suspend?\n"); goto out_fail; @@ -3809,7 +3838,7 @@ int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr, * state of our in-memory topology back into sync with reality. 
So, * restart the probing process as if we're probing a new hub */ - queue_work(system_long_wq, &mgr->work); + drm_dp_mst_queue_probe_work(mgr); mutex_unlock(&mgr->lock); if (sync) { @@ -3826,6 +3855,11 @@ out_fail: } EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume); +static void reset_msg_rx_state(struct drm_dp_sideband_msg_rx *msg) +{ + memset(msg, 0, sizeof(*msg)); +} + static bool drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up, struct drm_dp_mst_branch **mstb) @@ -3845,8 +3879,8 @@ drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up, *mstb = NULL; len = min(mgr->max_dpcd_transaction_bytes, 16); - ret = drm_dp_dpcd_read(mgr->aux, basereg, replyblock, len); - if (ret != len) { + ret = drm_dp_dpcd_read_data(mgr->aux, basereg, replyblock, len); + if (ret < 0) { drm_dbg_kms(mgr->dev, "failed to read DPCD down rep %d %d\n", len, ret); return false; } @@ -3884,9 +3918,9 @@ drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up, curreply = len; while (replylen > 0) { len = min3(replylen, mgr->max_dpcd_transaction_bytes, 16); - ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply, - replyblock, len); - if (ret != len) { + ret = drm_dp_dpcd_read_data(mgr->aux, basereg + curreply, + replyblock, len); + if (ret < 0) { drm_dbg_kms(mgr->dev, "failed to read a chunk (len %d, ret %d)\n", len, ret); return false; @@ -3904,6 +3938,34 @@ drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up, return true; } +static int get_msg_request_type(u8 data) +{ + return data & 0x7f; +} + +static bool verify_rx_request_type(struct drm_dp_mst_topology_mgr *mgr, + const struct drm_dp_sideband_msg_tx *txmsg, + const struct drm_dp_sideband_msg_rx *rxmsg) +{ + const struct drm_dp_sideband_msg_hdr *hdr = &rxmsg->initial_hdr; + const struct drm_dp_mst_branch *mstb = txmsg->dst; + int tx_req_type = get_msg_request_type(txmsg->msg[0]); + int rx_req_type = get_msg_request_type(rxmsg->msg[0]); + char rad_str[64]; + + if (tx_req_type == rx_req_type) + return true; + + drm_dp_mst_rad_to_str(mstb->rad, mstb->lct, rad_str, sizeof(rad_str)); + drm_dbg_kms(mgr->dev, + "Got unexpected MST reply, mstb: %p seqno: %d lct: %d rad: %s rx_req_type: %s (%02x) != tx_req_type: %s (%02x)\n", + mstb, hdr->seqno, mstb->lct, rad_str, + drm_dp_mst_req_type_str(rx_req_type), rx_req_type, + drm_dp_mst_req_type_str(tx_req_type), tx_req_type); + + return false; +} + static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr) { struct drm_dp_sideband_msg_tx *txmsg; @@ -3919,9 +3981,9 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr) /* find the message */ mutex_lock(&mgr->qlock); + txmsg = list_first_entry_or_null(&mgr->tx_msg_downq, struct drm_dp_sideband_msg_tx, next); - mutex_unlock(&mgr->qlock); /* Were we actually expecting a response, and from this mstb? 
*/ if (!txmsg || txmsg->dst != mstb) { @@ -3930,6 +3992,15 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr) hdr = &msg->initial_hdr; drm_dbg_kms(mgr->dev, "Got MST reply with no msg %p %d %d %02x %02x\n", mstb, hdr->seqno, hdr->lct, hdr->rad[0], msg->msg[0]); + + mutex_unlock(&mgr->qlock); + + goto out_clear_reply; + } + + if (!verify_rx_request_type(mgr, txmsg, msg)) { + mutex_unlock(&mgr->qlock); + goto out_clear_reply; } @@ -3945,20 +4016,15 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr) txmsg->reply.u.nak.nak_data); } - memset(msg, 0, sizeof(struct drm_dp_sideband_msg_rx)); - drm_dp_mst_topology_put_mstb(mstb); - - mutex_lock(&mgr->qlock); txmsg->state = DRM_DP_SIDEBAND_TX_RX; list_del(&txmsg->next); + mutex_unlock(&mgr->qlock); wake_up_all(&mgr->tx_waitq); - return 0; - out_clear_reply: - memset(msg, 0, sizeof(struct drm_dp_sideband_msg_rx)); + reset_msg_rx_state(msg); out: if (mstb) drm_dp_mst_topology_put_mstb(mstb); @@ -3966,6 +4032,22 @@ out: return 0; } +static bool primary_mstb_probing_is_done(struct drm_dp_mst_topology_mgr *mgr) +{ + bool probing_done = false; + + mutex_lock(&mgr->lock); + + if (mgr->mst_primary && drm_dp_mst_topology_try_get_mstb(mgr->mst_primary)) { + probing_done = mgr->mst_primary->link_address_sent; + drm_dp_mst_topology_put_mstb(mgr->mst_primary); + } + + mutex_unlock(&mgr->lock); + + return probing_done; +} + static inline bool drm_dp_mst_process_up_req(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_pending_up_req *up_req) @@ -3976,12 +4058,12 @@ drm_dp_mst_process_up_req(struct drm_dp_mst_topology_mgr *mgr, bool hotplug = false, dowork = false; if (hdr->broadcast) { - const u8 *guid = NULL; + const guid_t *guid = NULL; if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY) - guid = msg->u.conn_stat.guid; + guid = &msg->u.conn_stat.guid; else if (msg->req_type == DP_RESOURCE_STATUS_NOTIFY) - guid = msg->u.resource_stat.guid; + guid = &msg->u.resource_stat.guid; if (guid) mstb = drm_dp_get_mst_branch_device_by_guid(mgr, guid); @@ -3996,8 +4078,12 @@ drm_dp_mst_process_up_req(struct drm_dp_mst_topology_mgr *mgr, /* TODO: Add missing handler for DP_RESOURCE_STATUS_NOTIFY events */ if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY) { - dowork = drm_dp_mst_handle_conn_stat(mstb, &msg->u.conn_stat); - hotplug = true; + if (!primary_mstb_probing_is_done(mgr)) { + drm_dbg_kms(mgr->dev, "Got CSN before finish topology probing. 
Skip it.\n"); + } else { + dowork = drm_dp_mst_handle_conn_stat(mstb, &msg->u.conn_stat); + hotplug = true; + } } drm_dp_mst_topology_put_mstb(mstb); @@ -4040,16 +4126,20 @@ static void drm_dp_mst_up_req_work(struct work_struct *work) static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr) { struct drm_dp_pending_up_req *up_req; + struct drm_dp_mst_branch *mst_primary; + int ret = 0; if (!drm_dp_get_one_sb_msg(mgr, true, NULL)) - goto out; + goto out_clear_reply; if (!mgr->up_req_recv.have_eomt) return 0; up_req = kzalloc(sizeof(*up_req), GFP_KERNEL); - if (!up_req) - return -ENOMEM; + if (!up_req) { + ret = -ENOMEM; + goto out_clear_reply; + } INIT_LIST_HEAD(&up_req->next); @@ -4060,12 +4150,23 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr) drm_dbg_kms(mgr->dev, "Received unknown up req type, ignoring: %x\n", up_req->msg.req_type); kfree(up_req); - goto out; + goto out_clear_reply; + } + + mutex_lock(&mgr->lock); + mst_primary = mgr->mst_primary; + if (!mst_primary || !drm_dp_mst_topology_try_get_mstb(mst_primary)) { + mutex_unlock(&mgr->lock); + kfree(up_req); + goto out_clear_reply; } + mutex_unlock(&mgr->lock); - drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, up_req->msg.req_type, + drm_dp_send_up_ack_reply(mgr, mst_primary, up_req->msg.req_type, false); + drm_dp_mst_topology_put_mstb(mst_primary); + if (up_req->msg.req_type == DP_CONNECTION_STATUS_NOTIFY) { const struct drm_dp_connection_status_notify *conn_stat = &up_req->msg.u.conn_stat; @@ -4091,10 +4192,20 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr) list_add_tail(&up_req->next, &mgr->up_req_list); mutex_unlock(&mgr->up_req_lock); queue_work(system_long_wq, &mgr->up_req_work); +out_clear_reply: + reset_msg_rx_state(&mgr->up_req_recv); + return ret; +} -out: - memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); - return 0; +static void update_msg_rx_state(struct drm_dp_mst_topology_mgr *mgr) +{ + mutex_lock(&mgr->lock); + if (mgr->reset_rx_state) { + mgr->reset_rx_state = false; + reset_msg_rx_state(&mgr->down_rep_recv); + reset_msg_rx_state(&mgr->up_req_recv); + } + mutex_unlock(&mgr->lock); } /** @@ -4131,6 +4242,8 @@ int drm_dp_mst_hpd_irq_handle_event(struct drm_dp_mst_topology_mgr *mgr, const u *handled = true; } + update_msg_rx_state(mgr); + if (esi[1] & DP_DOWN_REP_MSG_RDY) { ret = drm_dp_mst_handle_down_rep(mgr); *handled = true; @@ -4639,61 +4752,6 @@ void drm_dp_mst_update_slots(struct drm_dp_mst_topology_state *mst_state, uint8_ } EXPORT_SYMBOL(drm_dp_mst_update_slots); -static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr, - int id, u8 start_slot, u8 num_slots) -{ - u8 payload_alloc[3], status; - int ret; - int retries = 0; - - drm_dp_dpcd_writeb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, - DP_PAYLOAD_TABLE_UPDATED); - - payload_alloc[0] = id; - payload_alloc[1] = start_slot; - payload_alloc[2] = num_slots; - - ret = drm_dp_dpcd_write(mgr->aux, DP_PAYLOAD_ALLOCATE_SET, payload_alloc, 3); - if (ret != 3) { - drm_dbg_kms(mgr->dev, "failed to write payload allocation %d\n", ret); - goto fail; - } - -retry: - ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status); - if (ret < 0) { - drm_dbg_kms(mgr->dev, "failed to read payload table status %d\n", ret); - goto fail; - } - - if (!(status & DP_PAYLOAD_TABLE_UPDATED)) { - retries++; - if (retries < 20) { - usleep_range(10000, 20000); - goto retry; - } - drm_dbg_kms(mgr->dev, "status not set after read payload table status %d\n", - status); - ret 
= -EINVAL; - goto fail; - } - ret = 0; -fail: - return ret; -} - -static int do_get_act_status(struct drm_dp_aux *aux) -{ - int ret; - u8 status; - - ret = drm_dp_dpcd_readb(aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status); - if (ret < 0) - return ret; - - return status; -} - /** * drm_dp_check_act_status() - Polls for ACT handled status. * @mgr: manager to use @@ -4711,28 +4769,9 @@ int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr) * There doesn't seem to be any recommended retry count or timeout in * the MST specification. Since some hubs have been observed to take * over 1 second to update their payload allocations under certain - * conditions, we use a rather large timeout value. + * conditions, we use a rather large timeout value of 3 seconds. */ - const int timeout_ms = 3000; - int ret, status; - - ret = readx_poll_timeout(do_get_act_status, mgr->aux, status, - status & DP_PAYLOAD_ACT_HANDLED || status < 0, - 200, timeout_ms * USEC_PER_MSEC); - if (ret < 0 && status >= 0) { - drm_err(mgr->dev, "Failed to get ACT after %dms, last status: %02x\n", - timeout_ms, status); - return -EINVAL; - } else if (status < 0) { - /* - * Failure here isn't unexpected - the hub may have - * just been unplugged - */ - drm_dbg_kms(mgr->dev, "Failed to read payload table status: %d\n", status); - return status; - } - - return 0; + return drm_dp_dpcd_poll_act_handled(mgr->aux, 3000); } EXPORT_SYMBOL(drm_dp_check_act_status); @@ -4838,9 +4877,9 @@ static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr, int i; for (i = 0; i < DP_PAYLOAD_TABLE_SIZE; i += 16) { - if (drm_dp_dpcd_read(mgr->aux, - DP_PAYLOAD_TABLE_UPDATE_STATUS + i, - &buf[i], 16) != 16) + if (drm_dp_dpcd_read_data(mgr->aux, + DP_PAYLOAD_TABLE_UPDATE_STATUS + i, + &buf[i], 16) < 0) return false; } return true; @@ -4929,30 +4968,31 @@ void drm_dp_mst_dump_topology(struct seq_file *m, } seq_printf(m, "dpcd: %*ph\n", DP_RECEIVER_CAP_SIZE, buf); - ret = drm_dp_dpcd_read(mgr->aux, DP_FAUX_CAP, buf, 2); - if (ret != 2) { + ret = drm_dp_dpcd_read_data(mgr->aux, DP_FAUX_CAP, buf, 2); + if (ret < 0) { seq_printf(m, "faux/mst read failed\n"); goto out; } seq_printf(m, "faux/mst: %*ph\n", 2, buf); - ret = drm_dp_dpcd_read(mgr->aux, DP_MSTM_CTRL, buf, 1); - if (ret != 1) { + ret = drm_dp_dpcd_read_data(mgr->aux, DP_MSTM_CTRL, buf, 1); + if (ret < 0) { seq_printf(m, "mst ctrl read failed\n"); goto out; } seq_printf(m, "mst ctrl: %*ph\n", 1, buf); /* dump the standard OUI branch header */ - ret = drm_dp_dpcd_read(mgr->aux, DP_BRANCH_OUI, buf, DP_BRANCH_OUI_HEADER_SIZE); - if (ret != DP_BRANCH_OUI_HEADER_SIZE) { + ret = drm_dp_dpcd_read_data(mgr->aux, DP_BRANCH_OUI, buf, + DP_BRANCH_OUI_HEADER_SIZE); + if (ret < 0) { seq_printf(m, "branch oui read failed\n"); goto out; } seq_printf(m, "branch oui: %*phN devid: ", 3, buf); for (i = 0x3; i < 0x8 && buf[i]; i++) - seq_printf(m, "%c", buf[i]); + seq_putc(m, buf[i]); seq_printf(m, " revision: hw: %x.%x sw: %x.%x\n", buf[0x9] >> 4, buf[0x9] & 0xf, buf[0xa], buf[0xb]); if (dump_dp_payload_table(mgr, buf)) @@ -5558,7 +5598,6 @@ EXPORT_SYMBOL(drm_dp_mst_atomic_check_mgr); * drm_dp_atomic_release_time_slots() * * Returns: - * * 0 if the new state is valid, negative error code otherwise. */ int drm_dp_mst_atomic_check(struct drm_atomic_state *state) @@ -5595,7 +5634,6 @@ EXPORT_SYMBOL(drm_dp_mst_topology_state_funcs); * topology object. * * RETURNS: - * * The MST topology state or error pointer. 
*/ struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state, @@ -5615,7 +5653,6 @@ EXPORT_SYMBOL(drm_atomic_get_mst_topology_state); * topology object. * * Returns: - * * The old MST topology state, or NULL if there's no topology state for this MST mgr * in the global atomic state */ @@ -5640,7 +5677,6 @@ EXPORT_SYMBOL(drm_atomic_get_old_mst_topology_state); * topology object. * * Returns: - * * The new MST topology state, or NULL if there's no topology state for this MST mgr * in the global atomic state */ @@ -6046,6 +6082,7 @@ struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port) struct drm_dp_aux *immediate_upstream_aux; struct drm_dp_mst_port *fec_port; struct drm_dp_desc desc = {}; + u8 upstream_dsc; u8 endpoint_fec; u8 endpoint_dsc; @@ -6072,16 +6109,14 @@ struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port) /* DP-to-DP peer device */ if (drm_dp_mst_is_virtual_dpcd(immediate_upstream_port)) { - u8 upstream_dsc; - - if (drm_dp_dpcd_read(&port->aux, - DP_DSC_SUPPORT, &endpoint_dsc, 1) != 1) + if (drm_dp_dpcd_read_data(&port->aux, + DP_DSC_SUPPORT, &endpoint_dsc, 1) < 0) return NULL; - if (drm_dp_dpcd_read(&port->aux, - DP_FEC_CAPABILITY, &endpoint_fec, 1) != 1) + if (drm_dp_dpcd_read_data(&port->aux, + DP_FEC_CAPABILITY, &endpoint_fec, 1) < 0) return NULL; - if (drm_dp_dpcd_read(&immediate_upstream_port->aux, - DP_DSC_SUPPORT, &upstream_dsc, 1) != 1) + if (drm_dp_dpcd_read_data(&immediate_upstream_port->aux, + DP_DSC_SUPPORT, &upstream_dsc, 1) < 0) return NULL; /* Enpoint decompression with DP-to-DP peer device */ @@ -6119,6 +6154,13 @@ struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port) if (drm_dp_has_quirk(&desc, DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD)) { u8 dpcd_ext[DP_RECEIVER_CAP_SIZE]; + if (drm_dp_dpcd_read_data(immediate_upstream_aux, + DP_DSC_SUPPORT, &upstream_dsc, 1) < 0) + return NULL; + + if (!(upstream_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED)) + return NULL; + if (drm_dp_read_dpcd_caps(immediate_upstream_aux, dpcd_ext) < 0) return NULL; @@ -6135,11 +6177,11 @@ struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port) * therefore the endpoint needs to be * both DSC and FEC capable. */ - if (drm_dp_dpcd_read(&port->aux, - DP_DSC_SUPPORT, &endpoint_dsc, 1) != 1) + if (drm_dp_dpcd_read_data(&port->aux, + DP_DSC_SUPPORT, &endpoint_dsc, 1) < 0) return NULL; - if (drm_dp_dpcd_read(&port->aux, - DP_FEC_CAPABILITY, &endpoint_fec, 1) != 1) + if (drm_dp_dpcd_read_data(&port->aux, + DP_FEC_CAPABILITY, &endpoint_fec, 1) < 0) return NULL; if ((endpoint_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED) && (endpoint_fec & DP_FEC_CAPABLE)) |
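Taken together, the newly exported drm_dp_mst_topology_queue_probe() and the reworked drm_dp_get_vc_payload_bw() (which no longer takes the topology manager argument) are driver-facing. Below is a minimal, hypothetical driver-side sketch of how the two might be combined after a link retrain; the function and parameter names are illustrative only and are not part of this patch.

#include <drm/drm_fixed.h>
#include <drm/display/drm_dp_mst_helper.h>

/*
 * Hypothetical helper, not from this patch: resync the MST topology after a
 * link retrain that crossed the UHBR/non-UHBR boundary and recompute the
 * per-timeslot VC payload bandwidth with the new helper signature.
 */
static fixed20_12 my_mst_relink(struct drm_dp_mst_topology_mgr *mgr,
				int link_rate, int lane_count,
				bool crossed_uhbr_boundary)
{
	/*
	 * The HW->SW topology state only goes stale when the link parameters
	 * changed underneath the branch devices (e.g. a switch between UHBR
	 * and non-UHBR rates); ordinary hotplugs are still reported through
	 * MST up-request notifications.
	 */
	if (crossed_uhbr_boundary)
		drm_dp_mst_topology_queue_probe(mgr);

	/* drm_dp_get_vc_payload_bw() no longer takes the mgr argument. */
	return drm_dp_get_vc_payload_bw(link_rate, lane_count);
}

The queued probe still runs from the manager's work item on system_long_wq, so existing flush paths (for instance the sync case in drm_dp_mst_topology_mgr_resume()) behave as before.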
