Diffstat (limited to 'drivers/thunderbolt/tunnel.c')
-rw-r--r--  drivers/thunderbolt/tunnel.c  981
1 file changed, 623 insertions(+), 358 deletions(-)
diff --git a/drivers/thunderbolt/tunnel.c b/drivers/thunderbolt/tunnel.c index a6810fb36860..9fa95c595ecc 100644 --- a/drivers/thunderbolt/tunnel.c +++ b/drivers/thunderbolt/tunnel.c @@ -21,12 +21,18 @@ #define TB_PCI_PATH_DOWN 0 #define TB_PCI_PATH_UP 1 +#define TB_PCI_PRIORITY 3 +#define TB_PCI_WEIGHT 1 + /* USB3 adapters use always HopID of 8 for both directions */ #define TB_USB3_HOPID 8 #define TB_USB3_PATH_DOWN 0 #define TB_USB3_PATH_UP 1 +#define TB_USB3_PRIORITY 3 +#define TB_USB3_WEIGHT 2 + /* DP adapters use HopID 8 for AUX and 9 for Video */ #define TB_DP_AUX_TX_HOPID 8 #define TB_DP_AUX_RX_HOPID 8 @@ -36,6 +42,12 @@ #define TB_DP_AUX_PATH_OUT 1 #define TB_DP_AUX_PATH_IN 2 +#define TB_DP_VIDEO_PRIORITY 1 +#define TB_DP_VIDEO_WEIGHT 1 + +#define TB_DP_AUX_PRIORITY 2 +#define TB_DP_AUX_WEIGHT 1 + /* Minimum number of credits needed for PCIe path */ #define TB_MIN_PCIE_CREDITS 6U /* @@ -46,6 +58,36 @@ /* Minimum number of credits for DMA path */ #define TB_MIN_DMA_CREDITS 1 +#define TB_DMA_PRIORITY 5 +#define TB_DMA_WEIGHT 1 + +/* + * Reserve additional bandwidth for USB 3.x and PCIe bulk traffic + * according to USB4 v2 Connection Manager guide. This ends up reserving + * 1500 Mb/s for PCIe and 3000 Mb/s for USB 3.x taking weights into + * account. + */ +#define USB4_V2_PCI_MIN_BANDWIDTH (1500 * TB_PCI_WEIGHT) +#define USB4_V2_USB3_MIN_BANDWIDTH (1500 * TB_USB3_WEIGHT) + +/* + * According to VESA spec, the DPRX negotiation shall compete in 5 + * seconds after tunnel is established. Since at least i915 can runtime + * suspend if there is nothing connected, and that it polls any new + * connections every 10 seconds, we use 12 seconds here. + * + * These are in ms. + */ +#define TB_DPRX_TIMEOUT 12000 +#define TB_DPRX_WAIT_TIMEOUT 25 +#define TB_DPRX_POLL_DELAY 50 + +static int dprx_timeout = TB_DPRX_TIMEOUT; +module_param(dprx_timeout, int, 0444); +MODULE_PARM_DESC(dprx_timeout, + "DPRX capability read timeout in ms, -1 waits forever (default: " + __MODULE_STRING(TB_DPRX_TIMEOUT) ")"); + static unsigned int dma_credits = TB_DMA_CREDITS; module_param(dma_credits, uint, 0444); MODULE_PARM_DESC(dma_credits, "specify custom credits for DMA tunnels (default: " @@ -58,26 +100,16 @@ MODULE_PARM_DESC(bw_alloc_mode, static const char * const tb_tunnel_names[] = { "PCI", "DP", "DMA", "USB3" }; -#define __TB_TUNNEL_PRINT(level, tunnel, fmt, arg...) \ - do { \ - struct tb_tunnel *__tunnel = (tunnel); \ - level(__tunnel->tb, "%llx:%u <-> %llx:%u (%s): " fmt, \ - tb_route(__tunnel->src_port->sw), \ - __tunnel->src_port->port, \ - tb_route(__tunnel->dst_port->sw), \ - __tunnel->dst_port->port, \ - tb_tunnel_names[__tunnel->type], \ - ## arg); \ - } while (0) - -#define tb_tunnel_WARN(tunnel, fmt, arg...) \ - __TB_TUNNEL_PRINT(tb_WARN, tunnel, fmt, ##arg) -#define tb_tunnel_warn(tunnel, fmt, arg...) \ - __TB_TUNNEL_PRINT(tb_warn, tunnel, fmt, ##arg) -#define tb_tunnel_info(tunnel, fmt, arg...) \ - __TB_TUNNEL_PRINT(tb_info, tunnel, fmt, ##arg) -#define tb_tunnel_dbg(tunnel, fmt, arg...) 
\ - __TB_TUNNEL_PRINT(tb_dbg, tunnel, fmt, ##arg) +static const char * const tb_event_names[] = { + [TB_TUNNEL_ACTIVATED] = "activated", + [TB_TUNNEL_CHANGED] = "changed", + [TB_TUNNEL_DEACTIVATED] = "deactivated", + [TB_TUNNEL_LOW_BANDWIDTH] = "low bandwidth", + [TB_TUNNEL_NO_BANDWIDTH] = "insufficient bandwidth", +}; + +/* Synchronizes kref_get()/put() of struct tb_tunnel */ +static DEFINE_MUTEX(tb_tunnel_lock); static inline unsigned int tb_usable_credits(const struct tb_port *port) { @@ -89,6 +121,8 @@ static inline unsigned int tb_usable_credits(const struct tb_port *port) * @port: Lane adapter to check * @max_dp_streams: If non-%NULL stores maximum number of simultaneous DP * streams possible through this lane adapter + * + * Return: Number of available credits. */ static unsigned int tb_available_credits(const struct tb_port *port, size_t *max_dp_streams) @@ -131,6 +165,16 @@ static unsigned int tb_available_credits(const struct tb_port *port, return credits > 0 ? credits : 0; } +static void tb_init_pm_support(struct tb_path_hop *hop) +{ + struct tb_port *out_port = hop->out_port; + struct tb_port *in_port = hop->in_port; + + if (tb_port_is_null(in_port) && tb_port_is_null(out_port) && + usb4_switch_version(in_port->sw) >= 2) + hop->pm_support = true; +} + static struct tb_tunnel *tb_tunnel_alloc(struct tb *tb, size_t npaths, enum tb_tunnel_type type) { @@ -142,7 +186,7 @@ static struct tb_tunnel *tb_tunnel_alloc(struct tb *tb, size_t npaths, tunnel->paths = kcalloc(npaths, sizeof(tunnel->paths[0]), GFP_KERNEL); if (!tunnel->paths) { - tb_tunnel_free(tunnel); + kfree(tunnel); return NULL; } @@ -150,26 +194,136 @@ static struct tb_tunnel *tb_tunnel_alloc(struct tb *tb, size_t npaths, tunnel->tb = tb; tunnel->npaths = npaths; tunnel->type = type; + kref_init(&tunnel->kref); return tunnel; } +static void tb_tunnel_get(struct tb_tunnel *tunnel) +{ + mutex_lock(&tb_tunnel_lock); + kref_get(&tunnel->kref); + mutex_unlock(&tb_tunnel_lock); +} + +static void tb_tunnel_destroy(struct kref *kref) +{ + struct tb_tunnel *tunnel = container_of(kref, typeof(*tunnel), kref); + int i; + + if (tunnel->destroy) + tunnel->destroy(tunnel); + + for (i = 0; i < tunnel->npaths; i++) { + if (tunnel->paths[i]) + tb_path_free(tunnel->paths[i]); + } + + kfree(tunnel->paths); + kfree(tunnel); +} + +void tb_tunnel_put(struct tb_tunnel *tunnel) +{ + mutex_lock(&tb_tunnel_lock); + kref_put(&tunnel->kref, tb_tunnel_destroy); + mutex_unlock(&tb_tunnel_lock); +} + +/** + * tb_tunnel_event() - Notify userspace about tunneling event + * @tb: Domain where the event occurred + * @event: Event that happened + * @type: Type of the tunnel in question + * @src_port: Tunnel source port (can be %NULL) + * @dst_port: Tunnel destination port (can be %NULL) + * + * Notifies userspace about tunneling @event in the domain. The tunnel + * does not need to exist (e.g the tunnel was not activated because + * there is not enough bandwidth). If the @src_port and @dst_port are + * given fill in full %TUNNEL_DETAILS environment variable. Otherwise + * uses the shorter one (just the tunnel type). 
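The tunnel lifetime handling introduced above is the standard kref pattern: tb_tunnel_lock serializes tb_tunnel_get()/tb_tunnel_put() so a concurrent put cannot drop the last reference between a lookup and a get. A minimal self-contained sketch of the same pattern (illustrative names, not from the driver):

#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct obj {
	struct kref kref;
};

static DEFINE_MUTEX(obj_lock);		/* plays the role of tb_tunnel_lock */

static void obj_release(struct kref *kref)
{
	kfree(container_of(kref, struct obj, kref));
}

static void obj_get(struct obj *o)
{
	mutex_lock(&obj_lock);
	kref_get(&o->kref);		/* cannot race with the final put */
	mutex_unlock(&obj_lock);
}

static void obj_put(struct obj *o)
{
	mutex_lock(&obj_lock);
	kref_put(&o->kref, obj_release);
	mutex_unlock(&obj_lock);
}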
+ */ +void tb_tunnel_event(struct tb *tb, enum tb_tunnel_event event, + enum tb_tunnel_type type, + const struct tb_port *src_port, + const struct tb_port *dst_port) +{ + char *envp[3] = { NULL }; + + if (WARN_ON_ONCE(event >= ARRAY_SIZE(tb_event_names))) + return; + if (WARN_ON_ONCE(type >= ARRAY_SIZE(tb_tunnel_names))) + return; + + envp[0] = kasprintf(GFP_KERNEL, "TUNNEL_EVENT=%s", tb_event_names[event]); + if (!envp[0]) + return; + + if (src_port != NULL && dst_port != NULL) { + envp[1] = kasprintf(GFP_KERNEL, "TUNNEL_DETAILS=%llx:%u <-> %llx:%u (%s)", + tb_route(src_port->sw), src_port->port, + tb_route(dst_port->sw), dst_port->port, + tb_tunnel_names[type]); + } else { + envp[1] = kasprintf(GFP_KERNEL, "TUNNEL_DETAILS=(%s)", + tb_tunnel_names[type]); + } + + if (envp[1]) + tb_domain_event(tb, envp); + + kfree(envp[1]); + kfree(envp[0]); +} + +static inline void tb_tunnel_set_active(struct tb_tunnel *tunnel, bool active) +{ + if (active) { + tunnel->state = TB_TUNNEL_ACTIVE; + tb_tunnel_event(tunnel->tb, TB_TUNNEL_ACTIVATED, tunnel->type, + tunnel->src_port, tunnel->dst_port); + } else { + tunnel->state = TB_TUNNEL_INACTIVE; + tb_tunnel_event(tunnel->tb, TB_TUNNEL_DEACTIVATED, tunnel->type, + tunnel->src_port, tunnel->dst_port); + } +} + +static inline void tb_tunnel_changed(struct tb_tunnel *tunnel) +{ + tb_tunnel_event(tunnel->tb, TB_TUNNEL_CHANGED, tunnel->type, + tunnel->src_port, tunnel->dst_port); +} + static int tb_pci_set_ext_encapsulation(struct tb_tunnel *tunnel, bool enable) { + struct tb_port *port = tb_upstream_port(tunnel->dst_port->sw); int ret; - /* Only supported of both routers are at least USB4 v2 */ - if (usb4_switch_version(tunnel->src_port->sw) < 2 || - usb4_switch_version(tunnel->dst_port->sw) < 2) + /* Only supported if both routers are at least USB4 v2 */ + if ((usb4_switch_version(tunnel->src_port->sw) < 2) || + (usb4_switch_version(tunnel->dst_port->sw) < 2)) + return 0; + + if (enable && tb_port_get_link_generation(port) < 4) return 0; ret = usb4_pci_port_set_ext_encapsulation(tunnel->src_port, enable); if (ret) return ret; + /* + * Downstream router could be unplugged so disable of encapsulation + * in upstream router is still possible. + */ ret = usb4_pci_port_set_ext_encapsulation(tunnel->dst_port, enable); - if (ret) - return ret; + if (ret) { + if (enable) + return ret; + if (ret != -ENODEV) + return ret; + } tb_tunnel_dbg(tunnel, "extended encapsulation %s\n", str_enabled_disabled(enable)); @@ -186,14 +340,21 @@ static int tb_pci_activate(struct tb_tunnel *tunnel, bool activate) return res; } - res = tb_pci_port_enable(tunnel->src_port, activate); + if (activate) + res = tb_pci_port_enable(tunnel->dst_port, activate); + else + res = tb_pci_port_enable(tunnel->src_port, activate); if (res) return res; - if (tb_port_is_pcie_up(tunnel->dst_port)) { - res = tb_pci_port_enable(tunnel->dst_port, activate); + + if (activate) { + res = tb_pci_port_enable(tunnel->src_port, activate); if (res) return res; + } else { + /* Downstream router could be unplugged */ + tb_pci_port_enable(tunnel->dst_port, activate); } return activate ? 
0 : tb_pci_set_ext_encapsulation(tunnel, activate); @@ -234,8 +395,8 @@ static int tb_pci_init_path(struct tb_path *path) path->egress_shared_buffer = TB_PATH_NONE; path->ingress_fc_enable = TB_PATH_ALL; path->ingress_shared_buffer = TB_PATH_NONE; - path->priority = 3; - path->weight = 1; + path->priority = TB_PCI_PRIORITY; + path->weight = TB_PCI_WEIGHT; path->drop_packages = 0; tb_path_for_each_hop(path, hop) { @@ -256,8 +417,9 @@ static int tb_pci_init_path(struct tb_path *path) * @alloc_hopid: Allocate HopIDs from visited ports * * If @down adapter is active, follows the tunnel to the PCIe upstream - * adapter and back. Returns the discovered tunnel or %NULL if there was - * no tunnel. + * adapter and back. + * + * Return: Pointer to &struct tb_tunnel or %NULL if there was no tunnel. */ struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down, bool alloc_hopid) @@ -323,7 +485,7 @@ struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down, err_deactivate: tb_tunnel_deactivate(tunnel); err_free: - tb_tunnel_free(tunnel); + tb_tunnel_put(tunnel); return NULL; } @@ -337,7 +499,7 @@ err_free: * Allocate a PCI tunnel. The ports must be of type TB_TYPE_PCIE_UP and * TB_TYPE_PCIE_DOWN. * - * Return: Returns a tb_tunnel on success or NULL on failure. + * Return: Pointer to @struct tb_tunnel or %NULL on failure. */ struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up, struct tb_port *down) @@ -372,10 +534,58 @@ struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up, return tunnel; err_free: - tb_tunnel_free(tunnel); + tb_tunnel_put(tunnel); return NULL; } +/** + * tb_tunnel_reserved_pci() - Amount of bandwidth to reserve for PCIe + * @port: Lane 0 adapter + * @reserved_up: Upstream bandwidth in Mb/s to reserve + * @reserved_down: Downstream bandwidth in Mb/s to reserve + * + * Can be called to any connected lane 0 adapter to find out how much + * bandwidth needs to be left in reserve for possible PCIe bulk traffic. + * + * Return: + * * %true - If there is something to be reserved. Writes the amount to + * @reserved_down/@reserved_up. + * * %false - Nothing to be reserved. Leaves @reserved_down/@reserved_up + * unmodified. 
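For illustration, this is how a connection manager might consume tb_tunnel_reserved_pci() when budgeting bandwidth for a new tunnel; adjust_for_pci() and its available_* parameters are made up for the sketch:

/* Subtract the PCIe bulk-traffic reservation on USB4 v2 (Gen 4) links */
static void adjust_for_pci(struct tb_port *port, int *available_up,
			   int *available_down)
{
	int reserved_up, reserved_down;

	if (tb_tunnel_reserved_pci(port, &reserved_up, &reserved_down)) {
		*available_up -= reserved_up;	/* 1500 * TB_PCI_WEIGHT Mb/s */
		*available_down -= reserved_down;
	}
}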
+ */ +bool tb_tunnel_reserved_pci(struct tb_port *port, int *reserved_up, + int *reserved_down) +{ + if (WARN_ON_ONCE(!port->remote)) + return false; + + if (!tb_acpi_may_tunnel_pcie()) + return false; + + if (tb_port_get_link_generation(port) < 4) + return false; + + /* Must have PCIe adapters */ + if (tb_is_upstream_port(port)) { + if (!tb_switch_find_port(port->sw, TB_TYPE_PCIE_UP)) + return false; + if (!tb_switch_find_port(port->remote->sw, TB_TYPE_PCIE_DOWN)) + return false; + } else { + if (!tb_switch_find_port(port->sw, TB_TYPE_PCIE_DOWN)) + return false; + if (!tb_switch_find_port(port->remote->sw, TB_TYPE_PCIE_UP)) + return false; + } + + *reserved_up = USB4_V2_PCI_MIN_BANDWIDTH; + *reserved_down = USB4_V2_PCI_MIN_BANDWIDTH; + + tb_port_dbg(port, "reserving %u/%u Mb/s for PCIe\n", *reserved_up, + *reserved_down); + return true; +} + static bool tb_dp_is_usb4(const struct tb_switch *sw) { /* Titan Ridge DP adapters need the same treatment as USB4 */ @@ -614,8 +824,9 @@ static int tb_dp_xchg_caps(struct tb_tunnel *tunnel) in_rate = tb_dp_cap_get_rate(in_dp_cap); in_lanes = tb_dp_cap_get_lanes(in_dp_cap); - tb_port_dbg(in, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n", - in_rate, in_lanes, tb_dp_bandwidth(in_rate, in_lanes)); + tb_tunnel_dbg(tunnel, + "DP IN maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n", + in_rate, in_lanes, tb_dp_bandwidth(in_rate, in_lanes)); /* * If the tunnel bandwidth is limited (max_bw is set) then see @@ -624,10 +835,11 @@ static int tb_dp_xchg_caps(struct tb_tunnel *tunnel) out_rate = tb_dp_cap_get_rate(out_dp_cap); out_lanes = tb_dp_cap_get_lanes(out_dp_cap); bw = tb_dp_bandwidth(out_rate, out_lanes); - tb_port_dbg(out, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n", - out_rate, out_lanes, bw); + tb_tunnel_dbg(tunnel, + "DP OUT maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n", + out_rate, out_lanes, bw); - if (in->sw->config.depth < out->sw->config.depth) + if (tb_tunnel_direction_downstream(tunnel)) max_bw = tunnel->max_down; else max_bw = tunnel->max_up; @@ -639,13 +851,14 @@ static int tb_dp_xchg_caps(struct tb_tunnel *tunnel) out_rate, out_lanes, &new_rate, &new_lanes); if (ret) { - tb_port_info(out, "not enough bandwidth for DP tunnel\n"); + tb_tunnel_info(tunnel, "not enough bandwidth\n"); return ret; } new_bw = tb_dp_bandwidth(new_rate, new_lanes); - tb_port_dbg(out, "bandwidth reduced to %u Mb/s x%u = %u Mb/s\n", - new_rate, new_lanes, new_bw); + tb_tunnel_dbg(tunnel, + "bandwidth reduced to %u Mb/s x%u = %u Mb/s\n", + new_rate, new_lanes, new_bw); /* * Set new rate and number of lanes before writing it to @@ -662,7 +875,7 @@ static int tb_dp_xchg_caps(struct tb_tunnel *tunnel) */ if (tb_route(out->sw) && tb_switch_is_titan_ridge(out->sw)) { out_dp_cap |= DP_COMMON_CAP_LTTPR_NS; - tb_port_dbg(out, "disabling LTTPR\n"); + tb_tunnel_dbg(tunnel, "disabling LTTPR\n"); } return tb_port_write(in, &out_dp_cap, TB_CFG_PORT, @@ -712,8 +925,8 @@ static int tb_dp_bandwidth_alloc_mode_enable(struct tb_tunnel *tunnel) lanes = min(in_lanes, out_lanes); tmp = tb_dp_bandwidth(rate, lanes); - tb_port_dbg(in, "non-reduced bandwidth %u Mb/s x%u = %u Mb/s\n", rate, - lanes, tmp); + tb_tunnel_dbg(tunnel, "non-reduced bandwidth %u Mb/s x%u = %u Mb/s\n", + rate, lanes, tmp); ret = usb4_dp_port_set_nrd(in, rate, lanes); if (ret) @@ -728,15 +941,15 @@ static int tb_dp_bandwidth_alloc_mode_enable(struct tb_tunnel *tunnel) rate = min(in_rate, out_rate); tmp = tb_dp_bandwidth(rate, lanes); - tb_port_dbg(in, - "maximum bandwidth through allocation mode 
%u Mb/s x%u = %u Mb/s\n", - rate, lanes, tmp); + tb_tunnel_dbg(tunnel, + "maximum bandwidth through allocation mode %u Mb/s x%u = %u Mb/s\n", + rate, lanes, tmp); for (granularity = 250; tmp / granularity > 255 && granularity <= 1000; granularity *= 2) ; - tb_port_dbg(in, "granularity %d Mb/s\n", granularity); + tb_tunnel_dbg(tunnel, "granularity %d Mb/s\n", granularity); /* * Returns -EINVAL if granularity above is outside of the @@ -751,12 +964,12 @@ static int tb_dp_bandwidth_alloc_mode_enable(struct tb_tunnel *tunnel) * max_up/down fields. For discovery we just read what the * estimation was set to. */ - if (in->sw->config.depth < out->sw->config.depth) + if (tb_tunnel_direction_downstream(tunnel)) estimated_bw = tunnel->max_down; else estimated_bw = tunnel->max_up; - tb_port_dbg(in, "estimated bandwidth %d Mb/s\n", estimated_bw); + tb_tunnel_dbg(tunnel, "estimated bandwidth %d Mb/s\n", estimated_bw); ret = usb4_dp_port_set_estimated_bandwidth(in, estimated_bw); if (ret) @@ -767,11 +980,11 @@ static int tb_dp_bandwidth_alloc_mode_enable(struct tb_tunnel *tunnel) if (ret) return ret; - tb_port_dbg(in, "bandwidth allocation mode enabled\n"); + tb_tunnel_dbg(tunnel, "bandwidth allocation mode enabled\n"); return 0; } -static int tb_dp_init(struct tb_tunnel *tunnel) +static int tb_dp_pre_activate(struct tb_tunnel *tunnel) { struct tb_port *in = tunnel->src_port; struct tb_switch *sw = in->sw; @@ -788,7 +1001,7 @@ static int tb_dp_init(struct tb_tunnel *tunnel) if (!usb4_dp_port_bandwidth_mode_supported(in)) return 0; - tb_port_dbg(in, "bandwidth allocation mode supported\n"); + tb_tunnel_dbg(tunnel, "bandwidth allocation mode supported\n"); ret = usb4_dp_port_set_cm_id(in, tb->index); if (ret) @@ -797,7 +1010,7 @@ static int tb_dp_init(struct tb_tunnel *tunnel) return tb_dp_bandwidth_alloc_mode_enable(tunnel); } -static void tb_dp_deinit(struct tb_tunnel *tunnel) +static void tb_dp_post_deactivate(struct tb_tunnel *tunnel) { struct tb_port *in = tunnel->src_port; @@ -805,7 +1018,97 @@ static void tb_dp_deinit(struct tb_tunnel *tunnel) return; if (usb4_dp_port_bandwidth_mode_enabled(in)) { usb4_dp_port_set_cm_bandwidth_mode_supported(in, false); - tb_port_dbg(in, "bandwidth allocation mode disabled\n"); + tb_tunnel_dbg(tunnel, "bandwidth allocation mode disabled\n"); + } +} + +static ktime_t dprx_timeout_to_ktime(int timeout_msec) +{ + return timeout_msec >= 0 ? + ktime_add_ms(ktime_get(), timeout_msec) : KTIME_MAX; +} + +static int tb_dp_wait_dprx(struct tb_tunnel *tunnel, int timeout_msec) +{ + ktime_t timeout = dprx_timeout_to_ktime(timeout_msec); + struct tb_port *in = tunnel->src_port; + + /* + * Wait for DPRX done. Normally it should be already set for + * active tunnel. 
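The granularity loop above starts at 250 Mb/s and doubles until the bandwidth divided by the granularity fits the 8-bit allocation fields (value <= 255). A worked example with an illustrative helper:

/* Smallest granularity, doubling from 250 Mb/s, with bw/granularity <= 255 */
static int dp_granularity(int bw)
{
	int granularity;

	for (granularity = 250;
	     bw / granularity > 255 && granularity <= 1000;
	     granularity *= 2)
		;
	return granularity;
}

/*
 * dp_granularity(17280) == 250	(17280 / 250 == 69, already fits)
 * dp_granularity(64000) == 500	(64000 / 250 == 256 > 255, 64000 / 500 == 128)
 */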
+ */ + do { + u32 val; + int ret; + + ret = tb_port_read(in, &val, TB_CFG_PORT, + in->cap_adap + DP_COMMON_CAP, 1); + if (ret) + return ret; + + if (val & DP_COMMON_CAP_DPRX_DONE) + return 0; + + usleep_range(100, 150); + } while (ktime_before(ktime_get(), timeout)); + + tb_tunnel_dbg(tunnel, "DPRX read timeout\n"); + return -ETIMEDOUT; +} + +static void tb_dp_dprx_work(struct work_struct *work) +{ + struct tb_tunnel *tunnel = container_of(work, typeof(*tunnel), dprx_work.work); + struct tb *tb = tunnel->tb; + + if (!tunnel->dprx_canceled) { + mutex_lock(&tb->lock); + if (tb_dp_is_usb4(tunnel->src_port->sw) && + tb_dp_wait_dprx(tunnel, TB_DPRX_WAIT_TIMEOUT)) { + if (ktime_before(ktime_get(), tunnel->dprx_timeout)) { + queue_delayed_work(tb->wq, &tunnel->dprx_work, + msecs_to_jiffies(TB_DPRX_POLL_DELAY)); + mutex_unlock(&tb->lock); + return; + } + } else { + tb_tunnel_set_active(tunnel, true); + } + mutex_unlock(&tb->lock); + } + + if (tunnel->callback) + tunnel->callback(tunnel, tunnel->callback_data); + tb_tunnel_put(tunnel); +} + +static int tb_dp_dprx_start(struct tb_tunnel *tunnel) +{ + /* + * Bump up the reference to keep the tunnel around. It will be + * dropped in tb_dp_dprx_stop() once the tunnel is deactivated. + */ + tb_tunnel_get(tunnel); + + tunnel->dprx_started = true; + + if (tunnel->callback) { + tunnel->dprx_timeout = dprx_timeout_to_ktime(dprx_timeout); + queue_delayed_work(tunnel->tb->wq, &tunnel->dprx_work, 0); + return -EINPROGRESS; + } + + return tb_dp_is_usb4(tunnel->src_port->sw) ? + tb_dp_wait_dprx(tunnel, dprx_timeout) : 0; +} + +static void tb_dp_dprx_stop(struct tb_tunnel *tunnel) +{ + if (tunnel->dprx_started) { + tunnel->dprx_started = false; + tunnel->dprx_canceled = true; + if (cancel_delayed_work(&tunnel->dprx_work)) + tb_tunnel_put(tunnel); } } @@ -830,6 +1133,7 @@ static int tb_dp_activate(struct tb_tunnel *tunnel, bool active) paths[TB_DP_AUX_PATH_IN]->hops[0].in_hop_index, paths[TB_DP_AUX_PATH_OUT]->hops[last].next_hop_index); } else { + tb_dp_dprx_stop(tunnel); tb_dp_port_hpd_clear(tunnel->src_port); tb_dp_port_set_hops(tunnel->src_port, 0, 0, 0); if (tb_port_is_dpout(tunnel->dst_port)) @@ -840,24 +1144,34 @@ static int tb_dp_activate(struct tb_tunnel *tunnel, bool active) if (ret) return ret; - if (tb_port_is_dpout(tunnel->dst_port)) - return tb_dp_port_enable(tunnel->dst_port, active); + if (tb_port_is_dpout(tunnel->dst_port)) { + ret = tb_dp_port_enable(tunnel->dst_port, active); + if (ret) + return ret; + } - return 0; + return active ? tb_dp_dprx_start(tunnel) : 0; } -/* max_bw is rounded up to next granularity */ +/** + * tb_dp_bandwidth_mode_maximum_bandwidth() - Maximum possible bandwidth + * @tunnel: DP tunnel to check + * @max_bw_rounded: Maximum bandwidth in Mb/s rounded up to the next granularity + * + * Return: Maximum possible bandwidth for this tunnel in Mb/s, negative errno + * in case of failure. + */ static int tb_dp_bandwidth_mode_maximum_bandwidth(struct tb_tunnel *tunnel, - int *max_bw) + int *max_bw_rounded) { struct tb_port *in = tunnel->src_port; - int ret, rate, lanes, nrd_bw; + int ret, rate, lanes, max_bw; u32 cap; /* * DP IN adapter DP_LOCAL_CAP gets updated to the lowest AUX - * read parameter values so this so we can use this to determine - * the maximum possible bandwidth over this link. + * read parameter values so we can use this to determine the + * maximum possible bandwidth over this link. * * See USB4 v2 spec 1.0 10.4.4.5. 
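The DPRX polling above relies on a simple ownership rule for the tunnel reference taken in tb_dp_dprx_start(): either the work function runs and drops the reference, or whoever cancels the still-pending work drops it. Condensed from the code above:

static void start_dprx_poll(struct tb_tunnel *tunnel)
{
	tb_tunnel_get(tunnel);		/* reference owned by the queued work */
	queue_delayed_work(tunnel->tb->wq, &tunnel->dprx_work, 0);
}

static void stop_dprx_poll(struct tb_tunnel *tunnel)
{
	/*
	 * If the work was still pending it never ran, so drop its
	 * reference here; otherwise tb_dp_dprx_work() has run (or is
	 * running) and does the tb_tunnel_put() itself.
	 */
	if (cancel_delayed_work(&tunnel->dprx_work))
		tb_tunnel_put(tunnel);
}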
*/ @@ -867,41 +1181,26 @@ static int tb_dp_bandwidth_mode_maximum_bandwidth(struct tb_tunnel *tunnel, return ret; rate = tb_dp_cap_get_rate_ext(cap); - if (tb_dp_is_uhbr_rate(rate)) { - /* - * When UHBR is used there is no reduction in lanes so - * we can use this directly. - */ - lanes = tb_dp_cap_get_lanes(cap); - } else { - /* - * If there is no UHBR supported then check the - * non-reduced rate and lanes. - */ - ret = usb4_dp_port_nrd(in, &rate, &lanes); - if (ret) - return ret; - } + lanes = tb_dp_cap_get_lanes(cap); - nrd_bw = tb_dp_bandwidth(rate, lanes); + max_bw = tb_dp_bandwidth(rate, lanes); - if (max_bw) { + if (max_bw_rounded) { ret = usb4_dp_port_granularity(in); if (ret < 0) return ret; - *max_bw = roundup(nrd_bw, ret); + *max_bw_rounded = roundup(max_bw, ret); } - return nrd_bw; + return max_bw; } static int tb_dp_bandwidth_mode_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up, int *consumed_down) { - struct tb_port *out = tunnel->dst_port; struct tb_port *in = tunnel->src_port; - int ret, allocated_bw, max_bw; + int ret, allocated_bw, max_bw_rounded; if (!usb4_dp_port_bandwidth_mode_enabled(in)) return -EOPNOTSUPP; @@ -915,16 +1214,13 @@ static int tb_dp_bandwidth_mode_consumed_bandwidth(struct tb_tunnel *tunnel, return ret; allocated_bw = ret; - ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw); + ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw_rounded); if (ret < 0) return ret; - if (allocated_bw == max_bw) + if (allocated_bw == max_bw_rounded) allocated_bw = ret; - tb_port_dbg(in, "consumed bandwidth through allocation mode %d Mb/s\n", - allocated_bw); - - if (in->sw->config.depth < out->sw->config.depth) { + if (tb_tunnel_direction_downstream(tunnel)) { *consumed_up = 0; *consumed_down = allocated_bw; } else { @@ -938,7 +1234,6 @@ static int tb_dp_bandwidth_mode_consumed_bandwidth(struct tb_tunnel *tunnel, static int tb_dp_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up, int *allocated_down) { - struct tb_port *out = tunnel->dst_port; struct tb_port *in = tunnel->src_port; /* @@ -946,20 +1241,21 @@ static int tb_dp_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up * Otherwise we read it from the DPRX. 
*/ if (usb4_dp_port_bandwidth_mode_enabled(in) && tunnel->bw_mode) { - int ret, allocated_bw, max_bw; + int ret, allocated_bw, max_bw_rounded; ret = usb4_dp_port_allocated_bandwidth(in); if (ret < 0) return ret; allocated_bw = ret; - ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw); + ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, + &max_bw_rounded); if (ret < 0) return ret; - if (allocated_bw == max_bw) + if (allocated_bw == max_bw_rounded) allocated_bw = ret; - if (in->sw->config.depth < out->sw->config.depth) { + if (tb_tunnel_direction_downstream(tunnel)) { *allocated_up = 0; *allocated_down = allocated_bw; } else { @@ -976,26 +1272,25 @@ static int tb_dp_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up static int tb_dp_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up, int *alloc_down) { - struct tb_port *out = tunnel->dst_port; struct tb_port *in = tunnel->src_port; - int max_bw, ret, tmp; + int max_bw_rounded, ret, tmp; if (!usb4_dp_port_bandwidth_mode_enabled(in)) return -EOPNOTSUPP; - ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw); + ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw_rounded); if (ret < 0) return ret; - if (in->sw->config.depth < out->sw->config.depth) { - tmp = min(*alloc_down, max_bw); + if (tb_tunnel_direction_downstream(tunnel)) { + tmp = min(*alloc_down, max_bw_rounded); ret = usb4_dp_port_allocate_bandwidth(in, tmp); if (ret) return ret; *alloc_down = tmp; *alloc_up = 0; } else { - tmp = min(*alloc_up, max_bw); + tmp = min(*alloc_up, max_bw_rounded); ret = usb4_dp_port_allocate_bandwidth(in, tmp); if (ret) return ret; @@ -1006,45 +1301,9 @@ static int tb_dp_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up, /* Now we can use BW mode registers to figure out the bandwidth */ /* TODO: need to handle discovery too */ tunnel->bw_mode = true; - - tb_port_dbg(in, "allocated bandwidth through allocation mode %d Mb/s\n", - tmp); return 0; } -static int tb_dp_read_dprx(struct tb_tunnel *tunnel, u32 *rate, u32 *lanes, - int timeout_msec) -{ - ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec); - struct tb_port *in = tunnel->src_port; - - /* - * Wait for DPRX done. Normally it should be already set for - * active tunnel. 
- */ - do { - u32 val; - int ret; - - ret = tb_port_read(in, &val, TB_CFG_PORT, - in->cap_adap + DP_COMMON_CAP, 1); - if (ret) - return ret; - - if (val & DP_COMMON_CAP_DPRX_DONE) { - *rate = tb_dp_cap_get_rate(val); - *lanes = tb_dp_cap_get_lanes(val); - - tb_port_dbg(in, "consumed bandwidth through DPRX %d Mb/s\n", - tb_dp_bandwidth(*rate, *lanes)); - return 0; - } - usleep_range(100, 150); - } while (ktime_before(ktime_get(), timeout)); - - return -ETIMEDOUT; -} - /* Read cap from tunnel DP IN */ static int tb_dp_read_cap(struct tb_tunnel *tunnel, unsigned int cap, u32 *rate, u32 *lanes) @@ -1056,6 +1315,7 @@ static int tb_dp_read_cap(struct tb_tunnel *tunnel, unsigned int cap, u32 *rate, switch (cap) { case DP_LOCAL_CAP: case DP_REMOTE_CAP: + case DP_COMMON_CAP: break; default: @@ -1073,26 +1333,22 @@ static int tb_dp_read_cap(struct tb_tunnel *tunnel, unsigned int cap, u32 *rate, *rate = tb_dp_cap_get_rate(val); *lanes = tb_dp_cap_get_lanes(val); - - tb_port_dbg(in, "bandwidth from %#x capability %d Mb/s\n", cap, - tb_dp_bandwidth(*rate, *lanes)); return 0; } static int tb_dp_maximum_bandwidth(struct tb_tunnel *tunnel, int *max_up, int *max_down) { - struct tb_port *in = tunnel->src_port; int ret; - if (!usb4_dp_port_bandwidth_mode_enabled(in)) + if (!usb4_dp_port_bandwidth_mode_enabled(tunnel->src_port)) return -EOPNOTSUPP; ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, NULL); if (ret < 0) return ret; - if (in->sw->config.depth < tunnel->dst_port->sw->config.depth) { + if (tb_tunnel_direction_downstream(tunnel)) { *max_up = 0; *max_down = ret; } else { @@ -1106,36 +1362,41 @@ static int tb_dp_maximum_bandwidth(struct tb_tunnel *tunnel, int *max_up, static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up, int *consumed_down) { - struct tb_port *in = tunnel->src_port; - const struct tb_switch *sw = in->sw; + const struct tb_switch *sw = tunnel->src_port->sw; u32 rate = 0, lanes = 0; int ret; if (tb_dp_is_usb4(sw)) { - /* - * On USB4 routers check if the bandwidth allocation - * mode is enabled first and then read the bandwidth - * through those registers. - */ - ret = tb_dp_bandwidth_mode_consumed_bandwidth(tunnel, consumed_up, - consumed_down); - if (ret < 0) { - if (ret != -EOPNOTSUPP) - return ret; - } else if (!ret) { - return 0; - } - /* - * Then see if the DPRX negotiation is ready and if yes - * return that bandwidth (it may be smaller than the - * reduced one). Otherwise return the remote (possibly - * reduced) caps. - */ - ret = tb_dp_read_dprx(tunnel, &rate, &lanes, 150); + ret = tb_dp_wait_dprx(tunnel, 0); if (ret) { - if (ret == -ETIMEDOUT) + if (ret == -ETIMEDOUT) { + /* + * While we wait for DPRX complete the + * tunnel consumes as much as it had + * been reserved initially. + */ ret = tb_dp_read_cap(tunnel, DP_REMOTE_CAP, &rate, &lanes); + if (ret) + return ret; + } else { + return ret; + } + } else { + /* + * On USB4 routers check if the bandwidth allocation + * mode is enabled first and then read the bandwidth + * through those registers. 
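The reworked tb_dp_consumed_bandwidth() above boils down to the following order of preference on USB4 routers; this condensation is illustrative and omits the bandwidth allocation mode branch:

static int dp_consumed_params(struct tb_tunnel *tunnel, u32 *rate, u32 *lanes)
{
	/* DPRX still negotiating: report what was initially reserved */
	if (tb_dp_wait_dprx(tunnel, 0))
		return tb_dp_read_cap(tunnel, DP_REMOTE_CAP, rate, lanes);

	/* Negotiation done: report the negotiated (common) parameters */
	return tb_dp_read_cap(tunnel, DP_COMMON_CAP, rate, lanes);
}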
+ */ + ret = tb_dp_bandwidth_mode_consumed_bandwidth(tunnel, consumed_up, + consumed_down); + if (ret < 0) { + if (ret != -EOPNOTSUPP) + return ret; + } else if (!ret) { + return 0; + } + ret = tb_dp_read_cap(tunnel, DP_COMMON_CAP, &rate, &lanes); if (ret) return ret; } @@ -1150,7 +1411,7 @@ static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up, return 0; } - if (in->sw->config.depth < tunnel->dst_port->sw->config.depth) { + if (tb_tunnel_direction_downstream(tunnel)) { *consumed_up = 0; *consumed_down = tb_dp_bandwidth(rate, lanes); } else { @@ -1172,7 +1433,7 @@ static void tb_dp_init_aux_credits(struct tb_path_hop *hop) hop->initial_credits = 1; } -static void tb_dp_init_aux_path(struct tb_path *path) +static void tb_dp_init_aux_path(struct tb_path *path, bool pm_support) { struct tb_path_hop *hop; @@ -1180,11 +1441,14 @@ static void tb_dp_init_aux_path(struct tb_path *path) path->egress_shared_buffer = TB_PATH_NONE; path->ingress_fc_enable = TB_PATH_ALL; path->ingress_shared_buffer = TB_PATH_NONE; - path->priority = 2; - path->weight = 1; + path->priority = TB_DP_AUX_PRIORITY; + path->weight = TB_DP_AUX_WEIGHT; - tb_path_for_each_hop(path, hop) + tb_path_for_each_hop(path, hop) { tb_dp_init_aux_credits(hop); + if (pm_support) + tb_init_pm_support(hop); + } } static int tb_dp_init_video_credits(struct tb_path_hop *hop) @@ -1216,7 +1480,7 @@ static int tb_dp_init_video_credits(struct tb_path_hop *hop) return 0; } -static int tb_dp_init_video_path(struct tb_path *path) +static int tb_dp_init_video_path(struct tb_path *path, bool pm_support) { struct tb_path_hop *hop; @@ -1224,8 +1488,8 @@ static int tb_dp_init_video_path(struct tb_path *path) path->egress_shared_buffer = TB_PATH_NONE; path->ingress_fc_enable = TB_PATH_NONE; path->ingress_shared_buffer = TB_PATH_NONE; - path->priority = 1; - path->weight = 1; + path->priority = TB_DP_VIDEO_PRIORITY; + path->weight = TB_DP_VIDEO_WEIGHT; tb_path_for_each_hop(path, hop) { int ret; @@ -1233,6 +1497,8 @@ static int tb_dp_init_video_path(struct tb_path *path) ret = tb_dp_init_video_credits(hop); if (ret) return ret; + if (pm_support) + tb_init_pm_support(hop); } return 0; @@ -1253,10 +1519,9 @@ static void tb_dp_dump(struct tb_tunnel *tunnel) rate = tb_dp_cap_get_rate(dp_cap); lanes = tb_dp_cap_get_lanes(dp_cap); - tb_port_dbg(in, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n", - rate, lanes, tb_dp_bandwidth(rate, lanes)); - - out = tunnel->dst_port; + tb_tunnel_dbg(tunnel, + "DP IN maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n", + rate, lanes, tb_dp_bandwidth(rate, lanes)); if (tb_port_read(out, &dp_cap, TB_CFG_PORT, out->cap_adap + DP_LOCAL_CAP, 1)) @@ -1265,8 +1530,9 @@ static void tb_dp_dump(struct tb_tunnel *tunnel) rate = tb_dp_cap_get_rate(dp_cap); lanes = tb_dp_cap_get_lanes(dp_cap); - tb_port_dbg(out, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n", - rate, lanes, tb_dp_bandwidth(rate, lanes)); + tb_tunnel_dbg(tunnel, + "DP OUT maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n", + rate, lanes, tb_dp_bandwidth(rate, lanes)); if (tb_port_read(in, &dp_cap, TB_CFG_PORT, in->cap_adap + DP_REMOTE_CAP, 1)) @@ -1275,8 +1541,8 @@ static void tb_dp_dump(struct tb_tunnel *tunnel) rate = tb_dp_cap_get_rate(dp_cap); lanes = tb_dp_cap_get_lanes(dp_cap); - tb_port_dbg(in, "reduced bandwidth %u Mb/s x%u = %u Mb/s\n", - rate, lanes, tb_dp_bandwidth(rate, lanes)); + tb_tunnel_dbg(tunnel, "reduced bandwidth %u Mb/s x%u = %u Mb/s\n", + rate, lanes, tb_dp_bandwidth(rate, lanes)); } /** @@ -1289,7 +1555,7 @@ 
static void tb_dp_dump(struct tb_tunnel *tunnel) * and back. Returns the discovered tunnel or %NULL if there was no * tunnel. * - * Return: DP tunnel or %NULL if no tunnel found. + * Return: Pointer to &struct tb_tunnel or %NULL if no tunnel found. */ struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in, bool alloc_hopid) @@ -1305,9 +1571,9 @@ struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in, if (!tunnel) return NULL; - tunnel->init = tb_dp_init; - tunnel->deinit = tb_dp_deinit; + tunnel->pre_activate = tb_dp_pre_activate; tunnel->activate = tb_dp_activate; + tunnel->post_deactivate = tb_dp_post_deactivate; tunnel->maximum_bandwidth = tb_dp_maximum_bandwidth; tunnel->allocated_bandwidth = tb_dp_allocated_bandwidth; tunnel->alloc_bandwidth = tb_dp_alloc_bandwidth; @@ -1322,7 +1588,7 @@ struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in, goto err_free; } tunnel->paths[TB_DP_VIDEO_PATH_OUT] = path; - if (tb_dp_init_video_path(tunnel->paths[TB_DP_VIDEO_PATH_OUT])) + if (tb_dp_init_video_path(tunnel->paths[TB_DP_VIDEO_PATH_OUT], false)) goto err_free; path = tb_path_discover(in, TB_DP_AUX_TX_HOPID, NULL, -1, NULL, "AUX TX", @@ -1330,14 +1596,14 @@ struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in, if (!path) goto err_deactivate; tunnel->paths[TB_DP_AUX_PATH_OUT] = path; - tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_OUT]); + tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_OUT], false); path = tb_path_discover(tunnel->dst_port, -1, in, TB_DP_AUX_RX_HOPID, &port, "AUX RX", alloc_hopid); if (!path) goto err_deactivate; tunnel->paths[TB_DP_AUX_PATH_IN] = path; - tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_IN]); + tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_IN], false); /* Validate that the tunnel is complete */ if (!tb_port_is_dpout(tunnel->dst_port)) { @@ -1364,7 +1630,7 @@ struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in, err_deactivate: tb_tunnel_deactivate(tunnel); err_free: - tb_tunnel_free(tunnel); + tb_tunnel_put(tunnel); return NULL; } @@ -1375,23 +1641,33 @@ err_free: * @in: DP in adapter port * @out: DP out adapter port * @link_nr: Preferred lane adapter when the link is not bonded - * @max_up: Maximum available upstream bandwidth for the DP tunnel (%0 - * if not limited) - * @max_down: Maximum available downstream bandwidth for the DP tunnel - * (%0 if not limited) + * @max_up: Maximum available upstream bandwidth for the DP tunnel. + * %0 if no available bandwidth. + * @max_down: Maximum available downstream bandwidth for the DP tunnel. + * %0 if no available bandwidth. + * @callback: Optional callback that is called when the DP tunnel is + * fully activated (or there is an error) + * @callback_data: Optional data for @callback * * Allocates a tunnel between @in and @out that is capable of tunneling - * Display Port traffic. + * Display Port traffic. If @callback is not %NULL it will be called + * after tb_tunnel_activate() once the tunnel has been fully activated. + * It can call tb_tunnel_is_active() to check if activation was + * successful (or if it returns %false there was some sort of issue). + * The @callback is called without @tb->lock held. * - * Return: Returns a tb_tunnel on success or NULL on failure. + * Return: Pointer to @struct tb_tunnel or %NULL in case of failure. 
*/ struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in, struct tb_port *out, int link_nr, - int max_up, int max_down) + int max_up, int max_down, + void (*callback)(struct tb_tunnel *, void *), + void *callback_data) { struct tb_tunnel *tunnel; struct tb_path **paths; struct tb_path *path; + bool pm_support; if (WARN_ON(!in->cap_adap || !out->cap_adap)) return NULL; @@ -1400,9 +1676,9 @@ struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in, if (!tunnel) return NULL; - tunnel->init = tb_dp_init; - tunnel->deinit = tb_dp_deinit; + tunnel->pre_activate = tb_dp_pre_activate; tunnel->activate = tb_dp_activate; + tunnel->post_deactivate = tb_dp_post_deactivate; tunnel->maximum_bandwidth = tb_dp_maximum_bandwidth; tunnel->allocated_bandwidth = tb_dp_allocated_bandwidth; tunnel->alloc_bandwidth = tb_dp_alloc_bandwidth; @@ -1411,34 +1687,38 @@ struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in, tunnel->dst_port = out; tunnel->max_up = max_up; tunnel->max_down = max_down; + tunnel->callback = callback; + tunnel->callback_data = callback_data; + INIT_DELAYED_WORK(&tunnel->dprx_work, tb_dp_dprx_work); paths = tunnel->paths; + pm_support = usb4_switch_version(in->sw) >= 2; path = tb_path_alloc(tb, in, TB_DP_VIDEO_HOPID, out, TB_DP_VIDEO_HOPID, link_nr, "Video"); if (!path) goto err_free; - tb_dp_init_video_path(path); + tb_dp_init_video_path(path, pm_support); paths[TB_DP_VIDEO_PATH_OUT] = path; path = tb_path_alloc(tb, in, TB_DP_AUX_TX_HOPID, out, TB_DP_AUX_TX_HOPID, link_nr, "AUX TX"); if (!path) goto err_free; - tb_dp_init_aux_path(path); + tb_dp_init_aux_path(path, pm_support); paths[TB_DP_AUX_PATH_OUT] = path; path = tb_path_alloc(tb, out, TB_DP_AUX_RX_HOPID, in, TB_DP_AUX_RX_HOPID, link_nr, "AUX RX"); if (!path) goto err_free; - tb_dp_init_aux_path(path); + tb_dp_init_aux_path(path, pm_support); paths[TB_DP_AUX_PATH_IN] = path; return tunnel; err_free: - tb_tunnel_free(tunnel); + tb_tunnel_put(tunnel); return NULL; } @@ -1497,14 +1777,14 @@ static int tb_dma_init_rx_path(struct tb_path *path, unsigned int credits) path->ingress_fc_enable = TB_PATH_ALL; path->egress_shared_buffer = TB_PATH_NONE; path->ingress_shared_buffer = TB_PATH_NONE; - path->priority = 5; - path->weight = 1; + path->priority = TB_DMA_PRIORITY; + path->weight = TB_DMA_WEIGHT; path->clear_fc = true; /* * First lane adapter is the one connected to the remote host. - * We don't tunnel other traffic over this link so can use all - * the credits (except the ones reserved for control traffic). + * We don't tunnel other traffic over this link so we can use + * all the credits (except the ones reserved for control traffic). 
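A hypothetical caller of the new asynchronous DP activation path could look like the sketch below; dp_tunnel_done(), establish_dp() and the bandwidth numbers are invented for illustration:

static void dp_tunnel_done(struct tb_tunnel *tunnel, void *data)
{
	/* Runs without tb->lock once DPRX negotiation finished or failed */
	if (!tb_tunnel_is_active(tunnel))
		tb_tunnel_warn(tunnel, "activation failed\n");
}

static int establish_dp(struct tb *tb, struct tb_port *in, struct tb_port *out)
{
	struct tb_tunnel *tunnel;
	int ret;

	tunnel = tb_tunnel_alloc_dp(tb, in, out, 0, 28500, 28500,
				    dp_tunnel_done, NULL);
	if (!tunnel)
		return -ENOMEM;

	ret = tb_tunnel_activate(tunnel);
	if (ret && ret != -EINPROGRESS) {
		tb_tunnel_put(tunnel);
		return ret;
	}
	return 0;	/* -EINPROGRESS completes via dp_tunnel_done() */
}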
*/ hop = &path->hops[0]; tmp = min(tb_usable_credits(hop->in_port), credits); @@ -1531,8 +1811,8 @@ static int tb_dma_init_tx_path(struct tb_path *path, unsigned int credits) path->ingress_fc_enable = TB_PATH_ALL; path->egress_shared_buffer = TB_PATH_NONE; path->ingress_shared_buffer = TB_PATH_NONE; - path->priority = 5; - path->weight = 1; + path->priority = TB_DMA_PRIORITY; + path->weight = TB_DMA_WEIGHT; path->clear_fc = true; tb_path_for_each_hop(path, hop) { @@ -1558,7 +1838,7 @@ static void tb_dma_release_credits(struct tb_path_hop *hop) } } -static void tb_dma_deinit_path(struct tb_path *path) +static void tb_dma_destroy_path(struct tb_path *path) { struct tb_path_hop *hop; @@ -1566,14 +1846,14 @@ static void tb_dma_deinit_path(struct tb_path *path) tb_dma_release_credits(hop); } -static void tb_dma_deinit(struct tb_tunnel *tunnel) +static void tb_dma_destroy(struct tb_tunnel *tunnel) { int i; for (i = 0; i < tunnel->npaths; i++) { if (!tunnel->paths[i]) continue; - tb_dma_deinit_path(tunnel->paths[i]); + tb_dma_destroy_path(tunnel->paths[i]); } } @@ -1589,7 +1869,7 @@ static void tb_dma_deinit(struct tb_tunnel *tunnel) * @receive_ring: NHI ring number used to receive packets from the * other domain. Set to %-1 if RX path is not needed. * - * Return: Returns a tb_tunnel on success or NULL on failure. + * Return: Pointer to @struct tb_tunnel or %NULL in case of failure. */ struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi, struct tb_port *dst, int transmit_path, @@ -1619,7 +1899,7 @@ struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi, tunnel->src_port = nhi; tunnel->dst_port = dst; - tunnel->deinit = tb_dma_deinit; + tunnel->destroy = tb_dma_destroy; credits = min_not_zero(dma_credits, nhi->sw->max_dma_credits); @@ -1650,7 +1930,7 @@ struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi, return tunnel; err_free: - tb_tunnel_free(tunnel); + tb_tunnel_put(tunnel); return NULL; } @@ -1666,7 +1946,8 @@ err_free: * * This function can be used to match specific DMA tunnel, if there are * multiple DMA tunnels going through the same XDomain connection. - * Returns true if there is match and false otherwise. + * + * Return: %true if there is a match, %false otherwise. */ bool tb_tunnel_match_dma(const struct tb_tunnel *tunnel, int transmit_path, int transmit_ring, int receive_path, int receive_ring) @@ -1731,7 +2012,7 @@ static int tb_usb3_max_link_rate(struct tb_port *up, struct tb_port *down) return min(up_max_rate, down_max_rate); } -static int tb_usb3_init(struct tb_tunnel *tunnel) +static int tb_usb3_pre_activate(struct tb_tunnel *tunnel) { tb_tunnel_dbg(tunnel, "allocating initial bandwidth %d/%d Mb/s\n", tunnel->allocated_up, tunnel->allocated_down); @@ -1758,14 +2039,23 @@ static int tb_usb3_activate(struct tb_tunnel *tunnel, bool activate) static int tb_usb3_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up, int *consumed_down) { - int pcie_enabled = tb_acpi_may_tunnel_pcie(); + struct tb_port *port = tb_upstream_port(tunnel->dst_port->sw); + int pcie_weight = tb_acpi_may_tunnel_pcie() ? TB_PCI_WEIGHT : 0; /* * PCIe tunneling, if enabled, affects the USB3 bandwidth so - * take that it into account here. + * take that into account here. 
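In numbers, with TB_USB3_WEIGHT = 2 and TB_PCI_WEIGHT = 1 the accounting above scales the allocation by 3/2 whenever PCIe tunneling is enabled; a simplified helper (the real code applies the floor only on Gen 4 links):

static int usb3_consumed(int allocated, bool pcie_enabled, bool gen4)
{
	int pcie_weight = pcie_enabled ? TB_PCI_WEIGHT : 0;
	int consumed = allocated * (TB_USB3_WEIGHT + pcie_weight) /
		       TB_USB3_WEIGHT;

	/* e.g. allocated = 4000 Mb/s with PCIe enabled: 4000 * 3 / 2 = 6000 */
	if (gen4)	/* floor: USB4_V2_USB3_MIN_BANDWIDTH = 1500 * 2 = 3000 */
		consumed = max(consumed, USB4_V2_USB3_MIN_BANDWIDTH);
	return consumed;
}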
*/ - *consumed_up = tunnel->allocated_up * (3 + pcie_enabled) / 3; - *consumed_down = tunnel->allocated_down * (3 + pcie_enabled) / 3; + *consumed_up = tunnel->allocated_up * + (TB_USB3_WEIGHT + pcie_weight) / TB_USB3_WEIGHT; + *consumed_down = tunnel->allocated_down * + (TB_USB3_WEIGHT + pcie_weight) / TB_USB3_WEIGHT; + + if (tb_port_get_link_generation(port) >= 4) { + *consumed_up = max(*consumed_up, USB4_V2_USB3_MIN_BANDWIDTH); + *consumed_down = max(*consumed_down, USB4_V2_USB3_MIN_BANDWIDTH); + } + return 0; } @@ -1790,17 +2080,10 @@ static void tb_usb3_reclaim_available_bandwidth(struct tb_tunnel *tunnel, { int ret, max_rate, allocate_up, allocate_down; - ret = usb4_usb3_port_actual_link_rate(tunnel->src_port); + ret = tb_usb3_max_link_rate(tunnel->dst_port, tunnel->src_port); if (ret < 0) { - tb_tunnel_warn(tunnel, "failed to read actual link rate\n"); + tb_tunnel_warn(tunnel, "failed to read maximum link rate\n"); return; - } else if (!ret) { - /* Use maximum link rate if the link valid is not set */ - ret = tb_usb3_max_link_rate(tunnel->dst_port, tunnel->src_port); - if (ret < 0) { - tb_tunnel_warn(tunnel, "failed to read maximum link rate\n"); - return; - } } /* @@ -1871,8 +2154,8 @@ static void tb_usb3_init_path(struct tb_path *path) path->egress_shared_buffer = TB_PATH_NONE; path->ingress_fc_enable = TB_PATH_ALL; path->ingress_shared_buffer = TB_PATH_NONE; - path->priority = 3; - path->weight = 3; + path->priority = TB_USB3_PRIORITY; + path->weight = TB_USB3_WEIGHT; path->drop_packages = 0; tb_path_for_each_hop(path, hop) @@ -1886,8 +2169,9 @@ static void tb_usb3_init_path(struct tb_path *path) * @alloc_hopid: Allocate HopIDs from visited ports * * If @down adapter is active, follows the tunnel to the USB3 upstream - * adapter and back. Returns the discovered tunnel or %NULL if there was - * no tunnel. + * adapter and back. + * + * Return: Pointer to &struct tb_tunnel or %NULL if there was no tunnel. */ struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down, bool alloc_hopid) @@ -1960,7 +2244,7 @@ struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down, tb_tunnel_dbg(tunnel, "currently allocated bandwidth %d/%d Mb/s\n", tunnel->allocated_up, tunnel->allocated_down); - tunnel->init = tb_usb3_init; + tunnel->pre_activate = tb_usb3_pre_activate; tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth; tunnel->release_unused_bandwidth = tb_usb3_release_unused_bandwidth; @@ -1974,7 +2258,7 @@ struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down, err_deactivate: tb_tunnel_deactivate(tunnel); err_free: - tb_tunnel_free(tunnel); + tb_tunnel_put(tunnel); return NULL; } @@ -1984,15 +2268,15 @@ err_free: * @tb: Pointer to the domain structure * @up: USB3 upstream adapter port * @down: USB3 downstream adapter port - * @max_up: Maximum available upstream bandwidth for the USB3 tunnel (%0 - * if not limited). - * @max_down: Maximum available downstream bandwidth for the USB3 tunnel - * (%0 if not limited). + * @max_up: Maximum available upstream bandwidth for the USB3 tunnel. + * %0 if no available bandwidth. + * @max_down: Maximum available downstream bandwidth for the USB3 tunnel. + * %0 if no available bandwidth. * * Allocate an USB3 tunnel. The ports must be of type @TB_TYPE_USB3_UP and * @TB_TYPE_USB3_DOWN. * - * Return: Returns a tb_tunnel on success or %NULL on failure. + * Return: Pointer to @struct tb_tunnel or %NULL in case of failure. 
*/ struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up, struct tb_port *down, int max_up, @@ -2002,24 +2286,19 @@ struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up, struct tb_path *path; int max_rate = 0; - /* - * Check that we have enough bandwidth available for the new - * USB3 tunnel. - */ - if (max_up > 0 || max_down > 0) { + if (!tb_route(down->sw) && (max_up > 0 || max_down > 0)) { + /* + * For USB3 isochronous transfers, we allow bandwidth which is + * not higher than 90% of maximum supported bandwidth by USB3 + * adapters. + */ max_rate = tb_usb3_max_link_rate(down, up); if (max_rate < 0) return NULL; - /* Only 90% can be allocated for USB3 isochronous transfers */ max_rate = max_rate * 90 / 100; - tb_port_dbg(up, "required bandwidth for USB3 tunnel %d Mb/s\n", + tb_port_dbg(up, "maximum required bandwidth for USB3 tunnel %d Mb/s\n", max_rate); - - if (max_rate > max_up || max_rate > max_down) { - tb_port_warn(up, "not enough bandwidth for USB3 tunnel\n"); - return NULL; - } } tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3); @@ -2034,27 +2313,23 @@ struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up, path = tb_path_alloc(tb, down, TB_USB3_HOPID, up, TB_USB3_HOPID, 0, "USB3 Down"); - if (!path) { - tb_tunnel_free(tunnel); - return NULL; - } + if (!path) + goto err_free; tb_usb3_init_path(path); tunnel->paths[TB_USB3_PATH_DOWN] = path; path = tb_path_alloc(tb, up, TB_USB3_HOPID, down, TB_USB3_HOPID, 0, "USB3 Up"); - if (!path) { - tb_tunnel_free(tunnel); - return NULL; - } + if (!path) + goto err_free; tb_usb3_init_path(path); tunnel->paths[TB_USB3_PATH_UP] = path; if (!tb_route(down->sw)) { - tunnel->allocated_up = max_rate; - tunnel->allocated_down = max_rate; + tunnel->allocated_up = min(max_rate, max_up); + tunnel->allocated_down = min(max_rate, max_down); - tunnel->init = tb_usb3_init; + tunnel->pre_activate = tb_usb3_pre_activate; tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth; tunnel->release_unused_bandwidth = tb_usb3_release_unused_bandwidth; @@ -2063,36 +2338,17 @@ struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up, } return tunnel; -} - -/** - * tb_tunnel_free() - free a tunnel - * @tunnel: Tunnel to be freed - * - * Frees a tunnel. The tunnel does not need to be deactivated. - */ -void tb_tunnel_free(struct tb_tunnel *tunnel) -{ - int i; - - if (!tunnel) - return; - - if (tunnel->deinit) - tunnel->deinit(tunnel); - - for (i = 0; i < tunnel->npaths; i++) { - if (tunnel->paths[i]) - tb_path_free(tunnel->paths[i]); - } - kfree(tunnel->paths); - kfree(tunnel); +err_free: + tb_tunnel_put(tunnel); + return NULL; } /** * tb_tunnel_is_invalid - check whether an activated path is still valid * @tunnel: Tunnel to check + * + * Return: %true if path is valid, %false otherwise. */ bool tb_tunnel_is_invalid(struct tb_tunnel *tunnel) { @@ -2108,12 +2364,16 @@ bool tb_tunnel_is_invalid(struct tb_tunnel *tunnel) } /** - * tb_tunnel_restart() - activate a tunnel after a hardware reset - * @tunnel: Tunnel to restart + * tb_tunnel_activate() - activate a tunnel + * @tunnel: Tunnel to activate * - * Return: 0 on success and negative errno in case if failure + * Return: + * * %0 - On success. + * * %-EINPROGRESS - If the tunnel activation is still in progress (that's + * for DP tunnels to complete DPRX capabilities read). + * * Negative errno - Another error occurred. 
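With the clamping above, a host-router USB3 tunnel no longer fails outright when less than the link maximum is available; a worked example with assumed numbers:

/*
 * 10000 Mb/s link: max_rate = 10000 * 90 / 100 = 9000 Mb/s (isochronous cap)
 * Caller offers max_up = 4000 Mb/s, max_down = 9500 Mb/s:
 *	allocated_up   = min(9000, 4000) = 4000 Mb/s
 *	allocated_down = min(9000, 9500) = 9000 Mb/s
 */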
*/ -int tb_tunnel_restart(struct tb_tunnel *tunnel) +int tb_tunnel_activate(struct tb_tunnel *tunnel) { int res, i; @@ -2130,8 +2390,10 @@ int tb_tunnel_restart(struct tb_tunnel *tunnel) } } - if (tunnel->init) { - res = tunnel->init(tunnel); + tunnel->state = TB_TUNNEL_ACTIVATING; + + if (tunnel->pre_activate) { + res = tunnel->pre_activate(tunnel); if (res) return res; } @@ -2144,10 +2406,14 @@ int tb_tunnel_restart(struct tb_tunnel *tunnel) if (tunnel->activate) { res = tunnel->activate(tunnel, true); - if (res) + if (res) { + if (res == -EINPROGRESS) + return res; goto err; + } } + tb_tunnel_set_active(tunnel, true); return 0; err: @@ -2157,27 +2423,6 @@ err: } /** - * tb_tunnel_activate() - activate a tunnel - * @tunnel: Tunnel to activate - * - * Return: Returns 0 on success or an error code on failure. - */ -int tb_tunnel_activate(struct tb_tunnel *tunnel) -{ - int i; - - for (i = 0; i < tunnel->npaths; i++) { - if (tunnel->paths[i]->activated) { - tb_tunnel_WARN(tunnel, - "trying to activate an already activated tunnel\n"); - return -EINVAL; - } - } - - return tb_tunnel_restart(tunnel); -} - -/** * tb_tunnel_deactivate() - deactivate a tunnel * @tunnel: Tunnel to deactivate */ @@ -2194,6 +2439,11 @@ void tb_tunnel_deactivate(struct tb_tunnel *tunnel) if (tunnel->paths[i] && tunnel->paths[i]->activated) tb_path_deactivate(tunnel->paths[i]); } + + if (tunnel->post_deactivate) + tunnel->post_deactivate(tunnel); + + tb_tunnel_set_active(tunnel, false); } /** @@ -2201,8 +2451,8 @@ void tb_tunnel_deactivate(struct tb_tunnel *tunnel) * @tunnel: Tunnel to check * @port: Port to check * - * Returns true if @tunnel goes through @port (direction does not matter), - * false otherwise. + * Return: %true if @tunnel goes through @port (direction does not matter), + * %false otherwise. */ bool tb_tunnel_port_on_path(const struct tb_tunnel *tunnel, const struct tb_port *port) @@ -2220,18 +2470,10 @@ bool tb_tunnel_port_on_path(const struct tb_tunnel *tunnel, return false; } -static bool tb_tunnel_is_active(const struct tb_tunnel *tunnel) +// Is tb_tunnel_activate() called for the tunnel +static bool tb_tunnel_is_activated(const struct tb_tunnel *tunnel) { - int i; - - for (i = 0; i < tunnel->npaths; i++) { - if (!tunnel->paths[i]) - return false; - if (!tunnel->paths[i]->activated) - return false; - } - - return true; + return tunnel->state == TB_TUNNEL_ACTIVATING || tb_tunnel_is_active(tunnel); } /** @@ -2240,15 +2482,17 @@ static bool tb_tunnel_is_active(const struct tb_tunnel *tunnel) * @max_up: Maximum upstream bandwidth in Mb/s * @max_down: Maximum downstream bandwidth in Mb/s * - * Returns maximum possible bandwidth this tunnel can go if not limited - * by other bandwidth clients. If the tunnel does not support this - * returns %-EOPNOTSUPP. + * Return: + * * Maximum possible bandwidth this tunnel can support if not + * limited by other bandwidth clients. + * * %-EOPNOTSUPP - If the tunnel does not support this function. + * * %-ENOTCONN - If the tunnel is not active. */ int tb_tunnel_maximum_bandwidth(struct tb_tunnel *tunnel, int *max_up, int *max_down) { if (!tb_tunnel_is_active(tunnel)) - return -EINVAL; + return -ENOTCONN; if (tunnel->maximum_bandwidth) return tunnel->maximum_bandwidth(tunnel, max_up, max_down); @@ -2262,14 +2506,18 @@ int tb_tunnel_maximum_bandwidth(struct tb_tunnel *tunnel, int *max_up, * @allocated_down: Currently allocated downstream bandwidth in Mb/s is * stored here * - * Returns the bandwidth allocated for the tunnel. 
This may be higher - * than what the tunnel actually consumes. + * Return: + * * Bandwidth allocated for the tunnel. This may be higher than what the + * tunnel actually consumes. + * * %-EOPNOTSUPP - If the tunnel does not support this function. + * * %-ENOTCONN - If the tunnel is not active. + * * Negative errno - Another error occurred. */ int tb_tunnel_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up, int *allocated_down) { if (!tb_tunnel_is_active(tunnel)) - return -EINVAL; + return -ENOTCONN; if (tunnel->allocated_bandwidth) return tunnel->allocated_bandwidth(tunnel, allocated_up, @@ -2283,19 +2531,29 @@ int tb_tunnel_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up, * @alloc_up: New upstream bandwidth in Mb/s * @alloc_down: New downstream bandwidth in Mb/s * - * Tries to change tunnel bandwidth allocation. If succeeds returns %0 - * and updates @alloc_up and @alloc_down to that was actually allocated - * (it may not be the same as passed originally). Returns negative errno - * in case of failure. + * Tries to change tunnel bandwidth allocation. + * + * Return: + * * %0 - On success. Updates @alloc_up and @alloc_down to values that were + * actually allocated (it may not be the same as passed originally). + * * Negative errno - In case of failure. */ int tb_tunnel_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up, int *alloc_down) { if (!tb_tunnel_is_active(tunnel)) - return -EINVAL; + return -ENOTCONN; + + if (tunnel->alloc_bandwidth) { + int ret; + + ret = tunnel->alloc_bandwidth(tunnel, alloc_up, alloc_down); + if (ret) + return ret; - if (tunnel->alloc_bandwidth) - return tunnel->alloc_bandwidth(tunnel, alloc_up, alloc_down); + tb_tunnel_changed(tunnel); + return 0; + } return -EOPNOTSUPP; } @@ -2309,34 +2567,36 @@ int tb_tunnel_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up, * Can be %NULL. * * Stores the amount of isochronous bandwidth @tunnel consumes in - * @consumed_up and @consumed_down. In case of success returns %0, - * negative errno otherwise. + * @consumed_up and @consumed_down. + * + * Return: %0 on success, negative errno otherwise. */ int tb_tunnel_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up, int *consumed_down) { int up_bw = 0, down_bw = 0; - if (!tb_tunnel_is_active(tunnel)) - goto out; - - if (tunnel->consumed_bandwidth) { + /* + * Here we need to distinguish between not active tunnel from + * tunnels that are either fully active or activation started. + * The latter is true for DP tunnels where we must report the + * consumed to be the maximum we gave it until DPRX capabilities + * read is done by the graphics driver. + */ + if (tb_tunnel_is_activated(tunnel) && tunnel->consumed_bandwidth) { int ret; ret = tunnel->consumed_bandwidth(tunnel, &up_bw, &down_bw); if (ret) return ret; - - tb_tunnel_dbg(tunnel, "consumed bandwidth %d/%d Mb/s\n", up_bw, - down_bw); } -out: if (consumed_up) *consumed_up = up_bw; if (consumed_down) *consumed_down = down_bw; + tb_tunnel_dbg(tunnel, "consumed bandwidth %d/%d Mb/s\n", up_bw, down_bw); return 0; } @@ -2345,14 +2605,14 @@ out: * @tunnel: Tunnel whose unused bandwidth to release * * If tunnel supports dynamic bandwidth management (USB3 tunnels at the - * moment) this function makes it to release all the unused bandwidth. + * moment) this function makes it release all the unused bandwidth. * - * Returns %0 in case of success and negative errno otherwise. + * Return: %0 on success, negative errno otherwise. 
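Callers of the bandwidth helpers can now distinguish an inactive tunnel from one with a fixed allocation; a hypothetical rebalancing step:

static void rebalance_one(struct tb_tunnel *tunnel)
{
	int up, down, ret;

	ret = tb_tunnel_allocated_bandwidth(tunnel, &up, &down);
	if (ret == -ENOTCONN)
		return;		/* not active (yet), nothing to rebalance */
	if (ret == -EOPNOTSUPP)
		return;		/* fixed allocation for this tunnel type */
	if (ret)
		return;		/* other error */
	/* ... compute new values and call tb_tunnel_alloc_bandwidth() ... */
}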
*/ int tb_tunnel_release_unused_bandwidth(struct tb_tunnel *tunnel) { if (!tb_tunnel_is_active(tunnel)) - return 0; + return -ENOTCONN; if (tunnel->release_unused_bandwidth) { int ret; @@ -2387,3 +2647,8 @@ void tb_tunnel_reclaim_available_bandwidth(struct tb_tunnel *tunnel, tunnel->reclaim_available_bandwidth(tunnel, available_up, available_down); } + +const char *tb_tunnel_type_name(const struct tb_tunnel *tunnel) +{ + return tb_tunnel_names[tunnel->type]; +} |
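On the receiving end, the TUNNEL_EVENT/TUNNEL_DETAILS variables added by this patch can be observed from userspace with a uevent monitor. A sketch using libudev (assumes the event is emitted on a device in the thunderbolt subsystem; build with -ludev):

#include <libudev.h>
#include <poll.h>
#include <stdio.h>

int main(void)
{
	struct udev *udev = udev_new();
	struct udev_monitor *mon = udev_monitor_new_from_netlink(udev, "udev");
	struct pollfd pfd;

	udev_monitor_filter_add_match_subsystem_devtype(mon, "thunderbolt", NULL);
	udev_monitor_enable_receiving(mon);
	pfd.fd = udev_monitor_get_fd(mon);
	pfd.events = POLLIN;

	for (;;) {
		struct udev_device *dev;
		const char *event, *details;

		if (poll(&pfd, 1, -1) <= 0)
			continue;
		dev = udev_monitor_receive_device(mon);
		if (!dev)
			continue;
		event = udev_device_get_property_value(dev, "TUNNEL_EVENT");
		details = udev_device_get_property_value(dev, "TUNNEL_DETAILS");
		if (event)
			printf("tunnel %s: %s\n", event, details ? details : "");
		udev_device_unref(dev);
	}
}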
