Diffstat (limited to 'drivers/thunderbolt/tunnel.c')
-rw-r--r-- | drivers/thunderbolt/tunnel.c | 526
1 file changed, 301 insertions, 225 deletions
diff --git a/drivers/thunderbolt/tunnel.c b/drivers/thunderbolt/tunnel.c
index 6fffb2c82d3d..717b31d78728 100644
--- a/drivers/thunderbolt/tunnel.c
+++ b/drivers/thunderbolt/tunnel.c
@@ -70,6 +70,24 @@
 #define USB4_V2_PCI_MIN_BANDWIDTH	(1500 * TB_PCI_WEIGHT)
 #define USB4_V2_USB3_MIN_BANDWIDTH	(1500 * TB_USB3_WEIGHT)
 
+/*
+ * According to VESA spec, the DPRX negotiation shall complete in 5
+ * seconds after tunnel is established. Since at least i915 can runtime
+ * suspend if there is nothing connected, and it polls any new
+ * connections every 10 seconds, we use 12 seconds here.
+ *
+ * These are in ms.
+ */
+#define TB_DPRX_TIMEOUT			12000
+#define TB_DPRX_WAIT_TIMEOUT		25
+#define TB_DPRX_POLL_DELAY		50
+
+static int dprx_timeout = TB_DPRX_TIMEOUT;
+module_param(dprx_timeout, int, 0444);
+MODULE_PARM_DESC(dprx_timeout,
+		 "DPRX capability read timeout in ms, -1 waits forever (default: "
+		 __MODULE_STRING(TB_DPRX_TIMEOUT) ")");
+
 static unsigned int dma_credits = TB_DMA_CREDITS;
 module_param(dma_credits, uint, 0444);
 MODULE_PARM_DESC(dma_credits, "specify custom credits for DMA tunnels (default: "
@@ -82,6 +100,9 @@ MODULE_PARM_DESC(bw_alloc_mode,
 
 static const char * const tb_tunnel_names[] = { "PCI", "DP", "DMA", "USB3" };
 
+/* Synchronizes kref_get()/put() of struct tb_tunnel */
+static DEFINE_MUTEX(tb_tunnel_lock);
+
 static inline unsigned int tb_usable_credits(const struct tb_port *port)
 {
 	return port->total_credits - port->ctl_credits;
@@ -155,7 +176,7 @@ static struct tb_tunnel *tb_tunnel_alloc(struct tb *tb, size_t npaths,
 
 	tunnel->paths = kcalloc(npaths, sizeof(tunnel->paths[0]), GFP_KERNEL);
 	if (!tunnel->paths) {
-		tb_tunnel_free(tunnel);
+		kfree(tunnel);
 		return NULL;
 	}
 
@@ -163,10 +184,42 @@
 	tunnel->tb = tb;
 	tunnel->npaths = npaths;
 	tunnel->type = type;
+	kref_init(&tunnel->kref);
 
 	return tunnel;
 }
 
+static void tb_tunnel_get(struct tb_tunnel *tunnel)
+{
+	mutex_lock(&tb_tunnel_lock);
+	kref_get(&tunnel->kref);
+	mutex_unlock(&tb_tunnel_lock);
+}
+
+static void tb_tunnel_destroy(struct kref *kref)
+{
+	struct tb_tunnel *tunnel = container_of(kref, typeof(*tunnel), kref);
+	int i;
+
+	if (tunnel->destroy)
+		tunnel->destroy(tunnel);
+
+	for (i = 0; i < tunnel->npaths; i++) {
+		if (tunnel->paths[i])
+			tb_path_free(tunnel->paths[i]);
+	}
+
+	kfree(tunnel->paths);
+	kfree(tunnel);
+}
+
+void tb_tunnel_put(struct tb_tunnel *tunnel)
+{
+	mutex_lock(&tb_tunnel_lock);
+	kref_put(&tunnel->kref, tb_tunnel_destroy);
+	mutex_unlock(&tb_tunnel_lock);
+}
+
 static int tb_pci_set_ext_encapsulation(struct tb_tunnel *tunnel, bool enable)
 {
 	struct tb_port *port = tb_upstream_port(tunnel->dst_port->sw);
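The new kref machinery serializes every reference count change behind one global mutex. The same get/put pattern can be modeled in ordinary userspace C; a minimal sketch (the names and the pthread mutex standing in for tb_tunnel_lock are illustrative, not part of the driver):

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t tunnel_lock = PTHREAD_MUTEX_INITIALIZER;

struct tunnel {
	int refcount;	/* models struct kref */
	void *paths;	/* models tunnel->paths */
};

static struct tunnel *tunnel_alloc(void)
{
	struct tunnel *t = calloc(1, sizeof(*t));

	if (t)
		t->refcount = 1;	/* models kref_init() */
	return t;
}

static void tunnel_get(struct tunnel *t)
{
	pthread_mutex_lock(&tunnel_lock);
	t->refcount++;			/* models kref_get() */
	pthread_mutex_unlock(&tunnel_lock);
}

static void tunnel_put(struct tunnel *t)
{
	pthread_mutex_lock(&tunnel_lock);
	if (--t->refcount == 0) {	/* last ref, like tb_tunnel_destroy() */
		free(t->paths);
		free(t);
	}
	pthread_mutex_unlock(&tunnel_lock);
}

int main(void)
{
	struct tunnel *t = tunnel_alloc();

	if (!t)
		return 1;
	tunnel_get(t);	/* e.g. pending DPRX work holds a reference */
	tunnel_put(t);	/* work done */
	tunnel_put(t);	/* drops the initial reference and frees */
	return 0;
}

A single global lock is presumably acceptable here because tunnels are few and get/put is nowhere near a hot path.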
@@ -355,7 +408,7 @@ struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down,
 err_deactivate:
 	tb_tunnel_deactivate(tunnel);
 err_free:
-	tb_tunnel_free(tunnel);
+	tb_tunnel_put(tunnel);
 
 	return NULL;
 }
@@ -404,7 +457,7 @@ struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
 	return tunnel;
 
 err_free:
-	tb_tunnel_free(tunnel);
+	tb_tunnel_put(tunnel);
 	return NULL;
 }
 
@@ -706,7 +759,7 @@ static int tb_dp_xchg_caps(struct tb_tunnel *tunnel)
 		      "DP OUT maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
 		      out_rate, out_lanes, bw);
 
-	if (tb_port_path_direction_downstream(in, out))
+	if (tb_tunnel_direction_downstream(tunnel))
 		max_bw = tunnel->max_down;
 	else
 		max_bw = tunnel->max_up;
@@ -831,7 +884,7 @@ static int tb_dp_bandwidth_alloc_mode_enable(struct tb_tunnel *tunnel)
 	 * max_up/down fields. For discovery we just read what the
 	 * estimation was set to.
 	 */
-	if (tb_port_path_direction_downstream(in, out))
+	if (tb_tunnel_direction_downstream(tunnel))
 		estimated_bw = tunnel->max_down;
 	else
 		estimated_bw = tunnel->max_up;
@@ -851,7 +904,7 @@ static int tb_dp_bandwidth_alloc_mode_enable(struct tb_tunnel *tunnel)
 	return 0;
 }
 
-static int tb_dp_init(struct tb_tunnel *tunnel)
+static int tb_dp_pre_activate(struct tb_tunnel *tunnel)
 {
 	struct tb_port *in = tunnel->src_port;
 	struct tb_switch *sw = in->sw;
@@ -877,7 +930,7 @@ static int tb_dp_init(struct tb_tunnel *tunnel)
 	return tb_dp_bandwidth_alloc_mode_enable(tunnel);
 }
 
-static void tb_dp_deinit(struct tb_tunnel *tunnel)
+static void tb_dp_post_deactivate(struct tb_tunnel *tunnel)
 {
 	struct tb_port *in = tunnel->src_port;
 
@@ -889,6 +942,95 @@ static void tb_dp_deinit(struct tb_tunnel *tunnel)
 	}
 }
 
+static ktime_t dprx_timeout_to_ktime(int timeout_msec)
+{
+	return timeout_msec >= 0 ?
+		ktime_add_ms(ktime_get(), timeout_msec) : KTIME_MAX;
+}
+
+static int tb_dp_wait_dprx(struct tb_tunnel *tunnel, int timeout_msec)
+{
+	ktime_t timeout = dprx_timeout_to_ktime(timeout_msec);
+	struct tb_port *in = tunnel->src_port;
+
+	/*
+	 * Wait for DPRX done. Normally it should be already set for
+	 * active tunnel.
+	 */
+	do {
+		u32 val;
+		int ret;
+
+		ret = tb_port_read(in, &val, TB_CFG_PORT,
+				   in->cap_adap + DP_COMMON_CAP, 1);
+		if (ret)
+			return ret;
+
+		if (val & DP_COMMON_CAP_DPRX_DONE)
+			return 0;
+
+		usleep_range(100, 150);
+	} while (ktime_before(ktime_get(), timeout));
+
+	tb_tunnel_dbg(tunnel, "DPRX read timeout\n");
+	return -ETIMEDOUT;
+}
+
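tb_dp_wait_dprx() now derives its deadline from dprx_timeout_to_ktime(), where a negative timeout means "wait forever" (KTIME_MAX). A userspace approximation of the same loop, using CLOCK_MONOTONIC in place of ktime_get() (the condition callback is a stand-in for the DP_COMMON_CAP_DPRX_DONE register read; names are illustrative):

#include <stdbool.h>
#include <stdint.h>
#include <time.h>

static int64_t now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (int64_t)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
}

/* Models dprx_timeout_to_ktime(): negative timeout waits forever */
static int64_t deadline_ms(int timeout_msec)
{
	return timeout_msec >= 0 ? now_ms() + timeout_msec : INT64_MAX;
}

/* Models tb_dp_wait_dprx(): poll a condition until the deadline */
static int wait_for(bool (*done)(void), int timeout_msec)
{
	int64_t deadline = deadline_ms(timeout_msec);

	do {
		if (done())
			return 0;
		/* the kernel sleeps 100-150 us between register reads */
		nanosleep(&(struct timespec){ .tv_nsec = 100 * 1000 }, NULL);
	} while (now_ms() < deadline);

	return -1;	/* -ETIMEDOUT in the kernel */
}

static bool always_ready(void) { return true; }

int main(void)
{
	return wait_for(always_ready, 25);
}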
+static void tb_dp_dprx_work(struct work_struct *work)
+{
+	struct tb_tunnel *tunnel = container_of(work, typeof(*tunnel), dprx_work.work);
+	struct tb *tb = tunnel->tb;
+
+	if (!tunnel->dprx_canceled) {
+		mutex_lock(&tb->lock);
+		if (tb_dp_is_usb4(tunnel->src_port->sw) &&
+		    tb_dp_wait_dprx(tunnel, TB_DPRX_WAIT_TIMEOUT)) {
+			if (ktime_before(ktime_get(), tunnel->dprx_timeout)) {
+				queue_delayed_work(tb->wq, &tunnel->dprx_work,
+						   msecs_to_jiffies(TB_DPRX_POLL_DELAY));
+				mutex_unlock(&tb->lock);
+				return;
+			}
+		} else {
+			tunnel->state = TB_TUNNEL_ACTIVE;
+		}
+		mutex_unlock(&tb->lock);
+	}
+
+	if (tunnel->callback)
+		tunnel->callback(tunnel, tunnel->callback_data);
+}
+
+static int tb_dp_dprx_start(struct tb_tunnel *tunnel)
+{
+	/*
+	 * Bump up the reference to keep the tunnel around. It will be
+	 * dropped in tb_dp_dprx_stop() once the tunnel is deactivated.
+	 */
+	tb_tunnel_get(tunnel);
+
+	tunnel->dprx_started = true;
+
+	if (tunnel->callback) {
+		tunnel->dprx_timeout = dprx_timeout_to_ktime(dprx_timeout);
+		queue_delayed_work(tunnel->tb->wq, &tunnel->dprx_work, 0);
+		return -EINPROGRESS;
+	}
+
+	return tb_dp_is_usb4(tunnel->src_port->sw) ?
+		tb_dp_wait_dprx(tunnel, dprx_timeout) : 0;
+}
+
+static void tb_dp_dprx_stop(struct tb_tunnel *tunnel)
+{
+	if (tunnel->dprx_started) {
+		tunnel->dprx_started = false;
+		tunnel->dprx_canceled = true;
+		cancel_delayed_work(&tunnel->dprx_work);
+		tb_tunnel_put(tunnel);
+	}
+}
+
 static int tb_dp_activate(struct tb_tunnel *tunnel, bool active)
 {
 	int ret;
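The worker splits the 12-second budget into short bounded waits: each run blocks for at most TB_DPRX_WAIT_TIMEOUT (25 ms) and, if DPRX is not done, re-queues itself TB_DPRX_POLL_DELAY (50 ms) later, so tb->lock is never held for long. A back-of-the-envelope estimate of the worst-case number of worker runs under that reading:

#include <stdio.h>

#define TB_DPRX_TIMEOUT		12000	/* ms, overall budget */
#define TB_DPRX_WAIT_TIMEOUT	25	/* ms, one blocking wait */
#define TB_DPRX_POLL_DELAY	50	/* ms, delay before re-queue */

int main(void)
{
	/* one work cycle is roughly bounded wait + re-queue delay */
	int cycle = TB_DPRX_WAIT_TIMEOUT + TB_DPRX_POLL_DELAY;

	printf("~%d worker runs before the deadline\n",
	       TB_DPRX_TIMEOUT / cycle);	/* ~160 */
	return 0;
}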
@@ -910,6 +1052,7 @@ static int tb_dp_activate(struct tb_tunnel *tunnel, bool active)
 			    paths[TB_DP_AUX_PATH_IN]->hops[0].in_hop_index,
 			    paths[TB_DP_AUX_PATH_OUT]->hops[last].next_hop_index);
 	} else {
+		tb_dp_dprx_stop(tunnel);
 		tb_dp_port_hpd_clear(tunnel->src_port);
 		tb_dp_port_set_hops(tunnel->src_port, 0, 0, 0);
 		if (tb_port_is_dpout(tunnel->dst_port))
@@ -920,18 +1063,27 @@ static int tb_dp_activate(struct tb_tunnel *tunnel, bool active)
 	if (ret)
 		return ret;
 
-	if (tb_port_is_dpout(tunnel->dst_port))
-		return tb_dp_port_enable(tunnel->dst_port, active);
+	if (tb_port_is_dpout(tunnel->dst_port)) {
+		ret = tb_dp_port_enable(tunnel->dst_port, active);
+		if (ret)
+			return ret;
+	}
 
-	return 0;
+	return active ? tb_dp_dprx_start(tunnel) : 0;
 }
 
-/* max_bw is rounded up to next granularity */
+/**
+ * tb_dp_bandwidth_mode_maximum_bandwidth() - Maximum possible bandwidth
+ * @tunnel: DP tunnel to check
+ * @max_bw_rounded: Maximum bandwidth in Mb/s rounded up to the next granularity
+ *
+ * Returns maximum possible bandwidth for this tunnel in Mb/s.
+ */
 static int tb_dp_bandwidth_mode_maximum_bandwidth(struct tb_tunnel *tunnel,
-						  int *max_bw)
+						  int *max_bw_rounded)
 {
 	struct tb_port *in = tunnel->src_port;
-	int ret, rate, lanes, nrd_bw;
+	int ret, rate, lanes, max_bw;
 	u32 cap;
 
 	/*
@@ -947,41 +1099,26 @@ static int tb_dp_bandwidth_mode_maximum_bandwidth(struct tb_tunnel *tunnel,
 		return ret;
 	rate = tb_dp_cap_get_rate_ext(cap);
 
-	if (tb_dp_is_uhbr_rate(rate)) {
-		/*
-		 * When UHBR is used there is no reduction in lanes so
-		 * we can use this directly.
-		 */
-		lanes = tb_dp_cap_get_lanes(cap);
-	} else {
-		/*
-		 * If there is no UHBR supported then check the
-		 * non-reduced rate and lanes.
-		 */
-		ret = usb4_dp_port_nrd(in, &rate, &lanes);
-		if (ret)
-			return ret;
-	}
+	lanes = tb_dp_cap_get_lanes(cap);
 
-	nrd_bw = tb_dp_bandwidth(rate, lanes);
+	max_bw = tb_dp_bandwidth(rate, lanes);
 
-	if (max_bw) {
+	if (max_bw_rounded) {
 		ret = usb4_dp_port_granularity(in);
 		if (ret < 0)
			return ret;
-		*max_bw = roundup(nrd_bw, ret);
+		*max_bw_rounded = roundup(max_bw, ret);
 	}
 
-	return nrd_bw;
+	return max_bw;
 }
 
 static int tb_dp_bandwidth_mode_consumed_bandwidth(struct tb_tunnel *tunnel,
 						   int *consumed_up,
 						   int *consumed_down)
 {
-	struct tb_port *out = tunnel->dst_port;
 	struct tb_port *in = tunnel->src_port;
-	int ret, allocated_bw, max_bw;
+	int ret, allocated_bw, max_bw_rounded;
 
 	if (!usb4_dp_port_bandwidth_mode_enabled(in))
 		return -EOPNOTSUPP;
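The rename to max_bw_rounded makes the trick in the consumed/allocated paths easier to follow: allocations happen in granularity units, so an allocation that equals the rounded-up maximum really means "everything", and the un-rounded DPRX maximum is reported instead. Worked numbers (illustrative; the real granularity comes from usb4_dp_port_granularity() at run time):

#include <stdio.h>

/* same arithmetic as the kernel's roundup() for positive values */
static int roundup_to(int value, int granularity)
{
	return ((value + granularity - 1) / granularity) * granularity;
}

int main(void)
{
	int max_bw = 17280;	/* e.g. HBR2 x4 in Mb/s */
	int granularity = 250;
	int max_bw_rounded = roundup_to(max_bw, granularity);	/* 17500 */

	printf("allocation of %d matches rounded max %d -> report %d Mb/s\n",
	       max_bw_rounded, max_bw_rounded, max_bw);
	return 0;
}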
@@ -995,13 +1132,13 @@ static int tb_dp_bandwidth_mode_consumed_bandwidth(struct tb_tunnel *tunnel,
 		return ret;
 	allocated_bw = ret;
 
-	ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw);
+	ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw_rounded);
 	if (ret < 0)
 		return ret;
-	if (allocated_bw == max_bw)
+	if (allocated_bw == max_bw_rounded)
 		allocated_bw = ret;
 
-	if (tb_port_path_direction_downstream(in, out)) {
+	if (tb_tunnel_direction_downstream(tunnel)) {
 		*consumed_up = 0;
 		*consumed_down = allocated_bw;
 	} else {
@@ -1015,7 +1152,6 @@ static int tb_dp_bandwidth_mode_consumed_bandwidth(struct tb_tunnel *tunnel,
 static int tb_dp_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up,
 				     int *allocated_down)
 {
-	struct tb_port *out = tunnel->dst_port;
 	struct tb_port *in = tunnel->src_port;
 
 	/*
@@ -1023,20 +1159,21 @@ static int tb_dp_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up
 	 * Otherwise we read it from the DPRX.
 	 */
 	if (usb4_dp_port_bandwidth_mode_enabled(in) && tunnel->bw_mode) {
-		int ret, allocated_bw, max_bw;
+		int ret, allocated_bw, max_bw_rounded;
 
 		ret = usb4_dp_port_allocated_bandwidth(in);
 		if (ret < 0)
 			return ret;
 		allocated_bw = ret;
 
-		ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw);
+		ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel,
+							     &max_bw_rounded);
 		if (ret < 0)
 			return ret;
-		if (allocated_bw == max_bw)
+		if (allocated_bw == max_bw_rounded)
 			allocated_bw = ret;
 
-		if (tb_port_path_direction_downstream(in, out)) {
+		if (tb_tunnel_direction_downstream(tunnel)) {
 			*allocated_up = 0;
 			*allocated_down = allocated_bw;
 		} else {
@@ -1053,26 +1190,25 @@ static int tb_dp_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up
 static int tb_dp_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up,
 				 int *alloc_down)
 {
-	struct tb_port *out = tunnel->dst_port;
 	struct tb_port *in = tunnel->src_port;
-	int max_bw, ret, tmp;
+	int max_bw_rounded, ret, tmp;
 
 	if (!usb4_dp_port_bandwidth_mode_enabled(in))
 		return -EOPNOTSUPP;
 
-	ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw);
+	ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw_rounded);
 	if (ret < 0)
 		return ret;
 
-	if (tb_port_path_direction_downstream(in, out)) {
-		tmp = min(*alloc_down, max_bw);
+	if (tb_tunnel_direction_downstream(tunnel)) {
+		tmp = min(*alloc_down, max_bw_rounded);
 		ret = usb4_dp_port_allocate_bandwidth(in, tmp);
 		if (ret)
 			return ret;
 		*alloc_down = tmp;
 		*alloc_up = 0;
 	} else {
-		tmp = min(*alloc_up, max_bw);
+		tmp = min(*alloc_up, max_bw_rounded);
 		ret = usb4_dp_port_allocate_bandwidth(in, tmp);
 		if (ret)
 			return ret;
@@ -1086,35 +1222,6 @@ static int tb_dp_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up,
 	return 0;
 }
 
-static int tb_dp_wait_dprx(struct tb_tunnel *tunnel, int timeout_msec)
-{
-	ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
-	struct tb_port *in = tunnel->src_port;
-
-	/*
-	 * Wait for DPRX done. Normally it should be already set for
-	 * active tunnel.
-	 */
-	do {
-		u32 val;
-		int ret;
-
-		ret = tb_port_read(in, &val, TB_CFG_PORT,
-				   in->cap_adap + DP_COMMON_CAP, 1);
-		if (ret)
-			return ret;
-
-		if (val & DP_COMMON_CAP_DPRX_DONE) {
-			tb_tunnel_dbg(tunnel, "DPRX read done\n");
-			return 0;
-		}
-		usleep_range(100, 150);
-	} while (ktime_before(ktime_get(), timeout));
-
-	tb_tunnel_dbg(tunnel, "DPRX read timeout\n");
-	return -ETIMEDOUT;
-}
-
 /* Read cap from tunnel DP IN */
 static int tb_dp_read_cap(struct tb_tunnel *tunnel, unsigned int cap, u32 *rate,
 			  u32 *lanes)
@@ -1150,17 +1257,16 @@ static int tb_dp_read_cap(struct tb_tunnel *tunnel, unsigned int cap, u32 *rate,
 static int tb_dp_maximum_bandwidth(struct tb_tunnel *tunnel, int *max_up,
 				   int *max_down)
 {
-	struct tb_port *in = tunnel->src_port;
 	int ret;
 
-	if (!usb4_dp_port_bandwidth_mode_enabled(in))
+	if (!usb4_dp_port_bandwidth_mode_enabled(tunnel->src_port))
 		return -EOPNOTSUPP;
 
 	ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, NULL);
 	if (ret < 0)
 		return ret;
 
-	if (tb_port_path_direction_downstream(in, tunnel->dst_port)) {
+	if (tb_tunnel_direction_downstream(tunnel)) {
 		*max_up = 0;
 		*max_down = ret;
 	} else {
@@ -1174,42 +1280,44 @@ static int tb_dp_maximum_bandwidth(struct tb_tunnel *tunnel, int *max_up,
 static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
 				    int *consumed_down)
 {
-	struct tb_port *in = tunnel->src_port;
-	const struct tb_switch *sw = in->sw;
+	const struct tb_switch *sw = tunnel->src_port->sw;
 	u32 rate = 0, lanes = 0;
 	int ret;
 
 	if (tb_dp_is_usb4(sw)) {
-		/*
-		 * On USB4 routers check if the bandwidth allocation
-		 * mode is enabled first and then read the bandwidth
-		 * through those registers.
-		 */
-		ret = tb_dp_bandwidth_mode_consumed_bandwidth(tunnel, consumed_up,
-							      consumed_down);
-		if (ret < 0) {
-			if (ret != -EOPNOTSUPP)
-				return ret;
-		} else if (!ret) {
-			return 0;
-		}
-		/*
-		 * Then see if the DPRX negotiation is ready and if yes
-		 * return that bandwidth (it may be smaller than the
-		 * reduced one). Otherwise return the remote (possibly
-		 * reduced) caps.
-		 */
-		ret = tb_dp_wait_dprx(tunnel, 150);
+		ret = tb_dp_wait_dprx(tunnel, 0);
 		if (ret) {
-			if (ret == -ETIMEDOUT)
+			if (ret == -ETIMEDOUT) {
+				/*
+				 * While we wait for DPRX complete the
+				 * tunnel consumes as much as it had
+				 * been reserved initially.
+				 */
 				ret = tb_dp_read_cap(tunnel, DP_REMOTE_CAP,
 						     &rate, &lanes);
+				if (ret)
+					return ret;
+			} else {
+				return ret;
+			}
+		} else {
+			/*
+			 * On USB4 routers check if the bandwidth allocation
+			 * mode is enabled first and then read the bandwidth
+			 * through those registers.
+			 */
+			ret = tb_dp_bandwidth_mode_consumed_bandwidth(tunnel, consumed_up,
+								      consumed_down);
+			if (ret < 0) {
+				if (ret != -EOPNOTSUPP)
+					return ret;
+			} else if (!ret) {
+				return 0;
+			}
+			ret = tb_dp_read_cap(tunnel, DP_COMMON_CAP, &rate, &lanes);
 			if (ret)
 				return ret;
 		}
-		ret = tb_dp_read_cap(tunnel, DP_COMMON_CAP, &rate, &lanes);
-		if (ret)
-			return ret;
 	} else if (sw->generation >= 2) {
 		ret = tb_dp_read_cap(tunnel, DP_REMOTE_CAP, &rate, &lanes);
 		if (ret)
@@ -1221,7 +1329,7 @@ static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
 		return 0;
 	}
 
-	if (tb_port_path_direction_downstream(in, tunnel->dst_port)) {
+	if (tb_tunnel_direction_downstream(tunnel)) {
 		*consumed_up = 0;
 		*consumed_down = tb_dp_bandwidth(rate, lanes);
 	} else {
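The restructured tb_dp_consumed_bandwidth() first checks, with a zero timeout, whether DPRX has finished at all and only then trusts the negotiated values; until then the tunnel is charged the full amount that was reserved for it up front. A condensed model of that decision (the helpers stand in for the register reads, with made-up values):

#include <stdbool.h>
#include <stdio.h>

static bool dprx_done(void) { return false; }		/* pretend still negotiating */
static int remote_cap_bw(void) { return 17280; }	/* initially reserved */
static int common_cap_bw(void) { return 8640; }		/* after DPRX negotiation */

static int consumed_bw(void)
{
	/* while DPRX runs, consume the full reservation */
	if (!dprx_done())
		return remote_cap_bw();
	/* afterwards the (possibly smaller) negotiated value wins */
	return common_cap_bw();
}

int main(void)
{
	printf("consumed %d Mb/s\n", consumed_bw());
	return 0;
}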
@@ -1381,9 +1489,9 @@ struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in,
 	if (!tunnel)
 		return NULL;
 
-	tunnel->init = tb_dp_init;
-	tunnel->deinit = tb_dp_deinit;
+	tunnel->pre_activate = tb_dp_pre_activate;
 	tunnel->activate = tb_dp_activate;
+	tunnel->post_deactivate = tb_dp_post_deactivate;
 	tunnel->maximum_bandwidth = tb_dp_maximum_bandwidth;
 	tunnel->allocated_bandwidth = tb_dp_allocated_bandwidth;
 	tunnel->alloc_bandwidth = tb_dp_alloc_bandwidth;
@@ -1440,7 +1548,7 @@ struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in,
 err_deactivate:
 	tb_tunnel_deactivate(tunnel);
 err_free:
-	tb_tunnel_free(tunnel);
+	tb_tunnel_put(tunnel);
 
 	return NULL;
 }
@@ -1451,19 +1559,28 @@ err_free:
  * @in: DP in adapter port
  * @out: DP out adapter port
  * @link_nr: Preferred lane adapter when the link is not bonded
- * @max_up: Maximum available upstream bandwidth for the DP tunnel (%0
- *	    if not limited)
- * @max_down: Maximum available downstream bandwidth for the DP tunnel
- *	      (%0 if not limited)
+ * @max_up: Maximum available upstream bandwidth for the DP tunnel.
+ *	    %0 if no available bandwidth.
+ * @max_down: Maximum available downstream bandwidth for the DP tunnel.
+ *	      %0 if no available bandwidth.
+ * @callback: Optional callback that is called when the DP tunnel is
+ *	      fully activated (or there is an error)
+ * @callback_data: Optional data for @callback
  *
  * Allocates a tunnel between @in and @out that is capable of tunneling
- * Display Port traffic.
+ * Display Port traffic. If @callback is not %NULL it will be called
+ * after tb_tunnel_activate() once the tunnel has been fully activated.
+ * It can call tb_tunnel_is_active() to check if activation was
+ * successful (or if it returns %false there was some sort of issue).
+ * The @callback is called without @tb->lock held.
  *
- * Return: Returns a tb_tunnel on success or NULL on failure.
+ * Return: Returns a tb_tunnel on success or %NULL on failure.
  */
 struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
 				     struct tb_port *out, int link_nr,
-				     int max_up, int max_down)
+				     int max_up, int max_down,
+				     void (*callback)(struct tb_tunnel *, void *),
+				     void *callback_data)
 {
 	struct tb_tunnel *tunnel;
 	struct tb_path **paths;
@@ -1477,9 +1594,9 @@ struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
 	if (!tunnel)
 		return NULL;
 
-	tunnel->init = tb_dp_init;
-	tunnel->deinit = tb_dp_deinit;
+	tunnel->pre_activate = tb_dp_pre_activate;
 	tunnel->activate = tb_dp_activate;
+	tunnel->post_deactivate = tb_dp_post_deactivate;
 	tunnel->maximum_bandwidth = tb_dp_maximum_bandwidth;
 	tunnel->allocated_bandwidth = tb_dp_allocated_bandwidth;
 	tunnel->alloc_bandwidth = tb_dp_alloc_bandwidth;
@@ -1488,6 +1605,9 @@ struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
 	tunnel->dst_port = out;
 	tunnel->max_up = max_up;
 	tunnel->max_down = max_down;
+	tunnel->callback = callback;
+	tunnel->callback_data = callback_data;
+	INIT_DELAYED_WORK(&tunnel->dprx_work, tb_dp_dprx_work);
 
 	paths = tunnel->paths;
 	pm_support = usb4_switch_version(in->sw) >= 2;
@@ -1516,7 +1636,7 @@ struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
 	return tunnel;
 
 err_free:
-	tb_tunnel_free(tunnel);
+	tb_tunnel_put(tunnel);
 	return NULL;
 }
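With the extended signature a caller opts into asynchronous activation by passing a callback at allocation time. A hypothetical caller fragment (not part of this patch; error handling trimmed, and tb, in, out, link_nr, max_up, max_down assumed to be in scope inside a driver function):

static void dp_tunnel_done(struct tb_tunnel *tunnel, void *data)
{
	/* Runs from tb->wq without tb->lock held */
	if (tb_tunnel_is_active(tunnel))
		tb_tunnel_dbg(tunnel, "DPRX done, tunnel fully active\n");
	else
		tb_tunnel_dbg(tunnel, "activation failed\n");
}

	tunnel = tb_tunnel_alloc_dp(tb, in, out, link_nr, max_up, max_down,
				    dp_tunnel_done, NULL);
	if (!tunnel)
		return;

	ret = tb_tunnel_activate(tunnel);
	if (ret && ret != -EINPROGRESS) {
		tb_tunnel_deactivate(tunnel);
		tb_tunnel_put(tunnel);
		return;
	}
	/* on -EINPROGRESS, dp_tunnel_done() fires once DPRX settles */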
@@ -1636,7 +1756,7 @@ static void tb_dma_release_credits(struct tb_path_hop *hop)
 	}
 }
 
-static void tb_dma_deinit_path(struct tb_path *path)
+static void tb_dma_destroy_path(struct tb_path *path)
 {
 	struct tb_path_hop *hop;
 
@@ -1644,14 +1764,14 @@ static void tb_dma_deinit_path(struct tb_path *path)
 		tb_dma_release_credits(hop);
 }
 
-static void tb_dma_deinit(struct tb_tunnel *tunnel)
+static void tb_dma_destroy(struct tb_tunnel *tunnel)
 {
 	int i;
 
 	for (i = 0; i < tunnel->npaths; i++) {
 		if (!tunnel->paths[i])
 			continue;
-		tb_dma_deinit_path(tunnel->paths[i]);
+		tb_dma_destroy_path(tunnel->paths[i]);
 	}
 }
 
@@ -1697,7 +1817,7 @@ struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
 
 	tunnel->src_port = nhi;
 	tunnel->dst_port = dst;
-	tunnel->deinit = tb_dma_deinit;
+	tunnel->destroy = tb_dma_destroy;
 
 	credits = min_not_zero(dma_credits, nhi->sw->max_dma_credits);
 
@@ -1728,7 +1848,7 @@ struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
 	return tunnel;
 
 err_free:
-	tb_tunnel_free(tunnel);
+	tb_tunnel_put(tunnel);
 	return NULL;
 }
 
@@ -1809,7 +1929,7 @@ static int tb_usb3_max_link_rate(struct tb_port *up, struct tb_port *down)
 	return min(up_max_rate, down_max_rate);
 }
 
-static int tb_usb3_init(struct tb_tunnel *tunnel)
+static int tb_usb3_pre_activate(struct tb_tunnel *tunnel)
 {
 	tb_tunnel_dbg(tunnel, "allocating initial bandwidth %d/%d Mb/s\n",
 		      tunnel->allocated_up, tunnel->allocated_down);
@@ -2040,7 +2160,7 @@ struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down,
 	tb_tunnel_dbg(tunnel, "currently allocated bandwidth %d/%d Mb/s\n",
 		      tunnel->allocated_up, tunnel->allocated_down);
 
-	tunnel->init = tb_usb3_init;
+	tunnel->pre_activate = tb_usb3_pre_activate;
 	tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth;
 	tunnel->release_unused_bandwidth = tb_usb3_release_unused_bandwidth;
@@ -2054,7 +2174,7 @@ struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down,
 err_deactivate:
 	tb_tunnel_deactivate(tunnel);
 err_free:
-	tb_tunnel_free(tunnel);
+	tb_tunnel_put(tunnel);
 
 	return NULL;
 }
@@ -2064,10 +2184,10 @@ err_free:
  * @tb: Pointer to the domain structure
  * @up: USB3 upstream adapter port
 * @down: USB3 downstream adapter port
- * @max_up: Maximum available upstream bandwidth for the USB3 tunnel (%0
- *	    if not limited).
- * @max_down: Maximum available downstream bandwidth for the USB3 tunnel
- *	      (%0 if not limited).
+ * @max_up: Maximum available upstream bandwidth for the USB3 tunnel.
+ *	    %0 if no available bandwidth.
+ * @max_down: Maximum available downstream bandwidth for the USB3 tunnel.
+ *	      %0 if no available bandwidth.
 *
 * Allocate an USB3 tunnel. The ports must be of type @TB_TYPE_USB3_UP and
 * @TB_TYPE_USB3_DOWN.
@@ -2082,24 +2202,19 @@ struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
 	struct tb_path *path;
 	int max_rate = 0;
 
-	/*
-	 * Check that we have enough bandwidth available for the new
-	 * USB3 tunnel.
-	 */
-	if (max_up > 0 || max_down > 0) {
+	if (!tb_route(down->sw) && (max_up > 0 || max_down > 0)) {
+		/*
+		 * For USB3 isochronous transfers, we allow bandwidth which is
+		 * not higher than 90% of maximum supported bandwidth by USB3
+		 * adapters.
+		 */
 		max_rate = tb_usb3_max_link_rate(down, up);
 		if (max_rate < 0)
 			return NULL;
 
-		/* Only 90% can be allocated for USB3 isochronous transfers */
 		max_rate = max_rate * 90 / 100;
-		tb_port_dbg(up, "required bandwidth for USB3 tunnel %d Mb/s\n",
+		tb_port_dbg(up, "maximum required bandwidth for USB3 tunnel %d Mb/s\n",
 			    max_rate);
-
-		if (max_rate > max_up || max_rate > max_down) {
-			tb_port_warn(up, "not enough bandwidth for USB3 tunnel\n");
-			return NULL;
-		}
 	}
 
 	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3);
@@ -2115,7 +2230,7 @@ struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
 	path = tb_path_alloc(tb, down, TB_USB3_HOPID, up, TB_USB3_HOPID, 0,
 			     "USB3 Down");
 	if (!path) {
-		tb_tunnel_free(tunnel);
+		tb_tunnel_put(tunnel);
 		return NULL;
 	}
 	tb_usb3_init_path(path);
@@ -2124,17 +2239,17 @@ struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
 	path = tb_path_alloc(tb, up, TB_USB3_HOPID, down, TB_USB3_HOPID, 0,
 			     "USB3 Up");
 	if (!path) {
-		tb_tunnel_free(tunnel);
+		tb_tunnel_put(tunnel);
 		return NULL;
 	}
 	tb_usb3_init_path(path);
 	tunnel->paths[TB_USB3_PATH_UP] = path;
 
 	if (!tb_route(down->sw)) {
-		tunnel->allocated_up = max_rate;
-		tunnel->allocated_down = max_rate;
+		tunnel->allocated_up = min(max_rate, max_up);
+		tunnel->allocated_down = min(max_rate, max_down);
 
-		tunnel->init = tb_usb3_init;
+		tunnel->pre_activate = tb_usb3_pre_activate;
 		tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth;
 		tunnel->release_unused_bandwidth = tb_usb3_release_unused_bandwidth;
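For the host-router case the initial allocation is now clamped instead of rejected: 90% of the link rate is the ceiling for isochronous traffic, and min() trims that to whatever the bandwidth manager actually has available. Worked numbers (illustrative):

#include <stdio.h>

static int min_int(int a, int b) { return a < b ? a : b; }

int main(void)
{
	int max_rate = 20000;	/* tb_usb3_max_link_rate(), Mb/s */
	int max_up = 10000;	/* what is actually available */

	max_rate = max_rate * 90 / 100;	/* 18000 Mb/s isochronous cap */

	/* previously this combination failed; now it allocates 10000 */
	printf("allocated_up = %d Mb/s\n", min_int(max_rate, max_up));
	return 0;
}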
@@ -2146,31 +2261,6 @@ struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
 }
 
 /**
- * tb_tunnel_free() - free a tunnel
- * @tunnel: Tunnel to be freed
- *
- * Frees a tunnel. The tunnel does not need to be deactivated.
- */
-void tb_tunnel_free(struct tb_tunnel *tunnel)
-{
-	int i;
-
-	if (!tunnel)
-		return;
-
-	if (tunnel->deinit)
-		tunnel->deinit(tunnel);
-
-	for (i = 0; i < tunnel->npaths; i++) {
-		if (tunnel->paths[i])
-			tb_path_free(tunnel->paths[i]);
-	}
-
-	kfree(tunnel->paths);
-	kfree(tunnel);
-}
-
-/**
  * tb_tunnel_is_invalid - check whether an activated path is still valid
  * @tunnel: Tunnel to check
  */
@@ -2188,12 +2278,15 @@ bool tb_tunnel_is_invalid(struct tb_tunnel *tunnel)
 }
 
 /**
- * tb_tunnel_restart() - activate a tunnel after a hardware reset
- * @tunnel: Tunnel to restart
+ * tb_tunnel_activate() - activate a tunnel
+ * @tunnel: Tunnel to activate
  *
- * Return: 0 on success and negative errno in case if failure
+ * Return: 0 on success and negative errno in case of failure.
+ * Specifically returns %-EINPROGRESS if the tunnel activation is still
+ * in progress (that's for DP tunnels to complete DPRX capabilities
+ * read).
 */
-int tb_tunnel_restart(struct tb_tunnel *tunnel)
+int tb_tunnel_activate(struct tb_tunnel *tunnel)
 {
 	int res, i;
@@ -2210,8 +2303,10 @@ int tb_tunnel_activate(struct tb_tunnel *tunnel)
 		}
 	}
 
-	if (tunnel->init) {
-		res = tunnel->init(tunnel);
+	tunnel->state = TB_TUNNEL_ACTIVATING;
+
+	if (tunnel->pre_activate) {
+		res = tunnel->pre_activate(tunnel);
 		if (res)
 			return res;
 	}
@@ -2224,10 +2319,14 @@ int tb_tunnel_activate(struct tb_tunnel *tunnel)
 
 	if (tunnel->activate) {
 		res = tunnel->activate(tunnel, true);
-		if (res)
+		if (res) {
+			if (res == -EINPROGRESS)
+				return res;
 			goto err;
+		}
 	}
 
+	tunnel->state = TB_TUNNEL_ACTIVE;
 	return 0;
 
 err:
@@ -2237,27 +2336,6 @@ err:
 }
 
 /**
- * tb_tunnel_activate() - activate a tunnel
- * @tunnel: Tunnel to activate
- *
- * Return: Returns 0 on success or an error code on failure.
- */
-int tb_tunnel_activate(struct tb_tunnel *tunnel)
-{
-	int i;
-
-	for (i = 0; i < tunnel->npaths; i++) {
-		if (tunnel->paths[i]->activated) {
-			tb_tunnel_WARN(tunnel,
-				       "trying to activate an already activated tunnel\n");
-			return -EINVAL;
-		}
-	}
-
-	return tb_tunnel_restart(tunnel);
-}
-
-/**
  * tb_tunnel_deactivate() - deactivate a tunnel
  * @tunnel: Tunnel to deactivate
  */
@@ -2274,6 +2352,11 @@ void tb_tunnel_deactivate(struct tb_tunnel *tunnel)
 		if (tunnel->paths[i] && tunnel->paths[i]->activated)
 			tb_path_deactivate(tunnel->paths[i]);
 	}
+
+	if (tunnel->post_deactivate)
+		tunnel->post_deactivate(tunnel);
+
+	tunnel->state = TB_TUNNEL_INACTIVE;
 }
 
 /**
@@ -2300,18 +2383,10 @@ bool tb_tunnel_port_on_path(const struct tb_tunnel *tunnel,
 	return false;
 }
 
-static bool tb_tunnel_is_active(const struct tb_tunnel *tunnel)
+/* Is tb_tunnel_activate() called for the tunnel */
+static bool tb_tunnel_is_activated(const struct tb_tunnel *tunnel)
 {
-	int i;
-
-	for (i = 0; i < tunnel->npaths; i++) {
-		if (!tunnel->paths[i])
-			return false;
-		if (!tunnel->paths[i]->activated)
-			return false;
-	}
-
-	return true;
+	return tunnel->state == TB_TUNNEL_ACTIVATING || tb_tunnel_is_active(tunnel);
 }
 
 /**
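Activation is now tracked with an explicit state rather than inferred from per-path activation flags: INACTIVE, then ACTIVATING, then ACTIVE, where a DP tunnel may stay in ACTIVATING (reported to the caller as -EINPROGRESS) until the DPRX worker finishes. A minimal model of the transitions (enum and helper names are illustrative):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

enum state { INACTIVE, ACTIVATING, ACTIVE };

static enum state st = INACTIVE;

static int activate(bool async)
{
	st = ACTIVATING;
	if (async)
		return -EINPROGRESS;	/* worker flips to ACTIVE later */
	st = ACTIVE;
	return 0;
}

static bool is_activated(void)
{
	/* ACTIVATING already counts for bandwidth bookkeeping */
	return st == ACTIVATING || st == ACTIVE;
}

int main(void)
{
	int ret = activate(true);

	printf("ret=%d activated=%d\n", ret, is_activated());
	return 0;
}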
@@ -2328,7 +2403,7 @@ int tb_tunnel_maximum_bandwidth(struct tb_tunnel *tunnel, int *max_up,
 				int *max_down)
 {
 	if (!tb_tunnel_is_active(tunnel))
-		return -EINVAL;
+		return -ENOTCONN;
 
 	if (tunnel->maximum_bandwidth)
 		return tunnel->maximum_bandwidth(tunnel, max_up, max_down);
@@ -2349,7 +2424,7 @@ int tb_tunnel_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up,
 				  int *allocated_down)
 {
 	if (!tb_tunnel_is_active(tunnel))
-		return -EINVAL;
+		return -ENOTCONN;
 
 	if (tunnel->allocated_bandwidth)
 		return tunnel->allocated_bandwidth(tunnel, allocated_up,
@@ -2372,7 +2447,7 @@ int tb_tunnel_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up,
 			      int *alloc_down)
 {
 	if (!tb_tunnel_is_active(tunnel))
-		return -EINVAL;
+		return -ENOTCONN;
 
 	if (tunnel->alloc_bandwidth)
 		return tunnel->alloc_bandwidth(tunnel, alloc_up, alloc_down);
@@ -2397,26 +2472,27 @@ int tb_tunnel_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
 {
 	int up_bw = 0, down_bw = 0;
 
-	if (!tb_tunnel_is_active(tunnel))
-		goto out;
-
-	if (tunnel->consumed_bandwidth) {
+	/*
+	 * Here we need to distinguish the tunnels that are not active
+	 * from tunnels that are either fully active or have activation
+	 * started. The latter is true for DP tunnels where we must
+	 * report the consumed to be the maximum we gave it until the
+	 * DPRX capabilities read is done by the graphics driver.
+	 */
+	if (tb_tunnel_is_activated(tunnel) && tunnel->consumed_bandwidth) {
 		int ret;
 
 		ret = tunnel->consumed_bandwidth(tunnel, &up_bw, &down_bw);
 		if (ret)
 			return ret;
-
-		tb_tunnel_dbg(tunnel, "consumed bandwidth %d/%d Mb/s\n", up_bw,
-			      down_bw);
 	}
 
-out:
 	if (consumed_up)
 		*consumed_up = up_bw;
 	if (consumed_down)
 		*consumed_down = down_bw;
 
+	tb_tunnel_dbg(tunnel, "consumed bandwidth %d/%d Mb/s\n", up_bw, down_bw);
 	return 0;
 }
 
@@ -2432,7 +2508,7 @@ out:
 int tb_tunnel_release_unused_bandwidth(struct tb_tunnel *tunnel)
 {
 	if (!tb_tunnel_is_active(tunnel))
-		return 0;
+		return -ENOTCONN;
 
 	if (tunnel->release_unused_bandwidth) {
 		int ret;
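Because the bandwidth queries now fail with -ENOTCONN on a tunnel that is not (yet) active, callers can tell "not connected yet" apart from a genuine argument error (previously -EINVAL, or a silent 0 from the release path). A hypothetical caller fragment (not from this patch):

	ret = tb_tunnel_allocated_bandwidth(tunnel, &allocated_up,
					    &allocated_down);
	if (ret == -ENOTCONN) {
		/* not activated yet, e.g. DPRX still running; retry later */
	} else if (ret) {
		/* real failure */
	}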