path: root/drivers/thunderbolt/tunnel.c
Diffstat (limited to 'drivers/thunderbolt/tunnel.c')
-rw-r--r--  drivers/thunderbolt/tunnel.c | 597
1 file changed, 399 insertions(+), 198 deletions(-)
diff --git a/drivers/thunderbolt/tunnel.c b/drivers/thunderbolt/tunnel.c
index 41cf6378ad25..9fa95c595ecc 100644
--- a/drivers/thunderbolt/tunnel.c
+++ b/drivers/thunderbolt/tunnel.c
@@ -70,6 +70,24 @@
#define USB4_V2_PCI_MIN_BANDWIDTH (1500 * TB_PCI_WEIGHT)
#define USB4_V2_USB3_MIN_BANDWIDTH (1500 * TB_USB3_WEIGHT)
+/*
+ * According to the VESA spec, the DPRX negotiation shall complete in
+ * 5 seconds after the tunnel is established. Since at least i915 can
+ * runtime suspend if there is nothing connected, and it polls any new
+ * connections every 10 seconds, we use 12 seconds here.
+ *
+ * These are in ms.
+ */
+#define TB_DPRX_TIMEOUT 12000
+#define TB_DPRX_WAIT_TIMEOUT 25
+#define TB_DPRX_POLL_DELAY 50
+
+static int dprx_timeout = TB_DPRX_TIMEOUT;
+module_param(dprx_timeout, int, 0444);
+MODULE_PARM_DESC(dprx_timeout,
+ "DPRX capability read timeout in ms, -1 waits forever (default: "
+ __MODULE_STRING(TB_DPRX_TIMEOUT) ")");
+
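Usage note: since the parameter is declared with mode 0444 it is read-only through sysfs, but it can be set at module load time, e.g. "modprobe thunderbolt dprx_timeout=-1" to wait forever, or via "thunderbolt.dprx_timeout=-1" on the kernel command line when the driver is built in.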
static unsigned int dma_credits = TB_DMA_CREDITS;
module_param(dma_credits, uint, 0444);
MODULE_PARM_DESC(dma_credits, "specify custom credits for DMA tunnels (default: "
@@ -82,6 +100,17 @@ MODULE_PARM_DESC(bw_alloc_mode,
static const char * const tb_tunnel_names[] = { "PCI", "DP", "DMA", "USB3" };
+static const char * const tb_event_names[] = {
+ [TB_TUNNEL_ACTIVATED] = "activated",
+ [TB_TUNNEL_CHANGED] = "changed",
+ [TB_TUNNEL_DEACTIVATED] = "deactivated",
+ [TB_TUNNEL_LOW_BANDWIDTH] = "low bandwidth",
+ [TB_TUNNEL_NO_BANDWIDTH] = "insufficient bandwidth",
+};
+
+/* Synchronizes kref_get()/put() of struct tb_tunnel */
+static DEFINE_MUTEX(tb_tunnel_lock);
+
static inline unsigned int tb_usable_credits(const struct tb_port *port)
{
return port->total_credits - port->ctl_credits;
@@ -92,6 +121,8 @@ static inline unsigned int tb_usable_credits(const struct tb_port *port)
* @port: Lane adapter to check
* @max_dp_streams: If non-%NULL stores maximum number of simultaneous DP
* streams possible through this lane adapter
+ *
+ * Return: Number of available credits.
*/
static unsigned int tb_available_credits(const struct tb_port *port,
size_t *max_dp_streams)
@@ -155,7 +186,7 @@ static struct tb_tunnel *tb_tunnel_alloc(struct tb *tb, size_t npaths,
tunnel->paths = kcalloc(npaths, sizeof(tunnel->paths[0]), GFP_KERNEL);
if (!tunnel->paths) {
- tb_tunnel_free(tunnel);
+ kfree(tunnel);
return NULL;
}
@@ -163,16 +194,114 @@ static struct tb_tunnel *tb_tunnel_alloc(struct tb *tb, size_t npaths,
tunnel->tb = tb;
tunnel->npaths = npaths;
tunnel->type = type;
+ kref_init(&tunnel->kref);
return tunnel;
}
+static void tb_tunnel_get(struct tb_tunnel *tunnel)
+{
+ mutex_lock(&tb_tunnel_lock);
+ kref_get(&tunnel->kref);
+ mutex_unlock(&tb_tunnel_lock);
+}
+
+static void tb_tunnel_destroy(struct kref *kref)
+{
+ struct tb_tunnel *tunnel = container_of(kref, typeof(*tunnel), kref);
+ int i;
+
+ if (tunnel->destroy)
+ tunnel->destroy(tunnel);
+
+ for (i = 0; i < tunnel->npaths; i++) {
+ if (tunnel->paths[i])
+ tb_path_free(tunnel->paths[i]);
+ }
+
+ kfree(tunnel->paths);
+ kfree(tunnel);
+}
+
+void tb_tunnel_put(struct tb_tunnel *tunnel)
+{
+ mutex_lock(&tb_tunnel_lock);
+ kref_put(&tunnel->kref, tb_tunnel_destroy);
+ mutex_unlock(&tb_tunnel_lock);
+}
+
+/**
+ * tb_tunnel_event() - Notify userspace about tunneling event
+ * @tb: Domain where the event occurred
+ * @event: Event that happened
+ * @type: Type of the tunnel in question
+ * @src_port: Tunnel source port (can be %NULL)
+ * @dst_port: Tunnel destination port (can be %NULL)
+ *
+ * Notifies userspace about tunneling @event in the domain. The tunnel
+ * does not need to exist (e.g. the tunnel was not activated because
+ * there was not enough bandwidth). If @src_port and @dst_port are
+ * given, the full %TUNNEL_DETAILS environment variable is filled in.
+ * Otherwise only the shorter form (just the tunnel type) is used.
+ */
+void tb_tunnel_event(struct tb *tb, enum tb_tunnel_event event,
+ enum tb_tunnel_type type,
+ const struct tb_port *src_port,
+ const struct tb_port *dst_port)
+{
+ char *envp[3] = { NULL };
+
+ if (WARN_ON_ONCE(event >= ARRAY_SIZE(tb_event_names)))
+ return;
+ if (WARN_ON_ONCE(type >= ARRAY_SIZE(tb_tunnel_names)))
+ return;
+
+ envp[0] = kasprintf(GFP_KERNEL, "TUNNEL_EVENT=%s", tb_event_names[event]);
+ if (!envp[0])
+ return;
+
+ if (src_port != NULL && dst_port != NULL) {
+ envp[1] = kasprintf(GFP_KERNEL, "TUNNEL_DETAILS=%llx:%u <-> %llx:%u (%s)",
+ tb_route(src_port->sw), src_port->port,
+ tb_route(dst_port->sw), dst_port->port,
+ tb_tunnel_names[type]);
+ } else {
+ envp[1] = kasprintf(GFP_KERNEL, "TUNNEL_DETAILS=(%s)",
+ tb_tunnel_names[type]);
+ }
+
+ if (envp[1])
+ tb_domain_event(tb, envp);
+
+ kfree(envp[1]);
+ kfree(envp[0]);
+}
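For illustration (the route and port values below are hypothetical), an activated DP tunnel with both ports supplied would generate a uevent with environment variables along these lines, per the kasprintf() formats above:

	TUNNEL_EVENT=activated
	TUNNEL_DETAILS=0:12 <-> 301:10 (DP)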
+
+static inline void tb_tunnel_set_active(struct tb_tunnel *tunnel, bool active)
+{
+ if (active) {
+ tunnel->state = TB_TUNNEL_ACTIVE;
+ tb_tunnel_event(tunnel->tb, TB_TUNNEL_ACTIVATED, tunnel->type,
+ tunnel->src_port, tunnel->dst_port);
+ } else {
+ tunnel->state = TB_TUNNEL_INACTIVE;
+ tb_tunnel_event(tunnel->tb, TB_TUNNEL_DEACTIVATED, tunnel->type,
+ tunnel->src_port, tunnel->dst_port);
+ }
+}
+
+static inline void tb_tunnel_changed(struct tb_tunnel *tunnel)
+{
+ tb_tunnel_event(tunnel->tb, TB_TUNNEL_CHANGED, tunnel->type,
+ tunnel->src_port, tunnel->dst_port);
+}
+
static int tb_pci_set_ext_encapsulation(struct tb_tunnel *tunnel, bool enable)
{
struct tb_port *port = tb_upstream_port(tunnel->dst_port->sw);
int ret;
- /* Only supported of both routers are at least USB4 v2 */
+ /* Only supported if both routers are at least USB4 v2 */
if ((usb4_switch_version(tunnel->src_port->sw) < 2) ||
(usb4_switch_version(tunnel->dst_port->sw) < 2))
return 0;
@@ -288,8 +417,9 @@ static int tb_pci_init_path(struct tb_path *path)
* @alloc_hopid: Allocate HopIDs from visited ports
*
* If @down adapter is active, follows the tunnel to the PCIe upstream
- * adapter and back. Returns the discovered tunnel or %NULL if there was
- * no tunnel.
+ * adapter and back.
+ *
+ * Return: Pointer to &struct tb_tunnel or %NULL if there was no tunnel.
*/
struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down,
bool alloc_hopid)
@@ -355,7 +485,7 @@ struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down,
err_deactivate:
tb_tunnel_deactivate(tunnel);
err_free:
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
return NULL;
}
@@ -369,7 +499,7 @@ err_free:
* Allocate a PCI tunnel. The ports must be of type TB_TYPE_PCIE_UP and
* TB_TYPE_PCIE_DOWN.
*
- * Return: Returns a tb_tunnel on success or NULL on failure.
+ * Return: Pointer to &struct tb_tunnel or %NULL on failure.
*/
struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
struct tb_port *down)
@@ -404,7 +534,7 @@ struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
return tunnel;
err_free:
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
return NULL;
}
@@ -416,9 +546,12 @@ err_free:
*
* Can be called to any connected lane 0 adapter to find out how much
* bandwidth needs to be left in reserve for possible PCIe bulk traffic.
- * Returns true if there is something to be reserved and writes the
- * amount to @reserved_down/@reserved_up. Otherwise returns false and
- * does not touch the parameters.
+ *
+ * Return:
+ * * %true - If there is something to be reserved. Writes the amount to
+ * @reserved_down/@reserved_up.
+ * * %false - Nothing to be reserved. Leaves @reserved_down/@reserved_up
+ * unmodified.
*/
bool tb_tunnel_reserved_pci(struct tb_port *port, int *reserved_up,
int *reserved_down)
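A minimal caller sketch (hypothetical, not part of this patch) showing how the reservation is meant to be consumed; available_up/available_down are assumed to hold the bandwidth computed so far:

	int reserved_up, reserved_down;

	if (tb_tunnel_reserved_pci(port, &reserved_up, &reserved_down)) {
		/* Keep the reserved amount out of the available pool */
		available_up -= reserved_up;
		available_down -= reserved_down;
	}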
@@ -851,7 +984,7 @@ static int tb_dp_bandwidth_alloc_mode_enable(struct tb_tunnel *tunnel)
return 0;
}
-static int tb_dp_init(struct tb_tunnel *tunnel)
+static int tb_dp_pre_activate(struct tb_tunnel *tunnel)
{
struct tb_port *in = tunnel->src_port;
struct tb_switch *sw = in->sw;
@@ -877,7 +1010,7 @@ static int tb_dp_init(struct tb_tunnel *tunnel)
return tb_dp_bandwidth_alloc_mode_enable(tunnel);
}
-static void tb_dp_deinit(struct tb_tunnel *tunnel)
+static void tb_dp_post_deactivate(struct tb_tunnel *tunnel)
{
struct tb_port *in = tunnel->src_port;
@@ -889,6 +1022,96 @@ static void tb_dp_deinit(struct tb_tunnel *tunnel)
}
}
+static ktime_t dprx_timeout_to_ktime(int timeout_msec)
+{
+ return timeout_msec >= 0 ?
+ ktime_add_ms(ktime_get(), timeout_msec) : KTIME_MAX;
+}
+
+static int tb_dp_wait_dprx(struct tb_tunnel *tunnel, int timeout_msec)
+{
+ ktime_t timeout = dprx_timeout_to_ktime(timeout_msec);
+ struct tb_port *in = tunnel->src_port;
+
+ /*
+ * Wait for DPRX done. Normally it should already be set for an
+ * active tunnel.
+ */
+ do {
+ u32 val;
+ int ret;
+
+ ret = tb_port_read(in, &val, TB_CFG_PORT,
+ in->cap_adap + DP_COMMON_CAP, 1);
+ if (ret)
+ return ret;
+
+ if (val & DP_COMMON_CAP_DPRX_DONE)
+ return 0;
+
+ usleep_range(100, 150);
+ } while (ktime_before(ktime_get(), timeout));
+
+ tb_tunnel_dbg(tunnel, "DPRX read timeout\n");
+ return -ETIMEDOUT;
+}
+
+static void tb_dp_dprx_work(struct work_struct *work)
+{
+ struct tb_tunnel *tunnel = container_of(work, typeof(*tunnel), dprx_work.work);
+ struct tb *tb = tunnel->tb;
+
+ if (!tunnel->dprx_canceled) {
+ mutex_lock(&tb->lock);
+ if (tb_dp_is_usb4(tunnel->src_port->sw) &&
+ tb_dp_wait_dprx(tunnel, TB_DPRX_WAIT_TIMEOUT)) {
+ if (ktime_before(ktime_get(), tunnel->dprx_timeout)) {
+ queue_delayed_work(tb->wq, &tunnel->dprx_work,
+ msecs_to_jiffies(TB_DPRX_POLL_DELAY));
+ mutex_unlock(&tb->lock);
+ return;
+ }
+ } else {
+ tb_tunnel_set_active(tunnel, true);
+ }
+ mutex_unlock(&tb->lock);
+ }
+
+ if (tunnel->callback)
+ tunnel->callback(tunnel, tunnel->callback_data);
+ tb_tunnel_put(tunnel);
+}
+
+static int tb_dp_dprx_start(struct tb_tunnel *tunnel)
+{
+ /*
+ * Bump up the reference to keep the tunnel around. It will be
+ * dropped in tb_dp_dprx_stop() once the tunnel is deactivated.
+ */
+ tb_tunnel_get(tunnel);
+
+ tunnel->dprx_started = true;
+
+ if (tunnel->callback) {
+ tunnel->dprx_timeout = dprx_timeout_to_ktime(dprx_timeout);
+ queue_delayed_work(tunnel->tb->wq, &tunnel->dprx_work, 0);
+ return -EINPROGRESS;
+ }
+
+ return tb_dp_is_usb4(tunnel->src_port->sw) ?
+ tb_dp_wait_dprx(tunnel, dprx_timeout) : 0;
+}
+
+static void tb_dp_dprx_stop(struct tb_tunnel *tunnel)
+{
+ if (tunnel->dprx_started) {
+ tunnel->dprx_started = false;
+ tunnel->dprx_canceled = true;
+ if (cancel_delayed_work(&tunnel->dprx_work))
+ tb_tunnel_put(tunnel);
+ }
+}
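A note on the reference counting above: cancel_delayed_work() returns true only if the work was still pending, in which case tb_dp_dprx_work() never ran and never dropped the reference taken in tb_dp_dprx_start(), so it is dropped here. If the work is already running, tb_dp_dprx_work() drops the reference itself when it finishes, and dprx_canceled makes it skip the activation path.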
+
static int tb_dp_activate(struct tb_tunnel *tunnel, bool active)
{
int ret;
@@ -910,6 +1133,7 @@ static int tb_dp_activate(struct tb_tunnel *tunnel, bool active)
paths[TB_DP_AUX_PATH_IN]->hops[0].in_hop_index,
paths[TB_DP_AUX_PATH_OUT]->hops[last].next_hop_index);
} else {
+ tb_dp_dprx_stop(tunnel);
tb_dp_port_hpd_clear(tunnel->src_port);
tb_dp_port_set_hops(tunnel->src_port, 0, 0, 0);
if (tb_port_is_dpout(tunnel->dst_port))
@@ -920,10 +1144,13 @@ static int tb_dp_activate(struct tb_tunnel *tunnel, bool active)
if (ret)
return ret;
- if (tb_port_is_dpout(tunnel->dst_port))
- return tb_dp_port_enable(tunnel->dst_port, active);
+ if (tb_port_is_dpout(tunnel->dst_port)) {
+ ret = tb_dp_port_enable(tunnel->dst_port, active);
+ if (ret)
+ return ret;
+ }
- return 0;
+ return active ? tb_dp_dprx_start(tunnel) : 0;
}
/**
@@ -931,7 +1158,8 @@ static int tb_dp_activate(struct tb_tunnel *tunnel, bool active)
* @tunnel: DP tunnel to check
* @max_bw_rounded: Maximum bandwidth in Mb/s rounded up to the next granularity
*
- * Returns maximum possible bandwidth for this tunnel in Mb/s.
+ * Return: Maximum possible bandwidth for this tunnel in Mb/s, negative errno
+ * in case of failure.
*/
static int tb_dp_bandwidth_mode_maximum_bandwidth(struct tb_tunnel *tunnel,
int *max_bw_rounded)
@@ -942,8 +1170,8 @@ static int tb_dp_bandwidth_mode_maximum_bandwidth(struct tb_tunnel *tunnel,
/*
* DP IN adapter DP_LOCAL_CAP gets updated to the lowest AUX
- * read parameter values so this so we can use this to determine
- * the maximum possible bandwidth over this link.
+ * read parameter values so we can use this to determine the
+ * maximum possible bandwidth over this link.
*
* See USB4 v2 spec 1.0 10.4.4.5.
*/
@@ -1076,35 +1304,6 @@ static int tb_dp_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up,
return 0;
}
-static int tb_dp_wait_dprx(struct tb_tunnel *tunnel, int timeout_msec)
-{
- ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
- struct tb_port *in = tunnel->src_port;
-
- /*
- * Wait for DPRX done. Normally it should be already set for
- * active tunnel.
- */
- do {
- u32 val;
- int ret;
-
- ret = tb_port_read(in, &val, TB_CFG_PORT,
- in->cap_adap + DP_COMMON_CAP, 1);
- if (ret)
- return ret;
-
- if (val & DP_COMMON_CAP_DPRX_DONE) {
- tb_tunnel_dbg(tunnel, "DPRX read done\n");
- return 0;
- }
- usleep_range(100, 150);
- } while (ktime_before(ktime_get(), timeout));
-
- tb_tunnel_dbg(tunnel, "DPRX read timeout\n");
- return -ETIMEDOUT;
-}
-
/* Read cap from tunnel DP IN */
static int tb_dp_read_cap(struct tb_tunnel *tunnel, unsigned int cap, u32 *rate,
u32 *lanes)
@@ -1168,32 +1367,39 @@ static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
int ret;
if (tb_dp_is_usb4(sw)) {
- /*
- * On USB4 routers check if the bandwidth allocation
- * mode is enabled first and then read the bandwidth
- * through those registers.
- */
- ret = tb_dp_bandwidth_mode_consumed_bandwidth(tunnel, consumed_up,
- consumed_down);
- if (ret < 0) {
- if (ret != -EOPNOTSUPP)
+ ret = tb_dp_wait_dprx(tunnel, 0);
+ if (ret) {
+ if (ret == -ETIMEDOUT) {
+ /*
+ * While we wait for DPRX to complete, the
+ * tunnel consumes as much as was initially
+ * reserved for it.
+ */
+ ret = tb_dp_read_cap(tunnel, DP_REMOTE_CAP,
+ &rate, &lanes);
+ if (ret)
+ return ret;
+ } else {
+ return ret;
+ }
+ } else {
+ /*
+ * On USB4 routers check if the bandwidth allocation
+ * mode is enabled first and then read the bandwidth
+ * through those registers.
+ */
+ ret = tb_dp_bandwidth_mode_consumed_bandwidth(tunnel, consumed_up,
+ consumed_down);
+ if (ret < 0) {
+ if (ret != -EOPNOTSUPP)
+ return ret;
+ } else if (!ret) {
+ return 0;
+ }
+ ret = tb_dp_read_cap(tunnel, DP_COMMON_CAP, &rate, &lanes);
+ if (ret)
return ret;
- } else if (!ret) {
- return 0;
}
- /*
- * Then see if the DPRX negotiation is ready and if yes
- * return that bandwidth (it may be smaller than the
- * reduced one). According to VESA spec, the DPRX
- * negotiation shall compete in 5 seconds after tunnel
- * established. We give it 100ms extra just in case.
- */
- ret = tb_dp_wait_dprx(tunnel, 5100);
- if (ret)
- return ret;
- ret = tb_dp_read_cap(tunnel, DP_COMMON_CAP, &rate, &lanes);
- if (ret)
- return ret;
} else if (sw->generation >= 2) {
ret = tb_dp_read_cap(tunnel, DP_REMOTE_CAP, &rate, &lanes);
if (ret)
@@ -1349,7 +1555,7 @@ static void tb_dp_dump(struct tb_tunnel *tunnel)
* and back. Returns the discovered tunnel or %NULL if there was no
* tunnel.
*
- * Return: DP tunnel or %NULL if no tunnel found.
+ * Return: Pointer to &struct tb_tunnel or %NULL if no tunnel found.
*/
struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in,
bool alloc_hopid)
@@ -1365,9 +1571,9 @@ struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in,
if (!tunnel)
return NULL;
- tunnel->init = tb_dp_init;
- tunnel->deinit = tb_dp_deinit;
+ tunnel->pre_activate = tb_dp_pre_activate;
tunnel->activate = tb_dp_activate;
+ tunnel->post_deactivate = tb_dp_post_deactivate;
tunnel->maximum_bandwidth = tb_dp_maximum_bandwidth;
tunnel->allocated_bandwidth = tb_dp_allocated_bandwidth;
tunnel->alloc_bandwidth = tb_dp_alloc_bandwidth;
@@ -1424,7 +1630,7 @@ struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in,
err_deactivate:
tb_tunnel_deactivate(tunnel);
err_free:
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
return NULL;
}
@@ -1439,15 +1645,24 @@ err_free:
* %0 if no available bandwidth.
* @max_down: Maximum available downstream bandwidth for the DP tunnel.
* %0 if no available bandwidth.
+ * @callback: Optional callback that is called when the DP tunnel is
+ * fully activated (or there is an error)
+ * @callback_data: Optional data for @callback
*
* Allocates a tunnel between @in and @out that is capable of tunneling
- * Display Port traffic.
+ * Display Port traffic. If @callback is not %NULL it will be called
+ * after tb_tunnel_activate() once the tunnel has been fully activated.
+ * It can call tb_tunnel_is_active() to check whether the activation
+ * succeeded (%false means there was some sort of issue). The @callback
+ * is called without @tb->lock held.
*
- * Return: Returns a tb_tunnel on success or NULL on failure.
+ * Return: Pointer to &struct tb_tunnel or %NULL in case of failure.
*/
struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
struct tb_port *out, int link_nr,
- int max_up, int max_down)
+ int max_up, int max_down,
+ void (*callback)(struct tb_tunnel *, void *),
+ void *callback_data)
{
struct tb_tunnel *tunnel;
struct tb_path **paths;
@@ -1461,9 +1676,9 @@ struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
if (!tunnel)
return NULL;
- tunnel->init = tb_dp_init;
- tunnel->deinit = tb_dp_deinit;
+ tunnel->pre_activate = tb_dp_pre_activate;
tunnel->activate = tb_dp_activate;
+ tunnel->post_deactivate = tb_dp_post_deactivate;
tunnel->maximum_bandwidth = tb_dp_maximum_bandwidth;
tunnel->allocated_bandwidth = tb_dp_allocated_bandwidth;
tunnel->alloc_bandwidth = tb_dp_alloc_bandwidth;
@@ -1472,6 +1687,9 @@ struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
tunnel->dst_port = out;
tunnel->max_up = max_up;
tunnel->max_down = max_down;
+ tunnel->callback = callback;
+ tunnel->callback_data = callback_data;
+ INIT_DELAYED_WORK(&tunnel->dprx_work, tb_dp_dprx_work);
paths = tunnel->paths;
pm_support = usb4_switch_version(in->sw) >= 2;
@@ -1500,7 +1718,7 @@ struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
return tunnel;
err_free:
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
return NULL;
}
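A hypothetical caller sketch (not from this patch; the function name and the debug messages are placeholders) of the new callback mechanism:

	static void tb_dp_activation_done(struct tb_tunnel *tunnel, void *data)
	{
		/* Called from the domain workqueue without tb->lock held */
		if (tb_tunnel_is_active(tunnel))
			tb_tunnel_dbg(tunnel, "DP tunnel fully activated\n");
		else
			tb_tunnel_dbg(tunnel, "DP tunnel activation failed\n");
	}

	tunnel = tb_tunnel_alloc_dp(tb, in, out, link_nr, max_up, max_down,
				    tb_dp_activation_done, NULL);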
@@ -1565,8 +1783,8 @@ static int tb_dma_init_rx_path(struct tb_path *path, unsigned int credits)
/*
* First lane adapter is the one connected to the remote host.
- * We don't tunnel other traffic over this link so can use all
- * the credits (except the ones reserved for control traffic).
+ * We don't tunnel other traffic over this link so we can use
+ * all the credits (except the ones reserved for control traffic).
*/
hop = &path->hops[0];
tmp = min(tb_usable_credits(hop->in_port), credits);
@@ -1620,7 +1838,7 @@ static void tb_dma_release_credits(struct tb_path_hop *hop)
}
}
-static void tb_dma_deinit_path(struct tb_path *path)
+static void tb_dma_destroy_path(struct tb_path *path)
{
struct tb_path_hop *hop;
@@ -1628,14 +1846,14 @@ static void tb_dma_deinit_path(struct tb_path *path)
tb_dma_release_credits(hop);
}
-static void tb_dma_deinit(struct tb_tunnel *tunnel)
+static void tb_dma_destroy(struct tb_tunnel *tunnel)
{
int i;
for (i = 0; i < tunnel->npaths; i++) {
if (!tunnel->paths[i])
continue;
- tb_dma_deinit_path(tunnel->paths[i]);
+ tb_dma_destroy_path(tunnel->paths[i]);
}
}
@@ -1651,7 +1869,7 @@ static void tb_dma_deinit(struct tb_tunnel *tunnel)
* @receive_ring: NHI ring number used to receive packets from the
* other domain. Set to %-1 if RX path is not needed.
*
- * Return: Returns a tb_tunnel on success or NULL on failure.
+ * Return: Pointer to &struct tb_tunnel or %NULL in case of failure.
*/
struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
struct tb_port *dst, int transmit_path,
@@ -1681,7 +1899,7 @@ struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
tunnel->src_port = nhi;
tunnel->dst_port = dst;
- tunnel->deinit = tb_dma_deinit;
+ tunnel->destroy = tb_dma_destroy;
credits = min_not_zero(dma_credits, nhi->sw->max_dma_credits);
@@ -1712,7 +1930,7 @@ struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
return tunnel;
err_free:
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
return NULL;
}
@@ -1728,7 +1946,8 @@ err_free:
*
* This function can be used to match specific DMA tunnel, if there are
* multiple DMA tunnels going through the same XDomain connection.
- * Returns true if there is match and false otherwise.
+ *
+ * Return: %true if there is a match, %false otherwise.
*/
bool tb_tunnel_match_dma(const struct tb_tunnel *tunnel, int transmit_path,
int transmit_ring, int receive_path, int receive_ring)
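Usage note: identifiers that should not participate in the matching can be passed as -1 (per the parameter descriptions in the kernel-doc), so for example tb_tunnel_match_dma(tunnel, -1, transmit_ring, -1, -1) matches on the transmit ring alone.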
@@ -1793,7 +2012,7 @@ static int tb_usb3_max_link_rate(struct tb_port *up, struct tb_port *down)
return min(up_max_rate, down_max_rate);
}
-static int tb_usb3_init(struct tb_tunnel *tunnel)
+static int tb_usb3_pre_activate(struct tb_tunnel *tunnel)
{
tb_tunnel_dbg(tunnel, "allocating initial bandwidth %d/%d Mb/s\n",
tunnel->allocated_up, tunnel->allocated_down);
@@ -1825,7 +2044,7 @@ static int tb_usb3_consumed_bandwidth(struct tb_tunnel *tunnel,
/*
* PCIe tunneling, if enabled, affects the USB3 bandwidth so
- * take that it into account here.
+ * take that into account here.
*/
*consumed_up = tunnel->allocated_up *
(TB_USB3_WEIGHT + pcie_weight) / TB_USB3_WEIGHT;
@@ -1950,8 +2169,9 @@ static void tb_usb3_init_path(struct tb_path *path)
* @alloc_hopid: Allocate HopIDs from visited ports
*
* If @down adapter is active, follows the tunnel to the USB3 upstream
- * adapter and back. Returns the discovered tunnel or %NULL if there was
- * no tunnel.
+ * adapter and back.
+ *
+ * Return: Pointer to &struct tb_tunnel or %NULL if there was no tunnel.
*/
struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down,
bool alloc_hopid)
@@ -2024,7 +2244,7 @@ struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down,
tb_tunnel_dbg(tunnel, "currently allocated bandwidth %d/%d Mb/s\n",
tunnel->allocated_up, tunnel->allocated_down);
- tunnel->init = tb_usb3_init;
+ tunnel->pre_activate = tb_usb3_pre_activate;
tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth;
tunnel->release_unused_bandwidth =
tb_usb3_release_unused_bandwidth;
@@ -2038,7 +2258,7 @@ struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down,
err_deactivate:
tb_tunnel_deactivate(tunnel);
err_free:
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
return NULL;
}
@@ -2056,7 +2276,7 @@ err_free:
* Allocate an USB3 tunnel. The ports must be of type @TB_TYPE_USB3_UP and
* @TB_TYPE_USB3_DOWN.
*
- * Return: Returns a tb_tunnel on success or %NULL on failure.
+ * Return: Pointer to &struct tb_tunnel or %NULL in case of failure.
*/
struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
struct tb_port *down, int max_up,
@@ -2093,19 +2313,15 @@ struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
path = tb_path_alloc(tb, down, TB_USB3_HOPID, up, TB_USB3_HOPID, 0,
"USB3 Down");
- if (!path) {
- tb_tunnel_free(tunnel);
- return NULL;
- }
+ if (!path)
+ goto err_free;
tb_usb3_init_path(path);
tunnel->paths[TB_USB3_PATH_DOWN] = path;
path = tb_path_alloc(tb, up, TB_USB3_HOPID, down, TB_USB3_HOPID, 0,
"USB3 Up");
- if (!path) {
- tb_tunnel_free(tunnel);
- return NULL;
- }
+ if (!path)
+ goto err_free;
tb_usb3_init_path(path);
tunnel->paths[TB_USB3_PATH_UP] = path;
@@ -2113,7 +2329,7 @@ struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
tunnel->allocated_up = min(max_rate, max_up);
tunnel->allocated_down = min(max_rate, max_down);
- tunnel->init = tb_usb3_init;
+ tunnel->pre_activate = tb_usb3_pre_activate;
tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth;
tunnel->release_unused_bandwidth =
tb_usb3_release_unused_bandwidth;
@@ -2122,36 +2338,17 @@ struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
}
return tunnel;
-}
-
-/**
- * tb_tunnel_free() - free a tunnel
- * @tunnel: Tunnel to be freed
- *
- * Frees a tunnel. The tunnel does not need to be deactivated.
- */
-void tb_tunnel_free(struct tb_tunnel *tunnel)
-{
- int i;
-
- if (!tunnel)
- return;
-
- if (tunnel->deinit)
- tunnel->deinit(tunnel);
-
- for (i = 0; i < tunnel->npaths; i++) {
- if (tunnel->paths[i])
- tb_path_free(tunnel->paths[i]);
- }
- kfree(tunnel->paths);
- kfree(tunnel);
+err_free:
+ tb_tunnel_put(tunnel);
+ return NULL;
}
/**
* tb_tunnel_is_invalid - check whether an activated path is still valid
* @tunnel: Tunnel to check
+ *
+ * Return: %true if the path is no longer valid, %false otherwise.
*/
bool tb_tunnel_is_invalid(struct tb_tunnel *tunnel)
{
@@ -2167,12 +2364,16 @@ bool tb_tunnel_is_invalid(struct tb_tunnel *tunnel)
}
/**
- * tb_tunnel_restart() - activate a tunnel after a hardware reset
- * @tunnel: Tunnel to restart
+ * tb_tunnel_activate() - activate a tunnel
+ * @tunnel: Tunnel to activate
*
- * Return: 0 on success and negative errno in case if failure
+ * Return:
+ * * %0 - On success.
+ * * %-EINPROGRESS - If the tunnel activation is still in progress (DP
+ *   tunnels return this while the DPRX capabilities read completes).
+ * * Negative errno - Another error occurred.
*/
-int tb_tunnel_restart(struct tb_tunnel *tunnel)
+int tb_tunnel_activate(struct tb_tunnel *tunnel)
{
int res, i;
@@ -2189,8 +2390,10 @@ int tb_tunnel_restart(struct tb_tunnel *tunnel)
}
}
- if (tunnel->init) {
- res = tunnel->init(tunnel);
+ tunnel->state = TB_TUNNEL_ACTIVATING;
+
+ if (tunnel->pre_activate) {
+ res = tunnel->pre_activate(tunnel);
if (res)
return res;
}
@@ -2203,10 +2406,14 @@ int tb_tunnel_restart(struct tb_tunnel *tunnel)
if (tunnel->activate) {
res = tunnel->activate(tunnel, true);
- if (res)
+ if (res) {
+ if (res == -EINPROGRESS)
+ return res;
goto err;
+ }
}
+ tb_tunnel_set_active(tunnel, true);
return 0;
err:
@@ -2216,27 +2423,6 @@ err:
}
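A hypothetical caller sketch (names are placeholders, not from this patch) showing how the new return convention is consumed:

	ret = tb_tunnel_activate(tunnel);
	if (ret && ret != -EINPROGRESS) {
		/* Hard failure, release the tunnel */
		tb_tunnel_put(tunnel);
		return ret;
	}
	/*
	 * On -EINPROGRESS the DPRX capabilities read continues in the
	 * background and the @callback given to tb_tunnel_alloc_dp() is
	 * invoked once the tunnel is fully activated (or failed).
	 */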
/**
- * tb_tunnel_activate() - activate a tunnel
- * @tunnel: Tunnel to activate
- *
- * Return: Returns 0 on success or an error code on failure.
- */
-int tb_tunnel_activate(struct tb_tunnel *tunnel)
-{
- int i;
-
- for (i = 0; i < tunnel->npaths; i++) {
- if (tunnel->paths[i]->activated) {
- tb_tunnel_WARN(tunnel,
- "trying to activate an already activated tunnel\n");
- return -EINVAL;
- }
- }
-
- return tb_tunnel_restart(tunnel);
-}
-
-/**
* tb_tunnel_deactivate() - deactivate a tunnel
* @tunnel: Tunnel to deactivate
*/
@@ -2253,6 +2439,11 @@ void tb_tunnel_deactivate(struct tb_tunnel *tunnel)
if (tunnel->paths[i] && tunnel->paths[i]->activated)
tb_path_deactivate(tunnel->paths[i]);
}
+
+ if (tunnel->post_deactivate)
+ tunnel->post_deactivate(tunnel);
+
+ tb_tunnel_set_active(tunnel, false);
}
/**
@@ -2260,8 +2451,8 @@ void tb_tunnel_deactivate(struct tb_tunnel *tunnel)
* @tunnel: Tunnel to check
* @port: Port to check
*
- * Returns true if @tunnel goes through @port (direction does not matter),
- * false otherwise.
+ * Return: %true if @tunnel goes through @port (direction does not matter),
+ * %false otherwise.
*/
bool tb_tunnel_port_on_path(const struct tb_tunnel *tunnel,
const struct tb_port *port)
@@ -2279,18 +2470,10 @@ bool tb_tunnel_port_on_path(const struct tb_tunnel *tunnel,
return false;
}
-static bool tb_tunnel_is_active(const struct tb_tunnel *tunnel)
+/* Has tb_tunnel_activate() been called for the tunnel */
+static bool tb_tunnel_is_activated(const struct tb_tunnel *tunnel)
{
- int i;
-
- for (i = 0; i < tunnel->npaths; i++) {
- if (!tunnel->paths[i])
- return false;
- if (!tunnel->paths[i]->activated)
- return false;
- }
-
- return true;
+ return tunnel->state == TB_TUNNEL_ACTIVATING || tb_tunnel_is_active(tunnel);
}
/**
@@ -2299,15 +2482,17 @@ static bool tb_tunnel_is_active(const struct tb_tunnel *tunnel)
* @max_up: Maximum upstream bandwidth in Mb/s
* @max_down: Maximum downstream bandwidth in Mb/s
*
- * Returns maximum possible bandwidth this tunnel can go if not limited
- * by other bandwidth clients. If the tunnel does not support this
- * returns %-EOPNOTSUPP.
+ * Return:
+ * * Maximum possible bandwidth this tunnel can support if not
+ * limited by other bandwidth clients.
+ * * %-EOPNOTSUPP - If the tunnel does not support this function.
+ * * %-ENOTCONN - If the tunnel is not active.
*/
int tb_tunnel_maximum_bandwidth(struct tb_tunnel *tunnel, int *max_up,
int *max_down)
{
if (!tb_tunnel_is_active(tunnel))
- return -EINVAL;
+ return -ENOTCONN;
if (tunnel->maximum_bandwidth)
return tunnel->maximum_bandwidth(tunnel, max_up, max_down);
@@ -2321,14 +2506,18 @@ int tb_tunnel_maximum_bandwidth(struct tb_tunnel *tunnel, int *max_up,
* @allocated_down: Currently allocated downstream bandwidth in Mb/s is
* stored here
*
- * Returns the bandwidth allocated for the tunnel. This may be higher
- * than what the tunnel actually consumes.
+ * Return:
+ * * Bandwidth allocated for the tunnel. This may be higher than what the
+ * tunnel actually consumes.
+ * * %-EOPNOTSUPP - If the tunnel does not support this function.
+ * * %-ENOTCONN - If the tunnel is not active.
+ * * Negative errno - Another error occurred.
*/
int tb_tunnel_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up,
int *allocated_down)
{
if (!tb_tunnel_is_active(tunnel))
- return -EINVAL;
+ return -ENOTCONN;
if (tunnel->allocated_bandwidth)
return tunnel->allocated_bandwidth(tunnel, allocated_up,
@@ -2342,19 +2531,29 @@ int tb_tunnel_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up,
* @alloc_up: New upstream bandwidth in Mb/s
* @alloc_down: New downstream bandwidth in Mb/s
*
- * Tries to change tunnel bandwidth allocation. If succeeds returns %0
- * and updates @alloc_up and @alloc_down to that was actually allocated
- * (it may not be the same as passed originally). Returns negative errno
- * in case of failure.
+ * Tries to change tunnel bandwidth allocation.
+ *
+ * Return:
+ * * %0 - On success. Updates @alloc_up and @alloc_down to values that were
+ * actually allocated (they may not be the same as passed originally).
+ * * Negative errno - In case of failure.
*/
int tb_tunnel_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up,
int *alloc_down)
{
if (!tb_tunnel_is_active(tunnel))
- return -EINVAL;
+ return -ENOTCONN;
- if (tunnel->alloc_bandwidth)
- return tunnel->alloc_bandwidth(tunnel, alloc_up, alloc_down);
+ if (tunnel->alloc_bandwidth) {
+ int ret;
+
+ ret = tunnel->alloc_bandwidth(tunnel, alloc_up, alloc_down);
+ if (ret)
+ return ret;
+
+ tb_tunnel_changed(tunnel);
+ return 0;
+ }
return -EOPNOTSUPP;
}
@@ -2368,34 +2567,36 @@ int tb_tunnel_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up,
* Can be %NULL.
*
* Stores the amount of isochronous bandwidth @tunnel consumes in
- * @consumed_up and @consumed_down. In case of success returns %0,
- * negative errno otherwise.
+ * @consumed_up and @consumed_down.
+ *
+ * Return: %0 on success, negative errno otherwise.
*/
int tb_tunnel_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
int *consumed_down)
{
int up_bw = 0, down_bw = 0;
- if (!tb_tunnel_is_active(tunnel))
- goto out;
-
- if (tunnel->consumed_bandwidth) {
+ /*
+ * Here we need to distinguish inactive tunnels from tunnels that are
+ * either fully active or whose activation has started. The latter is
+ * true for DP tunnels, where we must report the consumed bandwidth to
+ * be the maximum we gave the tunnel until the DPRX capabilities read
+ * is done by the graphics driver.
+ */
+ if (tb_tunnel_is_activated(tunnel) && tunnel->consumed_bandwidth) {
int ret;
ret = tunnel->consumed_bandwidth(tunnel, &up_bw, &down_bw);
if (ret)
return ret;
-
- tb_tunnel_dbg(tunnel, "consumed bandwidth %d/%d Mb/s\n", up_bw,
- down_bw);
}
-out:
if (consumed_up)
*consumed_up = up_bw;
if (consumed_down)
*consumed_down = down_bw;
+ tb_tunnel_dbg(tunnel, "consumed bandwidth %d/%d Mb/s\n", up_bw, down_bw);
return 0;
}
@@ -2404,14 +2605,14 @@ out:
* @tunnel: Tunnel whose unused bandwidth to release
*
* If tunnel supports dynamic bandwidth management (USB3 tunnels at the
- * moment) this function makes it to release all the unused bandwidth.
+ * moment) this function makes it release all the unused bandwidth.
*
- * Returns %0 in case of success and negative errno otherwise.
+ * Return: %0 on success, negative errno otherwise.
*/
int tb_tunnel_release_unused_bandwidth(struct tb_tunnel *tunnel)
{
if (!tb_tunnel_is_active(tunnel))
- return 0;
+ return -ENOTCONN;
if (tunnel->release_unused_bandwidth) {
int ret;