author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2023-10-27 11:41:07 +0200
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2023-10-27 11:41:07 +0200
commit    9b6db9a3a675fc2f33b587a9909dcef20c4b3794 (patch)
tree      9f763b08fd7b30379de36ee027a9bc8b1aa9455e /drivers/thunderbolt
parent    ec098970364234411f39cd6821e6f95937b4070c (diff)
parent    a558892b3456d44f2a89d238f5d650f0574fa3b2 (diff)
Merge tag 'thunderbolt-for-v6.7-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/westeri/thunderbolt into usb-next
Mika writes:

  thunderbolt: Changes for v6.7 merge window

  This includes the following USB4/Thunderbolt changes for the v6.7
  merge window:

    - Configure asymmetric link if the DisplayPort bandwidth requires it
    - Enable path power management packet support for USB4 v2 routers
    - Make the bandwidth reservations follow the USB4 v2 connection
      manager guide suggestions
    - DisplayPort tunneling improvements
    - Small cleanups and improvements around the driver

  All these have been in linux-next with no reported issues.

* tag 'thunderbolt-for-v6.7-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/westeri/thunderbolt: (25 commits)
  thunderbolt: Fix one kernel-doc comment
  thunderbolt: Configure asymmetric link if needed and bandwidth allows
  thunderbolt: Add support for asymmetric link
  thunderbolt: Introduce tb_switch_depth()
  thunderbolt: Introduce tb_for_each_upstream_port_on_path()
  thunderbolt: Introduce tb_port_path_direction_downstream()
  thunderbolt: Set path power management packet support bit for USB4 v2 routers
  thunderbolt: Change bandwidth reservations to comply USB4 v2
  thunderbolt: Make is_gen4_link() available to the rest of the driver
  thunderbolt: Use weight constants in tb_usb3_consumed_bandwidth()
  thunderbolt: Use constants for path weight and priority
  thunderbolt: Add DP IN added last in the head of the list of DP resources
  thunderbolt: Create multiple DisplayPort tunnels if there are more DP IN/OUT pairs
  thunderbolt: Log NVM version of routers and retimers
  thunderbolt: Use tb_tunnel_xxx() log macros in tb.c
  thunderbolt: Expose tb_tunnel_xxx() log macros to the rest of the driver
  thunderbolt: Use tb_tunnel_dbg() where possible to make logging more consistent
  thunderbolt: Fix typo of HPD bit for Hot Plug Detect
  thunderbolt: Fix typo in enum tb_link_width kernel-doc
  thunderbolt: Fix debug log when DisplayPort adapter not available for pairing
  ...
Diffstat (limited to 'drivers/thunderbolt')
-rw-r--r--  drivers/thunderbolt/clx.c        47
-rw-r--r--  drivers/thunderbolt/dma_test.c   14
-rw-r--r--  drivers/thunderbolt/path.c        7
-rw-r--r--  drivers/thunderbolt/quirks.c      3
-rw-r--r--  drivers/thunderbolt/retimer.c     1
-rw-r--r--  drivers/thunderbolt/switch.c    337
-rw-r--r--  drivers/thunderbolt/tb.c        774
-rw-r--r--  drivers/thunderbolt/tb.h         60
-rw-r--r--  drivers/thunderbolt/tb_regs.h    19
-rw-r--r--  drivers/thunderbolt/tunnel.c    263
-rw-r--r--  drivers/thunderbolt/tunnel.h     26
-rw-r--r--  drivers/thunderbolt/usb4.c      135
12 files changed, 1299 insertions, 387 deletions
diff --git a/drivers/thunderbolt/clx.c b/drivers/thunderbolt/clx.c
index 13d217ae98e6..787dfd1550e5 100644
--- a/drivers/thunderbolt/clx.c
+++ b/drivers/thunderbolt/clx.c
@@ -175,6 +175,28 @@ bool tb_port_clx_is_enabled(struct tb_port *port, unsigned int clx)
}
/**
+ * tb_switch_clx_is_supported() - Is CLx supported on this type of router
+ * @sw: The router to check CLx support for
+ */
+static bool tb_switch_clx_is_supported(const struct tb_switch *sw)
+{
+ if (!clx_enabled)
+ return false;
+
+ if (sw->quirks & QUIRK_NO_CLX)
+ return false;
+
+ /*
+ * CLx is not enabled and validated on Intel USB4 platforms
+ * before Alder Lake.
+ */
+ if (tb_switch_is_tiger_lake(sw))
+ return false;
+
+ return tb_switch_is_usb4(sw) || tb_switch_is_titan_ridge(sw);
+}
+
+/**
* tb_switch_clx_init() - Initialize router CL states
* @sw: Router
*
@@ -273,28 +295,6 @@ static int tb_switch_mask_clx_objections(struct tb_switch *sw)
sw->cap_lp + offset, ARRAY_SIZE(val));
}
-/**
- * tb_switch_clx_is_supported() - Is CLx supported on this type of router
- * @sw: The router to check CLx support for
- */
-bool tb_switch_clx_is_supported(const struct tb_switch *sw)
-{
- if (!clx_enabled)
- return false;
-
- if (sw->quirks & QUIRK_NO_CLX)
- return false;
-
- /*
- * CLx is not enabled and validated on Intel USB4 platforms
- * before Alder Lake.
- */
- if (tb_switch_is_tiger_lake(sw))
- return false;
-
- return tb_switch_is_usb4(sw) || tb_switch_is_titan_ridge(sw);
-}
-
static bool validate_mask(unsigned int clx)
{
/* Previous states need to be enabled */
@@ -405,6 +405,9 @@ int tb_switch_clx_disable(struct tb_switch *sw)
if (!clx)
return 0;
+ if (sw->is_unplugged)
+ return clx;
+
up = tb_upstream_port(sw);
down = tb_switch_downstream_port(sw);
diff --git a/drivers/thunderbolt/dma_test.c b/drivers/thunderbolt/dma_test.c
index 39476fc48801..9e47a63f28e7 100644
--- a/drivers/thunderbolt/dma_test.c
+++ b/drivers/thunderbolt/dma_test.c
@@ -101,7 +101,7 @@ struct dma_test {
unsigned int packets_sent;
unsigned int packets_received;
unsigned int link_speed;
- unsigned int link_width;
+ enum tb_link_width link_width;
unsigned int crc_errors;
unsigned int buffer_overflow_errors;
enum dma_test_result result;
@@ -465,9 +465,9 @@ DMA_TEST_DEBUGFS_ATTR(packets_to_send, packets_to_send_get,
static int dma_test_set_bonding(struct dma_test *dt)
{
switch (dt->link_width) {
- case 2:
+ case TB_LINK_WIDTH_DUAL:
return tb_xdomain_lane_bonding_enable(dt->xd);
- case 1:
+ case TB_LINK_WIDTH_SINGLE:
tb_xdomain_lane_bonding_disable(dt->xd);
fallthrough;
default:
@@ -490,12 +490,8 @@ static void dma_test_check_errors(struct dma_test *dt, int ret)
if (!dt->error_code) {
if (dt->link_speed && dt->xd->link_speed != dt->link_speed) {
dt->error_code = DMA_TEST_SPEED_ERROR;
- } else if (dt->link_width) {
- const struct tb_xdomain *xd = dt->xd;
-
- if ((dt->link_width == 1 && xd->link_width != TB_LINK_WIDTH_SINGLE) ||
- (dt->link_width == 2 && xd->link_width < TB_LINK_WIDTH_DUAL))
- dt->error_code = DMA_TEST_WIDTH_ERROR;
+ } else if (dt->link_width && dt->link_width != dt->xd->link_width) {
+ dt->error_code = DMA_TEST_WIDTH_ERROR;
} else if (dt->packets_to_send != dt->packets_sent ||
dt->packets_to_receive != dt->packets_received ||
dt->crc_errors || dt->buffer_overflow_errors) {
diff --git a/drivers/thunderbolt/path.c b/drivers/thunderbolt/path.c
index ee03fd75a472..091a81bbdbdc 100644
--- a/drivers/thunderbolt/path.c
+++ b/drivers/thunderbolt/path.c
@@ -19,9 +19,9 @@ static void tb_dump_hop(const struct tb_path_hop *hop, const struct tb_regs_hop
tb_port_dbg(port, " In HopID: %d => Out port: %d Out HopID: %d\n",
hop->in_hop_index, regs->out_port, regs->next_hop);
- tb_port_dbg(port, " Weight: %d Priority: %d Credits: %d Drop: %d\n",
- regs->weight, regs->priority,
- regs->initial_credits, regs->drop_packages);
+ tb_port_dbg(port, " Weight: %d Priority: %d Credits: %d Drop: %d PM: %d\n",
+ regs->weight, regs->priority, regs->initial_credits,
+ regs->drop_packages, regs->pmps);
tb_port_dbg(port, " Counter enabled: %d Counter index: %d\n",
regs->counter_enable, regs->counter);
tb_port_dbg(port, " Flow Control (In/Eg): %d/%d Shared Buffer (In/Eg): %d/%d\n",
@@ -535,6 +535,7 @@ int tb_path_activate(struct tb_path *path)
hop.next_hop = path->hops[i].next_hop_index;
hop.out_port = path->hops[i].out_port->port;
hop.initial_credits = path->hops[i].initial_credits;
+ hop.pmps = path->hops[i].pm_support;
hop.unknown1 = 0;
hop.enable = 1;
diff --git a/drivers/thunderbolt/quirks.c b/drivers/thunderbolt/quirks.c
index 488138a28ae1..e6bfa63b40ae 100644
--- a/drivers/thunderbolt/quirks.c
+++ b/drivers/thunderbolt/quirks.c
@@ -31,6 +31,9 @@ static void quirk_usb3_maximum_bandwidth(struct tb_switch *sw)
{
struct tb_port *port;
+ if (tb_switch_is_icm(sw))
+ return;
+
tb_switch_for_each_port(sw, port) {
if (!tb_port_is_usb3_down(port))
continue;
diff --git a/drivers/thunderbolt/retimer.c b/drivers/thunderbolt/retimer.c
index 47becb363ada..d49d6628dbf2 100644
--- a/drivers/thunderbolt/retimer.c
+++ b/drivers/thunderbolt/retimer.c
@@ -94,6 +94,7 @@ static int tb_retimer_nvm_add(struct tb_retimer *rt)
goto err_nvm;
rt->nvm = nvm;
+ dev_dbg(&rt->dev, "NVM version %x.%x\n", nvm->major, nvm->minor);
return 0;
err_nvm:
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index bd5815f8f23b..1e15ffa79295 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -372,6 +372,7 @@ static int tb_switch_nvm_add(struct tb_switch *sw)
ret = tb_nvm_add_active(nvm, nvm_read);
if (ret)
goto err_nvm;
+ tb_sw_dbg(sw, "NVM version %x.%x\n", nvm->major, nvm->minor);
}
if (!sw->no_nvm_upgrade) {
@@ -915,6 +916,48 @@ int tb_port_get_link_speed(struct tb_port *port)
}
/**
+ * tb_port_get_link_generation() - Returns link generation
+ * @port: Lane adapter
+ *
+ * Returns the link generation as a number or a negative errno in case
+ * of failure. Does not distinguish between Thunderbolt 1 and
+ * Thunderbolt 2 links, so for those it always returns 2.
+ */
+int tb_port_get_link_generation(struct tb_port *port)
+{
+ int ret;
+
+ ret = tb_port_get_link_speed(port);
+ if (ret < 0)
+ return ret;
+
+ switch (ret) {
+ case 40:
+ return 4;
+ case 20:
+ return 3;
+ default:
+ return 2;
+ }
+}
+
+static const char *width_name(enum tb_link_width width)
+{
+ switch (width) {
+ case TB_LINK_WIDTH_SINGLE:
+ return "symmetric, single lane";
+ case TB_LINK_WIDTH_DUAL:
+ return "symmetric, dual lanes";
+ case TB_LINK_WIDTH_ASYM_TX:
+ return "asymmetric, 3 transmitters, 1 receiver";
+ case TB_LINK_WIDTH_ASYM_RX:
+ return "asymmetric, 3 receivers, 1 transmitter";
+ default:
+ return "unknown";
+ }
+}
+
+/**
* tb_port_get_link_width() - Get current link width
* @port: Port to check (USB4 or CIO)
*
@@ -939,8 +982,15 @@ int tb_port_get_link_width(struct tb_port *port)
LANE_ADP_CS_1_CURRENT_WIDTH_SHIFT;
}
-static bool tb_port_is_width_supported(struct tb_port *port,
- unsigned int width_mask)
+/**
+ * tb_port_width_supported() - Is the given link width supported
+ * @port: Port to check
+ * @width: Widths to check (bitmask)
+ *
+ * Can be called for any lane adapter. Checks if the given @width is
+ * supported by the hardware and returns %true if it is.
+ */
+bool tb_port_width_supported(struct tb_port *port, unsigned int width)
{
u32 phy, widths;
int ret;
@@ -948,20 +998,23 @@ static bool tb_port_is_width_supported(struct tb_port *port,
if (!port->cap_phy)
return false;
+ if (width & (TB_LINK_WIDTH_ASYM_TX | TB_LINK_WIDTH_ASYM_RX)) {
+ if (tb_port_get_link_generation(port) < 4 ||
+ !usb4_port_asym_supported(port))
+ return false;
+ }
+
ret = tb_port_read(port, &phy, TB_CFG_PORT,
port->cap_phy + LANE_ADP_CS_0, 1);
if (ret)
return false;
- widths = (phy & LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK) >>
- LANE_ADP_CS_0_SUPPORTED_WIDTH_SHIFT;
-
- return widths & width_mask;
-}
-
-static bool is_gen4_link(struct tb_port *port)
-{
- return tb_port_get_link_speed(port) > 20;
+ /*
+ * The field encoding is the same as &enum tb_link_width, which is
+ * what callers pass in @width.
+ */
+ widths = FIELD_GET(LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK, phy);
+ return widths & width;
}
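
For illustration, a minimal sketch (not part of the patch) of how callers compose the bitmask @width argument; this assumes the BIT() encoding of enum tb_link_width (TB_LINK_WIDTH_SINGLE = BIT(0), TB_LINK_WIDTH_DUAL = BIT(1), TB_LINK_WIDTH_ASYM_TX = BIT(2), TB_LINK_WIDTH_ASYM_RX = BIT(3)), which is why several widths can be tested with a single register read:

/* Sketch only: does the lane adapter support any bonded width? */
static bool port_supports_any_bonded_width(struct tb_port *port)
{
	unsigned int width = TB_LINK_WIDTH_DUAL | TB_LINK_WIDTH_ASYM_TX |
			     TB_LINK_WIDTH_ASYM_RX;

	return tb_port_width_supported(port, width);
}
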
/**
@@ -991,15 +1044,23 @@ int tb_port_set_link_width(struct tb_port *port, enum tb_link_width width)
switch (width) {
case TB_LINK_WIDTH_SINGLE:
/* Gen 4 link cannot be single */
- if (is_gen4_link(port))
+ if (tb_port_get_link_generation(port) >= 4)
return -EOPNOTSUPP;
val |= LANE_ADP_CS_1_TARGET_WIDTH_SINGLE <<
LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
break;
+
case TB_LINK_WIDTH_DUAL:
+ if (tb_port_get_link_generation(port) >= 4)
+ return usb4_port_asym_set_link_width(port, width);
val |= LANE_ADP_CS_1_TARGET_WIDTH_DUAL <<
LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
break;
+
+ case TB_LINK_WIDTH_ASYM_TX:
+ case TB_LINK_WIDTH_ASYM_RX:
+ return usb4_port_asym_set_link_width(port, width);
+
default:
return -EINVAL;
}
@@ -1124,7 +1185,7 @@ void tb_port_lane_bonding_disable(struct tb_port *port)
/**
* tb_port_wait_for_link_width() - Wait until link reaches specific width
* @port: Port to wait for
- * @width_mask: Expected link width mask
+ * @width: Expected link width (bitmask)
* @timeout_msec: Timeout in ms how long to wait
*
* Should be used after both ends of the link have been bonded (or
@@ -1133,14 +1194,15 @@ void tb_port_lane_bonding_disable(struct tb_port *port)
* within the given timeout, %0 if it did. Can be passed a mask of
* expected widths and succeeds if any of the widths is reached.
*/
-int tb_port_wait_for_link_width(struct tb_port *port, unsigned int width_mask,
+int tb_port_wait_for_link_width(struct tb_port *port, unsigned int width,
int timeout_msec)
{
ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
int ret;
/* Gen 4 link does not support single lane */
- if ((width_mask & TB_LINK_WIDTH_SINGLE) && is_gen4_link(port))
+ if ((width & TB_LINK_WIDTH_SINGLE) &&
+ tb_port_get_link_generation(port) >= 4)
return -EOPNOTSUPP;
do {
@@ -1153,7 +1215,7 @@ int tb_port_wait_for_link_width(struct tb_port *port, unsigned int width_mask,
*/
if (ret != -EACCES)
return ret;
- } else if (ret & width_mask) {
+ } else if (ret & width) {
return 0;
}
@@ -1332,7 +1394,7 @@ int tb_pci_port_enable(struct tb_port *port, bool enable)
* tb_dp_port_hpd_is_active() - Is HPD already active
* @port: DP out port to check
*
- * Checks if the DP OUT adapter port has HDP bit already set.
+ * Checks if the DP OUT adapter port has HPD bit already set.
*/
int tb_dp_port_hpd_is_active(struct tb_port *port)
{
@@ -1344,14 +1406,14 @@ int tb_dp_port_hpd_is_active(struct tb_port *port)
if (ret)
return ret;
- return !!(data & ADP_DP_CS_2_HDP);
+ return !!(data & ADP_DP_CS_2_HPD);
}
/**
* tb_dp_port_hpd_clear() - Clear HPD from DP IN port
* @port: Port to clear HPD
*
- * If the DP IN port has HDP set, this function can be used to clear it.
+ * If the DP IN port has HPD set, this function can be used to clear it.
*/
int tb_dp_port_hpd_clear(struct tb_port *port)
{
@@ -1363,7 +1425,7 @@ int tb_dp_port_hpd_clear(struct tb_port *port)
if (ret)
return ret;
- data |= ADP_DP_CS_3_HDPC;
+ data |= ADP_DP_CS_3_HPDC;
return tb_port_write(port, &data, TB_CFG_PORT,
port->cap_adap + ADP_DP_CS_3, 1);
}
@@ -2697,6 +2759,38 @@ static int tb_switch_update_link_attributes(struct tb_switch *sw)
return 0;
}
+/* Must be called after tb_switch_update_link_attributes() */
+static void tb_switch_link_init(struct tb_switch *sw)
+{
+ struct tb_port *up, *down;
+ bool bonded;
+
+ if (!tb_route(sw) || tb_switch_is_icm(sw))
+ return;
+
+ tb_sw_dbg(sw, "current link speed %u.0 Gb/s\n", sw->link_speed);
+ tb_sw_dbg(sw, "current link width %s\n", width_name(sw->link_width));
+
+ bonded = sw->link_width >= TB_LINK_WIDTH_DUAL;
+
+ /*
+ * Gen 4 links come up as bonded so update the port structures
+ * accordingly.
+ */
+ up = tb_upstream_port(sw);
+ down = tb_switch_downstream_port(sw);
+
+ up->bonded = bonded;
+ if (up->dual_link_port)
+ up->dual_link_port->bonded = bonded;
+ tb_port_update_credits(up);
+
+ down->bonded = bonded;
+ if (down->dual_link_port)
+ down->dual_link_port->bonded = bonded;
+ tb_port_update_credits(down);
+}
+
/**
* tb_switch_lane_bonding_enable() - Enable lane bonding
* @sw: Switch to enable lane bonding
@@ -2705,24 +2799,20 @@ static int tb_switch_update_link_attributes(struct tb_switch *sw)
* switch. If conditions are correct and both switches support the feature,
* lanes are bonded. It is safe to call this for any switch.
*/
-int tb_switch_lane_bonding_enable(struct tb_switch *sw)
+static int tb_switch_lane_bonding_enable(struct tb_switch *sw)
{
struct tb_port *up, *down;
- u64 route = tb_route(sw);
- unsigned int width_mask;
+ unsigned int width;
int ret;
- if (!route)
- return 0;
-
if (!tb_switch_lane_bonding_possible(sw))
return 0;
up = tb_upstream_port(sw);
down = tb_switch_downstream_port(sw);
- if (!tb_port_is_width_supported(up, TB_LINK_WIDTH_DUAL) ||
- !tb_port_is_width_supported(down, TB_LINK_WIDTH_DUAL))
+ if (!tb_port_width_supported(up, TB_LINK_WIDTH_DUAL) ||
+ !tb_port_width_supported(down, TB_LINK_WIDTH_DUAL))
return 0;
/*
@@ -2746,21 +2836,10 @@ int tb_switch_lane_bonding_enable(struct tb_switch *sw)
}
/* Any of these widths means the link is bonded */
- width_mask = TB_LINK_WIDTH_DUAL | TB_LINK_WIDTH_ASYM_TX |
- TB_LINK_WIDTH_ASYM_RX;
-
- ret = tb_port_wait_for_link_width(down, width_mask, 100);
- if (ret) {
- tb_port_warn(down, "timeout enabling lane bonding\n");
- return ret;
- }
+ width = TB_LINK_WIDTH_DUAL | TB_LINK_WIDTH_ASYM_TX |
+ TB_LINK_WIDTH_ASYM_RX;
- tb_port_update_credits(down);
- tb_port_update_credits(up);
- tb_switch_update_link_attributes(sw);
-
- tb_sw_dbg(sw, "lane bonding enabled\n");
- return ret;
+ return tb_port_wait_for_link_width(down, width, 100);
}
/**
@@ -2770,20 +2849,27 @@ int tb_switch_lane_bonding_enable(struct tb_switch *sw)
* Disables lane bonding between @sw and parent. This can be called even
* if lanes were not bonded originally.
*/
-void tb_switch_lane_bonding_disable(struct tb_switch *sw)
+static int tb_switch_lane_bonding_disable(struct tb_switch *sw)
{
struct tb_port *up, *down;
int ret;
- if (!tb_route(sw))
- return;
-
up = tb_upstream_port(sw);
if (!up->bonded)
- return;
+ return 0;
- down = tb_switch_downstream_port(sw);
+ /*
+ * If the link is Gen 4 there is no way to switch the link to
+ * two single lane links so avoid that here. Also don't bother
+ * if the link is not up anymore (sw is unplugged).
+ */
+ ret = tb_port_get_link_generation(up);
+ if (ret < 0)
+ return ret;
+ if (ret >= 4)
+ return -EOPNOTSUPP;
+ down = tb_switch_downstream_port(sw);
tb_port_lane_bonding_disable(up);
tb_port_lane_bonding_disable(down);
@@ -2791,15 +2877,160 @@ void tb_switch_lane_bonding_disable(struct tb_switch *sw)
* It is fine if we get other errors as the router might have
* been unplugged.
*/
- ret = tb_port_wait_for_link_width(down, TB_LINK_WIDTH_SINGLE, 100);
- if (ret == -ETIMEDOUT)
- tb_sw_warn(sw, "timeout disabling lane bonding\n");
+ return tb_port_wait_for_link_width(down, TB_LINK_WIDTH_SINGLE, 100);
+}
+
+static int tb_switch_asym_enable(struct tb_switch *sw, enum tb_link_width width)
+{
+ struct tb_port *up, *down, *port;
+ enum tb_link_width down_width;
+ int ret;
+
+ up = tb_upstream_port(sw);
+ down = tb_switch_downstream_port(sw);
+
+ if (width == TB_LINK_WIDTH_ASYM_TX) {
+ down_width = TB_LINK_WIDTH_ASYM_RX;
+ port = down;
+ } else {
+ down_width = TB_LINK_WIDTH_ASYM_TX;
+ port = up;
+ }
+
+ ret = tb_port_set_link_width(up, width);
+ if (ret)
+ return ret;
+
+ ret = tb_port_set_link_width(down, down_width);
+ if (ret)
+ return ret;
+
+ /*
+ * Initiate the change in the router that one of its TX lanes is
+ * changing to RX but do so only if there is an actual change.
+ */
+ if (sw->link_width != width) {
+ ret = usb4_port_asym_start(port);
+ if (ret)
+ return ret;
+
+ ret = tb_port_wait_for_link_width(up, width, 100);
+ if (ret)
+ return ret;
+ }
+
+ sw->link_width = width;
+ return 0;
+}
+
+static int tb_switch_asym_disable(struct tb_switch *sw)
+{
+ struct tb_port *up, *down;
+ int ret;
+
+ up = tb_upstream_port(sw);
+ down = tb_switch_downstream_port(sw);
+
+ ret = tb_port_set_link_width(up, TB_LINK_WIDTH_DUAL);
+ if (ret)
+ return ret;
+
+ ret = tb_port_set_link_width(down, TB_LINK_WIDTH_DUAL);
+ if (ret)
+ return ret;
+
+ /*
+ * Initiate the change in the router that has three TX lanes and
+ * is changing one of its TX lanes to RX but only if there is a
+ * change in the link width.
+ */
+ if (sw->link_width > TB_LINK_WIDTH_DUAL) {
+ if (sw->link_width == TB_LINK_WIDTH_ASYM_TX)
+ ret = usb4_port_asym_start(up);
+ else
+ ret = usb4_port_asym_start(down);
+ if (ret)
+ return ret;
+
+ ret = tb_port_wait_for_link_width(up, TB_LINK_WIDTH_DUAL, 100);
+ if (ret)
+ return ret;
+ }
+
+ sw->link_width = TB_LINK_WIDTH_DUAL;
+ return 0;
+}
+
+/**
+ * tb_switch_set_link_width() - Configure router link width
+ * @sw: Router to configure
+ * @width: The new link width
+ *
+ * Set the device router link width to @width from the router upstream
+ * port perspective. Also supports asymmetric links if the routers on
+ * both sides of the link support it.
+ *
+ * Does nothing for the host router.
+ *
+ * Returns %0 in case of success, negative errno otherwise.
+ */
+int tb_switch_set_link_width(struct tb_switch *sw, enum tb_link_width width)
+{
+ struct tb_port *up, *down;
+ int ret = 0;
+
+ if (!tb_route(sw))
+ return 0;
+
+ up = tb_upstream_port(sw);
+ down = tb_switch_downstream_port(sw);
+
+ switch (width) {
+ case TB_LINK_WIDTH_SINGLE:
+ ret = tb_switch_lane_bonding_disable(sw);
+ break;
+
+ case TB_LINK_WIDTH_DUAL:
+ if (sw->link_width == TB_LINK_WIDTH_ASYM_TX ||
+ sw->link_width == TB_LINK_WIDTH_ASYM_RX) {
+ ret = tb_switch_asym_disable(sw);
+ if (ret)
+ break;
+ }
+ ret = tb_switch_lane_bonding_enable(sw);
+ break;
+
+ case TB_LINK_WIDTH_ASYM_TX:
+ case TB_LINK_WIDTH_ASYM_RX:
+ ret = tb_switch_asym_enable(sw, width);
+ break;
+ }
+
+ switch (ret) {
+ case 0:
+ break;
+
+ case -ETIMEDOUT:
+ tb_sw_warn(sw, "timeout changing link width\n");
+ return ret;
+
+ case -ENOTCONN:
+ case -EOPNOTSUPP:
+ case -ENODEV:
+ return ret;
+
+ default:
+ tb_sw_dbg(sw, "failed to change link width: %d\n", ret);
+ return ret;
+ }
tb_port_update_credits(down);
tb_port_update_credits(up);
+
tb_switch_update_link_attributes(sw);
- tb_sw_dbg(sw, "lane bonding disabled\n");
+ tb_sw_dbg(sw, "link width set to %s\n", width_name(width));
+ return ret;
}
/**
@@ -2959,6 +3190,8 @@ int tb_switch_add(struct tb_switch *sw)
if (ret)
return ret;
+ tb_switch_link_init(sw);
+
ret = tb_switch_clx_init(sw);
if (ret)
return ret;
diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
index dd0a1ef8cf12..f250a6788b6c 100644
--- a/drivers/thunderbolt/tb.c
+++ b/drivers/thunderbolt/tb.c
@@ -16,8 +16,31 @@
#include "tb_regs.h"
#include "tunnel.h"
-#define TB_TIMEOUT 100 /* ms */
-#define MAX_GROUPS 7 /* max Group_ID is 7 */
+#define TB_TIMEOUT 100 /* ms */
+
+/*
+ * Minimum bandwidth (in Mb/s) that is needed in the single transmitter/receiver
+ * direction. This is 40G - 10% guard band bandwidth.
+ */
+#define TB_ASYM_MIN (40000 * 90 / 100)
+
+/*
+ * Threshold bandwidth (in Mb/s) that is used to switch the links to
+ * asymmetric and back. This is selected as 45G which means when the
+ * request is higher than this, we switch the link to asymmetric, and
+ * when it is less than this we switch it back. The 45G is selected so
+ * that we still have 27G (of the total 72G) for bulk PCIe traffic when
+ * switching back to symmetric.
+ */
+#define TB_ASYM_THRESHOLD 45000
+
+#define MAX_GROUPS 7 /* max Group_ID is 7 */
+
+static unsigned int asym_threshold = TB_ASYM_THRESHOLD;
+module_param_named(asym_threshold, asym_threshold, uint, 0444);
+MODULE_PARM_DESC(asym_threshold,
+ "threshold (Mb/s) when to Gen 4 switch link symmetry. 0 disables. (default: "
+ __MODULE_STRING(TB_ASYM_THRESHOLD) ")");
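
As a worked check of these constants (a sketch, not part of the patch; the 40000 Mb/s Gen 4 lane rate is taken from the comments above):

/* Sketch only: the guard-band arithmetic behind TB_ASYM_MIN and
 * TB_ASYM_THRESHOLD for a Gen 4 link running at 40 Gb/s per lane.
 */
static void tb_asym_constants_worked_example(void)
{
	int lane = 40000;				/* Gen 4 lane rate, Mb/s */

	/* Single direction of an asymmetric link minus 10% guard band */
	WARN_ON(TB_ASYM_MIN != lane * 90 / 100);	/* 36000 Mb/s */

	/* Symmetric link: 2 x 40G minus the guard band is 72G; switching
	 * back at 45G leaves 27G for bulk PCIe traffic.
	 */
	WARN_ON(2 * lane * 90 / 100 - TB_ASYM_THRESHOLD != 27000);
}
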
/**
* struct tb_cm - Simple Thunderbolt connection manager
@@ -190,7 +213,7 @@ static void tb_add_dp_resources(struct tb_switch *sw)
if (!tb_switch_query_dp_resource(sw, port))
continue;
- list_add_tail(&port->list, &tcm->dp_resources);
+ list_add(&port->list, &tcm->dp_resources);
tb_port_dbg(port, "DP IN resource available\n");
}
}
@@ -255,13 +278,13 @@ static int tb_enable_clx(struct tb_switch *sw)
* this in the future to cover the whole topology if it turns
* out to be beneficial.
*/
- while (sw && sw->config.depth > 1)
+ while (sw && tb_switch_depth(sw) > 1)
sw = tb_switch_parent(sw);
if (!sw)
return 0;
- if (sw->config.depth != 1)
+ if (tb_switch_depth(sw) != 1)
return 0;
/*
@@ -285,14 +308,32 @@ static int tb_enable_clx(struct tb_switch *sw)
return ret == -EOPNOTSUPP ? 0 : ret;
}
-/* Disables CL states up to the host router */
-static void tb_disable_clx(struct tb_switch *sw)
+/**
+ * tb_disable_clx() - Disable CL states up to host router
+ * @sw: Router to start
+ *
+ * Disables CL states from @sw up to the host router. Returns true if
+ * any CL states were disabled. This can be used to figure out whether
+ * the link was set up by us or the boot firmware, so we don't
+ * accidentally enable them if they were not enabled during discovery.
+ */
+static bool tb_disable_clx(struct tb_switch *sw)
{
+ bool disabled = false;
+
do {
- if (tb_switch_clx_disable(sw) < 0)
+ int ret;
+
+ ret = tb_switch_clx_disable(sw);
+ if (ret > 0)
+ disabled = true;
+ else if (ret < 0)
tb_sw_warn(sw, "failed to disable CL states\n");
+
sw = tb_switch_parent(sw);
} while (sw);
+
+ return disabled;
}
static int tb_increase_switch_tmu_accuracy(struct device *dev, void *data)
@@ -553,7 +594,7 @@ static struct tb_tunnel *tb_find_first_usb3_tunnel(struct tb *tb,
struct tb_switch *sw;
/* Pick the router that is deepest in the topology */
- if (dst_port->sw->config.depth > src_port->sw->config.depth)
+ if (tb_port_path_direction_downstream(src_port, dst_port))
sw = dst_port->sw;
else
sw = src_port->sw;
@@ -572,133 +613,294 @@ static struct tb_tunnel *tb_find_first_usb3_tunnel(struct tb *tb,
return tb_find_tunnel(tb, TB_TUNNEL_USB3, usb3_down, NULL);
}
-static int tb_available_bandwidth(struct tb *tb, struct tb_port *src_port,
- struct tb_port *dst_port, int *available_up, int *available_down)
-{
- int usb3_consumed_up, usb3_consumed_down, ret;
- struct tb_cm *tcm = tb_priv(tb);
+/**
+ * tb_consumed_usb3_pcie_bandwidth() - Consumed USB3/PCIe bandwidth over a single link
+ * @tb: Domain structure
+ * @src_port: Source protocol adapter
+ * @dst_port: Destination protocol adapter
+ * @port: USB4 port the consumed bandwidth is calculated for
+ * @consumed_up: Consumed upstream bandwidth (Mb/s)
+ * @consumed_down: Consumed downstream bandwidth (Mb/s)
+ *
+ * Calculates consumed USB3 and PCIe bandwidth at @port on the path
+ * from @src_port to @dst_port. Does not take the tunnel starting from
+ * @src_port and ending at @dst_port into account.
+ */
+static int tb_consumed_usb3_pcie_bandwidth(struct tb *tb,
+ struct tb_port *src_port,
+ struct tb_port *dst_port,
+ struct tb_port *port,
+ int *consumed_up,
+ int *consumed_down)
+{
+ int pci_consumed_up, pci_consumed_down;
struct tb_tunnel *tunnel;
- struct tb_port *port;
- tb_dbg(tb, "calculating available bandwidth between %llx:%u <-> %llx:%u\n",
- tb_route(src_port->sw), src_port->port, tb_route(dst_port->sw),
- dst_port->port);
+ *consumed_up = *consumed_down = 0;
tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
if (tunnel && tunnel->src_port != src_port &&
tunnel->dst_port != dst_port) {
- ret = tb_tunnel_consumed_bandwidth(tunnel, &usb3_consumed_up,
- &usb3_consumed_down);
+ int ret;
+
+ ret = tb_tunnel_consumed_bandwidth(tunnel, consumed_up,
+ consumed_down);
if (ret)
return ret;
- } else {
- usb3_consumed_up = 0;
- usb3_consumed_down = 0;
}
- /* Maximum possible bandwidth asymmetric Gen 4 link is 120 Gb/s */
- *available_up = *available_down = 120000;
+ /*
+ * If there is anything reserved for PCIe bulk traffic take it
+ * into account here too.
+ */
+ if (tb_tunnel_reserved_pci(port, &pci_consumed_up, &pci_consumed_down)) {
+ *consumed_up += pci_consumed_up;
+ *consumed_down += pci_consumed_down;
+ }
- /* Find the minimum available bandwidth over all links */
- tb_for_each_port_on_path(src_port, dst_port, port) {
- int link_speed, link_width, up_bw, down_bw;
+ return 0;
+}
- if (!tb_port_is_null(port))
+/**
+ * tb_consumed_dp_bandwidth() - Consumed DP bandwidth over a single link
+ * @tb: Domain structure
+ * @src_port: Source protocol adapter
+ * @dst_port: Destination protocol adapter
+ * @port: USB4 port the consumed bandwidth is calculated for
+ * @consumed_up: Consumed upstream bandwidth (Mb/s)
+ * @consumed_down: Consumed downstream bandwidth (Mb/s)
+ *
+ * Calculates consumed DP bandwidth at @port on the path from @src_port
+ * to @dst_port. Does not take the tunnel starting from @src_port and
+ * ending at @dst_port into account.
+ */
+static int tb_consumed_dp_bandwidth(struct tb *tb,
+ struct tb_port *src_port,
+ struct tb_port *dst_port,
+ struct tb_port *port,
+ int *consumed_up,
+ int *consumed_down)
+{
+ struct tb_cm *tcm = tb_priv(tb);
+ struct tb_tunnel *tunnel;
+ int ret;
+
+ *consumed_up = *consumed_down = 0;
+
+ /*
+ * Find all DP tunnels that cross the port and subtract their
+ * consumed bandwidth from the available bandwidth.
+ */
+ list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
+ int dp_consumed_up, dp_consumed_down;
+
+ if (tb_tunnel_is_invalid(tunnel))
continue;
- if (tb_is_upstream_port(port)) {
- link_speed = port->sw->link_speed;
+ if (!tb_tunnel_is_dp(tunnel))
+ continue;
+
+ if (!tb_tunnel_port_on_path(tunnel, port))
+ continue;
+
+ /*
+ * Ignore the DP tunnel between src_port and dst_port
+ * because it is the same tunnel and we may be
+ * re-calculating estimated bandwidth.
+ */
+ if (tunnel->src_port == src_port &&
+ tunnel->dst_port == dst_port)
+ continue;
+
+ ret = tb_tunnel_consumed_bandwidth(tunnel, &dp_consumed_up,
+ &dp_consumed_down);
+ if (ret)
+ return ret;
+
+ *consumed_up += dp_consumed_up;
+ *consumed_down += dp_consumed_down;
+ }
+
+ return 0;
+}
+
+static bool tb_asym_supported(struct tb_port *src_port, struct tb_port *dst_port,
+ struct tb_port *port)
+{
+ bool downstream = tb_port_path_direction_downstream(src_port, dst_port);
+ enum tb_link_width width;
+
+ if (tb_is_upstream_port(port))
+ width = downstream ? TB_LINK_WIDTH_ASYM_RX : TB_LINK_WIDTH_ASYM_TX;
+ else
+ width = downstream ? TB_LINK_WIDTH_ASYM_TX : TB_LINK_WIDTH_ASYM_RX;
+
+ return tb_port_width_supported(port, width);
+}
+
+/**
+ * tb_maximum_bandwidth() - Maximum bandwidth over a single link
+ * @tb: Domain structure
+ * @src_port: Source protocol adapter
+ * @dst_port: Destination protocol adapter
+ * @port: USB4 port the total bandwidth is calculated for
+ * @max_up: Maximum upstream bandwidth (Mb/s)
+ * @max_down: Maximum downstream bandwidth (Mb/s)
+ * @include_asym: Include bandwidth if the link is switched from
+ * symmetric to asymmetric
+ *
+ * Returns the maximum possible bandwidth in @max_up and @max_down over
+ * a single link at @port. If @include_asym is set then also includes
+ * the additional bandwidth that becomes available if the links are
+ * transitioned into asymmetric in the direction from @src_port to
+ * @dst_port.
+ */
+static int tb_maximum_bandwidth(struct tb *tb, struct tb_port *src_port,
+ struct tb_port *dst_port, struct tb_port *port,
+ int *max_up, int *max_down, bool include_asym)
+{
+ bool downstream = tb_port_path_direction_downstream(src_port, dst_port);
+ int link_speed, link_width, up_bw, down_bw;
+
+ /*
+ * Asymmetric bandwidth can be included only if it is actually
+ * supported by the lane adapter.
+ */
+ if (!tb_asym_supported(src_port, dst_port, port))
+ include_asym = false;
+
+ if (tb_is_upstream_port(port)) {
+ link_speed = port->sw->link_speed;
+ /*
+ * sw->link_width is from upstream perspective so we use
+ * the opposite for downstream of the host router.
+ */
+ if (port->sw->link_width == TB_LINK_WIDTH_ASYM_TX) {
+ up_bw = link_speed * 3 * 1000;
+ down_bw = link_speed * 1 * 1000;
+ } else if (port->sw->link_width == TB_LINK_WIDTH_ASYM_RX) {
+ up_bw = link_speed * 1 * 1000;
+ down_bw = link_speed * 3 * 1000;
+ } else if (include_asym) {
/*
- * sw->link_width is from upstream perspective
- * so we use the opposite for downstream of the
- * host router.
+ * The link is symmetric at the moment but we
+ * can switch it to asymmetric as needed. Report
+ * this bandwidth as available (even though it
+ * is not yet enabled).
*/
- if (port->sw->link_width == TB_LINK_WIDTH_ASYM_TX) {
- up_bw = link_speed * 3 * 1000;
- down_bw = link_speed * 1 * 1000;
- } else if (port->sw->link_width == TB_LINK_WIDTH_ASYM_RX) {
+ if (downstream) {
up_bw = link_speed * 1 * 1000;
down_bw = link_speed * 3 * 1000;
} else {
- up_bw = link_speed * port->sw->link_width * 1000;
- down_bw = up_bw;
+ up_bw = link_speed * 3 * 1000;
+ down_bw = link_speed * 1 * 1000;
}
} else {
- link_speed = tb_port_get_link_speed(port);
- if (link_speed < 0)
- return link_speed;
-
- link_width = tb_port_get_link_width(port);
- if (link_width < 0)
- return link_width;
-
- if (link_width == TB_LINK_WIDTH_ASYM_TX) {
+ up_bw = link_speed * port->sw->link_width * 1000;
+ down_bw = up_bw;
+ }
+ } else {
+ link_speed = tb_port_get_link_speed(port);
+ if (link_speed < 0)
+ return link_speed;
+
+ link_width = tb_port_get_link_width(port);
+ if (link_width < 0)
+ return link_width;
+
+ if (link_width == TB_LINK_WIDTH_ASYM_TX) {
+ up_bw = link_speed * 1 * 1000;
+ down_bw = link_speed * 3 * 1000;
+ } else if (link_width == TB_LINK_WIDTH_ASYM_RX) {
+ up_bw = link_speed * 3 * 1000;
+ down_bw = link_speed * 1 * 1000;
+ } else if (include_asym) {
+ /*
+ * The link is symmetric at the moment but we
+ * can switch it to asymmetric as needed. Report
+ * this bandwidth as available (even though it
+ * is not yet enabled).
+ */
+ if (downstream) {
up_bw = link_speed * 1 * 1000;
down_bw = link_speed * 3 * 1000;
- } else if (link_width == TB_LINK_WIDTH_ASYM_RX) {
+ } else {
up_bw = link_speed * 3 * 1000;
down_bw = link_speed * 1 * 1000;
- } else {
- up_bw = link_speed * link_width * 1000;
- down_bw = up_bw;
}
+ } else {
+ up_bw = link_speed * link_width * 1000;
+ down_bw = up_bw;
}
+ }
- /* Leave 10% guard band */
- up_bw -= up_bw / 10;
- down_bw -= down_bw / 10;
-
- tb_port_dbg(port, "link total bandwidth %d/%d Mb/s\n", up_bw,
- down_bw);
+ /* Leave 10% guard band */
+ *max_up = up_bw - up_bw / 10;
+ *max_down = down_bw - down_bw / 10;
- /*
- * Find all DP tunnels that cross the port and reduce
- * their consumed bandwidth from the available.
- */
- list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
- int dp_consumed_up, dp_consumed_down;
+ tb_port_dbg(port, "link maximum bandwidth %d/%d Mb/s\n", *max_up, *max_down);
+ return 0;
+}
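
To make the arithmetic above concrete, a reduced sketch (not part of the patch) for a Gen 4 link at 40 Gb/s per lane, with widths expressed as seen from the device router's upstream port, matching the first branch above: symmetric dual-lane yields 72000/72000 Mb/s after the guard band, while an asymmetric link with three downstream lanes yields 36000/108000 Mb/s:

/* Sketch only: per-link maximum for a Gen 4 link (40 Gb/s per lane),
 * widths as seen from the device router's upstream port.
 */
static void gen4_link_maximum(enum tb_link_width width, int *max_up, int *max_down)
{
	int lane = 40000;			/* Mb/s */
	int up_bw, down_bw;

	if (width == TB_LINK_WIDTH_ASYM_TX) {
		up_bw = lane * 3;		/* 3 transmitters upstream */
		down_bw = lane * 1;
	} else if (width == TB_LINK_WIDTH_ASYM_RX) {
		up_bw = lane * 1;
		down_bw = lane * 3;		/* 3 receivers downstream */
	} else {
		up_bw = down_bw = lane * 2;	/* symmetric dual lane */
	}

	/* Leave 10% guard band: 108000/36000 asymmetric, 72000/72000 dual */
	*max_up = up_bw - up_bw / 10;
	*max_down = down_bw - down_bw / 10;
}
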
- if (tb_tunnel_is_invalid(tunnel))
- continue;
+/**
+ * tb_available_bandwidth() - Available bandwidth for tunneling
+ * @tb: Domain structure
+ * @src_port: Source protocol adapter
+ * @dst_port: Destination protocol adapter
+ * @available_up: Available bandwidth upstream (Mb/s)
+ * @available_down: Available bandwidth downstream (Mb/s)
+ * @include_asym: Include bandwidth if the link is switched from
+ * symmetric to asymmetric
+ *
+ * Calculates the maximum available bandwidth for protocol tunneling
+ * between @src_port and @dst_port at the moment. This is the minimum,
+ * over all links on the path, of each link's maximum bandwidth reduced
+ * by the bandwidth currently consumed on that link.
+ *
+ * If @include_asym is true then also includes the bandwidth that can be
+ * added when the links are transitioned into asymmetric (but does not
+ * transition the links).
+ */
+static int tb_available_bandwidth(struct tb *tb, struct tb_port *src_port,
+ struct tb_port *dst_port, int *available_up,
+ int *available_down, bool include_asym)
+{
+ struct tb_port *port;
+ int ret;
- if (!tb_tunnel_is_dp(tunnel))
- continue;
+ /* Maximum possible bandwidth asymmetric Gen 4 link is 120 Gb/s */
+ *available_up = *available_down = 120000;
- if (!tb_tunnel_port_on_path(tunnel, port))
- continue;
+ /* Find the minimum available bandwidth over all links */
+ tb_for_each_port_on_path(src_port, dst_port, port) {
+ int max_up, max_down, consumed_up, consumed_down;
- /*
- * Ignore the DP tunnel between src_port and
- * dst_port because it is the same tunnel and we
- * may be re-calculating estimated bandwidth.
- */
- if (tunnel->src_port == src_port &&
- tunnel->dst_port == dst_port)
- continue;
+ if (!tb_port_is_null(port))
+ continue;
- ret = tb_tunnel_consumed_bandwidth(tunnel,
- &dp_consumed_up,
- &dp_consumed_down);
- if (ret)
- return ret;
+ ret = tb_maximum_bandwidth(tb, src_port, dst_port, port,
+ &max_up, &max_down, include_asym);
+ if (ret)
+ return ret;
- up_bw -= dp_consumed_up;
- down_bw -= dp_consumed_down;
- }
+ ret = tb_consumed_usb3_pcie_bandwidth(tb, src_port, dst_port,
+ port, &consumed_up,
+ &consumed_down);
+ if (ret)
+ return ret;
+ max_up -= consumed_up;
+ max_down -= consumed_down;
- /*
- * If USB3 is tunneled from the host router down to the
- * branch leading to port we need to take USB3 consumed
- * bandwidth into account regardless whether it actually
- * crosses the port.
- */
- up_bw -= usb3_consumed_up;
- down_bw -= usb3_consumed_down;
+ ret = tb_consumed_dp_bandwidth(tb, src_port, dst_port, port,
+ &consumed_up, &consumed_down);
+ if (ret)
+ return ret;
+ max_up -= consumed_up;
+ max_down -= consumed_down;
- if (up_bw < *available_up)
- *available_up = up_bw;
- if (down_bw < *available_down)
- *available_down = down_bw;
+ if (max_up < *available_up)
+ *available_up = max_up;
+ if (max_down < *available_down)
+ *available_down = max_down;
}
if (*available_up < 0)
@@ -729,21 +931,21 @@ static void tb_reclaim_usb3_bandwidth(struct tb *tb, struct tb_port *src_port,
if (!tunnel)
return;
- tb_dbg(tb, "reclaiming unused bandwidth for USB3\n");
+ tb_tunnel_dbg(tunnel, "reclaiming unused bandwidth\n");
/*
* Calculate available bandwidth for the first hop USB3 tunnel.
* That determines the whole USB3 bandwidth for this branch.
*/
ret = tb_available_bandwidth(tb, tunnel->src_port, tunnel->dst_port,
- &available_up, &available_down);
+ &available_up, &available_down, false);
if (ret) {
- tb_warn(tb, "failed to calculate available bandwidth\n");
+ tb_tunnel_warn(tunnel, "failed to calculate available bandwidth\n");
return;
}
- tb_dbg(tb, "available bandwidth for USB3 %d/%d Mb/s\n",
- available_up, available_down);
+ tb_tunnel_dbg(tunnel, "available bandwidth %d/%d Mb/s\n", available_up,
+ available_down);
tb_tunnel_reclaim_available_bandwidth(tunnel, &available_up, &available_down);
}
@@ -794,8 +996,8 @@ static int tb_tunnel_usb3(struct tb *tb, struct tb_switch *sw)
return ret;
}
- ret = tb_available_bandwidth(tb, down, up, &available_up,
- &available_down);
+ ret = tb_available_bandwidth(tb, down, up, &available_up, &available_down,
+ false);
if (ret)
goto err_reclaim;
@@ -856,6 +1058,225 @@ static int tb_create_usb3_tunnels(struct tb_switch *sw)
return 0;
}
+/**
+ * tb_configure_asym() - Transition links to asymmetric if needed
+ * @tb: Domain structure
+ * @src_port: Source adapter to start the transition
+ * @dst_port: Destination adapter
+ * @requested_up: Additional bandwidth (Mb/s) required upstream
+ * @requested_down: Additional bandwidth (Mb/s) required downstream
+ *
+ * Transition links between @src_port and @dst_port into asymmetric, with
+ * three lanes in the direction from @src_port towards @dst_port and one lane
+ * in the opposite direction, if the bandwidth requirements
+ * (requested + currently consumed) on that link exceed @asym_threshold.
+ *
+ * Must be called with available >= requested over all links.
+ */
+static int tb_configure_asym(struct tb *tb, struct tb_port *src_port,
+ struct tb_port *dst_port, int requested_up,
+ int requested_down)
+{
+ struct tb_switch *sw;
+ bool clx, downstream;
+ struct tb_port *up;
+ int ret = 0;
+
+ if (!asym_threshold)
+ return 0;
+
+ /* Disable CL states before doing any transitions */
+ downstream = tb_port_path_direction_downstream(src_port, dst_port);
+ /* Pick up router deepest in the hierarchy */
+ if (downstream)
+ sw = dst_port->sw;
+ else
+ sw = src_port->sw;
+
+ clx = tb_disable_clx(sw);
+
+ tb_for_each_upstream_port_on_path(src_port, dst_port, up) {
+ int consumed_up, consumed_down;
+ enum tb_link_width width;
+
+ ret = tb_consumed_dp_bandwidth(tb, src_port, dst_port, up,
+ &consumed_up, &consumed_down);
+ if (ret)
+ break;
+
+ if (downstream) {
+ /*
+ * Downstream, so make sure the upstream direction stays within
+ * 36G (40G minus the 10% guard band) and that the requested
+ * bandwidth is above the threshold.
+ */
+ if (consumed_up + requested_up >= TB_ASYM_MIN) {
+ ret = -ENOBUFS;
+ break;
+ }
+ /* Does consumed + requested exceed the threshold */
+ if (consumed_down + requested_down < asym_threshold)
+ continue;
+
+ width = TB_LINK_WIDTH_ASYM_RX;
+ } else {
+ /* Upstream, the opposite of above */
+ if (consumed_down + requested_down >= TB_ASYM_MIN) {
+ ret = -ENOBUFS;
+ break;
+ }
+ if (consumed_up + requested_up < asym_threshold)
+ continue;
+
+ width = TB_LINK_WIDTH_ASYM_TX;
+ }
+
+ if (up->sw->link_width == width)
+ continue;
+
+ if (!tb_port_width_supported(up, width))
+ continue;
+
+ tb_sw_dbg(up->sw, "configuring asymmetric link\n");
+
+ /*
+ * Here requested + consumed > threshold so we need to
+ * transition the link into asymmetric now.
+ */
+ ret = tb_switch_set_link_width(up->sw, width);
+ if (ret) {
+ tb_sw_warn(up->sw, "failed to set link width\n");
+ break;
+ }
+ }
+
+ /* Re-enable CL states if they were previously enabled */
+ if (clx)
+ tb_enable_clx(sw);
+
+ return ret;
+}
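
Reduced to a predicate, the per-link decision in the downstream branch above looks like this (a sketch, not part of the patch; want_asym_rx() is a hypothetical name):

/* Sketch only: transition to 3 RX lanes only when the heavy
 * (downstream) direction crosses the threshold while the light
 * (upstream) direction still fits the single-lane TB_ASYM_MIN budget.
 */
static int want_asym_rx(int consumed_up, int requested_up,
			int consumed_down, int requested_down)
{
	if (consumed_up + requested_up >= TB_ASYM_MIN)
		return -ENOBUFS;	/* upstream would not fit one lane */
	if (consumed_down + requested_down < asym_threshold)
		return 0;		/* stay symmetric */
	return 1;			/* switch to TB_LINK_WIDTH_ASYM_RX */
}
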
+
+/**
+ * tb_configure_sym() - Transition links to symmetric if possible
+ * @tb: Domain structure
+ * @src_port: Source adapter to start the transition
+ * @dst_port: Destination adapter
+ * @requested_up: New lower bandwidth request upstream (Mb/s)
+ * @requested_down: New lower bandwidth request downstream (Mb/s)
+ *
+ * Goes over each link from @src_port to @dst_port and tries to
+ * transition the link to symmetric if the currently consumed bandwidth
+ * allows.
+ */
+static int tb_configure_sym(struct tb *tb, struct tb_port *src_port,
+ struct tb_port *dst_port, int requested_up,
+ int requested_down)
+{
+ struct tb_switch *sw;
+ bool clx, downstream;
+ struct tb_port *up;
+ int ret = 0;
+
+ if (!asym_threshold)
+ return 0;
+
+ /* Disable CL states before doing any transitions */
+ downstream = tb_port_path_direction_downstream(src_port, dst_port);
+ /* Pick up router deepest in the hierarchy */
+ if (downstream)
+ sw = dst_port->sw;
+ else
+ sw = src_port->sw;
+
+ clx = tb_disable_clx(sw);
+
+ tb_for_each_upstream_port_on_path(src_port, dst_port, up) {
+ int consumed_up, consumed_down;
+
+ /* Already symmetric */
+ if (up->sw->link_width <= TB_LINK_WIDTH_DUAL)
+ continue;
+ /* Unplugged, no need to switch */
+ if (up->sw->is_unplugged)
+ continue;
+
+ ret = tb_consumed_dp_bandwidth(tb, src_port, dst_port, up,
+ &consumed_up, &consumed_down);
+ if (ret)
+ break;
+
+ if (downstream) {
+ /*
+ * Downstream, so we want consumed_down < threshold. Upstream
+ * traffic should be less than 36G (40G minus the 10% guard
+ * band) as the link was already configured asymmetric.
+ */
+ if (consumed_down + requested_down >= asym_threshold)
+ continue;
+ } else {
+ if (consumed_up + requested_up >= asym_threshold)
+ continue;
+ }
+
+ if (up->sw->link_width == TB_LINK_WIDTH_DUAL)
+ continue;
+
+ tb_sw_dbg(up->sw, "configuring symmetric link\n");
+
+ ret = tb_switch_set_link_width(up->sw, TB_LINK_WIDTH_DUAL);
+ if (ret) {
+ tb_sw_warn(up->sw, "failed to set link width\n");
+ break;
+ }
+ }
+
+ /* Re-enable CL states if they were previously enabled */
+ if (clx)
+ tb_enable_clx(sw);
+
+ return ret;
+}
+
+static void tb_configure_link(struct tb_port *down, struct tb_port *up,
+ struct tb_switch *sw)
+{
+ struct tb *tb = sw->tb;
+
+ /* Link the routers using both links if available */
+ down->remote = up;
+ up->remote = down;
+ if (down->dual_link_port && up->dual_link_port) {
+ down->dual_link_port->remote = up->dual_link_port;
+ up->dual_link_port->remote = down->dual_link_port;
+ }
+
+ /*
+ * Enable lane bonding if the link is currently two single lane
+ * links.
+ */
+ if (sw->link_width < TB_LINK_WIDTH_DUAL)
+ tb_switch_set_link_width(sw, TB_LINK_WIDTH_DUAL);
+
+ /*
+ * If a device router that comes up with a symmetric link is
+ * connected deeper in the hierarchy, transition the links above
+ * it into symmetric if bandwidth allows.
+ */
+ if (tb_switch_depth(sw) > 1 &&
+ tb_port_get_link_generation(up) >= 4 &&
+ up->sw->link_width == TB_LINK_WIDTH_DUAL) {
+ struct tb_port *host_port;
+
+ host_port = tb_port_at(tb_route(sw), tb->root_switch);
+ tb_configure_sym(tb, host_port, up, 0, 0);
+ }
+
+ /* Set the link configured */
+ tb_switch_configure_link(sw);
+}
+
static void tb_scan_port(struct tb_port *port);
/*
@@ -964,19 +1385,9 @@ static void tb_scan_port(struct tb_port *port)
goto out_rpm_put;
}
- /* Link the switches using both links if available */
upstream_port = tb_upstream_port(sw);
- port->remote = upstream_port;
- upstream_port->remote = port;
- if (port->dual_link_port && upstream_port->dual_link_port) {
- port->dual_link_port->remote = upstream_port->dual_link_port;
- upstream_port->dual_link_port->remote = port->dual_link_port;
- }
+ tb_configure_link(port, upstream_port, sw);
- /* Enable lane bonding if supported */
- tb_switch_lane_bonding_enable(sw);
- /* Set the link configured */
- tb_switch_configure_link(sw);
/*
* CL0s and CL1 are enabled and supported together.
* Silently ignore CLx enabling in case CLx is not supported.
@@ -1040,6 +1451,11 @@ static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
* deallocated properly.
*/
tb_switch_dealloc_dp_resource(src_port->sw, src_port);
+ /*
+ * If bandwidth on a link is < asym_threshold
+ * transition the link to symmetric.
+ */
+ tb_configure_sym(tb, src_port, dst_port, 0, 0);
/* Now we can allow the domain to runtime suspend again */
pm_runtime_mark_last_busy(&dst_port->sw->dev);
pm_runtime_put_autosuspend(&dst_port->sw->dev);
@@ -1092,7 +1508,8 @@ static void tb_free_unplugged_children(struct tb_switch *sw)
tb_retimer_remove_all(port);
tb_remove_dp_resources(port->remote->sw);
tb_switch_unconfigure_link(port->remote->sw);
- tb_switch_lane_bonding_disable(port->remote->sw);
+ tb_switch_set_link_width(port->remote->sw,
+ TB_LINK_WIDTH_SINGLE);
tb_switch_remove(port->remote->sw);
port->remote = NULL;
if (port->dual_link_port)
@@ -1188,7 +1605,7 @@ tb_recalc_estimated_bandwidth_for_group(struct tb_bandwidth_group *group)
ret = tb_release_unused_usb3_bandwidth(tb,
first_tunnel->src_port, first_tunnel->dst_port);
if (ret) {
- tb_port_warn(in,
+ tb_tunnel_warn(tunnel,
"failed to release unused bandwidth\n");
break;
}
@@ -1196,9 +1613,9 @@ tb_recalc_estimated_bandwidth_for_group(struct tb_bandwidth_group *group)
out = tunnel->dst_port;
ret = tb_available_bandwidth(tb, in, out, &estimated_up,
- &estimated_down);
+ &estimated_down, true);
if (ret) {
- tb_port_warn(in,
+ tb_tunnel_warn(tunnel,
"failed to re-calculate estimated bandwidth\n");
break;
}
@@ -1209,16 +1626,18 @@ tb_recalc_estimated_bandwidth_for_group(struct tb_bandwidth_group *group)
* - available bandwidth along the path
* - bandwidth allocated for USB 3.x but not used.
*/
- tb_port_dbg(in, "re-calculated estimated bandwidth %u/%u Mb/s\n",
- estimated_up, estimated_down);
+ tb_tunnel_dbg(tunnel,
+ "re-calculated estimated bandwidth %u/%u Mb/s\n",
+ estimated_up, estimated_down);
- if (in->sw->config.depth < out->sw->config.depth)
+ if (tb_port_path_direction_downstream(in, out))
estimated_bw = estimated_down;
else
estimated_bw = estimated_up;
if (usb4_dp_port_set_estimated_bandwidth(in, estimated_bw))
- tb_port_warn(in, "failed to update estimated bandwidth\n");
+ tb_tunnel_warn(tunnel,
+ "failed to update estimated bandwidth\n");
}
if (first_tunnel)
@@ -1282,18 +1701,14 @@ static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in)
return NULL;
}
-static void tb_tunnel_dp(struct tb *tb)
+static bool tb_tunnel_one_dp(struct tb *tb)
{
int available_up, available_down, ret, link_nr;
struct tb_cm *tcm = tb_priv(tb);
struct tb_port *port, *in, *out;
+ int consumed_up, consumed_down;
struct tb_tunnel *tunnel;
- if (!tb_acpi_may_tunnel_dp()) {
- tb_dbg(tb, "DP tunneling disabled, not creating tunnel\n");
- return;
- }
-
/*
* Find pair of inactive DP IN and DP OUT adapters and then
* establish a DP tunnel between them.
@@ -1311,22 +1726,21 @@ static void tb_tunnel_dp(struct tb *tb)
continue;
}
- tb_port_dbg(port, "DP IN available\n");
+ in = port;
+ tb_port_dbg(in, "DP IN available\n");
out = tb_find_dp_out(tb, port);
- if (out) {
- in = port;
+ if (out)
break;
- }
}
if (!in) {
tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n");
- return;
+ return false;
}
if (!out) {
tb_dbg(tb, "no suitable DP OUT adapter available, not tunneling\n");
- return;
+ return false;
}
/*
@@ -1369,7 +1783,8 @@ static void tb_tunnel_dp(struct tb *tb)
goto err_detach_group;
}
- ret = tb_available_bandwidth(tb, in, out, &available_up, &available_down);
+ ret = tb_available_bandwidth(tb, in, out, &available_up, &available_down,
+ true);
if (ret)
goto err_reclaim_usb;
@@ -1391,6 +1806,13 @@ static void tb_tunnel_dp(struct tb *tb)
list_add_tail(&tunnel->list, &tcm->tunnel_list);
tb_reclaim_usb3_bandwidth(tb, in, out);
+ /*
+ * Transition the links to asymmetric if the consumption exceeds
+ * the threshold.
+ */
+ if (!tb_tunnel_consumed_bandwidth(tunnel, &consumed_up, &consumed_down))
+ tb_configure_asym(tb, in, out, consumed_up, consumed_down);
+
/* Update the domain with the new bandwidth estimation */
tb_recalc_estimated_bandwidth(tb);
@@ -1399,7 +1821,7 @@ static void tb_tunnel_dp(struct tb *tb)
* TMU mode to HiFi for CL0s to work.
*/
tb_increase_tmu_accuracy(tunnel);
- return;
+ return true;
err_free:
tb_tunnel_free(tunnel);
@@ -1414,6 +1836,19 @@ err_rpm_put:
pm_runtime_put_autosuspend(&out->sw->dev);
pm_runtime_mark_last_busy(&in->sw->dev);
pm_runtime_put_autosuspend(&in->sw->dev);
+
+ return false;
+}
+
+static void tb_tunnel_dp(struct tb *tb)
+{
+ if (!tb_acpi_may_tunnel_dp()) {
+ tb_dbg(tb, "DP tunneling disabled, not creating tunnel\n");
+ return;
+ }
+
+ while (tb_tunnel_one_dp(tb))
+ ;
}
static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port)
@@ -1701,7 +2136,8 @@ static void tb_handle_hotplug(struct work_struct *work)
tb_remove_dp_resources(port->remote->sw);
tb_switch_tmu_disable(port->remote->sw);
tb_switch_unconfigure_link(port->remote->sw);
- tb_switch_lane_bonding_disable(port->remote->sw);
+ tb_switch_set_link_width(port->remote->sw,
+ TB_LINK_WIDTH_SINGLE);
tb_switch_remove(port->remote->sw);
port->remote = NULL;
if (port->dual_link_port)
@@ -1781,8 +2217,8 @@ static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up,
in = tunnel->src_port;
out = tunnel->dst_port;
- tb_port_dbg(in, "bandwidth allocated currently %d/%d Mb/s\n",
- allocated_up, allocated_down);
+ tb_tunnel_dbg(tunnel, "bandwidth allocated currently %d/%d Mb/s\n",
+ allocated_up, allocated_down);
/*
* If we get rounded up request from graphics side, say HBR2 x 4
@@ -1823,20 +2259,26 @@ static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up,
else if (requested_down_corrected < 0)
requested_down_corrected = 0;
- tb_port_dbg(in, "corrected bandwidth request %d/%d Mb/s\n",
- requested_up_corrected, requested_down_corrected);
+ tb_tunnel_dbg(tunnel, "corrected bandwidth request %d/%d Mb/s\n",
+ requested_up_corrected, requested_down_corrected);
if ((*requested_up >= 0 && requested_up_corrected > max_up_rounded) ||
(*requested_down >= 0 && requested_down_corrected > max_down_rounded)) {
- tb_port_dbg(in, "bandwidth request too high (%d/%d Mb/s > %d/%d Mb/s)\n",
- requested_up_corrected, requested_down_corrected,
- max_up_rounded, max_down_rounded);
+ tb_tunnel_dbg(tunnel,
+ "bandwidth request too high (%d/%d Mb/s > %d/%d Mb/s)\n",
+ requested_up_corrected, requested_down_corrected,
+ max_up_rounded, max_down_rounded);
return -ENOBUFS;
}
if ((*requested_up >= 0 && requested_up_corrected <= allocated_up) ||
(*requested_down >= 0 && requested_down_corrected <= allocated_down)) {
/*
+ * If bandwidth on a link is < asym_threshold transition
+ * the link to symmetric.
+ */
+ tb_configure_sym(tb, in, out, *requested_up, *requested_down);
+ /*
* If requested bandwidth is less or equal than what is
* currently allocated to that tunnel we simply change
* the reservation of the tunnel. Since all the tunnels
@@ -1861,17 +2303,33 @@ static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up,
* are also in the same group but we use the same function here
* that we use with the normal bandwidth allocation).
*/
- ret = tb_available_bandwidth(tb, in, out, &available_up, &available_down);
+ ret = tb_available_bandwidth(tb, in, out, &available_up, &available_down,
+ true);
if (ret)
goto reclaim;
- tb_port_dbg(in, "bandwidth available for allocation %d/%d Mb/s\n",
- available_up, available_down);
+ tb_tunnel_dbg(tunnel, "bandwidth available for allocation %d/%d Mb/s\n",
+ available_up, available_down);
if ((*requested_up >= 0 && available_up >= requested_up_corrected) ||
(*requested_down >= 0 && available_down >= requested_down_corrected)) {
+ /*
+ * If bandwidth on a link is >= asym_threshold
+ * transition the link to asymmetric.
+ */
+ ret = tb_configure_asym(tb, in, out, *requested_up,
+ *requested_down);
+ if (ret) {
+ tb_configure_sym(tb, in, out, 0, 0);
+ return ret;
+ }
+
ret = tb_tunnel_alloc_bandwidth(tunnel, requested_up,
requested_down);
+ if (ret) {
+ tb_tunnel_warn(tunnel, "failed to allocate bandwidth\n");
+ tb_configure_sym(tb, in, out, 0, 0);
+ }
} else {
ret = -ENOBUFS;
}
@@ -1937,7 +2395,7 @@ static void tb_handle_dp_bandwidth_request(struct work_struct *work)
out = tunnel->dst_port;
- if (in->sw->config.depth < out->sw->config.depth) {
+ if (tb_port_path_direction_downstream(in, out)) {
requested_up = -1;
requested_down = requested_bw;
} else {
@@ -1948,12 +2406,15 @@ static void tb_handle_dp_bandwidth_request(struct work_struct *work)
ret = tb_alloc_dp_bandwidth(tunnel, &requested_up, &requested_down);
if (ret) {
if (ret == -ENOBUFS)
- tb_port_warn(in, "not enough bandwidth available\n");
+ tb_tunnel_warn(tunnel,
+ "not enough bandwidth available\n");
else
- tb_port_warn(in, "failed to change bandwidth allocation\n");
+ tb_tunnel_warn(tunnel,
+ "failed to change bandwidth allocation\n");
} else {
- tb_port_dbg(in, "bandwidth allocation changed to %d/%d Mb/s\n",
- requested_up, requested_down);
+ tb_tunnel_dbg(tunnel,
+ "bandwidth allocation changed to %d/%d Mb/s\n",
+ requested_up, requested_down);
/* Update other clients about the allocation change */
tb_recalc_estimated_bandwidth(tb);
@@ -2179,7 +2640,8 @@ static void tb_restore_children(struct tb_switch *sw)
continue;
if (port->remote) {
- tb_switch_lane_bonding_enable(port->remote->sw);
+ tb_switch_set_link_width(port->remote->sw,
+ port->remote->sw->link_width);
tb_switch_configure_link(port->remote->sw);
tb_restore_children(port->remote->sw);
diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
index d2a55ad2fd3e..e299e53473ae 100644
--- a/drivers/thunderbolt/tb.h
+++ b/drivers/thunderbolt/tb.h
@@ -162,11 +162,6 @@ struct tb_switch_tmu {
* switches) you need to have domain lock held.
*
* In USB4 terminology this structure represents a router.
- *
- * Note @link_width is not the same as whether link is bonded or not.
- * For Gen 4 links the link is also bonded when it is asymmetric. The
- * correct way to find out whether the link is bonded or not is to look
- * @bonded field of the upstream port.
*/
struct tb_switch {
struct device dev;
@@ -348,6 +343,7 @@ struct tb_retimer {
* the path
* @nfc_credits: Number of non-flow controlled buffers allocated for the
* @in_port.
+ * @pm_support: Set path PM packet support bit to 1 (for USB4 v2 routers)
*
* Hop configuration is always done on the IN port of a switch.
* in_port and out_port have to be on the same switch. Packets arriving on
@@ -368,6 +364,7 @@ struct tb_path_hop {
int next_hop_index;
unsigned int initial_credits;
unsigned int nfc_credits;
+ bool pm_support;
};
/**
@@ -864,6 +861,15 @@ static inline struct tb_port *tb_switch_downstream_port(struct tb_switch *sw)
return tb_port_at(tb_route(sw), tb_switch_parent(sw));
}
+/**
+ * tb_switch_depth() - Returns depth of the connected router
+ * @sw: Router
+ */
+static inline int tb_switch_depth(const struct tb_switch *sw)
+{
+ return sw->config.depth;
+}
+
static inline bool tb_switch_is_light_ridge(const struct tb_switch *sw)
{
return sw->config.vendor_id == PCI_VENDOR_ID_INTEL &&
@@ -956,8 +962,7 @@ static inline bool tb_switch_is_icm(const struct tb_switch *sw)
return !sw->config.enabled;
}
-int tb_switch_lane_bonding_enable(struct tb_switch *sw);
-void tb_switch_lane_bonding_disable(struct tb_switch *sw);
+int tb_switch_set_link_width(struct tb_switch *sw, enum tb_link_width width);
int tb_switch_configure_link(struct tb_switch *sw);
void tb_switch_unconfigure_link(struct tb_switch *sw);
@@ -1001,7 +1006,6 @@ static inline bool tb_switch_tmu_is_enabled(const struct tb_switch *sw)
bool tb_port_clx_is_enabled(struct tb_port *port, unsigned int clx);
int tb_switch_clx_init(struct tb_switch *sw);
-bool tb_switch_clx_is_supported(const struct tb_switch *sw);
int tb_switch_clx_enable(struct tb_switch *sw, unsigned int clx);
int tb_switch_clx_disable(struct tb_switch *sw);
@@ -1040,6 +1044,21 @@ void tb_port_release_out_hopid(struct tb_port *port, int hopid);
struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end,
struct tb_port *prev);
+/**
+ * tb_port_path_direction_downstream() - Checks if path directed downstream
+ * @src: Source adapter
+ * @dst: Destination adapter
+ *
+ * Returns %true only if the specified path from source adapter (@src)
+ * to destination adapter (@dst) is directed downstream.
+ */
+static inline bool
+tb_port_path_direction_downstream(const struct tb_port *src,
+ const struct tb_port *dst)
+{
+ return src->sw->config.depth < dst->sw->config.depth;
+}
+
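In practice the helper replaces open-coded depth comparisons; the DP tunnel code further down uses it to pick the bandwidth budget matching the path direction, roughly:

	/* Illustrative only, mirroring the tb_dp_xchg_caps() hunk below. */
	if (tb_port_path_direction_downstream(in, out))
		max_bw = tunnel->max_down;
	else
		max_bw = tunnel->max_up;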
static inline bool tb_port_use_credit_allocation(const struct tb_port *port)
{
return tb_port_is_null(port) && port->sw->credit_allocation;
@@ -1057,12 +1076,29 @@ static inline bool tb_port_use_credit_allocation(const struct tb_port *port)
for ((p) = tb_next_port_on_path((src), (dst), NULL); (p); \
(p) = tb_next_port_on_path((src), (dst), (p)))
+/**
+ * tb_for_each_upstream_port_on_path() - Iterate over each upstream port on path
+ * @src: Source port
+ * @dst: Destination port
+ * @p: Port used as iterator
+ *
+ * Walks over each upstream lane adapter on path from @src to @dst.
+ */
+#define tb_for_each_upstream_port_on_path(src, dst, p) \
+ for ((p) = tb_next_port_on_path((src), (dst), NULL); (p); \
+ (p) = tb_next_port_on_path((src), (dst), (p))) \
+ if (!tb_port_is_null((p)) || !tb_is_upstream_port((p))) {\
+ continue; \
+ } else
+
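The if/else tail is the usual iterator-filter idiom: ports that are not upstream lane adapters hit the continue branch, and the caller's statement becomes the else body, so the macro still composes with a brace-less loop body. A hypothetical walk:

	struct tb_port *p;

	/* Illustrative only: log every upstream lane adapter between src and dst. */
	tb_for_each_upstream_port_on_path(src, dst, p)
		tb_port_dbg(p, "upstream lane adapter on the path\n");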
int tb_port_get_link_speed(struct tb_port *port);
+int tb_port_get_link_generation(struct tb_port *port);
int tb_port_get_link_width(struct tb_port *port);
+bool tb_port_width_supported(struct tb_port *port, unsigned int width);
int tb_port_set_link_width(struct tb_port *port, enum tb_link_width width);
int tb_port_lane_bonding_enable(struct tb_port *port);
void tb_port_lane_bonding_disable(struct tb_port *port);
-int tb_port_wait_for_link_width(struct tb_port *port, unsigned int width_mask,
+int tb_port_wait_for_link_width(struct tb_port *port, unsigned int width,
int timeout_msec);
int tb_port_update_credits(struct tb_port *port);
@@ -1256,6 +1292,11 @@ int usb4_port_router_online(struct tb_port *port);
int usb4_port_enumerate_retimers(struct tb_port *port);
bool usb4_port_clx_supported(struct tb_port *port);
int usb4_port_margining_caps(struct tb_port *port, u32 *caps);
+
+bool usb4_port_asym_supported(struct tb_port *port);
+int usb4_port_asym_set_link_width(struct tb_port *port, enum tb_link_width width);
+int usb4_port_asym_start(struct tb_port *port);
+
int usb4_port_hw_margin(struct tb_port *port, unsigned int lanes,
unsigned int ber_level, bool timing, bool right_high,
u32 *results);
@@ -1283,7 +1324,6 @@ int usb4_port_retimer_nvm_read(struct tb_port *port, u8 index,
unsigned int address, void *buf, size_t size);
int usb4_usb3_port_max_link_rate(struct tb_port *port);
-int usb4_usb3_port_actual_link_rate(struct tb_port *port);
int usb4_usb3_port_allocated_bandwidth(struct tb_port *port, int *upstream_bw,
int *downstream_bw);
int usb4_usb3_port_allocate_bandwidth(struct tb_port *port, int *upstream_bw,
diff --git a/drivers/thunderbolt/tb_regs.h b/drivers/thunderbolt/tb_regs.h
index cf9f2370878a..87e4795275fe 100644
--- a/drivers/thunderbolt/tb_regs.h
+++ b/drivers/thunderbolt/tb_regs.h
@@ -346,10 +346,14 @@ struct tb_regs_port_header {
#define LANE_ADP_CS_1 0x01
#define LANE_ADP_CS_1_TARGET_SPEED_MASK GENMASK(3, 0)
#define LANE_ADP_CS_1_TARGET_SPEED_GEN3 0xc
-#define LANE_ADP_CS_1_TARGET_WIDTH_MASK GENMASK(9, 4)
+#define LANE_ADP_CS_1_TARGET_WIDTH_MASK GENMASK(5, 4)
#define LANE_ADP_CS_1_TARGET_WIDTH_SHIFT 4
#define LANE_ADP_CS_1_TARGET_WIDTH_SINGLE 0x1
#define LANE_ADP_CS_1_TARGET_WIDTH_DUAL 0x3
+#define LANE_ADP_CS_1_TARGET_WIDTH_ASYM_MASK GENMASK(7, 6)
+#define LANE_ADP_CS_1_TARGET_WIDTH_ASYM_TX 0x1
+#define LANE_ADP_CS_1_TARGET_WIDTH_ASYM_RX 0x2
+#define LANE_ADP_CS_1_TARGET_WIDTH_ASYM_DUAL 0x0
#define LANE_ADP_CS_1_CL0S_ENABLE BIT(10)
#define LANE_ADP_CS_1_CL1_ENABLE BIT(11)
#define LANE_ADP_CS_1_CL2_ENABLE BIT(12)
@@ -382,12 +386,15 @@ struct tb_regs_port_header {
#define PORT_CS_18_WOCS BIT(16)
#define PORT_CS_18_WODS BIT(17)
#define PORT_CS_18_WOU4S BIT(18)
+#define PORT_CS_18_CSA BIT(22)
+#define PORT_CS_18_TIP BIT(24)
#define PORT_CS_19 0x13
#define PORT_CS_19_PC BIT(3)
#define PORT_CS_19_PID BIT(4)
#define PORT_CS_19_WOC BIT(16)
#define PORT_CS_19_WOD BIT(17)
#define PORT_CS_19_WOU4 BIT(18)
+#define PORT_CS_19_START_ASYM BIT(24)
/* Display Port adapter registers */
#define ADP_DP_CS_0 0x00
@@ -400,7 +407,7 @@ struct tb_regs_port_header {
#define ADP_DP_CS_1_AUX_RX_HOPID_SHIFT 11
#define ADP_DP_CS_2 0x02
#define ADP_DP_CS_2_NRD_MLC_MASK GENMASK(2, 0)
-#define ADP_DP_CS_2_HDP BIT(6)
+#define ADP_DP_CS_2_HPD BIT(6)
#define ADP_DP_CS_2_NRD_MLR_MASK GENMASK(9, 7)
#define ADP_DP_CS_2_NRD_MLR_SHIFT 7
#define ADP_DP_CS_2_CA BIT(10)
@@ -417,7 +424,7 @@ struct tb_regs_port_header {
#define ADP_DP_CS_2_ESTIMATED_BW_MASK GENMASK(31, 24)
#define ADP_DP_CS_2_ESTIMATED_BW_SHIFT 24
#define ADP_DP_CS_3 0x03
-#define ADP_DP_CS_3_HDPC BIT(9)
+#define ADP_DP_CS_3_HPDC BIT(9)
#define DP_LOCAL_CAP 0x04
#define DP_REMOTE_CAP 0x05
/* For DP IN adapter */
@@ -484,9 +491,6 @@ struct tb_regs_port_header {
#define ADP_USB3_CS_3 0x03
#define ADP_USB3_CS_3_SCALE_MASK GENMASK(5, 0)
#define ADP_USB3_CS_4 0x04
-#define ADP_USB3_CS_4_ALR_MASK GENMASK(6, 0)
-#define ADP_USB3_CS_4_ALR_20G 0x1
-#define ADP_USB3_CS_4_ULV BIT(7)
#define ADP_USB3_CS_4_MSLR_MASK GENMASK(18, 12)
#define ADP_USB3_CS_4_MSLR_SHIFT 12
#define ADP_USB3_CS_4_MSLR_20G 0x1
@@ -499,7 +503,8 @@ struct tb_regs_hop {
* out_port (on the incoming port of the next switch)
*/
u32 out_port:6; /* next port of the path (on the same switch) */
- u32 initial_credits:8;
+ u32 initial_credits:7;
+ u32 pmps:1;
u32 unknown1:6; /* set to zero */
bool enable:1;
diff --git a/drivers/thunderbolt/tunnel.c b/drivers/thunderbolt/tunnel.c
index a6810fb36860..7534cd3a81f4 100644
--- a/drivers/thunderbolt/tunnel.c
+++ b/drivers/thunderbolt/tunnel.c
@@ -21,12 +21,18 @@
#define TB_PCI_PATH_DOWN 0
#define TB_PCI_PATH_UP 1
+#define TB_PCI_PRIORITY 3
+#define TB_PCI_WEIGHT 1
+
/* USB3 adapters always use HopID of 8 for both directions */
#define TB_USB3_HOPID 8
#define TB_USB3_PATH_DOWN 0
#define TB_USB3_PATH_UP 1
+#define TB_USB3_PRIORITY 3
+#define TB_USB3_WEIGHT 2
+
/* DP adapters use HopID 8 for AUX and 9 for Video */
#define TB_DP_AUX_TX_HOPID 8
#define TB_DP_AUX_RX_HOPID 8
@@ -36,6 +42,12 @@
#define TB_DP_AUX_PATH_OUT 1
#define TB_DP_AUX_PATH_IN 2
+#define TB_DP_VIDEO_PRIORITY 1
+#define TB_DP_VIDEO_WEIGHT 1
+
+#define TB_DP_AUX_PRIORITY 2
+#define TB_DP_AUX_WEIGHT 1
+
/* Minimum number of credits needed for PCIe path */
#define TB_MIN_PCIE_CREDITS 6U
/*
@@ -46,6 +58,18 @@
/* Minimum number of credits for DMA path */
#define TB_MIN_DMA_CREDITS 1
+#define TB_DMA_PRIORITY 5
+#define TB_DMA_WEIGHT 1
+
+/*
+ * Reserve additional bandwidth for USB 3.x and PCIe bulk traffic
+ * according to the USB4 v2 Connection Manager guide. This ends up
+ * reserving 1500 Mb/s for PCIe and 3000 Mb/s for USB 3.x, taking the
+ * weights into account.
+ */
+#define USB4_V2_PCI_MIN_BANDWIDTH (1500 * TB_PCI_WEIGHT)
+#define USB4_V2_USB3_MIN_BANDWIDTH (1500 * TB_USB3_WEIGHT)
+
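Spelling out the arithmetic with the weights defined above:

	USB4_V2_PCI_MIN_BANDWIDTH  = 1500 * TB_PCI_WEIGHT  = 1500 * 1 = 1500 Mb/s
	USB4_V2_USB3_MIN_BANDWIDTH = 1500 * TB_USB3_WEIGHT = 1500 * 2 = 3000 Mb/s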
static unsigned int dma_credits = TB_DMA_CREDITS;
module_param(dma_credits, uint, 0444);
MODULE_PARM_DESC(dma_credits, "specify custom credits for DMA tunnels (default: "
@@ -58,27 +82,6 @@ MODULE_PARM_DESC(bw_alloc_mode,
static const char * const tb_tunnel_names[] = { "PCI", "DP", "DMA", "USB3" };
-#define __TB_TUNNEL_PRINT(level, tunnel, fmt, arg...) \
- do { \
- struct tb_tunnel *__tunnel = (tunnel); \
- level(__tunnel->tb, "%llx:%u <-> %llx:%u (%s): " fmt, \
- tb_route(__tunnel->src_port->sw), \
- __tunnel->src_port->port, \
- tb_route(__tunnel->dst_port->sw), \
- __tunnel->dst_port->port, \
- tb_tunnel_names[__tunnel->type], \
- ## arg); \
- } while (0)
-
-#define tb_tunnel_WARN(tunnel, fmt, arg...) \
- __TB_TUNNEL_PRINT(tb_WARN, tunnel, fmt, ##arg)
-#define tb_tunnel_warn(tunnel, fmt, arg...) \
- __TB_TUNNEL_PRINT(tb_warn, tunnel, fmt, ##arg)
-#define tb_tunnel_info(tunnel, fmt, arg...) \
- __TB_TUNNEL_PRINT(tb_info, tunnel, fmt, ##arg)
-#define tb_tunnel_dbg(tunnel, fmt, arg...) \
- __TB_TUNNEL_PRINT(tb_dbg, tunnel, fmt, ##arg)
-
static inline unsigned int tb_usable_credits(const struct tb_port *port)
{
return port->total_credits - port->ctl_credits;
@@ -131,6 +134,16 @@ static unsigned int tb_available_credits(const struct tb_port *port,
return credits > 0 ? credits : 0;
}
+static void tb_init_pm_support(struct tb_path_hop *hop)
+{
+ struct tb_port *out_port = hop->out_port;
+ struct tb_port *in_port = hop->in_port;
+
+ if (tb_port_is_null(in_port) && tb_port_is_null(out_port) &&
+ usb4_switch_version(in_port->sw) >= 2)
+ hop->pm_support = true;
+}
+
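The flag itself only takes effect when the hop is programmed; a minimal sketch, assuming path.c copies it into the new one-bit pmps field of struct tb_regs_hop (see the tb_regs.h change above):

	/* Sketch of the assumed hop programming, not quoted from this series. */
	hop.pmps = path->hops[i].pm_support;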
static struct tb_tunnel *tb_tunnel_alloc(struct tb *tb, size_t npaths,
enum tb_tunnel_type type)
{
@@ -156,11 +169,11 @@ static struct tb_tunnel *tb_tunnel_alloc(struct tb *tb, size_t npaths,
static int tb_pci_set_ext_encapsulation(struct tb_tunnel *tunnel, bool enable)
{
+ struct tb_port *port = tb_upstream_port(tunnel->dst_port->sw);
int ret;
/* Only supported if both routers are at least USB4 v2 */
- if (usb4_switch_version(tunnel->src_port->sw) < 2 ||
- usb4_switch_version(tunnel->dst_port->sw) < 2)
+ if (tb_port_get_link_generation(port) < 4)
return 0;
ret = usb4_pci_port_set_ext_encapsulation(tunnel->src_port, enable);
@@ -234,8 +247,8 @@ static int tb_pci_init_path(struct tb_path *path)
path->egress_shared_buffer = TB_PATH_NONE;
path->ingress_fc_enable = TB_PATH_ALL;
path->ingress_shared_buffer = TB_PATH_NONE;
- path->priority = 3;
- path->weight = 1;
+ path->priority = TB_PCI_PRIORITY;
+ path->weight = TB_PCI_WEIGHT;
path->drop_packages = 0;
tb_path_for_each_hop(path, hop) {
@@ -376,6 +389,51 @@ err_free:
return NULL;
}
+/**
+ * tb_tunnel_reserved_pci() - Amount of bandwidth to reserve for PCIe
+ * @port: Lane 0 adapter
+ * @reserved_up: Upstream bandwidth in Mb/s to reserve
+ * @reserved_down: Downstream bandwidth in Mb/s to reserve
+ *
+ * Can be called for any connected lane 0 adapter to find out how much
+ * bandwidth needs to be left in reserve for possible PCIe bulk traffic.
+ * Returns %true if there is something to be reserved and writes the
+ * amount to @reserved_down/@reserved_up. Otherwise returns %false and
+ * does not touch the parameters.
+ */
+bool tb_tunnel_reserved_pci(struct tb_port *port, int *reserved_up,
+ int *reserved_down)
+{
+ if (WARN_ON_ONCE(!port->remote))
+ return false;
+
+ if (!tb_acpi_may_tunnel_pcie())
+ return false;
+
+ if (tb_port_get_link_generation(port) < 4)
+ return false;
+
+ /* Must have PCIe adapters */
+ if (tb_is_upstream_port(port)) {
+ if (!tb_switch_find_port(port->sw, TB_TYPE_PCIE_UP))
+ return false;
+ if (!tb_switch_find_port(port->remote->sw, TB_TYPE_PCIE_DOWN))
+ return false;
+ } else {
+ if (!tb_switch_find_port(port->sw, TB_TYPE_PCIE_DOWN))
+ return false;
+ if (!tb_switch_find_port(port->remote->sw, TB_TYPE_PCIE_UP))
+ return false;
+ }
+
+ *reserved_up = USB4_V2_PCI_MIN_BANDWIDTH;
+ *reserved_down = USB4_V2_PCI_MIN_BANDWIDTH;
+
+ tb_port_dbg(port, "reserving %u/%u Mb/s for PCIe\n", *reserved_up,
+ *reserved_down);
+ return true;
+}
+
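A hypothetical caller would subtract the reservation from its available-bandwidth estimate only when the function reports one:

	int reserved_up = 0, reserved_down = 0;

	/* Illustrative only: leave headroom for possible PCIe bulk traffic. */
	if (tb_tunnel_reserved_pci(port, &reserved_up, &reserved_down)) {
		available_up -= reserved_up;
		available_down -= reserved_down;
	}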
static bool tb_dp_is_usb4(const struct tb_switch *sw)
{
/* Titan Ridge DP adapters need the same treatment as USB4 */
@@ -614,8 +672,9 @@ static int tb_dp_xchg_caps(struct tb_tunnel *tunnel)
in_rate = tb_dp_cap_get_rate(in_dp_cap);
in_lanes = tb_dp_cap_get_lanes(in_dp_cap);
- tb_port_dbg(in, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
- in_rate, in_lanes, tb_dp_bandwidth(in_rate, in_lanes));
+ tb_tunnel_dbg(tunnel,
+ "DP IN maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
+ in_rate, in_lanes, tb_dp_bandwidth(in_rate, in_lanes));
/*
* If the tunnel bandwidth is limited (max_bw is set) then see
@@ -624,10 +683,11 @@ static int tb_dp_xchg_caps(struct tb_tunnel *tunnel)
out_rate = tb_dp_cap_get_rate(out_dp_cap);
out_lanes = tb_dp_cap_get_lanes(out_dp_cap);
bw = tb_dp_bandwidth(out_rate, out_lanes);
- tb_port_dbg(out, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
- out_rate, out_lanes, bw);
+ tb_tunnel_dbg(tunnel,
+ "DP OUT maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
+ out_rate, out_lanes, bw);
- if (in->sw->config.depth < out->sw->config.depth)
+ if (tb_port_path_direction_downstream(in, out))
max_bw = tunnel->max_down;
else
max_bw = tunnel->max_up;
@@ -639,13 +699,14 @@ static int tb_dp_xchg_caps(struct tb_tunnel *tunnel)
out_rate, out_lanes, &new_rate,
&new_lanes);
if (ret) {
- tb_port_info(out, "not enough bandwidth for DP tunnel\n");
+ tb_tunnel_info(tunnel, "not enough bandwidth\n");
return ret;
}
new_bw = tb_dp_bandwidth(new_rate, new_lanes);
- tb_port_dbg(out, "bandwidth reduced to %u Mb/s x%u = %u Mb/s\n",
- new_rate, new_lanes, new_bw);
+ tb_tunnel_dbg(tunnel,
+ "bandwidth reduced to %u Mb/s x%u = %u Mb/s\n",
+ new_rate, new_lanes, new_bw);
/*
* Set new rate and number of lanes before writing it to
@@ -662,7 +723,7 @@ static int tb_dp_xchg_caps(struct tb_tunnel *tunnel)
*/
if (tb_route(out->sw) && tb_switch_is_titan_ridge(out->sw)) {
out_dp_cap |= DP_COMMON_CAP_LTTPR_NS;
- tb_port_dbg(out, "disabling LTTPR\n");
+ tb_tunnel_dbg(tunnel, "disabling LTTPR\n");
}
return tb_port_write(in, &out_dp_cap, TB_CFG_PORT,
@@ -712,8 +773,8 @@ static int tb_dp_bandwidth_alloc_mode_enable(struct tb_tunnel *tunnel)
lanes = min(in_lanes, out_lanes);
tmp = tb_dp_bandwidth(rate, lanes);
- tb_port_dbg(in, "non-reduced bandwidth %u Mb/s x%u = %u Mb/s\n", rate,
- lanes, tmp);
+ tb_tunnel_dbg(tunnel, "non-reduced bandwidth %u Mb/s x%u = %u Mb/s\n",
+ rate, lanes, tmp);
ret = usb4_dp_port_set_nrd(in, rate, lanes);
if (ret)
@@ -728,15 +789,15 @@ static int tb_dp_bandwidth_alloc_mode_enable(struct tb_tunnel *tunnel)
rate = min(in_rate, out_rate);
tmp = tb_dp_bandwidth(rate, lanes);
- tb_port_dbg(in,
- "maximum bandwidth through allocation mode %u Mb/s x%u = %u Mb/s\n",
- rate, lanes, tmp);
+ tb_tunnel_dbg(tunnel,
+ "maximum bandwidth through allocation mode %u Mb/s x%u = %u Mb/s\n",
+ rate, lanes, tmp);
for (granularity = 250; tmp / granularity > 255 && granularity <= 1000;
granularity *= 2)
;
- tb_port_dbg(in, "granularity %d Mb/s\n", granularity);
+ tb_tunnel_dbg(tunnel, "granularity %d Mb/s\n", granularity);
/*
* Returns -EINVAL if granularity above is outside of the
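The loop above doubles the granularity from 250 Mb/s until the maximum bandwidth fits the 8-bit register field. For example, with tmp = 80000 Mb/s: 80000 / 250 = 320 > 255, so granularity doubles to 500; then 80000 / 500 = 160 <= 255 and the loop settles on 500 Mb/s.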
@@ -751,12 +812,12 @@ static int tb_dp_bandwidth_alloc_mode_enable(struct tb_tunnel *tunnel)
* max_up/down fields. For discovery we just read what the
* estimation was set to.
*/
- if (in->sw->config.depth < out->sw->config.depth)
+ if (tb_port_path_direction_downstream(in, out))
estimated_bw = tunnel->max_down;
else
estimated_bw = tunnel->max_up;
- tb_port_dbg(in, "estimated bandwidth %d Mb/s\n", estimated_bw);
+ tb_tunnel_dbg(tunnel, "estimated bandwidth %d Mb/s\n", estimated_bw);
ret = usb4_dp_port_set_estimated_bandwidth(in, estimated_bw);
if (ret)
@@ -767,7 +828,7 @@ static int tb_dp_bandwidth_alloc_mode_enable(struct tb_tunnel *tunnel)
if (ret)
return ret;
- tb_port_dbg(in, "bandwidth allocation mode enabled\n");
+ tb_tunnel_dbg(tunnel, "bandwidth allocation mode enabled\n");
return 0;
}
@@ -788,7 +849,7 @@ static int tb_dp_init(struct tb_tunnel *tunnel)
if (!usb4_dp_port_bandwidth_mode_supported(in))
return 0;
- tb_port_dbg(in, "bandwidth allocation mode supported\n");
+ tb_tunnel_dbg(tunnel, "bandwidth allocation mode supported\n");
ret = usb4_dp_port_set_cm_id(in, tb->index);
if (ret)
@@ -805,7 +866,7 @@ static void tb_dp_deinit(struct tb_tunnel *tunnel)
return;
if (usb4_dp_port_bandwidth_mode_enabled(in)) {
usb4_dp_port_set_cm_bandwidth_mode_supported(in, false);
- tb_port_dbg(in, "bandwidth allocation mode disabled\n");
+ tb_tunnel_dbg(tunnel, "bandwidth allocation mode disabled\n");
}
}
@@ -921,10 +982,7 @@ static int tb_dp_bandwidth_mode_consumed_bandwidth(struct tb_tunnel *tunnel,
if (allocated_bw == max_bw)
allocated_bw = ret;
- tb_port_dbg(in, "consumed bandwidth through allocation mode %d Mb/s\n",
- allocated_bw);
-
- if (in->sw->config.depth < out->sw->config.depth) {
+ if (tb_port_path_direction_downstream(in, out)) {
*consumed_up = 0;
*consumed_down = allocated_bw;
} else {
@@ -959,7 +1017,7 @@ static int tb_dp_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up
if (allocated_bw == max_bw)
allocated_bw = ret;
- if (in->sw->config.depth < out->sw->config.depth) {
+ if (tb_port_path_direction_downstream(in, out)) {
*allocated_up = 0;
*allocated_down = allocated_bw;
} else {
@@ -987,7 +1045,7 @@ static int tb_dp_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up,
if (ret < 0)
return ret;
- if (in->sw->config.depth < out->sw->config.depth) {
+ if (tb_port_path_direction_downstream(in, out)) {
tmp = min(*alloc_down, max_bw);
ret = usb4_dp_port_allocate_bandwidth(in, tmp);
if (ret)
@@ -1006,9 +1064,6 @@ static int tb_dp_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up,
/* Now we can use BW mode registers to figure out the bandwidth */
/* TODO: need to handle discovery too */
tunnel->bw_mode = true;
-
- tb_port_dbg(in, "allocated bandwidth through allocation mode %d Mb/s\n",
- tmp);
return 0;
}
@@ -1035,8 +1090,7 @@ static int tb_dp_read_dprx(struct tb_tunnel *tunnel, u32 *rate, u32 *lanes,
*rate = tb_dp_cap_get_rate(val);
*lanes = tb_dp_cap_get_lanes(val);
- tb_port_dbg(in, "consumed bandwidth through DPRX %d Mb/s\n",
- tb_dp_bandwidth(*rate, *lanes));
+ tb_tunnel_dbg(tunnel, "DPRX read done\n");
return 0;
}
usleep_range(100, 150);
@@ -1073,9 +1127,6 @@ static int tb_dp_read_cap(struct tb_tunnel *tunnel, unsigned int cap, u32 *rate,
*rate = tb_dp_cap_get_rate(val);
*lanes = tb_dp_cap_get_lanes(val);
-
- tb_port_dbg(in, "bandwidth from %#x capability %d Mb/s\n", cap,
- tb_dp_bandwidth(*rate, *lanes));
return 0;
}
@@ -1092,7 +1143,7 @@ static int tb_dp_maximum_bandwidth(struct tb_tunnel *tunnel, int *max_up,
if (ret < 0)
return ret;
- if (in->sw->config.depth < tunnel->dst_port->sw->config.depth) {
+ if (tb_port_path_direction_downstream(in, tunnel->dst_port)) {
*max_up = 0;
*max_down = ret;
} else {
@@ -1150,7 +1201,7 @@ static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
return 0;
}
- if (in->sw->config.depth < tunnel->dst_port->sw->config.depth) {
+ if (tb_port_path_direction_downstream(in, tunnel->dst_port)) {
*consumed_up = 0;
*consumed_down = tb_dp_bandwidth(rate, lanes);
} else {
@@ -1172,7 +1223,7 @@ static void tb_dp_init_aux_credits(struct tb_path_hop *hop)
hop->initial_credits = 1;
}
-static void tb_dp_init_aux_path(struct tb_path *path)
+static void tb_dp_init_aux_path(struct tb_path *path, bool pm_support)
{
struct tb_path_hop *hop;
@@ -1180,11 +1231,14 @@ static void tb_dp_init_aux_path(struct tb_path *path)
path->egress_shared_buffer = TB_PATH_NONE;
path->ingress_fc_enable = TB_PATH_ALL;
path->ingress_shared_buffer = TB_PATH_NONE;
- path->priority = 2;
- path->weight = 1;
+ path->priority = TB_DP_AUX_PRIORITY;
+ path->weight = TB_DP_AUX_WEIGHT;
- tb_path_for_each_hop(path, hop)
+ tb_path_for_each_hop(path, hop) {
tb_dp_init_aux_credits(hop);
+ if (pm_support)
+ tb_init_pm_support(hop);
+ }
}
static int tb_dp_init_video_credits(struct tb_path_hop *hop)
@@ -1216,7 +1270,7 @@ static int tb_dp_init_video_credits(struct tb_path_hop *hop)
return 0;
}
-static int tb_dp_init_video_path(struct tb_path *path)
+static int tb_dp_init_video_path(struct tb_path *path, bool pm_support)
{
struct tb_path_hop *hop;
@@ -1224,8 +1278,8 @@ static int tb_dp_init_video_path(struct tb_path *path)
path->egress_shared_buffer = TB_PATH_NONE;
path->ingress_fc_enable = TB_PATH_NONE;
path->ingress_shared_buffer = TB_PATH_NONE;
- path->priority = 1;
- path->weight = 1;
+ path->priority = TB_DP_VIDEO_PRIORITY;
+ path->weight = TB_DP_VIDEO_WEIGHT;
tb_path_for_each_hop(path, hop) {
int ret;
@@ -1233,6 +1287,8 @@ static int tb_dp_init_video_path(struct tb_path *path)
ret = tb_dp_init_video_credits(hop);
if (ret)
return ret;
+ if (pm_support)
+ tb_init_pm_support(hop);
}
return 0;
@@ -1253,8 +1309,9 @@ static void tb_dp_dump(struct tb_tunnel *tunnel)
rate = tb_dp_cap_get_rate(dp_cap);
lanes = tb_dp_cap_get_lanes(dp_cap);
- tb_port_dbg(in, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
- rate, lanes, tb_dp_bandwidth(rate, lanes));
+ tb_tunnel_dbg(tunnel,
+ "DP IN maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
+ rate, lanes, tb_dp_bandwidth(rate, lanes));
out = tunnel->dst_port;
@@ -1265,8 +1322,9 @@ static void tb_dp_dump(struct tb_tunnel *tunnel)
rate = tb_dp_cap_get_rate(dp_cap);
lanes = tb_dp_cap_get_lanes(dp_cap);
- tb_port_dbg(out, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
- rate, lanes, tb_dp_bandwidth(rate, lanes));
+ tb_tunnel_dbg(tunnel,
+ "DP OUT maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
+ rate, lanes, tb_dp_bandwidth(rate, lanes));
if (tb_port_read(in, &dp_cap, TB_CFG_PORT,
in->cap_adap + DP_REMOTE_CAP, 1))
@@ -1275,8 +1333,8 @@ static void tb_dp_dump(struct tb_tunnel *tunnel)
rate = tb_dp_cap_get_rate(dp_cap);
lanes = tb_dp_cap_get_lanes(dp_cap);
- tb_port_dbg(in, "reduced bandwidth %u Mb/s x%u = %u Mb/s\n",
- rate, lanes, tb_dp_bandwidth(rate, lanes));
+ tb_tunnel_dbg(tunnel, "reduced bandwidth %u Mb/s x%u = %u Mb/s\n",
+ rate, lanes, tb_dp_bandwidth(rate, lanes));
}
/**
@@ -1322,7 +1380,7 @@ struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in,
goto err_free;
}
tunnel->paths[TB_DP_VIDEO_PATH_OUT] = path;
- if (tb_dp_init_video_path(tunnel->paths[TB_DP_VIDEO_PATH_OUT]))
+ if (tb_dp_init_video_path(tunnel->paths[TB_DP_VIDEO_PATH_OUT], false))
goto err_free;
path = tb_path_discover(in, TB_DP_AUX_TX_HOPID, NULL, -1, NULL, "AUX TX",
@@ -1330,14 +1388,14 @@ struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in,
if (!path)
goto err_deactivate;
tunnel->paths[TB_DP_AUX_PATH_OUT] = path;
- tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_OUT]);
+ tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_OUT], false);
path = tb_path_discover(tunnel->dst_port, -1, in, TB_DP_AUX_RX_HOPID,
&port, "AUX RX", alloc_hopid);
if (!path)
goto err_deactivate;
tunnel->paths[TB_DP_AUX_PATH_IN] = path;
- tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_IN]);
+ tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_IN], false);
/* Validate that the tunnel is complete */
if (!tb_port_is_dpout(tunnel->dst_port)) {
@@ -1392,6 +1450,7 @@ struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
struct tb_tunnel *tunnel;
struct tb_path **paths;
struct tb_path *path;
+ bool pm_support;
if (WARN_ON(!in->cap_adap || !out->cap_adap))
return NULL;
@@ -1413,26 +1472,27 @@ struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
tunnel->max_down = max_down;
paths = tunnel->paths;
+ pm_support = usb4_switch_version(in->sw) >= 2;
path = tb_path_alloc(tb, in, TB_DP_VIDEO_HOPID, out, TB_DP_VIDEO_HOPID,
link_nr, "Video");
if (!path)
goto err_free;
- tb_dp_init_video_path(path);
+ tb_dp_init_video_path(path, pm_support);
paths[TB_DP_VIDEO_PATH_OUT] = path;
path = tb_path_alloc(tb, in, TB_DP_AUX_TX_HOPID, out,
TB_DP_AUX_TX_HOPID, link_nr, "AUX TX");
if (!path)
goto err_free;
- tb_dp_init_aux_path(path);
+ tb_dp_init_aux_path(path, pm_support);
paths[TB_DP_AUX_PATH_OUT] = path;
path = tb_path_alloc(tb, out, TB_DP_AUX_RX_HOPID, in,
TB_DP_AUX_RX_HOPID, link_nr, "AUX RX");
if (!path)
goto err_free;
- tb_dp_init_aux_path(path);
+ tb_dp_init_aux_path(path, pm_support);
paths[TB_DP_AUX_PATH_IN] = path;
return tunnel;
@@ -1497,8 +1557,8 @@ static int tb_dma_init_rx_path(struct tb_path *path, unsigned int credits)
path->ingress_fc_enable = TB_PATH_ALL;
path->egress_shared_buffer = TB_PATH_NONE;
path->ingress_shared_buffer = TB_PATH_NONE;
- path->priority = 5;
- path->weight = 1;
+ path->priority = TB_DMA_PRIORITY;
+ path->weight = TB_DMA_WEIGHT;
path->clear_fc = true;
/*
@@ -1531,8 +1591,8 @@ static int tb_dma_init_tx_path(struct tb_path *path, unsigned int credits)
path->ingress_fc_enable = TB_PATH_ALL;
path->egress_shared_buffer = TB_PATH_NONE;
path->ingress_shared_buffer = TB_PATH_NONE;
- path->priority = 5;
- path->weight = 1;
+ path->priority = TB_DMA_PRIORITY;
+ path->weight = TB_DMA_WEIGHT;
path->clear_fc = true;
tb_path_for_each_hop(path, hop) {
@@ -1758,14 +1818,23 @@ static int tb_usb3_activate(struct tb_tunnel *tunnel, bool activate)
static int tb_usb3_consumed_bandwidth(struct tb_tunnel *tunnel,
int *consumed_up, int *consumed_down)
{
- int pcie_enabled = tb_acpi_may_tunnel_pcie();
+ struct tb_port *port = tb_upstream_port(tunnel->dst_port->sw);
+ int pcie_weight = tb_acpi_may_tunnel_pcie() ? TB_PCI_WEIGHT : 0;
/*
* PCIe tunneling, if enabled, affects the USB3 bandwidth so
* take that into account here.
*/
- *consumed_up = tunnel->allocated_up * (3 + pcie_enabled) / 3;
- *consumed_down = tunnel->allocated_down * (3 + pcie_enabled) / 3;
+ *consumed_up = tunnel->allocated_up *
+ (TB_USB3_WEIGHT + pcie_weight) / TB_USB3_WEIGHT;
+ *consumed_down = tunnel->allocated_down *
+ (TB_USB3_WEIGHT + pcie_weight) / TB_USB3_WEIGHT;
+
+ if (tb_port_get_link_generation(port) >= 4) {
+ *consumed_up = max(*consumed_up, USB4_V2_USB3_MIN_BANDWIDTH);
+ *consumed_down = max(*consumed_down, USB4_V2_USB3_MIN_BANDWIDTH);
+ }
+
return 0;
}
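Worked example, assuming PCIe tunneling is enabled (pcie_weight = TB_PCI_WEIGHT = 1) and 1000 Mb/s allocated in each direction:

	consumed = 1000 * (TB_USB3_WEIGHT + pcie_weight) / TB_USB3_WEIGHT
	         = 1000 * (2 + 1) / 2
	         = 1500 Mb/s

On a Gen 4 link the result is then raised to at least USB4_V2_USB3_MIN_BANDWIDTH (3000 Mb/s).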
@@ -1790,17 +1859,10 @@ static void tb_usb3_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
{
int ret, max_rate, allocate_up, allocate_down;
- ret = usb4_usb3_port_actual_link_rate(tunnel->src_port);
+ ret = tb_usb3_max_link_rate(tunnel->dst_port, tunnel->src_port);
if (ret < 0) {
- tb_tunnel_warn(tunnel, "failed to read actual link rate\n");
+ tb_tunnel_warn(tunnel, "failed to read maximum link rate\n");
return;
- } else if (!ret) {
- /* Use maximum link rate if the link valid is not set */
- ret = tb_usb3_max_link_rate(tunnel->dst_port, tunnel->src_port);
- if (ret < 0) {
- tb_tunnel_warn(tunnel, "failed to read maximum link rate\n");
- return;
- }
}
/*
@@ -1871,8 +1933,8 @@ static void tb_usb3_init_path(struct tb_path *path)
path->egress_shared_buffer = TB_PATH_NONE;
path->ingress_fc_enable = TB_PATH_ALL;
path->ingress_shared_buffer = TB_PATH_NONE;
- path->priority = 3;
- path->weight = 3;
+ path->priority = TB_USB3_PRIORITY;
+ path->weight = TB_USB3_WEIGHT;
path->drop_packages = 0;
tb_path_for_each_hop(path, hop)
@@ -2387,3 +2449,8 @@ void tb_tunnel_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
tunnel->reclaim_available_bandwidth(tunnel, available_up,
available_down);
}
+
+const char *tb_tunnel_type_name(const struct tb_tunnel *tunnel)
+{
+ return tb_tunnel_names[tunnel->type];
+}
diff --git a/drivers/thunderbolt/tunnel.h b/drivers/thunderbolt/tunnel.h
index bf690f7beeee..b4cff5482112 100644
--- a/drivers/thunderbolt/tunnel.h
+++ b/drivers/thunderbolt/tunnel.h
@@ -80,6 +80,8 @@ struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down,
bool alloc_hopid);
struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
struct tb_port *down);
+bool tb_tunnel_reserved_pci(struct tb_port *port, int *reserved_up,
+ int *reserved_down);
struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in,
bool alloc_hopid);
struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
@@ -137,5 +139,27 @@ static inline bool tb_tunnel_is_usb3(const struct tb_tunnel *tunnel)
return tunnel->type == TB_TUNNEL_USB3;
}
-#endif
+const char *tb_tunnel_type_name(const struct tb_tunnel *tunnel);
+
+#define __TB_TUNNEL_PRINT(level, tunnel, fmt, arg...) \
+ do { \
+ struct tb_tunnel *__tunnel = (tunnel); \
+ level(__tunnel->tb, "%llx:%u <-> %llx:%u (%s): " fmt, \
+ tb_route(__tunnel->src_port->sw), \
+ __tunnel->src_port->port, \
+ tb_route(__tunnel->dst_port->sw), \
+ __tunnel->dst_port->port, \
+ tb_tunnel_type_name(__tunnel), \
+ ## arg); \
+ } while (0)
+#define tb_tunnel_WARN(tunnel, fmt, arg...) \
+ __TB_TUNNEL_PRINT(tb_WARN, tunnel, fmt, ##arg)
+#define tb_tunnel_warn(tunnel, fmt, arg...) \
+ __TB_TUNNEL_PRINT(tb_warn, tunnel, fmt, ##arg)
+#define tb_tunnel_info(tunnel, fmt, arg...) \
+ __TB_TUNNEL_PRINT(tb_info, tunnel, fmt, ##arg)
+#define tb_tunnel_dbg(tunnel, fmt, arg...) \
+ __TB_TUNNEL_PRINT(tb_dbg, tunnel, fmt, ##arg)
+
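With the macros exported here, any file that includes tunnel.h gets tunnel-prefixed logging; a hypothetical call and the rough shape of its output:

	/* Illustrative only; routes and adapter numbers depend on the topology. */
	tb_tunnel_dbg(tunnel, "activating\n");
	/* -> "... 0:3 <-> 301:1 (DP): activating" */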
+#endif
diff --git a/drivers/thunderbolt/usb4.c b/drivers/thunderbolt/usb4.c
index 05ddb224c464..4277733d0021 100644
--- a/drivers/thunderbolt/usb4.c
+++ b/drivers/thunderbolt/usb4.c
@@ -1455,6 +1455,112 @@ bool usb4_port_clx_supported(struct tb_port *port)
}
/**
+ * usb4_port_asym_supported() - Does the port support asymmetric link
+ * @port: USB4 port
+ *
+ * Checks if the port and the cable support asymmetric link and returns
+ * %true in that case.
+ */
+bool usb4_port_asym_supported(struct tb_port *port)
+{
+ u32 val;
+
+ if (!port->cap_usb4)
+ return false;
+
+ if (tb_port_read(port, &val, TB_CFG_PORT, port->cap_usb4 + PORT_CS_18, 1))
+ return false;
+
+ return !!(val & PORT_CS_18_CSA);
+}
+
+/**
+ * usb4_port_asym_set_link_width() - Set link width to asymmetric or symmetric
+ * @port: USB4 port
+ * @width: Asymmetric width to configure
+ *
+ * Sets USB4 port link width to @width. Can be called for widths where
+ * usb4_port_asym_width_supported() returned %true.
+ */
+int usb4_port_asym_set_link_width(struct tb_port *port, enum tb_link_width width)
+{
+ u32 val;
+ int ret;
+
+ if (!port->cap_phy)
+ return -EINVAL;
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_phy + LANE_ADP_CS_1, 1);
+ if (ret)
+ return ret;
+
+ val &= ~LANE_ADP_CS_1_TARGET_WIDTH_ASYM_MASK;
+ switch (width) {
+ case TB_LINK_WIDTH_DUAL:
+ val |= FIELD_PREP(LANE_ADP_CS_1_TARGET_WIDTH_ASYM_MASK,
+ LANE_ADP_CS_1_TARGET_WIDTH_ASYM_DUAL);
+ break;
+ case TB_LINK_WIDTH_ASYM_TX:
+ val |= FIELD_PREP(LANE_ADP_CS_1_TARGET_WIDTH_ASYM_MASK,
+ LANE_ADP_CS_1_TARGET_WIDTH_ASYM_TX);
+ break;
+ case TB_LINK_WIDTH_ASYM_RX:
+ val |= FIELD_PREP(LANE_ADP_CS_1_TARGET_WIDTH_ASYM_MASK,
+ LANE_ADP_CS_1_TARGET_WIDTH_ASYM_RX);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return tb_port_write(port, &val, TB_CFG_PORT,
+ port->cap_phy + LANE_ADP_CS_1, 1);
+}
+
+/**
+ * usb4_port_asym_start() - Start symmetry change and wait for completion
+ * @port: USB4 port
+ *
+ * Starts symmetry change of the link to asymmetric or symmetric
+ * (according to what was previously set with tb_port_set_link_width())
+ * and waits for the change to complete.
+ *
+ * Returns %0 in case of success, %-ETIMEDOUT in case of timeout or
+ * a negative errno in case of a failure.
+ */
+int usb4_port_asym_start(struct tb_port *port)
+{
+ int ret;
+ u32 val;
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_usb4 + PORT_CS_19, 1);
+ if (ret)
+ return ret;
+
+ val &= ~PORT_CS_19_START_ASYM;
+ val |= FIELD_PREP(PORT_CS_19_START_ASYM, 1);
+
+ ret = tb_port_write(port, &val, TB_CFG_PORT,
+ port->cap_usb4 + PORT_CS_19, 1);
+ if (ret)
+ return ret;
+
+ /*
+ * Wait for PORT_CS_19_START_ASYM to be 0. This means the USB4
+ * port started the symmetry transition.
+ */
+ ret = usb4_port_wait_for_bit(port, port->cap_usb4 + PORT_CS_19,
+ PORT_CS_19_START_ASYM, 0, 1000);
+ if (ret)
+ return ret;
+
+ /* Then wait for the transition to be completed */
+ return usb4_port_wait_for_bit(port, port->cap_usb4 + PORT_CS_18,
+ PORT_CS_18_TIP, 0, 5000);
+}
+
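Taken together, a plausible calling sequence for a width change (the real orchestration lives in tb_switch_set_link_width(); this is only a sketch):

	/* Sketch, assuming "up" is the upstream lane 0 adapter and the caller
	 * already checked usb4_port_asym_supported(up).
	 */
	ret = usb4_port_asym_set_link_width(up, TB_LINK_WIDTH_ASYM_TX);
	if (!ret)
		ret = usb4_port_asym_start(up);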
+/**
* usb4_port_margining_caps() - Read USB4 port margining capabilities
* @port: USB4 port
* @caps: Array with at least two elements to hold the results
@@ -1946,35 +2052,6 @@ int usb4_usb3_port_max_link_rate(struct tb_port *port)
return usb4_usb3_port_max_bandwidth(port, ret);
}
-/**
- * usb4_usb3_port_actual_link_rate() - Established USB3 link rate
- * @port: USB3 adapter port
- *
- * Return actual established link rate of a USB3 adapter in Mb/s. If the
- * link is not up returns %0 and negative errno in case of failure.
- */
-int usb4_usb3_port_actual_link_rate(struct tb_port *port)
-{
- int ret, lr;
- u32 val;
-
- if (!tb_port_is_usb3_down(port) && !tb_port_is_usb3_up(port))
- return -EINVAL;
-
- ret = tb_port_read(port, &val, TB_CFG_PORT,
- port->cap_adap + ADP_USB3_CS_4, 1);
- if (ret)
- return ret;
-
- if (!(val & ADP_USB3_CS_4_ULV))
- return 0;
-
- lr = val & ADP_USB3_CS_4_ALR_MASK;
- ret = lr == ADP_USB3_CS_4_ALR_20G ? 20000 : 10000;
-
- return usb4_usb3_port_max_bandwidth(port, ret);
-}
-
static int usb4_usb3_port_cm_request(struct tb_port *port, bool request)
{
int ret;