Diffstat (limited to 'drivers/thunderbolt')
-rw-r--r--  drivers/thunderbolt/Kconfig      |   17
-rw-r--r--  drivers/thunderbolt/Makefile     |    1
-rw-r--r--  drivers/thunderbolt/acpi.c       |   40
-rw-r--r--  drivers/thunderbolt/ctl.c        |   30
-rw-r--r--  drivers/thunderbolt/ctl.h        |    5
-rw-r--r--  drivers/thunderbolt/debugfs.c    | 1494
-rw-r--r--  drivers/thunderbolt/domain.c     |   27
-rw-r--r--  drivers/thunderbolt/eeprom.c     |   78
-rw-r--r--  drivers/thunderbolt/icm.c        |    3
-rw-r--r--  drivers/thunderbolt/lc.c         |   45
-rw-r--r--  drivers/thunderbolt/nhi.c        |   31
-rw-r--r--  drivers/thunderbolt/nhi.h        |    4
-rw-r--r--  drivers/thunderbolt/nvm.c        |    4
-rw-r--r--  drivers/thunderbolt/path.c       |   17
-rw-r--r--  drivers/thunderbolt/quirks.c     |   14
-rw-r--r--  drivers/thunderbolt/retimer.c    |   89
-rw-r--r--  drivers/thunderbolt/sb_regs.h    |   62
-rw-r--r--  drivers/thunderbolt/switch.c     |  189
-rw-r--r--  drivers/thunderbolt/tb.c         | 1166
-rw-r--r--  drivers/thunderbolt/tb.h         |  124
-rw-r--r--  drivers/thunderbolt/tb_msgs.h    |    6
-rw-r--r--  drivers/thunderbolt/tb_regs.h    |    6
-rw-r--r--  drivers/thunderbolt/test.c       |   90
-rw-r--r--  drivers/thunderbolt/trace.h      |  197
-rw-r--r--  drivers/thunderbolt/tunnel.c     |  526
-rw-r--r--  drivers/thunderbolt/tunnel.h     |   69
-rw-r--r--  drivers/thunderbolt/usb4.c       |  325
-rw-r--r--  drivers/thunderbolt/usb4_port.c  |    2
-rw-r--r--  drivers/thunderbolt/xdomain.c    |   20
29 files changed, 3403 insertions(+), 1278 deletions(-)
diff --git a/drivers/thunderbolt/Kconfig b/drivers/thunderbolt/Kconfig
index 448fd2ec8f6e..0abdb69ee9f4 100644
--- a/drivers/thunderbolt/Kconfig
+++ b/drivers/thunderbolt/Kconfig
@@ -22,20 +22,25 @@ config USB4_DEBUGFS_WRITE
bool "Enable write by debugfs to configuration spaces (DANGEROUS)"
help
Enables writing to device configuration registers through
- debugfs interface.
+ debugfs interface. You can use tools such as tbtools (the
+ Thunderbolt/USB4 debugging tools) to access these registers. For more
+ information see:
+
+ https://github.com/intel/tbtools
Only enable this if you know what you are doing! Never enable
this for production systems or distro kernels.
config USB4_DEBUGFS_MARGINING
- bool "Expose receiver lane margining operations under USB4 ports (DANGEROUS)"
+ bool "Expose receiver lane margining operations under USB4 ports and retimers (DANGEROUS)"
depends on DEBUG_FS
depends on USB4_DEBUGFS_WRITE
help
- Enables hardware and software based receiver lane margining support
- under each USB4 port. Used for electrical quality and robustness
- validation during manufacturing. Should not be enabled by distro
- kernels.
+ Enables hardware and software based receiver lane margining
+ support under each USB4 port and retimer, including retimers
+ on the other side of the cable. Used for electrical quality
+ and robustness validation during manufacturing. Should not be
+ enabled by distro kernels.
config USB4_KUNIT_TEST
bool "KUnit tests" if !KUNIT_ALL_TESTS
diff --git a/drivers/thunderbolt/Makefile b/drivers/thunderbolt/Makefile
index c8b3d7b78098..b44b32dcb832 100644
--- a/drivers/thunderbolt/Makefile
+++ b/drivers/thunderbolt/Makefile
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
+ccflags-y := -I$(src)
obj-${CONFIG_USB4} := thunderbolt.o
thunderbolt-objs := nhi.o nhi_ops.o ctl.o tb.o switch.o cap.o path.o tunnel.o eeprom.o
thunderbolt-objs += domain.o dma_port.o icm.o property.o xdomain.o lc.o tmu.o usb4.o
diff --git a/drivers/thunderbolt/acpi.c b/drivers/thunderbolt/acpi.c
index c9b6bb46111c..d2a0054217da 100644
--- a/drivers/thunderbolt/acpi.c
+++ b/drivers/thunderbolt/acpi.c
@@ -32,40 +32,20 @@ static acpi_status tb_acpi_add_link(acpi_handle handle, u32 level, void *data,
goto out_put;
/*
- * Try to find physical device walking upwards to the hierarcy.
- * We need to do this because the xHCI driver might not yet be
- * bound so the USB3 SuperSpeed ports are not yet created.
+ * Ignore USB3 ports here as the USB core will set up device links
+ * between tunneled USB3 devices and the NHI host during USB device
+ * creation. USB3 ports might not even have a physical device yet if
+ * the xHCI driver is not bound.
*/
- do {
- dev = acpi_get_first_physical_node(adev);
- if (dev)
- break;
-
- adev = acpi_dev_parent(adev);
- } while (adev);
-
- /*
- * Check that the device is PCIe. This is because USB3
- * SuperSpeed ports have this property and they are not power
- * managed with the xHCI and the SuperSpeed hub so we create the
- * link from xHCI instead.
- */
- while (dev && !dev_is_pci(dev))
- dev = dev->parent;
-
- if (!dev)
+ dev = acpi_get_first_physical_node(adev);
+ if (!dev || !dev_is_pci(dev))
goto out_put;
- /*
- * Check that this actually matches the type of device we
- * expect. It should either be xHCI or PCIe root/downstream
- * port.
- */
+ /* Check that this matches a PCIe root/downstream port. */
pdev = to_pci_dev(dev);
- if (pdev->class == PCI_CLASS_SERIAL_USB_XHCI ||
- (pci_is_pcie(pdev) &&
- (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT ||
- pci_pcie_type(pdev) == PCI_EXP_TYPE_DOWNSTREAM))) {
+ if (pci_is_pcie(pdev) &&
+ (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT ||
+ pci_pcie_type(pdev) == PCI_EXP_TYPE_DOWNSTREAM)) {
const struct device_link *link;
/*
diff --git a/drivers/thunderbolt/ctl.c b/drivers/thunderbolt/ctl.c
index d997a4c545f7..dc1f456736dc 100644
--- a/drivers/thunderbolt/ctl.c
+++ b/drivers/thunderbolt/ctl.c
@@ -15,6 +15,8 @@
#include "ctl.h"
+#define CREATE_TRACE_POINTS
+#include "trace.h"
#define TB_CTL_RX_PKG_COUNT 10
#define TB_CTL_RETRIES 4
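
The new trace.h added by this series (197 lines in the diffstat) is not shown in these hunks. For context, the kernel tracepoint convention used here requires CREATE_TRACE_POINTS to be defined in exactly one compilation unit before including the trace header, and the Makefile change above (ccflags-y := -I$(src)) is what lets <trace/define_trace.h> locate the header in the driver directory. A minimal sketch of what such a header typically looks like follows; the event layout and field names are illustrative only, not the actual contents of drivers/thunderbolt/trace.h:

/* Illustrative sketch only -- not the real drivers/thunderbolt/trace.h */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM thunderbolt

#if !defined(TB_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
#define TB_TRACE_H_

#include <linux/tracepoint.h>

TRACE_EVENT(tb_tx,
	/* Matches the trace_tb_tx(ctl->index, type, data, len) call sites below */
	TP_PROTO(int index, u32 type, const void *data, size_t size),
	TP_ARGS(index, type, data, size),
	TP_STRUCT__entry(
		__field(int, index)
		__field(u32, type)
		__dynamic_array(u32, data, size / 4)
	),
	TP_fast_assign(
		__entry->index = index;
		__entry->type = type;
		memcpy(__get_dynamic_array(data), data, size);
	),
	TP_printk("domain=%d type=%#x", __entry->index, __entry->type)
);

#endif /* TB_TRACE_H_ */

/* This part must be outside the include guard */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE trace
#include <trace/define_trace.h>

With something of that shape in place, the trace_tb_tx()/trace_tb_rx()/trace_tb_event() hooks added in the hunks below record each control packet together with the domain number carried in the new ctl->index field.
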
@@ -32,6 +34,7 @@
* @timeout_msec: Default timeout for non-raw control messages
* @callback: Callback called when hotplug message is received
* @callback_data: Data passed to @callback
+ * @index: Domain number. This will be output with the trace record.
*/
struct tb_ctl {
struct tb_nhi *nhi;
@@ -47,6 +50,8 @@ struct tb_ctl {
int timeout_msec;
event_cb callback;
void *callback_data;
+
+ int index;
};
@@ -65,6 +70,9 @@ struct tb_ctl {
#define tb_ctl_dbg(ctl, format, arg...) \
dev_dbg(&(ctl)->nhi->pdev->dev, format, ## arg)
+#define tb_ctl_dbg_once(ctl, format, arg...) \
+ dev_dbg_once(&(ctl)->nhi->pdev->dev, format, ## arg)
+
static DECLARE_WAIT_QUEUE_HEAD(tb_cfg_request_cancel_queue);
/* Serializes access to request kref_get/put */
static DEFINE_MUTEX(tb_cfg_request_lock);
@@ -260,7 +268,7 @@ static struct tb_cfg_result parse_header(const struct ctl_pkg *pkg, u32 len,
return res;
}
-static void tb_cfg_print_error(struct tb_ctl *ctl,
+static void tb_cfg_print_error(struct tb_ctl *ctl, enum tb_cfg_space space,
const struct tb_cfg_result *res)
{
WARN_ON(res->err != 1);
@@ -274,8 +282,8 @@ static void tb_cfg_print_error(struct tb_ctl *ctl,
* Invalid cfg_space/offset/length combination in
* cfg_read/cfg_write.
*/
- tb_ctl_dbg(ctl, "%llx:%x: invalid config space or offset\n",
- res->response_route, res->response_port);
+ tb_ctl_dbg_once(ctl, "%llx:%x: invalid config space (%u) or offset\n",
+ res->response_route, res->response_port, space);
return;
case TB_CFG_ERROR_NO_SUCH_PORT:
/*
@@ -369,6 +377,9 @@ static int tb_ctl_tx(struct tb_ctl *ctl, const void *data, size_t len,
pkg->frame.size = len + 4;
pkg->frame.sof = type;
pkg->frame.eof = type;
+
+ trace_tb_tx(ctl->index, type, data, len);
+
cpu_to_be32_array(pkg->buffer, data, len / 4);
*(__be32 *) (pkg->buffer + len) = tb_crc(pkg->buffer, len);
@@ -384,6 +395,7 @@ static int tb_ctl_tx(struct tb_ctl *ctl, const void *data, size_t len,
static bool tb_ctl_handle_event(struct tb_ctl *ctl, enum tb_cfg_pkg_type type,
struct ctl_pkg *pkg, size_t size)
{
+ trace_tb_event(ctl->index, type, pkg->buffer, size);
return ctl->callback(ctl->callback_data, type, pkg->buffer, size);
}
@@ -489,6 +501,9 @@ static void tb_ctl_rx_callback(struct tb_ring *ring, struct ring_frame *frame,
* triggered from messing with the active requests.
*/
req = tb_cfg_request_find(pkg->ctl, pkg);
+
+ trace_tb_rx(pkg->ctl->index, frame->eof, pkg->buffer, frame->size, !req);
+
if (req) {
if (req->copy(req, pkg))
schedule_work(&req->work);
@@ -614,6 +629,7 @@ struct tb_cfg_result tb_cfg_request_sync(struct tb_ctl *ctl,
/**
* tb_ctl_alloc() - allocate a control channel
* @nhi: Pointer to NHI
+ * @index: Domain number
* @timeout_msec: Default timeout used with non-raw control messages
* @cb: Callback called for plug events
* @cb_data: Data passed to @cb
@@ -622,14 +638,16 @@ struct tb_cfg_result tb_cfg_request_sync(struct tb_ctl *ctl,
*
* Return: Returns a pointer on success or NULL on failure.
*/
-struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, int timeout_msec, event_cb cb,
- void *cb_data)
+struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, int index, int timeout_msec,
+ event_cb cb, void *cb_data)
{
int i;
struct tb_ctl *ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);
if (!ctl)
return NULL;
+
ctl->nhi = nhi;
+ ctl->index = index;
ctl->timeout_msec = timeout_msec;
ctl->callback = cb;
ctl->callback_data = cb_data;
@@ -1057,7 +1075,7 @@ static int tb_cfg_get_error(struct tb_ctl *ctl, enum tb_cfg_space space,
res->tb_error == TB_CFG_ERROR_INVALID_CONFIG_SPACE)
return -ENODEV;
- tb_cfg_print_error(ctl, res);
+ tb_cfg_print_error(ctl, space, res);
if (res->tb_error == TB_CFG_ERROR_LOCK)
return -EACCES;
diff --git a/drivers/thunderbolt/ctl.h b/drivers/thunderbolt/ctl.h
index eec5c953c743..7e08ca8f0895 100644
--- a/drivers/thunderbolt/ctl.h
+++ b/drivers/thunderbolt/ctl.h
@@ -21,8 +21,8 @@ struct tb_ctl;
typedef bool (*event_cb)(void *data, enum tb_cfg_pkg_type type,
const void *buf, size_t size);
-struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, int timeout_msec, event_cb cb,
- void *cb_data);
+struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, int index, int timeout_msec,
+ event_cb cb, void *cb_data);
void tb_ctl_start(struct tb_ctl *ctl);
void tb_ctl_stop(struct tb_ctl *ctl);
void tb_ctl_free(struct tb_ctl *ctl);
@@ -140,5 +140,4 @@ int tb_cfg_write(struct tb_ctl *ctl, const void *buffer, u64 route, u32 port,
enum tb_cfg_space space, u32 offset, u32 length);
int tb_cfg_get_upstream_port(struct tb_ctl *ctl, u64 route);
-
#endif
diff --git a/drivers/thunderbolt/debugfs.c b/drivers/thunderbolt/debugfs.c
index e324cd899719..f8328ca7e22e 100644
--- a/drivers/thunderbolt/debugfs.c
+++ b/drivers/thunderbolt/debugfs.c
@@ -7,7 +7,10 @@
* Mika Westerberg <mika.westerberg@linux.intel.com>
*/
+#include <linux/array_size.h>
+#include <linux/bitfield.h>
#include <linux/debugfs.h>
+#include <linux/delay.h>
#include <linux/pm_runtime.h>
#include <linux/uaccess.h>
@@ -33,6 +36,70 @@
#define COUNTER_SET_LEN 3
+/*
+ * The USB4 spec doesn't specify a dwell range; the range of 100 ms to
+ * 500 ms was found to give good results.
+ */
+#define MIN_DWELL_TIME 100 /* ms */
+#define MAX_DWELL_TIME 500 /* ms */
+#define DWELL_SAMPLE_INTERVAL 10
+
+enum usb4_margin_cap_voltage_indp {
+ USB4_MARGIN_CAP_VOLTAGE_INDP_GEN_2_3_MIN,
+ USB4_MARGIN_CAP_VOLTAGE_INDP_GEN_2_3_HL,
+ USB4_MARGIN_CAP_VOLTAGE_INDP_GEN_2_3_BOTH,
+ USB4_MARGIN_CAP_VOLTAGE_INDP_GEN_4_MIN,
+ USB4_MARGIN_CAP_VOLTAGE_INDP_GEN_4_BOTH,
+ USB4_MARGIN_CAP_VOLTAGE_INDP_UNKNOWN,
+};
+
+enum usb4_margin_cap_time_indp {
+ USB4_MARGIN_CAP_TIME_INDP_GEN_2_3_MIN,
+ USB4_MARGIN_CAP_TIME_INDP_GEN_2_3_LR,
+ USB4_MARGIN_CAP_TIME_INDP_GEN_2_3_BOTH,
+ USB4_MARGIN_CAP_TIME_INDP_GEN_4_MIN,
+ USB4_MARGIN_CAP_TIME_INDP_GEN_4_BOTH,
+ USB4_MARGIN_CAP_TIME_INDP_UNKNOWN,
+};
+
+/* Sideband registers and their sizes as defined in the USB4 spec */
+struct sb_reg {
+ unsigned int reg;
+ unsigned int size;
+};
+
+#define SB_MAX_SIZE 64
+
+/* Sideband registers for router */
+static const struct sb_reg port_sb_regs[] = {
+ { USB4_SB_VENDOR_ID, 4 },
+ { USB4_SB_PRODUCT_ID, 4 },
+ { USB4_SB_DEBUG_CONF, 4 },
+ { USB4_SB_DEBUG, 54 },
+ { USB4_SB_LRD_TUNING, 4 },
+ { USB4_SB_OPCODE, 4 },
+ { USB4_SB_METADATA, 4 },
+ { USB4_SB_LINK_CONF, 3 },
+ { USB4_SB_GEN23_TXFFE, 4 },
+ { USB4_SB_GEN4_TXFFE, 4 },
+ { USB4_SB_VERSION, 4 },
+ { USB4_SB_DATA, 64 },
+};
+
+/* Sideband registers for retimer */
+static const struct sb_reg retimer_sb_regs[] = {
+ { USB4_SB_VENDOR_ID, 4 },
+ { USB4_SB_PRODUCT_ID, 4 },
+ { USB4_SB_FW_VERSION, 4 },
+ { USB4_SB_LRD_TUNING, 4 },
+ { USB4_SB_OPCODE, 4 },
+ { USB4_SB_METADATA, 4 },
+ { USB4_SB_GEN23_TXFFE, 4 },
+ { USB4_SB_GEN4_TXFFE, 4 },
+ { USB4_SB_VERSION, 4 },
+ { USB4_SB_DATA, 64 },
+};
+
#define DEBUGFS_ATTR(__space, __write) \
static int __space ## _open(struct inode *inode, struct file *file) \
{ \
@@ -101,6 +168,13 @@ static bool parse_line(char **line, u32 *offs, u32 *val, int short_fmt_len,
* offset relative_offset cap_id vs_cap_id value\n
* v[0] v[1] v[2] v[3] v[4]
*
+ * For Path configuration space:
+ * Short format is: offset value\n
+ * v[0] v[1]
+ * Long format as produced from the read side:
+ * offset relative_offset in_hop_id value\n
+ * v[0] v[1] v[2] v[3]
+ *
* For Counter configuration space:
* Short format is: offset\n
* v[0]
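
As a concrete illustration of the short write format documented above, a userspace program could poke one path (hop) register as sketched below. This is only a sketch: the debugfs location, route string, port directory and file name, as well as the offset and value, are hypothetical and depend on the actual topology; the per-port files themselves are created elsewhere in this series.

/* Hypothetical userspace sketch: write "offset value" to the path file */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Assumed location: domain 0, route 1, adapter 1 -- adjust to your topology */
	const char *file = "/sys/kernel/debug/thunderbolt/0-1/port1/path";
	const char line[] = "0x8 0x80000000\n";	/* short format: offset value */
	int fd = open(file, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, line, strlen(line)) < 0)
		perror("write");
	close(fd);
	return 0;
}

On the kernel side such a write ends up in path_write_one() added just below, which reads the full double word pair, patches the requested dword and writes the pair back, as the spec requires.
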
@@ -124,14 +198,33 @@ static bool parse_line(char **line, u32 *offs, u32 *val, int short_fmt_len,
}
#if IS_ENABLED(CONFIG_USB4_DEBUGFS_WRITE)
+/*
+ * Path registers need to be written in double word pairs and both of them
+ * must be read before being written. This writes one double word in path
+ * config space following the spec flow.
+ */
+static int path_write_one(struct tb_port *port, u32 val, u32 offset)
+{
+ u32 index = offset % PATH_LEN;
+ u32 offs = offset - index;
+ u32 data[PATH_LEN];
+ int ret;
+
+ ret = tb_port_read(port, data, TB_CFG_HOPS, offs, PATH_LEN);
+ if (ret)
+ return ret;
+ data[index] = val;
+ return tb_port_write(port, data, TB_CFG_HOPS, offs, PATH_LEN);
+}
+
static ssize_t regs_write(struct tb_switch *sw, struct tb_port *port,
- const char __user *user_buf, size_t count,
- loff_t *ppos)
+ enum tb_cfg_space space, const char __user *user_buf,
+ size_t count, loff_t *ppos)
{
+ int long_fmt_len, ret = 0;
struct tb *tb = sw->tb;
char *line, *buf;
u32 val, offset;
- int ret = 0;
buf = validate_and_copy_from_user(user_buf, &count);
if (IS_ERR(buf))
@@ -147,12 +240,21 @@ static ssize_t regs_write(struct tb_switch *sw, struct tb_port *port,
/* User did hardware changes behind the driver's back */
add_taint(TAINT_USER, LOCKDEP_STILL_OK);
+ if (space == TB_CFG_HOPS)
+ long_fmt_len = 4;
+ else
+ long_fmt_len = 5;
+
line = buf;
- while (parse_line(&line, &offset, &val, 2, 5)) {
- if (port)
- ret = tb_port_write(port, &val, TB_CFG_PORT, offset, 1);
- else
+ while (parse_line(&line, &offset, &val, 2, long_fmt_len)) {
+ if (port) {
+ if (space == TB_CFG_HOPS)
+ ret = path_write_one(port, val, offset);
+ else
+ ret = tb_port_write(port, &val, space, offset, 1);
+ } else {
ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, offset, 1);
+ }
if (ret)
break;
}
@@ -173,7 +275,16 @@ static ssize_t port_regs_write(struct file *file, const char __user *user_buf,
struct seq_file *s = file->private_data;
struct tb_port *port = s->private;
- return regs_write(port->sw, port, user_buf, count, ppos);
+ return regs_write(port->sw, port, TB_CFG_PORT, user_buf, count, ppos);
+}
+
+static ssize_t path_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct tb_port *port = s->private;
+
+ return regs_write(port->sw, port, TB_CFG_HOPS, user_buf, count, ppos);
}
static ssize_t switch_regs_write(struct file *file, const char __user *user_buf,
@@ -182,18 +293,174 @@ static ssize_t switch_regs_write(struct file *file, const char __user *user_buf,
struct seq_file *s = file->private_data;
struct tb_switch *sw = s->private;
- return regs_write(sw, NULL, user_buf, count, ppos);
+ return regs_write(sw, NULL, TB_CFG_SWITCH, user_buf, count, ppos);
+}
+
+static bool parse_sb_line(char **line, u8 *reg, u8 *data, size_t data_size,
+ size_t *bytes_read)
+{
+ char *field, *token;
+ int i;
+
+ token = strsep(line, "\n");
+ if (!token)
+ return false;
+
+ /* Parse the register first */
+ field = strsep(&token, " ");
+ if (!field)
+ return false;
+ if (kstrtou8(field, 0, reg))
+ return false;
+
+ /* Then the values for the register, up to data_size */
+ for (i = 0; i < data_size; i++) {
+ field = strsep(&token, " ");
+ if (!field)
+ break;
+ if (kstrtou8(field, 0, &data[i]))
+ return false;
+ }
+
+ *bytes_read = i;
+ return true;
+}
+
+static ssize_t sb_regs_write(struct tb_port *port, const struct sb_reg *sb_regs,
+ size_t size, enum usb4_sb_target target, u8 index,
+ char *buf, size_t count, loff_t *ppos)
+{
+ u8 reg, data[SB_MAX_SIZE];
+ size_t bytes_read;
+ char *line = buf;
+
+ /* User did hardware changes behind the driver's back */
+ add_taint(TAINT_USER, LOCKDEP_STILL_OK);
+
+ /*
+ * For sideband registers we accept:
+ * reg b0 b1 b2...\n
+ *
+ * Here "reg" is the byte offset of the sideband register and "b0"..
+ * are the byte values. There can be fewer byte values than the register
+ * size; any remaining bytes are left untouched.
+ */
+ while (parse_sb_line(&line, &reg, data, ARRAY_SIZE(data), &bytes_read)) {
+ const struct sb_reg *sb_reg;
+ int ret;
+
+ /* At least one byte must be passed */
+ if (bytes_read < 1)
+ return -EINVAL;
+
+ /* Find the register */
+ sb_reg = NULL;
+ for (int i = 0; i < size; i++) {
+ if (sb_regs[i].reg == reg) {
+ sb_reg = &sb_regs[i];
+ break;
+ }
+ }
+
+ if (!sb_reg)
+ return -EINVAL;
+
+ if (bytes_read > sb_reg->size)
+ return -E2BIG;
+
+ ret = usb4_port_sb_write(port, target, index, sb_reg->reg, data,
+ bytes_read);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static ssize_t port_sb_regs_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct tb_port *port = s->private;
+ struct tb_switch *sw = port->sw;
+ struct tb *tb = sw->tb;
+ char *buf;
+ int ret;
+
+ buf = validate_and_copy_from_user(user_buf, &count);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+
+ pm_runtime_get_sync(&sw->dev);
+
+ if (mutex_lock_interruptible(&tb->lock)) {
+ ret = -ERESTARTSYS;
+ goto out;
+ }
+
+ ret = sb_regs_write(port, port_sb_regs, ARRAY_SIZE(port_sb_regs),
+ USB4_SB_TARGET_ROUTER, 0, buf, count, ppos);
+
+ mutex_unlock(&tb->lock);
+out:
+ pm_runtime_mark_last_busy(&sw->dev);
+ pm_runtime_put_autosuspend(&sw->dev);
+ free_page((unsigned long)buf);
+
+ return ret < 0 ? ret : count;
+}
+
+static ssize_t retimer_sb_regs_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct tb_retimer *rt = s->private;
+ struct tb *tb = rt->tb;
+ char *buf;
+ int ret;
+
+ buf = validate_and_copy_from_user(user_buf, &count);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+
+ pm_runtime_get_sync(&rt->dev);
+
+ if (mutex_lock_interruptible(&tb->lock)) {
+ ret = -ERESTARTSYS;
+ goto out;
+ }
+
+ ret = sb_regs_write(rt->port, retimer_sb_regs, ARRAY_SIZE(retimer_sb_regs),
+ USB4_SB_TARGET_RETIMER, rt->index, buf, count, ppos);
+
+ mutex_unlock(&tb->lock);
+out:
+ pm_runtime_mark_last_busy(&rt->dev);
+ pm_runtime_put_autosuspend(&rt->dev);
+ free_page((unsigned long)buf);
+
+ return ret < 0 ? ret : count;
}
#define DEBUGFS_MODE 0600
#else
#define port_regs_write NULL
+#define path_write NULL
#define switch_regs_write NULL
+#define port_sb_regs_write NULL
+#define retimer_sb_regs_write NULL
#define DEBUGFS_MODE 0400
#endif
#if IS_ENABLED(CONFIG_USB4_DEBUGFS_MARGINING)
/**
* struct tb_margining - Lane margining support
+ * @port: USB4 port through which the margining operations are run
+ * @target: Sideband target
+ * @index: Retimer index if target is %USB4_SB_TARGET_RETIMER
+ * @dev: Pointer to the device that is the target (USB4 port or retimer)
+ * @gen: Link generation
+ * @asym_rx: %true if @port supports asymmetric link with 3 Rx lanes
* @caps: Port lane margining capabilities
* @results: Last lane margining results
* @lanes: %0, %1 or %7 (all)
@@ -202,60 +469,146 @@ static ssize_t switch_regs_write(struct file *file, const char __user *user_buf,
* @ber_level: Current BER level contour value
* @voltage_steps: Number of mandatory voltage steps
* @max_voltage_offset: Maximum mandatory voltage offset (in mV)
+ * @voltage_steps_optional_range: Number of voltage steps for optional range
+ * @max_voltage_offset_optional_range: Maximum voltage offset for the optional
+ * range (in mV).
* @time_steps: Number of time margin steps
* @max_time_offset: Maximum time margin offset (in mUI)
+ * @voltage_time_offset: Offset for voltage / time for software margining
+ * @dwell_time: Dwell time for software margining (in ms)
+ * @error_counter: Error counter operation for software margining
+ * @optional_voltage_offset_range: Enable optional extended voltage range
* @software: %true if software margining is used instead of hardware
* @time: %true if time margining is used instead of voltage
* @right_high: %false if left/low margin test is performed, %true if
* right/high
+ * @upper_eye: %false if the lower PAM3 eye is used, %true if the upper
+ * eye is used
*/
struct tb_margining {
- u32 caps[2];
- u32 results[2];
- unsigned int lanes;
+ struct tb_port *port;
+ enum usb4_sb_target target;
+ u8 index;
+ struct device *dev;
+ unsigned int gen;
+ bool asym_rx;
+ u32 caps[3];
+ u32 results[3];
+ enum usb4_margining_lane lanes;
unsigned int min_ber_level;
unsigned int max_ber_level;
unsigned int ber_level;
unsigned int voltage_steps;
unsigned int max_voltage_offset;
+ unsigned int voltage_steps_optional_range;
+ unsigned int max_voltage_offset_optional_range;
unsigned int time_steps;
unsigned int max_time_offset;
+ unsigned int voltage_time_offset;
+ unsigned int dwell_time;
+ enum usb4_margin_sw_error_counter error_counter;
+ bool optional_voltage_offset_range;
bool software;
bool time;
bool right_high;
+ bool upper_eye;
};
-static bool supports_software(const struct usb4_port *usb4)
+static int margining_modify_error_counter(struct tb_margining *margining,
+ u32 lanes, enum usb4_margin_sw_error_counter error_counter)
+{
+ struct usb4_port_margining_params params = { 0 };
+ struct tb_port *port = margining->port;
+ u32 result;
+
+ if (error_counter != USB4_MARGIN_SW_ERROR_COUNTER_CLEAR &&
+ error_counter != USB4_MARGIN_SW_ERROR_COUNTER_STOP)
+ return -EOPNOTSUPP;
+
+ params.error_counter = error_counter;
+ params.lanes = lanes;
+
+ return usb4_port_sw_margin(port, margining->target, margining->index,
+ &params, &result);
+}
+
+static bool supports_software(const struct tb_margining *margining)
{
- return usb4->margining->caps[0] & USB4_MARGIN_CAP_0_MODES_SW;
+ if (margining->gen < 4)
+ return margining->caps[0] & USB4_MARGIN_CAP_0_MODES_SW;
+ return margining->caps[2] & USB4_MARGIN_CAP_2_MODES_SW;
}
-static bool supports_hardware(const struct usb4_port *usb4)
+static bool supports_hardware(const struct tb_margining *margining)
{
- return usb4->margining->caps[0] & USB4_MARGIN_CAP_0_MODES_HW;
+ if (margining->gen < 4)
+ return margining->caps[0] & USB4_MARGIN_CAP_0_MODES_HW;
+ return margining->caps[2] & USB4_MARGIN_CAP_2_MODES_HW;
}
-static bool both_lanes(const struct usb4_port *usb4)
+static bool all_lanes(const struct tb_margining *margining)
{
- return usb4->margining->caps[0] & USB4_MARGIN_CAP_0_2_LANES;
+ return margining->caps[0] & USB4_MARGIN_CAP_0_ALL_LANES;
}
-static unsigned int independent_voltage_margins(const struct usb4_port *usb4)
+static enum usb4_margin_cap_voltage_indp
+independent_voltage_margins(const struct tb_margining *margining)
{
- return (usb4->margining->caps[0] & USB4_MARGIN_CAP_0_VOLTAGE_INDP_MASK) >>
- USB4_MARGIN_CAP_0_VOLTAGE_INDP_SHIFT;
+ if (margining->gen < 4) {
+ switch (FIELD_GET(USB4_MARGIN_CAP_0_VOLTAGE_INDP_MASK, margining->caps[0])) {
+ case USB4_MARGIN_CAP_0_VOLTAGE_MIN:
+ return USB4_MARGIN_CAP_VOLTAGE_INDP_GEN_2_3_MIN;
+ case USB4_MARGIN_CAP_0_VOLTAGE_HL:
+ return USB4_MARGIN_CAP_VOLTAGE_INDP_GEN_2_3_HL;
+ case USB4_MARGIN_CAP_1_TIME_BOTH:
+ return USB4_MARGIN_CAP_VOLTAGE_INDP_GEN_2_3_BOTH;
+ }
+ } else {
+ switch (FIELD_GET(USB4_MARGIN_CAP_2_VOLTAGE_INDP_MASK, margining->caps[2])) {
+ case USB4_MARGIN_CAP_2_VOLTAGE_MIN:
+ return USB4_MARGIN_CAP_VOLTAGE_INDP_GEN_4_MIN;
+ case USB4_MARGIN_CAP_2_VOLTAGE_BOTH:
+ return USB4_MARGIN_CAP_VOLTAGE_INDP_GEN_4_BOTH;
+ }
+ }
+ return USB4_MARGIN_CAP_VOLTAGE_INDP_UNKNOWN;
}
-static bool supports_time(const struct usb4_port *usb4)
+static bool supports_time(const struct tb_margining *margining)
{
- return usb4->margining->caps[0] & USB4_MARGIN_CAP_0_TIME;
+ if (margining->gen < 4)
+ return margining->caps[0] & USB4_MARGIN_CAP_0_TIME;
+ return margining->caps[2] & USB4_MARGIN_CAP_2_TIME;
}
/* Only applicable if supports_time() returns true */
-static unsigned int independent_time_margins(const struct usb4_port *usb4)
+static enum usb4_margin_cap_time_indp
+independent_time_margins(const struct tb_margining *margining)
{
- return (usb4->margining->caps[1] & USB4_MARGIN_CAP_1_TIME_INDP_MASK) >>
- USB4_MARGIN_CAP_1_TIME_INDP_SHIFT;
+ if (margining->gen < 4) {
+ switch (FIELD_GET(USB4_MARGIN_CAP_1_TIME_INDP_MASK, margining->caps[1])) {
+ case USB4_MARGIN_CAP_1_TIME_MIN:
+ return USB4_MARGIN_CAP_TIME_INDP_GEN_2_3_MIN;
+ case USB4_MARGIN_CAP_1_TIME_LR:
+ return USB4_MARGIN_CAP_TIME_INDP_GEN_2_3_LR;
+ case USB4_MARGIN_CAP_1_TIME_BOTH:
+ return USB4_MARGIN_CAP_TIME_INDP_GEN_2_3_BOTH;
+ }
+ } else {
+ switch (FIELD_GET(USB4_MARGIN_CAP_2_TIME_INDP_MASK, margining->caps[2])) {
+ case USB4_MARGIN_CAP_2_TIME_MIN:
+ return USB4_MARGIN_CAP_TIME_INDP_GEN_4_MIN;
+ case USB4_MARGIN_CAP_2_TIME_BOTH:
+ return USB4_MARGIN_CAP_TIME_INDP_GEN_4_BOTH;
+ }
+ }
+ return USB4_MARGIN_CAP_TIME_INDP_UNKNOWN;
+}
+
+static bool
+supports_optional_voltage_offset_range(const struct tb_margining *margining)
+{
+ return margining->caps[0] & USB4_MARGIN_CAP_0_OPT_VOLTAGE_SUPPORT;
}
static ssize_t
@@ -263,9 +616,8 @@ margining_ber_level_write(struct file *file, const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct seq_file *s = file->private_data;
- struct tb_port *port = s->private;
- struct usb4_port *usb4 = port->usb4;
- struct tb *tb = port->sw->tb;
+ struct tb_margining *margining = s->private;
+ struct tb *tb = margining->port->sw->tb;
unsigned int val;
int ret = 0;
char *buf;
@@ -273,7 +625,7 @@ margining_ber_level_write(struct file *file, const char __user *user_buf,
if (mutex_lock_interruptible(&tb->lock))
return -ERESTARTSYS;
- if (usb4->margining->software) {
+ if (margining->software) {
ret = -EINVAL;
goto out_unlock;
}
@@ -290,13 +642,13 @@ margining_ber_level_write(struct file *file, const char __user *user_buf,
if (ret)
goto out_free;
- if (val < usb4->margining->min_ber_level ||
- val > usb4->margining->max_ber_level) {
+ if (val < margining->min_ber_level ||
+ val > margining->max_ber_level) {
ret = -EINVAL;
goto out_free;
}
- usb4->margining->ber_level = val;
+ margining->ber_level = val;
out_free:
free_page((unsigned long)buf);
@@ -316,102 +668,151 @@ static void ber_level_show(struct seq_file *s, unsigned int val)
static int margining_ber_level_show(struct seq_file *s, void *not_used)
{
- struct tb_port *port = s->private;
- struct usb4_port *usb4 = port->usb4;
+ const struct tb_margining *margining = s->private;
- if (usb4->margining->software)
+ if (margining->software)
return -EINVAL;
- ber_level_show(s, usb4->margining->ber_level);
+ ber_level_show(s, margining->ber_level);
return 0;
}
DEBUGFS_ATTR_RW(margining_ber_level);
static int margining_caps_show(struct seq_file *s, void *not_used)
{
- struct tb_port *port = s->private;
- struct usb4_port *usb4 = port->usb4;
- struct tb *tb = port->sw->tb;
- u32 cap0, cap1;
+ struct tb_margining *margining = s->private;
+ struct tb *tb = margining->port->sw->tb;
+ int ret = 0;
if (mutex_lock_interruptible(&tb->lock))
return -ERESTARTSYS;
/* Dump the raw caps first */
- cap0 = usb4->margining->caps[0];
- seq_printf(s, "0x%08x\n", cap0);
- cap1 = usb4->margining->caps[1];
- seq_printf(s, "0x%08x\n", cap1);
+ for (int i = 0; i < ARRAY_SIZE(margining->caps); i++)
+ seq_printf(s, "0x%08x\n", margining->caps[i]);
seq_printf(s, "# software margining: %s\n",
- supports_software(usb4) ? "yes" : "no");
- if (supports_hardware(usb4)) {
+ supports_software(margining) ? "yes" : "no");
+ if (supports_hardware(margining)) {
seq_puts(s, "# hardware margining: yes\n");
seq_puts(s, "# minimum BER level contour: ");
- ber_level_show(s, usb4->margining->min_ber_level);
+ ber_level_show(s, margining->min_ber_level);
seq_puts(s, "# maximum BER level contour: ");
- ber_level_show(s, usb4->margining->max_ber_level);
+ ber_level_show(s, margining->max_ber_level);
} else {
seq_puts(s, "# hardware margining: no\n");
}
- seq_printf(s, "# both lanes simultaneously: %s\n",
- both_lanes(usb4) ? "yes" : "no");
+ seq_printf(s, "# all lanes simultaneously: %s\n",
+ str_yes_no(all_lanes(margining)));
seq_printf(s, "# voltage margin steps: %u\n",
- usb4->margining->voltage_steps);
+ margining->voltage_steps);
seq_printf(s, "# maximum voltage offset: %u mV\n",
- usb4->margining->max_voltage_offset);
+ margining->max_voltage_offset);
+ seq_printf(s, "# optional voltage offset range support: %s\n",
+ str_yes_no(supports_optional_voltage_offset_range(margining)));
+ if (supports_optional_voltage_offset_range(margining)) {
+ seq_printf(s, "# voltage margin steps, optional range: %u\n",
+ margining->voltage_steps_optional_range);
+ seq_printf(s, "# maximum voltage offset, optional range: %u mV\n",
+ margining->max_voltage_offset_optional_range);
+ }
- switch (independent_voltage_margins(usb4)) {
- case USB4_MARGIN_CAP_0_VOLTAGE_MIN:
+ switch (independent_voltage_margins(margining)) {
+ case USB4_MARGIN_CAP_VOLTAGE_INDP_GEN_2_3_MIN:
seq_puts(s, "# returns minimum between high and low voltage margins\n");
break;
- case USB4_MARGIN_CAP_0_VOLTAGE_HL:
+ case USB4_MARGIN_CAP_VOLTAGE_INDP_GEN_2_3_HL:
seq_puts(s, "# returns high or low voltage margin\n");
break;
- case USB4_MARGIN_CAP_0_VOLTAGE_BOTH:
+ case USB4_MARGIN_CAP_VOLTAGE_INDP_GEN_2_3_BOTH:
seq_puts(s, "# returns both high and low margins\n");
break;
+ case USB4_MARGIN_CAP_VOLTAGE_INDP_GEN_4_MIN:
+ seq_puts(s, "# returns minimum between high and low voltage margins in both lower and upper eye\n");
+ break;
+ case USB4_MARGIN_CAP_VOLTAGE_INDP_GEN_4_BOTH:
+ seq_puts(s, "# returns both high and low margins of both upper and lower eye\n");
+ break;
+ case USB4_MARGIN_CAP_VOLTAGE_INDP_UNKNOWN:
+ tb_port_warn(margining->port,
+ "failed to parse independent voltage margining capabilities\n");
+ ret = -EIO;
+ goto out;
}
- if (supports_time(usb4)) {
+ if (supports_time(margining)) {
seq_puts(s, "# time margining: yes\n");
seq_printf(s, "# time margining is destructive: %s\n",
- cap1 & USB4_MARGIN_CAP_1_TIME_DESTR ? "yes" : "no");
+ str_yes_no(margining->caps[1] & USB4_MARGIN_CAP_1_TIME_DESTR));
- switch (independent_time_margins(usb4)) {
- case USB4_MARGIN_CAP_1_TIME_MIN:
+ switch (independent_time_margins(margining)) {
+ case USB4_MARGIN_CAP_TIME_INDP_GEN_2_3_MIN:
seq_puts(s, "# returns minimum between left and right time margins\n");
break;
- case USB4_MARGIN_CAP_1_TIME_LR:
+ case USB4_MARGIN_CAP_TIME_INDP_GEN_2_3_LR:
seq_puts(s, "# returns left or right margin\n");
break;
- case USB4_MARGIN_CAP_1_TIME_BOTH:
+ case USB4_MARGIN_CAP_TIME_INDP_GEN_2_3_BOTH:
seq_puts(s, "# returns both left and right margins\n");
break;
+ case USB4_MARGIN_CAP_TIME_INDP_GEN_4_MIN:
+ seq_puts(s, "# returns minimum between left and right time margins in both lower and upper eye\n");
+ break;
+ case USB4_MARGIN_CAP_TIME_INDP_GEN_4_BOTH:
+ seq_puts(s, "# returns both left and right margins of both upper and lower eye\n");
+ break;
+ case USB4_MARGIN_CAP_TIME_INDP_UNKNOWN:
+ tb_port_warn(margining->port,
+ "failed to parse independent time margining capabilities\n");
+ ret = -EIO;
+ goto out;
}
seq_printf(s, "# time margin steps: %u\n",
- usb4->margining->time_steps);
+ margining->time_steps);
seq_printf(s, "# maximum time offset: %u mUI\n",
- usb4->margining->max_time_offset);
+ margining->max_time_offset);
} else {
seq_puts(s, "# time margining: no\n");
}
+out:
mutex_unlock(&tb->lock);
- return 0;
+ return ret;
}
DEBUGFS_ATTR_RO(margining_caps);
+static const struct {
+ enum usb4_margining_lane lane;
+ const char *name;
+} lane_names[] = {
+ {
+ .lane = USB4_MARGINING_LANE_RX0,
+ .name = "0",
+ },
+ {
+ .lane = USB4_MARGINING_LANE_RX1,
+ .name = "1",
+ },
+ {
+ .lane = USB4_MARGINING_LANE_RX2,
+ .name = "2",
+ },
+ {
+ .lane = USB4_MARGINING_LANE_ALL,
+ .name = "all",
+ },
+};
+
static ssize_t
margining_lanes_write(struct file *file, const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct seq_file *s = file->private_data;
- struct tb_port *port = s->private;
- struct usb4_port *usb4 = port->usb4;
+ struct tb_margining *margining = s->private;
+ struct tb_port *port = margining->port;
struct tb *tb = port->sw->tb;
- int ret = 0;
+ int lane = -1;
char *buf;
buf = validate_and_copy_from_user(user_buf, &count);
@@ -420,70 +821,263 @@ margining_lanes_write(struct file *file, const char __user *user_buf,
buf[count - 1] = '\0';
- if (mutex_lock_interruptible(&tb->lock)) {
- ret = -ERESTARTSYS;
- goto out_free;
+ for (int i = 0; i < ARRAY_SIZE(lane_names); i++) {
+ if (!strcmp(buf, lane_names[i].name)) {
+ lane = lane_names[i].lane;
+ break;
+ }
}
- if (!strcmp(buf, "0")) {
- usb4->margining->lanes = 0;
- } else if (!strcmp(buf, "1")) {
- usb4->margining->lanes = 1;
- } else if (!strcmp(buf, "all")) {
- /* Needs to be supported */
- if (both_lanes(usb4))
- usb4->margining->lanes = 7;
- else
- ret = -EINVAL;
- } else {
- ret = -EINVAL;
- }
+ free_page((unsigned long)buf);
- mutex_unlock(&tb->lock);
+ if (lane == -1)
+ return -EINVAL;
-out_free:
- free_page((unsigned long)buf);
- return ret < 0 ? ret : count;
+ scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &tb->lock) {
+ if (lane == USB4_MARGINING_LANE_ALL && !all_lanes(margining))
+ return -EINVAL;
+ /*
+ * Enabling on RX2 requires that it is supported by the
+ * USB4 port.
+ */
+ if (lane == USB4_MARGINING_LANE_RX2 && !margining->asym_rx)
+ return -EINVAL;
+
+ margining->lanes = lane;
+ }
+
+ return count;
}
static int margining_lanes_show(struct seq_file *s, void *not_used)
{
- struct tb_port *port = s->private;
- struct usb4_port *usb4 = port->usb4;
+ struct tb_margining *margining = s->private;
+ struct tb_port *port = margining->port;
struct tb *tb = port->sw->tb;
- unsigned int lanes;
- if (mutex_lock_interruptible(&tb->lock))
- return -ERESTARTSYS;
+ scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &tb->lock) {
+ for (int i = 0; i < ARRAY_SIZE(lane_names); i++) {
+ if (lane_names[i].lane == USB4_MARGINING_LANE_ALL &&
+ !all_lanes(margining))
+ continue;
+ if (lane_names[i].lane == USB4_MARGINING_LANE_RX2 &&
+ !margining->asym_rx)
+ continue;
- lanes = usb4->margining->lanes;
- if (both_lanes(usb4)) {
- if (!lanes)
- seq_puts(s, "[0] 1 all\n");
- else if (lanes == 1)
- seq_puts(s, "0 [1] all\n");
- else
- seq_puts(s, "0 1 [all]\n");
- } else {
- if (!lanes)
- seq_puts(s, "[0] 1\n");
- else
- seq_puts(s, "0 [1]\n");
+ if (i != 0)
+ seq_putc(s, ' ');
+
+ if (lane_names[i].lane == margining->lanes)
+ seq_printf(s, "[%s]", lane_names[i].name);
+ else
+ seq_printf(s, "%s", lane_names[i].name);
+ }
+ seq_puts(s, "\n");
}
- mutex_unlock(&tb->lock);
return 0;
}
DEBUGFS_ATTR_RW(margining_lanes);
+static ssize_t
+margining_voltage_time_offset_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct tb_margining *margining = s->private;
+ struct tb *tb = margining->port->sw->tb;
+ unsigned int max_margin;
+ unsigned int val;
+ int ret;
+
+ ret = kstrtouint_from_user(user_buf, count, 10, &val);
+ if (ret)
+ return ret;
+
+ scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &tb->lock) {
+ if (!margining->software)
+ return -EOPNOTSUPP;
+
+ if (margining->time)
+ max_margin = margining->time_steps;
+ else
+ if (margining->optional_voltage_offset_range)
+ max_margin = margining->voltage_steps_optional_range;
+ else
+ max_margin = margining->voltage_steps;
+
+ margining->voltage_time_offset = clamp(val, 0, max_margin);
+ }
+
+ return count;
+}
+
+static int margining_voltage_time_offset_show(struct seq_file *s,
+ void *not_used)
+{
+ const struct tb_margining *margining = s->private;
+ struct tb *tb = margining->port->sw->tb;
+
+ scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &tb->lock) {
+ if (!margining->software)
+ return -EOPNOTSUPP;
+
+ seq_printf(s, "%d\n", margining->voltage_time_offset);
+ }
+
+ return 0;
+}
+DEBUGFS_ATTR_RW(margining_voltage_time_offset);
+
+static ssize_t
+margining_error_counter_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ enum usb4_margin_sw_error_counter error_counter;
+ struct seq_file *s = file->private_data;
+ struct tb_margining *margining = s->private;
+ struct tb *tb = margining->port->sw->tb;
+ char *buf;
+
+ buf = validate_and_copy_from_user(user_buf, &count);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+
+ buf[count - 1] = '\0';
+
+ if (!strcmp(buf, "nop"))
+ error_counter = USB4_MARGIN_SW_ERROR_COUNTER_NOP;
+ else if (!strcmp(buf, "clear"))
+ error_counter = USB4_MARGIN_SW_ERROR_COUNTER_CLEAR;
+ else if (!strcmp(buf, "start"))
+ error_counter = USB4_MARGIN_SW_ERROR_COUNTER_START;
+ else if (!strcmp(buf, "stop"))
+ error_counter = USB4_MARGIN_SW_ERROR_COUNTER_STOP;
+ else
+ return -EINVAL;
+
+ scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &tb->lock) {
+ if (!margining->software)
+ return -EOPNOTSUPP;
+
+ margining->error_counter = error_counter;
+ }
+
+ return count;
+}
+
+static int margining_error_counter_show(struct seq_file *s, void *not_used)
+{
+ const struct tb_margining *margining = s->private;
+ struct tb *tb = margining->port->sw->tb;
+
+ scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &tb->lock) {
+ if (!margining->software)
+ return -EOPNOTSUPP;
+
+ switch (margining->error_counter) {
+ case USB4_MARGIN_SW_ERROR_COUNTER_NOP:
+ seq_puts(s, "[nop] clear start stop\n");
+ break;
+ case USB4_MARGIN_SW_ERROR_COUNTER_CLEAR:
+ seq_puts(s, "nop [clear] start stop\n");
+ break;
+ case USB4_MARGIN_SW_ERROR_COUNTER_START:
+ seq_puts(s, "nop clear [start] stop\n");
+ break;
+ case USB4_MARGIN_SW_ERROR_COUNTER_STOP:
+ seq_puts(s, "nop clear start [stop]\n");
+ break;
+ }
+ }
+
+ return 0;
+}
+DEBUGFS_ATTR_RW(margining_error_counter);
+
+static ssize_t
+margining_dwell_time_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct tb_margining *margining = s->private;
+ struct tb *tb = margining->port->sw->tb;
+ unsigned int val;
+ int ret;
+
+ ret = kstrtouint_from_user(user_buf, count, 10, &val);
+ if (ret)
+ return ret;
+
+ scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &tb->lock) {
+ if (!margining->software)
+ return -EOPNOTSUPP;
+
+ margining->dwell_time = clamp(val, MIN_DWELL_TIME, MAX_DWELL_TIME);
+ }
+
+ return count;
+}
+
+static int margining_dwell_time_show(struct seq_file *s, void *not_used)
+{
+ struct tb_margining *margining = s->private;
+ struct tb *tb = margining->port->sw->tb;
+
+ scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &tb->lock) {
+ if (!margining->software)
+ return -EOPNOTSUPP;
+
+ seq_printf(s, "%d\n", margining->dwell_time);
+ }
+
+ return 0;
+}
+DEBUGFS_ATTR_RW(margining_dwell_time);
+
+static ssize_t
+margining_optional_voltage_offset_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct tb_margining *margining = s->private;
+ struct tb *tb = margining->port->sw->tb;
+ bool val;
+ int ret;
+
+ ret = kstrtobool_from_user(user_buf, count, &val);
+ if (ret)
+ return ret;
+
+ scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &tb->lock) {
+ margining->optional_voltage_offset_range = val;
+ }
+
+ return count;
+}
+
+static int margining_optional_voltage_offset_show(struct seq_file *s,
+ void *not_used)
+{
+ struct tb_margining *margining = s->private;
+ struct tb *tb = margining->port->sw->tb;
+
+ scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &tb->lock) {
+ seq_printf(s, "%u\n", margining->optional_voltage_offset_range);
+ }
+
+ return 0;
+}
+DEBUGFS_ATTR_RW(margining_optional_voltage_offset);
+
static ssize_t margining_mode_write(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct seq_file *s = file->private_data;
- struct tb_port *port = s->private;
- struct usb4_port *usb4 = port->usb4;
- struct tb *tb = port->sw->tb;
+ struct tb_margining *margining = s->private;
+ struct tb *tb = margining->port->sw->tb;
int ret = 0;
char *buf;
@@ -499,13 +1093,13 @@ static ssize_t margining_mode_write(struct file *file,
}
if (!strcmp(buf, "software")) {
- if (supports_software(usb4))
- usb4->margining->software = true;
+ if (supports_software(margining))
+ margining->software = true;
else
ret = -EINVAL;
} else if (!strcmp(buf, "hardware")) {
- if (supports_hardware(usb4))
- usb4->margining->software = false;
+ if (supports_hardware(margining))
+ margining->software = false;
else
ret = -EINVAL;
} else {
@@ -521,23 +1115,22 @@ out_free:
static int margining_mode_show(struct seq_file *s, void *not_used)
{
- const struct tb_port *port = s->private;
- const struct usb4_port *usb4 = port->usb4;
- struct tb *tb = port->sw->tb;
+ struct tb_margining *margining = s->private;
+ struct tb *tb = margining->port->sw->tb;
const char *space = "";
if (mutex_lock_interruptible(&tb->lock))
return -ERESTARTSYS;
- if (supports_software(usb4)) {
- if (usb4->margining->software)
+ if (supports_software(margining)) {
+ if (margining->software)
seq_puts(s, "[software]");
else
seq_puts(s, "software");
space = " ";
}
- if (supports_hardware(usb4)) {
- if (usb4->margining->software)
+ if (supports_hardware(margining)) {
+ if (margining->software)
seq_printf(s, "%shardware", space);
else
seq_printf(s, "%s[hardware]", space);
@@ -550,12 +1143,85 @@ static int margining_mode_show(struct seq_file *s, void *not_used)
}
DEBUGFS_ATTR_RW(margining_mode);
+static int margining_run_sw(struct tb_margining *margining,
+ struct usb4_port_margining_params *params)
+{
+ u32 nsamples = margining->dwell_time / DWELL_SAMPLE_INTERVAL;
+ int ret, i;
+
+ ret = usb4_port_sw_margin(margining->port, margining->target, margining->index,
+ params, margining->results);
+ if (ret)
+ goto out_stop;
+
+ for (i = 0; i <= nsamples; i++) {
+ u32 errors = 0;
+
+ ret = usb4_port_sw_margin_errors(margining->port, margining->target,
+ margining->index, &margining->results[1]);
+ if (ret)
+ break;
+
+ if (margining->lanes == USB4_MARGINING_LANE_RX0)
+ errors = FIELD_GET(USB4_MARGIN_SW_ERR_COUNTER_LANE_0_MASK,
+ margining->results[1]);
+ else if (margining->lanes == USB4_MARGINING_LANE_RX1)
+ errors = FIELD_GET(USB4_MARGIN_SW_ERR_COUNTER_LANE_1_MASK,
+ margining->results[1]);
+ else if (margining->lanes == USB4_MARGINING_LANE_RX2)
+ errors = FIELD_GET(USB4_MARGIN_SW_ERR_COUNTER_LANE_2_MASK,
+ margining->results[1]);
+ else if (margining->lanes == USB4_MARGINING_LANE_ALL)
+ errors = margining->results[1];
+
+ /* Any errors stop the test */
+ if (errors)
+ break;
+
+ fsleep(DWELL_SAMPLE_INTERVAL * USEC_PER_MSEC);
+ }
+
+out_stop:
+ /*
+ * Stop the counters but don't clear them to allow the different
+ * error counter configurations to be used.
+ */
+ margining_modify_error_counter(margining, margining->lanes,
+ USB4_MARGIN_SW_ERROR_COUNTER_STOP);
+ return ret;
+}
+
+static int validate_margining(struct tb_margining *margining)
+{
+ /*
+ * For running on RX2 the link must be asymmetric with 3
+ * receivers. Because this can change dynamically, check it
+ * here before we start the margining and report an error back
+ * if expectations are not met.
+ */
+ if (margining->lanes == USB4_MARGINING_LANE_RX2) {
+ int ret;
+
+ ret = tb_port_get_link_width(margining->port);
+ if (ret < 0)
+ return ret;
+ if (ret != TB_LINK_WIDTH_ASYM_RX) {
+ tb_port_warn(margining->port, "link is %s expected %s",
+ tb_width_name(ret),
+ tb_width_name(TB_LINK_WIDTH_ASYM_RX));
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
static int margining_run_write(void *data, u64 val)
{
- struct tb_port *port = data;
- struct usb4_port *usb4 = port->usb4;
+ struct tb_margining *margining = data;
+ struct tb_port *port = margining->port;
+ struct device *dev = margining->dev;
struct tb_switch *sw = port->sw;
- struct tb_margining *margining;
struct tb_switch *down_sw;
struct tb *tb = sw->tb;
int ret, clx;
@@ -563,13 +1229,17 @@ static int margining_run_write(void *data, u64 val)
if (val != 1)
return -EINVAL;
- pm_runtime_get_sync(&sw->dev);
+ pm_runtime_get_sync(dev);
if (mutex_lock_interruptible(&tb->lock)) {
ret = -ERESTARTSYS;
goto out_rpm_put;
}
+ ret = validate_margining(margining);
+ if (ret)
+ goto out_unlock;
+
if (tb_is_upstream_port(port))
down_sw = sw;
else if (port->remote)
@@ -590,37 +1260,52 @@ static int margining_run_write(void *data, u64 val)
clx = ret;
}
- margining = usb4->margining;
+ /* Clear the results */
+ memset(margining->results, 0, sizeof(margining->results));
if (margining->software) {
- tb_port_dbg(port, "running software %s lane margining for lanes %u\n",
- margining->time ? "time" : "voltage", margining->lanes);
- ret = usb4_port_sw_margin(port, margining->lanes, margining->time,
- margining->right_high,
- USB4_MARGIN_SW_COUNTER_CLEAR);
- if (ret)
- goto out_clx;
-
- ret = usb4_port_sw_margin_errors(port, &margining->results[0]);
+ struct usb4_port_margining_params params = {
+ .error_counter = USB4_MARGIN_SW_ERROR_COUNTER_CLEAR,
+ .lanes = margining->lanes,
+ .time = margining->time,
+ .voltage_time_offset = margining->voltage_time_offset,
+ .right_high = margining->right_high,
+ .upper_eye = margining->upper_eye,
+ .optional_voltage_offset_range = margining->optional_voltage_offset_range,
+ };
+
+ tb_port_dbg(port,
+ "running software %s lane margining for %s lanes %u\n",
+ margining->time ? "time" : "voltage", dev_name(dev),
+ margining->lanes);
+
+ ret = margining_run_sw(margining, &params);
} else {
- tb_port_dbg(port, "running hardware %s lane margining for lanes %u\n",
- margining->time ? "time" : "voltage", margining->lanes);
- /* Clear the results */
- margining->results[0] = 0;
- margining->results[1] = 0;
- ret = usb4_port_hw_margin(port, margining->lanes,
- margining->ber_level, margining->time,
- margining->right_high, margining->results);
+ struct usb4_port_margining_params params = {
+ .ber_level = margining->ber_level,
+ .lanes = margining->lanes,
+ .time = margining->time,
+ .right_high = margining->right_high,
+ .upper_eye = margining->upper_eye,
+ .optional_voltage_offset_range = margining->optional_voltage_offset_range,
+ };
+
+ tb_port_dbg(port,
+ "running hardware %s lane margining for %s lanes %u\n",
+ margining->time ? "time" : "voltage", dev_name(dev),
+ margining->lanes);
+
+ ret = usb4_port_hw_margin(port, margining->target, margining->index, &params,
+ margining->results, ARRAY_SIZE(margining->results));
}
-out_clx:
if (down_sw)
tb_switch_clx_enable(down_sw, clx);
out_unlock:
mutex_unlock(&tb->lock);
out_rpm_put:
- pm_runtime_mark_last_busy(&sw->dev);
- pm_runtime_put_autosuspend(&sw->dev);
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
return ret;
}
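
Putting the new knobs together, a software margining run driven from userspace might look like the sketch below. The directory layout and file names (mode, lanes, dwell_time, run, results under a per-port margining directory) are assumptions inferred from the attribute names above; the files themselves are created elsewhere in this series.

/* Hypothetical userspace sketch of one software margining run */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Write one value; keep the trailing newline, the mode/lanes handlers strip the last character */
static void put(const char *dir, const char *file, const char *val)
{
	char path[256];
	int fd;

	snprintf(path, sizeof(path), "%s/%s", dir, file);
	fd = open(path, O_WRONLY);
	if (fd < 0)
		return;
	if (write(fd, val, strlen(val)) < 0)
		perror(file);
	close(fd);
}

int main(void)
{
	/* Assumed location of the per-port margining directory */
	const char *dir = "/sys/kernel/debug/thunderbolt/0-1/port1/margining";
	char path[256], buf[512];
	ssize_t n;
	int fd;

	put(dir, "mode", "software\n");		/* use software margining */
	put(dir, "lanes", "0\n");		/* margin receiver lane 0 */
	put(dir, "dwell_time", "100\n");	/* dwell 100 ms per run */
	put(dir, "run", "1\n");			/* triggers margining_run_write() */

	snprintf(path, sizeof(path), "%s/results", dir);
	fd = open(path, O_RDONLY);
	if (fd < 0)
		return 1;
	n = read(fd, buf, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';
		fputs(buf, stdout);		/* raw dwords plus per-lane error counts */
	}
	close(fd);
	return 0;
}

The write to "run" drives the flow in margining_run_write() above: the error counters are cleared, usb4_port_sw_margin() is issued, and the counters are sampled every DWELL_SAMPLE_INTERVAL milliseconds until the dwell time expires or an error is seen.
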
@@ -632,16 +1317,21 @@ static ssize_t margining_results_write(struct file *file,
size_t count, loff_t *ppos)
{
struct seq_file *s = file->private_data;
- struct tb_port *port = s->private;
- struct usb4_port *usb4 = port->usb4;
- struct tb *tb = port->sw->tb;
+ struct tb_margining *margining = s->private;
+ struct tb *tb = margining->port->sw->tb;
if (mutex_lock_interruptible(&tb->lock))
return -ERESTARTSYS;
/* Just clear the results */
- usb4->margining->results[0] = 0;
- usb4->margining->results[1] = 0;
+ memset(margining->results, 0, sizeof(margining->results));
+
+ if (margining->software) {
+ /* Clear the error counters */
+ margining_modify_error_counter(margining,
+ USB4_MARGINING_LANE_ALL,
+ USB4_MARGIN_SW_ERROR_COUNTER_CLEAR);
+ }
mutex_unlock(&tb->lock);
return count;
@@ -652,12 +1342,14 @@ static void voltage_margin_show(struct seq_file *s,
{
unsigned int tmp, voltage;
- tmp = val & USB4_MARGIN_HW_RES_1_MARGIN_MASK;
+ tmp = FIELD_GET(USB4_MARGIN_HW_RES_MARGIN_MASK, val);
voltage = tmp * margining->max_voltage_offset / margining->voltage_steps;
seq_printf(s, "%u mV (%u)", voltage, tmp);
- if (val & USB4_MARGIN_HW_RES_1_EXCEEDS)
+ if (val & USB4_MARGIN_HW_RES_EXCEEDS)
seq_puts(s, " exceeds maximum");
seq_puts(s, "\n");
+ if (margining->optional_voltage_offset_range)
+ seq_puts(s, " optional voltage offset range enabled\n");
}
static void time_margin_show(struct seq_file *s,
@@ -665,73 +1357,106 @@ static void time_margin_show(struct seq_file *s,
{
unsigned int tmp, interval;
- tmp = val & USB4_MARGIN_HW_RES_1_MARGIN_MASK;
+ tmp = FIELD_GET(USB4_MARGIN_HW_RES_MARGIN_MASK, val);
interval = tmp * margining->max_time_offset / margining->time_steps;
seq_printf(s, "%u mUI (%u)", interval, tmp);
- if (val & USB4_MARGIN_HW_RES_1_EXCEEDS)
+ if (val & USB4_MARGIN_HW_RES_EXCEEDS)
seq_puts(s, " exceeds maximum");
seq_puts(s, "\n");
}
+static u8 margining_hw_result_val(const u32 *results,
+ enum usb4_margining_lane lane,
+ bool right_high)
+{
+ u32 val;
+
+ if (lane == USB4_MARGINING_LANE_RX0)
+ val = results[1];
+ else if (lane == USB4_MARGINING_LANE_RX1)
+ val = results[1] >> USB4_MARGIN_HW_RES_LANE_SHIFT;
+ else if (lane == USB4_MARGINING_LANE_RX2)
+ val = results[2];
+ else
+ val = 0;
+
+ return right_high ? val : val >> USB4_MARGIN_HW_RES_LL_SHIFT;
+}
+
+static void margining_hw_result_format(struct seq_file *s,
+ const struct tb_margining *margining,
+ enum usb4_margining_lane lane)
+{
+ u8 val;
+
+ if (margining->time) {
+ val = margining_hw_result_val(margining->results, lane, true);
+ seq_printf(s, "# lane %u right time margin: ", lane);
+ time_margin_show(s, margining, val);
+ val = margining_hw_result_val(margining->results, lane, false);
+ seq_printf(s, "# lane %u left time margin: ", lane);
+ time_margin_show(s, margining, val);
+ } else {
+ val = margining_hw_result_val(margining->results, lane, true);
+ seq_printf(s, "# lane %u high voltage margin: ", lane);
+ voltage_margin_show(s, margining, val);
+ val = margining_hw_result_val(margining->results, lane, false);
+ seq_printf(s, "# lane %u low voltage margin: ", lane);
+ voltage_margin_show(s, margining, val);
+ }
+}
+
static int margining_results_show(struct seq_file *s, void *not_used)
{
- struct tb_port *port = s->private;
- struct usb4_port *usb4 = port->usb4;
- struct tb_margining *margining;
- struct tb *tb = port->sw->tb;
+ struct tb_margining *margining = s->private;
+ struct tb *tb = margining->port->sw->tb;
if (mutex_lock_interruptible(&tb->lock))
return -ERESTARTSYS;
- margining = usb4->margining;
/* Dump the raw results first */
seq_printf(s, "0x%08x\n", margining->results[0]);
/* Only the hardware margining has two result dwords */
if (!margining->software) {
- unsigned int val;
+ for (int i = 1; i < ARRAY_SIZE(margining->results); i++)
+ seq_printf(s, "0x%08x\n", margining->results[i]);
+
+ if (margining->lanes == USB4_MARGINING_LANE_ALL) {
+ margining_hw_result_format(s, margining,
+ USB4_MARGINING_LANE_RX0);
+ margining_hw_result_format(s, margining,
+ USB4_MARGINING_LANE_RX1);
+ if (margining->asym_rx)
+ margining_hw_result_format(s, margining,
+ USB4_MARGINING_LANE_RX2);
+ } else {
+ margining_hw_result_format(s, margining,
+ margining->lanes);
+ }
+ } else {
+ u32 lane_errors, result;
seq_printf(s, "0x%08x\n", margining->results[1]);
- if (margining->time) {
- if (!margining->lanes || margining->lanes == 7) {
- val = margining->results[1];
- seq_puts(s, "# lane 0 right time margin: ");
- time_margin_show(s, margining, val);
- val = margining->results[1] >>
- USB4_MARGIN_HW_RES_1_L0_LL_MARGIN_SHIFT;
- seq_puts(s, "# lane 0 left time margin: ");
- time_margin_show(s, margining, val);
- }
- if (margining->lanes == 1 || margining->lanes == 7) {
- val = margining->results[1] >>
- USB4_MARGIN_HW_RES_1_L1_RH_MARGIN_SHIFT;
- seq_puts(s, "# lane 1 right time margin: ");
- time_margin_show(s, margining, val);
- val = margining->results[1] >>
- USB4_MARGIN_HW_RES_1_L1_LL_MARGIN_SHIFT;
- seq_puts(s, "# lane 1 left time margin: ");
- time_margin_show(s, margining, val);
- }
- } else {
- if (!margining->lanes || margining->lanes == 7) {
- val = margining->results[1];
- seq_puts(s, "# lane 0 high voltage margin: ");
- voltage_margin_show(s, margining, val);
- val = margining->results[1] >>
- USB4_MARGIN_HW_RES_1_L0_LL_MARGIN_SHIFT;
- seq_puts(s, "# lane 0 low voltage margin: ");
- voltage_margin_show(s, margining, val);
- }
- if (margining->lanes == 1 || margining->lanes == 7) {
- val = margining->results[1] >>
- USB4_MARGIN_HW_RES_1_L1_RH_MARGIN_SHIFT;
- seq_puts(s, "# lane 1 high voltage margin: ");
- voltage_margin_show(s, margining, val);
- val = margining->results[1] >>
- USB4_MARGIN_HW_RES_1_L1_LL_MARGIN_SHIFT;
- seq_puts(s, "# lane 1 low voltage margin: ");
- voltage_margin_show(s, margining, val);
- }
+ result = FIELD_GET(USB4_MARGIN_SW_LANES_MASK, margining->results[0]);
+ if (result == USB4_MARGINING_LANE_RX0 ||
+ result == USB4_MARGINING_LANE_ALL) {
+ lane_errors = FIELD_GET(USB4_MARGIN_SW_ERR_COUNTER_LANE_0_MASK,
+ margining->results[1]);
+ seq_printf(s, "# lane 0 errors: %u\n", lane_errors);
+ }
+ if (result == USB4_MARGINING_LANE_RX1 ||
+ result == USB4_MARGINING_LANE_ALL) {
+ lane_errors = FIELD_GET(USB4_MARGIN_SW_ERR_COUNTER_LANE_1_MASK,
+ margining->results[1]);
+ seq_printf(s, "# lane 1 errors: %u\n", lane_errors);
+ }
+ if (margining->asym_rx &&
+ (result == USB4_MARGINING_LANE_RX2 ||
+ result == USB4_MARGINING_LANE_ALL)) {
+ lane_errors = FIELD_GET(USB4_MARGIN_SW_ERR_COUNTER_LANE_2_MASK,
+ margining->results[1]);
+ seq_printf(s, "# lane 2 errors: %u\n", lane_errors);
}
}
@@ -745,9 +1470,8 @@ static ssize_t margining_test_write(struct file *file,
size_t count, loff_t *ppos)
{
struct seq_file *s = file->private_data;
- struct tb_port *port = s->private;
- struct usb4_port *usb4 = port->usb4;
- struct tb *tb = port->sw->tb;
+ struct tb_margining *margining = s->private;
+ struct tb *tb = margining->port->sw->tb;
int ret = 0;
char *buf;
@@ -762,10 +1486,10 @@ static ssize_t margining_test_write(struct file *file,
goto out_free;
}
- if (!strcmp(buf, "time") && supports_time(usb4))
- usb4->margining->time = true;
+ if (!strcmp(buf, "time") && supports_time(margining))
+ margining->time = true;
else if (!strcmp(buf, "voltage"))
- usb4->margining->time = false;
+ margining->time = false;
else
ret = -EINVAL;
@@ -778,15 +1502,14 @@ out_free:
static int margining_test_show(struct seq_file *s, void *not_used)
{
- struct tb_port *port = s->private;
- struct usb4_port *usb4 = port->usb4;
- struct tb *tb = port->sw->tb;
+ struct tb_margining *margining = s->private;
+ struct tb *tb = margining->port->sw->tb;
if (mutex_lock_interruptible(&tb->lock))
return -ERESTARTSYS;
- if (supports_time(usb4)) {
- if (usb4->margining->time)
+ if (supports_time(margining)) {
+ if (margining->time)
seq_puts(s, "voltage [time]\n");
else
seq_puts(s, "[voltage] time\n");
@@ -804,9 +1527,8 @@ static ssize_t margining_margin_write(struct file *file,
size_t count, loff_t *ppos)
{
struct seq_file *s = file->private_data;
- struct tb_port *port = s->private;
- struct usb4_port *usb4 = port->usb4;
- struct tb *tb = port->sw->tb;
+ struct tb_margining *margining = s->private;
+ struct tb *tb = margining->port->sw->tb;
int ret = 0;
char *buf;
@@ -821,18 +1543,18 @@ static ssize_t margining_margin_write(struct file *file,
goto out_free;
}
- if (usb4->margining->time) {
+ if (margining->time) {
if (!strcmp(buf, "left"))
- usb4->margining->right_high = false;
+ margining->right_high = false;
else if (!strcmp(buf, "right"))
- usb4->margining->right_high = true;
+ margining->right_high = true;
else
ret = -EINVAL;
} else {
if (!strcmp(buf, "low"))
- usb4->margining->right_high = false;
+ margining->right_high = false;
else if (!strcmp(buf, "high"))
- usb4->margining->right_high = true;
+ margining->right_high = true;
else
ret = -EINVAL;
}
@@ -846,20 +1568,19 @@ out_free:
static int margining_margin_show(struct seq_file *s, void *not_used)
{
- struct tb_port *port = s->private;
- struct usb4_port *usb4 = port->usb4;
- struct tb *tb = port->sw->tb;
+ struct tb_margining *margining = s->private;
+ struct tb *tb = margining->port->sw->tb;
if (mutex_lock_interruptible(&tb->lock))
return -ERESTARTSYS;
- if (usb4->margining->time) {
- if (usb4->margining->right_high)
+ if (margining->time) {
+ if (margining->right_high)
seq_puts(s, "left [right]\n");
else
seq_puts(s, "[left] right\n");
} else {
- if (usb4->margining->right_high)
+ if (margining->right_high)
seq_puts(s, "low [high]\n");
else
seq_puts(s, "[low] high\n");
@@ -870,51 +1591,118 @@ static int margining_margin_show(struct seq_file *s, void *not_used)
}
DEBUGFS_ATTR_RW(margining_margin);
-static void margining_port_init(struct tb_port *port)
+static ssize_t margining_eye_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct tb_port *port = s->private;
+ struct usb4_port *usb4 = port->usb4;
+ struct tb *tb = port->sw->tb;
+ int ret = 0;
+ char *buf;
+
+ buf = validate_and_copy_from_user(user_buf, &count);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+
+ buf[count - 1] = '\0';
+
+ scoped_cond_guard(mutex_intr, ret = -ERESTARTSYS, &tb->lock) {
+ if (!strcmp(buf, "lower"))
+ usb4->margining->upper_eye = false;
+ else if (!strcmp(buf, "upper"))
+ usb4->margining->upper_eye = true;
+ else
+ ret = -EINVAL;
+ }
+
+ free_page((unsigned long)buf);
+ return ret ? ret : count;
+}
+
+static int margining_eye_show(struct seq_file *s, void *not_used)
+{
+ struct tb_port *port = s->private;
+ struct usb4_port *usb4 = port->usb4;
+ struct tb *tb = port->sw->tb;
+
+ scoped_guard(mutex_intr, &tb->lock) {
+ if (usb4->margining->upper_eye)
+ seq_puts(s, "lower [upper]\n");
+ else
+ seq_puts(s, "[lower] upper\n");
+
+ return 0;
+ }
+
+ return -ERESTARTSYS;
+}
+DEBUGFS_ATTR_RW(margining_eye);
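The eye attribute above relies on the guard helpers from <linux/cleanup.h> rather than explicit lock/unlock pairs. A minimal sketch of the pattern, assuming only a seq_file and a mutex (the function name is illustrative, not part of the patch):

#include <linux/cleanup.h>
#include <linux/mutex.h>
#include <linux/seq_file.h>

static int example_locked_show(struct seq_file *s, struct mutex *lock)
{
	/*
	 * scoped_guard() with the conditional mutex_intr guard runs the
	 * body only if mutex_lock_interruptible() succeeded and drops
	 * the mutex automatically when the body is left, including on
	 * the early return.
	 */
	scoped_guard(mutex_intr, lock) {
		seq_puts(s, "locked section\n");
		return 0;
	}

	/* Reached only when the lock attempt was interrupted */
	return -ERESTARTSYS;
}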
+
+static struct tb_margining *margining_alloc(struct tb_port *port,
+ struct device *dev,
+ enum usb4_sb_target target,
+ u8 index, struct dentry *parent)
{
struct tb_margining *margining;
- struct dentry *dir, *parent;
- struct usb4_port *usb4;
- char dir_name[10];
+ struct dentry *dir;
unsigned int val;
int ret;
- usb4 = port->usb4;
- if (!usb4)
- return;
-
- snprintf(dir_name, sizeof(dir_name), "port%d", port->port);
- parent = debugfs_lookup(dir_name, port->sw->debugfs_dir);
+ ret = tb_port_get_link_generation(port);
+ if (ret < 0) {
+ tb_port_warn(port, "failed to read link generation\n");
+ return NULL;
+ }
margining = kzalloc(sizeof(*margining), GFP_KERNEL);
if (!margining)
- return;
+ return NULL;
- ret = usb4_port_margining_caps(port, margining->caps);
+ margining->port = port;
+ margining->target = target;
+ margining->index = index;
+ margining->dev = dev;
+ margining->gen = ret;
+ margining->asym_rx = tb_port_width_supported(port, TB_LINK_WIDTH_ASYM_RX);
+
+ ret = usb4_port_margining_caps(port, target, index, margining->caps,
+ ARRAY_SIZE(margining->caps));
if (ret) {
kfree(margining);
- return;
+ return NULL;
}
- usb4->margining = margining;
-
/* Set the initial mode */
- if (supports_software(usb4))
+ if (supports_software(margining))
margining->software = true;
- val = (margining->caps[0] & USB4_MARGIN_CAP_0_VOLTAGE_STEPS_MASK) >>
- USB4_MARGIN_CAP_0_VOLTAGE_STEPS_SHIFT;
- margining->voltage_steps = val;
- val = (margining->caps[0] & USB4_MARGIN_CAP_0_MAX_VOLTAGE_OFFSET_MASK) >>
- USB4_MARGIN_CAP_0_MAX_VOLTAGE_OFFSET_SHIFT;
- margining->max_voltage_offset = 74 + val * 2;
+ if (margining->gen < 4) {
+ val = FIELD_GET(USB4_MARGIN_CAP_0_VOLTAGE_STEPS_MASK, margining->caps[0]);
+ margining->voltage_steps = val;
+ val = FIELD_GET(USB4_MARGIN_CAP_0_MAX_VOLTAGE_OFFSET_MASK, margining->caps[0]);
+ margining->max_voltage_offset = 74 + val * 2;
+ } else {
+ val = FIELD_GET(USB4_MARGIN_CAP_2_VOLTAGE_STEPS_MASK, margining->caps[2]);
+ margining->voltage_steps = val;
+ val = FIELD_GET(USB4_MARGIN_CAP_2_MAX_VOLTAGE_OFFSET_MASK, margining->caps[2]);
+ margining->max_voltage_offset = 74 + val * 2;
+ }
+
+ if (supports_optional_voltage_offset_range(margining)) {
+ val = FIELD_GET(USB4_MARGIN_CAP_0_VOLT_STEPS_OPT_MASK,
+ margining->caps[0]);
+ margining->voltage_steps_optional_range = val;
+ val = FIELD_GET(USB4_MARGIN_CAP_1_MAX_VOLT_OFS_OPT_MASK,
+ margining->caps[1]);
+ margining->max_voltage_offset_optional_range = 74 + val * 2;
+ }
- if (supports_time(usb4)) {
- val = (margining->caps[1] & USB4_MARGIN_CAP_1_TIME_STEPS_MASK) >>
- USB4_MARGIN_CAP_1_TIME_STEPS_SHIFT;
+ if (supports_time(margining)) {
+ val = FIELD_GET(USB4_MARGIN_CAP_1_TIME_STEPS_MASK, margining->caps[1]);
margining->time_steps = val;
- val = (margining->caps[1] & USB4_MARGIN_CAP_1_TIME_OFFSET_MASK) >>
- USB4_MARGIN_CAP_1_TIME_OFFSET_SHIFT;
+ val = FIELD_GET(USB4_MARGIN_CAP_1_TIME_OFFSET_MASK, margining->caps[1]);
/*
* Store it as mUI (milli Unit Interval) because we want
* to keep it as integer.
@@ -923,29 +1711,65 @@ static void margining_port_init(struct tb_port *port)
}
dir = debugfs_create_dir("margining", parent);
- if (supports_hardware(usb4)) {
- val = (margining->caps[1] & USB4_MARGIN_CAP_1_MIN_BER_MASK) >>
- USB4_MARGIN_CAP_1_MIN_BER_SHIFT;
+ if (supports_hardware(margining)) {
+ val = FIELD_GET(USB4_MARGIN_CAP_1_MIN_BER_MASK, margining->caps[1]);
margining->min_ber_level = val;
- val = (margining->caps[1] & USB4_MARGIN_CAP_1_MAX_BER_MASK) >>
- USB4_MARGIN_CAP_1_MAX_BER_SHIFT;
+ val = FIELD_GET(USB4_MARGIN_CAP_1_MAX_BER_MASK, margining->caps[1]);
margining->max_ber_level = val;
/* Set the default to minimum */
margining->ber_level = margining->min_ber_level;
- debugfs_create_file("ber_level_contour", 0400, dir, port,
+ debugfs_create_file("ber_level_contour", 0400, dir, margining,
&margining_ber_level_fops);
}
- debugfs_create_file("caps", 0400, dir, port, &margining_caps_fops);
- debugfs_create_file("lanes", 0600, dir, port, &margining_lanes_fops);
- debugfs_create_file("mode", 0600, dir, port, &margining_mode_fops);
- debugfs_create_file("run", 0600, dir, port, &margining_run_fops);
- debugfs_create_file("results", 0600, dir, port, &margining_results_fops);
- debugfs_create_file("test", 0600, dir, port, &margining_test_fops);
- if (independent_voltage_margins(usb4) ||
- (supports_time(usb4) && independent_time_margins(usb4)))
- debugfs_create_file("margin", 0600, dir, port, &margining_margin_fops);
+ debugfs_create_file("caps", 0400, dir, margining, &margining_caps_fops);
+ debugfs_create_file("lanes", 0600, dir, margining, &margining_lanes_fops);
+ debugfs_create_file("mode", 0600, dir, margining, &margining_mode_fops);
+ debugfs_create_file("run", 0600, dir, margining, &margining_run_fops);
+ debugfs_create_file("results", 0600, dir, margining,
+ &margining_results_fops);
+ debugfs_create_file("test", 0600, dir, margining, &margining_test_fops);
+ if (independent_voltage_margins(margining) == USB4_MARGIN_CAP_VOLTAGE_INDP_GEN_2_3_HL ||
+ (supports_time(margining) &&
+ independent_time_margins(margining) == USB4_MARGIN_CAP_TIME_INDP_GEN_2_3_LR))
+ debugfs_create_file("margin", 0600, dir, margining, &margining_margin_fops);
+
+ margining->error_counter = USB4_MARGIN_SW_ERROR_COUNTER_CLEAR;
+ margining->dwell_time = MIN_DWELL_TIME;
+
+ if (supports_optional_voltage_offset_range(margining))
+ debugfs_create_file("optional_voltage_offset", DEBUGFS_MODE, dir, margining,
+ &margining_optional_voltage_offset_fops);
+
+ if (supports_software(margining)) {
+ debugfs_create_file("voltage_time_offset", DEBUGFS_MODE, dir, margining,
+ &margining_voltage_time_offset_fops);
+ debugfs_create_file("error_counter", DEBUGFS_MODE, dir, margining,
+ &margining_error_counter_fops);
+ debugfs_create_file("dwell_time", DEBUGFS_MODE, dir, margining,
+ &margining_dwell_time_fops);
+ }
+
+ if (margining->gen >= 4)
+ debugfs_create_file("eye", 0600, dir, port, &margining_eye_fops);
+
+ return margining;
+}
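The capability parsing above swaps open-coded mask-and-shift pairs for FIELD_GET() from <linux/bitfield.h>. A small equivalence sketch (illustrative only, not part of the patch), using a field that sb_regs.h defines with GENMASK():

#include <linux/bitfield.h>

static unsigned int cap0_voltage_steps(u32 cap0)
{
	/*
	 * FIELD_GET() derives the shift from the mask definition, so the
	 * separate *_SHIFT constants removed from sb_regs.h are no longer
	 * needed. With USB4_MARGIN_CAP_0_VOLTAGE_STEPS_MASK = GENMASK(12, 6)
	 * this is equivalent to (cap0 & mask) >> 6.
	 */
	return FIELD_GET(USB4_MARGIN_CAP_0_VOLTAGE_STEPS_MASK, cap0);
}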
+
+static void margining_port_init(struct tb_port *port)
+{
+ struct dentry *parent;
+ char dir_name[10];
+
+ if (!port->usb4)
+ return;
+
+ snprintf(dir_name, sizeof(dir_name), "port%d", port->port);
+ parent = debugfs_lookup(dir_name, port->sw->debugfs_dir);
+ port->usb4->margining = margining_alloc(port, &port->usb4->dev,
+ USB4_SB_TARGET_ROUTER, 0,
+ parent);
}
static void margining_port_remove(struct tb_port *port)
@@ -1019,11 +1843,27 @@ static void margining_xdomain_remove(struct tb_xdomain *xd)
downstream = tb_port_at(xd->route, parent_sw);
margining_port_remove(downstream);
}
+
+static void margining_retimer_init(struct tb_retimer *rt, struct dentry *debugfs_dir)
+{
+ rt->margining = margining_alloc(rt->port, &rt->dev,
+ USB4_SB_TARGET_RETIMER, rt->index,
+ debugfs_dir);
+}
+
+static void margining_retimer_remove(struct tb_retimer *rt)
+{
+ kfree(rt->margining);
+ rt->margining = NULL;
+}
#else
static inline void margining_switch_init(struct tb_switch *sw) { }
static inline void margining_switch_remove(struct tb_switch *sw) { }
static inline void margining_xdomain_init(struct tb_xdomain *xd) { }
static inline void margining_xdomain_remove(struct tb_xdomain *xd) { }
+static inline void margining_retimer_init(struct tb_retimer *rt,
+ struct dentry *debugfs_dir) { }
+static inline void margining_retimer_remove(struct tb_retimer *rt) { }
#endif
static int port_clear_all_counters(struct tb_port *port)
@@ -1346,7 +2186,7 @@ static int switch_basic_regs_show(struct tb_switch *sw, struct seq_file *s)
if (tb_switch_is_usb4(sw))
dwords = ARRAY_SIZE(data);
else
- dwords = 7;
+ dwords = 5;
ret = tb_sw_read(sw, data, TB_CFG_SWITCH, 0, dwords);
if (ret)
@@ -1448,7 +2288,7 @@ out_rpm_put:
return ret;
}
-DEBUGFS_ATTR_RO(path);
+DEBUGFS_ATTR_RW(path);
static int counter_set_regs_show(struct tb_port *port, struct seq_file *s,
int counter)
@@ -1504,6 +2344,60 @@ out:
}
DEBUGFS_ATTR_RW(counters);
+static int sb_regs_show(struct tb_port *port, const struct sb_reg *sb_regs,
+ size_t size, enum usb4_sb_target target, u8 index,
+ struct seq_file *s)
+{
+ int ret, i;
+
+ seq_puts(s, "# register value\n");
+
+ for (i = 0; i < size; i++) {
+ const struct sb_reg *regs = &sb_regs[i];
+ u8 data[64];
+ int j;
+
+ memset(data, 0, sizeof(data));
+ ret = usb4_port_sb_read(port, target, index, regs->reg, data,
+ regs->size);
+ if (ret)
+ return ret;
+
+ seq_printf(s, "0x%02x", regs->reg);
+ for (j = 0; j < regs->size; j++)
+ seq_printf(s, " 0x%02x", data[j]);
+ seq_puts(s, "\n");
+ }
+
+ return 0;
+}
+
+static int port_sb_regs_show(struct seq_file *s, void *not_used)
+{
+ struct tb_port *port = s->private;
+ struct tb_switch *sw = port->sw;
+ struct tb *tb = sw->tb;
+ int ret;
+
+ pm_runtime_get_sync(&sw->dev);
+
+ if (mutex_lock_interruptible(&tb->lock)) {
+ ret = -ERESTARTSYS;
+ goto out_rpm_put;
+ }
+
+ ret = sb_regs_show(port, port_sb_regs, ARRAY_SIZE(port_sb_regs),
+ USB4_SB_TARGET_ROUTER, 0, s);
+
+ mutex_unlock(&tb->lock);
+out_rpm_put:
+ pm_runtime_mark_last_busy(&sw->dev);
+ pm_runtime_put_autosuspend(&sw->dev);
+
+ return ret;
+}
+DEBUGFS_ATTR_RW(port_sb_regs);
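For orientation, the sb_regs file generated above emits one line per entry of the register table; the values below are made up, only the layout is meaningful:

/*
 * # register value
 * 0x00 0x87 0x80 0x86 0x15
 * 0x01 0x33 0x46 0x00 0x00
 * 0x02 0x01 0x00 0x00 0x00
 */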
+
/**
* tb_switch_debugfs_init() - Add debugfs entries for router
* @sw: Pointer to the router
@@ -1519,6 +2413,8 @@ void tb_switch_debugfs_init(struct tb_switch *sw)
sw->debugfs_dir = debugfs_dir;
debugfs_create_file("regs", DEBUGFS_MODE, debugfs_dir, sw,
&switch_regs_fops);
+ if (sw->drom)
+ debugfs_create_blob("drom", 0400, debugfs_dir, &sw->drom_blob);
tb_switch_for_each_port(sw, port) {
struct dentry *debugfs_dir;
@@ -1538,6 +2434,9 @@ void tb_switch_debugfs_init(struct tb_switch *sw)
if (port->config.counters_support)
debugfs_create_file("counters", 0600, debugfs_dir, port,
&counters_fops);
+ if (port->usb4)
+ debugfs_create_file("sb_regs", DEBUGFS_MODE, debugfs_dir,
+ port, &port_sb_regs_fops);
}
margining_switch_init(sw);
@@ -1589,6 +2488,59 @@ void tb_service_debugfs_remove(struct tb_service *svc)
svc->debugfs_dir = NULL;
}
+static int retimer_sb_regs_show(struct seq_file *s, void *not_used)
+{
+ struct tb_retimer *rt = s->private;
+ struct tb *tb = rt->tb;
+ int ret;
+
+ pm_runtime_get_sync(&rt->dev);
+
+ if (mutex_lock_interruptible(&tb->lock)) {
+ ret = -ERESTARTSYS;
+ goto out_rpm_put;
+ }
+
+ ret = sb_regs_show(rt->port, retimer_sb_regs, ARRAY_SIZE(retimer_sb_regs),
+ USB4_SB_TARGET_RETIMER, rt->index, s);
+
+ mutex_unlock(&tb->lock);
+out_rpm_put:
+ pm_runtime_mark_last_busy(&rt->dev);
+ pm_runtime_put_autosuspend(&rt->dev);
+
+ return ret;
+}
+DEBUGFS_ATTR_RW(retimer_sb_regs);
+
+/**
+ * tb_retimer_debugfs_init() - Add debugfs directory for retimer
+ * @rt: Pointer to retimer structure
+ *
+ * Adds and populates retimer debugfs directory.
+ */
+void tb_retimer_debugfs_init(struct tb_retimer *rt)
+{
+ struct dentry *debugfs_dir;
+
+ debugfs_dir = debugfs_create_dir(dev_name(&rt->dev), tb_debugfs_root);
+ debugfs_create_file("sb_regs", DEBUGFS_MODE, debugfs_dir, rt,
+ &retimer_sb_regs_fops);
+ margining_retimer_init(rt, debugfs_dir);
+}
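Assuming debugfs is mounted at the usual /sys/kernel/debug, the directory created here lands under the "thunderbolt" root set up in tb_debugfs_init(); an illustrative layout (the retimer directory name is just a placeholder):

/*
 * /sys/kernel/debug/thunderbolt/<retimer device name>/sb_regs
 * /sys/kernel/debug/thunderbolt/<retimer device name>/margining/
 *     (the margining directory appears only when
 *      CONFIG_USB4_DEBUGFS_MARGINING is enabled)
 */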
+
+/**
+ * tb_retimer_debugfs_remove() - Remove retimer debugfs directory
+ * @rt: Pointer to retimer structure
+ *
+ * Removes the retimer debugfs directory along with its contents.
+ */
+void tb_retimer_debugfs_remove(struct tb_retimer *rt)
+{
+ debugfs_lookup_and_remove(dev_name(&rt->dev), tb_debugfs_root);
+ margining_retimer_remove(rt);
+}
+
void tb_debugfs_init(void)
{
tb_debugfs_root = debugfs_create_dir("thunderbolt", NULL);
diff --git a/drivers/thunderbolt/domain.c b/drivers/thunderbolt/domain.c
index 9fb1a64f3300..144d0232a70c 100644
--- a/drivers/thunderbolt/domain.c
+++ b/drivers/thunderbolt/domain.c
@@ -45,9 +45,9 @@ static bool match_service_id(const struct tb_service_id *id,
}
static const struct tb_service_id *__tb_service_match(struct device *dev,
- struct device_driver *drv)
+ const struct device_driver *drv)
{
- struct tb_service_driver *driver;
+ const struct tb_service_driver *driver;
const struct tb_service_id *ids;
struct tb_service *svc;
@@ -55,7 +55,7 @@ static const struct tb_service_id *__tb_service_match(struct device *dev,
if (!svc)
return NULL;
- driver = container_of(drv, struct tb_service_driver, driver);
+ driver = container_of_const(drv, struct tb_service_driver, driver);
if (!driver->id_table)
return NULL;
@@ -67,7 +67,7 @@ static const struct tb_service_id *__tb_service_match(struct device *dev,
return NULL;
}
-static int tb_service_match(struct device *dev, struct device_driver *drv)
+static int tb_service_match(struct device *dev, const struct device_driver *drv)
{
return !!__tb_service_match(dev, drv);
}
@@ -321,12 +321,12 @@ static void tb_domain_release(struct device *dev)
tb_ctl_free(tb->ctl);
destroy_workqueue(tb->wq);
- ida_simple_remove(&tb_domain_ida, tb->index);
+ ida_free(&tb_domain_ida, tb->index);
mutex_destroy(&tb->lock);
kfree(tb);
}
-struct device_type tb_domain_type = {
+const struct device_type tb_domain_type = {
.name = "thunderbolt_domain",
.release = tb_domain_release,
};
@@ -389,7 +389,7 @@ struct tb *tb_domain_alloc(struct tb_nhi *nhi, int timeout_msec, size_t privsize
tb->nhi = nhi;
mutex_init(&tb->lock);
- tb->index = ida_simple_get(&tb_domain_ida, 0, 0, GFP_KERNEL);
+ tb->index = ida_alloc(&tb_domain_ida, GFP_KERNEL);
if (tb->index < 0)
goto err_free;
@@ -397,7 +397,7 @@ struct tb *tb_domain_alloc(struct tb_nhi *nhi, int timeout_msec, size_t privsize
if (!tb->wq)
goto err_remove_ida;
- tb->ctl = tb_ctl_alloc(nhi, timeout_msec, tb_domain_event_cb, tb);
+ tb->ctl = tb_ctl_alloc(nhi, tb->index, timeout_msec, tb_domain_event_cb, tb);
if (!tb->ctl)
goto err_destroy_wq;
@@ -413,7 +413,7 @@ struct tb *tb_domain_alloc(struct tb_nhi *nhi, int timeout_msec, size_t privsize
err_destroy_wq:
destroy_workqueue(tb->wq);
err_remove_ida:
- ida_simple_remove(&tb_domain_ida, tb->index);
+ ida_free(&tb_domain_ida, tb->index);
err_free:
kfree(tb);
@@ -423,6 +423,7 @@ err_free:
/**
* tb_domain_add() - Add domain to the system
* @tb: Domain to add
+ * @reset: Issue reset to the host router
*
* Starts the domain and adds it to the system. Hotplugging devices will
* work after this has been returned successfully. In order to remove
@@ -431,7 +432,7 @@ err_free:
*
* Return: %0 in case of success and negative errno in case of error
*/
-int tb_domain_add(struct tb *tb)
+int tb_domain_add(struct tb *tb, bool reset)
{
int ret;
@@ -460,7 +461,7 @@ int tb_domain_add(struct tb *tb)
/* Start the domain */
if (tb->cm_ops->start) {
- ret = tb->cm_ops->start(tb);
+ ret = tb->cm_ops->start(tb, reset);
if (ret)
goto err_domain_del;
}
@@ -505,6 +506,10 @@ void tb_domain_remove(struct tb *tb)
mutex_unlock(&tb->lock);
flush_workqueue(tb->wq);
+
+ if (tb->cm_ops->deinit)
+ tb->cm_ops->deinit(tb);
+
device_unregister(&tb->dev);
}
diff --git a/drivers/thunderbolt/eeprom.c b/drivers/thunderbolt/eeprom.c
index eb241b270f79..9c1d65d26553 100644
--- a/drivers/thunderbolt/eeprom.c
+++ b/drivers/thunderbolt/eeprom.c
@@ -435,6 +435,29 @@ static int tb_drom_parse_entries(struct tb_switch *sw, size_t header_size)
return 0;
}
+static int tb_switch_drom_alloc(struct tb_switch *sw, size_t size)
+{
+ sw->drom = kzalloc(size, GFP_KERNEL);
+ if (!sw->drom)
+ return -ENOMEM;
+
+#ifdef CONFIG_DEBUG_FS
+ sw->drom_blob.data = sw->drom;
+ sw->drom_blob.size = size;
+#endif
+ return 0;
+}
+
+static void tb_switch_drom_free(struct tb_switch *sw)
+{
+#ifdef CONFIG_DEBUG_FS
+ sw->drom_blob.data = NULL;
+ sw->drom_blob.size = 0;
+#endif
+ kfree(sw->drom);
+ sw->drom = NULL;
+}
+
/*
* tb_drom_copy_efi - copy drom supplied by EFI to sw->drom if present
*/
@@ -447,9 +470,9 @@ static int tb_drom_copy_efi(struct tb_switch *sw, u16 *size)
if (len < 0 || len < sizeof(struct tb_drom_header))
return -EINVAL;
- sw->drom = kmalloc(len, GFP_KERNEL);
- if (!sw->drom)
- return -ENOMEM;
+ res = tb_switch_drom_alloc(sw, len);
+ if (res)
+ return res;
res = device_property_read_u8_array(dev, "ThunderboltDROM", sw->drom,
len);
@@ -464,8 +487,7 @@ static int tb_drom_copy_efi(struct tb_switch *sw, u16 *size)
return 0;
err:
- kfree(sw->drom);
- sw->drom = NULL;
+ tb_switch_drom_free(sw);
return -EINVAL;
}
@@ -491,13 +513,15 @@ static int tb_drom_copy_nvm(struct tb_switch *sw, u16 *size)
/* Size includes CRC8 + UID + CRC32 */
*size += 1 + 8 + 4;
- sw->drom = kzalloc(*size, GFP_KERNEL);
- if (!sw->drom)
- return -ENOMEM;
+ ret = tb_switch_drom_alloc(sw, *size);
+ if (ret)
+ return ret;
ret = dma_port_flash_read(sw->dma_port, drom_offset, sw->drom, *size);
- if (ret)
- goto err_free;
+ if (ret) {
+ tb_switch_drom_free(sw);
+ return ret;
+ }
/*
* Read UID from the minimal DROM because the one in NVM is just
@@ -505,11 +529,6 @@ static int tb_drom_copy_nvm(struct tb_switch *sw, u16 *size)
*/
tb_drom_read_uid_only(sw, &sw->uid);
return 0;
-
-err_free:
- kfree(sw->drom);
- sw->drom = NULL;
- return ret;
}
static int usb4_copy_drom(struct tb_switch *sw, u16 *size)
@@ -522,15 +541,13 @@ static int usb4_copy_drom(struct tb_switch *sw, u16 *size)
/* Size includes CRC8 + UID + CRC32 */
*size += 1 + 8 + 4;
- sw->drom = kzalloc(*size, GFP_KERNEL);
- if (!sw->drom)
- return -ENOMEM;
+ ret = tb_switch_drom_alloc(sw, *size);
+ if (ret)
+ return ret;
ret = usb4_switch_drom_read(sw, 0, sw->drom, *size);
- if (ret) {
- kfree(sw->drom);
- sw->drom = NULL;
- }
+ if (ret)
+ tb_switch_drom_free(sw);
return ret;
}
@@ -552,19 +569,14 @@ static int tb_drom_bit_bang(struct tb_switch *sw, u16 *size)
return -EIO;
}
- sw->drom = kzalloc(*size, GFP_KERNEL);
- if (!sw->drom)
- return -ENOMEM;
+ ret = tb_switch_drom_alloc(sw, *size);
+ if (ret)
+ return ret;
ret = tb_eeprom_read_n(sw, 0, sw->drom, *size);
if (ret)
- goto err;
-
- return 0;
+ tb_switch_drom_free(sw);
-err:
- kfree(sw->drom);
- sw->drom = NULL;
return ret;
}
@@ -646,9 +658,7 @@ static int tb_drom_parse(struct tb_switch *sw, u16 size)
return 0;
err:
- kfree(sw->drom);
- sw->drom = NULL;
-
+ tb_switch_drom_free(sw);
return ret;
}
diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c
index 56790d50f9e3..7859bccc592d 100644
--- a/drivers/thunderbolt/icm.c
+++ b/drivers/thunderbolt/icm.c
@@ -2144,7 +2144,7 @@ static int icm_runtime_resume(struct tb *tb)
return 0;
}
-static int icm_start(struct tb *tb)
+static int icm_start(struct tb *tb, bool not_used)
{
struct icm *icm = tb_priv(tb);
int ret;
@@ -2532,6 +2532,7 @@ struct tb *icm_probe(struct tb_nhi *nhi)
case PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_2C_NHI:
case PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_4C_NHI:
+ icm->can_upgrade_nvm = true;
icm->is_supported = icm_tgl_is_supported;
icm->get_mode = icm_ar_get_mode;
icm->driver_ready = icm_tr_driver_ready;
diff --git a/drivers/thunderbolt/lc.c b/drivers/thunderbolt/lc.c
index 633970fbe9b0..63cb4b6afb71 100644
--- a/drivers/thunderbolt/lc.c
+++ b/drivers/thunderbolt/lc.c
@@ -6,6 +6,8 @@
* Author: Mika Westerberg <mika.westerberg@linux.intel.com>
*/
+#include <linux/delay.h>
+
#include "tb.h"
/**
@@ -45,6 +47,49 @@ static int find_port_lc_cap(struct tb_port *port)
return sw->cap_lc + start + phys * size;
}
+/**
+ * tb_lc_reset_port() - Trigger downstream port reset through LC
+ * @port: Port that is reset
+ *
+ * Triggers downstream port reset through link controller registers.
+ * Returns %0 in case of success, negative errno otherwise. Only supports
+ * non-USB4 routers with link controller (that's Thunderbolt 2 and
+ * Thunderbolt 3).
+ */
+int tb_lc_reset_port(struct tb_port *port)
+{
+ struct tb_switch *sw = port->sw;
+ int cap, ret;
+ u32 mode;
+
+ if (sw->generation < 2)
+ return -EINVAL;
+
+ cap = find_port_lc_cap(port);
+ if (cap < 0)
+ return cap;
+
+ ret = tb_sw_read(sw, &mode, TB_CFG_SWITCH, cap + TB_LC_PORT_MODE, 1);
+ if (ret)
+ return ret;
+
+ mode |= TB_LC_PORT_MODE_DPR;
+
+ ret = tb_sw_write(sw, &mode, TB_CFG_SWITCH, cap + TB_LC_PORT_MODE, 1);
+ if (ret)
+ return ret;
+
+ fsleep(10000);
+
+ ret = tb_sw_read(sw, &mode, TB_CFG_SWITCH, cap + TB_LC_PORT_MODE, 1);
+ if (ret)
+ return ret;
+
+ mode &= ~TB_LC_PORT_MODE_DPR;
+
+ return tb_sw_write(sw, &mode, TB_CFG_SWITCH, cap + TB_LC_PORT_MODE, 1);
+}
+
static int tb_lc_set_port_configured(struct tb_port *port, bool configured)
{
bool upstream = tb_is_upstream_port(port);
diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
index fb4f46e51753..f3a2264e012b 100644
--- a/drivers/thunderbolt/nhi.c
+++ b/drivers/thunderbolt/nhi.c
@@ -48,7 +48,7 @@
static bool host_reset = true;
module_param(host_reset, bool, 0444);
-MODULE_PARM_DESC(host_reset, "reset USBv2 host router (default: true)");
+MODULE_PARM_DESC(host_reset, "reset USB4 host router (default: true)");
static int ring_interrupt_index(const struct tb_ring *ring)
{
@@ -465,7 +465,7 @@ static int ring_request_msix(struct tb_ring *ring, bool no_suspend)
if (!nhi->pdev->msix_enabled)
return 0;
- ret = ida_simple_get(&nhi->msix_ida, 0, MSIX_MAX_VECS, GFP_KERNEL);
+ ret = ida_alloc_max(&nhi->msix_ida, MSIX_MAX_VECS - 1, GFP_KERNEL);
if (ret < 0)
return ret;
@@ -485,7 +485,7 @@ static int ring_request_msix(struct tb_ring *ring, bool no_suspend)
return 0;
err_ida_remove:
- ida_simple_remove(&nhi->msix_ida, ring->vector);
+ ida_free(&nhi->msix_ida, ring->vector);
return ret;
}
@@ -496,7 +496,7 @@ static void ring_release_msix(struct tb_ring *ring)
return;
free_irq(ring->irq, ring);
- ida_simple_remove(&ring->nhi->msix_ida, ring->vector);
+ ida_free(&ring->nhi->msix_ida, ring->vector);
ring->vector = 0;
ring->irq = 0;
}
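The ida_simple_*() to ida_alloc*()/ida_free() conversions in this file and in domain.c change how the upper bound is expressed: ida_simple_get() took an exclusive end, while ida_alloc_max() and ida_alloc_range() take inclusive maximums. A sketch of the equivalence (illustrative, the names are not from the patch):

#include <linux/idr.h>

static int alloc_vector(struct ida *ida, unsigned int nvecs)
{
	/*
	 * Old: ida_simple_get(ida, 0, nvecs, GFP_KERNEL), where 'nvecs'
	 * was an exclusive end. The new maximum is inclusive, hence - 1.
	 */
	return ida_alloc_max(ida, nvecs - 1, GFP_KERNEL);
}

static void release_vector(struct ida *ida, int id)
{
	/* Old: ida_simple_remove(ida, id) */
	ida_free(ida, id);
}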
@@ -1340,18 +1340,18 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (res)
return dev_err_probe(dev, res, "cannot enable PCI device, aborting\n");
- res = pcim_iomap_regions(pdev, 1 << 0, "thunderbolt");
- if (res)
- return dev_err_probe(dev, res, "cannot obtain PCI resources, aborting\n");
-
nhi = devm_kzalloc(&pdev->dev, sizeof(*nhi), GFP_KERNEL);
if (!nhi)
return -ENOMEM;
nhi->pdev = pdev;
nhi->ops = (const struct tb_nhi_ops *)id->driver_data;
- /* cannot fail - table is allocated in pcim_iomap_regions */
- nhi->iobase = pcim_iomap_table(pdev)[0];
+
+ nhi->iobase = pcim_iomap_region(pdev, 0, "thunderbolt");
+ res = PTR_ERR_OR_ZERO(nhi->iobase);
+ if (res)
+ return dev_err_probe(dev, res, "cannot obtain PCI resources, aborting\n");
+
nhi->hop_count = ioread32(nhi->iobase + REG_CAPS) & 0x3ff;
dev_dbg(dev, "total paths: %d\n", nhi->hop_count);
@@ -1364,7 +1364,6 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
nhi_check_quirks(nhi);
nhi_check_iommu(nhi);
-
nhi_reset(nhi);
res = nhi_init_msi(nhi);
@@ -1392,7 +1391,7 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
dev_dbg(dev, "NHI initialized, starting thunderbolt\n");
- res = tb_domain_add(tb);
+ res = tb_domain_add(tb, host_reset);
if (res) {
/*
* At this point the RX/TX rings might already have been
@@ -1521,6 +1520,14 @@ static struct pci_device_id nhi_ids[] = {
.driver_data = (kernel_ulong_t)&icl_nhi_ops },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_LNL_NHI1),
.driver_data = (kernel_ulong_t)&icl_nhi_ops },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_PTL_M_NHI0),
+ .driver_data = (kernel_ulong_t)&icl_nhi_ops },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_PTL_M_NHI1),
+ .driver_data = (kernel_ulong_t)&icl_nhi_ops },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_PTL_P_NHI0),
+ .driver_data = (kernel_ulong_t)&icl_nhi_ops },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_PTL_P_NHI1),
+ .driver_data = (kernel_ulong_t)&icl_nhi_ops },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_80G_NHI) },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_40G_NHI) },
diff --git a/drivers/thunderbolt/nhi.h b/drivers/thunderbolt/nhi.h
index 7a07c7c1a9c2..16744f25a9a0 100644
--- a/drivers/thunderbolt/nhi.h
+++ b/drivers/thunderbolt/nhi.h
@@ -92,6 +92,10 @@ extern const struct tb_nhi_ops icl_nhi_ops;
#define PCI_DEVICE_ID_INTEL_RPL_NHI1 0xa76d
#define PCI_DEVICE_ID_INTEL_LNL_NHI0 0xa833
#define PCI_DEVICE_ID_INTEL_LNL_NHI1 0xa834
+#define PCI_DEVICE_ID_INTEL_PTL_M_NHI0 0xe333
+#define PCI_DEVICE_ID_INTEL_PTL_M_NHI1 0xe334
+#define PCI_DEVICE_ID_INTEL_PTL_P_NHI0 0xe433
+#define PCI_DEVICE_ID_INTEL_PTL_P_NHI1 0xe434
#define PCI_CLASS_SERIAL_USB_USB4 0x0c0340
diff --git a/drivers/thunderbolt/nvm.c b/drivers/thunderbolt/nvm.c
index 69fb3b0fa34f..8901db2de327 100644
--- a/drivers/thunderbolt/nvm.c
+++ b/drivers/thunderbolt/nvm.c
@@ -330,7 +330,7 @@ struct tb_nvm *tb_nvm_alloc(struct device *dev)
if (!nvm)
return ERR_PTR(-ENOMEM);
- ret = ida_simple_get(&nvm_ida, 0, 0, GFP_KERNEL);
+ ret = ida_alloc(&nvm_ida, GFP_KERNEL);
if (ret < 0) {
kfree(nvm);
return ERR_PTR(ret);
@@ -528,7 +528,7 @@ void tb_nvm_free(struct tb_nvm *nvm)
nvmem_unregister(nvm->non_active);
nvmem_unregister(nvm->active);
vfree(nvm->buf);
- ida_simple_remove(&nvm_ida, nvm->id);
+ ida_free(&nvm_ida, nvm->id);
}
kfree(nvm);
}
diff --git a/drivers/thunderbolt/path.c b/drivers/thunderbolt/path.c
index 091a81bbdbdc..e1a5f6e3d0b6 100644
--- a/drivers/thunderbolt/path.c
+++ b/drivers/thunderbolt/path.c
@@ -446,6 +446,19 @@ static int __tb_path_deactivate_hop(struct tb_port *port, int hop_index,
return -ETIMEDOUT;
}
+/**
+ * tb_path_deactivate_hop() - Deactivate one path in path config space
+ * @port: Lane or protocol adapter
+ * @hop_index: HopID of the path to be cleared
+ *
+ * This deactivates or clears a single path config space entry at
+ * @hop_index. Returns %0 on success and negative errno otherwise.
+ */
+int tb_path_deactivate_hop(struct tb_port *port, int hop_index)
+{
+ return __tb_path_deactivate_hop(port, hop_index, true);
+}
+
static void __tb_path_deactivate_hops(struct tb_path *path, int first_hop)
{
int i, res;
@@ -568,10 +581,10 @@ int tb_path_activate(struct tb_path *path)
}
}
path->activated = true;
- tb_dbg(path->tb, "path activation complete\n");
+ tb_dbg(path->tb, "%s path activation complete\n", path->name);
return 0;
err:
- tb_WARN(path->tb, "path activation failed\n");
+ tb_WARN(path->tb, "%s path activation failed\n", path->name);
return res;
}
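A hypothetical caller of the new helper, mirroring how the switch.c changes later in this series clear a protocol adapter's path config space (the function name here is illustrative):

static int clear_adapter_paths(struct tb_port *port)
{
	int i, ret;

	/* Clear every path config space entry of the adapter */
	for (i = TB_PATH_MIN_HOPID; i <= port->config.max_in_hop_id; i++) {
		ret = tb_path_deactivate_hop(port, i);
		if (ret)
			return ret;
	}

	return 0;
}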
diff --git a/drivers/thunderbolt/quirks.c b/drivers/thunderbolt/quirks.c
index e6bfa63b40ae..e81de9c30eac 100644
--- a/drivers/thunderbolt/quirks.c
+++ b/drivers/thunderbolt/quirks.c
@@ -43,6 +43,12 @@ static void quirk_usb3_maximum_bandwidth(struct tb_switch *sw)
}
}
+static void quirk_block_rpm_in_redrive(struct tb_switch *sw)
+{
+ sw->quirks |= QUIRK_KEEP_POWER_IN_DP_REDRIVE;
+ tb_sw_dbg(sw, "preventing runtime PM in DP redrive mode\n");
+}
+
struct tb_quirk {
u16 hw_vendor_id;
u16 hw_device_id;
@@ -87,6 +93,14 @@ static const struct tb_quirk tb_quirks[] = {
{ 0x8087, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HUB_40G_BRIDGE, 0x0000, 0x0000,
quirk_usb3_maximum_bandwidth },
/*
+ * Block Runtime PM in DP redrive mode for Intel Barlow Ridge host
+ * controllers.
+ */
+ { 0x8087, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_80G_NHI, 0x0000, 0x0000,
+ quirk_block_rpm_in_redrive },
+ { 0x8087, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_40G_NHI, 0x0000, 0x0000,
+ quirk_block_rpm_in_redrive },
+ /*
* CLx is not supported on AMD USB4 Yellow Carp and Pink Sardine platforms.
*/
{ 0x0438, 0x0208, 0x0000, 0x0000, quirk_clx_disable },
diff --git a/drivers/thunderbolt/retimer.c b/drivers/thunderbolt/retimer.c
index d49d6628dbf2..1f25529fe05d 100644
--- a/drivers/thunderbolt/retimer.c
+++ b/drivers/thunderbolt/retimer.c
@@ -14,7 +14,11 @@
#include "sb_regs.h"
#include "tb.h"
+#if IS_ENABLED(CONFIG_USB4_DEBUGFS_MARGINING)
#define TB_MAX_RETIMER_INDEX 6
+#else
+#define TB_MAX_RETIMER_INDEX 2
+#endif
/**
* tb_retimer_nvm_read() - Read contents of retimer NVM
@@ -99,6 +103,7 @@ static int tb_retimer_nvm_add(struct tb_retimer *rt)
err_nvm:
dev_dbg(&rt->dev, "NVM upgrade disabled\n");
+ rt->no_nvm_upgrade = true;
if (!IS_ERR(nvm))
tb_nvm_free(nvm);
@@ -178,8 +183,6 @@ static ssize_t nvm_authenticate_show(struct device *dev,
if (!rt->nvm)
ret = -EAGAIN;
- else if (rt->no_nvm_upgrade)
- ret = -EOPNOTSUPP;
else
ret = sysfs_emit(buf, "%#x\n", rt->auth_status);
@@ -199,8 +202,10 @@ static void tb_retimer_nvm_authenticate_status(struct tb_port *port, u32 *status
* If the retimer has it set, store it for the new retimer
* device instance.
*/
- for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++)
- usb4_port_retimer_nvm_authenticate_status(port, i, &status[i]);
+ for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++) {
+ if (usb4_port_retimer_nvm_authenticate_status(port, i, &status[i]))
+ break;
+ }
}
static void tb_retimer_set_inbound_sbtx(struct tb_port *port)
@@ -234,8 +239,10 @@ static void tb_retimer_unset_inbound_sbtx(struct tb_port *port)
tb_port_dbg(port, "disabling sideband transactions\n");
- for (i = TB_MAX_RETIMER_INDEX; i >= 1; i--)
- usb4_port_retimer_unset_inbound_sbtx(port, i);
+ for (i = TB_MAX_RETIMER_INDEX; i >= 1; i--) {
+ if (usb4_port_retimer_unset_inbound_sbtx(port, i))
+ break;
+ }
}
static ssize_t nvm_authenticate_store(struct device *dev,
@@ -332,6 +339,19 @@ static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR_RO(vendor);
+static umode_t retimer_is_visible(struct kobject *kobj, struct attribute *attr,
+ int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct tb_retimer *rt = tb_to_retimer(dev);
+
+ if (attr == &dev_attr_nvm_authenticate.attr ||
+ attr == &dev_attr_nvm_version.attr)
+ return rt->no_nvm_upgrade ? 0 : attr->mode;
+
+ return attr->mode;
+}
+
static struct attribute *retimer_attrs[] = {
&dev_attr_device.attr,
&dev_attr_nvm_authenticate.attr,
@@ -341,6 +361,7 @@ static struct attribute *retimer_attrs[] = {
};
static const struct attribute_group retimer_group = {
+ .is_visible = retimer_is_visible,
.attrs = retimer_attrs,
};
@@ -356,41 +377,35 @@ static void tb_retimer_release(struct device *dev)
kfree(rt);
}
-struct device_type tb_retimer_type = {
+const struct device_type tb_retimer_type = {
.name = "thunderbolt_retimer",
.groups = retimer_groups,
.release = tb_retimer_release,
};
-static int tb_retimer_add(struct tb_port *port, u8 index, u32 auth_status)
+static int tb_retimer_add(struct tb_port *port, u8 index, u32 auth_status,
+ bool on_board)
{
struct tb_retimer *rt;
u32 vendor, device;
int ret;
- ret = usb4_port_retimer_read(port, index, USB4_SB_VENDOR_ID, &vendor,
- sizeof(vendor));
+ ret = usb4_port_sb_read(port, USB4_SB_TARGET_RETIMER, index,
+ USB4_SB_VENDOR_ID, &vendor, sizeof(vendor));
if (ret) {
if (ret != -ENODEV)
tb_port_warn(port, "failed read retimer VendorId: %d\n", ret);
return ret;
}
- ret = usb4_port_retimer_read(port, index, USB4_SB_PRODUCT_ID, &device,
- sizeof(device));
+ ret = usb4_port_sb_read(port, USB4_SB_TARGET_RETIMER, index,
+ USB4_SB_PRODUCT_ID, &device, sizeof(device));
if (ret) {
if (ret != -ENODEV)
tb_port_warn(port, "failed read retimer ProductId: %d\n", ret);
return ret;
}
- /*
- * Check that it supports NVM operations. If not then don't add
- * the device at all.
- */
- ret = usb4_port_retimer_nvm_sector_size(port, index);
- if (ret < 0)
- return ret;
rt = kzalloc(sizeof(*rt), GFP_KERNEL);
if (!rt)
@@ -403,6 +418,13 @@ static int tb_retimer_add(struct tb_port *port, u8 index, u32 auth_status)
rt->port = port;
rt->tb = port->sw->tb;
+ /*
+	 * Only support NVM upgrade for on-board retimers. The retimers
+	 * on the other side of the connection do not support it.
+ */
+ if (!on_board || usb4_port_retimer_nvm_sector_size(port, index) <= 0)
+ rt->no_nvm_upgrade = true;
+
rt->dev.parent = &port->usb4->dev;
rt->dev.bus = &tb_bus_type;
rt->dev.type = &tb_retimer_type;
@@ -433,12 +455,14 @@ static int tb_retimer_add(struct tb_port *port, u8 index, u32 auth_status)
pm_runtime_mark_last_busy(&rt->dev);
pm_runtime_use_autosuspend(&rt->dev);
+ tb_retimer_debugfs_init(rt);
return 0;
}
static void tb_retimer_remove(struct tb_retimer *rt)
{
dev_info(&rt->dev, "retimer disconnected\n");
+ tb_retimer_debugfs_remove(rt);
tb_nvm_free(rt->nvm);
device_unregister(&rt->dev);
}
@@ -448,7 +472,7 @@ struct tb_retimer_lookup {
u8 index;
};
-static int retimer_match(struct device *dev, void *data)
+static int retimer_match(struct device *dev, const void *data)
{
const struct tb_retimer_lookup *lookup = data;
struct tb_retimer *rt = tb_to_retimer(dev);
@@ -481,7 +505,7 @@ static struct tb_retimer *tb_port_find_retimer(struct tb_port *port, u8 index)
int tb_retimer_scan(struct tb_port *port, bool add)
{
u32 status[TB_MAX_RETIMER_INDEX + 1] = {};
- int ret, i, last_idx = 0;
+ int ret, i, max, last_idx = 0;
/*
* Send broadcast RT to make sure retimer indices facing this
@@ -503,7 +527,7 @@ int tb_retimer_scan(struct tb_port *port, bool add)
*/
tb_retimer_set_inbound_sbtx(port);
- for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++) {
+ for (max = 1, i = 1; i <= TB_MAX_RETIMER_INDEX; i++) {
/*
* Last retimer is true only for the last on-board
* retimer (the one connected directly to the Type-C
@@ -514,28 +538,33 @@ int tb_retimer_scan(struct tb_port *port, bool add)
last_idx = i;
else if (ret < 0)
break;
- }
- tb_retimer_unset_inbound_sbtx(port);
-
- if (!last_idx)
- return 0;
+ max = i;
+ }
- /* Add on-board retimers if they do not exist already */
ret = 0;
- for (i = 1; i <= last_idx; i++) {
+ if (!IS_ENABLED(CONFIG_USB4_DEBUGFS_MARGINING))
+ max = min(last_idx, max);
+
+ /* Add retimers if they do not exist already */
+ for (i = 1; i <= max; i++) {
struct tb_retimer *rt;
+ /* Skip cable retimers */
+ if (usb4_port_retimer_is_cable(port, i))
+ continue;
+
rt = tb_port_find_retimer(port, i);
if (rt) {
put_device(&rt->dev);
} else if (add) {
- ret = tb_retimer_add(port, i, status[i]);
+ ret = tb_retimer_add(port, i, status[i], i <= last_idx);
if (ret && ret != -EOPNOTSUPP)
break;
}
}
+ tb_retimer_unset_inbound_sbtx(port);
return ret;
}
diff --git a/drivers/thunderbolt/sb_regs.h b/drivers/thunderbolt/sb_regs.h
index f37a4320f10a..5391502a4b87 100644
--- a/drivers/thunderbolt/sb_regs.h
+++ b/drivers/thunderbolt/sb_regs.h
@@ -12,6 +12,10 @@
#define USB4_SB_VENDOR_ID 0x00
#define USB4_SB_PRODUCT_ID 0x01
+#define USB4_SB_FW_VERSION 0x02
+#define USB4_SB_DEBUG_CONF 0x05
+#define USB4_SB_DEBUG 0x06
+#define USB4_SB_LRD_TUNING 0x07
#define USB4_SB_OPCODE 0x08
enum usb4_sb_opcode {
@@ -22,6 +26,7 @@ enum usb4_sb_opcode {
USB4_SB_OPCODE_SET_INBOUND_SBTX = 0x5055534c, /* "LSUP" */
USB4_SB_OPCODE_UNSET_INBOUND_SBTX = 0x50555355, /* "USUP" */
USB4_SB_OPCODE_QUERY_LAST_RETIMER = 0x5453414c, /* "LAST" */
+ USB4_SB_OPCODE_QUERY_CABLE_RETIMER = 0x524c4243, /* "CBLR" */
USB4_SB_OPCODE_GET_NVM_SECTOR_SIZE = 0x53534e47, /* "GNSS" */
USB4_SB_OPCODE_NVM_SET_OFFSET = 0x53504f42, /* "BOPS" */
USB4_SB_OPCODE_NVM_BLOCK_WRITE = 0x574b4c42, /* "BLKW" */
@@ -35,60 +40,73 @@ enum usb4_sb_opcode {
#define USB4_SB_METADATA 0x09
#define USB4_SB_METADATA_NVM_AUTH_WRITE_MASK GENMASK(5, 0)
+#define USB4_SB_LINK_CONF 0x0c
+#define USB4_SB_GEN23_TXFFE 0x0d
+#define USB4_SB_GEN4_TXFFE 0x0e
+#define USB4_SB_VERSION 0x0f
#define USB4_SB_DATA 0x12
/* USB4_SB_OPCODE_READ_LANE_MARGINING_CAP */
#define USB4_MARGIN_CAP_0_MODES_HW BIT(0)
#define USB4_MARGIN_CAP_0_MODES_SW BIT(1)
-#define USB4_MARGIN_CAP_0_2_LANES BIT(2)
+#define USB4_MARGIN_CAP_0_ALL_LANES BIT(2)
#define USB4_MARGIN_CAP_0_VOLTAGE_INDP_MASK GENMASK(4, 3)
-#define USB4_MARGIN_CAP_0_VOLTAGE_INDP_SHIFT 3
#define USB4_MARGIN_CAP_0_VOLTAGE_MIN 0x0
#define USB4_MARGIN_CAP_0_VOLTAGE_HL 0x1
#define USB4_MARGIN_CAP_0_VOLTAGE_BOTH 0x2
#define USB4_MARGIN_CAP_0_TIME BIT(5)
#define USB4_MARGIN_CAP_0_VOLTAGE_STEPS_MASK GENMASK(12, 6)
-#define USB4_MARGIN_CAP_0_VOLTAGE_STEPS_SHIFT 6
#define USB4_MARGIN_CAP_0_MAX_VOLTAGE_OFFSET_MASK GENMASK(18, 13)
-#define USB4_MARGIN_CAP_0_MAX_VOLTAGE_OFFSET_SHIFT 13
+#define USB4_MARGIN_CAP_0_OPT_VOLTAGE_SUPPORT BIT(19)
+#define USB4_MARGIN_CAP_0_VOLT_STEPS_OPT_MASK GENMASK(26, 20)
+#define USB4_MARGIN_CAP_1_MAX_VOLT_OFS_OPT_MASK GENMASK(7, 0)
#define USB4_MARGIN_CAP_1_TIME_DESTR BIT(8)
#define USB4_MARGIN_CAP_1_TIME_INDP_MASK GENMASK(10, 9)
-#define USB4_MARGIN_CAP_1_TIME_INDP_SHIFT 9
#define USB4_MARGIN_CAP_1_TIME_MIN 0x0
#define USB4_MARGIN_CAP_1_TIME_LR 0x1
#define USB4_MARGIN_CAP_1_TIME_BOTH 0x2
#define USB4_MARGIN_CAP_1_TIME_STEPS_MASK GENMASK(15, 11)
-#define USB4_MARGIN_CAP_1_TIME_STEPS_SHIFT 11
#define USB4_MARGIN_CAP_1_TIME_OFFSET_MASK GENMASK(20, 16)
-#define USB4_MARGIN_CAP_1_TIME_OFFSET_SHIFT 16
#define USB4_MARGIN_CAP_1_MIN_BER_MASK GENMASK(25, 21)
-#define USB4_MARGIN_CAP_1_MIN_BER_SHIFT 21
#define USB4_MARGIN_CAP_1_MAX_BER_MASK GENMASK(30, 26)
-#define USB4_MARGIN_CAP_1_MAX_BER_SHIFT 26
-#define USB4_MARGIN_CAP_1_MAX_BER_SHIFT 26
+#define USB4_MARGIN_CAP_2_MODES_HW BIT(0)
+#define USB4_MARGIN_CAP_2_MODES_SW BIT(1)
+#define USB4_MARGIN_CAP_2_TIME BIT(2)
+#define USB4_MARGIN_CAP_2_MAX_VOLTAGE_OFFSET_MASK GENMASK(8, 3)
+#define USB4_MARGIN_CAP_2_VOLTAGE_STEPS_MASK GENMASK(15, 9)
+#define USB4_MARGIN_CAP_2_VOLTAGE_INDP_MASK GENMASK(17, 16)
+#define USB4_MARGIN_CAP_2_VOLTAGE_MIN 0x0
+#define USB4_MARGIN_CAP_2_VOLTAGE_BOTH 0x1
+#define USB4_MARGIN_CAP_2_TIME_INDP_MASK GENMASK(19, 18)
+#define USB4_MARGIN_CAP_2_TIME_MIN 0x0
+#define USB4_MARGIN_CAP_2_TIME_BOTH 0x1
/* USB4_SB_OPCODE_RUN_HW_LANE_MARGINING */
#define USB4_MARGIN_HW_TIME BIT(3)
-#define USB4_MARGIN_HW_RH BIT(4)
+#define USB4_MARGIN_HW_RHU BIT(4)
#define USB4_MARGIN_HW_BER_MASK GENMASK(9, 5)
#define USB4_MARGIN_HW_BER_SHIFT 5
+#define USB4_MARGIN_HW_OPT_VOLTAGE BIT(10)
/* Applicable to all margin values */
-#define USB4_MARGIN_HW_RES_1_MARGIN_MASK GENMASK(6, 0)
-#define USB4_MARGIN_HW_RES_1_EXCEEDS BIT(7)
-/* Different lane margin shifts */
-#define USB4_MARGIN_HW_RES_1_L0_LL_MARGIN_SHIFT 8
-#define USB4_MARGIN_HW_RES_1_L1_RH_MARGIN_SHIFT 16
-#define USB4_MARGIN_HW_RES_1_L1_LL_MARGIN_SHIFT 24
+#define USB4_MARGIN_HW_RES_MARGIN_MASK GENMASK(6, 0)
+#define USB4_MARGIN_HW_RES_EXCEEDS BIT(7)
+
+/* Shifts for parsing the lane results */
+#define USB4_MARGIN_HW_RES_LANE_SHIFT 16
+#define USB4_MARGIN_HW_RES_LL_SHIFT 8
/* USB4_SB_OPCODE_RUN_SW_LANE_MARGINING */
+#define USB4_MARGIN_SW_LANES_MASK GENMASK(2, 0)
#define USB4_MARGIN_SW_TIME BIT(3)
#define USB4_MARGIN_SW_RH BIT(4)
+#define USB4_MARGIN_SW_OPT_VOLTAGE BIT(5)
+#define USB4_MARGIN_SW_VT_MASK GENMASK(12, 6)
#define USB4_MARGIN_SW_COUNTER_MASK GENMASK(14, 13)
-#define USB4_MARGIN_SW_COUNTER_SHIFT 13
-#define USB4_MARGIN_SW_COUNTER_NOP 0x0
-#define USB4_MARGIN_SW_COUNTER_CLEAR 0x1
-#define USB4_MARGIN_SW_COUNTER_START 0x2
-#define USB4_MARGIN_SW_COUNTER_STOP 0x3
+#define USB4_MARGIN_SW_UPPER_EYE BIT(15)
+
+#define USB4_MARGIN_SW_ERR_COUNTER_LANE_0_MASK GENMASK(3, 0)
+#define USB4_MARGIN_SW_ERR_COUNTER_LANE_1_MASK GENMASK(7, 4)
+#define USB4_MARGIN_SW_ERR_COUNTER_LANE_2_MASK GENMASK(11, 8)
#endif
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index fad40c4bc710..6a2116cbb06f 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -676,6 +676,13 @@ int tb_port_disable(struct tb_port *port)
return __tb_port_enable(port, false);
}
+static int tb_port_reset(struct tb_port *port)
+{
+ if (tb_switch_is_usb4(port->sw))
+ return port->cap_usb4 ? usb4_port_reset(port) : 0;
+ return tb_lc_reset_port(port);
+}
+
/*
* tb_init_port() - initialize a port
*
@@ -771,7 +778,7 @@ static int tb_port_alloc_hopid(struct tb_port *port, bool in, int min_hopid,
if (max_hopid < 0 || max_hopid > port_max_hopid)
max_hopid = port_max_hopid;
- return ida_simple_get(ida, min_hopid, max_hopid + 1, GFP_KERNEL);
+ return ida_alloc_range(ida, min_hopid, max_hopid, GFP_KERNEL);
}
/**
@@ -809,7 +816,7 @@ int tb_port_alloc_out_hopid(struct tb_port *port, int min_hopid, int max_hopid)
*/
void tb_port_release_in_hopid(struct tb_port *port, int hopid)
{
- ida_simple_remove(&port->in_hopids, hopid);
+ ida_free(&port->in_hopids, hopid);
}
/**
@@ -819,7 +826,7 @@ void tb_port_release_in_hopid(struct tb_port *port, int hopid)
*/
void tb_port_release_out_hopid(struct tb_port *port, int hopid)
{
- ida_simple_remove(&port->out_hopids, hopid);
+ ida_free(&port->out_hopids, hopid);
}
static inline bool tb_switch_is_reachable(const struct tb_switch *parent,
@@ -1120,7 +1127,7 @@ int tb_port_lane_bonding_enable(struct tb_port *port)
ret = tb_port_set_link_width(port->dual_link_port,
TB_LINK_WIDTH_DUAL);
if (ret)
- goto err_lane0;
+ goto err_lane1;
}
/*
@@ -1534,29 +1541,124 @@ static void tb_dump_switch(const struct tb *tb, const struct tb_switch *sw)
regs->__unknown1, regs->__unknown4);
}
+static int tb_switch_reset_host(struct tb_switch *sw)
+{
+ if (sw->generation > 1) {
+ struct tb_port *port;
+
+ tb_switch_for_each_port(sw, port) {
+ int i, ret;
+
+ /*
+ * For lane adapters we issue downstream port
+ * reset and clear up path config spaces.
+ *
+ * For protocol adapters we disable the path and
+ * clear path config space one by one (from 8 to
+ * Max Input HopID of the adapter).
+ */
+ if (tb_port_is_null(port) && !tb_is_upstream_port(port)) {
+ ret = tb_port_reset(port);
+ if (ret)
+ return ret;
+ } else if (tb_port_is_usb3_down(port) ||
+ tb_port_is_usb3_up(port)) {
+ tb_usb3_port_enable(port, false);
+ } else if (tb_port_is_dpin(port) ||
+ tb_port_is_dpout(port)) {
+ tb_dp_port_enable(port, false);
+ } else if (tb_port_is_pcie_down(port) ||
+ tb_port_is_pcie_up(port)) {
+ tb_pci_port_enable(port, false);
+ } else {
+ continue;
+ }
+
+ /* Cleanup path config space of protocol adapter */
+ for (i = TB_PATH_MIN_HOPID;
+ i <= port->config.max_in_hop_id; i++) {
+ ret = tb_path_deactivate_hop(port, i);
+ if (ret)
+ return ret;
+ }
+ }
+ } else {
+ struct tb_cfg_result res;
+
+ /* Thunderbolt 1 uses the "reset" config space packet */
+ res.err = tb_sw_write(sw, ((u32 *) &sw->config) + 2,
+ TB_CFG_SWITCH, 2, 2);
+ if (res.err)
+ return res.err;
+ res = tb_cfg_reset(sw->tb->ctl, tb_route(sw));
+ if (res.err > 0)
+ return -EIO;
+ else if (res.err < 0)
+ return res.err;
+ }
+
+ return 0;
+}
+
+static int tb_switch_reset_device(struct tb_switch *sw)
+{
+ return tb_port_reset(tb_switch_downstream_port(sw));
+}
+
+static bool tb_switch_enumerated(struct tb_switch *sw)
+{
+ u32 val;
+ int ret;
+
+ /*
+ * Read directly from the hardware because we use this also
+ * during system sleep where sw->config.enabled is already set
+ * by us.
+ */
+ ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_3, 1);
+ if (ret)
+ return false;
+
+ return !!(val & ROUTER_CS_3_V);
+}
+
/**
- * tb_switch_reset() - reconfigure route, enable and send TB_CFG_PKG_RESET
- * @sw: Switch to reset
+ * tb_switch_reset() - Perform reset to the router
+ * @sw: Router to reset
*
- * Return: Returns 0 on success or an error code on failure.
+ * Issues reset to the router @sw. Can be used for any router. For host
+ * routers, resets all the downstream ports and cleans up path config
+ * spaces accordingly. For device routers, issues a downstream port reset
+ * through the parent router, so as a side effect there will be an unplug
+ * soon after this is finished.
+ *
+ * If the router is not enumerated, this does nothing.
+ *
+ * Returns %0 on success or negative errno in case of failure.
*/
int tb_switch_reset(struct tb_switch *sw)
{
- struct tb_cfg_result res;
+ int ret;
- if (sw->generation > 1)
+ /*
+ * We cannot access the port config spaces unless the router is
+ * already enumerated. If the router is not enumerated it is
+ * equal to being reset so we can skip that here.
+ */
+ if (!tb_switch_enumerated(sw))
return 0;
- tb_sw_dbg(sw, "resetting switch\n");
+ tb_sw_dbg(sw, "resetting\n");
+
+ if (tb_route(sw))
+ ret = tb_switch_reset_device(sw);
+ else
+ ret = tb_switch_reset_host(sw);
+
+ if (ret)
+ tb_sw_warn(sw, "failed to reset\n");
- res.err = tb_sw_write(sw, ((u32 *) &sw->config) + 2,
- TB_CFG_SWITCH, 2, 2);
- if (res.err)
- return res.err;
- res = tb_cfg_reset(sw->tb->ctl, tb_route(sw));
- if (res.err > 0)
- return -EIO;
- return res.err;
+ return ret;
}
/**
@@ -2228,7 +2330,7 @@ static const struct dev_pm_ops tb_switch_pm_ops = {
NULL)
};
-struct device_type tb_switch_type = {
+const struct device_type tb_switch_type = {
.name = "thunderbolt_device",
.release = tb_switch_release,
.uevent = tb_switch_uevent,
@@ -3078,22 +3180,29 @@ void tb_switch_unconfigure_link(struct tb_switch *sw)
{
struct tb_port *up, *down;
- if (sw->is_unplugged)
- return;
if (!tb_route(sw) || tb_switch_is_icm(sw))
return;
+ /*
+ * Unconfigure downstream port so that wake-on-connect can be
+ * configured after router unplug. No need to unconfigure upstream port
+ * since its router is unplugged.
+ */
up = tb_upstream_port(sw);
- if (tb_switch_is_usb4(up->sw))
- usb4_port_unconfigure(up);
- else
- tb_lc_unconfigure_port(up);
-
down = up->remote;
if (tb_switch_is_usb4(down->sw))
usb4_port_unconfigure(down);
else
tb_lc_unconfigure_port(down);
+
+ if (sw->is_unplugged)
+ return;
+
+ up = tb_upstream_port(sw);
+ if (tb_switch_is_usb4(up->sw))
+ usb4_port_unconfigure(up);
+ else
+ tb_lc_unconfigure_port(up);
}
static void tb_switch_credits_init(struct tb_switch *sw)
@@ -3283,6 +3392,7 @@ void tb_switch_remove(struct tb_switch *sw)
tb_switch_remove(port->remote->sw);
port->remote = NULL;
} else if (port->xdomain) {
+ port->xdomain->is_unplugged = true;
tb_xdomain_remove(port->xdomain);
port->xdomain = NULL;
}
@@ -3339,7 +3449,26 @@ static int tb_switch_set_wake(struct tb_switch *sw, unsigned int flags)
return tb_lc_set_wake(sw, flags);
}
-int tb_switch_resume(struct tb_switch *sw)
+static void tb_switch_check_wakes(struct tb_switch *sw)
+{
+ if (device_may_wakeup(&sw->dev)) {
+ if (tb_switch_is_usb4(sw))
+ usb4_switch_check_wakes(sw);
+ }
+}
+
+/**
+ * tb_switch_resume() - Resume a switch after sleep
+ * @sw: Switch to resume
+ * @runtime: Is this resume from runtime suspend or system sleep
+ *
+ * Resumes and re-enumerates the router (and all its children), if still
+ * plugged after suspend. Does not enumerate a device router whose UID
+ * changed during suspend. If this is a resume from system sleep, notifies
+ * the PM core about wakes that occurred during suspend. Disables all wakes,
+ * except the USB4 wake of the upstream port for USB4 routers, which must
+ * always stay enabled.
+ */
+int tb_switch_resume(struct tb_switch *sw, bool runtime)
{
struct tb_port *port;
int err;
@@ -3388,6 +3517,9 @@ int tb_switch_resume(struct tb_switch *sw)
if (err)
return err;
+ if (!runtime)
+ tb_switch_check_wakes(sw);
+
/* Disable wakes */
tb_switch_set_wake(sw, 0);
@@ -3417,7 +3549,8 @@ int tb_switch_resume(struct tb_switch *sw)
*/
if (tb_port_unlock(port))
tb_port_warn(port, "failed to unlock port\n");
- if (port->remote && tb_switch_resume(port->remote->sw)) {
+ if (port->remote &&
+ tb_switch_resume(port->remote->sw, runtime)) {
tb_port_warn(port,
"lost during suspend, disconnecting\n");
tb_sw_set_unplugged(port->remote->sw);
diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
index 846d2813bb1a..390abcfe7188 100644
--- a/drivers/thunderbolt/tb.c
+++ b/drivers/thunderbolt/tb.c
@@ -17,6 +17,13 @@
#include "tunnel.h"
#define TB_TIMEOUT 100 /* ms */
+#define TB_RELEASE_BW_TIMEOUT 10000 /* ms */
+
+/*
+ * How many times the bandwidth allocation request from the graphics driver
+ * is retried if the DP tunnel is still activating.
+ */
+#define TB_BW_ALLOC_RETRIES 3
/*
* Minimum bandwidth (in Mb/s) that is needed in the single transmitter/receiver
@@ -68,120 +75,20 @@ static inline struct tb *tcm_to_tb(struct tb_cm *tcm)
}
struct tb_hotplug_event {
- struct work_struct work;
+ struct delayed_work work;
struct tb *tb;
u64 route;
u8 port;
bool unplug;
+ int retry;
};
-static void tb_init_bandwidth_groups(struct tb_cm *tcm)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
- struct tb_bandwidth_group *group = &tcm->groups[i];
-
- group->tb = tcm_to_tb(tcm);
- group->index = i + 1;
- INIT_LIST_HEAD(&group->ports);
- }
-}
-
-static void tb_bandwidth_group_attach_port(struct tb_bandwidth_group *group,
- struct tb_port *in)
-{
- if (!group || WARN_ON(in->group))
- return;
-
- in->group = group;
- list_add_tail(&in->group_list, &group->ports);
-
- tb_port_dbg(in, "attached to bandwidth group %d\n", group->index);
-}
-
-static struct tb_bandwidth_group *tb_find_free_bandwidth_group(struct tb_cm *tcm)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
- struct tb_bandwidth_group *group = &tcm->groups[i];
-
- if (list_empty(&group->ports))
- return group;
- }
-
- return NULL;
-}
-
-static struct tb_bandwidth_group *
-tb_attach_bandwidth_group(struct tb_cm *tcm, struct tb_port *in,
- struct tb_port *out)
-{
- struct tb_bandwidth_group *group;
- struct tb_tunnel *tunnel;
-
- /*
- * Find all DP tunnels that go through all the same USB4 links
- * as this one. Because we always setup tunnels the same way we
- * can just check for the routers at both ends of the tunnels
- * and if they are the same we have a match.
- */
- list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
- if (!tb_tunnel_is_dp(tunnel))
- continue;
-
- if (tunnel->src_port->sw == in->sw &&
- tunnel->dst_port->sw == out->sw) {
- group = tunnel->src_port->group;
- if (group) {
- tb_bandwidth_group_attach_port(group, in);
- return group;
- }
- }
- }
-
- /* Pick up next available group then */
- group = tb_find_free_bandwidth_group(tcm);
- if (group)
- tb_bandwidth_group_attach_port(group, in);
- else
- tb_port_warn(in, "no available bandwidth groups\n");
-
- return group;
-}
-
-static void tb_discover_bandwidth_group(struct tb_cm *tcm, struct tb_port *in,
- struct tb_port *out)
-{
- if (usb4_dp_port_bandwidth_mode_enabled(in)) {
- int index, i;
-
- index = usb4_dp_port_group_id(in);
- for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
- if (tcm->groups[i].index == index) {
- tb_bandwidth_group_attach_port(&tcm->groups[i], in);
- return;
- }
- }
- }
-
- tb_attach_bandwidth_group(tcm, in, out);
-}
-
-static void tb_detach_bandwidth_group(struct tb_port *in)
-{
- struct tb_bandwidth_group *group = in->group;
-
- if (group) {
- in->group = NULL;
- list_del_init(&in->group_list);
-
- tb_port_dbg(in, "detached from bandwidth group %d\n", group->index);
- }
-}
-
+static void tb_scan_port(struct tb_port *port);
static void tb_handle_hotplug(struct work_struct *work);
+static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port,
+ const char *reason);
+static void tb_queue_dp_bandwidth_request(struct tb *tb, u64 route, u8 port,
+ int retry, unsigned long delay);
static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
{
@@ -195,8 +102,8 @@ static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
ev->route = route;
ev->port = port;
ev->unplug = unplug;
- INIT_WORK(&ev->work, tb_handle_hotplug);
- queue_work(tb->wq, &ev->work);
+ INIT_DELAYED_WORK(&ev->work, tb_handle_hotplug);
+ queue_delayed_work(tb->wq, &ev->work, 0);
}
/* enumeration & hot plug handling */
@@ -393,6 +300,24 @@ static void tb_increase_tmu_accuracy(struct tb_tunnel *tunnel)
device_for_each_child(&sw->dev, NULL, tb_increase_switch_tmu_accuracy);
}
+static int tb_switch_tmu_hifi_uni_required(struct device *dev, void *not_used)
+{
+ struct tb_switch *sw = tb_to_switch(dev);
+
+ if (sw && tb_switch_tmu_is_enabled(sw) &&
+ tb_switch_tmu_is_configured(sw, TB_SWITCH_TMU_MODE_HIFI_UNI))
+ return 1;
+
+ return device_for_each_child(dev, NULL,
+ tb_switch_tmu_hifi_uni_required);
+}
+
+static bool tb_tmu_hifi_uni_required(struct tb *tb)
+{
+ return device_for_each_child(&tb->dev, NULL,
+ tb_switch_tmu_hifi_uni_required) == 1;
+}
+
static int tb_enable_tmu(struct tb_switch *sw)
{
int ret;
@@ -407,12 +332,30 @@ static int tb_enable_tmu(struct tb_switch *sw)
ret = tb_switch_tmu_configure(sw,
TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI);
if (ret == -EOPNOTSUPP) {
- if (tb_switch_clx_is_enabled(sw, TB_CL1))
- ret = tb_switch_tmu_configure(sw,
- TB_SWITCH_TMU_MODE_LOWRES);
- else
- ret = tb_switch_tmu_configure(sw,
- TB_SWITCH_TMU_MODE_HIFI_BI);
+ if (tb_switch_clx_is_enabled(sw, TB_CL1)) {
+ /*
+ * Figure out uni-directional HiFi TMU requirements
+ * currently in the domain. If there are no
+ * uni-directional HiFi requirements we can put the TMU
+ * into LowRes mode.
+ *
+ * Deliberately skip bi-directional HiFi links
+ * as these work independently of other links
+ * (and they do not allow any CL states anyway).
+ */
+ if (tb_tmu_hifi_uni_required(sw->tb))
+ ret = tb_switch_tmu_configure(sw,
+ TB_SWITCH_TMU_MODE_HIFI_UNI);
+ else
+ ret = tb_switch_tmu_configure(sw,
+ TB_SWITCH_TMU_MODE_LOWRES);
+ } else {
+ ret = tb_switch_tmu_configure(sw, TB_SWITCH_TMU_MODE_HIFI_BI);
+ }
+
+ /* If not supported, fallback to bi-directional HiFi */
+ if (ret == -EOPNOTSUPP)
+ ret = tb_switch_tmu_configure(sw, TB_SWITCH_TMU_MODE_HIFI_BI);
}
if (ret)
return ret;
@@ -472,34 +415,6 @@ static void tb_switch_discover_tunnels(struct tb_switch *sw,
}
}
-static void tb_discover_tunnels(struct tb *tb)
-{
- struct tb_cm *tcm = tb_priv(tb);
- struct tb_tunnel *tunnel;
-
- tb_switch_discover_tunnels(tb->root_switch, &tcm->tunnel_list, true);
-
- list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
- if (tb_tunnel_is_pci(tunnel)) {
- struct tb_switch *parent = tunnel->dst_port->sw;
-
- while (parent != tunnel->src_port->sw) {
- parent->boot = true;
- parent = tb_switch_parent(parent);
- }
- } else if (tb_tunnel_is_dp(tunnel)) {
- struct tb_port *in = tunnel->src_port;
- struct tb_port *out = tunnel->dst_port;
-
- /* Keep the domain from powering down */
- pm_runtime_get_sync(&in->sw->dev);
- pm_runtime_get_sync(&out->sw->dev);
-
- tb_discover_bandwidth_group(tcm, in, out);
- }
- }
-}
-
static int tb_port_configure_xdomain(struct tb_port *port, struct tb_xdomain *xd)
{
if (tb_switch_is_usb4(port->sw))
@@ -631,8 +546,9 @@ static struct tb_tunnel *tb_find_first_usb3_tunnel(struct tb *tb,
* @consumed_down: Consumed downstream bandwidth (Mb/s)
*
* Calculates consumed USB3 and PCIe bandwidth at @port between path
- * from @src_port to @dst_port. Does not take tunnel starting from
- * @src_port and ending from @src_port into account.
+ * from @src_port to @dst_port. Does not take USB3 tunnel starting from
+ * @src_port and ending at @dst_port into account because that bandwidth is
+ * already included as part of the "first hop" USB3 tunnel.
*/
static int tb_consumed_usb3_pcie_bandwidth(struct tb *tb,
struct tb_port *src_port,
@@ -647,8 +563,8 @@ static int tb_consumed_usb3_pcie_bandwidth(struct tb *tb,
*consumed_up = *consumed_down = 0;
tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
- if (tunnel && tunnel->src_port != src_port &&
- tunnel->dst_port != dst_port) {
+ if (tunnel && !tb_port_is_usb3_down(src_port) &&
+ !tb_port_is_usb3_up(dst_port)) {
int ret;
ret = tb_tunnel_consumed_bandwidth(tunnel, consumed_up,
@@ -681,6 +597,10 @@ static int tb_consumed_usb3_pcie_bandwidth(struct tb *tb,
* Calculates consumed DP bandwidth at @port between path from @src_port
* to @dst_port. Does not take tunnel starting from @src_port and ending
* from @src_port into account.
+ *
+ * If there is bandwidth reserved for any of the groups between
+ * @src_port and @dst_port (but not yet used), that is also taken into
+ * account in the returned consumed bandwidth.
*/
static int tb_consumed_dp_bandwidth(struct tb *tb,
struct tb_port *src_port,
@@ -689,9 +609,11 @@ static int tb_consumed_dp_bandwidth(struct tb *tb,
int *consumed_up,
int *consumed_down)
{
+ int group_reserved[MAX_GROUPS] = {};
struct tb_cm *tcm = tb_priv(tb);
struct tb_tunnel *tunnel;
- int ret;
+ bool downstream;
+ int i, ret;
*consumed_up = *consumed_down = 0;
@@ -700,6 +622,7 @@ static int tb_consumed_dp_bandwidth(struct tb *tb,
* their consumed bandwidth from the available.
*/
list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
+ const struct tb_bandwidth_group *group;
int dp_consumed_up, dp_consumed_down;
if (tb_tunnel_is_invalid(tunnel))
@@ -712,6 +635,15 @@ static int tb_consumed_dp_bandwidth(struct tb *tb,
continue;
/*
+ * Calculate what is reserved for groups crossing the
+ * same ports only once (as that is reserved for all the
+ * tunnels in the group).
+ */
+ group = tunnel->src_port->group;
+ if (group && group->reserved && !group_reserved[group->index])
+ group_reserved[group->index] = group->reserved;
+
+ /*
* Ignore the DP tunnel between src_port and dst_port
* because it is the same tunnel and we may be
* re-calculating estimated bandwidth.
@@ -729,6 +661,14 @@ static int tb_consumed_dp_bandwidth(struct tb *tb,
*consumed_down += dp_consumed_down;
}
+ downstream = tb_port_path_direction_downstream(src_port, dst_port);
+ for (i = 0; i < ARRAY_SIZE(group_reserved); i++) {
+ if (downstream)
+ *consumed_down += group_reserved[i];
+ else
+ *consumed_up += group_reserved[i];
+ }
+
return 0;
}
@@ -1033,7 +973,7 @@ static int tb_tunnel_usb3(struct tb *tb, struct tb_switch *sw)
return 0;
err_free:
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
err_reclaim:
if (tb_route(parent))
tb_reclaim_usb3_bandwidth(tb, down, up);
@@ -1181,8 +1121,6 @@ static int tb_configure_asym(struct tb *tb, struct tb_port *src_port,
* @tb: Domain structure
* @src_port: Source adapter to start the transition
* @dst_port: Destination adapter
- * @requested_up: New lower bandwidth request upstream (Mb/s)
- * @requested_down: New lower bandwidth request downstream (Mb/s)
* @keep_asym: Keep asymmetric link if preferred
*
* Goes over each link from @src_port to @dst_port and tries to
@@ -1190,8 +1128,7 @@ static int tb_configure_asym(struct tb *tb, struct tb_port *src_port,
* allows and link asymmetric preference is ignored (if @keep_asym is %false).
*/
static int tb_configure_sym(struct tb *tb, struct tb_port *src_port,
- struct tb_port *dst_port, int requested_up,
- int requested_down, bool keep_asym)
+ struct tb_port *dst_port, bool keep_asym)
{
bool clx = false, clx_disabled = false, downstream;
struct tb_switch *sw;
@@ -1230,10 +1167,10 @@ static int tb_configure_sym(struct tb *tb, struct tb_port *src_port,
* guard band 10%) as the link was configured asymmetric
* already.
*/
- if (consumed_down + requested_down >= asym_threshold)
+ if (consumed_down >= asym_threshold)
continue;
} else {
- if (consumed_up + requested_up >= asym_threshold)
+ if (consumed_up >= asym_threshold)
continue;
}
@@ -1306,15 +1243,13 @@ static void tb_configure_link(struct tb_port *down, struct tb_port *up,
struct tb_port *host_port;
host_port = tb_port_at(tb_route(sw), tb->root_switch);
- tb_configure_sym(tb, host_port, up, 0, 0, false);
+ tb_configure_sym(tb, host_port, up, false);
}
/* Set the link configured */
tb_switch_configure_link(sw);
}
-static void tb_scan_port(struct tb_port *port);
-
/*
* tb_scan_switch() - scan for and initialize downstream switches
*/
@@ -1464,6 +1399,297 @@ out_rpm_put:
}
}
+static void
+tb_recalc_estimated_bandwidth_for_group(struct tb_bandwidth_group *group)
+{
+ struct tb_tunnel *first_tunnel;
+ struct tb *tb = group->tb;
+ struct tb_port *in;
+ int ret;
+
+ tb_dbg(tb, "re-calculating bandwidth estimation for group %u\n",
+ group->index);
+
+ first_tunnel = NULL;
+ list_for_each_entry(in, &group->ports, group_list) {
+ int estimated_bw, estimated_up, estimated_down;
+ struct tb_tunnel *tunnel;
+ struct tb_port *out;
+
+ if (!usb4_dp_port_bandwidth_mode_enabled(in))
+ continue;
+
+ tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL);
+ if (WARN_ON(!tunnel))
+ break;
+
+ if (!first_tunnel) {
+ /*
+ * Since USB3 bandwidth is shared by all DP
+ * tunnels under the host router USB4 port, even
+ * if they do not begin from the host router, we
+ * can release USB3 bandwidth just once and not
+ * for each tunnel separately.
+ */
+ first_tunnel = tunnel;
+ ret = tb_release_unused_usb3_bandwidth(tb,
+ first_tunnel->src_port, first_tunnel->dst_port);
+ if (ret) {
+ tb_tunnel_warn(tunnel,
+ "failed to release unused bandwidth\n");
+ break;
+ }
+ }
+
+ out = tunnel->dst_port;
+ ret = tb_available_bandwidth(tb, in, out, &estimated_up,
+ &estimated_down, true);
+ if (ret) {
+ tb_tunnel_warn(tunnel,
+ "failed to re-calculate estimated bandwidth\n");
+ break;
+ }
+
+ /*
+ * Estimated bandwidth includes:
+ * - already allocated bandwidth for the DP tunnel
+ * - available bandwidth along the path
+ * - bandwidth allocated for USB 3.x but not used.
+ */
+ if (tb_tunnel_direction_downstream(tunnel))
+ estimated_bw = estimated_down;
+ else
+ estimated_bw = estimated_up;
+
+ /*
+ * If there is reserved bandwidth for the group that is
+ * not yet released we report that too.
+ */
+ tb_tunnel_dbg(tunnel,
+ "re-calculated estimated bandwidth %u (+ %u reserved) = %u Mb/s\n",
+ estimated_bw, group->reserved,
+ estimated_bw + group->reserved);
+
+ if (usb4_dp_port_set_estimated_bandwidth(in,
+ estimated_bw + group->reserved))
+ tb_tunnel_warn(tunnel,
+ "failed to update estimated bandwidth\n");
+ }
+
+ if (first_tunnel)
+ tb_reclaim_usb3_bandwidth(tb, first_tunnel->src_port,
+ first_tunnel->dst_port);
+
+ tb_dbg(tb, "bandwidth estimation for group %u done\n", group->index);
+}
+
+static void tb_recalc_estimated_bandwidth(struct tb *tb)
+{
+ struct tb_cm *tcm = tb_priv(tb);
+ int i;
+
+ tb_dbg(tb, "bandwidth consumption changed, re-calculating estimated bandwidth\n");
+
+ for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
+ struct tb_bandwidth_group *group = &tcm->groups[i];
+
+ if (!list_empty(&group->ports))
+ tb_recalc_estimated_bandwidth_for_group(group);
+ }
+
+ tb_dbg(tb, "bandwidth re-calculation done\n");
+}
+
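+/*
+ * Releases the bandwidth reserved for the group, if any. Returns %true
+ * if there was reserved bandwidth to release.
+ */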
+static bool __release_group_bandwidth(struct tb_bandwidth_group *group)
+{
+ if (group->reserved) {
+ tb_dbg(group->tb, "group %d released total %d Mb/s\n", group->index,
+ group->reserved);
+ group->reserved = 0;
+ return true;
+ }
+ return false;
+}
+
+static void __configure_group_sym(struct tb_bandwidth_group *group)
+{
+ struct tb_tunnel *tunnel;
+ struct tb_port *in;
+
+ if (list_empty(&group->ports))
+ return;
+
+ /*
+ * All the tunnels in the group go through the same USB4 links
+ * so we find the first one here and pass the IN and OUT
+ * adapters to tb_configure_sym() which now transitions the
+ * links back to symmetric if bandwidth requirement < asym_threshold.
+ *
+ * We do this here to avoid unnecessary transitions (for example
+	 * if the graphics driver released bandwidth for another tunnel in the
+ * same group).
+ */
+ in = list_first_entry(&group->ports, struct tb_port, group_list);
+ tunnel = tb_find_tunnel(group->tb, TB_TUNNEL_DP, in, NULL);
+ if (tunnel)
+ tb_configure_sym(group->tb, in, tunnel->dst_port, true);
+}
+
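+/*
+ * Delayed work that releases the bandwidth reserved for the group and
+ * transitions the links back to symmetric if possible.
+ */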
+static void tb_bandwidth_group_release_work(struct work_struct *work)
+{
+ struct tb_bandwidth_group *group =
+ container_of(work, typeof(*group), release_work.work);
+ struct tb *tb = group->tb;
+
+ mutex_lock(&tb->lock);
+ if (__release_group_bandwidth(group))
+ tb_recalc_estimated_bandwidth(tb);
+ __configure_group_sym(group);
+ mutex_unlock(&tb->lock);
+}
+
+static void tb_init_bandwidth_groups(struct tb_cm *tcm)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
+ struct tb_bandwidth_group *group = &tcm->groups[i];
+
+ group->tb = tcm_to_tb(tcm);
+ group->index = i + 1;
+ INIT_LIST_HEAD(&group->ports);
+ INIT_DELAYED_WORK(&group->release_work,
+ tb_bandwidth_group_release_work);
+ }
+}
+
+static void tb_bandwidth_group_attach_port(struct tb_bandwidth_group *group,
+ struct tb_port *in)
+{
+ if (!group || WARN_ON(in->group))
+ return;
+
+ in->group = group;
+ list_add_tail(&in->group_list, &group->ports);
+
+ tb_port_dbg(in, "attached to bandwidth group %d\n", group->index);
+}
+
+static struct tb_bandwidth_group *tb_find_free_bandwidth_group(struct tb_cm *tcm)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
+ struct tb_bandwidth_group *group = &tcm->groups[i];
+
+ if (list_empty(&group->ports))
+ return group;
+ }
+
+ return NULL;
+}
+
+static struct tb_bandwidth_group *
+tb_attach_bandwidth_group(struct tb_cm *tcm, struct tb_port *in,
+ struct tb_port *out)
+{
+ struct tb_bandwidth_group *group;
+ struct tb_tunnel *tunnel;
+
+ /*
+ * Find all DP tunnels that go through all the same USB4 links
+	 * as this one. Because we always set up tunnels the same way we
+ * can just check for the routers at both ends of the tunnels
+ * and if they are the same we have a match.
+ */
+ list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
+ if (!tb_tunnel_is_dp(tunnel))
+ continue;
+
+ if (tunnel->src_port->sw == in->sw &&
+ tunnel->dst_port->sw == out->sw) {
+ group = tunnel->src_port->group;
+ if (group) {
+ tb_bandwidth_group_attach_port(group, in);
+ return group;
+ }
+ }
+ }
+
+ /* Pick up next available group then */
+ group = tb_find_free_bandwidth_group(tcm);
+ if (group)
+ tb_bandwidth_group_attach_port(group, in);
+ else
+ tb_port_warn(in, "no available bandwidth groups\n");
+
+ return group;
+}
+
+static void tb_discover_bandwidth_group(struct tb_cm *tcm, struct tb_port *in,
+ struct tb_port *out)
+{
+ if (usb4_dp_port_bandwidth_mode_enabled(in)) {
+ int index, i;
+
+ index = usb4_dp_port_group_id(in);
+ for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
+ if (tcm->groups[i].index == index) {
+ tb_bandwidth_group_attach_port(&tcm->groups[i], in);
+ return;
+ }
+ }
+ }
+
+ tb_attach_bandwidth_group(tcm, in, out);
+}
+
+static void tb_detach_bandwidth_group(struct tb_port *in)
+{
+ struct tb_bandwidth_group *group = in->group;
+
+ if (group) {
+ in->group = NULL;
+ list_del_init(&in->group_list);
+
+ tb_port_dbg(in, "detached from bandwidth group %d\n", group->index);
+
+ /* No more tunnels so release the reserved bandwidth if any */
+ if (list_empty(&group->ports)) {
+ cancel_delayed_work(&group->release_work);
+ __release_group_bandwidth(group);
+ }
+ }
+}
+
+static void tb_discover_tunnels(struct tb *tb)
+{
+ struct tb_cm *tcm = tb_priv(tb);
+ struct tb_tunnel *tunnel;
+
+ tb_switch_discover_tunnels(tb->root_switch, &tcm->tunnel_list, true);
+
+ list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
+ if (tb_tunnel_is_pci(tunnel)) {
+ struct tb_switch *parent = tunnel->dst_port->sw;
+
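+			/*
+			 * The PCIe tunnel was set up by the boot
+			 * firmware so mark the routers along it
+			 * accordingly.
+			 */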
+ while (parent != tunnel->src_port->sw) {
+ parent->boot = true;
+ parent = tb_switch_parent(parent);
+ }
+ } else if (tb_tunnel_is_dp(tunnel)) {
+ struct tb_port *in = tunnel->src_port;
+ struct tb_port *out = tunnel->dst_port;
+
+ /* Keep the domain from powering down */
+ pm_runtime_get_sync(&in->sw->dev);
+ pm_runtime_get_sync(&out->sw->dev);
+
+ tb_discover_bandwidth_group(tcm, in, out);
+ }
+ }
+}
+
static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
{
struct tb_port *src_port, *dst_port;
@@ -1491,7 +1717,7 @@ static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
* If bandwidth on a link is < asym_threshold
* transition the link to symmetric.
*/
- tb_configure_sym(tb, src_port, dst_port, 0, 0, true);
+ tb_configure_sym(tb, src_port, dst_port, true);
/* Now we can allow the domain to runtime suspend again */
pm_runtime_mark_last_busy(&dst_port->sw->dev);
pm_runtime_put_autosuspend(&dst_port->sw->dev);
@@ -1511,7 +1737,7 @@ static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
break;
}
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
}
/*
@@ -1605,101 +1831,6 @@ out:
return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN);
}
-static void
-tb_recalc_estimated_bandwidth_for_group(struct tb_bandwidth_group *group)
-{
- struct tb_tunnel *first_tunnel;
- struct tb *tb = group->tb;
- struct tb_port *in;
- int ret;
-
- tb_dbg(tb, "re-calculating bandwidth estimation for group %u\n",
- group->index);
-
- first_tunnel = NULL;
- list_for_each_entry(in, &group->ports, group_list) {
- int estimated_bw, estimated_up, estimated_down;
- struct tb_tunnel *tunnel;
- struct tb_port *out;
-
- if (!usb4_dp_port_bandwidth_mode_enabled(in))
- continue;
-
- tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL);
- if (WARN_ON(!tunnel))
- break;
-
- if (!first_tunnel) {
- /*
- * Since USB3 bandwidth is shared by all DP
- * tunnels under the host router USB4 port, even
- * if they do not begin from the host router, we
- * can release USB3 bandwidth just once and not
- * for each tunnel separately.
- */
- first_tunnel = tunnel;
- ret = tb_release_unused_usb3_bandwidth(tb,
- first_tunnel->src_port, first_tunnel->dst_port);
- if (ret) {
- tb_tunnel_warn(tunnel,
- "failed to release unused bandwidth\n");
- break;
- }
- }
-
- out = tunnel->dst_port;
- ret = tb_available_bandwidth(tb, in, out, &estimated_up,
- &estimated_down, true);
- if (ret) {
- tb_tunnel_warn(tunnel,
- "failed to re-calculate estimated bandwidth\n");
- break;
- }
-
- /*
- * Estimated bandwidth includes:
- * - already allocated bandwidth for the DP tunnel
- * - available bandwidth along the path
- * - bandwidth allocated for USB 3.x but not used.
- */
- tb_tunnel_dbg(tunnel,
- "re-calculated estimated bandwidth %u/%u Mb/s\n",
- estimated_up, estimated_down);
-
- if (tb_port_path_direction_downstream(in, out))
- estimated_bw = estimated_down;
- else
- estimated_bw = estimated_up;
-
- if (usb4_dp_port_set_estimated_bandwidth(in, estimated_bw))
- tb_tunnel_warn(tunnel,
- "failed to update estimated bandwidth\n");
- }
-
- if (first_tunnel)
- tb_reclaim_usb3_bandwidth(tb, first_tunnel->src_port,
- first_tunnel->dst_port);
-
- tb_dbg(tb, "bandwidth estimation for group %u done\n", group->index);
-}
-
-static void tb_recalc_estimated_bandwidth(struct tb *tb)
-{
- struct tb_cm *tcm = tb_priv(tb);
- int i;
-
- tb_dbg(tb, "bandwidth consumption changed, re-calculating estimated bandwidth\n");
-
- for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
- struct tb_bandwidth_group *group = &tcm->groups[i];
-
- if (!list_empty(&group->ports))
- tb_recalc_estimated_bandwidth_for_group(group);
- }
-
- tb_dbg(tb, "bandwidth re-calculation done\n");
-}
-
static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in)
{
struct tb_port *host_port, *port;
@@ -1717,6 +1848,12 @@ static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in)
continue;
}
+ /* Needs to be on different routers */
+ if (in->sw == port->sw) {
+ tb_port_dbg(port, "skipping DP OUT on same router\n");
+ continue;
+ }
+
tb_port_dbg(port, "DP OUT available\n");
/*
@@ -1737,47 +1874,77 @@ static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in)
return NULL;
}
-static bool tb_tunnel_one_dp(struct tb *tb)
+static void tb_dp_tunnel_active(struct tb_tunnel *tunnel, void *data)
{
- int available_up, available_down, ret, link_nr;
- struct tb_cm *tcm = tb_priv(tb);
- struct tb_port *port, *in, *out;
- int consumed_up, consumed_down;
- struct tb_tunnel *tunnel;
+ struct tb_port *in = tunnel->src_port;
+ struct tb_port *out = tunnel->dst_port;
+ struct tb *tb = data;
- /*
- * Find pair of inactive DP IN and DP OUT adapters and then
- * establish a DP tunnel between them.
- */
- tb_dbg(tb, "looking for DP IN <-> DP OUT pairs:\n");
+ mutex_lock(&tb->lock);
+ if (tb_tunnel_is_active(tunnel)) {
+ int consumed_up, consumed_down, ret;
- in = NULL;
- out = NULL;
- list_for_each_entry(port, &tcm->dp_resources, list) {
- if (!tb_port_is_dpin(port))
- continue;
+ tb_tunnel_dbg(tunnel, "DPRX capabilities read completed\n");
- if (tb_port_is_enabled(port)) {
- tb_port_dbg(port, "DP IN in use\n");
- continue;
+		/* If reading the tunnel's consumed bandwidth fails, tear it down */
+ ret = tb_tunnel_consumed_bandwidth(tunnel, &consumed_up,
+ &consumed_down);
+ if (ret) {
+ tb_tunnel_warn(tunnel,
+ "failed to read consumed bandwidth, tearing down\n");
+ tb_deactivate_and_free_tunnel(tunnel);
+ } else {
+ tb_reclaim_usb3_bandwidth(tb, in, out);
+ /*
+ * Transition the links to asymmetric if the
+ * consumption exceeds the threshold.
+ */
+ tb_configure_asym(tb, in, out, consumed_up,
+ consumed_down);
+ /*
+ * Update the domain with the new bandwidth
+ * estimation.
+ */
+ tb_recalc_estimated_bandwidth(tb);
+ /*
+			 * Since a DP tunnel exists, change the TMU mode
+			 * of the host router's first-level children to
+			 * HiFi so that CL0s works.
+ */
+ tb_increase_tmu_accuracy(tunnel);
}
+ } else {
+ struct tb_port *in = tunnel->src_port;
- in = port;
- tb_port_dbg(in, "DP IN available\n");
-
- out = tb_find_dp_out(tb, port);
- if (out)
- break;
+ /*
+ * This tunnel failed to establish. This means DPRX
+	 * negotiation most likely did not complete, which
+	 * happens either because there is no graphics driver
+	 * loaded or because not all DP cables were connected to
+	 * the discrete router.
+ *
+ * In both cases we remove the DP IN adapter from the
+ * available resources as it is not usable. This will
+ * also tear down the tunnel and try to re-use the
+ * released DP OUT.
+ *
+ * It will be added back only if there is hotplug for
+ * the DP IN again.
+ */
+ tb_tunnel_warn(tunnel, "not active, tearing down\n");
+ tb_dp_resource_unavailable(tb, in, "DPRX negotiation failed");
}
+ mutex_unlock(&tb->lock);
- if (!in) {
- tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n");
- return false;
- }
- if (!out) {
- tb_dbg(tb, "no suitable DP OUT adapter available, not tunneling\n");
- return false;
- }
+ tb_domain_put(tb);
+}
+
+static void tb_tunnel_one_dp(struct tb *tb, struct tb_port *in,
+ struct tb_port *out)
+{
+ int available_up, available_down, ret, link_nr;
+ struct tb_cm *tcm = tb_priv(tb);
+ struct tb_tunnel *tunnel;
/*
* This is only applicable to links that are not bonded (so
@@ -1828,41 +1995,29 @@ static bool tb_tunnel_one_dp(struct tb *tb)
available_up, available_down);
tunnel = tb_tunnel_alloc_dp(tb, in, out, link_nr, available_up,
- available_down);
+ available_down, tb_dp_tunnel_active,
+ tb_domain_get(tb));
if (!tunnel) {
tb_port_dbg(out, "could not allocate DP tunnel\n");
goto err_reclaim_usb;
}
- if (tb_tunnel_activate(tunnel)) {
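+	/*
+	 * Add the tunnel to the list now so that it can be found while
+	 * the (possibly asynchronous) activation completes.
+	 */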
+ list_add_tail(&tunnel->list, &tcm->tunnel_list);
+
+ ret = tb_tunnel_activate(tunnel);
+ if (ret && ret != -EINPROGRESS) {
tb_port_info(out, "DP tunnel activation failed, aborting\n");
+ list_del(&tunnel->list);
goto err_free;
}
- list_add_tail(&tunnel->list, &tcm->tunnel_list);
- tb_reclaim_usb3_bandwidth(tb, in, out);
-
- /*
- * Transition the links to asymmetric if the consumption exceeds
- * the threshold.
- */
- if (!tb_tunnel_consumed_bandwidth(tunnel, &consumed_up, &consumed_down))
- tb_configure_asym(tb, in, out, consumed_up, consumed_down);
-
- /* Update the domain with the new bandwidth estimation */
- tb_recalc_estimated_bandwidth(tb);
-
- /*
- * In case of DP tunnel exists, change host router's 1st children
- * TMU mode to HiFi for CL0s to work.
- */
- tb_increase_tmu_accuracy(tunnel);
- return true;
+ return;
err_free:
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
err_reclaim_usb:
tb_reclaim_usb3_bandwidth(tb, in, out);
+ tb_domain_put(tb);
err_detach_group:
tb_detach_bandwidth_group(in);
err_dealloc_dp:
@@ -1872,38 +2027,144 @@ err_rpm_put:
pm_runtime_put_autosuspend(&out->sw->dev);
pm_runtime_mark_last_busy(&in->sw->dev);
pm_runtime_put_autosuspend(&in->sw->dev);
-
- return false;
}
static void tb_tunnel_dp(struct tb *tb)
{
+ struct tb_cm *tcm = tb_priv(tb);
+ struct tb_port *port, *in, *out;
+
if (!tb_acpi_may_tunnel_dp()) {
tb_dbg(tb, "DP tunneling disabled, not creating tunnel\n");
return;
}
- while (tb_tunnel_one_dp(tb))
- ;
+ /*
+ * Find pair of inactive DP IN and DP OUT adapters and then
+ * establish a DP tunnel between them.
+ */
+ tb_dbg(tb, "looking for DP IN <-> DP OUT pairs:\n");
+
+ in = NULL;
+ out = NULL;
+ list_for_each_entry(port, &tcm->dp_resources, list) {
+ if (!tb_port_is_dpin(port))
+ continue;
+
+ if (tb_port_is_enabled(port)) {
+ tb_port_dbg(port, "DP IN in use\n");
+ continue;
+ }
+
+ in = port;
+ tb_port_dbg(in, "DP IN available\n");
+
+ out = tb_find_dp_out(tb, port);
+ if (out)
+ tb_tunnel_one_dp(tb, in, out);
+ else
+ tb_port_dbg(in, "no suitable DP OUT adapter available, not tunneling\n");
+ }
+
+ if (!in)
+ tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n");
+}
+
+static void tb_enter_redrive(struct tb_port *port)
+{
+ struct tb_switch *sw = port->sw;
+
+ if (!(sw->quirks & QUIRK_KEEP_POWER_IN_DP_REDRIVE))
+ return;
+
+ /*
+ * If we get hot-unplug for the DP IN port of the host router
+ * and the DP resource is not available anymore it means there
+ * is a monitor connected directly to the Type-C port and we are
+ * in "redrive" mode. For this to work we cannot enter RTD3 so
+ * we bump up the runtime PM reference count here.
+ */
+ if (!tb_port_is_dpin(port))
+ return;
+ if (tb_route(sw))
+ return;
+ if (!tb_switch_query_dp_resource(sw, port)) {
+ port->redrive = true;
+ pm_runtime_get(&sw->dev);
+ tb_port_dbg(port, "enter redrive mode, keeping powered\n");
+ }
+}
+
+static void tb_exit_redrive(struct tb_port *port)
+{
+ struct tb_switch *sw = port->sw;
+
+ if (!(sw->quirks & QUIRK_KEEP_POWER_IN_DP_REDRIVE))
+ return;
+
+ if (!tb_port_is_dpin(port))
+ return;
+ if (tb_route(sw))
+ return;
+ if (port->redrive && tb_switch_query_dp_resource(sw, port)) {
+ port->redrive = false;
+ pm_runtime_put(&sw->dev);
+ tb_port_dbg(port, "exit redrive mode\n");
+ }
+}
+
+static void tb_switch_enter_redrive(struct tb_switch *sw)
+{
+ struct tb_port *port;
+
+ tb_switch_for_each_port(sw, port)
+ tb_enter_redrive(port);
+}
+
+/*
+ * Called during system and runtime suspend to forcefully exit redrive
+ * mode without querying whether the resource is available.
+ */
+static void tb_switch_exit_redrive(struct tb_switch *sw)
+{
+ struct tb_port *port;
+
+ if (!(sw->quirks & QUIRK_KEEP_POWER_IN_DP_REDRIVE))
+ return;
+
+ tb_switch_for_each_port(sw, port) {
+ if (!tb_port_is_dpin(port))
+ continue;
+
+ if (port->redrive) {
+ port->redrive = false;
+ pm_runtime_put(&sw->dev);
+ tb_port_dbg(port, "exit redrive mode\n");
+ }
+ }
}
-static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port)
+static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port,
+ const char *reason)
{
struct tb_port *in, *out;
struct tb_tunnel *tunnel;
if (tb_port_is_dpin(port)) {
- tb_port_dbg(port, "DP IN resource unavailable\n");
+ tb_port_dbg(port, "DP IN resource unavailable: %s\n", reason);
in = port;
out = NULL;
} else {
- tb_port_dbg(port, "DP OUT resource unavailable\n");
+ tb_port_dbg(port, "DP OUT resource unavailable: %s\n", reason);
in = NULL;
out = port;
}
tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, out);
- tb_deactivate_and_free_tunnel(tunnel);
+ if (tunnel)
+ tb_deactivate_and_free_tunnel(tunnel);
+ else
+ tb_enter_redrive(port);
list_del_init(&port->list);
/*
@@ -1930,6 +2191,7 @@ static void tb_dp_resource_available(struct tb *tb, struct tb_port *port)
tb_port_dbg(port, "DP %s resource available after hotplug\n",
tb_port_is_dpin(port) ? "IN" : "OUT");
list_add_tail(&port->list, &tcm->dp_resources);
+ tb_exit_redrive(port);
/* Look for suitable DP IN <-> DP OUT pairs now */
tb_tunnel_dp(tb);
@@ -1975,7 +2237,7 @@ static int tb_disconnect_pci(struct tb *tb, struct tb_switch *sw)
tb_tunnel_deactivate(tunnel);
list_del(&tunnel->list);
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
return 0;
}
@@ -2005,7 +2267,7 @@ static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
if (tb_tunnel_activate(tunnel)) {
tb_port_info(up,
"PCIe tunnel activation failed, aborting\n");
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
return -EIO;
}
@@ -2064,7 +2326,7 @@ static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
return 0;
err_free:
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
err_clx:
tb_enable_clx(sw);
mutex_unlock(&tb->lock);
@@ -2127,7 +2389,7 @@ static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
*/
static void tb_handle_hotplug(struct work_struct *work)
{
- struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
+ struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work.work);
struct tb *tb = ev->tb;
struct tb_cm *tcm = tb_priv(tb);
struct tb_switch *sw;
@@ -2199,7 +2461,7 @@ static void tb_handle_hotplug(struct work_struct *work)
tb_xdomain_put(xd);
tb_port_unconfigure_xdomain(port);
} else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
- tb_dp_resource_unavailable(tb, port);
+ tb_dp_resource_unavailable(tb, port, "adapter unplug");
} else if (!port->port) {
tb_sw_dbg(sw, "xHCI disconnect request\n");
tb_switch_xhci_disconnect(sw);
@@ -2243,8 +2505,10 @@ static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up,
int allocated_up, allocated_down, available_up, available_down, ret;
int requested_up_corrected, requested_down_corrected, granularity;
int max_up, max_down, max_up_rounded, max_down_rounded;
+ struct tb_bandwidth_group *group;
struct tb *tb = tunnel->tb;
struct tb_port *in, *out;
+ bool downstream;
ret = tb_tunnel_allocated_bandwidth(tunnel, &allocated_up, &allocated_down);
if (ret)
@@ -2270,11 +2534,11 @@ static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up,
*/
ret = tb_tunnel_maximum_bandwidth(tunnel, &max_up, &max_down);
if (ret)
- return ret;
+ goto fail;
ret = usb4_dp_port_granularity(in);
if (ret < 0)
- return ret;
+ goto fail;
granularity = ret;
max_up_rounded = roundup(max_up, granularity);
@@ -2304,24 +2568,48 @@ static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up,
"bandwidth request too high (%d/%d Mb/s > %d/%d Mb/s)\n",
requested_up_corrected, requested_down_corrected,
max_up_rounded, max_down_rounded);
- return -ENOBUFS;
+ ret = -ENOBUFS;
+ goto fail;
}
+ downstream = tb_tunnel_direction_downstream(tunnel);
+ group = in->group;
+
if ((*requested_up >= 0 && requested_up_corrected <= allocated_up) ||
(*requested_down >= 0 && requested_down_corrected <= allocated_down)) {
- /*
- * If bandwidth on a link is < asym_threshold transition
- * the link to symmetric.
- */
- tb_configure_sym(tb, in, out, *requested_up, *requested_down, true);
- /*
- * If requested bandwidth is less or equal than what is
- * currently allocated to that tunnel we simply change
- * the reservation of the tunnel. Since all the tunnels
- * going out from the same USB4 port are in the same
- * group the released bandwidth will be taken into
- * account for the other tunnels automatically below.
- */
+ if (tunnel->bw_mode) {
+ int reserved;
+ /*
+			 * If the requested bandwidth is less than or
+			 * equal to what is currently allocated to the
+			 * tunnel we simply change the reservation of
+			 * the tunnel and keep the released bandwidth
+			 * in the group for the next 10s. Then we
+			 * release it for others to use.
+ */
+ if (downstream)
+ reserved = allocated_down - *requested_down;
+ else
+ reserved = allocated_up - *requested_up;
+
+ if (reserved > 0) {
+ group->reserved += reserved;
+ tb_dbg(tb, "group %d reserved %d total %d Mb/s\n",
+ group->index, reserved, group->reserved);
+
+ /*
+ * If it was not already pending,
+				 * schedule the release now. If it is,
+				 * postpone it for the next 10s (unless
+				 * it is already running, in which case
+				 * the 10s already expired and we should
+				 * give the reserved bandwidth back to
+				 * others).
+ */
+ mod_delayed_work(system_wq, &group->release_work,
+ msecs_to_jiffies(TB_RELEASE_BW_TIMEOUT));
+ }
+ }
+
return tb_tunnel_alloc_bandwidth(tunnel, requested_up,
requested_down);
}
@@ -2332,7 +2620,7 @@ static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up,
*/
ret = tb_release_unused_usb3_bandwidth(tb, in, out);
if (ret)
- return ret;
+ goto fail;
/*
* Then go over all tunnels that cross the same USB4 ports (they
@@ -2344,11 +2632,15 @@ static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up,
if (ret)
goto reclaim;
- tb_tunnel_dbg(tunnel, "bandwidth available for allocation %d/%d Mb/s\n",
- available_up, available_down);
+ tb_tunnel_dbg(tunnel, "bandwidth available for allocation %d/%d (+ %u reserved) Mb/s\n",
+ available_up, available_down, group->reserved);
+
+ if ((*requested_up >= 0 &&
+ available_up + group->reserved >= requested_up_corrected) ||
+ (*requested_down >= 0 &&
+ available_down + group->reserved >= requested_down_corrected)) {
+ int released = 0;
- if ((*requested_up >= 0 && available_up >= requested_up_corrected) ||
- (*requested_down >= 0 && available_down >= requested_down_corrected)) {
/*
* If bandwidth on a link is >= asym_threshold
* transition the link to asymmetric.
@@ -2356,15 +2648,28 @@ static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up,
ret = tb_configure_asym(tb, in, out, *requested_up,
*requested_down);
if (ret) {
- tb_configure_sym(tb, in, out, 0, 0, true);
- return ret;
+ tb_configure_sym(tb, in, out, true);
+ goto fail;
}
ret = tb_tunnel_alloc_bandwidth(tunnel, requested_up,
requested_down);
if (ret) {
tb_tunnel_warn(tunnel, "failed to allocate bandwidth\n");
- tb_configure_sym(tb, in, out, 0, 0, true);
+ tb_configure_sym(tb, in, out, true);
+ }
+
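+		/*
+		 * If the new allocation dips into the bandwidth reserved
+		 * for the group, reduce the reservation accordingly.
+		 */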
+ if (downstream) {
+ if (*requested_down > available_down)
+ released = *requested_down - available_down;
+ } else {
+ if (*requested_up > available_up)
+ released = *requested_up - available_up;
+ }
+ if (released) {
+ group->reserved -= released;
+ tb_dbg(tb, "group %d released %d total %d Mb/s\n",
+ group->index, released, group->reserved);
}
} else {
ret = -ENOBUFS;
@@ -2372,18 +2677,30 @@ static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up,
reclaim:
tb_reclaim_usb3_bandwidth(tb, in, out);
+fail:
+ if (ret && ret != -ENODEV) {
+ /*
+		 * Write back the same allocated values (so no change);
+		 * this makes the DPTX request fail on the graphics side.
+ */
+ tb_tunnel_dbg(tunnel,
+ "failing the request by rewriting allocated %d/%d Mb/s\n",
+ allocated_up, allocated_down);
+ tb_tunnel_alloc_bandwidth(tunnel, &allocated_up, &allocated_down);
+ }
+
return ret;
}
static void tb_handle_dp_bandwidth_request(struct work_struct *work)
{
- struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
+ struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work.work);
int requested_bw, requested_up, requested_down, ret;
- struct tb_port *in, *out;
struct tb_tunnel *tunnel;
struct tb *tb = ev->tb;
struct tb_cm *tcm = tb_priv(tb);
struct tb_switch *sw;
+ struct tb_port *in;
pm_runtime_get_sync(&tb->dev);
@@ -2404,34 +2721,50 @@ static void tb_handle_dp_bandwidth_request(struct work_struct *work)
goto put_sw;
}
- tb_port_dbg(in, "handling bandwidth allocation request\n");
+ tb_port_dbg(in, "handling bandwidth allocation request, retry %d\n", ev->retry);
+
+ tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL);
+ if (!tunnel) {
+ tb_port_warn(in, "failed to find tunnel\n");
+ goto put_sw;
+ }
if (!usb4_dp_port_bandwidth_mode_enabled(in)) {
- tb_port_warn(in, "bandwidth allocation mode not enabled\n");
+ if (tunnel->bw_mode) {
+ /*
+ * Reset the tunnel back to use the legacy
+ * allocation.
+ */
+ tunnel->bw_mode = false;
+ tb_port_dbg(in, "DPTX disabled bandwidth allocation mode\n");
+ } else {
+ tb_port_warn(in, "bandwidth allocation mode not enabled\n");
+ }
goto put_sw;
}
ret = usb4_dp_port_requested_bandwidth(in);
if (ret < 0) {
- if (ret == -ENODATA)
- tb_port_dbg(in, "no bandwidth request active\n");
- else
+ if (ret == -ENODATA) {
+ /*
+			 * There is no request active so this means the
+			 * BW allocation mode was enabled from the
+			 * graphics side. At this point we know that the
+			 * graphics driver has read the DPRX capabilities
+			 * so we can offer a better bandwidth estimation.
+ */
+ tb_port_dbg(in, "DPTX enabled bandwidth allocation mode, updating estimated bandwidth\n");
+ tb_recalc_estimated_bandwidth(tb);
+ } else {
tb_port_warn(in, "failed to read requested bandwidth\n");
+ }
goto put_sw;
}
requested_bw = ret;
tb_port_dbg(in, "requested bandwidth %d Mb/s\n", requested_bw);
- tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL);
- if (!tunnel) {
- tb_port_warn(in, "failed to find tunnel\n");
- goto put_sw;
- }
-
- out = tunnel->dst_port;
-
- if (tb_port_path_direction_downstream(in, out)) {
+ if (tb_tunnel_direction_downstream(tunnel)) {
requested_up = -1;
requested_down = requested_bw;
} else {
@@ -2441,12 +2774,33 @@ static void tb_handle_dp_bandwidth_request(struct work_struct *work)
ret = tb_alloc_dp_bandwidth(tunnel, &requested_up, &requested_down);
if (ret) {
- if (ret == -ENOBUFS)
+ if (ret == -ENOBUFS) {
tb_tunnel_warn(tunnel,
"not enough bandwidth available\n");
- else
+ } else if (ret == -ENOTCONN) {
+ tb_tunnel_dbg(tunnel, "not active yet\n");
+ /*
+			 * We got a bandwidth allocation request but the
+			 * tunnel is not yet active. This means that
+			 * tb_dp_tunnel_active() has not been called for
+			 * this tunnel yet. Allow it some time and retry
+ * this request a couple of times.
+ */
+ if (ev->retry < TB_BW_ALLOC_RETRIES) {
+ tb_tunnel_dbg(tunnel,
+ "retrying bandwidth allocation request\n");
+ tb_queue_dp_bandwidth_request(tb, ev->route,
+ ev->port,
+ ev->retry + 1,
+ msecs_to_jiffies(50));
+ } else {
+ tb_tunnel_dbg(tunnel,
+ "run out of retries, failing the request");
+ }
+ } else {
tb_tunnel_warn(tunnel,
"failed to change bandwidth allocation\n");
+ }
} else {
tb_tunnel_dbg(tunnel,
"bandwidth allocation changed to %d/%d Mb/s\n",
@@ -2467,7 +2821,8 @@ unlock:
kfree(ev);
}
-static void tb_queue_dp_bandwidth_request(struct tb *tb, u64 route, u8 port)
+static void tb_queue_dp_bandwidth_request(struct tb *tb, u64 route, u8 port,
+ int retry, unsigned long delay)
{
struct tb_hotplug_event *ev;
@@ -2478,8 +2833,9 @@ static void tb_queue_dp_bandwidth_request(struct tb *tb, u64 route, u8 port)
ev->tb = tb;
ev->route = route;
ev->port = port;
- INIT_WORK(&ev->work, tb_handle_dp_bandwidth_request);
- queue_work(tb->wq, &ev->work);
+ ev->retry = retry;
+ INIT_DELAYED_WORK(&ev->work, tb_handle_dp_bandwidth_request);
+ queue_delayed_work(tb->wq, &ev->work, delay);
}
static void tb_handle_notification(struct tb *tb, u64 route,
@@ -2499,7 +2855,7 @@ static void tb_handle_notification(struct tb *tb, u64 route,
if (tb_cfg_ack_notification(tb->ctl, route, error))
tb_warn(tb, "could not ack notification on %llx\n",
route);
- tb_queue_dp_bandwidth_request(tb, route, error->port);
+ tb_queue_dp_bandwidth_request(tb, route, error->port, 0, 0);
break;
default:
@@ -2554,12 +2910,22 @@ static void tb_stop(struct tb *tb)
*/
if (tb_tunnel_is_dma(tunnel))
tb_tunnel_deactivate(tunnel);
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
}
tb_switch_remove(tb->root_switch);
tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
}
+static void tb_deinit(struct tb *tb)
+{
+ struct tb_cm *tcm = tb_priv(tb);
+ int i;
+
+ /* Cancel all the release bandwidth workers */
+ for (i = 0; i < ARRAY_SIZE(tcm->groups); i++)
+ cancel_delayed_work_sync(&tcm->groups[i].release_work);
+}
+
static int tb_scan_finalize_switch(struct device *dev, void *data)
{
if (tb_is_switch(dev)) {
@@ -2581,9 +2947,10 @@ static int tb_scan_finalize_switch(struct device *dev, void *data)
return 0;
}
-static int tb_start(struct tb *tb)
+static int tb_start(struct tb *tb, bool reset)
{
struct tb_cm *tcm = tb_priv(tb);
+ bool discover = true;
int ret;
tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
@@ -2622,12 +2989,28 @@ static int tb_start(struct tb *tb)
tb_switch_tmu_configure(tb->root_switch, TB_SWITCH_TMU_MODE_LOWRES);
/* Enable TMU if it is off */
tb_switch_tmu_enable(tb->root_switch);
- /* Full scan to discover devices added before the driver was loaded. */
- tb_scan_switch(tb->root_switch);
- /* Find out tunnels created by the boot firmware */
- tb_discover_tunnels(tb);
- /* Add DP resources from the DP tunnels created by the boot firmware */
- tb_discover_dp_resources(tb);
+
+ /*
+ * Boot firmware might have created tunnels of its own. Since we
+ * cannot be sure they are usable for us, tear them down and
+	 * reset the ports so that they are handled as a new hotplug for
+	 * USB4 v1 routers (for USB4 v2 and beyond we already do a host
+	 * reset).
+ */
+ if (reset && tb_switch_is_usb4(tb->root_switch)) {
+ discover = false;
+ if (usb4_switch_version(tb->root_switch) == 1)
+ tb_switch_reset(tb->root_switch);
+ }
+
+ if (discover) {
+ /* Full scan to discover devices added before the driver was loaded. */
+ tb_scan_switch(tb->root_switch);
+ /* Find out tunnels created by the boot firmware */
+ tb_discover_tunnels(tb);
+ /* Add DP resources from the DP tunnels created by the boot firmware */
+ tb_discover_dp_resources(tb);
+ }
+
/*
* If the boot firmware did not create USB 3.x tunnels create them
* now for the whole topology.
@@ -2635,6 +3018,7 @@ static int tb_start(struct tb *tb)
tb_create_usb3_tunnels(tb->root_switch);
/* Add DP IN resources for the root switch */
tb_add_dp_resources(tb->root_switch);
+ tb_switch_enter_redrive(tb->root_switch);
/* Make the discovered switches available to the userspace */
device_for_each_child(&tb->root_switch->dev, NULL,
tb_scan_finalize_switch);
@@ -2650,6 +3034,7 @@ static int tb_suspend_noirq(struct tb *tb)
tb_dbg(tb, "suspending...\n");
tb_disconnect_and_release_dp(tb);
+ tb_switch_exit_redrive(tb->root_switch);
tb_switch_suspend(tb->root_switch, false);
tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
tb_dbg(tb, "suspend finished\n");
@@ -2698,10 +3083,14 @@ static int tb_resume_noirq(struct tb *tb)
tb_dbg(tb, "resuming...\n");
- /* remove any pci devices the firmware might have setup */
- tb_switch_reset(tb->root_switch);
+ /*
+ * For non-USB4 hosts (Apple systems) remove any PCIe devices
+	 * the firmware might have set up.
+ */
+ if (!tb_switch_is_usb4(tb->root_switch))
+ tb_switch_reset(tb->root_switch);
- tb_switch_resume(tb->root_switch);
+ tb_switch_resume(tb->root_switch, false);
tb_free_invalid_tunnels(tb);
tb_free_unplugged_children(tb->root_switch);
tb_restore_children(tb->root_switch);
@@ -2717,7 +3106,7 @@ static int tb_resume_noirq(struct tb *tb)
if (tb_tunnel_is_usb3(tunnel))
usb3_delay = 500;
tb_tunnel_deactivate(tunnel);
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
}
/* Re-create our tunnels now */
@@ -2728,7 +3117,7 @@ static int tb_resume_noirq(struct tb *tb)
/* Only need to do it once */
usb3_delay = 0;
}
- tb_tunnel_restart(tunnel);
+ tb_tunnel_activate(tunnel);
}
if (!list_empty(&tcm->tunnel_list)) {
/*
@@ -2738,6 +3127,7 @@ static int tb_resume_noirq(struct tb *tb)
tb_dbg(tb, "tunnels restarted, sleeping for 100ms\n");
msleep(100);
}
+ tb_switch_enter_redrive(tb->root_switch);
/* Allow tb_handle_hotplug to progress events */
tcm->hotplug_active = true;
tb_dbg(tb, "resume finished\n");
@@ -2801,6 +3191,12 @@ static int tb_runtime_suspend(struct tb *tb)
struct tb_cm *tcm = tb_priv(tb);
mutex_lock(&tb->lock);
+ /*
+ * The below call only releases DP resources to allow exiting and
+ * re-entering redrive mode.
+ */
+ tb_disconnect_and_release_dp(tb);
+ tb_switch_exit_redrive(tb->root_switch);
tb_switch_suspend(tb->root_switch, true);
tcm->hotplug_active = false;
mutex_unlock(&tb->lock);
@@ -2827,11 +3223,12 @@ static int tb_runtime_resume(struct tb *tb)
struct tb_tunnel *tunnel, *n;
mutex_lock(&tb->lock);
- tb_switch_resume(tb->root_switch);
+ tb_switch_resume(tb->root_switch, true);
tb_free_invalid_tunnels(tb);
tb_restore_children(tb->root_switch);
list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
- tb_tunnel_restart(tunnel);
+ tb_tunnel_activate(tunnel);
+ tb_switch_enter_redrive(tb->root_switch);
tcm->hotplug_active = true;
mutex_unlock(&tb->lock);
@@ -2847,6 +3244,7 @@ static int tb_runtime_resume(struct tb *tb)
static const struct tb_cm_ops tb_cm_ops = {
.start = tb_start,
.stop = tb_stop,
+ .deinit = tb_deinit,
.suspend_noirq = tb_suspend_noirq,
.resume_noirq = tb_resume_noirq,
.freeze_noirq = tb_freeze_noirq,
diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
index 997c5a536905..b54147a1ba87 100644
--- a/drivers/thunderbolt/tb.h
+++ b/drivers/thunderbolt/tb.h
@@ -9,6 +9,7 @@
#ifndef TB_H_
#define TB_H_
+#include <linux/debugfs.h>
#include <linux/nvmem-provider.h>
#include <linux/pci.h>
#include <linux/thunderbolt.h>
@@ -23,6 +24,8 @@
#define QUIRK_FORCE_POWER_LINK_CONTROLLER BIT(0)
/* Disable CLx if not supported */
#define QUIRK_NO_CLX BIT(1)
+/* Need to keep power on while USB4 port is in redrive mode */
+#define QUIRK_KEEP_POWER_IN_DP_REDRIVE BIT(2)
/**
* struct tb_nvm - Structure holding NVM information
@@ -158,6 +161,7 @@ struct tb_switch_tmu {
* @max_pcie_credits: Router preferred number of buffers for PCIe
* @max_dma_credits: Router preferred number of buffers for DMA/P2P
* @clx: CLx states on the upstream link of the router
+ * @drom_blob: DROM debugfs blob wrapper
*
* When the switch is being added or removed to the domain (other
* switches) you need to have domain lock held.
@@ -210,6 +214,9 @@ struct tb_switch {
unsigned int max_pcie_credits;
unsigned int max_dma_credits;
unsigned int clx;
+#ifdef CONFIG_DEBUG_FS
+ struct debugfs_blob_wrapper drom_blob;
+#endif
};
/**
@@ -217,6 +224,11 @@ struct tb_switch {
* @tb: Pointer to the domain the group belongs to
* @index: Index of the group (aka Group_ID). Valid values %1-%7
* @ports: DP IN adapters belonging to this group are linked here
+ * @reserved: Bandwidth released by one tunnel in the group, available
+ * to others. This is reported as part of estimated_bw for
+ * the group.
+ * @release_work: Worker to release the @reserved if it is not used by
+ * any of the tunnels.
*
* Any tunnel that requires isochronous bandwidth (that's DP for now) is
* attached to a bandwidth group. All tunnels going through the same
@@ -227,6 +239,8 @@ struct tb_bandwidth_group {
struct tb *tb;
int index;
struct list_head ports;
+ int reserved;
+ struct delayed_work release_work;
};
/**
@@ -258,6 +272,7 @@ struct tb_bandwidth_group {
* @group_list: The adapter is linked to the group's list of ports through this
* @max_bw: Maximum possible bandwidth through this adapter if set to
* non-zero.
+ * @redrive: For DP IN, if true the adapter is in redrive mode.
*
* In USB4 terminology this structure represents an adapter (protocol or
* lane adapter).
@@ -286,6 +301,7 @@ struct tb_port {
struct tb_bandwidth_group *group;
struct list_head group_list;
unsigned int max_bw;
+ bool redrive;
};
/**
@@ -318,6 +334,7 @@ struct usb4_port {
* @nvm: Pointer to the NVM if the retimer has one (%NULL otherwise)
* @no_nvm_upgrade: Prevent NVM upgrade of this retimer
* @auth_status: Status of last NVM authentication
+ * @margining: Pointer to margining structure if enabled
*/
struct tb_retimer {
struct device dev;
@@ -329,6 +346,9 @@ struct tb_retimer {
struct tb_nvm *nvm;
bool no_nvm_upgrade;
u32 auth_status;
+#ifdef CONFIG_USB4_DEBUGFS_MARGINING
+ struct tb_margining *margining;
+#endif
};
/**
@@ -452,6 +472,8 @@ struct tb_path {
* ICM to send driver ready message to the firmware.
* @start: Starts the domain
* @stop: Stops the domain
+ * @deinit: Perform any cleanup after the domain is stopped but before
+ * it is unregistered. Called without @tb->lock taken. Optional.
* @suspend_noirq: Connection manager specific suspend_noirq
* @resume_noirq: Connection manager specific resume_noirq
* @suspend: Connection manager specific suspend
@@ -483,8 +505,9 @@ struct tb_path {
*/
struct tb_cm_ops {
int (*driver_ready)(struct tb *tb);
- int (*start)(struct tb *tb);
+ int (*start)(struct tb *tb, bool reset);
void (*stop)(struct tb *tb);
+ void (*deinit)(struct tb *tb);
int (*suspend_noirq)(struct tb *tb);
int (*resume_noirq)(struct tb *tb);
int (*suspend)(struct tb *tb);
@@ -735,10 +758,10 @@ static inline int tb_port_write(struct tb_port *port, const void *buffer,
struct tb *icm_probe(struct tb_nhi *nhi);
struct tb *tb_probe(struct tb_nhi *nhi);
-extern struct device_type tb_domain_type;
-extern struct device_type tb_retimer_type;
-extern struct device_type tb_switch_type;
-extern struct device_type usb4_port_device_type;
+extern const struct device_type tb_domain_type;
+extern const struct device_type tb_retimer_type;
+extern const struct device_type tb_switch_type;
+extern const struct device_type usb4_port_device_type;
int tb_domain_init(void);
void tb_domain_exit(void);
@@ -746,7 +769,7 @@ int tb_xdomain_init(void);
void tb_xdomain_exit(void);
struct tb *tb_domain_alloc(struct tb_nhi *nhi, int timeout_msec, size_t privsize);
-int tb_domain_add(struct tb *tb);
+int tb_domain_add(struct tb *tb, bool reset);
void tb_domain_remove(struct tb *tb);
int tb_domain_suspend_noirq(struct tb *tb);
int tb_domain_resume_noirq(struct tb *tb);
@@ -813,7 +836,7 @@ int tb_switch_configuration_valid(struct tb_switch *sw);
int tb_switch_add(struct tb_switch *sw);
void tb_switch_remove(struct tb_switch *sw);
void tb_switch_suspend(struct tb_switch *sw, bool runtime);
-int tb_switch_resume(struct tb_switch *sw);
+int tb_switch_resume(struct tb_switch *sw, bool runtime);
int tb_switch_reset(struct tb_switch *sw);
int tb_switch_wait_for_bit(struct tb_switch *sw, u32 offset, u32 bit,
u32 value, int timeout_msec);
@@ -1150,6 +1173,7 @@ struct tb_path *tb_path_alloc(struct tb *tb, struct tb_port *src, int src_hopid,
void tb_path_free(struct tb_path *path);
int tb_path_activate(struct tb_path *path);
void tb_path_deactivate(struct tb_path *path);
+int tb_path_deactivate_hop(struct tb_port *port, int hop_index);
bool tb_path_is_invalid(struct tb_path *path);
bool tb_path_port_on_path(const struct tb_path *path,
const struct tb_port *port);
@@ -1169,6 +1193,7 @@ int tb_drom_read(struct tb_switch *sw);
int tb_drom_read_uid_only(struct tb_switch *sw, u64 *uid);
int tb_lc_read_uuid(struct tb_switch *sw, u32 *uuid);
+int tb_lc_reset_port(struct tb_port *port);
int tb_lc_configure_port(struct tb_port *port);
void tb_lc_unconfigure_port(struct tb_port *port);
int tb_lc_configure_xdomain(struct tb_port *port);
@@ -1272,6 +1297,7 @@ static inline bool tb_switch_is_usb4(const struct tb_switch *sw)
return usb4_switch_version(sw) > 0;
}
+void usb4_switch_check_wakes(struct tb_switch *sw);
int usb4_switch_setup(struct tb_switch *sw);
int usb4_switch_configuration_valid(struct tb_switch *sw);
int usb4_switch_read_uid(struct tb_switch *sw, u64 *uid);
@@ -1301,6 +1327,7 @@ void usb4_switch_remove_ports(struct tb_switch *sw);
int usb4_port_unlock(struct tb_port *port);
int usb4_port_hotplug_enable(struct tb_port *port);
+int usb4_port_reset(struct tb_port *port);
int usb4_port_configure(struct tb_port *port);
void usb4_port_unconfigure(struct tb_port *port);
int usb4_port_configure_xdomain(struct tb_port *port, struct tb_xdomain *xd);
@@ -1309,26 +1336,85 @@ int usb4_port_router_offline(struct tb_port *port);
int usb4_port_router_online(struct tb_port *port);
int usb4_port_enumerate_retimers(struct tb_port *port);
bool usb4_port_clx_supported(struct tb_port *port);
-int usb4_port_margining_caps(struct tb_port *port, u32 *caps);
bool usb4_port_asym_supported(struct tb_port *port);
int usb4_port_asym_set_link_width(struct tb_port *port, enum tb_link_width width);
int usb4_port_asym_start(struct tb_port *port);
-int usb4_port_hw_margin(struct tb_port *port, unsigned int lanes,
- unsigned int ber_level, bool timing, bool right_high,
+/**
+ * enum usb4_sb_target - Sideband transaction target
+ * @USB4_SB_TARGET_ROUTER: Target is the router itself
+ * @USB4_SB_TARGET_PARTNER: Target is partner
+ * @USB4_SB_TARGET_RETIMER: Target is retimer
+ */
+enum usb4_sb_target {
+ USB4_SB_TARGET_ROUTER,
+ USB4_SB_TARGET_PARTNER,
+ USB4_SB_TARGET_RETIMER,
+};
+
+int usb4_port_sb_read(struct tb_port *port, enum usb4_sb_target target, u8 index,
+ u8 reg, void *buf, u8 size);
+int usb4_port_sb_write(struct tb_port *port, enum usb4_sb_target target,
+ u8 index, u8 reg, const void *buf, u8 size);
+
+/**
+ * enum usb4_margin_sw_error_counter - Software margining error counter operation
+ * @USB4_MARGIN_SW_ERROR_COUNTER_NOP: No change in counter setup
+ * @USB4_MARGIN_SW_ERROR_COUNTER_CLEAR: Set the error counter to 0, enable counter
+ * @USB4_MARGIN_SW_ERROR_COUNTER_START: Start counter, count from last value
+ * @USB4_MARGIN_SW_ERROR_COUNTER_STOP: Stop counter, do not clear value
+ */
+enum usb4_margin_sw_error_counter {
+ USB4_MARGIN_SW_ERROR_COUNTER_NOP,
+ USB4_MARGIN_SW_ERROR_COUNTER_CLEAR,
+ USB4_MARGIN_SW_ERROR_COUNTER_START,
+ USB4_MARGIN_SW_ERROR_COUNTER_STOP,
+};
+
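+/**
+ * enum usb4_margining_lane - Lane used for the margining operation
+ * @USB4_MARGINING_LANE_RX0: Receiver lane 0
+ * @USB4_MARGINING_LANE_RX1: Receiver lane 1
+ * @USB4_MARGINING_LANE_RX2: Receiver lane 2
+ * @USB4_MARGINING_LANE_ALL: All receiver lanes
+ */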
+enum usb4_margining_lane {
+ USB4_MARGINING_LANE_RX0 = 0,
+ USB4_MARGINING_LANE_RX1 = 1,
+ USB4_MARGINING_LANE_RX2 = 2,
+ USB4_MARGINING_LANE_ALL = 7,
+};
+
+/**
+ * struct usb4_port_margining_params - USB4 margining parameters
+ * @error_counter: Error counter operation for software margining
+ * @ber_level: Current BER level contour value
+ * @lanes: Lanes to enable for the margining operation
+ * @voltage_time_offset: Offset for voltage / time for software margining
+ * @optional_voltage_offset_range: Enable optional extended voltage range
+ * @right_high: %false if left/low margin test is performed, %true if right/high
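+ * @upper_eye: %false if the lower eye is margined, %true if the upper eye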
+ * @time: %true if time margining is used instead of voltage
+ */
+struct usb4_port_margining_params {
+ enum usb4_margin_sw_error_counter error_counter;
+ u32 ber_level;
+ enum usb4_margining_lane lanes;
+ u32 voltage_time_offset;
+ bool optional_voltage_offset_range;
+ bool right_high;
+ bool upper_eye;
+ bool time;
+};
+
+int usb4_port_margining_caps(struct tb_port *port, enum usb4_sb_target target,
+ u8 index, u32 *caps, size_t ncaps);
+int usb4_port_hw_margin(struct tb_port *port, enum usb4_sb_target target,
+ u8 index, const struct usb4_port_margining_params *params,
+ u32 *results, size_t nresults);
+int usb4_port_sw_margin(struct tb_port *port, enum usb4_sb_target target,
+ u8 index, const struct usb4_port_margining_params *params,
u32 *results);
-int usb4_port_sw_margin(struct tb_port *port, unsigned int lanes, bool timing,
- bool right_high, u32 counter);
-int usb4_port_sw_margin_errors(struct tb_port *port, u32 *errors);
+int usb4_port_sw_margin_errors(struct tb_port *port, enum usb4_sb_target target,
+ u8 index, u32 *errors);
int usb4_port_retimer_set_inbound_sbtx(struct tb_port *port, u8 index);
int usb4_port_retimer_unset_inbound_sbtx(struct tb_port *port, u8 index);
-int usb4_port_retimer_read(struct tb_port *port, u8 index, u8 reg, void *buf,
- u8 size);
-int usb4_port_retimer_write(struct tb_port *port, u8 index, u8 reg,
- const void *buf, u8 size);
int usb4_port_retimer_is_last(struct tb_port *port, u8 index);
+int usb4_port_retimer_is_cable(struct tb_port *port, u8 index);
int usb4_port_retimer_nvm_sector_size(struct tb_port *port, u8 index);
int usb4_port_retimer_nvm_set_offset(struct tb_port *port, u8 index,
unsigned int address);
@@ -1427,6 +1513,8 @@ void tb_xdomain_debugfs_init(struct tb_xdomain *xd);
void tb_xdomain_debugfs_remove(struct tb_xdomain *xd);
void tb_service_debugfs_init(struct tb_service *svc);
void tb_service_debugfs_remove(struct tb_service *svc);
+void tb_retimer_debugfs_init(struct tb_retimer *rt);
+void tb_retimer_debugfs_remove(struct tb_retimer *rt);
#else
static inline void tb_debugfs_init(void) { }
static inline void tb_debugfs_exit(void) { }
@@ -1436,6 +1524,8 @@ static inline void tb_xdomain_debugfs_init(struct tb_xdomain *xd) { }
static inline void tb_xdomain_debugfs_remove(struct tb_xdomain *xd) { }
static inline void tb_service_debugfs_init(struct tb_service *svc) { }
static inline void tb_service_debugfs_remove(struct tb_service *svc) { }
+static inline void tb_retimer_debugfs_init(struct tb_retimer *rt) { }
+static inline void tb_retimer_debugfs_remove(struct tb_retimer *rt) { }
#endif
#endif
diff --git a/drivers/thunderbolt/tb_msgs.h b/drivers/thunderbolt/tb_msgs.h
index cd750e4b3440..a1670a96cbdc 100644
--- a/drivers/thunderbolt/tb_msgs.h
+++ b/drivers/thunderbolt/tb_msgs.h
@@ -98,12 +98,6 @@ struct cfg_reset_pkg {
struct tb_cfg_header header;
} __packed;
-/* TB_CFG_PKG_PREPARE_TO_SLEEP */
-struct cfg_pts_pkg {
- struct tb_cfg_header header;
- u32 data;
-} __packed;
-
/* ICM messages */
enum icm_pkg_code {
diff --git a/drivers/thunderbolt/tb_regs.h b/drivers/thunderbolt/tb_regs.h
index 6f798f6a2b84..4e43b47f9f11 100644
--- a/drivers/thunderbolt/tb_regs.h
+++ b/drivers/thunderbolt/tb_regs.h
@@ -194,6 +194,8 @@ struct tb_regs_switch_header {
#define USB4_VERSION_MAJOR_MASK GENMASK(7, 5)
#define ROUTER_CS_1 0x01
+#define ROUTER_CS_3 0x03
+#define ROUTER_CS_3_V BIT(31)
#define ROUTER_CS_4 0x04
/* Used with the router cmuv field */
#define ROUTER_CS_4_CMUV_V1 0x10
@@ -389,6 +391,7 @@ struct tb_regs_port_header {
#define PORT_CS_18_CSA BIT(22)
#define PORT_CS_18_TIP BIT(24)
#define PORT_CS_19 0x13
+#define PORT_CS_19_DPR BIT(0)
#define PORT_CS_19_PC BIT(3)
#define PORT_CS_19_PID BIT(4)
#define PORT_CS_19_WOC BIT(16)
@@ -584,6 +587,9 @@ struct tb_regs_hop {
#define TB_LC_POWER 0x740
/* Link controller registers */
+#define TB_LC_PORT_MODE 0x26
+#define TB_LC_PORT_MODE_DPR BIT(0)
+
#define TB_LC_CS_42 0x2a
#define TB_LC_CS_42_USB_PLUGGED BIT(31)
diff --git a/drivers/thunderbolt/test.c b/drivers/thunderbolt/test.c
index 9475c6698c7d..1f4318249c22 100644
--- a/drivers/thunderbolt/test.c
+++ b/drivers/thunderbolt/test.c
@@ -1382,8 +1382,8 @@ static void tb_test_tunnel_pcie(struct kunit *test)
KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[0].in_port, up);
KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[1].out_port, down);
- tb_tunnel_free(tunnel2);
- tb_tunnel_free(tunnel1);
+ tb_tunnel_put(tunnel2);
+ tb_tunnel_put(tunnel1);
}
static void tb_test_tunnel_dp(struct kunit *test)
@@ -1406,7 +1406,7 @@ static void tb_test_tunnel_dp(struct kunit *test)
in = &host->ports[5];
out = &dev->ports[13];
- tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
+ tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0, NULL, NULL);
KUNIT_ASSERT_NOT_NULL(test, tunnel);
KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP);
KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
@@ -1421,7 +1421,7 @@ static void tb_test_tunnel_dp(struct kunit *test)
KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 2);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[1].out_port, in);
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
}
static void tb_test_tunnel_dp_chain(struct kunit *test)
@@ -1452,7 +1452,7 @@ static void tb_test_tunnel_dp_chain(struct kunit *test)
in = &host->ports[5];
out = &dev4->ports[14];
- tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
+ tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0, NULL, NULL);
KUNIT_ASSERT_NOT_NULL(test, tunnel);
KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP);
KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
@@ -1467,7 +1467,7 @@ static void tb_test_tunnel_dp_chain(struct kunit *test)
KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 3);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[2].out_port, in);
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
}
static void tb_test_tunnel_dp_tree(struct kunit *test)
@@ -1502,7 +1502,7 @@ static void tb_test_tunnel_dp_tree(struct kunit *test)
in = &dev2->ports[13];
out = &dev5->ports[13];
- tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
+ tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0, NULL, NULL);
KUNIT_ASSERT_NOT_NULL(test, tunnel);
KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP);
KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
@@ -1517,7 +1517,7 @@ static void tb_test_tunnel_dp_tree(struct kunit *test)
KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 4);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[3].out_port, in);
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
}
static void tb_test_tunnel_dp_max_length(struct kunit *test)
@@ -1567,7 +1567,7 @@ static void tb_test_tunnel_dp_max_length(struct kunit *test)
in = &dev6->ports[13];
out = &dev12->ports[13];
- tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
+ tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0, NULL, NULL);
KUNIT_ASSERT_NOT_NULL(test, tunnel);
KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP);
KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
@@ -1597,7 +1597,7 @@ static void tb_test_tunnel_dp_max_length(struct kunit *test)
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[6].out_port,
&host->ports[1]);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[12].out_port, in);
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
}
static void tb_test_tunnel_3dp(struct kunit *test)
@@ -1637,7 +1637,7 @@ static void tb_test_tunnel_3dp(struct kunit *test)
out2 = &dev5->ports[13];
out3 = &dev4->ports[14];
- tunnel1 = tb_tunnel_alloc_dp(NULL, in1, out1, 1, 0, 0);
+ tunnel1 = tb_tunnel_alloc_dp(NULL, in1, out1, 1, 0, 0, NULL, NULL);
KUNIT_ASSERT_TRUE(test, tunnel1 != NULL);
KUNIT_EXPECT_EQ(test, tunnel1->type, TB_TUNNEL_DP);
KUNIT_EXPECT_PTR_EQ(test, tunnel1->src_port, in1);
@@ -1645,7 +1645,7 @@ static void tb_test_tunnel_3dp(struct kunit *test)
KUNIT_ASSERT_EQ(test, tunnel1->npaths, 3);
KUNIT_ASSERT_EQ(test, tunnel1->paths[0]->path_length, 3);
- tunnel2 = tb_tunnel_alloc_dp(NULL, in2, out2, 1, 0, 0);
+ tunnel2 = tb_tunnel_alloc_dp(NULL, in2, out2, 1, 0, 0, NULL, NULL);
KUNIT_ASSERT_TRUE(test, tunnel2 != NULL);
KUNIT_EXPECT_EQ(test, tunnel2->type, TB_TUNNEL_DP);
KUNIT_EXPECT_PTR_EQ(test, tunnel2->src_port, in2);
@@ -1653,7 +1653,7 @@ static void tb_test_tunnel_3dp(struct kunit *test)
KUNIT_ASSERT_EQ(test, tunnel2->npaths, 3);
KUNIT_ASSERT_EQ(test, tunnel2->paths[0]->path_length, 4);
- tunnel3 = tb_tunnel_alloc_dp(NULL, in3, out3, 1, 0, 0);
+ tunnel3 = tb_tunnel_alloc_dp(NULL, in3, out3, 1, 0, 0, NULL, NULL);
KUNIT_ASSERT_TRUE(test, tunnel3 != NULL);
KUNIT_EXPECT_EQ(test, tunnel3->type, TB_TUNNEL_DP);
KUNIT_EXPECT_PTR_EQ(test, tunnel3->src_port, in3);
@@ -1661,8 +1661,8 @@ static void tb_test_tunnel_3dp(struct kunit *test)
KUNIT_ASSERT_EQ(test, tunnel3->npaths, 3);
KUNIT_ASSERT_EQ(test, tunnel3->paths[0]->path_length, 3);
- tb_tunnel_free(tunnel2);
- tb_tunnel_free(tunnel1);
+ tb_tunnel_put(tunnel2);
+ tb_tunnel_put(tunnel1);
}
static void tb_test_tunnel_usb3(struct kunit *test)
@@ -1716,8 +1716,8 @@ static void tb_test_tunnel_usb3(struct kunit *test)
KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[0].in_port, up);
KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[1].out_port, down);
- tb_tunnel_free(tunnel2);
- tb_tunnel_free(tunnel1);
+ tb_tunnel_put(tunnel2);
+ tb_tunnel_put(tunnel1);
}
static void tb_test_tunnel_port_on_path(struct kunit *test)
@@ -1750,7 +1750,7 @@ static void tb_test_tunnel_port_on_path(struct kunit *test)
in = &dev2->ports[13];
out = &dev5->ports[13];
- dp_tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
+ dp_tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0, NULL, NULL);
KUNIT_ASSERT_NOT_NULL(test, dp_tunnel);
KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, in));
@@ -1783,7 +1783,7 @@ static void tb_test_tunnel_port_on_path(struct kunit *test)
port = &dev4->ports[1];
KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));
- tb_tunnel_free(dp_tunnel);
+ tb_tunnel_put(dp_tunnel);
}
static void tb_test_tunnel_dma(struct kunit *test)
@@ -1826,7 +1826,7 @@ static void tb_test_tunnel_dma(struct kunit *test)
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].out_port, port);
KUNIT_EXPECT_EQ(test, tunnel->paths[1]->hops[0].next_hop_index, 8);
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
}
static void tb_test_tunnel_dma_rx(struct kunit *test)
@@ -1863,7 +1863,7 @@ static void tb_test_tunnel_dma_rx(struct kunit *test)
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].out_port, nhi);
KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].next_hop_index, 2);
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
}
static void tb_test_tunnel_dma_tx(struct kunit *test)
@@ -1900,7 +1900,7 @@ static void tb_test_tunnel_dma_tx(struct kunit *test)
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].out_port, port);
KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].next_hop_index, 15);
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
}
static void tb_test_tunnel_dma_chain(struct kunit *test)
@@ -1966,7 +1966,7 @@ static void tb_test_tunnel_dma_chain(struct kunit *test)
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[2].out_port, port);
KUNIT_EXPECT_EQ(test, tunnel->paths[1]->hops[2].next_hop_index, 8);
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
}
static void tb_test_tunnel_dma_match(struct kunit *test)
@@ -1993,7 +1993,7 @@ static void tb_test_tunnel_dma_match(struct kunit *test)
KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, -1));
KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 8, -1, 8, -1));
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 15, 1, -1, -1);
KUNIT_ASSERT_NOT_NULL(test, tunnel);
@@ -2005,7 +2005,7 @@ static void tb_test_tunnel_dma_match(struct kunit *test)
KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, 1));
KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 15, 11, -1, -1));
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, -1, -1, 15, 11);
KUNIT_ASSERT_NOT_NULL(test, tunnel);
@@ -2017,7 +2017,7 @@ static void tb_test_tunnel_dma_match(struct kunit *test)
KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, -1, -1, 10, 11));
KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 15, 11, -1, -1));
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
}
static void tb_test_credit_alloc_legacy_not_bonded(struct kunit *test)
@@ -2050,7 +2050,7 @@ static void tb_test_credit_alloc_legacy_not_bonded(struct kunit *test)
KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 16U);
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
}
static void tb_test_credit_alloc_legacy_bonded(struct kunit *test)
@@ -2083,7 +2083,7 @@ static void tb_test_credit_alloc_legacy_bonded(struct kunit *test)
KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U);
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
}
static void tb_test_credit_alloc_pcie(struct kunit *test)
@@ -2116,7 +2116,7 @@ static void tb_test_credit_alloc_pcie(struct kunit *test)
KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 64U);
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
}
static void tb_test_credit_alloc_without_dp(struct kunit *test)
@@ -2166,7 +2166,7 @@ static void tb_test_credit_alloc_without_dp(struct kunit *test)
KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 64U);
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
}
static void tb_test_credit_alloc_dp(struct kunit *test)
@@ -2182,7 +2182,7 @@ static void tb_test_credit_alloc_dp(struct kunit *test)
in = &host->ports[5];
out = &dev->ports[14];
- tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
+ tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0, NULL, NULL);
KUNIT_ASSERT_NOT_NULL(test, tunnel);
KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)3);
@@ -2210,7 +2210,7 @@ static void tb_test_credit_alloc_dp(struct kunit *test)
KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
}
static void tb_test_credit_alloc_usb3(struct kunit *test)
@@ -2243,7 +2243,7 @@ static void tb_test_credit_alloc_usb3(struct kunit *test)
KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U);
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
}
static void tb_test_credit_alloc_dma(struct kunit *test)
@@ -2279,7 +2279,7 @@ static void tb_test_credit_alloc_dma(struct kunit *test)
KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
}
static void tb_test_credit_alloc_dma_multiple(struct kunit *test)
@@ -2356,7 +2356,7 @@ static void tb_test_credit_alloc_dma_multiple(struct kunit *test)
* Release the first DMA tunnel. That should make 14 buffers
* available for the next tunnel.
*/
- tb_tunnel_free(tunnel1);
+ tb_tunnel_put(tunnel1);
tunnel3 = tb_tunnel_alloc_dma(NULL, nhi, port, 10, 3, 10, 3);
KUNIT_ASSERT_NOT_NULL(test, tunnel3);
@@ -2375,8 +2375,8 @@ static void tb_test_credit_alloc_dma_multiple(struct kunit *test)
KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
- tb_tunnel_free(tunnel3);
- tb_tunnel_free(tunnel2);
+ tb_tunnel_put(tunnel3);
+ tb_tunnel_put(tunnel2);
}
static struct tb_tunnel *TB_TEST_PCIE_TUNNEL(struct kunit *test,
@@ -2418,7 +2418,7 @@ static struct tb_tunnel *TB_TEST_DP_TUNNEL1(struct kunit *test,
in = &host->ports[5];
out = &dev->ports[13];
- dp_tunnel1 = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
+ dp_tunnel1 = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0, NULL, NULL);
KUNIT_ASSERT_NOT_NULL(test, dp_tunnel1);
KUNIT_ASSERT_EQ(test, dp_tunnel1->npaths, (size_t)3);
@@ -2455,7 +2455,7 @@ static struct tb_tunnel *TB_TEST_DP_TUNNEL2(struct kunit *test,
in = &host->ports[6];
out = &dev->ports[14];
- dp_tunnel2 = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
+ dp_tunnel2 = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0, NULL, NULL);
KUNIT_ASSERT_NOT_NULL(test, dp_tunnel2);
KUNIT_ASSERT_EQ(test, dp_tunnel2->npaths, (size_t)3);
@@ -2595,12 +2595,12 @@ static void tb_test_credit_alloc_all(struct kunit *test)
dma_tunnel1 = TB_TEST_DMA_TUNNEL1(test, host, dev);
dma_tunnel2 = TB_TEST_DMA_TUNNEL2(test, host, dev);
- tb_tunnel_free(dma_tunnel2);
- tb_tunnel_free(dma_tunnel1);
- tb_tunnel_free(usb3_tunnel);
- tb_tunnel_free(dp_tunnel2);
- tb_tunnel_free(dp_tunnel1);
- tb_tunnel_free(pcie_tunnel);
+ tb_tunnel_put(dma_tunnel2);
+ tb_tunnel_put(dma_tunnel1);
+ tb_tunnel_put(usb3_tunnel);
+ tb_tunnel_put(dp_tunnel2);
+ tb_tunnel_put(dp_tunnel1);
+ tb_tunnel_put(pcie_tunnel);
}
static const u32 root_directory[] = {
diff --git a/drivers/thunderbolt/trace.h b/drivers/thunderbolt/trace.h
new file mode 100644
index 000000000000..6d0776514d12
--- /dev/null
+++ b/drivers/thunderbolt/trace.h
@@ -0,0 +1,197 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Thunderbolt tracing support
+ *
+ * Copyright (C) 2024, Intel Corporation
+ * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
+ * Gil Fine <gil.fine@intel.com>
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM thunderbolt
+
+#if !defined(TB_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define TB_TRACE_H_
+
+#include <linux/trace_seq.h>
+#include <linux/tracepoint.h>
+
+#include "tb_msgs.h"
+
+#define tb_cfg_type_name(type) { type, #type }
+#define show_type_name(val) \
+ __print_symbolic(val, \
+ tb_cfg_type_name(TB_CFG_PKG_READ), \
+ tb_cfg_type_name(TB_CFG_PKG_WRITE), \
+ tb_cfg_type_name(TB_CFG_PKG_ERROR), \
+ tb_cfg_type_name(TB_CFG_PKG_NOTIFY_ACK), \
+ tb_cfg_type_name(TB_CFG_PKG_EVENT), \
+ tb_cfg_type_name(TB_CFG_PKG_XDOMAIN_REQ), \
+ tb_cfg_type_name(TB_CFG_PKG_XDOMAIN_RESP), \
+ tb_cfg_type_name(TB_CFG_PKG_OVERRIDE), \
+ tb_cfg_type_name(TB_CFG_PKG_RESET), \
+ tb_cfg_type_name(TB_CFG_PKG_ICM_EVENT), \
+ tb_cfg_type_name(TB_CFG_PKG_ICM_CMD), \
+ tb_cfg_type_name(TB_CFG_PKG_ICM_RESP))
+
+#ifndef TB_TRACE_HELPERS
+#define TB_TRACE_HELPERS
+static inline const char *show_data_read_write(struct trace_seq *p,
+ const u32 *data)
+{
+ const struct cfg_read_pkg *msg = (const struct cfg_read_pkg *)data;
+ const char *ret = trace_seq_buffer_ptr(p);
+
+ trace_seq_printf(p, "offset=%#x, len=%u, port=%d, config=%#x, seq=%d, ",
+ msg->addr.offset, msg->addr.length, msg->addr.port,
+ msg->addr.space, msg->addr.seq);
+
+ return ret;
+}
+
+static inline const char *show_data_error(struct trace_seq *p, const u32 *data)
+{
+ const struct cfg_error_pkg *msg = (const struct cfg_error_pkg *)data;
+ const char *ret = trace_seq_buffer_ptr(p);
+
+ trace_seq_printf(p, "error=%#x, port=%d, plug=%#x, ", msg->error,
+ msg->port, msg->pg);
+
+ return ret;
+}
+
+static inline const char *show_data_event(struct trace_seq *p, const u32 *data)
+{
+ const struct cfg_event_pkg *msg = (const struct cfg_event_pkg *)data;
+ const char *ret = trace_seq_buffer_ptr(p);
+
+ trace_seq_printf(p, "port=%d, unplug=%#x, ", msg->port, msg->unplug);
+
+ return ret;
+}
+
+static inline const char *show_route(struct trace_seq *p, const u32 *data)
+{
+ const struct tb_cfg_header *header = (const struct tb_cfg_header *)data;
+ const char *ret = trace_seq_buffer_ptr(p);
+
+ trace_seq_printf(p, "route=%llx, ", tb_cfg_get_route(header));
+
+ return ret;
+}
+
+static inline const char *show_data(struct trace_seq *p, u8 type,
+ const u32 *data, u32 length)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ const char *prefix = "";
+ int i;
+
+ switch (type) {
+ case TB_CFG_PKG_READ:
+ case TB_CFG_PKG_WRITE:
+ show_route(p, data);
+ show_data_read_write(p, data);
+ break;
+
+ case TB_CFG_PKG_ERROR:
+ show_route(p, data);
+ show_data_error(p, data);
+ break;
+
+ case TB_CFG_PKG_EVENT:
+ show_route(p, data);
+ show_data_event(p, data);
+ break;
+
+ case TB_CFG_PKG_ICM_EVENT:
+ case TB_CFG_PKG_ICM_CMD:
+ case TB_CFG_PKG_ICM_RESP:
+ /* ICM messages always target the host router */
+ trace_seq_puts(p, "route=0, ");
+ break;
+
+ default:
+ show_route(p, data);
+ break;
+ }
+
+ trace_seq_printf(p, "data=[");
+ for (i = 0; i < length; i++) {
+ trace_seq_printf(p, "%s0x%08x", prefix, data[i]);
+ prefix = ", ";
+ }
+ trace_seq_printf(p, "]");
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+#endif
+
+DECLARE_EVENT_CLASS(tb_raw,
+ TP_PROTO(int index, u8 type, const void *data, size_t size),
+ TP_ARGS(index, type, data, size),
+ TP_STRUCT__entry(
+ __field(int, index)
+ __field(u8, type)
+ __field(size_t, size)
+ __dynamic_array(u32, data, size / 4)
+ ),
+ TP_fast_assign(
+ __entry->index = index;
+ __entry->type = type;
+ __entry->size = size / 4;
+ memcpy(__get_dynamic_array(data), data, size);
+ ),
+ TP_printk("type=%s, size=%zd, domain=%d, %s",
+ show_type_name(__entry->type), __entry->size, __entry->index,
+ show_data(p, __entry->type, __get_dynamic_array(data),
+ __entry->size)
+ )
+);
+
+DEFINE_EVENT(tb_raw, tb_tx,
+ TP_PROTO(int index, u8 type, const void *data, size_t size),
+ TP_ARGS(index, type, data, size)
+);
+
+DEFINE_EVENT(tb_raw, tb_event,
+ TP_PROTO(int index, u8 type, const void *data, size_t size),
+ TP_ARGS(index, type, data, size)
+);
+
+TRACE_EVENT(tb_rx,
+ TP_PROTO(int index, u8 type, const void *data, size_t size, bool dropped),
+ TP_ARGS(index, type, data, size, dropped),
+ TP_STRUCT__entry(
+ __field(int, index)
+ __field(u8, type)
+ __field(size_t, size)
+ __dynamic_array(u32, data, size / 4)
+ __field(bool, dropped)
+ ),
+ TP_fast_assign(
+ __entry->index = index;
+ __entry->type = type;
+ __entry->size = size / 4;
+ memcpy(__get_dynamic_array(data), data, size);
+ __entry->dropped = dropped;
+ ),
+ TP_printk("type=%s, dropped=%u, size=%zd, domain=%d, %s",
+ show_type_name(__entry->type), __entry->dropped,
+ __entry->size, __entry->index,
+ show_data(p, __entry->type, __get_dynamic_array(data),
+ __entry->size)
+ )
+);
+
+#endif /* TB_TRACE_H_ */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
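The tracepoints above are consumed in the usual ftrace way: exactly one compilation unit defines CREATE_TRACE_POINTS before including the header, and TRACE_INCLUDE_PATH set to "." means the header resolves relative to the driver directory. A minimal sketch of an emitting call site, assuming the control channel code passes the domain index, config packet type and raw payload (the function and variable names below are illustrative only, not part of the patch):

#define CREATE_TRACE_POINTS
#include "trace.h"

static void example_emit(int domain_index, u8 type, const void *buf,
			 size_t size, bool dropped)
{
	/* TX direction: domain index, config packet type and raw dwords */
	trace_tb_tx(domain_index, type, buf, size);

	/* RX direction additionally records whether the packet was dropped */
	trace_tb_rx(domain_index, type, buf, size, dropped);

	/* Plug events and other notifications */
	trace_tb_event(domain_index, type, buf, size);
}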
diff --git a/drivers/thunderbolt/tunnel.c b/drivers/thunderbolt/tunnel.c
index 6fffb2c82d3d..717b31d78728 100644
--- a/drivers/thunderbolt/tunnel.c
+++ b/drivers/thunderbolt/tunnel.c
@@ -70,6 +70,24 @@
#define USB4_V2_PCI_MIN_BANDWIDTH (1500 * TB_PCI_WEIGHT)
#define USB4_V2_USB3_MIN_BANDWIDTH (1500 * TB_USB3_WEIGHT)
+/*
+ * According to VESA spec, the DPRX negotiation shall complete in 5
+ * seconds after the tunnel is established. Since at least i915 can
+ * runtime suspend if there is nothing connected, and polls for new
+ * connections every 10 seconds, we use 12 seconds here.
+ *
+ * These are in ms.
+ */
+#define TB_DPRX_TIMEOUT 12000
+#define TB_DPRX_WAIT_TIMEOUT 25
+#define TB_DPRX_POLL_DELAY 50
+
+static int dprx_timeout = TB_DPRX_TIMEOUT;
+module_param(dprx_timeout, int, 0444);
+MODULE_PARM_DESC(dprx_timeout,
+ "DPRX capability read timeout in ms, -1 waits forever (default: "
+ __MODULE_STRING(TB_DPRX_TIMEOUT) ")");
+
static unsigned int dma_credits = TB_DMA_CREDITS;
module_param(dma_credits, uint, 0444);
MODULE_PARM_DESC(dma_credits, "specify custom credits for DMA tunnels (default: "
@@ -82,6 +100,9 @@ MODULE_PARM_DESC(bw_alloc_mode,
static const char * const tb_tunnel_names[] = { "PCI", "DP", "DMA", "USB3" };
+/* Synchronizes kref_get()/put() of struct tb_tunnel */
+static DEFINE_MUTEX(tb_tunnel_lock);
+
static inline unsigned int tb_usable_credits(const struct tb_port *port)
{
return port->total_credits - port->ctl_credits;
@@ -155,7 +176,7 @@ static struct tb_tunnel *tb_tunnel_alloc(struct tb *tb, size_t npaths,
tunnel->paths = kcalloc(npaths, sizeof(tunnel->paths[0]), GFP_KERNEL);
if (!tunnel->paths) {
- tb_tunnel_free(tunnel);
+ kfree(tunnel);
return NULL;
}
@@ -163,10 +184,42 @@ static struct tb_tunnel *tb_tunnel_alloc(struct tb *tb, size_t npaths,
tunnel->tb = tb;
tunnel->npaths = npaths;
tunnel->type = type;
+ kref_init(&tunnel->kref);
return tunnel;
}
+static void tb_tunnel_get(struct tb_tunnel *tunnel)
+{
+ mutex_lock(&tb_tunnel_lock);
+ kref_get(&tunnel->kref);
+ mutex_unlock(&tb_tunnel_lock);
+}
+
+static void tb_tunnel_destroy(struct kref *kref)
+{
+ struct tb_tunnel *tunnel = container_of(kref, typeof(*tunnel), kref);
+ int i;
+
+ if (tunnel->destroy)
+ tunnel->destroy(tunnel);
+
+ for (i = 0; i < tunnel->npaths; i++) {
+ if (tunnel->paths[i])
+ tb_path_free(tunnel->paths[i]);
+ }
+
+ kfree(tunnel->paths);
+ kfree(tunnel);
+}
+
+void tb_tunnel_put(struct tb_tunnel *tunnel)
+{
+ mutex_lock(&tb_tunnel_lock);
+ kref_put(&tunnel->kref, tb_tunnel_destroy);
+ mutex_unlock(&tb_tunnel_lock);
+}
+
static int tb_pci_set_ext_encapsulation(struct tb_tunnel *tunnel, bool enable)
{
struct tb_port *port = tb_upstream_port(tunnel->dst_port->sw);
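With the move to reference counting, former tb_tunnel_free() callers simply drop their reference and the tunnel memory is released only when the last holder lets go. A minimal sketch of the DP teardown order this enables (illustrative, not lifted from a specific call site in the patch):

/* Stops the DPRX poll worker, which drops the reference it took */
tb_tunnel_deactivate(tunnel);
/* Drop the caller's reference; frees the tunnel if it was the last one */
tb_tunnel_put(tunnel);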
@@ -355,7 +408,7 @@ struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down,
err_deactivate:
tb_tunnel_deactivate(tunnel);
err_free:
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
return NULL;
}
@@ -404,7 +457,7 @@ struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
return tunnel;
err_free:
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
return NULL;
}
@@ -706,7 +759,7 @@ static int tb_dp_xchg_caps(struct tb_tunnel *tunnel)
"DP OUT maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
out_rate, out_lanes, bw);
- if (tb_port_path_direction_downstream(in, out))
+ if (tb_tunnel_direction_downstream(tunnel))
max_bw = tunnel->max_down;
else
max_bw = tunnel->max_up;
@@ -831,7 +884,7 @@ static int tb_dp_bandwidth_alloc_mode_enable(struct tb_tunnel *tunnel)
* max_up/down fields. For discovery we just read what the
* estimation was set to.
*/
- if (tb_port_path_direction_downstream(in, out))
+ if (tb_tunnel_direction_downstream(tunnel))
estimated_bw = tunnel->max_down;
else
estimated_bw = tunnel->max_up;
@@ -851,7 +904,7 @@ static int tb_dp_bandwidth_alloc_mode_enable(struct tb_tunnel *tunnel)
return 0;
}
-static int tb_dp_init(struct tb_tunnel *tunnel)
+static int tb_dp_pre_activate(struct tb_tunnel *tunnel)
{
struct tb_port *in = tunnel->src_port;
struct tb_switch *sw = in->sw;
@@ -877,7 +930,7 @@ static int tb_dp_init(struct tb_tunnel *tunnel)
return tb_dp_bandwidth_alloc_mode_enable(tunnel);
}
-static void tb_dp_deinit(struct tb_tunnel *tunnel)
+static void tb_dp_post_deactivate(struct tb_tunnel *tunnel)
{
struct tb_port *in = tunnel->src_port;
@@ -889,6 +942,95 @@ static void tb_dp_deinit(struct tb_tunnel *tunnel)
}
}
+static ktime_t dprx_timeout_to_ktime(int timeout_msec)
+{
+ return timeout_msec >= 0 ?
+ ktime_add_ms(ktime_get(), timeout_msec) : KTIME_MAX;
+}
+
+static int tb_dp_wait_dprx(struct tb_tunnel *tunnel, int timeout_msec)
+{
+ ktime_t timeout = dprx_timeout_to_ktime(timeout_msec);
+ struct tb_port *in = tunnel->src_port;
+
+ /*
+ * Wait for DPRX done. Normally it should be already set for
+ * active tunnel.
+ */
+ do {
+ u32 val;
+ int ret;
+
+ ret = tb_port_read(in, &val, TB_CFG_PORT,
+ in->cap_adap + DP_COMMON_CAP, 1);
+ if (ret)
+ return ret;
+
+ if (val & DP_COMMON_CAP_DPRX_DONE)
+ return 0;
+
+ usleep_range(100, 150);
+ } while (ktime_before(ktime_get(), timeout));
+
+ tb_tunnel_dbg(tunnel, "DPRX read timeout\n");
+ return -ETIMEDOUT;
+}
+
+static void tb_dp_dprx_work(struct work_struct *work)
+{
+ struct tb_tunnel *tunnel = container_of(work, typeof(*tunnel), dprx_work.work);
+ struct tb *tb = tunnel->tb;
+
+ if (!tunnel->dprx_canceled) {
+ mutex_lock(&tb->lock);
+ if (tb_dp_is_usb4(tunnel->src_port->sw) &&
+ tb_dp_wait_dprx(tunnel, TB_DPRX_WAIT_TIMEOUT)) {
+ if (ktime_before(ktime_get(), tunnel->dprx_timeout)) {
+ queue_delayed_work(tb->wq, &tunnel->dprx_work,
+ msecs_to_jiffies(TB_DPRX_POLL_DELAY));
+ mutex_unlock(&tb->lock);
+ return;
+ }
+ } else {
+ tunnel->state = TB_TUNNEL_ACTIVE;
+ }
+ mutex_unlock(&tb->lock);
+ }
+
+ if (tunnel->callback)
+ tunnel->callback(tunnel, tunnel->callback_data);
+}
+
+static int tb_dp_dprx_start(struct tb_tunnel *tunnel)
+{
+ /*
+ * Bump up the reference to keep the tunnel around. It will be
+ * dropped in tb_dp_dprx_stop() once the tunnel is deactivated.
+ */
+ tb_tunnel_get(tunnel);
+
+ tunnel->dprx_started = true;
+
+ if (tunnel->callback) {
+ tunnel->dprx_timeout = dprx_timeout_to_ktime(dprx_timeout);
+ queue_delayed_work(tunnel->tb->wq, &tunnel->dprx_work, 0);
+ return -EINPROGRESS;
+ }
+
+ return tb_dp_is_usb4(tunnel->src_port->sw) ?
+ tb_dp_wait_dprx(tunnel, dprx_timeout) : 0;
+}
+
+static void tb_dp_dprx_stop(struct tb_tunnel *tunnel)
+{
+ if (tunnel->dprx_started) {
+ tunnel->dprx_started = false;
+ tunnel->dprx_canceled = true;
+ cancel_delayed_work(&tunnel->dprx_work);
+ tb_tunnel_put(tunnel);
+ }
+}
+
static int tb_dp_activate(struct tb_tunnel *tunnel, bool active)
{
int ret;
@@ -910,6 +1052,7 @@ static int tb_dp_activate(struct tb_tunnel *tunnel, bool active)
paths[TB_DP_AUX_PATH_IN]->hops[0].in_hop_index,
paths[TB_DP_AUX_PATH_OUT]->hops[last].next_hop_index);
} else {
+ tb_dp_dprx_stop(tunnel);
tb_dp_port_hpd_clear(tunnel->src_port);
tb_dp_port_set_hops(tunnel->src_port, 0, 0, 0);
if (tb_port_is_dpout(tunnel->dst_port))
@@ -920,18 +1063,27 @@ static int tb_dp_activate(struct tb_tunnel *tunnel, bool active)
if (ret)
return ret;
- if (tb_port_is_dpout(tunnel->dst_port))
- return tb_dp_port_enable(tunnel->dst_port, active);
+ if (tb_port_is_dpout(tunnel->dst_port)) {
+ ret = tb_dp_port_enable(tunnel->dst_port, active);
+ if (ret)
+ return ret;
+ }
- return 0;
+ return active ? tb_dp_dprx_start(tunnel) : 0;
}
-/* max_bw is rounded up to next granularity */
+/**
+ * tb_dp_bandwidth_mode_maximum_bandwidth() - Maximum possible bandwidth
+ * @tunnel: DP tunnel to check
+ * @max_bw_rounded: Maximum bandwidth in Mb/s rounded up to the next granularity
+ *
+ * Returns maximum possible bandwidth for this tunnel in Mb/s.
+ */
static int tb_dp_bandwidth_mode_maximum_bandwidth(struct tb_tunnel *tunnel,
- int *max_bw)
+ int *max_bw_rounded)
{
struct tb_port *in = tunnel->src_port;
- int ret, rate, lanes, nrd_bw;
+ int ret, rate, lanes, max_bw;
u32 cap;
/*
@@ -947,41 +1099,26 @@ static int tb_dp_bandwidth_mode_maximum_bandwidth(struct tb_tunnel *tunnel,
return ret;
rate = tb_dp_cap_get_rate_ext(cap);
- if (tb_dp_is_uhbr_rate(rate)) {
- /*
- * When UHBR is used there is no reduction in lanes so
- * we can use this directly.
- */
- lanes = tb_dp_cap_get_lanes(cap);
- } else {
- /*
- * If there is no UHBR supported then check the
- * non-reduced rate and lanes.
- */
- ret = usb4_dp_port_nrd(in, &rate, &lanes);
- if (ret)
- return ret;
- }
+ lanes = tb_dp_cap_get_lanes(cap);
- nrd_bw = tb_dp_bandwidth(rate, lanes);
+ max_bw = tb_dp_bandwidth(rate, lanes);
- if (max_bw) {
+ if (max_bw_rounded) {
ret = usb4_dp_port_granularity(in);
if (ret < 0)
return ret;
- *max_bw = roundup(nrd_bw, ret);
+ *max_bw_rounded = roundup(max_bw, ret);
}
- return nrd_bw;
+ return max_bw;
}
static int tb_dp_bandwidth_mode_consumed_bandwidth(struct tb_tunnel *tunnel,
int *consumed_up,
int *consumed_down)
{
- struct tb_port *out = tunnel->dst_port;
struct tb_port *in = tunnel->src_port;
- int ret, allocated_bw, max_bw;
+ int ret, allocated_bw, max_bw_rounded;
if (!usb4_dp_port_bandwidth_mode_enabled(in))
return -EOPNOTSUPP;
@@ -995,13 +1132,13 @@ static int tb_dp_bandwidth_mode_consumed_bandwidth(struct tb_tunnel *tunnel,
return ret;
allocated_bw = ret;
- ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw);
+ ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw_rounded);
if (ret < 0)
return ret;
- if (allocated_bw == max_bw)
+ if (allocated_bw == max_bw_rounded)
allocated_bw = ret;
- if (tb_port_path_direction_downstream(in, out)) {
+ if (tb_tunnel_direction_downstream(tunnel)) {
*consumed_up = 0;
*consumed_down = allocated_bw;
} else {
@@ -1015,7 +1152,6 @@ static int tb_dp_bandwidth_mode_consumed_bandwidth(struct tb_tunnel *tunnel,
static int tb_dp_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up,
int *allocated_down)
{
- struct tb_port *out = tunnel->dst_port;
struct tb_port *in = tunnel->src_port;
/*
@@ -1023,20 +1159,21 @@ static int tb_dp_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up
* Otherwise we read it from the DPRX.
*/
if (usb4_dp_port_bandwidth_mode_enabled(in) && tunnel->bw_mode) {
- int ret, allocated_bw, max_bw;
+ int ret, allocated_bw, max_bw_rounded;
ret = usb4_dp_port_allocated_bandwidth(in);
if (ret < 0)
return ret;
allocated_bw = ret;
- ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw);
+ ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel,
+ &max_bw_rounded);
if (ret < 0)
return ret;
- if (allocated_bw == max_bw)
+ if (allocated_bw == max_bw_rounded)
allocated_bw = ret;
- if (tb_port_path_direction_downstream(in, out)) {
+ if (tb_tunnel_direction_downstream(tunnel)) {
*allocated_up = 0;
*allocated_down = allocated_bw;
} else {
@@ -1053,26 +1190,25 @@ static int tb_dp_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up
static int tb_dp_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up,
int *alloc_down)
{
- struct tb_port *out = tunnel->dst_port;
struct tb_port *in = tunnel->src_port;
- int max_bw, ret, tmp;
+ int max_bw_rounded, ret, tmp;
if (!usb4_dp_port_bandwidth_mode_enabled(in))
return -EOPNOTSUPP;
- ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw);
+ ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw_rounded);
if (ret < 0)
return ret;
- if (tb_port_path_direction_downstream(in, out)) {
- tmp = min(*alloc_down, max_bw);
+ if (tb_tunnel_direction_downstream(tunnel)) {
+ tmp = min(*alloc_down, max_bw_rounded);
ret = usb4_dp_port_allocate_bandwidth(in, tmp);
if (ret)
return ret;
*alloc_down = tmp;
*alloc_up = 0;
} else {
- tmp = min(*alloc_up, max_bw);
+ tmp = min(*alloc_up, max_bw_rounded);
ret = usb4_dp_port_allocate_bandwidth(in, tmp);
if (ret)
return ret;
@@ -1086,35 +1222,6 @@ static int tb_dp_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up,
return 0;
}
-static int tb_dp_wait_dprx(struct tb_tunnel *tunnel, int timeout_msec)
-{
- ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
- struct tb_port *in = tunnel->src_port;
-
- /*
- * Wait for DPRX done. Normally it should be already set for
- * active tunnel.
- */
- do {
- u32 val;
- int ret;
-
- ret = tb_port_read(in, &val, TB_CFG_PORT,
- in->cap_adap + DP_COMMON_CAP, 1);
- if (ret)
- return ret;
-
- if (val & DP_COMMON_CAP_DPRX_DONE) {
- tb_tunnel_dbg(tunnel, "DPRX read done\n");
- return 0;
- }
- usleep_range(100, 150);
- } while (ktime_before(ktime_get(), timeout));
-
- tb_tunnel_dbg(tunnel, "DPRX read timeout\n");
- return -ETIMEDOUT;
-}
-
/* Read cap from tunnel DP IN */
static int tb_dp_read_cap(struct tb_tunnel *tunnel, unsigned int cap, u32 *rate,
u32 *lanes)
@@ -1150,17 +1257,16 @@ static int tb_dp_read_cap(struct tb_tunnel *tunnel, unsigned int cap, u32 *rate,
static int tb_dp_maximum_bandwidth(struct tb_tunnel *tunnel, int *max_up,
int *max_down)
{
- struct tb_port *in = tunnel->src_port;
int ret;
- if (!usb4_dp_port_bandwidth_mode_enabled(in))
+ if (!usb4_dp_port_bandwidth_mode_enabled(tunnel->src_port))
return -EOPNOTSUPP;
ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, NULL);
if (ret < 0)
return ret;
- if (tb_port_path_direction_downstream(in, tunnel->dst_port)) {
+ if (tb_tunnel_direction_downstream(tunnel)) {
*max_up = 0;
*max_down = ret;
} else {
@@ -1174,42 +1280,44 @@ static int tb_dp_maximum_bandwidth(struct tb_tunnel *tunnel, int *max_up,
static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
int *consumed_down)
{
- struct tb_port *in = tunnel->src_port;
- const struct tb_switch *sw = in->sw;
+ const struct tb_switch *sw = tunnel->src_port->sw;
u32 rate = 0, lanes = 0;
int ret;
if (tb_dp_is_usb4(sw)) {
- /*
- * On USB4 routers check if the bandwidth allocation
- * mode is enabled first and then read the bandwidth
- * through those registers.
- */
- ret = tb_dp_bandwidth_mode_consumed_bandwidth(tunnel, consumed_up,
- consumed_down);
- if (ret < 0) {
- if (ret != -EOPNOTSUPP)
- return ret;
- } else if (!ret) {
- return 0;
- }
- /*
- * Then see if the DPRX negotiation is ready and if yes
- * return that bandwidth (it may be smaller than the
- * reduced one). Otherwise return the remote (possibly
- * reduced) caps.
- */
- ret = tb_dp_wait_dprx(tunnel, 150);
+ ret = tb_dp_wait_dprx(tunnel, 0);
if (ret) {
- if (ret == -ETIMEDOUT)
+ if (ret == -ETIMEDOUT) {
+ /*
+ * While we wait for DPRX to complete, the
+ * tunnel consumes as much as was
+ * reserved for it initially.
+ */
ret = tb_dp_read_cap(tunnel, DP_REMOTE_CAP,
&rate, &lanes);
+ if (ret)
+ return ret;
+ } else {
+ return ret;
+ }
+ } else {
+ /*
+ * On USB4 routers check if the bandwidth allocation
+ * mode is enabled first and then read the bandwidth
+ * through those registers.
+ */
+ ret = tb_dp_bandwidth_mode_consumed_bandwidth(tunnel, consumed_up,
+ consumed_down);
+ if (ret < 0) {
+ if (ret != -EOPNOTSUPP)
+ return ret;
+ } else if (!ret) {
+ return 0;
+ }
+ ret = tb_dp_read_cap(tunnel, DP_COMMON_CAP, &rate, &lanes);
if (ret)
return ret;
}
- ret = tb_dp_read_cap(tunnel, DP_COMMON_CAP, &rate, &lanes);
- if (ret)
- return ret;
} else if (sw->generation >= 2) {
ret = tb_dp_read_cap(tunnel, DP_REMOTE_CAP, &rate, &lanes);
if (ret)
@@ -1221,7 +1329,7 @@ static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
return 0;
}
- if (tb_port_path_direction_downstream(in, tunnel->dst_port)) {
+ if (tb_tunnel_direction_downstream(tunnel)) {
*consumed_up = 0;
*consumed_down = tb_dp_bandwidth(rate, lanes);
} else {
@@ -1381,9 +1489,9 @@ struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in,
if (!tunnel)
return NULL;
- tunnel->init = tb_dp_init;
- tunnel->deinit = tb_dp_deinit;
+ tunnel->pre_activate = tb_dp_pre_activate;
tunnel->activate = tb_dp_activate;
+ tunnel->post_deactivate = tb_dp_post_deactivate;
tunnel->maximum_bandwidth = tb_dp_maximum_bandwidth;
tunnel->allocated_bandwidth = tb_dp_allocated_bandwidth;
tunnel->alloc_bandwidth = tb_dp_alloc_bandwidth;
@@ -1440,7 +1548,7 @@ struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in,
err_deactivate:
tb_tunnel_deactivate(tunnel);
err_free:
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
return NULL;
}
@@ -1451,19 +1559,28 @@ err_free:
* @in: DP in adapter port
* @out: DP out adapter port
* @link_nr: Preferred lane adapter when the link is not bonded
- * @max_up: Maximum available upstream bandwidth for the DP tunnel (%0
- * if not limited)
- * @max_down: Maximum available downstream bandwidth for the DP tunnel
- * (%0 if not limited)
+ * @max_up: Maximum available upstream bandwidth for the DP tunnel.
+ * %0 if no available bandwidth.
+ * @max_down: Maximum available downstream bandwidth for the DP tunnel.
+ * %0 if no available bandwidth.
+ * @callback: Optional callback that is called when the DP tunnel is
+ * fully activated (or there is an error)
+ * @callback_data: Optional data for @callback
*
* Allocates a tunnel between @in and @out that is capable of tunneling
- * Display Port traffic.
+ * Display Port traffic. If @callback is not %NULL it will be called
+ * after tb_tunnel_activate() once the tunnel has been fully activated.
+ * It can call tb_tunnel_is_active() to check whether the activation
+ * succeeded (%false means something went wrong).
+ * The @callback is called without @tb->lock held.
*
- * Return: Returns a tb_tunnel on success or NULL on failure.
+ * Return: Returns a tb_tunnel on success or %NULL on failure.
*/
struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
struct tb_port *out, int link_nr,
- int max_up, int max_down)
+ int max_up, int max_down,
+ void (*callback)(struct tb_tunnel *, void *),
+ void *callback_data)
{
struct tb_tunnel *tunnel;
struct tb_path **paths;
@@ -1477,9 +1594,9 @@ struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
if (!tunnel)
return NULL;
- tunnel->init = tb_dp_init;
- tunnel->deinit = tb_dp_deinit;
+ tunnel->pre_activate = tb_dp_pre_activate;
tunnel->activate = tb_dp_activate;
+ tunnel->post_deactivate = tb_dp_post_deactivate;
tunnel->maximum_bandwidth = tb_dp_maximum_bandwidth;
tunnel->allocated_bandwidth = tb_dp_allocated_bandwidth;
tunnel->alloc_bandwidth = tb_dp_alloc_bandwidth;
@@ -1488,6 +1605,9 @@ struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
tunnel->dst_port = out;
tunnel->max_up = max_up;
tunnel->max_down = max_down;
+ tunnel->callback = callback;
+ tunnel->callback_data = callback_data;
+ INIT_DELAYED_WORK(&tunnel->dprx_work, tb_dp_dprx_work);
paths = tunnel->paths;
pm_support = usb4_switch_version(in->sw) >= 2;
@@ -1516,7 +1636,7 @@ struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
return tunnel;
err_free:
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
return NULL;
}
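A hedged sketch of how a caller might combine the new @callback arguments with tb_tunnel_activate() returning %-EINPROGRESS. The callback name and the surrounding error handling are assumptions for illustration, not code from this patch:

static void my_dp_tunnel_active(struct tb_tunnel *tunnel, void *data)
{
	struct tb *tb = tunnel->tb;

	/* Runs without tb->lock held once DPRX negotiation finished or failed */
	mutex_lock(&tb->lock);
	if (tb_tunnel_is_active(tunnel))
		tb_tunnel_dbg(tunnel, "DPRX capabilities read completed\n");
	else
		tb_tunnel_dbg(tunnel, "activation failed\n");
	mutex_unlock(&tb->lock);
}

	...
	tunnel = tb_tunnel_alloc_dp(tb, in, out, link_nr, max_up, max_down,
				    my_dp_tunnel_active, tb);
	if (!tunnel)
		return -ENOMEM;

	ret = tb_tunnel_activate(tunnel);
	if (ret && ret != -EINPROGRESS) {
		tb_tunnel_put(tunnel);
		return ret;
	}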
@@ -1636,7 +1756,7 @@ static void tb_dma_release_credits(struct tb_path_hop *hop)
}
}
-static void tb_dma_deinit_path(struct tb_path *path)
+static void tb_dma_destroy_path(struct tb_path *path)
{
struct tb_path_hop *hop;
@@ -1644,14 +1764,14 @@ static void tb_dma_deinit_path(struct tb_path *path)
tb_dma_release_credits(hop);
}
-static void tb_dma_deinit(struct tb_tunnel *tunnel)
+static void tb_dma_destroy(struct tb_tunnel *tunnel)
{
int i;
for (i = 0; i < tunnel->npaths; i++) {
if (!tunnel->paths[i])
continue;
- tb_dma_deinit_path(tunnel->paths[i]);
+ tb_dma_destroy_path(tunnel->paths[i]);
}
}
@@ -1697,7 +1817,7 @@ struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
tunnel->src_port = nhi;
tunnel->dst_port = dst;
- tunnel->deinit = tb_dma_deinit;
+ tunnel->destroy = tb_dma_destroy;
credits = min_not_zero(dma_credits, nhi->sw->max_dma_credits);
@@ -1728,7 +1848,7 @@ struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
return tunnel;
err_free:
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
return NULL;
}
@@ -1809,7 +1929,7 @@ static int tb_usb3_max_link_rate(struct tb_port *up, struct tb_port *down)
return min(up_max_rate, down_max_rate);
}
-static int tb_usb3_init(struct tb_tunnel *tunnel)
+static int tb_usb3_pre_activate(struct tb_tunnel *tunnel)
{
tb_tunnel_dbg(tunnel, "allocating initial bandwidth %d/%d Mb/s\n",
tunnel->allocated_up, tunnel->allocated_down);
@@ -2040,7 +2160,7 @@ struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down,
tb_tunnel_dbg(tunnel, "currently allocated bandwidth %d/%d Mb/s\n",
tunnel->allocated_up, tunnel->allocated_down);
- tunnel->init = tb_usb3_init;
+ tunnel->pre_activate = tb_usb3_pre_activate;
tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth;
tunnel->release_unused_bandwidth =
tb_usb3_release_unused_bandwidth;
@@ -2054,7 +2174,7 @@ struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down,
err_deactivate:
tb_tunnel_deactivate(tunnel);
err_free:
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
return NULL;
}
@@ -2064,10 +2184,10 @@ err_free:
* @tb: Pointer to the domain structure
* @up: USB3 upstream adapter port
* @down: USB3 downstream adapter port
- * @max_up: Maximum available upstream bandwidth for the USB3 tunnel (%0
- * if not limited).
- * @max_down: Maximum available downstream bandwidth for the USB3 tunnel
- * (%0 if not limited).
+ * @max_up: Maximum available upstream bandwidth for the USB3 tunnel.
+ * %0 if no available bandwidth.
+ * @max_down: Maximum available downstream bandwidth for the USB3 tunnel.
+ * %0 if no available bandwidth.
*
* Allocate an USB3 tunnel. The ports must be of type @TB_TYPE_USB3_UP and
* @TB_TYPE_USB3_DOWN.
@@ -2082,24 +2202,19 @@ struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
struct tb_path *path;
int max_rate = 0;
- /*
- * Check that we have enough bandwidth available for the new
- * USB3 tunnel.
- */
- if (max_up > 0 || max_down > 0) {
+ if (!tb_route(down->sw) && (max_up > 0 || max_down > 0)) {
+ /*
+ * For USB3 isochronous transfers, we allow bandwidth which is
+ * no more than 90% of the maximum bandwidth supported by the
+ * USB3 adapters.
+ */
max_rate = tb_usb3_max_link_rate(down, up);
if (max_rate < 0)
return NULL;
- /* Only 90% can be allocated for USB3 isochronous transfers */
max_rate = max_rate * 90 / 100;
- tb_port_dbg(up, "required bandwidth for USB3 tunnel %d Mb/s\n",
+ tb_port_dbg(up, "maximum required bandwidth for USB3 tunnel %d Mb/s\n",
max_rate);
-
- if (max_rate > max_up || max_rate > max_down) {
- tb_port_warn(up, "not enough bandwidth for USB3 tunnel\n");
- return NULL;
- }
}
tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3);
@@ -2115,7 +2230,7 @@ struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
path = tb_path_alloc(tb, down, TB_USB3_HOPID, up, TB_USB3_HOPID, 0,
"USB3 Down");
if (!path) {
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
return NULL;
}
tb_usb3_init_path(path);
@@ -2124,17 +2239,17 @@ struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
path = tb_path_alloc(tb, up, TB_USB3_HOPID, down, TB_USB3_HOPID, 0,
"USB3 Up");
if (!path) {
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
return NULL;
}
tb_usb3_init_path(path);
tunnel->paths[TB_USB3_PATH_UP] = path;
if (!tb_route(down->sw)) {
- tunnel->allocated_up = max_rate;
- tunnel->allocated_down = max_rate;
+ tunnel->allocated_up = min(max_rate, max_up);
+ tunnel->allocated_down = min(max_rate, max_down);
- tunnel->init = tb_usb3_init;
+ tunnel->pre_activate = tb_usb3_pre_activate;
tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth;
tunnel->release_unused_bandwidth =
tb_usb3_release_unused_bandwidth;
@@ -2146,31 +2261,6 @@ struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
}
/**
- * tb_tunnel_free() - free a tunnel
- * @tunnel: Tunnel to be freed
- *
- * Frees a tunnel. The tunnel does not need to be deactivated.
- */
-void tb_tunnel_free(struct tb_tunnel *tunnel)
-{
- int i;
-
- if (!tunnel)
- return;
-
- if (tunnel->deinit)
- tunnel->deinit(tunnel);
-
- for (i = 0; i < tunnel->npaths; i++) {
- if (tunnel->paths[i])
- tb_path_free(tunnel->paths[i]);
- }
-
- kfree(tunnel->paths);
- kfree(tunnel);
-}
-
-/**
* tb_tunnel_is_invalid - check whether an activated path is still valid
* @tunnel: Tunnel to check
*/
@@ -2188,12 +2278,15 @@ bool tb_tunnel_is_invalid(struct tb_tunnel *tunnel)
}
/**
- * tb_tunnel_restart() - activate a tunnel after a hardware reset
- * @tunnel: Tunnel to restart
+ * tb_tunnel_activate() - activate a tunnel
+ * @tunnel: Tunnel to activate
*
- * Return: 0 on success and negative errno in case if failure
+ * Return: 0 on success and negative errno in case of failure.
+ * Specifically returns %-EINPROGRESS if the tunnel activation is still
+ * in progress (DP tunnels use this to complete the DPRX capabilities
+ * read).
*/
-int tb_tunnel_restart(struct tb_tunnel *tunnel)
+int tb_tunnel_activate(struct tb_tunnel *tunnel)
{
int res, i;
@@ -2210,8 +2303,10 @@ int tb_tunnel_restart(struct tb_tunnel *tunnel)
}
}
- if (tunnel->init) {
- res = tunnel->init(tunnel);
+ tunnel->state = TB_TUNNEL_ACTIVATING;
+
+ if (tunnel->pre_activate) {
+ res = tunnel->pre_activate(tunnel);
if (res)
return res;
}
@@ -2224,10 +2319,14 @@ int tb_tunnel_restart(struct tb_tunnel *tunnel)
if (tunnel->activate) {
res = tunnel->activate(tunnel, true);
- if (res)
+ if (res) {
+ if (res == -EINPROGRESS)
+ return res;
goto err;
+ }
}
+ tunnel->state = TB_TUNNEL_ACTIVE;
return 0;
err:
@@ -2237,27 +2336,6 @@ err:
}
/**
- * tb_tunnel_activate() - activate a tunnel
- * @tunnel: Tunnel to activate
- *
- * Return: Returns 0 on success or an error code on failure.
- */
-int tb_tunnel_activate(struct tb_tunnel *tunnel)
-{
- int i;
-
- for (i = 0; i < tunnel->npaths; i++) {
- if (tunnel->paths[i]->activated) {
- tb_tunnel_WARN(tunnel,
- "trying to activate an already activated tunnel\n");
- return -EINVAL;
- }
- }
-
- return tb_tunnel_restart(tunnel);
-}
-
-/**
* tb_tunnel_deactivate() - deactivate a tunnel
* @tunnel: Tunnel to deactivate
*/
@@ -2274,6 +2352,11 @@ void tb_tunnel_deactivate(struct tb_tunnel *tunnel)
if (tunnel->paths[i] && tunnel->paths[i]->activated)
tb_path_deactivate(tunnel->paths[i]);
}
+
+ if (tunnel->post_deactivate)
+ tunnel->post_deactivate(tunnel);
+
+ tunnel->state = TB_TUNNEL_INACTIVE;
}
/**
@@ -2300,18 +2383,10 @@ bool tb_tunnel_port_on_path(const struct tb_tunnel *tunnel,
return false;
}
-static bool tb_tunnel_is_active(const struct tb_tunnel *tunnel)
+/* Is tb_tunnel_activate() called for the tunnel */
+static bool tb_tunnel_is_activated(const struct tb_tunnel *tunnel)
{
- int i;
-
- for (i = 0; i < tunnel->npaths; i++) {
- if (!tunnel->paths[i])
- return false;
- if (!tunnel->paths[i]->activated)
- return false;
- }
-
- return true;
+ return tunnel->state == TB_TUNNEL_ACTIVATING || tb_tunnel_is_active(tunnel);
}
/**
@@ -2328,7 +2403,7 @@ int tb_tunnel_maximum_bandwidth(struct tb_tunnel *tunnel, int *max_up,
int *max_down)
{
if (!tb_tunnel_is_active(tunnel))
- return -EINVAL;
+ return -ENOTCONN;
if (tunnel->maximum_bandwidth)
return tunnel->maximum_bandwidth(tunnel, max_up, max_down);
@@ -2349,7 +2424,7 @@ int tb_tunnel_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up,
int *allocated_down)
{
if (!tb_tunnel_is_active(tunnel))
- return -EINVAL;
+ return -ENOTCONN;
if (tunnel->allocated_bandwidth)
return tunnel->allocated_bandwidth(tunnel, allocated_up,
@@ -2372,7 +2447,7 @@ int tb_tunnel_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up,
int *alloc_down)
{
if (!tb_tunnel_is_active(tunnel))
- return -EINVAL;
+ return -ENOTCONN;
if (tunnel->alloc_bandwidth)
return tunnel->alloc_bandwidth(tunnel, alloc_up, alloc_down);
@@ -2397,26 +2472,27 @@ int tb_tunnel_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
{
int up_bw = 0, down_bw = 0;
- if (!tb_tunnel_is_active(tunnel))
- goto out;
-
- if (tunnel->consumed_bandwidth) {
+ /*
+ * Here we need to distinguish inactive tunnels from tunnels that
+ * are either fully active or whose activation has been started.
+ * The latter is the case for DP tunnels, where we must report the
+ * consumed bandwidth to be the maximum we gave them until the DPRX
+ * capabilities read is done by the graphics driver.
+ */
+ if (tb_tunnel_is_activated(tunnel) && tunnel->consumed_bandwidth) {
int ret;
ret = tunnel->consumed_bandwidth(tunnel, &up_bw, &down_bw);
if (ret)
return ret;
-
- tb_tunnel_dbg(tunnel, "consumed bandwidth %d/%d Mb/s\n", up_bw,
- down_bw);
}
-out:
if (consumed_up)
*consumed_up = up_bw;
if (consumed_down)
*consumed_down = down_bw;
+ tb_tunnel_dbg(tunnel, "consumed bandwidth %d/%d Mb/s\n", up_bw, down_bw);
return 0;
}
@@ -2432,7 +2508,7 @@ out:
int tb_tunnel_release_unused_bandwidth(struct tb_tunnel *tunnel)
{
if (!tb_tunnel_is_active(tunnel))
- return 0;
+ return -ENOTCONN;
if (tunnel->release_unused_bandwidth) {
int ret;
diff --git a/drivers/thunderbolt/tunnel.h b/drivers/thunderbolt/tunnel.h
index b4cff5482112..8a0a0cb21a89 100644
--- a/drivers/thunderbolt/tunnel.h
+++ b/drivers/thunderbolt/tunnel.h
@@ -19,16 +19,33 @@ enum tb_tunnel_type {
};
/**
+ * enum tb_tunnel_state - State of a tunnel
+ * @TB_TUNNEL_INACTIVE: tb_tunnel_activate() is not called for the tunnel
+ * @TB_TUNNEL_ACTIVATING: tb_tunnel_activate() returned successfully for the tunnel
+ * @TB_TUNNEL_ACTIVE: The tunnel is fully active
+ */
+enum tb_tunnel_state {
+ TB_TUNNEL_INACTIVE,
+ TB_TUNNEL_ACTIVATING,
+ TB_TUNNEL_ACTIVE,
+};
+
+/**
* struct tb_tunnel - Tunnel between two ports
+ * @kref: Reference count
* @tb: Pointer to the domain
* @src_port: Source port of the tunnel
* @dst_port: Destination port of the tunnel. For discovered incomplete
* tunnels may be %NULL or null adapter port instead.
* @paths: All paths required by the tunnel
* @npaths: Number of paths in @paths
- * @init: Optional tunnel specific initialization
- * @deinit: Optional tunnel specific de-initialization
+ * @pre_activate: Optional tunnel specific initialization called before
+ * activation. Can touch hardware.
* @activate: Optional tunnel specific activation/deactivation
+ * @post_deactivate: Optional tunnel specific de-initialization called
+ * after deactivation. Can touch hardware.
+ * @destroy: Optional tunnel specific callback called when the tunnel
+ * memory is being released. Should not touch hardware.
* @maximum_bandwidth: Returns maximum possible bandwidth for this tunnel
* @allocated_bandwidth: Return how much bandwidth is allocated for the tunnel
* @alloc_bandwidth: Change tunnel bandwidth allocation
@@ -37,6 +54,7 @@ enum tb_tunnel_type {
* @reclaim_available_bandwidth: Reclaim back available bandwidth
* @list: Tunnels are linked using this field
* @type: Type of the tunnel
+ * @state: Current state of the tunnel
* @max_up: Maximum upstream bandwidth (Mb/s) available for the tunnel.
* Only set if the bandwidth needs to be limited.
* @max_down: Maximum downstream bandwidth (Mb/s) available for the tunnel.
@@ -45,16 +63,24 @@ enum tb_tunnel_type {
* @allocated_down: Allocated downstream bandwidth (only for USB3)
* @bw_mode: DP bandwidth allocation mode registers can be used to
* determine consumed and allocated bandwidth
+ * @dprx_started: DPRX negotiation was started (tb_dp_dprx_start() was called for it)
+ * @dprx_canceled: Was DPRX capabilities read poll canceled
+ * @dprx_timeout: If set, the DPRX capabilities read poll work times out after this passes
+ * @dprx_work: Worker that is scheduled to poll completion of DPRX capabilities read
+ * @callback: Optional callback called when DP tunnel is fully activated
+ * @callback_data: Optional data for @callback
*/
struct tb_tunnel {
+ struct kref kref;
struct tb *tb;
struct tb_port *src_port;
struct tb_port *dst_port;
struct tb_path **paths;
size_t npaths;
- int (*init)(struct tb_tunnel *tunnel);
- void (*deinit)(struct tb_tunnel *tunnel);
+ int (*pre_activate)(struct tb_tunnel *tunnel);
int (*activate)(struct tb_tunnel *tunnel, bool activate);
+ void (*post_deactivate)(struct tb_tunnel *tunnel);
+ void (*destroy)(struct tb_tunnel *tunnel);
int (*maximum_bandwidth)(struct tb_tunnel *tunnel, int *max_up,
int *max_down);
int (*allocated_bandwidth)(struct tb_tunnel *tunnel, int *allocated_up,
@@ -69,11 +95,18 @@ struct tb_tunnel {
int *available_down);
struct list_head list;
enum tb_tunnel_type type;
+ enum tb_tunnel_state state;
int max_up;
int max_down;
int allocated_up;
int allocated_down;
bool bw_mode;
+ bool dprx_started;
+ bool dprx_canceled;
+ ktime_t dprx_timeout;
+ struct delayed_work dprx_work;
+ void (*callback)(struct tb_tunnel *tunnel, void *data);
+ void *callback_data;
};
struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down,
@@ -86,7 +119,9 @@ struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in,
bool alloc_hopid);
struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
struct tb_port *out, int link_nr,
- int max_up, int max_down);
+ int max_up, int max_down,
+ void (*callback)(struct tb_tunnel *, void *),
+ void *callback_data);
struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
struct tb_port *dst, int transmit_path,
int transmit_ring, int receive_path,
@@ -99,10 +134,24 @@ struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
struct tb_port *down, int max_up,
int max_down);
-void tb_tunnel_free(struct tb_tunnel *tunnel);
+void tb_tunnel_put(struct tb_tunnel *tunnel);
int tb_tunnel_activate(struct tb_tunnel *tunnel);
-int tb_tunnel_restart(struct tb_tunnel *tunnel);
void tb_tunnel_deactivate(struct tb_tunnel *tunnel);
+
+/**
+ * tb_tunnel_is_active() - Is tunnel fully activated
+ * @tunnel: Tunnel to check
+ *
+ * Returns %true if @tunnel is fully activated. For tunnels other than
+ * DP this is the case once tb_tunnel_activate() returns successfully.
+ * For DP tunnels this returns %true only once the DPRX capabilities
+ * read has completed successfully.
+ */
+static inline bool tb_tunnel_is_active(const struct tb_tunnel *tunnel)
+{
+ return tunnel->state == TB_TUNNEL_ACTIVE;
+}
+
bool tb_tunnel_is_invalid(struct tb_tunnel *tunnel);
bool tb_tunnel_port_on_path(const struct tb_tunnel *tunnel,
const struct tb_port *port);
@@ -139,6 +188,12 @@ static inline bool tb_tunnel_is_usb3(const struct tb_tunnel *tunnel)
return tunnel->type == TB_TUNNEL_USB3;
}
+static inline bool tb_tunnel_direction_downstream(const struct tb_tunnel *tunnel)
+{
+ return tb_port_path_direction_downstream(tunnel->src_port,
+ tunnel->dst_port);
+}
+
const char *tb_tunnel_type_name(const struct tb_tunnel *tunnel);
#define __TB_TUNNEL_PRINT(level, tunnel, fmt, arg...) \
diff --git a/drivers/thunderbolt/usb4.c b/drivers/thunderbolt/usb4.c
index 1515eff8cc3e..e51d01671d8e 100644
--- a/drivers/thunderbolt/usb4.c
+++ b/drivers/thunderbolt/usb4.c
@@ -17,12 +17,6 @@
#define USB4_DATA_RETRIES 3
#define USB4_DATA_DWORDS 16
-enum usb4_sb_target {
- USB4_SB_TARGET_ROUTER,
- USB4_SB_TARGET_PARTNER,
- USB4_SB_TARGET_RETIMER,
-};
-
#define USB4_NVM_READ_OFFSET_MASK GENMASK(23, 2)
#define USB4_NVM_READ_OFFSET_SHIFT 2
#define USB4_NVM_READ_LENGTH_MASK GENMASK(27, 24)
@@ -52,6 +46,10 @@ enum usb4_ba_index {
#define USB4_BA_VALUE_MASK GENMASK(31, 16)
#define USB4_BA_VALUE_SHIFT 16
+/* Delays in us used with usb4_port_wait_for_bit() */
+#define USB4_PORT_DELAY 50
+#define USB4_PORT_SB_DELAY 1000
+
static int usb4_native_switch_op(struct tb_switch *sw, u16 opcode,
u32 *metadata, u8 *status,
const void *tx_data, size_t tx_dwords,
@@ -155,7 +153,13 @@ static inline int usb4_switch_op_data(struct tb_switch *sw, u16 opcode,
tx_dwords, rx_data, rx_dwords);
}
-static void usb4_switch_check_wakes(struct tb_switch *sw)
+/**
+ * usb4_switch_check_wakes() - Check for wakes and notify PM core about them
+ * @sw: Router whose wakes to check
+ *
+ * Checks wakes occurred during suspend and notify the PM core about them.
+ */
+void usb4_switch_check_wakes(struct tb_switch *sw)
{
bool wakeup_usb4 = false;
struct usb4_port *usb4;
@@ -163,9 +167,6 @@ static void usb4_switch_check_wakes(struct tb_switch *sw)
bool wakeup = false;
u32 val;
- if (!device_may_wakeup(&sw->dev))
- return;
-
if (tb_route(sw)) {
if (tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_6, 1))
return;
@@ -244,8 +245,6 @@ int usb4_switch_setup(struct tb_switch *sw)
u32 val = 0;
int ret;
- usb4_switch_check_wakes(sw);
-
if (!tb_route(sw))
return 0;
@@ -1113,6 +1112,45 @@ int usb4_port_hotplug_enable(struct tb_port *port)
return tb_port_write(port, &val, TB_CFG_PORT, ADP_CS_5, 1);
}
+/**
+ * usb4_port_reset() - Issue downstream port reset
+ * @port: USB4 port to reset
+ *
+ * Issues downstream port reset to @port.
+ */
+int usb4_port_reset(struct tb_port *port)
+{
+ int ret;
+ u32 val;
+
+ if (!port->cap_usb4)
+ return -EINVAL;
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_usb4 + PORT_CS_19, 1);
+ if (ret)
+ return ret;
+
+ val |= PORT_CS_19_DPR;
+
+ ret = tb_port_write(port, &val, TB_CFG_PORT,
+ port->cap_usb4 + PORT_CS_19, 1);
+ if (ret)
+ return ret;
+
+ fsleep(10000);
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_usb4 + PORT_CS_19, 1);
+ if (ret)
+ return ret;
+
+ val &= ~PORT_CS_19_DPR;
+
+ return tb_port_write(port, &val, TB_CFG_PORT,
+ port->cap_usb4 + PORT_CS_19, 1);
+}
+
static int usb4_port_set_configured(struct tb_port *port, bool configured)
{
int ret;
@@ -1205,7 +1243,7 @@ void usb4_port_unconfigure_xdomain(struct tb_port *port)
}
static int usb4_port_wait_for_bit(struct tb_port *port, u32 offset, u32 bit,
- u32 value, int timeout_msec)
+ u32 value, int timeout_msec, unsigned long delay_usec)
{
ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
@@ -1220,7 +1258,7 @@ static int usb4_port_wait_for_bit(struct tb_port *port, u32 offset, u32 bit,
if ((val & bit) == value)
return 0;
- usleep_range(50, 100);
+ fsleep(delay_usec);
} while (ktime_before(ktime_get(), timeout));
return -ETIMEDOUT;
@@ -1245,8 +1283,20 @@ static int usb4_port_write_data(struct tb_port *port, const void *data,
dwords);
}
-static int usb4_port_sb_read(struct tb_port *port, enum usb4_sb_target target,
- u8 index, u8 reg, void *buf, u8 size)
+/**
+ * usb4_port_sb_read() - Read from sideband register
+ * @port: USB4 port to read
+ * @target: Sideband target
+ * @index: Retimer index if target is %USB4_SB_TARGET_RETIMER
+ * @reg: Sideband register index
+ * @buf: Buffer where the sideband data is copied
+ * @size: Size of @buf
+ *
+ * Reads data from sideband register @reg and copies it into @buf.
+ * Returns %0 in case of success and negative errno in case of failure.
+ */
+int usb4_port_sb_read(struct tb_port *port, enum usb4_sb_target target, u8 index,
+ u8 reg, void *buf, u8 size)
{
size_t dwords = DIV_ROUND_UP(size, 4);
int ret;
@@ -1268,7 +1318,7 @@ static int usb4_port_sb_read(struct tb_port *port, enum usb4_sb_target target,
return ret;
ret = usb4_port_wait_for_bit(port, port->cap_usb4 + PORT_CS_1,
- PORT_CS_1_PND, 0, 500);
+ PORT_CS_1_PND, 0, 500, USB4_PORT_SB_DELAY);
if (ret)
return ret;
@@ -1285,8 +1335,20 @@ static int usb4_port_sb_read(struct tb_port *port, enum usb4_sb_target target,
return buf ? usb4_port_read_data(port, buf, dwords) : 0;
}
-static int usb4_port_sb_write(struct tb_port *port, enum usb4_sb_target target,
- u8 index, u8 reg, const void *buf, u8 size)
+/**
+ * usb4_port_sb_write() - Write to sideband register
+ * @port: USB4 port to write
+ * @target: Sideband target
+ * @index: Retimer index if target is %USB4_SB_TARGET_RETIMER
+ * @reg: Sideband register index
+ * @buf: Data to write
+ * @size: Size of @buf
+ *
+ * Writes @buf to sideband register @reg. Returns %0 in case of success
+ * and negative errno in case of failure.
+ */
+int usb4_port_sb_write(struct tb_port *port, enum usb4_sb_target target,
+ u8 index, u8 reg, const void *buf, u8 size)
{
size_t dwords = DIV_ROUND_UP(size, 4);
int ret;
@@ -1315,7 +1377,7 @@ static int usb4_port_sb_write(struct tb_port *port, enum usb4_sb_target target,
return ret;
ret = usb4_port_wait_for_bit(port, port->cap_usb4 + PORT_CS_1,
- PORT_CS_1_PND, 0, 500);
+ PORT_CS_1_PND, 0, 500, USB4_PORT_SB_DELAY);
if (ret)
return ret;
@@ -1370,6 +1432,8 @@ static int usb4_port_sb_op(struct tb_port *port, enum usb4_sb_target target,
if (val != opcode)
return usb4_port_sb_opcode_err_to_errno(val);
+
+ fsleep(USB4_PORT_SB_DELAY);
} while (ktime_before(ktime_get(), timeout));
return -ETIMEDOUT;
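With usb4_port_sb_read() and usb4_port_sb_write() now exported within the driver (primarily for the debugfs margining code), other files can reach sideband registers directly. A minimal read sketch; the retimer index and the register picked here are only an example:

u32 data;
int ret;

/* Read one data dword from the first retimer behind this USB4 port */
ret = usb4_port_sb_read(port, USB4_SB_TARGET_RETIMER, 1,
			USB4_SB_DATA, &data, sizeof(data));
if (ret)
	return ret;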
@@ -1551,133 +1615,157 @@ int usb4_port_asym_start(struct tb_port *port)
* port started the symmetry transition.
*/
ret = usb4_port_wait_for_bit(port, port->cap_usb4 + PORT_CS_19,
- PORT_CS_19_START_ASYM, 0, 1000);
+ PORT_CS_19_START_ASYM, 0, 1000,
+ USB4_PORT_DELAY);
if (ret)
return ret;
/* Then wait for the transtion to be completed */
return usb4_port_wait_for_bit(port, port->cap_usb4 + PORT_CS_18,
- PORT_CS_18_TIP, 0, 5000);
+ PORT_CS_18_TIP, 0, 5000, USB4_PORT_DELAY);
}
/**
 * usb4_port_margining_caps() - Read USB4 port margining capabilities
* @port: USB4 port
+ * @target: Sideband target
+ * @index: Retimer index if target is %USB4_SB_TARGET_RETIMER
* @caps: Array with at least two elements to hold the results
+ * @ncaps: Number of elements in the caps array
*
* Reads the USB4 port lane margining capabilities into @caps.
*/
-int usb4_port_margining_caps(struct tb_port *port, u32 *caps)
+int usb4_port_margining_caps(struct tb_port *port, enum usb4_sb_target target,
+ u8 index, u32 *caps, size_t ncaps)
{
int ret;
- ret = usb4_port_sb_op(port, USB4_SB_TARGET_ROUTER, 0,
+ ret = usb4_port_sb_op(port, target, index,
USB4_SB_OPCODE_READ_LANE_MARGINING_CAP, 500);
if (ret)
return ret;
- return usb4_port_sb_read(port, USB4_SB_TARGET_ROUTER, 0,
- USB4_SB_DATA, caps, sizeof(*caps) * 2);
+ return usb4_port_sb_read(port, target, index, USB4_SB_DATA, caps,
+ sizeof(*caps) * ncaps);
}
/**
* usb4_port_hw_margin() - Run hardware lane margining on port
* @port: USB4 port
- * @lanes: Which lanes to run (must match the port capabilities). Can be
- * %0, %1 or %7.
- * @ber_level: BER level contour value
- * @timing: Perform timing margining instead of voltage
- * @right_high: Use Right/high margin instead of left/low
- * @results: Array with at least two elements to hold the results
+ * @target: Sideband target
+ * @index: Retimer index if target is %USB4_SB_TARGET_RETIMER
+ * @params: Parameters for USB4 hardware margining
+ * @results: Array to hold the results
+ * @nresults: Number of elements in the results array
*
* Runs hardware lane margining on USB4 port and returns the result in
* @results.
*/
-int usb4_port_hw_margin(struct tb_port *port, unsigned int lanes,
- unsigned int ber_level, bool timing, bool right_high,
- u32 *results)
+int usb4_port_hw_margin(struct tb_port *port, enum usb4_sb_target target,
+ u8 index, const struct usb4_port_margining_params *params,
+ u32 *results, size_t nresults)
{
u32 val;
int ret;
- val = lanes;
- if (timing)
- val |= USB4_MARGIN_HW_TIME;
- if (right_high)
- val |= USB4_MARGIN_HW_RH;
- if (ber_level)
- val |= (ber_level << USB4_MARGIN_HW_BER_SHIFT) &
- USB4_MARGIN_HW_BER_MASK;
+ if (WARN_ON_ONCE(!params))
+ return -EINVAL;
- ret = usb4_port_sb_write(port, USB4_SB_TARGET_ROUTER, 0,
- USB4_SB_METADATA, &val, sizeof(val));
+ val = params->lanes;
+ if (params->time)
+ val |= USB4_MARGIN_HW_TIME;
+ if (params->right_high || params->upper_eye)
+ val |= USB4_MARGIN_HW_RHU;
+ if (params->ber_level)
+ val |= FIELD_PREP(USB4_MARGIN_HW_BER_MASK, params->ber_level);
+ if (params->optional_voltage_offset_range)
+ val |= USB4_MARGIN_HW_OPT_VOLTAGE;
+
+ ret = usb4_port_sb_write(port, target, index, USB4_SB_METADATA, &val,
+ sizeof(val));
if (ret)
return ret;
- ret = usb4_port_sb_op(port, USB4_SB_TARGET_ROUTER, 0,
+ ret = usb4_port_sb_op(port, target, index,
USB4_SB_OPCODE_RUN_HW_LANE_MARGINING, 2500);
if (ret)
return ret;
- return usb4_port_sb_read(port, USB4_SB_TARGET_ROUTER, 0,
- USB4_SB_DATA, results, sizeof(*results) * 2);
+ return usb4_port_sb_read(port, target, index, USB4_SB_DATA, results,
+ sizeof(*results) * nresults);
}
/**
* usb4_port_sw_margin() - Run software lane margining on port
* @port: USB4 port
- * @lanes: Which lanes to run (must match the port capabilities). Can be
- * %0, %1 or %7.
- * @timing: Perform timing margining instead of voltage
- * @right_high: Use Right/high margin instead of left/low
- * @counter: What to do with the error counter
+ * @target: Sideband target
+ * @index: Retimer index if @target is %USB4_SB_TARGET_RETIMER
+ * @params: Parameters for USB4 software margining
+ * @results: Data word for the operation completion data
*
* Runs software lane margining on USB4 port. Read back the error
* counters by calling usb4_port_sw_margin_errors(). Returns %0 on
* success and negative errno otherwise.
*/
-int usb4_port_sw_margin(struct tb_port *port, unsigned int lanes, bool timing,
- bool right_high, u32 counter)
+int usb4_port_sw_margin(struct tb_port *port, enum usb4_sb_target target,
+ u8 index, const struct usb4_port_margining_params *params,
+ u32 *results)
{
u32 val;
int ret;
- val = lanes;
- if (timing)
+ if (WARN_ON_ONCE(!params))
+ return -EINVAL;
+
+ val = params->lanes;
+ if (params->time)
val |= USB4_MARGIN_SW_TIME;
- if (right_high)
+ if (params->optional_voltage_offset_range)
+ val |= USB4_MARGIN_SW_OPT_VOLTAGE;
+ if (params->right_high)
val |= USB4_MARGIN_SW_RH;
- val |= (counter << USB4_MARGIN_SW_COUNTER_SHIFT) &
- USB4_MARGIN_SW_COUNTER_MASK;
+ if (params->upper_eye)
+ val |= USB4_MARGIN_SW_UPPER_EYE;
+ val |= FIELD_PREP(USB4_MARGIN_SW_COUNTER_MASK, params->error_counter);
+ val |= FIELD_PREP(USB4_MARGIN_SW_VT_MASK, params->voltage_time_offset);
- ret = usb4_port_sb_write(port, USB4_SB_TARGET_ROUTER, 0,
- USB4_SB_METADATA, &val, sizeof(val));
+ ret = usb4_port_sb_write(port, target, index, USB4_SB_METADATA, &val,
+ sizeof(val));
+ if (ret)
+ return ret;
+
+ ret = usb4_port_sb_op(port, target, index,
+ USB4_SB_OPCODE_RUN_SW_LANE_MARGINING, 2500);
if (ret)
return ret;
- return usb4_port_sb_op(port, USB4_SB_TARGET_ROUTER, 0,
- USB4_SB_OPCODE_RUN_SW_LANE_MARGINING, 2500);
+ return usb4_port_sb_read(port, target, index, USB4_SB_DATA, results,
+ sizeof(*results));
}
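/*
 * Illustrative sketch, not part of this patch: software margining uses
 * the same parameter structure and now also hands back the operation
 * completion data word through @results. The field values below are
 * placeholders.
 */
static int example_run_sw_margining(struct tb_port *port)
{
	struct usb4_port_margining_params params = {
		.lanes = 0,			/* lane 0 only */
		.time = false,			/* voltage margining */
		.right_high = true,
		.error_counter = 0,		/* error counter action, placeholder */
		.voltage_time_offset = 10,	/* offset step, placeholder */
	};
	u32 results;

	return usb4_port_sw_margin(port, USB4_SB_TARGET_ROUTER, 0, &params,
				   &results);
}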
/**
* usb4_port_sw_margin_errors() - Read the software margining error counters
* @port: USB4 port
+ * @target: Sideband target
+ * @index: Retimer index if @target is %USB4_SB_TARGET_RETIMER
* @errors: Error metadata is copied here.
*
* This reads back the software margining error counters from the port.
* Returns %0 on success and negative errno otherwise.
*/
-int usb4_port_sw_margin_errors(struct tb_port *port, u32 *errors)
+int usb4_port_sw_margin_errors(struct tb_port *port, enum usb4_sb_target target,
+ u8 index, u32 *errors)
{
int ret;
- ret = usb4_port_sb_op(port, USB4_SB_TARGET_ROUTER, 0,
+ ret = usb4_port_sb_op(port, target, index,
USB4_SB_OPCODE_READ_SW_MARGIN_ERR, 150);
if (ret)
return ret;
- return usb4_port_sb_read(port, USB4_SB_TARGET_ROUTER, 0,
- USB4_SB_METADATA, errors, sizeof(*errors));
+ return usb4_port_sb_read(port, target, index, USB4_SB_METADATA, errors,
+ sizeof(*errors));
}
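/*
 * Illustrative sketch, not part of this patch: after a software
 * margining run the error counters are read back from the same sideband
 * target that ran the operation. Decoding of the metadata word is left
 * to the caller.
 */
static int example_read_sw_margin_errors(struct tb_port *port, u8 retimer_index)
{
	u32 errors;
	int ret;

	ret = usb4_port_sw_margin_errors(port, USB4_SB_TARGET_RETIMER,
					 retimer_index, &errors);
	if (ret)
		return ret;

	return errors ? -EIO : 0;	/* treat any counted errors as failure */
}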
static inline int usb4_port_retimer_op(struct tb_port *port, u8 index,
@@ -1730,68 +1818,51 @@ int usb4_port_retimer_unset_inbound_sbtx(struct tb_port *port, u8 index)
}
/**
- * usb4_port_retimer_read() - Read from retimer sideband registers
+ * usb4_port_retimer_is_last() - Is the retimer last on-board retimer
* @port: USB4 port
* @index: Retimer index
- * @reg: Sideband register to read
- * @buf: Data from @reg is stored here
- * @size: Number of bytes to read
*
- * Function reads retimer sideband registers starting from @reg. The
- * retimer is connected to @port at @index. Returns %0 in case of
- * success, and read data is copied to @buf. If there is no retimer
- * present at given @index returns %-ENODEV. In any other failure
- * returns negative errno.
+ * If the retimer at @index is the last one (connected directly to the
+ * Type-C port) this function returns %1, and %0 if it is not. If the
+ * retimer is not present returns %-ENODEV. Otherwise returns negative
+ * errno.
*/
-int usb4_port_retimer_read(struct tb_port *port, u8 index, u8 reg, void *buf,
- u8 size)
+int usb4_port_retimer_is_last(struct tb_port *port, u8 index)
{
- return usb4_port_sb_read(port, USB4_SB_TARGET_RETIMER, index, reg, buf,
- size);
-}
+ u32 metadata;
+ int ret;
-/**
- * usb4_port_retimer_write() - Write to retimer sideband registers
- * @port: USB4 port
- * @index: Retimer index
- * @reg: Sideband register to write
- * @buf: Data that is written starting from @reg
- * @size: Number of bytes to write
- *
- * Writes retimer sideband registers starting from @reg. The retimer is
- * connected to @port at @index. Returns %0 in case of success. If there
- * is no retimer present at given @index returns %-ENODEV. In any other
- * failure returns negative errno.
- */
-int usb4_port_retimer_write(struct tb_port *port, u8 index, u8 reg,
- const void *buf, u8 size)
-{
- return usb4_port_sb_write(port, USB4_SB_TARGET_RETIMER, index, reg, buf,
- size);
+ ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_QUERY_LAST_RETIMER,
+ 500);
+ if (ret)
+ return ret;
+
+ ret = usb4_port_sb_read(port, USB4_SB_TARGET_RETIMER, index,
+ USB4_SB_METADATA, &metadata, sizeof(metadata));
+ return ret ? ret : metadata & 1;
}
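/*
 * Illustrative sketch, not part of this patch: a caller can walk the
 * retimer indices and stop at the last on-board retimer. The upper
 * bound of six indices is an assumption made for this sketch.
 */
static int example_find_last_onboard_retimer(struct tb_port *port)
{
	u8 index;

	for (index = 1; index <= 6; index++) {
		int ret = usb4_port_retimer_is_last(port, index);

		if (ret < 0)
			return ret;	/* -ENODEV if nothing at this index */
		if (ret == 1)
			return index;
	}
	return -ENODEV;
}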
/**
- * usb4_port_retimer_is_last() - Is the retimer last on-board retimer
+ * usb4_port_retimer_is_cable() - Is the retimer a cable retimer
* @port: USB4 port
* @index: Retimer index
*
- * If the retimer at @index is last one (connected directly to the
- * Type-C port) this function returns %1. If it is not returns %0. If
- * the retimer is not present returns %-ENODEV. Otherwise returns
- * negative errno.
+ * If the retimer at @index is a cable retimer this function returns %1,
+ * and %0 if it is an on-board retimer. If no retimer is present at
+ * @index returns %-ENODEV. Otherwise returns negative errno.
*/
-int usb4_port_retimer_is_last(struct tb_port *port, u8 index)
+int usb4_port_retimer_is_cable(struct tb_port *port, u8 index)
{
u32 metadata;
int ret;
- ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_QUERY_LAST_RETIMER,
+ ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_QUERY_CABLE_RETIMER,
500);
if (ret)
return ret;
- ret = usb4_port_retimer_read(port, index, USB4_SB_METADATA, &metadata,
- sizeof(metadata));
+ ret = usb4_port_sb_read(port, USB4_SB_TARGET_RETIMER, index,
+ USB4_SB_METADATA, &metadata, sizeof(metadata));
return ret ? ret : metadata & 1;
}
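/*
 * Illustrative sketch, not part of this patch: the new query lets a
 * caller tell cable retimers apart from on-board ones, for example when
 * deciding which retimers to expose for margining.
 */
static bool example_retimer_is_on_cable(struct tb_port *port, u8 index)
{
	/* Treat a missing retimer or any error as "not a cable retimer" */
	return usb4_port_retimer_is_cable(port, index) == 1;
}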
@@ -1816,8 +1887,8 @@ int usb4_port_retimer_nvm_sector_size(struct tb_port *port, u8 index)
if (ret)
return ret;
- ret = usb4_port_retimer_read(port, index, USB4_SB_METADATA, &metadata,
- sizeof(metadata));
+ ret = usb4_port_sb_read(port, USB4_SB_TARGET_RETIMER, index,
+ USB4_SB_METADATA, &metadata, sizeof(metadata));
return ret ? ret : metadata & USB4_NVM_SECTOR_SIZE_MASK;
}
@@ -1842,8 +1913,8 @@ int usb4_port_retimer_nvm_set_offset(struct tb_port *port, u8 index,
metadata = (dwaddress << USB4_NVM_SET_OFFSET_SHIFT) &
USB4_NVM_SET_OFFSET_MASK;
- ret = usb4_port_retimer_write(port, index, USB4_SB_METADATA, &metadata,
- sizeof(metadata));
+ ret = usb4_port_sb_write(port, USB4_SB_TARGET_RETIMER, index,
+ USB4_SB_METADATA, &metadata, sizeof(metadata));
if (ret)
return ret;
@@ -1865,8 +1936,8 @@ static int usb4_port_retimer_nvm_write_next_block(void *data,
u8 index = info->index;
int ret;
- ret = usb4_port_retimer_write(port, index, USB4_SB_DATA,
- buf, dwords * 4);
+ ret = usb4_port_sb_write(port, USB4_SB_TARGET_RETIMER, index,
+ USB4_SB_DATA, buf, dwords * 4);
if (ret)
return ret;
@@ -1945,8 +2016,8 @@ int usb4_port_retimer_nvm_authenticate_status(struct tb_port *port, u8 index,
u32 metadata, val;
int ret;
- ret = usb4_port_retimer_read(port, index, USB4_SB_OPCODE, &val,
- sizeof(val));
+ ret = usb4_port_sb_read(port, USB4_SB_TARGET_RETIMER, index,
+ USB4_SB_OPCODE, &val, sizeof(val));
if (ret)
return ret;
@@ -1957,8 +2028,9 @@ int usb4_port_retimer_nvm_authenticate_status(struct tb_port *port, u8 index,
return 0;
case -EAGAIN:
- ret = usb4_port_retimer_read(port, index, USB4_SB_METADATA,
- &metadata, sizeof(metadata));
+ ret = usb4_port_sb_read(port, USB4_SB_TARGET_RETIMER, index,
+ USB4_SB_METADATA, &metadata,
+ sizeof(metadata));
if (ret)
return ret;
@@ -1983,8 +2055,8 @@ static int usb4_port_retimer_nvm_read_block(void *data, unsigned int dwaddress,
if (dwords < USB4_DATA_DWORDS)
metadata |= dwords << USB4_NVM_READ_LENGTH_SHIFT;
- ret = usb4_port_retimer_write(port, index, USB4_SB_METADATA, &metadata,
- sizeof(metadata));
+ ret = usb4_port_sb_write(port, USB4_SB_TARGET_RETIMER, index,
+ USB4_SB_METADATA, &metadata, sizeof(metadata));
if (ret)
return ret;
@@ -1992,8 +2064,8 @@ static int usb4_port_retimer_nvm_read_block(void *data, unsigned int dwaddress,
if (ret)
return ret;
- return usb4_port_retimer_read(port, index, USB4_SB_DATA, buf,
- dwords * 4);
+ return usb4_port_sb_read(port, USB4_SB_TARGET_RETIMER, index,
+ USB4_SB_DATA, buf, dwords * 4);
}
/**
@@ -2083,7 +2155,8 @@ static int usb4_usb3_port_cm_request(struct tb_port *port, bool request)
*/
val &= ADP_USB3_CS_2_CMR;
return usb4_port_wait_for_bit(port, port->cap_adap + ADP_USB3_CS_1,
- ADP_USB3_CS_1_HCA, val, 1500);
+ ADP_USB3_CS_1_HCA, val, 1500,
+ USB4_PORT_DELAY);
}
static inline int usb4_usb3_port_set_cm_request(struct tb_port *port)
@@ -2819,8 +2892,10 @@ static int usb4_dp_port_wait_and_clear_cm_ack(struct tb_port *port,
usleep_range(50, 100);
} while (ktime_before(ktime_get(), end));
- if (val & ADP_DP_CS_8_DR)
+ if (val & ADP_DP_CS_8_DR) {
+ tb_port_warn(port, "timeout waiting for DPTX request to clear\n");
return -ETIMEDOUT;
+ }
ret = tb_port_read(port, &val, TB_CFG_PORT,
port->cap_adap + ADP_DP_CS_2, 1);
diff --git a/drivers/thunderbolt/usb4_port.c b/drivers/thunderbolt/usb4_port.c
index e355bfd6343f..5150879888ca 100644
--- a/drivers/thunderbolt/usb4_port.c
+++ b/drivers/thunderbolt/usb4_port.c
@@ -243,7 +243,7 @@ static void usb4_port_device_release(struct device *dev)
kfree(usb4);
}
-struct device_type usb4_port_device_type = {
+const struct device_type usb4_port_device_type = {
.name = "usb4_port",
.groups = usb4_port_device_groups,
.release = usb4_port_device_release,
diff --git a/drivers/thunderbolt/xdomain.c b/drivers/thunderbolt/xdomain.c
index 9495742913d5..b0630e6d9472 100644
--- a/drivers/thunderbolt/xdomain.c
+++ b/drivers/thunderbolt/xdomain.c
@@ -250,7 +250,7 @@ static int tb_xdp_handle_error(const struct tb_xdp_error_response *res)
case ERROR_UNKNOWN_DOMAIN:
return -EIO;
case ERROR_NOT_SUPPORTED:
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
case ERROR_NOT_READY:
return -EAGAIN;
default:
@@ -997,12 +997,12 @@ static void tb_service_release(struct device *dev)
struct tb_xdomain *xd = tb_service_parent(svc);
tb_service_debugfs_remove(svc);
- ida_simple_remove(&xd->service_ids, svc->id);
+ ida_free(&xd->service_ids, svc->id);
kfree(svc->key);
kfree(svc);
}
-struct device_type tb_service_type = {
+const struct device_type tb_service_type = {
.name = "thunderbolt_service",
.groups = tb_service_attr_groups,
.uevent = tb_service_uevent,
@@ -1026,7 +1026,7 @@ static int remove_missing_service(struct device *dev, void *data)
return 0;
}
-static int find_service(struct device *dev, void *data)
+static int find_service(struct device *dev, const void *data)
{
const struct tb_property *p = data;
struct tb_service *svc;
@@ -1099,7 +1099,7 @@ static void enumerate_services(struct tb_xdomain *xd)
break;
}
- id = ida_simple_get(&xd->service_ids, 0, 0, GFP_KERNEL);
+ id = ida_alloc(&xd->service_ids, GFP_KERNEL);
if (id < 0) {
kfree(svc->key);
kfree(svc);
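/*
 * Illustrative sketch, not part of this patch: the deprecated
 * ida_simple_get()/ida_simple_remove() pair maps onto ida_alloc() and
 * ida_free() like this (unbounded allocation starting from 0).
 */
static int example_service_id_cycle(struct ida *ida)
{
	int id = ida_alloc(ida, GFP_KERNEL);	/* was ida_simple_get(ida, 0, 0, GFP_KERNEL) */

	if (id < 0)
		return id;

	ida_free(ida, id);			/* was ida_simple_remove(ida, id) */
	return 0;
}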
@@ -1791,13 +1791,13 @@ static ssize_t rx_lanes_show(struct device *dev, struct device_attribute *attr,
switch (xd->link_width) {
case TB_LINK_WIDTH_SINGLE:
- case TB_LINK_WIDTH_ASYM_RX:
+ case TB_LINK_WIDTH_ASYM_TX:
width = 1;
break;
case TB_LINK_WIDTH_DUAL:
width = 2;
break;
- case TB_LINK_WIDTH_ASYM_TX:
+ case TB_LINK_WIDTH_ASYM_RX:
width = 3;
break;
default:
@@ -1817,13 +1817,13 @@ static ssize_t tx_lanes_show(struct device *dev, struct device_attribute *attr,
switch (xd->link_width) {
case TB_LINK_WIDTH_SINGLE:
- case TB_LINK_WIDTH_ASYM_TX:
+ case TB_LINK_WIDTH_ASYM_RX:
width = 1;
break;
case TB_LINK_WIDTH_DUAL:
width = 2;
break;
- case TB_LINK_WIDTH_ASYM_RX:
+ case TB_LINK_WIDTH_ASYM_TX:
width = 3;
break;
default:
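/*
 * Illustrative sketch, not part of this patch: after the swap above the
 * sysfs lane counts follow the link width like this, e.g. an asymmetric
 * link transmitting on three lanes reports tx_lanes=3 and rx_lanes=1.
 */
static void example_lane_counts(enum tb_link_width width,
				unsigned int *rx, unsigned int *tx)
{
	*rx = (width == TB_LINK_WIDTH_ASYM_RX) ? 3 :
	      (width == TB_LINK_WIDTH_DUAL) ? 2 : 1;
	*tx = (width == TB_LINK_WIDTH_ASYM_TX) ? 3 :
	      (width == TB_LINK_WIDTH_DUAL) ? 2 : 1;
}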
@@ -1893,7 +1893,7 @@ static const struct dev_pm_ops tb_xdomain_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(tb_xdomain_suspend, tb_xdomain_resume)
};
-struct device_type tb_xdomain_type = {
+const struct device_type tb_xdomain_type = {
.name = "thunderbolt_xdomain",
.release = tb_xdomain_release,
.pm = &tb_xdomain_pm_ops,