Diffstat (limited to 'drivers/thunderbolt')
-rw-r--r--  drivers/thunderbolt/Kconfig        21
-rw-r--r--  drivers/thunderbolt/acpi.c         68
-rw-r--r--  drivers/thunderbolt/cap.c          49
-rw-r--r--  drivers/thunderbolt/clx.c          12
-rw-r--r--  drivers/thunderbolt/ctl.c          53
-rw-r--r--  drivers/thunderbolt/ctl.h           2
-rw-r--r--  drivers/thunderbolt/debugfs.c    1494
-rw-r--r--  drivers/thunderbolt/dma_port.c     21
-rw-r--r--  drivers/thunderbolt/domain.c       87
-rw-r--r--  drivers/thunderbolt/eeprom.c       86
-rw-r--r--  drivers/thunderbolt/icm.c          44
-rw-r--r--  drivers/thunderbolt/lc.c           60
-rw-r--r--  drivers/thunderbolt/nhi.c          44
-rw-r--r--  drivers/thunderbolt/nhi.h           5
-rw-r--r--  drivers/thunderbolt/nhi_regs.h      6
-rw-r--r--  drivers/thunderbolt/nvm.c          44
-rw-r--r--  drivers/thunderbolt/path.c         18
-rw-r--r--  drivers/thunderbolt/property.c     38
-rw-r--r--  drivers/thunderbolt/retimer.c      92
-rw-r--r--  drivers/thunderbolt/sb_regs.h      62
-rw-r--r--  drivers/thunderbolt/switch.c      160
-rw-r--r--  drivers/thunderbolt/tb.c          369
-rw-r--r--  drivers/thunderbolt/tb.h          165
-rw-r--r--  drivers/thunderbolt/tb_msgs.h       1
-rw-r--r--  drivers/thunderbolt/tb_regs.h       6
-rw-r--r--  drivers/thunderbolt/test.c         90
-rw-r--r--  drivers/thunderbolt/tmu.c          20
-rw-r--r--  drivers/thunderbolt/tunnel.c      597
-rw-r--r--  drivers/thunderbolt/tunnel.h       87
-rw-r--r--  drivers/thunderbolt/usb4.c        615
-rw-r--r--  drivers/thunderbolt/usb4_port.c    63
-rw-r--r--  drivers/thunderbolt/xdomain.c      59
32 files changed, 3276 insertions, 1262 deletions
diff --git a/drivers/thunderbolt/Kconfig b/drivers/thunderbolt/Kconfig
index 448fd2ec8f6e..db3b0bef48f4 100644
--- a/drivers/thunderbolt/Kconfig
+++ b/drivers/thunderbolt/Kconfig
@@ -4,8 +4,8 @@ menuconfig USB4
depends on PCI
select APPLE_PROPERTIES if EFI_STUB && X86
select CRC32
- select CRYPTO
- select CRYPTO_HASH
+ select CRYPTO_LIB_SHA256
+ select CRYPTO_LIB_UTILS
select NVMEM
help
USB4 and Thunderbolt driver. USB4 is the public specification
@@ -22,20 +22,25 @@ config USB4_DEBUGFS_WRITE
bool "Enable write by debugfs to configuration spaces (DANGEROUS)"
help
Enables writing to device configuration registers through
- debugfs interface.
+ debugfs interface. You can use the Thunderbolt/USB4 debugging
+ tools (tbtools) to access these registers. For more information
+ see:
+
+ https://github.com/intel/tbtools
Only enable this if you know what you are doing! Never enable
this for production systems or distro kernels.
config USB4_DEBUGFS_MARGINING
- bool "Expose receiver lane margining operations under USB4 ports (DANGEROUS)"
+ bool "Expose receiver lane margining operations under USB4 ports and retimers (DANGEROUS)"
depends on DEBUG_FS
depends on USB4_DEBUGFS_WRITE
help
- Enables hardware and software based receiver lane margining support
- under each USB4 port. Used for electrical quality and robustness
- validation during manufacturing. Should not be enabled by distro
- kernels.
+ Enables hardware and software based receiver lane margining
+ support under each USB4 port and retimer, including retimers
+ on the other side of the cable. Used for electrical quality
+ and robustness validation during manufacturing. Should not be
+ enabled by distro kernels.
config USB4_KUNIT_TEST
bool "KUnit tests" if !KUNIT_ALL_TESTS
diff --git a/drivers/thunderbolt/acpi.c b/drivers/thunderbolt/acpi.c
index c9b6bb46111c..45d1415871b4 100644
--- a/drivers/thunderbolt/acpi.c
+++ b/drivers/thunderbolt/acpi.c
@@ -32,40 +32,20 @@ static acpi_status tb_acpi_add_link(acpi_handle handle, u32 level, void *data,
goto out_put;
/*
- * Try to find physical device walking upwards to the hierarcy.
- * We need to do this because the xHCI driver might not yet be
- * bound so the USB3 SuperSpeed ports are not yet created.
+ * Ignore USB3 ports here as USB core will set up device links between
+ * tunneled USB3 devices and NHI host during USB device creation.
+ * USB3 ports might not even have a physical device if the xHCI
+ * driver is not bound yet.
*/
- do {
- dev = acpi_get_first_physical_node(adev);
- if (dev)
- break;
-
- adev = acpi_dev_parent(adev);
- } while (adev);
-
- /*
- * Check that the device is PCIe. This is because USB3
- * SuperSpeed ports have this property and they are not power
- * managed with the xHCI and the SuperSpeed hub so we create the
- * link from xHCI instead.
- */
- while (dev && !dev_is_pci(dev))
- dev = dev->parent;
-
- if (!dev)
+ dev = acpi_get_first_physical_node(adev);
+ if (!dev || !dev_is_pci(dev))
goto out_put;
- /*
- * Check that this actually matches the type of device we
- * expect. It should either be xHCI or PCIe root/downstream
- * port.
- */
+ /* Check that this matches a PCIe root/downstream port. */
pdev = to_pci_dev(dev);
- if (pdev->class == PCI_CLASS_SERIAL_USB_XHCI ||
- (pci_is_pcie(pdev) &&
- (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT ||
- pci_pcie_type(pdev) == PCI_EXP_TYPE_DOWNSTREAM))) {
+ if (pci_is_pcie(pdev) &&
+ (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT ||
+ pci_pcie_type(pdev) == PCI_EXP_TYPE_DOWNSTREAM)) {
const struct device_link *link;
/*
@@ -106,7 +86,7 @@ out_put:
* @nhi ACPI node. For each reference a device link is added. The link
* is automatically removed by the driver core.
*
- * Returns %true if at least one link was created.
+ * Returns %true if at least one link was created, %false otherwise.
*/
bool tb_acpi_add_links(struct tb_nhi *nhi)
{
@@ -133,8 +113,10 @@ bool tb_acpi_add_links(struct tb_nhi *nhi)
/**
* tb_acpi_is_native() - Did the platform grant native TBT/USB4 control
*
- * Returns %true if the platform granted OS native control over
- * TBT/USB4. In this case software based connection manager can be used,
+ * Return: %true if the platform granted OS native control over
+ * TBT/USB4, %false otherwise.
+ *
+ * When %true is returned, the software based connection manager can
+ * be used; otherwise a firmware based connection manager is running.
*/
bool tb_acpi_is_native(void)
@@ -146,8 +128,8 @@ bool tb_acpi_is_native(void)
/**
* tb_acpi_may_tunnel_usb3() - Is USB3 tunneling allowed by the platform
*
- * When software based connection manager is used, this function
- * returns %true if platform allows native USB3 tunneling.
+ * Return: %true if software based connection manager is used and
+ * platform allows native USB 3.x tunneling, %false otherwise.
*/
bool tb_acpi_may_tunnel_usb3(void)
{
@@ -159,8 +141,8 @@ bool tb_acpi_may_tunnel_usb3(void)
/**
* tb_acpi_may_tunnel_dp() - Is DisplayPort tunneling allowed by the platform
*
- * When software based connection manager is used, this function
- * returns %true if platform allows native DP tunneling.
+ * Return: %true if software based connection manager is used and
+ * platform allows native DP tunneling, %false otherwise.
*/
bool tb_acpi_may_tunnel_dp(void)
{
@@ -172,8 +154,8 @@ bool tb_acpi_may_tunnel_dp(void)
/**
* tb_acpi_may_tunnel_pcie() - Is PCIe tunneling allowed by the platform
*
- * When software based connection manager is used, this function
- * returns %true if platform allows native PCIe tunneling.
+ * Return: %true if software based connection manager is used and
+ * platform allows native PCIe tunneling, %false otherwise.
*/
bool tb_acpi_may_tunnel_pcie(void)
{
@@ -185,8 +167,8 @@ bool tb_acpi_may_tunnel_pcie(void)
/**
* tb_acpi_is_xdomain_allowed() - Are XDomain connections allowed
*
- * When software based connection manager is used, this function
- * returns %true if platform allows XDomain connections.
+ * Return: %true if software based connection manager is used and
+ * platform allows XDomain tunneling, %false otherwise.
*/
bool tb_acpi_is_xdomain_allowed(void)
{
@@ -276,7 +258,7 @@ static int tb_acpi_retimer_set_power(struct tb_port *port, bool power)
*
* This should only be called if the USB4/TBT link is not up.
*
- * Returns %0 on success.
+ * Return: %0 on success, negative errno otherwise.
*/
int tb_acpi_power_on_retimers(struct tb_port *port)
{
@@ -290,7 +272,7 @@ int tb_acpi_power_on_retimers(struct tb_port *port)
* This is the opposite of tb_acpi_power_on_retimers(). After returning
* successfully the normal operations with the @port can continue.
*
- * Returns %0 on success.
+ * Return: %0 on success, negative errno otherwise.
*/
int tb_acpi_power_off_retimers(struct tb_port *port)
{
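The simplified tb_acpi_add_link() above still ends by linking the matched PCIe port to the NHI. For reference, a sketch of that call; the flags are assumptions based on typical usage in this driver and are not shown in the hunk:

	const struct device_link *link;

	/*
	 * Make the PCIe root/downstream port a consumer of the NHI so
	 * the NHI stays powered while the tunneled port is in use.
	 */
	link = device_link_add(&pdev->dev, &nhi->pdev->dev,
			       DL_FLAG_AUTOREMOVE_SUPPLIER |
			       DL_FLAG_PM_RUNTIME);
	if (!link)
		pr_warn("device link creation failed\n");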
diff --git a/drivers/thunderbolt/cap.c b/drivers/thunderbolt/cap.c
index 8ecd610c62d5..4ab22d5291ac 100644
--- a/drivers/thunderbolt/cap.c
+++ b/drivers/thunderbolt/cap.c
@@ -64,10 +64,14 @@ static void tb_port_dummy_read(struct tb_port *port)
* @port: Port to find the capability for
* @offset: Previous capability offset (%0 for start)
*
- * Returns dword offset of the next capability in port config space
- * capability list and returns it. Passing %0 returns the first entry in
- * the capability list. If no next capability is found returns %0. In case
- * of failure returns negative errno.
+ * Finds dword offset of the next capability in the port config space
+ * capability list. When %0 is passed in @offset, the first entry is
+ * returned, if it exists.
+ *
+ * Return:
+ * * Double word offset of the first or next capability - On success.
+ * * %0 - If no next capability is found.
+ * * Negative errno - Another error occurred.
*/
int tb_port_next_cap(struct tb_port *port, unsigned int offset)
{
@@ -112,9 +116,10 @@ static int __tb_port_find_cap(struct tb_port *port, enum tb_port_cap cap)
* @port: Port to find the capability for
* @cap: Capability to look
*
- * Returns offset to start of capability or %-ENOENT if no such
- * capability was found. Negative errno is returned if there was an
- * error.
+ * Return:
+ * * Offset to the start of capability - On success.
+ * * %-ENOENT - If no such capability was found.
+ * * Negative errno - Another error occurred.
*/
int tb_port_find_cap(struct tb_port *port, enum tb_port_cap cap)
{
@@ -137,10 +142,14 @@ int tb_port_find_cap(struct tb_port *port, enum tb_port_cap cap)
* @sw: Switch to find the capability for
* @offset: Previous capability offset (%0 for start)
*
- * Finds dword offset of the next capability in router config space
- * capability list and returns it. Passing %0 returns the first entry in
- * the capability list. If no next capability is found returns %0. In case
- * of failure returns negative errno.
+ * Finds dword offset of the next capability in the router config space
+ * capability list. When %0 is passed in @offset, the first entry is
+ * returned, if it exists.
+ *
+ * Return:
+ * * Double word offset of the first or next capability - On success.
+ * * %0 - If no next capability is found.
+ * * Negative errno - Another error occurred.
*/
int tb_switch_next_cap(struct tb_switch *sw, unsigned int offset)
{
@@ -181,9 +190,10 @@ int tb_switch_next_cap(struct tb_switch *sw, unsigned int offset)
* @sw: Switch to find the capability for
* @cap: Capability to look
*
- * Returns offset to start of capability or %-ENOENT if no such
- * capability was found. Negative errno is returned if there was an
- * error.
+ * Return:
+ * * Offset to the start of capability - On success.
+ * * %-ENOENT - If no such capability was found.
+ * * Negative errno - Another error occurred.
*/
int tb_switch_find_cap(struct tb_switch *sw, enum tb_switch_cap cap)
{
@@ -213,10 +223,13 @@ int tb_switch_find_cap(struct tb_switch *sw, enum tb_switch_cap cap)
* @sw: Switch to find the capability for
* @vsec: Vendor specific capability to look
*
- * Functions enumerates vendor specific capabilities (VSEC) of a switch
- * and returns offset when capability matching @vsec is found. If no
- * such capability is found returns %-ENOENT. In case of error returns
- * negative errno.
+ * This function enumerates vendor specific capabilities (VSEC) of a
+ * switch and returns the offset when a capability matching @vsec is found.
+ *
+ * Return:
+ * * Offset of capability - On success.
+ * * %-ENOENT - If capability was not found.
+ * * Negative errno - Another error occurred.
*/
int tb_switch_find_vse_cap(struct tb_switch *sw, enum tb_switch_vse_cap vsec)
{
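The reworked kernel-docs above settle on a three-way return convention for the capability walkers (offset on success, %0 at end of list, negative errno on failure). A minimal caller-side sketch of tb_port_next_cap() under that convention:

static int walk_port_caps(struct tb_port *port)
{
	int offset = 0;

	for (;;) {
		offset = tb_port_next_cap(port, offset);
		if (offset < 0)
			return offset;	/* config space access failed */
		if (!offset)
			return 0;	/* end of the capability list */
		/* inspect the capability header at dword @offset here */
	}
}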
diff --git a/drivers/thunderbolt/clx.c b/drivers/thunderbolt/clx.c
index 787dfd1550e5..1637e79d988a 100644
--- a/drivers/thunderbolt/clx.c
+++ b/drivers/thunderbolt/clx.c
@@ -167,7 +167,8 @@ static int tb_port_clx(struct tb_port *port)
* @port: USB4 port to check
* @clx: Mask of CL states to check
*
- * Returns true if any of the given CL states is enabled for @port.
+ * Return: %true if any of the given CL states is enabled for @port,
+ * %false otherwise.
*/
bool tb_port_clx_is_enabled(struct tb_port *port, unsigned int clx)
{
@@ -177,6 +178,8 @@ bool tb_port_clx_is_enabled(struct tb_port *port, unsigned int clx)
/**
* tb_switch_clx_is_supported() - Is CLx supported on this type of router
* @sw: The router to check CLx support for
+ *
+ * Return: %true if CLx is supported, %false otherwise.
*/
static bool tb_switch_clx_is_supported(const struct tb_switch *sw)
{
@@ -203,7 +206,7 @@ static bool tb_switch_clx_is_supported(const struct tb_switch *sw)
* Can be called for any router. Initializes the current CL state by
* reading it from the hardware.
*
- * Returns %0 in case of success and negative errno in case of failure.
+ * Return: %0 on success, negative errno otherwise.
*/
int tb_switch_clx_init(struct tb_switch *sw)
{
@@ -313,7 +316,7 @@ static bool validate_mask(unsigned int clx)
* is not inter-domain link. The complete set of conditions is described in CM
* Guide 1.0 section 8.1.
*
- * Returns %0 on success or an error code on failure.
+ * Return: %0 on success, negative errno otherwise.
*/
int tb_switch_clx_enable(struct tb_switch *sw, unsigned int clx)
{
@@ -390,8 +393,7 @@ int tb_switch_clx_enable(struct tb_switch *sw, unsigned int clx)
* Disables all CL states of the given router. Can be called on any
* router and if the states were not enabled already does nothing.
*
- * Returns the CL states that were disabled or negative errno in case of
- * failure.
+ * Return: CL states that were disabled or negative errno otherwise.
*/
int tb_switch_clx_disable(struct tb_switch *sw)
{
diff --git a/drivers/thunderbolt/ctl.c b/drivers/thunderbolt/ctl.c
index 4bdb2d45e0bf..d7a535671404 100644
--- a/drivers/thunderbolt/ctl.c
+++ b/drivers/thunderbolt/ctl.c
@@ -70,6 +70,9 @@ struct tb_ctl {
#define tb_ctl_dbg(ctl, format, arg...) \
dev_dbg(&(ctl)->nhi->pdev->dev, format, ## arg)
+#define tb_ctl_dbg_once(ctl, format, arg...) \
+ dev_dbg_once(&(ctl)->nhi->pdev->dev, format, ## arg)
+
static DECLARE_WAIT_QUEUE_HEAD(tb_cfg_request_cancel_queue);
/* Serializes access to request kref_get/put */
static DEFINE_MUTEX(tb_cfg_request_lock);
@@ -79,6 +82,8 @@ static DEFINE_MUTEX(tb_cfg_request_lock);
*
* This is refcounted object so when you are done with this, call
* tb_cfg_request_put() to it.
+ *
+ * Return: &struct tb_cfg_request on success, %NULL otherwise.
*/
struct tb_cfg_request *tb_cfg_request_alloc(void)
{
@@ -148,6 +153,11 @@ static void tb_cfg_request_dequeue(struct tb_cfg_request *req)
struct tb_ctl *ctl = req->ctl;
mutex_lock(&ctl->request_queue_lock);
+ if (!test_bit(TB_CFG_REQUEST_ACTIVE, &req->flags)) {
+ mutex_unlock(&ctl->request_queue_lock);
+ return;
+ }
+
list_del(&req->list);
clear_bit(TB_CFG_REQUEST_ACTIVE, &req->flags);
if (test_bit(TB_CFG_REQUEST_CANCELED, &req->flags))
@@ -265,7 +275,7 @@ static struct tb_cfg_result parse_header(const struct ctl_pkg *pkg, u32 len,
return res;
}
-static void tb_cfg_print_error(struct tb_ctl *ctl,
+static void tb_cfg_print_error(struct tb_ctl *ctl, enum tb_cfg_space space,
const struct tb_cfg_result *res)
{
WARN_ON(res->err != 1);
@@ -279,8 +289,8 @@ static void tb_cfg_print_error(struct tb_ctl *ctl,
* Invalid cfg_space/offset/length combination in
* cfg_read/cfg_write.
*/
- tb_ctl_dbg(ctl, "%llx:%x: invalid config space or offset\n",
- res->response_route, res->response_port);
+ tb_ctl_dbg_once(ctl, "%llx:%x: invalid config space (%u) or offset\n",
+ res->response_route, res->response_port, space);
return;
case TB_CFG_ERROR_NO_SUCH_PORT:
/*
@@ -309,7 +319,7 @@ static void tb_cfg_print_error(struct tb_ctl *ctl,
static __be32 tb_crc(const void *data, size_t len)
{
- return cpu_to_be32(~__crc32c_le(~0, data, len));
+ return cpu_to_be32(~crc32c(~0, data, len));
}
static void tb_ctl_pkg_free(struct ctl_pkg *pkg)
@@ -351,7 +361,7 @@ static void tb_ctl_tx_callback(struct tb_ring *ring, struct ring_frame *frame,
*
* len must be a multiple of four.
*
- * Return: Returns 0 on success or an error code on failure.
+ * Return: %0 on success, negative errno otherwise.
*/
static int tb_ctl_tx(struct tb_ctl *ctl, const void *data, size_t len,
enum tb_cfg_pkg_type type)
@@ -402,7 +412,7 @@ static void tb_ctl_rx_submit(struct ctl_pkg *pkg)
* We ignore failures during stop.
* All rx packets are referenced
* from ctl->rx_packets, so we do
- * not loose them.
+ * not lose them.
*/
}
@@ -531,6 +541,8 @@ static void tb_cfg_request_work(struct work_struct *work)
*
* This queues @req on the given control channel without waiting for it
* to complete. When the request completes @callback is called.
+ *
+ * Return: %0 on success, negative errno otherwise.
*/
int tb_cfg_request(struct tb_ctl *ctl, struct tb_cfg_request *req,
void (*callback)(void *), void *callback_data)
@@ -597,6 +609,9 @@ static void tb_cfg_request_complete(void *data)
* triggers the request is canceled before function returns. Note the
* caller needs to make sure only one message for given switch is active
* at a time.
+ *
+ * Return: &struct tb_cfg_result with non-zero @err field if an error
+ * has occurred.
*/
struct tb_cfg_result tb_cfg_request_sync(struct tb_ctl *ctl,
struct tb_cfg_request *req,
@@ -633,7 +648,7 @@ struct tb_cfg_result tb_cfg_request_sync(struct tb_ctl *ctl,
*
* cb will be invoked once for every hot plug event.
*
- * Return: Returns a pointer on success or NULL on failure.
+ * Return: Pointer to &struct tb_ctl, %NULL on failure.
*/
struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, int index, int timeout_msec,
event_cb cb, void *cb_data)
@@ -756,8 +771,9 @@ void tb_ctl_stop(struct tb_ctl *ctl)
* @route: Router that originated the event
* @error: Pointer to the notification package
*
- * Call this as response for non-plug notification to ack it. Returns
- * %0 on success or an error code on failure.
+ * Call this as a response for a non-plug notification to ack it.
+ *
+ * Return: %0 on success, negative errno otherwise.
*/
int tb_cfg_ack_notification(struct tb_ctl *ctl, u64 route,
const struct cfg_error_pkg *error)
@@ -819,8 +835,9 @@ int tb_cfg_ack_notification(struct tb_ctl *ctl, u64 route,
* @port: Port where the hot plug/unplug happened
* @unplug: Ack hot plug or unplug
*
- * Call this as response for hot plug/unplug event to ack it.
- * Returns %0 on success or an error code on failure.
+ * Call this as a response for a hot plug/unplug event to ack it.
+ *
+ * Return: %0 on success, negative errno otherwise.
*/
int tb_cfg_ack_plug(struct tb_ctl *ctl, u64 route, u32 port, bool unplug)
{
@@ -887,6 +904,9 @@ static bool tb_cfg_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg)
* If the switch at route is incorrectly configured then we will not receive a
* reply (even though the switch will reset). The caller should check for
* -ETIMEDOUT and attempt to reconfigure the switch.
+ *
+ * Return: &struct tb_cfg_result with non-zero @err field if an error
+ * has occurred.
*/
struct tb_cfg_result tb_cfg_reset(struct tb_ctl *ctl, u64 route)
{
@@ -929,6 +949,9 @@ struct tb_cfg_result tb_cfg_reset(struct tb_ctl *ctl, u64 route)
* @timeout_msec: Timeout in ms how long to wait for the response
*
* Reads from router config space without translating the possible error.
+ *
+ * Return: &struct tb_cfg_result with non-zero @err field if an error
+ * has occurred.
*/
struct tb_cfg_result tb_cfg_read_raw(struct tb_ctl *ctl, void *buffer,
u64 route, u32 port, enum tb_cfg_space space,
@@ -1000,6 +1023,9 @@ struct tb_cfg_result tb_cfg_read_raw(struct tb_ctl *ctl, void *buffer,
* @timeout_msec: Timeout in ms how long to wait for the response
*
* Writes to router config space without translating the possible error.
+ *
+ * Return: &struct tb_cfg_result with non-zero @err field if an error
+ * has occurred.
*/
struct tb_cfg_result tb_cfg_write_raw(struct tb_ctl *ctl, const void *buffer,
u64 route, u32 port, enum tb_cfg_space space,
@@ -1072,7 +1098,7 @@ static int tb_cfg_get_error(struct tb_ctl *ctl, enum tb_cfg_space space,
res->tb_error == TB_CFG_ERROR_INVALID_CONFIG_SPACE)
return -ENODEV;
- tb_cfg_print_error(ctl, res);
+ tb_cfg_print_error(ctl, space, res);
if (res->tb_error == TB_CFG_ERROR_LOCK)
return -EACCES;
@@ -1142,8 +1168,7 @@ int tb_cfg_write(struct tb_ctl *ctl, const void *buffer, u64 route, u32 port,
* Reads the first dword from the switches TB_CFG_SWITCH config area and
* returns the port number from which the reply originated.
*
- * Return: Returns the upstream port number on success or an error code on
- * failure.
+ * Return: Upstream port number on success or negative error code on failure.
*/
int tb_cfg_get_upstream_port(struct tb_ctl *ctl, u64 route)
{
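Several of the kernel-docs above now point callers at the @err field of &struct tb_cfg_result. A caller-side sketch of that convention; the exact @err semantics (1 for a Thunderbolt error packet, negative errno for transport failures) are inferred from parse_header() and tb_cfg_print_error() in this file:

	struct tb_cfg_result res = tb_cfg_reset(ctl, route);

	if (res.err == 1)
		return -EIO;		/* details are in res.tb_error */
	else if (res.err)
		return res.err;		/* e.g. -ETIMEDOUT */
	/* success */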
diff --git a/drivers/thunderbolt/ctl.h b/drivers/thunderbolt/ctl.h
index bf930a191472..db1646eb4fd0 100644
--- a/drivers/thunderbolt/ctl.h
+++ b/drivers/thunderbolt/ctl.h
@@ -54,6 +54,7 @@ struct ctl_pkg {
* @kref: Reference count
* @ctl: Pointer to the control channel structure. Only set when the
* request is queued.
+ * @request: Request is stored here
* @request_size: Size of the request packet (in bytes)
* @request_type: Type of the request packet
* @response: Response is stored here
@@ -140,5 +141,4 @@ int tb_cfg_write(struct tb_ctl *ctl, const void *buffer, u64 route, u32 port,
enum tb_cfg_space space, u32 offset, u32 length);
int tb_cfg_get_upstream_port(struct tb_ctl *ctl, u64 route);
-
#endif
diff --git a/drivers/thunderbolt/debugfs.c b/drivers/thunderbolt/debugfs.c
index 70b52aac3d97..45266ec72f88 100644
--- a/drivers/thunderbolt/debugfs.c
+++ b/drivers/thunderbolt/debugfs.c
@@ -7,8 +7,12 @@
* Mika Westerberg <mika.westerberg@linux.intel.com>
*/
+#include <linux/array_size.h>
+#include <linux/bitfield.h>
#include <linux/debugfs.h>
+#include <linux/delay.h>
#include <linux/pm_runtime.h>
+#include <linux/string_choices.h>
#include <linux/uaccess.h>
#include "tb.h"
@@ -33,6 +37,70 @@
#define COUNTER_SET_LEN 3
+/*
+ * USB4 spec doesn't specify a dwell range; the range of 100 ms to
+ * 500 ms was found to give good results.
+ */
+#define MIN_DWELL_TIME 100 /* ms */
+#define MAX_DWELL_TIME 500 /* ms */
+#define DWELL_SAMPLE_INTERVAL 10
+
+enum usb4_margin_cap_voltage_indp {
+ USB4_MARGIN_CAP_VOLTAGE_INDP_GEN_2_3_MIN,
+ USB4_MARGIN_CAP_VOLTAGE_INDP_GEN_2_3_HL,
+ USB4_MARGIN_CAP_VOLTAGE_INDP_GEN_2_3_BOTH,
+ USB4_MARGIN_CAP_VOLTAGE_INDP_GEN_4_MIN,
+ USB4_MARGIN_CAP_VOLTAGE_INDP_GEN_4_BOTH,
+ USB4_MARGIN_CAP_VOLTAGE_INDP_UNKNOWN,
+};
+
+enum usb4_margin_cap_time_indp {
+ USB4_MARGIN_CAP_TIME_INDP_GEN_2_3_MIN,
+ USB4_MARGIN_CAP_TIME_INDP_GEN_2_3_LR,
+ USB4_MARGIN_CAP_TIME_INDP_GEN_2_3_BOTH,
+ USB4_MARGIN_CAP_TIME_INDP_GEN_4_MIN,
+ USB4_MARGIN_CAP_TIME_INDP_GEN_4_BOTH,
+ USB4_MARGIN_CAP_TIME_INDP_UNKNOWN,
+};
+
+/* Sideband registers and their sizes as defined in the USB4 spec */
+struct sb_reg {
+ unsigned int reg;
+ unsigned int size;
+};
+
+#define SB_MAX_SIZE 64
+
+/* Sideband registers for router */
+static const struct sb_reg port_sb_regs[] = {
+ { USB4_SB_VENDOR_ID, 4 },
+ { USB4_SB_PRODUCT_ID, 4 },
+ { USB4_SB_DEBUG_CONF, 4 },
+ { USB4_SB_DEBUG, 54 },
+ { USB4_SB_LRD_TUNING, 4 },
+ { USB4_SB_OPCODE, 4 },
+ { USB4_SB_METADATA, 4 },
+ { USB4_SB_LINK_CONF, 3 },
+ { USB4_SB_GEN23_TXFFE, 4 },
+ { USB4_SB_GEN4_TXFFE, 4 },
+ { USB4_SB_VERSION, 4 },
+ { USB4_SB_DATA, 64 },
+};
+
+/* Sideband registers for retimer */
+static const struct sb_reg retimer_sb_regs[] = {
+ { USB4_SB_VENDOR_ID, 4 },
+ { USB4_SB_PRODUCT_ID, 4 },
+ { USB4_SB_FW_VERSION, 4 },
+ { USB4_SB_LRD_TUNING, 4 },
+ { USB4_SB_OPCODE, 4 },
+ { USB4_SB_METADATA, 4 },
+ { USB4_SB_GEN23_TXFFE, 4 },
+ { USB4_SB_GEN4_TXFFE, 4 },
+ { USB4_SB_VERSION, 4 },
+ { USB4_SB_DATA, 64 },
+};
+
#define DEBUGFS_ATTR(__space, __write) \
static int __space ## _open(struct inode *inode, struct file *file) \
{ \
@@ -101,6 +169,13 @@ static bool parse_line(char **line, u32 *offs, u32 *val, int short_fmt_len,
* offset relative_offset cap_id vs_cap_id value\n
* v[0] v[1] v[2] v[3] v[4]
*
+ * For Path configuration space:
+ * Short format is: offset value\n
+ * v[0] v[1]
+ * Long format as produced from the read side:
+ * offset relative_offset in_hop_id value\n
+ * v[0] v[1] v[2] v[3]
+ *
* For Counter configuration space:
* Short format is: offset\n
* v[0]
@@ -124,14 +199,33 @@ static bool parse_line(char **line, u32 *offs, u32 *val, int short_fmt_len,
}
#if IS_ENABLED(CONFIG_USB4_DEBUGFS_WRITE)
+/*
+ * Path registers need to be written in double word pairs, and both must
+ * be read before they are written. This writes one double word in path
+ * config space following the spec flow.
+ */
+static int path_write_one(struct tb_port *port, u32 val, u32 offset)
+{
+ u32 index = offset % PATH_LEN;
+ u32 offs = offset - index;
+ u32 data[PATH_LEN];
+ int ret;
+
+ ret = tb_port_read(port, data, TB_CFG_HOPS, offs, PATH_LEN);
+ if (ret)
+ return ret;
+ data[index] = val;
+ return tb_port_write(port, data, TB_CFG_HOPS, offs, PATH_LEN);
+}
+
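A worked instance of the pairing arithmetic in path_write_one(), assuming PATH_LEN is 2 (its definition lives elsewhere in this file and is not part of the hunk):

	/*
	 * Writing dword offset 9 of path config space:
	 *   index = 9 % 2 = 1;  offs = 9 - 1 = 8;
	 * dwords 8..9 are read, data[1] is replaced with the new
	 * value, and both dwords are written back as one pair, per
	 * the spec flow described above.
	 */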
static ssize_t regs_write(struct tb_switch *sw, struct tb_port *port,
- const char __user *user_buf, size_t count,
- loff_t *ppos)
+ enum tb_cfg_space space, const char __user *user_buf,
+ size_t count, loff_t *ppos)
{
+ int long_fmt_len, ret = 0;
struct tb *tb = sw->tb;
char *line, *buf;
u32 val, offset;
- int ret = 0;
buf = validate_and_copy_from_user(user_buf, &count);
if (IS_ERR(buf))
@@ -147,12 +241,21 @@ static ssize_t regs_write(struct tb_switch *sw, struct tb_port *port,
/* User did hardware changes behind the driver's back */
add_taint(TAINT_USER, LOCKDEP_STILL_OK);
+ if (space == TB_CFG_HOPS)
+ long_fmt_len = 4;
+ else
+ long_fmt_len = 5;
+
line = buf;
- while (parse_line(&line, &offset, &val, 2, 5)) {
- if (port)
- ret = tb_port_write(port, &val, TB_CFG_PORT, offset, 1);
- else
+ while (parse_line(&line, &offset, &val, 2, long_fmt_len)) {
+ if (port) {
+ if (space == TB_CFG_HOPS)
+ ret = path_write_one(port, val, offset);
+ else
+ ret = tb_port_write(port, &val, space, offset, 1);
+ } else {
ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, offset, 1);
+ }
if (ret)
break;
}
@@ -173,7 +276,16 @@ static ssize_t port_regs_write(struct file *file, const char __user *user_buf,
struct seq_file *s = file->private_data;
struct tb_port *port = s->private;
- return regs_write(port->sw, port, user_buf, count, ppos);
+ return regs_write(port->sw, port, TB_CFG_PORT, user_buf, count, ppos);
+}
+
+static ssize_t path_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct tb_port *port = s->private;
+
+ return regs_write(port->sw, port, TB_CFG_HOPS, user_buf, count, ppos);
}
static ssize_t switch_regs_write(struct file *file, const char __user *user_buf,
@@ -182,18 +294,174 @@ static ssize_t switch_regs_write(struct file *file, const char __user *user_buf,
struct seq_file *s = file->private_data;
struct tb_switch *sw = s->private;
- return regs_write(sw, NULL, user_buf, count, ppos);
+ return regs_write(sw, NULL, TB_CFG_SWITCH, user_buf, count, ppos);
+}
+
+static bool parse_sb_line(char **line, u8 *reg, u8 *data, size_t data_size,
+ size_t *bytes_read)
+{
+ char *field, *token;
+ int i;
+
+ token = strsep(line, "\n");
+ if (!token)
+ return false;
+
+ /* Parse the register first */
+ field = strsep(&token, " ");
+ if (!field)
+ return false;
+ if (kstrtou8(field, 0, reg))
+ return false;
+
+ /* Then the values for the register, up to data_size */
+ for (i = 0; i < data_size; i++) {
+ field = strsep(&token, " ");
+ if (!field)
+ break;
+ if (kstrtou8(field, 0, &data[i]))
+ return false;
+ }
+
+ *bytes_read = i;
+ return true;
+}
+
+static ssize_t sb_regs_write(struct tb_port *port, const struct sb_reg *sb_regs,
+ size_t size, enum usb4_sb_target target, u8 index,
+ char *buf, size_t count, loff_t *ppos)
+{
+ u8 reg, data[SB_MAX_SIZE];
+ size_t bytes_read;
+ char *line = buf;
+
+ /* User did hardware changes behind the driver's back */
+ add_taint(TAINT_USER, LOCKDEP_STILL_OK);
+
+ /*
+ * For sideband registers we accept:
+ * reg b0 b1 b2...\n
+ *
+ * Here "reg" is the byte offset of the sideband register and "b0"..
+ * are the byte values. There can be fewer byte values than the register
+ * size. The leftovers will not be overwritten.
+ */
+ while (parse_sb_line(&line, &reg, data, ARRAY_SIZE(data), &bytes_read)) {
+ const struct sb_reg *sb_reg;
+ int ret;
+
+ /* At least one byte must be passed */
+ if (bytes_read < 1)
+ return -EINVAL;
+
+ /* Find the register */
+ sb_reg = NULL;
+ for (int i = 0; i < size; i++) {
+ if (sb_regs[i].reg == reg) {
+ sb_reg = &sb_regs[i];
+ break;
+ }
+ }
+
+ if (!sb_reg)
+ return -EINVAL;
+
+ if (bytes_read > sb_reg->size)
+ return -E2BIG;
+
+ ret = usb4_port_sb_write(port, target, index, sb_reg->reg, data,
+ bytes_read);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
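As a usage illustration of the "reg b0 b1 ...\n" format parsed above, a hypothetical userspace sketch; the debugfs path and file name are assumptions for illustration, not taken from this patch:

#include <stdio.h>

int main(void)
{
	/* Hypothetical path: depends on the domain, router and port */
	FILE *f = fopen("/sys/kernel/debug/thunderbolt/0-1/port1/sb_regs",
			"w");

	if (!f)
		return 1;
	/* Write two bytes starting at sideband register offset 0x09 */
	fputs("0x09 0x12 0x34\n", f);
	return fclose(f) ? 1 : 0;
}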
+static ssize_t port_sb_regs_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct tb_port *port = s->private;
+ struct tb_switch *sw = port->sw;
+ struct tb *tb = sw->tb;
+ char *buf;
+ int ret;
+
+ buf = validate_and_copy_from_user(user_buf, &count);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+
+ pm_runtime_get_sync(&sw->dev);
+
+ if (mutex_lock_interruptible(&tb->lock)) {
+ ret = -ERESTARTSYS;
+ goto out;
+ }
+
+ ret = sb_regs_write(port, port_sb_regs, ARRAY_SIZE(port_sb_regs),
+ USB4_SB_TARGET_ROUTER, 0, buf, count, ppos);
+
+ mutex_unlock(&tb->lock);
+out:
+ pm_runtime_mark_last_busy(&sw->dev);
+ pm_runtime_put_autosuspend(&sw->dev);
+ free_page((unsigned long)buf);
+
+ return ret < 0 ? ret : count;
+}
+
+static ssize_t retimer_sb_regs_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct tb_retimer *rt = s->private;
+ struct tb *tb = rt->tb;
+ char *buf;
+ int ret;
+
+ buf = validate_and_copy_from_user(user_buf, &count);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+
+ pm_runtime_get_sync(&rt->dev);
+
+ if (mutex_lock_interruptible(&tb->lock)) {
+ ret = -ERESTARTSYS;
+ goto out;
+ }
+
+ ret = sb_regs_write(rt->port, retimer_sb_regs, ARRAY_SIZE(retimer_sb_regs),
+ USB4_SB_TARGET_RETIMER, rt->index, buf, count, ppos);
+
+ mutex_unlock(&tb->lock);
+out:
+ pm_runtime_mark_last_busy(&rt->dev);
+ pm_runtime_put_autosuspend(&rt->dev);
+ free_page((unsigned long)buf);
+
+ return ret < 0 ? ret : count;
}
#define DEBUGFS_MODE 0600
#else
#define port_regs_write NULL
+#define path_write NULL
#define switch_regs_write NULL
+#define port_sb_regs_write NULL
+#define retimer_sb_regs_write NULL
#define DEBUGFS_MODE 0400
#endif
#if IS_ENABLED(CONFIG_USB4_DEBUGFS_MARGINING)
/**
* struct tb_margining - Lane margining support
+ * @port: USB4 port through which the margining operations are run
+ * @target: Sideband target
+ * @index: Retimer index if target is %USB4_SB_TARGET_RETIMER
+ * @dev: Pointer to the device that is the target (USB4 port or retimer)
+ * @gen: Link generation
+ * @asym_rx: %true if @port supports asymmetric link with 3 Rx
* @caps: Port lane margining capabilities
* @results: Last lane margining results
* @lanes: %0, %1 or %7 (all)
@@ -202,60 +470,146 @@ static ssize_t switch_regs_write(struct file *file, const char __user *user_buf,
* @ber_level: Current BER level contour value
* @voltage_steps: Number of mandatory voltage steps
* @max_voltage_offset: Maximum mandatory voltage offset (in mV)
+ * @voltage_steps_optional_range: Number of voltage steps for optional range
+ * @max_voltage_offset_optional_range: Maximum voltage offset for the optional
+ * range (in mV).
* @time_steps: Number of time margin steps
* @max_time_offset: Maximum time margin offset (in mUI)
+ * @voltage_time_offset: Offset for voltage / time for software margining
+ * @dwell_time: Dwell time for software margining (in ms)
+ * @error_counter: Error counter operation for software margining
+ * @optional_voltage_offset_range: Enable optional extended voltage range
* @software: %true if software margining is used instead of hardware
* @time: %true if time margining is used instead of voltage
* @right_high: %false if left/low margin test is performed, %true if
* right/high
+ * @upper_eye: %false if the lower PAM3 eye is used, %true if the upper
+ * eye is used
*/
struct tb_margining {
- u32 caps[2];
- u32 results[2];
- unsigned int lanes;
+ struct tb_port *port;
+ enum usb4_sb_target target;
+ u8 index;
+ struct device *dev;
+ unsigned int gen;
+ bool asym_rx;
+ u32 caps[3];
+ u32 results[3];
+ enum usb4_margining_lane lanes;
unsigned int min_ber_level;
unsigned int max_ber_level;
unsigned int ber_level;
unsigned int voltage_steps;
unsigned int max_voltage_offset;
+ unsigned int voltage_steps_optional_range;
+ unsigned int max_voltage_offset_optional_range;
unsigned int time_steps;
unsigned int max_time_offset;
+ unsigned int voltage_time_offset;
+ unsigned int dwell_time;
+ enum usb4_margin_sw_error_counter error_counter;
+ bool optional_voltage_offset_range;
bool software;
bool time;
bool right_high;
+ bool upper_eye;
};
-static bool supports_software(const struct usb4_port *usb4)
+static int margining_modify_error_counter(struct tb_margining *margining,
+ u32 lanes, enum usb4_margin_sw_error_counter error_counter)
+{
+ struct usb4_port_margining_params params = { 0 };
+ struct tb_port *port = margining->port;
+ u32 result;
+
+ if (error_counter != USB4_MARGIN_SW_ERROR_COUNTER_CLEAR &&
+ error_counter != USB4_MARGIN_SW_ERROR_COUNTER_STOP)
+ return -EOPNOTSUPP;
+
+ params.error_counter = error_counter;
+ params.lanes = lanes;
+
+ return usb4_port_sw_margin(port, margining->target, margining->index,
+ &params, &result);
+}
+
+static bool supports_software(const struct tb_margining *margining)
{
- return usb4->margining->caps[0] & USB4_MARGIN_CAP_0_MODES_SW;
+ if (margining->gen < 4)
+ return margining->caps[0] & USB4_MARGIN_CAP_0_MODES_SW;
+ return margining->caps[2] & USB4_MARGIN_CAP_2_MODES_SW;
}
-static bool supports_hardware(const struct usb4_port *usb4)
+static bool supports_hardware(const struct tb_margining *margining)
{
- return usb4->margining->caps[0] & USB4_MARGIN_CAP_0_MODES_HW;
+ if (margining->gen < 4)
+ return margining->caps[0] & USB4_MARGIN_CAP_0_MODES_HW;
+ return margining->caps[2] & USB4_MARGIN_CAP_2_MODES_HW;
}
-static bool both_lanes(const struct usb4_port *usb4)
+static bool all_lanes(const struct tb_margining *margining)
{
- return usb4->margining->caps[0] & USB4_MARGIN_CAP_0_2_LANES;
+ return margining->caps[0] & USB4_MARGIN_CAP_0_ALL_LANES;
}
-static unsigned int independent_voltage_margins(const struct usb4_port *usb4)
+static enum usb4_margin_cap_voltage_indp
+independent_voltage_margins(const struct tb_margining *margining)
{
- return (usb4->margining->caps[0] & USB4_MARGIN_CAP_0_VOLTAGE_INDP_MASK) >>
- USB4_MARGIN_CAP_0_VOLTAGE_INDP_SHIFT;
+ if (margining->gen < 4) {
+ switch (FIELD_GET(USB4_MARGIN_CAP_0_VOLTAGE_INDP_MASK, margining->caps[0])) {
+ case USB4_MARGIN_CAP_0_VOLTAGE_MIN:
+ return USB4_MARGIN_CAP_VOLTAGE_INDP_GEN_2_3_MIN;
+ case USB4_MARGIN_CAP_0_VOLTAGE_HL:
+ return USB4_MARGIN_CAP_VOLTAGE_INDP_GEN_2_3_HL;
+ case USB4_MARGIN_CAP_0_VOLTAGE_BOTH:
+ return USB4_MARGIN_CAP_VOLTAGE_INDP_GEN_2_3_BOTH;
+ }
+ } else {
+ switch (FIELD_GET(USB4_MARGIN_CAP_2_VOLTAGE_INDP_MASK, margining->caps[2])) {
+ case USB4_MARGIN_CAP_2_VOLTAGE_MIN:
+ return USB4_MARGIN_CAP_VOLTAGE_INDP_GEN_4_MIN;
+ case USB4_MARGIN_CAP_2_VOLTAGE_BOTH:
+ return USB4_MARGIN_CAP_VOLTAGE_INDP_GEN_4_BOTH;
+ }
+ }
+ return USB4_MARGIN_CAP_VOLTAGE_INDP_UNKNOWN;
}
-static bool supports_time(const struct usb4_port *usb4)
+static bool supports_time(const struct tb_margining *margining)
{
- return usb4->margining->caps[0] & USB4_MARGIN_CAP_0_TIME;
+ if (margining->gen < 4)
+ return margining->caps[0] & USB4_MARGIN_CAP_0_TIME;
+ return margining->caps[2] & USB4_MARGIN_CAP_2_TIME;
}
/* Only applicable if supports_time() returns true */
-static unsigned int independent_time_margins(const struct usb4_port *usb4)
+static enum usb4_margin_cap_time_indp
+independent_time_margins(const struct tb_margining *margining)
{
- return (usb4->margining->caps[1] & USB4_MARGIN_CAP_1_TIME_INDP_MASK) >>
- USB4_MARGIN_CAP_1_TIME_INDP_SHIFT;
+ if (margining->gen < 4) {
+ switch (FIELD_GET(USB4_MARGIN_CAP_1_TIME_INDP_MASK, margining->caps[1])) {
+ case USB4_MARGIN_CAP_1_TIME_MIN:
+ return USB4_MARGIN_CAP_TIME_INDP_GEN_2_3_MIN;
+ case USB4_MARGIN_CAP_1_TIME_LR:
+ return USB4_MARGIN_CAP_TIME_INDP_GEN_2_3_LR;
+ case USB4_MARGIN_CAP_1_TIME_BOTH:
+ return USB4_MARGIN_CAP_TIME_INDP_GEN_2_3_BOTH;
+ }
+ } else {
+ switch (FIELD_GET(USB4_MARGIN_CAP_2_TIME_INDP_MASK, margining->caps[2])) {
+ case USB4_MARGIN_CAP_2_TIME_MIN:
+ return USB4_MARGIN_CAP_TIME_INDP_GEN_4_MIN;
+ case USB4_MARGIN_CAP_2_TIME_BOTH:
+ return USB4_MARGIN_CAP_TIME_INDP_GEN_4_BOTH;
+ }
+ }
+ return USB4_MARGIN_CAP_TIME_INDP_UNKNOWN;
+}
+
+static bool
+supports_optional_voltage_offset_range(const struct tb_margining *margining)
+{
+ return margining->caps[0] & USB4_MARGIN_CAP_0_OPT_VOLTAGE_SUPPORT;
}
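The helpers above replace open-coded mask-and-shift pairs with FIELD_GET() from <linux/bitfield.h>. A self-contained illustration of the idiom; the mask and value are invented for the example:

#include <linux/bitfield.h>
#include <linux/bits.h>

#define EXAMPLE_FIELD	GENMASK(6, 4)

static u32 example_field_get(void)
{
	u32 reg = 0x50;	/* 0b101_0000: bits 6:4 hold 0b101 */

	return FIELD_GET(EXAMPLE_FIELD, reg);	/* returns 5 */
}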
static ssize_t
@@ -263,9 +617,8 @@ margining_ber_level_write(struct file *file, const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct seq_file *s = file->private_data;
- struct tb_port *port = s->private;
- struct usb4_port *usb4 = port->usb4;
- struct tb *tb = port->sw->tb;
+ struct tb_margining *margining = s->private;
+ struct tb *tb = margining->port->sw->tb;
unsigned int val;
int ret = 0;
char *buf;
@@ -273,7 +626,7 @@ margining_ber_level_write(struct file *file, const char __user *user_buf,
if (mutex_lock_interruptible(&tb->lock))
return -ERESTARTSYS;
- if (usb4->margining->software) {
+ if (margining->software) {
ret = -EINVAL;
goto out_unlock;
}
@@ -290,13 +643,13 @@ margining_ber_level_write(struct file *file, const char __user *user_buf,
if (ret)
goto out_free;
- if (val < usb4->margining->min_ber_level ||
- val > usb4->margining->max_ber_level) {
+ if (val < margining->min_ber_level ||
+ val > margining->max_ber_level) {
ret = -EINVAL;
goto out_free;
}
- usb4->margining->ber_level = val;
+ margining->ber_level = val;
out_free:
free_page((unsigned long)buf);
@@ -316,102 +669,151 @@ static void ber_level_show(struct seq_file *s, unsigned int val)
static int margining_ber_level_show(struct seq_file *s, void *not_used)
{
- struct tb_port *port = s->private;
- struct usb4_port *usb4 = port->usb4;
+ const struct tb_margining *margining = s->private;
- if (usb4->margining->software)
+ if (margining->software)
return -EINVAL;
- ber_level_show(s, usb4->margining->ber_level);
+ ber_level_show(s, margining->ber_level);
return 0;
}
DEBUGFS_ATTR_RW(margining_ber_level);
static int margining_caps_show(struct seq_file *s, void *not_used)
{
- struct tb_port *port = s->private;
- struct usb4_port *usb4 = port->usb4;
- struct tb *tb = port->sw->tb;
- u32 cap0, cap1;
+ struct tb_margining *margining = s->private;
+ struct tb *tb = margining->port->sw->tb;
+ int ret = 0;
if (mutex_lock_interruptible(&tb->lock))
return -ERESTARTSYS;
/* Dump the raw caps first */
- cap0 = usb4->margining->caps[0];
- seq_printf(s, "0x%08x\n", cap0);
- cap1 = usb4->margining->caps[1];
- seq_printf(s, "0x%08x\n", cap1);
+ for (int i = 0; i < ARRAY_SIZE(margining->caps); i++)
+ seq_printf(s, "0x%08x\n", margining->caps[i]);
seq_printf(s, "# software margining: %s\n",
- supports_software(usb4) ? "yes" : "no");
- if (supports_hardware(usb4)) {
+ str_yes_no(supports_software(margining)));
+ if (supports_hardware(margining)) {
seq_puts(s, "# hardware margining: yes\n");
seq_puts(s, "# minimum BER level contour: ");
- ber_level_show(s, usb4->margining->min_ber_level);
+ ber_level_show(s, margining->min_ber_level);
seq_puts(s, "# maximum BER level contour: ");
- ber_level_show(s, usb4->margining->max_ber_level);
+ ber_level_show(s, margining->max_ber_level);
} else {
seq_puts(s, "# hardware margining: no\n");
}
- seq_printf(s, "# both lanes simultaneously: %s\n",
- both_lanes(usb4) ? "yes" : "no");
+ seq_printf(s, "# all lanes simultaneously: %s\n",
+ str_yes_no(all_lanes(margining)));
seq_printf(s, "# voltage margin steps: %u\n",
- usb4->margining->voltage_steps);
+ margining->voltage_steps);
seq_printf(s, "# maximum voltage offset: %u mV\n",
- usb4->margining->max_voltage_offset);
+ margining->max_voltage_offset);
+ seq_printf(s, "# optional voltage offset range support: %s\n",
+ str_yes_no(supports_optional_voltage_offset_range(margining)));
+ if (supports_optional_voltage_offset_range(margining)) {
+ seq_printf(s, "# voltage margin steps, optional range: %u\n",
+ margining->voltage_steps_optional_range);
+ seq_printf(s, "# maximum voltage offset, optional range: %u mV\n",
+ margining->max_voltage_offset_optional_range);
+ }
- switch (independent_voltage_margins(usb4)) {
- case USB4_MARGIN_CAP_0_VOLTAGE_MIN:
+ switch (independent_voltage_margins(margining)) {
+ case USB4_MARGIN_CAP_VOLTAGE_INDP_GEN_2_3_MIN:
seq_puts(s, "# returns minimum between high and low voltage margins\n");
break;
- case USB4_MARGIN_CAP_0_VOLTAGE_HL:
+ case USB4_MARGIN_CAP_VOLTAGE_INDP_GEN_2_3_HL:
seq_puts(s, "# returns high or low voltage margin\n");
break;
- case USB4_MARGIN_CAP_0_VOLTAGE_BOTH:
+ case USB4_MARGIN_CAP_VOLTAGE_INDP_GEN_2_3_BOTH:
seq_puts(s, "# returns both high and low margins\n");
break;
+ case USB4_MARGIN_CAP_VOLTAGE_INDP_GEN_4_MIN:
+ seq_puts(s, "# returns minimum between high and low voltage margins in both lower and upper eye\n");
+ break;
+ case USB4_MARGIN_CAP_VOLTAGE_INDP_GEN_4_BOTH:
+ seq_puts(s, "# returns both high and low margins of both upper and lower eye\n");
+ break;
+ case USB4_MARGIN_CAP_VOLTAGE_INDP_UNKNOWN:
+ tb_port_warn(margining->port,
+ "failed to parse independent voltage margining capabilities\n");
+ ret = -EIO;
+ goto out;
}
- if (supports_time(usb4)) {
+ if (supports_time(margining)) {
seq_puts(s, "# time margining: yes\n");
seq_printf(s, "# time margining is destructive: %s\n",
- cap1 & USB4_MARGIN_CAP_1_TIME_DESTR ? "yes" : "no");
+ str_yes_no(margining->caps[1] & USB4_MARGIN_CAP_1_TIME_DESTR));
- switch (independent_time_margins(usb4)) {
- case USB4_MARGIN_CAP_1_TIME_MIN:
+ switch (independent_time_margins(margining)) {
+ case USB4_MARGIN_CAP_TIME_INDP_GEN_2_3_MIN:
seq_puts(s, "# returns minimum between left and right time margins\n");
break;
- case USB4_MARGIN_CAP_1_TIME_LR:
+ case USB4_MARGIN_CAP_TIME_INDP_GEN_2_3_LR:
seq_puts(s, "# returns left or right margin\n");
break;
- case USB4_MARGIN_CAP_1_TIME_BOTH:
+ case USB4_MARGIN_CAP_TIME_INDP_GEN_2_3_BOTH:
seq_puts(s, "# returns both left and right margins\n");
break;
+ case USB4_MARGIN_CAP_TIME_INDP_GEN_4_MIN:
+ seq_puts(s, "# returns minimum between left and right time margins in both lower and upper eye\n");
+ break;
+ case USB4_MARGIN_CAP_TIME_INDP_GEN_4_BOTH:
+ seq_puts(s, "# returns both left and right margins of both upper and lower eye\n");
+ break;
+ case USB4_MARGIN_CAP_TIME_INDP_UNKNOWN:
+ tb_port_warn(margining->port,
+ "failed to parse independent time margining capabilities\n");
+ ret = -EIO;
+ goto out;
}
seq_printf(s, "# time margin steps: %u\n",
- usb4->margining->time_steps);
+ margining->time_steps);
seq_printf(s, "# maximum time offset: %u mUI\n",
- usb4->margining->max_time_offset);
+ margining->max_time_offset);
} else {
seq_puts(s, "# time margining: no\n");
}
+out:
mutex_unlock(&tb->lock);
- return 0;
+ return ret;
}
DEBUGFS_ATTR_RO(margining_caps);
+static const struct {
+ enum usb4_margining_lane lane;
+ const char *name;
+} lane_names[] = {
+ {
+ .lane = USB4_MARGINING_LANE_RX0,
+ .name = "0",
+ },
+ {
+ .lane = USB4_MARGINING_LANE_RX1,
+ .name = "1",
+ },
+ {
+ .lane = USB4_MARGINING_LANE_RX2,
+ .name = "2",
+ },
+ {
+ .lane = USB4_MARGINING_LANE_ALL,
+ .name = "all",
+ },
+};
+
static ssize_t
margining_lanes_write(struct file *file, const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct seq_file *s = file->private_data;
- struct tb_port *port = s->private;
- struct usb4_port *usb4 = port->usb4;
+ struct tb_margining *margining = s->private;
+ struct tb_port *port = margining->port;
struct tb *tb = port->sw->tb;
- int ret = 0;
+ int lane = -1;
char *buf;
buf = validate_and_copy_from_user(user_buf, &count);
@@ -420,70 +822,263 @@ margining_lanes_write(struct file *file, const char __user *user_buf,
buf[count - 1] = '\0';
- if (mutex_lock_interruptible(&tb->lock)) {
- ret = -ERESTARTSYS;
- goto out_free;
+ for (int i = 0; i < ARRAY_SIZE(lane_names); i++) {
+ if (!strcmp(buf, lane_names[i].name)) {
+ lane = lane_names[i].lane;
+ break;
+ }
}
- if (!strcmp(buf, "0")) {
- usb4->margining->lanes = 0;
- } else if (!strcmp(buf, "1")) {
- usb4->margining->lanes = 1;
- } else if (!strcmp(buf, "all")) {
- /* Needs to be supported */
- if (both_lanes(usb4))
- usb4->margining->lanes = 7;
- else
- ret = -EINVAL;
- } else {
- ret = -EINVAL;
- }
+ free_page((unsigned long)buf);
- mutex_unlock(&tb->lock);
+ if (lane == -1)
+ return -EINVAL;
-out_free:
- free_page((unsigned long)buf);
- return ret < 0 ? ret : count;
+ scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &tb->lock) {
+ if (lane == USB4_MARGINING_LANE_ALL && !all_lanes(margining))
+ return -EINVAL;
+ /*
+ * Enabling on RX2 requires that it is supported by the
+ * USB4 port.
+ */
+ if (lane == USB4_MARGINING_LANE_RX2 && !margining->asym_rx)
+ return -EINVAL;
+
+ margining->lanes = lane;
+ }
+
+ return count;
}
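margining_lanes_write() above is the first user of scoped_cond_guard() in this file. A sketch of the rough equivalence to the open-coded pattern it replaces, assuming the definitions in <linux/cleanup.h> and <linux/mutex.h>:

	/* The scoped form... */
	scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &tb->lock) {
		/* body runs with tb->lock held */
	}

	/* ...roughly corresponds to: */
	if (mutex_lock_interruptible(&tb->lock))
		return -ERESTARTSYS;
	/* body */
	mutex_unlock(&tb->lock);

The difference is that the guard also drops the lock on any early return out of the scope, which is what lets the new code return directly from inside the block.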
static int margining_lanes_show(struct seq_file *s, void *not_used)
{
- struct tb_port *port = s->private;
- struct usb4_port *usb4 = port->usb4;
+ struct tb_margining *margining = s->private;
+ struct tb_port *port = margining->port;
struct tb *tb = port->sw->tb;
- unsigned int lanes;
- if (mutex_lock_interruptible(&tb->lock))
- return -ERESTARTSYS;
+ scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &tb->lock) {
+ for (int i = 0; i < ARRAY_SIZE(lane_names); i++) {
+ if (lane_names[i].lane == USB4_MARGINING_LANE_ALL &&
+ !all_lanes(margining))
+ continue;
+ if (lane_names[i].lane == USB4_MARGINING_LANE_RX2 &&
+ !margining->asym_rx)
+ continue;
- lanes = usb4->margining->lanes;
- if (both_lanes(usb4)) {
- if (!lanes)
- seq_puts(s, "[0] 1 all\n");
- else if (lanes == 1)
- seq_puts(s, "0 [1] all\n");
- else
- seq_puts(s, "0 1 [all]\n");
- } else {
- if (!lanes)
- seq_puts(s, "[0] 1\n");
- else
- seq_puts(s, "0 [1]\n");
+ if (i != 0)
+ seq_putc(s, ' ');
+
+ if (lane_names[i].lane == margining->lanes)
+ seq_printf(s, "[%s]", lane_names[i].name);
+ else
+ seq_printf(s, "%s", lane_names[i].name);
+ }
+ seq_puts(s, "\n");
}
- mutex_unlock(&tb->lock);
return 0;
}
DEBUGFS_ATTR_RW(margining_lanes);
+static ssize_t
+margining_voltage_time_offset_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct tb_margining *margining = s->private;
+ struct tb *tb = margining->port->sw->tb;
+ unsigned int max_margin;
+ unsigned int val;
+ int ret;
+
+ ret = kstrtouint_from_user(user_buf, count, 10, &val);
+ if (ret)
+ return ret;
+
+ scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &tb->lock) {
+ if (!margining->software)
+ return -EOPNOTSUPP;
+
+ if (margining->time)
+ max_margin = margining->time_steps;
+ else
+ if (margining->optional_voltage_offset_range)
+ max_margin = margining->voltage_steps_optional_range;
+ else
+ max_margin = margining->voltage_steps;
+
+ margining->voltage_time_offset = clamp(val, 0, max_margin);
+ }
+
+ return count;
+}
+
+static int margining_voltage_time_offset_show(struct seq_file *s,
+ void *not_used)
+{
+ const struct tb_margining *margining = s->private;
+ struct tb *tb = margining->port->sw->tb;
+
+ scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &tb->lock) {
+ if (!margining->software)
+ return -EOPNOTSUPP;
+
+ seq_printf(s, "%d\n", margining->voltage_time_offset);
+ }
+
+ return 0;
+}
+DEBUGFS_ATTR_RW(margining_voltage_time_offset);
+
+static ssize_t
+margining_error_counter_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ enum usb4_margin_sw_error_counter error_counter;
+ struct seq_file *s = file->private_data;
+ struct tb_margining *margining = s->private;
+ struct tb *tb = margining->port->sw->tb;
+ char *buf;
+
+ buf = validate_and_copy_from_user(user_buf, &count);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+
+ buf[count - 1] = '\0';
+
+ if (!strcmp(buf, "nop"))
+ error_counter = USB4_MARGIN_SW_ERROR_COUNTER_NOP;
+ else if (!strcmp(buf, "clear"))
+ error_counter = USB4_MARGIN_SW_ERROR_COUNTER_CLEAR;
+ else if (!strcmp(buf, "start"))
+ error_counter = USB4_MARGIN_SW_ERROR_COUNTER_START;
+ else if (!strcmp(buf, "stop"))
+ error_counter = USB4_MARGIN_SW_ERROR_COUNTER_STOP;
+ else
+ return -EINVAL;
+
+ scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &tb->lock) {
+ if (!margining->software)
+ return -EOPNOTSUPP;
+
+ margining->error_counter = error_counter;
+ }
+
+ return count;
+}
+
+static int margining_error_counter_show(struct seq_file *s, void *not_used)
+{
+ const struct tb_margining *margining = s->private;
+ struct tb *tb = margining->port->sw->tb;
+
+ scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &tb->lock) {
+ if (!margining->software)
+ return -EOPNOTSUPP;
+
+ switch (margining->error_counter) {
+ case USB4_MARGIN_SW_ERROR_COUNTER_NOP:
+ seq_puts(s, "[nop] clear start stop\n");
+ break;
+ case USB4_MARGIN_SW_ERROR_COUNTER_CLEAR:
+ seq_puts(s, "nop [clear] start stop\n");
+ break;
+ case USB4_MARGIN_SW_ERROR_COUNTER_START:
+ seq_puts(s, "nop clear [start] stop\n");
+ break;
+ case USB4_MARGIN_SW_ERROR_COUNTER_STOP:
+ seq_puts(s, "nop clear start [stop]\n");
+ break;
+ }
+ }
+
+ return 0;
+}
+DEBUGFS_ATTR_RW(margining_error_counter);
+
+static ssize_t
+margining_dwell_time_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct tb_margining *margining = s->private;
+ struct tb *tb = margining->port->sw->tb;
+ unsigned int val;
+ int ret;
+
+ ret = kstrtouint_from_user(user_buf, count, 10, &val);
+ if (ret)
+ return ret;
+
+ scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &tb->lock) {
+ if (!margining->software)
+ return -EOPNOTSUPP;
+
+ margining->dwell_time = clamp(val, MIN_DWELL_TIME, MAX_DWELL_TIME);
+ }
+
+ return count;
+}
+
+static int margining_dwell_time_show(struct seq_file *s, void *not_used)
+{
+ struct tb_margining *margining = s->private;
+ struct tb *tb = margining->port->sw->tb;
+
+ scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &tb->lock) {
+ if (!margining->software)
+ return -EOPNOTSUPP;
+
+ seq_printf(s, "%d\n", margining->dwell_time);
+ }
+
+ return 0;
+}
+DEBUGFS_ATTR_RW(margining_dwell_time);
+
+static ssize_t
+margining_optional_voltage_offset_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct tb_margining *margining = s->private;
+ struct tb *tb = margining->port->sw->tb;
+ bool val;
+ int ret;
+
+ ret = kstrtobool_from_user(user_buf, count, &val);
+ if (ret)
+ return ret;
+
+ scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &tb->lock) {
+ margining->optional_voltage_offset_range = val;
+ }
+
+ return count;
+}
+
+static int margining_optional_voltage_offset_show(struct seq_file *s,
+ void *not_used)
+{
+ struct tb_margining *margining = s->private;
+ struct tb *tb = margining->port->sw->tb;
+
+ scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &tb->lock) {
+ seq_printf(s, "%u\n", margining->optional_voltage_offset_range);
+ }
+
+ return 0;
+}
+DEBUGFS_ATTR_RW(margining_optional_voltage_offset);
+
static ssize_t margining_mode_write(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct seq_file *s = file->private_data;
- struct tb_port *port = s->private;
- struct usb4_port *usb4 = port->usb4;
- struct tb *tb = port->sw->tb;
+ struct tb_margining *margining = s->private;
+ struct tb *tb = margining->port->sw->tb;
int ret = 0;
char *buf;
@@ -499,13 +1094,13 @@ static ssize_t margining_mode_write(struct file *file,
}
if (!strcmp(buf, "software")) {
- if (supports_software(usb4))
- usb4->margining->software = true;
+ if (supports_software(margining))
+ margining->software = true;
else
ret = -EINVAL;
} else if (!strcmp(buf, "hardware")) {
- if (supports_hardware(usb4))
- usb4->margining->software = false;
+ if (supports_hardware(margining))
+ margining->software = false;
else
ret = -EINVAL;
} else {
@@ -521,23 +1116,22 @@ out_free:
static int margining_mode_show(struct seq_file *s, void *not_used)
{
- const struct tb_port *port = s->private;
- const struct usb4_port *usb4 = port->usb4;
- struct tb *tb = port->sw->tb;
+ struct tb_margining *margining = s->private;
+ struct tb *tb = margining->port->sw->tb;
const char *space = "";
if (mutex_lock_interruptible(&tb->lock))
return -ERESTARTSYS;
- if (supports_software(usb4)) {
- if (usb4->margining->software)
+ if (supports_software(margining)) {
+ if (margining->software)
seq_puts(s, "[software]");
else
seq_puts(s, "software");
space = " ";
}
- if (supports_hardware(usb4)) {
- if (usb4->margining->software)
+ if (supports_hardware(margining)) {
+ if (margining->software)
seq_printf(s, "%shardware", space);
else
seq_printf(s, "%s[hardware]", space);
@@ -550,12 +1144,85 @@ static int margining_mode_show(struct seq_file *s, void *not_used)
}
DEBUGFS_ATTR_RW(margining_mode);
+static int margining_run_sw(struct tb_margining *margining,
+ struct usb4_port_margining_params *params)
+{
+ u32 nsamples = margining->dwell_time / DWELL_SAMPLE_INTERVAL;
+ int ret, i;
+
+ ret = usb4_port_sw_margin(margining->port, margining->target, margining->index,
+ params, margining->results);
+ if (ret)
+ goto out_stop;
+
+ for (i = 0; i <= nsamples; i++) {
+ u32 errors = 0;
+
+ ret = usb4_port_sw_margin_errors(margining->port, margining->target,
+ margining->index, &margining->results[1]);
+ if (ret)
+ break;
+
+ if (margining->lanes == USB4_MARGINING_LANE_RX0)
+ errors = FIELD_GET(USB4_MARGIN_SW_ERR_COUNTER_LANE_0_MASK,
+ margining->results[1]);
+ else if (margining->lanes == USB4_MARGINING_LANE_RX1)
+ errors = FIELD_GET(USB4_MARGIN_SW_ERR_COUNTER_LANE_1_MASK,
+ margining->results[1]);
+ else if (margining->lanes == USB4_MARGINING_LANE_RX2)
+ errors = FIELD_GET(USB4_MARGIN_SW_ERR_COUNTER_LANE_2_MASK,
+ margining->results[1]);
+ else if (margining->lanes == USB4_MARGINING_LANE_ALL)
+ errors = margining->results[1];
+
+ /* Any errors stop the test */
+ if (errors)
+ break;
+
+ fsleep(DWELL_SAMPLE_INTERVAL * USEC_PER_MSEC);
+ }
+
+out_stop:
+ /*
+ * Stop the counters but don't clear them to allow the
+ * different error counter configurations.
+ */
+ margining_modify_error_counter(margining, margining->lanes,
+ USB4_MARGIN_SW_ERROR_COUNTER_STOP);
+ return ret;
+}
+
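A worked instance of the sampling arithmetic in margining_run_sw(), using the dwell #defines introduced at the top of this patch:

	/*
	 * With dwell_time = 500 (MAX_DWELL_TIME) and
	 * DWELL_SAMPLE_INTERVAL = 10:
	 *   nsamples = 500 / 10 = 50
	 * so the loop polls the error counters up to 51 times
	 * (i = 0..50), sleeping 10 ms between reads via fsleep(),
	 * and bails out on the first non-zero error count.
	 */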
+static int validate_margining(struct tb_margining *margining)
+{
+ /*
+ * For running on RX2 the link must be asymmetric with 3
+ * receivers. Because this can change dynamically, check it
+ * here before we start the margining and report back error if
+ * expectations are not met.
+ */
+ if (margining->lanes == USB4_MARGINING_LANE_RX2) {
+ int ret;
+
+ ret = tb_port_get_link_width(margining->port);
+ if (ret < 0)
+ return ret;
+ if (ret != TB_LINK_WIDTH_ASYM_RX) {
+ tb_port_warn(margining->port, "link is %s expected %s\n",
+ tb_width_name(ret),
+ tb_width_name(TB_LINK_WIDTH_ASYM_RX));
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
static int margining_run_write(void *data, u64 val)
{
- struct tb_port *port = data;
- struct usb4_port *usb4 = port->usb4;
+ struct tb_margining *margining = data;
+ struct tb_port *port = margining->port;
+ struct device *dev = margining->dev;
struct tb_switch *sw = port->sw;
- struct tb_margining *margining;
struct tb_switch *down_sw;
struct tb *tb = sw->tb;
int ret, clx;
@@ -563,13 +1230,17 @@ static int margining_run_write(void *data, u64 val)
if (val != 1)
return -EINVAL;
- pm_runtime_get_sync(&sw->dev);
+ pm_runtime_get_sync(dev);
if (mutex_lock_interruptible(&tb->lock)) {
ret = -ERESTARTSYS;
goto out_rpm_put;
}
+ ret = validate_margining(margining);
+ if (ret)
+ goto out_unlock;
+
if (tb_is_upstream_port(port))
down_sw = sw;
else if (port->remote)
@@ -590,37 +1261,52 @@ static int margining_run_write(void *data, u64 val)
clx = ret;
}
- margining = usb4->margining;
+ /* Clear the results */
+ memset(margining->results, 0, sizeof(margining->results));
if (margining->software) {
- tb_port_dbg(port, "running software %s lane margining for lanes %u\n",
- margining->time ? "time" : "voltage", margining->lanes);
- ret = usb4_port_sw_margin(port, margining->lanes, margining->time,
- margining->right_high,
- USB4_MARGIN_SW_COUNTER_CLEAR);
- if (ret)
- goto out_clx;
-
- ret = usb4_port_sw_margin_errors(port, &margining->results[0]);
+ struct usb4_port_margining_params params = {
+ .error_counter = USB4_MARGIN_SW_ERROR_COUNTER_CLEAR,
+ .lanes = margining->lanes,
+ .time = margining->time,
+ .voltage_time_offset = margining->voltage_time_offset,
+ .right_high = margining->right_high,
+ .upper_eye = margining->upper_eye,
+ .optional_voltage_offset_range = margining->optional_voltage_offset_range,
+ };
+
+ tb_port_dbg(port,
+ "running software %s lane margining for %s lanes %u\n",
+ margining->time ? "time" : "voltage", dev_name(dev),
+ margining->lanes);
+
+ ret = margining_run_sw(margining, &params);
} else {
- tb_port_dbg(port, "running hardware %s lane margining for lanes %u\n",
- margining->time ? "time" : "voltage", margining->lanes);
- /* Clear the results */
- margining->results[0] = 0;
- margining->results[1] = 0;
- ret = usb4_port_hw_margin(port, margining->lanes,
- margining->ber_level, margining->time,
- margining->right_high, margining->results);
+ struct usb4_port_margining_params params = {
+ .ber_level = margining->ber_level,
+ .lanes = margining->lanes,
+ .time = margining->time,
+ .right_high = margining->right_high,
+ .upper_eye = margining->upper_eye,
+ .optional_voltage_offset_range = margining->optional_voltage_offset_range,
+ };
+
+ tb_port_dbg(port,
+ "running hardware %s lane margining for %s lanes %u\n",
+ margining->time ? "time" : "voltage", dev_name(dev),
+ margining->lanes);
+
+ ret = usb4_port_hw_margin(port, margining->target, margining->index, &params,
+ margining->results, ARRAY_SIZE(margining->results));
}
-out_clx:
if (down_sw)
tb_switch_clx_enable(down_sw, clx);
out_unlock:
mutex_unlock(&tb->lock);
out_rpm_put:
- pm_runtime_mark_last_busy(&sw->dev);
- pm_runtime_put_autosuspend(&sw->dev);
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
return ret;
}
@@ -632,16 +1318,21 @@ static ssize_t margining_results_write(struct file *file,
size_t count, loff_t *ppos)
{
struct seq_file *s = file->private_data;
- struct tb_port *port = s->private;
- struct usb4_port *usb4 = port->usb4;
- struct tb *tb = port->sw->tb;
+ struct tb_margining *margining = s->private;
+ struct tb *tb = margining->port->sw->tb;
if (mutex_lock_interruptible(&tb->lock))
return -ERESTARTSYS;
/* Just clear the results */
- usb4->margining->results[0] = 0;
- usb4->margining->results[1] = 0;
+ memset(margining->results, 0, sizeof(margining->results));
+
+ if (margining->software) {
+ /* Clear the error counters */
+ margining_modify_error_counter(margining,
+ USB4_MARGINING_LANE_ALL,
+ USB4_MARGIN_SW_ERROR_COUNTER_CLEAR);
+ }
mutex_unlock(&tb->lock);
return count;
@@ -652,12 +1343,14 @@ static void voltage_margin_show(struct seq_file *s,
{
unsigned int tmp, voltage;
- tmp = val & USB4_MARGIN_HW_RES_1_MARGIN_MASK;
+ tmp = FIELD_GET(USB4_MARGIN_HW_RES_MARGIN_MASK, val);
voltage = tmp * margining->max_voltage_offset / margining->voltage_steps;
seq_printf(s, "%u mV (%u)", voltage, tmp);
- if (val & USB4_MARGIN_HW_RES_1_EXCEEDS)
+ if (val & USB4_MARGIN_HW_RES_EXCEEDS)
seq_puts(s, " exceeds maximum");
seq_puts(s, "\n");
+ if (margining->optional_voltage_offset_range)
+ seq_puts(s, " optional voltage offset range enabled\n");
}
static void time_margin_show(struct seq_file *s,
@@ -665,73 +1358,106 @@ static void time_margin_show(struct seq_file *s,
{
unsigned int tmp, interval;
- tmp = val & USB4_MARGIN_HW_RES_1_MARGIN_MASK;
+ tmp = FIELD_GET(USB4_MARGIN_HW_RES_MARGIN_MASK, val);
interval = tmp * margining->max_time_offset / margining->time_steps;
seq_printf(s, "%u mUI (%u)", interval, tmp);
- if (val & USB4_MARGIN_HW_RES_1_EXCEEDS)
+ if (val & USB4_MARGIN_HW_RES_EXCEEDS)
seq_puts(s, " exceeds maximum");
seq_puts(s, "\n");
}
+static u8 margining_hw_result_val(const u32 *results,
+ enum usb4_margining_lane lane,
+ bool right_high)
+{
+ u32 val;
+
+ if (lane == USB4_MARGINING_LANE_RX0)
+ val = results[1];
+ else if (lane == USB4_MARGINING_LANE_RX1)
+ val = results[1] >> USB4_MARGIN_HW_RES_LANE_SHIFT;
+ else if (lane == USB4_MARGINING_LANE_RX2)
+ val = results[2];
+ else
+ val = 0;
+
+ return right_high ? val : val >> USB4_MARGIN_HW_RES_LL_SHIFT;
+}
+
+static void margining_hw_result_format(struct seq_file *s,
+ const struct tb_margining *margining,
+ enum usb4_margining_lane lane)
+{
+ u8 val;
+
+ if (margining->time) {
+ val = margining_hw_result_val(margining->results, lane, true);
+ seq_printf(s, "# lane %u right time margin: ", lane);
+ time_margin_show(s, margining, val);
+ val = margining_hw_result_val(margining->results, lane, false);
+ seq_printf(s, "# lane %u left time margin: ", lane);
+ time_margin_show(s, margining, val);
+ } else {
+ val = margining_hw_result_val(margining->results, lane, true);
+ seq_printf(s, "# lane %u high voltage margin: ", lane);
+ voltage_margin_show(s, margining, val);
+ val = margining_hw_result_val(margining->results, lane, false);
+ seq_printf(s, "# lane %u low voltage margin: ", lane);
+ voltage_margin_show(s, margining, val);
+ }
+}
+
static int margining_results_show(struct seq_file *s, void *not_used)
{
- struct tb_port *port = s->private;
- struct usb4_port *usb4 = port->usb4;
- struct tb_margining *margining;
- struct tb *tb = port->sw->tb;
+ struct tb_margining *margining = s->private;
+ struct tb *tb = margining->port->sw->tb;
if (mutex_lock_interruptible(&tb->lock))
return -ERESTARTSYS;
- margining = usb4->margining;
/* Dump the raw results first */
seq_printf(s, "0x%08x\n", margining->results[0]);
/* Only the hardware margining has two result dwords */
if (!margining->software) {
- unsigned int val;
+ for (int i = 1; i < ARRAY_SIZE(margining->results); i++)
+ seq_printf(s, "0x%08x\n", margining->results[i]);
+
+ if (margining->lanes == USB4_MARGINING_LANE_ALL) {
+ margining_hw_result_format(s, margining,
+ USB4_MARGINING_LANE_RX0);
+ margining_hw_result_format(s, margining,
+ USB4_MARGINING_LANE_RX1);
+ if (margining->asym_rx)
+ margining_hw_result_format(s, margining,
+ USB4_MARGINING_LANE_RX2);
+ } else {
+ margining_hw_result_format(s, margining,
+ margining->lanes);
+ }
+ } else {
+ u32 lane_errors, result;
seq_printf(s, "0x%08x\n", margining->results[1]);
- if (margining->time) {
- if (!margining->lanes || margining->lanes == 7) {
- val = margining->results[1];
- seq_puts(s, "# lane 0 right time margin: ");
- time_margin_show(s, margining, val);
- val = margining->results[1] >>
- USB4_MARGIN_HW_RES_1_L0_LL_MARGIN_SHIFT;
- seq_puts(s, "# lane 0 left time margin: ");
- time_margin_show(s, margining, val);
- }
- if (margining->lanes == 1 || margining->lanes == 7) {
- val = margining->results[1] >>
- USB4_MARGIN_HW_RES_1_L1_RH_MARGIN_SHIFT;
- seq_puts(s, "# lane 1 right time margin: ");
- time_margin_show(s, margining, val);
- val = margining->results[1] >>
- USB4_MARGIN_HW_RES_1_L1_LL_MARGIN_SHIFT;
- seq_puts(s, "# lane 1 left time margin: ");
- time_margin_show(s, margining, val);
- }
- } else {
- if (!margining->lanes || margining->lanes == 7) {
- val = margining->results[1];
- seq_puts(s, "# lane 0 high voltage margin: ");
- voltage_margin_show(s, margining, val);
- val = margining->results[1] >>
- USB4_MARGIN_HW_RES_1_L0_LL_MARGIN_SHIFT;
- seq_puts(s, "# lane 0 low voltage margin: ");
- voltage_margin_show(s, margining, val);
- }
- if (margining->lanes == 1 || margining->lanes == 7) {
- val = margining->results[1] >>
- USB4_MARGIN_HW_RES_1_L1_RH_MARGIN_SHIFT;
- seq_puts(s, "# lane 1 high voltage margin: ");
- voltage_margin_show(s, margining, val);
- val = margining->results[1] >>
- USB4_MARGIN_HW_RES_1_L1_LL_MARGIN_SHIFT;
- seq_puts(s, "# lane 1 low voltage margin: ");
- voltage_margin_show(s, margining, val);
- }
+ result = FIELD_GET(USB4_MARGIN_SW_LANES_MASK, margining->results[0]);
+ if (result == USB4_MARGINING_LANE_RX0 ||
+ result == USB4_MARGINING_LANE_ALL) {
+ lane_errors = FIELD_GET(USB4_MARGIN_SW_ERR_COUNTER_LANE_0_MASK,
+ margining->results[1]);
+ seq_printf(s, "# lane 0 errors: %u\n", lane_errors);
+ }
+ if (result == USB4_MARGINING_LANE_RX1 ||
+ result == USB4_MARGINING_LANE_ALL) {
+ lane_errors = FIELD_GET(USB4_MARGIN_SW_ERR_COUNTER_LANE_1_MASK,
+ margining->results[1]);
+ seq_printf(s, "# lane 1 errors: %u\n", lane_errors);
+ }
+ if (margining->asym_rx &&
+ (result == USB4_MARGINING_LANE_RX2 ||
+ result == USB4_MARGINING_LANE_ALL)) {
+ lane_errors = FIELD_GET(USB4_MARGIN_SW_ERR_COUNTER_LANE_2_MASK,
+ margining->results[1]);
+ seq_printf(s, "# lane 2 errors: %u\n", lane_errors);
}
}
@@ -745,9 +1471,8 @@ static ssize_t margining_test_write(struct file *file,
size_t count, loff_t *ppos)
{
struct seq_file *s = file->private_data;
- struct tb_port *port = s->private;
- struct usb4_port *usb4 = port->usb4;
- struct tb *tb = port->sw->tb;
+ struct tb_margining *margining = s->private;
+ struct tb *tb = margining->port->sw->tb;
int ret = 0;
char *buf;
@@ -762,10 +1487,10 @@ static ssize_t margining_test_write(struct file *file,
goto out_free;
}
- if (!strcmp(buf, "time") && supports_time(usb4))
- usb4->margining->time = true;
+ if (!strcmp(buf, "time") && supports_time(margining))
+ margining->time = true;
else if (!strcmp(buf, "voltage"))
- usb4->margining->time = false;
+ margining->time = false;
else
ret = -EINVAL;
@@ -778,15 +1503,14 @@ out_free:
static int margining_test_show(struct seq_file *s, void *not_used)
{
- struct tb_port *port = s->private;
- struct usb4_port *usb4 = port->usb4;
- struct tb *tb = port->sw->tb;
+ struct tb_margining *margining = s->private;
+ struct tb *tb = margining->port->sw->tb;
if (mutex_lock_interruptible(&tb->lock))
return -ERESTARTSYS;
- if (supports_time(usb4)) {
- if (usb4->margining->time)
+ if (supports_time(margining)) {
+ if (margining->time)
seq_puts(s, "voltage [time]\n");
else
seq_puts(s, "[voltage] time\n");
@@ -804,9 +1528,8 @@ static ssize_t margining_margin_write(struct file *file,
size_t count, loff_t *ppos)
{
struct seq_file *s = file->private_data;
- struct tb_port *port = s->private;
- struct usb4_port *usb4 = port->usb4;
- struct tb *tb = port->sw->tb;
+ struct tb_margining *margining = s->private;
+ struct tb *tb = margining->port->sw->tb;
int ret = 0;
char *buf;
@@ -821,18 +1544,18 @@ static ssize_t margining_margin_write(struct file *file,
goto out_free;
}
- if (usb4->margining->time) {
+ if (margining->time) {
if (!strcmp(buf, "left"))
- usb4->margining->right_high = false;
+ margining->right_high = false;
else if (!strcmp(buf, "right"))
- usb4->margining->right_high = true;
+ margining->right_high = true;
else
ret = -EINVAL;
} else {
if (!strcmp(buf, "low"))
- usb4->margining->right_high = false;
+ margining->right_high = false;
else if (!strcmp(buf, "high"))
- usb4->margining->right_high = true;
+ margining->right_high = true;
else
ret = -EINVAL;
}
@@ -846,20 +1569,19 @@ out_free:
static int margining_margin_show(struct seq_file *s, void *not_used)
{
- struct tb_port *port = s->private;
- struct usb4_port *usb4 = port->usb4;
- struct tb *tb = port->sw->tb;
+ struct tb_margining *margining = s->private;
+ struct tb *tb = margining->port->sw->tb;
if (mutex_lock_interruptible(&tb->lock))
return -ERESTARTSYS;
- if (usb4->margining->time) {
- if (usb4->margining->right_high)
+ if (margining->time) {
+ if (margining->right_high)
seq_puts(s, "left [right]\n");
else
seq_puts(s, "[left] right\n");
} else {
- if (usb4->margining->right_high)
+ if (margining->right_high)
seq_puts(s, "low [high]\n");
else
seq_puts(s, "[low] high\n");
@@ -870,51 +1592,118 @@ static int margining_margin_show(struct seq_file *s, void *not_used)
}
DEBUGFS_ATTR_RW(margining_margin);
-static void margining_port_init(struct tb_port *port)
+static ssize_t margining_eye_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct tb_margining *margining = s->private;
+ struct tb *tb = margining->port->sw->tb;
+ int ret = 0;
+ char *buf;
+
+ buf = validate_and_copy_from_user(user_buf, &count);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+
+ buf[count - 1] = '\0';
+
+ scoped_cond_guard(mutex_intr, ret = -ERESTARTSYS, &tb->lock) {
+ if (!strcmp(buf, "lower"))
+ margining->upper_eye = false;
+ else if (!strcmp(buf, "upper"))
+ margining->upper_eye = true;
+ else
+ ret = -EINVAL;
+ }
+
+ free_page((unsigned long)buf);
+ return ret ? ret : count;
+}
+
+static int margining_eye_show(struct seq_file *s, void *not_used)
+{
+ struct tb_margining *margining = s->private;
+ struct tb *tb = margining->port->sw->tb;
+
+ scoped_guard(mutex_intr, &tb->lock) {
+ if (margining->upper_eye)
+ seq_puts(s, "lower [upper]\n");
+ else
+ seq_puts(s, "[lower] upper\n");
+
+ return 0;
+ }
+
+ return -ERESTARTSYS;
+}
+DEBUGFS_ATTR_RW(margining_eye);
+
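The scoped_guard()/scoped_cond_guard() helpers from linux/cleanup.h are new in these two functions; the rest of the file still open-codes the locking. As a sketch (hypothetical name), margining_eye_show() is equivalent to:

static int margining_eye_show_open_coded(struct seq_file *s, void *not_used)
{
	struct tb_margining *margining = s->private;
	struct tb *tb = margining->port->sw->tb;

	if (mutex_lock_interruptible(&tb->lock))
		return -ERESTARTSYS;

	if (margining->upper_eye)
		seq_puts(s, "lower [upper]\n");
	else
		seq_puts(s, "[lower] upper\n");

	mutex_unlock(&tb->lock);
	return 0;
}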
+static struct tb_margining *margining_alloc(struct tb_port *port,
+ struct device *dev,
+ enum usb4_sb_target target,
+ u8 index, struct dentry *parent)
{
struct tb_margining *margining;
- struct dentry *dir, *parent;
- struct usb4_port *usb4;
- char dir_name[10];
+ struct dentry *dir;
unsigned int val;
int ret;
- usb4 = port->usb4;
- if (!usb4)
- return;
-
- snprintf(dir_name, sizeof(dir_name), "port%d", port->port);
- parent = debugfs_lookup(dir_name, port->sw->debugfs_dir);
+ ret = tb_port_get_link_generation(port);
+ if (ret < 0) {
+ tb_port_warn(port, "failed to read link generation\n");
+ return NULL;
+ }
margining = kzalloc(sizeof(*margining), GFP_KERNEL);
if (!margining)
- return;
+ return NULL;
- ret = usb4_port_margining_caps(port, margining->caps);
+ margining->port = port;
+ margining->target = target;
+ margining->index = index;
+ margining->dev = dev;
+ margining->gen = ret;
+ margining->asym_rx = tb_port_width_supported(port, TB_LINK_WIDTH_ASYM_RX);
+
+ ret = usb4_port_margining_caps(port, target, index, margining->caps,
+ ARRAY_SIZE(margining->caps));
if (ret) {
kfree(margining);
- return;
+ return NULL;
}
- usb4->margining = margining;
-
/* Set the initial mode */
- if (supports_software(usb4))
+ if (supports_software(margining))
margining->software = true;
- val = (margining->caps[0] & USB4_MARGIN_CAP_0_VOLTAGE_STEPS_MASK) >>
- USB4_MARGIN_CAP_0_VOLTAGE_STEPS_SHIFT;
- margining->voltage_steps = val;
- val = (margining->caps[0] & USB4_MARGIN_CAP_0_MAX_VOLTAGE_OFFSET_MASK) >>
- USB4_MARGIN_CAP_0_MAX_VOLTAGE_OFFSET_SHIFT;
- margining->max_voltage_offset = 74 + val * 2;
+ if (margining->gen < 4) {
+ val = FIELD_GET(USB4_MARGIN_CAP_0_VOLTAGE_STEPS_MASK, margining->caps[0]);
+ margining->voltage_steps = val;
+ val = FIELD_GET(USB4_MARGIN_CAP_0_MAX_VOLTAGE_OFFSET_MASK, margining->caps[0]);
+ margining->max_voltage_offset = 74 + val * 2;
+ } else {
+ val = FIELD_GET(USB4_MARGIN_CAP_2_VOLTAGE_STEPS_MASK, margining->caps[2]);
+ margining->voltage_steps = val;
+ val = FIELD_GET(USB4_MARGIN_CAP_2_MAX_VOLTAGE_OFFSET_MASK, margining->caps[2]);
+ margining->max_voltage_offset = 74 + val * 2;
+ }
+
+ if (supports_optional_voltage_offset_range(margining)) {
+ val = FIELD_GET(USB4_MARGIN_CAP_0_VOLT_STEPS_OPT_MASK,
+ margining->caps[0]);
+ margining->voltage_steps_optional_range = val;
+ val = FIELD_GET(USB4_MARGIN_CAP_1_MAX_VOLT_OFS_OPT_MASK,
+ margining->caps[1]);
+ margining->max_voltage_offset_optional_range = 74 + val * 2;
+ }
- if (supports_time(usb4)) {
- val = (margining->caps[1] & USB4_MARGIN_CAP_1_TIME_STEPS_MASK) >>
- USB4_MARGIN_CAP_1_TIME_STEPS_SHIFT;
+ if (supports_time(margining)) {
+ val = FIELD_GET(USB4_MARGIN_CAP_1_TIME_STEPS_MASK, margining->caps[1]);
margining->time_steps = val;
- val = (margining->caps[1] & USB4_MARGIN_CAP_1_TIME_OFFSET_MASK) >>
- USB4_MARGIN_CAP_1_TIME_OFFSET_SHIFT;
+ val = FIELD_GET(USB4_MARGIN_CAP_1_TIME_OFFSET_MASK, margining->caps[1]);
/*
* Store it as mUI (milli Unit Interval) because we want
* to keep it as integer.
@@ -923,30 +1712,65 @@ static void margining_port_init(struct tb_port *port)
}
dir = debugfs_create_dir("margining", parent);
- if (supports_hardware(usb4)) {
- val = (margining->caps[1] & USB4_MARGIN_CAP_1_MIN_BER_MASK) >>
- USB4_MARGIN_CAP_1_MIN_BER_SHIFT;
+ if (supports_hardware(margining)) {
+ val = FIELD_GET(USB4_MARGIN_CAP_1_MIN_BER_MASK, margining->caps[1]);
margining->min_ber_level = val;
- val = (margining->caps[1] & USB4_MARGIN_CAP_1_MAX_BER_MASK) >>
- USB4_MARGIN_CAP_1_MAX_BER_SHIFT;
+ val = FIELD_GET(USB4_MARGIN_CAP_1_MAX_BER_MASK, margining->caps[1]);
margining->max_ber_level = val;
/* Set the default to minimum */
margining->ber_level = margining->min_ber_level;
- debugfs_create_file("ber_level_contour", 0400, dir, port,
+ debugfs_create_file("ber_level_contour", 0400, dir, margining,
&margining_ber_level_fops);
}
- debugfs_create_file("caps", 0400, dir, port, &margining_caps_fops);
- debugfs_create_file("lanes", 0600, dir, port, &margining_lanes_fops);
- debugfs_create_file("mode", 0600, dir, port, &margining_mode_fops);
- debugfs_create_file("run", 0600, dir, port, &margining_run_fops);
- debugfs_create_file("results", 0600, dir, port, &margining_results_fops);
- debugfs_create_file("test", 0600, dir, port, &margining_test_fops);
- if (independent_voltage_margins(usb4) == USB4_MARGIN_CAP_0_VOLTAGE_HL ||
- (supports_time(usb4) &&
- independent_time_margins(usb4) == USB4_MARGIN_CAP_1_TIME_LR))
- debugfs_create_file("margin", 0600, dir, port, &margining_margin_fops);
+ debugfs_create_file("caps", 0400, dir, margining, &margining_caps_fops);
+ debugfs_create_file("lanes", 0600, dir, margining, &margining_lanes_fops);
+ debugfs_create_file("mode", 0600, dir, margining, &margining_mode_fops);
+ debugfs_create_file("run", 0600, dir, margining, &margining_run_fops);
+ debugfs_create_file("results", 0600, dir, margining,
+ &margining_results_fops);
+ debugfs_create_file("test", 0600, dir, margining, &margining_test_fops);
+ if (independent_voltage_margins(margining) == USB4_MARGIN_CAP_VOLTAGE_INDP_GEN_2_3_HL ||
+ (supports_time(margining) &&
+ independent_time_margins(margining) == USB4_MARGIN_CAP_TIME_INDP_GEN_2_3_LR))
+ debugfs_create_file("margin", 0600, dir, margining, &margining_margin_fops);
+
+ margining->error_counter = USB4_MARGIN_SW_ERROR_COUNTER_CLEAR;
+ margining->dwell_time = MIN_DWELL_TIME;
+
+ if (supports_optional_voltage_offset_range(margining))
+ debugfs_create_file("optional_voltage_offset", DEBUGFS_MODE, dir, margining,
+ &margining_optional_voltage_offset_fops);
+
+ if (supports_software(margining)) {
+ debugfs_create_file("voltage_time_offset", DEBUGFS_MODE, dir, margining,
+ &margining_voltage_time_offset_fops);
+ debugfs_create_file("error_counter", DEBUGFS_MODE, dir, margining,
+ &margining_error_counter_fops);
+ debugfs_create_file("dwell_time", DEBUGFS_MODE, dir, margining,
+ &margining_dwell_time_fops);
+ }
+
+ if (margining->gen >= 4)
+ debugfs_create_file("eye", 0600, dir, port, &margining_eye_fops);
+
+ return margining;
+}
+
+static void margining_port_init(struct tb_port *port)
+{
+ struct dentry *parent;
+ char dir_name[10];
+
+ if (!port->usb4)
+ return;
+
+ snprintf(dir_name, sizeof(dir_name), "port%d", port->port);
+ parent = debugfs_lookup(dir_name, port->sw->debugfs_dir);
+ port->usb4->margining = margining_alloc(port, &port->usb4->dev,
+ USB4_SB_TARGET_ROUTER, 0,
+ parent);
}
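The capability parsing above consistently replaces open-coded mask-and-shift pairs with FIELD_GET(). For illustration (the *_SHIFT constant is the pre-change name visible in the removed lines):

	u32 caps0 = margining->caps[0];
	unsigned int steps;

	/* Before: mask, then shift by a separately maintained constant */
	steps = (caps0 & USB4_MARGIN_CAP_0_VOLTAGE_STEPS_MASK) >>
		USB4_MARGIN_CAP_0_VOLTAGE_STEPS_SHIFT;

	/* After: FIELD_GET() derives the shift from the mask at compile time */
	steps = FIELD_GET(USB4_MARGIN_CAP_0_VOLTAGE_STEPS_MASK, caps0);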
static void margining_port_remove(struct tb_port *port)
@@ -1020,11 +1844,27 @@ static void margining_xdomain_remove(struct tb_xdomain *xd)
downstream = tb_port_at(xd->route, parent_sw);
margining_port_remove(downstream);
}
+
+static void margining_retimer_init(struct tb_retimer *rt, struct dentry *debugfs_dir)
+{
+ rt->margining = margining_alloc(rt->port, &rt->dev,
+ USB4_SB_TARGET_RETIMER, rt->index,
+ debugfs_dir);
+}
+
+static void margining_retimer_remove(struct tb_retimer *rt)
+{
+ kfree(rt->margining);
+ rt->margining = NULL;
+}
#else
static inline void margining_switch_init(struct tb_switch *sw) { }
static inline void margining_switch_remove(struct tb_switch *sw) { }
static inline void margining_xdomain_init(struct tb_xdomain *xd) { }
static inline void margining_xdomain_remove(struct tb_xdomain *xd) { }
+static inline void margining_retimer_init(struct tb_retimer *rt,
+ struct dentry *debugfs_dir) { }
+static inline void margining_retimer_remove(struct tb_retimer *rt) { }
#endif
static int port_clear_all_counters(struct tb_port *port)
@@ -1449,7 +2289,7 @@ out_rpm_put:
return ret;
}
DEBUGFS_ATTR_RO(path);
static int counter_set_regs_show(struct tb_port *port, struct seq_file *s,
int counter)
@@ -1505,6 +2345,60 @@ out:
}
DEBUGFS_ATTR_RW(counters);
+static int sb_regs_show(struct tb_port *port, const struct sb_reg *sb_regs,
+ size_t size, enum usb4_sb_target target, u8 index,
+ struct seq_file *s)
+{
+ int ret, i;
+
+ seq_puts(s, "# register value\n");
+
+ for (i = 0; i < size; i++) {
+ const struct sb_reg *regs = &sb_regs[i];
+ u8 data[64];
+ int j;
+
+ memset(data, 0, sizeof(data));
+ ret = usb4_port_sb_read(port, target, index, regs->reg, data,
+ regs->size);
+ if (ret)
+ return ret;
+
+ seq_printf(s, "0x%02x", regs->reg);
+ for (j = 0; j < regs->size; j++)
+ seq_printf(s, " 0x%02x", data[j]);
+ seq_puts(s, "\n");
+ }
+
+ return 0;
+}
+
+static int port_sb_regs_show(struct seq_file *s, void *not_used)
+{
+ struct tb_port *port = s->private;
+ struct tb_switch *sw = port->sw;
+ struct tb *tb = sw->tb;
+ int ret;
+
+ pm_runtime_get_sync(&sw->dev);
+
+ if (mutex_lock_interruptible(&tb->lock)) {
+ ret = -ERESTARTSYS;
+ goto out_rpm_put;
+ }
+
+ ret = sb_regs_show(port, port_sb_regs, ARRAY_SIZE(port_sb_regs),
+ USB4_SB_TARGET_ROUTER, 0, s);
+
+ mutex_unlock(&tb->lock);
+out_rpm_put:
+ pm_runtime_mark_last_busy(&sw->dev);
+ pm_runtime_put_autosuspend(&sw->dev);
+
+ return ret;
+}
+DEBUGFS_ATTR_RW(port_sb_regs);
+
/**
* tb_switch_debugfs_init() - Add debugfs entries for router
* @sw: Pointer to the router
@@ -1520,6 +2414,8 @@ void tb_switch_debugfs_init(struct tb_switch *sw)
sw->debugfs_dir = debugfs_dir;
debugfs_create_file("regs", DEBUGFS_MODE, debugfs_dir, sw,
&switch_regs_fops);
+ if (sw->drom)
+ debugfs_create_blob("drom", 0400, debugfs_dir, &sw->drom_blob);
tb_switch_for_each_port(sw, port) {
struct dentry *debugfs_dir;
@@ -1539,6 +2435,9 @@ void tb_switch_debugfs_init(struct tb_switch *sw)
if (port->config.counters_support)
debugfs_create_file("counters", 0600, debugfs_dir, port,
&counters_fops);
+ if (port->usb4)
+ debugfs_create_file("sb_regs", DEBUGFS_MODE, debugfs_dir,
+ port, &port_sb_regs_fops);
}
margining_switch_init(sw);
@@ -1590,6 +2489,59 @@ void tb_service_debugfs_remove(struct tb_service *svc)
svc->debugfs_dir = NULL;
}
+static int retimer_sb_regs_show(struct seq_file *s, void *not_used)
+{
+ struct tb_retimer *rt = s->private;
+ struct tb *tb = rt->tb;
+ int ret;
+
+ pm_runtime_get_sync(&rt->dev);
+
+ if (mutex_lock_interruptible(&tb->lock)) {
+ ret = -ERESTARTSYS;
+ goto out_rpm_put;
+ }
+
+ ret = sb_regs_show(rt->port, retimer_sb_regs, ARRAY_SIZE(retimer_sb_regs),
+ USB4_SB_TARGET_RETIMER, rt->index, s);
+
+ mutex_unlock(&tb->lock);
+out_rpm_put:
+ pm_runtime_mark_last_busy(&rt->dev);
+ pm_runtime_put_autosuspend(&rt->dev);
+
+ return ret;
+}
+DEBUGFS_ATTR_RW(retimer_sb_regs);
+
+/**
+ * tb_retimer_debugfs_init() - Add debugfs directory for retimer
+ * @rt: Pointer to retimer structure
+ *
+ * Adds and populates retimer debugfs directory.
+ */
+void tb_retimer_debugfs_init(struct tb_retimer *rt)
+{
+ struct dentry *debugfs_dir;
+
+ debugfs_dir = debugfs_create_dir(dev_name(&rt->dev), tb_debugfs_root);
+ debugfs_create_file("sb_regs", DEBUGFS_MODE, debugfs_dir, rt,
+ &retimer_sb_regs_fops);
+ margining_retimer_init(rt, debugfs_dir);
+}
+
+/**
+ * tb_retimer_debugfs_remove() - Remove retimer debugfs directory
+ * @rt: Pointer to retimer structure
+ *
+ * Removes the retimer debugfs directory along with its contents.
+ */
+void tb_retimer_debugfs_remove(struct tb_retimer *rt)
+{
+ debugfs_lookup_and_remove(dev_name(&rt->dev), tb_debugfs_root);
+ margining_retimer_remove(rt);
+}
+
void tb_debugfs_init(void)
{
tb_debugfs_root = debugfs_create_dir("thunderbolt", NULL);
diff --git a/drivers/thunderbolt/dma_port.c b/drivers/thunderbolt/dma_port.c
index 9f20c7bbf0ce..dc8ea188a114 100644
--- a/drivers/thunderbolt/dma_port.c
+++ b/drivers/thunderbolt/dma_port.c
@@ -197,6 +197,8 @@ static int dma_find_port(struct tb_switch *sw)
*
* The DMA control port is functional also when the switch is in safe
* mode.
+ *
+ * Return: &struct tb_dma_port on success, %NULL otherwise.
*/
struct tb_dma_port *dma_port_alloc(struct tb_switch *sw)
{
@@ -354,6 +356,8 @@ static int dma_port_flash_write_block(void *data, unsigned int dwaddress,
* @address: Address relative to the start of active region
* @buf: Buffer where the data is read
* @size: Size of the buffer
+ *
+ * Return: %0 on success, negative errno otherwise.
*/
int dma_port_flash_read(struct tb_dma_port *dma, unsigned int address,
void *buf, size_t size)
@@ -372,6 +376,8 @@ int dma_port_flash_read(struct tb_dma_port *dma, unsigned int address,
* Writes block of data to the non-active flash region of the switch. If
* the address is given as %DMA_PORT_CSS_ADDRESS the block is written
* using CSS command.
+ *
+ * Return: %0 on success, negative errno otherwise.
*/
int dma_port_flash_write(struct tb_dma_port *dma, unsigned int address,
const void *buf, size_t size)
@@ -393,6 +399,8 @@ int dma_port_flash_write(struct tb_dma_port *dma, unsigned int address,
* dma_port_flash_update_auth_status() to get status of this command.
* This is because if the switch in question is root switch the
* thunderbolt host controller gets reset as well.
+ *
+ * Return: %0 on success, negative errno otherwise.
*/
int dma_port_flash_update_auth(struct tb_dma_port *dma)
{
@@ -410,12 +418,13 @@ int dma_port_flash_update_auth(struct tb_dma_port *dma)
* @status: Status code of the operation
*
* The function checks if there is status available from the last update
- * auth command. Returns %0 if there is no status and no further
- * action is required. If there is status, %1 is returned instead and
- * @status holds the failure code.
+ * auth command.
*
- * Negative return means there was an error reading status from the
- * switch.
+ * Return:
+ * * %0 - If there is no status and no further action is required.
+ * * %1 - If there is some status. @status holds the failure code.
+ * * Negative errno - An error occurred when reading status from the
+ * switch.
*/
int dma_port_flash_update_auth_status(struct tb_dma_port *dma, u32 *status)
{
@@ -446,6 +455,8 @@ int dma_port_flash_update_auth_status(struct tb_dma_port *dma, u32 *status)
* @dma: DMA control port
*
* Triggers power cycle to the switch.
+ *
+ * Return: %0 on success, negative errno otherwise.
*/
int dma_port_power_cycle(struct tb_dma_port *dma)
{
diff --git a/drivers/thunderbolt/domain.c b/drivers/thunderbolt/domain.c
index 0023017299f7..3ced37b4a869 100644
--- a/drivers/thunderbolt/domain.c
+++ b/drivers/thunderbolt/domain.c
@@ -12,7 +12,8 @@
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/random.h>
-#include <crypto/hash.h>
+#include <crypto/sha2.h>
+#include <crypto/utils.h>
#include "tb.h"
@@ -36,7 +37,7 @@ static bool match_service_id(const struct tb_service_id *id,
return false;
}
- if (id->match_flags & TBSVC_MATCH_PROTOCOL_VERSION) {
+ if (id->match_flags & TBSVC_MATCH_PROTOCOL_REVISION) {
if (id->protocol_revision != svc->prtcrevs)
return false;
}
@@ -45,9 +46,9 @@ static bool match_service_id(const struct tb_service_id *id,
}
static const struct tb_service_id *__tb_service_match(struct device *dev,
- struct device_driver *drv)
+ const struct device_driver *drv)
{
- struct tb_service_driver *driver;
+ const struct tb_service_driver *driver;
const struct tb_service_id *ids;
struct tb_service *svc;
@@ -55,7 +56,7 @@ static const struct tb_service_id *__tb_service_match(struct device *dev,
if (!svc)
return NULL;
- driver = container_of(drv, struct tb_service_driver, driver);
+ driver = container_of_const(drv, struct tb_service_driver, driver);
if (!driver->id_table)
return NULL;
@@ -67,7 +68,7 @@ static const struct tb_service_id *__tb_service_match(struct device *dev,
return NULL;
}
-static int tb_service_match(struct device *dev, struct device_driver *drv)
+static int tb_service_match(struct device *dev, const struct device_driver *drv)
{
return !!__tb_service_match(dev, drv);
}
@@ -217,7 +218,7 @@ static ssize_t boot_acl_store(struct device *dev, struct device_attribute *attr,
ret = tb->cm_ops->set_boot_acl(tb, acl, tb->nboot_acl);
if (!ret) {
/* Notify userspace about the change */
- kobject_uevent(&tb->dev.kobj, KOBJ_CHANGE);
+ tb_domain_event(tb, NULL);
}
mutex_unlock(&tb->lock);
@@ -368,14 +369,14 @@ static bool tb_domain_event_cb(void *data, enum tb_cfg_pkg_type type,
* Call tb_domain_put() to release the domain before it has been added
* to the system.
*
- * Return: allocated domain structure on %NULL in case of error
+ * Return: Pointer to &struct tb or %NULL in case of error.
*/
struct tb *tb_domain_alloc(struct tb_nhi *nhi, int timeout_msec, size_t privsize)
{
struct tb *tb;
/*
- * Make sure the structure sizes map with that the hardware
+ * Make sure the structure sizes match what the hardware
* expects because bit-fields are being used.
*/
BUILD_BUG_ON(sizeof(struct tb_regs_switch_header) != 5 * 4);
@@ -430,7 +431,7 @@ err_free:
* and release the domain after this function has been called, call
* tb_domain_remove().
*
- * Return: %0 in case of success and negative errno in case of error
+ * Return: %0 on success, negative errno otherwise.
*/
int tb_domain_add(struct tb *tb, bool reset)
{
@@ -518,6 +519,8 @@ void tb_domain_remove(struct tb *tb)
* @tb: Domain to suspend
*
* Suspends all devices in the domain and stops the control channel.
+ *
+ * Return: %0 on success, negative errno otherwise.
*/
int tb_domain_suspend_noirq(struct tb *tb)
{
@@ -544,6 +547,8 @@ int tb_domain_suspend_noirq(struct tb *tb)
*
* Re-starts the control channel, and resumes all devices connected to
* the domain.
+ *
+ * Return: %0 on success, negative errno otherwise.
*/
int tb_domain_resume_noirq(struct tb *tb)
{
@@ -643,6 +648,8 @@ int tb_domain_disapprove_switch(struct tb *tb, struct tb_switch *sw)
* This will approve switch by connection manager specific means. In
* case of success the connection manager will create PCIe tunnel from
* parent to @sw.
+ *
+ * Return: %0 on success, negative errno otherwise.
*/
int tb_domain_approve_switch(struct tb *tb, struct tb_switch *sw)
{
@@ -708,8 +715,6 @@ int tb_domain_challenge_switch_key(struct tb *tb, struct tb_switch *sw)
u8 response[TB_SWITCH_KEY_SIZE];
u8 hmac[TB_SWITCH_KEY_SIZE];
struct tb_switch *parent_sw;
- struct crypto_shash *tfm;
- struct shash_desc *shash;
int ret;
if (!tb->cm_ops->approve_switch || !tb->cm_ops->challenge_switch_key)
@@ -725,45 +730,15 @@ int tb_domain_challenge_switch_key(struct tb *tb, struct tb_switch *sw)
if (ret)
return ret;
- tfm = crypto_alloc_shash("hmac(sha256)", 0, 0);
- if (IS_ERR(tfm))
- return PTR_ERR(tfm);
-
- ret = crypto_shash_setkey(tfm, sw->key, TB_SWITCH_KEY_SIZE);
- if (ret)
- goto err_free_tfm;
-
- shash = kzalloc(sizeof(*shash) + crypto_shash_descsize(tfm),
- GFP_KERNEL);
- if (!shash) {
- ret = -ENOMEM;
- goto err_free_tfm;
- }
-
- shash->tfm = tfm;
-
- memset(hmac, 0, sizeof(hmac));
- ret = crypto_shash_digest(shash, challenge, sizeof(hmac), hmac);
- if (ret)
- goto err_free_shash;
+ static_assert(sizeof(hmac) == SHA256_DIGEST_SIZE);
+ hmac_sha256_usingrawkey(sw->key, TB_SWITCH_KEY_SIZE,
+ challenge, sizeof(challenge), hmac);
/* The returned HMAC must match the one we calculated */
- if (memcmp(response, hmac, sizeof(hmac))) {
- ret = -EKEYREJECTED;
- goto err_free_shash;
- }
-
- crypto_free_shash(tfm);
- kfree(shash);
+ if (crypto_memneq(response, hmac, sizeof(hmac)))
+ return -EKEYREJECTED;
return tb->cm_ops->approve_switch(tb, sw);
-
-err_free_shash:
- kfree(shash);
-err_free_tfm:
- crypto_free_shash(tfm);
-
- return ret;
}
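The conversion above drops the crypto_shash boilerplate in favor of the one-shot SHA-256 library interface. A self-contained sketch of the same pattern (hypothetical function; key and digest sizes as in this driver):

#include <linux/errno.h>
#include <crypto/sha2.h>
#include <crypto/utils.h>

static int verify_hmac_response(const u8 *key, size_t key_len,
				const u8 *challenge, size_t challenge_len,
				const u8 response[SHA256_DIGEST_SIZE])
{
	u8 hmac[SHA256_DIGEST_SIZE];

	/* One-shot HMAC-SHA256 with a raw key: no tfm, no allocations */
	hmac_sha256_usingrawkey(key, key_len, challenge, challenge_len, hmac);

	/* Constant-time comparison avoids leaking timing information */
	return crypto_memneq(response, hmac, sizeof(hmac)) ? -EKEYREJECTED : 0;
}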
/**
@@ -773,7 +748,7 @@ err_free_tfm:
* This needs to be called in preparation for NVM upgrade of the host
* controller. Makes sure all PCIe paths are disconnected.
*
- * Return %0 on success and negative errno in case of error.
+ * Return: %0 on success and negative errno in case of error.
*/
int tb_domain_disconnect_pcie_paths(struct tb *tb)
{
@@ -795,9 +770,11 @@ int tb_domain_disconnect_pcie_paths(struct tb *tb)
* Calls connection manager specific method to enable DMA paths to the
* XDomain in question.
*
- * Return: 0% in case of success and negative errno otherwise. In
- * particular returns %-ENOTSUPP if the connection manager
- * implementation does not support XDomains.
+ * Return:
+ * * %0 - On success.
+ * * %-ENOTSUPP - If the connection manager implementation does not support
+ * XDomains.
+ * * Negative errno - An error occurred.
*/
int tb_domain_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
int transmit_path, int transmit_ring,
@@ -822,9 +799,11 @@ int tb_domain_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
* Calls connection manager specific method to disconnect DMA paths to
* the XDomain in question.
*
- * Return: 0% in case of success and negative errno otherwise. In
- * particular returns %-ENOTSUPP if the connection manager
- * implementation does not support XDomains.
+ * Return:
+ * * %0 - On success.
+ * * %-ENOTSUPP - If the connection manager implementation does not support
+ * XDomains.
+ * * Negative errno - An error occurred.
*/
int tb_domain_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
int transmit_path, int transmit_ring,
diff --git a/drivers/thunderbolt/eeprom.c b/drivers/thunderbolt/eeprom.c
index eb241b270f79..5477b9437048 100644
--- a/drivers/thunderbolt/eeprom.c
+++ b/drivers/thunderbolt/eeprom.c
@@ -21,7 +21,7 @@ static int tb_eeprom_ctl_write(struct tb_switch *sw, struct tb_eeprom_ctl *ctl)
}
/*
- * tb_eeprom_ctl_write() - read control word
+ * tb_eeprom_ctl_read() - read control word
*/
static int tb_eeprom_ctl_read(struct tb_switch *sw, struct tb_eeprom_ctl *ctl)
{
@@ -211,7 +211,7 @@ static u8 tb_crc8(u8 *data, int len)
static u32 tb_crc32(void *data, size_t len)
{
- return ~__crc32c_le(~0, data, len);
+ return ~crc32c(~0, data, len);
}
#define TB_DROM_DATA_START 13
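tb_crc32() now calls the generic crc32c() directly; the ~0 seed plus final inversion is the standard CRC-32C convention. A hedged sketch (hypothetical helper; tb_drom_header fields as used later in this file) of the DROM data checksum check it supports:

static bool tb_drom_crc32_valid(struct tb_drom_header *header, u8 *drom)
{
	/* CRC covers everything after the header's own CRC fields */
	return tb_crc32(drom + TB_DROM_DATA_START, header->data_len) ==
	       header->data_crc32;
}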
@@ -298,6 +298,8 @@ struct tb_drom_entry_desc {
*
* Does not use the cached copy in sw->drom. Used during resume to check switch
* identity.
+ *
+ * Return: %0 on success, negative errno otherwise.
*/
int tb_drom_read_uid_only(struct tb_switch *sw, u64 *uid)
{
@@ -435,6 +437,29 @@ static int tb_drom_parse_entries(struct tb_switch *sw, size_t header_size)
return 0;
}
+static int tb_switch_drom_alloc(struct tb_switch *sw, size_t size)
+{
+ sw->drom = kzalloc(size, GFP_KERNEL);
+ if (!sw->drom)
+ return -ENOMEM;
+
+#ifdef CONFIG_DEBUG_FS
+ sw->drom_blob.data = sw->drom;
+ sw->drom_blob.size = size;
+#endif
+ return 0;
+}
+
+static void tb_switch_drom_free(struct tb_switch *sw)
+{
+#ifdef CONFIG_DEBUG_FS
+ sw->drom_blob.data = NULL;
+ sw->drom_blob.size = 0;
+#endif
+ kfree(sw->drom);
+ sw->drom = NULL;
+}
+
/*
* tb_drom_copy_efi - copy drom supplied by EFI to sw->drom if present
*/
@@ -447,9 +472,9 @@ static int tb_drom_copy_efi(struct tb_switch *sw, u16 *size)
if (len < 0 || len < sizeof(struct tb_drom_header))
return -EINVAL;
- sw->drom = kmalloc(len, GFP_KERNEL);
- if (!sw->drom)
- return -ENOMEM;
+ res = tb_switch_drom_alloc(sw, len);
+ if (res)
+ return res;
res = device_property_read_u8_array(dev, "ThunderboltDROM", sw->drom,
len);
@@ -464,8 +489,7 @@ static int tb_drom_copy_efi(struct tb_switch *sw, u16 *size)
return 0;
err:
- kfree(sw->drom);
- sw->drom = NULL;
+ tb_switch_drom_free(sw);
return -EINVAL;
}
@@ -491,13 +515,15 @@ static int tb_drom_copy_nvm(struct tb_switch *sw, u16 *size)
/* Size includes CRC8 + UID + CRC32 */
*size += 1 + 8 + 4;
- sw->drom = kzalloc(*size, GFP_KERNEL);
- if (!sw->drom)
- return -ENOMEM;
+ ret = tb_switch_drom_alloc(sw, *size);
+ if (ret)
+ return ret;
ret = dma_port_flash_read(sw->dma_port, drom_offset, sw->drom, *size);
- if (ret)
- goto err_free;
+ if (ret) {
+ tb_switch_drom_free(sw);
+ return ret;
+ }
/*
* Read UID from the minimal DROM because the one in NVM is just
@@ -505,11 +531,6 @@ static int tb_drom_copy_nvm(struct tb_switch *sw, u16 *size)
*/
tb_drom_read_uid_only(sw, &sw->uid);
return 0;
-
-err_free:
- kfree(sw->drom);
- sw->drom = NULL;
- return ret;
}
static int usb4_copy_drom(struct tb_switch *sw, u16 *size)
@@ -522,15 +543,13 @@ static int usb4_copy_drom(struct tb_switch *sw, u16 *size)
/* Size includes CRC8 + UID + CRC32 */
*size += 1 + 8 + 4;
- sw->drom = kzalloc(*size, GFP_KERNEL);
- if (!sw->drom)
- return -ENOMEM;
+ ret = tb_switch_drom_alloc(sw, *size);
+ if (ret)
+ return ret;
ret = usb4_switch_drom_read(sw, 0, sw->drom, *size);
- if (ret) {
- kfree(sw->drom);
- sw->drom = NULL;
- }
+ if (ret)
+ tb_switch_drom_free(sw);
return ret;
}
@@ -552,19 +571,14 @@ static int tb_drom_bit_bang(struct tb_switch *sw, u16 *size)
return -EIO;
}
- sw->drom = kzalloc(*size, GFP_KERNEL);
- if (!sw->drom)
- return -ENOMEM;
+ ret = tb_switch_drom_alloc(sw, *size);
+ if (ret)
+ return ret;
ret = tb_eeprom_read_n(sw, 0, sw->drom, *size);
if (ret)
- goto err;
-
- return 0;
+ tb_switch_drom_free(sw);
-err:
- kfree(sw->drom);
- sw->drom = NULL;
return ret;
}
@@ -646,9 +660,7 @@ static int tb_drom_parse(struct tb_switch *sw, u16 size)
return 0;
err:
- kfree(sw->drom);
- sw->drom = NULL;
-
+ tb_switch_drom_free(sw);
return ret;
}
@@ -699,7 +711,7 @@ static int tb_drom_device_read(struct tb_switch *sw)
* populates the fields in @sw accordingly. Can be called for any router
* generation.
*
- * Returns %0 in case of success and negative errno otherwise.
+ * Return: %0 on success, negative errno otherwise.
*/
int tb_drom_read(struct tb_switch *sw)
{
diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c
index 7859bccc592d..d339ba835376 100644
--- a/drivers/thunderbolt/icm.c
+++ b/drivers/thunderbolt/icm.c
@@ -22,6 +22,7 @@
#include "ctl.h"
#include "nhi_regs.h"
#include "tb.h"
+#include "tunnel.h"
#define PCIE2CIO_CMD 0x30
#define PCIE2CIO_CMD_TIMEOUT BIT(31)
@@ -379,6 +380,27 @@ static bool icm_firmware_running(const struct tb_nhi *nhi)
return !!(val & REG_FW_STS_ICM_EN);
}
+static void icm_xdomain_activated(struct tb_xdomain *xd, bool activated)
+{
+ struct tb_port *nhi_port, *dst_port;
+ struct tb *tb = xd->tb;
+
+ nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);
+ dst_port = tb_xdomain_downstream_port(xd);
+
+ if (activated)
+ tb_tunnel_event(tb, TB_TUNNEL_ACTIVATED, TB_TUNNEL_DMA,
+ nhi_port, dst_port);
+ else
+ tb_tunnel_event(tb, TB_TUNNEL_DEACTIVATED, TB_TUNNEL_DMA,
+ nhi_port, dst_port);
+}
+
+static void icm_dp_event(struct tb *tb)
+{
+ tb_tunnel_event(tb, TB_TUNNEL_CHANGED, TB_TUNNEL_DP, NULL, NULL);
+}
+
static bool icm_fr_is_supported(struct tb *tb)
{
return !x86_apple_machine;
@@ -584,6 +606,7 @@ static int icm_fr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
if (reply.hdr.flags & ICM_FLAGS_ERROR)
return -EIO;
+ icm_xdomain_activated(xd, true);
return 0;
}
@@ -603,6 +626,8 @@ static int icm_fr_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
nhi_mailbox_cmd(tb->nhi, cmd, 1);
usleep_range(10, 50);
nhi_mailbox_cmd(tb->nhi, cmd, 2);
+
+ icm_xdomain_activated(xd, false);
return 0;
}
@@ -762,7 +787,7 @@ icm_fr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
* information might have changed for example by the
* fact that a switch on a dual-link connection might
* have been enumerated using the other link now. Make
- * sure our book keeping matches that.
+ * sure our bookkeeping matches that.
*/
if (sw->depth == depth && sw_phy_port == phy_port &&
!!sw->authorized == authorized) {
@@ -944,7 +969,7 @@ icm_fr_xdomain_connected(struct tb *tb, const struct icm_pkg_header *hdr)
/*
* Look if there already exists an XDomain in the same place
- * than the new one and in that case remove it because it is
+ * as the new one and in that case remove it because it is
* most likely another host that got disconnected.
*/
xd = tb_xdomain_find_by_link_depth(tb, link, depth);
@@ -1151,6 +1176,7 @@ static int icm_tr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
if (reply.hdr.flags & ICM_FLAGS_ERROR)
return -EIO;
+ icm_xdomain_activated(xd, true);
return 0;
}
@@ -1191,7 +1217,12 @@ static int icm_tr_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
return ret;
usleep_range(10, 50);
- return icm_tr_xdomain_tear_down(tb, xd, 2);
+ ret = icm_tr_xdomain_tear_down(tb, xd, 2);
+ if (ret)
+ return ret;
+
+ icm_xdomain_activated(xd, false);
+ return 0;
}
static void
@@ -1718,6 +1749,9 @@ static void icm_handle_notification(struct work_struct *work)
if (tb_is_xdomain_enabled())
icm->xdomain_disconnected(tb, n->pkg);
break;
+ case ICM_EVENT_DP_CONFIG_CHANGED:
+ icm_dp_event(tb);
+ break;
case ICM_EVENT_RTD3_VETO:
icm->rtd3_veto(tb, n->pkg);
break;
@@ -1966,7 +2000,7 @@ static int icm_driver_ready(struct tb *tb)
if (icm->safe_mode) {
tb_info(tb, "Thunderbolt host controller is in safe mode.\n");
tb_info(tb, "You need to update NVM firmware of the controller before it can be used.\n");
- tb_info(tb, "For latest updates check https://thunderbolttechnology.net/updates.\n");
+ tb_info(tb, "Use fwupd tool to apply update. Check Documentation/admin-guide/thunderbolt.rst for details.\n");
return 0;
}
@@ -2137,7 +2171,7 @@ static int icm_runtime_resume_switch(struct tb_switch *sw)
static int icm_runtime_resume(struct tb *tb)
{
/*
- * We can reuse the same resume functionality than with system
+ * We can reuse the same resume functionality as with system
* suspend.
*/
icm_complete(tb);
diff --git a/drivers/thunderbolt/lc.c b/drivers/thunderbolt/lc.c
index 63cb4b6afb71..4449c28cc5f1 100644
--- a/drivers/thunderbolt/lc.c
+++ b/drivers/thunderbolt/lc.c
@@ -14,6 +14,8 @@
* tb_lc_read_uuid() - Read switch UUID from link controller common register
* @sw: Switch whose UUID is read
* @uuid: UUID is placed here
+ *
+ * Return: %0 on success, negative errno otherwise.
*/
int tb_lc_read_uuid(struct tb_switch *sw, u32 *uuid)
{
@@ -52,9 +54,10 @@ static int find_port_lc_cap(struct tb_port *port)
* @port: Port that is reset
*
* Triggers downstream port reset through link controller registers.
- * Returns %0 in case of success negative errno otherwise. Only supports
- * non-USB4 routers with link controller (that's Thunderbolt 2 and
- * Thunderbolt 3).
+ * Only supports non-USB4 routers with link controller (that's
+ * Thunderbolt 2 and Thunderbolt 3).
+ *
+ * Return: %0 on success, negative errno otherwise.
*/
int tb_lc_reset_port(struct tb_port *port)
{
@@ -132,6 +135,8 @@ static int tb_lc_set_port_configured(struct tb_port *port, bool configured)
* @port: Port that is set as configured
*
* Sets the port configured for power management purposes.
+ *
+ * Return: %0 on success, negative errno otherwise.
*/
int tb_lc_configure_port(struct tb_port *port)
{
@@ -143,6 +148,8 @@ int tb_lc_configure_port(struct tb_port *port)
* @port: Port that is set as configured
*
* Sets the port unconfigured for power management purposes.
*/
void tb_lc_unconfigure_port(struct tb_port *port)
{
@@ -184,8 +191,10 @@ static int tb_lc_set_xdomain_configured(struct tb_port *port, bool configure)
* tb_lc_configure_xdomain() - Inform LC that the link is XDomain
* @port: Switch downstream port connected to another host
*
- * Sets the lane configured for XDomain accordingly so that the LC knows
- * about this. Returns %0 in success and negative errno in failure.
+ * Sets the lane configured for XDomain accordingly so that LC knows
+ * about this.
+ *
+ * Return: %0 on success, negative errno otherwise.
*/
int tb_lc_configure_xdomain(struct tb_port *port)
{
@@ -211,7 +220,7 @@ void tb_lc_unconfigure_xdomain(struct tb_port *port)
* sleep. Should be called for those downstream lane adapters that were
* not connected (tb_lc_configure_port() was not called) before sleep.
*
- * Returns %0 in success and negative errno in case of failure.
+ * Return: %0 on success, negative errno otherwise.
*/
int tb_lc_start_lane_initialization(struct tb_port *port)
{
@@ -244,6 +253,8 @@ int tb_lc_start_lane_initialization(struct tb_port *port)
*
* TB_LC_LINK_ATTR_CPS bit reflects if the link supports CLx including
* active cables (if connected on the link).
+ *
+ * Return: %true if CLx is supported, %false otherwise.
*/
bool tb_lc_is_clx_supported(struct tb_port *port)
{
@@ -266,7 +277,8 @@ bool tb_lc_is_clx_supported(struct tb_port *port)
* tb_lc_is_usb_plugged() - Is there USB device connected to port
* @port: Device router lane 0 adapter
*
- * Returns true if the @port has USB type-C device connected.
+ * Return: %true if the @port has a USB Type-C device connected, %false
+ * otherwise.
*/
bool tb_lc_is_usb_plugged(struct tb_port *port)
{
@@ -292,7 +304,8 @@ bool tb_lc_is_usb_plugged(struct tb_port *port)
* tb_lc_is_xhci_connected() - Is the internal xHCI connected
* @port: Device router lane 0 adapter
*
- * Returns true if the internal xHCI has been connected to @port.
+ * Return: %true if the internal xHCI has been connected to
+ * @port, %false otherwise.
*/
bool tb_lc_is_xhci_connected(struct tb_port *port)
{
@@ -343,9 +356,10 @@ static int __tb_lc_xhci_connect(struct tb_port *port, bool connect)
* tb_lc_xhci_connect() - Connect internal xHCI
* @port: Device router lane 0 adapter
*
- * Tells LC to connect the internal xHCI to @port. Returns %0 on success
- * and negative errno in case of failure. Can be called for Thunderbolt 3
- * routers only.
+ * Tells LC to connect the internal xHCI to @port. Can be called for
+ * Thunderbolt 3 routers only.
+ *
+ * Return: %0 on success, negative errno otherwise.
*/
int tb_lc_xhci_connect(struct tb_port *port)
{
@@ -408,6 +422,8 @@ static int tb_lc_set_wake_one(struct tb_switch *sw, unsigned int offset,
* @flags: Wakeup flags (%0 to disable)
*
* For each LC sets wake bits accordingly.
+ *
+ * Return: %0 on success, negative errno otherwise.
*/
int tb_lc_set_wake(struct tb_switch *sw, unsigned int flags)
{
@@ -447,6 +463,8 @@ int tb_lc_set_wake(struct tb_switch *sw, unsigned int flags)
*
* Let the switch link controllers know that the switch is going to
* sleep.
+ *
+ * Return: %0 on success, negative errno otherwise.
*/
int tb_lc_set_sleep(struct tb_switch *sw)
{
@@ -491,6 +509,8 @@ int tb_lc_set_sleep(struct tb_switch *sw)
*
* Checks whether conditions for lane bonding from parent to @sw are
* possible.
+ *
+ * Return: %true if lane bonding is possible, %false otherwise.
*/
bool tb_lc_lane_bonding_possible(struct tb_switch *sw)
{
@@ -538,7 +558,7 @@ static int tb_lc_dp_sink_available(struct tb_switch *sw, int sink)
return ret;
/*
- * Sink is available for CM/SW to use if the allocation valie is
+ * Sink is available for CM/SW to use if the allocation value is
* either 0 or 1.
*/
if (!sink) {
@@ -562,6 +582,8 @@ static int tb_lc_dp_sink_available(struct tb_switch *sw, int sink)
*
* Queries through LC SNK_ALLOCATION registers whether DP sink is available
* for the given DP IN port or not.
+ *
+ * Return: %true if DP sink is available, %false otherwise.
*/
bool tb_lc_dp_sink_query(struct tb_switch *sw, struct tb_port *in)
{
@@ -586,10 +608,12 @@ bool tb_lc_dp_sink_query(struct tb_switch *sw, struct tb_port *in)
* @sw: Switch whose DP sink is allocated
* @in: DP IN port the DP sink is allocated for
*
- * Allocate DP sink for @in via LC SNK_ALLOCATION registers. If the
- * resource is available and allocation is successful returns %0. In all
- * other cases returs negative errno. In particular %-EBUSY is returned if
- * the resource was not available.
+ * Allocate DP sink for @in via LC SNK_ALLOCATION registers.
+ *
+ * Return:
+ * * %0 - If the resource is available and allocation is successful.
+ * * %-EBUSY - If resource is not available.
+ * * Negative errno - Another error occurred.
*/
int tb_lc_dp_sink_alloc(struct tb_switch *sw, struct tb_port *in)
{
@@ -637,6 +661,8 @@ int tb_lc_dp_sink_alloc(struct tb_switch *sw, struct tb_port *in)
* @in: DP IN port whose DP sink is de-allocated
*
* De-allocate DP sink from @in using LC SNK_ALLOCATION registers.
+ *
+ * Return: %0 on success, negative errno otherwise.
*/
int tb_lc_dp_sink_dealloc(struct tb_switch *sw, struct tb_port *in)
{
@@ -680,6 +706,8 @@ int tb_lc_dp_sink_dealloc(struct tb_switch *sw, struct tb_port *in)
*
* This is useful to let authentication cycle pass even without
* a Thunderbolt link present.
+ *
+ * Return: %0 on success, negative errno otherwise.
*/
int tb_lc_force_power(struct tb_switch *sw)
{
diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
index 7af2642b97cb..6d0c9d37c55d 100644
--- a/drivers/thunderbolt/nhi.c
+++ b/drivers/thunderbolt/nhi.c
@@ -19,6 +19,7 @@
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/property.h>
+#include <linux/string_choices.h>
#include <linux/string_helpers.h>
#include "nhi.h"
@@ -146,7 +147,7 @@ static void ring_interrupt_active(struct tb_ring *ring, bool active)
dev_WARN(&ring->nhi->pdev->dev,
"interrupt for %s %d is already %s\n",
RING_TYPE(ring), ring->hop,
- active ? "enabled" : "disabled");
+ str_enabled_disabled(active));
if (active)
iowrite32(new, ring->nhi->iobase + reg);
@@ -343,8 +344,10 @@ EXPORT_SYMBOL_GPL(__tb_ring_enqueue);
*
* This function can be called when @start_poll callback of the @ring
* has been called. It will read one completed frame from the ring and
- * return it to the caller. Returns %NULL if there is no more completed
- * frames.
+ * return it to the caller.
+ *
+ * Return: Pointer to &struct ring_frame, %NULL if there are no more
+ * completed frames.
*/
struct ring_frame *tb_ring_poll(struct tb_ring *ring)
{
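A hedged sketch (hypothetical consumer) of the polling flow described above: after @start_poll fires, drain completions with tb_ring_poll() and re-arm the interrupt with tb_ring_poll_complete():

#include <linux/thunderbolt.h>

static void drain_completed_frames(struct tb_ring *ring)
{
	struct ring_frame *frame;

	/* NULL once every completed frame has been consumed */
	while ((frame = tb_ring_poll(ring)) != NULL) {
		/* process frame->size bytes of payload here */
	}

	/* Unmask the ring interrupt so start_poll can fire again */
	tb_ring_poll_complete(ring);
}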
@@ -639,6 +642,8 @@ err_free_ring:
* @hop: HopID (ring) to allocate
* @size: Number of entries in the ring
* @flags: Flags for the ring
+ *
+ * Return: Pointer to &struct tb_ring, %NULL otherwise.
*/
struct tb_ring *tb_ring_alloc_tx(struct tb_nhi *nhi, int hop, int size,
unsigned int flags)
@@ -660,6 +665,8 @@ EXPORT_SYMBOL_GPL(tb_ring_alloc_tx);
* interrupt is triggered and masked, instead of callback
* in each Rx frame.
* @poll_data: Optional data passed to @start_poll
+ *
+ * Return: Pointer to &struct tb_ring, %NULL otherwise.
*/
struct tb_ring *tb_ring_alloc_rx(struct tb_nhi *nhi, int hop, int size,
unsigned int flags, int e2e_tx_hop,
@@ -705,7 +712,7 @@ void tb_ring_start(struct tb_ring *ring)
ring_iowrite64desc(ring, ring->descriptors_dma, 0);
if (ring->is_tx) {
ring_iowrite32desc(ring, ring->size, 12);
- ring_iowrite32options(ring, 0, 4); /* time releated ? */
+ ring_iowrite32options(ring, 0, 4);
ring_iowrite32options(ring, flags, 0);
} else {
u32 sof_eof_mask = ring->sof_mask << 16 | ring->eof_mask;
@@ -853,8 +860,9 @@ EXPORT_SYMBOL_GPL(tb_ring_free);
* @cmd: Command to send
* @data: Data to be send with the command
*
- * Sends mailbox command to the firmware running on NHI. Returns %0 in
- * case of success and negative errno in case of failure.
+ * Sends mailbox command to the firmware running on NHI.
+ *
+ * Return: %0 on success, negative errno otherwise.
*/
int nhi_mailbox_cmd(struct tb_nhi *nhi, enum nhi_mailbox_cmd cmd, u32 data)
{
@@ -890,6 +898,8 @@ int nhi_mailbox_cmd(struct tb_nhi *nhi, enum nhi_mailbox_cmd cmd, u32 data)
*
* The function reads current firmware operation mode using NHI mailbox
* registers and returns it to the caller.
+ *
+ * Return: &enum nhi_fw_mode.
*/
enum nhi_fw_mode nhi_mailbox_mode(struct tb_nhi *nhi)
{
@@ -1340,18 +1350,18 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (res)
return dev_err_probe(dev, res, "cannot enable PCI device, aborting\n");
- res = pcim_iomap_regions(pdev, 1 << 0, "thunderbolt");
- if (res)
- return dev_err_probe(dev, res, "cannot obtain PCI resources, aborting\n");
-
nhi = devm_kzalloc(&pdev->dev, sizeof(*nhi), GFP_KERNEL);
if (!nhi)
return -ENOMEM;
nhi->pdev = pdev;
nhi->ops = (const struct tb_nhi_ops *)id->driver_data;
- /* cannot fail - table is allocated in pcim_iomap_regions */
- nhi->iobase = pcim_iomap_table(pdev)[0];
+
+ nhi->iobase = pcim_iomap_region(pdev, 0, "thunderbolt");
+ res = PTR_ERR_OR_ZERO(nhi->iobase);
+ if (res)
+ return dev_err_probe(dev, res, "cannot obtain PCI resources, aborting\n");
+
nhi->hop_count = ioread32(nhi->iobase + REG_CAPS) & 0x3ff;
dev_dbg(dev, "total paths: %d\n", nhi->hop_count);
@@ -1520,6 +1530,16 @@ static struct pci_device_id nhi_ids[] = {
.driver_data = (kernel_ulong_t)&icl_nhi_ops },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_LNL_NHI1),
.driver_data = (kernel_ulong_t)&icl_nhi_ops },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_PTL_M_NHI0),
+ .driver_data = (kernel_ulong_t)&icl_nhi_ops },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_PTL_M_NHI1),
+ .driver_data = (kernel_ulong_t)&icl_nhi_ops },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_PTL_P_NHI0),
+ .driver_data = (kernel_ulong_t)&icl_nhi_ops },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_PTL_P_NHI1),
+ .driver_data = (kernel_ulong_t)&icl_nhi_ops },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_WCL_NHI0),
+ .driver_data = (kernel_ulong_t)&icl_nhi_ops },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_80G_NHI) },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_40G_NHI) },
diff --git a/drivers/thunderbolt/nhi.h b/drivers/thunderbolt/nhi.h
index 7a07c7c1a9c2..24ac4246d0ca 100644
--- a/drivers/thunderbolt/nhi.h
+++ b/drivers/thunderbolt/nhi.h
@@ -75,6 +75,7 @@ extern const struct tb_nhi_ops icl_nhi_ops;
#define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE 0x15ef
#define PCI_DEVICE_ID_INTEL_ADL_NHI0 0x463e
#define PCI_DEVICE_ID_INTEL_ADL_NHI1 0x466d
+#define PCI_DEVICE_ID_INTEL_WCL_NHI0 0x4d33
#define PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_80G_NHI 0x5781
#define PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_40G_NHI 0x5784
#define PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HUB_80G_BRIDGE 0x5786
@@ -92,6 +93,10 @@ extern const struct tb_nhi_ops icl_nhi_ops;
#define PCI_DEVICE_ID_INTEL_RPL_NHI1 0xa76d
#define PCI_DEVICE_ID_INTEL_LNL_NHI0 0xa833
#define PCI_DEVICE_ID_INTEL_LNL_NHI1 0xa834
+#define PCI_DEVICE_ID_INTEL_PTL_M_NHI0 0xe333
+#define PCI_DEVICE_ID_INTEL_PTL_M_NHI1 0xe334
+#define PCI_DEVICE_ID_INTEL_PTL_P_NHI0 0xe433
+#define PCI_DEVICE_ID_INTEL_PTL_P_NHI1 0xe434
#define PCI_CLASS_SERIAL_USB_USB4 0x0c0340
diff --git a/drivers/thunderbolt/nhi_regs.h b/drivers/thunderbolt/nhi_regs.h
index 297a3e440648..cf5222bee971 100644
--- a/drivers/thunderbolt/nhi_regs.h
+++ b/drivers/thunderbolt/nhi_regs.h
@@ -21,6 +21,12 @@ enum ring_flags {
/**
* struct ring_desc - TX/RX ring entry
+ * @phys: DMA mapped address of the frame
+ * @length: Size of the frame
+ * @eof: End of frame protocol defined field
+ * @sof: Start of frame protocol defined field
+ * @flags: Ring descriptor flags
+ * @time: Fill with zero
*
* For TX set length/eof/sof.
* For RX length/eof/sof are set by the NHI.
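For orientation, a sketch of how a TX producer might fill one descriptor per the rules above (illustrative only; the flag names are assumptions based on this header):

static void fill_tx_desc(struct ring_desc *desc, dma_addr_t addr,
			 u32 len, u32 eof, u32 sof)
{
	desc->phys = addr;	/* DMA address of the frame buffer */
	desc->length = len;	/* producer fills length/eof/sof for TX */
	desc->eof = eof;
	desc->sof = sof;
	desc->flags = RING_DESC_POSTED | RING_DESC_INTERRUPT;
	desc->time = 0;		/* always written as zero */
}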
diff --git a/drivers/thunderbolt/nvm.c b/drivers/thunderbolt/nvm.c
index 8901db2de327..6901058b7ac0 100644
--- a/drivers/thunderbolt/nvm.c
+++ b/drivers/thunderbolt/nvm.c
@@ -278,9 +278,13 @@ static const struct tb_nvm_vendor retimer_nvm_vendors[] = {
* tb_nvm_alloc() - Allocate new NVM structure
* @dev: Device owning the NVM
*
- * Allocates new NVM structure with unique @id and returns it. In case
- * of error returns ERR_PTR(). Specifically returns %-EOPNOTSUPP if the
- * NVM format of the @dev is not known by the kernel.
+ * Allocates new NVM structure with unique @id and returns it.
+ *
+ * Return:
+ * * Pointer to &struct tb_nvm - On success.
+ * * %-EOPNOTSUPP - If the NVM format of the @dev is not known by the
+ * kernel.
+ * * %ERR_PTR - In case of failure.
*/
struct tb_nvm *tb_nvm_alloc(struct device *dev)
{
@@ -347,9 +351,10 @@ struct tb_nvm *tb_nvm_alloc(struct device *dev)
* tb_nvm_read_version() - Read and populate NVM version
* @nvm: NVM structure
*
- * Uses vendor specific means to read out and fill in the existing
- * active NVM version. Returns %0 in case of success and negative errno
- * otherwise.
+ * Uses vendor specific means to read out and fill in the existing
+ * active NVM version.
+ *
+ * Return: %0 on success, negative errno otherwise.
*/
int tb_nvm_read_version(struct tb_nvm *nvm)
{
@@ -365,12 +370,11 @@ int tb_nvm_read_version(struct tb_nvm *nvm)
* tb_nvm_validate() - Validate new NVM image
* @nvm: NVM structure
*
- * Runs vendor specific validation over the new NVM image and if all
- * checks pass returns %0. As side effect updates @nvm->buf_data_start
- * and @nvm->buf_data_size fields to match the actual data to be written
- * to the NVM.
+ * Runs vendor specific validation over the new NVM image. As a
+ * side effect, updates @nvm->buf_data_start and @nvm->buf_data_size
+ * fields to match the actual data to be written to the NVM.
*
- * If the validation does not pass then returns negative errno.
+ * Return: %0 on successful validation, negative errno otherwise.
*/
int tb_nvm_validate(struct tb_nvm *nvm)
{
@@ -405,7 +409,7 @@ int tb_nvm_validate(struct tb_nvm *nvm)
* the image, this function does that. Can be called even if the device
* does not need this.
*
- * Returns %0 in case of success and negative errno otherwise.
+ * Return: %0 on success, negative errno otherwise.
*/
int tb_nvm_write_headers(struct tb_nvm *nvm)
{
@@ -423,7 +427,8 @@ int tb_nvm_write_headers(struct tb_nvm *nvm)
* Registers new active NVmem device for @nvm. The @reg_read is called
* directly from NVMem so it must handle possible concurrent access if
* needed. The first parameter passed to @reg_read is @nvm structure.
- * Returns %0 in success and negative errno otherwise.
+ *
+ * Return: %0 on success, negative errno otherwise.
*/
int tb_nvm_add_active(struct tb_nvm *nvm, nvmem_reg_read_t reg_read)
{
@@ -461,6 +466,11 @@ int tb_nvm_add_active(struct tb_nvm *nvm, nvmem_reg_read_t reg_read)
* Helper function to cache the new NVM image before it is actually
* written to the flash. Copies @bytes from @val to @nvm->buf starting
* from @offset.
+ *
+ * Return:
+ * * %0 - On success.
+ * * %-ENOMEM - If buffer allocation failed.
+ * * Negative errno - Another error occurred.
*/
int tb_nvm_write_buf(struct tb_nvm *nvm, unsigned int offset, void *val,
size_t bytes)
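A sketch of a non-active NVMem write callback built on top of this, per the
kernel-doc above (function name hypothetical; the first parameter is the
@nvm structure, matching nvmem_reg_write_t):

	static int example_nvm_write(void *priv, unsigned int offset, void *val,
				     size_t bytes)
	{
		struct tb_nvm *nvm = priv;

		/* Only caches the bytes; the flash is written later */
		return tb_nvm_write_buf(nvm, offset, val, bytes);
	}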
@@ -488,7 +498,7 @@ int tb_nvm_write_buf(struct tb_nvm *nvm, unsigned int offset, void *val,
* needed. The first parameter passed to @reg_write is @nvm structure.
* The size of the NVMem device is set to %NVM_MAX_SIZE.
*
- * Returns %0 in success and negative errno otherwise.
+ * Return: %0 on success, negative errno otherwise.
*/
int tb_nvm_add_non_active(struct tb_nvm *nvm, nvmem_reg_write_t reg_write)
{
@@ -545,7 +555,7 @@ void tb_nvm_free(struct tb_nvm *nvm)
* This is a generic function that reads data from NVM or NVM like
* device.
*
- * Returns %0 on success and negative errno otherwise.
+ * Return: %0 on success, negative errno otherwise.
*/
int tb_nvm_read_data(unsigned int address, void *buf, size_t size,
unsigned int retries, read_block_fn read_block,
@@ -588,11 +598,11 @@ int tb_nvm_read_data(unsigned int address, void *buf, size_t size,
* @size: Size of the buffer in bytes
* @retries: Number of retries if the block write fails
* @write_block: Function that writes block to the flash
- * @write_block_data: Data passwd to @write_block
+ * @write_block_data: Data passed to @write_block
*
* This is generic function that writes data to NVM or NVM like device.
*
- * Returns %0 on success and negative errno otherwise.
+ * Return: %0 on success, negative errno otherwise.
*/
int tb_nvm_write_data(unsigned int address, const void *buf, size_t size,
unsigned int retries, write_block_fn write_block,
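An illustrative call, assuming a hypothetical example_write_block() helper
and made-up address/size variables; each failing block write is retried up
to @retries times before the function gives up:

	ret = tb_nvm_write_data(nvm_start_address, buf, image_size, 3,
				example_write_block, rt);
	if (ret)
		return ret;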
diff --git a/drivers/thunderbolt/path.c b/drivers/thunderbolt/path.c
index f760e54cd9bd..f9b11dadfbdd 100644
--- a/drivers/thunderbolt/path.c
+++ b/drivers/thunderbolt/path.c
@@ -96,7 +96,7 @@ static int tb_path_find_src_hopid(struct tb_port *src,
* that the @dst port is the expected one. If it is not, the path can be
* cleaned up by calling tb_path_deactivate() before tb_path_free().
*
- * Return: Discovered path on success, %NULL in case of failure
+ * Return: Pointer to &struct tb_path, %NULL in case of failure.
*/
struct tb_path *tb_path_discover(struct tb_port *src, int src_hopid,
struct tb_port *dst, int dst_hopid,
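A tiny sketch of the cleanup order required above when the discovered path
does not end on the expected adapter (helper name hypothetical):

	static void example_discard_path(struct tb_path *path)
	{
		/* Deactivate first, only then free */
		tb_path_deactivate(path);
		tb_path_free(path);
	}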
@@ -233,7 +233,7 @@ err:
* links on the path, prioritizes using @link_nr but takes into account
* that the lanes may be bonded.
*
- * Return: Returns a tb_path on success or NULL on failure.
+ * Return: Pointer to &struct tb_path, %NULL in case of failure.
*/
struct tb_path *tb_path_alloc(struct tb *tb, struct tb_port *src, int src_hopid,
struct tb_port *dst, int dst_hopid, int link_nr,
@@ -452,7 +452,9 @@ static int __tb_path_deactivate_hop(struct tb_port *port, int hop_index,
* @hop_index: HopID of the path to be cleared
*
* This deactivates or clears a single path config space entry at
- * @hop_index. Returns %0 in success and negative errno otherwise.
+ * @hop_index.
+ *
+ * Return: %0 on success, negative errno otherwise.
*/
int tb_path_deactivate_hop(struct tb_port *port, int hop_index)
{
@@ -498,7 +500,7 @@ void tb_path_deactivate(struct tb_path *path)
* Activate a path starting with the last hop and iterating backwards. The
* caller must fill path->hops before calling tb_path_activate().
*
- * Return: Returns 0 on success or an error code on failure.
+ * Return: %0 on success, negative errno otherwise.
*/
int tb_path_activate(struct tb_path *path)
{
@@ -581,10 +583,10 @@ int tb_path_activate(struct tb_path *path)
}
}
path->activated = true;
- tb_dbg(path->tb, "path activation complete\n");
+ tb_dbg(path->tb, "%s path activation complete\n", path->name);
return 0;
err:
- tb_WARN(path->tb, "path activation failed\n");
+ tb_WARN(path->tb, "%s path activation failed\n", path->name);
return res;
}
@@ -592,7 +594,7 @@ err:
* tb_path_is_invalid() - check whether any ports on the path are invalid
* @path: Path to check
*
- * Return: Returns true if the path is invalid, false otherwise.
+ * Return: %true if the path is invalid, %false otherwise.
*/
bool tb_path_is_invalid(struct tb_path *path)
{
@@ -613,6 +615,8 @@ bool tb_path_is_invalid(struct tb_path *path)
*
* Goes over all hops on path and checks if @port is any of them.
* Direction does not matter.
+ *
+ * Return: %true if port is on the path, %false otherwise.
*/
bool tb_path_port_on_path(const struct tb_path *path, const struct tb_port *port)
{
diff --git a/drivers/thunderbolt/property.c b/drivers/thunderbolt/property.c
index dc555cda98e6..31aa0516932a 100644
--- a/drivers/thunderbolt/property.c
+++ b/drivers/thunderbolt/property.c
@@ -211,11 +211,13 @@ static struct tb_property_dir *__tb_property_parse_dir(const u32 *block,
*
* This function parses the XDomain properties data block into format that
* can be traversed using the helper functions provided by this module.
- * Upon success returns the parsed directory. In case of error returns
- * %NULL. The resulting &struct tb_property_dir needs to be released by
+ *
+ * The resulting &struct tb_property_dir needs to be released by
* calling tb_property_free_dir() when not needed anymore.
*
* The @block is expected to be root directory.
+ *
+ * Return: Pointer to &struct tb_property_dir, %NULL in case of failure.
*/
struct tb_property_dir *tb_property_parse_dir(const u32 *block,
size_t block_len)
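A usage sketch, assuming @block/@block_len hold a received property block;
the directory must be released with tb_property_free_dir() as noted above
("deviceid" is just an example key):

	struct tb_property_dir *dir;
	struct tb_property *p;

	dir = tb_property_parse_dir(block, block_len);
	if (!dir)
		return -ENOMEM;

	p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_VALUE);
	if (p)
		dev_dbg(dev, "deviceid %u\n", p->value.immediate);

	tb_property_free_dir(dir);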
@@ -238,6 +240,8 @@ struct tb_property_dir *tb_property_parse_dir(const u32 *block,
*
* Creates new, empty property directory. If @uuid is %NULL then the
* directory is assumed to be root directory.
+ *
+ * Return: Pointer to &struct tb_property_dir, %NULL in case of failure.
*/
struct tb_property_dir *tb_property_create_dir(const uuid_t *uuid)
{
@@ -481,9 +485,11 @@ static ssize_t __tb_property_format_dir(const struct tb_property_dir *dir,
* @block_len: Length of the property block
*
* This function formats the directory to the packed format that can be
- * then send over the thunderbolt fabric to receiving host. Returns %0 in
- * case of success and negative errno on faulure. Passing %NULL in @block
- * returns number of entries the block takes.
+ * then sent over the thunderbolt fabric to receiving host.
+ *
+ * Passing %NULL in @block returns number of entries the block takes.
+ *
+ * Return: %0 on success, negative errno otherwise.
*/
ssize_t tb_property_format_dir(const struct tb_property_dir *dir, u32 *block,
size_t block_len)
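A sketch of the two-call pattern described above: query the size with a
%NULL @block first, then allocate and format for real:

	ssize_t dwords;
	u32 *block;

	dwords = tb_property_format_dir(dir, NULL, 0);
	if (dwords < 0)
		return dwords;

	block = kcalloc(dwords, sizeof(u32), GFP_KERNEL);
	if (!block)
		return -ENOMEM;

	dwords = tb_property_format_dir(dir, block, dwords);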
@@ -505,9 +511,9 @@ ssize_t tb_property_format_dir(const struct tb_property_dir *dir, u32 *block,
* tb_property_copy_dir() - Take a deep copy of directory
* @dir: Directory to copy
*
- * This function takes a deep copy of @dir and returns back the copy. In
- * case of error returns %NULL. The resulting directory needs to be
- * released by calling tb_property_free_dir().
+ * The resulting directory needs to be released by calling tb_property_free_dir().
+ *
+ * Return: Pointer to &struct tb_property_dir, %NULL in case of failure.
*/
struct tb_property_dir *tb_property_copy_dir(const struct tb_property_dir *dir)
{
@@ -577,6 +583,8 @@ err_free:
* @parent: Directory to add the property
* @key: Key for the property
* @value: Immediate value to store with the property
+ *
+ * Return: %0 on success, negative errno otherwise.
*/
int tb_property_add_immediate(struct tb_property_dir *parent, const char *key,
u32 value)
@@ -606,6 +614,8 @@ EXPORT_SYMBOL_GPL(tb_property_add_immediate);
* @buflen: Number of bytes in the data buffer
*
* Function takes a copy of @buf and adds it to the directory.
+ *
+ * Return: %0 on success, negative errno otherwise.
*/
int tb_property_add_data(struct tb_property_dir *parent, const char *key,
const void *buf, size_t buflen)
@@ -642,6 +652,8 @@ EXPORT_SYMBOL_GPL(tb_property_add_data);
* @text: String to add
*
* Function takes a copy of @text and adds it to the directory.
+ *
+ * Return: %0 on success, negative errno otherwise.
*/
int tb_property_add_text(struct tb_property_dir *parent, const char *key,
const char *text)
@@ -676,6 +688,8 @@ EXPORT_SYMBOL_GPL(tb_property_add_text);
* @parent: Directory to add the property
* @key: Key for the property
* @dir: Directory to add
+ *
+ * Return: %0 on success, negative errno otherwise.
*/
int tb_property_add_dir(struct tb_property_dir *parent, const char *key,
struct tb_property_dir *dir)
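A sketch of building a small directory with the helpers above (keys and
values made up for illustration):

	struct tb_property_dir *dir;

	dir = tb_property_create_dir(NULL);	/* NULL UUID == root directory */
	if (!dir)
		return -ENOMEM;

	if (tb_property_add_immediate(dir, "maxhopid", 15) ||
	    tb_property_add_text(dir, "vendor", "Example Vendor")) {
		tb_property_free_dir(dir);
		return -ENOMEM;
	}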
@@ -716,8 +730,10 @@ EXPORT_SYMBOL_GPL(tb_property_remove);
* @key: Key to look for
* @type: Type of the property
*
- * Finds and returns property from the given directory. Does not recurse
- * into sub-directories. Returns %NULL if the property was not found.
+ * Finds and returns property from the given directory. Does not
+ * recurse into sub-directories.
+ *
+ * Return: Pointer to &struct tb_property, %NULL if the property was not found.
*/
struct tb_property *tb_property_find(struct tb_property_dir *dir,
const char *key, enum tb_property_type type)
@@ -737,6 +753,8 @@ EXPORT_SYMBOL_GPL(tb_property_find);
* tb_property_get_next() - Get next property from directory
* @dir: Directory holding properties
* @prev: Previous property in the directory (%NULL returns the first)
+ *
+ * Return: Pointer to &struct tb_property, %NULL if property was not found.
*/
struct tb_property *tb_property_get_next(struct tb_property_dir *dir,
struct tb_property *prev)
diff --git a/drivers/thunderbolt/retimer.c b/drivers/thunderbolt/retimer.c
index 6eaaa5074ce8..13d64dbd2bc5 100644
--- a/drivers/thunderbolt/retimer.c
+++ b/drivers/thunderbolt/retimer.c
@@ -14,7 +14,11 @@
#include "sb_regs.h"
#include "tb.h"
+#if IS_ENABLED(CONFIG_USB4_DEBUGFS_MARGINING)
#define TB_MAX_RETIMER_INDEX 6
+#else
+#define TB_MAX_RETIMER_INDEX 2
+#endif
/**
* tb_retimer_nvm_read() - Read contents of retimer NVM
@@ -23,8 +27,9 @@
* @buf: Data read from NVM is stored here
* @size: Number of bytes to read
*
- * Reads retimer NVM and copies the contents to @buf. Returns %0 if the
- * read was successful and negative errno in case of failure.
+ * Reads retimer NVM and copies the contents to @buf.
+ *
+ * Return: %0 if the read was successful, negative errno in case of failure.
*/
int tb_retimer_nvm_read(struct tb_retimer *rt, unsigned int address, void *buf,
size_t size)
@@ -89,9 +94,11 @@ static int tb_retimer_nvm_add(struct tb_retimer *rt)
if (ret)
goto err_nvm;
- ret = tb_nvm_add_non_active(nvm, nvm_write);
- if (ret)
- goto err_nvm;
+ if (!rt->no_nvm_upgrade) {
+ ret = tb_nvm_add_non_active(nvm, nvm_write);
+ if (ret)
+ goto err_nvm;
+ }
rt->nvm = nvm;
dev_dbg(&rt->dev, "NVM version %x.%x\n", nvm->major, nvm->minor);
@@ -99,6 +106,7 @@ static int tb_retimer_nvm_add(struct tb_retimer *rt)
err_nvm:
dev_dbg(&rt->dev, "NVM upgrade disabled\n");
+ rt->no_nvm_upgrade = true;
if (!IS_ERR(nvm))
tb_nvm_free(nvm);
@@ -178,8 +186,6 @@ static ssize_t nvm_authenticate_show(struct device *dev,
if (!rt->nvm)
ret = -EAGAIN;
- else if (rt->no_nvm_upgrade)
- ret = -EOPNOTSUPP;
else
ret = sysfs_emit(buf, "%#x\n", rt->auth_status);
@@ -336,6 +342,19 @@ static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR_RO(vendor);
+static umode_t retimer_is_visible(struct kobject *kobj, struct attribute *attr,
+ int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct tb_retimer *rt = tb_to_retimer(dev);
+
+ if (attr == &dev_attr_nvm_authenticate.attr ||
+ attr == &dev_attr_nvm_version.attr)
+ return rt->no_nvm_upgrade ? 0 : attr->mode;
+
+ return attr->mode;
+}
+
static struct attribute *retimer_attrs[] = {
&dev_attr_device.attr,
&dev_attr_nvm_authenticate.attr,
@@ -345,6 +364,7 @@ static struct attribute *retimer_attrs[] = {
};
static const struct attribute_group retimer_group = {
+ .is_visible = retimer_is_visible,
.attrs = retimer_attrs,
};
@@ -366,35 +386,29 @@ const struct device_type tb_retimer_type = {
.release = tb_retimer_release,
};
-static int tb_retimer_add(struct tb_port *port, u8 index, u32 auth_status)
+static int tb_retimer_add(struct tb_port *port, u8 index, u32 auth_status,
+ bool on_board)
{
struct tb_retimer *rt;
u32 vendor, device;
int ret;
- ret = usb4_port_retimer_read(port, index, USB4_SB_VENDOR_ID, &vendor,
- sizeof(vendor));
+ ret = usb4_port_sb_read(port, USB4_SB_TARGET_RETIMER, index,
+ USB4_SB_VENDOR_ID, &vendor, sizeof(vendor));
if (ret) {
if (ret != -ENODEV)
tb_port_warn(port, "failed read retimer VendorId: %d\n", ret);
return ret;
}
- ret = usb4_port_retimer_read(port, index, USB4_SB_PRODUCT_ID, &device,
- sizeof(device));
+ ret = usb4_port_sb_read(port, USB4_SB_TARGET_RETIMER, index,
+ USB4_SB_PRODUCT_ID, &device, sizeof(device));
if (ret) {
if (ret != -ENODEV)
tb_port_warn(port, "failed read retimer ProductId: %d\n", ret);
return ret;
}
- /*
- * Check that it supports NVM operations. If not then don't add
- * the device at all.
- */
- ret = usb4_port_retimer_nvm_sector_size(port, index);
- if (ret < 0)
- return ret;
rt = kzalloc(sizeof(*rt), GFP_KERNEL);
if (!rt)
@@ -407,6 +421,13 @@ static int tb_retimer_add(struct tb_port *port, u8 index, u32 auth_status)
rt->port = port;
rt->tb = port->sw->tb;
+ /*
+	 * Only support NVM upgrade for on-board retimers, not for the
+	 * retimers on the other side of the cable.
+ */
+ if (!on_board || usb4_port_retimer_nvm_sector_size(port, index) <= 0)
+ rt->no_nvm_upgrade = true;
+
rt->dev.parent = &port->usb4->dev;
rt->dev.bus = &tb_bus_type;
rt->dev.type = &tb_retimer_type;
@@ -437,12 +458,14 @@ static int tb_retimer_add(struct tb_port *port, u8 index, u32 auth_status)
pm_runtime_mark_last_busy(&rt->dev);
pm_runtime_use_autosuspend(&rt->dev);
+ tb_retimer_debugfs_init(rt);
return 0;
}
static void tb_retimer_remove(struct tb_retimer *rt)
{
dev_info(&rt->dev, "retimer disconnected\n");
+ tb_retimer_debugfs_remove(rt);
tb_nvm_free(rt->nvm);
device_unregister(&rt->dev);
}
@@ -452,7 +475,7 @@ struct tb_retimer_lookup {
u8 index;
};
-static int retimer_match(struct device *dev, void *data)
+static int retimer_match(struct device *dev, const void *data)
{
const struct tb_retimer_lookup *lookup = data;
struct tb_retimer *rt = tb_to_retimer(dev);
@@ -478,14 +501,16 @@ static struct tb_retimer *tb_port_find_retimer(struct tb_port *port, u8 index)
* @add: If true also registers found retimers
*
* Brings the sideband into a state where retimers can be accessed.
- * Then Tries to enumerate on-board retimers connected to @port. Found
+ * Then tries to enumerate on-board retimers connected to @port. Found
* retimers are registered as children of @port if @add is set. Does
* not scan for cable retimers for now.
+ *
+ * Return: %0 on success, negative errno otherwise.
*/
int tb_retimer_scan(struct tb_port *port, bool add)
{
u32 status[TB_MAX_RETIMER_INDEX + 1] = {};
- int ret, i, last_idx = 0;
+ int ret, i, max, last_idx = 0;
/*
* Send broadcast RT to make sure retimer indices facing this
@@ -507,7 +532,7 @@ int tb_retimer_scan(struct tb_port *port, bool add)
*/
tb_retimer_set_inbound_sbtx(port);
- for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++) {
+ for (max = 1, i = 1; i <= TB_MAX_RETIMER_INDEX; i++) {
/*
* Last retimer is true only for the last on-board
* retimer (the one connected directly to the Type-C
@@ -518,28 +543,33 @@ int tb_retimer_scan(struct tb_port *port, bool add)
last_idx = i;
else if (ret < 0)
break;
- }
- tb_retimer_unset_inbound_sbtx(port);
-
- if (!last_idx)
- return 0;
+ max = i;
+ }
- /* Add on-board retimers if they do not exist already */
ret = 0;
- for (i = 1; i <= last_idx; i++) {
+ if (!IS_ENABLED(CONFIG_USB4_DEBUGFS_MARGINING))
+ max = min(last_idx, max);
+
+ /* Add retimers if they do not exist already */
+ for (i = 1; i <= max; i++) {
struct tb_retimer *rt;
+ /* Skip cable retimers */
+ if (usb4_port_retimer_is_cable(port, i))
+ continue;
+
rt = tb_port_find_retimer(port, i);
if (rt) {
put_device(&rt->dev);
} else if (add) {
- ret = tb_retimer_add(port, i, status[i]);
+ ret = tb_retimer_add(port, i, status[i], i <= last_idx);
if (ret && ret != -EOPNOTSUPP)
break;
}
}
+ tb_retimer_unset_inbound_sbtx(port);
return ret;
}
diff --git a/drivers/thunderbolt/sb_regs.h b/drivers/thunderbolt/sb_regs.h
index f37a4320f10a..5391502a4b87 100644
--- a/drivers/thunderbolt/sb_regs.h
+++ b/drivers/thunderbolt/sb_regs.h
@@ -12,6 +12,10 @@
#define USB4_SB_VENDOR_ID 0x00
#define USB4_SB_PRODUCT_ID 0x01
+#define USB4_SB_FW_VERSION 0x02
+#define USB4_SB_DEBUG_CONF 0x05
+#define USB4_SB_DEBUG 0x06
+#define USB4_SB_LRD_TUNING 0x07
#define USB4_SB_OPCODE 0x08
enum usb4_sb_opcode {
@@ -22,6 +26,7 @@ enum usb4_sb_opcode {
USB4_SB_OPCODE_SET_INBOUND_SBTX = 0x5055534c, /* "LSUP" */
USB4_SB_OPCODE_UNSET_INBOUND_SBTX = 0x50555355, /* "USUP" */
USB4_SB_OPCODE_QUERY_LAST_RETIMER = 0x5453414c, /* "LAST" */
+ USB4_SB_OPCODE_QUERY_CABLE_RETIMER = 0x524c4243, /* "CBLR" */
USB4_SB_OPCODE_GET_NVM_SECTOR_SIZE = 0x53534e47, /* "GNSS" */
USB4_SB_OPCODE_NVM_SET_OFFSET = 0x53504f42, /* "BOPS" */
USB4_SB_OPCODE_NVM_BLOCK_WRITE = 0x574b4c42, /* "BLKW" */
@@ -35,60 +40,73 @@ enum usb4_sb_opcode {
#define USB4_SB_METADATA 0x09
#define USB4_SB_METADATA_NVM_AUTH_WRITE_MASK GENMASK(5, 0)
+#define USB4_SB_LINK_CONF 0x0c
+#define USB4_SB_GEN23_TXFFE 0x0d
+#define USB4_SB_GEN4_TXFFE 0x0e
+#define USB4_SB_VERSION 0x0f
#define USB4_SB_DATA 0x12
/* USB4_SB_OPCODE_READ_LANE_MARGINING_CAP */
#define USB4_MARGIN_CAP_0_MODES_HW BIT(0)
#define USB4_MARGIN_CAP_0_MODES_SW BIT(1)
-#define USB4_MARGIN_CAP_0_2_LANES BIT(2)
+#define USB4_MARGIN_CAP_0_ALL_LANES BIT(2)
#define USB4_MARGIN_CAP_0_VOLTAGE_INDP_MASK GENMASK(4, 3)
-#define USB4_MARGIN_CAP_0_VOLTAGE_INDP_SHIFT 3
#define USB4_MARGIN_CAP_0_VOLTAGE_MIN 0x0
#define USB4_MARGIN_CAP_0_VOLTAGE_HL 0x1
#define USB4_MARGIN_CAP_0_VOLTAGE_BOTH 0x2
#define USB4_MARGIN_CAP_0_TIME BIT(5)
#define USB4_MARGIN_CAP_0_VOLTAGE_STEPS_MASK GENMASK(12, 6)
-#define USB4_MARGIN_CAP_0_VOLTAGE_STEPS_SHIFT 6
#define USB4_MARGIN_CAP_0_MAX_VOLTAGE_OFFSET_MASK GENMASK(18, 13)
-#define USB4_MARGIN_CAP_0_MAX_VOLTAGE_OFFSET_SHIFT 13
+#define USB4_MARGIN_CAP_0_OPT_VOLTAGE_SUPPORT BIT(19)
+#define USB4_MARGIN_CAP_0_VOLT_STEPS_OPT_MASK GENMASK(26, 20)
+#define USB4_MARGIN_CAP_1_MAX_VOLT_OFS_OPT_MASK GENMASK(7, 0)
#define USB4_MARGIN_CAP_1_TIME_DESTR BIT(8)
#define USB4_MARGIN_CAP_1_TIME_INDP_MASK GENMASK(10, 9)
-#define USB4_MARGIN_CAP_1_TIME_INDP_SHIFT 9
#define USB4_MARGIN_CAP_1_TIME_MIN 0x0
#define USB4_MARGIN_CAP_1_TIME_LR 0x1
#define USB4_MARGIN_CAP_1_TIME_BOTH 0x2
#define USB4_MARGIN_CAP_1_TIME_STEPS_MASK GENMASK(15, 11)
-#define USB4_MARGIN_CAP_1_TIME_STEPS_SHIFT 11
#define USB4_MARGIN_CAP_1_TIME_OFFSET_MASK GENMASK(20, 16)
-#define USB4_MARGIN_CAP_1_TIME_OFFSET_SHIFT 16
#define USB4_MARGIN_CAP_1_MIN_BER_MASK GENMASK(25, 21)
-#define USB4_MARGIN_CAP_1_MIN_BER_SHIFT 21
#define USB4_MARGIN_CAP_1_MAX_BER_MASK GENMASK(30, 26)
-#define USB4_MARGIN_CAP_1_MAX_BER_SHIFT 26
-#define USB4_MARGIN_CAP_1_MAX_BER_SHIFT 26
+#define USB4_MARGIN_CAP_2_MODES_HW BIT(0)
+#define USB4_MARGIN_CAP_2_MODES_SW BIT(1)
+#define USB4_MARGIN_CAP_2_TIME BIT(2)
+#define USB4_MARGIN_CAP_2_MAX_VOLTAGE_OFFSET_MASK GENMASK(8, 3)
+#define USB4_MARGIN_CAP_2_VOLTAGE_STEPS_MASK GENMASK(15, 9)
+#define USB4_MARGIN_CAP_2_VOLTAGE_INDP_MASK GENMASK(17, 16)
+#define USB4_MARGIN_CAP_2_VOLTAGE_MIN 0x0
+#define USB4_MARGIN_CAP_2_VOLTAGE_BOTH 0x1
+#define USB4_MARGIN_CAP_2_TIME_INDP_MASK GENMASK(19, 18)
+#define USB4_MARGIN_CAP_2_TIME_MIN 0x0
+#define USB4_MARGIN_CAP_2_TIME_BOTH 0x1
/* USB4_SB_OPCODE_RUN_HW_LANE_MARGINING */
#define USB4_MARGIN_HW_TIME BIT(3)
-#define USB4_MARGIN_HW_RH BIT(4)
+#define USB4_MARGIN_HW_RHU BIT(4)
#define USB4_MARGIN_HW_BER_MASK GENMASK(9, 5)
#define USB4_MARGIN_HW_BER_SHIFT 5
+#define USB4_MARGIN_HW_OPT_VOLTAGE BIT(10)
/* Applicable to all margin values */
-#define USB4_MARGIN_HW_RES_1_MARGIN_MASK GENMASK(6, 0)
-#define USB4_MARGIN_HW_RES_1_EXCEEDS BIT(7)
-/* Different lane margin shifts */
-#define USB4_MARGIN_HW_RES_1_L0_LL_MARGIN_SHIFT 8
-#define USB4_MARGIN_HW_RES_1_L1_RH_MARGIN_SHIFT 16
-#define USB4_MARGIN_HW_RES_1_L1_LL_MARGIN_SHIFT 24
+#define USB4_MARGIN_HW_RES_MARGIN_MASK GENMASK(6, 0)
+#define USB4_MARGIN_HW_RES_EXCEEDS BIT(7)
+
+/* Shifts for parsing the lane results */
+#define USB4_MARGIN_HW_RES_LANE_SHIFT 16
+#define USB4_MARGIN_HW_RES_LL_SHIFT 8
/* USB4_SB_OPCODE_RUN_SW_LANE_MARGINING */
+#define USB4_MARGIN_SW_LANES_MASK GENMASK(2, 0)
#define USB4_MARGIN_SW_TIME BIT(3)
#define USB4_MARGIN_SW_RH BIT(4)
+#define USB4_MARGIN_SW_OPT_VOLTAGE BIT(5)
+#define USB4_MARGIN_SW_VT_MASK GENMASK(12, 6)
#define USB4_MARGIN_SW_COUNTER_MASK GENMASK(14, 13)
-#define USB4_MARGIN_SW_COUNTER_SHIFT 13
-#define USB4_MARGIN_SW_COUNTER_NOP 0x0
-#define USB4_MARGIN_SW_COUNTER_CLEAR 0x1
-#define USB4_MARGIN_SW_COUNTER_START 0x2
-#define USB4_MARGIN_SW_COUNTER_STOP 0x3
+#define USB4_MARGIN_SW_UPPER_EYE BIT(15)
+
+#define USB4_MARGIN_SW_ERR_COUNTER_LANE_0_MASK GENMASK(3, 0)
+#define USB4_MARGIN_SW_ERR_COUNTER_LANE_1_MASK GENMASK(7, 4)
+#define USB4_MARGIN_SW_ERR_COUNTER_LANE_2_MASK GENMASK(11, 8)
#endif
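A sketch of unpacking a hardware margining result dword with the masks and
shifts above, using FIELD_GET() from <linux/bitfield.h> (names hypothetical):

	static void example_parse_hw_margin(u32 res)
	{
		u8 margin = FIELD_GET(USB4_MARGIN_HW_RES_MARGIN_MASK, res);
		bool exceeds = res & USB4_MARGIN_HW_RES_EXCEEDS;
		/* the next lane's result sits USB4_MARGIN_HW_RES_LANE_SHIFT bits up */
		u8 lane1 = FIELD_GET(USB4_MARGIN_HW_RES_MARGIN_MASK,
				     res >> USB4_MARGIN_HW_RES_LANE_SHIFT);

		pr_debug("margin %u%s lane1 %u\n", margin,
			 exceeds ? " (exceeds)" : "", lane1);
	}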
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index 326433df5880..b3948aad0b95 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -290,8 +290,9 @@ static int nvm_authenticate(struct tb_switch *sw, bool auth_only)
* @size: Size of the buffer in bytes
*
* Reads from router NVM and returns the requested data in @buf. Locking
- * is up to the caller. Returns %0 in success and negative errno in case
- * of failure.
+ * is up to the caller.
+ *
+ * Return: %0 on success, negative errno otherwise.
*/
int tb_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf,
size_t size)
@@ -464,7 +465,7 @@ static void tb_dump_port(struct tb *tb, const struct tb_port *port)
*
* The port must have a TB_CAP_PHY (i.e. it should be a real port).
*
- * Return: Returns an enum tb_port_state on success or an error code on failure.
+ * Return: &enum tb_port_state or negative error code on failure.
*/
int tb_port_state(struct tb_port *port)
{
@@ -491,9 +492,11 @@ int tb_port_state(struct tb_port *port)
* switch resume). Otherwise we only wait if a device is registered but the link
* has not yet been established.
*
- * Return: Returns an error code on failure. Returns 0 if the port is not
- * connected or failed to reach state TB_PORT_UP within one second. Returns 1
- * if the port is connected and in state TB_PORT_UP.
+ * Return:
+ * * %0 - If the port is not connected or failed to reach
+ * state %TB_PORT_UP within one second.
+ * * %1 - If the port is connected and in state %TB_PORT_UP.
+ * * Negative errno - An error occurred.
*/
int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged)
{
@@ -562,7 +565,7 @@ int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged)
* Change the number of NFC credits allocated to @port by @credits. To remove
* NFC credits pass a negative amount of credits.
*
- * Return: Returns 0 on success or an error code on failure.
+ * Return: %0 on success, negative errno otherwise.
*/
int tb_port_add_nfc_credits(struct tb_port *port, int credits)
{
@@ -599,7 +602,7 @@ int tb_port_add_nfc_credits(struct tb_port *port, int credits)
* @port: Port whose counters to clear
* @counter: Counter index to clear
*
- * Return: Returns 0 on success or an error code on failure.
+ * Return: %0 on success, negative errno otherwise.
*/
int tb_port_clear_counter(struct tb_port *port, int counter)
{
@@ -614,6 +617,8 @@ int tb_port_clear_counter(struct tb_port *port, int counter)
*
* Needed for USB4 but can be called for any CIO/USB4 ports. Makes the
* downstream router accessible for CM.
+ *
+ * Return: %0 on success, negative errno otherwise.
*/
int tb_port_unlock(struct tb_port *port)
{
@@ -659,6 +664,8 @@ static int __tb_port_enable(struct tb_port *port, bool enable)
* @port: Port to enable (can be %NULL)
*
* This is used for lane 0 and 1 adapters to enable it.
+ *
+ * Return: %0 on success, negative errno otherwise.
*/
int tb_port_enable(struct tb_port *port)
{
@@ -670,6 +677,8 @@ int tb_port_enable(struct tb_port *port)
* @port: Port to disable (can be %NULL)
*
* This is used for lane 0 and 1 adapters to disable it.
+ *
+ * Return: %0 on success, negative errno otherwise.
*/
int tb_port_disable(struct tb_port *port)
{
@@ -689,7 +698,7 @@ static int tb_port_reset(struct tb_port *port)
* This is a helper method for tb_switch_alloc. Does not check or initialize
* any downstream switches.
*
- * Return: Returns 0 on success or an error code on failure.
+ * Return: %0 on success, negative errno otherwise.
*/
static int tb_init_port(struct tb_port *port)
{
@@ -727,9 +736,9 @@ static int tb_init_port(struct tb_port *port)
port->cap_usb4 = cap;
/*
- * USB4 ports the buffers allocated for the control path
+	 * For USB4 ports the buffers allocated for the control path
* can be read from the path config space. Legacy
- * devices we use hard-coded value.
+	 * devices use a hard-coded value.
*/
if (port->cap_usb4) {
struct tb_regs_hop hop;
@@ -847,9 +856,9 @@ static inline bool tb_switch_is_reachable(const struct tb_switch *parent,
* link port, the function follows that link and returns another end on
* that same link.
*
- * If the @end port has been reached, return %NULL.
- *
* Domain tb->lock must be held when this function is called.
+ *
+ * Return: Pointer to &struct tb_port, %NULL if the @end port has been reached.
*/
struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end,
struct tb_port *prev)
@@ -894,7 +903,7 @@ struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end,
* tb_port_get_link_speed() - Get current link speed
* @port: Port to check (USB4 or CIO)
*
- * Returns link speed in Gb/s or negative errno in case of failure.
+ * Return: Link speed in Gb/s or negative errno in case of failure.
*/
int tb_port_get_link_speed(struct tb_port *port)
{
@@ -926,9 +935,11 @@ int tb_port_get_link_speed(struct tb_port *port)
* tb_port_get_link_generation() - Returns link generation
* @port: Lane adapter
*
- * Returns link generation as number or negative errno in case of
- * failure. Does not distinguish between Thunderbolt 1 and Thunderbolt 2
- * links so for those always returns 2.
+ * Return: Link generation as a number or negative errno in case of
+ * failure.
+ *
+ * Does not distinguish between Thunderbolt 1 and Thunderbolt 2
+ * links so for those always returns %2.
*/
int tb_port_get_link_generation(struct tb_port *port)
{
@@ -952,8 +963,8 @@ int tb_port_get_link_generation(struct tb_port *port)
* tb_port_get_link_width() - Get current link width
* @port: Port to check (USB4 or CIO)
*
- * Returns link width. Return the link width as encoded in &enum
- * tb_link_width or negative errno in case of failure.
+ * Return: Link width encoded in &enum tb_link_width or
+ * negative errno in case of failure.
*/
int tb_port_get_link_width(struct tb_port *port)
{
@@ -979,7 +990,9 @@ int tb_port_get_link_width(struct tb_port *port)
* @width: Widths to check (bitmask)
*
* Can be called to any lane adapter. Checks if given @width is
- * supported by the hardware and returns %true if it is.
+ * supported by the hardware.
+ *
+ * Return: %true if link width is supported, %false otherwise.
*/
bool tb_port_width_supported(struct tb_port *port, unsigned int width)
{
@@ -1016,7 +1029,7 @@ bool tb_port_width_supported(struct tb_port *port, unsigned int width)
* Sets the target link width of the lane adapter to @width. Does not
* enable/disable lane bonding. For that call tb_port_set_lane_bonding().
*
- * Return: %0 in case of success and negative errno in case of error
+ * Return: %0 on success, negative errno otherwise.
*/
int tb_port_set_link_width(struct tb_port *port, enum tb_link_width width)
{
@@ -1070,7 +1083,7 @@ int tb_port_set_link_width(struct tb_port *port, enum tb_link_width width)
* cases one should use tb_port_lane_bonding_enable() instead to enable
* lane bonding.
*
- * Return: %0 in case of success and negative errno in case of error
+ * Return: %0 on success, negative errno otherwise.
*/
static int tb_port_set_lane_bonding(struct tb_port *port, bool bonding)
{
@@ -1104,7 +1117,7 @@ static int tb_port_set_lane_bonding(struct tb_port *port, bool bonding)
* tb_port_wait_for_link_width() before enabling any paths through the
* link to make sure the link is in expected state.
*
- * Return: %0 in case of success and negative errno in case of error
+ * Return: %0 on success, negative errno otherwise.
*/
int tb_port_lane_bonding_enable(struct tb_port *port)
{
@@ -1181,9 +1194,14 @@ void tb_port_lane_bonding_disable(struct tb_port *port)
*
* Should be used after both ends of the link have been bonded (or
* bonding has been disabled) to wait until the link actually reaches
- * the expected state. Returns %-ETIMEDOUT if the width was not reached
- * within the given timeout, %0 if it did. Can be passed a mask of
- * expected widths and succeeds if any of the widths is reached.
+ * the expected state.
+ *
+ * Can be passed a mask of expected widths.
+ *
+ * Return:
+ * * %0 - If the link reaches any of the specified widths.
+ * * %-ETIMEDOUT - If no specified width was reached within the timeout.
+ * * Negative errno - Another error occurred.
*/
int tb_port_wait_for_link_width(struct tb_port *port, unsigned int width,
int timeout_msec)
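A caller sketch matching the Return convention above; the width mask and
timeout are illustrative:

	ret = tb_port_wait_for_link_width(port, TB_LINK_WIDTH_DUAL, 100);
	if (ret == -ETIMEDOUT)
		tb_port_warn(port, "timeout waiting for expected link width\n");
	else if (ret)
		return ret;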
@@ -1248,6 +1266,8 @@ static int tb_port_do_update_credits(struct tb_port *port)
* After the link is bonded (or bonding was disabled) the port total
* credits may change, so this function needs to be called to re-read
* the credits. Updates also the second lane adapter.
+ *
+ * Return: %0 on success, negative errno otherwise.
*/
int tb_port_update_credits(struct tb_port *port)
{
@@ -1303,6 +1323,8 @@ static bool tb_port_resume(struct tb_port *port)
/**
* tb_port_is_enabled() - Is the adapter port enabled
* @port: Port to check
+ *
+ * Return: %true if port is enabled, %false otherwise.
*/
bool tb_port_is_enabled(struct tb_port *port)
{
@@ -1327,6 +1349,8 @@ bool tb_port_is_enabled(struct tb_port *port)
/**
* tb_usb3_port_is_enabled() - Is the USB3 adapter port enabled
* @port: USB3 adapter port to check
+ *
+ * Return: %true if port is enabled, %false otherwise.
*/
bool tb_usb3_port_is_enabled(struct tb_port *port)
{
@@ -1343,6 +1367,8 @@ bool tb_usb3_port_is_enabled(struct tb_port *port)
* tb_usb3_port_enable() - Enable USB3 adapter port
* @port: USB3 adapter port to enable
* @enable: Enable/disable the USB3 adapter
+ *
+ * Return: %0 on success, negative errno otherwise.
*/
int tb_usb3_port_enable(struct tb_port *port, bool enable)
{
@@ -1358,6 +1384,8 @@ int tb_usb3_port_enable(struct tb_port *port, bool enable)
/**
* tb_pci_port_is_enabled() - Is the PCIe adapter port enabled
* @port: PCIe port to check
+ *
+ * Return: %true if port is enabled, %false otherwise.
*/
bool tb_pci_port_is_enabled(struct tb_port *port)
{
@@ -1374,6 +1402,8 @@ bool tb_pci_port_is_enabled(struct tb_port *port)
* tb_pci_port_enable() - Enable PCIe adapter port
* @port: PCIe port to enable
* @enable: Enable/disable the PCIe adapter
+ *
+ * Return: %0 on success, negative errno otherwise.
*/
int tb_pci_port_enable(struct tb_port *port, bool enable)
{
@@ -1389,6 +1419,8 @@ int tb_pci_port_enable(struct tb_port *port, bool enable)
* @port: DP out port to check
*
* Checks if the DP OUT adapter port has HPD bit already set.
+ *
+ * Return: %1 if HPD is active, %0 if not, negative errno in case of failure.
*/
int tb_dp_port_hpd_is_active(struct tb_port *port)
{
@@ -1408,6 +1440,8 @@ int tb_dp_port_hpd_is_active(struct tb_port *port)
* @port: Port to clear HPD
*
* If the DP IN port has HPD set, this function can be used to clear it.
+ *
+ * Return: %0 on success, negative errno otherwise.
*/
int tb_dp_port_hpd_clear(struct tb_port *port)
{
@@ -1434,6 +1468,8 @@ int tb_dp_port_hpd_clear(struct tb_port *port)
* Programs specified Hop IDs for DP IN/OUT port. Can be called for USB4
* router DP adapters too but does not program the values as the fields
* are read-only.
+ *
+ * Return: %0 on success, negative errno otherwise.
*/
int tb_dp_port_set_hops(struct tb_port *port, unsigned int video,
unsigned int aux_tx, unsigned int aux_rx)
@@ -1450,7 +1486,7 @@ int tb_dp_port_set_hops(struct tb_port *port, unsigned int video,
return ret;
data[0] &= ~ADP_DP_CS_0_VIDEO_HOPID_MASK;
- data[1] &= ~ADP_DP_CS_1_AUX_RX_HOPID_MASK;
+ data[1] &= ~ADP_DP_CS_1_AUX_TX_HOPID_MASK;
data[1] &= ~ADP_DP_CS_1_AUX_RX_HOPID_MASK;
data[0] |= (video << ADP_DP_CS_0_VIDEO_HOPID_SHIFT) &
@@ -1466,6 +1502,8 @@ int tb_dp_port_set_hops(struct tb_port *port, unsigned int video,
/**
* tb_dp_port_is_enabled() - Is DP adapter port enabled
* @port: DP adapter port to check
+ *
+ * Return: %true if DP port is enabled, %false otherwise.
*/
bool tb_dp_port_is_enabled(struct tb_port *port)
{
@@ -1485,6 +1523,8 @@ bool tb_dp_port_is_enabled(struct tb_port *port)
*
* Once Hop IDs are programmed DP paths can be enabled or disabled by
* calling this function.
+ *
+ * Return: %0 on success, negative errno otherwise.
*/
int tb_dp_port_enable(struct tb_port *port, bool enable)
{
@@ -1634,7 +1674,7 @@ static bool tb_switch_enumerated(struct tb_switch *sw)
*
* If the router is not enumerated does nothing.
*
- * Returns %0 on success or negative errno in case of failure.
+ * Return: %0 on success, negative errno otherwise.
*/
int tb_switch_reset(struct tb_switch *sw)
{
@@ -1670,8 +1710,12 @@ int tb_switch_reset(struct tb_switch *sw)
* @timeout_msec: Timeout in ms how long to wait
*
* Wait till the specified bits in specified offset reach specified value.
- * Returns %0 in case of success, %-ETIMEDOUT if the @value was not reached
- * within the given timeout or a negative errno in case of failure.
+ *
+ * Return:
+ * * %0 - On success.
+ * * %-ETIMEDOUT - If the @value was not reached within
+ * the given timeout.
+ * * Negative errno - In case of failure.
*/
int tb_switch_wait_for_bit(struct tb_switch *sw, u32 offset, u32 bit,
u32 value, int timeout_msec)
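A caller sketch, assuming ROUTER_CS_6/ROUTER_CS_6_SLPR from tb_regs.h, that
waits for the router to report sleep ready:

	ret = tb_switch_wait_for_bit(sw, ROUTER_CS_6, ROUTER_CS_6_SLPR,
				     ROUTER_CS_6_SLPR, 500);
	if (ret == -ETIMEDOUT)
		tb_sw_warn(sw, "timeout waiting for sleep ready\n");
	else if (ret)
		return ret;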
@@ -1700,7 +1744,7 @@ int tb_switch_wait_for_bit(struct tb_switch *sw, u32 offset, u32 bit,
*
* Also configures a sane plug_events_delay of 255ms.
*
- * Return: Returns 0 on success or an error code on failure.
+ * Return: %0 on success, negative errno otherwise.
*/
static int tb_plug_events_active(struct tb_switch *sw, bool active)
{
@@ -2406,8 +2450,7 @@ static bool tb_switch_exceeds_max_depth(const struct tb_switch *sw, int depth)
* separately. The returned switch should be released by calling
* tb_switch_put().
*
- * Return: Pointer to the allocated switch or ERR_PTR() in case of
- * failure.
+ * Return: Pointer to &struct tb_switch or ERR_PTR() in case of failure.
*/
struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
u64 route)
@@ -2526,7 +2569,7 @@ err_free_sw_ports:
*
* The returned switch must be released by calling tb_switch_put().
*
- * Return: Pointer to the allocated switch or ERR_PTR() in case of failure
+ * Return: Pointer to &struct tb_switch or ERR_PTR() in case of failure.
*/
struct tb_switch *
tb_switch_alloc_safe_mode(struct tb *tb, struct device *parent, u64 route)
@@ -2562,7 +2605,7 @@ tb_switch_alloc_safe_mode(struct tb *tb, struct device *parent, u64 route)
* connection manager to use. Can be called to the switch again after
* resume from low power states to re-initialize it.
*
- * Return: %0 in case of success and negative errno in case of failure
+ * Return: %0 on success, negative errno otherwise.
*/
int tb_switch_configure(struct tb_switch *sw)
{
@@ -2625,7 +2668,7 @@ int tb_switch_configure(struct tb_switch *sw)
* Needs to be called before any tunnels can be setup through the
* router. Can be called to any router.
*
- * Returns %0 in success and negative errno otherwise.
+ * Return: %0 on success, negative errno otherwise.
*/
int tb_switch_configuration_valid(struct tb_switch *sw)
{
@@ -2900,6 +2943,8 @@ static void tb_switch_link_init(struct tb_switch *sw)
* Connection manager can call this function to enable lane bonding of a
* switch. If conditions are correct and both switches support the feature,
* lanes are bonded. It is safe to call this to any switch.
+ *
+ * Return: %0 on success, negative errno otherwise.
*/
static int tb_switch_lane_bonding_enable(struct tb_switch *sw)
{
@@ -2950,6 +2995,8 @@ static int tb_switch_lane_bonding_enable(struct tb_switch *sw)
*
* Disables lane bonding between @sw and parent. This can be called even
* if lanes were not bonded originally.
+ *
+ * Return: %0 on success, negative errno otherwise.
*/
static int tb_switch_lane_bonding_disable(struct tb_switch *sw)
{
@@ -3069,12 +3116,12 @@ static int tb_switch_asym_disable(struct tb_switch *sw)
* @width: The new link width
*
* Set device router link width to @width from router upstream port
- * perspective. Supports also asymmetric links if the routers boths side
+ * perspective. Also supports asymmetric links if the router on each side
* of the link supports it.
*
* Does nothing for host router.
*
- * Returns %0 in case of success, negative errno otherwise.
+ * Return: %0 on success, negative errno otherwise.
*/
int tb_switch_set_link_width(struct tb_switch *sw, enum tb_link_width width)
{
@@ -3145,7 +3192,7 @@ int tb_switch_set_link_width(struct tb_switch *sw, enum tb_link_width width)
*
* It is recommended that this is called after lane bonding is enabled.
*
- * Returns %0 on success and negative errno in case of error.
+ * Return: %0 on success, negative errno otherwise.
*/
int tb_switch_configure_link(struct tb_switch *sw)
{
@@ -3174,7 +3221,7 @@ int tb_switch_configure_link(struct tb_switch *sw)
* @sw: Switch whose link is unconfigured
*
* Sets the link unconfigured so the @sw will be disconnected if the
- * domain exists sleep.
+ * domain exits sleep.
*/
void tb_switch_unconfigure_link(struct tb_switch *sw)
{
@@ -3245,7 +3292,7 @@ static int tb_switch_port_hotplug_enable(struct tb_switch *sw)
* exposed to the userspace when this function successfully returns. To
* remove and release the switch, call tb_switch_remove().
*
- * Return: %0 in case of success and negative errno in case of failure
+ * Return: %0 on success, negative errno otherwise.
*/
int tb_switch_add(struct tb_switch *sw)
{
@@ -3392,6 +3439,7 @@ void tb_switch_remove(struct tb_switch *sw)
tb_switch_remove(port->remote->sw);
port->remote = NULL;
} else if (port->xdomain) {
+ port->xdomain->is_unplugged = true;
tb_xdomain_remove(port->xdomain);
port->xdomain = NULL;
}
@@ -3436,7 +3484,7 @@ void tb_sw_set_unplugged(struct tb_switch *sw)
}
}
-static int tb_switch_set_wake(struct tb_switch *sw, unsigned int flags)
+static int tb_switch_set_wake(struct tb_switch *sw, unsigned int flags, bool runtime)
{
if (flags)
tb_sw_dbg(sw, "enabling wakeup: %#x\n", flags);
@@ -3444,7 +3492,7 @@ static int tb_switch_set_wake(struct tb_switch *sw, unsigned int flags)
tb_sw_dbg(sw, "disabling wakeup\n");
if (tb_switch_is_usb4(sw))
- return usb4_switch_set_wake(sw, flags);
+ return usb4_switch_set_wake(sw, flags, runtime);
return tb_lc_set_wake(sw, flags);
}
@@ -3466,6 +3514,8 @@ static void tb_switch_check_wakes(struct tb_switch *sw)
* suspend. If this is resume from system sleep, notifies PM core about the
* wakes occurred during suspend. Disables all wakes, except USB4 wake of
* upstream port for USB4 routers that shall be always enabled.
+ *
+ * Return: %0 on success, negative errno otherwise.
*/
int tb_switch_resume(struct tb_switch *sw, bool runtime)
{
@@ -3520,7 +3570,7 @@ int tb_switch_resume(struct tb_switch *sw, bool runtime)
tb_switch_check_wakes(sw);
/* Disable wakes */
- tb_switch_set_wake(sw, 0);
+ tb_switch_set_wake(sw, 0, true);
err = tb_switch_tmu_init(sw);
if (err)
@@ -3598,10 +3648,11 @@ void tb_switch_suspend(struct tb_switch *sw, bool runtime)
flags |= TB_WAKE_ON_USB4;
flags |= TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE | TB_WAKE_ON_DP;
} else if (device_may_wakeup(&sw->dev)) {
+ flags |= TB_WAKE_ON_CONNECT | TB_WAKE_ON_DISCONNECT;
flags |= TB_WAKE_ON_USB4 | TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE;
}
- tb_switch_set_wake(sw, flags);
+ tb_switch_set_wake(sw, flags, runtime);
if (tb_switch_is_usb4(sw))
usb4_switch_set_sleep(sw);
@@ -3615,7 +3666,9 @@ void tb_switch_suspend(struct tb_switch *sw, bool runtime)
* @in: DP IN port
*
* Queries availability of DP resource for DP tunneling using switch
- * specific means. Returns %true if resource is available.
+ * specific means.
+ *
+ * Return: %true if resource is available, %false otherwise.
*/
bool tb_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in)
{
@@ -3631,7 +3684,8 @@ bool tb_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in)
*
* Allocates DP resource for DP tunneling. The resource must be
* available for this to succeed (see tb_switch_query_dp_resource()).
- * Returns %0 in success and negative errno otherwise.
+ *
+ * Return: %0 on success, negative errno otherwise.
*/
int tb_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
{
@@ -3716,6 +3770,8 @@ static int tb_switch_match(struct device *dev, const void *data)
*
* Returned switch has reference count increased so the caller needs to
* call tb_switch_put() when done with the switch.
+ *
+ * Return: Pointer to &struct tb_switch, %NULL if not found.
*/
struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link, u8 depth)
{
@@ -3741,6 +3797,8 @@ struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link, u8 depth)
*
* Returned switch has reference count increased so the caller needs to
* call tb_switch_put() when done with the switch.
+ *
+ * Return: Pointer to &struct tb_switch, %NULL if not found.
*/
struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid)
{
@@ -3765,6 +3823,8 @@ struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid)
*
* Returned switch has reference count increased so the caller needs to
* call tb_switch_put() when done with the switch.
+ *
+ * Return: Pointer to &struct tb_switch, %NULL if not found.
*/
struct tb_switch *tb_switch_find_by_route(struct tb *tb, u64 route)
{
@@ -3789,6 +3849,8 @@ struct tb_switch *tb_switch_find_by_route(struct tb *tb, u64 route)
* tb_switch_find_port() - return the first port of @type on @sw or NULL
* @sw: Switch to find the port from
* @type: Port type to look for
+ *
+ * Return: Pointer to &struct tb_port, %NULL if not found.
*/
struct tb_port *tb_switch_find_port(struct tb_switch *sw,
enum tb_port_type type)
@@ -3857,6 +3919,8 @@ static int tb_switch_pcie_bridge_write(struct tb_switch *sw, unsigned int bridge
* entry to PCIe L1 state. Shall be called after the upstream PCIe tunnel
* was configured. Due to Intel platforms limitation, shall be called only
* for first hop switch.
+ *
+ * Return: %0 on success, negative errno otherwise.
*/
int tb_switch_pcie_l1_enable(struct tb_switch *sw)
{
@@ -3891,6 +3955,8 @@ int tb_switch_pcie_l1_enable(struct tb_switch *sw)
* connected to the type-C port. Call only after PCIe tunnel has been
* established. The function only does the connect if not done already
* so can be called several times for the same router.
+ *
+ * Return: %0 on success, negative errno otherwise.
*/
int tb_switch_xhci_connect(struct tb_switch *sw)
{
diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
index 10e719dd837c..4f5f1dfc0fbf 100644
--- a/drivers/thunderbolt/tb.c
+++ b/drivers/thunderbolt/tb.c
@@ -20,6 +20,12 @@
#define TB_RELEASE_BW_TIMEOUT 10000 /* ms */
/*
+ * How many times a bandwidth allocation request from the graphics
+ * driver is retried if the DP tunnel is still activating.
+ */
+#define TB_BW_ALLOC_RETRIES 3
+
+/*
* Minimum bandwidth (in Mb/s) that is needed in the single transmitter/receiver
* direction. This is 40G - 10% guard band bandwidth.
*/
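(That is 40,000 Mb/s less a 10% guard band, i.e. 36,000 Mb/s per direction.)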
@@ -69,14 +75,20 @@ static inline struct tb *tcm_to_tb(struct tb_cm *tcm)
}
struct tb_hotplug_event {
- struct work_struct work;
+ struct delayed_work work;
struct tb *tb;
u64 route;
u8 port;
bool unplug;
+ int retry;
};
+static void tb_scan_port(struct tb_port *port);
static void tb_handle_hotplug(struct work_struct *work);
+static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port,
+ const char *reason);
+static void tb_queue_dp_bandwidth_request(struct tb *tb, u64 route, u8 port,
+ int retry, unsigned long delay);
static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
{
@@ -90,8 +102,8 @@ static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
ev->route = route;
ev->port = port;
ev->unplug = unplug;
- INIT_WORK(&ev->work, tb_handle_hotplug);
- queue_work(tb->wq, &ev->work);
+ INIT_DELAYED_WORK(&ev->work, tb_handle_hotplug);
+ queue_delayed_work(tb->wq, &ev->work, 0);
}
/* enumeration & hot plug handling */
@@ -213,14 +225,12 @@ static int tb_enable_clx(struct tb_switch *sw)
return ret == -EOPNOTSUPP ? 0 : ret;
}
-/**
- * tb_disable_clx() - Disable CL states up to host router
- * @sw: Router to start
+/*
+ * Disables CL states from @sw up to the host router.
*
- * Disables CL states from @sw up to the host router. Returns true if
- * any CL state were disabled. This can be used to figure out whether
- * the link was setup by us or the boot firmware so we don't
- * accidentally enable them if they were not enabled during discovery.
+ * This can be used to figure out whether the link was setup by us or the
+ * boot firmware so we don't accidentally enable them if they were not
+ * enabled during discovery.
*/
static bool tb_disable_clx(struct tb_switch *sw)
{
@@ -288,13 +298,31 @@ static void tb_increase_tmu_accuracy(struct tb_tunnel *tunnel)
device_for_each_child(&sw->dev, NULL, tb_increase_switch_tmu_accuracy);
}
+static int tb_switch_tmu_hifi_uni_required(struct device *dev, void *not_used)
+{
+ struct tb_switch *sw = tb_to_switch(dev);
+
+ if (sw && tb_switch_tmu_is_enabled(sw) &&
+ tb_switch_tmu_is_configured(sw, TB_SWITCH_TMU_MODE_HIFI_UNI))
+ return 1;
+
+ return device_for_each_child(dev, NULL,
+ tb_switch_tmu_hifi_uni_required);
+}
+
+static bool tb_tmu_hifi_uni_required(struct tb *tb)
+{
+ return device_for_each_child(&tb->dev, NULL,
+ tb_switch_tmu_hifi_uni_required) == 1;
+}
+
static int tb_enable_tmu(struct tb_switch *sw)
{
int ret;
/*
* If both routers at the end of the link are v2 we simply
- * enable the enhanched uni-directional mode. That covers all
+ * enable the enhanced uni-directional mode. That covers all
* the CL states. For v1 and before we need to use the normal
* rate to allow CL1 (when supported). Otherwise we keep the TMU
* running at the highest accuracy.
@@ -302,12 +330,30 @@ static int tb_enable_tmu(struct tb_switch *sw)
ret = tb_switch_tmu_configure(sw,
TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI);
if (ret == -EOPNOTSUPP) {
- if (tb_switch_clx_is_enabled(sw, TB_CL1))
- ret = tb_switch_tmu_configure(sw,
- TB_SWITCH_TMU_MODE_LOWRES);
- else
- ret = tb_switch_tmu_configure(sw,
- TB_SWITCH_TMU_MODE_HIFI_BI);
+ if (tb_switch_clx_is_enabled(sw, TB_CL1)) {
+ /*
+ * Figure out uni-directional HiFi TMU requirements
+ * currently in the domain. If there are no
+ * uni-directional HiFi requirements we can put the TMU
+ * into LowRes mode.
+ *
+ * Deliberately skip bi-directional HiFi links
+ * as these work independently of other links
+ * (and they do not allow any CL states anyway).
+ */
+ if (tb_tmu_hifi_uni_required(sw->tb))
+ ret = tb_switch_tmu_configure(sw,
+ TB_SWITCH_TMU_MODE_HIFI_UNI);
+ else
+ ret = tb_switch_tmu_configure(sw,
+ TB_SWITCH_TMU_MODE_LOWRES);
+ } else {
+ ret = tb_switch_tmu_configure(sw, TB_SWITCH_TMU_MODE_HIFI_BI);
+ }
+
+ /* If not supported, fallback to bi-directional HiFi */
+ if (ret == -EOPNOTSUPP)
+ ret = tb_switch_tmu_configure(sw, TB_SWITCH_TMU_MODE_HIFI_BI);
}
if (ret)
return ret;
@@ -408,10 +454,8 @@ static void tb_scan_xdomain(struct tb_port *port)
}
}
-/**
- * tb_find_unused_port() - return the first inactive port on @sw
- * @sw: Switch to find the port on
- * @type: Port type to look for
+/*
+ * Returns the first inactive port on @sw.
*/
static struct tb_port *tb_find_unused_port(struct tb_switch *sw,
enum tb_port_type type)
@@ -494,13 +538,15 @@ static struct tb_tunnel *tb_find_first_usb3_tunnel(struct tb *tb,
* @src_port: Source protocol adapter
* @dst_port: Destination protocol adapter
* @port: USB4 port the consumed bandwidth is calculated
- * @consumed_up: Consumed upsream bandwidth (Mb/s)
+ * @consumed_up: Consumed upstream bandwidth (Mb/s)
* @consumed_down: Consumed downstream bandwidth (Mb/s)
*
* Calculates consumed USB3 and PCIe bandwidth at @port between path
* from @src_port to @dst_port. Does not take USB3 tunnel starting from
* @src_port and ending on @src_port into account because that bandwidth is
* already included in as part of the "first hop" USB3 tunnel.
+ *
+ * Return: %0 on success, negative errno otherwise.
*/
static int tb_consumed_usb3_pcie_bandwidth(struct tb *tb,
struct tb_port *src_port,
@@ -543,7 +589,7 @@ static int tb_consumed_usb3_pcie_bandwidth(struct tb *tb,
* @src_port: Source protocol adapter
* @dst_port: Destination protocol adapter
* @port: USB4 port the consumed bandwidth is calculated
- * @consumed_up: Consumed upsream bandwidth (Mb/s)
+ * @consumed_up: Consumed upstream bandwidth (Mb/s)
* @consumed_down: Consumed downstream bandwidth (Mb/s)
*
* Calculates consumed DP bandwidth at @port between path from @src_port
@@ -553,6 +599,8 @@ static int tb_consumed_usb3_pcie_bandwidth(struct tb *tb,
* If there is bandwidth reserved for any of the groups between
* @src_port and @dst_port (but not yet used) that is also taken into
* account in the returned consumed bandwidth.
+ *
+ * Return: %0 on success, negative errno otherwise.
*/
static int tb_consumed_dp_bandwidth(struct tb *tb,
struct tb_port *src_port,
@@ -653,6 +701,8 @@ static bool tb_asym_supported(struct tb_port *src_port, struct tb_port *dst_port
* single link at @port. If @include_asym is set then includes the
* additional bandwidth if the links are transitioned into asymmetric
* in the direction from @src_port to @dst_port.
+ *
+ * Return: %0 on success, negative errno otherwise.
*/
static int tb_maximum_bandwidth(struct tb *tb, struct tb_port *src_port,
struct tb_port *dst_port, struct tb_port *port,
@@ -759,6 +809,8 @@ static int tb_maximum_bandwidth(struct tb *tb, struct tb_port *src_port,
* If @include_asym is true then includes also bandwidth that can be
* added when the links are transitioned into asymmetric (but does not
* transition the links).
+ *
+ * Return: %0 on success, negative errno otherwise.
*/
static int tb_available_bandwidth(struct tb *tb, struct tb_port *src_port,
struct tb_port *dst_port, int *available_up,
@@ -904,6 +956,15 @@ static int tb_tunnel_usb3(struct tb *tb, struct tb_switch *sw)
tb_port_dbg(up, "available bandwidth for new USB3 tunnel %d/%d Mb/s\n",
available_up, available_down);
+ /*
+ * If the available bandwidth is less than 1.5 Gb/s notify
+ * userspace that the connected isochronous device may not work
+ * properly.
+ */
+ if (available_up < 1500 || available_down < 1500)
+ tb_tunnel_event(tb, TB_TUNNEL_LOW_BANDWIDTH, TB_TUNNEL_USB3,
+ down, up);
+
tunnel = tb_tunnel_alloc_usb3(tb, up, down, available_up,
available_down);
if (!tunnel) {
@@ -925,7 +986,7 @@ static int tb_tunnel_usb3(struct tb *tb, struct tb_switch *sw)
return 0;
err_free:
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
err_reclaim:
if (tb_route(parent))
tb_reclaim_usb3_bandwidth(tb, down, up);
@@ -972,6 +1033,8 @@ static int tb_create_usb3_tunnels(struct tb_switch *sw)
* (requested + currently consumed) on that link exceed @asym_threshold.
*
* Must be called with available >= requested over all links.
+ *
+ * Return: %0 on success, negative errno otherwise.
*/
static int tb_configure_asym(struct tb *tb, struct tb_port *src_port,
struct tb_port *dst_port, int requested_up,
@@ -1052,7 +1115,7 @@ static int tb_configure_asym(struct tb *tb, struct tb_port *src_port,
/*
* Here requested + consumed > threshold so we need to
- * transtion the link into asymmetric now.
+ * transition the link into asymmetric now.
*/
ret = tb_switch_set_link_width(up->sw, width_up);
if (ret) {
@@ -1078,6 +1141,8 @@ static int tb_configure_asym(struct tb *tb, struct tb_port *src_port,
* Goes over each link from @src_port to @dst_port and tries to
* transition the link to symmetric if the currently consumed bandwidth
* allows and link asymmetric preference is ignored (if @keep_asym is %false).
+ *
+ * Return: %0 on success, negative errno otherwise.
*/
static int tb_configure_sym(struct tb *tb, struct tb_port *src_port,
struct tb_port *dst_port, bool keep_asym)
@@ -1202,8 +1267,6 @@ static void tb_configure_link(struct tb_port *down, struct tb_port *up,
tb_switch_configure_link(sw);
}
-static void tb_scan_port(struct tb_port *port);
-
/*
* tb_scan_switch() - scan for and initialize downstream switches
*/
@@ -1259,12 +1322,16 @@ static void tb_scan_port(struct tb_port *port)
goto out_rpm_put;
}
- tb_retimer_scan(port, true);
-
sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
tb_downstream_route(port));
if (IS_ERR(sw)) {
/*
+ * Make the downstream retimers available even if there
+ * is no router connected.
+ */
+ tb_retimer_scan(port, true);
+
+ /*
* If there is an error accessing the connected switch
* it may be connected to another domain. Also we allow
* the other domain to be connected to a max depth switch.
@@ -1314,6 +1381,14 @@ static void tb_scan_port(struct tb_port *port)
tb_configure_link(port, upstream_port, sw);
/*
+ * Scan for downstream retimers. We only scan them after the
+ * router has been enumerated to avoid issues with certain
+ * Pluggable devices that expect the host to enumerate them
+ * within certain timeout.
+ */
+ tb_retimer_scan(port, true);
+
+ /*
* CL0s and CL1 are enabled and supported together.
* Silently ignore CLx enabling in case CLx is not supported.
*/
@@ -1691,7 +1766,7 @@ static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
break;
}
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
}
/*
@@ -1828,12 +1903,76 @@ static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in)
return NULL;
}
-static bool tb_tunnel_one_dp(struct tb *tb, struct tb_port *in,
+static void tb_dp_tunnel_active(struct tb_tunnel *tunnel, void *data)
+{
+ struct tb_port *in = tunnel->src_port;
+ struct tb_port *out = tunnel->dst_port;
+ struct tb *tb = data;
+
+ mutex_lock(&tb->lock);
+ if (tb_tunnel_is_active(tunnel)) {
+ int consumed_up, consumed_down, ret;
+
+ tb_tunnel_dbg(tunnel, "DPRX capabilities read completed\n");
+
+		/* If reading the tunnel's consumed bandwidth fails, tear it down */
+ ret = tb_tunnel_consumed_bandwidth(tunnel, &consumed_up,
+ &consumed_down);
+ if (ret) {
+ tb_tunnel_warn(tunnel,
+ "failed to read consumed bandwidth, tearing down\n");
+ tb_deactivate_and_free_tunnel(tunnel);
+ } else {
+ tb_reclaim_usb3_bandwidth(tb, in, out);
+ /*
+ * Transition the links to asymmetric if the
+ * consumption exceeds the threshold.
+ */
+ tb_configure_asym(tb, in, out, consumed_up,
+ consumed_down);
+ /*
+ * Update the domain with the new bandwidth
+ * estimation.
+ */
+ tb_recalc_estimated_bandwidth(tb);
+ /*
+ * In case DP tunnel exists, change host
+ * router's 1st children TMU mode to HiFi for
+ * CL0s to work.
+ */
+ tb_increase_tmu_accuracy(tunnel);
+ }
+ } else {
+ struct tb_port *in = tunnel->src_port;
+
+ /*
+ * This tunnel failed to establish. This means DPRX
+ * negotiation most likely did not complete which
+ * happens either because there is no graphics driver
+		 * loaded or not all DP cables were connected to the
+ * discrete router.
+ *
+ * In both cases we remove the DP IN adapter from the
+ * available resources as it is not usable. This will
+ * also tear down the tunnel and try to re-use the
+ * released DP OUT.
+ *
+ * It will be added back only if there is hotplug for
+ * the DP IN again.
+ */
+ tb_tunnel_warn(tunnel, "not active, tearing down\n");
+ tb_dp_resource_unavailable(tb, in, "DPRX negotiation failed");
+ }
+ mutex_unlock(&tb->lock);
+
+ tb_domain_put(tb);
+}
+
+static void tb_tunnel_one_dp(struct tb *tb, struct tb_port *in,
struct tb_port *out)
{
int available_up, available_down, ret, link_nr;
struct tb_cm *tcm = tb_priv(tb);
- int consumed_up, consumed_down;
struct tb_tunnel *tunnel;
/*
@@ -1878,54 +2017,38 @@ static bool tb_tunnel_one_dp(struct tb *tb, struct tb_port *in,
ret = tb_available_bandwidth(tb, in, out, &available_up, &available_down,
true);
- if (ret)
+ if (ret) {
+ tb_tunnel_event(tb, TB_TUNNEL_NO_BANDWIDTH, TB_TUNNEL_DP, in, out);
goto err_reclaim_usb;
+ }
tb_dbg(tb, "available bandwidth for new DP tunnel %u/%u Mb/s\n",
available_up, available_down);
tunnel = tb_tunnel_alloc_dp(tb, in, out, link_nr, available_up,
- available_down);
+ available_down, tb_dp_tunnel_active,
+ tb_domain_get(tb));
if (!tunnel) {
tb_port_dbg(out, "could not allocate DP tunnel\n");
goto err_reclaim_usb;
}
- if (tb_tunnel_activate(tunnel)) {
+ list_add_tail(&tunnel->list, &tcm->tunnel_list);
+
+ ret = tb_tunnel_activate(tunnel);
+ if (ret && ret != -EINPROGRESS) {
tb_port_info(out, "DP tunnel activation failed, aborting\n");
+ list_del(&tunnel->list);
goto err_free;
}
- /* If fail reading tunnel's consumed bandwidth, tear it down */
- ret = tb_tunnel_consumed_bandwidth(tunnel, &consumed_up, &consumed_down);
- if (ret)
- goto err_deactivate;
-
- list_add_tail(&tunnel->list, &tcm->tunnel_list);
+ return;
- tb_reclaim_usb3_bandwidth(tb, in, out);
- /*
- * Transition the links to asymmetric if the consumption exceeds
- * the threshold.
- */
- tb_configure_asym(tb, in, out, consumed_up, consumed_down);
-
- /* Update the domain with the new bandwidth estimation */
- tb_recalc_estimated_bandwidth(tb);
-
- /*
- * In case of DP tunnel exists, change host router's 1st children
- * TMU mode to HiFi for CL0s to work.
- */
- tb_increase_tmu_accuracy(tunnel);
- return true;
-
-err_deactivate:
- tb_tunnel_deactivate(tunnel);
err_free:
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
err_reclaim_usb:
tb_reclaim_usb3_bandwidth(tb, in, out);
+ tb_domain_put(tb);
err_detach_group:
tb_detach_bandwidth_group(in);
err_dealloc_dp:
@@ -1935,8 +2058,6 @@ err_rpm_put:
pm_runtime_put_autosuspend(&out->sw->dev);
pm_runtime_mark_last_busy(&in->sw->dev);
pm_runtime_put_autosuspend(&in->sw->dev);
-
- return false;
}
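
The asynchronous activation contract above reduces to a small pattern on the caller side. Below is a minimal sketch of that pattern (a hypothetical caller, not part of the driver; it assumes only the APIs visible in this patch):

	static void my_dp_active(struct tb_tunnel *tunnel, void *data)
	{
		/* Runs from the domain workqueue once DPRX negotiation settles */
		struct tb *tb = data;

		if (tb_tunnel_is_active(tunnel))
			tb_tunnel_dbg(tunnel, "tunnel is up\n");
		tb_domain_put(tb);	/* drop the reference taken at alloc time */
	}

	static int my_activate(struct tb *tb, struct tb_port *in, struct tb_port *out)
	{
		struct tb_tunnel *tunnel;
		int ret;

		tunnel = tb_tunnel_alloc_dp(tb, in, out, 1, 0, 0, my_dp_active,
					    tb_domain_get(tb));
		if (!tunnel)
			return -ENOMEM;

		ret = tb_tunnel_activate(tunnel);
		if (ret && ret != -EINPROGRESS) {
			tb_tunnel_put(tunnel);
			tb_domain_put(tb);
			return ret;
		}
		/* On -EINPROGRESS my_dp_active() fires later from tb->wq */
		return 0;
	}
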
static void tb_tunnel_dp(struct tb *tb)
@@ -2023,17 +2144,49 @@ static void tb_exit_redrive(struct tb_port *port)
}
}
-static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port)
+static void tb_switch_enter_redrive(struct tb_switch *sw)
+{
+ struct tb_port *port;
+
+ tb_switch_for_each_port(sw, port)
+ tb_enter_redrive(port);
+}
+
+/*
+ * Called during system and runtime suspend to forcefully exit redrive
+ * mode without querying whether the resource is available.
+ */
+static void tb_switch_exit_redrive(struct tb_switch *sw)
+{
+ struct tb_port *port;
+
+ if (!(sw->quirks & QUIRK_KEEP_POWER_IN_DP_REDRIVE))
+ return;
+
+ tb_switch_for_each_port(sw, port) {
+ if (!tb_port_is_dpin(port))
+ continue;
+
+ if (port->redrive) {
+ port->redrive = false;
+ pm_runtime_put(&sw->dev);
+ tb_port_dbg(port, "exit redrive mode\n");
+ }
+ }
+}
+
+static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port,
+ const char *reason)
{
struct tb_port *in, *out;
struct tb_tunnel *tunnel;
if (tb_port_is_dpin(port)) {
- tb_port_dbg(port, "DP IN resource unavailable\n");
+ tb_port_dbg(port, "DP IN resource unavailable: %s\n", reason);
in = port;
out = NULL;
} else {
- tb_port_dbg(port, "DP OUT resource unavailable\n");
+ tb_port_dbg(port, "DP OUT resource unavailable: %s\n", reason);
in = NULL;
out = port;
}
@@ -2115,7 +2268,7 @@ static int tb_disconnect_pci(struct tb *tb, struct tb_switch *sw)
tb_tunnel_deactivate(tunnel);
list_del(&tunnel->list);
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
return 0;
}
@@ -2145,7 +2298,7 @@ static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
if (tb_tunnel_activate(tunnel)) {
tb_port_info(up,
"PCIe tunnel activation failed, aborting\n");
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
return -EIO;
}
@@ -2204,7 +2357,7 @@ static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
return 0;
err_free:
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
err_clx:
tb_enable_clx(sw);
mutex_unlock(&tb->lock);
@@ -2267,7 +2420,7 @@ static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
*/
static void tb_handle_hotplug(struct work_struct *work)
{
- struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
+ struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work.work);
struct tb *tb = ev->tb;
struct tb_cm *tcm = tb_priv(tb);
struct tb_switch *sw;
@@ -2339,7 +2492,7 @@ static void tb_handle_hotplug(struct work_struct *work)
tb_xdomain_put(xd);
tb_port_unconfigure_xdomain(port);
} else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
- tb_dp_resource_unavailable(tb, port);
+ tb_dp_resource_unavailable(tb, port, "adapter unplug");
} else if (!port->port) {
tb_sw_dbg(sw, "xHCI disconnect request\n");
tb_switch_xhci_disconnect(sw);
@@ -2483,13 +2636,17 @@ static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up,
* the 10s already expired and we should
* give the reserved back to others).
*/
- mod_delayed_work(system_wq, &group->release_work,
+ mod_delayed_work(system_percpu_wq, &group->release_work,
msecs_to_jiffies(TB_RELEASE_BW_TIMEOUT));
}
}
- return tb_tunnel_alloc_bandwidth(tunnel, requested_up,
- requested_down);
+ ret = tb_tunnel_alloc_bandwidth(tunnel, requested_up,
+ requested_down);
+ if (ret)
+ goto fail;
+
+ return 0;
}
/*
@@ -2565,6 +2722,7 @@ fail:
"failing the request by rewriting allocated %d/%d Mb/s\n",
allocated_up, allocated_down);
tb_tunnel_alloc_bandwidth(tunnel, &allocated_up, &allocated_down);
+ tb_tunnel_event(tb, TB_TUNNEL_NO_BANDWIDTH, TB_TUNNEL_DP, in, out);
}
return ret;
@@ -2572,7 +2730,7 @@ fail:
static void tb_handle_dp_bandwidth_request(struct work_struct *work)
{
- struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
+ struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work.work);
int requested_bw, requested_up, requested_down, ret;
struct tb_tunnel *tunnel;
struct tb *tb = ev->tb;
@@ -2599,7 +2757,7 @@ static void tb_handle_dp_bandwidth_request(struct work_struct *work)
goto put_sw;
}
- tb_port_dbg(in, "handling bandwidth allocation request\n");
+ tb_port_dbg(in, "handling bandwidth allocation request, retry %d\n", ev->retry);
tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL);
if (!tunnel) {
@@ -2628,8 +2786,8 @@ static void tb_handle_dp_bandwidth_request(struct work_struct *work)
* There is no request active so this means the
* BW allocation mode was enabled from graphics
* side. At this point we know that the graphics
- * driver has read the DRPX capabilities so we
- * can offer an better bandwidth estimatation.
+ * driver has read the DPRX capabilities so we
+ * can offer better bandwidth estimation.
*/
tb_port_dbg(in, "DPTX enabled bandwidth allocation mode, updating estimated bandwidth\n");
tb_recalc_estimated_bandwidth(tb);
@@ -2652,12 +2810,33 @@ static void tb_handle_dp_bandwidth_request(struct work_struct *work)
ret = tb_alloc_dp_bandwidth(tunnel, &requested_up, &requested_down);
if (ret) {
- if (ret == -ENOBUFS)
+ if (ret == -ENOBUFS) {
tb_tunnel_warn(tunnel,
"not enough bandwidth available\n");
- else
+ } else if (ret == -ENOTCONN) {
+ tb_tunnel_dbg(tunnel, "not active yet\n");
+ /*
+			 * We got a bandwidth allocation request but the
+ * tunnel is not yet active. This means that
+ * tb_dp_tunnel_active() is not yet called for
+ * this tunnel. Allow it some time and retry
+ * this request a couple of times.
+ */
+ if (ev->retry < TB_BW_ALLOC_RETRIES) {
+ tb_tunnel_dbg(tunnel,
+ "retrying bandwidth allocation request\n");
+ tb_queue_dp_bandwidth_request(tb, ev->route,
+ ev->port,
+ ev->retry + 1,
+ msecs_to_jiffies(50));
+ } else {
+ tb_tunnel_dbg(tunnel,
+ "run out of retries, failing the request");
+ }
+ } else {
tb_tunnel_warn(tunnel,
"failed to change bandwidth allocation\n");
+ }
} else {
tb_tunnel_dbg(tunnel,
"bandwidth allocation changed to %d/%d Mb/s\n",
@@ -2678,7 +2857,8 @@ unlock:
kfree(ev);
}
-static void tb_queue_dp_bandwidth_request(struct tb *tb, u64 route, u8 port)
+static void tb_queue_dp_bandwidth_request(struct tb *tb, u64 route, u8 port,
+ int retry, unsigned long delay)
{
struct tb_hotplug_event *ev;
@@ -2689,8 +2869,9 @@ static void tb_queue_dp_bandwidth_request(struct tb *tb, u64 route, u8 port)
ev->tb = tb;
ev->route = route;
ev->port = port;
- INIT_WORK(&ev->work, tb_handle_dp_bandwidth_request);
- queue_work(tb->wq, &ev->work);
+ ev->retry = retry;
+ INIT_DELAYED_WORK(&ev->work, tb_handle_dp_bandwidth_request);
+ queue_delayed_work(tb->wq, &ev->work, delay);
}
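
The retry-via-delayed-work conversion above is a self-contained pattern. A sketch under assumed names (try_the_thing() and struct my_event are placeholders, not driver code; system_wq may be system_percpu_wq on newer kernels):

	struct my_event {
		struct delayed_work work;
		int retry;
	};

	static int try_the_thing(void);	/* placeholder for the actual operation */

	static void my_event_fn(struct work_struct *work)
	{
		struct my_event *ev = container_of(work, typeof(*ev), work.work);

		/* Re-queue with a small delay until it sticks or retries run out */
		if (try_the_thing() == -ENOTCONN && ev->retry < 3) {
			ev->retry++;
			queue_delayed_work(system_wq, &ev->work,
					   msecs_to_jiffies(50));
			return;
		}
		kfree(ev);
	}

	static void my_event_queue(void)
	{
		struct my_event *ev = kzalloc(sizeof(*ev), GFP_KERNEL);

		if (!ev)
			return;
		INIT_DELAYED_WORK(&ev->work, my_event_fn);
		queue_delayed_work(system_wq, &ev->work, 0);
	}

Note that container_of() resolves through work.work because the callback receives the work_struct embedded inside the delayed_work.
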
static void tb_handle_notification(struct tb *tb, u64 route,
@@ -2710,7 +2891,7 @@ static void tb_handle_notification(struct tb *tb, u64 route,
if (tb_cfg_ack_notification(tb->ctl, route, error))
tb_warn(tb, "could not ack notification on %llx\n",
route);
- tb_queue_dp_bandwidth_request(tb, route, error->port);
+ tb_queue_dp_bandwidth_request(tb, route, error->port, 0, 0);
break;
default:
@@ -2765,7 +2946,7 @@ static void tb_stop(struct tb *tb)
*/
if (tb_tunnel_is_dma(tunnel))
tb_tunnel_deactivate(tunnel);
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
}
tb_switch_remove(tb->root_switch);
tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
@@ -2873,6 +3054,7 @@ static int tb_start(struct tb *tb, bool reset)
tb_create_usb3_tunnels(tb->root_switch);
/* Add DP IN resources for the root switch */
tb_add_dp_resources(tb->root_switch);
+ tb_switch_enter_redrive(tb->root_switch);
/* Make the discovered switches available to the userspace */
device_for_each_child(&tb->root_switch->dev, NULL,
tb_scan_finalize_switch);
@@ -2888,6 +3070,7 @@ static int tb_suspend_noirq(struct tb *tb)
tb_dbg(tb, "suspending...\n");
tb_disconnect_and_release_dp(tb);
+ tb_switch_exit_redrive(tb->root_switch);
tb_switch_suspend(tb->root_switch, false);
tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
tb_dbg(tb, "suspend finished\n");
@@ -2959,7 +3142,7 @@ static int tb_resume_noirq(struct tb *tb)
if (tb_tunnel_is_usb3(tunnel))
usb3_delay = 500;
tb_tunnel_deactivate(tunnel);
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
}
/* Re-create our tunnels now */
@@ -2970,7 +3153,7 @@ static int tb_resume_noirq(struct tb *tb)
/* Only need to do it once */
usb3_delay = 0;
}
- tb_tunnel_restart(tunnel);
+ tb_tunnel_activate(tunnel);
}
if (!list_empty(&tcm->tunnel_list)) {
/*
@@ -2980,6 +3163,7 @@ static int tb_resume_noirq(struct tb *tb)
tb_dbg(tb, "tunnels restarted, sleeping for 100ms\n");
msleep(100);
}
+ tb_switch_enter_redrive(tb->root_switch);
/* Allow tb_handle_hotplug to progress events */
tcm->hotplug_active = true;
tb_dbg(tb, "resume finished\n");
@@ -3043,6 +3227,12 @@ static int tb_runtime_suspend(struct tb *tb)
struct tb_cm *tcm = tb_priv(tb);
mutex_lock(&tb->lock);
+ /*
+ * The below call only releases DP resources to allow exiting and
+ * re-entering redrive mode.
+ */
+ tb_disconnect_and_release_dp(tb);
+ tb_switch_exit_redrive(tb->root_switch);
tb_switch_suspend(tb->root_switch, true);
tcm->hotplug_active = false;
mutex_unlock(&tb->lock);
@@ -3073,7 +3263,8 @@ static int tb_runtime_resume(struct tb *tb)
tb_free_invalid_tunnels(tb);
tb_restore_children(tb->root_switch);
list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
- tb_tunnel_restart(tunnel);
+ tb_tunnel_activate(tunnel);
+ tb_switch_enter_redrive(tb->root_switch);
tcm->hotplug_active = true;
mutex_unlock(&tb->lock);
@@ -3153,7 +3344,7 @@ static bool tb_apple_add_links(struct tb_nhi *nhi)
if (!pci_is_pcie(pdev))
continue;
if (pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM ||
- !pdev->is_hotplug_bridge)
+ !pdev->is_pciehp)
continue;
link = device_link_add(&pdev->dev, &nhi->pdev->dev,
diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
index 18aae4ccaed5..e96474f17067 100644
--- a/drivers/thunderbolt/tb.h
+++ b/drivers/thunderbolt/tb.h
@@ -9,6 +9,7 @@
#ifndef TB_H_
#define TB_H_
+#include <linux/debugfs.h>
#include <linux/nvmem-provider.h>
#include <linux/pci.h>
#include <linux/thunderbolt.h>
@@ -160,6 +161,7 @@ struct tb_switch_tmu {
* @max_pcie_credits: Router preferred number of buffers for PCIe
* @max_dma_credits: Router preferred number of buffers for DMA/P2P
* @clx: CLx states on the upstream link of the router
+ * @drom_blob: DROM debugfs blob wrapper
*
* When the switch is being added or removed to the domain (other
* switches) you need to have domain lock held.
@@ -212,6 +214,9 @@ struct tb_switch {
unsigned int max_pcie_credits;
unsigned int max_dma_credits;
unsigned int clx;
+#ifdef CONFIG_DEBUG_FS
+ struct debugfs_blob_wrapper drom_blob;
+#endif
};
/**
@@ -303,7 +308,7 @@ struct tb_port {
* struct usb4_port - USB4 port device
* @dev: Device for the port
* @port: Pointer to the lane 0 adapter
- * @can_offline: Does the port have necessary platform support to moved
+ * @can_offline: Does the port have necessary platform support to move
* it into offline mode and back
* @offline: The port is currently in offline mode
* @margining: Pointer to margining structure if enabled
@@ -319,7 +324,7 @@ struct usb4_port {
};
/**
- * tb_retimer: Thunderbolt retimer
+ * struct tb_retimer - Thunderbolt retimer
* @dev: Device for the retimer
* @tb: Pointer to the domain the retimer belongs to
* @index: Retimer index facing the router USB4 port
@@ -329,6 +334,7 @@ struct usb4_port {
* @nvm: Pointer to the NVM if the retimer has one (%NULL otherwise)
* @no_nvm_upgrade: Prevent NVM upgrade of this retimer
* @auth_status: Status of last NVM authentication
+ * @margining: Pointer to margining structure if enabled
*/
struct tb_retimer {
struct device dev;
@@ -340,13 +346,16 @@ struct tb_retimer {
struct tb_nvm *nvm;
bool no_nvm_upgrade;
u32 auth_status;
+#ifdef CONFIG_USB4_DEBUGFS_MARGINING
+ struct tb_margining *margining;
+#endif
};
/**
* struct tb_path_hop - routing information for a tb_path
* @in_port: Ingress port of a switch
* @out_port: Egress port of a switch where the packet is routed out
- * (must be on the same switch than @in_port)
+ * (must be on the same switch as @in_port)
* @in_hop_index: HopID where the path configuration entry is placed in
* the path config space of @in_port.
* @in_counter_index: Used counter index (not used in the driver
@@ -490,9 +499,9 @@ struct tb_path {
* performed. If this returns %-EOPNOTSUPP then the
* native USB4 router operation is called.
* @usb4_switch_nvm_authenticate_status: Optional callback that the CM
- * implementation can be used to
- * return status of USB4 NVM_AUTH
- * router operation.
+ * implementation can use to return
+ * status of USB4 NVM_AUTH router
+ * operation.
*/
struct tb_cm_ops {
int (*driver_ready)(struct tb *tb);
@@ -543,13 +552,14 @@ static inline void *tb_priv(struct tb *tb)
/**
* tb_upstream_port() - return the upstream port of a switch
+ * @sw: Router
*
* Every switch has an upstream port (for the root switch it is the NHI).
*
* During switch alloc/init tb_upstream_port()->remote may be NULL, even for
* non root switches (on the NHI port remote is always NULL).
*
- * Return: Returns the upstream port of the switch.
+ * Return: Pointer to &struct tb_port.
*/
static inline struct tb_port *tb_upstream_port(struct tb_switch *sw)
{
@@ -560,8 +570,8 @@ static inline struct tb_port *tb_upstream_port(struct tb_switch *sw)
* tb_is_upstream_port() - Is the port upstream facing
* @port: Port to check
*
- * Returns true if @port is upstream facing port. In case of dual link
- * ports both return true.
+ * Return: %true if @port is an upstream facing port. In case of dual link
+ * ports, both return %true.
*/
static inline bool tb_is_upstream_port(const struct tb_port *port)
{
@@ -604,7 +614,7 @@ static inline const char *tb_width_name(enum tb_link_width width)
* tb_port_has_remote() - Does the port have switch connected downstream
* @port: Port to check
*
- * Returns true only when the port is primary port and has remote set.
+ * Return: %true only when the port is the primary port and has remote set.
*/
static inline bool tb_port_has_remote(const struct tb_port *port)
{
@@ -795,6 +805,19 @@ static inline void tb_domain_put(struct tb *tb)
put_device(&tb->dev);
}
+/**
+ * tb_domain_event() - Notify userspace about an event in domain
+ * @tb: Domain where event occurred
+ * @envp: Array of uevent environment strings (can be %NULL)
+ *
+ * This function provides a way to notify userspace about any events
+ * that take place in the domain.
+ */
+static inline void tb_domain_event(struct tb *tb, char *envp[])
+{
+ kobject_uevent_env(&tb->dev.kobj, KOBJ_CHANGE, envp);
+}
+
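
A minimal (hypothetical) user of the helper: build a NULL-terminated environment array and let the KOBJ_CHANGE uevent carry it to userspace, where for instance a udev rule can match on it:

	static void my_notify(struct tb *tb)
	{
		char *envp[] = { "MY_EVENT=example", NULL };

		tb_domain_event(tb, envp);
	}
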
struct tb_nvm *tb_nvm_alloc(struct device *dev);
int tb_nvm_read_version(struct tb_nvm *nvm);
int tb_nvm_validate(struct tb_nvm *nvm);
@@ -883,8 +906,9 @@ static inline struct tb_switch *tb_switch_parent(struct tb_switch *sw)
* tb_switch_downstream_port() - Return downstream facing port of parent router
* @sw: Device router pointer
*
- * Only call for device routers. Returns the downstream facing port of
- * the parent router.
+ * Call only for device routers.
+ *
+ * Return: Pointer to &struct tb_port or %NULL in case of failure.
*/
static inline struct tb_port *tb_switch_downstream_port(struct tb_switch *sw)
{
@@ -896,6 +920,8 @@ static inline struct tb_port *tb_switch_downstream_port(struct tb_switch *sw)
/**
* tb_switch_depth() - Returns depth of the connected router
* @sw: Router
+ *
+ * Return: Router depth level as a number.
*/
static inline int tb_switch_depth(const struct tb_switch *sw)
{
@@ -988,6 +1014,9 @@ static inline bool tb_switch_is_tiger_lake(const struct tb_switch *sw)
* is handling @sw this function can be called. It is valid to call this
 * after tb_switch_alloc() and tb_switch_configure() have been called
* (latter only for SW CM case).
+ *
+ * Return: %true if switch is handled by ICM, %false if handled by
+ * software CM.
*/
static inline bool tb_switch_is_icm(const struct tb_switch *sw)
{
@@ -1015,6 +1044,8 @@ int tb_switch_tmu_configure(struct tb_switch *sw, enum tb_switch_tmu_mode mode);
*
* Checks if given router TMU mode is configured to @mode. Note the
* router TMU might not be enabled to this mode.
+ *
+ * Return: %true if TMU mode is equal to @mode, %false otherwise.
*/
static inline bool tb_switch_tmu_is_configured(const struct tb_switch *sw,
enum tb_switch_tmu_mode mode)
@@ -1026,8 +1057,8 @@ static inline bool tb_switch_tmu_is_configured(const struct tb_switch *sw,
* tb_switch_tmu_is_enabled() - Checks if the specified TMU mode is enabled
* @sw: Router whose TMU mode to check
*
- * Return true if hardware TMU configuration matches the requested
- * configuration (and is not %TB_SWITCH_TMU_MODE_OFF).
+ * Return: %true if hardware TMU configuration matches the requested
+ * configuration (and is not %TB_SWITCH_TMU_MODE_OFF), %false otherwise.
*/
static inline bool tb_switch_tmu_is_enabled(const struct tb_switch *sw)
{
@@ -1047,9 +1078,10 @@ int tb_switch_clx_disable(struct tb_switch *sw);
* @clx: The CLx states to check for
*
* Checks if the specified CLx is enabled on the router upstream link.
- * Returns true if any of the given states is enabled.
*
* Not applicable for a host router.
+ *
+ * Return: %true if any of the given states is enabled, %false otherwise.
*/
static inline bool tb_switch_clx_is_enabled(const struct tb_switch *sw,
unsigned int clx)
@@ -1077,11 +1109,11 @@ struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end,
struct tb_port *prev);
/**
- * tb_port_path_direction_downstream() - Checks if path directed downstream
+ * tb_port_path_direction_downstream() - Checks if path is directed downstream
* @src: Source adapter
* @dst: Destination adapter
*
- * Returns %true only if the specified path from source adapter (@src)
+ * Return: %true only if the specified path from source adapter (@src)
* to destination adapter (@dst) is directed downstream.
*/
static inline bool
@@ -1109,7 +1141,7 @@ static inline bool tb_port_use_credit_allocation(const struct tb_port *port)
(p) = tb_next_port_on_path((src), (dst), (p)))
/**
- * tb_for_each_upstream_port_on_path() - Iterate over each upstreamm port on path
+ * tb_for_each_upstream_port_on_path() - Iterate over each upstream port on path
* @src: Source port
* @dst: Destination port
* @p: Port used as iterator
@@ -1210,10 +1242,11 @@ static inline int tb_route_length(u64 route)
/**
* tb_downstream_route() - get route to downstream switch
+ * @port: Port to check
*
* Port must not be the upstream port (otherwise a loop is created).
*
- * Return: Returns a route to the switch behind @port.
+ * Return: Route to the switch behind @port.
*/
static inline u64 tb_downstream_route(struct tb_port *port)
{
@@ -1241,7 +1274,7 @@ static inline struct tb_switch *tb_xdomain_parent(struct tb_xdomain *xd)
* tb_xdomain_downstream_port() - Return downstream facing port of parent router
* @xd: Xdomain pointer
*
- * Returns the downstream port the XDomain is connected to.
+ * Return: Pointer to &struct tb_port or %NULL in case of failure.
*/
static inline struct tb_port *tb_xdomain_downstream_port(struct tb_xdomain *xd)
{
@@ -1269,7 +1302,7 @@ static inline struct tb_retimer *tb_to_retimer(struct device *dev)
* usb4_switch_version() - Returns USB4 version of the router
* @sw: Router to check
*
- * Returns major version of USB4 router (%1 for v1, %2 for v2 and so
+ * Return: Major version of USB4 router (%1 for v1, %2 for v2 and so
 * on). Can be called for pre-USB4 routers too, in which case it returns %0.
*/
static inline unsigned int usb4_switch_version(const struct tb_switch *sw)
@@ -1281,7 +1314,7 @@ static inline unsigned int usb4_switch_version(const struct tb_switch *sw)
* tb_switch_is_usb4() - Is the switch USB4 compliant
* @sw: Switch to check
*
- * Returns true if the @sw is USB4 compliant router, false otherwise.
+ * Return: %true if the @sw is USB4 compliant router, %false otherwise.
*/
static inline bool tb_switch_is_usb4(const struct tb_switch *sw)
{
@@ -1295,7 +1328,7 @@ int usb4_switch_read_uid(struct tb_switch *sw, u64 *uid);
int usb4_switch_drom_read(struct tb_switch *sw, unsigned int address, void *buf,
size_t size);
bool usb4_switch_lane_bonding_possible(struct tb_switch *sw);
-int usb4_switch_set_wake(struct tb_switch *sw, unsigned int flags);
+int usb4_switch_set_wake(struct tb_switch *sw, unsigned int flags, bool runtime);
int usb4_switch_set_sleep(struct tb_switch *sw);
int usb4_switch_nvm_sector_size(struct tb_switch *sw);
int usb4_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf,
@@ -1327,26 +1360,87 @@ int usb4_port_router_offline(struct tb_port *port);
int usb4_port_router_online(struct tb_port *port);
int usb4_port_enumerate_retimers(struct tb_port *port);
bool usb4_port_clx_supported(struct tb_port *port);
-int usb4_port_margining_caps(struct tb_port *port, u32 *caps);
bool usb4_port_asym_supported(struct tb_port *port);
int usb4_port_asym_set_link_width(struct tb_port *port, enum tb_link_width width);
int usb4_port_asym_start(struct tb_port *port);
-int usb4_port_hw_margin(struct tb_port *port, unsigned int lanes,
- unsigned int ber_level, bool timing, bool right_high,
+/**
+ * enum usb4_sb_target - Sideband transaction target
+ * @USB4_SB_TARGET_ROUTER: Target is the router itself
+ * @USB4_SB_TARGET_PARTNER: Target is partner
+ * @USB4_SB_TARGET_RETIMER: Target is retimer
+ */
+enum usb4_sb_target {
+ USB4_SB_TARGET_ROUTER,
+ USB4_SB_TARGET_PARTNER,
+ USB4_SB_TARGET_RETIMER,
+};
+
+int usb4_port_sb_read(struct tb_port *port, enum usb4_sb_target target, u8 index,
+ u8 reg, void *buf, u8 size);
+int usb4_port_sb_write(struct tb_port *port, enum usb4_sb_target target,
+ u8 index, u8 reg, const void *buf, u8 size);
+
+/**
+ * enum usb4_margin_sw_error_counter - Software margining error counter operation
+ * @USB4_MARGIN_SW_ERROR_COUNTER_NOP: No change in counter setup
+ * @USB4_MARGIN_SW_ERROR_COUNTER_CLEAR: Set the error counter to 0, enable counter
+ * @USB4_MARGIN_SW_ERROR_COUNTER_START: Start counter, count from last value
+ * @USB4_MARGIN_SW_ERROR_COUNTER_STOP: Stop counter, do not clear value
+ */
+enum usb4_margin_sw_error_counter {
+ USB4_MARGIN_SW_ERROR_COUNTER_NOP,
+ USB4_MARGIN_SW_ERROR_COUNTER_CLEAR,
+ USB4_MARGIN_SW_ERROR_COUNTER_START,
+ USB4_MARGIN_SW_ERROR_COUNTER_STOP,
+};
+
+enum usb4_margining_lane {
+ USB4_MARGINING_LANE_RX0 = 0,
+ USB4_MARGINING_LANE_RX1 = 1,
+ USB4_MARGINING_LANE_RX2 = 2,
+ USB4_MARGINING_LANE_ALL = 7,
+};
+
+/**
+ * struct usb4_port_margining_params - USB4 margining parameters
+ * @error_counter: Error counter operation for software margining
+ * @ber_level: Current BER level contour value
+ * @lanes: Lanes to enable for the margining operation
+ * @voltage_time_offset: Offset for voltage / time for software margining
+ * @optional_voltage_offset_range: Enable optional extended voltage range
+ * @right_high: %false if left/low margin test is performed, %true if right/high
+ * @upper_eye: %true if margin test is done on upper eye, %false if done on
+ * lower eye
+ * @time: %true if time margining is used instead of voltage
+ */
+struct usb4_port_margining_params {
+ enum usb4_margin_sw_error_counter error_counter;
+ u32 ber_level;
+ enum usb4_margining_lane lanes;
+ u32 voltage_time_offset;
+ bool optional_voltage_offset_range;
+ bool right_high;
+ bool upper_eye;
+ bool time;
+};
+
+int usb4_port_margining_caps(struct tb_port *port, enum usb4_sb_target target,
+ u8 index, u32 *caps, size_t ncaps);
+int usb4_port_hw_margin(struct tb_port *port, enum usb4_sb_target target,
+ u8 index, const struct usb4_port_margining_params *params,
+ u32 *results, size_t nresults);
+int usb4_port_sw_margin(struct tb_port *port, enum usb4_sb_target target,
+ u8 index, const struct usb4_port_margining_params *params,
u32 *results);
-int usb4_port_sw_margin(struct tb_port *port, unsigned int lanes, bool timing,
- bool right_high, u32 counter);
-int usb4_port_sw_margin_errors(struct tb_port *port, u32 *errors);
+int usb4_port_sw_margin_errors(struct tb_port *port, enum usb4_sb_target target,
+ u8 index, u32 *errors);
int usb4_port_retimer_set_inbound_sbtx(struct tb_port *port, u8 index);
int usb4_port_retimer_unset_inbound_sbtx(struct tb_port *port, u8 index);
-int usb4_port_retimer_read(struct tb_port *port, u8 index, u8 reg, void *buf,
- u8 size);
-int usb4_port_retimer_write(struct tb_port *port, u8 index, u8 reg,
- const void *buf, u8 size);
int usb4_port_retimer_is_last(struct tb_port *port, u8 index);
+int usb4_port_retimer_is_cable(struct tb_port *port, u8 index);
int usb4_port_retimer_nvm_sector_size(struct tb_port *port, u8 index);
int usb4_port_retimer_nvm_set_offset(struct tb_port *port, u8 index,
unsigned int address);
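
Putting the new sideband margining API together, a sketch of one hardware margining run on the router's RX0 lane (the values are illustrative only, not a validated configuration):

	static int my_margin_rx0(struct tb_port *port)
	{
		struct usb4_port_margining_params params = {
			.lanes = USB4_MARGINING_LANE_RX0,
			.error_counter = USB4_MARGIN_SW_ERROR_COUNTER_NOP,
			.ber_level = 3,		/* illustrative BER contour value */
			.time = false,		/* voltage margining */
			.right_high = true,	/* right/high side of the eye */
		};
		u32 results[2];

		return usb4_port_hw_margin(port, USB4_SB_TARGET_ROUTER, 0,
					   &params, results, ARRAY_SIZE(results));
	}
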
@@ -1400,6 +1494,7 @@ static inline struct usb4_port *tb_to_usb4_port_device(struct device *dev)
struct usb4_port *usb4_port_device_add(struct tb_port *port);
void usb4_port_device_remove(struct usb4_port *usb4);
int usb4_port_device_resume(struct usb4_port *usb4);
+int usb4_port_index(const struct tb_switch *sw, const struct tb_port *port);
static inline bool usb4_port_device_is_offline(const struct usb4_port *usb4)
{
@@ -1445,6 +1540,8 @@ void tb_xdomain_debugfs_init(struct tb_xdomain *xd);
void tb_xdomain_debugfs_remove(struct tb_xdomain *xd);
void tb_service_debugfs_init(struct tb_service *svc);
void tb_service_debugfs_remove(struct tb_service *svc);
+void tb_retimer_debugfs_init(struct tb_retimer *rt);
+void tb_retimer_debugfs_remove(struct tb_retimer *rt);
#else
static inline void tb_debugfs_init(void) { }
static inline void tb_debugfs_exit(void) { }
@@ -1454,6 +1551,8 @@ static inline void tb_xdomain_debugfs_init(struct tb_xdomain *xd) { }
static inline void tb_xdomain_debugfs_remove(struct tb_xdomain *xd) { }
static inline void tb_service_debugfs_init(struct tb_service *svc) { }
static inline void tb_service_debugfs_remove(struct tb_service *svc) { }
+static inline void tb_retimer_debugfs_init(struct tb_retimer *rt) { }
+static inline void tb_retimer_debugfs_remove(struct tb_retimer *rt) { }
#endif
#endif
diff --git a/drivers/thunderbolt/tb_msgs.h b/drivers/thunderbolt/tb_msgs.h
index a1670a96cbdc..144f7332d5d2 100644
--- a/drivers/thunderbolt/tb_msgs.h
+++ b/drivers/thunderbolt/tb_msgs.h
@@ -118,6 +118,7 @@ enum icm_event_code {
ICM_EVENT_DEVICE_DISCONNECTED = 0x4,
ICM_EVENT_XDOMAIN_CONNECTED = 0x6,
ICM_EVENT_XDOMAIN_DISCONNECTED = 0x7,
+ ICM_EVENT_DP_CONFIG_CHANGED = 0x8,
ICM_EVENT_RTD3_VETO = 0xa,
};
diff --git a/drivers/thunderbolt/tb_regs.h b/drivers/thunderbolt/tb_regs.h
index 4e43b47f9f11..c0bf136236e6 100644
--- a/drivers/thunderbolt/tb_regs.h
+++ b/drivers/thunderbolt/tb_regs.h
@@ -99,7 +99,7 @@ struct tb_cap_extended_long {
} __packed;
/**
- * struct tb_cap_any - Structure capable of hold every capability
+ * struct tb_cap_any - Structure capable of holding every capability
* @basic: Basic capability
* @extended_short: Vendor specific capability
* @extended_long: Vendor specific extended capability
@@ -534,8 +534,8 @@ struct tb_regs_hop {
/*
* Used for Titan Ridge only. Bits are part of the same register: TMU_ADP_CS_6
- * (see above) as in USB4 spec, but these specific bits used for Titan Ridge
- * only and reserved in USB4 spec.
+ * (see above) as in USB4 spec, but these specific bits are used for Titan Ridge
+ * only and are reserved in USB4 spec.
*/
#define TMU_ADP_CS_6_DISABLE_TMU_OBJ_MASK GENMASK(3, 2)
#define TMU_ADP_CS_6_DISABLE_TMU_OBJ_CL1 BIT(2)
diff --git a/drivers/thunderbolt/test.c b/drivers/thunderbolt/test.c
index 9475c6698c7d..1f4318249c22 100644
--- a/drivers/thunderbolt/test.c
+++ b/drivers/thunderbolt/test.c
@@ -1382,8 +1382,8 @@ static void tb_test_tunnel_pcie(struct kunit *test)
KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[0].in_port, up);
KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[1].out_port, down);
- tb_tunnel_free(tunnel2);
- tb_tunnel_free(tunnel1);
+ tb_tunnel_put(tunnel2);
+ tb_tunnel_put(tunnel1);
}
static void tb_test_tunnel_dp(struct kunit *test)
@@ -1406,7 +1406,7 @@ static void tb_test_tunnel_dp(struct kunit *test)
in = &host->ports[5];
out = &dev->ports[13];
- tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
+ tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0, NULL, NULL);
KUNIT_ASSERT_NOT_NULL(test, tunnel);
KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP);
KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
@@ -1421,7 +1421,7 @@ static void tb_test_tunnel_dp(struct kunit *test)
KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 2);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[1].out_port, in);
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
}
static void tb_test_tunnel_dp_chain(struct kunit *test)
@@ -1452,7 +1452,7 @@ static void tb_test_tunnel_dp_chain(struct kunit *test)
in = &host->ports[5];
out = &dev4->ports[14];
- tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
+ tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0, NULL, NULL);
KUNIT_ASSERT_NOT_NULL(test, tunnel);
KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP);
KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
@@ -1467,7 +1467,7 @@ static void tb_test_tunnel_dp_chain(struct kunit *test)
KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 3);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[2].out_port, in);
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
}
static void tb_test_tunnel_dp_tree(struct kunit *test)
@@ -1502,7 +1502,7 @@ static void tb_test_tunnel_dp_tree(struct kunit *test)
in = &dev2->ports[13];
out = &dev5->ports[13];
- tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
+ tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0, NULL, NULL);
KUNIT_ASSERT_NOT_NULL(test, tunnel);
KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP);
KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
@@ -1517,7 +1517,7 @@ static void tb_test_tunnel_dp_tree(struct kunit *test)
KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 4);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[3].out_port, in);
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
}
static void tb_test_tunnel_dp_max_length(struct kunit *test)
@@ -1567,7 +1567,7 @@ static void tb_test_tunnel_dp_max_length(struct kunit *test)
in = &dev6->ports[13];
out = &dev12->ports[13];
- tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
+ tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0, NULL, NULL);
KUNIT_ASSERT_NOT_NULL(test, tunnel);
KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP);
KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
@@ -1597,7 +1597,7 @@ static void tb_test_tunnel_dp_max_length(struct kunit *test)
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[6].out_port,
&host->ports[1]);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[12].out_port, in);
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
}
static void tb_test_tunnel_3dp(struct kunit *test)
@@ -1637,7 +1637,7 @@ static void tb_test_tunnel_3dp(struct kunit *test)
out2 = &dev5->ports[13];
out3 = &dev4->ports[14];
- tunnel1 = tb_tunnel_alloc_dp(NULL, in1, out1, 1, 0, 0);
+ tunnel1 = tb_tunnel_alloc_dp(NULL, in1, out1, 1, 0, 0, NULL, NULL);
KUNIT_ASSERT_TRUE(test, tunnel1 != NULL);
KUNIT_EXPECT_EQ(test, tunnel1->type, TB_TUNNEL_DP);
KUNIT_EXPECT_PTR_EQ(test, tunnel1->src_port, in1);
@@ -1645,7 +1645,7 @@ static void tb_test_tunnel_3dp(struct kunit *test)
KUNIT_ASSERT_EQ(test, tunnel1->npaths, 3);
KUNIT_ASSERT_EQ(test, tunnel1->paths[0]->path_length, 3);
- tunnel2 = tb_tunnel_alloc_dp(NULL, in2, out2, 1, 0, 0);
+ tunnel2 = tb_tunnel_alloc_dp(NULL, in2, out2, 1, 0, 0, NULL, NULL);
KUNIT_ASSERT_TRUE(test, tunnel2 != NULL);
KUNIT_EXPECT_EQ(test, tunnel2->type, TB_TUNNEL_DP);
KUNIT_EXPECT_PTR_EQ(test, tunnel2->src_port, in2);
@@ -1653,7 +1653,7 @@ static void tb_test_tunnel_3dp(struct kunit *test)
KUNIT_ASSERT_EQ(test, tunnel2->npaths, 3);
KUNIT_ASSERT_EQ(test, tunnel2->paths[0]->path_length, 4);
- tunnel3 = tb_tunnel_alloc_dp(NULL, in3, out3, 1, 0, 0);
+ tunnel3 = tb_tunnel_alloc_dp(NULL, in3, out3, 1, 0, 0, NULL, NULL);
KUNIT_ASSERT_TRUE(test, tunnel3 != NULL);
KUNIT_EXPECT_EQ(test, tunnel3->type, TB_TUNNEL_DP);
KUNIT_EXPECT_PTR_EQ(test, tunnel3->src_port, in3);
@@ -1661,8 +1661,8 @@ static void tb_test_tunnel_3dp(struct kunit *test)
KUNIT_ASSERT_EQ(test, tunnel3->npaths, 3);
KUNIT_ASSERT_EQ(test, tunnel3->paths[0]->path_length, 3);
- tb_tunnel_free(tunnel2);
- tb_tunnel_free(tunnel1);
+ tb_tunnel_put(tunnel2);
+ tb_tunnel_put(tunnel1);
}
static void tb_test_tunnel_usb3(struct kunit *test)
@@ -1716,8 +1716,8 @@ static void tb_test_tunnel_usb3(struct kunit *test)
KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[0].in_port, up);
KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[1].out_port, down);
- tb_tunnel_free(tunnel2);
- tb_tunnel_free(tunnel1);
+ tb_tunnel_put(tunnel2);
+ tb_tunnel_put(tunnel1);
}
static void tb_test_tunnel_port_on_path(struct kunit *test)
@@ -1750,7 +1750,7 @@ static void tb_test_tunnel_port_on_path(struct kunit *test)
in = &dev2->ports[13];
out = &dev5->ports[13];
- dp_tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
+ dp_tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0, NULL, NULL);
KUNIT_ASSERT_NOT_NULL(test, dp_tunnel);
KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, in));
@@ -1783,7 +1783,7 @@ static void tb_test_tunnel_port_on_path(struct kunit *test)
port = &dev4->ports[1];
KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));
- tb_tunnel_free(dp_tunnel);
+ tb_tunnel_put(dp_tunnel);
}
static void tb_test_tunnel_dma(struct kunit *test)
@@ -1826,7 +1826,7 @@ static void tb_test_tunnel_dma(struct kunit *test)
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].out_port, port);
KUNIT_EXPECT_EQ(test, tunnel->paths[1]->hops[0].next_hop_index, 8);
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
}
static void tb_test_tunnel_dma_rx(struct kunit *test)
@@ -1863,7 +1863,7 @@ static void tb_test_tunnel_dma_rx(struct kunit *test)
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].out_port, nhi);
KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].next_hop_index, 2);
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
}
static void tb_test_tunnel_dma_tx(struct kunit *test)
@@ -1900,7 +1900,7 @@ static void tb_test_tunnel_dma_tx(struct kunit *test)
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].out_port, port);
KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].next_hop_index, 15);
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
}
static void tb_test_tunnel_dma_chain(struct kunit *test)
@@ -1966,7 +1966,7 @@ static void tb_test_tunnel_dma_chain(struct kunit *test)
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[2].out_port, port);
KUNIT_EXPECT_EQ(test, tunnel->paths[1]->hops[2].next_hop_index, 8);
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
}
static void tb_test_tunnel_dma_match(struct kunit *test)
@@ -1993,7 +1993,7 @@ static void tb_test_tunnel_dma_match(struct kunit *test)
KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, -1));
KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 8, -1, 8, -1));
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 15, 1, -1, -1);
KUNIT_ASSERT_NOT_NULL(test, tunnel);
@@ -2005,7 +2005,7 @@ static void tb_test_tunnel_dma_match(struct kunit *test)
KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, 1));
KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 15, 11, -1, -1));
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, -1, -1, 15, 11);
KUNIT_ASSERT_NOT_NULL(test, tunnel);
@@ -2017,7 +2017,7 @@ static void tb_test_tunnel_dma_match(struct kunit *test)
KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, -1, -1, 10, 11));
KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 15, 11, -1, -1));
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
}
static void tb_test_credit_alloc_legacy_not_bonded(struct kunit *test)
@@ -2050,7 +2050,7 @@ static void tb_test_credit_alloc_legacy_not_bonded(struct kunit *test)
KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 16U);
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
}
static void tb_test_credit_alloc_legacy_bonded(struct kunit *test)
@@ -2083,7 +2083,7 @@ static void tb_test_credit_alloc_legacy_bonded(struct kunit *test)
KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U);
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
}
static void tb_test_credit_alloc_pcie(struct kunit *test)
@@ -2116,7 +2116,7 @@ static void tb_test_credit_alloc_pcie(struct kunit *test)
KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 64U);
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
}
static void tb_test_credit_alloc_without_dp(struct kunit *test)
@@ -2166,7 +2166,7 @@ static void tb_test_credit_alloc_without_dp(struct kunit *test)
KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 64U);
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
}
static void tb_test_credit_alloc_dp(struct kunit *test)
@@ -2182,7 +2182,7 @@ static void tb_test_credit_alloc_dp(struct kunit *test)
in = &host->ports[5];
out = &dev->ports[14];
- tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
+ tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0, NULL, NULL);
KUNIT_ASSERT_NOT_NULL(test, tunnel);
KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)3);
@@ -2210,7 +2210,7 @@ static void tb_test_credit_alloc_dp(struct kunit *test)
KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
}
static void tb_test_credit_alloc_usb3(struct kunit *test)
@@ -2243,7 +2243,7 @@ static void tb_test_credit_alloc_usb3(struct kunit *test)
KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U);
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
}
static void tb_test_credit_alloc_dma(struct kunit *test)
@@ -2279,7 +2279,7 @@ static void tb_test_credit_alloc_dma(struct kunit *test)
KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
}
static void tb_test_credit_alloc_dma_multiple(struct kunit *test)
@@ -2356,7 +2356,7 @@ static void tb_test_credit_alloc_dma_multiple(struct kunit *test)
* Release the first DMA tunnel. That should make 14 buffers
* available for the next tunnel.
*/
- tb_tunnel_free(tunnel1);
+ tb_tunnel_put(tunnel1);
tunnel3 = tb_tunnel_alloc_dma(NULL, nhi, port, 10, 3, 10, 3);
KUNIT_ASSERT_NOT_NULL(test, tunnel3);
@@ -2375,8 +2375,8 @@ static void tb_test_credit_alloc_dma_multiple(struct kunit *test)
KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
- tb_tunnel_free(tunnel3);
- tb_tunnel_free(tunnel2);
+ tb_tunnel_put(tunnel3);
+ tb_tunnel_put(tunnel2);
}
static struct tb_tunnel *TB_TEST_PCIE_TUNNEL(struct kunit *test,
@@ -2418,7 +2418,7 @@ static struct tb_tunnel *TB_TEST_DP_TUNNEL1(struct kunit *test,
in = &host->ports[5];
out = &dev->ports[13];
- dp_tunnel1 = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
+ dp_tunnel1 = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0, NULL, NULL);
KUNIT_ASSERT_NOT_NULL(test, dp_tunnel1);
KUNIT_ASSERT_EQ(test, dp_tunnel1->npaths, (size_t)3);
@@ -2455,7 +2455,7 @@ static struct tb_tunnel *TB_TEST_DP_TUNNEL2(struct kunit *test,
in = &host->ports[6];
out = &dev->ports[14];
- dp_tunnel2 = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
+ dp_tunnel2 = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0, NULL, NULL);
KUNIT_ASSERT_NOT_NULL(test, dp_tunnel2);
KUNIT_ASSERT_EQ(test, dp_tunnel2->npaths, (size_t)3);
@@ -2595,12 +2595,12 @@ static void tb_test_credit_alloc_all(struct kunit *test)
dma_tunnel1 = TB_TEST_DMA_TUNNEL1(test, host, dev);
dma_tunnel2 = TB_TEST_DMA_TUNNEL2(test, host, dev);
- tb_tunnel_free(dma_tunnel2);
- tb_tunnel_free(dma_tunnel1);
- tb_tunnel_free(usb3_tunnel);
- tb_tunnel_free(dp_tunnel2);
- tb_tunnel_free(dp_tunnel1);
- tb_tunnel_free(pcie_tunnel);
+ tb_tunnel_put(dma_tunnel2);
+ tb_tunnel_put(dma_tunnel1);
+ tb_tunnel_put(usb3_tunnel);
+ tb_tunnel_put(dp_tunnel2);
+ tb_tunnel_put(dp_tunnel1);
+ tb_tunnel_put(pcie_tunnel);
}
static const u32 root_directory[] = {
diff --git a/drivers/thunderbolt/tmu.c b/drivers/thunderbolt/tmu.c
index 9a259c72e5a7..cf779874c675 100644
--- a/drivers/thunderbolt/tmu.c
+++ b/drivers/thunderbolt/tmu.c
@@ -400,11 +400,13 @@ static int tmu_mode_init(struct tb_switch *sw)
/**
* tb_switch_tmu_init() - Initialize switch TMU structures
- * @sw: Switch to initialized
+ * @sw: Switch to be initialized
*
* This function must be called before other TMU related functions to
- * makes the internal structures are filled in correctly. Does not
+ * make sure the internal structures are filled in correctly. Does not
* change any hardware configuration.
+ *
+ * Return: %0 on success, negative errno otherwise.
*/
int tb_switch_tmu_init(struct tb_switch *sw)
{
@@ -439,6 +441,8 @@ int tb_switch_tmu_init(struct tb_switch *sw)
* @sw: Switch whose time to update
*
* Updates switch local time using time posting procedure.
+ *
+ * Return: %0 on success, negative errno otherwise.
*/
int tb_switch_tmu_post_time(struct tb_switch *sw)
{
@@ -555,6 +559,8 @@ static int disable_enhanced(struct tb_port *up, struct tb_port *down)
* @sw: Switch whose TMU to disable
*
* Turns off TMU of @sw if it is enabled. If not enabled does nothing.
+ *
+ * Return: %0 on success, negative errno otherwise.
*/
int tb_switch_tmu_disable(struct tb_switch *sw)
{
@@ -938,6 +944,8 @@ out:
* Enables TMU of a router to be in uni-directional Normal/HiFi or
* bi-directional HiFi mode. Calling tb_switch_tmu_configure() is
* required before calling this function.
+ *
+ * Return: %0 on success, negative errno otherwise.
*/
int tb_switch_tmu_enable(struct tb_switch *sw)
{
@@ -1017,9 +1025,11 @@ int tb_switch_tmu_enable(struct tb_switch *sw)
* Selects the TMU mode that is enabled when tb_switch_tmu_enable() is
* next called.
*
- * Returns %0 in success and negative errno otherwise. Specifically
- * returns %-EOPNOTSUPP if the requested mode is not possible (not
- * supported by the router and/or topology).
+ * Return:
+ * * %0 - On success.
+ * * %-EOPNOTSUPP - If the requested mode is not possible (not supported by
+ * the router and/or topology).
+ * * Negative errno - Another error occurred.
*/
int tb_switch_tmu_configure(struct tb_switch *sw, enum tb_switch_tmu_mode mode)
{
diff --git a/drivers/thunderbolt/tunnel.c b/drivers/thunderbolt/tunnel.c
index 41cf6378ad25..9fa95c595ecc 100644
--- a/drivers/thunderbolt/tunnel.c
+++ b/drivers/thunderbolt/tunnel.c
@@ -70,6 +70,24 @@
#define USB4_V2_PCI_MIN_BANDWIDTH (1500 * TB_PCI_WEIGHT)
#define USB4_V2_USB3_MIN_BANDWIDTH (1500 * TB_USB3_WEIGHT)
+/*
+ * According to the VESA spec, the DPRX negotiation shall complete in 5
+ * seconds after the tunnel is established. Since at least i915 can
+ * runtime suspend if there is nothing connected, and it polls any new
+ * connections every 10 seconds, we use 12 seconds here.
+ *
+ * These are in ms.
+ */
+#define TB_DPRX_TIMEOUT 12000
+#define TB_DPRX_WAIT_TIMEOUT 25
+#define TB_DPRX_POLL_DELAY 50
+
+static int dprx_timeout = TB_DPRX_TIMEOUT;
+module_param(dprx_timeout, int, 0444);
+MODULE_PARM_DESC(dprx_timeout,
+ "DPRX capability read timeout in ms, -1 waits forever (default: "
+ __MODULE_STRING(TB_DPRX_TIMEOUT) ")");
+
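
Since the parameter is mode 0444 it can only be set at load time, for example (assuming thunderbolt is built as a module):

	modprobe thunderbolt dprx_timeout=-1

or via the kernel command line as thunderbolt.dprx_timeout=-1 when built in.
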
static unsigned int dma_credits = TB_DMA_CREDITS;
module_param(dma_credits, uint, 0444);
MODULE_PARM_DESC(dma_credits, "specify custom credits for DMA tunnels (default: "
@@ -82,6 +100,17 @@ MODULE_PARM_DESC(bw_alloc_mode,
static const char * const tb_tunnel_names[] = { "PCI", "DP", "DMA", "USB3" };
+static const char * const tb_event_names[] = {
+ [TB_TUNNEL_ACTIVATED] = "activated",
+ [TB_TUNNEL_CHANGED] = "changed",
+ [TB_TUNNEL_DEACTIVATED] = "deactivated",
+ [TB_TUNNEL_LOW_BANDWIDTH] = "low bandwidth",
+ [TB_TUNNEL_NO_BANDWIDTH] = "insufficient bandwidth",
+};
+
+/* Synchronizes kref_get()/put() of struct tb_tunnel */
+static DEFINE_MUTEX(tb_tunnel_lock);
+
static inline unsigned int tb_usable_credits(const struct tb_port *port)
{
return port->total_credits - port->ctl_credits;
@@ -92,6 +121,8 @@ static inline unsigned int tb_usable_credits(const struct tb_port *port)
* @port: Lane adapter to check
* @max_dp_streams: If non-%NULL stores maximum number of simultaneous DP
* streams possible through this lane adapter
+ *
+ * Return: Number of available credits.
*/
static unsigned int tb_available_credits(const struct tb_port *port,
size_t *max_dp_streams)
@@ -155,7 +186,7 @@ static struct tb_tunnel *tb_tunnel_alloc(struct tb *tb, size_t npaths,
tunnel->paths = kcalloc(npaths, sizeof(tunnel->paths[0]), GFP_KERNEL);
if (!tunnel->paths) {
- tb_tunnel_free(tunnel);
+ kfree(tunnel);
return NULL;
}
@@ -163,16 +194,114 @@ static struct tb_tunnel *tb_tunnel_alloc(struct tb *tb, size_t npaths,
tunnel->tb = tb;
tunnel->npaths = npaths;
tunnel->type = type;
+ kref_init(&tunnel->kref);
return tunnel;
}
+static void tb_tunnel_get(struct tb_tunnel *tunnel)
+{
+ mutex_lock(&tb_tunnel_lock);
+ kref_get(&tunnel->kref);
+ mutex_unlock(&tb_tunnel_lock);
+}
+
+static void tb_tunnel_destroy(struct kref *kref)
+{
+ struct tb_tunnel *tunnel = container_of(kref, typeof(*tunnel), kref);
+ int i;
+
+ if (tunnel->destroy)
+ tunnel->destroy(tunnel);
+
+ for (i = 0; i < tunnel->npaths; i++) {
+ if (tunnel->paths[i])
+ tb_path_free(tunnel->paths[i]);
+ }
+
+ kfree(tunnel->paths);
+ kfree(tunnel);
+}
+
+void tb_tunnel_put(struct tb_tunnel *tunnel)
+{
+ mutex_lock(&tb_tunnel_lock);
+ kref_put(&tunnel->kref, tb_tunnel_destroy);
+ mutex_unlock(&tb_tunnel_lock);
+}
+
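A note on why get/put are serialized: kref alone does not make lookup-then-get safe. Without the mutex the following interleaving (illustrative only) could take kref_get() on memory the final put is about to free:

	CPU0				CPU1
	tb_tunnel_put()
	  kref_put() -> refcount 0
					tb_tunnel_get()
					  kref_get() on dying tunnel
	  tb_tunnel_destroy()

Holding tb_tunnel_lock across both operations ensures a concurrent get cannot race with the final put dropping the last reference.
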
+/**
+ * tb_tunnel_event() - Notify userspace about tunneling event
+ * @tb: Domain where the event occurred
+ * @event: Event that happened
+ * @type: Type of the tunnel in question
+ * @src_port: Tunnel source port (can be %NULL)
+ * @dst_port: Tunnel destination port (can be %NULL)
+ *
+ * Notifies userspace about tunneling @event in the domain. The tunnel
+ * does not need to exist (e.g. the tunnel was not activated because
+ * there is not enough bandwidth). If @src_port and @dst_port are
+ * given, fills in the full %TUNNEL_DETAILS environment variable.
+ * Otherwise uses the shorter one (just the tunnel type).
+ */
+void tb_tunnel_event(struct tb *tb, enum tb_tunnel_event event,
+ enum tb_tunnel_type type,
+ const struct tb_port *src_port,
+ const struct tb_port *dst_port)
+{
+ char *envp[3] = { NULL };
+
+ if (WARN_ON_ONCE(event >= ARRAY_SIZE(tb_event_names)))
+ return;
+ if (WARN_ON_ONCE(type >= ARRAY_SIZE(tb_tunnel_names)))
+ return;
+
+ envp[0] = kasprintf(GFP_KERNEL, "TUNNEL_EVENT=%s", tb_event_names[event]);
+ if (!envp[0])
+ return;
+
+ if (src_port != NULL && dst_port != NULL) {
+ envp[1] = kasprintf(GFP_KERNEL, "TUNNEL_DETAILS=%llx:%u <-> %llx:%u (%s)",
+ tb_route(src_port->sw), src_port->port,
+ tb_route(dst_port->sw), dst_port->port,
+ tb_tunnel_names[type]);
+ } else {
+ envp[1] = kasprintf(GFP_KERNEL, "TUNNEL_DETAILS=(%s)",
+ tb_tunnel_names[type]);
+ }
+
+ if (envp[1])
+ tb_domain_event(tb, envp);
+
+ kfree(envp[1]);
+ kfree(envp[0]);
+}
+
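With the formats above, a DP tunnel activation surfaces to userspace roughly like this (route and adapter values are hypothetical), for example as shown by udevadm monitor --property:

	ACTION=change
	TUNNEL_EVENT=activated
	TUNNEL_DETAILS=0:5 <-> 1:13 (DP)
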
+static inline void tb_tunnel_set_active(struct tb_tunnel *tunnel, bool active)
+{
+ if (active) {
+ tunnel->state = TB_TUNNEL_ACTIVE;
+ tb_tunnel_event(tunnel->tb, TB_TUNNEL_ACTIVATED, tunnel->type,
+ tunnel->src_port, tunnel->dst_port);
+ } else {
+ tunnel->state = TB_TUNNEL_INACTIVE;
+ tb_tunnel_event(tunnel->tb, TB_TUNNEL_DEACTIVATED, tunnel->type,
+ tunnel->src_port, tunnel->dst_port);
+ }
+}
+
+static inline void tb_tunnel_changed(struct tb_tunnel *tunnel)
+{
+ tb_tunnel_event(tunnel->tb, TB_TUNNEL_CHANGED, tunnel->type,
+ tunnel->src_port, tunnel->dst_port);
+}
+
static int tb_pci_set_ext_encapsulation(struct tb_tunnel *tunnel, bool enable)
{
struct tb_port *port = tb_upstream_port(tunnel->dst_port->sw);
int ret;
- /* Only supported of both routers are at least USB4 v2 */
+ /* Only supported if both routers are at least USB4 v2 */
if ((usb4_switch_version(tunnel->src_port->sw) < 2) ||
(usb4_switch_version(tunnel->dst_port->sw) < 2))
return 0;
@@ -288,8 +417,9 @@ static int tb_pci_init_path(struct tb_path *path)
* @alloc_hopid: Allocate HopIDs from visited ports
*
* If @down adapter is active, follows the tunnel to the PCIe upstream
- * adapter and back. Returns the discovered tunnel or %NULL if there was
- * no tunnel.
+ * adapter and back.
+ *
+ * Return: Pointer to &struct tb_tunnel or %NULL if there was no tunnel.
*/
struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down,
bool alloc_hopid)
@@ -355,7 +485,7 @@ struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down,
err_deactivate:
tb_tunnel_deactivate(tunnel);
err_free:
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
return NULL;
}
@@ -369,7 +499,7 @@ err_free:
* Allocate a PCI tunnel. The ports must be of type TB_TYPE_PCIE_UP and
* TB_TYPE_PCIE_DOWN.
*
- * Return: Returns a tb_tunnel on success or NULL on failure.
+ * Return: Pointer to &struct tb_tunnel or %NULL on failure.
*/
struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
struct tb_port *down)
@@ -404,7 +534,7 @@ struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
return tunnel;
err_free:
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
return NULL;
}
@@ -416,9 +546,12 @@ err_free:
*
* Can be called to any connected lane 0 adapter to find out how much
* bandwidth needs to be left in reserve for possible PCIe bulk traffic.
- * Returns true if there is something to be reserved and writes the
- * amount to @reserved_down/@reserved_up. Otherwise returns false and
- * does not touch the parameters.
+ *
+ * Return:
+ * * %true - If there is something to be reserved. Writes the amount to
+ * @reserved_down/@reserved_up.
+ * * %false - Nothing to be reserved. Leaves @reserved_down/@reserved_up
+ * unmodified.
*/
bool tb_tunnel_reserved_pci(struct tb_port *port, int *reserved_up,
int *reserved_down)
@@ -851,7 +984,7 @@ static int tb_dp_bandwidth_alloc_mode_enable(struct tb_tunnel *tunnel)
return 0;
}
-static int tb_dp_init(struct tb_tunnel *tunnel)
+static int tb_dp_pre_activate(struct tb_tunnel *tunnel)
{
struct tb_port *in = tunnel->src_port;
struct tb_switch *sw = in->sw;
@@ -877,7 +1010,7 @@ static int tb_dp_init(struct tb_tunnel *tunnel)
return tb_dp_bandwidth_alloc_mode_enable(tunnel);
}
-static void tb_dp_deinit(struct tb_tunnel *tunnel)
+static void tb_dp_post_deactivate(struct tb_tunnel *tunnel)
{
struct tb_port *in = tunnel->src_port;
@@ -889,6 +1022,96 @@ static void tb_dp_deinit(struct tb_tunnel *tunnel)
}
}
+static ktime_t dprx_timeout_to_ktime(int timeout_msec)
+{
+ return timeout_msec >= 0 ?
+ ktime_add_ms(ktime_get(), timeout_msec) : KTIME_MAX;
+}
+
+static int tb_dp_wait_dprx(struct tb_tunnel *tunnel, int timeout_msec)
+{
+ ktime_t timeout = dprx_timeout_to_ktime(timeout_msec);
+ struct tb_port *in = tunnel->src_port;
+
+ /*
+ * Wait for DPRX done. Normally it should be already set for
+ * active tunnel.
+ */
+ do {
+ u32 val;
+ int ret;
+
+ ret = tb_port_read(in, &val, TB_CFG_PORT,
+ in->cap_adap + DP_COMMON_CAP, 1);
+ if (ret)
+ return ret;
+
+ if (val & DP_COMMON_CAP_DPRX_DONE)
+ return 0;
+
+ usleep_range(100, 150);
+ } while (ktime_before(ktime_get(), timeout));
+
+ tb_tunnel_dbg(tunnel, "DPRX read timeout\n");
+ return -ETIMEDOUT;
+}
+
+static void tb_dp_dprx_work(struct work_struct *work)
+{
+ struct tb_tunnel *tunnel = container_of(work, typeof(*tunnel), dprx_work.work);
+ struct tb *tb = tunnel->tb;
+
+ if (!tunnel->dprx_canceled) {
+ mutex_lock(&tb->lock);
+ if (tb_dp_is_usb4(tunnel->src_port->sw) &&
+ tb_dp_wait_dprx(tunnel, TB_DPRX_WAIT_TIMEOUT)) {
+ if (ktime_before(ktime_get(), tunnel->dprx_timeout)) {
+ queue_delayed_work(tb->wq, &tunnel->dprx_work,
+ msecs_to_jiffies(TB_DPRX_POLL_DELAY));
+ mutex_unlock(&tb->lock);
+ return;
+ }
+ } else {
+ tb_tunnel_set_active(tunnel, true);
+ }
+ mutex_unlock(&tb->lock);
+ }
+
+ if (tunnel->callback)
+ tunnel->callback(tunnel, tunnel->callback_data);
+ tb_tunnel_put(tunnel);
+}
+
+static int tb_dp_dprx_start(struct tb_tunnel *tunnel)
+{
+ /*
+ * Bump up the reference to keep the tunnel around. It will be
+ * dropped in tb_dp_dprx_stop() once the tunnel is deactivated.
+ */
+ tb_tunnel_get(tunnel);
+
+ tunnel->dprx_started = true;
+
+ if (tunnel->callback) {
+ tunnel->dprx_timeout = dprx_timeout_to_ktime(dprx_timeout);
+ queue_delayed_work(tunnel->tb->wq, &tunnel->dprx_work, 0);
+ return -EINPROGRESS;
+ }
+
+ return tb_dp_is_usb4(tunnel->src_port->sw) ?
+ tb_dp_wait_dprx(tunnel, dprx_timeout) : 0;
+}
+
+static void tb_dp_dprx_stop(struct tb_tunnel *tunnel)
+{
+ if (tunnel->dprx_started) {
+ tunnel->dprx_started = false;
+ tunnel->dprx_canceled = true;
+ if (cancel_delayed_work(&tunnel->dprx_work))
+ tb_tunnel_put(tunnel);
+ }
+}
+
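The reference taken in tb_dp_dprx_start() is dropped on exactly one path: either tb_dp_dprx_work() runs to completion and ends with tb_tunnel_put(), or tb_dp_dprx_stop() wins the cancel_delayed_work() race (it returns true only when the pending work was dequeued before running) and drops the reference itself.
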
static int tb_dp_activate(struct tb_tunnel *tunnel, bool active)
{
int ret;
@@ -910,6 +1133,7 @@ static int tb_dp_activate(struct tb_tunnel *tunnel, bool active)
paths[TB_DP_AUX_PATH_IN]->hops[0].in_hop_index,
paths[TB_DP_AUX_PATH_OUT]->hops[last].next_hop_index);
} else {
+ tb_dp_dprx_stop(tunnel);
tb_dp_port_hpd_clear(tunnel->src_port);
tb_dp_port_set_hops(tunnel->src_port, 0, 0, 0);
if (tb_port_is_dpout(tunnel->dst_port))
@@ -920,10 +1144,13 @@ static int tb_dp_activate(struct tb_tunnel *tunnel, bool active)
if (ret)
return ret;
- if (tb_port_is_dpout(tunnel->dst_port))
- return tb_dp_port_enable(tunnel->dst_port, active);
+ if (tb_port_is_dpout(tunnel->dst_port)) {
+ ret = tb_dp_port_enable(tunnel->dst_port, active);
+ if (ret)
+ return ret;
+ }
- return 0;
+ return active ? tb_dp_dprx_start(tunnel) : 0;
}
/**
@@ -931,7 +1158,8 @@ static int tb_dp_activate(struct tb_tunnel *tunnel, bool active)
* @tunnel: DP tunnel to check
* @max_bw_rounded: Maximum bandwidth in Mb/s rounded up to the next granularity
*
- * Returns maximum possible bandwidth for this tunnel in Mb/s.
+ * Return: Maximum possible bandwidth for this tunnel in Mb/s, negative errno
+ * in case of failure.
*/
static int tb_dp_bandwidth_mode_maximum_bandwidth(struct tb_tunnel *tunnel,
int *max_bw_rounded)
@@ -942,8 +1170,8 @@ static int tb_dp_bandwidth_mode_maximum_bandwidth(struct tb_tunnel *tunnel,
/*
* DP IN adapter DP_LOCAL_CAP gets updated to the lowest AUX
- * read parameter values so this so we can use this to determine
- * the maximum possible bandwidth over this link.
+ * read parameter values so we can use this to determine the
+ * maximum possible bandwidth over this link.
*
* See USB4 v2 spec 1.0 10.4.4.5.
*/
@@ -1076,35 +1304,6 @@ static int tb_dp_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up,
return 0;
}
-static int tb_dp_wait_dprx(struct tb_tunnel *tunnel, int timeout_msec)
-{
- ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
- struct tb_port *in = tunnel->src_port;
-
- /*
- * Wait for DPRX done. Normally it should be already set for
- * active tunnel.
- */
- do {
- u32 val;
- int ret;
-
- ret = tb_port_read(in, &val, TB_CFG_PORT,
- in->cap_adap + DP_COMMON_CAP, 1);
- if (ret)
- return ret;
-
- if (val & DP_COMMON_CAP_DPRX_DONE) {
- tb_tunnel_dbg(tunnel, "DPRX read done\n");
- return 0;
- }
- usleep_range(100, 150);
- } while (ktime_before(ktime_get(), timeout));
-
- tb_tunnel_dbg(tunnel, "DPRX read timeout\n");
- return -ETIMEDOUT;
-}
-
/* Read cap from tunnel DP IN */
static int tb_dp_read_cap(struct tb_tunnel *tunnel, unsigned int cap, u32 *rate,
u32 *lanes)
@@ -1168,32 +1367,39 @@ static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
int ret;
if (tb_dp_is_usb4(sw)) {
- /*
- * On USB4 routers check if the bandwidth allocation
- * mode is enabled first and then read the bandwidth
- * through those registers.
- */
- ret = tb_dp_bandwidth_mode_consumed_bandwidth(tunnel, consumed_up,
- consumed_down);
- if (ret < 0) {
- if (ret != -EOPNOTSUPP)
+ ret = tb_dp_wait_dprx(tunnel, 0);
+ if (ret) {
+ if (ret == -ETIMEDOUT) {
+ /*
+					 * While we wait for DPRX to complete,
+					 * the tunnel consumes as much as was
+					 * reserved for it initially.
+ */
+ ret = tb_dp_read_cap(tunnel, DP_REMOTE_CAP,
+ &rate, &lanes);
+ if (ret)
+ return ret;
+ } else {
+ return ret;
+ }
+ } else {
+ /*
+ * On USB4 routers check if the bandwidth allocation
+ * mode is enabled first and then read the bandwidth
+ * through those registers.
+ */
+ ret = tb_dp_bandwidth_mode_consumed_bandwidth(tunnel, consumed_up,
+ consumed_down);
+ if (ret < 0) {
+ if (ret != -EOPNOTSUPP)
+ return ret;
+ } else if (!ret) {
+ return 0;
+ }
+ ret = tb_dp_read_cap(tunnel, DP_COMMON_CAP, &rate, &lanes);
+ if (ret)
return ret;
- } else if (!ret) {
- return 0;
}
- /*
- * Then see if the DPRX negotiation is ready and if yes
- * return that bandwidth (it may be smaller than the
- * reduced one). According to VESA spec, the DPRX
- * negotiation shall compete in 5 seconds after tunnel
- * established. We give it 100ms extra just in case.
- */
- ret = tb_dp_wait_dprx(tunnel, 5100);
- if (ret)
- return ret;
- ret = tb_dp_read_cap(tunnel, DP_COMMON_CAP, &rate, &lanes);
- if (ret)
- return ret;
} else if (sw->generation >= 2) {
ret = tb_dp_read_cap(tunnel, DP_REMOTE_CAP, &rate, &lanes);
if (ret)
@@ -1349,7 +1555,7 @@ static void tb_dp_dump(struct tb_tunnel *tunnel)
- * and back. Returns the discovered tunnel or %NULL if there was no
- * tunnel.
+ * and back.
*
- * Return: DP tunnel or %NULL if no tunnel found.
+ * Return: Pointer to &struct tb_tunnel or %NULL if no tunnel found.
*/
struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in,
bool alloc_hopid)
@@ -1365,9 +1571,9 @@ struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in,
if (!tunnel)
return NULL;
- tunnel->init = tb_dp_init;
- tunnel->deinit = tb_dp_deinit;
+ tunnel->pre_activate = tb_dp_pre_activate;
tunnel->activate = tb_dp_activate;
+ tunnel->post_deactivate = tb_dp_post_deactivate;
tunnel->maximum_bandwidth = tb_dp_maximum_bandwidth;
tunnel->allocated_bandwidth = tb_dp_allocated_bandwidth;
tunnel->alloc_bandwidth = tb_dp_alloc_bandwidth;
@@ -1424,7 +1630,7 @@ struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in,
err_deactivate:
tb_tunnel_deactivate(tunnel);
err_free:
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
return NULL;
}
@@ -1439,15 +1645,24 @@ err_free:
* %0 if no available bandwidth.
* @max_down: Maximum available downstream bandwidth for the DP tunnel.
* %0 if no available bandwidth.
+ * @callback: Optional callback that is called when the DP tunnel is
+ * fully activated (or there is an error)
+ * @callback_data: Optional data for @callback
*
* Allocates a tunnel between @in and @out that is capable of tunneling
- * Display Port traffic.
+ * Display Port traffic. If @callback is not %NULL it will be called
+ * after tb_tunnel_activate() once the tunnel has been fully activated.
+ * The callback can call tb_tunnel_is_active() to check whether the
+ * activation was successful (%false means there was an issue).
+ * The @callback is called without @tb->lock held.
*
- * Return: Returns a tb_tunnel on success or NULL on failure.
+ * Return: Pointer to &struct tb_tunnel or %NULL in case of failure.
*/
struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
struct tb_port *out, int link_nr,
- int max_up, int max_down)
+ int max_up, int max_down,
+ void (*callback)(struct tb_tunnel *, void *),
+ void *callback_data)
{
struct tb_tunnel *tunnel;
struct tb_path **paths;
@@ -1461,9 +1676,9 @@ struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
if (!tunnel)
return NULL;
- tunnel->init = tb_dp_init;
- tunnel->deinit = tb_dp_deinit;
+ tunnel->pre_activate = tb_dp_pre_activate;
tunnel->activate = tb_dp_activate;
+ tunnel->post_deactivate = tb_dp_post_deactivate;
tunnel->maximum_bandwidth = tb_dp_maximum_bandwidth;
tunnel->allocated_bandwidth = tb_dp_allocated_bandwidth;
tunnel->alloc_bandwidth = tb_dp_alloc_bandwidth;
@@ -1472,6 +1687,9 @@ struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
tunnel->dst_port = out;
tunnel->max_up = max_up;
tunnel->max_down = max_down;
+ tunnel->callback = callback;
+ tunnel->callback_data = callback_data;
+ INIT_DELAYED_WORK(&tunnel->dprx_work, tb_dp_dprx_work);
paths = tunnel->paths;
pm_support = usb4_switch_version(in->sw) >= 2;
@@ -1500,7 +1718,7 @@ struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
return tunnel;
err_free:
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
return NULL;
}
@@ -1565,8 +1783,8 @@ static int tb_dma_init_rx_path(struct tb_path *path, unsigned int credits)
/*
* First lane adapter is the one connected to the remote host.
- * We don't tunnel other traffic over this link so can use all
- * the credits (except the ones reserved for control traffic).
+ * We don't tunnel other traffic over this link so we can use
+ * all the credits (except the ones reserved for control traffic).
*/
hop = &path->hops[0];
tmp = min(tb_usable_credits(hop->in_port), credits);
@@ -1620,7 +1838,7 @@ static void tb_dma_release_credits(struct tb_path_hop *hop)
}
}
-static void tb_dma_deinit_path(struct tb_path *path)
+static void tb_dma_destroy_path(struct tb_path *path)
{
struct tb_path_hop *hop;
@@ -1628,14 +1846,14 @@ static void tb_dma_deinit_path(struct tb_path *path)
tb_dma_release_credits(hop);
}
-static void tb_dma_deinit(struct tb_tunnel *tunnel)
+static void tb_dma_destroy(struct tb_tunnel *tunnel)
{
int i;
for (i = 0; i < tunnel->npaths; i++) {
if (!tunnel->paths[i])
continue;
- tb_dma_deinit_path(tunnel->paths[i]);
+ tb_dma_destroy_path(tunnel->paths[i]);
}
}
@@ -1651,7 +1869,7 @@ static void tb_dma_deinit(struct tb_tunnel *tunnel)
* @receive_ring: NHI ring number used to receive packets from the
* other domain. Set to %-1 if RX path is not needed.
*
- * Return: Returns a tb_tunnel on success or NULL on failure.
+ * Return: Pointer to &struct tb_tunnel or %NULL in case of failure.
*/
struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
struct tb_port *dst, int transmit_path,
@@ -1681,7 +1899,7 @@ struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
tunnel->src_port = nhi;
tunnel->dst_port = dst;
- tunnel->deinit = tb_dma_deinit;
+ tunnel->destroy = tb_dma_destroy;
credits = min_not_zero(dma_credits, nhi->sw->max_dma_credits);
@@ -1712,7 +1930,7 @@ struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
return tunnel;
err_free:
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
return NULL;
}
@@ -1728,7 +1946,8 @@ err_free:
*
* This function can be used to match specific DMA tunnel, if there are
* multiple DMA tunnels going through the same XDomain connection.
- * Returns true if there is match and false otherwise.
+ *
+ * Return: %true if there is a match, %false otherwise.
*/
bool tb_tunnel_match_dma(const struct tb_tunnel *tunnel, int transmit_path,
int transmit_ring, int receive_path, int receive_ring)
@@ -1793,7 +2012,7 @@ static int tb_usb3_max_link_rate(struct tb_port *up, struct tb_port *down)
return min(up_max_rate, down_max_rate);
}
-static int tb_usb3_init(struct tb_tunnel *tunnel)
+static int tb_usb3_pre_activate(struct tb_tunnel *tunnel)
{
tb_tunnel_dbg(tunnel, "allocating initial bandwidth %d/%d Mb/s\n",
tunnel->allocated_up, tunnel->allocated_down);
@@ -1825,7 +2044,7 @@ static int tb_usb3_consumed_bandwidth(struct tb_tunnel *tunnel,
/*
* PCIe tunneling, if enabled, affects the USB3 bandwidth so
- * take that it into account here.
+ * take that into account here.
*/
*consumed_up = tunnel->allocated_up *
(TB_USB3_WEIGHT + pcie_weight) / TB_USB3_WEIGHT;
@@ -1950,8 +2169,9 @@ static void tb_usb3_init_path(struct tb_path *path)
* @alloc_hopid: Allocate HopIDs from visited ports
*
* If @down adapter is active, follows the tunnel to the USB3 upstream
- * adapter and back. Returns the discovered tunnel or %NULL if there was
- * no tunnel.
+ * adapter and back.
+ *
+ * Return: Pointer to &struct tb_tunnel or %NULL if there was no tunnel.
*/
struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down,
bool alloc_hopid)
@@ -2024,7 +2244,7 @@ struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down,
tb_tunnel_dbg(tunnel, "currently allocated bandwidth %d/%d Mb/s\n",
tunnel->allocated_up, tunnel->allocated_down);
- tunnel->init = tb_usb3_init;
+ tunnel->pre_activate = tb_usb3_pre_activate;
tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth;
tunnel->release_unused_bandwidth =
tb_usb3_release_unused_bandwidth;
@@ -2038,7 +2258,7 @@ struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down,
err_deactivate:
tb_tunnel_deactivate(tunnel);
err_free:
- tb_tunnel_free(tunnel);
+ tb_tunnel_put(tunnel);
return NULL;
}
@@ -2056,7 +2276,7 @@ err_free:
* Allocate an USB3 tunnel. The ports must be of type @TB_TYPE_USB3_UP and
* @TB_TYPE_USB3_DOWN.
*
- * Return: Returns a tb_tunnel on success or %NULL on failure.
+ * Return: Pointer to &struct tb_tunnel or %NULL in case of failure.
*/
struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
struct tb_port *down, int max_up,
@@ -2093,19 +2313,15 @@ struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
path = tb_path_alloc(tb, down, TB_USB3_HOPID, up, TB_USB3_HOPID, 0,
"USB3 Down");
- if (!path) {
- tb_tunnel_free(tunnel);
- return NULL;
- }
+ if (!path)
+ goto err_free;
tb_usb3_init_path(path);
tunnel->paths[TB_USB3_PATH_DOWN] = path;
path = tb_path_alloc(tb, up, TB_USB3_HOPID, down, TB_USB3_HOPID, 0,
"USB3 Up");
- if (!path) {
- tb_tunnel_free(tunnel);
- return NULL;
- }
+ if (!path)
+ goto err_free;
tb_usb3_init_path(path);
tunnel->paths[TB_USB3_PATH_UP] = path;
@@ -2113,7 +2329,7 @@ struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
tunnel->allocated_up = min(max_rate, max_up);
tunnel->allocated_down = min(max_rate, max_down);
- tunnel->init = tb_usb3_init;
+ tunnel->pre_activate = tb_usb3_pre_activate;
tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth;
tunnel->release_unused_bandwidth =
tb_usb3_release_unused_bandwidth;
@@ -2122,36 +2338,17 @@ struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
}
return tunnel;
-}
-
-/**
- * tb_tunnel_free() - free a tunnel
- * @tunnel: Tunnel to be freed
- *
- * Frees a tunnel. The tunnel does not need to be deactivated.
- */
-void tb_tunnel_free(struct tb_tunnel *tunnel)
-{
- int i;
-
- if (!tunnel)
- return;
-
- if (tunnel->deinit)
- tunnel->deinit(tunnel);
-
- for (i = 0; i < tunnel->npaths; i++) {
- if (tunnel->paths[i])
- tb_path_free(tunnel->paths[i]);
- }
- kfree(tunnel->paths);
- kfree(tunnel);
+err_free:
+ tb_tunnel_put(tunnel);
+ return NULL;
}
/**
* tb_tunnel_is_invalid - check whether an activated path is still valid
* @tunnel: Tunnel to check
+ *
+ * Return: %true if path is valid, %false otherwise.
*/
bool tb_tunnel_is_invalid(struct tb_tunnel *tunnel)
{
@@ -2167,12 +2364,16 @@ bool tb_tunnel_is_invalid(struct tb_tunnel *tunnel)
}
/**
- * tb_tunnel_restart() - activate a tunnel after a hardware reset
- * @tunnel: Tunnel to restart
+ * tb_tunnel_activate() - activate a tunnel
+ * @tunnel: Tunnel to activate
*
- * Return: 0 on success and negative errno in case if failure
+ * Return:
+ * * %0 - On success.
+ * * %-EINPROGRESS - If the tunnel activation is still in progress (this
+ *   happens for DP tunnels while the DPRX capabilities read completes).
+ * * Negative errno - Another error occurred.
*/
-int tb_tunnel_restart(struct tb_tunnel *tunnel)
+int tb_tunnel_activate(struct tb_tunnel *tunnel)
{
int res, i;
@@ -2189,8 +2390,10 @@ int tb_tunnel_restart(struct tb_tunnel *tunnel)
}
}
- if (tunnel->init) {
- res = tunnel->init(tunnel);
+ tunnel->state = TB_TUNNEL_ACTIVATING;
+
+ if (tunnel->pre_activate) {
+ res = tunnel->pre_activate(tunnel);
if (res)
return res;
}
@@ -2203,10 +2406,14 @@ int tb_tunnel_restart(struct tb_tunnel *tunnel)
if (tunnel->activate) {
res = tunnel->activate(tunnel, true);
- if (res)
+ if (res) {
+ if (res == -EINPROGRESS)
+ return res;
goto err;
+ }
}
+ tb_tunnel_set_active(tunnel, true);
return 0;
err:
@@ -2216,27 +2423,6 @@ err:
}
/**
- * tb_tunnel_activate() - activate a tunnel
- * @tunnel: Tunnel to activate
- *
- * Return: Returns 0 on success or an error code on failure.
- */
-int tb_tunnel_activate(struct tb_tunnel *tunnel)
-{
- int i;
-
- for (i = 0; i < tunnel->npaths; i++) {
- if (tunnel->paths[i]->activated) {
- tb_tunnel_WARN(tunnel,
- "trying to activate an already activated tunnel\n");
- return -EINVAL;
- }
- }
-
- return tb_tunnel_restart(tunnel);
-}
-
-/**
* tb_tunnel_deactivate() - deactivate a tunnel
* @tunnel: Tunnel to deactivate
*/
@@ -2253,6 +2439,11 @@ void tb_tunnel_deactivate(struct tb_tunnel *tunnel)
if (tunnel->paths[i] && tunnel->paths[i]->activated)
tb_path_deactivate(tunnel->paths[i]);
}
+
+ if (tunnel->post_deactivate)
+ tunnel->post_deactivate(tunnel);
+
+ tb_tunnel_set_active(tunnel, false);
}
/**
@@ -2260,8 +2451,8 @@ void tb_tunnel_deactivate(struct tb_tunnel *tunnel)
* @tunnel: Tunnel to check
* @port: Port to check
*
- * Returns true if @tunnel goes through @port (direction does not matter),
- * false otherwise.
+ * Return: %true if @tunnel goes through @port (direction does not matter),
+ * %false otherwise.
*/
bool tb_tunnel_port_on_path(const struct tb_tunnel *tunnel,
const struct tb_port *port)
@@ -2279,18 +2470,10 @@ bool tb_tunnel_port_on_path(const struct tb_tunnel *tunnel,
return false;
}
-static bool tb_tunnel_is_active(const struct tb_tunnel *tunnel)
+/* Has tb_tunnel_activate() been called for the tunnel */
+static bool tb_tunnel_is_activated(const struct tb_tunnel *tunnel)
{
- int i;
-
- for (i = 0; i < tunnel->npaths; i++) {
- if (!tunnel->paths[i])
- return false;
- if (!tunnel->paths[i]->activated)
- return false;
- }
-
- return true;
+ return tunnel->state == TB_TUNNEL_ACTIVATING || tb_tunnel_is_active(tunnel);
}
/**
@@ -2299,15 +2482,17 @@ static bool tb_tunnel_is_active(const struct tb_tunnel *tunnel)
* @max_up: Maximum upstream bandwidth in Mb/s
* @max_down: Maximum downstream bandwidth in Mb/s
*
- * Returns maximum possible bandwidth this tunnel can go if not limited
- * by other bandwidth clients. If the tunnel does not support this
- * returns %-EOPNOTSUPP.
+ * Return:
+ * * Maximum possible bandwidth this tunnel can support if not
+ * limited by other bandwidth clients.
+ * * %-EOPNOTSUPP - If the tunnel does not support this function.
+ * * %-ENOTCONN - If the tunnel is not active.
*/
int tb_tunnel_maximum_bandwidth(struct tb_tunnel *tunnel, int *max_up,
int *max_down)
{
if (!tb_tunnel_is_active(tunnel))
- return -EINVAL;
+ return -ENOTCONN;
if (tunnel->maximum_bandwidth)
return tunnel->maximum_bandwidth(tunnel, max_up, max_down);
@@ -2321,14 +2506,18 @@ int tb_tunnel_maximum_bandwidth(struct tb_tunnel *tunnel, int *max_up,
* @allocated_down: Currently allocated downstream bandwidth in Mb/s is
* stored here
*
- * Returns the bandwidth allocated for the tunnel. This may be higher
- * than what the tunnel actually consumes.
+ * Return:
+ * * Bandwidth allocated for the tunnel. This may be higher than what the
+ * tunnel actually consumes.
+ * * %-EOPNOTSUPP - If the tunnel does not support this function.
+ * * %-ENOTCONN - If the tunnel is not active.
+ * * Negative errno - Another error occurred.
*/
int tb_tunnel_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up,
int *allocated_down)
{
if (!tb_tunnel_is_active(tunnel))
- return -EINVAL;
+ return -ENOTCONN;
if (tunnel->allocated_bandwidth)
return tunnel->allocated_bandwidth(tunnel, allocated_up,
@@ -2342,19 +2531,29 @@ int tb_tunnel_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up,
* @alloc_up: New upstream bandwidth in Mb/s
* @alloc_down: New downstream bandwidth in Mb/s
*
- * Tries to change tunnel bandwidth allocation. If succeeds returns %0
- * and updates @alloc_up and @alloc_down to that was actually allocated
- * (it may not be the same as passed originally). Returns negative errno
- * in case of failure.
+ * Tries to change tunnel bandwidth allocation.
+ *
+ * Return:
+ * * %0 - On success. Updates @alloc_up and @alloc_down to values that were
+ * actually allocated (it may not be the same as passed originally).
+ * * Negative errno - In case of failure.
*/
int tb_tunnel_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up,
int *alloc_down)
{
if (!tb_tunnel_is_active(tunnel))
- return -EINVAL;
+ return -ENOTCONN;
- if (tunnel->alloc_bandwidth)
- return tunnel->alloc_bandwidth(tunnel, alloc_up, alloc_down);
+ if (tunnel->alloc_bandwidth) {
+ int ret;
+
+ ret = tunnel->alloc_bandwidth(tunnel, alloc_up, alloc_down);
+ if (ret)
+ return ret;
+
+ tb_tunnel_changed(tunnel);
+ return 0;
+ }
return -EOPNOTSUPP;
}
@@ -2368,34 +2567,36 @@ int tb_tunnel_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up,
* Can be %NULL.
*
* Stores the amount of isochronous bandwidth @tunnel consumes in
- * @consumed_up and @consumed_down. In case of success returns %0,
- * negative errno otherwise.
+ * @consumed_up and @consumed_down.
+ *
+ * Return: %0 on success, negative errno otherwise.
*/
int tb_tunnel_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
int *consumed_down)
{
int up_bw = 0, down_bw = 0;
- if (!tb_tunnel_is_active(tunnel))
- goto out;
-
- if (tunnel->consumed_bandwidth) {
+ /*
+	 * Here we need to distinguish inactive tunnels from tunnels that
+	 * are either fully active or whose activation has been started.
+	 * The latter matters for DP tunnels where we must report the
+	 * consumed bandwidth to be the maximum we gave the tunnel until
+	 * the DPRX capabilities read is done by the graphics driver.
+ */
+ if (tb_tunnel_is_activated(tunnel) && tunnel->consumed_bandwidth) {
int ret;
ret = tunnel->consumed_bandwidth(tunnel, &up_bw, &down_bw);
if (ret)
return ret;
-
- tb_tunnel_dbg(tunnel, "consumed bandwidth %d/%d Mb/s\n", up_bw,
- down_bw);
}
-out:
if (consumed_up)
*consumed_up = up_bw;
if (consumed_down)
*consumed_down = down_bw;
+ tb_tunnel_dbg(tunnel, "consumed bandwidth %d/%d Mb/s\n", up_bw, down_bw);
return 0;
}
@@ -2404,14 +2605,14 @@ out:
* @tunnel: Tunnel whose unused bandwidth to release
*
* If tunnel supports dynamic bandwidth management (USB3 tunnels at the
- * moment) this function makes it to release all the unused bandwidth.
+ * moment) this function makes it release all the unused bandwidth.
*
- * Returns %0 in case of success and negative errno otherwise.
+ * Return: %0 on success, negative errno otherwise.
*/
int tb_tunnel_release_unused_bandwidth(struct tb_tunnel *tunnel)
{
if (!tb_tunnel_is_active(tunnel))
- return 0;
+ return -ENOTCONN;
if (tunnel->release_unused_bandwidth) {
int ret;
diff --git a/drivers/thunderbolt/tunnel.h b/drivers/thunderbolt/tunnel.h
index 1a27ccd08b86..2c44fc8a10bc 100644
--- a/drivers/thunderbolt/tunnel.h
+++ b/drivers/thunderbolt/tunnel.h
@@ -19,16 +19,33 @@ enum tb_tunnel_type {
};
/**
+ * enum tb_tunnel_state - State of a tunnel
+ * @TB_TUNNEL_INACTIVE: tb_tunnel_activate() is not called for the tunnel
+ * @TB_TUNNEL_ACTIVATING: tb_tunnel_activate() returned successfully for the tunnel
+ * @TB_TUNNEL_ACTIVE: The tunnel is fully active
+ */
+enum tb_tunnel_state {
+ TB_TUNNEL_INACTIVE,
+ TB_TUNNEL_ACTIVATING,
+ TB_TUNNEL_ACTIVE,
+};
+
+/**
* struct tb_tunnel - Tunnel between two ports
+ * @kref: Reference count
* @tb: Pointer to the domain
* @src_port: Source port of the tunnel
* @dst_port: Destination port of the tunnel. For discovered incomplete
* tunnels may be %NULL or null adapter port instead.
* @paths: All paths required by the tunnel
* @npaths: Number of paths in @paths
- * @init: Optional tunnel specific initialization
- * @deinit: Optional tunnel specific de-initialization
+ * @pre_activate: Optional tunnel specific initialization called before
+ * activation. Can touch hardware.
* @activate: Optional tunnel specific activation/deactivation
+ * @post_deactivate: Optional tunnel specific de-initialization called
+ * after deactivation. Can touch hardware.
+ * @destroy: Optional tunnel specific callback called when the tunnel
+ * memory is being released. Should not touch hardware.
* @maximum_bandwidth: Returns maximum possible bandwidth for this tunnel
* @allocated_bandwidth: Return how much bandwidth is allocated for the tunnel
* @alloc_bandwidth: Change tunnel bandwidth allocation
@@ -37,6 +54,7 @@ enum tb_tunnel_type {
* @reclaim_available_bandwidth: Reclaim back available bandwidth
* @list: Tunnels are linked using this field
* @type: Type of the tunnel
+ * @state: Current state of the tunnel
* @max_up: Maximum upstream bandwidth (Mb/s) available for the tunnel.
* Only set if the bandwidth needs to be limited.
* @max_down: Maximum downstream bandwidth (Mb/s) available for the tunnel.
@@ -45,16 +63,24 @@ enum tb_tunnel_type {
* @allocated_down: Allocated downstream bandwidth (only for USB3)
* @bw_mode: DP bandwidth allocation mode registers can be used to
* determine consumed and allocated bandwidth
+ * @dprx_started: DPRX negotiation was started (tb_dp_dprx_start() was called for it)
+ * @dprx_canceled: Whether the DPRX capabilities read poll was canceled
+ * @dprx_timeout: If set, the DPRX capabilities read poll work times out after this passes
+ * @dprx_work: Worker that is scheduled to poll completion of DPRX capabilities read
+ * @callback: Optional callback called when DP tunnel is fully activated
+ * @callback_data: Optional data for @callback
*/
struct tb_tunnel {
+ struct kref kref;
struct tb *tb;
struct tb_port *src_port;
struct tb_port *dst_port;
struct tb_path **paths;
size_t npaths;
- int (*init)(struct tb_tunnel *tunnel);
- void (*deinit)(struct tb_tunnel *tunnel);
+ int (*pre_activate)(struct tb_tunnel *tunnel);
int (*activate)(struct tb_tunnel *tunnel, bool activate);
+ void (*post_deactivate)(struct tb_tunnel *tunnel);
+ void (*destroy)(struct tb_tunnel *tunnel);
int (*maximum_bandwidth)(struct tb_tunnel *tunnel, int *max_up,
int *max_down);
int (*allocated_bandwidth)(struct tb_tunnel *tunnel, int *allocated_up,
@@ -69,11 +95,18 @@ struct tb_tunnel {
int *available_down);
struct list_head list;
enum tb_tunnel_type type;
+ enum tb_tunnel_state state;
int max_up;
int max_down;
int allocated_up;
int allocated_down;
bool bw_mode;
+ bool dprx_started;
+ bool dprx_canceled;
+ ktime_t dprx_timeout;
+ struct delayed_work dprx_work;
+ void (*callback)(struct tb_tunnel *tunnel, void *data);
+ void *callback_data;
};
struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down,
@@ -86,7 +119,9 @@ struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in,
bool alloc_hopid);
struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
struct tb_port *out, int link_nr,
- int max_up, int max_down);
+ int max_up, int max_down,
+ void (*callback)(struct tb_tunnel *, void *),
+ void *callback_data);
struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
struct tb_port *dst, int transmit_path,
int transmit_ring, int receive_path,
@@ -99,10 +134,25 @@ struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
struct tb_port *down, int max_up,
int max_down);
-void tb_tunnel_free(struct tb_tunnel *tunnel);
+void tb_tunnel_put(struct tb_tunnel *tunnel);
int tb_tunnel_activate(struct tb_tunnel *tunnel);
-int tb_tunnel_restart(struct tb_tunnel *tunnel);
void tb_tunnel_deactivate(struct tb_tunnel *tunnel);
+
+/**
+ * tb_tunnel_is_active() - Is tunnel fully activated
+ * @tunnel: Tunnel to check
+ *
+ * Return: %true if @tunnel is fully activated.
+ *
+ * Note for DP tunnels this returns %true only once the DPRX capabilities
+ * read has been issued successfully. For other tunnels, this function
+ * returns %true as soon as tb_tunnel_activate() returns successfully.
+ */
+static inline bool tb_tunnel_is_active(const struct tb_tunnel *tunnel)
+{
+ return tunnel->state == TB_TUNNEL_ACTIVE;
+}
+
bool tb_tunnel_is_invalid(struct tb_tunnel *tunnel);
bool tb_tunnel_port_on_path(const struct tb_tunnel *tunnel,
const struct tb_port *port);
@@ -145,6 +195,29 @@ static inline bool tb_tunnel_direction_downstream(const struct tb_tunnel *tunnel
tunnel->dst_port);
}
+/**
+ * enum tb_tunnel_event - Tunnel related events
+ * @TB_TUNNEL_ACTIVATED: A tunnel was activated
+ * @TB_TUNNEL_CHANGED: There is a tunneling change in the domain. Includes
+ * full %TUNNEL_DETAILS if the tunnel in question is known
+ * (ICM does not provide that information).
+ * @TB_TUNNEL_DEACTIVATED: A tunnel was torn down
+ * @TB_TUNNEL_LOW_BANDWIDTH: Tunnel bandwidth is not optimal
+ * @TB_TUNNEL_NO_BANDWIDTH: There is not enough bandwidth for a tunnel
+ */
+enum tb_tunnel_event {
+ TB_TUNNEL_ACTIVATED,
+ TB_TUNNEL_CHANGED,
+ TB_TUNNEL_DEACTIVATED,
+ TB_TUNNEL_LOW_BANDWIDTH,
+ TB_TUNNEL_NO_BANDWIDTH,
+};
+
+void tb_tunnel_event(struct tb *tb, enum tb_tunnel_event event,
+ enum tb_tunnel_type type,
+ const struct tb_port *src_port,
+ const struct tb_port *dst_port);
+
const char *tb_tunnel_type_name(const struct tb_tunnel *tunnel);
#define __TB_TUNNEL_PRINT(level, tunnel, fmt, arg...) \
diff --git a/drivers/thunderbolt/usb4.c b/drivers/thunderbolt/usb4.c
index de480bf2a53d..9e810b2ae0b5 100644
--- a/drivers/thunderbolt/usb4.c
+++ b/drivers/thunderbolt/usb4.c
@@ -9,6 +9,7 @@
#include <linux/delay.h>
#include <linux/ktime.h>
+#include <linux/string_choices.h>
#include <linux/units.h>
#include "sb_regs.h"
@@ -17,12 +18,6 @@
#define USB4_DATA_RETRIES 3
#define USB4_DATA_DWORDS 16
-enum usb4_sb_target {
- USB4_SB_TARGET_ROUTER,
- USB4_SB_TARGET_PARTNER,
- USB4_SB_TARGET_RETIMER,
-};
-
#define USB4_NVM_READ_OFFSET_MASK GENMASK(23, 2)
#define USB4_NVM_READ_OFFSET_SHIFT 2
#define USB4_NVM_READ_LENGTH_MASK GENMASK(27, 24)
@@ -54,7 +49,7 @@ enum usb4_ba_index {
/* Delays in us used with usb4_port_wait_for_bit() */
#define USB4_PORT_DELAY 50
-#define USB4_PORT_SB_DELAY 5000
+#define USB4_PORT_SB_DELAY 1000
static int usb4_native_switch_op(struct tb_switch *sw, u16 opcode,
u32 *metadata, u8 *status,
@@ -178,8 +173,8 @@ void usb4_switch_check_wakes(struct tb_switch *sw)
return;
tb_sw_dbg(sw, "PCIe wake: %s, USB3 wake: %s\n",
- (val & ROUTER_CS_6_WOPS) ? "yes" : "no",
- (val & ROUTER_CS_6_WOUS) ? "yes" : "no");
+ str_yes_no(val & ROUTER_CS_6_WOPS),
+ str_yes_no(val & ROUTER_CS_6_WOUS));
wakeup = val & (ROUTER_CS_6_WOPS | ROUTER_CS_6_WOUS);
}
@@ -197,9 +192,9 @@ void usb4_switch_check_wakes(struct tb_switch *sw)
break;
tb_port_dbg(port, "USB4 wake: %s, connection wake: %s, disconnection wake: %s\n",
- (val & PORT_CS_18_WOU4S) ? "yes" : "no",
- (val & PORT_CS_18_WOCS) ? "yes" : "no",
- (val & PORT_CS_18_WODS) ? "yes" : "no");
+ str_yes_no(val & PORT_CS_18_WOU4S),
+ str_yes_no(val & PORT_CS_18_WOCS),
+ str_yes_no(val & PORT_CS_18_WODS));
wakeup_usb4 = val & (PORT_CS_18_WOU4S | PORT_CS_18_WOCS |
PORT_CS_18_WODS);
@@ -242,6 +237,8 @@ static bool link_is_usb4(struct tb_port *port)
*
* This does not set the configuration valid bit of the router. To do
* that call usb4_switch_configuration_valid().
+ *
+ * Return: %0 on success, negative errno otherwise.
*/
int usb4_switch_setup(struct tb_switch *sw)
{
@@ -266,7 +263,7 @@ int usb4_switch_setup(struct tb_switch *sw)
tbt3 = !(val & ROUTER_CS_6_TNS);
tb_sw_dbg(sw, "TBT3 support: %s, xHCI: %s\n",
- tbt3 ? "yes" : "no", xhci ? "yes" : "no");
+ str_yes_no(tbt3), str_yes_no(xhci));
ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
if (ret)
@@ -287,7 +284,7 @@ int usb4_switch_setup(struct tb_switch *sw)
val |= ROUTER_CS_5_PTO;
/*
* xHCI can be enabled if PCIe tunneling is supported
- * and the parent does not have any USB3 dowstream
+ * and the parent does not have any USB3 downstream
* adapters (so we cannot do USB 3.x tunneling).
*/
if (xhci)
@@ -309,7 +306,7 @@ int usb4_switch_setup(struct tb_switch *sw)
* usb4_switch_setup() has been called. Can be called to host and device
* routers (does nothing for the latter).
*
- * Returns %0 in success and negative errno otherwise.
+ * Return: %0 on success, negative errno otherwise.
*/
int usb4_switch_configuration_valid(struct tb_switch *sw)
{
@@ -339,6 +336,8 @@ int usb4_switch_configuration_valid(struct tb_switch *sw)
* @uid: UID is stored here
*
* Reads 64-bit UID from USB4 router config space.
+ *
+ * Return: %0 on success, negative errno otherwise.
*/
int usb4_switch_read_uid(struct tb_switch *sw, u64 *uid)
{
@@ -376,6 +375,8 @@ static int usb4_switch_drom_read_block(void *data,
* Uses USB4 router operations to read router DROM. For devices this
* should always work but for hosts it may return %-EOPNOTSUPP in which
* case the host router does not have DROM.
+ *
+ * Return: %0 on success, negative errno otherwise.
*/
int usb4_switch_drom_read(struct tb_switch *sw, unsigned int address, void *buf,
size_t size)
@@ -390,6 +391,8 @@ int usb4_switch_drom_read(struct tb_switch *sw, unsigned int address, void *buf,
*
* Checks whether conditions are met so that lane bonding can be
* established with the upstream router. Call only for device routers.
+ *
+ * Return: %true if lane bonding is possible, %false otherwise.
*/
bool usb4_switch_lane_bonding_possible(struct tb_switch *sw)
{
@@ -409,12 +412,14 @@ bool usb4_switch_lane_bonding_possible(struct tb_switch *sw)
* usb4_switch_set_wake() - Enabled/disable wake
* @sw: USB4 router
* @flags: Wakeup flags (%0 to disable)
+ * @runtime: Wake is being programmed during system runtime
*
* Enables/disables router to wake up from sleep.
+ *
+ * Return: %0 on success, negative errno otherwise.
*/
-int usb4_switch_set_wake(struct tb_switch *sw, unsigned int flags)
+int usb4_switch_set_wake(struct tb_switch *sw, unsigned int flags, bool runtime)
{
- struct usb4_port *usb4;
struct tb_port *port;
u64 route = tb_route(sw);
u32 val;
@@ -444,13 +449,11 @@ int usb4_switch_set_wake(struct tb_switch *sw, unsigned int flags)
val |= PORT_CS_19_WOU4;
} else {
bool configured = val & PORT_CS_19_PC;
- usb4 = port->usb4;
+ bool wakeup = runtime || device_may_wakeup(&port->usb4->dev);
- if (((flags & TB_WAKE_ON_CONNECT) |
- device_may_wakeup(&usb4->dev)) && !configured)
+ if ((flags & TB_WAKE_ON_CONNECT) && wakeup && !configured)
val |= PORT_CS_19_WOC;
- if (((flags & TB_WAKE_ON_DISCONNECT) |
- device_may_wakeup(&usb4->dev)) && configured)
+ if ((flags & TB_WAKE_ON_DISCONNECT) && wakeup && configured)
val |= PORT_CS_19_WOD;
if ((flags & TB_WAKE_ON_USB4) && configured)
val |= PORT_CS_19_WOU4;
@@ -491,8 +494,10 @@ int usb4_switch_set_wake(struct tb_switch *sw, unsigned int flags)
* usb4_switch_set_sleep() - Prepare the router to enter sleep
* @sw: USB4 router
*
- * Sets sleep bit for the router. Returns when the router sleep ready
+ * Sets sleep bit for the router and waits until router sleep ready
* bit has been asserted.
+ *
+ * Return: %0 on success, negative errno otherwise.
*/
int usb4_switch_set_sleep(struct tb_switch *sw)
{
@@ -518,9 +523,10 @@ int usb4_switch_set_sleep(struct tb_switch *sw)
* usb4_switch_nvm_sector_size() - Return router NVM sector size
* @sw: USB4 router
*
- * If the router supports NVM operations this function returns the NVM
- * sector size in bytes. If NVM operations are not supported returns
- * %-EOPNOTSUPP.
+ * Return:
+ * * NVM sector size in bytes if router supports NVM operations.
+ * * %-EOPNOTSUPP - If router does not support NVM operations.
+ * * Negative errno - Another error occurred.
*/
int usb4_switch_nvm_sector_size(struct tb_switch *sw)
{
@@ -567,8 +573,12 @@ static int usb4_switch_nvm_read_block(void *data,
* @buf: Read data is placed here
* @size: How many bytes to read
*
- * Reads NVM contents of the router. If NVM is not supported returns
- * %-EOPNOTSUPP.
+ * Reads NVM contents of the router.
+ *
+ * Return:
+ * * %0 - Read completed successfully.
+ * * %-EOPNOTSUPP - NVM not supported.
+ * * Negative errno - Another error occurred.
*/
int usb4_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf,
size_t size)
@@ -585,7 +595,7 @@ int usb4_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf,
* Explicitly sets NVM write offset. Normally when writing to NVM this
* is done automatically by usb4_switch_nvm_write().
*
- * Returns %0 in success and negative errno if there was a failure.
+ * Return: %0 on success, negative errno otherwise.
*/
int usb4_switch_nvm_set_offset(struct tb_switch *sw, unsigned int address)
{
@@ -627,8 +637,12 @@ static int usb4_switch_nvm_write_next_block(void *data, unsigned int dwaddress,
* @buf: Pointer to the data to write
* @size: Size of @buf in bytes
*
- * Writes @buf to the router NVM using USB4 router operations. If NVM
- * write is not supported returns %-EOPNOTSUPP.
+ * Writes @buf to the router NVM using USB4 router operations.
+ *
+ * Return:
+ * * %0 - Write completed successfully.
+ * * %-EOPNOTSUPP - NVM write not supported.
+ * * Negative errno - Another error occurred.
*/
int usb4_switch_nvm_write(struct tb_switch *sw, unsigned int address,
const void *buf, size_t size)
@@ -650,11 +664,13 @@ int usb4_switch_nvm_write(struct tb_switch *sw, unsigned int address,
* After the new NVM has been written via usb4_switch_nvm_write(), this
* function triggers NVM authentication process. The router gets power
* cycled and if the authentication is successful the new NVM starts
- * running. In case of failure returns negative errno.
+ * running.
*
* The caller should call usb4_switch_nvm_authenticate_status() to read
* the status of the authentication after power cycle. It should be the
* first router operation to avoid the status being lost.
+ *
+ * Return: %0 on success, negative errno otherwise.
*/
int usb4_switch_nvm_authenticate(struct tb_switch *sw)
{
@@ -682,11 +698,13 @@ int usb4_switch_nvm_authenticate(struct tb_switch *sw)
* @status: Status code of the operation
*
* The function checks if there is status available from the last NVM
- * authenticate router operation. If there is status then %0 is returned
- * and the status code is placed in @status. Returns negative errno in case
- * of failure.
+ * authenticate router operation.
*
* Must be called before any other router operation.
+ *
+ * Return:
+ * * %0 - If there is status. Status code is placed in @status.
+ * * Negative errno - Failure occurred.
*/
int usb4_switch_nvm_authenticate_status(struct tb_switch *sw, u32 *status)
{
@@ -730,7 +748,7 @@ int usb4_switch_nvm_authenticate_status(struct tb_switch *sw, u32 *status)
* allocation fields accordingly. Specifically @sw->credits_allocation
* is set to %true if these parameters can be used in tunneling.
*
- * Returns %0 on success and negative errno otherwise.
+ * Return: %0 on success, negative errno otherwise.
*/
int usb4_switch_credits_init(struct tb_switch *sw)
{
@@ -869,8 +887,10 @@ err_invalid:
* @in: DP IN adapter
*
* For DP tunneling this function can be used to query availability of
- * DP IN resource. Returns true if the resource is available for DP
- * tunneling, false otherwise.
+ * DP IN resource.
+ *
+ * Return: %true if the resource is available for DP tunneling, %false
+ * otherwise.
*/
bool usb4_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in)
{
@@ -898,9 +918,12 @@ bool usb4_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in)
* @in: DP IN adapter
*
* Allocates DP IN resource for DP tunneling using USB4 router
- * operations. If the resource was allocated returns %0. Otherwise
- * returns negative errno, in particular %-EBUSY if the resource is
- * already allocated.
+ * operations.
+ *
+ * Return:
+ * * %0 - Resource allocated successfully.
+ * * %-EBUSY - Resource is already allocated.
+ * * Negative errno - Other failure occurred.
*/
int usb4_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
{
@@ -924,6 +947,8 @@ int usb4_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
* @in: DP IN adapter
*
* Releases the previously allocated DP IN resource.
+ *
+ * Return: %0 on success, negative errno otherwise.
*/
int usb4_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
{
@@ -941,7 +966,15 @@ int usb4_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
return status ? -EIO : 0;
}
-static int usb4_port_idx(const struct tb_switch *sw, const struct tb_port *port)
+/**
+ * usb4_port_index() - Finds matching USB4 port index
+ * @sw: USB4 router
+ * @port: USB4 protocol or lane adapter
+ *
+ * Finds the matching USB4 port index (starting from %0) that the given
+ * @port goes through.
+ */
+int usb4_port_index(const struct tb_switch *sw, const struct tb_port *port)
{
struct tb_port *p;
int usb4_idx = 0;
@@ -971,11 +1004,13 @@ static int usb4_port_idx(const struct tb_switch *sw, const struct tb_port *port)
* downstream adapters where the PCIe topology is extended. This
* function returns the corresponding downstream PCIe adapter or %NULL
* if no such mapping was possible.
+ *
+ * Return: Pointer to &struct tb_port or %NULL if not found.
*/
struct tb_port *usb4_switch_map_pcie_down(struct tb_switch *sw,
const struct tb_port *port)
{
- int usb4_idx = usb4_port_idx(sw, port);
+ int usb4_idx = usb4_port_index(sw, port);
struct tb_port *p;
int pcie_idx = 0;
@@ -1002,11 +1037,13 @@ struct tb_port *usb4_switch_map_pcie_down(struct tb_switch *sw,
* downstream adapters where the USB 3.x topology is extended. This
* function returns the corresponding downstream USB 3.x adapter or
* %NULL if no such mapping was possible.
+ *
+ * Return: Pointer to &struct tb_port or %NULL if not found.
*/
struct tb_port *usb4_switch_map_usb3_down(struct tb_switch *sw,
const struct tb_port *port)
{
- int usb4_idx = usb4_port_idx(sw, port);
+ int usb4_idx = usb4_port_index(sw, port);
struct tb_port *p;
int usb_idx = 0;
@@ -1031,7 +1068,7 @@ struct tb_port *usb4_switch_map_usb3_down(struct tb_switch *sw,
* For USB4 router finds all USB4 ports and registers devices for each.
* Can be called to any router.
*
- * Return %0 in case of success and negative errno in case of failure.
+ * Return: %0 on success, negative errno otherwise.
*/
int usb4_switch_add_ports(struct tb_switch *sw)
{
@@ -1084,6 +1121,8 @@ void usb4_switch_remove_ports(struct tb_switch *sw)
*
* Unlocks USB4 downstream port so that the connection manager can
* access the router below this port.
+ *
+ * Return: %0 on success, negative errno otherwise.
*/
int usb4_port_unlock(struct tb_port *port)
{
@@ -1104,6 +1143,8 @@ int usb4_port_unlock(struct tb_port *port)
*
* Enables hot plug events on a given port. This is only intended
* to be used on lane, DP-IN, and DP-OUT adapters.
+ *
+ * Return: %0 on success, negative errno otherwise.
*/
int usb4_port_hotplug_enable(struct tb_port *port)
{
@@ -1123,6 +1164,8 @@ int usb4_port_hotplug_enable(struct tb_port *port)
* @port: USB4 port to reset
*
* Issues downstream port reset to @port.
+ *
+ * Return: %0 on success, negative errno otherwise.
*/
int usb4_port_reset(struct tb_port *port)
{
@@ -1184,6 +1227,8 @@ static int usb4_port_set_configured(struct tb_port *port, bool configured)
* @port: USB4 router
*
* Sets the USB4 link to be configured for power management purposes.
+ *
+ * Return: %0 on success, negative errno otherwise.
*/
int usb4_port_configure(struct tb_port *port)
{
@@ -1195,6 +1240,8 @@ int usb4_port_configure(struct tb_port *port)
* @port: USB4 router
*
* Sets the USB4 link to be unconfigured for power management purposes.
*/
void usb4_port_unconfigure(struct tb_port *port)
{
@@ -1229,7 +1276,9 @@ static int usb4_set_xdomain_configured(struct tb_port *port, bool configured)
* @xd: XDomain that is connected to the port
*
* Marks the USB4 port as being connected to another host and updates
- * the link type. Returns %0 in success and negative errno in failure.
+ * the link type.
+ *
+ * Return: %0 on success, negative errno otherwise.
*/
int usb4_port_configure_xdomain(struct tb_port *port, struct tb_xdomain *xd)
{
@@ -1289,8 +1338,21 @@ static int usb4_port_write_data(struct tb_port *port, const void *data,
dwords);
}
-static int usb4_port_sb_read(struct tb_port *port, enum usb4_sb_target target,
- u8 index, u8 reg, void *buf, u8 size)
+/**
+ * usb4_port_sb_read() - Read from sideband register
+ * @port: USB4 port to read
+ * @target: Sideband target
+ * @index: Retimer index if target is %USB4_SB_TARGET_RETIMER
+ * @reg: Sideband register index
+ * @buf: Buffer where the sideband data is copied
+ * @size: Size of @buf
+ *
+ * Reads data from sideband register @reg and copies it into @buf.
+ *
+ * Return: %0 on success, negative errno otherwise.
+ */
+int usb4_port_sb_read(struct tb_port *port, enum usb4_sb_target target, u8 index,
+ u8 reg, void *buf, u8 size)
{
size_t dwords = DIV_ROUND_UP(size, 4);
int ret;
@@ -1329,8 +1391,21 @@ static int usb4_port_sb_read(struct tb_port *port, enum usb4_sb_target target,
return buf ? usb4_port_read_data(port, buf, dwords) : 0;
}
-static int usb4_port_sb_write(struct tb_port *port, enum usb4_sb_target target,
- u8 index, u8 reg, const void *buf, u8 size)
+/**
+ * usb4_port_sb_write() - Write to sideband register
+ * @port: USB4 port to write
+ * @target: Sideband target
+ * @index: Retimer index if target is %USB4_SB_TARGET_RETIMER
+ * @reg: Sideband register index
+ * @buf: Data to write
+ * @size: Size of @buf
+ *
+ * Writes @buf to sideband register @reg.
+ *
+ * Return: %0 on success, negative errno otherwise.
+ */
+int usb4_port_sb_write(struct tb_port *port, enum usb4_sb_target target,
+ u8 index, u8 reg, const void *buf, u8 size)
{
size_t dwords = DIV_ROUND_UP(size, 4);
int ret;
@@ -1444,8 +1519,7 @@ static int usb4_port_set_router_offline(struct tb_port *port, bool offline)
* port does not react on hotplug events anymore. This needs to be
* called before retimer access is done when the USB4 links is not up.
*
- * Returns %0 in case of success and negative errno if there was an
- * error.
+ * Return: %0 on success, negative errno otherwise.
*/
int usb4_port_router_offline(struct tb_port *port)
{
@@ -1453,10 +1527,12 @@ int usb4_port_router_offline(struct tb_port *port)
}
/**
- * usb4_port_router_online() - Put the USB4 port back to online
+ * usb4_port_router_online() - Put the USB4 port back online
* @port: USB4 port
*
* Makes the USB4 port functional again.
+ *
+ * Return: %0 on success, negative errno otherwise.
*/
int usb4_port_router_online(struct tb_port *port)
{
@@ -1468,8 +1544,9 @@ int usb4_port_router_online(struct tb_port *port)
* @port: USB4 port
*
* This forces the USB4 port to send broadcast RT transaction which
- * makes the retimers on the link to assign index to themselves. Returns
- * %0 in case of success and negative errno if there was an error.
+ * makes the retimers on the link assign an index to themselves.
+ *
+ * Return: %0 on success, negative errno otherwise.
*/
int usb4_port_enumerate_retimers(struct tb_port *port)
{
@@ -1486,6 +1563,8 @@ int usb4_port_enumerate_retimers(struct tb_port *port)
*
* PORT_CS_18_CPS bit reflects if the link supports CLx including
* active cables (if connected on the link).
+ *
+ * Return: %true if Clx is supported, %false otherwise.
*/
bool usb4_port_clx_supported(struct tb_port *port)
{
@@ -1504,8 +1583,9 @@ bool usb4_port_clx_supported(struct tb_port *port)
* usb4_port_asym_supported() - If the port supports asymmetric link
* @port: USB4 port
*
- * Checks if the port and the cable supports asymmetric link and returns
- * %true in that case.
+ * Checks if the port and the cable support asymmetric link.
+ *
+ * Return: %true if asymmetric link is supported, %false otherwise.
*/
bool usb4_port_asym_supported(struct tb_port *port)
{
@@ -1527,6 +1607,8 @@ bool usb4_port_asym_supported(struct tb_port *port)
*
* Sets USB4 port link width to @width. Can be called for widths where
* usb4_port_asym_width_supported() returned @true.
+ *
+ * Return: %0 on success, negative errno otherwise.
*/
int usb4_port_asym_set_link_width(struct tb_port *port, enum tb_link_width width)
{
@@ -1571,8 +1653,10 @@ int usb4_port_asym_set_link_width(struct tb_port *port, enum tb_link_width width
* (according to what was previously set in tb_port_set_link_width().
* Wait for completion of the change.
*
- * Returns %0 in case of success, %-ETIMEDOUT if case of timeout or
- * a negative errno in case of a failure.
+ * Return:
+ * * %0 - Symmetry change was successful.
+ * * %-ETIMEDOUT - Timeout occurred.
+ * * Negative errno - Other failure occurred.
*/
int usb4_port_asym_start(struct tb_port *port)
{
@@ -1608,123 +1692,152 @@ int usb4_port_asym_start(struct tb_port *port)
}
/**
- * usb4_port_margining_caps() - Read USB4 port marginig capabilities
+ * usb4_port_margining_caps() - Read USB4 port margining capabilities
* @port: USB4 port
+ * @target: Sideband target
+ * @index: Retimer index if target is %USB4_SB_TARGET_RETIMER
* @caps: Array with at least two elements to hold the results
+ * @ncaps: Number of elements in the caps array
*
* Reads the USB4 port lane margining capabilities into @caps.
+ *
+ * Return: %0 on success, negative errno otherwise.
*/
-int usb4_port_margining_caps(struct tb_port *port, u32 *caps)
+int usb4_port_margining_caps(struct tb_port *port, enum usb4_sb_target target,
+ u8 index, u32 *caps, size_t ncaps)
{
int ret;
- ret = usb4_port_sb_op(port, USB4_SB_TARGET_ROUTER, 0,
+ ret = usb4_port_sb_op(port, target, index,
USB4_SB_OPCODE_READ_LANE_MARGINING_CAP, 500);
if (ret)
return ret;
- return usb4_port_sb_read(port, USB4_SB_TARGET_ROUTER, 0,
- USB4_SB_DATA, caps, sizeof(*caps) * 2);
+ return usb4_port_sb_read(port, target, index, USB4_SB_DATA, caps,
+ sizeof(*caps) * ncaps);
}
/**
* usb4_port_hw_margin() - Run hardware lane margining on port
* @port: USB4 port
- * @lanes: Which lanes to run (must match the port capabilities). Can be
- * %0, %1 or %7.
- * @ber_level: BER level contour value
- * @timing: Perform timing margining instead of voltage
- * @right_high: Use Right/high margin instead of left/low
- * @results: Array with at least two elements to hold the results
+ * @target: Sideband target
+ * @index: Retimer index if target is %USB4_SB_TARGET_RETIMER
+ * @params: Parameters for USB4 hardware margining
+ * @results: Array to hold the results
+ * @nresults: Number of elements in the results array
*
* Runs hardware lane margining on USB4 port and returns the result in
* @results.
+ *
+ * Return: %0 on success, negative errno otherwise.
*/
-int usb4_port_hw_margin(struct tb_port *port, unsigned int lanes,
- unsigned int ber_level, bool timing, bool right_high,
- u32 *results)
+int usb4_port_hw_margin(struct tb_port *port, enum usb4_sb_target target,
+ u8 index, const struct usb4_port_margining_params *params,
+ u32 *results, size_t nresults)
{
u32 val;
int ret;
- val = lanes;
- if (timing)
- val |= USB4_MARGIN_HW_TIME;
- if (right_high)
- val |= USB4_MARGIN_HW_RH;
- if (ber_level)
- val |= (ber_level << USB4_MARGIN_HW_BER_SHIFT) &
- USB4_MARGIN_HW_BER_MASK;
+ if (WARN_ON_ONCE(!params))
+ return -EINVAL;
- ret = usb4_port_sb_write(port, USB4_SB_TARGET_ROUTER, 0,
- USB4_SB_METADATA, &val, sizeof(val));
+ val = params->lanes;
+ if (params->time)
+ val |= USB4_MARGIN_HW_TIME;
+ if (params->right_high || params->upper_eye)
+ val |= USB4_MARGIN_HW_RHU;
+ if (params->ber_level)
+ val |= FIELD_PREP(USB4_MARGIN_HW_BER_MASK, params->ber_level);
+ if (params->optional_voltage_offset_range)
+ val |= USB4_MARGIN_HW_OPT_VOLTAGE;
+
+ ret = usb4_port_sb_write(port, target, index, USB4_SB_METADATA, &val,
+ sizeof(val));
if (ret)
return ret;
- ret = usb4_port_sb_op(port, USB4_SB_TARGET_ROUTER, 0,
+ ret = usb4_port_sb_op(port, target, index,
USB4_SB_OPCODE_RUN_HW_LANE_MARGINING, 2500);
if (ret)
return ret;
- return usb4_port_sb_read(port, USB4_SB_TARGET_ROUTER, 0,
- USB4_SB_DATA, results, sizeof(*results) * 2);
+ return usb4_port_sb_read(port, target, index, USB4_SB_DATA, results,
+ sizeof(*results) * nresults);
}
/**
* usb4_port_sw_margin() - Run software lane margining on port
* @port: USB4 port
- * @lanes: Which lanes to run (must match the port capabilities). Can be
- * %0, %1 or %7.
- * @timing: Perform timing margining instead of voltage
- * @right_high: Use Right/high margin instead of left/low
- * @counter: What to do with the error counter
+ * @target: Sideband target
+ * @index: Retimer index if target is %USB4_SB_TARGET_RETIMER
+ * @params: Parameters for USB4 software margining
+ * @results: Data word for the operation completion data
*
* Runs software lane margining on USB4 port. Read back the error
- * counters by calling usb4_port_sw_margin_errors(). Returns %0 in
- * success and negative errno otherwise.
+ * counters by calling usb4_port_sw_margin_errors().
+ *
+ * Return: %0 on success, negative errno otherwise.
*/
-int usb4_port_sw_margin(struct tb_port *port, unsigned int lanes, bool timing,
- bool right_high, u32 counter)
+int usb4_port_sw_margin(struct tb_port *port, enum usb4_sb_target target,
+ u8 index, const struct usb4_port_margining_params *params,
+ u32 *results)
{
u32 val;
int ret;
- val = lanes;
- if (timing)
+ if (WARN_ON_ONCE(!params))
+ return -EINVAL;
+
+ val = params->lanes;
+ if (params->time)
val |= USB4_MARGIN_SW_TIME;
- if (right_high)
+ if (params->optional_voltage_offset_range)
+ val |= USB4_MARGIN_SW_OPT_VOLTAGE;
+ if (params->right_high)
val |= USB4_MARGIN_SW_RH;
- val |= (counter << USB4_MARGIN_SW_COUNTER_SHIFT) &
- USB4_MARGIN_SW_COUNTER_MASK;
+ if (params->upper_eye)
+ val |= USB4_MARGIN_SW_UPPER_EYE;
+ val |= FIELD_PREP(USB4_MARGIN_SW_COUNTER_MASK, params->error_counter);
+ val |= FIELD_PREP(USB4_MARGIN_SW_VT_MASK, params->voltage_time_offset);
- ret = usb4_port_sb_write(port, USB4_SB_TARGET_ROUTER, 0,
- USB4_SB_METADATA, &val, sizeof(val));
+ ret = usb4_port_sb_write(port, target, index, USB4_SB_METADATA, &val,
+ sizeof(val));
+ if (ret)
+ return ret;
+
+ ret = usb4_port_sb_op(port, target, index,
+ USB4_SB_OPCODE_RUN_SW_LANE_MARGINING, 2500);
if (ret)
return ret;
- return usb4_port_sb_op(port, USB4_SB_TARGET_ROUTER, 0,
- USB4_SB_OPCODE_RUN_SW_LANE_MARGINING, 2500);
+ return usb4_port_sb_read(port, target, index, USB4_SB_DATA, results,
+ sizeof(*results));
}
/**
* usb4_port_sw_margin_errors() - Read the software margining error counters
* @port: USB4 port
+ * @target: Sideband target
+ * @index: Retimer index if target is %USB4_SB_TARGET_RETIMER
* @errors: Error metadata is copied here.
*
* This reads back the software margining error counters from the port.
- * Returns %0 in success and negative errno otherwise.
+ *
+ * Return: %0 on success, negative errno otherwise.
*/
-int usb4_port_sw_margin_errors(struct tb_port *port, u32 *errors)
+int usb4_port_sw_margin_errors(struct tb_port *port, enum usb4_sb_target target,
+ u8 index, u32 *errors)
{
int ret;
- ret = usb4_port_sb_op(port, USB4_SB_TARGET_ROUTER, 0,
+ ret = usb4_port_sb_op(port, target, index,
USB4_SB_OPCODE_READ_SW_MARGIN_ERR, 150);
if (ret)
return ret;
- return usb4_port_sb_read(port, USB4_SB_TARGET_ROUTER, 0,
- USB4_SB_METADATA, errors, sizeof(*errors));
+ return usb4_port_sb_read(port, target, index, USB4_SB_METADATA, errors,
+ sizeof(*errors));
}
static inline int usb4_port_retimer_op(struct tb_port *port, u8 index,
@@ -1740,8 +1853,10 @@ static inline int usb4_port_retimer_op(struct tb_port *port, u8 index,
* @port: USB4 port
* @index: Retimer index
*
- * Enables sideband channel transations on SBTX. Can be used when USB4
+ * Enables sideband channel transactions on SBTX. Can be used when USB4
* link does not go up, for example if there is no device connected.
+ *
+ * Return: %0 on success, negative errno otherwise.
*/
int usb4_port_retimer_set_inbound_sbtx(struct tb_port *port, u8 index)
{
@@ -1767,8 +1882,10 @@ int usb4_port_retimer_set_inbound_sbtx(struct tb_port *port, u8 index)
* @port: USB4 port
* @index: Retimer index
*
- * Disables sideband channel transations on SBTX. The reverse of
+ * Disables sideband channel transactions on SBTX. The reverse of
* usb4_port_retimer_set_inbound_sbtx().
+ *
+ * Return: %0 on success, negative errno otherwise.
*/
int usb4_port_retimer_unset_inbound_sbtx(struct tb_port *port, u8 index)
{
@@ -1777,68 +1894,55 @@ int usb4_port_retimer_unset_inbound_sbtx(struct tb_port *port, u8 index)
}
/**
- * usb4_port_retimer_read() - Read from retimer sideband registers
+ * usb4_port_retimer_is_last() - Is the retimer the last on-board retimer
* @port: USB4 port
* @index: Retimer index
- * @reg: Sideband register to read
- * @buf: Data from @reg is stored here
- * @size: Number of bytes to read
*
- * Function reads retimer sideband registers starting from @reg. The
- * retimer is connected to @port at @index. Returns %0 in case of
- * success, and read data is copied to @buf. If there is no retimer
- * present at given @index returns %-ENODEV. In any other failure
- * returns negative errno.
+ * Return:
+ * * %1 - Retimer at @index is the last one (connected directly to the
+ * Type-C port).
+ * * %0 - Retimer at @index is not the last one.
+ * * %-ENODEV - Retimer is not present.
+ * * Negative errno - Other failure occurred.
*/
-int usb4_port_retimer_read(struct tb_port *port, u8 index, u8 reg, void *buf,
- u8 size)
+int usb4_port_retimer_is_last(struct tb_port *port, u8 index)
{
- return usb4_port_sb_read(port, USB4_SB_TARGET_RETIMER, index, reg, buf,
- size);
-}
+ u32 metadata;
+ int ret;
-/**
- * usb4_port_retimer_write() - Write to retimer sideband registers
- * @port: USB4 port
- * @index: Retimer index
- * @reg: Sideband register to write
- * @buf: Data that is written starting from @reg
- * @size: Number of bytes to write
- *
- * Writes retimer sideband registers starting from @reg. The retimer is
- * connected to @port at @index. Returns %0 in case of success. If there
- * is no retimer present at given @index returns %-ENODEV. In any other
- * failure returns negative errno.
- */
-int usb4_port_retimer_write(struct tb_port *port, u8 index, u8 reg,
- const void *buf, u8 size)
-{
- return usb4_port_sb_write(port, USB4_SB_TARGET_RETIMER, index, reg, buf,
- size);
+ ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_QUERY_LAST_RETIMER,
+ 500);
+ if (ret)
+ return ret;
+
+ ret = usb4_port_sb_read(port, USB4_SB_TARGET_RETIMER, index,
+ USB4_SB_METADATA, &metadata, sizeof(metadata));
+ return ret ? ret : metadata & 1;
}
/**
- * usb4_port_retimer_is_last() - Is the retimer last on-board retimer
+ * usb4_port_retimer_is_cable() - Is the retimer a cable retimer
* @port: USB4 port
* @index: Retimer index
*
- * If the retimer at @index is last one (connected directly to the
- * Type-C port) this function returns %1. If it is not returns %0. If
- * the retimer is not present returns %-ENODEV. Otherwise returns
- * negative errno.
+ * Return:
+ * * %1 - Retimer at @index is the last cable retimer.
+ * * %0 - Retimer at @index is an on-board retimer.
+ * * %-ENODEV - Retimer is not present.
+ * * Negative errno - Other failure occurred.
*/
-int usb4_port_retimer_is_last(struct tb_port *port, u8 index)
+int usb4_port_retimer_is_cable(struct tb_port *port, u8 index)
{
u32 metadata;
int ret;
- ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_QUERY_LAST_RETIMER,
+ ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_QUERY_CABLE_RETIMER,
500);
if (ret)
return ret;
- ret = usb4_port_retimer_read(port, index, USB4_SB_METADATA, &metadata,
- sizeof(metadata));
+ ret = usb4_port_sb_read(port, USB4_SB_TARGET_RETIMER, index,
+ USB4_SB_METADATA, &metadata, sizeof(metadata));
return ret ? ret : metadata & 1;
}
@@ -1849,9 +1953,12 @@ int usb4_port_retimer_is_last(struct tb_port *port, u8 index)
*
* Reads NVM sector size (in bytes) of a retimer at @index. This
* operation can be used to determine whether the retimer supports NVM
- * upgrade for example. Returns sector size in bytes or negative errno
- * in case of error. Specifically returns %-ENODEV if there is no
- * retimer at @index.
+ * upgrade for example.
+ *
+ * Return:
+ * * Sector size in bytes.
+ * * %-ENODEV - If there is no retimer at @index.
+ * * Negative errno - In case of an error.
*/
int usb4_port_retimer_nvm_sector_size(struct tb_port *port, u8 index)
{
@@ -1863,8 +1970,8 @@ int usb4_port_retimer_nvm_sector_size(struct tb_port *port, u8 index)
if (ret)
return ret;
- ret = usb4_port_retimer_read(port, index, USB4_SB_METADATA, &metadata,
- sizeof(metadata));
+ ret = usb4_port_sb_read(port, USB4_SB_TARGET_RETIMER, index,
+ USB4_SB_METADATA, &metadata, sizeof(metadata));
return ret ? ret : metadata & USB4_NVM_SECTOR_SIZE_MASK;
}
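
As an example of the "does it support NVM upgrade" check mentioned above (a sketch only; the real logic lives in retimer.c):

/* Sketch: probe whether the retimer at @index exposes upgradeable NVM. */
static bool example_retimer_has_nvm(struct tb_port *port, u8 index)
{
	int ret = usb4_port_retimer_nvm_sector_size(port, index);

	/* ret > 0 is the sector size; -ENODEV means no retimer there */
	return ret > 0;
}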
@@ -1874,10 +1981,10 @@ int usb4_port_retimer_nvm_sector_size(struct tb_port *port, u8 index)
* @index: Retimer index
* @address: Start offset
*
- * Exlicitly sets NVM write offset. Normally when writing to NVM this is
+ * Explicitly sets NVM write offset. Normally when writing to NVM this is
* done automatically by usb4_port_retimer_nvm_write().
*
- * Returns %0 in success and negative errno if there was a failure.
+ * Return: %0 on success, negative errno otherwise.
*/
int usb4_port_retimer_nvm_set_offset(struct tb_port *port, u8 index,
unsigned int address)
@@ -1889,8 +1996,8 @@ int usb4_port_retimer_nvm_set_offset(struct tb_port *port, u8 index,
metadata = (dwaddress << USB4_NVM_SET_OFFSET_SHIFT) &
USB4_NVM_SET_OFFSET_MASK;
- ret = usb4_port_retimer_write(port, index, USB4_SB_METADATA, &metadata,
- sizeof(metadata));
+ ret = usb4_port_sb_write(port, USB4_SB_TARGET_RETIMER, index,
+ USB4_SB_METADATA, &metadata, sizeof(metadata));
if (ret)
return ret;
@@ -1912,8 +2019,8 @@ static int usb4_port_retimer_nvm_write_next_block(void *data,
u8 index = info->index;
int ret;
- ret = usb4_port_retimer_write(port, index, USB4_SB_DATA,
- buf, dwords * 4);
+ ret = usb4_port_sb_write(port, USB4_SB_TARGET_RETIMER, index,
+ USB4_SB_DATA, buf, dwords * 4);
if (ret)
return ret;
@@ -1930,9 +2037,12 @@ static int usb4_port_retimer_nvm_write_next_block(void *data,
* @size: Size in bytes how much to write
*
* Writes @size bytes from @buf to the retimer NVM. Used for NVM
- * upgrade. Returns %0 if the data was written successfully and negative
- * errno in case of failure. Specifically returns %-ENODEV if there is
- * no retimer at @index.
+ * upgrade.
+ *
+ * Return:
+ * * %0 - If the data was written successfully.
+ * * %-ENODEV - If there is no retimer at @index.
+ * * Negative errno - In case of an error.
*/
int usb4_port_retimer_nvm_write(struct tb_port *port, u8 index, unsigned int address,
const void *buf, size_t size)
@@ -1958,6 +2068,8 @@ int usb4_port_retimer_nvm_write(struct tb_port *port, u8 index, unsigned int add
* successful the retimer restarts with the new NVM and may not have the
* index set so one needs to call usb4_port_enumerate_retimers() to
* force index to be assigned.
+ *
+ * Return: %0 on success, negative errno otherwise.
*/
int usb4_port_retimer_nvm_authenticate(struct tb_port *port, u8 index)
{
@@ -1982,9 +2094,9 @@ int usb4_port_retimer_nvm_authenticate(struct tb_port *port, u8 index)
* This can be called after usb4_port_retimer_nvm_authenticate() and
* usb4_port_enumerate_retimers() to fetch status of the NVM upgrade.
*
- * Returns %0 if the authentication status was successfully read. The
+ * Return: %0 if the authentication status was successfully read. The
* completion metadata (the result) is then stored into @status. If
- * reading the status fails, returns negative errno.
+ * reading the status fails, negative errno is returned.
*/
int usb4_port_retimer_nvm_authenticate_status(struct tb_port *port, u8 index,
u32 *status)
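
Taken together, the write, authenticate, enumerate and status helpers describe the whole upgrade flow. A condensed sketch (hypothetical wrapper; real callers add locking, size checks and error reporting):

/* Sketch of the retimer NVM upgrade sequence described above. */
static int example_retimer_upgrade(struct tb_port *port, u8 index,
				   const void *image, size_t size)
{
	u32 status;
	int ret;

	ret = usb4_port_retimer_nvm_write(port, index, 0, image, size);
	if (ret)
		return ret;

	ret = usb4_port_retimer_nvm_authenticate(port, index);
	if (ret)
		return ret;

	/* The retimer restarts; indices must be re-assigned before reads */
	ret = usb4_port_enumerate_retimers(port);
	if (ret)
		return ret;

	ret = usb4_port_retimer_nvm_authenticate_status(port, index, &status);
	if (ret)
		return ret;

	/* Non-zero completion metadata means the NVM was rejected */
	return status ? -EINVAL : 0;
}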
@@ -1992,8 +2104,8 @@ int usb4_port_retimer_nvm_authenticate_status(struct tb_port *port, u8 index,
u32 metadata, val;
int ret;
- ret = usb4_port_retimer_read(port, index, USB4_SB_OPCODE, &val,
- sizeof(val));
+ ret = usb4_port_sb_read(port, USB4_SB_TARGET_RETIMER, index,
+ USB4_SB_OPCODE, &val, sizeof(val));
if (ret)
return ret;
@@ -2004,8 +2116,9 @@ int usb4_port_retimer_nvm_authenticate_status(struct tb_port *port, u8 index,
return 0;
case -EAGAIN:
- ret = usb4_port_retimer_read(port, index, USB4_SB_METADATA,
- &metadata, sizeof(metadata));
+ ret = usb4_port_sb_read(port, USB4_SB_TARGET_RETIMER, index,
+ USB4_SB_METADATA, &metadata,
+ sizeof(metadata));
if (ret)
return ret;
@@ -2030,8 +2143,8 @@ static int usb4_port_retimer_nvm_read_block(void *data, unsigned int dwaddress,
if (dwords < USB4_DATA_DWORDS)
metadata |= dwords << USB4_NVM_READ_LENGTH_SHIFT;
- ret = usb4_port_retimer_write(port, index, USB4_SB_METADATA, &metadata,
- sizeof(metadata));
+ ret = usb4_port_sb_write(port, USB4_SB_TARGET_RETIMER, index,
+ USB4_SB_METADATA, &metadata, sizeof(metadata));
if (ret)
return ret;
@@ -2039,8 +2152,8 @@ static int usb4_port_retimer_nvm_read_block(void *data, unsigned int dwaddress,
if (ret)
return ret;
- return usb4_port_retimer_read(port, index, USB4_SB_DATA, buf,
- dwords * 4);
+ return usb4_port_sb_read(port, USB4_SB_TARGET_RETIMER, index,
+ USB4_SB_DATA, buf, dwords * 4);
}
/**
@@ -2051,9 +2164,12 @@ static int usb4_port_retimer_nvm_read_block(void *data, unsigned int dwaddress,
* @buf: Data read from NVM is stored here
* @size: Number of bytes to read
*
- * Reads retimer NVM and copies the contents to @buf. Returns %0 if the
- * read was successful and negative errno in case of failure.
- * Specifically returns %-ENODEV if there is no retimer at @index.
+ * Reads retimer NVM and copies the contents to @buf.
+ *
+ * Return:
+ * * %0 - If the read was successful.
+ * * %-ENODEV - If there is no retimer at @index.
+ * * Negative errno - In case of an error.
*/
int usb4_port_retimer_nvm_read(struct tb_port *port, u8 index,
unsigned int address, void *buf, size_t size)
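
Reading is the mirror operation. A short sketch (the buffer size and hex dump are illustrative):

/* Sketch: read the first bytes of retimer NVM, e.g. to peek the header. */
static int example_read_nvm_header(struct tb_port *port, u8 index)
{
	u8 buf[64];
	int ret;

	ret = usb4_port_retimer_nvm_read(port, index, 0, buf, sizeof(buf));
	if (ret)
		return ret;	/* -ENODEV if no retimer at @index */

	print_hex_dump_debug("nvm: ", DUMP_PREFIX_OFFSET, 16, 1, buf,
			     sizeof(buf), false);
	return 0;
}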
@@ -2074,11 +2190,11 @@ usb4_usb3_port_max_bandwidth(const struct tb_port *port, unsigned int bw)
}
/**
- * usb4_usb3_port_max_link_rate() - Maximum support USB3 link rate
+ * usb4_usb3_port_max_link_rate() - Maximum supported USB3 link rate
* @port: USB3 adapter port
*
- * Return maximum supported link rate of a USB3 adapter in Mb/s.
- * Negative errno in case of error.
+ * Return: Maximum supported link rate of a USB3 adapter in Mb/s, or
+ * negative errno in case of an error.
*/
int usb4_usb3_port_max_link_rate(struct tb_port *port)
{
@@ -2196,8 +2312,9 @@ static int usb4_usb3_port_read_allocated_bandwidth(struct tb_port *port,
* @downstream_bw: Allocated downstream bandwidth is stored here
*
* Stores currently allocated USB3 bandwidth into @upstream_bw and
- * @downstream_bw in Mb/s. Returns %0 in case of success and negative
- * errno in failure.
+ * @downstream_bw in Mb/s.
+ *
+ * Return: %0 on success, negative errno otherwise.
*/
int usb4_usb3_port_allocated_bandwidth(struct tb_port *port, int *upstream_bw,
int *downstream_bw)
@@ -2299,8 +2416,7 @@ static int usb4_usb3_port_write_allocated_bandwidth(struct tb_port *port,
* cannot be taken away by CM). The actual new values are returned in
* @upstream_bw and @downstream_bw.
*
- * Returns %0 in case of success and negative errno if there was a
- * failure.
+ * Return: %0 on success, negative errno otherwise.
*/
int usb4_usb3_port_allocate_bandwidth(struct tb_port *port, int *upstream_bw,
int *downstream_bw)
@@ -2342,7 +2458,7 @@ err_request:
* Releases USB3 allocated bandwidth down to what is actually consumed.
* The new bandwidth is returned in @upstream_bw and @downstream_bw.
*
- * Returns 0% in success and negative errno in case of failure.
+ * Return: %0 on success, negative errno otherwise.
*/
int usb4_usb3_port_release_bandwidth(struct tb_port *port, int *upstream_bw,
int *downstream_bw)
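
The allocate/release pair is typically driven by the CM as DP tunnels come and go. A compressed sketch (hypothetical helper; the 5000 Mb/s figures are illustrative):

/* Sketch: trim the USB3 allocation, then try to grow it back later. */
static void example_usb3_rebalance(struct tb_port *port)
{
	int up_bw, down_bw;

	/* Drop USB3 down to actual consumption to make room for DP */
	if (usb4_usb3_port_release_bandwidth(port, &up_bw, &down_bw))
		return;

	/* ... DP tunnel runs here; once it is torn down, grow again ... */

	up_bw = down_bw = 5000;
	/* On return the actually granted values are written back */
	usb4_usb3_port_allocate_bandwidth(port, &up_bw, &down_bw);
}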
@@ -2394,9 +2510,12 @@ static bool is_usb4_dpin(const struct tb_port *port)
* @port: DP IN adapter
* @cm_id: CM ID to assign
*
- * Sets CM ID for the @port. Returns %0 on success and negative errno
- * otherwise. Speficially returns %-EOPNOTSUPP if the @port does not
- * support this.
+ * Sets CM ID for the @port.
+ *
+ * Return:
+ * * %0 - On success.
+ * * %-EOPNOTSUPP - If the @port does not support this.
+ * * Negative errno - Another error occurred.
*/
int usb4_dp_port_set_cm_id(struct tb_port *port, int cm_id)
{
@@ -2423,8 +2542,10 @@ int usb4_dp_port_set_cm_id(struct tb_port *port, int cm_id)
* supported
* @port: DP IN adapter to check
*
- * Can be called to any DP IN adapter. Returns true if the adapter
- * supports USB4 bandwidth allocation mode, false otherwise.
+ * Can be called to any DP IN adapter.
+ *
+ * Return: %true if the adapter supports USB4 bandwidth allocation mode,
+ * %false otherwise.
*/
bool usb4_dp_port_bandwidth_mode_supported(struct tb_port *port)
{
@@ -2447,8 +2568,10 @@ bool usb4_dp_port_bandwidth_mode_supported(struct tb_port *port)
* enabled
* @port: DP IN adapter to check
*
- * Can be called to any DP IN adapter. Returns true if the bandwidth
- * allocation mode has been enabled, false otherwise.
+ * Can be called to any DP IN adapter.
+ *
+ * Return: %true if the bandwidth allocation mode has been enabled,
+ * %false otherwise.
*/
bool usb4_dp_port_bandwidth_mode_enabled(struct tb_port *port)
{
@@ -2473,9 +2596,12 @@ bool usb4_dp_port_bandwidth_mode_enabled(struct tb_port *port)
* @supported: Does the CM support bandwidth allocation mode
*
* Can be called to any DP IN adapter. Sets or clears the CM support bit
- * of the DP IN adapter. Returns %0 in success and negative errno
- * otherwise. Specifically returns %-OPNOTSUPP if the passed in adapter
- * does not support this.
+ * of the DP IN adapter.
+ *
+ * Return:
+ * * %0 - On success.
+ * * %-EOPNOTSUPP - If the passed in adapter does not support this.
+ * * Negative errno - Another error occurred.
*/
int usb4_dp_port_set_cm_bandwidth_mode_supported(struct tb_port *port,
bool supported)
@@ -2505,8 +2631,12 @@ int usb4_dp_port_set_cm_bandwidth_mode_supported(struct tb_port *port,
* @port: DP IN adapter
*
* Reads bandwidth allocation Group ID from the DP IN adapter and
- * returns it. If the adapter does not support setting Group_ID
- * %-EOPNOTSUPP is returned.
+ * returns it.
+ *
+ * Return:
+ * * Group ID assigned to adapter @port.
+ * * %-EOPNOTSUPP - If the adapter does not support setting Group_ID.
+ * * Negative errno - Another error occurred.
*/
int usb4_dp_port_group_id(struct tb_port *port)
{
@@ -2530,9 +2660,11 @@ int usb4_dp_port_group_id(struct tb_port *port)
* @group_id: Group ID for the adapter
*
* Sets bandwidth allocation mode Group ID for the DP IN adapter.
- * Returns %0 in case of success and negative errno otherwise.
- * Specifically returns %-EOPNOTSUPP if the adapter does not support
- * this.
+ *
+ * Return:
+ * * %0 - On success.
+ * * %-EOPNOTSUPP - If the adapter does not support this.
+ * * Negative errno - Another error occurred.
*/
int usb4_dp_port_set_group_id(struct tb_port *port, int group_id)
{
@@ -2560,9 +2692,12 @@ int usb4_dp_port_set_group_id(struct tb_port *port, int group_id)
* @rate: Non-reduced rate in Mb/s is placed here
* @lanes: Non-reduced lanes are placed here
*
- * Reads the non-reduced rate and lanes from the DP IN adapter. Returns
- * %0 in success and negative errno otherwise. Specifically returns
- * %-EOPNOTSUPP if the adapter does not support this.
+ * Reads the non-reduced rate and lanes from the DP IN adapter.
+ *
+ * Return:
+ * * %0 - On success.
+ * * %-EOPNOTSUPP - If the adapter does not support this.
+ * * Negative errno - Another error occurred.
*/
int usb4_dp_port_nrd(struct tb_port *port, int *rate, int *lanes)
{
@@ -2615,10 +2750,13 @@ int usb4_dp_port_nrd(struct tb_port *port, int *rate, int *lanes)
* @rate: Non-reduced rate in Mb/s
* @lanes: Non-reduced lanes
*
- * Before the capabilities reduction this function can be used to set
- * the non-reduced values for the DP IN adapter. Returns %0 in success
- * and negative errno otherwise. If the adapter does not support this
- * %-EOPNOTSUPP is returned.
+ * Before the capabilities reduction, this function can be used to set
+ * the non-reduced values for the DP IN adapter.
+ *
+ * Return:
+ * * %0 - On success.
+ * * %-EOPNOTSUPP - If the adapter does not support this.
+ * * Negative errno - Another error occurred.
*/
int usb4_dp_port_set_nrd(struct tb_port *port, int rate, int lanes)
{
@@ -2677,9 +2815,13 @@ int usb4_dp_port_set_nrd(struct tb_port *port, int rate, int lanes)
* usb4_dp_port_granularity() - Return granularity for the bandwidth values
* @port: DP IN adapter
*
- * Reads the programmed granularity from @port. If the DP IN adapter does
- * not support bandwidth allocation mode returns %-EOPNOTSUPP and negative
- * errno in other error cases.
+ * Reads the programmed granularity from @port.
+ *
+ * Return:
+ * * Granularity value of @port.
+ * * %-EOPNOTSUPP - If the DP IN adapter does not support bandwidth
+ * allocation mode.
+ * * Negative errno - Another error occurred.
*/
int usb4_dp_port_granularity(struct tb_port *port)
{
@@ -2715,8 +2857,12 @@ int usb4_dp_port_granularity(struct tb_port *port)
* @granularity: Granularity in Mb/s. Supported values: 1000, 500 and 250.
*
* Sets the granularity used with the estimated, allocated and requested
- * bandwidth. Returns %0 in success and negative errno otherwise. If the
- * adapter does not support this %-EOPNOTSUPP is returned.
+ * bandwidth.
+ *
+ * Return:
+ * * %0 - On success.
+ * * %-EOPNOTSUPP - If the adapter does not support this.
+ * * Negative errno - Another error occurred.
*/
int usb4_dp_port_set_granularity(struct tb_port *port, int granularity)
{
@@ -2757,10 +2903,13 @@ int usb4_dp_port_set_granularity(struct tb_port *port, int granularity)
* @bw: Estimated bandwidth in Mb/s.
*
* Sets the estimated bandwidth to @bw. Set the granularity by calling
- * usb4_dp_port_set_granularity() before calling this. The @bw is round
- * down to the closest granularity multiplier. Returns %0 in success
- * and negative errno otherwise. Specifically returns %-EOPNOTSUPP if
- * the adapter does not support this.
+ * usb4_dp_port_set_granularity() before calling this. The @bw is rounded
+ * down to the closest granularity multiplier.
+ *
+ * Return:
+ * * %0 - On success.
+ * * %-EOPNOTSUPP - If the adapter does not support this.
+ * * Negative errno - Another error occurred.
*/
int usb4_dp_port_set_estimated_bandwidth(struct tb_port *port, int bw)
{
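
The set_* helpers above are meant to be called in sequence when the CM brings up bandwidth allocation mode on a DP IN adapter. A condensed sketch (hypothetical helper; the CM ID, granularity and bandwidth values are illustrative and the ordering is simplified compared to tb.c):

/* Sketch: CM-side enablement of DP bandwidth allocation mode. */
static int example_enable_dp_bw_mode(struct tb_port *in, int group_id)
{
	int ret;

	if (!usb4_dp_port_bandwidth_mode_supported(in))
		return -EOPNOTSUPP;

	ret = usb4_dp_port_set_cm_id(in, 1);
	if (ret)
		return ret;

	ret = usb4_dp_port_set_group_id(in, group_id);
	if (ret)
		return ret;

	ret = usb4_dp_port_set_granularity(in, 250);
	if (ret)
		return ret;

	/* Rounded down to a multiple of the granularity just set */
	ret = usb4_dp_port_set_estimated_bandwidth(in, 28500);
	if (ret)
		return ret;

	return usb4_dp_port_set_cm_bandwidth_mode_supported(in, true);
}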
@@ -2791,9 +2940,10 @@ int usb4_dp_port_set_estimated_bandwidth(struct tb_port *port, int bw)
* usb4_dp_port_allocated_bandwidth() - Return allocated bandwidth
* @port: DP IN adapter
*
- * Reads and returns allocated bandwidth for @port in Mb/s (taking into
- * account the programmed granularity). Returns negative errno in case
- * of error.
+ * Reads the allocated bandwidth for @port in Mb/s (taking into account
+ * the programmed granularity).
+ *
+ * Return: Allocated bandwidth in Mb/s or negative errno in case of an error.
*/
int usb4_dp_port_allocated_bandwidth(struct tb_port *port)
{
@@ -2888,8 +3038,9 @@ static int usb4_dp_port_wait_and_clear_cm_ack(struct tb_port *port,
* @bw: New allocated bandwidth in Mb/s
*
* Communicates the new allocated bandwidth with the DPCD (graphics
- * driver). Takes into account the programmed granularity. Returns %0 in
- * success and negative errno in case of error.
+ * driver). Takes into account the programmed granularity.
+ *
+ * Return: %0 on success, negative errno otherwise.
*/
int usb4_dp_port_allocate_bandwidth(struct tb_port *port, int bw)
{
@@ -2929,10 +3080,15 @@ int usb4_dp_port_allocate_bandwidth(struct tb_port *port, int bw)
* @port: DP IN adapter
*
* Reads the DPCD (graphics driver) requested bandwidth and returns it
- * in Mb/s. Takes the programmed granularity into account. In case of
- * error returns negative errno. Specifically returns %-EOPNOTSUPP if
- * the adapter does not support bandwidth allocation mode, and %ENODATA
- * if there is no active bandwidth request from the graphics driver.
+ * in Mb/s. Takes the programmed granularity into account.
+ *
+ * Return:
+ * * Requested bandwidth in Mb/s - On success.
+ * * %-EOPNOTSUPP - If the adapter does not support bandwidth allocation
+ * mode.
+ * * %-ENODATA - If there is no active bandwidth request from the graphics
+ * driver.
+ * * Negative errno - On failure.
*/
int usb4_dp_port_requested_bandwidth(struct tb_port *port)
{
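
On the request path the CM reads the DPCD-originated ask and answers it. Roughly (a sketch mirroring the return values listed above; the grant policy is elided):

/* Sketch: service one bandwidth request from the graphics driver. */
static int example_handle_dp_bw_request(struct tb_port *in)
{
	int req = usb4_dp_port_requested_bandwidth(in);

	if (req == -ENODATA)
		return 0;	/* nothing pending */
	if (req < 0)
		return req;	/* -EOPNOTSUPP or other failure */

	/* Grant the request; a real CM may clamp it to available bw */
	return usb4_dp_port_allocate_bandwidth(in, req);
}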
@@ -2964,8 +3120,9 @@ int usb4_dp_port_requested_bandwidth(struct tb_port *port)
* @enable: Enable/disable extended encapsulation
*
* Enables or disables extended encapsulation used in PCIe tunneling. Caller
- * needs to make sure both adapters support this before enabling. Returns %0 on
- * success and negative errno otherwise.
+ * needs to make sure both adapters support this before enabling.
+ *
+ * Return: %0 on success, negative errno otherwise.
*/
int usb4_pci_port_set_ext_encapsulation(struct tb_port *port, bool enable)
{
diff --git a/drivers/thunderbolt/usb4_port.c b/drivers/thunderbolt/usb4_port.c
index 5150879888ca..b5e06237261b 100644
--- a/drivers/thunderbolt/usb4_port.c
+++ b/drivers/thunderbolt/usb4_port.c
@@ -105,6 +105,49 @@ static void usb4_port_online(struct usb4_port *usb4)
tb_acpi_power_off_retimers(port);
}
+/**
+ * usb4_usb3_port_match() - Match USB4 port device with USB 3.x port device
+ * @usb4_port_dev: USB4 port device
+ * @usb3_port_fwnode: USB 3.x port firmware node
+ *
+ * Checks if the USB 3.x port represented by @usb3_port_fwnode is tunneled
+ * through the USB4 port @usb4_port_dev. The function is designed to be
+ * used with the component framework (component_match_add()).
+ *
+ * Return: %true if a match is found, %false otherwise.
+ */
+bool usb4_usb3_port_match(struct device *usb4_port_dev,
+ const struct fwnode_handle *usb3_port_fwnode)
+{
+ struct fwnode_handle *nhi_fwnode __free(fwnode_handle) = NULL;
+ struct usb4_port *usb4;
+ struct tb_switch *sw;
+ struct tb_nhi *nhi;
+ u8 usb4_port_num;
+ struct tb *tb;
+
+ usb4 = tb_to_usb4_port_device(usb4_port_dev);
+ if (!usb4)
+ return false;
+
+ sw = usb4->port->sw;
+ tb = sw->tb;
+ nhi = tb->nhi;
+
+ nhi_fwnode = fwnode_find_reference(usb3_port_fwnode, "usb4-host-interface", 0);
+ if (IS_ERR(nhi_fwnode))
+ return false;
+
+ /* Check if USB3 fwnode references same NHI where USB4 port resides */
+ if (!device_match_fwnode(&nhi->pdev->dev, nhi_fwnode))
+ return false;
+
+ if (fwnode_property_read_u8(usb3_port_fwnode, "usb4-port-number", &usb4_port_num))
+ return false;
+
+ return usb4_port_index(sw, usb4->port) == usb4_port_num;
+}
+EXPORT_SYMBOL_GPL(usb4_usb3_port_match);
+
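
On the consumer side this could be wired up roughly as follows (a hypothetical glue sketch, since the matching USB 3.x host changes are outside this diff; the wrapper adapts the bool return to the int compare callback that the component core expects):

/* Sketch: bind a USB 3.x port device to the USB4 port it tunnels through. */
static int example_compare(struct device *dev, void *data)
{
	return usb4_usb3_port_match(dev, data);
}

static void example_add_match(struct device *parent,
			      struct component_match **match,
			      struct fwnode_handle *usb3_port_fwnode)
{
	component_match_add(parent, match, example_compare,
			    usb3_port_fwnode);
}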
static ssize_t offline_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -253,8 +296,9 @@ const struct device_type usb4_port_device_type = {
* usb4_port_device_add() - Add USB4 port device
* @port: Lane 0 adapter port to add the USB4 port
*
- * Creates and registers a USB4 port device for @port. Returns the new
- * USB4 port device pointer or ERR_PTR() in case of error.
+ * Creates and registers a USB4 port device for @port.
+ *
+ * Return: Pointer to &struct usb4_port or ERR_PTR() in case of an error.
*/
struct usb4_port *usb4_port_device_add(struct tb_port *port)
{
@@ -276,12 +320,10 @@ struct usb4_port *usb4_port_device_add(struct tb_port *port)
return ERR_PTR(ret);
}
- if (dev_fwnode(&usb4->dev)) {
- ret = component_add(&usb4->dev, &connector_ops);
- if (ret) {
- dev_err(&usb4->dev, "failed to add component\n");
- device_unregister(&usb4->dev);
- }
+ ret = component_add(&usb4->dev, &connector_ops);
+ if (ret) {
+ dev_err(&usb4->dev, "failed to add component\n");
+ device_unregister(&usb4->dev);
}
if (!tb_is_upstream_port(port))
@@ -306,8 +348,7 @@ struct usb4_port *usb4_port_device_add(struct tb_port *port)
*/
void usb4_port_device_remove(struct usb4_port *usb4)
{
- if (dev_fwnode(&usb4->dev))
- component_del(&usb4->dev, &connector_ops);
+ component_del(&usb4->dev, &connector_ops);
device_unregister(&usb4->dev);
}
@@ -316,6 +357,8 @@ void usb4_port_device_remove(struct usb4_port *usb4)
* @usb4: USB4 port device
*
* Used to resume USB4 port device after sleep state.
+ *
+ * Return: %0 on success, negative errno otherwise.
*/
int usb4_port_device_resume(struct usb4_port *usb4)
{
diff --git a/drivers/thunderbolt/xdomain.c b/drivers/thunderbolt/xdomain.c
index 11a50c86a1e4..63c7be818b2c 100644
--- a/drivers/thunderbolt/xdomain.c
+++ b/drivers/thunderbolt/xdomain.c
@@ -160,7 +160,7 @@ static int __tb_xdomain_response(struct tb_ctl *ctl, const void *response,
* This can be used to send a XDomain response message to the other
* domain. No response for the message is expected.
*
- * Return: %0 in case of success and negative errno in case of failure
+ * Return: %0 on success, negative errno otherwise.
*/
int tb_xdomain_response(struct tb_xdomain *xd, const void *response,
size_t size, enum tb_cfg_pkg_type type)
@@ -212,7 +212,7 @@ static int __tb_xdomain_request(struct tb_ctl *ctl, const void *request,
* the other domain. The function waits until the response is received
* or when timeout triggers. Whichever comes first.
*
- * Return: %0 in case of success and negative errno in case of failure
+ * Return: %0 on success, negative errno otherwise.
*/
int tb_xdomain_request(struct tb_xdomain *xd, const void *request,
size_t request_size, enum tb_cfg_pkg_type request_type,
@@ -613,6 +613,8 @@ static int tb_xdp_link_state_change_response(struct tb_ctl *ctl, u64 route,
* messages. After this function is called the service driver needs to
* be able to handle calls to callback whenever a package with the
* registered protocol is received.
+ *
+ * Return: %0 on success, negative errno otherwise.
*/
int tb_register_protocol_handler(struct tb_protocol_handler *handler)
{
@@ -877,6 +879,8 @@ tb_xdp_schedule_request(struct tb *tb, const struct tb_xdp_header *hdr,
* @drv: Driver to register
*
* Registers new service driver from @drv to the bus.
+ *
+ * Return: %0 on success, negative errno otherwise.
*/
int tb_register_service_driver(struct tb_service_driver *drv)
{
@@ -1026,7 +1030,7 @@ static int remove_missing_service(struct device *dev, void *data)
return 0;
}
-static int find_service(struct device *dev, void *data)
+static int find_service(struct device *dev, const void *data)
{
const struct tb_property *p = data;
struct tb_service *svc;
@@ -1947,14 +1951,16 @@ static void tb_xdomain_link_exit(struct tb_xdomain *xd)
/**
* tb_xdomain_alloc() - Allocate new XDomain object
* @tb: Domain where the XDomain belongs
- * @parent: Parent device (the switch through the connection to the
- * other domain is reached).
+ * @parent: Parent device (the switch through which the other domain
+ * is reached).
* @route: Route string used to reach the other domain
* @local_uuid: Our local domain UUID
* @remote_uuid: UUID of the other domain (optional)
*
* Allocates new XDomain structure and returns pointer to that. The
* object must be released by calling tb_xdomain_put().
+ *
+ * Return: Pointer to &struct tb_xdomain or %NULL in case of failure.
*/
struct tb_xdomain *tb_xdomain_alloc(struct tb *tb, struct device *parent,
u64 route, const uuid_t *local_uuid,
@@ -2091,7 +2097,7 @@ void tb_xdomain_remove(struct tb_xdomain *xd)
* to enable bonding by first enabling the port and waiting for the CL0
* state.
*
- * Return: %0 in case of success and negative errno in case of error.
+ * Return: %0 on success, negative errno otherwise.
*/
int tb_xdomain_lane_bonding_enable(struct tb_xdomain *xd)
{
@@ -2171,10 +2177,14 @@ EXPORT_SYMBOL_GPL(tb_xdomain_lane_bonding_disable);
* @xd: XDomain connection
* @hopid: Preferred HopID or %-1 for next available
*
- * Returns allocated HopID or negative errno. Specifically returns
- * %-ENOSPC if there are no more available HopIDs. Returned HopID is
- * guaranteed to be within range supported by the input lane adapter.
+ * Returned HopID is guaranteed to be within range supported by the input
+ * lane adapter. Call tb_xdomain_release_in_hopid() to release the
+ * allocated HopID.
+ *
+ * Return:
+ * * Allocated HopID - On success.
+ * * %-ENOSPC - If there are no more available HopIDs.
+ * * Negative errno - Another error occurred.
*/
int tb_xdomain_alloc_in_hopid(struct tb_xdomain *xd, int hopid)
{
@@ -2193,10 +2203,14 @@ EXPORT_SYMBOL_GPL(tb_xdomain_alloc_in_hopid);
* @xd: XDomain connection
* @hopid: Preferred HopID or %-1 for next available
*
- * Returns allocated HopID or negative errno. Specifically returns
- * %-ENOSPC if there are no more available HopIDs. Returned HopID is
- * guaranteed to be within range supported by the output lane adapter.
- * Call tb_xdomain_release_in_hopid() to release the allocated HopID.
+ * Returned HopID is guaranteed to be within range supported by the
+ * output lane adapter. Call tb_xdomain_release_out_hopid() to release
+ * the allocated HopID.
+ *
+ * Return:
+ * * Allocated HopID - On success.
+ * * %-ENOSPC - If there are no more available HopIDs.
+ * * Negative errno - Another error occurred.
*/
int tb_xdomain_alloc_out_hopid(struct tb_xdomain *xd, int hopid)
{
@@ -2245,7 +2259,7 @@ EXPORT_SYMBOL_GPL(tb_xdomain_release_out_hopid);
* path. If a transmit or receive path is not needed, pass %-1 for those
* parameters.
*
- * Return: %0 in case of success and negative errno in case of error
+ * Return: %0 on success, negative errno otherwise.
*/
int tb_xdomain_enable_paths(struct tb_xdomain *xd, int transmit_path,
int transmit_ring, int receive_path,
@@ -2270,7 +2284,7 @@ EXPORT_SYMBOL_GPL(tb_xdomain_enable_paths);
* as path/ring parameter means don't care. Normally the callers should
* pass the same values here as they do when paths are enabled.
*
- * Return: %0 in case of success and negative errno in case of error
+ * Return: %0 on success, negative errno otherwise.
*/
int tb_xdomain_disable_paths(struct tb_xdomain *xd, int transmit_path,
int transmit_ring, int receive_path,
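
A service driver (thunderbolt-net is the in-tree user) strings the HopID and path helpers together roughly like this (a sketch; ring setup and the exchange of HopIDs with the remote side are elided, so the transmit path value is a simplification):

/* Sketch: allocate HopIDs and bring up DMA paths to the peer. */
static int example_enable_dma(struct tb_xdomain *xd, int ring)
{
	int in_hopid, out_hopid, ret;

	in_hopid = tb_xdomain_alloc_in_hopid(xd, -1);
	if (in_hopid < 0)
		return in_hopid;	/* -ENOSPC when exhausted */

	out_hopid = tb_xdomain_alloc_out_hopid(xd, -1);
	if (out_hopid < 0) {
		tb_xdomain_release_in_hopid(xd, in_hopid);
		return out_hopid;
	}

	ret = tb_xdomain_enable_paths(xd, out_hopid, ring, in_hopid, ring);
	if (ret) {
		tb_xdomain_release_out_hopid(xd, out_hopid);
		tb_xdomain_release_in_hopid(xd, in_hopid);
	}
	return ret;
}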
@@ -2335,6 +2349,8 @@ static struct tb_xdomain *switch_find_xdomain(struct tb_switch *sw,
* to the bus (handshake is still in progress).
*
* The caller needs to hold @tb->lock.
+ *
+ * Return: Pointer to &struct tb_xdomain or %NULL if not found.
*/
struct tb_xdomain *tb_xdomain_find_by_uuid(struct tb *tb, const uuid_t *uuid)
{
@@ -2364,6 +2380,8 @@ EXPORT_SYMBOL_GPL(tb_xdomain_find_by_uuid);
* to the bus (handshake is still in progress).
*
* The caller needs to hold @tb->lock.
+ *
+ * Return: Pointer to &struct tb_xdomain or %NULL if not found.
*/
struct tb_xdomain *tb_xdomain_find_by_link_depth(struct tb *tb, u8 link,
u8 depth)
@@ -2393,6 +2411,8 @@ struct tb_xdomain *tb_xdomain_find_by_link_depth(struct tb *tb, u8 link,
* to the bus (handshake is still in progress).
*
* The caller needs to hold @tb->lock.
+ *
+ * Return: Pointer to &struct tb_xdomain or %NULL if not found.
*/
struct tb_xdomain *tb_xdomain_find_by_route(struct tb *tb, u64 route)
{
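
Usage of the find helpers follows the same pattern for all three lookups (a sketch; the returned XDomain holds a reference):

/* Sketch: look up a peer by route while holding the domain lock. */
static struct tb_xdomain *example_find_peer(struct tb *tb, u64 route)
{
	struct tb_xdomain *xd;

	mutex_lock(&tb->lock);
	xd = tb_xdomain_find_by_route(tb, route);
	mutex_unlock(&tb->lock);

	/* Caller drops the reference with tb_xdomain_put() when done */
	return xd;
}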
@@ -2491,7 +2511,7 @@ static bool remove_directory(const char *key, const struct tb_property_dir *dir)
* notified so they can re-read properties of this host if they are
* interested.
*
- * Return: %0 on success and negative errno on failure
+ * Return: %0 on success, negative errno otherwise.
*/
int tb_register_property_dir(const char *key, struct tb_property_dir *dir)
{
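
For example (a sketch modeled loosely on the in-tree networking service; the directory key and property names are illustrative):

/* Sketch: advertise a service in the local XDomain property directory. */
static int example_register_service_properties(void)
{
	struct tb_property_dir *dir;
	int ret;

	dir = tb_property_create_dir(NULL);
	if (!dir)
		return -ENOMEM;

	tb_property_add_immediate(dir, "prtcid", 1);
	tb_property_add_text(dir, "prtcname", "example");

	ret = tb_register_property_dir("example", dir);
	if (ret)
		tb_property_free_dir(dir);
	return ret;
}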
@@ -2562,10 +2582,9 @@ int tb_xdomain_init(void)
* Rest of the properties are filled dynamically based on these
* when the P2P connection is made.
*/
- tb_property_add_immediate(xdomain_property_dir, "vendorid",
- PCI_VENDOR_ID_INTEL);
- tb_property_add_text(xdomain_property_dir, "vendorid", "Intel Corp.");
- tb_property_add_immediate(xdomain_property_dir, "deviceid", 0x1);
+ tb_property_add_immediate(xdomain_property_dir, "vendorid", 0x1d6b);
+ tb_property_add_text(xdomain_property_dir, "vendorid", "Linux");
+ tb_property_add_immediate(xdomain_property_dir, "deviceid", 0x0004);
tb_property_add_immediate(xdomain_property_dir, "devicerv", 0x80000100);
xdomain_property_block_gen = get_random_u32();