Diffstat (limited to 'drivers/thunderbolt')
-rw-r--r--  drivers/thunderbolt/debugfs.c   | 12
-rw-r--r--  drivers/thunderbolt/nhi.c       | 49
-rw-r--r--  drivers/thunderbolt/nhi_regs.h  |  6
-rw-r--r--  drivers/thunderbolt/quirks.c    | 44
-rw-r--r--  drivers/thunderbolt/retimer.c   | 23
-rw-r--r--  drivers/thunderbolt/sb_regs.h   |  1
-rw-r--r--  drivers/thunderbolt/switch.c    |  4
-rw-r--r--  drivers/thunderbolt/tb.h        | 15
-rw-r--r--  drivers/thunderbolt/usb4.c      | 53
9 files changed, 166 insertions(+), 41 deletions(-)
diff --git a/drivers/thunderbolt/debugfs.c b/drivers/thunderbolt/debugfs.c
index 4339e706cc3a..f92ad71ef983 100644
--- a/drivers/thunderbolt/debugfs.c
+++ b/drivers/thunderbolt/debugfs.c
@@ -942,7 +942,8 @@ static void margining_port_remove(struct tb_port *port)
 
         snprintf(dir_name, sizeof(dir_name), "port%d", port->port);
         parent = debugfs_lookup(dir_name, port->sw->debugfs_dir);
-        debugfs_remove_recursive(debugfs_lookup("margining", parent));
+        if (parent)
+                debugfs_remove_recursive(debugfs_lookup("margining", parent));
 
         kfree(port->usb4->margining);
         port->usb4->margining = NULL;
@@ -967,19 +968,18 @@ static void margining_switch_init(struct tb_switch *sw)
 
 static void margining_switch_remove(struct tb_switch *sw)
 {
+        struct tb_port *upstream, *downstream;
         struct tb_switch *parent_sw;
-        struct tb_port *downstream;
         u64 route = tb_route(sw);
 
         if (!route)
                 return;
 
-        /*
-         * Upstream is removed with the router itself but we need to
-         * remove the downstream port margining directory.
-         */
+        upstream = tb_upstream_port(sw);
         parent_sw = tb_switch_parent(sw);
         downstream = tb_port_at(route, parent_sw);
+
+        margining_port_remove(upstream);
         margining_port_remove(downstream);
 }
 
diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
index 4dce2edd86ea..cfebec107f3f 100644
--- a/drivers/thunderbolt/nhi.c
+++ b/drivers/thunderbolt/nhi.c
@@ -46,7 +46,7 @@
 #define QUIRK_AUTO_CLEAR_INT	BIT(0)
 #define QUIRK_E2E		BIT(1)
 
-static int ring_interrupt_index(struct tb_ring *ring)
+static int ring_interrupt_index(const struct tb_ring *ring)
 {
         int bit = ring->hop;
         if (!ring->is_tx)
@@ -63,13 +63,14 @@ static void ring_interrupt_active(struct tb_ring *ring, bool active)
 {
         int reg = REG_RING_INTERRUPT_BASE +
                   ring_interrupt_index(ring) / 32 * 4;
-        int bit = ring_interrupt_index(ring) & 31;
-        int mask = 1 << bit;
+        int interrupt_bit = ring_interrupt_index(ring) & 31;
+        int mask = 1 << interrupt_bit;
         u32 old, new;
 
         if (ring->irq > 0) {
                 u32 step, shift, ivr, misc;
                 void __iomem *ivr_base;
+                int auto_clear_bit;
                 int index;
 
                 if (ring->is_tx)
@@ -77,18 +78,25 @@ static void ring_interrupt_active(struct tb_ring *ring, bool active)
                 else
                         index = ring->hop + ring->nhi->hop_count;
 
-                if (ring->nhi->quirks & QUIRK_AUTO_CLEAR_INT) {
-                        /*
-                         * Ask the hardware to clear interrupt status
-                         * bits automatically since we already know
-                         * which interrupt was triggered.
-                         */
-                        misc = ioread32(ring->nhi->iobase + REG_DMA_MISC);
-                        if (!(misc & REG_DMA_MISC_INT_AUTO_CLEAR)) {
-                                misc |= REG_DMA_MISC_INT_AUTO_CLEAR;
-                                iowrite32(misc, ring->nhi->iobase + REG_DMA_MISC);
-                        }
-                }
+                /*
+                 * Intel routers support a bit that isn't part of
+                 * the USB4 spec to ask the hardware to clear
+                 * interrupt status bits automatically since
+                 * we already know which interrupt was triggered.
+                 *
+                 * Other routers explicitly disable auto-clear
+                 * to prevent conditions that may occur where two
+                 * MSIX interrupts are simultaneously active and
+                 * reading the register clears both of them.
+                 */
+                misc = ioread32(ring->nhi->iobase + REG_DMA_MISC);
+                if (ring->nhi->quirks & QUIRK_AUTO_CLEAR_INT)
+                        auto_clear_bit = REG_DMA_MISC_INT_AUTO_CLEAR;
+                else
+                        auto_clear_bit = REG_DMA_MISC_DISABLE_AUTO_CLEAR;
+                if (!(misc & auto_clear_bit))
+                        iowrite32(misc | auto_clear_bit,
+                                  ring->nhi->iobase + REG_DMA_MISC);
 
                 ivr_base = ring->nhi->iobase + REG_INT_VEC_ALLOC_BASE;
                 step = index / REG_INT_VEC_ALLOC_REGS * REG_INT_VEC_ALLOC_BITS;
@@ -108,7 +116,7 @@ static void ring_interrupt_active(struct tb_ring *ring, bool active)
 
         dev_dbg(&ring->nhi->pdev->dev,
                 "%s interrupt at register %#x bit %d (%#x -> %#x)\n",
-                active ? "enabling" : "disabling", reg, bit, old, new);
+                active ? "enabling" : "disabling", reg, interrupt_bit, old, new);
 
         if (new == old)
                 dev_WARN(&ring->nhi->pdev->dev,
@@ -393,14 +401,17 @@ EXPORT_SYMBOL_GPL(tb_ring_poll_complete);
 
 static void ring_clear_msix(const struct tb_ring *ring)
 {
+        int bit;
+
         if (ring->nhi->quirks & QUIRK_AUTO_CLEAR_INT)
                 return;
 
+        bit = ring_interrupt_index(ring) & 31;
         if (ring->is_tx)
-                ioread32(ring->nhi->iobase + REG_RING_NOTIFY_BASE);
+                iowrite32(BIT(bit), ring->nhi->iobase + REG_RING_INT_CLEAR);
         else
-                ioread32(ring->nhi->iobase + REG_RING_NOTIFY_BASE +
-                         4 * (ring->nhi->hop_count / 32));
+                iowrite32(BIT(bit), ring->nhi->iobase + REG_RING_INT_CLEAR +
+                          4 * (ring->nhi->hop_count / 32));
 }
 
 static irqreturn_t ring_msix(int irq, void *data)
diff --git a/drivers/thunderbolt/nhi_regs.h b/drivers/thunderbolt/nhi_regs.h
index 0d4970dcef84..faef165a919c 100644
--- a/drivers/thunderbolt/nhi_regs.h
+++ b/drivers/thunderbolt/nhi_regs.h
@@ -77,12 +77,13 @@ struct ring_desc {
 
 /*
  * three bitfields: tx, rx, rx overflow
- * Every bitfield contains one bit for every hop (REG_HOP_COUNT). Registers are
- * cleared on read. New interrupts are fired only after ALL registers have been
+ * Every bitfield contains one bit for every hop (REG_HOP_COUNT).
+ * New interrupts are fired only after ALL registers have been
  * read (even those containing only disabled rings).
  */
 #define REG_RING_NOTIFY_BASE	0x37800
 #define RING_NOTIFY_REG_COUNT(nhi) ((31 + 3 * nhi->hop_count) / 32)
+#define REG_RING_INT_CLEAR	0x37808
 
 /*
  * two bitfields: rx, tx
@@ -105,6 +106,7 @@ struct ring_desc {
 
 #define REG_DMA_MISC			0x39864
 #define REG_DMA_MISC_INT_AUTO_CLEAR	BIT(2)
+#define REG_DMA_MISC_DISABLE_AUTO_CLEAR	BIT(17)
 
 #define REG_INMAIL_DATA			0x39900
 
diff --git a/drivers/thunderbolt/quirks.c b/drivers/thunderbolt/quirks.c
index b5f2ec79c4d6..1157b8869bcc 100644
--- a/drivers/thunderbolt/quirks.c
+++ b/drivers/thunderbolt/quirks.c
@@ -20,6 +20,25 @@ static void quirk_dp_credit_allocation(struct tb_switch *sw)
         }
 }
 
+static void quirk_clx_disable(struct tb_switch *sw)
+{
+        sw->quirks |= QUIRK_NO_CLX;
+        tb_sw_dbg(sw, "disabling CL states\n");
+}
+
+static void quirk_usb3_maximum_bandwidth(struct tb_switch *sw)
+{
+        struct tb_port *port;
+
+        tb_switch_for_each_port(sw, port) {
+                if (!tb_port_is_usb3_down(port))
+                        continue;
+                port->max_bw = 16376;
+                tb_port_dbg(port, "USB3 maximum bandwidth limited to %u Mb/s\n",
+                            port->max_bw);
+        }
+}
+
 struct tb_quirk {
         u16 hw_vendor_id;
         u16 hw_device_id;
@@ -37,6 +56,31 @@ static const struct tb_quirk tb_quirks[] = {
          * DP buffers.
          */
         { 0x8087, 0x0b26, 0x0000, 0x0000, quirk_dp_credit_allocation },
+        /*
+         * Limit the maximum USB3 bandwidth for the following Intel USB4
+         * host routers due to a hardware issue.
+         */
+        { 0x8087, PCI_DEVICE_ID_INTEL_ADL_NHI0, 0x0000, 0x0000,
+                  quirk_usb3_maximum_bandwidth },
+        { 0x8087, PCI_DEVICE_ID_INTEL_ADL_NHI1, 0x0000, 0x0000,
+                  quirk_usb3_maximum_bandwidth },
+        { 0x8087, PCI_DEVICE_ID_INTEL_RPL_NHI0, 0x0000, 0x0000,
+                  quirk_usb3_maximum_bandwidth },
+        { 0x8087, PCI_DEVICE_ID_INTEL_RPL_NHI1, 0x0000, 0x0000,
+                  quirk_usb3_maximum_bandwidth },
+        { 0x8087, PCI_DEVICE_ID_INTEL_MTL_M_NHI0, 0x0000, 0x0000,
+                  quirk_usb3_maximum_bandwidth },
+        { 0x8087, PCI_DEVICE_ID_INTEL_MTL_P_NHI0, 0x0000, 0x0000,
+                  quirk_usb3_maximum_bandwidth },
+        { 0x8087, PCI_DEVICE_ID_INTEL_MTL_P_NHI1, 0x0000, 0x0000,
+                  quirk_usb3_maximum_bandwidth },
+        /*
+         * CLx is not supported on AMD USB4 Yellow Carp and Pink Sardine platforms.
+         */
+        { 0x0438, 0x0208, 0x0000, 0x0000, quirk_clx_disable },
+        { 0x0438, 0x0209, 0x0000, 0x0000, quirk_clx_disable },
+        { 0x0438, 0x020a, 0x0000, 0x0000, quirk_clx_disable },
+        { 0x0438, 0x020b, 0x0000, 0x0000, quirk_clx_disable },
 };
 
 /**
diff --git a/drivers/thunderbolt/retimer.c b/drivers/thunderbolt/retimer.c
index 56008eb91e2e..9cc28197dbc4 100644
--- a/drivers/thunderbolt/retimer.c
+++ b/drivers/thunderbolt/retimer.c
@@ -187,6 +187,22 @@ static ssize_t nvm_authenticate_show(struct device *dev,
         return ret;
 }
 
+static void tb_retimer_set_inbound_sbtx(struct tb_port *port)
+{
+        int i;
+
+        for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++)
+                usb4_port_retimer_set_inbound_sbtx(port, i);
+}
+
+static void tb_retimer_unset_inbound_sbtx(struct tb_port *port)
+{
+        int i;
+
+        for (i = TB_MAX_RETIMER_INDEX; i >= 1; i--)
+                usb4_port_retimer_unset_inbound_sbtx(port, i);
+}
+
 static ssize_t nvm_authenticate_store(struct device *dev,
         struct device_attribute *attr, const char *buf, size_t count)
 {
@@ -213,6 +229,7 @@ static ssize_t nvm_authenticate_store(struct device *dev,
                 rt->auth_status = 0;
 
                 if (val) {
+                        tb_retimer_set_inbound_sbtx(rt->port);
                         if (val == AUTHENTICATE_ONLY) {
                                 ret = tb_retimer_nvm_authenticate(rt, true);
                         } else {
@@ -232,6 +249,7 @@ static ssize_t nvm_authenticate_store(struct device *dev,
         }
 
 exit_unlock:
+        tb_retimer_unset_inbound_sbtx(rt->port);
         mutex_unlock(&rt->tb->lock);
 exit_rpm:
         pm_runtime_mark_last_busy(&rt->dev);
@@ -440,8 +458,7 @@ int tb_retimer_scan(struct tb_port *port, bool add)
          * Enable sideband channel for each retimer. We can do this
          * regardless whether there is device connected or not.
          */
-        for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++)
-                usb4_port_retimer_set_inbound_sbtx(port, i);
+        tb_retimer_set_inbound_sbtx(port);
 
         /*
          * Before doing anything else, read the authentication status.
@@ -464,6 +481,8 @@ int tb_retimer_scan(struct tb_port *port, bool add)
                         break;
         }
 
+        tb_retimer_unset_inbound_sbtx(port);
+
         if (!last_idx)
                 return 0;
 
diff --git a/drivers/thunderbolt/sb_regs.h b/drivers/thunderbolt/sb_regs.h
index 5185cf3e4d97..f37a4320f10a 100644
--- a/drivers/thunderbolt/sb_regs.h
+++ b/drivers/thunderbolt/sb_regs.h
@@ -20,6 +20,7 @@ enum usb4_sb_opcode {
         USB4_SB_OPCODE_ROUTER_OFFLINE = 0x4e45534c,		/* "LSEN" */
         USB4_SB_OPCODE_ENUMERATE_RETIMERS = 0x4d554e45,	/* "ENUM" */
         USB4_SB_OPCODE_SET_INBOUND_SBTX = 0x5055534c,		/* "LSUP" */
+        USB4_SB_OPCODE_UNSET_INBOUND_SBTX = 0x50555355,	/* "USUP" */
         USB4_SB_OPCODE_QUERY_LAST_RETIMER = 0x5453414c,	/* "LAST" */
         USB4_SB_OPCODE_GET_NVM_SECTOR_SIZE = 0x53534e47,	/* "GNSS" */
         USB4_SB_OPCODE_NVM_SET_OFFSET = 0x53504f42,		/* "BOPS" */
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index 3370e18ba05f..da373ac38285 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -2968,8 +2968,6 @@ int tb_switch_add(struct tb_switch *sw)
                         dev_warn(&sw->dev, "reading DROM failed: %d\n", ret);
                 tb_sw_dbg(sw, "uid: %#llx\n", sw->uid);
 
-                tb_check_quirks(sw);
-
                 ret = tb_switch_set_uuid(sw);
                 if (ret) {
                         dev_err(&sw->dev, "failed to set UUID\n");
@@ -2988,6 +2986,8 @@ int tb_switch_add(struct tb_switch *sw)
                 }
         }
 
+        tb_check_quirks(sw);
+
         tb_switch_default_link_ports(sw);
 
         ret = tb_switch_update_link_attributes(sw);
diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
index cbb20a277346..275ff5219a3a 100644
--- a/drivers/thunderbolt/tb.h
+++ b/drivers/thunderbolt/tb.h
@@ -23,6 +23,11 @@
 #define NVM_MAX_SIZE		SZ_512K
 #define NVM_DATA_DWORDS		16
 
+/* Keep link controller awake during update */
+#define QUIRK_FORCE_POWER_LINK_CONTROLLER		BIT(0)
+/* Disable CLx if not supported */
+#define QUIRK_NO_CLX					BIT(1)
+
 /**
  * struct tb_nvm - Structure holding NVM information
  * @dev: Owner of the NVM
@@ -267,6 +272,8 @@ struct tb_bandwidth_group {
  * @group: Bandwidth allocation group the adapter is assigned to. Only
  *	   used for DP IN adapters for now.
  * @group_list: The adapter is linked to the group's list of ports through this
+ * @max_bw: Maximum possible bandwidth through this adapter if set to
+ *	    non-zero.
  *
  * In USB4 terminology this structure represents an adapter (protocol or
  * lane adapter).
@@ -294,6 +301,7 @@ struct tb_port {
         unsigned int dma_credits;
         struct tb_bandwidth_group *group;
         struct list_head group_list;
+        unsigned int max_bw;
 };
 
 /**
@@ -1019,6 +1027,9 @@ static inline bool tb_switch_is_clx_enabled(const struct tb_switch *sw,
  */
 static inline bool tb_switch_is_clx_supported(const struct tb_switch *sw)
 {
+        if (sw->quirks & QUIRK_NO_CLX)
+                return false;
+
         return tb_switch_is_usb4(sw) || tb_switch_is_titan_ridge(sw);
 }
 
@@ -1234,6 +1245,7 @@ int usb4_port_sw_margin(struct tb_port *port, unsigned int lanes, bool timing,
 int usb4_port_sw_margin_errors(struct tb_port *port, u32 *errors);
 
 int usb4_port_retimer_set_inbound_sbtx(struct tb_port *port, u8 index);
+int usb4_port_retimer_unset_inbound_sbtx(struct tb_port *port, u8 index);
 int usb4_port_retimer_read(struct tb_port *port, u8 index, u8 reg,
                            void *buf, u8 size);
 int usb4_port_retimer_write(struct tb_port *port, u8 index, u8 reg,
@@ -1291,9 +1303,6 @@ struct usb4_port *usb4_port_device_add(struct tb_port *port);
 void usb4_port_device_remove(struct usb4_port *usb4);
 int usb4_port_device_resume(struct usb4_port *usb4);
 
-/* Keep link controller awake during update */
-#define QUIRK_FORCE_POWER_LINK_CONTROLLER	BIT(0)
-
 void tb_check_quirks(struct tb_switch *sw);
 
 #ifdef CONFIG_ACPI
diff --git a/drivers/thunderbolt/usb4.c b/drivers/thunderbolt/usb4.c
index 1e5e9c147a31..a0996cb2893c 100644
--- a/drivers/thunderbolt/usb4.c
+++ b/drivers/thunderbolt/usb4.c
@@ -1579,6 +1579,20 @@ int usb4_port_retimer_set_inbound_sbtx(struct tb_port *port, u8 index)
 }
 
 /**
+ * usb4_port_retimer_unset_inbound_sbtx() - Disable sideband channel transactions
+ * @port: USB4 port
+ * @index: Retimer index
+ *
+ * Disables sideband channel transations on SBTX. The reverse of
+ * usb4_port_retimer_set_inbound_sbtx().
+ */
+int usb4_port_retimer_unset_inbound_sbtx(struct tb_port *port, u8 index)
+{
+        return usb4_port_retimer_op(port, index,
+                                    USB4_SB_OPCODE_UNSET_INBOUND_SBTX, 500);
+}
+
+/**
  * usb4_port_retimer_read() - Read from retimer sideband registers
  * @port: USB4 port
  * @index: Retimer index
@@ -1868,6 +1882,15 @@ int usb4_port_retimer_nvm_read(struct tb_port *port, u8 index,
                                 usb4_port_retimer_nvm_read_block, &info);
 }
 
+static inline unsigned int
+usb4_usb3_port_max_bandwidth(const struct tb_port *port, unsigned int bw)
+{
+        /* Take the possible bandwidth limitation into account */
+        if (port->max_bw)
+                return min(bw, port->max_bw);
+        return bw;
+}
+
 /**
  * usb4_usb3_port_max_link_rate() - Maximum support USB3 link rate
  * @port: USB3 adapter port
@@ -1889,7 +1912,9 @@ int usb4_usb3_port_max_link_rate(struct tb_port *port)
                 return ret;
 
         lr = (val & ADP_USB3_CS_4_MSLR_MASK) >> ADP_USB3_CS_4_MSLR_SHIFT;
-        return lr == ADP_USB3_CS_4_MSLR_20G ? 20000 : 10000;
+        ret = lr == ADP_USB3_CS_4_MSLR_20G ? 20000 : 10000;
+
+        return usb4_usb3_port_max_bandwidth(port, ret);
 }
 
 /**
@@ -1916,7 +1941,9 @@ int usb4_usb3_port_actual_link_rate(struct tb_port *port)
                 return 0;
 
         lr = val & ADP_USB3_CS_4_ALR_MASK;
-        return lr == ADP_USB3_CS_4_ALR_20G ? 20000 : 10000;
+        ret = lr == ADP_USB3_CS_4_ALR_20G ? 20000 : 10000;
+
+        return usb4_usb3_port_max_bandwidth(port, ret);
 }
 
 static int usb4_usb3_port_cm_request(struct tb_port *port, bool request)
@@ -2067,18 +2094,30 @@ static int usb4_usb3_port_write_allocated_bandwidth(struct tb_port *port,
                                                     int downstream_bw)
 {
         u32 val, ubw, dbw, scale;
-        int ret;
+        int ret, max_bw;
 
-        /* Read the used scale, hardware default is 0 */
-        ret = tb_port_read(port, &scale, TB_CFG_PORT,
-                           port->cap_adap + ADP_USB3_CS_3, 1);
+        /* Figure out suitable scale */
+        scale = 0;
+        max_bw = max(upstream_bw, downstream_bw);
+        while (scale < 64) {
+                if (mbps_to_usb3_bw(max_bw, scale) < 4096)
+                        break;
+                scale++;
+        }
+
+        if (WARN_ON(scale >= 64))
+                return -EINVAL;
+
+        ret = tb_port_write(port, &scale, TB_CFG_PORT,
+                            port->cap_adap + ADP_USB3_CS_3, 1);
         if (ret)
                 return ret;
 
-        scale &= ADP_USB3_CS_3_SCALE_MASK;
         ubw = mbps_to_usb3_bw(upstream_bw, scale);
         dbw = mbps_to_usb3_bw(downstream_bw, scale);
 
+        tb_port_dbg(port, "scaled bandwidth %u/%u, scale %u\n", ubw, dbw, scale);
+
         ret = tb_port_read(port, &val, TB_CFG_PORT,
                            port->cap_adap + ADP_USB3_CS_2, 1);
         if (ret)
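The most subtle part of the usb4.c change is the scale selection in usb4_usb3_port_write_allocated_bandwidth(): instead of reusing whatever scale the adapter reports, the driver now searches for the smallest scale at which the larger of the two requested bandwidths still fits the allocated-bandwidth register field (the patch checks mbps_to_usb3_bw(max_bw, scale) < 4096) and writes that scale back before programming the allocation. The stand-alone sketch below walks through the same search; mbps_to_units() and its 16 Mb/s base granularity are illustrative assumptions, not taken from this diff, so only the loop structure mirrors the patch.

/*
 * Sketch of the scale search added to usb4_usb3_port_write_allocated_bandwidth().
 * The conversion helper is a hypothetical stand-in for the driver's
 * mbps_to_usb3_bw(); the real unit granularity is not shown in this diff.
 */
#include <stdio.h>

#define USB3_BW_FIELD_LIMIT	4096	/* value must fit the register field */

/* Assumed granularity: each scale step doubles the size of one unit. */
static unsigned long long mbps_to_units(unsigned int mbps, unsigned int scale)
{
	unsigned long long unit = 16ULL << scale;	/* assumed Mb/s per unit */

	return (mbps + unit - 1) / unit;		/* round up */
}

int main(void)
{
	unsigned int upstream_bw = 5000, downstream_bw = 16376;	/* Mb/s */
	unsigned int max_bw = downstream_bw > upstream_bw ?
			      downstream_bw : upstream_bw;
	unsigned int scale = 0;

	/* Smallest scale for which the larger request fits the field */
	while (scale < 64 && mbps_to_units(max_bw, scale) >= USB3_BW_FIELD_LIMIT)
		scale++;

	printf("scale %u: up %llu units, down %llu units\n", scale,
	       mbps_to_units(upstream_bw, scale),
	       mbps_to_units(downstream_bw, scale));
	return 0;
}

With the inputs above, which match the 16376 Mb/s cap introduced by the new quirk, the sketch's search already stops at scale 0; larger requests simply push the loop to a coarser unit until the value drops below the field limit.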