Diffstat (limited to 'drivers/thunderbolt/nhi.c')
-rw-r--r--  drivers/thunderbolt/nhi.c | 183
1 file changed, 141 insertions(+), 42 deletions(-)
diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
index 4dce2edd86ea..6d0c9d37c55d 100644
--- a/drivers/thunderbolt/nhi.c
+++ b/drivers/thunderbolt/nhi.c
@@ -19,6 +19,7 @@
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/property.h>
+#include <linux/string_choices.h>
#include <linux/string_helpers.h>
#include "nhi.h"
@@ -46,7 +47,11 @@
#define QUIRK_AUTO_CLEAR_INT BIT(0)
#define QUIRK_E2E BIT(1)
-static int ring_interrupt_index(struct tb_ring *ring)
+static bool host_reset = true;
+module_param(host_reset, bool, 0444);
+MODULE_PARM_DESC(host_reset, "reset USB4 host router (default: true)");
+
+static int ring_interrupt_index(const struct tb_ring *ring)
{
int bit = ring->hop;
if (!ring->is_tx)
@@ -54,6 +59,26 @@ static int ring_interrupt_index(struct tb_ring *ring)
return bit;
}
+static void nhi_mask_interrupt(struct tb_nhi *nhi, int mask, int ring)
+{
+ if (nhi->quirks & QUIRK_AUTO_CLEAR_INT) {
+ u32 val;
+
+ val = ioread32(nhi->iobase + REG_RING_INTERRUPT_BASE + ring);
+ iowrite32(val & ~mask, nhi->iobase + REG_RING_INTERRUPT_BASE + ring);
+ } else {
+ iowrite32(mask, nhi->iobase + REG_RING_INTERRUPT_MASK_CLEAR_BASE + ring);
+ }
+}
+
+static void nhi_clear_interrupt(struct tb_nhi *nhi, int ring)
+{
+ if (nhi->quirks & QUIRK_AUTO_CLEAR_INT)
+ ioread32(nhi->iobase + REG_RING_NOTIFY_BASE + ring);
+ else
+ iowrite32(~0, nhi->iobase + REG_RING_INT_CLEAR + ring);
+}
+
/*
* ring_interrupt_active() - activate/deactivate interrupts for a single ring
*
@@ -61,15 +86,16 @@ static int ring_interrupt_index(struct tb_ring *ring)
*/
static void ring_interrupt_active(struct tb_ring *ring, bool active)
{
- int reg = REG_RING_INTERRUPT_BASE +
- ring_interrupt_index(ring) / 32 * 4;
- int bit = ring_interrupt_index(ring) & 31;
- int mask = 1 << bit;
+ int index = ring_interrupt_index(ring) / 32 * 4;
+ int reg = REG_RING_INTERRUPT_BASE + index;
+ int interrupt_bit = ring_interrupt_index(ring) & 31;
+ int mask = 1 << interrupt_bit;
u32 old, new;
if (ring->irq > 0) {
u32 step, shift, ivr, misc;
void __iomem *ivr_base;
+ int auto_clear_bit;
int index;
if (ring->is_tx)
@@ -77,18 +103,25 @@ static void ring_interrupt_active(struct tb_ring *ring, bool active)
else
index = ring->hop + ring->nhi->hop_count;
- if (ring->nhi->quirks & QUIRK_AUTO_CLEAR_INT) {
- /*
- * Ask the hardware to clear interrupt status
- * bits automatically since we already know
- * which interrupt was triggered.
- */
- misc = ioread32(ring->nhi->iobase + REG_DMA_MISC);
- if (!(misc & REG_DMA_MISC_INT_AUTO_CLEAR)) {
- misc |= REG_DMA_MISC_INT_AUTO_CLEAR;
- iowrite32(misc, ring->nhi->iobase + REG_DMA_MISC);
- }
- }
+ /*
+ * Intel routers support a bit that isn't part of
+ * the USB4 spec to ask the hardware to clear
+ * interrupt status bits automatically since
+ * we already know which interrupt was triggered.
+ *
+ * Other routers explicitly disable auto-clear
+ * to prevent conditions that may occur where two
+ * MSIX interrupts are simultaneously active and
+ * reading the register clears both of them.
+ */
+ misc = ioread32(ring->nhi->iobase + REG_DMA_MISC);
+ if (ring->nhi->quirks & QUIRK_AUTO_CLEAR_INT)
+ auto_clear_bit = REG_DMA_MISC_INT_AUTO_CLEAR;
+ else
+ auto_clear_bit = REG_DMA_MISC_DISABLE_AUTO_CLEAR;
+ if (!(misc & auto_clear_bit))
+ iowrite32(misc | auto_clear_bit,
+ ring->nhi->iobase + REG_DMA_MISC);
ivr_base = ring->nhi->iobase + REG_INT_VEC_ALLOC_BASE;
step = index / REG_INT_VEC_ALLOC_REGS * REG_INT_VEC_ALLOC_BITS;
@@ -108,14 +141,18 @@ static void ring_interrupt_active(struct tb_ring *ring, bool active)
dev_dbg(&ring->nhi->pdev->dev,
"%s interrupt at register %#x bit %d (%#x -> %#x)\n",
- active ? "enabling" : "disabling", reg, bit, old, new);
+ active ? "enabling" : "disabling", reg, interrupt_bit, old, new);
if (new == old)
dev_WARN(&ring->nhi->pdev->dev,
"interrupt for %s %d is already %s\n",
RING_TYPE(ring), ring->hop,
- active ? "enabled" : "disabled");
- iowrite32(new, ring->nhi->iobase + reg);
+ str_enabled_disabled(active));
+
+ if (active)
+ iowrite32(new, ring->nhi->iobase + reg);
+ else
+ nhi_mask_interrupt(ring->nhi, mask, index);
}
/*
@@ -128,11 +165,11 @@ static void nhi_disable_interrupts(struct tb_nhi *nhi)
int i = 0;
/* disable interrupts */
for (i = 0; i < RING_INTERRUPT_REG_COUNT(nhi); i++)
- iowrite32(0, nhi->iobase + REG_RING_INTERRUPT_BASE + 4 * i);
+ nhi_mask_interrupt(nhi, ~0, 4 * i);
/* clear interrupt status bits */
for (i = 0; i < RING_NOTIFY_REG_COUNT(nhi); i++)
- ioread32(nhi->iobase + REG_RING_NOTIFY_BASE + 4 * i);
+ nhi_clear_interrupt(nhi, 4 * i);
}
/* ring helper methods */
@@ -307,8 +344,10 @@ EXPORT_SYMBOL_GPL(__tb_ring_enqueue);
*
* This function can be called when @start_poll callback of the @ring
* has been called. It will read one completed frame from the ring and
- * return it to the caller. Returns %NULL if there is no more completed
- * frames.
+ * return it to the caller.
+ *
+ * Return: Pointer to &struct ring_frame, %NULL if there is no more
+ * completed frames.
*/
struct ring_frame *tb_ring_poll(struct tb_ring *ring)
{
@@ -393,14 +432,17 @@ EXPORT_SYMBOL_GPL(tb_ring_poll_complete);
static void ring_clear_msix(const struct tb_ring *ring)
{
+ int bit;
+
if (ring->nhi->quirks & QUIRK_AUTO_CLEAR_INT)
return;
+ bit = ring_interrupt_index(ring) & 31;
if (ring->is_tx)
- ioread32(ring->nhi->iobase + REG_RING_NOTIFY_BASE);
+ iowrite32(BIT(bit), ring->nhi->iobase + REG_RING_INT_CLEAR);
else
- ioread32(ring->nhi->iobase + REG_RING_NOTIFY_BASE +
- 4 * (ring->nhi->hop_count / 32));
+ iowrite32(BIT(bit), ring->nhi->iobase + REG_RING_INT_CLEAR +
+ 4 * (ring->nhi->hop_count / 32));
}
static irqreturn_t ring_msix(int irq, void *data)
@@ -426,7 +468,7 @@ static int ring_request_msix(struct tb_ring *ring, bool no_suspend)
if (!nhi->pdev->msix_enabled)
return 0;
- ret = ida_simple_get(&nhi->msix_ida, 0, MSIX_MAX_VECS, GFP_KERNEL);
+ ret = ida_alloc_max(&nhi->msix_ida, MSIX_MAX_VECS - 1, GFP_KERNEL);
if (ret < 0)
return ret;
@@ -446,7 +488,7 @@ static int ring_request_msix(struct tb_ring *ring, bool no_suspend)
return 0;
err_ida_remove:
- ida_simple_remove(&nhi->msix_ida, ring->vector);
+ ida_free(&nhi->msix_ida, ring->vector);
return ret;
}
@@ -457,7 +499,7 @@ static void ring_release_msix(struct tb_ring *ring)
return;
free_irq(ring->irq, ring);
- ida_simple_remove(&ring->nhi->msix_ida, ring->vector);
+ ida_free(&ring->nhi->msix_ida, ring->vector);
ring->vector = 0;
ring->irq = 0;
}
@@ -515,7 +557,8 @@ static int nhi_alloc_hop(struct tb_nhi *nhi, struct tb_ring *ring)
ring->hop);
ret = -EBUSY;
goto err_unlock;
- } else if (!ring->is_tx && nhi->rx_rings[ring->hop]) {
+ }
+ if (!ring->is_tx && nhi->rx_rings[ring->hop]) {
dev_warn(&nhi->pdev->dev, "RX hop %d already allocated\n",
ring->hop);
ret = -EBUSY;
@@ -599,6 +642,8 @@ err_free_ring:
* @hop: HopID (ring) to allocate
* @size: Number of entries in the ring
* @flags: Flags for the ring
+ *
+ * Return: Pointer to &struct tb_ring, %NULL otherwise.
*/
struct tb_ring *tb_ring_alloc_tx(struct tb_nhi *nhi, int hop, int size,
unsigned int flags)
@@ -620,6 +665,8 @@ EXPORT_SYMBOL_GPL(tb_ring_alloc_tx);
* interrupt is triggered and masked, instead of callback
* in each Rx frame.
* @poll_data: Optional data passed to @start_poll
+ *
+ * Return: Pointer to &struct tb_ring, %NULL otherwise.
*/
struct tb_ring *tb_ring_alloc_rx(struct tb_nhi *nhi, int hop, int size,
unsigned int flags, int e2e_tx_hop,
@@ -665,7 +712,7 @@ void tb_ring_start(struct tb_ring *ring)
ring_iowrite64desc(ring, ring->descriptors_dma, 0);
if (ring->is_tx) {
ring_iowrite32desc(ring, ring->size, 12);
- ring_iowrite32options(ring, 0, 4); /* time releated ? */
+ ring_iowrite32options(ring, 0, 4);
ring_iowrite32options(ring, flags, 0);
} else {
u32 sof_eof_mask = ring->sof_mask << 16 | ring->eof_mask;
@@ -813,8 +860,9 @@ EXPORT_SYMBOL_GPL(tb_ring_free);
* @cmd: Command to send
* @data: Data to be send with the command
*
- * Sends mailbox command to the firmware running on NHI. Returns %0 in
- * case of success and negative errno in case of failure.
+ * Sends mailbox command to the firmware running on NHI.
+ *
+ * Return: %0 on success, negative errno otherwise.
*/
int nhi_mailbox_cmd(struct tb_nhi *nhi, enum nhi_mailbox_cmd cmd, u32 data)
{
@@ -850,6 +898,8 @@ int nhi_mailbox_cmd(struct tb_nhi *nhi, enum nhi_mailbox_cmd cmd, u32 data)
*
* The function reads current firmware operation mode using NHI mailbox
* registers and returns it to the caller.
+ *
+ * Return: &enum nhi_fw_mode.
*/
enum nhi_fw_mode nhi_mailbox_mode(struct tb_nhi *nhi)
{
@@ -1181,6 +1231,37 @@ static void nhi_check_iommu(struct tb_nhi *nhi)
str_enabled_disabled(port_ok));
}
+static void nhi_reset(struct tb_nhi *nhi)
+{
+ ktime_t timeout;
+ u32 val;
+
+ val = ioread32(nhi->iobase + REG_CAPS);
+ /* Reset only v2 and later routers */
+ if (FIELD_GET(REG_CAPS_VERSION_MASK, val) < REG_CAPS_VERSION_2)
+ return;
+
+ if (!host_reset) {
+ dev_dbg(&nhi->pdev->dev, "skipping host router reset\n");
+ return;
+ }
+
+ iowrite32(REG_RESET_HRR, nhi->iobase + REG_RESET);
+ msleep(100);
+
+ timeout = ktime_add_ms(ktime_get(), 500);
+ do {
+ val = ioread32(nhi->iobase + REG_RESET);
+ if (!(val & REG_RESET_HRR)) {
+ dev_warn(&nhi->pdev->dev, "host router reset successful\n");
+ return;
+ }
+ usleep_range(10, 20);
+ } while (ktime_before(ktime_get(), timeout));
+
+ dev_warn(&nhi->pdev->dev, "timeout resetting host router\n");
+}
+
static int nhi_init_msi(struct tb_nhi *nhi)
{
struct pci_dev *pdev = nhi->pdev;
@@ -1269,19 +1350,19 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (res)
return dev_err_probe(dev, res, "cannot enable PCI device, aborting\n");
- res = pcim_iomap_regions(pdev, 1 << 0, "thunderbolt");
- if (res)
- return dev_err_probe(dev, res, "cannot obtain PCI resources, aborting\n");
-
nhi = devm_kzalloc(&pdev->dev, sizeof(*nhi), GFP_KERNEL);
if (!nhi)
return -ENOMEM;
nhi->pdev = pdev;
nhi->ops = (const struct tb_nhi_ops *)id->driver_data;
- /* cannot fail - table is allocated in pcim_iomap_regions */
- nhi->iobase = pcim_iomap_table(pdev)[0];
- nhi->hop_count = ioread32(nhi->iobase + REG_HOP_COUNT) & 0x3ff;
+
+ nhi->iobase = pcim_iomap_region(pdev, 0, "thunderbolt");
+ res = PTR_ERR_OR_ZERO(nhi->iobase);
+ if (res)
+ return dev_err_probe(dev, res, "cannot obtain PCI resources, aborting\n");
+
+ nhi->hop_count = ioread32(nhi->iobase + REG_CAPS) & 0x3ff;
dev_dbg(dev, "total paths: %d\n", nhi->hop_count);
nhi->tx_rings = devm_kcalloc(&pdev->dev, nhi->hop_count,
@@ -1293,6 +1374,7 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
nhi_check_quirks(nhi);
nhi_check_iommu(nhi);
+ nhi_reset(nhi);
res = nhi_init_msi(nhi);
if (res)
@@ -1319,7 +1401,7 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
dev_dbg(dev, "NHI initialized, starting thunderbolt\n");
- res = tb_domain_add(tb);
+ res = tb_domain_add(tb, host_reset);
if (res) {
/*
* At this point the RX/TX rings might already have been
@@ -1444,6 +1526,22 @@ static struct pci_device_id nhi_ids[] = {
.driver_data = (kernel_ulong_t)&icl_nhi_ops },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MTL_P_NHI1),
.driver_data = (kernel_ulong_t)&icl_nhi_ops },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_LNL_NHI0),
+ .driver_data = (kernel_ulong_t)&icl_nhi_ops },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_LNL_NHI1),
+ .driver_data = (kernel_ulong_t)&icl_nhi_ops },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_PTL_M_NHI0),
+ .driver_data = (kernel_ulong_t)&icl_nhi_ops },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_PTL_M_NHI1),
+ .driver_data = (kernel_ulong_t)&icl_nhi_ops },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_PTL_P_NHI0),
+ .driver_data = (kernel_ulong_t)&icl_nhi_ops },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_PTL_P_NHI1),
+ .driver_data = (kernel_ulong_t)&icl_nhi_ops },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_WCL_NHI0),
+ .driver_data = (kernel_ulong_t)&icl_nhi_ops },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_80G_NHI) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_40G_NHI) },
/* Any USB4 compliant host */
{ PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_USB4, ~0) },
@@ -1452,6 +1550,7 @@ static struct pci_device_id nhi_ids[] = {
};
MODULE_DEVICE_TABLE(pci, nhi_ids);
+MODULE_DESCRIPTION("Thunderbolt/USB4 core driver");
MODULE_LICENSE("GPL");
static struct pci_driver nhi_driver = {