Diffstat (limited to 'drivers/thunderbolt/nhi.c')
-rw-r--r-- drivers/thunderbolt/nhi.c | 613
1 file changed, 501 insertions(+), 112 deletions(-)
diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
index 9aa44f9762a3..6d0c9d37c55d 100644
--- a/drivers/thunderbolt/nhi.c
+++ b/drivers/thunderbolt/nhi.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Thunderbolt driver - NHI driver
*
@@ -12,9 +13,14 @@
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/pci.h>
+#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
+#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/delay.h>
+#include <linux/property.h>
+#include <linux/string_choices.h>
+#include <linux/string_helpers.h>
#include "nhi.h"
#include "nhi_regs.h"
@@ -22,14 +28,12 @@
#define RING_TYPE(ring) ((ring)->is_tx ? "TX ring" : "RX ring")
+#define RING_FIRST_USABLE_HOPID 1
/*
- * Used to enable end-to-end workaround for missing RX packets. Do not
- * use this ring for anything else.
+ * Used with QUIRK_E2E to specify an unused HopID to which the Rx
+ * credits are transferred.
*/
-#define RING_E2E_UNUSED_HOPID 2
-/* HopIDs 0-7 are reserved by the Thunderbolt protocol */
-#define RING_FIRST_USABLE_HOPID 8
-
+#define RING_E2E_RESERVED_HOPID RING_FIRST_USABLE_HOPID
/*
* Minimal number of vectors when we use MSI-X. Two for control channel
* Rx/Tx and the rest four are for cross domain DMA paths.
@@ -39,7 +43,15 @@
#define NHI_MAILBOX_TIMEOUT 500 /* ms */
-static int ring_interrupt_index(struct tb_ring *ring)
+/* Host interface quirks */
+#define QUIRK_AUTO_CLEAR_INT BIT(0)
+#define QUIRK_E2E BIT(1)
+
+static bool host_reset = true;
+module_param(host_reset, bool, 0444);
+MODULE_PARM_DESC(host_reset, "reset USB4 host router (default: true)");
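With the driver built as the usual "thunderbolt" module, this parameter can be flipped with "thunderbolt.host_reset=false" on the kernel command line or "modprobe thunderbolt host_reset=false" at load time; the 0444 mode additionally makes the current value readable from /sys/module/thunderbolt/parameters/host_reset.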
+
+static int ring_interrupt_index(const struct tb_ring *ring)
{
int bit = ring->hop;
if (!ring->is_tx)
@@ -47,22 +59,43 @@ static int ring_interrupt_index(struct tb_ring *ring)
return bit;
}
-/**
+static void nhi_mask_interrupt(struct tb_nhi *nhi, int mask, int ring)
+{
+ if (nhi->quirks & QUIRK_AUTO_CLEAR_INT) {
+ u32 val;
+
+ val = ioread32(nhi->iobase + REG_RING_INTERRUPT_BASE + ring);
+ iowrite32(val & ~mask, nhi->iobase + REG_RING_INTERRUPT_BASE + ring);
+ } else {
+ iowrite32(mask, nhi->iobase + REG_RING_INTERRUPT_MASK_CLEAR_BASE + ring);
+ }
+}
+
+static void nhi_clear_interrupt(struct tb_nhi *nhi, int ring)
+{
+ if (nhi->quirks & QUIRK_AUTO_CLEAR_INT)
+ ioread32(nhi->iobase + REG_RING_NOTIFY_BASE + ring);
+ else
+ iowrite32(~0, nhi->iobase + REG_RING_INT_CLEAR + ring);
+}
+
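Together these helpers hide two acknowledgement models: with QUIRK_AUTO_CLEAR_INT the status register is read-to-clear, so a plain ioread32() acknowledges the interrupt, while other hosts provide explicit write-one-to-clear and mask registers. A userspace sketch of the distinction, with the register behaviour modelled by a plain variable and all names illustrative:

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t status = 0x5;		/* pending interrupt bits */

	static uint32_t read_to_clear(void)
	{
		uint32_t val = status;		/* QUIRK_AUTO_CLEAR_INT: */
		status = 0;			/* the read clears status */
		return val;
	}

	static void write_one_to_clear(uint32_t mask)
	{
		status &= ~mask;		/* explicit W1C register */
	}

	int main(void)
	{
		printf("%#x\n", read_to_clear());	/* 0x5, now cleared */
		status = 0x5;
		write_one_to_clear(0x1);
		printf("%#x\n", status);		/* 0x4 */
		return 0;
	}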
+/*
* ring_interrupt_active() - activate/deactivate interrupts for a single ring
*
* ring->nhi->lock must be held.
*/
static void ring_interrupt_active(struct tb_ring *ring, bool active)
{
- int reg = REG_RING_INTERRUPT_BASE +
- ring_interrupt_index(ring) / 32 * 4;
- int bit = ring_interrupt_index(ring) & 31;
- int mask = 1 << bit;
+ int index = ring_interrupt_index(ring) / 32 * 4;
+ int reg = REG_RING_INTERRUPT_BASE + index;
+ int interrupt_bit = ring_interrupt_index(ring) & 31;
+ int mask = 1 << interrupt_bit;
u32 old, new;
if (ring->irq > 0) {
u32 step, shift, ivr, misc;
void __iomem *ivr_base;
+ int auto_clear_bit;
int index;
if (ring->is_tx)
@@ -71,14 +104,24 @@ static void ring_interrupt_active(struct tb_ring *ring, bool active)
index = ring->hop + ring->nhi->hop_count;
/*
- * Ask the hardware to clear interrupt status bits automatically
- * since we already know which interrupt was triggered.
+ * Intel routers support a bit that isn't part of
+ * the USB4 spec to ask the hardware to clear
+ * interrupt status bits automatically since
+ * we already know which interrupt was triggered.
+ *
+ * Other routers explicitly disable auto-clear
+ * to avoid the case where two MSI-X interrupts
+ * are active at the same time and reading the
+ * register clears both of them.
*/
misc = ioread32(ring->nhi->iobase + REG_DMA_MISC);
- if (!(misc & REG_DMA_MISC_INT_AUTO_CLEAR)) {
- misc |= REG_DMA_MISC_INT_AUTO_CLEAR;
- iowrite32(misc, ring->nhi->iobase + REG_DMA_MISC);
- }
+ if (ring->nhi->quirks & QUIRK_AUTO_CLEAR_INT)
+ auto_clear_bit = REG_DMA_MISC_INT_AUTO_CLEAR;
+ else
+ auto_clear_bit = REG_DMA_MISC_DISABLE_AUTO_CLEAR;
+ if (!(misc & auto_clear_bit))
+ iowrite32(misc | auto_clear_bit,
+ ring->nhi->iobase + REG_DMA_MISC);
ivr_base = ring->nhi->iobase + REG_INT_VEC_ALLOC_BASE;
step = index / REG_INT_VEC_ALLOC_REGS * REG_INT_VEC_ALLOC_BITS;
@@ -98,17 +141,21 @@ static void ring_interrupt_active(struct tb_ring *ring, bool active)
dev_dbg(&ring->nhi->pdev->dev,
"%s interrupt at register %#x bit %d (%#x -> %#x)\n",
- active ? "enabling" : "disabling", reg, bit, old, new);
+ active ? "enabling" : "disabling", reg, interrupt_bit, old, new);
if (new == old)
dev_WARN(&ring->nhi->pdev->dev,
"interrupt for %s %d is already %s\n",
RING_TYPE(ring), ring->hop,
- active ? "enabled" : "disabled");
- iowrite32(new, ring->nhi->iobase + reg);
+ str_enabled_disabled(active));
+
+ if (active)
+ iowrite32(new, ring->nhi->iobase + reg);
+ else
+ nhi_mask_interrupt(ring->nhi, mask, index);
}
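The register/bit arithmetic above follows directly from ring_interrupt_index(): TX ring N owns bit N, RX ring N owns bit hop_count + N, and the bits are packed into consecutive 32-bit registers. A self-contained userspace sketch of the mapping (function name illustrative):

	#include <stdio.h>

	static void show_irq_slot(int hop, int is_tx, int hop_count)
	{
		int bit = is_tx ? hop : hop + hop_count;

		printf("%s ring %d -> register offset %#x, bit %d\n",
		       is_tx ? "TX" : "RX", hop, bit / 32 * 4, bit & 31);
	}

	int main(void)
	{
		show_irq_slot(5, 1, 32);	/* TX ring 5: offset 0x0, bit 5 */
		show_irq_slot(5, 0, 32);	/* RX ring 5: offset 0x4, bit 5 */
		return 0;
	}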
-/**
+/*
* nhi_disable_interrupts() - disable interrupts for all rings
*
* Use only during init and shutdown.
@@ -118,11 +165,11 @@ static void nhi_disable_interrupts(struct tb_nhi *nhi)
int i = 0;
/* disable interrupts */
for (i = 0; i < RING_INTERRUPT_REG_COUNT(nhi); i++)
- iowrite32(0, nhi->iobase + REG_RING_INTERRUPT_BASE + 4 * i);
+ nhi_mask_interrupt(nhi, ~0, 4 * i);
/* clear interrupt status bits */
for (i = 0; i < RING_NOTIFY_REG_COUNT(nhi); i++)
- ioread32(nhi->iobase + REG_RING_NOTIFY_BASE + 4 * i);
+ nhi_clear_interrupt(nhi, 4 * i);
}
/* ring helper methods */
@@ -143,9 +190,20 @@ static void __iomem *ring_options_base(struct tb_ring *ring)
return io;
}
-static void ring_iowrite16desc(struct tb_ring *ring, u32 value, u32 offset)
+static void ring_iowrite_cons(struct tb_ring *ring, u16 cons)
{
- iowrite16(value, ring_desc_base(ring) + offset);
+ /*
+ * The other 16 bits in the register are read-only and writes to
+ * them are ignored by the hardware, so we can save one ioread32()
+ * by filling the read-only bits with zeroes.
+ */
+ iowrite32(cons, ring_desc_base(ring) + 8);
+}
+
+static void ring_iowrite_prod(struct tb_ring *ring, u16 prod)
+{
+ /* See ring_iowrite_cons() above for explanation */
+ iowrite32(prod << 16, ring_desc_base(ring) + 8);
}
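Both helpers rely on the same layout of the ring register at offset 8: the producer index sits in bits 31:16 and the consumer index in bits 15:0, and whichever half software does not own ignores writes. A standalone sketch of the packing:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint16_t prod = 7, cons = 3;
		uint32_t reg = (uint32_t)prod << 16 | cons;

		printf("prod=%u cons=%u\n", reg >> 16, reg & 0xffff);
		return 0;
	}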
static void ring_iowrite32desc(struct tb_ring *ring, u32 value, u32 offset)
@@ -174,7 +232,7 @@ static bool ring_empty(struct tb_ring *ring)
return ring->head == ring->tail;
}
-/**
+/*
* ring_write_descriptors() - post frames from ring->queue to the controller
*
* ring->lock is held.
@@ -197,11 +255,14 @@ static void ring_write_descriptors(struct tb_ring *ring)
descriptor->sof = frame->sof;
}
ring->head = (ring->head + 1) % ring->size;
- ring_iowrite16desc(ring, ring->head, ring->is_tx ? 10 : 8);
+ if (ring->is_tx)
+ ring_iowrite_prod(ring, ring->head);
+ else
+ ring_iowrite_cons(ring, ring->head);
}
}
-/**
+/*
* ring_work() - progress completed frames
*
* If the ring is shutting down then all frames are marked as canceled and
@@ -283,8 +344,10 @@ EXPORT_SYMBOL_GPL(__tb_ring_enqueue);
*
* This function can be called when @start_poll callback of the @ring
* has been called. It will read one completed frame from the ring and
- * return it to the caller. Returns %NULL if there is no more completed
- * frames.
+ * return it to the caller.
+ *
+ * Return: Pointer to &struct ring_frame, or %NULL if there are no
+ * more completed frames.
*/
struct ring_frame *tb_ring_poll(struct tb_ring *ring)
{
@@ -367,11 +430,27 @@ void tb_ring_poll_complete(struct tb_ring *ring)
}
EXPORT_SYMBOL_GPL(tb_ring_poll_complete);
+static void ring_clear_msix(const struct tb_ring *ring)
+{
+ int bit;
+
+ if (ring->nhi->quirks & QUIRK_AUTO_CLEAR_INT)
+ return;
+
+ bit = ring_interrupt_index(ring) & 31;
+ if (ring->is_tx)
+ iowrite32(BIT(bit), ring->nhi->iobase + REG_RING_INT_CLEAR);
+ else
+ iowrite32(BIT(bit), ring->nhi->iobase + REG_RING_INT_CLEAR +
+ 4 * (ring->nhi->hop_count / 32));
+}
+
static irqreturn_t ring_msix(int irq, void *data)
{
struct tb_ring *ring = data;
spin_lock(&ring->nhi->lock);
+ ring_clear_msix(ring);
spin_lock(&ring->lock);
__ring_interrupt(ring);
spin_unlock(&ring->lock);
@@ -389,18 +468,29 @@ static int ring_request_msix(struct tb_ring *ring, bool no_suspend)
if (!nhi->pdev->msix_enabled)
return 0;
- ret = ida_simple_get(&nhi->msix_ida, 0, MSIX_MAX_VECS, GFP_KERNEL);
+ ret = ida_alloc_max(&nhi->msix_ida, MSIX_MAX_VECS - 1, GFP_KERNEL);
if (ret < 0)
return ret;
ring->vector = ret;
- ring->irq = pci_irq_vector(ring->nhi->pdev, ring->vector);
- if (ring->irq < 0)
- return ring->irq;
+ ret = pci_irq_vector(ring->nhi->pdev, ring->vector);
+ if (ret < 0)
+ goto err_ida_remove;
+
+ ring->irq = ret;
irqflags = no_suspend ? IRQF_NO_SUSPEND : 0;
- return request_irq(ring->irq, ring_msix, irqflags, "thunderbolt", ring);
+ ret = request_irq(ring->irq, ring_msix, irqflags, "thunderbolt", ring);
+ if (ret)
+ goto err_ida_remove;
+
+ return 0;
+
+err_ida_remove:
+ ida_free(&nhi->msix_ida, ring->vector);
+
+ return ret;
}
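The ida_simple_get() to ida_alloc_max() conversion is behaviour-preserving: the former takes an exclusive upper bound, the latter an inclusive one, hence MSIX_MAX_VECS - 1. Both return the allocated ID on success and a negative errno on failure, so the surrounding error handling is unchanged:

	/* Equivalent allocations from the range [0, MSIX_MAX_VECS - 1] */
	ret = ida_simple_get(&nhi->msix_ida, 0, MSIX_MAX_VECS, GFP_KERNEL);
	ret = ida_alloc_max(&nhi->msix_ida, MSIX_MAX_VECS - 1, GFP_KERNEL);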
static void ring_release_msix(struct tb_ring *ring)
@@ -409,15 +499,25 @@ static void ring_release_msix(struct tb_ring *ring)
return;
free_irq(ring->irq, ring);
- ida_simple_remove(&ring->nhi->msix_ida, ring->vector);
+ ida_free(&ring->nhi->msix_ida, ring->vector);
ring->vector = 0;
ring->irq = 0;
}
static int nhi_alloc_hop(struct tb_nhi *nhi, struct tb_ring *ring)
{
+ unsigned int start_hop = RING_FIRST_USABLE_HOPID;
int ret = 0;
+ if (nhi->quirks & QUIRK_E2E) {
+ start_hop = RING_FIRST_USABLE_HOPID + 1;
+ if (ring->flags & RING_FLAG_E2E && !ring->is_tx) {
+ dev_dbg(&nhi->pdev->dev, "quirking E2E TX HopID %u -> %u\n",
+ ring->e2e_tx_hop, RING_E2E_RESERVED_HOPID);
+ ring->e2e_tx_hop = RING_E2E_RESERVED_HOPID;
+ }
+ }
+
spin_lock_irq(&nhi->lock);
if (ring->hop < 0) {
@@ -425,9 +525,9 @@ static int nhi_alloc_hop(struct tb_nhi *nhi, struct tb_ring *ring)
/*
* Automatically allocate HopID from the non-reserved
- * range 8 .. hop_count - 1.
+ * range 1 .. hop_count - 1.
*/
- for (i = RING_FIRST_USABLE_HOPID; i < nhi->hop_count; i++) {
+ for (i = start_hop; i < nhi->hop_count; i++) {
if (ring->is_tx) {
if (!nhi->tx_rings[i]) {
ring->hop = i;
@@ -442,6 +542,11 @@ static int nhi_alloc_hop(struct tb_nhi *nhi, struct tb_ring *ring)
}
}
+ if (ring->hop > 0 && ring->hop < start_hop) {
+ dev_warn(&nhi->pdev->dev, "invalid hop: %d\n", ring->hop);
+ ret = -EINVAL;
+ goto err_unlock;
+ }
if (ring->hop < 0 || ring->hop >= nhi->hop_count) {
dev_warn(&nhi->pdev->dev, "invalid hop: %d\n", ring->hop);
ret = -EINVAL;
@@ -452,7 +557,8 @@ static int nhi_alloc_hop(struct tb_nhi *nhi, struct tb_ring *ring)
ring->hop);
ret = -EBUSY;
goto err_unlock;
- } else if (!ring->is_tx && nhi->rx_rings[ring->hop]) {
+ }
+ if (!ring->is_tx && nhi->rx_rings[ring->hop]) {
dev_warn(&nhi->pdev->dev, "RX hop %d already allocated\n",
ring->hop);
ret = -EBUSY;
@@ -472,7 +578,7 @@ err_unlock:
static struct tb_ring *tb_ring_alloc(struct tb_nhi *nhi, u32 hop, int size,
bool transmit, unsigned int flags,
- u16 sof_mask, u16 eof_mask,
+ int e2e_tx_hop, u16 sof_mask, u16 eof_mask,
void (*start_poll)(void *),
void *poll_data)
{
@@ -481,10 +587,6 @@ static struct tb_ring *tb_ring_alloc(struct tb_nhi *nhi, u32 hop, int size,
dev_dbg(&nhi->pdev->dev, "allocating %s ring %d of size %d\n",
transmit ? "TX" : "RX", hop, size);
- /* Tx Ring 2 is reserved for E2E workaround */
- if (transmit && hop == RING_E2E_UNUSED_HOPID)
- return NULL;
-
ring = kzalloc(sizeof(*ring), GFP_KERNEL);
if (!ring)
return NULL;
@@ -499,6 +601,7 @@ static struct tb_ring *tb_ring_alloc(struct tb_nhi *nhi, u32 hop, int size,
ring->is_tx = transmit;
ring->size = size;
ring->flags = flags;
+ ring->e2e_tx_hop = e2e_tx_hop;
ring->sof_mask = sof_mask;
ring->eof_mask = eof_mask;
ring->head = 0;
@@ -539,11 +642,13 @@ err_free_ring:
* @hop: HopID (ring) to allocate
* @size: Number of entries in the ring
* @flags: Flags for the ring
+ *
+ * Return: Pointer to &struct tb_ring on success, %NULL otherwise.
*/
struct tb_ring *tb_ring_alloc_tx(struct tb_nhi *nhi, int hop, int size,
unsigned int flags)
{
- return tb_ring_alloc(nhi, hop, size, true, flags, 0, 0, NULL, NULL);
+ return tb_ring_alloc(nhi, hop, size, true, flags, 0, 0, 0, NULL, NULL);
}
EXPORT_SYMBOL_GPL(tb_ring_alloc_tx);
@@ -553,24 +658,29 @@ EXPORT_SYMBOL_GPL(tb_ring_alloc_tx);
* @hop: HopID (ring) to allocate. Pass %-1 for automatic allocation.
* @size: Number of entries in the ring
* @flags: Flags for the ring
+ * @e2e_tx_hop: Transmit HopID when E2E is enabled in @flags
* @sof_mask: Mask of PDF values that start a frame
* @eof_mask: Mask of PDF values that end a frame
* @start_poll: If not %NULL the ring will call this function when an
* interrupt is triggered and masked, instead of callback
* in each Rx frame.
* @poll_data: Optional data passed to @start_poll
+ *
+ * Return: Pointer to &struct tb_ring on success, %NULL otherwise.
*/
struct tb_ring *tb_ring_alloc_rx(struct tb_nhi *nhi, int hop, int size,
- unsigned int flags, u16 sof_mask, u16 eof_mask,
+ unsigned int flags, int e2e_tx_hop,
+ u16 sof_mask, u16 eof_mask,
void (*start_poll)(void *), void *poll_data)
{
- return tb_ring_alloc(nhi, hop, size, false, flags, sof_mask, eof_mask,
- start_poll, poll_data);
+ return tb_ring_alloc(nhi, hop, size, false, flags, e2e_tx_hop,
+ sof_mask, eof_mask, start_poll, poll_data);
}
EXPORT_SYMBOL_GPL(tb_ring_alloc_rx);
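A hedged caller sketch of the updated signature; the tx_ring variable, ring size, and masks are assumptions, but the pattern shows how an E2E-capable client pairs the Rx ring with its Tx counterpart's HopID:

	struct tb_ring *rx_ring;

	rx_ring = tb_ring_alloc_rx(nhi, -1, 256, RING_FLAG_E2E, tx_ring->hop,
				   sof_mask, eof_mask, NULL, NULL);
	if (!rx_ring)
		return -ENOMEM;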
/**
* tb_ring_start() - enable a ring
+ * @ring: Ring to start
*
* Must not be invoked in parallel with tb_ring_stop().
*/
@@ -599,23 +709,10 @@ void tb_ring_start(struct tb_ring *ring)
flags = RING_FLAG_ENABLE | RING_FLAG_RAW;
}
- if (ring->flags & RING_FLAG_E2E && !ring->is_tx) {
- u32 hop;
-
- /*
- * In order not to lose Rx packets we enable end-to-end
- * workaround which transfers Rx credits to an unused Tx
- * HopID.
- */
- hop = RING_E2E_UNUSED_HOPID << REG_RX_OPTIONS_E2E_HOP_SHIFT;
- hop &= REG_RX_OPTIONS_E2E_HOP_MASK;
- flags |= hop | RING_FLAG_E2E_FLOW_CONTROL;
- }
-
ring_iowrite64desc(ring, ring->descriptors_dma, 0);
if (ring->is_tx) {
ring_iowrite32desc(ring, ring->size, 12);
- ring_iowrite32options(ring, 0, 4); /* time releated ? */
+ ring_iowrite32options(ring, 0, 4);
ring_iowrite32options(ring, flags, 0);
} else {
u32 sof_eof_mask = ring->sof_mask << 16 | ring->eof_mask;
@@ -624,6 +721,31 @@ void tb_ring_start(struct tb_ring *ring)
ring_iowrite32options(ring, sof_eof_mask, 4);
ring_iowrite32options(ring, flags, 0);
}
+
+ /*
+ * Now that the ring valid bit is set we can configure E2E if
+ * enabled for the ring.
+ */
+ if (ring->flags & RING_FLAG_E2E) {
+ if (!ring->is_tx) {
+ u32 hop;
+
+ hop = ring->e2e_tx_hop << REG_RX_OPTIONS_E2E_HOP_SHIFT;
+ hop &= REG_RX_OPTIONS_E2E_HOP_MASK;
+ flags |= hop;
+
+ dev_dbg(&ring->nhi->pdev->dev,
+ "enabling E2E for %s %d with TX HopID %d\n",
+ RING_TYPE(ring), ring->hop, ring->e2e_tx_hop);
+ } else {
+ dev_dbg(&ring->nhi->pdev->dev, "enabling E2E for %s %d\n",
+ RING_TYPE(ring), ring->hop);
+ }
+
+ flags |= RING_FLAG_E2E_FLOW_CONTROL;
+ ring_iowrite32options(ring, flags, 0);
+ }
+
ring_interrupt_active(ring, true);
ring->running = true;
err:
@@ -634,6 +756,7 @@ EXPORT_SYMBOL_GPL(tb_ring_start);
/**
* tb_ring_stop() - shutdown a ring
+ * @ring: Ring to stop
*
* Must not be invoked from a callback.
*
@@ -662,7 +785,7 @@ void tb_ring_stop(struct tb_ring *ring)
ring_iowrite32options(ring, 0, 0);
ring_iowrite64desc(ring, 0, 0);
- ring_iowrite16desc(ring, 0, ring->is_tx ? 10 : 8);
+ ring_iowrite32desc(ring, 0, 8);
ring_iowrite32desc(ring, 0, 12);
ring->head = 0;
ring->tail = 0;
@@ -721,7 +844,7 @@ void tb_ring_free(struct tb_ring *ring)
dev_dbg(&ring->nhi->pdev->dev, "freeing %s %d\n", RING_TYPE(ring),
ring->hop);
- /**
+ /*
* ring->work can no longer be scheduled (it is scheduled only
* by nhi_interrupt_work, ring_stop and ring_msix). Wait for it
* to finish before freeing the ring.
@@ -737,8 +860,9 @@ EXPORT_SYMBOL_GPL(tb_ring_free);
* @cmd: Command to send
* @data: Data to be send with the command
*
- * Sends mailbox command to the firmware running on NHI. Returns %0 in
- * case of success and negative errno in case of failure.
+ * Sends mailbox command to the firmware running on NHI.
+ *
+ * Return: %0 on success, negative errno otherwise.
*/
int nhi_mailbox_cmd(struct tb_nhi *nhi, enum nhi_mailbox_cmd cmd, u32 data)
{
@@ -774,6 +898,8 @@ int nhi_mailbox_cmd(struct tb_nhi *nhi, enum nhi_mailbox_cmd cmd, u32 data)
*
* The function reads current firmware operation mode using NHI mailbox
* registers and returns it to the caller.
+ *
+ * Return: Current firmware operation mode as &enum nhi_fw_mode.
*/
enum nhi_fw_mode nhi_mailbox_mode(struct tb_nhi *nhi)
{
@@ -845,12 +971,68 @@ static irqreturn_t nhi_msi(int irq, void *data)
return IRQ_HANDLED;
}
+static int __nhi_suspend_noirq(struct device *dev, bool wakeup)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct tb *tb = pci_get_drvdata(pdev);
+ struct tb_nhi *nhi = tb->nhi;
+ int ret;
+
+ ret = tb_domain_suspend_noirq(tb);
+ if (ret)
+ return ret;
+
+ if (nhi->ops && nhi->ops->suspend_noirq) {
+ ret = nhi->ops->suspend_noirq(tb->nhi, wakeup);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
static int nhi_suspend_noirq(struct device *dev)
{
+ return __nhi_suspend_noirq(dev, device_may_wakeup(dev));
+}
+
+static int nhi_freeze_noirq(struct device *dev)
+{
struct pci_dev *pdev = to_pci_dev(dev);
struct tb *tb = pci_get_drvdata(pdev);
- return tb_domain_suspend_noirq(tb);
+ return tb_domain_freeze_noirq(tb);
+}
+
+static int nhi_thaw_noirq(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct tb *tb = pci_get_drvdata(pdev);
+
+ return tb_domain_thaw_noirq(tb);
+}
+
+static bool nhi_wake_supported(struct pci_dev *pdev)
+{
+ u8 val;
+
+ /*
+ * If power rails are sustainable for wakeup from S4 this
+ * property is set by the BIOS.
+ */
+ if (!device_property_read_u8(&pdev->dev, "WAKE_SUPPORTED", &val))
+ return !!val;
+
+ return true;
+}
+
+static int nhi_poweroff_noirq(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ bool wakeup;
+
+ wakeup = device_may_wakeup(dev) && nhi_wake_supported(pdev);
+ return __nhi_suspend_noirq(dev, wakeup);
}
static void nhi_enable_int_throttling(struct tb_nhi *nhi)
@@ -873,16 +1055,24 @@ static int nhi_resume_noirq(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct tb *tb = pci_get_drvdata(pdev);
+ struct tb_nhi *nhi = tb->nhi;
+ int ret;
/*
* Check that the device is still there. It may be that the user
* unplugged last device which causes the host controller to go
* away on PCs.
*/
- if (!pci_device_is_present(pdev))
- tb->nhi->going_away = true;
- else
+ if (!pci_device_is_present(pdev)) {
+ nhi->going_away = true;
+ } else {
+ if (nhi->ops && nhi->ops->resume_noirq) {
+ ret = nhi->ops->resume_noirq(nhi);
+ if (ret)
+ return ret;
+ }
nhi_enable_int_throttling(tb->nhi);
+ }
return tb_domain_resume_noirq(tb);
}
@@ -915,16 +1105,35 @@ static int nhi_runtime_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct tb *tb = pci_get_drvdata(pdev);
+ struct tb_nhi *nhi = tb->nhi;
+ int ret;
+
+ ret = tb_domain_runtime_suspend(tb);
+ if (ret)
+ return ret;
- return tb_domain_runtime_suspend(tb);
+ if (nhi->ops && nhi->ops->runtime_suspend) {
+ ret = nhi->ops->runtime_suspend(tb->nhi);
+ if (ret)
+ return ret;
+ }
+ return 0;
}
static int nhi_runtime_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct tb *tb = pci_get_drvdata(pdev);
+ struct tb_nhi *nhi = tb->nhi;
+ int ret;
+
+ if (nhi->ops && nhi->ops->runtime_resume) {
+ ret = nhi->ops->runtime_resume(nhi);
+ if (ret)
+ return ret;
+ }
- nhi_enable_int_throttling(tb->nhi);
+ nhi_enable_int_throttling(nhi);
return tb_domain_runtime_resume(tb);
}
@@ -952,11 +1161,111 @@ static void nhi_shutdown(struct tb_nhi *nhi)
flush_work(&nhi->interrupt_work);
}
ida_destroy(&nhi->msix_ida);
+
+ if (nhi->ops && nhi->ops->shutdown)
+ nhi->ops->shutdown(nhi);
+}
+
+static void nhi_check_quirks(struct tb_nhi *nhi)
+{
+ if (nhi->pdev->vendor == PCI_VENDOR_ID_INTEL) {
+ /*
+ * Intel hardware supports auto clear of the interrupt
+ * status register right after interrupt is being
+ * issued.
+ */
+ nhi->quirks |= QUIRK_AUTO_CLEAR_INT;
+
+ switch (nhi->pdev->device) {
+ case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI:
+ case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI:
+ /*
+ * Falcon Ridge controller needs the end-to-end
+ * flow control workaround to avoid losing Rx
+ * packets when RING_FLAG_E2E is set.
+ */
+ nhi->quirks |= QUIRK_E2E;
+ break;
+ }
+ }
+}
+
+static int nhi_check_iommu_pdev(struct pci_dev *pdev, void *data)
+{
+ if (!pdev->external_facing ||
+ !device_iommu_capable(&pdev->dev, IOMMU_CAP_PRE_BOOT_PROTECTION))
+ return 0;
+ *(bool *)data = true;
+ return 1; /* Stop walking */
+}
+
+static void nhi_check_iommu(struct tb_nhi *nhi)
+{
+ struct pci_bus *bus = nhi->pdev->bus;
+ bool port_ok = false;
+
+ /*
+ * Ideally what we'd do here is grab every PCI device that
+ * represents a tunnelling adapter for this NHI and check their
+ * status directly, but unfortunately USB4 seems to make it
+ * obnoxiously difficult to reliably make any correlation.
+ *
+ * So for now we'll have to bodge it... Hoping that the system
+ * is at least sane enough that an adapter is in the same PCI
+ * segment as its NHI, if we can find *something* on that segment
+ * which meets the requirements for Kernel DMA Protection, we'll
+ * take that to imply that firmware is aware and has (hopefully)
+ * done the right thing in general. We need to know that the PCI
+ * layer has seen the ExternalFacingPort property which will then
+ * inform the IOMMU layer to enforce the complete "untrusted DMA"
+ * flow, but also that the IOMMU driver itself can be trusted not
+ * to have been subverted by a pre-boot DMA attack.
+ */
+ while (bus->parent)
+ bus = bus->parent;
+
+ pci_walk_bus(bus, nhi_check_iommu_pdev, &port_ok);
+
+ nhi->iommu_dma_protection = port_ok;
+ dev_dbg(&nhi->pdev->dev, "IOMMU DMA protection is %s\n",
+ str_enabled_disabled(port_ok));
+}
+
+static void nhi_reset(struct tb_nhi *nhi)
+{
+ ktime_t timeout;
+ u32 val;
+
+ val = ioread32(nhi->iobase + REG_CAPS);
+ /* Reset only v2 and later routers */
+ if (FIELD_GET(REG_CAPS_VERSION_MASK, val) < REG_CAPS_VERSION_2)
+ return;
+
+ if (!host_reset) {
+ dev_dbg(&nhi->pdev->dev, "skipping host router reset\n");
+ return;
+ }
+
+ iowrite32(REG_RESET_HRR, nhi->iobase + REG_RESET);
+ msleep(100);
+
+ timeout = ktime_add_ms(ktime_get(), 500);
+ do {
+ val = ioread32(nhi->iobase + REG_RESET);
+ if (!(val & REG_RESET_HRR)) {
+ dev_info(&nhi->pdev->dev, "host router reset successful\n");
+ return;
+ }
+ usleep_range(10, 20);
+ } while (ktime_before(ktime_get(), timeout));
+
+ dev_warn(&nhi->pdev->dev, "timeout resetting host router\n");
}
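nhi_reset() keys off the version field extracted with FIELD_GET(). A userspace illustration of the extraction, assuming for the example only that REG_CAPS_VERSION_MASK covers bits 23:16:

	#include <stdint.h>
	#include <stdio.h>

	#define EXAMPLE_VERSION_MASK	0x00ff0000	/* assumed bits 23:16 */

	int main(void)
	{
		uint32_t caps = 0x00020000;
		uint32_t version = (caps & EXAMPLE_VERSION_MASK) >> 16;

		printf("router version: %u\n", version);	/* 2 */
		return 0;
	}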
static int nhi_init_msi(struct tb_nhi *nhi)
{
struct pci_dev *pdev = nhi->pdev;
+ struct device *dev = &pdev->dev;
int res, irq, nvec;
/* In case someone left them on. */
@@ -987,44 +1296,74 @@ static int nhi_init_msi(struct tb_nhi *nhi)
res = devm_request_irq(&pdev->dev, irq, nhi_msi,
IRQF_NO_SUSPEND, "thunderbolt", nhi);
- if (res) {
- dev_err(&pdev->dev, "request_irq failed, aborting\n");
- return res;
- }
+ if (res)
+ return dev_err_probe(dev, res, "request_irq failed, aborting\n");
}
return 0;
}
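The dev_err_probe() conversions here and in the following hunks are more than cosmetic: the helper returns the error it is given, logs the message for real errors, and for -EPROBE_DEFER records the reason (visible in debugfs under devices_deferred) instead of spamming the log, so the log-then-return pattern collapses to one statement:

	/* Before */
	if (res) {
		dev_err(&pdev->dev, "request_irq failed, aborting\n");
		return res;
	}

	/* After */
	if (res)
		return dev_err_probe(dev, res, "request_irq failed, aborting\n");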
+static bool nhi_imr_valid(struct pci_dev *pdev)
+{
+ u8 val;
+
+ if (!device_property_read_u8(&pdev->dev, "IMR_VALID", &val))
+ return !!val;
+
+ return true;
+}
+
+static struct tb *nhi_select_cm(struct tb_nhi *nhi)
+{
+ struct tb *tb;
+
+ /*
+ * USB4 case is simple. If we got control of any of the
+ * capabilities, we use software CM.
+ */
+ if (tb_acpi_is_native())
+ return tb_probe(nhi);
+
+ /*
+ * Either firmware based CM is running (we did not get control
+ * from the firmware) or this is pre-USB4 PC so try first
+ * firmware CM and then fallback to software CM.
+ */
+ tb = icm_probe(nhi);
+ if (!tb)
+ tb = tb_probe(nhi);
+
+ return tb;
+}
+
static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
+ struct device *dev = &pdev->dev;
struct tb_nhi *nhi;
struct tb *tb;
int res;
- res = pcim_enable_device(pdev);
- if (res) {
- dev_err(&pdev->dev, "cannot enable PCI device, aborting\n");
- return res;
- }
+ if (!nhi_imr_valid(pdev))
+ return dev_err_probe(dev, -ENODEV, "firmware image not valid, aborting\n");
- res = pcim_iomap_regions(pdev, 1 << 0, "thunderbolt");
- if (res) {
- dev_err(&pdev->dev, "cannot obtain PCI resources, aborting\n");
- return res;
- }
+ res = pcim_enable_device(pdev);
+ if (res)
+ return dev_err_probe(dev, res, "cannot enable PCI device, aborting\n");
nhi = devm_kzalloc(&pdev->dev, sizeof(*nhi), GFP_KERNEL);
if (!nhi)
return -ENOMEM;
nhi->pdev = pdev;
- /* cannot fail - table is allocated bin pcim_iomap_regions */
- nhi->iobase = pcim_iomap_table(pdev)[0];
- nhi->hop_count = ioread32(nhi->iobase + REG_HOP_COUNT) & 0x3ff;
- if (nhi->hop_count != 12 && nhi->hop_count != 32)
- dev_warn(&pdev->dev, "unexpected hop count: %d\n",
- nhi->hop_count);
+ nhi->ops = (const struct tb_nhi_ops *)id->driver_data;
+
+ nhi->iobase = pcim_iomap_region(pdev, 0, "thunderbolt");
+ res = PTR_ERR_OR_ZERO(nhi->iobase);
+ if (res)
+ return dev_err_probe(dev, res, "cannot obtain PCI resources, aborting\n");
+
+ nhi->hop_count = ioread32(nhi->iobase + REG_CAPS) & 0x3ff;
+ dev_dbg(dev, "total paths: %d\n", nhi->hop_count);
nhi->tx_rings = devm_kcalloc(&pdev->dev, nhi->hop_count,
sizeof(*nhi->tx_rings), GFP_KERNEL);
@@ -1033,36 +1372,36 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (!nhi->tx_rings || !nhi->rx_rings)
return -ENOMEM;
+ nhi_check_quirks(nhi);
+ nhi_check_iommu(nhi);
+ nhi_reset(nhi);
+
res = nhi_init_msi(nhi);
- if (res) {
- dev_err(&pdev->dev, "cannot enable MSI, aborting\n");
- return res;
- }
+ if (res)
+ return dev_err_probe(dev, res, "cannot enable MSI, aborting\n");
spin_lock_init(&nhi->lock);
res = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (res)
- res = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
- if (res) {
- dev_err(&pdev->dev, "failed to set DMA mask\n");
- return res;
- }
+ return dev_err_probe(dev, res, "failed to set DMA mask\n");
pci_set_master(pdev);
- tb = icm_probe(nhi);
+ if (nhi->ops && nhi->ops->init) {
+ res = nhi->ops->init(nhi);
+ if (res)
+ return res;
+ }
+
+ tb = nhi_select_cm(nhi);
if (!tb)
- tb = tb_probe(nhi);
- if (!tb) {
- dev_err(&nhi->pdev->dev,
+ return dev_err_probe(dev, -ENODEV,
"failed to determine connection manager, aborting\n");
- return -ENODEV;
- }
- dev_dbg(&nhi->pdev->dev, "NHI initialized, starting thunderbolt\n");
+ dev_dbg(dev, "NHI initialized, starting thunderbolt\n");
- res = tb_domain_add(tb);
+ res = tb_domain_add(tb, host_reset);
if (res) {
/*
* At this point the RX/TX rings might already have been
@@ -1074,6 +1413,8 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
pci_set_drvdata(pdev, tb);
+ device_wakeup_enable(&pdev->dev);
+
pm_runtime_allow(&pdev->dev);
pm_runtime_set_autosuspend_delay(&pdev->dev, TB_AUTOSUSPEND_DELAY);
pm_runtime_use_autosuspend(&pdev->dev);
@@ -1103,14 +1444,14 @@ static void nhi_remove(struct pci_dev *pdev)
static const struct dev_pm_ops nhi_pm_ops = {
.suspend_noirq = nhi_suspend_noirq,
.resume_noirq = nhi_resume_noirq,
- .freeze_noirq = nhi_suspend_noirq, /*
+ .freeze_noirq = nhi_freeze_noirq, /*
* we just disable hotplug, the
* pci-tunnels stay alive.
*/
- .thaw_noirq = nhi_resume_noirq,
+ .thaw_noirq = nhi_thaw_noirq,
.restore_noirq = nhi_resume_noirq,
.suspend = nhi_suspend,
- .freeze = nhi_suspend,
+ .poweroff_noirq = nhi_poweroff_noirq,
.poweroff = nhi_suspend,
.complete = nhi_complete,
.runtime_suspend = nhi_runtime_suspend,
@@ -1158,11 +1499,58 @@ static struct pci_device_id nhi_ids[] = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_USBONLY_NHI) },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_NHI) },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_NHI) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICL_NHI0),
+ .driver_data = (kernel_ulong_t)&icl_nhi_ops },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICL_NHI1),
+ .driver_data = (kernel_ulong_t)&icl_nhi_ops },
+ /* Thunderbolt 4 */
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGL_NHI0),
+ .driver_data = (kernel_ulong_t)&icl_nhi_ops },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGL_NHI1),
+ .driver_data = (kernel_ulong_t)&icl_nhi_ops },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGL_H_NHI0),
+ .driver_data = (kernel_ulong_t)&icl_nhi_ops },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGL_H_NHI1),
+ .driver_data = (kernel_ulong_t)&icl_nhi_ops },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ADL_NHI0),
+ .driver_data = (kernel_ulong_t)&icl_nhi_ops },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ADL_NHI1),
+ .driver_data = (kernel_ulong_t)&icl_nhi_ops },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_RPL_NHI0),
+ .driver_data = (kernel_ulong_t)&icl_nhi_ops },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_RPL_NHI1),
+ .driver_data = (kernel_ulong_t)&icl_nhi_ops },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MTL_M_NHI0),
+ .driver_data = (kernel_ulong_t)&icl_nhi_ops },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MTL_P_NHI0),
+ .driver_data = (kernel_ulong_t)&icl_nhi_ops },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MTL_P_NHI1),
+ .driver_data = (kernel_ulong_t)&icl_nhi_ops },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_LNL_NHI0),
+ .driver_data = (kernel_ulong_t)&icl_nhi_ops },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_LNL_NHI1),
+ .driver_data = (kernel_ulong_t)&icl_nhi_ops },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_PTL_M_NHI0),
+ .driver_data = (kernel_ulong_t)&icl_nhi_ops },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_PTL_M_NHI1),
+ .driver_data = (kernel_ulong_t)&icl_nhi_ops },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_PTL_P_NHI0),
+ .driver_data = (kernel_ulong_t)&icl_nhi_ops },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_PTL_P_NHI1),
+ .driver_data = (kernel_ulong_t)&icl_nhi_ops },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_WCL_NHI0),
+ .driver_data = (kernel_ulong_t)&icl_nhi_ops },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_80G_NHI) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_40G_NHI) },
+
+ /* Any USB4 compliant host */
+ { PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_USB4, ~0) },
{ 0,}
};
MODULE_DEVICE_TABLE(pci, nhi_ids);
+MODULE_DESCRIPTION("Thunderbolt/USB4 core driver");
MODULE_LICENSE("GPL");
static struct pci_driver nhi_driver = {
@@ -1170,6 +1558,7 @@ static struct pci_driver nhi_driver = {
.id_table = nhi_ids,
.probe = nhi_probe,
.remove = nhi_remove,
+ .shutdown = nhi_remove,
.driver.pm = &nhi_pm_ops,
};