Diffstat (limited to 'drivers/pci')
-rw-r--r--  drivers/pci/controller/Kconfig | 18
-rw-r--r--  drivers/pci/controller/Makefile | 1
-rw-r--r--  drivers/pci/controller/cadence/pci-j721e.c | 2
-rw-r--r--  drivers/pci/controller/cadence/pcie-cadence-plat.c | 2
-rw-r--r--  drivers/pci/controller/dwc/Kconfig | 18
-rw-r--r--  drivers/pci/controller/dwc/pci-dra7xx.c | 22
-rw-r--r--  drivers/pci/controller/dwc/pci-imx6.c | 2
-rw-r--r--  drivers/pci/controller/dwc/pcie-designware-ep.c | 1
-rw-r--r--  drivers/pci/controller/dwc/pcie-designware-host.c | 19
-rw-r--r--  drivers/pci/controller/dwc/pcie-designware.c | 1
-rw-r--r--  drivers/pci/controller/dwc/pcie-uniphier.c | 26
-rw-r--r--  drivers/pci/controller/dwc/pcie-visconti.c | 5
-rw-r--r--  drivers/pci/controller/pci-aardvark.c | 491
-rw-r--r--  drivers/pci/controller/pci-hyperv.c | 4
-rw-r--r--  drivers/pci/controller/pci-thunder-ecam.c | 4
-rw-r--r--  drivers/pci/controller/pci-xgene-msi.c | 2
-rw-r--r--  drivers/pci/controller/pcie-apple.c | 824
-rw-r--r--  drivers/pci/controller/pcie-brcmstb.c | 2
-rw-r--r--  drivers/pci/controller/pcie-iproc.c | 2
-rw-r--r--  drivers/pci/controller/vmd.c | 2
-rw-r--r--  drivers/pci/endpoint/functions/pci-epf-ntb.c | 22
-rw-r--r--  drivers/pci/endpoint/pci-ep-cfs.c | 48
-rw-r--r--  drivers/pci/endpoint/pci-epc-core.c | 2
-rw-r--r--  drivers/pci/endpoint/pci-epf-core.c | 4
-rw-r--r--  drivers/pci/hotplug/acpiphp_glue.c | 2
-rw-r--r--  drivers/pci/hotplug/cpqphp.h | 2
-rw-r--r--  drivers/pci/hotplug/cpqphp_ctrl.c | 4
-rw-r--r--  drivers/pci/hotplug/cpqphp_pci.c | 6
-rw-r--r--  drivers/pci/hotplug/ibmphp.h | 4
-rw-r--r--  drivers/pci/hotplug/pciehp.h | 2
-rw-r--r--  drivers/pci/hotplug/pciehp_core.c | 2
-rw-r--r--  drivers/pci/hotplug/pciehp_hpc.c | 26
-rw-r--r--  drivers/pci/hotplug/shpchp_hpc.c | 2
-rw-r--r--  drivers/pci/iov.c | 38
-rw-r--r--  drivers/pci/msi.c | 3
-rw-r--r--  drivers/pci/of.c | 10
-rw-r--r--  drivers/pci/p2pdma.c | 8
-rw-r--r--  drivers/pci/pci-bridge-emul.c | 13
-rw-r--r--  drivers/pci/pci-driver.c | 57
-rw-r--r--  drivers/pci/pci-sysfs.c | 51
-rw-r--r--  drivers/pci/pci.c | 65
-rw-r--r--  drivers/pci/pci.h | 1
-rw-r--r--  drivers/pci/pcie/Makefile | 4
-rw-r--r--  drivers/pci/pcie/aer.c | 2
-rw-r--r--  drivers/pci/pcie/aspm.c | 4
-rw-r--r--  drivers/pci/pcie/err.c | 40
-rw-r--r--  drivers/pci/pcie/portdrv.h | 6
-rw-r--r--  drivers/pci/pcie/portdrv_core.c | 67
-rw-r--r--  drivers/pci/pcie/portdrv_pci.c | 27
-rw-r--r--  drivers/pci/probe.c | 60
-rw-r--r--  drivers/pci/quirks.c | 70
-rw-r--r--  drivers/pci/rom.c | 2
-rw-r--r--  drivers/pci/setup-bus.c | 2
-rw-r--r--  drivers/pci/setup-irq.c | 26
-rw-r--r--  drivers/pci/switch/switchtec.c | 95
-rw-r--r--  drivers/pci/vpd.c | 93
-rw-r--r--  drivers/pci/xen-pcifront.c | 58
57 files changed, 1809 insertions(+), 567 deletions(-)
diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig
index 326f7d13024f..5661d4a84832 100644
--- a/drivers/pci/controller/Kconfig
+++ b/drivers/pci/controller/Kconfig
@@ -312,6 +312,24 @@ config PCIE_HISI_ERR
Say Y here if you want error handling support
for the PCIe controller's errors on HiSilicon HIP SoCs
+config PCIE_APPLE_MSI_DOORBELL_ADDR
+ hex
+ default 0xfffff000
+ depends on PCIE_APPLE
+
+config PCIE_APPLE
+ tristate "Apple PCIe controller"
+ depends on ARCH_APPLE || COMPILE_TEST
+ depends on OF
+ depends on PCI_MSI_IRQ_DOMAIN
+ select PCI_HOST_COMMON
+ help
+ Say Y here if you want to enable PCIe controller support on Apple
+ system-on-chips, like the Apple M1. This is required for the USB
+ type-A ports, Ethernet, Wi-Fi, and Bluetooth.
+
+ If unsure, say Y if you have an Apple Silicon system.
+
source "drivers/pci/controller/dwc/Kconfig"
source "drivers/pci/controller/mobiveil/Kconfig"
source "drivers/pci/controller/cadence/Kconfig"
diff --git a/drivers/pci/controller/Makefile b/drivers/pci/controller/Makefile
index aaf30b3dcc14..f9d40bad932c 100644
--- a/drivers/pci/controller/Makefile
+++ b/drivers/pci/controller/Makefile
@@ -37,6 +37,7 @@ obj-$(CONFIG_VMD) += vmd.o
obj-$(CONFIG_PCIE_BRCMSTB) += pcie-brcmstb.o
obj-$(CONFIG_PCI_LOONGSON) += pci-loongson.o
obj-$(CONFIG_PCIE_HISI_ERR) += pcie-hisi-error.o
+obj-$(CONFIG_PCIE_APPLE) += pcie-apple.o
# pcie-hisi.o quirks are needed even without CONFIG_PCIE_DW
obj-y += dwc/
obj-y += mobiveil/
diff --git a/drivers/pci/controller/cadence/pci-j721e.c b/drivers/pci/controller/cadence/pci-j721e.c
index ffb176d288cd..918e11082e6a 100644
--- a/drivers/pci/controller/cadence/pci-j721e.c
+++ b/drivers/pci/controller/cadence/pci-j721e.c
@@ -474,7 +474,7 @@ static int j721e_pcie_probe(struct platform_device *pdev)
ret = clk_prepare_enable(clk);
if (ret) {
dev_err(dev, "failed to enable pcie_refclk\n");
- goto err_get_sync;
+ goto err_pcie_setup;
}
pcie->refclk = clk;
diff --git a/drivers/pci/controller/cadence/pcie-cadence-plat.c b/drivers/pci/controller/cadence/pcie-cadence-plat.c
index 5fee0f89ab59..a224afadbcc0 100644
--- a/drivers/pci/controller/cadence/pcie-cadence-plat.c
+++ b/drivers/pci/controller/cadence/pcie-cadence-plat.c
@@ -127,6 +127,8 @@ static int cdns_plat_pcie_probe(struct platform_device *pdev)
goto err_init;
}
+ return 0;
+
err_init:
err_get_sync:
pm_runtime_put_sync(dev);
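The cadence-plat fix above adds an explicit return on the success path so a successful probe no longer falls through into its own error labels and undoes the setup it just completed. A generic sketch of the pattern, with hypothetical do_setup()/undo_setup() helpers standing in for the real calls:

	static int example_probe(struct platform_device *pdev)
	{
		int ret;

		ret = do_setup(pdev);		/* hypothetical helper */
		if (ret)
			goto err_init;

		return 0;			/* success must not fall into the error path */

	err_init:
		undo_setup(pdev);		/* hypothetical helper */
		return ret;
	}
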
diff --git a/drivers/pci/controller/dwc/Kconfig b/drivers/pci/controller/dwc/Kconfig
index a07c08d714c9..bb9179ac7fa5 100644
--- a/drivers/pci/controller/dwc/Kconfig
+++ b/drivers/pci/controller/dwc/Kconfig
@@ -8,22 +8,20 @@ config PCIE_DW
config PCIE_DW_HOST
bool
- depends on PCI_MSI_IRQ_DOMAIN
select PCIE_DW
config PCIE_DW_EP
bool
- depends on PCI_ENDPOINT
select PCIE_DW
config PCI_DRA7XX
- bool
+ tristate
config PCI_DRA7XX_HOST
- bool "TI DRA7xx PCIe controller Host Mode"
+ tristate "TI DRA7xx PCIe controller Host Mode"
depends on SOC_DRA7XX || COMPILE_TEST
- depends on PCI_MSI_IRQ_DOMAIN
depends on OF && HAS_IOMEM && TI_PIPE3
+ depends on PCI_MSI_IRQ_DOMAIN
select PCIE_DW_HOST
select PCI_DRA7XX
default y if SOC_DRA7XX
@@ -36,10 +34,10 @@ config PCI_DRA7XX_HOST
This uses the DesignWare core.
config PCI_DRA7XX_EP
- bool "TI DRA7xx PCIe controller Endpoint Mode"
+ tristate "TI DRA7xx PCIe controller Endpoint Mode"
depends on SOC_DRA7XX || COMPILE_TEST
- depends on PCI_ENDPOINT
depends on OF && HAS_IOMEM && TI_PIPE3
+ depends on PCI_ENDPOINT
select PCIE_DW_EP
select PCI_DRA7XX
help
@@ -55,7 +53,7 @@ config PCIE_DW_PLAT
config PCIE_DW_PLAT_HOST
bool "Platform bus based DesignWare PCIe Controller - Host mode"
- depends on PCI && PCI_MSI_IRQ_DOMAIN
+ depends on PCI_MSI_IRQ_DOMAIN
select PCIE_DW_HOST
select PCIE_DW_PLAT
help
@@ -138,8 +136,8 @@ config PCI_LAYERSCAPE
bool "Freescale Layerscape PCIe controller - Host mode"
depends on OF && (ARM || ARCH_LAYERSCAPE || COMPILE_TEST)
depends on PCI_MSI_IRQ_DOMAIN
- select MFD_SYSCON
select PCIE_DW_HOST
+ select MFD_SYSCON
help
Say Y here if you want to enable PCIe controller support on Layerscape
SoCs to work in Host mode.
@@ -283,8 +281,8 @@ config PCIE_HISI_STB
config PCI_MESON
tristate "MESON PCIe controller"
- depends on PCI_MSI_IRQ_DOMAIN
default m if ARCH_MESON
+ depends on PCI_MSI_IRQ_DOMAIN
select PCIE_DW_HOST
help
Say Y here if you want to enable PCI controller support on Amlogic
diff --git a/drivers/pci/controller/dwc/pci-dra7xx.c b/drivers/pci/controller/dwc/pci-dra7xx.c
index fbbb78f6885e..a4221f6f3629 100644
--- a/drivers/pci/controller/dwc/pci-dra7xx.c
+++ b/drivers/pci/controller/dwc/pci-dra7xx.c
@@ -7,6 +7,7 @@
* Authors: Kishon Vijay Abraham I <kishon@ti.com>
*/
+#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
@@ -14,7 +15,7 @@
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
-#include <linux/init.h>
+#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_pci.h>
@@ -90,6 +91,7 @@ struct dra7xx_pcie {
int phy_count; /* DT phy-names count */
struct phy **phy;
struct irq_domain *irq_domain;
+ struct clk *clk;
enum dw_pcie_device_mode mode;
};
@@ -607,6 +609,7 @@ static const struct of_device_id of_dra7xx_pcie_match[] = {
},
{},
};
+MODULE_DEVICE_TABLE(of, of_dra7xx_pcie_match);
/*
* dra7xx_pcie_unaligned_memaccess: workaround for AM572x/AM571x Errata i870
@@ -740,6 +743,15 @@ static int dra7xx_pcie_probe(struct platform_device *pdev)
if (!link)
return -ENOMEM;
+ dra7xx->clk = devm_clk_get_optional(dev, NULL);
+ if (IS_ERR(dra7xx->clk))
+ return dev_err_probe(dev, PTR_ERR(dra7xx->clk),
+ "clock request failed");
+
+ ret = clk_prepare_enable(dra7xx->clk);
+ if (ret)
+ return ret;
+
for (i = 0; i < phy_count; i++) {
snprintf(name, sizeof(name), "pcie-phy%d", i);
phy[i] = devm_phy_get(dev, name);
@@ -925,6 +937,8 @@ static void dra7xx_pcie_shutdown(struct platform_device *pdev)
pm_runtime_disable(dev);
dra7xx_pcie_disable_phy(dra7xx);
+
+ clk_disable_unprepare(dra7xx->clk);
}
static const struct dev_pm_ops dra7xx_pcie_pm_ops = {
@@ -943,4 +957,8 @@ static struct platform_driver dra7xx_pcie_driver = {
},
.shutdown = dra7xx_pcie_shutdown,
};
-builtin_platform_driver(dra7xx_pcie_driver);
+module_platform_driver(dra7xx_pcie_driver);
+
+MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
+MODULE_DESCRIPTION("PCIe controller driver for TI DRA7xx SoCs");
+MODULE_LICENSE("GPL v2");
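The optional-clock handling added above relies on dev_err_probe(), which logs the failure and returns the error code but stays silent for -EPROBE_DEFER, so a clock that is not ready yet defers cleanly. A stripped-down sketch of the same pattern (nothing DRA7xx-specific is assumed beyond <linux/clk.h>):

	struct clk *clk;
	int ret;

	clk = devm_clk_get_optional(dev, NULL);	/* returns NULL if no clock is described */
	if (IS_ERR(clk))
		return dev_err_probe(dev, PTR_ERR(clk), "clock request failed");

	ret = clk_prepare_enable(clk);		/* clk_prepare_enable(NULL) is a no-op */
	if (ret)
		return ret;
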
diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
index 80fc98acf097..26f49f797b0f 100644
--- a/drivers/pci/controller/dwc/pci-imx6.c
+++ b/drivers/pci/controller/dwc/pci-imx6.c
@@ -1132,7 +1132,7 @@ static int imx6_pcie_probe(struct platform_device *pdev)
/* Limit link speed */
pci->link_gen = 1;
- ret = of_property_read_u32(node, "fsl,max-link-speed", &pci->link_gen);
+ of_property_read_u32(node, "fsl,max-link-speed", &pci->link_gen);
imx6_pcie->vpcie = devm_regulator_get_optional(&pdev->dev, "vpcie");
if (IS_ERR(imx6_pcie->vpcie)) {
diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
index 998b698f4085..27e4735f577e 100644
--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
+++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
@@ -83,6 +83,7 @@ void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)
for (func_no = 0; func_no < funcs; func_no++)
__dw_pcie_ep_reset_bar(pci, func_no, bar, 0);
}
+EXPORT_SYMBOL_GPL(dw_pcie_ep_reset_bar);
static u8 __dw_pcie_ep_find_next_cap(struct dw_pcie_ep *ep, u8 func_no,
u8 cap_ptr, u8 cap)
diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
index d1d9b8344ec9..f4755f3a03be 100644
--- a/drivers/pci/controller/dwc/pcie-designware-host.c
+++ b/drivers/pci/controller/dwc/pcie-designware-host.c
@@ -335,6 +335,16 @@ int dw_pcie_host_init(struct pcie_port *pp)
if (pci->link_gen < 1)
pci->link_gen = of_pci_get_max_link_speed(np);
+ /* Set default bus ops */
+ bridge->ops = &dw_pcie_ops;
+ bridge->child_ops = &dw_child_pcie_ops;
+
+ if (pp->ops->host_init) {
+ ret = pp->ops->host_init(pp);
+ if (ret)
+ return ret;
+ }
+
if (pci_msi_enabled()) {
pp->has_msi_ctrl = !(pp->ops->msi_host_init ||
of_property_read_bool(np, "msi-parent") ||
@@ -388,15 +398,6 @@ int dw_pcie_host_init(struct pcie_port *pp)
}
}
- /* Set default bus ops */
- bridge->ops = &dw_pcie_ops;
- bridge->child_ops = &dw_child_pcie_ops;
-
- if (pp->ops->host_init) {
- ret = pp->ops->host_init(pp);
- if (ret)
- goto err_free_msi;
- }
dw_pcie_iatu_detect(pci);
dw_pcie_setup_rc(pp);
diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c
index a945f0c0e73d..850b4533f4ef 100644
--- a/drivers/pci/controller/dwc/pcie-designware.c
+++ b/drivers/pci/controller/dwc/pcie-designware.c
@@ -538,6 +538,7 @@ int dw_pcie_link_up(struct dw_pcie *pci)
return ((val & PCIE_PORT_DEBUG1_LINK_UP) &&
(!(val & PCIE_PORT_DEBUG1_LINK_IN_TRAINING)));
}
+EXPORT_SYMBOL_GPL(dw_pcie_link_up);
void dw_pcie_upconfig_setup(struct dw_pcie *pci)
{
diff --git a/drivers/pci/controller/dwc/pcie-uniphier.c b/drivers/pci/controller/dwc/pcie-uniphier.c
index d842fd018129..d05be942956e 100644
--- a/drivers/pci/controller/dwc/pcie-uniphier.c
+++ b/drivers/pci/controller/dwc/pcie-uniphier.c
@@ -168,30 +168,21 @@ static void uniphier_pcie_irq_enable(struct uniphier_pcie_priv *priv)
writel(PCL_RCV_INTX_ALL_ENABLE, priv->base + PCL_RCV_INTX);
}
-static void uniphier_pcie_irq_ack(struct irq_data *d)
-{
- struct pcie_port *pp = irq_data_get_irq_chip_data(d);
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci);
- u32 val;
-
- val = readl(priv->base + PCL_RCV_INTX);
- val &= ~PCL_RCV_INTX_ALL_STATUS;
- val |= BIT(irqd_to_hwirq(d) + PCL_RCV_INTX_STATUS_SHIFT);
- writel(val, priv->base + PCL_RCV_INTX);
-}
-
static void uniphier_pcie_irq_mask(struct irq_data *d)
{
struct pcie_port *pp = irq_data_get_irq_chip_data(d);
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci);
+ unsigned long flags;
u32 val;
+ raw_spin_lock_irqsave(&pp->lock, flags);
+
val = readl(priv->base + PCL_RCV_INTX);
- val &= ~PCL_RCV_INTX_ALL_MASK;
val |= BIT(irqd_to_hwirq(d) + PCL_RCV_INTX_MASK_SHIFT);
writel(val, priv->base + PCL_RCV_INTX);
+
+ raw_spin_unlock_irqrestore(&pp->lock, flags);
}
static void uniphier_pcie_irq_unmask(struct irq_data *d)
@@ -199,17 +190,20 @@ static void uniphier_pcie_irq_unmask(struct irq_data *d)
struct pcie_port *pp = irq_data_get_irq_chip_data(d);
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci);
+ unsigned long flags;
u32 val;
+ raw_spin_lock_irqsave(&pp->lock, flags);
+
val = readl(priv->base + PCL_RCV_INTX);
- val &= ~PCL_RCV_INTX_ALL_MASK;
val &= ~BIT(irqd_to_hwirq(d) + PCL_RCV_INTX_MASK_SHIFT);
writel(val, priv->base + PCL_RCV_INTX);
+
+ raw_spin_unlock_irqrestore(&pp->lock, flags);
}
static struct irq_chip uniphier_pcie_irq_chip = {
.name = "PCI",
- .irq_ack = uniphier_pcie_irq_ack,
.irq_mask = uniphier_pcie_irq_mask,
.irq_unmask = uniphier_pcie_irq_unmask,
};
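The uniphier mask/unmask handlers now take pp->lock because they read-modify-write PCL_RCV_INTX, a register shared by all INTx lines; without the lock, two CPUs touching different lines can lose each other's update. An illustrative interleaving (pseudocode, not driver code):

	/* CPU0: mask INTx A              CPU1: unmask INTx B
	 * val = readl(PCL_RCV_INTX);     val = readl(PCL_RCV_INTX);
	 * val |= MASK_BIT(A);            val &= ~MASK_BIT(B);
	 * writel(val, PCL_RCV_INTX);     writel(val, PCL_RCV_INTX);  <- overwrites CPU0's update
	 *
	 * Taking pp->lock around the read-modify-write makes each update appear
	 * atomic with respect to the other.
	 */
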
diff --git a/drivers/pci/controller/dwc/pcie-visconti.c b/drivers/pci/controller/dwc/pcie-visconti.c
index a88eab6829bb..50f80f07e4db 100644
--- a/drivers/pci/controller/dwc/pcie-visconti.c
+++ b/drivers/pci/controller/dwc/pcie-visconti.c
@@ -279,13 +279,10 @@ static int visconti_add_pcie_port(struct visconti_pcie *pcie,
{
struct dw_pcie *pci = &pcie->pci;
struct pcie_port *pp = &pci->pp;
- struct device *dev = &pdev->dev;
pp->irq = platform_get_irq_byname(pdev, "intr");
- if (pp->irq < 0) {
- dev_err(dev, "Interrupt intr is missing");
+ if (pp->irq < 0)
return pp->irq;
- }
pp->ops = &visconti_pcie_host_ops;
diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c
index 596ebcfcc82d..c5300d49807a 100644
--- a/drivers/pci/controller/pci-aardvark.c
+++ b/drivers/pci/controller/pci-aardvark.c
@@ -31,10 +31,8 @@
/* PCIe core registers */
#define PCIE_CORE_DEV_ID_REG 0x0
#define PCIE_CORE_CMD_STATUS_REG 0x4
-#define PCIE_CORE_CMD_IO_ACCESS_EN BIT(0)
-#define PCIE_CORE_CMD_MEM_ACCESS_EN BIT(1)
-#define PCIE_CORE_CMD_MEM_IO_REQ_EN BIT(2)
#define PCIE_CORE_DEV_REV_REG 0x8
+#define PCIE_CORE_EXP_ROM_BAR_REG 0x30
#define PCIE_CORE_PCIEXP_CAP 0xc0
#define PCIE_CORE_ERR_CAPCTL_REG 0x118
#define PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX BIT(5)
@@ -99,6 +97,7 @@
#define PCIE_CORE_CTRL2_MSI_ENABLE BIT(10)
#define PCIE_CORE_REF_CLK_REG (CONTROL_BASE_ADDR + 0x14)
#define PCIE_CORE_REF_CLK_TX_ENABLE BIT(1)
+#define PCIE_CORE_REF_CLK_RX_ENABLE BIT(2)
#define PCIE_MSG_LOG_REG (CONTROL_BASE_ADDR + 0x30)
#define PCIE_ISR0_REG (CONTROL_BASE_ADDR + 0x40)
#define PCIE_MSG_PM_PME_MASK BIT(7)
@@ -106,18 +105,19 @@
#define PCIE_ISR0_MSI_INT_PENDING BIT(24)
#define PCIE_ISR0_INTX_ASSERT(val) BIT(16 + (val))
#define PCIE_ISR0_INTX_DEASSERT(val) BIT(20 + (val))
-#define PCIE_ISR0_ALL_MASK GENMASK(26, 0)
+#define PCIE_ISR0_ALL_MASK GENMASK(31, 0)
#define PCIE_ISR1_REG (CONTROL_BASE_ADDR + 0x48)
#define PCIE_ISR1_MASK_REG (CONTROL_BASE_ADDR + 0x4C)
#define PCIE_ISR1_POWER_STATE_CHANGE BIT(4)
#define PCIE_ISR1_FLUSH BIT(5)
#define PCIE_ISR1_INTX_ASSERT(val) BIT(8 + (val))
-#define PCIE_ISR1_ALL_MASK GENMASK(11, 4)
+#define PCIE_ISR1_ALL_MASK GENMASK(31, 0)
#define PCIE_MSI_ADDR_LOW_REG (CONTROL_BASE_ADDR + 0x50)
#define PCIE_MSI_ADDR_HIGH_REG (CONTROL_BASE_ADDR + 0x54)
#define PCIE_MSI_STATUS_REG (CONTROL_BASE_ADDR + 0x58)
#define PCIE_MSI_MASK_REG (CONTROL_BASE_ADDR + 0x5C)
#define PCIE_MSI_PAYLOAD_REG (CONTROL_BASE_ADDR + 0x9C)
+#define PCIE_MSI_DATA_MASK GENMASK(15, 0)
/* PCIe window configuration */
#define OB_WIN_BASE_ADDR 0x4c00
@@ -164,8 +164,50 @@
#define CFG_REG (LMI_BASE_ADDR + 0x0)
#define LTSSM_SHIFT 24
#define LTSSM_MASK 0x3f
-#define LTSSM_L0 0x10
#define RC_BAR_CONFIG 0x300
+
+/* LTSSM values in CFG_REG */
+enum {
+ LTSSM_DETECT_QUIET = 0x0,
+ LTSSM_DETECT_ACTIVE = 0x1,
+ LTSSM_POLLING_ACTIVE = 0x2,
+ LTSSM_POLLING_COMPLIANCE = 0x3,
+ LTSSM_POLLING_CONFIGURATION = 0x4,
+ LTSSM_CONFIG_LINKWIDTH_START = 0x5,
+ LTSSM_CONFIG_LINKWIDTH_ACCEPT = 0x6,
+ LTSSM_CONFIG_LANENUM_ACCEPT = 0x7,
+ LTSSM_CONFIG_LANENUM_WAIT = 0x8,
+ LTSSM_CONFIG_COMPLETE = 0x9,
+ LTSSM_CONFIG_IDLE = 0xa,
+ LTSSM_RECOVERY_RCVR_LOCK = 0xb,
+ LTSSM_RECOVERY_SPEED = 0xc,
+ LTSSM_RECOVERY_RCVR_CFG = 0xd,
+ LTSSM_RECOVERY_IDLE = 0xe,
+ LTSSM_L0 = 0x10,
+ LTSSM_RX_L0S_ENTRY = 0x11,
+ LTSSM_RX_L0S_IDLE = 0x12,
+ LTSSM_RX_L0S_FTS = 0x13,
+ LTSSM_TX_L0S_ENTRY = 0x14,
+ LTSSM_TX_L0S_IDLE = 0x15,
+ LTSSM_TX_L0S_FTS = 0x16,
+ LTSSM_L1_ENTRY = 0x17,
+ LTSSM_L1_IDLE = 0x18,
+ LTSSM_L2_IDLE = 0x19,
+ LTSSM_L2_TRANSMIT_WAKE = 0x1a,
+ LTSSM_DISABLED = 0x20,
+ LTSSM_LOOPBACK_ENTRY_MASTER = 0x21,
+ LTSSM_LOOPBACK_ACTIVE_MASTER = 0x22,
+ LTSSM_LOOPBACK_EXIT_MASTER = 0x23,
+ LTSSM_LOOPBACK_ENTRY_SLAVE = 0x24,
+ LTSSM_LOOPBACK_ACTIVE_SLAVE = 0x25,
+ LTSSM_LOOPBACK_EXIT_SLAVE = 0x26,
+ LTSSM_HOT_RESET = 0x27,
+ LTSSM_RECOVERY_EQUALIZATION_PHASE0 = 0x28,
+ LTSSM_RECOVERY_EQUALIZATION_PHASE1 = 0x29,
+ LTSSM_RECOVERY_EQUALIZATION_PHASE2 = 0x2a,
+ LTSSM_RECOVERY_EQUALIZATION_PHASE3 = 0x2b,
+};
+
#define VENDOR_ID_REG (LMI_BASE_ADDR + 0x44)
/* PCIe core controller registers */
@@ -198,7 +240,7 @@
#define PCIE_IRQ_MSI_INT2_DET BIT(21)
#define PCIE_IRQ_RC_DBELL_DET BIT(22)
#define PCIE_IRQ_EP_STATUS BIT(23)
-#define PCIE_IRQ_ALL_MASK 0xfff0fb
+#define PCIE_IRQ_ALL_MASK GENMASK(31, 0)
#define PCIE_IRQ_ENABLE_INTS_MASK PCIE_IRQ_CORE_INT
/* Transaction types */
@@ -257,18 +299,49 @@ static inline u32 advk_readl(struct advk_pcie *pcie, u64 reg)
return readl(pcie->base + reg);
}
-static inline u16 advk_read16(struct advk_pcie *pcie, u64 reg)
+static u8 advk_pcie_ltssm_state(struct advk_pcie *pcie)
{
- return advk_readl(pcie, (reg & ~0x3)) >> ((reg & 0x3) * 8);
+ u32 val;
+ u8 ltssm_state;
+
+ val = advk_readl(pcie, CFG_REG);
+ ltssm_state = (val >> LTSSM_SHIFT) & LTSSM_MASK;
+ return ltssm_state;
}
-static int advk_pcie_link_up(struct advk_pcie *pcie)
+static inline bool advk_pcie_link_up(struct advk_pcie *pcie)
{
- u32 val, ltssm_state;
+ /* check if LTSSM is in normal operation - some L* state */
+ u8 ltssm_state = advk_pcie_ltssm_state(pcie);
+ return ltssm_state >= LTSSM_L0 && ltssm_state < LTSSM_DISABLED;
+}
- val = advk_readl(pcie, CFG_REG);
- ltssm_state = (val >> LTSSM_SHIFT) & LTSSM_MASK;
- return ltssm_state >= LTSSM_L0;
+static inline bool advk_pcie_link_active(struct advk_pcie *pcie)
+{
+ /*
+ * According to PCIe Base specification 3.0, Table 4-14: Link
+ * Status Mapped to the LTSSM, and 4.2.6.3.6 Configuration.Idle
+ * is Link Up mapped to LTSSM Configuration.Idle, Recovery, L0,
+ * L0s, L1 and L2 states. And according to 3.2.1. Data Link
+ * Control and Management State Machine Rules is DL Up status
+ * reported in DL Active state.
+ */
+ u8 ltssm_state = advk_pcie_ltssm_state(pcie);
+ return ltssm_state >= LTSSM_CONFIG_IDLE && ltssm_state < LTSSM_DISABLED;
+}
+
+static inline bool advk_pcie_link_training(struct advk_pcie *pcie)
+{
+ /*
+ * According to PCIe Base specification 3.0, Table 4-14: Link
+ * Status Mapped to the LTSSM is Link Training mapped to LTSSM
+ * Configuration and Recovery states.
+ */
+ u8 ltssm_state = advk_pcie_ltssm_state(pcie);
+ return ((ltssm_state >= LTSSM_CONFIG_LINKWIDTH_START &&
+ ltssm_state < LTSSM_L0) ||
+ (ltssm_state >= LTSSM_RECOVERY_EQUALIZATION_PHASE0 &&
+ ltssm_state <= LTSSM_RECOVERY_EQUALIZATION_PHASE3));
}
static int advk_pcie_wait_for_link(struct advk_pcie *pcie)
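The three LTSSM helpers above split the state space into bands: "training" covers the Configuration and Recovery states (plus the Recovery.Equalization phases), "active" starts at Configuration.Idle where DL Up is reported, and "up" covers L0 and the low-power L states; training and active deliberately overlap in the Configuration.Idle/Recovery window. A hypothetical, illustration-only helper that labels a state using the enum values added above (it picks a single label per state, so the overlap is collapsed):

	static const char *advk_ltssm_band(u8 state)	/* not part of the driver */
	{
		if (state >= LTSSM_RECOVERY_EQUALIZATION_PHASE0 &&
		    state <= LTSSM_RECOVERY_EQUALIZATION_PHASE3)
			return "link training (equalization)";
		if (state >= LTSSM_DISABLED)
			return "disabled/loopback/hot reset";
		if (state >= LTSSM_L0)
			return "link up (L0/L0s/L1/L2)";
		if (state >= LTSSM_CONFIG_IDLE)
			return "DL up (Configuration.Idle/Recovery)";
		if (state >= LTSSM_CONFIG_LINKWIDTH_START)
			return "link training (Configuration)";
		return "detect/polling";
	}
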
@@ -291,7 +364,7 @@ static void advk_pcie_wait_for_retrain(struct advk_pcie *pcie)
size_t retries;
for (retries = 0; retries < RETRAIN_WAIT_MAX_RETRIES; ++retries) {
- if (!advk_pcie_link_up(pcie))
+ if (advk_pcie_link_training(pcie))
break;
udelay(RETRAIN_WAIT_USLEEP_US);
}
@@ -299,23 +372,9 @@ static void advk_pcie_wait_for_retrain(struct advk_pcie *pcie)
static void advk_pcie_issue_perst(struct advk_pcie *pcie)
{
- u32 reg;
-
if (!pcie->reset_gpio)
return;
- /*
- * As required by PCI Express spec (PCI Express Base Specification, REV.
- * 4.0 PCI Express, February 19 2014, 6.6.1 Conventional Reset) a delay
- * for at least 100ms after de-asserting PERST# signal is needed before
- * link training is enabled. So ensure that link training is disabled
- * prior de-asserting PERST# signal to fulfill that PCI Express spec
- * requirement.
- */
- reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
- reg &= ~LINK_TRAINING_EN;
- advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);
-
/* 10ms delay is needed for some cards */
dev_info(&pcie->pdev->dev, "issuing PERST via reset GPIO for 10ms\n");
gpiod_set_value_cansleep(pcie->reset_gpio, 1);
@@ -323,54 +382,47 @@ static void advk_pcie_issue_perst(struct advk_pcie *pcie)
gpiod_set_value_cansleep(pcie->reset_gpio, 0);
}
-static int advk_pcie_train_at_gen(struct advk_pcie *pcie, int gen)
+static void advk_pcie_train_link(struct advk_pcie *pcie)
{
- int ret, neg_gen;
+ struct device *dev = &pcie->pdev->dev;
u32 reg;
+ int ret;
- /* Setup link speed */
+ /*
+ * Setup PCIe rev / gen compliance based on device tree property
+ * 'max-link-speed' which also forces maximal link speed.
+ */
reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
reg &= ~PCIE_GEN_SEL_MSK;
- if (gen == 3)
+ if (pcie->link_gen == 3)
reg |= SPEED_GEN_3;
- else if (gen == 2)
+ else if (pcie->link_gen == 2)
reg |= SPEED_GEN_2;
else
reg |= SPEED_GEN_1;
advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);
/*
- * Enable link training. This is not needed in every call to this
- * function, just once suffices, but it does not break anything either.
+ * Set maximal link speed value also into PCIe Link Control 2 register.
+ * Armada 3700 Functional Specification says that default value is based
+ * on SPEED_GEN but tests showed that default value is always 8.0 GT/s.
*/
+ reg = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + PCI_EXP_LNKCTL2);
+ reg &= ~PCI_EXP_LNKCTL2_TLS;
+ if (pcie->link_gen == 3)
+ reg |= PCI_EXP_LNKCTL2_TLS_8_0GT;
+ else if (pcie->link_gen == 2)
+ reg |= PCI_EXP_LNKCTL2_TLS_5_0GT;
+ else
+ reg |= PCI_EXP_LNKCTL2_TLS_2_5GT;
+ advk_writel(pcie, reg, PCIE_CORE_PCIEXP_CAP + PCI_EXP_LNKCTL2);
+
+ /* Enable link training after selecting PCIe generation */
reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
reg |= LINK_TRAINING_EN;
advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);
/*
- * Start link training immediately after enabling it.
- * This solves problems for some buggy cards.
- */
- reg = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + PCI_EXP_LNKCTL);
- reg |= PCI_EXP_LNKCTL_RL;
- advk_writel(pcie, reg, PCIE_CORE_PCIEXP_CAP + PCI_EXP_LNKCTL);
-
- ret = advk_pcie_wait_for_link(pcie);
- if (ret)
- return ret;
-
- reg = advk_read16(pcie, PCIE_CORE_PCIEXP_CAP + PCI_EXP_LNKSTA);
- neg_gen = reg & PCI_EXP_LNKSTA_CLS;
-
- return neg_gen;
-}
-
-static void advk_pcie_train_link(struct advk_pcie *pcie)
-{
- struct device *dev = &pcie->pdev->dev;
- int neg_gen = -1, gen;
-
- /*
* Reset PCIe card via PERST# signal. Some cards are not detected
* during link training when they are in some non-initial state.
*/
@@ -380,41 +432,18 @@ static void advk_pcie_train_link(struct advk_pcie *pcie)
* PERST# signal could have been asserted by pinctrl subsystem before
* probe() callback has been called or issued explicitly by reset gpio
* function advk_pcie_issue_perst(), making the endpoint going into
- * fundamental reset. As required by PCI Express spec a delay for at
- * least 100ms after such a reset before link training is needed.
+ * fundamental reset. As required by PCI Express spec (PCI Express
+ * Base Specification, REV. 4.0 PCI Express, February 19 2014, 6.6.1
+ * Conventional Reset) a delay for at least 100ms after such a reset
+ * before sending a Configuration Request to the device is needed.
+ * So wait until PCIe link is up. Function advk_pcie_wait_for_link()
+ * waits for link at least 900ms.
*/
- msleep(PCI_PM_D3COLD_WAIT);
-
- /*
- * Try link training at link gen specified by device tree property
- * 'max-link-speed'. If this fails, iteratively train at lower gen.
- */
- for (gen = pcie->link_gen; gen > 0; --gen) {
- neg_gen = advk_pcie_train_at_gen(pcie, gen);
- if (neg_gen > 0)
- break;
- }
-
- if (neg_gen < 0)
- goto err;
-
- /*
- * After successful training if negotiated gen is lower than requested,
- * train again on negotiated gen. This solves some stability issues for
- * some buggy gen1 cards.
- */
- if (neg_gen < gen) {
- gen = neg_gen;
- neg_gen = advk_pcie_train_at_gen(pcie, gen);
- }
-
- if (neg_gen == gen) {
- dev_info(dev, "link up at gen %i\n", gen);
- return;
- }
-
-err:
- dev_err(dev, "link never came up\n");
+ ret = advk_pcie_wait_for_link(pcie);
+ if (ret < 0)
+ dev_err(dev, "link never came up\n");
+ else
+ dev_info(dev, "link up\n");
}
/*
@@ -451,9 +480,15 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
u32 reg;
int i;
- /* Enable TX */
+ /*
+ * Configure PCIe Reference clock. Direction is from the PCIe
+ * controller to the endpoint card, so enable transmitting of
+ * Reference clock differential signal off-chip and disable
+ * receiving off-chip differential signal.
+ */
reg = advk_readl(pcie, PCIE_CORE_REF_CLK_REG);
reg |= PCIE_CORE_REF_CLK_TX_ENABLE;
+ reg &= ~PCIE_CORE_REF_CLK_RX_ENABLE;
advk_writel(pcie, reg, PCIE_CORE_REF_CLK_REG);
/* Set to Direct mode */
@@ -477,6 +512,31 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
reg = (PCI_VENDOR_ID_MARVELL << 16) | PCI_VENDOR_ID_MARVELL;
advk_writel(pcie, reg, VENDOR_ID_REG);
+ /*
+ * Change Class Code of PCI Bridge device to PCI Bridge (0x600400),
+ * because the default value is Mass storage controller (0x010400).
+ *
+ * Note that this Aardvark PCI Bridge does not have compliant Type 1
+ * Configuration Space and it even cannot be accessed via Aardvark's
+ * PCI config space access method. Something like config space is
+ * available in internal Aardvark registers starting at offset 0x0
+ * and is reported as Type 0. In range 0x10 - 0x34 it has totally
+ * different registers.
+ *
+ * Therefore driver uses emulation of PCI Bridge which emulates
+ * access to configuration space via internal Aardvark registers or
+ * emulated configuration buffer.
+ */
+ reg = advk_readl(pcie, PCIE_CORE_DEV_REV_REG);
+ reg &= ~0xffffff00;
+ reg |= (PCI_CLASS_BRIDGE_PCI << 8) << 8;
+ advk_writel(pcie, reg, PCIE_CORE_DEV_REV_REG);
+
+ /* Disable Root Bridge I/O space, memory space and bus mastering */
+ reg = advk_readl(pcie, PCIE_CORE_CMD_STATUS_REG);
+ reg &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
+ advk_writel(pcie, reg, PCIE_CORE_CMD_STATUS_REG);
+
/* Set Advanced Error Capabilities and Control PF0 register */
reg = PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX |
PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX_EN |
@@ -488,8 +548,9 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
reg = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + PCI_EXP_DEVCTL);
reg &= ~PCI_EXP_DEVCTL_RELAX_EN;
reg &= ~PCI_EXP_DEVCTL_NOSNOOP_EN;
+ reg &= ~PCI_EXP_DEVCTL_PAYLOAD;
reg &= ~PCI_EXP_DEVCTL_READRQ;
- reg |= PCI_EXP_DEVCTL_PAYLOAD; /* Set max payload size */
+ reg |= PCI_EXP_DEVCTL_PAYLOAD_512B;
reg |= PCI_EXP_DEVCTL_READRQ_512B;
advk_writel(pcie, reg, PCIE_CORE_PCIEXP_CAP + PCI_EXP_DEVCTL);
@@ -574,19 +635,6 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
advk_pcie_disable_ob_win(pcie, i);
advk_pcie_train_link(pcie);
-
- /*
- * FIXME: The following register update is suspicious. This register is
- * applicable only when the PCI controller is configured for Endpoint
- * mode, not as a Root Complex. But apparently when this code is
- * removed, some cards stop working. This should be investigated and
- * a comment explaining this should be put here.
- */
- reg = advk_readl(pcie, PCIE_CORE_CMD_STATUS_REG);
- reg |= PCIE_CORE_CMD_MEM_ACCESS_EN |
- PCIE_CORE_CMD_IO_ACCESS_EN |
- PCIE_CORE_CMD_MEM_IO_REQ_EN;
- advk_writel(pcie, reg, PCIE_CORE_CMD_STATUS_REG);
}
static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u32 *val)
@@ -595,6 +643,7 @@ static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u32 *val)
u32 reg;
unsigned int status;
char *strcomp_status, *str_posted;
+ int ret;
reg = advk_readl(pcie, PIO_STAT);
status = (reg & PIO_COMPLETION_STATUS_MASK) >>
@@ -619,6 +668,7 @@ static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u32 *val)
case PIO_COMPLETION_STATUS_OK:
if (reg & PIO_ERR_STATUS) {
strcomp_status = "COMP_ERR";
+ ret = -EFAULT;
break;
}
/* Get the read result */
@@ -626,9 +676,11 @@ static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u32 *val)
*val = advk_readl(pcie, PIO_RD_DATA);
/* No error */
strcomp_status = NULL;
+ ret = 0;
break;
case PIO_COMPLETION_STATUS_UR:
strcomp_status = "UR";
+ ret = -EOPNOTSUPP;
break;
case PIO_COMPLETION_STATUS_CRS:
if (allow_crs && val) {
@@ -646,6 +698,7 @@ static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u32 *val)
*/
*val = CFG_RD_CRS_VAL;
strcomp_status = NULL;
+ ret = 0;
break;
}
/* PCIe r4.0, sec 2.3.2, says:
@@ -661,31 +714,34 @@ static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u32 *val)
* Request and taking appropriate action, e.g., complete the
* Request to the host as a failed transaction.
*
- * To simplify implementation do not re-issue the Configuration
- * Request and complete the Request as a failed transaction.
+ * So return -EAGAIN and caller (pci-aardvark.c driver) will
+ * re-issue request again up to the PIO_RETRY_CNT retries.
*/
strcomp_status = "CRS";
+ ret = -EAGAIN;
break;
case PIO_COMPLETION_STATUS_CA:
strcomp_status = "CA";
+ ret = -ECANCELED;
break;
default:
strcomp_status = "Unknown";
+ ret = -EINVAL;
break;
}
if (!strcomp_status)
- return 0;
+ return ret;
if (reg & PIO_NON_POSTED_REQ)
str_posted = "Non-posted";
else
str_posted = "Posted";
- dev_err(dev, "%s PIO Response Status: %s, %#x @ %#x\n",
+ dev_dbg(dev, "%s PIO Response Status: %s, %#x @ %#x\n",
str_posted, strcomp_status, reg, advk_readl(pcie, PIO_ADDR_LS));
- return -EFAULT;
+ return ret;
}
static int advk_pcie_wait_pio(struct advk_pcie *pcie)
@@ -693,13 +749,13 @@ static int advk_pcie_wait_pio(struct advk_pcie *pcie)
struct device *dev = &pcie->pdev->dev;
int i;
- for (i = 0; i < PIO_RETRY_CNT; i++) {
+ for (i = 1; i <= PIO_RETRY_CNT; i++) {
u32 start, isr;
start = advk_readl(pcie, PIO_START);
isr = advk_readl(pcie, PIO_ISR);
if (!start && isr)
- return 0;
+ return i;
udelay(PIO_RETRY_DELAY);
}
@@ -707,6 +763,72 @@ static int advk_pcie_wait_pio(struct advk_pcie *pcie)
return -ETIMEDOUT;
}
+static pci_bridge_emul_read_status_t
+advk_pci_bridge_emul_base_conf_read(struct pci_bridge_emul *bridge,
+ int reg, u32 *value)
+{
+ struct advk_pcie *pcie = bridge->data;
+
+ switch (reg) {
+ case PCI_COMMAND:
+ *value = advk_readl(pcie, PCIE_CORE_CMD_STATUS_REG);
+ return PCI_BRIDGE_EMUL_HANDLED;
+
+ case PCI_ROM_ADDRESS1:
+ *value = advk_readl(pcie, PCIE_CORE_EXP_ROM_BAR_REG);
+ return PCI_BRIDGE_EMUL_HANDLED;
+
+ case PCI_INTERRUPT_LINE: {
+ /*
+ * From the whole 32bit register we support reading from HW only
+ * one bit: PCI_BRIDGE_CTL_BUS_RESET.
+ * Other bits are retrieved only from emulated config buffer.
+ */
+ __le32 *cfgspace = (__le32 *)&bridge->conf;
+ u32 val = le32_to_cpu(cfgspace[PCI_INTERRUPT_LINE / 4]);
+ if (advk_readl(pcie, PCIE_CORE_CTRL1_REG) & HOT_RESET_GEN)
+ val |= PCI_BRIDGE_CTL_BUS_RESET << 16;
+ else
+ val &= ~(PCI_BRIDGE_CTL_BUS_RESET << 16);
+ *value = val;
+ return PCI_BRIDGE_EMUL_HANDLED;
+ }
+
+ default:
+ return PCI_BRIDGE_EMUL_NOT_HANDLED;
+ }
+}
+
+static void
+advk_pci_bridge_emul_base_conf_write(struct pci_bridge_emul *bridge,
+ int reg, u32 old, u32 new, u32 mask)
+{
+ struct advk_pcie *pcie = bridge->data;
+
+ switch (reg) {
+ case PCI_COMMAND:
+ advk_writel(pcie, new, PCIE_CORE_CMD_STATUS_REG);
+ break;
+
+ case PCI_ROM_ADDRESS1:
+ advk_writel(pcie, new, PCIE_CORE_EXP_ROM_BAR_REG);
+ break;
+
+ case PCI_INTERRUPT_LINE:
+ if (mask & (PCI_BRIDGE_CTL_BUS_RESET << 16)) {
+ u32 val = advk_readl(pcie, PCIE_CORE_CTRL1_REG);
+ if (new & (PCI_BRIDGE_CTL_BUS_RESET << 16))
+ val |= HOT_RESET_GEN;
+ else
+ val &= ~HOT_RESET_GEN;
+ advk_writel(pcie, val, PCIE_CORE_CTRL1_REG);
+ }
+ break;
+
+ default:
+ break;
+ }
+}
static pci_bridge_emul_read_status_t
advk_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge,
@@ -723,6 +845,7 @@ advk_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge,
case PCI_EXP_RTCTL: {
u32 val = advk_readl(pcie, PCIE_ISR0_MASK_REG);
*value = (val & PCIE_MSG_PM_PME_MASK) ? 0 : PCI_EXP_RTCTL_PMEIE;
+ *value |= le16_to_cpu(bridge->pcie_conf.rootctl) & PCI_EXP_RTCTL_CRSSVE;
*value |= PCI_EXP_RTCAP_CRSVIS << 16;
return PCI_BRIDGE_EMUL_HANDLED;
}
@@ -734,12 +857,26 @@ advk_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge,
return PCI_BRIDGE_EMUL_HANDLED;
}
+ case PCI_EXP_LNKCAP: {
+ u32 val = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + reg);
+ /*
+ * PCI_EXP_LNKCAP_DLLLARC bit is hardwired in aardvark HW to 0.
+ * But support for PCI_EXP_LNKSTA_DLLLA is emulated via ltssm
+ * state so explicitly enable PCI_EXP_LNKCAP_DLLLARC flag.
+ */
+ val |= PCI_EXP_LNKCAP_DLLLARC;
+ *value = val;
+ return PCI_BRIDGE_EMUL_HANDLED;
+ }
+
case PCI_EXP_LNKCTL: {
/* u32 contains both PCI_EXP_LNKCTL and PCI_EXP_LNKSTA */
u32 val = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + reg) &
~(PCI_EXP_LNKSTA_LT << 16);
- if (!advk_pcie_link_up(pcie))
+ if (advk_pcie_link_training(pcie))
val |= (PCI_EXP_LNKSTA_LT << 16);
+ if (advk_pcie_link_active(pcie))
+ val |= (PCI_EXP_LNKSTA_DLLLA << 16);
*value = val;
return PCI_BRIDGE_EMUL_HANDLED;
}
@@ -747,7 +884,6 @@ advk_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge,
case PCI_CAP_LIST_ID:
case PCI_EXP_DEVCAP:
case PCI_EXP_DEVCTL:
- case PCI_EXP_LNKCAP:
*value = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + reg);
return PCI_BRIDGE_EMUL_HANDLED;
default:
@@ -794,6 +930,8 @@ advk_pci_bridge_emul_pcie_conf_write(struct pci_bridge_emul *bridge,
}
static struct pci_bridge_emul_ops advk_pci_bridge_emul_ops = {
+ .read_base = advk_pci_bridge_emul_base_conf_read,
+ .write_base = advk_pci_bridge_emul_base_conf_write,
.read_pcie = advk_pci_bridge_emul_pcie_conf_read,
.write_pcie = advk_pci_bridge_emul_pcie_conf_write,
};
@@ -805,7 +943,6 @@ static struct pci_bridge_emul_ops advk_pci_bridge_emul_ops = {
static int advk_sw_pci_bridge_init(struct advk_pcie *pcie)
{
struct pci_bridge_emul *bridge = &pcie->bridge;
- int ret;
bridge->conf.vendor =
cpu_to_le16(advk_readl(pcie, PCIE_CORE_DEV_ID_REG) & 0xffff);
@@ -825,19 +962,14 @@ static int advk_sw_pci_bridge_init(struct advk_pcie *pcie)
/* Support interrupt A for MSI feature */
bridge->conf.intpin = PCIE_CORE_INT_A_ASSERT_ENABLE;
+ /* Indicates support for Completion Retry Status */
+ bridge->pcie_conf.rootcap = cpu_to_le16(PCI_EXP_RTCAP_CRSVIS);
+
bridge->has_pcie = true;
bridge->data = pcie;
bridge->ops = &advk_pci_bridge_emul_ops;
- /* PCIe config space can be initialized after pci_bridge_emul_init() */
- ret = pci_bridge_emul_init(bridge, 0);
- if (ret < 0)
- return ret;
-
- /* Indicates supports for Completion Retry Status */
- bridge->pcie_conf.rootcap = cpu_to_le16(PCI_EXP_RTCAP_CRSVIS);
-
- return 0;
+ return pci_bridge_emul_init(bridge, 0);
}
static bool advk_pcie_valid_device(struct advk_pcie *pcie, struct pci_bus *bus,
@@ -889,6 +1021,7 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
int where, int size, u32 *val)
{
struct advk_pcie *pcie = bus->sysdata;
+ int retry_count;
bool allow_crs;
u32 reg;
int ret;
@@ -911,18 +1044,8 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
(le16_to_cpu(pcie->bridge.pcie_conf.rootctl) &
PCI_EXP_RTCTL_CRSSVE);
- if (advk_pcie_pio_is_running(pcie)) {
- /*
- * If it is possible return Completion Retry Status so caller
- * tries to issue the request again instead of failing.
- */
- if (allow_crs) {
- *val = CFG_RD_CRS_VAL;
- return PCIBIOS_SUCCESSFUL;
- }
- *val = 0xffffffff;
- return PCIBIOS_SET_FAILED;
- }
+ if (advk_pcie_pio_is_running(pcie))
+ goto try_crs;
/* Program the control register */
reg = advk_readl(pcie, PIO_CTRL);
@@ -941,30 +1064,24 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
/* Program the data strobe */
advk_writel(pcie, 0xf, PIO_WR_DATA_STRB);
- /* Clear PIO DONE ISR and start the transfer */
- advk_writel(pcie, 1, PIO_ISR);
- advk_writel(pcie, 1, PIO_START);
+ retry_count = 0;
+ do {
+ /* Clear PIO DONE ISR and start the transfer */
+ advk_writel(pcie, 1, PIO_ISR);
+ advk_writel(pcie, 1, PIO_START);
- ret = advk_pcie_wait_pio(pcie);
- if (ret < 0) {
- /*
- * If it is possible return Completion Retry Status so caller
- * tries to issue the request again instead of failing.
- */
- if (allow_crs) {
- *val = CFG_RD_CRS_VAL;
- return PCIBIOS_SUCCESSFUL;
- }
- *val = 0xffffffff;
- return PCIBIOS_SET_FAILED;
- }
+ ret = advk_pcie_wait_pio(pcie);
+ if (ret < 0)
+ goto try_crs;
- /* Check PIO status and get the read result */
- ret = advk_pcie_check_pio_status(pcie, allow_crs, val);
- if (ret < 0) {
- *val = 0xffffffff;
- return PCIBIOS_SET_FAILED;
- }
+ retry_count += ret;
+
+ /* Check PIO status and get the read result */
+ ret = advk_pcie_check_pio_status(pcie, allow_crs, val);
+ } while (ret == -EAGAIN && retry_count < PIO_RETRY_CNT);
+
+ if (ret < 0)
+ goto fail;
if (size == 1)
*val = (*val >> (8 * (where & 3))) & 0xff;
@@ -972,6 +1089,20 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
*val = (*val >> (8 * (where & 3))) & 0xffff;
return PCIBIOS_SUCCESSFUL;
+
+try_crs:
+ /*
+ * If it is possible, return Completion Retry Status so that caller
+ * tries to issue the request again instead of failing.
+ */
+ if (allow_crs) {
+ *val = CFG_RD_CRS_VAL;
+ return PCIBIOS_SUCCESSFUL;
+ }
+
+fail:
+ *val = 0xffffffff;
+ return PCIBIOS_SET_FAILED;
}
static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
@@ -980,6 +1111,7 @@ static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
struct advk_pcie *pcie = bus->sysdata;
u32 reg;
u32 data_strobe = 0x0;
+ int retry_count;
int offset;
int ret;
@@ -1021,19 +1153,22 @@ static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
/* Program the data strobe */
advk_writel(pcie, data_strobe, PIO_WR_DATA_STRB);
- /* Clear PIO DONE ISR and start the transfer */
- advk_writel(pcie, 1, PIO_ISR);
- advk_writel(pcie, 1, PIO_START);
+ retry_count = 0;
+ do {
+ /* Clear PIO DONE ISR and start the transfer */
+ advk_writel(pcie, 1, PIO_ISR);
+ advk_writel(pcie, 1, PIO_START);
- ret = advk_pcie_wait_pio(pcie);
- if (ret < 0)
- return PCIBIOS_SET_FAILED;
+ ret = advk_pcie_wait_pio(pcie);
+ if (ret < 0)
+ return PCIBIOS_SET_FAILED;
- ret = advk_pcie_check_pio_status(pcie, false, NULL);
- if (ret < 0)
- return PCIBIOS_SET_FAILED;
+ retry_count += ret;
- return PCIBIOS_SUCCESSFUL;
+ ret = advk_pcie_check_pio_status(pcie, false, NULL);
+ } while (ret == -EAGAIN && retry_count < PIO_RETRY_CNT);
+
+ return ret < 0 ? PCIBIOS_SET_FAILED : PCIBIOS_SUCCESSFUL;
}
static struct pci_ops advk_pcie_ops = {
@@ -1082,7 +1217,7 @@ static int advk_msi_irq_domain_alloc(struct irq_domain *domain,
domain->host_data, handle_simple_irq,
NULL, NULL);
- return hwirq;
+ return 0;
}
static void advk_msi_irq_domain_free(struct irq_domain *domain,
@@ -1263,8 +1398,12 @@ static void advk_pcie_handle_msi(struct advk_pcie *pcie)
if (!(BIT(msi_idx) & msi_status))
continue;
+ /*
+ * msi_idx contains bits [4:0] of the msi_data and msi_data
+ * contains 16bit MSI interrupt number
+ */
advk_writel(pcie, BIT(msi_idx), PCIE_MSI_STATUS_REG);
- msi_data = advk_readl(pcie, PCIE_MSI_PAYLOAD_REG) & 0xFF;
+ msi_data = advk_readl(pcie, PCIE_MSI_PAYLOAD_REG) & PCIE_MSI_DATA_MASK;
generic_handle_irq(msi_data);
}
@@ -1286,12 +1425,6 @@ static void advk_pcie_handle_int(struct advk_pcie *pcie)
isr1_mask = advk_readl(pcie, PCIE_ISR1_MASK_REG);
isr1_status = isr1_val & ((~isr1_mask) & PCIE_ISR1_ALL_MASK);
- if (!isr0_status && !isr1_status) {
- advk_writel(pcie, isr0_val, PCIE_ISR0_REG);
- advk_writel(pcie, isr1_val, PCIE_ISR1_REG);
- return;
- }
-
/* Process MSI interrupts */
if (isr0_status & PCIE_ISR0_MSI_INT_PENDING)
advk_pcie_handle_msi(pcie);
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
index eaec915ffe62..8459f857ad9e 100644
--- a/drivers/pci/controller/pci-hyperv.c
+++ b/drivers/pci/controller/pci-hyperv.c
@@ -3126,14 +3126,14 @@ static int hv_pci_probe(struct hv_device *hdev,
if (dom == HVPCI_DOM_INVALID) {
dev_err(&hdev->device,
- "Unable to use dom# 0x%hx or other numbers", dom_req);
+ "Unable to use dom# 0x%x or other numbers", dom_req);
ret = -EINVAL;
goto free_bus;
}
if (dom != dom_req)
dev_info(&hdev->device,
- "PCI dom# 0x%hx has collision, using 0x%hx",
+ "PCI dom# 0x%x has collision, using 0x%x",
dom_req, dom);
hbus->bridge->domain_nr = dom;
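The %hx to %x conversion in the messages above is safe because a u16 argument is promoted to int when it passes through the varargs of dev_err()/dev_info(), so the length modifier adds nothing; a minimal illustration:

	u16 dom = 0xbeef;

	/* both forms print "dom# 0xbeef"; the argument reaches the format
	 * machinery as an int either way */
	dev_info(dev, "dom# 0x%x\n", dom);
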
diff --git a/drivers/pci/controller/pci-thunder-ecam.c b/drivers/pci/controller/pci-thunder-ecam.c
index ffd84656544f..e9d5ca245f5e 100644
--- a/drivers/pci/controller/pci-thunder-ecam.c
+++ b/drivers/pci/controller/pci-thunder-ecam.c
@@ -17,7 +17,7 @@ static void set_val(u32 v, int where, int size, u32 *val)
{
int shift = (where & 3) * 8;
- pr_debug("set_val %04x: %08x\n", (unsigned)(where & ~3), v);
+ pr_debug("set_val %04x: %08x\n", (unsigned int)(where & ~3), v);
v >>= shift;
if (size == 1)
v &= 0xff;
@@ -187,7 +187,7 @@ static int thunder_ecam_config_read(struct pci_bus *bus, unsigned int devfn,
pr_debug("%04x:%04x - Fix pass#: %08x, where: %03x, devfn: %03x\n",
vendor_device & 0xffff, vendor_device >> 16, class_rev,
- (unsigned) where, devfn);
+ (unsigned int)where, devfn);
/* Check for non type-00 header */
if (cfg_type == 0) {
diff --git a/drivers/pci/controller/pci-xgene-msi.c b/drivers/pci/controller/pci-xgene-msi.c
index b7a8e062fcc5..c50ff279903c 100644
--- a/drivers/pci/controller/pci-xgene-msi.c
+++ b/drivers/pci/controller/pci-xgene-msi.c
@@ -302,7 +302,7 @@ static void xgene_msi_isr(struct irq_desc *desc)
/*
* MSIINTn (n is 0..F) indicates if there is a pending MSI interrupt
- * If bit x of this register is set (x is 0..7), one or more interupts
+ * If bit x of this register is set (x is 0..7), one or more interrupts
* corresponding to MSInIRx is set.
*/
grp_select = xgene_msi_int_read(xgene_msi, msi_grp);
diff --git a/drivers/pci/controller/pcie-apple.c b/drivers/pci/controller/pcie-apple.c
new file mode 100644
index 000000000000..1bf4d75b61be
--- /dev/null
+++ b/drivers/pci/controller/pcie-apple.c
@@ -0,0 +1,824 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host bridge driver for Apple system-on-chips.
+ *
+ * The HW is ECAM compliant, so once the controller is initialized,
+ * the driver mostly deals with MSI mapping and handling of per-port
+ * interrupts (INTx, management and error signals).
+ *
+ * Initialization requires enabling power and clocks, along with a
+ * number of register pokes.
+ *
+ * Copyright (C) 2021 Alyssa Rosenzweig <alyssa@rosenzweig.io>
+ * Copyright (C) 2021 Google LLC
+ * Copyright (C) 2021 Corellium LLC
+ * Copyright (C) 2021 Mark Kettenis <kettenis@openbsd.org>
+ *
+ * Author: Alyssa Rosenzweig <alyssa@rosenzweig.io>
+ * Author: Marc Zyngier <maz@kernel.org>
+ */
+
+#include <linux/gpio/consumer.h>
+#include <linux/kernel.h>
+#include <linux/iopoll.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/notifier.h>
+#include <linux/of_irq.h>
+#include <linux/pci-ecam.h>
+
+#define CORE_RC_PHYIF_CTL 0x00024
+#define CORE_RC_PHYIF_CTL_RUN BIT(0)
+#define CORE_RC_PHYIF_STAT 0x00028
+#define CORE_RC_PHYIF_STAT_REFCLK BIT(4)
+#define CORE_RC_CTL 0x00050
+#define CORE_RC_CTL_RUN BIT(0)
+#define CORE_RC_STAT 0x00058
+#define CORE_RC_STAT_READY BIT(0)
+#define CORE_FABRIC_STAT 0x04000
+#define CORE_FABRIC_STAT_MASK 0x001F001F
+#define CORE_LANE_CFG(port) (0x84000 + 0x4000 * (port))
+#define CORE_LANE_CFG_REFCLK0REQ BIT(0)
+#define CORE_LANE_CFG_REFCLK1 BIT(1)
+#define CORE_LANE_CFG_REFCLK0ACK BIT(2)
+#define CORE_LANE_CFG_REFCLKEN (BIT(9) | BIT(10))
+#define CORE_LANE_CTL(port) (0x84004 + 0x4000 * (port))
+#define CORE_LANE_CTL_CFGACC BIT(15)
+
+#define PORT_LTSSMCTL 0x00080
+#define PORT_LTSSMCTL_START BIT(0)
+#define PORT_INTSTAT 0x00100
+#define PORT_INT_TUNNEL_ERR 31
+#define PORT_INT_CPL_TIMEOUT 23
+#define PORT_INT_RID2SID_MAPERR 22
+#define PORT_INT_CPL_ABORT 21
+#define PORT_INT_MSI_BAD_DATA 19
+#define PORT_INT_MSI_ERR 18
+#define PORT_INT_REQADDR_GT32 17
+#define PORT_INT_AF_TIMEOUT 15
+#define PORT_INT_LINK_DOWN 14
+#define PORT_INT_LINK_UP 12
+#define PORT_INT_LINK_BWMGMT 11
+#define PORT_INT_AER_MASK (15 << 4)
+#define PORT_INT_PORT_ERR 4
+#define PORT_INT_INTx(i) i
+#define PORT_INT_INTx_MASK 15
+#define PORT_INTMSK 0x00104
+#define PORT_INTMSKSET 0x00108
+#define PORT_INTMSKCLR 0x0010c
+#define PORT_MSICFG 0x00124
+#define PORT_MSICFG_EN BIT(0)
+#define PORT_MSICFG_L2MSINUM_SHIFT 4
+#define PORT_MSIBASE 0x00128
+#define PORT_MSIBASE_1_SHIFT 16
+#define PORT_MSIADDR 0x00168
+#define PORT_LINKSTS 0x00208
+#define PORT_LINKSTS_UP BIT(0)
+#define PORT_LINKSTS_BUSY BIT(2)
+#define PORT_LINKCMDSTS 0x00210
+#define PORT_OUTS_NPREQS 0x00284
+#define PORT_OUTS_NPREQS_REQ BIT(24)
+#define PORT_OUTS_NPREQS_CPL BIT(16)
+#define PORT_RXWR_FIFO 0x00288
+#define PORT_RXWR_FIFO_HDR GENMASK(15, 10)
+#define PORT_RXWR_FIFO_DATA GENMASK(9, 0)
+#define PORT_RXRD_FIFO 0x0028C
+#define PORT_RXRD_FIFO_REQ GENMASK(6, 0)
+#define PORT_OUTS_CPLS 0x00290
+#define PORT_OUTS_CPLS_SHRD GENMASK(14, 8)
+#define PORT_OUTS_CPLS_WAIT GENMASK(6, 0)
+#define PORT_APPCLK 0x00800
+#define PORT_APPCLK_EN BIT(0)
+#define PORT_APPCLK_CGDIS BIT(8)
+#define PORT_STATUS 0x00804
+#define PORT_STATUS_READY BIT(0)
+#define PORT_REFCLK 0x00810
+#define PORT_REFCLK_EN BIT(0)
+#define PORT_REFCLK_CGDIS BIT(8)
+#define PORT_PERST 0x00814
+#define PORT_PERST_OFF BIT(0)
+#define PORT_RID2SID(i16) (0x00828 + 4 * (i16))
+#define PORT_RID2SID_VALID BIT(31)
+#define PORT_RID2SID_SID_SHIFT 16
+#define PORT_RID2SID_BUS_SHIFT 8
+#define PORT_RID2SID_DEV_SHIFT 3
+#define PORT_RID2SID_FUNC_SHIFT 0
+#define PORT_OUTS_PREQS_HDR 0x00980
+#define PORT_OUTS_PREQS_HDR_MASK GENMASK(9, 0)
+#define PORT_OUTS_PREQS_DATA 0x00984
+#define PORT_OUTS_PREQS_DATA_MASK GENMASK(15, 0)
+#define PORT_TUNCTRL 0x00988
+#define PORT_TUNCTRL_PERST_ON BIT(0)
+#define PORT_TUNCTRL_PERST_ACK_REQ BIT(1)
+#define PORT_TUNSTAT 0x0098c
+#define PORT_TUNSTAT_PERST_ON BIT(0)
+#define PORT_TUNSTAT_PERST_ACK_PEND BIT(1)
+#define PORT_PREFMEM_ENABLE 0x00994
+
+#define MAX_RID2SID 64
+
+/*
+ * The doorbell address is set to 0xfffff000, which by convention
+ * matches what MacOS does, and it is possible to use any other
+ * address (in the bottom 4GB, as the base register is only 32bit).
+ * However, it has to be excluded from the IOVA range, and the DART
+ * driver has to know about it.
+ */
+#define DOORBELL_ADDR CONFIG_PCIE_APPLE_MSI_DOORBELL_ADDR
+
+struct apple_pcie {
+ struct mutex lock;
+ struct device *dev;
+ void __iomem *base;
+ struct irq_domain *domain;
+ unsigned long *bitmap;
+ struct list_head ports;
+ struct completion event;
+ struct irq_fwspec fwspec;
+ u32 nvecs;
+};
+
+struct apple_pcie_port {
+ struct apple_pcie *pcie;
+ struct device_node *np;
+ void __iomem *base;
+ struct irq_domain *domain;
+ struct list_head entry;
+ DECLARE_BITMAP(sid_map, MAX_RID2SID);
+ int sid_map_sz;
+ int idx;
+};
+
+static void rmw_set(u32 set, void __iomem *addr)
+{
+ writel_relaxed(readl_relaxed(addr) | set, addr);
+}
+
+static void rmw_clear(u32 clr, void __iomem *addr)
+{
+ writel_relaxed(readl_relaxed(addr) & ~clr, addr);
+}
+
+static void apple_msi_top_irq_mask(struct irq_data *d)
+{
+ pci_msi_mask_irq(d);
+ irq_chip_mask_parent(d);
+}
+
+static void apple_msi_top_irq_unmask(struct irq_data *d)
+{
+ pci_msi_unmask_irq(d);
+ irq_chip_unmask_parent(d);
+}
+
+static struct irq_chip apple_msi_top_chip = {
+ .name = "PCIe MSI",
+ .irq_mask = apple_msi_top_irq_mask,
+ .irq_unmask = apple_msi_top_irq_unmask,
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+ .irq_set_type = irq_chip_set_type_parent,
+};
+
+static void apple_msi_compose_msg(struct irq_data *data, struct msi_msg *msg)
+{
+ msg->address_hi = upper_32_bits(DOORBELL_ADDR);
+ msg->address_lo = lower_32_bits(DOORBELL_ADDR);
+ msg->data = data->hwirq;
+}
+
+static struct irq_chip apple_msi_bottom_chip = {
+ .name = "MSI",
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+ .irq_set_type = irq_chip_set_type_parent,
+ .irq_compose_msi_msg = apple_msi_compose_msg,
+};
+
+static int apple_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *args)
+{
+ struct apple_pcie *pcie = domain->host_data;
+ struct irq_fwspec fwspec = pcie->fwspec;
+ unsigned int i;
+ int ret, hwirq;
+
+ mutex_lock(&pcie->lock);
+
+ hwirq = bitmap_find_free_region(pcie->bitmap, pcie->nvecs,
+ order_base_2(nr_irqs));
+
+ mutex_unlock(&pcie->lock);
+
+ if (hwirq < 0)
+ return -ENOSPC;
+
+ fwspec.param[1] += hwirq;
+
+ ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, &fwspec);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < nr_irqs; i++) {
+ irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
+ &apple_msi_bottom_chip,
+ domain->host_data);
+ }
+
+ return 0;
+}
+
+static void apple_msi_domain_free(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs)
+{
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+ struct apple_pcie *pcie = domain->host_data;
+
+ mutex_lock(&pcie->lock);
+
+ bitmap_release_region(pcie->bitmap, d->hwirq, order_base_2(nr_irqs));
+
+ mutex_unlock(&pcie->lock);
+}
+
+static const struct irq_domain_ops apple_msi_domain_ops = {
+ .alloc = apple_msi_domain_alloc,
+ .free = apple_msi_domain_free,
+};
+
+static struct msi_domain_info apple_msi_info = {
+ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX),
+ .chip = &apple_msi_top_chip,
+};
+
+static void apple_port_irq_mask(struct irq_data *data)
+{
+ struct apple_pcie_port *port = irq_data_get_irq_chip_data(data);
+
+ writel_relaxed(BIT(data->hwirq), port->base + PORT_INTMSKSET);
+}
+
+static void apple_port_irq_unmask(struct irq_data *data)
+{
+ struct apple_pcie_port *port = irq_data_get_irq_chip_data(data);
+
+ writel_relaxed(BIT(data->hwirq), port->base + PORT_INTMSKCLR);
+}
+
+static bool hwirq_is_intx(unsigned int hwirq)
+{
+ return BIT(hwirq) & PORT_INT_INTx_MASK;
+}
+
+static void apple_port_irq_ack(struct irq_data *data)
+{
+ struct apple_pcie_port *port = irq_data_get_irq_chip_data(data);
+
+ if (!hwirq_is_intx(data->hwirq))
+ writel_relaxed(BIT(data->hwirq), port->base + PORT_INTSTAT);
+}
+
+static int apple_port_irq_set_type(struct irq_data *data, unsigned int type)
+{
+ /*
+ * It doesn't seem that there is any way to configure the
+ * trigger, so assume INTx have to be level (as per the spec),
+ * and the rest is edge (which looks likely).
+ */
+ if (hwirq_is_intx(data->hwirq) ^ !!(type & IRQ_TYPE_LEVEL_MASK))
+ return -EINVAL;
+
+ irqd_set_trigger_type(data, type);
+ return 0;
+}
+
+static struct irq_chip apple_port_irqchip = {
+ .name = "PCIe",
+ .irq_ack = apple_port_irq_ack,
+ .irq_mask = apple_port_irq_mask,
+ .irq_unmask = apple_port_irq_unmask,
+ .irq_set_type = apple_port_irq_set_type,
+};
+
+static int apple_port_irq_domain_alloc(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs,
+ void *args)
+{
+ struct apple_pcie_port *port = domain->host_data;
+ struct irq_fwspec *fwspec = args;
+ int i;
+
+ for (i = 0; i < nr_irqs; i++) {
+ irq_flow_handler_t flow = handle_edge_irq;
+ unsigned int type = IRQ_TYPE_EDGE_RISING;
+
+ if (hwirq_is_intx(fwspec->param[0] + i)) {
+ flow = handle_level_irq;
+ type = IRQ_TYPE_LEVEL_HIGH;
+ }
+
+ irq_domain_set_info(domain, virq + i, fwspec->param[0] + i,
+ &apple_port_irqchip, port, flow,
+ NULL, NULL);
+
+ irq_set_irq_type(virq + i, type);
+ }
+
+ return 0;
+}
+
+static void apple_port_irq_domain_free(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs)
+{
+ int i;
+
+ for (i = 0; i < nr_irqs; i++) {
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);
+
+ irq_set_handler(virq + i, NULL);
+ irq_domain_reset_irq_data(d);
+ }
+}
+
+static const struct irq_domain_ops apple_port_irq_domain_ops = {
+ .translate = irq_domain_translate_onecell,
+ .alloc = apple_port_irq_domain_alloc,
+ .free = apple_port_irq_domain_free,
+};
+
+static void apple_port_irq_handler(struct irq_desc *desc)
+{
+ struct apple_pcie_port *port = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ unsigned long stat;
+ int i;
+
+ chained_irq_enter(chip, desc);
+
+ stat = readl_relaxed(port->base + PORT_INTSTAT);
+
+ for_each_set_bit(i, &stat, 32)
+ generic_handle_domain_irq(port->domain, i);
+
+ chained_irq_exit(chip, desc);
+}
+
+static int apple_pcie_port_setup_irq(struct apple_pcie_port *port)
+{
+ struct fwnode_handle *fwnode = &port->np->fwnode;
+ unsigned int irq;
+
+ /* FIXME: consider moving each interrupt under each port */
+ irq = irq_of_parse_and_map(to_of_node(dev_fwnode(port->pcie->dev)),
+ port->idx);
+ if (!irq)
+ return -ENXIO;
+
+ port->domain = irq_domain_create_linear(fwnode, 32,
+ &apple_port_irq_domain_ops,
+ port);
+ if (!port->domain)
+ return -ENOMEM;
+
+ /* Disable all interrupts */
+ writel_relaxed(~0, port->base + PORT_INTMSKSET);
+ writel_relaxed(~0, port->base + PORT_INTSTAT);
+
+ irq_set_chained_handler_and_data(irq, apple_port_irq_handler, port);
+
+ /* Configure MSI base address */
+ BUILD_BUG_ON(upper_32_bits(DOORBELL_ADDR));
+ writel_relaxed(lower_32_bits(DOORBELL_ADDR), port->base + PORT_MSIADDR);
+
+ /* Enable MSIs, shared between all ports */
+ writel_relaxed(0, port->base + PORT_MSIBASE);
+ writel_relaxed((ilog2(port->pcie->nvecs) << PORT_MSICFG_L2MSINUM_SHIFT) |
+ PORT_MSICFG_EN, port->base + PORT_MSICFG);
+
+ return 0;
+}
+
+static irqreturn_t apple_pcie_port_irq(int irq, void *data)
+{
+ struct apple_pcie_port *port = data;
+ unsigned int hwirq = irq_domain_get_irq_data(port->domain, irq)->hwirq;
+
+ switch (hwirq) {
+ case PORT_INT_LINK_UP:
+ dev_info_ratelimited(port->pcie->dev, "Link up on %pOF\n",
+ port->np);
+ complete_all(&port->pcie->event);
+ break;
+ case PORT_INT_LINK_DOWN:
+ dev_info_ratelimited(port->pcie->dev, "Link down on %pOF\n",
+ port->np);
+ break;
+ default:
+ return IRQ_NONE;
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int apple_pcie_port_register_irqs(struct apple_pcie_port *port)
+{
+ static struct {
+ unsigned int hwirq;
+ const char *name;
+ } port_irqs[] = {
+ { PORT_INT_LINK_UP, "Link up", },
+ { PORT_INT_LINK_DOWN, "Link down", },
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(port_irqs); i++) {
+ struct irq_fwspec fwspec = {
+ .fwnode = &port->np->fwnode,
+ .param_count = 1,
+ .param = {
+ [0] = port_irqs[i].hwirq,
+ },
+ };
+ unsigned int irq;
+ int ret;
+
+ irq = irq_domain_alloc_irqs(port->domain, 1, NUMA_NO_NODE,
+ &fwspec);
+ if (WARN_ON(!irq))
+ continue;
+
+ ret = request_irq(irq, apple_pcie_port_irq, 0,
+ port_irqs[i].name, port);
+ WARN_ON(ret);
+ }
+
+ return 0;
+}
+
+static int apple_pcie_setup_refclk(struct apple_pcie *pcie,
+ struct apple_pcie_port *port)
+{
+ u32 stat;
+ int res;
+
+ res = readl_relaxed_poll_timeout(pcie->base + CORE_RC_PHYIF_STAT, stat,
+ stat & CORE_RC_PHYIF_STAT_REFCLK,
+ 100, 50000);
+ if (res < 0)
+ return res;
+
+ rmw_set(CORE_LANE_CTL_CFGACC, pcie->base + CORE_LANE_CTL(port->idx));
+ rmw_set(CORE_LANE_CFG_REFCLK0REQ, pcie->base + CORE_LANE_CFG(port->idx));
+
+ res = readl_relaxed_poll_timeout(pcie->base + CORE_LANE_CFG(port->idx),
+ stat, stat & CORE_LANE_CFG_REFCLK0ACK,
+ 100, 50000);
+ if (res < 0)
+ return res;
+
+ rmw_set(CORE_LANE_CFG_REFCLK1, pcie->base + CORE_LANE_CFG(port->idx));
+ res = readl_relaxed_poll_timeout(pcie->base + CORE_LANE_CFG(port->idx),
+ stat, stat & CORE_LANE_CFG_REFCLK1,
+ 100, 50000);
+
+ if (res < 0)
+ return res;
+
+ rmw_clear(CORE_LANE_CTL_CFGACC, pcie->base + CORE_LANE_CTL(port->idx));
+
+ rmw_set(CORE_LANE_CFG_REFCLKEN, pcie->base + CORE_LANE_CFG(port->idx));
+ rmw_set(PORT_REFCLK_EN, port->base + PORT_REFCLK);
+
+ return 0;
+}
+
+static u32 apple_pcie_rid2sid_write(struct apple_pcie_port *port,
+ int idx, u32 val)
+{
+ writel_relaxed(val, port->base + PORT_RID2SID(idx));
+ /* Read back to ensure completion of the write */
+ return readl_relaxed(port->base + PORT_RID2SID(idx));
+}
+
+static int apple_pcie_setup_port(struct apple_pcie *pcie,
+ struct device_node *np)
+{
+ struct platform_device *platform = to_platform_device(pcie->dev);
+ struct apple_pcie_port *port;
+ struct gpio_desc *reset;
+ u32 stat, idx;
+ int ret, i;
+
+ reset = gpiod_get_from_of_node(np, "reset-gpios", 0,
+ GPIOD_OUT_LOW, "#PERST");
+ if (IS_ERR(reset))
+ return PTR_ERR(reset);
+
+ port = devm_kzalloc(pcie->dev, sizeof(*port), GFP_KERNEL);
+ if (!port)
+ return -ENOMEM;
+
+ ret = of_property_read_u32_index(np, "reg", 0, &idx);
+ if (ret)
+ return ret;
+
+ /* Use the first reg entry to work out the port index */
+ port->idx = idx >> 11;
+ port->pcie = pcie;
+ port->np = np;
+
+ port->base = devm_platform_ioremap_resource(platform, port->idx + 2);
+ if (IS_ERR(port->base))
+ return PTR_ERR(port->base);
+
+ rmw_set(PORT_APPCLK_EN, port->base + PORT_APPCLK);
+
+ ret = apple_pcie_setup_refclk(pcie, port);
+ if (ret < 0)
+ return ret;
+
+ rmw_set(PORT_PERST_OFF, port->base + PORT_PERST);
+ gpiod_set_value(reset, 1);
+
+ ret = readl_relaxed_poll_timeout(port->base + PORT_STATUS, stat,
+ stat & PORT_STATUS_READY, 100, 250000);
+ if (ret < 0) {
+ dev_err(pcie->dev, "port %pOF ready wait timeout\n", np);
+ return ret;
+ }
+
+ ret = apple_pcie_port_setup_irq(port);
+ if (ret)
+ return ret;
+
+ /* Reset all RID/SID mappings, and check for RAZ/WI registers */
+ for (i = 0; i < MAX_RID2SID; i++) {
+ if (apple_pcie_rid2sid_write(port, i, 0xbad1d) != 0xbad1d)
+ break;
+ apple_pcie_rid2sid_write(port, i, 0);
+ }
+
+ dev_dbg(pcie->dev, "%pOF: %d RID/SID mapping entries\n", np, i);
+
+ port->sid_map_sz = i;
+
+ list_add_tail(&port->entry, &pcie->ports);
+ init_completion(&pcie->event);
+
+ ret = apple_pcie_port_register_irqs(port);
+ WARN_ON(ret);
+
+ writel_relaxed(PORT_LTSSMCTL_START, port->base + PORT_LTSSMCTL);
+
+ if (!wait_for_completion_timeout(&pcie->event, HZ / 10))
+ dev_warn(pcie->dev, "%pOF link didn't come up\n", np);
+
+ return 0;
+}
+
+static int apple_msi_init(struct apple_pcie *pcie)
+{
+ struct fwnode_handle *fwnode = dev_fwnode(pcie->dev);
+ struct of_phandle_args args = {};
+ struct irq_domain *parent;
+ int ret;
+
+ ret = of_parse_phandle_with_args(to_of_node(fwnode), "msi-ranges",
+ "#interrupt-cells", 0, &args);
+ if (ret)
+ return ret;
+
+ ret = of_property_read_u32_index(to_of_node(fwnode), "msi-ranges",
+ args.args_count + 1, &pcie->nvecs);
+ if (ret)
+ return ret;
+
+ of_phandle_args_to_fwspec(args.np, args.args, args.args_count,
+ &pcie->fwspec);
+
+ pcie->bitmap = devm_bitmap_zalloc(pcie->dev, pcie->nvecs, GFP_KERNEL);
+ if (!pcie->bitmap)
+ return -ENOMEM;
+
+ parent = irq_find_matching_fwspec(&pcie->fwspec, DOMAIN_BUS_WIRED);
+ if (!parent) {
+ dev_err(pcie->dev, "failed to find parent domain\n");
+ return -ENXIO;
+ }
+
+ parent = irq_domain_create_hierarchy(parent, 0, pcie->nvecs, fwnode,
+ &apple_msi_domain_ops, pcie);
+ if (!parent) {
+ dev_err(pcie->dev, "failed to create IRQ domain\n");
+ return -ENOMEM;
+ }
+ irq_domain_update_bus_token(parent, DOMAIN_BUS_NEXUS);
+
+ pcie->domain = pci_msi_create_irq_domain(fwnode, &apple_msi_info,
+ parent);
+ if (!pcie->domain) {
+ dev_err(pcie->dev, "failed to create MSI domain\n");
+ irq_domain_remove(parent);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static struct apple_pcie_port *apple_pcie_get_port(struct pci_dev *pdev)
+{
+ struct pci_config_window *cfg = pdev->sysdata;
+ struct apple_pcie *pcie = cfg->priv;
+ struct pci_dev *port_pdev;
+ struct apple_pcie_port *port;
+
+ /* Find the root port this device is on */
+ port_pdev = pcie_find_root_port(pdev);
+
+ /* If finding the port itself, nothing to do */
+ if (WARN_ON(!port_pdev) || pdev == port_pdev)
+ return NULL;
+
+ list_for_each_entry(port, &pcie->ports, entry) {
+ if (port->idx == PCI_SLOT(port_pdev->devfn))
+ return port;
+ }
+
+ return NULL;
+}
+
+static int apple_pcie_add_device(struct apple_pcie_port *port,
+ struct pci_dev *pdev)
+{
+ u32 sid, rid = PCI_DEVID(pdev->bus->number, pdev->devfn);
+ int idx, err;
+
+ dev_dbg(&pdev->dev, "added to bus %s, index %d\n",
+ pci_name(pdev->bus->self), port->idx);
+
+ err = of_map_id(port->pcie->dev->of_node, rid, "iommu-map",
+ "iommu-map-mask", NULL, &sid);
+ if (err)
+ return err;
+
+ mutex_lock(&port->pcie->lock);
+
+ idx = bitmap_find_free_region(port->sid_map, port->sid_map_sz, 0);
+ if (idx >= 0) {
+ apple_pcie_rid2sid_write(port, idx,
+ PORT_RID2SID_VALID |
+ (sid << PORT_RID2SID_SID_SHIFT) | rid);
+
+ dev_dbg(&pdev->dev, "mapping RID%x to SID%x (index %d)\n",
+ rid, sid, idx);
+ }
+
+ mutex_unlock(&port->pcie->lock);
+
+ return idx >= 0 ? 0 : -ENOSPC;
+}
+
+static void apple_pcie_release_device(struct apple_pcie_port *port,
+ struct pci_dev *pdev)
+{
+ u32 rid = PCI_DEVID(pdev->bus->number, pdev->devfn);
+ int idx;
+
+ mutex_lock(&port->pcie->lock);
+
+ for_each_set_bit(idx, port->sid_map, port->sid_map_sz) {
+ u32 val;
+
+ val = readl_relaxed(port->base + PORT_RID2SID(idx));
+ if ((val & 0xffff) == rid) {
+ apple_pcie_rid2sid_write(port, idx, 0);
+ bitmap_release_region(port->sid_map, idx, 0);
+ dev_dbg(&pdev->dev, "Released %x (%d)\n", val, idx);
+ break;
+ }
+ }
+
+ mutex_unlock(&port->pcie->lock);
+}
+
+static int apple_pcie_bus_notifier(struct notifier_block *nb,
+ unsigned long action,
+ void *data)
+{
+ struct device *dev = data;
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct apple_pcie_port *port;
+ int err;
+
+ /*
+ * This is a bit ugly. We assume that if we get notified for
+ * any PCI device, we must be in charge of it, and that there
+ * is no other PCI controller in the whole system. It probably
+ * holds for now, but who knows for how long?
+ */
+ port = apple_pcie_get_port(pdev);
+ if (!port)
+ return NOTIFY_DONE;
+
+ switch (action) {
+ case BUS_NOTIFY_ADD_DEVICE:
+ err = apple_pcie_add_device(port, pdev);
+ if (err)
+ return notifier_from_errno(err);
+ break;
+ case BUS_NOTIFY_DEL_DEVICE:
+ apple_pcie_release_device(port, pdev);
+ break;
+ default:
+ return NOTIFY_DONE;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block apple_pcie_nb = {
+ .notifier_call = apple_pcie_bus_notifier,
+};
+
+static int apple_pcie_init(struct pci_config_window *cfg)
+{
+ struct device *dev = cfg->parent;
+ struct platform_device *platform = to_platform_device(dev);
+ struct device_node *of_port;
+ struct apple_pcie *pcie;
+ int ret;
+
+ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+ if (!pcie)
+ return -ENOMEM;
+
+ pcie->dev = dev;
+
+ mutex_init(&pcie->lock);
+
+ pcie->base = devm_platform_ioremap_resource(platform, 1);
+ if (IS_ERR(pcie->base))
+ return PTR_ERR(pcie->base);
+
+ cfg->priv = pcie;
+ INIT_LIST_HEAD(&pcie->ports);
+
+ for_each_child_of_node(dev->of_node, of_port) {
+ ret = apple_pcie_setup_port(pcie, of_port);
+ if (ret) {
+ dev_err(pcie->dev, "Port %pOF setup fail: %d\n", of_port, ret);
+ of_node_put(of_port);
+ return ret;
+ }
+ }
+
+ return apple_msi_init(pcie);
+}
+
+static int apple_pcie_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = bus_register_notifier(&pci_bus_type, &apple_pcie_nb);
+ if (ret)
+ return ret;
+
+ ret = pci_host_common_probe(pdev);
+ if (ret)
+ bus_unregister_notifier(&pci_bus_type, &apple_pcie_nb);
+
+ return ret;
+}
+
+static const struct pci_ecam_ops apple_pcie_cfg_ecam_ops = {
+ .init = apple_pcie_init,
+ .pci_ops = {
+ .map_bus = pci_ecam_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+ }
+};
+
+static const struct of_device_id apple_pcie_of_match[] = {
+ { .compatible = "apple,pcie", .data = &apple_pcie_cfg_ecam_ops },
+ { }
+};
+MODULE_DEVICE_TABLE(of, apple_pcie_of_match);
+
+static struct platform_driver apple_pcie_driver = {
+ .probe = apple_pcie_probe,
+ .driver = {
+ .name = "pcie-apple",
+ .of_match_table = apple_pcie_of_match,
+ .suppress_bind_attrs = true,
+ },
+};
+module_platform_driver(apple_pcie_driver);
+
+MODULE_LICENSE("GPL v2");
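
Two encodings in the driver above are worth unpacking. The first reg cell of a PCI child node packs bus/device/function into bits 23:16, 15:11 and 10:8, which is why idx >> 11 yields the device number for a port on the root bus, and each RID2SID entry combines a valid flag, a stream ID and the 16-bit requester ID (bus in the high byte, devfn in the low byte). A minimal userspace sketch of both encodings follows; the VALID bit and SID shift are illustrative placeholders, not the hardware layout:

#include <stdint.h>
#include <stdio.h>

#define RID2SID_VALID		(1U << 31)	/* placeholder, not the HW bit */
#define RID2SID_SID_SHIFT	16		/* placeholder, not the HW shift */

/* First reg cell of a PCI child node: bits 23:16 bus, 15:11 device, 10:8 function */
static unsigned int devfn_from_reg_hi(uint32_t reg_hi)
{
	return (reg_hi >> 8) & 0xff;
}

/* Requester ID as the driver uses it: bus in the high byte, devfn in the low byte */
static uint32_t rid(unsigned int bus, unsigned int devfn)
{
	return (bus << 8) | devfn;
}

static uint32_t rid2sid_entry(uint32_t req_id, uint32_t sid)
{
	return RID2SID_VALID | (sid << RID2SID_SID_SHIFT) | req_id;
}

int main(void)
{
	uint32_t reg_hi = 2 << 11;	/* device 2, function 0, on bus 0 */
	unsigned int devfn = devfn_from_reg_hi(reg_hi);
	uint32_t entry = rid2sid_entry(rid(0, devfn), 0x5);

	printf("port idx %u, rid2sid entry 0x%08x\n",
	       (unsigned int)(reg_hi >> 11), (unsigned int)entry);
	printf("rid recovered from entry: 0x%04x\n", (unsigned int)(entry & 0xffff));
	return 0;
}

The release path's (val & 0xffff) == rid comparison works because the requester ID occupies the low 16 bits of each entry.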
diff --git a/drivers/pci/controller/pcie-brcmstb.c b/drivers/pci/controller/pcie-brcmstb.c
index cc30215f5a43..1fc7bd49a7ad 100644
--- a/drivers/pci/controller/pcie-brcmstb.c
+++ b/drivers/pci/controller/pcie-brcmstb.c
@@ -145,7 +145,7 @@
#define BRCM_INT_PCI_MSI_LEGACY_NR 8
#define BRCM_INT_PCI_MSI_SHIFT 0
-/* MSI target adresses */
+/* MSI target addresses */
#define BRCM_MSI_TARGET_ADDR_LT_4GB 0x0fffffffcULL
#define BRCM_MSI_TARGET_ADDR_GT_4GB 0xffffffffcULL
diff --git a/drivers/pci/controller/pcie-iproc.c b/drivers/pci/controller/pcie-iproc.c
index 30ac5fbefbbf..36b9d2c46cfa 100644
--- a/drivers/pci/controller/pcie-iproc.c
+++ b/drivers/pci/controller/pcie-iproc.c
@@ -249,7 +249,7 @@ enum iproc_pcie_reg {
/*
* To hold the address of the register where the MSI writes are
- * programed. When ARM GICv3 ITS is used, this should be programmed
+ * programmed. When ARM GICv3 ITS is used, this should be programmed
* with the address of the GITS_TRANSLATER register.
*/
IPROC_PCIE_MSI_ADDR_LO,
diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
index a5987e52700e..1ed2667069ab 100644
--- a/drivers/pci/controller/vmd.c
+++ b/drivers/pci/controller/vmd.c
@@ -18,8 +18,6 @@
#include <linux/rcupdate.h>
#include <asm/irqdomain.h>
-#include <asm/device.h>
-#include <asm/msi.h>
#define VMD_CFGBAR 0
#define VMD_MEMBAR1 2
diff --git a/drivers/pci/endpoint/functions/pci-epf-ntb.c b/drivers/pci/endpoint/functions/pci-epf-ntb.c
index 8b4756159f15..5a03401f4571 100644
--- a/drivers/pci/endpoint/functions/pci-epf-ntb.c
+++ b/drivers/pci/endpoint/functions/pci-epf-ntb.c
@@ -1937,7 +1937,7 @@ static ssize_t epf_ntb_##_name##_show(struct config_item *item, \
struct config_group *group = to_config_group(item); \
struct epf_ntb *ntb = to_epf_ntb(group); \
\
- return sprintf(page, "%d\n", ntb->_name); \
+ return sysfs_emit(page, "%d\n", ntb->_name); \
}
#define EPF_NTB_W(_name) \
@@ -1947,11 +1947,9 @@ static ssize_t epf_ntb_##_name##_store(struct config_item *item, \
struct config_group *group = to_config_group(item); \
struct epf_ntb *ntb = to_epf_ntb(group); \
u32 val; \
- int ret; \
\
- ret = kstrtou32(page, 0, &val); \
- if (ret) \
- return ret; \
+ if (kstrtou32(page, 0, &val) < 0) \
+ return -EINVAL; \
\
ntb->_name = val; \
\
@@ -1968,7 +1966,7 @@ static ssize_t epf_ntb_##_name##_show(struct config_item *item, \
\
sscanf(#_name, "mw%d", &win_no); \
\
- return sprintf(page, "%lld\n", ntb->mws_size[win_no - 1]); \
+ return sysfs_emit(page, "%lld\n", ntb->mws_size[win_no - 1]); \
}
#define EPF_NTB_MW_W(_name) \
@@ -1980,11 +1978,9 @@ static ssize_t epf_ntb_##_name##_store(struct config_item *item, \
struct device *dev = &ntb->epf->dev; \
int win_no; \
u64 val; \
- int ret; \
\
- ret = kstrtou64(page, 0, &val); \
- if (ret) \
- return ret; \
+ if (kstrtou64(page, 0, &val) < 0) \
+ return -EINVAL; \
\
if (sscanf(#_name, "mw%d", &win_no) != 1) \
return -EINVAL; \
@@ -2005,11 +2001,9 @@ static ssize_t epf_ntb_num_mws_store(struct config_item *item,
struct config_group *group = to_config_group(item);
struct epf_ntb *ntb = to_epf_ntb(group);
u32 val;
- int ret;
- ret = kstrtou32(page, 0, &val);
- if (ret)
- return ret;
+ if (kstrtou32(page, 0, &val) < 0)
+ return -EINVAL;
if (val > MAX_MW)
return -EINVAL;
diff --git a/drivers/pci/endpoint/pci-ep-cfs.c b/drivers/pci/endpoint/pci-ep-cfs.c
index 999911801877..d4850bdd837f 100644
--- a/drivers/pci/endpoint/pci-ep-cfs.c
+++ b/drivers/pci/endpoint/pci-ep-cfs.c
@@ -175,9 +175,8 @@ static ssize_t pci_epc_start_store(struct config_item *item, const char *page,
epc = epc_group->epc;
- ret = kstrtobool(page, &start);
- if (ret)
- return ret;
+ if (kstrtobool(page, &start) < 0)
+ return -EINVAL;
if (!start) {
pci_epc_stop(epc);
@@ -198,8 +197,7 @@ static ssize_t pci_epc_start_store(struct config_item *item, const char *page,
static ssize_t pci_epc_start_show(struct config_item *item, char *page)
{
- return sprintf(page, "%d\n",
- to_pci_epc_group(item)->start);
+ return sysfs_emit(page, "%d\n", to_pci_epc_group(item)->start);
}
CONFIGFS_ATTR(pci_epc_, start);
@@ -321,7 +319,7 @@ static ssize_t pci_epf_##_name##_show(struct config_item *item, char *page) \
struct pci_epf *epf = to_pci_epf_group(item)->epf; \
if (WARN_ON_ONCE(!epf->header)) \
return -EINVAL; \
- return sprintf(page, "0x%04x\n", epf->header->_name); \
+ return sysfs_emit(page, "0x%04x\n", epf->header->_name); \
}
#define PCI_EPF_HEADER_W_u32(_name) \
@@ -329,13 +327,11 @@ static ssize_t pci_epf_##_name##_store(struct config_item *item, \
const char *page, size_t len) \
{ \
u32 val; \
- int ret; \
struct pci_epf *epf = to_pci_epf_group(item)->epf; \
if (WARN_ON_ONCE(!epf->header)) \
return -EINVAL; \
- ret = kstrtou32(page, 0, &val); \
- if (ret) \
- return ret; \
+ if (kstrtou32(page, 0, &val) < 0) \
+ return -EINVAL; \
epf->header->_name = val; \
return len; \
}
@@ -345,13 +341,11 @@ static ssize_t pci_epf_##_name##_store(struct config_item *item, \
const char *page, size_t len) \
{ \
u16 val; \
- int ret; \
struct pci_epf *epf = to_pci_epf_group(item)->epf; \
if (WARN_ON_ONCE(!epf->header)) \
return -EINVAL; \
- ret = kstrtou16(page, 0, &val); \
- if (ret) \
- return ret; \
+ if (kstrtou16(page, 0, &val) < 0) \
+ return -EINVAL; \
epf->header->_name = val; \
return len; \
}
@@ -361,13 +355,11 @@ static ssize_t pci_epf_##_name##_store(struct config_item *item, \
const char *page, size_t len) \
{ \
u8 val; \
- int ret; \
struct pci_epf *epf = to_pci_epf_group(item)->epf; \
if (WARN_ON_ONCE(!epf->header)) \
return -EINVAL; \
- ret = kstrtou8(page, 0, &val); \
- if (ret) \
- return ret; \
+ if (kstrtou8(page, 0, &val) < 0) \
+ return -EINVAL; \
epf->header->_name = val; \
return len; \
}
@@ -376,11 +368,9 @@ static ssize_t pci_epf_msi_interrupts_store(struct config_item *item,
const char *page, size_t len)
{
u8 val;
- int ret;
- ret = kstrtou8(page, 0, &val);
- if (ret)
- return ret;
+ if (kstrtou8(page, 0, &val) < 0)
+ return -EINVAL;
to_pci_epf_group(item)->epf->msi_interrupts = val;
@@ -390,19 +380,17 @@ static ssize_t pci_epf_msi_interrupts_store(struct config_item *item,
static ssize_t pci_epf_msi_interrupts_show(struct config_item *item,
char *page)
{
- return sprintf(page, "%d\n",
- to_pci_epf_group(item)->epf->msi_interrupts);
+ return sysfs_emit(page, "%d\n",
+ to_pci_epf_group(item)->epf->msi_interrupts);
}
static ssize_t pci_epf_msix_interrupts_store(struct config_item *item,
const char *page, size_t len)
{
u16 val;
- int ret;
- ret = kstrtou16(page, 0, &val);
- if (ret)
- return ret;
+ if (kstrtou16(page, 0, &val) < 0)
+ return -EINVAL;
to_pci_epf_group(item)->epf->msix_interrupts = val;
@@ -412,8 +400,8 @@ static ssize_t pci_epf_msix_interrupts_store(struct config_item *item,
static ssize_t pci_epf_msix_interrupts_show(struct config_item *item,
char *page)
{
- return sprintf(page, "%d\n",
- to_pci_epf_group(item)->epf->msix_interrupts);
+ return sysfs_emit(page, "%d\n",
+ to_pci_epf_group(item)->epf->msix_interrupts);
}
PCI_EPF_HEADER_R(vendorid)
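
The stores above now fold any parse failure into -EINVAL instead of forwarding the kstrto*() error code. A rough userspace approximation of kstrtou16() shows what those helpers actually enforce (no empty input, at most one trailing newline, no other trailing characters, no overflow); this is a sketch of the semantics, not the kernel implementation:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static int parse_u16(const char *s, unsigned int base, uint16_t *out)
{
	char *end;
	unsigned long val;

	errno = 0;
	val = strtoul(s, &end, base);
	if (end == s)
		return -EINVAL;		/* no digits at all */
	if (*end == '\n')
		end++;			/* a single trailing newline is tolerated */
	if (*end != '\0')
		return -EINVAL;		/* trailing junk */
	if (errno == ERANGE || val > UINT16_MAX)
		return -ERANGE;

	*out = (uint16_t)val;
	return 0;
}

int main(void)
{
	uint16_t v = 0;
	int rc = parse_u16("0x10\n", 0, &v);

	printf("\"0x10\\n\" -> %d (v=%u)\n", rc, (unsigned int)v);
	printf("\"12ab\"  -> %d\n", parse_u16("12ab", 0, &v));
	printf("\"70000\" -> %d\n", parse_u16("70000", 0, &v));
	return 0;
}

The same strictness applies to the kstrtou8/kstrtou32/kstrtobool variants used by the other attributes in this file.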
diff --git a/drivers/pci/endpoint/pci-epc-core.c b/drivers/pci/endpoint/pci-epc-core.c
index ecbb0fb3b653..38621558d397 100644
--- a/drivers/pci/endpoint/pci-epc-core.c
+++ b/drivers/pci/endpoint/pci-epc-core.c
@@ -700,7 +700,7 @@ EXPORT_SYMBOL_GPL(pci_epc_linkup);
/**
* pci_epc_init_notify() - Notify the EPF device that EPC device's core
* initialization is completed.
- * @epc: the EPC device whose core initialization is completeds
+ * @epc: the EPC device whose core initialization is completed
*
* Invoke to Notify the EPF device that the EPC device's initialization
* is completed.
diff --git a/drivers/pci/endpoint/pci-epf-core.c b/drivers/pci/endpoint/pci-epf-core.c
index 8aea16380870..9ed556936f48 100644
--- a/drivers/pci/endpoint/pci-epf-core.c
+++ b/drivers/pci/endpoint/pci-epf-core.c
@@ -224,7 +224,7 @@ EXPORT_SYMBOL_GPL(pci_epf_add_vepf);
* be removed
* @epf_vf: the virtual EP function to be removed
*
- * Invoke to remove a virtual endpoint function from the physcial endpoint
+ * Invoke to remove a virtual endpoint function from the physical endpoint
* function.
*/
void pci_epf_remove_vepf(struct pci_epf *epf_pf, struct pci_epf *epf_vf)
@@ -432,7 +432,7 @@ EXPORT_SYMBOL_GPL(pci_epf_destroy);
/**
* pci_epf_create() - create a new PCI EPF device
* @name: the name of the PCI EPF device. This name will be used to bind the
- * the EPF device to a EPF driver
+ * EPF device to a EPF driver
*
* Invoke to create a new PCI EPF device by providing the name of the function
* device.
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index f031302ad401..12f4b351be67 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -22,7 +22,7 @@
* when the bridge is scanned and it loses a refcount when the bridge
* is removed.
* - When a P2P bridge is present, we elevate the refcount on the subordinate
- * bus. It loses the refcount when the the driver unloads.
+ * bus. It loses the refcount when the driver unloads.
*/
#define pr_fmt(fmt) "acpiphp_glue: " fmt
diff --git a/drivers/pci/hotplug/cpqphp.h b/drivers/pci/hotplug/cpqphp.h
index 77e4e0142fbc..2f7b49ea96e2 100644
--- a/drivers/pci/hotplug/cpqphp.h
+++ b/drivers/pci/hotplug/cpqphp.h
@@ -15,7 +15,7 @@
#define _CPQPHP_H
#include <linux/interrupt.h>
-#include <asm/io.h> /* for read? and write? functions */
+#include <linux/io.h> /* for read? and write? functions */
#include <linux/delay.h> /* for delays */
#include <linux/mutex.h>
#include <linux/sched/signal.h> /* for signal_pending() */
diff --git a/drivers/pci/hotplug/cpqphp_ctrl.c b/drivers/pci/hotplug/cpqphp_ctrl.c
index 1b26ca0b3701..ed7b58eb64d2 100644
--- a/drivers/pci/hotplug/cpqphp_ctrl.c
+++ b/drivers/pci/hotplug/cpqphp_ctrl.c
@@ -519,7 +519,7 @@ error:
* @head: list to search
* @size: size of node to find, must be a power of two.
*
- * Description: This function sorts the resource list by size and then returns
+ * Description: This function sorts the resource list by size and then
* returns the first node of "size" length that is not in the ISA aliasing
* window. If it finds a node larger than "size" it will split it up.
*/
@@ -1202,7 +1202,7 @@ static u8 set_controller_speed(struct controller *ctrl, u8 adapter_speed, u8 hp_
mdelay(5);
- /* Reenable interrupts */
+ /* Re-enable interrupts */
writel(0, ctrl->hpc_reg + INT_MASK);
pci_write_config_byte(ctrl->pci_dev, 0x41, reg);
diff --git a/drivers/pci/hotplug/cpqphp_pci.c b/drivers/pci/hotplug/cpqphp_pci.c
index 1b2b3f3b648b..9038039ad6db 100644
--- a/drivers/pci/hotplug/cpqphp_pci.c
+++ b/drivers/pci/hotplug/cpqphp_pci.c
@@ -189,8 +189,10 @@ int cpqhp_set_irq(u8 bus_num, u8 dev_num, u8 int_pin, u8 irq_num)
/* This should only be for x86 as it sets the Edge Level
* Control Register
*/
- outb((u8) (temp_word & 0xFF), 0x4d0); outb((u8) ((temp_word &
- 0xFF00) >> 8), 0x4d1); rc = 0; }
+ outb((u8)(temp_word & 0xFF), 0x4d0);
+ outb((u8)((temp_word & 0xFF00) >> 8), 0x4d1);
+ rc = 0;
+ }
return rc;
}
diff --git a/drivers/pci/hotplug/ibmphp.h b/drivers/pci/hotplug/ibmphp.h
index e90a4ebf6550..0399c60d2ec1 100644
--- a/drivers/pci/hotplug/ibmphp.h
+++ b/drivers/pci/hotplug/ibmphp.h
@@ -352,7 +352,7 @@ struct resource_node {
u32 len;
int type; /* MEM, IO, PFMEM */
u8 fromMem; /* this is to indicate that the range is from
- * from the Memory bucket rather than from PFMem */
+ * the Memory bucket rather than from PFMem */
struct resource_node *next;
struct resource_node *nextRange; /* for the other mem range on bus */
};
@@ -736,7 +736,7 @@ struct controller {
int ibmphp_init_devno(struct slot **); /* This function is called from EBDA, so we need it not be static */
int ibmphp_do_disable_slot(struct slot *slot_cur);
-int ibmphp_update_slot_info(struct slot *); /* This function is called from HPC, so we need it to not be be static */
+int ibmphp_update_slot_info(struct slot *); /* This function is called from HPC, so we need it to not be static */
int ibmphp_configure_card(struct pci_func *, u8);
int ibmphp_unconfigure_card(struct slot **, int);
extern const struct hotplug_slot_ops ibmphp_hotplug_slot_ops;
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index 69fd401691be..918dccbc74b6 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -189,6 +189,8 @@ int pciehp_get_attention_status(struct hotplug_slot *hotplug_slot, u8 *status);
int pciehp_set_raw_indicator_status(struct hotplug_slot *h_slot, u8 status);
int pciehp_get_raw_indicator_status(struct hotplug_slot *h_slot, u8 *status);
+int pciehp_slot_reset(struct pcie_device *dev);
+
static inline const char *slot_name(struct controller *ctrl)
{
return hotplug_slot_name(&ctrl->hotplug_slot);
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index ad3393930ecb..f34114d45259 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -351,6 +351,8 @@ static struct pcie_port_service_driver hpdriver_portdrv = {
.runtime_suspend = pciehp_runtime_suspend,
.runtime_resume = pciehp_runtime_resume,
#endif /* PM */
+
+ .slot_reset = pciehp_slot_reset,
};
int __init pcie_hp_init(void)
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 3024d7e85e6a..83a0fa119cae 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -862,6 +862,32 @@ void pcie_disable_interrupt(struct controller *ctrl)
pcie_write_cmd(ctrl, 0, mask);
}
+/**
+ * pciehp_slot_reset() - ignore link event caused by error-induced hot reset
+ * @dev: PCI Express port service device
+ *
+ * Called from pcie_portdrv_slot_reset() after AER or DPC initiated a reset
+ * further up in the hierarchy to recover from an error. The reset was
+ * propagated down to this hotplug port. Ignore the resulting link flap.
+ * If the link failed to retrain successfully, synthesize the ignored event.
+ * Surprise removal during reset is detected through Presence Detect Changed.
+ */
+int pciehp_slot_reset(struct pcie_device *dev)
+{
+ struct controller *ctrl = get_service_data(dev);
+
+ if (ctrl->state != ON_STATE)
+ return 0;
+
+ pcie_capability_write_word(dev->port, PCI_EXP_SLTSTA,
+ PCI_EXP_SLTSTA_DLLSC);
+
+ if (!pciehp_check_link_active(ctrl))
+ pciehp_request(ctrl, PCI_EXP_SLTSTA_DLLSC);
+
+ return 0;
+}
+
/*
* pciehp has a 1:1 bus:slot relationship so we ultimately want a secondary
* bus reset of the bridge, but at the same time we want to ensure that it is
diff --git a/drivers/pci/hotplug/shpchp_hpc.c b/drivers/pci/hotplug/shpchp_hpc.c
index 9e3b27744305..bd7557ca4910 100644
--- a/drivers/pci/hotplug/shpchp_hpc.c
+++ b/drivers/pci/hotplug/shpchp_hpc.c
@@ -295,7 +295,7 @@ static int shpc_write_cmd(struct slot *slot, u8 t_slot, u8 cmd)
mutex_lock(&slot->ctrl->cmd_lock);
if (!shpc_poll_ctrl_busy(ctrl)) {
- /* After 1 sec and and the controller is still busy */
+ /* After 1 sec and the controller is still busy */
ctrl_err(ctrl, "Controller is still busy after 1 sec\n");
retval = -EBUSY;
goto out;
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index dafdc652fcd0..1d7a7c5b5307 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -164,13 +164,15 @@ static ssize_t sriov_vf_total_msix_show(struct device *dev,
char *buf)
{
struct pci_dev *pdev = to_pci_dev(dev);
+ struct pci_driver *pdrv;
u32 vf_total_msix = 0;
device_lock(dev);
- if (!pdev->driver || !pdev->driver->sriov_get_vf_total_msix)
+ pdrv = to_pci_driver(dev->driver);
+ if (!pdrv || !pdrv->sriov_get_vf_total_msix)
goto unlock;
- vf_total_msix = pdev->driver->sriov_get_vf_total_msix(pdev);
+ vf_total_msix = pdrv->sriov_get_vf_total_msix(pdev);
unlock:
device_unlock(dev);
return sysfs_emit(buf, "%u\n", vf_total_msix);
@@ -183,23 +185,24 @@ static ssize_t sriov_vf_msix_count_store(struct device *dev,
{
struct pci_dev *vf_dev = to_pci_dev(dev);
struct pci_dev *pdev = pci_physfn(vf_dev);
- int val, ret;
+ struct pci_driver *pdrv;
+ int val, ret = 0;
- ret = kstrtoint(buf, 0, &val);
- if (ret)
- return ret;
+ if (kstrtoint(buf, 0, &val) < 0)
+ return -EINVAL;
if (val < 0)
return -EINVAL;
device_lock(&pdev->dev);
- if (!pdev->driver || !pdev->driver->sriov_set_msix_vec_count) {
+ pdrv = to_pci_driver(dev->driver);
+ if (!pdrv || !pdrv->sriov_set_msix_vec_count) {
ret = -EOPNOTSUPP;
goto err_pdev;
}
device_lock(&vf_dev->dev);
- if (vf_dev->driver) {
+ if (to_pci_driver(vf_dev->dev.driver)) {
/*
* A driver is already attached to this VF and has configured
* itself based on the current MSI-X vector count. Changing
@@ -209,7 +212,7 @@ static ssize_t sriov_vf_msix_count_store(struct device *dev,
goto err_dev;
}
- ret = pdev->driver->sriov_set_msix_vec_count(vf_dev, val);
+ ret = pdrv->sriov_set_msix_vec_count(vf_dev, val);
err_dev:
device_unlock(&vf_dev->dev);
@@ -376,12 +379,12 @@ static ssize_t sriov_numvfs_store(struct device *dev,
const char *buf, size_t count)
{
struct pci_dev *pdev = to_pci_dev(dev);
- int ret;
+ struct pci_driver *pdrv;
+ int ret = 0;
u16 num_vfs;
- ret = kstrtou16(buf, 0, &num_vfs);
- if (ret < 0)
- return ret;
+ if (kstrtou16(buf, 0, &num_vfs) < 0)
+ return -EINVAL;
if (num_vfs > pci_sriov_get_totalvfs(pdev))
return -ERANGE;
@@ -392,14 +395,15 @@ static ssize_t sriov_numvfs_store(struct device *dev,
goto exit;
/* is PF driver loaded */
- if (!pdev->driver) {
+ pdrv = to_pci_driver(dev->driver);
+ if (!pdrv) {
pci_info(pdev, "no driver bound to device; cannot configure SR-IOV\n");
ret = -ENOENT;
goto exit;
}
/* is PF driver loaded w/callback */
- if (!pdev->driver->sriov_configure) {
+ if (!pdrv->sriov_configure) {
pci_info(pdev, "driver does not support SR-IOV configuration via sysfs\n");
ret = -ENOENT;
goto exit;
@@ -407,7 +411,7 @@ static ssize_t sriov_numvfs_store(struct device *dev,
if (num_vfs == 0) {
/* disable VFs */
- ret = pdev->driver->sriov_configure(pdev, 0);
+ ret = pdrv->sriov_configure(pdev, 0);
goto exit;
}
@@ -419,7 +423,7 @@ static ssize_t sriov_numvfs_store(struct device *dev,
goto exit;
}
- ret = pdev->driver->sriov_configure(pdev, num_vfs);
+ ret = pdrv->sriov_configure(pdev, num_vfs);
if (ret < 0)
goto exit;
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 0099a00af361..bdc6ba7f39f0 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -579,7 +579,8 @@ err:
return ret;
}
-static void __iomem *msix_map_region(struct pci_dev *dev, unsigned nr_entries)
+static void __iomem *msix_map_region(struct pci_dev *dev,
+ unsigned int nr_entries)
{
resource_size_t phys_addr;
u32 table_offset;
diff --git a/drivers/pci/of.c b/drivers/pci/of.c
index d84381ce82b5..0b1237cff239 100644
--- a/drivers/pci/of.c
+++ b/drivers/pci/of.c
@@ -423,7 +423,7 @@ failed:
*/
static int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq)
{
- struct device_node *dn, *ppnode;
+ struct device_node *dn, *ppnode = NULL;
struct pci_dev *ppdev;
__be32 laddr[3];
u8 pin;
@@ -452,8 +452,14 @@ static int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *
if (pin == 0)
return -ENODEV;
+ /* Local interrupt-map in the device node? Use it! */
+ if (of_get_property(dn, "interrupt-map", NULL)) {
+ pin = pci_swizzle_interrupt_pin(pdev, pin);
+ ppnode = dn;
+ }
+
/* Now we walk up the PCI tree */
- for (;;) {
+ while (!ppnode) {
/* Get the pci_dev of our parent */
ppdev = pdev->bus->self;
diff --git a/drivers/pci/p2pdma.c b/drivers/pci/p2pdma.c
index 50cdde3e9a8b..8d47cb7218d1 100644
--- a/drivers/pci/p2pdma.c
+++ b/drivers/pci/p2pdma.c
@@ -874,7 +874,7 @@ static int __pci_p2pdma_map_sg(struct pci_p2pdma_pagemap *p2p_pgmap,
int i;
for_each_sg(sg, s, nents, i) {
- s->dma_address = sg_phys(s) - p2p_pgmap->bus_offset;
+ s->dma_address = sg_phys(s) + p2p_pgmap->bus_offset;
sg_dma_len(s) = s->length;
}
@@ -943,7 +943,7 @@ EXPORT_SYMBOL_GPL(pci_p2pdma_unmap_sg_attrs);
*
* Parses an attribute value to decide whether to enable p2pdma.
* The value can select a PCI device (using its full BDF device
- * name) or a boolean (in any format strtobool() accepts). A false
+ * name) or a boolean (in any format kstrtobool() accepts). A false
* value disables p2pdma, a true value expects the caller
* to automatically find a compatible device and specifying a PCI device
* expects the caller to use the specific provider.
@@ -975,11 +975,11 @@ int pci_p2pdma_enable_store(const char *page, struct pci_dev **p2p_dev,
} else if ((page[0] == '0' || page[0] == '1') && !iscntrl(page[1])) {
/*
* If the user enters a PCI device that doesn't exist
- * like "0000:01:00.1", we don't want strtobool to think
+ * like "0000:01:00.1", we don't want kstrtobool to think
* it's a '0' when it's clearly not what the user wanted.
* So we require 0's and 1's to be exactly one character.
*/
- } else if (!strtobool(page, use_p2pdma)) {
+ } else if (!kstrtobool(page, use_p2pdma)) {
return 0;
}
diff --git a/drivers/pci/pci-bridge-emul.c b/drivers/pci/pci-bridge-emul.c
index fdaf86a888b7..db97cddfc85e 100644
--- a/drivers/pci/pci-bridge-emul.c
+++ b/drivers/pci/pci-bridge-emul.c
@@ -431,8 +431,21 @@ int pci_bridge_emul_conf_write(struct pci_bridge_emul *bridge, int where,
/* Clear the W1C bits */
new &= ~((value << shift) & (behavior[reg / 4].w1c & mask));
+ /* Save the new value with the cleared W1C bits into the cfgspace */
cfgspace[reg / 4] = cpu_to_le32(new);
+ /*
+ * Clear the W1C bits not specified by the write mask, so that the
+ * write_op() does not clear them.
+ */
+ new &= ~(behavior[reg / 4].w1c & ~mask);
+
+ /*
+ * Set the W1C bits specified by the write mask, so that write_op()
+ * knows about that they are to be cleared.
+ */
+ new |= (value << shift) & (behavior[reg / 4].w1c & mask);
+
if (write_op)
write_op(bridge, reg, old, new, mask);
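
The two new comments describe a subtle split: the copy kept in the emulated config space must have the just-cleared W1C (write-1-to-clear) bits dropped, while the copy passed to write_op() must carry exactly the W1C bits this write targets so the hook can forward the clear to real hardware. A self-contained sketch of that bit manipulation, with plain RW behaviour simplified to "everything that isn't W1C":

#include <stdint.h>
#include <stdio.h>

struct w1c_result {
	uint32_t stored;	/* what stays in the emulated register */
	uint32_t to_hook;	/* what a write_op()-style hook should see */
};

static struct w1c_result w1c_write(uint32_t old, uint32_t value,
				   uint32_t mask, uint32_t w1c)
{
	uint32_t rw = ~w1c;	/* sketch: everything that isn't W1C is plain RW */
	uint32_t new = old;
	struct w1c_result res;

	/* Plain RW bits selected by the mask take the written value */
	new = (new & ~(rw & mask)) | (value & rw & mask);

	/* Writing 1 to a W1C bit clears it; writing 0 leaves it alone */
	new &= ~(value & w1c & mask);
	res.stored = new;

	/* Hook copy: drop untargeted W1C bits, keep the ones this write clears */
	res.to_hook = (new & ~(w1c & ~mask)) | (value & w1c & mask);

	return res;
}

int main(void)
{
	/* Status-style register: bits 16..19 are W1C, bit 18 is currently set */
	uint32_t old = 1U << 18, w1c = 0xfU << 16;
	struct w1c_result r = w1c_write(old, 1U << 18, 0xffff0000, w1c);

	printf("stored 0x%08x, to_hook 0x%08x\n",
	       (unsigned int)r.stored, (unsigned int)r.to_hook);
	return 0;
}

With that split, a hardware-backed bridge can forward only the requested clears while the software copy stays consistent.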
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 2761ab86490d..1d98c974381c 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -319,12 +319,10 @@ static long local_pci_probe(void *_ddi)
* its remove routine.
*/
pm_runtime_get_sync(dev);
- pci_dev->driver = pci_drv;
rc = pci_drv->probe(pci_dev, ddi->id);
if (!rc)
return rc;
if (rc < 0) {
- pci_dev->driver = NULL;
pm_runtime_put_sync(dev);
return rc;
}
@@ -390,14 +388,13 @@ static int pci_call_probe(struct pci_driver *drv, struct pci_dev *dev,
* @pci_dev: PCI device being probed
*
* returns 0 on success, else error.
- * side-effect: pci_dev->driver is set to drv when drv claims pci_dev.
*/
static int __pci_device_probe(struct pci_driver *drv, struct pci_dev *pci_dev)
{
const struct pci_device_id *id;
int error = 0;
- if (!pci_dev->driver && drv->probe) {
+ if (drv->probe) {
error = -ENODEV;
id = pci_match_device(drv, pci_dev);
@@ -457,18 +454,15 @@ static int pci_device_probe(struct device *dev)
static void pci_device_remove(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
- struct pci_driver *drv = pci_dev->driver;
+ struct pci_driver *drv = to_pci_driver(dev->driver);
- if (drv) {
- if (drv->remove) {
- pm_runtime_get_sync(dev);
- drv->remove(pci_dev);
- pm_runtime_put_noidle(dev);
- }
- pcibios_free_irq(pci_dev);
- pci_dev->driver = NULL;
- pci_iov_remove(pci_dev);
+ if (drv->remove) {
+ pm_runtime_get_sync(dev);
+ drv->remove(pci_dev);
+ pm_runtime_put_noidle(dev);
}
+ pcibios_free_irq(pci_dev);
+ pci_iov_remove(pci_dev);
/* Undo the runtime PM settings in local_pci_probe() */
pm_runtime_put_sync(dev);
@@ -495,7 +489,7 @@ static void pci_device_remove(struct device *dev)
static void pci_device_shutdown(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
- struct pci_driver *drv = pci_dev->driver;
+ struct pci_driver *drv = to_pci_driver(dev->driver);
pm_runtime_resume(dev);
@@ -576,7 +570,7 @@ static int pci_pm_reenable_device(struct pci_dev *pci_dev)
{
int retval;
- /* if the device was enabled before suspend, reenable */
+ /* if the device was enabled before suspend, re-enable */
retval = pci_reenable_device(pci_dev);
/*
* if the device was busmaster before the suspend, make it busmaster
@@ -591,7 +585,7 @@ static int pci_pm_reenable_device(struct pci_dev *pci_dev)
static int pci_legacy_suspend(struct device *dev, pm_message_t state)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
- struct pci_driver *drv = pci_dev->driver;
+ struct pci_driver *drv = to_pci_driver(dev->driver);
if (drv && drv->suspend) {
pci_power_t prev = pci_dev->current_state;
@@ -632,7 +626,7 @@ static int pci_legacy_suspend_late(struct device *dev, pm_message_t state)
static int pci_legacy_resume(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
- struct pci_driver *drv = pci_dev->driver;
+ struct pci_driver *drv = to_pci_driver(dev->driver);
pci_fixup_device(pci_fixup_resume, pci_dev);
@@ -651,7 +645,7 @@ static void pci_pm_default_suspend(struct pci_dev *pci_dev)
static bool pci_has_legacy_pm_support(struct pci_dev *pci_dev)
{
- struct pci_driver *drv = pci_dev->driver;
+ struct pci_driver *drv = to_pci_driver(pci_dev->dev.driver);
bool ret = drv && (drv->suspend || drv->resume);
/*
@@ -1244,11 +1238,11 @@ static int pci_pm_runtime_suspend(struct device *dev)
int error;
/*
- * If pci_dev->driver is not set (unbound), we leave the device in D0,
- * but it may go to D3cold when the bridge above it runtime suspends.
- * Save its config space in case that happens.
+ * If the device has no driver, we leave it in D0, but it may go to
+ * D3cold when the bridge above it runtime suspends. Save its
+ * config space in case that happens.
*/
- if (!pci_dev->driver) {
+ if (!to_pci_driver(dev->driver)) {
pci_save_state(pci_dev);
return 0;
}
@@ -1305,7 +1299,7 @@ static int pci_pm_runtime_resume(struct device *dev)
*/
pci_restore_standard_config(pci_dev);
- if (!pci_dev->driver)
+ if (!to_pci_driver(dev->driver))
return 0;
pci_fixup_device(pci_fixup_resume_early, pci_dev);
@@ -1324,14 +1318,13 @@ static int pci_pm_runtime_resume(struct device *dev)
static int pci_pm_runtime_idle(struct device *dev)
{
- struct pci_dev *pci_dev = to_pci_dev(dev);
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
/*
- * If pci_dev->driver is not set (unbound), the device should
- * always remain in D0 regardless of the runtime PM status
+ * If the device has no driver, it should always remain in D0
+ * regardless of the runtime PM status
*/
- if (!pci_dev->driver)
+ if (!to_pci_driver(dev->driver))
return 0;
if (!pm)
@@ -1438,8 +1431,10 @@ static struct pci_driver pci_compat_driver = {
*/
struct pci_driver *pci_dev_driver(const struct pci_dev *dev)
{
- if (dev->driver)
- return dev->driver;
+ struct pci_driver *drv = to_pci_driver(dev->dev.driver);
+
+ if (drv)
+ return drv;
else {
int i;
for (i = 0; i <= PCI_ROM_RESOURCE; i++)
@@ -1542,7 +1537,7 @@ static int pci_uevent(struct device *dev, struct kobj_uevent_env *env)
return 0;
}
-#if defined(CONFIG_PCIEPORTBUS) || defined(CONFIG_EEH)
+#if defined(CONFIG_PCIEAER) || defined(CONFIG_EEH)
/**
* pci_uevent_ers - emit a uevent during recovery path of PCI device
* @pdev: PCI device undergoing error recovery
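
Most of the churn in this file replaces the cached pci_dev->driver pointer with to_pci_driver(dev->driver), i.e. deriving the PCI driver from the generic struct device_driver embedded inside it. A toy container_of() walk illustrates that derivation; treating a NULL input as "no driver bound" is an assumption of this sketch, chosen to mirror how the callers above test the result:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct device_driver {
	const char *name;
};

struct pci_driver {
	int flags;
	struct device_driver driver;	/* embedded generic driver */
};

static struct pci_driver *to_pci_driver_sketch(struct device_driver *drv)
{
	return drv ? container_of(drv, struct pci_driver, driver) : NULL;
}

int main(void)
{
	struct pci_driver pdrv = { .flags = 42, .driver = { .name = "demo" } };
	struct device_driver *generic = &pdrv.driver;	/* what dev->driver holds */
	struct pci_driver *back = to_pci_driver_sketch(generic);

	printf("%s flags=%d\n", back->driver.name, back->flags);
	printf("unbound device: %p\n", (void *)to_pci_driver_sketch(NULL));
	return 0;
}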
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 7fb5cd17cc98..1637a440b353 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -26,6 +26,7 @@
#include <linux/slab.h>
#include <linux/vgaarb.h>
#include <linux/pm_runtime.h>
+#include <linux/msi.h>
#include <linux/of.h>
#include "pci.h"
@@ -49,7 +50,28 @@ pci_config_attr(subsystem_vendor, "0x%04x\n");
pci_config_attr(subsystem_device, "0x%04x\n");
pci_config_attr(revision, "0x%02x\n");
pci_config_attr(class, "0x%06x\n");
-pci_config_attr(irq, "%u\n");
+
+static ssize_t irq_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+#ifdef CONFIG_PCI_MSI
+ /*
+ * For MSI, show the first MSI IRQ; for all other cases including
+ * MSI-X, show the legacy INTx IRQ.
+ */
+ if (pdev->msi_enabled) {
+ struct msi_desc *desc = first_pci_msi_entry(pdev);
+
+ return sysfs_emit(buf, "%u\n", desc->irq);
+ }
+#endif
+
+ return sysfs_emit(buf, "%u\n", pdev->irq);
+}
+static DEVICE_ATTR_RO(irq);
static ssize_t broken_parity_status_show(struct device *dev,
struct device_attribute *attr,
@@ -273,15 +295,15 @@ static ssize_t enable_store(struct device *dev, struct device_attribute *attr,
{
struct pci_dev *pdev = to_pci_dev(dev);
unsigned long val;
- ssize_t result = kstrtoul(buf, 0, &val);
-
- if (result < 0)
- return result;
+ ssize_t result = 0;
/* this can crash the machine when done on the "wrong" device */
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
+ if (kstrtoul(buf, 0, &val) < 0)
+ return -EINVAL;
+
device_lock(dev);
if (dev->driver)
result = -EBUSY;
@@ -312,14 +334,13 @@ static ssize_t numa_node_store(struct device *dev,
size_t count)
{
struct pci_dev *pdev = to_pci_dev(dev);
- int node, ret;
+ int node;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- ret = kstrtoint(buf, 0, &node);
- if (ret)
- return ret;
+ if (kstrtoint(buf, 0, &node) < 0)
+ return -EINVAL;
if ((node < 0 && node != NUMA_NO_NODE) || node >= MAX_NUMNODES)
return -EINVAL;
@@ -378,12 +399,12 @@ static ssize_t msi_bus_store(struct device *dev, struct device_attribute *attr,
struct pci_bus *subordinate = pdev->subordinate;
unsigned long val;
- if (kstrtoul(buf, 0, &val) < 0)
- return -EINVAL;
-
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
+ if (kstrtoul(buf, 0, &val) < 0)
+ return -EINVAL;
+
/*
* "no_msi" and "bus_flags" only affect what happens when a driver
* requests MSI or MSI-X. They don't affect any drivers that have
@@ -1339,10 +1360,10 @@ static ssize_t reset_store(struct device *dev, struct device_attribute *attr,
{
struct pci_dev *pdev = to_pci_dev(dev);
unsigned long val;
- ssize_t result = kstrtoul(buf, 0, &val);
+ ssize_t result;
- if (result < 0)
- return result;
+ if (kstrtoul(buf, 0, &val) < 0)
+ return -EINVAL;
if (val != 1)
return -EINVAL;
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index ce2ab62b64cf..b88db815ee01 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -269,7 +269,7 @@ static int pci_dev_str_match_path(struct pci_dev *dev, const char *path,
const char **endptr)
{
int ret;
- int seg, bus, slot, func;
+ unsigned int seg, bus, slot, func;
char *wpath, *p;
char end;
@@ -1477,6 +1477,24 @@ static int pci_save_pcie_state(struct pci_dev *dev)
return 0;
}
+void pci_bridge_reconfigure_ltr(struct pci_dev *dev)
+{
+#ifdef CONFIG_PCIEASPM
+ struct pci_dev *bridge;
+ u32 ctl;
+
+ bridge = pci_upstream_bridge(dev);
+ if (bridge && bridge->ltr_path) {
+ pcie_capability_read_dword(bridge, PCI_EXP_DEVCTL2, &ctl);
+ if (!(ctl & PCI_EXP_DEVCTL2_LTR_EN)) {
+ pci_dbg(bridge, "re-enabling LTR\n");
+ pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2,
+ PCI_EXP_DEVCTL2_LTR_EN);
+ }
+ }
+#endif
+}
+
static void pci_restore_pcie_state(struct pci_dev *dev)
{
int i = 0;
@@ -1487,6 +1505,13 @@ static void pci_restore_pcie_state(struct pci_dev *dev)
if (!save_state)
return;
+ /*
+ * Downstream ports reset the LTR enable bit when link goes down.
+ * Check and re-configure the bit here before restoring device.
+ * PCIe r5.0, sec 7.5.3.16.
+ */
+ pci_bridge_reconfigure_ltr(dev);
+
cap = (u16 *)&save_state->cap.data[0];
pcie_capability_write_word(dev, PCI_EXP_DEVCTL, cap[i++]);
pcie_capability_write_word(dev, PCI_EXP_LNKCTL, cap[i++]);
@@ -2091,14 +2116,14 @@ void pcim_pin_device(struct pci_dev *pdev)
EXPORT_SYMBOL(pcim_pin_device);
/*
- * pcibios_add_device - provide arch specific hooks when adding device dev
+ * pcibios_device_add - provide arch specific hooks when adding device dev
* @dev: the PCI device being added
*
* Permits the platform to provide architecture specific functionality when
* devices are added. This is the default implementation. Architecture
* implementations can override this.
*/
-int __weak pcibios_add_device(struct pci_dev *dev)
+int __weak pcibios_device_add(struct pci_dev *dev)
{
return 0;
}
@@ -2218,6 +2243,7 @@ int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
}
EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);
+#ifdef CONFIG_PCIEAER
void pcie_clear_device_status(struct pci_dev *dev)
{
u16 sta;
@@ -2225,6 +2251,7 @@ void pcie_clear_device_status(struct pci_dev *dev)
pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &sta);
pcie_capability_write_word(dev, PCI_EXP_DEVSTA, sta);
}
+#endif
/**
* pcie_clear_root_pme_status - Clear root port PME interrupt status.
@@ -3719,6 +3746,14 @@ int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask)
struct pci_dev *bridge;
u32 cap, ctl2;
+ /*
+ * Per PCIe r5.0, sec 9.3.5.10, the AtomicOp Requester Enable bit
+ * in Device Control 2 is reserved in VFs and the PF value applies
+ * to all associated VFs.
+ */
+ if (dev->is_virtfn)
+ return -EINVAL;
+
if (!pci_is_pcie(dev))
return -EINVAL;
@@ -5088,13 +5123,14 @@ EXPORT_SYMBOL_GPL(pci_dev_unlock);
static void pci_dev_save_and_disable(struct pci_dev *dev)
{
+ struct pci_driver *drv = to_pci_driver(dev->dev.driver);
const struct pci_error_handlers *err_handler =
- dev->driver ? dev->driver->err_handler : NULL;
+ drv ? drv->err_handler : NULL;
/*
- * dev->driver->err_handler->reset_prepare() is protected against
- * races with ->remove() by the device lock, which must be held by
- * the caller.
+ * drv->err_handler->reset_prepare() is protected against races
+ * with ->remove() by the device lock, which must be held by the
+ * caller.
*/
if (err_handler && err_handler->reset_prepare)
err_handler->reset_prepare(dev);
@@ -5119,15 +5155,15 @@ static void pci_dev_save_and_disable(struct pci_dev *dev)
static void pci_dev_restore(struct pci_dev *dev)
{
+ struct pci_driver *drv = to_pci_driver(dev->dev.driver);
const struct pci_error_handlers *err_handler =
- dev->driver ? dev->driver->err_handler : NULL;
+ drv ? drv->err_handler : NULL;
pci_restore_state(dev);
/*
- * dev->driver->err_handler->reset_done() is protected against
- * races with ->remove() by the device lock, which must be held by
- * the caller.
+ * drv->err_handler->reset_done() is protected against races with
+ * ->remove() by the device lock, which must be held by the caller.
*/
if (err_handler && err_handler->reset_done)
err_handler->reset_done(dev);
@@ -5288,7 +5324,7 @@ const struct attribute_group pci_dev_reset_method_attr_group = {
*/
int __pci_reset_function_locked(struct pci_dev *dev)
{
- int i, m, rc = -ENOTTY;
+ int i, m, rc;
might_sleep();
@@ -6324,11 +6360,12 @@ EXPORT_SYMBOL_GPL(pci_pr3_present);
* cannot be left as a userspace activity). DMA aliases should therefore
* be configured via quirks, such as the PCI fixup header quirk.
*/
-void pci_add_dma_alias(struct pci_dev *dev, u8 devfn_from, unsigned nr_devfns)
+void pci_add_dma_alias(struct pci_dev *dev, u8 devfn_from,
+ unsigned int nr_devfns)
{
int devfn_to;
- nr_devfns = min(nr_devfns, (unsigned) MAX_NR_DEVFNS - devfn_from);
+ nr_devfns = min(nr_devfns, (unsigned int)MAX_NR_DEVFNS - devfn_from);
devfn_to = devfn_from + nr_devfns - 1;
if (!dev->dma_alias_mask)
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 1cce56c2aea0..bc7f3a10ff60 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -125,6 +125,7 @@ void pci_msix_init(struct pci_dev *dev);
bool pci_bridge_d3_possible(struct pci_dev *dev);
void pci_bridge_d3_update(struct pci_dev *dev);
void pci_bridge_wait_for_secondary_bus(struct pci_dev *dev);
+void pci_bridge_reconfigure_ltr(struct pci_dev *dev);
static inline void pci_wakeup_event(struct pci_dev *dev)
{
diff --git a/drivers/pci/pcie/Makefile b/drivers/pci/pcie/Makefile
index b2980db88cc0..5783a2f79e6a 100644
--- a/drivers/pci/pcie/Makefile
+++ b/drivers/pci/pcie/Makefile
@@ -2,12 +2,12 @@
#
# Makefile for PCI Express features and port driver
-pcieportdrv-y := portdrv_core.o portdrv_pci.o err.o rcec.o
+pcieportdrv-y := portdrv_core.o portdrv_pci.o rcec.o
obj-$(CONFIG_PCIEPORTBUS) += pcieportdrv.o
obj-$(CONFIG_PCIEASPM) += aspm.o
-obj-$(CONFIG_PCIEAER) += aer.o
+obj-$(CONFIG_PCIEAER) += aer.o err.o
obj-$(CONFIG_PCIEAER_INJECT) += aer_inject.o
obj-$(CONFIG_PCIE_PME) += pme.o
obj-$(CONFIG_PCIE_DPC) += dpc.o
diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c
index 9784fdcf3006..9fa1f97e5b27 100644
--- a/drivers/pci/pcie/aer.c
+++ b/drivers/pci/pcie/aer.c
@@ -57,7 +57,7 @@ struct aer_stats {
* "as seen by this device". Note that this may mean that if an
* end point is causing problems, the AER counters may increment
* at its link partner (e.g. root port) because the errors will be
- * "seen" by the link partner and not the the problematic end point
+ * "seen" by the link partner and not the problematic end point
* itself (which may report all counters as 0 as it never saw any
* problems).
*/
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 013a47f587ce..52c74682601a 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -1219,7 +1219,7 @@ static ssize_t aspm_attr_store_common(struct device *dev,
struct pcie_link_state *link = pcie_aspm_get_link(pdev);
bool state_enable;
- if (strtobool(buf, &state_enable) < 0)
+ if (kstrtobool(buf, &state_enable) < 0)
return -EINVAL;
down_read(&pci_bus_sem);
@@ -1276,7 +1276,7 @@ static ssize_t clkpm_store(struct device *dev,
struct pcie_link_state *link = pcie_aspm_get_link(pdev);
bool state_enable;
- if (strtobool(buf, &state_enable) < 0)
+ if (kstrtobool(buf, &state_enable) < 0)
return -EINVAL;
down_read(&pci_bus_sem);
diff --git a/drivers/pci/pcie/err.c b/drivers/pci/pcie/err.c
index b576aa890c76..356b9317297e 100644
--- a/drivers/pci/pcie/err.c
+++ b/drivers/pci/pcie/err.c
@@ -49,14 +49,16 @@ static int report_error_detected(struct pci_dev *dev,
pci_channel_state_t state,
enum pci_ers_result *result)
{
+ struct pci_driver *pdrv;
pci_ers_result_t vote;
const struct pci_error_handlers *err_handler;
device_lock(&dev->dev);
+ pdrv = to_pci_driver(dev->dev.driver);
if (!pci_dev_set_io_state(dev, state) ||
- !dev->driver ||
- !dev->driver->err_handler ||
- !dev->driver->err_handler->error_detected) {
+ !pdrv ||
+ !pdrv->err_handler ||
+ !pdrv->err_handler->error_detected) {
/*
* If any device in the subtree does not have an error_detected
* callback, PCI_ERS_RESULT_NO_AER_DRIVER prevents subsequent
@@ -70,7 +72,7 @@ static int report_error_detected(struct pci_dev *dev,
vote = PCI_ERS_RESULT_NONE;
}
} else {
- err_handler = dev->driver->err_handler;
+ err_handler = pdrv->err_handler;
vote = err_handler->error_detected(dev, state);
}
pci_uevent_ers(dev, vote);
@@ -91,16 +93,18 @@ static int report_normal_detected(struct pci_dev *dev, void *data)
static int report_mmio_enabled(struct pci_dev *dev, void *data)
{
+ struct pci_driver *pdrv;
pci_ers_result_t vote, *result = data;
const struct pci_error_handlers *err_handler;
device_lock(&dev->dev);
- if (!dev->driver ||
- !dev->driver->err_handler ||
- !dev->driver->err_handler->mmio_enabled)
+ pdrv = to_pci_driver(dev->dev.driver);
+ if (!pdrv ||
+ !pdrv->err_handler ||
+ !pdrv->err_handler->mmio_enabled)
goto out;
- err_handler = dev->driver->err_handler;
+ err_handler = pdrv->err_handler;
vote = err_handler->mmio_enabled(dev);
*result = merge_result(*result, vote);
out:
@@ -110,16 +114,18 @@ out:
static int report_slot_reset(struct pci_dev *dev, void *data)
{
+ struct pci_driver *pdrv;
pci_ers_result_t vote, *result = data;
const struct pci_error_handlers *err_handler;
device_lock(&dev->dev);
- if (!dev->driver ||
- !dev->driver->err_handler ||
- !dev->driver->err_handler->slot_reset)
+ pdrv = to_pci_driver(dev->dev.driver);
+ if (!pdrv ||
+ !pdrv->err_handler ||
+ !pdrv->err_handler->slot_reset)
goto out;
- err_handler = dev->driver->err_handler;
+ err_handler = pdrv->err_handler;
vote = err_handler->slot_reset(dev);
*result = merge_result(*result, vote);
out:
@@ -129,16 +135,18 @@ out:
static int report_resume(struct pci_dev *dev, void *data)
{
+ struct pci_driver *pdrv;
const struct pci_error_handlers *err_handler;
device_lock(&dev->dev);
+ pdrv = to_pci_driver(dev->dev.driver);
if (!pci_dev_set_io_state(dev, pci_channel_io_normal) ||
- !dev->driver ||
- !dev->driver->err_handler ||
- !dev->driver->err_handler->resume)
+ !pdrv ||
+ !pdrv->err_handler ||
+ !pdrv->err_handler->resume)
goto out;
- err_handler = dev->driver->err_handler;
+ err_handler = pdrv->err_handler;
err_handler->resume(dev);
out:
pci_uevent_ers(dev, PCI_ERS_RESULT_RECOVERED);
diff --git a/drivers/pci/pcie/portdrv.h b/drivers/pci/pcie/portdrv.h
index 2ff5724b8f13..0ef4bf5f811d 100644
--- a/drivers/pci/pcie/portdrv.h
+++ b/drivers/pci/pcie/portdrv.h
@@ -85,8 +85,7 @@ struct pcie_port_service_driver {
int (*runtime_suspend)(struct pcie_device *dev);
int (*runtime_resume)(struct pcie_device *dev);
- /* Device driver may resume normal operations */
- void (*error_resume)(struct pci_dev *dev);
+ int (*slot_reset)(struct pcie_device *dev);
int port_type; /* Type of the port this driver can handle */
u32 service; /* Port service this device represents */
@@ -110,6 +109,7 @@ void pcie_port_service_unregister(struct pcie_port_service_driver *new);
extern struct bus_type pcie_port_bus_type;
int pcie_port_device_register(struct pci_dev *dev);
+int pcie_port_device_iter(struct device *dev, void *data);
#ifdef CONFIG_PM
int pcie_port_device_suspend(struct device *dev);
int pcie_port_device_resume_noirq(struct device *dev);
@@ -118,8 +118,6 @@ int pcie_port_device_runtime_suspend(struct device *dev);
int pcie_port_device_runtime_resume(struct device *dev);
#endif
void pcie_port_device_remove(struct pci_dev *dev);
-int __must_check pcie_port_bus_register(void);
-void pcie_port_bus_unregister(void);
struct pci_dev;
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index 3ee63968deaa..bda630889f95 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -166,9 +166,6 @@ static int pcie_init_service_irqs(struct pci_dev *dev, int *irqs, int mask)
{
int ret, i;
- for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++)
- irqs[i] = -1;
-
/*
* If we support PME but can't use MSI/MSI-X for it, we have to
* fall back to INTx or other interrupts, e.g., a system shared
@@ -317,8 +314,10 @@ static int pcie_device_init(struct pci_dev *pdev, int service, int irq)
*/
int pcie_port_device_register(struct pci_dev *dev)
{
- int status, capabilities, i, nr_service;
- int irqs[PCIE_PORT_DEVICE_MAXSERVICES];
+ int status, capabilities, irq_services, i, nr_service;
+ int irqs[PCIE_PORT_DEVICE_MAXSERVICES] = {
+ [0 ... PCIE_PORT_DEVICE_MAXSERVICES-1] = -1
+ };
/* Enable PCI Express port device */
status = pci_enable_device(dev);
@@ -331,18 +330,32 @@ int pcie_port_device_register(struct pci_dev *dev)
return 0;
pci_set_master(dev);
- /*
- * Initialize service irqs. Don't use service devices that
- * require interrupts if there is no way to generate them.
- * However, some drivers may have a polling mode (e.g. pciehp_poll_mode)
- * that can be used in the absence of irqs. Allow them to determine
- * if that is to be used.
- */
- status = pcie_init_service_irqs(dev, irqs, capabilities);
- if (status) {
- capabilities &= PCIE_PORT_SERVICE_HP;
- if (!capabilities)
- goto error_disable;
+
+ irq_services = 0;
+ if (IS_ENABLED(CONFIG_PCIE_PME))
+ irq_services |= PCIE_PORT_SERVICE_PME;
+ if (IS_ENABLED(CONFIG_PCIEAER))
+ irq_services |= PCIE_PORT_SERVICE_AER;
+ if (IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE))
+ irq_services |= PCIE_PORT_SERVICE_HP;
+ if (IS_ENABLED(CONFIG_PCIE_DPC))
+ irq_services |= PCIE_PORT_SERVICE_DPC;
+ irq_services &= capabilities;
+
+ if (irq_services) {
+ /*
+ * Initialize service IRQs. Don't use service devices that
+ * require interrupts if there is no way to generate them.
+ * However, some drivers may have a polling mode (e.g.
+ * pciehp_poll_mode) that can be used in the absence of IRQs.
+ * Allow them to determine if that is to be used.
+ */
+ status = pcie_init_service_irqs(dev, irqs, irq_services);
+ if (status) {
+ irq_services &= PCIE_PORT_SERVICE_HP;
+ if (!irq_services)
+ goto error_disable;
+ }
}
/* Allocate child services if any */
@@ -367,24 +380,24 @@ error_disable:
return status;
}
-#ifdef CONFIG_PM
-typedef int (*pcie_pm_callback_t)(struct pcie_device *);
+typedef int (*pcie_callback_t)(struct pcie_device *);
-static int pm_iter(struct device *dev, void *data)
+int pcie_port_device_iter(struct device *dev, void *data)
{
struct pcie_port_service_driver *service_driver;
size_t offset = *(size_t *)data;
- pcie_pm_callback_t cb;
+ pcie_callback_t cb;
if ((dev->bus == &pcie_port_bus_type) && dev->driver) {
service_driver = to_service_driver(dev->driver);
- cb = *(pcie_pm_callback_t *)((void *)service_driver + offset);
+ cb = *(pcie_callback_t *)((void *)service_driver + offset);
if (cb)
return cb(to_pcie_device(dev));
}
return 0;
}
+#ifdef CONFIG_PM
/**
* pcie_port_device_suspend - suspend port services associated with a PCIe port
* @dev: PCI Express port to handle
@@ -392,13 +405,13 @@ static int pm_iter(struct device *dev, void *data)
int pcie_port_device_suspend(struct device *dev)
{
size_t off = offsetof(struct pcie_port_service_driver, suspend);
- return device_for_each_child(dev, &off, pm_iter);
+ return device_for_each_child(dev, &off, pcie_port_device_iter);
}
int pcie_port_device_resume_noirq(struct device *dev)
{
size_t off = offsetof(struct pcie_port_service_driver, resume_noirq);
- return device_for_each_child(dev, &off, pm_iter);
+ return device_for_each_child(dev, &off, pcie_port_device_iter);
}
/**
@@ -408,7 +421,7 @@ int pcie_port_device_resume_noirq(struct device *dev)
int pcie_port_device_resume(struct device *dev)
{
size_t off = offsetof(struct pcie_port_service_driver, resume);
- return device_for_each_child(dev, &off, pm_iter);
+ return device_for_each_child(dev, &off, pcie_port_device_iter);
}
/**
@@ -418,7 +431,7 @@ int pcie_port_device_resume(struct device *dev)
int pcie_port_device_runtime_suspend(struct device *dev)
{
size_t off = offsetof(struct pcie_port_service_driver, runtime_suspend);
- return device_for_each_child(dev, &off, pm_iter);
+ return device_for_each_child(dev, &off, pcie_port_device_iter);
}
/**
@@ -428,7 +441,7 @@ int pcie_port_device_runtime_suspend(struct device *dev)
int pcie_port_device_runtime_resume(struct device *dev)
{
size_t off = offsetof(struct pcie_port_service_driver, runtime_resume);
- return device_for_each_child(dev, &off, pm_iter);
+ return device_for_each_child(dev, &off, pcie_port_device_iter);
}
#endif /* PM */
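
pm_iter() is generalized into pcie_port_device_iter() so the hotplug slot_reset hook can be dispatched the same way as the PM callbacks: the caller passes offsetof() of the wanted pcie_port_service_driver member and the iterator invokes whatever function pointer lives at that offset. A standalone sketch of that offset-based dispatch; the struct and callbacks here are invented for illustration:

#include <stddef.h>
#include <stdio.h>

struct service_ops {
	int (*suspend)(int id);
	int (*resume)(int id);
	int (*slot_reset)(int id);
};

static int demo_suspend(int id)    { printf("suspend %d\n", id); return 0; }
static int demo_slot_reset(int id) { printf("slot_reset %d\n", id); return 0; }

/* Fetch whichever callback lives at 'offset' inside *ops and invoke it */
static int call_at_offset(struct service_ops *ops, size_t offset, int id)
{
	int (*cb)(int) = *(int (**)(int))((char *)ops + offset);

	return cb ? cb(id) : 0;
}

int main(void)
{
	struct service_ops ops = {
		.suspend = demo_suspend,
		.slot_reset = demo_slot_reset,
		/* .resume deliberately left NULL: the iterator just skips it */
	};

	call_at_offset(&ops, offsetof(struct service_ops, suspend), 1);
	call_at_offset(&ops, offsetof(struct service_ops, resume), 2);
	call_at_offset(&ops, offsetof(struct service_ops, slot_reset), 3);
	return 0;
}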
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
index c7ff1eea225a..35eca6277a96 100644
--- a/drivers/pci/pcie/portdrv_pci.c
+++ b/drivers/pci/pcie/portdrv_pci.c
@@ -160,6 +160,9 @@ static pci_ers_result_t pcie_portdrv_error_detected(struct pci_dev *dev,
static pci_ers_result_t pcie_portdrv_slot_reset(struct pci_dev *dev)
{
+ size_t off = offsetof(struct pcie_port_service_driver, slot_reset);
+ device_for_each_child(&dev->dev, &off, pcie_port_device_iter);
+
pci_restore_state(dev);
pci_save_state(dev);
return PCI_ERS_RESULT_RECOVERED;
@@ -170,29 +173,6 @@ static pci_ers_result_t pcie_portdrv_mmio_enabled(struct pci_dev *dev)
return PCI_ERS_RESULT_RECOVERED;
}
-static int resume_iter(struct device *device, void *data)
-{
- struct pcie_device *pcie_device;
- struct pcie_port_service_driver *driver;
-
- if (device->bus == &pcie_port_bus_type && device->driver) {
- driver = to_service_driver(device->driver);
- if (driver && driver->error_resume) {
- pcie_device = to_pcie_device(device);
-
- /* Forward error message to service drivers */
- driver->error_resume(pcie_device->port);
- }
- }
-
- return 0;
-}
-
-static void pcie_portdrv_err_resume(struct pci_dev *dev)
-{
- device_for_each_child(&dev->dev, NULL, resume_iter);
-}
-
/*
* LINUX Device Driver Model
*/
@@ -210,7 +190,6 @@ static const struct pci_error_handlers pcie_portdrv_err_handler = {
.error_detected = pcie_portdrv_error_detected,
.slot_reset = pcie_portdrv_slot_reset,
.mmio_enabled = pcie_portdrv_mmio_enabled,
- .resume = pcie_portdrv_err_resume,
};
static struct pci_driver pcie_portdriver = {
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index d9fc02a71baa..087d3658f75c 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -883,11 +883,11 @@ static void pci_set_bus_msi_domain(struct pci_bus *bus)
static int pci_register_host_bridge(struct pci_host_bridge *bridge)
{
struct device *parent = bridge->dev.parent;
- struct resource_entry *window, *n;
+ struct resource_entry *window, *next, *n;
struct pci_bus *bus, *b;
- resource_size_t offset;
+ resource_size_t offset, next_offset;
LIST_HEAD(resources);
- struct resource *res;
+ struct resource *res, *next_res;
char addr[64], *fmt;
const char *name;
int err;
@@ -970,11 +970,34 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge)
if (nr_node_ids > 1 && pcibus_to_node(bus) == NUMA_NO_NODE)
dev_warn(&bus->dev, "Unknown NUMA node; performance will be reduced\n");
+ /* Coalesce contiguous windows */
+ resource_list_for_each_entry_safe(window, n, &resources) {
+ if (list_is_last(&window->node, &resources))
+ break;
+
+ next = list_next_entry(window, node);
+ offset = window->offset;
+ res = window->res;
+ next_offset = next->offset;
+ next_res = next->res;
+
+ if (res->flags != next_res->flags || offset != next_offset)
+ continue;
+
+ if (res->end + 1 == next_res->start) {
+ next_res->start = res->start;
+ res->flags = res->start = res->end = 0;
+ }
+ }
+
/* Add initial resources to the bus */
resource_list_for_each_entry_safe(window, n, &resources) {
- list_move_tail(&window->node, &bridge->windows);
offset = window->offset;
res = window->res;
+ if (!res->end)
+ continue;
+
+ list_move_tail(&window->node, &bridge->windows);
if (res->flags & IORESOURCE_BUS)
pci_bus_insert_busn_res(bus, bus->number, res->end);
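
The coalescing pass above merges host-bridge windows that are contiguous in both bus and CPU address space, then empties the absorbed entry so the registration loop can skip it. A stand-alone illustration of the merge rule, using hypothetical addresses that are not taken from the patch:

#include <stdio.h>

struct win { unsigned long long start, end, offset, flags; };

int main(void)
{
	/* two adjacent MEM windows with identical flags and offset */
	struct win a = { 0x6000000000ULL, 0x6fffffffffULL, 0, 0x200 };
	struct win b = { 0x7000000000ULL, 0x7fffffffffULL, 0, 0x200 };

	if (a.flags == b.flags && a.offset == b.offset &&
	    a.end + 1 == b.start) {
		b.start = a.start;		/* widen the later window */
		a.flags = a.start = a.end = 0;	/* emptied entry is skipped later */
	}

	printf("merged window: %#llx-%#llx\n", b.start, b.end);
	return 0;
}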
@@ -2168,9 +2191,21 @@ static void pci_configure_ltr(struct pci_dev *dev)
* Complex and all intermediate Switches indicate support for LTR.
* PCIe r4.0, sec 6.18.
*/
- if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT ||
- ((bridge = pci_upstream_bridge(dev)) &&
- bridge->ltr_path)) {
+ if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) {
+ pcie_capability_set_word(dev, PCI_EXP_DEVCTL2,
+ PCI_EXP_DEVCTL2_LTR_EN);
+ dev->ltr_path = 1;
+ return;
+ }
+
+ /*
+ * If we're configuring a hot-added device, LTR was likely
+ * disabled in the upstream bridge, so re-enable it before enabling
+ * it in the new device.
+ */
+ bridge = pci_upstream_bridge(dev);
+ if (bridge && bridge->ltr_path) {
+ pci_bridge_reconfigure_ltr(dev);
pcie_capability_set_word(dev, PCI_EXP_DEVCTL2,
PCI_EXP_DEVCTL2_LTR_EN);
dev->ltr_path = 1;
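
pci_bridge_reconfigure_ltr() is added elsewhere in this series; the hunk above only calls it. Roughly, it presumably re-asserts LTR Enable in the upstream bridge when the bridge was on an LTR path but lost the control bit (for example across a reset), along these lines:

void pci_bridge_reconfigure_ltr(struct pci_dev *dev)
{
#ifdef CONFIG_PCIEASPM
	struct pci_dev *bridge;
	u32 ctl;

	bridge = pci_upstream_bridge(dev);
	if (bridge && bridge->ltr_path) {
		pcie_capability_read_dword(bridge, PCI_EXP_DEVCTL2, &ctl);
		if (!(ctl & PCI_EXP_DEVCTL2_LTR_EN)) {
			pci_dbg(bridge, "re-enabling LTR\n");
			pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2,
						 PCI_EXP_DEVCTL2_LTR_EN);
		}
	}
#endif
}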
@@ -2450,7 +2485,7 @@ static struct irq_domain *pci_dev_msi_domain(struct pci_dev *dev)
struct irq_domain *d;
/*
- * If a domain has been set through the pcibios_add_device()
+ * If a domain has been set through the pcibios_device_add()
* callback, then this is the one (platform code knows best).
*/
d = dev_get_msi_domain(&dev->dev);
@@ -2518,7 +2553,7 @@ void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
list_add_tail(&dev->bus_list, &bus->devices);
up_write(&pci_bus_sem);
- ret = pcibios_add_device(dev);
+ ret = pcibios_device_add(dev);
WARN_ON(ret < 0);
/* Set up MSI IRQ domain */
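
The pcibios_add_device() -> pcibios_device_add() rename is mechanical; the weak default that architectures may override presumably keeps its trivial body across the rename:

int __weak pcibios_device_add(struct pci_dev *dev)
{
	return 0;
}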
@@ -2550,11 +2585,12 @@ struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn)
}
EXPORT_SYMBOL(pci_scan_single_device);
-static unsigned next_fn(struct pci_bus *bus, struct pci_dev *dev, unsigned fn)
+static unsigned int next_fn(struct pci_bus *bus, struct pci_dev *dev,
+ unsigned int fn)
{
int pos;
u16 cap = 0;
- unsigned next_fn;
+ unsigned int next_fn;
if (pci_ari_enabled(bus)) {
if (!dev)
@@ -2613,7 +2649,7 @@ static int only_one_child(struct pci_bus *bus)
*/
int pci_scan_slot(struct pci_bus *bus, int devfn)
{
- unsigned fn, nr = 0;
+ unsigned int fn, nr = 0;
struct pci_dev *dev;
if (only_one_child(bus) && (devfn > 0))
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 4537d1ea14fd..aedb78c86ddc 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -501,7 +501,7 @@ static void quirk_s3_64M(struct pci_dev *dev)
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_868, quirk_s3_64M);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_968, quirk_s3_64M);
-static void quirk_io(struct pci_dev *dev, int pos, unsigned size,
+static void quirk_io(struct pci_dev *dev, int pos, unsigned int size,
const char *name)
{
u32 region;
@@ -552,7 +552,7 @@ static void quirk_cs5536_vsa(struct pci_dev *dev)
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA, quirk_cs5536_vsa);
static void quirk_io_region(struct pci_dev *dev, int port,
- unsigned size, int nr, const char *name)
+ unsigned int size, int nr, const char *name)
{
u16 region;
struct pci_bus_region bus_region;
@@ -666,7 +666,7 @@ static void piix4_io_quirk(struct pci_dev *dev, const char *name, unsigned int p
base = devres & 0xffff;
size = 16;
for (;;) {
- unsigned bit = size >> 1;
+ unsigned int bit = size >> 1;
if ((bit & mask) == bit)
break;
size = bit;
@@ -692,7 +692,7 @@ static void piix4_mem_quirk(struct pci_dev *dev, const char *name, unsigned int
mask = (devres & 0x3f) << 16;
size = 128 << 16;
for (;;) {
- unsigned bit = size >> 1;
+ unsigned int bit = size >> 1;
if ((bit & mask) == bit)
break;
size = bit;
@@ -806,7 +806,7 @@ static void ich6_lpc_acpi_gpio(struct pci_dev *dev)
"ICH6 GPIO");
}
-static void ich6_lpc_generic_decode(struct pci_dev *dev, unsigned reg,
+static void ich6_lpc_generic_decode(struct pci_dev *dev, unsigned int reg,
const char *name, int dynsize)
{
u32 val;
@@ -850,7 +850,7 @@ static void quirk_ich6_lpc(struct pci_dev *dev)
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_0, quirk_ich6_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, quirk_ich6_lpc);
-static void ich7_lpc_generic_decode(struct pci_dev *dev, unsigned reg,
+static void ich7_lpc_generic_decode(struct pci_dev *dev, unsigned int reg,
const char *name)
{
u32 val;
@@ -2700,7 +2700,7 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA,
* then the device can't use INTx interrupts. Tegra's PCIe root ports don't
 * generate MSI interrupts for PME and AER events; instead, only INTx interrupts
* are generated. Though Tegra's PCIe root ports can generate MSI interrupts
- * for other events, since PCIe specificiation doesn't support using a mix of
+ * for other events, since PCIe specification doesn't support using a mix of
* INTx and MSI/MSI-X, it is required to disable MSI interrupts to avoid port
* service drivers registering their respective ISRs for MSIs.
*/
@@ -3612,6 +3612,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0032, quirk_no_bus_reset);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x003c, quirk_no_bus_reset);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0033, quirk_no_bus_reset);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0034, quirk_no_bus_reset);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x003e, quirk_no_bus_reset);
/*
 * Root ports on some Cavium CN8xxx chips do not successfully complete a bus
@@ -5795,3 +5796,58 @@ static void apex_pci_fixup_class(struct pci_dev *pdev)
}
DECLARE_PCI_FIXUP_CLASS_HEADER(0x1ac1, 0x089a,
PCI_CLASS_NOT_DEFINED, 8, apex_pci_fixup_class);
+
+/*
+ * Pericom PI7C9X2G404/PI7C9X2G304/PI7C9X2G303 switch erratum E5 -
+ * ACS P2P Request Redirect is not functional
+ *
+ * When ACS P2P Request Redirect is enabled and bandwidth is not balanced
+ * between upstream and downstream ports, packets are queued in an internal
+ * buffer until CPLD packet. The workaround is to use the switch in store and
+ * forward mode.
+ */
+#define PI7C9X2Gxxx_MODE_REG 0x74
+#define PI7C9X2Gxxx_STORE_FORWARD_MODE BIT(0)
+static void pci_fixup_pericom_acs_store_forward(struct pci_dev *pdev)
+{
+ struct pci_dev *upstream;
+ u16 val;
+
+ /* Downstream ports only */
+ if (pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM)
+ return;
+
+ /* Check for ACS P2P Request Redirect use */
+ if (!pdev->acs_cap)
+ return;
+ pci_read_config_word(pdev, pdev->acs_cap + PCI_ACS_CTRL, &val);
+ if (!(val & PCI_ACS_RR))
+ return;
+
+ upstream = pci_upstream_bridge(pdev);
+ if (!upstream)
+ return;
+
+ pci_read_config_word(upstream, PI7C9X2Gxxx_MODE_REG, &val);
+ if (!(val & PI7C9X2Gxxx_STORE_FORWARD_MODE)) {
+ pci_info(upstream, "Setting PI7C9X2Gxxx store-forward mode to avoid ACS erratum\n");
+ pci_write_config_word(upstream, PI7C9X2Gxxx_MODE_REG, val |
+ PI7C9X2Gxxx_STORE_FORWARD_MODE);
+ }
+}
+/*
+ * Apply the fixup on enable and on resume so that it is reapplied whenever
+ * the ACS configuration changes or the switch mode is reset.
+ */
+DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_PERICOM, 0x2404,
+ pci_fixup_pericom_acs_store_forward);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_PERICOM, 0x2404,
+ pci_fixup_pericom_acs_store_forward);
+DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_PERICOM, 0x2304,
+ pci_fixup_pericom_acs_store_forward);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_PERICOM, 0x2304,
+ pci_fixup_pericom_acs_store_forward);
+DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_PERICOM, 0x2303,
+ pci_fixup_pericom_acs_store_forward);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_PERICOM, 0x2303,
+ pci_fixup_pericom_acs_store_forward);
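
For context, the ENABLE and RESUME fixup phases declared above are run by the PCI core when a driver enables the device and when the system resumes; a simplified sketch of the call sites (the example_* names are hypothetical, not part of this patch):

static int example_do_enable(struct pci_dev *dev)
{
	pci_fixup_device(pci_fixup_enable, dev);	/* DECLARE_PCI_FIXUP_ENABLE */
	/* ... enable resources and bus mastering ... */
	return 0;
}

static void example_default_resume(struct pci_dev *dev)
{
	pci_fixup_device(pci_fixup_resume, dev);	/* DECLARE_PCI_FIXUP_RESUME */
	/* ... restore wakeup and power state ... */
}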
diff --git a/drivers/pci/rom.c b/drivers/pci/rom.c
index 8fc9a4e911e3..e18d3a4383ba 100644
--- a/drivers/pci/rom.c
+++ b/drivers/pci/rom.c
@@ -85,7 +85,7 @@ static size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom,
{
void __iomem *image;
int last_image;
- unsigned length;
+ unsigned int length;
image = rom;
do {
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 2ce636937c6e..547396ec50b5 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -1525,7 +1525,7 @@ static void pci_bridge_release_resources(struct pci_bus *bus,
{
struct pci_dev *dev = bus->self;
struct resource *r;
- unsigned old_flags = 0;
+ unsigned int old_flags = 0;
struct resource *b_res;
int idx = 1;
diff --git a/drivers/pci/setup-irq.c b/drivers/pci/setup-irq.c
index 7129494754dd..cc7d26b015f3 100644
--- a/drivers/pci/setup-irq.c
+++ b/drivers/pci/setup-irq.c
@@ -8,7 +8,6 @@
* David Miller (davem@redhat.com)
*/
-
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/errno.h>
@@ -28,25 +27,26 @@ void pci_assign_irq(struct pci_dev *dev)
return;
}
- /* If this device is not on the primary bus, we need to figure out
- which interrupt pin it will come in on. We know which slot it
- will come in on 'cos that slot is where the bridge is. Each
- time the interrupt line passes through a PCI-PCI bridge we must
- apply the swizzle function. */
-
+ /*
+ * If this device is not on the primary bus, we need to figure out
+ * which interrupt pin it will come in on. We know which slot it
+ * will come in on because that slot is where the bridge is. Each
+ * time the interrupt line passes through a PCI-PCI bridge we must
+ * apply the swizzle function.
+ */
pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
/* Cope with illegal. */
if (pin > 4)
pin = 1;
if (pin) {
- /* Follow the chain of bridges, swizzling as we go. */
+ /* Follow the chain of bridges, swizzling as we go. */
if (hbrg->swizzle_irq)
slot = (*(hbrg->swizzle_irq))(dev, &pin);
/*
- * If a swizzling function is not used map_irq must
- * ignore slot
+ * If a swizzling function is not used, map_irq() must
+ * ignore slot.
*/
irq = (*(hbrg->map_irq))(dev, slot, pin);
if (irq == -1)
@@ -56,7 +56,9 @@ void pci_assign_irq(struct pci_dev *dev)
pci_dbg(dev, "assign IRQ: got %d\n", dev->irq);
- /* Always tell the device, so the driver knows what is
- the real IRQ to use; the device does not use it. */
+ /*
+ * Always tell the device, so the driver knows what is the real IRQ
+ * to use; the device does not use it.
+ */
pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
}
diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c
index 0b301f8be9ed..38c2b036fb8e 100644
--- a/drivers/pci/switch/switchtec.c
+++ b/drivers/pci/switch/switchtec.c
@@ -45,6 +45,7 @@ enum mrpc_state {
MRPC_QUEUED,
MRPC_RUNNING,
MRPC_DONE,
+ MRPC_IO_ERROR,
};
struct switchtec_user {
@@ -66,6 +67,19 @@ struct switchtec_user {
int event_cnt;
};
+/*
+ * An MMIO read of the device_id register should always return the device ID;
+ * otherwise the firmware is probably stuck or unreachable due to a firmware
+ * reset that cleared PCI state, including the BARs and the Memory Space
+ * Enable bit.
+ */
+static int is_firmware_running(struct switchtec_dev *stdev)
+{
+ u32 device = ioread32(&stdev->mmio_sys_info->device_id);
+
+ return stdev->pdev->device == device;
+}
+
static struct switchtec_user *stuser_create(struct switchtec_dev *stdev)
{
struct switchtec_user *stuser;
@@ -113,6 +127,7 @@ static void stuser_set_state(struct switchtec_user *stuser,
[MRPC_QUEUED] = "QUEUED",
[MRPC_RUNNING] = "RUNNING",
[MRPC_DONE] = "DONE",
+ [MRPC_IO_ERROR] = "IO_ERROR",
};
stuser->state = state;
@@ -184,9 +199,26 @@ static int mrpc_queue_cmd(struct switchtec_user *stuser)
return 0;
}
+static void mrpc_cleanup_cmd(struct switchtec_dev *stdev)
+{
+ /* requires the mrpc_mutex to already be held when called */
+
+ struct switchtec_user *stuser = list_entry(stdev->mrpc_queue.next,
+ struct switchtec_user, list);
+
+ stuser->cmd_done = true;
+ wake_up_interruptible(&stuser->cmd_comp);
+ list_del_init(&stuser->list);
+ stuser_put(stuser);
+ stdev->mrpc_busy = 0;
+
+ mrpc_cmd_submit(stdev);
+}
+
static void mrpc_complete_cmd(struct switchtec_dev *stdev)
{
/* requires the mrpc_mutex to already be held when called */
+
struct switchtec_user *stuser;
if (list_empty(&stdev->mrpc_queue))
@@ -206,7 +238,8 @@ static void mrpc_complete_cmd(struct switchtec_dev *stdev)
stuser_set_state(stuser, MRPC_DONE);
stuser->return_code = 0;
- if (stuser->status != SWITCHTEC_MRPC_STATUS_DONE)
+ if (stuser->status != SWITCHTEC_MRPC_STATUS_DONE &&
+ stuser->status != SWITCHTEC_MRPC_STATUS_ERROR)
goto out;
if (stdev->dma_mrpc)
@@ -223,13 +256,7 @@ static void mrpc_complete_cmd(struct switchtec_dev *stdev)
memcpy_fromio(stuser->data, &stdev->mmio_mrpc->output_data,
stuser->read_len);
out:
- stuser->cmd_done = true;
- wake_up_interruptible(&stuser->cmd_comp);
- list_del_init(&stuser->list);
- stuser_put(stuser);
- stdev->mrpc_busy = 0;
-
- mrpc_cmd_submit(stdev);
+ mrpc_cleanup_cmd(stdev);
}
static void mrpc_event_work(struct work_struct *work)
@@ -246,6 +273,23 @@ static void mrpc_event_work(struct work_struct *work)
mutex_unlock(&stdev->mrpc_mutex);
}
+static void mrpc_error_complete_cmd(struct switchtec_dev *stdev)
+{
+ /* requires the mrpc_mutex to already be held when called */
+
+ struct switchtec_user *stuser;
+
+ if (list_empty(&stdev->mrpc_queue))
+ return;
+
+ stuser = list_entry(stdev->mrpc_queue.next,
+ struct switchtec_user, list);
+
+ stuser_set_state(stuser, MRPC_IO_ERROR);
+
+ mrpc_cleanup_cmd(stdev);
+}
+
static void mrpc_timeout_work(struct work_struct *work)
{
struct switchtec_dev *stdev;
@@ -257,6 +301,11 @@ static void mrpc_timeout_work(struct work_struct *work)
mutex_lock(&stdev->mrpc_mutex);
+ if (!is_firmware_running(stdev)) {
+ mrpc_error_complete_cmd(stdev);
+ goto out;
+ }
+
if (stdev->dma_mrpc)
status = stdev->dma_mrpc->status;
else
@@ -327,7 +376,7 @@ static ssize_t field ## _show(struct device *dev, \
return io_string_show(buf, &si->gen4.field, \
sizeof(si->gen4.field)); \
else \
- return -ENOTSUPP; \
+ return -EOPNOTSUPP; \
} \
\
static DEVICE_ATTR_RO(field)
@@ -544,6 +593,11 @@ static ssize_t switchtec_dev_read(struct file *filp, char __user *data,
if (rc)
return rc;
+ if (stuser->state == MRPC_IO_ERROR) {
+ mutex_unlock(&stdev->mrpc_mutex);
+ return -EIO;
+ }
+
if (stuser->state != MRPC_DONE) {
mutex_unlock(&stdev->mrpc_mutex);
return -EBADE;
@@ -569,7 +623,8 @@ static ssize_t switchtec_dev_read(struct file *filp, char __user *data,
out:
mutex_unlock(&stdev->mrpc_mutex);
- if (stuser->status == SWITCHTEC_MRPC_STATUS_DONE)
+ if (stuser->status == SWITCHTEC_MRPC_STATUS_DONE ||
+ stuser->status == SWITCHTEC_MRPC_STATUS_ERROR)
return size;
else if (stuser->status == SWITCHTEC_MRPC_STATUS_INTERRUPTED)
return -ENXIO;
@@ -613,7 +668,7 @@ static int ioctl_flash_info(struct switchtec_dev *stdev,
info.flash_length = ioread32(&fi->gen4.flash_length);
info.num_partitions = SWITCHTEC_NUM_PARTITIONS_GEN4;
} else {
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
if (copy_to_user(uinfo, &info, sizeof(info)))
@@ -821,7 +876,7 @@ static int ioctl_flash_part_info(struct switchtec_dev *stdev,
if (ret)
return ret;
} else {
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
if (copy_to_user(uinfo, &info, sizeof(info)))
@@ -969,6 +1024,9 @@ static int event_ctl(struct switchtec_dev *stdev,
return PTR_ERR(reg);
hdr = ioread32(reg);
+ if (hdr & SWITCHTEC_EVENT_NOT_SUPP)
+ return -EOPNOTSUPP;
+
for (i = 0; i < ARRAY_SIZE(ctl->data); i++)
ctl->data[i] = ioread32(&reg[i + 1]);
@@ -1041,7 +1099,7 @@ static int ioctl_event_ctl(struct switchtec_dev *stdev,
for (ctl.index = 0; ctl.index < nr_idxs; ctl.index++) {
ctl.flags = event_flags;
ret = event_ctl(stdev, &ctl);
- if (ret < 0)
+ if (ret < 0 && ret != -EOPNOTSUPP)
return ret;
}
} else {
@@ -1078,7 +1136,7 @@ static int ioctl_pff_to_port(struct switchtec_dev *stdev,
break;
}
- reg = ioread32(&pcfg->vep_pff_inst_id);
+ reg = ioread32(&pcfg->vep_pff_inst_id) & 0xFF;
if (reg == p.pff) {
p.port = SWITCHTEC_IOCTL_PFF_VEP;
break;
@@ -1124,7 +1182,7 @@ static int ioctl_port_to_pff(struct switchtec_dev *stdev,
p.pff = ioread32(&pcfg->usp_pff_inst_id);
break;
case SWITCHTEC_IOCTL_PFF_VEP:
- p.pff = ioread32(&pcfg->vep_pff_inst_id);
+ p.pff = ioread32(&pcfg->vep_pff_inst_id) & 0xFF;
break;
default:
if (p.port > ARRAY_SIZE(pcfg->dsp_pff_inst_id))
@@ -1348,6 +1406,9 @@ static int mask_event(struct switchtec_dev *stdev, int eid, int idx)
hdr_reg = event_regs[eid].map_reg(stdev, off, idx);
hdr = ioread32(hdr_reg);
+ if (hdr & SWITCHTEC_EVENT_NOT_SUPP)
+ return 0;
+
if (!(hdr & SWITCHTEC_EVENT_OCCURRED && hdr & SWITCHTEC_EVENT_EN_IRQ))
return 0;
@@ -1498,7 +1559,7 @@ static void init_pff(struct switchtec_dev *stdev)
if (reg < stdev->pff_csr_count)
stdev->pff_local[reg] = 1;
- reg = ioread32(&pcfg->vep_pff_inst_id);
+ reg = ioread32(&pcfg->vep_pff_inst_id) & 0xFF;
if (reg < stdev->pff_csr_count)
stdev->pff_local[reg] = 1;
@@ -1556,7 +1617,7 @@ static int switchtec_init_pci(struct switchtec_dev *stdev,
else if (stdev->gen == SWITCHTEC_GEN4)
part_id = &stdev->mmio_sys_info->gen4.partition_id;
else
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
stdev->partition = ioread8(part_id);
stdev->partition_count = ioread8(&stdev->mmio_ntb->partition_count);
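
From user space, the new MRPC_IO_ERROR state shows up as read() failing with EIO once the firmware stops answering, instead of the command lingering until the EBADE path. A hypothetical sketch against the switchtec character device (the command number and device path are assumptions, not taken from the patch):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	uint8_t cmd[8] = { 0 }, rsp[516];
	uint32_t mrpc_cmd = 65;		/* assumed echo command ID */
	int fd = open("/dev/switchtec0", O_RDWR);

	if (fd < 0)
		return 1;

	memcpy(cmd, &mrpc_cmd, sizeof(mrpc_cmd));	/* 4-byte command + input data */
	if (write(fd, cmd, sizeof(cmd)) < 0)
		return 1;

	if (read(fd, rsp, sizeof(rsp)) < 0)
		perror("read");		/* EIO now signals lost firmware */

	close(fd);
	return 0;
}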
diff --git a/drivers/pci/vpd.c b/drivers/pci/vpd.c
index 4be24890132e..a4fc4d0690fe 100644
--- a/drivers/pci/vpd.c
+++ b/drivers/pci/vpd.c
@@ -57,10 +57,7 @@ static size_t pci_vpd_size(struct pci_dev *dev)
size_t off = 0, size;
unsigned char tag, header[1+2]; /* 1 byte tag, 2 bytes length */
- /* Otherwise the following reads would fail. */
- dev->vpd.len = PCI_VPD_MAX_SIZE;
-
- while (pci_read_vpd(dev, off, 1, header) == 1) {
+ while (pci_read_vpd_any(dev, off, 1, header) == 1) {
size = 0;
if (off == 0 && (header[0] == 0x00 || header[0] == 0xff))
@@ -68,7 +65,7 @@ static size_t pci_vpd_size(struct pci_dev *dev)
if (header[0] & PCI_VPD_LRDT) {
/* Large Resource Data Type Tag */
- if (pci_read_vpd(dev, off + 1, 2, &header[1]) != 2) {
+ if (pci_read_vpd_any(dev, off + 1, 2, &header[1]) != 2) {
pci_warn(dev, "failed VPD read at offset %zu\n",
off + 1);
return off ?: PCI_VPD_SZ_INVALID;
@@ -99,14 +96,14 @@ error:
return off ?: PCI_VPD_SZ_INVALID;
}
-static bool pci_vpd_available(struct pci_dev *dev)
+static bool pci_vpd_available(struct pci_dev *dev, bool check_size)
{
struct pci_vpd *vpd = &dev->vpd;
if (!vpd->cap)
return false;
- if (vpd->len == 0) {
+ if (vpd->len == 0 && check_size) {
vpd->len = pci_vpd_size(dev);
if (vpd->len == PCI_VPD_SZ_INVALID) {
vpd->cap = 0;
@@ -156,24 +153,27 @@ static int pci_vpd_wait(struct pci_dev *dev, bool set)
}
static ssize_t pci_vpd_read(struct pci_dev *dev, loff_t pos, size_t count,
- void *arg)
+ void *arg, bool check_size)
{
struct pci_vpd *vpd = &dev->vpd;
+ unsigned int max_len;
int ret = 0;
loff_t end = pos + count;
u8 *buf = arg;
- if (!pci_vpd_available(dev))
+ if (!pci_vpd_available(dev, check_size))
return -ENODEV;
if (pos < 0)
return -EINVAL;
- if (pos > vpd->len)
+ max_len = check_size ? vpd->len : PCI_VPD_MAX_SIZE;
+
+ if (pos >= max_len)
return 0;
- if (end > vpd->len) {
- end = vpd->len;
+ if (end > max_len) {
+ end = max_len;
count = end - pos;
}
@@ -217,20 +217,23 @@ static ssize_t pci_vpd_read(struct pci_dev *dev, loff_t pos, size_t count,
}
static ssize_t pci_vpd_write(struct pci_dev *dev, loff_t pos, size_t count,
- const void *arg)
+ const void *arg, bool check_size)
{
struct pci_vpd *vpd = &dev->vpd;
+ unsigned int max_len;
const u8 *buf = arg;
loff_t end = pos + count;
int ret = 0;
- if (!pci_vpd_available(dev))
+ if (!pci_vpd_available(dev, check_size))
return -ENODEV;
if (pos < 0 || (pos & 3) || (count & 3))
return -EINVAL;
- if (end > vpd->len)
+ max_len = check_size ? vpd->len : PCI_VPD_MAX_SIZE;
+
+ if (end > max_len)
return -EINVAL;
if (mutex_lock_killable(&vpd->lock))
@@ -313,7 +316,7 @@ void *pci_vpd_alloc(struct pci_dev *dev, unsigned int *size)
void *buf;
int cnt;
- if (!pci_vpd_available(dev))
+ if (!pci_vpd_available(dev, true))
return ERR_PTR(-ENODEV);
len = dev->vpd.len;
@@ -381,6 +384,24 @@ static int pci_vpd_find_info_keyword(const u8 *buf, unsigned int off,
return -ENOENT;
}
+static ssize_t __pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf,
+ bool check_size)
+{
+ ssize_t ret;
+
+ if (dev->dev_flags & PCI_DEV_FLAGS_VPD_REF_F0) {
+ dev = pci_get_func0_dev(dev);
+ if (!dev)
+ return -ENODEV;
+
+ ret = pci_vpd_read(dev, pos, count, buf, check_size);
+ pci_dev_put(dev);
+ return ret;
+ }
+
+ return pci_vpd_read(dev, pos, count, buf, check_size);
+}
+
/**
* pci_read_vpd - Read one entry from Vital Product Data
* @dev: PCI device struct
@@ -390,6 +411,20 @@ static int pci_vpd_find_info_keyword(const u8 *buf, unsigned int off,
*/
ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf)
{
+ return __pci_read_vpd(dev, pos, count, buf, true);
+}
+EXPORT_SYMBOL(pci_read_vpd);
+
+/* Same as pci_read_vpd(), but allow access to any address */
+ssize_t pci_read_vpd_any(struct pci_dev *dev, loff_t pos, size_t count, void *buf)
+{
+ return __pci_read_vpd(dev, pos, count, buf, false);
+}
+EXPORT_SYMBOL(pci_read_vpd_any);
+
+static ssize_t __pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count,
+ const void *buf, bool check_size)
+{
ssize_t ret;
if (dev->dev_flags & PCI_DEV_FLAGS_VPD_REF_F0) {
@@ -397,14 +432,13 @@ ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf)
if (!dev)
return -ENODEV;
- ret = pci_vpd_read(dev, pos, count, buf);
+ ret = pci_vpd_write(dev, pos, count, buf, check_size);
pci_dev_put(dev);
return ret;
}
- return pci_vpd_read(dev, pos, count, buf);
+ return pci_vpd_write(dev, pos, count, buf, check_size);
}
-EXPORT_SYMBOL(pci_read_vpd);
/**
* pci_write_vpd - Write entry to Vital Product Data
@@ -415,22 +449,17 @@ EXPORT_SYMBOL(pci_read_vpd);
*/
ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf)
{
- ssize_t ret;
-
- if (dev->dev_flags & PCI_DEV_FLAGS_VPD_REF_F0) {
- dev = pci_get_func0_dev(dev);
- if (!dev)
- return -ENODEV;
-
- ret = pci_vpd_write(dev, pos, count, buf);
- pci_dev_put(dev);
- return ret;
- }
-
- return pci_vpd_write(dev, pos, count, buf);
+ return __pci_write_vpd(dev, pos, count, buf, true);
}
EXPORT_SYMBOL(pci_write_vpd);
+/* Same as pci_write_vpd(), but allow access to any address */
+ssize_t pci_write_vpd_any(struct pci_dev *dev, loff_t pos, size_t count, const void *buf)
+{
+ return __pci_write_vpd(dev, pos, count, buf, false);
+}
+EXPORT_SYMBOL(pci_write_vpd_any);
+
int pci_vpd_find_ro_info_keyword(const void *buf, unsigned int len,
const char *kw, unsigned int *size)
{
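
A hedged sketch of how a caller might use the new no-size-check accessors exported above, for a device whose VPD layout the driver knows but which is not well-formed enough for pci_vpd_size() (the function name and offset are made up for illustration):

static int example_read_raw_vpd(struct pci_dev *pdev)
{
	u8 buf[64];
	ssize_t ret;

	/* read past the computed VPD size, up to PCI_VPD_MAX_SIZE */
	ret = pci_read_vpd_any(pdev, 0x200, sizeof(buf), buf);
	if (ret < 0)
		return ret;

	/* ret is the number of bytes actually read and may be short */
	return 0;
}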
diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
index 2156c632524d..d858d25b6cab 100644
--- a/drivers/pci/xen-pcifront.c
+++ b/drivers/pci/xen-pcifront.c
@@ -588,61 +588,43 @@ static pci_ers_result_t pcifront_common_process(int cmd,
struct pcifront_device *pdev,
pci_channel_state_t state)
{
- pci_ers_result_t result;
struct pci_driver *pdrv;
int bus = pdev->sh_info->aer_op.bus;
int devfn = pdev->sh_info->aer_op.devfn;
int domain = pdev->sh_info->aer_op.domain;
struct pci_dev *pcidev;
- int flag = 0;
dev_dbg(&pdev->xdev->dev,
"pcifront AER process: cmd %x (bus:%x, devfn%x)",
cmd, bus, devfn);
- result = PCI_ERS_RESULT_NONE;
pcidev = pci_get_domain_bus_and_slot(domain, bus, devfn);
- if (!pcidev || !pcidev->driver) {
+ if (!pcidev || !pcidev->dev.driver) {
dev_err(&pdev->xdev->dev, "device or AER driver is NULL\n");
pci_dev_put(pcidev);
- return result;
+ return PCI_ERS_RESULT_NONE;
}
- pdrv = pcidev->driver;
-
- if (pdrv) {
- if (pdrv->err_handler && pdrv->err_handler->error_detected) {
- pci_dbg(pcidev, "trying to call AER service\n");
- if (pcidev) {
- flag = 1;
- switch (cmd) {
- case XEN_PCI_OP_aer_detected:
- result = pdrv->err_handler->
- error_detected(pcidev, state);
- break;
- case XEN_PCI_OP_aer_mmio:
- result = pdrv->err_handler->
- mmio_enabled(pcidev);
- break;
- case XEN_PCI_OP_aer_slotreset:
- result = pdrv->err_handler->
- slot_reset(pcidev);
- break;
- case XEN_PCI_OP_aer_resume:
- pdrv->err_handler->resume(pcidev);
- break;
- default:
- dev_err(&pdev->xdev->dev,
- "bad request in aer recovery "
- "operation!\n");
-
- }
- }
+ pdrv = to_pci_driver(pcidev->dev.driver);
+
+ if (pdrv->err_handler && pdrv->err_handler->error_detected) {
+ pci_dbg(pcidev, "trying to call AER service\n");
+ switch (cmd) {
+ case XEN_PCI_OP_aer_detected:
+ return pdrv->err_handler->error_detected(pcidev, state);
+ case XEN_PCI_OP_aer_mmio:
+ return pdrv->err_handler->mmio_enabled(pcidev);
+ case XEN_PCI_OP_aer_slotreset:
+ return pdrv->err_handler->slot_reset(pcidev);
+ case XEN_PCI_OP_aer_resume:
+ pdrv->err_handler->resume(pcidev);
+ return PCI_ERS_RESULT_NONE;
+ default:
+ dev_err(&pdev->xdev->dev,
+ "bad request in aer recovery operation!\n");
}
}
- if (!flag)
- result = PCI_ERS_RESULT_NONE;
- return result;
+ return PCI_ERS_RESULT_NONE;
}
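
The refactor above leans on the generic driver pointer rather than the cached pci_dev->driver; the resulting lookup pattern, shown here as a minimal helper (the example_* name is hypothetical):

static bool example_has_aer_handlers(struct pci_dev *pcidev)
{
	struct pci_driver *pdrv;

	if (!pcidev->dev.driver)
		return false;

	pdrv = to_pci_driver(pcidev->dev.driver);
	return pdrv->err_handler && pdrv->err_handler->error_detected;
}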