Diffstat (limited to 'drivers/pci/controller')
-rw-r--r-- drivers/pci/controller/Kconfig | 14
-rw-r--r-- drivers/pci/controller/Makefile | 2
-rw-r--r-- drivers/pci/controller/cadence/Kconfig | 2
-rw-r--r-- drivers/pci/controller/cadence/pci-j721e.c | 176
-rw-r--r-- drivers/pci/controller/cadence/pcie-cadence-ep.c | 18
-rw-r--r-- drivers/pci/controller/cadence/pcie-cadence-host.c | 44
-rw-r--r-- drivers/pci/controller/cadence/pcie-cadence.c | 4
-rw-r--r-- drivers/pci/controller/cadence/pcie-cadence.h | 15
-rw-r--r-- drivers/pci/controller/dwc/Kconfig | 48
-rw-r--r-- drivers/pci/controller/dwc/Makefile | 5
-rw-r--r-- drivers/pci/controller/dwc/pci-dra7xx.c | 57
-rw-r--r-- drivers/pci/controller/dwc/pci-exynos.c | 57
-rw-r--r-- drivers/pci/controller/dwc/pci-imx6.c | 1448
-rw-r--r-- drivers/pci/controller/dwc/pci-keystone.c | 240
-rw-r--r-- drivers/pci/controller/dwc/pci-layerscape-ep.c | 11
-rw-r--r-- drivers/pci/controller/dwc/pci-layerscape.c | 10
-rw-r--r-- drivers/pci/controller/dwc/pci-meson.c | 1
-rw-r--r-- drivers/pci/controller/dwc/pcie-al.c | 16
-rw-r--r-- drivers/pci/controller/dwc/pcie-amd-mdb.c | 476
-rw-r--r-- drivers/pci/controller/dwc/pcie-artpec6.c | 36
-rw-r--r-- drivers/pci/controller/dwc/pcie-bt1.c | 2
-rw-r--r-- drivers/pci/controller/dwc/pcie-designware-debugfs.c | 677
-rw-r--r-- drivers/pci/controller/dwc/pcie-designware-ep.c | 700
-rw-r--r-- drivers/pci/controller/dwc/pcie-designware-host.c | 250
-rw-r--r-- drivers/pci/controller/dwc/pcie-designware-plat.c | 11
-rw-r--r-- drivers/pci/controller/dwc/pcie-designware.c | 284
-rw-r--r-- drivers/pci/controller/dwc/pcie-designware.h | 186
-rw-r--r-- drivers/pci/controller/dwc/pcie-dw-rockchip.c | 418
-rw-r--r-- drivers/pci/controller/dwc/pcie-histb.c | 14
-rw-r--r-- drivers/pci/controller/dwc/pcie-intel-gw.c | 14
-rw-r--r-- drivers/pci/controller/dwc/pcie-keembay.c | 18
-rw-r--r-- drivers/pci/controller/dwc/pcie-kirin.c | 174
-rw-r--r-- drivers/pci/controller/dwc/pcie-qcom-common.c | 78
-rw-r--r-- drivers/pci/controller/dwc/pcie-qcom-common.h | 14
-rw-r--r-- drivers/pci/controller/dwc/pcie-qcom-ep.c | 117
-rw-r--r-- drivers/pci/controller/dwc/pcie-qcom.c | 504
-rw-r--r-- drivers/pci/controller/dwc/pcie-rcar-gen4.c | 347
-rw-r--r-- drivers/pci/controller/dwc/pcie-spear13xx.c | 2
-rw-r--r-- drivers/pci/controller/dwc/pcie-tegra194.c | 58
-rw-r--r-- drivers/pci/controller/dwc/pcie-uniphier-ep.c | 15
-rw-r--r-- drivers/pci/controller/dwc/pcie-uniphier.c | 2
-rw-r--r-- drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c | 2
-rw-r--r-- drivers/pci/controller/mobiveil/pcie-mobiveil-host.c | 22
-rw-r--r-- drivers/pci/controller/mobiveil/pcie-mobiveil.h | 2
-rw-r--r-- drivers/pci/controller/pci-aardvark.c | 91
-rw-r--r-- drivers/pci/controller/pci-ftpci100.c | 4
-rw-r--r-- drivers/pci/controller/pci-host-common.c | 7
-rw-r--r-- drivers/pci/controller/pci-host-generic.c | 3
-rw-r--r-- drivers/pci/controller/pci-hyperv.c | 24
-rw-r--r-- drivers/pci/controller/pci-loongson.c | 13
-rw-r--r-- drivers/pci/controller/pci-mvebu.c | 11
-rw-r--r-- drivers/pci/controller/pci-tegra.c | 153
-rw-r--r-- drivers/pci/controller/pci-thunder-ecam.c | 2
-rw-r--r-- drivers/pci/controller/pci-thunder-pem.c | 4
-rw-r--r-- drivers/pci/controller/pci-xgene-msi.c | 57
-rw-r--r-- drivers/pci/controller/pci-xgene.c | 6
-rw-r--r-- drivers/pci/controller/pcie-altera-msi.c | 18
-rw-r--r-- drivers/pci/controller/pcie-altera.c | 269
-rw-r--r-- drivers/pci/controller/pcie-apple.c | 149
-rw-r--r-- drivers/pci/controller/pcie-brcmstb.c | 700
-rw-r--r-- drivers/pci/controller/pcie-hisi-error.c | 2
-rw-r--r-- drivers/pci/controller/pcie-iproc-msi.c | 6
-rw-r--r-- drivers/pci/controller/pcie-iproc-platform.c | 2
-rw-r--r-- drivers/pci/controller/pcie-iproc.c | 18
-rw-r--r-- drivers/pci/controller/pcie-mediatek-gen3.c | 395
-rw-r--r-- drivers/pci/controller/pcie-mediatek.c | 36
-rw-r--r-- drivers/pci/controller/pcie-microchip-host.c | 1216
-rw-r--r-- drivers/pci/controller/pcie-mt7621.c | 20
-rw-r--r-- drivers/pci/controller/pcie-rcar-ep.c | 4
-rw-r--r-- drivers/pci/controller/pcie-rcar-host.c | 30
-rw-r--r-- drivers/pci/controller/pcie-rockchip-ep.c | 453
-rw-r--r-- drivers/pci/controller/pcie-rockchip-host.c | 15
-rw-r--r-- drivers/pci/controller/pcie-rockchip.c | 240
-rw-r--r-- drivers/pci/controller/pcie-rockchip.h | 60
-rw-r--r-- drivers/pci/controller/pcie-xilinx-cpm.c | 113
-rw-r--r-- drivers/pci/controller/pcie-xilinx-dma-pl.c | 78
-rw-r--r-- drivers/pci/controller/pcie-xilinx-nwl.c | 161
-rw-r--r-- drivers/pci/controller/pcie-xilinx.c | 14
-rw-r--r-- drivers/pci/controller/plda/Kconfig | 30
-rw-r--r-- drivers/pci/controller/plda/Makefile | 4
-rw-r--r-- drivers/pci/controller/plda/pcie-microchip-host.c | 833
-rw-r--r-- drivers/pci/controller/plda/pcie-plda-host.c | 653
-rw-r--r-- drivers/pci/controller/plda/pcie-plda.h | 275
-rw-r--r-- drivers/pci/controller/plda/pcie-starfive.c | 492
-rw-r--r-- drivers/pci/controller/vmd.c | 75
85 files changed, 9352 insertions, 3975 deletions
diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig
index e534c02ee34f..eb3cc28d43f8 100644
--- a/drivers/pci/controller/Kconfig
+++ b/drivers/pci/controller/Kconfig
@@ -40,6 +40,7 @@ config PCIE_APPLE
depends on OF
depends on PCI_MSI
select PCI_HOST_COMMON
+ select IRQ_MSI_LIB
help
Say Y here if you want to enable PCIe controller support on Apple
system-on-chips, like the Apple M1. This is required for the USB
@@ -196,7 +197,7 @@ config PCIE_MEDIATEK
config PCIE_MEDIATEK_GEN3
tristate "MediaTek Gen3 PCIe controller"
- depends on ARCH_MEDIATEK || COMPILE_TEST
+ depends on ARCH_AIROHA || ARCH_MEDIATEK || COMPILE_TEST
depends on PCI_MSI
help
Adds support for PCIe Gen3 MAC controller for MediaTek SoCs.
@@ -215,14 +216,6 @@ config PCIE_MT7621
help
This selects a driver for the MediaTek MT7621 PCIe Controller.
-config PCIE_MICROCHIP_HOST
- tristate "Microchip AXI PCIe controller"
- depends on PCI_MSI && OF
- select PCI_HOST_COMMON
- help
- Say Y here if you want kernel to support the Microchip AXI PCIe
- Host Bridge driver.
-
config PCI_HYPERV_INTERFACE
tristate "Microsoft Hyper-V PCI Interface"
depends on ((X86 && X86_64) || ARM64) && HYPERV && PCI_MSI
@@ -235,6 +228,7 @@ config PCI_TEGRA
bool "NVIDIA Tegra PCIe controller"
depends on ARCH_TEGRA || COMPILE_TEST
depends on PCI_MSI
+ select IRQ_MSI_LIB
help
Say Y here if you want support for the PCIe host controller found
on NVIDIA Tegra SoCs.
@@ -311,6 +305,7 @@ config PCI_XGENE_MSI
bool "X-Gene v1 PCIe MSI feature"
depends on PCI_XGENE
depends on PCI_MSI
+ select IRQ_MSI_LIB
default y
help
Say Y here if you want PCIe MSI support for the APM X-Gene v1 SoC.
@@ -356,4 +351,5 @@ config PCIE_XILINX_CPM
source "drivers/pci/controller/cadence/Kconfig"
source "drivers/pci/controller/dwc/Kconfig"
source "drivers/pci/controller/mobiveil/Kconfig"
+source "drivers/pci/controller/plda/Kconfig"
endmenu
diff --git a/drivers/pci/controller/Makefile b/drivers/pci/controller/Makefile
index f2b19e6174af..038ccbd9e3ba 100644
--- a/drivers/pci/controller/Makefile
+++ b/drivers/pci/controller/Makefile
@@ -33,7 +33,6 @@ obj-$(CONFIG_PCIE_ROCKCHIP_EP) += pcie-rockchip-ep.o
obj-$(CONFIG_PCIE_ROCKCHIP_HOST) += pcie-rockchip-host.o
obj-$(CONFIG_PCIE_MEDIATEK) += pcie-mediatek.o
obj-$(CONFIG_PCIE_MEDIATEK_GEN3) += pcie-mediatek-gen3.o
-obj-$(CONFIG_PCIE_MICROCHIP_HOST) += pcie-microchip-host.o
obj-$(CONFIG_VMD) += vmd.o
obj-$(CONFIG_PCIE_BRCMSTB) += pcie-brcmstb.o
obj-$(CONFIG_PCI_LOONGSON) += pci-loongson.o
@@ -44,6 +43,7 @@ obj-$(CONFIG_PCIE_MT7621) += pcie-mt7621.o
# pcie-hisi.o quirks are needed even without CONFIG_PCIE_DW
obj-y += dwc/
obj-y += mobiveil/
+obj-y += plda/
# The following drivers are for devices that use the generic ACPI
diff --git a/drivers/pci/controller/cadence/Kconfig b/drivers/pci/controller/cadence/Kconfig
index 1d5a70c9055e..8a0044bb3989 100644
--- a/drivers/pci/controller/cadence/Kconfig
+++ b/drivers/pci/controller/cadence/Kconfig
@@ -38,7 +38,7 @@ config PCIE_CADENCE_PLAT_EP
select PCIE_CADENCE_EP
select PCIE_CADENCE_PLAT
help
- Say Y here if you want to support the Cadence PCIe platform controller in
+ Say Y here if you want to support the Cadence PCIe platform controller in
endpoint mode. This PCIe controller may be embedded into many
different vendors SoCs.
diff --git a/drivers/pci/controller/cadence/pci-j721e.c b/drivers/pci/controller/cadence/pci-j721e.c
index 85718246016b..ef1cfdae33bb 100644
--- a/drivers/pci/controller/cadence/pci-j721e.c
+++ b/drivers/pci/controller/cadence/pci-j721e.c
@@ -7,6 +7,8 @@
*/
#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/container_of.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/io.h>
@@ -22,6 +24,8 @@
#include "../../pci.h"
#include "pcie-cadence.h"
+#define cdns_pcie_to_rc(p) container_of(p, struct cdns_pcie_rc, pcie)
+
#define ENABLE_REG_SYS_2 0x108
#define STATUS_REG_SYS_2 0x508
#define STATUS_CLR_REG_SYS_2 0x708
@@ -44,6 +48,7 @@ enum link_status {
#define J721E_MODE_RC BIT(7)
#define LANE_COUNT(n) ((n) << 8)
+#define ACSPCIE_PAD_DISABLE_MASK GENMASK(1, 0)
#define GENERATION_SEL_MASK GENMASK(1, 0)
struct j721e_pcie {
@@ -52,6 +57,7 @@ struct j721e_pcie {
u32 mode;
u32 num_lanes;
u32 max_lanes;
+ struct gpio_desc *reset_gpio;
void __iomem *user_cfg_base;
void __iomem *intd_cfg_base;
u32 linkdown_irq_regfield;
@@ -220,6 +226,36 @@ static int j721e_pcie_set_lane_count(struct j721e_pcie *pcie,
return ret;
}
+static int j721e_enable_acspcie_refclk(struct j721e_pcie *pcie,
+ struct regmap *syscon)
+{
+ struct device *dev = pcie->cdns_pcie->dev;
+ struct device_node *node = dev->of_node;
+ u32 mask = ACSPCIE_PAD_DISABLE_MASK;
+ struct of_phandle_args args;
+ u32 val;
+ int ret;
+
+ ret = of_parse_phandle_with_fixed_args(node,
+ "ti,syscon-acspcie-proxy-ctrl",
+ 1, 0, &args);
+ if (ret) {
+ dev_err(dev,
+ "ti,syscon-acspcie-proxy-ctrl has invalid arguments\n");
+ return ret;
+ }
+
+ /* Clear PAD IO disable bits to enable refclk output */
+ val = ~(args.args[0]);
+ ret = regmap_update_bits(syscon, 0, mask, val);
+ if (ret) {
+ dev_err(dev, "failed to enable ACSPCIE refclk: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
static int j721e_pcie_ctrl_init(struct j721e_pcie *pcie)
{
struct device *dev = pcie->cdns_pcie->dev;
@@ -259,7 +295,13 @@ static int j721e_pcie_ctrl_init(struct j721e_pcie *pcie)
return ret;
}
- return 0;
+ /* Enable ACSPCIE refclk output if the optional property exists */
+ syscon = syscon_regmap_lookup_by_phandle_optional(node,
+ "ti,syscon-acspcie-proxy-ctrl");
+ if (!syscon)
+ return 0;
+
+ return j721e_enable_acspcie_refclk(pcie, syscon);
}
static int cdns_ti_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
@@ -313,6 +355,7 @@ static const struct j721e_pcie_data j7200_pcie_rc_data = {
static const struct j721e_pcie_data j7200_pcie_ep_data = {
.mode = PCI_MODE_EP,
.quirk_detect_quiet_flag = true,
+ .linkdown_irq_regfield = J7200_LINK_DOWN,
.quirk_disable_flr = true,
.max_lanes = 2,
};
@@ -334,16 +377,23 @@ static const struct j721e_pcie_data j784s4_pcie_rc_data = {
.mode = PCI_MODE_RC,
.quirk_retrain_flag = true,
.byte_access_allowed = false,
- .linkdown_irq_regfield = LINK_DOWN,
+ .linkdown_irq_regfield = J7200_LINK_DOWN,
.max_lanes = 4,
};
static const struct j721e_pcie_data j784s4_pcie_ep_data = {
.mode = PCI_MODE_EP,
- .linkdown_irq_regfield = LINK_DOWN,
+ .linkdown_irq_regfield = J7200_LINK_DOWN,
.max_lanes = 4,
};
+static const struct j721e_pcie_data j722s_pcie_rc_data = {
+ .mode = PCI_MODE_RC,
+ .linkdown_irq_regfield = J7200_LINK_DOWN,
+ .byte_access_allowed = true,
+ .max_lanes = 1,
+};
+
static const struct of_device_id of_j721e_pcie_match[] = {
{
.compatible = "ti,j721e-pcie-host",
@@ -377,6 +427,10 @@ static const struct of_device_id of_j721e_pcie_match[] = {
.compatible = "ti,j784s4-pcie-ep",
.data = &j784s4_pcie_ep_data,
},
+ {
+ .compatible = "ti,j722s-pcie-host",
+ .data = &j722s_pcie_rc_data,
+ },
{},
};
@@ -482,20 +536,20 @@ static int j721e_pcie_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
ret = pm_runtime_get_sync(dev);
if (ret < 0) {
- dev_err(dev, "pm_runtime_get_sync failed\n");
+ dev_err_probe(dev, ret, "pm_runtime_get_sync failed\n");
goto err_get_sync;
}
ret = j721e_pcie_ctrl_init(pcie);
if (ret < 0) {
- dev_err(dev, "pm_runtime_get_sync failed\n");
+ dev_err_probe(dev, ret, "pm_runtime_get_sync failed\n");
goto err_get_sync;
}
ret = devm_request_irq(dev, irq, j721e_pcie_link_irq_handler, 0,
"j721e-pcie-link-down-irq", pcie);
if (ret < 0) {
- dev_err(dev, "failed to request link state IRQ %d\n", irq);
+ dev_err_probe(dev, ret, "failed to request link state IRQ %d\n", irq);
goto err_get_sync;
}
@@ -505,42 +559,39 @@ static int j721e_pcie_probe(struct platform_device *pdev)
case PCI_MODE_RC:
gpiod = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(gpiod)) {
- ret = PTR_ERR(gpiod);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "Failed to get reset GPIO\n");
+ ret = dev_err_probe(dev, PTR_ERR(gpiod), "Failed to get reset GPIO\n");
goto err_get_sync;
}
+ pcie->reset_gpio = gpiod;
ret = cdns_pcie_init_phy(dev, cdns_pcie);
if (ret) {
- dev_err(dev, "Failed to init phy\n");
+ dev_err_probe(dev, ret, "Failed to init phy\n");
goto err_get_sync;
}
clk = devm_clk_get_optional(dev, "pcie_refclk");
if (IS_ERR(clk)) {
- ret = PTR_ERR(clk);
- dev_err(dev, "failed to get pcie_refclk\n");
+ ret = dev_err_probe(dev, PTR_ERR(clk), "failed to get pcie_refclk\n");
goto err_pcie_setup;
}
ret = clk_prepare_enable(clk);
if (ret) {
- dev_err(dev, "failed to enable pcie_refclk\n");
+ dev_err_probe(dev, ret, "failed to enable pcie_refclk\n");
goto err_pcie_setup;
}
pcie->refclk = clk;
/*
- * "Power Sequencing and Reset Signal Timings" table in
- * PCI EXPRESS CARD ELECTROMECHANICAL SPECIFICATION, REV. 3.0
- * indicates PERST# should be deasserted after minimum of 100us
- * once REFCLK is stable. The REFCLK to the connector in RC
- * mode is selected while enabling the PHY. So deassert PERST#
- * after 100 us.
+ * Section 2.2 of the PCI Express Card Electromechanical
+ * Specification (Revision 5.1) mandates that the deassertion
+ * of the PERST# signal should be delayed by 100 ms (TPVPERL).
+ * This shall ensure that the power and the reference clock
+ * are stable.
*/
if (gpiod) {
- usleep_range(100, 200);
+ msleep(PCIE_T_PVPERL_MS);
gpiod_set_value_cansleep(gpiod, 1);
}
@@ -554,7 +605,7 @@ static int j721e_pcie_probe(struct platform_device *pdev)
case PCI_MODE_EP:
ret = cdns_pcie_init_phy(dev, cdns_pcie);
if (ret) {
- dev_err(dev, "Failed to init phy\n");
+ dev_err_probe(dev, ret, "Failed to init phy\n");
goto err_get_sync;
}
@@ -589,13 +640,94 @@ static void j721e_pcie_remove(struct platform_device *pdev)
pm_runtime_disable(dev);
}
+static int j721e_pcie_suspend_noirq(struct device *dev)
+{
+ struct j721e_pcie *pcie = dev_get_drvdata(dev);
+
+ if (pcie->mode == PCI_MODE_RC) {
+ gpiod_set_value_cansleep(pcie->reset_gpio, 0);
+ clk_disable_unprepare(pcie->refclk);
+ }
+
+ cdns_pcie_disable_phy(pcie->cdns_pcie);
+
+ return 0;
+}
+
+static int j721e_pcie_resume_noirq(struct device *dev)
+{
+ struct j721e_pcie *pcie = dev_get_drvdata(dev);
+ struct cdns_pcie *cdns_pcie = pcie->cdns_pcie;
+ int ret;
+
+ ret = j721e_pcie_ctrl_init(pcie);
+ if (ret < 0)
+ return ret;
+
+ j721e_pcie_config_link_irq(pcie);
+
+ /*
+ * This is not called explicitly in the probe, it is called by
+ * cdns_pcie_init_phy().
+ */
+ ret = cdns_pcie_enable_phy(pcie->cdns_pcie);
+ if (ret < 0)
+ return ret;
+
+ if (pcie->mode == PCI_MODE_RC) {
+ struct cdns_pcie_rc *rc = cdns_pcie_to_rc(cdns_pcie);
+
+ ret = clk_prepare_enable(pcie->refclk);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Section 2.2 of the PCI Express Card Electromechanical
+ * Specification (Revision 5.1) mandates that the deassertion
+ * of the PERST# signal should be delayed by 100 ms (TPVPERL).
+ * This shall ensure that the power and the reference clock
+ * are stable.
+ */
+ if (pcie->reset_gpio) {
+ msleep(PCIE_T_PVPERL_MS);
+ gpiod_set_value_cansleep(pcie->reset_gpio, 1);
+ }
+
+ ret = cdns_pcie_host_link_setup(rc);
+ if (ret < 0) {
+ clk_disable_unprepare(pcie->refclk);
+ return ret;
+ }
+
+ /*
+ * Reset internal status of BARs to force reinitialization in
+ * cdns_pcie_host_init().
+ */
+ for (enum cdns_pcie_rp_bar bar = RP_BAR0; bar <= RP_NO_BAR; bar++)
+ rc->avail_ib_bar[bar] = true;
+
+ ret = cdns_pcie_host_init(rc);
+ if (ret) {
+ clk_disable_unprepare(pcie->refclk);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static DEFINE_NOIRQ_DEV_PM_OPS(j721e_pcie_pm_ops,
+ j721e_pcie_suspend_noirq,
+ j721e_pcie_resume_noirq);
+
static struct platform_driver j721e_pcie_driver = {
.probe = j721e_pcie_probe,
- .remove_new = j721e_pcie_remove,
+ .remove = j721e_pcie_remove,
.driver = {
.name = "j721e-pcie",
.of_match_table = of_j721e_pcie_match,
.suppress_bind_attrs = true,
+ .pm = pm_sleep_ptr(&j721e_pcie_pm_ops),
},
};
builtin_platform_driver(j721e_pcie_driver);
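The hunk above wires the new suspend/resume handlers into the driver with DEFINE_NOIRQ_DEV_PM_OPS() and pm_sleep_ptr(). A minimal sketch of that pattern, reduced to the PM plumbing only; the foo_* names and the empty callbacks are hypothetical and not part of this patch:

#include <linux/platform_device.h>
#include <linux/pm.h>

static int foo_suspend_noirq(struct device *dev)
{
        /* Quiesce the controller; runs after the regular suspend phase */
        return 0;
}

static int foo_resume_noirq(struct device *dev)
{
        /* Re-initialize the controller before the regular resume phase */
        return 0;
}

/* Produces a struct dev_pm_ops with only the noirq hooks populated */
static DEFINE_NOIRQ_DEV_PM_OPS(foo_pm_ops, foo_suspend_noirq,
                               foo_resume_noirq);

static struct platform_driver foo_driver = {
        /* .probe/.remove omitted for brevity */
        .driver = {
                .name = "foo",
                /* Compiles away to NULL when CONFIG_PM_SLEEP is disabled */
                .pm = pm_sleep_ptr(&foo_pm_ops),
        },
};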
diff --git a/drivers/pci/controller/cadence/pcie-cadence-ep.c b/drivers/pci/controller/cadence/pcie-cadence-ep.c
index 81c50dc64da9..599ec4b1223e 100644
--- a/drivers/pci/controller/cadence/pcie-cadence-ep.c
+++ b/drivers/pci/controller/cadence/pcie-cadence-ep.c
@@ -99,14 +99,11 @@ static int cdns_pcie_ep_set_bar(struct pci_epc *epc, u8 fn, u8 vfn,
ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_IO_32BITS;
} else {
bool is_prefetch = !!(flags & PCI_BASE_ADDRESS_MEM_PREFETCH);
- bool is_64bits = sz > SZ_2G;
+ bool is_64bits = !!(flags & PCI_BASE_ADDRESS_MEM_TYPE_64);
if (is_64bits && (bar & 1))
return -EINVAL;
- if (is_64bits && !(flags & PCI_BASE_ADDRESS_MEM_TYPE_64))
- epf_bar->flags |= PCI_BASE_ADDRESS_MEM_TYPE_64;
-
if (is_64bits && is_prefetch)
ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_64BITS;
else if (is_prefetch)
@@ -304,12 +301,12 @@ static int cdns_pcie_ep_set_msix(struct pci_epc *epc, u8 fn, u8 vfn,
val |= interrupts;
cdns_pcie_ep_fn_writew(pcie, fn, reg, val);
- /* Set MSIX BAR and offset */
+ /* Set MSI-X BAR and offset */
reg = cap + PCI_MSIX_TABLE;
val = offset | bir;
cdns_pcie_ep_fn_writel(pcie, fn, reg, val);
- /* Set PBA BAR and offset. BAR must match MSIX BAR */
+ /* Set PBA BAR and offset. BAR must match MSI-X BAR */
reg = cap + PCI_MSIX_PBA;
val = (offset + (interrupts * PCI_MSIX_ENTRY_SIZE)) | bir;
cdns_pcie_ep_fn_writel(pcie, fn, reg, val);
@@ -355,8 +352,7 @@ static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn, u8 intx,
spin_unlock_irqrestore(&ep->lock, flags);
offset = CDNS_PCIE_NORMAL_MSG_ROUTING(MSG_ROUTING_LOCAL) |
- CDNS_PCIE_NORMAL_MSG_CODE(msg_code) |
- CDNS_PCIE_MSG_NO_DATA;
+ CDNS_PCIE_NORMAL_MSG_CODE(msg_code);
writel(0, ep->irq_cpu_addr + offset);
}
@@ -576,8 +572,8 @@ static int cdns_pcie_ep_start(struct pci_epc *epc)
/*
* Next function field in ARI_CAP_AND_CTR register for last function
- * should be 0.
- * Clearing Next Function Number field for the last function used.
+ * should be 0. Clear Next Function Number field for the last
+ * function used.
*/
last_fn = find_last_bit(&epc->function_num_map, BITS_PER_LONG);
reg = CDNS_PCIE_CORE_PF_I_ARI_CAP_AND_CTRL(last_fn);
@@ -746,6 +742,8 @@ int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
spin_lock_init(&ep->lock);
+ pci_epc_init_notify(epc);
+
return 0;
free_epc_mem:
diff --git a/drivers/pci/controller/cadence/pcie-cadence-host.c b/drivers/pci/controller/cadence/pcie-cadence-host.c
index 5b14f7ee3c79..8af95e9da7ce 100644
--- a/drivers/pci/controller/cadence/pcie-cadence-host.c
+++ b/drivers/pci/controller/cadence/pcie-cadence-host.c
@@ -485,8 +485,7 @@ static int cdns_pcie_host_init_address_translation(struct cdns_pcie_rc *rc)
return cdns_pcie_host_map_dma_ranges(rc);
}
-static int cdns_pcie_host_init(struct device *dev,
- struct cdns_pcie_rc *rc)
+int cdns_pcie_host_init(struct cdns_pcie_rc *rc)
{
int err;
@@ -497,6 +496,30 @@ static int cdns_pcie_host_init(struct device *dev,
return cdns_pcie_host_init_address_translation(rc);
}
+int cdns_pcie_host_link_setup(struct cdns_pcie_rc *rc)
+{
+ struct cdns_pcie *pcie = &rc->pcie;
+ struct device *dev = rc->pcie.dev;
+ int ret;
+
+ if (rc->quirk_detect_quiet_flag)
+ cdns_pcie_detect_quiet_min_delay_set(&rc->pcie);
+
+ cdns_pcie_host_enable_ptm_response(pcie);
+
+ ret = cdns_pcie_start_link(pcie);
+ if (ret) {
+ dev_err(dev, "Failed to start link\n");
+ return ret;
+ }
+
+ ret = cdns_pcie_host_start_link(rc);
+ if (ret)
+ dev_dbg(dev, "PCIe link never came up\n");
+
+ return 0;
+}
+
int cdns_pcie_host_setup(struct cdns_pcie_rc *rc)
{
struct device *dev = rc->pcie.dev;
@@ -533,25 +556,14 @@ int cdns_pcie_host_setup(struct cdns_pcie_rc *rc)
return PTR_ERR(rc->cfg_base);
rc->cfg_res = res;
- if (rc->quirk_detect_quiet_flag)
- cdns_pcie_detect_quiet_min_delay_set(&rc->pcie);
-
- cdns_pcie_host_enable_ptm_response(pcie);
-
- ret = cdns_pcie_start_link(pcie);
- if (ret) {
- dev_err(dev, "Failed to start link\n");
- return ret;
- }
-
- ret = cdns_pcie_host_start_link(rc);
+ ret = cdns_pcie_host_link_setup(rc);
if (ret)
- dev_dbg(dev, "PCIe link never came up\n");
+ return ret;
for (bar = RP_BAR0; bar <= RP_NO_BAR; bar++)
rc->avail_ib_bar[bar] = true;
- ret = cdns_pcie_host_init(dev, rc);
+ ret = cdns_pcie_host_init(rc);
if (ret)
return ret;
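The change above splits the old cdns_pcie_host_setup() flow so that link bring-up (cdns_pcie_host_link_setup()) and root-port/BAR programming (cdns_pcie_host_init()) can be re-run separately, which is what the j721e resume handler earlier in this series relies on. A condensed sketch of that reuse; the foo_rc_resume() wrapper is hypothetical and assumes the glue driver includes "pcie-cadence.h":

static int foo_rc_resume(struct cdns_pcie_rc *rc)
{
        enum cdns_pcie_rp_bar bar;
        int ret;

        /* Retrain the link first; config accesses depend on it */
        ret = cdns_pcie_host_link_setup(rc);
        if (ret)
                return ret;

        /* Mark every inbound BAR free again so it gets reprogrammed */
        for (bar = RP_BAR0; bar <= RP_NO_BAR; bar++)
                rc->avail_ib_bar[bar] = true;

        /* Redo root-port setup and address translation */
        return cdns_pcie_host_init(rc);
}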
diff --git a/drivers/pci/controller/cadence/pcie-cadence.c b/drivers/pci/controller/cadence/pcie-cadence.c
index 4251fac5e310..204e045aed8c 100644
--- a/drivers/pci/controller/cadence/pcie-cadence.c
+++ b/drivers/pci/controller/cadence/pcie-cadence.c
@@ -197,7 +197,7 @@ int cdns_pcie_init_phy(struct device *dev, struct cdns_pcie *pcie)
phy_count = of_property_count_strings(np, "phy-names");
if (phy_count < 1) {
- dev_err(dev, "no phy-names. PHY will not be initialized\n");
+ dev_info(dev, "no \"phy-names\" property found; PHY will not be initialized\n");
pcie->phy_count = 0;
return 0;
}
@@ -260,7 +260,7 @@ static int cdns_pcie_resume_noirq(struct device *dev)
ret = cdns_pcie_enable_phy(pcie);
if (ret) {
- dev_err(dev, "failed to enable phy\n");
+ dev_err(dev, "failed to enable PHY\n");
return ret;
}
diff --git a/drivers/pci/controller/cadence/pcie-cadence.h b/drivers/pci/controller/cadence/pcie-cadence.h
index 7a66a2f815dc..39ee9945c903 100644
--- a/drivers/pci/controller/cadence/pcie-cadence.h
+++ b/drivers/pci/controller/cadence/pcie-cadence.h
@@ -246,7 +246,7 @@ struct cdns_pcie_rp_ib_bar {
#define CDNS_PCIE_NORMAL_MSG_CODE_MASK GENMASK(15, 8)
#define CDNS_PCIE_NORMAL_MSG_CODE(code) \
(((code) << 8) & CDNS_PCIE_NORMAL_MSG_CODE_MASK)
-#define CDNS_PCIE_MSG_NO_DATA BIT(16)
+#define CDNS_PCIE_MSG_DATA BIT(16)
struct cdns_pcie;
@@ -314,7 +314,6 @@ struct cdns_pcie {
/**
* struct cdns_pcie_rc - private data for this PCIe Root Complex driver
* @pcie: Cadence PCIe controller
- * @dev: pointer to PCIe device
* @cfg_res: start/end offsets in the physical system memory to map PCI
* configuration space accesses
* @cfg_base: IO mapped window to access the PCI configuration space of a
@@ -521,10 +520,22 @@ static inline bool cdns_pcie_link_up(struct cdns_pcie *pcie)
}
#ifdef CONFIG_PCIE_CADENCE_HOST
+int cdns_pcie_host_link_setup(struct cdns_pcie_rc *rc);
+int cdns_pcie_host_init(struct cdns_pcie_rc *rc);
int cdns_pcie_host_setup(struct cdns_pcie_rc *rc);
void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int devfn,
int where);
#else
+static inline int cdns_pcie_host_link_setup(struct cdns_pcie_rc *rc)
+{
+ return 0;
+}
+
+static inline int cdns_pcie_host_init(struct cdns_pcie_rc *rc)
+{
+ return 0;
+}
+
static inline int cdns_pcie_host_setup(struct cdns_pcie_rc *rc)
{
return 0;
diff --git a/drivers/pci/controller/dwc/Kconfig b/drivers/pci/controller/dwc/Kconfig
index 8afacc90c63b..d9f0386396ed 100644
--- a/drivers/pci/controller/dwc/Kconfig
+++ b/drivers/pci/controller/dwc/Kconfig
@@ -6,6 +6,16 @@ menu "DesignWare-based PCIe controllers"
config PCIE_DW
bool
+config PCIE_DW_DEBUGFS
+ bool "DesignWare PCIe debugfs entries"
+ depends on DEBUG_FS
+ depends on PCIE_DW_HOST || PCIE_DW_EP
+ help
+ Say Y here to enable debugfs entries for the PCIe controller. These
+ entries provide various debug features related to the controller and
+ expose the RAS DES capabilities such as Silicon Debug, Error Injection
+ and Statistical Counters.
+
config PCIE_DW_HOST
bool
select PCIE_DW
@@ -27,6 +37,17 @@ config PCIE_AL
required only for DT-based platforms. ACPI platforms with the
Annapurna Labs PCIe controller don't need to enable this.
+config PCIE_AMD_MDB
+ bool "AMD MDB Versal2 PCIe controller"
+ depends on OF && (ARM64 || COMPILE_TEST)
+ depends on PCI_MSI
+ select PCIE_DW_HOST
+ help
+ Say Y here if you want to enable PCIe controller support on AMD
+ Versal2 SoCs. The AMD MDB Versal2 PCIe controller is based on
+ DesignWare IP and therefore the driver re-uses the DesignWare
+ core functions to implement the driver.
+
config PCI_MESON
tristate "Amlogic Meson PCIe controller"
default m if ARCH_MESON
@@ -265,12 +286,16 @@ config PCIE_DW_PLAT_EP
order to enable device-specific features PCI_DW_PLAT_EP must be
selected.
+config PCIE_QCOM_COMMON
+ bool
+
config PCIE_QCOM
bool "Qualcomm PCIe controller (host mode)"
depends on OF && (ARCH_QCOM || COMPILE_TEST)
depends on PCI_MSI
select PCIE_DW_HOST
select CRC8
+ select PCIE_QCOM_COMMON
help
Say Y here to enable PCIe controller support on Qualcomm SoCs. The
PCIe controller uses the DesignWare core plus Qualcomm-specific
@@ -281,6 +306,7 @@ config PCIE_QCOM_EP
depends on OF && (ARCH_QCOM || COMPILE_TEST)
depends on PCI_ENDPOINT
select PCIE_DW_EP
+ select PCIE_QCOM_COMMON
help
Say Y here to enable support for the PCIe controllers on Qualcomm SoCs
to work in endpoint mode. The PCIe controller uses the DesignWare core
@@ -311,16 +337,30 @@ config PCIE_RCAR_GEN4_EP
SoCs. To compile this driver as a module, choose M here: the module
will be called pcie-rcar-gen4.ko. This uses the DesignWare core.
+config PCIE_ROCKCHIP_DW
+ bool
+
config PCIE_ROCKCHIP_DW_HOST
- bool "Rockchip DesignWare PCIe controller"
- select PCIE_DW
- select PCIE_DW_HOST
+ bool "Rockchip DesignWare PCIe controller (host mode)"
depends on PCI_MSI
depends on ARCH_ROCKCHIP || COMPILE_TEST
depends on OF
+ select PCIE_DW_HOST
+ select PCIE_ROCKCHIP_DW
+ help
+ Enables support for the DesignWare PCIe controller in the
+ Rockchip SoC (except RK3399) to work in host mode.
+
+config PCIE_ROCKCHIP_DW_EP
+ bool "Rockchip DesignWare PCIe controller (endpoint mode)"
+ depends on ARCH_ROCKCHIP || COMPILE_TEST
+ depends on OF
+ depends on PCI_ENDPOINT
+ select PCIE_DW_EP
+ select PCIE_ROCKCHIP_DW
help
Enables support for the DesignWare PCIe controller in the
- Rockchip SoC except RK3399.
+ Rockchip SoC (except RK3399) to work in endpoint mode.
config PCI_EXYNOS
tristate "Samsung Exynos PCIe controller"
diff --git a/drivers/pci/controller/dwc/Makefile b/drivers/pci/controller/dwc/Makefile
index bac103faa523..908cb7f345db 100644
--- a/drivers/pci/controller/dwc/Makefile
+++ b/drivers/pci/controller/dwc/Makefile
@@ -1,8 +1,10 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_PCIE_DW) += pcie-designware.o
+obj-$(CONFIG_PCIE_DW_DEBUGFS) += pcie-designware-debugfs.o
obj-$(CONFIG_PCIE_DW_HOST) += pcie-designware-host.o
obj-$(CONFIG_PCIE_DW_EP) += pcie-designware-ep.o
obj-$(CONFIG_PCIE_DW_PLAT) += pcie-designware-plat.o
+obj-$(CONFIG_PCIE_AMD_MDB) += pcie-amd-mdb.o
obj-$(CONFIG_PCIE_BT1) += pcie-bt1.o
obj-$(CONFIG_PCI_DRA7XX) += pci-dra7xx.o
obj-$(CONFIG_PCI_EXYNOS) += pci-exynos.o
@@ -12,11 +14,12 @@ obj-$(CONFIG_PCIE_SPEAR13XX) += pcie-spear13xx.o
obj-$(CONFIG_PCI_KEYSTONE) += pci-keystone.o
obj-$(CONFIG_PCI_LAYERSCAPE) += pci-layerscape.o
obj-$(CONFIG_PCI_LAYERSCAPE_EP) += pci-layerscape-ep.o
+obj-$(CONFIG_PCIE_QCOM_COMMON) += pcie-qcom-common.o
obj-$(CONFIG_PCIE_QCOM) += pcie-qcom.o
obj-$(CONFIG_PCIE_QCOM_EP) += pcie-qcom-ep.o
obj-$(CONFIG_PCIE_ARMADA_8K) += pcie-armada8k.o
obj-$(CONFIG_PCIE_ARTPEC6) += pcie-artpec6.o
-obj-$(CONFIG_PCIE_ROCKCHIP_DW_HOST) += pcie-dw-rockchip.o
+obj-$(CONFIG_PCIE_ROCKCHIP_DW) += pcie-dw-rockchip.o
obj-$(CONFIG_PCIE_INTEL_GW) += pcie-intel-gw.o
obj-$(CONFIG_PCIE_KEEMBAY) += pcie-keembay.o
obj-$(CONFIG_PCIE_KIRIN) += pcie-kirin.o
diff --git a/drivers/pci/controller/dwc/pci-dra7xx.c b/drivers/pci/controller/dwc/pci-dra7xx.c
index 0e406677060d..3219704aba0e 100644
--- a/drivers/pci/controller/dwc/pci-dra7xx.c
+++ b/drivers/pci/controller/dwc/pci-dra7xx.c
@@ -13,11 +13,11 @@
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
+#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
@@ -113,9 +113,9 @@ static inline void dra7xx_pcie_writel(struct dra7xx_pcie *pcie, u32 offset,
writel(value, pcie->base + offset);
}
-static u64 dra7xx_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 pci_addr)
+static u64 dra7xx_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 cpu_addr)
{
- return pci_addr & DRA7XX_CPU_TO_BUS_ADDR;
+ return cpu_addr & DRA7XX_CPU_TO_BUS_ADDR;
}
static int dra7xx_pcie_link_up(struct dw_pcie *pci)
@@ -359,8 +359,8 @@ static int dra7xx_pcie_init_irq_domain(struct dw_pcie_rp *pp)
irq_set_chained_handler_and_data(pp->irq, dra7xx_pcie_msi_irq_handler,
pp);
- dra7xx->irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
- &intx_domain_ops, pp);
+ dra7xx->irq_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node),
+ PCI_NUM_INTX, &intx_domain_ops, pp);
of_node_put(pcie_intc_node);
if (!dra7xx->irq_domain) {
dev_err(dev, "Failed to get a INTx IRQ domain\n");
@@ -467,6 +467,15 @@ static int dra7xx_add_pcie_ep(struct dra7xx_pcie *dra7xx,
return ret;
}
+ ret = dw_pcie_ep_init_registers(ep);
+ if (ret) {
+ dev_err(dev, "Failed to initialize DWC endpoint registers\n");
+ dw_pcie_ep_deinit(ep);
+ return ret;
+ }
+
+ pci_epc_init_notify(ep->epc);
+
return 0;
}
@@ -626,30 +635,20 @@ static int dra7xx_pcie_unaligned_memaccess(struct device *dev)
{
int ret;
struct device_node *np = dev->of_node;
- struct of_phandle_args args;
+ unsigned int args[2];
struct regmap *regmap;
- regmap = syscon_regmap_lookup_by_phandle(np,
- "ti,syscon-unaligned-access");
+ regmap = syscon_regmap_lookup_by_phandle_args(np, "ti,syscon-unaligned-access",
+ 2, args);
if (IS_ERR(regmap)) {
dev_dbg(dev, "can't get ti,syscon-unaligned-access\n");
return -EINVAL;
}
- ret = of_parse_phandle_with_fixed_args(np, "ti,syscon-unaligned-access",
- 2, 0, &args);
- if (ret) {
- dev_err(dev, "failed to parse ti,syscon-unaligned-access\n");
- return ret;
- }
-
- ret = regmap_update_bits(regmap, args.args[0], args.args[1],
- args.args[1]);
+ ret = regmap_update_bits(regmap, args[0], args[1], args[1]);
if (ret)
dev_err(dev, "failed to enable unaligned access\n");
- of_node_put(args.np);
-
return ret;
}
@@ -662,18 +661,13 @@ static int dra7xx_pcie_configure_two_lane(struct device *dev,
u32 mask;
u32 val;
- pcie_syscon = syscon_regmap_lookup_by_phandle(np, "ti,syscon-lane-sel");
+ pcie_syscon = syscon_regmap_lookup_by_phandle_args(np, "ti,syscon-lane-sel",
+ 1, &pcie_reg);
if (IS_ERR(pcie_syscon)) {
dev_err(dev, "unable to get ti,syscon-lane-sel\n");
return -EINVAL;
}
- if (of_property_read_u32_index(np, "ti,syscon-lane-sel", 1,
- &pcie_reg)) {
- dev_err(dev, "couldn't get lane selection reg offset\n");
- return -EINVAL;
- }
-
mask = b1co_mode_sel_mask | PCIE_B0_B1_TSYNCEN;
val = PCIE_B1C0_MODE_SEL | PCIE_B0_B1_TSYNCEN;
regmap_update_bits(pcie_syscon, pcie_reg, mask, val);
@@ -841,14 +835,21 @@ static int dra7xx_pcie_probe(struct platform_device *pdev)
dra7xx->mode = mode;
ret = devm_request_threaded_irq(dev, irq, NULL, dra7xx_pcie_irq_handler,
- IRQF_SHARED, "dra7xx-pcie-main", dra7xx);
+ IRQF_SHARED | IRQF_ONESHOT,
+ "dra7xx-pcie-main", dra7xx);
if (ret) {
dev_err(dev, "failed to request irq\n");
- goto err_gpio;
+ goto err_deinit;
}
return 0;
+err_deinit:
+ if (dra7xx->mode == DW_PCIE_RC_TYPE)
+ dw_pcie_host_deinit(&dra7xx->pci->pp);
+ else
+ dw_pcie_ep_deinit(&dra7xx->pci->ep);
+
err_gpio:
err_get_sync:
pm_runtime_put(dev);
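The dra7xx hunks above collapse the separate syscon_regmap_lookup_by_phandle() and of_parse_phandle_with_fixed_args() calls into syscon_regmap_lookup_by_phandle_args(), which hands back both the regmap and the phandle's fixed arguments. A minimal sketch of the helper, assuming a hypothetical "vendor,syscon-foo" property whose two arguments are a register offset and a bit mask:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>

static int foo_poke_syscon(struct device *dev)
{
        unsigned int args[2];   /* args[0] = offset, args[1] = mask */
        struct regmap *regmap;

        regmap = syscon_regmap_lookup_by_phandle_args(dev->of_node,
                                                      "vendor,syscon-foo",
                                                      2, args);
        if (IS_ERR(regmap))
                return PTR_ERR(regmap);

        /* Set the bits named by the phandle arguments */
        return regmap_update_bits(regmap, args[0], args[1], args[1]);
}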
diff --git a/drivers/pci/controller/dwc/pci-exynos.c b/drivers/pci/controller/dwc/pci-exynos.c
index a33fa98a252e..ace736b025b1 100644
--- a/drivers/pci/controller/dwc/pci-exynos.c
+++ b/drivers/pci/controller/dwc/pci-exynos.c
@@ -54,43 +54,11 @@
struct exynos_pcie {
struct dw_pcie pci;
void __iomem *elbi_base;
- struct clk *clk;
- struct clk *bus_clk;
+ struct clk_bulk_data *clks;
struct phy *phy;
struct regulator_bulk_data supplies[2];
};
-static int exynos_pcie_init_clk_resources(struct exynos_pcie *ep)
-{
- struct device *dev = ep->pci.dev;
- int ret;
-
- ret = clk_prepare_enable(ep->clk);
- if (ret) {
- dev_err(dev, "cannot enable pcie rc clock");
- return ret;
- }
-
- ret = clk_prepare_enable(ep->bus_clk);
- if (ret) {
- dev_err(dev, "cannot enable pcie bus clock");
- goto err_bus_clk;
- }
-
- return 0;
-
-err_bus_clk:
- clk_disable_unprepare(ep->clk);
-
- return ret;
-}
-
-static void exynos_pcie_deinit_clk_resources(struct exynos_pcie *ep)
-{
- clk_disable_unprepare(ep->bus_clk);
- clk_disable_unprepare(ep->clk);
-}
-
static void exynos_pcie_writel(void __iomem *base, u32 val, u32 reg)
{
writel(val, base + reg);
@@ -332,17 +300,9 @@ static int exynos_pcie_probe(struct platform_device *pdev)
if (IS_ERR(ep->elbi_base))
return PTR_ERR(ep->elbi_base);
- ep->clk = devm_clk_get(dev, "pcie");
- if (IS_ERR(ep->clk)) {
- dev_err(dev, "Failed to get pcie rc clock\n");
- return PTR_ERR(ep->clk);
- }
-
- ep->bus_clk = devm_clk_get(dev, "pcie_bus");
- if (IS_ERR(ep->bus_clk)) {
- dev_err(dev, "Failed to get pcie bus clock\n");
- return PTR_ERR(ep->bus_clk);
- }
+ ret = devm_clk_bulk_get_all_enabled(dev, &ep->clks);
+ if (ret < 0)
+ return ret;
ep->supplies[0].supply = "vdd18";
ep->supplies[1].supply = "vdd10";
@@ -351,10 +311,6 @@ static int exynos_pcie_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = exynos_pcie_init_clk_resources(ep);
- if (ret)
- return ret;
-
ret = regulator_bulk_enable(ARRAY_SIZE(ep->supplies), ep->supplies);
if (ret)
return ret;
@@ -369,7 +325,6 @@ static int exynos_pcie_probe(struct platform_device *pdev)
fail_probe:
phy_exit(ep->phy);
- exynos_pcie_deinit_clk_resources(ep);
regulator_bulk_disable(ARRAY_SIZE(ep->supplies), ep->supplies);
return ret;
@@ -383,7 +338,6 @@ static void exynos_pcie_remove(struct platform_device *pdev)
exynos_pcie_assert_core_reset(ep);
phy_power_off(ep->phy);
phy_exit(ep->phy);
- exynos_pcie_deinit_clk_resources(ep);
regulator_bulk_disable(ARRAY_SIZE(ep->supplies), ep->supplies);
}
@@ -429,7 +383,7 @@ static const struct of_device_id exynos_pcie_of_match[] = {
static struct platform_driver exynos_pcie_driver = {
.probe = exynos_pcie_probe,
- .remove_new = exynos_pcie_remove,
+ .remove = exynos_pcie_remove,
.driver = {
.name = "exynos-pcie",
.of_match_table = exynos_pcie_of_match,
@@ -437,5 +391,6 @@ static struct platform_driver exynos_pcie_driver = {
},
};
module_platform_driver(exynos_pcie_driver);
+MODULE_DESCRIPTION("Samsung Exynos PCIe host controller driver");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(of, exynos_pcie_of_match);
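The Exynos conversion above swaps the hand-rolled clock handling for devm_clk_bulk_get_all_enabled(), which acquires, prepares and enables every clock listed in the device node and undoes all of it automatically on driver detach. A minimal usage sketch; struct foo and its members are hypothetical:

#include <linux/clk.h>
#include <linux/device.h>

struct foo {
        struct clk_bulk_data *clks;
        int num_clks;
};

static int foo_get_clocks(struct device *dev, struct foo *priv)
{
        int ret;

        /* Returns the number of clocks obtained, or a negative errno */
        ret = devm_clk_bulk_get_all_enabled(dev, &priv->clks);
        if (ret < 0)
                return dev_err_probe(dev, ret, "failed to get clocks\n");

        priv->num_clks = ret;
        return 0;
}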
diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
index 99a60270b26c..5f267dd261b5 100644
--- a/drivers/pci/controller/dwc/pci-imx6.c
+++ b/drivers/pci/controller/dwc/pci-imx6.c
@@ -11,14 +11,13 @@
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
#include <linux/mfd/syscon/imx7-iomuxc-gpr.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/of_address.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
@@ -29,10 +28,12 @@
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/reset.h>
+#include <linux/phy/pcie.h>
#include <linux/phy/phy.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
+#include "../../pci.h"
#include "pcie-designware.h"
#define IMX8MQ_GPR_PCIE_REF_USE_PAD BIT(9)
@@ -40,7 +41,6 @@
#define IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE BIT(11)
#define IMX8MQ_GPR_PCIE_VREG_BYPASS BIT(12)
#define IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE GENMASK(11, 8)
-#define IMX8MQ_PCIE2_BASE_ADDR 0x33c00000
#define IMX95_PCIE_PHY_GEN_CTRL 0x0
#define IMX95_PCIE_REF_USE_PAD BIT(17)
@@ -55,9 +55,25 @@
#define IMX95_PE0_GEN_CTRL_3 0x1058
#define IMX95_PCIE_LTSSM_EN BIT(0)
-#define to_imx6_pcie(x) dev_get_drvdata((x)->dev)
+#define IMX95_PE0_LUT_ACSCTRL 0x1008
+#define IMX95_PEO_LUT_RWA BIT(16)
+#define IMX95_PE0_LUT_ENLOC GENMASK(4, 0)
-enum imx6_pcie_variants {
+#define IMX95_PE0_LUT_DATA1 0x100c
+#define IMX95_PE0_LUT_VLD BIT(31)
+#define IMX95_PE0_LUT_DAC_ID GENMASK(10, 8)
+#define IMX95_PE0_LUT_STREAM_ID GENMASK(5, 0)
+
+#define IMX95_PE0_LUT_DATA2 0x1010
+#define IMX95_PE0_LUT_REQID GENMASK(31, 16)
+#define IMX95_PE0_LUT_MASK GENMASK(15, 0)
+
+#define IMX95_SID_MASK GENMASK(5, 0)
+#define IMX95_MAX_LUT 32
+
+#define to_imx_pcie(x) dev_get_drvdata((x)->dev)
+
+enum imx_pcie_variants {
IMX6Q,
IMX6SX,
IMX6QP,
@@ -65,58 +81,64 @@ enum imx6_pcie_variants {
IMX8MQ,
IMX8MM,
IMX8MP,
+ IMX8Q,
IMX95,
IMX8MQ_EP,
IMX8MM_EP,
IMX8MP_EP,
+ IMX8Q_EP,
IMX95_EP,
};
-#define IMX6_PCIE_FLAG_IMX6_PHY BIT(0)
-#define IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE BIT(1)
-#define IMX6_PCIE_FLAG_SUPPORTS_SUSPEND BIT(2)
-#define IMX6_PCIE_FLAG_HAS_PHYDRV BIT(3)
-#define IMX6_PCIE_FLAG_HAS_APP_RESET BIT(4)
-#define IMX6_PCIE_FLAG_HAS_PHY_RESET BIT(5)
-#define IMX6_PCIE_FLAG_HAS_SERDES BIT(6)
-#define IMX6_PCIE_FLAG_SUPPORT_64BIT BIT(7)
-
-#define imx6_check_flag(pci, val) (pci->drvdata->flags & val)
+#define IMX_PCIE_FLAG_IMX_PHY BIT(0)
+#define IMX_PCIE_FLAG_IMX_SPEED_CHANGE BIT(1)
+#define IMX_PCIE_FLAG_SUPPORTS_SUSPEND BIT(2)
+#define IMX_PCIE_FLAG_HAS_PHYDRV BIT(3)
+#define IMX_PCIE_FLAG_HAS_APP_RESET BIT(4)
+#define IMX_PCIE_FLAG_HAS_PHY_RESET BIT(5)
+#define IMX_PCIE_FLAG_HAS_SERDES BIT(6)
+#define IMX_PCIE_FLAG_SUPPORT_64BIT BIT(7)
+#define IMX_PCIE_FLAG_CPU_ADDR_FIXUP BIT(8)
+/*
+ * Because of ERR005723 (PCIe does not support L2 power down) we need to
+ * workaround suspend resume on some devices which are affected by this errata.
+ */
+#define IMX_PCIE_FLAG_BROKEN_SUSPEND BIT(9)
+#define IMX_PCIE_FLAG_HAS_LUT BIT(10)
-#define IMX6_PCIE_MAX_CLKS 6
+#define imx_check_flag(pci, val) (pci->drvdata->flags & val)
-#define IMX6_PCIE_MAX_INSTANCES 2
+#define IMX_PCIE_MAX_INSTANCES 2
-struct imx6_pcie;
+struct imx_pcie;
-struct imx6_pcie_drvdata {
- enum imx6_pcie_variants variant;
+struct imx_pcie_drvdata {
+ enum imx_pcie_variants variant;
enum dw_pcie_device_mode mode;
u32 flags;
int dbi_length;
const char *gpr;
- const char * const *clk_names;
- const u32 clks_cnt;
const u32 ltssm_off;
const u32 ltssm_mask;
- const u32 mode_off[IMX6_PCIE_MAX_INSTANCES];
- const u32 mode_mask[IMX6_PCIE_MAX_INSTANCES];
+ const u32 mode_off[IMX_PCIE_MAX_INSTANCES];
+ const u32 mode_mask[IMX_PCIE_MAX_INSTANCES];
const struct pci_epc_features *epc_features;
- int (*init_phy)(struct imx6_pcie *pcie);
+ int (*init_phy)(struct imx_pcie *pcie);
+ int (*enable_ref_clk)(struct imx_pcie *pcie, bool enable);
+ int (*core_reset)(struct imx_pcie *pcie, bool assert);
+ const struct dw_pcie_host_ops *ops;
};
-struct imx6_pcie {
+struct imx_pcie {
struct dw_pcie *pci;
- int reset_gpio;
- bool gpio_active_high;
- bool link_is_up;
- struct clk_bulk_data clks[IMX6_PCIE_MAX_CLKS];
+ struct gpio_desc *reset_gpiod;
+ struct clk_bulk_data *clks;
+ int num_clks;
struct regmap *iomuxc_gpr;
u16 msi_ctrl;
u32 controller_id;
struct reset_control *pciephy_reset;
struct reset_control *apps_reset;
- struct reset_control *turnoff_reset;
u32 tx_deemph_gen1;
u32 tx_deemph_gen2_3p5db;
u32 tx_deemph_gen2_6db;
@@ -131,7 +153,10 @@ struct imx6_pcie {
/* power domain for pcie phy */
struct device *pd_pcie_phy;
struct phy *phy;
- const struct imx6_pcie_drvdata *drvdata;
+ const struct imx_pcie_drvdata *drvdata;
+
+ /* Ensure that only one device's LUT is configured at any given time */
+ struct mutex lock;
};
/* Parameters for the waiting for PCIe PHY PLL to lock on i.MX7 */
@@ -186,28 +211,28 @@ struct imx6_pcie {
#define PHY_RX_OVRD_IN_LO_RX_DATA_EN BIT(5)
#define PHY_RX_OVRD_IN_LO_RX_PLL_EN BIT(3)
-static unsigned int imx6_pcie_grp_offset(const struct imx6_pcie *imx6_pcie)
+static unsigned int imx_pcie_grp_offset(const struct imx_pcie *imx_pcie)
{
- WARN_ON(imx6_pcie->drvdata->variant != IMX8MQ &&
- imx6_pcie->drvdata->variant != IMX8MQ_EP &&
- imx6_pcie->drvdata->variant != IMX8MM &&
- imx6_pcie->drvdata->variant != IMX8MM_EP &&
- imx6_pcie->drvdata->variant != IMX8MP &&
- imx6_pcie->drvdata->variant != IMX8MP_EP);
- return imx6_pcie->controller_id == 1 ? IOMUXC_GPR16 : IOMUXC_GPR14;
+ WARN_ON(imx_pcie->drvdata->variant != IMX8MQ &&
+ imx_pcie->drvdata->variant != IMX8MQ_EP &&
+ imx_pcie->drvdata->variant != IMX8MM &&
+ imx_pcie->drvdata->variant != IMX8MM_EP &&
+ imx_pcie->drvdata->variant != IMX8MP &&
+ imx_pcie->drvdata->variant != IMX8MP_EP);
+ return imx_pcie->controller_id == 1 ? IOMUXC_GPR16 : IOMUXC_GPR14;
}
-static int imx95_pcie_init_phy(struct imx6_pcie *imx6_pcie)
+static int imx95_pcie_init_phy(struct imx_pcie *imx_pcie)
{
- regmap_update_bits(imx6_pcie->iomuxc_gpr,
+ regmap_update_bits(imx_pcie->iomuxc_gpr,
IMX95_PCIE_SS_RW_REG_0,
IMX95_PCIE_PHY_CR_PARA_SEL,
IMX95_PCIE_PHY_CR_PARA_SEL);
- regmap_update_bits(imx6_pcie->iomuxc_gpr,
+ regmap_update_bits(imx_pcie->iomuxc_gpr,
IMX95_PCIE_PHY_GEN_CTRL,
IMX95_PCIE_REF_USE_PAD, 0);
- regmap_update_bits(imx6_pcie->iomuxc_gpr,
+ regmap_update_bits(imx_pcie->iomuxc_gpr,
IMX95_PCIE_SS_RW_REG_0,
IMX95_PCIE_REF_CLKEN,
IMX95_PCIE_REF_CLKEN);
@@ -215,9 +240,9 @@ static int imx95_pcie_init_phy(struct imx6_pcie *imx6_pcie)
return 0;
}
-static void imx6_pcie_configure_type(struct imx6_pcie *imx6_pcie)
+static void imx_pcie_configure_type(struct imx_pcie *imx_pcie)
{
- const struct imx6_pcie_drvdata *drvdata = imx6_pcie->drvdata;
+ const struct imx_pcie_drvdata *drvdata = imx_pcie->drvdata;
unsigned int mask, val, mode, id;
if (drvdata->mode == DW_PCIE_EP_TYPE)
@@ -225,21 +250,25 @@ static void imx6_pcie_configure_type(struct imx6_pcie *imx6_pcie)
else
mode = PCI_EXP_TYPE_ROOT_PORT;
- id = imx6_pcie->controller_id;
+ id = imx_pcie->controller_id;
- /* If mode_mask[id] is zero, means each controller have its individual gpr */
+ /* If mode_mask is 0, generic PHY driver is used to set the mode */
+ if (!drvdata->mode_mask[0])
+ return;
+
+ /* If mode_mask[id] is 0, each controller has its individual GPR */
if (!drvdata->mode_mask[id])
id = 0;
mask = drvdata->mode_mask[id];
val = mode << (ffs(mask) - 1);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, drvdata->mode_off[id], mask, val);
+ regmap_update_bits(imx_pcie->iomuxc_gpr, drvdata->mode_off[id], mask, val);
}
-static int pcie_phy_poll_ack(struct imx6_pcie *imx6_pcie, bool exp_val)
+static int pcie_phy_poll_ack(struct imx_pcie *imx_pcie, bool exp_val)
{
- struct dw_pcie *pci = imx6_pcie->pci;
+ struct dw_pcie *pci = imx_pcie->pci;
bool val;
u32 max_iterations = 10;
u32 wait_counter = 0;
@@ -258,9 +287,9 @@ static int pcie_phy_poll_ack(struct imx6_pcie *imx6_pcie, bool exp_val)
return -ETIMEDOUT;
}
-static int pcie_phy_wait_ack(struct imx6_pcie *imx6_pcie, int addr)
+static int pcie_phy_wait_ack(struct imx_pcie *imx_pcie, int addr)
{
- struct dw_pcie *pci = imx6_pcie->pci;
+ struct dw_pcie *pci = imx_pcie->pci;
u32 val;
int ret;
@@ -270,24 +299,24 @@ static int pcie_phy_wait_ack(struct imx6_pcie *imx6_pcie, int addr)
val |= PCIE_PHY_CTRL_CAP_ADR;
dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);
- ret = pcie_phy_poll_ack(imx6_pcie, true);
+ ret = pcie_phy_poll_ack(imx_pcie, true);
if (ret)
return ret;
val = PCIE_PHY_CTRL_DATA(addr);
dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);
- return pcie_phy_poll_ack(imx6_pcie, false);
+ return pcie_phy_poll_ack(imx_pcie, false);
}
/* Read from the 16-bit PCIe PHY control registers (not memory-mapped) */
-static int pcie_phy_read(struct imx6_pcie *imx6_pcie, int addr, u16 *data)
+static int pcie_phy_read(struct imx_pcie *imx_pcie, int addr, u16 *data)
{
- struct dw_pcie *pci = imx6_pcie->pci;
+ struct dw_pcie *pci = imx_pcie->pci;
u32 phy_ctl;
int ret;
- ret = pcie_phy_wait_ack(imx6_pcie, addr);
+ ret = pcie_phy_wait_ack(imx_pcie, addr);
if (ret)
return ret;
@@ -295,7 +324,7 @@ static int pcie_phy_read(struct imx6_pcie *imx6_pcie, int addr, u16 *data)
phy_ctl = PCIE_PHY_CTRL_RD;
dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, phy_ctl);
- ret = pcie_phy_poll_ack(imx6_pcie, true);
+ ret = pcie_phy_poll_ack(imx_pcie, true);
if (ret)
return ret;
@@ -304,18 +333,18 @@ static int pcie_phy_read(struct imx6_pcie *imx6_pcie, int addr, u16 *data)
/* deassert Read signal */
dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, 0x00);
- return pcie_phy_poll_ack(imx6_pcie, false);
+ return pcie_phy_poll_ack(imx_pcie, false);
}
-static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, u16 data)
+static int pcie_phy_write(struct imx_pcie *imx_pcie, int addr, u16 data)
{
- struct dw_pcie *pci = imx6_pcie->pci;
+ struct dw_pcie *pci = imx_pcie->pci;
u32 var;
int ret;
/* write addr */
/* cap addr */
- ret = pcie_phy_wait_ack(imx6_pcie, addr);
+ ret = pcie_phy_wait_ack(imx_pcie, addr);
if (ret)
return ret;
@@ -326,7 +355,7 @@ static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, u16 data)
var |= PCIE_PHY_CTRL_CAP_DAT;
dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);
- ret = pcie_phy_poll_ack(imx6_pcie, true);
+ ret = pcie_phy_poll_ack(imx_pcie, true);
if (ret)
return ret;
@@ -335,7 +364,7 @@ static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, u16 data)
dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);
/* wait for ack de-assertion */
- ret = pcie_phy_poll_ack(imx6_pcie, false);
+ ret = pcie_phy_poll_ack(imx_pcie, false);
if (ret)
return ret;
@@ -344,7 +373,7 @@ static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, u16 data)
dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);
/* wait for ack */
- ret = pcie_phy_poll_ack(imx6_pcie, true);
+ ret = pcie_phy_poll_ack(imx_pcie, true);
if (ret)
return ret;
@@ -353,7 +382,7 @@ static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, u16 data)
dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);
/* wait for ack de-assertion */
- ret = pcie_phy_poll_ack(imx6_pcie, false);
+ ret = pcie_phy_poll_ack(imx_pcie, false);
if (ret)
return ret;
@@ -362,74 +391,68 @@ static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, u16 data)
return 0;
}
-static int imx8mq_pcie_init_phy(struct imx6_pcie *imx6_pcie)
+static int imx8mq_pcie_init_phy(struct imx_pcie *imx_pcie)
{
- /* TODO: Currently this code assumes external oscillator is being used */
- regmap_update_bits(imx6_pcie->iomuxc_gpr,
- imx6_pcie_grp_offset(imx6_pcie),
+ /* TODO: This code assumes external oscillator is being used */
+ regmap_update_bits(imx_pcie->iomuxc_gpr,
+ imx_pcie_grp_offset(imx_pcie),
IMX8MQ_GPR_PCIE_REF_USE_PAD,
IMX8MQ_GPR_PCIE_REF_USE_PAD);
/*
- * Regarding the datasheet, the PCIE_VPH is suggested to be 1.8V. If the PCIE_VPH is
- * supplied by 3.3V, the VREG_BYPASS should be cleared to zero.
+ * Per the datasheet, the PCIE_VPH is suggested to be 1.8V. If the
+ * PCIE_VPH is supplied by 3.3V, the VREG_BYPASS should be cleared
+ * to zero.
*/
- if (imx6_pcie->vph && regulator_get_voltage(imx6_pcie->vph) > 3000000)
- regmap_update_bits(imx6_pcie->iomuxc_gpr,
- imx6_pcie_grp_offset(imx6_pcie),
+ if (imx_pcie->vph && regulator_get_voltage(imx_pcie->vph) > 3000000)
+ regmap_update_bits(imx_pcie->iomuxc_gpr,
+ imx_pcie_grp_offset(imx_pcie),
IMX8MQ_GPR_PCIE_VREG_BYPASS,
0);
return 0;
}
-static int imx7d_pcie_init_phy(struct imx6_pcie *imx6_pcie)
-{
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, IMX7D_GPR12_PCIE_PHY_REFCLK_SEL, 0);
-
- return 0;
-}
-
-static int imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie)
+static int imx_pcie_init_phy(struct imx_pcie *imx_pcie)
{
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);
/* configure constant input signal to the pcie ctrl and phy */
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
IMX6Q_GPR12_LOS_LEVEL, 9 << 4);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR8,
IMX6Q_GPR8_TX_DEEMPH_GEN1,
- imx6_pcie->tx_deemph_gen1 << 0);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ imx_pcie->tx_deemph_gen1 << 0);
+ regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR8,
IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB,
- imx6_pcie->tx_deemph_gen2_3p5db << 6);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ imx_pcie->tx_deemph_gen2_3p5db << 6);
+ regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR8,
IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB,
- imx6_pcie->tx_deemph_gen2_6db << 12);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ imx_pcie->tx_deemph_gen2_6db << 12);
+ regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR8,
IMX6Q_GPR8_TX_SWING_FULL,
- imx6_pcie->tx_swing_full << 18);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ imx_pcie->tx_swing_full << 18);
+ regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR8,
IMX6Q_GPR8_TX_SWING_LOW,
- imx6_pcie->tx_swing_low << 25);
+ imx_pcie->tx_swing_low << 25);
return 0;
}
-static int imx6sx_pcie_init_phy(struct imx6_pcie *imx6_pcie)
+static int imx6sx_pcie_init_phy(struct imx_pcie *imx_pcie)
{
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
IMX6SX_GPR12_PCIE_RX_EQ_MASK, IMX6SX_GPR12_PCIE_RX_EQ_2);
- return imx6_pcie_init_phy(imx6_pcie);
+ return imx_pcie_init_phy(imx_pcie);
}
-static void imx7d_pcie_wait_for_phy_pll_lock(struct imx6_pcie *imx6_pcie)
+static void imx7d_pcie_wait_for_phy_pll_lock(struct imx_pcie *imx_pcie)
{
u32 val;
- struct device *dev = imx6_pcie->pci->dev;
+ struct device *dev = imx_pcie->pci->dev;
- if (regmap_read_poll_timeout(imx6_pcie->iomuxc_gpr,
+ if (regmap_read_poll_timeout(imx_pcie->iomuxc_gpr,
IOMUXC_GPR22, val,
val & IMX7D_GPR22_PCIE_PHY_PLL_LOCKED,
PHY_PLL_LOCK_WAIT_USLEEP_MAX,
@@ -437,19 +460,20 @@ static void imx7d_pcie_wait_for_phy_pll_lock(struct imx6_pcie *imx6_pcie)
dev_err(dev, "PCIe PLL lock timeout\n");
}
-static int imx6_setup_phy_mpll(struct imx6_pcie *imx6_pcie)
+static int imx_setup_phy_mpll(struct imx_pcie *imx_pcie)
{
unsigned long phy_rate = 0;
int mult, div;
u16 val;
int i;
+ struct clk_bulk_data *clks = imx_pcie->clks;
- if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_IMX6_PHY))
+ if (!(imx_pcie->drvdata->flags & IMX_PCIE_FLAG_IMX_PHY))
return 0;
- for (i = 0; i < imx6_pcie->drvdata->clks_cnt; i++)
- if (strncmp(imx6_pcie->clks[i].id, "pcie_phy", 8) == 0)
- phy_rate = clk_get_rate(imx6_pcie->clks[i].clk);
+ for (i = 0; i < imx_pcie->num_clks; i++)
+ if (strncmp(clks[i].id, "pcie_phy", 8) == 0)
+ phy_rate = clk_get_rate(clks[i].clk);
switch (phy_rate) {
case 125000000:
@@ -467,46 +491,46 @@ static int imx6_setup_phy_mpll(struct imx6_pcie *imx6_pcie)
div = 1;
break;
default:
- dev_err(imx6_pcie->pci->dev,
+ dev_err(imx_pcie->pci->dev,
"Unsupported PHY reference clock rate %lu\n", phy_rate);
return -EINVAL;
}
- pcie_phy_read(imx6_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, &val);
+ pcie_phy_read(imx_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, &val);
val &= ~(PCIE_PHY_MPLL_MULTIPLIER_MASK <<
PCIE_PHY_MPLL_MULTIPLIER_SHIFT);
val |= mult << PCIE_PHY_MPLL_MULTIPLIER_SHIFT;
val |= PCIE_PHY_MPLL_MULTIPLIER_OVRD;
- pcie_phy_write(imx6_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, val);
+ pcie_phy_write(imx_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, val);
- pcie_phy_read(imx6_pcie, PCIE_PHY_ATEOVRD, &val);
+ pcie_phy_read(imx_pcie, PCIE_PHY_ATEOVRD, &val);
val &= ~(PCIE_PHY_ATEOVRD_REF_CLKDIV_MASK <<
PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT);
val |= div << PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT;
val |= PCIE_PHY_ATEOVRD_EN;
- pcie_phy_write(imx6_pcie, PCIE_PHY_ATEOVRD, val);
+ pcie_phy_write(imx_pcie, PCIE_PHY_ATEOVRD, val);
return 0;
}
-static void imx6_pcie_reset_phy(struct imx6_pcie *imx6_pcie)
+static void imx_pcie_reset_phy(struct imx_pcie *imx_pcie)
{
u16 tmp;
- if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_IMX6_PHY))
+ if (!(imx_pcie->drvdata->flags & IMX_PCIE_FLAG_IMX_PHY))
return;
- pcie_phy_read(imx6_pcie, PHY_RX_OVRD_IN_LO, &tmp);
+ pcie_phy_read(imx_pcie, PHY_RX_OVRD_IN_LO, &tmp);
tmp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN |
PHY_RX_OVRD_IN_LO_RX_PLL_EN);
- pcie_phy_write(imx6_pcie, PHY_RX_OVRD_IN_LO, tmp);
+ pcie_phy_write(imx_pcie, PHY_RX_OVRD_IN_LO, tmp);
usleep_range(2000, 3000);
- pcie_phy_read(imx6_pcie, PHY_RX_OVRD_IN_LO, &tmp);
+ pcie_phy_read(imx_pcie, PHY_RX_OVRD_IN_LO, &tmp);
tmp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN |
PHY_RX_OVRD_IN_LO_RX_PLL_EN);
- pcie_phy_write(imx6_pcie, PHY_RX_OVRD_IN_LO, tmp);
+ pcie_phy_write(imx_pcie, PHY_RX_OVRD_IN_LO, tmp);
}
#ifdef CONFIG_ARM
@@ -545,133 +569,112 @@ static int imx6q_pcie_abort_handler(unsigned long addr,
}
#endif
-static int imx6_pcie_attach_pd(struct device *dev)
+static int imx_pcie_attach_pd(struct device *dev)
{
- struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
+ struct imx_pcie *imx_pcie = dev_get_drvdata(dev);
struct device_link *link;
/* Do nothing when in a single power domain */
if (dev->pm_domain)
return 0;
- imx6_pcie->pd_pcie = dev_pm_domain_attach_by_name(dev, "pcie");
- if (IS_ERR(imx6_pcie->pd_pcie))
- return PTR_ERR(imx6_pcie->pd_pcie);
+ imx_pcie->pd_pcie = dev_pm_domain_attach_by_name(dev, "pcie");
+ if (IS_ERR(imx_pcie->pd_pcie))
+ return PTR_ERR(imx_pcie->pd_pcie);
/* Do nothing when power domain missing */
- if (!imx6_pcie->pd_pcie)
+ if (!imx_pcie->pd_pcie)
return 0;
- link = device_link_add(dev, imx6_pcie->pd_pcie,
+ link = device_link_add(dev, imx_pcie->pd_pcie,
DL_FLAG_STATELESS |
DL_FLAG_PM_RUNTIME |
DL_FLAG_RPM_ACTIVE);
if (!link) {
- dev_err(dev, "Failed to add device_link to pcie pd.\n");
+ dev_err(dev, "Failed to add device_link to pcie pd\n");
return -EINVAL;
}
- imx6_pcie->pd_pcie_phy = dev_pm_domain_attach_by_name(dev, "pcie_phy");
- if (IS_ERR(imx6_pcie->pd_pcie_phy))
- return PTR_ERR(imx6_pcie->pd_pcie_phy);
+ imx_pcie->pd_pcie_phy = dev_pm_domain_attach_by_name(dev, "pcie_phy");
+ if (IS_ERR(imx_pcie->pd_pcie_phy))
+ return PTR_ERR(imx_pcie->pd_pcie_phy);
- link = device_link_add(dev, imx6_pcie->pd_pcie_phy,
+ link = device_link_add(dev, imx_pcie->pd_pcie_phy,
DL_FLAG_STATELESS |
DL_FLAG_PM_RUNTIME |
DL_FLAG_RPM_ACTIVE);
if (!link) {
- dev_err(dev, "Failed to add device_link to pcie_phy pd.\n");
+ dev_err(dev, "Failed to add device_link to pcie_phy pd\n");
return -EINVAL;
}
return 0;
}
-static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
+static int imx6sx_pcie_enable_ref_clk(struct imx_pcie *imx_pcie, bool enable)
{
- unsigned int offset;
- int ret = 0;
+ regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6SX_GPR12_PCIE_TEST_POWERDOWN,
+ enable ? 0 : IMX6SX_GPR12_PCIE_TEST_POWERDOWN);
+ return 0;
+}
- switch (imx6_pcie->drvdata->variant) {
- case IMX6SX:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX6SX_GPR12_PCIE_TEST_POWERDOWN, 0);
- break;
- case IMX6QP:
- case IMX6Q:
+static int imx6q_pcie_enable_ref_clk(struct imx_pcie *imx_pcie, bool enable)
+{
+ if (enable) {
/* power up core phy and enable ref clock */
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
- IMX6Q_GPR1_PCIE_TEST_PD, 0 << 18);
+ regmap_clear_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_TEST_PD);
/*
- * the async reset input need ref clock to sync internally,
+ * The async reset input need ref clock to sync internally,
* when the ref clock comes after reset, internal synced
* reset time is too short, cannot meet the requirement.
- * add one ~10us delay here.
+ * Add a ~10us delay here.
*/
usleep_range(10, 100);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
- IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16);
- break;
- case IMX7D:
- case IMX95:
- case IMX95_EP:
- break;
- case IMX8MM:
- case IMX8MM_EP:
- case IMX8MQ:
- case IMX8MQ_EP:
- case IMX8MP:
- case IMX8MP_EP:
- offset = imx6_pcie_grp_offset(imx6_pcie);
- /*
- * Set the over ride low and enabled
- * make sure that REF_CLK is turned on.
- */
- regmap_update_bits(imx6_pcie->iomuxc_gpr, offset,
- IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE,
- 0);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, offset,
- IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN,
- IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN);
- break;
+ regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_REF_CLK_EN);
+ } else {
+ regmap_clear_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_REF_CLK_EN);
+ regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_TEST_PD);
}
- return ret;
+ return 0;
}
-static void imx6_pcie_disable_ref_clk(struct imx6_pcie *imx6_pcie)
+static int imx8mm_pcie_enable_ref_clk(struct imx_pcie *imx_pcie, bool enable)
{
- switch (imx6_pcie->drvdata->variant) {
- case IMX6QP:
- case IMX6Q:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
- IMX6Q_GPR1_PCIE_REF_CLK_EN, 0);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
- IMX6Q_GPR1_PCIE_TEST_PD,
- IMX6Q_GPR1_PCIE_TEST_PD);
- break;
- case IMX7D:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX7D_GPR12_PCIE_PHY_REFCLK_SEL,
- IMX7D_GPR12_PCIE_PHY_REFCLK_SEL);
- break;
- default:
- break;
- }
+ int offset = imx_pcie_grp_offset(imx_pcie);
+
+ regmap_update_bits(imx_pcie->iomuxc_gpr, offset,
+ IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE,
+ enable ? 0 : IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE);
+ regmap_update_bits(imx_pcie->iomuxc_gpr, offset,
+ IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN,
+ enable ? IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN : 0);
+ return 0;
+}
+
+static int imx7d_pcie_enable_ref_clk(struct imx_pcie *imx_pcie, bool enable)
+{
+ regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX7D_GPR12_PCIE_PHY_REFCLK_SEL,
+ enable ? 0 : IMX7D_GPR12_PCIE_PHY_REFCLK_SEL);
+ return 0;
}
-static int imx6_pcie_clk_enable(struct imx6_pcie *imx6_pcie)
+static int imx_pcie_clk_enable(struct imx_pcie *imx_pcie)
{
- struct dw_pcie *pci = imx6_pcie->pci;
+ struct dw_pcie *pci = imx_pcie->pci;
struct device *dev = pci->dev;
int ret;
- ret = clk_bulk_prepare_enable(imx6_pcie->drvdata->clks_cnt, imx6_pcie->clks);
+ ret = clk_bulk_prepare_enable(imx_pcie->num_clks, imx_pcie->clks);
if (ret)
return ret;
- ret = imx6_pcie_enable_ref_clk(imx6_pcie);
- if (ret) {
- dev_err(dev, "unable to enable pcie ref clock\n");
- goto err_ref_clk;
+ if (imx_pcie->drvdata->enable_ref_clk) {
+ ret = imx_pcie->drvdata->enable_ref_clk(imx_pcie, true);
+ if (ret) {
+ dev_err(dev, "Failed to enable PCIe REFCLK\n");
+ goto err_ref_clk;
+ }
}
/* allow the clocks to stabilize */
@@ -679,102 +682,121 @@ static int imx6_pcie_clk_enable(struct imx6_pcie *imx6_pcie)
return 0;
err_ref_clk:
- clk_bulk_disable_unprepare(imx6_pcie->drvdata->clks_cnt, imx6_pcie->clks);
+ clk_bulk_disable_unprepare(imx_pcie->num_clks, imx_pcie->clks);
return ret;
}
-static void imx6_pcie_clk_disable(struct imx6_pcie *imx6_pcie)
+static void imx_pcie_clk_disable(struct imx_pcie *imx_pcie)
{
- imx6_pcie_disable_ref_clk(imx6_pcie);
- clk_bulk_disable_unprepare(imx6_pcie->drvdata->clks_cnt, imx6_pcie->clks);
+ if (imx_pcie->drvdata->enable_ref_clk)
+ imx_pcie->drvdata->enable_ref_clk(imx_pcie, false);
+ clk_bulk_disable_unprepare(imx_pcie->num_clks, imx_pcie->clks);
}
-static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie)
+static int imx6sx_pcie_core_reset(struct imx_pcie *imx_pcie, bool assert)
{
- reset_control_assert(imx6_pcie->pciephy_reset);
- reset_control_assert(imx6_pcie->apps_reset);
-
- switch (imx6_pcie->drvdata->variant) {
- case IMX6SX:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX6SX_GPR12_PCIE_TEST_POWERDOWN,
- IMX6SX_GPR12_PCIE_TEST_POWERDOWN);
- /* Force PCIe PHY reset */
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5,
- IMX6SX_GPR5_PCIE_BTNRST_RESET,
- IMX6SX_GPR5_PCIE_BTNRST_RESET);
- break;
- case IMX6QP:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
- IMX6Q_GPR1_PCIE_SW_RST,
- IMX6Q_GPR1_PCIE_SW_RST);
- break;
- case IMX6Q:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
- IMX6Q_GPR1_PCIE_TEST_PD, 1 << 18);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
- IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16);
- break;
- default:
- break;
- }
+ if (assert)
+ regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6SX_GPR12_PCIE_TEST_POWERDOWN);
- /* Some boards don't have PCIe reset GPIO. */
- if (gpio_is_valid(imx6_pcie->reset_gpio))
- gpio_set_value_cansleep(imx6_pcie->reset_gpio,
- imx6_pcie->gpio_active_high);
+ /* Force PCIe PHY reset */
+ regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR5, IMX6SX_GPR5_PCIE_BTNRST_RESET,
+ assert ? IMX6SX_GPR5_PCIE_BTNRST_RESET : 0);
+ return 0;
}
-static int imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
+static int imx6qp_pcie_core_reset(struct imx_pcie *imx_pcie, bool assert)
{
- struct dw_pcie *pci = imx6_pcie->pci;
- struct device *dev = pci->dev;
+ regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_SW_RST,
+ assert ? IMX6Q_GPR1_PCIE_SW_RST : 0);
+ if (!assert)
+ usleep_range(200, 500);
- reset_control_deassert(imx6_pcie->pciephy_reset);
+ return 0;
+}
- switch (imx6_pcie->drvdata->variant) {
- case IMX7D:
- /* Workaround for ERR010728, failure of PCI-e PLL VCO to
- * oscillate, especially when cold. This turns off "Duty-cycle
- * Corrector" and other mysterious undocumented things.
- */
- if (likely(imx6_pcie->phy_base)) {
- /* De-assert DCC_FB_EN */
- writel(PCIE_PHY_CMN_REG4_DCC_FB_EN,
- imx6_pcie->phy_base + PCIE_PHY_CMN_REG4);
- /* Assert RX_EQS and RX_EQS_SEL */
- writel(PCIE_PHY_CMN_REG24_RX_EQ_SEL
- | PCIE_PHY_CMN_REG24_RX_EQ,
- imx6_pcie->phy_base + PCIE_PHY_CMN_REG24);
- /* Assert ATT_MODE */
- writel(PCIE_PHY_CMN_REG26_ATT_MODE,
- imx6_pcie->phy_base + PCIE_PHY_CMN_REG26);
- } else {
- dev_warn(dev, "Unable to apply ERR010728 workaround. DT missing fsl,imx7d-pcie-phy phandle ?\n");
- }
+static int imx6q_pcie_core_reset(struct imx_pcie *imx_pcie, bool assert)
+{
+ if (!assert)
+ return 0;
- imx7d_pcie_wait_for_phy_pll_lock(imx6_pcie);
- break;
- case IMX6SX:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5,
- IMX6SX_GPR5_PCIE_BTNRST_RESET, 0);
- break;
- case IMX6QP:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
- IMX6Q_GPR1_PCIE_SW_RST, 0);
+ regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_TEST_PD);
+ regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_REF_CLK_EN);
- usleep_range(200, 500);
- break;
- default:
- break;
+ return 0;
+}
+
+static int imx7d_pcie_core_reset(struct imx_pcie *imx_pcie, bool assert)
+{
+ struct dw_pcie *pci = imx_pcie->pci;
+ struct device *dev = pci->dev;
+
+ if (assert)
+ return 0;
+
+ /*
+ * Workaround for ERR010728 (IMX7DS_2N09P, Rev. 1.1, 4/2023):
+ *
+ * PCIe: PLL may fail to lock under corner conditions.
+ *
+ * Initial VCO oscillation may fail under corner conditions such as
+ * cold temperature which will cause the PCIe PLL fail to lock in the
+ * initialization phase.
+ *
+ * The Duty-cycle Corrector calibration must be disabled.
+ *
+ * 1. De-assert the G_RST signal by clearing
+ * SRC_PCIEPHY_RCR[PCIEPHY_G_RST].
+ * 2. De-assert DCC_FB_EN by writing data “0x29” to the register
+ * address 0x306d0014 (PCIE_PHY_CMN_REG4).
+ * 3. Assert RX_EQS, RX_EQ_SEL by writing data “0x48” to the register
+ * address 0x306d0090 (PCIE_PHY_CMN_REG24).
+ * 4. Assert ATT_MODE by writing data “0xbc” to the register
+ * address 0x306d0098 (PCIE_PHY_CMN_REG26).
+ * 5. De-assert the CMN_RST signal by clearing register bit
+ * SRC_PCIEPHY_RCR[PCIEPHY_BTN]
+ */
+
+ if (likely(imx_pcie->phy_base)) {
+ /* De-assert DCC_FB_EN */
+ writel(PCIE_PHY_CMN_REG4_DCC_FB_EN, imx_pcie->phy_base + PCIE_PHY_CMN_REG4);
+ /* Assert RX_EQS and RX_EQS_SEL */
+ writel(PCIE_PHY_CMN_REG24_RX_EQ_SEL | PCIE_PHY_CMN_REG24_RX_EQ,
+ imx_pcie->phy_base + PCIE_PHY_CMN_REG24);
+ /* Assert ATT_MODE */
+ writel(PCIE_PHY_CMN_REG26_ATT_MODE, imx_pcie->phy_base + PCIE_PHY_CMN_REG26);
+ } else {
+ dev_warn(dev, "Unable to apply ERR010728 workaround. DT missing fsl,imx7d-pcie-phy phandle ?\n");
}
+ imx7d_pcie_wait_for_phy_pll_lock(imx_pcie);
+ return 0;
+}
+
+static void imx_pcie_assert_core_reset(struct imx_pcie *imx_pcie)
+{
+ reset_control_assert(imx_pcie->pciephy_reset);
+ reset_control_assert(imx_pcie->apps_reset);
+
+ if (imx_pcie->drvdata->core_reset)
+ imx_pcie->drvdata->core_reset(imx_pcie, true);
+
+ /* Some boards don't have PCIe reset GPIO. */
+ gpiod_set_value_cansleep(imx_pcie->reset_gpiod, 1);
+}
+
+static int imx_pcie_deassert_core_reset(struct imx_pcie *imx_pcie)
+{
+ reset_control_deassert(imx_pcie->pciephy_reset);
+ reset_control_deassert(imx_pcie->apps_reset);
+
+ if (imx_pcie->drvdata->core_reset)
+ imx_pcie->drvdata->core_reset(imx_pcie, false);
/* Some boards don't have PCIe reset GPIO. */
- if (gpio_is_valid(imx6_pcie->reset_gpio)) {
+ if (imx_pcie->reset_gpiod) {
msleep(100);
- gpio_set_value_cansleep(imx6_pcie->reset_gpio,
- !imx6_pcie->gpio_active_high);
+ gpiod_set_value_cansleep(imx_pcie->reset_gpiod, 0);
/* Wait for 100ms after PERST# deassertion (PCIe r5.0, 6.6.1) */
msleep(100);
}
@@ -782,9 +804,9 @@ static int imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
return 0;
}
-static int imx6_pcie_wait_for_speed_change(struct imx6_pcie *imx6_pcie)
+static int imx_pcie_wait_for_speed_change(struct imx_pcie *imx_pcie)
{
- struct dw_pcie *pci = imx6_pcie->pci;
+ struct dw_pcie *pci = imx_pcie->pci;
struct device *dev = pci->dev;
u32 tmp;
unsigned int retries;
@@ -801,33 +823,38 @@ static int imx6_pcie_wait_for_speed_change(struct imx6_pcie *imx6_pcie)
return -ETIMEDOUT;
}
-static void imx6_pcie_ltssm_enable(struct device *dev)
+static void imx_pcie_ltssm_enable(struct device *dev)
{
- struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
- const struct imx6_pcie_drvdata *drvdata = imx6_pcie->drvdata;
+ struct imx_pcie *imx_pcie = dev_get_drvdata(dev);
+ const struct imx_pcie_drvdata *drvdata = imx_pcie->drvdata;
+ u8 offset = dw_pcie_find_capability(imx_pcie->pci, PCI_CAP_ID_EXP);
+ u32 tmp;
+ tmp = dw_pcie_readl_dbi(imx_pcie->pci, offset + PCI_EXP_LNKCAP);
+ phy_set_speed(imx_pcie->phy, FIELD_GET(PCI_EXP_LNKCAP_SLS, tmp));
if (drvdata->ltssm_mask)
- regmap_update_bits(imx6_pcie->iomuxc_gpr, drvdata->ltssm_off, drvdata->ltssm_mask,
+ regmap_update_bits(imx_pcie->iomuxc_gpr, drvdata->ltssm_off, drvdata->ltssm_mask,
drvdata->ltssm_mask);
- reset_control_deassert(imx6_pcie->apps_reset);
+ reset_control_deassert(imx_pcie->apps_reset);
}
-static void imx6_pcie_ltssm_disable(struct device *dev)
+static void imx_pcie_ltssm_disable(struct device *dev)
{
- struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
- const struct imx6_pcie_drvdata *drvdata = imx6_pcie->drvdata;
+ struct imx_pcie *imx_pcie = dev_get_drvdata(dev);
+ const struct imx_pcie_drvdata *drvdata = imx_pcie->drvdata;
+ phy_set_speed(imx_pcie->phy, 0);
if (drvdata->ltssm_mask)
- regmap_update_bits(imx6_pcie->iomuxc_gpr, drvdata->ltssm_off,
+ regmap_update_bits(imx_pcie->iomuxc_gpr, drvdata->ltssm_off,
drvdata->ltssm_mask, 0);
- reset_control_assert(imx6_pcie->apps_reset);
+ reset_control_assert(imx_pcie->apps_reset);
}
-static int imx6_pcie_start_link(struct dw_pcie *pci)
+static int imx_pcie_start_link(struct dw_pcie *pci)
{
- struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);
+ struct imx_pcie *imx_pcie = to_imx_pcie(pci);
struct device *dev = pci->dev;
u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
u32 tmp;
@@ -846,18 +873,18 @@ static int imx6_pcie_start_link(struct dw_pcie *pci)
dw_pcie_dbi_ro_wr_dis(pci);
/* Start LTSSM. */
- imx6_pcie_ltssm_enable(dev);
+ imx_pcie_ltssm_enable(dev);
ret = dw_pcie_wait_for_link(pci);
if (ret)
goto err_reset_phy;
- if (pci->link_gen > 1) {
+ if (pci->max_link_speed > 1) {
/* Allow faster modes after the link is up */
dw_pcie_dbi_ro_wr_en(pci);
tmp = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
tmp &= ~PCI_EXP_LNKCAP_SLS;
- tmp |= pci->link_gen;
+ tmp |= pci->max_link_speed;
dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, tmp);
/*
@@ -869,8 +896,9 @@ static int imx6_pcie_start_link(struct dw_pcie *pci)
dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, tmp);
dw_pcie_dbi_ro_wr_dis(pci);
- if (imx6_pcie->drvdata->flags &
- IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE) {
+ if (imx_pcie->drvdata->flags &
+ IMX_PCIE_FLAG_IMX_SPEED_CHANGE) {
+
/*
* On i.MX7, DIRECT_SPEED_CHANGE behaves differently
* from i.MX6 family when no link speed transition
@@ -879,8 +907,7 @@ static int imx6_pcie_start_link(struct dw_pcie *pci)
* which will cause the following code to report false
* failure.
*/
-
- ret = imx6_pcie_wait_for_speed_change(imx6_pcie);
+ ret = imx_pcie_wait_for_speed_change(imx_pcie);
if (ret) {
dev_err(dev, "Failed to bring link up!\n");
goto err_reset_phy;
@@ -895,37 +922,213 @@ static int imx6_pcie_start_link(struct dw_pcie *pci)
dev_info(dev, "Link: Only Gen1 is enabled\n");
}
- imx6_pcie->link_is_up = true;
tmp = dw_pcie_readw_dbi(pci, offset + PCI_EXP_LNKSTA);
dev_info(dev, "Link up, Gen%i\n", tmp & PCI_EXP_LNKSTA_CLS);
return 0;
err_reset_phy:
- imx6_pcie->link_is_up = false;
dev_dbg(dev, "PHY DEBUG_R0=0x%08x DEBUG_R1=0x%08x\n",
dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG0),
dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG1));
- imx6_pcie_reset_phy(imx6_pcie);
+ imx_pcie_reset_phy(imx_pcie);
return 0;
}
-static void imx6_pcie_stop_link(struct dw_pcie *pci)
+static void imx_pcie_stop_link(struct dw_pcie *pci)
{
struct device *dev = pci->dev;
/* Turn off PCIe LTSSM */
- imx6_pcie_ltssm_disable(dev);
+ imx_pcie_ltssm_disable(dev);
+}
+
+static int imx_pcie_add_lut(struct imx_pcie *imx_pcie, u16 rid, u8 sid)
+{
+ struct dw_pcie *pci = imx_pcie->pci;
+ struct device *dev = pci->dev;
+ u32 data1, data2;
+ int free = -1;
+ int i;
+
+ if (sid >= 64) {
+ dev_err(dev, "Invalid SID for index %d\n", sid);
+ return -EINVAL;
+ }
+
+ guard(mutex)(&imx_pcie->lock);
+
+ /*
+ * Iterate through all LUT entries to check for duplicate RID and
+ * identify the first available entry. Configure this available entry
+ * immediately after verification to avoid rescanning it.
+ */
+ for (i = 0; i < IMX95_MAX_LUT; i++) {
+ regmap_write(imx_pcie->iomuxc_gpr,
+ IMX95_PE0_LUT_ACSCTRL, IMX95_PEO_LUT_RWA | i);
+ regmap_read(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA1, &data1);
+
+ if (!(data1 & IMX95_PE0_LUT_VLD)) {
+ if (free < 0)
+ free = i;
+ continue;
+ }
+
+ regmap_read(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA2, &data2);
+
+ /* Do not add duplicate RID */
+ if (rid == FIELD_GET(IMX95_PE0_LUT_REQID, data2)) {
+ dev_warn(dev, "Existing LUT entry available for RID (%d)", rid);
+ return 0;
+ }
+ }
+
+ if (free < 0) {
+ dev_err(dev, "LUT entry is not available\n");
+ return -ENOSPC;
+ }
+
+ data1 = FIELD_PREP(IMX95_PE0_LUT_DAC_ID, 0);
+ data1 |= FIELD_PREP(IMX95_PE0_LUT_STREAM_ID, sid);
+ data1 |= IMX95_PE0_LUT_VLD;
+ regmap_write(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA1, data1);
+
+ data2 = IMX95_PE0_LUT_MASK; /* Match all bits of RID */
+ data2 |= FIELD_PREP(IMX95_PE0_LUT_REQID, rid);
+ regmap_write(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA2, data2);
+
+ regmap_write(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_ACSCTRL, free);
+
+ return 0;
+}
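For readers tracing the LUT encoding, the DATA1/DATA2 words composed above with FIELD_PREP() can be decoded back with the matching FIELD_GET() accessors. The helper below is a hypothetical sketch for illustration only (the *_example names are not part of the driver); it assumes the IMX95_PE0_LUT_* field masks defined earlier in this file.

#include <linux/bitfield.h>
#include <linux/types.h>

struct imx_lut_entry_example {
	bool valid;
	u8 stream_id;
	u16 rid;
};

static struct imx_lut_entry_example imx_lut_decode_example(u32 data1, u32 data2)
{
	struct imx_lut_entry_example e = {
		/* DATA1 holds the valid flag and the 6-bit streamID */
		.valid = !!(data1 & IMX95_PE0_LUT_VLD),
		.stream_id = FIELD_GET(IMX95_PE0_LUT_STREAM_ID, data1),
		/* DATA2 holds the Requester ID matched against incoming TLPs */
		.rid = FIELD_GET(IMX95_PE0_LUT_REQID, data2),
	};

	return e;
}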
+
+static void imx_pcie_remove_lut(struct imx_pcie *imx_pcie, u16 rid)
+{
+ u32 data2;
+ int i;
+
+ guard(mutex)(&imx_pcie->lock);
+
+ for (i = 0; i < IMX95_MAX_LUT; i++) {
+ regmap_write(imx_pcie->iomuxc_gpr,
+ IMX95_PE0_LUT_ACSCTRL, IMX95_PEO_LUT_RWA | i);
+ regmap_read(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA2, &data2);
+ if (FIELD_GET(IMX95_PE0_LUT_REQID, data2) == rid) {
+ regmap_write(imx_pcie->iomuxc_gpr,
+ IMX95_PE0_LUT_DATA1, 0);
+ regmap_write(imx_pcie->iomuxc_gpr,
+ IMX95_PE0_LUT_DATA2, 0);
+ regmap_write(imx_pcie->iomuxc_gpr,
+ IMX95_PE0_LUT_ACSCTRL, i);
+
+ break;
+ }
+ }
+}
+
+static int imx_pcie_enable_device(struct pci_host_bridge *bridge,
+ struct pci_dev *pdev)
+{
+ struct imx_pcie *imx_pcie = to_imx_pcie(to_dw_pcie_from_pp(bridge->sysdata));
+ u32 sid_i, sid_m, rid = pci_dev_id(pdev);
+ struct device_node *target;
+ struct device *dev;
+ int err_i, err_m;
+ u32 sid = 0;
+
+ dev = imx_pcie->pci->dev;
+
+ target = NULL;
+ err_i = of_map_id(dev->of_node, rid, "iommu-map", "iommu-map-mask",
+ &target, &sid_i);
+ if (target) {
+ of_node_put(target);
+ } else {
+ /*
+ * "target == NULL && err_i == 0" means RID out of map range.
+ * A 1:1 RID-to-streamID mapping would then be needed, but the
+ * hardware can't support that because the streamID is only 6 bits.
+ */
+ err_i = -EINVAL;
+ }
+
+ target = NULL;
+ err_m = of_map_id(dev->of_node, rid, "msi-map", "msi-map-mask",
+ &target, &sid_m);
+
+ /*
+ * err_m target
+ * 0 NULL RID out of range. Use 1:1 map RID to
+ * streamID, Current hardware can't
+ * support it, so return -EINVAL.
+ * != 0 NULL msi-map does not exist, use built-in MSI
+ * 0 != NULL Get correct streamID from RID
+ * != 0 != NULL Invalid combination
+ */
+ if (!err_m && !target)
+ return -EINVAL;
+ else if (target)
+ of_node_put(target); /* Found streamID map entry for RID in msi-map */
+
+ /*
+ * msi-map iommu-map
+ * N N DWC MSI Ctrl
+ * Y Y ITS + SMMU, require the same SID
+ * Y N ITS
+ * N Y DWC MSI Ctrl + SMMU
+ */
+ if (err_i && err_m)
+ return 0;
+
+ if (!err_i && !err_m) {
+ /*
+ * Glue Layer
+ * <==========>
+ * ┌─────┐ ┌──────────┐
+ * │ LUT │ 6-bit streamID │ │
+ * │ │─────────────────►│ MSI │
+ * └─────┘ 2-bit ctrl ID │ │
+ * ┌───────────►│ │
+ * (i.MX95) │ │ │
+ * 00 PCIe0 │ │ │
+ * 01 ENETC │ │ │
+ * 10 PCIe1 │ │ │
+ * │ └──────────┘
+ * The MSI glue layer automatically prepends a 2-bit controller ID to
+ * the streamID, so mask off these 2 bits to get the streamID. The
+ * IOMMU glue layer doesn't do that.
+ */
+ if (sid_i != (sid_m & IMX95_SID_MASK)) {
+ dev_err(dev, "iommu-map and msi-map entries mismatch!\n");
+ return -EINVAL;
+ }
+ }
+
+ if (!err_i)
+ sid = sid_i;
+ else if (!err_m)
+ sid = sid_m & IMX95_SID_MASK;
+
+ return imx_pcie_add_lut(imx_pcie, rid, sid);
+}
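The streamID selection above boils down to the decision tables in the comments. The helper below restates it in code purely for illustration; it is hypothetical, not part of the patch, assumes IMX95_SID_MASK as defined in this driver, and returns -ENOENT where imx_pcie_enable_device() simply skips LUT programming.

static int imx_pcie_pick_sid_example(int err_iommu, u32 sid_iommu,
				     int err_msi, u32 sid_msi, u32 *sid)
{
	/* Neither iommu-map nor msi-map: keep the built-in DWC MSI controller */
	if (err_iommu && err_msi)
		return -ENOENT;

	/*
	 * Both maps present: they must agree once the 2-bit controller ID
	 * added by the MSI glue layer is masked off.
	 */
	if (!err_iommu && !err_msi && sid_iommu != (sid_msi & IMX95_SID_MASK))
		return -EINVAL;

	*sid = !err_iommu ? sid_iommu : (sid_msi & IMX95_SID_MASK);
	return 0;
}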
+
+static void imx_pcie_disable_device(struct pci_host_bridge *bridge,
+ struct pci_dev *pdev)
+{
+ struct imx_pcie *imx_pcie;
+
+ imx_pcie = to_imx_pcie(to_dw_pcie_from_pp(bridge->sysdata));
+ imx_pcie_remove_lut(imx_pcie, pci_dev_id(pdev));
}
-static int imx6_pcie_host_init(struct dw_pcie_rp *pp)
+static int imx_pcie_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct device *dev = pci->dev;
- struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);
+ struct imx_pcie *imx_pcie = to_imx_pcie(pci);
int ret;
- if (imx6_pcie->vpcie) {
- ret = regulator_enable(imx6_pcie->vpcie);
+ if (imx_pcie->vpcie) {
+ ret = regulator_enable(imx_pcie->vpcie);
if (ret) {
dev_err(dev, "failed to enable vpcie regulator: %d\n",
ret);
@@ -933,83 +1136,117 @@ static int imx6_pcie_host_init(struct dw_pcie_rp *pp)
}
}
- imx6_pcie_assert_core_reset(imx6_pcie);
+ if (pp->bridge && imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_LUT)) {
+ pp->bridge->enable_device = imx_pcie_enable_device;
+ pp->bridge->disable_device = imx_pcie_disable_device;
+ }
+
+ imx_pcie_assert_core_reset(imx_pcie);
- if (imx6_pcie->drvdata->init_phy)
- imx6_pcie->drvdata->init_phy(imx6_pcie);
+ if (imx_pcie->drvdata->init_phy)
+ imx_pcie->drvdata->init_phy(imx_pcie);
- imx6_pcie_configure_type(imx6_pcie);
+ imx_pcie_configure_type(imx_pcie);
- ret = imx6_pcie_clk_enable(imx6_pcie);
+ ret = imx_pcie_clk_enable(imx_pcie);
if (ret) {
dev_err(dev, "unable to enable pcie clocks: %d\n", ret);
goto err_reg_disable;
}
- if (imx6_pcie->phy) {
- ret = phy_init(imx6_pcie->phy);
+ if (imx_pcie->phy) {
+ ret = phy_init(imx_pcie->phy);
if (ret) {
dev_err(dev, "pcie PHY power up failed\n");
goto err_clk_disable;
}
- }
- if (imx6_pcie->phy) {
- ret = phy_power_on(imx6_pcie->phy);
+ ret = phy_set_mode_ext(imx_pcie->phy, PHY_MODE_PCIE,
+ imx_pcie->drvdata->mode == DW_PCIE_EP_TYPE ?
+ PHY_MODE_PCIE_EP : PHY_MODE_PCIE_RC);
+ if (ret) {
+ dev_err(dev, "unable to set PCIe PHY mode\n");
+ goto err_phy_exit;
+ }
+
+ ret = phy_power_on(imx_pcie->phy);
if (ret) {
dev_err(dev, "waiting for PHY ready timeout!\n");
- goto err_phy_off;
+ goto err_phy_exit;
}
}
- ret = imx6_pcie_deassert_core_reset(imx6_pcie);
+ ret = imx_pcie_deassert_core_reset(imx_pcie);
if (ret < 0) {
dev_err(dev, "pcie deassert core reset failed: %d\n", ret);
goto err_phy_off;
}
- imx6_setup_phy_mpll(imx6_pcie);
+ imx_setup_phy_mpll(imx_pcie);
return 0;
err_phy_off:
- if (imx6_pcie->phy)
- phy_exit(imx6_pcie->phy);
+ phy_power_off(imx_pcie->phy);
+err_phy_exit:
+ phy_exit(imx_pcie->phy);
err_clk_disable:
- imx6_pcie_clk_disable(imx6_pcie);
+ imx_pcie_clk_disable(imx_pcie);
err_reg_disable:
- if (imx6_pcie->vpcie)
- regulator_disable(imx6_pcie->vpcie);
+ if (imx_pcie->vpcie)
+ regulator_disable(imx_pcie->vpcie);
return ret;
}
-static void imx6_pcie_host_exit(struct dw_pcie_rp *pp)
+static void imx_pcie_host_exit(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);
+ struct imx_pcie *imx_pcie = to_imx_pcie(pci);
- if (imx6_pcie->phy) {
- if (phy_power_off(imx6_pcie->phy))
+ if (imx_pcie->phy) {
+ if (phy_power_off(imx_pcie->phy))
dev_err(pci->dev, "unable to power off PHY\n");
- phy_exit(imx6_pcie->phy);
+ phy_exit(imx_pcie->phy);
}
- imx6_pcie_clk_disable(imx6_pcie);
+ imx_pcie_clk_disable(imx_pcie);
+
+ if (imx_pcie->vpcie)
+ regulator_disable(imx_pcie->vpcie);
+}
- if (imx6_pcie->vpcie)
- regulator_disable(imx6_pcie->vpcie);
+/*
+ * In old DWC implementations, the PCIE_ATU_INHIBIT_PAYLOAD bit in the
+ * iATU Ctrl2 register is reserved, so the generic DWC implementation of sending the
+ * PME_Turn_Off message using a dummy MMIO write cannot be used.
+ */
+static void imx_pcie_pme_turn_off(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct imx_pcie *imx_pcie = to_imx_pcie(pci);
+
+ regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12, IMX6SX_GPR12_PCIE_PM_TURN_OFF);
+ regmap_clear_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12, IMX6SX_GPR12_PCIE_PM_TURN_OFF);
+
+ usleep_range(PCIE_PME_TO_L2_TIMEOUT_US/10, PCIE_PME_TO_L2_TIMEOUT_US);
}
-static const struct dw_pcie_host_ops imx6_pcie_host_ops = {
- .init = imx6_pcie_host_init,
- .deinit = imx6_pcie_host_exit,
+static const struct dw_pcie_host_ops imx_pcie_host_ops = {
+ .init = imx_pcie_host_init,
+ .deinit = imx_pcie_host_exit,
+ .pme_turn_off = imx_pcie_pme_turn_off,
+};
+
+static const struct dw_pcie_host_ops imx_pcie_host_dw_pme_ops = {
+ .init = imx_pcie_host_init,
+ .deinit = imx_pcie_host_exit,
};
static const struct dw_pcie_ops dw_pcie_ops = {
- .start_link = imx6_pcie_start_link,
- .stop_link = imx6_pcie_stop_link,
+ .start_link = imx_pcie_start_link,
+ .stop_link = imx_pcie_stop_link,
};
-static void imx6_pcie_ep_init(struct dw_pcie_ep *ep)
+static void imx_pcie_ep_init(struct dw_pcie_ep *ep)
{
enum pci_barno bar;
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
@@ -1018,7 +1255,7 @@ static void imx6_pcie_ep_init(struct dw_pcie_ep *ep)
dw_pcie_ep_reset_bar(pci, bar);
}
-static int imx6_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
+static int imx_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
unsigned int type, u16 interrupt_num)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
@@ -1047,16 +1284,27 @@ static const struct pci_epc_features imx8m_pcie_epc_features = {
.align = SZ_64K,
};
+static const struct pci_epc_features imx8q_pcie_epc_features = {
+ .linkup_notifier = false,
+ .msi_capable = true,
+ .msix_capable = false,
+ .bar[BAR_1] = { .type = BAR_RESERVED, },
+ .bar[BAR_3] = { .type = BAR_RESERVED, },
+ .bar[BAR_5] = { .type = BAR_RESERVED, },
+ .align = SZ_64K,
+};
+
/*
- * BAR# | Default BAR enable | Default BAR Type | Default BAR Size | BAR Sizing Scheme
- * ================================================================================================
- * BAR0 | Enable | 64-bit | 1 MB | Programmable Size
- * BAR1 | Disable | 32-bit | 64 KB | Fixed Size
- * BAR1 should be disabled if BAR0 is 64bit.
- * BAR2 | Enable | 32-bit | 1 MB | Programmable Size
- * BAR3 | Enable | 32-bit | 64 KB | Programmable Size
- * BAR4 | Enable | 32-bit | 1M | Programmable Size
- * BAR5 | Enable | 32-bit | 64 KB | Programmable Size
+ * | Default | Default | Default | BAR Sizing
+ * BAR# | Enable? | Type | Size | Scheme
+ * =======================================================
+ * BAR0 | Enable | 64-bit | 1 MB | Programmable Size
+ * BAR1 | Disable | 32-bit | 64 KB | Fixed Size
+ * (BAR1 should be disabled if BAR0 is 64-bit)
+ * BAR2 | Enable | 32-bit | 1 MB | Programmable Size
+ * BAR3 | Enable | 32-bit | 64 KB | Programmable Size
+ * BAR4 | Enable | 32-bit | 1 MB | Programmable Size
+ * BAR5 | Enable | 32-bit | 64 KB | Programmable Size
*/
static const struct pci_epc_features imx95_pcie_epc_features = {
.msi_capable = true,
@@ -1065,183 +1313,153 @@ static const struct pci_epc_features imx95_pcie_epc_features = {
};
static const struct pci_epc_features*
-imx6_pcie_ep_get_features(struct dw_pcie_ep *ep)
+imx_pcie_ep_get_features(struct dw_pcie_ep *ep)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);
+ struct imx_pcie *imx_pcie = to_imx_pcie(pci);
- return imx6_pcie->drvdata->epc_features;
+ return imx_pcie->drvdata->epc_features;
}
static const struct dw_pcie_ep_ops pcie_ep_ops = {
- .init = imx6_pcie_ep_init,
- .raise_irq = imx6_pcie_ep_raise_irq,
- .get_features = imx6_pcie_ep_get_features,
+ .init = imx_pcie_ep_init,
+ .raise_irq = imx_pcie_ep_raise_irq,
+ .get_features = imx_pcie_ep_get_features,
};
-static int imx6_add_pcie_ep(struct imx6_pcie *imx6_pcie,
+static int imx_add_pcie_ep(struct imx_pcie *imx_pcie,
struct platform_device *pdev)
{
int ret;
- unsigned int pcie_dbi2_offset;
struct dw_pcie_ep *ep;
- struct dw_pcie *pci = imx6_pcie->pci;
+ struct dw_pcie *pci = imx_pcie->pci;
struct dw_pcie_rp *pp = &pci->pp;
struct device *dev = pci->dev;
- imx6_pcie_host_init(pp);
+ imx_pcie_host_init(pp);
ep = &pci->ep;
ep->ops = &pcie_ep_ops;
- switch (imx6_pcie->drvdata->variant) {
- case IMX8MQ_EP:
- case IMX8MM_EP:
- case IMX8MP_EP:
- pcie_dbi2_offset = SZ_1M;
- break;
- default:
- pcie_dbi2_offset = SZ_4K;
- break;
- }
-
- pci->dbi_base2 = pci->dbi_base + pcie_dbi2_offset;
-
- /*
- * FIXME: Ideally, dbi2 base address should come from DT. But since only IMX95 is defining
- * "dbi2" in DT, "dbi_base2" is set to NULL here for that platform alone so that the DWC
- * core code can fetch that from DT. But once all platform DTs were fixed, this and the
- * above "dbi_base2" setting should be removed.
- */
- if (device_property_match_string(dev, "reg-names", "dbi2") >= 0)
- pci->dbi_base2 = NULL;
-
- if (imx6_check_flag(imx6_pcie, IMX6_PCIE_FLAG_SUPPORT_64BIT))
+ if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_SUPPORT_64BIT))
dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
+ ep->page_size = imx_pcie->drvdata->epc_features->align;
+
ret = dw_pcie_ep_init(ep);
if (ret) {
dev_err(dev, "failed to initialize endpoint\n");
return ret;
}
- /* Start LTSSM. */
- imx6_pcie_ltssm_enable(dev);
-
- return 0;
-}
-static void imx6_pcie_pm_turnoff(struct imx6_pcie *imx6_pcie)
-{
- struct device *dev = imx6_pcie->pci->dev;
-
- /* Some variants have a turnoff reset in DT */
- if (imx6_pcie->turnoff_reset) {
- reset_control_assert(imx6_pcie->turnoff_reset);
- reset_control_deassert(imx6_pcie->turnoff_reset);
- goto pm_turnoff_sleep;
+ ret = dw_pcie_ep_init_registers(ep);
+ if (ret) {
+ dev_err(dev, "Failed to initialize DWC endpoint registers\n");
+ dw_pcie_ep_deinit(ep);
+ return ret;
}
- /* Others poke directly at IOMUXC registers */
- switch (imx6_pcie->drvdata->variant) {
- case IMX6SX:
- case IMX6QP:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX6SX_GPR12_PCIE_PM_TURN_OFF,
- IMX6SX_GPR12_PCIE_PM_TURN_OFF);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX6SX_GPR12_PCIE_PM_TURN_OFF, 0);
- break;
- default:
- dev_err(dev, "PME_Turn_Off not implemented\n");
- return;
- }
+ pci_epc_init_notify(ep->epc);
- /*
- * Components with an upstream port must respond to
- * PME_Turn_Off with PME_TO_Ack but we can't check.
- *
- * The standard recommends a 1-10ms timeout after which to
- * proceed anyway as if acks were received.
- */
-pm_turnoff_sleep:
- usleep_range(1000, 10000);
+ /* Start LTSSM. */
+ imx_pcie_ltssm_enable(dev);
+
+ return 0;
}
-static void imx6_pcie_msi_save_restore(struct imx6_pcie *imx6_pcie, bool save)
+static void imx_pcie_msi_save_restore(struct imx_pcie *imx_pcie, bool save)
{
u8 offset;
u16 val;
- struct dw_pcie *pci = imx6_pcie->pci;
+ struct dw_pcie *pci = imx_pcie->pci;
if (pci_msi_enabled()) {
offset = dw_pcie_find_capability(pci, PCI_CAP_ID_MSI);
if (save) {
val = dw_pcie_readw_dbi(pci, offset + PCI_MSI_FLAGS);
- imx6_pcie->msi_ctrl = val;
+ imx_pcie->msi_ctrl = val;
} else {
dw_pcie_dbi_ro_wr_en(pci);
- val = imx6_pcie->msi_ctrl;
+ val = imx_pcie->msi_ctrl;
dw_pcie_writew_dbi(pci, offset + PCI_MSI_FLAGS, val);
dw_pcie_dbi_ro_wr_dis(pci);
}
}
}
-static int imx6_pcie_suspend_noirq(struct device *dev)
+static int imx_pcie_suspend_noirq(struct device *dev)
{
- struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
- struct dw_pcie_rp *pp = &imx6_pcie->pci->pp;
+ struct imx_pcie *imx_pcie = dev_get_drvdata(dev);
- if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_SUPPORTS_SUSPEND))
+ if (!(imx_pcie->drvdata->flags & IMX_PCIE_FLAG_SUPPORTS_SUSPEND))
return 0;
- imx6_pcie_msi_save_restore(imx6_pcie, true);
- imx6_pcie_pm_turnoff(imx6_pcie);
- imx6_pcie_stop_link(imx6_pcie->pci);
- imx6_pcie_host_exit(pp);
+ imx_pcie_msi_save_restore(imx_pcie, true);
+ if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_BROKEN_SUSPEND)) {
+ /*
+ * The minimum for a workaround would be to set PERST# and to
+ * set the PCIE_TEST_PD flag. However, we can also disable the
+ * clock which saves some power.
+ */
+ imx_pcie_assert_core_reset(imx_pcie);
+ imx_pcie->drvdata->enable_ref_clk(imx_pcie, false);
+ } else {
+ return dw_pcie_suspend_noirq(imx_pcie->pci);
+ }
return 0;
}
-static int imx6_pcie_resume_noirq(struct device *dev)
+static int imx_pcie_resume_noirq(struct device *dev)
{
int ret;
- struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
- struct dw_pcie_rp *pp = &imx6_pcie->pci->pp;
+ struct imx_pcie *imx_pcie = dev_get_drvdata(dev);
- if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_SUPPORTS_SUSPEND))
+ if (!(imx_pcie->drvdata->flags & IMX_PCIE_FLAG_SUPPORTS_SUSPEND))
return 0;
- ret = imx6_pcie_host_init(pp);
- if (ret)
- return ret;
- imx6_pcie_msi_save_restore(imx6_pcie, false);
- dw_pcie_setup_rc(pp);
+ if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_BROKEN_SUSPEND)) {
+ ret = imx_pcie->drvdata->enable_ref_clk(imx_pcie, true);
+ if (ret)
+ return ret;
+ ret = imx_pcie_deassert_core_reset(imx_pcie);
+ if (ret)
+ return ret;
- if (imx6_pcie->link_is_up)
- imx6_pcie_start_link(imx6_pcie->pci);
+ /*
+ * Using PCIE_TEST_PD seems to disable MSI and power down the
+ * root complex. This is why we have to set up the RC again and
+ * why we have to restore the MSI register.
+ */
+ ret = dw_pcie_setup_rc(&imx_pcie->pci->pp);
+ if (ret)
+ return ret;
+ } else {
+ ret = dw_pcie_resume_noirq(imx_pcie->pci);
+ if (ret)
+ return ret;
+ }
+ imx_pcie_msi_save_restore(imx_pcie, false);
return 0;
}
-static const struct dev_pm_ops imx6_pcie_pm_ops = {
- NOIRQ_SYSTEM_SLEEP_PM_OPS(imx6_pcie_suspend_noirq,
- imx6_pcie_resume_noirq)
+static const struct dev_pm_ops imx_pcie_pm_ops = {
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(imx_pcie_suspend_noirq,
+ imx_pcie_resume_noirq)
};
-static int imx6_pcie_probe(struct platform_device *pdev)
+static int imx_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct dw_pcie *pci;
- struct imx6_pcie *imx6_pcie;
+ struct imx_pcie *imx_pcie;
struct device_node *np;
- struct resource *dbi_base;
struct device_node *node = dev->of_node;
- int ret;
+ int ret, domain;
u16 val;
- int i;
- imx6_pcie = devm_kzalloc(dev, sizeof(*imx6_pcie), GFP_KERNEL);
- if (!imx6_pcie)
+ imx_pcie = devm_kzalloc(dev, sizeof(*imx_pcie), GFP_KERNEL);
+ if (!imx_pcie)
return -ENOMEM;
pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
@@ -1250,10 +1468,16 @@ static int imx6_pcie_probe(struct platform_device *pdev)
pci->dev = dev;
pci->ops = &dw_pcie_ops;
- pci->pp.ops = &imx6_pcie_host_ops;
- imx6_pcie->pci = pci;
- imx6_pcie->drvdata = of_device_get_match_data(dev);
+ imx_pcie->pci = pci;
+ imx_pcie->drvdata = of_device_get_match_data(dev);
+
+ mutex_init(&imx_pcie->lock);
+
+ if (imx_pcie->drvdata->ops)
+ pci->pp.ops = imx_pcie->drvdata->ops;
+ else
+ pci->pp.ops = &imx_pcie_host_dw_pme_ops;
/* Find the PHY if one is defined, only imx7d uses it */
np = of_parse_phandle(node, "fsl,imx7d-pcie-phy", 0);
@@ -1265,93 +1489,68 @@ static int imx6_pcie_probe(struct platform_device *pdev)
dev_err(dev, "Unable to map PCIe PHY\n");
return ret;
}
- imx6_pcie->phy_base = devm_ioremap_resource(dev, &res);
- if (IS_ERR(imx6_pcie->phy_base))
- return PTR_ERR(imx6_pcie->phy_base);
+ imx_pcie->phy_base = devm_ioremap_resource(dev, &res);
+ if (IS_ERR(imx_pcie->phy_base))
+ return PTR_ERR(imx_pcie->phy_base);
}
- pci->dbi_base = devm_platform_get_and_ioremap_resource(pdev, 0, &dbi_base);
- if (IS_ERR(pci->dbi_base))
- return PTR_ERR(pci->dbi_base);
-
/* Fetch GPIOs */
- imx6_pcie->reset_gpio = of_get_named_gpio(node, "reset-gpio", 0);
- imx6_pcie->gpio_active_high = of_property_read_bool(node,
- "reset-gpio-active-high");
- if (gpio_is_valid(imx6_pcie->reset_gpio)) {
- ret = devm_gpio_request_one(dev, imx6_pcie->reset_gpio,
- imx6_pcie->gpio_active_high ?
- GPIOF_OUT_INIT_HIGH :
- GPIOF_OUT_INIT_LOW,
- "PCIe reset");
- if (ret) {
- dev_err(dev, "unable to get reset gpio\n");
- return ret;
- }
- } else if (imx6_pcie->reset_gpio == -EPROBE_DEFER) {
- return imx6_pcie->reset_gpio;
- }
-
- if (imx6_pcie->drvdata->clks_cnt >= IMX6_PCIE_MAX_CLKS)
- return dev_err_probe(dev, -ENOMEM, "clks_cnt is too big\n");
-
- for (i = 0; i < imx6_pcie->drvdata->clks_cnt; i++)
- imx6_pcie->clks[i].id = imx6_pcie->drvdata->clk_names[i];
+ imx_pcie->reset_gpiod = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(imx_pcie->reset_gpiod))
+ return dev_err_probe(dev, PTR_ERR(imx_pcie->reset_gpiod),
+ "unable to get reset gpio\n");
+ gpiod_set_consumer_name(imx_pcie->reset_gpiod, "PCIe reset");
/* Fetch clocks */
- ret = devm_clk_bulk_get(dev, imx6_pcie->drvdata->clks_cnt, imx6_pcie->clks);
- if (ret)
- return ret;
-
- if (imx6_check_flag(imx6_pcie, IMX6_PCIE_FLAG_HAS_PHYDRV)) {
- imx6_pcie->phy = devm_phy_get(dev, "pcie-phy");
- if (IS_ERR(imx6_pcie->phy))
- return dev_err_probe(dev, PTR_ERR(imx6_pcie->phy),
+ imx_pcie->num_clks = devm_clk_bulk_get_all(dev, &imx_pcie->clks);
+ if (imx_pcie->num_clks < 0)
+ return dev_err_probe(dev, imx_pcie->num_clks,
+ "failed to get clocks\n");
+
+ if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_PHYDRV)) {
+ imx_pcie->phy = devm_phy_get(dev, "pcie-phy");
+ if (IS_ERR(imx_pcie->phy))
+ return dev_err_probe(dev, PTR_ERR(imx_pcie->phy),
"failed to get pcie phy\n");
}
- if (imx6_check_flag(imx6_pcie, IMX6_PCIE_FLAG_HAS_APP_RESET)) {
- imx6_pcie->apps_reset = devm_reset_control_get_exclusive(dev, "apps");
- if (IS_ERR(imx6_pcie->apps_reset))
- return dev_err_probe(dev, PTR_ERR(imx6_pcie->apps_reset),
+ if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_APP_RESET)) {
+ imx_pcie->apps_reset = devm_reset_control_get_exclusive(dev, "apps");
+ if (IS_ERR(imx_pcie->apps_reset))
+ return dev_err_probe(dev, PTR_ERR(imx_pcie->apps_reset),
"failed to get pcie apps reset control\n");
}
- if (imx6_check_flag(imx6_pcie, IMX6_PCIE_FLAG_HAS_PHY_RESET)) {
- imx6_pcie->pciephy_reset = devm_reset_control_get_exclusive(dev, "pciephy");
- if (IS_ERR(imx6_pcie->pciephy_reset))
- return dev_err_probe(dev, PTR_ERR(imx6_pcie->pciephy_reset),
+ if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_PHY_RESET)) {
+ imx_pcie->pciephy_reset = devm_reset_control_get_exclusive(dev, "pciephy");
+ if (IS_ERR(imx_pcie->pciephy_reset))
+ return dev_err_probe(dev, PTR_ERR(imx_pcie->pciephy_reset),
"Failed to get PCIEPHY reset control\n");
}
- switch (imx6_pcie->drvdata->variant) {
+ switch (imx_pcie->drvdata->variant) {
case IMX8MQ:
case IMX8MQ_EP:
- case IMX7D:
- if (dbi_base->start == IMX8MQ_PCIE2_BASE_ADDR)
- imx6_pcie->controller_id = 1;
+ domain = of_get_pci_domain_nr(node);
+ if (domain < 0 || domain > 1)
+ return dev_err_probe(dev, -ENODEV, "no \"linux,pci-domain\" property in devicetree\n");
+
+ imx_pcie->controller_id = domain;
break;
default:
break;
}
- /* Grab turnoff reset */
- imx6_pcie->turnoff_reset = devm_reset_control_get_optional_exclusive(dev, "turnoff");
- if (IS_ERR(imx6_pcie->turnoff_reset)) {
- dev_err(dev, "Failed to get TURNOFF reset control\n");
- return PTR_ERR(imx6_pcie->turnoff_reset);
- }
-
- if (imx6_pcie->drvdata->gpr) {
+ if (imx_pcie->drvdata->gpr) {
/* Grab GPR config register range */
- imx6_pcie->iomuxc_gpr =
- syscon_regmap_lookup_by_compatible(imx6_pcie->drvdata->gpr);
- if (IS_ERR(imx6_pcie->iomuxc_gpr))
- return dev_err_probe(dev, PTR_ERR(imx6_pcie->iomuxc_gpr),
+ imx_pcie->iomuxc_gpr =
+ syscon_regmap_lookup_by_compatible(imx_pcie->drvdata->gpr);
+ if (IS_ERR(imx_pcie->iomuxc_gpr))
+ return dev_err_probe(dev, PTR_ERR(imx_pcie->iomuxc_gpr),
"unable to find iomuxc registers\n");
}
- if (imx6_check_flag(imx6_pcie, IMX6_PCIE_FLAG_HAS_SERDES)) {
+ if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_SERDES)) {
void __iomem *off = devm_platform_ioremap_resource_byname(pdev, "app");
if (IS_ERR(off))
@@ -1364,62 +1563,64 @@ static int imx6_pcie_probe(struct platform_device *pdev)
.reg_stride = 4,
};
- imx6_pcie->iomuxc_gpr = devm_regmap_init_mmio(dev, off, &regmap_config);
- if (IS_ERR(imx6_pcie->iomuxc_gpr))
- return dev_err_probe(dev, PTR_ERR(imx6_pcie->iomuxc_gpr),
+ imx_pcie->iomuxc_gpr = devm_regmap_init_mmio(dev, off, &regmap_config);
+ if (IS_ERR(imx_pcie->iomuxc_gpr))
+ return dev_err_probe(dev, PTR_ERR(imx_pcie->iomuxc_gpr),
"unable to find iomuxc registers\n");
}
/* Grab PCIe PHY Tx Settings */
if (of_property_read_u32(node, "fsl,tx-deemph-gen1",
- &imx6_pcie->tx_deemph_gen1))
- imx6_pcie->tx_deemph_gen1 = 0;
+ &imx_pcie->tx_deemph_gen1))
+ imx_pcie->tx_deemph_gen1 = 0;
if (of_property_read_u32(node, "fsl,tx-deemph-gen2-3p5db",
- &imx6_pcie->tx_deemph_gen2_3p5db))
- imx6_pcie->tx_deemph_gen2_3p5db = 0;
+ &imx_pcie->tx_deemph_gen2_3p5db))
+ imx_pcie->tx_deemph_gen2_3p5db = 0;
if (of_property_read_u32(node, "fsl,tx-deemph-gen2-6db",
- &imx6_pcie->tx_deemph_gen2_6db))
- imx6_pcie->tx_deemph_gen2_6db = 20;
+ &imx_pcie->tx_deemph_gen2_6db))
+ imx_pcie->tx_deemph_gen2_6db = 20;
if (of_property_read_u32(node, "fsl,tx-swing-full",
- &imx6_pcie->tx_swing_full))
- imx6_pcie->tx_swing_full = 127;
+ &imx_pcie->tx_swing_full))
+ imx_pcie->tx_swing_full = 127;
if (of_property_read_u32(node, "fsl,tx-swing-low",
- &imx6_pcie->tx_swing_low))
- imx6_pcie->tx_swing_low = 127;
+ &imx_pcie->tx_swing_low))
+ imx_pcie->tx_swing_low = 127;
/* Limit link speed */
- pci->link_gen = 1;
- of_property_read_u32(node, "fsl,max-link-speed", &pci->link_gen);
-
- imx6_pcie->vpcie = devm_regulator_get_optional(&pdev->dev, "vpcie");
- if (IS_ERR(imx6_pcie->vpcie)) {
- if (PTR_ERR(imx6_pcie->vpcie) != -ENODEV)
- return PTR_ERR(imx6_pcie->vpcie);
- imx6_pcie->vpcie = NULL;
+ pci->max_link_speed = 1;
+ of_property_read_u32(node, "fsl,max-link-speed", &pci->max_link_speed);
+
+ imx_pcie->vpcie = devm_regulator_get_optional(&pdev->dev, "vpcie");
+ if (IS_ERR(imx_pcie->vpcie)) {
+ if (PTR_ERR(imx_pcie->vpcie) != -ENODEV)
+ return PTR_ERR(imx_pcie->vpcie);
+ imx_pcie->vpcie = NULL;
}
- imx6_pcie->vph = devm_regulator_get_optional(&pdev->dev, "vph");
- if (IS_ERR(imx6_pcie->vph)) {
- if (PTR_ERR(imx6_pcie->vph) != -ENODEV)
- return PTR_ERR(imx6_pcie->vph);
- imx6_pcie->vph = NULL;
+ imx_pcie->vph = devm_regulator_get_optional(&pdev->dev, "vph");
+ if (IS_ERR(imx_pcie->vph)) {
+ if (PTR_ERR(imx_pcie->vph) != -ENODEV)
+ return PTR_ERR(imx_pcie->vph);
+ imx_pcie->vph = NULL;
}
- platform_set_drvdata(pdev, imx6_pcie);
+ platform_set_drvdata(pdev, imx_pcie);
- ret = imx6_pcie_attach_pd(dev);
+ ret = imx_pcie_attach_pd(dev);
if (ret)
return ret;
- if (imx6_pcie->drvdata->mode == DW_PCIE_EP_TYPE) {
- ret = imx6_add_pcie_ep(imx6_pcie, pdev);
+ pci->use_parent_dt_ranges = true;
+ if (imx_pcie->drvdata->mode == DW_PCIE_EP_TYPE) {
+ ret = imx_add_pcie_ep(imx_pcie, pdev);
if (ret < 0)
return ret;
} else {
+ pci->pp.use_atu_msg = true;
ret = dw_pcie_host_init(&pci->pp);
if (ret < 0)
return ret;
@@ -1436,115 +1637,117 @@ static int imx6_pcie_probe(struct platform_device *pdev)
return 0;
}
-static void imx6_pcie_shutdown(struct platform_device *pdev)
+static void imx_pcie_shutdown(struct platform_device *pdev)
{
- struct imx6_pcie *imx6_pcie = platform_get_drvdata(pdev);
+ struct imx_pcie *imx_pcie = platform_get_drvdata(pdev);
/* bring down link, so bootloader gets clean state in case of reboot */
- imx6_pcie_assert_core_reset(imx6_pcie);
+ imx_pcie_assert_core_reset(imx_pcie);
}
-static const char * const imx6q_clks[] = {"pcie_bus", "pcie", "pcie_phy"};
-static const char * const imx8mm_clks[] = {"pcie_bus", "pcie", "pcie_aux"};
-static const char * const imx8mq_clks[] = {"pcie_bus", "pcie", "pcie_phy", "pcie_aux"};
-static const char * const imx6sx_clks[] = {"pcie_bus", "pcie", "pcie_phy", "pcie_inbound_axi"};
-
-static const struct imx6_pcie_drvdata drvdata[] = {
+static const struct imx_pcie_drvdata drvdata[] = {
[IMX6Q] = {
.variant = IMX6Q,
- .flags = IMX6_PCIE_FLAG_IMX6_PHY |
- IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE,
+ .flags = IMX_PCIE_FLAG_IMX_PHY |
+ IMX_PCIE_FLAG_IMX_SPEED_CHANGE |
+ IMX_PCIE_FLAG_BROKEN_SUSPEND |
+ IMX_PCIE_FLAG_SUPPORTS_SUSPEND,
.dbi_length = 0x200,
.gpr = "fsl,imx6q-iomuxc-gpr",
- .clk_names = imx6q_clks,
- .clks_cnt = ARRAY_SIZE(imx6q_clks),
.ltssm_off = IOMUXC_GPR12,
.ltssm_mask = IMX6Q_GPR12_PCIE_CTL_2,
.mode_off[0] = IOMUXC_GPR12,
.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
- .init_phy = imx6_pcie_init_phy,
+ .init_phy = imx_pcie_init_phy,
+ .enable_ref_clk = imx6q_pcie_enable_ref_clk,
+ .core_reset = imx6q_pcie_core_reset,
},
[IMX6SX] = {
.variant = IMX6SX,
- .flags = IMX6_PCIE_FLAG_IMX6_PHY |
- IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE |
- IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
+ .flags = IMX_PCIE_FLAG_IMX_PHY |
+ IMX_PCIE_FLAG_IMX_SPEED_CHANGE |
+ IMX_PCIE_FLAG_SUPPORTS_SUSPEND,
.gpr = "fsl,imx6q-iomuxc-gpr",
- .clk_names = imx6sx_clks,
- .clks_cnt = ARRAY_SIZE(imx6sx_clks),
.ltssm_off = IOMUXC_GPR12,
.ltssm_mask = IMX6Q_GPR12_PCIE_CTL_2,
.mode_off[0] = IOMUXC_GPR12,
.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
.init_phy = imx6sx_pcie_init_phy,
+ .enable_ref_clk = imx6sx_pcie_enable_ref_clk,
+ .core_reset = imx6sx_pcie_core_reset,
+ .ops = &imx_pcie_host_ops,
},
[IMX6QP] = {
.variant = IMX6QP,
- .flags = IMX6_PCIE_FLAG_IMX6_PHY |
- IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE |
- IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
+ .flags = IMX_PCIE_FLAG_IMX_PHY |
+ IMX_PCIE_FLAG_IMX_SPEED_CHANGE |
+ IMX_PCIE_FLAG_SUPPORTS_SUSPEND,
.dbi_length = 0x200,
.gpr = "fsl,imx6q-iomuxc-gpr",
- .clk_names = imx6q_clks,
- .clks_cnt = ARRAY_SIZE(imx6q_clks),
.ltssm_off = IOMUXC_GPR12,
.ltssm_mask = IMX6Q_GPR12_PCIE_CTL_2,
.mode_off[0] = IOMUXC_GPR12,
.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
- .init_phy = imx6_pcie_init_phy,
+ .init_phy = imx_pcie_init_phy,
+ .enable_ref_clk = imx6q_pcie_enable_ref_clk,
+ .core_reset = imx6qp_pcie_core_reset,
+ .ops = &imx_pcie_host_ops,
},
[IMX7D] = {
.variant = IMX7D,
- .flags = IMX6_PCIE_FLAG_SUPPORTS_SUSPEND |
- IMX6_PCIE_FLAG_HAS_APP_RESET |
- IMX6_PCIE_FLAG_HAS_PHY_RESET,
+ .flags = IMX_PCIE_FLAG_SUPPORTS_SUSPEND |
+ IMX_PCIE_FLAG_HAS_APP_RESET |
+ IMX_PCIE_FLAG_HAS_PHY_RESET,
.gpr = "fsl,imx7d-iomuxc-gpr",
- .clk_names = imx6q_clks,
- .clks_cnt = ARRAY_SIZE(imx6q_clks),
.mode_off[0] = IOMUXC_GPR12,
.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
- .init_phy = imx7d_pcie_init_phy,
+ .enable_ref_clk = imx7d_pcie_enable_ref_clk,
+ .core_reset = imx7d_pcie_core_reset,
},
[IMX8MQ] = {
.variant = IMX8MQ,
- .flags = IMX6_PCIE_FLAG_HAS_APP_RESET |
- IMX6_PCIE_FLAG_HAS_PHY_RESET,
+ .flags = IMX_PCIE_FLAG_HAS_APP_RESET |
+ IMX_PCIE_FLAG_HAS_PHY_RESET |
+ IMX_PCIE_FLAG_SUPPORTS_SUSPEND,
.gpr = "fsl,imx8mq-iomuxc-gpr",
- .clk_names = imx8mq_clks,
- .clks_cnt = ARRAY_SIZE(imx8mq_clks),
.mode_off[0] = IOMUXC_GPR12,
.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
.mode_off[1] = IOMUXC_GPR12,
.mode_mask[1] = IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE,
.init_phy = imx8mq_pcie_init_phy,
+ .enable_ref_clk = imx8mm_pcie_enable_ref_clk,
},
[IMX8MM] = {
.variant = IMX8MM,
- .flags = IMX6_PCIE_FLAG_SUPPORTS_SUSPEND |
- IMX6_PCIE_FLAG_HAS_PHYDRV |
- IMX6_PCIE_FLAG_HAS_APP_RESET,
+ .flags = IMX_PCIE_FLAG_SUPPORTS_SUSPEND |
+ IMX_PCIE_FLAG_HAS_PHYDRV |
+ IMX_PCIE_FLAG_HAS_APP_RESET,
.gpr = "fsl,imx8mm-iomuxc-gpr",
- .clk_names = imx8mm_clks,
- .clks_cnt = ARRAY_SIZE(imx8mm_clks),
.mode_off[0] = IOMUXC_GPR12,
.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
+ .enable_ref_clk = imx8mm_pcie_enable_ref_clk,
},
[IMX8MP] = {
.variant = IMX8MP,
- .flags = IMX6_PCIE_FLAG_SUPPORTS_SUSPEND |
- IMX6_PCIE_FLAG_HAS_PHYDRV |
- IMX6_PCIE_FLAG_HAS_APP_RESET,
+ .flags = IMX_PCIE_FLAG_SUPPORTS_SUSPEND |
+ IMX_PCIE_FLAG_HAS_PHYDRV |
+ IMX_PCIE_FLAG_HAS_APP_RESET,
.gpr = "fsl,imx8mp-iomuxc-gpr",
- .clk_names = imx8mm_clks,
- .clks_cnt = ARRAY_SIZE(imx8mm_clks),
.mode_off[0] = IOMUXC_GPR12,
.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
+ .enable_ref_clk = imx8mm_pcie_enable_ref_clk,
+ },
+ [IMX8Q] = {
+ .variant = IMX8Q,
+ .flags = IMX_PCIE_FLAG_HAS_PHYDRV |
+ IMX_PCIE_FLAG_CPU_ADDR_FIXUP |
+ IMX_PCIE_FLAG_SUPPORTS_SUSPEND,
},
[IMX95] = {
.variant = IMX95,
- .flags = IMX6_PCIE_FLAG_HAS_SERDES,
- .clk_names = imx8mq_clks,
- .clks_cnt = ARRAY_SIZE(imx8mq_clks),
+ .flags = IMX_PCIE_FLAG_HAS_SERDES |
+ IMX_PCIE_FLAG_HAS_LUT |
+ IMX_PCIE_FLAG_SUPPORTS_SUSPEND,
.ltssm_off = IMX95_PE0_GEN_CTRL_3,
.ltssm_mask = IMX95_PCIE_LTSSM_EN,
.mode_off[0] = IMX95_PE0_GEN_CTRL_1,
@@ -1553,47 +1756,50 @@ static const struct imx6_pcie_drvdata drvdata[] = {
},
[IMX8MQ_EP] = {
.variant = IMX8MQ_EP,
- .flags = IMX6_PCIE_FLAG_HAS_APP_RESET |
- IMX6_PCIE_FLAG_HAS_PHY_RESET,
+ .flags = IMX_PCIE_FLAG_HAS_APP_RESET |
+ IMX_PCIE_FLAG_HAS_PHY_RESET,
.mode = DW_PCIE_EP_TYPE,
.gpr = "fsl,imx8mq-iomuxc-gpr",
- .clk_names = imx8mq_clks,
- .clks_cnt = ARRAY_SIZE(imx8mq_clks),
.mode_off[0] = IOMUXC_GPR12,
.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
.mode_off[1] = IOMUXC_GPR12,
.mode_mask[1] = IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE,
.epc_features = &imx8m_pcie_epc_features,
.init_phy = imx8mq_pcie_init_phy,
+ .enable_ref_clk = imx8mm_pcie_enable_ref_clk,
},
[IMX8MM_EP] = {
.variant = IMX8MM_EP,
- .flags = IMX6_PCIE_FLAG_HAS_PHYDRV,
+ .flags = IMX_PCIE_FLAG_HAS_APP_RESET |
+ IMX_PCIE_FLAG_HAS_PHYDRV,
.mode = DW_PCIE_EP_TYPE,
.gpr = "fsl,imx8mm-iomuxc-gpr",
- .clk_names = imx8mm_clks,
- .clks_cnt = ARRAY_SIZE(imx8mm_clks),
.mode_off[0] = IOMUXC_GPR12,
.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
.epc_features = &imx8m_pcie_epc_features,
+ .enable_ref_clk = imx8mm_pcie_enable_ref_clk,
},
[IMX8MP_EP] = {
.variant = IMX8MP_EP,
- .flags = IMX6_PCIE_FLAG_HAS_PHYDRV,
+ .flags = IMX_PCIE_FLAG_HAS_APP_RESET |
+ IMX_PCIE_FLAG_HAS_PHYDRV,
.mode = DW_PCIE_EP_TYPE,
.gpr = "fsl,imx8mp-iomuxc-gpr",
- .clk_names = imx8mm_clks,
- .clks_cnt = ARRAY_SIZE(imx8mm_clks),
.mode_off[0] = IOMUXC_GPR12,
.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
.epc_features = &imx8m_pcie_epc_features,
+ .enable_ref_clk = imx8mm_pcie_enable_ref_clk,
+ },
+ [IMX8Q_EP] = {
+ .variant = IMX8Q_EP,
+ .flags = IMX_PCIE_FLAG_HAS_PHYDRV,
+ .mode = DW_PCIE_EP_TYPE,
+ .epc_features = &imx8q_pcie_epc_features,
},
[IMX95_EP] = {
.variant = IMX95_EP,
- .flags = IMX6_PCIE_FLAG_HAS_SERDES |
- IMX6_PCIE_FLAG_SUPPORT_64BIT,
- .clk_names = imx8mq_clks,
- .clks_cnt = ARRAY_SIZE(imx8mq_clks),
+ .flags = IMX_PCIE_FLAG_HAS_SERDES |
+ IMX_PCIE_FLAG_SUPPORT_64BIT,
.ltssm_off = IMX95_PE0_GEN_CTRL_3,
.ltssm_mask = IMX95_PCIE_LTSSM_EN,
.mode_off[0] = IMX95_PE0_GEN_CTRL_1,
@@ -1604,7 +1810,7 @@ static const struct imx6_pcie_drvdata drvdata[] = {
},
};
-static const struct of_device_id imx6_pcie_of_match[] = {
+static const struct of_device_id imx_pcie_of_match[] = {
{ .compatible = "fsl,imx6q-pcie", .data = &drvdata[IMX6Q], },
{ .compatible = "fsl,imx6sx-pcie", .data = &drvdata[IMX6SX], },
{ .compatible = "fsl,imx6qp-pcie", .data = &drvdata[IMX6QP], },
@@ -1612,27 +1818,29 @@ static const struct of_device_id imx6_pcie_of_match[] = {
{ .compatible = "fsl,imx8mq-pcie", .data = &drvdata[IMX8MQ], },
{ .compatible = "fsl,imx8mm-pcie", .data = &drvdata[IMX8MM], },
{ .compatible = "fsl,imx8mp-pcie", .data = &drvdata[IMX8MP], },
+ { .compatible = "fsl,imx8q-pcie", .data = &drvdata[IMX8Q], },
{ .compatible = "fsl,imx95-pcie", .data = &drvdata[IMX95], },
{ .compatible = "fsl,imx8mq-pcie-ep", .data = &drvdata[IMX8MQ_EP], },
{ .compatible = "fsl,imx8mm-pcie-ep", .data = &drvdata[IMX8MM_EP], },
{ .compatible = "fsl,imx8mp-pcie-ep", .data = &drvdata[IMX8MP_EP], },
+ { .compatible = "fsl,imx8q-pcie-ep", .data = &drvdata[IMX8Q_EP], },
{ .compatible = "fsl,imx95-pcie-ep", .data = &drvdata[IMX95_EP], },
{},
};
-static struct platform_driver imx6_pcie_driver = {
+static struct platform_driver imx_pcie_driver = {
.driver = {
.name = "imx6q-pcie",
- .of_match_table = imx6_pcie_of_match,
+ .of_match_table = imx_pcie_of_match,
.suppress_bind_attrs = true,
- .pm = &imx6_pcie_pm_ops,
+ .pm = &imx_pcie_pm_ops,
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
- .probe = imx6_pcie_probe,
- .shutdown = imx6_pcie_shutdown,
+ .probe = imx_pcie_probe,
+ .shutdown = imx_pcie_shutdown,
};
-static void imx6_pcie_quirk(struct pci_dev *dev)
+static void imx_pcie_quirk(struct pci_dev *dev)
{
struct pci_bus *bus = dev->bus;
struct dw_pcie_rp *pp = bus->sysdata;
@@ -1642,33 +1850,33 @@ static void imx6_pcie_quirk(struct pci_dev *dev)
return;
/* Make sure we only quirk devices associated with this driver */
- if (bus->dev.parent->parent->driver != &imx6_pcie_driver.driver)
+ if (bus->dev.parent->parent->driver != &imx_pcie_driver.driver)
return;
if (pci_is_root_bus(bus)) {
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);
+ struct imx_pcie *imx_pcie = to_imx_pcie(pci);
/*
* Limit config length to avoid the kernel reading beyond
* the register set and causing an abort on i.MX 6Quad
*/
- if (imx6_pcie->drvdata->dbi_length) {
- dev->cfg_size = imx6_pcie->drvdata->dbi_length;
+ if (imx_pcie->drvdata->dbi_length) {
+ dev->cfg_size = imx_pcie->drvdata->dbi_length;
dev_info(&dev->dev, "Limiting cfg_size to %d\n",
dev->cfg_size);
}
}
}
DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_SYNOPSYS, 0xabcd,
- PCI_CLASS_BRIDGE_PCI, 8, imx6_pcie_quirk);
+ PCI_CLASS_BRIDGE_PCI, 8, imx_pcie_quirk);
-static int __init imx6_pcie_init(void)
+static int __init imx_pcie_init(void)
{
#ifdef CONFIG_ARM
struct device_node *np;
- np = of_find_matching_node(NULL, imx6_pcie_of_match);
+ np = of_find_matching_node(NULL, imx_pcie_of_match);
if (!np)
return -ENODEV;
of_node_put(np);
@@ -1684,6 +1892,6 @@ static int __init imx6_pcie_init(void)
"external abort on non-linefetch");
#endif
- return platform_driver_register(&imx6_pcie_driver);
+ return platform_driver_register(&imx_pcie_driver);
}
-device_initcall(imx6_pcie_init);
+device_initcall(imx_pcie_init);
diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c
index 844de4418724..1385d9db7b32 100644
--- a/drivers/pci/controller/dwc/pci-keystone.c
+++ b/drivers/pci/controller/dwc/pci-keystone.c
@@ -34,6 +34,11 @@
#define PCIE_DEVICEID_SHIFT 16
/* Application registers */
+#define PID 0x000
+#define RTL GENMASK(15, 11)
+#define RTL_SHIFT 11
+#define AM6_PCI_PG1_RTL_VER 0x15
+
#define CMD_STATUS 0x004
#define LTSSM_EN_VAL BIT(0)
#define OB_XLAT_EN_VAL BIT(1)
@@ -104,6 +109,8 @@
#define to_keystone_pcie(x) dev_get_drvdata((x)->dev)
+#define PCI_DEVICE_ID_TI_AM654X 0xb00c
+
struct ks_pcie_of_data {
enum dw_pcie_device_mode mode;
const struct dw_pcie_host_ops *host_ops;
@@ -182,12 +189,6 @@ static void ks_pcie_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
(int)data->hwirq, msg->address_hi, msg->address_lo);
}
-static int ks_pcie_msi_set_affinity(struct irq_data *irq_data,
- const struct cpumask *mask, bool force)
-{
- return -EINVAL;
-}
-
static void ks_pcie_msi_mask(struct irq_data *data)
{
struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(data);
@@ -240,13 +241,72 @@ static struct irq_chip ks_pcie_msi_irq_chip = {
.name = "KEYSTONE-PCI-MSI",
.irq_ack = ks_pcie_msi_irq_ack,
.irq_compose_msi_msg = ks_pcie_compose_msi_msg,
- .irq_set_affinity = ks_pcie_msi_set_affinity,
.irq_mask = ks_pcie_msi_mask,
.irq_unmask = ks_pcie_msi_unmask,
};
+/**
+ * ks_pcie_set_dbi_mode() - Set DBI mode to access overlaid BAR mask registers
+ * @ks_pcie: A pointer to the keystone_pcie structure which holds the KeyStone
+ * PCIe host controller driver information.
+ *
+ * Since modification of dbi_cs2 involves different clock domain, read the
+ * status back to ensure the transition is complete.
+ */
+static void ks_pcie_set_dbi_mode(struct keystone_pcie *ks_pcie)
+{
+ u32 val;
+
+ val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+ val |= DBI_CS2;
+ ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
+
+ do {
+ val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+ } while (!(val & DBI_CS2));
+}
+
+/**
+ * ks_pcie_clear_dbi_mode() - Disable DBI mode
+ * @ks_pcie: A pointer to the keystone_pcie structure which holds the KeyStone
+ * PCIe host controller driver information.
+ *
+ * Since modification of dbi_cs2 involves different clock domain, read the
+ * status back to ensure the transition is complete.
+ */
+static void ks_pcie_clear_dbi_mode(struct keystone_pcie *ks_pcie)
+{
+ u32 val;
+
+ val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+ val &= ~DBI_CS2;
+ ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
+
+ do {
+ val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+ } while (val & DBI_CS2);
+}
+
static int ks_pcie_msi_host_init(struct dw_pcie_rp *pp)
{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
+
+ /* Configure and set up BAR0 */
+ ks_pcie_set_dbi_mode(ks_pcie);
+
+ /* Enable BAR0 */
+ dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 1);
+ dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, SZ_4K - 1);
+
+ ks_pcie_clear_dbi_mode(ks_pcie);
+
+ /*
+ * For BAR0, just setting bus address for inbound writes (MSI) should
+ * be sufficient. Use physical address to avoid any conflicts.
+ */
+ dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, ks_pcie->app.start);
+
pp->msi_irq_chip = &ks_pcie_msi_irq_chip;
return dw_pcie_allocate_domains(pp);
}
@@ -340,59 +400,22 @@ static const struct irq_domain_ops ks_pcie_intx_irq_domain_ops = {
.xlate = irq_domain_xlate_onetwocell,
};
-/**
- * ks_pcie_set_dbi_mode() - Set DBI mode to access overlaid BAR mask registers
- * @ks_pcie: A pointer to the keystone_pcie structure which holds the KeyStone
- * PCIe host controller driver information.
- *
- * Since modification of dbi_cs2 involves different clock domain, read the
- * status back to ensure the transition is complete.
- */
-static void ks_pcie_set_dbi_mode(struct keystone_pcie *ks_pcie)
-{
- u32 val;
-
- val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
- val |= DBI_CS2;
- ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
-
- do {
- val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
- } while (!(val & DBI_CS2));
-}
-
-/**
- * ks_pcie_clear_dbi_mode() - Disable DBI mode
- * @ks_pcie: A pointer to the keystone_pcie structure which holds the KeyStone
- * PCIe host controller driver information.
- *
- * Since modification of dbi_cs2 involves different clock domain, read the
- * status back to ensure the transition is complete.
- */
-static void ks_pcie_clear_dbi_mode(struct keystone_pcie *ks_pcie)
-{
- u32 val;
-
- val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
- val &= ~DBI_CS2;
- ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
-
- do {
- val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
- } while (val & DBI_CS2);
-}
-
-static void ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
+static int ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
{
u32 val;
u32 num_viewport = ks_pcie->num_viewport;
struct dw_pcie *pci = ks_pcie->pci;
struct dw_pcie_rp *pp = &pci->pp;
- u64 start, end;
+ struct resource_entry *entry;
struct resource *mem;
+ u64 start, end;
int i;
- mem = resource_list_first_type(&pp->bridge->windows, IORESOURCE_MEM)->res;
+ entry = resource_list_first_type(&pp->bridge->windows, IORESOURCE_MEM);
+ if (!entry)
+ return -ENODEV;
+
+ mem = entry->res;
start = mem->start;
end = mem->end;
@@ -403,7 +426,7 @@ static void ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
ks_pcie_clear_dbi_mode(ks_pcie);
if (ks_pcie->is_am6)
- return;
+ return 0;
val = ilog2(OB_WIN_SIZE);
ks_pcie_app_writel(ks_pcie, OB_SIZE, val);
@@ -420,6 +443,8 @@ static void ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
val |= OB_XLAT_EN_VAL;
ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
+
+ return 0;
}
static void __iomem *ks_pcie_other_map_bus(struct pci_bus *bus,
@@ -430,6 +455,17 @@ static void __iomem *ks_pcie_other_map_bus(struct pci_bus *bus,
struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
u32 reg;
+ /*
+ * Checking whether the link is up here is a last line of defense
+ * against platforms that forward errors on the system bus as
+ * SError upon PCI configuration transactions issued when the link
+ * is down. This check is racy by definition and does not stop
+ * the system from triggering an SError if the link goes down
+ * after this check is performed.
+ */
+ if (!dw_pcie_link_up(pci))
+ return NULL;
+
reg = CFG_BUS(bus->number) | CFG_DEVICE(PCI_SLOT(devfn)) |
CFG_FUNC(PCI_FUNC(devfn));
if (!pci_is_root_bus(bus->parent))
@@ -445,44 +481,10 @@ static struct pci_ops ks_child_pcie_ops = {
.write = pci_generic_config_write,
};
-/**
- * ks_pcie_v3_65_add_bus() - keystone add_bus post initialization
- * @bus: A pointer to the PCI bus structure.
- *
- * This sets BAR0 to enable inbound access for MSI_IRQ register
- */
-static int ks_pcie_v3_65_add_bus(struct pci_bus *bus)
-{
- struct dw_pcie_rp *pp = bus->sysdata;
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
-
- if (!pci_is_root_bus(bus))
- return 0;
-
- /* Configure and set up BAR0 */
- ks_pcie_set_dbi_mode(ks_pcie);
-
- /* Enable BAR0 */
- dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 1);
- dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, SZ_4K - 1);
-
- ks_pcie_clear_dbi_mode(ks_pcie);
-
- /*
- * For BAR0, just setting bus address for inbound writes (MSI) should
- * be sufficient. Use physical address to avoid any conflicts.
- */
- dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, ks_pcie->app.start);
-
- return 0;
-}
-
static struct pci_ops ks_pcie_ops = {
.map_bus = dw_pcie_own_conf_map_bus,
.read = pci_generic_config_read,
.write = pci_generic_config_write,
- .add_bus = ks_pcie_v3_65_add_bus,
};
/**
@@ -525,7 +527,11 @@ static int ks_pcie_start_link(struct dw_pcie *pci)
static void ks_pcie_quirk(struct pci_dev *dev)
{
struct pci_bus *bus = dev->bus;
+ struct keystone_pcie *ks_pcie;
+ struct device *bridge_dev;
struct pci_dev *bridge;
+ u32 val;
+
static const struct pci_device_id rc_pci_devids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2HK),
.class = PCI_CLASS_BRIDGE_PCI_NORMAL, .class_mask = ~0, },
@@ -537,6 +543,11 @@ static void ks_pcie_quirk(struct pci_dev *dev)
.class = PCI_CLASS_BRIDGE_PCI_NORMAL, .class_mask = ~0, },
{ 0, },
};
+ static const struct pci_device_id am6_pci_devids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM654X),
+ .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
+ { 0, },
+ };
if (pci_is_root_bus(bus))
bridge = dev;
@@ -558,10 +569,36 @@ static void ks_pcie_quirk(struct pci_dev *dev)
*/
if (pci_match_id(rc_pci_devids, bridge)) {
if (pcie_get_readrq(dev) > 256) {
- dev_info(&dev->dev, "limiting MRRS to 256\n");
+ dev_info(&dev->dev, "limiting MRRS to 256 bytes\n");
pcie_set_readrq(dev, 256);
}
}
+
+ /*
+ * Memory transactions fail with PCI controller in AM654 PG1.0
+ * when MRRS is set to more than 128 bytes. Force the MRRS to
+ * 128 bytes in all downstream devices.
+ */
+ if (pci_match_id(am6_pci_devids, bridge)) {
+ bridge_dev = pci_get_host_bridge_device(dev);
+ if (!bridge_dev || !bridge_dev->parent)
+ return;
+
+ ks_pcie = dev_get_drvdata(bridge_dev->parent);
+ if (!ks_pcie)
+ return;
+
+ val = ks_pcie_app_readl(ks_pcie, PID);
+ val &= RTL;
+ val >>= RTL_SHIFT;
+ if (val != AM6_PCI_PG1_RTL_VER)
+ return;
+
+ if (pcie_get_readrq(dev) > 128) {
+ dev_info(&dev->dev, "limiting MRRS to 128 bytes\n");
+ pcie_set_readrq(dev, 128);
+ }
+ }
}
DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, ks_pcie_quirk);
@@ -724,7 +761,7 @@ static int ks_pcie_config_intx_irq(struct keystone_pcie *ks_pcie)
ks_pcie);
}
- intx_irq_domain = irq_domain_add_linear(intc_np, PCI_NUM_INTX,
+ intx_irq_domain = irq_domain_create_linear(of_fwnode_handle(intc_np), PCI_NUM_INTX,
&ks_pcie_intx_irq_domain_ops, NULL);
if (!intx_irq_domain) {
dev_err(dev, "Failed to add irq domain for INTX irqs\n");
@@ -814,7 +851,10 @@ static int __init ks_pcie_host_init(struct dw_pcie_rp *pp)
return ret;
ks_pcie_stop_link(pci);
- ks_pcie_setup_rc_app_regs(ks_pcie);
+ ret = ks_pcie_setup_rc_app_regs(ks_pcie);
+ if (ret)
+ return ret;
+
writew(PCI_IO_RANGE_TYPE_32 | (PCI_IO_RANGE_TYPE_32 << 8),
pci->dbi_base + PCI_IO_BASE);
@@ -926,11 +966,11 @@ static const struct pci_epc_features ks_pcie_am654_epc_features = {
.msix_capable = true,
.bar[BAR_0] = { .type = BAR_RESERVED, },
.bar[BAR_1] = { .type = BAR_RESERVED, },
- .bar[BAR_2] = { .type = BAR_FIXED, .fixed_size = SZ_1M, },
+ .bar[BAR_2] = { .type = BAR_RESIZABLE, },
.bar[BAR_3] = { .type = BAR_FIXED, .fixed_size = SZ_64K, },
.bar[BAR_4] = { .type = BAR_FIXED, .fixed_size = 256, },
- .bar[BAR_5] = { .type = BAR_FIXED, .fixed_size = SZ_1M, },
- .align = SZ_1M,
+ .bar[BAR_5] = { .type = BAR_RESIZABLE, },
+ .align = SZ_64K,
};
static const struct pci_epc_features*
@@ -1064,6 +1104,7 @@ static int ks_pcie_am654_set_mode(struct device *dev,
static const struct ks_pcie_of_data ks_pcie_rc_of_data = {
.host_ops = &ks_pcie_host_ops,
+ .mode = DW_PCIE_RC_TYPE,
.version = DW_PCIE_VER_365A,
};
@@ -1286,6 +1327,15 @@ static int ks_pcie_probe(struct platform_device *pdev)
ret = dw_pcie_ep_init(&pci->ep);
if (ret < 0)
goto err_get_sync;
+
+ ret = dw_pcie_ep_init_registers(&pci->ep);
+ if (ret) {
+ dev_err(dev, "Failed to initialize DWC endpoint registers\n");
+ goto err_ep_init;
+ }
+
+ pci_epc_init_notify(pci->ep.epc);
+
break;
default:
dev_err(dev, "INVALID device type %d\n", mode);
@@ -1295,6 +1345,8 @@ static int ks_pcie_probe(struct platform_device *pdev)
return 0;
+err_ep_init:
+ dw_pcie_ep_deinit(&pci->ep);
err_get_sync:
pm_runtime_put(dev);
pm_runtime_disable(dev);
@@ -1323,7 +1375,7 @@ static void ks_pcie_remove(struct platform_device *pdev)
static struct platform_driver ks_pcie_driver = {
.probe = ks_pcie_probe,
- .remove_new = ks_pcie_remove,
+ .remove = ks_pcie_remove,
.driver = {
.name = "keystone-pcie",
.of_match_table = ks_pcie_of_match,
diff --git a/drivers/pci/controller/dwc/pci-layerscape-ep.c b/drivers/pci/controller/dwc/pci-layerscape-ep.c
index 1f6ee1460ec2..a4a800699f89 100644
--- a/drivers/pci/controller/dwc/pci-layerscape-ep.c
+++ b/drivers/pci/controller/dwc/pci-layerscape-ep.c
@@ -104,7 +104,7 @@ static irqreturn_t ls_pcie_ep_event_handler(int irq, void *dev_id)
dev_dbg(pci->dev, "Link up\n");
} else if (val & PEX_PF0_PME_MES_DR_LDD) {
dev_dbg(pci->dev, "Link down\n");
- pci_epc_linkdown(pci->ep.epc);
+ dw_pcie_ep_linkdown(&pci->ep);
} else if (val & PEX_PF0_PME_MES_DR_HRD) {
dev_dbg(pci->dev, "Hot reset\n");
}
@@ -279,6 +279,15 @@ static int __init ls_pcie_ep_probe(struct platform_device *pdev)
if (ret)
return ret;
+ ret = dw_pcie_ep_init_registers(&pci->ep);
+ if (ret) {
+ dev_err(dev, "Failed to initialize DWC endpoint registers\n");
+ dw_pcie_ep_deinit(&pci->ep);
+ return ret;
+ }
+
+ pci_epc_init_notify(pci->ep.epc);
+
return ls_pcie_ep_interrupt_init(pcie, pdev);
}
diff --git a/drivers/pci/controller/dwc/pci-layerscape.c b/drivers/pci/controller/dwc/pci-layerscape.c
index ee6f52568133..a44b5c256d6e 100644
--- a/drivers/pci/controller/dwc/pci-layerscape.c
+++ b/drivers/pci/controller/dwc/pci-layerscape.c
@@ -329,7 +329,6 @@ static int ls_pcie_probe(struct platform_device *pdev)
struct ls_pcie *pcie;
struct resource *dbi_base;
u32 index[2];
- int ret;
pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
if (!pcie)
@@ -355,16 +354,15 @@ static int ls_pcie_probe(struct platform_device *pdev)
pcie->pf_lut_base = pci->dbi_base + pcie->drvdata->pf_lut_off;
if (pcie->drvdata->scfg_support) {
- pcie->scfg = syscon_regmap_lookup_by_phandle(dev->of_node, "fsl,pcie-scfg");
+ pcie->scfg =
+ syscon_regmap_lookup_by_phandle_args(dev->of_node,
+ "fsl,pcie-scfg", 1,
+ index);
if (IS_ERR(pcie->scfg)) {
dev_err(dev, "No syscfg phandle specified\n");
return PTR_ERR(pcie->scfg);
}
- ret = of_property_read_u32_array(dev->of_node, "fsl,pcie-scfg", index, 2);
- if (ret)
- return ret;
-
pcie->index = index[1];
}
diff --git a/drivers/pci/controller/dwc/pci-meson.c b/drivers/pci/controller/dwc/pci-meson.c
index 6477c83262c2..db9482a113e9 100644
--- a/drivers/pci/controller/dwc/pci-meson.c
+++ b/drivers/pci/controller/dwc/pci-meson.c
@@ -9,7 +9,6 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
-#include <linux/of_gpio.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
diff --git a/drivers/pci/controller/dwc/pcie-al.c b/drivers/pci/controller/dwc/pcie-al.c
index 6dfdda59f328..643115f74092 100644
--- a/drivers/pci/controller/dwc/pcie-al.c
+++ b/drivers/pci/controller/dwc/pcie-al.c
@@ -242,18 +242,24 @@ static struct pci_ops al_child_pci_ops = {
.write = pci_generic_config_write,
};
-static void al_pcie_config_prepare(struct al_pcie *pcie)
+static int al_pcie_config_prepare(struct al_pcie *pcie)
{
struct al_pcie_target_bus_cfg *target_bus_cfg;
struct dw_pcie_rp *pp = &pcie->pci->pp;
unsigned int ecam_bus_mask;
+ struct resource_entry *ft;
u32 cfg_control_offset;
+ struct resource *bus;
u8 subordinate_bus;
u8 secondary_bus;
u32 cfg_control;
u32 reg;
- struct resource *bus = resource_list_first_type(&pp->bridge->windows, IORESOURCE_BUS)->res;
+ ft = resource_list_first_type(&pp->bridge->windows, IORESOURCE_BUS);
+ if (!ft)
+ return -ENODEV;
+
+ bus = ft->res;
target_bus_cfg = &pcie->target_bus_cfg;
ecam_bus_mask = (pcie->ecam_size >> PCIE_ECAM_BUS_SHIFT) - 1;
@@ -287,6 +293,8 @@ static void al_pcie_config_prepare(struct al_pcie *pcie)
FIELD_PREP(CFG_CONTROL_SEC_BUS_MASK, secondary_bus);
al_pcie_controller_writel(pcie, cfg_control_offset, reg);
+
+ return 0;
}
static int al_pcie_host_init(struct dw_pcie_rp *pp)
@@ -305,7 +313,9 @@ static int al_pcie_host_init(struct dw_pcie_rp *pp)
if (rc)
return rc;
- al_pcie_config_prepare(pcie);
+ rc = al_pcie_config_prepare(pcie);
+ if (rc)
+ return rc;
return 0;
}
diff --git a/drivers/pci/controller/dwc/pcie-amd-mdb.c b/drivers/pci/controller/dwc/pcie-amd-mdb.c
new file mode 100644
index 000000000000..9f7251a16d32
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-amd-mdb.c
@@ -0,0 +1,476 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host controller driver for AMD MDB PCIe Bridge
+ *
+ * Copyright (C) 2024-2025, Advanced Micro Devices, Inc.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/of_device.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/resource.h>
+#include <linux/types.h>
+
+#include "pcie-designware.h"
+
+#define AMD_MDB_TLP_IR_STATUS_MISC 0x4C0
+#define AMD_MDB_TLP_IR_MASK_MISC 0x4C4
+#define AMD_MDB_TLP_IR_ENABLE_MISC 0x4C8
+#define AMD_MDB_TLP_IR_DISABLE_MISC 0x4CC
+
+#define AMD_MDB_TLP_PCIE_INTX_MASK GENMASK(23, 16)
+
+#define AMD_MDB_PCIE_INTR_INTX_ASSERT(x) BIT((x) * 2)
+
+/* Interrupt registers definitions. */
+#define AMD_MDB_PCIE_INTR_CMPL_TIMEOUT 15
+#define AMD_MDB_PCIE_INTR_INTX 16
+#define AMD_MDB_PCIE_INTR_PM_PME_RCVD 24
+#define AMD_MDB_PCIE_INTR_PME_TO_ACK_RCVD 25
+#define AMD_MDB_PCIE_INTR_MISC_CORRECTABLE 26
+#define AMD_MDB_PCIE_INTR_NONFATAL 27
+#define AMD_MDB_PCIE_INTR_FATAL 28
+
+#define IMR(x) BIT(AMD_MDB_PCIE_INTR_ ##x)
+#define AMD_MDB_PCIE_IMR_ALL_MASK \
+ ( \
+ IMR(CMPL_TIMEOUT) | \
+ IMR(PM_PME_RCVD) | \
+ IMR(PME_TO_ACK_RCVD) | \
+ IMR(MISC_CORRECTABLE) | \
+ IMR(NONFATAL) | \
+ IMR(FATAL) | \
+ AMD_MDB_TLP_PCIE_INTX_MASK \
+ )
+
+/**
+ * struct amd_mdb_pcie - PCIe port information
+ * @pci: DesignWare PCIe controller structure
+ * @slcr: MDB System Level Control and Status Register (SLCR) base
+ * @intx_domain: INTx IRQ domain pointer
+ * @mdb_domain: MDB IRQ domain pointer
+ * @intx_irq: INTx IRQ interrupt number
+ */
+struct amd_mdb_pcie {
+ struct dw_pcie pci;
+ void __iomem *slcr;
+ struct irq_domain *intx_domain;
+ struct irq_domain *mdb_domain;
+ int intx_irq;
+};
+
+static const struct dw_pcie_host_ops amd_mdb_pcie_host_ops = {
+};
+
+static void amd_mdb_intx_irq_mask(struct irq_data *data)
+{
+ struct amd_mdb_pcie *pcie = irq_data_get_irq_chip_data(data);
+ struct dw_pcie *pci = &pcie->pci;
+ struct dw_pcie_rp *port = &pci->pp;
+ unsigned long flags;
+ u32 val;
+
+ raw_spin_lock_irqsave(&port->lock, flags);
+ val = FIELD_PREP(AMD_MDB_TLP_PCIE_INTX_MASK,
+ AMD_MDB_PCIE_INTR_INTX_ASSERT(data->hwirq));
+
+ /*
+ * Writing '1' to a bit in AMD_MDB_TLP_IR_DISABLE_MISC disables that
+ * interrupt, writing '0' has no effect.
+ */
+ writel_relaxed(val, pcie->slcr + AMD_MDB_TLP_IR_DISABLE_MISC);
+ raw_spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static void amd_mdb_intx_irq_unmask(struct irq_data *data)
+{
+ struct amd_mdb_pcie *pcie = irq_data_get_irq_chip_data(data);
+ struct dw_pcie *pci = &pcie->pci;
+ struct dw_pcie_rp *port = &pci->pp;
+ unsigned long flags;
+ u32 val;
+
+ raw_spin_lock_irqsave(&port->lock, flags);
+ val = FIELD_PREP(AMD_MDB_TLP_PCIE_INTX_MASK,
+ AMD_MDB_PCIE_INTR_INTX_ASSERT(data->hwirq));
+
+ /*
+ * Writing '1' to a bit in AMD_MDB_TLP_IR_ENABLE_MISC enables that
+ * interrupt, writing '0' has no effect.
+ */
+ writel_relaxed(val, pcie->slcr + AMD_MDB_TLP_IR_ENABLE_MISC);
+ raw_spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static struct irq_chip amd_mdb_intx_irq_chip = {
+ .name = "AMD MDB INTx",
+ .irq_mask = amd_mdb_intx_irq_mask,
+ .irq_unmask = amd_mdb_intx_irq_unmask,
+};
+
+/**
+ * amd_mdb_pcie_intx_map - Set the handler for the INTx and mark IRQ as valid
+ * @domain: IRQ domain
+ * @irq: Virtual IRQ number
+ * @hwirq: Hardware interrupt number
+ *
+ * Return: Always returns '0'.
+ */
+static int amd_mdb_pcie_intx_map(struct irq_domain *domain,
+ unsigned int irq, irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &amd_mdb_intx_irq_chip,
+ handle_level_irq);
+ irq_set_chip_data(irq, domain->host_data);
+ irq_set_status_flags(irq, IRQ_LEVEL);
+
+ return 0;
+}
+
+/* INTx IRQ domain operations. */
+static const struct irq_domain_ops amd_intx_domain_ops = {
+ .map = amd_mdb_pcie_intx_map,
+};
+
+static irqreturn_t dw_pcie_rp_intx(int irq, void *args)
+{
+ struct amd_mdb_pcie *pcie = args;
+ unsigned long val;
+ int i, int_status;
+
+ val = readl_relaxed(pcie->slcr + AMD_MDB_TLP_IR_STATUS_MISC);
+ int_status = FIELD_GET(AMD_MDB_TLP_PCIE_INTX_MASK, val);
+
+ for (i = 0; i < PCI_NUM_INTX; i++) {
+ if (int_status & AMD_MDB_PCIE_INTR_INTX_ASSERT(i))
+ generic_handle_domain_irq(pcie->intx_domain, i);
+ }
+
+ return IRQ_HANDLED;
+}
+
+#define _IC(x, s)[AMD_MDB_PCIE_INTR_ ## x] = { __stringify(x), s }
+
+static const struct {
+ const char *sym;
+ const char *str;
+} intr_cause[32] = {
+ _IC(CMPL_TIMEOUT, "Completion timeout"),
+ _IC(PM_PME_RCVD, "PM_PME message received"),
+ _IC(PME_TO_ACK_RCVD, "PME_TO_ACK message received"),
+ _IC(MISC_CORRECTABLE, "Correctable error message"),
+ _IC(NONFATAL, "Non fatal error message"),
+ _IC(FATAL, "Fatal error message"),
+};
+
+static void amd_mdb_event_irq_mask(struct irq_data *d)
+{
+ struct amd_mdb_pcie *pcie = irq_data_get_irq_chip_data(d);
+ struct dw_pcie *pci = &pcie->pci;
+ struct dw_pcie_rp *port = &pci->pp;
+ unsigned long flags;
+ u32 val;
+
+ raw_spin_lock_irqsave(&port->lock, flags);
+ val = BIT(d->hwirq);
+ writel_relaxed(val, pcie->slcr + AMD_MDB_TLP_IR_DISABLE_MISC);
+ raw_spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static void amd_mdb_event_irq_unmask(struct irq_data *d)
+{
+ struct amd_mdb_pcie *pcie = irq_data_get_irq_chip_data(d);
+ struct dw_pcie *pci = &pcie->pci;
+ struct dw_pcie_rp *port = &pci->pp;
+ unsigned long flags;
+ u32 val;
+
+ raw_spin_lock_irqsave(&port->lock, flags);
+ val = BIT(d->hwirq);
+ writel_relaxed(val, pcie->slcr + AMD_MDB_TLP_IR_ENABLE_MISC);
+ raw_spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static struct irq_chip amd_mdb_event_irq_chip = {
+ .name = "AMD MDB RC-Event",
+ .irq_mask = amd_mdb_event_irq_mask,
+ .irq_unmask = amd_mdb_event_irq_unmask,
+};
+
+static int amd_mdb_pcie_event_map(struct irq_domain *domain,
+ unsigned int irq, irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &amd_mdb_event_irq_chip,
+ handle_level_irq);
+ irq_set_chip_data(irq, domain->host_data);
+ irq_set_status_flags(irq, IRQ_LEVEL);
+
+ return 0;
+}
+
+static const struct irq_domain_ops event_domain_ops = {
+ .map = amd_mdb_pcie_event_map,
+};
+
+static irqreturn_t amd_mdb_pcie_event(int irq, void *args)
+{
+ struct amd_mdb_pcie *pcie = args;
+ unsigned long val;
+ int i;
+
+ val = readl_relaxed(pcie->slcr + AMD_MDB_TLP_IR_STATUS_MISC);
+ val &= ~readl_relaxed(pcie->slcr + AMD_MDB_TLP_IR_MASK_MISC);
+ for_each_set_bit(i, &val, 32)
+ generic_handle_domain_irq(pcie->mdb_domain, i);
+ writel_relaxed(val, pcie->slcr + AMD_MDB_TLP_IR_STATUS_MISC);
+
+ return IRQ_HANDLED;
+}
+
+static void amd_mdb_pcie_free_irq_domains(struct amd_mdb_pcie *pcie)
+{
+ if (pcie->intx_domain) {
+ irq_domain_remove(pcie->intx_domain);
+ pcie->intx_domain = NULL;
+ }
+
+ if (pcie->mdb_domain) {
+ irq_domain_remove(pcie->mdb_domain);
+ pcie->mdb_domain = NULL;
+ }
+}
+
+static int amd_mdb_pcie_init_port(struct amd_mdb_pcie *pcie)
+{
+ unsigned long val;
+
+ /* Disable all TLP interrupts. */
+ writel_relaxed(AMD_MDB_PCIE_IMR_ALL_MASK,
+ pcie->slcr + AMD_MDB_TLP_IR_DISABLE_MISC);
+
+ /* Clear pending TLP interrupts. */
+ val = readl_relaxed(pcie->slcr + AMD_MDB_TLP_IR_STATUS_MISC);
+ val &= AMD_MDB_PCIE_IMR_ALL_MASK;
+ writel_relaxed(val, pcie->slcr + AMD_MDB_TLP_IR_STATUS_MISC);
+
+ /* Enable all TLP interrupts. */
+ writel_relaxed(AMD_MDB_PCIE_IMR_ALL_MASK,
+ pcie->slcr + AMD_MDB_TLP_IR_ENABLE_MISC);
+
+ return 0;
+}
+
+/**
+ * amd_mdb_pcie_init_irq_domains - Initialize IRQ domain
+ * @pcie: PCIe port information
+ * @pdev: Platform device
+ *
+ * Return: Returns '0' on success and error value on failure.
+ */
+static int amd_mdb_pcie_init_irq_domains(struct amd_mdb_pcie *pcie,
+ struct platform_device *pdev)
+{
+ struct dw_pcie *pci = &pcie->pci;
+ struct dw_pcie_rp *pp = &pci->pp;
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct device_node *pcie_intc_node;
+ int err;
+
+ pcie_intc_node = of_get_next_child(node, NULL);
+ if (!pcie_intc_node) {
+ dev_err(dev, "No PCIe Intc node found\n");
+ return -ENODEV;
+ }
+
+ pcie->mdb_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node), 32,
+ &event_domain_ops, pcie);
+ if (!pcie->mdb_domain) {
+ err = -ENOMEM;
+ dev_err(dev, "Failed to add MDB domain\n");
+ goto out;
+ }
+
+ irq_domain_update_bus_token(pcie->mdb_domain, DOMAIN_BUS_NEXUS);
+
+ pcie->intx_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node),
+ PCI_NUM_INTX, &amd_intx_domain_ops, pcie);
+ if (!pcie->intx_domain) {
+ err = -ENOMEM;
+ dev_err(dev, "Failed to add INTx domain\n");
+ goto mdb_out;
+ }
+
+ of_node_put(pcie_intc_node);
+ irq_domain_update_bus_token(pcie->intx_domain, DOMAIN_BUS_WIRED);
+
+ raw_spin_lock_init(&pp->lock);
+
+ return 0;
+mdb_out:
+ amd_mdb_pcie_free_irq_domains(pcie);
+out:
+ of_node_put(pcie_intc_node);
+ return err;
+}
+
+static irqreturn_t amd_mdb_pcie_intr_handler(int irq, void *args)
+{
+ struct amd_mdb_pcie *pcie = args;
+ struct device *dev;
+ struct irq_data *d;
+
+ dev = pcie->pci.dev;
+
+ /*
+ * In the future, error reporting will be hooked to the AER subsystem.
+ * Currently, the driver prints a warning message to the user.
+ */
+ d = irq_domain_get_irq_data(pcie->mdb_domain, irq);
+ if (intr_cause[d->hwirq].str)
+ dev_warn(dev, "%s\n", intr_cause[d->hwirq].str);
+ else
+ dev_warn_once(dev, "Unknown IRQ %ld\n", d->hwirq);
+
+ return IRQ_HANDLED;
+}
+
+static int amd_mdb_setup_irq(struct amd_mdb_pcie *pcie,
+ struct platform_device *pdev)
+{
+ struct dw_pcie *pci = &pcie->pci;
+ struct dw_pcie_rp *pp = &pci->pp;
+ struct device *dev = &pdev->dev;
+ int i, irq, err;
+
+ amd_mdb_pcie_init_port(pcie);
+
+ pp->irq = platform_get_irq(pdev, 0);
+ if (pp->irq < 0)
+ return pp->irq;
+
+ for (i = 0; i < ARRAY_SIZE(intr_cause); i++) {
+ if (!intr_cause[i].str)
+ continue;
+
+ irq = irq_create_mapping(pcie->mdb_domain, i);
+ if (!irq) {
+ dev_err(dev, "Failed to map MDB domain interrupt\n");
+ return -ENOMEM;
+ }
+
+ err = devm_request_irq(dev, irq, amd_mdb_pcie_intr_handler,
+ IRQF_NO_THREAD, intr_cause[i].sym, pcie);
+ if (err) {
+ dev_err(dev, "Failed to request IRQ %d, err=%d\n",
+ irq, err);
+ return err;
+ }
+ }
+
+ pcie->intx_irq = irq_create_mapping(pcie->mdb_domain,
+ AMD_MDB_PCIE_INTR_INTX);
+ if (!pcie->intx_irq) {
+ dev_err(dev, "Failed to map INTx interrupt\n");
+ return -ENXIO;
+ }
+
+ err = devm_request_irq(dev, pcie->intx_irq, dw_pcie_rp_intx,
+ IRQF_NO_THREAD, NULL, pcie);
+ if (err) {
+ dev_err(dev, "Failed to request INTx IRQ %d, err=%d\n",
+			pcie->intx_irq, err);
+ return err;
+ }
+
+ /* Plug the main event handler. */
+ err = devm_request_irq(dev, pp->irq, amd_mdb_pcie_event, IRQF_NO_THREAD,
+ "amd_mdb pcie_irq", pcie);
+ if (err) {
+ dev_err(dev, "Failed to request event IRQ %d, err=%d\n",
+ pp->irq, err);
+ return err;
+ }
+
+ return 0;
+}
+
+static int amd_mdb_add_pcie_port(struct amd_mdb_pcie *pcie,
+ struct platform_device *pdev)
+{
+ struct dw_pcie *pci = &pcie->pci;
+ struct dw_pcie_rp *pp = &pci->pp;
+ struct device *dev = &pdev->dev;
+ int err;
+
+ pcie->slcr = devm_platform_ioremap_resource_byname(pdev, "slcr");
+ if (IS_ERR(pcie->slcr))
+ return PTR_ERR(pcie->slcr);
+
+ err = amd_mdb_pcie_init_irq_domains(pcie, pdev);
+ if (err)
+ return err;
+
+ err = amd_mdb_setup_irq(pcie, pdev);
+ if (err) {
+ dev_err(dev, "Failed to set up interrupts, err=%d\n", err);
+ goto out;
+ }
+
+ pp->ops = &amd_mdb_pcie_host_ops;
+
+ err = dw_pcie_host_init(pp);
+ if (err) {
+ dev_err(dev, "Failed to initialize host, err=%d\n", err);
+ goto out;
+ }
+
+ return 0;
+
+out:
+ amd_mdb_pcie_free_irq_domains(pcie);
+ return err;
+}
+
+static int amd_mdb_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct amd_mdb_pcie *pcie;
+ struct dw_pcie *pci;
+
+ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+ if (!pcie)
+ return -ENOMEM;
+
+ pci = &pcie->pci;
+ pci->dev = dev;
+
+ platform_set_drvdata(pdev, pcie);
+
+ return amd_mdb_add_pcie_port(pcie, pdev);
+}
+
+static const struct of_device_id amd_mdb_pcie_of_match[] = {
+ {
+ .compatible = "amd,versal2-mdb-host",
+ },
+ {},
+};
+
+static struct platform_driver amd_mdb_pcie_driver = {
+ .driver = {
+ .name = "amd-mdb-pcie",
+ .of_match_table = amd_mdb_pcie_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = amd_mdb_pcie_probe,
+};
+
+builtin_platform_driver(amd_mdb_pcie_driver);
diff --git a/drivers/pci/controller/dwc/pcie-artpec6.c b/drivers/pci/controller/dwc/pcie-artpec6.c
index 9ed0a9ba7619..234c8cbcae3a 100644
--- a/drivers/pci/controller/dwc/pcie-artpec6.c
+++ b/drivers/pci/controller/dwc/pcie-artpec6.c
@@ -94,7 +94,7 @@ static void artpec6_pcie_writel(struct artpec6_pcie *artpec6_pcie, u32 offset, u
regmap_write(artpec6_pcie->regmap, offset, val);
}
-static u64 artpec6_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 pci_addr)
+static u64 artpec6_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 cpu_addr)
{
struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pci);
struct dw_pcie_rp *pp = &pci->pp;
@@ -102,13 +102,13 @@ static u64 artpec6_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 pci_addr)
switch (artpec6_pcie->mode) {
case DW_PCIE_RC_TYPE:
- return pci_addr - pp->cfg0_base;
+ return cpu_addr - pp->cfg0_base;
case DW_PCIE_EP_TYPE:
- return pci_addr - ep->phys_base;
+ return cpu_addr - ep->phys_base;
default:
dev_err(pci->dev, "UNKNOWN device type\n");
}
- return pci_addr;
+ return cpu_addr;
}
static int artpec6_pcie_establish_link(struct dw_pcie *pci)
@@ -369,9 +369,22 @@ static int artpec6_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
return 0;
}
+static const struct pci_epc_features artpec6_pcie_epc_features = {
+ .linkup_notifier = false,
+ .msi_capable = true,
+ .msix_capable = false,
+};
+
+static const struct pci_epc_features *
+artpec6_pcie_get_features(struct dw_pcie_ep *ep)
+{
+ return &artpec6_pcie_epc_features;
+}
+
static const struct dw_pcie_ep_ops pcie_ep_ops = {
.init = artpec6_pcie_ep_init,
.raise_irq = artpec6_pcie_raise_irq,
+ .get_features = artpec6_pcie_get_features,
};
static int artpec6_pcie_probe(struct platform_device *pdev)
@@ -441,7 +454,20 @@ static int artpec6_pcie_probe(struct platform_device *pdev)
pci->ep.ops = &pcie_ep_ops;
- return dw_pcie_ep_init(&pci->ep);
+ ret = dw_pcie_ep_init(&pci->ep);
+ if (ret)
+ return ret;
+
+ ret = dw_pcie_ep_init_registers(&pci->ep);
+ if (ret) {
+ dev_err(dev, "Failed to initialize DWC endpoint registers\n");
+ dw_pcie_ep_deinit(&pci->ep);
+ return ret;
+ }
+
+ pci_epc_init_notify(pci->ep.epc);
+
+ break;
default:
dev_err(dev, "INVALID device type %d\n", artpec6_pcie->mode);
}
diff --git a/drivers/pci/controller/dwc/pcie-bt1.c b/drivers/pci/controller/dwc/pcie-bt1.c
index 76d0ddea8007..1340edc18d12 100644
--- a/drivers/pci/controller/dwc/pcie-bt1.c
+++ b/drivers/pci/controller/dwc/pcie-bt1.c
@@ -632,7 +632,7 @@ MODULE_DEVICE_TABLE(of, bt1_pcie_of_match);
static struct platform_driver bt1_pcie_driver = {
.probe = bt1_pcie_probe,
- .remove_new = bt1_pcie_remove,
+ .remove = bt1_pcie_remove,
.driver = {
.name = "bt1-pcie",
.of_match_table = bt1_pcie_of_match,
diff --git a/drivers/pci/controller/dwc/pcie-designware-debugfs.c b/drivers/pci/controller/dwc/pcie-designware-debugfs.c
new file mode 100644
index 000000000000..9e6f4d00f262
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-designware-debugfs.c
@@ -0,0 +1,677 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Synopsys DesignWare PCIe controller debugfs driver
+ *
+ * Copyright (C) 2025 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Author: Shradha Todi <shradha.t@samsung.com>
+ */
+
+#include <linux/debugfs.h>
+
+#include "pcie-designware.h"
+
+#define SD_STATUS_L1LANE_REG 0xb0
+#define PIPE_RXVALID BIT(18)
+#define PIPE_DETECT_LANE BIT(17)
+#define LANE_SELECT GENMASK(3, 0)
+
+#define ERR_INJ0_OFF 0x34
+#define EINJ_VAL_DIFF GENMASK(28, 16)
+#define EINJ_VC_NUM GENMASK(14, 12)
+#define EINJ_TYPE_SHIFT 8
+#define EINJ0_TYPE GENMASK(11, 8)
+#define EINJ1_TYPE BIT(8)
+#define EINJ2_TYPE GENMASK(9, 8)
+#define EINJ3_TYPE GENMASK(10, 8)
+#define EINJ4_TYPE GENMASK(10, 8)
+#define EINJ5_TYPE BIT(8)
+#define EINJ_COUNT GENMASK(7, 0)
+
+#define ERR_INJ_ENABLE_REG 0x30
+
+#define RAS_DES_EVENT_COUNTER_DATA_REG 0xc
+
+#define RAS_DES_EVENT_COUNTER_CTRL_REG 0x8
+#define EVENT_COUNTER_GROUP_SELECT GENMASK(27, 24)
+#define EVENT_COUNTER_EVENT_SELECT GENMASK(23, 16)
+#define EVENT_COUNTER_LANE_SELECT GENMASK(11, 8)
+#define EVENT_COUNTER_STATUS BIT(7)
+#define EVENT_COUNTER_ENABLE GENMASK(4, 2)
+#define PER_EVENT_ON 0x3
+#define PER_EVENT_OFF 0x1
+
+#define DWC_DEBUGFS_BUF_MAX 128
+
+/**
+ * struct dwc_pcie_rasdes_info - Stores controller common information
+ * @ras_cap_offset: RAS DES vendor specific extended capability offset
+ * @reg_event_lock: Mutex used for RAS DES shadow event registers
+ *
+ * Any parameter constant to all files of the debugfs hierarchy for a single
+ * controller will be stored in this struct. It is allocated and assigned to
+ * controller specific struct dw_pcie during initialization.
+ */
+struct dwc_pcie_rasdes_info {
+ u32 ras_cap_offset;
+ struct mutex reg_event_lock;
+};
+
+/**
+ * struct dwc_pcie_rasdes_priv - Stores file specific private data information
+ * @pci: Reference to the dw_pcie structure
+ * @idx: Index of specific file related information in array of structs
+ *
+ * All debugfs files will have this struct as its private data.
+ */
+struct dwc_pcie_rasdes_priv {
+ struct dw_pcie *pci;
+ int idx;
+};
+
+/**
+ * struct dwc_pcie_err_inj - Store details about each error injection
+ * supported by DWC RAS DES
+ * @name: Name of the error that can be injected
+ * @err_inj_group: Group number to which the error belongs. The value
+ * can range from 0 to 5
+ * @err_inj_type: Each group can have multiple types of error
+ */
+struct dwc_pcie_err_inj {
+ const char *name;
+ u32 err_inj_group;
+ u32 err_inj_type;
+};
+
+static const struct dwc_pcie_err_inj err_inj_list[] = {
+ {"tx_lcrc", 0x0, 0x0},
+ {"b16_crc_dllp", 0x0, 0x1},
+ {"b16_crc_upd_fc", 0x0, 0x2},
+ {"tx_ecrc", 0x0, 0x3},
+ {"fcrc_tlp", 0x0, 0x4},
+ {"parity_tsos", 0x0, 0x5},
+ {"parity_skpos", 0x0, 0x6},
+ {"rx_lcrc", 0x0, 0x8},
+ {"rx_ecrc", 0x0, 0xb},
+ {"tlp_err_seq", 0x1, 0x0},
+ {"ack_nak_dllp_seq", 0x1, 0x1},
+ {"ack_nak_dllp", 0x2, 0x0},
+ {"upd_fc_dllp", 0x2, 0x1},
+ {"nak_dllp", 0x2, 0x2},
+ {"inv_sync_hdr_sym", 0x3, 0x0},
+ {"com_pad_ts1", 0x3, 0x1},
+ {"com_pad_ts2", 0x3, 0x2},
+ {"com_fts", 0x3, 0x3},
+ {"com_idl", 0x3, 0x4},
+ {"end_edb", 0x3, 0x5},
+ {"stp_sdp", 0x3, 0x6},
+ {"com_skp", 0x3, 0x7},
+ {"posted_tlp_hdr", 0x4, 0x0},
+ {"non_post_tlp_hdr", 0x4, 0x1},
+ {"cmpl_tlp_hdr", 0x4, 0x2},
+ {"posted_tlp_data", 0x4, 0x4},
+ {"non_post_tlp_data", 0x4, 0x5},
+ {"cmpl_tlp_data", 0x4, 0x6},
+ {"duplicate_tlp", 0x5, 0x0},
+ {"nullified_tlp", 0x5, 0x1},
+};
+
+static const u32 err_inj_type_mask[] = {
+ EINJ0_TYPE,
+ EINJ1_TYPE,
+ EINJ2_TYPE,
+ EINJ3_TYPE,
+ EINJ4_TYPE,
+ EINJ5_TYPE,
+};
+
+/**
+ * struct dwc_pcie_event_counter - Store details about each event counter
+ * supported in DWC RAS DES
+ * @name: Name of the error counter
+ * @group_no: Group number that the event belongs to. The value can range
+ * from 0 to 4
+ * @event_no: Event number of the particular event. The value ranges are:
+ * Group 0: 0 - 10
+ * Group 1: 5 - 13
+ * Group 2: 0 - 7
+ * Group 3: 0 - 5
+ * Group 4: 0 - 1
+ */
+struct dwc_pcie_event_counter {
+ const char *name;
+ u32 group_no;
+ u32 event_no;
+};
+
+static const struct dwc_pcie_event_counter event_list[] = {
+ {"ebuf_overflow", 0x0, 0x0},
+ {"ebuf_underrun", 0x0, 0x1},
+ {"decode_err", 0x0, 0x2},
+ {"running_disparity_err", 0x0, 0x3},
+ {"skp_os_parity_err", 0x0, 0x4},
+ {"sync_header_err", 0x0, 0x5},
+ {"rx_valid_deassertion", 0x0, 0x6},
+ {"ctl_skp_os_parity_err", 0x0, 0x7},
+ {"retimer_parity_err_1st", 0x0, 0x8},
+ {"retimer_parity_err_2nd", 0x0, 0x9},
+ {"margin_crc_parity_err", 0x0, 0xA},
+ {"detect_ei_infer", 0x1, 0x5},
+ {"receiver_err", 0x1, 0x6},
+ {"rx_recovery_req", 0x1, 0x7},
+ {"n_fts_timeout", 0x1, 0x8},
+ {"framing_err", 0x1, 0x9},
+ {"deskew_err", 0x1, 0xa},
+ {"framing_err_in_l0", 0x1, 0xc},
+ {"deskew_uncompleted_err", 0x1, 0xd},
+ {"bad_tlp", 0x2, 0x0},
+ {"lcrc_err", 0x2, 0x1},
+ {"bad_dllp", 0x2, 0x2},
+ {"replay_num_rollover", 0x2, 0x3},
+ {"replay_timeout", 0x2, 0x4},
+ {"rx_nak_dllp", 0x2, 0x5},
+ {"tx_nak_dllp", 0x2, 0x6},
+ {"retry_tlp", 0x2, 0x7},
+ {"fc_timeout", 0x3, 0x0},
+ {"poisoned_tlp", 0x3, 0x1},
+ {"ecrc_error", 0x3, 0x2},
+ {"unsupported_request", 0x3, 0x3},
+ {"completer_abort", 0x3, 0x4},
+ {"completion_timeout", 0x3, 0x5},
+ {"ebuf_skp_add", 0x4, 0x0},
+ {"ebuf_skp_del", 0x4, 0x1},
+};
+
+static ssize_t lane_detect_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct dw_pcie *pci = file->private_data;
+ struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
+ char debugfs_buf[DWC_DEBUGFS_BUF_MAX];
+ ssize_t pos;
+ u32 val;
+
+ val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + SD_STATUS_L1LANE_REG);
+ val = FIELD_GET(PIPE_DETECT_LANE, val);
+ if (val)
+ pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "Lane Detected\n");
+ else
+ pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "Lane Undetected\n");
+
+ return simple_read_from_buffer(buf, count, ppos, debugfs_buf, pos);
+}
+
+static ssize_t lane_detect_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct dw_pcie *pci = file->private_data;
+ struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
+ u32 lane, val;
+
+ val = kstrtou32_from_user(buf, count, 0, &lane);
+ if (val)
+ return val;
+
+ val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + SD_STATUS_L1LANE_REG);
+ val &= ~(LANE_SELECT);
+ val |= FIELD_PREP(LANE_SELECT, lane);
+ dw_pcie_writel_dbi(pci, rinfo->ras_cap_offset + SD_STATUS_L1LANE_REG, val);
+
+ return count;
+}
+
+static ssize_t rx_valid_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct dw_pcie *pci = file->private_data;
+ struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
+ char debugfs_buf[DWC_DEBUGFS_BUF_MAX];
+ ssize_t pos;
+ u32 val;
+
+ val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + SD_STATUS_L1LANE_REG);
+ val = FIELD_GET(PIPE_RXVALID, val);
+ if (val)
+ pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "RX Valid\n");
+ else
+ pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "RX Invalid\n");
+
+ return simple_read_from_buffer(buf, count, ppos, debugfs_buf, pos);
+}
+
+static ssize_t rx_valid_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return lane_detect_write(file, buf, count, ppos);
+}
+
+static ssize_t err_inj_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct dwc_pcie_rasdes_priv *pdata = file->private_data;
+ struct dw_pcie *pci = pdata->pci;
+ struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
+ u32 val, counter, vc_num, err_group, type_mask;
+ int val_diff = 0;
+ char *kern_buf;
+
+ err_group = err_inj_list[pdata->idx].err_inj_group;
+ type_mask = err_inj_type_mask[err_group];
+
+ kern_buf = memdup_user_nul(buf, count);
+ if (IS_ERR(kern_buf))
+ return PTR_ERR(kern_buf);
+
+ if (err_group == 4) {
+ val = sscanf(kern_buf, "%u %d %u", &counter, &val_diff, &vc_num);
+ if ((val != 3) || (val_diff < -4095 || val_diff > 4095)) {
+ kfree(kern_buf);
+ return -EINVAL;
+ }
+ } else if (err_group == 1) {
+ val = sscanf(kern_buf, "%u %d", &counter, &val_diff);
+ if ((val != 2) || (val_diff < -4095 || val_diff > 4095)) {
+ kfree(kern_buf);
+ return -EINVAL;
+ }
+ } else {
+ val = kstrtou32(kern_buf, 0, &counter);
+ if (val) {
+ kfree(kern_buf);
+ return val;
+ }
+ }
+
+ val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + ERR_INJ0_OFF + (0x4 * err_group));
+ val &= ~(type_mask | EINJ_COUNT);
+ val |= ((err_inj_list[pdata->idx].err_inj_type << EINJ_TYPE_SHIFT) & type_mask);
+ val |= FIELD_PREP(EINJ_COUNT, counter);
+
+ if (err_group == 1 || err_group == 4) {
+ val &= ~(EINJ_VAL_DIFF);
+ val |= FIELD_PREP(EINJ_VAL_DIFF, val_diff);
+ }
+ if (err_group == 4) {
+ val &= ~(EINJ_VC_NUM);
+ val |= FIELD_PREP(EINJ_VC_NUM, vc_num);
+ }
+
+ dw_pcie_writel_dbi(pci, rinfo->ras_cap_offset + ERR_INJ0_OFF + (0x4 * err_group), val);
+ dw_pcie_writel_dbi(pci, rinfo->ras_cap_offset + ERR_INJ_ENABLE_REG, (0x1 << err_group));
+
+ kfree(kern_buf);
+ return count;
+}
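
As a hedged illustration of the input format parsed above (not part of the patch itself), the per-group write syntax accepted by err_inj_write() works out to the following, assuming debugfs is mounted at /sys/kernel/debug and the directory name produced by dwc_pcie_debugfs_init() further below:

/*
 * Groups 0, 2, 3, 5:  "<count>"                   e.g. echo 8 > .../rasdes_err_inj/tx_lcrc
 * Group 1:            "<count> <val_diff>"        e.g. echo "5 -2" > .../rasdes_err_inj/tlp_err_seq
 * Group 4:            "<count> <val_diff> <vc>"   e.g. echo "5 -2 0" > .../rasdes_err_inj/posted_tlp_hdr
 *
 * <count> is the 8-bit injection count (EINJ_COUNT) and <val_diff> must lie
 * in [-4095, 4095]; the full prefix is
 * /sys/kernel/debug/dwc_pcie_<dev_name>/rasdes_err_inj/.
 */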
+
+static void set_event_number(struct dwc_pcie_rasdes_priv *pdata,
+ struct dw_pcie *pci, struct dwc_pcie_rasdes_info *rinfo)
+{
+ u32 val;
+
+ val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG);
+ val &= ~EVENT_COUNTER_ENABLE;
+ val &= ~(EVENT_COUNTER_GROUP_SELECT | EVENT_COUNTER_EVENT_SELECT);
+ val |= FIELD_PREP(EVENT_COUNTER_GROUP_SELECT, event_list[pdata->idx].group_no);
+ val |= FIELD_PREP(EVENT_COUNTER_EVENT_SELECT, event_list[pdata->idx].event_no);
+ dw_pcie_writel_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG, val);
+}
+
+static ssize_t counter_enable_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct dwc_pcie_rasdes_priv *pdata = file->private_data;
+ struct dw_pcie *pci = pdata->pci;
+ struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
+ char debugfs_buf[DWC_DEBUGFS_BUF_MAX];
+ ssize_t pos;
+ u32 val;
+
+ mutex_lock(&rinfo->reg_event_lock);
+ set_event_number(pdata, pci, rinfo);
+ val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG);
+ mutex_unlock(&rinfo->reg_event_lock);
+ val = FIELD_GET(EVENT_COUNTER_STATUS, val);
+ if (val)
+ pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "Counter Enabled\n");
+ else
+ pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "Counter Disabled\n");
+
+ return simple_read_from_buffer(buf, count, ppos, debugfs_buf, pos);
+}
+
+static ssize_t counter_enable_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct dwc_pcie_rasdes_priv *pdata = file->private_data;
+ struct dw_pcie *pci = pdata->pci;
+ struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
+ u32 val, enable;
+
+ val = kstrtou32_from_user(buf, count, 0, &enable);
+ if (val)
+ return val;
+
+ mutex_lock(&rinfo->reg_event_lock);
+ set_event_number(pdata, pci, rinfo);
+ val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG);
+ if (enable)
+ val |= FIELD_PREP(EVENT_COUNTER_ENABLE, PER_EVENT_ON);
+ else
+ val |= FIELD_PREP(EVENT_COUNTER_ENABLE, PER_EVENT_OFF);
+
+ dw_pcie_writel_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG, val);
+
+ /*
+ * While enabling the counter, always read the status back to check if
+ * it is enabled or not. Return error if it is not enabled to let the
+ * users know that the counter is not supported on the platform.
+ */
+ if (enable) {
+ val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset +
+ RAS_DES_EVENT_COUNTER_CTRL_REG);
+ if (!FIELD_GET(EVENT_COUNTER_STATUS, val)) {
+ mutex_unlock(&rinfo->reg_event_lock);
+ return -EOPNOTSUPP;
+ }
+ }
+
+ mutex_unlock(&rinfo->reg_event_lock);
+
+ return count;
+}
+
+static ssize_t counter_lane_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct dwc_pcie_rasdes_priv *pdata = file->private_data;
+ struct dw_pcie *pci = pdata->pci;
+ struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
+ char debugfs_buf[DWC_DEBUGFS_BUF_MAX];
+ ssize_t pos;
+ u32 val;
+
+ mutex_lock(&rinfo->reg_event_lock);
+ set_event_number(pdata, pci, rinfo);
+ val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG);
+ mutex_unlock(&rinfo->reg_event_lock);
+ val = FIELD_GET(EVENT_COUNTER_LANE_SELECT, val);
+ pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "Lane: %d\n", val);
+
+ return simple_read_from_buffer(buf, count, ppos, debugfs_buf, pos);
+}
+
+static ssize_t counter_lane_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct dwc_pcie_rasdes_priv *pdata = file->private_data;
+ struct dw_pcie *pci = pdata->pci;
+ struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
+ u32 val, lane;
+
+ val = kstrtou32_from_user(buf, count, 0, &lane);
+ if (val)
+ return val;
+
+ mutex_lock(&rinfo->reg_event_lock);
+ set_event_number(pdata, pci, rinfo);
+ val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG);
+ val &= ~(EVENT_COUNTER_LANE_SELECT);
+ val |= FIELD_PREP(EVENT_COUNTER_LANE_SELECT, lane);
+ dw_pcie_writel_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG, val);
+ mutex_unlock(&rinfo->reg_event_lock);
+
+ return count;
+}
+
+static ssize_t counter_value_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct dwc_pcie_rasdes_priv *pdata = file->private_data;
+ struct dw_pcie *pci = pdata->pci;
+ struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
+ char debugfs_buf[DWC_DEBUGFS_BUF_MAX];
+ ssize_t pos;
+ u32 val;
+
+ mutex_lock(&rinfo->reg_event_lock);
+ set_event_number(pdata, pci, rinfo);
+ val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_DATA_REG);
+ mutex_unlock(&rinfo->reg_event_lock);
+ pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "Counter value: %d\n", val);
+
+ return simple_read_from_buffer(buf, count, ppos, debugfs_buf, pos);
+}
+
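
The counter files above combine into a simple enable-then-read flow; a minimal user-space sketch, assuming a hypothetical controller directory dwc_pcie_1c00000.pcie and debugfs mounted at /sys/kernel/debug:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Hypothetical path; the directory name comes from dwc_pcie_debugfs_init(). */
#define CNT_DIR "/sys/kernel/debug/dwc_pcie_1c00000.pcie/rasdes_event_counter/lcrc_err/"

int main(void)
{
	char buf[64];
	ssize_t n;
	int fd;

	/* Enable the counter; counter_enable_write() fails with EOPNOTSUPP if unsupported. */
	fd = open(CNT_DIR "counter_enable", O_WRONLY);
	if (fd < 0 || write(fd, "1", 1) < 0)
		return 1;
	close(fd);

	/* Read back the running value, printed as "Counter value: <n>". */
	fd = open(CNT_DIR "counter_value", O_RDONLY);
	if (fd < 0)
		return 1;
	n = read(fd, buf, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';
		fputs(buf, stdout);
	}
	close(fd);
	return 0;
}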
+static const char *ltssm_status_string(enum dw_pcie_ltssm ltssm)
+{
+ const char *str;
+
+ switch (ltssm) {
+#define DW_PCIE_LTSSM_NAME(n) case n: str = #n; break
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_DETECT_QUIET);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_DETECT_ACT);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_POLL_ACTIVE);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_POLL_COMPLIANCE);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_POLL_CONFIG);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_PRE_DETECT_QUIET);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_DETECT_WAIT);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_CFG_LINKWD_START);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_CFG_LINKWD_ACEPT);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_CFG_LANENUM_WAI);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_CFG_LANENUM_ACEPT);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_CFG_COMPLETE);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_CFG_IDLE);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_RCVRY_LOCK);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_RCVRY_SPEED);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_RCVRY_RCVRCFG);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_RCVRY_IDLE);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_L0);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_L0S);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_L123_SEND_EIDLE);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_L1_IDLE);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_L2_IDLE);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_L2_WAKE);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_DISABLED_ENTRY);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_DISABLED_IDLE);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_DISABLED);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_LPBK_ENTRY);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_LPBK_ACTIVE);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_LPBK_EXIT);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_LPBK_EXIT_TIMEOUT);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_HOT_RESET_ENTRY);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_HOT_RESET);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_RCVRY_EQ0);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_RCVRY_EQ1);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_RCVRY_EQ2);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_RCVRY_EQ3);
+ default:
+ str = "DW_PCIE_LTSSM_UNKNOWN";
+ break;
+ }
+
+ return str + strlen("DW_PCIE_LTSSM_");
+}
+
+static int ltssm_status_show(struct seq_file *s, void *v)
+{
+ struct dw_pcie *pci = s->private;
+ enum dw_pcie_ltssm val;
+
+ val = dw_pcie_get_ltssm(pci);
+ seq_printf(s, "%s (0x%02x)\n", ltssm_status_string(val), val);
+
+ return 0;
+}
+
+static int ltssm_status_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ltssm_status_show, inode->i_private);
+}
+
+#define dwc_debugfs_create(name) \
+debugfs_create_file(#name, 0644, rasdes_debug, pci, \
+ &dbg_ ## name ## _fops)
+
+#define DWC_DEBUGFS_FOPS(name) \
+static const struct file_operations dbg_ ## name ## _fops = { \
+ .open = simple_open, \
+ .read = name ## _read, \
+ .write = name ## _write \
+}
+
+DWC_DEBUGFS_FOPS(lane_detect);
+DWC_DEBUGFS_FOPS(rx_valid);
+
+static const struct file_operations dwc_pcie_err_inj_ops = {
+ .open = simple_open,
+ .write = err_inj_write,
+};
+
+static const struct file_operations dwc_pcie_counter_enable_ops = {
+ .open = simple_open,
+ .read = counter_enable_read,
+ .write = counter_enable_write,
+};
+
+static const struct file_operations dwc_pcie_counter_lane_ops = {
+ .open = simple_open,
+ .read = counter_lane_read,
+ .write = counter_lane_write,
+};
+
+static const struct file_operations dwc_pcie_counter_value_ops = {
+ .open = simple_open,
+ .read = counter_value_read,
+};
+
+static const struct file_operations dwc_pcie_ltssm_status_ops = {
+ .open = ltssm_status_open,
+ .read = seq_read,
+};
+
+static void dwc_pcie_rasdes_debugfs_deinit(struct dw_pcie *pci)
+{
+ struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
+
+ mutex_destroy(&rinfo->reg_event_lock);
+}
+
+static int dwc_pcie_rasdes_debugfs_init(struct dw_pcie *pci, struct dentry *dir)
+{
+ struct dentry *rasdes_debug, *rasdes_err_inj;
+ struct dentry *rasdes_event_counter, *rasdes_events;
+ struct dwc_pcie_rasdes_info *rasdes_info;
+ struct dwc_pcie_rasdes_priv *priv_tmp;
+ struct device *dev = pci->dev;
+ int ras_cap, i, ret;
+
+ /*
+ * If a given SoC has no RAS DES capability, the following call is
+ * bound to return an error, breaking some existing platforms. So,
+ * return 0 here, as this is not necessarily an error.
+ */
+ ras_cap = dw_pcie_find_rasdes_capability(pci);
+ if (!ras_cap) {
+ dev_dbg(dev, "no RAS DES capability available\n");
+ return 0;
+ }
+
+ rasdes_info = devm_kzalloc(dev, sizeof(*rasdes_info), GFP_KERNEL);
+ if (!rasdes_info)
+ return -ENOMEM;
+
+ /* Create subdirectories for Debug, Error Injection, Statistics. */
+ rasdes_debug = debugfs_create_dir("rasdes_debug", dir);
+ rasdes_err_inj = debugfs_create_dir("rasdes_err_inj", dir);
+ rasdes_event_counter = debugfs_create_dir("rasdes_event_counter", dir);
+
+ mutex_init(&rasdes_info->reg_event_lock);
+ rasdes_info->ras_cap_offset = ras_cap;
+ pci->debugfs->rasdes_info = rasdes_info;
+
+ /* Create debugfs files for Debug subdirectory. */
+ dwc_debugfs_create(lane_detect);
+ dwc_debugfs_create(rx_valid);
+
+ /* Create debugfs files for Error Injection subdirectory. */
+ for (i = 0; i < ARRAY_SIZE(err_inj_list); i++) {
+ priv_tmp = devm_kzalloc(dev, sizeof(*priv_tmp), GFP_KERNEL);
+ if (!priv_tmp) {
+ ret = -ENOMEM;
+ goto err_deinit;
+ }
+
+ priv_tmp->idx = i;
+ priv_tmp->pci = pci;
+ debugfs_create_file(err_inj_list[i].name, 0200, rasdes_err_inj, priv_tmp,
+ &dwc_pcie_err_inj_ops);
+ }
+
+ /* Create debugfs files for Statistical Counter subdirectory. */
+ for (i = 0; i < ARRAY_SIZE(event_list); i++) {
+ priv_tmp = devm_kzalloc(dev, sizeof(*priv_tmp), GFP_KERNEL);
+ if (!priv_tmp) {
+ ret = -ENOMEM;
+ goto err_deinit;
+ }
+
+ priv_tmp->idx = i;
+ priv_tmp->pci = pci;
+ rasdes_events = debugfs_create_dir(event_list[i].name, rasdes_event_counter);
+ if (event_list[i].group_no == 0 || event_list[i].group_no == 4) {
+ debugfs_create_file("lane_select", 0644, rasdes_events,
+ priv_tmp, &dwc_pcie_counter_lane_ops);
+ }
+ debugfs_create_file("counter_value", 0444, rasdes_events, priv_tmp,
+ &dwc_pcie_counter_value_ops);
+ debugfs_create_file("counter_enable", 0644, rasdes_events, priv_tmp,
+ &dwc_pcie_counter_enable_ops);
+ }
+
+ return 0;
+
+err_deinit:
+ dwc_pcie_rasdes_debugfs_deinit(pci);
+ return ret;
+}
+
+static void dwc_pcie_ltssm_debugfs_init(struct dw_pcie *pci, struct dentry *dir)
+{
+ debugfs_create_file("ltssm_status", 0444, dir, pci,
+ &dwc_pcie_ltssm_status_ops);
+}
+
+void dwc_pcie_debugfs_deinit(struct dw_pcie *pci)
+{
+ if (!pci->debugfs)
+ return;
+
+ dwc_pcie_rasdes_debugfs_deinit(pci);
+ debugfs_remove_recursive(pci->debugfs->debug_dir);
+}
+
+void dwc_pcie_debugfs_init(struct dw_pcie *pci)
+{
+ char dirname[DWC_DEBUGFS_BUF_MAX];
+ struct device *dev = pci->dev;
+ struct debugfs_info *debugfs;
+ struct dentry *dir;
+ int err;
+
+ /* Create main directory for each platform driver. */
+ snprintf(dirname, DWC_DEBUGFS_BUF_MAX, "dwc_pcie_%s", dev_name(dev));
+ dir = debugfs_create_dir(dirname, NULL);
+ debugfs = devm_kzalloc(dev, sizeof(*debugfs), GFP_KERNEL);
+ if (!debugfs)
+ return;
+
+ debugfs->debug_dir = dir;
+ pci->debugfs = debugfs;
+ err = dwc_pcie_rasdes_debugfs_init(pci, dir);
+ if (err)
+ dev_err(dev, "failed to initialize RAS DES debugfs, err=%d\n",
+ err);
+
+ dwc_pcie_ltssm_debugfs_init(pci, dir);
+}
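
Putting the pieces together, the hierarchy this file creates is roughly the following (a sketch: the rasdes_* directories are only populated when the RAS DES capability is found, while ltssm_status is always present):

dwc_pcie_<dev_name>/
    ltssm_status
    rasdes_debug/
        lane_detect
        rx_valid
    rasdes_err_inj/
        <one write-only file per err_inj_list[] entry, e.g. tx_lcrc>
    rasdes_event_counter/
        <one directory per event_list[] entry, e.g. lcrc_err>/
            counter_enable
            counter_value
            lane_select        (groups 0 and 4 only)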
diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
index 746a11dcb67f..1a0bf9341542 100644
--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
+++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
@@ -15,22 +15,14 @@
#include <linux/pci-epc.h>
#include <linux/pci-epf.h>
-void dw_pcie_ep_linkup(struct dw_pcie_ep *ep)
-{
- struct pci_epc *epc = ep->epc;
-
- pci_epc_linkup(epc);
-}
-EXPORT_SYMBOL_GPL(dw_pcie_ep_linkup);
-
-void dw_pcie_ep_init_notify(struct dw_pcie_ep *ep)
-{
- struct pci_epc *epc = ep->epc;
-
- pci_epc_init_notify(epc);
-}
-EXPORT_SYMBOL_GPL(dw_pcie_ep_init_notify);
-
+/**
+ * dw_pcie_ep_get_func_from_ep - Get the struct dw_pcie_ep_func corresponding to
+ * the endpoint function
+ * @ep: DWC EP device
+ * @func_no: Function number of the endpoint device
+ *
+ * Return: struct dw_pcie_ep_func if success, NULL otherwise.
+ */
struct dw_pcie_ep_func *
dw_pcie_ep_get_func_from_ep(struct dw_pcie_ep *ep, u8 func_no)
{
@@ -61,6 +53,11 @@ static void __dw_pcie_ep_reset_bar(struct dw_pcie *pci, u8 func_no,
dw_pcie_dbi_ro_wr_dis(pci);
}
+/**
+ * dw_pcie_ep_reset_bar - Reset endpoint BAR
+ * @pci: DWC PCI device
+ * @bar: BAR number of the endpoint
+ */
void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)
{
u8 func_no, funcs;
@@ -105,6 +102,45 @@ static u8 dw_pcie_ep_find_capability(struct dw_pcie_ep *ep, u8 func_no, u8 cap)
return __dw_pcie_ep_find_next_cap(ep, func_no, next_cap_ptr, cap);
}
+/**
+ * dw_pcie_ep_hide_ext_capability - Hide a capability from the linked list
+ * @pci: DWC PCI device
+ * @prev_cap: Capability preceding the capability that should be hidden
+ * @cap: Capability that should be hidden
+ *
+ * Return: 0 if success, errno otherwise.
+ */
+int dw_pcie_ep_hide_ext_capability(struct dw_pcie *pci, u8 prev_cap, u8 cap)
+{
+ u16 prev_cap_offset, cap_offset;
+ u32 prev_cap_header, cap_header;
+
+ prev_cap_offset = dw_pcie_find_ext_capability(pci, prev_cap);
+ if (!prev_cap_offset)
+ return -EINVAL;
+
+ prev_cap_header = dw_pcie_readl_dbi(pci, prev_cap_offset);
+ cap_offset = PCI_EXT_CAP_NEXT(prev_cap_header);
+ cap_header = dw_pcie_readl_dbi(pci, cap_offset);
+
+ /* cap must immediately follow prev_cap. */
+ if (PCI_EXT_CAP_ID(cap_header) != cap)
+ return -EINVAL;
+
+ /* Clear next ptr. */
+ prev_cap_header &= ~GENMASK(31, 20);
+
+ /* Set next ptr to next ptr of cap. */
+ prev_cap_header |= cap_header & GENMASK(31, 20);
+
+ dw_pcie_dbi_ro_wr_en(pci);
+ dw_pcie_writel_dbi(pci, prev_cap_offset, prev_cap_header);
+ dw_pcie_dbi_ro_wr_dis(pci);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dw_pcie_ep_hide_ext_capability);
+
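
A hedged sketch of a caller, where the function name, the chosen capabilities, and their ordering are illustrative assumptions rather than something taken from an existing glue driver:

static void foo_pcie_ep_init(struct dw_pcie_ep *ep)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);

	/*
	 * Hypothetical: hide the PTM extended capability from the host,
	 * assuming it is known to sit immediately after the L1 PM
	 * Substates capability in this controller's config space.
	 */
	if (dw_pcie_ep_hide_ext_capability(pci, PCI_EXT_CAP_ID_L1SS,
					   PCI_EXT_CAP_ID_PTM))
		dev_warn(pci->dev, "could not hide PTM capability\n");
}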
static int dw_pcie_ep_write_header(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct pci_epf_header *hdr)
{
@@ -131,7 +167,8 @@ static int dw_pcie_ep_write_header(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
}
static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, u8 func_no, int type,
- dma_addr_t cpu_addr, enum pci_barno bar)
+ dma_addr_t parent_bus_addr, enum pci_barno bar,
+ size_t size)
{
int ret;
u32 free_win;
@@ -140,7 +177,7 @@ static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, u8 func_no, int type,
if (!ep->bar_to_atu[bar])
free_win = find_first_zero_bit(ep->ib_window_map, pci->num_ib_windows);
else
- free_win = ep->bar_to_atu[bar];
+ free_win = ep->bar_to_atu[bar] - 1;
if (free_win >= pci->num_ib_windows) {
dev_err(pci->dev, "No free inbound window\n");
@@ -148,21 +185,24 @@ static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, u8 func_no, int type,
}
ret = dw_pcie_prog_ep_inbound_atu(pci, func_no, free_win, type,
- cpu_addr, bar);
+ parent_bus_addr, bar, size);
if (ret < 0) {
dev_err(pci->dev, "Failed to program IB window\n");
return ret;
}
- ep->bar_to_atu[bar] = free_win;
+ /*
+ * Always increment free_win before assignment, since value 0 is used to identify
+ * unallocated mapping.
+ */
+ ep->bar_to_atu[bar] = free_win + 1;
set_bit(free_win, ep->ib_window_map);
return 0;
}
-static int dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep, u8 func_no,
- phys_addr_t phys_addr,
- u64 pci_addr, size_t size)
+static int dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep,
+ struct dw_pcie_ob_atu_cfg *atu)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
u32 free_win;
@@ -174,13 +214,13 @@ static int dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep, u8 func_no,
return -EINVAL;
}
- ret = dw_pcie_prog_ep_outbound_atu(pci, func_no, free_win, PCIE_ATU_TYPE_MEM,
- phys_addr, pci_addr, size);
+ atu->index = free_win;
+ ret = dw_pcie_prog_outbound_atu(pci, atu);
if (ret)
return ret;
set_bit(free_win, ep->ob_window_map);
- ep->outbound_addr[free_win] = phys_addr;
+ ep->outbound_addr[free_win] = atu->parent_bus_addr;
return 0;
}
@@ -191,7 +231,10 @@ static void dw_pcie_ep_clear_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
enum pci_barno bar = epf_bar->barno;
- u32 atu_index = ep->bar_to_atu[bar];
+ u32 atu_index = ep->bar_to_atu[bar] - 1;
+
+ if (!ep->bar_to_atu[bar])
+ return;
__dw_pcie_ep_reset_bar(pci, func_no, bar, epf_bar->flags);
@@ -201,30 +244,96 @@ static void dw_pcie_ep_clear_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
ep->bar_to_atu[bar] = 0;
}
-static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
- struct pci_epf_bar *epf_bar)
+static unsigned int dw_pcie_ep_get_rebar_offset(struct dw_pcie *pci,
+ enum pci_barno bar)
+{
+ u32 reg, bar_index;
+ unsigned int offset, nbars;
+ int i;
+
+ offset = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_REBAR);
+ if (!offset)
+ return offset;
+
+ reg = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL);
+ nbars = (reg & PCI_REBAR_CTRL_NBAR_MASK) >> PCI_REBAR_CTRL_NBAR_SHIFT;
+
+ for (i = 0; i < nbars; i++, offset += PCI_REBAR_CTRL) {
+ reg = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL);
+ bar_index = reg & PCI_REBAR_CTRL_BAR_IDX;
+ if (bar_index == bar)
+ return offset;
+ }
+
+ return 0;
+}
+
+static int dw_pcie_ep_set_bar_resizable(struct dw_pcie_ep *ep, u8 func_no,
+ struct pci_epf_bar *epf_bar)
{
- struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
enum pci_barno bar = epf_bar->barno;
size_t size = epf_bar->size;
int flags = epf_bar->flags;
- int ret, type;
- u32 reg;
-
- reg = PCI_BASE_ADDRESS_0 + (4 * bar);
+ u32 reg = PCI_BASE_ADDRESS_0 + (4 * bar);
+ unsigned int rebar_offset;
+ u32 rebar_cap, rebar_ctrl;
+ int ret;
- if (!(flags & PCI_BASE_ADDRESS_SPACE))
- type = PCIE_ATU_TYPE_MEM;
- else
- type = PCIE_ATU_TYPE_IO;
+ rebar_offset = dw_pcie_ep_get_rebar_offset(pci, bar);
+ if (!rebar_offset)
+ return -EINVAL;
- ret = dw_pcie_ep_inbound_atu(ep, func_no, type, epf_bar->phys_addr, bar);
+ ret = pci_epc_bar_size_to_rebar_cap(size, &rebar_cap);
if (ret)
return ret;
- if (ep->epf_bar[bar])
- return 0;
+ dw_pcie_dbi_ro_wr_en(pci);
+
+ /*
+ * A BAR mask should not be written for a resizable BAR. The BAR mask
+ * is automatically derived by the controller every time the "selected
+ * size" bits are updated, see "Figure 3-26 Resizable BAR Example for
+ * 32-bit Memory BAR0" in DWC EP databook 5.96a. We simply need to write
+ * BIT(0) to set the BAR enable bit.
+ */
+ dw_pcie_ep_writel_dbi2(ep, func_no, reg, BIT(0));
+ dw_pcie_ep_writel_dbi(ep, func_no, reg, flags);
+
+ if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) {
+ dw_pcie_ep_writel_dbi2(ep, func_no, reg + 4, 0);
+ dw_pcie_ep_writel_dbi(ep, func_no, reg + 4, 0);
+ }
+
+ /*
+ * Bits 31:0 in PCI_REBAR_CAP define "supported sizes" bits for sizes
+ * 1 MB to 128 TB. Bits 31:16 in PCI_REBAR_CTRL define "supported sizes"
+ * bits for sizes 256 TB to 8 EB. Disallow sizes 256 TB to 8 EB.
+ */
+ rebar_ctrl = dw_pcie_readl_dbi(pci, rebar_offset + PCI_REBAR_CTRL);
+ rebar_ctrl &= ~GENMASK(31, 16);
+ dw_pcie_writel_dbi(pci, rebar_offset + PCI_REBAR_CTRL, rebar_ctrl);
+
+ /*
+ * The "selected size" (bits 13:8) in PCI_REBAR_CTRL are automatically
+ * updated when writing PCI_REBAR_CAP, see "Figure 3-26 Resizable BAR
+ * Example for 32-bit Memory BAR0" in DWC EP databook 5.96a.
+ */
+ dw_pcie_writel_dbi(pci, rebar_offset + PCI_REBAR_CAP, rebar_cap);
+
+ dw_pcie_dbi_ro_wr_dis(pci);
+
+ return 0;
+}
+
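
For orientation, a short note on the PCI_REBAR_CAP encoding referred to above; the exact value produced by pci_epc_bar_size_to_rebar_cap() is stated here as an assumption based on that encoding:

/*
 * In PCI_REBAR_CAP, supported-size bit n corresponds to 2^(n + 16) bytes:
 * bit 4 = 1 MB, bit 5 = 2 MB, ..., bit 31 = 128 TB. So for a 2 MB
 * resizable BAR, pci_epc_bar_size_to_rebar_cap() should yield a cap value
 * with only bit 5 set, and once that value is written the controller
 * derives the BAR mask from the resulting "selected size" field on its own.
 */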
+static int dw_pcie_ep_set_bar_programmable(struct dw_pcie_ep *ep, u8 func_no,
+ struct pci_epf_bar *epf_bar)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ enum pci_barno bar = epf_bar->barno;
+ size_t size = epf_bar->size;
+ int flags = epf_bar->flags;
+ u32 reg = PCI_BASE_ADDRESS_0 + (4 * bar);
dw_pcie_dbi_ro_wr_en(pci);
@@ -236,19 +345,113 @@ static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
dw_pcie_ep_writel_dbi(ep, func_no, reg + 4, 0);
}
- ep->epf_bar[bar] = epf_bar;
dw_pcie_dbi_ro_wr_dis(pci);
return 0;
}
+static enum pci_epc_bar_type dw_pcie_ep_get_bar_type(struct dw_pcie_ep *ep,
+ enum pci_barno bar)
+{
+ const struct pci_epc_features *epc_features;
+
+ if (!ep->ops->get_features)
+ return BAR_PROGRAMMABLE;
+
+ epc_features = ep->ops->get_features(ep);
+
+ return epc_features->bar[bar].type;
+}
+
+static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ struct pci_epf_bar *epf_bar)
+{
+ struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ enum pci_barno bar = epf_bar->barno;
+ size_t size = epf_bar->size;
+ enum pci_epc_bar_type bar_type;
+ int flags = epf_bar->flags;
+ int ret, type;
+
+ /*
+ * DWC does not allow BAR pairs to overlap, e.g. you cannot combine BARs
+ * 1 and 2 to form a 64-bit BAR.
+ */
+ if ((flags & PCI_BASE_ADDRESS_MEM_TYPE_64) && (bar & 1))
+ return -EINVAL;
+
+ /*
+ * Certain EPF drivers dynamically change the physical address of a BAR
+ * (i.e. they call set_bar() twice, without ever calling clear_bar(), as
+ * calling clear_bar() would clear the BAR's PCI address assigned by the
+ * host).
+ */
+ if (ep->epf_bar[bar]) {
+ /*
+ * We can only dynamically change a BAR if the new BAR size and
+ * BAR flags do not differ from the existing configuration.
+ */
+ if (ep->epf_bar[bar]->barno != bar ||
+ ep->epf_bar[bar]->size != size ||
+ ep->epf_bar[bar]->flags != flags)
+ return -EINVAL;
+
+ /*
+ * When dynamically changing a BAR, skip writing the BAR reg, as
+ * that would clear the BAR's PCI address assigned by the host.
+ */
+ goto config_atu;
+ }
+
+ bar_type = dw_pcie_ep_get_bar_type(ep, bar);
+ switch (bar_type) {
+ case BAR_FIXED:
+ /*
+ * There is no need to write a BAR mask for a fixed BAR (except
+ * to write 1 to the LSB of the BAR mask register, to enable the
+ * BAR). Write the BAR mask regardless. (The fixed bits in the
+ * BAR mask register will be read-only anyway.)
+ */
+ fallthrough;
+ case BAR_PROGRAMMABLE:
+ ret = dw_pcie_ep_set_bar_programmable(ep, func_no, epf_bar);
+ break;
+ case BAR_RESIZABLE:
+ ret = dw_pcie_ep_set_bar_resizable(ep, func_no, epf_bar);
+ break;
+ default:
+ ret = -EINVAL;
+ dev_err(pci->dev, "Invalid BAR type\n");
+ break;
+ }
+
+ if (ret)
+ return ret;
+
+config_atu:
+ if (!(flags & PCI_BASE_ADDRESS_SPACE))
+ type = PCIE_ATU_TYPE_MEM;
+ else
+ type = PCIE_ATU_TYPE_IO;
+
+ ret = dw_pcie_ep_inbound_atu(ep, func_no, type, epf_bar->phys_addr, bar,
+ size);
+ if (ret)
+ return ret;
+
+ ep->epf_bar[bar] = epf_bar;
+
+ return 0;
+}
+
static int dw_pcie_find_index(struct dw_pcie_ep *ep, phys_addr_t addr,
u32 *atu_index)
{
u32 index;
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- for (index = 0; index < pci->num_ob_windows; index++) {
+ for_each_set_bit(index, ep->ob_window_map, pci->num_ob_windows) {
if (ep->outbound_addr[index] != addr)
continue;
*atu_index = index;
@@ -258,6 +461,20 @@ static int dw_pcie_find_index(struct dw_pcie_ep *ep, phys_addr_t addr,
return -EINVAL;
}
+static u64 dw_pcie_ep_align_addr(struct pci_epc *epc, u64 pci_addr,
+ size_t *pci_size, size_t *offset)
+{
+ struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ u64 mask = pci->region_align - 1;
+ size_t ofst = pci_addr & mask;
+
+ *pci_size = ALIGN(ofst + *pci_size, epc->mem->window.page_size);
+ *offset = ofst;
+
+ return pci_addr & ~mask;
+}
+
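A worked example of the alignment helper above, with assumed values (region_align = 0x10000, page_size = 0x1000):

/*
 * For pci_addr = 0x12345678 and *pci_size = 4:
 *   ofst      = 0x12345678 & 0xffff        = 0x5678
 *   *pci_size = ALIGN(0x5678 + 4, 0x1000)  = 0x6000
 *   *offset   = 0x5678
 *   return      0x12345678 & ~0xffff       = 0x12340000
 * The caller maps *pci_size bytes at the returned base and writes at
 * base + *offset, as the MSI/MSI-X raise paths below do.
 */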
static void dw_pcie_ep_unmap_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
phys_addr_t addr)
{
@@ -266,10 +483,12 @@ static void dw_pcie_ep_unmap_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- ret = dw_pcie_find_index(ep, addr, &atu_index);
+ ret = dw_pcie_find_index(ep, addr - pci->parent_bus_offset,
+ &atu_index);
if (ret < 0)
return;
+ ep->outbound_addr[atu_index] = 0;
dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_OB, atu_index);
clear_bit(atu_index, ep->ob_window_map);
}
@@ -280,8 +499,14 @@ static int dw_pcie_ep_map_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
int ret;
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
-
- ret = dw_pcie_ep_outbound_atu(ep, func_no, addr, pci_addr, size);
+ struct dw_pcie_ob_atu_cfg atu = { 0 };
+
+ atu.func_no = func_no;
+ atu.type = PCIE_ATU_TYPE_MEM;
+ atu.parent_bus_addr = addr - pci->parent_bus_offset;
+ atu.pci_addr = pci_addr;
+ atu.size = size;
+ ret = dw_pcie_ep_outbound_atu(ep, &atu);
if (ret) {
dev_err(pci->dev, "Failed to enable address\n");
return ret;
@@ -428,6 +653,7 @@ static const struct pci_epc_ops epc_ops = {
.write_header = dw_pcie_ep_write_header,
.set_bar = dw_pcie_ep_set_bar,
.clear_bar = dw_pcie_ep_clear_bar,
+ .align_addr = dw_pcie_ep_align_addr,
.map_addr = dw_pcie_ep_map_addr,
.unmap_addr = dw_pcie_ep_unmap_addr,
.set_msi = dw_pcie_ep_set_msi,
@@ -440,6 +666,13 @@ static const struct pci_epc_ops epc_ops = {
.get_features = dw_pcie_ep_get_features,
};
+/**
+ * dw_pcie_ep_raise_intx_irq - Raise INTx IRQ to the host
+ * @ep: DWC EP device
+ * @func_no: Function number of the endpoint
+ *
+ * Return: 0 if success, errno otherwise.
+ */
int dw_pcie_ep_raise_intx_irq(struct dw_pcie_ep *ep, u8 func_no)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
@@ -451,13 +684,22 @@ int dw_pcie_ep_raise_intx_irq(struct dw_pcie_ep *ep, u8 func_no)
}
EXPORT_SYMBOL_GPL(dw_pcie_ep_raise_intx_irq);
+/**
+ * dw_pcie_ep_raise_msi_irq - Raise MSI IRQ to the host
+ * @ep: DWC EP device
+ * @func_no: Function number of the endpoint
+ * @interrupt_num: Interrupt number to be raised
+ *
+ * Return: 0 if success, errno otherwise.
+ */
int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
u8 interrupt_num)
{
u32 msg_addr_lower, msg_addr_upper, reg;
struct dw_pcie_ep_func *ep_func;
struct pci_epc *epc = ep->epc;
- unsigned int aligned_offset;
+ size_t map_size = sizeof(u32);
+ size_t offset;
u16 msg_ctrl, msg_data;
bool has_upper;
u64 msg_addr;
@@ -485,14 +727,13 @@ int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
}
msg_addr = ((u64)msg_addr_upper) << 32 | msg_addr_lower;
- aligned_offset = msg_addr & (epc->mem->window.page_size - 1);
- msg_addr = ALIGN_DOWN(msg_addr, epc->mem->window.page_size);
+ msg_addr = dw_pcie_ep_align_addr(epc, msg_addr, &map_size, &offset);
ret = dw_pcie_ep_map_addr(epc, func_no, 0, ep->msi_mem_phys, msg_addr,
- epc->mem->window.page_size);
+ map_size);
if (ret)
return ret;
- writel(msg_data | (interrupt_num - 1), ep->msi_mem + aligned_offset);
+ writel(msg_data | (interrupt_num - 1), ep->msi_mem + offset);
dw_pcie_ep_unmap_addr(epc, func_no, 0, ep->msi_mem_phys);
@@ -500,6 +741,15 @@ int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
}
EXPORT_SYMBOL_GPL(dw_pcie_ep_raise_msi_irq);
+/**
+ * dw_pcie_ep_raise_msix_irq_doorbell - Raise MSI-X to the host using Doorbell
+ * method
+ * @ep: DWC EP device
+ * @func_no: Function number of the endpoint device
+ * @interrupt_num: Interrupt number to be raised
+ *
+ * Return: 0 if success, errno otherwise.
+ */
int dw_pcie_ep_raise_msix_irq_doorbell(struct dw_pcie_ep *ep, u8 func_no,
u16 interrupt_num)
{
@@ -519,6 +769,14 @@ int dw_pcie_ep_raise_msix_irq_doorbell(struct dw_pcie_ep *ep, u8 func_no,
return 0;
}
+/**
+ * dw_pcie_ep_raise_msix_irq - Raise MSI-X to the host
+ * @ep: DWC EP device
+ * @func_no: Function number of the endpoint device
+ * @interrupt_num: Interrupt number to be raised
+ *
+ * Return: 0 if success, errno otherwise.
+ */
int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
u16 interrupt_num)
{
@@ -526,8 +784,9 @@ int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
struct pci_epf_msix_tbl *msix_tbl;
struct dw_pcie_ep_func *ep_func;
struct pci_epc *epc = ep->epc;
+ size_t map_size = sizeof(u32);
+ size_t offset;
u32 reg, msg_data, vec_ctrl;
- unsigned int aligned_offset;
u32 tbl_offset;
u64 msg_addr;
int ret;
@@ -552,63 +811,125 @@ int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
return -EPERM;
}
- aligned_offset = msg_addr & (epc->mem->window.page_size - 1);
- msg_addr = ALIGN_DOWN(msg_addr, epc->mem->window.page_size);
+ msg_addr = dw_pcie_ep_align_addr(epc, msg_addr, &map_size, &offset);
ret = dw_pcie_ep_map_addr(epc, func_no, 0, ep->msi_mem_phys, msg_addr,
- epc->mem->window.page_size);
+ map_size);
if (ret)
return ret;
- writel(msg_data, ep->msi_mem + aligned_offset);
+ writel(msg_data, ep->msi_mem + offset);
dw_pcie_ep_unmap_addr(epc, func_no, 0, ep->msi_mem_phys);
return 0;
}
-void dw_pcie_ep_exit(struct dw_pcie_ep *ep)
+/**
+ * dw_pcie_ep_cleanup - Cleanup DWC EP resources after fundamental reset
+ * @ep: DWC EP device
+ *
+ * Cleans up the DWC EP specific resources (such as eDMA) after a fundamental
+ * reset like PERST#. Note that this API is only applicable for drivers
+ * supporting PERST# or any other method of fundamental reset.
+ */
+void dw_pcie_ep_cleanup(struct dw_pcie_ep *ep)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- struct pci_epc *epc = ep->epc;
+ dwc_pcie_debugfs_deinit(pci);
dw_pcie_edma_remove(pci);
+}
+EXPORT_SYMBOL_GPL(dw_pcie_ep_cleanup);
+
+/**
+ * dw_pcie_ep_deinit - Deinitialize the endpoint device
+ * @ep: DWC EP device
+ *
+ * Deinitialize the endpoint device. The EPC device is not destroyed since that
+ * is taken care of by devres.
+ */
+void dw_pcie_ep_deinit(struct dw_pcie_ep *ep)
+{
+ struct pci_epc *epc = ep->epc;
+
+ dw_pcie_ep_cleanup(ep);
pci_epc_mem_free_addr(epc, ep->msi_mem_phys, ep->msi_mem,
epc->mem->window.page_size);
pci_epc_mem_exit(epc);
-
- if (ep->ops->deinit)
- ep->ops->deinit(ep);
}
-EXPORT_SYMBOL_GPL(dw_pcie_ep_exit);
+EXPORT_SYMBOL_GPL(dw_pcie_ep_deinit);
-static unsigned int dw_pcie_ep_find_ext_capability(struct dw_pcie *pci, int cap)
+static void dw_pcie_ep_init_non_sticky_registers(struct dw_pcie *pci)
{
- u32 header;
- int pos = PCI_CFG_SPACE_SIZE;
+ struct dw_pcie_ep *ep = &pci->ep;
+ unsigned int offset;
+ unsigned int nbars;
+ enum pci_barno bar;
+ u32 reg, i, val;
+
+ offset = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_REBAR);
- while (pos) {
- header = dw_pcie_readl_dbi(pci, pos);
- if (PCI_EXT_CAP_ID(header) == cap)
- return pos;
+ dw_pcie_dbi_ro_wr_en(pci);
- pos = PCI_EXT_CAP_NEXT(header);
- if (!pos)
- break;
+ if (offset) {
+ reg = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL);
+ nbars = (reg & PCI_REBAR_CTRL_NBAR_MASK) >>
+ PCI_REBAR_CTRL_NBAR_SHIFT;
+
+ /*
+ * PCIe r6.0, sec 7.8.6.2 requires us to support at least one
+ * size in the range from 1 MB to 512 GB. Advertise support
+ * for 1 MB BAR size only.
+ *
+ * For a BAR that has been configured via dw_pcie_ep_set_bar(),
+ * advertise support for only that size instead.
+ */
+ for (i = 0; i < nbars; i++, offset += PCI_REBAR_CTRL) {
+ /*
+ * While the RESBAR_CAP_REG_* fields are sticky, the
+ * RESBAR_CTRL_REG_BAR_SIZE field is non-sticky (it is
+ * sticky in certain versions of DWC PCIe, but not all).
+ *
+ * RESBAR_CTRL_REG_BAR_SIZE is updated automatically by
+ * the controller when RESBAR_CAP_REG is written, which
+ * is why RESBAR_CAP_REG is written here.
+ */
+ val = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL);
+ bar = val & PCI_REBAR_CTRL_BAR_IDX;
+ if (ep->epf_bar[bar])
+ pci_epc_bar_size_to_rebar_cap(ep->epf_bar[bar]->size, &val);
+ else
+ val = BIT(4);
+
+ dw_pcie_writel_dbi(pci, offset + PCI_REBAR_CAP, val);
+ }
}
- return 0;
+ dw_pcie_setup(pci);
+ dw_pcie_dbi_ro_wr_dis(pci);
}
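For reference, a rough sketch (an assumption for illustration, not the pci_epc_bar_size_to_rebar_cap() implementation) of the Resizable BAR capability encoding used above, where bit 4 advertises 1 MB and bit n advertises 2^(n - 4) MB:

static u32 foo_rebar_cap_for_size(resource_size_t size)
{
	/* Round sizes below the 1 MB minimum up to 1 MB (bit 4) */
	if (size < SZ_1M)
		size = SZ_1M;

	/* Round up to the next power of two and convert to a capability bit */
	return BIT(order_base_2(size) - ilog2(SZ_1M) + 4);
}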
-int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep)
+/**
+ * dw_pcie_ep_init_registers - Initialize DWC EP specific registers
+ * @ep: DWC EP device
+ *
+ * Initialize the registers (CSRs) specific to DWC EP. This API should be called
+ * only when the endpoint receives an active refclk (either from host or
+ * generated locally).
+ */
+int dw_pcie_ep_init_registers(struct dw_pcie_ep *ep)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- unsigned int offset, ptm_cap_base;
- unsigned int nbars;
+ struct dw_pcie_ep_func *ep_func;
+ struct device *dev = pci->dev;
+ struct pci_epc *epc = ep->epc;
+ u32 ptm_cap_base, reg;
u8 hdr_type;
- u32 reg;
- int i;
+ u8 func_no;
+ void *addr;
+ int ret;
hdr_type = dw_pcie_readb_dbi(pci, PCI_HEADER_TYPE) &
PCI_HEADER_TYPE_MASK;
@@ -619,25 +940,61 @@ int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep)
return -EIO;
}
- offset = dw_pcie_ep_find_ext_capability(pci, PCI_EXT_CAP_ID_REBAR);
- ptm_cap_base = dw_pcie_ep_find_ext_capability(pci, PCI_EXT_CAP_ID_PTM);
+ dw_pcie_version_detect(pci);
- dw_pcie_dbi_ro_wr_en(pci);
+ dw_pcie_iatu_detect(pci);
- if (offset) {
- reg = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL);
- nbars = (reg & PCI_REBAR_CTRL_NBAR_MASK) >>
- PCI_REBAR_CTRL_NBAR_SHIFT;
+ ret = dw_pcie_edma_detect(pci);
+ if (ret)
+ return ret;
- /*
- * PCIe r6.0, sec 7.8.6.2 require us to support at least one
- * size in the range from 1 MB to 512 GB. Advertise support
- * for 1 MB BAR size only.
- */
- for (i = 0; i < nbars; i++, offset += PCI_REBAR_CTRL)
- dw_pcie_writel_dbi(pci, offset + PCI_REBAR_CAP, BIT(4));
+ ret = -ENOMEM;
+ if (!ep->ib_window_map) {
+ ep->ib_window_map = devm_bitmap_zalloc(dev, pci->num_ib_windows,
+ GFP_KERNEL);
+ if (!ep->ib_window_map)
+ goto err_remove_edma;
+ }
+
+ if (!ep->ob_window_map) {
+ ep->ob_window_map = devm_bitmap_zalloc(dev, pci->num_ob_windows,
+ GFP_KERNEL);
+ if (!ep->ob_window_map)
+ goto err_remove_edma;
+ }
+
+ if (!ep->outbound_addr) {
+ addr = devm_kcalloc(dev, pci->num_ob_windows, sizeof(phys_addr_t),
+ GFP_KERNEL);
+ if (!addr)
+ goto err_remove_edma;
+ ep->outbound_addr = addr;
+ }
+
+ for (func_no = 0; func_no < epc->max_functions; func_no++) {
+
+ ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
+ if (ep_func)
+ continue;
+
+ ep_func = devm_kzalloc(dev, sizeof(*ep_func), GFP_KERNEL);
+ if (!ep_func)
+ goto err_remove_edma;
+
+ ep_func->func_no = func_no;
+ ep_func->msi_cap = dw_pcie_ep_find_capability(ep, func_no,
+ PCI_CAP_ID_MSI);
+ ep_func->msix_cap = dw_pcie_ep_find_capability(ep, func_no,
+ PCI_CAP_ID_MSIX);
+
+ list_add_tail(&ep_func->list, &ep->func_list);
}
+ if (ep->ops->init)
+ ep->ops->init(ep);
+
+ ptm_cap_base = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_PTM);
+
/*
* PTM responder capability can be disabled only after disabling
* PTM root capability.
@@ -654,28 +1011,65 @@ int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep)
dw_pcie_dbi_ro_wr_dis(pci);
}
- dw_pcie_setup(pci);
- dw_pcie_dbi_ro_wr_dis(pci);
+ dw_pcie_ep_init_non_sticky_registers(pci);
+
+ dwc_pcie_debugfs_init(pci);
return 0;
+
+err_remove_edma:
+ dw_pcie_edma_remove(pci);
+
+ return ret;
}
-EXPORT_SYMBOL_GPL(dw_pcie_ep_init_complete);
+EXPORT_SYMBOL_GPL(dw_pcie_ep_init_registers);
-int dw_pcie_ep_init(struct dw_pcie_ep *ep)
+/**
+ * dw_pcie_ep_linkup - Notify EPF drivers about Link Up event
+ * @ep: DWC EP device
+ */
+void dw_pcie_ep_linkup(struct dw_pcie_ep *ep)
+{
+ struct pci_epc *epc = ep->epc;
+
+ pci_epc_linkup(epc);
+}
+EXPORT_SYMBOL_GPL(dw_pcie_ep_linkup);
+
+/**
+ * dw_pcie_ep_linkdown - Notify EPF drivers about Link Down event
+ * @ep: DWC EP device
+ *
+ * Non-sticky registers are also initialized before sending the notification to
+ * the EPF drivers. This is needed since the registers need to be initialized
+ * before the link comes back again.
+ */
+void dw_pcie_ep_linkdown(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct pci_epc *epc = ep->epc;
+
+ /*
+ * Initialize the non-sticky DWC registers as they would've reset post
+ * Link Down. This is specifically needed for drivers not supporting
+ * PERST# as they have no way to reinitialize the registers before the
+ * link comes back again.
+ */
+ dw_pcie_ep_init_non_sticky_registers(pci);
+
+ pci_epc_linkdown(epc);
+}
+EXPORT_SYMBOL_GPL(dw_pcie_ep_linkdown);
+
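A hypothetical glue-driver interrupt handler (register offset and bit names assumed, not from this patch) showing how the two notification helpers above are meant to be used:

#define FOO_APP_INT_STATUS	0x10	/* assumed */
#define FOO_APP_INT_LINK_UP	BIT(0)	/* assumed */
#define FOO_APP_INT_LINK_DOWN	BIT(1)	/* assumed */

struct foo_pcie {
	struct dw_pcie	pci;
	void __iomem	*app_base;
};

static irqreturn_t foo_pcie_ep_irq_handler(int irq, void *arg)
{
	struct foo_pcie *foo = arg;
	u32 status = readl(foo->app_base + FOO_APP_INT_STATUS);

	/* Ack the interrupt before notifying the EPF layer */
	writel(status, foo->app_base + FOO_APP_INT_STATUS);

	if (status & FOO_APP_INT_LINK_UP)
		dw_pcie_ep_linkup(&foo->pci.ep);

	if (status & FOO_APP_INT_LINK_DOWN)
		dw_pcie_ep_linkdown(&foo->pci.ep);

	return IRQ_HANDLED;
}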
+static int dw_pcie_ep_get_resources(struct dw_pcie_ep *ep)
{
- int ret;
- void *addr;
- u8 func_no;
- struct resource *res;
- struct pci_epc *epc;
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
struct device *dev = pci->dev;
struct platform_device *pdev = to_platform_device(dev);
struct device_node *np = dev->of_node;
- const struct pci_epc_features *epc_features;
- struct dw_pcie_ep_func *ep_func;
-
- INIT_LIST_HEAD(&ep->func_list);
+ struct pci_epc *epc = ep->epc;
+ struct resource *res;
+ int ret;
ret = dw_pcie_get_resources(pci);
if (ret)
@@ -688,28 +1082,37 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
ep->phys_base = res->start;
ep->addr_size = resource_size(res);
- if (ep->ops->pre_init)
- ep->ops->pre_init(ep);
-
- dw_pcie_version_detect(pci);
+ /*
+ * artpec6_pcie_cpu_addr_fixup() uses ep->phys_base, so call
+ * dw_pcie_parent_bus_offset() after setting ep->phys_base.
+ */
+ pci->parent_bus_offset = dw_pcie_parent_bus_offset(pci, "addr_space",
+ ep->phys_base);
- dw_pcie_iatu_detect(pci);
+ ret = of_property_read_u8(np, "max-functions", &epc->max_functions);
+ if (ret < 0)
+ epc->max_functions = 1;
- ep->ib_window_map = devm_bitmap_zalloc(dev, pci->num_ib_windows,
- GFP_KERNEL);
- if (!ep->ib_window_map)
- return -ENOMEM;
+ return 0;
+}
- ep->ob_window_map = devm_bitmap_zalloc(dev, pci->num_ob_windows,
- GFP_KERNEL);
- if (!ep->ob_window_map)
- return -ENOMEM;
+/**
+ * dw_pcie_ep_init - Initialize the endpoint device
+ * @ep: DWC EP device
+ *
+ * Initialize the endpoint device. Allocate resources and create the EPC
+ * device with the endpoint framework.
+ *
+ * Return: 0 if success, errno otherwise.
+ */
+int dw_pcie_ep_init(struct dw_pcie_ep *ep)
+{
+ int ret;
+ struct pci_epc *epc;
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct device *dev = pci->dev;
- addr = devm_kcalloc(dev, pci->num_ob_windows, sizeof(phys_addr_t),
- GFP_KERNEL);
- if (!addr)
- return -ENOMEM;
- ep->outbound_addr = addr;
+ INIT_LIST_HEAD(&ep->func_list);
epc = devm_pci_epc_create(dev, &epc_ops);
if (IS_ERR(epc)) {
@@ -720,32 +1123,18 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
ep->epc = epc;
epc_set_drvdata(epc, ep);
- ret = of_property_read_u8(np, "max-functions", &epc->max_functions);
- if (ret < 0)
- epc->max_functions = 1;
-
- for (func_no = 0; func_no < epc->max_functions; func_no++) {
- ep_func = devm_kzalloc(dev, sizeof(*ep_func), GFP_KERNEL);
- if (!ep_func)
- return -ENOMEM;
-
- ep_func->func_no = func_no;
- ep_func->msi_cap = dw_pcie_ep_find_capability(ep, func_no,
- PCI_CAP_ID_MSI);
- ep_func->msix_cap = dw_pcie_ep_find_capability(ep, func_no,
- PCI_CAP_ID_MSIX);
-
- list_add_tail(&ep_func->list, &ep->func_list);
- }
+ ret = dw_pcie_ep_get_resources(ep);
+ if (ret)
+ return ret;
- if (ep->ops->init)
- ep->ops->init(ep);
+ if (ep->ops->pre_init)
+ ep->ops->pre_init(ep);
ret = pci_epc_mem_init(epc, ep->phys_base, ep->addr_size,
ep->page_size);
if (ret < 0) {
dev_err(dev, "Failed to initialize address space\n");
- goto err_ep_deinit;
+ return ret;
}
ep->msi_mem = pci_epc_mem_alloc_addr(epc, &ep->msi_mem_phys,
@@ -756,36 +1145,11 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
goto err_exit_epc_mem;
}
- ret = dw_pcie_edma_detect(pci);
- if (ret)
- goto err_free_epc_mem;
-
- if (ep->ops->get_features) {
- epc_features = ep->ops->get_features(ep);
- if (epc_features->core_init_notifier)
- return 0;
- }
-
- ret = dw_pcie_ep_init_complete(ep);
- if (ret)
- goto err_remove_edma;
-
return 0;
-err_remove_edma:
- dw_pcie_edma_remove(pci);
-
-err_free_epc_mem:
- pci_epc_mem_free_addr(epc, ep->msi_mem_phys, ep->msi_mem,
- epc->mem->window.page_size);
-
err_exit_epc_mem:
pci_epc_mem_exit(epc);
-err_ep_deinit:
- if (ep->ops->deinit)
- ep->ops->deinit(ep);
-
return ret;
}
EXPORT_SYMBOL_GPL(dw_pcie_ep_init);
diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
index d15a5c2d5b48..d1cd48efad43 100644
--- a/drivers/pci/controller/dwc/pcie-designware-host.c
+++ b/drivers/pci/controller/dwc/pcie-designware-host.c
@@ -48,8 +48,9 @@ static struct irq_chip dw_pcie_msi_irq_chip = {
};
static struct msi_domain_info dw_pcie_msi_domain_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI),
+ .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX |
+ MSI_FLAG_MULTI_PCI_MSI,
.chip = &dw_pcie_msi_irq_chip,
};
@@ -116,12 +117,6 @@ static void dw_pci_setup_msi_msg(struct irq_data *d, struct msi_msg *msg)
(int)d->hwirq, msg->address_hi, msg->address_lo);
}
-static int dw_pci_msi_set_affinity(struct irq_data *d,
- const struct cpumask *mask, bool force)
-{
- return -EINVAL;
-}
-
static void dw_pci_bottom_mask(struct irq_data *d)
{
struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
@@ -177,7 +172,6 @@ static struct irq_chip dw_pci_msi_bottom_irq_chip = {
.name = "DWPCI-MSI",
.irq_ack = dw_pci_bottom_ack,
.irq_compose_msi_msg = dw_pci_setup_msi_msg,
- .irq_set_affinity = dw_pci_msi_set_affinity,
.irq_mask = dw_pci_bottom_mask,
.irq_unmask = dw_pci_bottom_unmask,
};
@@ -233,7 +227,7 @@ static const struct irq_domain_ops dw_pcie_msi_domain_ops = {
int dw_pcie_allocate_domains(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct fwnode_handle *fwnode = of_node_to_fwnode(pci->dev->of_node);
+ struct fwnode_handle *fwnode = of_fwnode_handle(pci->dev->of_node);
pp->irq_domain = irq_domain_create_linear(fwnode, pp->num_vectors,
&dw_pcie_msi_domain_ops, pp);
@@ -398,50 +392,95 @@ static int dw_pcie_msi_host_init(struct dw_pcie_rp *pp)
return 0;
}
-int dw_pcie_host_init(struct dw_pcie_rp *pp)
+static void dw_pcie_host_request_msg_tlp_res(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct resource_entry *win;
+ struct resource *res;
+
+ win = resource_list_first_type(&pp->bridge->windows, IORESOURCE_MEM);
+ if (win) {
+ res = devm_kzalloc(pci->dev, sizeof(*res), GFP_KERNEL);
+ if (!res)
+ return;
+
+ /*
+ * Allocate MSG TLP region of size 'region_align' at the end of
+ * the host bridge window.
+ */
+ res->start = win->res->end - pci->region_align + 1;
+ res->end = win->res->end;
+ res->name = "msg";
+ res->flags = win->res->flags | IORESOURCE_BUSY;
+
+ if (!devm_request_resource(pci->dev, win->res, res))
+ pp->msg_res = res;
+ }
+}
+
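A hypothetical glue-driver fragment (not from this patch) opting in to the generic PME_Turn_Off Msg TLP support before the host bridge is initialized, so that the region reserved above gets used:

static int foo_pcie_add_rc(struct dw_pcie_rp *pp)
{
	/* Must be set before dw_pcie_host_init() reserves the MSG TLP region */
	pp->use_atu_msg = true;

	return dw_pcie_host_init(pp);
}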
+static int dw_pcie_host_get_resources(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct device *dev = pci->dev;
- struct device_node *np = dev->of_node;
struct platform_device *pdev = to_platform_device(dev);
struct resource_entry *win;
- struct pci_host_bridge *bridge;
struct resource *res;
int ret;
- raw_spin_lock_init(&pp->lock);
-
ret = dw_pcie_get_resources(pci);
if (ret)
return ret;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
- if (res) {
- pp->cfg0_size = resource_size(res);
- pp->cfg0_base = res->start;
-
- pp->va_cfg0_base = devm_pci_remap_cfg_resource(dev, res);
- if (IS_ERR(pp->va_cfg0_base))
- return PTR_ERR(pp->va_cfg0_base);
- } else {
- dev_err(dev, "Missing *config* reg space\n");
+ if (!res) {
+ dev_err(dev, "Missing \"config\" reg space\n");
return -ENODEV;
}
- bridge = devm_pci_alloc_host_bridge(dev, 0);
- if (!bridge)
- return -ENOMEM;
+ pp->cfg0_size = resource_size(res);
+ pp->cfg0_base = res->start;
- pp->bridge = bridge;
+ pp->va_cfg0_base = devm_pci_remap_cfg_resource(dev, res);
+ if (IS_ERR(pp->va_cfg0_base))
+ return PTR_ERR(pp->va_cfg0_base);
/* Get the I/O range from DT */
- win = resource_list_first_type(&bridge->windows, IORESOURCE_IO);
+ win = resource_list_first_type(&pp->bridge->windows, IORESOURCE_IO);
if (win) {
pp->io_size = resource_size(win->res);
pp->io_bus_addr = win->res->start - win->offset;
pp->io_base = pci_pio_to_address(win->res->start);
}
+ /*
+ * visconti_pcie_cpu_addr_fixup() uses pp->io_base, so we have to
+ * call dw_pcie_parent_bus_offset() after setting pp->io_base.
+ */
+ pci->parent_bus_offset = dw_pcie_parent_bus_offset(pci, "config",
+ pp->cfg0_base);
+ return 0;
+}
+
+int dw_pcie_host_init(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct device *dev = pci->dev;
+ struct device_node *np = dev->of_node;
+ struct pci_host_bridge *bridge;
+ int ret;
+
+ raw_spin_lock_init(&pp->lock);
+
+ bridge = devm_pci_alloc_host_bridge(dev, 0);
+ if (!bridge)
+ return -ENOMEM;
+
+ pp->bridge = bridge;
+
+ ret = dw_pcie_host_get_resources(pp);
+ if (ret)
+ return ret;
+
/* Set default bus ops */
bridge->ops = &dw_pcie_ops;
bridge->child_ops = &dw_child_pcie_ops;
@@ -454,8 +493,8 @@ int dw_pcie_host_init(struct dw_pcie_rp *pp)
if (pci_msi_enabled()) {
pp->has_msi_ctrl = !(pp->ops->msi_init ||
- of_property_read_bool(np, "msi-parent") ||
- of_property_read_bool(np, "msi-map"));
+ of_property_present(np, "msi-parent") ||
+ of_property_present(np, "msi-map"));
/*
* For the has_msi_ctrl case the default assignment is handled
@@ -484,6 +523,18 @@ int dw_pcie_host_init(struct dw_pcie_rp *pp)
dw_pcie_iatu_detect(pci);
+ /*
+ * Allocate the resource for MSG TLP before programming the iATU
+ * outbound window in dw_pcie_setup_rc(). Since the allocation depends
+ * on the value of 'region_align', this has to be done after
+ * dw_pcie_iatu_detect().
+ *
+ * Glue drivers need to set 'use_atu_msg' before dw_pcie_host_init() to
+ * make use of the generic MSG TLP implementation.
+ */
+ if (pp->use_atu_msg)
+ dw_pcie_host_request_msg_tlp_res(pp);
+
ret = dw_pcie_edma_detect(pci);
if (ret)
goto err_free_msi;
@@ -498,8 +549,14 @@ int dw_pcie_host_init(struct dw_pcie_rp *pp)
goto err_remove_edma;
}
- /* Ignore errors, the link may come up later */
- dw_pcie_wait_for_link(pci);
+ /*
+ * Note: Skip the link up delay only when a Link Up IRQ is present.
+ * If there is no Link Up IRQ, we should not bypass the delay
+ * because that would require users to manually rescan for devices.
+ */
+ if (!pp->use_linkup_irq)
+ /* Ignore errors, the link may come up later */
+ dw_pcie_wait_for_link(pci);
bridge->sysdata = pp;
@@ -510,6 +567,8 @@ int dw_pcie_host_init(struct dw_pcie_rp *pp)
if (pp->ops->post_init)
pp->ops->post_init(pp);
+ dwc_pcie_debugfs_init(pci);
+
return 0;
err_stop_link:
@@ -534,6 +593,8 @@ void dw_pcie_host_deinit(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ dwc_pcie_debugfs_deinit(pci);
+
pci_stop_root_bus(pp->bridge->bus);
pci_remove_root_bus(pp->bridge->bus);
@@ -554,6 +615,7 @@ static void __iomem *dw_pcie_other_conf_map_bus(struct pci_bus *bus,
{
struct dw_pcie_rp *pp = bus->sysdata;
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct dw_pcie_ob_atu_cfg atu = { 0 };
int type, ret;
u32 busdev;
@@ -576,8 +638,12 @@ static void __iomem *dw_pcie_other_conf_map_bus(struct pci_bus *bus,
else
type = PCIE_ATU_TYPE_CFG1;
- ret = dw_pcie_prog_outbound_atu(pci, 0, type, pp->cfg0_base, busdev,
- pp->cfg0_size);
+ atu.type = type;
+ atu.parent_bus_addr = pp->cfg0_base - pci->parent_bus_offset;
+ atu.pci_addr = busdev;
+ atu.size = pp->cfg0_size;
+
+ ret = dw_pcie_prog_outbound_atu(pci, &atu);
if (ret)
return NULL;
@@ -589,6 +655,7 @@ static int dw_pcie_rd_other_conf(struct pci_bus *bus, unsigned int devfn,
{
struct dw_pcie_rp *pp = bus->sysdata;
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct dw_pcie_ob_atu_cfg atu = { 0 };
int ret;
ret = pci_generic_config_read(bus, devfn, where, size, val);
@@ -596,9 +663,12 @@ static int dw_pcie_rd_other_conf(struct pci_bus *bus, unsigned int devfn,
return ret;
if (pp->cfg0_io_shared) {
- ret = dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO,
- pp->io_base, pp->io_bus_addr,
- pp->io_size);
+ atu.type = PCIE_ATU_TYPE_IO;
+ atu.parent_bus_addr = pp->io_base - pci->parent_bus_offset;
+ atu.pci_addr = pp->io_bus_addr;
+ atu.size = pp->io_size;
+
+ ret = dw_pcie_prog_outbound_atu(pci, &atu);
if (ret)
return PCIBIOS_SET_FAILED;
}
@@ -611,6 +681,7 @@ static int dw_pcie_wr_other_conf(struct pci_bus *bus, unsigned int devfn,
{
struct dw_pcie_rp *pp = bus->sysdata;
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct dw_pcie_ob_atu_cfg atu = { 0 };
int ret;
ret = pci_generic_config_write(bus, devfn, where, size, val);
@@ -618,9 +689,12 @@ static int dw_pcie_wr_other_conf(struct pci_bus *bus, unsigned int devfn,
return ret;
if (pp->cfg0_io_shared) {
- ret = dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO,
- pp->io_base, pp->io_bus_addr,
- pp->io_size);
+ atu.type = PCIE_ATU_TYPE_IO;
+ atu.parent_bus_addr = pp->io_base - pci->parent_bus_offset;
+ atu.pci_addr = pp->io_bus_addr;
+ atu.size = pp->io_size;
+
+ ret = dw_pcie_prog_outbound_atu(pci, &atu);
if (ret)
return PCIBIOS_SET_FAILED;
}
@@ -655,6 +729,7 @@ static struct pci_ops dw_pcie_ops = {
static int dw_pcie_iatu_setup(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct dw_pcie_ob_atu_cfg atu = { 0 };
struct resource_entry *entry;
int i, ret;
@@ -682,10 +757,19 @@ static int dw_pcie_iatu_setup(struct dw_pcie_rp *pp)
if (pci->num_ob_windows <= ++i)
break;
- ret = dw_pcie_prog_outbound_atu(pci, i, PCIE_ATU_TYPE_MEM,
- entry->res->start,
- entry->res->start - entry->offset,
- resource_size(entry->res));
+ atu.index = i;
+ atu.type = PCIE_ATU_TYPE_MEM;
+ atu.parent_bus_addr = entry->res->start - pci->parent_bus_offset;
+ atu.pci_addr = entry->res->start - entry->offset;
+
+ /* Adjust iATU size if MSG TLP region was allocated before */
+ if (pp->msg_res && pp->msg_res->parent == entry->res)
+ atu.size = resource_size(entry->res) -
+ resource_size(pp->msg_res);
+ else
+ atu.size = resource_size(entry->res);
+
+ ret = dw_pcie_prog_outbound_atu(pci, &atu);
if (ret) {
dev_err(pci->dev, "Failed to set MEM range %pr\n",
entry->res);
@@ -695,10 +779,13 @@ static int dw_pcie_iatu_setup(struct dw_pcie_rp *pp)
if (pp->io_size) {
if (pci->num_ob_windows > ++i) {
- ret = dw_pcie_prog_outbound_atu(pci, i, PCIE_ATU_TYPE_IO,
- pp->io_base,
- pp->io_bus_addr,
- pp->io_size);
+ atu.index = i;
+ atu.type = PCIE_ATU_TYPE_IO;
+ atu.parent_bus_addr = pp->io_base - pci->parent_bus_offset;
+ atu.pci_addr = pp->io_bus_addr;
+ atu.size = pp->io_size;
+
+ ret = dw_pcie_prog_outbound_atu(pci, &atu);
if (ret) {
dev_err(pci->dev, "Failed to set IO range %pr\n",
entry->res);
@@ -713,6 +800,8 @@ static int dw_pcie_iatu_setup(struct dw_pcie_rp *pp)
dev_warn(pci->dev, "Ranges exceed outbound iATU size (%d)\n",
pci->num_ob_windows);
+ pp->msg_atu_index = i;
+
i = 0;
resource_list_for_each_entry(entry, &pp->bridge->dma_ranges) {
if (resource_type(entry->res) != IORESOURCE_MEM)
@@ -818,6 +907,42 @@ int dw_pcie_setup_rc(struct dw_pcie_rp *pp)
}
EXPORT_SYMBOL_GPL(dw_pcie_setup_rc);
+static int dw_pcie_pme_turn_off(struct dw_pcie *pci)
+{
+ struct dw_pcie_ob_atu_cfg atu = { 0 };
+ void __iomem *mem;
+ int ret;
+
+ if (pci->num_ob_windows <= pci->pp.msg_atu_index)
+ return -ENOSPC;
+
+ if (!pci->pp.msg_res)
+ return -ENOSPC;
+
+ atu.code = PCIE_MSG_CODE_PME_TURN_OFF;
+ atu.routing = PCIE_MSG_TYPE_R_BC;
+ atu.type = PCIE_ATU_TYPE_MSG;
+ atu.size = resource_size(pci->pp.msg_res);
+ atu.index = pci->pp.msg_atu_index;
+
+ atu.parent_bus_addr = pci->pp.msg_res->start - pci->parent_bus_offset;
+
+ ret = dw_pcie_prog_outbound_atu(pci, &atu);
+ if (ret)
+ return ret;
+
+ mem = ioremap(pci->pp.msg_res->start, pci->region_align);
+ if (!mem)
+ return -ENOMEM;
+
+ /* A dummy write is converted to a Msg TLP */
+ writel(0, mem);
+
+ iounmap(mem);
+
+ return 0;
+}
+
int dw_pcie_suspend_noirq(struct dw_pcie *pci)
{
u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
@@ -831,22 +956,33 @@ int dw_pcie_suspend_noirq(struct dw_pcie *pci)
if (dw_pcie_readw_dbi(pci, offset + PCI_EXP_LNKCTL) & PCI_EXP_LNKCTL_ASPM_L1)
return 0;
- if (dw_pcie_get_ltssm(pci) <= DW_PCIE_LTSSM_DETECT_ACT)
- return 0;
-
- if (!pci->pp.ops->pme_turn_off)
- return 0;
-
- pci->pp.ops->pme_turn_off(&pci->pp);
+ if (pci->pp.ops->pme_turn_off) {
+ pci->pp.ops->pme_turn_off(&pci->pp);
+ } else {
+ ret = dw_pcie_pme_turn_off(pci);
+ if (ret)
+ return ret;
+ }
- ret = read_poll_timeout(dw_pcie_get_ltssm, val, val == DW_PCIE_LTSSM_L2_IDLE,
+ ret = read_poll_timeout(dw_pcie_get_ltssm, val,
+ val == DW_PCIE_LTSSM_L2_IDLE ||
+ val <= DW_PCIE_LTSSM_DETECT_WAIT,
PCIE_PME_TO_L2_TIMEOUT_US/10,
PCIE_PME_TO_L2_TIMEOUT_US, false, pci);
if (ret) {
+ /* Only log message when LTSSM isn't in DETECT or POLL */
dev_err(pci->dev, "Timeout waiting for L2 entry! LTSSM: 0x%x\n", val);
return ret;
}
+ /*
+ * Per PCIe r6.0, sec 5.3.3.2.1, software should wait at least
+ * 100ns after L2/L3 Ready before turning off refclock and
+ * main power. This is harmless when no endpoint is connected.
+ */
+ udelay(1);
+
+ dw_pcie_stop_link(pci);
if (pci->pp.ops->deinit)
pci->pp.ops->deinit(&pci->pp);
diff --git a/drivers/pci/controller/dwc/pcie-designware-plat.c b/drivers/pci/controller/dwc/pcie-designware-plat.c
index 778588b4be70..771b9d9be077 100644
--- a/drivers/pci/controller/dwc/pcie-designware-plat.c
+++ b/drivers/pci/controller/dwc/pcie-designware-plat.c
@@ -145,6 +145,17 @@ static int dw_plat_pcie_probe(struct platform_device *pdev)
pci->ep.ops = &pcie_ep_ops;
ret = dw_pcie_ep_init(&pci->ep);
+ if (ret)
+ return ret;
+
+ ret = dw_pcie_ep_init_registers(&pci->ep);
+ if (ret) {
+ dev_err(dev, "Failed to initialize DWC endpoint registers\n");
+ dw_pcie_ep_deinit(&pci->ep);
+ return ret;
+ }
+
+ pci_epc_init_notify(pci->ep.epc);
+
break;
default:
dev_err(dev, "INVALID device type %d\n", dw_plat_pcie->mode);
diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c
index 250cf7f40b85..97d76d3dc066 100644
--- a/drivers/pci/controller/dwc/pcie-designware.c
+++ b/drivers/pci/controller/dwc/pcie-designware.c
@@ -16,6 +16,8 @@
#include <linux/gpio/consumer.h>
#include <linux/ioport.h>
#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/pcie-dwc.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/types.h>
@@ -112,6 +114,7 @@ int dw_pcie_get_resources(struct dw_pcie *pci)
pci->dbi_base = devm_pci_remap_cfg_resource(pci->dev, res);
if (IS_ERR(pci->dbi_base))
return PTR_ERR(pci->dbi_base);
+ pci->dbi_phys_addr = res->start;
}
/* DBI2 is mainly useful for the endpoint controller */
@@ -134,6 +137,7 @@ int dw_pcie_get_resources(struct dw_pcie *pci)
pci->atu_base = devm_ioremap_resource(pci->dev, res);
if (IS_ERR(pci->atu_base))
return PTR_ERR(pci->atu_base);
+ pci->atu_phys_addr = res->start;
} else {
pci->atu_base = pci->dbi_base + DEFAULT_DBI_ATU_OFFSET;
}
@@ -166,8 +170,8 @@ int dw_pcie_get_resources(struct dw_pcie *pci)
return ret;
}
- if (pci->link_gen < 1)
- pci->link_gen = of_pci_get_max_link_speed(np);
+ if (pci->max_link_speed < 1)
+ pci->max_link_speed = of_pci_get_max_link_speed(np);
of_property_read_u32(np, "num-lanes", &pci->num_lanes);
@@ -281,6 +285,51 @@ u16 dw_pcie_find_ext_capability(struct dw_pcie *pci, u8 cap)
}
EXPORT_SYMBOL_GPL(dw_pcie_find_ext_capability);
+static u16 __dw_pcie_find_vsec_capability(struct dw_pcie *pci, u16 vendor_id,
+ u16 vsec_id)
+{
+ u16 vsec = 0;
+ u32 header;
+
+ if (vendor_id != dw_pcie_readw_dbi(pci, PCI_VENDOR_ID))
+ return 0;
+
+ while ((vsec = dw_pcie_find_next_ext_capability(pci, vsec,
+ PCI_EXT_CAP_ID_VNDR))) {
+ header = dw_pcie_readl_dbi(pci, vsec + PCI_VNDR_HEADER);
+ if (PCI_VNDR_HEADER_ID(header) == vsec_id)
+ return vsec;
+ }
+
+ return 0;
+}
+
+static u16 dw_pcie_find_vsec_capability(struct dw_pcie *pci,
+ const struct dwc_pcie_vsec_id *vsec_ids)
+{
+ const struct dwc_pcie_vsec_id *vid;
+ u16 vsec;
+ u32 header;
+
+ for (vid = vsec_ids; vid->vendor_id; vid++) {
+ vsec = __dw_pcie_find_vsec_capability(pci, vid->vendor_id,
+ vid->vsec_id);
+ if (vsec) {
+ header = dw_pcie_readl_dbi(pci, vsec + PCI_VNDR_HEADER);
+ if (PCI_VNDR_HEADER_REV(header) == vid->vsec_rev)
+ return vsec;
+ }
+ }
+
+ return 0;
+}
+
+u16 dw_pcie_find_rasdes_capability(struct dw_pcie *pci)
+{
+ return dw_pcie_find_vsec_capability(pci, dwc_pcie_rasdes_vsec_ids);
+}
+EXPORT_SYMBOL_GPL(dw_pcie_find_rasdes_capability);
+
int dw_pcie_read(void __iomem *addr, int size, u32 *val)
{
if (!IS_ALIGNED((uintptr_t)addr, size)) {
@@ -465,56 +514,58 @@ static inline u32 dw_pcie_enable_ecrc(u32 val)
return val | PCIE_ATU_TD;
}
-static int __dw_pcie_prog_outbound_atu(struct dw_pcie *pci, u8 func_no,
- int index, int type, u64 cpu_addr,
- u64 pci_addr, u64 size)
+int dw_pcie_prog_outbound_atu(struct dw_pcie *pci,
+ const struct dw_pcie_ob_atu_cfg *atu)
{
+ u64 parent_bus_addr = atu->parent_bus_addr;
u32 retries, val;
u64 limit_addr;
- if (pci->ops && pci->ops->cpu_addr_fixup)
- cpu_addr = pci->ops->cpu_addr_fixup(pci, cpu_addr);
+ limit_addr = parent_bus_addr + atu->size - 1;
- limit_addr = cpu_addr + size - 1;
-
- if ((limit_addr & ~pci->region_limit) != (cpu_addr & ~pci->region_limit) ||
- !IS_ALIGNED(cpu_addr, pci->region_align) ||
- !IS_ALIGNED(pci_addr, pci->region_align) || !size) {
+ if ((limit_addr & ~pci->region_limit) != (parent_bus_addr & ~pci->region_limit) ||
+ !IS_ALIGNED(parent_bus_addr, pci->region_align) ||
+ !IS_ALIGNED(atu->pci_addr, pci->region_align) || !atu->size) {
return -EINVAL;
}
- dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_LOWER_BASE,
- lower_32_bits(cpu_addr));
- dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_UPPER_BASE,
- upper_32_bits(cpu_addr));
+ dw_pcie_writel_atu_ob(pci, atu->index, PCIE_ATU_LOWER_BASE,
+ lower_32_bits(parent_bus_addr));
+ dw_pcie_writel_atu_ob(pci, atu->index, PCIE_ATU_UPPER_BASE,
+ upper_32_bits(parent_bus_addr));
- dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_LIMIT,
+ dw_pcie_writel_atu_ob(pci, atu->index, PCIE_ATU_LIMIT,
lower_32_bits(limit_addr));
if (dw_pcie_ver_is_ge(pci, 460A))
- dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_UPPER_LIMIT,
+ dw_pcie_writel_atu_ob(pci, atu->index, PCIE_ATU_UPPER_LIMIT,
upper_32_bits(limit_addr));
- dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_LOWER_TARGET,
- lower_32_bits(pci_addr));
- dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_UPPER_TARGET,
- upper_32_bits(pci_addr));
+ dw_pcie_writel_atu_ob(pci, atu->index, PCIE_ATU_LOWER_TARGET,
+ lower_32_bits(atu->pci_addr));
+ dw_pcie_writel_atu_ob(pci, atu->index, PCIE_ATU_UPPER_TARGET,
+ upper_32_bits(atu->pci_addr));
- val = type | PCIE_ATU_FUNC_NUM(func_no);
- if (upper_32_bits(limit_addr) > upper_32_bits(cpu_addr) &&
+ val = atu->type | atu->routing | PCIE_ATU_FUNC_NUM(atu->func_no);
+ if (upper_32_bits(limit_addr) > upper_32_bits(parent_bus_addr) &&
dw_pcie_ver_is_ge(pci, 460A))
val |= PCIE_ATU_INCREASE_REGION_SIZE;
if (dw_pcie_ver_is(pci, 490A))
val = dw_pcie_enable_ecrc(val);
- dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_REGION_CTRL1, val);
+ dw_pcie_writel_atu_ob(pci, atu->index, PCIE_ATU_REGION_CTRL1, val);
- dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_REGION_CTRL2, PCIE_ATU_ENABLE);
+ val = PCIE_ATU_ENABLE;
+ if (atu->type == PCIE_ATU_TYPE_MSG) {
+ /* Only data-less messages are supported for now */
+ val |= PCIE_ATU_INHIBIT_PAYLOAD | atu->code;
+ }
+ dw_pcie_writel_atu_ob(pci, atu->index, PCIE_ATU_REGION_CTRL2, val);
/*
* Make sure ATU enable takes effect before any subsequent config
* and I/O accesses.
*/
for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
- val = dw_pcie_readl_atu_ob(pci, index, PCIE_ATU_REGION_CTRL2);
+ val = dw_pcie_readl_atu_ob(pci, atu->index, PCIE_ATU_REGION_CTRL2);
if (val & PCIE_ATU_ENABLE)
return 0;
@@ -526,21 +577,6 @@ static int __dw_pcie_prog_outbound_atu(struct dw_pcie *pci, u8 func_no,
return -ETIMEDOUT;
}
-int dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
- u64 cpu_addr, u64 pci_addr, u64 size)
-{
- return __dw_pcie_prog_outbound_atu(pci, 0, index, type,
- cpu_addr, pci_addr, size);
-}
-
-int dw_pcie_prog_ep_outbound_atu(struct dw_pcie *pci, u8 func_no, int index,
- int type, u64 cpu_addr, u64 pci_addr,
- u64 size)
-{
- return __dw_pcie_prog_outbound_atu(pci, func_no, index, type,
- cpu_addr, pci_addr, size);
-}
-
static inline u32 dw_pcie_readl_atu_ib(struct dw_pcie *pci, u32 index, u32 reg)
{
return dw_pcie_readl_atu(pci, PCIE_ATU_REGION_DIR_IB, index, reg);
@@ -553,13 +589,13 @@ static inline void dw_pcie_writel_atu_ib(struct dw_pcie *pci, u32 index, u32 reg
}
int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int type,
- u64 cpu_addr, u64 pci_addr, u64 size)
+ u64 parent_bus_addr, u64 pci_addr, u64 size)
{
u64 limit_addr = pci_addr + size - 1;
u32 retries, val;
if ((limit_addr & ~pci->region_limit) != (pci_addr & ~pci->region_limit) ||
- !IS_ALIGNED(cpu_addr, pci->region_align) ||
+ !IS_ALIGNED(parent_bus_addr, pci->region_align) ||
!IS_ALIGNED(pci_addr, pci->region_align) || !size) {
return -EINVAL;
}
@@ -576,9 +612,9 @@ int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int type,
upper_32_bits(limit_addr));
dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_LOWER_TARGET,
- lower_32_bits(cpu_addr));
+ lower_32_bits(parent_bus_addr));
dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_UPPER_TARGET,
- upper_32_bits(cpu_addr));
+ upper_32_bits(parent_bus_addr));
val = type;
if (upper_32_bits(limit_addr) > upper_32_bits(pci_addr) &&
@@ -605,17 +641,18 @@ int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int type,
}
int dw_pcie_prog_ep_inbound_atu(struct dw_pcie *pci, u8 func_no, int index,
- int type, u64 cpu_addr, u8 bar)
+ int type, u64 parent_bus_addr, u8 bar, size_t size)
{
u32 retries, val;
- if (!IS_ALIGNED(cpu_addr, pci->region_align))
+ if (!IS_ALIGNED(parent_bus_addr, pci->region_align) ||
+ !IS_ALIGNED(parent_bus_addr, size))
return -EINVAL;
dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_LOWER_TARGET,
- lower_32_bits(cpu_addr));
+ lower_32_bits(parent_bus_addr));
dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_UPPER_TARGET,
- upper_32_bits(cpu_addr));
+ upper_32_bits(parent_bus_addr));
dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_REGION_CTRL1, type |
PCIE_ATU_FUNC_NUM(func_no));
@@ -655,7 +692,7 @@ int dw_pcie_wait_for_link(struct dw_pcie *pci)
if (dw_pcie_link_up(pci))
break;
- usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
+ msleep(LINK_WAIT_SLEEP_MS);
}
if (retries >= LINK_WAIT_MAX_RETRIES) {
@@ -697,16 +734,27 @@ void dw_pcie_upconfig_setup(struct dw_pcie *pci)
}
EXPORT_SYMBOL_GPL(dw_pcie_upconfig_setup);
-static void dw_pcie_link_set_max_speed(struct dw_pcie *pci, u32 link_gen)
+static void dw_pcie_link_set_max_speed(struct dw_pcie *pci)
{
u32 cap, ctrl2, link_speed;
u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
cap = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
+
+ /*
+ * Even if the platform doesn't want to limit the maximum link speed,
+ * just cache the hardware default value so that the vendor drivers can
+ * use it to do any link specific configuration.
+ */
+ if (pci->max_link_speed < 1) {
+ pci->max_link_speed = FIELD_GET(PCI_EXP_LNKCAP_SLS, cap);
+ return;
+ }
+
ctrl2 = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCTL2);
ctrl2 &= ~PCI_EXP_LNKCTL2_TLS;
- switch (pcie_link_speed[link_gen]) {
+ switch (pcie_link_speed[pci->max_link_speed]) {
case PCIE_SPEED_2_5GT:
link_speed = PCI_EXP_LNKCTL2_TLS_2_5GT;
break;
@@ -880,30 +928,40 @@ static struct dw_edma_plat_ops dw_pcie_edma_ops = {
.irq_vector = dw_pcie_edma_irq_vector,
};
-static int dw_pcie_edma_find_chip(struct dw_pcie *pci)
+static void dw_pcie_edma_init_data(struct dw_pcie *pci)
+{
+ pci->edma.dev = pci->dev;
+
+ if (!pci->edma.ops)
+ pci->edma.ops = &dw_pcie_edma_ops;
+
+ pci->edma.flags |= DW_EDMA_CHIP_LOCAL;
+}
+
+static int dw_pcie_edma_find_mf(struct dw_pcie *pci)
{
u32 val;
/*
+ * Skip detecting the mapping format if it has already been set by the glue
+ * driver. In that case, only check that edma.reg_base points to a valid
+ * memory region.
+ */
+ if (pci->edma.mf != EDMA_MF_EDMA_LEGACY)
+ return pci->edma.reg_base ? 0 : -ENODEV;
+
+ /*
* Indirect eDMA CSRs access has been completely removed since v5.40a
* thus no space is now reserved for the eDMA channels viewport and
* former DMA CTRL register is no longer fixed to FFs.
- *
- * Note that Renesas R-Car S4-8's PCIe controllers for unknown reason
- * have zeros in the eDMA CTRL register even though the HW-manual
- * explicitly states there must FFs if the unrolled mapping is enabled.
- * For such cases the low-level drivers are supposed to manually
- * activate the unrolled mapping to bypass the auto-detection procedure.
*/
- if (dw_pcie_ver_is_ge(pci, 540A) || dw_pcie_cap_is(pci, EDMA_UNROLL))
+ if (dw_pcie_ver_is_ge(pci, 540A))
val = 0xFFFFFFFF;
else
val = dw_pcie_readl_dbi(pci, PCIE_DMA_VIEWPORT_BASE + PCIE_DMA_CTRL);
if (val == 0xFFFFFFFF && pci->edma.reg_base) {
pci->edma.mf = EDMA_MF_EDMA_UNROLL;
-
- val = dw_pcie_readl_dma(pci, PCIE_DMA_CTRL);
} else if (val != 0xFFFFFFFF) {
pci->edma.mf = EDMA_MF_EDMA_LEGACY;
@@ -912,15 +970,25 @@ static int dw_pcie_edma_find_chip(struct dw_pcie *pci)
return -ENODEV;
}
- pci->edma.dev = pci->dev;
+ return 0;
+}
- if (!pci->edma.ops)
- pci->edma.ops = &dw_pcie_edma_ops;
+static int dw_pcie_edma_find_channels(struct dw_pcie *pci)
+{
+ u32 val;
- pci->edma.flags |= DW_EDMA_CHIP_LOCAL;
+ /*
+ * Autodetect the read/write channel counts only for non-HDMA platforms.
+ * HDMA platforms with native CSR mapping don't support autodetection,
+ * so the glue drivers should have passed valid counts already. If not,
+ * the sanity check below will catch it.
+ */
+ if (pci->edma.mf != EDMA_MF_HDMA_NATIVE) {
+ val = dw_pcie_readl_dma(pci, PCIE_DMA_CTRL);
- pci->edma.ll_wr_cnt = FIELD_GET(PCIE_DMA_NUM_WR_CHAN, val);
- pci->edma.ll_rd_cnt = FIELD_GET(PCIE_DMA_NUM_RD_CHAN, val);
+ pci->edma.ll_wr_cnt = FIELD_GET(PCIE_DMA_NUM_WR_CHAN, val);
+ pci->edma.ll_rd_cnt = FIELD_GET(PCIE_DMA_NUM_RD_CHAN, val);
+ }
/* Sanity check the channels count if the mapping was incorrect */
if (!pci->edma.ll_wr_cnt || pci->edma.ll_wr_cnt > EDMA_MAX_WR_CH ||
@@ -930,11 +998,24 @@ static int dw_pcie_edma_find_chip(struct dw_pcie *pci)
return 0;
}
+static int dw_pcie_edma_find_chip(struct dw_pcie *pci)
+{
+ int ret;
+
+ dw_pcie_edma_init_data(pci);
+
+ ret = dw_pcie_edma_find_mf(pci);
+ if (ret)
+ return ret;
+
+ return dw_pcie_edma_find_channels(pci);
+}
+
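A hypothetical glue-driver fragment (values assumed) for an HDMA platform with native CSR mapping, which must provide the mapping format, register base and channel counts itself since they cannot be autodetected:

static void foo_pcie_init_edma(struct dw_pcie *pci)
{
	pci->edma.mf = EDMA_MF_HDMA_NATIVE;
	pci->edma.reg_base = pci->dbi_base + 0x70000;	/* assumed offset */
	pci->edma.ll_wr_cnt = 4;			/* assumed */
	pci->edma.ll_rd_cnt = 4;			/* assumed */
}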
static int dw_pcie_edma_irq_verify(struct dw_pcie *pci)
{
struct platform_device *pdev = to_platform_device(pci->dev);
u16 ch_cnt = pci->edma.ll_wr_cnt + pci->edma.ll_rd_cnt;
- char name[6];
+ char name[15];
int ret;
if (pci->edma.nr_irqs == 1)
@@ -1035,8 +1116,7 @@ void dw_pcie_setup(struct dw_pcie *pci)
{
u32 val;
- if (pci->link_gen > 0)
- dw_pcie_link_set_max_speed(pci, pci->link_gen);
+ dw_pcie_link_set_max_speed(pci);
/* Configure Gen1 N_FTS */
if (pci->n_fts[0]) {
@@ -1069,3 +1149,63 @@ void dw_pcie_setup(struct dw_pcie *pci)
dw_pcie_link_set_max_link_width(pci, pci->num_lanes);
}
+
+resource_size_t dw_pcie_parent_bus_offset(struct dw_pcie *pci,
+ const char *reg_name,
+ resource_size_t cpu_phys_addr)
+{
+ struct device *dev = pci->dev;
+ struct device_node *np = dev->of_node;
+ int index;
+ u64 reg_addr, fixup_addr;
+ u64 (*fixup)(struct dw_pcie *pcie, u64 cpu_addr);
+
+ /* Look up reg_name address on parent bus */
+ index = of_property_match_string(np, "reg-names", reg_name);
+
+ if (index < 0) {
+ dev_err(dev, "No %s in devicetree \"reg\" property\n", reg_name);
+ return 0;
+ }
+
+ of_property_read_reg(np, index, &reg_addr, NULL);
+
+ fixup = pci->ops ? pci->ops->cpu_addr_fixup : NULL;
+ if (fixup) {
+ fixup_addr = fixup(pci, cpu_phys_addr);
+ if (reg_addr == fixup_addr) {
+ dev_info(dev, "%s reg[%d] %#010llx == %#010llx == fixup(cpu %#010llx); %ps is redundant with this devicetree\n",
+ reg_name, index, reg_addr, fixup_addr,
+ (unsigned long long) cpu_phys_addr, fixup);
+ } else {
+ dev_warn(dev, "%s reg[%d] %#010llx != %#010llx == fixup(cpu %#010llx); devicetree is broken\n",
+ reg_name, index, reg_addr, fixup_addr,
+ (unsigned long long) cpu_phys_addr);
+ reg_addr = fixup_addr;
+ }
+
+ return cpu_phys_addr - reg_addr;
+ }
+
+ if (pci->use_parent_dt_ranges) {
+
+ /*
+ * This platform once had a fixup, presumably because it
+ * translates between CPU and PCI controller addresses.
+ * Log a note if devicetree didn't describe a translation.
+ */
+ if (reg_addr == cpu_phys_addr)
+ dev_info(dev, "%s reg[%d] %#010llx == cpu %#010llx\n; no fixup was ever needed for this devicetree\n",
+ reg_name, index, reg_addr,
+ (unsigned long long) cpu_phys_addr);
+ } else {
+ if (reg_addr != cpu_phys_addr) {
+ dev_warn(dev, "%s reg[%d] %#010llx != cpu %#010llx; no fixup and devicetree \"ranges\" is broken, assuming no translation\n",
+ reg_name, index, reg_addr,
+ (unsigned long long) cpu_phys_addr);
+ return 0;
+ }
+ }
+
+ return cpu_phys_addr - reg_addr;
+}
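A worked example of the offset computed above (addresses assumed): if the devicetree "reg" entry for "config" is 0x40000000 on the controller's parent bus but the CPU reaches it at 0x500000000, then:

/*
 *   parent_bus_offset = cpu_phys_addr - reg_addr
 *                     = 0x500000000 - 0x40000000 = 0x4c0000000
 *
 * Callers then program the iATU with CPU addresses minus this offset, e.g.
 * atu.parent_bus_addr = pp->cfg0_base - pci->parent_bus_offset.
 */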
diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h
index 26dae4837462..56aafdbcdaca 100644
--- a/drivers/pci/controller/dwc/pcie-designware.h
+++ b/drivers/pci/controller/dwc/pcie-designware.h
@@ -51,9 +51,8 @@
/* DWC PCIe controller capabilities */
#define DW_PCIE_CAP_REQ_RES 0
-#define DW_PCIE_CAP_EDMA_UNROLL 1
-#define DW_PCIE_CAP_IATU_UNROLL 2
-#define DW_PCIE_CAP_CDM_CHECK 3
+#define DW_PCIE_CAP_IATU_UNROLL 1
+#define DW_PCIE_CAP_CDM_CHECK 2
#define dw_pcie_cap_is(_pci, _cap) \
test_bit(DW_PCIE_CAP_ ## _cap, &(_pci)->caps)
@@ -63,14 +62,16 @@
/* Parameters for the waiting for link up routine */
#define LINK_WAIT_MAX_RETRIES 10
-#define LINK_WAIT_USLEEP_MIN 90000
-#define LINK_WAIT_USLEEP_MAX 100000
+#define LINK_WAIT_SLEEP_MS 90
/* Parameters for the waiting for iATU enabled routine */
#define LINK_WAIT_MAX_IATU_RETRIES 5
#define LINK_WAIT_IATU 9
/* Synopsys-specific PCIe configuration registers */
+#define PCIE_PORT_FORCE 0x708
+#define PORT_FORCE_DO_DESKEW_FOR_SRIS BIT(23)
+
#define PCIE_PORT_AFR 0x70C
#define PORT_AFR_N_FTS_MASK GENMASK(15, 8)
#define PORT_AFR_N_FTS(n) FIELD_PREP(PORT_AFR_N_FTS_MASK, n)
@@ -92,6 +93,9 @@
#define PORT_LINK_MODE_4_LANES PORT_LINK_MODE(0x7)
#define PORT_LINK_MODE_8_LANES PORT_LINK_MODE(0xf)
+#define PCIE_PORT_LANE_SKEW 0x714
+#define PORT_LANE_SKEW_INSERT_MASK GENMASK(23, 0)
+
#define PCIE_PORT_DEBUG0 0x728
#define PORT_LOGIC_LTSSM_STATE_MASK 0x1f
#define PORT_LOGIC_LTSSM_STATE_L0 0x11
@@ -121,6 +125,19 @@
#define GEN3_RELATED_OFF_GEN3_EQ_DISABLE BIT(16)
#define GEN3_RELATED_OFF_RATE_SHADOW_SEL_SHIFT 24
#define GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK GENMASK(25, 24)
+#define GEN3_RELATED_OFF_RATE_SHADOW_SEL_16_0GT 0x1
+
+#define GEN3_EQ_CONTROL_OFF 0x8A8
+#define GEN3_EQ_CONTROL_OFF_FB_MODE GENMASK(3, 0)
+#define GEN3_EQ_CONTROL_OFF_PHASE23_EXIT_MODE BIT(4)
+#define GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC GENMASK(23, 8)
+#define GEN3_EQ_CONTROL_OFF_FOM_INC_INITIAL_EVAL BIT(24)
+
+#define GEN3_EQ_FB_MODE_DIR_CHANGE_OFF 0x8AC
+#define GEN3_EQ_FMDC_T_MIN_PHASE23 GENMASK(4, 0)
+#define GEN3_EQ_FMDC_N_EVALS GENMASK(9, 5)
+#define GEN3_EQ_FMDC_MAX_PRE_CUSROR_DELTA GENMASK(13, 10)
+#define GEN3_EQ_FMDC_MAX_POST_CUSROR_DELTA GENMASK(17, 14)
#define PCIE_PORT_MULTI_LANE_CTRL 0x8C0
#define PORT_MLTI_UPCFG_SUPPORT BIT(7)
@@ -148,11 +165,13 @@
#define PCIE_ATU_TYPE_IO 0x2
#define PCIE_ATU_TYPE_CFG0 0x4
#define PCIE_ATU_TYPE_CFG1 0x5
+#define PCIE_ATU_TYPE_MSG 0x10
#define PCIE_ATU_TD BIT(8)
#define PCIE_ATU_FUNC_NUM(pf) ((pf) << 20)
#define PCIE_ATU_REGION_CTRL2 0x004
#define PCIE_ATU_ENABLE BIT(31)
#define PCIE_ATU_BAR_MODE_ENABLE BIT(30)
+#define PCIE_ATU_INHIBIT_PAYLOAD BIT(22)
#define PCIE_ATU_FUNC_NUM_MATCH_EN BIT(19)
#define PCIE_ATU_LOWER_BASE 0x008
#define PCIE_ATU_UPPER_BASE 0x00C
@@ -192,6 +211,24 @@
#define PCIE_PL_CHK_REG_ERR_ADDR 0xB28
/*
+ * 16.0 GT/s (Gen 4) lane margining register definitions
+ */
+#define GEN4_LANE_MARGINING_1_OFF 0xB80
+#define MARGINING_MAX_VOLTAGE_OFFSET GENMASK(29, 24)
+#define MARGINING_NUM_VOLTAGE_STEPS GENMASK(22, 16)
+#define MARGINING_MAX_TIMING_OFFSET GENMASK(13, 8)
+#define MARGINING_NUM_TIMING_STEPS GENMASK(5, 0)
+
+#define GEN4_LANE_MARGINING_2_OFF 0xB84
+#define MARGINING_IND_ERROR_SAMPLER BIT(28)
+#define MARGINING_SAMPLE_REPORTING_METHOD BIT(27)
+#define MARGINING_IND_LEFT_RIGHT_TIMING BIT(26)
+#define MARGINING_IND_UP_DOWN_VOLTAGE BIT(25)
+#define MARGINING_VOLTAGE_SUPPORTED BIT(24)
+#define MARGINING_MAXLANES GENMASK(20, 16)
+#define MARGINING_SAMPLE_RATE_TIMING GENMASK(13, 8)
+#define MARGINING_SAMPLE_RATE_VOLTAGE GENMASK(5, 0)
+/*
* iATU Unroll-specific register definitions
* From 4.80 core version the address translation will be made by unroll
*/
@@ -293,12 +330,55 @@ enum dw_pcie_ltssm {
/* Need to align with PCIE_PORT_DEBUG0 bits 0:5 */
DW_PCIE_LTSSM_DETECT_QUIET = 0x0,
DW_PCIE_LTSSM_DETECT_ACT = 0x1,
+ DW_PCIE_LTSSM_POLL_ACTIVE = 0x2,
+ DW_PCIE_LTSSM_POLL_COMPLIANCE = 0x3,
+ DW_PCIE_LTSSM_POLL_CONFIG = 0x4,
+ DW_PCIE_LTSSM_PRE_DETECT_QUIET = 0x5,
+ DW_PCIE_LTSSM_DETECT_WAIT = 0x6,
+ DW_PCIE_LTSSM_CFG_LINKWD_START = 0x7,
+ DW_PCIE_LTSSM_CFG_LINKWD_ACEPT = 0x8,
+ DW_PCIE_LTSSM_CFG_LANENUM_WAI = 0x9,
+ DW_PCIE_LTSSM_CFG_LANENUM_ACEPT = 0xa,
+ DW_PCIE_LTSSM_CFG_COMPLETE = 0xb,
+ DW_PCIE_LTSSM_CFG_IDLE = 0xc,
+ DW_PCIE_LTSSM_RCVRY_LOCK = 0xd,
+ DW_PCIE_LTSSM_RCVRY_SPEED = 0xe,
+ DW_PCIE_LTSSM_RCVRY_RCVRCFG = 0xf,
+ DW_PCIE_LTSSM_RCVRY_IDLE = 0x10,
DW_PCIE_LTSSM_L0 = 0x11,
+ DW_PCIE_LTSSM_L0S = 0x12,
+ DW_PCIE_LTSSM_L123_SEND_EIDLE = 0x13,
+ DW_PCIE_LTSSM_L1_IDLE = 0x14,
DW_PCIE_LTSSM_L2_IDLE = 0x15,
+ DW_PCIE_LTSSM_L2_WAKE = 0x16,
+ DW_PCIE_LTSSM_DISABLED_ENTRY = 0x17,
+ DW_PCIE_LTSSM_DISABLED_IDLE = 0x18,
+ DW_PCIE_LTSSM_DISABLED = 0x19,
+ DW_PCIE_LTSSM_LPBK_ENTRY = 0x1a,
+ DW_PCIE_LTSSM_LPBK_ACTIVE = 0x1b,
+ DW_PCIE_LTSSM_LPBK_EXIT = 0x1c,
+ DW_PCIE_LTSSM_LPBK_EXIT_TIMEOUT = 0x1d,
+ DW_PCIE_LTSSM_HOT_RESET_ENTRY = 0x1e,
+ DW_PCIE_LTSSM_HOT_RESET = 0x1f,
+ DW_PCIE_LTSSM_RCVRY_EQ0 = 0x20,
+ DW_PCIE_LTSSM_RCVRY_EQ1 = 0x21,
+ DW_PCIE_LTSSM_RCVRY_EQ2 = 0x22,
+ DW_PCIE_LTSSM_RCVRY_EQ3 = 0x23,
DW_PCIE_LTSSM_UNKNOWN = 0xFFFFFFFF,
};
+struct dw_pcie_ob_atu_cfg {
+ int index;
+ int type;
+ u8 func_no;
+ u8 code;
+ u8 routing;
+ u64 parent_bus_addr;
+ u64 pci_addr;
+ u64 size;
+};
+
struct dw_pcie_host_ops {
int (*init)(struct dw_pcie_rp *pp);
void (*deinit)(struct dw_pcie_rp *pp);
@@ -328,12 +408,15 @@ struct dw_pcie_rp {
struct pci_host_bridge *bridge;
raw_spinlock_t lock;
DECLARE_BITMAP(msi_irq_in_use, MAX_MSI_IRQS);
+ bool use_atu_msg;
+ int msg_atu_index;
+ struct resource *msg_res;
+ bool use_linkup_irq;
};
struct dw_pcie_ep_ops {
void (*pre_init)(struct dw_pcie_ep *ep);
void (*init)(struct dw_pcie_ep *ep);
- void (*deinit)(struct dw_pcie_ep *ep);
int (*raise_irq)(struct dw_pcie_ep *ep, u8 func_no,
unsigned int type, u16 interrupt_num);
const struct pci_epc_features* (*get_features)(struct dw_pcie_ep *ep);
@@ -385,12 +468,20 @@ struct dw_pcie_ops {
void (*stop_link)(struct dw_pcie *pcie);
};
+struct debugfs_info {
+ struct dentry *debug_dir;
+ void *rasdes_info;
+};
+
struct dw_pcie {
struct device *dev;
void __iomem *dbi_base;
+ resource_size_t dbi_phys_addr;
void __iomem *dbi_base2;
void __iomem *atu_base;
+ resource_size_t atu_phys_addr;
size_t atu_size;
+ resource_size_t parent_bus_offset;
u32 num_ib_windows;
u32 num_ob_windows;
u32 region_align;
@@ -402,7 +493,7 @@ struct dw_pcie {
u32 type;
unsigned long caps;
int num_lanes;
- int link_gen;
+ int max_link_speed;
u8 n_fts[2];
struct dw_edma_chip edma;
struct clk_bulk_data app_clks[DW_PCIE_NUM_APP_CLKS];
@@ -411,6 +502,20 @@ struct dw_pcie {
struct reset_control_bulk_data core_rsts[DW_PCIE_NUM_CORE_RSTS];
struct gpio_desc *pe_rst;
bool suspended;
+ struct debugfs_info *debugfs;
+
+ /*
+ * If iATU input addresses are offset from CPU physical addresses,
+ * we previously required .cpu_addr_fixup() to convert them. We
+ * now rely on the devicetree instead. If .cpu_addr_fixup()
+ * exists, we compare its results with devicetree.
+ *
+ * If .cpu_addr_fixup() does not exist, we assume the offset is
+ * zero and warn if devicetree claims otherwise. If we know all
+ * devicetrees correctly describe the offset, set
+ * use_parent_dt_ranges to true to avoid this warning.
+ */
+ bool use_parent_dt_ranges;
};
#define to_dw_pcie_from_pp(port) container_of((port), struct dw_pcie, pp)
@@ -424,6 +529,7 @@ void dw_pcie_version_detect(struct dw_pcie *pci);
u8 dw_pcie_find_capability(struct dw_pcie *pci, u8 cap);
u16 dw_pcie_find_ext_capability(struct dw_pcie *pci, u8 cap);
+u16 dw_pcie_find_rasdes_capability(struct dw_pcie *pci);
int dw_pcie_read(void __iomem *addr, int size, u32 *val);
int dw_pcie_write(void __iomem *addr, int size, u32 val);
@@ -434,22 +540,21 @@ void dw_pcie_write_dbi2(struct dw_pcie *pci, u32 reg, size_t size, u32 val);
int dw_pcie_link_up(struct dw_pcie *pci);
void dw_pcie_upconfig_setup(struct dw_pcie *pci);
int dw_pcie_wait_for_link(struct dw_pcie *pci);
-int dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
- u64 cpu_addr, u64 pci_addr, u64 size);
-int dw_pcie_prog_ep_outbound_atu(struct dw_pcie *pci, u8 func_no, int index,
- int type, u64 cpu_addr, u64 pci_addr, u64 size);
+int dw_pcie_prog_outbound_atu(struct dw_pcie *pci,
+ const struct dw_pcie_ob_atu_cfg *atu);
int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int type,
- u64 cpu_addr, u64 pci_addr, u64 size);
+ u64 parent_bus_addr, u64 pci_addr, u64 size);
int dw_pcie_prog_ep_inbound_atu(struct dw_pcie *pci, u8 func_no, int index,
- int type, u64 cpu_addr, u8 bar);
+ int type, u64 parent_bus_addr,
+ u8 bar, size_t size);
void dw_pcie_disable_atu(struct dw_pcie *pci, u32 dir, int index);
void dw_pcie_setup(struct dw_pcie *pci);
void dw_pcie_iatu_detect(struct dw_pcie *pci);
int dw_pcie_edma_detect(struct dw_pcie *pci);
void dw_pcie_edma_remove(struct dw_pcie *pci);
-
-int dw_pcie_suspend_noirq(struct dw_pcie *pci);
-int dw_pcie_resume_noirq(struct dw_pcie *pci);
+resource_size_t dw_pcie_parent_bus_offset(struct dw_pcie *pci,
+ const char *reg_name,
+ resource_size_t cpu_phy_addr);
static inline void dw_pcie_writel_dbi(struct dw_pcie *pci, u32 reg, u32 val)
{
@@ -628,6 +733,8 @@ static inline enum dw_pcie_ltssm dw_pcie_get_ltssm(struct dw_pcie *pci)
}
#ifdef CONFIG_PCIE_DW_HOST
+int dw_pcie_suspend_noirq(struct dw_pcie *pci);
+int dw_pcie_resume_noirq(struct dw_pcie *pci);
irqreturn_t dw_handle_msi_irq(struct dw_pcie_rp *pp);
int dw_pcie_setup_rc(struct dw_pcie_rp *pp);
int dw_pcie_host_init(struct dw_pcie_rp *pp);
@@ -636,6 +743,16 @@ int dw_pcie_allocate_domains(struct dw_pcie_rp *pp);
void __iomem *dw_pcie_own_conf_map_bus(struct pci_bus *bus, unsigned int devfn,
int where);
#else
+static inline int dw_pcie_suspend_noirq(struct dw_pcie *pci)
+{
+ return 0;
+}
+
+static inline int dw_pcie_resume_noirq(struct dw_pcie *pci)
+{
+ return 0;
+}
+
static inline irqreturn_t dw_handle_msi_irq(struct dw_pcie_rp *pp)
{
return IRQ_NONE;
@@ -669,10 +786,11 @@ static inline void __iomem *dw_pcie_own_conf_map_bus(struct pci_bus *bus,
#ifdef CONFIG_PCIE_DW_EP
void dw_pcie_ep_linkup(struct dw_pcie_ep *ep);
+void dw_pcie_ep_linkdown(struct dw_pcie_ep *ep);
int dw_pcie_ep_init(struct dw_pcie_ep *ep);
-int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep);
-void dw_pcie_ep_init_notify(struct dw_pcie_ep *ep);
-void dw_pcie_ep_exit(struct dw_pcie_ep *ep);
+int dw_pcie_ep_init_registers(struct dw_pcie_ep *ep);
+void dw_pcie_ep_deinit(struct dw_pcie_ep *ep);
+void dw_pcie_ep_cleanup(struct dw_pcie_ep *ep);
int dw_pcie_ep_raise_intx_irq(struct dw_pcie_ep *ep, u8 func_no);
int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
u8 interrupt_num);
@@ -681,6 +799,7 @@ int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
int dw_pcie_ep_raise_msix_irq_doorbell(struct dw_pcie_ep *ep, u8 func_no,
u16 interrupt_num);
void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar);
+int dw_pcie_ep_hide_ext_capability(struct dw_pcie *pci, u8 prev_cap, u8 cap);
struct dw_pcie_ep_func *
dw_pcie_ep_get_func_from_ep(struct dw_pcie_ep *ep, u8 func_no);
#else
@@ -688,21 +807,25 @@ static inline void dw_pcie_ep_linkup(struct dw_pcie_ep *ep)
{
}
+static inline void dw_pcie_ep_linkdown(struct dw_pcie_ep *ep)
+{
+}
+
static inline int dw_pcie_ep_init(struct dw_pcie_ep *ep)
{
return 0;
}
-static inline int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep)
+static inline int dw_pcie_ep_init_registers(struct dw_pcie_ep *ep)
{
return 0;
}
-static inline void dw_pcie_ep_init_notify(struct dw_pcie_ep *ep)
+static inline void dw_pcie_ep_deinit(struct dw_pcie_ep *ep)
{
}
-static inline void dw_pcie_ep_exit(struct dw_pcie_ep *ep)
+static inline void dw_pcie_ep_cleanup(struct dw_pcie_ep *ep)
{
}
@@ -734,10 +857,29 @@ static inline void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)
{
}
+static inline int dw_pcie_ep_hide_ext_capability(struct dw_pcie *pci,
+ u8 prev_cap, u8 cap)
+{
+ return 0;
+}
+
static inline struct dw_pcie_ep_func *
dw_pcie_ep_get_func_from_ep(struct dw_pcie_ep *ep, u8 func_no)
{
return NULL;
}
#endif
+
+#ifdef CONFIG_PCIE_DW_DEBUGFS
+void dwc_pcie_debugfs_init(struct dw_pcie *pci);
+void dwc_pcie_debugfs_deinit(struct dw_pcie *pci);
+#else
+static inline void dwc_pcie_debugfs_init(struct dw_pcie *pci)
+{
+}
+static inline void dwc_pcie_debugfs_deinit(struct dw_pcie *pci)
+{
+}
+#endif
+
#endif /* _PCIE_DESIGNWARE_H */
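
The new CONFIG_PCIE_DW_DEBUGFS block follows the usual DWC stub pattern: callers invoke the hooks unconditionally and the empty static inlines compile away when the option is off. A minimal sketch with hypothetical probe/remove helpers (only the dwc_pcie_debugfs_*() and dw_pcie_edma_*() declarations above are real):

static int example_dwc_probe(struct dw_pcie *pci)
{
	int ret;

	ret = dw_pcie_edma_detect(pci);
	if (ret)
		return ret;

	/* No-op static inline when CONFIG_PCIE_DW_DEBUGFS is disabled */
	dwc_pcie_debugfs_init(pci);

	return 0;
}

static void example_dwc_remove(struct dw_pcie *pci)
{
	dwc_pcie_debugfs_deinit(pci);
	dw_pcie_edma_remove(pci);
}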
diff --git a/drivers/pci/controller/dwc/pcie-dw-rockchip.c b/drivers/pci/controller/dwc/pcie-dw-rockchip.c
index d6842141d384..678d510a261d 100644
--- a/drivers/pci/controller/dwc/pcie-dw-rockchip.c
+++ b/drivers/pci/controller/dwc/pcie-dw-rockchip.c
@@ -34,10 +34,16 @@
#define to_rockchip_pcie(x) dev_get_drvdata((x)->dev)
#define PCIE_CLIENT_RC_MODE HIWORD_UPDATE_BIT(0x40)
+#define PCIE_CLIENT_EP_MODE HIWORD_UPDATE(0xf0, 0x0)
#define PCIE_CLIENT_ENABLE_LTSSM HIWORD_UPDATE_BIT(0xc)
+#define PCIE_CLIENT_DISABLE_LTSSM HIWORD_UPDATE(0x0c, 0x8)
+#define PCIE_CLIENT_INTR_STATUS_MISC 0x10
+#define PCIE_CLIENT_INTR_MASK_MISC 0x24
#define PCIE_SMLH_LINKUP BIT(16)
#define PCIE_RDLH_LINKUP BIT(17)
#define PCIE_LINKUP (PCIE_SMLH_LINKUP | PCIE_RDLH_LINKUP)
+#define PCIE_RDLH_LINK_UP_CHGED BIT(1)
+#define PCIE_LINK_REQ_RST_NOT_INT BIT(2)
#define PCIE_L0S_ENTRY 0x11
#define PCIE_CLIENT_GENERAL_CONTROL 0x0
#define PCIE_CLIENT_INTR_STATUS_LEGACY 0x8
@@ -49,25 +55,30 @@
#define PCIE_LTSSM_STATUS_MASK GENMASK(5, 0)
struct rockchip_pcie {
- struct dw_pcie pci;
- void __iomem *apb_base;
- struct phy *phy;
- struct clk_bulk_data *clks;
- unsigned int clk_cnt;
- struct reset_control *rst;
- struct gpio_desc *rst_gpio;
- struct regulator *vpcie3v3;
- struct irq_domain *irq_domain;
+ struct dw_pcie pci;
+ void __iomem *apb_base;
+ struct phy *phy;
+ struct clk_bulk_data *clks;
+ unsigned int clk_cnt;
+ struct reset_control *rst;
+ struct gpio_desc *rst_gpio;
+ struct regulator *vpcie3v3;
+ struct irq_domain *irq_domain;
+ const struct rockchip_pcie_of_data *data;
};
-static int rockchip_pcie_readl_apb(struct rockchip_pcie *rockchip,
- u32 reg)
+struct rockchip_pcie_of_data {
+ enum dw_pcie_device_mode mode;
+ const struct pci_epc_features *epc_features;
+};
+
+static int rockchip_pcie_readl_apb(struct rockchip_pcie *rockchip, u32 reg)
{
return readl_relaxed(rockchip->apb_base + reg);
}
-static void rockchip_pcie_writel_apb(struct rockchip_pcie *rockchip,
- u32 val, u32 reg)
+static void rockchip_pcie_writel_apb(struct rockchip_pcie *rockchip, u32 val,
+ u32 reg)
{
writel_relaxed(val, rockchip->apb_base + reg);
}
@@ -133,8 +144,8 @@ static int rockchip_pcie_init_irq_domain(struct rockchip_pcie *rockchip)
return -EINVAL;
}
- rockchip->irq_domain = irq_domain_add_linear(intc, PCI_NUM_INTX,
- &intx_domain_ops, rockchip);
+ rockchip->irq_domain = irq_domain_create_linear(of_fwnode_handle(intc), PCI_NUM_INTX,
+ &intx_domain_ops, rockchip);
of_node_put(intc);
if (!rockchip->irq_domain) {
dev_err(dev, "failed to get a INTx IRQ domain\n");
@@ -144,16 +155,27 @@ static int rockchip_pcie_init_irq_domain(struct rockchip_pcie *rockchip)
return 0;
}
+static u32 rockchip_pcie_get_ltssm(struct rockchip_pcie *rockchip)
+{
+ return rockchip_pcie_readl_apb(rockchip, PCIE_CLIENT_LTSSM_STATUS);
+}
+
static void rockchip_pcie_enable_ltssm(struct rockchip_pcie *rockchip)
{
rockchip_pcie_writel_apb(rockchip, PCIE_CLIENT_ENABLE_LTSSM,
PCIE_CLIENT_GENERAL_CONTROL);
}
+static void rockchip_pcie_disable_ltssm(struct rockchip_pcie *rockchip)
+{
+ rockchip_pcie_writel_apb(rockchip, PCIE_CLIENT_DISABLE_LTSSM,
+ PCIE_CLIENT_GENERAL_CONTROL);
+}
+
static int rockchip_pcie_link_up(struct dw_pcie *pci)
{
struct rockchip_pcie *rockchip = to_rockchip_pcie(pci);
- u32 val = rockchip_pcie_readl_apb(rockchip, PCIE_CLIENT_LTSSM_STATUS);
+ u32 val = rockchip_pcie_get_ltssm(rockchip);
if ((val & PCIE_LINKUP) == PCIE_LINKUP &&
(val & PCIE_LTSSM_STATUS_MASK) == PCIE_L0S_ENTRY)
@@ -186,12 +208,18 @@ static int rockchip_pcie_start_link(struct dw_pcie *pci)
return 0;
}
+static void rockchip_pcie_stop_link(struct dw_pcie *pci)
+{
+ struct rockchip_pcie *rockchip = to_rockchip_pcie(pci);
+
+ rockchip_pcie_disable_ltssm(rockchip);
+}
+
static int rockchip_pcie_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct rockchip_pcie *rockchip = to_rockchip_pcie(pci);
struct device *dev = rockchip->pci.dev;
- u32 val = HIWORD_UPDATE_BIT(PCIE_LTSSM_ENABLE_ENHANCE);
int irq, ret;
irq = of_irq_get_byname(dev->of_node, "legacy");
@@ -205,12 +233,6 @@ static int rockchip_pcie_host_init(struct dw_pcie_rp *pp)
irq_set_chained_handler_and_data(irq, rockchip_pcie_intx_handler,
rockchip);
- /* LTSSM enable control mode */
- rockchip_pcie_writel_apb(rockchip, val, PCIE_CLIENT_HOT_RESET_CTRL);
-
- rockchip_pcie_writel_apb(rockchip, PCIE_CLIENT_RC_MODE,
- PCIE_CLIENT_GENERAL_CONTROL);
-
return 0;
}
@@ -218,6 +240,113 @@ static const struct dw_pcie_host_ops rockchip_pcie_host_ops = {
.init = rockchip_pcie_host_init,
};
+/*
+ * ATS does not work on RK3588 when running in EP mode.
+ *
+ * After the host has enabled ATS on the EP side, it will send an IOTLB
+ * invalidation request to the EP side. However, the RK3588 will never send
+ * a completion back and eventually the host will print an IOTLB_INV_TIMEOUT
+ * error, and the EP will not be operational. If we hide the ATS capability,
+ * things work as expected.
+ */
+static void rockchip_pcie_ep_hide_broken_ats_cap_rk3588(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct device *dev = pci->dev;
+
+ /* Only hide the ATS capability for RK3588 running in EP mode. */
+ if (!of_device_is_compatible(dev->of_node, "rockchip,rk3588-pcie-ep"))
+ return;
+
+ if (dw_pcie_ep_hide_ext_capability(pci, PCI_EXT_CAP_ID_SECPCI,
+ PCI_EXT_CAP_ID_ATS))
+ dev_err(dev, "failed to hide ATS capability\n");
+}
+
+static void rockchip_pcie_ep_pre_init(struct dw_pcie_ep *ep)
+{
+ rockchip_pcie_ep_hide_broken_ats_cap_rk3588(ep);
+}
+
+static void rockchip_pcie_ep_init(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ enum pci_barno bar;
+
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
+ dw_pcie_ep_reset_bar(pci, bar);
+};
+
+static int rockchip_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
+ unsigned int type, u16 interrupt_num)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+ switch (type) {
+ case PCI_IRQ_INTX:
+ return dw_pcie_ep_raise_intx_irq(ep, func_no);
+ case PCI_IRQ_MSI:
+ return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
+ case PCI_IRQ_MSIX:
+ return dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num);
+ default:
+ dev_err(pci->dev, "UNKNOWN IRQ type\n");
+ }
+
+ return 0;
+}
+
+static const struct pci_epc_features rockchip_pcie_epc_features_rk3568 = {
+ .linkup_notifier = true,
+ .msi_capable = true,
+ .msix_capable = true,
+ .intx_capable = false,
+ .align = SZ_64K,
+ .bar[BAR_0] = { .type = BAR_RESIZABLE, },
+ .bar[BAR_1] = { .type = BAR_RESIZABLE, },
+ .bar[BAR_2] = { .type = BAR_RESIZABLE, },
+ .bar[BAR_3] = { .type = BAR_RESIZABLE, },
+ .bar[BAR_4] = { .type = BAR_RESIZABLE, },
+ .bar[BAR_5] = { .type = BAR_RESIZABLE, },
+};
+
+/*
+ * BAR4 on rk3588 exposes the ATU Port Logic Structure to the host regardless of
+ * iATU settings for BAR4. This means that BAR4 cannot be used by an EPF driver,
+ * so mark it as RESERVED. (rockchip_pcie_ep_init() will disable all BARs by
+ * default.) If the host could write to BAR4, the iATU settings (for all other
+ * BARs) would be overwritten, resulting in (all other BARs) no longer working.
+ */
+static const struct pci_epc_features rockchip_pcie_epc_features_rk3588 = {
+ .linkup_notifier = true,
+ .msi_capable = true,
+ .msix_capable = true,
+ .intx_capable = false,
+ .align = SZ_64K,
+ .bar[BAR_0] = { .type = BAR_RESIZABLE, },
+ .bar[BAR_1] = { .type = BAR_RESIZABLE, },
+ .bar[BAR_2] = { .type = BAR_RESIZABLE, },
+ .bar[BAR_3] = { .type = BAR_RESIZABLE, },
+ .bar[BAR_4] = { .type = BAR_RESERVED, },
+ .bar[BAR_5] = { .type = BAR_RESIZABLE, },
+};
+
+static const struct pci_epc_features *
+rockchip_pcie_get_features(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct rockchip_pcie *rockchip = to_rockchip_pcie(pci);
+
+ return rockchip->data->epc_features;
+}
+
+static const struct dw_pcie_ep_ops rockchip_pcie_ep_ops = {
+ .init = rockchip_pcie_ep_init,
+ .pre_init = rockchip_pcie_ep_pre_init,
+ .raise_irq = rockchip_pcie_raise_irq,
+ .get_features = rockchip_pcie_get_features,
+};
+
static int rockchip_pcie_clk_init(struct rockchip_pcie *rockchip)
{
struct device *dev = rockchip->pci.dev;
@@ -225,11 +354,15 @@ static int rockchip_pcie_clk_init(struct rockchip_pcie *rockchip)
ret = devm_clk_bulk_get_all(dev, &rockchip->clks);
if (ret < 0)
- return ret;
+ return dev_err_probe(dev, ret, "failed to get clocks\n");
rockchip->clk_cnt = ret;
- return clk_bulk_prepare_enable(rockchip->clk_cnt, rockchip->clks);
+ ret = clk_bulk_prepare_enable(rockchip->clk_cnt, rockchip->clks);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to enable clocks\n");
+
+ return 0;
}
static int rockchip_pcie_resource_get(struct platform_device *pdev,
@@ -237,12 +370,14 @@ static int rockchip_pcie_resource_get(struct platform_device *pdev,
{
rockchip->apb_base = devm_platform_ioremap_resource_byname(pdev, "apb");
if (IS_ERR(rockchip->apb_base))
- return PTR_ERR(rockchip->apb_base);
+ return dev_err_probe(&pdev->dev, PTR_ERR(rockchip->apb_base),
+ "failed to map apb registers\n");
rockchip->rst_gpio = devm_gpiod_get_optional(&pdev->dev, "reset",
- GPIOD_OUT_HIGH);
+ GPIOD_OUT_LOW);
if (IS_ERR(rockchip->rst_gpio))
- return PTR_ERR(rockchip->rst_gpio);
+ return dev_err_probe(&pdev->dev, PTR_ERR(rockchip->rst_gpio),
+ "failed to get reset gpio\n");
rockchip->rst = devm_reset_control_array_get_exclusive(&pdev->dev);
if (IS_ERR(rockchip->rst))
@@ -282,15 +417,180 @@ static void rockchip_pcie_phy_deinit(struct rockchip_pcie *rockchip)
static const struct dw_pcie_ops dw_pcie_ops = {
.link_up = rockchip_pcie_link_up,
.start_link = rockchip_pcie_start_link,
+ .stop_link = rockchip_pcie_stop_link,
};
+static irqreturn_t rockchip_pcie_rc_sys_irq_thread(int irq, void *arg)
+{
+ struct rockchip_pcie *rockchip = arg;
+ struct dw_pcie *pci = &rockchip->pci;
+ struct dw_pcie_rp *pp = &pci->pp;
+ struct device *dev = pci->dev;
+ u32 reg, val;
+
+ reg = rockchip_pcie_readl_apb(rockchip, PCIE_CLIENT_INTR_STATUS_MISC);
+ rockchip_pcie_writel_apb(rockchip, reg, PCIE_CLIENT_INTR_STATUS_MISC);
+
+ dev_dbg(dev, "PCIE_CLIENT_INTR_STATUS_MISC: %#x\n", reg);
+ dev_dbg(dev, "LTSSM_STATUS: %#x\n", rockchip_pcie_get_ltssm(rockchip));
+
+ if (reg & PCIE_RDLH_LINK_UP_CHGED) {
+ val = rockchip_pcie_get_ltssm(rockchip);
+ if ((val & PCIE_LINKUP) == PCIE_LINKUP) {
+ dev_dbg(dev, "Received Link up event. Starting enumeration!\n");
+ /* Rescan the bus to enumerate endpoint devices */
+ pci_lock_rescan_remove();
+ pci_rescan_bus(pp->bridge->bus);
+ pci_unlock_rescan_remove();
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t rockchip_pcie_ep_sys_irq_thread(int irq, void *arg)
+{
+ struct rockchip_pcie *rockchip = arg;
+ struct dw_pcie *pci = &rockchip->pci;
+ struct device *dev = pci->dev;
+ u32 reg, val;
+
+ reg = rockchip_pcie_readl_apb(rockchip, PCIE_CLIENT_INTR_STATUS_MISC);
+ rockchip_pcie_writel_apb(rockchip, reg, PCIE_CLIENT_INTR_STATUS_MISC);
+
+ dev_dbg(dev, "PCIE_CLIENT_INTR_STATUS_MISC: %#x\n", reg);
+ dev_dbg(dev, "LTSSM_STATUS: %#x\n", rockchip_pcie_get_ltssm(rockchip));
+
+ if (reg & PCIE_LINK_REQ_RST_NOT_INT) {
+ dev_dbg(dev, "hot reset or link-down reset\n");
+ dw_pcie_ep_linkdown(&pci->ep);
+ }
+
+ if (reg & PCIE_RDLH_LINK_UP_CHGED) {
+ val = rockchip_pcie_get_ltssm(rockchip);
+ if ((val & PCIE_LINKUP) == PCIE_LINKUP) {
+ dev_dbg(dev, "link up\n");
+ dw_pcie_ep_linkup(&pci->ep);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int rockchip_pcie_configure_rc(struct platform_device *pdev,
+ struct rockchip_pcie *rockchip)
+{
+ struct device *dev = &pdev->dev;
+ struct dw_pcie_rp *pp;
+ int irq, ret;
+ u32 val;
+
+ if (!IS_ENABLED(CONFIG_PCIE_ROCKCHIP_DW_HOST))
+ return -ENODEV;
+
+ irq = platform_get_irq_byname(pdev, "sys");
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_threaded_irq(dev, irq, NULL,
+ rockchip_pcie_rc_sys_irq_thread,
+ IRQF_ONESHOT, "pcie-sys-rc", rockchip);
+ if (ret) {
+ dev_err(dev, "failed to request PCIe sys IRQ\n");
+ return ret;
+ }
+
+ /* LTSSM enable control mode */
+ val = HIWORD_UPDATE_BIT(PCIE_LTSSM_ENABLE_ENHANCE);
+ rockchip_pcie_writel_apb(rockchip, val, PCIE_CLIENT_HOT_RESET_CTRL);
+
+ rockchip_pcie_writel_apb(rockchip, PCIE_CLIENT_RC_MODE,
+ PCIE_CLIENT_GENERAL_CONTROL);
+
+ pp = &rockchip->pci.pp;
+ pp->ops = &rockchip_pcie_host_ops;
+ pp->use_linkup_irq = true;
+
+ ret = dw_pcie_host_init(pp);
+ if (ret) {
+ dev_err(dev, "failed to initialize host\n");
+ return ret;
+ }
+
+ /* unmask DLL up/down indicator */
+ val = HIWORD_UPDATE(PCIE_RDLH_LINK_UP_CHGED, 0);
+ rockchip_pcie_writel_apb(rockchip, val, PCIE_CLIENT_INTR_MASK_MISC);
+
+ return ret;
+}
+
+static int rockchip_pcie_configure_ep(struct platform_device *pdev,
+ struct rockchip_pcie *rockchip)
+{
+ struct device *dev = &pdev->dev;
+ int irq, ret;
+ u32 val;
+
+ if (!IS_ENABLED(CONFIG_PCIE_ROCKCHIP_DW_EP))
+ return -ENODEV;
+
+ irq = platform_get_irq_byname(pdev, "sys");
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_threaded_irq(dev, irq, NULL,
+ rockchip_pcie_ep_sys_irq_thread,
+ IRQF_ONESHOT, "pcie-sys-ep", rockchip);
+ if (ret) {
+ dev_err(dev, "failed to request PCIe sys IRQ\n");
+ return ret;
+ }
+
+ /* LTSSM enable control mode */
+ val = HIWORD_UPDATE_BIT(PCIE_LTSSM_ENABLE_ENHANCE);
+ rockchip_pcie_writel_apb(rockchip, val, PCIE_CLIENT_HOT_RESET_CTRL);
+
+ rockchip_pcie_writel_apb(rockchip, PCIE_CLIENT_EP_MODE,
+ PCIE_CLIENT_GENERAL_CONTROL);
+
+ rockchip->pci.ep.ops = &rockchip_pcie_ep_ops;
+ rockchip->pci.ep.page_size = SZ_64K;
+
+ dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
+
+ ret = dw_pcie_ep_init(&rockchip->pci.ep);
+ if (ret) {
+ dev_err(dev, "failed to initialize endpoint\n");
+ return ret;
+ }
+
+ ret = dw_pcie_ep_init_registers(&rockchip->pci.ep);
+ if (ret) {
+ dev_err(dev, "failed to initialize DWC endpoint registers\n");
+ dw_pcie_ep_deinit(&rockchip->pci.ep);
+ return ret;
+ }
+
+ pci_epc_init_notify(rockchip->pci.ep.epc);
+
+ /* unmask DLL up/down indicator and hot reset/link-down reset */
+ val = HIWORD_UPDATE(PCIE_RDLH_LINK_UP_CHGED | PCIE_LINK_REQ_RST_NOT_INT, 0);
+ rockchip_pcie_writel_apb(rockchip, val, PCIE_CLIENT_INTR_MASK_MISC);
+
+ return ret;
+}
+
static int rockchip_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct rockchip_pcie *rockchip;
- struct dw_pcie_rp *pp;
+ const struct rockchip_pcie_of_data *data;
int ret;
+ data = of_device_get_match_data(dev);
+ if (!data)
+ return -EINVAL;
+
rockchip = devm_kzalloc(dev, sizeof(*rockchip), GFP_KERNEL);
if (!rockchip)
return -ENOMEM;
@@ -299,9 +599,7 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
rockchip->pci.dev = dev;
rockchip->pci.ops = &dw_pcie_ops;
-
- pp = &rockchip->pci.pp;
- pp->ops = &rockchip_pcie_host_ops;
+ rockchip->data = data;
ret = rockchip_pcie_resource_get(pdev, rockchip);
if (ret)
@@ -320,10 +618,9 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
rockchip->vpcie3v3 = NULL;
} else {
ret = regulator_enable(rockchip->vpcie3v3);
- if (ret) {
- dev_err(dev, "failed to enable vpcie3v3 regulator\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to enable vpcie3v3 regulator\n");
}
ret = rockchip_pcie_phy_init(rockchip);
@@ -338,10 +635,26 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
if (ret)
goto deinit_phy;
- ret = dw_pcie_host_init(pp);
- if (!ret)
- return 0;
+ switch (data->mode) {
+ case DW_PCIE_RC_TYPE:
+ ret = rockchip_pcie_configure_rc(pdev, rockchip);
+ if (ret)
+ goto deinit_clk;
+ break;
+ case DW_PCIE_EP_TYPE:
+ ret = rockchip_pcie_configure_ep(pdev, rockchip);
+ if (ret)
+ goto deinit_clk;
+ break;
+ default:
+ dev_err(dev, "INVALID device type %d\n", data->mode);
+ ret = -EINVAL;
+ goto deinit_clk;
+ }
+
+ return 0;
+deinit_clk:
clk_bulk_disable_unprepare(rockchip->clk_cnt, rockchip->clks);
deinit_phy:
rockchip_pcie_phy_deinit(rockchip);
@@ -352,8 +665,33 @@ disable_regulator:
return ret;
}
+static const struct rockchip_pcie_of_data rockchip_pcie_rc_of_data_rk3568 = {
+ .mode = DW_PCIE_RC_TYPE,
+};
+
+static const struct rockchip_pcie_of_data rockchip_pcie_ep_of_data_rk3568 = {
+ .mode = DW_PCIE_EP_TYPE,
+ .epc_features = &rockchip_pcie_epc_features_rk3568,
+};
+
+static const struct rockchip_pcie_of_data rockchip_pcie_ep_of_data_rk3588 = {
+ .mode = DW_PCIE_EP_TYPE,
+ .epc_features = &rockchip_pcie_epc_features_rk3588,
+};
+
static const struct of_device_id rockchip_pcie_of_match[] = {
- { .compatible = "rockchip,rk3568-pcie", },
+ {
+ .compatible = "rockchip,rk3568-pcie",
+ .data = &rockchip_pcie_rc_of_data_rk3568,
+ },
+ {
+ .compatible = "rockchip,rk3568-pcie-ep",
+ .data = &rockchip_pcie_ep_of_data_rk3568,
+ },
+ {
+ .compatible = "rockchip,rk3588-pcie-ep",
+ .data = &rockchip_pcie_ep_of_data_rk3588,
+ },
{},
};
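
The per-SoC pci_epc_features tables above are what endpoint function drivers see via pci_epc_get_features(). A minimal sketch of honouring the rk3588 BAR_RESERVED entry, using a hypothetical helper (pci_epc_get_features() itself is the real API):

static bool example_bar_usable(struct pci_epc *epc, u8 func_no,
			       enum pci_barno bar)
{
	const struct pci_epc_features *feat;

	feat = pci_epc_get_features(epc, func_no, 0);
	if (!feat)
		return false;

	/* BAR4 on rk3588 is BAR_RESERVED and must not be claimed */
	return feat->bar[bar].type != BAR_RESERVED;
}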
diff --git a/drivers/pci/controller/dwc/pcie-histb.c b/drivers/pci/controller/dwc/pcie-histb.c
index 7a11c618b9d9..1f2f4c28a949 100644
--- a/drivers/pci/controller/dwc/pcie-histb.c
+++ b/drivers/pci/controller/dwc/pcie-histb.c
@@ -409,16 +409,21 @@ static int histb_pcie_probe(struct platform_device *pdev)
ret = histb_pcie_host_enable(pp);
if (ret) {
dev_err(dev, "failed to enable host\n");
- return ret;
+ goto err_exit_phy;
}
ret = dw_pcie_host_init(pp);
if (ret) {
dev_err(dev, "failed to initialize host\n");
- return ret;
+ goto err_exit_phy;
}
return 0;
+
+err_exit_phy:
+ phy_exit(hipcie->phy);
+
+ return ret;
}
static void histb_pcie_remove(struct platform_device *pdev)
@@ -427,8 +432,7 @@ static void histb_pcie_remove(struct platform_device *pdev)
histb_pcie_host_disable(hipcie);
- if (hipcie->phy)
- phy_exit(hipcie->phy);
+ phy_exit(hipcie->phy);
}
static const struct of_device_id histb_pcie_of_match[] = {
@@ -439,7 +443,7 @@ MODULE_DEVICE_TABLE(of, histb_pcie_of_match);
static struct platform_driver histb_pcie_platform_driver = {
.probe = histb_pcie_probe,
- .remove_new = histb_pcie_remove,
+ .remove = histb_pcie_remove,
.driver = {
.name = "histb-pcie",
.of_match_table = histb_pcie_of_match,
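
The new err_exit_phy label relies on phy_exit() accepting a NULL phy, which is also why the explicit check could be dropped from histb_pcie_remove(). A condensed, hypothetical sketch of the resulting probe tail:

static int example_probe_tail(struct histb_pcie *hipcie, struct dw_pcie_rp *pp)
{
	int ret;

	ret = histb_pcie_host_enable(pp);
	if (ret)
		goto err_exit_phy;

	ret = dw_pcie_host_init(pp);
	if (ret)
		goto err_exit_phy;

	return 0;

err_exit_phy:
	phy_exit(hipcie->phy);	/* no-op when hipcie->phy is NULL */
	return ret;
}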
diff --git a/drivers/pci/controller/dwc/pcie-intel-gw.c b/drivers/pci/controller/dwc/pcie-intel-gw.c
index acbe4f6d3291..c21906eced61 100644
--- a/drivers/pci/controller/dwc/pcie-intel-gw.c
+++ b/drivers/pci/controller/dwc/pcie-intel-gw.c
@@ -57,7 +57,6 @@
PCIE_APP_IRN_INTA | PCIE_APP_IRN_INTB | \
PCIE_APP_IRN_INTC | PCIE_APP_IRN_INTD)
-#define BUS_IATU_OFFSET SZ_256M
#define RESET_INTERVAL_MS 100
struct intel_pcie {
@@ -132,7 +131,7 @@ static void intel_pcie_link_setup(struct intel_pcie *pcie)
static void intel_pcie_init_n_fts(struct dw_pcie *pci)
{
- switch (pci->link_gen) {
+ switch (pci->max_link_speed) {
case 3:
pci->n_fts[1] = PORT_AFR_N_FTS_GEN3;
break;
@@ -252,7 +251,7 @@ static int intel_pcie_wait_l2(struct intel_pcie *pcie)
int ret;
struct dw_pcie *pci = &pcie->pci;
- if (pci->link_gen < 3)
+ if (pci->max_link_speed < 3)
return 0;
/* Send PME_TURN_OFF message */
@@ -381,13 +380,7 @@ static int intel_pcie_rc_init(struct dw_pcie_rp *pp)
return intel_pcie_host_setup(pcie);
}
-static u64 intel_pcie_cpu_addr(struct dw_pcie *pcie, u64 cpu_addr)
-{
- return cpu_addr + BUS_IATU_OFFSET;
-}
-
static const struct dw_pcie_ops intel_pcie_ops = {
- .cpu_addr_fixup = intel_pcie_cpu_addr,
};
static const struct dw_pcie_host_ops intel_pcie_dw_ops = {
@@ -409,6 +402,7 @@ static int intel_pcie_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pcie);
pci = &pcie->pci;
pci->dev = dev;
+ pci->use_parent_dt_ranges = true;
pp = &pci->pp;
ret = intel_pcie_get_resources(pdev);
@@ -443,7 +437,7 @@ static const struct of_device_id of_intel_pcie_match[] = {
static struct platform_driver intel_pcie_driver = {
.probe = intel_pcie_probe,
- .remove_new = intel_pcie_remove,
+ .remove = intel_pcie_remove,
.driver = {
.name = "intel-gw-pcie",
.of_match_table = of_intel_pcie_match,
diff --git a/drivers/pci/controller/dwc/pcie-keembay.c b/drivers/pci/controller/dwc/pcie-keembay.c
index 5e8e54f597dd..278205db60a2 100644
--- a/drivers/pci/controller/dwc/pcie-keembay.c
+++ b/drivers/pci/controller/dwc/pcie-keembay.c
@@ -396,6 +396,7 @@ static int keembay_pcie_probe(struct platform_device *pdev)
struct keembay_pcie *pcie;
struct dw_pcie *pci;
enum dw_pcie_device_mode mode;
+ int ret;
data = device_get_match_data(dev);
if (!data)
@@ -430,11 +431,26 @@ static int keembay_pcie_probe(struct platform_device *pdev)
return -ENODEV;
pci->ep.ops = &keembay_pcie_ep_ops;
- return dw_pcie_ep_init(&pci->ep);
+ ret = dw_pcie_ep_init(&pci->ep);
+ if (ret)
+ return ret;
+
+ ret = dw_pcie_ep_init_registers(&pci->ep);
+ if (ret) {
+ dev_err(dev, "Failed to initialize DWC endpoint registers\n");
+ dw_pcie_ep_deinit(&pci->ep);
+ return ret;
+ }
+
+ pci_epc_init_notify(pci->ep.epc);
+
+ break;
default:
dev_err(dev, "Invalid device type %d\n", pcie->mode);
return -ENODEV;
}
+
+ return 0;
}
static const struct keembay_pcie_of_data keembay_pcie_rc_of_data = {
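
The keembay hunk above is one instance of the endpoint bring-up order this series converges on; the Rockchip and Qualcomm EP paths follow the same shape. Condensed to a sketch:

static int example_ep_bringup(struct dw_pcie *pci)
{
	int ret;

	ret = dw_pcie_ep_init(&pci->ep);
	if (ret)
		return ret;

	/* Formerly dw_pcie_ep_init_complete() */
	ret = dw_pcie_ep_init_registers(&pci->ep);
	if (ret) {
		dw_pcie_ep_deinit(&pci->ep);
		return ret;
	}

	/* Replaces the dropped dw_pcie_ep_init_notify() wrapper */
	pci_epc_init_notify(pci->ep.epc);

	return 0;
}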
diff --git a/drivers/pci/controller/dwc/pcie-kirin.c b/drivers/pci/controller/dwc/pcie-kirin.c
index d5523f302102..d0e6a3811b00 100644
--- a/drivers/pci/controller/dwc/pcie-kirin.c
+++ b/drivers/pci/controller/dwc/pcie-kirin.c
@@ -12,12 +12,10 @@
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/err.h>
-#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/of_pci.h>
#include <linux/phy/phy.h>
#include <linux/pci.h>
@@ -78,16 +76,16 @@ struct kirin_pcie {
void *phy_priv; /* only for PCIE_KIRIN_INTERNAL_PHY */
/* DWC PERST# */
- int gpio_id_dwc_perst;
+ struct gpio_desc *id_dwc_perst_gpio;
/* Per-slot PERST# */
int num_slots;
- int gpio_id_reset[MAX_PCI_SLOTS];
+ struct gpio_desc *id_reset_gpio[MAX_PCI_SLOTS];
const char *reset_names[MAX_PCI_SLOTS];
/* Per-slot clkreq */
int n_gpio_clkreq;
- int gpio_id_clkreq[MAX_PCI_SLOTS];
+ struct gpio_desc *id_clkreq_gpio[MAX_PCI_SLOTS];
const char *clkreq_names[MAX_PCI_SLOTS];
};
@@ -218,10 +216,9 @@ static int hi3660_pcie_phy_start(struct hi3660_pcie_phy *phy)
usleep_range(PIPE_CLK_WAIT_MIN, PIPE_CLK_WAIT_MAX);
reg_val = kirin_apb_phy_readl(phy, PCIE_APB_PHY_STATUS0);
- if (reg_val & PIPE_CLK_STABLE) {
- dev_err(dev, "PIPE clk is not stable\n");
- return -EINVAL;
- }
+ if (reg_val & PIPE_CLK_STABLE)
+ return dev_err_probe(dev, -ETIMEDOUT,
+ "PIPE clk is not stable\n");
return 0;
}
@@ -373,23 +370,27 @@ static int kirin_pcie_get_gpio_enable(struct kirin_pcie *pcie,
if (ret < 0)
return 0;
- if (ret > MAX_PCI_SLOTS) {
- dev_err(dev, "Too many GPIO clock requests!\n");
- return -EINVAL;
- }
+ if (ret > MAX_PCI_SLOTS)
+ return dev_err_probe(dev, -EINVAL,
+ "Too many GPIO clock requests!\n");
pcie->n_gpio_clkreq = ret;
for (i = 0; i < pcie->n_gpio_clkreq; i++) {
- pcie->gpio_id_clkreq[i] = of_get_named_gpio(dev->of_node,
- "hisilicon,clken-gpios", i);
- if (pcie->gpio_id_clkreq[i] < 0)
- return pcie->gpio_id_clkreq[i];
+ pcie->id_clkreq_gpio[i] = devm_gpiod_get_index(dev,
+ "hisilicon,clken", i,
+ GPIOD_OUT_LOW);
+ if (IS_ERR(pcie->id_clkreq_gpio[i]))
+ return dev_err_probe(dev, PTR_ERR(pcie->id_clkreq_gpio[i]),
+ "unable to get a valid clken gpio\n");
pcie->clkreq_names[i] = devm_kasprintf(dev, GFP_KERNEL,
"pcie_clkreq_%d", i);
if (!pcie->clkreq_names[i])
return -ENOMEM;
+
+ gpiod_set_consumer_name(pcie->id_clkreq_gpio[i],
+ pcie->clkreq_names[i]);
}
return 0;
@@ -400,56 +401,55 @@ static int kirin_pcie_parse_port(struct kirin_pcie *pcie,
struct device_node *node)
{
struct device *dev = &pdev->dev;
- struct device_node *parent, *child;
int ret, slot, i;
- for_each_available_child_of_node(node, parent) {
- for_each_available_child_of_node(parent, child) {
+ for_each_available_child_of_node_scoped(node, parent) {
+ for_each_available_child_of_node_scoped(parent, child) {
i = pcie->num_slots;
- pcie->gpio_id_reset[i] = of_get_named_gpio(child,
- "reset-gpios", 0);
- if (pcie->gpio_id_reset[i] < 0)
- continue;
+ pcie->id_reset_gpio[i] = devm_fwnode_gpiod_get_index(dev,
+ of_fwnode_handle(child),
+ "reset", 0, GPIOD_OUT_LOW,
+ NULL);
+ if (IS_ERR(pcie->id_reset_gpio[i])) {
+ if (PTR_ERR(pcie->id_reset_gpio[i]) == -ENOENT)
+ continue;
+ return dev_err_probe(dev, PTR_ERR(pcie->id_reset_gpio[i]),
+ "unable to get a valid reset gpio\n");
+ }
+
+ if (pcie->num_slots + 1 >= MAX_PCI_SLOTS)
+ return dev_err_probe(dev, -EINVAL,
+ "Too many PCI slots!\n");
pcie->num_slots++;
- if (pcie->num_slots > MAX_PCI_SLOTS) {
- dev_err(dev, "Too many PCI slots!\n");
- ret = -EINVAL;
- goto put_node;
- }
ret = of_pci_get_devfn(child);
- if (ret < 0) {
- dev_err(dev, "failed to parse devfn: %d\n", ret);
- goto put_node;
- }
+ if (ret < 0)
+ return dev_err_probe(dev, ret,
+ "failed to parse devfn\n");
slot = PCI_SLOT(ret);
pcie->reset_names[i] = devm_kasprintf(dev, GFP_KERNEL,
"pcie_perst_%d",
slot);
- if (!pcie->reset_names[i]) {
- ret = -ENOMEM;
- goto put_node;
- }
+ if (!pcie->reset_names[i])
+ return -ENOMEM;
+
+ gpiod_set_consumer_name(pcie->id_reset_gpio[i],
+ pcie->reset_names[i]);
}
}
return 0;
-
-put_node:
- of_node_put(child);
- of_node_put(parent);
- return ret;
}
static long kirin_pcie_get_resource(struct kirin_pcie *kirin_pcie,
struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct device_node *child, *node = dev->of_node;
+ struct device_node *node = dev->of_node;
void __iomem *apb_base;
int ret;
@@ -463,31 +463,24 @@ static long kirin_pcie_get_resource(struct kirin_pcie *kirin_pcie,
return PTR_ERR(kirin_pcie->apb);
/* pcie internal PERST# gpio */
- kirin_pcie->gpio_id_dwc_perst = of_get_named_gpio(dev->of_node,
- "reset-gpios", 0);
- if (kirin_pcie->gpio_id_dwc_perst == -EPROBE_DEFER) {
- return -EPROBE_DEFER;
- } else if (!gpio_is_valid(kirin_pcie->gpio_id_dwc_perst)) {
- dev_err(dev, "unable to get a valid gpio pin\n");
- return -ENODEV;
- }
+ kirin_pcie->id_dwc_perst_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(kirin_pcie->id_dwc_perst_gpio))
+ return dev_err_probe(dev, PTR_ERR(kirin_pcie->id_dwc_perst_gpio),
+ "unable to get a valid gpio pin\n");
+ gpiod_set_consumer_name(kirin_pcie->id_dwc_perst_gpio, "pcie_perst_bridge");
ret = kirin_pcie_get_gpio_enable(kirin_pcie, pdev);
if (ret)
return ret;
/* Parse OF children */
- for_each_available_child_of_node(node, child) {
+ for_each_available_child_of_node_scoped(node, child) {
ret = kirin_pcie_parse_port(kirin_pcie, pdev, child);
if (ret)
- goto put_node;
+ return ret;
}
return 0;
-
-put_node:
- of_node_put(child);
- return ret;
}
static void kirin_pcie_sideband_dbi_w_mode(struct kirin_pcie *kirin_pcie,
@@ -553,7 +546,7 @@ static int kirin_pcie_add_bus(struct pci_bus *bus)
/* Send PERST# to each slot */
for (i = 0; i < kirin_pcie->num_slots; i++) {
- ret = gpio_direction_output(kirin_pcie->gpio_id_reset[i], 1);
+ ret = gpiod_direction_output_raw(kirin_pcie->id_reset_gpio[i], 1);
if (ret) {
dev_err(pci->dev, "PERST# %s error: %d\n",
kirin_pcie->reset_names[i], ret);
@@ -623,44 +616,6 @@ static int kirin_pcie_host_init(struct dw_pcie_rp *pp)
return 0;
}
-static int kirin_pcie_gpio_request(struct kirin_pcie *kirin_pcie,
- struct device *dev)
-{
- int ret, i;
-
- for (i = 0; i < kirin_pcie->num_slots; i++) {
- if (!gpio_is_valid(kirin_pcie->gpio_id_reset[i])) {
- dev_err(dev, "unable to get a valid %s gpio\n",
- kirin_pcie->reset_names[i]);
- return -ENODEV;
- }
-
- ret = devm_gpio_request(dev, kirin_pcie->gpio_id_reset[i],
- kirin_pcie->reset_names[i]);
- if (ret)
- return ret;
- }
-
- for (i = 0; i < kirin_pcie->n_gpio_clkreq; i++) {
- if (!gpio_is_valid(kirin_pcie->gpio_id_clkreq[i])) {
- dev_err(dev, "unable to get a valid %s gpio\n",
- kirin_pcie->clkreq_names[i]);
- return -ENODEV;
- }
-
- ret = devm_gpio_request(dev, kirin_pcie->gpio_id_clkreq[i],
- kirin_pcie->clkreq_names[i]);
- if (ret)
- return ret;
-
- ret = gpio_direction_output(kirin_pcie->gpio_id_clkreq[i], 0);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
static const struct dw_pcie_ops kirin_dw_pcie_ops = {
.read_dbi = kirin_pcie_read_dbi,
.write_dbi = kirin_pcie_write_dbi,
@@ -680,7 +635,7 @@ static int kirin_pcie_power_off(struct kirin_pcie *kirin_pcie)
return hi3660_pcie_phy_power_off(kirin_pcie);
for (i = 0; i < kirin_pcie->n_gpio_clkreq; i++)
- gpio_direction_output(kirin_pcie->gpio_id_clkreq[i], 1);
+ gpiod_direction_output_raw(kirin_pcie->id_clkreq_gpio[i], 1);
phy_power_off(kirin_pcie->phy);
phy_exit(kirin_pcie->phy);
@@ -707,10 +662,6 @@ static int kirin_pcie_power_on(struct platform_device *pdev,
if (IS_ERR(kirin_pcie->phy))
return PTR_ERR(kirin_pcie->phy);
- ret = kirin_pcie_gpio_request(kirin_pcie, dev);
- if (ret)
- return ret;
-
ret = phy_init(kirin_pcie->phy);
if (ret)
goto err;
@@ -723,11 +674,9 @@ static int kirin_pcie_power_on(struct platform_device *pdev,
/* perst assert Endpoint */
usleep_range(REF_2_PERST_MIN, REF_2_PERST_MAX);
- if (!gpio_request(kirin_pcie->gpio_id_dwc_perst, "pcie_perst_bridge")) {
- ret = gpio_direction_output(kirin_pcie->gpio_id_dwc_perst, 1);
- if (ret)
- goto err;
- }
+ ret = gpiod_direction_output_raw(kirin_pcie->id_dwc_perst_gpio, 1);
+ if (ret)
+ goto err;
usleep_range(PERST_2_ACCESS_MIN, PERST_2_ACCESS_MAX);
@@ -773,16 +722,9 @@ static int kirin_pcie_probe(struct platform_device *pdev)
struct dw_pcie *pci;
int ret;
- if (!dev->of_node) {
- dev_err(dev, "NULL node\n");
- return -EINVAL;
- }
-
data = of_device_get_match_data(dev);
- if (!data) {
- dev_err(dev, "OF data missing\n");
- return -EINVAL;
- }
+ if (!data)
+ return dev_err_probe(dev, -EINVAL, "OF data missing\n");
kirin_pcie = devm_kzalloc(dev, sizeof(struct kirin_pcie), GFP_KERNEL);
if (!kirin_pcie)
@@ -813,7 +755,7 @@ static int kirin_pcie_probe(struct platform_device *pdev)
static struct platform_driver kirin_pcie_driver = {
.probe = kirin_pcie_probe,
- .remove_new = kirin_pcie_remove,
+ .remove = kirin_pcie_remove,
.driver = {
.name = "kirin-pcie",
.of_match_table = kirin_pcie_match,
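
The *_scoped iterators used above drop the child node reference on every exit path, which is what makes the old put_node unwinding unnecessary. A minimal sketch with a hypothetical helper:

static int example_count_slots(struct device_node *node)
{
	int n = 0;

	for_each_available_child_of_node_scoped(node, child) {
		if (!of_property_present(child, "reset-gpios"))
			return -EINVAL;	/* no of_node_put() needed here */
		n++;
	}

	return n;
}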
diff --git a/drivers/pci/controller/dwc/pcie-qcom-common.c b/drivers/pci/controller/dwc/pcie-qcom-common.c
new file mode 100644
index 000000000000..3aad19b56da8
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-qcom-common.c
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/pci.h>
+
+#include "pcie-designware.h"
+#include "pcie-qcom-common.h"
+
+void qcom_pcie_common_set_16gt_equalization(struct dw_pcie *pci)
+{
+ u32 reg;
+
+ /*
+ * GEN3_RELATED_OFF register is repurposed to apply equalization
+ * settings at various data transmission rates through the GEN3_EQ_*
+ * registers. The RATE_SHADOW_SEL bit field of GEN3_RELATED_OFF
+ * determines the data rate for which these equalization settings are
+ * applied.
+ */
+ reg = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
+ reg &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL;
+ reg &= ~GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK;
+ reg |= FIELD_PREP(GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK,
+ GEN3_RELATED_OFF_RATE_SHADOW_SEL_16_0GT);
+ dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, reg);
+
+ reg = dw_pcie_readl_dbi(pci, GEN3_EQ_FB_MODE_DIR_CHANGE_OFF);
+ reg &= ~(GEN3_EQ_FMDC_T_MIN_PHASE23 |
+ GEN3_EQ_FMDC_N_EVALS |
+ GEN3_EQ_FMDC_MAX_PRE_CUSROR_DELTA |
+ GEN3_EQ_FMDC_MAX_POST_CUSROR_DELTA);
+ reg |= FIELD_PREP(GEN3_EQ_FMDC_T_MIN_PHASE23, 0x1) |
+ FIELD_PREP(GEN3_EQ_FMDC_N_EVALS, 0xd) |
+ FIELD_PREP(GEN3_EQ_FMDC_MAX_PRE_CUSROR_DELTA, 0x5) |
+ FIELD_PREP(GEN3_EQ_FMDC_MAX_POST_CUSROR_DELTA, 0x5);
+ dw_pcie_writel_dbi(pci, GEN3_EQ_FB_MODE_DIR_CHANGE_OFF, reg);
+
+ reg = dw_pcie_readl_dbi(pci, GEN3_EQ_CONTROL_OFF);
+ reg &= ~(GEN3_EQ_CONTROL_OFF_FB_MODE |
+ GEN3_EQ_CONTROL_OFF_PHASE23_EXIT_MODE |
+ GEN3_EQ_CONTROL_OFF_FOM_INC_INITIAL_EVAL |
+ GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC);
+ dw_pcie_writel_dbi(pci, GEN3_EQ_CONTROL_OFF, reg);
+}
+EXPORT_SYMBOL_GPL(qcom_pcie_common_set_16gt_equalization);
+
+void qcom_pcie_common_set_16gt_lane_margining(struct dw_pcie *pci)
+{
+ u32 reg;
+
+ reg = dw_pcie_readl_dbi(pci, GEN4_LANE_MARGINING_1_OFF);
+ reg &= ~(MARGINING_MAX_VOLTAGE_OFFSET |
+ MARGINING_NUM_VOLTAGE_STEPS |
+ MARGINING_MAX_TIMING_OFFSET |
+ MARGINING_NUM_TIMING_STEPS);
+ reg |= FIELD_PREP(MARGINING_MAX_VOLTAGE_OFFSET, 0x24) |
+ FIELD_PREP(MARGINING_NUM_VOLTAGE_STEPS, 0x78) |
+ FIELD_PREP(MARGINING_MAX_TIMING_OFFSET, 0x32) |
+ FIELD_PREP(MARGINING_NUM_TIMING_STEPS, 0x10);
+ dw_pcie_writel_dbi(pci, GEN4_LANE_MARGINING_1_OFF, reg);
+
+ reg = dw_pcie_readl_dbi(pci, GEN4_LANE_MARGINING_2_OFF);
+ reg |= MARGINING_IND_ERROR_SAMPLER |
+ MARGINING_SAMPLE_REPORTING_METHOD |
+ MARGINING_IND_LEFT_RIGHT_TIMING |
+ MARGINING_VOLTAGE_SUPPORTED;
+ reg &= ~(MARGINING_IND_UP_DOWN_VOLTAGE |
+ MARGINING_MAXLANES |
+ MARGINING_SAMPLE_RATE_TIMING |
+ MARGINING_SAMPLE_RATE_VOLTAGE);
+ reg |= FIELD_PREP(MARGINING_MAXLANES, pci->num_lanes) |
+ FIELD_PREP(MARGINING_SAMPLE_RATE_TIMING, 0x3f) |
+ FIELD_PREP(MARGINING_SAMPLE_RATE_VOLTAGE, 0x3f);
+ dw_pcie_writel_dbi(pci, GEN4_LANE_MARGINING_2_OFF, reg);
+}
+EXPORT_SYMBOL_GPL(qcom_pcie_common_set_16gt_lane_margining);
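
Both the RC and EP drivers later in this series call these helpers from their link bring-up paths when the controller supports 16.0 GT/s, as in qcom_pcie_start_link():

	if (pcie_link_speed[pci->max_link_speed] == PCIE_SPEED_16_0GT) {
		qcom_pcie_common_set_16gt_equalization(pci);
		qcom_pcie_common_set_16gt_lane_margining(pci);
	}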
diff --git a/drivers/pci/controller/dwc/pcie-qcom-common.h b/drivers/pci/controller/dwc/pcie-qcom-common.h
new file mode 100644
index 000000000000..7d88d29e4766
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-qcom-common.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _PCIE_QCOM_COMMON_H
+#define _PCIE_QCOM_COMMON_H
+
+struct dw_pcie;
+
+void qcom_pcie_common_set_16gt_equalization(struct dw_pcie *pci);
+void qcom_pcie_common_set_16gt_lane_margining(struct dw_pcie *pci);
+
+#endif
diff --git a/drivers/pci/controller/dwc/pcie-qcom-ep.c b/drivers/pci/controller/dwc/pcie-qcom-ep.c
index 36e5e80cd22f..46b1c6d19974 100644
--- a/drivers/pci/controller/dwc/pcie-qcom-ep.c
+++ b/drivers/pci/controller/dwc/pcie-qcom-ep.c
@@ -25,6 +25,7 @@
#include "../../pci.h"
#include "pcie-designware.h"
+#include "pcie-qcom-common.h"
/* PARF registers */
#define PARF_SYS_CTRL 0x00
@@ -47,6 +48,7 @@
#define PARF_DBI_BASE_ADDR_HI 0x354
#define PARF_SLV_ADDR_SPACE_SIZE 0x358
#define PARF_SLV_ADDR_SPACE_SIZE_HI 0x35c
+#define PARF_NO_SNOOP_OVERRIDE 0x3d4
#define PARF_ATU_BASE_ADDR 0x634
#define PARF_ATU_BASE_ADDR_HI 0x638
#define PARF_SRIS_MODE 0x644
@@ -57,6 +59,7 @@
#define PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L2 0xc88
#define PARF_DEVICE_TYPE 0x1000
#define PARF_BDF_TO_SID_CFG 0x2c00
+#define PARF_INT_ALL_5_MASK 0x2dcc
/* PARF_INT_ALL_{STATUS/CLEAR/MASK} register fields */
#define PARF_INT_ALL_LINK_DOWN BIT(1)
@@ -86,6 +89,10 @@
#define PARF_DEBUG_INT_CFG_BUS_MASTER_EN BIT(2)
#define PARF_DEBUG_INT_RADM_PM_TURNOFF BIT(3)
+/* PARF_NO_SNOOP_OVERRIDE register fields */
+#define WR_NO_SNOOP_OVERRIDE_EN BIT(1)
+#define RD_NO_SNOOP_OVERRIDE_EN BIT(3)
+
/* PARF_DEVICE_TYPE register fields */
#define PARF_DEVICE_TYPE_EP 0x0
@@ -122,6 +129,9 @@
/* PARF_CFG_BITS register fields */
#define PARF_CFG_BITS_REQ_EXIT_L1SS_MSI_LTR_EN BIT(1)
+/* PARF_INT_ALL_5_MASK fields */
+#define PARF_INT_ALL_5_MHI_RAM_DATA_PARITY_ERR BIT(0)
+
/* ELBI registers */
#define ELBI_SYS_STTS 0x08
#define ELBI_CS2_ENABLE 0xa4
@@ -150,6 +160,18 @@ enum qcom_pcie_ep_link_status {
};
/**
+ * struct qcom_pcie_ep_cfg - Per SoC config struct
+ * @hdma_support: HDMA support on this SoC
+ * @override_no_snoop: Override NO_SNOOP attribute in TLP to enable cache snooping
+ * @disable_mhi_ram_parity_check: Disable MHI RAM data parity error check
+ */
+struct qcom_pcie_ep_cfg {
+ bool hdma_support;
+ bool override_no_snoop;
+ bool disable_mhi_ram_parity_check;
+};
+
+/**
* struct qcom_pcie_ep - Qualcomm PCIe Endpoint Controller
* @pci: Designware PCIe controller struct
* @parf: Qualcomm PCIe specific PARF register base
@@ -167,6 +189,7 @@ enum qcom_pcie_ep_link_status {
* @num_clks: PCIe clocks count
* @perst_en: Flag for PERST enable
* @perst_sep_en: Flag for PERST separation enable
+ * @cfg: PCIe EP config struct
* @link_status: PCIe Link status
* @global_irq: Qualcomm PCIe specific Global IRQ
* @perst_irq: PERST# IRQ
@@ -194,6 +217,7 @@ struct qcom_pcie_ep {
u32 perst_en;
u32 perst_sep_en;
+ const struct qcom_pcie_ep_cfg *cfg;
enum qcom_pcie_ep_link_status link_status;
int global_irq;
int perst_irq;
@@ -372,6 +396,10 @@ static int qcom_pcie_perst_deassert(struct dw_pcie *pci)
return ret;
}
+ /* Perform cleanup that requires refclk */
+ pci_epc_deinit_notify(pci->ep.epc);
+ dw_pcie_ep_cleanup(&pci->ep);
+
/* Assert WAKE# to RC to indicate device is ready */
gpiod_set_value_cansleep(pcie_ep->wake, 1);
usleep_range(WAKE_DELAY_US, WAKE_DELAY_US + 500);
@@ -463,12 +491,23 @@ static int qcom_pcie_perst_deassert(struct dw_pcie *pci)
PARF_INT_ALL_LINK_UP | PARF_INT_ALL_EDMA;
writel_relaxed(val, pcie_ep->parf + PARF_INT_ALL_MASK);
- ret = dw_pcie_ep_init_complete(&pcie_ep->pci.ep);
+ if (pcie_ep->cfg && pcie_ep->cfg->disable_mhi_ram_parity_check) {
+ val = readl_relaxed(pcie_ep->parf + PARF_INT_ALL_5_MASK);
+ val &= ~PARF_INT_ALL_5_MHI_RAM_DATA_PARITY_ERR;
+ writel_relaxed(val, pcie_ep->parf + PARF_INT_ALL_5_MASK);
+ }
+
+ ret = dw_pcie_ep_init_registers(&pcie_ep->pci.ep);
if (ret) {
dev_err(dev, "Failed to complete initialization: %d\n", ret);
goto err_disable_resources;
}
+ if (pcie_link_speed[pci->max_link_speed] == PCIE_SPEED_16_0GT) {
+ qcom_pcie_common_set_16gt_equalization(pci);
+ qcom_pcie_common_set_16gt_lane_margining(pci);
+ }
+
/*
* The physical address of the MMIO region which is exposed as the BAR
* should be written to MHI BASE registers.
@@ -482,13 +521,17 @@ static int qcom_pcie_perst_deassert(struct dw_pcie *pci)
val &= ~PARF_MSTR_AXI_CLK_EN;
writel_relaxed(val, pcie_ep->parf + PARF_MHI_CLOCK_RESET_CTRL);
- dw_pcie_ep_init_notify(&pcie_ep->pci.ep);
+ pci_epc_init_notify(pcie_ep->pci.ep.epc);
/* Enable LTSSM */
val = readl_relaxed(pcie_ep->parf + PARF_LTSSM);
val |= BIT(8);
writel_relaxed(val, pcie_ep->parf + PARF_LTSSM);
+ if (pcie_ep->cfg && pcie_ep->cfg->override_no_snoop)
+ writel_relaxed(WR_NO_SNOOP_OVERRIDE_EN | RD_NO_SNOOP_OVERRIDE_EN,
+ pcie_ep->parf + PARF_NO_SNOOP_OVERRIDE);
+
return 0;
err_disable_resources:
@@ -500,12 +543,6 @@ err_disable_resources:
static void qcom_pcie_perst_assert(struct dw_pcie *pci)
{
struct qcom_pcie_ep *pcie_ep = to_pcie_ep(pci);
- struct device *dev = pci->dev;
-
- if (pcie_ep->link_status == QCOM_PCIE_EP_LINK_DISABLED) {
- dev_dbg(dev, "Link is already disabled\n");
- return;
- }
qcom_pcie_disable_resources(pcie_ep);
pcie_ep->link_status = QCOM_PCIE_EP_LINK_DISABLED;
@@ -630,21 +667,19 @@ static irqreturn_t qcom_pcie_ep_global_irq_thread(int irq, void *data)
struct dw_pcie *pci = &pcie_ep->pci;
struct device *dev = pci->dev;
u32 status = readl_relaxed(pcie_ep->parf + PARF_INT_ALL_STATUS);
- u32 mask = readl_relaxed(pcie_ep->parf + PARF_INT_ALL_MASK);
u32 dstate, val;
writel_relaxed(status, pcie_ep->parf + PARF_INT_ALL_CLEAR);
- status &= mask;
if (FIELD_GET(PARF_INT_ALL_LINK_DOWN, status)) {
dev_dbg(dev, "Received Linkdown event\n");
pcie_ep->link_status = QCOM_PCIE_EP_LINK_DOWN;
- pci_epc_linkdown(pci->ep.epc);
+ dw_pcie_ep_linkdown(&pci->ep);
} else if (FIELD_GET(PARF_INT_ALL_BME, status)) {
- dev_dbg(dev, "Received BME event. Link is enabled!\n");
+ dev_dbg(dev, "Received Bus Master Enable event\n");
pcie_ep->link_status = QCOM_PCIE_EP_LINK_ENABLED;
qcom_pcie_ep_icc_update(pcie_ep);
- pci_epc_bme_notify(pci->ep.epc);
+ pci_epc_bus_master_enable_notify(pci->ep.epc);
} else if (FIELD_GET(PARF_INT_ALL_PM_TURNOFF, status)) {
dev_dbg(dev, "Received PM Turn-off event! Entering L23\n");
val = readl_relaxed(pcie_ep->parf + PARF_PM_CTRL);
@@ -664,7 +699,8 @@ static irqreturn_t qcom_pcie_ep_global_irq_thread(int irq, void *data)
dw_pcie_ep_linkup(&pci->ep);
pcie_ep->link_status = QCOM_PCIE_EP_LINK_UP;
} else {
- dev_err(dev, "Received unknown event: %d\n", status);
+ dev_WARN_ONCE(dev, 1, "Received unknown event. INT_STATUS: 0x%08x\n",
+ status);
}
return IRQ_HANDLED;
@@ -695,8 +731,15 @@ static irqreturn_t qcom_pcie_ep_perst_irq_thread(int irq, void *data)
static int qcom_pcie_ep_enable_irq_resources(struct platform_device *pdev,
struct qcom_pcie_ep *pcie_ep)
{
+ struct device *dev = pcie_ep->pci.dev;
+ char *name;
int ret;
+ name = devm_kasprintf(dev, GFP_KERNEL, "qcom_pcie_ep_global_irq%d",
+ pcie_ep->pci.ep.epc->domain_nr);
+ if (!name)
+ return -ENOMEM;
+
pcie_ep->global_irq = platform_get_irq_byname(pdev, "global");
if (pcie_ep->global_irq < 0)
return pcie_ep->global_irq;
@@ -704,18 +747,23 @@ static int qcom_pcie_ep_enable_irq_resources(struct platform_device *pdev,
ret = devm_request_threaded_irq(&pdev->dev, pcie_ep->global_irq, NULL,
qcom_pcie_ep_global_irq_thread,
IRQF_ONESHOT,
- "global_irq", pcie_ep);
+ name, pcie_ep);
if (ret) {
dev_err(&pdev->dev, "Failed to request Global IRQ\n");
return ret;
}
+ name = devm_kasprintf(dev, GFP_KERNEL, "qcom_pcie_ep_perst_irq%d",
+ pcie_ep->pci.ep.epc->domain_nr);
+ if (!name)
+ return -ENOMEM;
+
pcie_ep->perst_irq = gpiod_to_irq(pcie_ep->reset);
irq_set_status_flags(pcie_ep->perst_irq, IRQ_NOAUTOEN);
ret = devm_request_threaded_irq(&pdev->dev, pcie_ep->perst_irq, NULL,
qcom_pcie_ep_perst_irq_thread,
IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
- "perst_irq", pcie_ep);
+ name, pcie_ep);
if (ret) {
dev_err(&pdev->dev, "Failed to request PERST IRQ\n");
disable_irq(pcie_ep->global_irq);
@@ -774,10 +822,13 @@ static void qcom_pcie_ep_init_debugfs(struct qcom_pcie_ep *pcie_ep)
static const struct pci_epc_features qcom_pcie_epc_features = {
.linkup_notifier = true,
- .core_init_notifier = true,
.msi_capable = true,
.msix_capable = false,
.align = SZ_4K,
+ .bar[BAR_0] = { .only_64bit = true, },
+ .bar[BAR_1] = { .type = BAR_RESERVED, },
+ .bar[BAR_2] = { .only_64bit = true, },
+ .bar[BAR_3] = { .type = BAR_RESERVED, },
};
static const struct pci_epc_features *
@@ -816,27 +867,29 @@ static int qcom_pcie_ep_probe(struct platform_device *pdev)
pcie_ep->pci.ops = &pci_ops;
pcie_ep->pci.ep.ops = &pci_ep_ops;
pcie_ep->pci.edma.nr_irqs = 1;
+
+ pcie_ep->cfg = of_device_get_match_data(dev);
+ if (pcie_ep->cfg && pcie_ep->cfg->hdma_support) {
+ pcie_ep->pci.edma.ll_wr_cnt = 8;
+ pcie_ep->pci.edma.ll_rd_cnt = 8;
+ pcie_ep->pci.edma.mf = EDMA_MF_HDMA_NATIVE;
+ }
+
platform_set_drvdata(pdev, pcie_ep);
ret = qcom_pcie_ep_get_resources(pdev, pcie_ep);
if (ret)
return ret;
- ret = qcom_pcie_enable_resources(pcie_ep);
- if (ret) {
- dev_err(dev, "Failed to enable resources: %d\n", ret);
- return ret;
- }
-
ret = dw_pcie_ep_init(&pcie_ep->pci.ep);
if (ret) {
dev_err(dev, "Failed to initialize endpoint: %d\n", ret);
- goto err_disable_resources;
+ return ret;
}
ret = qcom_pcie_ep_enable_irq_resources(pdev, pcie_ep);
if (ret)
- goto err_disable_resources;
+ goto err_ep_deinit;
name = devm_kasprintf(dev, GFP_KERNEL, "%pOFP", dev->of_node);
if (!name) {
@@ -853,8 +906,8 @@ err_disable_irqs:
disable_irq(pcie_ep->global_irq);
disable_irq(pcie_ep->perst_irq);
-err_disable_resources:
- qcom_pcie_disable_resources(pcie_ep);
+err_ep_deinit:
+ dw_pcie_ep_deinit(&pcie_ep->pci.ep);
return ret;
}
@@ -874,16 +927,24 @@ static void qcom_pcie_ep_remove(struct platform_device *pdev)
qcom_pcie_disable_resources(pcie_ep);
}
+static const struct qcom_pcie_ep_cfg cfg_1_34_0 = {
+ .hdma_support = true,
+ .override_no_snoop = true,
+ .disable_mhi_ram_parity_check = true,
+};
+
static const struct of_device_id qcom_pcie_ep_match[] = {
+ { .compatible = "qcom,sa8775p-pcie-ep", .data = &cfg_1_34_0},
{ .compatible = "qcom,sdx55-pcie-ep", },
{ .compatible = "qcom,sm8450-pcie-ep", },
+ { .compatible = "qcom,sar2130p-pcie-ep", },
{ }
};
MODULE_DEVICE_TABLE(of, qcom_pcie_ep_match);
static struct platform_driver qcom_pcie_ep_driver = {
.probe = qcom_pcie_ep_probe,
- .remove_new = qcom_pcie_ep_remove,
+ .remove = qcom_pcie_ep_remove,
.driver = {
.name = "qcom-pcie-ep",
.of_match_table = qcom_pcie_ep_match,
diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
index 14772edcf0d3..dc98ae63362d 100644
--- a/drivers/pci/controller/dwc/pcie-qcom.c
+++ b/drivers/pci/controller/dwc/pcie-qcom.c
@@ -18,10 +18,11 @@
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
+#include <linux/limits.h>
#include <linux/init.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/pci.h>
+#include <linux/pm_opp.h>
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
#include <linux/phy/pcie.h>
@@ -30,9 +31,11 @@
#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/types.h>
+#include <linux/units.h>
#include "../../pci.h"
#include "pcie-designware.h"
+#include "pcie-qcom-common.h"
/* PARF registers */
#define PARF_SYS_CTRL 0x00
@@ -43,14 +46,24 @@
#define PARF_PHY_REFCLK 0x4c
#define PARF_CONFIG_BITS 0x50
#define PARF_DBI_BASE_ADDR 0x168
+#define PARF_SLV_ADDR_SPACE_SIZE 0x16c
#define PARF_MHI_CLOCK_RESET_CTRL 0x174
#define PARF_AXI_MSTR_WR_ADDR_HALT 0x178
#define PARF_AXI_MSTR_WR_ADDR_HALT_V2 0x1a8
#define PARF_Q2A_FLUSH 0x1ac
#define PARF_LTSSM 0x1b0
+#define PARF_INT_ALL_STATUS 0x224
+#define PARF_INT_ALL_CLEAR 0x228
+#define PARF_INT_ALL_MASK 0x22c
#define PARF_SID_OFFSET 0x234
#define PARF_BDF_TRANSLATE_CFG 0x24c
-#define PARF_SLV_ADDR_SPACE_SIZE 0x358
+#define PARF_DBI_BASE_ADDR_V2 0x350
+#define PARF_DBI_BASE_ADDR_V2_HI 0x354
+#define PARF_SLV_ADDR_SPACE_SIZE_V2 0x358
+#define PARF_SLV_ADDR_SPACE_SIZE_V2_HI 0x35c
+#define PARF_NO_SNOOP_OVERRIDE 0x3d4
+#define PARF_ATU_BASE_ADDR 0x634
+#define PARF_ATU_BASE_ADDR_HI 0x638
#define PARF_DEVICE_TYPE 0x1000
#define PARF_BDF_TO_SID_TABLE_N 0x2000
#define PARF_BDF_TO_SID_CFG 0x2c00
@@ -105,7 +118,7 @@
#define PHY_RX0_EQ(x) FIELD_PREP(GENMASK(26, 24), x)
/* PARF_SLV_ADDR_SPACE_SIZE register value */
-#define SLV_ADDR_SPACE_SZ 0x10000000
+#define SLV_ADDR_SPACE_SZ 0x80000000
/* PARF_MHI_CLOCK_RESET_CTRL register fields */
#define AHB_CLK_EN BIT(0)
@@ -118,6 +131,14 @@
/* PARF_LTSSM register fields */
#define LTSSM_EN BIT(8)
+/* PARF_INT_ALL_{STATUS/CLEAR/MASK} register fields */
+#define PARF_INT_ALL_LINK_UP BIT(13)
+#define PARF_INT_MSI_DEV_0_7 GENMASK(30, 23)
+
+/* PARF_NO_SNOOP_OVERRIDE register fields */
+#define WR_NO_SNOOP_OVERRIDE_EN BIT(1)
+#define RD_NO_SNOOP_OVERRIDE_EN BIT(3)
+
/* PARF_DEVICE_TYPE register fields */
#define DEVICE_TYPE_RC 0x4
@@ -154,58 +175,56 @@
#define QCOM_PCIE_LINK_SPEED_TO_BW(speed) \
Mbps_to_icc(PCIE_SPEED2MBS_ENC(pcie_link_speed[speed]))
-#define QCOM_PCIE_1_0_0_MAX_CLOCKS 4
struct qcom_pcie_resources_1_0_0 {
- struct clk_bulk_data clks[QCOM_PCIE_1_0_0_MAX_CLOCKS];
+ struct clk_bulk_data *clks;
+ int num_clks;
struct reset_control *core;
struct regulator *vdda;
};
-#define QCOM_PCIE_2_1_0_MAX_CLOCKS 5
#define QCOM_PCIE_2_1_0_MAX_RESETS 6
#define QCOM_PCIE_2_1_0_MAX_SUPPLY 3
struct qcom_pcie_resources_2_1_0 {
- struct clk_bulk_data clks[QCOM_PCIE_2_1_0_MAX_CLOCKS];
+ struct clk_bulk_data *clks;
+ int num_clks;
struct reset_control_bulk_data resets[QCOM_PCIE_2_1_0_MAX_RESETS];
int num_resets;
struct regulator_bulk_data supplies[QCOM_PCIE_2_1_0_MAX_SUPPLY];
};
-#define QCOM_PCIE_2_3_2_MAX_CLOCKS 4
#define QCOM_PCIE_2_3_2_MAX_SUPPLY 2
struct qcom_pcie_resources_2_3_2 {
- struct clk_bulk_data clks[QCOM_PCIE_2_3_2_MAX_CLOCKS];
+ struct clk_bulk_data *clks;
+ int num_clks;
struct regulator_bulk_data supplies[QCOM_PCIE_2_3_2_MAX_SUPPLY];
};
-#define QCOM_PCIE_2_3_3_MAX_CLOCKS 5
#define QCOM_PCIE_2_3_3_MAX_RESETS 7
struct qcom_pcie_resources_2_3_3 {
- struct clk_bulk_data clks[QCOM_PCIE_2_3_3_MAX_CLOCKS];
+ struct clk_bulk_data *clks;
+ int num_clks;
struct reset_control_bulk_data rst[QCOM_PCIE_2_3_3_MAX_RESETS];
};
-#define QCOM_PCIE_2_4_0_MAX_CLOCKS 4
#define QCOM_PCIE_2_4_0_MAX_RESETS 12
struct qcom_pcie_resources_2_4_0 {
- struct clk_bulk_data clks[QCOM_PCIE_2_4_0_MAX_CLOCKS];
+ struct clk_bulk_data *clks;
int num_clks;
struct reset_control_bulk_data resets[QCOM_PCIE_2_4_0_MAX_RESETS];
int num_resets;
};
-#define QCOM_PCIE_2_7_0_MAX_CLOCKS 15
#define QCOM_PCIE_2_7_0_MAX_SUPPLIES 2
struct qcom_pcie_resources_2_7_0 {
- struct clk_bulk_data clks[QCOM_PCIE_2_7_0_MAX_CLOCKS];
+ struct clk_bulk_data *clks;
int num_clks;
struct regulator_bulk_data supplies[QCOM_PCIE_2_7_0_MAX_SUPPLIES];
struct reset_control *rst;
};
-#define QCOM_PCIE_2_9_0_MAX_CLOCKS 5
struct qcom_pcie_resources_2_9_0 {
- struct clk_bulk_data clks[QCOM_PCIE_2_9_0_MAX_CLOCKS];
+ struct clk_bulk_data *clks;
+ int num_clks;
struct reset_control *rst;
};
@@ -231,8 +250,15 @@ struct qcom_pcie_ops {
int (*config_sid)(struct qcom_pcie *pcie);
};
+ /**
+ * struct qcom_pcie_cfg - Per SoC config struct
+ * @ops: qcom PCIe ops structure
+ * @override_no_snoop: Override NO_SNOOP attribute in TLP to enable cache
+ * snooping
+ */
struct qcom_pcie_cfg {
const struct qcom_pcie_ops *ops;
+ bool override_no_snoop;
bool no_l0s;
};
@@ -245,9 +271,11 @@ struct qcom_pcie {
struct phy *phy;
struct gpio_desc *reset;
struct icc_path *icc_mem;
+ struct icc_path *icc_cpu;
const struct qcom_pcie_cfg *cfg;
struct dentry *debugfs;
bool suspended;
+ bool use_pm_opp;
};
#define to_qcom_pcie(x) dev_get_drvdata((x)->dev)
@@ -270,6 +298,11 @@ static int qcom_pcie_start_link(struct dw_pcie *pci)
{
struct qcom_pcie *pcie = to_qcom_pcie(pci);
+ if (pcie_link_speed[pci->max_link_speed] == PCIE_SPEED_16_0GT) {
+ qcom_pcie_common_set_16gt_equalization(pci);
+ qcom_pcie_common_set_16gt_lane_margining(pci);
+ }
+
/* Enable Link Training state machine */
if (pcie->cfg->ops->ltssm_enable)
pcie->cfg->ops->ltssm_enable(pcie);
@@ -311,6 +344,50 @@ static void qcom_pcie_clear_hpc(struct dw_pcie *pci)
dw_pcie_dbi_ro_wr_dis(pci);
}
+static void qcom_pcie_configure_dbi_base(struct qcom_pcie *pcie)
+{
+ struct dw_pcie *pci = pcie->pci;
+
+ if (pci->dbi_phys_addr) {
+ /*
+ * The PARF_DBI_BASE_ADDR register is in the CPU domain and needs to
+ * be programmed with the CPU physical address.
+ */
+ writel(lower_32_bits(pci->dbi_phys_addr), pcie->parf +
+ PARF_DBI_BASE_ADDR);
+ writel(SLV_ADDR_SPACE_SZ, pcie->parf +
+ PARF_SLV_ADDR_SPACE_SIZE);
+ }
+}
+
+static void qcom_pcie_configure_dbi_atu_base(struct qcom_pcie *pcie)
+{
+ struct dw_pcie *pci = pcie->pci;
+
+ if (pci->dbi_phys_addr) {
+ /*
+ * PARF_DBI_BASE_ADDR_V2 and PARF_ATU_BASE_ADDR registers are
+ * in the CPU domain and need to be programmed with CPU
+ * physical addresses.
+ */
+ writel(lower_32_bits(pci->dbi_phys_addr), pcie->parf +
+ PARF_DBI_BASE_ADDR_V2);
+ writel(upper_32_bits(pci->dbi_phys_addr), pcie->parf +
+ PARF_DBI_BASE_ADDR_V2_HI);
+
+ if (pci->atu_phys_addr) {
+ writel(lower_32_bits(pci->atu_phys_addr), pcie->parf +
+ PARF_ATU_BASE_ADDR);
+ writel(upper_32_bits(pci->atu_phys_addr), pcie->parf +
+ PARF_ATU_BASE_ADDR_HI);
+ }
+
+ writel(0x0, pcie->parf + PARF_SLV_ADDR_SPACE_SIZE_V2);
+ writel(SLV_ADDR_SPACE_SZ, pcie->parf +
+ PARF_SLV_ADDR_SPACE_SIZE_V2_HI);
+ }
+}
+
static void qcom_pcie_2_1_0_ltssm_enable(struct qcom_pcie *pcie)
{
u32 val;
@@ -337,21 +414,11 @@ static int qcom_pcie_get_resources_2_1_0(struct qcom_pcie *pcie)
if (ret)
return ret;
- res->clks[0].id = "iface";
- res->clks[1].id = "core";
- res->clks[2].id = "phy";
- res->clks[3].id = "aux";
- res->clks[4].id = "ref";
-
- /* iface, core, phy are required */
- ret = devm_clk_bulk_get(dev, 3, res->clks);
- if (ret < 0)
- return ret;
-
- /* aux, ref are optional */
- ret = devm_clk_bulk_get_optional(dev, 2, res->clks + 3);
- if (ret < 0)
- return ret;
+ res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
+ if (res->num_clks < 0) {
+ dev_err(dev, "Failed to get clocks\n");
+ return res->num_clks;
+ }
res->resets[0].id = "pci";
res->resets[1].id = "axi";
@@ -373,7 +440,7 @@ static void qcom_pcie_deinit_2_1_0(struct qcom_pcie *pcie)
{
struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
- clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
+ clk_bulk_disable_unprepare(res->num_clks, res->clks);
reset_control_bulk_assert(res->num_resets, res->resets);
writel(1, pcie->parf + PARF_PHY_CTRL);
@@ -425,7 +492,7 @@ static int qcom_pcie_post_init_2_1_0(struct qcom_pcie *pcie)
val &= ~PHY_TEST_PWR_DOWN;
writel(val, pcie->parf + PARF_PHY_CTRL);
- ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
+ ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
if (ret)
return ret;
@@ -476,20 +543,16 @@ static int qcom_pcie_get_resources_1_0_0(struct qcom_pcie *pcie)
struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
struct dw_pcie *pci = pcie->pci;
struct device *dev = pci->dev;
- int ret;
res->vdda = devm_regulator_get(dev, "vdda");
if (IS_ERR(res->vdda))
return PTR_ERR(res->vdda);
- res->clks[0].id = "iface";
- res->clks[1].id = "aux";
- res->clks[2].id = "master_bus";
- res->clks[3].id = "slave_bus";
-
- ret = devm_clk_bulk_get(dev, ARRAY_SIZE(res->clks), res->clks);
- if (ret < 0)
- return ret;
+ res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
+ if (res->num_clks < 0) {
+ dev_err(dev, "Failed to get clocks\n");
+ return res->num_clks;
+ }
res->core = devm_reset_control_get_exclusive(dev, "core");
return PTR_ERR_OR_ZERO(res->core);
@@ -500,7 +563,7 @@ static void qcom_pcie_deinit_1_0_0(struct qcom_pcie *pcie)
struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
reset_control_assert(res->core);
- clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
+ clk_bulk_disable_unprepare(res->num_clks, res->clks);
regulator_disable(res->vdda);
}
@@ -517,7 +580,7 @@ static int qcom_pcie_init_1_0_0(struct qcom_pcie *pcie)
return ret;
}
- ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
+ ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
if (ret) {
dev_err(dev, "cannot prepare/enable clocks\n");
goto err_assert_reset;
@@ -532,7 +595,7 @@ static int qcom_pcie_init_1_0_0(struct qcom_pcie *pcie)
return 0;
err_disable_clks:
- clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
+ clk_bulk_disable_unprepare(res->num_clks, res->clks);
err_assert_reset:
reset_control_assert(res->core);
@@ -541,8 +604,7 @@ err_assert_reset:
static int qcom_pcie_post_init_1_0_0(struct qcom_pcie *pcie)
{
- /* change DBI base address */
- writel(0, pcie->parf + PARF_DBI_BASE_ADDR);
+ qcom_pcie_configure_dbi_base(pcie);
if (IS_ENABLED(CONFIG_PCI_MSI)) {
u32 val = readl(pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT);
@@ -580,14 +642,11 @@ static int qcom_pcie_get_resources_2_3_2(struct qcom_pcie *pcie)
if (ret)
return ret;
- res->clks[0].id = "aux";
- res->clks[1].id = "cfg";
- res->clks[2].id = "bus_master";
- res->clks[3].id = "bus_slave";
-
- ret = devm_clk_bulk_get(dev, ARRAY_SIZE(res->clks), res->clks);
- if (ret < 0)
- return ret;
+ res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
+ if (res->num_clks < 0) {
+ dev_err(dev, "Failed to get clocks\n");
+ return res->num_clks;
+ }
return 0;
}
@@ -596,7 +655,7 @@ static void qcom_pcie_deinit_2_3_2(struct qcom_pcie *pcie)
{
struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
- clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
+ clk_bulk_disable_unprepare(res->num_clks, res->clks);
regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
}
@@ -613,7 +672,7 @@ static int qcom_pcie_init_2_3_2(struct qcom_pcie *pcie)
return ret;
}
- ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
+ ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
if (ret) {
dev_err(dev, "cannot prepare/enable clocks\n");
regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
@@ -632,8 +691,7 @@ static int qcom_pcie_post_init_2_3_2(struct qcom_pcie *pcie)
val &= ~PHY_TEST_PWR_DOWN;
writel(val, pcie->parf + PARF_PHY_CTRL);
- /* change DBI base address */
- writel(0, pcie->parf + PARF_DBI_BASE_ADDR);
+ qcom_pcie_configure_dbi_base(pcie);
/* MAC PHY_POWERDOWN MUX DISABLE */
val = readl(pcie->parf + PARF_SYS_CTRL);
@@ -661,17 +719,11 @@ static int qcom_pcie_get_resources_2_4_0(struct qcom_pcie *pcie)
bool is_ipq = of_device_is_compatible(dev->of_node, "qcom,pcie-ipq4019");
int ret;
- res->clks[0].id = "aux";
- res->clks[1].id = "master_bus";
- res->clks[2].id = "slave_bus";
- res->clks[3].id = "iface";
-
- /* qcom,pcie-ipq4019 is defined without "iface" */
- res->num_clks = is_ipq ? 3 : 4;
-
- ret = devm_clk_bulk_get(dev, res->num_clks, res->clks);
- if (ret < 0)
- return ret;
+ res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
+ if (res->num_clks < 0) {
+ dev_err(dev, "Failed to get clocks\n");
+ return res->num_clks;
+ }
res->resets[0].id = "axi_m";
res->resets[1].id = "axi_s";
@@ -742,15 +794,11 @@ static int qcom_pcie_get_resources_2_3_3(struct qcom_pcie *pcie)
struct device *dev = pci->dev;
int ret;
- res->clks[0].id = "iface";
- res->clks[1].id = "axi_m";
- res->clks[2].id = "axi_s";
- res->clks[3].id = "ahb";
- res->clks[4].id = "aux";
-
- ret = devm_clk_bulk_get(dev, ARRAY_SIZE(res->clks), res->clks);
- if (ret < 0)
- return ret;
+ res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
+ if (res->num_clks < 0) {
+ dev_err(dev, "Failed to get clocks\n");
+ return res->num_clks;
+ }
res->rst[0].id = "axi_m";
res->rst[1].id = "axi_s";
@@ -771,7 +819,7 @@ static void qcom_pcie_deinit_2_3_3(struct qcom_pcie *pcie)
{
struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
- clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
+ clk_bulk_disable_unprepare(res->num_clks, res->clks);
}
static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie)
@@ -801,7 +849,7 @@ static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie)
*/
usleep_range(2000, 2500);
- ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
+ ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
if (ret) {
dev_err(dev, "cannot prepare/enable clocks\n");
goto err_assert_resets;
@@ -825,13 +873,11 @@ static int qcom_pcie_post_init_2_3_3(struct qcom_pcie *pcie)
u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
u32 val;
- writel(SLV_ADDR_SPACE_SZ, pcie->parf + PARF_SLV_ADDR_SPACE_SIZE);
-
val = readl(pcie->parf + PARF_PHY_CTRL);
val &= ~PHY_TEST_PWR_DOWN;
writel(val, pcie->parf + PARF_PHY_CTRL);
- writel(0, pcie->parf + PARF_DBI_BASE_ADDR);
+ qcom_pcie_configure_dbi_atu_base(pcie);
writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS
| SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS |
@@ -862,8 +908,6 @@ static int qcom_pcie_get_resources_2_7_0(struct qcom_pcie *pcie)
struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
struct dw_pcie *pci = pcie->pci;
struct device *dev = pci->dev;
- unsigned int num_clks, num_opt_clks;
- unsigned int idx;
int ret;
res->rst = devm_reset_control_array_get_exclusive(dev);
@@ -877,36 +921,11 @@ static int qcom_pcie_get_resources_2_7_0(struct qcom_pcie *pcie)
if (ret)
return ret;
- idx = 0;
- res->clks[idx++].id = "aux";
- res->clks[idx++].id = "cfg";
- res->clks[idx++].id = "bus_master";
- res->clks[idx++].id = "bus_slave";
- res->clks[idx++].id = "slave_q2a";
-
- num_clks = idx;
-
- ret = devm_clk_bulk_get(dev, num_clks, res->clks);
- if (ret < 0)
- return ret;
-
- res->clks[idx++].id = "tbu";
- res->clks[idx++].id = "ddrss_sf_tbu";
- res->clks[idx++].id = "aggre0";
- res->clks[idx++].id = "aggre1";
- res->clks[idx++].id = "noc_aggr";
- res->clks[idx++].id = "noc_aggr_4";
- res->clks[idx++].id = "noc_aggr_south_sf";
- res->clks[idx++].id = "cnoc_qx";
- res->clks[idx++].id = "sleep";
- res->clks[idx++].id = "cnoc_sf_axi";
-
- num_opt_clks = idx - num_clks;
- res->num_clks = idx;
-
- ret = devm_clk_bulk_get_optional(dev, num_opt_clks, res->clks + num_clks);
- if (ret < 0)
- return ret;
+ res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
+ if (res->num_clks < 0) {
+ dev_err(dev, "Failed to get clocks\n");
+ return res->num_clks;
+ }
return 0;
}
@@ -954,8 +973,7 @@ static int qcom_pcie_init_2_7_0(struct qcom_pcie *pcie)
val &= ~PHY_TEST_PWR_DOWN;
writel(val, pcie->parf + PARF_PHY_CTRL);
- /* change DBI base address */
- writel(0, pcie->parf + PARF_DBI_BASE_ADDR);
+ qcom_pcie_configure_dbi_atu_base(pcie);
/* MAC PHY_POWERDOWN MUX DISABLE */
val = readl(pcie->parf + PARF_SYS_CTRL);
@@ -986,6 +1004,12 @@ err_disable_regulators:
static int qcom_pcie_post_init_2_7_0(struct qcom_pcie *pcie)
{
+ const struct qcom_pcie_cfg *pcie_cfg = pcie->cfg;
+
+ if (pcie_cfg->override_no_snoop)
+ writel(WR_NO_SNOOP_OVERRIDE_EN | RD_NO_SNOOP_OVERRIDE_EN,
+ pcie->parf + PARF_NO_SNOOP_OVERRIDE);
+
qcom_pcie_clear_aspm_l0s(pcie->pci);
qcom_pcie_clear_hpc(pcie->pci);
@@ -1101,17 +1125,12 @@ static int qcom_pcie_get_resources_2_9_0(struct qcom_pcie *pcie)
struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0;
struct dw_pcie *pci = pcie->pci;
struct device *dev = pci->dev;
- int ret;
- res->clks[0].id = "iface";
- res->clks[1].id = "axi_m";
- res->clks[2].id = "axi_s";
- res->clks[3].id = "axi_bridge";
- res->clks[4].id = "rchng";
-
- ret = devm_clk_bulk_get(dev, ARRAY_SIZE(res->clks), res->clks);
- if (ret < 0)
- return ret;
+ res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
+ if (res->num_clks < 0) {
+ dev_err(dev, "Failed to get clocks\n");
+ return res->num_clks;
+ }
res->rst = devm_reset_control_array_get_exclusive(dev);
if (IS_ERR(res->rst))
@@ -1124,7 +1143,7 @@ static void qcom_pcie_deinit_2_9_0(struct qcom_pcie *pcie)
{
struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0;
- clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
+ clk_bulk_disable_unprepare(res->num_clks, res->clks);
}
static int qcom_pcie_init_2_9_0(struct qcom_pcie *pcie)
@@ -1153,7 +1172,7 @@ static int qcom_pcie_init_2_9_0(struct qcom_pcie *pcie)
usleep_range(2000, 2500);
- return clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
+ return clk_bulk_prepare_enable(res->num_clks, res->clks);
}
static int qcom_pcie_post_init_2_9_0(struct qcom_pcie *pcie)
@@ -1163,14 +1182,11 @@ static int qcom_pcie_post_init_2_9_0(struct qcom_pcie *pcie)
u32 val;
int i;
- writel(SLV_ADDR_SPACE_SZ,
- pcie->parf + PARF_SLV_ADDR_SPACE_SIZE);
-
val = readl(pcie->parf + PARF_PHY_CTRL);
val &= ~PHY_TEST_PWR_DOWN;
writel(val, pcie->parf + PARF_PHY_CTRL);
- writel(0, pcie->parf + PARF_DBI_BASE_ADDR);
+ qcom_pcie_configure_dbi_atu_base(pcie);
writel(DEVICE_TYPE_RC, pcie->parf + PARF_DEVICE_TYPE);
writel(BYPASS | MSTR_AXI_CLK_EN | AHB_CLK_EN,
@@ -1349,6 +1365,16 @@ static const struct qcom_pcie_ops ops_1_9_0 = {
.config_sid = qcom_pcie_config_sid_1_9_0,
};
+/* Qcom IP rev.: 1.21.0 Synopsys IP rev.: 5.60a */
+static const struct qcom_pcie_ops ops_1_21_0 = {
+ .get_resources = qcom_pcie_get_resources_2_7_0,
+ .init = qcom_pcie_init_2_7_0,
+ .post_init = qcom_pcie_post_init_2_7_0,
+ .host_post_init = qcom_pcie_host_post_init_2_7_0,
+ .deinit = qcom_pcie_deinit_2_7_0,
+ .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
+};
+
/* Qcom IP rev.: 2.9.0 Synopsys IP rev.: 5.00a */
static const struct qcom_pcie_ops ops_2_9_0 = {
.get_resources = qcom_pcie_get_resources_2_9_0,
@@ -1366,6 +1392,11 @@ static const struct qcom_pcie_cfg cfg_1_9_0 = {
.ops = &ops_1_9_0,
};
+static const struct qcom_pcie_cfg cfg_1_34_0 = {
+ .ops = &ops_1_9_0,
+ .override_no_snoop = true,
+};
+
static const struct qcom_pcie_cfg cfg_2_1_0 = {
.ops = &ops_2_1_0,
};
@@ -1391,7 +1422,7 @@ static const struct qcom_pcie_cfg cfg_2_9_0 = {
};
static const struct qcom_pcie_cfg cfg_sc8280xp = {
- .ops = &ops_1_9_0,
+ .ops = &ops_1_21_0,
.no_l0s = true,
};
@@ -1409,6 +1440,9 @@ static int qcom_pcie_icc_init(struct qcom_pcie *pcie)
if (IS_ERR(pcie->icc_mem))
return PTR_ERR(pcie->icc_mem);
+ pcie->icc_cpu = devm_of_icc_get(pci->dev, "cpu-pcie");
+ if (IS_ERR(pcie->icc_cpu))
+ return PTR_ERR(pcie->icc_cpu);
/*
* Some Qualcomm platforms require interconnect bandwidth constraints
* to be set before enabling interconnect clocks.
@@ -1418,23 +1452,35 @@ static int qcom_pcie_icc_init(struct qcom_pcie *pcie)
*/
ret = icc_set_bw(pcie->icc_mem, 0, QCOM_PCIE_LINK_SPEED_TO_BW(1));
if (ret) {
- dev_err(pci->dev, "failed to set interconnect bandwidth: %d\n",
+ dev_err(pci->dev, "Failed to set bandwidth for PCIe-MEM interconnect path: %d\n",
+ ret);
+ return ret;
+ }
+
+ /*
+	 * Since the CPU-PCIe path is only used for activities like register
+	 * accesses of the host controller and endpoint Config/BAR space
+	 * accesses, the HW team recommends using a minimal bandwidth of 1 KBps
+	 * just to keep the path active.
+ */
+ ret = icc_set_bw(pcie->icc_cpu, 0, kBps_to_icc(1));
+ if (ret) {
+ dev_err(pci->dev, "Failed to set bandwidth for CPU-PCIe interconnect path: %d\n",
ret);
+ icc_set_bw(pcie->icc_mem, 0, 0);
return ret;
}
return 0;
}
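For reference, a minimal sketch of the interconnect API used here, with hypothetical names: devm_of_icc_get() resolves the path named in the interconnect-names DT property, and icc_set_bw() takes average and peak bandwidth (kBps_to_icc() converts from KBps):

#include <linux/err.h>
#include <linux/interconnect.h>

static int foo_keep_cpu_path_alive(struct device *dev)
{
	struct icc_path *path;

	path = devm_of_icc_get(dev, "cpu-pcie");
	if (IS_ERR(path))
		return PTR_ERR(path);

	/* avg = 0, peak = 1 KBps: just enough to keep the path powered */
	return icc_set_bw(path, 0, kBps_to_icc(1));
}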
-static void qcom_pcie_icc_update(struct qcom_pcie *pcie)
+static void qcom_pcie_icc_opp_update(struct qcom_pcie *pcie)
{
+ u32 offset, status, width, speed;
struct dw_pcie *pci = pcie->pci;
- u32 offset, status;
- int speed, width;
- int ret;
-
- if (!pcie->icc_mem)
- return;
+ unsigned long freq_kbps;
+ struct dev_pm_opp *opp;
+ int ret, freq_mbps;
offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
status = readw(pci->dbi_base + offset + PCI_EXP_LNKSTA);
@@ -1446,10 +1492,28 @@ static void qcom_pcie_icc_update(struct qcom_pcie *pcie)
speed = FIELD_GET(PCI_EXP_LNKSTA_CLS, status);
width = FIELD_GET(PCI_EXP_LNKSTA_NLW, status);
- ret = icc_set_bw(pcie->icc_mem, 0, width * QCOM_PCIE_LINK_SPEED_TO_BW(speed));
- if (ret) {
- dev_err(pci->dev, "failed to set interconnect bandwidth: %d\n",
- ret);
+ if (pcie->icc_mem) {
+ ret = icc_set_bw(pcie->icc_mem, 0,
+ width * QCOM_PCIE_LINK_SPEED_TO_BW(speed));
+ if (ret) {
+ dev_err(pci->dev, "Failed to set bandwidth for PCIe-MEM interconnect path: %d\n",
+ ret);
+ }
+ } else if (pcie->use_pm_opp) {
+ freq_mbps = pcie_dev_speed_mbps(pcie_link_speed[speed]);
+ if (freq_mbps < 0)
+ return;
+
+ freq_kbps = freq_mbps * KILO;
+ opp = dev_pm_opp_find_freq_exact(pci->dev, freq_kbps * width,
+ true);
+ if (!IS_ERR(opp)) {
+ ret = dev_pm_opp_set_opp(pci->dev, opp);
+ if (ret)
+ dev_err(pci->dev, "Failed to set OPP for freq (%lu): %d\n",
+ freq_kbps * width, ret);
+ dev_pm_opp_put(opp);
+ }
}
}
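When the platform provides an OPP table instead, the code above converts link speed and width into an aggregate throughput value and looks up the matching OPP entry. A rough sketch of that lookup (names hypothetical, error handling trimmed):

#include <linux/pm_opp.h>

static void foo_set_link_opp(struct device *dev, unsigned long kbps_per_lane,
			     u32 lanes)
{
	struct dev_pm_opp *opp;

	/* OPP entries are keyed by aggregate link throughput, not a clock rate */
	opp = dev_pm_opp_find_freq_exact(dev, kbps_per_lane * lanes, true);
	if (IS_ERR(opp))
		return;

	dev_pm_opp_set_opp(dev, opp);
	dev_pm_opp_put(opp);
}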
@@ -1490,15 +1554,43 @@ static void qcom_pcie_init_debugfs(struct qcom_pcie *pcie)
qcom_pcie_link_transition_count);
}
+static irqreturn_t qcom_pcie_global_irq_thread(int irq, void *data)
+{
+ struct qcom_pcie *pcie = data;
+ struct dw_pcie_rp *pp = &pcie->pci->pp;
+ struct device *dev = pcie->pci->dev;
+ u32 status = readl_relaxed(pcie->parf + PARF_INT_ALL_STATUS);
+
+ writel_relaxed(status, pcie->parf + PARF_INT_ALL_CLEAR);
+
+ if (FIELD_GET(PARF_INT_ALL_LINK_UP, status)) {
+ dev_dbg(dev, "Received Link up event. Starting enumeration!\n");
+ /* Rescan the bus to enumerate endpoint devices */
+ pci_lock_rescan_remove();
+ pci_rescan_bus(pp->bridge->bus);
+ pci_unlock_rescan_remove();
+
+ qcom_pcie_icc_opp_update(pcie);
+ } else {
+ dev_WARN_ONCE(dev, 1, "Received unknown event. INT_STATUS: 0x%08x\n",
+ status);
+ }
+
+ return IRQ_HANDLED;
+}
+
static int qcom_pcie_probe(struct platform_device *pdev)
{
const struct qcom_pcie_cfg *pcie_cfg;
+ unsigned long max_freq = ULONG_MAX;
struct device *dev = &pdev->dev;
+ struct dev_pm_opp *opp;
struct qcom_pcie *pcie;
struct dw_pcie_rp *pp;
struct resource *res;
struct dw_pcie *pci;
- int ret;
+ int ret, irq;
+ char *name;
pcie_cfg = of_device_get_match_data(dev);
if (!pcie_cfg || !pcie_cfg->ops) {
@@ -1561,9 +1653,45 @@ static int qcom_pcie_probe(struct platform_device *pdev)
goto err_pm_runtime_put;
}
- ret = qcom_pcie_icc_init(pcie);
- if (ret)
+ /* OPP table is optional */
+ ret = devm_pm_opp_of_add_table(dev);
+ if (ret && ret != -ENODEV) {
+ dev_err_probe(dev, ret, "Failed to add OPP table\n");
goto err_pm_runtime_put;
+ }
+
+ /*
+	 * Before the PCIe link is initialized, vote for the highest OPP in the
+	 * OPP table, so that the maximum voltage corner is requested and the
+	 * link can come up at the maximum supported speed. At the end of
+	 * probe(), the OPP is updated using qcom_pcie_icc_opp_update().
+ */
+ if (!ret) {
+ opp = dev_pm_opp_find_freq_floor(dev, &max_freq);
+ if (IS_ERR(opp)) {
+ ret = PTR_ERR(opp);
+ dev_err_probe(pci->dev, ret,
+ "Unable to find max freq OPP\n");
+ goto err_pm_runtime_put;
+ } else {
+ ret = dev_pm_opp_set_opp(dev, opp);
+ }
+
+ dev_pm_opp_put(opp);
+ if (ret) {
+ dev_err_probe(pci->dev, ret,
+ "Failed to set OPP for freq %lu\n",
+ max_freq);
+ goto err_pm_runtime_put;
+ }
+
+ pcie->use_pm_opp = true;
+ } else {
+	} else {
+		/* Skip ICC init when OPP is supported, as OPP then handles bandwidth */
+ ret = qcom_pcie_icc_init(pcie);
+ if (ret)
+ goto err_pm_runtime_put;
+ }
ret = pcie->cfg->ops->get_resources(pcie);
if (ret)
@@ -1577,19 +1705,46 @@ static int qcom_pcie_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pcie);
+ irq = platform_get_irq_byname_optional(pdev, "global");
+ if (irq > 0)
+ pp->use_linkup_irq = true;
+
ret = dw_pcie_host_init(pp);
if (ret) {
dev_err(dev, "cannot initialize host\n");
goto err_phy_exit;
}
- qcom_pcie_icc_update(pcie);
+ name = devm_kasprintf(dev, GFP_KERNEL, "qcom_pcie_global_irq%d",
+ pci_domain_nr(pp->bridge->bus));
+ if (!name) {
+ ret = -ENOMEM;
+ goto err_host_deinit;
+ }
+
+ if (irq > 0) {
+ ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+ qcom_pcie_global_irq_thread,
+ IRQF_ONESHOT, name, pcie);
+ if (ret) {
+ dev_err_probe(&pdev->dev, ret,
+ "Failed to request Global IRQ\n");
+ goto err_host_deinit;
+ }
+
+ writel_relaxed(PARF_INT_ALL_LINK_UP | PARF_INT_MSI_DEV_0_7,
+ pcie->parf + PARF_INT_ALL_MASK);
+ }
+
+ qcom_pcie_icc_opp_update(pcie);
if (pcie->mhi)
qcom_pcie_init_debugfs(pcie);
return 0;
+err_host_deinit:
+ dw_pcie_host_deinit(pp);
err_phy_exit:
phy_exit(pcie->phy);
err_pm_runtime_put:
@@ -1602,16 +1757,20 @@ err_pm_runtime_put:
static int qcom_pcie_suspend_noirq(struct device *dev)
{
struct qcom_pcie *pcie = dev_get_drvdata(dev);
- int ret;
+ int ret = 0;
/*
* Set minimum bandwidth required to keep data path functional during
* suspend.
*/
- ret = icc_set_bw(pcie->icc_mem, 0, kBps_to_icc(1));
- if (ret) {
- dev_err(dev, "Failed to set interconnect bandwidth: %d\n", ret);
- return ret;
+ if (pcie->icc_mem) {
+ ret = icc_set_bw(pcie->icc_mem, 0, kBps_to_icc(1));
+ if (ret) {
+ dev_err(dev,
+ "Failed to set bandwidth for PCIe-MEM interconnect path: %d\n",
+ ret);
+ return ret;
+ }
}
/*
@@ -1634,7 +1793,21 @@ static int qcom_pcie_suspend_noirq(struct device *dev)
pcie->suspended = true;
}
- return 0;
+ /*
+	 * Only disable the CPU-PCIe interconnect path if the suspend is
+	 * non-S2RAM, because on some platforms DBI accesses can happen very
+	 * late during S2RAM and an inactive CPU-PCIe interconnect path may
+	 * lead to a NoC error.
+ */
+ if (pm_suspend_target_state != PM_SUSPEND_MEM) {
+ ret = icc_disable(pcie->icc_cpu);
+ if (ret)
+ dev_err(dev, "Failed to disable CPU-PCIe interconnect path: %d\n", ret);
+
+ if (pcie->use_pm_opp)
+ dev_pm_opp_set_opp(pcie->pci->dev, NULL);
+ }
+ return ret;
}
static int qcom_pcie_resume_noirq(struct device *dev)
@@ -1642,6 +1815,14 @@ static int qcom_pcie_resume_noirq(struct device *dev)
struct qcom_pcie *pcie = dev_get_drvdata(dev);
int ret;
+ if (pm_suspend_target_state != PM_SUSPEND_MEM) {
+ ret = icc_enable(pcie->icc_cpu);
+ if (ret) {
+ dev_err(dev, "Failed to enable CPU-PCIe interconnect path: %d\n", ret);
+ return ret;
+ }
+ }
+
if (pcie->suspended) {
ret = qcom_pcie_host_init(&pcie->pci->pp);
if (ret)
@@ -1650,7 +1831,7 @@ static int qcom_pcie_resume_noirq(struct device *dev)
pcie->suspended = false;
}
- qcom_pcie_icc_update(pcie);
+ qcom_pcie_icc_opp_update(pcie);
return 0;
}
@@ -1664,10 +1845,11 @@ static const struct of_device_id qcom_pcie_match[] = {
{ .compatible = "qcom,pcie-ipq8064-v2", .data = &cfg_2_1_0 },
{ .compatible = "qcom,pcie-ipq8074", .data = &cfg_2_3_3 },
{ .compatible = "qcom,pcie-ipq8074-gen3", .data = &cfg_2_9_0 },
+ { .compatible = "qcom,pcie-ipq9574", .data = &cfg_2_9_0 },
{ .compatible = "qcom,pcie-msm8996", .data = &cfg_2_3_2 },
{ .compatible = "qcom,pcie-qcs404", .data = &cfg_2_4_0 },
{ .compatible = "qcom,pcie-sa8540p", .data = &cfg_sc8280xp },
- { .compatible = "qcom,pcie-sa8775p", .data = &cfg_1_9_0},
+ { .compatible = "qcom,pcie-sa8775p", .data = &cfg_1_34_0},
{ .compatible = "qcom,pcie-sc7280", .data = &cfg_1_9_0 },
{ .compatible = "qcom,pcie-sc8180x", .data = &cfg_1_9_0 },
{ .compatible = "qcom,pcie-sc8280xp", .data = &cfg_sc8280xp },
@@ -1679,7 +1861,7 @@ static const struct of_device_id qcom_pcie_match[] = {
{ .compatible = "qcom,pcie-sm8450-pcie0", .data = &cfg_1_9_0 },
{ .compatible = "qcom,pcie-sm8450-pcie1", .data = &cfg_1_9_0 },
{ .compatible = "qcom,pcie-sm8550", .data = &cfg_1_9_0 },
- { .compatible = "qcom,pcie-x1e80100", .data = &cfg_1_9_0 },
+ { .compatible = "qcom,pcie-x1e80100", .data = &cfg_sc8280xp },
{ }
};
diff --git a/drivers/pci/controller/dwc/pcie-rcar-gen4.c b/drivers/pci/controller/dwc/pcie-rcar-gen4.c
index 0be760ed420b..fc872dd35029 100644
--- a/drivers/pci/controller/dwc/pcie-rcar-gen4.c
+++ b/drivers/pci/controller/dwc/pcie-rcar-gen4.c
@@ -2,11 +2,17 @@
/*
* PCIe controller driver for Renesas R-Car Gen4 Series SoCs
* Copyright (C) 2022-2023 Renesas Electronics Corporation
+ *
+ * The r8a779g0 (R-Car V4H) controller requires specific firmware to be
+ * provided to initialize the PHY; otherwise, the PCIe controller will not
+ * work.
*/
#include <linux/delay.h>
+#include <linux/firmware.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/pci.h>
@@ -20,9 +26,10 @@
/* Renesas-specific */
/* PCIe Mode Setting Register 0 */
#define PCIEMSR0 0x0000
-#define BIFUR_MOD_SET_ON BIT(0)
+#define APP_SRIS_MODE BIT(6)
#define DEVICE_TYPE_EP 0
#define DEVICE_TYPE_RC BIT(4)
+#define BIFUR_MOD_SET_ON BIT(0)
/* PCIe Interrupt Status 0 */
#define PCIEINTSTS0 0x0084
@@ -37,47 +44,49 @@
#define PCIEDMAINTSTSEN 0x0314
#define PCIEDMAINTSTSEN_INIT GENMASK(15, 0)
+/* Port Logic Registers 89 */
+#define PRTLGC89 0x0b70
+
+/* Port Logic Registers 90 */
+#define PRTLGC90 0x0b74
+
/* PCIe Reset Control Register 1 */
#define PCIERSTCTRL1 0x0014
#define APP_HOLD_PHY_RST BIT(16)
#define APP_LTSSM_ENABLE BIT(0)
+/* PCIe Power Management Control */
+#define PCIEPWRMNGCTRL 0x0070
+#define APP_CLK_REQ_N BIT(11)
+#define APP_CLK_PM_EN BIT(10)
+
#define RCAR_NUM_SPEED_CHANGE_RETRIES 10
#define RCAR_MAX_LINK_SPEED 4
#define RCAR_GEN4_PCIE_EP_FUNC_DBI_OFFSET 0x1000
#define RCAR_GEN4_PCIE_EP_FUNC_DBI2_OFFSET 0x800
+#define RCAR_GEN4_PCIE_FIRMWARE_NAME "rcar_gen4_pcie.bin"
+#define RCAR_GEN4_PCIE_FIRMWARE_BASE_ADDR 0xc000
+MODULE_FIRMWARE(RCAR_GEN4_PCIE_FIRMWARE_NAME);
+
+struct rcar_gen4_pcie;
+struct rcar_gen4_pcie_drvdata {
+ void (*additional_common_init)(struct rcar_gen4_pcie *rcar);
+ int (*ltssm_control)(struct rcar_gen4_pcie *rcar, bool enable);
+ enum dw_pcie_device_mode mode;
+};
+
struct rcar_gen4_pcie {
struct dw_pcie dw;
void __iomem *base;
+ void __iomem *phy_base;
struct platform_device *pdev;
- enum dw_pcie_device_mode mode;
+ const struct rcar_gen4_pcie_drvdata *drvdata;
};
#define to_rcar_gen4_pcie(_dw) container_of(_dw, struct rcar_gen4_pcie, dw)
/* Common */
-static void rcar_gen4_pcie_ltssm_enable(struct rcar_gen4_pcie *rcar,
- bool enable)
-{
- u32 val;
-
- val = readl(rcar->base + PCIERSTCTRL1);
- if (enable) {
- val |= APP_LTSSM_ENABLE;
- val &= ~APP_HOLD_PHY_RST;
- } else {
- /*
- * Since the datasheet of R-Car doesn't mention how to assert
- * the APP_HOLD_PHY_RST, don't assert it again. Otherwise,
- * hang-up issue happened in the dw_edma_core_off() when
- * the controller didn't detect a PCI device.
- */
- val &= ~APP_LTSSM_ENABLE;
- }
- writel(val, rcar->base + PCIERSTCTRL1);
-}
-
static int rcar_gen4_pcie_link_up(struct dw_pcie *dw)
{
struct rcar_gen4_pcie *rcar = to_rcar_gen4_pcie(dw);
@@ -123,21 +132,25 @@ static int rcar_gen4_pcie_speed_change(struct dw_pcie *dw)
static int rcar_gen4_pcie_start_link(struct dw_pcie *dw)
{
struct rcar_gen4_pcie *rcar = to_rcar_gen4_pcie(dw);
- int i, changes;
+ int i, changes, ret;
- rcar_gen4_pcie_ltssm_enable(rcar, true);
+ if (rcar->drvdata->ltssm_control) {
+ ret = rcar->drvdata->ltssm_control(rcar, true);
+ if (ret)
+ return ret;
+ }
/*
- * Require direct speed change with retrying here if the link_gen is
- * PCIe Gen2 or higher.
+ * Require direct speed change with retrying here if the max_link_speed
+ * is PCIe Gen2 or higher.
*/
- changes = min_not_zero(dw->link_gen, RCAR_MAX_LINK_SPEED) - 1;
+ changes = min_not_zero(dw->max_link_speed, RCAR_MAX_LINK_SPEED) - 1;
/*
* Since dw_pcie_setup_rc() sets it once, PCIe Gen2 will be trained.
* So, this needs remaining times for up to PCIe Gen4 if RC mode.
*/
- if (changes && rcar->mode == DW_PCIE_RC_TYPE)
+ if (changes && rcar->drvdata->mode == DW_PCIE_RC_TYPE)
changes--;
for (i = 0; i < changes; i++) {
@@ -153,7 +166,8 @@ static void rcar_gen4_pcie_stop_link(struct dw_pcie *dw)
{
struct rcar_gen4_pcie *rcar = to_rcar_gen4_pcie(dw);
- rcar_gen4_pcie_ltssm_enable(rcar, false);
+ if (rcar->drvdata->ltssm_control)
+ rcar->drvdata->ltssm_control(rcar, false);
}
static int rcar_gen4_pcie_common_init(struct rcar_gen4_pcie *rcar)
@@ -172,9 +186,9 @@ static int rcar_gen4_pcie_common_init(struct rcar_gen4_pcie *rcar)
reset_control_assert(dw->core_rsts[DW_PCIE_PWR_RST].rstc);
val = readl(rcar->base + PCIEMSR0);
- if (rcar->mode == DW_PCIE_RC_TYPE) {
+ if (rcar->drvdata->mode == DW_PCIE_RC_TYPE) {
val |= DEVICE_TYPE_RC;
- } else if (rcar->mode == DW_PCIE_EP_TYPE) {
+ } else if (rcar->drvdata->mode == DW_PCIE_EP_TYPE) {
val |= DEVICE_TYPE_EP;
} else {
ret = -EINVAL;
@@ -190,6 +204,9 @@ static int rcar_gen4_pcie_common_init(struct rcar_gen4_pcie *rcar)
if (ret)
goto err_unprepare;
+ if (rcar->drvdata->additional_common_init)
+ rcar->drvdata->additional_common_init(rcar);
+
return 0;
err_unprepare:
@@ -231,6 +248,10 @@ static void rcar_gen4_pcie_unprepare(struct rcar_gen4_pcie *rcar)
static int rcar_gen4_pcie_get_resources(struct rcar_gen4_pcie *rcar)
{
+ rcar->phy_base = devm_platform_ioremap_resource_byname(rcar->pdev, "phy");
+ if (IS_ERR(rcar->phy_base))
+ return PTR_ERR(rcar->phy_base);
+
/* Renesas-specific registers */
rcar->base = devm_platform_ioremap_resource_byname(rcar->pdev, "app");
@@ -255,7 +276,7 @@ static struct rcar_gen4_pcie *rcar_gen4_pcie_alloc(struct platform_device *pdev)
rcar->dw.ops = &dw_pcie_ops;
rcar->dw.dev = dev;
rcar->pdev = pdev;
- dw_pcie_cap_set(&rcar->dw, EDMA_UNROLL);
+ rcar->dw.edma.mf = EDMA_MF_EDMA_UNROLL;
dw_pcie_cap_set(&rcar->dw, REQ_RES);
platform_set_drvdata(pdev, rcar);
@@ -352,11 +373,8 @@ static void rcar_gen4_pcie_ep_init(struct dw_pcie_ep *ep)
dw_pcie_ep_reset_bar(pci, bar);
}
-static void rcar_gen4_pcie_ep_deinit(struct dw_pcie_ep *ep)
+static void rcar_gen4_pcie_ep_deinit(struct rcar_gen4_pcie *rcar)
{
- struct dw_pcie *dw = to_dw_pcie_from_ep(ep);
- struct rcar_gen4_pcie *rcar = to_rcar_gen4_pcie(dw);
-
writel(0, rcar->base + PCIEDMAINTSTSEN);
rcar_gen4_pcie_common_deinit(rcar);
}
@@ -410,7 +428,6 @@ static unsigned int rcar_gen4_pcie_ep_get_dbi2_offset(struct dw_pcie_ep *ep,
static const struct dw_pcie_ep_ops pcie_ep_ops = {
.pre_init = rcar_gen4_pcie_ep_pre_init,
.init = rcar_gen4_pcie_ep_init,
- .deinit = rcar_gen4_pcie_ep_deinit,
.raise_irq = rcar_gen4_pcie_ep_raise_irq,
.get_features = rcar_gen4_pcie_ep_get_features,
.get_dbi_offset = rcar_gen4_pcie_ep_get_dbi_offset,
@@ -420,26 +437,46 @@ static const struct dw_pcie_ep_ops pcie_ep_ops = {
static int rcar_gen4_add_dw_pcie_ep(struct rcar_gen4_pcie *rcar)
{
struct dw_pcie_ep *ep = &rcar->dw.ep;
+ struct device *dev = rcar->dw.dev;
+ int ret;
if (!IS_ENABLED(CONFIG_PCIE_RCAR_GEN4_EP))
return -ENODEV;
ep->ops = &pcie_ep_ops;
- return dw_pcie_ep_init(ep);
+ ret = dw_pcie_ep_init(ep);
+ if (ret) {
+ rcar_gen4_pcie_ep_deinit(rcar);
+ return ret;
+ }
+
+ ret = dw_pcie_ep_init_registers(ep);
+ if (ret) {
+ dev_err(dev, "Failed to initialize DWC endpoint registers\n");
+ dw_pcie_ep_deinit(ep);
+ rcar_gen4_pcie_ep_deinit(rcar);
+ }
+
+ pci_epc_init_notify(ep->epc);
+
+ return ret;
}
static void rcar_gen4_remove_dw_pcie_ep(struct rcar_gen4_pcie *rcar)
{
- dw_pcie_ep_exit(&rcar->dw.ep);
+ dw_pcie_ep_deinit(&rcar->dw.ep);
+ rcar_gen4_pcie_ep_deinit(rcar);
}
/* Common */
static int rcar_gen4_add_dw_pcie(struct rcar_gen4_pcie *rcar)
{
- rcar->mode = (uintptr_t)of_device_get_match_data(&rcar->pdev->dev);
+ rcar->drvdata = of_device_get_match_data(&rcar->pdev->dev);
+ if (!rcar->drvdata)
+ return -EINVAL;
- switch (rcar->mode) {
+ switch (rcar->drvdata->mode) {
case DW_PCIE_RC_TYPE:
return rcar_gen4_add_dw_pcie_rp(rcar);
case DW_PCIE_EP_TYPE:
@@ -480,7 +517,7 @@ err_unprepare:
static void rcar_gen4_remove_dw_pcie(struct rcar_gen4_pcie *rcar)
{
- switch (rcar->mode) {
+ switch (rcar->drvdata->mode) {
case DW_PCIE_RC_TYPE:
rcar_gen4_remove_dw_pcie_rp(rcar);
break;
@@ -500,14 +537,232 @@ static void rcar_gen4_pcie_remove(struct platform_device *pdev)
rcar_gen4_pcie_unprepare(rcar);
}
+static int r8a779f0_pcie_ltssm_control(struct rcar_gen4_pcie *rcar, bool enable)
+{
+ u32 val;
+
+ val = readl(rcar->base + PCIERSTCTRL1);
+ if (enable) {
+ val |= APP_LTSSM_ENABLE;
+ val &= ~APP_HOLD_PHY_RST;
+ } else {
+ /*
+ * Since the datasheet of R-Car doesn't mention how to assert
+		 * Since the R-Car datasheet doesn't mention how to assert
+		 * APP_HOLD_PHY_RST, don't assert it again. Otherwise, a
+		 * hang-up occurred in dw_edma_core_off() when the controller
+		 * didn't detect a PCI device.
+ val &= ~APP_LTSSM_ENABLE;
+ }
+ writel(val, rcar->base + PCIERSTCTRL1);
+
+ return 0;
+}
+
+static void rcar_gen4_pcie_additional_common_init(struct rcar_gen4_pcie *rcar)
+{
+ struct dw_pcie *dw = &rcar->dw;
+ u32 val;
+
+ val = dw_pcie_readl_dbi(dw, PCIE_PORT_LANE_SKEW);
+ val &= ~PORT_LANE_SKEW_INSERT_MASK;
+ if (dw->num_lanes < 4)
+ val |= BIT(6);
+ dw_pcie_writel_dbi(dw, PCIE_PORT_LANE_SKEW, val);
+
+ val = readl(rcar->base + PCIEPWRMNGCTRL);
+ val |= APP_CLK_REQ_N | APP_CLK_PM_EN;
+ writel(val, rcar->base + PCIEPWRMNGCTRL);
+}
+
+static void rcar_gen4_pcie_phy_reg_update_bits(struct rcar_gen4_pcie *rcar,
+ u32 offset, u32 mask, u32 val)
+{
+ u32 tmp;
+
+ tmp = readl(rcar->phy_base + offset);
+ tmp &= ~mask;
+ tmp |= val;
+ writel(tmp, rcar->phy_base + offset);
+}
+
+/*
+ * The SoC datasheet suggests checking port logic register bits during the
+ * firmware write. If the read returns a non-zero value, this function
+ * returns -EAGAIN, indicating that the write needs to be done again. If the
+ * read returns zero, it returns 0 to indicate success.
+ */
+static int rcar_gen4_pcie_reg_test_bit(struct rcar_gen4_pcie *rcar,
+ u32 offset, u32 mask)
+{
+ struct dw_pcie *dw = &rcar->dw;
+
+ if (dw_pcie_readl_dbi(dw, offset) & mask)
+ return -EAGAIN;
+
+ return 0;
+}
+
+static int rcar_gen4_pcie_download_phy_firmware(struct rcar_gen4_pcie *rcar)
+{
+	/* The check_addr values are magic numbers from the datasheet */
+ static const u32 check_addr[] = {
+ 0x00101018,
+ 0x00101118,
+ 0x00101021,
+ 0x00101121,
+ };
+ struct dw_pcie *dw = &rcar->dw;
+ const struct firmware *fw;
+ unsigned int i, timeout;
+ u32 data;
+ int ret;
+
+ ret = request_firmware(&fw, RCAR_GEN4_PCIE_FIRMWARE_NAME, dw->dev);
+ if (ret) {
+ dev_err(dw->dev, "Failed to load firmware (%s): %d\n",
+ RCAR_GEN4_PCIE_FIRMWARE_NAME, ret);
+ return ret;
+ }
+
+ for (i = 0; i < (fw->size / 2); i++) {
+ data = fw->data[(i * 2) + 1] << 8 | fw->data[i * 2];
+ timeout = 100;
+ do {
+ dw_pcie_writel_dbi(dw, PRTLGC89, RCAR_GEN4_PCIE_FIRMWARE_BASE_ADDR + i);
+ dw_pcie_writel_dbi(dw, PRTLGC90, data);
+ if (!rcar_gen4_pcie_reg_test_bit(rcar, PRTLGC89, BIT(30)))
+ break;
+ if (!(--timeout)) {
+ ret = -ETIMEDOUT;
+ goto exit;
+ }
+ usleep_range(100, 200);
+ } while (1);
+ }
+
+ rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x0f8, BIT(17), BIT(17));
+
+ for (i = 0; i < ARRAY_SIZE(check_addr); i++) {
+ timeout = 100;
+ do {
+ dw_pcie_writel_dbi(dw, PRTLGC89, check_addr[i]);
+ ret = rcar_gen4_pcie_reg_test_bit(rcar, PRTLGC89, BIT(30));
+ ret |= rcar_gen4_pcie_reg_test_bit(rcar, PRTLGC90, BIT(0));
+ if (!ret)
+ break;
+ if (!(--timeout)) {
+ ret = -ETIMEDOUT;
+ goto exit;
+ }
+ usleep_range(100, 200);
+ } while (1);
+ }
+
+exit:
+ release_firmware(fw);
+
+ return ret;
+}
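The retry loops above re-issue the PRTLGC89/PRTLGC90 writes on every iteration, so a plain poll is not a drop-in replacement; still, the waiting half could arguably be expressed with read_poll_timeout() from <linux/iopoll.h>, which this patch already includes. A hedged sketch, assuming the same busy bit:

static int foo_wait_prtlgc89_idle(struct dw_pcie *dw)
{
	u32 reg;

	/* Poll PRTLGC89 every 100 us until BIT(30) clears, give up after 20 ms */
	return read_poll_timeout(dw_pcie_readl_dbi, reg, !(reg & BIT(30)),
				 100, 20000, false, dw, PRTLGC89);
}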
+
+static int rcar_gen4_pcie_ltssm_control(struct rcar_gen4_pcie *rcar, bool enable)
+{
+ struct dw_pcie *dw = &rcar->dw;
+ u32 val;
+ int ret;
+
+ if (!enable) {
+ val = readl(rcar->base + PCIERSTCTRL1);
+ val &= ~APP_LTSSM_ENABLE;
+ writel(val, rcar->base + PCIERSTCTRL1);
+
+ return 0;
+ }
+
+ val = dw_pcie_readl_dbi(dw, PCIE_PORT_FORCE);
+ val |= PORT_FORCE_DO_DESKEW_FOR_SRIS;
+ dw_pcie_writel_dbi(dw, PCIE_PORT_FORCE, val);
+
+ val = readl(rcar->base + PCIEMSR0);
+ val |= APP_SRIS_MODE;
+ writel(val, rcar->base + PCIEMSR0);
+
+ /*
+	 * The R-Car Gen4 datasheet doesn't name the PHY registers, but the
+	 * initialization procedure does list these offsets, so this driver
+	 * uses these magic offset numbers as-is.
+ */
+ rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x700, BIT(28), 0);
+ rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x700, BIT(20), 0);
+ rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x700, BIT(12), 0);
+ rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x700, BIT(4), 0);
+
+ rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x148, GENMASK(23, 22), BIT(22));
+ rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x148, GENMASK(18, 16), GENMASK(17, 16));
+ rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x148, GENMASK(7, 6), BIT(6));
+ rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x148, GENMASK(2, 0), GENMASK(11, 0));
+ rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x1d4, GENMASK(16, 15), GENMASK(16, 15));
+ rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x514, BIT(26), BIT(26));
+ rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x0f8, BIT(16), 0);
+ rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x0f8, BIT(19), BIT(19));
+
+ val = readl(rcar->base + PCIERSTCTRL1);
+ val &= ~APP_HOLD_PHY_RST;
+ writel(val, rcar->base + PCIERSTCTRL1);
+
+ ret = readl_poll_timeout(rcar->phy_base + 0x0f8, val, !(val & BIT(18)), 100, 10000);
+ if (ret < 0)
+ return ret;
+
+ ret = rcar_gen4_pcie_download_phy_firmware(rcar);
+ if (ret)
+ return ret;
+
+ val = readl(rcar->base + PCIERSTCTRL1);
+ val |= APP_LTSSM_ENABLE;
+ writel(val, rcar->base + PCIERSTCTRL1);
+
+ return 0;
+}
+
+static struct rcar_gen4_pcie_drvdata drvdata_r8a779f0_pcie = {
+ .ltssm_control = r8a779f0_pcie_ltssm_control,
+ .mode = DW_PCIE_RC_TYPE,
+};
+
+static struct rcar_gen4_pcie_drvdata drvdata_r8a779f0_pcie_ep = {
+ .ltssm_control = r8a779f0_pcie_ltssm_control,
+ .mode = DW_PCIE_EP_TYPE,
+};
+
+static struct rcar_gen4_pcie_drvdata drvdata_rcar_gen4_pcie = {
+ .additional_common_init = rcar_gen4_pcie_additional_common_init,
+ .ltssm_control = rcar_gen4_pcie_ltssm_control,
+ .mode = DW_PCIE_RC_TYPE,
+};
+
+static struct rcar_gen4_pcie_drvdata drvdata_rcar_gen4_pcie_ep = {
+ .additional_common_init = rcar_gen4_pcie_additional_common_init,
+ .ltssm_control = rcar_gen4_pcie_ltssm_control,
+ .mode = DW_PCIE_EP_TYPE,
+};
+
static const struct of_device_id rcar_gen4_pcie_of_match[] = {
{
+ .compatible = "renesas,r8a779f0-pcie",
+ .data = &drvdata_r8a779f0_pcie,
+ },
+ {
+ .compatible = "renesas,r8a779f0-pcie-ep",
+ .data = &drvdata_r8a779f0_pcie_ep,
+ },
+ {
.compatible = "renesas,rcar-gen4-pcie",
- .data = (void *)DW_PCIE_RC_TYPE,
+ .data = &drvdata_rcar_gen4_pcie,
},
{
.compatible = "renesas,rcar-gen4-pcie-ep",
- .data = (void *)DW_PCIE_EP_TYPE,
+ .data = &drvdata_rcar_gen4_pcie_ep,
},
{},
};
@@ -520,7 +775,7 @@ static struct platform_driver rcar_gen4_pcie_driver = {
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
.probe = rcar_gen4_pcie_probe,
- .remove_new = rcar_gen4_pcie_remove,
+ .remove = rcar_gen4_pcie_remove,
};
module_platform_driver(rcar_gen4_pcie_driver);
diff --git a/drivers/pci/controller/dwc/pcie-spear13xx.c b/drivers/pci/controller/dwc/pcie-spear13xx.c
index 201dced209f0..ff986ced56b2 100644
--- a/drivers/pci/controller/dwc/pcie-spear13xx.c
+++ b/drivers/pci/controller/dwc/pcie-spear13xx.c
@@ -233,7 +233,7 @@ static int spear13xx_pcie_probe(struct platform_device *pdev)
}
if (of_property_read_bool(np, "st,pcie-is-gen1"))
- pci->link_gen = 1;
+ pci->max_link_speed = 1;
platform_set_drvdata(pdev, spear13xx_pcie);
diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c
index 1f7b662cb8e1..5103995cd6c7 100644
--- a/drivers/pci/controller/dwc/pcie-tegra194.c
+++ b/drivers/pci/controller/dwc/pcie-tegra194.c
@@ -13,7 +13,6 @@
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
-#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/interconnect.h>
#include <linux/interrupt.h>
@@ -21,7 +20,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
@@ -179,17 +177,12 @@
#define N_FTS_VAL 52
#define FTS_VAL 52
-#define GEN3_EQ_CONTROL_OFF 0x8a8
-#define GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT 8
-#define GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK GENMASK(23, 8)
-#define GEN3_EQ_CONTROL_OFF_FB_MODE_MASK GENMASK(3, 0)
-
#define PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT 0x8D0
-#define AMBA_ERROR_RESPONSE_CRS_SHIFT 3
-#define AMBA_ERROR_RESPONSE_CRS_MASK GENMASK(1, 0)
-#define AMBA_ERROR_RESPONSE_CRS_OKAY 0
-#define AMBA_ERROR_RESPONSE_CRS_OKAY_FFFFFFFF 1
-#define AMBA_ERROR_RESPONSE_CRS_OKAY_FFFF0001 2
+#define AMBA_ERROR_RESPONSE_RRS_SHIFT 3
+#define AMBA_ERROR_RESPONSE_RRS_MASK GENMASK(1, 0)
+#define AMBA_ERROR_RESPONSE_RRS_OKAY 0
+#define AMBA_ERROR_RESPONSE_RRS_OKAY_FFFFFFFF 1
+#define AMBA_ERROR_RESPONSE_RRS_OKAY_FFFF0001 2
#define MSIX_ADDR_MATCH_LOW_OFF 0x940
#define MSIX_ADDR_MATCH_LOW_OFF_EN BIT(0)
@@ -308,10 +301,6 @@ static inline u32 appl_readl(struct tegra_pcie_dw *pcie, const u32 reg)
return readl_relaxed(pcie->appl_base + reg);
}
-struct tegra_pcie_soc {
- enum dw_pcie_device_mode mode;
-};
-
static void tegra_pcie_icc_set(struct tegra_pcie_dw *pcie)
{
struct dw_pcie *pci = &pcie->pci;
@@ -867,9 +856,9 @@ static void config_gen3_gen4_eq_presets(struct tegra_pcie_dw *pcie)
dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
val = dw_pcie_readl_dbi(pci, GEN3_EQ_CONTROL_OFF);
- val &= ~GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK;
- val |= (0x3ff << GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT);
- val &= ~GEN3_EQ_CONTROL_OFF_FB_MODE_MASK;
+ val &= ~GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC;
+ val |= FIELD_PREP(GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC, 0x3ff);
+ val &= ~GEN3_EQ_CONTROL_OFF_FB_MODE;
dw_pcie_writel_dbi(pci, GEN3_EQ_CONTROL_OFF, val);
val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
@@ -878,10 +867,10 @@ static void config_gen3_gen4_eq_presets(struct tegra_pcie_dw *pcie)
dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
val = dw_pcie_readl_dbi(pci, GEN3_EQ_CONTROL_OFF);
- val &= ~GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK;
- val |= (pcie->of_data->gen4_preset_vec <<
- GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT);
- val &= ~GEN3_EQ_CONTROL_OFF_FB_MODE_MASK;
+ val &= ~GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC;
+ val |= FIELD_PREP(GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC,
+ pcie->of_data->gen4_preset_vec);
+ val &= ~GEN3_EQ_CONTROL_OFF_FB_MODE;
dw_pcie_writel_dbi(pci, GEN3_EQ_CONTROL_OFF, val);
val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
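The switch to FIELD_PREP() keeps the field shift implicit in the mask. A worked example, assuming the new GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC macro still covers bits 23:8:

#include <linux/bitfield.h>
#include <linux/bits.h>

#define EXAMPLE_PSET_REQ_VEC	GENMASK(23, 8)

static u32 example_set_pset_vec(u32 val, u16 vec)
{
	val &= ~EXAMPLE_PSET_REQ_VEC;
	/* FIELD_PREP(GENMASK(23, 8), 0x3ff) == 0x3ff << 8 == 0x3ff00 */
	return val | FIELD_PREP(EXAMPLE_PSET_REQ_VEC, vec);
}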
@@ -913,11 +902,11 @@ static int tegra_pcie_dw_host_init(struct dw_pcie_rp *pp)
dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);
- /* Enable as 0xFFFF0001 response for CRS */
+ /* Enable as 0xFFFF0001 response for RRS */
val = dw_pcie_readl_dbi(pci, PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT);
- val &= ~(AMBA_ERROR_RESPONSE_CRS_MASK << AMBA_ERROR_RESPONSE_CRS_SHIFT);
- val |= (AMBA_ERROR_RESPONSE_CRS_OKAY_FFFF0001 <<
- AMBA_ERROR_RESPONSE_CRS_SHIFT);
+ val &= ~(AMBA_ERROR_RESPONSE_RRS_MASK << AMBA_ERROR_RESPONSE_RRS_SHIFT);
+ val |= (AMBA_ERROR_RESPONSE_RRS_OKAY_FFFF0001 <<
+ AMBA_ERROR_RESPONSE_RRS_SHIFT);
dw_pcie_writel_dbi(pci, PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT, val);
/* Clear Slot Clock Configuration bit if SRNS configuration */
@@ -1793,6 +1782,10 @@ static void pex_ep_event_pex_rst_deassert(struct tegra_pcie_dw *pcie)
goto fail_phy;
}
+ /* Perform cleanup that requires refclk */
+ pci_epc_deinit_notify(pcie->pci.ep.epc);
+ dw_pcie_ep_cleanup(&pcie->pci.ep);
+
/* Clear any stale interrupt statuses */
appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L0);
appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_0_0);
@@ -1895,13 +1888,13 @@ static void pex_ep_event_pex_rst_deassert(struct tegra_pcie_dw *pcie)
val = (upper_32_bits(ep->msi_mem_phys) & MSIX_ADDR_MATCH_HIGH_OFF_MASK);
dw_pcie_writel_dbi(pci, MSIX_ADDR_MATCH_HIGH_OFF, val);
- ret = dw_pcie_ep_init_complete(ep);
+ ret = dw_pcie_ep_init_registers(ep);
if (ret) {
dev_err(dev, "Failed to complete initialization: %d\n", ret);
goto fail_init_complete;
}
- dw_pcie_ep_init_notify(ep);
+ pci_epc_init_notify(ep->epc);
/* Program the private control to allow sending LTR upstream */
if (pcie->of_data->has_ltr_req_fix) {
@@ -2004,7 +1997,6 @@ static int tegra_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
static const struct pci_epc_features tegra_pcie_epc_features = {
.linkup_notifier = true,
- .core_init_notifier = true,
.msi_capable = false,
.msix_capable = false,
.bar[BAR_0] = { .type = BAR_FIXED, .fixed_size = SZ_1M,
@@ -2014,6 +2006,7 @@ static const struct pci_epc_features tegra_pcie_epc_features = {
.bar[BAR_3] = { .type = BAR_RESERVED, },
.bar[BAR_4] = { .type = BAR_RESERVED, },
.bar[BAR_5] = { .type = BAR_RESERVED, },
+ .align = SZ_64K,
};
static const struct pci_epc_features*
@@ -2273,11 +2266,14 @@ static int tegra_pcie_dw_probe(struct platform_device *pdev)
ret = tegra_pcie_config_ep(pcie, pdev);
if (ret < 0)
goto fail;
+ else
+ return 0;
break;
default:
dev_err(dev, "Invalid PCIe device type %d\n",
pcie->of_data->mode);
+ ret = -EINVAL;
}
fail:
@@ -2498,7 +2494,7 @@ static const struct dev_pm_ops tegra_pcie_dw_pm_ops = {
static struct platform_driver tegra_pcie_dw_driver = {
.probe = tegra_pcie_dw_probe,
- .remove_new = tegra_pcie_dw_remove,
+ .remove = tegra_pcie_dw_remove,
.shutdown = tegra_pcie_dw_shutdown,
.driver = {
.name = "tegra194-pcie",
diff --git a/drivers/pci/controller/dwc/pcie-uniphier-ep.c b/drivers/pci/controller/dwc/pcie-uniphier-ep.c
index 639bc2e12476..d6e73811216e 100644
--- a/drivers/pci/controller/dwc/pcie-uniphier-ep.c
+++ b/drivers/pci/controller/dwc/pcie-uniphier-ep.c
@@ -399,7 +399,20 @@ static int uniphier_pcie_ep_probe(struct platform_device *pdev)
return ret;
priv->pci.ep.ops = &uniphier_pcie_ep_ops;
- return dw_pcie_ep_init(&priv->pci.ep);
+ ret = dw_pcie_ep_init(&priv->pci.ep);
+ if (ret)
+ return ret;
+
+ ret = dw_pcie_ep_init_registers(&priv->pci.ep);
+ if (ret) {
+ dev_err(dev, "Failed to initialize DWC endpoint registers\n");
+ dw_pcie_ep_deinit(&priv->pci.ep);
+ return ret;
+ }
+
+ pci_epc_init_notify(priv->pci.ep.epc);
+
+ return 0;
}
static const struct uniphier_pcie_ep_soc_data uniphier_pro5_data = {
diff --git a/drivers/pci/controller/dwc/pcie-uniphier.c b/drivers/pci/controller/dwc/pcie-uniphier.c
index 5757ca3803c9..43b28f826edd 100644
--- a/drivers/pci/controller/dwc/pcie-uniphier.c
+++ b/drivers/pci/controller/dwc/pcie-uniphier.c
@@ -279,7 +279,7 @@ static int uniphier_pcie_config_intx_irq(struct dw_pcie_rp *pp)
goto out_put_node;
}
- pcie->intx_irq_domain = irq_domain_add_linear(np_intc, PCI_NUM_INTX,
+ pcie->intx_irq_domain = irq_domain_create_linear(of_fwnode_handle(np_intc), PCI_NUM_INTX,
&uniphier_intx_domain_ops, pp);
if (!pcie->intx_irq_domain) {
dev_err(pci->dev, "Failed to get INTx domain\n");
diff --git a/drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c b/drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c
index d7b7350f02dd..5af22bee913b 100644
--- a/drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c
+++ b/drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c
@@ -190,7 +190,7 @@ static void ls_g4_pcie_reset(struct work_struct *work)
ls_g4_pcie_enable_interrupt(pcie);
}
-static struct mobiveil_rp_ops ls_g4_pcie_rp_ops = {
+static const struct mobiveil_rp_ops ls_g4_pcie_rp_ops = {
.interrupt_init = ls_g4_pcie_interrupt_init,
};
diff --git a/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c b/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c
index 32951f7d6d6d..a600f46ee3c3 100644
--- a/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c
+++ b/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c
@@ -360,8 +360,8 @@ static struct irq_chip mobiveil_msi_irq_chip = {
};
static struct msi_domain_info mobiveil_msi_domain_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_PCI_MSIX),
+ .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX,
.chip = &mobiveil_msi_irq_chip,
};
@@ -378,16 +378,9 @@ static void mobiveil_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
(int)data->hwirq, msg->address_hi, msg->address_lo);
}
-static int mobiveil_msi_set_affinity(struct irq_data *irq_data,
- const struct cpumask *mask, bool force)
-{
- return -EINVAL;
-}
-
static struct irq_chip mobiveil_msi_bottom_irq_chip = {
.name = "Mobiveil MSI",
.irq_compose_msi_msg = mobiveil_compose_msi_msg,
- .irq_set_affinity = mobiveil_msi_set_affinity,
};
static int mobiveil_irq_msi_domain_alloc(struct irq_domain *domain,
@@ -442,12 +435,12 @@ static const struct irq_domain_ops msi_domain_ops = {
static int mobiveil_allocate_msi_domains(struct mobiveil_pcie *pcie)
{
struct device *dev = &pcie->pdev->dev;
- struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node);
+ struct fwnode_handle *fwnode = of_fwnode_handle(dev->of_node);
struct mobiveil_msi *msi = &pcie->rp.msi;
mutex_init(&msi->lock);
- msi->dev_domain = irq_domain_add_linear(NULL, msi->num_of_vectors,
- &msi_domain_ops, pcie);
+ msi->dev_domain = irq_domain_create_linear(NULL, msi->num_of_vectors,
+ &msi_domain_ops, pcie);
if (!msi->dev_domain) {
dev_err(dev, "failed to create IRQ domain\n");
return -ENOMEM;
@@ -468,12 +461,11 @@ static int mobiveil_allocate_msi_domains(struct mobiveil_pcie *pcie)
static int mobiveil_pcie_init_irq_domain(struct mobiveil_pcie *pcie)
{
struct device *dev = &pcie->pdev->dev;
- struct device_node *node = dev->of_node;
struct mobiveil_root_port *rp = &pcie->rp;
/* setup INTx */
- rp->intx_domain = irq_domain_add_linear(node, PCI_NUM_INTX,
- &intx_domain_ops, pcie);
+ rp->intx_domain = irq_domain_create_linear(of_fwnode_handle(dev->of_node), PCI_NUM_INTX,
+ &intx_domain_ops, pcie);
if (!rp->intx_domain) {
dev_err(dev, "Failed to get a INTx IRQ domain\n");
diff --git a/drivers/pci/controller/mobiveil/pcie-mobiveil.h b/drivers/pci/controller/mobiveil/pcie-mobiveil.h
index 6082b8afbc31..e63abb887ee3 100644
--- a/drivers/pci/controller/mobiveil/pcie-mobiveil.h
+++ b/drivers/pci/controller/mobiveil/pcie-mobiveil.h
@@ -151,7 +151,7 @@ struct mobiveil_rp_ops {
struct mobiveil_root_port {
void __iomem *config_axi_slave_base; /* endpoint config base */
struct resource *ob_io_res;
- struct mobiveil_rp_ops *ops;
+ const struct mobiveil_rp_ops *ops;
int irq;
raw_spinlock_t intx_mask_lock;
struct irq_domain *intx_domain;
diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c
index 71ecd7ddcc8a..7bac64533b14 100644
--- a/drivers/pci/controller/pci-aardvark.c
+++ b/drivers/pci/controller/pci-aardvark.c
@@ -23,7 +23,6 @@
#include <linux/platform_device.h>
#include <linux/msi.h>
#include <linux/of_address.h>
-#include <linux/of_gpio.h>
#include <linux/of_pci.h>
#include "../pci.h"
@@ -51,7 +50,7 @@
#define PIO_COMPLETION_STATUS_MASK GENMASK(9, 7)
#define PIO_COMPLETION_STATUS_OK 0
#define PIO_COMPLETION_STATUS_UR 1
-#define PIO_COMPLETION_STATUS_CRS 2
+#define PIO_COMPLETION_STATUS_RRS 2
#define PIO_COMPLETION_STATUS_CA 4
#define PIO_NON_POSTED_REQ BIT(10)
#define PIO_ERR_STATUS BIT(11)
@@ -263,7 +262,7 @@ enum {
#define MSI_IRQ_NUM 32
-#define CFG_RD_CRS_VAL 0xffff0001
+#define CFG_RD_RRS_VAL 0xffff0001
struct advk_pcie {
struct platform_device *pdev;
@@ -650,7 +649,7 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
advk_pcie_train_link(pcie);
}
-static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u32 *val)
+static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_rrs, u32 *val)
{
struct device *dev = &pcie->pdev->dev;
u32 reg;
@@ -670,7 +669,7 @@ static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u3
* 2) value Unsupported Request(1) of COMPLETION_STATUS(bit9:7) only
* means a PIO write error, and for PIO read it is successful with
* a read value of 0xFFFFFFFF.
- * 3) value Completion Retry Status(CRS) of COMPLETION_STATUS(bit9:7)
+ * 3) value Config Request Retry Status(RRS) of COMPLETION_STATUS(bit9:7)
* only means a PIO write error, and for PIO read it is successful
* with a read value of 0xFFFF0001.
* 4) value Completer Abort (CA) of COMPLETION_STATUS(bit9:7) means
@@ -695,10 +694,10 @@ static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u3
strcomp_status = "UR";
ret = -EOPNOTSUPP;
break;
- case PIO_COMPLETION_STATUS_CRS:
- if (allow_crs && val) {
- /* PCIe r4.0, sec 2.3.2, says:
- * If CRS Software Visibility is enabled:
+ case PIO_COMPLETION_STATUS_RRS:
+ if (allow_rrs && val) {
+ /* PCIe r6.0, sec 2.3.2, says:
+ * If Configuration RRS Software Visibility is enabled:
* For a Configuration Read Request that includes both
* bytes of the Vendor ID field of a device Function's
* Configuration Space Header, the Root Complex must
@@ -707,22 +706,22 @@ static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u3
* all '1's for any additional bytes included in the
* request.
*
- * So CRS in this case is not an error status.
+ * So RRS in this case is not an error status.
*/
- *val = CFG_RD_CRS_VAL;
+ *val = CFG_RD_RRS_VAL;
strcomp_status = NULL;
ret = 0;
break;
}
- /* PCIe r4.0, sec 2.3.2, says:
- * If CRS Software Visibility is not enabled, the Root Complex
+ /* PCIe r6.0, sec 2.3.2, says:
+ * If RRS Software Visibility is not enabled, the Root Complex
* must re-issue the Configuration Request as a new Request.
- * If CRS Software Visibility is enabled: For a Configuration
+ * If RRS Software Visibility is enabled: For a Configuration
* Write Request or for any other Configuration Read Request,
* the Root Complex must re-issue the Configuration Request as
* a new Request.
* A Root Complex implementation may choose to limit the number
- * of Configuration Request/CRS Completion Status loops before
+ * of Configuration Request/RRS Completion Status loops before
* determining that something is wrong with the target of the
* Request and taking appropriate action, e.g., complete the
* Request to the host as a failed transaction.
@@ -730,7 +729,7 @@ static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u3
* So return -EAGAIN and caller (pci-aardvark.c driver) will
* re-issue request again up to the PIO_RETRY_CNT retries.
*/
- strcomp_status = "CRS";
+ strcomp_status = "RRS";
ret = -EAGAIN;
break;
case PIO_COMPLETION_STATUS_CA:
@@ -921,8 +920,8 @@ advk_pci_bridge_emul_pcie_conf_write(struct pci_bridge_emul *bridge,
case PCI_EXP_RTCTL: {
u16 rootctl = le16_to_cpu(bridge->pcie_conf.rootctl);
- /* Only emulation of PMEIE and CRSSVE bits is provided */
- rootctl &= PCI_EXP_RTCTL_PMEIE | PCI_EXP_RTCTL_CRSSVE;
+ /* Only emulation of PMEIE and RRS_SVE bits is provided */
+ rootctl &= PCI_EXP_RTCTL_PMEIE | PCI_EXP_RTCTL_RRS_SVE;
bridge->pcie_conf.rootctl = cpu_to_le16(rootctl);
break;
}
@@ -1076,7 +1075,7 @@ static int advk_sw_pci_bridge_init(struct advk_pcie *pcie)
bridge->pcie_conf.slotsta = cpu_to_le16(PCI_EXP_SLTSTA_PDS);
/* Indicates supports for Completion Retry Status */
- bridge->pcie_conf.rootcap = cpu_to_le16(PCI_EXP_RTCAP_CRSVIS);
+ bridge->pcie_conf.rootcap = cpu_to_le16(PCI_EXP_RTCAP_RRS_SV);
bridge->subsystem_vendor_id = advk_readl(pcie, PCIE_CORE_SSDEV_ID_REG) & 0xffff;
bridge->subsystem_id = advk_readl(pcie, PCIE_CORE_SSDEV_ID_REG) >> 16;
@@ -1142,7 +1141,7 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
{
struct advk_pcie *pcie = bus->sysdata;
int retry_count;
- bool allow_crs;
+ bool allow_rrs;
u32 reg;
int ret;
@@ -1154,16 +1153,16 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
size, val);
/*
- * Completion Retry Status is possible to return only when reading all
- * 4 bytes from PCI_VENDOR_ID and PCI_DEVICE_ID registers at once and
- * CRSSVE flag on Root Bridge is enabled.
+ * Configuration Request Retry Status (RRS) is possible to return
+ * only when reading both bytes from PCI_VENDOR_ID at once and
+ * RRS_SVE flag on Root Port is enabled.
*/
- allow_crs = (where == PCI_VENDOR_ID) && (size == 4) &&
+ allow_rrs = (where == PCI_VENDOR_ID) && (size >= 2) &&
(le16_to_cpu(pcie->bridge.pcie_conf.rootctl) &
- PCI_EXP_RTCTL_CRSSVE);
+ PCI_EXP_RTCTL_RRS_SVE);
if (advk_pcie_pio_is_running(pcie))
- goto try_crs;
+ goto try_rrs;
/* Program the control register */
reg = advk_readl(pcie, PIO_CTRL);
@@ -1190,12 +1189,12 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
ret = advk_pcie_wait_pio(pcie);
if (ret < 0)
- goto try_crs;
+ goto try_rrs;
retry_count += ret;
/* Check PIO status and get the read result */
- ret = advk_pcie_check_pio_status(pcie, allow_crs, val);
+ ret = advk_pcie_check_pio_status(pcie, allow_rrs, val);
} while (ret == -EAGAIN && retry_count < PIO_RETRY_CNT);
if (ret < 0)
@@ -1208,13 +1207,13 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
return PCIBIOS_SUCCESSFUL;
-try_crs:
+try_rrs:
/*
- * If it is possible, return Completion Retry Status so that caller
- * tries to issue the request again instead of failing.
+ * If it is possible, return Configuration Request Retry Status so
+ * that caller tries to issue the request again instead of failing.
*/
- if (allow_crs) {
- *val = CFG_RD_CRS_VAL;
+ if (allow_rrs) {
+ *val = CFG_RD_RRS_VAL;
return PCIBIOS_SUCCESSFUL;
}
@@ -1305,12 +1304,6 @@ static void advk_msi_irq_compose_msi_msg(struct irq_data *data,
msg->data = data->hwirq;
}
-static int advk_msi_set_affinity(struct irq_data *irq_data,
- const struct cpumask *mask, bool force)
-{
- return -EINVAL;
-}
-
static void advk_msi_irq_mask(struct irq_data *d)
{
struct advk_pcie *pcie = d->domain->host_data;
@@ -1354,7 +1347,6 @@ static void advk_msi_top_irq_unmask(struct irq_data *d)
static struct irq_chip advk_msi_bottom_irq_chip = {
.name = "MSI",
.irq_compose_msi_msg = advk_msi_irq_compose_msi_msg,
- .irq_set_affinity = advk_msi_set_affinity,
.irq_mask = advk_msi_irq_mask,
.irq_unmask = advk_msi_irq_unmask,
};
@@ -1452,7 +1444,8 @@ static struct irq_chip advk_msi_irq_chip = {
static struct msi_domain_info advk_msi_domain_info = {
.flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX,
+ MSI_FLAG_NO_AFFINITY | MSI_FLAG_MULTI_PCI_MSI |
+ MSI_FLAG_PCI_MSIX,
.chip = &advk_msi_irq_chip,
};
@@ -1463,9 +1456,8 @@ static int advk_pcie_init_msi_irq_domain(struct advk_pcie *pcie)
raw_spin_lock_init(&pcie->msi_irq_lock);
mutex_init(&pcie->msi_used_lock);
- pcie->msi_inner_domain =
- irq_domain_add_linear(NULL, MSI_IRQ_NUM,
- &advk_msi_domain_ops, pcie);
+ pcie->msi_inner_domain = irq_domain_create_linear(NULL, MSI_IRQ_NUM,
+ &advk_msi_domain_ops, pcie);
if (!pcie->msi_inner_domain)
return -ENOMEM;
@@ -1515,9 +1507,8 @@ static int advk_pcie_init_irq_domain(struct advk_pcie *pcie)
irq_chip->irq_mask = advk_pcie_irq_mask;
irq_chip->irq_unmask = advk_pcie_irq_unmask;
- pcie->irq_domain =
- irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
- &advk_pcie_irq_domain_ops, pcie);
+ pcie->irq_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node), PCI_NUM_INTX,
+ &advk_pcie_irq_domain_ops, pcie);
if (!pcie->irq_domain) {
dev_err(dev, "Failed to get a INTx IRQ domain\n");
ret = -ENOMEM;
@@ -1556,9 +1547,7 @@ static const struct irq_domain_ops advk_pcie_rp_irq_domain_ops = {
static int advk_pcie_init_rp_irq_domain(struct advk_pcie *pcie)
{
- pcie->rp_irq_domain = irq_domain_add_linear(NULL, 1,
- &advk_pcie_rp_irq_domain_ops,
- pcie);
+ pcie->rp_irq_domain = irq_domain_create_linear(NULL, 1, &advk_pcie_rp_irq_domain_ops, pcie);
if (!pcie->rp_irq_domain) {
dev_err(&pcie->pdev->dev, "Failed to add Root Port IRQ domain\n");
return -ENOMEM;
@@ -2003,7 +1992,7 @@ static struct platform_driver advk_pcie_driver = {
.of_match_table = advk_pcie_of_match_table,
},
.probe = advk_pcie_probe,
- .remove_new = advk_pcie_remove,
+ .remove = advk_pcie_remove,
};
module_platform_driver(advk_pcie_driver);
diff --git a/drivers/pci/controller/pci-ftpci100.c b/drivers/pci/controller/pci-ftpci100.c
index ffdeed25e961..28e43831c0f1 100644
--- a/drivers/pci/controller/pci-ftpci100.c
+++ b/drivers/pci/controller/pci-ftpci100.c
@@ -345,8 +345,8 @@ static int faraday_pci_setup_cascaded_irq(struct faraday_pci *p)
return irq ?: -EINVAL;
}
- p->irqdomain = irq_domain_add_linear(intc, PCI_NUM_INTX,
- &faraday_pci_irqdomain_ops, p);
+ p->irqdomain = irq_domain_create_linear(of_fwnode_handle(intc), PCI_NUM_INTX,
+ &faraday_pci_irqdomain_ops, p);
of_node_put(intc);
if (!p->irqdomain) {
dev_err(p->dev, "failed to create Gemini PCI IRQ domain\n");
diff --git a/drivers/pci/controller/pci-host-common.c b/drivers/pci/controller/pci-host-common.c
index 45b71806182d..f441bfd6f96a 100644
--- a/drivers/pci/controller/pci-host-common.c
+++ b/drivers/pci/controller/pci-host-common.c
@@ -73,12 +73,10 @@ int pci_host_common_probe(struct platform_device *pdev)
if (IS_ERR(cfg))
return PTR_ERR(cfg);
- /* Do not reassign resources if probe only */
- if (!pci_has_flag(PCI_PROBE_ONLY))
- pci_add_flags(PCI_REASSIGN_ALL_BUS);
-
bridge->sysdata = cfg;
bridge->ops = (struct pci_ops *)&ops->pci_ops;
+ bridge->enable_device = ops->enable_device;
+ bridge->disable_device = ops->disable_device;
bridge->msi_domain = true;
return pci_host_probe(bridge);
@@ -96,4 +94,5 @@ void pci_host_common_remove(struct platform_device *pdev)
}
EXPORT_SYMBOL_GPL(pci_host_common_remove);
+MODULE_DESCRIPTION("Generic PCI host common driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/pci-host-generic.c b/drivers/pci/controller/pci-host-generic.c
index 41cb6a057f6e..4051b9b61dac 100644
--- a/drivers/pci/controller/pci-host-generic.c
+++ b/drivers/pci/controller/pci-host-generic.c
@@ -82,8 +82,9 @@ static struct platform_driver gen_pci_driver = {
.of_match_table = gen_pci_of_match,
},
.probe = pci_host_common_probe,
- .remove_new = pci_host_common_remove,
+ .remove = pci_host_common_remove,
};
module_platform_driver(gen_pci_driver);
+MODULE_DESCRIPTION("Generic PCI host controller driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
index 5992280e8110..e1eaa24559a2 100644
--- a/drivers/pci/controller/pci-hyperv.c
+++ b/drivers/pci/controller/pci-hyperv.c
@@ -1130,8 +1130,8 @@ static void _hv_pcifront_read_config(struct hv_pci_dev *hpdev, int where,
PCI_CAPABILITY_LIST) {
/* ROM BARs are unimplemented */
*val = 0;
- } else if (where >= PCI_INTERRUPT_LINE && where + size <=
- PCI_INTERRUPT_PIN) {
+ } else if ((where >= PCI_INTERRUPT_LINE && where + size <= PCI_INTERRUPT_PIN) ||
+ (where >= PCI_INTERRUPT_PIN && where + size <= PCI_MIN_GNT)) {
/*
* Interrupt Line and Interrupt PIN are hard-wired to zero
* because this front-end only supports message-signaled
@@ -1356,7 +1356,7 @@ static struct pci_ops hv_pcifront_ops = {
*
* If the PF driver wishes to initiate communication, it can "invalidate" one or
* more of the first 64 blocks. This invalidation is delivered via a callback
- * supplied by the VF driver by this driver.
+ * supplied to the VF driver by this driver.
*
* No protocol is implied, except that supplied by the PF and VF drivers.
*/
@@ -1757,8 +1757,7 @@ static int hv_compose_multi_msi_req_get_cpu(void)
spin_lock_irqsave(&multi_msi_cpu_lock, flags);
- cpu_next = cpumask_next_wrap(cpu_next, cpu_online_mask, nr_cpu_ids,
- false);
+ cpu_next = cpumask_next_wrap(cpu_next, cpu_online_mask);
cpu = cpu_next;
spin_unlock_irqrestore(&multi_msi_cpu_lock, flags);
@@ -2053,6 +2052,7 @@ static struct irq_chip hv_msi_irq_chip = {
.irq_set_affinity = irq_chip_set_affinity_parent,
#ifdef CONFIG_X86
.irq_ack = irq_chip_ack_parent,
+ .flags = IRQCHIP_MOVE_DEFERRED,
#elif defined(CONFIG_ARM64)
.irq_eoi = irq_chip_eoi_parent,
#endif
@@ -3975,24 +3975,18 @@ static int hv_pci_restore_msi_msg(struct pci_dev *pdev, void *arg)
{
struct irq_data *irq_data;
struct msi_desc *entry;
- int ret = 0;
if (!pdev->msi_enabled && !pdev->msix_enabled)
return 0;
- msi_lock_descs(&pdev->dev);
+ guard(msi_descs_lock)(&pdev->dev);
msi_for_each_desc(entry, &pdev->dev, MSI_DESC_ASSOCIATED) {
irq_data = irq_get_irq_data(entry->irq);
- if (WARN_ON_ONCE(!irq_data)) {
- ret = -EINVAL;
- break;
- }
-
+ if (WARN_ON_ONCE(!irq_data))
+ return -EINVAL;
hv_compose_msi_msg(irq_data, &entry->msg);
}
- msi_unlock_descs(&pdev->dev);
-
- return ret;
+ return 0;
}
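guard() comes from <linux/cleanup.h>: it acquires the named lock and releases it automatically when the enclosing scope ends, which is why the early return above no longer needs an explicit unlock. A minimal sketch with a hypothetical mutex:

#include <linux/cleanup.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(foo_lock);

static int foo_update(int value)
{
	guard(mutex)(&foo_lock);	/* dropped on every return path */

	if (value < 0)
		return -EINVAL;		/* no unlock needed */

	return 0;
}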
/*
diff --git a/drivers/pci/controller/pci-loongson.c b/drivers/pci/controller/pci-loongson.c
index 8b34ccff073a..bc630ab8a283 100644
--- a/drivers/pci/controller/pci-loongson.c
+++ b/drivers/pci/controller/pci-loongson.c
@@ -163,6 +163,19 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON,
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON,
DEV_LS7A_HDMI, loongson_pci_pin_quirk);
+static void loongson_pci_msi_quirk(struct pci_dev *dev)
+{
+ u16 val, class = dev->class >> 8;
+
+ if (class != PCI_CLASS_BRIDGE_HOST)
+ return;
+
+ pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &val);
+ val |= PCI_MSI_FLAGS_ENABLE;
+ pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, val);
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON, DEV_LS7A_PCIE_PORT5, loongson_pci_msi_quirk);
+
static struct loongson_pci *pci_bus_to_loongson_pci(struct pci_bus *bus)
{
struct pci_config_window *cfg;
diff --git a/drivers/pci/controller/pci-mvebu.c b/drivers/pci/controller/pci-mvebu.c
index 29fe09c99e7d..60da24ba0a19 100644
--- a/drivers/pci/controller/pci-mvebu.c
+++ b/drivers/pci/controller/pci-mvebu.c
@@ -1078,9 +1078,9 @@ static int mvebu_pcie_init_irq_domain(struct mvebu_pcie_port *port)
return -ENODEV;
}
- port->intx_irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
- &mvebu_pcie_intx_irq_domain_ops,
- port);
+ port->intx_irq_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node),
+ PCI_NUM_INTX,
+ &mvebu_pcie_intx_irq_domain_ops, port);
of_node_put(pcie_intc_node);
if (!port->intx_irq_domain) {
dev_err(dev, "Failed to get INTx IRQ domain for %s\n", port->name);
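The same irq_domain_add_linear() to irq_domain_create_linear() migration recurs in several drivers touched by this series (Altera, brcmstb); a minimal sketch of the fwnode-based call, with illustrative ops and host_data arguments:

	/* Illustrative: the create_* variant takes a fwnode instead of a device_node. */
	static struct irq_domain *create_intx_domain(struct device_node *np,
						     const struct irq_domain_ops *ops,
						     void *host_data)
	{
		return irq_domain_create_linear(of_fwnode_handle(np), PCI_NUM_INTX,
						ops, host_data);
	}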
@@ -1422,7 +1422,7 @@ static void mvebu_pcie_powerdown(struct mvebu_pcie_port *port)
}
/*
- * devm_of_pci_get_host_bridge_resources() only sets up translateable resources,
+ * devm_of_pci_get_host_bridge_resources() only sets up translatable resources,
* so we need extra resource setup parsing our special DT properties encoding
* the MEM and IO apertures.
*/
@@ -1715,6 +1715,7 @@ static const struct of_device_id mvebu_pcie_of_match_table[] = {
{ .compatible = "marvell,kirkwood-pcie", },
{},
};
+MODULE_DEVICE_TABLE(of, mvebu_pcie_of_match_table);
static const struct dev_pm_ops mvebu_pcie_pm_ops = {
NOIRQ_SYSTEM_SLEEP_PM_OPS(mvebu_pcie_suspend, mvebu_pcie_resume)
@@ -1727,7 +1728,7 @@ static struct platform_driver mvebu_pcie_driver = {
.pm = &mvebu_pcie_pm_ops,
},
.probe = mvebu_pcie_probe,
- .remove_new = mvebu_pcie_remove,
+ .remove = mvebu_pcie_remove,
};
module_platform_driver(mvebu_pcie_driver);
diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c
index 038d974a318e..467ddc701adc 100644
--- a/drivers/pci/controller/pci-tegra.c
+++ b/drivers/pci/controller/pci-tegra.c
@@ -22,6 +22,7 @@
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
+#include <linux/irqchip/irq-msi-lib.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -1460,7 +1461,7 @@ static int tegra_pcie_get_resources(struct tegra_pcie *pcie)
pcie->cs = *res;
/* constrain configuration space to 4 KiB */
- pcie->cs.end = pcie->cs.start + SZ_4K - 1;
+ resource_set_size(&pcie->cs, SZ_4K);
pcie->cfg = devm_ioremap_resource(dev, &pcie->cs);
if (IS_ERR(pcie->cfg)) {
@@ -1547,7 +1548,7 @@ static void tegra_pcie_msi_irq(struct irq_desc *desc)
unsigned int index = i * 32 + offset;
int ret;
- ret = generic_handle_domain_irq(msi->domain->parent, index);
+ ret = generic_handle_domain_irq(msi->domain, index);
if (ret) {
/*
* that's weird who triggered this?
@@ -1565,30 +1566,6 @@ static void tegra_pcie_msi_irq(struct irq_desc *desc)
chained_irq_exit(chip, desc);
}
-static void tegra_msi_top_irq_ack(struct irq_data *d)
-{
- irq_chip_ack_parent(d);
-}
-
-static void tegra_msi_top_irq_mask(struct irq_data *d)
-{
- pci_msi_mask_irq(d);
- irq_chip_mask_parent(d);
-}
-
-static void tegra_msi_top_irq_unmask(struct irq_data *d)
-{
- pci_msi_unmask_irq(d);
- irq_chip_unmask_parent(d);
-}
-
-static struct irq_chip tegra_msi_top_chip = {
- .name = "Tegra PCIe MSI",
- .irq_ack = tegra_msi_top_irq_ack,
- .irq_mask = tegra_msi_top_irq_mask,
- .irq_unmask = tegra_msi_top_irq_unmask,
-};
-
static void tegra_msi_irq_ack(struct irq_data *d)
{
struct tegra_msi *msi = irq_data_get_irq_chip_data(d);
@@ -1629,11 +1606,6 @@ static void tegra_msi_irq_unmask(struct irq_data *d)
spin_unlock_irqrestore(&msi->mask_lock, flags);
}
-static int tegra_msi_set_affinity(struct irq_data *d, const struct cpumask *mask, bool force)
-{
- return -EINVAL;
-}
-
static void tegra_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
struct tegra_msi *msi = irq_data_get_irq_chip_data(data);
@@ -1648,7 +1620,6 @@ static struct irq_chip tegra_msi_bottom_chip = {
.irq_ack = tegra_msi_irq_ack,
.irq_mask = tegra_msi_irq_mask,
.irq_unmask = tegra_msi_irq_unmask,
- .irq_set_affinity = tegra_msi_set_affinity,
.irq_compose_msi_msg = tegra_compose_msi_msg,
};
@@ -1696,42 +1667,40 @@ static const struct irq_domain_ops tegra_msi_domain_ops = {
.free = tegra_msi_domain_free,
};
-static struct msi_domain_info tegra_msi_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_PCI_MSIX),
- .chip = &tegra_msi_top_chip,
+static const struct msi_parent_ops tegra_msi_parent_ops = {
+ .supported_flags = (MSI_GENERIC_FLAGS_MASK |
+ MSI_FLAG_PCI_MSIX),
+ .required_flags = (MSI_FLAG_USE_DEF_DOM_OPS |
+ MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_PCI_MSI_MASK_PARENT |
+ MSI_FLAG_NO_AFFINITY),
+ .chip_flags = MSI_CHIP_FLAG_SET_ACK,
+ .bus_select_token = DOMAIN_BUS_PCI_MSI,
+ .init_dev_msi_info = msi_lib_init_dev_msi_info,
};
static int tegra_allocate_domains(struct tegra_msi *msi)
{
struct tegra_pcie *pcie = msi_to_pcie(msi);
struct fwnode_handle *fwnode = dev_fwnode(pcie->dev);
- struct irq_domain *parent;
-
- parent = irq_domain_create_linear(fwnode, INT_PCI_MSI_NR,
- &tegra_msi_domain_ops, msi);
- if (!parent) {
- dev_err(pcie->dev, "failed to create IRQ domain\n");
- return -ENOMEM;
- }
- irq_domain_update_bus_token(parent, DOMAIN_BUS_NEXUS);
+ struct irq_domain_info info = {
+ .fwnode = fwnode,
+ .ops = &tegra_msi_domain_ops,
+ .size = INT_PCI_MSI_NR,
+ .host_data = msi,
+ };
- msi->domain = pci_msi_create_irq_domain(fwnode, &tegra_msi_info, parent);
+ msi->domain = msi_create_parent_irq_domain(&info, &tegra_msi_parent_ops);
if (!msi->domain) {
dev_err(pcie->dev, "failed to create MSI domain\n");
- irq_domain_remove(parent);
return -ENOMEM;
}
-
return 0;
}
static void tegra_free_domains(struct tegra_msi *msi)
{
- struct irq_domain *parent = msi->domain->parent;
-
irq_domain_remove(msi->domain);
- irq_domain_remove(parent);
}
static int tegra_pcie_msi_setup(struct tegra_pcie *pcie)
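The Tegra conversion above, and the X-Gene and Apple ones later in this diff, all follow the same shape; a condensed, illustrative sketch of the msi_create_parent_irq_domain() pattern (the my_* names are placeholders, not driver symbols):

	static int my_create_msi_parent(struct fwnode_handle *fwnode, void *host_data)
	{
		struct irq_domain_info info = {
			.fwnode		= fwnode,
			.ops		= &my_hw_domain_ops,	/* .alloc/.free for the HW vectors */
			.size		= MY_NUM_MSI_VECTORS,
			.host_data	= host_data,
		};
		struct irq_domain *dom;

		/* The MSI core layers the per-device PCI/MSI domains on top of this one. */
		dom = msi_create_parent_irq_domain(&info, &my_msi_parent_ops);
		return dom ? 0 : -ENOMEM;
	}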
@@ -2112,47 +2081,39 @@ static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask)
static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
{
struct device *dev = pcie->dev;
- struct device_node *np = dev->of_node, *port;
+ struct device_node *np = dev->of_node;
const struct tegra_pcie_soc *soc = pcie->soc;
u32 lanes = 0, mask = 0;
unsigned int lane = 0;
int err;
/* parse root ports */
- for_each_child_of_node(np, port) {
+ for_each_child_of_node_scoped(np, port) {
struct tegra_pcie_port *rp;
unsigned int index;
u32 value;
char *label;
err = of_pci_get_devfn(port);
- if (err < 0) {
- dev_err(dev, "failed to parse address: %d\n", err);
- goto err_node_put;
- }
+ if (err < 0)
+ return dev_err_probe(dev, err, "failed to parse address\n");
index = PCI_SLOT(err);
- if (index < 1 || index > soc->num_ports) {
- dev_err(dev, "invalid port number: %d\n", index);
- err = -EINVAL;
- goto err_node_put;
- }
+ if (index < 1 || index > soc->num_ports)
+ return dev_err_probe(dev, -EINVAL,
+ "invalid port number: %d\n", index);
index--;
err = of_property_read_u32(port, "nvidia,num-lanes", &value);
- if (err < 0) {
- dev_err(dev, "failed to parse # of lanes: %d\n",
- err);
- goto err_node_put;
- }
+ if (err < 0)
+ return dev_err_probe(dev, err,
+ "failed to parse # of lanes\n");
- if (value > 16) {
- dev_err(dev, "invalid # of lanes: %u\n", value);
- err = -EINVAL;
- goto err_node_put;
- }
+ if (value > 16)
+ return dev_err_probe(dev, -EINVAL,
+ "invalid # of lanes: %u\n", value);
lanes |= value << (index << 3);
@@ -2165,16 +2126,12 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
lane += value;
rp = devm_kzalloc(dev, sizeof(*rp), GFP_KERNEL);
- if (!rp) {
- err = -ENOMEM;
- goto err_node_put;
- }
+ if (!rp)
+ return -ENOMEM;
err = of_address_to_resource(port, 0, &rp->regs);
- if (err < 0) {
- dev_err(dev, "failed to parse address: %d\n", err);
- goto err_node_put;
- }
+ if (err < 0)
+ return dev_err_probe(dev, err, "failed to parse address\n");
INIT_LIST_HEAD(&rp->list);
rp->index = index;
@@ -2183,16 +2140,12 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
rp->np = port;
rp->base = devm_pci_remap_cfg_resource(dev, &rp->regs);
- if (IS_ERR(rp->base)) {
- err = PTR_ERR(rp->base);
- goto err_node_put;
- }
+ if (IS_ERR(rp->base))
+ return PTR_ERR(rp->base);
label = devm_kasprintf(dev, GFP_KERNEL, "pex-reset-%u", index);
- if (!label) {
- err = -ENOMEM;
- goto err_node_put;
- }
+ if (!label)
+ return -ENOMEM;
/*
* Returns -ENOENT if reset-gpios property is not populated
@@ -2205,34 +2158,26 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
GPIOD_OUT_LOW,
label);
if (IS_ERR(rp->reset_gpio)) {
- if (PTR_ERR(rp->reset_gpio) == -ENOENT) {
+ if (PTR_ERR(rp->reset_gpio) == -ENOENT)
rp->reset_gpio = NULL;
- } else {
- dev_err(dev, "failed to get reset GPIO: %ld\n",
- PTR_ERR(rp->reset_gpio));
- err = PTR_ERR(rp->reset_gpio);
- goto err_node_put;
- }
+ else
+ return dev_err_probe(dev, PTR_ERR(rp->reset_gpio),
+ "failed to get reset GPIO\n");
}
list_add_tail(&rp->list, &pcie->ports);
}
err = tegra_pcie_get_xbar_config(pcie, lanes, &pcie->xbar_config);
- if (err < 0) {
- dev_err(dev, "invalid lane configuration\n");
- return err;
- }
+ if (err < 0)
+ return dev_err_probe(dev, err,
+ "invalid lane configuration\n");
err = tegra_pcie_get_regulators(pcie, mask);
if (err < 0)
return err;
return 0;
-
-err_node_put:
- of_node_put(port);
- return err;
}
/*
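The parse_dt rework above leans on the scoped child-node iterator; a minimal sketch of why the err_node_put label could go away (illustrative function, not part of the driver):

	/*
	 * for_each_child_of_node_scoped() declares the child node itself and
	 * drops its reference whenever the loop body is left, so early returns
	 * no longer need a matching of_node_put().
	 */
	static int parse_root_ports(struct device *dev)
	{
		for_each_child_of_node_scoped(dev->of_node, port) {
			int err = of_pci_get_devfn(port);

			if (err < 0)
				return dev_err_probe(dev, err, "failed to parse address\n");
		}

		return 0;
	}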
@@ -2806,6 +2751,6 @@ static struct platform_driver tegra_pcie_driver = {
.pm = &tegra_pcie_pm_ops,
},
.probe = tegra_pcie_probe,
- .remove_new = tegra_pcie_remove,
+ .remove = tegra_pcie_remove,
};
module_platform_driver(tegra_pcie_driver);
diff --git a/drivers/pci/controller/pci-thunder-ecam.c b/drivers/pci/controller/pci-thunder-ecam.c
index b5bd10a62adb..08161065a89c 100644
--- a/drivers/pci/controller/pci-thunder-ecam.c
+++ b/drivers/pci/controller/pci-thunder-ecam.c
@@ -204,7 +204,7 @@ static int thunder_ecam_config_read(struct pci_bus *bus, unsigned int devfn,
v = readl(addr);
if (v & 0xff00)
- pr_err("Bad MSIX cap header: %08x\n", v);
+ pr_err("Bad MSI-X cap header: %08x\n", v);
v |= 0xbc00; /* next capability is EA at 0xbc */
set_val(v, where, size, val);
return PCIBIOS_SUCCESSFUL;
diff --git a/drivers/pci/controller/pci-thunder-pem.c b/drivers/pci/controller/pci-thunder-pem.c
index 06a9855cb431..f1bd5de67997 100644
--- a/drivers/pci/controller/pci-thunder-pem.c
+++ b/drivers/pci/controller/pci-thunder-pem.c
@@ -400,9 +400,9 @@ static int thunder_pem_acpi_init(struct pci_config_window *cfg)
* Reserve 64K size PEM specific resources. The full 16M range
* size is required for thunder_pem_init() call.
*/
- res_pem->end = res_pem->start + SZ_64K - 1;
+ resource_set_size(res_pem, SZ_64K);
thunder_pem_reserve_range(dev, root->segment, res_pem);
- res_pem->end = res_pem->start + SZ_16M - 1;
+ resource_set_size(res_pem, SZ_16M);
/* Reserve PCI configuration space as well. */
thunder_pem_reserve_range(dev, root->segment, &cfg->res);
diff --git a/drivers/pci/controller/pci-xgene-msi.c b/drivers/pci/controller/pci-xgene-msi.c
index 3ce38dfd0d29..b05ec8b0bb93 100644
--- a/drivers/pci/controller/pci-xgene-msi.c
+++ b/drivers/pci/controller/pci-xgene-msi.c
@@ -12,6 +12,7 @@
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/irqchip/chained_irq.h>
+#include <linux/irqchip/irq-msi-lib.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/of_pci.h>
@@ -32,7 +33,6 @@ struct xgene_msi_group {
struct xgene_msi {
struct device_node *node;
struct irq_domain *inner_domain;
- struct irq_domain *msi_domain;
u64 msi_addr;
void __iomem *msi_regs;
unsigned long *bitmap;
@@ -44,20 +44,6 @@ struct xgene_msi {
/* Global data */
static struct xgene_msi xgene_msi_ctrl;
-static struct irq_chip xgene_msi_top_irq_chip = {
- .name = "X-Gene1 MSI",
- .irq_enable = pci_msi_unmask_irq,
- .irq_disable = pci_msi_mask_irq,
- .irq_mask = pci_msi_mask_irq,
- .irq_unmask = pci_msi_unmask_irq,
-};
-
-static struct msi_domain_info xgene_msi_domain_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_PCI_MSIX),
- .chip = &xgene_msi_top_irq_chip,
-};
-
/*
* X-Gene v1 has 16 groups of MSI termination registers MSInIRx, where
* n is group number (0..F), x is index of registers in each group (0..7)
@@ -154,7 +140,7 @@ static void xgene_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
* X-Gene v1 only has 16 MSI GIC IRQs for 2048 MSI vectors. To maintain
* the expected behaviour of .set_affinity for each MSI interrupt, the 16
* MSI GIC IRQs are statically allocated to 8 X-Gene v1 cores (2 GIC IRQs
- * for each core). The MSI vector is moved fom 1 MSI GIC IRQ to another
+ * for each core). The MSI vector is moved from 1 MSI GIC IRQ to another
* MSI GIC IRQ to steer its MSI interrupt to correct X-Gene v1 core. As a
* consequence, the total MSI vectors that X-Gene v1 supports will be
* reduced to 256 (2048/8) vectors.
@@ -235,34 +221,35 @@ static void xgene_irq_domain_free(struct irq_domain *domain,
irq_domain_free_irqs_parent(domain, virq, nr_irqs);
}
-static const struct irq_domain_ops msi_domain_ops = {
+static const struct irq_domain_ops xgene_msi_domain_ops = {
.alloc = xgene_irq_domain_alloc,
.free = xgene_irq_domain_free,
};
+static const struct msi_parent_ops xgene_msi_parent_ops = {
+ .supported_flags = (MSI_GENERIC_FLAGS_MASK |
+ MSI_FLAG_PCI_MSIX),
+ .required_flags = (MSI_FLAG_USE_DEF_DOM_OPS |
+ MSI_FLAG_USE_DEF_CHIP_OPS),
+ .bus_select_token = DOMAIN_BUS_PCI_MSI,
+ .init_dev_msi_info = msi_lib_init_dev_msi_info,
+};
+
static int xgene_allocate_domains(struct xgene_msi *msi)
{
- msi->inner_domain = irq_domain_add_linear(NULL, NR_MSI_VEC,
- &msi_domain_ops, msi);
- if (!msi->inner_domain)
- return -ENOMEM;
-
- msi->msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(msi->node),
- &xgene_msi_domain_info,
- msi->inner_domain);
-
- if (!msi->msi_domain) {
- irq_domain_remove(msi->inner_domain);
- return -ENOMEM;
- }
-
- return 0;
+ struct irq_domain_info info = {
+ .fwnode = of_fwnode_handle(msi->node),
+ .ops = &xgene_msi_domain_ops,
+ .size = NR_MSI_VEC,
+ .host_data = msi,
+ };
+
+ msi->inner_domain = msi_create_parent_irq_domain(&info, &xgene_msi_parent_ops);
+ return msi->inner_domain ? 0 : -ENOMEM;
}
static void xgene_free_domains(struct xgene_msi *msi)
{
- if (msi->msi_domain)
- irq_domain_remove(msi->msi_domain);
if (msi->inner_domain)
irq_domain_remove(msi->inner_domain);
}
@@ -518,7 +505,7 @@ static struct platform_driver xgene_msi_driver = {
.of_match_table = xgene_msi_match_table,
},
.probe = xgene_msi_probe,
- .remove_new = xgene_msi_remove,
+ .remove = xgene_msi_remove,
};
static int __init xgene_pcie_msi_init(void)
diff --git a/drivers/pci/controller/pci-xgene.c b/drivers/pci/controller/pci-xgene.c
index 8e457fa450a2..1e2ebbfa36d1 100644
--- a/drivers/pci/controller/pci-xgene.c
+++ b/drivers/pci/controller/pci-xgene.c
@@ -171,17 +171,17 @@ static int xgene_pcie_config_read32(struct pci_bus *bus, unsigned int devfn,
/*
* The v1 controller has a bug in its Configuration Request Retry
- * Status (CRS) logic: when CRS Software Visibility is enabled and
+ * Status (RRS) logic: when RRS Software Visibility is enabled and
* we read the Vendor and Device ID of a non-existent device, the
* controller fabricates return data of 0xFFFF0001 ("device exists
* but is not ready") instead of 0xFFFFFFFF (PCI_ERROR_RESPONSE)
* ("device does not exist"). This causes the PCI core to retry
* the read until it times out. Avoid this by not claiming to
- * support CRS SV.
+ * support RRS SV.
*/
if (pci_is_root_bus(bus) && (port->version == XGENE_PCIE_IP_VER_1) &&
((where & ~0x3) == XGENE_V1_PCI_EXP_CAP + PCI_EXP_RTCTL))
- *val &= ~(PCI_EXP_RTCAP_CRSVIS << 16);
+ *val &= ~(PCI_EXP_RTCAP_RRS_SV << 16);
if (size <= 2)
*val = (*val >> (8 * (where & 3))) & ((1 << (size * 8)) - 1);
diff --git a/drivers/pci/controller/pcie-altera-msi.c b/drivers/pci/controller/pcie-altera-msi.c
index 6ad5427490b5..a43f21eb8fbb 100644
--- a/drivers/pci/controller/pcie-altera-msi.c
+++ b/drivers/pci/controller/pcie-altera-msi.c
@@ -81,8 +81,8 @@ static struct irq_chip altera_msi_irq_chip = {
};
static struct msi_domain_info altera_msi_domain_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_PCI_MSIX),
+ .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX,
.chip = &altera_msi_irq_chip,
};
@@ -99,16 +99,9 @@ static void altera_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
(int)data->hwirq, msg->address_hi, msg->address_lo);
}
-static int altera_msi_set_affinity(struct irq_data *irq_data,
- const struct cpumask *mask, bool force)
-{
- return -EINVAL;
-}
-
static struct irq_chip altera_msi_bottom_irq_chip = {
.name = "Altera MSI",
.irq_compose_msi_msg = altera_compose_msi_msg,
- .irq_set_affinity = altera_msi_set_affinity,
};
static int altera_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
@@ -171,9 +164,9 @@ static const struct irq_domain_ops msi_domain_ops = {
static int altera_allocate_domains(struct altera_msi *msi)
{
- struct fwnode_handle *fwnode = of_node_to_fwnode(msi->pdev->dev.of_node);
+ struct fwnode_handle *fwnode = of_fwnode_handle(msi->pdev->dev.of_node);
- msi->inner_domain = irq_domain_add_linear(NULL, msi->num_of_vectors,
+ msi->inner_domain = irq_domain_create_linear(NULL, msi->num_of_vectors,
&msi_domain_ops, msi);
if (!msi->inner_domain) {
dev_err(&msi->pdev->dev, "failed to create IRQ domain\n");
@@ -274,7 +267,7 @@ static struct platform_driver altera_msi_driver = {
.of_match_table = altera_msi_of_match,
},
.probe = altera_msi_probe,
- .remove_new = altera_msi_remove,
+ .remove = altera_msi_remove,
};
static int __init altera_msi_init(void)
@@ -290,4 +283,5 @@ static void __exit altera_msi_exit(void)
subsys_initcall(altera_msi_init);
MODULE_DEVICE_TABLE(of, altera_msi_of_match);
module_exit(altera_msi_exit);
+MODULE_DESCRIPTION("Altera PCIe MSI support driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/pcie-altera.c b/drivers/pci/controller/pcie-altera.c
index a9536dc4bf96..0fc77176a52e 100644
--- a/drivers/pci/controller/pcie-altera.c
+++ b/drivers/pci/controller/pcie-altera.c
@@ -6,6 +6,7 @@
* Description: Altera PCIe host controller driver
*/
+#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irqchip/chained_irq.h>
@@ -55,12 +56,11 @@
#define TLP_READ_TAG 0x1d
#define TLP_WRITE_TAG 0x10
#define RP_DEVFN 0
-#define TLP_REQ_ID(bus, devfn) (((bus) << 8) | (devfn))
#define TLP_CFG_DW0(pcie, cfg) \
(((cfg) << 24) | \
TLP_PAYLOAD_SIZE)
#define TLP_CFG_DW1(pcie, tag, be) \
- (((TLP_REQ_ID(pcie->root_bus_nr, RP_DEVFN)) << 16) | (tag << 8) | (be))
+ (((PCI_DEVID(pcie->root_bus_nr, RP_DEVFN)) << 16) | (tag << 8) | (be))
#define TLP_CFG_DW2(bus, devfn, offset) \
(((bus) << 24) | ((devfn) << 16) | (offset))
#define TLP_COMP_STATUS(s) (((s) >> 13) & 7)
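For reference, PCI_DEVID() is the generic requester-ID helper that the removed TLP_REQ_ID() macro duplicated: it packs the bus number into bits 15:8 and the devfn into bits 7:0, so bus 0x01 with devfn 0x08 (slot 1, function 0) yields 0x0108, which TLP_CFG_DW1 then shifts into bits 31:16 of the TLP header word.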
@@ -78,9 +78,25 @@
#define S10_TLP_FMTTYPE_CFGWR0 0x45
#define S10_TLP_FMTTYPE_CFGWR1 0x44
+#define AGLX_RP_CFG_ADDR(pcie, reg) (((pcie)->hip_base) + (reg))
+#define AGLX_RP_SECONDARY(pcie) \
+ readb(AGLX_RP_CFG_ADDR(pcie, PCI_SECONDARY_BUS))
+
+#define AGLX_BDF_REG 0x00002004
+#define AGLX_ROOT_PORT_IRQ_STATUS 0x14c
+#define AGLX_ROOT_PORT_IRQ_ENABLE 0x150
+#define CFG_AER BIT(4)
+
+#define AGLX_CFG_TARGET GENMASK(13, 12)
+#define AGLX_CFG_TARGET_TYPE0 0
+#define AGLX_CFG_TARGET_TYPE1 1
+#define AGLX_CFG_TARGET_LOCAL_2000 2
+#define AGLX_CFG_TARGET_LOCAL_3000 3
+
enum altera_pcie_version {
ALTERA_PCIE_V1 = 0,
ALTERA_PCIE_V2,
+ ALTERA_PCIE_V3,
};
struct altera_pcie {
@@ -103,6 +119,11 @@ struct altera_pcie_ops {
int size, u32 *value);
int (*rp_write_cfg)(struct altera_pcie *pcie, u8 busno,
int where, int size, u32 value);
+ int (*ep_read_cfg)(struct altera_pcie *pcie, u8 busno,
+ unsigned int devfn, int where, int size, u32 *value);
+ int (*ep_write_cfg)(struct altera_pcie *pcie, u8 busno,
+ unsigned int devfn, int where, int size, u32 value);
+ void (*rp_isr)(struct irq_desc *desc);
};
struct altera_pcie_data {
@@ -113,6 +134,9 @@ struct altera_pcie_data {
u32 cfgrd1;
u32 cfgwr0;
u32 cfgwr1;
+ u32 port_conf_offset;
+ u32 port_irq_status_offset;
+ u32 port_irq_enable_offset;
};
struct tlp_rp_regpair_t {
@@ -132,6 +156,28 @@ static inline u32 cra_readl(struct altera_pcie *pcie, const u32 reg)
return readl_relaxed(pcie->cra_base + reg);
}
+static inline void cra_writew(struct altera_pcie *pcie, const u32 value,
+ const u32 reg)
+{
+ writew_relaxed(value, pcie->cra_base + reg);
+}
+
+static inline u32 cra_readw(struct altera_pcie *pcie, const u32 reg)
+{
+ return readw_relaxed(pcie->cra_base + reg);
+}
+
+static inline void cra_writeb(struct altera_pcie *pcie, const u32 value,
+ const u32 reg)
+{
+ writeb_relaxed(value, pcie->cra_base + reg);
+}
+
+static inline u32 cra_readb(struct altera_pcie *pcie, const u32 reg)
+{
+ return readb_relaxed(pcie->cra_base + reg);
+}
+
static bool altera_pcie_link_up(struct altera_pcie *pcie)
{
return !!((cra_readl(pcie, RP_LTSSM) & RP_LTSSM_MASK) == LTSSM_L0);
@@ -146,11 +192,20 @@ static bool s10_altera_pcie_link_up(struct altera_pcie *pcie)
return !!(readw(addr) & PCI_EXP_LNKSTA_DLLLA);
}
+static bool aglx_altera_pcie_link_up(struct altera_pcie *pcie)
+{
+ void __iomem *addr = AGLX_RP_CFG_ADDR(pcie,
+ pcie->pcie_data->cap_offset +
+ PCI_EXP_LNKSTA);
+
+ return (readw_relaxed(addr) & PCI_EXP_LNKSTA_DLLLA);
+}
+
/*
* Altera PCIe port uses BAR0 of RC's configuration space as the translation
* from PCI bus to native BUS. Entire DDR region is mapped into PCIe space
* using these registers, so it can be reached by DMA from EP devices.
- * This BAR0 will also access to MSI vector when receiving MSI/MSIX interrupt
+ * This BAR0 will also access to MSI vector when receiving MSI/MSI-X interrupt
* from EP devices, eventually trigger interrupt to GIC. The BAR0 of bridge
* should be hidden during enumeration to avoid the sizing and resource
* allocation by PCIe core.
@@ -426,6 +481,103 @@ static int s10_rp_write_cfg(struct altera_pcie *pcie, u8 busno,
return PCIBIOS_SUCCESSFUL;
}
+static int aglx_rp_read_cfg(struct altera_pcie *pcie, int where,
+ int size, u32 *value)
+{
+ void __iomem *addr = AGLX_RP_CFG_ADDR(pcie, where);
+
+ switch (size) {
+ case 1:
+ *value = readb_relaxed(addr);
+ break;
+ case 2:
+ *value = readw_relaxed(addr);
+ break;
+ default:
+ *value = readl_relaxed(addr);
+ break;
+ }
+
+ /* Interrupt PIN not programmed in hardware, set to INTA. */
+ if (where == PCI_INTERRUPT_PIN && size == 1 && !(*value))
+ *value = 0x01;
+ else if (where == PCI_INTERRUPT_LINE && !(*value & 0xff00))
+ *value |= 0x0100;
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int aglx_rp_write_cfg(struct altera_pcie *pcie, u8 busno,
+ int where, int size, u32 value)
+{
+ void __iomem *addr = AGLX_RP_CFG_ADDR(pcie, where);
+
+ switch (size) {
+ case 1:
+ writeb_relaxed(value, addr);
+ break;
+ case 2:
+ writew_relaxed(value, addr);
+ break;
+ default:
+ writel_relaxed(value, addr);
+ break;
+ }
+
+ /*
+ * Monitor changes to PCI_PRIMARY_BUS register on Root Port
+ * and update local copy of root bus number accordingly.
+ */
+ if (busno == pcie->root_bus_nr && where == PCI_PRIMARY_BUS)
+ pcie->root_bus_nr = value & 0xff;
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int aglx_ep_write_cfg(struct altera_pcie *pcie, u8 busno,
+ unsigned int devfn, int where, int size, u32 value)
+{
+ cra_writel(pcie, ((busno << 8) | devfn), AGLX_BDF_REG);
+ if (busno > AGLX_RP_SECONDARY(pcie))
+ where |= FIELD_PREP(AGLX_CFG_TARGET, AGLX_CFG_TARGET_TYPE1);
+
+ switch (size) {
+ case 1:
+ cra_writeb(pcie, value, where);
+ break;
+ case 2:
+ cra_writew(pcie, value, where);
+ break;
+ default:
+ cra_writel(pcie, value, where);
+ break;
+ }
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int aglx_ep_read_cfg(struct altera_pcie *pcie, u8 busno,
+ unsigned int devfn, int where, int size, u32 *value)
+{
+ cra_writel(pcie, ((busno << 8) | devfn), AGLX_BDF_REG);
+ if (busno > AGLX_RP_SECONDARY(pcie))
+ where |= FIELD_PREP(AGLX_CFG_TARGET, AGLX_CFG_TARGET_TYPE1);
+
+ switch (size) {
+ case 1:
+ *value = cra_readb(pcie, where);
+ break;
+ case 2:
+ *value = cra_readw(pcie, where);
+ break;
+ default:
+ *value = cra_readl(pcie, where);
+ break;
+ }
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
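A short worked example of the config routing above: AGLX_CFG_TARGET is GENMASK(13, 12), so FIELD_PREP(AGLX_CFG_TARGET, AGLX_CFG_TARGET_TYPE1) evaluates to 0x1000; when the target bus lies beyond the Root Port's secondary bus, that value is ORed into the register offset and the access becomes a Type 1 configuration request, otherwise the field stays 0 and a Type 0 request is generated for the directly attached device.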
static int _altera_pcie_cfg_read(struct altera_pcie *pcie, u8 busno,
unsigned int devfn, int where, int size,
u32 *value)
@@ -438,6 +590,10 @@ static int _altera_pcie_cfg_read(struct altera_pcie *pcie, u8 busno,
return pcie->pcie_data->ops->rp_read_cfg(pcie, where,
size, value);
+ if (pcie->pcie_data->ops->ep_read_cfg)
+ return pcie->pcie_data->ops->ep_read_cfg(pcie, busno, devfn,
+ where, size, value);
+
switch (size) {
case 1:
byte_en = 1 << (where & 3);
@@ -482,6 +638,10 @@ static int _altera_pcie_cfg_write(struct altera_pcie *pcie, u8 busno,
return pcie->pcie_data->ops->rp_write_cfg(pcie, busno,
where, size, value);
+ if (pcie->pcie_data->ops->ep_write_cfg)
+ return pcie->pcie_data->ops->ep_write_cfg(pcie, busno, devfn,
+ where, size, value);
+
switch (size) {
case 1:
data32 = (value & 0xff) << shift;
@@ -660,7 +820,32 @@ static void altera_pcie_isr(struct irq_desc *desc)
dev_err_ratelimited(dev, "unexpected IRQ, INT%d\n", bit);
}
}
+ chained_irq_exit(chip, desc);
+}
+
+static void aglx_isr(struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct altera_pcie *pcie;
+ struct device *dev;
+ u32 status;
+ int ret;
+
+ chained_irq_enter(chip, desc);
+ pcie = irq_desc_get_handler_data(desc);
+ dev = &pcie->pdev->dev;
+
+ status = readl(pcie->hip_base + pcie->pcie_data->port_conf_offset +
+ pcie->pcie_data->port_irq_status_offset);
+ if (status & CFG_AER) {
+ writel(CFG_AER, (pcie->hip_base + pcie->pcie_data->port_conf_offset +
+ pcie->pcie_data->port_irq_status_offset));
+
+ ret = generic_handle_domain_irq(pcie->irq_domain, 0);
+ if (ret)
+ dev_err_ratelimited(dev, "unexpected IRQ %d\n", pcie->irq);
+ }
chained_irq_exit(chip, desc);
}
@@ -670,7 +855,7 @@ static int altera_pcie_init_irq_domain(struct altera_pcie *pcie)
struct device_node *node = dev->of_node;
/* Setup INTx */
- pcie->irq_domain = irq_domain_add_linear(node, PCI_NUM_INTX,
+ pcie->irq_domain = irq_domain_create_linear(of_fwnode_handle(node), PCI_NUM_INTX,
&intx_domain_ops, pcie);
if (!pcie->irq_domain) {
dev_err(dev, "Failed to get a INTx IRQ domain\n");
@@ -695,9 +880,9 @@ static int altera_pcie_parse_dt(struct altera_pcie *pcie)
if (IS_ERR(pcie->cra_base))
return PTR_ERR(pcie->cra_base);
- if (pcie->pcie_data->version == ALTERA_PCIE_V2) {
- pcie->hip_base =
- devm_platform_ioremap_resource_byname(pdev, "Hip");
+ if (pcie->pcie_data->version == ALTERA_PCIE_V2 ||
+ pcie->pcie_data->version == ALTERA_PCIE_V3) {
+ pcie->hip_base = devm_platform_ioremap_resource_byname(pdev, "Hip");
if (IS_ERR(pcie->hip_base))
return PTR_ERR(pcie->hip_base);
}
@@ -707,7 +892,7 @@ static int altera_pcie_parse_dt(struct altera_pcie *pcie)
if (pcie->irq < 0)
return pcie->irq;
- irq_set_chained_handler_and_data(pcie->irq, altera_pcie_isr, pcie);
+ irq_set_chained_handler_and_data(pcie->irq, pcie->pcie_data->ops->rp_isr, pcie);
return 0;
}
@@ -720,6 +905,7 @@ static const struct altera_pcie_ops altera_pcie_ops_1_0 = {
.tlp_read_pkt = tlp_read_packet,
.tlp_write_pkt = tlp_write_packet,
.get_link_status = altera_pcie_link_up,
+ .rp_isr = altera_pcie_isr,
};
static const struct altera_pcie_ops altera_pcie_ops_2_0 = {
@@ -728,6 +914,16 @@ static const struct altera_pcie_ops altera_pcie_ops_2_0 = {
.get_link_status = s10_altera_pcie_link_up,
.rp_read_cfg = s10_rp_read_cfg,
.rp_write_cfg = s10_rp_write_cfg,
+ .rp_isr = altera_pcie_isr,
+};
+
+static const struct altera_pcie_ops altera_pcie_ops_3_0 = {
+ .rp_read_cfg = aglx_rp_read_cfg,
+ .rp_write_cfg = aglx_rp_write_cfg,
+ .get_link_status = aglx_altera_pcie_link_up,
+ .ep_read_cfg = aglx_ep_read_cfg,
+ .ep_write_cfg = aglx_ep_write_cfg,
+ .rp_isr = aglx_isr,
};
static const struct altera_pcie_data altera_pcie_1_0_data = {
@@ -750,11 +946,44 @@ static const struct altera_pcie_data altera_pcie_2_0_data = {
.cfgwr1 = S10_TLP_FMTTYPE_CFGWR1,
};
+static const struct altera_pcie_data altera_pcie_3_0_f_tile_data = {
+ .ops = &altera_pcie_ops_3_0,
+ .version = ALTERA_PCIE_V3,
+ .cap_offset = 0x70,
+ .port_conf_offset = 0x14000,
+ .port_irq_status_offset = AGLX_ROOT_PORT_IRQ_STATUS,
+ .port_irq_enable_offset = AGLX_ROOT_PORT_IRQ_ENABLE,
+};
+
+static const struct altera_pcie_data altera_pcie_3_0_p_tile_data = {
+ .ops = &altera_pcie_ops_3_0,
+ .version = ALTERA_PCIE_V3,
+ .cap_offset = 0x70,
+ .port_conf_offset = 0x104000,
+ .port_irq_status_offset = AGLX_ROOT_PORT_IRQ_STATUS,
+ .port_irq_enable_offset = AGLX_ROOT_PORT_IRQ_ENABLE,
+};
+
+static const struct altera_pcie_data altera_pcie_3_0_r_tile_data = {
+ .ops = &altera_pcie_ops_3_0,
+ .version = ALTERA_PCIE_V3,
+ .cap_offset = 0x70,
+ .port_conf_offset = 0x1300,
+ .port_irq_status_offset = 0x0,
+ .port_irq_enable_offset = 0x4,
+};
+
static const struct of_device_id altera_pcie_of_match[] = {
{.compatible = "altr,pcie-root-port-1.0",
.data = &altera_pcie_1_0_data },
{.compatible = "altr,pcie-root-port-2.0",
.data = &altera_pcie_2_0_data },
+ {.compatible = "altr,pcie-root-port-3.0-f-tile",
+ .data = &altera_pcie_3_0_f_tile_data },
+ {.compatible = "altr,pcie-root-port-3.0-p-tile",
+ .data = &altera_pcie_3_0_p_tile_data },
+ {.compatible = "altr,pcie-root-port-3.0-r-tile",
+ .data = &altera_pcie_3_0_r_tile_data },
{},
};
@@ -792,11 +1021,18 @@ static int altera_pcie_probe(struct platform_device *pdev)
return ret;
}
- /* clear all interrupts */
- cra_writel(pcie, P2A_INT_STS_ALL, P2A_INT_STATUS);
- /* enable all interrupts */
- cra_writel(pcie, P2A_INT_ENA_ALL, P2A_INT_ENABLE);
- altera_pcie_host_init(pcie);
+ if (pcie->pcie_data->version == ALTERA_PCIE_V1 ||
+ pcie->pcie_data->version == ALTERA_PCIE_V2) {
+ /* clear all interrupts */
+ cra_writel(pcie, P2A_INT_STS_ALL, P2A_INT_STATUS);
+ /* enable all interrupts */
+ cra_writel(pcie, P2A_INT_ENA_ALL, P2A_INT_ENABLE);
+ altera_pcie_host_init(pcie);
+ } else if (pcie->pcie_data->version == ALTERA_PCIE_V3) {
+ writel(CFG_AER,
+ pcie->hip_base + pcie->pcie_data->port_conf_offset +
+ pcie->pcie_data->port_irq_enable_offset);
+ }
bridge->sysdata = pcie;
bridge->busnr = pcie->root_bus_nr;
@@ -816,14 +1052,15 @@ static void altera_pcie_remove(struct platform_device *pdev)
}
static struct platform_driver altera_pcie_driver = {
- .probe = altera_pcie_probe,
- .remove_new = altera_pcie_remove,
+ .probe = altera_pcie_probe,
+ .remove = altera_pcie_remove,
.driver = {
- .name = "altera-pcie",
+ .name = "altera-pcie",
.of_match_table = altera_pcie_of_match,
},
};
MODULE_DEVICE_TABLE(of, altera_pcie_of_match);
module_platform_driver(altera_pcie_driver);
+MODULE_DESCRIPTION("Altera PCIe host controller driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/pcie-apple.c b/drivers/pci/controller/pcie-apple.c
index f7a248393a8f..3d412a931774 100644
--- a/drivers/pci/controller/pcie-apple.c
+++ b/drivers/pci/controller/pcie-apple.c
@@ -22,11 +22,11 @@
#include <linux/kernel.h>
#include <linux/iopoll.h>
#include <linux/irqchip/chained_irq.h>
+#include <linux/irqchip/irq-msi-lib.h>
#include <linux/irqdomain.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/msi.h>
-#include <linux/notifier.h>
#include <linux/of_irq.h>
#include <linux/pci-ecam.h>
@@ -134,7 +134,6 @@ struct apple_pcie {
struct mutex lock;
struct device *dev;
void __iomem *base;
- struct irq_domain *domain;
unsigned long *bitmap;
struct list_head ports;
struct completion event;
@@ -163,27 +162,6 @@ static void rmw_clear(u32 clr, void __iomem *addr)
writel_relaxed(readl_relaxed(addr) & ~clr, addr);
}
-static void apple_msi_top_irq_mask(struct irq_data *d)
-{
- pci_msi_mask_irq(d);
- irq_chip_mask_parent(d);
-}
-
-static void apple_msi_top_irq_unmask(struct irq_data *d)
-{
- pci_msi_unmask_irq(d);
- irq_chip_unmask_parent(d);
-}
-
-static struct irq_chip apple_msi_top_chip = {
- .name = "PCIe MSI",
- .irq_mask = apple_msi_top_irq_mask,
- .irq_unmask = apple_msi_top_irq_unmask,
- .irq_eoi = irq_chip_eoi_parent,
- .irq_set_affinity = irq_chip_set_affinity_parent,
- .irq_set_type = irq_chip_set_type_parent,
-};
-
static void apple_msi_compose_msg(struct irq_data *data, struct msi_msg *msg)
{
msg->address_hi = upper_32_bits(DOORBELL_ADDR);
@@ -227,8 +205,7 @@ static int apple_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
for (i = 0; i < nr_irqs; i++) {
irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
- &apple_msi_bottom_chip,
- domain->host_data);
+ &apple_msi_bottom_chip, pcie);
}
return 0;
@@ -252,12 +229,6 @@ static const struct irq_domain_ops apple_msi_domain_ops = {
.free = apple_msi_domain_free,
};
-static struct msi_domain_info apple_msi_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX),
- .chip = &apple_msi_top_chip,
-};
-
static void apple_port_irq_mask(struct irq_data *data)
{
struct apple_pcie_port *port = irq_data_get_irq_chip_data(data);
@@ -596,11 +567,28 @@ static int apple_pcie_setup_port(struct apple_pcie *pcie,
return 0;
}
+static const struct msi_parent_ops apple_msi_parent_ops = {
+ .supported_flags = (MSI_GENERIC_FLAGS_MASK |
+ MSI_FLAG_PCI_MSIX |
+ MSI_FLAG_MULTI_PCI_MSI),
+ .required_flags = (MSI_FLAG_USE_DEF_DOM_OPS |
+ MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_PCI_MSI_MASK_PARENT),
+ .chip_flags = MSI_CHIP_FLAG_SET_EOI,
+ .bus_select_token = DOMAIN_BUS_PCI_MSI,
+ .init_dev_msi_info = msi_lib_init_dev_msi_info,
+};
+
static int apple_msi_init(struct apple_pcie *pcie)
{
struct fwnode_handle *fwnode = dev_fwnode(pcie->dev);
+ struct irq_domain_info info = {
+ .fwnode = fwnode,
+ .ops = &apple_msi_domain_ops,
+ .size = pcie->nvecs,
+ .host_data = pcie,
+ };
struct of_phandle_args args = {};
- struct irq_domain *parent;
int ret;
ret = of_parse_phandle_with_args(to_of_node(fwnode), "msi-ranges",
@@ -620,28 +608,16 @@ static int apple_msi_init(struct apple_pcie *pcie)
if (!pcie->bitmap)
return -ENOMEM;
- parent = irq_find_matching_fwspec(&pcie->fwspec, DOMAIN_BUS_WIRED);
- if (!parent) {
+ info.parent = irq_find_matching_fwspec(&pcie->fwspec, DOMAIN_BUS_WIRED);
+ if (!info.parent) {
dev_err(pcie->dev, "failed to find parent domain\n");
return -ENXIO;
}
- parent = irq_domain_create_hierarchy(parent, 0, pcie->nvecs, fwnode,
- &apple_msi_domain_ops, pcie);
- if (!parent) {
+ if (!msi_create_parent_irq_domain(&info, &apple_msi_parent_ops)) {
dev_err(pcie->dev, "failed to create IRQ domain\n");
return -ENOMEM;
}
- irq_domain_update_bus_token(parent, DOMAIN_BUS_NEXUS);
-
- pcie->domain = pci_msi_create_irq_domain(fwnode, &apple_msi_info,
- parent);
- if (!pcie->domain) {
- dev_err(pcie->dev, "failed to create MSI domain\n");
- irq_domain_remove(parent);
- return -ENOMEM;
- }
-
return 0;
}
@@ -667,12 +643,16 @@ static struct apple_pcie_port *apple_pcie_get_port(struct pci_dev *pdev)
return NULL;
}
-static int apple_pcie_add_device(struct apple_pcie_port *port,
- struct pci_dev *pdev)
+static int apple_pcie_enable_device(struct pci_host_bridge *bridge, struct pci_dev *pdev)
{
u32 sid, rid = pci_dev_id(pdev);
+ struct apple_pcie_port *port;
int idx, err;
+ port = apple_pcie_get_port(pdev);
+ if (!port)
+ return 0;
+
dev_dbg(&pdev->dev, "added to bus %s, index %d\n",
pci_name(pdev->bus->self), port->idx);
@@ -698,12 +678,16 @@ static int apple_pcie_add_device(struct apple_pcie_port *port,
return idx >= 0 ? 0 : -ENOSPC;
}
-static void apple_pcie_release_device(struct apple_pcie_port *port,
- struct pci_dev *pdev)
+static void apple_pcie_disable_device(struct pci_host_bridge *bridge, struct pci_dev *pdev)
{
+ struct apple_pcie_port *port;
u32 rid = pci_dev_id(pdev);
int idx;
+ port = apple_pcie_get_port(pdev);
+ if (!port)
+ return;
+
mutex_lock(&port->pcie->lock);
for_each_set_bit(idx, port->sid_map, port->sid_map_sz) {
@@ -721,50 +705,10 @@ static void apple_pcie_release_device(struct apple_pcie_port *port,
mutex_unlock(&port->pcie->lock);
}
-static int apple_pcie_bus_notifier(struct notifier_block *nb,
- unsigned long action,
- void *data)
-{
- struct device *dev = data;
- struct pci_dev *pdev = to_pci_dev(dev);
- struct apple_pcie_port *port;
- int err;
-
- /*
- * This is a bit ugly. We assume that if we get notified for
- * any PCI device, we must be in charge of it, and that there
- * is no other PCI controller in the whole system. It probably
- * holds for now, but who knows for how long?
- */
- port = apple_pcie_get_port(pdev);
- if (!port)
- return NOTIFY_DONE;
-
- switch (action) {
- case BUS_NOTIFY_ADD_DEVICE:
- err = apple_pcie_add_device(port, pdev);
- if (err)
- return notifier_from_errno(err);
- break;
- case BUS_NOTIFY_DEL_DEVICE:
- apple_pcie_release_device(port, pdev);
- break;
- default:
- return NOTIFY_DONE;
- }
-
- return NOTIFY_OK;
-}
-
-static struct notifier_block apple_pcie_nb = {
- .notifier_call = apple_pcie_bus_notifier,
-};
-
static int apple_pcie_init(struct pci_config_window *cfg)
{
struct device *dev = cfg->parent;
struct platform_device *platform = to_platform_device(dev);
- struct device_node *of_port;
struct apple_pcie *pcie;
int ret;
@@ -787,11 +731,10 @@ static int apple_pcie_init(struct pci_config_window *cfg)
if (ret)
return ret;
- for_each_child_of_node(dev->of_node, of_port) {
+ for_each_child_of_node_scoped(dev->of_node, of_port) {
ret = apple_pcie_setup_port(pcie, of_port);
if (ret) {
dev_err(pcie->dev, "Port %pOF setup fail: %d\n", of_port, ret);
- of_node_put(of_port);
return ret;
}
}
@@ -799,23 +742,10 @@ static int apple_pcie_init(struct pci_config_window *cfg)
return 0;
}
-static int apple_pcie_probe(struct platform_device *pdev)
-{
- int ret;
-
- ret = bus_register_notifier(&pci_bus_type, &apple_pcie_nb);
- if (ret)
- return ret;
-
- ret = pci_host_common_probe(pdev);
- if (ret)
- bus_unregister_notifier(&pci_bus_type, &apple_pcie_nb);
-
- return ret;
-}
-
static const struct pci_ecam_ops apple_pcie_cfg_ecam_ops = {
.init = apple_pcie_init,
+ .enable_device = apple_pcie_enable_device,
+ .disable_device = apple_pcie_disable_device,
.pci_ops = {
.map_bus = pci_ecam_map_bus,
.read = pci_generic_config_read,
@@ -830,7 +760,7 @@ static const struct of_device_id apple_pcie_of_match[] = {
MODULE_DEVICE_TABLE(of, apple_pcie_of_match);
static struct platform_driver apple_pcie_driver = {
- .probe = apple_pcie_probe,
+ .probe = pci_host_common_probe,
.driver = {
.name = "pcie-apple",
.of_match_table = apple_pcie_of_match,
@@ -839,4 +769,5 @@ static struct platform_driver apple_pcie_driver = {
};
module_platform_driver(apple_pcie_driver);
+MODULE_DESCRIPTION("Apple PCIe host bridge driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/pcie-brcmstb.c b/drivers/pci/controller/pcie-brcmstb.c
index c08683febdd4..92887b394eb4 100644
--- a/drivers/pci/controller/pcie-brcmstb.c
+++ b/drivers/pci/controller/pcie-brcmstb.c
@@ -40,7 +40,7 @@
/* Broadcom STB PCIe Register Offsets */
#define PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1 0x0188
#define PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1_ENDIAN_MODE_BAR2_MASK 0xc
-#define PCIE_RC_CFG_VENDOR_SPCIFIC_REG1_LITTLE_ENDIAN 0x0
+#define PCIE_RC_CFG_VENDOR_SPECIFIC_REG1_LITTLE_ENDIAN 0x0
#define PCIE_RC_CFG_PRIV1_ID_VAL3 0x043c
#define PCIE_RC_CFG_PRIV1_ID_VAL3_CLASS_CODE_MASK 0xffffff
@@ -55,6 +55,10 @@
#define PCIE_RC_DL_MDIO_WR_DATA 0x1104
#define PCIE_RC_DL_MDIO_RD_DATA 0x1108
+#define PCIE_RC_PL_PHY_CTL_15 0x184c
+#define PCIE_RC_PL_PHY_CTL_15_DIS_PLL_PD_MASK 0x400000
+#define PCIE_RC_PL_PHY_CTL_15_PM_CLK_PERIOD_MASK 0xff
+
#define PCIE_MISC_MISC_CTRL 0x4008
#define PCIE_MISC_MISC_CTRL_PCIE_RCB_64B_MODE_MASK 0x80
#define PCIE_MISC_MISC_CTRL_PCIE_RCB_MPS_MODE_MASK 0x400
@@ -75,15 +79,19 @@
#define PCIE_MEM_WIN0_HI(win) \
PCIE_MISC_CPU_2_PCIE_MEM_WIN0_HI + ((win) * 8)
+/*
+ * NOTE: You may see the term "BAR" in a number of register names used by
+ * this driver. The term is an artifact of when the HW core was an
+ * endpoint device (EP). Now it is a root complex (RC) and anywhere a
+ * register has the term "BAR" it is related to an inbound window.
+ */
+
+#define PCIE_BRCM_MAX_INBOUND_WINS 16
#define PCIE_MISC_RC_BAR1_CONFIG_LO 0x402c
#define PCIE_MISC_RC_BAR1_CONFIG_LO_SIZE_MASK 0x1f
-#define PCIE_MISC_RC_BAR2_CONFIG_LO 0x4034
-#define PCIE_MISC_RC_BAR2_CONFIG_LO_SIZE_MASK 0x1f
-#define PCIE_MISC_RC_BAR2_CONFIG_HI 0x4038
+#define PCIE_MISC_RC_BAR4_CONFIG_LO 0x40d4
-#define PCIE_MISC_RC_BAR3_CONFIG_LO 0x403c
-#define PCIE_MISC_RC_BAR3_CONFIG_LO_SIZE_MASK 0x1f
#define PCIE_MISC_MSI_BAR_CONFIG_LO 0x4044
#define PCIE_MISC_MSI_BAR_CONFIG_HI 0x4048
@@ -122,7 +130,6 @@
#define PCIE_MEM_WIN0_LIMIT_HI(win) \
PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LIMIT_HI + ((win) * 8)
-#define PCIE_MISC_HARD_PCIE_HARD_DEBUG 0x4204
#define PCIE_MISC_HARD_PCIE_HARD_DEBUG_CLKREQ_DEBUG_ENABLE_MASK 0x2
#define PCIE_MISC_HARD_PCIE_HARD_DEBUG_L1SS_ENABLE_MASK 0x200000
#define PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK 0x08000000
@@ -131,17 +138,18 @@
(PCIE_MISC_HARD_PCIE_HARD_DEBUG_CLKREQ_DEBUG_ENABLE_MASK | \
PCIE_MISC_HARD_PCIE_HARD_DEBUG_L1SS_ENABLE_MASK)
-#define PCIE_INTR2_CPU_BASE 0x4300
+#define PCIE_MISC_UBUS_BAR1_CONFIG_REMAP 0x40ac
+#define PCIE_MISC_UBUS_BAR1_CONFIG_REMAP_ACCESS_EN_MASK BIT(0)
+#define PCIE_MISC_UBUS_BAR4_CONFIG_REMAP 0x410c
+
#define PCIE_MSI_INTR2_BASE 0x4500
-/* Offsets from PCIE_INTR2_CPU_BASE and PCIE_MSI_INTR2_BASE */
+
+/* Offsets from INTR2_CPU and MSI_INTR2 BASE offsets */
#define MSI_INT_STATUS 0x0
#define MSI_INT_CLR 0x8
#define MSI_INT_MASK_SET 0x10
#define MSI_INT_MASK_CLR 0x14
-#define PCIE_EXT_CFG_DATA 0x8000
-#define PCIE_EXT_CFG_INDEX 0x9000
-
#define PCIE_RGR1_SW_INIT_1_PERST_MASK 0x1
#define PCIE_RGR1_SW_INIT_1_PERST_SHIFT 0x0
@@ -167,8 +175,9 @@
#define MDIO_PORT0 0x0
#define MDIO_DATA_MASK 0x7fffffff
#define MDIO_PORT_MASK 0xf0000
+#define MDIO_PORT_EXT_MASK 0x200000
#define MDIO_REGAD_MASK 0xffff
-#define MDIO_CMD_MASK 0xfff00000
+#define MDIO_CMD_MASK 0x00100000
#define MDIO_CMD_READ 0x1
#define MDIO_CMD_WRITE 0x0
#define MDIO_DATA_DONE_MASK 0x80000000
@@ -184,9 +193,11 @@
#define SSC_STATUS_PLL_LOCK_MASK 0x800
#define PCIE_BRCM_MAX_MEMC 3
-#define IDX_ADDR(pcie) (pcie->reg_offsets[EXT_CFG_INDEX])
-#define DATA_ADDR(pcie) (pcie->reg_offsets[EXT_CFG_DATA])
-#define PCIE_RGR1_SW_INIT_1(pcie) (pcie->reg_offsets[RGR1_SW_INIT_1])
+#define IDX_ADDR(pcie) ((pcie)->cfg->offsets[EXT_CFG_INDEX])
+#define DATA_ADDR(pcie) ((pcie)->cfg->offsets[EXT_CFG_DATA])
+#define PCIE_RGR1_SW_INIT_1(pcie) ((pcie)->cfg->offsets[RGR1_SW_INIT_1])
+#define HARD_DEBUG(pcie) ((pcie)->cfg->offsets[PCIE_HARD_DEBUG])
+#define INTR2_CPU_BASE(pcie) ((pcie)->cfg->offsets[PCIE_INTR2_CPU_BASE])
/* Rescal registers */
#define PCIE_DVT_PMU_PCIE_PHY_CTRL 0xc700
@@ -205,27 +216,44 @@ enum {
RGR1_SW_INIT_1,
EXT_CFG_INDEX,
EXT_CFG_DATA,
+ PCIE_HARD_DEBUG,
+ PCIE_INTR2_CPU_BASE,
};
-enum {
- RGR1_SW_INIT_1_INIT_MASK,
- RGR1_SW_INIT_1_INIT_SHIFT,
-};
-
-enum pcie_type {
+enum pcie_soc_base {
GENERIC,
- BCM7425,
- BCM7435,
+ BCM2711,
BCM4908,
BCM7278,
- BCM2711,
+ BCM7425,
+ BCM7435,
+ BCM7712,
+};
+
+struct inbound_win {
+ u64 size;
+ u64 pci_offset;
+ u64 cpu_addr;
};
+/*
+ * The RESCAL block is tied to PCIe controller #1, regardless of the number of
+ * controllers, and turning off PCIe controller #1 prevents access to the RESCAL
+ * register blocks, therefore no other controller can access this register
+ * space, and depending upon the bus fabric we may get a timeout (UBUS/GISB),
+ * or a hang (AXI).
+ */
+#define CFG_QUIRK_AVOID_BRIDGE_SHUTDOWN BIT(0)
+
struct pcie_cfg_data {
const int *offsets;
- const enum pcie_type type;
- void (*perst_set)(struct brcm_pcie *pcie, u32 val);
- void (*bridge_sw_init_set)(struct brcm_pcie *pcie, u32 val);
+ const enum pcie_soc_base soc_base;
+ const bool has_phy;
+ const u32 quirks;
+ u8 num_inbound_wins;
+ int (*perst_set)(struct brcm_pcie *pcie, u32 val);
+ int (*bridge_sw_init_set)(struct brcm_pcie *pcie, u32 val);
+ int (*post_setup)(struct brcm_pcie *pcie);
};
struct subdev_regulators {
@@ -261,22 +289,21 @@ struct brcm_pcie {
int gen;
u64 msi_target_addr;
struct brcm_msi *msi;
- const int *reg_offsets;
- enum pcie_type type;
struct reset_control *rescal;
struct reset_control *perst_reset;
+ struct reset_control *bridge_reset;
+ struct reset_control *swinit_reset;
int num_memc;
u64 memc_size[PCIE_BRCM_MAX_MEMC];
u32 hw_rev;
- void (*perst_set)(struct brcm_pcie *pcie, u32 val);
- void (*bridge_sw_init_set)(struct brcm_pcie *pcie, u32 val);
struct subdev_regulators *sr;
bool ep_wakeup_capable;
+ const struct pcie_cfg_data *cfg;
};
static inline bool is_bmips(const struct brcm_pcie *pcie)
{
- return pcie->type == BCM7435 || pcie->type == BCM7425;
+ return pcie->cfg->soc_base == BCM7435 || pcie->cfg->soc_base == BCM7425;
}
/*
@@ -290,8 +317,8 @@ static int brcm_pcie_encode_ibar_size(u64 size)
if (log2_in >= 12 && log2_in <= 15)
/* Covers 4KB to 32KB (inclusive) */
return (log2_in - 12) + 0x1c;
- else if (log2_in >= 16 && log2_in <= 35)
- /* Covers 64KB to 32GB, (inclusive) */
+ else if (log2_in >= 16 && log2_in <= 36)
+ /* Covers 64KB to 64GB, (inclusive) */
return log2_in - 15;
/* Something is awry so disable */
return 0;
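A quick worked example of the encoding above (taking log2_in as the log2 of the window size): a 1 GB inbound window gives log2_in = 30, which falls in the 64 KB-64 GB branch and encodes as 30 - 15 = 15, while the new upper bound of 36 lets a 64 GB window encode as 36 - 15 = 21; anything outside both ranges still encodes as 0 and the window is disabled.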
@@ -301,6 +328,7 @@ static u32 brcm_pcie_mdio_form_pkt(int port, int regad, int cmd)
{
u32 pkt = 0;
+ pkt |= FIELD_PREP(MDIO_PORT_EXT_MASK, port >> 4);
pkt |= FIELD_PREP(MDIO_PORT_MASK, port);
pkt |= FIELD_PREP(MDIO_REGAD_MASK, regad);
pkt |= FIELD_PREP(MDIO_CMD_MASK, cmd);
@@ -384,17 +412,17 @@ static int brcm_pcie_set_ssc(struct brcm_pcie *pcie)
static void brcm_pcie_set_gen(struct brcm_pcie *pcie, int gen)
{
u16 lnkctl2 = readw(pcie->base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKCTL2);
- u32 lnkcap = readl(pcie->base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKCAP);
+ u32 lnkcap = readl(pcie->base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY);
- lnkcap = (lnkcap & ~PCI_EXP_LNKCAP_SLS) | gen;
- writel(lnkcap, pcie->base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKCAP);
+ u32p_replace_bits(&lnkcap, gen, PCI_EXP_LNKCAP_SLS);
+ writel(lnkcap, pcie->base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY);
- lnkctl2 = (lnkctl2 & ~0xf) | gen;
+ u16p_replace_bits(&lnkctl2, gen, PCI_EXP_LNKCTL2_TLS);
writew(lnkctl2, pcie->base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKCTL2);
}
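The link-speed hunk above uses the *_replace_bits() helpers from <linux/bitfield.h>; a minimal illustrative sketch (the wrapper function is not part of the driver):

	/* Replace only the Supported Link Speeds field, leaving the other bits intact. */
	static u32 set_link_speed_cap(u32 lnkcap, int gen)
	{
		u32p_replace_bits(&lnkcap, gen, PCI_EXP_LNKCAP_SLS);
		return lnkcap;
	}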
static void brcm_pcie_set_outbound_win(struct brcm_pcie *pcie,
- unsigned int win, u64 cpu_addr,
+ u8 win, u64 cpu_addr,
u64 pcie_addr, u64 size)
{
u32 cpu_addr_mb_high, limit_addr_mb_high;
@@ -445,8 +473,8 @@ static struct irq_chip brcm_msi_irq_chip = {
};
static struct msi_domain_info brcm_msi_domain_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_MULTI_PCI_MSI),
+ .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_NO_AFFINITY | MSI_FLAG_MULTI_PCI_MSI,
.chip = &brcm_msi_irq_chip,
};
@@ -484,12 +512,6 @@ static void brcm_msi_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
msg->data = (0xffff & PCIE_MISC_MSI_DATA_CONFIG_VAL_32) | data->hwirq;
}
-static int brcm_msi_set_affinity(struct irq_data *irq_data,
- const struct cpumask *mask, bool force)
-{
- return -EINVAL;
-}
-
static void brcm_msi_ack_irq(struct irq_data *data)
{
struct brcm_msi *msi = irq_data_get_irq_chip_data(data);
@@ -502,7 +524,6 @@ static void brcm_msi_ack_irq(struct irq_data *data)
static struct irq_chip brcm_msi_bottom_irq_chip = {
.name = "BRCM STB MSI",
.irq_compose_msi_msg = brcm_msi_compose_msi_msg,
- .irq_set_affinity = brcm_msi_set_affinity,
.irq_ack = brcm_msi_ack_irq,
};
@@ -538,7 +559,7 @@ static int brcm_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
return hwirq;
for (i = 0; i < nr_irqs; i++)
- irq_domain_set_info(domain, virq + i, hwirq + i,
+ irq_domain_set_info(domain, virq + i, (irq_hw_number_t)hwirq + i,
&brcm_msi_bottom_irq_chip, domain->host_data,
handle_edge_irq, NULL, NULL);
return 0;
@@ -560,10 +581,10 @@ static const struct irq_domain_ops msi_domain_ops = {
static int brcm_allocate_domains(struct brcm_msi *msi)
{
- struct fwnode_handle *fwnode = of_node_to_fwnode(msi->np);
+ struct fwnode_handle *fwnode = of_fwnode_handle(msi->np);
struct device *dev = msi->dev;
- msi->inner_domain = irq_domain_add_linear(NULL, msi->nr, &msi_domain_ops, msi);
+ msi->inner_domain = irq_domain_create_linear(NULL, msi->nr, &msi_domain_ops, msi);
if (!msi->inner_domain) {
dev_err(dev, "failed to create IRQ domain\n");
return -ENOMEM;
@@ -649,7 +670,7 @@ static int brcm_pcie_enable_msi(struct brcm_pcie *pcie)
BUILD_BUG_ON(BRCM_INT_PCI_MSI_LEGACY_NR > BRCM_INT_PCI_MSI_NR);
if (msi->legacy) {
- msi->intr_base = msi->base + PCIE_INTR2_CPU_BASE;
+ msi->intr_base = msi->base + INTR2_CPU_BASE(pcie);
msi->nr = BRCM_INT_PCI_MSI_LEGACY_NR;
msi->legacy_shift = 24;
} else {
@@ -705,8 +726,8 @@ static void __iomem *brcm_pcie_map_bus(struct pci_bus *bus,
/* For devices, write to the config space index register */
idx = PCIE_ECAM_OFFSET(bus->number, devfn, 0);
- writel(idx, pcie->base + PCIE_EXT_CFG_INDEX);
- return base + PCIE_EXT_CFG_DATA + PCIE_ECAM_REG(where);
+ writel(idx, base + IDX_ADDR(pcie));
+ return base + DATA_ADDR(pcie) + PCIE_ECAM_REG(where);
}
static void __iomem *brcm7425_pcie_map_bus(struct pci_bus *bus,
@@ -730,17 +751,33 @@ static void __iomem *brcm7425_pcie_map_bus(struct pci_bus *bus,
return base + DATA_ADDR(pcie);
}
-static void brcm_pcie_bridge_sw_init_set_generic(struct brcm_pcie *pcie, u32 val)
+static int brcm_pcie_bridge_sw_init_set_generic(struct brcm_pcie *pcie, u32 val)
{
- u32 tmp, mask = RGR1_SW_INIT_1_INIT_GENERIC_MASK;
+ u32 tmp, mask = RGR1_SW_INIT_1_INIT_GENERIC_MASK;
u32 shift = RGR1_SW_INIT_1_INIT_GENERIC_SHIFT;
+ int ret = 0;
+
+ if (pcie->bridge_reset) {
+ if (val)
+ ret = reset_control_assert(pcie->bridge_reset);
+ else
+ ret = reset_control_deassert(pcie->bridge_reset);
+
+ if (ret)
+ dev_err(pcie->dev, "failed to %s 'bridge' reset, err=%d\n",
+ val ? "assert" : "deassert", ret);
+
+ return ret;
+ }
tmp = readl(pcie->base + PCIE_RGR1_SW_INIT_1(pcie));
tmp = (tmp & ~mask) | ((val << shift) & mask);
writel(tmp, pcie->base + PCIE_RGR1_SW_INIT_1(pcie));
+
+ return ret;
}
-static void brcm_pcie_bridge_sw_init_set_7278(struct brcm_pcie *pcie, u32 val)
+static int brcm_pcie_bridge_sw_init_set_7278(struct brcm_pcie *pcie, u32 val)
{
u32 tmp, mask = RGR1_SW_INIT_1_INIT_7278_MASK;
u32 shift = RGR1_SW_INIT_1_INIT_7278_SHIFT;
@@ -748,20 +785,29 @@ static void brcm_pcie_bridge_sw_init_set_7278(struct brcm_pcie *pcie, u32 val)
tmp = readl(pcie->base + PCIE_RGR1_SW_INIT_1(pcie));
tmp = (tmp & ~mask) | ((val << shift) & mask);
writel(tmp, pcie->base + PCIE_RGR1_SW_INIT_1(pcie));
+
+ return 0;
}
-static void brcm_pcie_perst_set_4908(struct brcm_pcie *pcie, u32 val)
+static int brcm_pcie_perst_set_4908(struct brcm_pcie *pcie, u32 val)
{
+ int ret;
+
if (WARN_ONCE(!pcie->perst_reset, "missing PERST# reset controller\n"))
- return;
+ return -EINVAL;
if (val)
- reset_control_assert(pcie->perst_reset);
+ ret = reset_control_assert(pcie->perst_reset);
else
- reset_control_deassert(pcie->perst_reset);
+ ret = reset_control_deassert(pcie->perst_reset);
+
+ if (ret)
+ dev_err(pcie->dev, "failed to %s 'perst' reset, err=%d\n",
+ val ? "assert" : "deassert", ret);
+ return ret;
}
-static void brcm_pcie_perst_set_7278(struct brcm_pcie *pcie, u32 val)
+static int brcm_pcie_perst_set_7278(struct brcm_pcie *pcie, u32 val)
{
u32 tmp;
@@ -769,34 +815,110 @@ static void brcm_pcie_perst_set_7278(struct brcm_pcie *pcie, u32 val)
tmp = readl(pcie->base + PCIE_MISC_PCIE_CTRL);
u32p_replace_bits(&tmp, !val, PCIE_MISC_PCIE_CTRL_PCIE_PERSTB_MASK);
writel(tmp, pcie->base + PCIE_MISC_PCIE_CTRL);
+
+ return 0;
}
-static void brcm_pcie_perst_set_generic(struct brcm_pcie *pcie, u32 val)
+static int brcm_pcie_perst_set_generic(struct brcm_pcie *pcie, u32 val)
{
u32 tmp;
tmp = readl(pcie->base + PCIE_RGR1_SW_INIT_1(pcie));
u32p_replace_bits(&tmp, val, PCIE_RGR1_SW_INIT_1_PERST_MASK);
writel(tmp, pcie->base + PCIE_RGR1_SW_INIT_1(pcie));
+
+ return 0;
}
-static int brcm_pcie_get_rc_bar2_size_and_offset(struct brcm_pcie *pcie,
- u64 *rc_bar2_size,
- u64 *rc_bar2_offset)
+static int brcm_pcie_post_setup_bcm2712(struct brcm_pcie *pcie)
+{
+ static const u16 data[] = { 0x50b9, 0xbda1, 0x0094, 0x97b4, 0x5030,
+ 0x5030, 0x0007 };
+ static const u8 regs[] = { 0x16, 0x17, 0x18, 0x19, 0x1b, 0x1c, 0x1e };
+ int ret, i;
+ u32 tmp;
+
+ /* Allow a 54MHz (xosc) refclk source */
+ ret = brcm_pcie_mdio_write(pcie->base, MDIO_PORT0, SET_ADDR_OFFSET, 0x1600);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < ARRAY_SIZE(regs); i++) {
+ ret = brcm_pcie_mdio_write(pcie->base, MDIO_PORT0, regs[i], data[i]);
+ if (ret < 0)
+ return ret;
+ }
+
+ usleep_range(100, 200);
+
+ /*
+ * Set L1SS sub-state timers to avoid lengthy state transitions,
+ * PM clock period is 18.52ns (1/54MHz, round down).
+ */
+ tmp = readl(pcie->base + PCIE_RC_PL_PHY_CTL_15);
+ tmp &= ~PCIE_RC_PL_PHY_CTL_15_PM_CLK_PERIOD_MASK;
+ tmp |= 0x12;
+ writel(tmp, pcie->base + PCIE_RC_PL_PHY_CTL_15);
+
+ return 0;
+}
+
+static void add_inbound_win(struct inbound_win *b, u8 *count, u64 size,
+ u64 cpu_addr, u64 pci_offset)
+{
+ b->size = size;
+ b->cpu_addr = cpu_addr;
+ b->pci_offset = pci_offset;
+ (*count)++;
+}
+
+static int brcm_pcie_get_inbound_wins(struct brcm_pcie *pcie,
+ struct inbound_win inbound_wins[])
{
struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
+ u64 pci_offset, cpu_addr, size = 0, tot_size = 0;
struct resource_entry *entry;
struct device *dev = pcie->dev;
u64 lowest_pcie_addr = ~(u64)0;
int ret, i = 0;
- u64 size = 0;
+ u8 n = 0;
+
+ /*
+	 * The HW registers (and PCIe) use 1-based numbering for BARs. As such,
+ * we have inbound_wins[0] unused and BAR1 starts at inbound_wins[1].
+ */
+ struct inbound_win *b_begin = &inbound_wins[1];
+ struct inbound_win *b = b_begin;
+
+ /*
+	 * STB chips other than the 7712 disable the first inbound window by default.
+	 * Rather than being mapped to system memory, it is mapped to the
+ * internal registers of the SoC. This feature is deprecated, has
+ * security considerations, and is not implemented in our modern
+ * SoCs.
+ */
+ if (pcie->cfg->soc_base != BCM7712)
+ add_inbound_win(b++, &n, 0, 0, 0);
resource_list_for_each_entry(entry, &bridge->dma_ranges) {
- u64 pcie_beg = entry->res->start - entry->offset;
+ u64 pcie_start = entry->res->start - entry->offset;
+ u64 cpu_start = entry->res->start;
- size += entry->res->end - entry->res->start + 1;
- if (pcie_beg < lowest_pcie_addr)
- lowest_pcie_addr = pcie_beg;
+ size = resource_size(entry->res);
+ tot_size += size;
+ if (pcie_start < lowest_pcie_addr)
+ lowest_pcie_addr = pcie_start;
+ /*
+ * 7712 and newer chips may have many BARs, with each
+ * offering a non-overlapping viewport to system memory.
+	 * That being said, each BAR's size must still be a power of
+ * two.
+ */
+ if (pcie->cfg->soc_base == BCM7712)
+ add_inbound_win(b++, &n, size, cpu_start, pcie_start);
+
+ if (n > pcie->cfg->num_inbound_wins)
+ break;
}
if (lowest_pcie_addr == ~(u64)0) {
@@ -804,13 +926,20 @@ static int brcm_pcie_get_rc_bar2_size_and_offset(struct brcm_pcie *pcie,
return -EINVAL;
}
+ /*
+ * 7712 and newer chips do not have an internal memory mapping system
+	 * that enables multiple memory controllers. As such, the function can
+	 * return now without doing any special configuration.
+ */
+ if (pcie->cfg->soc_base == BCM7712)
+ return n;
+
ret = of_property_read_variable_u64_array(pcie->np, "brcm,scb-sizes", pcie->memc_size, 1,
PCIE_BRCM_MAX_MEMC);
-
if (ret <= 0) {
/* Make an educated guess */
pcie->num_memc = 1;
- pcie->memc_size[0] = 1ULL << fls64(size - 1);
+ pcie->memc_size[0] = 1ULL << fls64(tot_size - 1);
} else {
pcie->num_memc = ret;
}
@@ -819,10 +948,15 @@ static int brcm_pcie_get_rc_bar2_size_and_offset(struct brcm_pcie *pcie,
for (i = 0, size = 0; i < pcie->num_memc; i++)
size += pcie->memc_size[i];
- /* System memory starts at this address in PCIe-space */
- *rc_bar2_offset = lowest_pcie_addr;
- /* The sum of all memc views must also be a power of 2 */
- *rc_bar2_size = 1ULL << fls64(size - 1);
+ /* Our HW mandates that the window size must be a power of 2 */
+ size = 1ULL << fls64(size - 1);
+
+ /*
+ * For STB chips, the BAR2 cpu_addr is hardwired to the start
+ * of system memory, so we set it to 0.
+ */
+ cpu_addr = 0;
+ pci_offset = lowest_pcie_addr;
/*
* We validate the inbound memory view even though we should trust
@@ -857,44 +991,119 @@ static int brcm_pcie_get_rc_bar2_size_and_offset(struct brcm_pcie *pcie,
* outbound memory @ 3GB). So instead it will start at the 1x
* multiple of its size
*/
- if (!*rc_bar2_size || (*rc_bar2_offset & (*rc_bar2_size - 1)) ||
- (*rc_bar2_offset < SZ_4G && *rc_bar2_offset > SZ_2G)) {
- dev_err(dev, "Invalid rc_bar2_offset/size: size 0x%llx, off 0x%llx\n",
- *rc_bar2_size, *rc_bar2_offset);
+ if (!size || (pci_offset & (size - 1)) ||
+ (pci_offset < SZ_4G && pci_offset > SZ_2G)) {
+ dev_err(dev, "Invalid inbound_win2_offset/size: size 0x%llx, off 0x%llx\n",
+ size, pci_offset);
return -EINVAL;
}
- return 0;
+ /* Enable inbound window 2, the main inbound window for STB chips */
+ add_inbound_win(b++, &n, size, cpu_addr, pci_offset);
+
+ /*
+	 * Disable inbound window 3. On some chips it presents the same
+	 * view as window 2, but with the data in a settable endianness.
+ */
+ add_inbound_win(b++, &n, 0, 0, 0);
+
+ return n;
+}
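/*
 * A minimal sketch of the size-rounding idiom used above: window and memc
 * sizes are forced up to a power of two with 1ULL << fls64(x - 1). The
 * helper name is hypothetical and the behaviour assumes x > 0.
 */
static inline u64 round_up_pow_of_two_u64(u64 x)
{
	/* Example: x = 3 GiB (0xc0000000) -> fls64(0xbfffffff) = 32 -> 4 GiB */
	return 1ULL << fls64(x - 1);
}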
+
+static u32 brcm_bar_reg_offset(int bar)
+{
+ if (bar <= 3)
+ return PCIE_MISC_RC_BAR1_CONFIG_LO + 8 * (bar - 1);
+ else
+ return PCIE_MISC_RC_BAR4_CONFIG_LO + 8 * (bar - 4);
+}
+
+static u32 brcm_ubus_reg_offset(int bar)
+{
+ if (bar <= 3)
+ return PCIE_MISC_UBUS_BAR1_CONFIG_REMAP + 8 * (bar - 1);
+ else
+ return PCIE_MISC_UBUS_BAR4_CONFIG_REMAP + 8 * (bar - 4);
+}
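/*
 * Worked examples of the offset arithmetic above (the LO/HI register pairs
 * are 8 bytes apart, so each BAR advances the offset by 8):
 *   brcm_bar_reg_offset(1)  == PCIE_MISC_RC_BAR1_CONFIG_LO
 *   brcm_bar_reg_offset(3)  == PCIE_MISC_RC_BAR1_CONFIG_LO + 16
 *   brcm_bar_reg_offset(4)  == PCIE_MISC_RC_BAR4_CONFIG_LO
 *   brcm_ubus_reg_offset(6) == PCIE_MISC_UBUS_BAR4_CONFIG_REMAP + 16
 */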
+
+static void set_inbound_win_registers(struct brcm_pcie *pcie,
+ const struct inbound_win *inbound_wins,
+ u8 num_inbound_wins)
+{
+ void __iomem *base = pcie->base;
+ int i;
+
+ for (i = 1; i <= num_inbound_wins; i++) {
+ u64 pci_offset = inbound_wins[i].pci_offset;
+ u64 cpu_addr = inbound_wins[i].cpu_addr;
+ u64 size = inbound_wins[i].size;
+ u32 reg_offset = brcm_bar_reg_offset(i);
+ u32 tmp = lower_32_bits(pci_offset);
+
+ u32p_replace_bits(&tmp, brcm_pcie_encode_ibar_size(size),
+ PCIE_MISC_RC_BAR1_CONFIG_LO_SIZE_MASK);
+
+ /* Write low */
+ writel_relaxed(tmp, base + reg_offset);
+ /* Write high */
+ writel_relaxed(upper_32_bits(pci_offset), base + reg_offset + 4);
+
+ /*
+ * Most STB chips:
+ * Do nothing.
+ * 7712:
+ * All of their BARs need to be set.
+ */
+ if (pcie->cfg->soc_base == BCM7712) {
+ /* BUS remap register settings */
+ reg_offset = brcm_ubus_reg_offset(i);
+ tmp = lower_32_bits(cpu_addr) & ~0xfff;
+ tmp |= PCIE_MISC_UBUS_BAR1_CONFIG_REMAP_ACCESS_EN_MASK;
+ writel_relaxed(tmp, base + reg_offset);
+ tmp = upper_32_bits(cpu_addr);
+ writel_relaxed(tmp, base + reg_offset + 4);
+ }
+ }
}
static int brcm_pcie_setup(struct brcm_pcie *pcie)
{
- u64 rc_bar2_offset, rc_bar2_size;
+ struct inbound_win inbound_wins[PCIE_BRCM_MAX_INBOUND_WINS];
void __iomem *base = pcie->base;
struct pci_host_bridge *bridge;
struct resource_entry *entry;
u32 tmp, burst, aspm_support;
- int num_out_wins = 0;
- int ret, memc;
+ u8 num_out_wins = 0;
+ int num_inbound_wins = 0;
+ int memc, ret;
/* Reset the bridge */
- pcie->bridge_sw_init_set(pcie, 1);
+ ret = pcie->cfg->bridge_sw_init_set(pcie, 1);
+ if (ret)
+ return ret;
/* Ensure that PERST# is asserted; some bootloaders may deassert it. */
- if (pcie->type == BCM2711)
- pcie->perst_set(pcie, 1);
+ if (pcie->cfg->soc_base == BCM2711) {
+ ret = pcie->cfg->perst_set(pcie, 1);
+ if (ret) {
+ pcie->cfg->bridge_sw_init_set(pcie, 0);
+ return ret;
+ }
+ }
usleep_range(100, 200);
/* Take the bridge out of reset */
- pcie->bridge_sw_init_set(pcie, 0);
+ ret = pcie->cfg->bridge_sw_init_set(pcie, 0);
+ if (ret)
+ return ret;
- tmp = readl(base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
+ tmp = readl(base + HARD_DEBUG(pcie));
if (is_bmips(pcie))
tmp &= ~PCIE_BMIPS_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK;
else
tmp &= ~PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK;
- writel(tmp, base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
+ writel(tmp, base + HARD_DEBUG(pcie));
/* Wait for SerDes to be stable */
usleep_range(100, 200);
@@ -905,9 +1114,9 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie)
*/
if (is_bmips(pcie))
burst = 0x1; /* 256 bytes */
- else if (pcie->type == BCM2711)
+ else if (pcie->cfg->soc_base == BCM2711)
burst = 0x0; /* 128 bytes */
- else if (pcie->type == BCM7278)
+ else if (pcie->cfg->soc_base == BCM7278)
burst = 0x3; /* 512 bytes */
else
burst = 0x2; /* 512 bytes */
@@ -924,17 +1133,16 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie)
u32p_replace_bits(&tmp, 1, PCIE_MISC_MISC_CTRL_PCIE_RCB_64B_MODE_MASK);
writel(tmp, base + PCIE_MISC_MISC_CTRL);
- ret = brcm_pcie_get_rc_bar2_size_and_offset(pcie, &rc_bar2_size,
- &rc_bar2_offset);
- if (ret)
- return ret;
+ num_inbound_wins = brcm_pcie_get_inbound_wins(pcie, inbound_wins);
+ if (num_inbound_wins < 0)
+ return num_inbound_wins;
- tmp = lower_32_bits(rc_bar2_offset);
- u32p_replace_bits(&tmp, brcm_pcie_encode_ibar_size(rc_bar2_size),
- PCIE_MISC_RC_BAR2_CONFIG_LO_SIZE_MASK);
- writel(tmp, base + PCIE_MISC_RC_BAR2_CONFIG_LO);
- writel(upper_32_bits(rc_bar2_offset),
- base + PCIE_MISC_RC_BAR2_CONFIG_HI);
+ set_inbound_win_registers(pcie, inbound_wins, num_inbound_wins);
+
+ if (!brcm_pcie_rc_mode(pcie)) {
+ dev_err(pcie->dev, "PCIe RC controller misconfigured as Endpoint\n");
+ return -EINVAL;
+ }
tmp = readl(base + PCIE_MISC_MISC_CTRL);
for (memc = 0; memc < pcie->num_memc; memc++) {
@@ -956,25 +1164,12 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie)
* 4GB or when the inbound area is smaller than 4GB (taking into
* account the rounding-up we're forced to perform).
*/
- if (rc_bar2_offset >= SZ_4G || (rc_bar2_size + rc_bar2_offset) < SZ_4G)
+ if (inbound_wins[2].pci_offset >= SZ_4G ||
+ (inbound_wins[2].size + inbound_wins[2].pci_offset) < SZ_4G)
pcie->msi_target_addr = BRCM_MSI_TARGET_ADDR_LT_4GB;
else
pcie->msi_target_addr = BRCM_MSI_TARGET_ADDR_GT_4GB;
- if (!brcm_pcie_rc_mode(pcie)) {
- dev_err(pcie->dev, "PCIe RC controller misconfigured as Endpoint\n");
- return -EINVAL;
- }
-
- /* disable the PCIe->GISB memory window (RC_BAR1) */
- tmp = readl(base + PCIE_MISC_RC_BAR1_CONFIG_LO);
- tmp &= ~PCIE_MISC_RC_BAR1_CONFIG_LO_SIZE_MASK;
- writel(tmp, base + PCIE_MISC_RC_BAR1_CONFIG_LO);
-
- /* disable the PCIe->SCB memory window (RC_BAR3) */
- tmp = readl(base + PCIE_MISC_RC_BAR3_CONFIG_LO);
- tmp &= ~PCIE_MISC_RC_BAR3_CONFIG_LO_SIZE_MASK;
- writel(tmp, base + PCIE_MISC_RC_BAR3_CONFIG_LO);
/* Don't advertise L0s capability if 'aspm-no-l0s' */
aspm_support = PCIE_LINK_STATE_L1;
@@ -1025,12 +1220,18 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie)
num_out_wins++;
}
- /* PCIe->SCB endian mode for BAR */
+ /* PCIe->SCB endian mode for inbound window */
tmp = readl(base + PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1);
- u32p_replace_bits(&tmp, PCIE_RC_CFG_VENDOR_SPCIFIC_REG1_LITTLE_ENDIAN,
+ u32p_replace_bits(&tmp, PCIE_RC_CFG_VENDOR_SPECIFIC_REG1_LITTLE_ENDIAN,
PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1_ENDIAN_MODE_BAR2_MASK);
writel(tmp, base + PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1);
+ if (pcie->cfg->post_setup) {
+ ret = pcie->cfg->post_setup(pcie);
+ if (ret < 0)
+ return ret;
+ }
+
return 0;
}
@@ -1045,6 +1246,10 @@ static void brcm_extend_rbus_timeout(struct brcm_pcie *pcie)
const unsigned int REG_OFFSET = PCIE_RGR1_SW_INIT_1(pcie) - 8;
u32 timeout_us = 4000000; /* 4 seconds, our setting for L1SS */
+ /* 7712 does not have this (RGR1) timer */
+ if (pcie->cfg->soc_base == BCM7712)
+ return;
+
/* Each unit in timeout register is 1/216,000,000 seconds */
writel(216 * timeout_us, pcie->base + REG_OFFSET);
}
@@ -1063,7 +1268,7 @@ static void brcm_config_clkreq(struct brcm_pcie *pcie)
}
/* Start out assuming safe mode (both mode bits cleared) */
- clkreq_cntl = readl(pcie->base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
+ clkreq_cntl = readl(pcie->base + HARD_DEBUG(pcie));
clkreq_cntl &= ~PCIE_CLKREQ_MASK;
if (strcmp(mode, "no-l1ss") == 0) {
@@ -1106,7 +1311,7 @@ static void brcm_config_clkreq(struct brcm_pcie *pcie)
dev_err(pcie->dev, err_msg);
mode = "safe";
}
- writel(clkreq_cntl, pcie->base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
+ writel(clkreq_cntl, pcie->base + HARD_DEBUG(pcie));
dev_info(pcie->dev, "clkreq-mode set to %s\n", mode);
}
@@ -1119,8 +1324,14 @@ static int brcm_pcie_start_link(struct brcm_pcie *pcie)
bool ssc_good = false;
int ret, i;
+ /* Limit the generation if specified */
+ if (pcie->gen)
+ brcm_pcie_set_gen(pcie, pcie->gen);
+
/* Unassert the fundamental reset */
- pcie->perst_set(pcie, 0);
+ ret = pcie->cfg->perst_set(pcie, 0);
+ if (ret)
+ return ret;
/*
* Wait for 100ms after PERST# deassertion; see PCIe CEM specification
@@ -1143,9 +1354,6 @@ static int brcm_pcie_start_link(struct brcm_pcie *pcie)
brcm_config_clkreq(pcie);
- if (pcie->gen)
- brcm_pcie_set_gen(pcie, pcie->gen);
-
if (pcie->ssc) {
ret = brcm_pcie_set_ssc(pcie);
if (ret == 0)
@@ -1208,7 +1416,8 @@ static int brcm_pcie_add_bus(struct pci_bus *bus)
ret = regulator_bulk_get(dev, sr->num_supplies, sr->supplies);
if (ret) {
- dev_info(dev, "No regulators for downstream device\n");
+ dev_info(dev, "Did not get regulators, err=%d\n", ret);
+ pcie->sr = NULL;
goto no_regulators;
}
@@ -1231,7 +1440,7 @@ static void brcm_pcie_remove_bus(struct pci_bus *bus)
struct subdev_regulators *sr = pcie->sr;
struct device *dev = &bus->dev;
- if (!sr)
+ if (!sr || !bus->parent || !pci_is_root_bus(bus->parent))
return;
if (regulator_bulk_disable(sr->num_supplies, sr->supplies))
@@ -1304,23 +1513,25 @@ static int brcm_phy_cntl(struct brcm_pcie *pcie, const int start)
static inline int brcm_phy_start(struct brcm_pcie *pcie)
{
- return pcie->rescal ? brcm_phy_cntl(pcie, 1) : 0;
+ return pcie->cfg->has_phy ? brcm_phy_cntl(pcie, 1) : 0;
}
static inline int brcm_phy_stop(struct brcm_pcie *pcie)
{
- return pcie->rescal ? brcm_phy_cntl(pcie, 0) : 0;
+ return pcie->cfg->has_phy ? brcm_phy_cntl(pcie, 0) : 0;
}
-static void brcm_pcie_turn_off(struct brcm_pcie *pcie)
+static int brcm_pcie_turn_off(struct brcm_pcie *pcie)
{
void __iomem *base = pcie->base;
- int tmp;
+ int tmp, ret;
if (brcm_pcie_link_up(pcie))
brcm_pcie_enter_l23(pcie);
/* Assert fundamental reset */
- pcie->perst_set(pcie, 1);
+ ret = pcie->cfg->perst_set(pcie, 1);
+ if (ret)
+ return ret;
/* Deassert request for L23 in case it was asserted */
tmp = readl(base + PCIE_MISC_PCIE_CTRL);
@@ -1328,12 +1539,15 @@ static void brcm_pcie_turn_off(struct brcm_pcie *pcie)
writel(tmp, base + PCIE_MISC_PCIE_CTRL);
/* Turn off SerDes */
- tmp = readl(base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
+ tmp = readl(base + HARD_DEBUG(pcie));
u32p_replace_bits(&tmp, 1, PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK);
- writel(tmp, base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
+ writel(tmp, base + HARD_DEBUG(pcie));
- /* Shutdown PCIe bridge */
- pcie->bridge_sw_init_set(pcie, 1);
+ if (!(pcie->cfg->quirks & CFG_QUIRK_AVOID_BRIDGE_SHUTDOWN))
+ /* Shutdown PCIe bridge */
+ ret = pcie->cfg->bridge_sw_init_set(pcie, 1);
+
+ return ret;
}
static int pci_dev_may_wakeup(struct pci_dev *dev, void *data)
@@ -1351,9 +1565,12 @@ static int brcm_pcie_suspend_noirq(struct device *dev)
{
struct brcm_pcie *pcie = dev_get_drvdata(dev);
struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
- int ret;
+ int ret, rret;
+
+ ret = brcm_pcie_turn_off(pcie);
+ if (ret)
+ return ret;
- brcm_pcie_turn_off(pcie);
/*
* If brcm_phy_stop() returns an error, just dev_err(). If we
* return the error it will cause the suspend to fail and this is a
@@ -1382,7 +1599,10 @@ static int brcm_pcie_suspend_noirq(struct device *dev)
pcie->sr->supplies);
if (ret) {
dev_err(dev, "Could not turn off regulators\n");
- reset_control_reset(pcie->rescal);
+ rret = reset_control_reset(pcie->rescal);
+ if (rret)
+			dev_err(dev, "failed to reset 'rescal' controller, ret=%d\n",
+ rret);
return ret;
}
}
@@ -1397,7 +1617,7 @@ static int brcm_pcie_resume_noirq(struct device *dev)
struct brcm_pcie *pcie = dev_get_drvdata(dev);
void __iomem *base;
u32 tmp;
- int ret;
+ int ret, rret;
base = pcie->base;
ret = clk_prepare_enable(pcie->clk);
@@ -1413,12 +1633,12 @@ static int brcm_pcie_resume_noirq(struct device *dev)
goto err_reset;
/* Take bridge out of reset so we can access the SERDES reg */
- pcie->bridge_sw_init_set(pcie, 0);
+ pcie->cfg->bridge_sw_init_set(pcie, 0);
/* SERDES_IDDQ = 0 */
- tmp = readl(base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
+ tmp = readl(base + HARD_DEBUG(pcie));
u32p_replace_bits(&tmp, 0, PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK);
- writel(tmp, base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
+ writel(tmp, base + HARD_DEBUG(pcie));
/* wait for serdes to be stable */
udelay(100);
@@ -1459,7 +1679,9 @@ err_regulator:
if (pcie->sr)
regulator_bulk_disable(pcie->sr->num_supplies, pcie->sr->supplies);
err_reset:
- reset_control_rearm(pcie->rescal);
+ rret = reset_control_rearm(pcie->rescal);
+ if (rret)
+ dev_err(pcie->dev, "failed to rearm 'rescal' reset, err=%d\n", rret);
err_disable_clk:
clk_disable_unprepare(pcie->clk);
return ret;
@@ -1487,74 +1709,123 @@ static void brcm_pcie_remove(struct platform_device *pdev)
}
static const int pcie_offsets[] = {
- [RGR1_SW_INIT_1] = 0x9210,
- [EXT_CFG_INDEX] = 0x9000,
- [EXT_CFG_DATA] = 0x9004,
+ [RGR1_SW_INIT_1] = 0x9210,
+ [EXT_CFG_INDEX] = 0x9000,
+ [EXT_CFG_DATA] = 0x8000,
+ [PCIE_HARD_DEBUG] = 0x4204,
+ [PCIE_INTR2_CPU_BASE] = 0x4300,
+};
+
+static const int pcie_offsets_bcm7278[] = {
+ [RGR1_SW_INIT_1] = 0xc010,
+ [EXT_CFG_INDEX] = 0x9000,
+ [EXT_CFG_DATA] = 0x8000,
+ [PCIE_HARD_DEBUG] = 0x4204,
+ [PCIE_INTR2_CPU_BASE] = 0x4300,
};
-static const int pcie_offsets_bmips_7425[] = {
- [RGR1_SW_INIT_1] = 0x8010,
- [EXT_CFG_INDEX] = 0x8300,
- [EXT_CFG_DATA] = 0x8304,
+static const int pcie_offsets_bcm7425[] = {
+ [RGR1_SW_INIT_1] = 0x8010,
+ [EXT_CFG_INDEX] = 0x8300,
+ [EXT_CFG_DATA] = 0x8304,
+ [PCIE_HARD_DEBUG] = 0x4204,
+ [PCIE_INTR2_CPU_BASE] = 0x4300,
+};
+
+static const int pcie_offsets_bcm7712[] = {
+ [RGR1_SW_INIT_1] = 0x9210,
+ [EXT_CFG_INDEX] = 0x9000,
+ [EXT_CFG_DATA] = 0x8000,
+ [PCIE_HARD_DEBUG] = 0x4304,
+ [PCIE_INTR2_CPU_BASE] = 0x4400,
};
static const struct pcie_cfg_data generic_cfg = {
.offsets = pcie_offsets,
- .type = GENERIC,
+ .soc_base = GENERIC,
.perst_set = brcm_pcie_perst_set_generic,
.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
+ .num_inbound_wins = 3,
};
-static const struct pcie_cfg_data bcm7425_cfg = {
- .offsets = pcie_offsets_bmips_7425,
- .type = BCM7425,
+static const struct pcie_cfg_data bcm2711_cfg = {
+ .offsets = pcie_offsets,
+ .soc_base = BCM2711,
.perst_set = brcm_pcie_perst_set_generic,
.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
+ .num_inbound_wins = 3,
};
-static const struct pcie_cfg_data bcm7435_cfg = {
- .offsets = pcie_offsets,
- .type = BCM7435,
- .perst_set = brcm_pcie_perst_set_generic,
+static const struct pcie_cfg_data bcm2712_cfg = {
+ .offsets = pcie_offsets_bcm7712,
+ .soc_base = BCM7712,
+ .perst_set = brcm_pcie_perst_set_7278,
.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
+ .post_setup = brcm_pcie_post_setup_bcm2712,
+ .quirks = CFG_QUIRK_AVOID_BRIDGE_SHUTDOWN,
+ .num_inbound_wins = 10,
};
static const struct pcie_cfg_data bcm4908_cfg = {
.offsets = pcie_offsets,
- .type = BCM4908,
+ .soc_base = BCM4908,
.perst_set = brcm_pcie_perst_set_4908,
.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
-};
-
-static const int pcie_offset_bcm7278[] = {
- [RGR1_SW_INIT_1] = 0xc010,
- [EXT_CFG_INDEX] = 0x9000,
- [EXT_CFG_DATA] = 0x9004,
+ .num_inbound_wins = 3,
};
static const struct pcie_cfg_data bcm7278_cfg = {
- .offsets = pcie_offset_bcm7278,
- .type = BCM7278,
+ .offsets = pcie_offsets_bcm7278,
+ .soc_base = BCM7278,
.perst_set = brcm_pcie_perst_set_7278,
.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_7278,
+ .num_inbound_wins = 3,
};
-static const struct pcie_cfg_data bcm2711_cfg = {
+static const struct pcie_cfg_data bcm7425_cfg = {
+ .offsets = pcie_offsets_bcm7425,
+ .soc_base = BCM7425,
+ .perst_set = brcm_pcie_perst_set_generic,
+ .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
+ .num_inbound_wins = 3,
+};
+
+static const struct pcie_cfg_data bcm7435_cfg = {
.offsets = pcie_offsets,
- .type = BCM2711,
+ .soc_base = BCM7435,
.perst_set = brcm_pcie_perst_set_generic,
.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
+ .num_inbound_wins = 3,
+};
+
+static const struct pcie_cfg_data bcm7216_cfg = {
+ .offsets = pcie_offsets_bcm7278,
+ .soc_base = BCM7278,
+ .perst_set = brcm_pcie_perst_set_7278,
+ .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_7278,
+ .has_phy = true,
+ .num_inbound_wins = 3,
+};
+
+static const struct pcie_cfg_data bcm7712_cfg = {
+ .offsets = pcie_offsets_bcm7712,
+ .perst_set = brcm_pcie_perst_set_7278,
+ .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
+ .soc_base = BCM7712,
+ .num_inbound_wins = 10,
};
static const struct of_device_id brcm_pcie_match[] = {
{ .compatible = "brcm,bcm2711-pcie", .data = &bcm2711_cfg },
+ { .compatible = "brcm,bcm2712-pcie", .data = &bcm2712_cfg },
{ .compatible = "brcm,bcm4908-pcie", .data = &bcm4908_cfg },
{ .compatible = "brcm,bcm7211-pcie", .data = &generic_cfg },
+ { .compatible = "brcm,bcm7216-pcie", .data = &bcm7216_cfg },
{ .compatible = "brcm,bcm7278-pcie", .data = &bcm7278_cfg },
- { .compatible = "brcm,bcm7216-pcie", .data = &bcm7278_cfg },
- { .compatible = "brcm,bcm7445-pcie", .data = &generic_cfg },
- { .compatible = "brcm,bcm7435-pcie", .data = &bcm7435_cfg },
{ .compatible = "brcm,bcm7425-pcie", .data = &bcm7425_cfg },
+ { .compatible = "brcm,bcm7435-pcie", .data = &bcm7435_cfg },
+ { .compatible = "brcm,bcm7445-pcie", .data = &generic_cfg },
+ { .compatible = "brcm,bcm7712-pcie", .data = &bcm7712_cfg },
{},
};
@@ -1576,7 +1847,7 @@ static struct pci_ops brcm7425_pcie_ops = {
static int brcm_pcie_probe(struct platform_device *pdev)
{
- struct device_node *np = pdev->dev.of_node, *msi_np;
+ struct device_node *np = pdev->dev.of_node;
struct pci_host_bridge *bridge;
const struct pcie_cfg_data *data;
struct brcm_pcie *pcie;
@@ -1595,10 +1866,7 @@ static int brcm_pcie_probe(struct platform_device *pdev)
pcie = pci_host_bridge_priv(bridge);
pcie->dev = &pdev->dev;
pcie->np = np;
- pcie->reg_offsets = data->offsets;
- pcie->type = data->type;
- pcie->perst_set = data->perst_set;
- pcie->bridge_sw_init_set = data->bridge_sw_init_set;
+ pcie->cfg = data;
pcie->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pcie->base))
@@ -1613,25 +1881,52 @@ static int brcm_pcie_probe(struct platform_device *pdev)
pcie->ssc = of_property_read_bool(np, "brcm,enable-ssc");
- ret = clk_prepare_enable(pcie->clk);
- if (ret) {
- dev_err(&pdev->dev, "could not enable clock\n");
- return ret;
- }
pcie->rescal = devm_reset_control_get_optional_shared(&pdev->dev, "rescal");
- if (IS_ERR(pcie->rescal)) {
- clk_disable_unprepare(pcie->clk);
+ if (IS_ERR(pcie->rescal))
return PTR_ERR(pcie->rescal);
- }
+
pcie->perst_reset = devm_reset_control_get_optional_exclusive(&pdev->dev, "perst");
- if (IS_ERR(pcie->perst_reset)) {
- clk_disable_unprepare(pcie->clk);
+ if (IS_ERR(pcie->perst_reset))
return PTR_ERR(pcie->perst_reset);
+
+ pcie->bridge_reset = devm_reset_control_get_optional_exclusive(&pdev->dev, "bridge");
+ if (IS_ERR(pcie->bridge_reset))
+ return PTR_ERR(pcie->bridge_reset);
+
+ pcie->swinit_reset = devm_reset_control_get_optional_exclusive(&pdev->dev, "swinit");
+ if (IS_ERR(pcie->swinit_reset))
+ return PTR_ERR(pcie->swinit_reset);
+
+ ret = clk_prepare_enable(pcie->clk);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "could not enable clock\n");
+
+ pcie->cfg->bridge_sw_init_set(pcie, 0);
+
+ if (pcie->swinit_reset) {
+ ret = reset_control_assert(pcie->swinit_reset);
+ if (ret) {
+ clk_disable_unprepare(pcie->clk);
+ return dev_err_probe(&pdev->dev, ret,
+ "could not assert reset 'swinit'\n");
+ }
+
+ /* HW team recommends 1us for proper sync and propagation of reset */
+ udelay(1);
+
+ ret = reset_control_deassert(pcie->swinit_reset);
+ if (ret) {
+ clk_disable_unprepare(pcie->clk);
+ return dev_err_probe(&pdev->dev, ret,
+ "could not de-assert reset 'swinit'\n");
+ }
}
ret = reset_control_reset(pcie->rescal);
- if (ret)
- dev_err(&pdev->dev, "failed to deassert 'rescal'\n");
+ if (ret) {
+ clk_disable_unprepare(pcie->clk);
+ return dev_err_probe(&pdev->dev, ret, "failed to deassert 'rescal'\n");
+ }
ret = brcm_phy_start(pcie);
if (ret) {
@@ -1645,22 +1940,29 @@ static int brcm_pcie_probe(struct platform_device *pdev)
goto fail;
pcie->hw_rev = readl(pcie->base + PCIE_MISC_REVISION);
- if (pcie->type == BCM4908 && pcie->hw_rev >= BRCM_PCIE_HW_REV_3_20) {
+ if (pcie->cfg->soc_base == BCM4908 &&
+ pcie->hw_rev >= BRCM_PCIE_HW_REV_3_20) {
dev_err(pcie->dev, "hardware revision with unsupported PERST# setup\n");
ret = -ENODEV;
goto fail;
}
- msi_np = of_parse_phandle(pcie->np, "msi-parent", 0);
- if (pci_msi_enabled() && msi_np == pcie->np) {
- ret = brcm_pcie_enable_msi(pcie);
+ if (pci_msi_enabled()) {
+ struct device_node *msi_np = of_parse_phandle(pcie->np, "msi-parent", 0);
+
+ if (msi_np == pcie->np)
+ ret = brcm_pcie_enable_msi(pcie);
+
+ of_node_put(msi_np);
+
if (ret) {
dev_err(pcie->dev, "probe of internal MSI failed");
goto fail;
}
}
- bridge->ops = pcie->type == BCM7425 ? &brcm7425_pcie_ops : &brcm_pcie_ops;
+ bridge->ops = pcie->cfg->soc_base == BCM7425 ?
+ &brcm7425_pcie_ops : &brcm_pcie_ops;
bridge->sysdata = pcie;
platform_set_drvdata(pdev, pcie);
@@ -1678,6 +1980,7 @@ static int brcm_pcie_probe(struct platform_device *pdev)
fail:
__brcm_pcie_remove(pcie);
+
return ret;
}
@@ -1690,7 +1993,7 @@ static const struct dev_pm_ops brcm_pcie_pm_ops = {
static struct platform_driver brcm_pcie_driver = {
.probe = brcm_pcie_probe,
- .remove_new = brcm_pcie_remove,
+ .remove = brcm_pcie_remove,
.driver = {
.name = "brcm-pcie",
.of_match_table = brcm_pcie_match,
@@ -1702,3 +2005,4 @@ module_platform_driver(brcm_pcie_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Broadcom STB PCIe RC driver");
MODULE_AUTHOR("Broadcom");
+MODULE_SOFTDEP("pre: irq_bcm2712_mip");
diff --git a/drivers/pci/controller/pcie-hisi-error.c b/drivers/pci/controller/pcie-hisi-error.c
index ad9d5ffcd9e3..aaf1ed2b6e59 100644
--- a/drivers/pci/controller/pcie-hisi-error.c
+++ b/drivers/pci/controller/pcie-hisi-error.c
@@ -317,7 +317,7 @@ static struct platform_driver hisi_pcie_error_handler_driver = {
.acpi_match_table = hisi_pcie_acpi_match,
},
.probe = hisi_pcie_error_handler_probe,
- .remove_new = hisi_pcie_error_handler_remove,
+ .remove = hisi_pcie_error_handler_remove,
};
module_platform_driver(hisi_pcie_error_handler_driver);
diff --git a/drivers/pci/controller/pcie-iproc-msi.c b/drivers/pci/controller/pcie-iproc-msi.c
index 649fcb449f34..d2cb4c4f821a 100644
--- a/drivers/pci/controller/pcie-iproc-msi.c
+++ b/drivers/pci/controller/pcie-iproc-msi.c
@@ -446,12 +446,12 @@ static void iproc_msi_disable(struct iproc_msi *msi)
static int iproc_msi_alloc_domains(struct device_node *node,
struct iproc_msi *msi)
{
- msi->inner_domain = irq_domain_add_linear(NULL, msi->nr_msi_vecs,
- &msi_domain_ops, msi);
+ msi->inner_domain = irq_domain_create_linear(NULL, msi->nr_msi_vecs,
+ &msi_domain_ops, msi);
if (!msi->inner_domain)
return -ENOMEM;
- msi->msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(node),
+ msi->msi_domain = pci_msi_create_irq_domain(of_fwnode_handle(node),
&iproc_msi_domain_info,
msi->inner_domain);
if (!msi->msi_domain) {
diff --git a/drivers/pci/controller/pcie-iproc-platform.c b/drivers/pci/controller/pcie-iproc-platform.c
index 4e6aa882a567..0cb78c583c7e 100644
--- a/drivers/pci/controller/pcie-iproc-platform.c
+++ b/drivers/pci/controller/pcie-iproc-platform.c
@@ -134,7 +134,7 @@ static struct platform_driver iproc_pltfm_pcie_driver = {
.of_match_table = of_match_ptr(iproc_pcie_of_match_table),
},
.probe = iproc_pltfm_pcie_probe,
- .remove_new = iproc_pltfm_pcie_remove,
+ .remove = iproc_pltfm_pcie_remove,
.shutdown = iproc_pltfm_pcie_shutdown,
};
module_platform_driver(iproc_pltfm_pcie_driver);
diff --git a/drivers/pci/controller/pcie-iproc.c b/drivers/pci/controller/pcie-iproc.c
index 97f739a2c9f8..22134e95574b 100644
--- a/drivers/pci/controller/pcie-iproc.c
+++ b/drivers/pci/controller/pcie-iproc.c
@@ -54,7 +54,7 @@
#define CFG_RD_SUCCESS 0
#define CFG_RD_UR 1
-#define CFG_RD_CRS 2
+#define CFG_RD_RRS 2
#define CFG_RD_CA 3
#define CFG_RETRY_STATUS 0xffff0001
#define CFG_RETRY_STATUS_TIMEOUT_US 500000 /* 500 milliseconds */
@@ -485,31 +485,31 @@ static unsigned int iproc_pcie_cfg_retry(struct iproc_pcie *pcie,
u32 status;
/*
- * As per PCIe spec r3.1, sec 2.3.2, CRS Software Visibility only
+ * As per PCIe r6.0, sec 2.3.2, Config RRS Software Visibility only
* affects config reads of the Vendor ID. For config writes or any
* other config reads, the Root may automatically reissue the
* configuration request again as a new request.
*
* For config reads, this hardware returns CFG_RETRY_STATUS data
- * when it receives a CRS completion, regardless of the address of
- * the read or the CRS Software Visibility Enable bit. As a
+	 * when it receives an RRS completion, regardless of the address of
+ * the read or the RRS Software Visibility Enable bit. As a
* partial workaround for this, we retry in software any read that
* returns CFG_RETRY_STATUS.
*
* Note that a non-Vendor ID config register may have a value of
* CFG_RETRY_STATUS. If we read that, we can't distinguish it from
- * a CRS completion, so we will incorrectly retry the read and
+	 * an RRS completion, so we will incorrectly retry the read and
* eventually return the wrong data (0xffffffff).
*/
data = readl(cfg_data_p);
while (data == CFG_RETRY_STATUS && timeout--) {
/*
- * CRS state is set in CFG_RD status register
+ * RRS state is set in CFG_RD status register
* This will handle the case where CFG_RETRY_STATUS is
* valid config data.
*/
status = iproc_pcie_read_reg(pcie, IPROC_PCIE_CFG_RD_STATUS);
- if (status != CFG_RD_CRS)
+ if (status != CFG_RD_RRS)
return data;
udelay(1);
@@ -556,8 +556,8 @@ static void iproc_pcie_fix_cap(struct iproc_pcie *pcie, int where, u32 *val)
break;
case IPROC_PCI_EXP_CAP + PCI_EXP_RTCTL:
- /* Don't advertise CRS SV support */
- *val &= ~(PCI_EXP_RTCAP_CRSVIS << 16);
+ /* Don't advertise RRS SV support */
+ *val &= ~(PCI_EXP_RTCAP_RRS_SV << 16);
break;
default:
diff --git a/drivers/pci/controller/pcie-mediatek-gen3.c b/drivers/pci/controller/pcie-mediatek-gen3.c
index 975b3024fb08..b55f5973414c 100644
--- a/drivers/pci/controller/pcie-mediatek-gen3.c
+++ b/drivers/pci/controller/pcie-mediatek-gen3.c
@@ -6,29 +6,46 @@
* Author: Jianjun Wang <jianjun.wang@mediatek.com>
*/
+#include <linux/bitfield.h>
#include <linux/clk.h>
+#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/msi.h>
+#include <linux/of_device.h>
+#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
#include <linux/reset.h>
#include "../pci.h"
+#define PCIE_BASE_CFG_REG 0x14
+#define PCIE_BASE_CFG_SPEED GENMASK(15, 8)
+
#define PCIE_SETTING_REG 0x80
+#define PCIE_SETTING_LINK_WIDTH GENMASK(11, 8)
+#define PCIE_SETTING_GEN_SUPPORT GENMASK(14, 12)
#define PCIE_PCI_IDS_1 0x9c
#define PCI_CLASS(class) (class << 8)
#define PCIE_RC_MODE BIT(0)
+#define PCIE_EQ_PRESET_01_REG 0x100
+#define PCIE_VAL_LN0_DOWNSTREAM GENMASK(6, 0)
+#define PCIE_VAL_LN0_UPSTREAM GENMASK(14, 8)
+#define PCIE_VAL_LN1_DOWNSTREAM GENMASK(22, 16)
+#define PCIE_VAL_LN1_UPSTREAM GENMASK(30, 24)
+
#define PCIE_CFGNUM_REG 0x140
#define PCIE_CFG_DEVFN(devfn) ((devfn) & GENMASK(7, 0))
#define PCIE_CFG_BUS(bus) (((bus) << 8) & GENMASK(15, 8))
@@ -68,6 +85,14 @@
#define PCIE_MSI_SET_ENABLE_REG 0x190
#define PCIE_MSI_SET_ENABLE GENMASK(PCIE_MSI_SET_NUM - 1, 0)
+#define PCIE_PIPE4_PIE8_REG 0x338
+#define PCIE_K_FINETUNE_MAX GENMASK(5, 0)
+#define PCIE_K_FINETUNE_ERR GENMASK(7, 6)
+#define PCIE_K_PRESET_TO_USE GENMASK(18, 8)
+#define PCIE_K_PHYPARAM_QUERY BIT(19)
+#define PCIE_K_QUERY_TIMEOUT BIT(20)
+#define PCIE_K_PRESET_TO_USE_16G GENMASK(31, 21)
+
#define PCIE_MSI_SET_BASE_REG 0xc00
#define PCIE_MSI_SET_OFFSET 0x10
#define PCIE_MSI_SET_STATUS_OFFSET 0x04
@@ -100,6 +125,40 @@
#define PCIE_ATR_TLP_TYPE_MEM PCIE_ATR_TLP_TYPE(0)
#define PCIE_ATR_TLP_TYPE_IO PCIE_ATR_TLP_TYPE(2)
+#define MAX_NUM_PHY_RESETS 3
+
+#define PCIE_MTK_RESET_TIME_US 10
+
+/* Time in ms needed to complete PCIe reset on EN7581 SoC */
+#define PCIE_EN7581_RESET_TIME_MS 100
+
+struct mtk_gen3_pcie;
+
+#define PCIE_CONF_LINK2_CTL_STS (PCIE_CFG_OFFSET_ADDR + 0xb0)
+#define PCIE_CONF_LINK2_LCR2_LINK_SPEED GENMASK(3, 0)
+
+enum mtk_gen3_pcie_flags {
+ SKIP_PCIE_RSTB = BIT(0), /* Skip PERST# assertion during device
+ * probing or suspend/resume phase to
+ * avoid hw bugs/issues.
+ */
+};
+
+/**
+ * struct mtk_gen3_pcie_pdata - differentiate between host generations
+ * @power_up: pcie power_up callback
+ * @phy_resets: phy reset lines SoC data.
+ * @flags: pcie device flags.
+ */
+struct mtk_gen3_pcie_pdata {
+ int (*power_up)(struct mtk_gen3_pcie *pcie);
+ struct {
+ const char *id[MAX_NUM_PHY_RESETS];
+ int num_resets;
+ } phy_resets;
+ u32 flags;
+};
+
/**
* struct mtk_msi_set - MSI information for each set
* @base: IO mapped register base
@@ -118,10 +177,12 @@ struct mtk_msi_set {
* @base: IO mapped register base
* @reg_base: physical register base
* @mac_reset: MAC reset control
- * @phy_reset: PHY reset control
+ * @phy_resets: PHY reset controllers
* @phy: PHY controller block
* @clks: PCIe clocks
* @num_clks: PCIe clocks count for this port
+ * @max_link_speed: Maximum link speed (PCIe Gen) for this port
+ * @num_lanes: Number of PCIe lanes for this port
* @irq: PCIe controller interrupt number
* @saved_irq_state: IRQ enable state saved at suspend time
* @irq_lock: lock protecting IRQ register access
@@ -131,16 +192,19 @@ struct mtk_msi_set {
* @msi_sets: MSI sets information
* @lock: lock protecting IRQ bit map
* @msi_irq_in_use: bit map for assigned MSI IRQ
+ * @soc: pointer to SoC-dependent operations
*/
struct mtk_gen3_pcie {
struct device *dev;
void __iomem *base;
phys_addr_t reg_base;
struct reset_control *mac_reset;
- struct reset_control *phy_reset;
+ struct reset_control_bulk_data phy_resets[MAX_NUM_PHY_RESETS];
struct phy *phy;
struct clk_bulk_data *clks;
int num_clks;
+ u8 max_link_speed;
+ u8 num_lanes;
int irq;
u32 saved_irq_state;
@@ -151,6 +215,8 @@ struct mtk_gen3_pcie {
struct mtk_msi_set msi_sets[PCIE_MSI_SET_NUM];
struct mutex lock;
DECLARE_BITMAP(msi_irq_in_use, PCIE_MSI_IRQS_NUM);
+
+ const struct mtk_gen3_pcie_pdata *soc;
};
/* LTSSM state in PCIE_LTSSM_STATUS_REG bit[28:24] */
@@ -288,7 +354,8 @@ static int mtk_pcie_set_trans_table(struct mtk_gen3_pcie *pcie,
dev_dbg(pcie->dev, "set %s trans window[%d]: cpu_addr = %#llx, pci_addr = %#llx, size = %#llx\n",
range_type, *num, (unsigned long long)cpu_addr,
- (unsigned long long)pci_addr, (unsigned long long)table_size);
+ (unsigned long long)pci_addr,
+ (unsigned long long)table_size);
cpu_addr += table_size;
pci_addr += table_size;
@@ -340,11 +407,35 @@ static int mtk_pcie_startup_port(struct mtk_gen3_pcie *pcie)
int err;
u32 val;
- /* Set as RC mode */
+ /* Set as RC mode and set controller PCIe Gen speed restriction, if any */
val = readl_relaxed(pcie->base + PCIE_SETTING_REG);
val |= PCIE_RC_MODE;
+ if (pcie->max_link_speed) {
+ val &= ~PCIE_SETTING_GEN_SUPPORT;
+
+ /* Can enable link speed support only from Gen2 onwards */
+ if (pcie->max_link_speed >= 2)
+ val |= FIELD_PREP(PCIE_SETTING_GEN_SUPPORT,
+ GENMASK(pcie->max_link_speed - 2, 0));
+ }
+ if (pcie->num_lanes) {
+ val &= ~PCIE_SETTING_LINK_WIDTH;
+
+		/* Zero means one lane; each set bit enables x2/x4/x8/x16 */
+ if (pcie->num_lanes > 1)
+ val |= FIELD_PREP(PCIE_SETTING_LINK_WIDTH,
+ GENMASK(fls(pcie->num_lanes >> 2), 0));
+ }
writel_relaxed(val, pcie->base + PCIE_SETTING_REG);
+ /* Set Link Control 2 (LNKCTL2) speed restriction, if any */
+ if (pcie->max_link_speed) {
+ val = readl_relaxed(pcie->base + PCIE_CONF_LINK2_CTL_STS);
+ val &= ~PCIE_CONF_LINK2_LCR2_LINK_SPEED;
+ val |= FIELD_PREP(PCIE_CONF_LINK2_LCR2_LINK_SPEED, pcie->max_link_speed);
+ writel_relaxed(val, pcie->base + PCIE_CONF_LINK2_CTL_STS);
+ }
+
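	/*
	 * Worked examples of the encodings above, following directly from the
	 * GENMASK() math (the per-bit meaning of GEN_SUPPORT is an assumption):
	 *   max_link_speed = 3 -> GEN_SUPPORT = GENMASK(1, 0) = 0x3
	 *   num_lanes = 4      -> LINK_WIDTH  = GENMASK(fls(4 >> 2), 0) = 0x3
	 * The LNKCTL2 target-speed field above is written with the raw
	 * max_link_speed value itself.
	 */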
/* Set class code */
val = readl_relaxed(pcie->base + PCIE_PCI_IDS_1);
val &= ~GENMASK(31, 8);
@@ -361,22 +452,33 @@ static int mtk_pcie_startup_port(struct mtk_gen3_pcie *pcie)
val |= PCIE_DISABLE_DVFSRC_VLT_REQ;
writel_relaxed(val, pcie->base + PCIE_MISC_CTRL_REG);
- /* Assert all reset signals */
- val = readl_relaxed(pcie->base + PCIE_RST_CTRL_REG);
- val |= PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB | PCIE_PE_RSTB;
- writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);
-
/*
- * Described in PCIe CEM specification sections 2.2 (PERST# Signal)
- * and 2.2.1 (Initial Power-Up (G3 to S0)).
- * The deassertion of PERST# should be delayed 100ms (TPVPERL)
- * for the power and clock to become stable.
+	 * Airoha EN7581 has a hw bug when asserting/releasing the PCIE_PE_RSTB
+	 * signal, causing occasional PCIe link-down events. In order to
+	 * overcome the issue, the PCIE_RSTB signals are not asserted/released
+	 * at this stage and the PCIe block is reset using en7523_reset_assert()
+	 * and en7581_pci_enable().
*/
- msleep(100);
-
- /* De-assert reset signals */
- val &= ~(PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB | PCIE_PE_RSTB);
- writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);
+ if (!(pcie->soc->flags & SKIP_PCIE_RSTB)) {
+ /* Assert all reset signals */
+ val = readl_relaxed(pcie->base + PCIE_RST_CTRL_REG);
+ val |= PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB |
+ PCIE_PE_RSTB;
+ writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);
+
+ /*
+ * Described in PCIe CEM specification revision 6.0.
+ *
+ * The deassertion of PERST# should be delayed 100ms (TPVPERL)
+ * for the power and clock to become stable.
+ */
+ msleep(PCIE_T_PVPERL_MS);
+
+ /* De-assert reset signals */
+ val &= ~(PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB |
+ PCIE_PE_RSTB);
+ writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);
+ }
/* Check if the link is up or not */
err = readl_poll_timeout(pcie->base + PCIE_LINK_STATUS_REG, val,
@@ -424,12 +526,6 @@ static int mtk_pcie_startup_port(struct mtk_gen3_pcie *pcie)
return 0;
}
-static int mtk_pcie_set_affinity(struct irq_data *data,
- const struct cpumask *mask, bool force)
-{
- return -EINVAL;
-}
-
static void mtk_pcie_msi_irq_mask(struct irq_data *data)
{
pci_msi_mask_irq(data);
@@ -450,8 +546,9 @@ static struct irq_chip mtk_msi_irq_chip = {
};
static struct msi_domain_info mtk_msi_domain_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI),
+ .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX |
+ MSI_FLAG_MULTI_PCI_MSI,
.chip = &mtk_msi_irq_chip,
};
@@ -517,7 +614,6 @@ static struct irq_chip mtk_msi_bottom_irq_chip = {
.irq_mask = mtk_msi_bottom_irq_mask,
.irq_unmask = mtk_msi_bottom_irq_unmask,
.irq_compose_msi_msg = mtk_compose_msi_msg,
- .irq_set_affinity = mtk_pcie_set_affinity,
.name = "MSI",
};
@@ -618,7 +714,6 @@ static struct irq_chip mtk_intx_irq_chip = {
.irq_mask = mtk_intx_mask,
.irq_unmask = mtk_intx_unmask,
.irq_eoi = mtk_intx_eoi,
- .irq_set_affinity = mtk_pcie_set_affinity,
.name = "INTx",
};
@@ -650,8 +745,8 @@ static int mtk_pcie_init_irq_domains(struct mtk_gen3_pcie *pcie)
return -ENODEV;
}
- pcie->intx_domain = irq_domain_add_linear(intc_node, PCI_NUM_INTX,
- &intx_domain_ops, pcie);
+ pcie->intx_domain = irq_domain_create_linear(of_fwnode_handle(intc_node), PCI_NUM_INTX,
+ &intx_domain_ops, pcie);
if (!pcie->intx_domain) {
dev_err(dev, "failed to create INTx IRQ domain\n");
ret = -ENODEV;
@@ -661,8 +756,9 @@ static int mtk_pcie_init_irq_domains(struct mtk_gen3_pcie *pcie)
/* Setup MSI */
mutex_init(&pcie->lock);
- pcie->msi_bottom_domain = irq_domain_add_linear(node, PCIE_MSI_IRQS_NUM,
- &mtk_msi_bottom_domain_ops, pcie);
+ pcie->msi_bottom_domain = irq_domain_create_linear(of_fwnode_handle(node),
+ PCIE_MSI_IRQS_NUM,
+ &mtk_msi_bottom_domain_ops, pcie);
if (!pcie->msi_bottom_domain) {
dev_err(dev, "failed to create MSI bottom domain\n");
ret = -ENODEV;
@@ -775,10 +871,11 @@ static int mtk_pcie_setup_irq(struct mtk_gen3_pcie *pcie)
static int mtk_pcie_parse_port(struct mtk_gen3_pcie *pcie)
{
+ int i, ret, num_resets = pcie->soc->phy_resets.num_resets;
struct device *dev = pcie->dev;
struct platform_device *pdev = to_platform_device(dev);
struct resource *regs;
- int ret;
+ u32 num_lanes;
regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcie-mac");
if (!regs)
@@ -791,12 +888,13 @@ static int mtk_pcie_parse_port(struct mtk_gen3_pcie *pcie)
pcie->reg_base = regs->start;
- pcie->phy_reset = devm_reset_control_get_optional_exclusive(dev, "phy");
- if (IS_ERR(pcie->phy_reset)) {
- ret = PTR_ERR(pcie->phy_reset);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "failed to get PHY reset\n");
+ for (i = 0; i < num_resets; i++)
+ pcie->phy_resets[i].id = pcie->soc->phy_resets.id[i];
+ ret = devm_reset_control_bulk_get_optional_shared(dev, num_resets,
+ pcie->phy_resets);
+ if (ret) {
+ dev_err(dev, "failed to get PHY bulk reset\n");
return ret;
}
@@ -824,16 +922,151 @@ static int mtk_pcie_parse_port(struct mtk_gen3_pcie *pcie)
return pcie->num_clks;
}
+ ret = of_property_read_u32(dev->of_node, "num-lanes", &num_lanes);
+ if (ret == 0) {
+ if (num_lanes == 0 || num_lanes > 16 ||
+ (num_lanes != 1 && num_lanes % 2))
+ dev_warn(dev, "invalid num-lanes, using controller defaults\n");
+ else
+ pcie->num_lanes = num_lanes;
+ }
+
return 0;
}
+static int mtk_pcie_en7581_power_up(struct mtk_gen3_pcie *pcie)
+{
+ struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
+ struct device *dev = pcie->dev;
+ struct resource_entry *entry;
+ struct regmap *pbus_regmap;
+ u32 val, args[2], size;
+ resource_size_t addr;
+ int err;
+
+ /*
+ * The controller may have been left out of reset by the bootloader
+ * so make sure that we get a clean start by asserting resets here.
+ */
+ reset_control_bulk_assert(pcie->soc->phy_resets.num_resets,
+ pcie->phy_resets);
+
+	/* Wait for the time needed to complete the reset line assertion. */
+ msleep(PCIE_EN7581_RESET_TIME_MS);
+
+ /*
+ * Configure PBus base address and base address mask to allow the
+ * hw to detect if a given address is accessible on PCIe controller.
+	 * hw to detect if a given address is accessible on the PCIe controller.
+ pbus_regmap = syscon_regmap_lookup_by_phandle_args(dev->of_node,
+ "mediatek,pbus-csr",
+ ARRAY_SIZE(args),
+ args);
+ if (IS_ERR(pbus_regmap))
+ return PTR_ERR(pbus_regmap);
+
+ entry = resource_list_first_type(&host->windows, IORESOURCE_MEM);
+ if (!entry)
+ return -ENODEV;
+
+ addr = entry->res->start - entry->offset;
+ regmap_write(pbus_regmap, args[0], lower_32_bits(addr));
+ size = lower_32_bits(resource_size(entry->res));
+ regmap_write(pbus_regmap, args[1], GENMASK(31, __fls(size)));
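	/*
	 * Example of the mask math above: a 512 MiB window (size = 0x20000000)
	 * gives __fls(size) = 29, so the mask written is GENMASK(31, 29).
	 */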
+
+ /*
+ * Unlike the other MediaTek Gen3 controllers, the Airoha EN7581
+ * requires PHY initialization and power-on before PHY reset deassert.
+ */
+ err = phy_init(pcie->phy);
+ if (err) {
+ dev_err(dev, "failed to initialize PHY\n");
+ return err;
+ }
+
+ err = phy_power_on(pcie->phy);
+ if (err) {
+ dev_err(dev, "failed to power on PHY\n");
+ goto err_phy_on;
+ }
+
+ err = reset_control_bulk_deassert(pcie->soc->phy_resets.num_resets,
+ pcie->phy_resets);
+ if (err) {
+ dev_err(dev, "failed to deassert PHYs\n");
+ goto err_phy_deassert;
+ }
+
+ /*
+ * Wait for the time needed to complete the bulk de-assert above.
+	 * This delay is specific to the EN7581 SoC.
+ */
+ msleep(PCIE_EN7581_RESET_TIME_MS);
+
+ pm_runtime_enable(dev);
+ pm_runtime_get_sync(dev);
+
+ val = FIELD_PREP(PCIE_VAL_LN0_DOWNSTREAM, 0x47) |
+ FIELD_PREP(PCIE_VAL_LN1_DOWNSTREAM, 0x47) |
+ FIELD_PREP(PCIE_VAL_LN0_UPSTREAM, 0x41) |
+ FIELD_PREP(PCIE_VAL_LN1_UPSTREAM, 0x41);
+ writel_relaxed(val, pcie->base + PCIE_EQ_PRESET_01_REG);
+
+ val = PCIE_K_PHYPARAM_QUERY | PCIE_K_QUERY_TIMEOUT |
+ FIELD_PREP(PCIE_K_PRESET_TO_USE_16G, 0x80) |
+ FIELD_PREP(PCIE_K_PRESET_TO_USE, 0x2) |
+ FIELD_PREP(PCIE_K_FINETUNE_MAX, 0xf);
+ writel_relaxed(val, pcie->base + PCIE_PIPE4_PIE8_REG);
+
+ err = clk_bulk_prepare_enable(pcie->num_clks, pcie->clks);
+ if (err) {
+ dev_err(dev, "failed to prepare clock\n");
+ goto err_clk_prepare_enable;
+ }
+
+ /*
+	 * Airoha EN7581 performs the PCIe reset via clk callbacks since it has
+	 * a hw issue with the PCIE_PE_RSTB signal. Wait for the time needed
+	 * to complete the PCIe reset.
+ */
+ msleep(PCIE_T_PVPERL_MS);
+
+ return 0;
+
+err_clk_prepare_enable:
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
+ reset_control_bulk_assert(pcie->soc->phy_resets.num_resets,
+ pcie->phy_resets);
+err_phy_deassert:
+ phy_power_off(pcie->phy);
+err_phy_on:
+ phy_exit(pcie->phy);
+
+ return err;
+}
+
static int mtk_pcie_power_up(struct mtk_gen3_pcie *pcie)
{
struct device *dev = pcie->dev;
int err;
+ /*
+ * The controller may have been left out of reset by the bootloader
+ * so make sure that we get a clean start by asserting resets here.
+ */
+ reset_control_bulk_assert(pcie->soc->phy_resets.num_resets,
+ pcie->phy_resets);
+ reset_control_assert(pcie->mac_reset);
+ usleep_range(PCIE_MTK_RESET_TIME_US, 2 * PCIE_MTK_RESET_TIME_US);
+
/* PHY power on and enable pipe clock */
- reset_control_deassert(pcie->phy_reset);
+ err = reset_control_bulk_deassert(pcie->soc->phy_resets.num_resets,
+ pcie->phy_resets);
+ if (err) {
+ dev_err(dev, "failed to deassert PHYs\n");
+ return err;
+ }
err = phy_init(pcie->phy);
if (err) {
@@ -869,7 +1102,8 @@ err_clk_init:
err_phy_on:
phy_exit(pcie->phy);
err_phy_init:
- reset_control_assert(pcie->phy_reset);
+ reset_control_bulk_assert(pcie->soc->phy_resets.num_resets,
+ pcie->phy_resets);
return err;
}
@@ -884,30 +1118,56 @@ static void mtk_pcie_power_down(struct mtk_gen3_pcie *pcie)
phy_power_off(pcie->phy);
phy_exit(pcie->phy);
- reset_control_assert(pcie->phy_reset);
+ reset_control_bulk_assert(pcie->soc->phy_resets.num_resets,
+ pcie->phy_resets);
+}
+
+static int mtk_pcie_get_controller_max_link_speed(struct mtk_gen3_pcie *pcie)
+{
+ u32 val;
+ int ret;
+
+ val = readl_relaxed(pcie->base + PCIE_BASE_CFG_REG);
+ val = FIELD_GET(PCIE_BASE_CFG_SPEED, val);
+ ret = fls(val);
+
+ return ret > 0 ? ret : -EINVAL;
}
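/*
 * A worked example of the readout above: if the PCIE_BASE_CFG_SPEED field
 * reads back 0x7 (assumed to be one bit per supported generation),
 * fls(0x7) = 3 and the controller reports Gen3 as its maximum link speed.
 */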
static int mtk_pcie_setup(struct mtk_gen3_pcie *pcie)
{
- int err;
+ int err, max_speed;
err = mtk_pcie_parse_port(pcie);
if (err)
return err;
/*
- * The controller may have been left out of reset by the bootloader
- * so make sure that we get a clean start by asserting resets here.
+	 * Deassert the lines here to avoid an unbalanced deassert_count on
+	 * the shared bulk reset controls.
*/
- reset_control_assert(pcie->phy_reset);
- reset_control_assert(pcie->mac_reset);
- usleep_range(10, 20);
+ reset_control_bulk_deassert(pcie->soc->phy_resets.num_resets,
+ pcie->phy_resets);
/* Don't touch the hardware registers before power up */
- err = mtk_pcie_power_up(pcie);
+ err = pcie->soc->power_up(pcie);
if (err)
return err;
+ err = of_pci_get_max_link_speed(pcie->dev->of_node);
+ if (err) {
+ /* Get the maximum speed supported by the controller */
+ max_speed = mtk_pcie_get_controller_max_link_speed(pcie);
+
+ /* Set max_link_speed only if the controller supports it */
+ if (max_speed >= 0 && max_speed <= err) {
+ pcie->max_link_speed = err;
+ dev_info(pcie->dev,
+ "maximum controller link speed Gen%d, overriding to Gen%u",
+ max_speed, pcie->max_link_speed);
+ }
+ }
+
/* Try link up */
err = mtk_pcie_startup_port(pcie);
if (err)
@@ -939,6 +1199,7 @@ static int mtk_pcie_probe(struct platform_device *pdev)
pcie = pci_host_bridge_priv(host);
pcie->dev = dev;
+ pcie->soc = device_get_match_data(dev);
platform_set_drvdata(pdev, pcie);
err = mtk_pcie_setup(pcie);
@@ -1036,10 +1297,12 @@ static int mtk_pcie_suspend_noirq(struct device *dev)
return err;
}
- /* Pull down the PERST# pin */
- val = readl_relaxed(pcie->base + PCIE_RST_CTRL_REG);
- val |= PCIE_PE_RSTB;
- writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);
+ if (!(pcie->soc->flags & SKIP_PCIE_RSTB)) {
+ /* Assert the PERST# pin */
+ val = readl_relaxed(pcie->base + PCIE_RST_CTRL_REG);
+ val |= PCIE_PE_RSTB;
+ writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);
+ }
dev_dbg(pcie->dev, "entered L2 states successfully");
@@ -1054,7 +1317,7 @@ static int mtk_pcie_resume_noirq(struct device *dev)
struct mtk_gen3_pcie *pcie = dev_get_drvdata(dev);
int err;
- err = mtk_pcie_power_up(pcie);
+ err = pcie->soc->power_up(pcie);
if (err)
return err;
@@ -1074,21 +1337,43 @@ static const struct dev_pm_ops mtk_pcie_pm_ops = {
mtk_pcie_resume_noirq)
};
+static const struct mtk_gen3_pcie_pdata mtk_pcie_soc_mt8192 = {
+ .power_up = mtk_pcie_power_up,
+ .phy_resets = {
+ .id[0] = "phy",
+ .num_resets = 1,
+ },
+};
+
+static const struct mtk_gen3_pcie_pdata mtk_pcie_soc_en7581 = {
+ .power_up = mtk_pcie_en7581_power_up,
+ .phy_resets = {
+ .id[0] = "phy-lane0",
+ .id[1] = "phy-lane1",
+ .id[2] = "phy-lane2",
+ .num_resets = 3,
+ },
+ .flags = SKIP_PCIE_RSTB,
+};
+
static const struct of_device_id mtk_pcie_of_match[] = {
- { .compatible = "mediatek,mt8192-pcie" },
+ { .compatible = "airoha,en7581-pcie", .data = &mtk_pcie_soc_en7581 },
+ { .compatible = "mediatek,mt8192-pcie", .data = &mtk_pcie_soc_mt8192 },
{},
};
MODULE_DEVICE_TABLE(of, mtk_pcie_of_match);
static struct platform_driver mtk_pcie_driver = {
.probe = mtk_pcie_probe,
- .remove_new = mtk_pcie_remove,
+ .remove = mtk_pcie_remove,
.driver = {
.name = "mtk-pcie-gen3",
.of_match_table = mtk_pcie_of_match,
.pm = &mtk_pcie_pm_ops,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
};
module_platform_driver(mtk_pcie_driver);
+MODULE_DESCRIPTION("MediaTek Gen3 PCIe host controller driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/pcie-mediatek.c b/drivers/pci/controller/pcie-mediatek.c
index 48372013f26d..e1934aa06c8d 100644
--- a/drivers/pci/controller/pcie-mediatek.c
+++ b/drivers/pci/controller/pcie-mediatek.c
@@ -211,7 +211,6 @@ struct mtk_pcie_port {
* @base: IO mapped register base
* @cfg: IO mapped register map for PCIe config
* @free_ck: free-run reference clock
- * @mem: non-prefetchable memory resource
* @ports: pointer to PCIe port information
* @soc: pointer to SoC-dependent operations
*/
@@ -407,12 +406,6 @@ static void mtk_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
(int)data->hwirq, msg->address_hi, msg->address_lo);
}
-static int mtk_msi_set_affinity(struct irq_data *irq_data,
- const struct cpumask *mask, bool force)
-{
- return -EINVAL;
-}
-
static void mtk_msi_ack_irq(struct irq_data *data)
{
struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data);
@@ -424,7 +417,6 @@ static void mtk_msi_ack_irq(struct irq_data *data)
static struct irq_chip mtk_msi_bottom_irq_chip = {
.name = "MTK MSI",
.irq_compose_msi_msg = mtk_compose_msi_msg,
- .irq_set_affinity = mtk_msi_set_affinity,
.irq_ack = mtk_msi_ack_irq,
};
@@ -486,14 +478,14 @@ static struct irq_chip mtk_msi_irq_chip = {
};
static struct msi_domain_info mtk_msi_domain_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_PCI_MSIX),
+ .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX,
.chip = &mtk_msi_irq_chip,
};
static int mtk_pcie_allocate_msi_domains(struct mtk_pcie_port *port)
{
- struct fwnode_handle *fwnode = of_node_to_fwnode(port->pcie->dev->of_node);
+ struct fwnode_handle *fwnode = of_fwnode_handle(port->pcie->dev->of_node);
mutex_init(&port->lock);
@@ -577,8 +569,8 @@ static int mtk_pcie_init_irq_domain(struct mtk_pcie_port *port,
return -ENODEV;
}
- port->irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
- &intx_domain_ops, port);
+ port->irq_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node), PCI_NUM_INTX,
+ &intx_domain_ops, port);
of_node_put(pcie_intc_node);
if (!port->irq_domain) {
dev_err(dev, "failed to get INTx IRQ domain\n");
@@ -1049,24 +1041,22 @@ err_free_ck:
static int mtk_pcie_setup(struct mtk_pcie *pcie)
{
struct device *dev = pcie->dev;
- struct device_node *node = dev->of_node, *child;
+ struct device_node *node = dev->of_node;
struct mtk_pcie_port *port, *tmp;
int err, slot;
slot = of_get_pci_domain_nr(dev->of_node);
if (slot < 0) {
- for_each_available_child_of_node(node, child) {
+ for_each_available_child_of_node_scoped(node, child) {
err = of_pci_get_devfn(child);
- if (err < 0) {
- dev_err(dev, "failed to get devfn: %d\n", err);
- goto error_put_node;
- }
+ if (err < 0)
+ return dev_err_probe(dev, err, "failed to get devfn\n");
slot = PCI_SLOT(err);
err = mtk_pcie_parse_port(pcie, child, slot);
if (err)
- goto error_put_node;
+ return err;
}
} else {
err = mtk_pcie_parse_port(pcie, node, slot);
@@ -1087,9 +1077,6 @@ static int mtk_pcie_setup(struct mtk_pcie *pcie)
mtk_pcie_subsys_powerdown(pcie);
return 0;
-error_put_node:
- of_node_put(child);
- return err;
}
static int mtk_pcie_probe(struct platform_device *pdev)
@@ -1243,7 +1230,7 @@ MODULE_DEVICE_TABLE(of, mtk_pcie_ids);
static struct platform_driver mtk_pcie_driver = {
.probe = mtk_pcie_probe,
- .remove_new = mtk_pcie_remove,
+ .remove = mtk_pcie_remove,
.driver = {
.name = "mtk-pcie",
.of_match_table = mtk_pcie_ids,
@@ -1252,4 +1239,5 @@ static struct platform_driver mtk_pcie_driver = {
},
};
module_platform_driver(mtk_pcie_driver);
+MODULE_DESCRIPTION("MediaTek PCIe host controller driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/pcie-microchip-host.c b/drivers/pci/controller/pcie-microchip-host.c
deleted file mode 100644
index 137fb8570ba2..000000000000
--- a/drivers/pci/controller/pcie-microchip-host.c
+++ /dev/null
@@ -1,1216 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Microchip AXI PCIe Bridge host controller driver
- *
- * Copyright (c) 2018 - 2020 Microchip Corporation. All rights reserved.
- *
- * Author: Daire McNamara <daire.mcnamara@microchip.com>
- */
-
-#include <linux/bitfield.h>
-#include <linux/clk.h>
-#include <linux/irqchip/chained_irq.h>
-#include <linux/irqdomain.h>
-#include <linux/module.h>
-#include <linux/msi.h>
-#include <linux/of_address.h>
-#include <linux/of_pci.h>
-#include <linux/pci-ecam.h>
-#include <linux/platform_device.h>
-
-#include "../pci.h"
-
-/* Number of MSI IRQs */
-#define MC_MAX_NUM_MSI_IRQS 32
-
-/* PCIe Bridge Phy and Controller Phy offsets */
-#define MC_PCIE1_BRIDGE_ADDR 0x00008000u
-#define MC_PCIE1_CTRL_ADDR 0x0000a000u
-
-#define MC_PCIE_BRIDGE_ADDR (MC_PCIE1_BRIDGE_ADDR)
-#define MC_PCIE_CTRL_ADDR (MC_PCIE1_CTRL_ADDR)
-
-/* PCIe Bridge Phy Regs */
-#define PCIE_PCI_IRQ_DW0 0xa8
-#define MSIX_CAP_MASK BIT(31)
-#define NUM_MSI_MSGS_MASK GENMASK(6, 4)
-#define NUM_MSI_MSGS_SHIFT 4
-
-#define IMASK_LOCAL 0x180
-#define DMA_END_ENGINE_0_MASK 0x00000000u
-#define DMA_END_ENGINE_0_SHIFT 0
-#define DMA_END_ENGINE_1_MASK 0x00000000u
-#define DMA_END_ENGINE_1_SHIFT 1
-#define DMA_ERROR_ENGINE_0_MASK 0x00000100u
-#define DMA_ERROR_ENGINE_0_SHIFT 8
-#define DMA_ERROR_ENGINE_1_MASK 0x00000200u
-#define DMA_ERROR_ENGINE_1_SHIFT 9
-#define A_ATR_EVT_POST_ERR_MASK 0x00010000u
-#define A_ATR_EVT_POST_ERR_SHIFT 16
-#define A_ATR_EVT_FETCH_ERR_MASK 0x00020000u
-#define A_ATR_EVT_FETCH_ERR_SHIFT 17
-#define A_ATR_EVT_DISCARD_ERR_MASK 0x00040000u
-#define A_ATR_EVT_DISCARD_ERR_SHIFT 18
-#define A_ATR_EVT_DOORBELL_MASK 0x00000000u
-#define A_ATR_EVT_DOORBELL_SHIFT 19
-#define P_ATR_EVT_POST_ERR_MASK 0x00100000u
-#define P_ATR_EVT_POST_ERR_SHIFT 20
-#define P_ATR_EVT_FETCH_ERR_MASK 0x00200000u
-#define P_ATR_EVT_FETCH_ERR_SHIFT 21
-#define P_ATR_EVT_DISCARD_ERR_MASK 0x00400000u
-#define P_ATR_EVT_DISCARD_ERR_SHIFT 22
-#define P_ATR_EVT_DOORBELL_MASK 0x00000000u
-#define P_ATR_EVT_DOORBELL_SHIFT 23
-#define PM_MSI_INT_INTA_MASK 0x01000000u
-#define PM_MSI_INT_INTA_SHIFT 24
-#define PM_MSI_INT_INTB_MASK 0x02000000u
-#define PM_MSI_INT_INTB_SHIFT 25
-#define PM_MSI_INT_INTC_MASK 0x04000000u
-#define PM_MSI_INT_INTC_SHIFT 26
-#define PM_MSI_INT_INTD_MASK 0x08000000u
-#define PM_MSI_INT_INTD_SHIFT 27
-#define PM_MSI_INT_INTX_MASK 0x0f000000u
-#define PM_MSI_INT_INTX_SHIFT 24
-#define PM_MSI_INT_MSI_MASK 0x10000000u
-#define PM_MSI_INT_MSI_SHIFT 28
-#define PM_MSI_INT_AER_EVT_MASK 0x20000000u
-#define PM_MSI_INT_AER_EVT_SHIFT 29
-#define PM_MSI_INT_EVENTS_MASK 0x40000000u
-#define PM_MSI_INT_EVENTS_SHIFT 30
-#define PM_MSI_INT_SYS_ERR_MASK 0x80000000u
-#define PM_MSI_INT_SYS_ERR_SHIFT 31
-#define NUM_LOCAL_EVENTS 15
-#define ISTATUS_LOCAL 0x184
-#define IMASK_HOST 0x188
-#define ISTATUS_HOST 0x18c
-#define IMSI_ADDR 0x190
-#define ISTATUS_MSI 0x194
-
-/* PCIe Master table init defines */
-#define ATR0_PCIE_WIN0_SRCADDR_PARAM 0x600u
-#define ATR0_PCIE_ATR_SIZE 0x25
-#define ATR0_PCIE_ATR_SIZE_SHIFT 1
-#define ATR0_PCIE_WIN0_SRC_ADDR 0x604u
-#define ATR0_PCIE_WIN0_TRSL_ADDR_LSB 0x608u
-#define ATR0_PCIE_WIN0_TRSL_ADDR_UDW 0x60cu
-#define ATR0_PCIE_WIN0_TRSL_PARAM 0x610u
-
-/* PCIe AXI slave table init defines */
-#define ATR0_AXI4_SLV0_SRCADDR_PARAM 0x800u
-#define ATR_SIZE_SHIFT 1
-#define ATR_IMPL_ENABLE 1
-#define ATR0_AXI4_SLV0_SRC_ADDR 0x804u
-#define ATR0_AXI4_SLV0_TRSL_ADDR_LSB 0x808u
-#define ATR0_AXI4_SLV0_TRSL_ADDR_UDW 0x80cu
-#define ATR0_AXI4_SLV0_TRSL_PARAM 0x810u
-#define PCIE_TX_RX_INTERFACE 0x00000000u
-#define PCIE_CONFIG_INTERFACE 0x00000001u
-
-#define ATR_ENTRY_SIZE 32
-
-/* PCIe Controller Phy Regs */
-#define SEC_ERROR_EVENT_CNT 0x20
-#define DED_ERROR_EVENT_CNT 0x24
-#define SEC_ERROR_INT 0x28
-#define SEC_ERROR_INT_TX_RAM_SEC_ERR_INT GENMASK(3, 0)
-#define SEC_ERROR_INT_RX_RAM_SEC_ERR_INT GENMASK(7, 4)
-#define SEC_ERROR_INT_PCIE2AXI_RAM_SEC_ERR_INT GENMASK(11, 8)
-#define SEC_ERROR_INT_AXI2PCIE_RAM_SEC_ERR_INT GENMASK(15, 12)
-#define SEC_ERROR_INT_ALL_RAM_SEC_ERR_INT GENMASK(15, 0)
-#define NUM_SEC_ERROR_INTS (4)
-#define SEC_ERROR_INT_MASK 0x2c
-#define DED_ERROR_INT 0x30
-#define DED_ERROR_INT_TX_RAM_DED_ERR_INT GENMASK(3, 0)
-#define DED_ERROR_INT_RX_RAM_DED_ERR_INT GENMASK(7, 4)
-#define DED_ERROR_INT_PCIE2AXI_RAM_DED_ERR_INT GENMASK(11, 8)
-#define DED_ERROR_INT_AXI2PCIE_RAM_DED_ERR_INT GENMASK(15, 12)
-#define DED_ERROR_INT_ALL_RAM_DED_ERR_INT GENMASK(15, 0)
-#define NUM_DED_ERROR_INTS (4)
-#define DED_ERROR_INT_MASK 0x34
-#define ECC_CONTROL 0x38
-#define ECC_CONTROL_TX_RAM_INJ_ERROR_0 BIT(0)
-#define ECC_CONTROL_TX_RAM_INJ_ERROR_1 BIT(1)
-#define ECC_CONTROL_TX_RAM_INJ_ERROR_2 BIT(2)
-#define ECC_CONTROL_TX_RAM_INJ_ERROR_3 BIT(3)
-#define ECC_CONTROL_RX_RAM_INJ_ERROR_0 BIT(4)
-#define ECC_CONTROL_RX_RAM_INJ_ERROR_1 BIT(5)
-#define ECC_CONTROL_RX_RAM_INJ_ERROR_2 BIT(6)
-#define ECC_CONTROL_RX_RAM_INJ_ERROR_3 BIT(7)
-#define ECC_CONTROL_PCIE2AXI_RAM_INJ_ERROR_0 BIT(8)
-#define ECC_CONTROL_PCIE2AXI_RAM_INJ_ERROR_1 BIT(9)
-#define ECC_CONTROL_PCIE2AXI_RAM_INJ_ERROR_2 BIT(10)
-#define ECC_CONTROL_PCIE2AXI_RAM_INJ_ERROR_3 BIT(11)
-#define ECC_CONTROL_AXI2PCIE_RAM_INJ_ERROR_0 BIT(12)
-#define ECC_CONTROL_AXI2PCIE_RAM_INJ_ERROR_1 BIT(13)
-#define ECC_CONTROL_AXI2PCIE_RAM_INJ_ERROR_2 BIT(14)
-#define ECC_CONTROL_AXI2PCIE_RAM_INJ_ERROR_3 BIT(15)
-#define ECC_CONTROL_TX_RAM_ECC_BYPASS BIT(24)
-#define ECC_CONTROL_RX_RAM_ECC_BYPASS BIT(25)
-#define ECC_CONTROL_PCIE2AXI_RAM_ECC_BYPASS BIT(26)
-#define ECC_CONTROL_AXI2PCIE_RAM_ECC_BYPASS BIT(27)
-#define PCIE_EVENT_INT 0x14c
-#define PCIE_EVENT_INT_L2_EXIT_INT BIT(0)
-#define PCIE_EVENT_INT_HOTRST_EXIT_INT BIT(1)
-#define PCIE_EVENT_INT_DLUP_EXIT_INT BIT(2)
-#define PCIE_EVENT_INT_MASK GENMASK(2, 0)
-#define PCIE_EVENT_INT_L2_EXIT_INT_MASK BIT(16)
-#define PCIE_EVENT_INT_HOTRST_EXIT_INT_MASK BIT(17)
-#define PCIE_EVENT_INT_DLUP_EXIT_INT_MASK BIT(18)
-#define PCIE_EVENT_INT_ENB_MASK GENMASK(18, 16)
-#define PCIE_EVENT_INT_ENB_SHIFT 16
-#define NUM_PCIE_EVENTS (3)
-
-/* PCIe Config space MSI capability structure */
-#define MC_MSI_CAP_CTRL_OFFSET 0xe0u
-
-/* Events */
-#define EVENT_PCIE_L2_EXIT 0
-#define EVENT_PCIE_HOTRST_EXIT 1
-#define EVENT_PCIE_DLUP_EXIT 2
-#define EVENT_SEC_TX_RAM_SEC_ERR 3
-#define EVENT_SEC_RX_RAM_SEC_ERR 4
-#define EVENT_SEC_PCIE2AXI_RAM_SEC_ERR 5
-#define EVENT_SEC_AXI2PCIE_RAM_SEC_ERR 6
-#define EVENT_DED_TX_RAM_DED_ERR 7
-#define EVENT_DED_RX_RAM_DED_ERR 8
-#define EVENT_DED_PCIE2AXI_RAM_DED_ERR 9
-#define EVENT_DED_AXI2PCIE_RAM_DED_ERR 10
-#define EVENT_LOCAL_DMA_END_ENGINE_0 11
-#define EVENT_LOCAL_DMA_END_ENGINE_1 12
-#define EVENT_LOCAL_DMA_ERROR_ENGINE_0 13
-#define EVENT_LOCAL_DMA_ERROR_ENGINE_1 14
-#define EVENT_LOCAL_A_ATR_EVT_POST_ERR 15
-#define EVENT_LOCAL_A_ATR_EVT_FETCH_ERR 16
-#define EVENT_LOCAL_A_ATR_EVT_DISCARD_ERR 17
-#define EVENT_LOCAL_A_ATR_EVT_DOORBELL 18
-#define EVENT_LOCAL_P_ATR_EVT_POST_ERR 19
-#define EVENT_LOCAL_P_ATR_EVT_FETCH_ERR 20
-#define EVENT_LOCAL_P_ATR_EVT_DISCARD_ERR 21
-#define EVENT_LOCAL_P_ATR_EVT_DOORBELL 22
-#define EVENT_LOCAL_PM_MSI_INT_INTX 23
-#define EVENT_LOCAL_PM_MSI_INT_MSI 24
-#define EVENT_LOCAL_PM_MSI_INT_AER_EVT 25
-#define EVENT_LOCAL_PM_MSI_INT_EVENTS 26
-#define EVENT_LOCAL_PM_MSI_INT_SYS_ERR 27
-#define NUM_EVENTS 28
-
-#define PCIE_EVENT_CAUSE(x, s) \
- [EVENT_PCIE_ ## x] = { __stringify(x), s }
-
-#define SEC_ERROR_CAUSE(x, s) \
- [EVENT_SEC_ ## x] = { __stringify(x), s }
-
-#define DED_ERROR_CAUSE(x, s) \
- [EVENT_DED_ ## x] = { __stringify(x), s }
-
-#define LOCAL_EVENT_CAUSE(x, s) \
- [EVENT_LOCAL_ ## x] = { __stringify(x), s }
-
-#define PCIE_EVENT(x) \
- .base = MC_PCIE_CTRL_ADDR, \
- .offset = PCIE_EVENT_INT, \
- .mask_offset = PCIE_EVENT_INT, \
- .mask_high = 1, \
- .mask = PCIE_EVENT_INT_ ## x ## _INT, \
- .enb_mask = PCIE_EVENT_INT_ENB_MASK
-
-#define SEC_EVENT(x) \
- .base = MC_PCIE_CTRL_ADDR, \
- .offset = SEC_ERROR_INT, \
- .mask_offset = SEC_ERROR_INT_MASK, \
- .mask = SEC_ERROR_INT_ ## x ## _INT, \
- .mask_high = 1, \
- .enb_mask = 0
-
-#define DED_EVENT(x) \
- .base = MC_PCIE_CTRL_ADDR, \
- .offset = DED_ERROR_INT, \
- .mask_offset = DED_ERROR_INT_MASK, \
- .mask_high = 1, \
- .mask = DED_ERROR_INT_ ## x ## _INT, \
- .enb_mask = 0
-
-#define LOCAL_EVENT(x) \
- .base = MC_PCIE_BRIDGE_ADDR, \
- .offset = ISTATUS_LOCAL, \
- .mask_offset = IMASK_LOCAL, \
- .mask_high = 0, \
- .mask = x ## _MASK, \
- .enb_mask = 0
-
-#define PCIE_EVENT_TO_EVENT_MAP(x) \
- { PCIE_EVENT_INT_ ## x ## _INT, EVENT_PCIE_ ## x }
-
-#define SEC_ERROR_TO_EVENT_MAP(x) \
- { SEC_ERROR_INT_ ## x ## _INT, EVENT_SEC_ ## x }
-
-#define DED_ERROR_TO_EVENT_MAP(x) \
- { DED_ERROR_INT_ ## x ## _INT, EVENT_DED_ ## x }
-
-#define LOCAL_STATUS_TO_EVENT_MAP(x) \
- { x ## _MASK, EVENT_LOCAL_ ## x }
-
-struct event_map {
- u32 reg_mask;
- u32 event_bit;
-};
-
-struct mc_msi {
- struct mutex lock; /* Protect used bitmap */
- struct irq_domain *msi_domain;
- struct irq_domain *dev_domain;
- u32 num_vectors;
- u64 vector_phy;
- DECLARE_BITMAP(used, MC_MAX_NUM_MSI_IRQS);
-};
-
-struct mc_pcie {
- void __iomem *axi_base_addr;
- struct device *dev;
- struct irq_domain *intx_domain;
- struct irq_domain *event_domain;
- raw_spinlock_t lock;
- struct mc_msi msi;
-};
-
-struct cause {
- const char *sym;
- const char *str;
-};
-
-static const struct cause event_cause[NUM_EVENTS] = {
- PCIE_EVENT_CAUSE(L2_EXIT, "L2 exit event"),
- PCIE_EVENT_CAUSE(HOTRST_EXIT, "Hot reset exit event"),
- PCIE_EVENT_CAUSE(DLUP_EXIT, "DLUP exit event"),
- SEC_ERROR_CAUSE(TX_RAM_SEC_ERR, "sec error in tx buffer"),
- SEC_ERROR_CAUSE(RX_RAM_SEC_ERR, "sec error in rx buffer"),
- SEC_ERROR_CAUSE(PCIE2AXI_RAM_SEC_ERR, "sec error in pcie2axi buffer"),
- SEC_ERROR_CAUSE(AXI2PCIE_RAM_SEC_ERR, "sec error in axi2pcie buffer"),
- DED_ERROR_CAUSE(TX_RAM_DED_ERR, "ded error in tx buffer"),
- DED_ERROR_CAUSE(RX_RAM_DED_ERR, "ded error in rx buffer"),
- DED_ERROR_CAUSE(PCIE2AXI_RAM_DED_ERR, "ded error in pcie2axi buffer"),
- DED_ERROR_CAUSE(AXI2PCIE_RAM_DED_ERR, "ded error in axi2pcie buffer"),
- LOCAL_EVENT_CAUSE(DMA_ERROR_ENGINE_0, "dma engine 0 error"),
- LOCAL_EVENT_CAUSE(DMA_ERROR_ENGINE_1, "dma engine 1 error"),
- LOCAL_EVENT_CAUSE(A_ATR_EVT_POST_ERR, "axi write request error"),
- LOCAL_EVENT_CAUSE(A_ATR_EVT_FETCH_ERR, "axi read request error"),
- LOCAL_EVENT_CAUSE(A_ATR_EVT_DISCARD_ERR, "axi read timeout"),
- LOCAL_EVENT_CAUSE(P_ATR_EVT_POST_ERR, "pcie write request error"),
- LOCAL_EVENT_CAUSE(P_ATR_EVT_FETCH_ERR, "pcie read request error"),
- LOCAL_EVENT_CAUSE(P_ATR_EVT_DISCARD_ERR, "pcie read timeout"),
- LOCAL_EVENT_CAUSE(PM_MSI_INT_AER_EVT, "aer event"),
- LOCAL_EVENT_CAUSE(PM_MSI_INT_EVENTS, "pm/ltr/hotplug event"),
- LOCAL_EVENT_CAUSE(PM_MSI_INT_SYS_ERR, "system error"),
-};
-
-static struct event_map pcie_event_to_event[] = {
- PCIE_EVENT_TO_EVENT_MAP(L2_EXIT),
- PCIE_EVENT_TO_EVENT_MAP(HOTRST_EXIT),
- PCIE_EVENT_TO_EVENT_MAP(DLUP_EXIT),
-};
-
-static struct event_map sec_error_to_event[] = {
- SEC_ERROR_TO_EVENT_MAP(TX_RAM_SEC_ERR),
- SEC_ERROR_TO_EVENT_MAP(RX_RAM_SEC_ERR),
- SEC_ERROR_TO_EVENT_MAP(PCIE2AXI_RAM_SEC_ERR),
- SEC_ERROR_TO_EVENT_MAP(AXI2PCIE_RAM_SEC_ERR),
-};
-
-static struct event_map ded_error_to_event[] = {
- DED_ERROR_TO_EVENT_MAP(TX_RAM_DED_ERR),
- DED_ERROR_TO_EVENT_MAP(RX_RAM_DED_ERR),
- DED_ERROR_TO_EVENT_MAP(PCIE2AXI_RAM_DED_ERR),
- DED_ERROR_TO_EVENT_MAP(AXI2PCIE_RAM_DED_ERR),
-};
-
-static struct event_map local_status_to_event[] = {
- LOCAL_STATUS_TO_EVENT_MAP(DMA_END_ENGINE_0),
- LOCAL_STATUS_TO_EVENT_MAP(DMA_END_ENGINE_1),
- LOCAL_STATUS_TO_EVENT_MAP(DMA_ERROR_ENGINE_0),
- LOCAL_STATUS_TO_EVENT_MAP(DMA_ERROR_ENGINE_1),
- LOCAL_STATUS_TO_EVENT_MAP(A_ATR_EVT_POST_ERR),
- LOCAL_STATUS_TO_EVENT_MAP(A_ATR_EVT_FETCH_ERR),
- LOCAL_STATUS_TO_EVENT_MAP(A_ATR_EVT_DISCARD_ERR),
- LOCAL_STATUS_TO_EVENT_MAP(A_ATR_EVT_DOORBELL),
- LOCAL_STATUS_TO_EVENT_MAP(P_ATR_EVT_POST_ERR),
- LOCAL_STATUS_TO_EVENT_MAP(P_ATR_EVT_FETCH_ERR),
- LOCAL_STATUS_TO_EVENT_MAP(P_ATR_EVT_DISCARD_ERR),
- LOCAL_STATUS_TO_EVENT_MAP(P_ATR_EVT_DOORBELL),
- LOCAL_STATUS_TO_EVENT_MAP(PM_MSI_INT_INTX),
- LOCAL_STATUS_TO_EVENT_MAP(PM_MSI_INT_MSI),
- LOCAL_STATUS_TO_EVENT_MAP(PM_MSI_INT_AER_EVT),
- LOCAL_STATUS_TO_EVENT_MAP(PM_MSI_INT_EVENTS),
- LOCAL_STATUS_TO_EVENT_MAP(PM_MSI_INT_SYS_ERR),
-};
-
-static struct {
- u32 base;
- u32 offset;
- u32 mask;
- u32 shift;
- u32 enb_mask;
- u32 mask_high;
- u32 mask_offset;
-} event_descs[] = {
- { PCIE_EVENT(L2_EXIT) },
- { PCIE_EVENT(HOTRST_EXIT) },
- { PCIE_EVENT(DLUP_EXIT) },
- { SEC_EVENT(TX_RAM_SEC_ERR) },
- { SEC_EVENT(RX_RAM_SEC_ERR) },
- { SEC_EVENT(PCIE2AXI_RAM_SEC_ERR) },
- { SEC_EVENT(AXI2PCIE_RAM_SEC_ERR) },
- { DED_EVENT(TX_RAM_DED_ERR) },
- { DED_EVENT(RX_RAM_DED_ERR) },
- { DED_EVENT(PCIE2AXI_RAM_DED_ERR) },
- { DED_EVENT(AXI2PCIE_RAM_DED_ERR) },
- { LOCAL_EVENT(DMA_END_ENGINE_0) },
- { LOCAL_EVENT(DMA_END_ENGINE_1) },
- { LOCAL_EVENT(DMA_ERROR_ENGINE_0) },
- { LOCAL_EVENT(DMA_ERROR_ENGINE_1) },
- { LOCAL_EVENT(A_ATR_EVT_POST_ERR) },
- { LOCAL_EVENT(A_ATR_EVT_FETCH_ERR) },
- { LOCAL_EVENT(A_ATR_EVT_DISCARD_ERR) },
- { LOCAL_EVENT(A_ATR_EVT_DOORBELL) },
- { LOCAL_EVENT(P_ATR_EVT_POST_ERR) },
- { LOCAL_EVENT(P_ATR_EVT_FETCH_ERR) },
- { LOCAL_EVENT(P_ATR_EVT_DISCARD_ERR) },
- { LOCAL_EVENT(P_ATR_EVT_DOORBELL) },
- { LOCAL_EVENT(PM_MSI_INT_INTX) },
- { LOCAL_EVENT(PM_MSI_INT_MSI) },
- { LOCAL_EVENT(PM_MSI_INT_AER_EVT) },
- { LOCAL_EVENT(PM_MSI_INT_EVENTS) },
- { LOCAL_EVENT(PM_MSI_INT_SYS_ERR) },
-};
-
-static char poss_clks[][5] = { "fic0", "fic1", "fic2", "fic3" };
-
-static struct mc_pcie *port;
-
-static void mc_pcie_enable_msi(struct mc_pcie *port, void __iomem *ecam)
-{
- struct mc_msi *msi = &port->msi;
- u16 reg;
- u8 queue_size;
-
- /* Fixup MSI enable flag */
- reg = readw_relaxed(ecam + MC_MSI_CAP_CTRL_OFFSET + PCI_MSI_FLAGS);
- reg |= PCI_MSI_FLAGS_ENABLE;
- writew_relaxed(reg, ecam + MC_MSI_CAP_CTRL_OFFSET + PCI_MSI_FLAGS);
-
- /* Fixup PCI MSI queue flags */
- queue_size = FIELD_GET(PCI_MSI_FLAGS_QMASK, reg);
- reg |= FIELD_PREP(PCI_MSI_FLAGS_QSIZE, queue_size);
- writew_relaxed(reg, ecam + MC_MSI_CAP_CTRL_OFFSET + PCI_MSI_FLAGS);
-
- /* Fixup MSI addr fields */
- writel_relaxed(lower_32_bits(msi->vector_phy),
- ecam + MC_MSI_CAP_CTRL_OFFSET + PCI_MSI_ADDRESS_LO);
- writel_relaxed(upper_32_bits(msi->vector_phy),
- ecam + MC_MSI_CAP_CTRL_OFFSET + PCI_MSI_ADDRESS_HI);
-}
-
-static void mc_handle_msi(struct irq_desc *desc)
-{
- struct mc_pcie *port = irq_desc_get_handler_data(desc);
- struct irq_chip *chip = irq_desc_get_chip(desc);
- struct device *dev = port->dev;
- struct mc_msi *msi = &port->msi;
- void __iomem *bridge_base_addr =
- port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
- unsigned long status;
- u32 bit;
- int ret;
-
- chained_irq_enter(chip, desc);
-
- status = readl_relaxed(bridge_base_addr + ISTATUS_LOCAL);
- if (status & PM_MSI_INT_MSI_MASK) {
- writel_relaxed(status & PM_MSI_INT_MSI_MASK, bridge_base_addr + ISTATUS_LOCAL);
- status = readl_relaxed(bridge_base_addr + ISTATUS_MSI);
- for_each_set_bit(bit, &status, msi->num_vectors) {
- ret = generic_handle_domain_irq(msi->dev_domain, bit);
- if (ret)
- dev_err_ratelimited(dev, "bad MSI IRQ %d\n",
- bit);
- }
- }
-
- chained_irq_exit(chip, desc);
-}
-
-static void mc_msi_bottom_irq_ack(struct irq_data *data)
-{
- struct mc_pcie *port = irq_data_get_irq_chip_data(data);
- void __iomem *bridge_base_addr =
- port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
- u32 bitpos = data->hwirq;
-
- writel_relaxed(BIT(bitpos), bridge_base_addr + ISTATUS_MSI);
-}
-
-static void mc_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
-{
- struct mc_pcie *port = irq_data_get_irq_chip_data(data);
- phys_addr_t addr = port->msi.vector_phy;
-
- msg->address_lo = lower_32_bits(addr);
- msg->address_hi = upper_32_bits(addr);
- msg->data = data->hwirq;
-
- dev_dbg(port->dev, "msi#%x address_hi %#x address_lo %#x\n",
- (int)data->hwirq, msg->address_hi, msg->address_lo);
-}
-
-static int mc_msi_set_affinity(struct irq_data *irq_data,
- const struct cpumask *mask, bool force)
-{
- return -EINVAL;
-}
-
-static struct irq_chip mc_msi_bottom_irq_chip = {
- .name = "Microchip MSI",
- .irq_ack = mc_msi_bottom_irq_ack,
- .irq_compose_msi_msg = mc_compose_msi_msg,
- .irq_set_affinity = mc_msi_set_affinity,
-};
-
-static int mc_irq_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
- unsigned int nr_irqs, void *args)
-{
- struct mc_pcie *port = domain->host_data;
- struct mc_msi *msi = &port->msi;
- unsigned long bit;
-
- mutex_lock(&msi->lock);
- bit = find_first_zero_bit(msi->used, msi->num_vectors);
- if (bit >= msi->num_vectors) {
- mutex_unlock(&msi->lock);
- return -ENOSPC;
- }
-
- set_bit(bit, msi->used);
-
- irq_domain_set_info(domain, virq, bit, &mc_msi_bottom_irq_chip,
- domain->host_data, handle_edge_irq, NULL, NULL);
-
- mutex_unlock(&msi->lock);
-
- return 0;
-}
-
-static void mc_irq_msi_domain_free(struct irq_domain *domain, unsigned int virq,
- unsigned int nr_irqs)
-{
- struct irq_data *d = irq_domain_get_irq_data(domain, virq);
- struct mc_pcie *port = irq_data_get_irq_chip_data(d);
- struct mc_msi *msi = &port->msi;
-
- mutex_lock(&msi->lock);
-
- if (test_bit(d->hwirq, msi->used))
- __clear_bit(d->hwirq, msi->used);
- else
- dev_err(port->dev, "trying to free unused MSI%lu\n", d->hwirq);
-
- mutex_unlock(&msi->lock);
-}
-
-static const struct irq_domain_ops msi_domain_ops = {
- .alloc = mc_irq_msi_domain_alloc,
- .free = mc_irq_msi_domain_free,
-};
-
-static struct irq_chip mc_msi_irq_chip = {
- .name = "Microchip PCIe MSI",
- .irq_ack = irq_chip_ack_parent,
- .irq_mask = pci_msi_mask_irq,
- .irq_unmask = pci_msi_unmask_irq,
-};
-
-static struct msi_domain_info mc_msi_domain_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_PCI_MSIX),
- .chip = &mc_msi_irq_chip,
-};
-
-static int mc_allocate_msi_domains(struct mc_pcie *port)
-{
- struct device *dev = port->dev;
- struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node);
- struct mc_msi *msi = &port->msi;
-
- mutex_init(&port->msi.lock);
-
- msi->dev_domain = irq_domain_add_linear(NULL, msi->num_vectors,
- &msi_domain_ops, port);
- if (!msi->dev_domain) {
- dev_err(dev, "failed to create IRQ domain\n");
- return -ENOMEM;
- }
-
- msi->msi_domain = pci_msi_create_irq_domain(fwnode, &mc_msi_domain_info,
- msi->dev_domain);
- if (!msi->msi_domain) {
- dev_err(dev, "failed to create MSI domain\n");
- irq_domain_remove(msi->dev_domain);
- return -ENOMEM;
- }
-
- return 0;
-}
-
-static void mc_handle_intx(struct irq_desc *desc)
-{
- struct mc_pcie *port = irq_desc_get_handler_data(desc);
- struct irq_chip *chip = irq_desc_get_chip(desc);
- struct device *dev = port->dev;
- void __iomem *bridge_base_addr =
- port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
- unsigned long status;
- u32 bit;
- int ret;
-
- chained_irq_enter(chip, desc);
-
- status = readl_relaxed(bridge_base_addr + ISTATUS_LOCAL);
- if (status & PM_MSI_INT_INTX_MASK) {
- status &= PM_MSI_INT_INTX_MASK;
- status >>= PM_MSI_INT_INTX_SHIFT;
- for_each_set_bit(bit, &status, PCI_NUM_INTX) {
- ret = generic_handle_domain_irq(port->intx_domain, bit);
- if (ret)
- dev_err_ratelimited(dev, "bad INTx IRQ %d\n",
- bit);
- }
- }
-
- chained_irq_exit(chip, desc);
-}
-
-static void mc_ack_intx_irq(struct irq_data *data)
-{
- struct mc_pcie *port = irq_data_get_irq_chip_data(data);
- void __iomem *bridge_base_addr =
- port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
- u32 mask = BIT(data->hwirq + PM_MSI_INT_INTX_SHIFT);
-
- writel_relaxed(mask, bridge_base_addr + ISTATUS_LOCAL);
-}
-
-static void mc_mask_intx_irq(struct irq_data *data)
-{
- struct mc_pcie *port = irq_data_get_irq_chip_data(data);
- void __iomem *bridge_base_addr =
- port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
- unsigned long flags;
- u32 mask = BIT(data->hwirq + PM_MSI_INT_INTX_SHIFT);
- u32 val;
-
- raw_spin_lock_irqsave(&port->lock, flags);
- val = readl_relaxed(bridge_base_addr + IMASK_LOCAL);
- val &= ~mask;
- writel_relaxed(val, bridge_base_addr + IMASK_LOCAL);
- raw_spin_unlock_irqrestore(&port->lock, flags);
-}
-
-static void mc_unmask_intx_irq(struct irq_data *data)
-{
- struct mc_pcie *port = irq_data_get_irq_chip_data(data);
- void __iomem *bridge_base_addr =
- port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
- unsigned long flags;
- u32 mask = BIT(data->hwirq + PM_MSI_INT_INTX_SHIFT);
- u32 val;
-
- raw_spin_lock_irqsave(&port->lock, flags);
- val = readl_relaxed(bridge_base_addr + IMASK_LOCAL);
- val |= mask;
- writel_relaxed(val, bridge_base_addr + IMASK_LOCAL);
- raw_spin_unlock_irqrestore(&port->lock, flags);
-}
-
-static struct irq_chip mc_intx_irq_chip = {
- .name = "Microchip PCIe INTx",
- .irq_ack = mc_ack_intx_irq,
- .irq_mask = mc_mask_intx_irq,
- .irq_unmask = mc_unmask_intx_irq,
-};
-
-static int mc_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
- irq_hw_number_t hwirq)
-{
- irq_set_chip_and_handler(irq, &mc_intx_irq_chip, handle_level_irq);
- irq_set_chip_data(irq, domain->host_data);
-
- return 0;
-}
-
-static const struct irq_domain_ops intx_domain_ops = {
- .map = mc_pcie_intx_map,
-};
-
-static inline u32 reg_to_event(u32 reg, struct event_map field)
-{
- return (reg & field.reg_mask) ? BIT(field.event_bit) : 0;
-}
-
-static u32 pcie_events(struct mc_pcie *port)
-{
- void __iomem *ctrl_base_addr = port->axi_base_addr + MC_PCIE_CTRL_ADDR;
- u32 reg = readl_relaxed(ctrl_base_addr + PCIE_EVENT_INT);
- u32 val = 0;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(pcie_event_to_event); i++)
- val |= reg_to_event(reg, pcie_event_to_event[i]);
-
- return val;
-}
-
-static u32 sec_errors(struct mc_pcie *port)
-{
- void __iomem *ctrl_base_addr = port->axi_base_addr + MC_PCIE_CTRL_ADDR;
- u32 reg = readl_relaxed(ctrl_base_addr + SEC_ERROR_INT);
- u32 val = 0;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(sec_error_to_event); i++)
- val |= reg_to_event(reg, sec_error_to_event[i]);
-
- return val;
-}
-
-static u32 ded_errors(struct mc_pcie *port)
-{
- void __iomem *ctrl_base_addr = port->axi_base_addr + MC_PCIE_CTRL_ADDR;
- u32 reg = readl_relaxed(ctrl_base_addr + DED_ERROR_INT);
- u32 val = 0;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(ded_error_to_event); i++)
- val |= reg_to_event(reg, ded_error_to_event[i]);
-
- return val;
-}
-
-static u32 local_events(struct mc_pcie *port)
-{
- void __iomem *bridge_base_addr = port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
- u32 reg = readl_relaxed(bridge_base_addr + ISTATUS_LOCAL);
- u32 val = 0;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(local_status_to_event); i++)
- val |= reg_to_event(reg, local_status_to_event[i]);
-
- return val;
-}
-
-static u32 get_events(struct mc_pcie *port)
-{
- u32 events = 0;
-
- events |= pcie_events(port);
- events |= sec_errors(port);
- events |= ded_errors(port);
- events |= local_events(port);
-
- return events;
-}
-
-static irqreturn_t mc_event_handler(int irq, void *dev_id)
-{
- struct mc_pcie *port = dev_id;
- struct device *dev = port->dev;
- struct irq_data *data;
-
- data = irq_domain_get_irq_data(port->event_domain, irq);
-
- if (event_cause[data->hwirq].str)
- dev_err_ratelimited(dev, "%s\n", event_cause[data->hwirq].str);
- else
- dev_err_ratelimited(dev, "bad event IRQ %ld\n", data->hwirq);
-
- return IRQ_HANDLED;
-}
-
-static void mc_handle_event(struct irq_desc *desc)
-{
- struct mc_pcie *port = irq_desc_get_handler_data(desc);
- unsigned long events;
- u32 bit;
- struct irq_chip *chip = irq_desc_get_chip(desc);
-
- chained_irq_enter(chip, desc);
-
- events = get_events(port);
-
- for_each_set_bit(bit, &events, NUM_EVENTS)
- generic_handle_domain_irq(port->event_domain, bit);
-
- chained_irq_exit(chip, desc);
-}
-
-static void mc_ack_event_irq(struct irq_data *data)
-{
- struct mc_pcie *port = irq_data_get_irq_chip_data(data);
- u32 event = data->hwirq;
- void __iomem *addr;
- u32 mask;
-
- addr = port->axi_base_addr + event_descs[event].base +
- event_descs[event].offset;
- mask = event_descs[event].mask;
- mask |= event_descs[event].enb_mask;
-
- writel_relaxed(mask, addr);
-}
-
-static void mc_mask_event_irq(struct irq_data *data)
-{
- struct mc_pcie *port = irq_data_get_irq_chip_data(data);
- u32 event = data->hwirq;
- void __iomem *addr;
- u32 mask;
- u32 val;
-
- addr = port->axi_base_addr + event_descs[event].base +
- event_descs[event].mask_offset;
- mask = event_descs[event].mask;
- if (event_descs[event].enb_mask) {
- mask <<= PCIE_EVENT_INT_ENB_SHIFT;
- mask &= PCIE_EVENT_INT_ENB_MASK;
- }
-
- if (!event_descs[event].mask_high)
- mask = ~mask;
-
- raw_spin_lock(&port->lock);
- val = readl_relaxed(addr);
- if (event_descs[event].mask_high)
- val |= mask;
- else
- val &= mask;
-
- writel_relaxed(val, addr);
- raw_spin_unlock(&port->lock);
-}
-
-static void mc_unmask_event_irq(struct irq_data *data)
-{
- struct mc_pcie *port = irq_data_get_irq_chip_data(data);
- u32 event = data->hwirq;
- void __iomem *addr;
- u32 mask;
- u32 val;
-
- addr = port->axi_base_addr + event_descs[event].base +
- event_descs[event].mask_offset;
- mask = event_descs[event].mask;
-
- if (event_descs[event].enb_mask)
- mask <<= PCIE_EVENT_INT_ENB_SHIFT;
-
- if (event_descs[event].mask_high)
- mask = ~mask;
-
- if (event_descs[event].enb_mask)
- mask &= PCIE_EVENT_INT_ENB_MASK;
-
- raw_spin_lock(&port->lock);
- val = readl_relaxed(addr);
- if (event_descs[event].mask_high)
- val &= mask;
- else
- val |= mask;
- writel_relaxed(val, addr);
- raw_spin_unlock(&port->lock);
-}
-
-static struct irq_chip mc_event_irq_chip = {
- .name = "Microchip PCIe EVENT",
- .irq_ack = mc_ack_event_irq,
- .irq_mask = mc_mask_event_irq,
- .irq_unmask = mc_unmask_event_irq,
-};
-
-static int mc_pcie_event_map(struct irq_domain *domain, unsigned int irq,
- irq_hw_number_t hwirq)
-{
- irq_set_chip_and_handler(irq, &mc_event_irq_chip, handle_level_irq);
- irq_set_chip_data(irq, domain->host_data);
-
- return 0;
-}
-
-static const struct irq_domain_ops event_domain_ops = {
- .map = mc_pcie_event_map,
-};
-
-static inline void mc_pcie_deinit_clk(void *data)
-{
- struct clk *clk = data;
-
- clk_disable_unprepare(clk);
-}
-
-static inline struct clk *mc_pcie_init_clk(struct device *dev, const char *id)
-{
- struct clk *clk;
- int ret;
-
- clk = devm_clk_get_optional(dev, id);
- if (IS_ERR(clk))
- return clk;
- if (!clk)
- return clk;
-
- ret = clk_prepare_enable(clk);
- if (ret)
- return ERR_PTR(ret);
-
- devm_add_action_or_reset(dev, mc_pcie_deinit_clk, clk);
-
- return clk;
-}
-
-static int mc_pcie_init_clks(struct device *dev)
-{
- int i;
- struct clk *fic;
-
- /*
- * PCIe may be clocked via Fabric Interface using between 1 and 4
- * clocks. Scan DT for clocks and enable them if present
- */
- for (i = 0; i < ARRAY_SIZE(poss_clks); i++) {
- fic = mc_pcie_init_clk(dev, poss_clks[i]);
- if (IS_ERR(fic))
- return PTR_ERR(fic);
- }
-
- return 0;
-}
-
-static int mc_pcie_init_irq_domains(struct mc_pcie *port)
-{
- struct device *dev = port->dev;
- struct device_node *node = dev->of_node;
- struct device_node *pcie_intc_node;
-
- /* Setup INTx */
- pcie_intc_node = of_get_next_child(node, NULL);
- if (!pcie_intc_node) {
- dev_err(dev, "failed to find PCIe Intc node\n");
- return -EINVAL;
- }
-
- port->event_domain = irq_domain_add_linear(pcie_intc_node, NUM_EVENTS,
- &event_domain_ops, port);
- if (!port->event_domain) {
- dev_err(dev, "failed to get event domain\n");
- of_node_put(pcie_intc_node);
- return -ENOMEM;
- }
-
- irq_domain_update_bus_token(port->event_domain, DOMAIN_BUS_NEXUS);
-
- port->intx_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
- &intx_domain_ops, port);
- if (!port->intx_domain) {
- dev_err(dev, "failed to get an INTx IRQ domain\n");
- of_node_put(pcie_intc_node);
- return -ENOMEM;
- }
-
- irq_domain_update_bus_token(port->intx_domain, DOMAIN_BUS_WIRED);
-
- of_node_put(pcie_intc_node);
- raw_spin_lock_init(&port->lock);
-
- return mc_allocate_msi_domains(port);
-}
-
-static void mc_pcie_setup_window(void __iomem *bridge_base_addr, u32 index,
- phys_addr_t axi_addr, phys_addr_t pci_addr,
- size_t size)
-{
- u32 atr_sz = ilog2(size) - 1;
- u32 val;
-
- if (index == 0)
- val = PCIE_CONFIG_INTERFACE;
- else
- val = PCIE_TX_RX_INTERFACE;
-
- writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
- ATR0_AXI4_SLV0_TRSL_PARAM);
-
- val = lower_32_bits(axi_addr) | (atr_sz << ATR_SIZE_SHIFT) |
- ATR_IMPL_ENABLE;
- writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
- ATR0_AXI4_SLV0_SRCADDR_PARAM);
-
- val = upper_32_bits(axi_addr);
- writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
- ATR0_AXI4_SLV0_SRC_ADDR);
-
- val = lower_32_bits(pci_addr);
- writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
- ATR0_AXI4_SLV0_TRSL_ADDR_LSB);
-
- val = upper_32_bits(pci_addr);
- writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
- ATR0_AXI4_SLV0_TRSL_ADDR_UDW);
-
- val = readl(bridge_base_addr + ATR0_PCIE_WIN0_SRCADDR_PARAM);
- val |= (ATR0_PCIE_ATR_SIZE << ATR0_PCIE_ATR_SIZE_SHIFT);
- writel(val, bridge_base_addr + ATR0_PCIE_WIN0_SRCADDR_PARAM);
- writel(0, bridge_base_addr + ATR0_PCIE_WIN0_SRC_ADDR);
-}
-
-static int mc_pcie_setup_windows(struct platform_device *pdev,
- struct mc_pcie *port)
-{
- void __iomem *bridge_base_addr =
- port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
- struct pci_host_bridge *bridge = platform_get_drvdata(pdev);
- struct resource_entry *entry;
- u64 pci_addr;
- u32 index = 1;
-
- resource_list_for_each_entry(entry, &bridge->windows) {
- if (resource_type(entry->res) == IORESOURCE_MEM) {
- pci_addr = entry->res->start - entry->offset;
- mc_pcie_setup_window(bridge_base_addr, index,
- entry->res->start, pci_addr,
- resource_size(entry->res));
- index++;
- }
- }
-
- return 0;
-}
-
-static inline void mc_clear_secs(struct mc_pcie *port)
-{
- void __iomem *ctrl_base_addr = port->axi_base_addr + MC_PCIE_CTRL_ADDR;
-
- writel_relaxed(SEC_ERROR_INT_ALL_RAM_SEC_ERR_INT, ctrl_base_addr +
- SEC_ERROR_INT);
- writel_relaxed(0, ctrl_base_addr + SEC_ERROR_EVENT_CNT);
-}
-
-static inline void mc_clear_deds(struct mc_pcie *port)
-{
- void __iomem *ctrl_base_addr = port->axi_base_addr + MC_PCIE_CTRL_ADDR;
-
- writel_relaxed(DED_ERROR_INT_ALL_RAM_DED_ERR_INT, ctrl_base_addr +
- DED_ERROR_INT);
- writel_relaxed(0, ctrl_base_addr + DED_ERROR_EVENT_CNT);
-}
-
-static void mc_disable_interrupts(struct mc_pcie *port)
-{
- void __iomem *bridge_base_addr = port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
- void __iomem *ctrl_base_addr = port->axi_base_addr + MC_PCIE_CTRL_ADDR;
- u32 val;
-
- /* Ensure ECC bypass is enabled */
- val = ECC_CONTROL_TX_RAM_ECC_BYPASS |
- ECC_CONTROL_RX_RAM_ECC_BYPASS |
- ECC_CONTROL_PCIE2AXI_RAM_ECC_BYPASS |
- ECC_CONTROL_AXI2PCIE_RAM_ECC_BYPASS;
- writel_relaxed(val, ctrl_base_addr + ECC_CONTROL);
-
- /* Disable SEC errors and clear any outstanding */
- writel_relaxed(SEC_ERROR_INT_ALL_RAM_SEC_ERR_INT, ctrl_base_addr +
- SEC_ERROR_INT_MASK);
- mc_clear_secs(port);
-
- /* Disable DED errors and clear any outstanding */
- writel_relaxed(DED_ERROR_INT_ALL_RAM_DED_ERR_INT, ctrl_base_addr +
- DED_ERROR_INT_MASK);
- mc_clear_deds(port);
-
- /* Disable local interrupts and clear any outstanding */
- writel_relaxed(0, bridge_base_addr + IMASK_LOCAL);
- writel_relaxed(GENMASK(31, 0), bridge_base_addr + ISTATUS_LOCAL);
- writel_relaxed(GENMASK(31, 0), bridge_base_addr + ISTATUS_MSI);
-
- /* Disable PCIe events and clear any outstanding */
- val = PCIE_EVENT_INT_L2_EXIT_INT |
- PCIE_EVENT_INT_HOTRST_EXIT_INT |
- PCIE_EVENT_INT_DLUP_EXIT_INT |
- PCIE_EVENT_INT_L2_EXIT_INT_MASK |
- PCIE_EVENT_INT_HOTRST_EXIT_INT_MASK |
- PCIE_EVENT_INT_DLUP_EXIT_INT_MASK;
- writel_relaxed(val, ctrl_base_addr + PCIE_EVENT_INT);
-
- /* Disable host interrupts and clear any outstanding */
- writel_relaxed(0, bridge_base_addr + IMASK_HOST);
- writel_relaxed(GENMASK(31, 0), bridge_base_addr + ISTATUS_HOST);
-}
-
-static int mc_init_interrupts(struct platform_device *pdev, struct mc_pcie *port)
-{
- struct device *dev = &pdev->dev;
- int irq;
- int i, intx_irq, msi_irq, event_irq;
- int ret;
-
- ret = mc_pcie_init_irq_domains(port);
- if (ret) {
- dev_err(dev, "failed creating IRQ domains\n");
- return ret;
- }
-
- irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- return -ENODEV;
-
- for (i = 0; i < NUM_EVENTS; i++) {
- event_irq = irq_create_mapping(port->event_domain, i);
- if (!event_irq) {
- dev_err(dev, "failed to map hwirq %d\n", i);
- return -ENXIO;
- }
-
- ret = devm_request_irq(dev, event_irq, mc_event_handler,
- 0, event_cause[i].sym, port);
- if (ret) {
- dev_err(dev, "failed to request IRQ %d\n", event_irq);
- return ret;
- }
- }
-
- intx_irq = irq_create_mapping(port->event_domain,
- EVENT_LOCAL_PM_MSI_INT_INTX);
- if (!intx_irq) {
- dev_err(dev, "failed to map INTx interrupt\n");
- return -ENXIO;
- }
-
- /* Plug the INTx chained handler */
- irq_set_chained_handler_and_data(intx_irq, mc_handle_intx, port);
-
- msi_irq = irq_create_mapping(port->event_domain,
- EVENT_LOCAL_PM_MSI_INT_MSI);
- if (!msi_irq)
- return -ENXIO;
-
- /* Plug the MSI chained handler */
- irq_set_chained_handler_and_data(msi_irq, mc_handle_msi, port);
-
- /* Plug the main event chained handler */
- irq_set_chained_handler_and_data(irq, mc_handle_event, port);
-
- return 0;
-}
-
-static int mc_platform_init(struct pci_config_window *cfg)
-{
- struct device *dev = cfg->parent;
- struct platform_device *pdev = to_platform_device(dev);
- void __iomem *bridge_base_addr =
- port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
- int ret;
-
- /* Configure address translation table 0 for PCIe config space */
- mc_pcie_setup_window(bridge_base_addr, 0, cfg->res.start,
- cfg->res.start,
- resource_size(&cfg->res));
-
- /* Need some fixups in config space */
- mc_pcie_enable_msi(port, cfg->win);
-
- /* Configure non-config space outbound ranges */
- ret = mc_pcie_setup_windows(pdev, port);
- if (ret)
- return ret;
-
- /* Address translation is up; safe to enable interrupts */
- ret = mc_init_interrupts(pdev, port);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static int mc_host_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- void __iomem *bridge_base_addr;
- int ret;
- u32 val;
-
- port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
- if (!port)
- return -ENOMEM;
-
- port->dev = dev;
-
- port->axi_base_addr = devm_platform_ioremap_resource(pdev, 1);
- if (IS_ERR(port->axi_base_addr))
- return PTR_ERR(port->axi_base_addr);
-
- mc_disable_interrupts(port);
-
- bridge_base_addr = port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
-
- /* Allow enabling MSI by disabling MSI-X */
- val = readl(bridge_base_addr + PCIE_PCI_IRQ_DW0);
- val &= ~MSIX_CAP_MASK;
- writel(val, bridge_base_addr + PCIE_PCI_IRQ_DW0);
-
- /* Pick num vectors from bitfile programmed onto FPGA fabric */
- val = readl(bridge_base_addr + PCIE_PCI_IRQ_DW0);
- val &= NUM_MSI_MSGS_MASK;
- val >>= NUM_MSI_MSGS_SHIFT;
-
- port->msi.num_vectors = 1 << val;
-
- /* Pick vector address from design */
- port->msi.vector_phy = readl_relaxed(bridge_base_addr + IMSI_ADDR);
-
- ret = mc_pcie_init_clks(dev);
- if (ret) {
- dev_err(dev, "failed to get clock resources, error %d\n", ret);
- return -ENODEV;
- }
-
- return pci_host_common_probe(pdev);
-}
-
-static const struct pci_ecam_ops mc_ecam_ops = {
- .init = mc_platform_init,
- .pci_ops = {
- .map_bus = pci_ecam_map_bus,
- .read = pci_generic_config_read,
- .write = pci_generic_config_write,
- }
-};
-
-static const struct of_device_id mc_pcie_of_match[] = {
- {
- .compatible = "microchip,pcie-host-1.0",
- .data = &mc_ecam_ops,
- },
- {},
-};
-
-MODULE_DEVICE_TABLE(of, mc_pcie_of_match);
-
-static struct platform_driver mc_pcie_driver = {
- .probe = mc_host_probe,
- .driver = {
- .name = "microchip-pcie",
- .of_match_table = mc_pcie_of_match,
- .suppress_bind_attrs = true,
- },
-};
-
-builtin_platform_driver(mc_pcie_driver);
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Microchip PCIe host controller driver");
-MODULE_AUTHOR("Daire McNamara <daire.mcnamara@microchip.com>");
diff --git a/drivers/pci/controller/pcie-mt7621.c b/drivers/pci/controller/pcie-mt7621.c
index 79e225edb42a..01ead2f92e87 100644
--- a/drivers/pci/controller/pcie-mt7621.c
+++ b/drivers/pci/controller/pcie-mt7621.c
@@ -202,7 +202,7 @@ static int mt7621_pcie_parse_port(struct mt7621_pcie *pcie,
struct mt7621_pcie_port *port;
struct device *dev = pcie->dev;
struct platform_device *pdev = to_platform_device(dev);
- char name[10];
+ char name[11];
int err;
port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
@@ -258,30 +258,25 @@ static int mt7621_pcie_parse_dt(struct mt7621_pcie *pcie)
{
struct device *dev = pcie->dev;
struct platform_device *pdev = to_platform_device(dev);
- struct device_node *node = dev->of_node, *child;
+ struct device_node *node = dev->of_node;
int err;
pcie->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pcie->base))
return PTR_ERR(pcie->base);
- for_each_available_child_of_node(node, child) {
+ for_each_available_child_of_node_scoped(node, child) {
int slot;
err = of_pci_get_devfn(child);
- if (err < 0) {
- of_node_put(child);
- dev_err(dev, "failed to parse devfn: %d\n", err);
- return err;
- }
+ if (err < 0)
+ return dev_err_probe(dev, err, "failed to parse devfn\n");
slot = PCI_SLOT(err);
err = mt7621_pcie_parse_port(pcie, child, slot);
- if (err) {
- of_node_put(child);
+ if (err)
return err;
- }
}
return 0;
@@ -541,7 +536,7 @@ MODULE_DEVICE_TABLE(of, mt7621_pcie_ids);
static struct platform_driver mt7621_pcie_driver = {
.probe = mt7621_pcie_probe,
- .remove_new = mt7621_pcie_remove,
+ .remove = mt7621_pcie_remove,
.driver = {
.name = "mt7621-pci",
.of_match_table = mt7621_pcie_ids,
@@ -549,4 +544,5 @@ static struct platform_driver mt7621_pcie_driver = {
};
builtin_platform_driver(mt7621_pcie_driver);
+MODULE_DESCRIPTION("MediaTek MT7621 PCIe host controller driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/pcie-rcar-ep.c b/drivers/pci/controller/pcie-rcar-ep.c
index 05967c6c0b42..c5e0d025bc43 100644
--- a/drivers/pci/controller/pcie-rcar-ep.c
+++ b/drivers/pci/controller/pcie-rcar-ep.c
@@ -107,7 +107,7 @@ static int rcar_pcie_parse_outbound_ranges(struct rcar_pcie_endpoint *ep,
}
if (!devm_request_mem_region(&pdev->dev, res->start,
resource_size(res),
- outbound_name)) {
+ res->name)) {
dev_err(pcie->dev, "Cannot request memory region %s.\n",
outbound_name);
return -EIO;
@@ -542,6 +542,8 @@ static int rcar_pcie_ep_probe(struct platform_device *pdev)
goto err_pm_put;
}
+ pci_epc_init_notify(epc);
+
return 0;
err_pm_put:
diff --git a/drivers/pci/controller/pcie-rcar-host.c b/drivers/pci/controller/pcie-rcar-host.c
index 996077ab7cfd..c32b803a47c7 100644
--- a/drivers/pci/controller/pcie-rcar-host.c
+++ b/drivers/pci/controller/pcie-rcar-host.c
@@ -78,7 +78,11 @@ static int rcar_pcie_wakeup(struct device *pcie_dev, void __iomem *pcie_base)
writel(L1IATN, pcie_base + PMCTLR);
ret = readl_poll_timeout_atomic(pcie_base + PMSR, val,
val & L1FAEG, 10, 1000);
- WARN(ret, "Timeout waiting for L1 link state, ret=%d\n", ret);
+ if (ret) {
+ dev_warn_ratelimited(pcie_dev,
+ "Timeout waiting for L1 link state, ret=%d\n",
+ ret);
+ }
writel(L1FAEG | PMEL1RX, pcie_base + PMSR);
}
@@ -174,8 +178,8 @@ static int rcar_pcie_config_access(struct rcar_pcie_host *host,
* space, it's generally only accessible when in endpoint mode.
* When in root complex mode, the controller is unable to target
* itself with either type 0 or type 1 accesses, and indeed, any
- * controller initiated target transfer to its own config space
- * result in a completer abort.
+ * controller-initiated target transfer to its own config space
+ * results in a completer abort.
*
* Each channel effectively only supports a single device, but as
* the same channel <-> device access works for any PCI_SLOT()
@@ -654,11 +658,6 @@ static void rcar_msi_irq_unmask(struct irq_data *d)
spin_unlock_irqrestore(&msi->mask_lock, flags);
}
-static int rcar_msi_set_affinity(struct irq_data *d, const struct cpumask *mask, bool force)
-{
- return -EINVAL;
-}
-
static void rcar_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
struct rcar_msi *msi = irq_data_get_irq_chip_data(data);
@@ -674,7 +673,6 @@ static struct irq_chip rcar_msi_bottom_chip = {
.irq_ack = rcar_msi_irq_ack,
.irq_mask = rcar_msi_irq_mask,
.irq_unmask = rcar_msi_irq_unmask,
- .irq_set_affinity = rcar_msi_set_affinity,
.irq_compose_msi_msg = rcar_compose_msi_msg,
};
@@ -721,8 +719,8 @@ static const struct irq_domain_ops rcar_msi_domain_ops = {
};
static struct msi_domain_info rcar_msi_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_MULTI_PCI_MSI),
+ .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_NO_AFFINITY | MSI_FLAG_MULTI_PCI_MSI,
.chip = &rcar_msi_top_chip,
};
@@ -777,7 +775,7 @@ static int rcar_pcie_enable_msi(struct rcar_pcie_host *host)
if (err)
return err;
- /* Two irqs are for MSI, but they are also used for non-MSI irqs */
+ /* Two IRQs are for MSI, but they are also used for non-MSI IRQs */
err = devm_request_irq(dev, msi->irq1, rcar_pcie_msi_irq,
IRQF_SHARED | IRQF_NO_THREAD,
rcar_msi_bottom_chip.name, host);
@@ -794,12 +792,12 @@ static int rcar_pcie_enable_msi(struct rcar_pcie_host *host)
goto err;
}
- /* disable all MSIs */
+ /* Disable all MSIs */
rcar_pci_write_reg(pcie, 0, PCIEMSIIER);
/*
- * Setup MSI data target using RC base address address, which
- * is guaranteed to be in the low 32bit range on any R-Car HW.
+ * Setup MSI data target using RC base address, which is guaranteed
+ * to be in the low 32bit range on any R-Car HW.
*/
rcar_pci_write_reg(pcie, lower_32_bits(res.start) | MSIFE, PCIEMSIALR);
rcar_pci_write_reg(pcie, upper_32_bits(res.start), PCIEMSIAUR);
@@ -894,6 +892,7 @@ static int rcar_pcie_inbound_ranges(struct rcar_pcie *pcie,
dev_err(pcie->dev, "Failed to map inbound regions!\n");
return -EINVAL;
}
+
/*
* If the size of the range is larger than the alignment of
* the start address, we have to use multiple entries to
@@ -905,6 +904,7 @@ static int rcar_pcie_inbound_ranges(struct rcar_pcie *pcie,
size = min(size, alignment);
}
+
/* Hardware supports max 4GiB inbound region */
size = min(size, 1ULL << 32);
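
[Editor's note] The comment retained in rcar_pcie_inbound_ranges() above describes the splitting rule for inbound windows: each mapping entry may cover no more than the alignment of its start address, and never more than 4 GiB, so a large range is carved into several entries. The sketch below is a user-space model of that rule only, not the driver loop itself; the example addresses are arbitrary.

/*
 * Carve a range into chunks no larger than the alignment of each
 * chunk's start address, capped at the 4 GiB hardware maximum.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t min_u64(uint64_t a, uint64_t b)
{
        return a < b ? a : b;
}

int main(void)
{
        uint64_t cpu_addr = 0x48000000ULL;      /* example start address */
        uint64_t size = 0x38000000ULL;          /* 896 MiB to map */

        while (size) {
                /* alignment of the current address: its lowest set bit */
                uint64_t alignment = cpu_addr & -cpu_addr;
                uint64_t chunk = min_u64(size, alignment);

                chunk = min_u64(chunk, 1ULL << 32);     /* HW max 4 GiB */
                printf("entry: addr 0x%" PRIx64 " size 0x%" PRIx64 "\n",
                       cpu_addr, chunk);
                cpu_addr += chunk;
                size -= chunk;
        }
        return 0;
}
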
diff --git a/drivers/pci/controller/pcie-rockchip-ep.c b/drivers/pci/controller/pcie-rockchip-ep.c
index c9046e97a1d2..85ea36df2f59 100644
--- a/drivers/pci/controller/pcie-rockchip-ep.c
+++ b/drivers/pci/controller/pcie-rockchip-ep.c
@@ -10,12 +10,16 @@
#include <linux/configfs.h>
#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/iopoll.h>
#include <linux/kernel.h>
+#include <linux/irq.h>
#include <linux/of.h>
#include <linux/pci-epc.h>
#include <linux/platform_device.h>
#include <linux/pci-epf.h>
#include <linux/sizes.h>
+#include <linux/workqueue.h>
#include "pcie-rockchip.h"
@@ -36,6 +40,10 @@
* @irq_pci_fn: the latest PCI function that has updated the mapping of
* the MSI/INTX IRQ dedicated outbound region.
* @irq_pending: bitmask of asserted INTX IRQs.
+ * @perst_irq: IRQ used for the PERST# signal.
+ * @perst_asserted: True if the PERST# signal was asserted.
+ * @link_up: True if the PCI link is up.
+ * @link_training: Work item to execute PCI link training.
*/
struct rockchip_pcie_ep {
struct rockchip_pcie rockchip;
@@ -48,6 +56,10 @@ struct rockchip_pcie_ep {
u64 irq_pci_addr;
u8 irq_pci_fn;
u8 irq_pending;
+ int perst_irq;
+ bool perst_asserted;
+ bool link_up;
+ struct delayed_work link_training;
};
static void rockchip_pcie_clear_ep_ob_atu(struct rockchip_pcie *rockchip,
@@ -63,15 +75,25 @@ static void rockchip_pcie_clear_ep_ob_atu(struct rockchip_pcie *rockchip,
ROCKCHIP_PCIE_AT_OB_REGION_DESC1(region));
}
+static int rockchip_pcie_ep_ob_atu_num_bits(struct rockchip_pcie *rockchip,
+ u64 pci_addr, size_t size)
+{
+ int num_pass_bits = fls64(pci_addr ^ (pci_addr + size - 1));
+
+ return clamp(num_pass_bits,
+ ROCKCHIP_PCIE_AT_MIN_NUM_BITS,
+ ROCKCHIP_PCIE_AT_MAX_NUM_BITS);
+}
+
static void rockchip_pcie_prog_ep_ob_atu(struct rockchip_pcie *rockchip, u8 fn,
u32 r, u64 cpu_addr, u64 pci_addr,
size_t size)
{
- int num_pass_bits = fls64(size - 1);
+ int num_pass_bits;
u32 addr0, addr1, desc0;
- if (num_pass_bits < 8)
- num_pass_bits = 8;
+ num_pass_bits = rockchip_pcie_ep_ob_atu_num_bits(rockchip,
+ pci_addr, size);
addr0 = ((num_pass_bits - 1) & PCIE_CORE_OB_REGION_ADDR0_NUM_BITS) |
(lower_32_bits(pci_addr) & PCIE_CORE_OB_REGION_ADDR0_LO_ADDR);
@@ -98,10 +120,8 @@ static int rockchip_pcie_ep_write_header(struct pci_epc *epc, u8 fn, u8 vfn,
/* All functions share the same vendor ID with function 0 */
if (fn == 0) {
- u32 vid_regs = (hdr->vendorid & GENMASK(15, 0)) |
- (hdr->subsys_vendor_id & GENMASK(31, 16)) << 16;
-
- rockchip_pcie_write(rockchip, vid_regs,
+ rockchip_pcie_write(rockchip,
+ hdr->vendorid | hdr->subsys_vendor_id << 16,
PCIE_CORE_CONFIG_VENDOR);
}
@@ -153,7 +173,7 @@ static int rockchip_pcie_ep_set_bar(struct pci_epc *epc, u8 fn, u8 vfn,
ctrl = ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_IO_32BITS;
} else {
bool is_prefetch = !!(flags & PCI_BASE_ADDRESS_MEM_PREFETCH);
- bool is_64bits = sz > SZ_2G;
+ bool is_64bits = !!(flags & PCI_BASE_ADDRESS_MEM_TYPE_64);
if (is_64bits && (bar & 1))
return -EINVAL;
@@ -230,6 +250,28 @@ static inline u32 rockchip_ob_region(phys_addr_t addr)
return (addr >> ilog2(SZ_1M)) & 0x1f;
}
+static u64 rockchip_pcie_ep_align_addr(struct pci_epc *epc, u64 pci_addr,
+ size_t *pci_size, size_t *addr_offset)
+{
+ struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
+ size_t size = *pci_size;
+ u64 offset, mask;
+ int num_bits;
+
+ num_bits = rockchip_pcie_ep_ob_atu_num_bits(&ep->rockchip,
+ pci_addr, size);
+ mask = (1ULL << num_bits) - 1;
+
+ offset = pci_addr & mask;
+ if (size + offset > SZ_1M)
+ size = SZ_1M - offset;
+
+ *pci_size = ALIGN(offset + size, ROCKCHIP_PCIE_AT_SIZE_ALIGN);
+ *addr_offset = offset;
+
+ return pci_addr & ~mask;
+}
+
static int rockchip_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, u8 vfn,
phys_addr_t addr, u64 pci_addr,
size_t size)
@@ -238,6 +280,9 @@ static int rockchip_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, u8 vfn,
struct rockchip_pcie *pcie = &ep->rockchip;
u32 r = rockchip_ob_region(addr);
+ if (test_bit(r, &ep->ob_region_map))
+ return -EBUSY;
+
rockchip_pcie_prog_ep_ob_atu(pcie, fn, r, addr, pci_addr, size);
set_bit(r, &ep->ob_region_map);
@@ -251,13 +296,9 @@ static void rockchip_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn, u8 vfn,
{
struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
struct rockchip_pcie *rockchip = &ep->rockchip;
- u32 r;
-
- for (r = 0; r < ep->max_regions; r++)
- if (ep->ob_addr[r] == addr)
- break;
+ u32 r = rockchip_ob_region(addr);
- if (r == ep->max_regions)
+ if (addr != ep->ob_addr[r] || !test_bit(r, &ep->ob_region_map))
return;
rockchip_pcie_clear_ep_ob_atu(rockchip, r);
@@ -353,9 +394,10 @@ static int rockchip_pcie_ep_send_msi_irq(struct rockchip_pcie_ep *ep, u8 fn,
{
struct rockchip_pcie *rockchip = &ep->rockchip;
u32 flags, mme, data, data_mask;
+ size_t irq_pci_size, offset;
+ u64 irq_pci_addr;
u8 msi_count;
u64 pci_addr;
- u32 r;
/* Check MSI enable bit */
flags = rockchip_pcie_read(&ep->rockchip,
@@ -391,18 +433,21 @@ static int rockchip_pcie_ep_send_msi_irq(struct rockchip_pcie_ep *ep, u8 fn,
PCI_MSI_ADDRESS_LO);
/* Set the outbound region if needed. */
- if (unlikely(ep->irq_pci_addr != (pci_addr & PCIE_ADDR_MASK) ||
+ irq_pci_size = ~PCIE_ADDR_MASK + 1;
+ irq_pci_addr = rockchip_pcie_ep_align_addr(ep->epc,
+ pci_addr & PCIE_ADDR_MASK,
+ &irq_pci_size, &offset);
+ if (unlikely(ep->irq_pci_addr != irq_pci_addr ||
ep->irq_pci_fn != fn)) {
- r = rockchip_ob_region(ep->irq_phys_addr);
- rockchip_pcie_prog_ep_ob_atu(rockchip, fn, r,
- ep->irq_phys_addr,
- pci_addr & PCIE_ADDR_MASK,
- ~PCIE_ADDR_MASK + 1);
- ep->irq_pci_addr = (pci_addr & PCIE_ADDR_MASK);
+ rockchip_pcie_prog_ep_ob_atu(rockchip, fn,
+ rockchip_ob_region(ep->irq_phys_addr),
+ ep->irq_phys_addr,
+ irq_pci_addr, irq_pci_size);
+ ep->irq_pci_addr = irq_pci_addr;
ep->irq_pci_fn = fn;
}
- writew(data, ep->irq_cpu_addr + (pci_addr & ~PCIE_ADDR_MASK));
+ writew(data, ep->irq_cpu_addr + offset + (pci_addr & ~PCIE_ADDR_MASK));
return 0;
}
@@ -434,14 +479,222 @@ static int rockchip_pcie_ep_start(struct pci_epc *epc)
rockchip_pcie_write(rockchip, cfg, PCIE_CORE_PHY_FUNC_CFG);
+ if (rockchip->perst_gpio)
+ enable_irq(ep->perst_irq);
+
+ /* Enable configuration and start link training */
+ rockchip_pcie_write(rockchip,
+ PCIE_CLIENT_LINK_TRAIN_ENABLE |
+ PCIE_CLIENT_CONF_ENABLE,
+ PCIE_CLIENT_CONFIG);
+
+ if (!rockchip->perst_gpio)
+ schedule_delayed_work(&ep->link_training, 0);
+
+ return 0;
+}
+
+static void rockchip_pcie_ep_stop(struct pci_epc *epc)
+{
+ struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
+ struct rockchip_pcie *rockchip = &ep->rockchip;
+
+ if (rockchip->perst_gpio) {
+ ep->perst_asserted = true;
+ disable_irq(ep->perst_irq);
+ }
+
+ cancel_delayed_work_sync(&ep->link_training);
+
+ /* Stop link training and disable configuration */
+ rockchip_pcie_write(rockchip,
+ PCIE_CLIENT_CONF_DISABLE |
+ PCIE_CLIENT_LINK_TRAIN_DISABLE,
+ PCIE_CLIENT_CONFIG);
+}
+
+static void rockchip_pcie_ep_retrain_link(struct rockchip_pcie *rockchip)
+{
+ u32 status;
+
+ status = rockchip_pcie_read(rockchip, PCIE_EP_CONFIG_LCS);
+ status |= PCI_EXP_LNKCTL_RL;
+ rockchip_pcie_write(rockchip, status, PCIE_EP_CONFIG_LCS);
+}
+
+static bool rockchip_pcie_ep_link_up(struct rockchip_pcie *rockchip)
+{
+ u32 val = rockchip_pcie_read(rockchip, PCIE_CLIENT_BASIC_STATUS1);
+
+ return PCIE_LINK_UP(val);
+}
+
+static void rockchip_pcie_ep_link_training(struct work_struct *work)
+{
+ struct rockchip_pcie_ep *ep =
+ container_of(work, struct rockchip_pcie_ep, link_training.work);
+ struct rockchip_pcie *rockchip = &ep->rockchip;
+ struct device *dev = rockchip->dev;
+ u32 val;
+ int ret;
+
+ /* Enable Gen1 training and wait for its completion */
+ ret = readl_poll_timeout(rockchip->apb_base + PCIE_CORE_CTRL,
+ val, PCIE_LINK_TRAINING_DONE(val), 50,
+ LINK_TRAIN_TIMEOUT);
+ if (ret)
+ goto again;
+
+ /* Make sure that the link is up */
+ ret = readl_poll_timeout(rockchip->apb_base + PCIE_CLIENT_BASIC_STATUS1,
+ val, PCIE_LINK_UP(val), 50,
+ LINK_TRAIN_TIMEOUT);
+ if (ret)
+ goto again;
+
+ /*
+ * Check the current speed: if gen2 speed was requested and we are not
+ * at gen2 speed yet, retrain again for gen2.
+ */
+ val = rockchip_pcie_read(rockchip, PCIE_CORE_CTRL);
+ if (!PCIE_LINK_IS_GEN2(val) && rockchip->link_gen == 2) {
+ /* Enable retrain for gen2 */
+ rockchip_pcie_ep_retrain_link(rockchip);
+ readl_poll_timeout(rockchip->apb_base + PCIE_CORE_CTRL,
+ val, PCIE_LINK_IS_GEN2(val), 50,
+ LINK_TRAIN_TIMEOUT);
+ }
+
+ /* Check again that the link is up */
+ if (!rockchip_pcie_ep_link_up(rockchip))
+ goto again;
+
+ /*
+ * If PERST# was asserted while polling the link, do not notify
+ * the function.
+ */
+ if (ep->perst_asserted)
+ return;
+
+ val = rockchip_pcie_read(rockchip, PCIE_CLIENT_BASIC_STATUS0);
+ dev_info(dev,
+ "link up (negotiated speed: %sGT/s, width: x%lu)\n",
+ (val & PCIE_CLIENT_NEG_LINK_SPEED) ? "5" : "2.5",
+ ((val & PCIE_CLIENT_NEG_LINK_WIDTH_MASK) >>
+ PCIE_CLIENT_NEG_LINK_WIDTH_SHIFT) << 1);
+
+ /* Notify the function */
+ pci_epc_linkup(ep->epc);
+ ep->link_up = true;
+
+ return;
+
+again:
+ schedule_delayed_work(&ep->link_training, msecs_to_jiffies(5));
+}
+
+static void rockchip_pcie_ep_perst_assert(struct rockchip_pcie_ep *ep)
+{
+ struct rockchip_pcie *rockchip = &ep->rockchip;
+
+ dev_dbg(rockchip->dev, "PERST# asserted, link down\n");
+
+ if (ep->perst_asserted)
+ return;
+
+ ep->perst_asserted = true;
+
+ cancel_delayed_work_sync(&ep->link_training);
+
+ if (ep->link_up) {
+ pci_epc_linkdown(ep->epc);
+ ep->link_up = false;
+ }
+}
+
+static void rockchip_pcie_ep_perst_deassert(struct rockchip_pcie_ep *ep)
+{
+ struct rockchip_pcie *rockchip = &ep->rockchip;
+
+ dev_dbg(rockchip->dev, "PERST# de-asserted, starting link training\n");
+
+ if (!ep->perst_asserted)
+ return;
+
+ ep->perst_asserted = false;
+
+ /* Enable link re-training */
+ rockchip_pcie_ep_retrain_link(rockchip);
+
+ /* Start link training */
+ schedule_delayed_work(&ep->link_training, 0);
+}
+
+static irqreturn_t rockchip_pcie_ep_perst_irq_thread(int irq, void *data)
+{
+ struct pci_epc *epc = data;
+ struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
+ struct rockchip_pcie *rockchip = &ep->rockchip;
+ u32 perst = gpiod_get_value(rockchip->perst_gpio);
+
+ if (perst)
+ rockchip_pcie_ep_perst_assert(ep);
+ else
+ rockchip_pcie_ep_perst_deassert(ep);
+
+ irq_set_irq_type(ep->perst_irq,
+ (perst ? IRQF_TRIGGER_HIGH : IRQF_TRIGGER_LOW));
+
+ return IRQ_HANDLED;
+}
+
+static int rockchip_pcie_ep_setup_irq(struct pci_epc *epc)
+{
+ struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
+ struct rockchip_pcie *rockchip = &ep->rockchip;
+ struct device *dev = rockchip->dev;
+ int ret;
+
+ if (!rockchip->perst_gpio)
+ return 0;
+
+ /* PCIe reset interrupt */
+ ep->perst_irq = gpiod_to_irq(rockchip->perst_gpio);
+ if (ep->perst_irq < 0) {
+ dev_err(dev,
+ "failed to get IRQ for PERST# GPIO: %d\n",
+ ep->perst_irq);
+
+ return ep->perst_irq;
+ }
+
+ /*
+ * The perst_gpio is active low, so when it is inactive on start, it
+ * is high and will trigger the perst_irq handler. So treat this initial
+ * IRQ as a dummy one by faking the host asserting PERST#.
+ */
+ ep->perst_asserted = true;
+ irq_set_status_flags(ep->perst_irq, IRQ_NOAUTOEN);
+ ret = devm_request_threaded_irq(dev, ep->perst_irq, NULL,
+ rockchip_pcie_ep_perst_irq_thread,
+ IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+ "pcie-ep-perst", epc);
+ if (ret) {
+ dev_err(dev,
+ "failed to request IRQ for PERST# GPIO: %d\n",
+ ret);
+
+ return ret;
+ }
+
return 0;
}
static const struct pci_epc_features rockchip_pcie_epc_features = {
- .linkup_notifier = false,
+ .linkup_notifier = true,
.msi_capable = true,
.msix_capable = false,
- .align = 256,
+ .align = ROCKCHIP_PCIE_AT_SIZE_ALIGN,
};
static const struct pci_epc_features*
@@ -454,17 +707,19 @@ static const struct pci_epc_ops rockchip_pcie_epc_ops = {
.write_header = rockchip_pcie_ep_write_header,
.set_bar = rockchip_pcie_ep_set_bar,
.clear_bar = rockchip_pcie_ep_clear_bar,
+ .align_addr = rockchip_pcie_ep_align_addr,
.map_addr = rockchip_pcie_ep_map_addr,
.unmap_addr = rockchip_pcie_ep_unmap_addr,
.set_msi = rockchip_pcie_ep_set_msi,
.get_msi = rockchip_pcie_ep_get_msi,
.raise_irq = rockchip_pcie_ep_raise_irq,
.start = rockchip_pcie_ep_start,
+ .stop = rockchip_pcie_ep_stop,
.get_features = rockchip_pcie_ep_get_features,
};
-static int rockchip_pcie_parse_ep_dt(struct rockchip_pcie *rockchip,
- struct rockchip_pcie_ep *ep)
+static int rockchip_pcie_ep_get_resources(struct rockchip_pcie *rockchip,
+ struct rockchip_pcie_ep *ep)
{
struct device *dev = rockchip->dev;
int err;
@@ -498,82 +753,38 @@ static const struct of_device_id rockchip_pcie_ep_of_match[] = {
{},
};
-static int rockchip_pcie_ep_probe(struct platform_device *pdev)
+static int rockchip_pcie_ep_init_ob_mem(struct rockchip_pcie_ep *ep)
{
- struct device *dev = &pdev->dev;
- struct rockchip_pcie_ep *ep;
- struct rockchip_pcie *rockchip;
- struct pci_epc *epc;
- size_t max_regions;
+ struct rockchip_pcie *rockchip = &ep->rockchip;
+ struct device *dev = rockchip->dev;
struct pci_epc_mem_window *windows = NULL;
int err, i;
- u32 cfg_msi, cfg_msix_cp;
-
- ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL);
- if (!ep)
- return -ENOMEM;
-
- rockchip = &ep->rockchip;
- rockchip->is_rc = false;
- rockchip->dev = dev;
-
- epc = devm_pci_epc_create(dev, &rockchip_pcie_epc_ops);
- if (IS_ERR(epc)) {
- dev_err(dev, "failed to create epc device\n");
- return PTR_ERR(epc);
- }
-
- ep->epc = epc;
- epc_set_drvdata(epc, ep);
-
- err = rockchip_pcie_parse_ep_dt(rockchip, ep);
- if (err)
- return err;
-
- err = rockchip_pcie_enable_clocks(rockchip);
- if (err)
- return err;
- err = rockchip_pcie_init_port(rockchip);
- if (err)
- goto err_disable_clocks;
-
- /* Establish the link automatically */
- rockchip_pcie_write(rockchip, PCIE_CLIENT_LINK_TRAIN_ENABLE,
- PCIE_CLIENT_CONFIG);
-
- max_regions = ep->max_regions;
- ep->ob_addr = devm_kcalloc(dev, max_regions, sizeof(*ep->ob_addr),
+ ep->ob_addr = devm_kcalloc(dev, ep->max_regions, sizeof(*ep->ob_addr),
GFP_KERNEL);
- if (!ep->ob_addr) {
- err = -ENOMEM;
- goto err_uninit_port;
- }
-
- /* Only enable function 0 by default */
- rockchip_pcie_write(rockchip, BIT(0), PCIE_CORE_PHY_FUNC_CFG);
+ if (!ep->ob_addr)
+ return -ENOMEM;
windows = devm_kcalloc(dev, ep->max_regions,
sizeof(struct pci_epc_mem_window), GFP_KERNEL);
- if (!windows) {
- err = -ENOMEM;
- goto err_uninit_port;
- }
+ if (!windows)
+ return -ENOMEM;
+
for (i = 0; i < ep->max_regions; i++) {
windows[i].phys_base = rockchip->mem_res->start + (SZ_1M * i);
windows[i].size = SZ_1M;
windows[i].page_size = SZ_1M;
}
- err = pci_epc_multi_mem_init(epc, windows, ep->max_regions);
+ err = pci_epc_multi_mem_init(ep->epc, windows, ep->max_regions);
devm_kfree(dev, windows);
if (err < 0) {
dev_err(dev, "failed to initialize the memory space\n");
- goto err_uninit_port;
+ return err;
}
- ep->irq_cpu_addr = pci_epc_mem_alloc_addr(epc, &ep->irq_phys_addr,
+ ep->irq_cpu_addr = pci_epc_mem_alloc_addr(ep->epc, &ep->irq_phys_addr,
SZ_1M);
if (!ep->irq_cpu_addr) {
dev_err(dev, "failed to reserve memory space for MSI\n");
@@ -583,6 +794,23 @@ static int rockchip_pcie_ep_probe(struct platform_device *pdev)
ep->irq_pci_addr = ROCKCHIP_PCIE_EP_DUMMY_IRQ_ADDR;
+ return 0;
+
+err_epc_mem_exit:
+ pci_epc_mem_exit(ep->epc);
+
+ return err;
+}
+
+static void rockchip_pcie_ep_exit_ob_mem(struct rockchip_pcie_ep *ep)
+{
+ pci_epc_mem_exit(ep->epc);
+}
+
+static void rockchip_pcie_ep_hide_broken_msix_cap(struct rockchip_pcie *rockchip)
+{
+ u32 cfg_msi, cfg_msix_cp;
+
/*
* MSI-X is not supported but the controller still advertises the MSI-X
* capability by default, which can lead to the Root Complex side
@@ -605,17 +833,68 @@ static int rockchip_pcie_ep_probe(struct platform_device *pdev)
rockchip_pcie_write(rockchip, cfg_msi,
PCIE_EP_CONFIG_BASE + ROCKCHIP_PCIE_EP_MSI_CTRL_REG);
+}
- rockchip_pcie_write(rockchip, PCIE_CLIENT_CONF_ENABLE,
- PCIE_CLIENT_CONFIG);
+static int rockchip_pcie_ep_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rockchip_pcie_ep *ep;
+ struct rockchip_pcie *rockchip;
+ struct pci_epc *epc;
+ int err;
+
+ ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL);
+ if (!ep)
+ return -ENOMEM;
+
+ rockchip = &ep->rockchip;
+ rockchip->is_rc = false;
+ rockchip->dev = dev;
+ INIT_DELAYED_WORK(&ep->link_training, rockchip_pcie_ep_link_training);
+
+ epc = devm_pci_epc_create(dev, &rockchip_pcie_epc_ops);
+ if (IS_ERR(epc)) {
+ dev_err(dev, "failed to create EPC device\n");
+ return PTR_ERR(epc);
+ }
+
+ ep->epc = epc;
+ epc_set_drvdata(epc, ep);
+
+ err = rockchip_pcie_ep_get_resources(rockchip, ep);
+ if (err)
+ return err;
+
+ err = rockchip_pcie_ep_init_ob_mem(ep);
+ if (err)
+ return err;
+
+ err = rockchip_pcie_enable_clocks(rockchip);
+ if (err)
+ goto err_exit_ob_mem;
+
+ err = rockchip_pcie_init_port(rockchip);
+ if (err)
+ goto err_disable_clocks;
+
+ rockchip_pcie_ep_hide_broken_msix_cap(rockchip);
+
+ /* Only enable function 0 by default */
+ rockchip_pcie_write(rockchip, BIT(0), PCIE_CORE_PHY_FUNC_CFG);
+
+ pci_epc_init_notify(epc);
+
+ err = rockchip_pcie_ep_setup_irq(epc);
+ if (err < 0)
+ goto err_uninit_port;
return 0;
-err_epc_mem_exit:
- pci_epc_mem_exit(epc);
err_uninit_port:
rockchip_pcie_deinit_phys(rockchip);
err_disable_clocks:
rockchip_pcie_disable_clocks(rockchip);
+err_exit_ob_mem:
+ rockchip_pcie_ep_exit_ob_mem(ep);
return err;
}
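
[Editor's note] The new rockchip_pcie_ep_ob_atu_num_bits() above derives the outbound ATU pass-through width from fls64(pci_addr ^ (pci_addr + size - 1)), i.e. the position of the highest bit that differs between the first and last byte of the mapping, then clamps the result to the controller's supported range. The user-space sketch below reproduces that arithmetic; the clamp bounds of 8 and 20 bits are assumptions standing in for ROCKCHIP_PCIE_AT_MIN_NUM_BITS and ROCKCHIP_PCIE_AT_MAX_NUM_BITS, which are defined outside this hunk.

/*
 * Model of the pass-bit computation: XOR of first and last address
 * exposes the highest differing bit; fls64() of that gives how many
 * low address bits the translation must pass through unchanged.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define MIN_NUM_BITS 8          /* assumed lower clamp */
#define MAX_NUM_BITS 20         /* assumed upper clamp (1 MiB region) */

static int fls64_sketch(uint64_t v)
{
        int r = 0;

        while (v) {
                v >>= 1;
                r++;
        }
        return r;
}

static int ob_atu_num_bits(uint64_t pci_addr, size_t size)
{
        int num_pass_bits = fls64_sketch(pci_addr ^ (pci_addr + size - 1));

        if (num_pass_bits < MIN_NUM_BITS)
                num_pass_bits = MIN_NUM_BITS;
        if (num_pass_bits > MAX_NUM_BITS)
                num_pass_bits = MAX_NUM_BITS;
        return num_pass_bits;
}

int main(void)
{
        uint64_t pci_addr = 0xfd000f80ULL;      /* example MSI target */
        size_t size = 4;

        printf("num_pass_bits = %d\n", ob_atu_num_bits(pci_addr, size));
        return 0;
}
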
diff --git a/drivers/pci/controller/pcie-rockchip-host.c b/drivers/pci/controller/pcie-rockchip-host.c
index 300b9dc85ecc..b9e7a8710cf0 100644
--- a/drivers/pci/controller/pcie-rockchip-host.c
+++ b/drivers/pci/controller/pcie-rockchip-host.c
@@ -294,7 +294,7 @@ static int rockchip_pcie_host_init_port(struct rockchip_pcie *rockchip)
int err, i = MAX_LANE_NUM;
u32 status;
- gpiod_set_value_cansleep(rockchip->ep_gpio, 0);
+ gpiod_set_value_cansleep(rockchip->perst_gpio, 0);
err = rockchip_pcie_init_port(rockchip);
if (err)
@@ -322,7 +322,10 @@ static int rockchip_pcie_host_init_port(struct rockchip_pcie *rockchip)
rockchip_pcie_write(rockchip, PCIE_CLIENT_LINK_TRAIN_ENABLE,
PCIE_CLIENT_CONFIG);
- gpiod_set_value_cansleep(rockchip->ep_gpio, 1);
+ msleep(PCIE_T_PVPERL_MS);
+ gpiod_set_value_cansleep(rockchip->perst_gpio, 1);
+
+ msleep(PCIE_T_RRS_READY_MS);
/* 500ms timeout value should be enough for Gen1/2 training */
err = readl_poll_timeout(rockchip->apb_base + PCIE_CLIENT_BASIC_STATUS1,
@@ -364,7 +367,7 @@ static int rockchip_pcie_host_init_port(struct rockchip_pcie *rockchip)
}
}
- rockchip_pcie_write(rockchip, ROCKCHIP_VENDOR_ID,
+ rockchip_pcie_write(rockchip, PCI_VENDOR_ID_ROCKCHIP,
PCIE_CORE_CONFIG_VENDOR);
rockchip_pcie_write(rockchip,
PCI_CLASS_BRIDGE_PCI_NORMAL << 8,
@@ -690,8 +693,8 @@ static int rockchip_pcie_init_irq_domain(struct rockchip_pcie *rockchip)
return -EINVAL;
}
- rockchip->irq_domain = irq_domain_add_linear(intc, PCI_NUM_INTX,
- &intx_domain_ops, rockchip);
+ rockchip->irq_domain = irq_domain_create_linear(of_fwnode_handle(intc), PCI_NUM_INTX,
+ &intx_domain_ops, rockchip);
of_node_put(intc);
if (!rockchip->irq_domain) {
dev_err(dev, "failed to get a INTx IRQ domain\n");
@@ -1047,7 +1050,7 @@ static struct platform_driver rockchip_pcie_driver = {
.pm = &rockchip_pcie_pm_ops,
},
.probe = rockchip_pcie_probe,
- .remove_new = rockchip_pcie_remove,
+ .remove = rockchip_pcie_remove,
};
module_platform_driver(rockchip_pcie_driver);
diff --git a/drivers/pci/controller/pcie-rockchip.c b/drivers/pci/controller/pcie-rockchip.c
index 0ef2e622d36e..0f88da378805 100644
--- a/drivers/pci/controller/pcie-rockchip.c
+++ b/drivers/pci/controller/pcie-rockchip.c
@@ -30,7 +30,7 @@ int rockchip_pcie_parse_dt(struct rockchip_pcie *rockchip)
struct platform_device *pdev = to_platform_device(dev);
struct device_node *node = dev->of_node;
struct resource *regs;
- int err;
+ int err, i;
if (rockchip->is_rc) {
regs = platform_get_resource_byname(pdev,
@@ -69,87 +69,38 @@ int rockchip_pcie_parse_dt(struct rockchip_pcie *rockchip)
if (rockchip->link_gen < 0 || rockchip->link_gen > 2)
rockchip->link_gen = 2;
- rockchip->core_rst = devm_reset_control_get_exclusive(dev, "core");
- if (IS_ERR(rockchip->core_rst)) {
- if (PTR_ERR(rockchip->core_rst) != -EPROBE_DEFER)
- dev_err(dev, "missing core reset property in node\n");
- return PTR_ERR(rockchip->core_rst);
- }
-
- rockchip->mgmt_rst = devm_reset_control_get_exclusive(dev, "mgmt");
- if (IS_ERR(rockchip->mgmt_rst)) {
- if (PTR_ERR(rockchip->mgmt_rst) != -EPROBE_DEFER)
- dev_err(dev, "missing mgmt reset property in node\n");
- return PTR_ERR(rockchip->mgmt_rst);
- }
-
- rockchip->mgmt_sticky_rst = devm_reset_control_get_exclusive(dev,
- "mgmt-sticky");
- if (IS_ERR(rockchip->mgmt_sticky_rst)) {
- if (PTR_ERR(rockchip->mgmt_sticky_rst) != -EPROBE_DEFER)
- dev_err(dev, "missing mgmt-sticky reset property in node\n");
- return PTR_ERR(rockchip->mgmt_sticky_rst);
- }
-
- rockchip->pipe_rst = devm_reset_control_get_exclusive(dev, "pipe");
- if (IS_ERR(rockchip->pipe_rst)) {
- if (PTR_ERR(rockchip->pipe_rst) != -EPROBE_DEFER)
- dev_err(dev, "missing pipe reset property in node\n");
- return PTR_ERR(rockchip->pipe_rst);
- }
-
- rockchip->pm_rst = devm_reset_control_get_exclusive(dev, "pm");
- if (IS_ERR(rockchip->pm_rst)) {
- if (PTR_ERR(rockchip->pm_rst) != -EPROBE_DEFER)
- dev_err(dev, "missing pm reset property in node\n");
- return PTR_ERR(rockchip->pm_rst);
- }
+ for (i = 0; i < ROCKCHIP_NUM_PM_RSTS; i++)
+ rockchip->pm_rsts[i].id = rockchip_pci_pm_rsts[i];
- rockchip->pclk_rst = devm_reset_control_get_exclusive(dev, "pclk");
- if (IS_ERR(rockchip->pclk_rst)) {
- if (PTR_ERR(rockchip->pclk_rst) != -EPROBE_DEFER)
- dev_err(dev, "missing pclk reset property in node\n");
- return PTR_ERR(rockchip->pclk_rst);
- }
-
- rockchip->aclk_rst = devm_reset_control_get_exclusive(dev, "aclk");
- if (IS_ERR(rockchip->aclk_rst)) {
- if (PTR_ERR(rockchip->aclk_rst) != -EPROBE_DEFER)
- dev_err(dev, "missing aclk reset property in node\n");
- return PTR_ERR(rockchip->aclk_rst);
- }
-
- if (rockchip->is_rc) {
- rockchip->ep_gpio = devm_gpiod_get_optional(dev, "ep",
- GPIOD_OUT_HIGH);
- if (IS_ERR(rockchip->ep_gpio))
- return dev_err_probe(dev, PTR_ERR(rockchip->ep_gpio),
- "failed to get ep GPIO\n");
- }
+ err = devm_reset_control_bulk_get_exclusive(dev,
+ ROCKCHIP_NUM_PM_RSTS,
+ rockchip->pm_rsts);
+ if (err)
+ return dev_err_probe(dev, err, "Cannot get the PM reset\n");
- rockchip->aclk_pcie = devm_clk_get(dev, "aclk");
- if (IS_ERR(rockchip->aclk_pcie)) {
- dev_err(dev, "aclk clock not found\n");
- return PTR_ERR(rockchip->aclk_pcie);
- }
+ for (i = 0; i < ROCKCHIP_NUM_CORE_RSTS; i++)
+ rockchip->core_rsts[i].id = rockchip_pci_core_rsts[i];
- rockchip->aclk_perf_pcie = devm_clk_get(dev, "aclk-perf");
- if (IS_ERR(rockchip->aclk_perf_pcie)) {
- dev_err(dev, "aclk_perf clock not found\n");
- return PTR_ERR(rockchip->aclk_perf_pcie);
- }
+ err = devm_reset_control_bulk_get_exclusive(dev,
+ ROCKCHIP_NUM_CORE_RSTS,
+ rockchip->core_rsts);
+ if (err)
+ return dev_err_probe(dev, err, "Cannot get the Core resets\n");
- rockchip->hclk_pcie = devm_clk_get(dev, "hclk");
- if (IS_ERR(rockchip->hclk_pcie)) {
- dev_err(dev, "hclk clock not found\n");
- return PTR_ERR(rockchip->hclk_pcie);
- }
+ if (rockchip->is_rc)
+ rockchip->perst_gpio = devm_gpiod_get_optional(dev, "ep",
+ GPIOD_OUT_LOW);
+ else
+ rockchip->perst_gpio = devm_gpiod_get_optional(dev, "reset",
+ GPIOD_IN);
+ if (IS_ERR(rockchip->perst_gpio))
+ return dev_err_probe(dev, PTR_ERR(rockchip->perst_gpio),
+ "failed to get PERST# GPIO\n");
- rockchip->clk_pcie_pm = devm_clk_get(dev, "pm");
- if (IS_ERR(rockchip->clk_pcie_pm)) {
- dev_err(dev, "pm clock not found\n");
- return PTR_ERR(rockchip->clk_pcie_pm);
- }
+ rockchip->num_clks = devm_clk_bulk_get_all(dev, &rockchip->clks);
+ if (rockchip->num_clks < 0)
+ return dev_err_probe(dev, rockchip->num_clks,
+ "failed to get clocks\n");
return 0;
}
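
For reference, the per-line reset handling removed above collapses into the reset_control_bulk API. Below is a minimal sketch of that pattern using illustrative names (foo_*), not identifiers from this driver:

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/reset.h>

static const char * const foo_resets[] = { "core", "bus", "pipe" };

struct foo_priv {
	struct reset_control_bulk_data rsts[ARRAY_SIZE(foo_resets)];
};

static int foo_get_and_cycle_resets(struct device *dev, struct foo_priv *priv)
{
	int i, err;

	/* Fill in the consumer IDs, then request the whole group at once. */
	for (i = 0; i < ARRAY_SIZE(foo_resets); i++)
		priv->rsts[i].id = foo_resets[i];

	err = devm_reset_control_bulk_get_exclusive(dev, ARRAY_SIZE(foo_resets),
						    priv->rsts);
	if (err)
		return dev_err_probe(dev, err, "failed to get resets\n");

	/* Assert and deassert also operate on the whole array. */
	err = reset_control_bulk_assert(ARRAY_SIZE(foo_resets), priv->rsts);
	if (err)
		return err;

	return reset_control_bulk_deassert(ARRAY_SIZE(foo_resets), priv->rsts);
}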
@@ -167,23 +118,10 @@ int rockchip_pcie_init_port(struct rockchip_pcie *rockchip)
int err, i;
u32 regs;
- err = reset_control_assert(rockchip->aclk_rst);
- if (err) {
- dev_err(dev, "assert aclk_rst err %d\n", err);
- return err;
- }
-
- err = reset_control_assert(rockchip->pclk_rst);
- if (err) {
- dev_err(dev, "assert pclk_rst err %d\n", err);
- return err;
- }
-
- err = reset_control_assert(rockchip->pm_rst);
- if (err) {
- dev_err(dev, "assert pm_rst err %d\n", err);
- return err;
- }
+ err = reset_control_bulk_assert(ROCKCHIP_NUM_PM_RSTS,
+ rockchip->pm_rsts);
+ if (err)
+ return dev_err_probe(dev, err, "Couldn't assert PM resets\n");
for (i = 0; i < MAX_LANE_NUM; i++) {
err = phy_init(rockchip->phys[i]);
@@ -193,47 +131,19 @@ int rockchip_pcie_init_port(struct rockchip_pcie *rockchip)
}
}
- err = reset_control_assert(rockchip->core_rst);
- if (err) {
- dev_err(dev, "assert core_rst err %d\n", err);
- goto err_exit_phy;
- }
-
- err = reset_control_assert(rockchip->mgmt_rst);
- if (err) {
- dev_err(dev, "assert mgmt_rst err %d\n", err);
- goto err_exit_phy;
- }
-
- err = reset_control_assert(rockchip->mgmt_sticky_rst);
+ err = reset_control_bulk_assert(ROCKCHIP_NUM_CORE_RSTS,
+ rockchip->core_rsts);
if (err) {
- dev_err(dev, "assert mgmt_sticky_rst err %d\n", err);
- goto err_exit_phy;
- }
-
- err = reset_control_assert(rockchip->pipe_rst);
- if (err) {
- dev_err(dev, "assert pipe_rst err %d\n", err);
+ dev_err_probe(dev, err, "Couldn't assert Core resets\n");
goto err_exit_phy;
}
udelay(10);
- err = reset_control_deassert(rockchip->pm_rst);
- if (err) {
- dev_err(dev, "deassert pm_rst err %d\n", err);
- goto err_exit_phy;
- }
-
- err = reset_control_deassert(rockchip->aclk_rst);
+ err = reset_control_bulk_deassert(ROCKCHIP_NUM_PM_RSTS,
+ rockchip->pm_rsts);
if (err) {
- dev_err(dev, "deassert aclk_rst err %d\n", err);
- goto err_exit_phy;
- }
-
- err = reset_control_deassert(rockchip->pclk_rst);
- if (err) {
- dev_err(dev, "deassert pclk_rst err %d\n", err);
+ dev_err(dev, "Couldn't deassert PM resets %d\n", err);
goto err_exit_phy;
}
@@ -244,11 +154,12 @@ int rockchip_pcie_init_port(struct rockchip_pcie *rockchip)
rockchip_pcie_write(rockchip, PCIE_CLIENT_GEN_SEL_1,
PCIE_CLIENT_CONFIG);
- regs = PCIE_CLIENT_LINK_TRAIN_ENABLE | PCIE_CLIENT_ARI_ENABLE |
+ regs = PCIE_CLIENT_ARI_ENABLE |
PCIE_CLIENT_CONF_LANE_NUM(rockchip->lanes);
if (rockchip->is_rc)
- regs |= PCIE_CLIENT_CONF_ENABLE | PCIE_CLIENT_MODE_RC;
+ regs |= PCIE_CLIENT_LINK_TRAIN_ENABLE |
+ PCIE_CLIENT_CONF_ENABLE | PCIE_CLIENT_MODE_RC;
else
regs |= PCIE_CLIENT_CONF_DISABLE | PCIE_CLIENT_MODE_EP;
@@ -272,31 +183,10 @@ int rockchip_pcie_init_port(struct rockchip_pcie *rockchip)
goto err_power_off_phy;
}
- /*
- * Please don't reorder the deassert sequence of the following
- * four reset pins.
- */
- err = reset_control_deassert(rockchip->mgmt_sticky_rst);
- if (err) {
- dev_err(dev, "deassert mgmt_sticky_rst err %d\n", err);
- goto err_power_off_phy;
- }
-
- err = reset_control_deassert(rockchip->core_rst);
- if (err) {
- dev_err(dev, "deassert core_rst err %d\n", err);
- goto err_power_off_phy;
- }
-
- err = reset_control_deassert(rockchip->mgmt_rst);
- if (err) {
- dev_err(dev, "deassert mgmt_rst err %d\n", err);
- goto err_power_off_phy;
- }
-
- err = reset_control_deassert(rockchip->pipe_rst);
+ err = reset_control_bulk_deassert(ROCKCHIP_NUM_CORE_RSTS,
+ rockchip->core_rsts);
if (err) {
- dev_err(dev, "deassert pipe_rst err %d\n", err);
+ dev_err(dev, "Couldn't deassert Core reset %d\n", err);
goto err_power_off_phy;
}
@@ -372,50 +262,18 @@ int rockchip_pcie_enable_clocks(struct rockchip_pcie *rockchip)
struct device *dev = rockchip->dev;
int err;
- err = clk_prepare_enable(rockchip->aclk_pcie);
- if (err) {
- dev_err(dev, "unable to enable aclk_pcie clock\n");
- return err;
- }
-
- err = clk_prepare_enable(rockchip->aclk_perf_pcie);
- if (err) {
- dev_err(dev, "unable to enable aclk_perf_pcie clock\n");
- goto err_aclk_perf_pcie;
- }
-
- err = clk_prepare_enable(rockchip->hclk_pcie);
- if (err) {
- dev_err(dev, "unable to enable hclk_pcie clock\n");
- goto err_hclk_pcie;
- }
-
- err = clk_prepare_enable(rockchip->clk_pcie_pm);
- if (err) {
- dev_err(dev, "unable to enable clk_pcie_pm clock\n");
- goto err_clk_pcie_pm;
- }
+ err = clk_bulk_prepare_enable(rockchip->num_clks, rockchip->clks);
+ if (err)
+ return dev_err_probe(dev, err, "failed to enable clocks\n");
return 0;
-
-err_clk_pcie_pm:
- clk_disable_unprepare(rockchip->hclk_pcie);
-err_hclk_pcie:
- clk_disable_unprepare(rockchip->aclk_perf_pcie);
-err_aclk_perf_pcie:
- clk_disable_unprepare(rockchip->aclk_pcie);
- return err;
}
EXPORT_SYMBOL_GPL(rockchip_pcie_enable_clocks);
-void rockchip_pcie_disable_clocks(void *data)
+void rockchip_pcie_disable_clocks(struct rockchip_pcie *rockchip)
{
- struct rockchip_pcie *rockchip = data;
- clk_disable_unprepare(rockchip->clk_pcie_pm);
- clk_disable_unprepare(rockchip->hclk_pcie);
- clk_disable_unprepare(rockchip->aclk_perf_pcie);
- clk_disable_unprepare(rockchip->aclk_pcie);
+ clk_bulk_disable_unprepare(rockchip->num_clks, rockchip->clks);
}
EXPORT_SYMBOL_GPL(rockchip_pcie_disable_clocks);
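
The clock conversion above follows the matching clk_bulk pattern. A short illustrative sketch (names are not from the driver) of getting, enabling and disabling every clock listed in the device node:

#include <linux/clk.h>
#include <linux/device.h>

struct foo_clocks {
	struct clk_bulk_data *clks;
	int num_clks;
};

static int foo_enable_clocks(struct device *dev, struct foo_clocks *f)
{
	/* Grabs every "clocks" entry from the DT node in a single call. */
	f->num_clks = devm_clk_bulk_get_all(dev, &f->clks);
	if (f->num_clks < 0)
		return dev_err_probe(dev, f->num_clks, "failed to get clocks\n");

	return clk_bulk_prepare_enable(f->num_clks, f->clks);
}

static void foo_disable_clocks(struct foo_clocks *f)
{
	clk_bulk_disable_unprepare(f->num_clks, f->clks);
}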
diff --git a/drivers/pci/controller/pcie-rockchip.h b/drivers/pci/controller/pcie-rockchip.h
index 6111de35f84c..14954f43e5e9 100644
--- a/drivers/pci/controller/pcie-rockchip.h
+++ b/drivers/pci/controller/pcie-rockchip.h
@@ -11,9 +11,11 @@
#ifndef _PCIE_ROCKCHIP_H
#define _PCIE_ROCKCHIP_H
+#include <linux/clk.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/pci-ecam.h>
+#include <linux/reset.h>
/*
* The upper 16 bits of PCIE_CLIENT_CONFIG are a write mask for the lower 16
@@ -26,12 +28,14 @@
#define MAX_LANE_NUM 4
#define MAX_REGION_LIMIT 32
#define MIN_EP_APERTURE 28
+#define LINK_TRAIN_TIMEOUT (500 * USEC_PER_MSEC)
#define PCIE_CLIENT_BASE 0x0
#define PCIE_CLIENT_CONFIG (PCIE_CLIENT_BASE + 0x00)
#define PCIE_CLIENT_CONF_ENABLE HIWORD_UPDATE_BIT(0x0001)
#define PCIE_CLIENT_CONF_DISABLE HIWORD_UPDATE(0x0001, 0)
#define PCIE_CLIENT_LINK_TRAIN_ENABLE HIWORD_UPDATE_BIT(0x0002)
+#define PCIE_CLIENT_LINK_TRAIN_DISABLE HIWORD_UPDATE(0x0002, 0)
#define PCIE_CLIENT_ARI_ENABLE HIWORD_UPDATE_BIT(0x0008)
#define PCIE_CLIENT_CONF_LANE_NUM(x) HIWORD_UPDATE(0x0030, ENCODE_LANES(x))
#define PCIE_CLIENT_MODE_RC HIWORD_UPDATE_BIT(0x0040)
@@ -49,6 +53,10 @@
#define PCIE_CLIENT_DEBUG_LTSSM_MASK GENMASK(5, 0)
#define PCIE_CLIENT_DEBUG_LTSSM_L1 0x18
#define PCIE_CLIENT_DEBUG_LTSSM_L2 0x19
+#define PCIE_CLIENT_BASIC_STATUS0 (PCIE_CLIENT_BASE + 0x44)
+#define PCIE_CLIENT_NEG_LINK_WIDTH_MASK GENMASK(7, 6)
+#define PCIE_CLIENT_NEG_LINK_WIDTH_SHIFT 6
+#define PCIE_CLIENT_NEG_LINK_SPEED BIT(5)
#define PCIE_CLIENT_BASIC_STATUS1 (PCIE_CLIENT_BASE + 0x48)
#define PCIE_CLIENT_LINK_STATUS_UP 0x00300000
#define PCIE_CLIENT_LINK_STATUS_MASK 0x00300000
@@ -86,6 +94,8 @@
#define PCIE_CORE_CTRL_MGMT_BASE 0x900000
#define PCIE_CORE_CTRL (PCIE_CORE_CTRL_MGMT_BASE + 0x000)
+#define PCIE_CORE_PL_CONF_LS_MASK 0x00000001
+#define PCIE_CORE_PL_CONF_LS_READY 0x00000001
#define PCIE_CORE_PL_CONF_SPEED_5G 0x00000008
#define PCIE_CORE_PL_CONF_SPEED_MASK 0x00000018
#define PCIE_CORE_PL_CONF_LANE_MASK 0x00000006
@@ -143,6 +153,7 @@
#define PCIE_RC_CONFIG_BASE 0xa00000
#define PCIE_EP_CONFIG_BASE 0xa00000
#define PCIE_EP_CONFIG_DID_VID (PCIE_EP_CONFIG_BASE + 0x00)
+#define PCIE_EP_CONFIG_LCS (PCIE_EP_CONFIG_BASE + 0xd0)
#define PCIE_RC_CONFIG_RID_CCR (PCIE_RC_CONFIG_BASE + 0x08)
#define PCIE_RC_CONFIG_DCR (PCIE_RC_CONFIG_BASE + 0xc4)
#define PCIE_RC_CONFIG_DCR_CSPL_SHIFT 18
@@ -154,6 +165,7 @@
#define PCIE_RC_CONFIG_LINK_CAP (PCIE_RC_CONFIG_BASE + 0xcc)
#define PCIE_RC_CONFIG_LINK_CAP_L0S BIT(10)
#define PCIE_RC_CONFIG_LCS (PCIE_RC_CONFIG_BASE + 0xd0)
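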
#define PCIE_RC_CONFIG_L1_SUBSTATE_CTRL2 (PCIE_RC_CONFIG_BASE + 0x90c)
#define PCIE_RC_CONFIG_THP_CAP (PCIE_RC_CONFIG_BASE + 0x274)
#define PCIE_RC_CONFIG_THP_CAP_NEXT_MASK GENMASK(31, 20)
@@ -188,9 +200,10 @@
#define AXI_WRAPPER_NOR_MSG 0xc
#define PCIE_RC_SEND_PME_OFF 0x11960
-#define ROCKCHIP_VENDOR_ID 0x1d87
#define PCIE_LINK_IS_L2(x) \
(((x) & PCIE_CLIENT_DEBUG_LTSSM_MASK) == PCIE_CLIENT_DEBUG_LTSSM_L2)
+#define PCIE_LINK_TRAINING_DONE(x) \
+ (((x) & PCIE_CORE_PL_CONF_LS_MASK) == PCIE_CORE_PL_CONF_LS_READY)
#define PCIE_LINK_UP(x) \
(((x) & PCIE_CLIENT_LINK_STATUS_MASK) == PCIE_CLIENT_LINK_STATUS_UP)
#define PCIE_LINK_IS_GEN2(x) \
@@ -241,10 +254,20 @@
#define ROCKCHIP_PCIE_EP_MSIX_CAP_CP_MASK GENMASK(15, 8)
#define ROCKCHIP_PCIE_EP_DUMMY_IRQ_ADDR 0x1
#define ROCKCHIP_PCIE_EP_PCI_LEGACY_IRQ_ADDR 0x3
#define ROCKCHIP_PCIE_EP_FUNC_BASE(fn) \
(PCIE_EP_PF_CONFIG_REGS_BASE + (((fn) << 12) & GENMASK(19, 12)))
#define ROCKCHIP_PCIE_EP_VIRT_FUNC_BASE(fn) \
(PCIE_EP_PF_CONFIG_REGS_BASE + 0x10000 + (((fn) << 12) & GENMASK(19, 12)))
+
+#define ROCKCHIP_PCIE_AT_MIN_NUM_BITS 8
+#define ROCKCHIP_PCIE_AT_MAX_NUM_BITS 20
+#define ROCKCHIP_PCIE_AT_SIZE_ALIGN (1UL << ROCKCHIP_PCIE_AT_MIN_NUM_BITS)
+
#define ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar) \
(PCIE_CORE_AXI_CONF_BASE + 0x0828 + (fn) * 0x0040 + (bar) * 0x0008)
#define ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar) \
@@ -287,27 +310,36 @@
(((c) << ((b) * 8 + 5)) & \
ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b))
+#define ROCKCHIP_NUM_PM_RSTS ARRAY_SIZE(rockchip_pci_pm_rsts)
+#define ROCKCHIP_NUM_CORE_RSTS ARRAY_SIZE(rockchip_pci_core_rsts)
+
+static const char * const rockchip_pci_pm_rsts[] = {
+ "pm",
+ "pclk",
+ "aclk",
+};
+
+static const char * const rockchip_pci_core_rsts[] = {
+ "mgmt-sticky",
+ "core",
+ "mgmt",
+ "pipe",
+};
+
struct rockchip_pcie {
void __iomem *reg_base; /* DT axi-base */
void __iomem *apb_base; /* DT apb-base */
bool legacy_phy;
struct phy *phys[MAX_LANE_NUM];
- struct reset_control *core_rst;
- struct reset_control *mgmt_rst;
- struct reset_control *mgmt_sticky_rst;
- struct reset_control *pipe_rst;
- struct reset_control *pm_rst;
- struct reset_control *aclk_rst;
- struct reset_control *pclk_rst;
- struct clk *aclk_pcie;
- struct clk *aclk_perf_pcie;
- struct clk *hclk_pcie;
- struct clk *clk_pcie_pm;
+ struct reset_control_bulk_data pm_rsts[ROCKCHIP_NUM_PM_RSTS];
+ struct reset_control_bulk_data core_rsts[ROCKCHIP_NUM_CORE_RSTS];
+ struct clk_bulk_data *clks;
+ int num_clks;
struct regulator *vpcie12v; /* 12V power supply */
struct regulator *vpcie3v3; /* 3.3V power supply */
struct regulator *vpcie1v8; /* 1.8V power supply */
struct regulator *vpcie0v9; /* 0.9V power supply */
- struct gpio_desc *ep_gpio;
+ struct gpio_desc *perst_gpio;
u32 lanes;
u8 lanes_map;
int link_gen;
@@ -336,7 +368,7 @@ int rockchip_pcie_init_port(struct rockchip_pcie *rockchip);
int rockchip_pcie_get_phys(struct rockchip_pcie *rockchip);
void rockchip_pcie_deinit_phys(struct rockchip_pcie *rockchip);
int rockchip_pcie_enable_clocks(struct rockchip_pcie *rockchip);
-void rockchip_pcie_disable_clocks(void *data);
+void rockchip_pcie_disable_clocks(struct rockchip_pcie *rockchip);
void rockchip_pcie_cfg_configuration_accesses(
struct rockchip_pcie *rockchip, u32 type);
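
The new PCIE_CLIENT_BASIC_STATUS0, PCIE_LINK_TRAINING_DONE() and LINK_TRAIN_TIMEOUT additions lend themselves to a polling loop. A hedged sketch of how a caller might wait for training to complete (the function below is illustrative and not part of this series):

#include <linux/iopoll.h>

static int foo_wait_for_link_training(struct rockchip_pcie *rockchip)
{
	u32 status;

	/*
	 * The PCIE_CORE_PL_CONF_LS_* bits sit in PCIE_CORE_CTRL, so poll that
	 * register through the APB window until the link-status field reports
	 * ready or the 500 ms timeout expires.
	 */
	return readl_poll_timeout(rockchip->apb_base + PCIE_CORE_CTRL, status,
				  PCIE_LINK_TRAINING_DONE(status), 50,
				  LINK_TRAIN_TIMEOUT);
}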
diff --git a/drivers/pci/controller/pcie-xilinx-cpm.c b/drivers/pci/controller/pcie-xilinx-cpm.c
index a0f5e1d67b04..d38f27e20761 100644
--- a/drivers/pci/controller/pcie-xilinx-cpm.c
+++ b/drivers/pci/controller/pcie-xilinx-cpm.c
@@ -30,11 +30,14 @@
#define XILINX_CPM_PCIE_REG_IDRN_MASK 0x00000E3C
#define XILINX_CPM_PCIE_MISC_IR_STATUS 0x00000340
#define XILINX_CPM_PCIE_MISC_IR_ENABLE 0x00000348
-#define XILINX_CPM_PCIE_MISC_IR_LOCAL BIT(1)
+#define XILINX_CPM_PCIE0_MISC_IR_LOCAL BIT(1)
+#define XILINX_CPM_PCIE1_MISC_IR_LOCAL BIT(2)
-#define XILINX_CPM_PCIE_IR_STATUS 0x000002A0
-#define XILINX_CPM_PCIE_IR_ENABLE 0x000002A8
-#define XILINX_CPM_PCIE_IR_LOCAL BIT(0)
+#define XILINX_CPM_PCIE0_IR_STATUS 0x000002A0
+#define XILINX_CPM_PCIE1_IR_STATUS 0x000002B4
+#define XILINX_CPM_PCIE0_IR_ENABLE 0x000002A8
+#define XILINX_CPM_PCIE1_IR_ENABLE 0x000002BC
+#define XILINX_CPM_PCIE_IR_LOCAL BIT(0)
#define IMR(x) BIT(XILINX_PCIE_INTR_ ##x)
@@ -80,14 +83,22 @@
enum xilinx_cpm_version {
CPM,
CPM5,
+ CPM5_HOST1,
+ CPM5NC_HOST,
};
/**
* struct xilinx_cpm_variant - CPM variant information
* @version: CPM version
+ * @ir_status: Offset for the error interrupt status register
+ * @ir_enable: Offset for the CPM5 local error interrupt enable register
+ * @ir_misc_value: A bitmask for the miscellaneous interrupt status
*/
struct xilinx_cpm_variant {
enum xilinx_cpm_version version;
+ u32 ir_status;
+ u32 ir_enable;
+ u32 ir_misc_value;
};
/**
@@ -269,6 +280,7 @@ static void xilinx_cpm_pcie_event_flow(struct irq_desc *desc)
{
struct xilinx_cpm_pcie *port = irq_desc_get_handler_data(desc);
struct irq_chip *chip = irq_desc_get_chip(desc);
+ const struct xilinx_cpm_variant *variant = port->variant;
unsigned long val;
int i;
@@ -279,11 +291,11 @@ static void xilinx_cpm_pcie_event_flow(struct irq_desc *desc)
generic_handle_domain_irq(port->cpm_domain, i);
pcie_write(port, val, XILINX_CPM_PCIE_REG_IDR);
- if (port->variant->version == CPM5) {
- val = readl_relaxed(port->cpm_base + XILINX_CPM_PCIE_IR_STATUS);
+ if (variant->ir_status) {
+ val = readl_relaxed(port->cpm_base + variant->ir_status);
if (val)
writel_relaxed(val, port->cpm_base +
- XILINX_CPM_PCIE_IR_STATUS);
+ variant->ir_status);
}
/*
@@ -383,17 +395,15 @@ static int xilinx_cpm_pcie_init_irq_domain(struct xilinx_cpm_pcie *port)
return -EINVAL;
}
- port->cpm_domain = irq_domain_add_linear(pcie_intc_node, 32,
- &event_domain_ops,
- port);
+ port->cpm_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node), 32,
+ &event_domain_ops, port);
if (!port->cpm_domain)
goto out;
irq_domain_update_bus_token(port->cpm_domain, DOMAIN_BUS_NEXUS);
- port->intx_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
- &intx_domain_ops,
- port);
+ port->intx_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node), PCI_NUM_INTX,
+ &intx_domain_ops, port);
if (!port->intx_domain)
goto out;
@@ -465,6 +475,11 @@ static int xilinx_cpm_setup_irq(struct xilinx_cpm_pcie *port)
*/
static void xilinx_cpm_pcie_init_port(struct xilinx_cpm_pcie *port)
{
+ const struct xilinx_cpm_variant *variant = port->variant;
+
+ if (variant->version == CPM5NC_HOST)
+ return;
+
if (cpm_pcie_link_up(port))
dev_info(port->dev, "PCIe Link is UP\n");
else
@@ -483,15 +498,15 @@ static void xilinx_cpm_pcie_init_port(struct xilinx_cpm_pcie *port)
* XILINX_CPM_PCIE_MISC_IR_ENABLE register is mapped to
* CPM SLCR block.
*/
- writel(XILINX_CPM_PCIE_MISC_IR_LOCAL,
+ writel(variant->ir_misc_value,
port->cpm_base + XILINX_CPM_PCIE_MISC_IR_ENABLE);
- if (port->variant->version == CPM5) {
+ if (variant->ir_enable) {
writel(XILINX_CPM_PCIE_IR_LOCAL,
- port->cpm_base + XILINX_CPM_PCIE_IR_ENABLE);
+ port->cpm_base + variant->ir_enable);
}
- /* Enable the Bridge enable bit */
+ /* Set Bridge enable bit */
pcie_write(port, pcie_read(port, XILINX_CPM_PCIE_REG_RPSC) |
XILINX_CPM_PCIE_REG_RPSC_BEN,
XILINX_CPM_PCIE_REG_RPSC);
@@ -525,7 +540,8 @@ static int xilinx_cpm_pcie_parse_dt(struct xilinx_cpm_pcie *port,
if (IS_ERR(port->cfg))
return PTR_ERR(port->cfg);
- if (port->variant->version == CPM5) {
+ if (port->variant->version == CPM5 ||
+ port->variant->version == CPM5_HOST1) {
port->reg_base = devm_platform_ioremap_resource_byname(pdev,
"cpm_csr");
if (IS_ERR(port->reg_base))
@@ -565,28 +581,34 @@ static int xilinx_cpm_pcie_probe(struct platform_device *pdev)
port->dev = dev;
- err = xilinx_cpm_pcie_init_irq_domain(port);
- if (err)
- return err;
+ port->variant = of_device_get_match_data(dev);
- bus = resource_list_first_type(&bridge->windows, IORESOURCE_BUS);
- if (!bus)
- return -ENODEV;
+ if (port->variant->version != CPM5NC_HOST) {
+ err = xilinx_cpm_pcie_init_irq_domain(port);
+ if (err)
+ return err;
+ }
- port->variant = of_device_get_match_data(dev);
+ bus = resource_list_first_type(&bridge->windows, IORESOURCE_BUS);
+ if (!bus) {
+ err = -ENODEV;
+ goto err_free_irq_domains;
+ }
err = xilinx_cpm_pcie_parse_dt(port, bus->res);
if (err) {
dev_err(dev, "Parsing DT failed\n");
- goto err_parse_dt;
+ goto err_free_irq_domains;
}
xilinx_cpm_pcie_init_port(port);
- err = xilinx_cpm_setup_irq(port);
- if (err) {
- dev_err(dev, "Failed to set up interrupts\n");
- goto err_setup_irq;
+ if (port->variant->version != CPM5NC_HOST) {
+ err = xilinx_cpm_setup_irq(port);
+ if (err) {
+ dev_err(dev, "Failed to set up interrupts\n");
+ goto err_setup_irq;
+ }
}
bridge->sysdata = port->cfg;
@@ -599,20 +621,37 @@ static int xilinx_cpm_pcie_probe(struct platform_device *pdev)
return 0;
err_host_bridge:
- xilinx_cpm_free_interrupts(port);
+ if (port->variant->version != CPM5NC_HOST)
+ xilinx_cpm_free_interrupts(port);
err_setup_irq:
pci_ecam_free(port->cfg);
-err_parse_dt:
- xilinx_cpm_free_irq_domains(port);
+err_free_irq_domains:
+ if (port->variant->version != CPM5NC_HOST)
+ xilinx_cpm_free_irq_domains(port);
return err;
}
static const struct xilinx_cpm_variant cpm_host = {
.version = CPM,
+ .ir_misc_value = XILINX_CPM_PCIE0_MISC_IR_LOCAL,
};
static const struct xilinx_cpm_variant cpm5_host = {
.version = CPM5,
+ .ir_misc_value = XILINX_CPM_PCIE0_MISC_IR_LOCAL,
+ .ir_status = XILINX_CPM_PCIE0_IR_STATUS,
+ .ir_enable = XILINX_CPM_PCIE0_IR_ENABLE,
+};
+
+static const struct xilinx_cpm_variant cpm5_host1 = {
+ .version = CPM5_HOST1,
+ .ir_misc_value = XILINX_CPM_PCIE1_MISC_IR_LOCAL,
+ .ir_status = XILINX_CPM_PCIE1_IR_STATUS,
+ .ir_enable = XILINX_CPM_PCIE1_IR_ENABLE,
+};
+
+static const struct xilinx_cpm_variant cpm5n_host = {
+ .version = CPM5NC_HOST,
};
static const struct of_device_id xilinx_cpm_pcie_of_match[] = {
@@ -624,6 +663,14 @@ static const struct of_device_id xilinx_cpm_pcie_of_match[] = {
.compatible = "xlnx,versal-cpm5-host",
.data = &cpm5_host,
},
+ {
+ .compatible = "xlnx,versal-cpm5-host1",
+ .data = &cpm5_host1,
+ },
+ {
+ .compatible = "xlnx,versal-cpm5nc-host",
+ .data = &cpm5n_host,
+ },
{}
};
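
The CPM changes above replace open-coded "version == CPM5" checks with per-compatible variant data. A generic sketch of that match-data pattern (struct and names are illustrative):

#include <linux/mod_devicetable.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

struct foo_variant {
	unsigned int version;
	u32 ir_status;		/* per-variant register offset */
};

static const struct foo_variant foo_v1 = { .version = 1, .ir_status = 0x2a0 };
static const struct foo_variant foo_v2 = { .version = 2, .ir_status = 0x2b4 };

static const struct of_device_id foo_of_match[] = {
	{ .compatible = "vendor,foo-v1", .data = &foo_v1 },
	{ .compatible = "vendor,foo-v2", .data = &foo_v2 },
	{ }
};

static int foo_probe(struct platform_device *pdev)
{
	const struct foo_variant *variant;

	variant = of_device_get_match_data(&pdev->dev);
	if (!variant)
		return -ENODEV;

	/* From here on, branch on variant->ir_status etc. rather than version. */
	return 0;
}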
diff --git a/drivers/pci/controller/pcie-xilinx-dma-pl.c b/drivers/pci/controller/pcie-xilinx-dma-pl.c
index 5be5dfd8398f..dc9690a535e1 100644
--- a/drivers/pci/controller/pcie-xilinx-dma-pl.c
+++ b/drivers/pci/controller/pcie-xilinx-dma-pl.c
@@ -71,10 +71,24 @@
/* Phy Status/Control Register definitions */
#define XILINX_PCIE_DMA_REG_PSCR_LNKUP BIT(11)
+#define QDMA_BRIDGE_BASE_OFF 0xcd8
/* Number of MSI IRQs */
#define XILINX_NUM_MSI_IRQS 64
+enum xilinx_pl_dma_version {
+ XDMA,
+ QDMA,
+};
+
+/**
+ * struct xilinx_pl_dma_variant - PL DMA PCIe variant information
+ * @version: DMA version
+ */
+struct xilinx_pl_dma_variant {
+ enum xilinx_pl_dma_version version;
+};
+
struct xilinx_msi {
struct irq_domain *msi_domain;
unsigned long *bitmap;
@@ -88,6 +102,7 @@ struct xilinx_msi {
* struct pl_dma_pcie - PCIe port information
* @dev: Device pointer
* @reg_base: IO Mapped Register Base
+ * @cfg_base: IO Mapped Configuration Base
* @irq: Interrupt number
* @cfg: Holds mappings of config space window
* @phys_reg_base: Physical address of reg base
@@ -97,10 +112,12 @@ struct xilinx_msi {
* @msi: MSI information
* @intx_irq: INTx error interrupt number
* @lock: Lock protecting shared register access
+ * @variant: PL DMA PCIe version check pointer
*/
struct pl_dma_pcie {
struct device *dev;
void __iomem *reg_base;
+ void __iomem *cfg_base;
int irq;
struct pci_config_window *cfg;
phys_addr_t phys_reg_base;
@@ -110,16 +127,23 @@ struct pl_dma_pcie {
struct xilinx_msi msi;
int intx_irq;
raw_spinlock_t lock;
+ const struct xilinx_pl_dma_variant *variant;
};
static inline u32 pcie_read(struct pl_dma_pcie *port, u32 reg)
{
+ if (port->variant->version == QDMA)
+ return readl(port->reg_base + reg + QDMA_BRIDGE_BASE_OFF);
+
return readl(port->reg_base + reg);
}
static inline void pcie_write(struct pl_dma_pcie *port, u32 val, u32 reg)
{
- writel(val, port->reg_base + reg);
+ if (port->variant->version == QDMA)
+ writel(val, port->reg_base + reg + QDMA_BRIDGE_BASE_OFF);
+ else
+ writel(val, port->reg_base + reg);
}
static inline bool xilinx_pl_dma_pcie_link_up(struct pl_dma_pcie *port)
@@ -173,6 +197,9 @@ static void __iomem *xilinx_pl_dma_pcie_map_bus(struct pci_bus *bus,
if (!xilinx_pl_dma_pcie_valid_device(bus, devfn))
return NULL;
+ if (port->variant->version == QDMA)
+ return port->cfg_base + PCIE_ECAM_OFFSET(bus->number, devfn, where);
+
return port->reg_base + PCIE_ECAM_OFFSET(bus->number, devfn, where);
}
@@ -355,8 +382,8 @@ static struct irq_chip xilinx_msi_irq_chip = {
};
static struct msi_domain_info xilinx_msi_domain_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_MULTI_PCI_MSI),
+ .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_NO_AFFINITY | MSI_FLAG_MULTI_PCI_MSI,
.chip = &xilinx_msi_irq_chip,
};
@@ -370,16 +397,9 @@ static void xilinx_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
msg->data = data->hwirq;
}
-static int xilinx_msi_set_affinity(struct irq_data *irq_data,
- const struct cpumask *mask, bool force)
-{
- return -EINVAL;
-}
-
static struct irq_chip xilinx_irq_chip = {
.name = "pl_dma:MSI",
.irq_compose_msi_msg = xilinx_compose_msi_msg,
- .irq_set_affinity = xilinx_msi_set_affinity,
};
static int xilinx_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
@@ -450,10 +470,10 @@ static int xilinx_pl_dma_pcie_init_msi_irq_domain(struct pl_dma_pcie *port)
struct device *dev = port->dev;
struct xilinx_msi *msi = &port->msi;
int size = BITS_TO_LONGS(XILINX_NUM_MSI_IRQS) * sizeof(long);
- struct fwnode_handle *fwnode = of_node_to_fwnode(port->dev->of_node);
+ struct fwnode_handle *fwnode = of_fwnode_handle(port->dev->of_node);
- msi->dev_domain = irq_domain_add_linear(NULL, XILINX_NUM_MSI_IRQS,
- &dev_msi_domain_ops, port);
+ msi->dev_domain = irq_domain_create_linear(NULL, XILINX_NUM_MSI_IRQS,
+ &dev_msi_domain_ops, port);
if (!msi->dev_domain)
goto out;
@@ -565,15 +585,15 @@ static int xilinx_pl_dma_pcie_init_irq_domain(struct pl_dma_pcie *port)
return -EINVAL;
}
- port->pldma_domain = irq_domain_add_linear(pcie_intc_node, 32,
- &event_domain_ops, port);
+ port->pldma_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node), 32,
+ &event_domain_ops, port);
if (!port->pldma_domain)
return -ENOMEM;
irq_domain_update_bus_token(port->pldma_domain, DOMAIN_BUS_NEXUS);
- port->intx_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
- &intx_domain_ops, port);
+ port->intx_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node), PCI_NUM_INTX,
+ &intx_domain_ops, port);
if (!port->intx_domain) {
dev_err(dev, "Failed to get a INTx IRQ domain\n");
return -ENOMEM;
@@ -731,6 +751,15 @@ static int xilinx_pl_dma_pcie_parse_dt(struct pl_dma_pcie *port,
port->reg_base = port->cfg->win;
+ if (port->variant->version == QDMA) {
+ port->cfg_base = port->cfg->win;
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "breg");
+ port->reg_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(port->reg_base))
+ return PTR_ERR(port->reg_base);
+ port->phys_reg_base = res->start;
+ }
+
err = xilinx_request_msi_irq(port);
if (err) {
pci_ecam_free(port->cfg);
@@ -760,6 +789,8 @@ static int xilinx_pl_dma_pcie_probe(struct platform_device *pdev)
if (!bus)
return -ENODEV;
+ port->variant = of_device_get_match_data(dev);
+
err = xilinx_pl_dma_pcie_parse_dt(port, bus->res);
if (err) {
dev_err(dev, "Parsing DT failed\n");
@@ -791,9 +822,22 @@ err_irq_domain:
return err;
}
+static const struct xilinx_pl_dma_variant xdma_host = {
+ .version = XDMA,
+};
+
+static const struct xilinx_pl_dma_variant qdma_host = {
+ .version = QDMA,
+};
+
static const struct of_device_id xilinx_pl_dma_pcie_of_match[] = {
{
.compatible = "xlnx,xdma-host-3.00",
+ .data = &xdma_host,
+ },
+ {
+ .compatible = "xlnx,qdma-host-3.00",
+ .data = &qdma_host,
},
{}
};
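
The QDMA support above keeps a single set of register macros and hides the bridge offset inside the accessors. A condensed sketch of that accessor pattern with illustrative names:

#include <linux/io.h>

#define FOO_QDMA_BRIDGE_OFF	0xcd8	/* same idea as QDMA_BRIDGE_BASE_OFF */

struct foo_port {
	void __iomem *reg_base;
	bool is_qdma;
};

static inline u32 foo_read(struct foo_port *port, u32 reg)
{
	/* The QDMA variant exposes the identical bridge registers at a fixed offset. */
	if (port->is_qdma)
		reg += FOO_QDMA_BRIDGE_OFF;

	return readl(port->reg_base + reg);
}

static inline void foo_write(struct foo_port *port, u32 val, u32 reg)
{
	if (port->is_qdma)
		reg += FOO_QDMA_BRIDGE_OFF;

	writel(val, port->reg_base + reg);
}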
diff --git a/drivers/pci/controller/pcie-xilinx-nwl.c b/drivers/pci/controller/pcie-xilinx-nwl.c
index 0408f4d612b5..c8b05477b719 100644
--- a/drivers/pci/controller/pcie-xilinx-nwl.c
+++ b/drivers/pci/controller/pcie-xilinx-nwl.c
@@ -19,6 +19,7 @@
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/pci-ecam.h>
+#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/irqchip/chained_irq.h>
@@ -80,8 +81,8 @@
#define MSGF_MISC_SR_NON_FATAL_DEV BIT(22)
#define MSGF_MISC_SR_FATAL_DEV BIT(23)
#define MSGF_MISC_SR_LINK_DOWN BIT(24)
-#define MSGF_MSIC_SR_LINK_AUTO_BWIDTH BIT(25)
-#define MSGF_MSIC_SR_LINK_BWIDTH BIT(26)
+#define MSGF_MISC_SR_LINK_AUTO_BWIDTH BIT(25)
+#define MSGF_MISC_SR_LINK_BWIDTH BIT(26)
#define MSGF_MISC_SR_MASKALL (MSGF_MISC_SR_RXMSG_AVAIL | \
MSGF_MISC_SR_RXMSG_OVER | \
@@ -96,8 +97,8 @@
MSGF_MISC_SR_NON_FATAL_DEV | \
MSGF_MISC_SR_FATAL_DEV | \
MSGF_MISC_SR_LINK_DOWN | \
- MSGF_MSIC_SR_LINK_AUTO_BWIDTH | \
- MSGF_MSIC_SR_LINK_BWIDTH)
+ MSGF_MISC_SR_LINK_AUTO_BWIDTH | \
+ MSGF_MISC_SR_LINK_BWIDTH)
/* Legacy interrupt status mask bits */
#define MSGF_LEG_SR_INTA BIT(0)
@@ -157,6 +158,7 @@ struct nwl_pcie {
void __iomem *breg_base;
void __iomem *pcireg_base;
void __iomem *ecam_base;
+ struct phy *phy[4];
phys_addr_t phys_breg_base; /* Physical Bridge Register Base */
phys_addr_t phys_pcie_reg_base; /* Physical PCIe Controller Base */
phys_addr_t phys_ecam_base; /* Physical Configuration Base */
@@ -267,42 +269,42 @@ static irqreturn_t nwl_pcie_misc_handler(int irq, void *data)
return IRQ_NONE;
if (misc_stat & MSGF_MISC_SR_RXMSG_OVER)
- dev_err(dev, "Received Message FIFO Overflow\n");
+ dev_err_ratelimited(dev, "Received Message FIFO Overflow\n");
if (misc_stat & MSGF_MISC_SR_SLAVE_ERR)
- dev_err(dev, "Slave error\n");
+ dev_err_ratelimited(dev, "Slave error\n");
if (misc_stat & MSGF_MISC_SR_MASTER_ERR)
- dev_err(dev, "Master error\n");
+ dev_err_ratelimited(dev, "Master error\n");
if (misc_stat & MSGF_MISC_SR_I_ADDR_ERR)
- dev_err(dev, "In Misc Ingress address translation error\n");
+ dev_err_ratelimited(dev, "In Misc Ingress address translation error\n");
if (misc_stat & MSGF_MISC_SR_E_ADDR_ERR)
- dev_err(dev, "In Misc Egress address translation error\n");
+ dev_err_ratelimited(dev, "In Misc Egress address translation error\n");
if (misc_stat & MSGF_MISC_SR_FATAL_AER)
- dev_err(dev, "Fatal Error in AER Capability\n");
+ dev_err_ratelimited(dev, "Fatal Error in AER Capability\n");
if (misc_stat & MSGF_MISC_SR_NON_FATAL_AER)
- dev_err(dev, "Non-Fatal Error in AER Capability\n");
+ dev_err_ratelimited(dev, "Non-Fatal Error in AER Capability\n");
if (misc_stat & MSGF_MISC_SR_CORR_AER)
- dev_err(dev, "Correctable Error in AER Capability\n");
+ dev_err_ratelimited(dev, "Correctable Error in AER Capability\n");
if (misc_stat & MSGF_MISC_SR_UR_DETECT)
- dev_err(dev, "Unsupported request Detected\n");
+ dev_err_ratelimited(dev, "Unsupported request Detected\n");
if (misc_stat & MSGF_MISC_SR_NON_FATAL_DEV)
- dev_err(dev, "Non-Fatal Error Detected\n");
+ dev_err_ratelimited(dev, "Non-Fatal Error Detected\n");
if (misc_stat & MSGF_MISC_SR_FATAL_DEV)
- dev_err(dev, "Fatal Error Detected\n");
+ dev_err_ratelimited(dev, "Fatal Error Detected\n");
- if (misc_stat & MSGF_MSIC_SR_LINK_AUTO_BWIDTH)
+ if (misc_stat & MSGF_MISC_SR_LINK_AUTO_BWIDTH)
dev_info(dev, "Link Autonomous Bandwidth Management Status bit set\n");
- if (misc_stat & MSGF_MSIC_SR_LINK_BWIDTH)
+ if (misc_stat & MSGF_MISC_SR_LINK_BWIDTH)
dev_info(dev, "Link Bandwidth Management Status bit set\n");
/* Clear misc interrupt status */
@@ -371,7 +373,7 @@ static void nwl_mask_intx_irq(struct irq_data *data)
u32 mask;
u32 val;
- mask = 1 << (data->hwirq - 1);
+ mask = 1 << data->hwirq;
raw_spin_lock_irqsave(&pcie->leg_mask_lock, flags);
val = nwl_bridge_readl(pcie, MSGF_LEG_MASK);
nwl_bridge_writel(pcie, (val & (~mask)), MSGF_LEG_MASK);
@@ -385,7 +387,7 @@ static void nwl_unmask_intx_irq(struct irq_data *data)
u32 mask;
u32 val;
- mask = 1 << (data->hwirq - 1);
+ mask = 1 << data->hwirq;
raw_spin_lock_irqsave(&pcie->leg_mask_lock, flags);
val = nwl_bridge_readl(pcie, MSGF_LEG_MASK);
nwl_bridge_writel(pcie, (val | mask), MSGF_LEG_MASK);
@@ -425,8 +427,8 @@ static struct irq_chip nwl_msi_irq_chip = {
};
static struct msi_domain_info nwl_msi_domain_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_MULTI_PCI_MSI),
+ .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_NO_AFFINITY | MSI_FLAG_MULTI_PCI_MSI,
.chip = &nwl_msi_irq_chip,
};
#endif
@@ -441,16 +443,9 @@ static void nwl_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
msg->data = data->hwirq;
}
-static int nwl_msi_set_affinity(struct irq_data *irq_data,
- const struct cpumask *mask, bool force)
-{
- return -EINVAL;
-}
-
static struct irq_chip nwl_irq_chip = {
.name = "Xilinx MSI",
.irq_compose_msi_msg = nwl_compose_msi_msg,
- .irq_set_affinity = nwl_msi_set_affinity,
};
static int nwl_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
@@ -500,11 +495,10 @@ static int nwl_pcie_init_msi_irq_domain(struct nwl_pcie *pcie)
{
#ifdef CONFIG_PCI_MSI
struct device *dev = pcie->dev;
- struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node);
+ struct fwnode_handle *fwnode = of_fwnode_handle(dev->of_node);
struct nwl_msi *msi = &pcie->msi;
- msi->dev_domain = irq_domain_add_linear(NULL, INT_PCI_MSI_NR,
- &dev_msi_domain_ops, pcie);
+ msi->dev_domain = irq_domain_create_linear(NULL, INT_PCI_MSI_NR, &dev_msi_domain_ops, pcie);
if (!msi->dev_domain) {
dev_err(dev, "failed to create dev IRQ domain\n");
return -ENOMEM;
@@ -521,6 +515,60 @@ static int nwl_pcie_init_msi_irq_domain(struct nwl_pcie *pcie)
return 0;
}
+static void nwl_pcie_phy_power_off(struct nwl_pcie *pcie, int i)
+{
+ int err = phy_power_off(pcie->phy[i]);
+
+ if (err)
+ dev_err(pcie->dev, "could not power off phy %d (err=%d)\n", i,
+ err);
+}
+
+static void nwl_pcie_phy_exit(struct nwl_pcie *pcie, int i)
+{
+ int err = phy_exit(pcie->phy[i]);
+
+ if (err)
+ dev_err(pcie->dev, "could not exit phy %d (err=%d)\n", i, err);
+}
+
+static int nwl_pcie_phy_enable(struct nwl_pcie *pcie)
+{
+ int i, ret;
+
+ for (i = 0; i < ARRAY_SIZE(pcie->phy); i++) {
+ ret = phy_init(pcie->phy[i]);
+ if (ret)
+ goto err;
+
+ ret = phy_power_on(pcie->phy[i]);
+ if (ret) {
+ nwl_pcie_phy_exit(pcie, i);
+ goto err;
+ }
+ }
+
+ return 0;
+
+err:
+ while (i--) {
+ nwl_pcie_phy_power_off(pcie, i);
+ nwl_pcie_phy_exit(pcie, i);
+ }
+
+ return ret;
+}
+
+static void nwl_pcie_phy_disable(struct nwl_pcie *pcie)
+{
+ int i;
+
+ for (i = ARRAY_SIZE(pcie->phy); i--;) {
+ nwl_pcie_phy_power_off(pcie, i);
+ nwl_pcie_phy_exit(pcie, i);
+ }
+}
+
static int nwl_pcie_init_irq_domain(struct nwl_pcie *pcie)
{
struct device *dev = pcie->dev;
@@ -533,10 +581,8 @@ static int nwl_pcie_init_irq_domain(struct nwl_pcie *pcie)
return -EINVAL;
}
- pcie->intx_irq_domain = irq_domain_add_linear(intc_node,
- PCI_NUM_INTX,
- &intx_domain_ops,
- pcie);
+ pcie->intx_irq_domain = irq_domain_create_linear(of_fwnode_handle(intc_node), PCI_NUM_INTX,
+ &intx_domain_ops, pcie);
of_node_put(intc_node);
if (!pcie->intx_irq_domain) {
dev_err(dev, "failed to create IRQ domain\n");
@@ -732,6 +778,7 @@ static int nwl_pcie_parse_dt(struct nwl_pcie *pcie,
{
struct device *dev = pcie->dev;
struct resource *res;
+ int i;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "breg");
pcie->breg_base = devm_ioremap_resource(dev, res);
@@ -759,6 +806,18 @@ static int nwl_pcie_parse_dt(struct nwl_pcie *pcie,
irq_set_chained_handler_and_data(pcie->irq_intx,
nwl_pcie_leg_handler, pcie);
+
+ for (i = 0; i < ARRAY_SIZE(pcie->phy); i++) {
+ pcie->phy[i] = devm_of_phy_get_by_index(dev, dev->of_node, i);
+ if (PTR_ERR(pcie->phy[i]) == -ENODEV) {
+ pcie->phy[i] = NULL;
+ break;
+ }
+
+ if (IS_ERR(pcie->phy[i]))
+ return PTR_ERR(pcie->phy[i]);
+ }
+
return 0;
}
@@ -779,6 +838,7 @@ static int nwl_pcie_probe(struct platform_device *pdev)
return -ENODEV;
pcie = pci_host_bridge_priv(bridge);
+ platform_set_drvdata(pdev, pcie);
pcie->dev = dev;
@@ -798,16 +858,22 @@ static int nwl_pcie_probe(struct platform_device *pdev)
return err;
}
+ err = nwl_pcie_phy_enable(pcie);
+ if (err) {
+ dev_err(dev, "could not enable PHYs\n");
+ goto err_clk;
+ }
+
err = nwl_pcie_bridge_init(pcie);
if (err) {
dev_err(dev, "HW Initialization failed\n");
- return err;
+ goto err_phy;
}
err = nwl_pcie_init_irq_domain(pcie);
if (err) {
dev_err(dev, "Failed creating IRQ Domain\n");
- return err;
+ goto err_phy;
}
bridge->sysdata = pcie;
@@ -817,11 +883,27 @@ static int nwl_pcie_probe(struct platform_device *pdev)
err = nwl_pcie_enable_msi(pcie);
if (err < 0) {
dev_err(dev, "failed to enable MSI support: %d\n", err);
- return err;
+ goto err_phy;
}
}
- return pci_host_probe(bridge);
+ err = pci_host_probe(bridge);
+ if (!err)
+ return 0;
+
+err_phy:
+ nwl_pcie_phy_disable(pcie);
+err_clk:
+ clk_disable_unprepare(pcie->clk);
+ return err;
+}
+
+static void nwl_pcie_remove(struct platform_device *pdev)
+{
+ struct nwl_pcie *pcie = platform_get_drvdata(pdev);
+
+ nwl_pcie_phy_disable(pcie);
+ clk_disable_unprepare(pcie->clk);
}
static struct platform_driver nwl_pcie_driver = {
@@ -831,5 +913,6 @@ static struct platform_driver nwl_pcie_driver = {
.of_match_table = nwl_pcie_of_match,
},
.probe = nwl_pcie_probe,
+ .remove = nwl_pcie_remove,
};
builtin_platform_driver(nwl_pcie_driver);
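
The optional-PHY handling added to the NWL driver follows a common idiom: look up per-lane PHYs by index, treat -ENODEV as "not wired up" and stop, and propagate any other error. A brief sketch with illustrative names:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/phy/phy.h>

static int foo_get_lane_phys(struct device *dev, struct phy *phys[], int max)
{
	int i;

	for (i = 0; i < max; i++) {
		phys[i] = devm_of_phy_get_by_index(dev, dev->of_node, i);
		if (PTR_ERR(phys[i]) == -ENODEV) {
			/* PHYs are optional; the first missing entry ends the list. */
			phys[i] = NULL;
			break;
		}
		if (IS_ERR(phys[i]))
			return PTR_ERR(phys[i]);
	}

	return 0;
}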
diff --git a/drivers/pci/controller/pcie-xilinx.c b/drivers/pci/controller/pcie-xilinx.c
index cb6e9f7b0152..e36aa874bae9 100644
--- a/drivers/pci/controller/pcie-xilinx.c
+++ b/drivers/pci/controller/pcie-xilinx.c
@@ -208,11 +208,6 @@ static struct irq_chip xilinx_msi_top_chip = {
.irq_ack = xilinx_msi_top_irq_ack,
};
-static int xilinx_msi_set_affinity(struct irq_data *d, const struct cpumask *mask, bool force)
-{
- return -EINVAL;
-}
-
static void xilinx_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
struct xilinx_pcie *pcie = irq_data_get_irq_chip_data(data);
@@ -225,7 +220,6 @@ static void xilinx_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
static struct irq_chip xilinx_msi_bottom_chip = {
.name = "Xilinx MSI",
- .irq_set_affinity = xilinx_msi_set_affinity,
.irq_compose_msi_msg = xilinx_compose_msi_msg,
};
@@ -271,7 +265,8 @@ static const struct irq_domain_ops xilinx_msi_domain_ops = {
};
static struct msi_domain_info xilinx_msi_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS),
+ .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_NO_AFFINITY,
.chip = &xilinx_msi_top_chip,
};
@@ -466,9 +461,8 @@ static int xilinx_pcie_init_irq_domain(struct xilinx_pcie *pcie)
return -ENODEV;
}
- pcie->leg_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
- &intx_domain_ops,
- pcie);
+ pcie->leg_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node), PCI_NUM_INTX,
+ &intx_domain_ops, pcie);
of_node_put(pcie_intc_node);
if (!pcie->leg_domain) {
dev_err(dev, "Failed to get a INTx IRQ domain\n");
diff --git a/drivers/pci/controller/plda/Kconfig b/drivers/pci/controller/plda/Kconfig
new file mode 100644
index 000000000000..c0e14146d7e4
--- /dev/null
+++ b/drivers/pci/controller/plda/Kconfig
@@ -0,0 +1,30 @@
+# SPDX-License-Identifier: GPL-2.0
+
+menu "PLDA-based PCIe controllers"
+ depends on PCI
+
+config PCIE_PLDA_HOST
+ bool
+
+config PCIE_MICROCHIP_HOST
+ tristate "Microchip AXI PCIe controller"
+ depends on PCI_MSI && OF
+ select PCI_HOST_COMMON
+ select PCIE_PLDA_HOST
+ help
+ Say Y here if you want the kernel to support the Microchip AXI PCIe
+ Host Bridge driver.
+
+config PCIE_STARFIVE_HOST
+ tristate "StarFive PCIe host controller"
+ depends on PCI_MSI && OF
+ depends on ARCH_STARFIVE || COMPILE_TEST
+ select PCIE_PLDA_HOST
+ help
+ Say Y here if you want to support the StarFive PCIe controller in
+ host mode. The StarFive PCIe controller uses the PLDA PCIe core.
+
+ If you choose to build this driver as a module it will be dynamically
+ linked and the module will be called pcie-starfive.ko.
+
+endmenu
diff --git a/drivers/pci/controller/plda/Makefile b/drivers/pci/controller/plda/Makefile
new file mode 100644
index 000000000000..0ac6851bed48
--- /dev/null
+++ b/drivers/pci/controller/plda/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_PCIE_PLDA_HOST) += pcie-plda-host.o
+obj-$(CONFIG_PCIE_MICROCHIP_HOST) += pcie-microchip-host.o
+obj-$(CONFIG_PCIE_STARFIVE_HOST) += pcie-starfive.o
diff --git a/drivers/pci/controller/plda/pcie-microchip-host.c b/drivers/pci/controller/plda/pcie-microchip-host.c
new file mode 100644
index 000000000000..3fdfffdf0270
--- /dev/null
+++ b/drivers/pci/controller/plda/pcie-microchip-host.c
@@ -0,0 +1,833 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Microchip AXI PCIe Bridge host controller driver
+ *
+ * Copyright (c) 2018 - 2020 Microchip Corporation. All rights reserved.
+ *
+ * Author: Daire McNamara <daire.mcnamara@microchip.com>
+ */
+
+#include <linux/align.h>
+#include <linux/bits.h>
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/log2.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/of_address.h>
+#include <linux/of_pci.h>
+#include <linux/pci-ecam.h>
+#include <linux/platform_device.h>
+#include <linux/wordpart.h>
+
+#include "../../pci.h"
+#include "pcie-plda.h"
+
+#define MC_MAX_NUM_INBOUND_WINDOWS 8
+#define MPFS_NC_BOUNCE_ADDR 0x80000000
+
+/* PCIe Bridge Phy and Controller Phy offsets */
+#define MC_PCIE1_BRIDGE_ADDR 0x00008000u
+#define MC_PCIE1_CTRL_ADDR 0x0000a000u
+
+/* PCIe Controller Phy Regs */
+#define SEC_ERROR_EVENT_CNT 0x20
+#define DED_ERROR_EVENT_CNT 0x24
+#define SEC_ERROR_INT 0x28
+#define SEC_ERROR_INT_TX_RAM_SEC_ERR_INT GENMASK(3, 0)
+#define SEC_ERROR_INT_RX_RAM_SEC_ERR_INT GENMASK(7, 4)
+#define SEC_ERROR_INT_PCIE2AXI_RAM_SEC_ERR_INT GENMASK(11, 8)
+#define SEC_ERROR_INT_AXI2PCIE_RAM_SEC_ERR_INT GENMASK(15, 12)
+#define SEC_ERROR_INT_ALL_RAM_SEC_ERR_INT GENMASK(15, 0)
+#define NUM_SEC_ERROR_INTS (4)
+#define SEC_ERROR_INT_MASK 0x2c
+#define DED_ERROR_INT 0x30
+#define DED_ERROR_INT_TX_RAM_DED_ERR_INT GENMASK(3, 0)
+#define DED_ERROR_INT_RX_RAM_DED_ERR_INT GENMASK(7, 4)
+#define DED_ERROR_INT_PCIE2AXI_RAM_DED_ERR_INT GENMASK(11, 8)
+#define DED_ERROR_INT_AXI2PCIE_RAM_DED_ERR_INT GENMASK(15, 12)
+#define DED_ERROR_INT_ALL_RAM_DED_ERR_INT GENMASK(15, 0)
+#define NUM_DED_ERROR_INTS (4)
+#define DED_ERROR_INT_MASK 0x34
+#define ECC_CONTROL 0x38
+#define ECC_CONTROL_TX_RAM_INJ_ERROR_0 BIT(0)
+#define ECC_CONTROL_TX_RAM_INJ_ERROR_1 BIT(1)
+#define ECC_CONTROL_TX_RAM_INJ_ERROR_2 BIT(2)
+#define ECC_CONTROL_TX_RAM_INJ_ERROR_3 BIT(3)
+#define ECC_CONTROL_RX_RAM_INJ_ERROR_0 BIT(4)
+#define ECC_CONTROL_RX_RAM_INJ_ERROR_1 BIT(5)
+#define ECC_CONTROL_RX_RAM_INJ_ERROR_2 BIT(6)
+#define ECC_CONTROL_RX_RAM_INJ_ERROR_3 BIT(7)
+#define ECC_CONTROL_PCIE2AXI_RAM_INJ_ERROR_0 BIT(8)
+#define ECC_CONTROL_PCIE2AXI_RAM_INJ_ERROR_1 BIT(9)
+#define ECC_CONTROL_PCIE2AXI_RAM_INJ_ERROR_2 BIT(10)
+#define ECC_CONTROL_PCIE2AXI_RAM_INJ_ERROR_3 BIT(11)
+#define ECC_CONTROL_AXI2PCIE_RAM_INJ_ERROR_0 BIT(12)
+#define ECC_CONTROL_AXI2PCIE_RAM_INJ_ERROR_1 BIT(13)
+#define ECC_CONTROL_AXI2PCIE_RAM_INJ_ERROR_2 BIT(14)
+#define ECC_CONTROL_AXI2PCIE_RAM_INJ_ERROR_3 BIT(15)
+#define ECC_CONTROL_TX_RAM_ECC_BYPASS BIT(24)
+#define ECC_CONTROL_RX_RAM_ECC_BYPASS BIT(25)
+#define ECC_CONTROL_PCIE2AXI_RAM_ECC_BYPASS BIT(26)
+#define ECC_CONTROL_AXI2PCIE_RAM_ECC_BYPASS BIT(27)
+#define PCIE_EVENT_INT 0x14c
+#define PCIE_EVENT_INT_L2_EXIT_INT BIT(0)
+#define PCIE_EVENT_INT_HOTRST_EXIT_INT BIT(1)
+#define PCIE_EVENT_INT_DLUP_EXIT_INT BIT(2)
+#define PCIE_EVENT_INT_MASK GENMASK(2, 0)
+#define PCIE_EVENT_INT_L2_EXIT_INT_MASK BIT(16)
+#define PCIE_EVENT_INT_HOTRST_EXIT_INT_MASK BIT(17)
+#define PCIE_EVENT_INT_DLUP_EXIT_INT_MASK BIT(18)
+#define PCIE_EVENT_INT_ENB_MASK GENMASK(18, 16)
+#define PCIE_EVENT_INT_ENB_SHIFT 16
+#define NUM_PCIE_EVENTS (3)
+
+/* PCIe Config space MSI capability structure */
+#define MC_MSI_CAP_CTRL_OFFSET 0xe0u
+
+/* Events */
+#define EVENT_PCIE_L2_EXIT 0
+#define EVENT_PCIE_HOTRST_EXIT 1
+#define EVENT_PCIE_DLUP_EXIT 2
+#define EVENT_SEC_TX_RAM_SEC_ERR 3
+#define EVENT_SEC_RX_RAM_SEC_ERR 4
+#define EVENT_SEC_PCIE2AXI_RAM_SEC_ERR 5
+#define EVENT_SEC_AXI2PCIE_RAM_SEC_ERR 6
+#define EVENT_DED_TX_RAM_DED_ERR 7
+#define EVENT_DED_RX_RAM_DED_ERR 8
+#define EVENT_DED_PCIE2AXI_RAM_DED_ERR 9
+#define EVENT_DED_AXI2PCIE_RAM_DED_ERR 10
+#define EVENT_LOCAL_DMA_END_ENGINE_0 11
+#define EVENT_LOCAL_DMA_END_ENGINE_1 12
+#define EVENT_LOCAL_DMA_ERROR_ENGINE_0 13
+#define EVENT_LOCAL_DMA_ERROR_ENGINE_1 14
+#define NUM_MC_EVENTS 15
+#define EVENT_LOCAL_A_ATR_EVT_POST_ERR (NUM_MC_EVENTS + PLDA_AXI_POST_ERR)
+#define EVENT_LOCAL_A_ATR_EVT_FETCH_ERR (NUM_MC_EVENTS + PLDA_AXI_FETCH_ERR)
+#define EVENT_LOCAL_A_ATR_EVT_DISCARD_ERR (NUM_MC_EVENTS + PLDA_AXI_DISCARD_ERR)
+#define EVENT_LOCAL_A_ATR_EVT_DOORBELL (NUM_MC_EVENTS + PLDA_AXI_DOORBELL)
+#define EVENT_LOCAL_P_ATR_EVT_POST_ERR (NUM_MC_EVENTS + PLDA_PCIE_POST_ERR)
+#define EVENT_LOCAL_P_ATR_EVT_FETCH_ERR (NUM_MC_EVENTS + PLDA_PCIE_FETCH_ERR)
+#define EVENT_LOCAL_P_ATR_EVT_DISCARD_ERR (NUM_MC_EVENTS + PLDA_PCIE_DISCARD_ERR)
+#define EVENT_LOCAL_P_ATR_EVT_DOORBELL (NUM_MC_EVENTS + PLDA_PCIE_DOORBELL)
+#define EVENT_LOCAL_PM_MSI_INT_INTX (NUM_MC_EVENTS + PLDA_INTX)
+#define EVENT_LOCAL_PM_MSI_INT_MSI (NUM_MC_EVENTS + PLDA_MSI)
+#define EVENT_LOCAL_PM_MSI_INT_AER_EVT (NUM_MC_EVENTS + PLDA_AER_EVENT)
+#define EVENT_LOCAL_PM_MSI_INT_EVENTS (NUM_MC_EVENTS + PLDA_MISC_EVENTS)
+#define EVENT_LOCAL_PM_MSI_INT_SYS_ERR (NUM_MC_EVENTS + PLDA_SYS_ERR)
+#define NUM_EVENTS (NUM_MC_EVENTS + PLDA_INT_EVENT_NUM)
+
+#define PCIE_EVENT_CAUSE(x, s) \
+ [EVENT_PCIE_ ## x] = { __stringify(x), s }
+
+#define SEC_ERROR_CAUSE(x, s) \
+ [EVENT_SEC_ ## x] = { __stringify(x), s }
+
+#define DED_ERROR_CAUSE(x, s) \
+ [EVENT_DED_ ## x] = { __stringify(x), s }
+
+#define LOCAL_EVENT_CAUSE(x, s) \
+ [EVENT_LOCAL_ ## x] = { __stringify(x), s }
+
+#define PCIE_EVENT(x) \
+ .offset = PCIE_EVENT_INT, \
+ .mask_offset = PCIE_EVENT_INT, \
+ .mask_high = 1, \
+ .mask = PCIE_EVENT_INT_ ## x ## _INT, \
+ .enb_mask = PCIE_EVENT_INT_ENB_MASK
+
+#define SEC_EVENT(x) \
+ .offset = SEC_ERROR_INT, \
+ .mask_offset = SEC_ERROR_INT_MASK, \
+ .mask = SEC_ERROR_INT_ ## x ## _INT, \
+ .mask_high = 1, \
+ .enb_mask = 0
+
+#define DED_EVENT(x) \
+ .offset = DED_ERROR_INT, \
+ .mask_offset = DED_ERROR_INT_MASK, \
+ .mask_high = 1, \
+ .mask = DED_ERROR_INT_ ## x ## _INT, \
+ .enb_mask = 0
+
+#define LOCAL_EVENT(x) \
+ .offset = ISTATUS_LOCAL, \
+ .mask_offset = IMASK_LOCAL, \
+ .mask_high = 0, \
+ .mask = x ## _MASK, \
+ .enb_mask = 0
+
+#define PCIE_EVENT_TO_EVENT_MAP(x) \
+ { PCIE_EVENT_INT_ ## x ## _INT, EVENT_PCIE_ ## x }
+
+#define SEC_ERROR_TO_EVENT_MAP(x) \
+ { SEC_ERROR_INT_ ## x ## _INT, EVENT_SEC_ ## x }
+
+#define DED_ERROR_TO_EVENT_MAP(x) \
+ { DED_ERROR_INT_ ## x ## _INT, EVENT_DED_ ## x }
+
+#define LOCAL_STATUS_TO_EVENT_MAP(x) \
+ { x ## _MASK, EVENT_LOCAL_ ## x }
+
+struct event_map {
+ u32 reg_mask;
+ u32 event_bit;
+};
+
+struct mc_pcie {
+ struct plda_pcie_rp plda;
+ void __iomem *bridge_base_addr;
+ void __iomem *ctrl_base_addr;
+};
+
+struct cause {
+ const char *sym;
+ const char *str;
+};
+
+static const struct cause event_cause[NUM_EVENTS] = {
+ PCIE_EVENT_CAUSE(L2_EXIT, "L2 exit event"),
+ PCIE_EVENT_CAUSE(HOTRST_EXIT, "Hot reset exit event"),
+ PCIE_EVENT_CAUSE(DLUP_EXIT, "DLUP exit event"),
+ SEC_ERROR_CAUSE(TX_RAM_SEC_ERR, "sec error in tx buffer"),
+ SEC_ERROR_CAUSE(RX_RAM_SEC_ERR, "sec error in rx buffer"),
+ SEC_ERROR_CAUSE(PCIE2AXI_RAM_SEC_ERR, "sec error in pcie2axi buffer"),
+ SEC_ERROR_CAUSE(AXI2PCIE_RAM_SEC_ERR, "sec error in axi2pcie buffer"),
+ DED_ERROR_CAUSE(TX_RAM_DED_ERR, "ded error in tx buffer"),
+ DED_ERROR_CAUSE(RX_RAM_DED_ERR, "ded error in rx buffer"),
+ DED_ERROR_CAUSE(PCIE2AXI_RAM_DED_ERR, "ded error in pcie2axi buffer"),
+ DED_ERROR_CAUSE(AXI2PCIE_RAM_DED_ERR, "ded error in axi2pcie buffer"),
+ LOCAL_EVENT_CAUSE(DMA_ERROR_ENGINE_0, "dma engine 0 error"),
+ LOCAL_EVENT_CAUSE(DMA_ERROR_ENGINE_1, "dma engine 1 error"),
+ LOCAL_EVENT_CAUSE(A_ATR_EVT_POST_ERR, "axi write request error"),
+ LOCAL_EVENT_CAUSE(A_ATR_EVT_FETCH_ERR, "axi read request error"),
+ LOCAL_EVENT_CAUSE(A_ATR_EVT_DISCARD_ERR, "axi read timeout"),
+ LOCAL_EVENT_CAUSE(P_ATR_EVT_POST_ERR, "pcie write request error"),
+ LOCAL_EVENT_CAUSE(P_ATR_EVT_FETCH_ERR, "pcie read request error"),
+ LOCAL_EVENT_CAUSE(P_ATR_EVT_DISCARD_ERR, "pcie read timeout"),
+ LOCAL_EVENT_CAUSE(PM_MSI_INT_AER_EVT, "aer event"),
+ LOCAL_EVENT_CAUSE(PM_MSI_INT_EVENTS, "pm/ltr/hotplug event"),
+ LOCAL_EVENT_CAUSE(PM_MSI_INT_SYS_ERR, "system error"),
+};
+
+static struct event_map pcie_event_to_event[] = {
+ PCIE_EVENT_TO_EVENT_MAP(L2_EXIT),
+ PCIE_EVENT_TO_EVENT_MAP(HOTRST_EXIT),
+ PCIE_EVENT_TO_EVENT_MAP(DLUP_EXIT),
+};
+
+static struct event_map sec_error_to_event[] = {
+ SEC_ERROR_TO_EVENT_MAP(TX_RAM_SEC_ERR),
+ SEC_ERROR_TO_EVENT_MAP(RX_RAM_SEC_ERR),
+ SEC_ERROR_TO_EVENT_MAP(PCIE2AXI_RAM_SEC_ERR),
+ SEC_ERROR_TO_EVENT_MAP(AXI2PCIE_RAM_SEC_ERR),
+};
+
+static struct event_map ded_error_to_event[] = {
+ DED_ERROR_TO_EVENT_MAP(TX_RAM_DED_ERR),
+ DED_ERROR_TO_EVENT_MAP(RX_RAM_DED_ERR),
+ DED_ERROR_TO_EVENT_MAP(PCIE2AXI_RAM_DED_ERR),
+ DED_ERROR_TO_EVENT_MAP(AXI2PCIE_RAM_DED_ERR),
+};
+
+static struct event_map local_status_to_event[] = {
+ LOCAL_STATUS_TO_EVENT_MAP(DMA_END_ENGINE_0),
+ LOCAL_STATUS_TO_EVENT_MAP(DMA_END_ENGINE_1),
+ LOCAL_STATUS_TO_EVENT_MAP(DMA_ERROR_ENGINE_0),
+ LOCAL_STATUS_TO_EVENT_MAP(DMA_ERROR_ENGINE_1),
+ LOCAL_STATUS_TO_EVENT_MAP(A_ATR_EVT_POST_ERR),
+ LOCAL_STATUS_TO_EVENT_MAP(A_ATR_EVT_FETCH_ERR),
+ LOCAL_STATUS_TO_EVENT_MAP(A_ATR_EVT_DISCARD_ERR),
+ LOCAL_STATUS_TO_EVENT_MAP(A_ATR_EVT_DOORBELL),
+ LOCAL_STATUS_TO_EVENT_MAP(P_ATR_EVT_POST_ERR),
+ LOCAL_STATUS_TO_EVENT_MAP(P_ATR_EVT_FETCH_ERR),
+ LOCAL_STATUS_TO_EVENT_MAP(P_ATR_EVT_DISCARD_ERR),
+ LOCAL_STATUS_TO_EVENT_MAP(P_ATR_EVT_DOORBELL),
+ LOCAL_STATUS_TO_EVENT_MAP(PM_MSI_INT_INTX),
+ LOCAL_STATUS_TO_EVENT_MAP(PM_MSI_INT_MSI),
+ LOCAL_STATUS_TO_EVENT_MAP(PM_MSI_INT_AER_EVT),
+ LOCAL_STATUS_TO_EVENT_MAP(PM_MSI_INT_EVENTS),
+ LOCAL_STATUS_TO_EVENT_MAP(PM_MSI_INT_SYS_ERR),
+};
+
+static struct {
+ u32 offset;
+ u32 mask;
+ u32 shift;
+ u32 enb_mask;
+ u32 mask_high;
+ u32 mask_offset;
+} event_descs[] = {
+ { PCIE_EVENT(L2_EXIT) },
+ { PCIE_EVENT(HOTRST_EXIT) },
+ { PCIE_EVENT(DLUP_EXIT) },
+ { SEC_EVENT(TX_RAM_SEC_ERR) },
+ { SEC_EVENT(RX_RAM_SEC_ERR) },
+ { SEC_EVENT(PCIE2AXI_RAM_SEC_ERR) },
+ { SEC_EVENT(AXI2PCIE_RAM_SEC_ERR) },
+ { DED_EVENT(TX_RAM_DED_ERR) },
+ { DED_EVENT(RX_RAM_DED_ERR) },
+ { DED_EVENT(PCIE2AXI_RAM_DED_ERR) },
+ { DED_EVENT(AXI2PCIE_RAM_DED_ERR) },
+ { LOCAL_EVENT(DMA_END_ENGINE_0) },
+ { LOCAL_EVENT(DMA_END_ENGINE_1) },
+ { LOCAL_EVENT(DMA_ERROR_ENGINE_0) },
+ { LOCAL_EVENT(DMA_ERROR_ENGINE_1) },
+ { LOCAL_EVENT(A_ATR_EVT_POST_ERR) },
+ { LOCAL_EVENT(A_ATR_EVT_FETCH_ERR) },
+ { LOCAL_EVENT(A_ATR_EVT_DISCARD_ERR) },
+ { LOCAL_EVENT(A_ATR_EVT_DOORBELL) },
+ { LOCAL_EVENT(P_ATR_EVT_POST_ERR) },
+ { LOCAL_EVENT(P_ATR_EVT_FETCH_ERR) },
+ { LOCAL_EVENT(P_ATR_EVT_DISCARD_ERR) },
+ { LOCAL_EVENT(P_ATR_EVT_DOORBELL) },
+ { LOCAL_EVENT(PM_MSI_INT_INTX) },
+ { LOCAL_EVENT(PM_MSI_INT_MSI) },
+ { LOCAL_EVENT(PM_MSI_INT_AER_EVT) },
+ { LOCAL_EVENT(PM_MSI_INT_EVENTS) },
+ { LOCAL_EVENT(PM_MSI_INT_SYS_ERR) },
+};
+
+static char poss_clks[][5] = { "fic0", "fic1", "fic2", "fic3" };
+
+static struct mc_pcie *port;
+
+static void mc_pcie_enable_msi(struct mc_pcie *port, void __iomem *ecam)
+{
+ struct plda_msi *msi = &port->plda.msi;
+ u16 reg;
+ u8 queue_size;
+
+ /* Fixup MSI enable flag */
+ reg = readw_relaxed(ecam + MC_MSI_CAP_CTRL_OFFSET + PCI_MSI_FLAGS);
+ reg |= PCI_MSI_FLAGS_ENABLE;
+ writew_relaxed(reg, ecam + MC_MSI_CAP_CTRL_OFFSET + PCI_MSI_FLAGS);
+
+ /* Fixup PCI MSI queue flags */
+ queue_size = FIELD_GET(PCI_MSI_FLAGS_QMASK, reg);
+ reg |= FIELD_PREP(PCI_MSI_FLAGS_QSIZE, queue_size);
+ writew_relaxed(reg, ecam + MC_MSI_CAP_CTRL_OFFSET + PCI_MSI_FLAGS);
+
+ /* Fixup MSI addr fields */
+ writel_relaxed(lower_32_bits(msi->vector_phy),
+ ecam + MC_MSI_CAP_CTRL_OFFSET + PCI_MSI_ADDRESS_LO);
+ writel_relaxed(upper_32_bits(msi->vector_phy),
+ ecam + MC_MSI_CAP_CTRL_OFFSET + PCI_MSI_ADDRESS_HI);
+}
+
+static inline u32 reg_to_event(u32 reg, struct event_map field)
+{
+ return (reg & field.reg_mask) ? BIT(field.event_bit) : 0;
+}
+
+static u32 pcie_events(struct mc_pcie *port)
+{
+ u32 reg = readl_relaxed(port->ctrl_base_addr + PCIE_EVENT_INT);
+ u32 val = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(pcie_event_to_event); i++)
+ val |= reg_to_event(reg, pcie_event_to_event[i]);
+
+ return val;
+}
+
+static u32 sec_errors(struct mc_pcie *port)
+{
+ u32 reg = readl_relaxed(port->ctrl_base_addr + SEC_ERROR_INT);
+ u32 val = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sec_error_to_event); i++)
+ val |= reg_to_event(reg, sec_error_to_event[i]);
+
+ return val;
+}
+
+static u32 ded_errors(struct mc_pcie *port)
+{
+ u32 reg = readl_relaxed(port->ctrl_base_addr + DED_ERROR_INT);
+ u32 val = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ded_error_to_event); i++)
+ val |= reg_to_event(reg, ded_error_to_event[i]);
+
+ return val;
+}
+
+static u32 local_events(struct mc_pcie *port)
+{
+ u32 reg = readl_relaxed(port->bridge_base_addr + ISTATUS_LOCAL);
+ u32 val = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(local_status_to_event); i++)
+ val |= reg_to_event(reg, local_status_to_event[i]);
+
+ return val;
+}
+
+static u32 mc_get_events(struct plda_pcie_rp *port)
+{
+ struct mc_pcie *mc_port = container_of(port, struct mc_pcie, plda);
+ u32 events = 0;
+
+ events |= pcie_events(mc_port);
+ events |= sec_errors(mc_port);
+ events |= ded_errors(mc_port);
+ events |= local_events(mc_port);
+
+ return events;
+}
+
+static irqreturn_t mc_event_handler(int irq, void *dev_id)
+{
+ struct plda_pcie_rp *port = dev_id;
+ struct device *dev = port->dev;
+ struct irq_data *data;
+
+ data = irq_domain_get_irq_data(port->event_domain, irq);
+
+ if (event_cause[data->hwirq].str)
+ dev_err_ratelimited(dev, "%s\n", event_cause[data->hwirq].str);
+ else
+ dev_err_ratelimited(dev, "bad event IRQ %ld\n", data->hwirq);
+
+ return IRQ_HANDLED;
+}
+
+static void mc_ack_event_irq(struct irq_data *data)
+{
+ struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
+ struct mc_pcie *mc_port = container_of(port, struct mc_pcie, plda);
+ u32 event = data->hwirq;
+ void __iomem *addr;
+ u32 mask;
+
+ if (event_descs[event].offset == ISTATUS_LOCAL)
+ addr = mc_port->bridge_base_addr;
+ else
+ addr = mc_port->ctrl_base_addr;
+
+ addr += event_descs[event].offset;
+ mask = event_descs[event].mask;
+ mask |= event_descs[event].enb_mask;
+
+ writel_relaxed(mask, addr);
+}
+
+static void mc_mask_event_irq(struct irq_data *data)
+{
+ struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
+ struct mc_pcie *mc_port = container_of(port, struct mc_pcie, plda);
+ u32 event = data->hwirq;
+ void __iomem *addr;
+ u32 mask;
+ u32 val;
+
+ if (event_descs[event].offset == ISTATUS_LOCAL)
+ addr = mc_port->bridge_base_addr;
+ else
+ addr = mc_port->ctrl_base_addr;
+
+ addr += event_descs[event].mask_offset;
+ mask = event_descs[event].mask;
+ if (event_descs[event].enb_mask) {
+ mask <<= PCIE_EVENT_INT_ENB_SHIFT;
+ mask &= PCIE_EVENT_INT_ENB_MASK;
+ }
+
+ if (!event_descs[event].mask_high)
+ mask = ~mask;
+
+ raw_spin_lock(&port->lock);
+ val = readl_relaxed(addr);
+ if (event_descs[event].mask_high)
+ val |= mask;
+ else
+ val &= mask;
+
+ writel_relaxed(val, addr);
+ raw_spin_unlock(&port->lock);
+}
+
+static void mc_unmask_event_irq(struct irq_data *data)
+{
+ struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
+ struct mc_pcie *mc_port = container_of(port, struct mc_pcie, plda);
+ u32 event = data->hwirq;
+ void __iomem *addr;
+ u32 mask;
+ u32 val;
+
+ if (event_descs[event].offset == ISTATUS_LOCAL)
+ addr = mc_port->bridge_base_addr;
+ else
+ addr = mc_port->ctrl_base_addr;
+
+ addr += event_descs[event].mask_offset;
+ mask = event_descs[event].mask;
+
+ if (event_descs[event].enb_mask)
+ mask <<= PCIE_EVENT_INT_ENB_SHIFT;
+
+ if (event_descs[event].mask_high)
+ mask = ~mask;
+
+ if (event_descs[event].enb_mask)
+ mask &= PCIE_EVENT_INT_ENB_MASK;
+
+ raw_spin_lock(&port->lock);
+ val = readl_relaxed(addr);
+ if (event_descs[event].mask_high)
+ val &= mask;
+ else
+ val |= mask;
+ writel_relaxed(val, addr);
+ raw_spin_unlock(&port->lock);
+}
+
+static struct irq_chip mc_event_irq_chip = {
+ .name = "Microchip PCIe EVENT",
+ .irq_ack = mc_ack_event_irq,
+ .irq_mask = mc_mask_event_irq,
+ .irq_unmask = mc_unmask_event_irq,
+};
+
+static inline void mc_pcie_deinit_clk(void *data)
+{
+ struct clk *clk = data;
+
+ clk_disable_unprepare(clk);
+}
+
+static inline struct clk *mc_pcie_init_clk(struct device *dev, const char *id)
+{
+ struct clk *clk;
+ int ret;
+
+ clk = devm_clk_get_optional(dev, id);
+ if (IS_ERR(clk))
+ return clk;
+ if (!clk)
+ return clk;
+
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ return ERR_PTR(ret);
+
+ devm_add_action_or_reset(dev, mc_pcie_deinit_clk, clk);
+
+ return clk;
+}
+
+static int mc_pcie_init_clks(struct device *dev)
+{
+ int i;
+ struct clk *fic;
+
+ /*
+ * PCIe may be clocked via Fabric Interface using between 1 and 4
+ * clocks. Scan DT for clocks and enable them if present
+ */
+ for (i = 0; i < ARRAY_SIZE(poss_clks); i++) {
+ fic = mc_pcie_init_clk(dev, poss_clks[i]);
+ if (IS_ERR(fic))
+ return PTR_ERR(fic);
+ }
+
+ return 0;
+}
+
+static int mc_request_event_irq(struct plda_pcie_rp *plda, int event_irq,
+ int event)
+{
+ return devm_request_irq(plda->dev, event_irq, mc_event_handler,
+ 0, event_cause[event].sym, plda);
+}
+
+static const struct plda_event_ops mc_event_ops = {
+ .get_events = mc_get_events,
+};
+
+static const struct plda_event mc_event = {
+ .request_event_irq = mc_request_event_irq,
+ .intx_event = EVENT_LOCAL_PM_MSI_INT_INTX,
+ .msi_event = EVENT_LOCAL_PM_MSI_INT_MSI,
+};
+
+static inline void mc_clear_secs(struct mc_pcie *port)
+{
+ writel_relaxed(SEC_ERROR_INT_ALL_RAM_SEC_ERR_INT,
+ port->ctrl_base_addr + SEC_ERROR_INT);
+ writel_relaxed(0, port->ctrl_base_addr + SEC_ERROR_EVENT_CNT);
+}
+
+static inline void mc_clear_deds(struct mc_pcie *port)
+{
+ writel_relaxed(DED_ERROR_INT_ALL_RAM_DED_ERR_INT,
+ port->ctrl_base_addr + DED_ERROR_INT);
+ writel_relaxed(0, port->ctrl_base_addr + DED_ERROR_EVENT_CNT);
+}
+
+static void mc_disable_interrupts(struct mc_pcie *port)
+{
+ u32 val;
+
+ /* Ensure ECC bypass is enabled */
+ val = ECC_CONTROL_TX_RAM_ECC_BYPASS |
+ ECC_CONTROL_RX_RAM_ECC_BYPASS |
+ ECC_CONTROL_PCIE2AXI_RAM_ECC_BYPASS |
+ ECC_CONTROL_AXI2PCIE_RAM_ECC_BYPASS;
+ writel_relaxed(val, port->ctrl_base_addr + ECC_CONTROL);
+
+ /* Disable SEC errors and clear any outstanding */
+ writel_relaxed(SEC_ERROR_INT_ALL_RAM_SEC_ERR_INT,
+ port->ctrl_base_addr + SEC_ERROR_INT_MASK);
+ mc_clear_secs(port);
+
+ /* Disable DED errors and clear any outstanding */
+ writel_relaxed(DED_ERROR_INT_ALL_RAM_DED_ERR_INT,
+ port->ctrl_base_addr + DED_ERROR_INT_MASK);
+ mc_clear_deds(port);
+
+ /* Disable local interrupts and clear any outstanding */
+ writel_relaxed(0, port->bridge_base_addr + IMASK_LOCAL);
+ writel_relaxed(GENMASK(31, 0), port->bridge_base_addr + ISTATUS_LOCAL);
+ writel_relaxed(GENMASK(31, 0), port->bridge_base_addr + ISTATUS_MSI);
+
+ /* Disable PCIe events and clear any outstanding */
+ val = PCIE_EVENT_INT_L2_EXIT_INT |
+ PCIE_EVENT_INT_HOTRST_EXIT_INT |
+ PCIE_EVENT_INT_DLUP_EXIT_INT |
+ PCIE_EVENT_INT_L2_EXIT_INT_MASK |
+ PCIE_EVENT_INT_HOTRST_EXIT_INT_MASK |
+ PCIE_EVENT_INT_DLUP_EXIT_INT_MASK;
+ writel_relaxed(val, port->ctrl_base_addr + PCIE_EVENT_INT);
+
+ /* Disable host interrupts and clear any outstanding */
+ writel_relaxed(0, port->bridge_base_addr + IMASK_HOST);
+ writel_relaxed(GENMASK(31, 0), port->bridge_base_addr + ISTATUS_HOST);
+}
+
+static void mc_pcie_setup_inbound_atr(struct mc_pcie *port, int window_index,
+ u64 axi_addr, u64 pcie_addr, u64 size)
+{
+ u32 table_offset = window_index * ATR_ENTRY_SIZE;
+ void __iomem *table_addr = port->bridge_base_addr + table_offset;
+ u32 atr_sz;
+ u32 val;
+
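+ /* The ATR size field encodes a window of 2^(atr_sz + 1) bytes */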
+ atr_sz = ilog2(size) - 1;
+
+ val = ALIGN_DOWN(lower_32_bits(pcie_addr), SZ_4K);
+ val |= FIELD_PREP(ATR_SIZE_MASK, atr_sz);
+ val |= ATR_IMPL_ENABLE;
+
+ writel(val, table_addr + ATR0_PCIE_WIN0_SRCADDR_PARAM);
+
+ writel(upper_32_bits(pcie_addr), table_addr + ATR0_PCIE_WIN0_SRC_ADDR);
+
+ writel(lower_32_bits(axi_addr), table_addr + ATR0_PCIE_WIN0_TRSL_ADDR_LSB);
+ writel(upper_32_bits(axi_addr), table_addr + ATR0_PCIE_WIN0_TRSL_ADDR_UDW);
+
+ writel(TRSL_ID_AXI4_MASTER_0, table_addr + ATR0_PCIE_WIN0_TRSL_PARAM);
+}
+
+static int mc_pcie_setup_inbound_ranges(struct platform_device *pdev,
+ struct mc_pcie *port)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *dn = dev->of_node;
+ struct of_range_parser parser;
+ struct of_range range;
+ int atr_index = 0;
+
+ /*
+ * MPFS PCIe Root Port is 32-bit only, behind a Fabric Interface
+ * Controller FPGA logic block which contains the AXI-S interface.
+ *
+ * From the point of view of the PCIe Root Port, there are only two
+ * supported Root Port configurations:
+ *
+ * Configuration 1: for use with fully coherent designs; supports a
+ * window from 0x0 (CPU space) to specified PCIe space.
+ *
+ * Configuration 2: for use with non-coherent designs; supports two
+ * 1 GB windows to CPU space; one mapping CPU space 0 to PCIe space
+ * 0x80000000 and a second mapping CPU space 0x40000000 to PCIe
+ * space 0xc0000000. This cfg needs two windows because of how the
+ * MSI space is allocated in the AXI-S range on MPFS.
+ *
+ * The FIC interface outside the PCIe block *must* complete the
+ * inbound address translation as per MCHP MPFS FPGA design
+ * guidelines.
+ */
+ if (device_property_read_bool(dev, "dma-noncoherent")) {
+ /*
+ * Always need same two tables in this case. Need two tables
+ * due to hardware interactions between address and size.
+ */
+ mc_pcie_setup_inbound_atr(port, 0, 0,
+ MPFS_NC_BOUNCE_ADDR, SZ_1G);
+ mc_pcie_setup_inbound_atr(port, 1, SZ_1G,
+ MPFS_NC_BOUNCE_ADDR + SZ_1G, SZ_1G);
+ } else {
+ /* Find any DMA ranges */
+ if (of_pci_dma_range_parser_init(&parser, dn)) {
+ /* No DMA range property - setup default */
+ mc_pcie_setup_inbound_atr(port, 0, 0, 0, SZ_4G);
+ return 0;
+ }
+
+ for_each_of_range(&parser, &range) {
+ if (atr_index >= MC_MAX_NUM_INBOUND_WINDOWS) {
+ dev_err(dev, "too many inbound ranges; %d available tables\n",
+ MC_MAX_NUM_INBOUND_WINDOWS);
+ return -EINVAL;
+ }
+ mc_pcie_setup_inbound_atr(port, atr_index, 0,
+ range.pci_addr, range.size);
+ atr_index++;
+ }
+ }
+
+ return 0;
+}
+
+static int mc_platform_init(struct pci_config_window *cfg)
+{
+ struct device *dev = cfg->parent;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct pci_host_bridge *bridge = platform_get_drvdata(pdev);
+ int ret;
+
+ /* Configure address translation table 0 for PCIe config space */
+ plda_pcie_setup_window(port->bridge_base_addr, 0, cfg->res.start,
+ cfg->res.start,
+ resource_size(&cfg->res));
+
+ /* Need some fixups in config space */
+ mc_pcie_enable_msi(port, cfg->win);
+
+ /* Configure non-config space outbound ranges */
+ ret = plda_pcie_setup_iomems(bridge, &port->plda);
+ if (ret)
+ return ret;
+
+ ret = mc_pcie_setup_inbound_ranges(pdev, port);
+ if (ret)
+ return ret;
+
+ port->plda.event_ops = &mc_event_ops;
+ port->plda.event_irq_chip = &mc_event_irq_chip;
+ port->plda.events_bitmap = GENMASK(NUM_EVENTS - 1, 0);
+
+ /* Address translation is up; safe to enable interrupts */
+ ret = plda_init_interrupts(pdev, &port->plda, &mc_event);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int mc_host_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ void __iomem *apb_base_addr;
+ struct plda_pcie_rp *plda;
+ int ret;
+ u32 val;
+
+ port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
+ if (!port)
+ return -ENOMEM;
+
+ plda = &port->plda;
+ plda->dev = dev;
+
+ port->bridge_base_addr = devm_platform_ioremap_resource_byname(pdev,
+ "bridge");
+ port->ctrl_base_addr = devm_platform_ioremap_resource_byname(pdev,
+ "ctrl");
+ if (!IS_ERR(port->bridge_base_addr) && !IS_ERR(port->ctrl_base_addr))
+ goto addrs_set;
+
+ /*
+ * The original, incorrect, binding that lumped the control and
+ * bridge addresses together still needs to be handled by the driver.
+ */
+ apb_base_addr = devm_platform_ioremap_resource_byname(pdev, "apb");
+ if (IS_ERR(apb_base_addr))
+ return dev_err_probe(dev, PTR_ERR(apb_base_addr),
+ "both legacy apb register and ctrl/bridge regions missing");
+
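+ /* Carve the PCIe1 bridge and ctrl registers out of the combined region */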
+ port->bridge_base_addr = apb_base_addr + MC_PCIE1_BRIDGE_ADDR;
+ port->ctrl_base_addr = apb_base_addr + MC_PCIE1_CTRL_ADDR;
+
+addrs_set:
+ mc_disable_interrupts(port);
+
+ plda->bridge_addr = port->bridge_base_addr;
+ plda->num_events = NUM_EVENTS;
+
+ /* Allow enabling MSI by disabling MSI-X */
+ val = readl(port->bridge_base_addr + PCIE_PCI_IRQ_DW0);
+ val &= ~MSIX_CAP_MASK;
+ writel(val, port->bridge_base_addr + PCIE_PCI_IRQ_DW0);
+
+ /* Pick num vectors from bitfile programmed onto FPGA fabric */
+ val = readl(port->bridge_base_addr + PCIE_PCI_IRQ_DW0);
+ val &= NUM_MSI_MSGS_MASK;
+ val >>= NUM_MSI_MSGS_SHIFT;
+
+ plda->msi.num_vectors = 1 << val;
+
+ /* Pick vector address from design */
+ plda->msi.vector_phy = readl_relaxed(port->bridge_base_addr + IMSI_ADDR);
+
+ ret = mc_pcie_init_clks(dev);
+ if (ret) {
+ dev_err(dev, "failed to get clock resources, error %d\n", ret);
+ return -ENODEV;
+ }
+
+ return pci_host_common_probe(pdev);
+}
+
+static const struct pci_ecam_ops mc_ecam_ops = {
+ .init = mc_platform_init,
+ .pci_ops = {
+ .map_bus = pci_ecam_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+ }
+};
+
+static const struct of_device_id mc_pcie_of_match[] = {
+ {
+ .compatible = "microchip,pcie-host-1.0",
+ .data = &mc_ecam_ops,
+ },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, mc_pcie_of_match);
+
+static struct platform_driver mc_pcie_driver = {
+ .probe = mc_host_probe,
+ .driver = {
+ .name = "microchip-pcie",
+ .of_match_table = mc_pcie_of_match,
+ .suppress_bind_attrs = true,
+ },
+};
+
+builtin_platform_driver(mc_pcie_driver);
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Microchip PCIe host controller driver");
+MODULE_AUTHOR("Daire McNamara <daire.mcnamara@microchip.com>");
diff --git a/drivers/pci/controller/plda/pcie-plda-host.c b/drivers/pci/controller/plda/pcie-plda-host.c
new file mode 100644
index 000000000000..3abedf723215
--- /dev/null
+++ b/drivers/pci/controller/plda/pcie-plda-host.c
@@ -0,0 +1,653 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PLDA PCIe XpressRich host controller driver
+ *
+ * Copyright (C) 2023 Microchip Co. Ltd
+ * StarFive Co. Ltd
+ *
+ * Author: Daire McNamara <daire.mcnamara@microchip.com>
+ */
+
+#include <linux/align.h>
+#include <linux/bitfield.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/msi.h>
+#include <linux/pci_regs.h>
+#include <linux/pci-ecam.h>
+#include <linux/wordpart.h>
+
+#include "pcie-plda.h"
+
+void __iomem *plda_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
+ int where)
+{
+ struct plda_pcie_rp *pcie = bus->sysdata;
+
+ return pcie->config_base + PCIE_ECAM_OFFSET(bus->number, devfn, where);
+}
+EXPORT_SYMBOL_GPL(plda_pcie_map_bus);
+
+static void plda_handle_msi(struct irq_desc *desc)
+{
+ struct plda_pcie_rp *port = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct device *dev = port->dev;
+ struct plda_msi *msi = &port->msi;
+ void __iomem *bridge_base_addr = port->bridge_addr;
+ unsigned long status;
+ u32 bit;
+ int ret;
+
+ chained_irq_enter(chip, desc);
+
+ status = readl_relaxed(bridge_base_addr + ISTATUS_LOCAL);
+ if (status & PM_MSI_INT_MSI_MASK) {
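+ /* Clear the aggregate MSI bit before reading the per-vector status */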
+ writel_relaxed(status & PM_MSI_INT_MSI_MASK,
+ bridge_base_addr + ISTATUS_LOCAL);
+ status = readl_relaxed(bridge_base_addr + ISTATUS_MSI);
+ for_each_set_bit(bit, &status, msi->num_vectors) {
+ ret = generic_handle_domain_irq(msi->dev_domain, bit);
+ if (ret)
+ dev_err_ratelimited(dev, "bad MSI IRQ %d\n",
+ bit);
+ }
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static void plda_msi_bottom_irq_ack(struct irq_data *data)
+{
+ struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
+ void __iomem *bridge_base_addr = port->bridge_addr;
+ u32 bitpos = data->hwirq;
+
+ writel_relaxed(BIT(bitpos), bridge_base_addr + ISTATUS_MSI);
+}
+
+static void plda_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
+{
+ struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
+ phys_addr_t addr = port->msi.vector_phy;
+
+ msg->address_lo = lower_32_bits(addr);
+ msg->address_hi = upper_32_bits(addr);
+ msg->data = data->hwirq;
+
+ dev_dbg(port->dev, "msi#%x address_hi %#x address_lo %#x\n",
+ (int)data->hwirq, msg->address_hi, msg->address_lo);
+}
+
+static struct irq_chip plda_msi_bottom_irq_chip = {
+ .name = "PLDA MSI",
+ .irq_ack = plda_msi_bottom_irq_ack,
+ .irq_compose_msi_msg = plda_compose_msi_msg,
+};
+
+static int plda_irq_msi_domain_alloc(struct irq_domain *domain,
+ unsigned int virq,
+ unsigned int nr_irqs,
+ void *args)
+{
+ struct plda_pcie_rp *port = domain->host_data;
+ struct plda_msi *msi = &port->msi;
+ unsigned long bit;
+
+ mutex_lock(&msi->lock);
+ bit = find_first_zero_bit(msi->used, msi->num_vectors);
+ if (bit >= msi->num_vectors) {
+ mutex_unlock(&msi->lock);
+ return -ENOSPC;
+ }
+
+ set_bit(bit, msi->used);
+
+ irq_domain_set_info(domain, virq, bit, &plda_msi_bottom_irq_chip,
+ domain->host_data, handle_edge_irq, NULL, NULL);
+
+ mutex_unlock(&msi->lock);
+
+ return 0;
+}
+
+static void plda_irq_msi_domain_free(struct irq_domain *domain,
+ unsigned int virq,
+ unsigned int nr_irqs)
+{
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+ struct plda_pcie_rp *port = irq_data_get_irq_chip_data(d);
+ struct plda_msi *msi = &port->msi;
+
+ mutex_lock(&msi->lock);
+
+ if (test_bit(d->hwirq, msi->used))
+ __clear_bit(d->hwirq, msi->used);
+ else
+ dev_err(port->dev, "trying to free unused MSI%lu\n", d->hwirq);
+
+ mutex_unlock(&msi->lock);
+}
+
+static const struct irq_domain_ops msi_domain_ops = {
+ .alloc = plda_irq_msi_domain_alloc,
+ .free = plda_irq_msi_domain_free,
+};
+
+static struct irq_chip plda_msi_irq_chip = {
+ .name = "PLDA PCIe MSI",
+ .irq_ack = irq_chip_ack_parent,
+ .irq_mask = pci_msi_mask_irq,
+ .irq_unmask = pci_msi_unmask_irq,
+};
+
+static struct msi_domain_info plda_msi_domain_info = {
+ .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX,
+ .chip = &plda_msi_irq_chip,
+};
+
+static int plda_allocate_msi_domains(struct plda_pcie_rp *port)
+{
+ struct device *dev = port->dev;
+ struct fwnode_handle *fwnode = of_fwnode_handle(dev->of_node);
+ struct plda_msi *msi = &port->msi;
+
+ mutex_init(&port->msi.lock);
+
+ msi->dev_domain = irq_domain_create_linear(NULL, msi->num_vectors, &msi_domain_ops, port);
+ if (!msi->dev_domain) {
+ dev_err(dev, "failed to create IRQ domain\n");
+ return -ENOMEM;
+ }
+
+ msi->msi_domain = pci_msi_create_irq_domain(fwnode,
+ &plda_msi_domain_info,
+ msi->dev_domain);
+ if (!msi->msi_domain) {
+ dev_err(dev, "failed to create MSI domain\n");
+ irq_domain_remove(msi->dev_domain);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void plda_handle_intx(struct irq_desc *desc)
+{
+ struct plda_pcie_rp *port = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct device *dev = port->dev;
+ void __iomem *bridge_base_addr = port->bridge_addr;
+ unsigned long status;
+ u32 bit;
+ int ret;
+
+ chained_irq_enter(chip, desc);
+
+ status = readl_relaxed(bridge_base_addr + ISTATUS_LOCAL);
+ if (status & PM_MSI_INT_INTX_MASK) {
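+ /* Isolate INTA-INTD (bits 24 - 27) and shift them down to 0 - 3 */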
+ status &= PM_MSI_INT_INTX_MASK;
+ status >>= PM_MSI_INT_INTX_SHIFT;
+ for_each_set_bit(bit, &status, PCI_NUM_INTX) {
+ ret = generic_handle_domain_irq(port->intx_domain, bit);
+ if (ret)
+ dev_err_ratelimited(dev, "bad INTx IRQ %d\n",
+ bit);
+ }
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static void plda_ack_intx_irq(struct irq_data *data)
+{
+ struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
+ void __iomem *bridge_base_addr = port->bridge_addr;
+ u32 mask = BIT(data->hwirq + PM_MSI_INT_INTX_SHIFT);
+
+ writel_relaxed(mask, bridge_base_addr + ISTATUS_LOCAL);
+}
+
+static void plda_mask_intx_irq(struct irq_data *data)
+{
+ struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
+ void __iomem *bridge_base_addr = port->bridge_addr;
+ unsigned long flags;
+ u32 mask = BIT(data->hwirq + PM_MSI_INT_INTX_SHIFT);
+ u32 val;
+
+ raw_spin_lock_irqsave(&port->lock, flags);
+ val = readl_relaxed(bridge_base_addr + IMASK_LOCAL);
+ val &= ~mask;
+ writel_relaxed(val, bridge_base_addr + IMASK_LOCAL);
+ raw_spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static void plda_unmask_intx_irq(struct irq_data *data)
+{
+ struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
+ void __iomem *bridge_base_addr = port->bridge_addr;
+ unsigned long flags;
+ u32 mask = BIT(data->hwirq + PM_MSI_INT_INTX_SHIFT);
+ u32 val;
+
+ raw_spin_lock_irqsave(&port->lock, flags);
+ val = readl_relaxed(bridge_base_addr + IMASK_LOCAL);
+ val |= mask;
+ writel_relaxed(val, bridge_base_addr + IMASK_LOCAL);
+ raw_spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static struct irq_chip plda_intx_irq_chip = {
+ .name = "PLDA PCIe INTx",
+ .irq_ack = plda_ack_intx_irq,
+ .irq_mask = plda_mask_intx_irq,
+ .irq_unmask = plda_unmask_intx_irq,
+};
+
+static int plda_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &plda_intx_irq_chip, handle_level_irq);
+ irq_set_chip_data(irq, domain->host_data);
+
+ return 0;
+}
+
+static const struct irq_domain_ops intx_domain_ops = {
+ .map = plda_pcie_intx_map,
+};
+
+static u32 plda_get_events(struct plda_pcie_rp *port)
+{
+ u32 events, val, origin;
+
+ origin = readl_relaxed(port->bridge_addr + ISTATUS_LOCAL);
+
+ /* MSI event and sys events */
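+ /* register bits 28 - 31 are compressed into event bits 25 - 28 */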
+ val = (origin & SYS_AND_MSI_MASK) >> PM_MSI_INT_MSI_SHIFT;
+ events = val << (PM_MSI_INT_MSI_SHIFT - PCI_NUM_INTX + 1);
+
+ /* INTx events */
+ if (origin & PM_MSI_INT_INTX_MASK)
+ events |= BIT(PM_MSI_INT_INTX_SHIFT);
+
+ /* the remaining bits match the register layout directly */
+ events |= origin & GENMASK(P_ATR_EVT_DOORBELL_SHIFT, 0);
+
+ return events;
+}
+
+static irqreturn_t plda_event_handler(int irq, void *dev_id)
+{
+ return IRQ_HANDLED;
+}
+
+static void plda_handle_event(struct irq_desc *desc)
+{
+ struct plda_pcie_rp *port = irq_desc_get_handler_data(desc);
+ unsigned long events;
+ u32 bit;
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+
+ chained_irq_enter(chip, desc);
+
+ events = port->event_ops->get_events(port);
+
+ events &= port->events_bitmap;
+ for_each_set_bit(bit, &events, port->num_events)
+ generic_handle_domain_irq(port->event_domain, bit);
+
+ chained_irq_exit(chip, desc);
+}
+
+static u32 plda_hwirq_to_mask(int hwirq)
+{
+ u32 mask;
+
+ /* hwirqs 0 - 23 map directly to register bits */
+ if (hwirq < EVENT_PM_MSI_INT_INTX)
+ mask = BIT(hwirq);
+ else if (hwirq == EVENT_PM_MSI_INT_INTX)
+ mask = PM_MSI_INT_INTX_MASK;
+ else
+ mask = BIT(hwirq + PCI_NUM_INTX - 1);
+
+ return mask;
+}
+
+static void plda_ack_event_irq(struct irq_data *data)
+{
+ struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
+
+ writel_relaxed(plda_hwirq_to_mask(data->hwirq),
+ port->bridge_addr + ISTATUS_LOCAL);
+}
+
+static void plda_mask_event_irq(struct irq_data *data)
+{
+ struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
+ u32 mask, val;
+
+ mask = plda_hwirq_to_mask(data->hwirq);
+
+ raw_spin_lock(&port->lock);
+ val = readl_relaxed(port->bridge_addr + IMASK_LOCAL);
+ val &= ~mask;
+ writel_relaxed(val, port->bridge_addr + IMASK_LOCAL);
+ raw_spin_unlock(&port->lock);
+}
+
+static void plda_unmask_event_irq(struct irq_data *data)
+{
+ struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
+ u32 mask, val;
+
+ mask = plda_hwirq_to_mask(data->hwirq);
+
+ raw_spin_lock(&port->lock);
+ val = readl_relaxed(port->bridge_addr + IMASK_LOCAL);
+ val |= mask;
+ writel_relaxed(val, port->bridge_addr + IMASK_LOCAL);
+ raw_spin_unlock(&port->lock);
+}
+
+static struct irq_chip plda_event_irq_chip = {
+ .name = "PLDA PCIe EVENT",
+ .irq_ack = plda_ack_event_irq,
+ .irq_mask = plda_mask_event_irq,
+ .irq_unmask = plda_unmask_event_irq,
+};
+
+static const struct plda_event_ops plda_event_ops = {
+ .get_events = plda_get_events,
+};
+
+static int plda_pcie_event_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ struct plda_pcie_rp *port = (void *)domain->host_data;
+
+ irq_set_chip_and_handler(irq, port->event_irq_chip, handle_level_irq);
+ irq_set_chip_data(irq, domain->host_data);
+
+ return 0;
+}
+
+static const struct irq_domain_ops plda_event_domain_ops = {
+ .map = plda_pcie_event_map,
+};
+
+static int plda_pcie_init_irq_domains(struct plda_pcie_rp *port)
+{
+ struct device *dev = port->dev;
+ struct device_node *node = dev->of_node;
+ struct device_node *pcie_intc_node;
+
+ /* Setup INTx */
+ pcie_intc_node = of_get_next_child(node, NULL);
+ if (!pcie_intc_node) {
+ dev_err(dev, "failed to find PCIe Intc node\n");
+ return -EINVAL;
+ }
+
+ port->event_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node),
+ port->num_events, &plda_event_domain_ops,
+ port);
+ if (!port->event_domain) {
+ dev_err(dev, "failed to get event domain\n");
+ of_node_put(pcie_intc_node);
+ return -ENOMEM;
+ }
+
+ irq_domain_update_bus_token(port->event_domain, DOMAIN_BUS_NEXUS);
+
+ port->intx_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node), PCI_NUM_INTX,
+ &intx_domain_ops, port);
+ if (!port->intx_domain) {
+ dev_err(dev, "failed to get an INTx IRQ domain\n");
+ of_node_put(pcie_intc_node);
+ return -ENOMEM;
+ }
+
+ irq_domain_update_bus_token(port->intx_domain, DOMAIN_BUS_WIRED);
+
+ of_node_put(pcie_intc_node);
+ raw_spin_lock_init(&port->lock);
+
+ return plda_allocate_msi_domains(port);
+}
+
+int plda_init_interrupts(struct platform_device *pdev,
+ struct plda_pcie_rp *port,
+ const struct plda_event *event)
+{
+ struct device *dev = &pdev->dev;
+ int event_irq, ret;
+ u32 i;
+
+ if (!port->event_ops)
+ port->event_ops = &plda_event_ops;
+
+ if (!port->event_irq_chip)
+ port->event_irq_chip = &plda_event_irq_chip;
+
+ ret = plda_pcie_init_irq_domains(port);
+ if (ret) {
+ dev_err(dev, "failed creating IRQ domains\n");
+ return ret;
+ }
+
+ port->irq = platform_get_irq(pdev, 0);
+ if (port->irq < 0)
+ return -ENODEV;
+
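+ /* Map each enabled event onto the event domain and request its IRQ */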
+ for_each_set_bit(i, &port->events_bitmap, port->num_events) {
+ event_irq = irq_create_mapping(port->event_domain, i);
+ if (!event_irq) {
+ dev_err(dev, "failed to map hwirq %d\n", i);
+ return -ENXIO;
+ }
+
+ if (event->request_event_irq)
+ ret = event->request_event_irq(port, event_irq, i);
+ else
+ ret = devm_request_irq(dev, event_irq,
+ plda_event_handler,
+ 0, NULL, port);
+
+ if (ret) {
+ dev_err(dev, "failed to request IRQ %d\n", event_irq);
+ return ret;
+ }
+ }
+
+ port->intx_irq = irq_create_mapping(port->event_domain,
+ event->intx_event);
+ if (!port->intx_irq) {
+ dev_err(dev, "failed to map INTx interrupt\n");
+ return -ENXIO;
+ }
+
+ /* Plug the INTx chained handler */
+ irq_set_chained_handler_and_data(port->intx_irq, plda_handle_intx, port);
+
+ port->msi_irq = irq_create_mapping(port->event_domain,
+ event->msi_event);
+ if (!port->msi_irq)
+ return -ENXIO;
+
+ /* Plug the MSI chained handler */
+ irq_set_chained_handler_and_data(port->msi_irq, plda_handle_msi, port);
+
+ /* Plug the main event chained handler */
+ irq_set_chained_handler_and_data(port->irq, plda_handle_event, port);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(plda_init_interrupts);
+
+void plda_pcie_setup_window(void __iomem *bridge_base_addr, u32 index,
+ phys_addr_t axi_addr, phys_addr_t pci_addr,
+ size_t size)
+{
+ u32 atr_sz = ilog2(size) - 1;
+ u32 val;
+
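+ /* Window 0 targets the config interface; all others target TX/RX (memory) */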
+ if (index == 0)
+ val = PCIE_CONFIG_INTERFACE;
+ else
+ val = PCIE_TX_RX_INTERFACE;
+
+ writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
+ ATR0_AXI4_SLV0_TRSL_PARAM);
+
+ val = ALIGN_DOWN(lower_32_bits(axi_addr), SZ_4K);
+ val |= FIELD_PREP(ATR_SIZE_MASK, atr_sz);
+ val |= ATR_IMPL_ENABLE;
+ writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
+ ATR0_AXI4_SLV0_SRCADDR_PARAM);
+
+ val = upper_32_bits(axi_addr);
+ writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
+ ATR0_AXI4_SLV0_SRC_ADDR);
+
+ val = lower_32_bits(pci_addr);
+ writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
+ ATR0_AXI4_SLV0_TRSL_ADDR_LSB);
+
+ val = upper_32_bits(pci_addr);
+ writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
+ ATR0_AXI4_SLV0_TRSL_ADDR_UDW);
+}
+EXPORT_SYMBOL_GPL(plda_pcie_setup_window);
+
+void plda_pcie_setup_inbound_address_translation(struct plda_pcie_rp *port)
+{
+ void __iomem *bridge_base_addr = port->bridge_addr;
+ u32 val;
+
+ val = readl(bridge_base_addr + ATR0_PCIE_WIN0_SRCADDR_PARAM);
+ val |= (ATR0_PCIE_ATR_SIZE << ATR0_PCIE_ATR_SIZE_SHIFT);
+ writel(val, bridge_base_addr + ATR0_PCIE_WIN0_SRCADDR_PARAM);
+ writel(0, bridge_base_addr + ATR0_PCIE_WIN0_SRC_ADDR);
+}
+EXPORT_SYMBOL_GPL(plda_pcie_setup_inbound_address_translation);
+
+int plda_pcie_setup_iomems(struct pci_host_bridge *bridge,
+ struct plda_pcie_rp *port)
+{
+ void __iomem *bridge_base_addr = port->bridge_addr;
+ struct resource_entry *entry;
+ u64 pci_addr;
+ u32 index = 1;
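+ /* index 0 is reserved for the config space window set up by the caller */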
+
+ resource_list_for_each_entry(entry, &bridge->windows) {
+ if (resource_type(entry->res) == IORESOURCE_MEM) {
+ pci_addr = entry->res->start - entry->offset;
+ plda_pcie_setup_window(bridge_base_addr, index,
+ entry->res->start, pci_addr,
+ resource_size(entry->res));
+ index++;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(plda_pcie_setup_iomems);
+
+static void plda_pcie_irq_domain_deinit(struct plda_pcie_rp *pcie)
+{
+ irq_set_chained_handler_and_data(pcie->irq, NULL, NULL);
+ irq_set_chained_handler_and_data(pcie->msi_irq, NULL, NULL);
+ irq_set_chained_handler_and_data(pcie->intx_irq, NULL, NULL);
+
+ irq_domain_remove(pcie->msi.msi_domain);
+ irq_domain_remove(pcie->msi.dev_domain);
+
+ irq_domain_remove(pcie->intx_domain);
+ irq_domain_remove(pcie->event_domain);
+}
+
+int plda_pcie_host_init(struct plda_pcie_rp *port, struct pci_ops *ops,
+ const struct plda_event *plda_event)
+{
+ struct device *dev = port->dev;
+ struct pci_host_bridge *bridge;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct resource *cfg_res;
+ int ret;
+
+ port->bridge_addr =
+ devm_platform_ioremap_resource_byname(pdev, "apb");
+
+ if (IS_ERR(port->bridge_addr))
+ return dev_err_probe(dev, PTR_ERR(port->bridge_addr),
+ "failed to map reg memory\n");
+
+ cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
+ if (!cfg_res)
+ return dev_err_probe(dev, -ENODEV,
+ "failed to get config memory\n");
+
+ port->config_base = devm_ioremap_resource(dev, cfg_res);
+ if (IS_ERR(port->config_base))
+ return dev_err_probe(dev, PTR_ERR(port->config_base),
+ "failed to map config memory\n");
+
+ bridge = devm_pci_alloc_host_bridge(dev, 0);
+ if (!bridge)
+ return dev_err_probe(dev, -ENOMEM,
+ "failed to alloc bridge\n");
+
+ if (port->host_ops && port->host_ops->host_init) {
+ ret = port->host_ops->host_init(port);
+ if (ret)
+ return ret;
+ }
+
+ port->bridge = bridge;
+ plda_pcie_setup_window(port->bridge_addr, 0, cfg_res->start, 0,
+ resource_size(cfg_res));
+ plda_pcie_setup_iomems(bridge, port);
+ plda_set_default_msi(&port->msi);
+ ret = plda_init_interrupts(pdev, port, plda_event);
+ if (ret)
+ goto err_host;
+
+ /* Set default bus ops */
+ bridge->ops = ops;
+ bridge->sysdata = port;
+
+ ret = pci_host_probe(bridge);
+ if (ret < 0) {
+ dev_err_probe(dev, ret, "failed to probe pci host\n");
+ goto err_probe;
+ }
+
+ return ret;
+
+err_probe:
+ plda_pcie_irq_domain_deinit(port);
+err_host:
+ if (port->host_ops && port->host_ops->host_deinit)
+ port->host_ops->host_deinit(port);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(plda_pcie_host_init);
+
+void plda_pcie_host_deinit(struct plda_pcie_rp *port)
+{
+ pci_stop_root_bus(port->bridge->bus);
+ pci_remove_root_bus(port->bridge->bus);
+
+ plda_pcie_irq_domain_deinit(port);
+
+ if (port->host_ops && port->host_ops->host_deinit)
+ port->host_ops->host_deinit(port);
+}
+EXPORT_SYMBOL_GPL(plda_pcie_host_deinit);
diff --git a/drivers/pci/controller/plda/pcie-plda.h b/drivers/pci/controller/plda/pcie-plda.h
new file mode 100644
index 000000000000..61ece26065ea
--- /dev/null
+++ b/drivers/pci/controller/plda/pcie-plda.h
@@ -0,0 +1,275 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * PLDA PCIe host controller driver
+ */
+
+#ifndef _PCIE_PLDA_H
+#define _PCIE_PLDA_H
+
+/* Number of MSI IRQs */
+#define PLDA_MAX_NUM_MSI_IRQS 32
+
+/* PCIe Bridge Phy Regs */
+#define GEN_SETTINGS 0x80
+#define RP_ENABLE 1
+#define PCIE_PCI_IDS_DW1 0x9c
+#define IDS_CLASS_CODE_SHIFT 16
+#define REVISION_ID_MASK GENMASK(7, 0)
+#define CLASS_CODE_ID_MASK GENMASK(31, 8)
+#define PCIE_PCI_IRQ_DW0 0xa8
+#define MSIX_CAP_MASK BIT(31)
+#define NUM_MSI_MSGS_MASK GENMASK(6, 4)
+#define NUM_MSI_MSGS_SHIFT 4
+#define PCI_MISC 0xb4
+#define PHY_FUNCTION_DIS BIT(15)
+#define PCIE_WINROM 0xfc
+#define PREF_MEM_WIN_64_SUPPORT BIT(3)
+
+#define IMASK_LOCAL 0x180
+#define DMA_END_ENGINE_0_MASK 0x00000000u
+#define DMA_END_ENGINE_0_SHIFT 0
+#define DMA_END_ENGINE_1_MASK 0x00000000u
+#define DMA_END_ENGINE_1_SHIFT 1
+#define DMA_ERROR_ENGINE_0_MASK 0x00000100u
+#define DMA_ERROR_ENGINE_0_SHIFT 8
+#define DMA_ERROR_ENGINE_1_MASK 0x00000200u
+#define DMA_ERROR_ENGINE_1_SHIFT 9
+#define A_ATR_EVT_POST_ERR_MASK 0x00010000u
+#define A_ATR_EVT_POST_ERR_SHIFT 16
+#define A_ATR_EVT_FETCH_ERR_MASK 0x00020000u
+#define A_ATR_EVT_FETCH_ERR_SHIFT 17
+#define A_ATR_EVT_DISCARD_ERR_MASK 0x00040000u
+#define A_ATR_EVT_DISCARD_ERR_SHIFT 18
+#define A_ATR_EVT_DOORBELL_MASK 0x00000000u
+#define A_ATR_EVT_DOORBELL_SHIFT 19
+#define P_ATR_EVT_POST_ERR_MASK 0x00100000u
+#define P_ATR_EVT_POST_ERR_SHIFT 20
+#define P_ATR_EVT_FETCH_ERR_MASK 0x00200000u
+#define P_ATR_EVT_FETCH_ERR_SHIFT 21
+#define P_ATR_EVT_DISCARD_ERR_MASK 0x00400000u
+#define P_ATR_EVT_DISCARD_ERR_SHIFT 22
+#define P_ATR_EVT_DOORBELL_MASK 0x00000000u
+#define P_ATR_EVT_DOORBELL_SHIFT 23
+#define PM_MSI_INT_INTA_MASK 0x01000000u
+#define PM_MSI_INT_INTA_SHIFT 24
+#define PM_MSI_INT_INTB_MASK 0x02000000u
+#define PM_MSI_INT_INTB_SHIFT 25
+#define PM_MSI_INT_INTC_MASK 0x04000000u
+#define PM_MSI_INT_INTC_SHIFT 26
+#define PM_MSI_INT_INTD_MASK 0x08000000u
+#define PM_MSI_INT_INTD_SHIFT 27
+#define PM_MSI_INT_INTX_MASK 0x0f000000u
+#define PM_MSI_INT_INTX_SHIFT 24
+#define PM_MSI_INT_MSI_MASK 0x10000000u
+#define PM_MSI_INT_MSI_SHIFT 28
+#define PM_MSI_INT_AER_EVT_MASK 0x20000000u
+#define PM_MSI_INT_AER_EVT_SHIFT 29
+#define PM_MSI_INT_EVENTS_MASK 0x40000000u
+#define PM_MSI_INT_EVENTS_SHIFT 30
+#define PM_MSI_INT_SYS_ERR_MASK 0x80000000u
+#define PM_MSI_INT_SYS_ERR_SHIFT 31
+#define SYS_AND_MSI_MASK GENMASK(31, 28)
+#define NUM_LOCAL_EVENTS 15
+#define ISTATUS_LOCAL 0x184
+#define IMASK_HOST 0x188
+#define ISTATUS_HOST 0x18c
+#define IMSI_ADDR 0x190
+#define ISTATUS_MSI 0x194
+#define PMSG_SUPPORT_RX 0x3f0
+#define PMSG_LTR_SUPPORT BIT(2)
+
+/* PCIe Master table init defines */
+#define ATR0_PCIE_WIN0_SRCADDR_PARAM 0x600u
+#define ATR0_PCIE_ATR_SIZE 0x25
+#define ATR0_PCIE_ATR_SIZE_SHIFT 1
+#define ATR0_PCIE_WIN0_SRC_ADDR 0x604u
+#define ATR0_PCIE_WIN0_TRSL_ADDR_LSB 0x608u
+#define ATR0_PCIE_WIN0_TRSL_ADDR_UDW 0x60cu
+#define ATR0_PCIE_WIN0_TRSL_PARAM 0x610u
+
+/* PCIe AXI slave table init defines */
+#define ATR0_AXI4_SLV0_SRCADDR_PARAM 0x800u
+#define ATR_SIZE_MASK GENMASK(6, 1)
+#define ATR_IMPL_ENABLE BIT(0)
+#define ATR0_AXI4_SLV0_SRC_ADDR 0x804u
+#define ATR0_AXI4_SLV0_TRSL_ADDR_LSB 0x808u
+#define ATR0_AXI4_SLV0_TRSL_ADDR_UDW 0x80cu
+#define ATR0_AXI4_SLV0_TRSL_PARAM 0x810u
+#define PCIE_TX_RX_INTERFACE 0x00000000u
+#define PCIE_CONFIG_INTERFACE 0x00000001u
+#define TRSL_ID_AXI4_MASTER_0 0x00000004u
+
+#define CONFIG_SPACE_ADDR_OFFSET 0x1000u
+
+#define ATR_ENTRY_SIZE 32
+
+enum plda_int_event {
+ PLDA_AXI_POST_ERR,
+ PLDA_AXI_FETCH_ERR,
+ PLDA_AXI_DISCARD_ERR,
+ PLDA_AXI_DOORBELL,
+ PLDA_PCIE_POST_ERR,
+ PLDA_PCIE_FETCH_ERR,
+ PLDA_PCIE_DISCARD_ERR,
+ PLDA_PCIE_DOORBELL,
+ PLDA_INTX,
+ PLDA_MSI,
+ PLDA_AER_EVENT,
+ PLDA_MISC_EVENTS,
+ PLDA_SYS_ERR,
+ PLDA_INT_EVENT_NUM
+};
+
+#define PLDA_NUM_DMA_EVENTS 16
+
+#define EVENT_PM_MSI_INT_INTX (PLDA_NUM_DMA_EVENTS + PLDA_INTX)
+#define EVENT_PM_MSI_INT_MSI (PLDA_NUM_DMA_EVENTS + PLDA_MSI)
+#define PLDA_MAX_EVENT_NUM (PLDA_NUM_DMA_EVENTS + PLDA_INT_EVENT_NUM)
+
+/*
+ * PLDA interrupt register
+ *
+ * 31 27 23 15 7 0
+ * +--+--+--+-+------+-+-+-+-+-+-+-+-+-----------+-----------+
+ * |12|11|10|9| intx |7|6|5|4|3|2|1|0| DMA error | DMA end |
+ * +--+--+--+-+------+-+-+-+-+-+-+-+-+-----------+-----------+
+ * event bit
+ * 0-7 (0-7) DMA interrupt end : reserved for vendor implementation
+ * 8-15 (8-15) DMA error : reserved for vendor implementation
+ * 16 (16) AXI post error (PLDA_AXI_POST_ERR)
+ * 17 (17) AXI fetch error (PLDA_AXI_FETCH_ERR)
+ * 18 (18) AXI discard error (PLDA_AXI_DISCARD_ERR)
+ * 19 (19) AXI doorbell (PLDA_AXI_DOORBELL)
+ * 20 (20) PCIe post error (PLDA_PCIE_POST_ERR)
+ * 21 (21) PCIe fetch error (PLDA_PCIE_FETCH_ERR)
+ * 22 (22) PCIe discard error (PLDA_PCIE_DISCARD_ERR)
+ * 23 (23) PCIe doorbell (PLDA_PCIE_DOORBELL)
+ * 24 (27-24) INTx interrupts (PLDA_INTX)
+ * 25 (28) MSI interrupt (PLDA_MSI)
+ * 26 (29) AER event (PLDA_AER_EVENT)
+ * 27 (30) PM/LTR/Hotplug (PLDA_MISC_EVENTS)
+ * 28 (31) System error (PLDA_SYS_ERR)
+ */
+
+struct plda_pcie_rp;
+
+struct plda_event_ops {
+ u32 (*get_events)(struct plda_pcie_rp *pcie);
+};
+
+struct plda_pcie_host_ops {
+ int (*host_init)(struct plda_pcie_rp *pcie);
+ void (*host_deinit)(struct plda_pcie_rp *pcie);
+};
+
+struct plda_msi {
+ struct mutex lock; /* Protect used bitmap */
+ struct irq_domain *msi_domain;
+ struct irq_domain *dev_domain;
+ u32 num_vectors;
+ u64 vector_phy;
+ DECLARE_BITMAP(used, PLDA_MAX_NUM_MSI_IRQS);
+};
+
+struct plda_pcie_rp {
+ struct device *dev;
+ struct pci_host_bridge *bridge;
+ struct irq_domain *intx_domain;
+ struct irq_domain *event_domain;
+ raw_spinlock_t lock;
+ struct plda_msi msi;
+ const struct plda_event_ops *event_ops;
+ const struct irq_chip *event_irq_chip;
+ const struct plda_pcie_host_ops *host_ops;
+ void __iomem *bridge_addr;
+ void __iomem *config_base;
+ unsigned long events_bitmap;
+ int irq;
+ int msi_irq;
+ int intx_irq;
+ int num_events;
+};
+
+struct plda_event {
+ int (*request_event_irq)(struct plda_pcie_rp *pcie,
+ int event_irq, int event);
+ int intx_event;
+ int msi_event;
+};
+
+void __iomem *plda_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
+ int where);
+int plda_init_interrupts(struct platform_device *pdev,
+ struct plda_pcie_rp *port,
+ const struct plda_event *event);
+void plda_pcie_setup_window(void __iomem *bridge_base_addr, u32 index,
+ phys_addr_t axi_addr, phys_addr_t pci_addr,
+ size_t size);
+void plda_pcie_setup_inbound_address_translation(struct plda_pcie_rp *port);
+int plda_pcie_setup_iomems(struct pci_host_bridge *bridge,
+ struct plda_pcie_rp *port);
+int plda_pcie_host_init(struct plda_pcie_rp *port, struct pci_ops *ops,
+ const struct plda_event *plda_event);
+void plda_pcie_host_deinit(struct plda_pcie_rp *pcie);
+
+static inline void plda_set_default_msi(struct plda_msi *msi)
+{
+ msi->vector_phy = IMSI_ADDR;
+ msi->num_vectors = PLDA_MAX_NUM_MSI_IRQS;
+}
+
+static inline void plda_pcie_enable_root_port(struct plda_pcie_rp *plda)
+{
+ u32 value;
+
+ value = readl_relaxed(plda->bridge_addr + GEN_SETTINGS);
+ value |= RP_ENABLE;
+ writel_relaxed(value, plda->bridge_addr + GEN_SETTINGS);
+}
+
+static inline void plda_pcie_set_standard_class(struct plda_pcie_rp *plda)
+{
+ u32 value;
+
+ /* set class code and reserve revision id */
+ value = readl_relaxed(plda->bridge_addr + PCIE_PCI_IDS_DW1);
+ value &= REVISION_ID_MASK;
+ value |= (PCI_CLASS_BRIDGE_PCI << IDS_CLASS_CODE_SHIFT);
+ writel_relaxed(value, plda->bridge_addr + PCIE_PCI_IDS_DW1);
+}
+
+static inline void plda_pcie_set_pref_win_64bit(struct plda_pcie_rp *plda)
+{
+ u32 value;
+
+ value = readl_relaxed(plda->bridge_addr + PCIE_WINROM);
+ value |= PREF_MEM_WIN_64_SUPPORT;
+ writel_relaxed(value, plda->bridge_addr + PCIE_WINROM);
+}
+
+static inline void plda_pcie_disable_ltr(struct plda_pcie_rp *plda)
+{
+ u32 value;
+
+ value = readl_relaxed(plda->bridge_addr + PMSG_SUPPORT_RX);
+ value &= ~PMSG_LTR_SUPPORT;
+ writel_relaxed(value, plda->bridge_addr + PMSG_SUPPORT_RX);
+}
+
+static inline void plda_pcie_disable_func(struct plda_pcie_rp *plda)
+{
+ u32 value;
+
+ value = readl_relaxed(plda->bridge_addr + PCI_MISC);
+ value |= PHY_FUNCTION_DIS;
+ writel_relaxed(value, plda->bridge_addr + PCI_MISC);
+}
+
+static inline void plda_pcie_write_rc_bar(struct plda_pcie_rp *plda, u64 val)
+{
+ void __iomem *addr = plda->bridge_addr + CONFIG_SPACE_ADDR_OFFSET;
+
+ writel_relaxed(lower_32_bits(val), addr + PCI_BASE_ADDRESS_0);
+ writel_relaxed(upper_32_bits(val), addr + PCI_BASE_ADDRESS_1);
+}
+#endif /* _PCIE_PLDA_H */
diff --git a/drivers/pci/controller/plda/pcie-starfive.c b/drivers/pci/controller/plda/pcie-starfive.c
new file mode 100644
index 000000000000..e73c1b7bc8ef
--- /dev/null
+++ b/drivers/pci/controller/plda/pcie-starfive.c
@@ -0,0 +1,492 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * PCIe host controller driver for the StarFive JH7110 SoC.
+ *
+ * Copyright (C) 2023 StarFive Technology Co., Ltd.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_pci.h>
+#include <linux/pci.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include "../../pci.h"
+
+#include "pcie-plda.h"
+
+#define PCIE_FUNC_NUM 4
+
+/* system control */
+#define STG_SYSCON_PCIE0_BASE 0x48
+#define STG_SYSCON_PCIE1_BASE 0x1f8
+
+#define STG_SYSCON_AR_OFFSET 0x78
+#define STG_SYSCON_AXI4_SLVL_AR_MASK GENMASK(22, 8)
+#define STG_SYSCON_AXI4_SLVL_PHY_AR(x) FIELD_PREP(GENMASK(20, 17), x)
+#define STG_SYSCON_AW_OFFSET 0x7c
+#define STG_SYSCON_AXI4_SLVL_AW_MASK GENMASK(14, 0)
+#define STG_SYSCON_AXI4_SLVL_PHY_AW(x) FIELD_PREP(GENMASK(12, 9), x)
+#define STG_SYSCON_CLKREQ BIT(22)
+#define STG_SYSCON_CKREF_SRC_MASK GENMASK(19, 18)
+#define STG_SYSCON_RP_NEP_OFFSET 0xe8
+#define STG_SYSCON_K_RP_NEP BIT(8)
+#define STG_SYSCON_LNKSTA_OFFSET 0x170
+#define DATA_LINK_ACTIVE BIT(5)
+
+/* Parameters for the waiting for link up routine */
+#define LINK_WAIT_MAX_RETRIES 10
+#define LINK_WAIT_USLEEP_MIN 90000
+#define LINK_WAIT_USLEEP_MAX 100000
+
+struct starfive_jh7110_pcie {
+ struct plda_pcie_rp plda;
+ struct reset_control *resets;
+ struct clk_bulk_data *clks;
+ struct regmap *reg_syscon;
+ struct gpio_desc *power_gpio;
+ struct gpio_desc *reset_gpio;
+ struct phy *phy;
+
+ unsigned int stg_pcie_base;
+ int num_clks;
+};
+
+/*
+ * The JH7110 PCIe port BAR0/1 can be configured as 64-bit prefetchable memory
+ * space. PCIe read and write requests targeting BAR0/1 are routed to the
+ * so-called 'Bridge Configuration space' in the PLDA IP datasheet, which
+ * contains the bridge internal registers, such as interrupt, DMA and ATU
+ * registers. The JH7110 accesses the Bridge Configuration space via the local
+ * bus, and the bridge internal registers must not be reachable by DMA from EP
+ * devices. Thus, BAR0/1 are unimplemented and are hidden here.
+ */
+static bool starfive_pcie_hide_rc_bar(struct pci_bus *bus, unsigned int devfn,
+ int offset)
+{
+ if (pci_is_root_bus(bus) && !devfn &&
+ (offset == PCI_BASE_ADDRESS_0 || offset == PCI_BASE_ADDRESS_1))
+ return true;
+
+ return false;
+}
+
+static int starfive_pcie_config_write(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 value)
+{
+ if (starfive_pcie_hide_rc_bar(bus, devfn, where))
+ return PCIBIOS_SUCCESSFUL;
+
+ return pci_generic_config_write(bus, devfn, where, size, value);
+}
+
+static int starfive_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *value)
+{
+ if (starfive_pcie_hide_rc_bar(bus, devfn, where)) {
+ *value = 0;
+ return PCIBIOS_SUCCESSFUL;
+ }
+
+ return pci_generic_config_read(bus, devfn, where, size, value);
+}
+
+static int starfive_pcie_parse_dt(struct starfive_jh7110_pcie *pcie,
+ struct device *dev)
+{
+ int domain_nr;
+
+ pcie->num_clks = devm_clk_bulk_get_all(dev, &pcie->clks);
+ if (pcie->num_clks < 0)
+ return dev_err_probe(dev, pcie->num_clks,
+ "failed to get pcie clocks\n");
+
+ pcie->resets = devm_reset_control_array_get_exclusive(dev);
+ if (IS_ERR(pcie->resets))
+ return dev_err_probe(dev, PTR_ERR(pcie->resets),
+ "failed to get pcie resets");
+
+ pcie->reg_syscon =
+ syscon_regmap_lookup_by_phandle(dev->of_node,
+ "starfive,stg-syscon");
+ if (IS_ERR(pcie->reg_syscon))
+ return dev_err_probe(dev, PTR_ERR(pcie->reg_syscon),
+ "failed to parse starfive,stg-syscon\n");
+
+ pcie->phy = devm_phy_optional_get(dev, NULL);
+ if (IS_ERR(pcie->phy))
+ return dev_err_probe(dev, PTR_ERR(pcie->phy),
+ "failed to get pcie phy\n");
+
+ /*
+ * The PCIe domain numbers are statically assigned in the JH7110 DTS.
+ * As the STG system controller defines different register bases for
+ * PCIe RP0 and RP1, use the domain number to identify which controller
+ * is being initialized.
+ */
+ domain_nr = of_get_pci_domain_nr(dev->of_node);
+
+ if (domain_nr < 0 || domain_nr > 1)
+ return dev_err_probe(dev, -ENODEV,
+ "failed to get valid pcie domain\n");
+
+ if (domain_nr == 0)
+ pcie->stg_pcie_base = STG_SYSCON_PCIE0_BASE;
+ else
+ pcie->stg_pcie_base = STG_SYSCON_PCIE1_BASE;
+
+ pcie->reset_gpio = devm_gpiod_get_optional(dev, "perst",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(pcie->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(pcie->reset_gpio),
+ "failed to get perst-gpio\n");
+
+ pcie->power_gpio = devm_gpiod_get_optional(dev, "enable",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(pcie->power_gpio))
+ return dev_err_probe(dev, PTR_ERR(pcie->power_gpio),
+ "failed to get power-gpio\n");
+
+ return 0;
+}
+
+static struct pci_ops starfive_pcie_ops = {
+ .map_bus = plda_pcie_map_bus,
+ .read = starfive_pcie_config_read,
+ .write = starfive_pcie_config_write,
+};
+
+static int starfive_pcie_clk_rst_init(struct starfive_jh7110_pcie *pcie)
+{
+ struct device *dev = pcie->plda.dev;
+ int ret;
+
+ ret = clk_bulk_prepare_enable(pcie->num_clks, pcie->clks);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to enable clocks\n");
+
+ ret = reset_control_deassert(pcie->resets);
+ if (ret) {
+ clk_bulk_disable_unprepare(pcie->num_clks, pcie->clks);
+ dev_err_probe(dev, ret, "failed to deassert resets\n");
+ }
+
+ return ret;
+}
+
+static void starfive_pcie_clk_rst_deinit(struct starfive_jh7110_pcie *pcie)
+{
+ reset_control_assert(pcie->resets);
+ clk_bulk_disable_unprepare(pcie->num_clks, pcie->clks);
+}
+
+static bool starfive_pcie_link_up(struct plda_pcie_rp *plda)
+{
+ struct starfive_jh7110_pcie *pcie =
+ container_of(plda, struct starfive_jh7110_pcie, plda);
+ int ret;
+ u32 stg_reg_val;
+
+ ret = regmap_read(pcie->reg_syscon,
+ pcie->stg_pcie_base + STG_SYSCON_LNKSTA_OFFSET,
+ &stg_reg_val);
+ if (ret) {
+ dev_err(pcie->plda.dev, "failed to read link status\n");
+ return false;
+ }
+
+ return !!(stg_reg_val & DATA_LINK_ACTIVE);
+}
+
+static int starfive_pcie_host_wait_for_link(struct starfive_jh7110_pcie *pcie)
+{
+ int retries;
+
+ /* Check if the link is up or not */
+ for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
+ if (starfive_pcie_link_up(&pcie->plda)) {
+ dev_info(pcie->plda.dev, "port link up\n");
+ return 0;
+ }
+ usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int starfive_pcie_enable_phy(struct device *dev,
+ struct starfive_jh7110_pcie *pcie)
+{
+ int ret;
+
+ if (!pcie->phy)
+ return 0;
+
+ ret = phy_init(pcie->phy);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to initialize pcie phy\n");
+
+ ret = phy_set_mode(pcie->phy, PHY_MODE_PCIE);
+ if (ret) {
+ dev_err_probe(dev, ret, "failed to set pcie mode\n");
+ goto err_phy_on;
+ }
+
+ ret = phy_power_on(pcie->phy);
+ if (ret) {
+ dev_err_probe(dev, ret, "failed to power on pcie phy\n");
+ goto err_phy_on;
+ }
+
+ return 0;
+
+err_phy_on:
+ phy_exit(pcie->phy);
+ return ret;
+}
+
+static void starfive_pcie_disable_phy(struct starfive_jh7110_pcie *pcie)
+{
+ phy_power_off(pcie->phy);
+ phy_exit(pcie->phy);
+}
+
+static void starfive_pcie_host_deinit(struct plda_pcie_rp *plda)
+{
+ struct starfive_jh7110_pcie *pcie =
+ container_of(plda, struct starfive_jh7110_pcie, plda);
+
+ starfive_pcie_clk_rst_deinit(pcie);
+ if (pcie->power_gpio)
+ gpiod_set_value_cansleep(pcie->power_gpio, 0);
+ starfive_pcie_disable_phy(pcie);
+}
+
+static int starfive_pcie_host_init(struct plda_pcie_rp *plda)
+{
+ struct starfive_jh7110_pcie *pcie =
+ container_of(plda, struct starfive_jh7110_pcie, plda);
+ struct device *dev = plda->dev;
+ int ret;
+ int i;
+
+ ret = starfive_pcie_enable_phy(dev, pcie);
+ if (ret)
+ return ret;
+
+ regmap_update_bits(pcie->reg_syscon,
+ pcie->stg_pcie_base + STG_SYSCON_RP_NEP_OFFSET,
+ STG_SYSCON_K_RP_NEP, STG_SYSCON_K_RP_NEP);
+
+ regmap_update_bits(pcie->reg_syscon,
+ pcie->stg_pcie_base + STG_SYSCON_AW_OFFSET,
+ STG_SYSCON_CKREF_SRC_MASK,
+ FIELD_PREP(STG_SYSCON_CKREF_SRC_MASK, 2));
+
+ regmap_update_bits(pcie->reg_syscon,
+ pcie->stg_pcie_base + STG_SYSCON_AW_OFFSET,
+ STG_SYSCON_CLKREQ, STG_SYSCON_CLKREQ);
+
+ ret = starfive_pcie_clk_rst_init(pcie);
+ if (ret)
+ return ret;
+
+ if (pcie->power_gpio)
+ gpiod_set_value_cansleep(pcie->power_gpio, 1);
+
+ if (pcie->reset_gpio)
+ gpiod_set_value_cansleep(pcie->reset_gpio, 1);
+
+ /* Disable physical functions except #0 */
+ for (i = 1; i < PCIE_FUNC_NUM; i++) {
+ regmap_update_bits(pcie->reg_syscon,
+ pcie->stg_pcie_base + STG_SYSCON_AR_OFFSET,
+ STG_SYSCON_AXI4_SLVL_AR_MASK,
+ STG_SYSCON_AXI4_SLVL_PHY_AR(i));
+
+ regmap_update_bits(pcie->reg_syscon,
+ pcie->stg_pcie_base + STG_SYSCON_AW_OFFSET,
+ STG_SYSCON_AXI4_SLVL_AW_MASK,
+ STG_SYSCON_AXI4_SLVL_PHY_AW(i));
+
+ plda_pcie_disable_func(plda);
+ }
+
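+ /* Switch the AXI address routing back to physical function 0 */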
+ regmap_update_bits(pcie->reg_syscon,
+ pcie->stg_pcie_base + STG_SYSCON_AR_OFFSET,
+ STG_SYSCON_AXI4_SLVL_AR_MASK, 0);
+ regmap_update_bits(pcie->reg_syscon,
+ pcie->stg_pcie_base + STG_SYSCON_AW_OFFSET,
+ STG_SYSCON_AXI4_SLVL_AW_MASK, 0);
+
+ plda_pcie_enable_root_port(plda);
+ plda_pcie_write_rc_bar(plda, 0);
+
+ /* PCIe PCI Standard Configuration Identification Settings. */
+ plda_pcie_set_standard_class(plda);
+
+ /*
+ * LTR message reception is enabled by default via the "PCIe Message
+ * Reception" register, but the forwarding ID and address are left
+ * uninitialized. If LTR message forwarding is not disabled here, or a
+ * valid forwarding address is not set, the kernel gets stuck.
+ * Work around this by disabling LTR message forwarding until the
+ * feature is properly configured.
+ */
+ plda_pcie_disable_ltr(plda);
+
+ /*
+ * Enable 64-bit addressing for the prefetchable memory window on the
+ * JH7110. The 64-bit prefetchable address translations in the ATU only
+ * take effect once this register setting is enabled.
+ */
+ plda_pcie_set_pref_win_64bit(plda);
+
+ /*
+ * Ensure that PERST# has been asserted for at least 100 ms; the sleep
+ * value is T_PVPERL from PCIe CEM spec r2.0 (Table 2-4).
+ */
+ msleep(100);
+ if (pcie->reset_gpio)
+ gpiod_set_value_cansleep(pcie->reset_gpio, 0);
+
+ /*
+ * With a Downstream Port (<=5GT/s), software must wait a minimum
+ * of 100ms following exit from a conventional reset before
+ * sending a configuration request to the device.
+ */
+ msleep(PCIE_RESET_CONFIG_DEVICE_WAIT_MS);
+
+ if (starfive_pcie_host_wait_for_link(pcie))
+ dev_info(dev, "port link down\n");
+
+ return 0;
+}
+
+static const struct plda_pcie_host_ops sf_host_ops = {
+ .host_init = starfive_pcie_host_init,
+ .host_deinit = starfive_pcie_host_deinit,
+};
+
+static const struct plda_event stf_pcie_event = {
+ .intx_event = EVENT_PM_MSI_INT_INTX,
+ .msi_event = EVENT_PM_MSI_INT_MSI
+};
+
+static int starfive_pcie_probe(struct platform_device *pdev)
+{
+ struct starfive_jh7110_pcie *pcie;
+ struct device *dev = &pdev->dev;
+ struct plda_pcie_rp *plda;
+ int ret;
+
+ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+ if (!pcie)
+ return -ENOMEM;
+
+ plda = &pcie->plda;
+ plda->dev = dev;
+
+ ret = starfive_pcie_parse_dt(pcie, dev);
+ if (ret)
+ return ret;
+
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_get_sync(&pdev->dev);
+
+ plda->host_ops = &sf_host_ops;
+ plda->num_events = PLDA_MAX_EVENT_NUM;
+ /* enable all interrupt events except the AXI and PCIe doorbells */
+ plda->events_bitmap = GENMASK(PLDA_INT_EVENT_NUM - 1, 0)
+ & ~BIT(PLDA_AXI_DOORBELL)
+ & ~BIT(PLDA_PCIE_DOORBELL);
+ plda->events_bitmap <<= PLDA_NUM_DMA_EVENTS;
+ ret = plda_pcie_host_init(&pcie->plda, &starfive_pcie_ops,
+ &stf_pcie_event);
+ if (ret) {
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, pcie);
+
+ return 0;
+}
+
+static void starfive_pcie_remove(struct platform_device *pdev)
+{
+ struct starfive_jh7110_pcie *pcie = platform_get_drvdata(pdev);
+
+ pm_runtime_put(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ plda_pcie_host_deinit(&pcie->plda);
+ platform_set_drvdata(pdev, NULL);
+}
+
+static int starfive_pcie_suspend_noirq(struct device *dev)
+{
+ struct starfive_jh7110_pcie *pcie = dev_get_drvdata(dev);
+
+ clk_bulk_disable_unprepare(pcie->num_clks, pcie->clks);
+ starfive_pcie_disable_phy(pcie);
+
+ return 0;
+}
+
+static int starfive_pcie_resume_noirq(struct device *dev)
+{
+ struct starfive_jh7110_pcie *pcie = dev_get_drvdata(dev);
+ int ret;
+
+ ret = starfive_pcie_enable_phy(dev, pcie);
+ if (ret)
+ return ret;
+
+ ret = clk_bulk_prepare_enable(pcie->num_clks, pcie->clks);
+ if (ret) {
+ dev_err(dev, "failed to enable clocks\n");
+ starfive_pcie_disable_phy(pcie);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops starfive_pcie_pm_ops = {
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(starfive_pcie_suspend_noirq,
+ starfive_pcie_resume_noirq)
+};
+
+static const struct of_device_id starfive_pcie_of_match[] = {
+ { .compatible = "starfive,jh7110-pcie", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, starfive_pcie_of_match);
+
+static struct platform_driver starfive_pcie_driver = {
+ .driver = {
+ .name = "pcie-starfive",
+ .of_match_table = of_match_ptr(starfive_pcie_of_match),
+ .pm = pm_sleep_ptr(&starfive_pcie_pm_ops),
+ },
+ .probe = starfive_pcie_probe,
+ .remove = starfive_pcie_remove,
+};
+module_platform_driver(starfive_pcie_driver);
+
+MODULE_DESCRIPTION("StarFive JH7110 PCIe host driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
index 87b7856f375a..8df064b62a2f 100644
--- a/drivers/pci/controller/vmd.c
+++ b/drivers/pci/controller/vmd.c
@@ -17,6 +17,8 @@
#include <linux/rculist.h>
#include <linux/rcupdate.h>
+#include <xen/xen.h>
+
#include <asm/irqdomain.h>
#define VMD_CFGBAR 0
@@ -125,7 +127,7 @@ struct vmd_irq_list {
struct vmd_dev {
struct pci_dev *dev;
- spinlock_t cfg_lock;
+ raw_spinlock_t cfg_lock;
void __iomem *cfgbar;
int msix_count;
@@ -204,22 +206,11 @@ static void vmd_irq_disable(struct irq_data *data)
raw_spin_unlock_irqrestore(&list_lock, flags);
}
-/*
- * XXX: Stubbed until we develop acceptable way to not create conflicts with
- * other devices sharing the same vector.
- */
-static int vmd_irq_set_affinity(struct irq_data *data,
- const struct cpumask *dest, bool force)
-{
- return -EINVAL;
-}
-
static struct irq_chip vmd_msi_controller = {
.name = "VMD-MSI",
.irq_enable = vmd_irq_enable,
.irq_disable = vmd_irq_disable,
.irq_compose_msi_msg = vmd_compose_msi_msg,
- .irq_set_affinity = vmd_irq_set_affinity,
};
static irq_hw_number_t vmd_get_hwirq(struct msi_domain_info *info,
@@ -326,7 +317,7 @@ static struct msi_domain_ops vmd_msi_domain_ops = {
static struct msi_domain_info vmd_msi_domain_info = {
.flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_PCI_MSIX,
+ MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX,
.ops = &vmd_msi_domain_ops,
.chip = &vmd_msi_controller,
};
@@ -402,7 +393,7 @@ static int vmd_pci_read(struct pci_bus *bus, unsigned int devfn, int reg,
if (!addr)
return -EFAULT;
- spin_lock_irqsave(&vmd->cfg_lock, flags);
+ raw_spin_lock_irqsave(&vmd->cfg_lock, flags);
switch (len) {
case 1:
*value = readb(addr);
@@ -417,7 +408,7 @@ static int vmd_pci_read(struct pci_bus *bus, unsigned int devfn, int reg,
ret = -EINVAL;
break;
}
- spin_unlock_irqrestore(&vmd->cfg_lock, flags);
+ raw_spin_unlock_irqrestore(&vmd->cfg_lock, flags);
return ret;
}
@@ -437,7 +428,7 @@ static int vmd_pci_write(struct pci_bus *bus, unsigned int devfn, int reg,
if (!addr)
return -EFAULT;
- spin_lock_irqsave(&vmd->cfg_lock, flags);
+ raw_spin_lock_irqsave(&vmd->cfg_lock, flags);
switch (len) {
case 1:
writeb(value, addr);
@@ -455,7 +446,7 @@ static int vmd_pci_write(struct pci_bus *bus, unsigned int devfn, int reg,
ret = -EINVAL;
break;
}
- spin_unlock_irqrestore(&vmd->cfg_lock, flags);
+ raw_spin_unlock_irqrestore(&vmd->cfg_lock, flags);
return ret;
}
@@ -751,11 +742,9 @@ static int vmd_pm_enable_quirk(struct pci_dev *pdev, void *userdata)
if (!(features & VMD_FEAT_BIOS_PM_QUIRK))
return 0;
- pci_enable_link_state_locked(pdev, PCIE_LINK_STATE_ALL);
-
pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_LTR);
if (!pos)
- return 0;
+ goto out_state_change;
/*
* Skip if the max snoop LTR is non-zero, indicating BIOS has set it
@@ -763,7 +752,7 @@ static int vmd_pm_enable_quirk(struct pci_dev *pdev, void *userdata)
*/
pci_read_config_dword(pdev, pos + PCI_LTR_MAX_SNOOP_LAT, &ltr_reg);
if (!!(ltr_reg & (PCI_LTR_VALUE_MASK | PCI_LTR_SCALE_MASK)))
- return 0;
+ goto out_state_change;
/*
* Set the default values to the maximum required by the platform to
@@ -775,6 +764,13 @@ static int vmd_pm_enable_quirk(struct pci_dev *pdev, void *userdata)
pci_write_config_dword(pdev, pos + PCI_LTR_MAX_SNOOP_LAT, ltr_reg);
pci_info(pdev, "VMD: Default LTR value set by driver\n");
+out_state_change:
+ /*
+ * Ensure devices are in D0 before enabling PCI-PM L1 PM Substates, per
+ * PCIe r6.0, sec 5.5.4.
+ */
+ pci_set_power_state_locked(pdev, PCI_D0);
+ pci_enable_link_state_locked(pdev, PCIE_LINK_STATE_ALL);
return 0;
}
@@ -925,6 +921,9 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
dev_set_msi_domain(&vmd->bus->dev,
dev_get_msi_domain(&vmd->dev->dev));
+ WARN(sysfs_create_link(&vmd->dev->dev.kobj, &vmd->bus->dev.kobj,
+ "domain"), "Can't create symlink to domain\n");
+
vmd_acpi_begin();
pci_scan_child_bus(vmd->bus);
@@ -964,9 +963,6 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
pci_bus_add_devices(vmd->bus);
vmd_acpi_end();
-
- WARN(sysfs_create_link(&vmd->dev->dev.kobj, &vmd->bus->dev.kobj,
- "domain"), "Can't create symlink to domain\n");
return 0;
}
@@ -976,6 +972,24 @@ static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id)
struct vmd_dev *vmd;
int err;
+ if (xen_domain()) {
+ /*
+ * Xen doesn't have knowledge about devices in the VMD bus
+ * because the config space of devices behind the VMD bridge is
+ * not known to Xen, and hence Xen cannot discover or configure
+ * them in any way.
+ *
+ * Bypass of MSI remapping won't work in that case as direct
+ * write by Linux to the MSI entries won't result in functional
+ * interrupts, as Xen is the entity that manages the host
+ * interrupt controller and must configure interrupts. However
+ * multiplexing of interrupts by the VMD bridge will work under
+ * Xen, so force the usage of that mode which must always be
+ * supported by VMD bridges.
+ */
+ features &= ~VMD_FEAT_CAN_BYPASS_MSI_REMAP;
+ }
+
if (resource_size(&dev->resource[VMD_CFGBAR]) < (1 << 20))
return -ENOMEM;
@@ -1015,7 +1029,7 @@ static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id)
if (features & VMD_FEAT_OFFSET_FIRST_VECTOR)
vmd->first_vec = 1;
- spin_lock_init(&vmd->cfg_lock);
+ raw_spin_lock_init(&vmd->cfg_lock);
pci_set_drvdata(dev, vmd);
err = vmd_enable_domain(vmd, features);
if (err)
@@ -1042,8 +1056,8 @@ static void vmd_remove(struct pci_dev *dev)
{
struct vmd_dev *vmd = pci_get_drvdata(dev);
- sysfs_remove_link(&vmd->dev->dev.kobj, "domain");
pci_stop_root_bus(vmd->bus);
+ sysfs_remove_link(&vmd->dev->dev.kobj, "domain");
pci_remove_root_bus(vmd->bus);
vmd_cleanup_srcu(vmd);
vmd_detach_resources(vmd);
@@ -1053,9 +1067,9 @@ static void vmd_remove(struct pci_dev *dev)
static void vmd_shutdown(struct pci_dev *dev)
{
- struct vmd_dev *vmd = pci_get_drvdata(dev);
+ struct vmd_dev *vmd = pci_get_drvdata(dev);
- vmd_remove_irq_domain(vmd);
+ vmd_remove_irq_domain(vmd);
}
#ifdef CONFIG_PM_SLEEP
@@ -1111,6 +1125,10 @@ static const struct pci_device_id vmd_ids[] = {
.driver_data = VMD_FEATS_CLIENT,},
{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_VMD_9A0B),
.driver_data = VMD_FEATS_CLIENT,},
+ {PCI_VDEVICE(INTEL, 0xb60b),
+ .driver_data = VMD_FEATS_CLIENT,},
+ {PCI_VDEVICE(INTEL, 0xb06f),
+ .driver_data = VMD_FEATS_CLIENT,},
{0,}
};
MODULE_DEVICE_TABLE(pci, vmd_ids);
@@ -1128,5 +1146,6 @@ static struct pci_driver vmd_drv = {
module_pci_driver(vmd_drv);
MODULE_AUTHOR("Intel Corporation");
+MODULE_DESCRIPTION("Volume Management Device driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION("0.6");