Diffstat (limited to 'drivers/pci/controller/cadence')
-rw-r--r--  drivers/pci/controller/cadence/Kconfig             |  18
-rw-r--r--  drivers/pci/controller/cadence/pci-j721e.c         | 216
-rw-r--r--  drivers/pci/controller/cadence/pcie-cadence-ep.c   |  54
-rw-r--r--  drivers/pci/controller/cadence/pcie-cadence-host.c | 168
-rw-r--r--  drivers/pci/controller/cadence/pcie-cadence.c      |  16
-rw-r--r--  drivers/pci/controller/cadence/pcie-cadence.h      |  40
6 files changed, 411 insertions(+), 101 deletions(-)
diff --git a/drivers/pci/controller/cadence/Kconfig b/drivers/pci/controller/cadence/Kconfig
index 1d5a70c9055e..666e16b6367f 100644
--- a/drivers/pci/controller/cadence/Kconfig
+++ b/drivers/pci/controller/cadence/Kconfig
@@ -4,16 +4,16 @@ menu "Cadence-based PCIe controllers"
depends on PCI
config PCIE_CADENCE
- bool
+ tristate
config PCIE_CADENCE_HOST
- bool
+ tristate
depends on OF
select IRQ_DOMAIN
select PCIE_CADENCE
config PCIE_CADENCE_EP
- bool
+ tristate
depends on OF
depends on PCI_ENDPOINT
select PCIE_CADENCE
@@ -38,18 +38,19 @@ config PCIE_CADENCE_PLAT_EP
select PCIE_CADENCE_EP
select PCIE_CADENCE_PLAT
help
- Say Y here if you want to support the Cadence PCIe platform controller in
+ Say Y here if you want to support the Cadence PCIe platform controller in
endpoint mode. This PCIe controller may be embedded into many
different vendors SoCs.
config PCI_J721E
- bool
+ tristate
+ select PCIE_CADENCE_HOST if PCI_J721E_HOST != n
+ select PCIE_CADENCE_EP if PCI_J721E_EP != n
config PCI_J721E_HOST
- bool "TI J721E PCIe controller (host mode)"
+ tristate "TI J721E PCIe controller (host mode)"
depends on ARCH_K3 || COMPILE_TEST
depends on OF
- select PCIE_CADENCE_HOST
select PCI_J721E
help
Say Y here if you want to support the TI J721E PCIe platform
@@ -57,11 +58,10 @@ config PCI_J721E_HOST
core.
config PCI_J721E_EP
- bool "TI J721E PCIe controller (endpoint mode)"
+ tristate "TI J721E PCIe controller (endpoint mode)"
depends on ARCH_K3 || COMPILE_TEST
depends on OF
depends on PCI_ENDPOINT
- select PCIE_CADENCE_EP
select PCI_J721E
help
Say Y here if you want to support the TI J721E PCIe platform
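
Note: the Kconfig hunk above turns the Cadence core, host, and endpoint libraries into tristate options and moves the PCIE_CADENCE_HOST/EP selection onto the umbrella PCI_J721E symbol. The probe-time guards later in this patch switch to IS_ENABLED(CONFIG_PCI_J721E_HOST/EP), which is true for both =y and =m. A minimal sketch of that guard pattern follows; the enum and function names are local stand-ins, not part of the patch.

        /*
         * Illustrative sketch only: EXAMPLE_MODE_* stands in for the
         * driver's PCI_MODE_* values. IS_ENABLED(CONFIG_FOO) evaluates
         * to 1 for both CONFIG_FOO=y and CONFIG_FOO=m, which is what
         * keeps these checks working once the options become tristate.
         */
        #include <linux/kconfig.h>
        #include <linux/errno.h>

        enum example_pci_mode { EXAMPLE_MODE_RC, EXAMPLE_MODE_EP };

        static int example_check_mode_support(enum example_pci_mode mode)
        {
                if (mode == EXAMPLE_MODE_RC && !IS_ENABLED(CONFIG_PCI_J721E_HOST))
                        return -ENODEV; /* host support not built at all */
                if (mode == EXAMPLE_MODE_EP && !IS_ENABLED(CONFIG_PCI_J721E_EP))
                        return -ENODEV; /* endpoint support not built at all */
                return 0;
        }
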
diff --git a/drivers/pci/controller/cadence/pci-j721e.c b/drivers/pci/controller/cadence/pci-j721e.c
index 85718246016b..6c93f39d0288 100644
--- a/drivers/pci/controller/cadence/pci-j721e.c
+++ b/drivers/pci/controller/cadence/pci-j721e.c
@@ -7,12 +7,15 @@
*/
#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/container_of.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/io.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/mfd/syscon.h>
+#include <linux/module.h>
#include <linux/of.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
@@ -22,7 +25,10 @@
#include "../../pci.h"
#include "pcie-cadence.h"
+#define cdns_pcie_to_rc(p) container_of(p, struct cdns_pcie_rc, pcie)
+
#define ENABLE_REG_SYS_2 0x108
+#define ENABLE_CLR_REG_SYS_2 0x308
#define STATUS_REG_SYS_2 0x508
#define STATUS_CLR_REG_SYS_2 0x708
#define LINK_DOWN BIT(1)
@@ -44,6 +50,7 @@ enum link_status {
#define J721E_MODE_RC BIT(7)
#define LANE_COUNT(n) ((n) << 8)
+#define ACSPCIE_PAD_DISABLE_MASK GENMASK(1, 0)
#define GENERATION_SEL_MASK GENMASK(1, 0)
struct j721e_pcie {
@@ -52,6 +59,7 @@ struct j721e_pcie {
u32 mode;
u32 num_lanes;
u32 max_lanes;
+ struct gpio_desc *reset_gpio;
void __iomem *user_cfg_base;
void __iomem *intd_cfg_base;
u32 linkdown_irq_regfield;
@@ -110,6 +118,15 @@ static irqreturn_t j721e_pcie_link_irq_handler(int irq, void *priv)
return IRQ_HANDLED;
}
+static void j721e_pcie_disable_link_irq(struct j721e_pcie *pcie)
+{
+ u32 reg;
+
+ reg = j721e_pcie_intd_readl(pcie, ENABLE_CLR_REG_SYS_2);
+ reg |= pcie->linkdown_irq_regfield;
+ j721e_pcie_intd_writel(pcie, ENABLE_CLR_REG_SYS_2, reg);
+}
+
static void j721e_pcie_config_link_irq(struct j721e_pcie *pcie)
{
u32 reg;
@@ -147,11 +164,7 @@ static bool j721e_pcie_link_up(struct cdns_pcie *cdns_pcie)
u32 reg;
reg = j721e_pcie_user_readl(pcie, J721E_PCIE_USER_LINKSTATUS);
- reg &= LINK_STATUS;
- if (reg == LINK_UP_DL_COMPLETED)
- return true;
-
- return false;
+ return (reg & LINK_STATUS) == LINK_UP_DL_COMPLETED;
}
static const struct cdns_pcie_ops j721e_pcie_ops = {
@@ -220,6 +233,36 @@ static int j721e_pcie_set_lane_count(struct j721e_pcie *pcie,
return ret;
}
+static int j721e_enable_acspcie_refclk(struct j721e_pcie *pcie,
+ struct regmap *syscon)
+{
+ struct device *dev = pcie->cdns_pcie->dev;
+ struct device_node *node = dev->of_node;
+ u32 mask = ACSPCIE_PAD_DISABLE_MASK;
+ struct of_phandle_args args;
+ u32 val;
+ int ret;
+
+ ret = of_parse_phandle_with_fixed_args(node,
+ "ti,syscon-acspcie-proxy-ctrl",
+ 1, 0, &args);
+ if (ret) {
+ dev_err(dev,
+ "ti,syscon-acspcie-proxy-ctrl has invalid arguments\n");
+ return ret;
+ }
+
+ /* Clear PAD IO disable bits to enable refclk output */
+ val = ~(args.args[0]);
+ ret = regmap_update_bits(syscon, 0, mask, val);
+ if (ret) {
+ dev_err(dev, "failed to enable ACSPCIE refclk: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
static int j721e_pcie_ctrl_init(struct j721e_pcie *pcie)
{
struct device *dev = pcie->cdns_pcie->dev;
@@ -259,7 +302,13 @@ static int j721e_pcie_ctrl_init(struct j721e_pcie *pcie)
return ret;
}
- return 0;
+ /* Enable ACSPCIE refclk output if the optional property exists */
+ syscon = syscon_regmap_lookup_by_phandle_optional(node,
+ "ti,syscon-acspcie-proxy-ctrl");
+ if (!syscon)
+ return 0;
+
+ return j721e_enable_acspcie_refclk(pcie, syscon);
}
static int cdns_ti_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
@@ -313,6 +362,7 @@ static const struct j721e_pcie_data j7200_pcie_rc_data = {
static const struct j721e_pcie_data j7200_pcie_ep_data = {
.mode = PCI_MODE_EP,
.quirk_detect_quiet_flag = true,
+ .linkdown_irq_regfield = J7200_LINK_DOWN,
.quirk_disable_flr = true,
.max_lanes = 2,
};
@@ -334,16 +384,23 @@ static const struct j721e_pcie_data j784s4_pcie_rc_data = {
.mode = PCI_MODE_RC,
.quirk_retrain_flag = true,
.byte_access_allowed = false,
- .linkdown_irq_regfield = LINK_DOWN,
+ .linkdown_irq_regfield = J7200_LINK_DOWN,
.max_lanes = 4,
};
static const struct j721e_pcie_data j784s4_pcie_ep_data = {
.mode = PCI_MODE_EP,
- .linkdown_irq_regfield = LINK_DOWN,
+ .linkdown_irq_regfield = J7200_LINK_DOWN,
.max_lanes = 4,
};
+static const struct j721e_pcie_data j722s_pcie_rc_data = {
+ .mode = PCI_MODE_RC,
+ .linkdown_irq_regfield = J7200_LINK_DOWN,
+ .byte_access_allowed = true,
+ .max_lanes = 1,
+};
+
static const struct of_device_id of_j721e_pcie_match[] = {
{
.compatible = "ti,j721e-pcie-host",
@@ -377,6 +434,10 @@ static const struct of_device_id of_j721e_pcie_match[] = {
.compatible = "ti,j784s4-pcie-ep",
.data = &j784s4_pcie_ep_data,
},
+ {
+ .compatible = "ti,j722s-pcie-host",
+ .data = &j722s_pcie_rc_data,
+ },
{},
};
@@ -410,7 +471,7 @@ static int j721e_pcie_probe(struct platform_device *pdev)
switch (mode) {
case PCI_MODE_RC:
- if (!IS_ENABLED(CONFIG_PCIE_CADENCE_HOST))
+ if (!IS_ENABLED(CONFIG_PCI_J721E_HOST))
return -ENODEV;
bridge = devm_pci_alloc_host_bridge(dev, sizeof(*rc));
@@ -429,7 +490,7 @@ static int j721e_pcie_probe(struct platform_device *pdev)
pcie->cdns_pcie = cdns_pcie;
break;
case PCI_MODE_EP:
- if (!IS_ENABLED(CONFIG_PCIE_CADENCE_EP))
+ if (!IS_ENABLED(CONFIG_PCI_J721E_EP))
return -ENODEV;
ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL);
@@ -482,20 +543,20 @@ static int j721e_pcie_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
ret = pm_runtime_get_sync(dev);
if (ret < 0) {
- dev_err(dev, "pm_runtime_get_sync failed\n");
+ dev_err_probe(dev, ret, "pm_runtime_get_sync failed\n");
goto err_get_sync;
}
ret = j721e_pcie_ctrl_init(pcie);
if (ret < 0) {
- dev_err(dev, "pm_runtime_get_sync failed\n");
+ dev_err_probe(dev, ret, "pm_runtime_get_sync failed\n");
goto err_get_sync;
}
ret = devm_request_irq(dev, irq, j721e_pcie_link_irq_handler, 0,
"j721e-pcie-link-down-irq", pcie);
if (ret < 0) {
- dev_err(dev, "failed to request link state IRQ %d\n", irq);
+ dev_err_probe(dev, ret, "failed to request link state IRQ %d\n", irq);
goto err_get_sync;
}
@@ -505,42 +566,39 @@ static int j721e_pcie_probe(struct platform_device *pdev)
case PCI_MODE_RC:
gpiod = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(gpiod)) {
- ret = PTR_ERR(gpiod);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "Failed to get reset GPIO\n");
+ ret = dev_err_probe(dev, PTR_ERR(gpiod), "Failed to get reset GPIO\n");
goto err_get_sync;
}
+ pcie->reset_gpio = gpiod;
ret = cdns_pcie_init_phy(dev, cdns_pcie);
if (ret) {
- dev_err(dev, "Failed to init phy\n");
+ dev_err_probe(dev, ret, "Failed to init phy\n");
goto err_get_sync;
}
clk = devm_clk_get_optional(dev, "pcie_refclk");
if (IS_ERR(clk)) {
- ret = PTR_ERR(clk);
- dev_err(dev, "failed to get pcie_refclk\n");
+ ret = dev_err_probe(dev, PTR_ERR(clk), "failed to get pcie_refclk\n");
goto err_pcie_setup;
}
ret = clk_prepare_enable(clk);
if (ret) {
- dev_err(dev, "failed to enable pcie_refclk\n");
+ dev_err_probe(dev, ret, "failed to enable pcie_refclk\n");
goto err_pcie_setup;
}
pcie->refclk = clk;
/*
- * "Power Sequencing and Reset Signal Timings" table in
- * PCI EXPRESS CARD ELECTROMECHANICAL SPECIFICATION, REV. 3.0
- * indicates PERST# should be deasserted after minimum of 100us
- * once REFCLK is stable. The REFCLK to the connector in RC
- * mode is selected while enabling the PHY. So deassert PERST#
- * after 100 us.
+ * Section 2.2 of the PCI Express Card Electromechanical
+ * Specification (Revision 5.1) mandates that the deassertion
+ * of the PERST# signal should be delayed by 100 ms (TPVPERL).
+ * This shall ensure that the power and the reference clock
+ * are stable.
*/
if (gpiod) {
- usleep_range(100, 200);
+ msleep(PCIE_T_PVPERL_MS);
gpiod_set_value_cansleep(gpiod, 1);
}
@@ -554,7 +612,7 @@ static int j721e_pcie_probe(struct platform_device *pdev)
case PCI_MODE_EP:
ret = cdns_pcie_init_phy(dev, cdns_pcie);
if (ret) {
- dev_err(dev, "Failed to init phy\n");
+ dev_err_probe(dev, ret, "Failed to init phy\n");
goto err_get_sync;
}
@@ -582,20 +640,118 @@ static void j721e_pcie_remove(struct platform_device *pdev)
struct j721e_pcie *pcie = platform_get_drvdata(pdev);
struct cdns_pcie *cdns_pcie = pcie->cdns_pcie;
struct device *dev = &pdev->dev;
+ struct cdns_pcie_ep *ep;
+ struct cdns_pcie_rc *rc;
+
+ if (pcie->mode == PCI_MODE_RC) {
+ rc = container_of(cdns_pcie, struct cdns_pcie_rc, pcie);
+ cdns_pcie_host_disable(rc);
+ } else {
+ ep = container_of(cdns_pcie, struct cdns_pcie_ep, pcie);
+ cdns_pcie_ep_disable(ep);
+ }
+
+ gpiod_set_value_cansleep(pcie->reset_gpio, 0);
clk_disable_unprepare(pcie->refclk);
cdns_pcie_disable_phy(cdns_pcie);
+ j721e_pcie_disable_link_irq(pcie);
pm_runtime_put(dev);
pm_runtime_disable(dev);
}
+static int j721e_pcie_suspend_noirq(struct device *dev)
+{
+ struct j721e_pcie *pcie = dev_get_drvdata(dev);
+
+ if (pcie->mode == PCI_MODE_RC) {
+ gpiod_set_value_cansleep(pcie->reset_gpio, 0);
+ clk_disable_unprepare(pcie->refclk);
+ }
+
+ cdns_pcie_disable_phy(pcie->cdns_pcie);
+
+ return 0;
+}
+
+static int j721e_pcie_resume_noirq(struct device *dev)
+{
+ struct j721e_pcie *pcie = dev_get_drvdata(dev);
+ struct cdns_pcie *cdns_pcie = pcie->cdns_pcie;
+ int ret;
+
+ ret = j721e_pcie_ctrl_init(pcie);
+ if (ret < 0)
+ return ret;
+
+ j721e_pcie_config_link_irq(pcie);
+
+ /*
+ * This is not called explicitly in the probe, it is called by
+ * cdns_pcie_init_phy().
+ */
+ ret = cdns_pcie_enable_phy(pcie->cdns_pcie);
+ if (ret < 0)
+ return ret;
+
+ if (pcie->mode == PCI_MODE_RC) {
+ struct cdns_pcie_rc *rc = cdns_pcie_to_rc(cdns_pcie);
+
+ ret = clk_prepare_enable(pcie->refclk);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Section 2.2 of the PCI Express Card Electromechanical
+ * Specification (Revision 5.1) mandates that the deassertion
+ * of the PERST# signal should be delayed by 100 ms (TPVPERL).
+ * This shall ensure that the power and the reference clock
+ * are stable.
+ */
+ if (pcie->reset_gpio) {
+ msleep(PCIE_T_PVPERL_MS);
+ gpiod_set_value_cansleep(pcie->reset_gpio, 1);
+ }
+
+ ret = cdns_pcie_host_link_setup(rc);
+ if (ret < 0) {
+ clk_disable_unprepare(pcie->refclk);
+ return ret;
+ }
+
+ /*
+ * Reset internal status of BARs to force reinitialization in
+ * cdns_pcie_host_init().
+ */
+ for (enum cdns_pcie_rp_bar bar = RP_BAR0; bar <= RP_NO_BAR; bar++)
+ rc->avail_ib_bar[bar] = true;
+
+ ret = cdns_pcie_host_init(rc);
+ if (ret) {
+ clk_disable_unprepare(pcie->refclk);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static DEFINE_NOIRQ_DEV_PM_OPS(j721e_pcie_pm_ops,
+ j721e_pcie_suspend_noirq,
+ j721e_pcie_resume_noirq);
+
static struct platform_driver j721e_pcie_driver = {
.probe = j721e_pcie_probe,
- .remove_new = j721e_pcie_remove,
+ .remove = j721e_pcie_remove,
.driver = {
.name = "j721e-pcie",
.of_match_table = of_j721e_pcie_match,
.suppress_bind_attrs = true,
+ .pm = pm_sleep_ptr(&j721e_pcie_pm_ops),
},
};
-builtin_platform_driver(j721e_pcie_driver);
+module_platform_driver(j721e_pcie_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("PCIe controller driver for TI's J721E and related SoCs");
+MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
diff --git a/drivers/pci/controller/cadence/pcie-cadence-ep.c b/drivers/pci/controller/cadence/pcie-cadence-ep.c
index 81c50dc64da9..8ab6cf70c18e 100644
--- a/drivers/pci/controller/cadence/pcie-cadence-ep.c
+++ b/drivers/pci/controller/cadence/pcie-cadence-ep.c
@@ -6,12 +6,14 @@
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/of.h>
#include <linux/pci-epc.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include "pcie-cadence.h"
+#include "../../pci.h"
#define CDNS_PCIE_EP_MIN_APERTURE 128 /* 128 bytes */
#define CDNS_PCIE_EP_IRQ_PCI_ADDR_NONE 0x1
@@ -99,14 +101,11 @@ static int cdns_pcie_ep_set_bar(struct pci_epc *epc, u8 fn, u8 vfn,
ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_IO_32BITS;
} else {
bool is_prefetch = !!(flags & PCI_BASE_ADDRESS_MEM_PREFETCH);
- bool is_64bits = sz > SZ_2G;
+ bool is_64bits = !!(flags & PCI_BASE_ADDRESS_MEM_TYPE_64);
if (is_64bits && (bar & 1))
return -EINVAL;
- if (is_64bits && !(flags & PCI_BASE_ADDRESS_MEM_TYPE_64))
- epf_bar->flags |= PCI_BASE_ADDRESS_MEM_TYPE_64;
-
if (is_64bits && is_prefetch)
ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_64BITS;
else if (is_prefetch)
@@ -223,10 +222,11 @@ static void cdns_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn, u8 vfn,
clear_bit(r, &ep->ob_region_map);
}
-static int cdns_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 vfn, u8 mmc)
+static int cdns_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 vfn, u8 nr_irqs)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
struct cdns_pcie *pcie = &ep->pcie;
+ u8 mmc = order_base_2(nr_irqs);
u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
u16 flags;
@@ -265,7 +265,7 @@ static int cdns_pcie_ep_get_msi(struct pci_epc *epc, u8 fn, u8 vfn)
*/
mme = FIELD_GET(PCI_MSI_FLAGS_QSIZE, flags);
- return mme;
+ return 1 << mme;
}
static int cdns_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
@@ -284,12 +284,11 @@ static int cdns_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
val &= PCI_MSIX_FLAGS_QSIZE;
- return val;
+ return val + 1;
}
static int cdns_pcie_ep_set_msix(struct pci_epc *epc, u8 fn, u8 vfn,
- u16 interrupts, enum pci_barno bir,
- u32 offset)
+ u16 nr_irqs, enum pci_barno bir, u32 offset)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
struct cdns_pcie *pcie = &ep->pcie;
@@ -301,17 +300,17 @@ static int cdns_pcie_ep_set_msix(struct pci_epc *epc, u8 fn, u8 vfn,
reg = cap + PCI_MSIX_FLAGS;
val = cdns_pcie_ep_fn_readw(pcie, fn, reg);
val &= ~PCI_MSIX_FLAGS_QSIZE;
- val |= interrupts;
+ val |= nr_irqs - 1; /* encoded as N-1 */
cdns_pcie_ep_fn_writew(pcie, fn, reg, val);
- /* Set MSIX BAR and offset */
+ /* Set MSI-X BAR and offset */
reg = cap + PCI_MSIX_TABLE;
val = offset | bir;
cdns_pcie_ep_fn_writel(pcie, fn, reg, val);
- /* Set PBA BAR and offset. BAR must match MSIX BAR */
+ /* Set PBA BAR and offset. BAR must match MSI-X BAR */
reg = cap + PCI_MSIX_PBA;
- val = (offset + (interrupts * PCI_MSIX_ENTRY_SIZE)) | bir;
+ val = (offset + (nr_irqs * PCI_MSIX_ENTRY_SIZE)) | bir;
cdns_pcie_ep_fn_writel(pcie, fn, reg, val);
return 0;
@@ -340,10 +339,10 @@ static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn, u8 intx,
if (is_asserted) {
ep->irq_pending |= BIT(intx);
- msg_code = MSG_CODE_ASSERT_INTA + intx;
+ msg_code = PCIE_MSG_CODE_ASSERT_INTA + intx;
} else {
ep->irq_pending &= ~BIT(intx);
- msg_code = MSG_CODE_DEASSERT_INTA + intx;
+ msg_code = PCIE_MSG_CODE_DEASSERT_INTA + intx;
}
spin_lock_irqsave(&ep->lock, flags);
@@ -355,8 +354,7 @@ static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn, u8 intx,
spin_unlock_irqrestore(&ep->lock, flags);
offset = CDNS_PCIE_NORMAL_MSG_ROUTING(MSG_ROUTING_LOCAL) |
- CDNS_PCIE_NORMAL_MSG_CODE(msg_code) |
- CDNS_PCIE_MSG_NO_DATA;
+ CDNS_PCIE_NORMAL_MSG_CODE(msg_code);
writel(0, ep->irq_cpu_addr + offset);
}
@@ -576,8 +574,8 @@ static int cdns_pcie_ep_start(struct pci_epc *epc)
/*
* Next function field in ARI_CAP_AND_CTR register for last function
- * should be 0.
- * Clearing Next Function Number field for the last function used.
+ * should be 0. Clear Next Function Number field for the last
+ * function used.
*/
last_fn = find_last_bit(&epc->function_num_map, BITS_PER_LONG);
reg = CDNS_PCIE_CORE_PF_I_ARI_CAP_AND_CTRL(last_fn);
@@ -648,6 +646,17 @@ static const struct pci_epc_ops cdns_pcie_epc_ops = {
.get_features = cdns_pcie_ep_get_features,
};
+void cdns_pcie_ep_disable(struct cdns_pcie_ep *ep)
+{
+ struct device *dev = ep->pcie.dev;
+ struct pci_epc *epc = to_pci_epc(dev);
+
+ pci_epc_deinit_notify(epc);
+ pci_epc_mem_free_addr(epc, ep->irq_phys_addr, ep->irq_cpu_addr,
+ SZ_128K);
+ pci_epc_mem_exit(epc);
+}
+EXPORT_SYMBOL_GPL(cdns_pcie_ep_disable);
int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
{
@@ -746,6 +755,8 @@ int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
spin_lock_init(&ep->lock);
+ pci_epc_init_notify(epc);
+
return 0;
free_epc_mem:
@@ -753,3 +764,8 @@ int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
return ret;
}
+EXPORT_SYMBOL_GPL(cdns_pcie_ep_setup);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Cadence PCIe endpoint controller driver");
+MODULE_AUTHOR("Cyrille Pitchen <cyrille.pitchen@free-electrons.com>");
diff --git a/drivers/pci/controller/cadence/pcie-cadence-host.c b/drivers/pci/controller/cadence/pcie-cadence-host.c
index 5b14f7ee3c79..59a4631de79f 100644
--- a/drivers/pci/controller/cadence/pcie-cadence-host.c
+++ b/drivers/pci/controller/cadence/pcie-cadence-host.c
@@ -5,6 +5,7 @@
#include <linux/delay.h>
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/list_sort.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
@@ -72,6 +73,7 @@ void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int devfn,
return rc->cfg_base + (where & 0xfff);
}
+EXPORT_SYMBOL_GPL(cdns_pci_map_bus);
static struct pci_ops cdns_pcie_host_ops = {
.map_bus = cdns_pci_map_bus,
@@ -150,6 +152,14 @@ static int cdns_pcie_retrain(struct cdns_pcie *pcie)
return ret;
}
+static void cdns_pcie_host_disable_ptm_response(struct cdns_pcie *pcie)
+{
+ u32 val;
+
+ val = cdns_pcie_readl(pcie, CDNS_PCIE_LM_PTM_CTRL);
+ cdns_pcie_writel(pcie, CDNS_PCIE_LM_PTM_CTRL, val & ~CDNS_PCIE_LM_TPM_CTRL_PTMRSEN);
+}
+
static void cdns_pcie_host_enable_ptm_response(struct cdns_pcie *pcie)
{
u32 val;
@@ -175,6 +185,26 @@ static int cdns_pcie_host_start_link(struct cdns_pcie_rc *rc)
return ret;
}
+static void cdns_pcie_host_deinit_root_port(struct cdns_pcie_rc *rc)
+{
+ struct cdns_pcie *pcie = &rc->pcie;
+ u32 value, ctrl;
+
+ cdns_pcie_rp_writew(pcie, PCI_CLASS_DEVICE, 0xffff);
+ cdns_pcie_rp_writeb(pcie, PCI_CLASS_PROG, 0xff);
+ cdns_pcie_rp_writeb(pcie, PCI_CLASS_REVISION, 0xff);
+ cdns_pcie_writel(pcie, CDNS_PCIE_LM_ID, 0xffffffff);
+ cdns_pcie_rp_writew(pcie, PCI_DEVICE_ID, 0xffff);
+ ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED;
+ value = ~(CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL(ctrl) |
+ CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL(ctrl) |
+ CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_ENABLE |
+ CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_64BITS |
+ CDNS_PCIE_LM_RC_BAR_CFG_IO_ENABLE |
+ CDNS_PCIE_LM_RC_BAR_CFG_IO_32BITS);
+ cdns_pcie_writel(pcie, CDNS_PCIE_LM_RC_BAR_CFG, value);
+}
+
static int cdns_pcie_host_init_root_port(struct cdns_pcie_rc *rc)
{
struct cdns_pcie *pcie = &rc->pcie;
@@ -391,6 +421,32 @@ static int cdns_pcie_host_dma_ranges_cmp(void *priv, const struct list_head *a,
return resource_size(entry2->res) - resource_size(entry1->res);
}
+static void cdns_pcie_host_unmap_dma_ranges(struct cdns_pcie_rc *rc)
+{
+ struct cdns_pcie *pcie = &rc->pcie;
+ enum cdns_pcie_rp_bar bar;
+ u32 value;
+
+ /* Reset inbound configuration for all BARs which were being used */
+ for (bar = RP_BAR0; bar <= RP_NO_BAR; bar++) {
+ if (rc->avail_ib_bar[bar])
+ continue;
+
+ cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_RP_BAR_ADDR0(bar), 0);
+ cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_RP_BAR_ADDR1(bar), 0);
+
+ if (bar == RP_NO_BAR)
+ continue;
+
+ value = ~(LM_RC_BAR_CFG_CTRL_MEM_64BITS(bar) |
+ LM_RC_BAR_CFG_CTRL_PREF_MEM_64BITS(bar) |
+ LM_RC_BAR_CFG_CTRL_MEM_32BITS(bar) |
+ LM_RC_BAR_CFG_CTRL_PREF_MEM_32BITS(bar) |
+ LM_RC_BAR_CFG_APERTURE(bar, bar_aperture_mask[bar] + 2));
+ cdns_pcie_writel(pcie, CDNS_PCIE_LM_RC_BAR_CFG, value);
+ }
+}
+
static int cdns_pcie_host_map_dma_ranges(struct cdns_pcie_rc *rc)
{
struct cdns_pcie *pcie = &rc->pcie;
@@ -428,6 +484,29 @@ static int cdns_pcie_host_map_dma_ranges(struct cdns_pcie_rc *rc)
return 0;
}
+static void cdns_pcie_host_deinit_address_translation(struct cdns_pcie_rc *rc)
+{
+ struct cdns_pcie *pcie = &rc->pcie;
+ struct pci_host_bridge *bridge = pci_host_bridge_from_priv(rc);
+ struct resource_entry *entry;
+ int r;
+
+ cdns_pcie_host_unmap_dma_ranges(rc);
+
+ /*
+ * Reset outbound region 0 which was reserved for configuration space
+ * accesses.
+ */
+ cdns_pcie_reset_outbound_region(pcie, 0);
+
+ /* Reset rest of the outbound regions */
+ r = 1;
+ resource_list_for_each_entry(entry, &bridge->windows) {
+ cdns_pcie_reset_outbound_region(pcie, r);
+ r++;
+ }
+}
+
static int cdns_pcie_host_init_address_translation(struct cdns_pcie_rc *rc)
{
struct cdns_pcie *pcie = &rc->pcie;
@@ -485,8 +564,13 @@ static int cdns_pcie_host_init_address_translation(struct cdns_pcie_rc *rc)
return cdns_pcie_host_map_dma_ranges(rc);
}
-static int cdns_pcie_host_init(struct device *dev,
- struct cdns_pcie_rc *rc)
+static void cdns_pcie_host_deinit(struct cdns_pcie_rc *rc)
+{
+ cdns_pcie_host_deinit_address_translation(rc);
+ cdns_pcie_host_deinit_root_port(rc);
+}
+
+int cdns_pcie_host_init(struct cdns_pcie_rc *rc)
{
int err;
@@ -496,6 +580,53 @@ static int cdns_pcie_host_init(struct device *dev,
return cdns_pcie_host_init_address_translation(rc);
}
+EXPORT_SYMBOL_GPL(cdns_pcie_host_init);
+
+static void cdns_pcie_host_link_disable(struct cdns_pcie_rc *rc)
+{
+ struct cdns_pcie *pcie = &rc->pcie;
+
+ cdns_pcie_stop_link(pcie);
+ cdns_pcie_host_disable_ptm_response(pcie);
+}
+
+int cdns_pcie_host_link_setup(struct cdns_pcie_rc *rc)
+{
+ struct cdns_pcie *pcie = &rc->pcie;
+ struct device *dev = rc->pcie.dev;
+ int ret;
+
+ if (rc->quirk_detect_quiet_flag)
+ cdns_pcie_detect_quiet_min_delay_set(&rc->pcie);
+
+ cdns_pcie_host_enable_ptm_response(pcie);
+
+ ret = cdns_pcie_start_link(pcie);
+ if (ret) {
+ dev_err(dev, "Failed to start link\n");
+ return ret;
+ }
+
+ ret = cdns_pcie_host_start_link(rc);
+ if (ret)
+ dev_dbg(dev, "PCIe link never came up\n");
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cdns_pcie_host_link_setup);
+
+void cdns_pcie_host_disable(struct cdns_pcie_rc *rc)
+{
+ struct pci_host_bridge *bridge;
+
+ bridge = pci_host_bridge_from_priv(rc);
+ pci_stop_root_bus(bridge->bus);
+ pci_remove_root_bus(bridge->bus);
+
+ cdns_pcie_host_deinit(rc);
+ cdns_pcie_host_link_disable(rc);
+}
+EXPORT_SYMBOL_GPL(cdns_pcie_host_disable);
int cdns_pcie_host_setup(struct cdns_pcie_rc *rc)
{
@@ -533,39 +664,24 @@ int cdns_pcie_host_setup(struct cdns_pcie_rc *rc)
return PTR_ERR(rc->cfg_base);
rc->cfg_res = res;
- if (rc->quirk_detect_quiet_flag)
- cdns_pcie_detect_quiet_min_delay_set(&rc->pcie);
-
- cdns_pcie_host_enable_ptm_response(pcie);
-
- ret = cdns_pcie_start_link(pcie);
- if (ret) {
- dev_err(dev, "Failed to start link\n");
- return ret;
- }
-
- ret = cdns_pcie_host_start_link(rc);
+ ret = cdns_pcie_host_link_setup(rc);
if (ret)
- dev_dbg(dev, "PCIe link never came up\n");
+ return ret;
for (bar = RP_BAR0; bar <= RP_NO_BAR; bar++)
rc->avail_ib_bar[bar] = true;
- ret = cdns_pcie_host_init(dev, rc);
+ ret = cdns_pcie_host_init(rc);
if (ret)
return ret;
if (!bridge->ops)
bridge->ops = &cdns_pcie_host_ops;
- ret = pci_host_probe(bridge);
- if (ret < 0)
- goto err_init;
-
- return 0;
-
- err_init:
- pm_runtime_put_sync(dev);
-
- return ret;
+ return pci_host_probe(bridge);
}
+EXPORT_SYMBOL_GPL(cdns_pcie_host_setup);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Cadence PCIe host controller driver");
+MODULE_AUTHOR("Cyrille Pitchen <cyrille.pitchen@free-electrons.com>");
diff --git a/drivers/pci/controller/cadence/pcie-cadence.c b/drivers/pci/controller/cadence/pcie-cadence.c
index 4251fac5e310..70a19573440e 100644
--- a/drivers/pci/controller/cadence/pcie-cadence.c
+++ b/drivers/pci/controller/cadence/pcie-cadence.c
@@ -4,6 +4,7 @@
// Author: Cyrille Pitchen <cyrille.pitchen@free-electrons.com>
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/of.h>
#include "pcie-cadence.h"
@@ -23,6 +24,7 @@ void cdns_pcie_detect_quiet_min_delay_set(struct cdns_pcie *pcie)
cdns_pcie_writel(pcie, CDNS_PCIE_LTSSM_CONTROL_CAP, ltssm_control_cap);
}
+EXPORT_SYMBOL_GPL(cdns_pcie_detect_quiet_min_delay_set);
void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 busnr, u8 fn,
u32 r, bool is_io,
@@ -100,6 +102,7 @@ void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 busnr, u8 fn,
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r), addr0);
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r), addr1);
}
+EXPORT_SYMBOL_GPL(cdns_pcie_set_outbound_region);
void cdns_pcie_set_outbound_region_for_normal_msg(struct cdns_pcie *pcie,
u8 busnr, u8 fn,
@@ -134,6 +137,7 @@ void cdns_pcie_set_outbound_region_for_normal_msg(struct cdns_pcie *pcie,
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r), addr0);
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r), addr1);
}
+EXPORT_SYMBOL_GPL(cdns_pcie_set_outbound_region_for_normal_msg);
void cdns_pcie_reset_outbound_region(struct cdns_pcie *pcie, u32 r)
{
@@ -146,6 +150,7 @@ void cdns_pcie_reset_outbound_region(struct cdns_pcie *pcie, u32 r)
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r), 0);
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r), 0);
}
+EXPORT_SYMBOL_GPL(cdns_pcie_reset_outbound_region);
void cdns_pcie_disable_phy(struct cdns_pcie *pcie)
{
@@ -156,6 +161,7 @@ void cdns_pcie_disable_phy(struct cdns_pcie *pcie)
phy_exit(pcie->phy[i]);
}
}
+EXPORT_SYMBOL_GPL(cdns_pcie_disable_phy);
int cdns_pcie_enable_phy(struct cdns_pcie *pcie)
{
@@ -184,6 +190,7 @@ err_phy:
return ret;
}
+EXPORT_SYMBOL_GPL(cdns_pcie_enable_phy);
int cdns_pcie_init_phy(struct device *dev, struct cdns_pcie *pcie)
{
@@ -197,7 +204,7 @@ int cdns_pcie_init_phy(struct device *dev, struct cdns_pcie *pcie)
phy_count = of_property_count_strings(np, "phy-names");
if (phy_count < 1) {
- dev_err(dev, "no phy-names. PHY will not be initialized\n");
+ dev_info(dev, "no \"phy-names\" property found; PHY will not be initialized\n");
pcie->phy_count = 0;
return 0;
}
@@ -243,6 +250,7 @@ err_phy:
return ret;
}
+EXPORT_SYMBOL_GPL(cdns_pcie_init_phy);
static int cdns_pcie_suspend_noirq(struct device *dev)
{
@@ -260,7 +268,7 @@ static int cdns_pcie_resume_noirq(struct device *dev)
ret = cdns_pcie_enable_phy(pcie);
if (ret) {
- dev_err(dev, "failed to enable phy\n");
+ dev_err(dev, "failed to enable PHY\n");
return ret;
}
@@ -271,3 +279,7 @@ const struct dev_pm_ops cdns_pcie_pm_ops = {
NOIRQ_SYSTEM_SLEEP_PM_OPS(cdns_pcie_suspend_noirq,
cdns_pcie_resume_noirq)
};
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Cadence PCIe controller driver");
+MODULE_AUTHOR("Cyrille Pitchen <cyrille.pitchen@free-electrons.com>");
diff --git a/drivers/pci/controller/cadence/pcie-cadence.h b/drivers/pci/controller/cadence/pcie-cadence.h
index 7a66a2f815dc..a149845d341a 100644
--- a/drivers/pci/controller/cadence/pcie-cadence.h
+++ b/drivers/pci/controller/cadence/pcie-cadence.h
@@ -246,21 +246,10 @@ struct cdns_pcie_rp_ib_bar {
#define CDNS_PCIE_NORMAL_MSG_CODE_MASK GENMASK(15, 8)
#define CDNS_PCIE_NORMAL_MSG_CODE(code) \
(((code) << 8) & CDNS_PCIE_NORMAL_MSG_CODE_MASK)
-#define CDNS_PCIE_MSG_NO_DATA BIT(16)
+#define CDNS_PCIE_MSG_DATA BIT(16)
struct cdns_pcie;
-enum cdns_pcie_msg_code {
- MSG_CODE_ASSERT_INTA = 0x20,
- MSG_CODE_ASSERT_INTB = 0x21,
- MSG_CODE_ASSERT_INTC = 0x22,
- MSG_CODE_ASSERT_INTD = 0x23,
- MSG_CODE_DEASSERT_INTA = 0x24,
- MSG_CODE_DEASSERT_INTB = 0x25,
- MSG_CODE_DEASSERT_INTC = 0x26,
- MSG_CODE_DEASSERT_INTD = 0x27,
-};
-
enum cdns_pcie_msg_routing {
/* Route to Root Complex */
MSG_ROUTING_TO_RC,
@@ -314,7 +303,6 @@ struct cdns_pcie {
/**
* struct cdns_pcie_rc - private data for this PCIe Root Complex driver
* @pcie: Cadence PCIe controller
- * @dev: pointer to PCIe device
* @cfg_res: start/end offsets in the physical system memory to map PCI
* configuration space accesses
* @cfg_base: IO mapped window to access the PCI configuration space of a
@@ -520,16 +508,33 @@ static inline bool cdns_pcie_link_up(struct cdns_pcie *pcie)
return true;
}
-#ifdef CONFIG_PCIE_CADENCE_HOST
+#if IS_ENABLED(CONFIG_PCIE_CADENCE_HOST)
+int cdns_pcie_host_link_setup(struct cdns_pcie_rc *rc);
+int cdns_pcie_host_init(struct cdns_pcie_rc *rc);
int cdns_pcie_host_setup(struct cdns_pcie_rc *rc);
+void cdns_pcie_host_disable(struct cdns_pcie_rc *rc);
void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int devfn,
int where);
#else
+static inline int cdns_pcie_host_link_setup(struct cdns_pcie_rc *rc)
+{
+ return 0;
+}
+
+static inline int cdns_pcie_host_init(struct cdns_pcie_rc *rc)
+{
+ return 0;
+}
+
static inline int cdns_pcie_host_setup(struct cdns_pcie_rc *rc)
{
return 0;
}
+static inline void cdns_pcie_host_disable(struct cdns_pcie_rc *rc)
+{
+}
+
static inline void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int devfn,
int where)
{
@@ -537,13 +542,18 @@ static inline void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int d
}
#endif
-#ifdef CONFIG_PCIE_CADENCE_EP
+#if IS_ENABLED(CONFIG_PCIE_CADENCE_EP)
int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep);
+void cdns_pcie_ep_disable(struct cdns_pcie_ep *ep);
#else
static inline int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
{
return 0;
}
+
+static inline void cdns_pcie_ep_disable(struct cdns_pcie_ep *ep)
+{
+}
#endif
void cdns_pcie_detect_quiet_min_delay_set(struct cdns_pcie *pcie);
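
Note: the header changes above replace #ifdef CONFIG_PCIE_CADENCE_HOST/EP with IS_ENABLED(), so the prototypes are visible for both built-in and modular configurations, and empty inline stubs of the new disable helpers keep callers building when a given half of the driver is not enabled. A condensed sketch of that pattern, with example_* as local stand-ins for the real symbols:

        #include <linux/kconfig.h>

        struct example_rc;      /* stand-in for struct cdns_pcie_rc */

        #if IS_ENABLED(CONFIG_PCIE_CADENCE_HOST)
        int example_host_setup(struct example_rc *rc);
        void example_host_disable(struct example_rc *rc);
        #else
        static inline int example_host_setup(struct example_rc *rc)
        {
                return 0;       /* host support compiled out */
        }

        static inline void example_host_disable(struct example_rc *rc)
        {
        }
        #endif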