Diffstat (limited to 'drivers/pci/controller/pcie-rcar-host.c')
-rw-r--r-- drivers/pci/controller/pcie-rcar-host.c | 185
1 file changed, 75 insertions(+), 110 deletions(-)
diff --git a/drivers/pci/controller/pcie-rcar-host.c b/drivers/pci/controller/pcie-rcar-host.c
index e4faf90feaf5..213028052aa5 100644
--- a/drivers/pci/controller/pcie-rcar-host.c
+++ b/drivers/pci/controller/pcie-rcar-host.c
@@ -12,11 +12,13 @@
*/
#include <linux/bitops.h>
+#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
+#include <linux/irqchip/irq-msi-lib.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -29,6 +31,7 @@
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
#include "pcie-rcar.h"
@@ -36,26 +39,11 @@ struct rcar_msi {
DECLARE_BITMAP(used, INT_PCI_MSI_NR);
struct irq_domain *domain;
struct mutex map_lock;
- spinlock_t mask_lock;
+ raw_spinlock_t mask_lock;
int irq1;
int irq2;
};
-#ifdef CONFIG_ARM
-/*
- * Here we keep a static copy of the remapped PCIe controller address.
- * This is only used on aarch32 systems, all of which have one single
- * PCIe controller, to provide quick access to the PCIe controller in
- * the L1 link state fixup function, called from the ARM fault handler.
- */
-static void __iomem *pcie_base;
-/*
- * Static copy of PCIe device pointer, so we can check whether the
- * device is runtime suspended or not.
- */
-static struct device *pcie_dev;
-#endif
-
/* Structure representing the PCIe interface */
struct rcar_pcie_host {
struct rcar_pcie pcie;
@@ -65,20 +53,13 @@ struct rcar_pcie_host {
int (*phy_init_fn)(struct rcar_pcie_host *host);
};
-static DEFINE_SPINLOCK(pmsr_lock);
-
static int rcar_pcie_wakeup(struct device *pcie_dev, void __iomem *pcie_base)
{
- unsigned long flags;
u32 pmsr, val;
int ret = 0;
- spin_lock_irqsave(&pmsr_lock, flags);
-
- if (!pcie_base || pm_runtime_suspended(pcie_dev)) {
- ret = -EINVAL;
- goto unlock_exit;
- }
+ if (!pcie_base || pm_runtime_suspended(pcie_dev))
+ return -EINVAL;
pmsr = readl(pcie_base + PMSR);
@@ -92,12 +73,14 @@ static int rcar_pcie_wakeup(struct device *pcie_dev, void __iomem *pcie_base)
writel(L1IATN, pcie_base + PMCTLR);
ret = readl_poll_timeout_atomic(pcie_base + PMSR, val,
val & L1FAEG, 10, 1000);
- WARN(ret, "Timeout waiting for L1 link state, ret=%d\n", ret);
+ if (ret) {
+ dev_warn_ratelimited(pcie_dev,
+ "Timeout waiting for L1 link state, ret=%d\n",
+ ret);
+ }
writel(L1FAEG | PMEL1RX, pcie_base + PMSR);
}
-unlock_exit:
- spin_unlock_irqrestore(&pmsr_lock, flags);
return ret;
}
@@ -188,8 +171,8 @@ static int rcar_pcie_config_access(struct rcar_pcie_host *host,
* space, it's generally only accessible when in endpoint mode.
* When in root complex mode, the controller is unable to target
* itself with either type 0 or type 1 accesses, and indeed, any
- * controller initiated target transfer to its own config space
- * result in a completer abort.
+ * controller-initiated target transfer to its own config space
+ * results in a completer abort.
*
* Each channel effectively only supports a single device, but as
* the same channel <-> device access works for any PCI_SLOT()
@@ -219,9 +202,9 @@ static int rcar_pcie_config_access(struct rcar_pcie_host *host,
/* Enable the configuration access */
if (pci_is_root_bus(bus->parent))
- rcar_pci_write_reg(pcie, CONFIG_SEND_ENABLE | TYPE0, PCIECCTLR);
+ rcar_pci_write_reg(pcie, PCIECCTLR_CCIE | TYPE0, PCIECCTLR);
else
- rcar_pci_write_reg(pcie, CONFIG_SEND_ENABLE | TYPE1, PCIECCTLR);
+ rcar_pci_write_reg(pcie, PCIECCTLR_CCIE | TYPE1, PCIECCTLR);
/* Check for errors */
if (rcar_pci_read_reg(pcie, PCIEERRFR) & UNSUPPORTED_REQUEST)
@@ -475,7 +458,7 @@ static int rcar_pcie_hw_init(struct rcar_pcie *pcie)
rcar_rmw32(pcie, REXPCAP(0), 0xff, PCI_CAP_ID_EXP);
rcar_rmw32(pcie, REXPCAP(PCI_EXP_FLAGS),
PCI_EXP_FLAGS_TYPE, PCI_EXP_TYPE_ROOT_PORT << 4);
- rcar_rmw32(pcie, RCONF(PCI_HEADER_TYPE), 0x7f,
+ rcar_rmw32(pcie, RCONF(PCI_HEADER_TYPE), PCI_HEADER_TYPE_MASK,
PCI_HEADER_TYPE_BRIDGE);
/* Enable data link layer active state reporting */
@@ -593,7 +576,7 @@ static irqreturn_t rcar_pcie_msi_irq(int irq, void *data)
unsigned int index = find_first_bit(&reg, 32);
int ret;
- ret = generic_handle_domain_irq(msi->domain->parent, index);
+ ret = generic_handle_domain_irq(msi->domain, index);
if (ret) {
/* Unknown MSI, just clear it */
dev_dbg(dev, "unexpected MSI\n");
@@ -607,30 +590,6 @@ static irqreturn_t rcar_pcie_msi_irq(int irq, void *data)
return IRQ_HANDLED;
}
-static void rcar_msi_top_irq_ack(struct irq_data *d)
-{
- irq_chip_ack_parent(d);
-}
-
-static void rcar_msi_top_irq_mask(struct irq_data *d)
-{
- pci_msi_mask_irq(d);
- irq_chip_mask_parent(d);
-}
-
-static void rcar_msi_top_irq_unmask(struct irq_data *d)
-{
- pci_msi_unmask_irq(d);
- irq_chip_unmask_parent(d);
-}
-
-static struct irq_chip rcar_msi_top_chip = {
- .name = "PCIe MSI",
- .irq_ack = rcar_msi_top_irq_ack,
- .irq_mask = rcar_msi_top_irq_mask,
- .irq_unmask = rcar_msi_top_irq_unmask,
-};
-
static void rcar_msi_irq_ack(struct irq_data *d)
{
struct rcar_msi *msi = irq_data_get_irq_chip_data(d);
@@ -644,33 +603,26 @@ static void rcar_msi_irq_mask(struct irq_data *d)
{
struct rcar_msi *msi = irq_data_get_irq_chip_data(d);
struct rcar_pcie *pcie = &msi_to_host(msi)->pcie;
- unsigned long flags;
u32 value;
- spin_lock_irqsave(&msi->mask_lock, flags);
- value = rcar_pci_read_reg(pcie, PCIEMSIIER);
- value &= ~BIT(d->hwirq);
- rcar_pci_write_reg(pcie, value, PCIEMSIIER);
- spin_unlock_irqrestore(&msi->mask_lock, flags);
+ scoped_guard(raw_spinlock_irqsave, &msi->mask_lock) {
+ value = rcar_pci_read_reg(pcie, PCIEMSIIER);
+ value &= ~BIT(d->hwirq);
+ rcar_pci_write_reg(pcie, value, PCIEMSIIER);
+ }
}
static void rcar_msi_irq_unmask(struct irq_data *d)
{
struct rcar_msi *msi = irq_data_get_irq_chip_data(d);
struct rcar_pcie *pcie = &msi_to_host(msi)->pcie;
- unsigned long flags;
u32 value;
- spin_lock_irqsave(&msi->mask_lock, flags);
- value = rcar_pci_read_reg(pcie, PCIEMSIIER);
- value |= BIT(d->hwirq);
- rcar_pci_write_reg(pcie, value, PCIEMSIIER);
- spin_unlock_irqrestore(&msi->mask_lock, flags);
-}
-
-static int rcar_msi_set_affinity(struct irq_data *d, const struct cpumask *mask, bool force)
-{
- return -EINVAL;
+ scoped_guard(raw_spinlock_irqsave, &msi->mask_lock) {
+ value = rcar_pci_read_reg(pcie, PCIEMSIIER);
+ value |= BIT(d->hwirq);
+ rcar_pci_write_reg(pcie, value, PCIEMSIIER);
+ }
}
static void rcar_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
@@ -684,11 +636,10 @@ static void rcar_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
}
static struct irq_chip rcar_msi_bottom_chip = {
- .name = "Rcar MSI",
+ .name = "R-Car MSI",
.irq_ack = rcar_msi_irq_ack,
.irq_mask = rcar_msi_irq_mask,
.irq_unmask = rcar_msi_irq_unmask,
- .irq_set_affinity = rcar_msi_set_affinity,
.irq_compose_msi_msg = rcar_compose_msi_msg,
};
@@ -734,30 +685,36 @@ static const struct irq_domain_ops rcar_msi_domain_ops = {
.free = rcar_msi_domain_free,
};
-static struct msi_domain_info rcar_msi_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_MULTI_PCI_MSI),
- .chip = &rcar_msi_top_chip,
+#define RCAR_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
+ MSI_FLAG_USE_DEF_CHIP_OPS | \
+ MSI_FLAG_PCI_MSI_MASK_PARENT | \
+ MSI_FLAG_NO_AFFINITY)
+
+#define RCAR_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \
+ MSI_FLAG_MULTI_PCI_MSI)
+
+static const struct msi_parent_ops rcar_msi_parent_ops = {
+ .required_flags = RCAR_MSI_FLAGS_REQUIRED,
+ .supported_flags = RCAR_MSI_FLAGS_SUPPORTED,
+ .bus_select_token = DOMAIN_BUS_PCI_MSI,
+ .chip_flags = MSI_CHIP_FLAG_SET_ACK,
+ .prefix = "RCAR-",
+ .init_dev_msi_info = msi_lib_init_dev_msi_info,
};
static int rcar_allocate_domains(struct rcar_msi *msi)
{
struct rcar_pcie *pcie = &msi_to_host(msi)->pcie;
- struct fwnode_handle *fwnode = dev_fwnode(pcie->dev);
- struct irq_domain *parent;
-
- parent = irq_domain_create_linear(fwnode, INT_PCI_MSI_NR,
- &rcar_msi_domain_ops, msi);
- if (!parent) {
- dev_err(pcie->dev, "failed to create IRQ domain\n");
- return -ENOMEM;
- }
- irq_domain_update_bus_token(parent, DOMAIN_BUS_NEXUS);
-
- msi->domain = pci_msi_create_irq_domain(fwnode, &rcar_msi_info, parent);
+ struct irq_domain_info info = {
+ .fwnode = dev_fwnode(pcie->dev),
+ .ops = &rcar_msi_domain_ops,
+ .host_data = msi,
+ .size = INT_PCI_MSI_NR,
+ };
+
+ msi->domain = msi_create_parent_irq_domain(&info, &rcar_msi_parent_ops);
if (!msi->domain) {
- dev_err(pcie->dev, "failed to create MSI domain\n");
- irq_domain_remove(parent);
+ dev_err(pcie->dev, "failed to create IRQ domain\n");
return -ENOMEM;
}
@@ -766,10 +723,7 @@ static int rcar_allocate_domains(struct rcar_msi *msi)
static void rcar_free_domains(struct rcar_msi *msi)
{
- struct irq_domain *parent = msi->domain->parent;
-
irq_domain_remove(msi->domain);
- irq_domain_remove(parent);
}
static int rcar_pcie_enable_msi(struct rcar_pcie_host *host)
@@ -781,7 +735,7 @@ static int rcar_pcie_enable_msi(struct rcar_pcie_host *host)
int err;
mutex_init(&msi->map_lock);
- spin_lock_init(&msi->mask_lock);
+ raw_spin_lock_init(&msi->mask_lock);
err = of_address_to_resource(dev->of_node, 0, &res);
if (err)
@@ -791,7 +745,7 @@ static int rcar_pcie_enable_msi(struct rcar_pcie_host *host)
if (err)
return err;
- /* Two irqs are for MSI, but they are also used for non-MSI irqs */
+ /* Two IRQs are for MSI, but they are also used for non-MSI IRQs */
err = devm_request_irq(dev, msi->irq1, rcar_pcie_msi_irq,
IRQF_SHARED | IRQF_NO_THREAD,
rcar_msi_bottom_chip.name, host);
@@ -808,12 +762,12 @@ static int rcar_pcie_enable_msi(struct rcar_pcie_host *host)
goto err;
}
- /* disable all MSIs */
+ /* Disable all MSIs */
rcar_pci_write_reg(pcie, 0, PCIEMSIIER);
/*
- * Setup MSI data target using RC base address address, which
- * is guaranteed to be in the low 32bit range on any RCar HW.
+ * Setup MSI data target using RC base address, which is guaranteed
+ * to be in the low 32bit range on any R-Car HW.
*/
rcar_pci_write_reg(pcie, lower_32_bits(res.start) | MSIFE, PCIEMSIALR);
rcar_pci_write_reg(pcie, upper_32_bits(res.start), PCIEMSIAUR);
@@ -879,12 +833,6 @@ static int rcar_pcie_get_resources(struct rcar_pcie_host *host)
}
host->msi.irq2 = i;
-#ifdef CONFIG_ARM
- /* Cache static copy for L1 link state fixup hook on aarch32 */
- pcie_base = pcie->base;
- pcie_dev = pcie->dev;
-#endif
-
return 0;
err_irq2:
@@ -914,6 +862,7 @@ static int rcar_pcie_inbound_ranges(struct rcar_pcie *pcie,
dev_err(pcie->dev, "Failed to map inbound regions!\n");
return -EINVAL;
}
+
/*
* If the size of the range is larger than the alignment of
* the start address, we have to use multiple entries to
@@ -925,6 +874,7 @@ static int rcar_pcie_inbound_ranges(struct rcar_pcie *pcie,
size = min(size, alignment);
}
+
/* Hardware supports max 4GiB inbound region */
size = min(size, 1ULL << 32);
@@ -974,14 +924,22 @@ static const struct of_device_id rcar_pcie_of_match[] = {
{},
};
+/* The order in which these supplies are enabled is not important. */
+static const char * const rcar_pcie_supplies[] = {
+ "vpcie1v5",
+ "vpcie3v3",
+ "vpcie12v",
+};
+
static int rcar_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ struct pci_host_bridge *bridge;
struct rcar_pcie_host *host;
struct rcar_pcie *pcie;
+ unsigned int i;
u32 data;
int err;
- struct pci_host_bridge *bridge;
bridge = devm_pci_alloc_host_bridge(dev, sizeof(*host));
if (!bridge)
@@ -992,6 +950,13 @@ static int rcar_pcie_probe(struct platform_device *pdev)
pcie->dev = dev;
platform_set_drvdata(pdev, host);
+ for (i = 0; i < ARRAY_SIZE(rcar_pcie_supplies); i++) {
+ err = devm_regulator_get_enable_optional(dev, rcar_pcie_supplies[i]);
+ if (err < 0 && err != -ENODEV)
+ return dev_err_probe(dev, err, "failed to enable regulator: %s\n",
+ rcar_pcie_supplies[i]);
+ }
+
pm_runtime_enable(pcie->dev);
err = pm_runtime_get_sync(pcie->dev);
if (err < 0) {