Diffstat (limited to 'drivers/pci')
-rw-r--r--  drivers/pci/Kconfig | 30
-rw-r--r--  drivers/pci/Makefile | 3
-rw-r--r--  drivers/pci/access.c | 40
-rw-r--r--  drivers/pci/ats.c | 72
-rw-r--r--  drivers/pci/bus.c | 92
-rw-r--r--  drivers/pci/controller/Kconfig | 14
-rw-r--r--  drivers/pci/controller/Makefile | 2
-rw-r--r--  drivers/pci/controller/cadence/Kconfig | 2
-rw-r--r--  drivers/pci/controller/cadence/pci-j721e.c | 176
-rw-r--r--  drivers/pci/controller/cadence/pcie-cadence-ep.c | 18
-rw-r--r--  drivers/pci/controller/cadence/pcie-cadence-host.c | 44
-rw-r--r--  drivers/pci/controller/cadence/pcie-cadence.c | 4
-rw-r--r--  drivers/pci/controller/cadence/pcie-cadence.h | 15
-rw-r--r--  drivers/pci/controller/dwc/Kconfig | 48
-rw-r--r--  drivers/pci/controller/dwc/Makefile | 5
-rw-r--r--  drivers/pci/controller/dwc/pci-dra7xx.c | 57
-rw-r--r--  drivers/pci/controller/dwc/pci-exynos.c | 57
-rw-r--r--  drivers/pci/controller/dwc/pci-imx6.c | 1448
-rw-r--r--  drivers/pci/controller/dwc/pci-keystone.c | 240
-rw-r--r--  drivers/pci/controller/dwc/pci-layerscape-ep.c | 11
-rw-r--r--  drivers/pci/controller/dwc/pci-layerscape.c | 10
-rw-r--r--  drivers/pci/controller/dwc/pci-meson.c | 1
-rw-r--r--  drivers/pci/controller/dwc/pcie-al.c | 16
-rw-r--r--  drivers/pci/controller/dwc/pcie-amd-mdb.c | 476
-rw-r--r--  drivers/pci/controller/dwc/pcie-artpec6.c | 36
-rw-r--r--  drivers/pci/controller/dwc/pcie-bt1.c | 2
-rw-r--r--  drivers/pci/controller/dwc/pcie-designware-debugfs.c | 677
-rw-r--r--  drivers/pci/controller/dwc/pcie-designware-ep.c | 700
-rw-r--r--  drivers/pci/controller/dwc/pcie-designware-host.c | 250
-rw-r--r--  drivers/pci/controller/dwc/pcie-designware-plat.c | 11
-rw-r--r--  drivers/pci/controller/dwc/pcie-designware.c | 284
-rw-r--r--  drivers/pci/controller/dwc/pcie-designware.h | 186
-rw-r--r--  drivers/pci/controller/dwc/pcie-dw-rockchip.c | 418
-rw-r--r--  drivers/pci/controller/dwc/pcie-histb.c | 14
-rw-r--r--  drivers/pci/controller/dwc/pcie-intel-gw.c | 14
-rw-r--r--  drivers/pci/controller/dwc/pcie-keembay.c | 18
-rw-r--r--  drivers/pci/controller/dwc/pcie-kirin.c | 174
-rw-r--r--  drivers/pci/controller/dwc/pcie-qcom-common.c | 78
-rw-r--r--  drivers/pci/controller/dwc/pcie-qcom-common.h | 14
-rw-r--r--  drivers/pci/controller/dwc/pcie-qcom-ep.c | 117
-rw-r--r--  drivers/pci/controller/dwc/pcie-qcom.c | 504
-rw-r--r--  drivers/pci/controller/dwc/pcie-rcar-gen4.c | 347
-rw-r--r--  drivers/pci/controller/dwc/pcie-spear13xx.c | 2
-rw-r--r--  drivers/pci/controller/dwc/pcie-tegra194.c | 58
-rw-r--r--  drivers/pci/controller/dwc/pcie-uniphier-ep.c | 15
-rw-r--r--  drivers/pci/controller/dwc/pcie-uniphier.c | 2
-rw-r--r--  drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c | 2
-rw-r--r--  drivers/pci/controller/mobiveil/pcie-mobiveil-host.c | 22
-rw-r--r--  drivers/pci/controller/mobiveil/pcie-mobiveil.h | 2
-rw-r--r--  drivers/pci/controller/pci-aardvark.c | 91
-rw-r--r--  drivers/pci/controller/pci-ftpci100.c | 4
-rw-r--r--  drivers/pci/controller/pci-host-common.c | 7
-rw-r--r--  drivers/pci/controller/pci-host-generic.c | 3
-rw-r--r--  drivers/pci/controller/pci-hyperv.c | 24
-rw-r--r--  drivers/pci/controller/pci-loongson.c | 13
-rw-r--r--  drivers/pci/controller/pci-mvebu.c | 11
-rw-r--r--  drivers/pci/controller/pci-tegra.c | 153
-rw-r--r--  drivers/pci/controller/pci-thunder-ecam.c | 2
-rw-r--r--  drivers/pci/controller/pci-thunder-pem.c | 4
-rw-r--r--  drivers/pci/controller/pci-xgene-msi.c | 57
-rw-r--r--  drivers/pci/controller/pci-xgene.c | 6
-rw-r--r--  drivers/pci/controller/pcie-altera-msi.c | 18
-rw-r--r--  drivers/pci/controller/pcie-altera.c | 269
-rw-r--r--  drivers/pci/controller/pcie-apple.c | 149
-rw-r--r--  drivers/pci/controller/pcie-brcmstb.c | 700
-rw-r--r--  drivers/pci/controller/pcie-hisi-error.c | 2
-rw-r--r--  drivers/pci/controller/pcie-iproc-msi.c | 6
-rw-r--r--  drivers/pci/controller/pcie-iproc-platform.c | 2
-rw-r--r--  drivers/pci/controller/pcie-iproc.c | 18
-rw-r--r--  drivers/pci/controller/pcie-mediatek-gen3.c | 395
-rw-r--r--  drivers/pci/controller/pcie-mediatek.c | 36
-rw-r--r--  drivers/pci/controller/pcie-microchip-host.c | 1216
-rw-r--r--  drivers/pci/controller/pcie-mt7621.c | 20
-rw-r--r--  drivers/pci/controller/pcie-rcar-ep.c | 4
-rw-r--r--  drivers/pci/controller/pcie-rcar-host.c | 30
-rw-r--r--  drivers/pci/controller/pcie-rockchip-ep.c | 453
-rw-r--r--  drivers/pci/controller/pcie-rockchip-host.c | 15
-rw-r--r--  drivers/pci/controller/pcie-rockchip.c | 240
-rw-r--r--  drivers/pci/controller/pcie-rockchip.h | 60
-rw-r--r--  drivers/pci/controller/pcie-xilinx-cpm.c | 113
-rw-r--r--  drivers/pci/controller/pcie-xilinx-dma-pl.c | 78
-rw-r--r--  drivers/pci/controller/pcie-xilinx-nwl.c | 161
-rw-r--r--  drivers/pci/controller/pcie-xilinx.c | 14
-rw-r--r--  drivers/pci/controller/plda/Kconfig | 30
-rw-r--r--  drivers/pci/controller/plda/Makefile | 4
-rw-r--r--  drivers/pci/controller/plda/pcie-microchip-host.c | 833
-rw-r--r--  drivers/pci/controller/plda/pcie-plda-host.c | 653
-rw-r--r--  drivers/pci/controller/plda/pcie-plda.h | 275
-rw-r--r--  drivers/pci/controller/plda/pcie-starfive.c | 492
-rw-r--r--  drivers/pci/controller/vmd.c | 75
-rw-r--r--  drivers/pci/devres.c | 883
-rw-r--r--  drivers/pci/doe.c | 271
-rw-r--r--  drivers/pci/ecam.c | 2
-rw-r--r--  drivers/pci/endpoint/Kconfig | 2
-rw-r--r--  drivers/pci/endpoint/functions/pci-epf-mhi.c | 56
-rw-r--r--  drivers/pci/endpoint/functions/pci-epf-test.c | 664
-rw-r--r--  drivers/pci/endpoint/functions/pci-epf-vntb.c | 19
-rw-r--r--  drivers/pci/endpoint/pci-ep-cfs.c | 10
-rw-r--r--  drivers/pci/endpoint/pci-epc-core.c | 383
-rw-r--r--  drivers/pci/endpoint/pci-epc-mem.c | 9
-rw-r--r--  drivers/pci/endpoint/pci-epf-core.c | 18
-rw-r--r--  drivers/pci/hotplug/Kconfig | 12
-rw-r--r--  drivers/pci/hotplug/Makefile | 1
-rw-r--r--  drivers/pci/hotplug/TODO | 17
-rw-r--r--  drivers/pci/hotplug/acpiphp_ampere_altra.c | 3
-rw-r--r--  drivers/pci/hotplug/acpiphp_ibm.c | 6
-rw-r--r--  drivers/pci/hotplug/cpci_hotplug.h | 3
-rw-r--r--  drivers/pci/hotplug/cpci_hotplug_core.c | 17
-rw-r--r--  drivers/pci/hotplug/cpqphp_core.c | 2
-rw-r--r--  drivers/pci/hotplug/cpqphp_ctrl.c | 2
-rw-r--r--  drivers/pci/hotplug/cpqphp_pci.c | 49
-rw-r--r--  drivers/pci/hotplug/cpqphp_sysfs.c | 1
-rw-r--r--  drivers/pci/hotplug/octep_hp.c | 427
-rw-r--r--  drivers/pci/hotplug/pci_hotplug_core.c | 150
-rw-r--r--  drivers/pci/hotplug/pciehp.h | 4
-rw-r--r--  drivers/pci/hotplug/pciehp_core.c | 45
-rw-r--r--  drivers/pci/hotplug/pciehp_ctrl.c | 5
-rw-r--r--  drivers/pci/hotplug/pciehp_hpc.c | 20
-rw-r--r--  drivers/pci/hotplug/pciehp_pci.c | 4
-rw-r--r--  drivers/pci/hotplug/pnv_php.c | 3
-rw-r--r--  drivers/pci/hotplug/s390_pci_hpc.c | 5
-rw-r--r--  drivers/pci/hotplug/shpchp.h | 56
-rw-r--r--  drivers/pci/hotplug/shpchp_core.c | 28
-rw-r--r--  drivers/pci/hotplug/shpchp_ctrl.c | 79
-rw-r--r--  drivers/pci/hotplug/shpchp_hpc.c | 67
-rw-r--r--  drivers/pci/iomap.c | 47
-rw-r--r--  drivers/pci/iov.c | 64
-rw-r--r--  drivers/pci/msi/api.c | 68
-rw-r--r--  drivers/pci/msi/irqdomain.c | 86
-rw-r--r--  drivers/pci/msi/msi.c | 246
-rw-r--r--  drivers/pci/msi/msi.h | 2
-rw-r--r--  drivers/pci/npem.c | 595
-rw-r--r--  drivers/pci/of.c | 244
-rw-r--r--  drivers/pci/of_property.c | 147
-rw-r--r--  drivers/pci/p2pdma.c | 61
-rw-r--r--  drivers/pci/pci-acpi.c | 204
-rw-r--r--  drivers/pci/pci-bridge-emul.c | 4
-rw-r--r--  drivers/pci/pci-driver.c | 33
-rw-r--r--  drivers/pci/pci-mid.c | 4
-rw-r--r--  drivers/pci/pci-pf-stub.c | 1
-rw-r--r--  drivers/pci/pci-stub.c | 1
-rw-r--r--  drivers/pci/pci-sysfs.c | 234
-rw-r--r--  drivers/pci/pci.c | 903
-rw-r--r--  drivers/pci/pci.h | 340
-rw-r--r--  drivers/pci/pcie/Kconfig | 2
-rw-r--r--  drivers/pci/pcie/Makefile | 4
-rw-r--r--  drivers/pci/pcie/aer.c | 131
-rw-r--r--  drivers/pci/pcie/aer_inject.c | 6
-rw-r--r--  drivers/pci/pcie/aspm.c | 317
-rw-r--r--  drivers/pci/pcie/bwctrl.c | 377
-rw-r--r--  drivers/pci/pcie/dpc.c | 96
-rw-r--r--  drivers/pci/pcie/edr.c | 28
-rw-r--r--  drivers/pci/pcie/err.c | 12
-rw-r--r--  drivers/pci/pcie/portdrv.c | 31
-rw-r--r--  drivers/pci/pcie/portdrv.h | 6
-rw-r--r--  drivers/pci/pcie/tlp.c | 135
-rw-r--r--  drivers/pci/probe.c | 339
-rw-r--r--  drivers/pci/proc.c | 4
-rw-r--r--  drivers/pci/pwrctrl/Kconfig | 23
-rw-r--r--  drivers/pci/pwrctrl/Makefile | 9
-rw-r--r--  drivers/pci/pwrctrl/core.c | 148
-rw-r--r--  drivers/pci/pwrctrl/pci-pwrctrl-pwrseq.c | 139
-rw-r--r--  drivers/pci/pwrctrl/slot.c | 93
-rw-r--r--  drivers/pci/quirks.c | 161
-rw-r--r--  drivers/pci/remove.c | 45
-rw-r--r--  drivers/pci/setup-bus.c | 703
-rw-r--r--  drivers/pci/setup-res.c | 31
-rw-r--r--  drivers/pci/slot.c | 68
-rw-r--r--  drivers/pci/switch/switchtec.c | 42
-rw-r--r--  drivers/pci/tph.c | 505
-rw-r--r--  drivers/pci/vpd.c | 18
171 files changed, 17032 insertions, 6612 deletions
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index d35001589d88..da28295b4aac 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -122,7 +122,10 @@ config PCI_ATS
bool
config PCI_DOE
- bool
+ bool "Enable PCI Data Object Exchange (DOE) support"
+ help
+ Say Y here if you want to be able to communicate with PCIe DOE
+ mailboxes.
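
For reference, a driver talks to such a mailbox through the kernel's pci-doe API (pci_find_doe_mailbox() and pci_doe()); a minimal sketch follows, in which the object type 1 and the payload layout are illustrative assumptions, not taken from this patch:

#include <linux/pci.h>
#include <linux/pci-doe.h>

/* Hedged sketch: one DOE request/response exchange. */
static int doe_example_query(struct pci_dev *pdev)
{
	struct pci_doe_mb *mb;
	u32 req[2] = { 0 };	/* payload layout is protocol-specific */
	u32 rsp[32];
	int rc;

	/* Look up a mailbox supporting vendor 0x0001 (PCI-SIG), type 1 */
	mb = pci_find_doe_mailbox(pdev, PCI_VENDOR_ID_PCI_SIG, 1);
	if (!mb)
		return -ENODEV;

	rc = pci_doe(mb, PCI_VENDOR_ID_PCI_SIG, 1, req, sizeof(req),
		     rsp, sizeof(rsp));
	return rc < 0 ? rc : 0;	/* rc >= 0 is the response size */
}
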
config PCI_ECAM
bool
@@ -143,6 +146,15 @@ config PCI_IOV
If unsure, say N.
+config PCI_NPEM
+ bool "Native PCIe Enclosure Management"
+ depends on LEDS_CLASS=y
+ help
+ Support for Native PCIe Enclosure Management. It allows managing LED
+ indications in storage enclosures. The enclosure must support the
+ following indications: OK, Locate, Fail, and Rebuild; other
+ indications are optional.
+
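
The NPEM registers live in a PCIe extended capability; a rough sketch of how its presence can be probed (the capability ID 0x29 comes from the PCIe NPEM ECN, everything else here is illustrative):

#include <linux/pci.h>

/* Sketch: check whether a device exposes the NPEM extended capability
 * (ID 0x29); the LED class devices registered by npem.c then drive the
 * OK/Locate/Fail/Rebuild indications. */
static bool npem_example_present(struct pci_dev *pdev)
{
	return pci_find_ext_capability(pdev, 0x29 /* NPEM */) != 0;
}
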
config PCI_PRI
bool "PCI PRI support"
select PCI_ATS
@@ -164,6 +176,15 @@ config PCI_PASID
If unsure, say N.
+config PCIE_TPH
+ bool "TLP Processing Hints"
+ help
+ This option adds support for PCIe TLP Processing Hints (TPH).
+ TPH allows endpoint devices to provide optimization hints, such as
+ desired caching behavior, for requests that target memory space.
+ These hints, called Steering Tags, let the system hardware optimize
+ the use of platform resources.
+
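
A hedged sketch of how an endpoint driver might consume this, assuming the pcie_enable_tph()/pcie_tph_get_cpu_st()/pcie_tph_set_st_entry() interface this series adds in pci-tph.h; the mode, CPU UID, and MSI-X index are illustrative:

#include <linux/pci.h>
#include <linux/pci-tph.h>

/* Sketch: enable TPH in interrupt vector mode and program steering tag
 * entry 0 for the CPU that will consume the device's DMA writes. */
static int tph_example_setup(struct pci_dev *pdev, unsigned int cpu_uid)
{
	u16 tag;
	int rc;

	rc = pcie_enable_tph(pdev, PCI_TPH_ST_IV_MODE);
	if (rc)
		return rc;

	rc = pcie_tph_get_cpu_st(pdev, TPH_MEM_TYPE_VM, cpu_uid, &tag);
	if (rc)
		goto err;

	rc = pcie_tph_set_st_entry(pdev, 0, tag);	/* MSI-X entry 0 */
	if (rc)
		goto err;

	return 0;
err:
	pcie_disable_tph(pdev);
	return rc;
}
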
config PCI_P2PDMA
bool "PCI peer-to-peer transfer support"
depends on ZONE_DEVICE
@@ -185,6 +206,12 @@ config PCI_P2PDMA
P2P DMA transactions must be between devices behind the same root
port.
+ Enabling this option will reduce the entropy of x86 KASLR memory
+ regions. For example - on a 46 bit system, the entropy goes down
+ from 16 bits to 15 bits. The actual reduction in entropy depends
+ on the physical address bits, on processor features, kernel config
+ (5 level page table) and physical memory present on the system.
+
If unsure, say N.
config PCI_LABEL
@@ -296,5 +323,6 @@ source "drivers/pci/hotplug/Kconfig"
source "drivers/pci/controller/Kconfig"
source "drivers/pci/endpoint/Kconfig"
source "drivers/pci/switch/Kconfig"
+source "drivers/pci/pwrctrl/Kconfig"
endif
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 175302036890..67647f1880fb 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_PCI) += access.o bus.o probe.o host-bridge.o \
obj-$(CONFIG_PCI) += msi/
obj-$(CONFIG_PCI) += pcie/
+obj-$(CONFIG_PCI) += pwrctrl/
ifdef CONFIG_PCI
obj-$(CONFIG_PROC_FS) += proc.o
@@ -34,6 +35,8 @@ obj-$(CONFIG_XEN_PCIDEV_FRONTEND) += xen-pcifront.o
obj-$(CONFIG_VGA_ARB) += vgaarb.o
obj-$(CONFIG_PCI_DOE) += doe.o
obj-$(CONFIG_PCI_DYNAMIC_OF_NODES) += of_property.o
+obj-$(CONFIG_PCI_NPEM) += npem.o
+obj-$(CONFIG_PCIE_TPH) += tph.o
# Endpoint library must be initialized before its users
obj-$(CONFIG_PCI_ENDPOINT) += endpoint/
diff --git a/drivers/pci/access.c b/drivers/pci/access.c
index 6449056b57dd..b123da16b63b 100644
--- a/drivers/pci/access.c
+++ b/drivers/pci/access.c
@@ -36,10 +36,13 @@ DEFINE_RAW_SPINLOCK(pci_lock);
int noinline pci_bus_read_config_##size \
(struct pci_bus *bus, unsigned int devfn, int pos, type *value) \
{ \
- int res; \
unsigned long flags; \
u32 data = 0; \
- if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER; \
+ int res; \
+ \
+ if (PCI_##size##_BAD) \
+ return PCIBIOS_BAD_REGISTER_NUMBER; \
+ \
pci_lock_config(flags); \
res = bus->ops->read(bus, devfn, pos, len, &data); \
if (res) \
@@ -47,6 +50,7 @@ int noinline pci_bus_read_config_##size \
else \
*value = (type)data; \
pci_unlock_config(flags); \
+ \
return res; \
}
@@ -54,12 +58,16 @@ int noinline pci_bus_read_config_##size \
int noinline pci_bus_write_config_##size \
(struct pci_bus *bus, unsigned int devfn, int pos, type value) \
{ \
- int res; \
unsigned long flags; \
- if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER; \
+ int res; \
+ \
+ if (PCI_##size##_BAD) \
+ return PCIBIOS_BAD_REGISTER_NUMBER; \
+ \
pci_lock_config(flags); \
res = bus->ops->write(bus, devfn, pos, len, value); \
pci_unlock_config(flags); \
+ \
return res; \
}
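
The macro above generates pci_bus_write_config_{byte,word,dword}(); together with the read variants, a typical caller looks like this sketch (a hypothetical helper that reads a vendor ID through the bus before any struct pci_dev exists):

#include <linux/pci.h>

static u16 example_read_vendor(struct pci_bus *bus, unsigned int devfn)
{
	u16 vendor;

	if (pci_bus_read_config_word(bus, devfn, PCI_VENDOR_ID, &vendor))
		return 0xffff;	/* PCIBIOS_* error: treat as no device */

	return vendor;
}
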
@@ -216,24 +224,27 @@ static noinline void pci_wait_cfg(struct pci_dev *dev)
}
/* Returns 0 on success, negative values indicate error. */
-#define PCI_USER_READ_CONFIG(size, type) \
+#define PCI_USER_READ_CONFIG(size, type) \
int pci_user_read_config_##size \
(struct pci_dev *dev, int pos, type *val) \
{ \
- int ret = PCIBIOS_SUCCESSFUL; \
u32 data = -1; \
+ int ret; \
+ \
if (PCI_##size##_BAD) \
return -EINVAL; \
- raw_spin_lock_irq(&pci_lock); \
+ \
+ raw_spin_lock_irq(&pci_lock); \
if (unlikely(dev->block_cfg_access)) \
pci_wait_cfg(dev); \
ret = dev->bus->ops->read(dev->bus, dev->devfn, \
- pos, sizeof(type), &data); \
- raw_spin_unlock_irq(&pci_lock); \
+ pos, sizeof(type), &data); \
+ raw_spin_unlock_irq(&pci_lock); \
if (ret) \
PCI_SET_ERROR_RESPONSE(val); \
else \
*val = (type)data; \
+ \
return pcibios_err_to_errno(ret); \
} \
EXPORT_SYMBOL_GPL(pci_user_read_config_##size);
@@ -243,15 +254,18 @@ EXPORT_SYMBOL_GPL(pci_user_read_config_##size);
int pci_user_write_config_##size \
(struct pci_dev *dev, int pos, type val) \
{ \
- int ret = PCIBIOS_SUCCESSFUL; \
+ int ret; \
+ \
if (PCI_##size##_BAD) \
return -EINVAL; \
- raw_spin_lock_irq(&pci_lock); \
+ \
+ raw_spin_lock_irq(&pci_lock); \
if (unlikely(dev->block_cfg_access)) \
pci_wait_cfg(dev); \
ret = dev->bus->ops->write(dev->bus, dev->devfn, \
- pos, sizeof(type), val); \
- raw_spin_unlock_irq(&pci_lock); \
+ pos, sizeof(type), val); \
+ raw_spin_unlock_irq(&pci_lock); \
+ \
return pcibios_err_to_errno(ret); \
} \
EXPORT_SYMBOL_GPL(pci_user_write_config_##size);
diff --git a/drivers/pci/ats.c b/drivers/pci/ats.c
index c570892b2090..ec6c8dbdc5e9 100644
--- a/drivers/pci/ats.c
+++ b/drivers/pci/ats.c
@@ -48,6 +48,39 @@ bool pci_ats_supported(struct pci_dev *dev)
EXPORT_SYMBOL_GPL(pci_ats_supported);
/**
+ * pci_prepare_ats - Set up the page shift for ATS
+ * @dev: the PCI device
+ * @ps: the IOMMU page shift
+ *
+ * This must be done by the IOMMU driver on the PF before any VFs are created to
+ * ensure that the VF can have ATS enabled.
+ *
+ * Returns 0 on success, or negative on failure.
+ */
+int pci_prepare_ats(struct pci_dev *dev, int ps)
+{
+ u16 ctrl;
+
+ if (!pci_ats_supported(dev))
+ return -EINVAL;
+
+ if (WARN_ON(dev->ats_enabled))
+ return -EBUSY;
+
+ if (ps < PCI_ATS_MIN_STU)
+ return -EINVAL;
+
+ if (dev->is_virtfn)
+ return 0;
+
+ dev->ats_stu = ps;
+ ctrl = PCI_ATS_CTRL_STU(dev->ats_stu - PCI_ATS_MIN_STU);
+ pci_write_config_word(dev, dev->ats_cap + PCI_ATS_CTRL, ctrl);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pci_prepare_ats);
+
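
A minimal sketch of the intended calling order, assuming a hypothetical IOMMU-driver hook; only pci_prepare_ats() itself is from this patch:

#include <linux/pci.h>
#include <linux/pci-ats.h>

/* Illustrative ordering only: the IOMMU driver programs the PF's ATS
 * Smallest Translation Unit before any VFs exist, so that a later
 * pci_enable_ats() on a VF finds a compatible STU already set. */
static int iommu_example_prepare_pf(struct pci_dev *pf)
{
	return pci_prepare_ats(pf, PAGE_SHIFT);	/* 12 for 4K IOMMU pages */
}
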
+/**
* pci_enable_ats - enable the ATS capability
* @dev: the PCI device
* @ps: the IOMMU page shift
@@ -377,7 +410,7 @@ int pci_enable_pasid(struct pci_dev *pdev, int features)
if (WARN_ON(pdev->pasid_enabled))
return -EBUSY;
- if (!pdev->eetlp_prefix_path && !pdev->pasid_no_tlp)
+ if (!pdev->eetlp_prefix_max && !pdev->pasid_no_tlp)
return -EINVAL;
if (!pasid)
@@ -455,8 +488,8 @@ void pci_restore_pasid_state(struct pci_dev *pdev)
* pci_pasid_features - Check which PASID features are supported
* @pdev: PCI device structure
*
- * Returns a negative value when no PASI capability is present.
- * Otherwise is returns a bitmask with supported features. Current
+ * Return a negative value when no PASID capability is present.
+ * Otherwise return a bitmask with supported features. Current
* features reported are:
* PCI_PASID_CAP_EXEC - Execute permission supported
* PCI_PASID_CAP_PRIV - Privileged mode supported
@@ -505,4 +538,37 @@ int pci_max_pasids(struct pci_dev *pdev)
return (1 << FIELD_GET(PCI_PASID_CAP_WIDTH, supported));
}
EXPORT_SYMBOL_GPL(pci_max_pasids);
+
+/**
+ * pci_pasid_status - Check the PASID status
+ * @pdev: PCI device structure
+ *
+ * Returns a negative value when no PASID capability is present.
+ * Otherwise the value of the control register is returned.
+ * Status bits reported are:
+ *
+ * PCI_PASID_CTRL_ENABLE - PASID enabled
+ * PCI_PASID_CTRL_EXEC - Execute permission enabled
+ * PCI_PASID_CTRL_PRIV - Privileged mode enabled
+ */
+int pci_pasid_status(struct pci_dev *pdev)
+{
+ int pasid;
+ u16 ctrl;
+
+ if (pdev->is_virtfn)
+ pdev = pci_physfn(pdev);
+
+ pasid = pdev->pasid_cap;
+ if (!pasid)
+ return -EINVAL;
+
+ pci_read_config_word(pdev, pasid + PCI_PASID_CTRL, &ctrl);
+
+ ctrl &= PCI_PASID_CTRL_ENABLE | PCI_PASID_CTRL_EXEC |
+ PCI_PASID_CTRL_PRIV;
+
+ return ctrl;
+}
+EXPORT_SYMBOL_GPL(pci_pasid_status);
#endif /* CONFIG_PCI_PASID */
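
A small usage sketch for the new helper (the wrapper function is hypothetical; PCI_PASID_CTRL_ENABLE is the existing pci_regs.h bit):

#include <linux/pci.h>
#include <linux/pci-ats.h>

/* Sketch: report whether PASID is currently enabled on a device
 * (for a VF, pci_pasid_status() checks the parent PF's control). */
static bool example_pasid_enabled(struct pci_dev *pdev)
{
	int status = pci_pasid_status(pdev);

	return status >= 0 && (status & PCI_PASID_CTRL_ENABLE);
}
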
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index 826b5016a101..b6851101ac36 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -12,11 +12,25 @@
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/proc_fs.h>
#include <linux/slab.h>
#include "pci.h"
+/*
+ * The first PCI_BRIDGE_RESOURCE_NUM PCI bus resources (those that correspond
+ * to P2P or CardBus bridge windows) go in a table. Additional ones (for
+ * buses below host bridges or subtractive decode bridges) go in the list.
+ * Use pci_bus_for_each_resource() to iterate through all the resources.
+ */
+
+struct pci_bus_resource {
+ struct list_head list;
+ struct resource *res;
+};
+
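
A short sketch of the iteration the comment above refers to, using pci_bus_for_each_resource() to cover both the fixed window table and the extra-resource list; the dump function itself is hypothetical:

#include <linux/pci.h>

static void example_dump_bus_windows(struct pci_bus *bus)
{
	struct resource *res;
	int i;

	pci_bus_for_each_resource(bus, res, i) {
		if (res)
			dev_info(&bus->dev, "window %d: %pR\n", i, res);
	}
}
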
void pci_add_resource_offset(struct list_head *resources, struct resource *res,
resource_size_t offset)
{
@@ -45,8 +59,7 @@ void pci_free_resource_list(struct list_head *resources)
}
EXPORT_SYMBOL(pci_free_resource_list);
-void pci_bus_add_resource(struct pci_bus *bus, struct resource *res,
- unsigned int flags)
+void pci_bus_add_resource(struct pci_bus *bus, struct resource *res)
{
struct pci_bus_resource *bus_res;
@@ -57,7 +70,6 @@ void pci_bus_add_resource(struct pci_bus *bus, struct resource *res,
}
bus_res->res = res;
- bus_res->flags = flags;
list_add_tail(&bus_res->list, &bus->resources);
}
@@ -176,10 +188,7 @@ static void pci_clip_resource_to_region(struct pci_bus *bus,
static int pci_bus_alloc_from_region(struct pci_bus *bus, struct resource *res,
resource_size_t size, resource_size_t align,
resource_size_t min, unsigned long type_mask,
- resource_size_t (*alignf)(void *,
- const struct resource *,
- resource_size_t,
- resource_size_t),
+ resource_alignf alignf,
void *alignf_data,
struct pci_bus_region *region)
{
@@ -250,10 +259,7 @@ static int pci_bus_alloc_from_region(struct pci_bus *bus, struct resource *res,
int pci_bus_alloc_resource(struct pci_bus *bus, struct resource *res,
resource_size_t size, resource_size_t align,
resource_size_t min, unsigned long type_mask,
- resource_size_t (*alignf)(void *,
- const struct resource *,
- resource_size_t,
- resource_size_t),
+ resource_alignf alignf,
void *alignf_data)
{
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
@@ -334,6 +340,7 @@ void __weak pcibios_bus_add_device(struct pci_dev *pdev) { }
void pci_bus_add_device(struct pci_dev *dev)
{
struct device_node *dn = dev->dev.of_node;
+ struct platform_device *pdev;
int retval;
/*
@@ -348,12 +355,26 @@ void pci_bus_add_device(struct pci_dev *dev)
pci_proc_attach_device(dev);
pci_bridge_d3_update(dev);
+ /*
+ * If the PCI device is associated with a pwrctrl device with a
+ * power supply, create a device link between the PCI device and
+ * pwrctrl device. This ensures that pwrctrl drivers are probed
+ * before PCI client drivers.
+ */
+ pdev = of_find_device_by_node(dn);
+ if (pdev && of_pci_supply_present(dn)) {
+ if (!device_link_add(&dev->dev, &pdev->dev,
+ DL_FLAG_AUTOREMOVE_CONSUMER))
+ pci_err(dev, "failed to add device link to power control device %s\n",
+ pdev->name);
+ }
+
dev->match_driver = !dn || of_device_is_available(dn);
retval = device_attach(&dev->dev);
if (retval < 0 && retval != -EPROBE_DEFER)
pci_warn(dev, "device attach failed (%d)\n", retval);
- pci_dev_assign_added(dev, true);
+ pci_dev_assign_added(dev);
}
EXPORT_SYMBOL_GPL(pci_bus_add_device);
@@ -386,41 +407,23 @@ void pci_bus_add_devices(const struct pci_bus *bus)
}
EXPORT_SYMBOL(pci_bus_add_devices);
-static void __pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
- void *userdata, bool locked)
+static int __pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
+ void *userdata)
{
struct pci_dev *dev;
- struct pci_bus *bus;
- struct list_head *next;
- int retval;
+ int ret = 0;
- bus = top;
- if (!locked)
- down_read(&pci_bus_sem);
- next = top->devices.next;
- for (;;) {
- if (next == &bus->devices) {
- /* end of this bus, go up or finish */
- if (bus == top)
+ list_for_each_entry(dev, &top->devices, bus_list) {
+ ret = cb(dev, userdata);
+ if (ret)
+ break;
+ if (dev->subordinate) {
+ ret = __pci_walk_bus(dev->subordinate, cb, userdata);
+ if (ret)
break;
- next = bus->self->bus_list.next;
- bus = bus->self->bus;
- continue;
}
- dev = list_entry(next, struct pci_dev, bus_list);
- if (dev->subordinate) {
- /* this is a pci-pci bridge, do its devices next */
- next = dev->subordinate->devices.next;
- bus = dev->subordinate;
- } else
- next = dev->bus_list.next;
-
- retval = cb(dev, userdata);
- if (retval)
- break;
}
- if (!locked)
- up_read(&pci_bus_sem);
+ return ret;
}
/**
@@ -438,7 +441,9 @@ static void __pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void
*/
void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *), void *userdata)
{
- __pci_walk_bus(top, cb, userdata, false);
+ down_read(&pci_bus_sem);
+ __pci_walk_bus(top, cb, userdata);
+ up_read(&pci_bus_sem);
}
EXPORT_SYMBOL_GPL(pci_walk_bus);
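
The recursive rewrite keeps the callback contract: a nonzero return from the callback stops the walk. A minimal usage sketch (both functions hypothetical):

#include <linux/pci.h>

static int example_count_cb(struct pci_dev *dev, void *data)
{
	(*(unsigned int *)data)++;
	return 0;	/* continue walking */
}

static unsigned int example_count_devices(struct pci_bus *bus)
{
	unsigned int count = 0;

	pci_walk_bus(bus, example_count_cb, &count);
	return count;
}
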
@@ -446,9 +451,8 @@ void pci_walk_bus_locked(struct pci_bus *top, int (*cb)(struct pci_dev *, void *
{
lockdep_assert_held(&pci_bus_sem);
- __pci_walk_bus(top, cb, userdata, true);
+ __pci_walk_bus(top, cb, userdata);
}
-EXPORT_SYMBOL_GPL(pci_walk_bus_locked);
struct pci_bus *pci_bus_get(struct pci_bus *bus)
{
diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig
index e534c02ee34f..eb3cc28d43f8 100644
--- a/drivers/pci/controller/Kconfig
+++ b/drivers/pci/controller/Kconfig
@@ -40,6 +40,7 @@ config PCIE_APPLE
depends on OF
depends on PCI_MSI
select PCI_HOST_COMMON
+ select IRQ_MSI_LIB
help
Say Y here if you want to enable PCIe controller support on Apple
system-on-chips, like the Apple M1. This is required for the USB
@@ -196,7 +197,7 @@ config PCIE_MEDIATEK
config PCIE_MEDIATEK_GEN3
tristate "MediaTek Gen3 PCIe controller"
- depends on ARCH_MEDIATEK || COMPILE_TEST
+ depends on ARCH_AIROHA || ARCH_MEDIATEK || COMPILE_TEST
depends on PCI_MSI
help
Adds support for PCIe Gen3 MAC controller for MediaTek SoCs.
@@ -215,14 +216,6 @@ config PCIE_MT7621
help
This selects a driver for the MediaTek MT7621 PCIe Controller.
-config PCIE_MICROCHIP_HOST
- tristate "Microchip AXI PCIe controller"
- depends on PCI_MSI && OF
- select PCI_HOST_COMMON
- help
- Say Y here if you want kernel to support the Microchip AXI PCIe
- Host Bridge driver.
-
config PCI_HYPERV_INTERFACE
tristate "Microsoft Hyper-V PCI Interface"
depends on ((X86 && X86_64) || ARM64) && HYPERV && PCI_MSI
@@ -235,6 +228,7 @@ config PCI_TEGRA
bool "NVIDIA Tegra PCIe controller"
depends on ARCH_TEGRA || COMPILE_TEST
depends on PCI_MSI
+ select IRQ_MSI_LIB
help
Say Y here if you want support for the PCIe host controller found
on NVIDIA Tegra SoCs.
@@ -311,6 +305,7 @@ config PCI_XGENE_MSI
bool "X-Gene v1 PCIe MSI feature"
depends on PCI_XGENE
depends on PCI_MSI
+ select IRQ_MSI_LIB
default y
help
Say Y here if you want PCIe MSI support for the APM X-Gene v1 SoC.
@@ -356,4 +351,5 @@ config PCIE_XILINX_CPM
source "drivers/pci/controller/cadence/Kconfig"
source "drivers/pci/controller/dwc/Kconfig"
source "drivers/pci/controller/mobiveil/Kconfig"
+source "drivers/pci/controller/plda/Kconfig"
endmenu
diff --git a/drivers/pci/controller/Makefile b/drivers/pci/controller/Makefile
index f2b19e6174af..038ccbd9e3ba 100644
--- a/drivers/pci/controller/Makefile
+++ b/drivers/pci/controller/Makefile
@@ -33,7 +33,6 @@ obj-$(CONFIG_PCIE_ROCKCHIP_EP) += pcie-rockchip-ep.o
obj-$(CONFIG_PCIE_ROCKCHIP_HOST) += pcie-rockchip-host.o
obj-$(CONFIG_PCIE_MEDIATEK) += pcie-mediatek.o
obj-$(CONFIG_PCIE_MEDIATEK_GEN3) += pcie-mediatek-gen3.o
-obj-$(CONFIG_PCIE_MICROCHIP_HOST) += pcie-microchip-host.o
obj-$(CONFIG_VMD) += vmd.o
obj-$(CONFIG_PCIE_BRCMSTB) += pcie-brcmstb.o
obj-$(CONFIG_PCI_LOONGSON) += pci-loongson.o
@@ -44,6 +43,7 @@ obj-$(CONFIG_PCIE_MT7621) += pcie-mt7621.o
# pcie-hisi.o quirks are needed even without CONFIG_PCIE_DW
obj-y += dwc/
obj-y += mobiveil/
+obj-y += plda/
# The following drivers are for devices that use the generic ACPI
diff --git a/drivers/pci/controller/cadence/Kconfig b/drivers/pci/controller/cadence/Kconfig
index 1d5a70c9055e..8a0044bb3989 100644
--- a/drivers/pci/controller/cadence/Kconfig
+++ b/drivers/pci/controller/cadence/Kconfig
@@ -38,7 +38,7 @@ config PCIE_CADENCE_PLAT_EP
select PCIE_CADENCE_EP
select PCIE_CADENCE_PLAT
help
- Say Y here if you want to support the Cadence PCIe platform controller in
+ Say Y here if you want to support the Cadence PCIe platform controller in
endpoint mode. This PCIe controller may be embedded into many
different vendors SoCs.
diff --git a/drivers/pci/controller/cadence/pci-j721e.c b/drivers/pci/controller/cadence/pci-j721e.c
index 85718246016b..ef1cfdae33bb 100644
--- a/drivers/pci/controller/cadence/pci-j721e.c
+++ b/drivers/pci/controller/cadence/pci-j721e.c
@@ -7,6 +7,8 @@
*/
#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/container_of.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/io.h>
@@ -22,6 +24,8 @@
#include "../../pci.h"
#include "pcie-cadence.h"
+#define cdns_pcie_to_rc(p) container_of(p, struct cdns_pcie_rc, pcie)
+
#define ENABLE_REG_SYS_2 0x108
#define STATUS_REG_SYS_2 0x508
#define STATUS_CLR_REG_SYS_2 0x708
@@ -44,6 +48,7 @@ enum link_status {
#define J721E_MODE_RC BIT(7)
#define LANE_COUNT(n) ((n) << 8)
+#define ACSPCIE_PAD_DISABLE_MASK GENMASK(1, 0)
#define GENERATION_SEL_MASK GENMASK(1, 0)
struct j721e_pcie {
@@ -52,6 +57,7 @@ struct j721e_pcie {
u32 mode;
u32 num_lanes;
u32 max_lanes;
+ struct gpio_desc *reset_gpio;
void __iomem *user_cfg_base;
void __iomem *intd_cfg_base;
u32 linkdown_irq_regfield;
@@ -220,6 +226,36 @@ static int j721e_pcie_set_lane_count(struct j721e_pcie *pcie,
return ret;
}
+static int j721e_enable_acspcie_refclk(struct j721e_pcie *pcie,
+ struct regmap *syscon)
+{
+ struct device *dev = pcie->cdns_pcie->dev;
+ struct device_node *node = dev->of_node;
+ u32 mask = ACSPCIE_PAD_DISABLE_MASK;
+ struct of_phandle_args args;
+ u32 val;
+ int ret;
+
+ ret = of_parse_phandle_with_fixed_args(node,
+ "ti,syscon-acspcie-proxy-ctrl",
+ 1, 0, &args);
+ if (ret) {
+ dev_err(dev,
+ "ti,syscon-acspcie-proxy-ctrl has invalid arguments\n");
+ return ret;
+ }
+
+ /* Clear PAD IO disable bits to enable refclk output */
+ val = ~(args.args[0]);
+ ret = regmap_update_bits(syscon, 0, mask, val);
+ if (ret) {
+ dev_err(dev, "failed to enable ACSPCIE refclk: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
static int j721e_pcie_ctrl_init(struct j721e_pcie *pcie)
{
struct device *dev = pcie->cdns_pcie->dev;
@@ -259,7 +295,13 @@ static int j721e_pcie_ctrl_init(struct j721e_pcie *pcie)
return ret;
}
- return 0;
+ /* Enable ACSPCIE refclk output if the optional property exists */
+ syscon = syscon_regmap_lookup_by_phandle_optional(node,
+ "ti,syscon-acspcie-proxy-ctrl");
+ if (!syscon)
+ return 0;
+
+ return j721e_enable_acspcie_refclk(pcie, syscon);
}
static int cdns_ti_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
@@ -313,6 +355,7 @@ static const struct j721e_pcie_data j7200_pcie_rc_data = {
static const struct j721e_pcie_data j7200_pcie_ep_data = {
.mode = PCI_MODE_EP,
.quirk_detect_quiet_flag = true,
+ .linkdown_irq_regfield = J7200_LINK_DOWN,
.quirk_disable_flr = true,
.max_lanes = 2,
};
@@ -334,16 +377,23 @@ static const struct j721e_pcie_data j784s4_pcie_rc_data = {
.mode = PCI_MODE_RC,
.quirk_retrain_flag = true,
.byte_access_allowed = false,
- .linkdown_irq_regfield = LINK_DOWN,
+ .linkdown_irq_regfield = J7200_LINK_DOWN,
.max_lanes = 4,
};
static const struct j721e_pcie_data j784s4_pcie_ep_data = {
.mode = PCI_MODE_EP,
- .linkdown_irq_regfield = LINK_DOWN,
+ .linkdown_irq_regfield = J7200_LINK_DOWN,
.max_lanes = 4,
};
+static const struct j721e_pcie_data j722s_pcie_rc_data = {
+ .mode = PCI_MODE_RC,
+ .linkdown_irq_regfield = J7200_LINK_DOWN,
+ .byte_access_allowed = true,
+ .max_lanes = 1,
+};
+
static const struct of_device_id of_j721e_pcie_match[] = {
{
.compatible = "ti,j721e-pcie-host",
@@ -377,6 +427,10 @@ static const struct of_device_id of_j721e_pcie_match[] = {
.compatible = "ti,j784s4-pcie-ep",
.data = &j784s4_pcie_ep_data,
},
+ {
+ .compatible = "ti,j722s-pcie-host",
+ .data = &j722s_pcie_rc_data,
+ },
{},
};
@@ -482,20 +536,20 @@ static int j721e_pcie_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
ret = pm_runtime_get_sync(dev);
if (ret < 0) {
- dev_err(dev, "pm_runtime_get_sync failed\n");
+ dev_err_probe(dev, ret, "pm_runtime_get_sync failed\n");
goto err_get_sync;
}
ret = j721e_pcie_ctrl_init(pcie);
if (ret < 0) {
- dev_err(dev, "pm_runtime_get_sync failed\n");
+ dev_err_probe(dev, ret, "pm_runtime_get_sync failed\n");
goto err_get_sync;
}
ret = devm_request_irq(dev, irq, j721e_pcie_link_irq_handler, 0,
"j721e-pcie-link-down-irq", pcie);
if (ret < 0) {
- dev_err(dev, "failed to request link state IRQ %d\n", irq);
+ dev_err_probe(dev, ret, "failed to request link state IRQ %d\n", irq);
goto err_get_sync;
}
@@ -505,42 +559,39 @@ static int j721e_pcie_probe(struct platform_device *pdev)
case PCI_MODE_RC:
gpiod = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(gpiod)) {
- ret = PTR_ERR(gpiod);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "Failed to get reset GPIO\n");
+ ret = dev_err_probe(dev, PTR_ERR(gpiod), "Failed to get reset GPIO\n");
goto err_get_sync;
}
+ pcie->reset_gpio = gpiod;
ret = cdns_pcie_init_phy(dev, cdns_pcie);
if (ret) {
- dev_err(dev, "Failed to init phy\n");
+ dev_err_probe(dev, ret, "Failed to init phy\n");
goto err_get_sync;
}
clk = devm_clk_get_optional(dev, "pcie_refclk");
if (IS_ERR(clk)) {
- ret = PTR_ERR(clk);
- dev_err(dev, "failed to get pcie_refclk\n");
+ ret = dev_err_probe(dev, PTR_ERR(clk), "failed to get pcie_refclk\n");
goto err_pcie_setup;
}
ret = clk_prepare_enable(clk);
if (ret) {
- dev_err(dev, "failed to enable pcie_refclk\n");
+ dev_err_probe(dev, ret, "failed to enable pcie_refclk\n");
goto err_pcie_setup;
}
pcie->refclk = clk;
/*
- * "Power Sequencing and Reset Signal Timings" table in
- * PCI EXPRESS CARD ELECTROMECHANICAL SPECIFICATION, REV. 3.0
- * indicates PERST# should be deasserted after minimum of 100us
- * once REFCLK is stable. The REFCLK to the connector in RC
- * mode is selected while enabling the PHY. So deassert PERST#
- * after 100 us.
+ * Section 2.2 of the PCI Express Card Electromechanical
+ * Specification (Revision 5.1) mandates that the deassertion
+ * of the PERST# signal should be delayed by 100 ms (TPVPERL).
+ * This shall ensure that the power and the reference clock
+ * are stable.
*/
if (gpiod) {
- usleep_range(100, 200);
+ msleep(PCIE_T_PVPERL_MS);
gpiod_set_value_cansleep(gpiod, 1);
}
@@ -554,7 +605,7 @@ static int j721e_pcie_probe(struct platform_device *pdev)
case PCI_MODE_EP:
ret = cdns_pcie_init_phy(dev, cdns_pcie);
if (ret) {
- dev_err(dev, "Failed to init phy\n");
+ dev_err_probe(dev, ret, "Failed to init phy\n");
goto err_get_sync;
}
@@ -589,13 +640,94 @@ static void j721e_pcie_remove(struct platform_device *pdev)
pm_runtime_disable(dev);
}
+static int j721e_pcie_suspend_noirq(struct device *dev)
+{
+ struct j721e_pcie *pcie = dev_get_drvdata(dev);
+
+ if (pcie->mode == PCI_MODE_RC) {
+ gpiod_set_value_cansleep(pcie->reset_gpio, 0);
+ clk_disable_unprepare(pcie->refclk);
+ }
+
+ cdns_pcie_disable_phy(pcie->cdns_pcie);
+
+ return 0;
+}
+
+static int j721e_pcie_resume_noirq(struct device *dev)
+{
+ struct j721e_pcie *pcie = dev_get_drvdata(dev);
+ struct cdns_pcie *cdns_pcie = pcie->cdns_pcie;
+ int ret;
+
+ ret = j721e_pcie_ctrl_init(pcie);
+ if (ret < 0)
+ return ret;
+
+ j721e_pcie_config_link_irq(pcie);
+
+ /*
+ * cdns_pcie_enable_phy() is not called explicitly in probe; there it
+ * is invoked via cdns_pcie_init_phy().
+ */
+ ret = cdns_pcie_enable_phy(pcie->cdns_pcie);
+ if (ret < 0)
+ return ret;
+
+ if (pcie->mode == PCI_MODE_RC) {
+ struct cdns_pcie_rc *rc = cdns_pcie_to_rc(cdns_pcie);
+
+ ret = clk_prepare_enable(pcie->refclk);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Section 2.2 of the PCI Express Card Electromechanical
+ * Specification (Revision 5.1) mandates that the deassertion
+ * of the PERST# signal should be delayed by 100 ms (TPVPERL).
+ * This shall ensure that the power and the reference clock
+ * are stable.
+ */
+ if (pcie->reset_gpio) {
+ msleep(PCIE_T_PVPERL_MS);
+ gpiod_set_value_cansleep(pcie->reset_gpio, 1);
+ }
+
+ ret = cdns_pcie_host_link_setup(rc);
+ if (ret < 0) {
+ clk_disable_unprepare(pcie->refclk);
+ return ret;
+ }
+
+ /*
+ * Reset internal status of BARs to force reinitialization in
+ * cdns_pcie_host_init().
+ */
+ for (enum cdns_pcie_rp_bar bar = RP_BAR0; bar <= RP_NO_BAR; bar++)
+ rc->avail_ib_bar[bar] = true;
+
+ ret = cdns_pcie_host_init(rc);
+ if (ret) {
+ clk_disable_unprepare(pcie->refclk);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static DEFINE_NOIRQ_DEV_PM_OPS(j721e_pcie_pm_ops,
+ j721e_pcie_suspend_noirq,
+ j721e_pcie_resume_noirq);
+
static struct platform_driver j721e_pcie_driver = {
.probe = j721e_pcie_probe,
- .remove_new = j721e_pcie_remove,
+ .remove = j721e_pcie_remove,
.driver = {
.name = "j721e-pcie",
.of_match_table = of_j721e_pcie_match,
.suppress_bind_attrs = true,
+ .pm = pm_sleep_ptr(&j721e_pcie_pm_ops),
},
};
builtin_platform_driver(j721e_pcie_driver);
diff --git a/drivers/pci/controller/cadence/pcie-cadence-ep.c b/drivers/pci/controller/cadence/pcie-cadence-ep.c
index 81c50dc64da9..599ec4b1223e 100644
--- a/drivers/pci/controller/cadence/pcie-cadence-ep.c
+++ b/drivers/pci/controller/cadence/pcie-cadence-ep.c
@@ -99,14 +99,11 @@ static int cdns_pcie_ep_set_bar(struct pci_epc *epc, u8 fn, u8 vfn,
ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_IO_32BITS;
} else {
bool is_prefetch = !!(flags & PCI_BASE_ADDRESS_MEM_PREFETCH);
- bool is_64bits = sz > SZ_2G;
+ bool is_64bits = !!(flags & PCI_BASE_ADDRESS_MEM_TYPE_64);
if (is_64bits && (bar & 1))
return -EINVAL;
- if (is_64bits && !(flags & PCI_BASE_ADDRESS_MEM_TYPE_64))
- epf_bar->flags |= PCI_BASE_ADDRESS_MEM_TYPE_64;
-
if (is_64bits && is_prefetch)
ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_64BITS;
else if (is_prefetch)
@@ -304,12 +301,12 @@ static int cdns_pcie_ep_set_msix(struct pci_epc *epc, u8 fn, u8 vfn,
val |= interrupts;
cdns_pcie_ep_fn_writew(pcie, fn, reg, val);
- /* Set MSIX BAR and offset */
+ /* Set MSI-X BAR and offset */
reg = cap + PCI_MSIX_TABLE;
val = offset | bir;
cdns_pcie_ep_fn_writel(pcie, fn, reg, val);
- /* Set PBA BAR and offset. BAR must match MSIX BAR */
+ /* Set PBA BAR and offset. BAR must match MSI-X BAR */
reg = cap + PCI_MSIX_PBA;
val = (offset + (interrupts * PCI_MSIX_ENTRY_SIZE)) | bir;
cdns_pcie_ep_fn_writel(pcie, fn, reg, val);
@@ -355,8 +352,7 @@ static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn, u8 intx,
spin_unlock_irqrestore(&ep->lock, flags);
offset = CDNS_PCIE_NORMAL_MSG_ROUTING(MSG_ROUTING_LOCAL) |
- CDNS_PCIE_NORMAL_MSG_CODE(msg_code) |
- CDNS_PCIE_MSG_NO_DATA;
+ CDNS_PCIE_NORMAL_MSG_CODE(msg_code);
writel(0, ep->irq_cpu_addr + offset);
}
@@ -576,8 +572,8 @@ static int cdns_pcie_ep_start(struct pci_epc *epc)
/*
* Next function field in ARI_CAP_AND_CTR register for last function
- * should be 0.
- * Clearing Next Function Number field for the last function used.
+ * should be 0. Clear Next Function Number field for the last
+ * function used.
*/
last_fn = find_last_bit(&epc->function_num_map, BITS_PER_LONG);
reg = CDNS_PCIE_CORE_PF_I_ARI_CAP_AND_CTRL(last_fn);
@@ -746,6 +742,8 @@ int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
spin_lock_init(&ep->lock);
+ pci_epc_init_notify(epc);
+
return 0;
free_epc_mem:
diff --git a/drivers/pci/controller/cadence/pcie-cadence-host.c b/drivers/pci/controller/cadence/pcie-cadence-host.c
index 5b14f7ee3c79..8af95e9da7ce 100644
--- a/drivers/pci/controller/cadence/pcie-cadence-host.c
+++ b/drivers/pci/controller/cadence/pcie-cadence-host.c
@@ -485,8 +485,7 @@ static int cdns_pcie_host_init_address_translation(struct cdns_pcie_rc *rc)
return cdns_pcie_host_map_dma_ranges(rc);
}
-static int cdns_pcie_host_init(struct device *dev,
- struct cdns_pcie_rc *rc)
+int cdns_pcie_host_init(struct cdns_pcie_rc *rc)
{
int err;
@@ -497,6 +496,30 @@ static int cdns_pcie_host_init(struct device *dev,
return cdns_pcie_host_init_address_translation(rc);
}
+int cdns_pcie_host_link_setup(struct cdns_pcie_rc *rc)
+{
+ struct cdns_pcie *pcie = &rc->pcie;
+ struct device *dev = rc->pcie.dev;
+ int ret;
+
+ if (rc->quirk_detect_quiet_flag)
+ cdns_pcie_detect_quiet_min_delay_set(&rc->pcie);
+
+ cdns_pcie_host_enable_ptm_response(pcie);
+
+ ret = cdns_pcie_start_link(pcie);
+ if (ret) {
+ dev_err(dev, "Failed to start link\n");
+ return ret;
+ }
+
+ ret = cdns_pcie_host_start_link(rc);
+ if (ret)
+ dev_dbg(dev, "PCIe link never came up\n");
+
+ return 0;
+}
+
int cdns_pcie_host_setup(struct cdns_pcie_rc *rc)
{
struct device *dev = rc->pcie.dev;
@@ -533,25 +556,14 @@ int cdns_pcie_host_setup(struct cdns_pcie_rc *rc)
return PTR_ERR(rc->cfg_base);
rc->cfg_res = res;
- if (rc->quirk_detect_quiet_flag)
- cdns_pcie_detect_quiet_min_delay_set(&rc->pcie);
-
- cdns_pcie_host_enable_ptm_response(pcie);
-
- ret = cdns_pcie_start_link(pcie);
- if (ret) {
- dev_err(dev, "Failed to start link\n");
- return ret;
- }
-
- ret = cdns_pcie_host_start_link(rc);
+ ret = cdns_pcie_host_link_setup(rc);
if (ret)
- dev_dbg(dev, "PCIe link never came up\n");
+ return ret;
for (bar = RP_BAR0; bar <= RP_NO_BAR; bar++)
rc->avail_ib_bar[bar] = true;
- ret = cdns_pcie_host_init(dev, rc);
+ ret = cdns_pcie_host_init(rc);
if (ret)
return ret;
diff --git a/drivers/pci/controller/cadence/pcie-cadence.c b/drivers/pci/controller/cadence/pcie-cadence.c
index 4251fac5e310..204e045aed8c 100644
--- a/drivers/pci/controller/cadence/pcie-cadence.c
+++ b/drivers/pci/controller/cadence/pcie-cadence.c
@@ -197,7 +197,7 @@ int cdns_pcie_init_phy(struct device *dev, struct cdns_pcie *pcie)
phy_count = of_property_count_strings(np, "phy-names");
if (phy_count < 1) {
- dev_err(dev, "no phy-names. PHY will not be initialized\n");
+ dev_info(dev, "no \"phy-names\" property found; PHY will not be initialized\n");
pcie->phy_count = 0;
return 0;
}
@@ -260,7 +260,7 @@ static int cdns_pcie_resume_noirq(struct device *dev)
ret = cdns_pcie_enable_phy(pcie);
if (ret) {
- dev_err(dev, "failed to enable phy\n");
+ dev_err(dev, "failed to enable PHY\n");
return ret;
}
diff --git a/drivers/pci/controller/cadence/pcie-cadence.h b/drivers/pci/controller/cadence/pcie-cadence.h
index 7a66a2f815dc..39ee9945c903 100644
--- a/drivers/pci/controller/cadence/pcie-cadence.h
+++ b/drivers/pci/controller/cadence/pcie-cadence.h
@@ -246,7 +246,7 @@ struct cdns_pcie_rp_ib_bar {
#define CDNS_PCIE_NORMAL_MSG_CODE_MASK GENMASK(15, 8)
#define CDNS_PCIE_NORMAL_MSG_CODE(code) \
(((code) << 8) & CDNS_PCIE_NORMAL_MSG_CODE_MASK)
-#define CDNS_PCIE_MSG_NO_DATA BIT(16)
+#define CDNS_PCIE_MSG_DATA BIT(16)
struct cdns_pcie;
@@ -314,7 +314,6 @@ struct cdns_pcie {
/**
* struct cdns_pcie_rc - private data for this PCIe Root Complex driver
* @pcie: Cadence PCIe controller
- * @dev: pointer to PCIe device
* @cfg_res: start/end offsets in the physical system memory to map PCI
* configuration space accesses
* @cfg_base: IO mapped window to access the PCI configuration space of a
@@ -521,10 +520,22 @@ static inline bool cdns_pcie_link_up(struct cdns_pcie *pcie)
}
#ifdef CONFIG_PCIE_CADENCE_HOST
+int cdns_pcie_host_link_setup(struct cdns_pcie_rc *rc);
+int cdns_pcie_host_init(struct cdns_pcie_rc *rc);
int cdns_pcie_host_setup(struct cdns_pcie_rc *rc);
void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int devfn,
int where);
#else
+static inline int cdns_pcie_host_link_setup(struct cdns_pcie_rc *rc)
+{
+ return 0;
+}
+
+static inline int cdns_pcie_host_init(struct cdns_pcie_rc *rc)
+{
+ return 0;
+}
+
static inline int cdns_pcie_host_setup(struct cdns_pcie_rc *rc)
{
return 0;
diff --git a/drivers/pci/controller/dwc/Kconfig b/drivers/pci/controller/dwc/Kconfig
index 8afacc90c63b..d9f0386396ed 100644
--- a/drivers/pci/controller/dwc/Kconfig
+++ b/drivers/pci/controller/dwc/Kconfig
@@ -6,6 +6,16 @@ menu "DesignWare-based PCIe controllers"
config PCIE_DW
bool
+config PCIE_DW_DEBUGFS
+ bool "DesignWare PCIe debugfs entries"
+ depends on DEBUG_FS
+ depends on PCIE_DW_HOST || PCIE_DW_EP
+ help
+ Say Y here to enable debugfs entries for the PCIe controller. These
+ entries provide various debug features related to the controller and
+ expose the RAS DES capabilities such as Silicon Debug, Error Injection
+ and Statistical Counters.
+
config PCIE_DW_HOST
bool
select PCIE_DW
@@ -27,6 +37,17 @@ config PCIE_AL
required only for DT-based platforms. ACPI platforms with the
Annapurna Labs PCIe controller don't need to enable this.
+config PCIE_AMD_MDB
+ bool "AMD MDB Versal2 PCIe controller"
+ depends on OF && (ARM64 || COMPILE_TEST)
+ depends on PCI_MSI
+ select PCIE_DW_HOST
+ help
+ Say Y here if you want to enable PCIe controller support on AMD
+ Versal2 SoCs. The AMD MDB Versal2 PCIe controller is based on
+ DesignWare IP, so this driver reuses the DesignWare core
+ functions.
+
config PCI_MESON
tristate "Amlogic Meson PCIe controller"
default m if ARCH_MESON
@@ -265,12 +286,16 @@ config PCIE_DW_PLAT_EP
order to enable device-specific features PCI_DW_PLAT_EP must be
selected.
+config PCIE_QCOM_COMMON
+ bool
+
config PCIE_QCOM
bool "Qualcomm PCIe controller (host mode)"
depends on OF && (ARCH_QCOM || COMPILE_TEST)
depends on PCI_MSI
select PCIE_DW_HOST
select CRC8
+ select PCIE_QCOM_COMMON
help
Say Y here to enable PCIe controller support on Qualcomm SoCs. The
PCIe controller uses the DesignWare core plus Qualcomm-specific
@@ -281,6 +306,7 @@ config PCIE_QCOM_EP
depends on OF && (ARCH_QCOM || COMPILE_TEST)
depends on PCI_ENDPOINT
select PCIE_DW_EP
+ select PCIE_QCOM_COMMON
help
Say Y here to enable support for the PCIe controllers on Qualcomm SoCs
to work in endpoint mode. The PCIe controller uses the DesignWare core
@@ -311,16 +337,30 @@ config PCIE_RCAR_GEN4_EP
SoCs. To compile this driver as a module, choose M here: the module
will be called pcie-rcar-gen4.ko. This uses the DesignWare core.
+config PCIE_ROCKCHIP_DW
+ bool
+
config PCIE_ROCKCHIP_DW_HOST
- bool "Rockchip DesignWare PCIe controller"
- select PCIE_DW
- select PCIE_DW_HOST
+ bool "Rockchip DesignWare PCIe controller (host mode)"
depends on PCI_MSI
depends on ARCH_ROCKCHIP || COMPILE_TEST
depends on OF
+ select PCIE_DW_HOST
+ select PCIE_ROCKCHIP_DW
+ help
+ Enables support for the DesignWare PCIe controller in the
+ Rockchip SoC (except RK3399) to work in host mode.
+
+config PCIE_ROCKCHIP_DW_EP
+ bool "Rockchip DesignWare PCIe controller (endpoint mode)"
+ depends on ARCH_ROCKCHIP || COMPILE_TEST
+ depends on OF
+ depends on PCI_ENDPOINT
+ select PCIE_DW_EP
+ select PCIE_ROCKCHIP_DW
help
Enables support for the DesignWare PCIe controller in the
- Rockchip SoC except RK3399.
+ Rockchip SoC (except RK3399) to work in endpoint mode.
config PCI_EXYNOS
tristate "Samsung Exynos PCIe controller"
diff --git a/drivers/pci/controller/dwc/Makefile b/drivers/pci/controller/dwc/Makefile
index bac103faa523..908cb7f345db 100644
--- a/drivers/pci/controller/dwc/Makefile
+++ b/drivers/pci/controller/dwc/Makefile
@@ -1,8 +1,10 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_PCIE_DW) += pcie-designware.o
+obj-$(CONFIG_PCIE_DW_DEBUGFS) += pcie-designware-debugfs.o
obj-$(CONFIG_PCIE_DW_HOST) += pcie-designware-host.o
obj-$(CONFIG_PCIE_DW_EP) += pcie-designware-ep.o
obj-$(CONFIG_PCIE_DW_PLAT) += pcie-designware-plat.o
+obj-$(CONFIG_PCIE_AMD_MDB) += pcie-amd-mdb.o
obj-$(CONFIG_PCIE_BT1) += pcie-bt1.o
obj-$(CONFIG_PCI_DRA7XX) += pci-dra7xx.o
obj-$(CONFIG_PCI_EXYNOS) += pci-exynos.o
@@ -12,11 +14,12 @@ obj-$(CONFIG_PCIE_SPEAR13XX) += pcie-spear13xx.o
obj-$(CONFIG_PCI_KEYSTONE) += pci-keystone.o
obj-$(CONFIG_PCI_LAYERSCAPE) += pci-layerscape.o
obj-$(CONFIG_PCI_LAYERSCAPE_EP) += pci-layerscape-ep.o
+obj-$(CONFIG_PCIE_QCOM_COMMON) += pcie-qcom-common.o
obj-$(CONFIG_PCIE_QCOM) += pcie-qcom.o
obj-$(CONFIG_PCIE_QCOM_EP) += pcie-qcom-ep.o
obj-$(CONFIG_PCIE_ARMADA_8K) += pcie-armada8k.o
obj-$(CONFIG_PCIE_ARTPEC6) += pcie-artpec6.o
-obj-$(CONFIG_PCIE_ROCKCHIP_DW_HOST) += pcie-dw-rockchip.o
+obj-$(CONFIG_PCIE_ROCKCHIP_DW) += pcie-dw-rockchip.o
obj-$(CONFIG_PCIE_INTEL_GW) += pcie-intel-gw.o
obj-$(CONFIG_PCIE_KEEMBAY) += pcie-keembay.o
obj-$(CONFIG_PCIE_KIRIN) += pcie-kirin.o
diff --git a/drivers/pci/controller/dwc/pci-dra7xx.c b/drivers/pci/controller/dwc/pci-dra7xx.c
index 0e406677060d..3219704aba0e 100644
--- a/drivers/pci/controller/dwc/pci-dra7xx.c
+++ b/drivers/pci/controller/dwc/pci-dra7xx.c
@@ -13,11 +13,11 @@
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
+#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
@@ -113,9 +113,9 @@ static inline void dra7xx_pcie_writel(struct dra7xx_pcie *pcie, u32 offset,
writel(value, pcie->base + offset);
}
-static u64 dra7xx_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 pci_addr)
+static u64 dra7xx_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 cpu_addr)
{
- return pci_addr & DRA7XX_CPU_TO_BUS_ADDR;
+ return cpu_addr & DRA7XX_CPU_TO_BUS_ADDR;
}
static int dra7xx_pcie_link_up(struct dw_pcie *pci)
@@ -359,8 +359,8 @@ static int dra7xx_pcie_init_irq_domain(struct dw_pcie_rp *pp)
irq_set_chained_handler_and_data(pp->irq, dra7xx_pcie_msi_irq_handler,
pp);
- dra7xx->irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
- &intx_domain_ops, pp);
+ dra7xx->irq_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node),
+ PCI_NUM_INTX, &intx_domain_ops, pp);
of_node_put(pcie_intc_node);
if (!dra7xx->irq_domain) {
dev_err(dev, "Failed to get a INTx IRQ domain\n");
@@ -467,6 +467,15 @@ static int dra7xx_add_pcie_ep(struct dra7xx_pcie *dra7xx,
return ret;
}
+ ret = dw_pcie_ep_init_registers(ep);
+ if (ret) {
+ dev_err(dev, "Failed to initialize DWC endpoint registers\n");
+ dw_pcie_ep_deinit(ep);
+ return ret;
+ }
+
+ pci_epc_init_notify(ep->epc);
+
return 0;
}
@@ -626,30 +635,20 @@ static int dra7xx_pcie_unaligned_memaccess(struct device *dev)
{
int ret;
struct device_node *np = dev->of_node;
- struct of_phandle_args args;
+ unsigned int args[2];
struct regmap *regmap;
- regmap = syscon_regmap_lookup_by_phandle(np,
- "ti,syscon-unaligned-access");
+ regmap = syscon_regmap_lookup_by_phandle_args(np, "ti,syscon-unaligned-access",
+ 2, args);
if (IS_ERR(regmap)) {
dev_dbg(dev, "can't get ti,syscon-unaligned-access\n");
return -EINVAL;
}
- ret = of_parse_phandle_with_fixed_args(np, "ti,syscon-unaligned-access",
- 2, 0, &args);
- if (ret) {
- dev_err(dev, "failed to parse ti,syscon-unaligned-access\n");
- return ret;
- }
-
- ret = regmap_update_bits(regmap, args.args[0], args.args[1],
- args.args[1]);
+ ret = regmap_update_bits(regmap, args[0], args[1], args[1]);
if (ret)
dev_err(dev, "failed to enable unaligned access\n");
- of_node_put(args.np);
-
return ret;
}
@@ -662,18 +661,13 @@ static int dra7xx_pcie_configure_two_lane(struct device *dev,
u32 mask;
u32 val;
- pcie_syscon = syscon_regmap_lookup_by_phandle(np, "ti,syscon-lane-sel");
+ pcie_syscon = syscon_regmap_lookup_by_phandle_args(np, "ti,syscon-lane-sel",
+ 1, &pcie_reg);
if (IS_ERR(pcie_syscon)) {
dev_err(dev, "unable to get ti,syscon-lane-sel\n");
return -EINVAL;
}
- if (of_property_read_u32_index(np, "ti,syscon-lane-sel", 1,
- &pcie_reg)) {
- dev_err(dev, "couldn't get lane selection reg offset\n");
- return -EINVAL;
- }
-
mask = b1co_mode_sel_mask | PCIE_B0_B1_TSYNCEN;
val = PCIE_B1C0_MODE_SEL | PCIE_B0_B1_TSYNCEN;
regmap_update_bits(pcie_syscon, pcie_reg, mask, val);
@@ -841,14 +835,21 @@ static int dra7xx_pcie_probe(struct platform_device *pdev)
dra7xx->mode = mode;
ret = devm_request_threaded_irq(dev, irq, NULL, dra7xx_pcie_irq_handler,
- IRQF_SHARED, "dra7xx-pcie-main", dra7xx);
+ IRQF_SHARED | IRQF_ONESHOT,
+ "dra7xx-pcie-main", dra7xx);
if (ret) {
dev_err(dev, "failed to request irq\n");
- goto err_gpio;
+ goto err_deinit;
}
return 0;
+err_deinit:
+ if (dra7xx->mode == DW_PCIE_RC_TYPE)
+ dw_pcie_host_deinit(&dra7xx->pci->pp);
+ else
+ dw_pcie_ep_deinit(&dra7xx->pci->ep);
+
err_gpio:
err_get_sync:
pm_runtime_put(dev);
diff --git a/drivers/pci/controller/dwc/pci-exynos.c b/drivers/pci/controller/dwc/pci-exynos.c
index a33fa98a252e..ace736b025b1 100644
--- a/drivers/pci/controller/dwc/pci-exynos.c
+++ b/drivers/pci/controller/dwc/pci-exynos.c
@@ -54,43 +54,11 @@
struct exynos_pcie {
struct dw_pcie pci;
void __iomem *elbi_base;
- struct clk *clk;
- struct clk *bus_clk;
+ struct clk_bulk_data *clks;
struct phy *phy;
struct regulator_bulk_data supplies[2];
};
-static int exynos_pcie_init_clk_resources(struct exynos_pcie *ep)
-{
- struct device *dev = ep->pci.dev;
- int ret;
-
- ret = clk_prepare_enable(ep->clk);
- if (ret) {
- dev_err(dev, "cannot enable pcie rc clock");
- return ret;
- }
-
- ret = clk_prepare_enable(ep->bus_clk);
- if (ret) {
- dev_err(dev, "cannot enable pcie bus clock");
- goto err_bus_clk;
- }
-
- return 0;
-
-err_bus_clk:
- clk_disable_unprepare(ep->clk);
-
- return ret;
-}
-
-static void exynos_pcie_deinit_clk_resources(struct exynos_pcie *ep)
-{
- clk_disable_unprepare(ep->bus_clk);
- clk_disable_unprepare(ep->clk);
-}
-
static void exynos_pcie_writel(void __iomem *base, u32 val, u32 reg)
{
writel(val, base + reg);
@@ -332,17 +300,9 @@ static int exynos_pcie_probe(struct platform_device *pdev)
if (IS_ERR(ep->elbi_base))
return PTR_ERR(ep->elbi_base);
- ep->clk = devm_clk_get(dev, "pcie");
- if (IS_ERR(ep->clk)) {
- dev_err(dev, "Failed to get pcie rc clock\n");
- return PTR_ERR(ep->clk);
- }
-
- ep->bus_clk = devm_clk_get(dev, "pcie_bus");
- if (IS_ERR(ep->bus_clk)) {
- dev_err(dev, "Failed to get pcie bus clock\n");
- return PTR_ERR(ep->bus_clk);
- }
+ ret = devm_clk_bulk_get_all_enabled(dev, &ep->clks);
+ if (ret < 0)
+ return ret;
ep->supplies[0].supply = "vdd18";
ep->supplies[1].supply = "vdd10";
@@ -351,10 +311,6 @@ static int exynos_pcie_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = exynos_pcie_init_clk_resources(ep);
- if (ret)
- return ret;
-
ret = regulator_bulk_enable(ARRAY_SIZE(ep->supplies), ep->supplies);
if (ret)
return ret;
@@ -369,7 +325,6 @@ static int exynos_pcie_probe(struct platform_device *pdev)
fail_probe:
phy_exit(ep->phy);
- exynos_pcie_deinit_clk_resources(ep);
regulator_bulk_disable(ARRAY_SIZE(ep->supplies), ep->supplies);
return ret;
@@ -383,7 +338,6 @@ static void exynos_pcie_remove(struct platform_device *pdev)
exynos_pcie_assert_core_reset(ep);
phy_power_off(ep->phy);
phy_exit(ep->phy);
- exynos_pcie_deinit_clk_resources(ep);
regulator_bulk_disable(ARRAY_SIZE(ep->supplies), ep->supplies);
}
@@ -429,7 +383,7 @@ static const struct of_device_id exynos_pcie_of_match[] = {
static struct platform_driver exynos_pcie_driver = {
.probe = exynos_pcie_probe,
- .remove_new = exynos_pcie_remove,
+ .remove = exynos_pcie_remove,
.driver = {
.name = "exynos-pcie",
.of_match_table = exynos_pcie_of_match,
@@ -437,5 +391,6 @@ static struct platform_driver exynos_pcie_driver = {
},
};
module_platform_driver(exynos_pcie_driver);
+MODULE_DESCRIPTION("Samsung Exynos PCIe host controller driver");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(of, exynos_pcie_of_match);
diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
index 99a60270b26c..5f267dd261b5 100644
--- a/drivers/pci/controller/dwc/pci-imx6.c
+++ b/drivers/pci/controller/dwc/pci-imx6.c
@@ -11,14 +11,13 @@
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
#include <linux/mfd/syscon/imx7-iomuxc-gpr.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/of_address.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
@@ -29,10 +28,12 @@
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/reset.h>
+#include <linux/phy/pcie.h>
#include <linux/phy/phy.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
+#include "../../pci.h"
#include "pcie-designware.h"
#define IMX8MQ_GPR_PCIE_REF_USE_PAD BIT(9)
@@ -40,7 +41,6 @@
#define IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE BIT(11)
#define IMX8MQ_GPR_PCIE_VREG_BYPASS BIT(12)
#define IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE GENMASK(11, 8)
-#define IMX8MQ_PCIE2_BASE_ADDR 0x33c00000
#define IMX95_PCIE_PHY_GEN_CTRL 0x0
#define IMX95_PCIE_REF_USE_PAD BIT(17)
@@ -55,9 +55,25 @@
#define IMX95_PE0_GEN_CTRL_3 0x1058
#define IMX95_PCIE_LTSSM_EN BIT(0)
-#define to_imx6_pcie(x) dev_get_drvdata((x)->dev)
+#define IMX95_PE0_LUT_ACSCTRL 0x1008
+#define IMX95_PEO_LUT_RWA BIT(16)
+#define IMX95_PE0_LUT_ENLOC GENMASK(4, 0)
-enum imx6_pcie_variants {
+#define IMX95_PE0_LUT_DATA1 0x100c
+#define IMX95_PE0_LUT_VLD BIT(31)
+#define IMX95_PE0_LUT_DAC_ID GENMASK(10, 8)
+#define IMX95_PE0_LUT_STREAM_ID GENMASK(5, 0)
+
+#define IMX95_PE0_LUT_DATA2 0x1010
+#define IMX95_PE0_LUT_REQID GENMASK(31, 16)
+#define IMX95_PE0_LUT_MASK GENMASK(15, 0)
+
+#define IMX95_SID_MASK GENMASK(5, 0)
+#define IMX95_MAX_LUT 32
+
+#define to_imx_pcie(x) dev_get_drvdata((x)->dev)
+
+enum imx_pcie_variants {
IMX6Q,
IMX6SX,
IMX6QP,
@@ -65,58 +81,64 @@ enum imx6_pcie_variants {
IMX8MQ,
IMX8MM,
IMX8MP,
+ IMX8Q,
IMX95,
IMX8MQ_EP,
IMX8MM_EP,
IMX8MP_EP,
+ IMX8Q_EP,
IMX95_EP,
};
-#define IMX6_PCIE_FLAG_IMX6_PHY BIT(0)
-#define IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE BIT(1)
-#define IMX6_PCIE_FLAG_SUPPORTS_SUSPEND BIT(2)
-#define IMX6_PCIE_FLAG_HAS_PHYDRV BIT(3)
-#define IMX6_PCIE_FLAG_HAS_APP_RESET BIT(4)
-#define IMX6_PCIE_FLAG_HAS_PHY_RESET BIT(5)
-#define IMX6_PCIE_FLAG_HAS_SERDES BIT(6)
-#define IMX6_PCIE_FLAG_SUPPORT_64BIT BIT(7)
-
-#define imx6_check_flag(pci, val) (pci->drvdata->flags & val)
+#define IMX_PCIE_FLAG_IMX_PHY BIT(0)
+#define IMX_PCIE_FLAG_IMX_SPEED_CHANGE BIT(1)
+#define IMX_PCIE_FLAG_SUPPORTS_SUSPEND BIT(2)
+#define IMX_PCIE_FLAG_HAS_PHYDRV BIT(3)
+#define IMX_PCIE_FLAG_HAS_APP_RESET BIT(4)
+#define IMX_PCIE_FLAG_HAS_PHY_RESET BIT(5)
+#define IMX_PCIE_FLAG_HAS_SERDES BIT(6)
+#define IMX_PCIE_FLAG_SUPPORT_64BIT BIT(7)
+#define IMX_PCIE_FLAG_CPU_ADDR_FIXUP BIT(8)
+/*
+ * Because of ERR005723 (PCIe does not support L2 power down) we need to
+ * workaround suspend resume on some devices which are affected by this errata.
+ */
+#define IMX_PCIE_FLAG_BROKEN_SUSPEND BIT(9)
+#define IMX_PCIE_FLAG_HAS_LUT BIT(10)
-#define IMX6_PCIE_MAX_CLKS 6
+#define imx_check_flag(pci, val) (pci->drvdata->flags & val)
-#define IMX6_PCIE_MAX_INSTANCES 2
+#define IMX_PCIE_MAX_INSTANCES 2
-struct imx6_pcie;
+struct imx_pcie;
-struct imx6_pcie_drvdata {
- enum imx6_pcie_variants variant;
+struct imx_pcie_drvdata {
+ enum imx_pcie_variants variant;
enum dw_pcie_device_mode mode;
u32 flags;
int dbi_length;
const char *gpr;
- const char * const *clk_names;
- const u32 clks_cnt;
const u32 ltssm_off;
const u32 ltssm_mask;
- const u32 mode_off[IMX6_PCIE_MAX_INSTANCES];
- const u32 mode_mask[IMX6_PCIE_MAX_INSTANCES];
+ const u32 mode_off[IMX_PCIE_MAX_INSTANCES];
+ const u32 mode_mask[IMX_PCIE_MAX_INSTANCES];
const struct pci_epc_features *epc_features;
- int (*init_phy)(struct imx6_pcie *pcie);
+ int (*init_phy)(struct imx_pcie *pcie);
+ int (*enable_ref_clk)(struct imx_pcie *pcie, bool enable);
+ int (*core_reset)(struct imx_pcie *pcie, bool assert);
+ const struct dw_pcie_host_ops *ops;
};
-struct imx6_pcie {
+struct imx_pcie {
struct dw_pcie *pci;
- int reset_gpio;
- bool gpio_active_high;
- bool link_is_up;
- struct clk_bulk_data clks[IMX6_PCIE_MAX_CLKS];
+ struct gpio_desc *reset_gpiod;
+ struct clk_bulk_data *clks;
+ int num_clks;
struct regmap *iomuxc_gpr;
u16 msi_ctrl;
u32 controller_id;
struct reset_control *pciephy_reset;
struct reset_control *apps_reset;
- struct reset_control *turnoff_reset;
u32 tx_deemph_gen1;
u32 tx_deemph_gen2_3p5db;
u32 tx_deemph_gen2_6db;
@@ -131,7 +153,10 @@ struct imx6_pcie {
/* power domain for pcie phy */
struct device *pd_pcie_phy;
struct phy *phy;
- const struct imx6_pcie_drvdata *drvdata;
+ const struct imx_pcie_drvdata *drvdata;
+
+ /* Ensure that only one device's LUT is configured at any given time */
+ struct mutex lock;
};
/* Parameters for the waiting for PCIe PHY PLL to lock on i.MX7 */
@@ -186,28 +211,28 @@ struct imx6_pcie {
#define PHY_RX_OVRD_IN_LO_RX_DATA_EN BIT(5)
#define PHY_RX_OVRD_IN_LO_RX_PLL_EN BIT(3)
-static unsigned int imx6_pcie_grp_offset(const struct imx6_pcie *imx6_pcie)
+static unsigned int imx_pcie_grp_offset(const struct imx_pcie *imx_pcie)
{
- WARN_ON(imx6_pcie->drvdata->variant != IMX8MQ &&
- imx6_pcie->drvdata->variant != IMX8MQ_EP &&
- imx6_pcie->drvdata->variant != IMX8MM &&
- imx6_pcie->drvdata->variant != IMX8MM_EP &&
- imx6_pcie->drvdata->variant != IMX8MP &&
- imx6_pcie->drvdata->variant != IMX8MP_EP);
- return imx6_pcie->controller_id == 1 ? IOMUXC_GPR16 : IOMUXC_GPR14;
+ WARN_ON(imx_pcie->drvdata->variant != IMX8MQ &&
+ imx_pcie->drvdata->variant != IMX8MQ_EP &&
+ imx_pcie->drvdata->variant != IMX8MM &&
+ imx_pcie->drvdata->variant != IMX8MM_EP &&
+ imx_pcie->drvdata->variant != IMX8MP &&
+ imx_pcie->drvdata->variant != IMX8MP_EP);
+ return imx_pcie->controller_id == 1 ? IOMUXC_GPR16 : IOMUXC_GPR14;
}
-static int imx95_pcie_init_phy(struct imx6_pcie *imx6_pcie)
+static int imx95_pcie_init_phy(struct imx_pcie *imx_pcie)
{
- regmap_update_bits(imx6_pcie->iomuxc_gpr,
+ regmap_update_bits(imx_pcie->iomuxc_gpr,
IMX95_PCIE_SS_RW_REG_0,
IMX95_PCIE_PHY_CR_PARA_SEL,
IMX95_PCIE_PHY_CR_PARA_SEL);
- regmap_update_bits(imx6_pcie->iomuxc_gpr,
+ regmap_update_bits(imx_pcie->iomuxc_gpr,
IMX95_PCIE_PHY_GEN_CTRL,
IMX95_PCIE_REF_USE_PAD, 0);
- regmap_update_bits(imx6_pcie->iomuxc_gpr,
+ regmap_update_bits(imx_pcie->iomuxc_gpr,
IMX95_PCIE_SS_RW_REG_0,
IMX95_PCIE_REF_CLKEN,
IMX95_PCIE_REF_CLKEN);
@@ -215,9 +240,9 @@ static int imx95_pcie_init_phy(struct imx6_pcie *imx6_pcie)
return 0;
}
-static void imx6_pcie_configure_type(struct imx6_pcie *imx6_pcie)
+static void imx_pcie_configure_type(struct imx_pcie *imx_pcie)
{
- const struct imx6_pcie_drvdata *drvdata = imx6_pcie->drvdata;
+ const struct imx_pcie_drvdata *drvdata = imx_pcie->drvdata;
unsigned int mask, val, mode, id;
if (drvdata->mode == DW_PCIE_EP_TYPE)
@@ -225,21 +250,25 @@ static void imx6_pcie_configure_type(struct imx6_pcie *imx6_pcie)
else
mode = PCI_EXP_TYPE_ROOT_PORT;
- id = imx6_pcie->controller_id;
+ id = imx_pcie->controller_id;
- /* If mode_mask[id] is zero, means each controller have its individual gpr */
+ /* If mode_mask is 0, generic PHY driver is used to set the mode */
+ if (!drvdata->mode_mask[0])
+ return;
+
+ /* If mode_mask[id] is 0, each controller has its individual GPR */
if (!drvdata->mode_mask[id])
id = 0;
mask = drvdata->mode_mask[id];
val = mode << (ffs(mask) - 1);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, drvdata->mode_off[id], mask, val);
+ regmap_update_bits(imx_pcie->iomuxc_gpr, drvdata->mode_off[id], mask, val);
}
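The `mode << (ffs(mask) - 1)` idiom above writes a value into a register field that is described only by its mask: ffs() returns the 1-based position of the lowest set bit, so subtracting one yields the field's shift. A small worked example (the mask value is hypothetical):

	u32 mask = GENMASK(15, 12);	/* ffs(mask) == 13, field starts at bit 12 */
	u32 val = PCI_EXP_TYPE_ROOT_PORT << (ffs(mask) - 1);	/* 0x4 << 12 */
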
-static int pcie_phy_poll_ack(struct imx6_pcie *imx6_pcie, bool exp_val)
+static int pcie_phy_poll_ack(struct imx_pcie *imx_pcie, bool exp_val)
{
- struct dw_pcie *pci = imx6_pcie->pci;
+ struct dw_pcie *pci = imx_pcie->pci;
bool val;
u32 max_iterations = 10;
u32 wait_counter = 0;
@@ -258,9 +287,9 @@ static int pcie_phy_poll_ack(struct imx6_pcie *imx6_pcie, bool exp_val)
return -ETIMEDOUT;
}
-static int pcie_phy_wait_ack(struct imx6_pcie *imx6_pcie, int addr)
+static int pcie_phy_wait_ack(struct imx_pcie *imx_pcie, int addr)
{
- struct dw_pcie *pci = imx6_pcie->pci;
+ struct dw_pcie *pci = imx_pcie->pci;
u32 val;
int ret;
@@ -270,24 +299,24 @@ static int pcie_phy_wait_ack(struct imx6_pcie *imx6_pcie, int addr)
val |= PCIE_PHY_CTRL_CAP_ADR;
dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);
- ret = pcie_phy_poll_ack(imx6_pcie, true);
+ ret = pcie_phy_poll_ack(imx_pcie, true);
if (ret)
return ret;
val = PCIE_PHY_CTRL_DATA(addr);
dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);
- return pcie_phy_poll_ack(imx6_pcie, false);
+ return pcie_phy_poll_ack(imx_pcie, false);
}
/* Read from the 16-bit PCIe PHY control registers (not memory-mapped) */
-static int pcie_phy_read(struct imx6_pcie *imx6_pcie, int addr, u16 *data)
+static int pcie_phy_read(struct imx_pcie *imx_pcie, int addr, u16 *data)
{
- struct dw_pcie *pci = imx6_pcie->pci;
+ struct dw_pcie *pci = imx_pcie->pci;
u32 phy_ctl;
int ret;
- ret = pcie_phy_wait_ack(imx6_pcie, addr);
+ ret = pcie_phy_wait_ack(imx_pcie, addr);
if (ret)
return ret;
@@ -295,7 +324,7 @@ static int pcie_phy_read(struct imx6_pcie *imx6_pcie, int addr, u16 *data)
phy_ctl = PCIE_PHY_CTRL_RD;
dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, phy_ctl);
- ret = pcie_phy_poll_ack(imx6_pcie, true);
+ ret = pcie_phy_poll_ack(imx_pcie, true);
if (ret)
return ret;
@@ -304,18 +333,18 @@ static int pcie_phy_read(struct imx6_pcie *imx6_pcie, int addr, u16 *data)
/* deassert Read signal */
dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, 0x00);
- return pcie_phy_poll_ack(imx6_pcie, false);
+ return pcie_phy_poll_ack(imx_pcie, false);
}
-static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, u16 data)
+static int pcie_phy_write(struct imx_pcie *imx_pcie, int addr, u16 data)
{
- struct dw_pcie *pci = imx6_pcie->pci;
+ struct dw_pcie *pci = imx_pcie->pci;
u32 var;
int ret;
/* write addr */
/* cap addr */
- ret = pcie_phy_wait_ack(imx6_pcie, addr);
+ ret = pcie_phy_wait_ack(imx_pcie, addr);
if (ret)
return ret;
@@ -326,7 +355,7 @@ static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, u16 data)
var |= PCIE_PHY_CTRL_CAP_DAT;
dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);
- ret = pcie_phy_poll_ack(imx6_pcie, true);
+ ret = pcie_phy_poll_ack(imx_pcie, true);
if (ret)
return ret;
@@ -335,7 +364,7 @@ static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, u16 data)
dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);
/* wait for ack de-assertion */
- ret = pcie_phy_poll_ack(imx6_pcie, false);
+ ret = pcie_phy_poll_ack(imx_pcie, false);
if (ret)
return ret;
@@ -344,7 +373,7 @@ static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, u16 data)
dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);
/* wait for ack */
- ret = pcie_phy_poll_ack(imx6_pcie, true);
+ ret = pcie_phy_poll_ack(imx_pcie, true);
if (ret)
return ret;
@@ -353,7 +382,7 @@ static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, u16 data)
dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);
/* wait for ack de-assertion */
- ret = pcie_phy_poll_ack(imx6_pcie, false);
+ ret = pcie_phy_poll_ack(imx_pcie, false);
if (ret)
return ret;
@@ -362,74 +391,68 @@ static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, u16 data)
return 0;
}
-static int imx8mq_pcie_init_phy(struct imx6_pcie *imx6_pcie)
+static int imx8mq_pcie_init_phy(struct imx_pcie *imx_pcie)
{
- /* TODO: Currently this code assumes external oscillator is being used */
- regmap_update_bits(imx6_pcie->iomuxc_gpr,
- imx6_pcie_grp_offset(imx6_pcie),
+	/* TODO: This code assumes an external oscillator is being used */
+ regmap_update_bits(imx_pcie->iomuxc_gpr,
+ imx_pcie_grp_offset(imx_pcie),
IMX8MQ_GPR_PCIE_REF_USE_PAD,
IMX8MQ_GPR_PCIE_REF_USE_PAD);
/*
- * Regarding the datasheet, the PCIE_VPH is suggested to be 1.8V. If the PCIE_VPH is
- * supplied by 3.3V, the VREG_BYPASS should be cleared to zero.
+	 * Per the datasheet, PCIE_VPH should be 1.8V. If PCIE_VPH is
+	 * supplied at 3.3V instead, VREG_BYPASS must be cleared to zero.
*/
- if (imx6_pcie->vph && regulator_get_voltage(imx6_pcie->vph) > 3000000)
- regmap_update_bits(imx6_pcie->iomuxc_gpr,
- imx6_pcie_grp_offset(imx6_pcie),
+ if (imx_pcie->vph && regulator_get_voltage(imx_pcie->vph) > 3000000)
+ regmap_update_bits(imx_pcie->iomuxc_gpr,
+ imx_pcie_grp_offset(imx_pcie),
IMX8MQ_GPR_PCIE_VREG_BYPASS,
0);
return 0;
}
-static int imx7d_pcie_init_phy(struct imx6_pcie *imx6_pcie)
-{
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, IMX7D_GPR12_PCIE_PHY_REFCLK_SEL, 0);
-
- return 0;
-}
-
-static int imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie)
+static int imx_pcie_init_phy(struct imx_pcie *imx_pcie)
{
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);
/* configure constant input signal to the pcie ctrl and phy */
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
IMX6Q_GPR12_LOS_LEVEL, 9 << 4);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR8,
IMX6Q_GPR8_TX_DEEMPH_GEN1,
- imx6_pcie->tx_deemph_gen1 << 0);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ imx_pcie->tx_deemph_gen1 << 0);
+ regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR8,
IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB,
- imx6_pcie->tx_deemph_gen2_3p5db << 6);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ imx_pcie->tx_deemph_gen2_3p5db << 6);
+ regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR8,
IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB,
- imx6_pcie->tx_deemph_gen2_6db << 12);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ imx_pcie->tx_deemph_gen2_6db << 12);
+ regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR8,
IMX6Q_GPR8_TX_SWING_FULL,
- imx6_pcie->tx_swing_full << 18);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ imx_pcie->tx_swing_full << 18);
+ regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR8,
IMX6Q_GPR8_TX_SWING_LOW,
- imx6_pcie->tx_swing_low << 25);
+ imx_pcie->tx_swing_low << 25);
return 0;
}
-static int imx6sx_pcie_init_phy(struct imx6_pcie *imx6_pcie)
+static int imx6sx_pcie_init_phy(struct imx_pcie *imx_pcie)
{
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
IMX6SX_GPR12_PCIE_RX_EQ_MASK, IMX6SX_GPR12_PCIE_RX_EQ_2);
- return imx6_pcie_init_phy(imx6_pcie);
+ return imx_pcie_init_phy(imx_pcie);
}
-static void imx7d_pcie_wait_for_phy_pll_lock(struct imx6_pcie *imx6_pcie)
+static void imx7d_pcie_wait_for_phy_pll_lock(struct imx_pcie *imx_pcie)
{
u32 val;
- struct device *dev = imx6_pcie->pci->dev;
+ struct device *dev = imx_pcie->pci->dev;
- if (regmap_read_poll_timeout(imx6_pcie->iomuxc_gpr,
+ if (regmap_read_poll_timeout(imx_pcie->iomuxc_gpr,
IOMUXC_GPR22, val,
val & IMX7D_GPR22_PCIE_PHY_PLL_LOCKED,
PHY_PLL_LOCK_WAIT_USLEEP_MAX,
@@ -437,19 +460,20 @@ static void imx7d_pcie_wait_for_phy_pll_lock(struct imx6_pcie *imx6_pcie)
dev_err(dev, "PCIe PLL lock timeout\n");
}
-static int imx6_setup_phy_mpll(struct imx6_pcie *imx6_pcie)
+static int imx_setup_phy_mpll(struct imx_pcie *imx_pcie)
{
unsigned long phy_rate = 0;
int mult, div;
u16 val;
int i;
+ struct clk_bulk_data *clks = imx_pcie->clks;
- if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_IMX6_PHY))
+ if (!(imx_pcie->drvdata->flags & IMX_PCIE_FLAG_IMX_PHY))
return 0;
- for (i = 0; i < imx6_pcie->drvdata->clks_cnt; i++)
- if (strncmp(imx6_pcie->clks[i].id, "pcie_phy", 8) == 0)
- phy_rate = clk_get_rate(imx6_pcie->clks[i].clk);
+ for (i = 0; i < imx_pcie->num_clks; i++)
+ if (strncmp(clks[i].id, "pcie_phy", 8) == 0)
+ phy_rate = clk_get_rate(clks[i].clk);
switch (phy_rate) {
case 125000000:
@@ -467,46 +491,46 @@ static int imx6_setup_phy_mpll(struct imx6_pcie *imx6_pcie)
div = 1;
break;
default:
- dev_err(imx6_pcie->pci->dev,
+ dev_err(imx_pcie->pci->dev,
"Unsupported PHY reference clock rate %lu\n", phy_rate);
return -EINVAL;
}
- pcie_phy_read(imx6_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, &val);
+ pcie_phy_read(imx_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, &val);
val &= ~(PCIE_PHY_MPLL_MULTIPLIER_MASK <<
PCIE_PHY_MPLL_MULTIPLIER_SHIFT);
val |= mult << PCIE_PHY_MPLL_MULTIPLIER_SHIFT;
val |= PCIE_PHY_MPLL_MULTIPLIER_OVRD;
- pcie_phy_write(imx6_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, val);
+ pcie_phy_write(imx_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, val);
- pcie_phy_read(imx6_pcie, PCIE_PHY_ATEOVRD, &val);
+ pcie_phy_read(imx_pcie, PCIE_PHY_ATEOVRD, &val);
val &= ~(PCIE_PHY_ATEOVRD_REF_CLKDIV_MASK <<
PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT);
val |= div << PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT;
val |= PCIE_PHY_ATEOVRD_EN;
- pcie_phy_write(imx6_pcie, PCIE_PHY_ATEOVRD, val);
+ pcie_phy_write(imx_pcie, PCIE_PHY_ATEOVRD, val);
return 0;
}
-static void imx6_pcie_reset_phy(struct imx6_pcie *imx6_pcie)
+static void imx_pcie_reset_phy(struct imx_pcie *imx_pcie)
{
u16 tmp;
- if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_IMX6_PHY))
+ if (!(imx_pcie->drvdata->flags & IMX_PCIE_FLAG_IMX_PHY))
return;
- pcie_phy_read(imx6_pcie, PHY_RX_OVRD_IN_LO, &tmp);
+ pcie_phy_read(imx_pcie, PHY_RX_OVRD_IN_LO, &tmp);
tmp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN |
PHY_RX_OVRD_IN_LO_RX_PLL_EN);
- pcie_phy_write(imx6_pcie, PHY_RX_OVRD_IN_LO, tmp);
+ pcie_phy_write(imx_pcie, PHY_RX_OVRD_IN_LO, tmp);
usleep_range(2000, 3000);
- pcie_phy_read(imx6_pcie, PHY_RX_OVRD_IN_LO, &tmp);
+ pcie_phy_read(imx_pcie, PHY_RX_OVRD_IN_LO, &tmp);
tmp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN |
PHY_RX_OVRD_IN_LO_RX_PLL_EN);
- pcie_phy_write(imx6_pcie, PHY_RX_OVRD_IN_LO, tmp);
+ pcie_phy_write(imx_pcie, PHY_RX_OVRD_IN_LO, tmp);
}
#ifdef CONFIG_ARM
@@ -545,133 +569,112 @@ static int imx6q_pcie_abort_handler(unsigned long addr,
}
#endif
-static int imx6_pcie_attach_pd(struct device *dev)
+static int imx_pcie_attach_pd(struct device *dev)
{
- struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
+ struct imx_pcie *imx_pcie = dev_get_drvdata(dev);
struct device_link *link;
/* Do nothing when in a single power domain */
if (dev->pm_domain)
return 0;
- imx6_pcie->pd_pcie = dev_pm_domain_attach_by_name(dev, "pcie");
- if (IS_ERR(imx6_pcie->pd_pcie))
- return PTR_ERR(imx6_pcie->pd_pcie);
+ imx_pcie->pd_pcie = dev_pm_domain_attach_by_name(dev, "pcie");
+ if (IS_ERR(imx_pcie->pd_pcie))
+ return PTR_ERR(imx_pcie->pd_pcie);
/* Do nothing when power domain missing */
- if (!imx6_pcie->pd_pcie)
+ if (!imx_pcie->pd_pcie)
return 0;
- link = device_link_add(dev, imx6_pcie->pd_pcie,
+ link = device_link_add(dev, imx_pcie->pd_pcie,
DL_FLAG_STATELESS |
DL_FLAG_PM_RUNTIME |
DL_FLAG_RPM_ACTIVE);
if (!link) {
- dev_err(dev, "Failed to add device_link to pcie pd.\n");
+ dev_err(dev, "Failed to add device_link to pcie pd\n");
return -EINVAL;
}
- imx6_pcie->pd_pcie_phy = dev_pm_domain_attach_by_name(dev, "pcie_phy");
- if (IS_ERR(imx6_pcie->pd_pcie_phy))
- return PTR_ERR(imx6_pcie->pd_pcie_phy);
+ imx_pcie->pd_pcie_phy = dev_pm_domain_attach_by_name(dev, "pcie_phy");
+ if (IS_ERR(imx_pcie->pd_pcie_phy))
+ return PTR_ERR(imx_pcie->pd_pcie_phy);
- link = device_link_add(dev, imx6_pcie->pd_pcie_phy,
+ link = device_link_add(dev, imx_pcie->pd_pcie_phy,
DL_FLAG_STATELESS |
DL_FLAG_PM_RUNTIME |
DL_FLAG_RPM_ACTIVE);
if (!link) {
- dev_err(dev, "Failed to add device_link to pcie_phy pd.\n");
+ dev_err(dev, "Failed to add device_link to pcie_phy pd\n");
return -EINVAL;
}
return 0;
}
-static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
+static int imx6sx_pcie_enable_ref_clk(struct imx_pcie *imx_pcie, bool enable)
{
- unsigned int offset;
- int ret = 0;
+ regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6SX_GPR12_PCIE_TEST_POWERDOWN,
+ enable ? 0 : IMX6SX_GPR12_PCIE_TEST_POWERDOWN);
+ return 0;
+}
- switch (imx6_pcie->drvdata->variant) {
- case IMX6SX:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX6SX_GPR12_PCIE_TEST_POWERDOWN, 0);
- break;
- case IMX6QP:
- case IMX6Q:
+static int imx6q_pcie_enable_ref_clk(struct imx_pcie *imx_pcie, bool enable)
+{
+ if (enable) {
/* power up core phy and enable ref clock */
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
- IMX6Q_GPR1_PCIE_TEST_PD, 0 << 18);
+ regmap_clear_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_TEST_PD);
/*
- * the async reset input need ref clock to sync internally,
+		 * The async reset input needs the ref clock to sync internally;
* when the ref clock comes after reset, internal synced
* reset time is too short, cannot meet the requirement.
- * add one ~10us delay here.
+ * Add a ~10us delay here.
*/
usleep_range(10, 100);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
- IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16);
- break;
- case IMX7D:
- case IMX95:
- case IMX95_EP:
- break;
- case IMX8MM:
- case IMX8MM_EP:
- case IMX8MQ:
- case IMX8MQ_EP:
- case IMX8MP:
- case IMX8MP_EP:
- offset = imx6_pcie_grp_offset(imx6_pcie);
- /*
- * Set the over ride low and enabled
- * make sure that REF_CLK is turned on.
- */
- regmap_update_bits(imx6_pcie->iomuxc_gpr, offset,
- IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE,
- 0);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, offset,
- IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN,
- IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN);
- break;
+ regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_REF_CLK_EN);
+ } else {
+ regmap_clear_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_REF_CLK_EN);
+ regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_TEST_PD);
}
- return ret;
+ return 0;
}
-static void imx6_pcie_disable_ref_clk(struct imx6_pcie *imx6_pcie)
+static int imx8mm_pcie_enable_ref_clk(struct imx_pcie *imx_pcie, bool enable)
{
- switch (imx6_pcie->drvdata->variant) {
- case IMX6QP:
- case IMX6Q:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
- IMX6Q_GPR1_PCIE_REF_CLK_EN, 0);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
- IMX6Q_GPR1_PCIE_TEST_PD,
- IMX6Q_GPR1_PCIE_TEST_PD);
- break;
- case IMX7D:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX7D_GPR12_PCIE_PHY_REFCLK_SEL,
- IMX7D_GPR12_PCIE_PHY_REFCLK_SEL);
- break;
- default:
- break;
- }
+ int offset = imx_pcie_grp_offset(imx_pcie);
+
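+	/*
+	 * Drive the CLK_REQ override value low and enable the override so
+	 * that the reference clock stays on regardless of the CLKREQ# input.
+	 */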
+ regmap_update_bits(imx_pcie->iomuxc_gpr, offset,
+ IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE,
+ enable ? 0 : IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE);
+ regmap_update_bits(imx_pcie->iomuxc_gpr, offset,
+ IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN,
+ enable ? IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN : 0);
+ return 0;
+}
+
+static int imx7d_pcie_enable_ref_clk(struct imx_pcie *imx_pcie, bool enable)
+{
+ regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX7D_GPR12_PCIE_PHY_REFCLK_SEL,
+ enable ? 0 : IMX7D_GPR12_PCIE_PHY_REFCLK_SEL);
+ return 0;
}
-static int imx6_pcie_clk_enable(struct imx6_pcie *imx6_pcie)
+static int imx_pcie_clk_enable(struct imx_pcie *imx_pcie)
{
- struct dw_pcie *pci = imx6_pcie->pci;
+ struct dw_pcie *pci = imx_pcie->pci;
struct device *dev = pci->dev;
int ret;
- ret = clk_bulk_prepare_enable(imx6_pcie->drvdata->clks_cnt, imx6_pcie->clks);
+ ret = clk_bulk_prepare_enable(imx_pcie->num_clks, imx_pcie->clks);
if (ret)
return ret;
- ret = imx6_pcie_enable_ref_clk(imx6_pcie);
- if (ret) {
- dev_err(dev, "unable to enable pcie ref clock\n");
- goto err_ref_clk;
+ if (imx_pcie->drvdata->enable_ref_clk) {
+ ret = imx_pcie->drvdata->enable_ref_clk(imx_pcie, true);
+ if (ret) {
+ dev_err(dev, "Failed to enable PCIe REFCLK\n");
+ goto err_ref_clk;
+ }
}
/* allow the clocks to stabilize */
@@ -679,102 +682,121 @@ static int imx6_pcie_clk_enable(struct imx6_pcie *imx6_pcie)
return 0;
err_ref_clk:
- clk_bulk_disable_unprepare(imx6_pcie->drvdata->clks_cnt, imx6_pcie->clks);
+ clk_bulk_disable_unprepare(imx_pcie->num_clks, imx_pcie->clks);
return ret;
}
-static void imx6_pcie_clk_disable(struct imx6_pcie *imx6_pcie)
+static void imx_pcie_clk_disable(struct imx_pcie *imx_pcie)
{
- imx6_pcie_disable_ref_clk(imx6_pcie);
- clk_bulk_disable_unprepare(imx6_pcie->drvdata->clks_cnt, imx6_pcie->clks);
+ if (imx_pcie->drvdata->enable_ref_clk)
+ imx_pcie->drvdata->enable_ref_clk(imx_pcie, false);
+ clk_bulk_disable_unprepare(imx_pcie->num_clks, imx_pcie->clks);
}
-static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie)
+static int imx6sx_pcie_core_reset(struct imx_pcie *imx_pcie, bool assert)
{
- reset_control_assert(imx6_pcie->pciephy_reset);
- reset_control_assert(imx6_pcie->apps_reset);
-
- switch (imx6_pcie->drvdata->variant) {
- case IMX6SX:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX6SX_GPR12_PCIE_TEST_POWERDOWN,
- IMX6SX_GPR12_PCIE_TEST_POWERDOWN);
- /* Force PCIe PHY reset */
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5,
- IMX6SX_GPR5_PCIE_BTNRST_RESET,
- IMX6SX_GPR5_PCIE_BTNRST_RESET);
- break;
- case IMX6QP:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
- IMX6Q_GPR1_PCIE_SW_RST,
- IMX6Q_GPR1_PCIE_SW_RST);
- break;
- case IMX6Q:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
- IMX6Q_GPR1_PCIE_TEST_PD, 1 << 18);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
- IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16);
- break;
- default:
- break;
- }
+ if (assert)
+ regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6SX_GPR12_PCIE_TEST_POWERDOWN);
- /* Some boards don't have PCIe reset GPIO. */
- if (gpio_is_valid(imx6_pcie->reset_gpio))
- gpio_set_value_cansleep(imx6_pcie->reset_gpio,
- imx6_pcie->gpio_active_high);
+ /* Force PCIe PHY reset */
+ regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR5, IMX6SX_GPR5_PCIE_BTNRST_RESET,
+ assert ? IMX6SX_GPR5_PCIE_BTNRST_RESET : 0);
+ return 0;
}
-static int imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
+static int imx6qp_pcie_core_reset(struct imx_pcie *imx_pcie, bool assert)
{
- struct dw_pcie *pci = imx6_pcie->pci;
- struct device *dev = pci->dev;
+ regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_SW_RST,
+ assert ? IMX6Q_GPR1_PCIE_SW_RST : 0);
+ if (!assert)
+ usleep_range(200, 500);
- reset_control_deassert(imx6_pcie->pciephy_reset);
+ return 0;
+}
- switch (imx6_pcie->drvdata->variant) {
- case IMX7D:
- /* Workaround for ERR010728, failure of PCI-e PLL VCO to
- * oscillate, especially when cold. This turns off "Duty-cycle
- * Corrector" and other mysterious undocumented things.
- */
- if (likely(imx6_pcie->phy_base)) {
- /* De-assert DCC_FB_EN */
- writel(PCIE_PHY_CMN_REG4_DCC_FB_EN,
- imx6_pcie->phy_base + PCIE_PHY_CMN_REG4);
- /* Assert RX_EQS and RX_EQS_SEL */
- writel(PCIE_PHY_CMN_REG24_RX_EQ_SEL
- | PCIE_PHY_CMN_REG24_RX_EQ,
- imx6_pcie->phy_base + PCIE_PHY_CMN_REG24);
- /* Assert ATT_MODE */
- writel(PCIE_PHY_CMN_REG26_ATT_MODE,
- imx6_pcie->phy_base + PCIE_PHY_CMN_REG26);
- } else {
- dev_warn(dev, "Unable to apply ERR010728 workaround. DT missing fsl,imx7d-pcie-phy phandle ?\n");
- }
+static int imx6q_pcie_core_reset(struct imx_pcie *imx_pcie, bool assert)
+{
+ if (!assert)
+ return 0;
- imx7d_pcie_wait_for_phy_pll_lock(imx6_pcie);
- break;
- case IMX6SX:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5,
- IMX6SX_GPR5_PCIE_BTNRST_RESET, 0);
- break;
- case IMX6QP:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
- IMX6Q_GPR1_PCIE_SW_RST, 0);
+ regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_TEST_PD);
+ regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_REF_CLK_EN);
- usleep_range(200, 500);
- break;
- default:
- break;
+ return 0;
+}
+
+static int imx7d_pcie_core_reset(struct imx_pcie *imx_pcie, bool assert)
+{
+ struct dw_pcie *pci = imx_pcie->pci;
+ struct device *dev = pci->dev;
+
+ if (assert)
+ return 0;
+
+ /*
+ * Workaround for ERR010728 (IMX7DS_2N09P, Rev. 1.1, 4/2023):
+ *
+ * PCIe: PLL may fail to lock under corner conditions.
+ *
+ * Initial VCO oscillation may fail under corner conditions such as
+ * cold temperature which will cause the PCIe PLL fail to lock in the
+ * initialization phase.
+ *
+ * The Duty-cycle Corrector calibration must be disabled.
+ *
+ * 1. De-assert the G_RST signal by clearing
+ * SRC_PCIEPHY_RCR[PCIEPHY_G_RST].
+ * 2. De-assert DCC_FB_EN by writing data “0x29” to the register
+ * address 0x306d0014 (PCIE_PHY_CMN_REG4).
+ * 3. Assert RX_EQS, RX_EQ_SEL by writing data “0x48” to the register
+ * address 0x306d0090 (PCIE_PHY_CMN_REG24).
+ * 4. Assert ATT_MODE by writing data “0xbc” to the register
+ * address 0x306d0098 (PCIE_PHY_CMN_REG26).
+ * 5. De-assert the CMN_RST signal by clearing register bit
+ * SRC_PCIEPHY_RCR[PCIEPHY_BTN]
+ */
+
+ if (likely(imx_pcie->phy_base)) {
+ /* De-assert DCC_FB_EN */
+ writel(PCIE_PHY_CMN_REG4_DCC_FB_EN, imx_pcie->phy_base + PCIE_PHY_CMN_REG4);
+		/* Assert RX_EQ and RX_EQ_SEL */
+ writel(PCIE_PHY_CMN_REG24_RX_EQ_SEL | PCIE_PHY_CMN_REG24_RX_EQ,
+ imx_pcie->phy_base + PCIE_PHY_CMN_REG24);
+ /* Assert ATT_MODE */
+ writel(PCIE_PHY_CMN_REG26_ATT_MODE, imx_pcie->phy_base + PCIE_PHY_CMN_REG26);
+ } else {
+		dev_warn(dev, "Unable to apply ERR010728 workaround. DT missing fsl,imx7d-pcie-phy phandle?\n");
}
+ imx7d_pcie_wait_for_phy_pll_lock(imx_pcie);
+ return 0;
+}
+
+static void imx_pcie_assert_core_reset(struct imx_pcie *imx_pcie)
+{
+ reset_control_assert(imx_pcie->pciephy_reset);
+ reset_control_assert(imx_pcie->apps_reset);
+
+ if (imx_pcie->drvdata->core_reset)
+ imx_pcie->drvdata->core_reset(imx_pcie, true);
+
+ /* Some boards don't have PCIe reset GPIO. */
+ gpiod_set_value_cansleep(imx_pcie->reset_gpiod, 1);
+}
+
+static int imx_pcie_deassert_core_reset(struct imx_pcie *imx_pcie)
+{
+ reset_control_deassert(imx_pcie->pciephy_reset);
+ reset_control_deassert(imx_pcie->apps_reset);
+
+ if (imx_pcie->drvdata->core_reset)
+ imx_pcie->drvdata->core_reset(imx_pcie, false);
/* Some boards don't have PCIe reset GPIO. */
- if (gpio_is_valid(imx6_pcie->reset_gpio)) {
+ if (imx_pcie->reset_gpiod) {
msleep(100);
- gpio_set_value_cansleep(imx6_pcie->reset_gpio,
- !imx6_pcie->gpio_active_high);
+ gpiod_set_value_cansleep(imx_pcie->reset_gpiod, 0);
/* Wait for 100ms after PERST# deassertion (PCIe r5.0, 6.6.1) */
msleep(100);
}
@@ -782,9 +804,9 @@ static int imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
return 0;
}
-static int imx6_pcie_wait_for_speed_change(struct imx6_pcie *imx6_pcie)
+static int imx_pcie_wait_for_speed_change(struct imx_pcie *imx_pcie)
{
- struct dw_pcie *pci = imx6_pcie->pci;
+ struct dw_pcie *pci = imx_pcie->pci;
struct device *dev = pci->dev;
u32 tmp;
unsigned int retries;
@@ -801,33 +823,38 @@ static int imx6_pcie_wait_for_speed_change(struct imx6_pcie *imx6_pcie)
return -ETIMEDOUT;
}
-static void imx6_pcie_ltssm_enable(struct device *dev)
+static void imx_pcie_ltssm_enable(struct device *dev)
{
- struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
- const struct imx6_pcie_drvdata *drvdata = imx6_pcie->drvdata;
+ struct imx_pcie *imx_pcie = dev_get_drvdata(dev);
+ const struct imx_pcie_drvdata *drvdata = imx_pcie->drvdata;
+ u8 offset = dw_pcie_find_capability(imx_pcie->pci, PCI_CAP_ID_EXP);
+ u32 tmp;
+ tmp = dw_pcie_readl_dbi(imx_pcie->pci, offset + PCI_EXP_LNKCAP);
+ phy_set_speed(imx_pcie->phy, FIELD_GET(PCI_EXP_LNKCAP_SLS, tmp));
if (drvdata->ltssm_mask)
- regmap_update_bits(imx6_pcie->iomuxc_gpr, drvdata->ltssm_off, drvdata->ltssm_mask,
+ regmap_update_bits(imx_pcie->iomuxc_gpr, drvdata->ltssm_off, drvdata->ltssm_mask,
drvdata->ltssm_mask);
- reset_control_deassert(imx6_pcie->apps_reset);
+ reset_control_deassert(imx_pcie->apps_reset);
}
-static void imx6_pcie_ltssm_disable(struct device *dev)
+static void imx_pcie_ltssm_disable(struct device *dev)
{
- struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
- const struct imx6_pcie_drvdata *drvdata = imx6_pcie->drvdata;
+ struct imx_pcie *imx_pcie = dev_get_drvdata(dev);
+ const struct imx_pcie_drvdata *drvdata = imx_pcie->drvdata;
+ phy_set_speed(imx_pcie->phy, 0);
if (drvdata->ltssm_mask)
- regmap_update_bits(imx6_pcie->iomuxc_gpr, drvdata->ltssm_off,
+ regmap_update_bits(imx_pcie->iomuxc_gpr, drvdata->ltssm_off,
drvdata->ltssm_mask, 0);
- reset_control_assert(imx6_pcie->apps_reset);
+ reset_control_assert(imx_pcie->apps_reset);
}
-static int imx6_pcie_start_link(struct dw_pcie *pci)
+static int imx_pcie_start_link(struct dw_pcie *pci)
{
- struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);
+ struct imx_pcie *imx_pcie = to_imx_pcie(pci);
struct device *dev = pci->dev;
u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
u32 tmp;
@@ -846,18 +873,18 @@ static int imx6_pcie_start_link(struct dw_pcie *pci)
dw_pcie_dbi_ro_wr_dis(pci);
/* Start LTSSM. */
- imx6_pcie_ltssm_enable(dev);
+ imx_pcie_ltssm_enable(dev);
ret = dw_pcie_wait_for_link(pci);
if (ret)
goto err_reset_phy;
- if (pci->link_gen > 1) {
+ if (pci->max_link_speed > 1) {
/* Allow faster modes after the link is up */
dw_pcie_dbi_ro_wr_en(pci);
tmp = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
tmp &= ~PCI_EXP_LNKCAP_SLS;
- tmp |= pci->link_gen;
+ tmp |= pci->max_link_speed;
dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, tmp);
/*
@@ -869,8 +896,9 @@ static int imx6_pcie_start_link(struct dw_pcie *pci)
dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, tmp);
dw_pcie_dbi_ro_wr_dis(pci);
- if (imx6_pcie->drvdata->flags &
- IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE) {
+ if (imx_pcie->drvdata->flags &
+ IMX_PCIE_FLAG_IMX_SPEED_CHANGE) {
/*
* On i.MX7, DIRECT_SPEED_CHANGE behaves differently
* from i.MX6 family when no link speed transition
@@ -879,8 +907,7 @@ static int imx6_pcie_start_link(struct dw_pcie *pci)
* which will cause the following code to report false
* failure.
*/
-
- ret = imx6_pcie_wait_for_speed_change(imx6_pcie);
+ ret = imx_pcie_wait_for_speed_change(imx_pcie);
if (ret) {
dev_err(dev, "Failed to bring link up!\n");
goto err_reset_phy;
@@ -895,37 +922,213 @@ static int imx6_pcie_start_link(struct dw_pcie *pci)
dev_info(dev, "Link: Only Gen1 is enabled\n");
}
- imx6_pcie->link_is_up = true;
tmp = dw_pcie_readw_dbi(pci, offset + PCI_EXP_LNKSTA);
dev_info(dev, "Link up, Gen%i\n", tmp & PCI_EXP_LNKSTA_CLS);
return 0;
err_reset_phy:
- imx6_pcie->link_is_up = false;
dev_dbg(dev, "PHY DEBUG_R0=0x%08x DEBUG_R1=0x%08x\n",
dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG0),
dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG1));
- imx6_pcie_reset_phy(imx6_pcie);
+ imx_pcie_reset_phy(imx_pcie);
return 0;
}
-static void imx6_pcie_stop_link(struct dw_pcie *pci)
+static void imx_pcie_stop_link(struct dw_pcie *pci)
{
struct device *dev = pci->dev;
/* Turn off PCIe LTSSM */
- imx6_pcie_ltssm_disable(dev);
+ imx_pcie_ltssm_disable(dev);
+}
+
+static int imx_pcie_add_lut(struct imx_pcie *imx_pcie, u16 rid, u8 sid)
+{
+ struct dw_pcie *pci = imx_pcie->pci;
+ struct device *dev = pci->dev;
+ u32 data1, data2;
+ int free = -1;
+ int i;
+
+ if (sid >= 64) {
+		dev_err(dev, "Invalid streamID %d\n", sid);
+ return -EINVAL;
+ }
+
+ guard(mutex)(&imx_pcie->lock);
+
+ /*
+ * Iterate through all LUT entries to check for duplicate RID and
+ * identify the first available entry. Configure this available entry
+ * immediately after verification to avoid rescanning it.
+ */
+ for (i = 0; i < IMX95_MAX_LUT; i++) {
+ regmap_write(imx_pcie->iomuxc_gpr,
+ IMX95_PE0_LUT_ACSCTRL, IMX95_PEO_LUT_RWA | i);
+ regmap_read(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA1, &data1);
+
+ if (!(data1 & IMX95_PE0_LUT_VLD)) {
+ if (free < 0)
+ free = i;
+ continue;
+ }
+
+ regmap_read(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA2, &data2);
+
+ /* Do not add duplicate RID */
+ if (rid == FIELD_GET(IMX95_PE0_LUT_REQID, data2)) {
+			dev_warn(dev, "Existing LUT entry available for RID (%d)\n", rid);
+ return 0;
+ }
+ }
+
+ if (free < 0) {
+ dev_err(dev, "LUT entry is not available\n");
+ return -ENOSPC;
+ }
+
+ data1 = FIELD_PREP(IMX95_PE0_LUT_DAC_ID, 0);
+ data1 |= FIELD_PREP(IMX95_PE0_LUT_STREAM_ID, sid);
+ data1 |= IMX95_PE0_LUT_VLD;
+ regmap_write(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA1, data1);
+
+ data2 = IMX95_PE0_LUT_MASK; /* Match all bits of RID */
+ data2 |= FIELD_PREP(IMX95_PE0_LUT_REQID, rid);
+ regmap_write(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA2, data2);
+
+ regmap_write(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_ACSCTRL, free);
+
+ return 0;
+}
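The guard(mutex)() above comes from <linux/cleanup.h>: it takes the lock and releases it automatically when the enclosing scope is left, so none of the early returns needs an explicit unlock. A sketch of the equivalent open-coded pattern:

	mutex_lock(&imx_pcie->lock);
	/* ... scan and program the LUT ... */
	mutex_unlock(&imx_pcie->lock);	/* needed on every return path */
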
+
+static void imx_pcie_remove_lut(struct imx_pcie *imx_pcie, u16 rid)
+{
+ u32 data2;
+ int i;
+
+ guard(mutex)(&imx_pcie->lock);
+
+ for (i = 0; i < IMX95_MAX_LUT; i++) {
+ regmap_write(imx_pcie->iomuxc_gpr,
+ IMX95_PE0_LUT_ACSCTRL, IMX95_PEO_LUT_RWA | i);
+ regmap_read(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA2, &data2);
+ if (FIELD_GET(IMX95_PE0_LUT_REQID, data2) == rid) {
+ regmap_write(imx_pcie->iomuxc_gpr,
+ IMX95_PE0_LUT_DATA1, 0);
+ regmap_write(imx_pcie->iomuxc_gpr,
+ IMX95_PE0_LUT_DATA2, 0);
+ regmap_write(imx_pcie->iomuxc_gpr,
+ IMX95_PE0_LUT_ACSCTRL, i);
+
+ break;
+ }
+ }
+}
+
+static int imx_pcie_enable_device(struct pci_host_bridge *bridge,
+ struct pci_dev *pdev)
+{
+ struct imx_pcie *imx_pcie = to_imx_pcie(to_dw_pcie_from_pp(bridge->sysdata));
+ u32 sid_i, sid_m, rid = pci_dev_id(pdev);
+ struct device_node *target;
+ struct device *dev;
+ int err_i, err_m;
+ u32 sid = 0;
+
+ dev = imx_pcie->pci->dev;
+
+ target = NULL;
+ err_i = of_map_id(dev->of_node, rid, "iommu-map", "iommu-map-mask",
+ &target, &sid_i);
+ if (target) {
+ of_node_put(target);
+ } else {
+ /*
+		 * "target == NULL && err_i == 0" means the RID is out of the
+		 * map range. That would need a 1:1 RID-to-streamID map, which
+		 * the hardware can't support since the streamID is only 6 bits.
+ */
+ err_i = -EINVAL;
+ }
+
+ target = NULL;
+ err_m = of_map_id(dev->of_node, rid, "msi-map", "msi-map-mask",
+ &target, &sid_m);
+
+ /*
+ * err_m target
+ * 0 NULL RID out of range. Use 1:1 map RID to
+	 *                streamID; current hardware can't
+	 *                support it, so return -EINVAL.
+ * != 0 NULL msi-map does not exist, use built-in MSI
+ * 0 != NULL Get correct streamID from RID
+ * != 0 != NULL Invalid combination
+ */
+ if (!err_m && !target)
+ return -EINVAL;
+ else if (target)
+		of_node_put(target); /* Found streamID map entry for RID in msi-map */
+
+ /*
+ * msi-map iommu-map
+ * N N DWC MSI Ctrl
+ * Y Y ITS + SMMU, require the same SID
+ * Y N ITS
+ * N Y DWC MSI Ctrl + SMMU
+ */
+ if (err_i && err_m)
+ return 0;
+
+ if (!err_i && !err_m) {
+ /*
+ * Glue Layer
+ * <==========>
+ * ┌─────┐ ┌──────────┐
+ * │ LUT │ 6-bit streamID │ │
+ * │ │─────────────────►│ MSI │
+ * └─────┘ 2-bit ctrl ID │ │
+ * ┌───────────►│ │
+ * (i.MX95) │ │ │
+ * 00 PCIe0 │ │ │
+ * 01 ENETC │ │ │
+ * 10 PCIe1 │ │ │
+ * │ └──────────┘
+	 * The MSI glue layer automatically prepends a 2-bit controller ID
+	 * to the streamID, so mask those 2 bits off to get the streamID.
+	 * The IOMMU glue layer doesn't do that.
+ */
+ if (sid_i != (sid_m & IMX95_SID_MASK)) {
+ dev_err(dev, "iommu-map and msi-map entries mismatch!\n");
+ return -EINVAL;
+ }
+ }
+
+ if (!err_i)
+ sid = sid_i;
+ else if (!err_m)
+ sid = sid_m & IMX95_SID_MASK;
+
+ return imx_pcie_add_lut(imx_pcie, rid, sid);
+}
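A worked example of the consistency check above (all values hypothetical): if iommu-map yields sid_i = 0x2a and msi-map yields sid_m = 0xaa for the same RID, the MSI value carries controller ID 0b10 (PCIe1) in bits 7:6, so

	(0xaa & IMX95_SID_MASK) == 0x2a	/* == sid_i, the maps agree */

and the LUT is programmed with streamID 0x2a.
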
+
+static void imx_pcie_disable_device(struct pci_host_bridge *bridge,
+ struct pci_dev *pdev)
+{
+ struct imx_pcie *imx_pcie;
+
+ imx_pcie = to_imx_pcie(to_dw_pcie_from_pp(bridge->sysdata));
+ imx_pcie_remove_lut(imx_pcie, pci_dev_id(pdev));
}
-static int imx6_pcie_host_init(struct dw_pcie_rp *pp)
+static int imx_pcie_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct device *dev = pci->dev;
- struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);
+ struct imx_pcie *imx_pcie = to_imx_pcie(pci);
int ret;
- if (imx6_pcie->vpcie) {
- ret = regulator_enable(imx6_pcie->vpcie);
+ if (imx_pcie->vpcie) {
+ ret = regulator_enable(imx_pcie->vpcie);
if (ret) {
dev_err(dev, "failed to enable vpcie regulator: %d\n",
ret);
@@ -933,83 +1136,117 @@ static int imx6_pcie_host_init(struct dw_pcie_rp *pp)
}
}
- imx6_pcie_assert_core_reset(imx6_pcie);
+ if (pp->bridge && imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_LUT)) {
+ pp->bridge->enable_device = imx_pcie_enable_device;
+ pp->bridge->disable_device = imx_pcie_disable_device;
+ }
+
+ imx_pcie_assert_core_reset(imx_pcie);
- if (imx6_pcie->drvdata->init_phy)
- imx6_pcie->drvdata->init_phy(imx6_pcie);
+ if (imx_pcie->drvdata->init_phy)
+ imx_pcie->drvdata->init_phy(imx_pcie);
- imx6_pcie_configure_type(imx6_pcie);
+ imx_pcie_configure_type(imx_pcie);
- ret = imx6_pcie_clk_enable(imx6_pcie);
+ ret = imx_pcie_clk_enable(imx_pcie);
if (ret) {
dev_err(dev, "unable to enable pcie clocks: %d\n", ret);
goto err_reg_disable;
}
- if (imx6_pcie->phy) {
- ret = phy_init(imx6_pcie->phy);
+ if (imx_pcie->phy) {
+ ret = phy_init(imx_pcie->phy);
if (ret) {
dev_err(dev, "pcie PHY power up failed\n");
goto err_clk_disable;
}
- }
- if (imx6_pcie->phy) {
- ret = phy_power_on(imx6_pcie->phy);
+ ret = phy_set_mode_ext(imx_pcie->phy, PHY_MODE_PCIE,
+ imx_pcie->drvdata->mode == DW_PCIE_EP_TYPE ?
+ PHY_MODE_PCIE_EP : PHY_MODE_PCIE_RC);
+ if (ret) {
+ dev_err(dev, "unable to set PCIe PHY mode\n");
+ goto err_phy_exit;
+ }
+
+ ret = phy_power_on(imx_pcie->phy);
if (ret) {
dev_err(dev, "waiting for PHY ready timeout!\n");
- goto err_phy_off;
+ goto err_phy_exit;
}
}
- ret = imx6_pcie_deassert_core_reset(imx6_pcie);
+ ret = imx_pcie_deassert_core_reset(imx_pcie);
if (ret < 0) {
dev_err(dev, "pcie deassert core reset failed: %d\n", ret);
goto err_phy_off;
}
- imx6_setup_phy_mpll(imx6_pcie);
+ imx_setup_phy_mpll(imx_pcie);
return 0;
err_phy_off:
- if (imx6_pcie->phy)
- phy_exit(imx6_pcie->phy);
+ phy_power_off(imx_pcie->phy);
+err_phy_exit:
+ phy_exit(imx_pcie->phy);
err_clk_disable:
- imx6_pcie_clk_disable(imx6_pcie);
+ imx_pcie_clk_disable(imx_pcie);
err_reg_disable:
- if (imx6_pcie->vpcie)
- regulator_disable(imx6_pcie->vpcie);
+ if (imx_pcie->vpcie)
+ regulator_disable(imx_pcie->vpcie);
return ret;
}
-static void imx6_pcie_host_exit(struct dw_pcie_rp *pp)
+static void imx_pcie_host_exit(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);
+ struct imx_pcie *imx_pcie = to_imx_pcie(pci);
- if (imx6_pcie->phy) {
- if (phy_power_off(imx6_pcie->phy))
+ if (imx_pcie->phy) {
+ if (phy_power_off(imx_pcie->phy))
dev_err(pci->dev, "unable to power off PHY\n");
- phy_exit(imx6_pcie->phy);
+ phy_exit(imx_pcie->phy);
}
- imx6_pcie_clk_disable(imx6_pcie);
+ imx_pcie_clk_disable(imx_pcie);
+
+ if (imx_pcie->vpcie)
+ regulator_disable(imx_pcie->vpcie);
+}
- if (imx6_pcie->vpcie)
- regulator_disable(imx6_pcie->vpcie);
+/*
+ * In old DWC implementations, PCIE_ATU_INHIBIT_PAYLOAD in the iATU Ctrl2
+ * register is reserved, so the generic DWC implementation of sending the
+ * PME_Turn_Off message using a dummy MMIO write cannot be used.
+ */
+static void imx_pcie_pme_turn_off(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct imx_pcie *imx_pcie = to_imx_pcie(pci);
+
+ regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12, IMX6SX_GPR12_PCIE_PM_TURN_OFF);
+ regmap_clear_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12, IMX6SX_GPR12_PCIE_PM_TURN_OFF);
+
+	usleep_range(PCIE_PME_TO_L2_TIMEOUT_US / 10, PCIE_PME_TO_L2_TIMEOUT_US);
}
-static const struct dw_pcie_host_ops imx6_pcie_host_ops = {
- .init = imx6_pcie_host_init,
- .deinit = imx6_pcie_host_exit,
+static const struct dw_pcie_host_ops imx_pcie_host_ops = {
+ .init = imx_pcie_host_init,
+ .deinit = imx_pcie_host_exit,
+ .pme_turn_off = imx_pcie_pme_turn_off,
+};
+
+static const struct dw_pcie_host_ops imx_pcie_host_dw_pme_ops = {
+ .init = imx_pcie_host_init,
+ .deinit = imx_pcie_host_exit,
};
static const struct dw_pcie_ops dw_pcie_ops = {
- .start_link = imx6_pcie_start_link,
- .stop_link = imx6_pcie_stop_link,
+ .start_link = imx_pcie_start_link,
+ .stop_link = imx_pcie_stop_link,
};
-static void imx6_pcie_ep_init(struct dw_pcie_ep *ep)
+static void imx_pcie_ep_init(struct dw_pcie_ep *ep)
{
enum pci_barno bar;
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
@@ -1018,7 +1255,7 @@ static void imx6_pcie_ep_init(struct dw_pcie_ep *ep)
dw_pcie_ep_reset_bar(pci, bar);
}
-static int imx6_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
+static int imx_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
unsigned int type, u16 interrupt_num)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
@@ -1047,16 +1284,27 @@ static const struct pci_epc_features imx8m_pcie_epc_features = {
.align = SZ_64K,
};
+static const struct pci_epc_features imx8q_pcie_epc_features = {
+ .linkup_notifier = false,
+ .msi_capable = true,
+ .msix_capable = false,
+ .bar[BAR_1] = { .type = BAR_RESERVED, },
+ .bar[BAR_3] = { .type = BAR_RESERVED, },
+ .bar[BAR_5] = { .type = BAR_RESERVED, },
+ .align = SZ_64K,
+};
+
/*
- * BAR# | Default BAR enable | Default BAR Type | Default BAR Size | BAR Sizing Scheme
- * ================================================================================================
- * BAR0 | Enable | 64-bit | 1 MB | Programmable Size
- * BAR1 | Disable | 32-bit | 64 KB | Fixed Size
- * BAR1 should be disabled if BAR0 is 64bit.
- * BAR2 | Enable | 32-bit | 1 MB | Programmable Size
- * BAR3 | Enable | 32-bit | 64 KB | Programmable Size
- * BAR4 | Enable | 32-bit | 1M | Programmable Size
- * BAR5 | Enable | 32-bit | 64 KB | Programmable Size
+ * | Default | Default | Default | BAR Sizing
+ * BAR# | Enable? | Type | Size | Scheme
+ * =======================================================
+ * BAR0 | Enable | 64-bit | 1 MB | Programmable Size
+ * BAR1 | Disable | 32-bit | 64 KB | Fixed Size
+ * (BAR1 should be disabled if BAR0 is 64-bit)
+ * BAR2 | Enable | 32-bit | 1 MB | Programmable Size
+ * BAR3 | Enable | 32-bit | 64 KB | Programmable Size
+ * BAR4 | Enable | 32-bit | 1 MB | Programmable Size
+ * BAR5 | Enable | 32-bit | 64 KB | Programmable Size
*/
static const struct pci_epc_features imx95_pcie_epc_features = {
.msi_capable = true,
@@ -1065,183 +1313,153 @@ static const struct pci_epc_features imx95_pcie_epc_features = {
};
static const struct pci_epc_features*
-imx6_pcie_ep_get_features(struct dw_pcie_ep *ep)
+imx_pcie_ep_get_features(struct dw_pcie_ep *ep)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);
+ struct imx_pcie *imx_pcie = to_imx_pcie(pci);
- return imx6_pcie->drvdata->epc_features;
+ return imx_pcie->drvdata->epc_features;
}
static const struct dw_pcie_ep_ops pcie_ep_ops = {
- .init = imx6_pcie_ep_init,
- .raise_irq = imx6_pcie_ep_raise_irq,
- .get_features = imx6_pcie_ep_get_features,
+ .init = imx_pcie_ep_init,
+ .raise_irq = imx_pcie_ep_raise_irq,
+ .get_features = imx_pcie_ep_get_features,
};
-static int imx6_add_pcie_ep(struct imx6_pcie *imx6_pcie,
+static int imx_add_pcie_ep(struct imx_pcie *imx_pcie,
struct platform_device *pdev)
{
int ret;
- unsigned int pcie_dbi2_offset;
struct dw_pcie_ep *ep;
- struct dw_pcie *pci = imx6_pcie->pci;
+ struct dw_pcie *pci = imx_pcie->pci;
struct dw_pcie_rp *pp = &pci->pp;
struct device *dev = pci->dev;
- imx6_pcie_host_init(pp);
+ imx_pcie_host_init(pp);
ep = &pci->ep;
ep->ops = &pcie_ep_ops;
- switch (imx6_pcie->drvdata->variant) {
- case IMX8MQ_EP:
- case IMX8MM_EP:
- case IMX8MP_EP:
- pcie_dbi2_offset = SZ_1M;
- break;
- default:
- pcie_dbi2_offset = SZ_4K;
- break;
- }
-
- pci->dbi_base2 = pci->dbi_base + pcie_dbi2_offset;
-
- /*
- * FIXME: Ideally, dbi2 base address should come from DT. But since only IMX95 is defining
- * "dbi2" in DT, "dbi_base2" is set to NULL here for that platform alone so that the DWC
- * core code can fetch that from DT. But once all platform DTs were fixed, this and the
- * above "dbi_base2" setting should be removed.
- */
- if (device_property_match_string(dev, "reg-names", "dbi2") >= 0)
- pci->dbi_base2 = NULL;
-
- if (imx6_check_flag(imx6_pcie, IMX6_PCIE_FLAG_SUPPORT_64BIT))
+ if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_SUPPORT_64BIT))
dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
+ ep->page_size = imx_pcie->drvdata->epc_features->align;
+
ret = dw_pcie_ep_init(ep);
if (ret) {
dev_err(dev, "failed to initialize endpoint\n");
return ret;
}
- /* Start LTSSM. */
- imx6_pcie_ltssm_enable(dev);
-
- return 0;
-}
-static void imx6_pcie_pm_turnoff(struct imx6_pcie *imx6_pcie)
-{
- struct device *dev = imx6_pcie->pci->dev;
-
- /* Some variants have a turnoff reset in DT */
- if (imx6_pcie->turnoff_reset) {
- reset_control_assert(imx6_pcie->turnoff_reset);
- reset_control_deassert(imx6_pcie->turnoff_reset);
- goto pm_turnoff_sleep;
+ ret = dw_pcie_ep_init_registers(ep);
+ if (ret) {
+ dev_err(dev, "Failed to initialize DWC endpoint registers\n");
+ dw_pcie_ep_deinit(ep);
+ return ret;
}
- /* Others poke directly at IOMUXC registers */
- switch (imx6_pcie->drvdata->variant) {
- case IMX6SX:
- case IMX6QP:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX6SX_GPR12_PCIE_PM_TURN_OFF,
- IMX6SX_GPR12_PCIE_PM_TURN_OFF);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX6SX_GPR12_PCIE_PM_TURN_OFF, 0);
- break;
- default:
- dev_err(dev, "PME_Turn_Off not implemented\n");
- return;
- }
+ pci_epc_init_notify(ep->epc);
- /*
- * Components with an upstream port must respond to
- * PME_Turn_Off with PME_TO_Ack but we can't check.
- *
- * The standard recommends a 1-10ms timeout after which to
- * proceed anyway as if acks were received.
- */
-pm_turnoff_sleep:
- usleep_range(1000, 10000);
+ /* Start LTSSM. */
+ imx_pcie_ltssm_enable(dev);
+
+ return 0;
}
-static void imx6_pcie_msi_save_restore(struct imx6_pcie *imx6_pcie, bool save)
+static void imx_pcie_msi_save_restore(struct imx_pcie *imx_pcie, bool save)
{
u8 offset;
u16 val;
- struct dw_pcie *pci = imx6_pcie->pci;
+ struct dw_pcie *pci = imx_pcie->pci;
if (pci_msi_enabled()) {
offset = dw_pcie_find_capability(pci, PCI_CAP_ID_MSI);
if (save) {
val = dw_pcie_readw_dbi(pci, offset + PCI_MSI_FLAGS);
- imx6_pcie->msi_ctrl = val;
+ imx_pcie->msi_ctrl = val;
} else {
dw_pcie_dbi_ro_wr_en(pci);
- val = imx6_pcie->msi_ctrl;
+ val = imx_pcie->msi_ctrl;
dw_pcie_writew_dbi(pci, offset + PCI_MSI_FLAGS, val);
dw_pcie_dbi_ro_wr_dis(pci);
}
}
}
-static int imx6_pcie_suspend_noirq(struct device *dev)
+static int imx_pcie_suspend_noirq(struct device *dev)
{
- struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
- struct dw_pcie_rp *pp = &imx6_pcie->pci->pp;
+ struct imx_pcie *imx_pcie = dev_get_drvdata(dev);
- if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_SUPPORTS_SUSPEND))
+ if (!(imx_pcie->drvdata->flags & IMX_PCIE_FLAG_SUPPORTS_SUSPEND))
return 0;
- imx6_pcie_msi_save_restore(imx6_pcie, true);
- imx6_pcie_pm_turnoff(imx6_pcie);
- imx6_pcie_stop_link(imx6_pcie->pci);
- imx6_pcie_host_exit(pp);
+ imx_pcie_msi_save_restore(imx_pcie, true);
+ if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_BROKEN_SUSPEND)) {
+ /*
+ * The minimum for a workaround would be to set PERST# and to
+ * set the PCIE_TEST_PD flag. However, we can also disable the
+		 * clock, which saves some power.
+ */
+ imx_pcie_assert_core_reset(imx_pcie);
+ imx_pcie->drvdata->enable_ref_clk(imx_pcie, false);
+ } else {
+ return dw_pcie_suspend_noirq(imx_pcie->pci);
+ }
return 0;
}
-static int imx6_pcie_resume_noirq(struct device *dev)
+static int imx_pcie_resume_noirq(struct device *dev)
{
int ret;
- struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
- struct dw_pcie_rp *pp = &imx6_pcie->pci->pp;
+ struct imx_pcie *imx_pcie = dev_get_drvdata(dev);
- if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_SUPPORTS_SUSPEND))
+ if (!(imx_pcie->drvdata->flags & IMX_PCIE_FLAG_SUPPORTS_SUSPEND))
return 0;
- ret = imx6_pcie_host_init(pp);
- if (ret)
- return ret;
- imx6_pcie_msi_save_restore(imx6_pcie, false);
- dw_pcie_setup_rc(pp);
+ if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_BROKEN_SUSPEND)) {
+ ret = imx_pcie->drvdata->enable_ref_clk(imx_pcie, true);
+ if (ret)
+ return ret;
+ ret = imx_pcie_deassert_core_reset(imx_pcie);
+ if (ret)
+ return ret;
- if (imx6_pcie->link_is_up)
- imx6_pcie_start_link(imx6_pcie->pci);
+ /*
+		 * Using PCIE_TEST_PD seems to disable MSI and power down the
+		 * root complex. This is why we have to set up the RC again and
+		 * why we have to restore the MSI register.
+ */
+ ret = dw_pcie_setup_rc(&imx_pcie->pci->pp);
+ if (ret)
+ return ret;
+ } else {
+ ret = dw_pcie_resume_noirq(imx_pcie->pci);
+ if (ret)
+ return ret;
+ }
+ imx_pcie_msi_save_restore(imx_pcie, false);
return 0;
}
-static const struct dev_pm_ops imx6_pcie_pm_ops = {
- NOIRQ_SYSTEM_SLEEP_PM_OPS(imx6_pcie_suspend_noirq,
- imx6_pcie_resume_noirq)
+static const struct dev_pm_ops imx_pcie_pm_ops = {
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(imx_pcie_suspend_noirq,
+ imx_pcie_resume_noirq)
};
-static int imx6_pcie_probe(struct platform_device *pdev)
+static int imx_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct dw_pcie *pci;
- struct imx6_pcie *imx6_pcie;
+ struct imx_pcie *imx_pcie;
struct device_node *np;
- struct resource *dbi_base;
struct device_node *node = dev->of_node;
- int ret;
+ int ret, domain;
u16 val;
- int i;
- imx6_pcie = devm_kzalloc(dev, sizeof(*imx6_pcie), GFP_KERNEL);
- if (!imx6_pcie)
+ imx_pcie = devm_kzalloc(dev, sizeof(*imx_pcie), GFP_KERNEL);
+ if (!imx_pcie)
return -ENOMEM;
pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
@@ -1250,10 +1468,16 @@ static int imx6_pcie_probe(struct platform_device *pdev)
pci->dev = dev;
pci->ops = &dw_pcie_ops;
- pci->pp.ops = &imx6_pcie_host_ops;
- imx6_pcie->pci = pci;
- imx6_pcie->drvdata = of_device_get_match_data(dev);
+ imx_pcie->pci = pci;
+ imx_pcie->drvdata = of_device_get_match_data(dev);
+
+ mutex_init(&imx_pcie->lock);
+
+ if (imx_pcie->drvdata->ops)
+ pci->pp.ops = imx_pcie->drvdata->ops;
+ else
+ pci->pp.ops = &imx_pcie_host_dw_pme_ops;
/* Find the PHY if one is defined, only imx7d uses it */
np = of_parse_phandle(node, "fsl,imx7d-pcie-phy", 0);
@@ -1265,93 +1489,68 @@ static int imx6_pcie_probe(struct platform_device *pdev)
dev_err(dev, "Unable to map PCIe PHY\n");
return ret;
}
- imx6_pcie->phy_base = devm_ioremap_resource(dev, &res);
- if (IS_ERR(imx6_pcie->phy_base))
- return PTR_ERR(imx6_pcie->phy_base);
+ imx_pcie->phy_base = devm_ioremap_resource(dev, &res);
+ if (IS_ERR(imx_pcie->phy_base))
+ return PTR_ERR(imx_pcie->phy_base);
}
- pci->dbi_base = devm_platform_get_and_ioremap_resource(pdev, 0, &dbi_base);
- if (IS_ERR(pci->dbi_base))
- return PTR_ERR(pci->dbi_base);
-
/* Fetch GPIOs */
- imx6_pcie->reset_gpio = of_get_named_gpio(node, "reset-gpio", 0);
- imx6_pcie->gpio_active_high = of_property_read_bool(node,
- "reset-gpio-active-high");
- if (gpio_is_valid(imx6_pcie->reset_gpio)) {
- ret = devm_gpio_request_one(dev, imx6_pcie->reset_gpio,
- imx6_pcie->gpio_active_high ?
- GPIOF_OUT_INIT_HIGH :
- GPIOF_OUT_INIT_LOW,
- "PCIe reset");
- if (ret) {
- dev_err(dev, "unable to get reset gpio\n");
- return ret;
- }
- } else if (imx6_pcie->reset_gpio == -EPROBE_DEFER) {
- return imx6_pcie->reset_gpio;
- }
-
- if (imx6_pcie->drvdata->clks_cnt >= IMX6_PCIE_MAX_CLKS)
- return dev_err_probe(dev, -ENOMEM, "clks_cnt is too big\n");
-
- for (i = 0; i < imx6_pcie->drvdata->clks_cnt; i++)
- imx6_pcie->clks[i].id = imx6_pcie->drvdata->clk_names[i];
+ imx_pcie->reset_gpiod = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(imx_pcie->reset_gpiod))
+ return dev_err_probe(dev, PTR_ERR(imx_pcie->reset_gpiod),
+ "unable to get reset gpio\n");
+ gpiod_set_consumer_name(imx_pcie->reset_gpiod, "PCIe reset");
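+	/*
+	 * With the gpiod API the PERST# polarity comes from the GPIO flags
+	 * in the devicetree, so gpiod_set_value_cansleep(gpiod, 1) always
+	 * means "assert reset" and no separate active-high handling is
+	 * needed here.
+	 */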
/* Fetch clocks */
- ret = devm_clk_bulk_get(dev, imx6_pcie->drvdata->clks_cnt, imx6_pcie->clks);
- if (ret)
- return ret;
-
- if (imx6_check_flag(imx6_pcie, IMX6_PCIE_FLAG_HAS_PHYDRV)) {
- imx6_pcie->phy = devm_phy_get(dev, "pcie-phy");
- if (IS_ERR(imx6_pcie->phy))
- return dev_err_probe(dev, PTR_ERR(imx6_pcie->phy),
+ imx_pcie->num_clks = devm_clk_bulk_get_all(dev, &imx_pcie->clks);
+ if (imx_pcie->num_clks < 0)
+ return dev_err_probe(dev, imx_pcie->num_clks,
+ "failed to get clocks\n");
+
+ if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_PHYDRV)) {
+ imx_pcie->phy = devm_phy_get(dev, "pcie-phy");
+ if (IS_ERR(imx_pcie->phy))
+ return dev_err_probe(dev, PTR_ERR(imx_pcie->phy),
"failed to get pcie phy\n");
}
- if (imx6_check_flag(imx6_pcie, IMX6_PCIE_FLAG_HAS_APP_RESET)) {
- imx6_pcie->apps_reset = devm_reset_control_get_exclusive(dev, "apps");
- if (IS_ERR(imx6_pcie->apps_reset))
- return dev_err_probe(dev, PTR_ERR(imx6_pcie->apps_reset),
+ if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_APP_RESET)) {
+ imx_pcie->apps_reset = devm_reset_control_get_exclusive(dev, "apps");
+ if (IS_ERR(imx_pcie->apps_reset))
+ return dev_err_probe(dev, PTR_ERR(imx_pcie->apps_reset),
"failed to get pcie apps reset control\n");
}
- if (imx6_check_flag(imx6_pcie, IMX6_PCIE_FLAG_HAS_PHY_RESET)) {
- imx6_pcie->pciephy_reset = devm_reset_control_get_exclusive(dev, "pciephy");
- if (IS_ERR(imx6_pcie->pciephy_reset))
- return dev_err_probe(dev, PTR_ERR(imx6_pcie->pciephy_reset),
+ if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_PHY_RESET)) {
+ imx_pcie->pciephy_reset = devm_reset_control_get_exclusive(dev, "pciephy");
+ if (IS_ERR(imx_pcie->pciephy_reset))
+ return dev_err_probe(dev, PTR_ERR(imx_pcie->pciephy_reset),
"Failed to get PCIEPHY reset control\n");
}
- switch (imx6_pcie->drvdata->variant) {
+ switch (imx_pcie->drvdata->variant) {
case IMX8MQ:
case IMX8MQ_EP:
- case IMX7D:
- if (dbi_base->start == IMX8MQ_PCIE2_BASE_ADDR)
- imx6_pcie->controller_id = 1;
+ domain = of_get_pci_domain_nr(node);
+ if (domain < 0 || domain > 1)
+ return dev_err_probe(dev, -ENODEV, "no \"linux,pci-domain\" property in devicetree\n");
+
+ imx_pcie->controller_id = domain;
break;
default:
break;
}
- /* Grab turnoff reset */
- imx6_pcie->turnoff_reset = devm_reset_control_get_optional_exclusive(dev, "turnoff");
- if (IS_ERR(imx6_pcie->turnoff_reset)) {
- dev_err(dev, "Failed to get TURNOFF reset control\n");
- return PTR_ERR(imx6_pcie->turnoff_reset);
- }
-
- if (imx6_pcie->drvdata->gpr) {
+ if (imx_pcie->drvdata->gpr) {
/* Grab GPR config register range */
- imx6_pcie->iomuxc_gpr =
- syscon_regmap_lookup_by_compatible(imx6_pcie->drvdata->gpr);
- if (IS_ERR(imx6_pcie->iomuxc_gpr))
- return dev_err_probe(dev, PTR_ERR(imx6_pcie->iomuxc_gpr),
+ imx_pcie->iomuxc_gpr =
+ syscon_regmap_lookup_by_compatible(imx_pcie->drvdata->gpr);
+ if (IS_ERR(imx_pcie->iomuxc_gpr))
+ return dev_err_probe(dev, PTR_ERR(imx_pcie->iomuxc_gpr),
"unable to find iomuxc registers\n");
}
- if (imx6_check_flag(imx6_pcie, IMX6_PCIE_FLAG_HAS_SERDES)) {
+ if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_SERDES)) {
void __iomem *off = devm_platform_ioremap_resource_byname(pdev, "app");
if (IS_ERR(off))
@@ -1364,62 +1563,64 @@ static int imx6_pcie_probe(struct platform_device *pdev)
.reg_stride = 4,
};
- imx6_pcie->iomuxc_gpr = devm_regmap_init_mmio(dev, off, &regmap_config);
- if (IS_ERR(imx6_pcie->iomuxc_gpr))
- return dev_err_probe(dev, PTR_ERR(imx6_pcie->iomuxc_gpr),
+ imx_pcie->iomuxc_gpr = devm_regmap_init_mmio(dev, off, &regmap_config);
+ if (IS_ERR(imx_pcie->iomuxc_gpr))
+ return dev_err_probe(dev, PTR_ERR(imx_pcie->iomuxc_gpr),
"unable to find iomuxc registers\n");
}
/* Grab PCIe PHY Tx Settings */
if (of_property_read_u32(node, "fsl,tx-deemph-gen1",
- &imx6_pcie->tx_deemph_gen1))
- imx6_pcie->tx_deemph_gen1 = 0;
+ &imx_pcie->tx_deemph_gen1))
+ imx_pcie->tx_deemph_gen1 = 0;
if (of_property_read_u32(node, "fsl,tx-deemph-gen2-3p5db",
- &imx6_pcie->tx_deemph_gen2_3p5db))
- imx6_pcie->tx_deemph_gen2_3p5db = 0;
+ &imx_pcie->tx_deemph_gen2_3p5db))
+ imx_pcie->tx_deemph_gen2_3p5db = 0;
if (of_property_read_u32(node, "fsl,tx-deemph-gen2-6db",
- &imx6_pcie->tx_deemph_gen2_6db))
- imx6_pcie->tx_deemph_gen2_6db = 20;
+ &imx_pcie->tx_deemph_gen2_6db))
+ imx_pcie->tx_deemph_gen2_6db = 20;
if (of_property_read_u32(node, "fsl,tx-swing-full",
- &imx6_pcie->tx_swing_full))
- imx6_pcie->tx_swing_full = 127;
+ &imx_pcie->tx_swing_full))
+ imx_pcie->tx_swing_full = 127;
if (of_property_read_u32(node, "fsl,tx-swing-low",
- &imx6_pcie->tx_swing_low))
- imx6_pcie->tx_swing_low = 127;
+ &imx_pcie->tx_swing_low))
+ imx_pcie->tx_swing_low = 127;
/* Limit link speed */
- pci->link_gen = 1;
- of_property_read_u32(node, "fsl,max-link-speed", &pci->link_gen);
-
- imx6_pcie->vpcie = devm_regulator_get_optional(&pdev->dev, "vpcie");
- if (IS_ERR(imx6_pcie->vpcie)) {
- if (PTR_ERR(imx6_pcie->vpcie) != -ENODEV)
- return PTR_ERR(imx6_pcie->vpcie);
- imx6_pcie->vpcie = NULL;
+ pci->max_link_speed = 1;
+ of_property_read_u32(node, "fsl,max-link-speed", &pci->max_link_speed);
+
+ imx_pcie->vpcie = devm_regulator_get_optional(&pdev->dev, "vpcie");
+ if (IS_ERR(imx_pcie->vpcie)) {
+ if (PTR_ERR(imx_pcie->vpcie) != -ENODEV)
+ return PTR_ERR(imx_pcie->vpcie);
+ imx_pcie->vpcie = NULL;
}
- imx6_pcie->vph = devm_regulator_get_optional(&pdev->dev, "vph");
- if (IS_ERR(imx6_pcie->vph)) {
- if (PTR_ERR(imx6_pcie->vph) != -ENODEV)
- return PTR_ERR(imx6_pcie->vph);
- imx6_pcie->vph = NULL;
+ imx_pcie->vph = devm_regulator_get_optional(&pdev->dev, "vph");
+ if (IS_ERR(imx_pcie->vph)) {
+ if (PTR_ERR(imx_pcie->vph) != -ENODEV)
+ return PTR_ERR(imx_pcie->vph);
+ imx_pcie->vph = NULL;
}
- platform_set_drvdata(pdev, imx6_pcie);
+ platform_set_drvdata(pdev, imx_pcie);
- ret = imx6_pcie_attach_pd(dev);
+ ret = imx_pcie_attach_pd(dev);
if (ret)
return ret;
- if (imx6_pcie->drvdata->mode == DW_PCIE_EP_TYPE) {
- ret = imx6_add_pcie_ep(imx6_pcie, pdev);
+ pci->use_parent_dt_ranges = true;
+ if (imx_pcie->drvdata->mode == DW_PCIE_EP_TYPE) {
+ ret = imx_add_pcie_ep(imx_pcie, pdev);
if (ret < 0)
return ret;
} else {
+ pci->pp.use_atu_msg = true;
ret = dw_pcie_host_init(&pci->pp);
if (ret < 0)
return ret;
@@ -1436,115 +1637,117 @@ static int imx6_pcie_probe(struct platform_device *pdev)
return 0;
}
-static void imx6_pcie_shutdown(struct platform_device *pdev)
+static void imx_pcie_shutdown(struct platform_device *pdev)
{
- struct imx6_pcie *imx6_pcie = platform_get_drvdata(pdev);
+ struct imx_pcie *imx_pcie = platform_get_drvdata(pdev);
/* bring down link, so bootloader gets clean state in case of reboot */
- imx6_pcie_assert_core_reset(imx6_pcie);
+ imx_pcie_assert_core_reset(imx_pcie);
}
-static const char * const imx6q_clks[] = {"pcie_bus", "pcie", "pcie_phy"};
-static const char * const imx8mm_clks[] = {"pcie_bus", "pcie", "pcie_aux"};
-static const char * const imx8mq_clks[] = {"pcie_bus", "pcie", "pcie_phy", "pcie_aux"};
-static const char * const imx6sx_clks[] = {"pcie_bus", "pcie", "pcie_phy", "pcie_inbound_axi"};
-
-static const struct imx6_pcie_drvdata drvdata[] = {
+static const struct imx_pcie_drvdata drvdata[] = {
[IMX6Q] = {
.variant = IMX6Q,
- .flags = IMX6_PCIE_FLAG_IMX6_PHY |
- IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE,
+ .flags = IMX_PCIE_FLAG_IMX_PHY |
+ IMX_PCIE_FLAG_IMX_SPEED_CHANGE |
+ IMX_PCIE_FLAG_BROKEN_SUSPEND |
+ IMX_PCIE_FLAG_SUPPORTS_SUSPEND,
.dbi_length = 0x200,
.gpr = "fsl,imx6q-iomuxc-gpr",
- .clk_names = imx6q_clks,
- .clks_cnt = ARRAY_SIZE(imx6q_clks),
.ltssm_off = IOMUXC_GPR12,
.ltssm_mask = IMX6Q_GPR12_PCIE_CTL_2,
.mode_off[0] = IOMUXC_GPR12,
.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
- .init_phy = imx6_pcie_init_phy,
+ .init_phy = imx_pcie_init_phy,
+ .enable_ref_clk = imx6q_pcie_enable_ref_clk,
+ .core_reset = imx6q_pcie_core_reset,
},
[IMX6SX] = {
.variant = IMX6SX,
- .flags = IMX6_PCIE_FLAG_IMX6_PHY |
- IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE |
- IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
+ .flags = IMX_PCIE_FLAG_IMX_PHY |
+ IMX_PCIE_FLAG_IMX_SPEED_CHANGE |
+ IMX_PCIE_FLAG_SUPPORTS_SUSPEND,
.gpr = "fsl,imx6q-iomuxc-gpr",
- .clk_names = imx6sx_clks,
- .clks_cnt = ARRAY_SIZE(imx6sx_clks),
.ltssm_off = IOMUXC_GPR12,
.ltssm_mask = IMX6Q_GPR12_PCIE_CTL_2,
.mode_off[0] = IOMUXC_GPR12,
.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
.init_phy = imx6sx_pcie_init_phy,
+ .enable_ref_clk = imx6sx_pcie_enable_ref_clk,
+ .core_reset = imx6sx_pcie_core_reset,
+ .ops = &imx_pcie_host_ops,
},
[IMX6QP] = {
.variant = IMX6QP,
- .flags = IMX6_PCIE_FLAG_IMX6_PHY |
- IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE |
- IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
+ .flags = IMX_PCIE_FLAG_IMX_PHY |
+ IMX_PCIE_FLAG_IMX_SPEED_CHANGE |
+ IMX_PCIE_FLAG_SUPPORTS_SUSPEND,
.dbi_length = 0x200,
.gpr = "fsl,imx6q-iomuxc-gpr",
- .clk_names = imx6q_clks,
- .clks_cnt = ARRAY_SIZE(imx6q_clks),
.ltssm_off = IOMUXC_GPR12,
.ltssm_mask = IMX6Q_GPR12_PCIE_CTL_2,
.mode_off[0] = IOMUXC_GPR12,
.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
- .init_phy = imx6_pcie_init_phy,
+ .init_phy = imx_pcie_init_phy,
+ .enable_ref_clk = imx6q_pcie_enable_ref_clk,
+ .core_reset = imx6qp_pcie_core_reset,
+ .ops = &imx_pcie_host_ops,
},
[IMX7D] = {
.variant = IMX7D,
- .flags = IMX6_PCIE_FLAG_SUPPORTS_SUSPEND |
- IMX6_PCIE_FLAG_HAS_APP_RESET |
- IMX6_PCIE_FLAG_HAS_PHY_RESET,
+ .flags = IMX_PCIE_FLAG_SUPPORTS_SUSPEND |
+ IMX_PCIE_FLAG_HAS_APP_RESET |
+ IMX_PCIE_FLAG_HAS_PHY_RESET,
.gpr = "fsl,imx7d-iomuxc-gpr",
- .clk_names = imx6q_clks,
- .clks_cnt = ARRAY_SIZE(imx6q_clks),
.mode_off[0] = IOMUXC_GPR12,
.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
- .init_phy = imx7d_pcie_init_phy,
+ .enable_ref_clk = imx7d_pcie_enable_ref_clk,
+ .core_reset = imx7d_pcie_core_reset,
},
[IMX8MQ] = {
.variant = IMX8MQ,
- .flags = IMX6_PCIE_FLAG_HAS_APP_RESET |
- IMX6_PCIE_FLAG_HAS_PHY_RESET,
+ .flags = IMX_PCIE_FLAG_HAS_APP_RESET |
+ IMX_PCIE_FLAG_HAS_PHY_RESET |
+ IMX_PCIE_FLAG_SUPPORTS_SUSPEND,
.gpr = "fsl,imx8mq-iomuxc-gpr",
- .clk_names = imx8mq_clks,
- .clks_cnt = ARRAY_SIZE(imx8mq_clks),
.mode_off[0] = IOMUXC_GPR12,
.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
.mode_off[1] = IOMUXC_GPR12,
.mode_mask[1] = IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE,
.init_phy = imx8mq_pcie_init_phy,
+ .enable_ref_clk = imx8mm_pcie_enable_ref_clk,
},
[IMX8MM] = {
.variant = IMX8MM,
- .flags = IMX6_PCIE_FLAG_SUPPORTS_SUSPEND |
- IMX6_PCIE_FLAG_HAS_PHYDRV |
- IMX6_PCIE_FLAG_HAS_APP_RESET,
+ .flags = IMX_PCIE_FLAG_SUPPORTS_SUSPEND |
+ IMX_PCIE_FLAG_HAS_PHYDRV |
+ IMX_PCIE_FLAG_HAS_APP_RESET,
.gpr = "fsl,imx8mm-iomuxc-gpr",
- .clk_names = imx8mm_clks,
- .clks_cnt = ARRAY_SIZE(imx8mm_clks),
.mode_off[0] = IOMUXC_GPR12,
.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
+ .enable_ref_clk = imx8mm_pcie_enable_ref_clk,
},
[IMX8MP] = {
.variant = IMX8MP,
- .flags = IMX6_PCIE_FLAG_SUPPORTS_SUSPEND |
- IMX6_PCIE_FLAG_HAS_PHYDRV |
- IMX6_PCIE_FLAG_HAS_APP_RESET,
+ .flags = IMX_PCIE_FLAG_SUPPORTS_SUSPEND |
+ IMX_PCIE_FLAG_HAS_PHYDRV |
+ IMX_PCIE_FLAG_HAS_APP_RESET,
.gpr = "fsl,imx8mp-iomuxc-gpr",
- .clk_names = imx8mm_clks,
- .clks_cnt = ARRAY_SIZE(imx8mm_clks),
.mode_off[0] = IOMUXC_GPR12,
.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
+ .enable_ref_clk = imx8mm_pcie_enable_ref_clk,
+ },
+ [IMX8Q] = {
+ .variant = IMX8Q,
+ .flags = IMX_PCIE_FLAG_HAS_PHYDRV |
+ IMX_PCIE_FLAG_CPU_ADDR_FIXUP |
+ IMX_PCIE_FLAG_SUPPORTS_SUSPEND,
},
[IMX95] = {
.variant = IMX95,
- .flags = IMX6_PCIE_FLAG_HAS_SERDES,
- .clk_names = imx8mq_clks,
- .clks_cnt = ARRAY_SIZE(imx8mq_clks),
+ .flags = IMX_PCIE_FLAG_HAS_SERDES |
+ IMX_PCIE_FLAG_HAS_LUT |
+ IMX_PCIE_FLAG_SUPPORTS_SUSPEND,
.ltssm_off = IMX95_PE0_GEN_CTRL_3,
.ltssm_mask = IMX95_PCIE_LTSSM_EN,
.mode_off[0] = IMX95_PE0_GEN_CTRL_1,
@@ -1553,47 +1756,50 @@ static const struct imx6_pcie_drvdata drvdata[] = {
},
[IMX8MQ_EP] = {
.variant = IMX8MQ_EP,
- .flags = IMX6_PCIE_FLAG_HAS_APP_RESET |
- IMX6_PCIE_FLAG_HAS_PHY_RESET,
+ .flags = IMX_PCIE_FLAG_HAS_APP_RESET |
+ IMX_PCIE_FLAG_HAS_PHY_RESET,
.mode = DW_PCIE_EP_TYPE,
.gpr = "fsl,imx8mq-iomuxc-gpr",
- .clk_names = imx8mq_clks,
- .clks_cnt = ARRAY_SIZE(imx8mq_clks),
.mode_off[0] = IOMUXC_GPR12,
.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
.mode_off[1] = IOMUXC_GPR12,
.mode_mask[1] = IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE,
.epc_features = &imx8m_pcie_epc_features,
.init_phy = imx8mq_pcie_init_phy,
+ .enable_ref_clk = imx8mm_pcie_enable_ref_clk,
},
[IMX8MM_EP] = {
.variant = IMX8MM_EP,
- .flags = IMX6_PCIE_FLAG_HAS_PHYDRV,
+ .flags = IMX_PCIE_FLAG_HAS_APP_RESET |
+ IMX_PCIE_FLAG_HAS_PHYDRV,
.mode = DW_PCIE_EP_TYPE,
.gpr = "fsl,imx8mm-iomuxc-gpr",
- .clk_names = imx8mm_clks,
- .clks_cnt = ARRAY_SIZE(imx8mm_clks),
.mode_off[0] = IOMUXC_GPR12,
.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
.epc_features = &imx8m_pcie_epc_features,
+ .enable_ref_clk = imx8mm_pcie_enable_ref_clk,
},
[IMX8MP_EP] = {
.variant = IMX8MP_EP,
- .flags = IMX6_PCIE_FLAG_HAS_PHYDRV,
+ .flags = IMX_PCIE_FLAG_HAS_APP_RESET |
+ IMX_PCIE_FLAG_HAS_PHYDRV,
.mode = DW_PCIE_EP_TYPE,
.gpr = "fsl,imx8mp-iomuxc-gpr",
- .clk_names = imx8mm_clks,
- .clks_cnt = ARRAY_SIZE(imx8mm_clks),
.mode_off[0] = IOMUXC_GPR12,
.mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
.epc_features = &imx8m_pcie_epc_features,
+ .enable_ref_clk = imx8mm_pcie_enable_ref_clk,
+ },
+ [IMX8Q_EP] = {
+ .variant = IMX8Q_EP,
+ .flags = IMX_PCIE_FLAG_HAS_PHYDRV,
+ .mode = DW_PCIE_EP_TYPE,
+ .epc_features = &imx8q_pcie_epc_features,
},
[IMX95_EP] = {
.variant = IMX95_EP,
- .flags = IMX6_PCIE_FLAG_HAS_SERDES |
- IMX6_PCIE_FLAG_SUPPORT_64BIT,
- .clk_names = imx8mq_clks,
- .clks_cnt = ARRAY_SIZE(imx8mq_clks),
+ .flags = IMX_PCIE_FLAG_HAS_SERDES |
+ IMX_PCIE_FLAG_SUPPORT_64BIT,
.ltssm_off = IMX95_PE0_GEN_CTRL_3,
.ltssm_mask = IMX95_PCIE_LTSSM_EN,
.mode_off[0] = IMX95_PE0_GEN_CTRL_1,
@@ -1604,7 +1810,7 @@ static const struct imx6_pcie_drvdata drvdata[] = {
},
};
-static const struct of_device_id imx6_pcie_of_match[] = {
+static const struct of_device_id imx_pcie_of_match[] = {
{ .compatible = "fsl,imx6q-pcie", .data = &drvdata[IMX6Q], },
{ .compatible = "fsl,imx6sx-pcie", .data = &drvdata[IMX6SX], },
{ .compatible = "fsl,imx6qp-pcie", .data = &drvdata[IMX6QP], },
@@ -1612,27 +1818,29 @@ static const struct of_device_id imx6_pcie_of_match[] = {
{ .compatible = "fsl,imx8mq-pcie", .data = &drvdata[IMX8MQ], },
{ .compatible = "fsl,imx8mm-pcie", .data = &drvdata[IMX8MM], },
{ .compatible = "fsl,imx8mp-pcie", .data = &drvdata[IMX8MP], },
+ { .compatible = "fsl,imx8q-pcie", .data = &drvdata[IMX8Q], },
{ .compatible = "fsl,imx95-pcie", .data = &drvdata[IMX95], },
{ .compatible = "fsl,imx8mq-pcie-ep", .data = &drvdata[IMX8MQ_EP], },
{ .compatible = "fsl,imx8mm-pcie-ep", .data = &drvdata[IMX8MM_EP], },
{ .compatible = "fsl,imx8mp-pcie-ep", .data = &drvdata[IMX8MP_EP], },
+ { .compatible = "fsl,imx8q-pcie-ep", .data = &drvdata[IMX8Q_EP], },
{ .compatible = "fsl,imx95-pcie-ep", .data = &drvdata[IMX95_EP], },
{},
};
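For orientation, a minimal sketch (not part of this patch) of how an entry from a match table like imx_pcie_of_match is typically retrieved in probe; of_device_get_match_data() is the standard helper for tables registered this way:

	const struct imx_pcie_drvdata *dd;

	dd = of_device_get_match_data(&pdev->dev);	/* matching drvdata[...] entry */
	if (!dd)
		return -ENODEV;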
-static struct platform_driver imx6_pcie_driver = {
+static struct platform_driver imx_pcie_driver = {
.driver = {
.name = "imx6q-pcie",
- .of_match_table = imx6_pcie_of_match,
+ .of_match_table = imx_pcie_of_match,
.suppress_bind_attrs = true,
- .pm = &imx6_pcie_pm_ops,
+ .pm = &imx_pcie_pm_ops,
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
- .probe = imx6_pcie_probe,
- .shutdown = imx6_pcie_shutdown,
+ .probe = imx_pcie_probe,
+ .shutdown = imx_pcie_shutdown,
};
-static void imx6_pcie_quirk(struct pci_dev *dev)
+static void imx_pcie_quirk(struct pci_dev *dev)
{
struct pci_bus *bus = dev->bus;
struct dw_pcie_rp *pp = bus->sysdata;
@@ -1642,33 +1850,33 @@ static void imx6_pcie_quirk(struct pci_dev *dev)
return;
/* Make sure we only quirk devices associated with this driver */
- if (bus->dev.parent->parent->driver != &imx6_pcie_driver.driver)
+ if (bus->dev.parent->parent->driver != &imx_pcie_driver.driver)
return;
if (pci_is_root_bus(bus)) {
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);
+ struct imx_pcie *imx_pcie = to_imx_pcie(pci);
/*
* Limit config length to avoid the kernel reading beyond
* the register set and causing an abort on i.MX 6Quad
*/
- if (imx6_pcie->drvdata->dbi_length) {
- dev->cfg_size = imx6_pcie->drvdata->dbi_length;
+ if (imx_pcie->drvdata->dbi_length) {
+ dev->cfg_size = imx_pcie->drvdata->dbi_length;
dev_info(&dev->dev, "Limiting cfg_size to %d\n",
dev->cfg_size);
}
}
}
DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_SYNOPSYS, 0xabcd,
- PCI_CLASS_BRIDGE_PCI, 8, imx6_pcie_quirk);
+ PCI_CLASS_BRIDGE_PCI, 8, imx_pcie_quirk);
-static int __init imx6_pcie_init(void)
+static int __init imx_pcie_init(void)
{
#ifdef CONFIG_ARM
struct device_node *np;
- np = of_find_matching_node(NULL, imx6_pcie_of_match);
+ np = of_find_matching_node(NULL, imx_pcie_of_match);
if (!np)
return -ENODEV;
of_node_put(np);
@@ -1684,6 +1892,6 @@ static int __init imx6_pcie_init(void)
"external abort on non-linefetch");
#endif
- return platform_driver_register(&imx6_pcie_driver);
+ return platform_driver_register(&imx_pcie_driver);
}
-device_initcall(imx6_pcie_init);
+device_initcall(imx_pcie_init);
diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c
index 844de4418724..1385d9db7b32 100644
--- a/drivers/pci/controller/dwc/pci-keystone.c
+++ b/drivers/pci/controller/dwc/pci-keystone.c
@@ -34,6 +34,11 @@
#define PCIE_DEVICEID_SHIFT 16
/* Application registers */
+#define PID 0x000
+#define RTL GENMASK(15, 11)
+#define RTL_SHIFT 11
+#define AM6_PCI_PG1_RTL_VER 0x15
+
#define CMD_STATUS 0x004
#define LTSSM_EN_VAL BIT(0)
#define OB_XLAT_EN_VAL BIT(1)
@@ -104,6 +109,8 @@
#define to_keystone_pcie(x) dev_get_drvdata((x)->dev)
+#define PCI_DEVICE_ID_TI_AM654X 0xb00c
+
struct ks_pcie_of_data {
enum dw_pcie_device_mode mode;
const struct dw_pcie_host_ops *host_ops;
@@ -182,12 +189,6 @@ static void ks_pcie_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
(int)data->hwirq, msg->address_hi, msg->address_lo);
}
-static int ks_pcie_msi_set_affinity(struct irq_data *irq_data,
- const struct cpumask *mask, bool force)
-{
- return -EINVAL;
-}
-
static void ks_pcie_msi_mask(struct irq_data *data)
{
struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(data);
@@ -240,13 +241,72 @@ static struct irq_chip ks_pcie_msi_irq_chip = {
.name = "KEYSTONE-PCI-MSI",
.irq_ack = ks_pcie_msi_irq_ack,
.irq_compose_msi_msg = ks_pcie_compose_msi_msg,
- .irq_set_affinity = ks_pcie_msi_set_affinity,
.irq_mask = ks_pcie_msi_mask,
.irq_unmask = ks_pcie_msi_unmask,
};
+/**
+ * ks_pcie_set_dbi_mode() - Set DBI mode to access overlaid BAR mask registers
+ * @ks_pcie: A pointer to the keystone_pcie structure which holds the KeyStone
+ * PCIe host controller driver information.
+ *
+ * Since modification of dbi_cs2 involves a different clock domain, read the
+ * status back to ensure the transition is complete.
+ */
+static void ks_pcie_set_dbi_mode(struct keystone_pcie *ks_pcie)
+{
+ u32 val;
+
+ val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+ val |= DBI_CS2;
+ ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
+
+ do {
+ val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+ } while (!(val & DBI_CS2));
+}
+
+/**
+ * ks_pcie_clear_dbi_mode() - Disable DBI mode
+ * @ks_pcie: A pointer to the keystone_pcie structure which holds the KeyStone
+ * PCIe host controller driver information.
+ *
+ * Since modification of dbi_cs2 involves a different clock domain, read the
+ * status back to ensure the transition is complete.
+ */
+static void ks_pcie_clear_dbi_mode(struct keystone_pcie *ks_pcie)
+{
+ u32 val;
+
+ val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+ val &= ~DBI_CS2;
+ ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
+
+ do {
+ val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+ } while (val & DBI_CS2);
+}
+
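The two dbi_cs2 helpers above poll unboundedly until the write is observed on the other clock domain. As a hedged alternative, a minimal sketch of the "set" direction using read_poll_timeout_atomic() from <linux/iopoll.h>, with a hypothetical 100us budget (not what the driver does):

	u32 val;

	val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
	ks_pcie_app_writel(ks_pcie, CMD_STATUS, val | DBI_CS2);
	/* poll CMD_STATUS until DBI_CS2 reads back as set, or time out */
	return read_poll_timeout_atomic(ks_pcie_app_readl, val, val & DBI_CS2,
					0, 100, false, ks_pcie, CMD_STATUS);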
static int ks_pcie_msi_host_init(struct dw_pcie_rp *pp)
{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
+
+ /* Configure and set up BAR0 */
+ ks_pcie_set_dbi_mode(ks_pcie);
+
+ /* Enable BAR0 */
+ dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 1);
+ dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, SZ_4K - 1);
+
+ ks_pcie_clear_dbi_mode(ks_pcie);
+
+ /*
+ * For BAR0, just setting the bus address for inbound writes (MSI) should
+ * be sufficient. Use the physical address to avoid any conflicts.
+ */
+ dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, ks_pcie->app.start);
+
pp->msi_irq_chip = &ks_pcie_msi_irq_chip;
return dw_pcie_allocate_domains(pp);
}
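While dbi_cs2 is set, the BAR registers are overlaid by their mask registers, so the two PCI_BASE_ADDRESS_0 writes above first set the enable bit and then program the mask: SZ_4K - 1 = 0xfff yields a 4 KiB inbound aperture (and keeps bit 0, the enable bit, set). Once CS2 mode is cleared, the ordinary BAR0 register is written with the physical address of the application registers, so inbound MSI writes land on the MSI_IRQ register.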
@@ -340,59 +400,22 @@ static const struct irq_domain_ops ks_pcie_intx_irq_domain_ops = {
.xlate = irq_domain_xlate_onetwocell,
};
-/**
- * ks_pcie_set_dbi_mode() - Set DBI mode to access overlaid BAR mask registers
- * @ks_pcie: A pointer to the keystone_pcie structure which holds the KeyStone
- * PCIe host controller driver information.
- *
- * Since modification of dbi_cs2 involves different clock domain, read the
- * status back to ensure the transition is complete.
- */
-static void ks_pcie_set_dbi_mode(struct keystone_pcie *ks_pcie)
-{
- u32 val;
-
- val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
- val |= DBI_CS2;
- ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
-
- do {
- val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
- } while (!(val & DBI_CS2));
-}
-
-/**
- * ks_pcie_clear_dbi_mode() - Disable DBI mode
- * @ks_pcie: A pointer to the keystone_pcie structure which holds the KeyStone
- * PCIe host controller driver information.
- *
- * Since modification of dbi_cs2 involves different clock domain, read the
- * status back to ensure the transition is complete.
- */
-static void ks_pcie_clear_dbi_mode(struct keystone_pcie *ks_pcie)
-{
- u32 val;
-
- val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
- val &= ~DBI_CS2;
- ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
-
- do {
- val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
- } while (val & DBI_CS2);
-}
-
-static void ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
+static int ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
{
u32 val;
u32 num_viewport = ks_pcie->num_viewport;
struct dw_pcie *pci = ks_pcie->pci;
struct dw_pcie_rp *pp = &pci->pp;
- u64 start, end;
+ struct resource_entry *entry;
struct resource *mem;
+ u64 start, end;
int i;
- mem = resource_list_first_type(&pp->bridge->windows, IORESOURCE_MEM)->res;
+ entry = resource_list_first_type(&pp->bridge->windows, IORESOURCE_MEM);
+ if (!entry)
+ return -ENODEV;
+
+ mem = entry->res;
start = mem->start;
end = mem->end;
@@ -403,7 +426,7 @@ static void ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
ks_pcie_clear_dbi_mode(ks_pcie);
if (ks_pcie->is_am6)
- return;
+ return 0;
val = ilog2(OB_WIN_SIZE);
ks_pcie_app_writel(ks_pcie, OB_SIZE, val);
@@ -420,6 +443,8 @@ static void ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
val |= OB_XLAT_EN_VAL;
ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
+
+ return 0;
}
static void __iomem *ks_pcie_other_map_bus(struct pci_bus *bus,
@@ -430,6 +455,17 @@ static void __iomem *ks_pcie_other_map_bus(struct pci_bus *bus,
struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
u32 reg;
+ /*
+ * Checking whether the link is up here is a last line of defense
+ * against platforms that forward errors on the system bus as
+ * SError upon PCI configuration transactions issued when the link
+ * is down. This check is racy by definition and does not stop
+ * the system from triggering an SError if the link goes down
+ * after this check is performed.
+ */
+ if (!dw_pcie_link_up(pci))
+ return NULL;
+
reg = CFG_BUS(bus->number) | CFG_DEVICE(PCI_SLOT(devfn)) |
CFG_FUNC(PCI_FUNC(devfn));
if (!pci_is_root_bus(bus->parent))
@@ -445,44 +481,10 @@ static struct pci_ops ks_child_pcie_ops = {
.write = pci_generic_config_write,
};
-/**
- * ks_pcie_v3_65_add_bus() - keystone add_bus post initialization
- * @bus: A pointer to the PCI bus structure.
- *
- * This sets BAR0 to enable inbound access for MSI_IRQ register
- */
-static int ks_pcie_v3_65_add_bus(struct pci_bus *bus)
-{
- struct dw_pcie_rp *pp = bus->sysdata;
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
-
- if (!pci_is_root_bus(bus))
- return 0;
-
- /* Configure and set up BAR0 */
- ks_pcie_set_dbi_mode(ks_pcie);
-
- /* Enable BAR0 */
- dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 1);
- dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, SZ_4K - 1);
-
- ks_pcie_clear_dbi_mode(ks_pcie);
-
- /*
- * For BAR0, just setting bus address for inbound writes (MSI) should
- * be sufficient. Use physical address to avoid any conflicts.
- */
- dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, ks_pcie->app.start);
-
- return 0;
-}
-
static struct pci_ops ks_pcie_ops = {
.map_bus = dw_pcie_own_conf_map_bus,
.read = pci_generic_config_read,
.write = pci_generic_config_write,
- .add_bus = ks_pcie_v3_65_add_bus,
};
/**
@@ -525,7 +527,11 @@ static int ks_pcie_start_link(struct dw_pcie *pci)
static void ks_pcie_quirk(struct pci_dev *dev)
{
struct pci_bus *bus = dev->bus;
+ struct keystone_pcie *ks_pcie;
+ struct device *bridge_dev;
struct pci_dev *bridge;
+ u32 val;
+
static const struct pci_device_id rc_pci_devids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2HK),
.class = PCI_CLASS_BRIDGE_PCI_NORMAL, .class_mask = ~0, },
@@ -537,6 +543,11 @@ static void ks_pcie_quirk(struct pci_dev *dev)
.class = PCI_CLASS_BRIDGE_PCI_NORMAL, .class_mask = ~0, },
{ 0, },
};
+ static const struct pci_device_id am6_pci_devids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM654X),
+ .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
+ { 0, },
+ };
if (pci_is_root_bus(bus))
bridge = dev;
@@ -558,10 +569,36 @@ static void ks_pcie_quirk(struct pci_dev *dev)
*/
if (pci_match_id(rc_pci_devids, bridge)) {
if (pcie_get_readrq(dev) > 256) {
- dev_info(&dev->dev, "limiting MRRS to 256\n");
+ dev_info(&dev->dev, "limiting MRRS to 256 bytes\n");
pcie_set_readrq(dev, 256);
}
}
+
+ /*
+ * Memory transactions fail with PCI controller in AM654 PG1.0
+ * when MRRS is set to more than 128 bytes. Force the MRRS to
+ * 128 bytes in all downstream devices.
+ */
+ if (pci_match_id(am6_pci_devids, bridge)) {
+ bridge_dev = pci_get_host_bridge_device(dev);
+ if (!bridge_dev || !bridge_dev->parent)
+ return;
+
+ ks_pcie = dev_get_drvdata(bridge_dev->parent);
+ if (!ks_pcie)
+ return;
+
+ val = ks_pcie_app_readl(ks_pcie, PID);
+ val &= RTL;
+ val >>= RTL_SHIFT;
+ if (val != AM6_PCI_PG1_RTL_VER)
+ return;
+
+ if (pcie_get_readrq(dev) > 128) {
+ dev_info(&dev->dev, "limiting MRRS to 128 bytes\n");
+ pcie_set_readrq(dev, 128);
+ }
+ }
}
DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, ks_pcie_quirk);
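The PID read-mask-shift sequence in the AM654 branch above is equivalent to a single FIELD_GET(); a sketch, assuming <linux/bitfield.h> is available (the driver already uses GENMASK):

	/* RTL is GENMASK(15, 11); FIELD_GET() applies the shift as well */
	if (FIELD_GET(RTL, ks_pcie_app_readl(ks_pcie, PID)) != AM6_PCI_PG1_RTL_VER)
		return;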
@@ -724,7 +761,7 @@ static int ks_pcie_config_intx_irq(struct keystone_pcie *ks_pcie)
ks_pcie);
}
- intx_irq_domain = irq_domain_add_linear(intc_np, PCI_NUM_INTX,
+ intx_irq_domain = irq_domain_create_linear(of_fwnode_handle(intc_np), PCI_NUM_INTX,
&ks_pcie_intx_irq_domain_ops, NULL);
if (!intx_irq_domain) {
dev_err(dev, "Failed to add irq domain for INTX irqs\n");
@@ -814,7 +851,10 @@ static int __init ks_pcie_host_init(struct dw_pcie_rp *pp)
return ret;
ks_pcie_stop_link(pci);
- ks_pcie_setup_rc_app_regs(ks_pcie);
+ ret = ks_pcie_setup_rc_app_regs(ks_pcie);
+ if (ret)
+ return ret;
+
writew(PCI_IO_RANGE_TYPE_32 | (PCI_IO_RANGE_TYPE_32 << 8),
pci->dbi_base + PCI_IO_BASE);
@@ -926,11 +966,11 @@ static const struct pci_epc_features ks_pcie_am654_epc_features = {
.msix_capable = true,
.bar[BAR_0] = { .type = BAR_RESERVED, },
.bar[BAR_1] = { .type = BAR_RESERVED, },
- .bar[BAR_2] = { .type = BAR_FIXED, .fixed_size = SZ_1M, },
+ .bar[BAR_2] = { .type = BAR_RESIZABLE, },
.bar[BAR_3] = { .type = BAR_FIXED, .fixed_size = SZ_64K, },
.bar[BAR_4] = { .type = BAR_FIXED, .fixed_size = 256, },
- .bar[BAR_5] = { .type = BAR_FIXED, .fixed_size = SZ_1M, },
- .align = SZ_1M,
+ .bar[BAR_5] = { .type = BAR_RESIZABLE, },
+ .align = SZ_64K,
};
static const struct pci_epc_features*
@@ -1064,6 +1104,7 @@ static int ks_pcie_am654_set_mode(struct device *dev,
static const struct ks_pcie_of_data ks_pcie_rc_of_data = {
.host_ops = &ks_pcie_host_ops,
+ .mode = DW_PCIE_RC_TYPE,
.version = DW_PCIE_VER_365A,
};
@@ -1286,6 +1327,15 @@ static int ks_pcie_probe(struct platform_device *pdev)
ret = dw_pcie_ep_init(&pci->ep);
if (ret < 0)
goto err_get_sync;
+
+ ret = dw_pcie_ep_init_registers(&pci->ep);
+ if (ret) {
+ dev_err(dev, "Failed to initialize DWC endpoint registers\n");
+ goto err_ep_init;
+ }
+
+ pci_epc_init_notify(pci->ep.epc);
+
break;
default:
dev_err(dev, "INVALID device type %d\n", mode);
@@ -1295,6 +1345,8 @@ static int ks_pcie_probe(struct platform_device *pdev)
return 0;
+err_ep_init:
+ dw_pcie_ep_deinit(&pci->ep);
err_get_sync:
pm_runtime_put(dev);
pm_runtime_disable(dev);
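The endpoint bring-up added here follows a fixed ordering that recurs in the layerscape and artpec6 hunks below; a condensed sketch of the contract as used in this series:

	ret = dw_pcie_ep_init(&pci->ep);		/* parse resources, create the EPC */
	if (ret)
		return ret;
	ret = dw_pcie_ep_init_registers(&pci->ep);	/* DBI access: clocks must be up */
	if (ret) {
		dw_pcie_ep_deinit(&pci->ep);
		return ret;
	}
	pci_epc_init_notify(pci->ep.epc);		/* EPF drivers may now configure BARs */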
@@ -1323,7 +1375,7 @@ static void ks_pcie_remove(struct platform_device *pdev)
static struct platform_driver ks_pcie_driver = {
.probe = ks_pcie_probe,
- .remove_new = ks_pcie_remove,
+ .remove = ks_pcie_remove,
.driver = {
.name = "keystone-pcie",
.of_match_table = ks_pcie_of_match,
diff --git a/drivers/pci/controller/dwc/pci-layerscape-ep.c b/drivers/pci/controller/dwc/pci-layerscape-ep.c
index 1f6ee1460ec2..a4a800699f89 100644
--- a/drivers/pci/controller/dwc/pci-layerscape-ep.c
+++ b/drivers/pci/controller/dwc/pci-layerscape-ep.c
@@ -104,7 +104,7 @@ static irqreturn_t ls_pcie_ep_event_handler(int irq, void *dev_id)
dev_dbg(pci->dev, "Link up\n");
} else if (val & PEX_PF0_PME_MES_DR_LDD) {
dev_dbg(pci->dev, "Link down\n");
- pci_epc_linkdown(pci->ep.epc);
+ dw_pcie_ep_linkdown(&pci->ep);
} else if (val & PEX_PF0_PME_MES_DR_HRD) {
dev_dbg(pci->dev, "Hot reset\n");
}
@@ -279,6 +279,15 @@ static int __init ls_pcie_ep_probe(struct platform_device *pdev)
if (ret)
return ret;
+ ret = dw_pcie_ep_init_registers(&pci->ep);
+ if (ret) {
+ dev_err(dev, "Failed to initialize DWC endpoint registers\n");
+ dw_pcie_ep_deinit(&pci->ep);
+ return ret;
+ }
+
+ pci_epc_init_notify(pci->ep.epc);
+
return ls_pcie_ep_interrupt_init(pcie, pdev);
}
diff --git a/drivers/pci/controller/dwc/pci-layerscape.c b/drivers/pci/controller/dwc/pci-layerscape.c
index ee6f52568133..a44b5c256d6e 100644
--- a/drivers/pci/controller/dwc/pci-layerscape.c
+++ b/drivers/pci/controller/dwc/pci-layerscape.c
@@ -329,7 +329,6 @@ static int ls_pcie_probe(struct platform_device *pdev)
struct ls_pcie *pcie;
struct resource *dbi_base;
u32 index[2];
- int ret;
pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
if (!pcie)
@@ -355,16 +354,15 @@ static int ls_pcie_probe(struct platform_device *pdev)
pcie->pf_lut_base = pci->dbi_base + pcie->drvdata->pf_lut_off;
if (pcie->drvdata->scfg_support) {
- pcie->scfg = syscon_regmap_lookup_by_phandle(dev->of_node, "fsl,pcie-scfg");
+ pcie->scfg =
+ syscon_regmap_lookup_by_phandle_args(dev->of_node,
+ "fsl,pcie-scfg", 1,
+ index);
if (IS_ERR(pcie->scfg)) {
dev_err(dev, "No syscfg phandle specified\n");
return PTR_ERR(pcie->scfg);
}
- ret = of_property_read_u32_array(dev->of_node, "fsl,pcie-scfg", index, 2);
- if (ret)
- return ret;
-
pcie->index = index[1];
}
diff --git a/drivers/pci/controller/dwc/pci-meson.c b/drivers/pci/controller/dwc/pci-meson.c
index 6477c83262c2..db9482a113e9 100644
--- a/drivers/pci/controller/dwc/pci-meson.c
+++ b/drivers/pci/controller/dwc/pci-meson.c
@@ -9,7 +9,6 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
-#include <linux/of_gpio.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
diff --git a/drivers/pci/controller/dwc/pcie-al.c b/drivers/pci/controller/dwc/pcie-al.c
index 6dfdda59f328..643115f74092 100644
--- a/drivers/pci/controller/dwc/pcie-al.c
+++ b/drivers/pci/controller/dwc/pcie-al.c
@@ -242,18 +242,24 @@ static struct pci_ops al_child_pci_ops = {
.write = pci_generic_config_write,
};
-static void al_pcie_config_prepare(struct al_pcie *pcie)
+static int al_pcie_config_prepare(struct al_pcie *pcie)
{
struct al_pcie_target_bus_cfg *target_bus_cfg;
struct dw_pcie_rp *pp = &pcie->pci->pp;
unsigned int ecam_bus_mask;
+ struct resource_entry *ft;
u32 cfg_control_offset;
+ struct resource *bus;
u8 subordinate_bus;
u8 secondary_bus;
u32 cfg_control;
u32 reg;
- struct resource *bus = resource_list_first_type(&pp->bridge->windows, IORESOURCE_BUS)->res;
+ ft = resource_list_first_type(&pp->bridge->windows, IORESOURCE_BUS);
+ if (!ft)
+ return -ENODEV;
+
+ bus = ft->res;
target_bus_cfg = &pcie->target_bus_cfg;
ecam_bus_mask = (pcie->ecam_size >> PCIE_ECAM_BUS_SHIFT) - 1;
@@ -287,6 +293,8 @@ static void al_pcie_config_prepare(struct al_pcie *pcie)
FIELD_PREP(CFG_CONTROL_SEC_BUS_MASK, secondary_bus);
al_pcie_controller_writel(pcie, cfg_control_offset, reg);
+
+ return 0;
}
static int al_pcie_host_init(struct dw_pcie_rp *pp)
@@ -305,7 +313,9 @@ static int al_pcie_host_init(struct dw_pcie_rp *pp)
if (rc)
return rc;
- al_pcie_config_prepare(pcie);
+ rc = al_pcie_config_prepare(pcie);
+ if (rc)
+ return rc;
return 0;
}
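This hunk and the keystone change above apply the same hardening pattern: resource_list_first_type() returns NULL when the bridge window list has no entry of the requested type (for example with a malformed devicetree), so it must be checked before dereferencing. Distilled:

	struct resource_entry *entry;

	entry = resource_list_first_type(&pp->bridge->windows, IORESOURCE_BUS);
	if (!entry)
		return -ENODEV;	/* no window of that type was described */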
diff --git a/drivers/pci/controller/dwc/pcie-amd-mdb.c b/drivers/pci/controller/dwc/pcie-amd-mdb.c
new file mode 100644
index 000000000000..9f7251a16d32
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-amd-mdb.c
@@ -0,0 +1,476 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host controller driver for AMD MDB PCIe Bridge
+ *
+ * Copyright (C) 2024-2025, Advanced Micro Devices, Inc.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/of_device.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/resource.h>
+#include <linux/types.h>
+
+#include "pcie-designware.h"
+
+#define AMD_MDB_TLP_IR_STATUS_MISC 0x4C0
+#define AMD_MDB_TLP_IR_MASK_MISC 0x4C4
+#define AMD_MDB_TLP_IR_ENABLE_MISC 0x4C8
+#define AMD_MDB_TLP_IR_DISABLE_MISC 0x4CC
+
+#define AMD_MDB_TLP_PCIE_INTX_MASK GENMASK(23, 16)
+
+#define AMD_MDB_PCIE_INTR_INTX_ASSERT(x) BIT((x) * 2)
+
+/* Interrupt registers definitions. */
+#define AMD_MDB_PCIE_INTR_CMPL_TIMEOUT 15
+#define AMD_MDB_PCIE_INTR_INTX 16
+#define AMD_MDB_PCIE_INTR_PM_PME_RCVD 24
+#define AMD_MDB_PCIE_INTR_PME_TO_ACK_RCVD 25
+#define AMD_MDB_PCIE_INTR_MISC_CORRECTABLE 26
+#define AMD_MDB_PCIE_INTR_NONFATAL 27
+#define AMD_MDB_PCIE_INTR_FATAL 28
+
+#define IMR(x) BIT(AMD_MDB_PCIE_INTR_ ##x)
+#define AMD_MDB_PCIE_IMR_ALL_MASK \
+ ( \
+ IMR(CMPL_TIMEOUT) | \
+ IMR(PM_PME_RCVD) | \
+ IMR(PME_TO_ACK_RCVD) | \
+ IMR(MISC_CORRECTABLE) | \
+ IMR(NONFATAL) | \
+ IMR(FATAL) | \
+ AMD_MDB_TLP_PCIE_INTX_MASK \
+ )
+
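Spelling out one expansion of the IMR() helper above (values from the definitions earlier in this file):

	/*
	 * IMR(FATAL) -> BIT(AMD_MDB_PCIE_INTR_FATAL) -> BIT(28)
	 *
	 * AMD_MDB_PCIE_IMR_ALL_MASK therefore ORs the six listed event bits
	 * with the 8-bit INTx field at bits 23:16.
	 */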
+/**
+ * struct amd_mdb_pcie - PCIe port information
+ * @pci: DesignWare PCIe controller structure
+ * @slcr: MDB System Level Control and Status Register (SLCR) base
+ * @intx_domain: INTx IRQ domain pointer
+ * @mdb_domain: MDB IRQ domain pointer
+ * @intx_irq: INTx IRQ interrupt number
+ */
+struct amd_mdb_pcie {
+ struct dw_pcie pci;
+ void __iomem *slcr;
+ struct irq_domain *intx_domain;
+ struct irq_domain *mdb_domain;
+ int intx_irq;
+};
+
+static const struct dw_pcie_host_ops amd_mdb_pcie_host_ops = {
+};
+
+static void amd_mdb_intx_irq_mask(struct irq_data *data)
+{
+ struct amd_mdb_pcie *pcie = irq_data_get_irq_chip_data(data);
+ struct dw_pcie *pci = &pcie->pci;
+ struct dw_pcie_rp *port = &pci->pp;
+ unsigned long flags;
+ u32 val;
+
+ raw_spin_lock_irqsave(&port->lock, flags);
+ val = FIELD_PREP(AMD_MDB_TLP_PCIE_INTX_MASK,
+ AMD_MDB_PCIE_INTR_INTX_ASSERT(data->hwirq));
+
+ /*
+ * Writing '1' to a bit in AMD_MDB_TLP_IR_DISABLE_MISC disables that
+ * interrupt, writing '0' has no effect.
+ */
+ writel_relaxed(val, pcie->slcr + AMD_MDB_TLP_IR_DISABLE_MISC);
+ raw_spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static void amd_mdb_intx_irq_unmask(struct irq_data *data)
+{
+ struct amd_mdb_pcie *pcie = irq_data_get_irq_chip_data(data);
+ struct dw_pcie *pci = &pcie->pci;
+ struct dw_pcie_rp *port = &pci->pp;
+ unsigned long flags;
+ u32 val;
+
+ raw_spin_lock_irqsave(&port->lock, flags);
+ val = FIELD_PREP(AMD_MDB_TLP_PCIE_INTX_MASK,
+ AMD_MDB_PCIE_INTR_INTX_ASSERT(data->hwirq));
+
+ /*
+ * Writing '1' to a bit in AMD_MDB_TLP_IR_ENABLE_MISC enables that
+ * interrupt, writing '0' has no effect.
+ */
+ writel_relaxed(val, pcie->slcr + AMD_MDB_TLP_IR_ENABLE_MISC);
+ raw_spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static struct irq_chip amd_mdb_intx_irq_chip = {
+ .name = "AMD MDB INTx",
+ .irq_mask = amd_mdb_intx_irq_mask,
+ .irq_unmask = amd_mdb_intx_irq_unmask,
+};
+
+/**
+ * amd_mdb_pcie_intx_map - Set the handler for the INTx and mark IRQ as valid
+ * @domain: IRQ domain
+ * @irq: Virtual IRQ number
+ * @hwirq: Hardware interrupt number
+ *
+ * Return: Always returns '0'.
+ */
+static int amd_mdb_pcie_intx_map(struct irq_domain *domain,
+ unsigned int irq, irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &amd_mdb_intx_irq_chip,
+ handle_level_irq);
+ irq_set_chip_data(irq, domain->host_data);
+ irq_set_status_flags(irq, IRQ_LEVEL);
+
+ return 0;
+}
+
+/* INTx IRQ domain operations. */
+static const struct irq_domain_ops amd_intx_domain_ops = {
+ .map = amd_mdb_pcie_intx_map,
+};
+
+static irqreturn_t dw_pcie_rp_intx(int irq, void *args)
+{
+ struct amd_mdb_pcie *pcie = args;
+ unsigned long val;
+ int i, int_status;
+
+ val = readl_relaxed(pcie->slcr + AMD_MDB_TLP_IR_STATUS_MISC);
+ int_status = FIELD_GET(AMD_MDB_TLP_PCIE_INTX_MASK, val);
+
+ for (i = 0; i < PCI_NUM_INTX; i++) {
+ if (int_status & AMD_MDB_PCIE_INTR_INTX_ASSERT(i))
+ generic_handle_domain_irq(pcie->intx_domain, i);
+ }
+
+ return IRQ_HANDLED;
+}
+
+#define _IC(x, s) [AMD_MDB_PCIE_INTR_ ## x] = { __stringify(x), s }
+
+static const struct {
+ const char *sym;
+ const char *str;
+} intr_cause[32] = {
+ _IC(CMPL_TIMEOUT, "Completion timeout"),
+ _IC(PM_PME_RCVD, "PM_PME message received"),
+ _IC(PME_TO_ACK_RCVD, "PME_TO_ACK message received"),
+ _IC(MISC_CORRECTABLE, "Correctable error message"),
+ _IC(NONFATAL, "Non fatal error message"),
+ _IC(FATAL, "Fatal error message"),
+};
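For clarity, one entry of the _IC() designated-initializer macro above expands as:

	/*
	 * _IC(FATAL, "Fatal error message")
	 *   -> [AMD_MDB_PCIE_INTR_FATAL] = { "FATAL", "Fatal error message" }
	 *
	 * The table is indexed by hwirq; slots with no initializer stay
	 * zeroed, which amd_mdb_pcie_intr_handler() below relies on to flag
	 * unknown causes.
	 */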
+
+static void amd_mdb_event_irq_mask(struct irq_data *d)
+{
+ struct amd_mdb_pcie *pcie = irq_data_get_irq_chip_data(d);
+ struct dw_pcie *pci = &pcie->pci;
+ struct dw_pcie_rp *port = &pci->pp;
+ unsigned long flags;
+ u32 val;
+
+ raw_spin_lock_irqsave(&port->lock, flags);
+ val = BIT(d->hwirq);
+ writel_relaxed(val, pcie->slcr + AMD_MDB_TLP_IR_DISABLE_MISC);
+ raw_spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static void amd_mdb_event_irq_unmask(struct irq_data *d)
+{
+ struct amd_mdb_pcie *pcie = irq_data_get_irq_chip_data(d);
+ struct dw_pcie *pci = &pcie->pci;
+ struct dw_pcie_rp *port = &pci->pp;
+ unsigned long flags;
+ u32 val;
+
+ raw_spin_lock_irqsave(&port->lock, flags);
+ val = BIT(d->hwirq);
+ writel_relaxed(val, pcie->slcr + AMD_MDB_TLP_IR_ENABLE_MISC);
+ raw_spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static struct irq_chip amd_mdb_event_irq_chip = {
+ .name = "AMD MDB RC-Event",
+ .irq_mask = amd_mdb_event_irq_mask,
+ .irq_unmask = amd_mdb_event_irq_unmask,
+};
+
+static int amd_mdb_pcie_event_map(struct irq_domain *domain,
+ unsigned int irq, irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &amd_mdb_event_irq_chip,
+ handle_level_irq);
+ irq_set_chip_data(irq, domain->host_data);
+ irq_set_status_flags(irq, IRQ_LEVEL);
+
+ return 0;
+}
+
+static const struct irq_domain_ops event_domain_ops = {
+ .map = amd_mdb_pcie_event_map,
+};
+
+static irqreturn_t amd_mdb_pcie_event(int irq, void *args)
+{
+ struct amd_mdb_pcie *pcie = args;
+ unsigned long val;
+ int i;
+
+ val = readl_relaxed(pcie->slcr + AMD_MDB_TLP_IR_STATUS_MISC);
+ val &= ~readl_relaxed(pcie->slcr + AMD_MDB_TLP_IR_MASK_MISC);
+ for_each_set_bit(i, &val, 32)
+ generic_handle_domain_irq(pcie->mdb_domain, i);
+ writel_relaxed(val, pcie->slcr + AMD_MDB_TLP_IR_STATUS_MISC);
+
+ return IRQ_HANDLED;
+}
+
+static void amd_mdb_pcie_free_irq_domains(struct amd_mdb_pcie *pcie)
+{
+ if (pcie->intx_domain) {
+ irq_domain_remove(pcie->intx_domain);
+ pcie->intx_domain = NULL;
+ }
+
+ if (pcie->mdb_domain) {
+ irq_domain_remove(pcie->mdb_domain);
+ pcie->mdb_domain = NULL;
+ }
+}
+
+static int amd_mdb_pcie_init_port(struct amd_mdb_pcie *pcie)
+{
+ unsigned long val;
+
+ /* Disable all TLP interrupts. */
+ writel_relaxed(AMD_MDB_PCIE_IMR_ALL_MASK,
+ pcie->slcr + AMD_MDB_TLP_IR_DISABLE_MISC);
+
+ /* Clear pending TLP interrupts. */
+ val = readl_relaxed(pcie->slcr + AMD_MDB_TLP_IR_STATUS_MISC);
+ val &= AMD_MDB_PCIE_IMR_ALL_MASK;
+ writel_relaxed(val, pcie->slcr + AMD_MDB_TLP_IR_STATUS_MISC);
+
+ /* Enable all TLP interrupts. */
+ writel_relaxed(AMD_MDB_PCIE_IMR_ALL_MASK,
+ pcie->slcr + AMD_MDB_TLP_IR_ENABLE_MISC);
+
+ return 0;
+}
+
+/**
+ * amd_mdb_pcie_init_irq_domains - Initialize IRQ domain
+ * @pcie: PCIe port information
+ * @pdev: Platform device
+ *
+ * Return: Returns '0' on success and error value on failure.
+ */
+static int amd_mdb_pcie_init_irq_domains(struct amd_mdb_pcie *pcie,
+ struct platform_device *pdev)
+{
+ struct dw_pcie *pci = &pcie->pci;
+ struct dw_pcie_rp *pp = &pci->pp;
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct device_node *pcie_intc_node;
+ int err;
+
+ pcie_intc_node = of_get_next_child(node, NULL);
+ if (!pcie_intc_node) {
+ dev_err(dev, "No PCIe Intc node found\n");
+ return -ENODEV;
+ }
+
+ pcie->mdb_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node), 32,
+ &event_domain_ops, pcie);
+ if (!pcie->mdb_domain) {
+ err = -ENOMEM;
+ dev_err(dev, "Failed to add MDB domain\n");
+ goto out;
+ }
+
+ irq_domain_update_bus_token(pcie->mdb_domain, DOMAIN_BUS_NEXUS);
+
+ pcie->intx_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node),
+ PCI_NUM_INTX, &amd_intx_domain_ops, pcie);
+ if (!pcie->intx_domain) {
+ err = -ENOMEM;
+ dev_err(dev, "Failed to add INTx domain\n");
+ goto mdb_out;
+ }
+
+ of_node_put(pcie_intc_node);
+ irq_domain_update_bus_token(pcie->intx_domain, DOMAIN_BUS_WIRED);
+
+ raw_spin_lock_init(&pp->lock);
+
+ return 0;
+mdb_out:
+ amd_mdb_pcie_free_irq_domains(pcie);
+out:
+ of_node_put(pcie_intc_node);
+ return err;
+}
+
+static irqreturn_t amd_mdb_pcie_intr_handler(int irq, void *args)
+{
+ struct amd_mdb_pcie *pcie = args;
+ struct device *dev;
+ struct irq_data *d;
+
+ dev = pcie->pci.dev;
+
+ /*
+ * In the future, error reporting will be hooked to the AER subsystem.
+ * Currently, the driver prints a warning message to the user.
+ */
+ d = irq_domain_get_irq_data(pcie->mdb_domain, irq);
+ if (intr_cause[d->hwirq].str)
+ dev_warn(dev, "%s\n", intr_cause[d->hwirq].str);
+ else
+ dev_warn_once(dev, "Unknown IRQ %ld\n", d->hwirq);
+
+ return IRQ_HANDLED;
+}
+
+static int amd_mdb_setup_irq(struct amd_mdb_pcie *pcie,
+ struct platform_device *pdev)
+{
+ struct dw_pcie *pci = &pcie->pci;
+ struct dw_pcie_rp *pp = &pci->pp;
+ struct device *dev = &pdev->dev;
+ int i, irq, err;
+
+ amd_mdb_pcie_init_port(pcie);
+
+ pp->irq = platform_get_irq(pdev, 0);
+ if (pp->irq < 0)
+ return pp->irq;
+
+ for (i = 0; i < ARRAY_SIZE(intr_cause); i++) {
+ if (!intr_cause[i].str)
+ continue;
+
+ irq = irq_create_mapping(pcie->mdb_domain, i);
+ if (!irq) {
+ dev_err(dev, "Failed to map MDB domain interrupt\n");
+ return -ENOMEM;
+ }
+
+ err = devm_request_irq(dev, irq, amd_mdb_pcie_intr_handler,
+ IRQF_NO_THREAD, intr_cause[i].sym, pcie);
+ if (err) {
+ dev_err(dev, "Failed to request IRQ %d, err=%d\n",
+ irq, err);
+ return err;
+ }
+ }
+
+ pcie->intx_irq = irq_create_mapping(pcie->mdb_domain,
+ AMD_MDB_PCIE_INTR_INTX);
+ if (!pcie->intx_irq) {
+ dev_err(dev, "Failed to map INTx interrupt\n");
+ return -ENXIO;
+ }
+
+ err = devm_request_irq(dev, pcie->intx_irq, dw_pcie_rp_intx,
+ IRQF_NO_THREAD, NULL, pcie);
+ if (err) {
+ dev_err(dev, "Failed to request INTx IRQ %d, err=%d\n",
+ pcie->intx_irq, err);
+ return err;
+ }
+
+ /* Plug the main event handler. */
+ err = devm_request_irq(dev, pp->irq, amd_mdb_pcie_event, IRQF_NO_THREAD,
+ "amd_mdb pcie_irq", pcie);
+ if (err) {
+ dev_err(dev, "Failed to request event IRQ %d, err=%d\n",
+ pp->irq, err);
+ return err;
+ }
+
+ return 0;
+}
+
+static int amd_mdb_add_pcie_port(struct amd_mdb_pcie *pcie,
+ struct platform_device *pdev)
+{
+ struct dw_pcie *pci = &pcie->pci;
+ struct dw_pcie_rp *pp = &pci->pp;
+ struct device *dev = &pdev->dev;
+ int err;
+
+ pcie->slcr = devm_platform_ioremap_resource_byname(pdev, "slcr");
+ if (IS_ERR(pcie->slcr))
+ return PTR_ERR(pcie->slcr);
+
+ err = amd_mdb_pcie_init_irq_domains(pcie, pdev);
+ if (err)
+ return err;
+
+ err = amd_mdb_setup_irq(pcie, pdev);
+ if (err) {
+ dev_err(dev, "Failed to set up interrupts, err=%d\n", err);
+ goto out;
+ }
+
+ pp->ops = &amd_mdb_pcie_host_ops;
+
+ err = dw_pcie_host_init(pp);
+ if (err) {
+ dev_err(dev, "Failed to initialize host, err=%d\n", err);
+ goto out;
+ }
+
+ return 0;
+
+out:
+ amd_mdb_pcie_free_irq_domains(pcie);
+ return err;
+}
+
+static int amd_mdb_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct amd_mdb_pcie *pcie;
+ struct dw_pcie *pci;
+
+ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+ if (!pcie)
+ return -ENOMEM;
+
+ pci = &pcie->pci;
+ pci->dev = dev;
+
+ platform_set_drvdata(pdev, pcie);
+
+ return amd_mdb_add_pcie_port(pcie, pdev);
+}
+
+static const struct of_device_id amd_mdb_pcie_of_match[] = {
+ {
+ .compatible = "amd,versal2-mdb-host",
+ },
+ {},
+};
+
+static struct platform_driver amd_mdb_pcie_driver = {
+ .driver = {
+ .name = "amd-mdb-pcie",
+ .of_match_table = amd_mdb_pcie_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = amd_mdb_pcie_probe,
+};
+
+builtin_platform_driver(amd_mdb_pcie_driver);
diff --git a/drivers/pci/controller/dwc/pcie-artpec6.c b/drivers/pci/controller/dwc/pcie-artpec6.c
index 9ed0a9ba7619..234c8cbcae3a 100644
--- a/drivers/pci/controller/dwc/pcie-artpec6.c
+++ b/drivers/pci/controller/dwc/pcie-artpec6.c
@@ -94,7 +94,7 @@ static void artpec6_pcie_writel(struct artpec6_pcie *artpec6_pcie, u32 offset, u
regmap_write(artpec6_pcie->regmap, offset, val);
}
-static u64 artpec6_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 pci_addr)
+static u64 artpec6_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 cpu_addr)
{
struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pci);
struct dw_pcie_rp *pp = &pci->pp;
@@ -102,13 +102,13 @@ static u64 artpec6_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 pci_addr)
switch (artpec6_pcie->mode) {
case DW_PCIE_RC_TYPE:
- return pci_addr - pp->cfg0_base;
+ return cpu_addr - pp->cfg0_base;
case DW_PCIE_EP_TYPE:
- return pci_addr - ep->phys_base;
+ return cpu_addr - ep->phys_base;
default:
dev_err(pci->dev, "UNKNOWN device type\n");
}
- return pci_addr;
+ return cpu_addr;
}
static int artpec6_pcie_establish_link(struct dw_pcie *pci)
@@ -369,9 +369,22 @@ static int artpec6_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
return 0;
}
+static const struct pci_epc_features artpec6_pcie_epc_features = {
+ .linkup_notifier = false,
+ .msi_capable = true,
+ .msix_capable = false,
+};
+
+static const struct pci_epc_features *
+artpec6_pcie_get_features(struct dw_pcie_ep *ep)
+{
+ return &artpec6_pcie_epc_features;
+}
+
static const struct dw_pcie_ep_ops pcie_ep_ops = {
.init = artpec6_pcie_ep_init,
.raise_irq = artpec6_pcie_raise_irq,
+ .get_features = artpec6_pcie_get_features,
};
static int artpec6_pcie_probe(struct platform_device *pdev)
@@ -441,7 +454,20 @@ static int artpec6_pcie_probe(struct platform_device *pdev)
pci->ep.ops = &pcie_ep_ops;
- return dw_pcie_ep_init(&pci->ep);
+ ret = dw_pcie_ep_init(&pci->ep);
+ if (ret)
+ return ret;
+
+ ret = dw_pcie_ep_init_registers(&pci->ep);
+ if (ret) {
+ dev_err(dev, "Failed to initialize DWC endpoint registers\n");
+ dw_pcie_ep_deinit(&pci->ep);
+ return ret;
+ }
+
+ pci_epc_init_notify(pci->ep.epc);
+
+ break;
default:
dev_err(dev, "INVALID device type %d\n", artpec6_pcie->mode);
}
diff --git a/drivers/pci/controller/dwc/pcie-bt1.c b/drivers/pci/controller/dwc/pcie-bt1.c
index 76d0ddea8007..1340edc18d12 100644
--- a/drivers/pci/controller/dwc/pcie-bt1.c
+++ b/drivers/pci/controller/dwc/pcie-bt1.c
@@ -632,7 +632,7 @@ MODULE_DEVICE_TABLE(of, bt1_pcie_of_match);
static struct platform_driver bt1_pcie_driver = {
.probe = bt1_pcie_probe,
- .remove_new = bt1_pcie_remove,
+ .remove = bt1_pcie_remove,
.driver = {
.name = "bt1-pcie",
.of_match_table = bt1_pcie_of_match,
diff --git a/drivers/pci/controller/dwc/pcie-designware-debugfs.c b/drivers/pci/controller/dwc/pcie-designware-debugfs.c
new file mode 100644
index 000000000000..9e6f4d00f262
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-designware-debugfs.c
@@ -0,0 +1,677 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Synopsys DesignWare PCIe controller debugfs driver
+ *
+ * Copyright (C) 2025 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Author: Shradha Todi <shradha.t@samsung.com>
+ */
+
+#include <linux/debugfs.h>
+
+#include "pcie-designware.h"
+
+#define SD_STATUS_L1LANE_REG 0xb0
+#define PIPE_RXVALID BIT(18)
+#define PIPE_DETECT_LANE BIT(17)
+#define LANE_SELECT GENMASK(3, 0)
+
+#define ERR_INJ0_OFF 0x34
+#define EINJ_VAL_DIFF GENMASK(28, 16)
+#define EINJ_VC_NUM GENMASK(14, 12)
+#define EINJ_TYPE_SHIFT 8
+#define EINJ0_TYPE GENMASK(11, 8)
+#define EINJ1_TYPE BIT(8)
+#define EINJ2_TYPE GENMASK(9, 8)
+#define EINJ3_TYPE GENMASK(10, 8)
+#define EINJ4_TYPE GENMASK(10, 8)
+#define EINJ5_TYPE BIT(8)
+#define EINJ_COUNT GENMASK(7, 0)
+
+#define ERR_INJ_ENABLE_REG 0x30
+
+#define RAS_DES_EVENT_COUNTER_DATA_REG 0xc
+
+#define RAS_DES_EVENT_COUNTER_CTRL_REG 0x8
+#define EVENT_COUNTER_GROUP_SELECT GENMASK(27, 24)
+#define EVENT_COUNTER_EVENT_SELECT GENMASK(23, 16)
+#define EVENT_COUNTER_LANE_SELECT GENMASK(11, 8)
+#define EVENT_COUNTER_STATUS BIT(7)
+#define EVENT_COUNTER_ENABLE GENMASK(4, 2)
+#define PER_EVENT_ON 0x3
+#define PER_EVENT_OFF 0x1
+
+#define DWC_DEBUGFS_BUF_MAX 128
+
+/**
+ * struct dwc_pcie_rasdes_info - Stores controller common information
+ * @ras_cap_offset: RAS DES vendor specific extended capability offset
+ * @reg_event_lock: Mutex used for RAS DES shadow event registers
+ *
+ * Any parameter constant across all files of the debugfs hierarchy for a
+ * single controller is stored in this struct. It is allocated and assigned
+ * to the controller-specific struct dw_pcie during initialization.
+ */
+struct dwc_pcie_rasdes_info {
+ u32 ras_cap_offset;
+ struct mutex reg_event_lock;
+};
+
+/**
+ * struct dwc_pcie_rasdes_priv - Stores file specific private data information
+ * @pci: Reference to the dw_pcie structure
+ * @idx: Index of specific file related information in array of structs
+ *
+ * All debugfs files will have this struct as its private data.
+ */
+struct dwc_pcie_rasdes_priv {
+ struct dw_pcie *pci;
+ int idx;
+};
+
+/**
+ * struct dwc_pcie_err_inj - Store details about each error injection
+ * supported by DWC RAS DES
+ * @name: Name of the error that can be injected
+ * @err_inj_group: Group number to which the error belongs. The value
+ * can range from 0 to 5
+ * @err_inj_type: Each group can have multiple types of error
+ */
+struct dwc_pcie_err_inj {
+ const char *name;
+ u32 err_inj_group;
+ u32 err_inj_type;
+};
+
+static const struct dwc_pcie_err_inj err_inj_list[] = {
+ {"tx_lcrc", 0x0, 0x0},
+ {"b16_crc_dllp", 0x0, 0x1},
+ {"b16_crc_upd_fc", 0x0, 0x2},
+ {"tx_ecrc", 0x0, 0x3},
+ {"fcrc_tlp", 0x0, 0x4},
+ {"parity_tsos", 0x0, 0x5},
+ {"parity_skpos", 0x0, 0x6},
+ {"rx_lcrc", 0x0, 0x8},
+ {"rx_ecrc", 0x0, 0xb},
+ {"tlp_err_seq", 0x1, 0x0},
+ {"ack_nak_dllp_seq", 0x1, 0x1},
+ {"ack_nak_dllp", 0x2, 0x0},
+ {"upd_fc_dllp", 0x2, 0x1},
+ {"nak_dllp", 0x2, 0x2},
+ {"inv_sync_hdr_sym", 0x3, 0x0},
+ {"com_pad_ts1", 0x3, 0x1},
+ {"com_pad_ts2", 0x3, 0x2},
+ {"com_fts", 0x3, 0x3},
+ {"com_idl", 0x3, 0x4},
+ {"end_edb", 0x3, 0x5},
+ {"stp_sdp", 0x3, 0x6},
+ {"com_skp", 0x3, 0x7},
+ {"posted_tlp_hdr", 0x4, 0x0},
+ {"non_post_tlp_hdr", 0x4, 0x1},
+ {"cmpl_tlp_hdr", 0x4, 0x2},
+ {"posted_tlp_data", 0x4, 0x4},
+ {"non_post_tlp_data", 0x4, 0x5},
+ {"cmpl_tlp_data", 0x4, 0x6},
+ {"duplicate_tlp", 0x5, 0x0},
+ {"nullified_tlp", 0x5, 0x1},
+};
+
+static const u32 err_inj_type_mask[] = {
+ EINJ0_TYPE,
+ EINJ1_TYPE,
+ EINJ2_TYPE,
+ EINJ3_TYPE,
+ EINJ4_TYPE,
+ EINJ5_TYPE,
+};
+
+/**
+ * struct dwc_pcie_event_counter - Store details about each event counter
+ * supported in DWC RAS DES
+ * @name: Name of the error counter
+ * @group_no: Group number that the event belongs to. The value can range
+ * from 0 to 4
+ * @event_no: Event number of the particular event. The value ranges are:
+ * Group 0: 0 - 10
+ * Group 1: 5 - 13
+ * Group 2: 0 - 7
+ * Group 3: 0 - 5
+ * Group 4: 0 - 1
+ */
+struct dwc_pcie_event_counter {
+ const char *name;
+ u32 group_no;
+ u32 event_no;
+};
+
+static const struct dwc_pcie_event_counter event_list[] = {
+ {"ebuf_overflow", 0x0, 0x0},
+ {"ebuf_underrun", 0x0, 0x1},
+ {"decode_err", 0x0, 0x2},
+ {"running_disparity_err", 0x0, 0x3},
+ {"skp_os_parity_err", 0x0, 0x4},
+ {"sync_header_err", 0x0, 0x5},
+ {"rx_valid_deassertion", 0x0, 0x6},
+ {"ctl_skp_os_parity_err", 0x0, 0x7},
+ {"retimer_parity_err_1st", 0x0, 0x8},
+ {"retimer_parity_err_2nd", 0x0, 0x9},
+ {"margin_crc_parity_err", 0x0, 0xA},
+ {"detect_ei_infer", 0x1, 0x5},
+ {"receiver_err", 0x1, 0x6},
+ {"rx_recovery_req", 0x1, 0x7},
+ {"n_fts_timeout", 0x1, 0x8},
+ {"framing_err", 0x1, 0x9},
+ {"deskew_err", 0x1, 0xa},
+ {"framing_err_in_l0", 0x1, 0xc},
+ {"deskew_uncompleted_err", 0x1, 0xd},
+ {"bad_tlp", 0x2, 0x0},
+ {"lcrc_err", 0x2, 0x1},
+ {"bad_dllp", 0x2, 0x2},
+ {"replay_num_rollover", 0x2, 0x3},
+ {"replay_timeout", 0x2, 0x4},
+ {"rx_nak_dllp", 0x2, 0x5},
+ {"tx_nak_dllp", 0x2, 0x6},
+ {"retry_tlp", 0x2, 0x7},
+ {"fc_timeout", 0x3, 0x0},
+ {"poisoned_tlp", 0x3, 0x1},
+ {"ecrc_error", 0x3, 0x2},
+ {"unsupported_request", 0x3, 0x3},
+ {"completer_abort", 0x3, 0x4},
+ {"completion_timeout", 0x3, 0x5},
+ {"ebuf_skp_add", 0x4, 0x0},
+ {"ebuf_skp_del", 0x4, 0x1},
+};
+
+static ssize_t lane_detect_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct dw_pcie *pci = file->private_data;
+ struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
+ char debugfs_buf[DWC_DEBUGFS_BUF_MAX];
+ ssize_t pos;
+ u32 val;
+
+ val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + SD_STATUS_L1LANE_REG);
+ val = FIELD_GET(PIPE_DETECT_LANE, val);
+ if (val)
+ pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "Lane Detected\n");
+ else
+ pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "Lane Undetected\n");
+
+ return simple_read_from_buffer(buf, count, ppos, debugfs_buf, pos);
+}
+
+static ssize_t lane_detect_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct dw_pcie *pci = file->private_data;
+ struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
+ u32 lane, val;
+
+ val = kstrtou32_from_user(buf, count, 0, &lane);
+ if (val)
+ return val;
+
+ val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + SD_STATUS_L1LANE_REG);
+ val &= ~(LANE_SELECT);
+ val |= FIELD_PREP(LANE_SELECT, lane);
+ dw_pcie_writel_dbi(pci, rinfo->ras_cap_offset + SD_STATUS_L1LANE_REG, val);
+
+ return count;
+}
+
+static ssize_t rx_valid_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct dw_pcie *pci = file->private_data;
+ struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
+ char debugfs_buf[DWC_DEBUGFS_BUF_MAX];
+ ssize_t pos;
+ u32 val;
+
+ val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + SD_STATUS_L1LANE_REG);
+ val = FIELD_GET(PIPE_RXVALID, val);
+ if (val)
+ pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "RX Valid\n");
+ else
+ pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "RX Invalid\n");
+
+ return simple_read_from_buffer(buf, count, ppos, debugfs_buf, pos);
+}
+
+static ssize_t rx_valid_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return lane_detect_write(file, buf, count, ppos);
+}
+
+static ssize_t err_inj_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct dwc_pcie_rasdes_priv *pdata = file->private_data;
+ struct dw_pcie *pci = pdata->pci;
+ struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
+ u32 val, counter, vc_num, err_group, type_mask;
+ int val_diff = 0;
+ char *kern_buf;
+
+ err_group = err_inj_list[pdata->idx].err_inj_group;
+ type_mask = err_inj_type_mask[err_group];
+
+ kern_buf = memdup_user_nul(buf, count);
+ if (IS_ERR(kern_buf))
+ return PTR_ERR(kern_buf);
+
+ if (err_group == 4) {
+ val = sscanf(kern_buf, "%u %d %u", &counter, &val_diff, &vc_num);
+ if ((val != 3) || (val_diff < -4095 || val_diff > 4095)) {
+ kfree(kern_buf);
+ return -EINVAL;
+ }
+ } else if (err_group == 1) {
+ val = sscanf(kern_buf, "%u %d", &counter, &val_diff);
+ if ((val != 2) || (val_diff < -4095 || val_diff > 4095)) {
+ kfree(kern_buf);
+ return -EINVAL;
+ }
+ } else {
+ val = kstrtou32(kern_buf, 0, &counter);
+ if (val) {
+ kfree(kern_buf);
+ return val;
+ }
+ }
+
+ val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + ERR_INJ0_OFF + (0x4 * err_group));
+ val &= ~(type_mask | EINJ_COUNT);
+ val |= ((err_inj_list[pdata->idx].err_inj_type << EINJ_TYPE_SHIFT) & type_mask);
+ val |= FIELD_PREP(EINJ_COUNT, counter);
+
+ if (err_group == 1 || err_group == 4) {
+ val &= ~(EINJ_VAL_DIFF);
+ val |= FIELD_PREP(EINJ_VAL_DIFF, val_diff);
+ }
+ if (err_group == 4) {
+ val &= ~(EINJ_VC_NUM);
+ val |= FIELD_PREP(EINJ_VC_NUM, vc_num);
+ }
+
+ dw_pcie_writel_dbi(pci, rinfo->ras_cap_offset + ERR_INJ0_OFF + (0x4 * err_group), val);
+ dw_pcie_writel_dbi(pci, rinfo->ras_cap_offset + ERR_INJ_ENABLE_REG, (0x1 << err_group));
+
+ kfree(kern_buf);
+ return count;
+}
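A usage sketch for the files this handler backs (paths hypothetical, relative to the rasdes_err_inj directory created later in this file); the accepted formats mirror the sscanf() calls above:

	/*
	 * echo 8 > rasdes_err_inj/tx_lcrc             groups 0/2/3/5: count only
	 * echo "4 -2" > rasdes_err_inj/tlp_err_seq    group 1: count val_diff
	 * echo "1 16 0" > rasdes_err_inj/posted_tlp_data
	 *                                             group 4: count val_diff vc_num
	 */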
+
+static void set_event_number(struct dwc_pcie_rasdes_priv *pdata,
+ struct dw_pcie *pci, struct dwc_pcie_rasdes_info *rinfo)
+{
+ u32 val;
+
+ val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG);
+ val &= ~EVENT_COUNTER_ENABLE;
+ val &= ~(EVENT_COUNTER_GROUP_SELECT | EVENT_COUNTER_EVENT_SELECT);
+ val |= FIELD_PREP(EVENT_COUNTER_GROUP_SELECT, event_list[pdata->idx].group_no);
+ val |= FIELD_PREP(EVENT_COUNTER_EVENT_SELECT, event_list[pdata->idx].event_no);
+ dw_pcie_writel_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG, val);
+}
+
+static ssize_t counter_enable_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct dwc_pcie_rasdes_priv *pdata = file->private_data;
+ struct dw_pcie *pci = pdata->pci;
+ struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
+ char debugfs_buf[DWC_DEBUGFS_BUF_MAX];
+ ssize_t pos;
+ u32 val;
+
+ mutex_lock(&rinfo->reg_event_lock);
+ set_event_number(pdata, pci, rinfo);
+ val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG);
+ mutex_unlock(&rinfo->reg_event_lock);
+ val = FIELD_GET(EVENT_COUNTER_STATUS, val);
+ if (val)
+ pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "Counter Enabled\n");
+ else
+ pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "Counter Disabled\n");
+
+ return simple_read_from_buffer(buf, count, ppos, debugfs_buf, pos);
+}
+
+static ssize_t counter_enable_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct dwc_pcie_rasdes_priv *pdata = file->private_data;
+ struct dw_pcie *pci = pdata->pci;
+ struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
+ u32 val, enable;
+
+ val = kstrtou32_from_user(buf, count, 0, &enable);
+ if (val)
+ return val;
+
+ mutex_lock(&rinfo->reg_event_lock);
+ set_event_number(pdata, pci, rinfo);
+ val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG);
+ if (enable)
+ val |= FIELD_PREP(EVENT_COUNTER_ENABLE, PER_EVENT_ON);
+ else
+ val |= FIELD_PREP(EVENT_COUNTER_ENABLE, PER_EVENT_OFF);
+
+ dw_pcie_writel_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG, val);
+
+ /*
+ * While enabling the counter, always read the status back to check if
+ * it is enabled or not. Return error if it is not enabled to let the
+ * users know that the counter is not supported on the platform.
+ */
+ if (enable) {
+ val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset +
+ RAS_DES_EVENT_COUNTER_CTRL_REG);
+ if (!FIELD_GET(EVENT_COUNTER_STATUS, val)) {
+ mutex_unlock(&rinfo->reg_event_lock);
+ return -EOPNOTSUPP;
+ }
+ }
+
+ mutex_unlock(&rinfo->reg_event_lock);
+
+ return count;
+}
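A hypothetical session against the counter files (per-event directory layout assumed from the init code later in this file): writing '1' enables the event, and the immediate read-back above turns silently-unsupported counters into -EOPNOTSUPP instead of reporting success:

	/*
	 * echo 1 > <event>/counter_enable      enable; fails with EOPNOTSUPP
	 *                                      if EVENT_COUNTER_STATUS never sets
	 * cat <event>/counter_enable           -> "Counter Enabled"
	 */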
+
+static ssize_t counter_lane_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct dwc_pcie_rasdes_priv *pdata = file->private_data;
+ struct dw_pcie *pci = pdata->pci;
+ struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
+ char debugfs_buf[DWC_DEBUGFS_BUF_MAX];
+ ssize_t pos;
+ u32 val;
+
+ mutex_lock(&rinfo->reg_event_lock);
+ set_event_number(pdata, pci, rinfo);
+ val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG);
+ mutex_unlock(&rinfo->reg_event_lock);
+ val = FIELD_GET(EVENT_COUNTER_LANE_SELECT, val);
+ pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "Lane: %d\n", val);
+
+ return simple_read_from_buffer(buf, count, ppos, debugfs_buf, pos);
+}
+
+static ssize_t counter_lane_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct dwc_pcie_rasdes_priv *pdata = file->private_data;
+ struct dw_pcie *pci = pdata->pci;
+ struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
+ u32 val, lane;
+
+ val = kstrtou32_from_user(buf, count, 0, &lane);
+ if (val)
+ return val;
+
+ mutex_lock(&rinfo->reg_event_lock);
+ set_event_number(pdata, pci, rinfo);
+ val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG);
+ val &= ~(EVENT_COUNTER_LANE_SELECT);
+ val |= FIELD_PREP(EVENT_COUNTER_LANE_SELECT, lane);
+ dw_pcie_writel_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG, val);
+ mutex_unlock(&rinfo->reg_event_lock);
+
+ return count;
+}
+
+static ssize_t counter_value_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct dwc_pcie_rasdes_priv *pdata = file->private_data;
+ struct dw_pcie *pci = pdata->pci;
+ struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
+ char debugfs_buf[DWC_DEBUGFS_BUF_MAX];
+ ssize_t pos;
+ u32 val;
+
+ mutex_lock(&rinfo->reg_event_lock);
+ set_event_number(pdata, pci, rinfo);
+ val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_DATA_REG);
+ mutex_unlock(&rinfo->reg_event_lock);
+ pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "Counter value: %d\n", val);
+
+ return simple_read_from_buffer(buf, count, ppos, debugfs_buf, pos);
+}
+
+static const char *ltssm_status_string(enum dw_pcie_ltssm ltssm)
+{
+ const char *str;
+
+ switch (ltssm) {
+#define DW_PCIE_LTSSM_NAME(n) case n: str = #n; break
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_DETECT_QUIET);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_DETECT_ACT);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_POLL_ACTIVE);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_POLL_COMPLIANCE);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_POLL_CONFIG);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_PRE_DETECT_QUIET);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_DETECT_WAIT);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_CFG_LINKWD_START);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_CFG_LINKWD_ACEPT);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_CFG_LANENUM_WAI);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_CFG_LANENUM_ACEPT);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_CFG_COMPLETE);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_CFG_IDLE);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_RCVRY_LOCK);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_RCVRY_SPEED);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_RCVRY_RCVRCFG);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_RCVRY_IDLE);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_L0);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_L0S);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_L123_SEND_EIDLE);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_L1_IDLE);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_L2_IDLE);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_L2_WAKE);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_DISABLED_ENTRY);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_DISABLED_IDLE);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_DISABLED);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_LPBK_ENTRY);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_LPBK_ACTIVE);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_LPBK_EXIT);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_LPBK_EXIT_TIMEOUT);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_HOT_RESET_ENTRY);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_HOT_RESET);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_RCVRY_EQ0);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_RCVRY_EQ1);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_RCVRY_EQ2);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_RCVRY_EQ3);
+ default:
+ str = "DW_PCIE_LTSSM_UNKNOWN";
+ break;
+ }
+
+ return str + strlen("DW_PCIE_LTSSM_");
+}
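
As an editor's note on the construct above: DW_PCIE_LTSSM_NAME() stringizes the
enum constant with the preprocessor's #n operator, and the shared prefix is then
skipped with pointer arithmetic at return time. A minimal standalone sketch of
the same pattern, using a hypothetical enum that is not part of the driver:

/*
 * Stringify enum constants and strip their shared prefix.
 * The demo_state enum and its names are illustrative only.
 */
#include <string.h>

enum demo_state { DEMO_STATE_IDLE, DEMO_STATE_ACTIVE };

static const char *demo_state_string(enum demo_state s)
{
	const char *str;

	switch (s) {
#define DEMO_STATE_NAME(n) case n: str = #n; break
	DEMO_STATE_NAME(DEMO_STATE_IDLE);
	DEMO_STATE_NAME(DEMO_STATE_ACTIVE);
	default:
		str = "DEMO_STATE_UNKNOWN";
		break;
	}

	/* "DEMO_STATE_IDLE" + strlen("DEMO_STATE_") yields "IDLE". */
	return str + strlen("DEMO_STATE_");
}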
+
+static int ltssm_status_show(struct seq_file *s, void *v)
+{
+ struct dw_pcie *pci = s->private;
+ enum dw_pcie_ltssm val;
+
+ val = dw_pcie_get_ltssm(pci);
+ seq_printf(s, "%s (0x%02x)\n", ltssm_status_string(val), val);
+
+ return 0;
+}
+
+static int ltssm_status_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ltssm_status_show, inode->i_private);
+}
+
+#define dwc_debugfs_create(name) \
+debugfs_create_file(#name, 0644, rasdes_debug, pci, \
+ &dbg_ ## name ## _fops)
+
+#define DWC_DEBUGFS_FOPS(name) \
+static const struct file_operations dbg_ ## name ## _fops = { \
+ .open = simple_open, \
+ .read = name ## _read, \
+ .write = name ## _write \
+}
+
+DWC_DEBUGFS_FOPS(lane_detect);
+DWC_DEBUGFS_FOPS(rx_valid);
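
For clarity, DWC_DEBUGFS_FOPS(lane_detect) expands to exactly this definition
(lane_detect_read() and lane_detect_write() are defined earlier in this file):

static const struct file_operations dbg_lane_detect_fops = {
	.open = simple_open,
	.read = lane_detect_read,
	.write = lane_detect_write
};

dwc_debugfs_create(lane_detect) then registers a 0644 "lane_detect" file in the
rasdes_debug directory, bound to these fops.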
+
+static const struct file_operations dwc_pcie_err_inj_ops = {
+ .open = simple_open,
+ .write = err_inj_write,
+};
+
+static const struct file_operations dwc_pcie_counter_enable_ops = {
+ .open = simple_open,
+ .read = counter_enable_read,
+ .write = counter_enable_write,
+};
+
+static const struct file_operations dwc_pcie_counter_lane_ops = {
+ .open = simple_open,
+ .read = counter_lane_read,
+ .write = counter_lane_write,
+};
+
+static const struct file_operations dwc_pcie_counter_value_ops = {
+ .open = simple_open,
+ .read = counter_value_read,
+};
+
+static const struct file_operations dwc_pcie_ltssm_status_ops = {
+ .open = ltssm_status_open,
+ .read = seq_read,
+};
+
+static void dwc_pcie_rasdes_debugfs_deinit(struct dw_pcie *pci)
+{
+ struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
+
+ mutex_destroy(&rinfo->reg_event_lock);
+}
+
+static int dwc_pcie_rasdes_debugfs_init(struct dw_pcie *pci, struct dentry *dir)
+{
+ struct dentry *rasdes_debug, *rasdes_err_inj;
+ struct dentry *rasdes_event_counter, *rasdes_events;
+ struct dwc_pcie_rasdes_info *rasdes_info;
+ struct dwc_pcie_rasdes_priv *priv_tmp;
+ struct device *dev = pci->dev;
+ int ras_cap, i, ret;
+
+ /*
+	 * If a given SoC has no RAS DES capability, the following call is
+	 * bound to return an error, which would break probing on existing
+	 * platforms. So return 0 here, as a missing capability is not
+	 * necessarily an error.
+ */
+ ras_cap = dw_pcie_find_rasdes_capability(pci);
+ if (!ras_cap) {
+ dev_dbg(dev, "no RAS DES capability available\n");
+ return 0;
+ }
+
+ rasdes_info = devm_kzalloc(dev, sizeof(*rasdes_info), GFP_KERNEL);
+ if (!rasdes_info)
+ return -ENOMEM;
+
+ /* Create subdirectories for Debug, Error Injection, Statistics. */
+ rasdes_debug = debugfs_create_dir("rasdes_debug", dir);
+ rasdes_err_inj = debugfs_create_dir("rasdes_err_inj", dir);
+ rasdes_event_counter = debugfs_create_dir("rasdes_event_counter", dir);
+
+ mutex_init(&rasdes_info->reg_event_lock);
+ rasdes_info->ras_cap_offset = ras_cap;
+ pci->debugfs->rasdes_info = rasdes_info;
+
+ /* Create debugfs files for Debug subdirectory. */
+ dwc_debugfs_create(lane_detect);
+ dwc_debugfs_create(rx_valid);
+
+ /* Create debugfs files for Error Injection subdirectory. */
+ for (i = 0; i < ARRAY_SIZE(err_inj_list); i++) {
+ priv_tmp = devm_kzalloc(dev, sizeof(*priv_tmp), GFP_KERNEL);
+ if (!priv_tmp) {
+ ret = -ENOMEM;
+ goto err_deinit;
+ }
+
+ priv_tmp->idx = i;
+ priv_tmp->pci = pci;
+ debugfs_create_file(err_inj_list[i].name, 0200, rasdes_err_inj, priv_tmp,
+ &dwc_pcie_err_inj_ops);
+ }
+
+ /* Create debugfs files for Statistical Counter subdirectory. */
+ for (i = 0; i < ARRAY_SIZE(event_list); i++) {
+ priv_tmp = devm_kzalloc(dev, sizeof(*priv_tmp), GFP_KERNEL);
+ if (!priv_tmp) {
+ ret = -ENOMEM;
+ goto err_deinit;
+ }
+
+ priv_tmp->idx = i;
+ priv_tmp->pci = pci;
+ rasdes_events = debugfs_create_dir(event_list[i].name, rasdes_event_counter);
+ if (event_list[i].group_no == 0 || event_list[i].group_no == 4) {
+ debugfs_create_file("lane_select", 0644, rasdes_events,
+ priv_tmp, &dwc_pcie_counter_lane_ops);
+ }
+ debugfs_create_file("counter_value", 0444, rasdes_events, priv_tmp,
+ &dwc_pcie_counter_value_ops);
+ debugfs_create_file("counter_enable", 0644, rasdes_events, priv_tmp,
+ &dwc_pcie_counter_enable_ops);
+ }
+
+ return 0;
+
+err_deinit:
+ dwc_pcie_rasdes_debugfs_deinit(pci);
+ return ret;
+}
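
Taken together, the three creation loops above produce a layout along these
lines (the <dev> and <event_name> components vary by platform):

/*
 * /sys/kernel/debug/dwc_pcie_<dev>/
 *   rasdes_debug/           lane_detect (0644), rx_valid (0644)
 *   rasdes_err_inj/         one 0200 file per err_inj_list[] entry
 *   rasdes_event_counter/
 *     <event_name>/         counter_enable (0644), counter_value (0444),
 *                           lane_select (0644, event groups 0 and 4 only)
 */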
+
+static void dwc_pcie_ltssm_debugfs_init(struct dw_pcie *pci, struct dentry *dir)
+{
+ debugfs_create_file("ltssm_status", 0444, dir, pci,
+ &dwc_pcie_ltssm_status_ops);
+}
+
+void dwc_pcie_debugfs_deinit(struct dw_pcie *pci)
+{
+ if (!pci->debugfs)
+ return;
+
+ dwc_pcie_rasdes_debugfs_deinit(pci);
+ debugfs_remove_recursive(pci->debugfs->debug_dir);
+}
+
+void dwc_pcie_debugfs_init(struct dw_pcie *pci)
+{
+ char dirname[DWC_DEBUGFS_BUF_MAX];
+ struct device *dev = pci->dev;
+ struct debugfs_info *debugfs;
+ struct dentry *dir;
+ int err;
+
+ /* Create main directory for each platform driver. */
+ snprintf(dirname, DWC_DEBUGFS_BUF_MAX, "dwc_pcie_%s", dev_name(dev));
+ dir = debugfs_create_dir(dirname, NULL);
+ debugfs = devm_kzalloc(dev, sizeof(*debugfs), GFP_KERNEL);
+ if (!debugfs)
+ return;
+
+ debugfs->debug_dir = dir;
+ pci->debugfs = debugfs;
+ err = dwc_pcie_rasdes_debugfs_init(pci, dir);
+ if (err)
+ dev_err(dev, "failed to initialize RAS DES debugfs, err=%d\n",
+ err);
+
+ dwc_pcie_ltssm_debugfs_init(pci, dir);
+}
diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
index 746a11dcb67f..1a0bf9341542 100644
--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
+++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
@@ -15,22 +15,14 @@
#include <linux/pci-epc.h>
#include <linux/pci-epf.h>
-void dw_pcie_ep_linkup(struct dw_pcie_ep *ep)
-{
- struct pci_epc *epc = ep->epc;
-
- pci_epc_linkup(epc);
-}
-EXPORT_SYMBOL_GPL(dw_pcie_ep_linkup);
-
-void dw_pcie_ep_init_notify(struct dw_pcie_ep *ep)
-{
- struct pci_epc *epc = ep->epc;
-
- pci_epc_init_notify(epc);
-}
-EXPORT_SYMBOL_GPL(dw_pcie_ep_init_notify);
-
+/**
+ * dw_pcie_ep_get_func_from_ep - Get the struct dw_pcie_ep_func corresponding to
+ * the endpoint function
+ * @ep: DWC EP device
+ * @func_no: Function number of the endpoint device
+ *
+ * Return: struct dw_pcie_ep_func if success, NULL otherwise.
+ */
struct dw_pcie_ep_func *
dw_pcie_ep_get_func_from_ep(struct dw_pcie_ep *ep, u8 func_no)
{
@@ -61,6 +53,11 @@ static void __dw_pcie_ep_reset_bar(struct dw_pcie *pci, u8 func_no,
dw_pcie_dbi_ro_wr_dis(pci);
}
+/**
+ * dw_pcie_ep_reset_bar - Reset endpoint BAR
+ * @pci: DWC PCI device
+ * @bar: BAR number of the endpoint
+ */
void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)
{
u8 func_no, funcs;
@@ -105,6 +102,45 @@ static u8 dw_pcie_ep_find_capability(struct dw_pcie_ep *ep, u8 func_no, u8 cap)
return __dw_pcie_ep_find_next_cap(ep, func_no, next_cap_ptr, cap);
}
+/**
+ * dw_pcie_ep_hide_ext_capability - Hide a capability from the linked list
+ * @pci: DWC PCI device
+ * @prev_cap: Capability preceding the capability that should be hidden
+ * @cap: Capability that should be hidden
+ *
+ * Return: 0 if success, errno otherwise.
+ */
+int dw_pcie_ep_hide_ext_capability(struct dw_pcie *pci, u8 prev_cap, u8 cap)
+{
+ u16 prev_cap_offset, cap_offset;
+ u32 prev_cap_header, cap_header;
+
+ prev_cap_offset = dw_pcie_find_ext_capability(pci, prev_cap);
+ if (!prev_cap_offset)
+ return -EINVAL;
+
+ prev_cap_header = dw_pcie_readl_dbi(pci, prev_cap_offset);
+ cap_offset = PCI_EXT_CAP_NEXT(prev_cap_header);
+ cap_header = dw_pcie_readl_dbi(pci, cap_offset);
+
+ /* cap must immediately follow prev_cap. */
+ if (PCI_EXT_CAP_ID(cap_header) != cap)
+ return -EINVAL;
+
+ /* Clear next ptr. */
+ prev_cap_header &= ~GENMASK(31, 20);
+
+ /* Set next ptr to next ptr of cap. */
+ prev_cap_header |= cap_header & GENMASK(31, 20);
+
+ dw_pcie_dbi_ro_wr_en(pci);
+ dw_pcie_writel_dbi(pci, prev_cap_offset, prev_cap_header);
+ dw_pcie_dbi_ro_wr_dis(pci);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dw_pcie_ep_hide_ext_capability);
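
A worked example of the header splice above, using hypothetical offsets (an
extended capability header keeps its Next Capability Offset in bits 31:20):

/*
 * Before: prev_cap @ 0x100 -> cap @ 0x148 -> 0x160
 *
 *   prev_cap_header = 0x14820001  (next = 0x148, version = 2, ID = 0x0001)
 *   cap_header      = 0x1601001f  (next = 0x160, version = 1, ID = 0x001f)
 *
 * Clearing bits 31:20 of prev_cap_header and OR-ing in cap_header's next
 * pointer gives 0x16020001, so the list now runs 0x100 -> 0x160 and the
 * capability at 0x148 is no longer reachable by config-space walkers.
 */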
+
static int dw_pcie_ep_write_header(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct pci_epf_header *hdr)
{
@@ -131,7 +167,8 @@ static int dw_pcie_ep_write_header(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
}
static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, u8 func_no, int type,
- dma_addr_t cpu_addr, enum pci_barno bar)
+ dma_addr_t parent_bus_addr, enum pci_barno bar,
+ size_t size)
{
int ret;
u32 free_win;
@@ -140,7 +177,7 @@ static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, u8 func_no, int type,
if (!ep->bar_to_atu[bar])
free_win = find_first_zero_bit(ep->ib_window_map, pci->num_ib_windows);
else
- free_win = ep->bar_to_atu[bar];
+ free_win = ep->bar_to_atu[bar] - 1;
if (free_win >= pci->num_ib_windows) {
dev_err(pci->dev, "No free inbound window\n");
@@ -148,21 +185,24 @@ static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, u8 func_no, int type,
}
ret = dw_pcie_prog_ep_inbound_atu(pci, func_no, free_win, type,
- cpu_addr, bar);
+ parent_bus_addr, bar, size);
if (ret < 0) {
dev_err(pci->dev, "Failed to program IB window\n");
return ret;
}
- ep->bar_to_atu[bar] = free_win;
+ /*
+	 * Always increment free_win before assignment, since the value 0 is
+	 * used to identify an unallocated mapping.
+ */
+ ep->bar_to_atu[bar] = free_win + 1;
set_bit(free_win, ep->ib_window_map);
return 0;
}
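
The +1/-1 adjustments around bar_to_atu[] implement a "0 means unallocated"
encoding, since iATU window 0 is itself a valid index. A sketch of the
convention, with hypothetical helper names used for illustration only:

/* Hypothetical helpers illustrating the bar_to_atu[] encoding. */
static inline void atu_slot_store(u32 *slot, u32 window)
{
	*slot = window + 1;	/* 0 stays reserved for "unallocated" */
}

static inline bool atu_slot_used(u32 slot)
{
	return slot != 0;
}

static inline u32 atu_slot_window(u32 slot)
{
	return slot - 1;	/* only valid when atu_slot_used(slot) */
}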
-static int dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep, u8 func_no,
- phys_addr_t phys_addr,
- u64 pci_addr, size_t size)
+static int dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep,
+ struct dw_pcie_ob_atu_cfg *atu)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
u32 free_win;
@@ -174,13 +214,13 @@ static int dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep, u8 func_no,
return -EINVAL;
}
- ret = dw_pcie_prog_ep_outbound_atu(pci, func_no, free_win, PCIE_ATU_TYPE_MEM,
- phys_addr, pci_addr, size);
+ atu->index = free_win;
+ ret = dw_pcie_prog_outbound_atu(pci, atu);
if (ret)
return ret;
set_bit(free_win, ep->ob_window_map);
- ep->outbound_addr[free_win] = phys_addr;
+ ep->outbound_addr[free_win] = atu->parent_bus_addr;
return 0;
}
@@ -191,7 +231,10 @@ static void dw_pcie_ep_clear_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
enum pci_barno bar = epf_bar->barno;
- u32 atu_index = ep->bar_to_atu[bar];
+ u32 atu_index = ep->bar_to_atu[bar] - 1;
+
+ if (!ep->bar_to_atu[bar])
+ return;
__dw_pcie_ep_reset_bar(pci, func_no, bar, epf_bar->flags);
@@ -201,30 +244,96 @@ static void dw_pcie_ep_clear_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
ep->bar_to_atu[bar] = 0;
}
-static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
- struct pci_epf_bar *epf_bar)
+static unsigned int dw_pcie_ep_get_rebar_offset(struct dw_pcie *pci,
+ enum pci_barno bar)
+{
+ u32 reg, bar_index;
+ unsigned int offset, nbars;
+ int i;
+
+ offset = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_REBAR);
+ if (!offset)
+ return offset;
+
+ reg = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL);
+ nbars = (reg & PCI_REBAR_CTRL_NBAR_MASK) >> PCI_REBAR_CTRL_NBAR_SHIFT;
+
+ for (i = 0; i < nbars; i++, offset += PCI_REBAR_CTRL) {
+ reg = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL);
+ bar_index = reg & PCI_REBAR_CTRL_BAR_IDX;
+ if (bar_index == bar)
+ return offset;
+ }
+
+ return 0;
+}
+
+static int dw_pcie_ep_set_bar_resizable(struct dw_pcie_ep *ep, u8 func_no,
+ struct pci_epf_bar *epf_bar)
{
- struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
enum pci_barno bar = epf_bar->barno;
size_t size = epf_bar->size;
int flags = epf_bar->flags;
- int ret, type;
- u32 reg;
-
- reg = PCI_BASE_ADDRESS_0 + (4 * bar);
+ u32 reg = PCI_BASE_ADDRESS_0 + (4 * bar);
+ unsigned int rebar_offset;
+ u32 rebar_cap, rebar_ctrl;
+ int ret;
- if (!(flags & PCI_BASE_ADDRESS_SPACE))
- type = PCIE_ATU_TYPE_MEM;
- else
- type = PCIE_ATU_TYPE_IO;
+ rebar_offset = dw_pcie_ep_get_rebar_offset(pci, bar);
+ if (!rebar_offset)
+ return -EINVAL;
- ret = dw_pcie_ep_inbound_atu(ep, func_no, type, epf_bar->phys_addr, bar);
+ ret = pci_epc_bar_size_to_rebar_cap(size, &rebar_cap);
if (ret)
return ret;
- if (ep->epf_bar[bar])
- return 0;
+ dw_pcie_dbi_ro_wr_en(pci);
+
+ /*
+ * A BAR mask should not be written for a resizable BAR. The BAR mask
+ * is automatically derived by the controller every time the "selected
+ * size" bits are updated, see "Figure 3-26 Resizable BAR Example for
+ * 32-bit Memory BAR0" in DWC EP databook 5.96a. We simply need to write
+ * BIT(0) to set the BAR enable bit.
+ */
+ dw_pcie_ep_writel_dbi2(ep, func_no, reg, BIT(0));
+ dw_pcie_ep_writel_dbi(ep, func_no, reg, flags);
+
+ if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) {
+ dw_pcie_ep_writel_dbi2(ep, func_no, reg + 4, 0);
+ dw_pcie_ep_writel_dbi(ep, func_no, reg + 4, 0);
+ }
+
+ /*
+ * Bits 31:0 in PCI_REBAR_CAP define "supported sizes" bits for sizes
+ * 1 MB to 128 TB. Bits 31:16 in PCI_REBAR_CTRL define "supported sizes"
+ * bits for sizes 256 TB to 8 EB. Disallow sizes 256 TB to 8 EB.
+ */
+ rebar_ctrl = dw_pcie_readl_dbi(pci, rebar_offset + PCI_REBAR_CTRL);
+ rebar_ctrl &= ~GENMASK(31, 16);
+ dw_pcie_writel_dbi(pci, rebar_offset + PCI_REBAR_CTRL, rebar_ctrl);
+
+ /*
+ * The "selected size" (bits 13:8) in PCI_REBAR_CTRL are automatically
+ * updated when writing PCI_REBAR_CAP, see "Figure 3-26 Resizable BAR
+ * Example for 32-bit Memory BAR0" in DWC EP databook 5.96a.
+ */
+ dw_pcie_writel_dbi(pci, rebar_offset + PCI_REBAR_CAP, rebar_cap);
+
+ dw_pcie_dbi_ro_wr_dis(pci);
+
+ return 0;
+}
+
+static int dw_pcie_ep_set_bar_programmable(struct dw_pcie_ep *ep, u8 func_no,
+ struct pci_epf_bar *epf_bar)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ enum pci_barno bar = epf_bar->barno;
+ size_t size = epf_bar->size;
+ int flags = epf_bar->flags;
+ u32 reg = PCI_BASE_ADDRESS_0 + (4 * bar);
dw_pcie_dbi_ro_wr_en(pci);
@@ -236,19 +345,113 @@ static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
dw_pcie_ep_writel_dbi(ep, func_no, reg + 4, 0);
}
- ep->epf_bar[bar] = epf_bar;
dw_pcie_dbi_ro_wr_dis(pci);
return 0;
}
+static enum pci_epc_bar_type dw_pcie_ep_get_bar_type(struct dw_pcie_ep *ep,
+ enum pci_barno bar)
+{
+ const struct pci_epc_features *epc_features;
+
+ if (!ep->ops->get_features)
+ return BAR_PROGRAMMABLE;
+
+ epc_features = ep->ops->get_features(ep);
+
+ return epc_features->bar[bar].type;
+}
+
+static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ struct pci_epf_bar *epf_bar)
+{
+ struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ enum pci_barno bar = epf_bar->barno;
+ size_t size = epf_bar->size;
+ enum pci_epc_bar_type bar_type;
+ int flags = epf_bar->flags;
+ int ret, type;
+
+ /*
+ * DWC does not allow BAR pairs to overlap, e.g. you cannot combine BARs
+ * 1 and 2 to form a 64-bit BAR.
+ */
+ if ((flags & PCI_BASE_ADDRESS_MEM_TYPE_64) && (bar & 1))
+ return -EINVAL;
+
+ /*
+ * Certain EPF drivers dynamically change the physical address of a BAR
+ * (i.e. they call set_bar() twice, without ever calling clear_bar(), as
+ * calling clear_bar() would clear the BAR's PCI address assigned by the
+ * host).
+ */
+ if (ep->epf_bar[bar]) {
+ /*
+ * We can only dynamically change a BAR if the new BAR size and
+ * BAR flags do not differ from the existing configuration.
+ */
+ if (ep->epf_bar[bar]->barno != bar ||
+ ep->epf_bar[bar]->size != size ||
+ ep->epf_bar[bar]->flags != flags)
+ return -EINVAL;
+
+ /*
+ * When dynamically changing a BAR, skip writing the BAR reg, as
+ * that would clear the BAR's PCI address assigned by the host.
+ */
+ goto config_atu;
+ }
+
+ bar_type = dw_pcie_ep_get_bar_type(ep, bar);
+ switch (bar_type) {
+ case BAR_FIXED:
+ /*
+ * There is no need to write a BAR mask for a fixed BAR (except
+ * to write 1 to the LSB of the BAR mask register, to enable the
+ * BAR). Write the BAR mask regardless. (The fixed bits in the
+ * BAR mask register will be read-only anyway.)
+ */
+ fallthrough;
+ case BAR_PROGRAMMABLE:
+ ret = dw_pcie_ep_set_bar_programmable(ep, func_no, epf_bar);
+ break;
+ case BAR_RESIZABLE:
+ ret = dw_pcie_ep_set_bar_resizable(ep, func_no, epf_bar);
+ break;
+ default:
+ ret = -EINVAL;
+ dev_err(pci->dev, "Invalid BAR type\n");
+ break;
+ }
+
+ if (ret)
+ return ret;
+
+config_atu:
+ if (!(flags & PCI_BASE_ADDRESS_SPACE))
+ type = PCIE_ATU_TYPE_MEM;
+ else
+ type = PCIE_ATU_TYPE_IO;
+
+ ret = dw_pcie_ep_inbound_atu(ep, func_no, type, epf_bar->phys_addr, bar,
+ size);
+ if (ret)
+ return ret;
+
+ ep->epf_bar[bar] = epf_bar;
+
+ return 0;
+}
+
static int dw_pcie_find_index(struct dw_pcie_ep *ep, phys_addr_t addr,
u32 *atu_index)
{
u32 index;
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- for (index = 0; index < pci->num_ob_windows; index++) {
+ for_each_set_bit(index, ep->ob_window_map, pci->num_ob_windows) {
if (ep->outbound_addr[index] != addr)
continue;
*atu_index = index;
@@ -258,6 +461,20 @@ static int dw_pcie_find_index(struct dw_pcie_ep *ep, phys_addr_t addr,
return -EINVAL;
}
+static u64 dw_pcie_ep_align_addr(struct pci_epc *epc, u64 pci_addr,
+ size_t *pci_size, size_t *offset)
+{
+ struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ u64 mask = pci->region_align - 1;
+ size_t ofst = pci_addr & mask;
+
+ *pci_size = ALIGN(ofst + *pci_size, epc->mem->window.page_size);
+ *offset = ofst;
+
+ return pci_addr & ~mask;
+}
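
A worked example of the alignment math above, assuming (hypothetically)
region_align = 64 KiB, a 4 KiB window page_size, and an incoming *pci_size of
4 bytes:

/*
 * pci_addr   = 0x12345678, mask = 0xffff
 * ofst       = 0x5678                      (pci_addr & mask)
 * *pci_size  = ALIGN(0x5678 + 4, 0x1000)   = 0x6000
 * *offset    = 0x5678
 * return     = 0x12340000                  (pci_addr & ~mask)
 *
 * The caller maps 0x6000 bytes at the aligned base and touches the real
 * target at base + 0x5678.
 */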
+
static void dw_pcie_ep_unmap_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
phys_addr_t addr)
{
@@ -266,10 +483,12 @@ static void dw_pcie_ep_unmap_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- ret = dw_pcie_find_index(ep, addr, &atu_index);
+ ret = dw_pcie_find_index(ep, addr - pci->parent_bus_offset,
+ &atu_index);
if (ret < 0)
return;
+ ep->outbound_addr[atu_index] = 0;
dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_OB, atu_index);
clear_bit(atu_index, ep->ob_window_map);
}
@@ -280,8 +499,14 @@ static int dw_pcie_ep_map_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
int ret;
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
-
- ret = dw_pcie_ep_outbound_atu(ep, func_no, addr, pci_addr, size);
+ struct dw_pcie_ob_atu_cfg atu = { 0 };
+
+ atu.func_no = func_no;
+ atu.type = PCIE_ATU_TYPE_MEM;
+ atu.parent_bus_addr = addr - pci->parent_bus_offset;
+ atu.pci_addr = pci_addr;
+ atu.size = size;
+ ret = dw_pcie_ep_outbound_atu(ep, &atu);
if (ret) {
dev_err(pci->dev, "Failed to enable address\n");
return ret;
@@ -428,6 +653,7 @@ static const struct pci_epc_ops epc_ops = {
.write_header = dw_pcie_ep_write_header,
.set_bar = dw_pcie_ep_set_bar,
.clear_bar = dw_pcie_ep_clear_bar,
+ .align_addr = dw_pcie_ep_align_addr,
.map_addr = dw_pcie_ep_map_addr,
.unmap_addr = dw_pcie_ep_unmap_addr,
.set_msi = dw_pcie_ep_set_msi,
@@ -440,6 +666,13 @@ static const struct pci_epc_ops epc_ops = {
.get_features = dw_pcie_ep_get_features,
};
+/**
+ * dw_pcie_ep_raise_intx_irq - Raise INTx IRQ to the host
+ * @ep: DWC EP device
+ * @func_no: Function number of the endpoint
+ *
+ * Return: 0 if success, errno otherwise.
+ */
int dw_pcie_ep_raise_intx_irq(struct dw_pcie_ep *ep, u8 func_no)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
@@ -451,13 +684,22 @@ int dw_pcie_ep_raise_intx_irq(struct dw_pcie_ep *ep, u8 func_no)
}
EXPORT_SYMBOL_GPL(dw_pcie_ep_raise_intx_irq);
+/**
+ * dw_pcie_ep_raise_msi_irq - Raise MSI IRQ to the host
+ * @ep: DWC EP device
+ * @func_no: Function number of the endpoint
+ * @interrupt_num: Interrupt number to be raised
+ *
+ * Return: 0 if success, errno otherwise.
+ */
int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
u8 interrupt_num)
{
u32 msg_addr_lower, msg_addr_upper, reg;
struct dw_pcie_ep_func *ep_func;
struct pci_epc *epc = ep->epc;
- unsigned int aligned_offset;
+ size_t map_size = sizeof(u32);
+ size_t offset;
u16 msg_ctrl, msg_data;
bool has_upper;
u64 msg_addr;
@@ -485,14 +727,13 @@ int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
}
msg_addr = ((u64)msg_addr_upper) << 32 | msg_addr_lower;
- aligned_offset = msg_addr & (epc->mem->window.page_size - 1);
- msg_addr = ALIGN_DOWN(msg_addr, epc->mem->window.page_size);
+ msg_addr = dw_pcie_ep_align_addr(epc, msg_addr, &map_size, &offset);
ret = dw_pcie_ep_map_addr(epc, func_no, 0, ep->msi_mem_phys, msg_addr,
- epc->mem->window.page_size);
+ map_size);
if (ret)
return ret;
- writel(msg_data | (interrupt_num - 1), ep->msi_mem + aligned_offset);
+ writel(msg_data | (interrupt_num - 1), ep->msi_mem + offset);
dw_pcie_ep_unmap_addr(epc, func_no, 0, ep->msi_mem_phys);
@@ -500,6 +741,15 @@ int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
}
EXPORT_SYMBOL_GPL(dw_pcie_ep_raise_msi_irq);
+/**
+ * dw_pcie_ep_raise_msix_irq_doorbell - Raise MSI-X to the host using Doorbell
+ * method
+ * @ep: DWC EP device
+ * @func_no: Function number of the endpoint device
+ * @interrupt_num: Interrupt number to be raised
+ *
+ * Return: 0 if success, errno otherwise.
+ */
int dw_pcie_ep_raise_msix_irq_doorbell(struct dw_pcie_ep *ep, u8 func_no,
u16 interrupt_num)
{
@@ -519,6 +769,14 @@ int dw_pcie_ep_raise_msix_irq_doorbell(struct dw_pcie_ep *ep, u8 func_no,
return 0;
}
+/**
+ * dw_pcie_ep_raise_msix_irq - Raise MSI-X to the host
+ * @ep: DWC EP device
+ * @func_no: Function number of the endpoint device
+ * @interrupt_num: Interrupt number to be raised
+ *
+ * Return: 0 if success, errno otherwise.
+ */
int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
u16 interrupt_num)
{
@@ -526,8 +784,9 @@ int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
struct pci_epf_msix_tbl *msix_tbl;
struct dw_pcie_ep_func *ep_func;
struct pci_epc *epc = ep->epc;
+ size_t map_size = sizeof(u32);
+ size_t offset;
u32 reg, msg_data, vec_ctrl;
- unsigned int aligned_offset;
u32 tbl_offset;
u64 msg_addr;
int ret;
@@ -552,63 +811,125 @@ int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
return -EPERM;
}
- aligned_offset = msg_addr & (epc->mem->window.page_size - 1);
- msg_addr = ALIGN_DOWN(msg_addr, epc->mem->window.page_size);
+ msg_addr = dw_pcie_ep_align_addr(epc, msg_addr, &map_size, &offset);
ret = dw_pcie_ep_map_addr(epc, func_no, 0, ep->msi_mem_phys, msg_addr,
- epc->mem->window.page_size);
+ map_size);
if (ret)
return ret;
- writel(msg_data, ep->msi_mem + aligned_offset);
+ writel(msg_data, ep->msi_mem + offset);
dw_pcie_ep_unmap_addr(epc, func_no, 0, ep->msi_mem_phys);
return 0;
}
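
Both the MSI and MSI-X paths above follow the same doorbell sequence;
condensed, with error handling omitted and all names taken from this file:

size_t map_size = sizeof(u32), offset;

msg_addr = dw_pcie_ep_align_addr(epc, msg_addr, &map_size, &offset);
dw_pcie_ep_map_addr(epc, func_no, 0, ep->msi_mem_phys, msg_addr, map_size);
writel(msg_data, ep->msi_mem + offset);		/* the actual doorbell write */
dw_pcie_ep_unmap_addr(epc, func_no, 0, ep->msi_mem_phys);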
-void dw_pcie_ep_exit(struct dw_pcie_ep *ep)
+/**
+ * dw_pcie_ep_cleanup - Cleanup DWC EP resources after fundamental reset
+ * @ep: DWC EP device
+ *
+ * Cleans up DWC EP specific resources (e.g. eDMA) after a fundamental reset
+ * such as PERST#. Note that this API is only applicable to drivers supporting
+ * PERST# or any other method of fundamental reset.
+ */
+void dw_pcie_ep_cleanup(struct dw_pcie_ep *ep)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- struct pci_epc *epc = ep->epc;
+ dwc_pcie_debugfs_deinit(pci);
dw_pcie_edma_remove(pci);
+}
+EXPORT_SYMBOL_GPL(dw_pcie_ep_cleanup);
+
+/**
+ * dw_pcie_ep_deinit - Deinitialize the endpoint device
+ * @ep: DWC EP device
+ *
+ * Deinitialize the endpoint device. The EPC device is not destroyed here,
+ * since that is taken care of by devres.
+ */
+void dw_pcie_ep_deinit(struct dw_pcie_ep *ep)
+{
+ struct pci_epc *epc = ep->epc;
+
+ dw_pcie_ep_cleanup(ep);
pci_epc_mem_free_addr(epc, ep->msi_mem_phys, ep->msi_mem,
epc->mem->window.page_size);
pci_epc_mem_exit(epc);
-
- if (ep->ops->deinit)
- ep->ops->deinit(ep);
}
-EXPORT_SYMBOL_GPL(dw_pcie_ep_exit);
+EXPORT_SYMBOL_GPL(dw_pcie_ep_deinit);
-static unsigned int dw_pcie_ep_find_ext_capability(struct dw_pcie *pci, int cap)
+static void dw_pcie_ep_init_non_sticky_registers(struct dw_pcie *pci)
{
- u32 header;
- int pos = PCI_CFG_SPACE_SIZE;
+ struct dw_pcie_ep *ep = &pci->ep;
+ unsigned int offset;
+ unsigned int nbars;
+ enum pci_barno bar;
+ u32 reg, i, val;
+
+ offset = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_REBAR);
- while (pos) {
- header = dw_pcie_readl_dbi(pci, pos);
- if (PCI_EXT_CAP_ID(header) == cap)
- return pos;
+ dw_pcie_dbi_ro_wr_en(pci);
- pos = PCI_EXT_CAP_NEXT(header);
- if (!pos)
- break;
+ if (offset) {
+ reg = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL);
+ nbars = (reg & PCI_REBAR_CTRL_NBAR_MASK) >>
+ PCI_REBAR_CTRL_NBAR_SHIFT;
+
+ /*
+	 * PCIe r6.0, sec 7.8.6.2 requires us to support at least one
+ * size in the range from 1 MB to 512 GB. Advertise support
+ * for 1 MB BAR size only.
+ *
+ * For a BAR that has been configured via dw_pcie_ep_set_bar(),
+ * advertise support for only that size instead.
+ */
+ for (i = 0; i < nbars; i++, offset += PCI_REBAR_CTRL) {
+ /*
+ * While the RESBAR_CAP_REG_* fields are sticky, the
+ * RESBAR_CTRL_REG_BAR_SIZE field is non-sticky (it is
+ * sticky in certain versions of DWC PCIe, but not all).
+ *
+ * RESBAR_CTRL_REG_BAR_SIZE is updated automatically by
+ * the controller when RESBAR_CAP_REG is written, which
+ * is why RESBAR_CAP_REG is written here.
+ */
+ val = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL);
+ bar = val & PCI_REBAR_CTRL_BAR_IDX;
+ if (ep->epf_bar[bar])
+ pci_epc_bar_size_to_rebar_cap(ep->epf_bar[bar]->size, &val);
+ else
+ val = BIT(4);
+
+ dw_pcie_writel_dbi(pci, offset + PCI_REBAR_CAP, val);
+ }
}
- return 0;
+ dw_pcie_setup(pci);
+ dw_pcie_dbi_ro_wr_dis(pci);
}
-int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep)
+/**
+ * dw_pcie_ep_init_registers - Initialize DWC EP specific registers
+ * @ep: DWC EP device
+ *
+ * Initialize the registers (CSRs) specific to DWC EP. This API should be called
+ * only when the endpoint receives an active refclk (either from host or
+ * generated locally).
+ */
+int dw_pcie_ep_init_registers(struct dw_pcie_ep *ep)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- unsigned int offset, ptm_cap_base;
- unsigned int nbars;
+ struct dw_pcie_ep_func *ep_func;
+ struct device *dev = pci->dev;
+ struct pci_epc *epc = ep->epc;
+ u32 ptm_cap_base, reg;
u8 hdr_type;
- u32 reg;
- int i;
+ u8 func_no;
+ void *addr;
+ int ret;
hdr_type = dw_pcie_readb_dbi(pci, PCI_HEADER_TYPE) &
PCI_HEADER_TYPE_MASK;
@@ -619,25 +940,61 @@ int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep)
return -EIO;
}
- offset = dw_pcie_ep_find_ext_capability(pci, PCI_EXT_CAP_ID_REBAR);
- ptm_cap_base = dw_pcie_ep_find_ext_capability(pci, PCI_EXT_CAP_ID_PTM);
+ dw_pcie_version_detect(pci);
- dw_pcie_dbi_ro_wr_en(pci);
+ dw_pcie_iatu_detect(pci);
- if (offset) {
- reg = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL);
- nbars = (reg & PCI_REBAR_CTRL_NBAR_MASK) >>
- PCI_REBAR_CTRL_NBAR_SHIFT;
+ ret = dw_pcie_edma_detect(pci);
+ if (ret)
+ return ret;
- /*
- * PCIe r6.0, sec 7.8.6.2 require us to support at least one
- * size in the range from 1 MB to 512 GB. Advertise support
- * for 1 MB BAR size only.
- */
- for (i = 0; i < nbars; i++, offset += PCI_REBAR_CTRL)
- dw_pcie_writel_dbi(pci, offset + PCI_REBAR_CAP, BIT(4));
+ ret = -ENOMEM;
+ if (!ep->ib_window_map) {
+ ep->ib_window_map = devm_bitmap_zalloc(dev, pci->num_ib_windows,
+ GFP_KERNEL);
+ if (!ep->ib_window_map)
+ goto err_remove_edma;
+ }
+
+ if (!ep->ob_window_map) {
+ ep->ob_window_map = devm_bitmap_zalloc(dev, pci->num_ob_windows,
+ GFP_KERNEL);
+ if (!ep->ob_window_map)
+ goto err_remove_edma;
+ }
+
+ if (!ep->outbound_addr) {
+ addr = devm_kcalloc(dev, pci->num_ob_windows, sizeof(phys_addr_t),
+ GFP_KERNEL);
+ if (!addr)
+ goto err_remove_edma;
+ ep->outbound_addr = addr;
+ }
+
+ for (func_no = 0; func_no < epc->max_functions; func_no++) {
+ ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
+ if (ep_func)
+ continue;
+
+ ep_func = devm_kzalloc(dev, sizeof(*ep_func), GFP_KERNEL);
+ if (!ep_func)
+ goto err_remove_edma;
+
+ ep_func->func_no = func_no;
+ ep_func->msi_cap = dw_pcie_ep_find_capability(ep, func_no,
+ PCI_CAP_ID_MSI);
+ ep_func->msix_cap = dw_pcie_ep_find_capability(ep, func_no,
+ PCI_CAP_ID_MSIX);
+
+ list_add_tail(&ep_func->list, &ep->func_list);
}
+ if (ep->ops->init)
+ ep->ops->init(ep);
+
+ ptm_cap_base = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_PTM);
+
/*
* PTM responder capability can be disabled only after disabling
* PTM root capability.
@@ -654,28 +1011,65 @@ int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep)
dw_pcie_dbi_ro_wr_dis(pci);
}
- dw_pcie_setup(pci);
- dw_pcie_dbi_ro_wr_dis(pci);
+ dw_pcie_ep_init_non_sticky_registers(pci);
+
+ dwc_pcie_debugfs_init(pci);
return 0;
+
+err_remove_edma:
+ dw_pcie_edma_remove(pci);
+
+ return ret;
}
-EXPORT_SYMBOL_GPL(dw_pcie_ep_init_complete);
+EXPORT_SYMBOL_GPL(dw_pcie_ep_init_registers);
-int dw_pcie_ep_init(struct dw_pcie_ep *ep)
+/**
+ * dw_pcie_ep_linkup - Notify EPF drivers about Link Up event
+ * @ep: DWC EP device
+ */
+void dw_pcie_ep_linkup(struct dw_pcie_ep *ep)
+{
+ struct pci_epc *epc = ep->epc;
+
+ pci_epc_linkup(epc);
+}
+EXPORT_SYMBOL_GPL(dw_pcie_ep_linkup);
+
+/**
+ * dw_pcie_ep_linkdown - Notify EPF drivers about Link Down event
+ * @ep: DWC EP device
+ *
+ * Non-sticky registers are also reinitialized before the notification is
+ * sent to the EPF drivers, since those registers must be programmed again
+ * before the link comes back up.
+ */
+void dw_pcie_ep_linkdown(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct pci_epc *epc = ep->epc;
+
+ /*
+	 * Reinitialize the non-sticky DWC registers, as they will have been
+	 * reset on Link Down. This is specifically needed for drivers not
+	 * supporting PERST#, as they have no other way to reinitialize the
+	 * registers before the link comes back up.
+ */
+ dw_pcie_ep_init_non_sticky_registers(pci);
+
+ pci_epc_linkdown(epc);
+}
+EXPORT_SYMBOL_GPL(dw_pcie_ep_linkdown);
+
+static int dw_pcie_ep_get_resources(struct dw_pcie_ep *ep)
{
- int ret;
- void *addr;
- u8 func_no;
- struct resource *res;
- struct pci_epc *epc;
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
struct device *dev = pci->dev;
struct platform_device *pdev = to_platform_device(dev);
struct device_node *np = dev->of_node;
- const struct pci_epc_features *epc_features;
- struct dw_pcie_ep_func *ep_func;
-
- INIT_LIST_HEAD(&ep->func_list);
+ struct pci_epc *epc = ep->epc;
+ struct resource *res;
+ int ret;
ret = dw_pcie_get_resources(pci);
if (ret)
@@ -688,28 +1082,37 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
ep->phys_base = res->start;
ep->addr_size = resource_size(res);
- if (ep->ops->pre_init)
- ep->ops->pre_init(ep);
-
- dw_pcie_version_detect(pci);
+ /*
+ * artpec6_pcie_cpu_addr_fixup() uses ep->phys_base, so call
+ * dw_pcie_parent_bus_offset() after setting ep->phys_base.
+ */
+ pci->parent_bus_offset = dw_pcie_parent_bus_offset(pci, "addr_space",
+ ep->phys_base);
- dw_pcie_iatu_detect(pci);
+ ret = of_property_read_u8(np, "max-functions", &epc->max_functions);
+ if (ret < 0)
+ epc->max_functions = 1;
- ep->ib_window_map = devm_bitmap_zalloc(dev, pci->num_ib_windows,
- GFP_KERNEL);
- if (!ep->ib_window_map)
- return -ENOMEM;
+ return 0;
+}
- ep->ob_window_map = devm_bitmap_zalloc(dev, pci->num_ob_windows,
- GFP_KERNEL);
- if (!ep->ob_window_map)
- return -ENOMEM;
+/**
+ * dw_pcie_ep_init - Initialize the endpoint device
+ * @ep: DWC EP device
+ *
+ * Initialize the endpoint device. Allocate resources and create the EPC
+ * device with the endpoint framework.
+ *
+ * Return: 0 if success, errno otherwise.
+ */
+int dw_pcie_ep_init(struct dw_pcie_ep *ep)
+{
+ int ret;
+ struct pci_epc *epc;
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct device *dev = pci->dev;
- addr = devm_kcalloc(dev, pci->num_ob_windows, sizeof(phys_addr_t),
- GFP_KERNEL);
- if (!addr)
- return -ENOMEM;
- ep->outbound_addr = addr;
+ INIT_LIST_HEAD(&ep->func_list);
epc = devm_pci_epc_create(dev, &epc_ops);
if (IS_ERR(epc)) {
@@ -720,32 +1123,18 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
ep->epc = epc;
epc_set_drvdata(epc, ep);
- ret = of_property_read_u8(np, "max-functions", &epc->max_functions);
- if (ret < 0)
- epc->max_functions = 1;
-
- for (func_no = 0; func_no < epc->max_functions; func_no++) {
- ep_func = devm_kzalloc(dev, sizeof(*ep_func), GFP_KERNEL);
- if (!ep_func)
- return -ENOMEM;
-
- ep_func->func_no = func_no;
- ep_func->msi_cap = dw_pcie_ep_find_capability(ep, func_no,
- PCI_CAP_ID_MSI);
- ep_func->msix_cap = dw_pcie_ep_find_capability(ep, func_no,
- PCI_CAP_ID_MSIX);
-
- list_add_tail(&ep_func->list, &ep->func_list);
- }
+ ret = dw_pcie_ep_get_resources(ep);
+ if (ret)
+ return ret;
- if (ep->ops->init)
- ep->ops->init(ep);
+ if (ep->ops->pre_init)
+ ep->ops->pre_init(ep);
ret = pci_epc_mem_init(epc, ep->phys_base, ep->addr_size,
ep->page_size);
if (ret < 0) {
dev_err(dev, "Failed to initialize address space\n");
- goto err_ep_deinit;
+ return ret;
}
ep->msi_mem = pci_epc_mem_alloc_addr(epc, &ep->msi_mem_phys,
@@ -756,36 +1145,11 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
goto err_exit_epc_mem;
}
- ret = dw_pcie_edma_detect(pci);
- if (ret)
- goto err_free_epc_mem;
-
- if (ep->ops->get_features) {
- epc_features = ep->ops->get_features(ep);
- if (epc_features->core_init_notifier)
- return 0;
- }
-
- ret = dw_pcie_ep_init_complete(ep);
- if (ret)
- goto err_remove_edma;
-
return 0;
-err_remove_edma:
- dw_pcie_edma_remove(pci);
-
-err_free_epc_mem:
- pci_epc_mem_free_addr(epc, ep->msi_mem_phys, ep->msi_mem,
- epc->mem->window.page_size);
-
err_exit_epc_mem:
pci_epc_mem_exit(epc);
-err_ep_deinit:
- if (ep->ops->deinit)
- ep->ops->deinit(ep);
-
return ret;
}
EXPORT_SYMBOL_GPL(dw_pcie_ep_init);
diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
index d15a5c2d5b48..d1cd48efad43 100644
--- a/drivers/pci/controller/dwc/pcie-designware-host.c
+++ b/drivers/pci/controller/dwc/pcie-designware-host.c
@@ -48,8 +48,9 @@ static struct irq_chip dw_pcie_msi_irq_chip = {
};
static struct msi_domain_info dw_pcie_msi_domain_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI),
+ .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX |
+ MSI_FLAG_MULTI_PCI_MSI,
.chip = &dw_pcie_msi_irq_chip,
};
@@ -116,12 +117,6 @@ static void dw_pci_setup_msi_msg(struct irq_data *d, struct msi_msg *msg)
(int)d->hwirq, msg->address_hi, msg->address_lo);
}
-static int dw_pci_msi_set_affinity(struct irq_data *d,
- const struct cpumask *mask, bool force)
-{
- return -EINVAL;
-}
-
static void dw_pci_bottom_mask(struct irq_data *d)
{
struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
@@ -177,7 +172,6 @@ static struct irq_chip dw_pci_msi_bottom_irq_chip = {
.name = "DWPCI-MSI",
.irq_ack = dw_pci_bottom_ack,
.irq_compose_msi_msg = dw_pci_setup_msi_msg,
- .irq_set_affinity = dw_pci_msi_set_affinity,
.irq_mask = dw_pci_bottom_mask,
.irq_unmask = dw_pci_bottom_unmask,
};
@@ -233,7 +227,7 @@ static const struct irq_domain_ops dw_pcie_msi_domain_ops = {
int dw_pcie_allocate_domains(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct fwnode_handle *fwnode = of_node_to_fwnode(pci->dev->of_node);
+ struct fwnode_handle *fwnode = of_fwnode_handle(pci->dev->of_node);
pp->irq_domain = irq_domain_create_linear(fwnode, pp->num_vectors,
&dw_pcie_msi_domain_ops, pp);
@@ -398,50 +392,95 @@ static int dw_pcie_msi_host_init(struct dw_pcie_rp *pp)
return 0;
}
-int dw_pcie_host_init(struct dw_pcie_rp *pp)
+static void dw_pcie_host_request_msg_tlp_res(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct resource_entry *win;
+ struct resource *res;
+
+ win = resource_list_first_type(&pp->bridge->windows, IORESOURCE_MEM);
+ if (win) {
+ res = devm_kzalloc(pci->dev, sizeof(*res), GFP_KERNEL);
+ if (!res)
+ return;
+
+ /*
+ * Allocate MSG TLP region of size 'region_align' at the end of
+ * the host bridge window.
+ */
+ res->start = win->res->end - pci->region_align + 1;
+ res->end = win->res->end;
+ res->name = "msg";
+ res->flags = win->res->flags | IORESOURCE_BUSY;
+
+ if (!devm_request_resource(pci->dev, win->res, res))
+ pp->msg_res = res;
+ }
+}
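
For example, assuming (hypothetically) a first MEM window spanning
0x40000000-0x40ffffff and region_align = 64 KiB, the reservation above yields:

/*
 * res->start = 0x40ff0000	(win->res->end - region_align + 1)
 * res->end   = 0x40ffffff
 *
 * dw_pcie_iatu_setup() below shrinks the MEM iATU window by this amount,
 * and dw_pcie_pme_turn_off() reuses the carved-out range as the window
 * for the PME_Turn_Off Msg TLP.
 */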
+
+static int dw_pcie_host_get_resources(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct device *dev = pci->dev;
- struct device_node *np = dev->of_node;
struct platform_device *pdev = to_platform_device(dev);
struct resource_entry *win;
- struct pci_host_bridge *bridge;
struct resource *res;
int ret;
- raw_spin_lock_init(&pp->lock);
-
ret = dw_pcie_get_resources(pci);
if (ret)
return ret;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
- if (res) {
- pp->cfg0_size = resource_size(res);
- pp->cfg0_base = res->start;
-
- pp->va_cfg0_base = devm_pci_remap_cfg_resource(dev, res);
- if (IS_ERR(pp->va_cfg0_base))
- return PTR_ERR(pp->va_cfg0_base);
- } else {
- dev_err(dev, "Missing *config* reg space\n");
+ if (!res) {
+ dev_err(dev, "Missing \"config\" reg space\n");
return -ENODEV;
}
- bridge = devm_pci_alloc_host_bridge(dev, 0);
- if (!bridge)
- return -ENOMEM;
+ pp->cfg0_size = resource_size(res);
+ pp->cfg0_base = res->start;
- pp->bridge = bridge;
+ pp->va_cfg0_base = devm_pci_remap_cfg_resource(dev, res);
+ if (IS_ERR(pp->va_cfg0_base))
+ return PTR_ERR(pp->va_cfg0_base);
/* Get the I/O range from DT */
- win = resource_list_first_type(&bridge->windows, IORESOURCE_IO);
+ win = resource_list_first_type(&pp->bridge->windows, IORESOURCE_IO);
if (win) {
pp->io_size = resource_size(win->res);
pp->io_bus_addr = win->res->start - win->offset;
pp->io_base = pci_pio_to_address(win->res->start);
}
+ /*
+ * visconti_pcie_cpu_addr_fixup() uses pp->io_base, so we have to
+ * call dw_pcie_parent_bus_offset() after setting pp->io_base.
+ */
+ pci->parent_bus_offset = dw_pcie_parent_bus_offset(pci, "config",
+ pp->cfg0_base);
+ return 0;
+}
+
+int dw_pcie_host_init(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct device *dev = pci->dev;
+ struct device_node *np = dev->of_node;
+ struct pci_host_bridge *bridge;
+ int ret;
+
+ raw_spin_lock_init(&pp->lock);
+
+ bridge = devm_pci_alloc_host_bridge(dev, 0);
+ if (!bridge)
+ return -ENOMEM;
+
+ pp->bridge = bridge;
+
+ ret = dw_pcie_host_get_resources(pp);
+ if (ret)
+ return ret;
+
/* Set default bus ops */
bridge->ops = &dw_pcie_ops;
bridge->child_ops = &dw_child_pcie_ops;
@@ -454,8 +493,8 @@ int dw_pcie_host_init(struct dw_pcie_rp *pp)
if (pci_msi_enabled()) {
pp->has_msi_ctrl = !(pp->ops->msi_init ||
- of_property_read_bool(np, "msi-parent") ||
- of_property_read_bool(np, "msi-map"));
+ of_property_present(np, "msi-parent") ||
+ of_property_present(np, "msi-map"));
/*
* For the has_msi_ctrl case the default assignment is handled
@@ -484,6 +523,18 @@ int dw_pcie_host_init(struct dw_pcie_rp *pp)
dw_pcie_iatu_detect(pci);
+ /*
+ * Allocate the resource for MSG TLP before programming the iATU
+ * outbound window in dw_pcie_setup_rc(). Since the allocation depends
+ * on the value of 'region_align', this has to be done after
+ * dw_pcie_iatu_detect().
+ *
+ * Glue drivers need to set 'use_atu_msg' before dw_pcie_host_init() to
+ * make use of the generic MSG TLP implementation.
+ */
+ if (pp->use_atu_msg)
+ dw_pcie_host_request_msg_tlp_res(pp);
+
ret = dw_pcie_edma_detect(pci);
if (ret)
goto err_free_msi;
@@ -498,8 +549,14 @@ int dw_pcie_host_init(struct dw_pcie_rp *pp)
goto err_remove_edma;
}
- /* Ignore errors, the link may come up later */
- dw_pcie_wait_for_link(pci);
+ /*
+ * Note: Skip the link up delay only when a Link Up IRQ is present.
+ * If there is no Link Up IRQ, we should not bypass the delay
+ * because that would require users to manually rescan for devices.
+ */
+ if (!pp->use_linkup_irq)
+ /* Ignore errors, the link may come up later */
+ dw_pcie_wait_for_link(pci);
bridge->sysdata = pp;
@@ -510,6 +567,8 @@ int dw_pcie_host_init(struct dw_pcie_rp *pp)
if (pp->ops->post_init)
pp->ops->post_init(pp);
+ dwc_pcie_debugfs_init(pci);
+
return 0;
err_stop_link:
@@ -534,6 +593,8 @@ void dw_pcie_host_deinit(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ dwc_pcie_debugfs_deinit(pci);
+
pci_stop_root_bus(pp->bridge->bus);
pci_remove_root_bus(pp->bridge->bus);
@@ -554,6 +615,7 @@ static void __iomem *dw_pcie_other_conf_map_bus(struct pci_bus *bus,
{
struct dw_pcie_rp *pp = bus->sysdata;
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct dw_pcie_ob_atu_cfg atu = { 0 };
int type, ret;
u32 busdev;
@@ -576,8 +638,12 @@ static void __iomem *dw_pcie_other_conf_map_bus(struct pci_bus *bus,
else
type = PCIE_ATU_TYPE_CFG1;
- ret = dw_pcie_prog_outbound_atu(pci, 0, type, pp->cfg0_base, busdev,
- pp->cfg0_size);
+ atu.type = type;
+ atu.parent_bus_addr = pp->cfg0_base - pci->parent_bus_offset;
+ atu.pci_addr = busdev;
+ atu.size = pp->cfg0_size;
+
+ ret = dw_pcie_prog_outbound_atu(pci, &atu);
if (ret)
return NULL;
@@ -589,6 +655,7 @@ static int dw_pcie_rd_other_conf(struct pci_bus *bus, unsigned int devfn,
{
struct dw_pcie_rp *pp = bus->sysdata;
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct dw_pcie_ob_atu_cfg atu = { 0 };
int ret;
ret = pci_generic_config_read(bus, devfn, where, size, val);
@@ -596,9 +663,12 @@ static int dw_pcie_rd_other_conf(struct pci_bus *bus, unsigned int devfn,
return ret;
if (pp->cfg0_io_shared) {
- ret = dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO,
- pp->io_base, pp->io_bus_addr,
- pp->io_size);
+ atu.type = PCIE_ATU_TYPE_IO;
+ atu.parent_bus_addr = pp->io_base - pci->parent_bus_offset;
+ atu.pci_addr = pp->io_bus_addr;
+ atu.size = pp->io_size;
+
+ ret = dw_pcie_prog_outbound_atu(pci, &atu);
if (ret)
return PCIBIOS_SET_FAILED;
}
@@ -611,6 +681,7 @@ static int dw_pcie_wr_other_conf(struct pci_bus *bus, unsigned int devfn,
{
struct dw_pcie_rp *pp = bus->sysdata;
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct dw_pcie_ob_atu_cfg atu = { 0 };
int ret;
ret = pci_generic_config_write(bus, devfn, where, size, val);
@@ -618,9 +689,12 @@ static int dw_pcie_wr_other_conf(struct pci_bus *bus, unsigned int devfn,
return ret;
if (pp->cfg0_io_shared) {
- ret = dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO,
- pp->io_base, pp->io_bus_addr,
- pp->io_size);
+ atu.type = PCIE_ATU_TYPE_IO;
+ atu.parent_bus_addr = pp->io_base - pci->parent_bus_offset;
+ atu.pci_addr = pp->io_bus_addr;
+ atu.size = pp->io_size;
+
+ ret = dw_pcie_prog_outbound_atu(pci, &atu);
if (ret)
return PCIBIOS_SET_FAILED;
}
@@ -655,6 +729,7 @@ static struct pci_ops dw_pcie_ops = {
static int dw_pcie_iatu_setup(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct dw_pcie_ob_atu_cfg atu = { 0 };
struct resource_entry *entry;
int i, ret;
@@ -682,10 +757,19 @@ static int dw_pcie_iatu_setup(struct dw_pcie_rp *pp)
if (pci->num_ob_windows <= ++i)
break;
- ret = dw_pcie_prog_outbound_atu(pci, i, PCIE_ATU_TYPE_MEM,
- entry->res->start,
- entry->res->start - entry->offset,
- resource_size(entry->res));
+ atu.index = i;
+ atu.type = PCIE_ATU_TYPE_MEM;
+ atu.parent_bus_addr = entry->res->start - pci->parent_bus_offset;
+ atu.pci_addr = entry->res->start - entry->offset;
+
+ /* Adjust iATU size if MSG TLP region was allocated before */
+ if (pp->msg_res && pp->msg_res->parent == entry->res)
+ atu.size = resource_size(entry->res) -
+ resource_size(pp->msg_res);
+ else
+ atu.size = resource_size(entry->res);
+
+ ret = dw_pcie_prog_outbound_atu(pci, &atu);
if (ret) {
dev_err(pci->dev, "Failed to set MEM range %pr\n",
entry->res);
@@ -695,10 +779,13 @@ static int dw_pcie_iatu_setup(struct dw_pcie_rp *pp)
if (pp->io_size) {
if (pci->num_ob_windows > ++i) {
- ret = dw_pcie_prog_outbound_atu(pci, i, PCIE_ATU_TYPE_IO,
- pp->io_base,
- pp->io_bus_addr,
- pp->io_size);
+ atu.index = i;
+ atu.type = PCIE_ATU_TYPE_IO;
+ atu.parent_bus_addr = pp->io_base - pci->parent_bus_offset;
+ atu.pci_addr = pp->io_bus_addr;
+ atu.size = pp->io_size;
+
+ ret = dw_pcie_prog_outbound_atu(pci, &atu);
if (ret) {
dev_err(pci->dev, "Failed to set IO range %pr\n",
entry->res);
@@ -713,6 +800,8 @@ static int dw_pcie_iatu_setup(struct dw_pcie_rp *pp)
dev_warn(pci->dev, "Ranges exceed outbound iATU size (%d)\n",
pci->num_ob_windows);
+ pp->msg_atu_index = i;
+
i = 0;
resource_list_for_each_entry(entry, &pp->bridge->dma_ranges) {
if (resource_type(entry->res) != IORESOURCE_MEM)
@@ -818,6 +907,42 @@ int dw_pcie_setup_rc(struct dw_pcie_rp *pp)
}
EXPORT_SYMBOL_GPL(dw_pcie_setup_rc);
+static int dw_pcie_pme_turn_off(struct dw_pcie *pci)
+{
+ struct dw_pcie_ob_atu_cfg atu = { 0 };
+ void __iomem *mem;
+ int ret;
+
+ if (pci->num_ob_windows <= pci->pp.msg_atu_index)
+ return -ENOSPC;
+
+ if (!pci->pp.msg_res)
+ return -ENOSPC;
+
+ atu.code = PCIE_MSG_CODE_PME_TURN_OFF;
+ atu.routing = PCIE_MSG_TYPE_R_BC;
+ atu.type = PCIE_ATU_TYPE_MSG;
+ atu.size = resource_size(pci->pp.msg_res);
+ atu.index = pci->pp.msg_atu_index;
+
+ atu.parent_bus_addr = pci->pp.msg_res->start - pci->parent_bus_offset;
+
+ ret = dw_pcie_prog_outbound_atu(pci, &atu);
+ if (ret)
+ return ret;
+
+ mem = ioremap(pci->pp.msg_res->start, pci->region_align);
+ if (!mem)
+ return -ENOMEM;
+
+ /* A dummy write is converted to a Msg TLP */
+ writel(0, mem);
+
+ iounmap(mem);
+
+ return 0;
+}
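
A note on why the dummy write works, based on the ATU programming shown in
dw_pcie_prog_outbound_atu() further below:

/*
 * The window above is PCIE_ATU_TYPE_MSG with PCIE_ATU_INHIBIT_PAYLOAD set,
 * so the MWr targeting it never reaches the link as a memory write: the
 * iATU turns it into a data-less Msg TLP whose message code is
 * PCIE_MSG_CODE_PME_TURN_OFF, broadcast per PCIE_MSG_TYPE_R_BC routing.
 */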
+
int dw_pcie_suspend_noirq(struct dw_pcie *pci)
{
u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
@@ -831,22 +956,33 @@ int dw_pcie_suspend_noirq(struct dw_pcie *pci)
if (dw_pcie_readw_dbi(pci, offset + PCI_EXP_LNKCTL) & PCI_EXP_LNKCTL_ASPM_L1)
return 0;
- if (dw_pcie_get_ltssm(pci) <= DW_PCIE_LTSSM_DETECT_ACT)
- return 0;
-
- if (!pci->pp.ops->pme_turn_off)
- return 0;
-
- pci->pp.ops->pme_turn_off(&pci->pp);
+ if (pci->pp.ops->pme_turn_off) {
+ pci->pp.ops->pme_turn_off(&pci->pp);
+ } else {
+ ret = dw_pcie_pme_turn_off(pci);
+ if (ret)
+ return ret;
+ }
- ret = read_poll_timeout(dw_pcie_get_ltssm, val, val == DW_PCIE_LTSSM_L2_IDLE,
+ ret = read_poll_timeout(dw_pcie_get_ltssm, val,
+ val == DW_PCIE_LTSSM_L2_IDLE ||
+ val <= DW_PCIE_LTSSM_DETECT_WAIT,
PCIE_PME_TO_L2_TIMEOUT_US/10,
PCIE_PME_TO_L2_TIMEOUT_US, false, pci);
if (ret) {
+		/* On timeout the LTSSM is in neither DETECT nor POLL, so always log */
dev_err(pci->dev, "Timeout waiting for L2 entry! LTSSM: 0x%x\n", val);
return ret;
}
+ /*
+ * Per PCIe r6.0, sec 5.3.3.2.1, software should wait at least
+ * 100ns after L2/L3 Ready before turning off refclock and
+	 * 100 ns after L2/L3 Ready before turning off refclk and
+ */
+ udelay(1);
+
+ dw_pcie_stop_link(pci);
if (pci->pp.ops->deinit)
pci->pp.ops->deinit(&pci->pp);
diff --git a/drivers/pci/controller/dwc/pcie-designware-plat.c b/drivers/pci/controller/dwc/pcie-designware-plat.c
index 778588b4be70..771b9d9be077 100644
--- a/drivers/pci/controller/dwc/pcie-designware-plat.c
+++ b/drivers/pci/controller/dwc/pcie-designware-plat.c
@@ -145,6 +145,17 @@ static int dw_plat_pcie_probe(struct platform_device *pdev)
pci->ep.ops = &pcie_ep_ops;
ret = dw_pcie_ep_init(&pci->ep);
+ if (ret)
+ return ret;
+
+ ret = dw_pcie_ep_init_registers(&pci->ep);
+ if (ret) {
+ dev_err(dev, "Failed to initialize DWC endpoint registers\n");
+ dw_pcie_ep_deinit(&pci->ep);
+ }
+
+ pci_epc_init_notify(pci->ep.epc);
+
break;
default:
dev_err(dev, "INVALID device type %d\n", dw_plat_pcie->mode);
diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c
index 250cf7f40b85..97d76d3dc066 100644
--- a/drivers/pci/controller/dwc/pcie-designware.c
+++ b/drivers/pci/controller/dwc/pcie-designware.c
@@ -16,6 +16,8 @@
#include <linux/gpio/consumer.h>
#include <linux/ioport.h>
#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/pcie-dwc.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/types.h>
@@ -112,6 +114,7 @@ int dw_pcie_get_resources(struct dw_pcie *pci)
pci->dbi_base = devm_pci_remap_cfg_resource(pci->dev, res);
if (IS_ERR(pci->dbi_base))
return PTR_ERR(pci->dbi_base);
+ pci->dbi_phys_addr = res->start;
}
/* DBI2 is mainly useful for the endpoint controller */
@@ -134,6 +137,7 @@ int dw_pcie_get_resources(struct dw_pcie *pci)
pci->atu_base = devm_ioremap_resource(pci->dev, res);
if (IS_ERR(pci->atu_base))
return PTR_ERR(pci->atu_base);
+ pci->atu_phys_addr = res->start;
} else {
pci->atu_base = pci->dbi_base + DEFAULT_DBI_ATU_OFFSET;
}
@@ -166,8 +170,8 @@ int dw_pcie_get_resources(struct dw_pcie *pci)
return ret;
}
- if (pci->link_gen < 1)
- pci->link_gen = of_pci_get_max_link_speed(np);
+ if (pci->max_link_speed < 1)
+ pci->max_link_speed = of_pci_get_max_link_speed(np);
of_property_read_u32(np, "num-lanes", &pci->num_lanes);
@@ -281,6 +285,51 @@ u16 dw_pcie_find_ext_capability(struct dw_pcie *pci, u8 cap)
}
EXPORT_SYMBOL_GPL(dw_pcie_find_ext_capability);
+static u16 __dw_pcie_find_vsec_capability(struct dw_pcie *pci, u16 vendor_id,
+ u16 vsec_id)
+{
+ u16 vsec = 0;
+ u32 header;
+
+ if (vendor_id != dw_pcie_readw_dbi(pci, PCI_VENDOR_ID))
+ return 0;
+
+ while ((vsec = dw_pcie_find_next_ext_capability(pci, vsec,
+ PCI_EXT_CAP_ID_VNDR))) {
+ header = dw_pcie_readl_dbi(pci, vsec + PCI_VNDR_HEADER);
+ if (PCI_VNDR_HEADER_ID(header) == vsec_id)
+ return vsec;
+ }
+
+ return 0;
+}
+
+static u16 dw_pcie_find_vsec_capability(struct dw_pcie *pci,
+ const struct dwc_pcie_vsec_id *vsec_ids)
+{
+ const struct dwc_pcie_vsec_id *vid;
+ u16 vsec;
+ u32 header;
+
+ for (vid = vsec_ids; vid->vendor_id; vid++) {
+ vsec = __dw_pcie_find_vsec_capability(pci, vid->vendor_id,
+ vid->vsec_id);
+ if (vsec) {
+ header = dw_pcie_readl_dbi(pci, vsec + PCI_VNDR_HEADER);
+ if (PCI_VNDR_HEADER_REV(header) == vid->vsec_rev)
+ return vsec;
+ }
+ }
+
+ return 0;
+}
+
+u16 dw_pcie_find_rasdes_capability(struct dw_pcie *pci)
+{
+ return dw_pcie_find_vsec_capability(pci, dwc_pcie_rasdes_vsec_ids);
+}
+EXPORT_SYMBOL_GPL(dw_pcie_find_rasdes_capability);
+
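A worked decode of the VSEC header fields used above, for a hypothetical
header value (field layout per PCI_VNDR_HEADER_* in <uapi/linux/pci_regs.h>):

/*
 * header = 0x010a0002 read at (vsec + PCI_VNDR_HEADER):
 *   PCI_VNDR_HEADER_ID(header)  = 0x0002  (bits 15:0,  vendor-defined ID)
 *   PCI_VNDR_HEADER_REV(header) = 0xa     (bits 19:16, VSEC revision)
 *   PCI_VNDR_HEADER_LEN(header) = 0x010   (bits 31:20, length in bytes)
 *
 * A dwc_pcie_vsec_id entry matches only when the device's vendor ID, the
 * VSEC ID and the VSEC revision all agree.
 */
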
int dw_pcie_read(void __iomem *addr, int size, u32 *val)
{
if (!IS_ALIGNED((uintptr_t)addr, size)) {
@@ -465,56 +514,58 @@ static inline u32 dw_pcie_enable_ecrc(u32 val)
return val | PCIE_ATU_TD;
}
-static int __dw_pcie_prog_outbound_atu(struct dw_pcie *pci, u8 func_no,
- int index, int type, u64 cpu_addr,
- u64 pci_addr, u64 size)
+int dw_pcie_prog_outbound_atu(struct dw_pcie *pci,
+ const struct dw_pcie_ob_atu_cfg *atu)
{
+ u64 parent_bus_addr = atu->parent_bus_addr;
u32 retries, val;
u64 limit_addr;
- if (pci->ops && pci->ops->cpu_addr_fixup)
- cpu_addr = pci->ops->cpu_addr_fixup(pci, cpu_addr);
+ limit_addr = parent_bus_addr + atu->size - 1;
- limit_addr = cpu_addr + size - 1;
-
- if ((limit_addr & ~pci->region_limit) != (cpu_addr & ~pci->region_limit) ||
- !IS_ALIGNED(cpu_addr, pci->region_align) ||
- !IS_ALIGNED(pci_addr, pci->region_align) || !size) {
+ if ((limit_addr & ~pci->region_limit) != (parent_bus_addr & ~pci->region_limit) ||
+ !IS_ALIGNED(parent_bus_addr, pci->region_align) ||
+ !IS_ALIGNED(atu->pci_addr, pci->region_align) || !atu->size) {
return -EINVAL;
}
- dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_LOWER_BASE,
- lower_32_bits(cpu_addr));
- dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_UPPER_BASE,
- upper_32_bits(cpu_addr));
+ dw_pcie_writel_atu_ob(pci, atu->index, PCIE_ATU_LOWER_BASE,
+ lower_32_bits(parent_bus_addr));
+ dw_pcie_writel_atu_ob(pci, atu->index, PCIE_ATU_UPPER_BASE,
+ upper_32_bits(parent_bus_addr));
- dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_LIMIT,
+ dw_pcie_writel_atu_ob(pci, atu->index, PCIE_ATU_LIMIT,
lower_32_bits(limit_addr));
if (dw_pcie_ver_is_ge(pci, 460A))
- dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_UPPER_LIMIT,
+ dw_pcie_writel_atu_ob(pci, atu->index, PCIE_ATU_UPPER_LIMIT,
upper_32_bits(limit_addr));
- dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_LOWER_TARGET,
- lower_32_bits(pci_addr));
- dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_UPPER_TARGET,
- upper_32_bits(pci_addr));
+ dw_pcie_writel_atu_ob(pci, atu->index, PCIE_ATU_LOWER_TARGET,
+ lower_32_bits(atu->pci_addr));
+ dw_pcie_writel_atu_ob(pci, atu->index, PCIE_ATU_UPPER_TARGET,
+ upper_32_bits(atu->pci_addr));
- val = type | PCIE_ATU_FUNC_NUM(func_no);
- if (upper_32_bits(limit_addr) > upper_32_bits(cpu_addr) &&
+ val = atu->type | atu->routing | PCIE_ATU_FUNC_NUM(atu->func_no);
+ if (upper_32_bits(limit_addr) > upper_32_bits(parent_bus_addr) &&
dw_pcie_ver_is_ge(pci, 460A))
val |= PCIE_ATU_INCREASE_REGION_SIZE;
if (dw_pcie_ver_is(pci, 490A))
val = dw_pcie_enable_ecrc(val);
- dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_REGION_CTRL1, val);
+ dw_pcie_writel_atu_ob(pci, atu->index, PCIE_ATU_REGION_CTRL1, val);
- dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_REGION_CTRL2, PCIE_ATU_ENABLE);
+ val = PCIE_ATU_ENABLE;
+ if (atu->type == PCIE_ATU_TYPE_MSG) {
+		/* Only data-less messages are supported for now */
+ val |= PCIE_ATU_INHIBIT_PAYLOAD | atu->code;
+ }
+ dw_pcie_writel_atu_ob(pci, atu->index, PCIE_ATU_REGION_CTRL2, val);
/*
* Make sure ATU enable takes effect before any subsequent config
* and I/O accesses.
*/
for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
- val = dw_pcie_readl_atu_ob(pci, index, PCIE_ATU_REGION_CTRL2);
+ val = dw_pcie_readl_atu_ob(pci, atu->index, PCIE_ATU_REGION_CTRL2);
if (val & PCIE_ATU_ENABLE)
return 0;
@@ -526,21 +577,6 @@ static int __dw_pcie_prog_outbound_atu(struct dw_pcie *pci, u8 func_no,
return -ETIMEDOUT;
}
-int dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
- u64 cpu_addr, u64 pci_addr, u64 size)
-{
- return __dw_pcie_prog_outbound_atu(pci, 0, index, type,
- cpu_addr, pci_addr, size);
-}
-
-int dw_pcie_prog_ep_outbound_atu(struct dw_pcie *pci, u8 func_no, int index,
- int type, u64 cpu_addr, u64 pci_addr,
- u64 size)
-{
- return __dw_pcie_prog_outbound_atu(pci, func_no, index, type,
- cpu_addr, pci_addr, size);
-}
-
static inline u32 dw_pcie_readl_atu_ib(struct dw_pcie *pci, u32 index, u32 reg)
{
return dw_pcie_readl_atu(pci, PCIE_ATU_REGION_DIR_IB, index, reg);
@@ -553,13 +589,13 @@ static inline void dw_pcie_writel_atu_ib(struct dw_pcie *pci, u32 index, u32 reg
}
int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int type,
- u64 cpu_addr, u64 pci_addr, u64 size)
+ u64 parent_bus_addr, u64 pci_addr, u64 size)
{
u64 limit_addr = pci_addr + size - 1;
u32 retries, val;
if ((limit_addr & ~pci->region_limit) != (pci_addr & ~pci->region_limit) ||
- !IS_ALIGNED(cpu_addr, pci->region_align) ||
+ !IS_ALIGNED(parent_bus_addr, pci->region_align) ||
!IS_ALIGNED(pci_addr, pci->region_align) || !size) {
return -EINVAL;
}
@@ -576,9 +612,9 @@ int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int type,
upper_32_bits(limit_addr));
dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_LOWER_TARGET,
- lower_32_bits(cpu_addr));
+ lower_32_bits(parent_bus_addr));
dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_UPPER_TARGET,
- upper_32_bits(cpu_addr));
+ upper_32_bits(parent_bus_addr));
val = type;
if (upper_32_bits(limit_addr) > upper_32_bits(pci_addr) &&
@@ -605,17 +641,18 @@ int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int type,
}
int dw_pcie_prog_ep_inbound_atu(struct dw_pcie *pci, u8 func_no, int index,
- int type, u64 cpu_addr, u8 bar)
+ int type, u64 parent_bus_addr, u8 bar, size_t size)
{
u32 retries, val;
- if (!IS_ALIGNED(cpu_addr, pci->region_align))
+ if (!IS_ALIGNED(parent_bus_addr, pci->region_align) ||
+ !IS_ALIGNED(parent_bus_addr, size))
return -EINVAL;
dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_LOWER_TARGET,
- lower_32_bits(cpu_addr));
+ lower_32_bits(parent_bus_addr));
dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_UPPER_TARGET,
- upper_32_bits(cpu_addr));
+ upper_32_bits(parent_bus_addr));
dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_REGION_CTRL1, type |
PCIE_ATU_FUNC_NUM(func_no));
@@ -655,7 +692,7 @@ int dw_pcie_wait_for_link(struct dw_pcie *pci)
if (dw_pcie_link_up(pci))
break;
- usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
+ msleep(LINK_WAIT_SLEEP_MS);
}
if (retries >= LINK_WAIT_MAX_RETRIES) {
@@ -697,16 +734,27 @@ void dw_pcie_upconfig_setup(struct dw_pcie *pci)
}
EXPORT_SYMBOL_GPL(dw_pcie_upconfig_setup);
-static void dw_pcie_link_set_max_speed(struct dw_pcie *pci, u32 link_gen)
+static void dw_pcie_link_set_max_speed(struct dw_pcie *pci)
{
u32 cap, ctrl2, link_speed;
u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
cap = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
+
+ /*
+ * Even if the platform doesn't want to limit the maximum link speed,
+ * just cache the hardware default value so that the vendor drivers can
+ * use it to do any link specific configuration.
+ */
+ if (pci->max_link_speed < 1) {
+ pci->max_link_speed = FIELD_GET(PCI_EXP_LNKCAP_SLS, cap);
+ return;
+ }
+
ctrl2 = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCTL2);
ctrl2 &= ~PCI_EXP_LNKCTL2_TLS;
- switch (pcie_link_speed[link_gen]) {
+ switch (pcie_link_speed[pci->max_link_speed]) {
case PCIE_SPEED_2_5GT:
link_speed = PCI_EXP_LNKCTL2_TLS_2_5GT;
break;
@@ -880,30 +928,40 @@ static struct dw_edma_plat_ops dw_pcie_edma_ops = {
.irq_vector = dw_pcie_edma_irq_vector,
};
-static int dw_pcie_edma_find_chip(struct dw_pcie *pci)
+static void dw_pcie_edma_init_data(struct dw_pcie *pci)
+{
+ pci->edma.dev = pci->dev;
+
+ if (!pci->edma.ops)
+ pci->edma.ops = &dw_pcie_edma_ops;
+
+ pci->edma.flags |= DW_EDMA_CHIP_LOCAL;
+}
+
+static int dw_pcie_edma_find_mf(struct dw_pcie *pci)
{
u32 val;
/*
+ * Skip the mapping format detection if the glue driver has already
+ * set it. Also ensure that edma.reg_base points to a valid memory
+ * region.
+ */
+ if (pci->edma.mf != EDMA_MF_EDMA_LEGACY)
+ return pci->edma.reg_base ? 0 : -ENODEV;
+
+ /*
* Indirect eDMA CSRs access has been completely removed since v5.40a
* thus no space is now reserved for the eDMA channels viewport and
* former DMA CTRL register is no longer fixed to FFs.
- *
- * Note that Renesas R-Car S4-8's PCIe controllers for unknown reason
- * have zeros in the eDMA CTRL register even though the HW-manual
- * explicitly states there must FFs if the unrolled mapping is enabled.
- * For such cases the low-level drivers are supposed to manually
- * activate the unrolled mapping to bypass the auto-detection procedure.
*/
- if (dw_pcie_ver_is_ge(pci, 540A) || dw_pcie_cap_is(pci, EDMA_UNROLL))
+ if (dw_pcie_ver_is_ge(pci, 540A))
val = 0xFFFFFFFF;
else
val = dw_pcie_readl_dbi(pci, PCIE_DMA_VIEWPORT_BASE + PCIE_DMA_CTRL);
if (val == 0xFFFFFFFF && pci->edma.reg_base) {
pci->edma.mf = EDMA_MF_EDMA_UNROLL;
-
- val = dw_pcie_readl_dma(pci, PCIE_DMA_CTRL);
} else if (val != 0xFFFFFFFF) {
pci->edma.mf = EDMA_MF_EDMA_LEGACY;
@@ -912,15 +970,25 @@ static int dw_pcie_edma_find_chip(struct dw_pcie *pci)
return -ENODEV;
}
- pci->edma.dev = pci->dev;
+ return 0;
+}
- if (!pci->edma.ops)
- pci->edma.ops = &dw_pcie_edma_ops;
+static int dw_pcie_edma_find_channels(struct dw_pcie *pci)
+{
+ u32 val;
- pci->edma.flags |= DW_EDMA_CHIP_LOCAL;
+ /*
+ * Autodetect the read/write channel count only on non-HDMA platforms.
+ * HDMA platforms with native CSR mapping don't support autodetection,
+ * so the glue drivers should have passed a valid count already. If
+ * not, the sanity check below will catch it.
+ */
+ if (pci->edma.mf != EDMA_MF_HDMA_NATIVE) {
+ val = dw_pcie_readl_dma(pci, PCIE_DMA_CTRL);
- pci->edma.ll_wr_cnt = FIELD_GET(PCIE_DMA_NUM_WR_CHAN, val);
- pci->edma.ll_rd_cnt = FIELD_GET(PCIE_DMA_NUM_RD_CHAN, val);
+ pci->edma.ll_wr_cnt = FIELD_GET(PCIE_DMA_NUM_WR_CHAN, val);
+ pci->edma.ll_rd_cnt = FIELD_GET(PCIE_DMA_NUM_RD_CHAN, val);
+ }
/* Sanity check the channels count if the mapping was incorrect */
if (!pci->edma.ll_wr_cnt || pci->edma.ll_wr_cnt > EDMA_MAX_WR_CH ||
@@ -930,11 +998,24 @@ static int dw_pcie_edma_find_chip(struct dw_pcie *pci)
return 0;
}
+static int dw_pcie_edma_find_chip(struct dw_pcie *pci)
+{
+ int ret;
+
+ dw_pcie_edma_init_data(pci);
+
+ ret = dw_pcie_edma_find_mf(pci);
+ if (ret)
+ return ret;
+
+ return dw_pcie_edma_find_channels(pci);
+}
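dw_pcie_edma_find_mf() deliberately honors a mapping format preset by the glue driver. A sketch of such a preset for an HDMA-native controller, mirroring what the pcie-qcom-ep.c hunk later in this diff does (the channel counts shown are that driver's values, not a general default):

	/* In the glue driver's probe, before dw_pcie_edma_detect() runs: */
	pci->edma.mf = EDMA_MF_HDMA_NATIVE;	/* bypass legacy/unroll autodetect */
	pci->edma.ll_wr_cnt = 8;		/* HDMA cannot report its channel counts */
	pci->edma.ll_rd_cnt = 8;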
+
static int dw_pcie_edma_irq_verify(struct dw_pcie *pci)
{
struct platform_device *pdev = to_platform_device(pci->dev);
u16 ch_cnt = pci->edma.ll_wr_cnt + pci->edma.ll_rd_cnt;
- char name[6];
+ char name[15];
int ret;
if (pci->edma.nr_irqs == 1)
@@ -1035,8 +1116,7 @@ void dw_pcie_setup(struct dw_pcie *pci)
{
u32 val;
- if (pci->link_gen > 0)
- dw_pcie_link_set_max_speed(pci, pci->link_gen);
+ dw_pcie_link_set_max_speed(pci);
/* Configure Gen1 N_FTS */
if (pci->n_fts[0]) {
@@ -1069,3 +1149,63 @@ void dw_pcie_setup(struct dw_pcie *pci)
dw_pcie_link_set_max_link_width(pci, pci->num_lanes);
}
+
+resource_size_t dw_pcie_parent_bus_offset(struct dw_pcie *pci,
+ const char *reg_name,
+ resource_size_t cpu_phys_addr)
+{
+ struct device *dev = pci->dev;
+ struct device_node *np = dev->of_node;
+ int index;
+ u64 reg_addr, fixup_addr;
+ u64 (*fixup)(struct dw_pcie *pcie, u64 cpu_addr);
+
+ /* Look up reg_name address on parent bus */
+ index = of_property_match_string(np, "reg-names", reg_name);
+
+ if (index < 0) {
+ dev_err(dev, "No %s in devicetree \"reg\" property\n", reg_name);
+ return 0;
+ }
+
+ of_property_read_reg(np, index, &reg_addr, NULL);
+
+ fixup = pci->ops ? pci->ops->cpu_addr_fixup : NULL;
+ if (fixup) {
+ fixup_addr = fixup(pci, cpu_phys_addr);
+ if (reg_addr == fixup_addr) {
+ dev_info(dev, "%s reg[%d] %#010llx == %#010llx == fixup(cpu %#010llx); %ps is redundant with this devicetree\n",
+ reg_name, index, reg_addr, fixup_addr,
+ (unsigned long long) cpu_phys_addr, fixup);
+ } else {
+ dev_warn(dev, "%s reg[%d] %#010llx != %#010llx == fixup(cpu %#010llx); devicetree is broken\n",
+ reg_name, index, reg_addr, fixup_addr,
+ (unsigned long long) cpu_phys_addr);
+ reg_addr = fixup_addr;
+ }
+
+ return cpu_phys_addr - reg_addr;
+ }
+
+ if (pci->use_parent_dt_ranges) {
+
+ /*
+ * This platform once had a fixup, presumably because it
+ * translates between CPU and PCI controller addresses.
+ * Log a note if devicetree didn't describe a translation.
+ */
+ if (reg_addr == cpu_phys_addr)
+ dev_info(dev, "%s reg[%d] %#010llx == cpu %#010llx\n; no fixup was ever needed for this devicetree\n",
+ reg_name, index, reg_addr,
+ (unsigned long long) cpu_phys_addr);
+ } else {
+ if (reg_addr != cpu_phys_addr) {
+ dev_warn(dev, "%s reg[%d] %#010llx != cpu %#010llx; no fixup and devicetree \"ranges\" is broken, assuming no translation\n",
+ reg_name, index, reg_addr,
+ (unsigned long long) cpu_phys_addr);
+ return 0;
+ }
+ }
+
+ return cpu_phys_addr - reg_addr;
+}
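A sketch of the intended call site in a glue or core init path, assuming the controller's register spaces are listed via reg-names in devicetree; the "atu" region name and the use of the new atu_phys_addr field are illustrative:

	/* Cache the CPU-to-parent-bus offset once the "atu" reg is known. */
	pci->parent_bus_offset = dw_pcie_parent_bus_offset(pci, "atu",
							   pci->atu_phys_addr);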
diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h
index 26dae4837462..56aafdbcdaca 100644
--- a/drivers/pci/controller/dwc/pcie-designware.h
+++ b/drivers/pci/controller/dwc/pcie-designware.h
@@ -51,9 +51,8 @@
/* DWC PCIe controller capabilities */
#define DW_PCIE_CAP_REQ_RES 0
-#define DW_PCIE_CAP_EDMA_UNROLL 1
-#define DW_PCIE_CAP_IATU_UNROLL 2
-#define DW_PCIE_CAP_CDM_CHECK 3
+#define DW_PCIE_CAP_IATU_UNROLL 1
+#define DW_PCIE_CAP_CDM_CHECK 2
#define dw_pcie_cap_is(_pci, _cap) \
test_bit(DW_PCIE_CAP_ ## _cap, &(_pci)->caps)
@@ -63,14 +62,16 @@
/* Parameters for the waiting for link up routine */
#define LINK_WAIT_MAX_RETRIES 10
-#define LINK_WAIT_USLEEP_MIN 90000
-#define LINK_WAIT_USLEEP_MAX 100000
+#define LINK_WAIT_SLEEP_MS 90
/* Parameters for the waiting for iATU enabled routine */
#define LINK_WAIT_MAX_IATU_RETRIES 5
#define LINK_WAIT_IATU 9
/* Synopsys-specific PCIe configuration registers */
+#define PCIE_PORT_FORCE 0x708
+#define PORT_FORCE_DO_DESKEW_FOR_SRIS BIT(23)
+
#define PCIE_PORT_AFR 0x70C
#define PORT_AFR_N_FTS_MASK GENMASK(15, 8)
#define PORT_AFR_N_FTS(n) FIELD_PREP(PORT_AFR_N_FTS_MASK, n)
@@ -92,6 +93,9 @@
#define PORT_LINK_MODE_4_LANES PORT_LINK_MODE(0x7)
#define PORT_LINK_MODE_8_LANES PORT_LINK_MODE(0xf)
+#define PCIE_PORT_LANE_SKEW 0x714
+#define PORT_LANE_SKEW_INSERT_MASK GENMASK(23, 0)
+
#define PCIE_PORT_DEBUG0 0x728
#define PORT_LOGIC_LTSSM_STATE_MASK 0x1f
#define PORT_LOGIC_LTSSM_STATE_L0 0x11
@@ -121,6 +125,19 @@
#define GEN3_RELATED_OFF_GEN3_EQ_DISABLE BIT(16)
#define GEN3_RELATED_OFF_RATE_SHADOW_SEL_SHIFT 24
#define GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK GENMASK(25, 24)
+#define GEN3_RELATED_OFF_RATE_SHADOW_SEL_16_0GT 0x1
+
+#define GEN3_EQ_CONTROL_OFF 0x8A8
+#define GEN3_EQ_CONTROL_OFF_FB_MODE GENMASK(3, 0)
+#define GEN3_EQ_CONTROL_OFF_PHASE23_EXIT_MODE BIT(4)
+#define GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC GENMASK(23, 8)
+#define GEN3_EQ_CONTROL_OFF_FOM_INC_INITIAL_EVAL BIT(24)
+
+#define GEN3_EQ_FB_MODE_DIR_CHANGE_OFF 0x8AC
+#define GEN3_EQ_FMDC_T_MIN_PHASE23 GENMASK(4, 0)
+#define GEN3_EQ_FMDC_N_EVALS GENMASK(9, 5)
+#define GEN3_EQ_FMDC_MAX_PRE_CUSROR_DELTA GENMASK(13, 10)
+#define GEN3_EQ_FMDC_MAX_POST_CUSROR_DELTA GENMASK(17, 14)
#define PCIE_PORT_MULTI_LANE_CTRL 0x8C0
#define PORT_MLTI_UPCFG_SUPPORT BIT(7)
@@ -148,11 +165,13 @@
#define PCIE_ATU_TYPE_IO 0x2
#define PCIE_ATU_TYPE_CFG0 0x4
#define PCIE_ATU_TYPE_CFG1 0x5
+#define PCIE_ATU_TYPE_MSG 0x10
#define PCIE_ATU_TD BIT(8)
#define PCIE_ATU_FUNC_NUM(pf) ((pf) << 20)
#define PCIE_ATU_REGION_CTRL2 0x004
#define PCIE_ATU_ENABLE BIT(31)
#define PCIE_ATU_BAR_MODE_ENABLE BIT(30)
+#define PCIE_ATU_INHIBIT_PAYLOAD BIT(22)
#define PCIE_ATU_FUNC_NUM_MATCH_EN BIT(19)
#define PCIE_ATU_LOWER_BASE 0x008
#define PCIE_ATU_UPPER_BASE 0x00C
@@ -192,6 +211,24 @@
#define PCIE_PL_CHK_REG_ERR_ADDR 0xB28
/*
+ * 16.0 GT/s (Gen 4) lane margining register definitions
+ */
+#define GEN4_LANE_MARGINING_1_OFF 0xB80
+#define MARGINING_MAX_VOLTAGE_OFFSET GENMASK(29, 24)
+#define MARGINING_NUM_VOLTAGE_STEPS GENMASK(22, 16)
+#define MARGINING_MAX_TIMING_OFFSET GENMASK(13, 8)
+#define MARGINING_NUM_TIMING_STEPS GENMASK(5, 0)
+
+#define GEN4_LANE_MARGINING_2_OFF 0xB84
+#define MARGINING_IND_ERROR_SAMPLER BIT(28)
+#define MARGINING_SAMPLE_REPORTING_METHOD BIT(27)
+#define MARGINING_IND_LEFT_RIGHT_TIMING BIT(26)
+#define MARGINING_IND_UP_DOWN_VOLTAGE BIT(25)
+#define MARGINING_VOLTAGE_SUPPORTED BIT(24)
+#define MARGINING_MAXLANES GENMASK(20, 16)
+#define MARGINING_SAMPLE_RATE_TIMING GENMASK(13, 8)
+#define MARGINING_SAMPLE_RATE_VOLTAGE GENMASK(5, 0)
+/*
* iATU Unroll-specific register definitions
* From 4.80 core version the address translation will be made by unroll
*/
@@ -293,12 +330,55 @@ enum dw_pcie_ltssm {
/* Need to align with PCIE_PORT_DEBUG0 bits 0:5 */
DW_PCIE_LTSSM_DETECT_QUIET = 0x0,
DW_PCIE_LTSSM_DETECT_ACT = 0x1,
+ DW_PCIE_LTSSM_POLL_ACTIVE = 0x2,
+ DW_PCIE_LTSSM_POLL_COMPLIANCE = 0x3,
+ DW_PCIE_LTSSM_POLL_CONFIG = 0x4,
+ DW_PCIE_LTSSM_PRE_DETECT_QUIET = 0x5,
+ DW_PCIE_LTSSM_DETECT_WAIT = 0x6,
+ DW_PCIE_LTSSM_CFG_LINKWD_START = 0x7,
+ DW_PCIE_LTSSM_CFG_LINKWD_ACEPT = 0x8,
+ DW_PCIE_LTSSM_CFG_LANENUM_WAI = 0x9,
+ DW_PCIE_LTSSM_CFG_LANENUM_ACEPT = 0xa,
+ DW_PCIE_LTSSM_CFG_COMPLETE = 0xb,
+ DW_PCIE_LTSSM_CFG_IDLE = 0xc,
+ DW_PCIE_LTSSM_RCVRY_LOCK = 0xd,
+ DW_PCIE_LTSSM_RCVRY_SPEED = 0xe,
+ DW_PCIE_LTSSM_RCVRY_RCVRCFG = 0xf,
+ DW_PCIE_LTSSM_RCVRY_IDLE = 0x10,
DW_PCIE_LTSSM_L0 = 0x11,
+ DW_PCIE_LTSSM_L0S = 0x12,
+ DW_PCIE_LTSSM_L123_SEND_EIDLE = 0x13,
+ DW_PCIE_LTSSM_L1_IDLE = 0x14,
DW_PCIE_LTSSM_L2_IDLE = 0x15,
+ DW_PCIE_LTSSM_L2_WAKE = 0x16,
+ DW_PCIE_LTSSM_DISABLED_ENTRY = 0x17,
+ DW_PCIE_LTSSM_DISABLED_IDLE = 0x18,
+ DW_PCIE_LTSSM_DISABLED = 0x19,
+ DW_PCIE_LTSSM_LPBK_ENTRY = 0x1a,
+ DW_PCIE_LTSSM_LPBK_ACTIVE = 0x1b,
+ DW_PCIE_LTSSM_LPBK_EXIT = 0x1c,
+ DW_PCIE_LTSSM_LPBK_EXIT_TIMEOUT = 0x1d,
+ DW_PCIE_LTSSM_HOT_RESET_ENTRY = 0x1e,
+ DW_PCIE_LTSSM_HOT_RESET = 0x1f,
+ DW_PCIE_LTSSM_RCVRY_EQ0 = 0x20,
+ DW_PCIE_LTSSM_RCVRY_EQ1 = 0x21,
+ DW_PCIE_LTSSM_RCVRY_EQ2 = 0x22,
+ DW_PCIE_LTSSM_RCVRY_EQ3 = 0x23,
DW_PCIE_LTSSM_UNKNOWN = 0xFFFFFFFF,
};
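The expanded enum lets drivers decode every PCIE_PORT_DEBUG0 LTSSM state instead of only L0/L2. A hedged sketch of a state check built on the existing dw_pcie_get_ltssm() accessor:

	enum dw_pcie_ltssm ltssm = dw_pcie_get_ltssm(pci);

	if (ltssm == DW_PCIE_LTSSM_L0)
		dev_dbg(pci->dev, "link trained to L0\n");
	else
		dev_dbg(pci->dev, "LTSSM state: %#x\n", ltssm);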
+struct dw_pcie_ob_atu_cfg {
+ int index;
+ int type;
+ u8 func_no;
+ u8 code;
+ u8 routing;
+ u64 parent_bus_addr;
+ u64 pci_addr;
+ u64 size;
+};
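With the outbound ATU parameters gathered into one struct, callers build a dw_pcie_ob_atu_cfg and hand it to the reworked dw_pcie_prog_outbound_atu(). A minimal sketch for a plain memory window (index, addresses, and size are placeholders; .code/.routing only matter for PCIE_ATU_TYPE_MSG windows):

	struct dw_pcie_ob_atu_cfg atu = {
		.index = 0,
		.type = PCIE_ATU_TYPE_MEM,
		.parent_bus_addr = cpu_phys_addr - pci->parent_bus_offset,
		.pci_addr = pci_bus_addr,
		.size = SZ_64K,
	};

	ret = dw_pcie_prog_outbound_atu(pci, &atu);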
+
struct dw_pcie_host_ops {
int (*init)(struct dw_pcie_rp *pp);
void (*deinit)(struct dw_pcie_rp *pp);
@@ -328,12 +408,15 @@ struct dw_pcie_rp {
struct pci_host_bridge *bridge;
raw_spinlock_t lock;
DECLARE_BITMAP(msi_irq_in_use, MAX_MSI_IRQS);
+ bool use_atu_msg;
+ int msg_atu_index;
+ struct resource *msg_res;
+ bool use_linkup_irq;
};
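The new use_atu_msg/msg_atu_index/msg_res fields let the host core reserve one outbound window for transmitting PCIe messages (typically PME_Turn_Off at suspend) instead of relying on vendor sideband signaling. A sketch of such a window, assuming the PCIE_MSG_CODE_PME_TURN_OFF and PCIE_MSG_TYPE_R_BC constants from drivers/pci/pci.h:

	struct dw_pcie_ob_atu_cfg atu = {
		.index = pp->msg_atu_index,
		.type = PCIE_ATU_TYPE_MSG,
		.code = PCIE_MSG_CODE_PME_TURN_OFF,
		.routing = PCIE_MSG_TYPE_R_BC,
		.parent_bus_addr = pp->msg_res->start - pci->parent_bus_offset,
		.size = resource_size(pp->msg_res),
	};

	ret = dw_pcie_prog_outbound_atu(pci, &atu);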
struct dw_pcie_ep_ops {
void (*pre_init)(struct dw_pcie_ep *ep);
void (*init)(struct dw_pcie_ep *ep);
- void (*deinit)(struct dw_pcie_ep *ep);
int (*raise_irq)(struct dw_pcie_ep *ep, u8 func_no,
unsigned int type, u16 interrupt_num);
const struct pci_epc_features* (*get_features)(struct dw_pcie_ep *ep);
@@ -385,12 +468,20 @@ struct dw_pcie_ops {
void (*stop_link)(struct dw_pcie *pcie);
};
+struct debugfs_info {
+ struct dentry *debug_dir;
+ void *rasdes_info;
+};
+
struct dw_pcie {
struct device *dev;
void __iomem *dbi_base;
+ resource_size_t dbi_phys_addr;
void __iomem *dbi_base2;
void __iomem *atu_base;
+ resource_size_t atu_phys_addr;
size_t atu_size;
+ resource_size_t parent_bus_offset;
u32 num_ib_windows;
u32 num_ob_windows;
u32 region_align;
@@ -402,7 +493,7 @@ struct dw_pcie {
u32 type;
unsigned long caps;
int num_lanes;
- int link_gen;
+ int max_link_speed;
u8 n_fts[2];
struct dw_edma_chip edma;
struct clk_bulk_data app_clks[DW_PCIE_NUM_APP_CLKS];
@@ -411,6 +502,20 @@ struct dw_pcie {
struct reset_control_bulk_data core_rsts[DW_PCIE_NUM_CORE_RSTS];
struct gpio_desc *pe_rst;
bool suspended;
+ struct debugfs_info *debugfs;
+
+ /*
+ * If iATU input addresses are offset from CPU physical addresses,
+ * we previously required .cpu_addr_fixup() to convert them. We
+ * now rely on the devicetree instead. If .cpu_addr_fixup()
+ * exists, we compare its results with devicetree.
+ *
+ * If .cpu_addr_fixup() does not exist, we assume the offset is
+ * zero and warn if devicetree claims otherwise. If we know all
+ * devicetrees correctly describe the offset, set
+ * use_parent_dt_ranges to true to avoid this warning.
+ */
+ bool use_parent_dt_ranges;
};
#define to_dw_pcie_from_pp(port) container_of((port), struct dw_pcie, pp)
@@ -424,6 +529,7 @@ void dw_pcie_version_detect(struct dw_pcie *pci);
u8 dw_pcie_find_capability(struct dw_pcie *pci, u8 cap);
u16 dw_pcie_find_ext_capability(struct dw_pcie *pci, u8 cap);
+u16 dw_pcie_find_rasdes_capability(struct dw_pcie *pci);
int dw_pcie_read(void __iomem *addr, int size, u32 *val);
int dw_pcie_write(void __iomem *addr, int size, u32 val);
@@ -434,22 +540,21 @@ void dw_pcie_write_dbi2(struct dw_pcie *pci, u32 reg, size_t size, u32 val);
int dw_pcie_link_up(struct dw_pcie *pci);
void dw_pcie_upconfig_setup(struct dw_pcie *pci);
int dw_pcie_wait_for_link(struct dw_pcie *pci);
-int dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
- u64 cpu_addr, u64 pci_addr, u64 size);
-int dw_pcie_prog_ep_outbound_atu(struct dw_pcie *pci, u8 func_no, int index,
- int type, u64 cpu_addr, u64 pci_addr, u64 size);
+int dw_pcie_prog_outbound_atu(struct dw_pcie *pci,
+ const struct dw_pcie_ob_atu_cfg *atu);
int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int type,
- u64 cpu_addr, u64 pci_addr, u64 size);
+ u64 parent_bus_addr, u64 pci_addr, u64 size);
int dw_pcie_prog_ep_inbound_atu(struct dw_pcie *pci, u8 func_no, int index,
- int type, u64 cpu_addr, u8 bar);
+ int type, u64 parent_bus_addr,
+ u8 bar, size_t size);
void dw_pcie_disable_atu(struct dw_pcie *pci, u32 dir, int index);
void dw_pcie_setup(struct dw_pcie *pci);
void dw_pcie_iatu_detect(struct dw_pcie *pci);
int dw_pcie_edma_detect(struct dw_pcie *pci);
void dw_pcie_edma_remove(struct dw_pcie *pci);
-
-int dw_pcie_suspend_noirq(struct dw_pcie *pci);
-int dw_pcie_resume_noirq(struct dw_pcie *pci);
+resource_size_t dw_pcie_parent_bus_offset(struct dw_pcie *pci,
+ const char *reg_name,
+					  resource_size_t cpu_phys_addr);
static inline void dw_pcie_writel_dbi(struct dw_pcie *pci, u32 reg, u32 val)
{
@@ -628,6 +733,8 @@ static inline enum dw_pcie_ltssm dw_pcie_get_ltssm(struct dw_pcie *pci)
}
#ifdef CONFIG_PCIE_DW_HOST
+int dw_pcie_suspend_noirq(struct dw_pcie *pci);
+int dw_pcie_resume_noirq(struct dw_pcie *pci);
irqreturn_t dw_handle_msi_irq(struct dw_pcie_rp *pp);
int dw_pcie_setup_rc(struct dw_pcie_rp *pp);
int dw_pcie_host_init(struct dw_pcie_rp *pp);
@@ -636,6 +743,16 @@ int dw_pcie_allocate_domains(struct dw_pcie_rp *pp);
void __iomem *dw_pcie_own_conf_map_bus(struct pci_bus *bus, unsigned int devfn,
int where);
#else
+static inline int dw_pcie_suspend_noirq(struct dw_pcie *pci)
+{
+ return 0;
+}
+
+static inline int dw_pcie_resume_noirq(struct dw_pcie *pci)
+{
+ return 0;
+}
+
static inline irqreturn_t dw_handle_msi_irq(struct dw_pcie_rp *pp)
{
return IRQ_NONE;
@@ -669,10 +786,11 @@ static inline void __iomem *dw_pcie_own_conf_map_bus(struct pci_bus *bus,
#ifdef CONFIG_PCIE_DW_EP
void dw_pcie_ep_linkup(struct dw_pcie_ep *ep);
+void dw_pcie_ep_linkdown(struct dw_pcie_ep *ep);
int dw_pcie_ep_init(struct dw_pcie_ep *ep);
-int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep);
-void dw_pcie_ep_init_notify(struct dw_pcie_ep *ep);
-void dw_pcie_ep_exit(struct dw_pcie_ep *ep);
+int dw_pcie_ep_init_registers(struct dw_pcie_ep *ep);
+void dw_pcie_ep_deinit(struct dw_pcie_ep *ep);
+void dw_pcie_ep_cleanup(struct dw_pcie_ep *ep);
int dw_pcie_ep_raise_intx_irq(struct dw_pcie_ep *ep, u8 func_no);
int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
u8 interrupt_num);
@@ -681,6 +799,7 @@ int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
int dw_pcie_ep_raise_msix_irq_doorbell(struct dw_pcie_ep *ep, u8 func_no,
u16 interrupt_num);
void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar);
+int dw_pcie_ep_hide_ext_capability(struct dw_pcie *pci, u8 prev_cap, u8 cap);
struct dw_pcie_ep_func *
dw_pcie_ep_get_func_from_ep(struct dw_pcie_ep *ep, u8 func_no);
#else
@@ -688,21 +807,25 @@ static inline void dw_pcie_ep_linkup(struct dw_pcie_ep *ep)
{
}
+static inline void dw_pcie_ep_linkdown(struct dw_pcie_ep *ep)
+{
+}
+
static inline int dw_pcie_ep_init(struct dw_pcie_ep *ep)
{
return 0;
}
-static inline int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep)
+static inline int dw_pcie_ep_init_registers(struct dw_pcie_ep *ep)
{
return 0;
}
-static inline void dw_pcie_ep_init_notify(struct dw_pcie_ep *ep)
+static inline void dw_pcie_ep_deinit(struct dw_pcie_ep *ep)
{
}
-static inline void dw_pcie_ep_exit(struct dw_pcie_ep *ep)
+static inline void dw_pcie_ep_cleanup(struct dw_pcie_ep *ep)
{
}
@@ -734,10 +857,29 @@ static inline void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)
{
}
+static inline int dw_pcie_ep_hide_ext_capability(struct dw_pcie *pci,
+ u8 prev_cap, u8 cap)
+{
+ return 0;
+}
+
static inline struct dw_pcie_ep_func *
dw_pcie_ep_get_func_from_ep(struct dw_pcie_ep *ep, u8 func_no)
{
return NULL;
}
#endif
+
+#ifdef CONFIG_PCIE_DW_DEBUGFS
+void dwc_pcie_debugfs_init(struct dw_pcie *pci);
+void dwc_pcie_debugfs_deinit(struct dw_pcie *pci);
+#else
+static inline void dwc_pcie_debugfs_init(struct dw_pcie *pci)
+{
+}
+static inline void dwc_pcie_debugfs_deinit(struct dw_pcie *pci)
+{
+}
+#endif
+
#endif /* _PCIE_DESIGNWARE_H */
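The relocated suspend/resume declarations (now with !CONFIG_PCIE_DW_HOST stubs) and the new debugfs hooks are meant to be driven from the glue drivers. A sketch of the typical wiring; the example_* names are illustrative, not part of this patch:

	static int example_pcie_suspend_noirq(struct device *dev)
	{
		struct dw_pcie *pci = dev_get_drvdata(dev);

		return dw_pcie_suspend_noirq(pci);
	}

	static int example_pcie_resume_noirq(struct device *dev)
	{
		struct dw_pcie *pci = dev_get_drvdata(dev);

		return dw_pcie_resume_noirq(pci);
	}

	static const struct dev_pm_ops example_pcie_pm_ops = {
		NOIRQ_SYSTEM_SLEEP_PM_OPS(example_pcie_suspend_noirq,
					  example_pcie_resume_noirq)
	};

	/* And in probe/remove: dwc_pcie_debugfs_init(pci) / dwc_pcie_debugfs_deinit(pci). */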
diff --git a/drivers/pci/controller/dwc/pcie-dw-rockchip.c b/drivers/pci/controller/dwc/pcie-dw-rockchip.c
index d6842141d384..678d510a261d 100644
--- a/drivers/pci/controller/dwc/pcie-dw-rockchip.c
+++ b/drivers/pci/controller/dwc/pcie-dw-rockchip.c
@@ -34,10 +34,16 @@
#define to_rockchip_pcie(x) dev_get_drvdata((x)->dev)
#define PCIE_CLIENT_RC_MODE HIWORD_UPDATE_BIT(0x40)
+#define PCIE_CLIENT_EP_MODE HIWORD_UPDATE(0xf0, 0x0)
#define PCIE_CLIENT_ENABLE_LTSSM HIWORD_UPDATE_BIT(0xc)
+#define PCIE_CLIENT_DISABLE_LTSSM HIWORD_UPDATE(0x0c, 0x8)
+#define PCIE_CLIENT_INTR_STATUS_MISC 0x10
+#define PCIE_CLIENT_INTR_MASK_MISC 0x24
#define PCIE_SMLH_LINKUP BIT(16)
#define PCIE_RDLH_LINKUP BIT(17)
#define PCIE_LINKUP (PCIE_SMLH_LINKUP | PCIE_RDLH_LINKUP)
+#define PCIE_RDLH_LINK_UP_CHGED BIT(1)
+#define PCIE_LINK_REQ_RST_NOT_INT BIT(2)
#define PCIE_L0S_ENTRY 0x11
#define PCIE_CLIENT_GENERAL_CONTROL 0x0
#define PCIE_CLIENT_INTR_STATUS_LEGACY 0x8
@@ -49,25 +55,30 @@
#define PCIE_LTSSM_STATUS_MASK GENMASK(5, 0)
struct rockchip_pcie {
- struct dw_pcie pci;
- void __iomem *apb_base;
- struct phy *phy;
- struct clk_bulk_data *clks;
- unsigned int clk_cnt;
- struct reset_control *rst;
- struct gpio_desc *rst_gpio;
- struct regulator *vpcie3v3;
- struct irq_domain *irq_domain;
+ struct dw_pcie pci;
+ void __iomem *apb_base;
+ struct phy *phy;
+ struct clk_bulk_data *clks;
+ unsigned int clk_cnt;
+ struct reset_control *rst;
+ struct gpio_desc *rst_gpio;
+ struct regulator *vpcie3v3;
+ struct irq_domain *irq_domain;
+ const struct rockchip_pcie_of_data *data;
};
-static int rockchip_pcie_readl_apb(struct rockchip_pcie *rockchip,
- u32 reg)
+struct rockchip_pcie_of_data {
+ enum dw_pcie_device_mode mode;
+ const struct pci_epc_features *epc_features;
+};
+
+static int rockchip_pcie_readl_apb(struct rockchip_pcie *rockchip, u32 reg)
{
return readl_relaxed(rockchip->apb_base + reg);
}
-static void rockchip_pcie_writel_apb(struct rockchip_pcie *rockchip,
- u32 val, u32 reg)
+static void rockchip_pcie_writel_apb(struct rockchip_pcie *rockchip, u32 val,
+ u32 reg)
{
writel_relaxed(val, rockchip->apb_base + reg);
}
@@ -133,8 +144,8 @@ static int rockchip_pcie_init_irq_domain(struct rockchip_pcie *rockchip)
return -EINVAL;
}
- rockchip->irq_domain = irq_domain_add_linear(intc, PCI_NUM_INTX,
- &intx_domain_ops, rockchip);
+ rockchip->irq_domain = irq_domain_create_linear(of_fwnode_handle(intc), PCI_NUM_INTX,
+ &intx_domain_ops, rockchip);
of_node_put(intc);
if (!rockchip->irq_domain) {
dev_err(dev, "failed to get a INTx IRQ domain\n");
@@ -144,16 +155,27 @@ static int rockchip_pcie_init_irq_domain(struct rockchip_pcie *rockchip)
return 0;
}
+static u32 rockchip_pcie_get_ltssm(struct rockchip_pcie *rockchip)
+{
+ return rockchip_pcie_readl_apb(rockchip, PCIE_CLIENT_LTSSM_STATUS);
+}
+
static void rockchip_pcie_enable_ltssm(struct rockchip_pcie *rockchip)
{
rockchip_pcie_writel_apb(rockchip, PCIE_CLIENT_ENABLE_LTSSM,
PCIE_CLIENT_GENERAL_CONTROL);
}
+static void rockchip_pcie_disable_ltssm(struct rockchip_pcie *rockchip)
+{
+ rockchip_pcie_writel_apb(rockchip, PCIE_CLIENT_DISABLE_LTSSM,
+ PCIE_CLIENT_GENERAL_CONTROL);
+}
+
static int rockchip_pcie_link_up(struct dw_pcie *pci)
{
struct rockchip_pcie *rockchip = to_rockchip_pcie(pci);
- u32 val = rockchip_pcie_readl_apb(rockchip, PCIE_CLIENT_LTSSM_STATUS);
+ u32 val = rockchip_pcie_get_ltssm(rockchip);
if ((val & PCIE_LINKUP) == PCIE_LINKUP &&
(val & PCIE_LTSSM_STATUS_MASK) == PCIE_L0S_ENTRY)
@@ -186,12 +208,18 @@ static int rockchip_pcie_start_link(struct dw_pcie *pci)
return 0;
}
+static void rockchip_pcie_stop_link(struct dw_pcie *pci)
+{
+ struct rockchip_pcie *rockchip = to_rockchip_pcie(pci);
+
+ rockchip_pcie_disable_ltssm(rockchip);
+}
+
static int rockchip_pcie_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct rockchip_pcie *rockchip = to_rockchip_pcie(pci);
struct device *dev = rockchip->pci.dev;
- u32 val = HIWORD_UPDATE_BIT(PCIE_LTSSM_ENABLE_ENHANCE);
int irq, ret;
irq = of_irq_get_byname(dev->of_node, "legacy");
@@ -205,12 +233,6 @@ static int rockchip_pcie_host_init(struct dw_pcie_rp *pp)
irq_set_chained_handler_and_data(irq, rockchip_pcie_intx_handler,
rockchip);
- /* LTSSM enable control mode */
- rockchip_pcie_writel_apb(rockchip, val, PCIE_CLIENT_HOT_RESET_CTRL);
-
- rockchip_pcie_writel_apb(rockchip, PCIE_CLIENT_RC_MODE,
- PCIE_CLIENT_GENERAL_CONTROL);
-
return 0;
}
@@ -218,6 +240,113 @@ static const struct dw_pcie_host_ops rockchip_pcie_host_ops = {
.init = rockchip_pcie_host_init,
};
+/*
+ * ATS does not work on RK3588 when running in EP mode.
+ *
+ * After the host has enabled ATS on the EP side, it will send an IOTLB
+ * invalidation request to the EP side. However, the RK3588 will never send
+ * a completion back and eventually the host will print an IOTLB_INV_TIMEOUT
+ * error, and the EP will not be operational. If we hide the ATS capability,
+ * things work as expected.
+ */
+static void rockchip_pcie_ep_hide_broken_ats_cap_rk3588(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct device *dev = pci->dev;
+
+ /* Only hide the ATS capability for RK3588 running in EP mode. */
+ if (!of_device_is_compatible(dev->of_node, "rockchip,rk3588-pcie-ep"))
+ return;
+
+ if (dw_pcie_ep_hide_ext_capability(pci, PCI_EXT_CAP_ID_SECPCI,
+ PCI_EXT_CAP_ID_ATS))
+ dev_err(dev, "failed to hide ATS capability\n");
+}
+
+static void rockchip_pcie_ep_pre_init(struct dw_pcie_ep *ep)
+{
+ rockchip_pcie_ep_hide_broken_ats_cap_rk3588(ep);
+}
+
+static void rockchip_pcie_ep_init(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ enum pci_barno bar;
+
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
+ dw_pcie_ep_reset_bar(pci, bar);
+}
+
+static int rockchip_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
+ unsigned int type, u16 interrupt_num)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+ switch (type) {
+ case PCI_IRQ_INTX:
+ return dw_pcie_ep_raise_intx_irq(ep, func_no);
+ case PCI_IRQ_MSI:
+ return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
+ case PCI_IRQ_MSIX:
+ return dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num);
+ default:
+ dev_err(pci->dev, "UNKNOWN IRQ type\n");
+ }
+
+ return 0;
+}
+
+static const struct pci_epc_features rockchip_pcie_epc_features_rk3568 = {
+ .linkup_notifier = true,
+ .msi_capable = true,
+ .msix_capable = true,
+ .intx_capable = false,
+ .align = SZ_64K,
+ .bar[BAR_0] = { .type = BAR_RESIZABLE, },
+ .bar[BAR_1] = { .type = BAR_RESIZABLE, },
+ .bar[BAR_2] = { .type = BAR_RESIZABLE, },
+ .bar[BAR_3] = { .type = BAR_RESIZABLE, },
+ .bar[BAR_4] = { .type = BAR_RESIZABLE, },
+ .bar[BAR_5] = { .type = BAR_RESIZABLE, },
+};
+
+/*
+ * BAR4 on rk3588 exposes the ATU Port Logic Structure to the host regardless of
+ * iATU settings for BAR4. This means that BAR4 cannot be used by an EPF driver,
+ * so mark it as RESERVED. (rockchip_pcie_ep_init() will disable all BARs by
+ * default.) If the host could write to BAR4, the iATU settings for all
+ * other BARs would be overwritten, leaving those BARs non-functional.
+ */
+static const struct pci_epc_features rockchip_pcie_epc_features_rk3588 = {
+ .linkup_notifier = true,
+ .msi_capable = true,
+ .msix_capable = true,
+ .intx_capable = false,
+ .align = SZ_64K,
+ .bar[BAR_0] = { .type = BAR_RESIZABLE, },
+ .bar[BAR_1] = { .type = BAR_RESIZABLE, },
+ .bar[BAR_2] = { .type = BAR_RESIZABLE, },
+ .bar[BAR_3] = { .type = BAR_RESIZABLE, },
+ .bar[BAR_4] = { .type = BAR_RESERVED, },
+ .bar[BAR_5] = { .type = BAR_RESIZABLE, },
+};
+
+static const struct pci_epc_features *
+rockchip_pcie_get_features(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct rockchip_pcie *rockchip = to_rockchip_pcie(pci);
+
+ return rockchip->data->epc_features;
+}
+
+static const struct dw_pcie_ep_ops rockchip_pcie_ep_ops = {
+ .init = rockchip_pcie_ep_init,
+ .pre_init = rockchip_pcie_ep_pre_init,
+ .raise_irq = rockchip_pcie_raise_irq,
+ .get_features = rockchip_pcie_get_features,
+};
+
static int rockchip_pcie_clk_init(struct rockchip_pcie *rockchip)
{
struct device *dev = rockchip->pci.dev;
@@ -225,11 +354,15 @@ static int rockchip_pcie_clk_init(struct rockchip_pcie *rockchip)
ret = devm_clk_bulk_get_all(dev, &rockchip->clks);
if (ret < 0)
- return ret;
+ return dev_err_probe(dev, ret, "failed to get clocks\n");
rockchip->clk_cnt = ret;
- return clk_bulk_prepare_enable(rockchip->clk_cnt, rockchip->clks);
+ ret = clk_bulk_prepare_enable(rockchip->clk_cnt, rockchip->clks);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to enable clocks\n");
+
+ return 0;
}
static int rockchip_pcie_resource_get(struct platform_device *pdev,
@@ -237,12 +370,14 @@ static int rockchip_pcie_resource_get(struct platform_device *pdev,
{
rockchip->apb_base = devm_platform_ioremap_resource_byname(pdev, "apb");
if (IS_ERR(rockchip->apb_base))
- return PTR_ERR(rockchip->apb_base);
+ return dev_err_probe(&pdev->dev, PTR_ERR(rockchip->apb_base),
+ "failed to map apb registers\n");
rockchip->rst_gpio = devm_gpiod_get_optional(&pdev->dev, "reset",
- GPIOD_OUT_HIGH);
+ GPIOD_OUT_LOW);
if (IS_ERR(rockchip->rst_gpio))
- return PTR_ERR(rockchip->rst_gpio);
+ return dev_err_probe(&pdev->dev, PTR_ERR(rockchip->rst_gpio),
+ "failed to get reset gpio\n");
rockchip->rst = devm_reset_control_array_get_exclusive(&pdev->dev);
if (IS_ERR(rockchip->rst))
@@ -282,15 +417,180 @@ static void rockchip_pcie_phy_deinit(struct rockchip_pcie *rockchip)
static const struct dw_pcie_ops dw_pcie_ops = {
.link_up = rockchip_pcie_link_up,
.start_link = rockchip_pcie_start_link,
+ .stop_link = rockchip_pcie_stop_link,
};
+static irqreturn_t rockchip_pcie_rc_sys_irq_thread(int irq, void *arg)
+{
+ struct rockchip_pcie *rockchip = arg;
+ struct dw_pcie *pci = &rockchip->pci;
+ struct dw_pcie_rp *pp = &pci->pp;
+ struct device *dev = pci->dev;
+ u32 reg, val;
+
+ reg = rockchip_pcie_readl_apb(rockchip, PCIE_CLIENT_INTR_STATUS_MISC);
+ rockchip_pcie_writel_apb(rockchip, reg, PCIE_CLIENT_INTR_STATUS_MISC);
+
+ dev_dbg(dev, "PCIE_CLIENT_INTR_STATUS_MISC: %#x\n", reg);
+ dev_dbg(dev, "LTSSM_STATUS: %#x\n", rockchip_pcie_get_ltssm(rockchip));
+
+ if (reg & PCIE_RDLH_LINK_UP_CHGED) {
+ val = rockchip_pcie_get_ltssm(rockchip);
+ if ((val & PCIE_LINKUP) == PCIE_LINKUP) {
+ dev_dbg(dev, "Received Link up event. Starting enumeration!\n");
+ /* Rescan the bus to enumerate endpoint devices */
+ pci_lock_rescan_remove();
+ pci_rescan_bus(pp->bridge->bus);
+ pci_unlock_rescan_remove();
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t rockchip_pcie_ep_sys_irq_thread(int irq, void *arg)
+{
+ struct rockchip_pcie *rockchip = arg;
+ struct dw_pcie *pci = &rockchip->pci;
+ struct device *dev = pci->dev;
+ u32 reg, val;
+
+ reg = rockchip_pcie_readl_apb(rockchip, PCIE_CLIENT_INTR_STATUS_MISC);
+ rockchip_pcie_writel_apb(rockchip, reg, PCIE_CLIENT_INTR_STATUS_MISC);
+
+ dev_dbg(dev, "PCIE_CLIENT_INTR_STATUS_MISC: %#x\n", reg);
+ dev_dbg(dev, "LTSSM_STATUS: %#x\n", rockchip_pcie_get_ltssm(rockchip));
+
+ if (reg & PCIE_LINK_REQ_RST_NOT_INT) {
+ dev_dbg(dev, "hot reset or link-down reset\n");
+ dw_pcie_ep_linkdown(&pci->ep);
+ }
+
+ if (reg & PCIE_RDLH_LINK_UP_CHGED) {
+ val = rockchip_pcie_get_ltssm(rockchip);
+ if ((val & PCIE_LINKUP) == PCIE_LINKUP) {
+ dev_dbg(dev, "link up\n");
+ dw_pcie_ep_linkup(&pci->ep);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int rockchip_pcie_configure_rc(struct platform_device *pdev,
+ struct rockchip_pcie *rockchip)
+{
+ struct device *dev = &pdev->dev;
+ struct dw_pcie_rp *pp;
+ int irq, ret;
+ u32 val;
+
+ if (!IS_ENABLED(CONFIG_PCIE_ROCKCHIP_DW_HOST))
+ return -ENODEV;
+
+ irq = platform_get_irq_byname(pdev, "sys");
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_threaded_irq(dev, irq, NULL,
+ rockchip_pcie_rc_sys_irq_thread,
+ IRQF_ONESHOT, "pcie-sys-rc", rockchip);
+ if (ret) {
+ dev_err(dev, "failed to request PCIe sys IRQ\n");
+ return ret;
+ }
+
+ /* LTSSM enable control mode */
+ val = HIWORD_UPDATE_BIT(PCIE_LTSSM_ENABLE_ENHANCE);
+ rockchip_pcie_writel_apb(rockchip, val, PCIE_CLIENT_HOT_RESET_CTRL);
+
+ rockchip_pcie_writel_apb(rockchip, PCIE_CLIENT_RC_MODE,
+ PCIE_CLIENT_GENERAL_CONTROL);
+
+ pp = &rockchip->pci.pp;
+ pp->ops = &rockchip_pcie_host_ops;
+ pp->use_linkup_irq = true;
+
+ ret = dw_pcie_host_init(pp);
+ if (ret) {
+ dev_err(dev, "failed to initialize host\n");
+ return ret;
+ }
+
+ /* unmask DLL up/down indicator */
+ val = HIWORD_UPDATE(PCIE_RDLH_LINK_UP_CHGED, 0);
+ rockchip_pcie_writel_apb(rockchip, val, PCIE_CLIENT_INTR_MASK_MISC);
+
+ return ret;
+}
+
+static int rockchip_pcie_configure_ep(struct platform_device *pdev,
+ struct rockchip_pcie *rockchip)
+{
+ struct device *dev = &pdev->dev;
+ int irq, ret;
+ u32 val;
+
+ if (!IS_ENABLED(CONFIG_PCIE_ROCKCHIP_DW_EP))
+ return -ENODEV;
+
+ irq = platform_get_irq_byname(pdev, "sys");
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_threaded_irq(dev, irq, NULL,
+ rockchip_pcie_ep_sys_irq_thread,
+ IRQF_ONESHOT, "pcie-sys-ep", rockchip);
+ if (ret) {
+ dev_err(dev, "failed to request PCIe sys IRQ\n");
+ return ret;
+ }
+
+ /* LTSSM enable control mode */
+ val = HIWORD_UPDATE_BIT(PCIE_LTSSM_ENABLE_ENHANCE);
+ rockchip_pcie_writel_apb(rockchip, val, PCIE_CLIENT_HOT_RESET_CTRL);
+
+ rockchip_pcie_writel_apb(rockchip, PCIE_CLIENT_EP_MODE,
+ PCIE_CLIENT_GENERAL_CONTROL);
+
+ rockchip->pci.ep.ops = &rockchip_pcie_ep_ops;
+ rockchip->pci.ep.page_size = SZ_64K;
+
+ dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
+
+ ret = dw_pcie_ep_init(&rockchip->pci.ep);
+ if (ret) {
+ dev_err(dev, "failed to initialize endpoint\n");
+ return ret;
+ }
+
+ ret = dw_pcie_ep_init_registers(&rockchip->pci.ep);
+ if (ret) {
+ dev_err(dev, "failed to initialize DWC endpoint registers\n");
+ dw_pcie_ep_deinit(&rockchip->pci.ep);
+ return ret;
+ }
+
+ pci_epc_init_notify(rockchip->pci.ep.epc);
+
+ /* unmask DLL up/down indicator and hot reset/link-down reset */
+ val = HIWORD_UPDATE(PCIE_RDLH_LINK_UP_CHGED | PCIE_LINK_REQ_RST_NOT_INT, 0);
+ rockchip_pcie_writel_apb(rockchip, val, PCIE_CLIENT_INTR_MASK_MISC);
+
+ return ret;
+}
+
static int rockchip_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct rockchip_pcie *rockchip;
- struct dw_pcie_rp *pp;
+ const struct rockchip_pcie_of_data *data;
int ret;
+ data = of_device_get_match_data(dev);
+ if (!data)
+ return -EINVAL;
+
rockchip = devm_kzalloc(dev, sizeof(*rockchip), GFP_KERNEL);
if (!rockchip)
return -ENOMEM;
@@ -299,9 +599,7 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
rockchip->pci.dev = dev;
rockchip->pci.ops = &dw_pcie_ops;
-
- pp = &rockchip->pci.pp;
- pp->ops = &rockchip_pcie_host_ops;
+ rockchip->data = data;
ret = rockchip_pcie_resource_get(pdev, rockchip);
if (ret)
@@ -320,10 +618,9 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
rockchip->vpcie3v3 = NULL;
} else {
ret = regulator_enable(rockchip->vpcie3v3);
- if (ret) {
- dev_err(dev, "failed to enable vpcie3v3 regulator\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to enable vpcie3v3 regulator\n");
}
ret = rockchip_pcie_phy_init(rockchip);
@@ -338,10 +635,26 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
if (ret)
goto deinit_phy;
- ret = dw_pcie_host_init(pp);
- if (!ret)
- return 0;
+ switch (data->mode) {
+ case DW_PCIE_RC_TYPE:
+ ret = rockchip_pcie_configure_rc(pdev, rockchip);
+ if (ret)
+ goto deinit_clk;
+ break;
+ case DW_PCIE_EP_TYPE:
+ ret = rockchip_pcie_configure_ep(pdev, rockchip);
+ if (ret)
+ goto deinit_clk;
+ break;
+ default:
+ dev_err(dev, "INVALID device type %d\n", data->mode);
+ ret = -EINVAL;
+ goto deinit_clk;
+ }
+
+ return 0;
+deinit_clk:
clk_bulk_disable_unprepare(rockchip->clk_cnt, rockchip->clks);
deinit_phy:
rockchip_pcie_phy_deinit(rockchip);
@@ -352,8 +665,33 @@ disable_regulator:
return ret;
}
+static const struct rockchip_pcie_of_data rockchip_pcie_rc_of_data_rk3568 = {
+ .mode = DW_PCIE_RC_TYPE,
+};
+
+static const struct rockchip_pcie_of_data rockchip_pcie_ep_of_data_rk3568 = {
+ .mode = DW_PCIE_EP_TYPE,
+ .epc_features = &rockchip_pcie_epc_features_rk3568,
+};
+
+static const struct rockchip_pcie_of_data rockchip_pcie_ep_of_data_rk3588 = {
+ .mode = DW_PCIE_EP_TYPE,
+ .epc_features = &rockchip_pcie_epc_features_rk3588,
+};
+
static const struct of_device_id rockchip_pcie_of_match[] = {
- { .compatible = "rockchip,rk3568-pcie", },
+ {
+ .compatible = "rockchip,rk3568-pcie",
+ .data = &rockchip_pcie_rc_of_data_rk3568,
+ },
+ {
+ .compatible = "rockchip,rk3568-pcie-ep",
+ .data = &rockchip_pcie_ep_of_data_rk3568,
+ },
+ {
+ .compatible = "rockchip,rk3588-pcie-ep",
+ .data = &rockchip_pcie_ep_of_data_rk3588,
+ },
{},
};
diff --git a/drivers/pci/controller/dwc/pcie-histb.c b/drivers/pci/controller/dwc/pcie-histb.c
index 7a11c618b9d9..1f2f4c28a949 100644
--- a/drivers/pci/controller/dwc/pcie-histb.c
+++ b/drivers/pci/controller/dwc/pcie-histb.c
@@ -409,16 +409,21 @@ static int histb_pcie_probe(struct platform_device *pdev)
ret = histb_pcie_host_enable(pp);
if (ret) {
dev_err(dev, "failed to enable host\n");
- return ret;
+ goto err_exit_phy;
}
ret = dw_pcie_host_init(pp);
if (ret) {
dev_err(dev, "failed to initialize host\n");
- return ret;
+ goto err_exit_phy;
}
return 0;
+
+err_exit_phy:
+ phy_exit(hipcie->phy);
+
+ return ret;
}
static void histb_pcie_remove(struct platform_device *pdev)
@@ -427,8 +432,7 @@ static void histb_pcie_remove(struct platform_device *pdev)
histb_pcie_host_disable(hipcie);
- if (hipcie->phy)
- phy_exit(hipcie->phy);
+ phy_exit(hipcie->phy);
}
static const struct of_device_id histb_pcie_of_match[] = {
@@ -439,7 +443,7 @@ MODULE_DEVICE_TABLE(of, histb_pcie_of_match);
static struct platform_driver histb_pcie_platform_driver = {
.probe = histb_pcie_probe,
- .remove_new = histb_pcie_remove,
+ .remove = histb_pcie_remove,
.driver = {
.name = "histb-pcie",
.of_match_table = histb_pcie_of_match,
diff --git a/drivers/pci/controller/dwc/pcie-intel-gw.c b/drivers/pci/controller/dwc/pcie-intel-gw.c
index acbe4f6d3291..c21906eced61 100644
--- a/drivers/pci/controller/dwc/pcie-intel-gw.c
+++ b/drivers/pci/controller/dwc/pcie-intel-gw.c
@@ -57,7 +57,6 @@
PCIE_APP_IRN_INTA | PCIE_APP_IRN_INTB | \
PCIE_APP_IRN_INTC | PCIE_APP_IRN_INTD)
-#define BUS_IATU_OFFSET SZ_256M
#define RESET_INTERVAL_MS 100
struct intel_pcie {
@@ -132,7 +131,7 @@ static void intel_pcie_link_setup(struct intel_pcie *pcie)
static void intel_pcie_init_n_fts(struct dw_pcie *pci)
{
- switch (pci->link_gen) {
+ switch (pci->max_link_speed) {
case 3:
pci->n_fts[1] = PORT_AFR_N_FTS_GEN3;
break;
@@ -252,7 +251,7 @@ static int intel_pcie_wait_l2(struct intel_pcie *pcie)
int ret;
struct dw_pcie *pci = &pcie->pci;
- if (pci->link_gen < 3)
+ if (pci->max_link_speed < 3)
return 0;
/* Send PME_TURN_OFF message */
@@ -381,13 +380,7 @@ static int intel_pcie_rc_init(struct dw_pcie_rp *pp)
return intel_pcie_host_setup(pcie);
}
-static u64 intel_pcie_cpu_addr(struct dw_pcie *pcie, u64 cpu_addr)
-{
- return cpu_addr + BUS_IATU_OFFSET;
-}
-
static const struct dw_pcie_ops intel_pcie_ops = {
- .cpu_addr_fixup = intel_pcie_cpu_addr,
};
static const struct dw_pcie_host_ops intel_pcie_dw_ops = {
@@ -409,6 +402,7 @@ static int intel_pcie_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pcie);
pci = &pcie->pci;
pci->dev = dev;
+ pci->use_parent_dt_ranges = true;
pp = &pci->pp;
ret = intel_pcie_get_resources(pdev);
@@ -443,7 +437,7 @@ static const struct of_device_id of_intel_pcie_match[] = {
static struct platform_driver intel_pcie_driver = {
.probe = intel_pcie_probe,
- .remove_new = intel_pcie_remove,
+ .remove = intel_pcie_remove,
.driver = {
.name = "intel-gw-pcie",
.of_match_table = of_intel_pcie_match,
diff --git a/drivers/pci/controller/dwc/pcie-keembay.c b/drivers/pci/controller/dwc/pcie-keembay.c
index 5e8e54f597dd..278205db60a2 100644
--- a/drivers/pci/controller/dwc/pcie-keembay.c
+++ b/drivers/pci/controller/dwc/pcie-keembay.c
@@ -396,6 +396,7 @@ static int keembay_pcie_probe(struct platform_device *pdev)
struct keembay_pcie *pcie;
struct dw_pcie *pci;
enum dw_pcie_device_mode mode;
+ int ret;
data = device_get_match_data(dev);
if (!data)
@@ -430,11 +431,26 @@ static int keembay_pcie_probe(struct platform_device *pdev)
return -ENODEV;
pci->ep.ops = &keembay_pcie_ep_ops;
- return dw_pcie_ep_init(&pci->ep);
+ ret = dw_pcie_ep_init(&pci->ep);
+ if (ret)
+ return ret;
+
+ ret = dw_pcie_ep_init_registers(&pci->ep);
+ if (ret) {
+ dev_err(dev, "Failed to initialize DWC endpoint registers\n");
+ dw_pcie_ep_deinit(&pci->ep);
+ return ret;
+ }
+
+ pci_epc_init_notify(pci->ep.epc);
+
+ break;
default:
dev_err(dev, "Invalid device type %d\n", pcie->mode);
return -ENODEV;
}
+
+ return 0;
}
static const struct keembay_pcie_of_data keembay_pcie_rc_of_data = {
diff --git a/drivers/pci/controller/dwc/pcie-kirin.c b/drivers/pci/controller/dwc/pcie-kirin.c
index d5523f302102..d0e6a3811b00 100644
--- a/drivers/pci/controller/dwc/pcie-kirin.c
+++ b/drivers/pci/controller/dwc/pcie-kirin.c
@@ -12,12 +12,10 @@
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/err.h>
-#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/of_pci.h>
#include <linux/phy/phy.h>
#include <linux/pci.h>
@@ -78,16 +76,16 @@ struct kirin_pcie {
void *phy_priv; /* only for PCIE_KIRIN_INTERNAL_PHY */
/* DWC PERST# */
- int gpio_id_dwc_perst;
+ struct gpio_desc *id_dwc_perst_gpio;
/* Per-slot PERST# */
int num_slots;
- int gpio_id_reset[MAX_PCI_SLOTS];
+ struct gpio_desc *id_reset_gpio[MAX_PCI_SLOTS];
const char *reset_names[MAX_PCI_SLOTS];
/* Per-slot clkreq */
int n_gpio_clkreq;
- int gpio_id_clkreq[MAX_PCI_SLOTS];
+ struct gpio_desc *id_clkreq_gpio[MAX_PCI_SLOTS];
const char *clkreq_names[MAX_PCI_SLOTS];
};
@@ -218,10 +216,9 @@ static int hi3660_pcie_phy_start(struct hi3660_pcie_phy *phy)
usleep_range(PIPE_CLK_WAIT_MIN, PIPE_CLK_WAIT_MAX);
reg_val = kirin_apb_phy_readl(phy, PCIE_APB_PHY_STATUS0);
- if (reg_val & PIPE_CLK_STABLE) {
- dev_err(dev, "PIPE clk is not stable\n");
- return -EINVAL;
- }
+ if (reg_val & PIPE_CLK_STABLE)
+ return dev_err_probe(dev, -ETIMEDOUT,
+ "PIPE clk is not stable\n");
return 0;
}
@@ -373,23 +370,27 @@ static int kirin_pcie_get_gpio_enable(struct kirin_pcie *pcie,
if (ret < 0)
return 0;
- if (ret > MAX_PCI_SLOTS) {
- dev_err(dev, "Too many GPIO clock requests!\n");
- return -EINVAL;
- }
+ if (ret > MAX_PCI_SLOTS)
+ return dev_err_probe(dev, -EINVAL,
+ "Too many GPIO clock requests!\n");
pcie->n_gpio_clkreq = ret;
for (i = 0; i < pcie->n_gpio_clkreq; i++) {
- pcie->gpio_id_clkreq[i] = of_get_named_gpio(dev->of_node,
- "hisilicon,clken-gpios", i);
- if (pcie->gpio_id_clkreq[i] < 0)
- return pcie->gpio_id_clkreq[i];
+ pcie->id_clkreq_gpio[i] = devm_gpiod_get_index(dev,
+ "hisilicon,clken", i,
+ GPIOD_OUT_LOW);
+ if (IS_ERR(pcie->id_clkreq_gpio[i]))
+ return dev_err_probe(dev, PTR_ERR(pcie->id_clkreq_gpio[i]),
+ "unable to get a valid clken gpio\n");
pcie->clkreq_names[i] = devm_kasprintf(dev, GFP_KERNEL,
"pcie_clkreq_%d", i);
if (!pcie->clkreq_names[i])
return -ENOMEM;
+
+ gpiod_set_consumer_name(pcie->id_clkreq_gpio[i],
+ pcie->clkreq_names[i]);
}
return 0;
@@ -400,56 +401,55 @@ static int kirin_pcie_parse_port(struct kirin_pcie *pcie,
struct device_node *node)
{
struct device *dev = &pdev->dev;
- struct device_node *parent, *child;
int ret, slot, i;
- for_each_available_child_of_node(node, parent) {
- for_each_available_child_of_node(parent, child) {
+ for_each_available_child_of_node_scoped(node, parent) {
+ for_each_available_child_of_node_scoped(parent, child) {
i = pcie->num_slots;
- pcie->gpio_id_reset[i] = of_get_named_gpio(child,
- "reset-gpios", 0);
- if (pcie->gpio_id_reset[i] < 0)
- continue;
+ pcie->id_reset_gpio[i] = devm_fwnode_gpiod_get_index(dev,
+ of_fwnode_handle(child),
+ "reset", 0, GPIOD_OUT_LOW,
+ NULL);
+ if (IS_ERR(pcie->id_reset_gpio[i])) {
+ if (PTR_ERR(pcie->id_reset_gpio[i]) == -ENOENT)
+ continue;
+ return dev_err_probe(dev, PTR_ERR(pcie->id_reset_gpio[i]),
+ "unable to get a valid reset gpio\n");
+ }
+
+ if (pcie->num_slots + 1 >= MAX_PCI_SLOTS)
+ return dev_err_probe(dev, -EINVAL,
+ "Too many PCI slots!\n");
pcie->num_slots++;
- if (pcie->num_slots > MAX_PCI_SLOTS) {
- dev_err(dev, "Too many PCI slots!\n");
- ret = -EINVAL;
- goto put_node;
- }
ret = of_pci_get_devfn(child);
- if (ret < 0) {
- dev_err(dev, "failed to parse devfn: %d\n", ret);
- goto put_node;
- }
+ if (ret < 0)
+ return dev_err_probe(dev, ret,
+ "failed to parse devfn\n");
slot = PCI_SLOT(ret);
pcie->reset_names[i] = devm_kasprintf(dev, GFP_KERNEL,
"pcie_perst_%d",
slot);
- if (!pcie->reset_names[i]) {
- ret = -ENOMEM;
- goto put_node;
- }
+ if (!pcie->reset_names[i])
+ return -ENOMEM;
+
+ gpiod_set_consumer_name(pcie->id_reset_gpio[i],
+ pcie->reset_names[i]);
}
}
return 0;
-
-put_node:
- of_node_put(child);
- of_node_put(parent);
- return ret;
}
static long kirin_pcie_get_resource(struct kirin_pcie *kirin_pcie,
struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct device_node *child, *node = dev->of_node;
+ struct device_node *node = dev->of_node;
void __iomem *apb_base;
int ret;
@@ -463,31 +463,24 @@ static long kirin_pcie_get_resource(struct kirin_pcie *kirin_pcie,
return PTR_ERR(kirin_pcie->apb);
/* pcie internal PERST# gpio */
- kirin_pcie->gpio_id_dwc_perst = of_get_named_gpio(dev->of_node,
- "reset-gpios", 0);
- if (kirin_pcie->gpio_id_dwc_perst == -EPROBE_DEFER) {
- return -EPROBE_DEFER;
- } else if (!gpio_is_valid(kirin_pcie->gpio_id_dwc_perst)) {
- dev_err(dev, "unable to get a valid gpio pin\n");
- return -ENODEV;
- }
+ kirin_pcie->id_dwc_perst_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(kirin_pcie->id_dwc_perst_gpio))
+ return dev_err_probe(dev, PTR_ERR(kirin_pcie->id_dwc_perst_gpio),
+ "unable to get a valid gpio pin\n");
+ gpiod_set_consumer_name(kirin_pcie->id_dwc_perst_gpio, "pcie_perst_bridge");
ret = kirin_pcie_get_gpio_enable(kirin_pcie, pdev);
if (ret)
return ret;
/* Parse OF children */
- for_each_available_child_of_node(node, child) {
+ for_each_available_child_of_node_scoped(node, child) {
ret = kirin_pcie_parse_port(kirin_pcie, pdev, child);
if (ret)
- goto put_node;
+ return ret;
}
return 0;
-
-put_node:
- of_node_put(child);
- return ret;
}
static void kirin_pcie_sideband_dbi_w_mode(struct kirin_pcie *kirin_pcie,
@@ -553,7 +546,7 @@ static int kirin_pcie_add_bus(struct pci_bus *bus)
/* Send PERST# to each slot */
for (i = 0; i < kirin_pcie->num_slots; i++) {
- ret = gpio_direction_output(kirin_pcie->gpio_id_reset[i], 1);
+ ret = gpiod_direction_output_raw(kirin_pcie->id_reset_gpio[i], 1);
if (ret) {
dev_err(pci->dev, "PERST# %s error: %d\n",
kirin_pcie->reset_names[i], ret);
@@ -623,44 +616,6 @@ static int kirin_pcie_host_init(struct dw_pcie_rp *pp)
return 0;
}
-static int kirin_pcie_gpio_request(struct kirin_pcie *kirin_pcie,
- struct device *dev)
-{
- int ret, i;
-
- for (i = 0; i < kirin_pcie->num_slots; i++) {
- if (!gpio_is_valid(kirin_pcie->gpio_id_reset[i])) {
- dev_err(dev, "unable to get a valid %s gpio\n",
- kirin_pcie->reset_names[i]);
- return -ENODEV;
- }
-
- ret = devm_gpio_request(dev, kirin_pcie->gpio_id_reset[i],
- kirin_pcie->reset_names[i]);
- if (ret)
- return ret;
- }
-
- for (i = 0; i < kirin_pcie->n_gpio_clkreq; i++) {
- if (!gpio_is_valid(kirin_pcie->gpio_id_clkreq[i])) {
- dev_err(dev, "unable to get a valid %s gpio\n",
- kirin_pcie->clkreq_names[i]);
- return -ENODEV;
- }
-
- ret = devm_gpio_request(dev, kirin_pcie->gpio_id_clkreq[i],
- kirin_pcie->clkreq_names[i]);
- if (ret)
- return ret;
-
- ret = gpio_direction_output(kirin_pcie->gpio_id_clkreq[i], 0);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
static const struct dw_pcie_ops kirin_dw_pcie_ops = {
.read_dbi = kirin_pcie_read_dbi,
.write_dbi = kirin_pcie_write_dbi,
@@ -680,7 +635,7 @@ static int kirin_pcie_power_off(struct kirin_pcie *kirin_pcie)
return hi3660_pcie_phy_power_off(kirin_pcie);
for (i = 0; i < kirin_pcie->n_gpio_clkreq; i++)
- gpio_direction_output(kirin_pcie->gpio_id_clkreq[i], 1);
+ gpiod_direction_output_raw(kirin_pcie->id_clkreq_gpio[i], 1);
phy_power_off(kirin_pcie->phy);
phy_exit(kirin_pcie->phy);
@@ -707,10 +662,6 @@ static int kirin_pcie_power_on(struct platform_device *pdev,
if (IS_ERR(kirin_pcie->phy))
return PTR_ERR(kirin_pcie->phy);
- ret = kirin_pcie_gpio_request(kirin_pcie, dev);
- if (ret)
- return ret;
-
ret = phy_init(kirin_pcie->phy);
if (ret)
goto err;
@@ -723,11 +674,9 @@ static int kirin_pcie_power_on(struct platform_device *pdev,
/* perst assert Endpoint */
usleep_range(REF_2_PERST_MIN, REF_2_PERST_MAX);
- if (!gpio_request(kirin_pcie->gpio_id_dwc_perst, "pcie_perst_bridge")) {
- ret = gpio_direction_output(kirin_pcie->gpio_id_dwc_perst, 1);
- if (ret)
- goto err;
- }
+ ret = gpiod_direction_output_raw(kirin_pcie->id_dwc_perst_gpio, 1);
+ if (ret)
+ goto err;
usleep_range(PERST_2_ACCESS_MIN, PERST_2_ACCESS_MAX);
@@ -773,16 +722,9 @@ static int kirin_pcie_probe(struct platform_device *pdev)
struct dw_pcie *pci;
int ret;
- if (!dev->of_node) {
- dev_err(dev, "NULL node\n");
- return -EINVAL;
- }
-
data = of_device_get_match_data(dev);
- if (!data) {
- dev_err(dev, "OF data missing\n");
- return -EINVAL;
- }
+ if (!data)
+ return dev_err_probe(dev, -EINVAL, "OF data missing\n");
kirin_pcie = devm_kzalloc(dev, sizeof(struct kirin_pcie), GFP_KERNEL);
if (!kirin_pcie)
@@ -813,7 +755,7 @@ static int kirin_pcie_probe(struct platform_device *pdev)
static struct platform_driver kirin_pcie_driver = {
.probe = kirin_pcie_probe,
- .remove_new = kirin_pcie_remove,
+ .remove = kirin_pcie_remove,
.driver = {
.name = "kirin-pcie",
.of_match_table = kirin_pcie_match,
diff --git a/drivers/pci/controller/dwc/pcie-qcom-common.c b/drivers/pci/controller/dwc/pcie-qcom-common.c
new file mode 100644
index 000000000000..3aad19b56da8
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-qcom-common.c
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/pci.h>
+
+#include "pcie-designware.h"
+#include "pcie-qcom-common.h"
+
+void qcom_pcie_common_set_16gt_equalization(struct dw_pcie *pci)
+{
+ u32 reg;
+
+ /*
+ * The GEN3_RELATED_OFF register is repurposed to apply equalization
+ * settings at various data transmission rates via the GEN3_EQ_*
+ * registers. The RATE_SHADOW_SEL bit field of GEN3_RELATED_OFF
+ * determines the data rate to which these equalization settings
+ * apply.
+ */
+ reg = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
+ reg &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL;
+ reg &= ~GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK;
+ reg |= FIELD_PREP(GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK,
+ GEN3_RELATED_OFF_RATE_SHADOW_SEL_16_0GT);
+ dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, reg);
+
+ reg = dw_pcie_readl_dbi(pci, GEN3_EQ_FB_MODE_DIR_CHANGE_OFF);
+ reg &= ~(GEN3_EQ_FMDC_T_MIN_PHASE23 |
+ GEN3_EQ_FMDC_N_EVALS |
+ GEN3_EQ_FMDC_MAX_PRE_CUSROR_DELTA |
+ GEN3_EQ_FMDC_MAX_POST_CUSROR_DELTA);
+ reg |= FIELD_PREP(GEN3_EQ_FMDC_T_MIN_PHASE23, 0x1) |
+ FIELD_PREP(GEN3_EQ_FMDC_N_EVALS, 0xd) |
+ FIELD_PREP(GEN3_EQ_FMDC_MAX_PRE_CUSROR_DELTA, 0x5) |
+ FIELD_PREP(GEN3_EQ_FMDC_MAX_POST_CUSROR_DELTA, 0x5);
+ dw_pcie_writel_dbi(pci, GEN3_EQ_FB_MODE_DIR_CHANGE_OFF, reg);
+
+ reg = dw_pcie_readl_dbi(pci, GEN3_EQ_CONTROL_OFF);
+ reg &= ~(GEN3_EQ_CONTROL_OFF_FB_MODE |
+ GEN3_EQ_CONTROL_OFF_PHASE23_EXIT_MODE |
+ GEN3_EQ_CONTROL_OFF_FOM_INC_INITIAL_EVAL |
+ GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC);
+ dw_pcie_writel_dbi(pci, GEN3_EQ_CONTROL_OFF, reg);
+}
+EXPORT_SYMBOL_GPL(qcom_pcie_common_set_16gt_equalization);
+
+void qcom_pcie_common_set_16gt_lane_margining(struct dw_pcie *pci)
+{
+ u32 reg;
+
+ reg = dw_pcie_readl_dbi(pci, GEN4_LANE_MARGINING_1_OFF);
+ reg &= ~(MARGINING_MAX_VOLTAGE_OFFSET |
+ MARGINING_NUM_VOLTAGE_STEPS |
+ MARGINING_MAX_TIMING_OFFSET |
+ MARGINING_NUM_TIMING_STEPS);
+ reg |= FIELD_PREP(MARGINING_MAX_VOLTAGE_OFFSET, 0x24) |
+ FIELD_PREP(MARGINING_NUM_VOLTAGE_STEPS, 0x78) |
+ FIELD_PREP(MARGINING_MAX_TIMING_OFFSET, 0x32) |
+ FIELD_PREP(MARGINING_NUM_TIMING_STEPS, 0x10);
+ dw_pcie_writel_dbi(pci, GEN4_LANE_MARGINING_1_OFF, reg);
+
+ reg = dw_pcie_readl_dbi(pci, GEN4_LANE_MARGINING_2_OFF);
+ reg |= MARGINING_IND_ERROR_SAMPLER |
+ MARGINING_SAMPLE_REPORTING_METHOD |
+ MARGINING_IND_LEFT_RIGHT_TIMING |
+ MARGINING_VOLTAGE_SUPPORTED;
+ reg &= ~(MARGINING_IND_UP_DOWN_VOLTAGE |
+ MARGINING_MAXLANES |
+ MARGINING_SAMPLE_RATE_TIMING |
+ MARGINING_SAMPLE_RATE_VOLTAGE);
+ reg |= FIELD_PREP(MARGINING_MAXLANES, pci->num_lanes) |
+ FIELD_PREP(MARGINING_SAMPLE_RATE_TIMING, 0x3f) |
+ FIELD_PREP(MARGINING_SAMPLE_RATE_VOLTAGE, 0x3f);
+ dw_pcie_writel_dbi(pci, GEN4_LANE_MARGINING_2_OFF, reg);
+}
+EXPORT_SYMBOL_GPL(qcom_pcie_common_set_16gt_lane_margining);
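Both helpers are meant to run after the DWC core is configured but before link training, and only when the port can actually reach 16.0 GT/s. The pcie-qcom-ep.c hunk later in this diff gates them exactly as in this sketch:

	if (pcie_link_speed[pci->max_link_speed] == PCIE_SPEED_16_0GT) {
		qcom_pcie_common_set_16gt_equalization(pci);
		qcom_pcie_common_set_16gt_lane_margining(pci);
	}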
diff --git a/drivers/pci/controller/dwc/pcie-qcom-common.h b/drivers/pci/controller/dwc/pcie-qcom-common.h
new file mode 100644
index 000000000000..7d88d29e4766
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-qcom-common.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _PCIE_QCOM_COMMON_H
+#define _PCIE_QCOM_COMMON_H
+
+struct dw_pcie;
+
+void qcom_pcie_common_set_16gt_equalization(struct dw_pcie *pci);
+void qcom_pcie_common_set_16gt_lane_margining(struct dw_pcie *pci);
+
+#endif
diff --git a/drivers/pci/controller/dwc/pcie-qcom-ep.c b/drivers/pci/controller/dwc/pcie-qcom-ep.c
index 36e5e80cd22f..46b1c6d19974 100644
--- a/drivers/pci/controller/dwc/pcie-qcom-ep.c
+++ b/drivers/pci/controller/dwc/pcie-qcom-ep.c
@@ -25,6 +25,7 @@
#include "../../pci.h"
#include "pcie-designware.h"
+#include "pcie-qcom-common.h"
/* PARF registers */
#define PARF_SYS_CTRL 0x00
@@ -47,6 +48,7 @@
#define PARF_DBI_BASE_ADDR_HI 0x354
#define PARF_SLV_ADDR_SPACE_SIZE 0x358
#define PARF_SLV_ADDR_SPACE_SIZE_HI 0x35c
+#define PARF_NO_SNOOP_OVERRIDE 0x3d4
#define PARF_ATU_BASE_ADDR 0x634
#define PARF_ATU_BASE_ADDR_HI 0x638
#define PARF_SRIS_MODE 0x644
@@ -57,6 +59,7 @@
#define PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L2 0xc88
#define PARF_DEVICE_TYPE 0x1000
#define PARF_BDF_TO_SID_CFG 0x2c00
+#define PARF_INT_ALL_5_MASK 0x2dcc
/* PARF_INT_ALL_{STATUS/CLEAR/MASK} register fields */
#define PARF_INT_ALL_LINK_DOWN BIT(1)
@@ -86,6 +89,10 @@
#define PARF_DEBUG_INT_CFG_BUS_MASTER_EN BIT(2)
#define PARF_DEBUG_INT_RADM_PM_TURNOFF BIT(3)
+/* PARF_NO_SNOOP_OVERRIDE register fields */
+#define WR_NO_SNOOP_OVERRIDE_EN BIT(1)
+#define RD_NO_SNOOP_OVERRIDE_EN BIT(3)
+
/* PARF_DEVICE_TYPE register fields */
#define PARF_DEVICE_TYPE_EP 0x0
@@ -122,6 +129,9 @@
/* PARF_CFG_BITS register fields */
#define PARF_CFG_BITS_REQ_EXIT_L1SS_MSI_LTR_EN BIT(1)
+/* PARF_INT_ALL_5_MASK fields */
+#define PARF_INT_ALL_5_MHI_RAM_DATA_PARITY_ERR BIT(0)
+
/* ELBI registers */
#define ELBI_SYS_STTS 0x08
#define ELBI_CS2_ENABLE 0xa4
@@ -150,6 +160,18 @@ enum qcom_pcie_ep_link_status {
};
/**
+ * struct qcom_pcie_ep_cfg - Per SoC config struct
+ * @hdma_support: HDMA support on this SoC
+ * @override_no_snoop: Override NO_SNOOP attribute in TLP to enable cache snooping
+ * @disable_mhi_ram_parity_check: Disable MHI RAM data parity error check
+ */
+struct qcom_pcie_ep_cfg {
+ bool hdma_support;
+ bool override_no_snoop;
+ bool disable_mhi_ram_parity_check;
+};
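A sketch of a per-SoC instance as it would be wired through of_device_get_match_data(); the SoC name is a placeholder, and whether all three quirks apply to any one SoC is an assumption:

	static const struct qcom_pcie_ep_cfg example_soc_cfg = {
		.hdma_support = true,
		.override_no_snoop = true,
		.disable_mhi_ram_parity_check = true,
	};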
+
+/**
* struct qcom_pcie_ep - Qualcomm PCIe Endpoint Controller
* @pci: Designware PCIe controller struct
* @parf: Qualcomm PCIe specific PARF register base
@@ -167,6 +189,7 @@ enum qcom_pcie_ep_link_status {
* @num_clks: PCIe clocks count
* @perst_en: Flag for PERST enable
* @perst_sep_en: Flag for PERST separation enable
+ * @cfg: PCIe EP config struct
* @link_status: PCIe Link status
* @global_irq: Qualcomm PCIe specific Global IRQ
* @perst_irq: PERST# IRQ
@@ -194,6 +217,7 @@ struct qcom_pcie_ep {
u32 perst_en;
u32 perst_sep_en;
+ const struct qcom_pcie_ep_cfg *cfg;
enum qcom_pcie_ep_link_status link_status;
int global_irq;
int perst_irq;
@@ -372,6 +396,10 @@ static int qcom_pcie_perst_deassert(struct dw_pcie *pci)
return ret;
}
+ /* Perform cleanup that requires refclk */
+ pci_epc_deinit_notify(pci->ep.epc);
+ dw_pcie_ep_cleanup(&pci->ep);
+
/* Assert WAKE# to RC to indicate device is ready */
gpiod_set_value_cansleep(pcie_ep->wake, 1);
usleep_range(WAKE_DELAY_US, WAKE_DELAY_US + 500);
@@ -463,12 +491,23 @@ static int qcom_pcie_perst_deassert(struct dw_pcie *pci)
PARF_INT_ALL_LINK_UP | PARF_INT_ALL_EDMA;
writel_relaxed(val, pcie_ep->parf + PARF_INT_ALL_MASK);
- ret = dw_pcie_ep_init_complete(&pcie_ep->pci.ep);
+ if (pcie_ep->cfg && pcie_ep->cfg->disable_mhi_ram_parity_check) {
+ val = readl_relaxed(pcie_ep->parf + PARF_INT_ALL_5_MASK);
+ val &= ~PARF_INT_ALL_5_MHI_RAM_DATA_PARITY_ERR;
+ writel_relaxed(val, pcie_ep->parf + PARF_INT_ALL_5_MASK);
+ }
+
+ ret = dw_pcie_ep_init_registers(&pcie_ep->pci.ep);
if (ret) {
dev_err(dev, "Failed to complete initialization: %d\n", ret);
goto err_disable_resources;
}
+ if (pcie_link_speed[pci->max_link_speed] == PCIE_SPEED_16_0GT) {
+ qcom_pcie_common_set_16gt_equalization(pci);
+ qcom_pcie_common_set_16gt_lane_margining(pci);
+ }
+
/*
* The physical address of the MMIO region which is exposed as the BAR
* should be written to MHI BASE registers.
@@ -482,13 +521,17 @@ static int qcom_pcie_perst_deassert(struct dw_pcie *pci)
val &= ~PARF_MSTR_AXI_CLK_EN;
writel_relaxed(val, pcie_ep->parf + PARF_MHI_CLOCK_RESET_CTRL);
- dw_pcie_ep_init_notify(&pcie_ep->pci.ep);
+ pci_epc_init_notify(pcie_ep->pci.ep.epc);
/* Enable LTSSM */
val = readl_relaxed(pcie_ep->parf + PARF_LTSSM);
val |= BIT(8);
writel_relaxed(val, pcie_ep->parf + PARF_LTSSM);
+ if (pcie_ep->cfg && pcie_ep->cfg->override_no_snoop)
+ writel_relaxed(WR_NO_SNOOP_OVERRIDE_EN | RD_NO_SNOOP_OVERRIDE_EN,
+ pcie_ep->parf + PARF_NO_SNOOP_OVERRIDE);
+
return 0;
err_disable_resources:
@@ -500,12 +543,6 @@ err_disable_resources:
static void qcom_pcie_perst_assert(struct dw_pcie *pci)
{
struct qcom_pcie_ep *pcie_ep = to_pcie_ep(pci);
- struct device *dev = pci->dev;
-
- if (pcie_ep->link_status == QCOM_PCIE_EP_LINK_DISABLED) {
- dev_dbg(dev, "Link is already disabled\n");
- return;
- }
qcom_pcie_disable_resources(pcie_ep);
pcie_ep->link_status = QCOM_PCIE_EP_LINK_DISABLED;
@@ -630,21 +667,19 @@ static irqreturn_t qcom_pcie_ep_global_irq_thread(int irq, void *data)
struct dw_pcie *pci = &pcie_ep->pci;
struct device *dev = pci->dev;
u32 status = readl_relaxed(pcie_ep->parf + PARF_INT_ALL_STATUS);
- u32 mask = readl_relaxed(pcie_ep->parf + PARF_INT_ALL_MASK);
u32 dstate, val;
writel_relaxed(status, pcie_ep->parf + PARF_INT_ALL_CLEAR);
- status &= mask;
if (FIELD_GET(PARF_INT_ALL_LINK_DOWN, status)) {
dev_dbg(dev, "Received Linkdown event\n");
pcie_ep->link_status = QCOM_PCIE_EP_LINK_DOWN;
- pci_epc_linkdown(pci->ep.epc);
+ dw_pcie_ep_linkdown(&pci->ep);
} else if (FIELD_GET(PARF_INT_ALL_BME, status)) {
- dev_dbg(dev, "Received BME event. Link is enabled!\n");
+ dev_dbg(dev, "Received Bus Master Enable event\n");
pcie_ep->link_status = QCOM_PCIE_EP_LINK_ENABLED;
qcom_pcie_ep_icc_update(pcie_ep);
- pci_epc_bme_notify(pci->ep.epc);
+ pci_epc_bus_master_enable_notify(pci->ep.epc);
} else if (FIELD_GET(PARF_INT_ALL_PM_TURNOFF, status)) {
dev_dbg(dev, "Received PM Turn-off event! Entering L23\n");
val = readl_relaxed(pcie_ep->parf + PARF_PM_CTRL);
@@ -664,7 +699,8 @@ static irqreturn_t qcom_pcie_ep_global_irq_thread(int irq, void *data)
dw_pcie_ep_linkup(&pci->ep);
pcie_ep->link_status = QCOM_PCIE_EP_LINK_UP;
} else {
- dev_err(dev, "Received unknown event: %d\n", status);
+ dev_WARN_ONCE(dev, 1, "Received unknown event. INT_STATUS: 0x%08x\n",
+ status);
}
return IRQ_HANDLED;
@@ -695,8 +731,15 @@ static irqreturn_t qcom_pcie_ep_perst_irq_thread(int irq, void *data)
static int qcom_pcie_ep_enable_irq_resources(struct platform_device *pdev,
struct qcom_pcie_ep *pcie_ep)
{
+ struct device *dev = pcie_ep->pci.dev;
+ char *name;
int ret;
+ name = devm_kasprintf(dev, GFP_KERNEL, "qcom_pcie_ep_global_irq%d",
+ pcie_ep->pci.ep.epc->domain_nr);
+ if (!name)
+ return -ENOMEM;
+
pcie_ep->global_irq = platform_get_irq_byname(pdev, "global");
if (pcie_ep->global_irq < 0)
return pcie_ep->global_irq;
@@ -704,18 +747,23 @@ static int qcom_pcie_ep_enable_irq_resources(struct platform_device *pdev,
ret = devm_request_threaded_irq(&pdev->dev, pcie_ep->global_irq, NULL,
qcom_pcie_ep_global_irq_thread,
IRQF_ONESHOT,
- "global_irq", pcie_ep);
+ name, pcie_ep);
if (ret) {
dev_err(&pdev->dev, "Failed to request Global IRQ\n");
return ret;
}
+ name = devm_kasprintf(dev, GFP_KERNEL, "qcom_pcie_ep_perst_irq%d",
+ pcie_ep->pci.ep.epc->domain_nr);
+ if (!name)
+ return -ENOMEM;
+
pcie_ep->perst_irq = gpiod_to_irq(pcie_ep->reset);
irq_set_status_flags(pcie_ep->perst_irq, IRQ_NOAUTOEN);
ret = devm_request_threaded_irq(&pdev->dev, pcie_ep->perst_irq, NULL,
qcom_pcie_ep_perst_irq_thread,
IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
- "perst_irq", pcie_ep);
+ name, pcie_ep);
if (ret) {
dev_err(&pdev->dev, "Failed to request PERST IRQ\n");
disable_irq(pcie_ep->global_irq);
@@ -774,10 +822,13 @@ static void qcom_pcie_ep_init_debugfs(struct qcom_pcie_ep *pcie_ep)
static const struct pci_epc_features qcom_pcie_epc_features = {
.linkup_notifier = true,
- .core_init_notifier = true,
.msi_capable = true,
.msix_capable = false,
.align = SZ_4K,
+ .bar[BAR_0] = { .only_64bit = true, },
+ .bar[BAR_1] = { .type = BAR_RESERVED, },
+ .bar[BAR_2] = { .only_64bit = true, },
+ .bar[BAR_3] = { .type = BAR_RESERVED, },
};
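In the BAR flags added above, a 64-bit BAR consumes two consecutive BAR registers, which is why BAR_1 and BAR_3 are marked BAR_RESERVED right after the only_64bit BAR_0 and BAR_2. A minimal sketch of how an endpoint function driver typically honors these flags (names and context are hypothetical, not part of this patch):

/* Sketch only: skip reserved slots (the upper halves of 64-bit BARs). */
const struct pci_epc_features *ft = pci_epc_get_features(epc, func_no, vfunc_no);
enum pci_barno bar;

for (bar = BAR_0; bar <= BAR_5; bar++) {
	if (ft->bar[bar].type == BAR_RESERVED)
		continue;	/* e.g. BAR_1/BAR_3 above */
	/* ... allocate the BAR, honoring ft->bar[bar].only_64bit ... */
}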
static const struct pci_epc_features *
@@ -816,27 +867,29 @@ static int qcom_pcie_ep_probe(struct platform_device *pdev)
pcie_ep->pci.ops = &pci_ops;
pcie_ep->pci.ep.ops = &pci_ep_ops;
pcie_ep->pci.edma.nr_irqs = 1;
+
+ pcie_ep->cfg = of_device_get_match_data(dev);
+ if (pcie_ep->cfg && pcie_ep->cfg->hdma_support) {
+ pcie_ep->pci.edma.ll_wr_cnt = 8;
+ pcie_ep->pci.edma.ll_rd_cnt = 8;
+ pcie_ep->pci.edma.mf = EDMA_MF_HDMA_NATIVE;
+ }
+
platform_set_drvdata(pdev, pcie_ep);
ret = qcom_pcie_ep_get_resources(pdev, pcie_ep);
if (ret)
return ret;
- ret = qcom_pcie_enable_resources(pcie_ep);
- if (ret) {
- dev_err(dev, "Failed to enable resources: %d\n", ret);
- return ret;
- }
-
ret = dw_pcie_ep_init(&pcie_ep->pci.ep);
if (ret) {
dev_err(dev, "Failed to initialize endpoint: %d\n", ret);
- goto err_disable_resources;
+ return ret;
}
ret = qcom_pcie_ep_enable_irq_resources(pdev, pcie_ep);
if (ret)
- goto err_disable_resources;
+ goto err_ep_deinit;
name = devm_kasprintf(dev, GFP_KERNEL, "%pOFP", dev->of_node);
if (!name) {
@@ -853,8 +906,8 @@ err_disable_irqs:
disable_irq(pcie_ep->global_irq);
disable_irq(pcie_ep->perst_irq);
-err_disable_resources:
- qcom_pcie_disable_resources(pcie_ep);
+err_ep_deinit:
+ dw_pcie_ep_deinit(&pcie_ep->pci.ep);
return ret;
}
@@ -874,16 +927,24 @@ static void qcom_pcie_ep_remove(struct platform_device *pdev)
qcom_pcie_disable_resources(pcie_ep);
}
+static const struct qcom_pcie_ep_cfg cfg_1_34_0 = {
+ .hdma_support = true,
+ .override_no_snoop = true,
+ .disable_mhi_ram_parity_check = true,
+};
+
static const struct of_device_id qcom_pcie_ep_match[] = {
+ { .compatible = "qcom,sa8775p-pcie-ep", .data = &cfg_1_34_0},
{ .compatible = "qcom,sdx55-pcie-ep", },
{ .compatible = "qcom,sm8450-pcie-ep", },
+ { .compatible = "qcom,sar2130p-pcie-ep", },
{ }
};
MODULE_DEVICE_TABLE(of, qcom_pcie_ep_match);
static struct platform_driver qcom_pcie_ep_driver = {
.probe = qcom_pcie_ep_probe,
- .remove_new = qcom_pcie_ep_remove,
+ .remove = qcom_pcie_ep_remove,
.driver = {
.name = "qcom-pcie-ep",
.of_match_table = qcom_pcie_ep_match,
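The qcom_pcie_ep_cfg flags above are resolved once at probe time through OF match data, which is why every user checks pcie_ep->cfg for NULL: older compatibles carry no .data. A minimal sketch of the pattern with hypothetical names, not taken from this patch:

#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

struct my_soc_cfg {
	bool has_quirk;
};

static const struct my_soc_cfg my_soc_v2_cfg = { .has_quirk = true };

static const struct of_device_id my_match[] = {
	{ .compatible = "vendor,soc-v1" },			/* no .data: cfg is NULL */
	{ .compatible = "vendor,soc-v2", .data = &my_soc_v2_cfg },
	{ }
};

static int my_probe(struct platform_device *pdev)
{
	const struct my_soc_cfg *cfg = of_device_get_match_data(&pdev->dev);

	if (cfg && cfg->has_quirk) {	/* NULL check mirrors the driver above */
		/* apply the per-SoC quirk */
	}
	return 0;
}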
diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
index 14772edcf0d3..dc98ae63362d 100644
--- a/drivers/pci/controller/dwc/pcie-qcom.c
+++ b/drivers/pci/controller/dwc/pcie-qcom.c
@@ -18,10 +18,11 @@
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
+#include <linux/limits.h>
#include <linux/init.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/pci.h>
+#include <linux/pm_opp.h>
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
#include <linux/phy/pcie.h>
@@ -30,9 +31,11 @@
#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/types.h>
+#include <linux/units.h>
#include "../../pci.h"
#include "pcie-designware.h"
+#include "pcie-qcom-common.h"
/* PARF registers */
#define PARF_SYS_CTRL 0x00
@@ -43,14 +46,24 @@
#define PARF_PHY_REFCLK 0x4c
#define PARF_CONFIG_BITS 0x50
#define PARF_DBI_BASE_ADDR 0x168
+#define PARF_SLV_ADDR_SPACE_SIZE 0x16c
#define PARF_MHI_CLOCK_RESET_CTRL 0x174
#define PARF_AXI_MSTR_WR_ADDR_HALT 0x178
#define PARF_AXI_MSTR_WR_ADDR_HALT_V2 0x1a8
#define PARF_Q2A_FLUSH 0x1ac
#define PARF_LTSSM 0x1b0
+#define PARF_INT_ALL_STATUS 0x224
+#define PARF_INT_ALL_CLEAR 0x228
+#define PARF_INT_ALL_MASK 0x22c
#define PARF_SID_OFFSET 0x234
#define PARF_BDF_TRANSLATE_CFG 0x24c
-#define PARF_SLV_ADDR_SPACE_SIZE 0x358
+#define PARF_DBI_BASE_ADDR_V2 0x350
+#define PARF_DBI_BASE_ADDR_V2_HI 0x354
+#define PARF_SLV_ADDR_SPACE_SIZE_V2 0x358
+#define PARF_SLV_ADDR_SPACE_SIZE_V2_HI 0x35c
+#define PARF_NO_SNOOP_OVERRIDE 0x3d4
+#define PARF_ATU_BASE_ADDR 0x634
+#define PARF_ATU_BASE_ADDR_HI 0x638
#define PARF_DEVICE_TYPE 0x1000
#define PARF_BDF_TO_SID_TABLE_N 0x2000
#define PARF_BDF_TO_SID_CFG 0x2c00
@@ -105,7 +118,7 @@
#define PHY_RX0_EQ(x) FIELD_PREP(GENMASK(26, 24), x)
/* PARF_SLV_ADDR_SPACE_SIZE register value */
-#define SLV_ADDR_SPACE_SZ 0x10000000
+#define SLV_ADDR_SPACE_SZ 0x80000000
/* PARF_MHI_CLOCK_RESET_CTRL register fields */
#define AHB_CLK_EN BIT(0)
@@ -118,6 +131,14 @@
/* PARF_LTSSM register fields */
#define LTSSM_EN BIT(8)
+/* PARF_INT_ALL_{STATUS/CLEAR/MASK} register fields */
+#define PARF_INT_ALL_LINK_UP BIT(13)
+#define PARF_INT_MSI_DEV_0_7 GENMASK(30, 23)
+
+/* PARF_NO_SNOOP_OVERRIDE register fields */
+#define WR_NO_SNOOP_OVERRIDE_EN BIT(1)
+#define RD_NO_SNOOP_OVERRIDE_EN BIT(3)
+
/* PARF_DEVICE_TYPE register fields */
#define DEVICE_TYPE_RC 0x4
@@ -154,58 +175,56 @@
#define QCOM_PCIE_LINK_SPEED_TO_BW(speed) \
Mbps_to_icc(PCIE_SPEED2MBS_ENC(pcie_link_speed[speed]))
-#define QCOM_PCIE_1_0_0_MAX_CLOCKS 4
struct qcom_pcie_resources_1_0_0 {
- struct clk_bulk_data clks[QCOM_PCIE_1_0_0_MAX_CLOCKS];
+ struct clk_bulk_data *clks;
+ int num_clks;
struct reset_control *core;
struct regulator *vdda;
};
-#define QCOM_PCIE_2_1_0_MAX_CLOCKS 5
#define QCOM_PCIE_2_1_0_MAX_RESETS 6
#define QCOM_PCIE_2_1_0_MAX_SUPPLY 3
struct qcom_pcie_resources_2_1_0 {
- struct clk_bulk_data clks[QCOM_PCIE_2_1_0_MAX_CLOCKS];
+ struct clk_bulk_data *clks;
+ int num_clks;
struct reset_control_bulk_data resets[QCOM_PCIE_2_1_0_MAX_RESETS];
int num_resets;
struct regulator_bulk_data supplies[QCOM_PCIE_2_1_0_MAX_SUPPLY];
};
-#define QCOM_PCIE_2_3_2_MAX_CLOCKS 4
#define QCOM_PCIE_2_3_2_MAX_SUPPLY 2
struct qcom_pcie_resources_2_3_2 {
- struct clk_bulk_data clks[QCOM_PCIE_2_3_2_MAX_CLOCKS];
+ struct clk_bulk_data *clks;
+ int num_clks;
struct regulator_bulk_data supplies[QCOM_PCIE_2_3_2_MAX_SUPPLY];
};
-#define QCOM_PCIE_2_3_3_MAX_CLOCKS 5
#define QCOM_PCIE_2_3_3_MAX_RESETS 7
struct qcom_pcie_resources_2_3_3 {
- struct clk_bulk_data clks[QCOM_PCIE_2_3_3_MAX_CLOCKS];
+ struct clk_bulk_data *clks;
+ int num_clks;
struct reset_control_bulk_data rst[QCOM_PCIE_2_3_3_MAX_RESETS];
};
-#define QCOM_PCIE_2_4_0_MAX_CLOCKS 4
#define QCOM_PCIE_2_4_0_MAX_RESETS 12
struct qcom_pcie_resources_2_4_0 {
- struct clk_bulk_data clks[QCOM_PCIE_2_4_0_MAX_CLOCKS];
+ struct clk_bulk_data *clks;
int num_clks;
struct reset_control_bulk_data resets[QCOM_PCIE_2_4_0_MAX_RESETS];
int num_resets;
};
-#define QCOM_PCIE_2_7_0_MAX_CLOCKS 15
#define QCOM_PCIE_2_7_0_MAX_SUPPLIES 2
struct qcom_pcie_resources_2_7_0 {
- struct clk_bulk_data clks[QCOM_PCIE_2_7_0_MAX_CLOCKS];
+ struct clk_bulk_data *clks;
int num_clks;
struct regulator_bulk_data supplies[QCOM_PCIE_2_7_0_MAX_SUPPLIES];
struct reset_control *rst;
};
-#define QCOM_PCIE_2_9_0_MAX_CLOCKS 5
struct qcom_pcie_resources_2_9_0 {
- struct clk_bulk_data clks[QCOM_PCIE_2_9_0_MAX_CLOCKS];
+ struct clk_bulk_data *clks;
+ int num_clks;
struct reset_control *rst;
};
@@ -231,8 +250,15 @@ struct qcom_pcie_ops {
int (*config_sid)(struct qcom_pcie *pcie);
};
+ /**
+ * struct qcom_pcie_cfg - Per SoC config struct
+ * @ops: qcom PCIe ops structure
+ * @override_no_snoop: Override NO_SNOOP attribute in TLP to enable cache
+ * snooping
+ */
struct qcom_pcie_cfg {
const struct qcom_pcie_ops *ops;
+ bool override_no_snoop;
bool no_l0s;
};
@@ -245,9 +271,11 @@ struct qcom_pcie {
struct phy *phy;
struct gpio_desc *reset;
struct icc_path *icc_mem;
+ struct icc_path *icc_cpu;
const struct qcom_pcie_cfg *cfg;
struct dentry *debugfs;
bool suspended;
+ bool use_pm_opp;
};
#define to_qcom_pcie(x) dev_get_drvdata((x)->dev)
@@ -270,6 +298,11 @@ static int qcom_pcie_start_link(struct dw_pcie *pci)
{
struct qcom_pcie *pcie = to_qcom_pcie(pci);
+ if (pcie_link_speed[pci->max_link_speed] == PCIE_SPEED_16_0GT) {
+ qcom_pcie_common_set_16gt_equalization(pci);
+ qcom_pcie_common_set_16gt_lane_margining(pci);
+ }
+
/* Enable Link Training state machine */
if (pcie->cfg->ops->ltssm_enable)
pcie->cfg->ops->ltssm_enable(pcie);
@@ -311,6 +344,50 @@ static void qcom_pcie_clear_hpc(struct dw_pcie *pci)
dw_pcie_dbi_ro_wr_dis(pci);
}
+static void qcom_pcie_configure_dbi_base(struct qcom_pcie *pcie)
+{
+ struct dw_pcie *pci = pcie->pci;
+
+ if (pci->dbi_phys_addr) {
+ /*
+ * The PARF_DBI_BASE_ADDR register is in the CPU domain and
+ * requires the CPU physical address to be programmed.
+ */
+ writel(lower_32_bits(pci->dbi_phys_addr), pcie->parf +
+ PARF_DBI_BASE_ADDR);
+ writel(SLV_ADDR_SPACE_SZ, pcie->parf +
+ PARF_SLV_ADDR_SPACE_SIZE);
+ }
+}
+
+static void qcom_pcie_configure_dbi_atu_base(struct qcom_pcie *pcie)
+{
+ struct dw_pcie *pci = pcie->pci;
+
+ if (pci->dbi_phys_addr) {
+ /*
+ * The PARF_DBI_BASE_ADDR_V2 and PARF_ATU_BASE_ADDR registers are
+ * in the CPU domain and require the CPU physical addresses to be
+ * programmed.
+ */
+ writel(lower_32_bits(pci->dbi_phys_addr), pcie->parf +
+ PARF_DBI_BASE_ADDR_V2);
+ writel(upper_32_bits(pci->dbi_phys_addr), pcie->parf +
+ PARF_DBI_BASE_ADDR_V2_HI);
+
+ if (pci->atu_phys_addr) {
+ writel(lower_32_bits(pci->atu_phys_addr), pcie->parf +
+ PARF_ATU_BASE_ADDR);
+ writel(upper_32_bits(pci->atu_phys_addr), pcie->parf +
+ PARF_ATU_BASE_ADDR_HI);
+ }
+
+ writel(0x0, pcie->parf + PARF_SLV_ADDR_SPACE_SIZE_V2);
+ writel(SLV_ADDR_SPACE_SZ, pcie->parf +
+ PARF_SLV_ADDR_SPACE_SIZE_V2_HI);
+ }
+}
+
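Both helpers split a 64-bit CPU physical address across a LO/HI register pair with lower_32_bits()/upper_32_bits(). A worked example of the split, assuming a hypothetical 34-bit address:

/* Sketch: how a 64-bit CPU address maps onto a LO/HI register pair. */
static void write_addr_pair(void __iomem *lo, void __iomem *hi, u64 addr)
{
	writel(lower_32_bits(addr), lo);	/* 0x1c0000000 -> 0xc0000000 */
	writel(upper_32_bits(addr), hi);	/* 0x1c0000000 -> 0x00000001 */
}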
static void qcom_pcie_2_1_0_ltssm_enable(struct qcom_pcie *pcie)
{
u32 val;
@@ -337,21 +414,11 @@ static int qcom_pcie_get_resources_2_1_0(struct qcom_pcie *pcie)
if (ret)
return ret;
- res->clks[0].id = "iface";
- res->clks[1].id = "core";
- res->clks[2].id = "phy";
- res->clks[3].id = "aux";
- res->clks[4].id = "ref";
-
- /* iface, core, phy are required */
- ret = devm_clk_bulk_get(dev, 3, res->clks);
- if (ret < 0)
- return ret;
-
- /* aux, ref are optional */
- ret = devm_clk_bulk_get_optional(dev, 2, res->clks + 3);
- if (ret < 0)
- return ret;
+ res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
+ if (res->num_clks < 0) {
+ dev_err(dev, "Failed to get clocks\n");
+ return res->num_clks;
+ }
res->resets[0].id = "pci";
res->resets[1].id = "axi";
@@ -373,7 +440,7 @@ static void qcom_pcie_deinit_2_1_0(struct qcom_pcie *pcie)
{
struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
- clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
+ clk_bulk_disable_unprepare(res->num_clks, res->clks);
reset_control_bulk_assert(res->num_resets, res->resets);
writel(1, pcie->parf + PARF_PHY_CTRL);
@@ -425,7 +492,7 @@ static int qcom_pcie_post_init_2_1_0(struct qcom_pcie *pcie)
val &= ~PHY_TEST_PWR_DOWN;
writel(val, pcie->parf + PARF_PHY_CTRL);
- ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
+ ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
if (ret)
return ret;
@@ -476,20 +543,16 @@ static int qcom_pcie_get_resources_1_0_0(struct qcom_pcie *pcie)
struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
struct dw_pcie *pci = pcie->pci;
struct device *dev = pci->dev;
- int ret;
res->vdda = devm_regulator_get(dev, "vdda");
if (IS_ERR(res->vdda))
return PTR_ERR(res->vdda);
- res->clks[0].id = "iface";
- res->clks[1].id = "aux";
- res->clks[2].id = "master_bus";
- res->clks[3].id = "slave_bus";
-
- ret = devm_clk_bulk_get(dev, ARRAY_SIZE(res->clks), res->clks);
- if (ret < 0)
- return ret;
+ res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
+ if (res->num_clks < 0) {
+ dev_err(dev, "Failed to get clocks\n");
+ return res->num_clks;
+ }
res->core = devm_reset_control_get_exclusive(dev, "core");
return PTR_ERR_OR_ZERO(res->core);
@@ -500,7 +563,7 @@ static void qcom_pcie_deinit_1_0_0(struct qcom_pcie *pcie)
struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
reset_control_assert(res->core);
- clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
+ clk_bulk_disable_unprepare(res->num_clks, res->clks);
regulator_disable(res->vdda);
}
@@ -517,7 +580,7 @@ static int qcom_pcie_init_1_0_0(struct qcom_pcie *pcie)
return ret;
}
- ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
+ ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
if (ret) {
dev_err(dev, "cannot prepare/enable clocks\n");
goto err_assert_reset;
@@ -532,7 +595,7 @@ static int qcom_pcie_init_1_0_0(struct qcom_pcie *pcie)
return 0;
err_disable_clks:
- clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
+ clk_bulk_disable_unprepare(res->num_clks, res->clks);
err_assert_reset:
reset_control_assert(res->core);
@@ -541,8 +604,7 @@ err_assert_reset:
static int qcom_pcie_post_init_1_0_0(struct qcom_pcie *pcie)
{
- /* change DBI base address */
- writel(0, pcie->parf + PARF_DBI_BASE_ADDR);
+ qcom_pcie_configure_dbi_base(pcie);
if (IS_ENABLED(CONFIG_PCI_MSI)) {
u32 val = readl(pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT);
@@ -580,14 +642,11 @@ static int qcom_pcie_get_resources_2_3_2(struct qcom_pcie *pcie)
if (ret)
return ret;
- res->clks[0].id = "aux";
- res->clks[1].id = "cfg";
- res->clks[2].id = "bus_master";
- res->clks[3].id = "bus_slave";
-
- ret = devm_clk_bulk_get(dev, ARRAY_SIZE(res->clks), res->clks);
- if (ret < 0)
- return ret;
+ res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
+ if (res->num_clks < 0) {
+ dev_err(dev, "Failed to get clocks\n");
+ return res->num_clks;
+ }
return 0;
}
@@ -596,7 +655,7 @@ static void qcom_pcie_deinit_2_3_2(struct qcom_pcie *pcie)
{
struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
- clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
+ clk_bulk_disable_unprepare(res->num_clks, res->clks);
regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
}
@@ -613,7 +672,7 @@ static int qcom_pcie_init_2_3_2(struct qcom_pcie *pcie)
return ret;
}
- ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
+ ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
if (ret) {
dev_err(dev, "cannot prepare/enable clocks\n");
regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
@@ -632,8 +691,7 @@ static int qcom_pcie_post_init_2_3_2(struct qcom_pcie *pcie)
val &= ~PHY_TEST_PWR_DOWN;
writel(val, pcie->parf + PARF_PHY_CTRL);
- /* change DBI base address */
- writel(0, pcie->parf + PARF_DBI_BASE_ADDR);
+ qcom_pcie_configure_dbi_base(pcie);
/* MAC PHY_POWERDOWN MUX DISABLE */
val = readl(pcie->parf + PARF_SYS_CTRL);
@@ -661,17 +719,11 @@ static int qcom_pcie_get_resources_2_4_0(struct qcom_pcie *pcie)
bool is_ipq = of_device_is_compatible(dev->of_node, "qcom,pcie-ipq4019");
int ret;
- res->clks[0].id = "aux";
- res->clks[1].id = "master_bus";
- res->clks[2].id = "slave_bus";
- res->clks[3].id = "iface";
-
- /* qcom,pcie-ipq4019 is defined without "iface" */
- res->num_clks = is_ipq ? 3 : 4;
-
- ret = devm_clk_bulk_get(dev, res->num_clks, res->clks);
- if (ret < 0)
- return ret;
+ res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
+ if (res->num_clks < 0) {
+ dev_err(dev, "Failed to get clocks\n");
+ return res->num_clks;
+ }
res->resets[0].id = "axi_m";
res->resets[1].id = "axi_s";
@@ -742,15 +794,11 @@ static int qcom_pcie_get_resources_2_3_3(struct qcom_pcie *pcie)
struct device *dev = pci->dev;
int ret;
- res->clks[0].id = "iface";
- res->clks[1].id = "axi_m";
- res->clks[2].id = "axi_s";
- res->clks[3].id = "ahb";
- res->clks[4].id = "aux";
-
- ret = devm_clk_bulk_get(dev, ARRAY_SIZE(res->clks), res->clks);
- if (ret < 0)
- return ret;
+ res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
+ if (res->num_clks < 0) {
+ dev_err(dev, "Failed to get clocks\n");
+ return res->num_clks;
+ }
res->rst[0].id = "axi_m";
res->rst[1].id = "axi_s";
@@ -771,7 +819,7 @@ static void qcom_pcie_deinit_2_3_3(struct qcom_pcie *pcie)
{
struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
- clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
+ clk_bulk_disable_unprepare(res->num_clks, res->clks);
}
static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie)
@@ -801,7 +849,7 @@ static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie)
*/
usleep_range(2000, 2500);
- ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
+ ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
if (ret) {
dev_err(dev, "cannot prepare/enable clocks\n");
goto err_assert_resets;
@@ -825,13 +873,11 @@ static int qcom_pcie_post_init_2_3_3(struct qcom_pcie *pcie)
u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
u32 val;
- writel(SLV_ADDR_SPACE_SZ, pcie->parf + PARF_SLV_ADDR_SPACE_SIZE);
-
val = readl(pcie->parf + PARF_PHY_CTRL);
val &= ~PHY_TEST_PWR_DOWN;
writel(val, pcie->parf + PARF_PHY_CTRL);
- writel(0, pcie->parf + PARF_DBI_BASE_ADDR);
+ qcom_pcie_configure_dbi_atu_base(pcie);
writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS
| SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS |
@@ -862,8 +908,6 @@ static int qcom_pcie_get_resources_2_7_0(struct qcom_pcie *pcie)
struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
struct dw_pcie *pci = pcie->pci;
struct device *dev = pci->dev;
- unsigned int num_clks, num_opt_clks;
- unsigned int idx;
int ret;
res->rst = devm_reset_control_array_get_exclusive(dev);
@@ -877,36 +921,11 @@ static int qcom_pcie_get_resources_2_7_0(struct qcom_pcie *pcie)
if (ret)
return ret;
- idx = 0;
- res->clks[idx++].id = "aux";
- res->clks[idx++].id = "cfg";
- res->clks[idx++].id = "bus_master";
- res->clks[idx++].id = "bus_slave";
- res->clks[idx++].id = "slave_q2a";
-
- num_clks = idx;
-
- ret = devm_clk_bulk_get(dev, num_clks, res->clks);
- if (ret < 0)
- return ret;
-
- res->clks[idx++].id = "tbu";
- res->clks[idx++].id = "ddrss_sf_tbu";
- res->clks[idx++].id = "aggre0";
- res->clks[idx++].id = "aggre1";
- res->clks[idx++].id = "noc_aggr";
- res->clks[idx++].id = "noc_aggr_4";
- res->clks[idx++].id = "noc_aggr_south_sf";
- res->clks[idx++].id = "cnoc_qx";
- res->clks[idx++].id = "sleep";
- res->clks[idx++].id = "cnoc_sf_axi";
-
- num_opt_clks = idx - num_clks;
- res->num_clks = idx;
-
- ret = devm_clk_bulk_get_optional(dev, num_opt_clks, res->clks + num_clks);
- if (ret < 0)
- return ret;
+ res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
+ if (res->num_clks < 0) {
+ dev_err(dev, "Failed to get clocks\n");
+ return res->num_clks;
+ }
return 0;
}
@@ -954,8 +973,7 @@ static int qcom_pcie_init_2_7_0(struct qcom_pcie *pcie)
val &= ~PHY_TEST_PWR_DOWN;
writel(val, pcie->parf + PARF_PHY_CTRL);
- /* change DBI base address */
- writel(0, pcie->parf + PARF_DBI_BASE_ADDR);
+ qcom_pcie_configure_dbi_atu_base(pcie);
/* MAC PHY_POWERDOWN MUX DISABLE */
val = readl(pcie->parf + PARF_SYS_CTRL);
@@ -986,6 +1004,12 @@ err_disable_regulators:
static int qcom_pcie_post_init_2_7_0(struct qcom_pcie *pcie)
{
+ const struct qcom_pcie_cfg *pcie_cfg = pcie->cfg;
+
+ if (pcie_cfg->override_no_snoop)
+ writel(WR_NO_SNOOP_OVERRIDE_EN | RD_NO_SNOOP_OVERRIDE_EN,
+ pcie->parf + PARF_NO_SNOOP_OVERRIDE);
+
qcom_pcie_clear_aspm_l0s(pcie->pci);
qcom_pcie_clear_hpc(pcie->pci);
@@ -1101,17 +1125,12 @@ static int qcom_pcie_get_resources_2_9_0(struct qcom_pcie *pcie)
struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0;
struct dw_pcie *pci = pcie->pci;
struct device *dev = pci->dev;
- int ret;
- res->clks[0].id = "iface";
- res->clks[1].id = "axi_m";
- res->clks[2].id = "axi_s";
- res->clks[3].id = "axi_bridge";
- res->clks[4].id = "rchng";
-
- ret = devm_clk_bulk_get(dev, ARRAY_SIZE(res->clks), res->clks);
- if (ret < 0)
- return ret;
+ res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
+ if (res->num_clks < 0) {
+ dev_err(dev, "Failed to get clocks\n");
+ return res->num_clks;
+ }
res->rst = devm_reset_control_array_get_exclusive(dev);
if (IS_ERR(res->rst))
@@ -1124,7 +1143,7 @@ static void qcom_pcie_deinit_2_9_0(struct qcom_pcie *pcie)
{
struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0;
- clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
+ clk_bulk_disable_unprepare(res->num_clks, res->clks);
}
static int qcom_pcie_init_2_9_0(struct qcom_pcie *pcie)
@@ -1153,7 +1172,7 @@ static int qcom_pcie_init_2_9_0(struct qcom_pcie *pcie)
usleep_range(2000, 2500);
- return clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
+ return clk_bulk_prepare_enable(res->num_clks, res->clks);
}
static int qcom_pcie_post_init_2_9_0(struct qcom_pcie *pcie)
@@ -1163,14 +1182,11 @@ static int qcom_pcie_post_init_2_9_0(struct qcom_pcie *pcie)
u32 val;
int i;
- writel(SLV_ADDR_SPACE_SZ,
- pcie->parf + PARF_SLV_ADDR_SPACE_SIZE);
-
val = readl(pcie->parf + PARF_PHY_CTRL);
val &= ~PHY_TEST_PWR_DOWN;
writel(val, pcie->parf + PARF_PHY_CTRL);
- writel(0, pcie->parf + PARF_DBI_BASE_ADDR);
+ qcom_pcie_configure_dbi_atu_base(pcie);
writel(DEVICE_TYPE_RC, pcie->parf + PARF_DEVICE_TYPE);
writel(BYPASS | MSTR_AXI_CLK_EN | AHB_CLK_EN,
@@ -1349,6 +1365,16 @@ static const struct qcom_pcie_ops ops_1_9_0 = {
.config_sid = qcom_pcie_config_sid_1_9_0,
};
+/* Qcom IP rev.: 1.21.0 Synopsys IP rev.: 5.60a */
+static const struct qcom_pcie_ops ops_1_21_0 = {
+ .get_resources = qcom_pcie_get_resources_2_7_0,
+ .init = qcom_pcie_init_2_7_0,
+ .post_init = qcom_pcie_post_init_2_7_0,
+ .host_post_init = qcom_pcie_host_post_init_2_7_0,
+ .deinit = qcom_pcie_deinit_2_7_0,
+ .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
+};
+
/* Qcom IP rev.: 2.9.0 Synopsys IP rev.: 5.00a */
static const struct qcom_pcie_ops ops_2_9_0 = {
.get_resources = qcom_pcie_get_resources_2_9_0,
@@ -1366,6 +1392,11 @@ static const struct qcom_pcie_cfg cfg_1_9_0 = {
.ops = &ops_1_9_0,
};
+static const struct qcom_pcie_cfg cfg_1_34_0 = {
+ .ops = &ops_1_9_0,
+ .override_no_snoop = true,
+};
+
static const struct qcom_pcie_cfg cfg_2_1_0 = {
.ops = &ops_2_1_0,
};
@@ -1391,7 +1422,7 @@ static const struct qcom_pcie_cfg cfg_2_9_0 = {
};
static const struct qcom_pcie_cfg cfg_sc8280xp = {
- .ops = &ops_1_9_0,
+ .ops = &ops_1_21_0,
.no_l0s = true,
};
@@ -1409,6 +1440,9 @@ static int qcom_pcie_icc_init(struct qcom_pcie *pcie)
if (IS_ERR(pcie->icc_mem))
return PTR_ERR(pcie->icc_mem);
+ pcie->icc_cpu = devm_of_icc_get(pci->dev, "cpu-pcie");
+ if (IS_ERR(pcie->icc_cpu))
+ return PTR_ERR(pcie->icc_cpu);
/*
* Some Qualcomm platforms require interconnect bandwidth constraints
* to be set before enabling interconnect clocks.
@@ -1418,23 +1452,35 @@ static int qcom_pcie_icc_init(struct qcom_pcie *pcie)
*/
ret = icc_set_bw(pcie->icc_mem, 0, QCOM_PCIE_LINK_SPEED_TO_BW(1));
if (ret) {
- dev_err(pci->dev, "failed to set interconnect bandwidth: %d\n",
+ dev_err(pci->dev, "Failed to set bandwidth for PCIe-MEM interconnect path: %d\n",
+ ret);
+ return ret;
+ }
+
+ /*
+ * Since the CPU-PCIe path is only used for activities like host
+ * controller register access and endpoint Config/BAR space access,
+ * the HW team has recommended a minimal bandwidth of 1 KBps just to
+ * keep the path active.
+ */
+ ret = icc_set_bw(pcie->icc_cpu, 0, kBps_to_icc(1));
+ if (ret) {
+ dev_err(pci->dev, "Failed to set bandwidth for CPU-PCIe interconnect path: %d\n",
ret);
+ icc_set_bw(pcie->icc_mem, 0, 0);
return ret;
}
return 0;
}
-static void qcom_pcie_icc_update(struct qcom_pcie *pcie)
+static void qcom_pcie_icc_opp_update(struct qcom_pcie *pcie)
{
+ u32 offset, status, width, speed;
struct dw_pcie *pci = pcie->pci;
- u32 offset, status;
- int speed, width;
- int ret;
-
- if (!pcie->icc_mem)
- return;
+ unsigned long freq_kbps;
+ struct dev_pm_opp *opp;
+ int ret, freq_mbps;
offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
status = readw(pci->dbi_base + offset + PCI_EXP_LNKSTA);
@@ -1446,10 +1492,28 @@ static void qcom_pcie_icc_update(struct qcom_pcie *pcie)
speed = FIELD_GET(PCI_EXP_LNKSTA_CLS, status);
width = FIELD_GET(PCI_EXP_LNKSTA_NLW, status);
- ret = icc_set_bw(pcie->icc_mem, 0, width * QCOM_PCIE_LINK_SPEED_TO_BW(speed));
- if (ret) {
- dev_err(pci->dev, "failed to set interconnect bandwidth: %d\n",
- ret);
+ if (pcie->icc_mem) {
+ ret = icc_set_bw(pcie->icc_mem, 0,
+ width * QCOM_PCIE_LINK_SPEED_TO_BW(speed));
+ if (ret) {
+ dev_err(pci->dev, "Failed to set bandwidth for PCIe-MEM interconnect path: %d\n",
+ ret);
+ }
+ } else if (pcie->use_pm_opp) {
+ freq_mbps = pcie_dev_speed_mbps(pcie_link_speed[speed]);
+ if (freq_mbps < 0)
+ return;
+
+ freq_kbps = freq_mbps * KILO;
+ opp = dev_pm_opp_find_freq_exact(pci->dev, freq_kbps * width,
+ true);
+ if (!IS_ERR(opp)) {
+ ret = dev_pm_opp_set_opp(pci->dev, opp);
+ if (ret)
+ dev_err(pci->dev, "Failed to set OPP for freq (%lu): %d\n",
+ freq_kbps * width, ret);
+ dev_pm_opp_put(opp);
+ }
}
}
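When the OPP table is in use, the lookup key is the raw per-lane link rate in kb/s scaled by the negotiated width, not an ICC bandwidth. A worked example of the key computed above, assuming pcie_dev_speed_mbps() returns 8000 for an 8 GT/s link:

/* Sketch: OPP lookup key for a Gen3 (8 GT/s) x2 link. */
int freq_mbps = 8000;				/* pcie_dev_speed_mbps(PCIE_SPEED_8_0GT) */
unsigned long freq_kbps = freq_mbps * KILO;	/* 8000000 */
unsigned long opp_key = freq_kbps * 2;		/* x2 -> 16000000; must match an OPP entry */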
@@ -1490,15 +1554,43 @@ static void qcom_pcie_init_debugfs(struct qcom_pcie *pcie)
qcom_pcie_link_transition_count);
}
+static irqreturn_t qcom_pcie_global_irq_thread(int irq, void *data)
+{
+ struct qcom_pcie *pcie = data;
+ struct dw_pcie_rp *pp = &pcie->pci->pp;
+ struct device *dev = pcie->pci->dev;
+ u32 status = readl_relaxed(pcie->parf + PARF_INT_ALL_STATUS);
+
+ writel_relaxed(status, pcie->parf + PARF_INT_ALL_CLEAR);
+
+ if (FIELD_GET(PARF_INT_ALL_LINK_UP, status)) {
+ dev_dbg(dev, "Received Link up event. Starting enumeration!\n");
+ /* Rescan the bus to enumerate endpoint devices */
+ pci_lock_rescan_remove();
+ pci_rescan_bus(pp->bridge->bus);
+ pci_unlock_rescan_remove();
+
+ qcom_pcie_icc_opp_update(pcie);
+ } else {
+ dev_WARN_ONCE(dev, 1, "Received unknown event. INT_STATUS: 0x%08x\n",
+ status);
+ }
+
+ return IRQ_HANDLED;
+}
+
static int qcom_pcie_probe(struct platform_device *pdev)
{
const struct qcom_pcie_cfg *pcie_cfg;
+ unsigned long max_freq = ULONG_MAX;
struct device *dev = &pdev->dev;
+ struct dev_pm_opp *opp;
struct qcom_pcie *pcie;
struct dw_pcie_rp *pp;
struct resource *res;
struct dw_pcie *pci;
- int ret;
+ int ret, irq;
+ char *name;
pcie_cfg = of_device_get_match_data(dev);
if (!pcie_cfg || !pcie_cfg->ops) {
@@ -1561,9 +1653,45 @@ static int qcom_pcie_probe(struct platform_device *pdev)
goto err_pm_runtime_put;
}
- ret = qcom_pcie_icc_init(pcie);
- if (ret)
+ /* OPP table is optional */
+ ret = devm_pm_opp_of_add_table(dev);
+ if (ret && ret != -ENODEV) {
+ dev_err_probe(dev, ret, "Failed to add OPP table\n");
goto err_pm_runtime_put;
+ }
+
+ /*
+ * Before the PCIe link is initialized, vote for the highest OPP in
+ * the OPP table, so that the maximum voltage corner is selected and
+ * the link can come up at the maximum supported speed. At the end of
+ * probe(), the OPP is updated using qcom_pcie_icc_opp_update().
+ */
+ if (!ret) {
+ opp = dev_pm_opp_find_freq_floor(dev, &max_freq);
+ if (IS_ERR(opp)) {
+ ret = PTR_ERR(opp);
+ dev_err_probe(pci->dev, ret,
+ "Unable to find max freq OPP\n");
+ goto err_pm_runtime_put;
+ } else {
+ ret = dev_pm_opp_set_opp(dev, opp);
+ }
+
+ dev_pm_opp_put(opp);
+ if (ret) {
+ dev_err_probe(pci->dev, ret,
+ "Failed to set OPP for freq %lu\n",
+ max_freq);
+ goto err_pm_runtime_put;
+ }
+
+ pcie->use_pm_opp = true;
+ } else {
+ /* ICC init is skipped when OPP is supported, as OPP handles the votes */
+ ret = qcom_pcie_icc_init(pcie);
+ if (ret)
+ goto err_pm_runtime_put;
+ }
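dev_pm_opp_find_freq_floor() rounds the passed frequency down to the nearest OPP and updates it in place, so seeding the search with ULONG_MAX selects the highest entry in the table; that is the trick used above. A minimal sketch of the call pattern:

/* Sketch: vote for the highest OPP by taking the floor of ULONG_MAX. */
unsigned long freq = ULONG_MAX;
struct dev_pm_opp *opp;

opp = dev_pm_opp_find_freq_floor(dev, &freq);	/* freq now holds the max rate */
if (!IS_ERR(opp)) {
	dev_pm_opp_set_opp(dev, opp);
	dev_pm_opp_put(opp);	/* find_* helpers return a referenced OPP */
}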
ret = pcie->cfg->ops->get_resources(pcie);
if (ret)
@@ -1577,19 +1705,46 @@ static int qcom_pcie_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pcie);
+ irq = platform_get_irq_byname_optional(pdev, "global");
+ if (irq > 0)
+ pp->use_linkup_irq = true;
+
ret = dw_pcie_host_init(pp);
if (ret) {
dev_err(dev, "cannot initialize host\n");
goto err_phy_exit;
}
- qcom_pcie_icc_update(pcie);
+ name = devm_kasprintf(dev, GFP_KERNEL, "qcom_pcie_global_irq%d",
+ pci_domain_nr(pp->bridge->bus));
+ if (!name) {
+ ret = -ENOMEM;
+ goto err_host_deinit;
+ }
+
+ if (irq > 0) {
+ ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+ qcom_pcie_global_irq_thread,
+ IRQF_ONESHOT, name, pcie);
+ if (ret) {
+ dev_err_probe(&pdev->dev, ret,
+ "Failed to request Global IRQ\n");
+ goto err_host_deinit;
+ }
+
+ writel_relaxed(PARF_INT_ALL_LINK_UP | PARF_INT_MSI_DEV_0_7,
+ pcie->parf + PARF_INT_ALL_MASK);
+ }
+
+ qcom_pcie_icc_opp_update(pcie);
if (pcie->mhi)
qcom_pcie_init_debugfs(pcie);
return 0;
+err_host_deinit:
+ dw_pcie_host_deinit(pp);
err_phy_exit:
phy_exit(pcie->phy);
err_pm_runtime_put:
@@ -1602,16 +1757,20 @@ err_pm_runtime_put:
static int qcom_pcie_suspend_noirq(struct device *dev)
{
struct qcom_pcie *pcie = dev_get_drvdata(dev);
- int ret;
+ int ret = 0;
/*
* Set minimum bandwidth required to keep data path functional during
* suspend.
*/
- ret = icc_set_bw(pcie->icc_mem, 0, kBps_to_icc(1));
- if (ret) {
- dev_err(dev, "Failed to set interconnect bandwidth: %d\n", ret);
- return ret;
+ if (pcie->icc_mem) {
+ ret = icc_set_bw(pcie->icc_mem, 0, kBps_to_icc(1));
+ if (ret) {
+ dev_err(dev,
+ "Failed to set bandwidth for PCIe-MEM interconnect path: %d\n",
+ ret);
+ return ret;
+ }
}
/*
@@ -1634,7 +1793,21 @@ static int qcom_pcie_suspend_noirq(struct device *dev)
pcie->suspended = true;
}
- return 0;
+ /*
+ * Only disable the CPU-PCIe interconnect path if the suspend is
+ * non-S2RAM, because on some platforms DBI access can happen very
+ * late during S2RAM and an inactive CPU-PCIe interconnect path may
+ * lead to a NoC error.
+ */
+ if (pm_suspend_target_state != PM_SUSPEND_MEM) {
+ ret = icc_disable(pcie->icc_cpu);
+ if (ret)
+ dev_err(dev, "Failed to disable CPU-PCIe interconnect path: %d\n", ret);
+
+ if (pcie->use_pm_opp)
+ dev_pm_opp_set_opp(pcie->pci->dev, NULL);
+ }
+ return ret;
}
static int qcom_pcie_resume_noirq(struct device *dev)
@@ -1642,6 +1815,14 @@ static int qcom_pcie_resume_noirq(struct device *dev)
struct qcom_pcie *pcie = dev_get_drvdata(dev);
int ret;
+ if (pm_suspend_target_state != PM_SUSPEND_MEM) {
+ ret = icc_enable(pcie->icc_cpu);
+ if (ret) {
+ dev_err(dev, "Failed to enable CPU-PCIe interconnect path: %d\n", ret);
+ return ret;
+ }
+ }
+
if (pcie->suspended) {
ret = qcom_pcie_host_init(&pcie->pci->pp);
if (ret)
@@ -1650,7 +1831,7 @@ static int qcom_pcie_resume_noirq(struct device *dev)
pcie->suspended = false;
}
- qcom_pcie_icc_update(pcie);
+ qcom_pcie_icc_opp_update(pcie);
return 0;
}
@@ -1664,10 +1845,11 @@ static const struct of_device_id qcom_pcie_match[] = {
{ .compatible = "qcom,pcie-ipq8064-v2", .data = &cfg_2_1_0 },
{ .compatible = "qcom,pcie-ipq8074", .data = &cfg_2_3_3 },
{ .compatible = "qcom,pcie-ipq8074-gen3", .data = &cfg_2_9_0 },
+ { .compatible = "qcom,pcie-ipq9574", .data = &cfg_2_9_0 },
{ .compatible = "qcom,pcie-msm8996", .data = &cfg_2_3_2 },
{ .compatible = "qcom,pcie-qcs404", .data = &cfg_2_4_0 },
{ .compatible = "qcom,pcie-sa8540p", .data = &cfg_sc8280xp },
- { .compatible = "qcom,pcie-sa8775p", .data = &cfg_1_9_0},
+ { .compatible = "qcom,pcie-sa8775p", .data = &cfg_1_34_0},
{ .compatible = "qcom,pcie-sc7280", .data = &cfg_1_9_0 },
{ .compatible = "qcom,pcie-sc8180x", .data = &cfg_1_9_0 },
{ .compatible = "qcom,pcie-sc8280xp", .data = &cfg_sc8280xp },
@@ -1679,7 +1861,7 @@ static const struct of_device_id qcom_pcie_match[] = {
{ .compatible = "qcom,pcie-sm8450-pcie0", .data = &cfg_1_9_0 },
{ .compatible = "qcom,pcie-sm8450-pcie1", .data = &cfg_1_9_0 },
{ .compatible = "qcom,pcie-sm8550", .data = &cfg_1_9_0 },
- { .compatible = "qcom,pcie-x1e80100", .data = &cfg_1_9_0 },
+ { .compatible = "qcom,pcie-x1e80100", .data = &cfg_sc8280xp },
{ }
};
diff --git a/drivers/pci/controller/dwc/pcie-rcar-gen4.c b/drivers/pci/controller/dwc/pcie-rcar-gen4.c
index 0be760ed420b..fc872dd35029 100644
--- a/drivers/pci/controller/dwc/pcie-rcar-gen4.c
+++ b/drivers/pci/controller/dwc/pcie-rcar-gen4.c
@@ -2,11 +2,17 @@
/*
* PCIe controller driver for Renesas R-Car Gen4 Series SoCs
* Copyright (C) 2022-2023 Renesas Electronics Corporation
+ *
+ * The r8a779g0 (R-Car V4H) controller requires specific firmware to be
+ * provided in order to initialize the PHY; otherwise, the PCIe controller
+ * will not work.
*/
#include <linux/delay.h>
+#include <linux/firmware.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/pci.h>
@@ -20,9 +26,10 @@
/* Renesas-specific */
/* PCIe Mode Setting Register 0 */
#define PCIEMSR0 0x0000
-#define BIFUR_MOD_SET_ON BIT(0)
+#define APP_SRIS_MODE BIT(6)
#define DEVICE_TYPE_EP 0
#define DEVICE_TYPE_RC BIT(4)
+#define BIFUR_MOD_SET_ON BIT(0)
/* PCIe Interrupt Status 0 */
#define PCIEINTSTS0 0x0084
@@ -37,47 +44,49 @@
#define PCIEDMAINTSTSEN 0x0314
#define PCIEDMAINTSTSEN_INIT GENMASK(15, 0)
+/* Port Logic Registers 89 */
+#define PRTLGC89 0x0b70
+
+/* Port Logic Registers 90 */
+#define PRTLGC90 0x0b74
+
/* PCIe Reset Control Register 1 */
#define PCIERSTCTRL1 0x0014
#define APP_HOLD_PHY_RST BIT(16)
#define APP_LTSSM_ENABLE BIT(0)
+/* PCIe Power Management Control */
+#define PCIEPWRMNGCTRL 0x0070
+#define APP_CLK_REQ_N BIT(11)
+#define APP_CLK_PM_EN BIT(10)
+
#define RCAR_NUM_SPEED_CHANGE_RETRIES 10
#define RCAR_MAX_LINK_SPEED 4
#define RCAR_GEN4_PCIE_EP_FUNC_DBI_OFFSET 0x1000
#define RCAR_GEN4_PCIE_EP_FUNC_DBI2_OFFSET 0x800
+#define RCAR_GEN4_PCIE_FIRMWARE_NAME "rcar_gen4_pcie.bin"
+#define RCAR_GEN4_PCIE_FIRMWARE_BASE_ADDR 0xc000
+MODULE_FIRMWARE(RCAR_GEN4_PCIE_FIRMWARE_NAME);
+
+struct rcar_gen4_pcie;
+struct rcar_gen4_pcie_drvdata {
+ void (*additional_common_init)(struct rcar_gen4_pcie *rcar);
+ int (*ltssm_control)(struct rcar_gen4_pcie *rcar, bool enable);
+ enum dw_pcie_device_mode mode;
+};
+
struct rcar_gen4_pcie {
struct dw_pcie dw;
void __iomem *base;
+ void __iomem *phy_base;
struct platform_device *pdev;
- enum dw_pcie_device_mode mode;
+ const struct rcar_gen4_pcie_drvdata *drvdata;
};
#define to_rcar_gen4_pcie(_dw) container_of(_dw, struct rcar_gen4_pcie, dw)
/* Common */
-static void rcar_gen4_pcie_ltssm_enable(struct rcar_gen4_pcie *rcar,
- bool enable)
-{
- u32 val;
-
- val = readl(rcar->base + PCIERSTCTRL1);
- if (enable) {
- val |= APP_LTSSM_ENABLE;
- val &= ~APP_HOLD_PHY_RST;
- } else {
- /*
- * Since the datasheet of R-Car doesn't mention how to assert
- * the APP_HOLD_PHY_RST, don't assert it again. Otherwise,
- * hang-up issue happened in the dw_edma_core_off() when
- * the controller didn't detect a PCI device.
- */
- val &= ~APP_LTSSM_ENABLE;
- }
- writel(val, rcar->base + PCIERSTCTRL1);
-}
-
static int rcar_gen4_pcie_link_up(struct dw_pcie *dw)
{
struct rcar_gen4_pcie *rcar = to_rcar_gen4_pcie(dw);
@@ -123,21 +132,25 @@ static int rcar_gen4_pcie_speed_change(struct dw_pcie *dw)
static int rcar_gen4_pcie_start_link(struct dw_pcie *dw)
{
struct rcar_gen4_pcie *rcar = to_rcar_gen4_pcie(dw);
- int i, changes;
+ int i, changes, ret;
- rcar_gen4_pcie_ltssm_enable(rcar, true);
+ if (rcar->drvdata->ltssm_control) {
+ ret = rcar->drvdata->ltssm_control(rcar, true);
+ if (ret)
+ return ret;
+ }
/*
- * Require direct speed change with retrying here if the link_gen is
- * PCIe Gen2 or higher.
+ * Request a direct speed change, with retries, if max_link_speed is
+ * PCIe Gen2 or higher.
*/
- changes = min_not_zero(dw->link_gen, RCAR_MAX_LINK_SPEED) - 1;
+ changes = min_not_zero(dw->max_link_speed, RCAR_MAX_LINK_SPEED) - 1;
/*
* Since dw_pcie_setup_rc() already requests one speed change, PCIe Gen2
* will be trained first; in RC mode, the remaining retries only need to
* cover speeds up to PCIe Gen4.
*/
- if (changes && rcar->mode == DW_PCIE_RC_TYPE)
+ if (changes && rcar->drvdata->mode == DW_PCIE_RC_TYPE)
changes--;
for (i = 0; i < changes; i++) {
@@ -153,7 +166,8 @@ static void rcar_gen4_pcie_stop_link(struct dw_pcie *dw)
{
struct rcar_gen4_pcie *rcar = to_rcar_gen4_pcie(dw);
- rcar_gen4_pcie_ltssm_enable(rcar, false);
+ if (rcar->drvdata->ltssm_control)
+ rcar->drvdata->ltssm_control(rcar, false);
}
static int rcar_gen4_pcie_common_init(struct rcar_gen4_pcie *rcar)
@@ -172,9 +186,9 @@ static int rcar_gen4_pcie_common_init(struct rcar_gen4_pcie *rcar)
reset_control_assert(dw->core_rsts[DW_PCIE_PWR_RST].rstc);
val = readl(rcar->base + PCIEMSR0);
- if (rcar->mode == DW_PCIE_RC_TYPE) {
+ if (rcar->drvdata->mode == DW_PCIE_RC_TYPE) {
val |= DEVICE_TYPE_RC;
- } else if (rcar->mode == DW_PCIE_EP_TYPE) {
+ } else if (rcar->drvdata->mode == DW_PCIE_EP_TYPE) {
val |= DEVICE_TYPE_EP;
} else {
ret = -EINVAL;
@@ -190,6 +204,9 @@ static int rcar_gen4_pcie_common_init(struct rcar_gen4_pcie *rcar)
if (ret)
goto err_unprepare;
+ if (rcar->drvdata->additional_common_init)
+ rcar->drvdata->additional_common_init(rcar);
+
return 0;
err_unprepare:
@@ -231,6 +248,10 @@ static void rcar_gen4_pcie_unprepare(struct rcar_gen4_pcie *rcar)
static int rcar_gen4_pcie_get_resources(struct rcar_gen4_pcie *rcar)
{
+ rcar->phy_base = devm_platform_ioremap_resource_byname(rcar->pdev, "phy");
+ if (IS_ERR(rcar->phy_base))
+ return PTR_ERR(rcar->phy_base);
+
/* Renesas-specific registers */
rcar->base = devm_platform_ioremap_resource_byname(rcar->pdev, "app");
@@ -255,7 +276,7 @@ static struct rcar_gen4_pcie *rcar_gen4_pcie_alloc(struct platform_device *pdev)
rcar->dw.ops = &dw_pcie_ops;
rcar->dw.dev = dev;
rcar->pdev = pdev;
- dw_pcie_cap_set(&rcar->dw, EDMA_UNROLL);
+ rcar->dw.edma.mf = EDMA_MF_EDMA_UNROLL;
dw_pcie_cap_set(&rcar->dw, REQ_RES);
platform_set_drvdata(pdev, rcar);
@@ -352,11 +373,8 @@ static void rcar_gen4_pcie_ep_init(struct dw_pcie_ep *ep)
dw_pcie_ep_reset_bar(pci, bar);
}
-static void rcar_gen4_pcie_ep_deinit(struct dw_pcie_ep *ep)
+static void rcar_gen4_pcie_ep_deinit(struct rcar_gen4_pcie *rcar)
{
- struct dw_pcie *dw = to_dw_pcie_from_ep(ep);
- struct rcar_gen4_pcie *rcar = to_rcar_gen4_pcie(dw);
-
writel(0, rcar->base + PCIEDMAINTSTSEN);
rcar_gen4_pcie_common_deinit(rcar);
}
@@ -410,7 +428,6 @@ static unsigned int rcar_gen4_pcie_ep_get_dbi2_offset(struct dw_pcie_ep *ep,
static const struct dw_pcie_ep_ops pcie_ep_ops = {
.pre_init = rcar_gen4_pcie_ep_pre_init,
.init = rcar_gen4_pcie_ep_init,
- .deinit = rcar_gen4_pcie_ep_deinit,
.raise_irq = rcar_gen4_pcie_ep_raise_irq,
.get_features = rcar_gen4_pcie_ep_get_features,
.get_dbi_offset = rcar_gen4_pcie_ep_get_dbi_offset,
@@ -420,26 +437,46 @@ static const struct dw_pcie_ep_ops pcie_ep_ops = {
static int rcar_gen4_add_dw_pcie_ep(struct rcar_gen4_pcie *rcar)
{
struct dw_pcie_ep *ep = &rcar->dw.ep;
+ struct device *dev = rcar->dw.dev;
+ int ret;
if (!IS_ENABLED(CONFIG_PCIE_RCAR_GEN4_EP))
return -ENODEV;
ep->ops = &pcie_ep_ops;
- return dw_pcie_ep_init(ep);
+ ret = dw_pcie_ep_init(ep);
+ if (ret) {
+ rcar_gen4_pcie_ep_deinit(rcar);
+ return ret;
+ }
+
+ ret = dw_pcie_ep_init_registers(ep);
+ if (ret) {
+ dev_err(dev, "Failed to initialize DWC endpoint registers\n");
+ dw_pcie_ep_deinit(ep);
+ rcar_gen4_pcie_ep_deinit(rcar);
+ return ret;
+ }
+
+ pci_epc_init_notify(ep->epc);
+
+ return ret;
}
static void rcar_gen4_remove_dw_pcie_ep(struct rcar_gen4_pcie *rcar)
{
- dw_pcie_ep_exit(&rcar->dw.ep);
+ dw_pcie_ep_deinit(&rcar->dw.ep);
+ rcar_gen4_pcie_ep_deinit(rcar);
}
/* Common */
static int rcar_gen4_add_dw_pcie(struct rcar_gen4_pcie *rcar)
{
- rcar->mode = (uintptr_t)of_device_get_match_data(&rcar->pdev->dev);
+ rcar->drvdata = of_device_get_match_data(&rcar->pdev->dev);
+ if (!rcar->drvdata)
+ return -EINVAL;
- switch (rcar->mode) {
+ switch (rcar->drvdata->mode) {
case DW_PCIE_RC_TYPE:
return rcar_gen4_add_dw_pcie_rp(rcar);
case DW_PCIE_EP_TYPE:
@@ -480,7 +517,7 @@ err_unprepare:
static void rcar_gen4_remove_dw_pcie(struct rcar_gen4_pcie *rcar)
{
- switch (rcar->mode) {
+ switch (rcar->drvdata->mode) {
case DW_PCIE_RC_TYPE:
rcar_gen4_remove_dw_pcie_rp(rcar);
break;
@@ -500,14 +537,232 @@ static void rcar_gen4_pcie_remove(struct platform_device *pdev)
rcar_gen4_pcie_unprepare(rcar);
}
+static int r8a779f0_pcie_ltssm_control(struct rcar_gen4_pcie *rcar, bool enable)
+{
+ u32 val;
+
+ val = readl(rcar->base + PCIERSTCTRL1);
+ if (enable) {
+ val |= APP_LTSSM_ENABLE;
+ val &= ~APP_HOLD_PHY_RST;
+ } else {
+ /*
+ * Since the datasheet of R-Car doesn't mention how to assert
+ * the APP_HOLD_PHY_RST, don't assert it again. Otherwise,
+ * hang-up issue happened in the dw_edma_core_off() when
+ * the controller didn't detect a PCI device.
+ */
+ val &= ~APP_LTSSM_ENABLE;
+ }
+ writel(val, rcar->base + PCIERSTCTRL1);
+
+ return 0;
+}
+
+static void rcar_gen4_pcie_additional_common_init(struct rcar_gen4_pcie *rcar)
+{
+ struct dw_pcie *dw = &rcar->dw;
+ u32 val;
+
+ val = dw_pcie_readl_dbi(dw, PCIE_PORT_LANE_SKEW);
+ val &= ~PORT_LANE_SKEW_INSERT_MASK;
+ if (dw->num_lanes < 4)
+ val |= BIT(6);
+ dw_pcie_writel_dbi(dw, PCIE_PORT_LANE_SKEW, val);
+
+ val = readl(rcar->base + PCIEPWRMNGCTRL);
+ val |= APP_CLK_REQ_N | APP_CLK_PM_EN;
+ writel(val, rcar->base + PCIEPWRMNGCTRL);
+}
+
+static void rcar_gen4_pcie_phy_reg_update_bits(struct rcar_gen4_pcie *rcar,
+ u32 offset, u32 mask, u32 val)
+{
+ u32 tmp;
+
+ tmp = readl(rcar->phy_base + offset);
+ tmp &= ~mask;
+ tmp |= val;
+ writel(tmp, rcar->phy_base + offset);
+}
+
+/*
+ * The SoC datasheet suggests checking port logic register bits during the
+ * firmware write. If a read returns a non-zero value, this function
+ * returns -EAGAIN, indicating that the write needs to be done again. If
+ * the read returns zero, it returns 0 to indicate success.
+ */
+static int rcar_gen4_pcie_reg_test_bit(struct rcar_gen4_pcie *rcar,
+ u32 offset, u32 mask)
+{
+ struct dw_pcie *dw = &rcar->dw;
+
+ if (dw_pcie_readl_dbi(dw, offset) & mask)
+ return -EAGAIN;
+
+ return 0;
+}
+
+static int rcar_gen4_pcie_download_phy_firmware(struct rcar_gen4_pcie *rcar)
+{
+ /* The check_addr values are magical numbers in the datasheet */
+ static const u32 check_addr[] = {
+ 0x00101018,
+ 0x00101118,
+ 0x00101021,
+ 0x00101121,
+ };
+ struct dw_pcie *dw = &rcar->dw;
+ const struct firmware *fw;
+ unsigned int i, timeout;
+ u32 data;
+ int ret;
+
+ ret = request_firmware(&fw, RCAR_GEN4_PCIE_FIRMWARE_NAME, dw->dev);
+ if (ret) {
+ dev_err(dw->dev, "Failed to load firmware (%s): %d\n",
+ RCAR_GEN4_PCIE_FIRMWARE_NAME, ret);
+ return ret;
+ }
+
+ for (i = 0; i < (fw->size / 2); i++) {
+ data = fw->data[(i * 2) + 1] << 8 | fw->data[i * 2];
+ timeout = 100;
+ do {
+ dw_pcie_writel_dbi(dw, PRTLGC89, RCAR_GEN4_PCIE_FIRMWARE_BASE_ADDR + i);
+ dw_pcie_writel_dbi(dw, PRTLGC90, data);
+ if (!rcar_gen4_pcie_reg_test_bit(rcar, PRTLGC89, BIT(30)))
+ break;
+ if (!(--timeout)) {
+ ret = -ETIMEDOUT;
+ goto exit;
+ }
+ usleep_range(100, 200);
+ } while (1);
+ }
+
+ rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x0f8, BIT(17), BIT(17));
+
+ for (i = 0; i < ARRAY_SIZE(check_addr); i++) {
+ timeout = 100;
+ do {
+ dw_pcie_writel_dbi(dw, PRTLGC89, check_addr[i]);
+ ret = rcar_gen4_pcie_reg_test_bit(rcar, PRTLGC89, BIT(30));
+ ret |= rcar_gen4_pcie_reg_test_bit(rcar, PRTLGC90, BIT(0));
+ if (!ret)
+ break;
+ if (!(--timeout)) {
+ ret = -ETIMEDOUT;
+ goto exit;
+ }
+ usleep_range(100, 200);
+ } while (1);
+ }
+
+exit:
+ release_firmware(fw);
+
+ return ret;
+}
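The download loop reassembles 16-bit little-endian words from consecutive firmware bytes before pushing them through the PRTLGC89/PRTLGC90 window. A worked example of the packing; get_unaligned_le16() would be the idiomatic equivalent:

/* Sketch: bytes 0x34, 0x12 in the blob become the 16-bit word 0x1234. */
u8 fw_bytes[] = { 0x34, 0x12 };
u32 data = fw_bytes[1] << 8 | fw_bytes[0];	/* 0x1234 */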
+
+static int rcar_gen4_pcie_ltssm_control(struct rcar_gen4_pcie *rcar, bool enable)
+{
+ struct dw_pcie *dw = &rcar->dw;
+ u32 val;
+ int ret;
+
+ if (!enable) {
+ val = readl(rcar->base + PCIERSTCTRL1);
+ val &= ~APP_LTSSM_ENABLE;
+ writel(val, rcar->base + PCIERSTCTRL1);
+
+ return 0;
+ }
+
+ val = dw_pcie_readl_dbi(dw, PCIE_PORT_FORCE);
+ val |= PORT_FORCE_DO_DESKEW_FOR_SRIS;
+ dw_pcie_writel_dbi(dw, PCIE_PORT_FORCE, val);
+
+ val = readl(rcar->base + PCIEMSR0);
+ val |= APP_SRIS_MODE;
+ writel(val, rcar->base + PCIEMSR0);
+
+ /*
+ * The R-Car Gen4 datasheet doesn't name the PHY registers, but the
+ * initialization procedure does list these offsets, so this driver
+ * uses them as magic numbers.
+ */
+ rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x700, BIT(28), 0);
+ rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x700, BIT(20), 0);
+ rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x700, BIT(12), 0);
+ rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x700, BIT(4), 0);
+
+ rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x148, GENMASK(23, 22), BIT(22));
+ rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x148, GENMASK(18, 16), GENMASK(17, 16));
+ rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x148, GENMASK(7, 6), BIT(6));
+ rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x148, GENMASK(2, 0), GENMASK(11, 0));
+ rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x1d4, GENMASK(16, 15), GENMASK(16, 15));
+ rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x514, BIT(26), BIT(26));
+ rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x0f8, BIT(16), 0);
+ rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x0f8, BIT(19), BIT(19));
+
+ val = readl(rcar->base + PCIERSTCTRL1);
+ val &= ~APP_HOLD_PHY_RST;
+ writel(val, rcar->base + PCIERSTCTRL1);
+
+ ret = readl_poll_timeout(rcar->phy_base + 0x0f8, val, !(val & BIT(18)), 100, 10000);
+ if (ret < 0)
+ return ret;
+
+ ret = rcar_gen4_pcie_download_phy_firmware(rcar);
+ if (ret)
+ return ret;
+
+ val = readl(rcar->base + PCIERSTCTRL1);
+ val |= APP_LTSSM_ENABLE;
+ writel(val, rcar->base + PCIERSTCTRL1);
+
+ return 0;
+}
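The PHY-reset release above waits on a status bit with readl_poll_timeout(), whose arguments are (addr, val, condition, sleep_us, timeout_us) and which returns -ETIMEDOUT if the condition never becomes true. A minimal sketch, with STATUS_REG and BUSY being hypothetical names:

/* Sketch: poll a busy bit for up to 10 ms, sleeping ~100 us per read. */
u32 val;
int ret = readl_poll_timeout(base + STATUS_REG, val, !(val & BUSY), 100, 10000);
if (ret)
	return ret;	/* -ETIMEDOUT */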
+
+static struct rcar_gen4_pcie_drvdata drvdata_r8a779f0_pcie = {
+ .ltssm_control = r8a779f0_pcie_ltssm_control,
+ .mode = DW_PCIE_RC_TYPE,
+};
+
+static struct rcar_gen4_pcie_drvdata drvdata_r8a779f0_pcie_ep = {
+ .ltssm_control = r8a779f0_pcie_ltssm_control,
+ .mode = DW_PCIE_EP_TYPE,
+};
+
+static struct rcar_gen4_pcie_drvdata drvdata_rcar_gen4_pcie = {
+ .additional_common_init = rcar_gen4_pcie_additional_common_init,
+ .ltssm_control = rcar_gen4_pcie_ltssm_control,
+ .mode = DW_PCIE_RC_TYPE,
+};
+
+static struct rcar_gen4_pcie_drvdata drvdata_rcar_gen4_pcie_ep = {
+ .additional_common_init = rcar_gen4_pcie_additional_common_init,
+ .ltssm_control = rcar_gen4_pcie_ltssm_control,
+ .mode = DW_PCIE_EP_TYPE,
+};
+
static const struct of_device_id rcar_gen4_pcie_of_match[] = {
{
+ .compatible = "renesas,r8a779f0-pcie",
+ .data = &drvdata_r8a779f0_pcie,
+ },
+ {
+ .compatible = "renesas,r8a779f0-pcie-ep",
+ .data = &drvdata_r8a779f0_pcie_ep,
+ },
+ {
.compatible = "renesas,rcar-gen4-pcie",
- .data = (void *)DW_PCIE_RC_TYPE,
+ .data = &drvdata_rcar_gen4_pcie,
},
{
.compatible = "renesas,rcar-gen4-pcie-ep",
- .data = (void *)DW_PCIE_EP_TYPE,
+ .data = &drvdata_rcar_gen4_pcie_ep,
},
{},
};
@@ -520,7 +775,7 @@ static struct platform_driver rcar_gen4_pcie_driver = {
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
.probe = rcar_gen4_pcie_probe,
- .remove_new = rcar_gen4_pcie_remove,
+ .remove = rcar_gen4_pcie_remove,
};
module_platform_driver(rcar_gen4_pcie_driver);
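The .remove_new to .remove renames scattered through this series reflect the platform bus conversion in which struct platform_driver::remove() now takes the void-returning callback that .remove_new carried during the transition. A sketch of the resulting shape, with hypothetical names:

/* Sketch: a platform driver using the void-returning remove callback. */
static void my_remove(struct platform_device *pdev)
{
	/* teardown only; no error can be propagated to the driver core */
}

static struct platform_driver my_driver = {
	.probe = my_probe,
	.remove = my_remove,	/* formerly .remove_new */
	.driver = {
		.name = "my-driver",
	},
};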
diff --git a/drivers/pci/controller/dwc/pcie-spear13xx.c b/drivers/pci/controller/dwc/pcie-spear13xx.c
index 201dced209f0..ff986ced56b2 100644
--- a/drivers/pci/controller/dwc/pcie-spear13xx.c
+++ b/drivers/pci/controller/dwc/pcie-spear13xx.c
@@ -233,7 +233,7 @@ static int spear13xx_pcie_probe(struct platform_device *pdev)
}
if (of_property_read_bool(np, "st,pcie-is-gen1"))
- pci->link_gen = 1;
+ pci->max_link_speed = 1;
platform_set_drvdata(pdev, spear13xx_pcie);
diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c
index 1f7b662cb8e1..5103995cd6c7 100644
--- a/drivers/pci/controller/dwc/pcie-tegra194.c
+++ b/drivers/pci/controller/dwc/pcie-tegra194.c
@@ -13,7 +13,6 @@
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
-#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/interconnect.h>
#include <linux/interrupt.h>
@@ -21,7 +20,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
@@ -179,17 +177,12 @@
#define N_FTS_VAL 52
#define FTS_VAL 52
-#define GEN3_EQ_CONTROL_OFF 0x8a8
-#define GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT 8
-#define GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK GENMASK(23, 8)
-#define GEN3_EQ_CONTROL_OFF_FB_MODE_MASK GENMASK(3, 0)
-
#define PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT 0x8D0
-#define AMBA_ERROR_RESPONSE_CRS_SHIFT 3
-#define AMBA_ERROR_RESPONSE_CRS_MASK GENMASK(1, 0)
-#define AMBA_ERROR_RESPONSE_CRS_OKAY 0
-#define AMBA_ERROR_RESPONSE_CRS_OKAY_FFFFFFFF 1
-#define AMBA_ERROR_RESPONSE_CRS_OKAY_FFFF0001 2
+#define AMBA_ERROR_RESPONSE_RRS_SHIFT 3
+#define AMBA_ERROR_RESPONSE_RRS_MASK GENMASK(1, 0)
+#define AMBA_ERROR_RESPONSE_RRS_OKAY 0
+#define AMBA_ERROR_RESPONSE_RRS_OKAY_FFFFFFFF 1
+#define AMBA_ERROR_RESPONSE_RRS_OKAY_FFFF0001 2
#define MSIX_ADDR_MATCH_LOW_OFF 0x940
#define MSIX_ADDR_MATCH_LOW_OFF_EN BIT(0)
@@ -308,10 +301,6 @@ static inline u32 appl_readl(struct tegra_pcie_dw *pcie, const u32 reg)
return readl_relaxed(pcie->appl_base + reg);
}
-struct tegra_pcie_soc {
- enum dw_pcie_device_mode mode;
-};
-
static void tegra_pcie_icc_set(struct tegra_pcie_dw *pcie)
{
struct dw_pcie *pci = &pcie->pci;
@@ -867,9 +856,9 @@ static void config_gen3_gen4_eq_presets(struct tegra_pcie_dw *pcie)
dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
val = dw_pcie_readl_dbi(pci, GEN3_EQ_CONTROL_OFF);
- val &= ~GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK;
- val |= (0x3ff << GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT);
- val &= ~GEN3_EQ_CONTROL_OFF_FB_MODE_MASK;
+ val &= ~GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC;
+ val |= FIELD_PREP(GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC, 0x3ff);
+ val &= ~GEN3_EQ_CONTROL_OFF_FB_MODE;
dw_pcie_writel_dbi(pci, GEN3_EQ_CONTROL_OFF, val);
val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
@@ -878,10 +867,10 @@ static void config_gen3_gen4_eq_presets(struct tegra_pcie_dw *pcie)
dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
val = dw_pcie_readl_dbi(pci, GEN3_EQ_CONTROL_OFF);
- val &= ~GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK;
- val |= (pcie->of_data->gen4_preset_vec <<
- GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT);
- val &= ~GEN3_EQ_CONTROL_OFF_FB_MODE_MASK;
+ val &= ~GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC;
+ val |= FIELD_PREP(GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC,
+ pcie->of_data->gen4_preset_vec);
+ val &= ~GEN3_EQ_CONTROL_OFF_FB_MODE;
dw_pcie_writel_dbi(pci, GEN3_EQ_CONTROL_OFF, val);
val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
@@ -913,11 +902,11 @@ static int tegra_pcie_dw_host_init(struct dw_pcie_rp *pp)
dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);
- /* Enable as 0xFFFF0001 response for CRS */
+ /* Enable as 0xFFFF0001 response for RRS */
val = dw_pcie_readl_dbi(pci, PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT);
- val &= ~(AMBA_ERROR_RESPONSE_CRS_MASK << AMBA_ERROR_RESPONSE_CRS_SHIFT);
- val |= (AMBA_ERROR_RESPONSE_CRS_OKAY_FFFF0001 <<
- AMBA_ERROR_RESPONSE_CRS_SHIFT);
+ val &= ~(AMBA_ERROR_RESPONSE_RRS_MASK << AMBA_ERROR_RESPONSE_RRS_SHIFT);
+ val |= (AMBA_ERROR_RESPONSE_RRS_OKAY_FFFF0001 <<
+ AMBA_ERROR_RESPONSE_RRS_SHIFT);
dw_pcie_writel_dbi(pci, PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT, val);
/* Clear Slot Clock Configuration bit if SRNS configuration */
@@ -1793,6 +1782,10 @@ static void pex_ep_event_pex_rst_deassert(struct tegra_pcie_dw *pcie)
goto fail_phy;
}
+ /* Perform cleanup that requires refclk */
+ pci_epc_deinit_notify(pcie->pci.ep.epc);
+ dw_pcie_ep_cleanup(&pcie->pci.ep);
+
/* Clear any stale interrupt statuses */
appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L0);
appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_0_0);
@@ -1895,13 +1888,13 @@ static void pex_ep_event_pex_rst_deassert(struct tegra_pcie_dw *pcie)
val = (upper_32_bits(ep->msi_mem_phys) & MSIX_ADDR_MATCH_HIGH_OFF_MASK);
dw_pcie_writel_dbi(pci, MSIX_ADDR_MATCH_HIGH_OFF, val);
- ret = dw_pcie_ep_init_complete(ep);
+ ret = dw_pcie_ep_init_registers(ep);
if (ret) {
dev_err(dev, "Failed to complete initialization: %d\n", ret);
goto fail_init_complete;
}
- dw_pcie_ep_init_notify(ep);
+ pci_epc_init_notify(ep->epc);
/* Program the private control to allow sending LTR upstream */
if (pcie->of_data->has_ltr_req_fix) {
@@ -2004,7 +1997,6 @@ static int tegra_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
static const struct pci_epc_features tegra_pcie_epc_features = {
.linkup_notifier = true,
- .core_init_notifier = true,
.msi_capable = false,
.msix_capable = false,
.bar[BAR_0] = { .type = BAR_FIXED, .fixed_size = SZ_1M,
@@ -2014,6 +2006,7 @@ static const struct pci_epc_features tegra_pcie_epc_features = {
.bar[BAR_3] = { .type = BAR_RESERVED, },
.bar[BAR_4] = { .type = BAR_RESERVED, },
.bar[BAR_5] = { .type = BAR_RESERVED, },
+ .align = SZ_64K,
};
static const struct pci_epc_features*
@@ -2273,11 +2266,14 @@ static int tegra_pcie_dw_probe(struct platform_device *pdev)
ret = tegra_pcie_config_ep(pcie, pdev);
if (ret < 0)
goto fail;
+ else
+ return 0;
break;
default:
dev_err(dev, "Invalid PCIe device type %d\n",
pcie->of_data->mode);
+ ret = -EINVAL;
}
fail:
@@ -2498,7 +2494,7 @@ static const struct dev_pm_ops tegra_pcie_dw_pm_ops = {
static struct platform_driver tegra_pcie_dw_driver = {
.probe = tegra_pcie_dw_probe,
- .remove_new = tegra_pcie_dw_remove,
+ .remove = tegra_pcie_dw_remove,
.shutdown = tegra_pcie_dw_shutdown,
.driver = {
.name = "tegra194-pcie",
diff --git a/drivers/pci/controller/dwc/pcie-uniphier-ep.c b/drivers/pci/controller/dwc/pcie-uniphier-ep.c
index 639bc2e12476..d6e73811216e 100644
--- a/drivers/pci/controller/dwc/pcie-uniphier-ep.c
+++ b/drivers/pci/controller/dwc/pcie-uniphier-ep.c
@@ -399,7 +399,20 @@ static int uniphier_pcie_ep_probe(struct platform_device *pdev)
return ret;
priv->pci.ep.ops = &uniphier_pcie_ep_ops;
- return dw_pcie_ep_init(&priv->pci.ep);
+ ret = dw_pcie_ep_init(&priv->pci.ep);
+ if (ret)
+ return ret;
+
+ ret = dw_pcie_ep_init_registers(&priv->pci.ep);
+ if (ret) {
+ dev_err(dev, "Failed to initialize DWC endpoint registers\n");
+ dw_pcie_ep_deinit(&priv->pci.ep);
+ return ret;
+ }
+
+ pci_epc_init_notify(priv->pci.ep.epc);
+
+ return 0;
}
static const struct uniphier_pcie_ep_soc_data uniphier_pro5_data = {
diff --git a/drivers/pci/controller/dwc/pcie-uniphier.c b/drivers/pci/controller/dwc/pcie-uniphier.c
index 5757ca3803c9..43b28f826edd 100644
--- a/drivers/pci/controller/dwc/pcie-uniphier.c
+++ b/drivers/pci/controller/dwc/pcie-uniphier.c
@@ -279,7 +279,7 @@ static int uniphier_pcie_config_intx_irq(struct dw_pcie_rp *pp)
goto out_put_node;
}
- pcie->intx_irq_domain = irq_domain_add_linear(np_intc, PCI_NUM_INTX,
+ pcie->intx_irq_domain = irq_domain_create_linear(of_fwnode_handle(np_intc), PCI_NUM_INTX,
&uniphier_intx_domain_ops, pp);
if (!pcie->intx_irq_domain) {
dev_err(pci->dev, "Failed to get INTx domain\n");
diff --git a/drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c b/drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c
index d7b7350f02dd..5af22bee913b 100644
--- a/drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c
+++ b/drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c
@@ -190,7 +190,7 @@ static void ls_g4_pcie_reset(struct work_struct *work)
ls_g4_pcie_enable_interrupt(pcie);
}
-static struct mobiveil_rp_ops ls_g4_pcie_rp_ops = {
+static const struct mobiveil_rp_ops ls_g4_pcie_rp_ops = {
.interrupt_init = ls_g4_pcie_interrupt_init,
};
diff --git a/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c b/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c
index 32951f7d6d6d..a600f46ee3c3 100644
--- a/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c
+++ b/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c
@@ -360,8 +360,8 @@ static struct irq_chip mobiveil_msi_irq_chip = {
};
static struct msi_domain_info mobiveil_msi_domain_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_PCI_MSIX),
+ .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX,
.chip = &mobiveil_msi_irq_chip,
};
@@ -378,16 +378,9 @@ static void mobiveil_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
(int)data->hwirq, msg->address_hi, msg->address_lo);
}
-static int mobiveil_msi_set_affinity(struct irq_data *irq_data,
- const struct cpumask *mask, bool force)
-{
- return -EINVAL;
-}
-
static struct irq_chip mobiveil_msi_bottom_irq_chip = {
.name = "Mobiveil MSI",
.irq_compose_msi_msg = mobiveil_compose_msi_msg,
- .irq_set_affinity = mobiveil_msi_set_affinity,
};
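
The deleted set_affinity stub above returned -EINVAL unconditionally; MSI_FLAG_NO_AFFINITY lets the MSI core refuse affinity changes itself, so the bottom chip no longer needs the callback. A sketch of the resulting pairing, with placeholder chip and handler names:

static struct irq_chip demo_msi_bottom_chip = {
	.name			= "demo MSI",
	.irq_compose_msi_msg	= demo_compose_msi_msg,
	/* no .irq_set_affinity: the core now rejects affinity changes */
};

static struct msi_domain_info demo_msi_domain_info = {
	.flags	= MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		  MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX,
	.chip	= &demo_msi_top_chip,	/* placeholder top-level chip */
};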
static int mobiveil_irq_msi_domain_alloc(struct irq_domain *domain,
@@ -442,12 +435,12 @@ static const struct irq_domain_ops msi_domain_ops = {
static int mobiveil_allocate_msi_domains(struct mobiveil_pcie *pcie)
{
struct device *dev = &pcie->pdev->dev;
- struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node);
+ struct fwnode_handle *fwnode = of_fwnode_handle(dev->of_node);
struct mobiveil_msi *msi = &pcie->rp.msi;
mutex_init(&msi->lock);
- msi->dev_domain = irq_domain_add_linear(NULL, msi->num_of_vectors,
- &msi_domain_ops, pcie);
+ msi->dev_domain = irq_domain_create_linear(NULL, msi->num_of_vectors,
+ &msi_domain_ops, pcie);
if (!msi->dev_domain) {
dev_err(dev, "failed to create IRQ domain\n");
return -ENOMEM;
@@ -468,12 +461,11 @@ static int mobiveil_allocate_msi_domains(struct mobiveil_pcie *pcie)
static int mobiveil_pcie_init_irq_domain(struct mobiveil_pcie *pcie)
{
struct device *dev = &pcie->pdev->dev;
- struct device_node *node = dev->of_node;
struct mobiveil_root_port *rp = &pcie->rp;
/* setup INTx */
- rp->intx_domain = irq_domain_add_linear(node, PCI_NUM_INTX,
- &intx_domain_ops, pcie);
+ rp->intx_domain = irq_domain_create_linear(of_fwnode_handle(dev->of_node), PCI_NUM_INTX,
+ &intx_domain_ops, pcie);
if (!rp->intx_domain) {
dev_err(dev, "Failed to get a INTx IRQ domain\n");
diff --git a/drivers/pci/controller/mobiveil/pcie-mobiveil.h b/drivers/pci/controller/mobiveil/pcie-mobiveil.h
index 6082b8afbc31..e63abb887ee3 100644
--- a/drivers/pci/controller/mobiveil/pcie-mobiveil.h
+++ b/drivers/pci/controller/mobiveil/pcie-mobiveil.h
@@ -151,7 +151,7 @@ struct mobiveil_rp_ops {
struct mobiveil_root_port {
void __iomem *config_axi_slave_base; /* endpoint config base */
struct resource *ob_io_res;
- struct mobiveil_rp_ops *ops;
+ const struct mobiveil_rp_ops *ops;
int irq;
raw_spinlock_t intx_mask_lock;
struct irq_domain *intx_domain;
diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c
index 71ecd7ddcc8a..7bac64533b14 100644
--- a/drivers/pci/controller/pci-aardvark.c
+++ b/drivers/pci/controller/pci-aardvark.c
@@ -23,7 +23,6 @@
#include <linux/platform_device.h>
#include <linux/msi.h>
#include <linux/of_address.h>
-#include <linux/of_gpio.h>
#include <linux/of_pci.h>
#include "../pci.h"
@@ -51,7 +50,7 @@
#define PIO_COMPLETION_STATUS_MASK GENMASK(9, 7)
#define PIO_COMPLETION_STATUS_OK 0
#define PIO_COMPLETION_STATUS_UR 1
-#define PIO_COMPLETION_STATUS_CRS 2
+#define PIO_COMPLETION_STATUS_RRS 2
#define PIO_COMPLETION_STATUS_CA 4
#define PIO_NON_POSTED_REQ BIT(10)
#define PIO_ERR_STATUS BIT(11)
@@ -263,7 +262,7 @@ enum {
#define MSI_IRQ_NUM 32
-#define CFG_RD_CRS_VAL 0xffff0001
+#define CFG_RD_RRS_VAL 0xffff0001
struct advk_pcie {
struct platform_device *pdev;
@@ -650,7 +649,7 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
advk_pcie_train_link(pcie);
}
-static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u32 *val)
+static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_rrs, u32 *val)
{
struct device *dev = &pcie->pdev->dev;
u32 reg;
@@ -670,7 +669,7 @@ static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u3
* 2) value Unsupported Request(1) of COMPLETION_STATUS(bit9:7) only
* means a PIO write error, and for PIO read it is successful with
* a read value of 0xFFFFFFFF.
- * 3) value Completion Retry Status(CRS) of COMPLETION_STATUS(bit9:7)
+ * 3) value Config Request Retry Status(RRS) of COMPLETION_STATUS(bit9:7)
* only means a PIO write error, and for PIO read it is successful
* with a read value of 0xFFFF0001.
* 4) value Completer Abort (CA) of COMPLETION_STATUS(bit9:7) means
@@ -695,10 +694,10 @@ static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u3
strcomp_status = "UR";
ret = -EOPNOTSUPP;
break;
- case PIO_COMPLETION_STATUS_CRS:
- if (allow_crs && val) {
- /* PCIe r4.0, sec 2.3.2, says:
- * If CRS Software Visibility is enabled:
+ case PIO_COMPLETION_STATUS_RRS:
+ if (allow_rrs && val) {
+ /* PCIe r6.0, sec 2.3.2, says:
+ * If Configuration RRS Software Visibility is enabled:
* For a Configuration Read Request that includes both
* bytes of the Vendor ID field of a device Function's
* Configuration Space Header, the Root Complex must
@@ -707,22 +706,22 @@ static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u3
* all '1's for any additional bytes included in the
* request.
*
- * So CRS in this case is not an error status.
+ * So RRS in this case is not an error status.
*/
- *val = CFG_RD_CRS_VAL;
+ *val = CFG_RD_RRS_VAL;
strcomp_status = NULL;
ret = 0;
break;
}
- /* PCIe r4.0, sec 2.3.2, says:
- * If CRS Software Visibility is not enabled, the Root Complex
+ /* PCIe r6.0, sec 2.3.2, says:
+ * If RRS Software Visibility is not enabled, the Root Complex
* must re-issue the Configuration Request as a new Request.
- * If CRS Software Visibility is enabled: For a Configuration
+ * If RRS Software Visibility is enabled: For a Configuration
* Write Request or for any other Configuration Read Request,
* the Root Complex must re-issue the Configuration Request as
* a new Request.
* A Root Complex implementation may choose to limit the number
- * of Configuration Request/CRS Completion Status loops before
+ * of Configuration Request/RRS Completion Status loops before
* determining that something is wrong with the target of the
* Request and taking appropriate action, e.g., complete the
* Request to the host as a failed transaction.
@@ -730,7 +729,7 @@ static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u3
* So return -EAGAIN and caller (pci-aardvark.c driver) will
* re-issue request again up to the PIO_RETRY_CNT retries.
*/
- strcomp_status = "CRS";
+ strcomp_status = "RRS";
ret = -EAGAIN;
break;
case PIO_COMPLETION_STATUS_CA:
@@ -921,8 +920,8 @@ advk_pci_bridge_emul_pcie_conf_write(struct pci_bridge_emul *bridge,
case PCI_EXP_RTCTL: {
u16 rootctl = le16_to_cpu(bridge->pcie_conf.rootctl);
- /* Only emulation of PMEIE and CRSSVE bits is provided */
- rootctl &= PCI_EXP_RTCTL_PMEIE | PCI_EXP_RTCTL_CRSSVE;
+ /* Only emulation of PMEIE and RRS_SVE bits is provided */
+ rootctl &= PCI_EXP_RTCTL_PMEIE | PCI_EXP_RTCTL_RRS_SVE;
bridge->pcie_conf.rootctl = cpu_to_le16(rootctl);
break;
}
@@ -1076,7 +1075,7 @@ static int advk_sw_pci_bridge_init(struct advk_pcie *pcie)
bridge->pcie_conf.slotsta = cpu_to_le16(PCI_EXP_SLTSTA_PDS);
/* Indicates support for Configuration Request Retry Status */
- bridge->pcie_conf.rootcap = cpu_to_le16(PCI_EXP_RTCAP_CRSVIS);
+ bridge->pcie_conf.rootcap = cpu_to_le16(PCI_EXP_RTCAP_RRS_SV);
bridge->subsystem_vendor_id = advk_readl(pcie, PCIE_CORE_SSDEV_ID_REG) & 0xffff;
bridge->subsystem_id = advk_readl(pcie, PCIE_CORE_SSDEV_ID_REG) >> 16;
@@ -1142,7 +1141,7 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
{
struct advk_pcie *pcie = bus->sysdata;
int retry_count;
- bool allow_crs;
+ bool allow_rrs;
u32 reg;
int ret;
@@ -1154,16 +1153,16 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
size, val);
/*
- * Completion Retry Status is possible to return only when reading all
- * 4 bytes from PCI_VENDOR_ID and PCI_DEVICE_ID registers at once and
- * CRSSVE flag on Root Bridge is enabled.
+ * Configuration Request Retry Status (RRS) can be returned only
+ * when both bytes of PCI_VENDOR_ID are read at once and the
+ * RRS_SVE flag on the Root Port is enabled.
*/
- allow_crs = (where == PCI_VENDOR_ID) && (size == 4) &&
+ allow_rrs = (where == PCI_VENDOR_ID) && (size >= 2) &&
(le16_to_cpu(pcie->bridge.pcie_conf.rootctl) &
- PCI_EXP_RTCTL_CRSSVE);
+ PCI_EXP_RTCTL_RRS_SVE);
if (advk_pcie_pio_is_running(pcie))
- goto try_crs;
+ goto try_rrs;
/* Program the control register */
reg = advk_readl(pcie, PIO_CTRL);
@@ -1190,12 +1189,12 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
ret = advk_pcie_wait_pio(pcie);
if (ret < 0)
- goto try_crs;
+ goto try_rrs;
retry_count += ret;
/* Check PIO status and get the read result */
- ret = advk_pcie_check_pio_status(pcie, allow_crs, val);
+ ret = advk_pcie_check_pio_status(pcie, allow_rrs, val);
} while (ret == -EAGAIN && retry_count < PIO_RETRY_CNT);
if (ret < 0)
@@ -1208,13 +1207,13 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
return PCIBIOS_SUCCESSFUL;
-try_crs:
+try_rrs:
/*
- * If it is possible, return Completion Retry Status so that caller
- * tries to issue the request again instead of failing.
+ * If it is possible, return Configuration Request Retry Status so
+ * that caller tries to issue the request again instead of failing.
*/
- if (allow_crs) {
- *val = CFG_RD_CRS_VAL;
+ if (allow_rrs) {
+ *val = CFG_RD_RRS_VAL;
return PCIBIOS_SUCCESSFUL;
}
@@ -1305,12 +1304,6 @@ static void advk_msi_irq_compose_msi_msg(struct irq_data *data,
msg->data = data->hwirq;
}
-static int advk_msi_set_affinity(struct irq_data *irq_data,
- const struct cpumask *mask, bool force)
-{
- return -EINVAL;
-}
-
static void advk_msi_irq_mask(struct irq_data *d)
{
struct advk_pcie *pcie = d->domain->host_data;
@@ -1354,7 +1347,6 @@ static void advk_msi_top_irq_unmask(struct irq_data *d)
static struct irq_chip advk_msi_bottom_irq_chip = {
.name = "MSI",
.irq_compose_msi_msg = advk_msi_irq_compose_msi_msg,
- .irq_set_affinity = advk_msi_set_affinity,
.irq_mask = advk_msi_irq_mask,
.irq_unmask = advk_msi_irq_unmask,
};
@@ -1452,7 +1444,8 @@ static struct irq_chip advk_msi_irq_chip = {
static struct msi_domain_info advk_msi_domain_info = {
.flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX,
+ MSI_FLAG_NO_AFFINITY | MSI_FLAG_MULTI_PCI_MSI |
+ MSI_FLAG_PCI_MSIX,
.chip = &advk_msi_irq_chip,
};
@@ -1463,9 +1456,8 @@ static int advk_pcie_init_msi_irq_domain(struct advk_pcie *pcie)
raw_spin_lock_init(&pcie->msi_irq_lock);
mutex_init(&pcie->msi_used_lock);
- pcie->msi_inner_domain =
- irq_domain_add_linear(NULL, MSI_IRQ_NUM,
- &advk_msi_domain_ops, pcie);
+ pcie->msi_inner_domain = irq_domain_create_linear(NULL, MSI_IRQ_NUM,
+ &advk_msi_domain_ops, pcie);
if (!pcie->msi_inner_domain)
return -ENOMEM;
@@ -1515,9 +1507,8 @@ static int advk_pcie_init_irq_domain(struct advk_pcie *pcie)
irq_chip->irq_mask = advk_pcie_irq_mask;
irq_chip->irq_unmask = advk_pcie_irq_unmask;
- pcie->irq_domain =
- irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
- &advk_pcie_irq_domain_ops, pcie);
+ pcie->irq_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node), PCI_NUM_INTX,
+ &advk_pcie_irq_domain_ops, pcie);
if (!pcie->irq_domain) {
dev_err(dev, "Failed to get a INTx IRQ domain\n");
ret = -ENOMEM;
@@ -1556,9 +1547,7 @@ static const struct irq_domain_ops advk_pcie_rp_irq_domain_ops = {
static int advk_pcie_init_rp_irq_domain(struct advk_pcie *pcie)
{
- pcie->rp_irq_domain = irq_domain_add_linear(NULL, 1,
- &advk_pcie_rp_irq_domain_ops,
- pcie);
+ pcie->rp_irq_domain = irq_domain_create_linear(NULL, 1, &advk_pcie_rp_irq_domain_ops, pcie);
if (!pcie->rp_irq_domain) {
dev_err(&pcie->pdev->dev, "Failed to add Root Port IRQ domain\n");
return -ENOMEM;
@@ -2003,7 +1992,7 @@ static struct platform_driver advk_pcie_driver = {
.of_match_table = advk_pcie_of_match_table,
},
.probe = advk_pcie_probe,
- .remove_new = advk_pcie_remove,
+ .remove = advk_pcie_remove,
};
module_platform_driver(advk_pcie_driver);
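
Behaviour is unchanged by the CRS-to-RRS rename above: a Vendor ID config read that completes with Retry Status still yields 0xffff0001 when the software-visibility bit is set. A condensed sketch of that contract, reusing the driver's constant:

/*
 * PCIe r6.0, sec 2.3.2: with RRS Software Visibility enabled, a Vendor ID
 * read hitting Retry Status returns 0x0001 in the Vendor ID bytes and
 * all 1's elsewhere, instead of being re-issued by the Root Complex.
 */
static int demo_read_vendor_id(bool rrs_sv_enabled, u32 *val)
{
	if (rrs_sv_enabled) {
		*val = CFG_RD_RRS_VAL;	/* 0xffff0001: exists, not ready */
		return PCIBIOS_SUCCESSFUL;
	}
	return -EAGAIN;			/* caller re-issues the request */
}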
diff --git a/drivers/pci/controller/pci-ftpci100.c b/drivers/pci/controller/pci-ftpci100.c
index ffdeed25e961..28e43831c0f1 100644
--- a/drivers/pci/controller/pci-ftpci100.c
+++ b/drivers/pci/controller/pci-ftpci100.c
@@ -345,8 +345,8 @@ static int faraday_pci_setup_cascaded_irq(struct faraday_pci *p)
return irq ?: -EINVAL;
}
- p->irqdomain = irq_domain_add_linear(intc, PCI_NUM_INTX,
- &faraday_pci_irqdomain_ops, p);
+ p->irqdomain = irq_domain_create_linear(of_fwnode_handle(intc), PCI_NUM_INTX,
+ &faraday_pci_irqdomain_ops, p);
of_node_put(intc);
if (!p->irqdomain) {
dev_err(p->dev, "failed to create Gemini PCI IRQ domain\n");
diff --git a/drivers/pci/controller/pci-host-common.c b/drivers/pci/controller/pci-host-common.c
index 45b71806182d..f441bfd6f96a 100644
--- a/drivers/pci/controller/pci-host-common.c
+++ b/drivers/pci/controller/pci-host-common.c
@@ -73,12 +73,10 @@ int pci_host_common_probe(struct platform_device *pdev)
if (IS_ERR(cfg))
return PTR_ERR(cfg);
- /* Do not reassign resources if probe only */
- if (!pci_has_flag(PCI_PROBE_ONLY))
- pci_add_flags(PCI_REASSIGN_ALL_BUS);
-
bridge->sysdata = cfg;
bridge->ops = (struct pci_ops *)&ops->pci_ops;
+ bridge->enable_device = ops->enable_device;
+ bridge->disable_device = ops->disable_device;
bridge->msi_domain = true;
return pci_host_probe(bridge);
@@ -96,4 +94,5 @@ void pci_host_common_remove(struct platform_device *pdev)
}
EXPORT_SYMBOL_GPL(pci_host_common_remove);
+MODULE_DESCRIPTION("Generic PCI host common driver");
MODULE_LICENSE("GPL v2");
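
pci-host-common now forwards optional enable_device/disable_device hooks from the ECAM ops to the host bridge; pcie-apple further down uses them to replace its bus notifier. A sketch of how a driver would wire these up, with hypothetical callback names:

static int demo_enable_device(struct pci_host_bridge *bridge,
			      struct pci_dev *pdev)
{
	/* e.g. allocate a per-device stream ID before the device is used */
	return 0;
}

static void demo_disable_device(struct pci_host_bridge *bridge,
				struct pci_dev *pdev)
{
	/* release whatever enable_device set up */
}

static const struct pci_ecam_ops demo_ecam_ops = {
	.init		= demo_init,	/* hypothetical per-host init */
	.enable_device	= demo_enable_device,
	.disable_device	= demo_disable_device,
	.pci_ops	= {
		.map_bus	= pci_ecam_map_bus,
		.read		= pci_generic_config_read,
		.write		= pci_generic_config_write,
	},
};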
diff --git a/drivers/pci/controller/pci-host-generic.c b/drivers/pci/controller/pci-host-generic.c
index 41cb6a057f6e..4051b9b61dac 100644
--- a/drivers/pci/controller/pci-host-generic.c
+++ b/drivers/pci/controller/pci-host-generic.c
@@ -82,8 +82,9 @@ static struct platform_driver gen_pci_driver = {
.of_match_table = gen_pci_of_match,
},
.probe = pci_host_common_probe,
- .remove_new = pci_host_common_remove,
+ .remove = pci_host_common_remove,
};
module_platform_driver(gen_pci_driver);
+MODULE_DESCRIPTION("Generic PCI host controller driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
index 5992280e8110..e1eaa24559a2 100644
--- a/drivers/pci/controller/pci-hyperv.c
+++ b/drivers/pci/controller/pci-hyperv.c
@@ -1130,8 +1130,8 @@ static void _hv_pcifront_read_config(struct hv_pci_dev *hpdev, int where,
PCI_CAPABILITY_LIST) {
/* ROM BARs are unimplemented */
*val = 0;
- } else if (where >= PCI_INTERRUPT_LINE && where + size <=
- PCI_INTERRUPT_PIN) {
+ } else if ((where >= PCI_INTERRUPT_LINE && where + size <= PCI_INTERRUPT_PIN) ||
+ (where >= PCI_INTERRUPT_PIN && where + size <= PCI_MIN_GNT)) {
/*
* Interrupt Line and Interrupt PIN are hard-wired to zero
* because this front-end only supports message-signaled
@@ -1356,7 +1356,7 @@ static struct pci_ops hv_pcifront_ops = {
*
* If the PF driver wishes to initiate communication, it can "invalidate" one or
* more of the first 64 blocks. This invalidation is delivered via a callback
- * supplied by the VF driver by this driver.
+ * supplied to the VF driver by this driver.
*
* No protocol is implied, except that supplied by the PF and VF drivers.
*/
@@ -1757,8 +1757,7 @@ static int hv_compose_multi_msi_req_get_cpu(void)
spin_lock_irqsave(&multi_msi_cpu_lock, flags);
- cpu_next = cpumask_next_wrap(cpu_next, cpu_online_mask, nr_cpu_ids,
- false);
+ cpu_next = cpumask_next_wrap(cpu_next, cpu_online_mask);
cpu = cpu_next;
spin_unlock_irqrestore(&multi_msi_cpu_lock, flags);
@@ -2053,6 +2052,7 @@ static struct irq_chip hv_msi_irq_chip = {
.irq_set_affinity = irq_chip_set_affinity_parent,
#ifdef CONFIG_X86
.irq_ack = irq_chip_ack_parent,
+ .flags = IRQCHIP_MOVE_DEFERRED,
#elif defined(CONFIG_ARM64)
.irq_eoi = irq_chip_eoi_parent,
#endif
@@ -3975,24 +3975,18 @@ static int hv_pci_restore_msi_msg(struct pci_dev *pdev, void *arg)
{
struct irq_data *irq_data;
struct msi_desc *entry;
- int ret = 0;
if (!pdev->msi_enabled && !pdev->msix_enabled)
return 0;
- msi_lock_descs(&pdev->dev);
+ guard(msi_descs_lock)(&pdev->dev);
msi_for_each_desc(entry, &pdev->dev, MSI_DESC_ASSOCIATED) {
irq_data = irq_get_irq_data(entry->irq);
- if (WARN_ON_ONCE(!irq_data)) {
- ret = -EINVAL;
- break;
- }
-
+ if (WARN_ON_ONCE(!irq_data))
+ return -EINVAL;
hv_compose_msi_msg(irq_data, &entry->msg);
}
- msi_unlock_descs(&pdev->dev);
-
- return ret;
+ return 0;
}
/*
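
The hv_pci_restore_msi_msg() rewrite above leans on scope-based cleanup: guard(msi_descs_lock) acquires the MSI descriptor lock and releases it on every exit path, which is what makes the early return safe. A minimal sketch of the pattern:

static int demo_walk_msi_descs(struct device *dev)
{
	struct msi_desc *entry;

	guard(msi_descs_lock)(dev);	/* dropped automatically at any return */

	msi_for_each_desc(entry, dev, MSI_DESC_ASSOCIATED) {
		if (WARN_ON_ONCE(!entry->irq))
			return -EINVAL;	/* no explicit unlock needed */
	}
	return 0;
}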
diff --git a/drivers/pci/controller/pci-loongson.c b/drivers/pci/controller/pci-loongson.c
index 8b34ccff073a..bc630ab8a283 100644
--- a/drivers/pci/controller/pci-loongson.c
+++ b/drivers/pci/controller/pci-loongson.c
@@ -163,6 +163,19 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON,
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON,
DEV_LS7A_HDMI, loongson_pci_pin_quirk);
+static void loongson_pci_msi_quirk(struct pci_dev *dev)
+{
+ u16 val, class = dev->class >> 8;
+
+ if (class != PCI_CLASS_BRIDGE_HOST)
+ return;
+
+ pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &val);
+ val |= PCI_MSI_FLAGS_ENABLE;
+ pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, val);
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON, DEV_LS7A_PCIE_PORT5, loongson_pci_msi_quirk);
+
static struct loongson_pci *pci_bus_to_loongson_pci(struct pci_bus *bus)
{
struct pci_config_window *cfg;
diff --git a/drivers/pci/controller/pci-mvebu.c b/drivers/pci/controller/pci-mvebu.c
index 29fe09c99e7d..60da24ba0a19 100644
--- a/drivers/pci/controller/pci-mvebu.c
+++ b/drivers/pci/controller/pci-mvebu.c
@@ -1078,9 +1078,9 @@ static int mvebu_pcie_init_irq_domain(struct mvebu_pcie_port *port)
return -ENODEV;
}
- port->intx_irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
- &mvebu_pcie_intx_irq_domain_ops,
- port);
+ port->intx_irq_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node),
+ PCI_NUM_INTX,
+ &mvebu_pcie_intx_irq_domain_ops, port);
of_node_put(pcie_intc_node);
if (!port->intx_irq_domain) {
dev_err(dev, "Failed to get INTx IRQ domain for %s\n", port->name);
@@ -1422,7 +1422,7 @@ static void mvebu_pcie_powerdown(struct mvebu_pcie_port *port)
}
/*
- * devm_of_pci_get_host_bridge_resources() only sets up translateable resources,
+ * devm_of_pci_get_host_bridge_resources() only sets up translatable resources,
* so we need extra resource setup parsing our special DT properties encoding
* the MEM and IO apertures.
*/
@@ -1715,6 +1715,7 @@ static const struct of_device_id mvebu_pcie_of_match_table[] = {
{ .compatible = "marvell,kirkwood-pcie", },
{},
};
+MODULE_DEVICE_TABLE(of, mvebu_pcie_of_match_table);
static const struct dev_pm_ops mvebu_pcie_pm_ops = {
NOIRQ_SYSTEM_SLEEP_PM_OPS(mvebu_pcie_suspend, mvebu_pcie_resume)
@@ -1727,7 +1728,7 @@ static struct platform_driver mvebu_pcie_driver = {
.pm = &mvebu_pcie_pm_ops,
},
.probe = mvebu_pcie_probe,
- .remove_new = mvebu_pcie_remove,
+ .remove = mvebu_pcie_remove,
};
module_platform_driver(mvebu_pcie_driver);
diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c
index 038d974a318e..467ddc701adc 100644
--- a/drivers/pci/controller/pci-tegra.c
+++ b/drivers/pci/controller/pci-tegra.c
@@ -22,6 +22,7 @@
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
+#include <linux/irqchip/irq-msi-lib.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -1460,7 +1461,7 @@ static int tegra_pcie_get_resources(struct tegra_pcie *pcie)
pcie->cs = *res;
/* constrain configuration space to 4 KiB */
- pcie->cs.end = pcie->cs.start + SZ_4K - 1;
+ resource_set_size(&pcie->cs, SZ_4K);
pcie->cfg = devm_ioremap_resource(dev, &pcie->cs);
if (IS_ERR(pcie->cfg)) {
@@ -1547,7 +1548,7 @@ static void tegra_pcie_msi_irq(struct irq_desc *desc)
unsigned int index = i * 32 + offset;
int ret;
- ret = generic_handle_domain_irq(msi->domain->parent, index);
+ ret = generic_handle_domain_irq(msi->domain, index);
if (ret) {
/*
* that's weird, who triggered this?
@@ -1565,30 +1566,6 @@ static void tegra_pcie_msi_irq(struct irq_desc *desc)
chained_irq_exit(chip, desc);
}
-static void tegra_msi_top_irq_ack(struct irq_data *d)
-{
- irq_chip_ack_parent(d);
-}
-
-static void tegra_msi_top_irq_mask(struct irq_data *d)
-{
- pci_msi_mask_irq(d);
- irq_chip_mask_parent(d);
-}
-
-static void tegra_msi_top_irq_unmask(struct irq_data *d)
-{
- pci_msi_unmask_irq(d);
- irq_chip_unmask_parent(d);
-}
-
-static struct irq_chip tegra_msi_top_chip = {
- .name = "Tegra PCIe MSI",
- .irq_ack = tegra_msi_top_irq_ack,
- .irq_mask = tegra_msi_top_irq_mask,
- .irq_unmask = tegra_msi_top_irq_unmask,
-};
-
static void tegra_msi_irq_ack(struct irq_data *d)
{
struct tegra_msi *msi = irq_data_get_irq_chip_data(d);
@@ -1629,11 +1606,6 @@ static void tegra_msi_irq_unmask(struct irq_data *d)
spin_unlock_irqrestore(&msi->mask_lock, flags);
}
-static int tegra_msi_set_affinity(struct irq_data *d, const struct cpumask *mask, bool force)
-{
- return -EINVAL;
-}
-
static void tegra_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
struct tegra_msi *msi = irq_data_get_irq_chip_data(data);
@@ -1648,7 +1620,6 @@ static struct irq_chip tegra_msi_bottom_chip = {
.irq_ack = tegra_msi_irq_ack,
.irq_mask = tegra_msi_irq_mask,
.irq_unmask = tegra_msi_irq_unmask,
- .irq_set_affinity = tegra_msi_set_affinity,
.irq_compose_msi_msg = tegra_compose_msi_msg,
};
@@ -1696,42 +1667,40 @@ static const struct irq_domain_ops tegra_msi_domain_ops = {
.free = tegra_msi_domain_free,
};
-static struct msi_domain_info tegra_msi_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_PCI_MSIX),
- .chip = &tegra_msi_top_chip,
+static const struct msi_parent_ops tegra_msi_parent_ops = {
+ .supported_flags = (MSI_GENERIC_FLAGS_MASK |
+ MSI_FLAG_PCI_MSIX),
+ .required_flags = (MSI_FLAG_USE_DEF_DOM_OPS |
+ MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_PCI_MSI_MASK_PARENT |
+ MSI_FLAG_NO_AFFINITY),
+ .chip_flags = MSI_CHIP_FLAG_SET_ACK,
+ .bus_select_token = DOMAIN_BUS_PCI_MSI,
+ .init_dev_msi_info = msi_lib_init_dev_msi_info,
};
static int tegra_allocate_domains(struct tegra_msi *msi)
{
struct tegra_pcie *pcie = msi_to_pcie(msi);
struct fwnode_handle *fwnode = dev_fwnode(pcie->dev);
- struct irq_domain *parent;
-
- parent = irq_domain_create_linear(fwnode, INT_PCI_MSI_NR,
- &tegra_msi_domain_ops, msi);
- if (!parent) {
- dev_err(pcie->dev, "failed to create IRQ domain\n");
- return -ENOMEM;
- }
- irq_domain_update_bus_token(parent, DOMAIN_BUS_NEXUS);
+ struct irq_domain_info info = {
+ .fwnode = fwnode,
+ .ops = &tegra_msi_domain_ops,
+ .size = INT_PCI_MSI_NR,
+ .host_data = msi,
+ };
- msi->domain = pci_msi_create_irq_domain(fwnode, &tegra_msi_info, parent);
+ msi->domain = msi_create_parent_irq_domain(&info, &tegra_msi_parent_ops);
if (!msi->domain) {
dev_err(pcie->dev, "failed to create MSI domain\n");
- irq_domain_remove(parent);
return -ENOMEM;
}
-
return 0;
}
static void tegra_free_domains(struct tegra_msi *msi)
{
- struct irq_domain *parent = msi->domain->parent;
-
irq_domain_remove(msi->domain);
- irq_domain_remove(parent);
}
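
The Tegra rework above replaces the two-level arrangement (an inner nexus domain plus pci_msi_create_irq_domain()) with a single parent domain described by struct irq_domain_info and struct msi_parent_ops. A trimmed sketch of the new shape, with placeholder names and sizes:

static const struct msi_parent_ops demo_msi_parent_ops = {
	.supported_flags	= MSI_GENERIC_FLAGS_MASK | MSI_FLAG_PCI_MSIX,
	.required_flags		= MSI_FLAG_USE_DEF_DOM_OPS |
				  MSI_FLAG_USE_DEF_CHIP_OPS |
				  MSI_FLAG_PCI_MSI_MASK_PARENT |
				  MSI_FLAG_NO_AFFINITY,
	.chip_flags		= MSI_CHIP_FLAG_SET_ACK,
	.bus_select_token	= DOMAIN_BUS_PCI_MSI,
	.init_dev_msi_info	= msi_lib_init_dev_msi_info,
};

static int demo_allocate_domain(struct demo_msi *msi)
{
	struct irq_domain_info info = {
		.fwnode		= dev_fwnode(msi->dev),
		.ops		= &demo_msi_domain_ops,	/* .alloc/.free */
		.size		= DEMO_NR_VECTORS,
		.host_data	= msi,
	};

	msi->domain = msi_create_parent_irq_domain(&info,
						   &demo_msi_parent_ops);
	return msi->domain ? 0 : -ENOMEM;
}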
static int tegra_pcie_msi_setup(struct tegra_pcie *pcie)
@@ -2112,47 +2081,39 @@ static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask)
static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
{
struct device *dev = pcie->dev;
- struct device_node *np = dev->of_node, *port;
+ struct device_node *np = dev->of_node;
const struct tegra_pcie_soc *soc = pcie->soc;
u32 lanes = 0, mask = 0;
unsigned int lane = 0;
int err;
/* parse root ports */
- for_each_child_of_node(np, port) {
+ for_each_child_of_node_scoped(np, port) {
struct tegra_pcie_port *rp;
unsigned int index;
u32 value;
char *label;
err = of_pci_get_devfn(port);
- if (err < 0) {
- dev_err(dev, "failed to parse address: %d\n", err);
- goto err_node_put;
- }
+ if (err < 0)
+ return dev_err_probe(dev, err, "failed to parse address\n");
index = PCI_SLOT(err);
- if (index < 1 || index > soc->num_ports) {
- dev_err(dev, "invalid port number: %d\n", index);
- err = -EINVAL;
- goto err_node_put;
- }
+ if (index < 1 || index > soc->num_ports)
+ return dev_err_probe(dev, -EINVAL,
+ "invalid port number: %d\n", index);
index--;
err = of_property_read_u32(port, "nvidia,num-lanes", &value);
- if (err < 0) {
- dev_err(dev, "failed to parse # of lanes: %d\n",
- err);
- goto err_node_put;
- }
+ if (err < 0)
+ return dev_err_probe(dev, err,
+ "failed to parse # of lanes\n");
- if (value > 16) {
- dev_err(dev, "invalid # of lanes: %u\n", value);
- err = -EINVAL;
- goto err_node_put;
- }
+ if (value > 16)
+ return dev_err_probe(dev, -EINVAL,
+ "invalid # of lanes: %u\n", value);
lanes |= value << (index << 3);
@@ -2165,16 +2126,12 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
lane += value;
rp = devm_kzalloc(dev, sizeof(*rp), GFP_KERNEL);
- if (!rp) {
- err = -ENOMEM;
- goto err_node_put;
- }
+ if (!rp)
+ return -ENOMEM;
err = of_address_to_resource(port, 0, &rp->regs);
- if (err < 0) {
- dev_err(dev, "failed to parse address: %d\n", err);
- goto err_node_put;
- }
+ if (err < 0)
+ return dev_err_probe(dev, err, "failed to parse address\n");
INIT_LIST_HEAD(&rp->list);
rp->index = index;
@@ -2183,16 +2140,12 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
rp->np = port;
rp->base = devm_pci_remap_cfg_resource(dev, &rp->regs);
- if (IS_ERR(rp->base)) {
- err = PTR_ERR(rp->base);
- goto err_node_put;
- }
+ if (IS_ERR(rp->base))
+ return PTR_ERR(rp->base);
label = devm_kasprintf(dev, GFP_KERNEL, "pex-reset-%u", index);
- if (!label) {
- err = -ENOMEM;
- goto err_node_put;
- }
+ if (!label)
+ return -ENOMEM;
/*
* Returns -ENOENT if reset-gpios property is not populated
@@ -2205,34 +2158,26 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
GPIOD_OUT_LOW,
label);
if (IS_ERR(rp->reset_gpio)) {
- if (PTR_ERR(rp->reset_gpio) == -ENOENT) {
+ if (PTR_ERR(rp->reset_gpio) == -ENOENT)
rp->reset_gpio = NULL;
- } else {
- dev_err(dev, "failed to get reset GPIO: %ld\n",
- PTR_ERR(rp->reset_gpio));
- err = PTR_ERR(rp->reset_gpio);
- goto err_node_put;
- }
+ else
+ return dev_err_probe(dev, PTR_ERR(rp->reset_gpio),
+ "failed to get reset GPIO\n");
}
list_add_tail(&rp->list, &pcie->ports);
}
err = tegra_pcie_get_xbar_config(pcie, lanes, &pcie->xbar_config);
- if (err < 0) {
- dev_err(dev, "invalid lane configuration\n");
- return err;
- }
+ if (err < 0)
+ return dev_err_probe(dev, err,
+ "invalid lane configuration\n");
err = tegra_pcie_get_regulators(pcie, mask);
if (err < 0)
return err;
return 0;
-
-err_node_put:
- of_node_put(port);
- return err;
}
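
The parse_dt conversion above also folds each dev_err() + goto pair into dev_err_probe(), which logs the failure (or quietly records -EPROBE_DEFER) and returns the error code in a single expression. A small hypothetical example:

static int demo_get_clock(struct device *dev, struct clk **clk)
{
	*clk = devm_clk_get(dev, "pcie");
	if (IS_ERR(*clk))
		return dev_err_probe(dev, PTR_ERR(*clk),
				     "failed to get PCIe clock\n");
	return 0;
}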
/*
@@ -2806,6 +2751,6 @@ static struct platform_driver tegra_pcie_driver = {
.pm = &tegra_pcie_pm_ops,
},
.probe = tegra_pcie_probe,
- .remove_new = tegra_pcie_remove,
+ .remove = tegra_pcie_remove,
};
module_platform_driver(tegra_pcie_driver);
diff --git a/drivers/pci/controller/pci-thunder-ecam.c b/drivers/pci/controller/pci-thunder-ecam.c
index b5bd10a62adb..08161065a89c 100644
--- a/drivers/pci/controller/pci-thunder-ecam.c
+++ b/drivers/pci/controller/pci-thunder-ecam.c
@@ -204,7 +204,7 @@ static int thunder_ecam_config_read(struct pci_bus *bus, unsigned int devfn,
v = readl(addr);
if (v & 0xff00)
- pr_err("Bad MSIX cap header: %08x\n", v);
+ pr_err("Bad MSI-X cap header: %08x\n", v);
v |= 0xbc00; /* next capability is EA at 0xbc */
set_val(v, where, size, val);
return PCIBIOS_SUCCESSFUL;
diff --git a/drivers/pci/controller/pci-thunder-pem.c b/drivers/pci/controller/pci-thunder-pem.c
index 06a9855cb431..f1bd5de67997 100644
--- a/drivers/pci/controller/pci-thunder-pem.c
+++ b/drivers/pci/controller/pci-thunder-pem.c
@@ -400,9 +400,9 @@ static int thunder_pem_acpi_init(struct pci_config_window *cfg)
* Reserve 64K size PEM specific resources. The full 16M range
* size is required for thunder_pem_init() call.
*/
- res_pem->end = res_pem->start + SZ_64K - 1;
+ resource_set_size(res_pem, SZ_64K);
thunder_pem_reserve_range(dev, root->segment, res_pem);
- res_pem->end = res_pem->start + SZ_16M - 1;
+ resource_set_size(res_pem, SZ_16M);
/* Reserve PCI configuration space as well. */
thunder_pem_reserve_range(dev, root->segment, &cfg->res);
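
resource_set_size() replaces the open-coded end = start + size - 1 arithmetic in the two hunks above; the start address stays fixed and only the length changes. Illustrative use against the same 64K/16M dance:

/* res spans 16 MiB; narrow it to 64 KiB for the reservation, then restore */
resource_set_size(res, SZ_64K);	/* res->end = res->start + SZ_64K - 1 */
/* ... reserve the PEM-specific 64 KiB window ... */
resource_set_size(res, SZ_16M);	/* widen back to the full range */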
diff --git a/drivers/pci/controller/pci-xgene-msi.c b/drivers/pci/controller/pci-xgene-msi.c
index 3ce38dfd0d29..b05ec8b0bb93 100644
--- a/drivers/pci/controller/pci-xgene-msi.c
+++ b/drivers/pci/controller/pci-xgene-msi.c
@@ -12,6 +12,7 @@
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/irqchip/chained_irq.h>
+#include <linux/irqchip/irq-msi-lib.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/of_pci.h>
@@ -32,7 +33,6 @@ struct xgene_msi_group {
struct xgene_msi {
struct device_node *node;
struct irq_domain *inner_domain;
- struct irq_domain *msi_domain;
u64 msi_addr;
void __iomem *msi_regs;
unsigned long *bitmap;
@@ -44,20 +44,6 @@ struct xgene_msi {
/* Global data */
static struct xgene_msi xgene_msi_ctrl;
-static struct irq_chip xgene_msi_top_irq_chip = {
- .name = "X-Gene1 MSI",
- .irq_enable = pci_msi_unmask_irq,
- .irq_disable = pci_msi_mask_irq,
- .irq_mask = pci_msi_mask_irq,
- .irq_unmask = pci_msi_unmask_irq,
-};
-
-static struct msi_domain_info xgene_msi_domain_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_PCI_MSIX),
- .chip = &xgene_msi_top_irq_chip,
-};
-
/*
* X-Gene v1 has 16 groups of MSI termination registers MSInIRx, where
* n is group number (0..F), x is index of registers in each group (0..7)
@@ -154,7 +140,7 @@ static void xgene_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
* X-Gene v1 only has 16 MSI GIC IRQs for 2048 MSI vectors. To maintain
* the expected behaviour of .set_affinity for each MSI interrupt, the 16
* MSI GIC IRQs are statically allocated to 8 X-Gene v1 cores (2 GIC IRQs
- * for each core). The MSI vector is moved fom 1 MSI GIC IRQ to another
+ * for each core). The MSI vector is moved from 1 MSI GIC IRQ to another
* MSI GIC IRQ to steer its MSI interrupt to correct X-Gene v1 core. As a
* consequence, the total MSI vectors that X-Gene v1 supports will be
* reduced to 256 (2048/8) vectors.
@@ -235,34 +221,35 @@ static void xgene_irq_domain_free(struct irq_domain *domain,
irq_domain_free_irqs_parent(domain, virq, nr_irqs);
}
-static const struct irq_domain_ops msi_domain_ops = {
+static const struct irq_domain_ops xgene_msi_domain_ops = {
.alloc = xgene_irq_domain_alloc,
.free = xgene_irq_domain_free,
};
+static const struct msi_parent_ops xgene_msi_parent_ops = {
+ .supported_flags = (MSI_GENERIC_FLAGS_MASK |
+ MSI_FLAG_PCI_MSIX),
+ .required_flags = (MSI_FLAG_USE_DEF_DOM_OPS |
+ MSI_FLAG_USE_DEF_CHIP_OPS),
+ .bus_select_token = DOMAIN_BUS_PCI_MSI,
+ .init_dev_msi_info = msi_lib_init_dev_msi_info,
+};
+
static int xgene_allocate_domains(struct xgene_msi *msi)
{
- msi->inner_domain = irq_domain_add_linear(NULL, NR_MSI_VEC,
- &msi_domain_ops, msi);
- if (!msi->inner_domain)
- return -ENOMEM;
-
- msi->msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(msi->node),
- &xgene_msi_domain_info,
- msi->inner_domain);
-
- if (!msi->msi_domain) {
- irq_domain_remove(msi->inner_domain);
- return -ENOMEM;
- }
-
- return 0;
+ struct irq_domain_info info = {
+ .fwnode = of_fwnode_handle(msi->node),
+ .ops = &xgene_msi_domain_ops,
+ .size = NR_MSI_VEC,
+ .host_data = msi,
+ };
+
+ msi->inner_domain = msi_create_parent_irq_domain(&info, &xgene_msi_parent_ops);
+ return msi->inner_domain ? 0 : -ENOMEM;
}
static void xgene_free_domains(struct xgene_msi *msi)
{
- if (msi->msi_domain)
- irq_domain_remove(msi->msi_domain);
if (msi->inner_domain)
irq_domain_remove(msi->inner_domain);
}
@@ -518,7 +505,7 @@ static struct platform_driver xgene_msi_driver = {
.of_match_table = xgene_msi_match_table,
},
.probe = xgene_msi_probe,
- .remove_new = xgene_msi_remove,
+ .remove = xgene_msi_remove,
};
static int __init xgene_pcie_msi_init(void)
diff --git a/drivers/pci/controller/pci-xgene.c b/drivers/pci/controller/pci-xgene.c
index 8e457fa450a2..1e2ebbfa36d1 100644
--- a/drivers/pci/controller/pci-xgene.c
+++ b/drivers/pci/controller/pci-xgene.c
@@ -171,17 +171,17 @@ static int xgene_pcie_config_read32(struct pci_bus *bus, unsigned int devfn,
/*
* The v1 controller has a bug in its Configuration Request Retry
- * Status (CRS) logic: when CRS Software Visibility is enabled and
+ * Status (RRS) logic: when RRS Software Visibility is enabled and
* we read the Vendor and Device ID of a non-existent device, the
* controller fabricates return data of 0xFFFF0001 ("device exists
* but is not ready") instead of 0xFFFFFFFF (PCI_ERROR_RESPONSE)
* ("device does not exist"). This causes the PCI core to retry
* the read until it times out. Avoid this by not claiming to
- * support CRS SV.
+ * support RRS SV.
*/
if (pci_is_root_bus(bus) && (port->version == XGENE_PCIE_IP_VER_1) &&
((where & ~0x3) == XGENE_V1_PCI_EXP_CAP + PCI_EXP_RTCTL))
- *val &= ~(PCI_EXP_RTCAP_CRSVIS << 16);
+ *val &= ~(PCI_EXP_RTCAP_RRS_SV << 16);
if (size <= 2)
*val = (*val >> (8 * (where & 3))) & ((1 << (size * 8)) - 1);
diff --git a/drivers/pci/controller/pcie-altera-msi.c b/drivers/pci/controller/pcie-altera-msi.c
index 6ad5427490b5..a43f21eb8fbb 100644
--- a/drivers/pci/controller/pcie-altera-msi.c
+++ b/drivers/pci/controller/pcie-altera-msi.c
@@ -81,8 +81,8 @@ static struct irq_chip altera_msi_irq_chip = {
};
static struct msi_domain_info altera_msi_domain_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_PCI_MSIX),
+ .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX,
.chip = &altera_msi_irq_chip,
};
@@ -99,16 +99,9 @@ static void altera_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
(int)data->hwirq, msg->address_hi, msg->address_lo);
}
-static int altera_msi_set_affinity(struct irq_data *irq_data,
- const struct cpumask *mask, bool force)
-{
- return -EINVAL;
-}
-
static struct irq_chip altera_msi_bottom_irq_chip = {
.name = "Altera MSI",
.irq_compose_msi_msg = altera_compose_msi_msg,
- .irq_set_affinity = altera_msi_set_affinity,
};
static int altera_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
@@ -171,9 +164,9 @@ static const struct irq_domain_ops msi_domain_ops = {
static int altera_allocate_domains(struct altera_msi *msi)
{
- struct fwnode_handle *fwnode = of_node_to_fwnode(msi->pdev->dev.of_node);
+ struct fwnode_handle *fwnode = of_fwnode_handle(msi->pdev->dev.of_node);
- msi->inner_domain = irq_domain_add_linear(NULL, msi->num_of_vectors,
+ msi->inner_domain = irq_domain_create_linear(NULL, msi->num_of_vectors,
&msi_domain_ops, msi);
if (!msi->inner_domain) {
dev_err(&msi->pdev->dev, "failed to create IRQ domain\n");
@@ -274,7 +267,7 @@ static struct platform_driver altera_msi_driver = {
.of_match_table = altera_msi_of_match,
},
.probe = altera_msi_probe,
- .remove_new = altera_msi_remove,
+ .remove = altera_msi_remove,
};
static int __init altera_msi_init(void)
@@ -290,4 +283,5 @@ static void __exit altera_msi_exit(void)
subsys_initcall(altera_msi_init);
MODULE_DEVICE_TABLE(of, altera_msi_of_match);
module_exit(altera_msi_exit);
+MODULE_DESCRIPTION("Altera PCIe MSI support driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/pcie-altera.c b/drivers/pci/controller/pcie-altera.c
index a9536dc4bf96..0fc77176a52e 100644
--- a/drivers/pci/controller/pcie-altera.c
+++ b/drivers/pci/controller/pcie-altera.c
@@ -6,6 +6,7 @@
* Description: Altera PCIe host controller driver
*/
+#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irqchip/chained_irq.h>
@@ -55,12 +56,11 @@
#define TLP_READ_TAG 0x1d
#define TLP_WRITE_TAG 0x10
#define RP_DEVFN 0
-#define TLP_REQ_ID(bus, devfn) (((bus) << 8) | (devfn))
#define TLP_CFG_DW0(pcie, cfg) \
(((cfg) << 24) | \
TLP_PAYLOAD_SIZE)
#define TLP_CFG_DW1(pcie, tag, be) \
- (((TLP_REQ_ID(pcie->root_bus_nr, RP_DEVFN)) << 16) | (tag << 8) | (be))
+ (((PCI_DEVID(pcie->root_bus_nr, RP_DEVFN)) << 16) | (tag << 8) | (be))
#define TLP_CFG_DW2(bus, devfn, offset) \
(((bus) << 24) | ((devfn) << 16) | (offset))
#define TLP_COMP_STATUS(s) (((s) >> 13) & 7)
@@ -78,9 +78,25 @@
#define S10_TLP_FMTTYPE_CFGWR0 0x45
#define S10_TLP_FMTTYPE_CFGWR1 0x44
+#define AGLX_RP_CFG_ADDR(pcie, reg) (((pcie)->hip_base) + (reg))
+#define AGLX_RP_SECONDARY(pcie) \
+ readb(AGLX_RP_CFG_ADDR(pcie, PCI_SECONDARY_BUS))
+
+#define AGLX_BDF_REG 0x00002004
+#define AGLX_ROOT_PORT_IRQ_STATUS 0x14c
+#define AGLX_ROOT_PORT_IRQ_ENABLE 0x150
+#define CFG_AER BIT(4)
+
+#define AGLX_CFG_TARGET GENMASK(13, 12)
+#define AGLX_CFG_TARGET_TYPE0 0
+#define AGLX_CFG_TARGET_TYPE1 1
+#define AGLX_CFG_TARGET_LOCAL_2000 2
+#define AGLX_CFG_TARGET_LOCAL_3000 3
+
enum altera_pcie_version {
ALTERA_PCIE_V1 = 0,
ALTERA_PCIE_V2,
+ ALTERA_PCIE_V3,
};
struct altera_pcie {
@@ -103,6 +119,11 @@ struct altera_pcie_ops {
int size, u32 *value);
int (*rp_write_cfg)(struct altera_pcie *pcie, u8 busno,
int where, int size, u32 value);
+ int (*ep_read_cfg)(struct altera_pcie *pcie, u8 busno,
+ unsigned int devfn, int where, int size, u32 *value);
+ int (*ep_write_cfg)(struct altera_pcie *pcie, u8 busno,
+ unsigned int devfn, int where, int size, u32 value);
+ void (*rp_isr)(struct irq_desc *desc);
};
struct altera_pcie_data {
@@ -113,6 +134,9 @@ struct altera_pcie_data {
u32 cfgrd1;
u32 cfgwr0;
u32 cfgwr1;
+ u32 port_conf_offset;
+ u32 port_irq_status_offset;
+ u32 port_irq_enable_offset;
};
struct tlp_rp_regpair_t {
@@ -132,6 +156,28 @@ static inline u32 cra_readl(struct altera_pcie *pcie, const u32 reg)
return readl_relaxed(pcie->cra_base + reg);
}
+static inline void cra_writew(struct altera_pcie *pcie, const u32 value,
+ const u32 reg)
+{
+ writew_relaxed(value, pcie->cra_base + reg);
+}
+
+static inline u32 cra_readw(struct altera_pcie *pcie, const u32 reg)
+{
+ return readw_relaxed(pcie->cra_base + reg);
+}
+
+static inline void cra_writeb(struct altera_pcie *pcie, const u32 value,
+ const u32 reg)
+{
+ writeb_relaxed(value, pcie->cra_base + reg);
+}
+
+static inline u32 cra_readb(struct altera_pcie *pcie, const u32 reg)
+{
+ return readb_relaxed(pcie->cra_base + reg);
+}
+
static bool altera_pcie_link_up(struct altera_pcie *pcie)
{
return !!((cra_readl(pcie, RP_LTSSM) & RP_LTSSM_MASK) == LTSSM_L0);
@@ -146,11 +192,20 @@ static bool s10_altera_pcie_link_up(struct altera_pcie *pcie)
return !!(readw(addr) & PCI_EXP_LNKSTA_DLLLA);
}
+static bool aglx_altera_pcie_link_up(struct altera_pcie *pcie)
+{
+ void __iomem *addr = AGLX_RP_CFG_ADDR(pcie,
+ pcie->pcie_data->cap_offset +
+ PCI_EXP_LNKSTA);
+
+ return (readw_relaxed(addr) & PCI_EXP_LNKSTA_DLLLA);
+}
+
/*
* Altera PCIe port uses BAR0 of RC's configuration space as the translation
* from PCI bus to native BUS. Entire DDR region is mapped into PCIe space
* using these registers, so it can be reached by DMA from EP devices.
- * This BAR0 will also access to MSI vector when receiving MSI/MSIX interrupt
+ * This BAR0 is also used to access the MSI vector when an MSI/MSI-X interrupt
* is received from EP devices, eventually triggering an interrupt to the GIC. The BAR0 of bridge
* should be hidden during enumeration to avoid the sizing and resource
* allocation by PCIe core.
@@ -426,6 +481,103 @@ static int s10_rp_write_cfg(struct altera_pcie *pcie, u8 busno,
return PCIBIOS_SUCCESSFUL;
}
+static int aglx_rp_read_cfg(struct altera_pcie *pcie, int where,
+ int size, u32 *value)
+{
+ void __iomem *addr = AGLX_RP_CFG_ADDR(pcie, where);
+
+ switch (size) {
+ case 1:
+ *value = readb_relaxed(addr);
+ break;
+ case 2:
+ *value = readw_relaxed(addr);
+ break;
+ default:
+ *value = readl_relaxed(addr);
+ break;
+ }
+
+ /* Interrupt PIN not programmed in hardware, set to INTA. */
+ if (where == PCI_INTERRUPT_PIN && size == 1 && !(*value))
+ *value = 0x01;
+ else if (where == PCI_INTERRUPT_LINE && !(*value & 0xff00))
+ *value |= 0x0100;
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int aglx_rp_write_cfg(struct altera_pcie *pcie, u8 busno,
+ int where, int size, u32 value)
+{
+ void __iomem *addr = AGLX_RP_CFG_ADDR(pcie, where);
+
+ switch (size) {
+ case 1:
+ writeb_relaxed(value, addr);
+ break;
+ case 2:
+ writew_relaxed(value, addr);
+ break;
+ default:
+ writel_relaxed(value, addr);
+ break;
+ }
+
+ /*
+ * Monitor changes to PCI_PRIMARY_BUS register on Root Port
+ * and update local copy of root bus number accordingly.
+ */
+ if (busno == pcie->root_bus_nr && where == PCI_PRIMARY_BUS)
+ pcie->root_bus_nr = value & 0xff;
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int aglx_ep_write_cfg(struct altera_pcie *pcie, u8 busno,
+ unsigned int devfn, int where, int size, u32 value)
+{
+ cra_writel(pcie, ((busno << 8) | devfn), AGLX_BDF_REG);
+ if (busno > AGLX_RP_SECONDARY(pcie))
+ where |= FIELD_PREP(AGLX_CFG_TARGET, AGLX_CFG_TARGET_TYPE1);
+
+ switch (size) {
+ case 1:
+ cra_writeb(pcie, value, where);
+ break;
+ case 2:
+ cra_writew(pcie, value, where);
+ break;
+ default:
+ cra_writel(pcie, value, where);
+ break;
+ }
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int aglx_ep_read_cfg(struct altera_pcie *pcie, u8 busno,
+ unsigned int devfn, int where, int size, u32 *value)
+{
+ cra_writel(pcie, ((busno << 8) | devfn), AGLX_BDF_REG);
+ if (busno > AGLX_RP_SECONDARY(pcie))
+ where |= FIELD_PREP(AGLX_CFG_TARGET, AGLX_CFG_TARGET_TYPE1);
+
+ switch (size) {
+ case 1:
+ *value = cra_readb(pcie, where);
+ break;
+ case 2:
+ *value = cra_readw(pcie, where);
+ break;
+ default:
+ *value = cra_readl(pcie, where);
+ break;
+ }
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
static int _altera_pcie_cfg_read(struct altera_pcie *pcie, u8 busno,
unsigned int devfn, int where, int size,
u32 *value)
@@ -438,6 +590,10 @@ static int _altera_pcie_cfg_read(struct altera_pcie *pcie, u8 busno,
return pcie->pcie_data->ops->rp_read_cfg(pcie, where,
size, value);
+ if (pcie->pcie_data->ops->ep_read_cfg)
+ return pcie->pcie_data->ops->ep_read_cfg(pcie, busno, devfn,
+ where, size, value);
+
switch (size) {
case 1:
byte_en = 1 << (where & 3);
@@ -482,6 +638,10 @@ static int _altera_pcie_cfg_write(struct altera_pcie *pcie, u8 busno,
return pcie->pcie_data->ops->rp_write_cfg(pcie, busno,
where, size, value);
+ if (pcie->pcie_data->ops->ep_write_cfg)
+ return pcie->pcie_data->ops->ep_write_cfg(pcie, busno, devfn,
+ where, size, value);
+
switch (size) {
case 1:
data32 = (value & 0xff) << shift;
@@ -660,7 +820,32 @@ static void altera_pcie_isr(struct irq_desc *desc)
dev_err_ratelimited(dev, "unexpected IRQ, INT%d\n", bit);
}
}
+ chained_irq_exit(chip, desc);
+}
+
+static void aglx_isr(struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct altera_pcie *pcie;
+ struct device *dev;
+ u32 status;
+ int ret;
+
+ chained_irq_enter(chip, desc);
+ pcie = irq_desc_get_handler_data(desc);
+ dev = &pcie->pdev->dev;
+
+ status = readl(pcie->hip_base + pcie->pcie_data->port_conf_offset +
+ pcie->pcie_data->port_irq_status_offset);
+ if (status & CFG_AER) {
+ writel(CFG_AER, (pcie->hip_base + pcie->pcie_data->port_conf_offset +
+ pcie->pcie_data->port_irq_status_offset));
+
+ ret = generic_handle_domain_irq(pcie->irq_domain, 0);
+ if (ret)
+ dev_err_ratelimited(dev, "unexpected IRQ %d\n", pcie->irq);
+ }
chained_irq_exit(chip, desc);
}
@@ -670,7 +855,7 @@ static int altera_pcie_init_irq_domain(struct altera_pcie *pcie)
struct device_node *node = dev->of_node;
/* Setup INTx */
- pcie->irq_domain = irq_domain_add_linear(node, PCI_NUM_INTX,
+ pcie->irq_domain = irq_domain_create_linear(of_fwnode_handle(node), PCI_NUM_INTX,
&intx_domain_ops, pcie);
if (!pcie->irq_domain) {
dev_err(dev, "Failed to get a INTx IRQ domain\n");
@@ -695,9 +880,9 @@ static int altera_pcie_parse_dt(struct altera_pcie *pcie)
if (IS_ERR(pcie->cra_base))
return PTR_ERR(pcie->cra_base);
- if (pcie->pcie_data->version == ALTERA_PCIE_V2) {
- pcie->hip_base =
- devm_platform_ioremap_resource_byname(pdev, "Hip");
+ if (pcie->pcie_data->version == ALTERA_PCIE_V2 ||
+ pcie->pcie_data->version == ALTERA_PCIE_V3) {
+ pcie->hip_base = devm_platform_ioremap_resource_byname(pdev, "Hip");
if (IS_ERR(pcie->hip_base))
return PTR_ERR(pcie->hip_base);
}
@@ -707,7 +892,7 @@ static int altera_pcie_parse_dt(struct altera_pcie *pcie)
if (pcie->irq < 0)
return pcie->irq;
- irq_set_chained_handler_and_data(pcie->irq, altera_pcie_isr, pcie);
+ irq_set_chained_handler_and_data(pcie->irq, pcie->pcie_data->ops->rp_isr, pcie);
return 0;
}
@@ -720,6 +905,7 @@ static const struct altera_pcie_ops altera_pcie_ops_1_0 = {
.tlp_read_pkt = tlp_read_packet,
.tlp_write_pkt = tlp_write_packet,
.get_link_status = altera_pcie_link_up,
+ .rp_isr = altera_pcie_isr,
};
static const struct altera_pcie_ops altera_pcie_ops_2_0 = {
@@ -728,6 +914,16 @@ static const struct altera_pcie_ops altera_pcie_ops_2_0 = {
.get_link_status = s10_altera_pcie_link_up,
.rp_read_cfg = s10_rp_read_cfg,
.rp_write_cfg = s10_rp_write_cfg,
+ .rp_isr = altera_pcie_isr,
+};
+
+static const struct altera_pcie_ops altera_pcie_ops_3_0 = {
+ .rp_read_cfg = aglx_rp_read_cfg,
+ .rp_write_cfg = aglx_rp_write_cfg,
+ .get_link_status = aglx_altera_pcie_link_up,
+ .ep_read_cfg = aglx_ep_read_cfg,
+ .ep_write_cfg = aglx_ep_write_cfg,
+ .rp_isr = aglx_isr,
};
static const struct altera_pcie_data altera_pcie_1_0_data = {
@@ -750,11 +946,44 @@ static const struct altera_pcie_data altera_pcie_2_0_data = {
.cfgwr1 = S10_TLP_FMTTYPE_CFGWR1,
};
+static const struct altera_pcie_data altera_pcie_3_0_f_tile_data = {
+ .ops = &altera_pcie_ops_3_0,
+ .version = ALTERA_PCIE_V3,
+ .cap_offset = 0x70,
+ .port_conf_offset = 0x14000,
+ .port_irq_status_offset = AGLX_ROOT_PORT_IRQ_STATUS,
+ .port_irq_enable_offset = AGLX_ROOT_PORT_IRQ_ENABLE,
+};
+
+static const struct altera_pcie_data altera_pcie_3_0_p_tile_data = {
+ .ops = &altera_pcie_ops_3_0,
+ .version = ALTERA_PCIE_V3,
+ .cap_offset = 0x70,
+ .port_conf_offset = 0x104000,
+ .port_irq_status_offset = AGLX_ROOT_PORT_IRQ_STATUS,
+ .port_irq_enable_offset = AGLX_ROOT_PORT_IRQ_ENABLE,
+};
+
+static const struct altera_pcie_data altera_pcie_3_0_r_tile_data = {
+ .ops = &altera_pcie_ops_3_0,
+ .version = ALTERA_PCIE_V3,
+ .cap_offset = 0x70,
+ .port_conf_offset = 0x1300,
+ .port_irq_status_offset = 0x0,
+ .port_irq_enable_offset = 0x4,
+};
+
static const struct of_device_id altera_pcie_of_match[] = {
{.compatible = "altr,pcie-root-port-1.0",
.data = &altera_pcie_1_0_data },
{.compatible = "altr,pcie-root-port-2.0",
.data = &altera_pcie_2_0_data },
+ {.compatible = "altr,pcie-root-port-3.0-f-tile",
+ .data = &altera_pcie_3_0_f_tile_data },
+ {.compatible = "altr,pcie-root-port-3.0-p-tile",
+ .data = &altera_pcie_3_0_p_tile_data },
+ {.compatible = "altr,pcie-root-port-3.0-r-tile",
+ .data = &altera_pcie_3_0_r_tile_data },
{},
};
@@ -792,11 +1021,18 @@ static int altera_pcie_probe(struct platform_device *pdev)
return ret;
}
- /* clear all interrupts */
- cra_writel(pcie, P2A_INT_STS_ALL, P2A_INT_STATUS);
- /* enable all interrupts */
- cra_writel(pcie, P2A_INT_ENA_ALL, P2A_INT_ENABLE);
- altera_pcie_host_init(pcie);
+ if (pcie->pcie_data->version == ALTERA_PCIE_V1 ||
+ pcie->pcie_data->version == ALTERA_PCIE_V2) {
+ /* clear all interrupts */
+ cra_writel(pcie, P2A_INT_STS_ALL, P2A_INT_STATUS);
+ /* enable all interrupts */
+ cra_writel(pcie, P2A_INT_ENA_ALL, P2A_INT_ENABLE);
+ altera_pcie_host_init(pcie);
+ } else if (pcie->pcie_data->version == ALTERA_PCIE_V3) {
+ writel(CFG_AER,
+ pcie->hip_base + pcie->pcie_data->port_conf_offset +
+ pcie->pcie_data->port_irq_enable_offset);
+ }
bridge->sysdata = pcie;
bridge->busnr = pcie->root_bus_nr;
@@ -816,14 +1052,15 @@ static void altera_pcie_remove(struct platform_device *pdev)
}
static struct platform_driver altera_pcie_driver = {
- .probe = altera_pcie_probe,
- .remove_new = altera_pcie_remove,
+ .probe = altera_pcie_probe,
+ .remove = altera_pcie_remove,
.driver = {
- .name = "altera-pcie",
+ .name = "altera-pcie",
.of_match_table = altera_pcie_of_match,
},
};
MODULE_DEVICE_TABLE(of, altera_pcie_of_match);
module_platform_driver(altera_pcie_driver);
+MODULE_DESCRIPTION("Altera PCIe host controller driver");
MODULE_LICENSE("GPL v2");
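
In the new Agilex (V3) endpoint config accessors above, the request type is folded into bits 13:12 of the register offset with FIELD_PREP() before the CRA access. A sketch of just that encoding step, reusing the driver's new defines:

/* Route a config access beyond the secondary bus as a Type 1 request */
static u32 demo_encode_cfg_offset(u32 where, bool beyond_secondary)
{
	if (beyond_secondary)
		where |= FIELD_PREP(AGLX_CFG_TARGET, AGLX_CFG_TARGET_TYPE1);
	return where;
}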
diff --git a/drivers/pci/controller/pcie-apple.c b/drivers/pci/controller/pcie-apple.c
index f7a248393a8f..3d412a931774 100644
--- a/drivers/pci/controller/pcie-apple.c
+++ b/drivers/pci/controller/pcie-apple.c
@@ -22,11 +22,11 @@
#include <linux/kernel.h>
#include <linux/iopoll.h>
#include <linux/irqchip/chained_irq.h>
+#include <linux/irqchip/irq-msi-lib.h>
#include <linux/irqdomain.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/msi.h>
-#include <linux/notifier.h>
#include <linux/of_irq.h>
#include <linux/pci-ecam.h>
@@ -134,7 +134,6 @@ struct apple_pcie {
struct mutex lock;
struct device *dev;
void __iomem *base;
- struct irq_domain *domain;
unsigned long *bitmap;
struct list_head ports;
struct completion event;
@@ -163,27 +162,6 @@ static void rmw_clear(u32 clr, void __iomem *addr)
writel_relaxed(readl_relaxed(addr) & ~clr, addr);
}
-static void apple_msi_top_irq_mask(struct irq_data *d)
-{
- pci_msi_mask_irq(d);
- irq_chip_mask_parent(d);
-}
-
-static void apple_msi_top_irq_unmask(struct irq_data *d)
-{
- pci_msi_unmask_irq(d);
- irq_chip_unmask_parent(d);
-}
-
-static struct irq_chip apple_msi_top_chip = {
- .name = "PCIe MSI",
- .irq_mask = apple_msi_top_irq_mask,
- .irq_unmask = apple_msi_top_irq_unmask,
- .irq_eoi = irq_chip_eoi_parent,
- .irq_set_affinity = irq_chip_set_affinity_parent,
- .irq_set_type = irq_chip_set_type_parent,
-};
-
static void apple_msi_compose_msg(struct irq_data *data, struct msi_msg *msg)
{
msg->address_hi = upper_32_bits(DOORBELL_ADDR);
@@ -227,8 +205,7 @@ static int apple_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
for (i = 0; i < nr_irqs; i++) {
irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
- &apple_msi_bottom_chip,
- domain->host_data);
+ &apple_msi_bottom_chip, pcie);
}
return 0;
@@ -252,12 +229,6 @@ static const struct irq_domain_ops apple_msi_domain_ops = {
.free = apple_msi_domain_free,
};
-static struct msi_domain_info apple_msi_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX),
- .chip = &apple_msi_top_chip,
-};
-
static void apple_port_irq_mask(struct irq_data *data)
{
struct apple_pcie_port *port = irq_data_get_irq_chip_data(data);
@@ -596,11 +567,28 @@ static int apple_pcie_setup_port(struct apple_pcie *pcie,
return 0;
}
+static const struct msi_parent_ops apple_msi_parent_ops = {
+ .supported_flags = (MSI_GENERIC_FLAGS_MASK |
+ MSI_FLAG_PCI_MSIX |
+ MSI_FLAG_MULTI_PCI_MSI),
+ .required_flags = (MSI_FLAG_USE_DEF_DOM_OPS |
+ MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_PCI_MSI_MASK_PARENT),
+ .chip_flags = MSI_CHIP_FLAG_SET_EOI,
+ .bus_select_token = DOMAIN_BUS_PCI_MSI,
+ .init_dev_msi_info = msi_lib_init_dev_msi_info,
+};
+
static int apple_msi_init(struct apple_pcie *pcie)
{
struct fwnode_handle *fwnode = dev_fwnode(pcie->dev);
+ struct irq_domain_info info = {
+ .fwnode = fwnode,
+ .ops = &apple_msi_domain_ops,
+ .size = pcie->nvecs,
+ .host_data = pcie,
+ };
struct of_phandle_args args = {};
- struct irq_domain *parent;
int ret;
ret = of_parse_phandle_with_args(to_of_node(fwnode), "msi-ranges",
@@ -620,28 +608,16 @@ static int apple_msi_init(struct apple_pcie *pcie)
if (!pcie->bitmap)
return -ENOMEM;
- parent = irq_find_matching_fwspec(&pcie->fwspec, DOMAIN_BUS_WIRED);
- if (!parent) {
+ info.parent = irq_find_matching_fwspec(&pcie->fwspec, DOMAIN_BUS_WIRED);
+ if (!info.parent) {
dev_err(pcie->dev, "failed to find parent domain\n");
return -ENXIO;
}
- parent = irq_domain_create_hierarchy(parent, 0, pcie->nvecs, fwnode,
- &apple_msi_domain_ops, pcie);
- if (!parent) {
+ if (!msi_create_parent_irq_domain(&info, &apple_msi_parent_ops)) {
dev_err(pcie->dev, "failed to create IRQ domain\n");
return -ENOMEM;
}
- irq_domain_update_bus_token(parent, DOMAIN_BUS_NEXUS);
-
- pcie->domain = pci_msi_create_irq_domain(fwnode, &apple_msi_info,
- parent);
- if (!pcie->domain) {
- dev_err(pcie->dev, "failed to create MSI domain\n");
- irq_domain_remove(parent);
- return -ENOMEM;
- }
-
return 0;
}
@@ -667,12 +643,16 @@ static struct apple_pcie_port *apple_pcie_get_port(struct pci_dev *pdev)
return NULL;
}
-static int apple_pcie_add_device(struct apple_pcie_port *port,
- struct pci_dev *pdev)
+static int apple_pcie_enable_device(struct pci_host_bridge *bridge, struct pci_dev *pdev)
{
u32 sid, rid = pci_dev_id(pdev);
+ struct apple_pcie_port *port;
int idx, err;
+ port = apple_pcie_get_port(pdev);
+ if (!port)
+ return 0;
+
dev_dbg(&pdev->dev, "added to bus %s, index %d\n",
pci_name(pdev->bus->self), port->idx);
@@ -698,12 +678,16 @@ static int apple_pcie_add_device(struct apple_pcie_port *port,
return idx >= 0 ? 0 : -ENOSPC;
}
-static void apple_pcie_release_device(struct apple_pcie_port *port,
- struct pci_dev *pdev)
+static void apple_pcie_disable_device(struct pci_host_bridge *bridge, struct pci_dev *pdev)
{
+ struct apple_pcie_port *port;
u32 rid = pci_dev_id(pdev);
int idx;
+ port = apple_pcie_get_port(pdev);
+ if (!port)
+ return;
+
mutex_lock(&port->pcie->lock);
for_each_set_bit(idx, port->sid_map, port->sid_map_sz) {
@@ -721,50 +705,10 @@ static void apple_pcie_release_device(struct apple_pcie_port *port,
mutex_unlock(&port->pcie->lock);
}
-static int apple_pcie_bus_notifier(struct notifier_block *nb,
- unsigned long action,
- void *data)
-{
- struct device *dev = data;
- struct pci_dev *pdev = to_pci_dev(dev);
- struct apple_pcie_port *port;
- int err;
-
- /*
- * This is a bit ugly. We assume that if we get notified for
- * any PCI device, we must be in charge of it, and that there
- * is no other PCI controller in the whole system. It probably
- * holds for now, but who knows for how long?
- */
- port = apple_pcie_get_port(pdev);
- if (!port)
- return NOTIFY_DONE;
-
- switch (action) {
- case BUS_NOTIFY_ADD_DEVICE:
- err = apple_pcie_add_device(port, pdev);
- if (err)
- return notifier_from_errno(err);
- break;
- case BUS_NOTIFY_DEL_DEVICE:
- apple_pcie_release_device(port, pdev);
- break;
- default:
- return NOTIFY_DONE;
- }
-
- return NOTIFY_OK;
-}
-
-static struct notifier_block apple_pcie_nb = {
- .notifier_call = apple_pcie_bus_notifier,
-};
-
static int apple_pcie_init(struct pci_config_window *cfg)
{
struct device *dev = cfg->parent;
struct platform_device *platform = to_platform_device(dev);
- struct device_node *of_port;
struct apple_pcie *pcie;
int ret;
@@ -787,11 +731,10 @@ static int apple_pcie_init(struct pci_config_window *cfg)
if (ret)
return ret;
- for_each_child_of_node(dev->of_node, of_port) {
+ for_each_child_of_node_scoped(dev->of_node, of_port) {
ret = apple_pcie_setup_port(pcie, of_port);
if (ret) {
dev_err(pcie->dev, "Port %pOF setup fail: %d\n", of_port, ret);
- of_node_put(of_port);
return ret;
}
}
@@ -799,23 +742,10 @@ static int apple_pcie_init(struct pci_config_window *cfg)
return 0;
}
-static int apple_pcie_probe(struct platform_device *pdev)
-{
- int ret;
-
- ret = bus_register_notifier(&pci_bus_type, &apple_pcie_nb);
- if (ret)
- return ret;
-
- ret = pci_host_common_probe(pdev);
- if (ret)
- bus_unregister_notifier(&pci_bus_type, &apple_pcie_nb);
-
- return ret;
-}
-
static const struct pci_ecam_ops apple_pcie_cfg_ecam_ops = {
.init = apple_pcie_init,
+ .enable_device = apple_pcie_enable_device,
+ .disable_device = apple_pcie_disable_device,
.pci_ops = {
.map_bus = pci_ecam_map_bus,
.read = pci_generic_config_read,
@@ -830,7 +760,7 @@ static const struct of_device_id apple_pcie_of_match[] = {
MODULE_DEVICE_TABLE(of, apple_pcie_of_match);
static struct platform_driver apple_pcie_driver = {
- .probe = apple_pcie_probe,
+ .probe = pci_host_common_probe,
.driver = {
.name = "pcie-apple",
.of_match_table = apple_pcie_of_match,
@@ -839,4 +769,5 @@ static struct platform_driver apple_pcie_driver = {
};
module_platform_driver(apple_pcie_driver);
+MODULE_DESCRIPTION("Apple PCIe host bridge driver");
MODULE_LICENSE("GPL v2");
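
Both the Tegra and Apple conversions above move to for_each_child_of_node_scoped(), which declares the child node with automatic cleanup so early returns no longer need of_node_put(). Sketch:

static int demo_parse_ports(struct device_node *np)
{
	for_each_child_of_node_scoped(np, port) {
		if (!of_device_is_available(port))
			return -ENODEV;	/* reference dropped automatically */
	}
	return 0;	/* no of_node_put() bookkeeping on any path */
}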
diff --git a/drivers/pci/controller/pcie-brcmstb.c b/drivers/pci/controller/pcie-brcmstb.c
index c08683febdd4..92887b394eb4 100644
--- a/drivers/pci/controller/pcie-brcmstb.c
+++ b/drivers/pci/controller/pcie-brcmstb.c
@@ -40,7 +40,7 @@
/* Broadcom STB PCIe Register Offsets */
#define PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1 0x0188
#define PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1_ENDIAN_MODE_BAR2_MASK 0xc
-#define PCIE_RC_CFG_VENDOR_SPCIFIC_REG1_LITTLE_ENDIAN 0x0
+#define PCIE_RC_CFG_VENDOR_SPECIFIC_REG1_LITTLE_ENDIAN 0x0
#define PCIE_RC_CFG_PRIV1_ID_VAL3 0x043c
#define PCIE_RC_CFG_PRIV1_ID_VAL3_CLASS_CODE_MASK 0xffffff
@@ -55,6 +55,10 @@
#define PCIE_RC_DL_MDIO_WR_DATA 0x1104
#define PCIE_RC_DL_MDIO_RD_DATA 0x1108
+#define PCIE_RC_PL_PHY_CTL_15 0x184c
+#define PCIE_RC_PL_PHY_CTL_15_DIS_PLL_PD_MASK 0x400000
+#define PCIE_RC_PL_PHY_CTL_15_PM_CLK_PERIOD_MASK 0xff
+
#define PCIE_MISC_MISC_CTRL 0x4008
#define PCIE_MISC_MISC_CTRL_PCIE_RCB_64B_MODE_MASK 0x80
#define PCIE_MISC_MISC_CTRL_PCIE_RCB_MPS_MODE_MASK 0x400
@@ -75,15 +79,19 @@
#define PCIE_MEM_WIN0_HI(win) \
PCIE_MISC_CPU_2_PCIE_MEM_WIN0_HI + ((win) * 8)
+/*
+ * NOTE: You may see the term "BAR" in a number of register names used by
+ * this driver. The term is an artifact of when the HW core was an
+ * endpoint device (EP). Now it is a root complex (RC), and wherever a
+ * register name contains the term "BAR" it refers to an inbound window.
+ */
+
+#define PCIE_BRCM_MAX_INBOUND_WINS 16
#define PCIE_MISC_RC_BAR1_CONFIG_LO 0x402c
#define PCIE_MISC_RC_BAR1_CONFIG_LO_SIZE_MASK 0x1f
-#define PCIE_MISC_RC_BAR2_CONFIG_LO 0x4034
-#define PCIE_MISC_RC_BAR2_CONFIG_LO_SIZE_MASK 0x1f
-#define PCIE_MISC_RC_BAR2_CONFIG_HI 0x4038
+#define PCIE_MISC_RC_BAR4_CONFIG_LO 0x40d4
-#define PCIE_MISC_RC_BAR3_CONFIG_LO 0x403c
-#define PCIE_MISC_RC_BAR3_CONFIG_LO_SIZE_MASK 0x1f
#define PCIE_MISC_MSI_BAR_CONFIG_LO 0x4044
#define PCIE_MISC_MSI_BAR_CONFIG_HI 0x4048
@@ -122,7 +130,6 @@
#define PCIE_MEM_WIN0_LIMIT_HI(win) \
PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LIMIT_HI + ((win) * 8)
-#define PCIE_MISC_HARD_PCIE_HARD_DEBUG 0x4204
#define PCIE_MISC_HARD_PCIE_HARD_DEBUG_CLKREQ_DEBUG_ENABLE_MASK 0x2
#define PCIE_MISC_HARD_PCIE_HARD_DEBUG_L1SS_ENABLE_MASK 0x200000
#define PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK 0x08000000
@@ -131,17 +138,18 @@
(PCIE_MISC_HARD_PCIE_HARD_DEBUG_CLKREQ_DEBUG_ENABLE_MASK | \
PCIE_MISC_HARD_PCIE_HARD_DEBUG_L1SS_ENABLE_MASK)
-#define PCIE_INTR2_CPU_BASE 0x4300
+#define PCIE_MISC_UBUS_BAR1_CONFIG_REMAP 0x40ac
+#define PCIE_MISC_UBUS_BAR1_CONFIG_REMAP_ACCESS_EN_MASK BIT(0)
+#define PCIE_MISC_UBUS_BAR4_CONFIG_REMAP 0x410c
+
#define PCIE_MSI_INTR2_BASE 0x4500
-/* Offsets from PCIE_INTR2_CPU_BASE and PCIE_MSI_INTR2_BASE */
+
+/* Offsets from the INTR2_CPU_BASE and MSI_INTR2_BASE registers */
#define MSI_INT_STATUS 0x0
#define MSI_INT_CLR 0x8
#define MSI_INT_MASK_SET 0x10
#define MSI_INT_MASK_CLR 0x14
-#define PCIE_EXT_CFG_DATA 0x8000
-#define PCIE_EXT_CFG_INDEX 0x9000
-
#define PCIE_RGR1_SW_INIT_1_PERST_MASK 0x1
#define PCIE_RGR1_SW_INIT_1_PERST_SHIFT 0x0
@@ -167,8 +175,9 @@
#define MDIO_PORT0 0x0
#define MDIO_DATA_MASK 0x7fffffff
#define MDIO_PORT_MASK 0xf0000
+#define MDIO_PORT_EXT_MASK 0x200000
#define MDIO_REGAD_MASK 0xffff
-#define MDIO_CMD_MASK 0xfff00000
+#define MDIO_CMD_MASK 0x00100000
#define MDIO_CMD_READ 0x1
#define MDIO_CMD_WRITE 0x0
#define MDIO_DATA_DONE_MASK 0x80000000
@@ -184,9 +193,11 @@
#define SSC_STATUS_PLL_LOCK_MASK 0x800
#define PCIE_BRCM_MAX_MEMC 3
-#define IDX_ADDR(pcie) (pcie->reg_offsets[EXT_CFG_INDEX])
-#define DATA_ADDR(pcie) (pcie->reg_offsets[EXT_CFG_DATA])
-#define PCIE_RGR1_SW_INIT_1(pcie) (pcie->reg_offsets[RGR1_SW_INIT_1])
+#define IDX_ADDR(pcie) ((pcie)->cfg->offsets[EXT_CFG_INDEX])
+#define DATA_ADDR(pcie) ((pcie)->cfg->offsets[EXT_CFG_DATA])
+#define PCIE_RGR1_SW_INIT_1(pcie) ((pcie)->cfg->offsets[RGR1_SW_INIT_1])
+#define HARD_DEBUG(pcie) ((pcie)->cfg->offsets[PCIE_HARD_DEBUG])
+#define INTR2_CPU_BASE(pcie) ((pcie)->cfg->offsets[PCIE_INTR2_CPU_BASE])
/* Rescal registers */
#define PCIE_DVT_PMU_PCIE_PHY_CTRL 0xc700
@@ -205,27 +216,44 @@ enum {
RGR1_SW_INIT_1,
EXT_CFG_INDEX,
EXT_CFG_DATA,
+ PCIE_HARD_DEBUG,
+ PCIE_INTR2_CPU_BASE,
};
-enum {
- RGR1_SW_INIT_1_INIT_MASK,
- RGR1_SW_INIT_1_INIT_SHIFT,
-};
-
-enum pcie_type {
+enum pcie_soc_base {
GENERIC,
- BCM7425,
- BCM7435,
+ BCM2711,
BCM4908,
BCM7278,
- BCM2711,
+ BCM7425,
+ BCM7435,
+ BCM7712,
+};
+
+struct inbound_win {
+ u64 size;
+ u64 pci_offset;
+ u64 cpu_addr;
};
+/*
+ * The RESCAL block is tied to PCIe controller #1, regardless of the number of
+ * controllers. Turning off PCIe controller #1 prevents access to the RESCAL
+ * register blocks, so no other controller can access this register space;
+ * depending upon the bus fabric, we may get a timeout (UBUS/GISB) or a hang
+ * (AXI).
+ */
+#define CFG_QUIRK_AVOID_BRIDGE_SHUTDOWN BIT(0)
+
struct pcie_cfg_data {
const int *offsets;
- const enum pcie_type type;
- void (*perst_set)(struct brcm_pcie *pcie, u32 val);
- void (*bridge_sw_init_set)(struct brcm_pcie *pcie, u32 val);
+ const enum pcie_soc_base soc_base;
+ const bool has_phy;
+ const u32 quirks;
+ u8 num_inbound_wins;
+ int (*perst_set)(struct brcm_pcie *pcie, u32 val);
+ int (*bridge_sw_init_set)(struct brcm_pcie *pcie, u32 val);
+ int (*post_setup)(struct brcm_pcie *pcie);
};
struct subdev_regulators {
@@ -261,22 +289,21 @@ struct brcm_pcie {
int gen;
u64 msi_target_addr;
struct brcm_msi *msi;
- const int *reg_offsets;
- enum pcie_type type;
struct reset_control *rescal;
struct reset_control *perst_reset;
+ struct reset_control *bridge_reset;
+ struct reset_control *swinit_reset;
int num_memc;
u64 memc_size[PCIE_BRCM_MAX_MEMC];
u32 hw_rev;
- void (*perst_set)(struct brcm_pcie *pcie, u32 val);
- void (*bridge_sw_init_set)(struct brcm_pcie *pcie, u32 val);
struct subdev_regulators *sr;
bool ep_wakeup_capable;
+ const struct pcie_cfg_data *cfg;
};
static inline bool is_bmips(const struct brcm_pcie *pcie)
{
- return pcie->type == BCM7435 || pcie->type == BCM7425;
+ return pcie->cfg->soc_base == BCM7435 || pcie->cfg->soc_base == BCM7425;
}
/*
@@ -290,8 +317,8 @@ static int brcm_pcie_encode_ibar_size(u64 size)
if (log2_in >= 12 && log2_in <= 15)
/* Covers 4KB to 32KB (inclusive) */
return (log2_in - 12) + 0x1c;
- else if (log2_in >= 16 && log2_in <= 35)
- /* Covers 64KB to 32GB, (inclusive) */
+ else if (log2_in >= 16 && log2_in <= 36)
+ /* Covers 64KB to 64GB, (inclusive) */
return log2_in - 15;
/* Something is awry so disable */
return 0;
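
A few worked values make the widened range easy to check; they follow directly from the encoding above (log2(size) - 15 for the 64KB-64GB range, (log2(size) - 12) + 0x1c for 4KB-32KB):

    /*
     * Worked values for brcm_pcie_encode_ibar_size():
     *   SZ_8K  (log2 = 13) -> 0x1d   (4KB..32KB range)
     *   SZ_64K (log2 = 16) -> 0x01
     *   SZ_4G  (log2 = 32) -> 0x11
     *   SZ_64G (log2 = 36) -> 0x15   (new 64GB upper bound)
     */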
@@ -301,6 +328,7 @@ static u32 brcm_pcie_mdio_form_pkt(int port, int regad, int cmd)
{
u32 pkt = 0;
+ pkt |= FIELD_PREP(MDIO_PORT_EXT_MASK, port >> 4);
pkt |= FIELD_PREP(MDIO_PORT_MASK, port);
pkt |= FIELD_PREP(MDIO_REGAD_MASK, regad);
pkt |= FIELD_PREP(MDIO_CMD_MASK, cmd);
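
With the new extension bit, ports above 15 split across two fields; a worked value, relying on FIELD_PREP() masking the value to the field width:

    /*
     * Example: port = 0x15 (21)
     *   FIELD_PREP(MDIO_PORT_EXT_MASK, 0x15 >> 4) -> bit 21 set (ext = 0x1)
     *   FIELD_PREP(MDIO_PORT_MASK, 0x15)          -> 0x5 in bits [19:16]
     */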
@@ -384,17 +412,17 @@ static int brcm_pcie_set_ssc(struct brcm_pcie *pcie)
static void brcm_pcie_set_gen(struct brcm_pcie *pcie, int gen)
{
u16 lnkctl2 = readw(pcie->base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKCTL2);
- u32 lnkcap = readl(pcie->base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKCAP);
+ u32 lnkcap = readl(pcie->base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY);
- lnkcap = (lnkcap & ~PCI_EXP_LNKCAP_SLS) | gen;
- writel(lnkcap, pcie->base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKCAP);
+ u32p_replace_bits(&lnkcap, gen, PCI_EXP_LNKCAP_SLS);
+ writel(lnkcap, pcie->base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY);
- lnkctl2 = (lnkctl2 & ~0xf) | gen;
+ u16p_replace_bits(&lnkctl2, gen, PCI_EXP_LNKCTL2_TLS);
writew(lnkctl2, pcie->base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKCTL2);
}
static void brcm_pcie_set_outbound_win(struct brcm_pcie *pcie,
- unsigned int win, u64 cpu_addr,
+ u8 win, u64 cpu_addr,
u64 pcie_addr, u64 size)
{
u32 cpu_addr_mb_high, limit_addr_mb_high;
@@ -445,8 +473,8 @@ static struct irq_chip brcm_msi_irq_chip = {
};
static struct msi_domain_info brcm_msi_domain_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_MULTI_PCI_MSI),
+ .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_NO_AFFINITY | MSI_FLAG_MULTI_PCI_MSI,
.chip = &brcm_msi_irq_chip,
};
@@ -484,12 +512,6 @@ static void brcm_msi_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
msg->data = (0xffff & PCIE_MISC_MSI_DATA_CONFIG_VAL_32) | data->hwirq;
}
-static int brcm_msi_set_affinity(struct irq_data *irq_data,
- const struct cpumask *mask, bool force)
-{
- return -EINVAL;
-}
-
static void brcm_msi_ack_irq(struct irq_data *data)
{
struct brcm_msi *msi = irq_data_get_irq_chip_data(data);
@@ -502,7 +524,6 @@ static void brcm_msi_ack_irq(struct irq_data *data)
static struct irq_chip brcm_msi_bottom_irq_chip = {
.name = "BRCM STB MSI",
.irq_compose_msi_msg = brcm_msi_compose_msi_msg,
- .irq_set_affinity = brcm_msi_set_affinity,
.irq_ack = brcm_msi_ack_irq,
};
@@ -538,7 +559,7 @@ static int brcm_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
return hwirq;
for (i = 0; i < nr_irqs; i++)
- irq_domain_set_info(domain, virq + i, hwirq + i,
+ irq_domain_set_info(domain, virq + i, (irq_hw_number_t)hwirq + i,
&brcm_msi_bottom_irq_chip, domain->host_data,
handle_edge_irq, NULL, NULL);
return 0;
@@ -560,10 +581,10 @@ static const struct irq_domain_ops msi_domain_ops = {
static int brcm_allocate_domains(struct brcm_msi *msi)
{
- struct fwnode_handle *fwnode = of_node_to_fwnode(msi->np);
+ struct fwnode_handle *fwnode = of_fwnode_handle(msi->np);
struct device *dev = msi->dev;
- msi->inner_domain = irq_domain_add_linear(NULL, msi->nr, &msi_domain_ops, msi);
+ msi->inner_domain = irq_domain_create_linear(NULL, msi->nr, &msi_domain_ops, msi);
if (!msi->inner_domain) {
dev_err(dev, "failed to create IRQ domain\n");
return -ENOMEM;
@@ -649,7 +670,7 @@ static int brcm_pcie_enable_msi(struct brcm_pcie *pcie)
BUILD_BUG_ON(BRCM_INT_PCI_MSI_LEGACY_NR > BRCM_INT_PCI_MSI_NR);
if (msi->legacy) {
- msi->intr_base = msi->base + PCIE_INTR2_CPU_BASE;
+ msi->intr_base = msi->base + INTR2_CPU_BASE(pcie);
msi->nr = BRCM_INT_PCI_MSI_LEGACY_NR;
msi->legacy_shift = 24;
} else {
@@ -705,8 +726,8 @@ static void __iomem *brcm_pcie_map_bus(struct pci_bus *bus,
/* For devices, write to the config space index register */
idx = PCIE_ECAM_OFFSET(bus->number, devfn, 0);
- writel(idx, pcie->base + PCIE_EXT_CFG_INDEX);
- return base + PCIE_EXT_CFG_DATA + PCIE_ECAM_REG(where);
+ writel(idx, base + IDX_ADDR(pcie));
+ return base + DATA_ADDR(pcie) + PCIE_ECAM_REG(where);
}
static void __iomem *brcm7425_pcie_map_bus(struct pci_bus *bus,
@@ -730,17 +751,33 @@ static void __iomem *brcm7425_pcie_map_bus(struct pci_bus *bus,
return base + DATA_ADDR(pcie);
}
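
Both map_bus variants above reduce to an index/data access pair, now parameterized by the per-SoC offset table. A minimal sketch of a config read under that scheme (demo_cfg_read() and the bus/devfn values are hypothetical, used only to show the pattern):

    static u32 demo_cfg_read(struct brcm_pcie *pcie, int where)
    {
    	/* Select bus 1, device 0, function 0 via the index register */
    	u32 idx = PCIE_ECAM_OFFSET(1, PCI_DEVFN(0, 0), 0);

    	writel(idx, pcie->base + IDX_ADDR(pcie));
    	/* Read the selected function's config space via the data register */
    	return readl(pcie->base + DATA_ADDR(pcie) + PCIE_ECAM_REG(where));
    }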
-static void brcm_pcie_bridge_sw_init_set_generic(struct brcm_pcie *pcie, u32 val)
+static int brcm_pcie_bridge_sw_init_set_generic(struct brcm_pcie *pcie, u32 val)
{
- u32 tmp, mask = RGR1_SW_INIT_1_INIT_GENERIC_MASK;
+ u32 tmp, mask = RGR1_SW_INIT_1_INIT_GENERIC_MASK;
u32 shift = RGR1_SW_INIT_1_INIT_GENERIC_SHIFT;
+ int ret = 0;
+
+ if (pcie->bridge_reset) {
+ if (val)
+ ret = reset_control_assert(pcie->bridge_reset);
+ else
+ ret = reset_control_deassert(pcie->bridge_reset);
+
+ if (ret)
+ dev_err(pcie->dev, "failed to %s 'bridge' reset, err=%d\n",
+ val ? "assert" : "deassert", ret);
+
+ return ret;
+ }
tmp = readl(pcie->base + PCIE_RGR1_SW_INIT_1(pcie));
tmp = (tmp & ~mask) | ((val << shift) & mask);
writel(tmp, pcie->base + PCIE_RGR1_SW_INIT_1(pcie));
+
+ return ret;
}
-static void brcm_pcie_bridge_sw_init_set_7278(struct brcm_pcie *pcie, u32 val)
+static int brcm_pcie_bridge_sw_init_set_7278(struct brcm_pcie *pcie, u32 val)
{
u32 tmp, mask = RGR1_SW_INIT_1_INIT_7278_MASK;
u32 shift = RGR1_SW_INIT_1_INIT_7278_SHIFT;
@@ -748,20 +785,29 @@ static void brcm_pcie_bridge_sw_init_set_7278(struct brcm_pcie *pcie, u32 val)
tmp = readl(pcie->base + PCIE_RGR1_SW_INIT_1(pcie));
tmp = (tmp & ~mask) | ((val << shift) & mask);
writel(tmp, pcie->base + PCIE_RGR1_SW_INIT_1(pcie));
+
+ return 0;
}
-static void brcm_pcie_perst_set_4908(struct brcm_pcie *pcie, u32 val)
+static int brcm_pcie_perst_set_4908(struct brcm_pcie *pcie, u32 val)
{
+ int ret;
+
if (WARN_ONCE(!pcie->perst_reset, "missing PERST# reset controller\n"))
- return;
+ return -EINVAL;
if (val)
- reset_control_assert(pcie->perst_reset);
+ ret = reset_control_assert(pcie->perst_reset);
else
- reset_control_deassert(pcie->perst_reset);
+ ret = reset_control_deassert(pcie->perst_reset);
+
+ if (ret)
+ dev_err(pcie->dev, "failed to %s 'perst' reset, err=%d\n",
+ val ? "assert" : "deassert", ret);
+ return ret;
}
-static void brcm_pcie_perst_set_7278(struct brcm_pcie *pcie, u32 val)
+static int brcm_pcie_perst_set_7278(struct brcm_pcie *pcie, u32 val)
{
u32 tmp;
@@ -769,34 +815,110 @@ static void brcm_pcie_perst_set_7278(struct brcm_pcie *pcie, u32 val)
tmp = readl(pcie->base + PCIE_MISC_PCIE_CTRL);
u32p_replace_bits(&tmp, !val, PCIE_MISC_PCIE_CTRL_PCIE_PERSTB_MASK);
writel(tmp, pcie->base + PCIE_MISC_PCIE_CTRL);
+
+ return 0;
}
-static void brcm_pcie_perst_set_generic(struct brcm_pcie *pcie, u32 val)
+static int brcm_pcie_perst_set_generic(struct brcm_pcie *pcie, u32 val)
{
u32 tmp;
tmp = readl(pcie->base + PCIE_RGR1_SW_INIT_1(pcie));
u32p_replace_bits(&tmp, val, PCIE_RGR1_SW_INIT_1_PERST_MASK);
writel(tmp, pcie->base + PCIE_RGR1_SW_INIT_1(pcie));
+
+ return 0;
}
-static int brcm_pcie_get_rc_bar2_size_and_offset(struct brcm_pcie *pcie,
- u64 *rc_bar2_size,
- u64 *rc_bar2_offset)
+static int brcm_pcie_post_setup_bcm2712(struct brcm_pcie *pcie)
+{
+ static const u16 data[] = { 0x50b9, 0xbda1, 0x0094, 0x97b4, 0x5030,
+ 0x5030, 0x0007 };
+ static const u8 regs[] = { 0x16, 0x17, 0x18, 0x19, 0x1b, 0x1c, 0x1e };
+ int ret, i;
+ u32 tmp;
+
+ /* Allow a 54MHz (xosc) refclk source */
+ ret = brcm_pcie_mdio_write(pcie->base, MDIO_PORT0, SET_ADDR_OFFSET, 0x1600);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < ARRAY_SIZE(regs); i++) {
+ ret = brcm_pcie_mdio_write(pcie->base, MDIO_PORT0, regs[i], data[i]);
+ if (ret < 0)
+ return ret;
+ }
+
+ usleep_range(100, 200);
+
+ /*
+ * Set L1SS sub-state timers to avoid lengthy state transitions;
+ * the PM clock period is 18.52ns (1/54MHz, rounded down).
+ */
+ tmp = readl(pcie->base + PCIE_RC_PL_PHY_CTL_15);
+ tmp &= ~PCIE_RC_PL_PHY_CTL_15_PM_CLK_PERIOD_MASK;
+ tmp |= 0x12;
+ writel(tmp, pcie->base + PCIE_RC_PL_PHY_CTL_15);
+
+ return 0;
+}
+
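The 0x12 written to the PM clock period field follows from the refclk arithmetic in the comment above, truncated in integer math:

    /*
     * PM clock period for a 54MHz refclk:
     *   1,000,000,000 ns / 54,000,000 Hz = 18.52ns, truncated to 18 == 0x12
     */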
+static void add_inbound_win(struct inbound_win *b, u8 *count, u64 size,
+ u64 cpu_addr, u64 pci_offset)
+{
+ b->size = size;
+ b->cpu_addr = cpu_addr;
+ b->pci_offset = pci_offset;
+ (*count)++;
+}
+
+static int brcm_pcie_get_inbound_wins(struct brcm_pcie *pcie,
+ struct inbound_win inbound_wins[])
{
struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
+ u64 pci_offset, cpu_addr, size = 0, tot_size = 0;
struct resource_entry *entry;
struct device *dev = pcie->dev;
u64 lowest_pcie_addr = ~(u64)0;
int ret, i = 0;
- u64 size = 0;
+ u8 n = 0;
+
+ /*
+ * The HW registers (and PCIe) use order-1 numbering for BARs. As such,
+ * we have inbound_wins[0] unused and BAR1 starts at inbound_wins[1].
+ */
+ struct inbound_win *b_begin = &inbound_wins[1];
+ struct inbound_win *b = b_begin;
+
+ /*
+	 * STB chips other than the 7712 disable the first inbound window by
+	 * default. Rather than being mapped to system memory, it is mapped to
+	 * the internal registers of the SoC. This feature is deprecated, has
+	 * security considerations, and is not implemented in our modern SoCs.
+ */
+ if (pcie->cfg->soc_base != BCM7712)
+ add_inbound_win(b++, &n, 0, 0, 0);
resource_list_for_each_entry(entry, &bridge->dma_ranges) {
- u64 pcie_beg = entry->res->start - entry->offset;
+ u64 pcie_start = entry->res->start - entry->offset;
+ u64 cpu_start = entry->res->start;
- size += entry->res->end - entry->res->start + 1;
- if (pcie_beg < lowest_pcie_addr)
- lowest_pcie_addr = pcie_beg;
+ size = resource_size(entry->res);
+ tot_size += size;
+ if (pcie_start < lowest_pcie_addr)
+ lowest_pcie_addr = pcie_start;
+ /*
+ * 7712 and newer chips may have many BARs, with each
+ * offering a non-overlapping viewport to system memory.
+		 * That being said, each BAR's size must still be a power of
+ * two.
+ */
+ if (pcie->cfg->soc_base == BCM7712)
+ add_inbound_win(b++, &n, size, cpu_start, pcie_start);
+
+ if (n > pcie->cfg->num_inbound_wins)
+ break;
}
if (lowest_pcie_addr == ~(u64)0) {
@@ -804,13 +926,20 @@ static int brcm_pcie_get_rc_bar2_size_and_offset(struct brcm_pcie *pcie,
return -EINVAL;
}
+ /*
+ * 7712 and newer chips do not have an internal memory mapping system
+	 * that enables multiple memory controllers. As such, we can return
+	 * now without doing any special configuration.
+ */
+ if (pcie->cfg->soc_base == BCM7712)
+ return n;
+
ret = of_property_read_variable_u64_array(pcie->np, "brcm,scb-sizes", pcie->memc_size, 1,
PCIE_BRCM_MAX_MEMC);
-
if (ret <= 0) {
/* Make an educated guess */
pcie->num_memc = 1;
- pcie->memc_size[0] = 1ULL << fls64(size - 1);
+ pcie->memc_size[0] = 1ULL << fls64(tot_size - 1);
} else {
pcie->num_memc = ret;
}
@@ -819,10 +948,15 @@ static int brcm_pcie_get_rc_bar2_size_and_offset(struct brcm_pcie *pcie,
for (i = 0, size = 0; i < pcie->num_memc; i++)
size += pcie->memc_size[i];
- /* System memory starts at this address in PCIe-space */
- *rc_bar2_offset = lowest_pcie_addr;
- /* The sum of all memc views must also be a power of 2 */
- *rc_bar2_size = 1ULL << fls64(size - 1);
+ /* Our HW mandates that the window size must be a power of 2 */
+ size = 1ULL << fls64(size - 1);
+
+ /*
+ * For STB chips, the BAR2 cpu_addr is hardwired to the start
+ * of system memory, so we set it to 0.
+ */
+ cpu_addr = 0;
+ pci_offset = lowest_pcie_addr;
/*
* We validate the inbound memory view even though we should trust
@@ -857,44 +991,119 @@ static int brcm_pcie_get_rc_bar2_size_and_offset(struct brcm_pcie *pcie,
* outbound memory @ 3GB). So instead it will start at the 1x
* multiple of its size
*/
- if (!*rc_bar2_size || (*rc_bar2_offset & (*rc_bar2_size - 1)) ||
- (*rc_bar2_offset < SZ_4G && *rc_bar2_offset > SZ_2G)) {
- dev_err(dev, "Invalid rc_bar2_offset/size: size 0x%llx, off 0x%llx\n",
- *rc_bar2_size, *rc_bar2_offset);
+ if (!size || (pci_offset & (size - 1)) ||
+ (pci_offset < SZ_4G && pci_offset > SZ_2G)) {
+ dev_err(dev, "Invalid inbound_win2_offset/size: size 0x%llx, off 0x%llx\n",
+ size, pci_offset);
return -EINVAL;
}
- return 0;
+ /* Enable inbound window 2, the main inbound window for STB chips */
+ add_inbound_win(b++, &n, size, cpu_addr, pci_offset);
+
+ /*
+	 * Disable inbound window 3. On some chips it presents the same
+	 * window as #2, but the data appears with a settable endianness.
+ */
+ add_inbound_win(b++, &n, 0, 0, 0);
+
+ return n;
+}
+
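For a pre-7712 STB chip with a single dma-range, the array filled in above ends up with three entries. A sketch of the result, assuming one 2GB range at PCIe address 0:

    /*
     * inbound_wins[0]: unused (order-1 numbering)
     * inbound_wins[1]: { .size = 0 }                      win 1 disabled
     * inbound_wins[2]: { .size = SZ_2G, .cpu_addr = 0,
     *                    .pci_offset = 0 }                main STB window
     * inbound_wins[3]: { .size = 0 }                      win 3 disabled
     *
     * brcm_pcie_get_inbound_wins() returns n == 3.
     */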
+static u32 brcm_bar_reg_offset(int bar)
+{
+ if (bar <= 3)
+ return PCIE_MISC_RC_BAR1_CONFIG_LO + 8 * (bar - 1);
+ else
+ return PCIE_MISC_RC_BAR4_CONFIG_LO + 8 * (bar - 4);
+}
+
+static u32 brcm_ubus_reg_offset(int bar)
+{
+ if (bar <= 3)
+ return PCIE_MISC_UBUS_BAR1_CONFIG_REMAP + 8 * (bar - 1);
+ else
+ return PCIE_MISC_UBUS_BAR4_CONFIG_REMAP + 8 * (bar - 4);
+}
+
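The two helpers above encode the register stride around the BAR1..3/BAR4+ split; worked offsets from the #defines earlier in this patch:

    /*
     * brcm_bar_reg_offset(1)  == 0x402c  (PCIE_MISC_RC_BAR1_CONFIG_LO)
     * brcm_bar_reg_offset(3)  == 0x403c
     * brcm_bar_reg_offset(4)  == 0x40d4  (PCIE_MISC_RC_BAR4_CONFIG_LO)
     * brcm_bar_reg_offset(5)  == 0x40dc
     *
     * brcm_ubus_reg_offset(1) == 0x40ac  (PCIE_MISC_UBUS_BAR1_CONFIG_REMAP)
     * brcm_ubus_reg_offset(4) == 0x410c  (PCIE_MISC_UBUS_BAR4_CONFIG_REMAP)
     */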
+static void set_inbound_win_registers(struct brcm_pcie *pcie,
+ const struct inbound_win *inbound_wins,
+ u8 num_inbound_wins)
+{
+ void __iomem *base = pcie->base;
+ int i;
+
+ for (i = 1; i <= num_inbound_wins; i++) {
+ u64 pci_offset = inbound_wins[i].pci_offset;
+ u64 cpu_addr = inbound_wins[i].cpu_addr;
+ u64 size = inbound_wins[i].size;
+ u32 reg_offset = brcm_bar_reg_offset(i);
+ u32 tmp = lower_32_bits(pci_offset);
+
+ u32p_replace_bits(&tmp, brcm_pcie_encode_ibar_size(size),
+ PCIE_MISC_RC_BAR1_CONFIG_LO_SIZE_MASK);
+
+ /* Write low */
+ writel_relaxed(tmp, base + reg_offset);
+ /* Write high */
+ writel_relaxed(upper_32_bits(pci_offset), base + reg_offset + 4);
+
+ /*
+ * Most STB chips:
+ * Do nothing.
+ * 7712:
+ * All of their BARs need to be set.
+ */
+ if (pcie->cfg->soc_base == BCM7712) {
+ /* BUS remap register settings */
+ reg_offset = brcm_ubus_reg_offset(i);
+ tmp = lower_32_bits(cpu_addr) & ~0xfff;
+ tmp |= PCIE_MISC_UBUS_BAR1_CONFIG_REMAP_ACCESS_EN_MASK;
+ writel_relaxed(tmp, base + reg_offset);
+ tmp = upper_32_bits(cpu_addr);
+ writel_relaxed(tmp, base + reg_offset + 4);
+ }
+ }
}
static int brcm_pcie_setup(struct brcm_pcie *pcie)
{
- u64 rc_bar2_offset, rc_bar2_size;
+ struct inbound_win inbound_wins[PCIE_BRCM_MAX_INBOUND_WINS];
void __iomem *base = pcie->base;
struct pci_host_bridge *bridge;
struct resource_entry *entry;
u32 tmp, burst, aspm_support;
- int num_out_wins = 0;
- int ret, memc;
+ u8 num_out_wins = 0;
+ int num_inbound_wins = 0;
+ int memc, ret;
/* Reset the bridge */
- pcie->bridge_sw_init_set(pcie, 1);
+ ret = pcie->cfg->bridge_sw_init_set(pcie, 1);
+ if (ret)
+ return ret;
/* Ensure that PERST# is asserted; some bootloaders may deassert it. */
- if (pcie->type == BCM2711)
- pcie->perst_set(pcie, 1);
+ if (pcie->cfg->soc_base == BCM2711) {
+ ret = pcie->cfg->perst_set(pcie, 1);
+ if (ret) {
+ pcie->cfg->bridge_sw_init_set(pcie, 0);
+ return ret;
+ }
+ }
usleep_range(100, 200);
/* Take the bridge out of reset */
- pcie->bridge_sw_init_set(pcie, 0);
+ ret = pcie->cfg->bridge_sw_init_set(pcie, 0);
+ if (ret)
+ return ret;
- tmp = readl(base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
+ tmp = readl(base + HARD_DEBUG(pcie));
if (is_bmips(pcie))
tmp &= ~PCIE_BMIPS_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK;
else
tmp &= ~PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK;
- writel(tmp, base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
+ writel(tmp, base + HARD_DEBUG(pcie));
/* Wait for SerDes to be stable */
usleep_range(100, 200);
@@ -905,9 +1114,9 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie)
*/
if (is_bmips(pcie))
burst = 0x1; /* 256 bytes */
- else if (pcie->type == BCM2711)
+ else if (pcie->cfg->soc_base == BCM2711)
burst = 0x0; /* 128 bytes */
- else if (pcie->type == BCM7278)
+ else if (pcie->cfg->soc_base == BCM7278)
burst = 0x3; /* 512 bytes */
else
burst = 0x2; /* 512 bytes */
@@ -924,17 +1133,16 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie)
u32p_replace_bits(&tmp, 1, PCIE_MISC_MISC_CTRL_PCIE_RCB_64B_MODE_MASK);
writel(tmp, base + PCIE_MISC_MISC_CTRL);
- ret = brcm_pcie_get_rc_bar2_size_and_offset(pcie, &rc_bar2_size,
- &rc_bar2_offset);
- if (ret)
- return ret;
+ num_inbound_wins = brcm_pcie_get_inbound_wins(pcie, inbound_wins);
+ if (num_inbound_wins < 0)
+ return num_inbound_wins;
- tmp = lower_32_bits(rc_bar2_offset);
- u32p_replace_bits(&tmp, brcm_pcie_encode_ibar_size(rc_bar2_size),
- PCIE_MISC_RC_BAR2_CONFIG_LO_SIZE_MASK);
- writel(tmp, base + PCIE_MISC_RC_BAR2_CONFIG_LO);
- writel(upper_32_bits(rc_bar2_offset),
- base + PCIE_MISC_RC_BAR2_CONFIG_HI);
+ set_inbound_win_registers(pcie, inbound_wins, num_inbound_wins);
+
+ if (!brcm_pcie_rc_mode(pcie)) {
+ dev_err(pcie->dev, "PCIe RC controller misconfigured as Endpoint\n");
+ return -EINVAL;
+ }
tmp = readl(base + PCIE_MISC_MISC_CTRL);
for (memc = 0; memc < pcie->num_memc; memc++) {
@@ -956,25 +1164,12 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie)
* 4GB or when the inbound area is smaller than 4GB (taking into
* account the rounding-up we're forced to perform).
*/
- if (rc_bar2_offset >= SZ_4G || (rc_bar2_size + rc_bar2_offset) < SZ_4G)
+ if (inbound_wins[2].pci_offset >= SZ_4G ||
+ (inbound_wins[2].size + inbound_wins[2].pci_offset) < SZ_4G)
pcie->msi_target_addr = BRCM_MSI_TARGET_ADDR_LT_4GB;
else
pcie->msi_target_addr = BRCM_MSI_TARGET_ADDR_GT_4GB;
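
A few example window placements show how the test above selects the MSI spoof address:

    /*
     * pci_offset = 0,     size = SZ_2G -> 0 + 2G < 4G    -> LT_4GB target
     * pci_offset = SZ_4G, size = SZ_4G -> offset >= 4G   -> LT_4GB target
     * pci_offset = 0,     size = SZ_8G -> neither holds  -> GT_4GB target
     */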
- if (!brcm_pcie_rc_mode(pcie)) {
- dev_err(pcie->dev, "PCIe RC controller misconfigured as Endpoint\n");
- return -EINVAL;
- }
-
- /* disable the PCIe->GISB memory window (RC_BAR1) */
- tmp = readl(base + PCIE_MISC_RC_BAR1_CONFIG_LO);
- tmp &= ~PCIE_MISC_RC_BAR1_CONFIG_LO_SIZE_MASK;
- writel(tmp, base + PCIE_MISC_RC_BAR1_CONFIG_LO);
-
- /* disable the PCIe->SCB memory window (RC_BAR3) */
- tmp = readl(base + PCIE_MISC_RC_BAR3_CONFIG_LO);
- tmp &= ~PCIE_MISC_RC_BAR3_CONFIG_LO_SIZE_MASK;
- writel(tmp, base + PCIE_MISC_RC_BAR3_CONFIG_LO);
/* Don't advertise L0s capability if 'aspm-no-l0s' */
aspm_support = PCIE_LINK_STATE_L1;
@@ -1025,12 +1220,18 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie)
num_out_wins++;
}
- /* PCIe->SCB endian mode for BAR */
+ /* PCIe->SCB endian mode for inbound window */
tmp = readl(base + PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1);
- u32p_replace_bits(&tmp, PCIE_RC_CFG_VENDOR_SPCIFIC_REG1_LITTLE_ENDIAN,
+ u32p_replace_bits(&tmp, PCIE_RC_CFG_VENDOR_SPECIFIC_REG1_LITTLE_ENDIAN,
PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1_ENDIAN_MODE_BAR2_MASK);
writel(tmp, base + PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1);
+ if (pcie->cfg->post_setup) {
+ ret = pcie->cfg->post_setup(pcie);
+ if (ret < 0)
+ return ret;
+ }
+
return 0;
}
@@ -1045,6 +1246,10 @@ static void brcm_extend_rbus_timeout(struct brcm_pcie *pcie)
const unsigned int REG_OFFSET = PCIE_RGR1_SW_INIT_1(pcie) - 8;
u32 timeout_us = 4000000; /* 4 seconds, our setting for L1SS */
+ /* 7712 does not have this (RGR1) timer */
+ if (pcie->cfg->soc_base == BCM7712)
+ return;
+
/* Each unit in timeout register is 1/216,000,000 seconds */
writel(216 * timeout_us, pcie->base + REG_OFFSET);
}
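
Since each register unit is 1/216,000,000 s, the value written above is easy to verify for the 4 second case:

    /*
     * 4s timeout: 216 units/us * 4,000,000 us = 864,000,000 units,
     * which still fits comfortably in the 32-bit register.
     */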
@@ -1063,7 +1268,7 @@ static void brcm_config_clkreq(struct brcm_pcie *pcie)
}
/* Start out assuming safe mode (both mode bits cleared) */
- clkreq_cntl = readl(pcie->base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
+ clkreq_cntl = readl(pcie->base + HARD_DEBUG(pcie));
clkreq_cntl &= ~PCIE_CLKREQ_MASK;
if (strcmp(mode, "no-l1ss") == 0) {
@@ -1106,7 +1311,7 @@ static void brcm_config_clkreq(struct brcm_pcie *pcie)
dev_err(pcie->dev, err_msg);
mode = "safe";
}
- writel(clkreq_cntl, pcie->base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
+ writel(clkreq_cntl, pcie->base + HARD_DEBUG(pcie));
dev_info(pcie->dev, "clkreq-mode set to %s\n", mode);
}
@@ -1119,8 +1324,14 @@ static int brcm_pcie_start_link(struct brcm_pcie *pcie)
bool ssc_good = false;
int ret, i;
+ /* Limit the generation if specified */
+ if (pcie->gen)
+ brcm_pcie_set_gen(pcie, pcie->gen);
+
/* Unassert the fundamental reset */
- pcie->perst_set(pcie, 0);
+ ret = pcie->cfg->perst_set(pcie, 0);
+ if (ret)
+ return ret;
/*
* Wait for 100ms after PERST# deassertion; see PCIe CEM specification
@@ -1143,9 +1354,6 @@ static int brcm_pcie_start_link(struct brcm_pcie *pcie)
brcm_config_clkreq(pcie);
- if (pcie->gen)
- brcm_pcie_set_gen(pcie, pcie->gen);
-
if (pcie->ssc) {
ret = brcm_pcie_set_ssc(pcie);
if (ret == 0)
@@ -1208,7 +1416,8 @@ static int brcm_pcie_add_bus(struct pci_bus *bus)
ret = regulator_bulk_get(dev, sr->num_supplies, sr->supplies);
if (ret) {
- dev_info(dev, "No regulators for downstream device\n");
+ dev_info(dev, "Did not get regulators, err=%d\n", ret);
+ pcie->sr = NULL;
goto no_regulators;
}
@@ -1231,7 +1440,7 @@ static void brcm_pcie_remove_bus(struct pci_bus *bus)
struct subdev_regulators *sr = pcie->sr;
struct device *dev = &bus->dev;
- if (!sr)
+ if (!sr || !bus->parent || !pci_is_root_bus(bus->parent))
return;
if (regulator_bulk_disable(sr->num_supplies, sr->supplies))
@@ -1304,23 +1513,25 @@ static int brcm_phy_cntl(struct brcm_pcie *pcie, const int start)
static inline int brcm_phy_start(struct brcm_pcie *pcie)
{
- return pcie->rescal ? brcm_phy_cntl(pcie, 1) : 0;
+ return pcie->cfg->has_phy ? brcm_phy_cntl(pcie, 1) : 0;
}
static inline int brcm_phy_stop(struct brcm_pcie *pcie)
{
- return pcie->rescal ? brcm_phy_cntl(pcie, 0) : 0;
+ return pcie->cfg->has_phy ? brcm_phy_cntl(pcie, 0) : 0;
}
-static void brcm_pcie_turn_off(struct brcm_pcie *pcie)
+static int brcm_pcie_turn_off(struct brcm_pcie *pcie)
{
void __iomem *base = pcie->base;
- int tmp;
+ int tmp, ret;
if (brcm_pcie_link_up(pcie))
brcm_pcie_enter_l23(pcie);
/* Assert fundamental reset */
- pcie->perst_set(pcie, 1);
+ ret = pcie->cfg->perst_set(pcie, 1);
+ if (ret)
+ return ret;
/* Deassert request for L23 in case it was asserted */
tmp = readl(base + PCIE_MISC_PCIE_CTRL);
@@ -1328,12 +1539,15 @@ static void brcm_pcie_turn_off(struct brcm_pcie *pcie)
writel(tmp, base + PCIE_MISC_PCIE_CTRL);
/* Turn off SerDes */
- tmp = readl(base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
+ tmp = readl(base + HARD_DEBUG(pcie));
u32p_replace_bits(&tmp, 1, PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK);
- writel(tmp, base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
+ writel(tmp, base + HARD_DEBUG(pcie));
- /* Shutdown PCIe bridge */
- pcie->bridge_sw_init_set(pcie, 1);
+ if (!(pcie->cfg->quirks & CFG_QUIRK_AVOID_BRIDGE_SHUTDOWN))
+ /* Shutdown PCIe bridge */
+ ret = pcie->cfg->bridge_sw_init_set(pcie, 1);
+
+ return ret;
}
static int pci_dev_may_wakeup(struct pci_dev *dev, void *data)
@@ -1351,9 +1565,12 @@ static int brcm_pcie_suspend_noirq(struct device *dev)
{
struct brcm_pcie *pcie = dev_get_drvdata(dev);
struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
- int ret;
+ int ret, rret;
+
+ ret = brcm_pcie_turn_off(pcie);
+ if (ret)
+ return ret;
- brcm_pcie_turn_off(pcie);
/*
* If brcm_phy_stop() returns an error, just dev_err(). If we
* return the error it will cause the suspend to fail and this is a
@@ -1382,7 +1599,10 @@ static int brcm_pcie_suspend_noirq(struct device *dev)
pcie->sr->supplies);
if (ret) {
dev_err(dev, "Could not turn off regulators\n");
- reset_control_reset(pcie->rescal);
+ rret = reset_control_reset(pcie->rescal);
+ if (rret)
+ dev_err(dev, "failed to reset 'rascal' controller ret=%d\n",
+ rret);
return ret;
}
}
@@ -1397,7 +1617,7 @@ static int brcm_pcie_resume_noirq(struct device *dev)
struct brcm_pcie *pcie = dev_get_drvdata(dev);
void __iomem *base;
u32 tmp;
- int ret;
+ int ret, rret;
base = pcie->base;
ret = clk_prepare_enable(pcie->clk);
@@ -1413,12 +1633,12 @@ static int brcm_pcie_resume_noirq(struct device *dev)
goto err_reset;
/* Take bridge out of reset so we can access the SERDES reg */
- pcie->bridge_sw_init_set(pcie, 0);
+ pcie->cfg->bridge_sw_init_set(pcie, 0);
/* SERDES_IDDQ = 0 */
- tmp = readl(base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
+ tmp = readl(base + HARD_DEBUG(pcie));
u32p_replace_bits(&tmp, 0, PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK);
- writel(tmp, base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
+ writel(tmp, base + HARD_DEBUG(pcie));
/* wait for serdes to be stable */
udelay(100);
@@ -1459,7 +1679,9 @@ err_regulator:
if (pcie->sr)
regulator_bulk_disable(pcie->sr->num_supplies, pcie->sr->supplies);
err_reset:
- reset_control_rearm(pcie->rescal);
+ rret = reset_control_rearm(pcie->rescal);
+ if (rret)
+ dev_err(pcie->dev, "failed to rearm 'rescal' reset, err=%d\n", rret);
err_disable_clk:
clk_disable_unprepare(pcie->clk);
return ret;
@@ -1487,74 +1709,123 @@ static void brcm_pcie_remove(struct platform_device *pdev)
}
static const int pcie_offsets[] = {
- [RGR1_SW_INIT_1] = 0x9210,
- [EXT_CFG_INDEX] = 0x9000,
- [EXT_CFG_DATA] = 0x9004,
+ [RGR1_SW_INIT_1] = 0x9210,
+ [EXT_CFG_INDEX] = 0x9000,
+ [EXT_CFG_DATA] = 0x8000,
+ [PCIE_HARD_DEBUG] = 0x4204,
+ [PCIE_INTR2_CPU_BASE] = 0x4300,
+};
+
+static const int pcie_offsets_bcm7278[] = {
+ [RGR1_SW_INIT_1] = 0xc010,
+ [EXT_CFG_INDEX] = 0x9000,
+ [EXT_CFG_DATA] = 0x8000,
+ [PCIE_HARD_DEBUG] = 0x4204,
+ [PCIE_INTR2_CPU_BASE] = 0x4300,
};
-static const int pcie_offsets_bmips_7425[] = {
- [RGR1_SW_INIT_1] = 0x8010,
- [EXT_CFG_INDEX] = 0x8300,
- [EXT_CFG_DATA] = 0x8304,
+static const int pcie_offsets_bcm7425[] = {
+ [RGR1_SW_INIT_1] = 0x8010,
+ [EXT_CFG_INDEX] = 0x8300,
+ [EXT_CFG_DATA] = 0x8304,
+ [PCIE_HARD_DEBUG] = 0x4204,
+ [PCIE_INTR2_CPU_BASE] = 0x4300,
+};
+
+static const int pcie_offsets_bcm7712[] = {
+ [RGR1_SW_INIT_1] = 0x9210,
+ [EXT_CFG_INDEX] = 0x9000,
+ [EXT_CFG_DATA] = 0x8000,
+ [PCIE_HARD_DEBUG] = 0x4304,
+ [PCIE_INTR2_CPU_BASE] = 0x4400,
};
static const struct pcie_cfg_data generic_cfg = {
.offsets = pcie_offsets,
- .type = GENERIC,
+ .soc_base = GENERIC,
.perst_set = brcm_pcie_perst_set_generic,
.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
+ .num_inbound_wins = 3,
};
-static const struct pcie_cfg_data bcm7425_cfg = {
- .offsets = pcie_offsets_bmips_7425,
- .type = BCM7425,
+static const struct pcie_cfg_data bcm2711_cfg = {
+ .offsets = pcie_offsets,
+ .soc_base = BCM2711,
.perst_set = brcm_pcie_perst_set_generic,
.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
+ .num_inbound_wins = 3,
};
-static const struct pcie_cfg_data bcm7435_cfg = {
- .offsets = pcie_offsets,
- .type = BCM7435,
- .perst_set = brcm_pcie_perst_set_generic,
+static const struct pcie_cfg_data bcm2712_cfg = {
+ .offsets = pcie_offsets_bcm7712,
+ .soc_base = BCM7712,
+ .perst_set = brcm_pcie_perst_set_7278,
.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
+ .post_setup = brcm_pcie_post_setup_bcm2712,
+ .quirks = CFG_QUIRK_AVOID_BRIDGE_SHUTDOWN,
+ .num_inbound_wins = 10,
};
static const struct pcie_cfg_data bcm4908_cfg = {
.offsets = pcie_offsets,
- .type = BCM4908,
+ .soc_base = BCM4908,
.perst_set = brcm_pcie_perst_set_4908,
.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
-};
-
-static const int pcie_offset_bcm7278[] = {
- [RGR1_SW_INIT_1] = 0xc010,
- [EXT_CFG_INDEX] = 0x9000,
- [EXT_CFG_DATA] = 0x9004,
+ .num_inbound_wins = 3,
};
static const struct pcie_cfg_data bcm7278_cfg = {
- .offsets = pcie_offset_bcm7278,
- .type = BCM7278,
+ .offsets = pcie_offsets_bcm7278,
+ .soc_base = BCM7278,
.perst_set = brcm_pcie_perst_set_7278,
.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_7278,
+ .num_inbound_wins = 3,
};
-static const struct pcie_cfg_data bcm2711_cfg = {
+static const struct pcie_cfg_data bcm7425_cfg = {
+ .offsets = pcie_offsets_bcm7425,
+ .soc_base = BCM7425,
+ .perst_set = brcm_pcie_perst_set_generic,
+ .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
+ .num_inbound_wins = 3,
+};
+
+static const struct pcie_cfg_data bcm7435_cfg = {
.offsets = pcie_offsets,
- .type = BCM2711,
+ .soc_base = BCM7435,
.perst_set = brcm_pcie_perst_set_generic,
.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
+ .num_inbound_wins = 3,
+};
+
+static const struct pcie_cfg_data bcm7216_cfg = {
+ .offsets = pcie_offsets_bcm7278,
+ .soc_base = BCM7278,
+ .perst_set = brcm_pcie_perst_set_7278,
+ .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_7278,
+ .has_phy = true,
+ .num_inbound_wins = 3,
+};
+
+static const struct pcie_cfg_data bcm7712_cfg = {
+ .offsets = pcie_offsets_bcm7712,
+ .perst_set = brcm_pcie_perst_set_7278,
+ .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
+ .soc_base = BCM7712,
+ .num_inbound_wins = 10,
};
static const struct of_device_id brcm_pcie_match[] = {
{ .compatible = "brcm,bcm2711-pcie", .data = &bcm2711_cfg },
+ { .compatible = "brcm,bcm2712-pcie", .data = &bcm2712_cfg },
{ .compatible = "brcm,bcm4908-pcie", .data = &bcm4908_cfg },
{ .compatible = "brcm,bcm7211-pcie", .data = &generic_cfg },
+ { .compatible = "brcm,bcm7216-pcie", .data = &bcm7216_cfg },
{ .compatible = "brcm,bcm7278-pcie", .data = &bcm7278_cfg },
- { .compatible = "brcm,bcm7216-pcie", .data = &bcm7278_cfg },
- { .compatible = "brcm,bcm7445-pcie", .data = &generic_cfg },
- { .compatible = "brcm,bcm7435-pcie", .data = &bcm7435_cfg },
{ .compatible = "brcm,bcm7425-pcie", .data = &bcm7425_cfg },
+ { .compatible = "brcm,bcm7435-pcie", .data = &bcm7435_cfg },
+ { .compatible = "brcm,bcm7445-pcie", .data = &generic_cfg },
+ { .compatible = "brcm,bcm7712-pcie", .data = &bcm7712_cfg },
{},
};
@@ -1576,7 +1847,7 @@ static struct pci_ops brcm7425_pcie_ops = {
static int brcm_pcie_probe(struct platform_device *pdev)
{
- struct device_node *np = pdev->dev.of_node, *msi_np;
+ struct device_node *np = pdev->dev.of_node;
struct pci_host_bridge *bridge;
const struct pcie_cfg_data *data;
struct brcm_pcie *pcie;
@@ -1595,10 +1866,7 @@ static int brcm_pcie_probe(struct platform_device *pdev)
pcie = pci_host_bridge_priv(bridge);
pcie->dev = &pdev->dev;
pcie->np = np;
- pcie->reg_offsets = data->offsets;
- pcie->type = data->type;
- pcie->perst_set = data->perst_set;
- pcie->bridge_sw_init_set = data->bridge_sw_init_set;
+ pcie->cfg = data;
pcie->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pcie->base))
@@ -1613,25 +1881,52 @@ static int brcm_pcie_probe(struct platform_device *pdev)
pcie->ssc = of_property_read_bool(np, "brcm,enable-ssc");
- ret = clk_prepare_enable(pcie->clk);
- if (ret) {
- dev_err(&pdev->dev, "could not enable clock\n");
- return ret;
- }
pcie->rescal = devm_reset_control_get_optional_shared(&pdev->dev, "rescal");
- if (IS_ERR(pcie->rescal)) {
- clk_disable_unprepare(pcie->clk);
+ if (IS_ERR(pcie->rescal))
return PTR_ERR(pcie->rescal);
- }
+
pcie->perst_reset = devm_reset_control_get_optional_exclusive(&pdev->dev, "perst");
- if (IS_ERR(pcie->perst_reset)) {
- clk_disable_unprepare(pcie->clk);
+ if (IS_ERR(pcie->perst_reset))
return PTR_ERR(pcie->perst_reset);
+
+ pcie->bridge_reset = devm_reset_control_get_optional_exclusive(&pdev->dev, "bridge");
+ if (IS_ERR(pcie->bridge_reset))
+ return PTR_ERR(pcie->bridge_reset);
+
+ pcie->swinit_reset = devm_reset_control_get_optional_exclusive(&pdev->dev, "swinit");
+ if (IS_ERR(pcie->swinit_reset))
+ return PTR_ERR(pcie->swinit_reset);
+
+ ret = clk_prepare_enable(pcie->clk);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "could not enable clock\n");
+
+ pcie->cfg->bridge_sw_init_set(pcie, 0);
+
+ if (pcie->swinit_reset) {
+ ret = reset_control_assert(pcie->swinit_reset);
+ if (ret) {
+ clk_disable_unprepare(pcie->clk);
+ return dev_err_probe(&pdev->dev, ret,
+ "could not assert reset 'swinit'\n");
+ }
+
+ /* HW team recommends 1us for proper sync and propagation of reset */
+ udelay(1);
+
+ ret = reset_control_deassert(pcie->swinit_reset);
+ if (ret) {
+ clk_disable_unprepare(pcie->clk);
+ return dev_err_probe(&pdev->dev, ret,
+ "could not de-assert reset 'swinit'\n");
+ }
}
ret = reset_control_reset(pcie->rescal);
- if (ret)
- dev_err(&pdev->dev, "failed to deassert 'rescal'\n");
+ if (ret) {
+ clk_disable_unprepare(pcie->clk);
+ return dev_err_probe(&pdev->dev, ret, "failed to deassert 'rescal'\n");
+ }
ret = brcm_phy_start(pcie);
if (ret) {
@@ -1645,22 +1940,29 @@ static int brcm_pcie_probe(struct platform_device *pdev)
goto fail;
pcie->hw_rev = readl(pcie->base + PCIE_MISC_REVISION);
- if (pcie->type == BCM4908 && pcie->hw_rev >= BRCM_PCIE_HW_REV_3_20) {
+ if (pcie->cfg->soc_base == BCM4908 &&
+ pcie->hw_rev >= BRCM_PCIE_HW_REV_3_20) {
dev_err(pcie->dev, "hardware revision with unsupported PERST# setup\n");
ret = -ENODEV;
goto fail;
}
- msi_np = of_parse_phandle(pcie->np, "msi-parent", 0);
- if (pci_msi_enabled() && msi_np == pcie->np) {
- ret = brcm_pcie_enable_msi(pcie);
+ if (pci_msi_enabled()) {
+ struct device_node *msi_np = of_parse_phandle(pcie->np, "msi-parent", 0);
+
+ if (msi_np == pcie->np)
+ ret = brcm_pcie_enable_msi(pcie);
+
+ of_node_put(msi_np);
+
if (ret) {
dev_err(pcie->dev, "probe of internal MSI failed");
goto fail;
}
}
- bridge->ops = pcie->type == BCM7425 ? &brcm7425_pcie_ops : &brcm_pcie_ops;
+ bridge->ops = pcie->cfg->soc_base == BCM7425 ?
+ &brcm7425_pcie_ops : &brcm_pcie_ops;
bridge->sysdata = pcie;
platform_set_drvdata(pdev, pcie);
@@ -1678,6 +1980,7 @@ static int brcm_pcie_probe(struct platform_device *pdev)
fail:
__brcm_pcie_remove(pcie);
+
return ret;
}
@@ -1690,7 +1993,7 @@ static const struct dev_pm_ops brcm_pcie_pm_ops = {
static struct platform_driver brcm_pcie_driver = {
.probe = brcm_pcie_probe,
- .remove_new = brcm_pcie_remove,
+ .remove = brcm_pcie_remove,
.driver = {
.name = "brcm-pcie",
.of_match_table = brcm_pcie_match,
@@ -1702,3 +2005,4 @@ module_platform_driver(brcm_pcie_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Broadcom STB PCIe RC driver");
MODULE_AUTHOR("Broadcom");
+MODULE_SOFTDEP("pre: irq_bcm2712_mip");
diff --git a/drivers/pci/controller/pcie-hisi-error.c b/drivers/pci/controller/pcie-hisi-error.c
index ad9d5ffcd9e3..aaf1ed2b6e59 100644
--- a/drivers/pci/controller/pcie-hisi-error.c
+++ b/drivers/pci/controller/pcie-hisi-error.c
@@ -317,7 +317,7 @@ static struct platform_driver hisi_pcie_error_handler_driver = {
.acpi_match_table = hisi_pcie_acpi_match,
},
.probe = hisi_pcie_error_handler_probe,
- .remove_new = hisi_pcie_error_handler_remove,
+ .remove = hisi_pcie_error_handler_remove,
};
module_platform_driver(hisi_pcie_error_handler_driver);
diff --git a/drivers/pci/controller/pcie-iproc-msi.c b/drivers/pci/controller/pcie-iproc-msi.c
index 649fcb449f34..d2cb4c4f821a 100644
--- a/drivers/pci/controller/pcie-iproc-msi.c
+++ b/drivers/pci/controller/pcie-iproc-msi.c
@@ -446,12 +446,12 @@ static void iproc_msi_disable(struct iproc_msi *msi)
static int iproc_msi_alloc_domains(struct device_node *node,
struct iproc_msi *msi)
{
- msi->inner_domain = irq_domain_add_linear(NULL, msi->nr_msi_vecs,
- &msi_domain_ops, msi);
+ msi->inner_domain = irq_domain_create_linear(NULL, msi->nr_msi_vecs,
+ &msi_domain_ops, msi);
if (!msi->inner_domain)
return -ENOMEM;
- msi->msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(node),
+ msi->msi_domain = pci_msi_create_irq_domain(of_fwnode_handle(node),
&iproc_msi_domain_info,
msi->inner_domain);
if (!msi->msi_domain) {
diff --git a/drivers/pci/controller/pcie-iproc-platform.c b/drivers/pci/controller/pcie-iproc-platform.c
index 4e6aa882a567..0cb78c583c7e 100644
--- a/drivers/pci/controller/pcie-iproc-platform.c
+++ b/drivers/pci/controller/pcie-iproc-platform.c
@@ -134,7 +134,7 @@ static struct platform_driver iproc_pltfm_pcie_driver = {
.of_match_table = of_match_ptr(iproc_pcie_of_match_table),
},
.probe = iproc_pltfm_pcie_probe,
- .remove_new = iproc_pltfm_pcie_remove,
+ .remove = iproc_pltfm_pcie_remove,
.shutdown = iproc_pltfm_pcie_shutdown,
};
module_platform_driver(iproc_pltfm_pcie_driver);
diff --git a/drivers/pci/controller/pcie-iproc.c b/drivers/pci/controller/pcie-iproc.c
index 97f739a2c9f8..22134e95574b 100644
--- a/drivers/pci/controller/pcie-iproc.c
+++ b/drivers/pci/controller/pcie-iproc.c
@@ -54,7 +54,7 @@
#define CFG_RD_SUCCESS 0
#define CFG_RD_UR 1
-#define CFG_RD_CRS 2
+#define CFG_RD_RRS 2
#define CFG_RD_CA 3
#define CFG_RETRY_STATUS 0xffff0001
#define CFG_RETRY_STATUS_TIMEOUT_US 500000 /* 500 milliseconds */
@@ -485,31 +485,31 @@ static unsigned int iproc_pcie_cfg_retry(struct iproc_pcie *pcie,
u32 status;
/*
- * As per PCIe spec r3.1, sec 2.3.2, CRS Software Visibility only
+ * As per PCIe r6.0, sec 2.3.2, Config RRS Software Visibility only
* affects config reads of the Vendor ID. For config writes or any
* other config reads, the Root may automatically reissue the
* configuration request again as a new request.
*
* For config reads, this hardware returns CFG_RETRY_STATUS data
- * when it receives a CRS completion, regardless of the address of
- * the read or the CRS Software Visibility Enable bit. As a
+	 * when it receives an RRS completion, regardless of the address of
+ * the read or the RRS Software Visibility Enable bit. As a
* partial workaround for this, we retry in software any read that
* returns CFG_RETRY_STATUS.
*
* Note that a non-Vendor ID config register may have a value of
* CFG_RETRY_STATUS. If we read that, we can't distinguish it from
- * a CRS completion, so we will incorrectly retry the read and
+	 * an RRS completion, so we will incorrectly retry the read and
* eventually return the wrong data (0xffffffff).
*/
data = readl(cfg_data_p);
while (data == CFG_RETRY_STATUS && timeout--) {
/*
- * CRS state is set in CFG_RD status register
+ * RRS state is set in CFG_RD status register
* This will handle the case where CFG_RETRY_STATUS is
* valid config data.
*/
status = iproc_pcie_read_reg(pcie, IPROC_PCIE_CFG_RD_STATUS);
- if (status != CFG_RD_CRS)
+ if (status != CFG_RD_RRS)
return data;
udelay(1);
@@ -556,8 +556,8 @@ static void iproc_pcie_fix_cap(struct iproc_pcie *pcie, int where, u32 *val)
break;
case IPROC_PCI_EXP_CAP + PCI_EXP_RTCTL:
- /* Don't advertise CRS SV support */
- *val &= ~(PCI_EXP_RTCAP_CRSVIS << 16);
+ /* Don't advertise RRS SV support */
+ *val &= ~(PCI_EXP_RTCAP_RRS_SV << 16);
break;
default:
diff --git a/drivers/pci/controller/pcie-mediatek-gen3.c b/drivers/pci/controller/pcie-mediatek-gen3.c
index 975b3024fb08..b55f5973414c 100644
--- a/drivers/pci/controller/pcie-mediatek-gen3.c
+++ b/drivers/pci/controller/pcie-mediatek-gen3.c
@@ -6,29 +6,46 @@
* Author: Jianjun Wang <jianjun.wang@mediatek.com>
*/
+#include <linux/bitfield.h>
#include <linux/clk.h>
+#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/msi.h>
+#include <linux/of_device.h>
+#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
#include <linux/reset.h>
#include "../pci.h"
+#define PCIE_BASE_CFG_REG 0x14
+#define PCIE_BASE_CFG_SPEED GENMASK(15, 8)
+
#define PCIE_SETTING_REG 0x80
+#define PCIE_SETTING_LINK_WIDTH GENMASK(11, 8)
+#define PCIE_SETTING_GEN_SUPPORT GENMASK(14, 12)
#define PCIE_PCI_IDS_1 0x9c
#define PCI_CLASS(class) (class << 8)
#define PCIE_RC_MODE BIT(0)
+#define PCIE_EQ_PRESET_01_REG 0x100
+#define PCIE_VAL_LN0_DOWNSTREAM GENMASK(6, 0)
+#define PCIE_VAL_LN0_UPSTREAM GENMASK(14, 8)
+#define PCIE_VAL_LN1_DOWNSTREAM GENMASK(22, 16)
+#define PCIE_VAL_LN1_UPSTREAM GENMASK(30, 24)
+
#define PCIE_CFGNUM_REG 0x140
#define PCIE_CFG_DEVFN(devfn) ((devfn) & GENMASK(7, 0))
#define PCIE_CFG_BUS(bus) (((bus) << 8) & GENMASK(15, 8))
@@ -68,6 +85,14 @@
#define PCIE_MSI_SET_ENABLE_REG 0x190
#define PCIE_MSI_SET_ENABLE GENMASK(PCIE_MSI_SET_NUM - 1, 0)
+#define PCIE_PIPE4_PIE8_REG 0x338
+#define PCIE_K_FINETUNE_MAX GENMASK(5, 0)
+#define PCIE_K_FINETUNE_ERR GENMASK(7, 6)
+#define PCIE_K_PRESET_TO_USE GENMASK(18, 8)
+#define PCIE_K_PHYPARAM_QUERY BIT(19)
+#define PCIE_K_QUERY_TIMEOUT BIT(20)
+#define PCIE_K_PRESET_TO_USE_16G GENMASK(31, 21)
+
#define PCIE_MSI_SET_BASE_REG 0xc00
#define PCIE_MSI_SET_OFFSET 0x10
#define PCIE_MSI_SET_STATUS_OFFSET 0x04
@@ -100,6 +125,40 @@
#define PCIE_ATR_TLP_TYPE_MEM PCIE_ATR_TLP_TYPE(0)
#define PCIE_ATR_TLP_TYPE_IO PCIE_ATR_TLP_TYPE(2)
+#define MAX_NUM_PHY_RESETS 3
+
+#define PCIE_MTK_RESET_TIME_US 10
+
+/* Time in ms needed to complete PCIe reset on EN7581 SoC */
+#define PCIE_EN7581_RESET_TIME_MS 100
+
+struct mtk_gen3_pcie;
+
+#define PCIE_CONF_LINK2_CTL_STS (PCIE_CFG_OFFSET_ADDR + 0xb0)
+#define PCIE_CONF_LINK2_LCR2_LINK_SPEED GENMASK(3, 0)
+
+enum mtk_gen3_pcie_flags {
+ SKIP_PCIE_RSTB = BIT(0), /* Skip PERST# assertion during device
+ * probing or suspend/resume phase to
+ * avoid hw bugs/issues.
+ */
+};
+
+/**
+ * struct mtk_gen3_pcie_pdata - differentiate between host generations
+ * @power_up: pcie power_up callback
+ * @phy_resets: SoC-specific PHY reset line data.
+ * @flags: pcie device flags.
+ */
+struct mtk_gen3_pcie_pdata {
+ int (*power_up)(struct mtk_gen3_pcie *pcie);
+ struct {
+ const char *id[MAX_NUM_PHY_RESETS];
+ int num_resets;
+ } phy_resets;
+ u32 flags;
+};
+
/**
* struct mtk_msi_set - MSI information for each set
* @base: IO mapped register base
@@ -118,10 +177,12 @@ struct mtk_msi_set {
* @base: IO mapped register base
* @reg_base: physical register base
* @mac_reset: MAC reset control
- * @phy_reset: PHY reset control
+ * @phy_resets: PHY reset controllers
* @phy: PHY controller block
* @clks: PCIe clocks
* @num_clks: PCIe clocks count for this port
+ * @max_link_speed: Maximum link speed (PCIe Gen) for this port
+ * @num_lanes: Number of PCIe lanes for this port
* @irq: PCIe controller interrupt number
* @saved_irq_state: IRQ enable state saved at suspend time
* @irq_lock: lock protecting IRQ register access
@@ -131,16 +192,19 @@ struct mtk_msi_set {
* @msi_sets: MSI sets information
* @lock: lock protecting IRQ bit map
* @msi_irq_in_use: bit map for assigned MSI IRQ
+ * @soc: pointer to SoC-dependent operations
*/
struct mtk_gen3_pcie {
struct device *dev;
void __iomem *base;
phys_addr_t reg_base;
struct reset_control *mac_reset;
- struct reset_control *phy_reset;
+ struct reset_control_bulk_data phy_resets[MAX_NUM_PHY_RESETS];
struct phy *phy;
struct clk_bulk_data *clks;
int num_clks;
+ u8 max_link_speed;
+ u8 num_lanes;
int irq;
u32 saved_irq_state;
@@ -151,6 +215,8 @@ struct mtk_gen3_pcie {
struct mtk_msi_set msi_sets[PCIE_MSI_SET_NUM];
struct mutex lock;
DECLARE_BITMAP(msi_irq_in_use, PCIE_MSI_IRQS_NUM);
+
+ const struct mtk_gen3_pcie_pdata *soc;
};
/* LTSSM state in PCIE_LTSSM_STATUS_REG bit[28:24] */
@@ -288,7 +354,8 @@ static int mtk_pcie_set_trans_table(struct mtk_gen3_pcie *pcie,
dev_dbg(pcie->dev, "set %s trans window[%d]: cpu_addr = %#llx, pci_addr = %#llx, size = %#llx\n",
range_type, *num, (unsigned long long)cpu_addr,
- (unsigned long long)pci_addr, (unsigned long long)table_size);
+ (unsigned long long)pci_addr,
+ (unsigned long long)table_size);
cpu_addr += table_size;
pci_addr += table_size;
@@ -340,11 +407,35 @@ static int mtk_pcie_startup_port(struct mtk_gen3_pcie *pcie)
int err;
u32 val;
- /* Set as RC mode */
+ /* Set as RC mode and set controller PCIe Gen speed restriction, if any */
val = readl_relaxed(pcie->base + PCIE_SETTING_REG);
val |= PCIE_RC_MODE;
+ if (pcie->max_link_speed) {
+ val &= ~PCIE_SETTING_GEN_SUPPORT;
+
+ /* Can enable link speed support only from Gen2 onwards */
+ if (pcie->max_link_speed >= 2)
+ val |= FIELD_PREP(PCIE_SETTING_GEN_SUPPORT,
+ GENMASK(pcie->max_link_speed - 2, 0));
+ }
+ if (pcie->num_lanes) {
+ val &= ~PCIE_SETTING_LINK_WIDTH;
+
+ /* Zero means one lane, each bit activates x2/x4/x8/x16 */
+ if (pcie->num_lanes > 1)
+ val |= FIELD_PREP(PCIE_SETTING_LINK_WIDTH,
+ GENMASK(fls(pcie->num_lanes >> 2), 0));
+ }
writel_relaxed(val, pcie->base + PCIE_SETTING_REG);
+ /* Set Link Control 2 (LNKCTL2) speed restriction, if any */
+ if (pcie->max_link_speed) {
+ val = readl_relaxed(pcie->base + PCIE_CONF_LINK2_CTL_STS);
+ val &= ~PCIE_CONF_LINK2_LCR2_LINK_SPEED;
+ val |= FIELD_PREP(PCIE_CONF_LINK2_LCR2_LINK_SPEED, pcie->max_link_speed);
+ writel_relaxed(val, pcie->base + PCIE_CONF_LINK2_CTL_STS);
+ }
+
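The two FIELD_PREP() encodings above pack generation and width support as contiguous bitmasks; some worked values, following the GENMASK() arithmetic shown:

    /*
     * PCIE_SETTING_GEN_SUPPORT, max_link_speed = 3:
     *   GENMASK(3 - 2, 0) = 0b11         -> advertise Gen2 and Gen3
     *
     * PCIE_SETTING_LINK_WIDTH, num_lanes = 2:
     *   fls(2 >> 2) = 0,  GENMASK(0, 0)  -> enable x2 only
     * num_lanes = 16:
     *   fls(16 >> 2) = 3, GENMASK(3, 0)  -> enable x2/x4/x8/x16
     */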
/* Set class code */
val = readl_relaxed(pcie->base + PCIE_PCI_IDS_1);
val &= ~GENMASK(31, 8);
@@ -361,22 +452,33 @@ static int mtk_pcie_startup_port(struct mtk_gen3_pcie *pcie)
val |= PCIE_DISABLE_DVFSRC_VLT_REQ;
writel_relaxed(val, pcie->base + PCIE_MISC_CTRL_REG);
- /* Assert all reset signals */
- val = readl_relaxed(pcie->base + PCIE_RST_CTRL_REG);
- val |= PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB | PCIE_PE_RSTB;
- writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);
-
/*
- * Described in PCIe CEM specification sections 2.2 (PERST# Signal)
- * and 2.2.1 (Initial Power-Up (G3 to S0)).
- * The deassertion of PERST# should be delayed 100ms (TPVPERL)
- * for the power and clock to become stable.
+ * Airoha EN7581 has a hw bug asserting/releasing PCIE_PE_RSTB signal
+ * causing occasional PCIe link down. In order to overcome the issue,
+ * PCIE_RSTB signals are not asserted/released at this stage and the
+ * PCIe block is reset using en7523_reset_assert() and
+ * en7581_pci_enable().
*/
- msleep(100);
-
- /* De-assert reset signals */
- val &= ~(PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB | PCIE_PE_RSTB);
- writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);
+ if (!(pcie->soc->flags & SKIP_PCIE_RSTB)) {
+ /* Assert all reset signals */
+ val = readl_relaxed(pcie->base + PCIE_RST_CTRL_REG);
+ val |= PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB |
+ PCIE_PE_RSTB;
+ writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);
+
+ /*
+ * Described in PCIe CEM specification revision 6.0.
+ *
+ * The deassertion of PERST# should be delayed 100ms (TPVPERL)
+ * for the power and clock to become stable.
+ */
+ msleep(PCIE_T_PVPERL_MS);
+
+ /* De-assert reset signals */
+ val &= ~(PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB |
+ PCIE_PE_RSTB);
+ writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);
+ }
/* Check if the link is up or not */
err = readl_poll_timeout(pcie->base + PCIE_LINK_STATUS_REG, val,
@@ -424,12 +526,6 @@ static int mtk_pcie_startup_port(struct mtk_gen3_pcie *pcie)
return 0;
}
-static int mtk_pcie_set_affinity(struct irq_data *data,
- const struct cpumask *mask, bool force)
-{
- return -EINVAL;
-}
-
static void mtk_pcie_msi_irq_mask(struct irq_data *data)
{
pci_msi_mask_irq(data);
@@ -450,8 +546,9 @@ static struct irq_chip mtk_msi_irq_chip = {
};
static struct msi_domain_info mtk_msi_domain_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI),
+ .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX |
+ MSI_FLAG_MULTI_PCI_MSI,
.chip = &mtk_msi_irq_chip,
};
@@ -517,7 +614,6 @@ static struct irq_chip mtk_msi_bottom_irq_chip = {
.irq_mask = mtk_msi_bottom_irq_mask,
.irq_unmask = mtk_msi_bottom_irq_unmask,
.irq_compose_msi_msg = mtk_compose_msi_msg,
- .irq_set_affinity = mtk_pcie_set_affinity,
.name = "MSI",
};
@@ -618,7 +714,6 @@ static struct irq_chip mtk_intx_irq_chip = {
.irq_mask = mtk_intx_mask,
.irq_unmask = mtk_intx_unmask,
.irq_eoi = mtk_intx_eoi,
- .irq_set_affinity = mtk_pcie_set_affinity,
.name = "INTx",
};
@@ -650,8 +745,8 @@ static int mtk_pcie_init_irq_domains(struct mtk_gen3_pcie *pcie)
return -ENODEV;
}
- pcie->intx_domain = irq_domain_add_linear(intc_node, PCI_NUM_INTX,
- &intx_domain_ops, pcie);
+ pcie->intx_domain = irq_domain_create_linear(of_fwnode_handle(intc_node), PCI_NUM_INTX,
+ &intx_domain_ops, pcie);
if (!pcie->intx_domain) {
dev_err(dev, "failed to create INTx IRQ domain\n");
ret = -ENODEV;
@@ -661,8 +756,9 @@ static int mtk_pcie_init_irq_domains(struct mtk_gen3_pcie *pcie)
/* Setup MSI */
mutex_init(&pcie->lock);
- pcie->msi_bottom_domain = irq_domain_add_linear(node, PCIE_MSI_IRQS_NUM,
- &mtk_msi_bottom_domain_ops, pcie);
+ pcie->msi_bottom_domain = irq_domain_create_linear(of_fwnode_handle(node),
+ PCIE_MSI_IRQS_NUM,
+ &mtk_msi_bottom_domain_ops, pcie);
if (!pcie->msi_bottom_domain) {
dev_err(dev, "failed to create MSI bottom domain\n");
ret = -ENODEV;
@@ -775,10 +871,11 @@ static int mtk_pcie_setup_irq(struct mtk_gen3_pcie *pcie)
static int mtk_pcie_parse_port(struct mtk_gen3_pcie *pcie)
{
+ int i, ret, num_resets = pcie->soc->phy_resets.num_resets;
struct device *dev = pcie->dev;
struct platform_device *pdev = to_platform_device(dev);
struct resource *regs;
- int ret;
+ u32 num_lanes;
regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcie-mac");
if (!regs)
@@ -791,12 +888,13 @@ static int mtk_pcie_parse_port(struct mtk_gen3_pcie *pcie)
pcie->reg_base = regs->start;
- pcie->phy_reset = devm_reset_control_get_optional_exclusive(dev, "phy");
- if (IS_ERR(pcie->phy_reset)) {
- ret = PTR_ERR(pcie->phy_reset);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "failed to get PHY reset\n");
+ for (i = 0; i < num_resets; i++)
+ pcie->phy_resets[i].id = pcie->soc->phy_resets.id[i];
+ ret = devm_reset_control_bulk_get_optional_shared(dev, num_resets,
+ pcie->phy_resets);
+ if (ret) {
+ dev_err(dev, "failed to get PHY bulk reset\n");
return ret;
}
@@ -824,16 +922,151 @@ static int mtk_pcie_parse_port(struct mtk_gen3_pcie *pcie)
return pcie->num_clks;
}
+ ret = of_property_read_u32(dev->of_node, "num-lanes", &num_lanes);
+ if (ret == 0) {
+ if (num_lanes == 0 || num_lanes > 16 ||
+ (num_lanes != 1 && num_lanes % 2))
+ dev_warn(dev, "invalid num-lanes, using controller defaults\n");
+ else
+ pcie->num_lanes = num_lanes;
+ }
+
return 0;
}
+static int mtk_pcie_en7581_power_up(struct mtk_gen3_pcie *pcie)
+{
+ struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
+ struct device *dev = pcie->dev;
+ struct resource_entry *entry;
+ struct regmap *pbus_regmap;
+ u32 val, args[2], size;
+ resource_size_t addr;
+ int err;
+
+ /*
+ * The controller may have been left out of reset by the bootloader
+ * so make sure that we get a clean start by asserting resets here.
+ */
+ reset_control_bulk_assert(pcie->soc->phy_resets.num_resets,
+ pcie->phy_resets);
+
+	/* Wait for the time needed to complete the assertion of the reset lines. */
+ msleep(PCIE_EN7581_RESET_TIME_MS);
+
+ /*
+ * Configure PBus base address and base address mask to allow the
+ * hw to detect if a given address is accessible on PCIe controller.
+ */
+ pbus_regmap = syscon_regmap_lookup_by_phandle_args(dev->of_node,
+ "mediatek,pbus-csr",
+ ARRAY_SIZE(args),
+ args);
+ if (IS_ERR(pbus_regmap))
+ return PTR_ERR(pbus_regmap);
+
+ entry = resource_list_first_type(&host->windows, IORESOURCE_MEM);
+ if (!entry)
+ return -ENODEV;
+
+ addr = entry->res->start - entry->offset;
+ regmap_write(pbus_regmap, args[0], lower_32_bits(addr));
+ size = lower_32_bits(resource_size(entry->res));
+ regmap_write(pbus_regmap, args[1], GENMASK(31, __fls(size)));
+
+ /*
+ * Unlike the other MediaTek Gen3 controllers, the Airoha EN7581
+ * requires PHY initialization and power-on before PHY reset deassert.
+ */
+ err = phy_init(pcie->phy);
+ if (err) {
+ dev_err(dev, "failed to initialize PHY\n");
+ return err;
+ }
+
+ err = phy_power_on(pcie->phy);
+ if (err) {
+ dev_err(dev, "failed to power on PHY\n");
+ goto err_phy_on;
+ }
+
+ err = reset_control_bulk_deassert(pcie->soc->phy_resets.num_resets,
+ pcie->phy_resets);
+ if (err) {
+ dev_err(dev, "failed to deassert PHYs\n");
+ goto err_phy_deassert;
+ }
+
+ /*
+ * Wait for the time needed to complete the bulk de-assert above.
+	 * This delay is specific to the EN7581 SoC.
+ */
+ msleep(PCIE_EN7581_RESET_TIME_MS);
+
+ pm_runtime_enable(dev);
+ pm_runtime_get_sync(dev);
+
+ val = FIELD_PREP(PCIE_VAL_LN0_DOWNSTREAM, 0x47) |
+ FIELD_PREP(PCIE_VAL_LN1_DOWNSTREAM, 0x47) |
+ FIELD_PREP(PCIE_VAL_LN0_UPSTREAM, 0x41) |
+ FIELD_PREP(PCIE_VAL_LN1_UPSTREAM, 0x41);
+ writel_relaxed(val, pcie->base + PCIE_EQ_PRESET_01_REG);
+
+ val = PCIE_K_PHYPARAM_QUERY | PCIE_K_QUERY_TIMEOUT |
+ FIELD_PREP(PCIE_K_PRESET_TO_USE_16G, 0x80) |
+ FIELD_PREP(PCIE_K_PRESET_TO_USE, 0x2) |
+ FIELD_PREP(PCIE_K_FINETUNE_MAX, 0xf);
+ writel_relaxed(val, pcie->base + PCIE_PIPE4_PIE8_REG);
+
+ err = clk_bulk_prepare_enable(pcie->num_clks, pcie->clks);
+ if (err) {
+ dev_err(dev, "failed to prepare clock\n");
+ goto err_clk_prepare_enable;
+ }
+
+ /*
+ * Airoha EN7581 performs the PCIe reset via clk callbacks since it has
+ * a hw issue with the PCIE_PE_RSTB signal. Wait for the time needed to
+ * complete the PCIe reset.
+ */
+ msleep(PCIE_T_PVPERL_MS);
+
+ return 0;
+
+err_clk_prepare_enable:
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
+ reset_control_bulk_assert(pcie->soc->phy_resets.num_resets,
+ pcie->phy_resets);
+err_phy_deassert:
+ phy_power_off(pcie->phy);
+err_phy_on:
+ phy_exit(pcie->phy);
+
+ return err;
+}
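+
[Aside: the PBus window programmed in the function above is a base/mask pair: GENMASK(31, __fls(size)) keeps every address bit at or above the window's top set bit. A worked sketch with illustrative values (demo_pbus_window is hypothetical; it assumes a power-of-two window size):

    static void demo_pbus_window(struct regmap *pbus, u32 base_reg, u32 mask_reg,
                                 resource_size_t addr, u32 size)
    {
            /*
             * e.g. size = 0x10000000 (256 MiB): __fls(size) = 28, so the mask
             * is GENMASK(31, 28) = 0xf0000000 and only those bits select the
             * window.
             */
            regmap_write(pbus, base_reg, lower_32_bits(addr));
            regmap_write(pbus, mask_reg, GENMASK(31, __fls(size)));
    }
]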
+
static int mtk_pcie_power_up(struct mtk_gen3_pcie *pcie)
{
struct device *dev = pcie->dev;
int err;
+ /*
+ * The controller may have been left out of reset by the bootloader,
+ * so make sure that we get a clean start by asserting the resets here.
+ */
+ reset_control_bulk_assert(pcie->soc->phy_resets.num_resets,
+ pcie->phy_resets);
+ reset_control_assert(pcie->mac_reset);
+ usleep_range(PCIE_MTK_RESET_TIME_US, 2 * PCIE_MTK_RESET_TIME_US);
+
/* PHY power on and enable pipe clock */
- reset_control_deassert(pcie->phy_reset);
+ err = reset_control_bulk_deassert(pcie->soc->phy_resets.num_resets,
+ pcie->phy_resets);
+ if (err) {
+ dev_err(dev, "failed to deassert PHYs\n");
+ return err;
+ }
err = phy_init(pcie->phy);
if (err) {
@@ -869,7 +1102,8 @@ err_clk_init:
err_phy_on:
phy_exit(pcie->phy);
err_phy_init:
- reset_control_assert(pcie->phy_reset);
+ reset_control_bulk_assert(pcie->soc->phy_resets.num_resets,
+ pcie->phy_resets);
return err;
}
@@ -884,30 +1118,56 @@ static void mtk_pcie_power_down(struct mtk_gen3_pcie *pcie)
phy_power_off(pcie->phy);
phy_exit(pcie->phy);
- reset_control_assert(pcie->phy_reset);
+ reset_control_bulk_assert(pcie->soc->phy_resets.num_resets,
+ pcie->phy_resets);
+}
+
+static int mtk_pcie_get_controller_max_link_speed(struct mtk_gen3_pcie *pcie)
+{
+ u32 val;
+ int ret;
+
+ val = readl_relaxed(pcie->base + PCIE_BASE_CFG_REG);
+ val = FIELD_GET(PCIE_BASE_CFG_SPEED, val);
+ ret = fls(val);
+
+ return ret > 0 ? ret : -EINVAL;
}
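
[Aside: mtk_pcie_get_controller_max_link_speed() above treats the PCIE_BASE_CFG_SPEED field as a bitmask of supported generations and uses fls() to pick the highest one. A worked sketch of the decode, assuming the same register layout:

    static int demo_max_gen(u32 base_cfg_val)
    {
            /* e.g. a field value of 0x7 means Gen1..Gen3 -> fls(0x7) = 3 */
            int gen = fls(FIELD_GET(PCIE_BASE_CFG_SPEED, base_cfg_val));

            return gen > 0 ? gen : -EINVAL;
    }
]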
static int mtk_pcie_setup(struct mtk_gen3_pcie *pcie)
{
- int err;
+ int err, max_speed;
err = mtk_pcie_parse_port(pcie);
if (err)
return err;
/*
- * The controller may have been left out of reset by the bootloader
- * so make sure that we get a clean start by asserting resets here.
+ * Deassert the lines in order to avoid an imbalance in the
+ * deassert_count counter, since the bulk reset lines are shared.
*/
- reset_control_assert(pcie->phy_reset);
- reset_control_assert(pcie->mac_reset);
- usleep_range(10, 20);
+ reset_control_bulk_deassert(pcie->soc->phy_resets.num_resets,
+ pcie->phy_resets);
/* Don't touch the hardware registers before power up */
- err = mtk_pcie_power_up(pcie);
+ err = pcie->soc->power_up(pcie);
if (err)
return err;
+ err = of_pci_get_max_link_speed(pcie->dev->of_node);
+ if (err) {
+ /* Get the maximum speed supported by the controller */
+ max_speed = mtk_pcie_get_controller_max_link_speed(pcie);
+
+ /* Set max_link_speed only if the controller supports it */
+ if (max_speed >= 0 && max_speed <= err) {
+ pcie->max_link_speed = err;
+ dev_info(pcie->dev,
+ "maximum controller link speed Gen%d, overriding to Gen%u",
+ max_speed, pcie->max_link_speed);
+ }
+ }
+
/* Try link up */
err = mtk_pcie_startup_port(pcie);
if (err)
@@ -939,6 +1199,7 @@ static int mtk_pcie_probe(struct platform_device *pdev)
pcie = pci_host_bridge_priv(host);
pcie->dev = dev;
+ pcie->soc = device_get_match_data(dev);
platform_set_drvdata(pdev, pcie);
err = mtk_pcie_setup(pcie);
@@ -1036,10 +1297,12 @@ static int mtk_pcie_suspend_noirq(struct device *dev)
return err;
}
- /* Pull down the PERST# pin */
- val = readl_relaxed(pcie->base + PCIE_RST_CTRL_REG);
- val |= PCIE_PE_RSTB;
- writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);
+ if (!(pcie->soc->flags & SKIP_PCIE_RSTB)) {
+ /* Assert the PERST# pin */
+ val = readl_relaxed(pcie->base + PCIE_RST_CTRL_REG);
+ val |= PCIE_PE_RSTB;
+ writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);
+ }
dev_dbg(pcie->dev, "entered L2 states successfully");
@@ -1054,7 +1317,7 @@ static int mtk_pcie_resume_noirq(struct device *dev)
struct mtk_gen3_pcie *pcie = dev_get_drvdata(dev);
int err;
- err = mtk_pcie_power_up(pcie);
+ err = pcie->soc->power_up(pcie);
if (err)
return err;
@@ -1074,21 +1337,43 @@ static const struct dev_pm_ops mtk_pcie_pm_ops = {
mtk_pcie_resume_noirq)
};
+static const struct mtk_gen3_pcie_pdata mtk_pcie_soc_mt8192 = {
+ .power_up = mtk_pcie_power_up,
+ .phy_resets = {
+ .id[0] = "phy",
+ .num_resets = 1,
+ },
+};
+
+static const struct mtk_gen3_pcie_pdata mtk_pcie_soc_en7581 = {
+ .power_up = mtk_pcie_en7581_power_up,
+ .phy_resets = {
+ .id[0] = "phy-lane0",
+ .id[1] = "phy-lane1",
+ .id[2] = "phy-lane2",
+ .num_resets = 3,
+ },
+ .flags = SKIP_PCIE_RSTB,
+};
+
static const struct of_device_id mtk_pcie_of_match[] = {
- { .compatible = "mediatek,mt8192-pcie" },
+ { .compatible = "airoha,en7581-pcie", .data = &mtk_pcie_soc_en7581 },
+ { .compatible = "mediatek,mt8192-pcie", .data = &mtk_pcie_soc_mt8192 },
{},
};
MODULE_DEVICE_TABLE(of, mtk_pcie_of_match);
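
[Aside: the .data pointers in the match table above are how the per-SoC behaviour is selected: probe() caches device_get_match_data() in pcie->soc and every later power-up goes through the callback. A minimal sketch of the dispatch pattern, reusing the hypothetical demo_pcie from the earlier aside:

    struct demo_pdata {
            int (*power_up)(struct demo_pcie *pcie);
            u32 flags;
    };

    static int demo_power_up(struct demo_pcie *pcie)
    {
            /* Resolves to the .data member of the matched of_device_id entry. */
            const struct demo_pdata *soc = device_get_match_data(pcie->dev);

            if (!soc || !soc->power_up)
                    return -EINVAL;

            return soc->power_up(pcie);
    }
]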
static struct platform_driver mtk_pcie_driver = {
.probe = mtk_pcie_probe,
- .remove_new = mtk_pcie_remove,
+ .remove = mtk_pcie_remove,
.driver = {
.name = "mtk-pcie-gen3",
.of_match_table = mtk_pcie_of_match,
.pm = &mtk_pcie_pm_ops,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
};
module_platform_driver(mtk_pcie_driver);
+MODULE_DESCRIPTION("MediaTek Gen3 PCIe host controller driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/pcie-mediatek.c b/drivers/pci/controller/pcie-mediatek.c
index 48372013f26d..e1934aa06c8d 100644
--- a/drivers/pci/controller/pcie-mediatek.c
+++ b/drivers/pci/controller/pcie-mediatek.c
@@ -211,7 +211,6 @@ struct mtk_pcie_port {
* @base: IO mapped register base
* @cfg: IO mapped register map for PCIe config
* @free_ck: free-run reference clock
- * @mem: non-prefetchable memory resource
* @ports: pointer to PCIe port information
* @soc: pointer to SoC-dependent operations
*/
@@ -407,12 +406,6 @@ static void mtk_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
(int)data->hwirq, msg->address_hi, msg->address_lo);
}
-static int mtk_msi_set_affinity(struct irq_data *irq_data,
- const struct cpumask *mask, bool force)
-{
- return -EINVAL;
-}
-
static void mtk_msi_ack_irq(struct irq_data *data)
{
struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data);
@@ -424,7 +417,6 @@ static void mtk_msi_ack_irq(struct irq_data *data)
static struct irq_chip mtk_msi_bottom_irq_chip = {
.name = "MTK MSI",
.irq_compose_msi_msg = mtk_compose_msi_msg,
- .irq_set_affinity = mtk_msi_set_affinity,
.irq_ack = mtk_msi_ack_irq,
};
@@ -486,14 +478,14 @@ static struct irq_chip mtk_msi_irq_chip = {
};
static struct msi_domain_info mtk_msi_domain_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_PCI_MSIX),
+ .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX,
.chip = &mtk_msi_irq_chip,
};
static int mtk_pcie_allocate_msi_domains(struct mtk_pcie_port *port)
{
- struct fwnode_handle *fwnode = of_node_to_fwnode(port->pcie->dev->of_node);
+ struct fwnode_handle *fwnode = of_fwnode_handle(port->pcie->dev->of_node);
mutex_init(&port->lock);
@@ -577,8 +569,8 @@ static int mtk_pcie_init_irq_domain(struct mtk_pcie_port *port,
return -ENODEV;
}
- port->irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
- &intx_domain_ops, port);
+ port->irq_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node), PCI_NUM_INTX,
+ &intx_domain_ops, port);
of_node_put(pcie_intc_node);
if (!port->irq_domain) {
dev_err(dev, "failed to get INTx IRQ domain\n");
@@ -1049,24 +1041,22 @@ err_free_ck:
static int mtk_pcie_setup(struct mtk_pcie *pcie)
{
struct device *dev = pcie->dev;
- struct device_node *node = dev->of_node, *child;
+ struct device_node *node = dev->of_node;
struct mtk_pcie_port *port, *tmp;
int err, slot;
slot = of_get_pci_domain_nr(dev->of_node);
if (slot < 0) {
- for_each_available_child_of_node(node, child) {
+ for_each_available_child_of_node_scoped(node, child) {
err = of_pci_get_devfn(child);
- if (err < 0) {
- dev_err(dev, "failed to get devfn: %d\n", err);
- goto error_put_node;
- }
+ if (err < 0)
+ return dev_err_probe(dev, err, "failed to get devfn\n");
slot = PCI_SLOT(err);
err = mtk_pcie_parse_port(pcie, child, slot);
if (err)
- goto error_put_node;
+ return err;
}
} else {
err = mtk_pcie_parse_port(pcie, node, slot);
@@ -1087,9 +1077,6 @@ static int mtk_pcie_setup(struct mtk_pcie *pcie)
mtk_pcie_subsys_powerdown(pcie);
return 0;
-error_put_node:
- of_node_put(child);
- return err;
}
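
[Aside: the error_put_node label disappears because for_each_available_child_of_node_scoped() drops the child reference automatically when the loop variable goes out of scope, so early returns no longer leak it. A minimal sketch of the pattern:

    static int demo_parse_children(struct device *dev)
    {
            for_each_available_child_of_node_scoped(dev->of_node, child) {
                    int devfn = of_pci_get_devfn(child);

                    /* No of_node_put() needed on this early-return path. */
                    if (devfn < 0)
                            return dev_err_probe(dev, devfn,
                                                 "failed to get devfn\n");
            }

            return 0;
    }
]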
static int mtk_pcie_probe(struct platform_device *pdev)
@@ -1243,7 +1230,7 @@ MODULE_DEVICE_TABLE(of, mtk_pcie_ids);
static struct platform_driver mtk_pcie_driver = {
.probe = mtk_pcie_probe,
- .remove_new = mtk_pcie_remove,
+ .remove = mtk_pcie_remove,
.driver = {
.name = "mtk-pcie",
.of_match_table = mtk_pcie_ids,
@@ -1252,4 +1239,5 @@ static struct platform_driver mtk_pcie_driver = {
},
};
module_platform_driver(mtk_pcie_driver);
+MODULE_DESCRIPTION("MediaTek PCIe host controller driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/pcie-microchip-host.c b/drivers/pci/controller/pcie-microchip-host.c
deleted file mode 100644
index 137fb8570ba2..000000000000
--- a/drivers/pci/controller/pcie-microchip-host.c
+++ /dev/null
@@ -1,1216 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Microchip AXI PCIe Bridge host controller driver
- *
- * Copyright (c) 2018 - 2020 Microchip Corporation. All rights reserved.
- *
- * Author: Daire McNamara <daire.mcnamara@microchip.com>
- */
-
-#include <linux/bitfield.h>
-#include <linux/clk.h>
-#include <linux/irqchip/chained_irq.h>
-#include <linux/irqdomain.h>
-#include <linux/module.h>
-#include <linux/msi.h>
-#include <linux/of_address.h>
-#include <linux/of_pci.h>
-#include <linux/pci-ecam.h>
-#include <linux/platform_device.h>
-
-#include "../pci.h"
-
-/* Number of MSI IRQs */
-#define MC_MAX_NUM_MSI_IRQS 32
-
-/* PCIe Bridge Phy and Controller Phy offsets */
-#define MC_PCIE1_BRIDGE_ADDR 0x00008000u
-#define MC_PCIE1_CTRL_ADDR 0x0000a000u
-
-#define MC_PCIE_BRIDGE_ADDR (MC_PCIE1_BRIDGE_ADDR)
-#define MC_PCIE_CTRL_ADDR (MC_PCIE1_CTRL_ADDR)
-
-/* PCIe Bridge Phy Regs */
-#define PCIE_PCI_IRQ_DW0 0xa8
-#define MSIX_CAP_MASK BIT(31)
-#define NUM_MSI_MSGS_MASK GENMASK(6, 4)
-#define NUM_MSI_MSGS_SHIFT 4
-
-#define IMASK_LOCAL 0x180
-#define DMA_END_ENGINE_0_MASK 0x00000000u
-#define DMA_END_ENGINE_0_SHIFT 0
-#define DMA_END_ENGINE_1_MASK 0x00000000u
-#define DMA_END_ENGINE_1_SHIFT 1
-#define DMA_ERROR_ENGINE_0_MASK 0x00000100u
-#define DMA_ERROR_ENGINE_0_SHIFT 8
-#define DMA_ERROR_ENGINE_1_MASK 0x00000200u
-#define DMA_ERROR_ENGINE_1_SHIFT 9
-#define A_ATR_EVT_POST_ERR_MASK 0x00010000u
-#define A_ATR_EVT_POST_ERR_SHIFT 16
-#define A_ATR_EVT_FETCH_ERR_MASK 0x00020000u
-#define A_ATR_EVT_FETCH_ERR_SHIFT 17
-#define A_ATR_EVT_DISCARD_ERR_MASK 0x00040000u
-#define A_ATR_EVT_DISCARD_ERR_SHIFT 18
-#define A_ATR_EVT_DOORBELL_MASK 0x00000000u
-#define A_ATR_EVT_DOORBELL_SHIFT 19
-#define P_ATR_EVT_POST_ERR_MASK 0x00100000u
-#define P_ATR_EVT_POST_ERR_SHIFT 20
-#define P_ATR_EVT_FETCH_ERR_MASK 0x00200000u
-#define P_ATR_EVT_FETCH_ERR_SHIFT 21
-#define P_ATR_EVT_DISCARD_ERR_MASK 0x00400000u
-#define P_ATR_EVT_DISCARD_ERR_SHIFT 22
-#define P_ATR_EVT_DOORBELL_MASK 0x00000000u
-#define P_ATR_EVT_DOORBELL_SHIFT 23
-#define PM_MSI_INT_INTA_MASK 0x01000000u
-#define PM_MSI_INT_INTA_SHIFT 24
-#define PM_MSI_INT_INTB_MASK 0x02000000u
-#define PM_MSI_INT_INTB_SHIFT 25
-#define PM_MSI_INT_INTC_MASK 0x04000000u
-#define PM_MSI_INT_INTC_SHIFT 26
-#define PM_MSI_INT_INTD_MASK 0x08000000u
-#define PM_MSI_INT_INTD_SHIFT 27
-#define PM_MSI_INT_INTX_MASK 0x0f000000u
-#define PM_MSI_INT_INTX_SHIFT 24
-#define PM_MSI_INT_MSI_MASK 0x10000000u
-#define PM_MSI_INT_MSI_SHIFT 28
-#define PM_MSI_INT_AER_EVT_MASK 0x20000000u
-#define PM_MSI_INT_AER_EVT_SHIFT 29
-#define PM_MSI_INT_EVENTS_MASK 0x40000000u
-#define PM_MSI_INT_EVENTS_SHIFT 30
-#define PM_MSI_INT_SYS_ERR_MASK 0x80000000u
-#define PM_MSI_INT_SYS_ERR_SHIFT 31
-#define NUM_LOCAL_EVENTS 15
-#define ISTATUS_LOCAL 0x184
-#define IMASK_HOST 0x188
-#define ISTATUS_HOST 0x18c
-#define IMSI_ADDR 0x190
-#define ISTATUS_MSI 0x194
-
-/* PCIe Master table init defines */
-#define ATR0_PCIE_WIN0_SRCADDR_PARAM 0x600u
-#define ATR0_PCIE_ATR_SIZE 0x25
-#define ATR0_PCIE_ATR_SIZE_SHIFT 1
-#define ATR0_PCIE_WIN0_SRC_ADDR 0x604u
-#define ATR0_PCIE_WIN0_TRSL_ADDR_LSB 0x608u
-#define ATR0_PCIE_WIN0_TRSL_ADDR_UDW 0x60cu
-#define ATR0_PCIE_WIN0_TRSL_PARAM 0x610u
-
-/* PCIe AXI slave table init defines */
-#define ATR0_AXI4_SLV0_SRCADDR_PARAM 0x800u
-#define ATR_SIZE_SHIFT 1
-#define ATR_IMPL_ENABLE 1
-#define ATR0_AXI4_SLV0_SRC_ADDR 0x804u
-#define ATR0_AXI4_SLV0_TRSL_ADDR_LSB 0x808u
-#define ATR0_AXI4_SLV0_TRSL_ADDR_UDW 0x80cu
-#define ATR0_AXI4_SLV0_TRSL_PARAM 0x810u
-#define PCIE_TX_RX_INTERFACE 0x00000000u
-#define PCIE_CONFIG_INTERFACE 0x00000001u
-
-#define ATR_ENTRY_SIZE 32
-
-/* PCIe Controller Phy Regs */
-#define SEC_ERROR_EVENT_CNT 0x20
-#define DED_ERROR_EVENT_CNT 0x24
-#define SEC_ERROR_INT 0x28
-#define SEC_ERROR_INT_TX_RAM_SEC_ERR_INT GENMASK(3, 0)
-#define SEC_ERROR_INT_RX_RAM_SEC_ERR_INT GENMASK(7, 4)
-#define SEC_ERROR_INT_PCIE2AXI_RAM_SEC_ERR_INT GENMASK(11, 8)
-#define SEC_ERROR_INT_AXI2PCIE_RAM_SEC_ERR_INT GENMASK(15, 12)
-#define SEC_ERROR_INT_ALL_RAM_SEC_ERR_INT GENMASK(15, 0)
-#define NUM_SEC_ERROR_INTS (4)
-#define SEC_ERROR_INT_MASK 0x2c
-#define DED_ERROR_INT 0x30
-#define DED_ERROR_INT_TX_RAM_DED_ERR_INT GENMASK(3, 0)
-#define DED_ERROR_INT_RX_RAM_DED_ERR_INT GENMASK(7, 4)
-#define DED_ERROR_INT_PCIE2AXI_RAM_DED_ERR_INT GENMASK(11, 8)
-#define DED_ERROR_INT_AXI2PCIE_RAM_DED_ERR_INT GENMASK(15, 12)
-#define DED_ERROR_INT_ALL_RAM_DED_ERR_INT GENMASK(15, 0)
-#define NUM_DED_ERROR_INTS (4)
-#define DED_ERROR_INT_MASK 0x34
-#define ECC_CONTROL 0x38
-#define ECC_CONTROL_TX_RAM_INJ_ERROR_0 BIT(0)
-#define ECC_CONTROL_TX_RAM_INJ_ERROR_1 BIT(1)
-#define ECC_CONTROL_TX_RAM_INJ_ERROR_2 BIT(2)
-#define ECC_CONTROL_TX_RAM_INJ_ERROR_3 BIT(3)
-#define ECC_CONTROL_RX_RAM_INJ_ERROR_0 BIT(4)
-#define ECC_CONTROL_RX_RAM_INJ_ERROR_1 BIT(5)
-#define ECC_CONTROL_RX_RAM_INJ_ERROR_2 BIT(6)
-#define ECC_CONTROL_RX_RAM_INJ_ERROR_3 BIT(7)
-#define ECC_CONTROL_PCIE2AXI_RAM_INJ_ERROR_0 BIT(8)
-#define ECC_CONTROL_PCIE2AXI_RAM_INJ_ERROR_1 BIT(9)
-#define ECC_CONTROL_PCIE2AXI_RAM_INJ_ERROR_2 BIT(10)
-#define ECC_CONTROL_PCIE2AXI_RAM_INJ_ERROR_3 BIT(11)
-#define ECC_CONTROL_AXI2PCIE_RAM_INJ_ERROR_0 BIT(12)
-#define ECC_CONTROL_AXI2PCIE_RAM_INJ_ERROR_1 BIT(13)
-#define ECC_CONTROL_AXI2PCIE_RAM_INJ_ERROR_2 BIT(14)
-#define ECC_CONTROL_AXI2PCIE_RAM_INJ_ERROR_3 BIT(15)
-#define ECC_CONTROL_TX_RAM_ECC_BYPASS BIT(24)
-#define ECC_CONTROL_RX_RAM_ECC_BYPASS BIT(25)
-#define ECC_CONTROL_PCIE2AXI_RAM_ECC_BYPASS BIT(26)
-#define ECC_CONTROL_AXI2PCIE_RAM_ECC_BYPASS BIT(27)
-#define PCIE_EVENT_INT 0x14c
-#define PCIE_EVENT_INT_L2_EXIT_INT BIT(0)
-#define PCIE_EVENT_INT_HOTRST_EXIT_INT BIT(1)
-#define PCIE_EVENT_INT_DLUP_EXIT_INT BIT(2)
-#define PCIE_EVENT_INT_MASK GENMASK(2, 0)
-#define PCIE_EVENT_INT_L2_EXIT_INT_MASK BIT(16)
-#define PCIE_EVENT_INT_HOTRST_EXIT_INT_MASK BIT(17)
-#define PCIE_EVENT_INT_DLUP_EXIT_INT_MASK BIT(18)
-#define PCIE_EVENT_INT_ENB_MASK GENMASK(18, 16)
-#define PCIE_EVENT_INT_ENB_SHIFT 16
-#define NUM_PCIE_EVENTS (3)
-
-/* PCIe Config space MSI capability structure */
-#define MC_MSI_CAP_CTRL_OFFSET 0xe0u
-
-/* Events */
-#define EVENT_PCIE_L2_EXIT 0
-#define EVENT_PCIE_HOTRST_EXIT 1
-#define EVENT_PCIE_DLUP_EXIT 2
-#define EVENT_SEC_TX_RAM_SEC_ERR 3
-#define EVENT_SEC_RX_RAM_SEC_ERR 4
-#define EVENT_SEC_PCIE2AXI_RAM_SEC_ERR 5
-#define EVENT_SEC_AXI2PCIE_RAM_SEC_ERR 6
-#define EVENT_DED_TX_RAM_DED_ERR 7
-#define EVENT_DED_RX_RAM_DED_ERR 8
-#define EVENT_DED_PCIE2AXI_RAM_DED_ERR 9
-#define EVENT_DED_AXI2PCIE_RAM_DED_ERR 10
-#define EVENT_LOCAL_DMA_END_ENGINE_0 11
-#define EVENT_LOCAL_DMA_END_ENGINE_1 12
-#define EVENT_LOCAL_DMA_ERROR_ENGINE_0 13
-#define EVENT_LOCAL_DMA_ERROR_ENGINE_1 14
-#define EVENT_LOCAL_A_ATR_EVT_POST_ERR 15
-#define EVENT_LOCAL_A_ATR_EVT_FETCH_ERR 16
-#define EVENT_LOCAL_A_ATR_EVT_DISCARD_ERR 17
-#define EVENT_LOCAL_A_ATR_EVT_DOORBELL 18
-#define EVENT_LOCAL_P_ATR_EVT_POST_ERR 19
-#define EVENT_LOCAL_P_ATR_EVT_FETCH_ERR 20
-#define EVENT_LOCAL_P_ATR_EVT_DISCARD_ERR 21
-#define EVENT_LOCAL_P_ATR_EVT_DOORBELL 22
-#define EVENT_LOCAL_PM_MSI_INT_INTX 23
-#define EVENT_LOCAL_PM_MSI_INT_MSI 24
-#define EVENT_LOCAL_PM_MSI_INT_AER_EVT 25
-#define EVENT_LOCAL_PM_MSI_INT_EVENTS 26
-#define EVENT_LOCAL_PM_MSI_INT_SYS_ERR 27
-#define NUM_EVENTS 28
-
-#define PCIE_EVENT_CAUSE(x, s) \
- [EVENT_PCIE_ ## x] = { __stringify(x), s }
-
-#define SEC_ERROR_CAUSE(x, s) \
- [EVENT_SEC_ ## x] = { __stringify(x), s }
-
-#define DED_ERROR_CAUSE(x, s) \
- [EVENT_DED_ ## x] = { __stringify(x), s }
-
-#define LOCAL_EVENT_CAUSE(x, s) \
- [EVENT_LOCAL_ ## x] = { __stringify(x), s }
-
-#define PCIE_EVENT(x) \
- .base = MC_PCIE_CTRL_ADDR, \
- .offset = PCIE_EVENT_INT, \
- .mask_offset = PCIE_EVENT_INT, \
- .mask_high = 1, \
- .mask = PCIE_EVENT_INT_ ## x ## _INT, \
- .enb_mask = PCIE_EVENT_INT_ENB_MASK
-
-#define SEC_EVENT(x) \
- .base = MC_PCIE_CTRL_ADDR, \
- .offset = SEC_ERROR_INT, \
- .mask_offset = SEC_ERROR_INT_MASK, \
- .mask = SEC_ERROR_INT_ ## x ## _INT, \
- .mask_high = 1, \
- .enb_mask = 0
-
-#define DED_EVENT(x) \
- .base = MC_PCIE_CTRL_ADDR, \
- .offset = DED_ERROR_INT, \
- .mask_offset = DED_ERROR_INT_MASK, \
- .mask_high = 1, \
- .mask = DED_ERROR_INT_ ## x ## _INT, \
- .enb_mask = 0
-
-#define LOCAL_EVENT(x) \
- .base = MC_PCIE_BRIDGE_ADDR, \
- .offset = ISTATUS_LOCAL, \
- .mask_offset = IMASK_LOCAL, \
- .mask_high = 0, \
- .mask = x ## _MASK, \
- .enb_mask = 0
-
-#define PCIE_EVENT_TO_EVENT_MAP(x) \
- { PCIE_EVENT_INT_ ## x ## _INT, EVENT_PCIE_ ## x }
-
-#define SEC_ERROR_TO_EVENT_MAP(x) \
- { SEC_ERROR_INT_ ## x ## _INT, EVENT_SEC_ ## x }
-
-#define DED_ERROR_TO_EVENT_MAP(x) \
- { DED_ERROR_INT_ ## x ## _INT, EVENT_DED_ ## x }
-
-#define LOCAL_STATUS_TO_EVENT_MAP(x) \
- { x ## _MASK, EVENT_LOCAL_ ## x }
-
-struct event_map {
- u32 reg_mask;
- u32 event_bit;
-};
-
-struct mc_msi {
- struct mutex lock; /* Protect used bitmap */
- struct irq_domain *msi_domain;
- struct irq_domain *dev_domain;
- u32 num_vectors;
- u64 vector_phy;
- DECLARE_BITMAP(used, MC_MAX_NUM_MSI_IRQS);
-};
-
-struct mc_pcie {
- void __iomem *axi_base_addr;
- struct device *dev;
- struct irq_domain *intx_domain;
- struct irq_domain *event_domain;
- raw_spinlock_t lock;
- struct mc_msi msi;
-};
-
-struct cause {
- const char *sym;
- const char *str;
-};
-
-static const struct cause event_cause[NUM_EVENTS] = {
- PCIE_EVENT_CAUSE(L2_EXIT, "L2 exit event"),
- PCIE_EVENT_CAUSE(HOTRST_EXIT, "Hot reset exit event"),
- PCIE_EVENT_CAUSE(DLUP_EXIT, "DLUP exit event"),
- SEC_ERROR_CAUSE(TX_RAM_SEC_ERR, "sec error in tx buffer"),
- SEC_ERROR_CAUSE(RX_RAM_SEC_ERR, "sec error in rx buffer"),
- SEC_ERROR_CAUSE(PCIE2AXI_RAM_SEC_ERR, "sec error in pcie2axi buffer"),
- SEC_ERROR_CAUSE(AXI2PCIE_RAM_SEC_ERR, "sec error in axi2pcie buffer"),
- DED_ERROR_CAUSE(TX_RAM_DED_ERR, "ded error in tx buffer"),
- DED_ERROR_CAUSE(RX_RAM_DED_ERR, "ded error in rx buffer"),
- DED_ERROR_CAUSE(PCIE2AXI_RAM_DED_ERR, "ded error in pcie2axi buffer"),
- DED_ERROR_CAUSE(AXI2PCIE_RAM_DED_ERR, "ded error in axi2pcie buffer"),
- LOCAL_EVENT_CAUSE(DMA_ERROR_ENGINE_0, "dma engine 0 error"),
- LOCAL_EVENT_CAUSE(DMA_ERROR_ENGINE_1, "dma engine 1 error"),
- LOCAL_EVENT_CAUSE(A_ATR_EVT_POST_ERR, "axi write request error"),
- LOCAL_EVENT_CAUSE(A_ATR_EVT_FETCH_ERR, "axi read request error"),
- LOCAL_EVENT_CAUSE(A_ATR_EVT_DISCARD_ERR, "axi read timeout"),
- LOCAL_EVENT_CAUSE(P_ATR_EVT_POST_ERR, "pcie write request error"),
- LOCAL_EVENT_CAUSE(P_ATR_EVT_FETCH_ERR, "pcie read request error"),
- LOCAL_EVENT_CAUSE(P_ATR_EVT_DISCARD_ERR, "pcie read timeout"),
- LOCAL_EVENT_CAUSE(PM_MSI_INT_AER_EVT, "aer event"),
- LOCAL_EVENT_CAUSE(PM_MSI_INT_EVENTS, "pm/ltr/hotplug event"),
- LOCAL_EVENT_CAUSE(PM_MSI_INT_SYS_ERR, "system error"),
-};
-
-static struct event_map pcie_event_to_event[] = {
- PCIE_EVENT_TO_EVENT_MAP(L2_EXIT),
- PCIE_EVENT_TO_EVENT_MAP(HOTRST_EXIT),
- PCIE_EVENT_TO_EVENT_MAP(DLUP_EXIT),
-};
-
-static struct event_map sec_error_to_event[] = {
- SEC_ERROR_TO_EVENT_MAP(TX_RAM_SEC_ERR),
- SEC_ERROR_TO_EVENT_MAP(RX_RAM_SEC_ERR),
- SEC_ERROR_TO_EVENT_MAP(PCIE2AXI_RAM_SEC_ERR),
- SEC_ERROR_TO_EVENT_MAP(AXI2PCIE_RAM_SEC_ERR),
-};
-
-static struct event_map ded_error_to_event[] = {
- DED_ERROR_TO_EVENT_MAP(TX_RAM_DED_ERR),
- DED_ERROR_TO_EVENT_MAP(RX_RAM_DED_ERR),
- DED_ERROR_TO_EVENT_MAP(PCIE2AXI_RAM_DED_ERR),
- DED_ERROR_TO_EVENT_MAP(AXI2PCIE_RAM_DED_ERR),
-};
-
-static struct event_map local_status_to_event[] = {
- LOCAL_STATUS_TO_EVENT_MAP(DMA_END_ENGINE_0),
- LOCAL_STATUS_TO_EVENT_MAP(DMA_END_ENGINE_1),
- LOCAL_STATUS_TO_EVENT_MAP(DMA_ERROR_ENGINE_0),
- LOCAL_STATUS_TO_EVENT_MAP(DMA_ERROR_ENGINE_1),
- LOCAL_STATUS_TO_EVENT_MAP(A_ATR_EVT_POST_ERR),
- LOCAL_STATUS_TO_EVENT_MAP(A_ATR_EVT_FETCH_ERR),
- LOCAL_STATUS_TO_EVENT_MAP(A_ATR_EVT_DISCARD_ERR),
- LOCAL_STATUS_TO_EVENT_MAP(A_ATR_EVT_DOORBELL),
- LOCAL_STATUS_TO_EVENT_MAP(P_ATR_EVT_POST_ERR),
- LOCAL_STATUS_TO_EVENT_MAP(P_ATR_EVT_FETCH_ERR),
- LOCAL_STATUS_TO_EVENT_MAP(P_ATR_EVT_DISCARD_ERR),
- LOCAL_STATUS_TO_EVENT_MAP(P_ATR_EVT_DOORBELL),
- LOCAL_STATUS_TO_EVENT_MAP(PM_MSI_INT_INTX),
- LOCAL_STATUS_TO_EVENT_MAP(PM_MSI_INT_MSI),
- LOCAL_STATUS_TO_EVENT_MAP(PM_MSI_INT_AER_EVT),
- LOCAL_STATUS_TO_EVENT_MAP(PM_MSI_INT_EVENTS),
- LOCAL_STATUS_TO_EVENT_MAP(PM_MSI_INT_SYS_ERR),
-};
-
-static struct {
- u32 base;
- u32 offset;
- u32 mask;
- u32 shift;
- u32 enb_mask;
- u32 mask_high;
- u32 mask_offset;
-} event_descs[] = {
- { PCIE_EVENT(L2_EXIT) },
- { PCIE_EVENT(HOTRST_EXIT) },
- { PCIE_EVENT(DLUP_EXIT) },
- { SEC_EVENT(TX_RAM_SEC_ERR) },
- { SEC_EVENT(RX_RAM_SEC_ERR) },
- { SEC_EVENT(PCIE2AXI_RAM_SEC_ERR) },
- { SEC_EVENT(AXI2PCIE_RAM_SEC_ERR) },
- { DED_EVENT(TX_RAM_DED_ERR) },
- { DED_EVENT(RX_RAM_DED_ERR) },
- { DED_EVENT(PCIE2AXI_RAM_DED_ERR) },
- { DED_EVENT(AXI2PCIE_RAM_DED_ERR) },
- { LOCAL_EVENT(DMA_END_ENGINE_0) },
- { LOCAL_EVENT(DMA_END_ENGINE_1) },
- { LOCAL_EVENT(DMA_ERROR_ENGINE_0) },
- { LOCAL_EVENT(DMA_ERROR_ENGINE_1) },
- { LOCAL_EVENT(A_ATR_EVT_POST_ERR) },
- { LOCAL_EVENT(A_ATR_EVT_FETCH_ERR) },
- { LOCAL_EVENT(A_ATR_EVT_DISCARD_ERR) },
- { LOCAL_EVENT(A_ATR_EVT_DOORBELL) },
- { LOCAL_EVENT(P_ATR_EVT_POST_ERR) },
- { LOCAL_EVENT(P_ATR_EVT_FETCH_ERR) },
- { LOCAL_EVENT(P_ATR_EVT_DISCARD_ERR) },
- { LOCAL_EVENT(P_ATR_EVT_DOORBELL) },
- { LOCAL_EVENT(PM_MSI_INT_INTX) },
- { LOCAL_EVENT(PM_MSI_INT_MSI) },
- { LOCAL_EVENT(PM_MSI_INT_AER_EVT) },
- { LOCAL_EVENT(PM_MSI_INT_EVENTS) },
- { LOCAL_EVENT(PM_MSI_INT_SYS_ERR) },
-};
-
-static char poss_clks[][5] = { "fic0", "fic1", "fic2", "fic3" };
-
-static struct mc_pcie *port;
-
-static void mc_pcie_enable_msi(struct mc_pcie *port, void __iomem *ecam)
-{
- struct mc_msi *msi = &port->msi;
- u16 reg;
- u8 queue_size;
-
- /* Fixup MSI enable flag */
- reg = readw_relaxed(ecam + MC_MSI_CAP_CTRL_OFFSET + PCI_MSI_FLAGS);
- reg |= PCI_MSI_FLAGS_ENABLE;
- writew_relaxed(reg, ecam + MC_MSI_CAP_CTRL_OFFSET + PCI_MSI_FLAGS);
-
- /* Fixup PCI MSI queue flags */
- queue_size = FIELD_GET(PCI_MSI_FLAGS_QMASK, reg);
- reg |= FIELD_PREP(PCI_MSI_FLAGS_QSIZE, queue_size);
- writew_relaxed(reg, ecam + MC_MSI_CAP_CTRL_OFFSET + PCI_MSI_FLAGS);
-
- /* Fixup MSI addr fields */
- writel_relaxed(lower_32_bits(msi->vector_phy),
- ecam + MC_MSI_CAP_CTRL_OFFSET + PCI_MSI_ADDRESS_LO);
- writel_relaxed(upper_32_bits(msi->vector_phy),
- ecam + MC_MSI_CAP_CTRL_OFFSET + PCI_MSI_ADDRESS_HI);
-}
-
-static void mc_handle_msi(struct irq_desc *desc)
-{
- struct mc_pcie *port = irq_desc_get_handler_data(desc);
- struct irq_chip *chip = irq_desc_get_chip(desc);
- struct device *dev = port->dev;
- struct mc_msi *msi = &port->msi;
- void __iomem *bridge_base_addr =
- port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
- unsigned long status;
- u32 bit;
- int ret;
-
- chained_irq_enter(chip, desc);
-
- status = readl_relaxed(bridge_base_addr + ISTATUS_LOCAL);
- if (status & PM_MSI_INT_MSI_MASK) {
- writel_relaxed(status & PM_MSI_INT_MSI_MASK, bridge_base_addr + ISTATUS_LOCAL);
- status = readl_relaxed(bridge_base_addr + ISTATUS_MSI);
- for_each_set_bit(bit, &status, msi->num_vectors) {
- ret = generic_handle_domain_irq(msi->dev_domain, bit);
- if (ret)
- dev_err_ratelimited(dev, "bad MSI IRQ %d\n",
- bit);
- }
- }
-
- chained_irq_exit(chip, desc);
-}
-
-static void mc_msi_bottom_irq_ack(struct irq_data *data)
-{
- struct mc_pcie *port = irq_data_get_irq_chip_data(data);
- void __iomem *bridge_base_addr =
- port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
- u32 bitpos = data->hwirq;
-
- writel_relaxed(BIT(bitpos), bridge_base_addr + ISTATUS_MSI);
-}
-
-static void mc_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
-{
- struct mc_pcie *port = irq_data_get_irq_chip_data(data);
- phys_addr_t addr = port->msi.vector_phy;
-
- msg->address_lo = lower_32_bits(addr);
- msg->address_hi = upper_32_bits(addr);
- msg->data = data->hwirq;
-
- dev_dbg(port->dev, "msi#%x address_hi %#x address_lo %#x\n",
- (int)data->hwirq, msg->address_hi, msg->address_lo);
-}
-
-static int mc_msi_set_affinity(struct irq_data *irq_data,
- const struct cpumask *mask, bool force)
-{
- return -EINVAL;
-}
-
-static struct irq_chip mc_msi_bottom_irq_chip = {
- .name = "Microchip MSI",
- .irq_ack = mc_msi_bottom_irq_ack,
- .irq_compose_msi_msg = mc_compose_msi_msg,
- .irq_set_affinity = mc_msi_set_affinity,
-};
-
-static int mc_irq_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
- unsigned int nr_irqs, void *args)
-{
- struct mc_pcie *port = domain->host_data;
- struct mc_msi *msi = &port->msi;
- unsigned long bit;
-
- mutex_lock(&msi->lock);
- bit = find_first_zero_bit(msi->used, msi->num_vectors);
- if (bit >= msi->num_vectors) {
- mutex_unlock(&msi->lock);
- return -ENOSPC;
- }
-
- set_bit(bit, msi->used);
-
- irq_domain_set_info(domain, virq, bit, &mc_msi_bottom_irq_chip,
- domain->host_data, handle_edge_irq, NULL, NULL);
-
- mutex_unlock(&msi->lock);
-
- return 0;
-}
-
-static void mc_irq_msi_domain_free(struct irq_domain *domain, unsigned int virq,
- unsigned int nr_irqs)
-{
- struct irq_data *d = irq_domain_get_irq_data(domain, virq);
- struct mc_pcie *port = irq_data_get_irq_chip_data(d);
- struct mc_msi *msi = &port->msi;
-
- mutex_lock(&msi->lock);
-
- if (test_bit(d->hwirq, msi->used))
- __clear_bit(d->hwirq, msi->used);
- else
- dev_err(port->dev, "trying to free unused MSI%lu\n", d->hwirq);
-
- mutex_unlock(&msi->lock);
-}
-
-static const struct irq_domain_ops msi_domain_ops = {
- .alloc = mc_irq_msi_domain_alloc,
- .free = mc_irq_msi_domain_free,
-};
-
-static struct irq_chip mc_msi_irq_chip = {
- .name = "Microchip PCIe MSI",
- .irq_ack = irq_chip_ack_parent,
- .irq_mask = pci_msi_mask_irq,
- .irq_unmask = pci_msi_unmask_irq,
-};
-
-static struct msi_domain_info mc_msi_domain_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_PCI_MSIX),
- .chip = &mc_msi_irq_chip,
-};
-
-static int mc_allocate_msi_domains(struct mc_pcie *port)
-{
- struct device *dev = port->dev;
- struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node);
- struct mc_msi *msi = &port->msi;
-
- mutex_init(&port->msi.lock);
-
- msi->dev_domain = irq_domain_add_linear(NULL, msi->num_vectors,
- &msi_domain_ops, port);
- if (!msi->dev_domain) {
- dev_err(dev, "failed to create IRQ domain\n");
- return -ENOMEM;
- }
-
- msi->msi_domain = pci_msi_create_irq_domain(fwnode, &mc_msi_domain_info,
- msi->dev_domain);
- if (!msi->msi_domain) {
- dev_err(dev, "failed to create MSI domain\n");
- irq_domain_remove(msi->dev_domain);
- return -ENOMEM;
- }
-
- return 0;
-}
-
-static void mc_handle_intx(struct irq_desc *desc)
-{
- struct mc_pcie *port = irq_desc_get_handler_data(desc);
- struct irq_chip *chip = irq_desc_get_chip(desc);
- struct device *dev = port->dev;
- void __iomem *bridge_base_addr =
- port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
- unsigned long status;
- u32 bit;
- int ret;
-
- chained_irq_enter(chip, desc);
-
- status = readl_relaxed(bridge_base_addr + ISTATUS_LOCAL);
- if (status & PM_MSI_INT_INTX_MASK) {
- status &= PM_MSI_INT_INTX_MASK;
- status >>= PM_MSI_INT_INTX_SHIFT;
- for_each_set_bit(bit, &status, PCI_NUM_INTX) {
- ret = generic_handle_domain_irq(port->intx_domain, bit);
- if (ret)
- dev_err_ratelimited(dev, "bad INTx IRQ %d\n",
- bit);
- }
- }
-
- chained_irq_exit(chip, desc);
-}
-
-static void mc_ack_intx_irq(struct irq_data *data)
-{
- struct mc_pcie *port = irq_data_get_irq_chip_data(data);
- void __iomem *bridge_base_addr =
- port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
- u32 mask = BIT(data->hwirq + PM_MSI_INT_INTX_SHIFT);
-
- writel_relaxed(mask, bridge_base_addr + ISTATUS_LOCAL);
-}
-
-static void mc_mask_intx_irq(struct irq_data *data)
-{
- struct mc_pcie *port = irq_data_get_irq_chip_data(data);
- void __iomem *bridge_base_addr =
- port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
- unsigned long flags;
- u32 mask = BIT(data->hwirq + PM_MSI_INT_INTX_SHIFT);
- u32 val;
-
- raw_spin_lock_irqsave(&port->lock, flags);
- val = readl_relaxed(bridge_base_addr + IMASK_LOCAL);
- val &= ~mask;
- writel_relaxed(val, bridge_base_addr + IMASK_LOCAL);
- raw_spin_unlock_irqrestore(&port->lock, flags);
-}
-
-static void mc_unmask_intx_irq(struct irq_data *data)
-{
- struct mc_pcie *port = irq_data_get_irq_chip_data(data);
- void __iomem *bridge_base_addr =
- port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
- unsigned long flags;
- u32 mask = BIT(data->hwirq + PM_MSI_INT_INTX_SHIFT);
- u32 val;
-
- raw_spin_lock_irqsave(&port->lock, flags);
- val = readl_relaxed(bridge_base_addr + IMASK_LOCAL);
- val |= mask;
- writel_relaxed(val, bridge_base_addr + IMASK_LOCAL);
- raw_spin_unlock_irqrestore(&port->lock, flags);
-}
-
-static struct irq_chip mc_intx_irq_chip = {
- .name = "Microchip PCIe INTx",
- .irq_ack = mc_ack_intx_irq,
- .irq_mask = mc_mask_intx_irq,
- .irq_unmask = mc_unmask_intx_irq,
-};
-
-static int mc_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
- irq_hw_number_t hwirq)
-{
- irq_set_chip_and_handler(irq, &mc_intx_irq_chip, handle_level_irq);
- irq_set_chip_data(irq, domain->host_data);
-
- return 0;
-}
-
-static const struct irq_domain_ops intx_domain_ops = {
- .map = mc_pcie_intx_map,
-};
-
-static inline u32 reg_to_event(u32 reg, struct event_map field)
-{
- return (reg & field.reg_mask) ? BIT(field.event_bit) : 0;
-}
-
-static u32 pcie_events(struct mc_pcie *port)
-{
- void __iomem *ctrl_base_addr = port->axi_base_addr + MC_PCIE_CTRL_ADDR;
- u32 reg = readl_relaxed(ctrl_base_addr + PCIE_EVENT_INT);
- u32 val = 0;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(pcie_event_to_event); i++)
- val |= reg_to_event(reg, pcie_event_to_event[i]);
-
- return val;
-}
-
-static u32 sec_errors(struct mc_pcie *port)
-{
- void __iomem *ctrl_base_addr = port->axi_base_addr + MC_PCIE_CTRL_ADDR;
- u32 reg = readl_relaxed(ctrl_base_addr + SEC_ERROR_INT);
- u32 val = 0;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(sec_error_to_event); i++)
- val |= reg_to_event(reg, sec_error_to_event[i]);
-
- return val;
-}
-
-static u32 ded_errors(struct mc_pcie *port)
-{
- void __iomem *ctrl_base_addr = port->axi_base_addr + MC_PCIE_CTRL_ADDR;
- u32 reg = readl_relaxed(ctrl_base_addr + DED_ERROR_INT);
- u32 val = 0;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(ded_error_to_event); i++)
- val |= reg_to_event(reg, ded_error_to_event[i]);
-
- return val;
-}
-
-static u32 local_events(struct mc_pcie *port)
-{
- void __iomem *bridge_base_addr = port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
- u32 reg = readl_relaxed(bridge_base_addr + ISTATUS_LOCAL);
- u32 val = 0;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(local_status_to_event); i++)
- val |= reg_to_event(reg, local_status_to_event[i]);
-
- return val;
-}
-
-static u32 get_events(struct mc_pcie *port)
-{
- u32 events = 0;
-
- events |= pcie_events(port);
- events |= sec_errors(port);
- events |= ded_errors(port);
- events |= local_events(port);
-
- return events;
-}
-
-static irqreturn_t mc_event_handler(int irq, void *dev_id)
-{
- struct mc_pcie *port = dev_id;
- struct device *dev = port->dev;
- struct irq_data *data;
-
- data = irq_domain_get_irq_data(port->event_domain, irq);
-
- if (event_cause[data->hwirq].str)
- dev_err_ratelimited(dev, "%s\n", event_cause[data->hwirq].str);
- else
- dev_err_ratelimited(dev, "bad event IRQ %ld\n", data->hwirq);
-
- return IRQ_HANDLED;
-}
-
-static void mc_handle_event(struct irq_desc *desc)
-{
- struct mc_pcie *port = irq_desc_get_handler_data(desc);
- unsigned long events;
- u32 bit;
- struct irq_chip *chip = irq_desc_get_chip(desc);
-
- chained_irq_enter(chip, desc);
-
- events = get_events(port);
-
- for_each_set_bit(bit, &events, NUM_EVENTS)
- generic_handle_domain_irq(port->event_domain, bit);
-
- chained_irq_exit(chip, desc);
-}
-
-static void mc_ack_event_irq(struct irq_data *data)
-{
- struct mc_pcie *port = irq_data_get_irq_chip_data(data);
- u32 event = data->hwirq;
- void __iomem *addr;
- u32 mask;
-
- addr = port->axi_base_addr + event_descs[event].base +
- event_descs[event].offset;
- mask = event_descs[event].mask;
- mask |= event_descs[event].enb_mask;
-
- writel_relaxed(mask, addr);
-}
-
-static void mc_mask_event_irq(struct irq_data *data)
-{
- struct mc_pcie *port = irq_data_get_irq_chip_data(data);
- u32 event = data->hwirq;
- void __iomem *addr;
- u32 mask;
- u32 val;
-
- addr = port->axi_base_addr + event_descs[event].base +
- event_descs[event].mask_offset;
- mask = event_descs[event].mask;
- if (event_descs[event].enb_mask) {
- mask <<= PCIE_EVENT_INT_ENB_SHIFT;
- mask &= PCIE_EVENT_INT_ENB_MASK;
- }
-
- if (!event_descs[event].mask_high)
- mask = ~mask;
-
- raw_spin_lock(&port->lock);
- val = readl_relaxed(addr);
- if (event_descs[event].mask_high)
- val |= mask;
- else
- val &= mask;
-
- writel_relaxed(val, addr);
- raw_spin_unlock(&port->lock);
-}
-
-static void mc_unmask_event_irq(struct irq_data *data)
-{
- struct mc_pcie *port = irq_data_get_irq_chip_data(data);
- u32 event = data->hwirq;
- void __iomem *addr;
- u32 mask;
- u32 val;
-
- addr = port->axi_base_addr + event_descs[event].base +
- event_descs[event].mask_offset;
- mask = event_descs[event].mask;
-
- if (event_descs[event].enb_mask)
- mask <<= PCIE_EVENT_INT_ENB_SHIFT;
-
- if (event_descs[event].mask_high)
- mask = ~mask;
-
- if (event_descs[event].enb_mask)
- mask &= PCIE_EVENT_INT_ENB_MASK;
-
- raw_spin_lock(&port->lock);
- val = readl_relaxed(addr);
- if (event_descs[event].mask_high)
- val &= mask;
- else
- val |= mask;
- writel_relaxed(val, addr);
- raw_spin_unlock(&port->lock);
-}
-
-static struct irq_chip mc_event_irq_chip = {
- .name = "Microchip PCIe EVENT",
- .irq_ack = mc_ack_event_irq,
- .irq_mask = mc_mask_event_irq,
- .irq_unmask = mc_unmask_event_irq,
-};
-
-static int mc_pcie_event_map(struct irq_domain *domain, unsigned int irq,
- irq_hw_number_t hwirq)
-{
- irq_set_chip_and_handler(irq, &mc_event_irq_chip, handle_level_irq);
- irq_set_chip_data(irq, domain->host_data);
-
- return 0;
-}
-
-static const struct irq_domain_ops event_domain_ops = {
- .map = mc_pcie_event_map,
-};
-
-static inline void mc_pcie_deinit_clk(void *data)
-{
- struct clk *clk = data;
-
- clk_disable_unprepare(clk);
-}
-
-static inline struct clk *mc_pcie_init_clk(struct device *dev, const char *id)
-{
- struct clk *clk;
- int ret;
-
- clk = devm_clk_get_optional(dev, id);
- if (IS_ERR(clk))
- return clk;
- if (!clk)
- return clk;
-
- ret = clk_prepare_enable(clk);
- if (ret)
- return ERR_PTR(ret);
-
- devm_add_action_or_reset(dev, mc_pcie_deinit_clk, clk);
-
- return clk;
-}
-
-static int mc_pcie_init_clks(struct device *dev)
-{
- int i;
- struct clk *fic;
-
- /*
- * PCIe may be clocked via Fabric Interface using between 1 and 4
- * clocks. Scan DT for clocks and enable them if present
- */
- for (i = 0; i < ARRAY_SIZE(poss_clks); i++) {
- fic = mc_pcie_init_clk(dev, poss_clks[i]);
- if (IS_ERR(fic))
- return PTR_ERR(fic);
- }
-
- return 0;
-}
-
-static int mc_pcie_init_irq_domains(struct mc_pcie *port)
-{
- struct device *dev = port->dev;
- struct device_node *node = dev->of_node;
- struct device_node *pcie_intc_node;
-
- /* Setup INTx */
- pcie_intc_node = of_get_next_child(node, NULL);
- if (!pcie_intc_node) {
- dev_err(dev, "failed to find PCIe Intc node\n");
- return -EINVAL;
- }
-
- port->event_domain = irq_domain_add_linear(pcie_intc_node, NUM_EVENTS,
- &event_domain_ops, port);
- if (!port->event_domain) {
- dev_err(dev, "failed to get event domain\n");
- of_node_put(pcie_intc_node);
- return -ENOMEM;
- }
-
- irq_domain_update_bus_token(port->event_domain, DOMAIN_BUS_NEXUS);
-
- port->intx_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
- &intx_domain_ops, port);
- if (!port->intx_domain) {
- dev_err(dev, "failed to get an INTx IRQ domain\n");
- of_node_put(pcie_intc_node);
- return -ENOMEM;
- }
-
- irq_domain_update_bus_token(port->intx_domain, DOMAIN_BUS_WIRED);
-
- of_node_put(pcie_intc_node);
- raw_spin_lock_init(&port->lock);
-
- return mc_allocate_msi_domains(port);
-}
-
-static void mc_pcie_setup_window(void __iomem *bridge_base_addr, u32 index,
- phys_addr_t axi_addr, phys_addr_t pci_addr,
- size_t size)
-{
- u32 atr_sz = ilog2(size) - 1;
- u32 val;
-
- if (index == 0)
- val = PCIE_CONFIG_INTERFACE;
- else
- val = PCIE_TX_RX_INTERFACE;
-
- writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
- ATR0_AXI4_SLV0_TRSL_PARAM);
-
- val = lower_32_bits(axi_addr) | (atr_sz << ATR_SIZE_SHIFT) |
- ATR_IMPL_ENABLE;
- writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
- ATR0_AXI4_SLV0_SRCADDR_PARAM);
-
- val = upper_32_bits(axi_addr);
- writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
- ATR0_AXI4_SLV0_SRC_ADDR);
-
- val = lower_32_bits(pci_addr);
- writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
- ATR0_AXI4_SLV0_TRSL_ADDR_LSB);
-
- val = upper_32_bits(pci_addr);
- writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
- ATR0_AXI4_SLV0_TRSL_ADDR_UDW);
-
- val = readl(bridge_base_addr + ATR0_PCIE_WIN0_SRCADDR_PARAM);
- val |= (ATR0_PCIE_ATR_SIZE << ATR0_PCIE_ATR_SIZE_SHIFT);
- writel(val, bridge_base_addr + ATR0_PCIE_WIN0_SRCADDR_PARAM);
- writel(0, bridge_base_addr + ATR0_PCIE_WIN0_SRC_ADDR);
-}
-
-static int mc_pcie_setup_windows(struct platform_device *pdev,
- struct mc_pcie *port)
-{
- void __iomem *bridge_base_addr =
- port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
- struct pci_host_bridge *bridge = platform_get_drvdata(pdev);
- struct resource_entry *entry;
- u64 pci_addr;
- u32 index = 1;
-
- resource_list_for_each_entry(entry, &bridge->windows) {
- if (resource_type(entry->res) == IORESOURCE_MEM) {
- pci_addr = entry->res->start - entry->offset;
- mc_pcie_setup_window(bridge_base_addr, index,
- entry->res->start, pci_addr,
- resource_size(entry->res));
- index++;
- }
- }
-
- return 0;
-}
-
-static inline void mc_clear_secs(struct mc_pcie *port)
-{
- void __iomem *ctrl_base_addr = port->axi_base_addr + MC_PCIE_CTRL_ADDR;
-
- writel_relaxed(SEC_ERROR_INT_ALL_RAM_SEC_ERR_INT, ctrl_base_addr +
- SEC_ERROR_INT);
- writel_relaxed(0, ctrl_base_addr + SEC_ERROR_EVENT_CNT);
-}
-
-static inline void mc_clear_deds(struct mc_pcie *port)
-{
- void __iomem *ctrl_base_addr = port->axi_base_addr + MC_PCIE_CTRL_ADDR;
-
- writel_relaxed(DED_ERROR_INT_ALL_RAM_DED_ERR_INT, ctrl_base_addr +
- DED_ERROR_INT);
- writel_relaxed(0, ctrl_base_addr + DED_ERROR_EVENT_CNT);
-}
-
-static void mc_disable_interrupts(struct mc_pcie *port)
-{
- void __iomem *bridge_base_addr = port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
- void __iomem *ctrl_base_addr = port->axi_base_addr + MC_PCIE_CTRL_ADDR;
- u32 val;
-
- /* Ensure ECC bypass is enabled */
- val = ECC_CONTROL_TX_RAM_ECC_BYPASS |
- ECC_CONTROL_RX_RAM_ECC_BYPASS |
- ECC_CONTROL_PCIE2AXI_RAM_ECC_BYPASS |
- ECC_CONTROL_AXI2PCIE_RAM_ECC_BYPASS;
- writel_relaxed(val, ctrl_base_addr + ECC_CONTROL);
-
- /* Disable SEC errors and clear any outstanding */
- writel_relaxed(SEC_ERROR_INT_ALL_RAM_SEC_ERR_INT, ctrl_base_addr +
- SEC_ERROR_INT_MASK);
- mc_clear_secs(port);
-
- /* Disable DED errors and clear any outstanding */
- writel_relaxed(DED_ERROR_INT_ALL_RAM_DED_ERR_INT, ctrl_base_addr +
- DED_ERROR_INT_MASK);
- mc_clear_deds(port);
-
- /* Disable local interrupts and clear any outstanding */
- writel_relaxed(0, bridge_base_addr + IMASK_LOCAL);
- writel_relaxed(GENMASK(31, 0), bridge_base_addr + ISTATUS_LOCAL);
- writel_relaxed(GENMASK(31, 0), bridge_base_addr + ISTATUS_MSI);
-
- /* Disable PCIe events and clear any outstanding */
- val = PCIE_EVENT_INT_L2_EXIT_INT |
- PCIE_EVENT_INT_HOTRST_EXIT_INT |
- PCIE_EVENT_INT_DLUP_EXIT_INT |
- PCIE_EVENT_INT_L2_EXIT_INT_MASK |
- PCIE_EVENT_INT_HOTRST_EXIT_INT_MASK |
- PCIE_EVENT_INT_DLUP_EXIT_INT_MASK;
- writel_relaxed(val, ctrl_base_addr + PCIE_EVENT_INT);
-
- /* Disable host interrupts and clear any outstanding */
- writel_relaxed(0, bridge_base_addr + IMASK_HOST);
- writel_relaxed(GENMASK(31, 0), bridge_base_addr + ISTATUS_HOST);
-}
-
-static int mc_init_interrupts(struct platform_device *pdev, struct mc_pcie *port)
-{
- struct device *dev = &pdev->dev;
- int irq;
- int i, intx_irq, msi_irq, event_irq;
- int ret;
-
- ret = mc_pcie_init_irq_domains(port);
- if (ret) {
- dev_err(dev, "failed creating IRQ domains\n");
- return ret;
- }
-
- irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- return -ENODEV;
-
- for (i = 0; i < NUM_EVENTS; i++) {
- event_irq = irq_create_mapping(port->event_domain, i);
- if (!event_irq) {
- dev_err(dev, "failed to map hwirq %d\n", i);
- return -ENXIO;
- }
-
- ret = devm_request_irq(dev, event_irq, mc_event_handler,
- 0, event_cause[i].sym, port);
- if (ret) {
- dev_err(dev, "failed to request IRQ %d\n", event_irq);
- return ret;
- }
- }
-
- intx_irq = irq_create_mapping(port->event_domain,
- EVENT_LOCAL_PM_MSI_INT_INTX);
- if (!intx_irq) {
- dev_err(dev, "failed to map INTx interrupt\n");
- return -ENXIO;
- }
-
- /* Plug the INTx chained handler */
- irq_set_chained_handler_and_data(intx_irq, mc_handle_intx, port);
-
- msi_irq = irq_create_mapping(port->event_domain,
- EVENT_LOCAL_PM_MSI_INT_MSI);
- if (!msi_irq)
- return -ENXIO;
-
- /* Plug the MSI chained handler */
- irq_set_chained_handler_and_data(msi_irq, mc_handle_msi, port);
-
- /* Plug the main event chained handler */
- irq_set_chained_handler_and_data(irq, mc_handle_event, port);
-
- return 0;
-}
-
-static int mc_platform_init(struct pci_config_window *cfg)
-{
- struct device *dev = cfg->parent;
- struct platform_device *pdev = to_platform_device(dev);
- void __iomem *bridge_base_addr =
- port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
- int ret;
-
- /* Configure address translation table 0 for PCIe config space */
- mc_pcie_setup_window(bridge_base_addr, 0, cfg->res.start,
- cfg->res.start,
- resource_size(&cfg->res));
-
- /* Need some fixups in config space */
- mc_pcie_enable_msi(port, cfg->win);
-
- /* Configure non-config space outbound ranges */
- ret = mc_pcie_setup_windows(pdev, port);
- if (ret)
- return ret;
-
- /* Address translation is up; safe to enable interrupts */
- ret = mc_init_interrupts(pdev, port);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static int mc_host_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- void __iomem *bridge_base_addr;
- int ret;
- u32 val;
-
- port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
- if (!port)
- return -ENOMEM;
-
- port->dev = dev;
-
- port->axi_base_addr = devm_platform_ioremap_resource(pdev, 1);
- if (IS_ERR(port->axi_base_addr))
- return PTR_ERR(port->axi_base_addr);
-
- mc_disable_interrupts(port);
-
- bridge_base_addr = port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
-
- /* Allow enabling MSI by disabling MSI-X */
- val = readl(bridge_base_addr + PCIE_PCI_IRQ_DW0);
- val &= ~MSIX_CAP_MASK;
- writel(val, bridge_base_addr + PCIE_PCI_IRQ_DW0);
-
- /* Pick num vectors from bitfile programmed onto FPGA fabric */
- val = readl(bridge_base_addr + PCIE_PCI_IRQ_DW0);
- val &= NUM_MSI_MSGS_MASK;
- val >>= NUM_MSI_MSGS_SHIFT;
-
- port->msi.num_vectors = 1 << val;
-
- /* Pick vector address from design */
- port->msi.vector_phy = readl_relaxed(bridge_base_addr + IMSI_ADDR);
-
- ret = mc_pcie_init_clks(dev);
- if (ret) {
- dev_err(dev, "failed to get clock resources, error %d\n", ret);
- return -ENODEV;
- }
-
- return pci_host_common_probe(pdev);
-}
-
-static const struct pci_ecam_ops mc_ecam_ops = {
- .init = mc_platform_init,
- .pci_ops = {
- .map_bus = pci_ecam_map_bus,
- .read = pci_generic_config_read,
- .write = pci_generic_config_write,
- }
-};
-
-static const struct of_device_id mc_pcie_of_match[] = {
- {
- .compatible = "microchip,pcie-host-1.0",
- .data = &mc_ecam_ops,
- },
- {},
-};
-
-MODULE_DEVICE_TABLE(of, mc_pcie_of_match);
-
-static struct platform_driver mc_pcie_driver = {
- .probe = mc_host_probe,
- .driver = {
- .name = "microchip-pcie",
- .of_match_table = mc_pcie_of_match,
- .suppress_bind_attrs = true,
- },
-};
-
-builtin_platform_driver(mc_pcie_driver);
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Microchip PCIe host controller driver");
-MODULE_AUTHOR("Daire McNamara <daire.mcnamara@microchip.com>");
diff --git a/drivers/pci/controller/pcie-mt7621.c b/drivers/pci/controller/pcie-mt7621.c
index 79e225edb42a..01ead2f92e87 100644
--- a/drivers/pci/controller/pcie-mt7621.c
+++ b/drivers/pci/controller/pcie-mt7621.c
@@ -202,7 +202,7 @@ static int mt7621_pcie_parse_port(struct mt7621_pcie *pcie,
struct mt7621_pcie_port *port;
struct device *dev = pcie->dev;
struct platform_device *pdev = to_platform_device(dev);
- char name[10];
+ char name[11];
int err;
port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
@@ -258,30 +258,25 @@ static int mt7621_pcie_parse_dt(struct mt7621_pcie *pcie)
{
struct device *dev = pcie->dev;
struct platform_device *pdev = to_platform_device(dev);
- struct device_node *node = dev->of_node, *child;
+ struct device_node *node = dev->of_node;
int err;
pcie->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pcie->base))
return PTR_ERR(pcie->base);
- for_each_available_child_of_node(node, child) {
+ for_each_available_child_of_node_scoped(node, child) {
int slot;
err = of_pci_get_devfn(child);
- if (err < 0) {
- of_node_put(child);
- dev_err(dev, "failed to parse devfn: %d\n", err);
- return err;
- }
+ if (err < 0)
+ return dev_err_probe(dev, err, "failed to parse devfn\n");
slot = PCI_SLOT(err);
err = mt7621_pcie_parse_port(pcie, child, slot);
- if (err) {
- of_node_put(child);
+ if (err)
return err;
- }
}
return 0;
@@ -541,7 +536,7 @@ MODULE_DEVICE_TABLE(of, mt7621_pcie_ids);
static struct platform_driver mt7621_pcie_driver = {
.probe = mt7621_pcie_probe,
- .remove_new = mt7621_pcie_remove,
+ .remove = mt7621_pcie_remove,
.driver = {
.name = "mt7621-pci",
.of_match_table = mt7621_pcie_ids,
@@ -549,4 +544,5 @@ static struct platform_driver mt7621_pcie_driver = {
};
builtin_platform_driver(mt7621_pcie_driver);
+MODULE_DESCRIPTION("MediaTek MT7621 PCIe host controller driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/pcie-rcar-ep.c b/drivers/pci/controller/pcie-rcar-ep.c
index 05967c6c0b42..c5e0d025bc43 100644
--- a/drivers/pci/controller/pcie-rcar-ep.c
+++ b/drivers/pci/controller/pcie-rcar-ep.c
@@ -107,7 +107,7 @@ static int rcar_pcie_parse_outbound_ranges(struct rcar_pcie_endpoint *ep,
}
if (!devm_request_mem_region(&pdev->dev, res->start,
resource_size(res),
- outbound_name)) {
+ res->name)) {
dev_err(pcie->dev, "Cannot request memory region %s.\n",
outbound_name);
return -EIO;
@@ -542,6 +542,8 @@ static int rcar_pcie_ep_probe(struct platform_device *pdev)
goto err_pm_put;
}
+ pci_epc_init_notify(epc);
+
return 0;
err_pm_put:
diff --git a/drivers/pci/controller/pcie-rcar-host.c b/drivers/pci/controller/pcie-rcar-host.c
index 996077ab7cfd..c32b803a47c7 100644
--- a/drivers/pci/controller/pcie-rcar-host.c
+++ b/drivers/pci/controller/pcie-rcar-host.c
@@ -78,7 +78,11 @@ static int rcar_pcie_wakeup(struct device *pcie_dev, void __iomem *pcie_base)
writel(L1IATN, pcie_base + PMCTLR);
ret = readl_poll_timeout_atomic(pcie_base + PMSR, val,
val & L1FAEG, 10, 1000);
- WARN(ret, "Timeout waiting for L1 link state, ret=%d\n", ret);
+ if (ret) {
+ dev_warn_ratelimited(pcie_dev,
+ "Timeout waiting for L1 link state, ret=%d\n",
+ ret);
+ }
writel(L1FAEG | PMEL1RX, pcie_base + PMSR);
}
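
[Aside: the hunk above replaces a WARN() splat with a rate-limited device warning around the same poll. A standalone sketch of the pattern (L1FAEG is the driver's own status bit; 10 and 1000 are the poll interval and timeout in microseconds):

    #include <linux/iopoll.h>

    static int demo_wait_l1_exit(struct device *dev, void __iomem *pmsr)
    {
            u32 val;
            int ret;

            /* Spin-poll every 10 us, give up after 1000 us; atomic-safe. */
            ret = readl_poll_timeout_atomic(pmsr, val, val & L1FAEG, 10, 1000);
            if (ret)
                    dev_warn_ratelimited(dev,
                                         "Timeout waiting for L1 link state, ret=%d\n",
                                         ret);

            return ret;
    }
]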
@@ -174,8 +178,8 @@ static int rcar_pcie_config_access(struct rcar_pcie_host *host,
* space, it's generally only accessible when in endpoint mode.
* When in root complex mode, the controller is unable to target
* itself with either type 0 or type 1 accesses, and indeed, any
- * controller initiated target transfer to its own config space
- * result in a completer abort.
+ * controller-initiated target transfer to its own config space
+ * results in a completer abort.
*
* Each channel effectively only supports a single device, but as
* the same channel <-> device access works for any PCI_SLOT()
@@ -654,11 +658,6 @@ static void rcar_msi_irq_unmask(struct irq_data *d)
spin_unlock_irqrestore(&msi->mask_lock, flags);
}
-static int rcar_msi_set_affinity(struct irq_data *d, const struct cpumask *mask, bool force)
-{
- return -EINVAL;
-}
-
static void rcar_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
struct rcar_msi *msi = irq_data_get_irq_chip_data(data);
@@ -674,7 +673,6 @@ static struct irq_chip rcar_msi_bottom_chip = {
.irq_ack = rcar_msi_irq_ack,
.irq_mask = rcar_msi_irq_mask,
.irq_unmask = rcar_msi_irq_unmask,
- .irq_set_affinity = rcar_msi_set_affinity,
.irq_compose_msi_msg = rcar_compose_msi_msg,
};
@@ -721,8 +719,8 @@ static const struct irq_domain_ops rcar_msi_domain_ops = {
};
static struct msi_domain_info rcar_msi_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_MULTI_PCI_MSI),
+ .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_NO_AFFINITY | MSI_FLAG_MULTI_PCI_MSI,
.chip = &rcar_msi_top_chip,
};
@@ -777,7 +775,7 @@ static int rcar_pcie_enable_msi(struct rcar_pcie_host *host)
if (err)
return err;
- /* Two irqs are for MSI, but they are also used for non-MSI irqs */
+ /* Two IRQs are for MSI, but they are also used for non-MSI IRQs */
err = devm_request_irq(dev, msi->irq1, rcar_pcie_msi_irq,
IRQF_SHARED | IRQF_NO_THREAD,
rcar_msi_bottom_chip.name, host);
@@ -794,12 +792,12 @@ static int rcar_pcie_enable_msi(struct rcar_pcie_host *host)
goto err;
}
- /* disable all MSIs */
+ /* Disable all MSIs */
rcar_pci_write_reg(pcie, 0, PCIEMSIIER);
/*
- * Setup MSI data target using RC base address address, which
- * is guaranteed to be in the low 32bit range on any R-Car HW.
+ * Setup MSI data target using RC base address, which is guaranteed
+ * to be in the low 32bit range on any R-Car HW.
*/
rcar_pci_write_reg(pcie, lower_32_bits(res.start) | MSIFE, PCIEMSIALR);
rcar_pci_write_reg(pcie, upper_32_bits(res.start), PCIEMSIAUR);
@@ -894,6 +892,7 @@ static int rcar_pcie_inbound_ranges(struct rcar_pcie *pcie,
dev_err(pcie->dev, "Failed to map inbound regions!\n");
return -EINVAL;
}
+
/*
* If the size of the range is larger than the alignment of
* the start address, we have to use multiple entries to
@@ -905,6 +904,7 @@ static int rcar_pcie_inbound_ranges(struct rcar_pcie *pcie,
size = min(size, alignment);
}
+
/* Hardware supports max 4GiB inbound region */
size = min(size, 1ULL << 32);
diff --git a/drivers/pci/controller/pcie-rockchip-ep.c b/drivers/pci/controller/pcie-rockchip-ep.c
index c9046e97a1d2..85ea36df2f59 100644
--- a/drivers/pci/controller/pcie-rockchip-ep.c
+++ b/drivers/pci/controller/pcie-rockchip-ep.c
@@ -10,12 +10,16 @@
#include <linux/configfs.h>
#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/iopoll.h>
#include <linux/kernel.h>
+#include <linux/irq.h>
#include <linux/of.h>
#include <linux/pci-epc.h>
#include <linux/platform_device.h>
#include <linux/pci-epf.h>
#include <linux/sizes.h>
+#include <linux/workqueue.h>
#include "pcie-rockchip.h"
@@ -36,6 +40,10 @@
* @irq_pci_fn: the latest PCI function that has updated the mapping of
* the MSI/INTX IRQ dedicated outbound region.
* @irq_pending: bitmask of asserted INTX IRQs.
+ * @perst_irq: IRQ used for the PERST# signal.
+ * @perst_asserted: True if the PERST# signal was asserted.
+ * @link_up: True if the PCI link is up.
+ * @link_training: Work item to execute PCI link training.
*/
struct rockchip_pcie_ep {
struct rockchip_pcie rockchip;
@@ -48,6 +56,10 @@ struct rockchip_pcie_ep {
u64 irq_pci_addr;
u8 irq_pci_fn;
u8 irq_pending;
+ int perst_irq;
+ bool perst_asserted;
+ bool link_up;
+ struct delayed_work link_training;
};
static void rockchip_pcie_clear_ep_ob_atu(struct rockchip_pcie *rockchip,
@@ -63,15 +75,25 @@ static void rockchip_pcie_clear_ep_ob_atu(struct rockchip_pcie *rockchip,
ROCKCHIP_PCIE_AT_OB_REGION_DESC1(region));
}
+static int rockchip_pcie_ep_ob_atu_num_bits(struct rockchip_pcie *rockchip,
+ u64 pci_addr, size_t size)
+{
+ int num_pass_bits = fls64(pci_addr ^ (pci_addr + size - 1));
+
+ return clamp(num_pass_bits,
+ ROCKCHIP_PCIE_AT_MIN_NUM_BITS,
+ ROCKCHIP_PCIE_AT_MAX_NUM_BITS);
+}
+
static void rockchip_pcie_prog_ep_ob_atu(struct rockchip_pcie *rockchip, u8 fn,
u32 r, u64 cpu_addr, u64 pci_addr,
size_t size)
{
- int num_pass_bits = fls64(size - 1);
+ int num_pass_bits;
u32 addr0, addr1, desc0;
- if (num_pass_bits < 8)
- num_pass_bits = 8;
+ num_pass_bits = rockchip_pcie_ep_ob_atu_num_bits(rockchip,
+ pci_addr, size);
addr0 = ((num_pass_bits - 1) & PCIE_CORE_OB_REGION_ADDR0_NUM_BITS) |
(lower_32_bits(pci_addr) & PCIE_CORE_OB_REGION_ADDR0_LO_ADDR);
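
[Aside: rockchip_pcie_ep_ob_atu_num_bits() above derives the ATU window size from the span the mapping must cover: XORing the first and last PCI addresses leaves exactly the bits that vary inside the window, and fls64() of that is the pass-through bit count, which the helper then clamps to the controller's limits. A worked sketch without the clamp:

    static int demo_atu_num_bits(u64 pci_addr, size_t size)
    {
            /*
             * e.g. pci_addr = 0x12340000, size = 0x20000:
             * 0x12340000 ^ 0x1235ffff = 0x0001ffff, fls64() = 17 pass bits.
             */
            return fls64(pci_addr ^ (pci_addr + size - 1));
    }
]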
@@ -98,10 +120,8 @@ static int rockchip_pcie_ep_write_header(struct pci_epc *epc, u8 fn, u8 vfn,
/* All functions share the same vendor ID with function 0 */
if (fn == 0) {
- u32 vid_regs = (hdr->vendorid & GENMASK(15, 0)) |
- (hdr->subsys_vendor_id & GENMASK(31, 16)) << 16;
-
- rockchip_pcie_write(rockchip, vid_regs,
+ rockchip_pcie_write(rockchip,
+ hdr->vendorid | hdr->subsys_vendor_id << 16,
PCIE_CORE_CONFIG_VENDOR);
}
@@ -153,7 +173,7 @@ static int rockchip_pcie_ep_set_bar(struct pci_epc *epc, u8 fn, u8 vfn,
ctrl = ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_IO_32BITS;
} else {
bool is_prefetch = !!(flags & PCI_BASE_ADDRESS_MEM_PREFETCH);
- bool is_64bits = sz > SZ_2G;
+ bool is_64bits = !!(flags & PCI_BASE_ADDRESS_MEM_TYPE_64);
if (is_64bits && (bar & 1))
return -EINVAL;
@@ -230,6 +250,28 @@ static inline u32 rockchip_ob_region(phys_addr_t addr)
return (addr >> ilog2(SZ_1M)) & 0x1f;
}
+static u64 rockchip_pcie_ep_align_addr(struct pci_epc *epc, u64 pci_addr,
+ size_t *pci_size, size_t *addr_offset)
+{
+ struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
+ size_t size = *pci_size;
+ u64 offset, mask;
+ int num_bits;
+
+ num_bits = rockchip_pcie_ep_ob_atu_num_bits(&ep->rockchip,
+ pci_addr, size);
+ mask = (1ULL << num_bits) - 1;
+
+ offset = pci_addr & mask;
+ if (size + offset > SZ_1M)
+ size = SZ_1M - offset;
+
+ *pci_size = ALIGN(offset + size, ROCKCHIP_PCIE_AT_SIZE_ALIGN);
+ *addr_offset = offset;
+
+ return pci_addr & ~mask;
+}
+
static int rockchip_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, u8 vfn,
phys_addr_t addr, u64 pci_addr,
size_t size)
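
[Aside: rockchip_pcie_ep_align_addr() above splits an arbitrary PCI address into an ATU-aligned base plus an offset, clamping the usable size so a mapping never spills past the 1 MiB region granule. A worked sketch of just the base/offset split (the size clamp and ALIGN() step are omitted):

    static u64 demo_align_addr(u64 pci_addr, int num_bits, size_t *offset)
    {
            u64 mask = (1ULL << num_bits) - 1;

            /*
             * e.g. pci_addr = 0x12345678 with num_bits = 17:
             * mask = 0x1ffff, *offset = 0x5678, returned base = 0x12340000.
             */
            *offset = pci_addr & mask;

            return pci_addr & ~mask;
    }
]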
@@ -238,6 +280,9 @@ static int rockchip_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, u8 vfn,
struct rockchip_pcie *pcie = &ep->rockchip;
u32 r = rockchip_ob_region(addr);
+ if (test_bit(r, &ep->ob_region_map))
+ return -EBUSY;
+
rockchip_pcie_prog_ep_ob_atu(pcie, fn, r, addr, pci_addr, size);
set_bit(r, &ep->ob_region_map);
@@ -251,13 +296,9 @@ static void rockchip_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn, u8 vfn,
{
struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
struct rockchip_pcie *rockchip = &ep->rockchip;
- u32 r;
-
- for (r = 0; r < ep->max_regions; r++)
- if (ep->ob_addr[r] == addr)
- break;
+ u32 r = rockchip_ob_region(addr);
- if (r == ep->max_regions)
+ if (addr != ep->ob_addr[r] || !test_bit(r, &ep->ob_region_map))
return;
rockchip_pcie_clear_ep_ob_atu(rockchip, r);
@@ -353,9 +394,10 @@ static int rockchip_pcie_ep_send_msi_irq(struct rockchip_pcie_ep *ep, u8 fn,
{
struct rockchip_pcie *rockchip = &ep->rockchip;
u32 flags, mme, data, data_mask;
+ size_t irq_pci_size, offset;
+ u64 irq_pci_addr;
u8 msi_count;
u64 pci_addr;
- u32 r;
/* Check MSI enable bit */
flags = rockchip_pcie_read(&ep->rockchip,
@@ -391,18 +433,21 @@ static int rockchip_pcie_ep_send_msi_irq(struct rockchip_pcie_ep *ep, u8 fn,
PCI_MSI_ADDRESS_LO);
/* Set the outbound region if needed. */
- if (unlikely(ep->irq_pci_addr != (pci_addr & PCIE_ADDR_MASK) ||
+ irq_pci_size = ~PCIE_ADDR_MASK + 1;
+ irq_pci_addr = rockchip_pcie_ep_align_addr(ep->epc,
+ pci_addr & PCIE_ADDR_MASK,
+ &irq_pci_size, &offset);
+ if (unlikely(ep->irq_pci_addr != irq_pci_addr ||
ep->irq_pci_fn != fn)) {
- r = rockchip_ob_region(ep->irq_phys_addr);
- rockchip_pcie_prog_ep_ob_atu(rockchip, fn, r,
- ep->irq_phys_addr,
- pci_addr & PCIE_ADDR_MASK,
- ~PCIE_ADDR_MASK + 1);
- ep->irq_pci_addr = (pci_addr & PCIE_ADDR_MASK);
+ rockchip_pcie_prog_ep_ob_atu(rockchip, fn,
+ rockchip_ob_region(ep->irq_phys_addr),
+ ep->irq_phys_addr,
+ irq_pci_addr, irq_pci_size);
+ ep->irq_pci_addr = irq_pci_addr;
ep->irq_pci_fn = fn;
}
- writew(data, ep->irq_cpu_addr + (pci_addr & ~PCIE_ADDR_MASK));
+ writew(data, ep->irq_cpu_addr + offset + (pci_addr & ~PCIE_ADDR_MASK));
return 0;
}
@@ -434,14 +479,222 @@ static int rockchip_pcie_ep_start(struct pci_epc *epc)
rockchip_pcie_write(rockchip, cfg, PCIE_CORE_PHY_FUNC_CFG);
+ if (rockchip->perst_gpio)
+ enable_irq(ep->perst_irq);
+
+ /* Enable configuration and start link training */
+ rockchip_pcie_write(rockchip,
+ PCIE_CLIENT_LINK_TRAIN_ENABLE |
+ PCIE_CLIENT_CONF_ENABLE,
+ PCIE_CLIENT_CONFIG);
+
+ if (!rockchip->perst_gpio)
+ schedule_delayed_work(&ep->link_training, 0);
+
+ return 0;
+}
+
+static void rockchip_pcie_ep_stop(struct pci_epc *epc)
+{
+ struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
+ struct rockchip_pcie *rockchip = &ep->rockchip;
+
+ if (rockchip->perst_gpio) {
+ ep->perst_asserted = true;
+ disable_irq(ep->perst_irq);
+ }
+
+ cancel_delayed_work_sync(&ep->link_training);
+
+ /* Stop link training and disable configuration */
+ rockchip_pcie_write(rockchip,
+ PCIE_CLIENT_CONF_DISABLE |
+ PCIE_CLIENT_LINK_TRAIN_DISABLE,
+ PCIE_CLIENT_CONFIG);
+}
+
+static void rockchip_pcie_ep_retrain_link(struct rockchip_pcie *rockchip)
+{
+ u32 status;
+
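+ /* Set the Retrain Link bit in the EP Link Control and Status register */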
+ status = rockchip_pcie_read(rockchip, PCIE_EP_CONFIG_LCS);
+ status |= PCI_EXP_LNKCTL_RL;
+ rockchip_pcie_write(rockchip, status, PCIE_EP_CONFIG_LCS);
+}
+
+static bool rockchip_pcie_ep_link_up(struct rockchip_pcie *rockchip)
+{
+ u32 val = rockchip_pcie_read(rockchip, PCIE_CLIENT_BASIC_STATUS1);
+
+ return PCIE_LINK_UP(val);
+}
+
+static void rockchip_pcie_ep_link_training(struct work_struct *work)
+{
+ struct rockchip_pcie_ep *ep =
+ container_of(work, struct rockchip_pcie_ep, link_training.work);
+ struct rockchip_pcie *rockchip = &ep->rockchip;
+ struct device *dev = rockchip->dev;
+ u32 val;
+ int ret;
+
+ /* Enable Gen1 training and wait for its completion */
+ ret = readl_poll_timeout(rockchip->apb_base + PCIE_CORE_CTRL,
+ val, PCIE_LINK_TRAINING_DONE(val), 50,
+ LINK_TRAIN_TIMEOUT);
+ if (ret)
+ goto again;
+
+ /* Make sure that the link is up */
+ ret = readl_poll_timeout(rockchip->apb_base + PCIE_CLIENT_BASIC_STATUS1,
+ val, PCIE_LINK_UP(val), 50,
+ LINK_TRAIN_TIMEOUT);
+ if (ret)
+ goto again;
+
+ /*
+ * Check the current speed: if gen2 speed was requested and we are not
+ * at gen2 speed yet, retrain again for gen2.
+ */
+ val = rockchip_pcie_read(rockchip, PCIE_CORE_CTRL);
+ if (!PCIE_LINK_IS_GEN2(val) && rockchip->link_gen == 2) {
+ /* Enable retrain for gen2 */
+ rockchip_pcie_ep_retrain_link(rockchip);
+ readl_poll_timeout(rockchip->apb_base + PCIE_CORE_CTRL,
+ val, PCIE_LINK_IS_GEN2(val), 50,
+ LINK_TRAIN_TIMEOUT);
+ }
+
+ /* Check again that the link is up */
+ if (!rockchip_pcie_ep_link_up(rockchip))
+ goto again;
+
+ /*
+ * If PERST# was asserted while polling the link, do not notify
+ * the function.
+ */
+ if (ep->perst_asserted)
+ return;
+
+ val = rockchip_pcie_read(rockchip, PCIE_CLIENT_BASIC_STATUS0);
+ dev_info(dev,
+ "link up (negotiated speed: %sGT/s, width: x%lu)\n",
+ (val & PCIE_CLIENT_NEG_LINK_SPEED) ? "5" : "2.5",
+ ((val & PCIE_CLIENT_NEG_LINK_WIDTH_MASK) >>
+ PCIE_CLIENT_NEG_LINK_WIDTH_SHIFT) << 1);
+
+ /* Notify the function */
+ pci_epc_linkup(ep->epc);
+ ep->link_up = true;
+
+ return;
+
+again:
+ schedule_delayed_work(&ep->link_training, msecs_to_jiffies(5));
+}
+
+static void rockchip_pcie_ep_perst_assert(struct rockchip_pcie_ep *ep)
+{
+ struct rockchip_pcie *rockchip = &ep->rockchip;
+
+ dev_dbg(rockchip->dev, "PERST# asserted, link down\n");
+
+ if (ep->perst_asserted)
+ return;
+
+ ep->perst_asserted = true;
+
+ cancel_delayed_work_sync(&ep->link_training);
+
+ if (ep->link_up) {
+ pci_epc_linkdown(ep->epc);
+ ep->link_up = false;
+ }
+}
+
+static void rockchip_pcie_ep_perst_deassert(struct rockchip_pcie_ep *ep)
+{
+ struct rockchip_pcie *rockchip = &ep->rockchip;
+
+ dev_dbg(rockchip->dev, "PERST# de-asserted, starting link training\n");
+
+ if (!ep->perst_asserted)
+ return;
+
+ ep->perst_asserted = false;
+
+ /* Enable link re-training */
+ rockchip_pcie_ep_retrain_link(rockchip);
+
+ /* Start link training */
+ schedule_delayed_work(&ep->link_training, 0);
+}
+
+static irqreturn_t rockchip_pcie_ep_perst_irq_thread(int irq, void *data)
+{
+ struct pci_epc *epc = data;
+ struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
+ struct rockchip_pcie *rockchip = &ep->rockchip;
+ u32 perst = gpiod_get_value(rockchip->perst_gpio);
+
+ if (perst)
+ rockchip_pcie_ep_perst_assert(ep);
+ else
+ rockchip_pcie_ep_perst_deassert(ep);
+
+ irq_set_irq_type(ep->perst_irq,
+ (perst ? IRQF_TRIGGER_HIGH : IRQF_TRIGGER_LOW));
+
+ return IRQ_HANDLED;
+}
+
+static int rockchip_pcie_ep_setup_irq(struct pci_epc *epc)
+{
+ struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
+ struct rockchip_pcie *rockchip = &ep->rockchip;
+ struct device *dev = rockchip->dev;
+ int ret;
+
+ if (!rockchip->perst_gpio)
+ return 0;
+
+ /* PCIe reset interrupt */
+ ep->perst_irq = gpiod_to_irq(rockchip->perst_gpio);
+ if (ep->perst_irq < 0) {
+ dev_err(dev,
+ "failed to get IRQ for PERST# GPIO: %d\n",
+ ep->perst_irq);
+
+ return ep->perst_irq;
+ }
+
+ /*
+ * The perst_gpio is active low, so when it is inactive at startup it is
+ * high and will trigger the perst_irq handler. Treat this initial IRQ
+ * as a dummy one by faking the host asserting PERST#.
+ */
+ ep->perst_asserted = true;
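+ /* Keep the IRQ disabled until rockchip_pcie_ep_start() enables it */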
+ irq_set_status_flags(ep->perst_irq, IRQ_NOAUTOEN);
+ ret = devm_request_threaded_irq(dev, ep->perst_irq, NULL,
+ rockchip_pcie_ep_perst_irq_thread,
+ IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+ "pcie-ep-perst", epc);
+ if (ret) {
+ dev_err(dev,
+ "failed to request IRQ for PERST# GPIO: %d\n",
+ ret);
+
+ return ret;
+ }
+
return 0;
}
static const struct pci_epc_features rockchip_pcie_epc_features = {
- .linkup_notifier = false,
+ .linkup_notifier = true,
.msi_capable = true,
.msix_capable = false,
- .align = 256,
+ .align = ROCKCHIP_PCIE_AT_SIZE_ALIGN,
};
static const struct pci_epc_features*
@@ -454,17 +707,19 @@ static const struct pci_epc_ops rockchip_pcie_epc_ops = {
.write_header = rockchip_pcie_ep_write_header,
.set_bar = rockchip_pcie_ep_set_bar,
.clear_bar = rockchip_pcie_ep_clear_bar,
+ .align_addr = rockchip_pcie_ep_align_addr,
.map_addr = rockchip_pcie_ep_map_addr,
.unmap_addr = rockchip_pcie_ep_unmap_addr,
.set_msi = rockchip_pcie_ep_set_msi,
.get_msi = rockchip_pcie_ep_get_msi,
.raise_irq = rockchip_pcie_ep_raise_irq,
.start = rockchip_pcie_ep_start,
+ .stop = rockchip_pcie_ep_stop,
.get_features = rockchip_pcie_ep_get_features,
};
-static int rockchip_pcie_parse_ep_dt(struct rockchip_pcie *rockchip,
- struct rockchip_pcie_ep *ep)
+static int rockchip_pcie_ep_get_resources(struct rockchip_pcie *rockchip,
+ struct rockchip_pcie_ep *ep)
{
struct device *dev = rockchip->dev;
int err;
@@ -498,82 +753,38 @@ static const struct of_device_id rockchip_pcie_ep_of_match[] = {
{},
};
-static int rockchip_pcie_ep_probe(struct platform_device *pdev)
+static int rockchip_pcie_ep_init_ob_mem(struct rockchip_pcie_ep *ep)
{
- struct device *dev = &pdev->dev;
- struct rockchip_pcie_ep *ep;
- struct rockchip_pcie *rockchip;
- struct pci_epc *epc;
- size_t max_regions;
+ struct rockchip_pcie *rockchip = &ep->rockchip;
+ struct device *dev = rockchip->dev;
struct pci_epc_mem_window *windows = NULL;
int err, i;
- u32 cfg_msi, cfg_msix_cp;
-
- ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL);
- if (!ep)
- return -ENOMEM;
-
- rockchip = &ep->rockchip;
- rockchip->is_rc = false;
- rockchip->dev = dev;
-
- epc = devm_pci_epc_create(dev, &rockchip_pcie_epc_ops);
- if (IS_ERR(epc)) {
- dev_err(dev, "failed to create epc device\n");
- return PTR_ERR(epc);
- }
-
- ep->epc = epc;
- epc_set_drvdata(epc, ep);
-
- err = rockchip_pcie_parse_ep_dt(rockchip, ep);
- if (err)
- return err;
-
- err = rockchip_pcie_enable_clocks(rockchip);
- if (err)
- return err;
- err = rockchip_pcie_init_port(rockchip);
- if (err)
- goto err_disable_clocks;
-
- /* Establish the link automatically */
- rockchip_pcie_write(rockchip, PCIE_CLIENT_LINK_TRAIN_ENABLE,
- PCIE_CLIENT_CONFIG);
-
- max_regions = ep->max_regions;
- ep->ob_addr = devm_kcalloc(dev, max_regions, sizeof(*ep->ob_addr),
+ ep->ob_addr = devm_kcalloc(dev, ep->max_regions, sizeof(*ep->ob_addr),
GFP_KERNEL);
- if (!ep->ob_addr) {
- err = -ENOMEM;
- goto err_uninit_port;
- }
-
- /* Only enable function 0 by default */
- rockchip_pcie_write(rockchip, BIT(0), PCIE_CORE_PHY_FUNC_CFG);
+ if (!ep->ob_addr)
+ return -ENOMEM;
windows = devm_kcalloc(dev, ep->max_regions,
sizeof(struct pci_epc_mem_window), GFP_KERNEL);
- if (!windows) {
- err = -ENOMEM;
- goto err_uninit_port;
- }
+ if (!windows)
+ return -ENOMEM;
+
for (i = 0; i < ep->max_regions; i++) {
windows[i].phys_base = rockchip->mem_res->start + (SZ_1M * i);
windows[i].size = SZ_1M;
windows[i].page_size = SZ_1M;
}
- err = pci_epc_multi_mem_init(epc, windows, ep->max_regions);
+ err = pci_epc_multi_mem_init(ep->epc, windows, ep->max_regions);
devm_kfree(dev, windows);
if (err < 0) {
dev_err(dev, "failed to initialize the memory space\n");
- goto err_uninit_port;
+ return err;
}
- ep->irq_cpu_addr = pci_epc_mem_alloc_addr(epc, &ep->irq_phys_addr,
+ ep->irq_cpu_addr = pci_epc_mem_alloc_addr(ep->epc, &ep->irq_phys_addr,
SZ_1M);
if (!ep->irq_cpu_addr) {
dev_err(dev, "failed to reserve memory space for MSI\n");
@@ -583,6 +794,23 @@ static int rockchip_pcie_ep_probe(struct platform_device *pdev)
ep->irq_pci_addr = ROCKCHIP_PCIE_EP_DUMMY_IRQ_ADDR;
+ return 0;
+
+err_epc_mem_exit:
+ pci_epc_mem_exit(ep->epc);
+
+ return err;
+}
+
+static void rockchip_pcie_ep_exit_ob_mem(struct rockchip_pcie_ep *ep)
+{
+ pci_epc_mem_exit(ep->epc);
+}
+
+static void rockchip_pcie_ep_hide_broken_msix_cap(struct rockchip_pcie *rockchip)
+{
+ u32 cfg_msi, cfg_msix_cp;
+
/*
* MSI-X is not supported but the controller still advertises the MSI-X
* capability by default, which can lead to the Root Complex side
@@ -605,17 +833,68 @@ static int rockchip_pcie_ep_probe(struct platform_device *pdev)
rockchip_pcie_write(rockchip, cfg_msi,
PCIE_EP_CONFIG_BASE + ROCKCHIP_PCIE_EP_MSI_CTRL_REG);
+}
- rockchip_pcie_write(rockchip, PCIE_CLIENT_CONF_ENABLE,
- PCIE_CLIENT_CONFIG);
+static int rockchip_pcie_ep_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rockchip_pcie_ep *ep;
+ struct rockchip_pcie *rockchip;
+ struct pci_epc *epc;
+ int err;
+
+ ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL);
+ if (!ep)
+ return -ENOMEM;
+
+ rockchip = &ep->rockchip;
+ rockchip->is_rc = false;
+ rockchip->dev = dev;
+ INIT_DELAYED_WORK(&ep->link_training, rockchip_pcie_ep_link_training);
+
+ epc = devm_pci_epc_create(dev, &rockchip_pcie_epc_ops);
+ if (IS_ERR(epc)) {
+ dev_err(dev, "failed to create EPC device\n");
+ return PTR_ERR(epc);
+ }
+
+ ep->epc = epc;
+ epc_set_drvdata(epc, ep);
+
+ err = rockchip_pcie_ep_get_resources(rockchip, ep);
+ if (err)
+ return err;
+
+ err = rockchip_pcie_ep_init_ob_mem(ep);
+ if (err)
+ return err;
+
+ err = rockchip_pcie_enable_clocks(rockchip);
+ if (err)
+ goto err_exit_ob_mem;
+
+ err = rockchip_pcie_init_port(rockchip);
+ if (err)
+ goto err_disable_clocks;
+
+ rockchip_pcie_ep_hide_broken_msix_cap(rockchip);
+
+ /* Only enable function 0 by default */
+ rockchip_pcie_write(rockchip, BIT(0), PCIE_CORE_PHY_FUNC_CFG);
+
+ pci_epc_init_notify(epc);
+
+ err = rockchip_pcie_ep_setup_irq(epc);
+ if (err < 0)
+ goto err_uninit_port;
return 0;
-err_epc_mem_exit:
- pci_epc_mem_exit(epc);
err_uninit_port:
rockchip_pcie_deinit_phys(rockchip);
err_disable_clocks:
rockchip_pcie_disable_clocks(rockchip);
+err_exit_ob_mem:
+ rockchip_pcie_ep_exit_ob_mem(ep);
return err;
}
diff --git a/drivers/pci/controller/pcie-rockchip-host.c b/drivers/pci/controller/pcie-rockchip-host.c
index 300b9dc85ecc..b9e7a8710cf0 100644
--- a/drivers/pci/controller/pcie-rockchip-host.c
+++ b/drivers/pci/controller/pcie-rockchip-host.c
@@ -294,7 +294,7 @@ static int rockchip_pcie_host_init_port(struct rockchip_pcie *rockchip)
int err, i = MAX_LANE_NUM;
u32 status;
- gpiod_set_value_cansleep(rockchip->ep_gpio, 0);
+ gpiod_set_value_cansleep(rockchip->perst_gpio, 0);
err = rockchip_pcie_init_port(rockchip);
if (err)
@@ -322,7 +322,10 @@ static int rockchip_pcie_host_init_port(struct rockchip_pcie *rockchip)
rockchip_pcie_write(rockchip, PCIE_CLIENT_LINK_TRAIN_ENABLE,
PCIE_CLIENT_CONFIG);
- gpiod_set_value_cansleep(rockchip->ep_gpio, 1);
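+ /* Keep the power rails stable for T_PVPERL before deasserting PERST# */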
+ msleep(PCIE_T_PVPERL_MS);
+ gpiod_set_value_cansleep(rockchip->perst_gpio, 1);
+
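+ /* Give the device time to become ready for config requests after reset */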
+ msleep(PCIE_T_RRS_READY_MS);
/* 500ms timeout value should be enough for Gen1/2 training */
err = readl_poll_timeout(rockchip->apb_base + PCIE_CLIENT_BASIC_STATUS1,
@@ -364,7 +367,7 @@ static int rockchip_pcie_host_init_port(struct rockchip_pcie *rockchip)
}
}
- rockchip_pcie_write(rockchip, ROCKCHIP_VENDOR_ID,
+ rockchip_pcie_write(rockchip, PCI_VENDOR_ID_ROCKCHIP,
PCIE_CORE_CONFIG_VENDOR);
rockchip_pcie_write(rockchip,
PCI_CLASS_BRIDGE_PCI_NORMAL << 8,
@@ -690,8 +693,8 @@ static int rockchip_pcie_init_irq_domain(struct rockchip_pcie *rockchip)
return -EINVAL;
}
- rockchip->irq_domain = irq_domain_add_linear(intc, PCI_NUM_INTX,
- &intx_domain_ops, rockchip);
+ rockchip->irq_domain = irq_domain_create_linear(of_fwnode_handle(intc), PCI_NUM_INTX,
+ &intx_domain_ops, rockchip);
of_node_put(intc);
if (!rockchip->irq_domain) {
dev_err(dev, "failed to get a INTx IRQ domain\n");
@@ -1047,7 +1050,7 @@ static struct platform_driver rockchip_pcie_driver = {
.pm = &rockchip_pcie_pm_ops,
},
.probe = rockchip_pcie_probe,
- .remove_new = rockchip_pcie_remove,
+ .remove = rockchip_pcie_remove,
};
module_platform_driver(rockchip_pcie_driver);
diff --git a/drivers/pci/controller/pcie-rockchip.c b/drivers/pci/controller/pcie-rockchip.c
index 0ef2e622d36e..0f88da378805 100644
--- a/drivers/pci/controller/pcie-rockchip.c
+++ b/drivers/pci/controller/pcie-rockchip.c
@@ -30,7 +30,7 @@ int rockchip_pcie_parse_dt(struct rockchip_pcie *rockchip)
struct platform_device *pdev = to_platform_device(dev);
struct device_node *node = dev->of_node;
struct resource *regs;
- int err;
+ int err, i;
if (rockchip->is_rc) {
regs = platform_get_resource_byname(pdev,
@@ -69,87 +69,38 @@ int rockchip_pcie_parse_dt(struct rockchip_pcie *rockchip)
if (rockchip->link_gen < 0 || rockchip->link_gen > 2)
rockchip->link_gen = 2;
- rockchip->core_rst = devm_reset_control_get_exclusive(dev, "core");
- if (IS_ERR(rockchip->core_rst)) {
- if (PTR_ERR(rockchip->core_rst) != -EPROBE_DEFER)
- dev_err(dev, "missing core reset property in node\n");
- return PTR_ERR(rockchip->core_rst);
- }
-
- rockchip->mgmt_rst = devm_reset_control_get_exclusive(dev, "mgmt");
- if (IS_ERR(rockchip->mgmt_rst)) {
- if (PTR_ERR(rockchip->mgmt_rst) != -EPROBE_DEFER)
- dev_err(dev, "missing mgmt reset property in node\n");
- return PTR_ERR(rockchip->mgmt_rst);
- }
-
- rockchip->mgmt_sticky_rst = devm_reset_control_get_exclusive(dev,
- "mgmt-sticky");
- if (IS_ERR(rockchip->mgmt_sticky_rst)) {
- if (PTR_ERR(rockchip->mgmt_sticky_rst) != -EPROBE_DEFER)
- dev_err(dev, "missing mgmt-sticky reset property in node\n");
- return PTR_ERR(rockchip->mgmt_sticky_rst);
- }
-
- rockchip->pipe_rst = devm_reset_control_get_exclusive(dev, "pipe");
- if (IS_ERR(rockchip->pipe_rst)) {
- if (PTR_ERR(rockchip->pipe_rst) != -EPROBE_DEFER)
- dev_err(dev, "missing pipe reset property in node\n");
- return PTR_ERR(rockchip->pipe_rst);
- }
-
- rockchip->pm_rst = devm_reset_control_get_exclusive(dev, "pm");
- if (IS_ERR(rockchip->pm_rst)) {
- if (PTR_ERR(rockchip->pm_rst) != -EPROBE_DEFER)
- dev_err(dev, "missing pm reset property in node\n");
- return PTR_ERR(rockchip->pm_rst);
- }
+ for (i = 0; i < ROCKCHIP_NUM_PM_RSTS; i++)
+ rockchip->pm_rsts[i].id = rockchip_pci_pm_rsts[i];
- rockchip->pclk_rst = devm_reset_control_get_exclusive(dev, "pclk");
- if (IS_ERR(rockchip->pclk_rst)) {
- if (PTR_ERR(rockchip->pclk_rst) != -EPROBE_DEFER)
- dev_err(dev, "missing pclk reset property in node\n");
- return PTR_ERR(rockchip->pclk_rst);
- }
-
- rockchip->aclk_rst = devm_reset_control_get_exclusive(dev, "aclk");
- if (IS_ERR(rockchip->aclk_rst)) {
- if (PTR_ERR(rockchip->aclk_rst) != -EPROBE_DEFER)
- dev_err(dev, "missing aclk reset property in node\n");
- return PTR_ERR(rockchip->aclk_rst);
- }
-
- if (rockchip->is_rc) {
- rockchip->ep_gpio = devm_gpiod_get_optional(dev, "ep",
- GPIOD_OUT_HIGH);
- if (IS_ERR(rockchip->ep_gpio))
- return dev_err_probe(dev, PTR_ERR(rockchip->ep_gpio),
- "failed to get ep GPIO\n");
- }
+ err = devm_reset_control_bulk_get_exclusive(dev,
+ ROCKCHIP_NUM_PM_RSTS,
+ rockchip->pm_rsts);
+ if (err)
+ return dev_err_probe(dev, err, "Cannot get the PM reset\n");
- rockchip->aclk_pcie = devm_clk_get(dev, "aclk");
- if (IS_ERR(rockchip->aclk_pcie)) {
- dev_err(dev, "aclk clock not found\n");
- return PTR_ERR(rockchip->aclk_pcie);
- }
+ for (i = 0; i < ROCKCHIP_NUM_CORE_RSTS; i++)
+ rockchip->core_rsts[i].id = rockchip_pci_core_rsts[i];
- rockchip->aclk_perf_pcie = devm_clk_get(dev, "aclk-perf");
- if (IS_ERR(rockchip->aclk_perf_pcie)) {
- dev_err(dev, "aclk_perf clock not found\n");
- return PTR_ERR(rockchip->aclk_perf_pcie);
- }
+ err = devm_reset_control_bulk_get_exclusive(dev,
+ ROCKCHIP_NUM_CORE_RSTS,
+ rockchip->core_rsts);
+ if (err)
+ return dev_err_probe(dev, err, "Cannot get the Core resets\n");
- rockchip->hclk_pcie = devm_clk_get(dev, "hclk");
- if (IS_ERR(rockchip->hclk_pcie)) {
- dev_err(dev, "hclk clock not found\n");
- return PTR_ERR(rockchip->hclk_pcie);
- }
+ if (rockchip->is_rc)
+ rockchip->perst_gpio = devm_gpiod_get_optional(dev, "ep",
+ GPIOD_OUT_LOW);
+ else
+ rockchip->perst_gpio = devm_gpiod_get_optional(dev, "reset",
+ GPIOD_IN);
+ if (IS_ERR(rockchip->perst_gpio))
+ return dev_err_probe(dev, PTR_ERR(rockchip->perst_gpio),
+ "failed to get PERST# GPIO\n");
- rockchip->clk_pcie_pm = devm_clk_get(dev, "pm");
- if (IS_ERR(rockchip->clk_pcie_pm)) {
- dev_err(dev, "pm clock not found\n");
- return PTR_ERR(rockchip->clk_pcie_pm);
- }
+ rockchip->num_clks = devm_clk_bulk_get_all(dev, &rockchip->clks);
+ if (rockchip->num_clks < 0)
+ return dev_err_probe(dev, rockchip->num_clks,
+ "failed to get clocks\n");
return 0;
}
@@ -167,23 +118,10 @@ int rockchip_pcie_init_port(struct rockchip_pcie *rockchip)
int err, i;
u32 regs;
- err = reset_control_assert(rockchip->aclk_rst);
- if (err) {
- dev_err(dev, "assert aclk_rst err %d\n", err);
- return err;
- }
-
- err = reset_control_assert(rockchip->pclk_rst);
- if (err) {
- dev_err(dev, "assert pclk_rst err %d\n", err);
- return err;
- }
-
- err = reset_control_assert(rockchip->pm_rst);
- if (err) {
- dev_err(dev, "assert pm_rst err %d\n", err);
- return err;
- }
+ err = reset_control_bulk_assert(ROCKCHIP_NUM_PM_RSTS,
+ rockchip->pm_rsts);
+ if (err)
+ return dev_err_probe(dev, err, "Couldn't assert PM resets\n");
for (i = 0; i < MAX_LANE_NUM; i++) {
err = phy_init(rockchip->phys[i]);
@@ -193,47 +131,19 @@ int rockchip_pcie_init_port(struct rockchip_pcie *rockchip)
}
}
- err = reset_control_assert(rockchip->core_rst);
- if (err) {
- dev_err(dev, "assert core_rst err %d\n", err);
- goto err_exit_phy;
- }
-
- err = reset_control_assert(rockchip->mgmt_rst);
- if (err) {
- dev_err(dev, "assert mgmt_rst err %d\n", err);
- goto err_exit_phy;
- }
-
- err = reset_control_assert(rockchip->mgmt_sticky_rst);
+ err = reset_control_bulk_assert(ROCKCHIP_NUM_CORE_RSTS,
+ rockchip->core_rsts);
if (err) {
- dev_err(dev, "assert mgmt_sticky_rst err %d\n", err);
- goto err_exit_phy;
- }
-
- err = reset_control_assert(rockchip->pipe_rst);
- if (err) {
- dev_err(dev, "assert pipe_rst err %d\n", err);
+ dev_err_probe(dev, err, "Couldn't assert Core resets\n");
goto err_exit_phy;
}
udelay(10);
- err = reset_control_deassert(rockchip->pm_rst);
- if (err) {
- dev_err(dev, "deassert pm_rst err %d\n", err);
- goto err_exit_phy;
- }
-
- err = reset_control_deassert(rockchip->aclk_rst);
+ err = reset_control_bulk_deassert(ROCKCHIP_NUM_PM_RSTS,
+ rockchip->pm_rsts);
if (err) {
- dev_err(dev, "deassert aclk_rst err %d\n", err);
- goto err_exit_phy;
- }
-
- err = reset_control_deassert(rockchip->pclk_rst);
- if (err) {
- dev_err(dev, "deassert pclk_rst err %d\n", err);
+ dev_err(dev, "Couldn't deassert PM resets %d\n", err);
goto err_exit_phy;
}
@@ -244,11 +154,12 @@ int rockchip_pcie_init_port(struct rockchip_pcie *rockchip)
rockchip_pcie_write(rockchip, PCIE_CLIENT_GEN_SEL_1,
PCIE_CLIENT_CONFIG);
- regs = PCIE_CLIENT_LINK_TRAIN_ENABLE | PCIE_CLIENT_ARI_ENABLE |
+ regs = PCIE_CLIENT_ARI_ENABLE |
PCIE_CLIENT_CONF_LANE_NUM(rockchip->lanes);
if (rockchip->is_rc)
- regs |= PCIE_CLIENT_CONF_ENABLE | PCIE_CLIENT_MODE_RC;
+ regs |= PCIE_CLIENT_LINK_TRAIN_ENABLE |
+ PCIE_CLIENT_CONF_ENABLE | PCIE_CLIENT_MODE_RC;
else
regs |= PCIE_CLIENT_CONF_DISABLE | PCIE_CLIENT_MODE_EP;
@@ -272,31 +183,10 @@ int rockchip_pcie_init_port(struct rockchip_pcie *rockchip)
goto err_power_off_phy;
}
- /*
- * Please don't reorder the deassert sequence of the following
- * four reset pins.
- */
- err = reset_control_deassert(rockchip->mgmt_sticky_rst);
- if (err) {
- dev_err(dev, "deassert mgmt_sticky_rst err %d\n", err);
- goto err_power_off_phy;
- }
-
- err = reset_control_deassert(rockchip->core_rst);
- if (err) {
- dev_err(dev, "deassert core_rst err %d\n", err);
- goto err_power_off_phy;
- }
-
- err = reset_control_deassert(rockchip->mgmt_rst);
- if (err) {
- dev_err(dev, "deassert mgmt_rst err %d\n", err);
- goto err_power_off_phy;
- }
-
- err = reset_control_deassert(rockchip->pipe_rst);
+ err = reset_control_bulk_deassert(ROCKCHIP_NUM_CORE_RSTS,
+ rockchip->core_rsts);
if (err) {
- dev_err(dev, "deassert pipe_rst err %d\n", err);
+ dev_err(dev, "Couldn't deassert Core reset %d\n", err);
goto err_power_off_phy;
}
@@ -372,50 +262,18 @@ int rockchip_pcie_enable_clocks(struct rockchip_pcie *rockchip)
struct device *dev = rockchip->dev;
int err;
- err = clk_prepare_enable(rockchip->aclk_pcie);
- if (err) {
- dev_err(dev, "unable to enable aclk_pcie clock\n");
- return err;
- }
-
- err = clk_prepare_enable(rockchip->aclk_perf_pcie);
- if (err) {
- dev_err(dev, "unable to enable aclk_perf_pcie clock\n");
- goto err_aclk_perf_pcie;
- }
-
- err = clk_prepare_enable(rockchip->hclk_pcie);
- if (err) {
- dev_err(dev, "unable to enable hclk_pcie clock\n");
- goto err_hclk_pcie;
- }
-
- err = clk_prepare_enable(rockchip->clk_pcie_pm);
- if (err) {
- dev_err(dev, "unable to enable clk_pcie_pm clock\n");
- goto err_clk_pcie_pm;
- }
+ err = clk_bulk_prepare_enable(rockchip->num_clks, rockchip->clks);
+ if (err)
+ return dev_err_probe(dev, err, "failed to enable clocks\n");
return 0;
-
-err_clk_pcie_pm:
- clk_disable_unprepare(rockchip->hclk_pcie);
-err_hclk_pcie:
- clk_disable_unprepare(rockchip->aclk_perf_pcie);
-err_aclk_perf_pcie:
- clk_disable_unprepare(rockchip->aclk_pcie);
- return err;
}
EXPORT_SYMBOL_GPL(rockchip_pcie_enable_clocks);
-void rockchip_pcie_disable_clocks(void *data)
+void rockchip_pcie_disable_clocks(struct rockchip_pcie *rockchip)
{
- struct rockchip_pcie *rockchip = data;
- clk_disable_unprepare(rockchip->clk_pcie_pm);
- clk_disable_unprepare(rockchip->hclk_pcie);
- clk_disable_unprepare(rockchip->aclk_perf_pcie);
- clk_disable_unprepare(rockchip->aclk_pcie);
+ clk_bulk_disable_unprepare(rockchip->num_clks, rockchip->clks);
}
EXPORT_SYMBOL_GPL(rockchip_pcie_disable_clocks);
diff --git a/drivers/pci/controller/pcie-rockchip.h b/drivers/pci/controller/pcie-rockchip.h
index 6111de35f84c..14954f43e5e9 100644
--- a/drivers/pci/controller/pcie-rockchip.h
+++ b/drivers/pci/controller/pcie-rockchip.h
@@ -11,9 +11,11 @@
#ifndef _PCIE_ROCKCHIP_H
#define _PCIE_ROCKCHIP_H
+#include <linux/clk.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/pci-ecam.h>
+#include <linux/reset.h>
/*
* The upper 16 bits of PCIE_CLIENT_CONFIG are a write mask for the lower 16
@@ -26,12 +28,14 @@
#define MAX_LANE_NUM 4
#define MAX_REGION_LIMIT 32
#define MIN_EP_APERTURE 28
+#define LINK_TRAIN_TIMEOUT (500 * USEC_PER_MSEC)
#define PCIE_CLIENT_BASE 0x0
#define PCIE_CLIENT_CONFIG (PCIE_CLIENT_BASE + 0x00)
#define PCIE_CLIENT_CONF_ENABLE HIWORD_UPDATE_BIT(0x0001)
#define PCIE_CLIENT_CONF_DISABLE HIWORD_UPDATE(0x0001, 0)
#define PCIE_CLIENT_LINK_TRAIN_ENABLE HIWORD_UPDATE_BIT(0x0002)
+#define PCIE_CLIENT_LINK_TRAIN_DISABLE HIWORD_UPDATE(0x0002, 0)
#define PCIE_CLIENT_ARI_ENABLE HIWORD_UPDATE_BIT(0x0008)
#define PCIE_CLIENT_CONF_LANE_NUM(x) HIWORD_UPDATE(0x0030, ENCODE_LANES(x))
#define PCIE_CLIENT_MODE_RC HIWORD_UPDATE_BIT(0x0040)
@@ -49,6 +53,10 @@
#define PCIE_CLIENT_DEBUG_LTSSM_MASK GENMASK(5, 0)
#define PCIE_CLIENT_DEBUG_LTSSM_L1 0x18
#define PCIE_CLIENT_DEBUG_LTSSM_L2 0x19
+#define PCIE_CLIENT_BASIC_STATUS0 (PCIE_CLIENT_BASE + 0x44)
+#define PCIE_CLIENT_NEG_LINK_WIDTH_MASK GENMASK(7, 6)
+#define PCIE_CLIENT_NEG_LINK_WIDTH_SHIFT 6
+#define PCIE_CLIENT_NEG_LINK_SPEED BIT(5)
#define PCIE_CLIENT_BASIC_STATUS1 (PCIE_CLIENT_BASE + 0x48)
#define PCIE_CLIENT_LINK_STATUS_UP 0x00300000
#define PCIE_CLIENT_LINK_STATUS_MASK 0x00300000
@@ -86,6 +94,8 @@
#define PCIE_CORE_CTRL_MGMT_BASE 0x900000
#define PCIE_CORE_CTRL (PCIE_CORE_CTRL_MGMT_BASE + 0x000)
+#define PCIE_CORE_PL_CONF_LS_MASK 0x00000001
+#define PCIE_CORE_PL_CONF_LS_READY 0x00000001
#define PCIE_CORE_PL_CONF_SPEED_5G 0x00000008
#define PCIE_CORE_PL_CONF_SPEED_MASK 0x00000018
#define PCIE_CORE_PL_CONF_LANE_MASK 0x00000006
@@ -143,6 +153,7 @@
#define PCIE_RC_CONFIG_BASE 0xa00000
#define PCIE_EP_CONFIG_BASE 0xa00000
#define PCIE_EP_CONFIG_DID_VID (PCIE_EP_CONFIG_BASE + 0x00)
+#define PCIE_EP_CONFIG_LCS (PCIE_EP_CONFIG_BASE + 0xd0)
#define PCIE_RC_CONFIG_RID_CCR (PCIE_RC_CONFIG_BASE + 0x08)
#define PCIE_RC_CONFIG_DCR (PCIE_RC_CONFIG_BASE + 0xc4)
#define PCIE_RC_CONFIG_DCR_CSPL_SHIFT 18
@@ -154,6 +165,7 @@
#define PCIE_RC_CONFIG_LINK_CAP (PCIE_RC_CONFIG_BASE + 0xcc)
#define PCIE_RC_CONFIG_LINK_CAP_L0S BIT(10)
#define PCIE_RC_CONFIG_LCS (PCIE_RC_CONFIG_BASE + 0xd0)
#define PCIE_RC_CONFIG_L1_SUBSTATE_CTRL2 (PCIE_RC_CONFIG_BASE + 0x90c)
#define PCIE_RC_CONFIG_THP_CAP (PCIE_RC_CONFIG_BASE + 0x274)
#define PCIE_RC_CONFIG_THP_CAP_NEXT_MASK GENMASK(31, 20)
@@ -188,9 +200,10 @@
#define AXI_WRAPPER_NOR_MSG 0xc
#define PCIE_RC_SEND_PME_OFF 0x11960
-#define ROCKCHIP_VENDOR_ID 0x1d87
#define PCIE_LINK_IS_L2(x) \
(((x) & PCIE_CLIENT_DEBUG_LTSSM_MASK) == PCIE_CLIENT_DEBUG_LTSSM_L2)
+#define PCIE_LINK_TRAINING_DONE(x) \
+ (((x) & PCIE_CORE_PL_CONF_LS_MASK) == PCIE_CORE_PL_CONF_LS_READY)
#define PCIE_LINK_UP(x) \
(((x) & PCIE_CLIENT_LINK_STATUS_MASK) == PCIE_CLIENT_LINK_STATUS_UP)
#define PCIE_LINK_IS_GEN2(x) \
@@ -241,10 +254,20 @@
#define ROCKCHIP_PCIE_EP_MSIX_CAP_CP_MASK GENMASK(15, 8)
#define ROCKCHIP_PCIE_EP_DUMMY_IRQ_ADDR 0x1
#define ROCKCHIP_PCIE_EP_PCI_LEGACY_IRQ_ADDR 0x3
+
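+/* Outbound ATU pass-bit limits and the resulting minimum window alignment */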
+#define ROCKCHIP_PCIE_AT_MIN_NUM_BITS 8
+#define ROCKCHIP_PCIE_AT_MAX_NUM_BITS 20
+#define ROCKCHIP_PCIE_AT_SIZE_ALIGN (1UL << ROCKCHIP_PCIE_AT_MIN_NUM_BITS)
+
#define ROCKCHIP_PCIE_EP_FUNC_BASE(fn) \
(PCIE_EP_PF_CONFIG_REGS_BASE + (((fn) << 12) & GENMASK(19, 12)))
#define ROCKCHIP_PCIE_EP_VIRT_FUNC_BASE(fn) \
(PCIE_EP_PF_CONFIG_REGS_BASE + 0x10000 + (((fn) << 12) & GENMASK(19, 12)))
#define ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar) \
(PCIE_CORE_AXI_CONF_BASE + 0x0828 + (fn) * 0x0040 + (bar) * 0x0008)
#define ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar) \
@@ -287,27 +310,36 @@
(((c) << ((b) * 8 + 5)) & \
ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b))
+#define ROCKCHIP_NUM_PM_RSTS ARRAY_SIZE(rockchip_pci_pm_rsts)
+#define ROCKCHIP_NUM_CORE_RSTS ARRAY_SIZE(rockchip_pci_core_rsts)
+
+static const char * const rockchip_pci_pm_rsts[] = {
+ "pm",
+ "pclk",
+ "aclk",
+};
+
+static const char * const rockchip_pci_core_rsts[] = {
+ "mgmt-sticky",
+ "core",
+ "mgmt",
+ "pipe",
+};
+
struct rockchip_pcie {
void __iomem *reg_base; /* DT axi-base */
void __iomem *apb_base; /* DT apb-base */
bool legacy_phy;
struct phy *phys[MAX_LANE_NUM];
- struct reset_control *core_rst;
- struct reset_control *mgmt_rst;
- struct reset_control *mgmt_sticky_rst;
- struct reset_control *pipe_rst;
- struct reset_control *pm_rst;
- struct reset_control *aclk_rst;
- struct reset_control *pclk_rst;
- struct clk *aclk_pcie;
- struct clk *aclk_perf_pcie;
- struct clk *hclk_pcie;
- struct clk *clk_pcie_pm;
+ struct reset_control_bulk_data pm_rsts[ROCKCHIP_NUM_PM_RSTS];
+ struct reset_control_bulk_data core_rsts[ROCKCHIP_NUM_CORE_RSTS];
+ struct clk_bulk_data *clks;
+ int num_clks;
struct regulator *vpcie12v; /* 12V power supply */
struct regulator *vpcie3v3; /* 3.3V power supply */
struct regulator *vpcie1v8; /* 1.8V power supply */
struct regulator *vpcie0v9; /* 0.9V power supply */
- struct gpio_desc *ep_gpio;
+ struct gpio_desc *perst_gpio;
u32 lanes;
u8 lanes_map;
int link_gen;
@@ -336,7 +368,7 @@ int rockchip_pcie_init_port(struct rockchip_pcie *rockchip);
int rockchip_pcie_get_phys(struct rockchip_pcie *rockchip);
void rockchip_pcie_deinit_phys(struct rockchip_pcie *rockchip);
int rockchip_pcie_enable_clocks(struct rockchip_pcie *rockchip);
-void rockchip_pcie_disable_clocks(void *data);
+void rockchip_pcie_disable_clocks(struct rockchip_pcie *rockchip);
void rockchip_pcie_cfg_configuration_accesses(
struct rockchip_pcie *rockchip, u32 type);
diff --git a/drivers/pci/controller/pcie-xilinx-cpm.c b/drivers/pci/controller/pcie-xilinx-cpm.c
index a0f5e1d67b04..d38f27e20761 100644
--- a/drivers/pci/controller/pcie-xilinx-cpm.c
+++ b/drivers/pci/controller/pcie-xilinx-cpm.c
@@ -30,11 +30,14 @@
#define XILINX_CPM_PCIE_REG_IDRN_MASK 0x00000E3C
#define XILINX_CPM_PCIE_MISC_IR_STATUS 0x00000340
#define XILINX_CPM_PCIE_MISC_IR_ENABLE 0x00000348
-#define XILINX_CPM_PCIE_MISC_IR_LOCAL BIT(1)
+#define XILINX_CPM_PCIE0_MISC_IR_LOCAL BIT(1)
+#define XILINX_CPM_PCIE1_MISC_IR_LOCAL BIT(2)
-#define XILINX_CPM_PCIE_IR_STATUS 0x000002A0
-#define XILINX_CPM_PCIE_IR_ENABLE 0x000002A8
-#define XILINX_CPM_PCIE_IR_LOCAL BIT(0)
+#define XILINX_CPM_PCIE0_IR_STATUS 0x000002A0
+#define XILINX_CPM_PCIE1_IR_STATUS 0x000002B4
+#define XILINX_CPM_PCIE0_IR_ENABLE 0x000002A8
+#define XILINX_CPM_PCIE1_IR_ENABLE 0x000002BC
+#define XILINX_CPM_PCIE_IR_LOCAL BIT(0)
#define IMR(x) BIT(XILINX_PCIE_INTR_ ##x)
@@ -80,14 +83,22 @@
enum xilinx_cpm_version {
CPM,
CPM5,
+ CPM5_HOST1,
+ CPM5NC_HOST,
};
/**
* struct xilinx_cpm_variant - CPM variant information
* @version: CPM version
+ * @ir_status: Offset for the error interrupt status register
+ * @ir_enable: Offset for the CPM5 local error interrupt enable register
+ * @ir_misc_value: A bitmask for the miscellaneous interrupt status
*/
struct xilinx_cpm_variant {
enum xilinx_cpm_version version;
+ u32 ir_status;
+ u32 ir_enable;
+ u32 ir_misc_value;
};
/**
@@ -269,6 +280,7 @@ static void xilinx_cpm_pcie_event_flow(struct irq_desc *desc)
{
struct xilinx_cpm_pcie *port = irq_desc_get_handler_data(desc);
struct irq_chip *chip = irq_desc_get_chip(desc);
+ const struct xilinx_cpm_variant *variant = port->variant;
unsigned long val;
int i;
@@ -279,11 +291,11 @@ static void xilinx_cpm_pcie_event_flow(struct irq_desc *desc)
generic_handle_domain_irq(port->cpm_domain, i);
pcie_write(port, val, XILINX_CPM_PCIE_REG_IDR);
- if (port->variant->version == CPM5) {
- val = readl_relaxed(port->cpm_base + XILINX_CPM_PCIE_IR_STATUS);
+ if (variant->ir_status) {
+ val = readl_relaxed(port->cpm_base + variant->ir_status);
if (val)
writel_relaxed(val, port->cpm_base +
- XILINX_CPM_PCIE_IR_STATUS);
+ variant->ir_status);
}
/*
@@ -383,17 +395,15 @@ static int xilinx_cpm_pcie_init_irq_domain(struct xilinx_cpm_pcie *port)
return -EINVAL;
}
- port->cpm_domain = irq_domain_add_linear(pcie_intc_node, 32,
- &event_domain_ops,
- port);
+ port->cpm_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node), 32,
+ &event_domain_ops, port);
if (!port->cpm_domain)
goto out;
irq_domain_update_bus_token(port->cpm_domain, DOMAIN_BUS_NEXUS);
- port->intx_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
- &intx_domain_ops,
- port);
+ port->intx_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node), PCI_NUM_INTX,
+ &intx_domain_ops, port);
if (!port->intx_domain)
goto out;
@@ -465,6 +475,11 @@ static int xilinx_cpm_setup_irq(struct xilinx_cpm_pcie *port)
*/
static void xilinx_cpm_pcie_init_port(struct xilinx_cpm_pcie *port)
{
+ const struct xilinx_cpm_variant *variant = port->variant;
+
+ if (variant->version == CPM5NC_HOST)
+ return;
+
if (cpm_pcie_link_up(port))
dev_info(port->dev, "PCIe Link is UP\n");
else
@@ -483,15 +498,15 @@ static void xilinx_cpm_pcie_init_port(struct xilinx_cpm_pcie *port)
* XILINX_CPM_PCIE_MISC_IR_ENABLE register is mapped to
* CPM SLCR block.
*/
- writel(XILINX_CPM_PCIE_MISC_IR_LOCAL,
+ writel(variant->ir_misc_value,
port->cpm_base + XILINX_CPM_PCIE_MISC_IR_ENABLE);
- if (port->variant->version == CPM5) {
+ if (variant->ir_enable) {
writel(XILINX_CPM_PCIE_IR_LOCAL,
- port->cpm_base + XILINX_CPM_PCIE_IR_ENABLE);
+ port->cpm_base + variant->ir_enable);
}
- /* Enable the Bridge enable bit */
+ /* Set Bridge enable bit */
pcie_write(port, pcie_read(port, XILINX_CPM_PCIE_REG_RPSC) |
XILINX_CPM_PCIE_REG_RPSC_BEN,
XILINX_CPM_PCIE_REG_RPSC);
@@ -525,7 +540,8 @@ static int xilinx_cpm_pcie_parse_dt(struct xilinx_cpm_pcie *port,
if (IS_ERR(port->cfg))
return PTR_ERR(port->cfg);
- if (port->variant->version == CPM5) {
+ if (port->variant->version == CPM5 ||
+ port->variant->version == CPM5_HOST1) {
port->reg_base = devm_platform_ioremap_resource_byname(pdev,
"cpm_csr");
if (IS_ERR(port->reg_base))
@@ -565,28 +581,34 @@ static int xilinx_cpm_pcie_probe(struct platform_device *pdev)
port->dev = dev;
- err = xilinx_cpm_pcie_init_irq_domain(port);
- if (err)
- return err;
+ port->variant = of_device_get_match_data(dev);
- bus = resource_list_first_type(&bridge->windows, IORESOURCE_BUS);
- if (!bus)
- return -ENODEV;
+ if (port->variant->version != CPM5NC_HOST) {
+ err = xilinx_cpm_pcie_init_irq_domain(port);
+ if (err)
+ return err;
+ }
- port->variant = of_device_get_match_data(dev);
+ bus = resource_list_first_type(&bridge->windows, IORESOURCE_BUS);
+ if (!bus) {
+ err = -ENODEV;
+ goto err_free_irq_domains;
+ }
err = xilinx_cpm_pcie_parse_dt(port, bus->res);
if (err) {
dev_err(dev, "Parsing DT failed\n");
- goto err_parse_dt;
+ goto err_free_irq_domains;
}
xilinx_cpm_pcie_init_port(port);
- err = xilinx_cpm_setup_irq(port);
- if (err) {
- dev_err(dev, "Failed to set up interrupts\n");
- goto err_setup_irq;
+ if (port->variant->version != CPM5NC_HOST) {
+ err = xilinx_cpm_setup_irq(port);
+ if (err) {
+ dev_err(dev, "Failed to set up interrupts\n");
+ goto err_setup_irq;
+ }
}
bridge->sysdata = port->cfg;
@@ -599,20 +621,37 @@ static int xilinx_cpm_pcie_probe(struct platform_device *pdev)
return 0;
err_host_bridge:
- xilinx_cpm_free_interrupts(port);
+ if (port->variant->version != CPM5NC_HOST)
+ xilinx_cpm_free_interrupts(port);
err_setup_irq:
pci_ecam_free(port->cfg);
-err_parse_dt:
- xilinx_cpm_free_irq_domains(port);
+err_free_irq_domains:
+ if (port->variant->version != CPM5NC_HOST)
+ xilinx_cpm_free_irq_domains(port);
return err;
}
static const struct xilinx_cpm_variant cpm_host = {
.version = CPM,
+ .ir_misc_value = XILINX_CPM_PCIE0_MISC_IR_LOCAL,
};
static const struct xilinx_cpm_variant cpm5_host = {
.version = CPM5,
+ .ir_misc_value = XILINX_CPM_PCIE0_MISC_IR_LOCAL,
+ .ir_status = XILINX_CPM_PCIE0_IR_STATUS,
+ .ir_enable = XILINX_CPM_PCIE0_IR_ENABLE,
+};
+
+static const struct xilinx_cpm_variant cpm5_host1 = {
+ .version = CPM5_HOST1,
+ .ir_misc_value = XILINX_CPM_PCIE1_MISC_IR_LOCAL,
+ .ir_status = XILINX_CPM_PCIE1_IR_STATUS,
+ .ir_enable = XILINX_CPM_PCIE1_IR_ENABLE,
+};
+
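+/* CPM5NC has no local event interrupts, so IRQ setup is skipped for it */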
+static const struct xilinx_cpm_variant cpm5n_host = {
+ .version = CPM5NC_HOST,
};
static const struct of_device_id xilinx_cpm_pcie_of_match[] = {
@@ -624,6 +663,14 @@ static const struct of_device_id xilinx_cpm_pcie_of_match[] = {
.compatible = "xlnx,versal-cpm5-host",
.data = &cpm5_host,
},
+ {
+ .compatible = "xlnx,versal-cpm5-host1",
+ .data = &cpm5_host1,
+ },
+ {
+ .compatible = "xlnx,versal-cpm5nc-host",
+ .data = &cpm5n_host,
+ },
{}
};
diff --git a/drivers/pci/controller/pcie-xilinx-dma-pl.c b/drivers/pci/controller/pcie-xilinx-dma-pl.c
index 5be5dfd8398f..dc9690a535e1 100644
--- a/drivers/pci/controller/pcie-xilinx-dma-pl.c
+++ b/drivers/pci/controller/pcie-xilinx-dma-pl.c
@@ -71,10 +71,24 @@
/* Phy Status/Control Register definitions */
#define XILINX_PCIE_DMA_REG_PSCR_LNKUP BIT(11)
+#define QDMA_BRIDGE_BASE_OFF 0xcd8
/* Number of MSI IRQs */
#define XILINX_NUM_MSI_IRQS 64
+enum xilinx_pl_dma_version {
+ XDMA,
+ QDMA,
+};
+
+/**
+ * struct xilinx_pl_dma_variant - PL DMA PCIe variant information
+ * @version: DMA version
+ */
+struct xilinx_pl_dma_variant {
+ enum xilinx_pl_dma_version version;
+};
+
struct xilinx_msi {
struct irq_domain *msi_domain;
unsigned long *bitmap;
@@ -88,6 +102,7 @@ struct xilinx_msi {
* struct pl_dma_pcie - PCIe port information
* @dev: Device pointer
* @reg_base: IO Mapped Register Base
+ * @cfg_base: IO Mapped Configuration Base
* @irq: Interrupt number
* @cfg: Holds mappings of config space window
* @phys_reg_base: Physical address of reg base
@@ -97,10 +112,12 @@ struct xilinx_msi {
* @msi: MSI information
* @intx_irq: INTx error interrupt number
* @lock: Lock protecting shared register access
* @variant: PL DMA PCIe variant information
*/
struct pl_dma_pcie {
struct device *dev;
void __iomem *reg_base;
+ void __iomem *cfg_base;
int irq;
struct pci_config_window *cfg;
phys_addr_t phys_reg_base;
@@ -110,16 +127,23 @@ struct pl_dma_pcie {
struct xilinx_msi msi;
int intx_irq;
raw_spinlock_t lock;
+ const struct xilinx_pl_dma_variant *variant;
};
static inline u32 pcie_read(struct pl_dma_pcie *port, u32 reg)
{
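+ /* QDMA variants expose the bridge registers at a fixed offset */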
+ if (port->variant->version == QDMA)
+ return readl(port->reg_base + reg + QDMA_BRIDGE_BASE_OFF);
+
return readl(port->reg_base + reg);
}
static inline void pcie_write(struct pl_dma_pcie *port, u32 val, u32 reg)
{
- writel(val, port->reg_base + reg);
+ if (port->variant->version == QDMA)
+ writel(val, port->reg_base + reg + QDMA_BRIDGE_BASE_OFF);
+ else
+ writel(val, port->reg_base + reg);
}
static inline bool xilinx_pl_dma_pcie_link_up(struct pl_dma_pcie *port)
@@ -173,6 +197,9 @@ static void __iomem *xilinx_pl_dma_pcie_map_bus(struct pci_bus *bus,
if (!xilinx_pl_dma_pcie_valid_device(bus, devfn))
return NULL;
+ if (port->variant->version == QDMA)
+ return port->cfg_base + PCIE_ECAM_OFFSET(bus->number, devfn, where);
+
return port->reg_base + PCIE_ECAM_OFFSET(bus->number, devfn, where);
}
@@ -355,8 +382,8 @@ static struct irq_chip xilinx_msi_irq_chip = {
};
static struct msi_domain_info xilinx_msi_domain_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_MULTI_PCI_MSI),
+ .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_NO_AFFINITY | MSI_FLAG_MULTI_PCI_MSI,
.chip = &xilinx_msi_irq_chip,
};
@@ -370,16 +397,9 @@ static void xilinx_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
msg->data = data->hwirq;
}
-static int xilinx_msi_set_affinity(struct irq_data *irq_data,
- const struct cpumask *mask, bool force)
-{
- return -EINVAL;
-}
-
static struct irq_chip xilinx_irq_chip = {
.name = "pl_dma:MSI",
.irq_compose_msi_msg = xilinx_compose_msi_msg,
- .irq_set_affinity = xilinx_msi_set_affinity,
};
static int xilinx_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
@@ -450,10 +470,10 @@ static int xilinx_pl_dma_pcie_init_msi_irq_domain(struct pl_dma_pcie *port)
struct device *dev = port->dev;
struct xilinx_msi *msi = &port->msi;
int size = BITS_TO_LONGS(XILINX_NUM_MSI_IRQS) * sizeof(long);
- struct fwnode_handle *fwnode = of_node_to_fwnode(port->dev->of_node);
+ struct fwnode_handle *fwnode = of_fwnode_handle(port->dev->of_node);
- msi->dev_domain = irq_domain_add_linear(NULL, XILINX_NUM_MSI_IRQS,
- &dev_msi_domain_ops, port);
+ msi->dev_domain = irq_domain_create_linear(NULL, XILINX_NUM_MSI_IRQS,
+ &dev_msi_domain_ops, port);
if (!msi->dev_domain)
goto out;
@@ -565,15 +585,15 @@ static int xilinx_pl_dma_pcie_init_irq_domain(struct pl_dma_pcie *port)
return -EINVAL;
}
- port->pldma_domain = irq_domain_add_linear(pcie_intc_node, 32,
- &event_domain_ops, port);
+ port->pldma_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node), 32,
+ &event_domain_ops, port);
if (!port->pldma_domain)
return -ENOMEM;
irq_domain_update_bus_token(port->pldma_domain, DOMAIN_BUS_NEXUS);
- port->intx_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
- &intx_domain_ops, port);
+ port->intx_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node), PCI_NUM_INTX,
+ &intx_domain_ops, port);
if (!port->intx_domain) {
dev_err(dev, "Failed to get a INTx IRQ domain\n");
return -ENOMEM;
@@ -731,6 +751,15 @@ static int xilinx_pl_dma_pcie_parse_dt(struct pl_dma_pcie *port,
port->reg_base = port->cfg->win;
+ if (port->variant->version == QDMA) {
+ port->cfg_base = port->cfg->win;
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "breg");
+ port->reg_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(port->reg_base))
+ return PTR_ERR(port->reg_base);
+ port->phys_reg_base = res->start;
+ }
+
err = xilinx_request_msi_irq(port);
if (err) {
pci_ecam_free(port->cfg);
@@ -760,6 +789,8 @@ static int xilinx_pl_dma_pcie_probe(struct platform_device *pdev)
if (!bus)
return -ENODEV;
+ port->variant = of_device_get_match_data(dev);
+
err = xilinx_pl_dma_pcie_parse_dt(port, bus->res);
if (err) {
dev_err(dev, "Parsing DT failed\n");
@@ -791,9 +822,22 @@ err_irq_domain:
return err;
}
+static const struct xilinx_pl_dma_variant xdma_host = {
+ .version = XDMA,
+};
+
+static const struct xilinx_pl_dma_variant qdma_host = {
+ .version = QDMA,
+};
+
static const struct of_device_id xilinx_pl_dma_pcie_of_match[] = {
{
.compatible = "xlnx,xdma-host-3.00",
+ .data = &xdma_host,
+ },
+ {
+ .compatible = "xlnx,qdma-host-3.00",
+ .data = &qdma_host,
},
{}
};
diff --git a/drivers/pci/controller/pcie-xilinx-nwl.c b/drivers/pci/controller/pcie-xilinx-nwl.c
index 0408f4d612b5..c8b05477b719 100644
--- a/drivers/pci/controller/pcie-xilinx-nwl.c
+++ b/drivers/pci/controller/pcie-xilinx-nwl.c
@@ -19,6 +19,7 @@
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/pci-ecam.h>
+#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/irqchip/chained_irq.h>
@@ -80,8 +81,8 @@
#define MSGF_MISC_SR_NON_FATAL_DEV BIT(22)
#define MSGF_MISC_SR_FATAL_DEV BIT(23)
#define MSGF_MISC_SR_LINK_DOWN BIT(24)
-#define MSGF_MSIC_SR_LINK_AUTO_BWIDTH BIT(25)
-#define MSGF_MSIC_SR_LINK_BWIDTH BIT(26)
+#define MSGF_MISC_SR_LINK_AUTO_BWIDTH BIT(25)
+#define MSGF_MISC_SR_LINK_BWIDTH BIT(26)
#define MSGF_MISC_SR_MASKALL (MSGF_MISC_SR_RXMSG_AVAIL | \
MSGF_MISC_SR_RXMSG_OVER | \
@@ -96,8 +97,8 @@
MSGF_MISC_SR_NON_FATAL_DEV | \
MSGF_MISC_SR_FATAL_DEV | \
MSGF_MISC_SR_LINK_DOWN | \
- MSGF_MSIC_SR_LINK_AUTO_BWIDTH | \
- MSGF_MSIC_SR_LINK_BWIDTH)
+ MSGF_MISC_SR_LINK_AUTO_BWIDTH | \
+ MSGF_MISC_SR_LINK_BWIDTH)
/* Legacy interrupt status mask bits */
#define MSGF_LEG_SR_INTA BIT(0)
@@ -157,6 +158,7 @@ struct nwl_pcie {
void __iomem *breg_base;
void __iomem *pcireg_base;
void __iomem *ecam_base;
+ struct phy *phy[4];
phys_addr_t phys_breg_base; /* Physical Bridge Register Base */
phys_addr_t phys_pcie_reg_base; /* Physical PCIe Controller Base */
phys_addr_t phys_ecam_base; /* Physical Configuration Base */
@@ -267,42 +269,42 @@ static irqreturn_t nwl_pcie_misc_handler(int irq, void *data)
return IRQ_NONE;
if (misc_stat & MSGF_MISC_SR_RXMSG_OVER)
- dev_err(dev, "Received Message FIFO Overflow\n");
+ dev_err_ratelimited(dev, "Received Message FIFO Overflow\n");
if (misc_stat & MSGF_MISC_SR_SLAVE_ERR)
- dev_err(dev, "Slave error\n");
+ dev_err_ratelimited(dev, "Slave error\n");
if (misc_stat & MSGF_MISC_SR_MASTER_ERR)
- dev_err(dev, "Master error\n");
+ dev_err_ratelimited(dev, "Master error\n");
if (misc_stat & MSGF_MISC_SR_I_ADDR_ERR)
- dev_err(dev, "In Misc Ingress address translation error\n");
+ dev_err_ratelimited(dev, "In Misc Ingress address translation error\n");
if (misc_stat & MSGF_MISC_SR_E_ADDR_ERR)
- dev_err(dev, "In Misc Egress address translation error\n");
+ dev_err_ratelimited(dev, "In Misc Egress address translation error\n");
if (misc_stat & MSGF_MISC_SR_FATAL_AER)
- dev_err(dev, "Fatal Error in AER Capability\n");
+ dev_err_ratelimited(dev, "Fatal Error in AER Capability\n");
if (misc_stat & MSGF_MISC_SR_NON_FATAL_AER)
- dev_err(dev, "Non-Fatal Error in AER Capability\n");
+ dev_err_ratelimited(dev, "Non-Fatal Error in AER Capability\n");
if (misc_stat & MSGF_MISC_SR_CORR_AER)
- dev_err(dev, "Correctable Error in AER Capability\n");
+ dev_err_ratelimited(dev, "Correctable Error in AER Capability\n");
if (misc_stat & MSGF_MISC_SR_UR_DETECT)
- dev_err(dev, "Unsupported request Detected\n");
+ dev_err_ratelimited(dev, "Unsupported request Detected\n");
if (misc_stat & MSGF_MISC_SR_NON_FATAL_DEV)
- dev_err(dev, "Non-Fatal Error Detected\n");
+ dev_err_ratelimited(dev, "Non-Fatal Error Detected\n");
if (misc_stat & MSGF_MISC_SR_FATAL_DEV)
- dev_err(dev, "Fatal Error Detected\n");
+ dev_err_ratelimited(dev, "Fatal Error Detected\n");
- if (misc_stat & MSGF_MSIC_SR_LINK_AUTO_BWIDTH)
+ if (misc_stat & MSGF_MISC_SR_LINK_AUTO_BWIDTH)
dev_info(dev, "Link Autonomous Bandwidth Management Status bit set\n");
- if (misc_stat & MSGF_MSIC_SR_LINK_BWIDTH)
+ if (misc_stat & MSGF_MISC_SR_LINK_BWIDTH)
dev_info(dev, "Link Bandwidth Management Status bit set\n");
/* Clear misc interrupt status */
@@ -371,7 +373,7 @@ static void nwl_mask_intx_irq(struct irq_data *data)
u32 mask;
u32 val;
- mask = 1 << (data->hwirq - 1);
+ mask = 1 << data->hwirq;
raw_spin_lock_irqsave(&pcie->leg_mask_lock, flags);
val = nwl_bridge_readl(pcie, MSGF_LEG_MASK);
nwl_bridge_writel(pcie, (val & (~mask)), MSGF_LEG_MASK);
@@ -385,7 +387,7 @@ static void nwl_unmask_intx_irq(struct irq_data *data)
u32 mask;
u32 val;
- mask = 1 << (data->hwirq - 1);
+ mask = 1 << data->hwirq;
raw_spin_lock_irqsave(&pcie->leg_mask_lock, flags);
val = nwl_bridge_readl(pcie, MSGF_LEG_MASK);
nwl_bridge_writel(pcie, (val | mask), MSGF_LEG_MASK);
@@ -425,8 +427,8 @@ static struct irq_chip nwl_msi_irq_chip = {
};
static struct msi_domain_info nwl_msi_domain_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_MULTI_PCI_MSI),
+ .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_NO_AFFINITY | MSI_FLAG_MULTI_PCI_MSI,
.chip = &nwl_msi_irq_chip,
};
#endif
@@ -441,16 +443,9 @@ static void nwl_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
msg->data = data->hwirq;
}
-static int nwl_msi_set_affinity(struct irq_data *irq_data,
- const struct cpumask *mask, bool force)
-{
- return -EINVAL;
-}
-
static struct irq_chip nwl_irq_chip = {
.name = "Xilinx MSI",
.irq_compose_msi_msg = nwl_compose_msi_msg,
- .irq_set_affinity = nwl_msi_set_affinity,
};
static int nwl_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
@@ -500,11 +495,10 @@ static int nwl_pcie_init_msi_irq_domain(struct nwl_pcie *pcie)
{
#ifdef CONFIG_PCI_MSI
struct device *dev = pcie->dev;
- struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node);
+ struct fwnode_handle *fwnode = of_fwnode_handle(dev->of_node);
struct nwl_msi *msi = &pcie->msi;
- msi->dev_domain = irq_domain_add_linear(NULL, INT_PCI_MSI_NR,
- &dev_msi_domain_ops, pcie);
+ msi->dev_domain = irq_domain_create_linear(NULL, INT_PCI_MSI_NR, &dev_msi_domain_ops, pcie);
if (!msi->dev_domain) {
dev_err(dev, "failed to create dev IRQ domain\n");
return -ENOMEM;
@@ -521,6 +515,60 @@ static int nwl_pcie_init_msi_irq_domain(struct nwl_pcie *pcie)
return 0;
}
+static void nwl_pcie_phy_power_off(struct nwl_pcie *pcie, int i)
+{
+ int err = phy_power_off(pcie->phy[i]);
+
+ if (err)
+ dev_err(pcie->dev, "could not power off phy %d (err=%d)\n", i,
+ err);
+}
+
+static void nwl_pcie_phy_exit(struct nwl_pcie *pcie, int i)
+{
+ int err = phy_exit(pcie->phy[i]);
+
+ if (err)
+ dev_err(pcie->dev, "could not exit phy %d (err=%d)\n", i, err);
+}
+
+static int nwl_pcie_phy_enable(struct nwl_pcie *pcie)
+{
+ int i, ret;
+
+ for (i = 0; i < ARRAY_SIZE(pcie->phy); i++) {
+ ret = phy_init(pcie->phy[i]);
+ if (ret)
+ goto err;
+
+ ret = phy_power_on(pcie->phy[i]);
+ if (ret) {
+ nwl_pcie_phy_exit(pcie, i);
+ goto err;
+ }
+ }
+
+ return 0;
+
+err:
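+ /* Unwind the PHYs that were already initialized and powered on */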
+ while (i--) {
+ nwl_pcie_phy_power_off(pcie, i);
+ nwl_pcie_phy_exit(pcie, i);
+ }
+
+ return ret;
+}
+
+static void nwl_pcie_phy_disable(struct nwl_pcie *pcie)
+{
+ int i;
+
+ for (i = ARRAY_SIZE(pcie->phy); i--;) {
+ nwl_pcie_phy_power_off(pcie, i);
+ nwl_pcie_phy_exit(pcie, i);
+ }
+}
+
static int nwl_pcie_init_irq_domain(struct nwl_pcie *pcie)
{
struct device *dev = pcie->dev;
@@ -533,10 +581,8 @@ static int nwl_pcie_init_irq_domain(struct nwl_pcie *pcie)
return -EINVAL;
}
- pcie->intx_irq_domain = irq_domain_add_linear(intc_node,
- PCI_NUM_INTX,
- &intx_domain_ops,
- pcie);
+ pcie->intx_irq_domain = irq_domain_create_linear(of_fwnode_handle(intc_node), PCI_NUM_INTX,
+ &intx_domain_ops, pcie);
of_node_put(intc_node);
if (!pcie->intx_irq_domain) {
dev_err(dev, "failed to create IRQ domain\n");
@@ -732,6 +778,7 @@ static int nwl_pcie_parse_dt(struct nwl_pcie *pcie,
{
struct device *dev = pcie->dev;
struct resource *res;
+ int i;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "breg");
pcie->breg_base = devm_ioremap_resource(dev, res);
@@ -759,6 +806,18 @@ static int nwl_pcie_parse_dt(struct nwl_pcie *pcie,
irq_set_chained_handler_and_data(pcie->irq_intx,
nwl_pcie_leg_handler, pcie);
+
+ for (i = 0; i < ARRAY_SIZE(pcie->phy); i++) {
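+ /* PHYs are optional: stop at the first index without one */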
+ pcie->phy[i] = devm_of_phy_get_by_index(dev, dev->of_node, i);
+ if (PTR_ERR(pcie->phy[i]) == -ENODEV) {
+ pcie->phy[i] = NULL;
+ break;
+ }
+
+ if (IS_ERR(pcie->phy[i]))
+ return PTR_ERR(pcie->phy[i]);
+ }
+
return 0;
}
@@ -779,6 +838,7 @@ static int nwl_pcie_probe(struct platform_device *pdev)
return -ENODEV;
pcie = pci_host_bridge_priv(bridge);
+ platform_set_drvdata(pdev, pcie);
pcie->dev = dev;
@@ -798,16 +858,22 @@ static int nwl_pcie_probe(struct platform_device *pdev)
return err;
}
+ err = nwl_pcie_phy_enable(pcie);
+ if (err) {
+ dev_err(dev, "could not enable PHYs\n");
+ goto err_clk;
+ }
+
err = nwl_pcie_bridge_init(pcie);
if (err) {
dev_err(dev, "HW Initialization failed\n");
- return err;
+ goto err_phy;
}
err = nwl_pcie_init_irq_domain(pcie);
if (err) {
dev_err(dev, "Failed creating IRQ Domain\n");
- return err;
+ goto err_phy;
}
bridge->sysdata = pcie;
@@ -817,11 +883,27 @@ static int nwl_pcie_probe(struct platform_device *pdev)
err = nwl_pcie_enable_msi(pcie);
if (err < 0) {
dev_err(dev, "failed to enable MSI support: %d\n", err);
- return err;
+ goto err_phy;
}
}
- return pci_host_probe(bridge);
+ err = pci_host_probe(bridge);
+ if (!err)
+ return 0;
+
+err_phy:
+ nwl_pcie_phy_disable(pcie);
+err_clk:
+ clk_disable_unprepare(pcie->clk);
+ return err;
+}
+
+static void nwl_pcie_remove(struct platform_device *pdev)
+{
+ struct nwl_pcie *pcie = platform_get_drvdata(pdev);
+
+ nwl_pcie_phy_disable(pcie);
+ clk_disable_unprepare(pcie->clk);
}
static struct platform_driver nwl_pcie_driver = {
@@ -831,5 +913,6 @@ static struct platform_driver nwl_pcie_driver = {
.of_match_table = nwl_pcie_of_match,
},
.probe = nwl_pcie_probe,
+ .remove = nwl_pcie_remove,
};
builtin_platform_driver(nwl_pcie_driver);
diff --git a/drivers/pci/controller/pcie-xilinx.c b/drivers/pci/controller/pcie-xilinx.c
index cb6e9f7b0152..e36aa874bae9 100644
--- a/drivers/pci/controller/pcie-xilinx.c
+++ b/drivers/pci/controller/pcie-xilinx.c
@@ -208,11 +208,6 @@ static struct irq_chip xilinx_msi_top_chip = {
.irq_ack = xilinx_msi_top_irq_ack,
};
-static int xilinx_msi_set_affinity(struct irq_data *d, const struct cpumask *mask, bool force)
-{
- return -EINVAL;
-}
-
static void xilinx_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
struct xilinx_pcie *pcie = irq_data_get_irq_chip_data(data);
@@ -225,7 +220,6 @@ static void xilinx_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
static struct irq_chip xilinx_msi_bottom_chip = {
.name = "Xilinx MSI",
- .irq_set_affinity = xilinx_msi_set_affinity,
.irq_compose_msi_msg = xilinx_compose_msi_msg,
};
@@ -271,7 +265,8 @@ static const struct irq_domain_ops xilinx_msi_domain_ops = {
};
static struct msi_domain_info xilinx_msi_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS),
+ .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_NO_AFFINITY,
.chip = &xilinx_msi_top_chip,
};
@@ -466,9 +461,8 @@ static int xilinx_pcie_init_irq_domain(struct xilinx_pcie *pcie)
return -ENODEV;
}
- pcie->leg_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
- &intx_domain_ops,
- pcie);
+ pcie->leg_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node), PCI_NUM_INTX,
+ &intx_domain_ops, pcie);
of_node_put(pcie_intc_node);
if (!pcie->leg_domain) {
dev_err(dev, "Failed to get a INTx IRQ domain\n");
diff --git a/drivers/pci/controller/plda/Kconfig b/drivers/pci/controller/plda/Kconfig
new file mode 100644
index 000000000000..c0e14146d7e4
--- /dev/null
+++ b/drivers/pci/controller/plda/Kconfig
@@ -0,0 +1,30 @@
+# SPDX-License-Identifier: GPL-2.0
+
+menu "PLDA-based PCIe controllers"
+ depends on PCI
+
+config PCIE_PLDA_HOST
+ bool
+
+config PCIE_MICROCHIP_HOST
+ tristate "Microchip AXI PCIe controller"
+ depends on PCI_MSI && OF
+ select PCI_HOST_COMMON
+ select PCIE_PLDA_HOST
+ help
+ Say Y here if you want the kernel to support the Microchip AXI PCIe
+ Host Bridge driver.
+
+config PCIE_STARFIVE_HOST
+ tristate "StarFive PCIe host controller"
+ depends on PCI_MSI && OF
+ depends on ARCH_STARFIVE || COMPILE_TEST
+ select PCIE_PLDA_HOST
+ help
+ Say Y here if you want to support the StarFive PCIe controller in
+ host mode. The StarFive PCIe controller uses the PLDA PCIe core.
+
+ If you choose to build this driver as a module, it will be
+ dynamically linked and the module will be called pcie-starfive.ko.
+
+endmenu
diff --git a/drivers/pci/controller/plda/Makefile b/drivers/pci/controller/plda/Makefile
new file mode 100644
index 000000000000..0ac6851bed48
--- /dev/null
+++ b/drivers/pci/controller/plda/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_PCIE_PLDA_HOST) += pcie-plda-host.o
+obj-$(CONFIG_PCIE_MICROCHIP_HOST) += pcie-microchip-host.o
+obj-$(CONFIG_PCIE_STARFIVE_HOST) += pcie-starfive.o
diff --git a/drivers/pci/controller/plda/pcie-microchip-host.c b/drivers/pci/controller/plda/pcie-microchip-host.c
new file mode 100644
index 000000000000..3fdfffdf0270
--- /dev/null
+++ b/drivers/pci/controller/plda/pcie-microchip-host.c
@@ -0,0 +1,833 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Microchip AXI PCIe Bridge host controller driver
+ *
+ * Copyright (c) 2018 - 2020 Microchip Corporation. All rights reserved.
+ *
+ * Author: Daire McNamara <daire.mcnamara@microchip.com>
+ */
+
+#include <linux/align.h>
+#include <linux/bits.h>
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/log2.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/of_address.h>
+#include <linux/of_pci.h>
+#include <linux/pci-ecam.h>
+#include <linux/platform_device.h>
+#include <linux/wordpart.h>
+
+#include "../../pci.h"
+#include "pcie-plda.h"
+
+#define MC_MAX_NUM_INBOUND_WINDOWS 8
+#define MPFS_NC_BOUNCE_ADDR 0x80000000
+
+/* PCIe Bridge Phy and Controller Phy offsets */
+#define MC_PCIE1_BRIDGE_ADDR 0x00008000u
+#define MC_PCIE1_CTRL_ADDR 0x0000a000u
+
+/* PCIe Controller Phy Regs */
+#define SEC_ERROR_EVENT_CNT 0x20
+#define DED_ERROR_EVENT_CNT 0x24
+#define SEC_ERROR_INT 0x28
+#define SEC_ERROR_INT_TX_RAM_SEC_ERR_INT GENMASK(3, 0)
+#define SEC_ERROR_INT_RX_RAM_SEC_ERR_INT GENMASK(7, 4)
+#define SEC_ERROR_INT_PCIE2AXI_RAM_SEC_ERR_INT GENMASK(11, 8)
+#define SEC_ERROR_INT_AXI2PCIE_RAM_SEC_ERR_INT GENMASK(15, 12)
+#define SEC_ERROR_INT_ALL_RAM_SEC_ERR_INT GENMASK(15, 0)
+#define NUM_SEC_ERROR_INTS (4)
+#define SEC_ERROR_INT_MASK 0x2c
+#define DED_ERROR_INT 0x30
+#define DED_ERROR_INT_TX_RAM_DED_ERR_INT GENMASK(3, 0)
+#define DED_ERROR_INT_RX_RAM_DED_ERR_INT GENMASK(7, 4)
+#define DED_ERROR_INT_PCIE2AXI_RAM_DED_ERR_INT GENMASK(11, 8)
+#define DED_ERROR_INT_AXI2PCIE_RAM_DED_ERR_INT GENMASK(15, 12)
+#define DED_ERROR_INT_ALL_RAM_DED_ERR_INT GENMASK(15, 0)
+#define NUM_DED_ERROR_INTS (4)
+#define DED_ERROR_INT_MASK 0x34
+#define ECC_CONTROL 0x38
+#define ECC_CONTROL_TX_RAM_INJ_ERROR_0 BIT(0)
+#define ECC_CONTROL_TX_RAM_INJ_ERROR_1 BIT(1)
+#define ECC_CONTROL_TX_RAM_INJ_ERROR_2 BIT(2)
+#define ECC_CONTROL_TX_RAM_INJ_ERROR_3 BIT(3)
+#define ECC_CONTROL_RX_RAM_INJ_ERROR_0 BIT(4)
+#define ECC_CONTROL_RX_RAM_INJ_ERROR_1 BIT(5)
+#define ECC_CONTROL_RX_RAM_INJ_ERROR_2 BIT(6)
+#define ECC_CONTROL_RX_RAM_INJ_ERROR_3 BIT(7)
+#define ECC_CONTROL_PCIE2AXI_RAM_INJ_ERROR_0 BIT(8)
+#define ECC_CONTROL_PCIE2AXI_RAM_INJ_ERROR_1 BIT(9)
+#define ECC_CONTROL_PCIE2AXI_RAM_INJ_ERROR_2 BIT(10)
+#define ECC_CONTROL_PCIE2AXI_RAM_INJ_ERROR_3 BIT(11)
+#define ECC_CONTROL_AXI2PCIE_RAM_INJ_ERROR_0 BIT(12)
+#define ECC_CONTROL_AXI2PCIE_RAM_INJ_ERROR_1 BIT(13)
+#define ECC_CONTROL_AXI2PCIE_RAM_INJ_ERROR_2 BIT(14)
+#define ECC_CONTROL_AXI2PCIE_RAM_INJ_ERROR_3 BIT(15)
+#define ECC_CONTROL_TX_RAM_ECC_BYPASS BIT(24)
+#define ECC_CONTROL_RX_RAM_ECC_BYPASS BIT(25)
+#define ECC_CONTROL_PCIE2AXI_RAM_ECC_BYPASS BIT(26)
+#define ECC_CONTROL_AXI2PCIE_RAM_ECC_BYPASS BIT(27)
+#define PCIE_EVENT_INT 0x14c
+#define PCIE_EVENT_INT_L2_EXIT_INT BIT(0)
+#define PCIE_EVENT_INT_HOTRST_EXIT_INT BIT(1)
+#define PCIE_EVENT_INT_DLUP_EXIT_INT BIT(2)
+#define PCIE_EVENT_INT_MASK GENMASK(2, 0)
+#define PCIE_EVENT_INT_L2_EXIT_INT_MASK BIT(16)
+#define PCIE_EVENT_INT_HOTRST_EXIT_INT_MASK BIT(17)
+#define PCIE_EVENT_INT_DLUP_EXIT_INT_MASK BIT(18)
+#define PCIE_EVENT_INT_ENB_MASK GENMASK(18, 16)
+#define PCIE_EVENT_INT_ENB_SHIFT 16
+#define NUM_PCIE_EVENTS (3)
+
+/* PCIe Config space MSI capability structure */
+#define MC_MSI_CAP_CTRL_OFFSET 0xe0u
+
+/* Events */
+#define EVENT_PCIE_L2_EXIT 0
+#define EVENT_PCIE_HOTRST_EXIT 1
+#define EVENT_PCIE_DLUP_EXIT 2
+#define EVENT_SEC_TX_RAM_SEC_ERR 3
+#define EVENT_SEC_RX_RAM_SEC_ERR 4
+#define EVENT_SEC_PCIE2AXI_RAM_SEC_ERR 5
+#define EVENT_SEC_AXI2PCIE_RAM_SEC_ERR 6
+#define EVENT_DED_TX_RAM_DED_ERR 7
+#define EVENT_DED_RX_RAM_DED_ERR 8
+#define EVENT_DED_PCIE2AXI_RAM_DED_ERR 9
+#define EVENT_DED_AXI2PCIE_RAM_DED_ERR 10
+#define EVENT_LOCAL_DMA_END_ENGINE_0 11
+#define EVENT_LOCAL_DMA_END_ENGINE_1 12
+#define EVENT_LOCAL_DMA_ERROR_ENGINE_0 13
+#define EVENT_LOCAL_DMA_ERROR_ENGINE_1 14
+#define NUM_MC_EVENTS 15
+#define EVENT_LOCAL_A_ATR_EVT_POST_ERR (NUM_MC_EVENTS + PLDA_AXI_POST_ERR)
+#define EVENT_LOCAL_A_ATR_EVT_FETCH_ERR (NUM_MC_EVENTS + PLDA_AXI_FETCH_ERR)
+#define EVENT_LOCAL_A_ATR_EVT_DISCARD_ERR (NUM_MC_EVENTS + PLDA_AXI_DISCARD_ERR)
+#define EVENT_LOCAL_A_ATR_EVT_DOORBELL (NUM_MC_EVENTS + PLDA_AXI_DOORBELL)
+#define EVENT_LOCAL_P_ATR_EVT_POST_ERR (NUM_MC_EVENTS + PLDA_PCIE_POST_ERR)
+#define EVENT_LOCAL_P_ATR_EVT_FETCH_ERR (NUM_MC_EVENTS + PLDA_PCIE_FETCH_ERR)
+#define EVENT_LOCAL_P_ATR_EVT_DISCARD_ERR (NUM_MC_EVENTS + PLDA_PCIE_DISCARD_ERR)
+#define EVENT_LOCAL_P_ATR_EVT_DOORBELL (NUM_MC_EVENTS + PLDA_PCIE_DOORBELL)
+#define EVENT_LOCAL_PM_MSI_INT_INTX (NUM_MC_EVENTS + PLDA_INTX)
+#define EVENT_LOCAL_PM_MSI_INT_MSI (NUM_MC_EVENTS + PLDA_MSI)
+#define EVENT_LOCAL_PM_MSI_INT_AER_EVT (NUM_MC_EVENTS + PLDA_AER_EVENT)
+#define EVENT_LOCAL_PM_MSI_INT_EVENTS (NUM_MC_EVENTS + PLDA_MISC_EVENTS)
+#define EVENT_LOCAL_PM_MSI_INT_SYS_ERR (NUM_MC_EVENTS + PLDA_SYS_ERR)
+#define NUM_EVENTS (NUM_MC_EVENTS + PLDA_INT_EVENT_NUM)
+
+#define PCIE_EVENT_CAUSE(x, s) \
+ [EVENT_PCIE_ ## x] = { __stringify(x), s }
+
+#define SEC_ERROR_CAUSE(x, s) \
+ [EVENT_SEC_ ## x] = { __stringify(x), s }
+
+#define DED_ERROR_CAUSE(x, s) \
+ [EVENT_DED_ ## x] = { __stringify(x), s }
+
+#define LOCAL_EVENT_CAUSE(x, s) \
+ [EVENT_LOCAL_ ## x] = { __stringify(x), s }
+
+#define PCIE_EVENT(x) \
+ .offset = PCIE_EVENT_INT, \
+ .mask_offset = PCIE_EVENT_INT, \
+ .mask_high = 1, \
+ .mask = PCIE_EVENT_INT_ ## x ## _INT, \
+ .enb_mask = PCIE_EVENT_INT_ENB_MASK
+
+#define SEC_EVENT(x) \
+ .offset = SEC_ERROR_INT, \
+ .mask_offset = SEC_ERROR_INT_MASK, \
+ .mask = SEC_ERROR_INT_ ## x ## _INT, \
+ .mask_high = 1, \
+ .enb_mask = 0
+
+#define DED_EVENT(x) \
+ .offset = DED_ERROR_INT, \
+ .mask_offset = DED_ERROR_INT_MASK, \
+ .mask_high = 1, \
+ .mask = DED_ERROR_INT_ ## x ## _INT, \
+ .enb_mask = 0
+
+#define LOCAL_EVENT(x) \
+ .offset = ISTATUS_LOCAL, \
+ .mask_offset = IMASK_LOCAL, \
+ .mask_high = 0, \
+ .mask = x ## _MASK, \
+ .enb_mask = 0
+
+#define PCIE_EVENT_TO_EVENT_MAP(x) \
+ { PCIE_EVENT_INT_ ## x ## _INT, EVENT_PCIE_ ## x }
+
+#define SEC_ERROR_TO_EVENT_MAP(x) \
+ { SEC_ERROR_INT_ ## x ## _INT, EVENT_SEC_ ## x }
+
+#define DED_ERROR_TO_EVENT_MAP(x) \
+ { DED_ERROR_INT_ ## x ## _INT, EVENT_DED_ ## x }
+
+#define LOCAL_STATUS_TO_EVENT_MAP(x) \
+ { x ## _MASK, EVENT_LOCAL_ ## x }
+
+struct event_map {
+ u32 reg_mask;
+ u32 event_bit;
+};
+
+struct mc_pcie {
+ struct plda_pcie_rp plda;
+ void __iomem *bridge_base_addr;
+ void __iomem *ctrl_base_addr;
+};
+
+struct cause {
+ const char *sym;
+ const char *str;
+};
+
+static const struct cause event_cause[NUM_EVENTS] = {
+ PCIE_EVENT_CAUSE(L2_EXIT, "L2 exit event"),
+ PCIE_EVENT_CAUSE(HOTRST_EXIT, "Hot reset exit event"),
+ PCIE_EVENT_CAUSE(DLUP_EXIT, "DLUP exit event"),
+ SEC_ERROR_CAUSE(TX_RAM_SEC_ERR, "sec error in tx buffer"),
+ SEC_ERROR_CAUSE(RX_RAM_SEC_ERR, "sec error in rx buffer"),
+ SEC_ERROR_CAUSE(PCIE2AXI_RAM_SEC_ERR, "sec error in pcie2axi buffer"),
+ SEC_ERROR_CAUSE(AXI2PCIE_RAM_SEC_ERR, "sec error in axi2pcie buffer"),
+ DED_ERROR_CAUSE(TX_RAM_DED_ERR, "ded error in tx buffer"),
+ DED_ERROR_CAUSE(RX_RAM_DED_ERR, "ded error in rx buffer"),
+ DED_ERROR_CAUSE(PCIE2AXI_RAM_DED_ERR, "ded error in pcie2axi buffer"),
+ DED_ERROR_CAUSE(AXI2PCIE_RAM_DED_ERR, "ded error in axi2pcie buffer"),
+ LOCAL_EVENT_CAUSE(DMA_ERROR_ENGINE_0, "dma engine 0 error"),
+ LOCAL_EVENT_CAUSE(DMA_ERROR_ENGINE_1, "dma engine 1 error"),
+ LOCAL_EVENT_CAUSE(A_ATR_EVT_POST_ERR, "axi write request error"),
+ LOCAL_EVENT_CAUSE(A_ATR_EVT_FETCH_ERR, "axi read request error"),
+ LOCAL_EVENT_CAUSE(A_ATR_EVT_DISCARD_ERR, "axi read timeout"),
+ LOCAL_EVENT_CAUSE(P_ATR_EVT_POST_ERR, "pcie write request error"),
+ LOCAL_EVENT_CAUSE(P_ATR_EVT_FETCH_ERR, "pcie read request error"),
+ LOCAL_EVENT_CAUSE(P_ATR_EVT_DISCARD_ERR, "pcie read timeout"),
+ LOCAL_EVENT_CAUSE(PM_MSI_INT_AER_EVT, "aer event"),
+ LOCAL_EVENT_CAUSE(PM_MSI_INT_EVENTS, "pm/ltr/hotplug event"),
+ LOCAL_EVENT_CAUSE(PM_MSI_INT_SYS_ERR, "system error"),
+};
+
+static struct event_map pcie_event_to_event[] = {
+ PCIE_EVENT_TO_EVENT_MAP(L2_EXIT),
+ PCIE_EVENT_TO_EVENT_MAP(HOTRST_EXIT),
+ PCIE_EVENT_TO_EVENT_MAP(DLUP_EXIT),
+};
+
+static struct event_map sec_error_to_event[] = {
+ SEC_ERROR_TO_EVENT_MAP(TX_RAM_SEC_ERR),
+ SEC_ERROR_TO_EVENT_MAP(RX_RAM_SEC_ERR),
+ SEC_ERROR_TO_EVENT_MAP(PCIE2AXI_RAM_SEC_ERR),
+ SEC_ERROR_TO_EVENT_MAP(AXI2PCIE_RAM_SEC_ERR),
+};
+
+static struct event_map ded_error_to_event[] = {
+ DED_ERROR_TO_EVENT_MAP(TX_RAM_DED_ERR),
+ DED_ERROR_TO_EVENT_MAP(RX_RAM_DED_ERR),
+ DED_ERROR_TO_EVENT_MAP(PCIE2AXI_RAM_DED_ERR),
+ DED_ERROR_TO_EVENT_MAP(AXI2PCIE_RAM_DED_ERR),
+};
+
+static struct event_map local_status_to_event[] = {
+ LOCAL_STATUS_TO_EVENT_MAP(DMA_END_ENGINE_0),
+ LOCAL_STATUS_TO_EVENT_MAP(DMA_END_ENGINE_1),
+ LOCAL_STATUS_TO_EVENT_MAP(DMA_ERROR_ENGINE_0),
+ LOCAL_STATUS_TO_EVENT_MAP(DMA_ERROR_ENGINE_1),
+ LOCAL_STATUS_TO_EVENT_MAP(A_ATR_EVT_POST_ERR),
+ LOCAL_STATUS_TO_EVENT_MAP(A_ATR_EVT_FETCH_ERR),
+ LOCAL_STATUS_TO_EVENT_MAP(A_ATR_EVT_DISCARD_ERR),
+ LOCAL_STATUS_TO_EVENT_MAP(A_ATR_EVT_DOORBELL),
+ LOCAL_STATUS_TO_EVENT_MAP(P_ATR_EVT_POST_ERR),
+ LOCAL_STATUS_TO_EVENT_MAP(P_ATR_EVT_FETCH_ERR),
+ LOCAL_STATUS_TO_EVENT_MAP(P_ATR_EVT_DISCARD_ERR),
+ LOCAL_STATUS_TO_EVENT_MAP(P_ATR_EVT_DOORBELL),
+ LOCAL_STATUS_TO_EVENT_MAP(PM_MSI_INT_INTX),
+ LOCAL_STATUS_TO_EVENT_MAP(PM_MSI_INT_MSI),
+ LOCAL_STATUS_TO_EVENT_MAP(PM_MSI_INT_AER_EVT),
+ LOCAL_STATUS_TO_EVENT_MAP(PM_MSI_INT_EVENTS),
+ LOCAL_STATUS_TO_EVENT_MAP(PM_MSI_INT_SYS_ERR),
+};
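+
+/*
+ * As a reading aid: each entry above expands via its *_TO_EVENT_MAP macro,
+ * e.g. LOCAL_STATUS_TO_EVENT_MAP(PM_MSI_INT_MSI) becomes
+ * { PM_MSI_INT_MSI_MASK, EVENT_LOCAL_PM_MSI_INT_MSI }, pairing an
+ * ISTATUS_LOCAL register mask with the driver's unified event number.
+ */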
+
+static struct {
+ u32 offset;
+ u32 mask;
+ u32 shift;
+ u32 enb_mask;
+ u32 mask_high;
+ u32 mask_offset;
+} event_descs[] = {
+ { PCIE_EVENT(L2_EXIT) },
+ { PCIE_EVENT(HOTRST_EXIT) },
+ { PCIE_EVENT(DLUP_EXIT) },
+ { SEC_EVENT(TX_RAM_SEC_ERR) },
+ { SEC_EVENT(RX_RAM_SEC_ERR) },
+ { SEC_EVENT(PCIE2AXI_RAM_SEC_ERR) },
+ { SEC_EVENT(AXI2PCIE_RAM_SEC_ERR) },
+ { DED_EVENT(TX_RAM_DED_ERR) },
+ { DED_EVENT(RX_RAM_DED_ERR) },
+ { DED_EVENT(PCIE2AXI_RAM_DED_ERR) },
+ { DED_EVENT(AXI2PCIE_RAM_DED_ERR) },
+ { LOCAL_EVENT(DMA_END_ENGINE_0) },
+ { LOCAL_EVENT(DMA_END_ENGINE_1) },
+ { LOCAL_EVENT(DMA_ERROR_ENGINE_0) },
+ { LOCAL_EVENT(DMA_ERROR_ENGINE_1) },
+ { LOCAL_EVENT(A_ATR_EVT_POST_ERR) },
+ { LOCAL_EVENT(A_ATR_EVT_FETCH_ERR) },
+ { LOCAL_EVENT(A_ATR_EVT_DISCARD_ERR) },
+ { LOCAL_EVENT(A_ATR_EVT_DOORBELL) },
+ { LOCAL_EVENT(P_ATR_EVT_POST_ERR) },
+ { LOCAL_EVENT(P_ATR_EVT_FETCH_ERR) },
+ { LOCAL_EVENT(P_ATR_EVT_DISCARD_ERR) },
+ { LOCAL_EVENT(P_ATR_EVT_DOORBELL) },
+ { LOCAL_EVENT(PM_MSI_INT_INTX) },
+ { LOCAL_EVENT(PM_MSI_INT_MSI) },
+ { LOCAL_EVENT(PM_MSI_INT_AER_EVT) },
+ { LOCAL_EVENT(PM_MSI_INT_EVENTS) },
+ { LOCAL_EVENT(PM_MSI_INT_SYS_ERR) },
+};
+
+static char poss_clks[][5] = { "fic0", "fic1", "fic2", "fic3" };
+
+static struct mc_pcie *port;
+
+static void mc_pcie_enable_msi(struct mc_pcie *port, void __iomem *ecam)
+{
+ struct plda_msi *msi = &port->plda.msi;
+ u16 reg;
+ u8 queue_size;
+
+ /* Fixup MSI enable flag */
+ reg = readw_relaxed(ecam + MC_MSI_CAP_CTRL_OFFSET + PCI_MSI_FLAGS);
+ reg |= PCI_MSI_FLAGS_ENABLE;
+ writew_relaxed(reg, ecam + MC_MSI_CAP_CTRL_OFFSET + PCI_MSI_FLAGS);
+
+ /* Fixup PCI MSI queue flags */
+ queue_size = FIELD_GET(PCI_MSI_FLAGS_QMASK, reg);
+ reg |= FIELD_PREP(PCI_MSI_FLAGS_QSIZE, queue_size);
+ writew_relaxed(reg, ecam + MC_MSI_CAP_CTRL_OFFSET + PCI_MSI_FLAGS);
+
+ /* Fixup MSI addr fields */
+ writel_relaxed(lower_32_bits(msi->vector_phy),
+ ecam + MC_MSI_CAP_CTRL_OFFSET + PCI_MSI_ADDRESS_LO);
+ writel_relaxed(upper_32_bits(msi->vector_phy),
+ ecam + MC_MSI_CAP_CTRL_OFFSET + PCI_MSI_ADDRESS_HI);
+}
+
+static inline u32 reg_to_event(u32 reg, struct event_map field)
+{
+ return (reg & field.reg_mask) ? BIT(field.event_bit) : 0;
+}
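+
+/*
+ * Example: if the L2 exit interrupt is pending, pcie_events() below sees
+ * reg & PCIE_EVENT_INT_L2_EXIT_INT != 0 and reg_to_event() returns
+ * BIT(EVENT_PCIE_L2_EXIT), i.e. BIT(0) in the unified event bitmap.
+ */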
+
+static u32 pcie_events(struct mc_pcie *port)
+{
+ u32 reg = readl_relaxed(port->ctrl_base_addr + PCIE_EVENT_INT);
+ u32 val = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(pcie_event_to_event); i++)
+ val |= reg_to_event(reg, pcie_event_to_event[i]);
+
+ return val;
+}
+
+static u32 sec_errors(struct mc_pcie *port)
+{
+ u32 reg = readl_relaxed(port->ctrl_base_addr + SEC_ERROR_INT);
+ u32 val = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sec_error_to_event); i++)
+ val |= reg_to_event(reg, sec_error_to_event[i]);
+
+ return val;
+}
+
+static u32 ded_errors(struct mc_pcie *port)
+{
+ u32 reg = readl_relaxed(port->ctrl_base_addr + DED_ERROR_INT);
+ u32 val = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ded_error_to_event); i++)
+ val |= reg_to_event(reg, ded_error_to_event[i]);
+
+ return val;
+}
+
+static u32 local_events(struct mc_pcie *port)
+{
+ u32 reg = readl_relaxed(port->bridge_base_addr + ISTATUS_LOCAL);
+ u32 val = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(local_status_to_event); i++)
+ val |= reg_to_event(reg, local_status_to_event[i]);
+
+ return val;
+}
+
+static u32 mc_get_events(struct plda_pcie_rp *port)
+{
+ struct mc_pcie *mc_port = container_of(port, struct mc_pcie, plda);
+ u32 events = 0;
+
+ events |= pcie_events(mc_port);
+ events |= sec_errors(mc_port);
+ events |= ded_errors(mc_port);
+ events |= local_events(mc_port);
+
+ return events;
+}
+
+static irqreturn_t mc_event_handler(int irq, void *dev_id)
+{
+ struct plda_pcie_rp *port = dev_id;
+ struct device *dev = port->dev;
+ struct irq_data *data;
+
+ data = irq_domain_get_irq_data(port->event_domain, irq);
+
+ if (event_cause[data->hwirq].str)
+ dev_err_ratelimited(dev, "%s\n", event_cause[data->hwirq].str);
+ else
+ dev_err_ratelimited(dev, "bad event IRQ %ld\n", data->hwirq);
+
+ return IRQ_HANDLED;
+}
+
+static void mc_ack_event_irq(struct irq_data *data)
+{
+ struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
+ struct mc_pcie *mc_port = container_of(port, struct mc_pcie, plda);
+ u32 event = data->hwirq;
+ void __iomem *addr;
+ u32 mask;
+
+ if (event_descs[event].offset == ISTATUS_LOCAL)
+ addr = mc_port->bridge_base_addr;
+ else
+ addr = mc_port->ctrl_base_addr;
+
+ addr += event_descs[event].offset;
+ mask = event_descs[event].mask;
+ mask |= event_descs[event].enb_mask;
+
+ writel_relaxed(mask, addr);
+}
+
+static void mc_mask_event_irq(struct irq_data *data)
+{
+ struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
+ struct mc_pcie *mc_port = container_of(port, struct mc_pcie, plda);
+ u32 event = data->hwirq;
+ void __iomem *addr;
+ u32 mask;
+ u32 val;
+
+ if (event_descs[event].offset == ISTATUS_LOCAL)
+ addr = mc_port->bridge_base_addr;
+ else
+ addr = mc_port->ctrl_base_addr;
+
+ addr += event_descs[event].mask_offset;
+ mask = event_descs[event].mask;
+ if (event_descs[event].enb_mask) {
+ mask <<= PCIE_EVENT_INT_ENB_SHIFT;
+ mask &= PCIE_EVENT_INT_ENB_MASK;
+ }
+
+ if (!event_descs[event].mask_high)
+ mask = ~mask;
+
+ raw_spin_lock(&port->lock);
+ val = readl_relaxed(addr);
+ if (event_descs[event].mask_high)
+ val |= mask;
+ else
+ val &= mask;
+
+ writel_relaxed(val, addr);
+ raw_spin_unlock(&port->lock);
+}
+
+static void mc_unmask_event_irq(struct irq_data *data)
+{
+ struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
+ struct mc_pcie *mc_port = container_of(port, struct mc_pcie, plda);
+ u32 event = data->hwirq;
+ void __iomem *addr;
+ u32 mask;
+ u32 val;
+
+ if (event_descs[event].offset == ISTATUS_LOCAL)
+ addr = mc_port->bridge_base_addr;
+ else
+ addr = mc_port->ctrl_base_addr;
+
+ addr += event_descs[event].mask_offset;
+ mask = event_descs[event].mask;
+
+ if (event_descs[event].enb_mask)
+ mask <<= PCIE_EVENT_INT_ENB_SHIFT;
+
+ if (event_descs[event].mask_high)
+ mask = ~mask;
+
+ if (event_descs[event].enb_mask)
+ mask &= PCIE_EVENT_INT_ENB_MASK;
+
+ raw_spin_lock(&port->lock);
+ val = readl_relaxed(addr);
+ if (event_descs[event].mask_high)
+ val &= mask;
+ else
+ val |= mask;
+ writel_relaxed(val, addr);
+ raw_spin_unlock(&port->lock);
+}
+
+static struct irq_chip mc_event_irq_chip = {
+ .name = "Microchip PCIe EVENT",
+ .irq_ack = mc_ack_event_irq,
+ .irq_mask = mc_mask_event_irq,
+ .irq_unmask = mc_unmask_event_irq,
+};
+
+static inline void mc_pcie_deinit_clk(void *data)
+{
+ struct clk *clk = data;
+
+ clk_disable_unprepare(clk);
+}
+
+static inline struct clk *mc_pcie_init_clk(struct device *dev, const char *id)
+{
+ struct clk *clk;
+ int ret;
+
+ clk = devm_clk_get_optional(dev, id);
+ if (IS_ERR(clk))
+ return clk;
+ if (!clk)
+ return clk;
+
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ return ERR_PTR(ret);
+
+ devm_add_action_or_reset(dev, mc_pcie_deinit_clk, clk);
+
+ return clk;
+}
+
+static int mc_pcie_init_clks(struct device *dev)
+{
+ int i;
+ struct clk *fic;
+
+ /*
+ * PCIe may be clocked via the Fabric Interface using between 1 and 4
+ * clocks. Scan the DT for clocks and enable any that are present.
+ */
+ for (i = 0; i < ARRAY_SIZE(poss_clks); i++) {
+ fic = mc_pcie_init_clk(dev, poss_clks[i]);
+ if (IS_ERR(fic))
+ return PTR_ERR(fic);
+ }
+
+ return 0;
+}
+
+static int mc_request_event_irq(struct plda_pcie_rp *plda, int event_irq,
+ int event)
+{
+ return devm_request_irq(plda->dev, event_irq, mc_event_handler,
+ 0, event_cause[event].sym, plda);
+}
+
+static const struct plda_event_ops mc_event_ops = {
+ .get_events = mc_get_events,
+};
+
+static const struct plda_event mc_event = {
+ .request_event_irq = mc_request_event_irq,
+ .intx_event = EVENT_LOCAL_PM_MSI_INT_INTX,
+ .msi_event = EVENT_LOCAL_PM_MSI_INT_MSI,
+};
+
+static inline void mc_clear_secs(struct mc_pcie *port)
+{
+ writel_relaxed(SEC_ERROR_INT_ALL_RAM_SEC_ERR_INT,
+ port->ctrl_base_addr + SEC_ERROR_INT);
+ writel_relaxed(0, port->ctrl_base_addr + SEC_ERROR_EVENT_CNT);
+}
+
+static inline void mc_clear_deds(struct mc_pcie *port)
+{
+ writel_relaxed(DED_ERROR_INT_ALL_RAM_DED_ERR_INT,
+ port->ctrl_base_addr + DED_ERROR_INT);
+ writel_relaxed(0, port->ctrl_base_addr + DED_ERROR_EVENT_CNT);
+}
+
+static void mc_disable_interrupts(struct mc_pcie *port)
+{
+ u32 val;
+
+ /* Ensure ECC bypass is enabled */
+ val = ECC_CONTROL_TX_RAM_ECC_BYPASS |
+ ECC_CONTROL_RX_RAM_ECC_BYPASS |
+ ECC_CONTROL_PCIE2AXI_RAM_ECC_BYPASS |
+ ECC_CONTROL_AXI2PCIE_RAM_ECC_BYPASS;
+ writel_relaxed(val, port->ctrl_base_addr + ECC_CONTROL);
+
+ /* Disable SEC errors and clear any outstanding */
+ writel_relaxed(SEC_ERROR_INT_ALL_RAM_SEC_ERR_INT,
+ port->ctrl_base_addr + SEC_ERROR_INT_MASK);
+ mc_clear_secs(port);
+
+ /* Disable DED errors and clear any outstanding */
+ writel_relaxed(DED_ERROR_INT_ALL_RAM_DED_ERR_INT,
+ port->ctrl_base_addr + DED_ERROR_INT_MASK);
+ mc_clear_deds(port);
+
+ /* Disable local interrupts and clear any outstanding */
+ writel_relaxed(0, port->bridge_base_addr + IMASK_LOCAL);
+ writel_relaxed(GENMASK(31, 0), port->bridge_base_addr + ISTATUS_LOCAL);
+ writel_relaxed(GENMASK(31, 0), port->bridge_base_addr + ISTATUS_MSI);
+
+ /* Disable PCIe events and clear any outstanding */
+ val = PCIE_EVENT_INT_L2_EXIT_INT |
+ PCIE_EVENT_INT_HOTRST_EXIT_INT |
+ PCIE_EVENT_INT_DLUP_EXIT_INT |
+ PCIE_EVENT_INT_L2_EXIT_INT_MASK |
+ PCIE_EVENT_INT_HOTRST_EXIT_INT_MASK |
+ PCIE_EVENT_INT_DLUP_EXIT_INT_MASK;
+ writel_relaxed(val, port->ctrl_base_addr + PCIE_EVENT_INT);
+
+ /* Disable host interrupts and clear any outstanding */
+ writel_relaxed(0, port->bridge_base_addr + IMASK_HOST);
+ writel_relaxed(GENMASK(31, 0), port->bridge_base_addr + ISTATUS_HOST);
+}
+
+static void mc_pcie_setup_inbound_atr(struct mc_pcie *port, int window_index,
+ u64 axi_addr, u64 pcie_addr, u64 size)
+{
+ u32 table_offset = window_index * ATR_ENTRY_SIZE;
+ void __iomem *table_addr = port->bridge_base_addr + table_offset;
+ u32 atr_sz;
+ u32 val;
+
+ atr_sz = ilog2(size) - 1;
+
+ val = ALIGN_DOWN(lower_32_bits(pcie_addr), SZ_4K);
+ val |= FIELD_PREP(ATR_SIZE_MASK, atr_sz);
+ val |= ATR_IMPL_ENABLE;
+
+ writel(val, table_addr + ATR0_PCIE_WIN0_SRCADDR_PARAM);
+
+ writel(upper_32_bits(pcie_addr), table_addr + ATR0_PCIE_WIN0_SRC_ADDR);
+
+ writel(lower_32_bits(axi_addr), table_addr + ATR0_PCIE_WIN0_TRSL_ADDR_LSB);
+ writel(upper_32_bits(axi_addr), table_addr + ATR0_PCIE_WIN0_TRSL_ADDR_UDW);
+
+ writel(TRSL_ID_AXI4_MASTER_0, table_addr + ATR0_PCIE_WIN0_TRSL_PARAM);
+}
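+
+/*
+ * The ATR size field encodes a power-of-two window as ilog2(size) - 1;
+ * a 1 GB window, for example, gives atr_sz = 30 - 1 = 29. The low bits
+ * of the source address register carry the size and enable fields,
+ * hence the 4 KB ALIGN_DOWN of the PCIe address above.
+ */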
+
+static int mc_pcie_setup_inbound_ranges(struct platform_device *pdev,
+ struct mc_pcie *port)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *dn = dev->of_node;
+ struct of_range_parser parser;
+ struct of_range range;
+ int atr_index = 0;
+
+ /*
+ * MPFS PCIe Root Port is 32-bit only, behind a Fabric Interface
+ * Controller FPGA logic block which contains the AXI-S interface.
+ *
+ * From the point of view of the PCIe Root Port, there are only two
+ * supported Root Port configurations:
+ *
+ * Configuration 1: for use with fully coherent designs; supports a
+ * window from 0x0 (CPU space) to specified PCIe space.
+ *
+ * Configuration 2: for use with non-coherent designs; supports two
+ * 1 GB windows to CPU space; one mapping CPU space 0 to PCIe space
+ * 0x80000000 and a second mapping CPU space 0x40000000 to PCIe
+ * space 0xc0000000. This configuration needs two windows because of
+ * how the MSI space is allocated in the AXI-S range on MPFS.
+ *
+ * The FIC interface outside the PCIe block *must* complete the
+ * inbound address translation as per MCHP MPFS FPGA design
+ * guidelines.
+ */
+ if (device_property_read_bool(dev, "dma-noncoherent")) {
+ /*
+ * This case always needs the same two tables: hardware
+ * interactions between address and size make both necessary.
+ */
+ mc_pcie_setup_inbound_atr(port, 0, 0,
+ MPFS_NC_BOUNCE_ADDR, SZ_1G);
+ mc_pcie_setup_inbound_atr(port, 1, SZ_1G,
+ MPFS_NC_BOUNCE_ADDR + SZ_1G, SZ_1G);
+ } else {
+ /* Find any DMA ranges */
+ if (of_pci_dma_range_parser_init(&parser, dn)) {
+ /* No DMA range property - setup default */
+ mc_pcie_setup_inbound_atr(port, 0, 0, 0, SZ_4G);
+ return 0;
+ }
+
+ for_each_of_range(&parser, &range) {
+ if (atr_index >= MC_MAX_NUM_INBOUND_WINDOWS) {
+ dev_err(dev, "too many inbound ranges; %d available tables\n",
+ MC_MAX_NUM_INBOUND_WINDOWS);
+ return -EINVAL;
+ }
+ mc_pcie_setup_inbound_atr(port, atr_index, 0,
+ range.pci_addr, range.size);
+ atr_index++;
+ }
+ }
+
+ return 0;
+}
+
+static int mc_platform_init(struct pci_config_window *cfg)
+{
+ struct device *dev = cfg->parent;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct pci_host_bridge *bridge = platform_get_drvdata(pdev);
+ int ret;
+
+ /* Configure address translation table 0 for PCIe config space */
+ plda_pcie_setup_window(port->bridge_base_addr, 0, cfg->res.start,
+ cfg->res.start,
+ resource_size(&cfg->res));
+
+ /* Need some fixups in config space */
+ mc_pcie_enable_msi(port, cfg->win);
+
+ /* Configure non-config space outbound ranges */
+ ret = plda_pcie_setup_iomems(bridge, &port->plda);
+ if (ret)
+ return ret;
+
+ ret = mc_pcie_setup_inbound_ranges(pdev, port);
+ if (ret)
+ return ret;
+
+ port->plda.event_ops = &mc_event_ops;
+ port->plda.event_irq_chip = &mc_event_irq_chip;
+ port->plda.events_bitmap = GENMASK(NUM_EVENTS - 1, 0);
+
+ /* Address translation is up; safe to enable interrupts */
+ ret = plda_init_interrupts(pdev, &port->plda, &mc_event);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int mc_host_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ void __iomem *apb_base_addr;
+ struct plda_pcie_rp *plda;
+ int ret;
+ u32 val;
+
+ port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
+ if (!port)
+ return -ENOMEM;
+
+ plda = &port->plda;
+ plda->dev = dev;
+
+ port->bridge_base_addr = devm_platform_ioremap_resource_byname(pdev,
+ "bridge");
+ port->ctrl_base_addr = devm_platform_ioremap_resource_byname(pdev,
+ "ctrl");
+ if (!IS_ERR(port->bridge_base_addr) && !IS_ERR(port->ctrl_base_addr))
+ goto addrs_set;
+
+ /*
+ * The original, incorrect binding that lumped the control and
+ * bridge addresses together still needs to be handled by the driver.
+ */
+ apb_base_addr = devm_platform_ioremap_resource_byname(pdev, "apb");
+ if (IS_ERR(apb_base_addr))
+ return dev_err_probe(dev, PTR_ERR(apb_base_addr),
+ "both legacy apb register and ctrl/bridge regions missing");
+
+ port->bridge_base_addr = apb_base_addr + MC_PCIE1_BRIDGE_ADDR;
+ port->ctrl_base_addr = apb_base_addr + MC_PCIE1_CTRL_ADDR;
+
+addrs_set:
+ mc_disable_interrupts(port);
+
+ plda->bridge_addr = port->bridge_base_addr;
+ plda->num_events = NUM_EVENTS;
+
+ /* Allow enabling MSI by disabling MSI-X */
+ val = readl(port->bridge_base_addr + PCIE_PCI_IRQ_DW0);
+ val &= ~MSIX_CAP_MASK;
+ writel(val, port->bridge_base_addr + PCIE_PCI_IRQ_DW0);
+
+ /* Pick num vectors from bitfile programmed onto FPGA fabric */
+ val = readl(port->bridge_base_addr + PCIE_PCI_IRQ_DW0);
+ val &= NUM_MSI_MSGS_MASK;
+ val >>= NUM_MSI_MSGS_SHIFT;
+
+ plda->msi.num_vectors = 1 << val;
+
+ /* Pick vector address from design */
+ plda->msi.vector_phy = readl_relaxed(port->bridge_base_addr + IMSI_ADDR);
+
+ ret = mc_pcie_init_clks(dev);
+ if (ret) {
+ dev_err(dev, "failed to get clock resources, error %d\n", ret);
+ return -ENODEV;
+ }
+
+ return pci_host_common_probe(pdev);
+}
+
+static const struct pci_ecam_ops mc_ecam_ops = {
+ .init = mc_platform_init,
+ .pci_ops = {
+ .map_bus = pci_ecam_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+ }
+};
+
+static const struct of_device_id mc_pcie_of_match[] = {
+ {
+ .compatible = "microchip,pcie-host-1.0",
+ .data = &mc_ecam_ops,
+ },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, mc_pcie_of_match);
+
+static struct platform_driver mc_pcie_driver = {
+ .probe = mc_host_probe,
+ .driver = {
+ .name = "microchip-pcie",
+ .of_match_table = mc_pcie_of_match,
+ .suppress_bind_attrs = true,
+ },
+};
+
+builtin_platform_driver(mc_pcie_driver);
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Microchip PCIe host controller driver");
+MODULE_AUTHOR("Daire McNamara <daire.mcnamara@microchip.com>");
diff --git a/drivers/pci/controller/plda/pcie-plda-host.c b/drivers/pci/controller/plda/pcie-plda-host.c
new file mode 100644
index 000000000000..3abedf723215
--- /dev/null
+++ b/drivers/pci/controller/plda/pcie-plda-host.c
@@ -0,0 +1,653 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PLDA PCIe XpressRich host controller driver
+ *
+ * Copyright (C) 2023 Microchip Co. Ltd
+ * StarFive Co. Ltd
+ *
+ * Author: Daire McNamara <daire.mcnamara@microchip.com>
+ */
+
+#include <linux/align.h>
+#include <linux/bitfield.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/msi.h>
+#include <linux/pci_regs.h>
+#include <linux/pci-ecam.h>
+#include <linux/wordpart.h>
+
+#include "pcie-plda.h"
+
+void __iomem *plda_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
+ int where)
+{
+ struct plda_pcie_rp *pcie = bus->sysdata;
+
+ return pcie->config_base + PCIE_ECAM_OFFSET(bus->number, devfn, where);
+}
+EXPORT_SYMBOL_GPL(plda_pcie_map_bus);
+
+static void plda_handle_msi(struct irq_desc *desc)
+{
+ struct plda_pcie_rp *port = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct device *dev = port->dev;
+ struct plda_msi *msi = &port->msi;
+ void __iomem *bridge_base_addr = port->bridge_addr;
+ unsigned long status;
+ u32 bit;
+ int ret;
+
+ chained_irq_enter(chip, desc);
+
+ status = readl_relaxed(bridge_base_addr + ISTATUS_LOCAL);
+ if (status & PM_MSI_INT_MSI_MASK) {
+ writel_relaxed(status & PM_MSI_INT_MSI_MASK,
+ bridge_base_addr + ISTATUS_LOCAL);
+ status = readl_relaxed(bridge_base_addr + ISTATUS_MSI);
+ for_each_set_bit(bit, &status, msi->num_vectors) {
+ ret = generic_handle_domain_irq(msi->dev_domain, bit);
+ if (ret)
+ dev_err_ratelimited(dev, "bad MSI IRQ %d\n",
+ bit);
+ }
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static void plda_msi_bottom_irq_ack(struct irq_data *data)
+{
+ struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
+ void __iomem *bridge_base_addr = port->bridge_addr;
+ u32 bitpos = data->hwirq;
+
+ writel_relaxed(BIT(bitpos), bridge_base_addr + ISTATUS_MSI);
+}
+
+static void plda_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
+{
+ struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
+ phys_addr_t addr = port->msi.vector_phy;
+
+ msg->address_lo = lower_32_bits(addr);
+ msg->address_hi = upper_32_bits(addr);
+ msg->data = data->hwirq;
+
+ dev_dbg(port->dev, "msi#%x address_hi %#x address_lo %#x\n",
+ (int)data->hwirq, msg->address_hi, msg->address_lo);
+}
+
+static struct irq_chip plda_msi_bottom_irq_chip = {
+ .name = "PLDA MSI",
+ .irq_ack = plda_msi_bottom_irq_ack,
+ .irq_compose_msi_msg = plda_compose_msi_msg,
+};
+
+static int plda_irq_msi_domain_alloc(struct irq_domain *domain,
+ unsigned int virq,
+ unsigned int nr_irqs,
+ void *args)
+{
+ struct plda_pcie_rp *port = domain->host_data;
+ struct plda_msi *msi = &port->msi;
+ unsigned long bit;
+
+ mutex_lock(&msi->lock);
+ bit = find_first_zero_bit(msi->used, msi->num_vectors);
+ if (bit >= msi->num_vectors) {
+ mutex_unlock(&msi->lock);
+ return -ENOSPC;
+ }
+
+ set_bit(bit, msi->used);
+
+ irq_domain_set_info(domain, virq, bit, &plda_msi_bottom_irq_chip,
+ domain->host_data, handle_edge_irq, NULL, NULL);
+
+ mutex_unlock(&msi->lock);
+
+ return 0;
+}
+
+static void plda_irq_msi_domain_free(struct irq_domain *domain,
+ unsigned int virq,
+ unsigned int nr_irqs)
+{
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+ struct plda_pcie_rp *port = irq_data_get_irq_chip_data(d);
+ struct plda_msi *msi = &port->msi;
+
+ mutex_lock(&msi->lock);
+
+ if (test_bit(d->hwirq, msi->used))
+ __clear_bit(d->hwirq, msi->used);
+ else
+ dev_err(port->dev, "trying to free unused MSI%lu\n", d->hwirq);
+
+ mutex_unlock(&msi->lock);
+}
+
+static const struct irq_domain_ops msi_domain_ops = {
+ .alloc = plda_irq_msi_domain_alloc,
+ .free = plda_irq_msi_domain_free,
+};
+
+static struct irq_chip plda_msi_irq_chip = {
+ .name = "PLDA PCIe MSI",
+ .irq_ack = irq_chip_ack_parent,
+ .irq_mask = pci_msi_mask_irq,
+ .irq_unmask = pci_msi_unmask_irq,
+};
+
+static struct msi_domain_info plda_msi_domain_info = {
+ .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX,
+ .chip = &plda_msi_irq_chip,
+};
+
+static int plda_allocate_msi_domains(struct plda_pcie_rp *port)
+{
+ struct device *dev = port->dev;
+ struct fwnode_handle *fwnode = of_fwnode_handle(dev->of_node);
+ struct plda_msi *msi = &port->msi;
+
+ mutex_init(&port->msi.lock);
+
+ msi->dev_domain = irq_domain_create_linear(NULL, msi->num_vectors, &msi_domain_ops, port);
+ if (!msi->dev_domain) {
+ dev_err(dev, "failed to create IRQ domain\n");
+ return -ENOMEM;
+ }
+
+ msi->msi_domain = pci_msi_create_irq_domain(fwnode,
+ &plda_msi_domain_info,
+ msi->dev_domain);
+ if (!msi->msi_domain) {
+ dev_err(dev, "failed to create MSI domain\n");
+ irq_domain_remove(msi->dev_domain);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void plda_handle_intx(struct irq_desc *desc)
+{
+ struct plda_pcie_rp *port = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct device *dev = port->dev;
+ void __iomem *bridge_base_addr = port->bridge_addr;
+ unsigned long status;
+ u32 bit;
+ int ret;
+
+ chained_irq_enter(chip, desc);
+
+ status = readl_relaxed(bridge_base_addr + ISTATUS_LOCAL);
+ if (status & PM_MSI_INT_INTX_MASK) {
+ status &= PM_MSI_INT_INTX_MASK;
+ status >>= PM_MSI_INT_INTX_SHIFT;
+ for_each_set_bit(bit, &status, PCI_NUM_INTX) {
+ ret = generic_handle_domain_irq(port->intx_domain, bit);
+ if (ret)
+ dev_err_ratelimited(dev, "bad INTx IRQ %d\n",
+ bit);
+ }
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static void plda_ack_intx_irq(struct irq_data *data)
+{
+ struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
+ void __iomem *bridge_base_addr = port->bridge_addr;
+ u32 mask = BIT(data->hwirq + PM_MSI_INT_INTX_SHIFT);
+
+ writel_relaxed(mask, bridge_base_addr + ISTATUS_LOCAL);
+}
+
+static void plda_mask_intx_irq(struct irq_data *data)
+{
+ struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
+ void __iomem *bridge_base_addr = port->bridge_addr;
+ unsigned long flags;
+ u32 mask = BIT(data->hwirq + PM_MSI_INT_INTX_SHIFT);
+ u32 val;
+
+ raw_spin_lock_irqsave(&port->lock, flags);
+ val = readl_relaxed(bridge_base_addr + IMASK_LOCAL);
+ val &= ~mask;
+ writel_relaxed(val, bridge_base_addr + IMASK_LOCAL);
+ raw_spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static void plda_unmask_intx_irq(struct irq_data *data)
+{
+ struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
+ void __iomem *bridge_base_addr = port->bridge_addr;
+ unsigned long flags;
+ u32 mask = BIT(data->hwirq + PM_MSI_INT_INTX_SHIFT);
+ u32 val;
+
+ raw_spin_lock_irqsave(&port->lock, flags);
+ val = readl_relaxed(bridge_base_addr + IMASK_LOCAL);
+ val |= mask;
+ writel_relaxed(val, bridge_base_addr + IMASK_LOCAL);
+ raw_spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static struct irq_chip plda_intx_irq_chip = {
+ .name = "PLDA PCIe INTx",
+ .irq_ack = plda_ack_intx_irq,
+ .irq_mask = plda_mask_intx_irq,
+ .irq_unmask = plda_unmask_intx_irq,
+};
+
+static int plda_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &plda_intx_irq_chip, handle_level_irq);
+ irq_set_chip_data(irq, domain->host_data);
+
+ return 0;
+}
+
+static const struct irq_domain_ops intx_domain_ops = {
+ .map = plda_pcie_intx_map,
+};
+
+static u32 plda_get_events(struct plda_pcie_rp *port)
+{
+ u32 events, val, origin;
+
+ origin = readl_relaxed(port->bridge_addr + ISTATUS_LOCAL);
+
+ /* MSI, AER, misc-event and system-error bits (31:28) */
+ val = (origin & SYS_AND_MSI_MASK) >> PM_MSI_INT_MSI_SHIFT;
+ events = val << (PM_MSI_INT_MSI_SHIFT - PCI_NUM_INTX + 1);
+
+ /* INTx events */
+ if (origin & PM_MSI_INT_INTX_MASK)
+ events |= BIT(PM_MSI_INT_INTX_SHIFT);
+
+ /* the remaining events (bits 23..0) map 1:1 to register bits */
+ events |= origin & GENMASK(P_ATR_EVT_DOORBELL_SHIFT, 0);
+
+ return events;
+}
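+
+/*
+ * Worked example of the repacking above: an AER event sets ISTATUS_LOCAL
+ * bit 29; (origin & SYS_AND_MSI_MASK) >> 28 leaves bit 1 set, and shifting
+ * left by 28 - PCI_NUM_INTX + 1 = 25 lands it on event bit 26, which is
+ * PLDA_NUM_DMA_EVENTS + PLDA_AER_EVENT. The four INTx status bits are
+ * collapsed into the single EVENT_PM_MSI_INT_INTX bit (24).
+ */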
+
+static irqreturn_t plda_event_handler(int irq, void *dev_id)
+{
+ return IRQ_HANDLED;
+}
+
+static void plda_handle_event(struct irq_desc *desc)
+{
+ struct plda_pcie_rp *port = irq_desc_get_handler_data(desc);
+ unsigned long events;
+ u32 bit;
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+
+ chained_irq_enter(chip, desc);
+
+ events = port->event_ops->get_events(port);
+
+ events &= port->events_bitmap;
+ for_each_set_bit(bit, &events, port->num_events)
+ generic_handle_domain_irq(port->event_domain, bit);
+
+ chained_irq_exit(chip, desc);
+}
+
+static u32 plda_hwirq_to_mask(int hwirq)
+{
+ u32 mask;
+
+ /* hwirqs 0-23 map 1:1 to register bits */
+ if (hwirq < EVENT_PM_MSI_INT_INTX)
+ mask = BIT(hwirq);
+ else if (hwirq == EVENT_PM_MSI_INT_INTX)
+ mask = PM_MSI_INT_INTX_MASK;
+ else
+ mask = BIT(hwirq + PCI_NUM_INTX - 1);
+
+ return mask;
+}
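+
+/*
+ * This is the inverse of the plda_get_events() packing: hwirq 25
+ * (EVENT_PM_MSI_INT_MSI) yields BIT(25 + PCI_NUM_INTX - 1) = BIT(28),
+ * i.e. PM_MSI_INT_MSI_MASK, and the single INTx event expands back to
+ * all four PM_MSI_INT_INTX_MASK bits.
+ */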
+
+static void plda_ack_event_irq(struct irq_data *data)
+{
+ struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
+
+ writel_relaxed(plda_hwirq_to_mask(data->hwirq),
+ port->bridge_addr + ISTATUS_LOCAL);
+}
+
+static void plda_mask_event_irq(struct irq_data *data)
+{
+ struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
+ u32 mask, val;
+
+ mask = plda_hwirq_to_mask(data->hwirq);
+
+ raw_spin_lock(&port->lock);
+ val = readl_relaxed(port->bridge_addr + IMASK_LOCAL);
+ val &= ~mask;
+ writel_relaxed(val, port->bridge_addr + IMASK_LOCAL);
+ raw_spin_unlock(&port->lock);
+}
+
+static void plda_unmask_event_irq(struct irq_data *data)
+{
+ struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
+ u32 mask, val;
+
+ mask = plda_hwirq_to_mask(data->hwirq);
+
+ raw_spin_lock(&port->lock);
+ val = readl_relaxed(port->bridge_addr + IMASK_LOCAL);
+ val |= mask;
+ writel_relaxed(val, port->bridge_addr + IMASK_LOCAL);
+ raw_spin_unlock(&port->lock);
+}
+
+static struct irq_chip plda_event_irq_chip = {
+ .name = "PLDA PCIe EVENT",
+ .irq_ack = plda_ack_event_irq,
+ .irq_mask = plda_mask_event_irq,
+ .irq_unmask = plda_unmask_event_irq,
+};
+
+static const struct plda_event_ops plda_event_ops = {
+ .get_events = plda_get_events,
+};
+
+static int plda_pcie_event_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ struct plda_pcie_rp *port = (void *)domain->host_data;
+
+ irq_set_chip_and_handler(irq, port->event_irq_chip, handle_level_irq);
+ irq_set_chip_data(irq, domain->host_data);
+
+ return 0;
+}
+
+static const struct irq_domain_ops plda_event_domain_ops = {
+ .map = plda_pcie_event_map,
+};
+
+static int plda_pcie_init_irq_domains(struct plda_pcie_rp *port)
+{
+ struct device *dev = port->dev;
+ struct device_node *node = dev->of_node;
+ struct device_node *pcie_intc_node;
+
+ /* Setup INTx */
+ pcie_intc_node = of_get_next_child(node, NULL);
+ if (!pcie_intc_node) {
+ dev_err(dev, "failed to find PCIe Intc node\n");
+ return -EINVAL;
+ }
+
+ port->event_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node),
+ port->num_events, &plda_event_domain_ops,
+ port);
+ if (!port->event_domain) {
+ dev_err(dev, "failed to get event domain\n");
+ of_node_put(pcie_intc_node);
+ return -ENOMEM;
+ }
+
+ irq_domain_update_bus_token(port->event_domain, DOMAIN_BUS_NEXUS);
+
+ port->intx_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node), PCI_NUM_INTX,
+ &intx_domain_ops, port);
+ if (!port->intx_domain) {
+ dev_err(dev, "failed to get an INTx IRQ domain\n");
+ of_node_put(pcie_intc_node);
+ return -ENOMEM;
+ }
+
+ irq_domain_update_bus_token(port->intx_domain, DOMAIN_BUS_WIRED);
+
+ of_node_put(pcie_intc_node);
+ raw_spin_lock_init(&port->lock);
+
+ return plda_allocate_msi_domains(port);
+}
+
+int plda_init_interrupts(struct platform_device *pdev,
+ struct plda_pcie_rp *port,
+ const struct plda_event *event)
+{
+ struct device *dev = &pdev->dev;
+ int event_irq, ret;
+ u32 i;
+
+ if (!port->event_ops)
+ port->event_ops = &plda_event_ops;
+
+ if (!port->event_irq_chip)
+ port->event_irq_chip = &plda_event_irq_chip;
+
+ ret = plda_pcie_init_irq_domains(port);
+ if (ret) {
+ dev_err(dev, "failed creating IRQ domains\n");
+ return ret;
+ }
+
+ port->irq = platform_get_irq(pdev, 0);
+ if (port->irq < 0)
+ return -ENODEV;
+
+ for_each_set_bit(i, &port->events_bitmap, port->num_events) {
+ event_irq = irq_create_mapping(port->event_domain, i);
+ if (!event_irq) {
+ dev_err(dev, "failed to map hwirq %d\n", i);
+ return -ENXIO;
+ }
+
+ if (event->request_event_irq)
+ ret = event->request_event_irq(port, event_irq, i);
+ else
+ ret = devm_request_irq(dev, event_irq,
+ plda_event_handler,
+ 0, NULL, port);
+
+ if (ret) {
+ dev_err(dev, "failed to request IRQ %d\n", event_irq);
+ return ret;
+ }
+ }
+
+ port->intx_irq = irq_create_mapping(port->event_domain,
+ event->intx_event);
+ if (!port->intx_irq) {
+ dev_err(dev, "failed to map INTx interrupt\n");
+ return -ENXIO;
+ }
+
+ /* Plug the INTx chained handler */
+ irq_set_chained_handler_and_data(port->intx_irq, plda_handle_intx, port);
+
+ port->msi_irq = irq_create_mapping(port->event_domain,
+ event->msi_event);
+ if (!port->msi_irq)
+ return -ENXIO;
+
+ /* Plug the MSI chained handler */
+ irq_set_chained_handler_and_data(port->msi_irq, plda_handle_msi, port);
+
+ /* Plug the main event chained handler */
+ irq_set_chained_handler_and_data(port->irq, plda_handle_event, port);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(plda_init_interrupts);
+
+void plda_pcie_setup_window(void __iomem *bridge_base_addr, u32 index,
+ phys_addr_t axi_addr, phys_addr_t pci_addr,
+ size_t size)
+{
+ u32 atr_sz = ilog2(size) - 1;
+ u32 val;
+
+ if (index == 0)
+ val = PCIE_CONFIG_INTERFACE;
+ else
+ val = PCIE_TX_RX_INTERFACE;
+
+ writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
+ ATR0_AXI4_SLV0_TRSL_PARAM);
+
+ val = ALIGN_DOWN(lower_32_bits(axi_addr), SZ_4K);
+ val |= FIELD_PREP(ATR_SIZE_MASK, atr_sz);
+ val |= ATR_IMPL_ENABLE;
+ writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
+ ATR0_AXI4_SLV0_SRCADDR_PARAM);
+
+ val = upper_32_bits(axi_addr);
+ writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
+ ATR0_AXI4_SLV0_SRC_ADDR);
+
+ val = lower_32_bits(pci_addr);
+ writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
+ ATR0_AXI4_SLV0_TRSL_ADDR_LSB);
+
+ val = upper_32_bits(pci_addr);
+ writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
+ ATR0_AXI4_SLV0_TRSL_ADDR_UDW);
+}
+EXPORT_SYMBOL_GPL(plda_pcie_setup_window);
+
+void plda_pcie_setup_inbound_address_translation(struct plda_pcie_rp *port)
+{
+ void __iomem *bridge_base_addr = port->bridge_addr;
+ u32 val;
+
+ val = readl(bridge_base_addr + ATR0_PCIE_WIN0_SRCADDR_PARAM);
+ val |= (ATR0_PCIE_ATR_SIZE << ATR0_PCIE_ATR_SIZE_SHIFT);
+ writel(val, bridge_base_addr + ATR0_PCIE_WIN0_SRCADDR_PARAM);
+ writel(0, bridge_base_addr + ATR0_PCIE_WIN0_SRC_ADDR);
+}
+EXPORT_SYMBOL_GPL(plda_pcie_setup_inbound_address_translation);
+
+int plda_pcie_setup_iomems(struct pci_host_bridge *bridge,
+ struct plda_pcie_rp *port)
+{
+ void __iomem *bridge_base_addr = port->bridge_addr;
+ struct resource_entry *entry;
+ u64 pci_addr;
+ u32 index = 1;
+
+ resource_list_for_each_entry(entry, &bridge->windows) {
+ if (resource_type(entry->res) == IORESOURCE_MEM) {
+ pci_addr = entry->res->start - entry->offset;
+ plda_pcie_setup_window(bridge_base_addr, index,
+ entry->res->start, pci_addr,
+ resource_size(entry->res));
+ index++;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(plda_pcie_setup_iomems);
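+
+/*
+ * Note that plda_pcie_setup_iomems() starts at index 1: translation table
+ * entry 0 is reserved for the config space window, which callers program
+ * separately (see mc_platform_init() and plda_pcie_host_init()).
+ */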
+
+static void plda_pcie_irq_domain_deinit(struct plda_pcie_rp *pcie)
+{
+ irq_set_chained_handler_and_data(pcie->irq, NULL, NULL);
+ irq_set_chained_handler_and_data(pcie->msi_irq, NULL, NULL);
+ irq_set_chained_handler_and_data(pcie->intx_irq, NULL, NULL);
+
+ irq_domain_remove(pcie->msi.msi_domain);
+ irq_domain_remove(pcie->msi.dev_domain);
+
+ irq_domain_remove(pcie->intx_domain);
+ irq_domain_remove(pcie->event_domain);
+}
+
+int plda_pcie_host_init(struct plda_pcie_rp *port, struct pci_ops *ops,
+ const struct plda_event *plda_event)
+{
+ struct device *dev = port->dev;
+ struct pci_host_bridge *bridge;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct resource *cfg_res;
+ int ret;
+
+ port->bridge_addr =
+ devm_platform_ioremap_resource_byname(pdev, "apb");
+
+ if (IS_ERR(port->bridge_addr))
+ return dev_err_probe(dev, PTR_ERR(port->bridge_addr),
+ "failed to map reg memory\n");
+
+ cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
+ if (!cfg_res)
+ return dev_err_probe(dev, -ENODEV,
+ "failed to get config memory\n");
+
+ port->config_base = devm_ioremap_resource(dev, cfg_res);
+ if (IS_ERR(port->config_base))
+ return dev_err_probe(dev, PTR_ERR(port->config_base),
+ "failed to map config memory\n");
+
+ bridge = devm_pci_alloc_host_bridge(dev, 0);
+ if (!bridge)
+ return dev_err_probe(dev, -ENOMEM,
+ "failed to alloc bridge\n");
+
+ if (port->host_ops && port->host_ops->host_init) {
+ ret = port->host_ops->host_init(port);
+ if (ret)
+ return ret;
+ }
+
+ port->bridge = bridge;
+ plda_pcie_setup_window(port->bridge_addr, 0, cfg_res->start, 0,
+ resource_size(cfg_res));
+ plda_pcie_setup_iomems(bridge, port);
+ plda_set_default_msi(&port->msi);
+ ret = plda_init_interrupts(pdev, port, plda_event);
+ if (ret)
+ goto err_host;
+
+ /* Set default bus ops */
+ bridge->ops = ops;
+ bridge->sysdata = port;
+
+ ret = pci_host_probe(bridge);
+ if (ret < 0) {
+ dev_err_probe(dev, ret, "failed to probe pci host\n");
+ goto err_probe;
+ }
+
+ return ret;
+
+err_probe:
+ plda_pcie_irq_domain_deinit(port);
+err_host:
+ if (port->host_ops && port->host_ops->host_deinit)
+ port->host_ops->host_deinit(port);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(plda_pcie_host_init);
+
+void plda_pcie_host_deinit(struct plda_pcie_rp *port)
+{
+ pci_stop_root_bus(port->bridge->bus);
+ pci_remove_root_bus(port->bridge->bus);
+
+ plda_pcie_irq_domain_deinit(port);
+
+ if (port->host_ops && port->host_ops->host_deinit)
+ port->host_ops->host_deinit(port);
+}
+EXPORT_SYMBOL_GPL(plda_pcie_host_deinit);
diff --git a/drivers/pci/controller/plda/pcie-plda.h b/drivers/pci/controller/plda/pcie-plda.h
new file mode 100644
index 000000000000..61ece26065ea
--- /dev/null
+++ b/drivers/pci/controller/plda/pcie-plda.h
@@ -0,0 +1,275 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * PLDA PCIe host controller driver
+ */
+
+#ifndef _PCIE_PLDA_H
+#define _PCIE_PLDA_H
+
+/* Number of MSI IRQs */
+#define PLDA_MAX_NUM_MSI_IRQS 32
+
+/* PCIe Bridge Phy Regs */
+#define GEN_SETTINGS 0x80
+#define RP_ENABLE 1
+#define PCIE_PCI_IDS_DW1 0x9c
+#define IDS_CLASS_CODE_SHIFT 16
+#define REVISION_ID_MASK GENMASK(7, 0)
+#define CLASS_CODE_ID_MASK GENMASK(31, 8)
+#define PCIE_PCI_IRQ_DW0 0xa8
+#define MSIX_CAP_MASK BIT(31)
+#define NUM_MSI_MSGS_MASK GENMASK(6, 4)
+#define NUM_MSI_MSGS_SHIFT 4
+#define PCI_MISC 0xb4
+#define PHY_FUNCTION_DIS BIT(15)
+#define PCIE_WINROM 0xfc
+#define PREF_MEM_WIN_64_SUPPORT BIT(3)
+
+#define IMASK_LOCAL 0x180
+#define DMA_END_ENGINE_0_MASK 0x00000000u
+#define DMA_END_ENGINE_0_SHIFT 0
+#define DMA_END_ENGINE_1_MASK 0x00000000u
+#define DMA_END_ENGINE_1_SHIFT 1
+#define DMA_ERROR_ENGINE_0_MASK 0x00000100u
+#define DMA_ERROR_ENGINE_0_SHIFT 8
+#define DMA_ERROR_ENGINE_1_MASK 0x00000200u
+#define DMA_ERROR_ENGINE_1_SHIFT 9
+#define A_ATR_EVT_POST_ERR_MASK 0x00010000u
+#define A_ATR_EVT_POST_ERR_SHIFT 16
+#define A_ATR_EVT_FETCH_ERR_MASK 0x00020000u
+#define A_ATR_EVT_FETCH_ERR_SHIFT 17
+#define A_ATR_EVT_DISCARD_ERR_MASK 0x00040000u
+#define A_ATR_EVT_DISCARD_ERR_SHIFT 18
+#define A_ATR_EVT_DOORBELL_MASK 0x00000000u
+#define A_ATR_EVT_DOORBELL_SHIFT 19
+#define P_ATR_EVT_POST_ERR_MASK 0x00100000u
+#define P_ATR_EVT_POST_ERR_SHIFT 20
+#define P_ATR_EVT_FETCH_ERR_MASK 0x00200000u
+#define P_ATR_EVT_FETCH_ERR_SHIFT 21
+#define P_ATR_EVT_DISCARD_ERR_MASK 0x00400000u
+#define P_ATR_EVT_DISCARD_ERR_SHIFT 22
+#define P_ATR_EVT_DOORBELL_MASK 0x00000000u
+#define P_ATR_EVT_DOORBELL_SHIFT 23
+#define PM_MSI_INT_INTA_MASK 0x01000000u
+#define PM_MSI_INT_INTA_SHIFT 24
+#define PM_MSI_INT_INTB_MASK 0x02000000u
+#define PM_MSI_INT_INTB_SHIFT 25
+#define PM_MSI_INT_INTC_MASK 0x04000000u
+#define PM_MSI_INT_INTC_SHIFT 26
+#define PM_MSI_INT_INTD_MASK 0x08000000u
+#define PM_MSI_INT_INTD_SHIFT 27
+#define PM_MSI_INT_INTX_MASK 0x0f000000u
+#define PM_MSI_INT_INTX_SHIFT 24
+#define PM_MSI_INT_MSI_MASK 0x10000000u
+#define PM_MSI_INT_MSI_SHIFT 28
+#define PM_MSI_INT_AER_EVT_MASK 0x20000000u
+#define PM_MSI_INT_AER_EVT_SHIFT 29
+#define PM_MSI_INT_EVENTS_MASK 0x40000000u
+#define PM_MSI_INT_EVENTS_SHIFT 30
+#define PM_MSI_INT_SYS_ERR_MASK 0x80000000u
+#define PM_MSI_INT_SYS_ERR_SHIFT 31
+#define SYS_AND_MSI_MASK GENMASK(31, 28)
+#define NUM_LOCAL_EVENTS 15
+#define ISTATUS_LOCAL 0x184
+#define IMASK_HOST 0x188
+#define ISTATUS_HOST 0x18c
+#define IMSI_ADDR 0x190
+#define ISTATUS_MSI 0x194
+#define PMSG_SUPPORT_RX 0x3f0
+#define PMSG_LTR_SUPPORT BIT(2)
+
+/* PCIe Master table init defines */
+#define ATR0_PCIE_WIN0_SRCADDR_PARAM 0x600u
+#define ATR0_PCIE_ATR_SIZE 0x25
+#define ATR0_PCIE_ATR_SIZE_SHIFT 1
+#define ATR0_PCIE_WIN0_SRC_ADDR 0x604u
+#define ATR0_PCIE_WIN0_TRSL_ADDR_LSB 0x608u
+#define ATR0_PCIE_WIN0_TRSL_ADDR_UDW 0x60cu
+#define ATR0_PCIE_WIN0_TRSL_PARAM 0x610u
+
+/* PCIe AXI slave table init defines */
+#define ATR0_AXI4_SLV0_SRCADDR_PARAM 0x800u
+#define ATR_SIZE_MASK GENMASK(6, 1)
+#define ATR_IMPL_ENABLE BIT(0)
+#define ATR0_AXI4_SLV0_SRC_ADDR 0x804u
+#define ATR0_AXI4_SLV0_TRSL_ADDR_LSB 0x808u
+#define ATR0_AXI4_SLV0_TRSL_ADDR_UDW 0x80cu
+#define ATR0_AXI4_SLV0_TRSL_PARAM 0x810u
+#define PCIE_TX_RX_INTERFACE 0x00000000u
+#define PCIE_CONFIG_INTERFACE 0x00000001u
+#define TRSL_ID_AXI4_MASTER_0 0x00000004u
+
+#define CONFIG_SPACE_ADDR_OFFSET 0x1000u
+
+#define ATR_ENTRY_SIZE 32
+
+enum plda_int_event {
+ PLDA_AXI_POST_ERR,
+ PLDA_AXI_FETCH_ERR,
+ PLDA_AXI_DISCARD_ERR,
+ PLDA_AXI_DOORBELL,
+ PLDA_PCIE_POST_ERR,
+ PLDA_PCIE_FETCH_ERR,
+ PLDA_PCIE_DISCARD_ERR,
+ PLDA_PCIE_DOORBELL,
+ PLDA_INTX,
+ PLDA_MSI,
+ PLDA_AER_EVENT,
+ PLDA_MISC_EVENTS,
+ PLDA_SYS_ERR,
+ PLDA_INT_EVENT_NUM
+};
+
+#define PLDA_NUM_DMA_EVENTS 16
+
+#define EVENT_PM_MSI_INT_INTX (PLDA_NUM_DMA_EVENTS + PLDA_INTX)
+#define EVENT_PM_MSI_INT_MSI (PLDA_NUM_DMA_EVENTS + PLDA_MSI)
+#define PLDA_MAX_EVENT_NUM (PLDA_NUM_DMA_EVENTS + PLDA_INT_EVENT_NUM)
+
+/*
+ * PLDA interrupt register
+ *
+ * 31 27 23 15 7 0
+ * +--+--+--+-+------+-+-+-+-+-+-+-+-+-----------+-----------+
+ * |12|11|10|9| intx |7|6|5|4|3|2|1|0| DMA error | DMA end |
+ * +--+--+--+-+------+-+-+-+-+-+-+-+-+-----------+-----------+
+ * event bit
+ * 0-7 (0-7) DMA interrupt end (reserved for vendor implementation)
+ * 8-15 (8-15) DMA error (reserved for vendor implementation)
+ * 16 (16) AXI post error (PLDA_AXI_POST_ERR)
+ * 17 (17) AXI fetch error (PLDA_AXI_FETCH_ERR)
+ * 18 (18) AXI discard error (PLDA_AXI_DISCARD_ERR)
+ * 19 (19) AXI doorbell (PLDA_AXI_DOORBELL)
+ * 20 (20) PCIe post error (PLDA_PCIE_POST_ERR)
+ * 21 (21) PCIe fetch error (PLDA_PCIE_FETCH_ERR)
+ * 22 (22) PCIe discard error (PLDA_PCIE_DISCARD_ERR)
+ * 23 (23) PCIe doorbell (PLDA_PCIE_DOORBELL)
+ * 24 (27-24) INTx interrupts (PLDA_INTX)
+ * 25 (28) MSI interrupt (PLDA_MSI)
+ * 26 (29) AER event (PLDA_AER_EVENT)
+ * 27 (30) PM/LTR/Hotplug (PLDA_MISC_EVENTS)
+ * 28 (31) System error (PLDA_SYS_ERR)
+ */
+
+struct plda_pcie_rp;
+
+struct plda_event_ops {
+ u32 (*get_events)(struct plda_pcie_rp *pcie);
+};
+
+struct plda_pcie_host_ops {
+ int (*host_init)(struct plda_pcie_rp *pcie);
+ void (*host_deinit)(struct plda_pcie_rp *pcie);
+};
+
+struct plda_msi {
+ struct mutex lock; /* Protect used bitmap */
+ struct irq_domain *msi_domain;
+ struct irq_domain *dev_domain;
+ u32 num_vectors;
+ u64 vector_phy;
+ DECLARE_BITMAP(used, PLDA_MAX_NUM_MSI_IRQS);
+};
+
+struct plda_pcie_rp {
+ struct device *dev;
+ struct pci_host_bridge *bridge;
+ struct irq_domain *intx_domain;
+ struct irq_domain *event_domain;
+ raw_spinlock_t lock;
+ struct plda_msi msi;
+ const struct plda_event_ops *event_ops;
+ const struct irq_chip *event_irq_chip;
+ const struct plda_pcie_host_ops *host_ops;
+ void __iomem *bridge_addr;
+ void __iomem *config_base;
+ unsigned long events_bitmap;
+ int irq;
+ int msi_irq;
+ int intx_irq;
+ int num_events;
+};
+
+struct plda_event {
+ int (*request_event_irq)(struct plda_pcie_rp *pcie,
+ int event_irq, int event);
+ int intx_event;
+ int msi_event;
+};
+
+void __iomem *plda_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
+ int where);
+int plda_init_interrupts(struct platform_device *pdev,
+ struct plda_pcie_rp *port,
+ const struct plda_event *event);
+void plda_pcie_setup_window(void __iomem *bridge_base_addr, u32 index,
+ phys_addr_t axi_addr, phys_addr_t pci_addr,
+ size_t size);
+void plda_pcie_setup_inbound_address_translation(struct plda_pcie_rp *port);
+int plda_pcie_setup_iomems(struct pci_host_bridge *bridge,
+ struct plda_pcie_rp *port);
+int plda_pcie_host_init(struct plda_pcie_rp *port, struct pci_ops *ops,
+ const struct plda_event *plda_event);
+void plda_pcie_host_deinit(struct plda_pcie_rp *pcie);
+
+static inline void plda_set_default_msi(struct plda_msi *msi)
+{
+ msi->vector_phy = IMSI_ADDR;
+ msi->num_vectors = PLDA_MAX_NUM_MSI_IRQS;
+}
+
+static inline void plda_pcie_enable_root_port(struct plda_pcie_rp *plda)
+{
+ u32 value;
+
+ value = readl_relaxed(plda->bridge_addr + GEN_SETTINGS);
+ value |= RP_ENABLE;
+ writel_relaxed(value, plda->bridge_addr + GEN_SETTINGS);
+}
+
+static inline void plda_pcie_set_standard_class(struct plda_pcie_rp *plda)
+{
+ u32 value;
+
+ /* set class code and reserve revision id */
+ value = readl_relaxed(plda->bridge_addr + PCIE_PCI_IDS_DW1);
+ value &= REVISION_ID_MASK;
+ value |= (PCI_CLASS_BRIDGE_PCI << IDS_CLASS_CODE_SHIFT);
+ writel_relaxed(value, plda->bridge_addr + PCIE_PCI_IDS_DW1);
+}
+
+static inline void plda_pcie_set_pref_win_64bit(struct plda_pcie_rp *plda)
+{
+ u32 value;
+
+ value = readl_relaxed(plda->bridge_addr + PCIE_WINROM);
+ value |= PREF_MEM_WIN_64_SUPPORT;
+ writel_relaxed(value, plda->bridge_addr + PCIE_WINROM);
+}
+
+static inline void plda_pcie_disable_ltr(struct plda_pcie_rp *plda)
+{
+ u32 value;
+
+ value = readl_relaxed(plda->bridge_addr + PMSG_SUPPORT_RX);
+ value &= ~PMSG_LTR_SUPPORT;
+ writel_relaxed(value, plda->bridge_addr + PMSG_SUPPORT_RX);
+}
+
+static inline void plda_pcie_disable_func(struct plda_pcie_rp *plda)
+{
+ u32 value;
+
+ value = readl_relaxed(plda->bridge_addr + PCI_MISC);
+ value |= PHY_FUNCTION_DIS;
+ writel_relaxed(value, plda->bridge_addr + PCI_MISC);
+}
+
+static inline void plda_pcie_write_rc_bar(struct plda_pcie_rp *plda, u64 val)
+{
+ void __iomem *addr = plda->bridge_addr + CONFIG_SPACE_ADDR_OFFSET;
+
+ writel_relaxed(lower_32_bits(val), addr + PCI_BASE_ADDRESS_0);
+ writel_relaxed(upper_32_bits(val), addr + PCI_BASE_ADDRESS_1);
+}
+#endif /* _PCIE_PLDA_H */
diff --git a/drivers/pci/controller/plda/pcie-starfive.c b/drivers/pci/controller/plda/pcie-starfive.c
new file mode 100644
index 000000000000..e73c1b7bc8ef
--- /dev/null
+++ b/drivers/pci/controller/plda/pcie-starfive.c
@@ -0,0 +1,492 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * PCIe host controller driver for the StarFive JH7110 SoC.
+ *
+ * Copyright (C) 2023 StarFive Technology Co., Ltd.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_pci.h>
+#include <linux/pci.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include "../../pci.h"
+
+#include "pcie-plda.h"
+
+#define PCIE_FUNC_NUM 4
+
+/* system control */
+#define STG_SYSCON_PCIE0_BASE 0x48
+#define STG_SYSCON_PCIE1_BASE 0x1f8
+
+#define STG_SYSCON_AR_OFFSET 0x78
+#define STG_SYSCON_AXI4_SLVL_AR_MASK GENMASK(22, 8)
+#define STG_SYSCON_AXI4_SLVL_PHY_AR(x) FIELD_PREP(GENMASK(20, 17), x)
+#define STG_SYSCON_AW_OFFSET 0x7c
+#define STG_SYSCON_AXI4_SLVL_AW_MASK GENMASK(14, 0)
+#define STG_SYSCON_AXI4_SLVL_PHY_AW(x) FIELD_PREP(GENMASK(12, 9), x)
+#define STG_SYSCON_CLKREQ BIT(22)
+#define STG_SYSCON_CKREF_SRC_MASK GENMASK(19, 18)
+#define STG_SYSCON_RP_NEP_OFFSET 0xe8
+#define STG_SYSCON_K_RP_NEP BIT(8)
+#define STG_SYSCON_LNKSTA_OFFSET 0x170
+#define DATA_LINK_ACTIVE BIT(5)
+
+/* Parameters for the waiting for link up routine */
+#define LINK_WAIT_MAX_RETRIES 10
+#define LINK_WAIT_USLEEP_MIN 90000
+#define LINK_WAIT_USLEEP_MAX 100000
+
+struct starfive_jh7110_pcie {
+ struct plda_pcie_rp plda;
+ struct reset_control *resets;
+ struct clk_bulk_data *clks;
+ struct regmap *reg_syscon;
+ struct gpio_desc *power_gpio;
+ struct gpio_desc *reset_gpio;
+ struct phy *phy;
+
+ unsigned int stg_pcie_base;
+ int num_clks;
+};
+
+/*
+ * JH7110 PCIe port BAR0/1 can be configured as 64-bit prefetchable memory
+ * space. PCIe read and write requests targeting BAR0/1 are routed to the
+ * so-called 'Bridge Configuration space' described in the PLDA IP datasheet,
+ * which contains the bridge internal registers, such as the interrupt, DMA
+ * and ATU registers. The JH7110 accesses the Bridge Configuration space
+ * through its local bus, and the bridge internal registers must not be
+ * reachable by DMA from EP devices. Thus, BAR0/1 are left unimplemented
+ * and are hidden here.
+ */
+static bool starfive_pcie_hide_rc_bar(struct pci_bus *bus, unsigned int devfn,
+ int offset)
+{
+ if (pci_is_root_bus(bus) && !devfn &&
+ (offset == PCI_BASE_ADDRESS_0 || offset == PCI_BASE_ADDRESS_1))
+ return true;
+
+ return false;
+}
+
+static int starfive_pcie_config_write(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 value)
+{
+ if (starfive_pcie_hide_rc_bar(bus, devfn, where))
+ return PCIBIOS_SUCCESSFUL;
+
+ return pci_generic_config_write(bus, devfn, where, size, value);
+}
+
+static int starfive_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *value)
+{
+ if (starfive_pcie_hide_rc_bar(bus, devfn, where)) {
+ *value = 0;
+ return PCIBIOS_SUCCESSFUL;
+ }
+
+ return pci_generic_config_read(bus, devfn, where, size, value);
+}
+
+static int starfive_pcie_parse_dt(struct starfive_jh7110_pcie *pcie,
+ struct device *dev)
+{
+ int domain_nr;
+
+ pcie->num_clks = devm_clk_bulk_get_all(dev, &pcie->clks);
+ if (pcie->num_clks < 0)
+ return dev_err_probe(dev, pcie->num_clks,
+ "failed to get pcie clocks\n");
+
+ pcie->resets = devm_reset_control_array_get_exclusive(dev);
+ if (IS_ERR(pcie->resets))
+ return dev_err_probe(dev, PTR_ERR(pcie->resets),
+ "failed to get pcie resets");
+
+ pcie->reg_syscon =
+ syscon_regmap_lookup_by_phandle(dev->of_node,
+ "starfive,stg-syscon");
+
+ if (IS_ERR(pcie->reg_syscon))
+ return dev_err_probe(dev, PTR_ERR(pcie->reg_syscon),
+ "failed to parse starfive,stg-syscon\n");
+
+ pcie->phy = devm_phy_optional_get(dev, NULL);
+ if (IS_ERR(pcie->phy))
+ return dev_err_probe(dev, PTR_ERR(pcie->phy),
+ "failed to get pcie phy\n");
+
+ /*
+ * The PCIe domain numbers are set to be static in JH7110 DTS.
+ * As the STG system controller defines different bases in PCIe RP0 &
+ * RP1, we use them to identify which controller is doing the hardware
+ * initialization.
+ */
+ domain_nr = of_get_pci_domain_nr(dev->of_node);
+
+ if (domain_nr < 0 || domain_nr > 1)
+ return dev_err_probe(dev, -ENODEV,
+ "failed to get valid pcie domain\n");
+
+ if (domain_nr == 0)
+ pcie->stg_pcie_base = STG_SYSCON_PCIE0_BASE;
+ else
+ pcie->stg_pcie_base = STG_SYSCON_PCIE1_BASE;
+
+ pcie->reset_gpio = devm_gpiod_get_optional(dev, "perst",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(pcie->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(pcie->reset_gpio),
+ "failed to get perst-gpio\n");
+
+ pcie->power_gpio = devm_gpiod_get_optional(dev, "enable",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(pcie->power_gpio))
+ return dev_err_probe(dev, PTR_ERR(pcie->power_gpio),
+ "failed to get power-gpio\n");
+
+ return 0;
+}
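+
+/*
+ * Sketch of the DT properties consumed above (values and phandles are
+ * illustrative placeholders, not taken from a real JH7110 device tree):
+ *
+ *	pcie@x {
+ *		...
+ *		starfive,stg-syscon = <&stg_syscon>;
+ *		linux,pci-domain = <0>;
+ *		perst-gpios = <&sysgpio 26 GPIO_ACTIVE_LOW>;
+ *		enable-gpios = <&sysgpio 32 GPIO_ACTIVE_HIGH>;
+ *		phys = <&pciephy0>;
+ *	};
+ */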
+
+static struct pci_ops starfive_pcie_ops = {
+ .map_bus = plda_pcie_map_bus,
+ .read = starfive_pcie_config_read,
+ .write = starfive_pcie_config_write,
+};
+
+static int starfive_pcie_clk_rst_init(struct starfive_jh7110_pcie *pcie)
+{
+ struct device *dev = pcie->plda.dev;
+ int ret;
+
+ ret = clk_bulk_prepare_enable(pcie->num_clks, pcie->clks);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to enable clocks\n");
+
+ ret = reset_control_deassert(pcie->resets);
+ if (ret) {
+ clk_bulk_disable_unprepare(pcie->num_clks, pcie->clks);
+ dev_err_probe(dev, ret, "failed to deassert resets\n");
+ }
+
+ return ret;
+}
+
+static void starfive_pcie_clk_rst_deinit(struct starfive_jh7110_pcie *pcie)
+{
+ reset_control_assert(pcie->resets);
+ clk_bulk_disable_unprepare(pcie->num_clks, pcie->clks);
+}
+
+static bool starfive_pcie_link_up(struct plda_pcie_rp *plda)
+{
+ struct starfive_jh7110_pcie *pcie =
+ container_of(plda, struct starfive_jh7110_pcie, plda);
+ int ret;
+ u32 stg_reg_val;
+
+ ret = regmap_read(pcie->reg_syscon,
+ pcie->stg_pcie_base + STG_SYSCON_LNKSTA_OFFSET,
+ &stg_reg_val);
+ if (ret) {
+ dev_err(pcie->plda.dev, "failed to read link status\n");
+ return false;
+ }
+
+ return !!(stg_reg_val & DATA_LINK_ACTIVE);
+}
+
+static int starfive_pcie_host_wait_for_link(struct starfive_jh7110_pcie *pcie)
+{
+ int retries;
+
+ /* Poll until the data link is up or the retries are exhausted */
+ for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
+ if (starfive_pcie_link_up(&pcie->plda)) {
+ dev_info(pcie->plda.dev, "port link up\n");
+ return 0;
+ }
+ usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int starfive_pcie_enable_phy(struct device *dev,
+ struct starfive_jh7110_pcie *pcie)
+{
+ int ret;
+
+ if (!pcie->phy)
+ return 0;
+
+ ret = phy_init(pcie->phy);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to initialize pcie phy\n");
+
+ ret = phy_set_mode(pcie->phy, PHY_MODE_PCIE);
+ if (ret) {
+ dev_err_probe(dev, ret, "failed to set pcie mode\n");
+ goto err_phy_on;
+ }
+
+ ret = phy_power_on(pcie->phy);
+ if (ret) {
+ dev_err_probe(dev, ret, "failed to power on pcie phy\n");
+ goto err_phy_on;
+ }
+
+ return 0;
+
+err_phy_on:
+ phy_exit(pcie->phy);
+ return ret;
+}
+
+static void starfive_pcie_disable_phy(struct starfive_jh7110_pcie *pcie)
+{
+ phy_power_off(pcie->phy);
+ phy_exit(pcie->phy);
+}
+
+static void starfive_pcie_host_deinit(struct plda_pcie_rp *plda)
+{
+ struct starfive_jh7110_pcie *pcie =
+ container_of(plda, struct starfive_jh7110_pcie, plda);
+
+ starfive_pcie_clk_rst_deinit(pcie);
+ if (pcie->power_gpio)
+ gpiod_set_value_cansleep(pcie->power_gpio, 0);
+ starfive_pcie_disable_phy(pcie);
+}
+
+static int starfive_pcie_host_init(struct plda_pcie_rp *plda)
+{
+ struct starfive_jh7110_pcie *pcie =
+ container_of(plda, struct starfive_jh7110_pcie, plda);
+ struct device *dev = plda->dev;
+ int ret;
+ int i;
+
+ ret = starfive_pcie_enable_phy(dev, pcie);
+ if (ret)
+ return ret;
+
+ regmap_update_bits(pcie->reg_syscon,
+ pcie->stg_pcie_base + STG_SYSCON_RP_NEP_OFFSET,
+ STG_SYSCON_K_RP_NEP, STG_SYSCON_K_RP_NEP);
+
+ regmap_update_bits(pcie->reg_syscon,
+ pcie->stg_pcie_base + STG_SYSCON_AW_OFFSET,
+ STG_SYSCON_CKREF_SRC_MASK,
+ FIELD_PREP(STG_SYSCON_CKREF_SRC_MASK, 2));
+
+ regmap_update_bits(pcie->reg_syscon,
+ pcie->stg_pcie_base + STG_SYSCON_AW_OFFSET,
+ STG_SYSCON_CLKREQ, STG_SYSCON_CLKREQ);
+
+ ret = starfive_pcie_clk_rst_init(pcie);
+ if (ret)
+ return ret;
+
+ if (pcie->power_gpio)
+ gpiod_set_value_cansleep(pcie->power_gpio, 1);
+
+ if (pcie->reset_gpio)
+ gpiod_set_value_cansleep(pcie->reset_gpio, 1);
+
+ /* Disable physical functions except #0 */
+ for (i = 1; i < PCIE_FUNC_NUM; i++) {
+ regmap_update_bits(pcie->reg_syscon,
+ pcie->stg_pcie_base + STG_SYSCON_AR_OFFSET,
+ STG_SYSCON_AXI4_SLVL_AR_MASK,
+ STG_SYSCON_AXI4_SLVL_PHY_AR(i));
+
+ regmap_update_bits(pcie->reg_syscon,
+ pcie->stg_pcie_base + STG_SYSCON_AW_OFFSET,
+ STG_SYSCON_AXI4_SLVL_AW_MASK,
+ STG_SYSCON_AXI4_SLVL_PHY_AW(i));
+
+ plda_pcie_disable_func(plda);
+ }
+
+ regmap_update_bits(pcie->reg_syscon,
+ pcie->stg_pcie_base + STG_SYSCON_AR_OFFSET,
+ STG_SYSCON_AXI4_SLVL_AR_MASK, 0);
+ regmap_update_bits(pcie->reg_syscon,
+ pcie->stg_pcie_base + STG_SYSCON_AW_OFFSET,
+ STG_SYSCON_AXI4_SLVL_AW_MASK, 0);
+
+ plda_pcie_enable_root_port(plda);
+ plda_pcie_write_rc_bar(plda, 0);
+
+ /* PCIe PCI Standard Configuration Identification Settings. */
+ plda_pcie_set_standard_class(plda);
+
+ /*
+ * LTR message reception is enabled by default via the "PCIe Message
+ * Reception" register, but the forwarding ID and address are left
+ * uninitialized. If LTR message forwarding is neither disabled here
+ * nor given a legal forwarding address, the kernel gets stuck.
+ * As a workaround, disable LTR message forwarding before using this
+ * feature.
+ */
+ plda_pcie_disable_ltr(plda);
+
+ /*
+ * Enable 64-bit addressing for the prefetchable memory window on the
+ * JH7110. The 64-bit prefetchable address translations in the ATU
+ * only take effect once the register setting below is enabled.
+ */
+ plda_pcie_set_pref_win_64bit(plda);
+
+ /*
+ * Ensure that PERST# has been asserted for at least 100 ms;
+ * the sleep value is T_PVPERL from the PCIe CEM spec r2.0 (Table 2-4).
+ */
+ msleep(100);
+ if (pcie->reset_gpio)
+ gpiod_set_value_cansleep(pcie->reset_gpio, 0);
+
+ /*
+ * With a Downstream Port (<= 5 GT/s), software must wait a minimum
+ * of 100 ms following exit from a conventional reset before
+ * sending a configuration request to the device.
+ */
+ msleep(PCIE_RESET_CONFIG_DEVICE_WAIT_MS);
+
+ if (starfive_pcie_host_wait_for_link(pcie))
+ dev_info(dev, "port link down\n");
+
+ return 0;
+}
+
+static const struct plda_pcie_host_ops sf_host_ops = {
+ .host_init = starfive_pcie_host_init,
+ .host_deinit = starfive_pcie_host_deinit,
+};
+
+static const struct plda_event stf_pcie_event = {
+ .intx_event = EVENT_PM_MSI_INT_INTX,
+ .msi_event = EVENT_PM_MSI_INT_MSI
+};
+
+static int starfive_pcie_probe(struct platform_device *pdev)
+{
+ struct starfive_jh7110_pcie *pcie;
+ struct device *dev = &pdev->dev;
+ struct plda_pcie_rp *plda;
+ int ret;
+
+ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+ if (!pcie)
+ return -ENOMEM;
+
+ plda = &pcie->plda;
+ plda->dev = dev;
+
+ ret = starfive_pcie_parse_dt(pcie, dev);
+ if (ret)
+ return ret;
+
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_get_sync(&pdev->dev);
+
+ plda->host_ops = &sf_host_ops;
+ plda->num_events = PLDA_MAX_EVENT_NUM;
+ /* Mask the doorbell events */
+ plda->events_bitmap = GENMASK(PLDA_INT_EVENT_NUM - 1, 0)
+ & ~BIT(PLDA_AXI_DOORBELL)
+ & ~BIT(PLDA_PCIE_DOORBELL);
+ plda->events_bitmap <<= PLDA_NUM_DMA_EVENTS;
+ ret = plda_pcie_host_init(&pcie->plda, &starfive_pcie_ops,
+ &stf_pcie_event);
+ if (ret) {
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, pcie);
+
+ return 0;
+}
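
The events_bitmap arithmetic in probe is compact, so here is a sketch of the same construction with illustrative constants (the SKETCH_* values below are assumptions, not the real PLDA_* values from the headers): build a full mask of interrupt events, clear the two doorbell bits, then shift the mask past the DMA event slots so the bit positions match the controller's global event numbering.

#include <linux/bits.h>

/* Assumed illustrative values, not the driver's real PLDA_* constants. */
#define SKETCH_INT_EVENT_NUM	16
#define SKETCH_NUM_DMA_EVENTS	8
#define SKETCH_AXI_DOORBELL	6
#define SKETCH_PCIE_DOORBELL	7

static unsigned long sketch_events_bitmap(void)
{
	/* All interrupt events: bits [15:0]. */
	unsigned long bitmap = GENMASK(SKETCH_INT_EVENT_NUM - 1, 0);

	/* Mask out the two doorbell events ... */
	bitmap &= ~BIT(SKETCH_AXI_DOORBELL);
	bitmap &= ~BIT(SKETCH_PCIE_DOORBELL);

	/* ... and shift past the DMA event slots to match event numbering. */
	return bitmap << SKETCH_NUM_DMA_EVENTS;
}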
+
+static void starfive_pcie_remove(struct platform_device *pdev)
+{
+ struct starfive_jh7110_pcie *pcie = platform_get_drvdata(pdev);
+
+ pm_runtime_put(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ plda_pcie_host_deinit(&pcie->plda);
+ platform_set_drvdata(pdev, NULL);
+}
+
+static int starfive_pcie_suspend_noirq(struct device *dev)
+{
+ struct starfive_jh7110_pcie *pcie = dev_get_drvdata(dev);
+
+ clk_bulk_disable_unprepare(pcie->num_clks, pcie->clks);
+ starfive_pcie_disable_phy(pcie);
+
+ return 0;
+}
+
+static int starfive_pcie_resume_noirq(struct device *dev)
+{
+ struct starfive_jh7110_pcie *pcie = dev_get_drvdata(dev);
+ int ret;
+
+ ret = starfive_pcie_enable_phy(dev, pcie);
+ if (ret)
+ return ret;
+
+ ret = clk_bulk_prepare_enable(pcie->num_clks, pcie->clks);
+ if (ret) {
+ dev_err(dev, "failed to enable clocks\n");
+ starfive_pcie_disable_phy(pcie);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops starfive_pcie_pm_ops = {
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(starfive_pcie_suspend_noirq,
+ starfive_pcie_resume_noirq)
+};
+
+static const struct of_device_id starfive_pcie_of_match[] = {
+ { .compatible = "starfive,jh7110-pcie", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, starfive_pcie_of_match);
+
+static struct platform_driver starfive_pcie_driver = {
+ .driver = {
+ .name = "pcie-starfive",
+ .of_match_table = of_match_ptr(starfive_pcie_of_match),
+ .pm = pm_sleep_ptr(&starfive_pcie_pm_ops),
+ },
+ .probe = starfive_pcie_probe,
+ .remove = starfive_pcie_remove,
+};
+module_platform_driver(starfive_pcie_driver);
+
+MODULE_DESCRIPTION("StarFive JH7110 PCIe host driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
index 87b7856f375a..8df064b62a2f 100644
--- a/drivers/pci/controller/vmd.c
+++ b/drivers/pci/controller/vmd.c
@@ -17,6 +17,8 @@
#include <linux/rculist.h>
#include <linux/rcupdate.h>
+#include <xen/xen.h>
+
#include <asm/irqdomain.h>
#define VMD_CFGBAR 0
@@ -125,7 +127,7 @@ struct vmd_irq_list {
struct vmd_dev {
struct pci_dev *dev;
- spinlock_t cfg_lock;
+ raw_spinlock_t cfg_lock;
void __iomem *cfgbar;
int msix_count;
@@ -204,22 +206,11 @@ static void vmd_irq_disable(struct irq_data *data)
raw_spin_unlock_irqrestore(&list_lock, flags);
}
-/*
- * XXX: Stubbed until we develop acceptable way to not create conflicts with
- * other devices sharing the same vector.
- */
-static int vmd_irq_set_affinity(struct irq_data *data,
- const struct cpumask *dest, bool force)
-{
- return -EINVAL;
-}
-
static struct irq_chip vmd_msi_controller = {
.name = "VMD-MSI",
.irq_enable = vmd_irq_enable,
.irq_disable = vmd_irq_disable,
.irq_compose_msi_msg = vmd_compose_msi_msg,
- .irq_set_affinity = vmd_irq_set_affinity,
};
static irq_hw_number_t vmd_get_hwirq(struct msi_domain_info *info,
@@ -326,7 +317,7 @@ static struct msi_domain_ops vmd_msi_domain_ops = {
static struct msi_domain_info vmd_msi_domain_info = {
.flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_PCI_MSIX,
+ MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX,
.ops = &vmd_msi_domain_ops,
.chip = &vmd_msi_controller,
};
@@ -402,7 +393,7 @@ static int vmd_pci_read(struct pci_bus *bus, unsigned int devfn, int reg,
if (!addr)
return -EFAULT;
- spin_lock_irqsave(&vmd->cfg_lock, flags);
+ raw_spin_lock_irqsave(&vmd->cfg_lock, flags);
switch (len) {
case 1:
*value = readb(addr);
@@ -417,7 +408,7 @@ static int vmd_pci_read(struct pci_bus *bus, unsigned int devfn, int reg,
ret = -EINVAL;
break;
}
- spin_unlock_irqrestore(&vmd->cfg_lock, flags);
+ raw_spin_unlock_irqrestore(&vmd->cfg_lock, flags);
return ret;
}
@@ -437,7 +428,7 @@ static int vmd_pci_write(struct pci_bus *bus, unsigned int devfn, int reg,
if (!addr)
return -EFAULT;
- spin_lock_irqsave(&vmd->cfg_lock, flags);
+ raw_spin_lock_irqsave(&vmd->cfg_lock, flags);
switch (len) {
case 1:
writeb(value, addr);
@@ -455,7 +446,7 @@ static int vmd_pci_write(struct pci_bus *bus, unsigned int devfn, int reg,
ret = -EINVAL;
break;
}
- spin_unlock_irqrestore(&vmd->cfg_lock, flags);
+ raw_spin_unlock_irqrestore(&vmd->cfg_lock, flags);
return ret;
}
@@ -751,11 +742,9 @@ static int vmd_pm_enable_quirk(struct pci_dev *pdev, void *userdata)
if (!(features & VMD_FEAT_BIOS_PM_QUIRK))
return 0;
- pci_enable_link_state_locked(pdev, PCIE_LINK_STATE_ALL);
-
pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_LTR);
if (!pos)
- return 0;
+ goto out_state_change;
/*
* Skip if the max snoop LTR is non-zero, indicating BIOS has set it
@@ -763,7 +752,7 @@ static int vmd_pm_enable_quirk(struct pci_dev *pdev, void *userdata)
*/
pci_read_config_dword(pdev, pos + PCI_LTR_MAX_SNOOP_LAT, &ltr_reg);
if (!!(ltr_reg & (PCI_LTR_VALUE_MASK | PCI_LTR_SCALE_MASK)))
- return 0;
+ goto out_state_change;
/*
* Set the default values to the maximum required by the platform to
@@ -775,6 +764,13 @@ static int vmd_pm_enable_quirk(struct pci_dev *pdev, void *userdata)
pci_write_config_dword(pdev, pos + PCI_LTR_MAX_SNOOP_LAT, ltr_reg);
pci_info(pdev, "VMD: Default LTR value set by driver\n");
+out_state_change:
+ /*
+ * Ensure devices are in D0 before enabling PCI-PM L1 PM Substates, per
+ * PCIe r6.0, sec 5.5.4.
+ */
+ pci_set_power_state_locked(pdev, PCI_D0);
+ pci_enable_link_state_locked(pdev, PCIE_LINK_STATE_ALL);
return 0;
}
@@ -925,6 +921,9 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
dev_set_msi_domain(&vmd->bus->dev,
dev_get_msi_domain(&vmd->dev->dev));
+ WARN(sysfs_create_link(&vmd->dev->dev.kobj, &vmd->bus->dev.kobj,
+ "domain"), "Can't create symlink to domain\n");
+
vmd_acpi_begin();
pci_scan_child_bus(vmd->bus);
@@ -964,9 +963,6 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
pci_bus_add_devices(vmd->bus);
vmd_acpi_end();
-
- WARN(sysfs_create_link(&vmd->dev->dev.kobj, &vmd->bus->dev.kobj,
- "domain"), "Can't create symlink to domain\n");
return 0;
}
@@ -976,6 +972,24 @@ static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id)
struct vmd_dev *vmd;
int err;
+ if (xen_domain()) {
+ /*
+ * Xen doesn't know about devices on the VMD bus
+ * because the config space of devices behind the VMD bridge is
+ * not known to Xen, and hence Xen cannot discover or configure
+ * them in any way.
+ *
+ * Bypassing MSI remapping won't work in that case, as direct
+ * writes by Linux to the MSI entries won't result in functional
+ * interrupts: Xen is the entity that manages the host
+ * interrupt controller and must configure interrupts. However,
+ * multiplexing of interrupts by the VMD bridge will work under
+ * Xen, so force the use of that mode, which must always be
+ * supported by VMD bridges.
+ */
+ features &= ~VMD_FEAT_CAN_BYPASS_MSI_REMAP;
+ }
+
if (resource_size(&dev->resource[VMD_CFGBAR]) < (1 << 20))
return -ENOMEM;
@@ -1015,7 +1029,7 @@ static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id)
if (features & VMD_FEAT_OFFSET_FIRST_VECTOR)
vmd->first_vec = 1;
- spin_lock_init(&vmd->cfg_lock);
+ raw_spin_lock_init(&vmd->cfg_lock);
pci_set_drvdata(dev, vmd);
err = vmd_enable_domain(vmd, features);
if (err)
@@ -1042,8 +1056,8 @@ static void vmd_remove(struct pci_dev *dev)
{
struct vmd_dev *vmd = pci_get_drvdata(dev);
- sysfs_remove_link(&vmd->dev->dev.kobj, "domain");
pci_stop_root_bus(vmd->bus);
+ sysfs_remove_link(&vmd->dev->dev.kobj, "domain");
pci_remove_root_bus(vmd->bus);
vmd_cleanup_srcu(vmd);
vmd_detach_resources(vmd);
@@ -1053,9 +1067,9 @@ static void vmd_remove(struct pci_dev *dev)
static void vmd_shutdown(struct pci_dev *dev)
{
- struct vmd_dev *vmd = pci_get_drvdata(dev);
+ struct vmd_dev *vmd = pci_get_drvdata(dev);
- vmd_remove_irq_domain(vmd);
+ vmd_remove_irq_domain(vmd);
}
#ifdef CONFIG_PM_SLEEP
@@ -1111,6 +1125,10 @@ static const struct pci_device_id vmd_ids[] = {
.driver_data = VMD_FEATS_CLIENT,},
{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_VMD_9A0B),
.driver_data = VMD_FEATS_CLIENT,},
+ {PCI_VDEVICE(INTEL, 0xb60b),
+ .driver_data = VMD_FEATS_CLIENT,},
+ {PCI_VDEVICE(INTEL, 0xb06f),
+ .driver_data = VMD_FEATS_CLIENT,},
{0,}
};
MODULE_DEVICE_TABLE(pci, vmd_ids);
@@ -1128,5 +1146,6 @@ static struct pci_driver vmd_drv = {
module_pci_driver(vmd_drv);
MODULE_AUTHOR("Intel Corporation");
+MODULE_DESCRIPTION("Volume Management Device driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION("0.6");
diff --git a/drivers/pci/devres.c b/drivers/pci/devres.c
index 2c562b9eaf80..73047316889e 100644
--- a/drivers/pci/devres.c
+++ b/drivers/pci/devres.c
@@ -4,14 +4,249 @@
#include "pci.h"
/*
- * PCI iomap devres
+ * On the state of PCI's devres implementation:
+ *
+ * The older devres API for PCI has two significant problems:
+ *
+ * 1. It is very strongly tied to the statically allocated mapping table in
+ * struct pcim_iomap_devres below. This is mostly solved in the sense that
+ * the pcim_ functions in this file provide things like ranged mapping by
+ * bypassing this table, whereas the functions that were present in the old
+ * API still enter the mapping addresses into the table for users of the
+ * old API.
+ *
+ * 2. The region-request-functions in pci.c do become managed IF the device has
+ * been enabled with pcim_enable_device() instead of pci_enable_device().
+ * This resulted in the API becoming inconsistent: Some functions have an
+ * obviously managed counterpart (e.g., pci_iomap() <-> pcim_iomap()),
+ * whereas some don't and are never managed, while others don't and are
+ * _sometimes_ managed (e.g., pci_request_region()).
+ *
+ * Consequently, in the new API, region requests performed by the pcim_
+ * functions are automatically cleaned up through the devres callback
+ * pcim_addr_resource_release().
+ *
+ * Users of pcim_enable_device() + pci_*region*() are redirected in
+ * pci.c to the managed functions here in this file. This isn't exactly
+ * perfect, but the only alternative would be to port ALL drivers using
+ * said combination to pcim_ functions.
+ *
+ * TODO:
+ * Remove the legacy table entirely once all calls to pcim_iomap_table() in
+ * the kernel have been removed.
*/
-#define PCIM_IOMAP_MAX PCI_STD_NUM_BARS
+/*
+ * Legacy struct storing addresses to whole mapped BARs.
+ */
struct pcim_iomap_devres {
- void __iomem *table[PCIM_IOMAP_MAX];
+ void __iomem *table[PCI_NUM_RESOURCES];
};
+/* Used to restore the old INTx state on driver detach. */
+struct pcim_intx_devres {
+ int orig_intx;
+};
+
+enum pcim_addr_devres_type {
+ /* Default initializer. */
+ PCIM_ADDR_DEVRES_TYPE_INVALID,
+
+ /* A requested region spanning an entire BAR. */
+ PCIM_ADDR_DEVRES_TYPE_REGION,
+
+ /*
+ * A requested region spanning an entire BAR, and a mapping for
+ * the entire BAR.
+ */
+ PCIM_ADDR_DEVRES_TYPE_REGION_MAPPING,
+
+ /*
+ * A mapping within a BAR, either spanning the whole BAR or just a
+ * range. Without a requested region.
+ */
+ PCIM_ADDR_DEVRES_TYPE_MAPPING,
+};
+
+/*
+ * This struct envelops IO or MEM addresses, i.e., mappings and region
+ * requests, because those are very frequently requested and released
+ * together.
+ */
+struct pcim_addr_devres {
+ enum pcim_addr_devres_type type;
+ void __iomem *baseaddr;
+ unsigned long offset;
+ unsigned long len;
+ int bar;
+};
+
+static inline void pcim_addr_devres_clear(struct pcim_addr_devres *res)
+{
+ memset(res, 0, sizeof(*res));
+ res->bar = -1;
+}
+
+/*
+ * The following functions, __pcim_*_region*, exist as counterparts to the
+ * versions from pci.c - which, unfortunately, can be in "hybrid mode", i.e.,
+ * sometimes managed, sometimes not.
+ *
+ * To separate the APIs cleanly, we define our own, simplified versions here.
+ */
+
+/**
+ * __pcim_request_region_range - Request a ranged region
+ * @pdev: PCI device the region belongs to
+ * @bar: BAR the range is within
+ * @offset: offset from the BAR's start address
+ * @maxlen: length in bytes, beginning at @offset
+ * @name: name of the driver requesting the resource
+ * @req_flags: flags for the request, e.g., for kernel-exclusive requests
+ *
+ * Returns: 0 on success, a negative error code on failure.
+ *
+ * Request a range within a device's PCI BAR. Sanity check the input.
+ */
+static int __pcim_request_region_range(struct pci_dev *pdev, int bar,
+ unsigned long offset,
+ unsigned long maxlen,
+ const char *name, int req_flags)
+{
+ resource_size_t start = pci_resource_start(pdev, bar);
+ resource_size_t len = pci_resource_len(pdev, bar);
+ unsigned long dev_flags = pci_resource_flags(pdev, bar);
+
+ if (start == 0 || len == 0) /* Unused BAR. */
+ return 0;
+ if (len <= offset)
+ return -EINVAL;
+
+ start += offset;
+ len -= offset;
+
+ if (len > maxlen && maxlen != 0)
+ len = maxlen;
+
+ if (dev_flags & IORESOURCE_IO) {
+ if (!request_region(start, len, name))
+ return -EBUSY;
+ } else if (dev_flags & IORESOURCE_MEM) {
+ if (!__request_mem_region(start, len, name, req_flags))
+ return -EBUSY;
+ } else {
+ /* That's not a device we can request anything on. */
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static void __pcim_release_region_range(struct pci_dev *pdev, int bar,
+ unsigned long offset,
+ unsigned long maxlen)
+{
+ resource_size_t start = pci_resource_start(pdev, bar);
+ resource_size_t len = pci_resource_len(pdev, bar);
+ unsigned long flags = pci_resource_flags(pdev, bar);
+
+ if (len <= offset || start == 0)
+ return;
+
+ if (len == 0 || maxlen == 0) /* This is an unused BAR. Do nothing. */
+ return;
+
+ start += offset;
+ len -= offset;
+
+ if (len > maxlen)
+ len = maxlen;
+
+ if (flags & IORESOURCE_IO)
+ release_region(start, len);
+ else if (flags & IORESOURCE_MEM)
+ release_mem_region(start, len);
+}
+
+static int __pcim_request_region(struct pci_dev *pdev, int bar,
+ const char *name, int flags)
+{
+ unsigned long offset = 0;
+ unsigned long len = pci_resource_len(pdev, bar);
+
+ return __pcim_request_region_range(pdev, bar, offset, len, name, flags);
+}
+
+static void __pcim_release_region(struct pci_dev *pdev, int bar)
+{
+ unsigned long offset = 0;
+ unsigned long len = pci_resource_len(pdev, bar);
+
+ __pcim_release_region_range(pdev, bar, offset, len);
+}
+
+static void pcim_addr_resource_release(struct device *dev, void *resource_raw)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct pcim_addr_devres *res = resource_raw;
+
+ switch (res->type) {
+ case PCIM_ADDR_DEVRES_TYPE_REGION:
+ __pcim_release_region(pdev, res->bar);
+ break;
+ case PCIM_ADDR_DEVRES_TYPE_REGION_MAPPING:
+ pci_iounmap(pdev, res->baseaddr);
+ __pcim_release_region(pdev, res->bar);
+ break;
+ case PCIM_ADDR_DEVRES_TYPE_MAPPING:
+ pci_iounmap(pdev, res->baseaddr);
+ break;
+ default:
+ break;
+ }
+}
+
+static struct pcim_addr_devres *pcim_addr_devres_alloc(struct pci_dev *pdev)
+{
+ struct pcim_addr_devres *res;
+
+ res = devres_alloc_node(pcim_addr_resource_release, sizeof(*res),
+ GFP_KERNEL, dev_to_node(&pdev->dev));
+ if (res)
+ pcim_addr_devres_clear(res);
+ return res;
+}
+
+/* Just for consistency and readability. */
+static inline void pcim_addr_devres_free(struct pcim_addr_devres *res)
+{
+ devres_free(res);
+}
+
+/*
+ * Used by devres to identify a pcim_addr_devres.
+ */
+static int pcim_addr_resources_match(struct device *dev,
+ void *a_raw, void *b_raw)
+{
+ struct pcim_addr_devres *a, *b;
+
+ a = a_raw;
+ b = b_raw;
+
+ if (a->type != b->type)
+ return 0;
+
+ switch (a->type) {
+ case PCIM_ADDR_DEVRES_TYPE_REGION:
+ case PCIM_ADDR_DEVRES_TYPE_REGION_MAPPING:
+ return a->bar == b->bar;
+ case PCIM_ADDR_DEVRES_TYPE_MAPPING:
+ return a->baseaddr == b->baseaddr;
+ default:
+ return 0;
+ }
+}
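
The match callback above is consumed through the standard devres lookup pattern that pcim_iounmap() and pcim_release_region() below follow: build a stack-local search key, fill in only the fields the match function compares, and hand it to devres_release(), which finds the matching entry, runs the release callback, and frees it. A condensed sketch:

/* Sketch: release the devres tracking a whole-BAR region request. */
static void sketch_release_by_key(struct pci_dev *pdev, int bar)
{
	struct pcim_addr_devres key;

	pcim_addr_devres_clear(&key);
	key.type = PCIM_ADDR_DEVRES_TYPE_REGION;
	key.bar = bar;

	/*
	 * Walks the device's devres list, calling
	 * pcim_addr_resources_match(dev, entry, &key) on each candidate.
	 */
	devres_release(&pdev->dev, pcim_addr_resource_release,
		       pcim_addr_resources_match, &key);
}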
static void devm_pci_unmap_iospace(struct device *dev, void *ptr)
{
@@ -92,8 +327,8 @@ EXPORT_SYMBOL(devm_pci_remap_cfgspace);
*
* All operations are managed and will be undone on driver detach.
*
- * Returns a pointer to the remapped memory or an ERR_PTR() encoded error code
- * on failure. Usage example::
+ * Returns a pointer to the remapped memory or an IOMEM_ERR_PTR() encoded error
+ * code on failure. Usage example::
*
* res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
* base = devm_pci_remap_cfg_resource(&pdev->dev, res);
@@ -140,96 +375,135 @@ void __iomem *devm_pci_remap_cfg_resource(struct device *dev,
}
EXPORT_SYMBOL(devm_pci_remap_cfg_resource);
+static void __pcim_clear_mwi(void *pdev_raw)
+{
+ struct pci_dev *pdev = pdev_raw;
+
+ pci_clear_mwi(pdev);
+}
+
/**
* pcim_set_mwi - a device-managed pci_set_mwi()
- * @dev: the PCI device for which MWI is enabled
+ * @pdev: the PCI device for which MWI is enabled
*
* Managed pci_set_mwi().
*
* RETURNS: An appropriate -ERRNO error value on error, or zero for success.
*/
-int pcim_set_mwi(struct pci_dev *dev)
+int pcim_set_mwi(struct pci_dev *pdev)
{
- struct pci_devres *dr;
+ int ret;
- dr = find_pci_dr(dev);
- if (!dr)
- return -ENOMEM;
+ ret = devm_add_action(&pdev->dev, __pcim_clear_mwi, pdev);
+ if (ret != 0)
+ return ret;
+
+ ret = pci_set_mwi(pdev);
+ if (ret != 0)
+ devm_remove_action(&pdev->dev, __pcim_clear_mwi, pdev);
- dr->mwi = 1;
- return pci_set_mwi(dev);
+ return ret;
}
EXPORT_SYMBOL(pcim_set_mwi);
-
-static void pcim_release(struct device *gendev, void *res)
+static inline bool mask_contains_bar(int mask, int bar)
{
- struct pci_dev *dev = to_pci_dev(gendev);
- struct pci_devres *this = res;
- int i;
+ return mask & BIT(bar);
+}
- for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
- if (this->region_mask & (1 << i))
- pci_release_region(dev, i);
+static void pcim_intx_restore(struct device *dev, void *data)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct pcim_intx_devres *res = data;
- if (this->mwi)
- pci_clear_mwi(dev);
+ pci_intx(pdev, res->orig_intx);
+}
- if (this->restore_intx)
- pci_intx(dev, this->orig_intx);
+static void save_orig_intx(struct pci_dev *pdev, struct pcim_intx_devres *res)
+{
+ u16 pci_command;
- if (this->enabled && !this->pinned)
- pci_disable_device(dev);
+ pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
+ res->orig_intx = !(pci_command & PCI_COMMAND_INTX_DISABLE);
}
-/*
- * TODO: After the last four callers in pci.c are ported, find_pci_dr()
- * needs to be made static again.
+/**
+ * pcim_intx - managed pci_intx()
+ * @pdev: the PCI device to operate on
+ * @enable: boolean: whether to enable or disable PCI INTx
+ *
+ * Returns: 0 on success, -ENOMEM on error.
+ *
+ * Enable/disable PCI INTx for device @pdev.
+ * Restore the original state on driver detach.
*/
-struct pci_devres *find_pci_dr(struct pci_dev *pdev)
+int pcim_intx(struct pci_dev *pdev, int enable)
{
- if (pci_is_managed(pdev))
- return devres_find(&pdev->dev, pcim_release, NULL, NULL);
- return NULL;
+ struct pcim_intx_devres *res;
+ struct device *dev = &pdev->dev;
+
+ /*
+ * pcim_intx() must only restore the INTx value that existed before the
+ * driver was loaded, i.e., before it called pcim_intx() for the
+ * first time.
+ */
+ res = devres_find(dev, pcim_intx_restore, NULL, NULL);
+ if (!res) {
+ res = devres_alloc(pcim_intx_restore, sizeof(*res), GFP_KERNEL);
+ if (!res)
+ return -ENOMEM;
+
+ save_orig_intx(pdev, res);
+ devres_add(dev, res);
+ }
+
+ pci_intx(pdev, enable);
+
+ return 0;
}
+EXPORT_SYMBOL_GPL(pcim_intx);
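
A usage sketch for the managed INTx helper (hypothetical driver; only pcim_enable_device() and pcim_intx() are real calls from this file): whatever INTx state the device had before probe is restored automatically on driver detach.

static int sketch_intx_probe(struct pci_dev *pdev,
			     const struct pci_device_id *id)
{
	int ret;

	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	/* MSI-only driver: keep legacy INTx off while the driver is bound. */
	return pcim_intx(pdev, 0);
}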
-static struct pci_devres *get_pci_dr(struct pci_dev *pdev)
+static void pcim_disable_device(void *pdev_raw)
{
- struct pci_devres *dr, *new_dr;
+ struct pci_dev *pdev = pdev_raw;
- dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
- if (dr)
- return dr;
+ if (!pdev->pinned)
+ pci_disable_device(pdev);
- new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
- if (!new_dr)
- return NULL;
- return devres_get(&pdev->dev, new_dr, NULL, NULL);
+ pdev->is_managed = false;
}
/**
* pcim_enable_device - Managed pci_enable_device()
* @pdev: PCI device to be initialized
*
- * Managed pci_enable_device().
+ * Returns: 0 on success, negative error code on failure.
+ *
+ * Managed pci_enable_device(). Device will automatically be disabled on
+ * driver detach.
*/
int pcim_enable_device(struct pci_dev *pdev)
{
- struct pci_devres *dr;
- int rc;
+ int ret;
+
+ ret = devm_add_action(&pdev->dev, pcim_disable_device, pdev);
+ if (ret != 0)
+ return ret;
+
+ /*
+ * We prefer removing the action in case of an error over
+ * devm_add_action_or_reset() because the latter could theoretically be
+ * disturbed by users having pinned the device too soon.
+ */
+ ret = pci_enable_device(pdev);
+ if (ret != 0) {
+ devm_remove_action(&pdev->dev, pcim_disable_device, pdev);
+ return ret;
+ }
- dr = get_pci_dr(pdev);
- if (unlikely(!dr))
- return -ENOMEM;
- if (dr->enabled)
- return 0;
+ pdev->is_managed = true;
- rc = pci_enable_device(pdev);
- if (!rc) {
- pdev->is_managed = 1;
- dr->enabled = 1;
- }
- return rc;
+ return ret;
}
EXPORT_SYMBOL(pcim_enable_device);
@@ -237,36 +511,32 @@ EXPORT_SYMBOL(pcim_enable_device);
* pcim_pin_device - Pin managed PCI device
* @pdev: PCI device to pin
*
- * Pin managed PCI device @pdev. Pinned device won't be disabled on
- * driver detach. @pdev must have been enabled with
- * pcim_enable_device().
+ * Pin managed PCI device @pdev. Pinned device won't be disabled on driver
+ * detach. @pdev must have been enabled with pcim_enable_device().
*/
void pcim_pin_device(struct pci_dev *pdev)
{
- struct pci_devres *dr;
-
- dr = find_pci_dr(pdev);
- WARN_ON(!dr || !dr->enabled);
- if (dr)
- dr->pinned = 1;
+ pdev->pinned = true;
}
EXPORT_SYMBOL(pcim_pin_device);
static void pcim_iomap_release(struct device *gendev, void *res)
{
- struct pci_dev *dev = to_pci_dev(gendev);
- struct pcim_iomap_devres *this = res;
- int i;
-
- for (i = 0; i < PCIM_IOMAP_MAX; i++)
- if (this->table[i])
- pci_iounmap(dev, this->table[i]);
+ /*
+ * Do nothing. This is legacy code.
+ *
+ * Cleanup of the mappings is now done directly through the callbacks
+ * registered when creating them.
+ */
}
/**
- * pcim_iomap_table - access iomap allocation table
+ * pcim_iomap_table - access iomap allocation table (DEPRECATED)
* @pdev: PCI device to access iomap table for
*
+ * Returns:
+ * Const pointer to array of __iomem pointers on success, NULL on failure.
+ *
* Access iomap allocation table for @dev. If iomap table doesn't
* exist and @pdev is managed, it will be allocated. All iomaps
* recorded in the iomap table are automatically unmapped on driver
@@ -275,6 +545,11 @@ static void pcim_iomap_release(struct device *gendev, void *res)
* This function might sleep when the table is first allocated but can
* be safely called without context and guaranteed to succeed once
* allocated.
+ *
+ * This function is DEPRECATED. Do not use it in new code. Instead, obtain a
+ * mapping's address directly from one of the pcim_* mapping functions. For
+ * example:
+ * void __iomem \*mappy = pcim_iomap(pdev, bar, length);
*/
void __iomem * const *pcim_iomap_table(struct pci_dev *pdev)
{
@@ -293,27 +568,117 @@ void __iomem * const *pcim_iomap_table(struct pci_dev *pdev)
}
EXPORT_SYMBOL(pcim_iomap_table);
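
A sketch of the migration the docstring asks for (BAR index and length are illustrative): instead of creating the mapping and then fishing its address back out of the legacy table, keep the pointer that the mapping call itself returns.

/* Deprecated pattern: map, then look the address up in the table. */
static void __iomem *sketch_old_way(struct pci_dev *pdev)
{
	void __iomem * const *table;

	if (!pcim_iomap(pdev, 0, 0))	/* maxlen 0: map the whole BAR */
		return NULL;

	table = pcim_iomap_table(pdev);
	return table ? table[0] : NULL;
}

/* Preferred pattern: take the address straight from the mapping call. */
static void __iomem *sketch_new_way(struct pci_dev *pdev)
{
	return pcim_iomap(pdev, 0, 0);
}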
+/*
+ * Fill the legacy mapping-table, so that drivers using the old API can
+ * still get a BAR's mapping address through pcim_iomap_table().
+ */
+static int pcim_add_mapping_to_legacy_table(struct pci_dev *pdev,
+ void __iomem *mapping, int bar)
+{
+ void __iomem **legacy_iomap_table;
+
+ if (!pci_bar_index_is_valid(bar))
+ return -EINVAL;
+
+ legacy_iomap_table = (void __iomem **)pcim_iomap_table(pdev);
+ if (!legacy_iomap_table)
+ return -ENOMEM;
+
+ /* The legacy mechanism doesn't allow for duplicate mappings. */
+ WARN_ON(legacy_iomap_table[bar]);
+
+ legacy_iomap_table[bar] = mapping;
+
+ return 0;
+}
+
+/*
+ * Remove a mapping. The table only contains whole-BAR mappings, so this will
+ * never interfere with ranged mappings.
+ */
+static void pcim_remove_mapping_from_legacy_table(struct pci_dev *pdev,
+ void __iomem *addr)
+{
+ int bar;
+ void __iomem **legacy_iomap_table;
+
+ legacy_iomap_table = (void __iomem **)pcim_iomap_table(pdev);
+ if (!legacy_iomap_table)
+ return;
+
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
+ if (legacy_iomap_table[bar] == addr) {
+ legacy_iomap_table[bar] = NULL;
+ return;
+ }
+ }
+}
+
+/*
+ * The same as pcim_remove_mapping_from_legacy_table(), but identifies the
+ * mapping by its BAR index.
+ */
+static void pcim_remove_bar_from_legacy_table(struct pci_dev *pdev, int bar)
+{
+ void __iomem **legacy_iomap_table;
+
+ if (!pci_bar_index_is_valid(bar))
+ return;
+
+ legacy_iomap_table = (void __iomem **)pcim_iomap_table(pdev);
+ if (!legacy_iomap_table)
+ return;
+
+ legacy_iomap_table[bar] = NULL;
+}
+
/**
* pcim_iomap - Managed pci_iomap()
* @pdev: PCI device to iomap for
* @bar: BAR to iomap
* @maxlen: Maximum length of iomap
*
- * Managed pci_iomap(). Map is automatically unmapped on driver
- * detach.
+ * Returns: __iomem pointer on success, NULL on failure.
+ *
+ * Managed pci_iomap(). Map is automatically unmapped on driver detach. If
+ * desired, unmap manually only with pcim_iounmap().
+ *
+ * This SHOULD only be used once per BAR.
+ *
+ * NOTE:
+ * Contrary to the other pcim_* functions, this function does not return an
+ * IOMEM_ERR_PTR() on failure, but a simple NULL. This is done for backwards
+ * compatibility.
*/
void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen)
{
- void __iomem **tbl;
+ void __iomem *mapping;
+ struct pcim_addr_devres *res;
- BUG_ON(bar >= PCIM_IOMAP_MAX);
+ if (!pci_bar_index_is_valid(bar))
+ return NULL;
- tbl = (void __iomem **)pcim_iomap_table(pdev);
- if (!tbl || tbl[bar]) /* duplicate mappings not allowed */
+ res = pcim_addr_devres_alloc(pdev);
+ if (!res)
return NULL;
+ res->type = PCIM_ADDR_DEVRES_TYPE_MAPPING;
+
+ mapping = pci_iomap(pdev, bar, maxlen);
+ if (!mapping)
+ goto err_iomap;
+ res->baseaddr = mapping;
+
+ if (pcim_add_mapping_to_legacy_table(pdev, mapping, bar) != 0)
+ goto err_table;
+
+ devres_add(&pdev->dev, res);
+ return mapping;
- tbl[bar] = pci_iomap(pdev, bar, maxlen);
- return tbl[bar];
+err_table:
+ pci_iounmap(pdev, mapping);
+err_iomap:
+ pcim_addr_devres_free(res);
+ return NULL;
}
EXPORT_SYMBOL(pcim_iomap);
@@ -322,127 +687,341 @@ EXPORT_SYMBOL(pcim_iomap);
* @pdev: PCI device to iounmap for
* @addr: Address to unmap
*
- * Managed pci_iounmap(). @addr must have been mapped using pcim_iomap().
+ * Managed pci_iounmap(). @addr must have been mapped using a pcim_* mapping
+ * function.
*/
void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr)
{
- void __iomem **tbl;
- int i;
+ struct pcim_addr_devres res_searched;
- pci_iounmap(pdev, addr);
+ pcim_addr_devres_clear(&res_searched);
+ res_searched.type = PCIM_ADDR_DEVRES_TYPE_MAPPING;
+ res_searched.baseaddr = addr;
- tbl = (void __iomem **)pcim_iomap_table(pdev);
- BUG_ON(!tbl);
+ if (devres_release(&pdev->dev, pcim_addr_resource_release,
+ pcim_addr_resources_match, &res_searched) != 0) {
+ /* Doesn't exist. User passed nonsense. */
+ return;
+ }
- for (i = 0; i < PCIM_IOMAP_MAX; i++)
- if (tbl[i] == addr) {
- tbl[i] = NULL;
- return;
- }
- WARN_ON(1);
+ pcim_remove_mapping_from_legacy_table(pdev, addr);
}
EXPORT_SYMBOL(pcim_iounmap);
/**
- * pcim_iomap_regions - Request and iomap PCI BARs
+ * pcim_iomap_region - Request and iomap a PCI BAR
+ * @pdev: PCI device to map IO resources for
+ * @bar: Index of a BAR to map
+ * @name: Name of the driver requesting the resource
+ *
+ * Returns: __iomem pointer on success, an IOMEM_ERR_PTR on failure.
+ *
+ * Mapping and region will get automatically released on driver detach. If
+ * desired, release manually only with pcim_iounmap_region().
+ */
+void __iomem *pcim_iomap_region(struct pci_dev *pdev, int bar,
+ const char *name)
+{
+ int ret;
+ struct pcim_addr_devres *res;
+
+ if (!pci_bar_index_is_valid(bar))
+ return IOMEM_ERR_PTR(-EINVAL);
+
+ res = pcim_addr_devres_alloc(pdev);
+ if (!res)
+ return IOMEM_ERR_PTR(-ENOMEM);
+
+ res->type = PCIM_ADDR_DEVRES_TYPE_REGION_MAPPING;
+ res->bar = bar;
+
+ ret = __pcim_request_region(pdev, bar, name, 0);
+ if (ret != 0)
+ goto err_region;
+
+ res->baseaddr = pci_iomap(pdev, bar, 0);
+ if (!res->baseaddr) {
+ ret = -EINVAL;
+ goto err_iomap;
+ }
+
+ devres_add(&pdev->dev, res);
+ return res->baseaddr;
+
+err_iomap:
+ __pcim_release_region(pdev, bar);
+err_region:
+ pcim_addr_devres_free(res);
+
+ return IOMEM_ERR_PTR(ret);
+}
+EXPORT_SYMBOL(pcim_iomap_region);
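
A typical probe-time usage sketch (hypothetical driver name and register offset): one call both requests and maps BAR 0, and both are undone automatically on driver detach.

static int sketch_region_probe(struct pci_dev *pdev,
			       const struct pci_device_id *id)
{
	void __iomem *regs;
	int ret;

	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	regs = pcim_iomap_region(pdev, 0, "sketch-driver");
	if (IS_ERR(regs))
		return PTR_ERR(regs);

	writel(0x1, regs + 0x04);	/* hypothetical enable register */
	return 0;
}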
+
+/**
+ * pcim_iounmap_region - Unmap and release a PCI BAR
+ * @pdev: PCI device to operate on
+ * @bar: Index of BAR to unmap and release
+ *
+ * Unmap a BAR and release its region manually. Only pass BARs that were
+ * previously mapped by pcim_iomap_region().
+ */
+void pcim_iounmap_region(struct pci_dev *pdev, int bar)
+{
+ struct pcim_addr_devres res_searched;
+
+ pcim_addr_devres_clear(&res_searched);
+ res_searched.type = PCIM_ADDR_DEVRES_TYPE_REGION_MAPPING;
+ res_searched.bar = bar;
+
+ devres_release(&pdev->dev, pcim_addr_resource_release,
+ pcim_addr_resources_match, &res_searched);
+}
+EXPORT_SYMBOL(pcim_iounmap_region);
+
+/**
+ * pcim_iomap_regions - Request and iomap PCI BARs (DEPRECATED)
* @pdev: PCI device to map IO resources for
* @mask: Mask of BARs to request and iomap
- * @name: Name used when requesting regions
+ * @name: Name of the driver requesting the resources
+ *
+ * Returns: 0 on success, negative error code on failure.
*
* Request and iomap regions specified by @mask.
+ *
+ * This function is DEPRECATED. Do not use it in new code.
+ * Use pcim_iomap_region() instead.
*/
int pcim_iomap_regions(struct pci_dev *pdev, int mask, const char *name)
{
- void __iomem * const *iomap;
- int i, rc;
+ int ret;
+ int bar;
+ void __iomem *mapping;
- iomap = pcim_iomap_table(pdev);
- if (!iomap)
- return -ENOMEM;
+ for (bar = 0; bar < DEVICE_COUNT_RESOURCE; bar++) {
+ if (!mask_contains_bar(mask, bar))
+ continue;
- for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
- unsigned long len;
+ mapping = pcim_iomap_region(pdev, bar, name);
+ if (IS_ERR(mapping)) {
+ ret = PTR_ERR(mapping);
+ goto err;
+ }
+ ret = pcim_add_mapping_to_legacy_table(pdev, mapping, bar);
+ if (ret != 0)
+ goto err;
+ }
- if (!(mask & (1 << i)))
- continue;
+ return 0;
- rc = -EINVAL;
- len = pci_resource_len(pdev, i);
- if (!len)
- goto err_inval;
+err:
+ while (--bar >= 0) {
+ pcim_iounmap_region(pdev, bar);
+ pcim_remove_bar_from_legacy_table(pdev, bar);
+ }
- rc = pci_request_region(pdev, i, name);
- if (rc)
- goto err_inval;
+ return ret;
+}
+EXPORT_SYMBOL(pcim_iomap_regions);
- rc = -ENOMEM;
- if (!pcim_iomap(pdev, i, 0))
- goto err_region;
+static int _pcim_request_region(struct pci_dev *pdev, int bar, const char *name,
+ int request_flags)
+{
+ int ret;
+ struct pcim_addr_devres *res;
+
+ if (!pci_bar_index_is_valid(bar))
+ return -EINVAL;
+
+ res = pcim_addr_devres_alloc(pdev);
+ if (!res)
+ return -ENOMEM;
+ res->type = PCIM_ADDR_DEVRES_TYPE_REGION;
+ res->bar = bar;
+
+ ret = __pcim_request_region(pdev, bar, name, request_flags);
+ if (ret != 0) {
+ pcim_addr_devres_free(res);
+ return ret;
}
+ devres_add(&pdev->dev, res);
return 0;
+}
- err_region:
- pci_release_region(pdev, i);
- err_inval:
- while (--i >= 0) {
- if (!(mask & (1 << i)))
- continue;
- pcim_iounmap(pdev, iomap[i]);
- pci_release_region(pdev, i);
- }
+/**
+ * pcim_request_region - Request a PCI BAR
+ * @pdev: PCI device to request region for
+ * @bar: Index of BAR to request
+ * @name: Name of the driver requesting the resource
+ *
+ * Returns: 0 on success, a negative error code on failure.
+ *
+ * Request region specified by @bar.
+ *
+ * The region will automatically be released on driver detach. If desired,
+ * release manually only with pcim_release_region().
+ */
+int pcim_request_region(struct pci_dev *pdev, int bar, const char *name)
+{
+ return _pcim_request_region(pdev, bar, name, 0);
+}
+EXPORT_SYMBOL(pcim_request_region);
- return rc;
+/**
+ * pcim_request_region_exclusive - Request a PCI BAR exclusively
+ * @pdev: PCI device to request region for
+ * @bar: Index of BAR to request
+ * @name: Name of the driver requesting the resource
+ *
+ * Returns: 0 on success, a negative error code on failure.
+ *
+ * Request region specified by @bar exclusively.
+ *
+ * The region will automatically be released on driver detach. If desired,
+ * release manually only with pcim_release_region().
+ */
+int pcim_request_region_exclusive(struct pci_dev *pdev, int bar, const char *name)
+{
+ return _pcim_request_region(pdev, bar, name, IORESOURCE_EXCLUSIVE);
+}
+
+/**
+ * pcim_release_region - Release a PCI BAR
+ * @pdev: PCI device to operate on
+ * @bar: Index of BAR to release
+ *
+ * Release a region manually that was previously requested by
+ * pcim_request_region().
+ */
+void pcim_release_region(struct pci_dev *pdev, int bar)
+{
+ struct pcim_addr_devres res_searched;
+
+ pcim_addr_devres_clear(&res_searched);
+ res_searched.type = PCIM_ADDR_DEVRES_TYPE_REGION;
+ res_searched.bar = bar;
+
+ devres_release(&pdev->dev, pcim_addr_resource_release,
+ pcim_addr_resources_match, &res_searched);
}
-EXPORT_SYMBOL(pcim_iomap_regions);
+
/**
- * pcim_iomap_regions_request_all - Request all BARs and iomap specified ones
+ * pcim_release_all_regions - Release all regions of a PCI-device
+ * @pdev: the PCI device
+ *
+ * Release all regions previously requested through pcim_request_region()
+ * or pcim_request_all_regions().
+ *
+ * Can be called from any context, i.e., not necessarily as a counterpart to
+ * pcim_request_all_regions().
+ */
+static void pcim_release_all_regions(struct pci_dev *pdev)
+{
+ int bar;
+
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
+ pcim_release_region(pdev, bar);
+}
+
+/**
+ * pcim_request_all_regions - Request all regions
* @pdev: PCI device to map IO resources for
- * @mask: Mask of BARs to iomap
- * @name: Name used when requesting regions
+ * @name: name of the driver requesting the resources
+ *
+ * Returns: 0 on success, negative error code on failure.
*
- * Request all PCI BARs and iomap regions specified by @mask.
+ * Requested regions will automatically be released at driver detach. If
+ * desired, release individual regions with pcim_release_region() or all of
+ * them at once with pcim_release_all_regions().
*/
-int pcim_iomap_regions_request_all(struct pci_dev *pdev, int mask,
- const char *name)
+int pcim_request_all_regions(struct pci_dev *pdev, const char *name)
{
- int request_mask = ((1 << 6) - 1) & ~mask;
- int rc;
+ int ret;
+ int bar;
+
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
+ ret = pcim_request_region(pdev, bar, name);
+ if (ret != 0)
+ goto err;
+ }
- rc = pci_request_selected_regions(pdev, request_mask, name);
- if (rc)
- return rc;
+ return 0;
+
+err:
+ pcim_release_all_regions(pdev);
- rc = pcim_iomap_regions(pdev, mask, name);
- if (rc)
- pci_release_selected_regions(pdev, request_mask);
- return rc;
+ return ret;
}
-EXPORT_SYMBOL(pcim_iomap_regions_request_all);
+EXPORT_SYMBOL(pcim_request_all_regions);
/**
- * pcim_iounmap_regions - Unmap and release PCI BARs
+ * pcim_iounmap_regions - Unmap and release PCI BARs (DEPRECATED)
* @pdev: PCI device to map IO resources for
* @mask: Mask of BARs to unmap and release
*
* Unmap and release regions specified by @mask.
+ *
+ * This function is DEPRECATED. Do not use it in new code.
+ * Use pcim_iounmap_region() instead.
*/
void pcim_iounmap_regions(struct pci_dev *pdev, int mask)
{
- void __iomem * const *iomap;
int i;
- iomap = pcim_iomap_table(pdev);
- if (!iomap)
- return;
-
- for (i = 0; i < PCIM_IOMAP_MAX; i++) {
- if (!(mask & (1 << i)))
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
+ if (!mask_contains_bar(mask, i))
continue;
- pcim_iounmap(pdev, iomap[i]);
- pci_release_region(pdev, i);
+ pcim_iounmap_region(pdev, i);
+ pcim_remove_bar_from_legacy_table(pdev, i);
}
}
EXPORT_SYMBOL(pcim_iounmap_regions);
+
+/**
+ * pcim_iomap_range - Create a ranged __iomem mapping within a PCI BAR
+ * @pdev: PCI device to map IO resources for
+ * @bar: Index of the BAR
+ * @offset: Offset from the beginning of the BAR
+ * @len: Length in bytes for the mapping
+ *
+ * Returns: __iomem pointer on success, an IOMEM_ERR_PTR on failure.
+ *
+ * Creates a new IO mapping within the specified @bar, ranging from @offset to
+ * @offset + @len.
+ *
+ * The mapping will automatically get unmapped on driver detach. If desired,
+ * release manually only with pcim_iounmap().
+ */
+void __iomem *pcim_iomap_range(struct pci_dev *pdev, int bar,
+ unsigned long offset, unsigned long len)
+{
+ void __iomem *mapping;
+ struct pcim_addr_devres *res;
+
+ if (!pci_bar_index_is_valid(bar))
+ return IOMEM_ERR_PTR(-EINVAL);
+
+ res = pcim_addr_devres_alloc(pdev);
+ if (!res)
+ return IOMEM_ERR_PTR(-ENOMEM);
+
+ mapping = pci_iomap_range(pdev, bar, offset, len);
+ if (!mapping) {
+ pcim_addr_devres_free(res);
+ return IOMEM_ERR_PTR(-EINVAL);
+ }
+
+ res->type = PCIM_ADDR_DEVRES_TYPE_MAPPING;
+ res->baseaddr = mapping;
+
+ /*
+ * Ranged mappings don't get added to the legacy-table, since the table
+ * only ever keeps track of whole BARs.
+ */
+
+ devres_add(&pdev->dev, res);
+ return mapping;
+}
+EXPORT_SYMBOL(pcim_iomap_range);
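
A sketch combining the two new APIs (offsets and sizes are illustrative): claim every BAR up front with pcim_request_all_regions(), then map only the 4 KiB control block that, for this hypothetical device, lives at offset 0x2000 of BAR 0.

#include <linux/sizes.h>	/* SZ_4K */

static int sketch_range_probe(struct pci_dev *pdev,
			      const struct pci_device_id *id)
{
	void __iomem *ctrl;
	int ret;

	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	ret = pcim_request_all_regions(pdev, "sketch-driver");
	if (ret)
		return ret;

	ctrl = pcim_iomap_range(pdev, 0, 0x2000, SZ_4K);
	if (IS_ERR(ctrl))
		return PTR_ERR(ctrl);

	return 0;
}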
diff --git a/drivers/pci/doe.c b/drivers/pci/doe.c
index e3aab5edaf70..aae9a8a00406 100644
--- a/drivers/pci/doe.c
+++ b/drivers/pci/doe.c
@@ -14,15 +14,17 @@
#include <linux/bitfield.h>
#include <linux/delay.h>
+#include <linux/device.h>
#include <linux/jiffies.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/pci-doe.h>
+#include <linux/sysfs.h>
#include <linux/workqueue.h>
#include "pci.h"
-#define PCI_DOE_PROTOCOL_DISCOVERY 0
+#define PCI_DOE_FEATURE_DISCOVERY 0
/* Timeout of 1 second from 6.30.2 Operation, PCI Spec r6.0 */
#define PCI_DOE_TIMEOUT HZ
@@ -43,22 +45,27 @@
*
* @pdev: PCI device this mailbox belongs to
* @cap_offset: Capability offset
- * @prots: Array of protocols supported (encoded as long values)
+ * @feats: Array of features supported (encoded as long values)
* @wq: Wait queue for work item
* @work_queue: Queue of pci_doe_work items
* @flags: Bit array of PCI_DOE_FLAG_* flags
+ * @sysfs_attrs: Array of sysfs device attributes
*/
struct pci_doe_mb {
struct pci_dev *pdev;
u16 cap_offset;
- struct xarray prots;
+ struct xarray feats;
wait_queue_head_t wq;
struct workqueue_struct *work_queue;
unsigned long flags;
+
+#ifdef CONFIG_SYSFS
+ struct device_attribute *sysfs_attrs;
+#endif
};
-struct pci_doe_protocol {
+struct pci_doe_feature {
u16 vid;
u8 type;
};
@@ -66,7 +73,7 @@ struct pci_doe_protocol {
/**
* struct pci_doe_task - represents a single query/response
*
- * @prot: DOE Protocol
+ * @feat: DOE Feature
* @request_pl: The request payload
* @request_pl_sz: Size of the request payload (bytes)
* @response_pl: The response payload
@@ -78,7 +85,7 @@ struct pci_doe_protocol {
* @doe_mb: Used internally by the mailbox
*/
struct pci_doe_task {
- struct pci_doe_protocol prot;
+ struct pci_doe_feature feat;
const __le32 *request_pl;
size_t request_pl_sz;
__le32 *response_pl;
@@ -92,6 +99,152 @@ struct pci_doe_task {
struct pci_doe_mb *doe_mb;
};
+#ifdef CONFIG_SYSFS
+static ssize_t doe_discovery_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "0001:00\n");
+}
+static DEVICE_ATTR_RO(doe_discovery);
+
+static struct attribute *pci_doe_sysfs_feature_attrs[] = {
+ &dev_attr_doe_discovery.attr,
+ NULL
+};
+
+static bool pci_doe_features_sysfs_group_visible(struct kobject *kobj)
+{
+ struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
+
+ return !xa_empty(&pdev->doe_mbs);
+}
+DEFINE_SIMPLE_SYSFS_GROUP_VISIBLE(pci_doe_features_sysfs)
+
+const struct attribute_group pci_doe_sysfs_group = {
+ .name = "doe_features",
+ .attrs = pci_doe_sysfs_feature_attrs,
+ .is_visible = SYSFS_GROUP_VISIBLE(pci_doe_features_sysfs),
+};
+
+static ssize_t pci_doe_sysfs_feature_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "%s\n", attr->attr.name);
+}
+
+static void pci_doe_sysfs_feature_remove(struct pci_dev *pdev,
+ struct pci_doe_mb *doe_mb)
+{
+ struct device_attribute *attrs = doe_mb->sysfs_attrs;
+ struct device *dev = &pdev->dev;
+ unsigned long i;
+ void *entry;
+
+ if (!attrs)
+ return;
+
+ doe_mb->sysfs_attrs = NULL;
+ xa_for_each(&doe_mb->feats, i, entry) {
+ if (attrs[i].show)
+ sysfs_remove_file_from_group(&dev->kobj, &attrs[i].attr,
+ pci_doe_sysfs_group.name);
+ kfree(attrs[i].attr.name);
+ }
+ kfree(attrs);
+}
+
+static int pci_doe_sysfs_feature_populate(struct pci_dev *pdev,
+ struct pci_doe_mb *doe_mb)
+{
+ struct device *dev = &pdev->dev;
+ struct device_attribute *attrs;
+ unsigned long num_features = 0;
+ unsigned long vid, type;
+ unsigned long i;
+ void *entry;
+ int ret;
+
+ xa_for_each(&doe_mb->feats, i, entry)
+ num_features++;
+
+ attrs = kcalloc(num_features, sizeof(*attrs), GFP_KERNEL);
+ if (!attrs) {
+ pci_warn(pdev, "Failed allocating the device_attribute array\n");
+ return -ENOMEM;
+ }
+
+ doe_mb->sysfs_attrs = attrs;
+ xa_for_each(&doe_mb->feats, i, entry) {
+ sysfs_attr_init(&attrs[i].attr);
+ vid = xa_to_value(entry) >> 8;
+ type = xa_to_value(entry) & 0xFF;
+
+ if (vid == PCI_VENDOR_ID_PCI_SIG &&
+ type == PCI_DOE_FEATURE_DISCOVERY) {
+
+ /*
+ * DOE Discovery, manually displayed by
+ * `dev_attr_doe_discovery`
+ */
+ continue;
+ }
+
+ attrs[i].attr.name = kasprintf(GFP_KERNEL,
+ "%04lx:%02lx", vid, type);
+ if (!attrs[i].attr.name) {
+ ret = -ENOMEM;
+ pci_warn(pdev, "Failed allocating the attribute name\n");
+ goto fail;
+ }
+
+ attrs[i].attr.mode = 0444;
+ attrs[i].show = pci_doe_sysfs_feature_show;
+
+ ret = sysfs_add_file_to_group(&dev->kobj, &attrs[i].attr,
+ pci_doe_sysfs_group.name);
+ if (ret) {
+ attrs[i].show = NULL;
+ if (ret != -EEXIST) {
+ pci_warn(pdev, "Failed adding %s to sysfs group\n",
+ attrs[i].attr.name);
+ goto fail;
+ } else {
+ kfree(attrs[i].attr.name);
+ }
+ }
+ }
+
+ return 0;
+
+fail:
+ pci_doe_sysfs_feature_remove(pdev, doe_mb);
+ return ret;
+}
+
+void pci_doe_sysfs_teardown(struct pci_dev *pdev)
+{
+ struct pci_doe_mb *doe_mb;
+ unsigned long index;
+
+ xa_for_each(&pdev->doe_mbs, index, doe_mb)
+ pci_doe_sysfs_feature_remove(pdev, doe_mb);
+}
+
+void pci_doe_sysfs_init(struct pci_dev *pdev)
+{
+ struct pci_doe_mb *doe_mb;
+ unsigned long index;
+ int ret;
+
+ xa_for_each(&pdev->doe_mbs, index, doe_mb) {
+ ret = pci_doe_sysfs_feature_populate(pdev, doe_mb);
+ if (ret)
+ return;
+ }
+}
+#endif
+
static int pci_doe_wait(struct pci_doe_mb *doe_mb, unsigned long timeout)
{
if (wait_event_timeout(doe_mb->wq,
@@ -146,6 +299,7 @@ static int pci_doe_send_req(struct pci_doe_mb *doe_mb,
{
struct pci_dev *pdev = doe_mb->pdev;
int offset = doe_mb->cap_offset;
+ unsigned long timeout_jiffies;
size_t length, remainder;
u32 val;
int i;
@@ -155,8 +309,19 @@ static int pci_doe_send_req(struct pci_doe_mb *doe_mb,
* someone other than Linux (e.g. firmware) is using the mailbox. Note
* it is expected that firmware and OS will negotiate access rights via
* an as-yet-undefined method.
+ *
+ * Wait up to one PCI_DOE_TIMEOUT period to allow the prior command to
+ * finish. Otherwise, simply error out as unable to field the request.
+ *
+ * PCIe r6.2 sec 6.30.3 states no interrupt is raised when the DOE Busy
+ * bit is cleared, so polling here is our best option for the moment.
*/
- pci_read_config_dword(pdev, offset + PCI_DOE_STATUS, &val);
+ timeout_jiffies = jiffies + PCI_DOE_TIMEOUT;
+ do {
+ pci_read_config_dword(pdev, offset + PCI_DOE_STATUS, &val);
+ } while (FIELD_GET(PCI_DOE_STATUS_BUSY, val) &&
+ !time_after(jiffies, timeout_jiffies));
+
if (FIELD_GET(PCI_DOE_STATUS_BUSY, val))
return -EBUSY;
@@ -171,8 +336,8 @@ static int pci_doe_send_req(struct pci_doe_mb *doe_mb,
length = 0;
/* Write DOE Header */
- val = FIELD_PREP(PCI_DOE_DATA_OBJECT_HEADER_1_VID, task->prot.vid) |
- FIELD_PREP(PCI_DOE_DATA_OBJECT_HEADER_1_TYPE, task->prot.type);
+ val = FIELD_PREP(PCI_DOE_DATA_OBJECT_HEADER_1_VID, task->feat.vid) |
+ FIELD_PREP(PCI_DOE_DATA_OBJECT_HEADER_1_TYPE, task->feat.type);
pci_write_config_dword(pdev, offset + PCI_DOE_WRITE, val);
pci_write_config_dword(pdev, offset + PCI_DOE_WRITE,
FIELD_PREP(PCI_DOE_DATA_OBJECT_HEADER_2_LENGTH,
@@ -217,12 +382,12 @@ static int pci_doe_recv_resp(struct pci_doe_mb *doe_mb, struct pci_doe_task *tas
int i = 0;
u32 val;
- /* Read the first dword to get the protocol */
+ /* Read the first dword to get the feature */
pci_read_config_dword(pdev, offset + PCI_DOE_READ, &val);
- if ((FIELD_GET(PCI_DOE_DATA_OBJECT_HEADER_1_VID, val) != task->prot.vid) ||
- (FIELD_GET(PCI_DOE_DATA_OBJECT_HEADER_1_TYPE, val) != task->prot.type)) {
- dev_err_ratelimited(&pdev->dev, "[%x] expected [VID, Protocol] = [%04x, %02x], got [%04x, %02x]\n",
- doe_mb->cap_offset, task->prot.vid, task->prot.type,
+ if ((FIELD_GET(PCI_DOE_DATA_OBJECT_HEADER_1_VID, val) != task->feat.vid) ||
+ (FIELD_GET(PCI_DOE_DATA_OBJECT_HEADER_1_TYPE, val) != task->feat.type)) {
+ dev_err_ratelimited(&pdev->dev, "[%x] expected [VID, Feature] = [%04x, %02x], got [%04x, %02x]\n",
+ doe_mb->cap_offset, task->feat.vid, task->feat.type,
FIELD_GET(PCI_DOE_DATA_OBJECT_HEADER_1_VID, val),
FIELD_GET(PCI_DOE_DATA_OBJECT_HEADER_1_TYPE, val));
return -EIO;
@@ -383,17 +548,19 @@ static void pci_doe_task_complete(struct pci_doe_task *task)
complete(task->private);
}
-static int pci_doe_discovery(struct pci_doe_mb *doe_mb, u8 *index, u16 *vid,
- u8 *protocol)
+static int pci_doe_discovery(struct pci_doe_mb *doe_mb, u8 capver, u8 *index, u16 *vid,
+ u8 *feature)
{
u32 request_pl = FIELD_PREP(PCI_DOE_DATA_OBJECT_DISC_REQ_3_INDEX,
- *index);
+ *index) |
+ FIELD_PREP(PCI_DOE_DATA_OBJECT_DISC_REQ_3_VER,
+ (capver >= 2) ? 2 : 0);
__le32 request_pl_le = cpu_to_le32(request_pl);
__le32 response_pl_le;
u32 response_pl;
int rc;
- rc = pci_doe(doe_mb, PCI_VENDOR_ID_PCI_SIG, PCI_DOE_PROTOCOL_DISCOVERY,
+ rc = pci_doe(doe_mb, PCI_VENDOR_ID_PCI_SIG, PCI_DOE_FEATURE_DISCOVERY,
&request_pl_le, sizeof(request_pl_le),
&response_pl_le, sizeof(response_pl_le));
if (rc < 0)
@@ -404,7 +571,7 @@ static int pci_doe_discovery(struct pci_doe_mb *doe_mb, u8 *index, u16 *vid,
response_pl = le32_to_cpu(response_pl_le);
*vid = FIELD_GET(PCI_DOE_DATA_OBJECT_DISC_RSP_3_VID, response_pl);
- *protocol = FIELD_GET(PCI_DOE_DATA_OBJECT_DISC_RSP_3_PROTOCOL,
+ *feature = FIELD_GET(PCI_DOE_DATA_OBJECT_DISC_RSP_3_TYPE,
response_pl);
*index = FIELD_GET(PCI_DOE_DATA_OBJECT_DISC_RSP_3_NEXT_INDEX,
response_pl);
@@ -412,31 +579,35 @@ static int pci_doe_discovery(struct pci_doe_mb *doe_mb, u8 *index, u16 *vid,
return 0;
}
-static void *pci_doe_xa_prot_entry(u16 vid, u8 prot)
+static void *pci_doe_xa_feat_entry(u16 vid, u8 type)
{
- return xa_mk_value((vid << 8) | prot);
+ return xa_mk_value((vid << 8) | type);
}
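
The xarray stores each feature as a tagged value rather than an allocated object; a small sketch of the round trip (the 0x01 type is used here only as an example value): pack as (vid << 8) | type, and unpack with the same shifts the sysfs code above uses.

static void sketch_feat_entry_roundtrip(void)
{
	void *entry = pci_doe_xa_feat_entry(PCI_VENDOR_ID_PCI_SIG, 0x01);
	unsigned long packed = xa_to_value(entry);

	WARN_ON((packed >> 8) != PCI_VENDOR_ID_PCI_SIG);	/* vid */
	WARN_ON((packed & 0xFF) != 0x01);			/* type */
}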
-static int pci_doe_cache_protocols(struct pci_doe_mb *doe_mb)
+static int pci_doe_cache_features(struct pci_doe_mb *doe_mb)
{
u8 index = 0;
u8 xa_idx = 0;
+ u32 hdr = 0;
+
+ pci_read_config_dword(doe_mb->pdev, doe_mb->cap_offset, &hdr);
do {
int rc;
u16 vid;
- u8 prot;
+ u8 type;
- rc = pci_doe_discovery(doe_mb, &index, &vid, &prot);
+ rc = pci_doe_discovery(doe_mb, PCI_EXT_CAP_VER(hdr), &index,
+ &vid, &type);
if (rc)
return rc;
pci_dbg(doe_mb->pdev,
- "[%x] Found protocol %d vid: %x prot: %x\n",
- doe_mb->cap_offset, xa_idx, vid, prot);
+ "[%x] Found feature %d vid: %x type: %x\n",
+ doe_mb->cap_offset, xa_idx, vid, type);
- rc = xa_insert(&doe_mb->prots, xa_idx++,
- pci_doe_xa_prot_entry(vid, prot), GFP_KERNEL);
+ rc = xa_insert(&doe_mb->feats, xa_idx++,
+ pci_doe_xa_feat_entry(vid, type), GFP_KERNEL);
if (rc)
return rc;
} while (index);
@@ -460,7 +631,7 @@ static void pci_doe_cancel_tasks(struct pci_doe_mb *doe_mb)
* @pdev: PCI device to create the DOE mailbox for
* @cap_offset: Offset of the DOE mailbox
*
- * Create a single mailbox object to manage the mailbox protocol at the
+ * Create a single mailbox object to manage the mailbox feature at the
* cap_offset specified.
*
* RETURNS: created mailbox object on success
@@ -479,7 +650,7 @@ static struct pci_doe_mb *pci_doe_create_mb(struct pci_dev *pdev,
doe_mb->pdev = pdev;
doe_mb->cap_offset = cap_offset;
init_waitqueue_head(&doe_mb->wq);
- xa_init(&doe_mb->prots);
+ xa_init(&doe_mb->feats);
doe_mb->work_queue = alloc_ordered_workqueue("%s %s DOE [%x]", 0,
dev_bus_name(&pdev->dev),
@@ -502,11 +673,11 @@ static struct pci_doe_mb *pci_doe_create_mb(struct pci_dev *pdev,
/*
* The state machine and the mailbox should be in sync now;
- * Use the mailbox to query protocols.
+ * Use the mailbox to query features.
*/
- rc = pci_doe_cache_protocols(doe_mb);
+ rc = pci_doe_cache_features(doe_mb);
if (rc) {
- pci_err(pdev, "[%x] failed to cache protocols : %d\n",
+ pci_err(pdev, "[%x] failed to cache features : %d\n",
doe_mb->cap_offset, rc);
goto err_cancel;
}
@@ -515,7 +686,7 @@ static struct pci_doe_mb *pci_doe_create_mb(struct pci_dev *pdev,
err_cancel:
pci_doe_cancel_tasks(doe_mb);
- xa_destroy(&doe_mb->prots);
+ xa_destroy(&doe_mb->feats);
err_destroy_wq:
destroy_workqueue(doe_mb->work_queue);
err_free:
@@ -533,31 +704,31 @@ err_free:
static void pci_doe_destroy_mb(struct pci_doe_mb *doe_mb)
{
pci_doe_cancel_tasks(doe_mb);
- xa_destroy(&doe_mb->prots);
+ xa_destroy(&doe_mb->feats);
destroy_workqueue(doe_mb->work_queue);
kfree(doe_mb);
}
/**
- * pci_doe_supports_prot() - Return if the DOE instance supports the given
- * protocol
+ * pci_doe_supports_feat() - Return if the DOE instance supports the given
+ * feature
* @doe_mb: DOE mailbox capability to query
- * @vid: Protocol Vendor ID
- * @type: Protocol type
+ * @vid: Feature Vendor ID
+ * @type: Feature type
*
- * RETURNS: True if the DOE mailbox supports the protocol specified
+ * RETURNS: True if the DOE mailbox supports the feature specified
*/
-static bool pci_doe_supports_prot(struct pci_doe_mb *doe_mb, u16 vid, u8 type)
+static bool pci_doe_supports_feat(struct pci_doe_mb *doe_mb, u16 vid, u8 type)
{
unsigned long index;
void *entry;
- /* The discovery protocol must always be supported */
- if (vid == PCI_VENDOR_ID_PCI_SIG && type == PCI_DOE_PROTOCOL_DISCOVERY)
+ /* The discovery feature must always be supported */
+ if (vid == PCI_VENDOR_ID_PCI_SIG && type == PCI_DOE_FEATURE_DISCOVERY)
return true;
- xa_for_each(&doe_mb->prots, index, entry)
- if (entry == pci_doe_xa_prot_entry(vid, type))
+ xa_for_each(&doe_mb->feats, index, entry)
+ if (entry == pci_doe_xa_feat_entry(vid, type))
return true;
return false;
@@ -585,7 +756,7 @@ static bool pci_doe_supports_prot(struct pci_doe_mb *doe_mb, u16 vid, u8 type)
static int pci_doe_submit_task(struct pci_doe_mb *doe_mb,
struct pci_doe_task *task)
{
- if (!pci_doe_supports_prot(doe_mb, task->prot.vid, task->prot.type))
+ if (!pci_doe_supports_feat(doe_mb, task->feat.vid, task->feat.type))
return -EINVAL;
if (test_bit(PCI_DOE_FLAG_DEAD, &doe_mb->flags))
@@ -631,8 +802,8 @@ int pci_doe(struct pci_doe_mb *doe_mb, u16 vendor, u8 type,
{
DECLARE_COMPLETION_ONSTACK(c);
struct pci_doe_task task = {
- .prot.vid = vendor,
- .prot.type = type,
+ .feat.vid = vendor,
+ .feat.type = type,
.request_pl = request,
.request_pl_sz = request_sz,
.response_pl = response,
@@ -659,7 +830,7 @@ EXPORT_SYMBOL_GPL(pci_doe);
* @vendor: Vendor ID
* @type: Data Object Type
*
- * Find first DOE mailbox of a PCI device which supports the given protocol.
+ * Find the first DOE mailbox of a PCI device which supports the given feature.
*
* RETURNS: Pointer to the DOE mailbox or NULL if none was found.
*/
@@ -670,7 +841,7 @@ struct pci_doe_mb *pci_find_doe_mailbox(struct pci_dev *pdev, u16 vendor,
unsigned long index;
xa_for_each(&pdev->doe_mbs, index, doe_mb)
- if (pci_doe_supports_prot(doe_mb, vendor, type))
+ if (pci_doe_supports_feat(doe_mb, vendor, type))
return doe_mb;
return NULL;
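As a sketch of how a consumer drives this interface after the rename (illustrative only: example_doe_discover() and the payload field masks are assumptions based on the discovery data object layout, and the discovery type value 0 is open-coded because the PCI_DOE_FEATURE_DISCOVERY define is local to doe.c):

    #include <linux/pci-doe.h>

    /* Walk the mandatory discovery feature to enumerate a mailbox. */
    static int example_doe_discover(struct pci_doe_mb *mb, u8 index)
    {
            u32 request = index;    /* index lives in bits 7:0 */
            u32 response;
            int rc;

            /* pci_doe() returns the response size in bytes, or < 0 on error */
            rc = pci_doe(mb, PCI_VENDOR_ID_PCI_SIG, 0 /* discovery */,
                         &request, sizeof(request), &response, sizeof(response));
            if (rc < 0)
                    return rc;
            if (rc != sizeof(response))
                    return -EIO;

            pr_info("DOE feature: vid %#06x type %#04x\n",
                    response & 0xffff, (response >> 16) & 0xff);

            /* bits 31:24 carry the next index; zero terminates the walk */
            return (response >> 24) & 0xff;
    }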
diff --git a/drivers/pci/ecam.c b/drivers/pci/ecam.c
index 1c40d2506aef..260b7de2dbd5 100644
--- a/drivers/pci/ecam.c
+++ b/drivers/pci/ecam.c
@@ -55,7 +55,7 @@ struct pci_config_window *pci_ecam_create(struct device *dev,
bus_range_max = resource_size(cfgres) >> bus_shift;
if (bus_range > bus_range_max) {
bus_range = bus_range_max;
- cfg->busr.end = busr->start + bus_range - 1;
+ resource_set_size(&cfg->busr, bus_range);
dev_warn(dev, "ECAM area %pR can only accommodate %pR (reduced from %pR desired)\n",
cfgres, &cfg->busr, busr);
}
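For reference, resource_set_size() recomputes the resource end from its own start, equivalent to the open-coded assignment it replaces (a paraphrase of the helper in include/linux/ioport.h):

    static inline void resource_set_size(struct resource *res,
                                         resource_size_t size)
    {
            res->end = res->start + size - 1;
    }

The switch from busr->start to cfg->busr.start is benign here, since cfg->busr is initialized from *busr earlier in pci_ecam_create().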
diff --git a/drivers/pci/endpoint/Kconfig b/drivers/pci/endpoint/Kconfig
index 17bbdc9bbde0..1c5d82eb57d4 100644
--- a/drivers/pci/endpoint/Kconfig
+++ b/drivers/pci/endpoint/Kconfig
@@ -26,7 +26,7 @@ config PCI_ENDPOINT_CONFIGFS
help
This will enable the configfs entry that can be used to
configure the endpoint function and used to bind the
- function with a endpoint controller.
+ function with an endpoint controller.
source "drivers/pci/endpoint/functions/Kconfig"
diff --git a/drivers/pci/endpoint/functions/pci-epf-mhi.c b/drivers/pci/endpoint/functions/pci-epf-mhi.c
index 2c54d80107cf..6643a88c7a0c 100644
--- a/drivers/pci/endpoint/functions/pci-epf-mhi.c
+++ b/drivers/pci/endpoint/functions/pci-epf-mhi.c
@@ -125,7 +125,7 @@ static const struct pci_epf_mhi_ep_info sm8450_info = {
static struct pci_epf_header sa8775p_header = {
.vendorid = PCI_VENDOR_ID_QCOM,
- .deviceid = 0x0306, /* FIXME: Update deviceid for sa8775p EP */
+ .deviceid = 0x0116,
.baseclass_code = PCI_CLASS_OTHERS,
.interrupt_pin = PCI_INTERRUPT_INTA,
};
@@ -137,6 +137,7 @@ static const struct pci_epf_mhi_ep_info sa8775p_info = {
.epf_flags = PCI_BASE_ADDRESS_MEM_TYPE_32,
.msi_count = 32,
.mru = 0x8000,
+ .flags = MHI_EPF_USE_DMA,
};
struct pci_epf_mhi {
@@ -716,7 +717,7 @@ static void pci_epf_mhi_dma_deinit(struct pci_epf_mhi *epf_mhi)
epf_mhi->dma_chan_rx = NULL;
}
-static int pci_epf_mhi_core_init(struct pci_epf *epf)
+static int pci_epf_mhi_epc_init(struct pci_epf *epf)
{
struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf);
const struct pci_epf_mhi_ep_info *info = epf_mhi->info;
@@ -753,9 +754,35 @@ static int pci_epf_mhi_core_init(struct pci_epf *epf)
if (!epf_mhi->epc_features)
return -ENODATA;
+ if (info->flags & MHI_EPF_USE_DMA) {
+ ret = pci_epf_mhi_dma_init(epf_mhi);
+ if (ret) {
+ dev_err(dev, "Failed to initialize DMA: %d\n", ret);
+ return ret;
+ }
+ }
+
return 0;
}
+static void pci_epf_mhi_epc_deinit(struct pci_epf *epf)
+{
+ struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf);
+ const struct pci_epf_mhi_ep_info *info = epf_mhi->info;
+ struct pci_epf_bar *epf_bar = &epf->bar[info->bar_num];
+ struct mhi_ep_cntrl *mhi_cntrl = &epf_mhi->mhi_cntrl;
+ struct pci_epc *epc = epf->epc;
+
+ if (mhi_cntrl->mhi_dev) {
+ mhi_ep_power_down(mhi_cntrl);
+ if (info->flags & MHI_EPF_USE_DMA)
+ pci_epf_mhi_dma_deinit(epf_mhi);
+ mhi_ep_unregister_controller(mhi_cntrl);
+ }
+
+ pci_epc_clear_bar(epc, epf->func_no, epf->vfunc_no, epf_bar);
+}
+
static int pci_epf_mhi_link_up(struct pci_epf *epf)
{
struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf);
@@ -765,14 +792,6 @@ static int pci_epf_mhi_link_up(struct pci_epf *epf)
struct device *dev = &epf->dev;
int ret;
- if (info->flags & MHI_EPF_USE_DMA) {
- ret = pci_epf_mhi_dma_init(epf_mhi);
- if (ret) {
- dev_err(dev, "Failed to initialize DMA: %d\n", ret);
- return ret;
- }
- }
-
mhi_cntrl->mmio = epf_mhi->mmio;
mhi_cntrl->irq = epf_mhi->irq;
mhi_cntrl->mru = info->mru;
@@ -819,7 +838,7 @@ static int pci_epf_mhi_link_down(struct pci_epf *epf)
return 0;
}
-static int pci_epf_mhi_bme(struct pci_epf *epf)
+static int pci_epf_mhi_bus_master_enable(struct pci_epf *epf)
{
struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf);
const struct pci_epf_mhi_ep_info *info = epf_mhi->info;
@@ -848,12 +867,18 @@ static int pci_epf_mhi_bind(struct pci_epf *epf)
{
struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf);
struct pci_epc *epc = epf->epc;
+ struct device *dev = &epf->dev;
struct platform_device *pdev = to_platform_device(epc->dev.parent);
struct resource *res;
int ret;
/* Get MMIO base address from Endpoint controller */
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mmio");
+ if (!res) {
+ dev_err(dev, "Failed to get \"mmio\" resource\n");
+ return -ENODEV;
+ }
+
epf_mhi->mmio_phys = res->start;
epf_mhi->mmio_size = resource_size(res);
@@ -882,8 +907,8 @@ static void pci_epf_mhi_unbind(struct pci_epf *epf)
/*
* Forcefully power down the MHI EP stack. Only way to bring the MHI EP
- * stack back to working state after successive bind is by getting BME
- * from host.
+ * stack back to working state after successive bind is by getting Bus
+ * Master Enable event from host.
*/
if (mhi_cntrl->mhi_dev) {
mhi_ep_power_down(mhi_cntrl);
@@ -897,10 +922,11 @@ static void pci_epf_mhi_unbind(struct pci_epf *epf)
}
static const struct pci_epc_event_ops pci_epf_mhi_event_ops = {
- .core_init = pci_epf_mhi_core_init,
+ .epc_init = pci_epf_mhi_epc_init,
+ .epc_deinit = pci_epf_mhi_epc_deinit,
.link_up = pci_epf_mhi_link_up,
.link_down = pci_epf_mhi_link_down,
- .bme = pci_epf_mhi_bme,
+ .bus_master_enable = pci_epf_mhi_bus_master_enable,
};
static int pci_epf_mhi_probe(struct pci_epf *epf,
diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c
index cd4ffb39dcdc..50eb4106369f 100644
--- a/drivers/pci/endpoint/functions/pci-epf-test.c
+++ b/drivers/pci/endpoint/functions/pci-epf-test.c
@@ -44,6 +44,11 @@
#define TIMER_RESOLUTION 1
+#define CAP_UNALIGNED_ACCESS BIT(0)
+#define CAP_MSI BIT(1)
+#define CAP_MSIX BIT(2)
+#define CAP_INTX BIT(3)
+
static struct workqueue_struct *kpcitest_workqueue;
struct pci_epf_test {
@@ -64,16 +69,17 @@ struct pci_epf_test {
};
struct pci_epf_test_reg {
- u32 magic;
- u32 command;
- u32 status;
- u64 src_addr;
- u64 dst_addr;
- u32 size;
- u32 checksum;
- u32 irq_type;
- u32 irq_number;
- u32 flags;
+ __le32 magic;
+ __le32 command;
+ __le32 status;
+ __le64 src_addr;
+ __le64 dst_addr;
+ __le32 size;
+ __le32 checksum;
+ __le32 irq_type;
+ __le32 irq_number;
+ __le32 flags;
+ __le32 caps;
} __packed;
static struct pci_epf_header test_header = {
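The new caps word lets the host learn what the endpoint supports before issuing commands. A hypothetical host-side read, assuming the test register BAR has been ioremapped; with the __packed layout above, caps sits at byte offset 0x30:

    #include <linux/bits.h>
    #include <linux/io.h>

    #define EXAMPLE_CAP_MSIX        BIT(2)  /* mirrors CAP_MSIX above */

    static bool example_host_has_msix(void __iomem *test_reg_base)
    {
            /* readl() already converts from little-endian to CPU order */
            u32 caps = readl(test_reg_base + 0x30);

            return caps & EXAMPLE_CAP_MSIX;
    }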
@@ -251,7 +257,7 @@ static int pci_epf_test_init_dma_chan(struct pci_epf_test *epf_test)
fail_back_rx:
dma_release_channel(epf_test->dma_chan_rx);
- epf_test->dma_chan_tx = NULL;
+ epf_test->dma_chan_rx = NULL;
fail_back_tx:
dma_cap_zero(mask);
@@ -291,8 +297,6 @@ static void pci_epf_test_clean_dma_chan(struct pci_epf_test *epf_test)
dma_release_channel(epf_test->dma_chan_rx);
epf_test->dma_chan_rx = NULL;
-
- return;
}
static void pci_epf_test_print_rate(struct pci_epf_test *epf_test,
@@ -317,250 +321,281 @@ static void pci_epf_test_print_rate(struct pci_epf_test *epf_test,
static void pci_epf_test_copy(struct pci_epf_test *epf_test,
struct pci_epf_test_reg *reg)
{
- int ret;
- void __iomem *src_addr;
- void __iomem *dst_addr;
- phys_addr_t src_phys_addr;
- phys_addr_t dst_phys_addr;
+ int ret = 0;
struct timespec64 start, end;
struct pci_epf *epf = epf_test->epf;
- struct device *dev = &epf->dev;
struct pci_epc *epc = epf->epc;
-
- src_addr = pci_epc_mem_alloc_addr(epc, &src_phys_addr, reg->size);
- if (!src_addr) {
- dev_err(dev, "Failed to allocate source address\n");
- reg->status = STATUS_SRC_ADDR_INVALID;
- ret = -ENOMEM;
- goto err;
- }
-
- ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, src_phys_addr,
- reg->src_addr, reg->size);
- if (ret) {
- dev_err(dev, "Failed to map source address\n");
- reg->status = STATUS_SRC_ADDR_INVALID;
- goto err_src_addr;
- }
-
- dst_addr = pci_epc_mem_alloc_addr(epc, &dst_phys_addr, reg->size);
- if (!dst_addr) {
- dev_err(dev, "Failed to allocate destination address\n");
- reg->status = STATUS_DST_ADDR_INVALID;
- ret = -ENOMEM;
- goto err_src_map_addr;
+ struct device *dev = &epf->dev;
+ struct pci_epc_map src_map, dst_map;
+ u64 src_addr = le64_to_cpu(reg->src_addr);
+ u64 dst_addr = le64_to_cpu(reg->dst_addr);
+ size_t orig_size, copy_size;
+ ssize_t map_size = 0;
+ u32 flags = le32_to_cpu(reg->flags);
+ u32 status = 0;
+ void *copy_buf = NULL, *buf;
+
+ orig_size = copy_size = le32_to_cpu(reg->size);
+
+ if (flags & FLAG_USE_DMA) {
+ if (!dma_has_cap(DMA_MEMCPY, epf_test->dma_chan_tx->device->cap_mask)) {
+ dev_err(dev, "DMA controller doesn't support MEMCPY\n");
+ ret = -EINVAL;
+ goto set_status;
+ }
+ } else {
+ copy_buf = kzalloc(copy_size, GFP_KERNEL);
+ if (!copy_buf) {
+ ret = -ENOMEM;
+ goto set_status;
+ }
+ buf = copy_buf;
}
- ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, dst_phys_addr,
- reg->dst_addr, reg->size);
- if (ret) {
- dev_err(dev, "Failed to map destination address\n");
- reg->status = STATUS_DST_ADDR_INVALID;
- goto err_dst_addr;
- }
+ while (copy_size) {
+ ret = pci_epc_mem_map(epc, epf->func_no, epf->vfunc_no,
+ src_addr, copy_size, &src_map);
+ if (ret) {
+ dev_err(dev, "Failed to map source address\n");
+ status = STATUS_SRC_ADDR_INVALID;
+ goto free_buf;
+ }
- ktime_get_ts64(&start);
- if (reg->flags & FLAG_USE_DMA) {
- if (epf_test->dma_private) {
- dev_err(dev, "Cannot transfer data using DMA\n");
- ret = -EINVAL;
- goto err_map_addr;
+ ret = pci_epc_mem_map(epf->epc, epf->func_no, epf->vfunc_no,
+ dst_addr, copy_size, &dst_map);
+ if (ret) {
+ dev_err(dev, "Failed to map destination address\n");
+ status = STATUS_DST_ADDR_INVALID;
+ pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no,
+ &src_map);
+ goto free_buf;
}
- ret = pci_epf_test_data_transfer(epf_test, dst_phys_addr,
- src_phys_addr, reg->size, 0,
- DMA_MEM_TO_MEM);
- if (ret)
- dev_err(dev, "Data transfer failed\n");
- } else {
- void *buf;
+ map_size = min_t(size_t, dst_map.pci_size, src_map.pci_size);
- buf = kzalloc(reg->size, GFP_KERNEL);
- if (!buf) {
- ret = -ENOMEM;
- goto err_map_addr;
+ ktime_get_ts64(&start);
+ if (flags & FLAG_USE_DMA) {
+ ret = pci_epf_test_data_transfer(epf_test,
+ dst_map.phys_addr, src_map.phys_addr,
+ map_size, 0, DMA_MEM_TO_MEM);
+ if (ret) {
+ dev_err(dev, "Data transfer failed\n");
+ goto unmap;
+ }
+ } else {
+ memcpy_fromio(buf, src_map.virt_addr, map_size);
+ memcpy_toio(dst_map.virt_addr, buf, map_size);
+ buf += map_size;
}
+ ktime_get_ts64(&end);
- memcpy_fromio(buf, src_addr, reg->size);
- memcpy_toio(dst_addr, buf, reg->size);
- kfree(buf);
- }
- ktime_get_ts64(&end);
- pci_epf_test_print_rate(epf_test, "COPY", reg->size, &start, &end,
- reg->flags & FLAG_USE_DMA);
+ copy_size -= map_size;
+ src_addr += map_size;
+ dst_addr += map_size;
-err_map_addr:
- pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, dst_phys_addr);
+ pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &dst_map);
+ pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &src_map);
+ map_size = 0;
+ }
-err_dst_addr:
- pci_epc_mem_free_addr(epc, dst_phys_addr, dst_addr, reg->size);
+ pci_epf_test_print_rate(epf_test, "COPY", orig_size, &start, &end,
+ flags & FLAG_USE_DMA);
-err_src_map_addr:
- pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, src_phys_addr);
+unmap:
+ if (map_size) {
+ pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &dst_map);
+ pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &src_map);
+ }
-err_src_addr:
- pci_epc_mem_free_addr(epc, src_phys_addr, src_addr, reg->size);
+free_buf:
+ kfree(copy_buf);
-err:
+set_status:
if (!ret)
- reg->status |= STATUS_COPY_SUCCESS;
+ status |= STATUS_COPY_SUCCESS;
else
- reg->status |= STATUS_COPY_FAIL;
+ status |= STATUS_COPY_FAIL;
+ reg->status = cpu_to_le32(status);
}
static void pci_epf_test_read(struct pci_epf_test *epf_test,
struct pci_epf_test_reg *reg)
{
- int ret;
- void __iomem *src_addr;
- void *buf;
+ int ret = 0;
+ void *src_buf, *buf;
u32 crc32;
- phys_addr_t phys_addr;
+ struct pci_epc_map map;
phys_addr_t dst_phys_addr;
struct timespec64 start, end;
struct pci_epf *epf = epf_test->epf;
- struct device *dev = &epf->dev;
struct pci_epc *epc = epf->epc;
+ struct device *dev = &epf->dev;
struct device *dma_dev = epf->epc->dev.parent;
+ u64 src_addr = le64_to_cpu(reg->src_addr);
+ size_t orig_size, src_size;
+ ssize_t map_size = 0;
+ u32 flags = le32_to_cpu(reg->flags);
+ u32 checksum = le32_to_cpu(reg->checksum);
+ u32 status = 0;
- src_addr = pci_epc_mem_alloc_addr(epc, &phys_addr, reg->size);
- if (!src_addr) {
- dev_err(dev, "Failed to allocate address\n");
- reg->status = STATUS_SRC_ADDR_INVALID;
- ret = -ENOMEM;
- goto err;
- }
-
- ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, phys_addr,
- reg->src_addr, reg->size);
- if (ret) {
- dev_err(dev, "Failed to map address\n");
- reg->status = STATUS_SRC_ADDR_INVALID;
- goto err_addr;
- }
+ orig_size = src_size = le32_to_cpu(reg->size);
- buf = kzalloc(reg->size, GFP_KERNEL);
- if (!buf) {
+ src_buf = kzalloc(src_size, GFP_KERNEL);
+ if (!src_buf) {
ret = -ENOMEM;
- goto err_map_addr;
+ goto set_status;
}
+ buf = src_buf;
- if (reg->flags & FLAG_USE_DMA) {
- dst_phys_addr = dma_map_single(dma_dev, buf, reg->size,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(dma_dev, dst_phys_addr)) {
- dev_err(dev, "Failed to map destination buffer addr\n");
- ret = -ENOMEM;
- goto err_dma_map;
+ while (src_size) {
+ ret = pci_epc_mem_map(epc, epf->func_no, epf->vfunc_no,
+ src_addr, src_size, &map);
+ if (ret) {
+ dev_err(dev, "Failed to map address\n");
+ status = STATUS_SRC_ADDR_INVALID;
+ goto free_buf;
}
- ktime_get_ts64(&start);
- ret = pci_epf_test_data_transfer(epf_test, dst_phys_addr,
- phys_addr, reg->size,
- reg->src_addr, DMA_DEV_TO_MEM);
- if (ret)
- dev_err(dev, "Data transfer failed\n");
- ktime_get_ts64(&end);
+ map_size = map.pci_size;
+ if (flags & FLAG_USE_DMA) {
+ dst_phys_addr = dma_map_single(dma_dev, buf, map_size,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(dma_dev, dst_phys_addr)) {
+ dev_err(dev,
+ "Failed to map destination buffer addr\n");
+ ret = -ENOMEM;
+ goto unmap;
+ }
+
+ ktime_get_ts64(&start);
+ ret = pci_epf_test_data_transfer(epf_test,
+ dst_phys_addr, map.phys_addr,
+ map_size, src_addr, DMA_DEV_TO_MEM);
+ if (ret)
+ dev_err(dev, "Data transfer failed\n");
+ ktime_get_ts64(&end);
+
+ dma_unmap_single(dma_dev, dst_phys_addr, map_size,
+ DMA_FROM_DEVICE);
+
+ if (ret)
+ goto unmap;
+ } else {
+ ktime_get_ts64(&start);
+ memcpy_fromio(buf, map.virt_addr, map_size);
+ ktime_get_ts64(&end);
+ }
- dma_unmap_single(dma_dev, dst_phys_addr, reg->size,
- DMA_FROM_DEVICE);
- } else {
- ktime_get_ts64(&start);
- memcpy_fromio(buf, src_addr, reg->size);
- ktime_get_ts64(&end);
+ src_size -= map_size;
+ src_addr += map_size;
+ buf += map_size;
+
+ pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &map);
+ map_size = 0;
}
- pci_epf_test_print_rate(epf_test, "READ", reg->size, &start, &end,
- reg->flags & FLAG_USE_DMA);
+ pci_epf_test_print_rate(epf_test, "READ", orig_size, &start, &end,
+ flags & FLAG_USE_DMA);
- crc32 = crc32_le(~0, buf, reg->size);
- if (crc32 != reg->checksum)
+ crc32 = crc32_le(~0, src_buf, orig_size);
+ if (crc32 != checksum)
ret = -EIO;
-err_dma_map:
- kfree(buf);
+unmap:
+ if (map_size)
+ pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &map);
-err_map_addr:
- pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, phys_addr);
+free_buf:
+ kfree(src_buf);
-err_addr:
- pci_epc_mem_free_addr(epc, phys_addr, src_addr, reg->size);
-
-err:
+set_status:
if (!ret)
- reg->status |= STATUS_READ_SUCCESS;
+ status |= STATUS_READ_SUCCESS;
else
- reg->status |= STATUS_READ_FAIL;
+ status |= STATUS_READ_FAIL;
+ reg->status = cpu_to_le32(status);
}
static void pci_epf_test_write(struct pci_epf_test *epf_test,
struct pci_epf_test_reg *reg)
{
- int ret;
- void __iomem *dst_addr;
- void *buf;
- phys_addr_t phys_addr;
+ int ret = 0;
+ void *dst_buf, *buf;
+ struct pci_epc_map map;
phys_addr_t src_phys_addr;
struct timespec64 start, end;
struct pci_epf *epf = epf_test->epf;
- struct device *dev = &epf->dev;
struct pci_epc *epc = epf->epc;
+ struct device *dev = &epf->dev;
struct device *dma_dev = epf->epc->dev.parent;
+ u64 dst_addr = le64_to_cpu(reg->dst_addr);
+ size_t orig_size, dst_size;
+ ssize_t map_size = 0;
+ u32 flags = le32_to_cpu(reg->flags);
+ u32 status = 0;
- dst_addr = pci_epc_mem_alloc_addr(epc, &phys_addr, reg->size);
- if (!dst_addr) {
- dev_err(dev, "Failed to allocate address\n");
- reg->status = STATUS_DST_ADDR_INVALID;
- ret = -ENOMEM;
- goto err;
- }
-
- ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, phys_addr,
- reg->dst_addr, reg->size);
- if (ret) {
- dev_err(dev, "Failed to map address\n");
- reg->status = STATUS_DST_ADDR_INVALID;
- goto err_addr;
- }
+ orig_size = dst_size = le32_to_cpu(reg->size);
- buf = kzalloc(reg->size, GFP_KERNEL);
- if (!buf) {
+ dst_buf = kzalloc(dst_size, GFP_KERNEL);
+ if (!dst_buf) {
ret = -ENOMEM;
- goto err_map_addr;
+ goto set_status;
}
+ get_random_bytes(dst_buf, dst_size);
+ reg->checksum = cpu_to_le32(crc32_le(~0, dst_buf, dst_size));
+ buf = dst_buf;
- get_random_bytes(buf, reg->size);
- reg->checksum = crc32_le(~0, buf, reg->size);
-
- if (reg->flags & FLAG_USE_DMA) {
- src_phys_addr = dma_map_single(dma_dev, buf, reg->size,
- DMA_TO_DEVICE);
- if (dma_mapping_error(dma_dev, src_phys_addr)) {
- dev_err(dev, "Failed to map source buffer addr\n");
- ret = -ENOMEM;
- goto err_dma_map;
+ while (dst_size) {
+ ret = pci_epc_mem_map(epc, epf->func_no, epf->vfunc_no,
+ dst_addr, dst_size, &map);
+ if (ret) {
+ dev_err(dev, "Failed to map address\n");
+ status = STATUS_DST_ADDR_INVALID;
+ goto free_buf;
}
- ktime_get_ts64(&start);
+ map_size = map.pci_size;
+ if (flags & FLAG_USE_DMA) {
+ src_phys_addr = dma_map_single(dma_dev, buf, map_size,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dma_dev, src_phys_addr)) {
+ dev_err(dev,
+ "Failed to map source buffer addr\n");
+ ret = -ENOMEM;
+ goto unmap;
+ }
+
+ ktime_get_ts64(&start);
+
+ ret = pci_epf_test_data_transfer(epf_test,
+ map.phys_addr, src_phys_addr,
+ map_size, dst_addr,
+ DMA_MEM_TO_DEV);
+ if (ret)
+ dev_err(dev, "Data transfer failed\n");
+ ktime_get_ts64(&end);
+
+ dma_unmap_single(dma_dev, src_phys_addr, map_size,
+ DMA_TO_DEVICE);
+
+ if (ret)
+ goto unmap;
+ } else {
+ ktime_get_ts64(&start);
+ memcpy_toio(map.virt_addr, buf, map_size);
+ ktime_get_ts64(&end);
+ }
- ret = pci_epf_test_data_transfer(epf_test, phys_addr,
- src_phys_addr, reg->size,
- reg->dst_addr,
- DMA_MEM_TO_DEV);
- if (ret)
- dev_err(dev, "Data transfer failed\n");
- ktime_get_ts64(&end);
+ dst_size -= map_size;
+ dst_addr += map_size;
+ buf += map_size;
- dma_unmap_single(dma_dev, src_phys_addr, reg->size,
- DMA_TO_DEVICE);
- } else {
- ktime_get_ts64(&start);
- memcpy_toio(dst_addr, buf, reg->size);
- ktime_get_ts64(&end);
+ pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &map);
+ map_size = 0;
}
- pci_epf_test_print_rate(epf_test, "WRITE", reg->size, &start, &end,
- reg->flags & FLAG_USE_DMA);
+ pci_epf_test_print_rate(epf_test, "WRITE", orig_size, &start, &end,
+ flags & FLAG_USE_DMA);
/*
* wait 1ms in order for the write to complete. Without this delay L3
@@ -568,20 +603,19 @@ static void pci_epf_test_write(struct pci_epf_test *epf_test,
*/
usleep_range(1000, 2000);
-err_dma_map:
- kfree(buf);
+unmap:
+ if (map_size)
+ pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &map);
-err_map_addr:
- pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, phys_addr);
+free_buf:
+ kfree(dst_buf);
-err_addr:
- pci_epc_mem_free_addr(epc, phys_addr, dst_addr, reg->size);
-
-err:
+set_status:
if (!ret)
- reg->status |= STATUS_WRITE_SUCCESS;
+ status |= STATUS_WRITE_SUCCESS;
else
- reg->status |= STATUS_WRITE_FAIL;
+ status |= STATUS_WRITE_FAIL;
+ reg->status = cpu_to_le32(status);
}
static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test,
@@ -590,39 +624,42 @@ static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test,
struct pci_epf *epf = epf_test->epf;
struct device *dev = &epf->dev;
struct pci_epc *epc = epf->epc;
- u32 status = reg->status | STATUS_IRQ_RAISED;
+ u32 status = le32_to_cpu(reg->status);
+ u32 irq_number = le32_to_cpu(reg->irq_number);
+ u32 irq_type = le32_to_cpu(reg->irq_type);
int count;
/*
* Set the status before raising the IRQ to ensure that the host sees
* the updated value when it gets the IRQ.
*/
- WRITE_ONCE(reg->status, status);
+ status |= STATUS_IRQ_RAISED;
+ WRITE_ONCE(reg->status, cpu_to_le32(status));
- switch (reg->irq_type) {
+ switch (irq_type) {
case IRQ_TYPE_INTX:
pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
PCI_IRQ_INTX, 0);
break;
case IRQ_TYPE_MSI:
count = pci_epc_get_msi(epc, epf->func_no, epf->vfunc_no);
- if (reg->irq_number > count || count <= 0) {
+ if (irq_number > count || count <= 0) {
dev_err(dev, "Invalid MSI IRQ number %d / %d\n",
- reg->irq_number, count);
+ irq_number, count);
return;
}
pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
- PCI_IRQ_MSI, reg->irq_number);
+ PCI_IRQ_MSI, irq_number);
break;
case IRQ_TYPE_MSIX:
count = pci_epc_get_msix(epc, epf->func_no, epf->vfunc_no);
- if (reg->irq_number > count || count <= 0) {
- dev_err(dev, "Invalid MSIX IRQ number %d / %d\n",
- reg->irq_number, count);
+ if (irq_number > count || count <= 0) {
+ dev_err(dev, "Invalid MSI-X IRQ number %d / %d\n",
+ irq_number, count);
return;
}
pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
- PCI_IRQ_MSIX, reg->irq_number);
+ PCI_IRQ_MSIX, irq_number);
break;
default:
dev_err(dev, "Failed to raise IRQ, unknown type\n");
@@ -639,21 +676,22 @@ static void pci_epf_test_cmd_handler(struct work_struct *work)
struct device *dev = &epf->dev;
enum pci_barno test_reg_bar = epf_test->test_reg_bar;
struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];
+ u32 irq_type = le32_to_cpu(reg->irq_type);
- command = READ_ONCE(reg->command);
+ command = le32_to_cpu(READ_ONCE(reg->command));
if (!command)
goto reset_handler;
WRITE_ONCE(reg->command, 0);
WRITE_ONCE(reg->status, 0);
- if ((READ_ONCE(reg->flags) & FLAG_USE_DMA) &&
+ if ((le32_to_cpu(READ_ONCE(reg->flags)) & FLAG_USE_DMA) &&
!epf_test->dma_supported) {
dev_err(dev, "Cannot transfer data using DMA\n");
goto reset_handler;
}
- if (reg->irq_type > IRQ_TYPE_MSIX) {
+ if (irq_type > IRQ_TYPE_MSIX) {
dev_err(dev, "Failed to detect IRQ type\n");
goto reset_handler;
}
@@ -686,57 +724,24 @@ reset_handler:
msecs_to_jiffies(1));
}
-static void pci_epf_test_unbind(struct pci_epf *epf)
-{
- struct pci_epf_test *epf_test = epf_get_drvdata(epf);
- struct pci_epc *epc = epf->epc;
- struct pci_epf_bar *epf_bar;
- int bar;
-
- cancel_delayed_work(&epf_test->cmd_handler);
- pci_epf_test_clean_dma_chan(epf_test);
- for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
- epf_bar = &epf->bar[bar];
-
- if (epf_test->reg[bar]) {
- pci_epc_clear_bar(epc, epf->func_no, epf->vfunc_no,
- epf_bar);
- pci_epf_free_space(epf, epf_test->reg[bar], bar,
- PRIMARY_INTERFACE);
- }
- }
-}
-
static int pci_epf_test_set_bar(struct pci_epf *epf)
{
- int bar, add;
- int ret;
- struct pci_epf_bar *epf_bar;
+ int bar, ret;
struct pci_epc *epc = epf->epc;
struct device *dev = &epf->dev;
struct pci_epf_test *epf_test = epf_get_drvdata(epf);
enum pci_barno test_reg_bar = epf_test->test_reg_bar;
- const struct pci_epc_features *epc_features;
- epc_features = epf_test->epc_features;
-
- for (bar = 0; bar < PCI_STD_NUM_BARS; bar += add) {
- epf_bar = &epf->bar[bar];
- /*
- * pci_epc_set_bar() sets PCI_BASE_ADDRESS_MEM_TYPE_64
- * if the specific implementation required a 64-bit BAR,
- * even if we only requested a 32-bit BAR.
- */
- add = (epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ? 2 : 1;
-
- if (epc_features->bar[bar].type == BAR_RESERVED)
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
+ if (!epf_test->reg[bar])
continue;
ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no,
- epf_bar);
+ &epf->bar[bar]);
if (ret) {
pci_epf_free_space(epf, epf_test->reg[bar], bar,
PRIMARY_INTERFACE);
+ epf_test->reg[bar] = NULL;
dev_err(dev, "Failed to set BAR%d\n", bar);
if (bar == test_reg_bar)
return ret;
@@ -746,22 +751,59 @@ static int pci_epf_test_set_bar(struct pci_epf *epf)
return 0;
}
-static int pci_epf_test_core_init(struct pci_epf *epf)
+static void pci_epf_test_clear_bar(struct pci_epf *epf)
+{
+ struct pci_epf_test *epf_test = epf_get_drvdata(epf);
+ struct pci_epc *epc = epf->epc;
+ int bar;
+
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
+ if (!epf_test->reg[bar])
+ continue;
+
+ pci_epc_clear_bar(epc, epf->func_no, epf->vfunc_no,
+ &epf->bar[bar]);
+ }
+}
+
+static void pci_epf_test_set_capabilities(struct pci_epf *epf)
+{
+ struct pci_epf_test *epf_test = epf_get_drvdata(epf);
+ enum pci_barno test_reg_bar = epf_test->test_reg_bar;
+ struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];
+ struct pci_epc *epc = epf->epc;
+ u32 caps = 0;
+
+ if (epc->ops->align_addr)
+ caps |= CAP_UNALIGNED_ACCESS;
+
+ if (epf_test->epc_features->msi_capable)
+ caps |= CAP_MSI;
+
+ if (epf_test->epc_features->msix_capable)
+ caps |= CAP_MSIX;
+
+ if (epf_test->epc_features->intx_capable)
+ caps |= CAP_INTX;
+
+ reg->caps = cpu_to_le32(caps);
+}
+
+static int pci_epf_test_epc_init(struct pci_epf *epf)
{
struct pci_epf_test *epf_test = epf_get_drvdata(epf);
struct pci_epf_header *header = epf->header;
- const struct pci_epc_features *epc_features;
+ const struct pci_epc_features *epc_features = epf_test->epc_features;
struct pci_epc *epc = epf->epc;
struct device *dev = &epf->dev;
- bool msix_capable = false;
- bool msi_capable = true;
+ bool linkup_notifier = false;
int ret;
- epc_features = pci_epc_get_features(epc, epf->func_no, epf->vfunc_no);
- if (epc_features) {
- msix_capable = epc_features->msix_capable;
- msi_capable = epc_features->msi_capable;
- }
+ epf_test->dma_supported = true;
+
+ ret = pci_epf_test_init_dma_chan(epf_test);
+ if (ret)
+ epf_test->dma_supported = false;
if (epf->vfunc_no <= 1) {
ret = pci_epc_write_header(epc, epf->func_no, epf->vfunc_no, header);
@@ -771,11 +813,13 @@ static int pci_epf_test_core_init(struct pci_epf *epf)
}
}
+ pci_epf_test_set_capabilities(epf);
+
ret = pci_epf_test_set_bar(epf);
if (ret)
return ret;
- if (msi_capable) {
+ if (epc_features->msi_capable) {
ret = pci_epc_set_msi(epc, epf->func_no, epf->vfunc_no,
epf->msi_interrupts);
if (ret) {
@@ -784,7 +828,7 @@ static int pci_epf_test_core_init(struct pci_epf *epf)
}
}
- if (msix_capable) {
+ if (epc_features->msix_capable) {
ret = pci_epc_set_msix(epc, epf->func_no, epf->vfunc_no,
epf->msix_interrupts,
epf_test->test_reg_bar,
@@ -795,9 +839,22 @@ static int pci_epf_test_core_init(struct pci_epf *epf)
}
}
+ linkup_notifier = epc_features->linkup_notifier;
+ if (!linkup_notifier)
+ queue_work(kpcitest_workqueue, &epf_test->cmd_handler.work);
+
return 0;
}
+static void pci_epf_test_epc_deinit(struct pci_epf *epf)
+{
+ struct pci_epf_test *epf_test = epf_get_drvdata(epf);
+
+ cancel_delayed_work_sync(&epf_test->cmd_handler);
+ pci_epf_test_clean_dma_chan(epf_test);
+ pci_epf_test_clear_bar(epf);
+}
+
static int pci_epf_test_link_up(struct pci_epf *epf)
{
struct pci_epf_test *epf_test = epf_get_drvdata(epf);
@@ -808,32 +865,38 @@ static int pci_epf_test_link_up(struct pci_epf *epf)
return 0;
}
+static int pci_epf_test_link_down(struct pci_epf *epf)
+{
+ struct pci_epf_test *epf_test = epf_get_drvdata(epf);
+
+ cancel_delayed_work_sync(&epf_test->cmd_handler);
+
+ return 0;
+}
+
static const struct pci_epc_event_ops pci_epf_test_event_ops = {
- .core_init = pci_epf_test_core_init,
+ .epc_init = pci_epf_test_epc_init,
+ .epc_deinit = pci_epf_test_epc_deinit,
.link_up = pci_epf_test_link_up,
+ .link_down = pci_epf_test_link_down,
};
static int pci_epf_test_alloc_space(struct pci_epf *epf)
{
struct pci_epf_test *epf_test = epf_get_drvdata(epf);
struct device *dev = &epf->dev;
- struct pci_epf_bar *epf_bar;
size_t msix_table_size = 0;
size_t test_reg_bar_size;
size_t pba_size = 0;
- bool msix_capable;
void *base;
- int bar, add;
enum pci_barno test_reg_bar = epf_test->test_reg_bar;
- const struct pci_epc_features *epc_features;
+ enum pci_barno bar;
+ const struct pci_epc_features *epc_features = epf_test->epc_features;
size_t test_reg_size;
- epc_features = epf_test->epc_features;
-
test_reg_bar_size = ALIGN(sizeof(struct pci_epf_test_reg), 128);
- msix_capable = epc_features->msix_capable;
- if (msix_capable) {
+ if (epc_features->msix_capable) {
msix_table_size = PCI_MSIX_ENTRY_SIZE * epf->msix_interrupts;
epf_test->msix_table_offset = test_reg_bar_size;
/* Align to QWORD or 8 Bytes */
@@ -849,16 +912,14 @@ static int pci_epf_test_alloc_space(struct pci_epf *epf)
}
epf_test->reg[test_reg_bar] = base;
- for (bar = 0; bar < PCI_STD_NUM_BARS; bar += add) {
- epf_bar = &epf->bar[bar];
- add = (epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ? 2 : 1;
+ for (bar = BAR_0; bar < PCI_STD_NUM_BARS; bar++) {
+ bar = pci_epc_get_next_free_bar(epc_features, bar);
+ if (bar == NO_BAR)
+ break;
if (bar == test_reg_bar)
continue;
- if (epc_features->bar[bar].type == BAR_RESERVED)
- continue;
-
base = pci_epf_alloc_space(epf, bar_size[bar], bar,
epc_features, PRIMARY_INTERFACE);
if (!base)
@@ -870,16 +931,18 @@ static int pci_epf_test_alloc_space(struct pci_epf *epf)
return 0;
}
-static void pci_epf_configure_bar(struct pci_epf *epf,
- const struct pci_epc_features *epc_features)
+static void pci_epf_test_free_space(struct pci_epf *epf)
{
- struct pci_epf_bar *epf_bar;
- int i;
+ struct pci_epf_test *epf_test = epf_get_drvdata(epf);
+ int bar;
- for (i = 0; i < PCI_STD_NUM_BARS; i++) {
- epf_bar = &epf->bar[i];
- if (epc_features->bar[i].only_64bit)
- epf_bar->flags |= PCI_BASE_ADDRESS_MEM_TYPE_64;
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
+ if (!epf_test->reg[bar])
+ continue;
+
+ pci_epf_free_space(epf, epf_test->reg[bar], bar,
+ PRIMARY_INTERFACE);
+ epf_test->reg[bar] = NULL;
}
}
@@ -890,8 +953,6 @@ static int pci_epf_test_bind(struct pci_epf *epf)
const struct pci_epc_features *epc_features;
enum pci_barno test_reg_bar = BAR_0;
struct pci_epc *epc = epf->epc;
- bool linkup_notifier = false;
- bool core_init_notifier = false;
if (WARN_ON_ONCE(!epc))
return -EINVAL;
@@ -902,12 +963,9 @@ static int pci_epf_test_bind(struct pci_epf *epf)
return -EOPNOTSUPP;
}
- linkup_notifier = epc_features->linkup_notifier;
- core_init_notifier = epc_features->core_init_notifier;
test_reg_bar = pci_epc_get_first_free_bar(epc_features);
if (test_reg_bar < 0)
return -EINVAL;
- pci_epf_configure_bar(epf, epc_features);
epf_test->test_reg_bar = test_reg_bar;
epf_test->epc_features = epc_features;
@@ -916,22 +974,20 @@ static int pci_epf_test_bind(struct pci_epf *epf)
if (ret)
return ret;
- if (!core_init_notifier) {
- ret = pci_epf_test_core_init(epf);
- if (ret)
- return ret;
- }
-
- epf_test->dma_supported = true;
-
- ret = pci_epf_test_init_dma_chan(epf_test);
- if (ret)
- epf_test->dma_supported = false;
+ return 0;
+}
- if (!linkup_notifier && !core_init_notifier)
- queue_work(kpcitest_workqueue, &epf_test->cmd_handler.work);
+static void pci_epf_test_unbind(struct pci_epf *epf)
+{
+ struct pci_epf_test *epf_test = epf_get_drvdata(epf);
+ struct pci_epc *epc = epf->epc;
- return 0;
+ cancel_delayed_work_sync(&epf_test->cmd_handler);
+ if (epc->init_complete) {
+ pci_epf_test_clean_dma_chan(epf_test);
+ pci_epf_test_clear_bar(epf);
+ }
+ pci_epf_test_free_space(epf);
}
static const struct pci_epf_device_id pci_epf_test_ids[] = {
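Taken together, these changes move all controller-dependent setup out of bind() and into the epc_init()/epc_deinit() pair, while link_down() merely quiesces the command handler. A minimal skeleton of the resulting driver shape, using hypothetical names that are not part of this patch:

    #include <linux/pci-epf.h>

    static int example_epc_init(struct pci_epf *epf)
    {
            /* Controller ready: write header, set BARs, set up MSI/DMA. */
            return 0;
    }

    static void example_epc_deinit(struct pci_epf *epf)
    {
            /* Controller going away: undo everything done in .epc_init. */
    }

    static const struct pci_epc_event_ops example_event_ops = {
            .epc_init   = example_epc_init,
            .epc_deinit = example_epc_deinit,
    };

bind() is then reduced to allocating BAR backing memory, and unbind() only touches the controller when epc->init_complete indicates it is still initialized.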
diff --git a/drivers/pci/endpoint/functions/pci-epf-vntb.c b/drivers/pci/endpoint/functions/pci-epf-vntb.c
index 8e779eecd62d..874cb097b093 100644
--- a/drivers/pci/endpoint/functions/pci-epf-vntb.c
+++ b/drivers/pci/endpoint/functions/pci-epf-vntb.c
@@ -799,8 +799,9 @@ err_config_interrupt:
*/
static void epf_ntb_epc_cleanup(struct epf_ntb *ntb)
{
- epf_ntb_db_bar_clear(ntb);
epf_ntb_mw_bar_clear(ntb, ntb->num_mws);
+ epf_ntb_db_bar_clear(ntb);
+ epf_ntb_config_sspad_bar_clear(ntb);
}
#define EPF_NTB_R(_name) \
@@ -1018,8 +1019,10 @@ static int vpci_scan_bus(void *sysdata)
struct epf_ntb *ndev = sysdata;
vpci_bus = pci_scan_bus(ndev->vbus_number, &vpci_ops, sysdata);
- if (vpci_bus)
- pr_err("create pci bus\n");
+ if (!vpci_bus) {
+ pr_err("create pci bus failed\n");
+ return -EINVAL;
+ }
pci_bus_add_devices(vpci_bus);
@@ -1335,13 +1338,19 @@ static int epf_ntb_bind(struct pci_epf *epf)
ret = pci_register_driver(&vntb_pci_driver);
if (ret) {
dev_err(dev, "failure register vntb pci driver\n");
- goto err_bar_alloc;
+ goto err_epc_cleanup;
}
- vpci_scan_bus(ntb);
+ ret = vpci_scan_bus(ntb);
+ if (ret)
+ goto err_unregister;
return 0;
+err_unregister:
+ pci_unregister_driver(&vntb_pci_driver);
+err_epc_cleanup:
+ epf_ntb_epc_cleanup(ntb);
err_bar_alloc:
epf_ntb_config_spad_bar_free(ntb);
diff --git a/drivers/pci/endpoint/pci-ep-cfs.c b/drivers/pci/endpoint/pci-ep-cfs.c
index 0ea64e24ed61..d712c7a866d2 100644
--- a/drivers/pci/endpoint/pci-ep-cfs.c
+++ b/drivers/pci/endpoint/pci-ep-cfs.c
@@ -23,7 +23,6 @@ struct pci_epf_group {
struct config_group group;
struct config_group primary_epc_group;
struct config_group secondary_epc_group;
- struct config_group *type_group;
struct delayed_work cfs_work;
struct pci_epf *epf;
int index;
@@ -64,6 +63,9 @@ static int pci_secondary_epc_epf_link(struct config_item *epf_item,
return ret;
}
+ /* Notify the EPF driver of any pending EPC initialization completion */
+ pci_epc_notify_pending_init(epc, epf);
+
return 0;
}
@@ -125,6 +127,9 @@ static int pci_primary_epc_epf_link(struct config_item *epf_item,
return ret;
}
+ /* Notify the EPF driver of any pending EPC initialization completion */
+ pci_epc_notify_pending_init(epc, epf);
+
return 0;
}
@@ -230,6 +235,9 @@ static int pci_epc_epf_link(struct config_item *epc_item,
return ret;
}
+ /* Notify the EPF driver of any pending EPC initialization completion */
+ pci_epc_notify_pending_init(epc, epf);
+
return 0;
}
diff --git a/drivers/pci/endpoint/pci-epc-core.c b/drivers/pci/endpoint/pci-epc-core.c
index da3fc0795b0b..beabea00af91 100644
--- a/drivers/pci/endpoint/pci-epc-core.c
+++ b/drivers/pci/endpoint/pci-epc-core.c
@@ -14,7 +14,9 @@
#include <linux/pci-epf.h>
#include <linux/pci-ep-cfs.h>
-static struct class *pci_epc_class;
+static const struct class pci_epc_class = {
+ .name = "pci_epc",
+};
static void devm_pci_epc_release(struct device *dev, void *res)
{
@@ -23,13 +25,6 @@ static void devm_pci_epc_release(struct device *dev, void *res)
pci_epc_destroy(epc);
}
-static int devm_pci_epc_match(struct device *dev, void *res, void *match_data)
-{
- struct pci_epc **epc = res;
-
- return *epc == match_data;
-}
-
/**
* pci_epc_put() - release the PCI endpoint controller
* @epc: epc returned by pci_epc_get()
@@ -58,26 +53,17 @@ struct pci_epc *pci_epc_get(const char *epc_name)
int ret = -EINVAL;
struct pci_epc *epc;
struct device *dev;
- struct class_dev_iter iter;
-
- class_dev_iter_init(&iter, pci_epc_class, NULL, NULL);
- while ((dev = class_dev_iter_next(&iter))) {
- if (strcmp(epc_name, dev_name(dev)))
- continue;
- epc = to_pci_epc(dev);
- if (!try_module_get(epc->ops->owner)) {
- ret = -EINVAL;
- goto err;
- }
+ dev = class_find_device_by_name(&pci_epc_class, epc_name);
+ if (!dev)
+ goto err;
- class_dev_iter_exit(&iter);
- get_device(&epc->dev);
+ epc = to_pci_epc(dev);
+ if (try_module_get(epc->ops->owner))
return epc;
- }
err:
- class_dev_iter_exit(&iter);
+ put_device(dev);
return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(pci_epc_get);
@@ -126,6 +112,18 @@ enum pci_barno pci_epc_get_next_free_bar(const struct pci_epc_features
}
EXPORT_SYMBOL_GPL(pci_epc_get_next_free_bar);
+static bool pci_epc_function_is_valid(struct pci_epc *epc,
+ u8 func_no, u8 vfunc_no)
+{
+ if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
+ return false;
+
+ if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
+ return false;
+
+ return true;
+}
+
/**
* pci_epc_get_features() - get the features supported by EPC
* @epc: the features supported by *this* EPC device will be returned
@@ -143,10 +141,7 @@ const struct pci_epc_features *pci_epc_get_features(struct pci_epc *epc,
{
const struct pci_epc_features *epc_features;
- if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
- return NULL;
-
- if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
+ if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
return NULL;
if (!epc->ops->get_features)
@@ -216,10 +211,7 @@ int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
{
int ret;
- if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
- return -EINVAL;
-
- if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
+ if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
return -EINVAL;
if (!epc->ops->raise_irq)
@@ -260,10 +252,7 @@ int pci_epc_map_msi_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
{
int ret;
- if (IS_ERR_OR_NULL(epc))
- return -EINVAL;
-
- if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
+ if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
return -EINVAL;
if (!epc->ops->map_msi_irq)
@@ -291,10 +280,7 @@ int pci_epc_get_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
int interrupt;
- if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
- return 0;
-
- if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
+ if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
return 0;
if (!epc->ops->get_msi)
@@ -327,11 +313,10 @@ int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no, u8 interrupts)
int ret;
u8 encode_int;
- if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
- interrupts < 1 || interrupts > 32)
+ if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
return -EINVAL;
- if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
+ if (interrupts < 1 || interrupts > 32)
return -EINVAL;
if (!epc->ops->set_msi)
@@ -359,10 +344,7 @@ int pci_epc_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
int interrupt;
- if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
- return 0;
-
- if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
+ if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
return 0;
if (!epc->ops->get_msix)
@@ -395,11 +377,10 @@ int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
{
int ret;
- if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
- interrupts < 1 || interrupts > 2048)
+ if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
return -EINVAL;
- if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
+ if (interrupts < 1 || interrupts > 2048)
return -EINVAL;
if (!epc->ops->set_msix)
@@ -426,10 +407,7 @@ EXPORT_SYMBOL_GPL(pci_epc_set_msix);
void pci_epc_unmap_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
phys_addr_t phys_addr)
{
- if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
- return;
-
- if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
+ if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
return;
if (!epc->ops->unmap_addr)
@@ -457,10 +435,7 @@ int pci_epc_map_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
{
int ret;
- if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
- return -EINVAL;
-
- if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
+ if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
return -EINVAL;
if (!epc->ops->map_addr)
@@ -476,6 +451,109 @@ int pci_epc_map_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
EXPORT_SYMBOL_GPL(pci_epc_map_addr);
/**
+ * pci_epc_mem_map() - allocate and map a PCI address to a CPU address
+ * @epc: the EPC device on which the CPU address is to be allocated and mapped
+ * @func_no: the physical endpoint function number in the EPC device
+ * @vfunc_no: the virtual endpoint function number in the physical function
+ * @pci_addr: PCI address to which the CPU address should be mapped
+ * @pci_size: the number of bytes to map starting from @pci_addr
+ * @map: where to return the mapping information
+ *
+ * Allocate a controller memory address region and map it to an RC PCI address
+ * region, taking into account the controller physical address mapping
+ * constraints using the controller operation align_addr(). If this operation is
+ * not defined, we assume that there are no alignment constraints for the
+ * mapping.
+ *
+ * The effective size of the PCI address range mapped from @pci_addr is
+ * indicated by @map->pci_size. This size may be less than the requested
+ * @pci_size. The local virtual CPU address for the mapping is indicated by
+ * @map->virt_addr (@map->phys_addr indicates the physical address).
+ * The size and CPU address of the controller memory allocated and mapped are
+ * respectively indicated by @map->map_size and @map->virt_base (and
+ * @map->phys_base for the physical address of @map->virt_base).
+ *
+ * Returns 0 on success and a negative error code in case of error.
+ */
+int pci_epc_mem_map(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ u64 pci_addr, size_t pci_size, struct pci_epc_map *map)
+{
+ size_t map_size = pci_size;
+ size_t map_offset = 0;
+ int ret;
+
+ if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
+ return -EINVAL;
+
+ if (!pci_size || !map)
+ return -EINVAL;
+
+ /*
+ * Align the PCI address to map. If the controller defines the
+ * .align_addr() operation, use it to determine the PCI address to map
+ * and the size of the mapping. Otherwise, assume that the controller
+ * has no alignment constraint.
+ */
+ memset(map, 0, sizeof(*map));
+ map->pci_addr = pci_addr;
+ if (epc->ops->align_addr)
+ map->map_pci_addr =
+ epc->ops->align_addr(epc, pci_addr,
+ &map_size, &map_offset);
+ else
+ map->map_pci_addr = pci_addr;
+ map->map_size = map_size;
+ if (map->map_pci_addr + map->map_size < pci_addr + pci_size)
+ map->pci_size = map->map_pci_addr + map->map_size - pci_addr;
+ else
+ map->pci_size = pci_size;
+
+ map->virt_base = pci_epc_mem_alloc_addr(epc, &map->phys_base,
+ map->map_size);
+ if (!map->virt_base)
+ return -ENOMEM;
+
+ map->phys_addr = map->phys_base + map_offset;
+ map->virt_addr = map->virt_base + map_offset;
+
+ ret = pci_epc_map_addr(epc, func_no, vfunc_no, map->phys_base,
+ map->map_pci_addr, map->map_size);
+ if (ret) {
+ pci_epc_mem_free_addr(epc, map->phys_base, map->virt_base,
+ map->map_size);
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pci_epc_mem_map);
+
+/**
+ * pci_epc_mem_unmap() - unmap and free a CPU address region
+ * @epc: the EPC device on which the CPU address is allocated and mapped
+ * @func_no: the physical endpoint function number in the EPC device
+ * @vfunc_no: the virtual endpoint function number in the physical function
+ * @map: the mapping information
+ *
+ * Unmap and free a CPU address region that was allocated and mapped with
+ * pci_epc_mem_map().
+ */
+void pci_epc_mem_unmap(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ struct pci_epc_map *map)
+{
+ if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
+ return;
+
+ if (!map || !map->virt_base)
+ return;
+
+ pci_epc_unmap_addr(epc, func_no, vfunc_no, map->phys_base);
+ pci_epc_mem_free_addr(epc, map->phys_base, map->virt_base,
+ map->map_size);
+}
+EXPORT_SYMBOL_GPL(pci_epc_mem_unmap);
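Because @map->pci_size may come back smaller than the requested size, callers are expected to loop, exactly as the pci-epf-test rework above does. A condensed sketch of that pattern (hypothetical helper, assuming a read from host memory):

    #include <linux/pci-epc.h>

    static int example_copy_from_host(struct pci_epc *epc, u8 fn, u8 vfn,
                                      u64 pci_addr, void *buf, size_t size)
    {
            struct pci_epc_map map;
            int ret;

            while (size) {
                    ret = pci_epc_mem_map(epc, fn, vfn, pci_addr, size, &map);
                    if (ret)
                            return ret;

                    /* Use only the chunk the controller could map. */
                    memcpy_fromio(buf, map.virt_addr, map.pci_size);

                    pci_epc_mem_unmap(epc, fn, vfn, &map);
                    pci_addr += map.pci_size;
                    buf += map.pci_size;
                    size -= map.pci_size;
            }

            return 0;
    }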
+
+/**
* pci_epc_clear_bar() - reset the BAR
* @epc: the EPC device for which the BAR has to be cleared
* @func_no: the physical endpoint function number in the EPC device
@@ -487,12 +565,11 @@ EXPORT_SYMBOL_GPL(pci_epc_map_addr);
void pci_epc_clear_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct pci_epf_bar *epf_bar)
{
- if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
- (epf_bar->barno == BAR_5 &&
- epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64))
+ if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
return;
- if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
+ if (epf_bar->barno == BAR_5 &&
+ epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64)
return;
if (!epc->ops->clear_bar)
@@ -516,21 +593,33 @@ EXPORT_SYMBOL_GPL(pci_epc_clear_bar);
int pci_epc_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct pci_epf_bar *epf_bar)
{
- int ret;
+ const struct pci_epc_features *epc_features;
+ enum pci_barno bar = epf_bar->barno;
int flags = epf_bar->flags;
+ int ret;
+
+ epc_features = pci_epc_get_features(epc, func_no, vfunc_no);
+ if (!epc_features)
+ return -EINVAL;
+
+ if (epc_features->bar[bar].type == BAR_RESIZABLE &&
+ (epf_bar->size < SZ_1M || (u64)epf_bar->size > (SZ_128G * 1024)))
+ return -EINVAL;
- if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
- (epf_bar->barno == BAR_5 &&
- flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ||
+ if (epc_features->bar[bar].type == BAR_FIXED &&
+ (epc_features->bar[bar].fixed_size != epf_bar->size))
+ return -EINVAL;
+
+ if (!is_power_of_2(epf_bar->size))
+ return -EINVAL;
+
+ if ((epf_bar->barno == BAR_5 && flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ||
(flags & PCI_BASE_ADDRESS_SPACE_IO &&
flags & PCI_BASE_ADDRESS_IO_MASK) ||
(upper_32_bits(epf_bar->size) &&
!(flags & PCI_BASE_ADDRESS_MEM_TYPE_64)))
return -EINVAL;
- if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
- return -EINVAL;
-
if (!epc->ops->set_bar)
return 0;
@@ -543,6 +632,33 @@ int pci_epc_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
EXPORT_SYMBOL_GPL(pci_epc_set_bar);
/**
+ * pci_epc_bar_size_to_rebar_cap() - convert a size to the representation used
+ * by the Resizable BAR Capability Register
+ * @size: the size to convert
+ * @cap: where to store the result
+ *
+ * Returns 0 on success and a negative error code in case of error.
+ */
+int pci_epc_bar_size_to_rebar_cap(size_t size, u32 *cap)
+{
+ /*
+ * As per PCIe r6.0, sec 7.8.6.2, min size for a resizable BAR is 1 MB,
+ * thus disallow a requested BAR size smaller than 1 MB.
+ * Disallow a requested BAR size larger than 128 TB.
+ */
+ if (size < SZ_1M || (u64)size > (SZ_128G * 1024))
+ return -EINVAL;
+
+ *cap = ilog2(size) - ilog2(SZ_1M);
+
+ /* Sizes in REBAR_CAP start at BIT(4). */
+ *cap = BIT(*cap + 4);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pci_epc_bar_size_to_rebar_cap);
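A worked example of the encoding: for a 4 MB BAR, ilog2(SZ_4M) - ilog2(SZ_1M) = 2, so the stored value is BIT(2 + 4) = 0x40, matching the capability register layout in which BIT(4) denotes 1 MB support:

    static void example_rebar_encoding(void)
    {
            u32 cap;

            if (!pci_epc_bar_size_to_rebar_cap(SZ_4M, &cap))
                    pr_info("REBAR cap bits: %#x\n", cap);  /* prints 0x40 */
    }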
+
+/**
* pci_epc_write_header() - write standard configuration header
* @epc: the EPC device to which the configuration header should be written
* @func_no: the physical endpoint function number in the EPC device
@@ -559,10 +675,7 @@ int pci_epc_write_header(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
{
int ret;
- if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
- return -EINVAL;
-
- if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
+ if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
return -EINVAL;
/* Only Virtual Function #1 has deviceID */
@@ -658,18 +771,18 @@ void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf,
if (IS_ERR_OR_NULL(epc) || !epf)
return;
+ mutex_lock(&epc->list_lock);
if (type == PRIMARY_INTERFACE) {
func_no = epf->func_no;
list = &epf->list;
+ epf->epc = NULL;
} else {
func_no = epf->sec_epc_func_no;
list = &epf->sec_epc_list;
+ epf->sec_epc = NULL;
}
-
- mutex_lock(&epc->list_lock);
clear_bit(func_no, &epc->function_num_map);
list_del(list);
- epf->epc = NULL;
mutex_unlock(&epc->list_lock);
}
EXPORT_SYMBOL_GPL(pci_epc_remove_epf);
@@ -727,9 +840,9 @@ void pci_epc_linkdown(struct pci_epc *epc)
EXPORT_SYMBOL_GPL(pci_epc_linkdown);
/**
- * pci_epc_init_notify() - Notify the EPF device that EPC device's core
- * initialization is completed.
- * @epc: the EPC device whose core initialization is completed
+ * pci_epc_init_notify() - Notify the EPF device that EPC device initialization
+ * is completed.
+ * @epc: the EPC device whose initialization is completed
*
* Invoke to notify the EPF device that the EPC device's initialization
* is completed.
@@ -744,23 +857,43 @@ void pci_epc_init_notify(struct pci_epc *epc)
mutex_lock(&epc->list_lock);
list_for_each_entry(epf, &epc->pci_epf, list) {
mutex_lock(&epf->lock);
- if (epf->event_ops && epf->event_ops->core_init)
- epf->event_ops->core_init(epf);
+ if (epf->event_ops && epf->event_ops->epc_init)
+ epf->event_ops->epc_init(epf);
mutex_unlock(&epf->lock);
}
+ epc->init_complete = true;
mutex_unlock(&epc->list_lock);
}
EXPORT_SYMBOL_GPL(pci_epc_init_notify);
/**
- * pci_epc_bme_notify() - Notify the EPF device that the EPC device has received
- * the BME event from the Root complex
- * @epc: the EPC device that received the BME event
+ * pci_epc_notify_pending_init() - Notify the EPF device of a pending EPC
+ *                                 initialization-complete event
+ * @epc: the EPC device whose initialization completion is pending notification
+ * @epf: the EPF device to be notified
*
- * Invoke to Notify the EPF device that the EPC device has received the Bus
- * Master Enable (BME) event from the Root complex
+ * Invoke to notify the EPF device that the EPC device's initialization is
+ * complete. This is used to deliver the notification when the EPC
+ * initialization completed before the EPF driver was bound.
*/
-void pci_epc_bme_notify(struct pci_epc *epc)
+void pci_epc_notify_pending_init(struct pci_epc *epc, struct pci_epf *epf)
+{
+ if (epc->init_complete) {
+ mutex_lock(&epf->lock);
+ if (epf->event_ops && epf->event_ops->epc_init)
+ epf->event_ops->epc_init(epf);
+ mutex_unlock(&epf->lock);
+ }
+}
+EXPORT_SYMBOL_GPL(pci_epc_notify_pending_init);
+
+/**
+ * pci_epc_deinit_notify() - Notify the EPF device about EPC deinitialization
+ * @epc: the EPC device whose deinitialization is completed
+ *
+ * Invoke to notify the EPF device that the EPC deinitialization is completed.
+ */
+void pci_epc_deinit_notify(struct pci_epc *epc)
{
struct pci_epf *epf;
@@ -770,44 +903,57 @@ void pci_epc_bme_notify(struct pci_epc *epc)
mutex_lock(&epc->list_lock);
list_for_each_entry(epf, &epc->pci_epf, list) {
mutex_lock(&epf->lock);
- if (epf->event_ops && epf->event_ops->bme)
- epf->event_ops->bme(epf);
+ if (epf->event_ops && epf->event_ops->epc_deinit)
+ epf->event_ops->epc_deinit(epf);
mutex_unlock(&epf->lock);
}
+ epc->init_complete = false;
mutex_unlock(&epc->list_lock);
}
-EXPORT_SYMBOL_GPL(pci_epc_bme_notify);
+EXPORT_SYMBOL_GPL(pci_epc_deinit_notify);
/**
- * pci_epc_destroy() - destroy the EPC device
- * @epc: the EPC device that has to be destroyed
+ * pci_epc_bus_master_enable_notify() - Notify the EPF device that the EPC
+ * device has received the Bus Master
+ * Enable event from the Root complex
+ * @epc: the EPC device that received the Bus Master Enable event
*
- * Invoke to destroy the PCI EPC device
+ * Notify the EPF device that the EPC device has generated the Bus Master Enable
+ * event due to the host setting the Bus Master Enable bit in the Command register.
*/
-void pci_epc_destroy(struct pci_epc *epc)
+void pci_epc_bus_master_enable_notify(struct pci_epc *epc)
{
- pci_ep_cfs_remove_epc_group(epc->group);
- device_unregister(&epc->dev);
+ struct pci_epf *epf;
+
+ if (IS_ERR_OR_NULL(epc))
+ return;
+
+ mutex_lock(&epc->list_lock);
+ list_for_each_entry(epf, &epc->pci_epf, list) {
+ mutex_lock(&epf->lock);
+ if (epf->event_ops && epf->event_ops->bus_master_enable)
+ epf->event_ops->bus_master_enable(epf);
+ mutex_unlock(&epf->lock);
+ }
+ mutex_unlock(&epc->list_lock);
}
-EXPORT_SYMBOL_GPL(pci_epc_destroy);
+EXPORT_SYMBOL_GPL(pci_epc_bus_master_enable_notify);
/**
- * devm_pci_epc_destroy() - destroy the EPC device
- * @dev: device that wants to destroy the EPC
+ * pci_epc_destroy() - destroy the EPC device
* @epc: the EPC device that has to be destroyed
*
- * Invoke to destroy the devres associated with this
- * pci_epc and destroy the EPC device.
+ * Invoke to destroy the PCI EPC device
*/
-void devm_pci_epc_destroy(struct device *dev, struct pci_epc *epc)
+void pci_epc_destroy(struct pci_epc *epc)
{
- int r;
-
- r = devres_destroy(dev, devm_pci_epc_release, devm_pci_epc_match,
- epc);
- dev_WARN_ONCE(dev, r, "couldn't find PCI EPC resource\n");
+ pci_ep_cfs_remove_epc_group(epc->group);
+#ifdef CONFIG_PCI_DOMAINS_GENERIC
+ pci_bus_release_domain_nr(epc->dev.parent, epc->domain_nr);
+#endif
+ device_unregister(&epc->dev);
}
-EXPORT_SYMBOL_GPL(devm_pci_epc_destroy);
+EXPORT_SYMBOL_GPL(pci_epc_destroy);
static void pci_epc_release(struct device *dev)
{
@@ -845,11 +991,21 @@ __pci_epc_create(struct device *dev, const struct pci_epc_ops *ops,
INIT_LIST_HEAD(&epc->pci_epf);
device_initialize(&epc->dev);
- epc->dev.class = pci_epc_class;
+ epc->dev.class = &pci_epc_class;
epc->dev.parent = dev;
epc->dev.release = pci_epc_release;
epc->ops = ops;
+#ifdef CONFIG_PCI_DOMAINS_GENERIC
+ epc->domain_nr = pci_bus_find_domain_nr(NULL, dev);
+#else
+ /*
+ * TODO: If the architecture doesn't support generic PCI
+ * domains, then a custom implementation has to be used.
+ */
+ WARN_ONCE(1, "This architecture doesn't support generic PCI domains\n");
+#endif
+
ret = dev_set_name(&epc->dev, "%s", dev_name(dev));
if (ret)
goto put_dev;
@@ -905,20 +1061,13 @@ EXPORT_SYMBOL_GPL(__devm_pci_epc_create);
static int __init pci_epc_init(void)
{
- pci_epc_class = class_create("pci_epc");
- if (IS_ERR(pci_epc_class)) {
- pr_err("failed to create pci epc class --> %ld\n",
- PTR_ERR(pci_epc_class));
- return PTR_ERR(pci_epc_class);
- }
-
- return 0;
+ return class_register(&pci_epc_class);
}
module_init(pci_epc_init);
static void __exit pci_epc_exit(void)
{
- class_destroy(pci_epc_class);
+ class_unregister(&pci_epc_class);
}
module_exit(pci_epc_exit);
diff --git a/drivers/pci/endpoint/pci-epc-mem.c b/drivers/pci/endpoint/pci-epc-mem.c
index a9c028f58da1..218a60e945db 100644
--- a/drivers/pci/endpoint/pci-epc-mem.c
+++ b/drivers/pci/endpoint/pci-epc-mem.c
@@ -178,7 +178,7 @@ EXPORT_SYMBOL_GPL(pci_epc_mem_exit);
void __iomem *pci_epc_mem_alloc_addr(struct pci_epc *epc,
phys_addr_t *phys_addr, size_t size)
{
- void __iomem *virt_addr = NULL;
+ void __iomem *virt_addr;
struct pci_epc_mem *mem;
unsigned int page_shift;
size_t align_size;
@@ -188,10 +188,13 @@ void __iomem *pci_epc_mem_alloc_addr(struct pci_epc *epc,
for (i = 0; i < epc->num_windows; i++) {
mem = epc->windows[i];
- mutex_lock(&mem->lock);
+ if (size > mem->window.size)
+ continue;
+
align_size = ALIGN(size, mem->window.page_size);
order = pci_epc_mem_get_order(mem, align_size);
+ mutex_lock(&mem->lock);
pageno = bitmap_find_free_region(mem->bitmap, mem->pages,
order);
if (pageno >= 0) {
@@ -211,7 +214,7 @@ void __iomem *pci_epc_mem_alloc_addr(struct pci_epc *epc,
mutex_unlock(&mem->lock);
}
- return virt_addr;
+ return NULL;
}
EXPORT_SYMBOL_GPL(pci_epc_mem_alloc_addr);
diff --git a/drivers/pci/endpoint/pci-epf-core.c b/drivers/pci/endpoint/pci-epf-core.c
index 0a28a0b0911b..394395c7f8de 100644
--- a/drivers/pci/endpoint/pci-epf-core.c
+++ b/drivers/pci/endpoint/pci-epf-core.c
@@ -202,6 +202,7 @@ void pci_epf_remove_vepf(struct pci_epf *epf_pf, struct pci_epf *epf_vf)
mutex_lock(&epf_pf->lock);
clear_bit(epf_vf->vfunc_no, &epf_pf->vfunction_num_map);
+ epf_vf->epf_pf = NULL;
list_del(&epf_vf->list);
mutex_unlock(&epf_pf->lock);
}
@@ -255,6 +256,8 @@ EXPORT_SYMBOL_GPL(pci_epf_free_space);
* @type: Identifies if the allocation is for primary EPC or secondary EPC
*
* Invoke to allocate memory for the PCI EPF register space.
+ * Flag PCI_BASE_ADDRESS_MEM_TYPE_64 will automatically get set if the BAR
+ * can only be a 64-bit BAR, or if the requested size is larger than 2 GB.
*/
void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar,
const struct pci_epc_features *epc_features,
@@ -271,6 +274,10 @@ void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar,
if (size < 128)
size = 128;
+ /* According to PCIe base spec, min size for a resizable BAR is 1 MB. */
+ if (epc_features->bar[bar].type == BAR_RESIZABLE && size < SZ_1M)
+ size = SZ_1M;
+
if (epc_features->bar[bar].type == BAR_FIXED && bar_fixed_size) {
if (size > bar_fixed_size) {
dev_err(&epf->dev,
@@ -304,9 +311,10 @@ void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar,
epf_bar[bar].addr = space;
epf_bar[bar].size = size;
epf_bar[bar].barno = bar;
- epf_bar[bar].flags |= upper_32_bits(size) ?
- PCI_BASE_ADDRESS_MEM_TYPE_64 :
- PCI_BASE_ADDRESS_MEM_TYPE_32;
+ if (upper_32_bits(size) || epc_features->bar[bar].only_64bit)
+ epf_bar[bar].flags |= PCI_BASE_ADDRESS_MEM_TYPE_64;
+ else
+ epf_bar[bar].flags |= PCI_BASE_ADDRESS_MEM_TYPE_32;
return space;
}
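To illustrate the documented behavior: a request whose size has non-zero upper 32 bits, or whose BAR is marked only_64bit, now comes back flagged as 64-bit without the caller asking for it. A hypothetical call:

    static void *example_alloc_large_bar(struct pci_epf *epf,
                                         const struct pci_epc_features *ft)
    {
            /*
             * upper_32_bits(SZ_4G) != 0, so the returned BAR is flagged
             * PCI_BASE_ADDRESS_MEM_TYPE_64 automatically.
             */
            return pci_epf_alloc_space(epf, SZ_4G, BAR_2, ft,
                                       PRIMARY_INTERFACE);
    }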
@@ -485,10 +493,10 @@ pci_epf_match_id(const struct pci_epf_device_id *id, const struct pci_epf *epf)
return NULL;
}
-static int pci_epf_device_match(struct device *dev, struct device_driver *drv)
+static int pci_epf_device_match(struct device *dev, const struct device_driver *drv)
{
struct pci_epf *epf = to_pci_epf(dev);
- struct pci_epf_driver *driver = to_pci_epf_driver(drv);
+ const struct pci_epf_driver *driver = to_pci_epf_driver(drv);
if (driver->id_table)
return !!pci_epf_match_id(driver->id_table, epf);
diff --git a/drivers/pci/hotplug/Kconfig b/drivers/pci/hotplug/Kconfig
index 1472aef0fb81..3207860b52e4 100644
--- a/drivers/pci/hotplug/Kconfig
+++ b/drivers/pci/hotplug/Kconfig
@@ -97,7 +97,7 @@ config HOTPLUG_PCI_CPCI_ZT5550
tristate "Ziatech ZT5550 CompactPCI Hotplug driver"
depends on HOTPLUG_PCI_CPCI && X86
help
- Say Y here if you have an Performance Technologies (formerly Intel,
+ Say Y here if you have a Performance Technologies (formerly Intel,
formerly just Ziatech) Ziatech ZT5550 CompactPCI system card.
To compile this driver as a module, choose M here: the
@@ -118,6 +118,16 @@ config HOTPLUG_PCI_CPCI_GENERIC
When in doubt, say N.
+config HOTPLUG_PCI_OCTEONEP
+ bool "Marvell OCTEON PCI Hotplug driver"
+ depends on HOTPLUG_PCI
+ help
+ Say Y here if you have an OCTEON PCIe device with a hotplug
+ controller. This driver enables the non-controller functions of the
+ device to be registered as hotplug slots.
+
+ When in doubt, say N.
+
config HOTPLUG_PCI_SHPC
bool "SHPC PCI Hotplug driver"
help
diff --git a/drivers/pci/hotplug/Makefile b/drivers/pci/hotplug/Makefile
index 240c99517d5e..40aaf31fe338 100644
--- a/drivers/pci/hotplug/Makefile
+++ b/drivers/pci/hotplug/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_HOTPLUG_PCI_RPA) += rpaphp.o
obj-$(CONFIG_HOTPLUG_PCI_RPA_DLPAR) += rpadlpar_io.o
obj-$(CONFIG_HOTPLUG_PCI_ACPI) += acpiphp.o
obj-$(CONFIG_HOTPLUG_PCI_S390) += s390_pci_hpc.o
+obj-$(CONFIG_HOTPLUG_PCI_OCTEONEP) += octep_hp.o
# acpiphp_ibm extends acpiphp, so should be linked afterwards.
diff --git a/drivers/pci/hotplug/TODO b/drivers/pci/hotplug/TODO
index fdb8dd6ea24d..92e6e20e8595 100644
--- a/drivers/pci/hotplug/TODO
+++ b/drivers/pci/hotplug/TODO
@@ -6,6 +6,8 @@ cpcihp:
->set_power callbacks in struct cpci_hp_controller_ops. Why were they
introduced? Can they be removed from the struct?
+* The return value of pci_hp_add_bridge() is not checked.
+
cpqphp:
* The driver spawns a kthread cpqhp_event_thread() which is woken by the
@@ -16,6 +18,8 @@ cpqphp:
* A large portion of cpqphp_ctrl.c and cpqphp_pci.c concerns resource
management. Doesn't this duplicate functionality in the core?
+* The return value of pci_hp_add_bridge() is not checked.
+
ibmphp:
* Implementations of hotplug_slot_ops callbacks such as get_adapter_present()
@@ -43,21 +47,10 @@ ibmphp:
* A large portion of ibmphp_res.c and ibmphp_pci.c concerns resource
management. Doesn't this duplicate functionality in the core?
-sgi_hotplug:
-
-* Several functions access the pci_slot member in struct hotplug_slot even
- though pci_hotplug.h declares it private. See sn_hp_destroy() for an
- example. Either the pci_slot member should no longer be declared private
- or sgi_hotplug should store a pointer to it in struct slot. Probably the
- former.
+* The return value of pci_hp_add_bridge() is not checked.
shpchp:
-* There is only a single implementation of struct hpc_ops. Can the struct be
- removed and its functions invoked directly? This has already been done in
- pciehp with commit 82a9e79ef132 ("PCI: pciehp: remove hpc_ops"). Clarify
- if there was a specific reason not to apply the same change to shpchp.
-
* The hardirq handler shpc_isr() queues events on a workqueue. It can be
simplified by converting it to threaded IRQ handling. Use pciehp as a
template.
diff --git a/drivers/pci/hotplug/acpiphp_ampere_altra.c b/drivers/pci/hotplug/acpiphp_ampere_altra.c
index 3fddd04851b6..70dbc0431fc6 100644
--- a/drivers/pci/hotplug/acpiphp_ampere_altra.c
+++ b/drivers/pci/hotplug/acpiphp_ampere_altra.c
@@ -119,9 +119,10 @@ static struct platform_driver altra_led_driver = {
.acpi_match_table = altra_led_ids,
},
.probe = altra_led_probe,
- .remove_new = altra_led_remove,
+ .remove = altra_led_remove,
};
module_platform_driver(altra_led_driver);
MODULE_AUTHOR("D Scott Phillips <scott@os.amperecomputing.com>");
+MODULE_DESCRIPTION("ACPI PCI Hot Plug Extension for Ampere Altra");
MODULE_LICENSE("GPL");
diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
index 8f3a0a33f362..b3aa34e3a4a2 100644
--- a/drivers/pci/hotplug/acpiphp_ibm.c
+++ b/drivers/pci/hotplug/acpiphp_ibm.c
@@ -84,7 +84,7 @@ static int ibm_get_attention_status(struct hotplug_slot *slot, u8 *status);
static void ibm_handle_events(acpi_handle handle, u32 event, void *context);
static int ibm_get_table_from_acpi(char **bufp);
static ssize_t ibm_read_apci_table(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buffer, loff_t pos, size_t size);
static acpi_status __init ibm_find_acpi_device(acpi_handle handle,
u32 lvl, void *context, void **rv);
@@ -98,7 +98,7 @@ static struct bin_attribute ibm_apci_table_attr __ro_after_init = {
.name = "apci_table",
.mode = S_IRUGO,
},
- .read = ibm_read_apci_table,
+ .read_new = ibm_read_apci_table,
.write = NULL,
};
static struct acpiphp_attention_info ibm_attention_info =
@@ -353,7 +353,7 @@ read_table_done:
* our solution is to only allow reading the table in all at once.
*/
static ssize_t ibm_read_apci_table(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buffer, loff_t pos, size_t size)
{
int bytes_read = -EINVAL;
diff --git a/drivers/pci/hotplug/cpci_hotplug.h b/drivers/pci/hotplug/cpci_hotplug.h
index 6d8970d8c3f2..a31d6b62f523 100644
--- a/drivers/pci/hotplug/cpci_hotplug.h
+++ b/drivers/pci/hotplug/cpci_hotplug.h
@@ -44,9 +44,6 @@ struct cpci_hp_controller_ops {
int (*enable_irq)(void);
int (*disable_irq)(void);
int (*check_irq)(void *dev_id);
- int (*hardware_test)(struct slot *slot, u32 value);
- u8 (*get_power)(struct slot *slot);
- int (*set_power)(struct slot *slot, int value);
};
struct cpci_hp_controller {
diff --git a/drivers/pci/hotplug/cpci_hotplug_core.c b/drivers/pci/hotplug/cpci_hotplug_core.c
index d0559d2faf50..dd93e53ea7c2 100644
--- a/drivers/pci/hotplug/cpci_hotplug_core.c
+++ b/drivers/pci/hotplug/cpci_hotplug_core.c
@@ -71,13 +71,10 @@ static int
enable_slot(struct hotplug_slot *hotplug_slot)
{
struct slot *slot = to_slot(hotplug_slot);
- int retval = 0;
	dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
- if (controller->ops->set_power)
- retval = controller->ops->set_power(slot, 1);
- return retval;
+ return 0;
}
static int
@@ -109,12 +106,6 @@ disable_slot(struct hotplug_slot *hotplug_slot)
}
cpci_led_on(slot);
- if (controller->ops->set_power) {
- retval = controller->ops->set_power(slot, 0);
- if (retval)
- goto disable_error;
- }
-
slot->adapter_status = 0;
if (slot->extracting) {
@@ -129,11 +120,7 @@ disable_error:
static u8
cpci_get_power_status(struct slot *slot)
{
- u8 power = 1;
-
- if (controller->ops->get_power)
- power = controller->ops->get_power(slot);
- return power;
+ return 1;
}
static int
diff --git a/drivers/pci/hotplug/cpqphp_core.c b/drivers/pci/hotplug/cpqphp_core.c
index c94b40e64baf..47a3ed16159a 100644
--- a/drivers/pci/hotplug/cpqphp_core.c
+++ b/drivers/pci/hotplug/cpqphp_core.c
@@ -328,7 +328,7 @@ get_slot_mapping(struct pci_bus *bus, u8 bus_num, u8 dev_num, u8 *slot)
} else {
/* Did not get a match on the target PCI device. Check
* if the current IRQ table entry is a PCI-to-PCI
- * bridge device. If so, and it's secondary bus
+ * bridge device. If so, and its secondary bus
* matches the bus number for the target device, I need
* to save the bridge's slot number. If I can not find
* an entry for the target device, I will have to
diff --git a/drivers/pci/hotplug/cpqphp_ctrl.c b/drivers/pci/hotplug/cpqphp_ctrl.c
index c01968ef0bd7..20529d1a3c44 100644
--- a/drivers/pci/hotplug/cpqphp_ctrl.c
+++ b/drivers/pci/hotplug/cpqphp_ctrl.c
@@ -1794,7 +1794,7 @@ static void interrupt_event_handler(struct controller *ctrl)
} else if (ctrl->event_queue[loop].event_type ==
INT_BUTTON_CANCEL) {
dbg("button cancel\n");
- del_timer(&p_slot->task_event);
+ timer_delete(&p_slot->task_event);
mutex_lock(&ctrl->crit_sect);
diff --git a/drivers/pci/hotplug/cpqphp_pci.c b/drivers/pci/hotplug/cpqphp_pci.c
index e9f1fb333a71..ef7534a3ca40 100644
--- a/drivers/pci/hotplug/cpqphp_pci.c
+++ b/drivers/pci/hotplug/cpqphp_pci.c
@@ -12,8 +12,11 @@
*
*/
+#define pr_fmt(fmt) "cpqphp: " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
+#include <linux/printk.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
@@ -132,18 +135,6 @@ int cpqhp_unconfigure_device(struct pci_func *func)
return 0;
}
-static int PCI_RefinedAccessConfig(struct pci_bus *bus, unsigned int devfn, u8 offset, u32 *value)
-{
- u32 vendID = 0;
-
- if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, &vendID) == -1)
- return -1;
- if (vendID == 0xffffffff)
- return -1;
- return pci_bus_read_config_dword(bus, devfn, offset, value);
-}
-
-
/*
* cpqhp_set_irq
*
@@ -202,13 +193,16 @@ static int PCI_ScanBusForNonBridge(struct controller *ctrl, u8 bus_num, u8 *dev_
{
u16 tdevice;
u32 work;
- u8 tbus;
+ int ret = -1;
ctrl->pci_bus->number = bus_num;
for (tdevice = 0; tdevice < 0xFF; tdevice++) {
/* Scan for access first */
- if (PCI_RefinedAccessConfig(ctrl->pci_bus, tdevice, 0x08, &work) == -1)
+ if (!pci_bus_read_dev_vendor_id(ctrl->pci_bus, tdevice, &work, 0))
+ continue;
+ ret = pci_bus_read_config_dword(ctrl->pci_bus, tdevice, PCI_CLASS_REVISION, &work);
+ if (ret)
continue;
dbg("Looking for nonbridge bus_num %d dev_num %d\n", bus_num, tdevice);
/* Yep we got one. Not a bridge ? */
@@ -216,23 +210,20 @@ static int PCI_ScanBusForNonBridge(struct controller *ctrl, u8 bus_num, u8 *dev_
*dev_num = tdevice;
dbg("found it !\n");
return 0;
- }
- }
- for (tdevice = 0; tdevice < 0xFF; tdevice++) {
- /* Scan for access first */
- if (PCI_RefinedAccessConfig(ctrl->pci_bus, tdevice, 0x08, &work) == -1)
- continue;
- dbg("Looking for bridge bus_num %d dev_num %d\n", bus_num, tdevice);
- /* Yep we got one. bridge ? */
- if ((work >> 8) == PCI_TO_PCI_BRIDGE_CLASS) {
- pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(tdevice, 0), PCI_SECONDARY_BUS, &tbus);
- /* XXX: no recursion, wtf? */
- dbg("Recurse on bus_num %d tdevice %d\n", tbus, tdevice);
- return 0;
+ } else {
+ /*
+			 * XXX: The code removed here printed a debug
+			 * message suggesting it should recurse into
+			 * buses below bridges, but it never actually
+			 * recursed; see the pr_warn() below.
+ */
+ ret = 0;
+ pr_warn("missing feature: bridge scan recursion not implemented\n");
}
}
- return -1;
+
+ return ret;
}
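The replacement probe sequence relies on pci_bus_read_dev_vendor_id() (a kernel-internal helper declared in drivers/pci/pci.h), which already treats an all-ones read as an empty slot, instead of the removed PCI_RefinedAccessConfig(). A hedged sketch of the idiom, with a hypothetical function name and bus/devfn as placeholders:

	static void probe_devfn(struct pci_bus *bus, unsigned int devfn)
	{
		u32 id, class;

		if (!pci_bus_read_dev_vendor_id(bus, devfn, &id, 0))
			return;		/* no device responded */

		if (pci_bus_read_config_dword(bus, devfn, PCI_CLASS_REVISION, &class))
			return;		/* config read failed */

		if ((class >> 8) != PCI_TO_PCI_BRIDGE_CLASS)
			pr_info("non-bridge function at devfn %#x\n", devfn);
	}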
@@ -253,7 +244,7 @@ static int PCI_GetBusDevHelper(struct controller *ctrl, u8 *bus_num, u8 *dev_num
*dev_num = tdevice;
ctrl->pci_bus->number = tbus;
pci_bus_read_config_dword(ctrl->pci_bus, *dev_num, PCI_VENDOR_ID, &work);
- if (!nobridge || (work == 0xffffffff))
+ if (!nobridge || PCI_POSSIBLE_ERROR(work))
return 0;
dbg("bus_num %d devfn %d\n", *bus_num, *dev_num);
diff --git a/drivers/pci/hotplug/cpqphp_sysfs.c b/drivers/pci/hotplug/cpqphp_sysfs.c
index fed1360ee9b1..6143ebf71f21 100644
--- a/drivers/pci/hotplug/cpqphp_sysfs.c
+++ b/drivers/pci/hotplug/cpqphp_sysfs.c
@@ -123,7 +123,6 @@ static int spew_debug_info(struct controller *ctrl, char *data, int size)
struct ctrl_dbg {
int size;
char *data;
- struct controller *ctrl;
};
#define MAX_OUTPUT (4*PAGE_SIZE)
diff --git a/drivers/pci/hotplug/octep_hp.c b/drivers/pci/hotplug/octep_hp.c
new file mode 100644
index 000000000000..2bce7296c050
--- /dev/null
+++ b/drivers/pci/hotplug/octep_hp.c
@@ -0,0 +1,427 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2024 Marvell. */
+
+#include <linux/cleanup.h>
+#include <linux/container_of.h>
+#include <linux/delay.h>
+#include <linux/dev_printk.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/pci_hotplug.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+
+#define OCTEP_HP_INTR_OFFSET(x) (0x20400 + ((x) << 4))
+#define OCTEP_HP_INTR_VECTOR(x) (16 + (x))
+#define OCTEP_HP_DRV_NAME "octep_hp"
+
+/*
+ * Type of MSI-X interrupts. OCTEP_HP_INTR_VECTOR() and
+ * OCTEP_HP_INTR_OFFSET() generate the vector and offset for an interrupt
+ * type.
+ */
+enum octep_hp_intr_type {
+ OCTEP_HP_INTR_INVALID = -1,
+ OCTEP_HP_INTR_ENA = 0,
+ OCTEP_HP_INTR_DIS = 1,
+ OCTEP_HP_INTR_MAX = 2,
+};
+
+struct octep_hp_cmd {
+ struct list_head list;
+ enum octep_hp_intr_type intr_type;
+ u64 intr_val;
+};
+
+struct octep_hp_slot {
+ struct list_head list;
+ struct hotplug_slot slot;
+ u16 slot_number;
+ struct pci_dev *hp_pdev;
+ unsigned int hp_devfn;
+ struct octep_hp_controller *ctrl;
+};
+
+struct octep_hp_intr_info {
+ enum octep_hp_intr_type type;
+ int number;
+ char name[16];
+};
+
+struct octep_hp_controller {
+ void __iomem *base;
+ struct pci_dev *pdev;
+ struct octep_hp_intr_info intr[OCTEP_HP_INTR_MAX];
+ struct work_struct work;
+ struct list_head slot_list;
+ struct mutex slot_lock; /* Protects slot_list */
+ struct list_head hp_cmd_list;
+ spinlock_t hp_cmd_lock; /* Protects hp_cmd_list */
+};
+
+static void octep_hp_enable_pdev(struct octep_hp_controller *hp_ctrl,
+ struct octep_hp_slot *hp_slot)
+{
+ guard(mutex)(&hp_ctrl->slot_lock);
+ if (hp_slot->hp_pdev) {
+ pci_dbg(hp_slot->hp_pdev, "Slot %s is already enabled\n",
+ hotplug_slot_name(&hp_slot->slot));
+ return;
+ }
+
+ /* Scan the device and add it to the bus */
+ hp_slot->hp_pdev = pci_scan_single_device(hp_ctrl->pdev->bus,
+ hp_slot->hp_devfn);
+ pci_bus_assign_resources(hp_ctrl->pdev->bus);
+ pci_bus_add_device(hp_slot->hp_pdev);
+
+ dev_dbg(&hp_slot->hp_pdev->dev, "Enabled slot %s\n",
+ hotplug_slot_name(&hp_slot->slot));
+}
+
+static void octep_hp_disable_pdev(struct octep_hp_controller *hp_ctrl,
+ struct octep_hp_slot *hp_slot)
+{
+ guard(mutex)(&hp_ctrl->slot_lock);
+ if (!hp_slot->hp_pdev) {
+ pci_dbg(hp_ctrl->pdev, "Slot %s is already disabled\n",
+ hotplug_slot_name(&hp_slot->slot));
+ return;
+ }
+
+ pci_dbg(hp_slot->hp_pdev, "Disabling slot %s\n",
+ hotplug_slot_name(&hp_slot->slot));
+
+ /* Remove the device from the bus */
+ pci_stop_and_remove_bus_device_locked(hp_slot->hp_pdev);
+ hp_slot->hp_pdev = NULL;
+}
+
+static int octep_hp_enable_slot(struct hotplug_slot *slot)
+{
+ struct octep_hp_slot *hp_slot =
+ container_of(slot, struct octep_hp_slot, slot);
+
+ octep_hp_enable_pdev(hp_slot->ctrl, hp_slot);
+ return 0;
+}
+
+static int octep_hp_disable_slot(struct hotplug_slot *slot)
+{
+ struct octep_hp_slot *hp_slot =
+ container_of(slot, struct octep_hp_slot, slot);
+
+ octep_hp_disable_pdev(hp_slot->ctrl, hp_slot);
+ return 0;
+}
+
+static struct hotplug_slot_ops octep_hp_slot_ops = {
+ .enable_slot = octep_hp_enable_slot,
+ .disable_slot = octep_hp_disable_slot,
+};
+
+#define SLOT_NAME_SIZE 16
+static struct octep_hp_slot *
+octep_hp_register_slot(struct octep_hp_controller *hp_ctrl,
+ struct pci_dev *pdev, u16 slot_number)
+{
+ char slot_name[SLOT_NAME_SIZE];
+ struct octep_hp_slot *hp_slot;
+ int ret;
+
+ hp_slot = kzalloc(sizeof(*hp_slot), GFP_KERNEL);
+ if (!hp_slot)
+ return ERR_PTR(-ENOMEM);
+
+ hp_slot->ctrl = hp_ctrl;
+ hp_slot->hp_pdev = pdev;
+ hp_slot->hp_devfn = pdev->devfn;
+ hp_slot->slot_number = slot_number;
+ hp_slot->slot.ops = &octep_hp_slot_ops;
+
+ snprintf(slot_name, sizeof(slot_name), "octep_hp_%u", slot_number);
+ ret = pci_hp_register(&hp_slot->slot, hp_ctrl->pdev->bus,
+ PCI_SLOT(pdev->devfn), slot_name);
+ if (ret) {
+ kfree(hp_slot);
+ return ERR_PTR(ret);
+ }
+
+ pci_info(pdev, "Registered slot %s for device %s\n",
+ slot_name, pci_name(pdev));
+
+ list_add_tail(&hp_slot->list, &hp_ctrl->slot_list);
+ octep_hp_disable_pdev(hp_ctrl, hp_slot);
+
+ return hp_slot;
+}
+
+static void octep_hp_deregister_slot(void *data)
+{
+ struct octep_hp_slot *hp_slot = data;
+ struct octep_hp_controller *hp_ctrl = hp_slot->ctrl;
+
+ pci_hp_deregister(&hp_slot->slot);
+ octep_hp_enable_pdev(hp_ctrl, hp_slot);
+ list_del(&hp_slot->list);
+ kfree(hp_slot);
+}
+
+static const char *octep_hp_cmd_name(enum octep_hp_intr_type type)
+{
+ switch (type) {
+ case OCTEP_HP_INTR_ENA:
+ return "hotplug enable";
+ case OCTEP_HP_INTR_DIS:
+ return "hotplug disable";
+ default:
+ return "invalid";
+ }
+}
+
+static void octep_hp_cmd_handler(struct octep_hp_controller *hp_ctrl,
+ struct octep_hp_cmd *hp_cmd)
+{
+ struct octep_hp_slot *hp_slot;
+
+ /*
+ * Enable or disable the slots based on the slot mask.
+ * intr_val is a bit mask where each bit represents a slot.
+ */
+ list_for_each_entry(hp_slot, &hp_ctrl->slot_list, list) {
+ if (!(hp_cmd->intr_val & BIT(hp_slot->slot_number)))
+ continue;
+
+ pci_info(hp_ctrl->pdev, "Received %s command for slot %s\n",
+ octep_hp_cmd_name(hp_cmd->intr_type),
+ hotplug_slot_name(&hp_slot->slot));
+
+ switch (hp_cmd->intr_type) {
+ case OCTEP_HP_INTR_ENA:
+ octep_hp_enable_pdev(hp_ctrl, hp_slot);
+ break;
+ case OCTEP_HP_INTR_DIS:
+ octep_hp_disable_pdev(hp_ctrl, hp_slot);
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+static void octep_hp_work_handler(struct work_struct *work)
+{
+ struct octep_hp_controller *hp_ctrl;
+ struct octep_hp_cmd *hp_cmd;
+ unsigned long flags;
+
+ hp_ctrl = container_of(work, struct octep_hp_controller, work);
+
+ /* Process all the hotplug commands */
+ spin_lock_irqsave(&hp_ctrl->hp_cmd_lock, flags);
+ while (!list_empty(&hp_ctrl->hp_cmd_list)) {
+ hp_cmd = list_first_entry(&hp_ctrl->hp_cmd_list,
+ struct octep_hp_cmd, list);
+ list_del(&hp_cmd->list);
+ spin_unlock_irqrestore(&hp_ctrl->hp_cmd_lock, flags);
+
+ octep_hp_cmd_handler(hp_ctrl, hp_cmd);
+ kfree(hp_cmd);
+
+ spin_lock_irqsave(&hp_ctrl->hp_cmd_lock, flags);
+ }
+ spin_unlock_irqrestore(&hp_ctrl->hp_cmd_lock, flags);
+}
+
+static enum octep_hp_intr_type octep_hp_intr_type(struct octep_hp_intr_info *intr,
+ int irq)
+{
+ enum octep_hp_intr_type type;
+
+ for (type = OCTEP_HP_INTR_ENA; type < OCTEP_HP_INTR_MAX; type++) {
+ if (intr[type].number == irq)
+ return type;
+ }
+
+ return OCTEP_HP_INTR_INVALID;
+}
+
+static irqreturn_t octep_hp_intr_handler(int irq, void *data)
+{
+ struct octep_hp_controller *hp_ctrl = data;
+ struct pci_dev *pdev = hp_ctrl->pdev;
+ enum octep_hp_intr_type type;
+ struct octep_hp_cmd *hp_cmd;
+ u64 intr_val;
+
+ type = octep_hp_intr_type(hp_ctrl->intr, irq);
+ if (type == OCTEP_HP_INTR_INVALID) {
+ pci_err(pdev, "Invalid interrupt %d\n", irq);
+ return IRQ_HANDLED;
+ }
+
+ /* Read and clear the interrupt */
+ intr_val = readq(hp_ctrl->base + OCTEP_HP_INTR_OFFSET(type));
+ writeq(intr_val, hp_ctrl->base + OCTEP_HP_INTR_OFFSET(type));
+
+ hp_cmd = kzalloc(sizeof(*hp_cmd), GFP_ATOMIC);
+ if (!hp_cmd)
+ return IRQ_HANDLED;
+
+ hp_cmd->intr_val = intr_val;
+ hp_cmd->intr_type = type;
+
+ /* Add the command to the list and schedule the work */
+ spin_lock(&hp_ctrl->hp_cmd_lock);
+ list_add_tail(&hp_cmd->list, &hp_ctrl->hp_cmd_list);
+ spin_unlock(&hp_ctrl->hp_cmd_lock);
+ schedule_work(&hp_ctrl->work);
+
+ return IRQ_HANDLED;
+}
+
+static void octep_hp_irq_cleanup(void *data)
+{
+ struct octep_hp_controller *hp_ctrl = data;
+
+ pci_free_irq_vectors(hp_ctrl->pdev);
+ flush_work(&hp_ctrl->work);
+}
+
+static int octep_hp_request_irq(struct octep_hp_controller *hp_ctrl,
+ enum octep_hp_intr_type type)
+{
+ struct pci_dev *pdev = hp_ctrl->pdev;
+ struct octep_hp_intr_info *intr;
+ int irq;
+
+ irq = pci_irq_vector(pdev, OCTEP_HP_INTR_VECTOR(type));
+ if (irq < 0)
+ return irq;
+
+ intr = &hp_ctrl->intr[type];
+ intr->number = irq;
+ intr->type = type;
+ snprintf(intr->name, sizeof(intr->name), "octep_hp_%d", type);
+
+ return devm_request_irq(&pdev->dev, irq, octep_hp_intr_handler,
+ IRQF_SHARED, intr->name, hp_ctrl);
+}
+
+static int octep_hp_controller_setup(struct pci_dev *pdev,
+ struct octep_hp_controller *hp_ctrl)
+{
+ struct device *dev = &pdev->dev;
+ enum octep_hp_intr_type type;
+ int ret;
+
+ ret = pcim_enable_device(pdev);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to enable PCI device\n");
+
+ hp_ctrl->base = pcim_iomap_region(pdev, 0, OCTEP_HP_DRV_NAME);
+ if (IS_ERR(hp_ctrl->base))
+ return dev_err_probe(dev, PTR_ERR(hp_ctrl->base),
+ "Failed to map PCI device region\n");
+
+ pci_set_master(pdev);
+ pci_set_drvdata(pdev, hp_ctrl);
+
+ INIT_LIST_HEAD(&hp_ctrl->slot_list);
+ INIT_LIST_HEAD(&hp_ctrl->hp_cmd_list);
+ mutex_init(&hp_ctrl->slot_lock);
+ spin_lock_init(&hp_ctrl->hp_cmd_lock);
+ INIT_WORK(&hp_ctrl->work, octep_hp_work_handler);
+ hp_ctrl->pdev = pdev;
+
+ ret = pci_alloc_irq_vectors(pdev, 1,
+ OCTEP_HP_INTR_VECTOR(OCTEP_HP_INTR_MAX),
+ PCI_IRQ_MSIX);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to alloc MSI-X vectors\n");
+
+ ret = devm_add_action(&pdev->dev, octep_hp_irq_cleanup, hp_ctrl);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "Failed to add IRQ cleanup action\n");
+
+ for (type = OCTEP_HP_INTR_ENA; type < OCTEP_HP_INTR_MAX; type++) {
+ ret = octep_hp_request_irq(hp_ctrl, type);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to request IRQ for vector %d\n",
+ OCTEP_HP_INTR_VECTOR(type));
+ }
+
+ return 0;
+}
+
+static int octep_hp_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct octep_hp_controller *hp_ctrl;
+ struct pci_dev *tmp_pdev, *next;
+ struct octep_hp_slot *hp_slot;
+ u16 slot_number = 0;
+ int ret;
+
+ hp_ctrl = devm_kzalloc(&pdev->dev, sizeof(*hp_ctrl), GFP_KERNEL);
+ if (!hp_ctrl)
+ return -ENOMEM;
+
+ ret = octep_hp_controller_setup(pdev, hp_ctrl);
+ if (ret)
+ return ret;
+
+ /*
+	 * Register all hotplug slots. The hotplug controller is the first
+	 * function of the PCI device; the remaining functions are the
+	 * hotplug slots. The slot functions are logically removed from
+	 * the bus during probing and are re-added by the driver when a
+	 * hotplug event is received.
+ */
+ list_for_each_entry_safe(tmp_pdev, next, &pdev->bus->devices, bus_list) {
+ if (tmp_pdev == pdev)
+ continue;
+
+ hp_slot = octep_hp_register_slot(hp_ctrl, tmp_pdev, slot_number);
+ if (IS_ERR(hp_slot))
+ return dev_err_probe(&pdev->dev, PTR_ERR(hp_slot),
+ "Failed to register hotplug slot %u\n",
+ slot_number);
+
+ ret = devm_add_action(&pdev->dev, octep_hp_deregister_slot,
+ hp_slot);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "Failed to add action for deregistering slot %u\n",
+ slot_number);
+ slot_number++;
+ }
+
+ return 0;
+}
+
+#define PCI_DEVICE_ID_CAVIUM_OCTEP_HP_CTLR 0xa0e3
+static struct pci_device_id octep_hp_pci_map[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_CAVIUM_OCTEP_HP_CTLR) },
+ { },
+};
+
+static struct pci_driver octep_hp = {
+ .name = OCTEP_HP_DRV_NAME,
+ .id_table = octep_hp_pci_map,
+ .probe = octep_hp_pci_probe,
+};
+
+module_pci_driver(octep_hp);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Marvell");
+MODULE_DESCRIPTION("Marvell OCTEON PCI Hotplug driver");
diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
index 058d5937d8a9..d30f1316c98e 100644
--- a/drivers/pci/hotplug/pci_hotplug_core.c
+++ b/drivers/pci/hotplug/pci_hotplug_core.c
@@ -14,18 +14,16 @@
* Scott Murray <scottm@somanetworks.com>
*/
-#include <linux/module.h> /* try_module_get & module_put */
+#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
-#include <linux/list.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/mount.h>
#include <linux/namei.h>
-#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/pci_hotplug.h>
#include <linux/uaccess.h>
@@ -42,20 +40,14 @@
/* local variables */
static bool debug;
-static LIST_HEAD(pci_hotplug_slot_list);
-static DEFINE_MUTEX(pci_hp_mutex);
-
/* Weee, fun with macros... */
#define GET_STATUS(name, type) \
static int get_##name(struct hotplug_slot *slot, type *value) \
{ \
const struct hotplug_slot_ops *ops = slot->ops; \
int retval = 0; \
- if (!try_module_get(slot->owner)) \
- return -ENODEV; \
if (ops->get_##name) \
retval = ops->get_##name(slot, value); \
- module_put(slot->owner); \
return retval; \
}
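Without the try_module_get()/module_put() pair, GET_STATUS(power_status, u8) now expands to roughly the following (expansion shown for illustration only):

	static int get_power_status(struct hotplug_slot *slot, u8 *value)
	{
		const struct hotplug_slot_ops *ops = slot->ops;
		int retval = 0;

		if (ops->get_power_status)
			retval = ops->get_power_status(slot, value);
		return retval;
	}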
@@ -88,10 +80,6 @@ static ssize_t power_write_file(struct pci_slot *pci_slot, const char *buf,
power = (u8)(lpower & 0xff);
dbg("power = %d\n", power);
- if (!try_module_get(slot->owner)) {
- retval = -ENODEV;
- goto exit;
- }
switch (power) {
case 0:
if (slot->ops->disable_slot)
@@ -107,9 +95,7 @@ static ssize_t power_write_file(struct pci_slot *pci_slot, const char *buf,
err("Illegal value specified for power\n");
retval = -EINVAL;
}
- module_put(slot->owner);
-exit:
if (retval)
return retval;
return count;
@@ -146,15 +132,9 @@ static ssize_t attention_write_file(struct pci_slot *pci_slot, const char *buf,
attention = (u8)(lattention & 0xff);
dbg(" - attention = %d\n", attention);
- if (!try_module_get(slot->owner)) {
- retval = -ENODEV;
- goto exit;
- }
if (ops->set_attention_status)
retval = ops->set_attention_status(slot, attention);
- module_put(slot->owner);
-exit:
if (retval)
return retval;
return count;
@@ -212,15 +192,9 @@ static ssize_t test_write_file(struct pci_slot *pci_slot, const char *buf,
test = (u32)(ltest & 0xffffffff);
dbg("test = %d\n", test);
- if (!try_module_get(slot->owner)) {
- retval = -ENODEV;
- goto exit;
- }
if (slot->ops->hardware_test)
retval = slot->ops->hardware_test(slot, test);
- module_put(slot->owner);
-exit:
if (retval)
return retval;
return count;
@@ -231,12 +205,8 @@ static struct pci_slot_attribute hotplug_slot_attr_test = {
.store = test_write_file
};
-static bool has_power_file(struct pci_slot *pci_slot)
+static bool has_power_file(struct hotplug_slot *slot)
{
- struct hotplug_slot *slot = pci_slot->hotplug;
-
- if ((!slot) || (!slot->ops))
- return false;
if ((slot->ops->enable_slot) ||
(slot->ops->disable_slot) ||
(slot->ops->get_power_status))
@@ -244,87 +214,79 @@ static bool has_power_file(struct pci_slot *pci_slot)
return false;
}
-static bool has_attention_file(struct pci_slot *pci_slot)
+static bool has_attention_file(struct hotplug_slot *slot)
{
- struct hotplug_slot *slot = pci_slot->hotplug;
-
- if ((!slot) || (!slot->ops))
- return false;
if ((slot->ops->set_attention_status) ||
(slot->ops->get_attention_status))
return true;
return false;
}
-static bool has_latch_file(struct pci_slot *pci_slot)
+static bool has_latch_file(struct hotplug_slot *slot)
{
- struct hotplug_slot *slot = pci_slot->hotplug;
-
- if ((!slot) || (!slot->ops))
- return false;
if (slot->ops->get_latch_status)
return true;
return false;
}
-static bool has_adapter_file(struct pci_slot *pci_slot)
+static bool has_adapter_file(struct hotplug_slot *slot)
{
- struct hotplug_slot *slot = pci_slot->hotplug;
-
- if ((!slot) || (!slot->ops))
- return false;
if (slot->ops->get_adapter_status)
return true;
return false;
}
-static bool has_test_file(struct pci_slot *pci_slot)
+static bool has_test_file(struct hotplug_slot *slot)
{
- struct hotplug_slot *slot = pci_slot->hotplug;
-
- if ((!slot) || (!slot->ops))
- return false;
if (slot->ops->hardware_test)
return true;
return false;
}
-static int fs_add_slot(struct pci_slot *pci_slot)
+static int fs_add_slot(struct hotplug_slot *slot, struct pci_slot *pci_slot)
{
+ struct kobject *kobj;
int retval = 0;
/* Create symbolic link to the hotplug driver module */
- pci_hp_create_module_link(pci_slot);
+ kobj = kset_find_obj(module_kset, slot->mod_name);
+ if (kobj) {
+ retval = sysfs_create_link(&pci_slot->kobj, kobj, "module");
+ if (retval)
+ dev_err(&pci_slot->bus->dev,
+ "Error creating sysfs link (%d)\n", retval);
+ kobject_put(kobj);
+ }
- if (has_power_file(pci_slot)) {
+ if (has_power_file(slot)) {
retval = sysfs_create_file(&pci_slot->kobj,
&hotplug_slot_attr_power.attr);
if (retval)
goto exit_power;
}
- if (has_attention_file(pci_slot)) {
+ if (has_attention_file(slot)) {
retval = sysfs_create_file(&pci_slot->kobj,
&hotplug_slot_attr_attention.attr);
if (retval)
goto exit_attention;
}
- if (has_latch_file(pci_slot)) {
+ if (has_latch_file(slot)) {
retval = sysfs_create_file(&pci_slot->kobj,
&hotplug_slot_attr_latch.attr);
if (retval)
goto exit_latch;
}
- if (has_adapter_file(pci_slot)) {
+ if (has_adapter_file(slot)) {
retval = sysfs_create_file(&pci_slot->kobj,
&hotplug_slot_attr_presence.attr);
if (retval)
goto exit_adapter;
}
- if (has_test_file(pci_slot)) {
+ if (has_test_file(slot)) {
retval = sysfs_create_file(&pci_slot->kobj,
&hotplug_slot_attr_test.attr);
if (retval)
@@ -334,62 +296,51 @@ static int fs_add_slot(struct pci_slot *pci_slot)
goto exit;
exit_test:
- if (has_adapter_file(pci_slot))
+ if (has_adapter_file(slot))
sysfs_remove_file(&pci_slot->kobj,
&hotplug_slot_attr_presence.attr);
exit_adapter:
- if (has_latch_file(pci_slot))
+ if (has_latch_file(slot))
sysfs_remove_file(&pci_slot->kobj, &hotplug_slot_attr_latch.attr);
exit_latch:
- if (has_attention_file(pci_slot))
+ if (has_attention_file(slot))
sysfs_remove_file(&pci_slot->kobj,
&hotplug_slot_attr_attention.attr);
exit_attention:
- if (has_power_file(pci_slot))
+ if (has_power_file(slot))
sysfs_remove_file(&pci_slot->kobj, &hotplug_slot_attr_power.attr);
exit_power:
- pci_hp_remove_module_link(pci_slot);
+ sysfs_remove_link(&pci_slot->kobj, "module");
exit:
return retval;
}
-static void fs_remove_slot(struct pci_slot *pci_slot)
+static void fs_remove_slot(struct hotplug_slot *slot, struct pci_slot *pci_slot)
{
- if (has_power_file(pci_slot))
+ if (has_power_file(slot))
sysfs_remove_file(&pci_slot->kobj, &hotplug_slot_attr_power.attr);
- if (has_attention_file(pci_slot))
+ if (has_attention_file(slot))
sysfs_remove_file(&pci_slot->kobj,
&hotplug_slot_attr_attention.attr);
- if (has_latch_file(pci_slot))
+ if (has_latch_file(slot))
sysfs_remove_file(&pci_slot->kobj, &hotplug_slot_attr_latch.attr);
- if (has_adapter_file(pci_slot))
+ if (has_adapter_file(slot))
sysfs_remove_file(&pci_slot->kobj,
&hotplug_slot_attr_presence.attr);
- if (has_test_file(pci_slot))
+ if (has_test_file(slot))
sysfs_remove_file(&pci_slot->kobj, &hotplug_slot_attr_test.attr);
- pci_hp_remove_module_link(pci_slot);
-}
-
-static struct hotplug_slot *get_slot_from_name(const char *name)
-{
- struct hotplug_slot *slot;
-
- list_for_each_entry(slot, &pci_hotplug_slot_list, slot_list) {
- if (strcmp(hotplug_slot_name(slot), name) == 0)
- return slot;
- }
- return NULL;
+ sysfs_remove_link(&pci_slot->kobj, "module");
}
/**
* __pci_hp_register - register a hotplug_slot with the PCI hotplug subsystem
- * @bus: bus this slot is on
* @slot: pointer to the &struct hotplug_slot to register
+ * @bus: bus this slot is on
* @devnr: device number
* @name: name registered with kobject core
* @owner: caller module owner
@@ -476,18 +427,19 @@ EXPORT_SYMBOL_GPL(__pci_hp_initialize);
*/
int pci_hp_add(struct hotplug_slot *slot)
{
- struct pci_slot *pci_slot = slot->pci_slot;
+ struct pci_slot *pci_slot;
int result;
- result = fs_add_slot(pci_slot);
+ if (WARN_ON(!slot))
+ return -EINVAL;
+
+ pci_slot = slot->pci_slot;
+
+ result = fs_add_slot(slot, pci_slot);
if (result)
return result;
kobject_uevent(&pci_slot->kobj, KOBJ_ADD);
- mutex_lock(&pci_hp_mutex);
- list_add(&slot->slot_list, &pci_hotplug_slot_list);
- mutex_unlock(&pci_hp_mutex);
- dbg("Added slot %s to the list\n", hotplug_slot_name(slot));
return 0;
}
EXPORT_SYMBOL_GPL(pci_hp_add);
@@ -498,8 +450,6 @@ EXPORT_SYMBOL_GPL(pci_hp_add);
*
* The @slot must have been registered with the pci hotplug subsystem
* previously with a call to pci_hp_register().
- *
- * Returns 0 if successful, anything else for an error.
*/
void pci_hp_deregister(struct hotplug_slot *slot)
{
@@ -513,27 +463,13 @@ EXPORT_SYMBOL_GPL(pci_hp_deregister);
* @slot: pointer to the &struct hotplug_slot to unpublish
*
* Remove a hotplug slot's sysfs interface.
- *
- * Returns 0 on success or a negative int on error.
*/
void pci_hp_del(struct hotplug_slot *slot)
{
- struct hotplug_slot *temp;
-
if (WARN_ON(!slot))
return;
- mutex_lock(&pci_hp_mutex);
- temp = get_slot_from_name(hotplug_slot_name(slot));
- if (WARN_ON(temp != slot)) {
- mutex_unlock(&pci_hp_mutex);
- return;
- }
-
- list_del(&slot->slot_list);
- mutex_unlock(&pci_hp_mutex);
- dbg("Removed slot %s from the list\n", hotplug_slot_name(slot));
- fs_remove_slot(slot->pci_slot);
+ fs_remove_slot(slot, slot->pci_slot);
}
EXPORT_SYMBOL_GPL(pci_hp_del);
@@ -545,8 +481,6 @@ EXPORT_SYMBOL_GPL(pci_hp_del);
* the driver may no longer invoke hotplug_slot_name() to get the slot's
* unique name. The driver no longer needs to handle a ->reset_slot callback
* from this point on.
- *
- * Returns 0 on success or a negative int on error.
*/
void pci_hp_destroy(struct hotplug_slot *slot)
{
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index e0a614acee05..273dd8c66f4e 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -46,6 +46,9 @@ extern int pciehp_poll_time;
/**
* struct controller - PCIe hotplug controller
* @pcie: pointer to the controller's PCIe port service device
+ * @dsn: cached copy of Device Serial Number of Function 0 in the hotplug slot
+ * (PCIe r6.2 sec 7.9.3); used to determine whether a hotplugged device
+ * was replaced with a different one during system sleep
* @slot_cap: cached copy of the Slot Capabilities register
* @inband_presence_disabled: In-Band Presence Detect Disable supported by
* controller and disabled per spec recommendation (PCIe r5.0, appendix I
@@ -87,6 +90,7 @@ extern int pciehp_poll_time;
*/
struct controller {
struct pcie_device *pcie;
+ u64 dsn;
u32 slot_cap; /* capabilities and quirks */
unsigned int inband_presence_disabled:1;
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index ddd55ad97a58..997841c69893 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -284,6 +284,35 @@ static int pciehp_suspend(struct pcie_device *dev)
return 0;
}
+static bool pciehp_device_replaced(struct controller *ctrl)
+{
+ struct pci_dev *pdev __free(pci_dev_put) = NULL;
+ u32 reg;
+
+ if (pci_dev_is_disconnected(ctrl->pcie->port))
+ return false;
+
+ pdev = pci_get_slot(ctrl->pcie->port->subordinate, PCI_DEVFN(0, 0));
+ if (!pdev)
+ return true;
+
+ if (pci_read_config_dword(pdev, PCI_VENDOR_ID, &reg) ||
+ reg != (pdev->vendor | (pdev->device << 16)) ||
+ pci_read_config_dword(pdev, PCI_CLASS_REVISION, &reg) ||
+ reg != (pdev->revision | (pdev->class << 8)))
+ return true;
+
+ if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
+ (pci_read_config_dword(pdev, PCI_SUBSYSTEM_VENDOR_ID, &reg) ||
+ reg != (pdev->subsystem_vendor | (pdev->subsystem_device << 16))))
+ return true;
+
+ if (pci_get_dsn(pdev) != ctrl->dsn)
+ return true;
+
+ return false;
+}
+
static int pciehp_resume_noirq(struct pcie_device *dev)
{
struct controller *ctrl = get_service_data(dev);
@@ -293,9 +322,23 @@ static int pciehp_resume_noirq(struct pcie_device *dev)
ctrl->cmd_busy = true;
/* clear spurious events from rediscovery of inserted card */
- if (ctrl->state == ON_STATE || ctrl->state == BLINKINGOFF_STATE)
+ if (ctrl->state == ON_STATE || ctrl->state == BLINKINGOFF_STATE) {
pcie_clear_hotplug_events(ctrl);
+ /*
+ * If hotplugged device was replaced with a different one
+ * during system sleep, mark the old device disconnected
+ * (to prevent its driver from accessing the new device)
+ * and synthesize a Presence Detect Changed event.
+ */
+ if (pciehp_device_replaced(ctrl)) {
+ ctrl_dbg(ctrl, "device replaced during system sleep\n");
+ pci_walk_bus(ctrl->pcie->port->subordinate,
+ pci_dev_set_disconnected, NULL);
+ pciehp_request(ctrl, PCI_EXP_SLTSTA_PDC);
+ }
+ }
+
return 0;
}
#endif
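pciehp_device_replaced() keys off the Device Serial Number cached in ctrl->dsn; pci_get_dsn() returns the two dwords of the DSN extended capability combined into a u64, or 0 when the capability is absent. A rough sketch of that read, with a hypothetical helper name (see pci_get_dsn() in drivers/pci/pci.c for the real implementation):

	static u64 dsn_read(struct pci_dev *pdev)	/* hypothetical */
	{
		u32 lo, hi;
		int pos;

		pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN);
		if (!pos)
			return 0;

		pci_read_config_dword(pdev, pos + 4, &lo);
		pci_read_config_dword(pdev, pos + 8, &hi);
		return ((u64)hi << 32) | lo;
	}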
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
index dcdbfcf404dd..d603a7aa7483 100644
--- a/drivers/pci/hotplug/pciehp_ctrl.c
+++ b/drivers/pci/hotplug/pciehp_ctrl.c
@@ -19,6 +19,8 @@
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
+
+#include "../pci.h"
#include "pciehp.h"
/* The following routines constitute the bulk of the
@@ -127,6 +129,9 @@ static void remove_board(struct controller *ctrl, bool safe_removal)
pciehp_set_indicators(ctrl, PCI_EXP_SLTCTL_PWR_IND_OFF,
INDICATOR_NOOP);
+
+ /* Don't carry LBMS indications across */
+ pcie_reset_lbms_count(ctrl->pcie->port);
}
static int pciehp_enable_slot(struct controller *ctrl);
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index b1d0a1b3917d..8a09fb6083e2 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -292,7 +292,7 @@ int pciehp_check_link_status(struct controller *ctrl)
{
struct pci_dev *pdev = ctrl_dev(ctrl);
bool found;
- u16 lnk_status;
+ u16 lnk_status, linksta2;
if (!pcie_wait_for_link(pdev, true)) {
ctrl_info(ctrl, "Slot(%s): No link\n", slot_name(ctrl));
@@ -319,7 +319,8 @@ int pciehp_check_link_status(struct controller *ctrl)
return -1;
}
- pcie_update_link_speed(ctrl->pcie->port->subordinate, lnk_status);
+ pcie_capability_read_word(pdev, PCI_EXP_LNKSTA2, &linksta2);
+ __pcie_update_link_speed(ctrl->pcie->port->subordinate, lnk_status, linksta2);
if (!found) {
ctrl_info(ctrl, "Slot(%s): No device found\n",
@@ -430,7 +431,7 @@ void pciehp_get_latch_status(struct controller *ctrl, u8 *status)
* removed immediately after the check so the caller may need to take
* this into account.
*
- * It the hotplug controller itself is not available anymore returns
+ * If the hotplug controller itself is not available anymore returns
* %-ENODEV.
*/
int pciehp_card_present(struct controller *ctrl)
@@ -485,7 +486,9 @@ int pciehp_set_raw_indicator_status(struct hotplug_slot *hotplug_slot,
struct pci_dev *pdev = ctrl_dev(ctrl);
pci_config_pm_runtime_get(pdev);
- pcie_write_cmd_nowait(ctrl, FIELD_PREP(PCI_EXP_SLTCTL_AIC, status),
+
+ /* Attention and Power Indicator Control bits are supported */
+ pcie_write_cmd_nowait(ctrl, FIELD_PREP(PCI_EXP_SLTCTL_AIC | PCI_EXP_SLTCTL_PIC, status),
PCI_EXP_SLTCTL_AIC | PCI_EXP_SLTCTL_PIC);
pci_config_pm_runtime_put(pdev);
return 0;
@@ -840,7 +843,9 @@ void pcie_enable_interrupt(struct controller *ctrl)
{
u16 mask;
- mask = PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_DLLSCE;
+ mask = PCI_EXP_SLTCTL_DLLSCE;
+ if (!pciehp_poll_mode)
+ mask |= PCI_EXP_SLTCTL_HPIE;
pcie_write_cmd(ctrl, mask, mask);
}
@@ -1055,6 +1060,11 @@ struct controller *pcie_init(struct pcie_device *dev)
}
}
+ pdev = pci_get_slot(subordinate, PCI_DEVFN(0, 0));
+ if (pdev)
+ ctrl->dsn = pci_get_dsn(pdev);
+ pci_dev_put(pdev);
+
return ctrl;
}
diff --git a/drivers/pci/hotplug/pciehp_pci.c b/drivers/pci/hotplug/pciehp_pci.c
index ad12515a4a12..65e50bee1a8c 100644
--- a/drivers/pci/hotplug/pciehp_pci.c
+++ b/drivers/pci/hotplug/pciehp_pci.c
@@ -72,6 +72,10 @@ int pciehp_configure_device(struct controller *ctrl)
pci_bus_add_devices(parent);
down_read_nested(&ctrl->reset_lock, ctrl->depth);
+ dev = pci_get_slot(parent, PCI_DEVFN(0, 0));
+ ctrl->dsn = pci_get_dsn(dev);
+ pci_dev_put(dev);
+
out:
pci_unlock_rescan_remove();
return ret;
diff --git a/drivers/pci/hotplug/pnv_php.c b/drivers/pci/hotplug/pnv_php.c
index 694349be9d0a..573a41869c15 100644
--- a/drivers/pci/hotplug/pnv_php.c
+++ b/drivers/pci/hotplug/pnv_php.c
@@ -40,7 +40,6 @@ static void pnv_php_disable_irq(struct pnv_php_slot *php_slot,
bool disable_device)
{
struct pci_dev *pdev = php_slot->pdev;
- int irq = php_slot->irq;
u16 ctrl;
if (php_slot->irq > 0) {
@@ -59,7 +58,7 @@ static void pnv_php_disable_irq(struct pnv_php_slot *php_slot,
php_slot->wq = NULL;
}
- if (disable_device || irq > 0) {
+ if (disable_device) {
if (pdev->msix_enabled)
pci_disable_msix(pdev);
else if (pdev->msi_enabled)
diff --git a/drivers/pci/hotplug/s390_pci_hpc.c b/drivers/pci/hotplug/s390_pci_hpc.c
index 7333b305f2a5..d9996516f49e 100644
--- a/drivers/pci/hotplug/s390_pci_hpc.c
+++ b/drivers/pci/hotplug/s390_pci_hpc.c
@@ -59,16 +59,15 @@ static int disable_slot(struct hotplug_slot *hotplug_slot)
pdev = pci_get_slot(zdev->zbus->bus, zdev->devfn);
if (pdev && pci_num_vf(pdev)) {
- pci_dev_put(pdev);
rc = -EBUSY;
goto out;
}
rc = zpci_deconfigure_device(zdev);
out:
- mutex_unlock(&zdev->state_lock);
if (pdev)
pci_dev_put(pdev);
+ mutex_unlock(&zdev->state_lock);
return rc;
}
@@ -112,7 +111,7 @@ static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
- /* if the slot exits it always contains a function */
+ /* if the slot exists it always contains a function */
*value = 1;
return 0;
}
diff --git a/drivers/pci/hotplug/shpchp.h b/drivers/pci/hotplug/shpchp.h
index 3a97f455336e..a425530e0939 100644
--- a/drivers/pci/hotplug/shpchp.h
+++ b/drivers/pci/hotplug/shpchp.h
@@ -33,24 +33,8 @@ extern bool shpchp_poll_mode;
extern int shpchp_poll_time;
extern bool shpchp_debug;
-#define dbg(format, arg...) \
-do { \
- if (shpchp_debug) \
- printk(KERN_DEBUG "%s: " format, MY_NAME, ## arg); \
-} while (0)
-#define err(format, arg...) \
- printk(KERN_ERR "%s: " format, MY_NAME, ## arg)
-#define info(format, arg...) \
- printk(KERN_INFO "%s: " format, MY_NAME, ## arg)
-#define warn(format, arg...) \
- printk(KERN_WARNING "%s: " format, MY_NAME, ## arg)
-
#define ctrl_dbg(ctrl, format, arg...) \
- do { \
- if (shpchp_debug) \
- pci_printk(KERN_DEBUG, ctrl->pci_dev, \
- format, ## arg); \
- } while (0)
+ pci_dbg(ctrl->pci_dev, format, ## arg)
#define ctrl_err(ctrl, format, arg...) \
pci_err(ctrl->pci_dev, format, ## arg)
#define ctrl_info(ctrl, format, arg...) \
@@ -72,7 +56,6 @@ struct slot {
u8 latch_save;
u8 pwr_save;
struct controller *ctrl;
- const struct hpc_ops *hpc_ops;
struct hotplug_slot hotplug_slot;
struct list_head slot_list;
struct delayed_work work; /* work for button event */
@@ -94,7 +77,6 @@ struct controller {
int slot_num_inc; /* 1 or -1 */
struct pci_dev *pci_dev;
struct list_head slot_list;
- const struct hpc_ops *hpc_ops;
wait_queue_head_t queue; /* sleep & wake process */
u8 slot_device_offset;
u32 pcix_misc2_reg; /* for amd pogo errata */
@@ -300,24 +282,22 @@ static inline void amd_pogo_errata_restore_misc_reg(struct slot *p_slot)
pci_write_config_dword(p_slot->ctrl->pci_dev, PCIX_MISCII_OFFSET, pcix_misc2_temp);
}
-struct hpc_ops {
- int (*power_on_slot)(struct slot *slot);
- int (*slot_enable)(struct slot *slot);
- int (*slot_disable)(struct slot *slot);
- int (*set_bus_speed_mode)(struct slot *slot, enum pci_bus_speed speed);
- int (*get_power_status)(struct slot *slot, u8 *status);
- int (*get_attention_status)(struct slot *slot, u8 *status);
- int (*set_attention_status)(struct slot *slot, u8 status);
- int (*get_latch_status)(struct slot *slot, u8 *status);
- int (*get_adapter_status)(struct slot *slot, u8 *status);
- int (*get_adapter_speed)(struct slot *slot, enum pci_bus_speed *speed);
- int (*get_prog_int)(struct slot *slot, u8 *prog_int);
- int (*query_power_fault)(struct slot *slot);
- void (*green_led_on)(struct slot *slot);
- void (*green_led_off)(struct slot *slot);
- void (*green_led_blink)(struct slot *slot);
- void (*release_ctlr)(struct controller *ctrl);
- int (*check_cmd_status)(struct controller *ctrl);
-};
+int shpchp_power_on_slot(struct slot *slot);
+int shpchp_slot_enable(struct slot *slot);
+int shpchp_slot_disable(struct slot *slot);
+int shpchp_set_bus_speed_mode(struct slot *slot, enum pci_bus_speed speed);
+int shpchp_get_power_status(struct slot *slot, u8 *status);
+int shpchp_get_attention_status(struct slot *slot, u8 *status);
+int shpchp_set_attention_status(struct slot *slot, u8 status);
+int shpchp_get_latch_status(struct slot *slot, u8 *status);
+int shpchp_get_adapter_status(struct slot *slot, u8 *status);
+int shpchp_get_adapter_speed(struct slot *slot, enum pci_bus_speed *speed);
+int shpchp_get_prog_int(struct slot *slot, u8 *prog_int);
+int shpchp_query_power_fault(struct slot *slot);
+void shpchp_green_led_on(struct slot *slot);
+void shpchp_green_led_off(struct slot *slot);
+void shpchp_green_led_blink(struct slot *slot);
+void shpchp_release_ctlr(struct controller *ctrl);
+int shpchp_check_cmd_status(struct controller *ctrl);
#endif /* _SHPCHP_H */
diff --git a/drivers/pci/hotplug/shpchp_core.c b/drivers/pci/hotplug/shpchp_core.c
index 56c7795ed890..0c341453afc6 100644
--- a/drivers/pci/hotplug/shpchp_core.c
+++ b/drivers/pci/hotplug/shpchp_core.c
@@ -22,7 +22,6 @@
#include "shpchp.h"
/* Global variables */
-bool shpchp_debug;
bool shpchp_poll_mode;
int shpchp_poll_time;
@@ -33,10 +32,8 @@ int shpchp_poll_time;
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
-module_param(shpchp_debug, bool, 0644);
module_param(shpchp_poll_mode, bool, 0644);
module_param(shpchp_poll_time, int, 0644);
-MODULE_PARM_DESC(shpchp_debug, "Debugging mode enabled or not");
MODULE_PARM_DESC(shpchp_poll_mode, "Using polling mechanism for hot-plug events or not");
MODULE_PARM_DESC(shpchp_poll_time, "Polling mechanism frequency, in seconds");
@@ -81,7 +78,6 @@ static int init_slots(struct controller *ctrl)
slot->ctrl = ctrl;
slot->bus = ctrl->pci_dev->subordinate->number;
slot->device = ctrl->slot_device_offset + i;
- slot->hpc_ops = ctrl->hpc_ops;
slot->number = ctrl->first_slot + (ctrl->slot_num_inc * i);
slot->wq = alloc_workqueue("shpchp-%d", 0, 0, slot->number);
@@ -150,7 +146,7 @@ static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 status)
__func__, slot_name(slot));
slot->attention_save = status;
- slot->hpc_ops->set_attention_status(slot, status);
+ shpchp_set_attention_status(slot, status);
return 0;
}
@@ -183,7 +179,7 @@ static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
__func__, slot_name(slot));
- retval = slot->hpc_ops->get_power_status(slot, value);
+ retval = shpchp_get_power_status(slot, value);
if (retval < 0)
*value = slot->pwr_save;
@@ -198,7 +194,7 @@ static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value)
ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
__func__, slot_name(slot));
- retval = slot->hpc_ops->get_attention_status(slot, value);
+ retval = shpchp_get_attention_status(slot, value);
if (retval < 0)
*value = slot->attention_save;
@@ -213,7 +209,7 @@ static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value)
ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
__func__, slot_name(slot));
- retval = slot->hpc_ops->get_latch_status(slot, value);
+ retval = shpchp_get_latch_status(slot, value);
if (retval < 0)
*value = slot->latch_save;
@@ -228,7 +224,7 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
__func__, slot_name(slot));
- retval = slot->hpc_ops->get_adapter_status(slot, value);
+ retval = shpchp_get_adapter_status(slot, value);
if (retval < 0)
*value = slot->presence_save;
@@ -293,7 +289,7 @@ static int shpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err_cleanup_slots:
cleanup_slots(ctrl);
err_out_release_ctlr:
- ctrl->hpc_ops->release_ctlr(ctrl);
+ shpchp_release_ctlr(ctrl);
err_out_free_ctrl:
kfree(ctrl);
err_out_none:
@@ -306,7 +302,7 @@ static void shpc_remove(struct pci_dev *dev)
dev->shpc_managed = 0;
shpchp_remove_ctrl_files(ctrl);
- ctrl->hpc_ops->release_ctlr(ctrl);
+ shpchp_release_ctlr(ctrl);
kfree(ctrl);
}
@@ -325,20 +321,12 @@ static struct pci_driver shpc_driver = {
static int __init shpcd_init(void)
{
- int retval;
-
- retval = pci_register_driver(&shpc_driver);
- dbg("%s: pci_register_driver = %d\n", __func__, retval);
- info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
-
- return retval;
+ return pci_register_driver(&shpc_driver);
}
static void __exit shpcd_cleanup(void)
{
- dbg("unload_shpchpd()\n");
pci_unregister_driver(&shpc_driver);
- info(DRIVER_DESC " version: " DRIVER_VERSION " unloaded\n");
}
module_init(shpcd_init);
diff --git a/drivers/pci/hotplug/shpchp_ctrl.c b/drivers/pci/hotplug/shpchp_ctrl.c
index 6a6705e0cf17..e6c6f23bae27 100644
--- a/drivers/pci/hotplug/shpchp_ctrl.c
+++ b/drivers/pci/hotplug/shpchp_ctrl.c
@@ -51,7 +51,7 @@ u8 shpchp_handle_attention_button(u8 hp_slot, struct controller *ctrl)
ctrl_dbg(ctrl, "Attention button interrupt received\n");
p_slot = shpchp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset);
- p_slot->hpc_ops->get_adapter_status(p_slot, &(p_slot->presence_save));
+ shpchp_get_adapter_status(p_slot, &p_slot->presence_save);
/*
* Button pressed - See if need to TAKE ACTION!!!
@@ -75,8 +75,8 @@ u8 shpchp_handle_switch_change(u8 hp_slot, struct controller *ctrl)
ctrl_dbg(ctrl, "Switch interrupt received\n");
p_slot = shpchp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset);
- p_slot->hpc_ops->get_adapter_status(p_slot, &(p_slot->presence_save));
- p_slot->hpc_ops->get_latch_status(p_slot, &getstatus);
+ shpchp_get_adapter_status(p_slot, &p_slot->presence_save);
+ shpchp_get_latch_status(p_slot, &getstatus);
ctrl_dbg(ctrl, "Card present %x Power status %x\n",
p_slot->presence_save, p_slot->pwr_save);
@@ -116,7 +116,7 @@ u8 shpchp_handle_presence_change(u8 hp_slot, struct controller *ctrl)
/*
* Save the presence state
*/
- p_slot->hpc_ops->get_adapter_status(p_slot, &(p_slot->presence_save));
+ shpchp_get_adapter_status(p_slot, &p_slot->presence_save);
if (p_slot->presence_save) {
/*
* Card Present
@@ -148,7 +148,7 @@ u8 shpchp_handle_power_fault(u8 hp_slot, struct controller *ctrl)
p_slot = shpchp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset);
- if (!(p_slot->hpc_ops->query_power_fault(p_slot))) {
+ if (!(shpchp_query_power_fault(p_slot))) {
/*
* Power fault Cleared
*/
@@ -181,7 +181,7 @@ static int change_bus_speed(struct controller *ctrl, struct slot *p_slot,
int rc = 0;
ctrl_dbg(ctrl, "Change speed to %d\n", speed);
- rc = p_slot->hpc_ops->set_bus_speed_mode(p_slot, speed);
+ rc = shpchp_set_bus_speed_mode(p_slot, speed);
if (rc) {
ctrl_err(ctrl, "%s: Issue of set bus speed mode command failed\n",
__func__);
@@ -241,14 +241,14 @@ static int board_added(struct slot *p_slot)
__func__, p_slot->device, ctrl->slot_device_offset, hp_slot);
/* Power on slot without connecting to bus */
- rc = p_slot->hpc_ops->power_on_slot(p_slot);
+ rc = shpchp_power_on_slot(p_slot);
if (rc) {
ctrl_err(ctrl, "Failed to power on slot\n");
return -1;
}
if ((ctrl->pci_dev->vendor == 0x8086) && (ctrl->pci_dev->device == 0x0332)) {
- rc = p_slot->hpc_ops->set_bus_speed_mode(p_slot, PCI_SPEED_33MHz);
+ rc = shpchp_set_bus_speed_mode(p_slot, PCI_SPEED_33MHz);
if (rc) {
ctrl_err(ctrl, "%s: Issue of set bus speed mode command failed\n",
__func__);
@@ -256,14 +256,14 @@ static int board_added(struct slot *p_slot)
}
/* turn on board, blink green LED, turn off Amber LED */
- rc = p_slot->hpc_ops->slot_enable(p_slot);
+ rc = shpchp_slot_enable(p_slot);
if (rc) {
ctrl_err(ctrl, "Issue of Slot Enable command failed\n");
return rc;
}
}
- rc = p_slot->hpc_ops->get_adapter_speed(p_slot, &asp);
+ rc = shpchp_get_adapter_speed(p_slot, &asp);
if (rc) {
ctrl_err(ctrl, "Can't get adapter speed or bus mode mismatch\n");
return WRONG_BUS_FREQUENCY;
@@ -285,7 +285,7 @@ static int board_added(struct slot *p_slot)
return rc;
/* turn on board, blink green LED, turn off Amber LED */
- rc = p_slot->hpc_ops->slot_enable(p_slot);
+ rc = shpchp_slot_enable(p_slot);
if (rc) {
ctrl_err(ctrl, "Issue of Slot Enable command failed\n");
return rc;
@@ -313,13 +313,13 @@ static int board_added(struct slot *p_slot)
p_slot->is_a_board = 0x01;
p_slot->pwr_save = 1;
- p_slot->hpc_ops->green_led_on(p_slot);
+ shpchp_green_led_on(p_slot);
return 0;
err_exit:
/* turn off slot, turn on Amber LED, turn off Green LED */
- rc = p_slot->hpc_ops->slot_disable(p_slot);
+ rc = shpchp_slot_disable(p_slot);
if (rc) {
ctrl_err(ctrl, "%s: Issue of Slot Disable command failed\n",
__func__);
@@ -352,14 +352,14 @@ static int remove_board(struct slot *p_slot)
p_slot->status = 0x01;
/* turn off slot, turn on Amber LED, turn off Green LED */
- rc = p_slot->hpc_ops->slot_disable(p_slot);
+ rc = shpchp_slot_disable(p_slot);
if (rc) {
ctrl_err(ctrl, "%s: Issue of Slot Disable command failed\n",
__func__);
return rc;
}
- rc = p_slot->hpc_ops->set_attention_status(p_slot, 0);
+ rc = shpchp_set_attention_status(p_slot, 0);
if (rc) {
ctrl_err(ctrl, "Issue of Set Attention command failed\n");
return rc;
@@ -401,7 +401,7 @@ static void shpchp_pushbutton_thread(struct work_struct *work)
case POWERON_STATE:
mutex_unlock(&p_slot->lock);
if (shpchp_enable_slot(p_slot))
- p_slot->hpc_ops->green_led_off(p_slot);
+ shpchp_green_led_off(p_slot);
mutex_lock(&p_slot->lock);
p_slot->state = STATIC_STATE;
break;
@@ -446,10 +446,10 @@ void shpchp_queue_pushbutton_work(struct work_struct *work)
static void update_slot_info(struct slot *slot)
{
- slot->hpc_ops->get_power_status(slot, &slot->pwr_save);
- slot->hpc_ops->get_attention_status(slot, &slot->attention_save);
- slot->hpc_ops->get_latch_status(slot, &slot->latch_save);
- slot->hpc_ops->get_adapter_status(slot, &slot->presence_save);
+ shpchp_get_power_status(slot, &slot->pwr_save);
+ shpchp_get_attention_status(slot, &slot->attention_save);
+ shpchp_get_latch_status(slot, &slot->latch_save);
+ shpchp_get_adapter_status(slot, &slot->presence_save);
}
/*
@@ -462,7 +462,7 @@ static void handle_button_press_event(struct slot *p_slot)
switch (p_slot->state) {
case STATIC_STATE:
- p_slot->hpc_ops->get_power_status(p_slot, &getstatus);
+ shpchp_get_power_status(p_slot, &getstatus);
if (getstatus) {
p_slot->state = BLINKINGOFF_STATE;
ctrl_info(ctrl, "PCI slot #%s - powering off due to button press\n",
@@ -473,8 +473,8 @@ static void handle_button_press_event(struct slot *p_slot)
slot_name(p_slot));
}
/* blink green LED and turn off amber */
- p_slot->hpc_ops->green_led_blink(p_slot);
- p_slot->hpc_ops->set_attention_status(p_slot, 0);
+ shpchp_green_led_blink(p_slot);
+ shpchp_set_attention_status(p_slot, 0);
queue_delayed_work(p_slot->wq, &p_slot->work, 5*HZ);
break;
@@ -489,10 +489,10 @@ static void handle_button_press_event(struct slot *p_slot)
slot_name(p_slot));
cancel_delayed_work(&p_slot->work);
if (p_slot->state == BLINKINGOFF_STATE)
- p_slot->hpc_ops->green_led_on(p_slot);
+ shpchp_green_led_on(p_slot);
else
- p_slot->hpc_ops->green_led_off(p_slot);
- p_slot->hpc_ops->set_attention_status(p_slot, 0);
+ shpchp_green_led_off(p_slot);
+ shpchp_set_attention_status(p_slot, 0);
ctrl_info(ctrl, "PCI slot #%s - action canceled due to button press\n",
slot_name(p_slot));
p_slot->state = STATIC_STATE;
@@ -526,8 +526,8 @@ static void interrupt_event_handler(struct work_struct *work)
break;
case INT_POWER_FAULT:
ctrl_dbg(p_slot->ctrl, "%s: Power fault\n", __func__);
- p_slot->hpc_ops->set_attention_status(p_slot, 1);
- p_slot->hpc_ops->green_led_off(p_slot);
+ shpchp_set_attention_status(p_slot, 1);
+ shpchp_green_led_off(p_slot);
break;
default:
update_slot_info(p_slot);
@@ -547,17 +547,17 @@ static int shpchp_enable_slot (struct slot *p_slot)
/* Check to see if (latch closed, card present, power off) */
mutex_lock(&p_slot->ctrl->crit_sect);
- rc = p_slot->hpc_ops->get_adapter_status(p_slot, &getstatus);
+ rc = shpchp_get_adapter_status(p_slot, &getstatus);
if (rc || !getstatus) {
ctrl_info(ctrl, "No adapter on slot(%s)\n", slot_name(p_slot));
goto out;
}
- rc = p_slot->hpc_ops->get_latch_status(p_slot, &getstatus);
+ rc = shpchp_get_latch_status(p_slot, &getstatus);
if (rc || getstatus) {
ctrl_info(ctrl, "Latch open on slot(%s)\n", slot_name(p_slot));
goto out;
}
- rc = p_slot->hpc_ops->get_power_status(p_slot, &getstatus);
+ rc = shpchp_get_power_status(p_slot, &getstatus);
if (rc || getstatus) {
ctrl_info(ctrl, "Already enabled on slot(%s)\n",
slot_name(p_slot));
@@ -567,10 +567,10 @@ static int shpchp_enable_slot (struct slot *p_slot)
p_slot->is_a_board = 1;
/* We have to save the presence info for these slots */
- p_slot->hpc_ops->get_adapter_status(p_slot, &(p_slot->presence_save));
- p_slot->hpc_ops->get_power_status(p_slot, &(p_slot->pwr_save));
+ shpchp_get_adapter_status(p_slot, &p_slot->presence_save);
+ shpchp_get_power_status(p_slot, &p_slot->pwr_save);
ctrl_dbg(ctrl, "%s: p_slot->pwr_save %x\n", __func__, p_slot->pwr_save);
- p_slot->hpc_ops->get_latch_status(p_slot, &getstatus);
+ shpchp_get_latch_status(p_slot, &getstatus);
if ((p_slot->ctrl->pci_dev->vendor == PCI_VENDOR_ID_AMD &&
p_slot->ctrl->pci_dev->device == PCI_DEVICE_ID_AMD_POGO_7458)
@@ -584,9 +584,8 @@ static int shpchp_enable_slot (struct slot *p_slot)
retval = board_added(p_slot);
if (retval) {
- p_slot->hpc_ops->get_adapter_status(p_slot,
- &(p_slot->presence_save));
- p_slot->hpc_ops->get_latch_status(p_slot, &getstatus);
+ shpchp_get_adapter_status(p_slot, &p_slot->presence_save);
+ shpchp_get_latch_status(p_slot, &getstatus);
}
update_slot_info(p_slot);
@@ -608,17 +607,17 @@ static int shpchp_disable_slot (struct slot *p_slot)
/* Check to see if (latch closed, card present, power on) */
mutex_lock(&p_slot->ctrl->crit_sect);
- rc = p_slot->hpc_ops->get_adapter_status(p_slot, &getstatus);
+ rc = shpchp_get_adapter_status(p_slot, &getstatus);
if (rc || !getstatus) {
ctrl_info(ctrl, "No adapter on slot(%s)\n", slot_name(p_slot));
goto out;
}
- rc = p_slot->hpc_ops->get_latch_status(p_slot, &getstatus);
+ rc = shpchp_get_latch_status(p_slot, &getstatus);
if (rc || getstatus) {
ctrl_info(ctrl, "Latch open on slot(%s)\n", slot_name(p_slot));
goto out;
}
- rc = p_slot->hpc_ops->get_power_status(p_slot, &getstatus);
+ rc = shpchp_get_power_status(p_slot, &getstatus);
if (rc || !getstatus) {
ctrl_info(ctrl, "Already disabled on slot(%s)\n",
slot_name(p_slot));
diff --git a/drivers/pci/hotplug/shpchp_hpc.c b/drivers/pci/hotplug/shpchp_hpc.c
index 48e4daefc44a..387b85585263 100644
--- a/drivers/pci/hotplug/shpchp_hpc.c
+++ b/drivers/pci/hotplug/shpchp_hpc.c
@@ -167,7 +167,6 @@
static irqreturn_t shpc_isr(int irq, void *dev_id);
static void start_int_poll_timer(struct controller *ctrl, int sec);
-static int hpc_check_cmd_status(struct controller *ctrl);
static inline u8 shpc_readb(struct controller *ctrl, int reg)
{
@@ -317,7 +316,7 @@ static int shpc_write_cmd(struct slot *slot, u8 t_slot, u8 cmd)
if (retval)
goto out;
- cmd_status = hpc_check_cmd_status(slot->ctrl);
+ cmd_status = shpchp_check_cmd_status(slot->ctrl);
if (cmd_status) {
ctrl_err(ctrl, "Failed to issued command 0x%x (error code = %d)\n",
cmd, cmd_status);
@@ -328,7 +327,7 @@ static int shpc_write_cmd(struct slot *slot, u8 t_slot, u8 cmd)
return retval;
}
-static int hpc_check_cmd_status(struct controller *ctrl)
+int shpchp_check_cmd_status(struct controller *ctrl)
{
int retval = 0;
u16 cmd_status = shpc_readw(ctrl, CMD_STATUS) & 0x000F;
@@ -357,7 +356,7 @@ static int hpc_check_cmd_status(struct controller *ctrl)
}
-static int hpc_get_attention_status(struct slot *slot, u8 *status)
+int shpchp_get_attention_status(struct slot *slot, u8 *status)
{
struct controller *ctrl = slot->ctrl;
u32 slot_reg = shpc_readl(ctrl, SLOT_REG(slot->hp_slot));
@@ -381,7 +380,7 @@ static int hpc_get_attention_status(struct slot *slot, u8 *status)
return 0;
}
-static int hpc_get_power_status(struct slot *slot, u8 *status)
+int shpchp_get_power_status(struct slot *slot, u8 *status)
{
struct controller *ctrl = slot->ctrl;
u32 slot_reg = shpc_readl(ctrl, SLOT_REG(slot->hp_slot));
@@ -406,7 +405,7 @@ static int hpc_get_power_status(struct slot *slot, u8 *status)
}
-static int hpc_get_latch_status(struct slot *slot, u8 *status)
+int shpchp_get_latch_status(struct slot *slot, u8 *status)
{
struct controller *ctrl = slot->ctrl;
u32 slot_reg = shpc_readl(ctrl, SLOT_REG(slot->hp_slot));
@@ -416,7 +415,7 @@ static int hpc_get_latch_status(struct slot *slot, u8 *status)
return 0;
}
-static int hpc_get_adapter_status(struct slot *slot, u8 *status)
+int shpchp_get_adapter_status(struct slot *slot, u8 *status)
{
struct controller *ctrl = slot->ctrl;
u32 slot_reg = shpc_readl(ctrl, SLOT_REG(slot->hp_slot));
@@ -427,7 +426,7 @@ static int hpc_get_adapter_status(struct slot *slot, u8 *status)
return 0;
}
-static int hpc_get_prog_int(struct slot *slot, u8 *prog_int)
+int shpchp_get_prog_int(struct slot *slot, u8 *prog_int)
{
struct controller *ctrl = slot->ctrl;
@@ -436,7 +435,7 @@ static int hpc_get_prog_int(struct slot *slot, u8 *prog_int)
return 0;
}
-static int hpc_get_adapter_speed(struct slot *slot, enum pci_bus_speed *value)
+int shpchp_get_adapter_speed(struct slot *slot, enum pci_bus_speed *value)
{
int retval = 0;
struct controller *ctrl = slot->ctrl;
@@ -444,7 +443,7 @@ static int hpc_get_adapter_speed(struct slot *slot, enum pci_bus_speed *value)
u8 m66_cap = !!(slot_reg & MHZ66_CAP);
u8 pi, pcix_cap;
- retval = hpc_get_prog_int(slot, &pi);
+ retval = shpchp_get_prog_int(slot, &pi);
if (retval)
return retval;
@@ -489,7 +488,7 @@ static int hpc_get_adapter_speed(struct slot *slot, enum pci_bus_speed *value)
return retval;
}
-static int hpc_query_power_fault(struct slot *slot)
+int shpchp_query_power_fault(struct slot *slot)
{
struct controller *ctrl = slot->ctrl;
u32 slot_reg = shpc_readl(ctrl, SLOT_REG(slot->hp_slot));
@@ -498,7 +497,7 @@ static int hpc_query_power_fault(struct slot *slot)
return !(slot_reg & POWER_FAULT);
}
-static int hpc_set_attention_status(struct slot *slot, u8 value)
+int shpchp_set_attention_status(struct slot *slot, u8 value)
{
u8 slot_cmd = 0;
@@ -520,22 +519,22 @@ static int hpc_set_attention_status(struct slot *slot, u8 value)
}
-static void hpc_set_green_led_on(struct slot *slot)
+void shpchp_green_led_on(struct slot *slot)
{
shpc_write_cmd(slot, slot->hp_slot, SET_PWR_ON);
}
-static void hpc_set_green_led_off(struct slot *slot)
+void shpchp_green_led_off(struct slot *slot)
{
shpc_write_cmd(slot, slot->hp_slot, SET_PWR_OFF);
}
-static void hpc_set_green_led_blink(struct slot *slot)
+void shpchp_green_led_blink(struct slot *slot)
{
shpc_write_cmd(slot, slot->hp_slot, SET_PWR_BLINK);
}
-static void hpc_release_ctlr(struct controller *ctrl)
+void shpchp_release_ctlr(struct controller *ctrl)
{
int i;
u32 slot_reg, serr_int;
@@ -565,7 +564,7 @@ static void hpc_release_ctlr(struct controller *ctrl)
shpc_writel(ctrl, SERR_INTR_ENABLE, serr_int);
if (shpchp_poll_mode)
- del_timer(&ctrl->poll_timer);
+ timer_delete(&ctrl->poll_timer);
else {
free_irq(ctrl->pci_dev->irq, ctrl);
pci_disable_msi(ctrl->pci_dev);
@@ -575,7 +574,7 @@ static void hpc_release_ctlr(struct controller *ctrl)
release_mem_region(ctrl->mmio_base, ctrl->mmio_size);
}
-static int hpc_power_on_slot(struct slot *slot)
+int shpchp_power_on_slot(struct slot *slot)
{
int retval;
@@ -586,7 +585,7 @@ static int hpc_power_on_slot(struct slot *slot)
return retval;
}
-static int hpc_slot_enable(struct slot *slot)
+int shpchp_slot_enable(struct slot *slot)
{
int retval;
@@ -599,7 +598,7 @@ static int hpc_slot_enable(struct slot *slot)
return retval;
}
-static int hpc_slot_disable(struct slot *slot)
+int shpchp_slot_disable(struct slot *slot)
{
int retval;
@@ -676,12 +675,12 @@ static int shpc_get_cur_bus_speed(struct controller *ctrl)
out:
bus->cur_bus_speed = bus_speed;
- dbg("Current bus speed = %d\n", bus_speed);
+ ctrl_dbg(ctrl, "Current bus speed = %d\n", bus_speed);
return retval;
}
-static int hpc_set_bus_speed_mode(struct slot *slot, enum pci_bus_speed value)
+int shpchp_set_bus_speed_mode(struct slot *slot, enum pci_bus_speed value)
{
int retval;
struct controller *ctrl = slot->ctrl;
@@ -871,28 +870,6 @@ static int shpc_get_max_bus_speed(struct controller *ctrl)
return retval;
}
-static const struct hpc_ops shpchp_hpc_ops = {
- .power_on_slot = hpc_power_on_slot,
- .slot_enable = hpc_slot_enable,
- .slot_disable = hpc_slot_disable,
- .set_bus_speed_mode = hpc_set_bus_speed_mode,
- .set_attention_status = hpc_set_attention_status,
- .get_power_status = hpc_get_power_status,
- .get_attention_status = hpc_get_attention_status,
- .get_latch_status = hpc_get_latch_status,
- .get_adapter_status = hpc_get_adapter_status,
-
- .get_adapter_speed = hpc_get_adapter_speed,
- .get_prog_int = hpc_get_prog_int,
-
- .query_power_fault = hpc_query_power_fault,
- .green_led_on = hpc_set_green_led_on,
- .green_led_off = hpc_set_green_led_off,
- .green_led_blink = hpc_set_green_led_blink,
-
- .release_ctlr = hpc_release_ctlr,
-};
-
int shpc_init(struct controller *ctrl, struct pci_dev *pdev)
{
int rc = -1, num_slots = 0;
@@ -978,8 +955,6 @@ int shpc_init(struct controller *ctrl, struct pci_dev *pdev)
/* Setup wait queue */
init_waitqueue_head(&ctrl->queue);
- ctrl->hpc_ops = &shpchp_hpc_ops;
-
/* Return PCI Controller Info */
slot_config = shpc_readl(ctrl, SLOT_CONFIG);
ctrl->slot_device_offset = (slot_config & FIRST_DEV_NUM) >> 8;
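Taken together, the shpchp hunks above devirtualize the driver: with only one backend left, the hpc_ops function-pointer table and its forward declaration are dropped, and callers invoke the shpchp_* functions directly, which the compiler can inline and type-check. A minimal standalone sketch of the pattern, with simplified types rather than the real kernel structures:

#include <stdio.h>

struct slot { int powered; };

/* The remaining (sole) backend implementation */
static int shpchp_get_power_status(struct slot *slot, unsigned char *status)
{
	*status = slot->powered;
	return 0;
}

/* Before: one level of indirection through a per-controller ops table */
struct hpc_ops {
	int (*get_power_status)(struct slot *, unsigned char *);
};
static const struct hpc_ops ops = {
	.get_power_status = shpchp_get_power_status,
};

int main(void)
{
	struct slot s = { .powered = 1 };
	unsigned char status;

	ops.get_power_status(&s, &status);	/* old: indirect call */
	shpchp_get_power_status(&s, &status);	/* new: direct call */
	printf("power status: %u\n", status);
	return 0;
}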
diff --git a/drivers/pci/iomap.c b/drivers/pci/iomap.c
index c9725428e387..fe706ed946df 100644
--- a/drivers/pci/iomap.c
+++ b/drivers/pci/iomap.c
@@ -9,6 +9,8 @@
#include <linux/export.h>
+#include "pci.h" /* for pci_bar_index_is_valid() */
+
/**
* pci_iomap_range - create a virtual mapping cookie for a PCI BAR
* @dev: PCI device that owns the BAR
@@ -23,18 +25,29 @@
*
* @maxlen specifies the maximum length to map. If you want to get access to
* the complete BAR from offset to the end, pass %0 here.
+ *
+ * NOTE:
+ * This function is never managed, even if the device was initialized with
+ * pcim_enable_device().
* */
void __iomem *pci_iomap_range(struct pci_dev *dev,
int bar,
unsigned long offset,
unsigned long maxlen)
{
- resource_size_t start = pci_resource_start(dev, bar);
- resource_size_t len = pci_resource_len(dev, bar);
- unsigned long flags = pci_resource_flags(dev, bar);
+ resource_size_t start, len;
+ unsigned long flags;
+
+ if (!pci_bar_index_is_valid(bar))
+ return NULL;
+
+ start = pci_resource_start(dev, bar);
+ len = pci_resource_len(dev, bar);
+ flags = pci_resource_flags(dev, bar);
if (len <= offset || !start)
return NULL;
+
len -= offset;
start += offset;
if (maxlen && len > maxlen)
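pci_bar_index_is_valid() is provided by the local "pci.h" header included above. Its implementation is not part of this hunk; the call sites only rely on it rejecting indices that fall outside the device's resource array, roughly (sketch, assuming PCI_NUM_RESOURCES bounds the array):

/* Sketch: a BAR index must address a slot in pdev->resource[] */
static inline bool pci_bar_index_is_valid(int bar)
{
	return bar >= 0 && bar < PCI_NUM_RESOURCES;
}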
@@ -63,22 +76,30 @@ EXPORT_SYMBOL(pci_iomap_range);
*
* @maxlen specifies the maximum length to map. If you want to get access to
* the complete BAR from offset to the end, pass %0 here.
+ *
+ * NOTE:
+ * This function is never managed, even if the device was initialized with
+ * pcim_enable_device().
* */
void __iomem *pci_iomap_wc_range(struct pci_dev *dev,
int bar,
unsigned long offset,
unsigned long maxlen)
{
- resource_size_t start = pci_resource_start(dev, bar);
- resource_size_t len = pci_resource_len(dev, bar);
- unsigned long flags = pci_resource_flags(dev, bar);
+ resource_size_t start, len;
+ unsigned long flags;
-
- if (flags & IORESOURCE_IO)
+ if (!pci_bar_index_is_valid(bar))
return NULL;
+ start = pci_resource_start(dev, bar);
+ len = pci_resource_len(dev, bar);
+ flags = pci_resource_flags(dev, bar);
+
if (len <= offset || !start)
return NULL;
+ if (flags & IORESOURCE_IO)
+ return NULL;
len -= offset;
start += offset;
@@ -106,6 +127,10 @@ EXPORT_SYMBOL_GPL(pci_iomap_wc_range);
*
* @maxlen specifies the maximum length to map. If you want to get access to
* the complete BAR without checking for its length first, pass %0 here.
+ *
+ * NOTE:
+ * This function is never managed, even if the device was initialized with
+ * pcim_enable_device(). If you need automatic cleanup, use pcim_iomap().
* */
void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
{
@@ -127,6 +152,10 @@ EXPORT_SYMBOL(pci_iomap);
*
* @maxlen specifies the maximum length to map. If you want to get access to
* the complete BAR without checking for its length first, pass %0 here.
+ *
+ * NOTE:
+ * This function is never managed, even if the device was initialized with
+ * pcim_enable_device().
* */
void __iomem *pci_iomap_wc(struct pci_dev *dev, int bar, unsigned long maxlen)
{
@@ -140,7 +169,7 @@ EXPORT_SYMBOL_GPL(pci_iomap_wc);
* the different IOMAP ranges.
*
* But if the architecture does not use the generic iomap code, and if
- * it has _not_ defined it's own private pci_iounmap function, we define
+ * it has _not_ defined its own private pci_iounmap function, we define
* it here.
*
* NOTE! This default implementation assumes that if the architecture
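The NOTE blocks added above all make the same point: pci_iomap*() mappings are never devres-managed, so the caller owns the teardown. A hedged usage sketch contrasting the unmanaged and managed styles:

/* Sketch: unmanaged vs. managed BAR 0 mapping (error handling trimmed) */
static int map_bar0(struct pci_dev *pdev)
{
	void __iomem *regs;

	/* Unmanaged: caller must pci_iounmap() on every exit path */
	regs = pci_iomap(pdev, 0, 0);
	if (!regs)
		return -ENOMEM;
	/* ... register access ... */
	pci_iounmap(pdev, regs);

	/* Managed: released automatically when the driver unbinds */
	regs = pcim_iomap(pdev, 0, 0);
	return regs ? 0 : -ENOMEM;
}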
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index aaa33e8dc4c9..10693b5d7eb6 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -285,23 +285,16 @@ const struct attribute_group sriov_vf_dev_attr_group = {
.is_visible = sriov_vf_attrs_are_visible,
};
-int pci_iov_add_virtfn(struct pci_dev *dev, int id)
+static struct pci_dev *pci_iov_scan_device(struct pci_dev *dev, int id,
+ struct pci_bus *bus)
{
- int i;
- int rc = -ENOMEM;
- u64 size;
- struct pci_dev *virtfn;
- struct resource *res;
struct pci_sriov *iov = dev->sriov;
- struct pci_bus *bus;
-
- bus = virtfn_add_bus(dev->bus, pci_iov_virtfn_bus(dev, id));
- if (!bus)
- goto failed;
+ struct pci_dev *virtfn;
+ int rc;
virtfn = pci_alloc_dev(bus);
if (!virtfn)
- goto failed0;
+ return ERR_PTR(-ENOMEM);
virtfn->devfn = pci_iov_virtfn_devfn(dev, id);
virtfn->vendor = dev->vendor;
@@ -314,8 +307,35 @@ int pci_iov_add_virtfn(struct pci_dev *dev, int id)
pci_read_vf_config_common(virtfn);
rc = pci_setup_device(virtfn);
- if (rc)
- goto failed1;
+ if (rc) {
+ pci_dev_put(dev);
+ pci_bus_put(virtfn->bus);
+ kfree(virtfn);
+ return ERR_PTR(rc);
+ }
+
+ return virtfn;
+}
+
+int pci_iov_add_virtfn(struct pci_dev *dev, int id)
+{
+ struct pci_bus *bus;
+ struct pci_dev *virtfn;
+ struct resource *res;
+ int rc, i;
+ u64 size;
+
+ bus = virtfn_add_bus(dev->bus, pci_iov_virtfn_bus(dev, id));
+ if (!bus) {
+ rc = -ENOMEM;
+ goto failed;
+ }
+
+ virtfn = pci_iov_scan_device(dev, id, bus);
+ if (IS_ERR(virtfn)) {
+ rc = PTR_ERR(virtfn);
+ goto failed0;
+ }
virtfn->dev.parent = dev->dev.parent;
virtfn->multifunction = 0;
@@ -327,8 +347,8 @@ int pci_iov_add_virtfn(struct pci_dev *dev, int id)
virtfn->resource[i].name = pci_name(virtfn);
virtfn->resource[i].flags = res->flags;
size = pci_iov_resource_size(dev, i + PCI_IOV_RESOURCES);
- virtfn->resource[i].start = res->start + size * id;
- virtfn->resource[i].end = virtfn->resource[i].start + size - 1;
+ resource_set_range(&virtfn->resource[i],
+ res->start + size * id, size);
rc = request_resource(res, &virtfn->resource[i]);
BUG_ON(rc);
}
@@ -747,6 +767,7 @@ static int sriov_init(struct pci_dev *dev, int pos)
struct resource *res;
const char *res_name;
struct pci_dev *pdev;
+ u32 sriovbars[PCI_SRIOV_NUM_BARS];
pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &ctrl);
if (ctrl & PCI_SRIOV_CTRL_VFE) {
@@ -783,6 +804,10 @@ found:
if (!iov)
return -ENOMEM;
+ /* Sizing SR-IOV BARs with VF Enable cleared - no decode */
+ __pci_size_stdbars(dev, PCI_SRIOV_NUM_BARS,
+ pos + PCI_SRIOV_BAR, sriovbars);
+
nres = 0;
for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
res = &dev->resource[i + PCI_IOV_RESOURCES];
@@ -796,7 +821,8 @@ found:
bar64 = (res->flags & IORESOURCE_MEM_64) ? 1 : 0;
else
bar64 = __pci_read_base(dev, pci_bar_unknown, res,
- pos + PCI_SRIOV_BAR + i * 4);
+ pos + PCI_SRIOV_BAR + i * 4,
+ &sriovbars[i]);
if (!res->flags)
continue;
if (resource_size(res) & (PAGE_SIZE - 1)) {
@@ -804,7 +830,7 @@ found:
goto failed;
}
iov->barsz[i] = resource_size(res);
- res->end = res->start + resource_size(res) * total - 1;
+ resource_set_size(res, resource_size(res) * total);
pci_info(dev, "%s %pR: contains BAR %d for %d VFs\n",
res_name, res, i, total);
i += bar64;
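Both iov.c hunks replace open-coded start/end arithmetic with the resource helpers from <linux/ioport.h>. Their semantics are equivalent to the removed lines; roughly (sketch):

/* What the two helpers do with the inclusive [start, end] pair */
static inline void resource_set_range_sketch(struct resource *res,
					     resource_size_t start,
					     resource_size_t size)
{
	res->start = start;
	res->end = start + size - 1;	/* end is inclusive */
}

static inline void resource_set_size_sketch(struct resource *res,
					    resource_size_t size)
{
	res->end = res->start + size - 1;	/* keep start, resize */
}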
@@ -946,7 +972,7 @@ void pci_iov_remove(struct pci_dev *dev)
void pci_iov_update_resource(struct pci_dev *dev, int resno)
{
struct pci_sriov *iov = dev->is_physfn ? dev->sriov : NULL;
- struct resource *res = dev->resource + resno;
+ struct resource *res = pci_resource_n(dev, resno);
int vf_bar = resno - PCI_IOV_RESOURCES;
struct pci_bus_region region;
u16 cmd;
diff --git a/drivers/pci/msi/api.c b/drivers/pci/msi/api.c
index be679aa5db64..818d55fbad0d 100644
--- a/drivers/pci/msi/api.c
+++ b/drivers/pci/msi/api.c
@@ -53,10 +53,9 @@ void pci_disable_msi(struct pci_dev *dev)
if (!pci_msi_enabled() || !dev || !dev->msi_enabled)
return;
- msi_lock_descs(&dev->dev);
+ guard(msi_descs_lock)(&dev->dev);
pci_msi_shutdown(dev);
pci_free_msi_irqs(dev);
- msi_unlock_descs(&dev->dev);
}
EXPORT_SYMBOL(pci_disable_msi);
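guard(msi_descs_lock)(...) is the scope-based locking idiom from <linux/cleanup.h>: the lock is released automatically when the enclosing scope ends, so the explicit unlock (and any goto-unlock error path) disappears. The resulting shape, in isolation (sketch):

/* guard() keeps the lock held for exactly the scope's lifetime */
static void disable_msi_sketch(struct pci_dev *dev)
{
	guard(msi_descs_lock)(&dev->dev);

	pci_msi_shutdown(dev);
	pci_free_msi_irqs(dev);
}	/* msi_descs_lock is dropped here, on every return path */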
@@ -162,7 +161,7 @@ struct msi_map pci_msix_alloc_irq_at(struct pci_dev *dev, unsigned int index,
EXPORT_SYMBOL_GPL(pci_msix_alloc_irq_at);
/**
- * pci_msix_free_irq - Free an interrupt on a PCI/MSIX interrupt domain
+ * pci_msix_free_irq - Free an interrupt on a PCI/MSI-X interrupt domain
*
* @dev: The PCI device to operate on
* @map: A struct msi_map describing the interrupt to free
@@ -196,10 +195,9 @@ void pci_disable_msix(struct pci_dev *dev)
if (!pci_msi_enabled() || !dev || !dev->msix_enabled)
return;
- msi_lock_descs(&dev->dev);
+ guard(msi_descs_lock)(&dev->dev);
pci_msix_shutdown(dev);
pci_free_msi_irqs(dev);
- msi_unlock_descs(&dev->dev);
}
EXPORT_SYMBOL(pci_disable_msix);
@@ -213,8 +211,8 @@ EXPORT_SYMBOL(pci_disable_msix);
* * %PCI_IRQ_MSIX Allow trying MSI-X vector allocations
* * %PCI_IRQ_MSI Allow trying MSI vector allocations
*
- * * %PCI_IRQ_LEGACY Allow trying legacy INTx interrupts, if
- * and only if @min_vecs == 1
+ * * %PCI_IRQ_INTX Allow trying INTx interrupts, if and
+ * only if @min_vecs == 1
*
* * %PCI_IRQ_AFFINITY Auto-manage IRQs affinity by spreading
* the vectors around available CPUs
@@ -279,8 +277,8 @@ int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
return nvecs;
}
- /* use legacy IRQ if allowed */
- if (flags & PCI_IRQ_LEGACY) {
+ /* use INTx IRQ if allowed */
+ if (flags & PCI_IRQ_INTX) {
if (min_vecs == 1 && dev->irq) {
/*
* Invoke the affinity spreading logic to ensure that
@@ -366,56 +364,6 @@ const struct cpumask *pci_irq_get_affinity(struct pci_dev *dev, int nr)
EXPORT_SYMBOL(pci_irq_get_affinity);
/**
- * pci_ims_alloc_irq - Allocate an interrupt on a PCI/IMS interrupt domain
- * @dev: The PCI device to operate on
- * @icookie: Pointer to an IMS implementation specific cookie for this
- * IMS instance (PASID, queue ID, pointer...).
- * The cookie content is copied into the MSI descriptor for the
- * interrupt chip callbacks or domain specific setup functions.
- * @affdesc: Optional pointer to an interrupt affinity descriptor
- *
- * There is no index for IMS allocations as IMS is an implementation
- * specific storage and does not have any direct associations between
- * index, which might be a pure software construct, and device
- * functionality. This association is established by the driver either via
- * the index - if there is a hardware table - or in case of purely software
- * managed IMS implementation the association happens via the
- * irq_write_msi_msg() callback of the implementation specific interrupt
- * chip, which utilizes the provided @icookie to store the MSI message in
- * the appropriate place.
- *
- * Return: A struct msi_map
- *
- * On success msi_map::index contains the allocated index (>= 0) and
- * msi_map::virq the allocated Linux interrupt number (> 0).
- *
- * On fail msi_map::index contains the error code and msi_map::virq
- * is set to 0.
- */
-struct msi_map pci_ims_alloc_irq(struct pci_dev *dev, union msi_instance_cookie *icookie,
- const struct irq_affinity_desc *affdesc)
-{
- return msi_domain_alloc_irq_at(&dev->dev, MSI_SECONDARY_DOMAIN, MSI_ANY_INDEX,
- affdesc, icookie);
-}
-EXPORT_SYMBOL_GPL(pci_ims_alloc_irq);
-
-/**
- * pci_ims_free_irq - Allocate an interrupt on a PCI/IMS interrupt domain
- * which was allocated via pci_ims_alloc_irq()
- * @dev: The PCI device to operate on
- * @map: A struct msi_map describing the interrupt to free as
- * returned from pci_ims_alloc_irq()
- */
-void pci_ims_free_irq(struct pci_dev *dev, struct msi_map map)
-{
- if (WARN_ON_ONCE(map.index < 0 || map.virq <= 0))
- return;
- msi_domain_free_irqs_range(&dev->dev, MSI_SECONDARY_DOMAIN, map.index, map.index);
-}
-EXPORT_SYMBOL_GPL(pci_ims_free_irq);
-
-/**
* pci_free_irq_vectors() - Free previously allocated IRQs for a device
* @dev: the PCI device to operate on
*
@@ -451,7 +399,7 @@ EXPORT_SYMBOL_GPL(pci_restore_msi_state);
* Return: true if MSI has not been globally disabled through ACPI FADT,
* PCI bridge quirks, or the "pci=nomsi" kernel command-line option.
*/
-int pci_msi_enabled(void)
+bool pci_msi_enabled(void)
{
return pci_msi_enable;
}
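With PCI_IRQ_LEGACY renamed to PCI_IRQ_INTX, a typical caller that degrades gracefully from MSI-X down to a single INTx vector looks roughly like this (sketch):

/* Request 1..8 vectors, falling back MSI-X -> MSI -> INTx */
static int request_vectors(struct pci_dev *pdev)
{
	int nvec = pci_alloc_irq_vectors(pdev, 1, 8,
					 PCI_IRQ_MSIX | PCI_IRQ_MSI |
					 PCI_IRQ_INTX);
	if (nvec < 0)
		return nvec;

	/* pci_irq_vector() maps vector index 0..nvec-1 to a Linux IRQ */
	return pci_irq_vector(pdev, 0);
}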
diff --git a/drivers/pci/msi/irqdomain.c b/drivers/pci/msi/irqdomain.c
index cfd84a899c82..d7ba8795d60f 100644
--- a/drivers/pci/msi/irqdomain.c
+++ b/drivers/pci/msi/irqdomain.c
@@ -148,17 +148,35 @@ static void pci_device_domain_set_desc(msi_alloc_info_t *arg, struct msi_desc *d
arg->hwirq = desc->msi_index;
}
+static __always_inline void cond_mask_parent(struct irq_data *data)
+{
+ struct msi_domain_info *info = data->domain->host_data;
+
+ if (unlikely(info->flags & MSI_FLAG_PCI_MSI_MASK_PARENT))
+ irq_chip_mask_parent(data);
+}
+
+static __always_inline void cond_unmask_parent(struct irq_data *data)
+{
+ struct msi_domain_info *info = data->domain->host_data;
+
+ if (unlikely(info->flags & MSI_FLAG_PCI_MSI_MASK_PARENT))
+ irq_chip_unmask_parent(data);
+}
+
static void pci_irq_mask_msi(struct irq_data *data)
{
struct msi_desc *desc = irq_data_get_msi_desc(data);
pci_msi_mask(desc, BIT(data->irq - desc->irq));
+ cond_mask_parent(data);
}
static void pci_irq_unmask_msi(struct irq_data *data)
{
struct msi_desc *desc = irq_data_get_msi_desc(data);
+ cond_unmask_parent(data);
pci_msi_unmask(desc, BIT(data->irq - desc->irq));
}
@@ -195,10 +213,12 @@ static const struct msi_domain_template pci_msi_template = {
static void pci_irq_mask_msix(struct irq_data *data)
{
pci_msix_mask(irq_data_get_msi_desc(data));
+ cond_mask_parent(data);
}
static void pci_irq_unmask_msix(struct irq_data *data)
{
+ cond_unmask_parent(data);
pci_msix_unmask(irq_data_get_msi_desc(data));
}
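Note the ordering: the mask path masks at the PCI level first and then, conditionally, the parent, while unmask reverses the order, so the interrupt is never deliverable while only half unmasked. A domain opts in to the extra parent-level masking via its msi_domain_info flags; roughly (sketch of a hypothetical domain setup):

/* Sketch: an MSI domain opting in to parent-level masking */
static struct msi_domain_info demo_msi_domain_info = {
	.flags	= MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		  MSI_FLAG_PCI_MSI_MASK_PARENT,
	/* .ops, .chip, ... as usual */
};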
@@ -330,8 +350,11 @@ bool pci_msi_domain_supports(struct pci_dev *pdev, unsigned int feature_mask,
domain = dev_get_msi_domain(&pdev->dev);
- if (!domain || !irq_domain_is_hierarchy(domain))
- return mode == ALLOW_LEGACY;
+ if (!domain || !irq_domain_is_hierarchy(domain)) {
+ if (IS_ENABLED(CONFIG_PCI_MSI_ARCH_FALLBACKS))
+ return mode == ALLOW_LEGACY;
+ return false;
+ }
if (!irq_domain_is_msi_parent(domain)) {
/*
@@ -355,65 +378,6 @@ bool pci_msi_domain_supports(struct pci_dev *pdev, unsigned int feature_mask,
return (supported & feature_mask) == feature_mask;
}
-/**
- * pci_create_ims_domain - Create a secondary IMS domain for a PCI device
- * @pdev: The PCI device to operate on
- * @template: The MSI info template which describes the domain
- * @hwsize: The size of the hardware entry table or 0 if the domain
- * is purely software managed
- * @data: Optional pointer to domain specific data to be stored
- * in msi_domain_info::data
- *
- * Return: True on success, false otherwise
- *
- * An IMS domain is expected to have the following constraints:
- * - The index space is managed by the core code
- *
- * - There is no requirement for consecutive index ranges
- *
- * - The interrupt chip must provide the following callbacks:
- * - irq_mask()
- * - irq_unmask()
- * - irq_write_msi_msg()
- *
- * - The interrupt chip must provide the following optional callbacks
- * when the irq_mask(), irq_unmask() and irq_write_msi_msg() callbacks
- * cannot operate directly on hardware, e.g. in the case that the
- * interrupt message store is in queue memory:
- * - irq_bus_lock()
- * - irq_bus_unlock()
- *
- * These callbacks are invoked from preemptible task context and are
- * allowed to sleep. In this case the mandatory callbacks above just
- * store the information. The irq_bus_unlock() callback is supposed
- * to make the change effective before returning.
- *
- * - Interrupt affinity setting is handled by the underlying parent
- * interrupt domain and communicated to the IMS domain via
- * irq_write_msi_msg().
- *
- * The domain is automatically destroyed when the PCI device is removed.
- */
-bool pci_create_ims_domain(struct pci_dev *pdev, const struct msi_domain_template *template,
- unsigned int hwsize, void *data)
-{
- struct irq_domain *domain = dev_get_msi_domain(&pdev->dev);
-
- if (!domain || !irq_domain_is_msi_parent(domain))
- return false;
-
- if (template->info.bus_token != DOMAIN_BUS_PCI_DEVICE_IMS ||
- !(template->info.flags & MSI_FLAG_ALLOC_SIMPLE_MSI_DESCS) ||
- !(template->info.flags & MSI_FLAG_FREE_MSI_DESCS) ||
- !template->chip.irq_mask || !template->chip.irq_unmask ||
- !template->chip.irq_write_msi_msg || template->chip.irq_set_affinity)
- return false;
-
- return msi_create_device_irq_domain(&pdev->dev, MSI_SECONDARY_DOMAIN, template,
- hwsize, data, NULL);
-}
-EXPORT_SYMBOL_GPL(pci_create_ims_domain);
-
/*
* Users of the generic MSI infrastructure expect a device to have a single ID,
* so with DMA aliases we have to pick the least-worst compromise. Devices with
diff --git a/drivers/pci/msi/msi.c b/drivers/pci/msi/msi.c
index 682fa877478f..d6ce04054702 100644
--- a/drivers/pci/msi/msi.c
+++ b/drivers/pci/msi/msi.c
@@ -10,12 +10,12 @@
#include <linux/err.h>
#include <linux/export.h>
#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include "../pci.h"
#include "msi.h"
-int pci_msi_enable = 1;
-int pci_msi_ignore_mask;
+bool pci_msi_enable = true;
/**
* pci_msi_supported - check whether MSI may be enabled on a device
@@ -86,9 +86,11 @@ static int pcim_setup_msi_release(struct pci_dev *dev)
return 0;
ret = devm_add_action(&dev->dev, pcim_msi_release, dev);
- if (!ret)
- dev->is_msi_managed = true;
- return ret;
+ if (ret)
+ return ret;
+
+ dev->is_msi_managed = true;
+ return 0;
}
/*
@@ -99,9 +101,10 @@ static int pci_setup_msi_context(struct pci_dev *dev)
{
int ret = msi_setup_device_data(&dev->dev);
- if (!ret)
- ret = pcim_setup_msi_release(dev);
- return ret;
+ if (ret)
+ return ret;
+
+ return pcim_setup_msi_release(dev);
}
/*
@@ -292,8 +295,7 @@ static int msi_setup_msi_desc(struct pci_dev *dev, int nvec,
/* Lies, damned lies, and MSIs */
if (dev->dev_flags & PCI_DEV_FLAGS_HAS_MSI_MASKING)
control |= PCI_MSI_FLAGS_MASKBIT;
- /* Respect XEN's mask disabling */
- if (pci_msi_ignore_mask)
+ if (pci_msi_domain_supports(dev, MSI_FLAG_NO_MASK, DENY_LEGACY))
control &= ~PCI_MSI_FLAGS_MASKBIT;
desc.nvec_used = nvec;
@@ -333,47 +335,23 @@ static int msi_verify_entries(struct pci_dev *dev)
return !entry ? 0 : -EIO;
}
-/**
- * msi_capability_init - configure device's MSI capability structure
- * @dev: pointer to the pci_dev data structure of MSI device function
- * @nvec: number of interrupts to allocate
- * @affd: description of automatic IRQ affinity assignments (may be %NULL)
- *
- * Setup the MSI capability structure of the device with the requested
- * number of interrupts. A return value of zero indicates the successful
- * setup of an entry with the new MSI IRQ. A negative return value indicates
- * an error, and a positive return value indicates the number of interrupts
- * which could have been allocated.
- */
-static int msi_capability_init(struct pci_dev *dev, int nvec,
- struct irq_affinity *affd)
+static int __msi_capability_init(struct pci_dev *dev, int nvec, struct irq_affinity_desc *masks)
{
- struct irq_affinity_desc *masks = NULL;
- struct msi_desc *entry;
- int ret;
-
- /* Reject multi-MSI early on irq domain enabled architectures */
- if (nvec > 1 && !pci_msi_domain_supports(dev, MSI_FLAG_MULTI_PCI_MSI, ALLOW_LEGACY))
- return 1;
+ int ret = msi_setup_msi_desc(dev, nvec, masks);
+ struct msi_desc *entry, desc;
- /*
- * Disable MSI during setup in the hardware, but mark it enabled
- * so that setup code can evaluate it.
- */
- pci_msi_set_enable(dev, 0);
- dev->msi_enabled = 1;
-
- if (affd)
- masks = irq_create_affinity_masks(nvec, affd);
-
- msi_lock_descs(&dev->dev);
- ret = msi_setup_msi_desc(dev, nvec, masks);
if (ret)
- goto fail;
+ return ret;
/* All MSIs are unmasked by default; mask them all */
entry = msi_first_desc(&dev->dev, MSI_DESC_ALL);
pci_msi_mask(entry, msi_multi_mask(entry));
+ /*
+ * Copy the MSI descriptor for the error path because
+ * pci_msi_setup_msi_irqs() will free it for the hierarchical
+ * interrupt domain case.
+ */
+ memcpy(&desc, entry, sizeof(desc));
/* Configure MSI capability structure */
ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI);
@@ -385,24 +363,51 @@ static int msi_capability_init(struct pci_dev *dev, int nvec,
goto err;
/* Set MSI enabled bits */
+ dev->msi_enabled = 1;
pci_intx_for_msi(dev, 0);
pci_msi_set_enable(dev, 1);
pcibios_free_irq(dev);
dev->irq = entry->irq;
- goto unlock;
-
+ return 0;
err:
- pci_msi_unmask(entry, msi_multi_mask(entry));
+ pci_msi_unmask(&desc, msi_multi_mask(&desc));
pci_free_msi_irqs(dev);
-fail:
- dev->msi_enabled = 0;
-unlock:
- msi_unlock_descs(&dev->dev);
- kfree(masks);
return ret;
}
+/**
+ * msi_capability_init - configure device's MSI capability structure
+ * @dev: pointer to the pci_dev data structure of MSI device function
+ * @nvec: number of interrupts to allocate
+ * @affd: description of automatic IRQ affinity assignments (may be %NULL)
+ *
+ * Setup the MSI capability structure of the device with the requested
+ * number of interrupts. A return value of zero indicates the successful
+ * setup of an entry with the new MSI IRQ. A negative return value indicates
+ * an error, and a positive return value indicates the number of interrupts
+ * which could have been allocated.
+ */
+static int msi_capability_init(struct pci_dev *dev, int nvec,
+ struct irq_affinity *affd)
+{
+ /* Reject multi-MSI early on irq domain enabled architectures */
+ if (nvec > 1 && !pci_msi_domain_supports(dev, MSI_FLAG_MULTI_PCI_MSI, ALLOW_LEGACY))
+ return 1;
+
+ /*
+ * Disable MSI during setup in the hardware, but mark it enabled
+ * so that setup code can evaluate it.
+ */
+ pci_msi_set_enable(dev, 0);
+
+ struct irq_affinity_desc *masks __free(kfree) =
+ affd ? irq_create_affinity_masks(nvec, affd) : NULL;
+
+ guard(msi_descs_lock)(&dev->dev);
+ return __msi_capability_init(dev, nvec, masks);
+}
+
int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
struct irq_affinity *affd)
{
@@ -424,6 +429,10 @@ int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
if (WARN_ON_ONCE(dev->msi_enabled))
return -EINVAL;
+ /* Test for the availability of MSI support */
+ if (!pci_msi_domain_supports(dev, 0, ALLOW_LEGACY))
+ return -ENOTSUPP;
+
nvec = pci_msi_vec_count(dev);
if (nvec < 0)
return nvec;
@@ -596,12 +605,16 @@ void msix_prepare_msi_desc(struct pci_dev *dev, struct msi_desc *desc)
desc->pci.msi_attrib.is_64 = 1;
desc->pci.msi_attrib.default_irq = dev->irq;
desc->pci.mask_base = dev->msix_base;
- desc->pci.msi_attrib.can_mask = !pci_msi_ignore_mask &&
- !desc->pci.msi_attrib.is_virtual;
- if (desc->pci.msi_attrib.can_mask) {
+
+ if (!pci_msi_domain_supports(dev, MSI_FLAG_NO_MASK, DENY_LEGACY) &&
+ !desc->pci.msi_attrib.is_virtual) {
void __iomem *addr = pci_msix_desc_addr(desc);
+ desc->pci.msi_attrib.can_mask = 1;
+ /* Workaround for SUN NIU insanity, which requires write before read */
+ if (dev->dev_flags & PCI_DEV_FLAGS_MSIX_TOUCH_ENTRY_DATA_FIRST)
+ writel(0, addr + PCI_MSIX_ENTRY_DATA);
desc->pci.msix_ctrl = readl(addr + PCI_MSIX_ENTRY_VECTOR_CTRL);
}
}
@@ -646,45 +659,43 @@ static void msix_mask_all(void __iomem *base, int tsize)
u32 ctrl = PCI_MSIX_ENTRY_CTRL_MASKBIT;
int i;
- if (pci_msi_ignore_mask)
- return;
-
for (i = 0; i < tsize; i++, base += PCI_MSIX_ENTRY_SIZE)
writel(ctrl, base + PCI_MSIX_ENTRY_VECTOR_CTRL);
}
-static int msix_setup_interrupts(struct pci_dev *dev, struct msix_entry *entries,
- int nvec, struct irq_affinity *affd)
-{
- struct irq_affinity_desc *masks = NULL;
- int ret;
+DEFINE_FREE(free_msi_irqs, struct pci_dev *, if (_T) pci_free_msi_irqs(_T));
- if (affd)
- masks = irq_create_affinity_masks(nvec, affd);
+static int __msix_setup_interrupts(struct pci_dev *__dev, struct msix_entry *entries,
+ int nvec, struct irq_affinity_desc *masks)
+{
+ struct pci_dev *dev __free(free_msi_irqs) = __dev;
- msi_lock_descs(&dev->dev);
- ret = msix_setup_msi_descs(dev, entries, nvec, masks);
+ int ret = msix_setup_msi_descs(dev, entries, nvec, masks);
if (ret)
- goto out_free;
+ return ret;
ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX);
if (ret)
- goto out_free;
+ return ret;
/* Check if all MSI entries honor device restrictions */
ret = msi_verify_entries(dev);
if (ret)
- goto out_free;
+ return ret;
msix_update_entries(dev, entries);
- goto out_unlock;
+ retain_and_null_ptr(dev);
+ return 0;
+}
-out_free:
- pci_free_msi_irqs(dev);
-out_unlock:
- msi_unlock_descs(&dev->dev);
- kfree(masks);
- return ret;
+static int msix_setup_interrupts(struct pci_dev *dev, struct msix_entry *entries,
+ int nvec, struct irq_affinity *affd)
+{
+ struct irq_affinity_desc *masks __free(kfree) =
+ affd ? irq_create_affinity_masks(nvec, affd) : NULL;
+
+ guard(msi_descs_lock)(&dev->dev);
+ return __msix_setup_interrupts(dev, entries, nvec, masks);
}
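The DEFINE_FREE()/__free() pair attaches the cleanup to the pointer itself: any early return frees the MSI IRQs, and retain_and_null_ptr() disarms the cleanup once setup has succeeded. The control flow in isolation (sketch; some_setup_step() is a hypothetical stand-in):

DEFINE_FREE(free_msi_irqs_sketch, struct pci_dev *, if (_T) pci_free_msi_irqs(_T))

static int setup_sketch(struct pci_dev *__dev)
{
	struct pci_dev *dev __free(free_msi_irqs_sketch) = __dev;

	if (some_setup_step(dev))	/* hypothetical setup step */
		return -EIO;		/* cleanup fires: pci_free_msi_irqs(dev) */

	retain_and_null_ptr(dev);	/* success: NULL the pointer, skip cleanup */
	return 0;
}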
/**
@@ -731,15 +742,17 @@ static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries,
/* Disable INTX */
pci_intx_for_msi(dev, 0);
- /*
- * Ensure that all table entries are masked to prevent
- * stale entries from firing in a crash kernel.
- *
- * Done late to deal with a broken Marvell NVME device
- * which takes the MSI-X mask bits into account even
- * when MSI-X is disabled, which prevents MSI delivery.
- */
- msix_mask_all(dev->msix_base, tsize);
+ if (!pci_msi_domain_supports(dev, MSI_FLAG_NO_MASK, DENY_LEGACY)) {
+ /*
+ * Ensure that all table entries are masked to prevent
+ * stale entries from firing in a crash kernel.
+ *
+ * Done late to deal with a broken Marvell NVME device
+ * which takes the MSI-X mask bits into account even
+ * when MSI-X is disabled, which prevents MSI delivery.
+ */
+ msix_mask_all(dev->msix_base, tsize);
+ }
pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0);
pcibios_free_irq(dev);
@@ -858,13 +871,13 @@ void __pci_restore_msix_state(struct pci_dev *dev)
write_msg = arch_restore_msi_irqs(dev);
- msi_lock_descs(&dev->dev);
- msi_for_each_desc(entry, &dev->dev, MSI_DESC_ALL) {
- if (write_msg)
- __pci_write_msi_msg(entry, &entry->msg);
- pci_msix_write_vector_ctrl(entry, entry->pci.msix_ctrl);
+ scoped_guard (msi_descs_lock, &dev->dev) {
+ msi_for_each_desc(entry, &dev->dev, MSI_DESC_ALL) {
+ if (write_msg)
+ __pci_write_msi_msg(entry, &entry->msg);
+ pci_msix_write_vector_ctrl(entry, entry->pci.msix_ctrl);
+ }
}
- msi_unlock_descs(&dev->dev);
pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0);
}
@@ -903,6 +916,53 @@ void pci_free_msi_irqs(struct pci_dev *dev)
}
}
+#ifdef CONFIG_PCIE_TPH
+/**
+ * pci_msix_write_tph_tag - Update the TPH tag for a given MSI-X vector
+ * @pdev: The PCIe device to update
+ * @index: The MSI-X index to update
+ * @tag: The tag to write
+ *
+ * Returns: 0 on success, error code on failure
+ */
+int pci_msix_write_tph_tag(struct pci_dev *pdev, unsigned int index, u16 tag)
+{
+ struct msi_desc *msi_desc;
+ struct irq_desc *irq_desc;
+ unsigned int virq;
+
+ if (!pdev->msix_enabled)
+ return -ENXIO;
+
+ guard(msi_descs_lock)(&pdev->dev);
+ virq = msi_get_virq(&pdev->dev, index);
+ if (!virq)
+ return -ENXIO;
+ /*
+ * This is a horrible hack, but short of implementing a PCI
+ * specific interrupt chip callback and a huge pile of
+	 * infrastructure, this is the lesser nuisance. It provides
+	 * protection against concurrent operations on this entry and keeps
+ * the control word cache in sync.
+ */
+ irq_desc = irq_to_desc(virq);
+ if (!irq_desc)
+ return -ENXIO;
+
+ guard(raw_spinlock_irq)(&irq_desc->lock);
+ msi_desc = irq_data_get_msi_desc(&irq_desc->irq_data);
+ if (!msi_desc || msi_desc->pci.msi_attrib.is_virtual)
+ return -ENXIO;
+
+ msi_desc->pci.msix_ctrl &= ~PCI_MSIX_ENTRY_CTRL_ST;
+ msi_desc->pci.msix_ctrl |= FIELD_PREP(PCI_MSIX_ENTRY_CTRL_ST, tag);
+ pci_msix_write_vector_ctrl(msi_desc, msi_desc->pci.msix_ctrl);
+ /* Flush the write */
+ readl(pci_msix_desc_addr(msi_desc));
+ return 0;
+}
+#endif
+
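The read-modify-write on msix_ctrl above uses FIELD_PREP() from <linux/bitfield.h> to place the tag into the Steering Tag field selected by the PCI_MSIX_ENTRY_CTRL_ST mask. The general pattern (sketch):

/* Replace one bitfield in a control word */
static u32 set_steering_tag(u32 ctrl, u16 tag)
{
	ctrl &= ~PCI_MSIX_ENTRY_CTRL_ST;	/* clear the ST field */
	return ctrl | FIELD_PREP(PCI_MSIX_ENTRY_CTRL_ST, tag);
}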
/* Misc. infrastructure */
struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc)
@@ -913,5 +973,5 @@ EXPORT_SYMBOL(msi_desc_to_pci_dev);
void pci_no_msi(void)
{
- pci_msi_enable = 0;
+ pci_msi_enable = false;
}
diff --git a/drivers/pci/msi/msi.h b/drivers/pci/msi/msi.h
index ee53cf079f4e..fc70b601e942 100644
--- a/drivers/pci/msi/msi.h
+++ b/drivers/pci/msi/msi.h
@@ -87,7 +87,7 @@ static inline __attribute_const__ u32 msi_multi_mask(struct msi_desc *desc)
void msix_prepare_msi_desc(struct pci_dev *dev, struct msi_desc *desc);
/* Subsystem variables */
-extern int pci_msi_enable;
+extern bool pci_msi_enable;
/* MSI internal functions invoked from the public APIs */
void pci_msi_shutdown(struct pci_dev *dev);
diff --git a/drivers/pci/npem.c b/drivers/pci/npem.c
new file mode 100644
index 000000000000..97507e0df769
--- /dev/null
+++ b/drivers/pci/npem.c
@@ -0,0 +1,595 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe enclosure management driver created for LED interfaces based on
+ * indications. It determines *which indications* blink but not *how* they
+ * blink - that is hardware defined.
+ *
+ * The driver name refers to Native PCIe Enclosure Management, the first
+ * indication-oriented standard with a specification.
+ *
+ * Native PCIe Enclosure Management (NPEM)
+ * PCIe Base Specification r6.1 sec 6.28, 7.9.19
+ *
+ * _DSM Definitions for PCIe SSD Status LED
+ * PCI Firmware Specification, r3.3 sec 4.7
+ *
+ * Two backends are supported to manipulate indications: Direct NPEM register
+ * access (npem_ops) and indirect access through the ACPI _DSM (dsm_ops).
+ * _DSM is used if supported, else NPEM.
+ *
+ * Copyright (c) 2021-2022 Dell Inc.
+ * Copyright (c) 2023-2024 Intel Corporation
+ * Mariusz Tkaczyk <mariusz.tkaczyk@linux.intel.com>
+ */
+
+#include <linux/acpi.h>
+#include <linux/bitops.h>
+#include <linux/errno.h>
+#include <linux/iopoll.h>
+#include <linux/leds.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/pci_regs.h>
+#include <linux/types.h>
+#include <linux/uleds.h>
+
+#include "pci.h"
+
+struct indication {
+ u32 bit;
+ const char *name;
+};
+
+static const struct indication npem_indications[] = {
+ {PCI_NPEM_IND_OK, "enclosure:ok"},
+ {PCI_NPEM_IND_LOCATE, "enclosure:locate"},
+ {PCI_NPEM_IND_FAIL, "enclosure:fail"},
+ {PCI_NPEM_IND_REBUILD, "enclosure:rebuild"},
+ {PCI_NPEM_IND_PFA, "enclosure:pfa"},
+ {PCI_NPEM_IND_HOTSPARE, "enclosure:hotspare"},
+ {PCI_NPEM_IND_ICA, "enclosure:ica"},
+ {PCI_NPEM_IND_IFA, "enclosure:ifa"},
+ {PCI_NPEM_IND_IDT, "enclosure:idt"},
+ {PCI_NPEM_IND_DISABLED, "enclosure:disabled"},
+ {PCI_NPEM_IND_SPEC_0, "enclosure:specific_0"},
+ {PCI_NPEM_IND_SPEC_1, "enclosure:specific_1"},
+ {PCI_NPEM_IND_SPEC_2, "enclosure:specific_2"},
+ {PCI_NPEM_IND_SPEC_3, "enclosure:specific_3"},
+ {PCI_NPEM_IND_SPEC_4, "enclosure:specific_4"},
+ {PCI_NPEM_IND_SPEC_5, "enclosure:specific_5"},
+ {PCI_NPEM_IND_SPEC_6, "enclosure:specific_6"},
+ {PCI_NPEM_IND_SPEC_7, "enclosure:specific_7"},
+ {0, NULL}
+};
+
+/* _DSM PCIe SSD LED States correspond to NPEM register values */
+static const struct indication dsm_indications[] = {
+ {PCI_NPEM_IND_OK, "enclosure:ok"},
+ {PCI_NPEM_IND_LOCATE, "enclosure:locate"},
+ {PCI_NPEM_IND_FAIL, "enclosure:fail"},
+ {PCI_NPEM_IND_REBUILD, "enclosure:rebuild"},
+ {PCI_NPEM_IND_PFA, "enclosure:pfa"},
+ {PCI_NPEM_IND_HOTSPARE, "enclosure:hotspare"},
+ {PCI_NPEM_IND_ICA, "enclosure:ica"},
+ {PCI_NPEM_IND_IFA, "enclosure:ifa"},
+ {PCI_NPEM_IND_IDT, "enclosure:idt"},
+ {PCI_NPEM_IND_DISABLED, "enclosure:disabled"},
+ {0, NULL}
+};
+
+#define for_each_indication(ind, inds) \
+ for (ind = inds; ind->bit; ind++)
+
+/*
+ * The driver has an internal list of supported indications. Ideally, the
+ * driver should not touch bits that are not defined and for which no LED
+ * devices are exposed, but in reality it needs to turn them off.
+ *
+ * Otherwise, there would be no way to turn off indications turned on by
+ * other utilities or turned on by default, which leads to a bad user
+ * experience.
+ *
+ * Additionally, this mask excludes NPEM commands like RESET or ENABLE.
+ */
+static u32 reg_to_indications(u32 caps, const struct indication *inds)
+{
+ const struct indication *ind;
+ u32 supported_indications = 0;
+
+ for_each_indication(ind, inds)
+ supported_indications |= ind->bit;
+
+ return caps & supported_indications;
+}
+
+/**
+ * struct npem_led - LED details
+ * @indication: indication details
+ * @npem: NPEM device
+ * @name: LED name
+ * @led: LED device
+ */
+struct npem_led {
+ const struct indication *indication;
+ struct npem *npem;
+ char name[LED_MAX_NAME_SIZE];
+ struct led_classdev led;
+};
+
+/**
+ * struct npem_ops - backend specific callbacks
+ * @get_active_indications: get active indications
+ * npem: NPEM device
+ * inds: response buffer
+ * @set_active_indications: set new indications
+ * npem: NPEM device
+ * inds: bit mask to set
+ * @inds: supported indications array; the set of indications is backend specific
+ * @name: backend name
+ */
+struct npem_ops {
+ int (*get_active_indications)(struct npem *npem, u32 *inds);
+ int (*set_active_indications)(struct npem *npem, u32 inds);
+ const struct indication *inds;
+ const char *name;
+};
+
+/**
+ * struct npem - NPEM device properties
+ * @dev: PCI device this driver is attached to
+ * @ops: backend specific callbacks
+ * @lock: serializes concurrent access to NPEM device by multiple LED devices
+ * @pos: cached offset of NPEM Capability Register in Configuration Space;
+ * only used if NPEM registers are accessed directly and not through _DSM
+ * @supported_indications: cached bit mask of supported indications;
+ * non-indication and reserved bits in the NPEM Capability Register are
+ * cleared in this bit mask
+ * @active_indications: cached bit mask of active indications;
+ * non-indication and reserved bits in the NPEM Control Register are
+ * cleared in this bit mask
+ * @active_inds_initialized: whether @active_indications has been initialized;
+ * On Dell platforms, it is required that IPMI drivers are loaded before
+ * the GET_STATE_DSM method is invoked: They use an IPMI OpRegion to
+ * get/set the active LEDs. By initializing @active_indications lazily
+ * (on first access to an LED), IPMI drivers are given a chance to load.
+ * If they are not loaded in time, users will see various errors on LED
+ * access in dmesg. Once they are loaded, the errors go away and LED
+ * access becomes possible.
+ * @led_cnt: size of @leds array
+ * @leds: array containing LED class devices of all supported LEDs
+ */
+struct npem {
+ struct pci_dev *dev;
+ const struct npem_ops *ops;
+ struct mutex lock;
+ u16 pos;
+ u32 supported_indications;
+ u32 active_indications;
+ unsigned int active_inds_initialized:1;
+ int led_cnt;
+ struct npem_led leds[];
+};
+
+static int npem_read_reg(struct npem *npem, u16 reg, u32 *val)
+{
+ int ret = pci_read_config_dword(npem->dev, npem->pos + reg, val);
+
+ return pcibios_err_to_errno(ret);
+}
+
+static int npem_write_ctrl(struct npem *npem, u32 reg)
+{
+ int pos = npem->pos + PCI_NPEM_CTRL;
+ int ret = pci_write_config_dword(npem->dev, pos, reg);
+
+ return pcibios_err_to_errno(ret);
+}
+
+static int npem_get_active_indications(struct npem *npem, u32 *inds)
+{
+ u32 ctrl;
+ int ret;
+
+ ret = npem_read_reg(npem, PCI_NPEM_CTRL, &ctrl);
+ if (ret)
+ return ret;
+
+ /* If PCI_NPEM_CTRL_ENABLE is not set then no indication should blink */
+ if (!(ctrl & PCI_NPEM_CTRL_ENABLE)) {
+ *inds = 0;
+ return 0;
+ }
+
+ *inds = ctrl & npem->supported_indications;
+
+ return 0;
+}
+
+static int npem_set_active_indications(struct npem *npem, u32 inds)
+{
+ int ctrl, ret, ret_val;
+ u32 cc_status;
+
+ lockdep_assert_held(&npem->lock);
+
+ /* This bit is always required */
+ ctrl = inds | PCI_NPEM_CTRL_ENABLE;
+
+ ret = npem_write_ctrl(npem, ctrl);
+ if (ret)
+ return ret;
+
+ /*
+ * For the case where a NPEM command has not completed immediately,
+ * it is recommended that software not continuously "spin" on polling
+ * the status register, but rather poll under interrupt at a reduced
+ * rate; for example at 10 ms intervals.
+ *
+ * PCIe r6.1 sec 6.28 "Implementation Note: Software Polling of NPEM
+ * Command Completed"
+ */
+ ret = read_poll_timeout(npem_read_reg, ret_val,
+ ret_val || (cc_status & PCI_NPEM_STATUS_CC),
+ 10 * USEC_PER_MSEC, USEC_PER_SEC, false, npem,
+ PCI_NPEM_STATUS, &cc_status);
+ if (ret)
+ return ret;
+ if (ret_val)
+ return ret_val;
+
+ /*
+	 * All writes to the control register, including writes that do not change
+ * the register value, are NPEM commands and should eventually result
+ * in a command completion indication in the NPEM Status Register.
+ *
+ * PCIe Base Specification r6.1 sec 7.9.19.3
+ *
+	 * The register may not be updated, or other conflicting bits may be
+	 * cleared. The spec is not strict here. Read the NPEM Control register
+	 * after the write to keep the cache in sync.
+ */
+ return npem_get_active_indications(npem, &npem->active_indications);
+}
+
+static const struct npem_ops npem_ops = {
+ .get_active_indications = npem_get_active_indications,
+ .set_active_indications = npem_set_active_indications,
+ .name = "Native PCIe Enclosure Management",
+ .inds = npem_indications,
+};
+
+#define DSM_GUID GUID_INIT(0x5d524d9d, 0xfff9, 0x4d4b, 0x8c, 0xb7, 0x74, 0x7e,\
+ 0xd5, 0x1e, 0x19, 0x4d)
+#define GET_SUPPORTED_STATES_DSM 1
+#define GET_STATE_DSM 2
+#define SET_STATE_DSM 3
+
+static const guid_t dsm_guid = DSM_GUID;
+
+static bool npem_has_dsm(struct pci_dev *pdev)
+{
+ acpi_handle handle;
+
+ handle = ACPI_HANDLE(&pdev->dev);
+ if (!handle)
+ return false;
+
+ return acpi_check_dsm(handle, &dsm_guid, 0x1,
+ BIT(GET_SUPPORTED_STATES_DSM) |
+ BIT(GET_STATE_DSM) | BIT(SET_STATE_DSM));
+}
+
+struct dsm_output {
+ u16 status;
+ u8 function_specific_err;
+ u8 vendor_specific_err;
+ u32 state;
+};
+
+/**
+ * dsm_evaluate() - send DSM PCIe SSD Status LED command
+ * @pdev: PCI device
+ * @dsm_func: DSM LED Function
+ * @output: buffer to copy DSM Response
+ * @value_to_set: value for SET_STATE_DSM function
+ *
+ * To avoid burdening the caller with ACPI context, the returned _DSM Output
+ * Buffer is copied.
+ */
+static int dsm_evaluate(struct pci_dev *pdev, u64 dsm_func,
+ struct dsm_output *output, u32 value_to_set)
+{
+ acpi_handle handle = ACPI_HANDLE(&pdev->dev);
+ union acpi_object *out_obj, arg3[2];
+ union acpi_object *arg3_p = NULL;
+
+ if (dsm_func == SET_STATE_DSM) {
+ arg3[0].type = ACPI_TYPE_PACKAGE;
+ arg3[0].package.count = 1;
+ arg3[0].package.elements = &arg3[1];
+
+ arg3[1].type = ACPI_TYPE_BUFFER;
+ arg3[1].buffer.length = 4;
+ arg3[1].buffer.pointer = (u8 *)&value_to_set;
+
+ arg3_p = arg3;
+ }
+
+ out_obj = acpi_evaluate_dsm_typed(handle, &dsm_guid, 0x1, dsm_func,
+ arg3_p, ACPI_TYPE_BUFFER);
+ if (!out_obj)
+ return -EIO;
+
+ if (out_obj->buffer.length < sizeof(struct dsm_output)) {
+ ACPI_FREE(out_obj);
+ return -EIO;
+ }
+
+ memcpy(output, out_obj->buffer.pointer, sizeof(struct dsm_output));
+
+ ACPI_FREE(out_obj);
+ return 0;
+}
+
+static int dsm_get(struct pci_dev *pdev, u64 dsm_func, u32 *buf)
+{
+ struct dsm_output output;
+ int ret = dsm_evaluate(pdev, dsm_func, &output, 0);
+
+ if (ret)
+ return ret;
+
+ if (output.status != 0)
+ return -EIO;
+
+ *buf = output.state;
+ return 0;
+}
+
+static int dsm_get_active_indications(struct npem *npem, u32 *buf)
+{
+ int ret = dsm_get(npem->dev, GET_STATE_DSM, buf);
+
+ /* Filter out not supported indications in response */
+ *buf &= npem->supported_indications;
+ return ret;
+}
+
+static int dsm_set_active_indications(struct npem *npem, u32 value)
+{
+ struct dsm_output output;
+ int ret = dsm_evaluate(npem->dev, SET_STATE_DSM, &output, value);
+
+ if (ret)
+ return ret;
+
+ switch (output.status) {
+ case 4:
+ /*
+ * Not all bits are set. If this bit is set, the platform
+		 * disregarded some or all of the requested state changes. OSPM
+ * should check the resulting PCIe SSD Status LED States to see
+ * what, if anything, has changed.
+ *
+ * PCI Firmware Specification, r3.3 Table 4-19.
+ */
+ if (output.function_specific_err != 1)
+ return -EIO;
+ fallthrough;
+ case 0:
+ break;
+ default:
+ return -EIO;
+ }
+
+ npem->active_indications = output.state;
+
+ return 0;
+}
+
+static const struct npem_ops dsm_ops = {
+ .get_active_indications = dsm_get_active_indications,
+ .set_active_indications = dsm_set_active_indications,
+ .name = "_DSM PCIe SSD Status LED Management",
+ .inds = dsm_indications,
+};
+
+static int npem_initialize_active_indications(struct npem *npem)
+{
+ int ret;
+
+ lockdep_assert_held(&npem->lock);
+
+ if (npem->active_inds_initialized)
+ return 0;
+
+ ret = npem->ops->get_active_indications(npem,
+ &npem->active_indications);
+ if (ret)
+ return ret;
+
+ npem->active_inds_initialized = true;
+ return 0;
+}
+
+/*
+ * The status of each indicator is cached at first brightness get/set time
+ * and updated at write time. brightness_get() is only responsible for
+ * reflecting the last written/cached value.
+ */
+static enum led_brightness brightness_get(struct led_classdev *led)
+{
+ struct npem_led *nled = container_of(led, struct npem_led, led);
+ struct npem *npem = nled->npem;
+ int ret, val = 0;
+
+ ret = mutex_lock_interruptible(&npem->lock);
+ if (ret)
+ return ret;
+
+ ret = npem_initialize_active_indications(npem);
+ if (ret)
+ goto out;
+
+ if (npem->active_indications & nled->indication->bit)
+ val = 1;
+
+out:
+ mutex_unlock(&npem->lock);
+ return val;
+}
+
+static int brightness_set(struct led_classdev *led,
+ enum led_brightness brightness)
+{
+ struct npem_led *nled = container_of(led, struct npem_led, led);
+ struct npem *npem = nled->npem;
+ u32 indications;
+ int ret;
+
+ ret = mutex_lock_interruptible(&npem->lock);
+ if (ret)
+ return ret;
+
+ ret = npem_initialize_active_indications(npem);
+ if (ret)
+ goto out;
+
+ if (brightness == 0)
+ indications = npem->active_indications & ~(nled->indication->bit);
+ else
+ indications = npem->active_indications | nled->indication->bit;
+
+ ret = npem->ops->set_active_indications(npem, indications);
+
+out:
+ mutex_unlock(&npem->lock);
+ return ret;
+}
+
+static void npem_free(struct npem *npem)
+{
+ struct npem_led *nled;
+ int cnt;
+
+ if (!npem)
+ return;
+
+ for (cnt = 0; cnt < npem->led_cnt; cnt++) {
+ nled = &npem->leds[cnt];
+
+ if (nled->name[0])
+ led_classdev_unregister(&nled->led);
+ }
+
+ mutex_destroy(&npem->lock);
+ kfree(npem);
+}
+
+static int pci_npem_set_led_classdev(struct npem *npem, struct npem_led *nled)
+{
+ struct led_classdev *led = &nled->led;
+ struct led_init_data init_data = {};
+ char *name = nled->name;
+ int ret;
+
+ init_data.devicename = pci_name(npem->dev);
+ init_data.default_label = nled->indication->name;
+
+ ret = led_compose_name(&npem->dev->dev, &init_data, name);
+ if (ret)
+ return ret;
+
+ led->name = name;
+ led->brightness_set_blocking = brightness_set;
+ led->brightness_get = brightness_get;
+ led->max_brightness = 1;
+ led->default_trigger = "none";
+ led->flags = 0;
+
+ ret = led_classdev_register(&npem->dev->dev, led);
+ if (ret)
+ /* Clear the name to indicate that it is not registered. */
+ name[0] = 0;
+ return ret;
+}
+
+static int pci_npem_init(struct pci_dev *dev, const struct npem_ops *ops,
+ int pos, u32 caps)
+{
+ u32 supported = reg_to_indications(caps, ops->inds);
+ int supported_cnt = hweight32(supported);
+ const struct indication *indication;
+ struct npem_led *nled;
+ struct npem *npem;
+ int led_idx = 0;
+ int ret;
+
+ npem = kzalloc(struct_size(npem, leds, supported_cnt), GFP_KERNEL);
+ if (!npem)
+ return -ENOMEM;
+
+ npem->supported_indications = supported;
+ npem->led_cnt = supported_cnt;
+ npem->pos = pos;
+ npem->dev = dev;
+ npem->ops = ops;
+
+ mutex_init(&npem->lock);
+
+ for_each_indication(indication, npem_indications) {
+ if (!(npem->supported_indications & indication->bit))
+ continue;
+
+ nled = &npem->leds[led_idx++];
+ nled->indication = indication;
+ nled->npem = npem;
+
+ ret = pci_npem_set_led_classdev(npem, nled);
+ if (ret) {
+ npem_free(npem);
+ return ret;
+ }
+ }
+
+ dev->npem = npem;
+ return 0;
+}
+
+void pci_npem_remove(struct pci_dev *dev)
+{
+ npem_free(dev->npem);
+}
+
+void pci_npem_create(struct pci_dev *dev)
+{
+ const struct npem_ops *ops = &npem_ops;
+ int pos = 0, ret;
+ u32 cap;
+
+ if (npem_has_dsm(dev)) {
+ /*
+ * OS should use the DSM for LED control if it is available
+ * PCI Firmware Spec r3.3 sec 4.7.
+ */
+ ret = dsm_get(dev, GET_SUPPORTED_STATES_DSM, &cap);
+ if (ret)
+ return;
+
+ ops = &dsm_ops;
+ } else {
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_NPEM);
+ if (pos == 0)
+ return;
+
+ if (pci_read_config_dword(dev, pos + PCI_NPEM_CAP, &cap) != 0 ||
+ (cap & PCI_NPEM_CAP_CAPABLE) == 0)
+ return;
+ }
+
+ pci_info(dev, "Configuring %s\n", ops->name);
+
+ ret = pci_npem_init(dev, ops, pos, cap);
+ if (ret)
+ pci_err(dev, "Failed to register %s, err: %d\n", ops->name,
+ ret);
+}
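For reference, the read_poll_timeout() call in npem_set_active_indications() is approximately equivalent to the following open-coded loop (sketch; the real macro also performs one final read after the timeout before giving up):

static int poll_cc_sketch(struct npem *npem)
{
	unsigned long deadline = jiffies + HZ;	/* ~USEC_PER_SEC budget */
	u32 cc_status;
	int ret;

	for (;;) {
		ret = npem_read_reg(npem, PCI_NPEM_STATUS, &cc_status);
		if (ret || (cc_status & PCI_NPEM_STATUS_CC))
			return ret;
		if (time_after(jiffies, deadline))
			return -ETIMEDOUT;
		/* Poll at a reduced rate, per the spec's implementation note */
		usleep_range(10 * USEC_PER_MSEC, 12 * USEC_PER_MSEC);
	}
}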
diff --git a/drivers/pci/of.c b/drivers/pci/of.c
index 51e3dd0ea5ab..ab7a8252bf41 100644
--- a/drivers/pci/of.c
+++ b/drivers/pci/of.c
@@ -6,6 +6,7 @@
*/
#define pr_fmt(fmt) "PCI: OF: " fmt
+#include <linux/cleanup.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/pci.h>
@@ -13,6 +14,7 @@
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
+#include <linux/platform_device.h>
#include "pci.h"
#ifdef CONFIG_PCI
@@ -25,16 +27,20 @@
*/
int pci_set_of_node(struct pci_dev *dev)
{
- struct device_node *node;
-
if (!dev->bus->dev.of_node)
return 0;
- node = of_pci_find_child_device(dev->bus->dev.of_node, dev->devfn);
+ struct device_node *node __free(device_node) =
+ of_pci_find_child_device(dev->bus->dev.of_node, dev->devfn);
if (!node)
return 0;
- device_set_node(&dev->dev, of_fwnode_handle(node));
+ struct device *pdev __free(put_device) =
+ bus_find_device_by_of_node(&platform_bus_type, node);
+ if (pdev)
+ dev->bus->dev.of_node_reused = true;
+
+ device_set_node(&dev->dev, of_fwnode_handle(no_free_ptr(node)));
return 0;
}
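The __free(device_node) annotation arranges an automatic of_node_put() when the variable leaves scope, and no_free_ptr() hands the reference off before that can happen. The acquire/release pattern in isolation (sketch):

static int use_child_node(struct pci_dev *dev)
{
	struct device_node *node __free(device_node) =
		of_pci_find_child_device(dev->bus->dev.of_node, dev->devfn);

	if (!node)
		return -ENODEV;	/* nothing acquired, nothing to put */

	/*
	 * ... use node; of_node_put(node) runs automatically on return,
	 * unless ownership is transferred out with no_free_ptr(node).
	 */
	return 0;
}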
@@ -184,7 +190,8 @@ EXPORT_SYMBOL_GPL(of_pci_get_devfn);
*
* Returns 0 on success or a negative error-code on failure.
*/
-int of_pci_parse_bus_range(struct device_node *node, struct resource *res)
+static int of_pci_parse_bus_range(struct device_node *node,
+ struct resource *res)
{
u32 bus_range[2];
int error;
@@ -201,7 +208,6 @@ int of_pci_parse_bus_range(struct device_node *node, struct resource *res)
return 0;
}
-EXPORT_SYMBOL_GPL(of_pci_parse_bus_range);
/**
* of_get_pci_domain_nr - Find the host bridge domain number
@@ -234,27 +240,61 @@ int of_get_pci_domain_nr(struct device_node *node)
EXPORT_SYMBOL_GPL(of_get_pci_domain_nr);
/**
- * of_pci_check_probe_only - Setup probe only mode if linux,pci-probe-only
- * is present and valid
+ * of_pci_preserve_config - Return true if the boot configuration needs to
+ * be preserved
+ * @node: Device tree node.
+ *
+ * Look for "linux,pci-probe-only" property for a given PCI controller's
+ * node and return true if found. Also look in the chosen node if the
+ * property is not found in the given controller's node. Having this
+ * property ensures that the kernel doesn't reconfigure the BARs and bridge
+ * windows that are already done by the platform firmware.
+ *
+ * Return: true if the property exists; false otherwise.
*/
-void of_pci_check_probe_only(void)
+bool of_pci_preserve_config(struct device_node *node)
{
- u32 val;
+ u32 val = 0;
int ret;
- ret = of_property_read_u32(of_chosen, "linux,pci-probe-only", &val);
+ if (!node) {
+ pr_warn("device node is NULL, trying with of_chosen\n");
+ node = of_chosen;
+ }
+
+retry:
+ ret = of_property_read_u32(node, "linux,pci-probe-only", &val);
if (ret) {
- if (ret == -ENODATA || ret == -EOVERFLOW)
- pr_warn("linux,pci-probe-only without valid value, ignoring\n");
- return;
+ if (ret == -ENODATA || ret == -EOVERFLOW) {
+ pr_warn("Incorrect value for linux,pci-probe-only in %pOF, ignoring\n",
+ node);
+ return false;
+ }
+ if (ret == -EINVAL) {
+ if (node == of_chosen)
+ return false;
+
+ node = of_chosen;
+ goto retry;
+ }
}
if (val)
+ return true;
+ else
+ return false;
+}
+
+/**
+ * of_pci_check_probe_only - Setup probe only mode if linux,pci-probe-only
+ * is present and valid
+ */
+void of_pci_check_probe_only(void)
+{
+ if (of_pci_preserve_config(of_chosen))
pci_add_flags(PCI_PROBE_ONLY);
else
pci_clear_flags(PCI_PROBE_ONLY);
-
- pr_info("PROBE_ONLY %s\n", val ? "enabled" : "disabled");
}
EXPORT_SYMBOL_GPL(of_pci_check_probe_only);
@@ -262,8 +302,6 @@ EXPORT_SYMBOL_GPL(of_pci_check_probe_only);
* devm_of_pci_get_host_bridge_resources() - Resource-managed parsing of PCI
* host bridge resources from DT
* @dev: host bridge device
- * @busno: bus number associated with the bridge root bus
- * @bus_max: maximum number of buses for this bridge
* @resources: list where the range of resources will be added after DT parsing
* @ib_resources: list where the range of inbound resources (with addresses
* from 'dma-ranges') will be added after DT parsing
@@ -279,7 +317,6 @@ EXPORT_SYMBOL_GPL(of_pci_check_probe_only);
* value if it failed.
*/
static int devm_of_pci_get_host_bridge_resources(struct device *dev,
- unsigned char busno, unsigned char bus_max,
struct list_head *resources,
struct list_head *ib_resources,
resource_size_t *io_base)
@@ -303,14 +340,15 @@ static int devm_of_pci_get_host_bridge_resources(struct device *dev,
err = of_pci_parse_bus_range(dev_node, bus_range);
if (err) {
- bus_range->start = busno;
- bus_range->end = bus_max;
+ bus_range->start = 0;
+ bus_range->end = 0xff;
bus_range->flags = IORESOURCE_BUS;
- dev_info(dev, " No bus range found for %pOF, using %pR\n",
- dev_node, bus_range);
} else {
- if (bus_range->end > bus_range->start + bus_max)
- bus_range->end = bus_range->start + bus_max;
+ if (bus_range->end > 0xff) {
+ dev_warn(dev, " Invalid end bus number in %pR, defaulting to 0xff\n",
+ bus_range);
+ bus_range->end = 0xff;
+ }
}
pci_add_resource(resources, bus_range);
@@ -417,9 +455,9 @@ failed:
* @out_irq: structure of_phandle_args filled by this function
*
* This function resolves the PCI interrupt for a given PCI device. If a
- * device-node exists for a given pci_dev, it will use normal OF tree
+ * device node exists for a given pci_dev, it will use normal OF tree
* walking. If not, it will implement standard swizzling and walk up the
- * PCI tree until an device-node is found, at which point it will finish
+ * PCI tree until a device node is found, at which point it will finish
* resolving using the OF tree walking.
*/
static int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq)
@@ -479,13 +517,16 @@ static int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *
}
/*
- * Ok, we have found a parent with a device-node, hand over to
+ * Ok, we have found a parent with a device node, hand over to
* the OF parsing code.
+ *
* We build a unit address from the linux device to be used for
* resolution. Note that we use the linux bus number which may
* not match your firmware bus numbering.
+ *
* Fortunately, in most cases, interrupt-map-mask doesn't
* include the bus number as part of the matching.
+ *
* You should still be careful about that though if you intend
* to rely on this function (you ship a firmware that doesn't
* create device nodes for all PCI devices).
@@ -557,7 +598,7 @@ static int pci_parse_request_of_pci_ranges(struct device *dev,
INIT_LIST_HEAD(&bridge->windows);
INIT_LIST_HEAD(&bridge->dma_ranges);
- err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, &bridge->windows,
+ err = devm_of_pci_get_host_bridge_resources(dev, &bridge->windows,
&bridge->dma_ranges, &iobase);
if (err)
return err;
@@ -615,8 +656,8 @@ void of_pci_remove_node(struct pci_dev *pdev)
np = pci_device_to_OF_node(pdev);
if (!np || !of_node_check_flag(np, OF_DYNAMIC))
return;
- pdev->dev.of_node = NULL;
+ device_remove_of_node(&pdev->dev);
of_changeset_revert(np->data);
of_changeset_destroy(np->data);
of_node_put(np);
@@ -673,11 +714,18 @@ void of_pci_make_dev_node(struct pci_dev *pdev)
goto out_free_node;
np->data = cset;
- pdev->dev.of_node = np;
+
+ ret = device_add_of_node(&pdev->dev, np);
+ if (ret)
+ goto out_revert_cset;
+
kfree(name);
return;
+out_revert_cset:
+ np->data = NULL;
+ of_changeset_revert(cset);
out_free_node:
of_node_put(np);
out_destroy_cset:
@@ -686,7 +734,139 @@ out_destroy_cset:
out_free_name:
kfree(name);
}
-#endif
+
+void of_pci_remove_host_bridge_node(struct pci_host_bridge *bridge)
+{
+ struct device_node *np;
+
+ np = pci_bus_to_OF_node(bridge->bus);
+ if (!np || !of_node_check_flag(np, OF_DYNAMIC))
+ return;
+
+ device_remove_of_node(&bridge->bus->dev);
+ device_remove_of_node(&bridge->dev);
+ of_changeset_revert(np->data);
+ of_changeset_destroy(np->data);
+ of_node_put(np);
+}
+
+void of_pci_make_host_bridge_node(struct pci_host_bridge *bridge)
+{
+ struct device_node *np = NULL;
+ struct of_changeset *cset;
+ const char *name;
+ int ret;
+
+ /*
+ * If there is already a device tree node linked to the PCI bus handled
+ * by this bridge (i.e. the PCI root bus), nothing to do.
+ */
+ if (pci_bus_to_OF_node(bridge->bus))
+ return;
+
+ /*
+ * The root bus has no node. Check that the host bridge has no node
+ * either.
+ */
+ if (bridge->dev.of_node) {
+ dev_err(&bridge->dev, "PCI host bridge of_node already set");
+ return;
+ }
+
+ /* Check if there is a DT root node to attach the created node to */
+ if (!of_root) {
+ pr_err("of_root node is NULL, cannot create PCI host bridge node\n");
+ return;
+ }
+
+ name = kasprintf(GFP_KERNEL, "pci@%x,%x", pci_domain_nr(bridge->bus),
+ bridge->bus->number);
+ if (!name)
+ return;
+
+ cset = kmalloc(sizeof(*cset), GFP_KERNEL);
+ if (!cset)
+ goto out_free_name;
+ of_changeset_init(cset);
+
+ np = of_changeset_create_node(cset, of_root, name);
+ if (!np)
+ goto out_destroy_cset;
+
+ ret = of_pci_add_host_bridge_properties(bridge, cset, np);
+ if (ret)
+ goto out_free_node;
+
+ /*
+ * This of_node will be added to an existing device. The of_node parent
+ * is the root OF node and so this node will be handled by the platform
+ * bus. Avoid any new device creation.
+ */
+ of_node_set_flag(np, OF_POPULATED);
+ np->fwnode.dev = &bridge->dev;
+ fwnode_dev_initialized(&np->fwnode, true);
+
+ ret = of_changeset_apply(cset);
+ if (ret)
+ goto out_free_node;
+
+ np->data = cset;
+
+ /* Add the of_node to the host bridge and the root bus */
+ ret = device_add_of_node(&bridge->dev, np);
+ if (ret)
+ goto out_revert_cset;
+
+ ret = device_add_of_node(&bridge->bus->dev, np);
+ if (ret)
+ goto out_remove_bridge_dev_of_node;
+
+ kfree(name);
+
+ return;
+
+out_remove_bridge_dev_of_node:
+ device_remove_of_node(&bridge->dev);
+out_revert_cset:
+ np->data = NULL;
+ of_changeset_revert(cset);
+out_free_node:
+ of_node_put(np);
+out_destroy_cset:
+ of_changeset_destroy(cset);
+ kfree(cset);
+out_free_name:
+ kfree(name);
+}
+
+#endif /* CONFIG_PCI_DYNAMIC_OF_NODES */
+
+/**
+ * of_pci_supply_present() - Check if the power supply is present for the PCI
+ * device
+ * @np: Device tree node
+ *
+ * Check whether the device tree node declares at least one power supply
+ * (a "*-supply" property) for the PCI device.
+ *
+ * Return: true if at least one power supply exists; false otherwise.
+ */
+bool of_pci_supply_present(struct device_node *np)
+{
+ struct property *prop;
+ char *supply;
+
+ if (!np)
+ return false;
+
+ for_each_property_of_node(np, prop) {
+ supply = strrchr(prop->name, '-');
+ if (supply && !strcmp(supply, "-supply"))
+ return true;
+ }
+
+ return false;
+}
#endif /* CONFIG_PCI */
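
A hedged usage sketch for the new helper: a controller driver can skip
regulator handling entirely when the node declares no supplies. The
identifiers num_supplies and supplies below are hypothetical driver state:

	if (of_pci_supply_present(pdev->dev.of_node)) {
		ret = devm_regulator_bulk_get(&pdev->dev, num_supplies,
					      supplies);
		if (ret)
			return dev_err_probe(&pdev->dev, ret,
					     "failed to get supplies\n");
	}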
diff --git a/drivers/pci/of_property.c b/drivers/pci/of_property.c
index c2c7334152bc..506fcd507113 100644
--- a/drivers/pci/of_property.c
+++ b/drivers/pci/of_property.c
@@ -26,7 +26,7 @@ struct of_pci_addr_pair {
* side and the child address is the corresponding address on the secondary
* side.
*/
-struct of_pci_range {
+struct of_pci_range_entry {
u32 child_addr[OF_PCI_ADDRESS_CELLS];
u32 parent_addr[OF_PCI_ADDRESS_CELLS];
u32 size[OF_PCI_SIZE_CELLS];
@@ -54,9 +54,13 @@ enum of_pci_prop_compatible {
static void of_pci_set_address(struct pci_dev *pdev, u32 *prop, u64 addr,
u32 reg_num, u32 flags, bool reloc)
{
- prop[0] = FIELD_PREP(OF_PCI_ADDR_FIELD_BUS, pdev->bus->number) |
- FIELD_PREP(OF_PCI_ADDR_FIELD_DEV, PCI_SLOT(pdev->devfn)) |
- FIELD_PREP(OF_PCI_ADDR_FIELD_FUNC, PCI_FUNC(pdev->devfn));
+ if (pdev) {
+ prop[0] = FIELD_PREP(OF_PCI_ADDR_FIELD_BUS, pdev->bus->number) |
+ FIELD_PREP(OF_PCI_ADDR_FIELD_DEV, PCI_SLOT(pdev->devfn)) |
+ FIELD_PREP(OF_PCI_ADDR_FIELD_FUNC, PCI_FUNC(pdev->devfn));
+ } else {
+ prop[0] = 0;
+ }
+
prop[0] |= flags | reg_num;
if (!reloc) {
prop[0] |= OF_PCI_ADDR_FIELD_NONRELOC;
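
For reference, the first address cell written by of_pci_set_address() follows
the standard PCI OF binding layout, npt000ss bbbbbbbb dddddfff rrrrrrrr
(n = non-relocatable, p = prefetchable, ss = space code, then
bus/device/function and register offset). An illustrative packing with
made-up values:

	u32 phys_hi = FIELD_PREP(OF_PCI_ADDR_FIELD_BUS, 0x01) |  /* bus 01 */
		      FIELD_PREP(OF_PCI_ADDR_FIELD_DEV, 0x02) |  /* dev 02 */
		      FIELD_PREP(OF_PCI_ADDR_FIELD_FUNC, 0x0) |  /* fn 0 */
		      flags | reg_num;  /* space code + BAR offset */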
@@ -65,7 +69,7 @@ static void of_pci_set_address(struct pci_dev *pdev, u32 *prop, u64 addr,
}
}
-static int of_pci_get_addr_flags(struct resource *res, u32 *flags)
+static int of_pci_get_addr_flags(const struct resource *res, u32 *flags)
{
u32 ss;
@@ -101,7 +105,7 @@ static int of_pci_prop_bus_range(struct pci_dev *pdev,
static int of_pci_prop_ranges(struct pci_dev *pdev, struct of_changeset *ocs,
struct device_node *np)
{
- struct of_pci_range *rp;
+ struct of_pci_range_entry *rp;
struct resource *res;
int i, j, ret;
u32 flags, num;
@@ -126,7 +130,7 @@ static int of_pci_prop_ranges(struct pci_dev *pdev, struct of_changeset *ocs,
if (of_pci_get_addr_flags(&res[j], &flags))
continue;
- val64 = res[j].start;
+ val64 = pci_bus_address(pdev, &res[j] - pdev->resource);
of_pci_set_address(pdev, rp[i].parent_addr, val64, 0, flags,
false);
if (pci_is_bridge(pdev)) {
@@ -183,6 +187,26 @@ static int of_pci_prop_interrupts(struct pci_dev *pdev,
return of_changeset_add_prop_u32(ocs, np, "interrupts", (u32)pin);
}
+static int of_pci_prop_intr_ctrl(struct pci_dev *pdev, struct of_changeset *ocs,
+ struct device_node *np)
+{
+ int ret;
+ u8 pin;
+
+ ret = pci_read_config_byte(pdev, PCI_INTERRUPT_PIN, &pin);
+ if (ret != 0)
+ return ret;
+
+ if (!pin)
+ return 0;
+
+ ret = of_changeset_add_prop_u32(ocs, np, "#interrupt-cells", 1);
+ if (ret)
+ return ret;
+
+ return of_changeset_add_prop_bool(ocs, np, "interrupt-controller");
+}
+
static int of_pci_prop_intr_map(struct pci_dev *pdev, struct of_changeset *ocs,
struct device_node *np)
{
@@ -238,6 +262,8 @@ static int of_pci_prop_intr_map(struct pci_dev *pdev, struct of_changeset *ocs,
return 0;
int_map = kcalloc(map_sz, sizeof(u32), GFP_KERNEL);
+ if (!int_map)
+ return -ENOMEM;
mapp = int_map;
list_for_each_entry(child, &pdev->subordinate->devices, bus_list) {
@@ -334,6 +360,10 @@ int of_pci_add_properties(struct pci_dev *pdev, struct of_changeset *ocs,
ret = of_pci_prop_intr_map(pdev, ocs, np);
if (ret)
return ret;
+ } else {
+ ret = of_pci_prop_intr_ctrl(pdev, ocs, np);
+ if (ret)
+ return ret;
}
ret = of_pci_prop_ranges(pdev, ocs, np);
@@ -364,3 +394,106 @@ int of_pci_add_properties(struct pci_dev *pdev, struct of_changeset *ocs,
return 0;
}
+
+static bool of_pci_is_range_resource(const struct resource *res, u32 *flags)
+{
+ if (!(resource_type(res) & IORESOURCE_MEM) &&
+ !(resource_type(res) & IORESOURCE_MEM_64))
+ return false;
+
+ if (of_pci_get_addr_flags(res, flags))
+ return false;
+
+ return true;
+}
+
+static int of_pci_host_bridge_prop_ranges(struct pci_host_bridge *bridge,
+ struct of_changeset *ocs,
+ struct device_node *np)
+{
+ struct resource_entry *window;
+ unsigned int ranges_sz = 0;
+ unsigned int n_range = 0;
+ struct resource *res;
+ int n_addr_cells;
+ u32 *ranges;
+ u64 val64;
+ u32 flags;
+ int ret;
+
+ n_addr_cells = of_n_addr_cells(np);
+ if (n_addr_cells <= 0 || n_addr_cells > 2)
+ return -EINVAL;
+
+ resource_list_for_each_entry(window, &bridge->windows) {
+ res = window->res;
+ if (!of_pci_is_range_resource(res, &flags))
+ continue;
+ n_range++;
+ }
+
+ if (!n_range)
+ return 0;
+
+ ranges = kcalloc(n_range,
+ (OF_PCI_ADDRESS_CELLS + OF_PCI_SIZE_CELLS +
+ n_addr_cells) * sizeof(*ranges),
+ GFP_KERNEL);
+ if (!ranges)
+ return -ENOMEM;
+
+ resource_list_for_each_entry(window, &bridge->windows) {
+ res = window->res;
+ if (!of_pci_is_range_resource(res, &flags))
+ continue;
+
+ /* PCI bus address */
+ val64 = res->start;
+ of_pci_set_address(NULL, &ranges[ranges_sz],
+ val64 - window->offset, 0, flags, false);
+ ranges_sz += OF_PCI_ADDRESS_CELLS;
+
+ /* Host bus address */
+ if (n_addr_cells == 2)
+ ranges[ranges_sz++] = upper_32_bits(val64);
+ ranges[ranges_sz++] = lower_32_bits(val64);
+
+ /* Size */
+ val64 = resource_size(res);
+ ranges[ranges_sz] = upper_32_bits(val64);
+ ranges[ranges_sz + 1] = lower_32_bits(val64);
+ ranges_sz += OF_PCI_SIZE_CELLS;
+ }
+
+ ret = of_changeset_add_prop_u32_array(ocs, np, "ranges", ranges,
+ ranges_sz);
+ kfree(ranges);
+ return ret;
+}
+
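
For readability, one generated "ranges" entry with two parent address cells
might look like this (all addresses hypothetical):

	ranges = <0x02000000 0x00 0x40000000   /* 3 PCI address cells */
		  0x00 0x40000000              /* 2 CPU address cells */
		  0x00 0x10000000>;            /* 2 size cells: 256 MB */

i.e. a 32-bit non-prefetchable memory window translated 1:1.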
+int of_pci_add_host_bridge_properties(struct pci_host_bridge *bridge,
+ struct of_changeset *ocs,
+ struct device_node *np)
+{
+ int ret;
+
+ ret = of_changeset_add_prop_string(ocs, np, "device_type", "pci");
+ if (ret)
+ return ret;
+
+ ret = of_changeset_add_prop_u32(ocs, np, "#address-cells",
+ OF_PCI_ADDRESS_CELLS);
+ if (ret)
+ return ret;
+
+ ret = of_changeset_add_prop_u32(ocs, np, "#size-cells",
+ OF_PCI_SIZE_CELLS);
+ if (ret)
+ return ret;
+
+ ret = of_pci_host_bridge_prop_ranges(bridge, ocs, np);
+ if (ret)
+ return ret;
+
+ return 0;
+}
diff --git a/drivers/pci/p2pdma.c b/drivers/pci/p2pdma.c
index 4f47a13cb500..8d955c25aed3 100644
--- a/drivers/pci/p2pdma.c
+++ b/drivers/pci/p2pdma.c
@@ -90,7 +90,7 @@ static ssize_t published_show(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR_RO(published);
static int p2pmem_alloc_mmap(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, struct vm_area_struct *vma)
+ const struct bin_attribute *attr, struct vm_area_struct *vma)
{
struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
size_t len = vma->vm_end - vma->vm_start;
@@ -140,13 +140,22 @@ static int p2pmem_alloc_mmap(struct file *filp, struct kobject *kobj,
rcu_read_unlock();
for (vaddr = vma->vm_start; vaddr < vma->vm_end; vaddr += PAGE_SIZE) {
- ret = vm_insert_page(vma, vaddr, virt_to_page(kaddr));
+ struct page *page = virt_to_page(kaddr);
+
+ /*
+ * Initialise the refcount for the freshly allocated page. As
+ * we have just allocated the page no one else should be
+ * using it.
+ */
+ VM_WARN_ON_ONCE_PAGE(!page_ref_count(page), page);
+ set_page_count(page, 1);
+ ret = vm_insert_page(vma, vaddr, page);
if (ret) {
gen_pool_free(p2pdma->pool, (uintptr_t)kaddr, len);
return ret;
}
percpu_ref_get(ref);
- put_page(virt_to_page(kaddr));
+ put_page(page);
kaddr += PAGE_SIZE;
len -= PAGE_SIZE;
}
@@ -161,7 +170,7 @@ out:
return ret;
}
-static struct bin_attribute p2pmem_alloc_attr = {
+static const struct bin_attribute p2pmem_alloc_attr = {
.attr = { .name = "allocate", .mode = 0660 },
.mmap = p2pmem_alloc_mmap,
/*
@@ -180,20 +189,20 @@ static struct attribute *p2pmem_attrs[] = {
NULL,
};
-static struct bin_attribute *p2pmem_bin_attrs[] = {
+static const struct bin_attribute *const p2pmem_bin_attrs[] = {
&p2pmem_alloc_attr,
NULL,
};
static const struct attribute_group p2pmem_group = {
.attrs = p2pmem_attrs,
- .bin_attrs = p2pmem_bin_attrs,
+ .bin_attrs_new = p2pmem_bin_attrs,
.name = "p2pmem",
};
static void p2pdma_page_free(struct page *page)
{
- struct pci_p2pdma_pagemap *pgmap = to_p2p_pgmap(page->pgmap);
+ struct pci_p2pdma_pagemap *pgmap = to_p2p_pgmap(page_pgmap(page));
/* safe to dereference while a reference is held to the percpu ref */
struct pci_p2pdma *p2pdma =
rcu_dereference_protected(pgmap->provider->p2pdma, 1);
@@ -995,40 +1004,12 @@ static enum pci_p2pdma_map_type pci_p2pdma_map_type(struct dev_pagemap *pgmap,
return type;
}
-/**
- * pci_p2pdma_map_segment - map an sg segment determining the mapping type
- * @state: State structure that should be declared outside of the for_each_sg()
- * loop and initialized to zero.
- * @dev: DMA device that's doing the mapping operation
- * @sg: scatterlist segment to map
- *
- * This is a helper to be used by non-IOMMU dma_map_sg() implementations where
- * the sg segment is the same for the page_link and the dma_address.
- *
- * Attempt to map a single segment in an SGL with the PCI bus address.
- * The segment must point to a PCI P2PDMA page and thus must be
- * wrapped in a is_pci_p2pdma_page(sg_page(sg)) check.
- *
- * Returns the type of mapping used and maps the page if the type is
- * PCI_P2PDMA_MAP_BUS_ADDR.
- */
-enum pci_p2pdma_map_type
-pci_p2pdma_map_segment(struct pci_p2pdma_map_state *state, struct device *dev,
- struct scatterlist *sg)
+void __pci_p2pdma_update_state(struct pci_p2pdma_map_state *state,
+ struct device *dev, struct page *page)
{
- if (state->pgmap != sg_page(sg)->pgmap) {
- state->pgmap = sg_page(sg)->pgmap;
- state->map = pci_p2pdma_map_type(state->pgmap, dev);
- state->bus_off = to_p2p_pgmap(state->pgmap)->bus_offset;
- }
-
- if (state->map == PCI_P2PDMA_MAP_BUS_ADDR) {
- sg->dma_address = sg_phys(sg) + state->bus_off;
- sg_dma_len(sg) = sg->length;
- sg_dma_mark_bus_address(sg);
- }
-
- return state->map;
+ state->pgmap = page_pgmap(page);
+ state->map = pci_p2pdma_map_type(state->pgmap, dev);
+ state->bus_off = to_p2p_pgmap(state->pgmap)->bus_offset;
}
/**
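
A sketch of the intended caller pattern for the new helper (the real
consumers live in the DMA-mapping layer; state, dma_dev and offset are
hypothetical locals):

	if (is_pci_p2pdma_page(page) && state.pgmap != page_pgmap(page))
		__pci_p2pdma_update_state(&state, dma_dev, page);

	if (state.map == PCI_P2PDMA_MAP_BUS_ADDR)
		dma_addr = page_to_phys(page) + offset + state.bus_off;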
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 004575091596..af370628e583 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -15,6 +15,7 @@
#include <linux/pci_hotplug.h>
#include <linux/module.h>
#include <linux/pci-acpi.h>
+#include <linux/pci-ecam.h>
#include <linux/pm_runtime.h>
#include <linux/pm_qos.h>
#include <linux/rwsem.h>
@@ -119,6 +120,28 @@ phys_addr_t acpi_pci_root_get_mcfg_addr(acpi_handle handle)
return (phys_addr_t)mcfg_addr;
}
+bool pci_acpi_preserve_config(struct pci_host_bridge *host_bridge)
+{
+ if (ACPI_HANDLE(&host_bridge->dev)) {
+ union acpi_object *obj;
+
+ /*
+ * Evaluate the "PCI Boot Configuration" _DSM Function. If it
+ * exists and returns 0, we must preserve any PCI resource
+ * assignments made by firmware for this host bridge.
+ */
+ obj = acpi_evaluate_dsm_typed(ACPI_HANDLE(&host_bridge->dev),
+ &pci_acpi_dsm_guid,
+ 1, DSM_PCI_PRESERVE_BOOT_CONFIG,
+ NULL, ACPI_TYPE_INTEGER);
+ if (obj && obj->integer.value == 0) {
+ ACPI_FREE(obj);
+ return true;
+ }
+ ACPI_FREE(obj);
+ }
+
+ return false;
+}
+
/* _HPX PCI Setting Record (Type 0); same as _HPP */
struct hpx_type0 {
u32 revision; /* Not present in _HPP */
@@ -1519,3 +1542,184 @@ static int __init acpi_pci_init(void)
return 0;
}
arch_initcall(acpi_pci_init);
+
+#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
+
+/*
+ * Try to assign the IRQ number when probing a new device
+ */
+int pcibios_alloc_irq(struct pci_dev *dev)
+{
+ if (!acpi_disabled)
+ acpi_pci_irq_enable(dev);
+
+ return 0;
+}
+
+struct acpi_pci_generic_root_info {
+ struct acpi_pci_root_info common;
+ struct pci_config_window *cfg; /* config space mapping */
+};
+
+int acpi_pci_bus_find_domain_nr(struct pci_bus *bus)
+{
+ struct pci_config_window *cfg = bus->sysdata;
+ struct acpi_device *adev = to_acpi_device(cfg->parent);
+ struct acpi_pci_root *root = acpi_driver_data(adev);
+
+ return root->segment;
+}
+
+int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
+{
+ struct pci_config_window *cfg;
+ struct acpi_device *adev;
+ struct device *bus_dev;
+
+ if (acpi_disabled)
+ return 0;
+
+ cfg = bridge->bus->sysdata;
+
+ /*
+ * On Hyper-V there is no corresponding ACPI device for a root bridge,
+ * so the driver leaves ->parent set to NULL. Use a NULL 'adev' in that
+ * case, because there is no proper ACPI device to point to.
+ */
+ if (!cfg->parent)
+ adev = NULL;
+ else
+ adev = to_acpi_device(cfg->parent);
+
+ bus_dev = &bridge->bus->dev;
+
+ ACPI_COMPANION_SET(&bridge->dev, adev);
+ set_dev_node(bus_dev, acpi_get_node(acpi_device_handle(adev)));
+
+ return 0;
+}
+
+static int pci_acpi_root_prepare_resources(struct acpi_pci_root_info *ci)
+{
+ struct resource_entry *entry, *tmp;
+ int status;
+
+ status = acpi_pci_probe_root_resources(ci);
+ resource_list_for_each_entry_safe(entry, tmp, &ci->resources) {
+ if (!(entry->res->flags & IORESOURCE_WINDOW))
+ resource_list_destroy_entry(entry);
+ }
+ return status;
+}
+
+/*
+ * Lookup the bus range for the domain in MCFG, and set up config space
+ * mapping.
+ */
+static struct pci_config_window *
+pci_acpi_setup_ecam_mapping(struct acpi_pci_root *root)
+{
+ struct device *dev = &root->device->dev;
+ struct resource *bus_res = &root->secondary;
+ u16 seg = root->segment;
+ const struct pci_ecam_ops *ecam_ops;
+ struct resource cfgres;
+ struct acpi_device *adev;
+ struct pci_config_window *cfg;
+ int ret;
+
+ ret = pci_mcfg_lookup(root, &cfgres, &ecam_ops);
+ if (ret) {
+ dev_err(dev, "%04x:%pR ECAM region not found\n", seg, bus_res);
+ return NULL;
+ }
+
+ adev = acpi_resource_consumer(&cfgres);
+ if (adev)
+ dev_info(dev, "ECAM area %pR reserved by %s\n", &cfgres,
+ dev_name(&adev->dev));
+ else
+ dev_warn(dev, FW_BUG "ECAM area %pR not reserved in ACPI namespace\n",
+ &cfgres);
+
+ cfg = pci_ecam_create(dev, &cfgres, bus_res, ecam_ops);
+ if (IS_ERR(cfg)) {
+ dev_err(dev, "%04x:%pR error %ld mapping ECAM\n", seg, bus_res,
+ PTR_ERR(cfg));
+ return NULL;
+ }
+
+ return cfg;
+}
+
+/* release_info: free resources allocated by init_info */
+static void pci_acpi_generic_release_info(struct acpi_pci_root_info *ci)
+{
+ struct acpi_pci_generic_root_info *ri;
+
+ ri = container_of(ci, struct acpi_pci_generic_root_info, common);
+ pci_ecam_free(ri->cfg);
+ kfree(ci->ops);
+ kfree(ri);
+}
+
+/* Interface called from ACPI code to setup PCI host controller */
+struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
+{
+ struct acpi_pci_generic_root_info *ri;
+ struct pci_bus *bus, *child;
+ struct acpi_pci_root_ops *root_ops;
+ struct pci_host_bridge *host;
+
+ ri = kzalloc(sizeof(*ri), GFP_KERNEL);
+ if (!ri)
+ return NULL;
+
+ root_ops = kzalloc(sizeof(*root_ops), GFP_KERNEL);
+ if (!root_ops) {
+ kfree(ri);
+ return NULL;
+ }
+
+ ri->cfg = pci_acpi_setup_ecam_mapping(root);
+ if (!ri->cfg) {
+ kfree(ri);
+ kfree(root_ops);
+ return NULL;
+ }
+
+ root_ops->release_info = pci_acpi_generic_release_info;
+ root_ops->prepare_resources = pci_acpi_root_prepare_resources;
+ root_ops->pci_ops = (struct pci_ops *)&ri->cfg->ops->pci_ops;
+ bus = acpi_pci_root_create(root, root_ops, &ri->common, ri->cfg);
+ if (!bus)
+ return NULL;
+
+ /* If we must preserve the resource configuration, claim now */
+ host = pci_find_host_bridge(bus);
+ if (host->preserve_config)
+ pci_bus_claim_resources(bus);
+
+ /*
+ * Assign whatever was left unassigned. If we didn't claim above,
+ * this will reassign everything.
+ */
+ pci_assign_unassigned_root_bus_resources(bus);
+
+ list_for_each_entry(child, &bus->children, node)
+ pcie_bus_configure_settings(child);
+
+ return bus;
+}
+
+void pcibios_add_bus(struct pci_bus *bus)
+{
+ acpi_pci_add_bus(bus);
+}
+
+void pcibios_remove_bus(struct pci_bus *bus)
+{
+ acpi_pci_remove_bus(bus);
+}
+
+#endif /* CONFIG_ARM64 || CONFIG_RISCV */
diff --git a/drivers/pci/pci-bridge-emul.c b/drivers/pci/pci-bridge-emul.c
index 9334b2dd4764..6658c1edd464 100644
--- a/drivers/pci/pci-bridge-emul.c
+++ b/drivers/pci/pci-bridge-emul.c
@@ -257,8 +257,8 @@ struct pci_bridge_reg_behavior pcie_cap_regs_behavior[PCI_CAP_PCIE_SIZEOF / 4] =
*/
.rw = (PCI_EXP_RTCTL_SECEE | PCI_EXP_RTCTL_SENFEE |
PCI_EXP_RTCTL_SEFEE | PCI_EXP_RTCTL_PMEIE |
- PCI_EXP_RTCTL_CRSSVE),
- .ro = PCI_EXP_RTCAP_CRSVIS << 16,
+ PCI_EXP_RTCTL_RRS_SVE),
+ .ro = PCI_EXP_RTCAP_RRS_SV << 16,
},
[PCI_EXP_RTSTA / 4] = {
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index af2996d0d17f..c8bd71a739f7 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -812,8 +812,7 @@ static int pci_pm_suspend(struct device *dev)
* suspend callbacks can cope with runtime-suspended devices, it is
* better to resume the device from runtime suspend here.
*/
- if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) ||
- pci_dev_need_resume(pci_dev)) {
+ if (!dev_pm_smart_suspend(dev) || pci_dev_need_resume(pci_dev)) {
pm_runtime_resume(dev);
pci_dev->state_saved = false;
} else {
@@ -1151,8 +1150,7 @@ static int pci_pm_poweroff(struct device *dev)
}
/* The reason to do that is the same as in pci_pm_suspend(). */
- if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) ||
- pci_dev_need_resume(pci_dev)) {
+ if (!dev_pm_smart_suspend(dev) || pci_dev_need_resume(pci_dev)) {
pm_runtime_resume(dev);
pci_dev->state_saved = false;
} else {
@@ -1503,7 +1501,7 @@ EXPORT_SYMBOL(pci_dev_driver);
* system is in its list of supported devices. Returns the matching
* pci_device_id structure or %NULL if there is no match.
*/
-static int pci_bus_match(struct device *dev, struct device_driver *drv)
+static int pci_bus_match(struct device *dev, const struct device_driver *drv)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
struct pci_driver *pci_drv;
@@ -1512,7 +1510,7 @@ static int pci_bus_match(struct device *dev, struct device_driver *drv)
if (!pci_dev->match_driver)
return 0;
- pci_drv = to_pci_driver(drv);
+ pci_drv = (struct pci_driver *)to_pci_driver(drv);
found_id = pci_match_device(pci_drv, pci_dev);
if (found_id)
return 1;
@@ -1653,7 +1651,8 @@ static int pci_dma_configure(struct device *dev)
pci_put_host_bridge_device(bridge);
- if (!ret && !driver->driver_managed_dma) {
+ /* @driver may not be valid when we're called from the IOMMU layer */
+ if (!ret && dev->driver && !driver->driver_managed_dma) {
ret = iommu_device_use_default_domain(dev);
if (ret)
arch_teardown_dma_ops(dev);
@@ -1670,13 +1669,27 @@ static void pci_dma_cleanup(struct device *dev)
iommu_device_unuse_default_domain(dev);
}
-struct bus_type pci_bus_type = {
+/**
+ * pci_device_irq_get_affinity - get IRQ affinity mask for device
+ * @dev: device whose IRQ affinity is queried
+ * @irq_vec: interrupt vector number
+ *
+ * Return the CPU affinity mask for @dev and @irq_vec.
+ */
+static const struct cpumask *pci_device_irq_get_affinity(struct device *dev,
+ unsigned int irq_vec)
+{
+ return pci_irq_get_affinity(to_pci_dev(dev), irq_vec);
+}
+
+const struct bus_type pci_bus_type = {
.name = "pci",
.match = pci_bus_match,
.uevent = pci_uevent,
.probe = pci_device_probe,
.remove = pci_device_remove,
.shutdown = pci_device_shutdown,
+ .irq_get_affinity = pci_device_irq_get_affinity,
.dev_groups = pci_dev_groups,
.bus_groups = pci_bus_groups,
.drv_groups = pci_drv_groups,
@@ -1688,10 +1701,10 @@ struct bus_type pci_bus_type = {
EXPORT_SYMBOL(pci_bus_type);
#ifdef CONFIG_PCIEPORTBUS
-static int pcie_port_bus_match(struct device *dev, struct device_driver *drv)
+static int pcie_port_bus_match(struct device *dev, const struct device_driver *drv)
{
struct pcie_device *pciedev;
- struct pcie_port_service_driver *driver;
+ const struct pcie_port_service_driver *driver;
if (drv->bus != &pcie_port_bus_type || dev->bus != &pcie_port_bus_type)
return 0;
diff --git a/drivers/pci/pci-mid.c b/drivers/pci/pci-mid.c
index fbfd78127123..bed9f0755271 100644
--- a/drivers/pci/pci-mid.c
+++ b/drivers/pci/pci-mid.c
@@ -38,8 +38,8 @@ pci_power_t mid_pci_get_power_state(struct pci_dev *pdev)
* arch/x86/platform/intel-mid/pwr.c.
*/
static const struct x86_cpu_id lpss_cpu_ids[] = {
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_SALTWELL_MID, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT_MID, NULL),
+ X86_MATCH_VFM(INTEL_ATOM_SALTWELL_MID, NULL),
+ X86_MATCH_VFM(INTEL_ATOM_SILVERMONT_MID, NULL),
{}
};
diff --git a/drivers/pci/pci-pf-stub.c b/drivers/pci/pci-pf-stub.c
index 45855a5e9fca..da4db4928907 100644
--- a/drivers/pci/pci-pf-stub.c
+++ b/drivers/pci/pci-pf-stub.c
@@ -39,4 +39,5 @@ static struct pci_driver pf_stub_driver = {
};
module_pci_driver(pf_stub_driver);
+MODULE_DESCRIPTION("SR-IOV PF stub driver with no functionality");
MODULE_LICENSE("GPL");
diff --git a/drivers/pci/pci-stub.c b/drivers/pci/pci-stub.c
index d1f4c1ce7bd1..9bc478df4e8f 100644
--- a/drivers/pci/pci-stub.c
+++ b/drivers/pci/pci-stub.c
@@ -92,5 +92,6 @@ static void __exit pci_stub_exit(void)
module_init(pci_stub_init);
module_exit(pci_stub_exit);
+MODULE_DESCRIPTION("VM device assignment stub driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Chris Wright <chrisw@sous-sol.org>");
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 40cfa716392f..c6cda56ca52c 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -13,6 +13,7 @@
*/
#include <linux/bitfield.h>
+#include <linux/cleanup.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/pci.h>
@@ -31,6 +32,10 @@
#include <linux/aperture.h>
#include "pci.h"
+#ifndef ARCH_PCI_DEV_GROUPS
+#define ARCH_PCI_DEV_GROUPS
+#endif
+
static int sysfs_initialized; /* = 0 */
/* show configuration fields */
@@ -517,6 +522,31 @@ static ssize_t bus_rescan_store(struct device *dev,
static struct device_attribute dev_attr_bus_rescan = __ATTR(rescan, 0200, NULL,
bus_rescan_store);
+static ssize_t reset_subordinate_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct pci_bus *bus = pdev->subordinate;
+ unsigned long val;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (kstrtoul(buf, 0, &val) < 0)
+ return -EINVAL;
+
+ if (val) {
+ int ret = __pci_reset_bus(bus);
+
+ if (ret)
+ return ret;
+ }
+
+ return count;
+}
+static DEVICE_ATTR_WO(reset_subordinate);
+
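
Usage is a single sysfs write; for example (bridge address illustrative):

	# echo 1 > /sys/bus/pci/devices/0000:00:01.0/reset_subordinate

which resets every device below the bridge via __pci_reset_bus().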
#if defined(CONFIG_PM) && defined(CONFIG_ACPI)
static ssize_t d3cold_allowed_store(struct device *dev,
struct device_attribute *attr,
@@ -621,6 +651,7 @@ static struct attribute *pci_dev_attrs[] = {
static struct attribute *pci_bridge_attrs[] = {
&dev_attr_subordinate_bus_number.attr,
&dev_attr_secondary_bus_number.attr,
+ &dev_attr_reset_subordinate.attr,
NULL,
};
@@ -664,7 +695,7 @@ static ssize_t boot_vga_show(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR_RO(boot_vga);
static ssize_t pci_read_config(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct pci_dev *dev = to_pci_dev(kobj_to_dev(kobj));
@@ -739,7 +770,7 @@ static ssize_t pci_read_config(struct file *filp, struct kobject *kobj,
}
static ssize_t pci_write_config(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct pci_dev *dev = to_pci_dev(kobj_to_dev(kobj));
@@ -807,28 +838,27 @@ static ssize_t pci_write_config(struct file *filp, struct kobject *kobj,
return count;
}
-static BIN_ATTR(config, 0644, pci_read_config, pci_write_config, 0);
+static const BIN_ATTR(config, 0644, pci_read_config, pci_write_config, 0);
-static struct bin_attribute *pci_dev_config_attrs[] = {
+static const struct bin_attribute *const pci_dev_config_attrs[] = {
&bin_attr_config,
NULL,
};
-static umode_t pci_dev_config_attr_is_visible(struct kobject *kobj,
- struct bin_attribute *a, int n)
+static size_t pci_dev_config_attr_bin_size(struct kobject *kobj,
+ const struct bin_attribute *a,
+ int n)
{
struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
- a->size = PCI_CFG_SPACE_SIZE;
if (pdev->cfg_size > PCI_CFG_SPACE_SIZE)
- a->size = PCI_CFG_SPACE_EXP_SIZE;
-
- return a->attr.mode;
+ return PCI_CFG_SPACE_EXP_SIZE;
+ return PCI_CFG_SPACE_SIZE;
}
static const struct attribute_group pci_dev_config_attr_group = {
- .bin_attrs = pci_dev_config_attrs,
- .is_bin_visible = pci_dev_config_attr_is_visible,
+ .bin_attrs_new = pci_dev_config_attrs,
+ .bin_size = pci_dev_config_attr_bin_size,
};
/*
@@ -838,7 +868,7 @@ static const struct attribute_group pci_dev_config_attr_group = {
static __maybe_unused loff_t
pci_llseek_resource(struct file *filep,
struct kobject *kobj __always_unused,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
loff_t offset, int whence)
{
return fixed_size_llseek(filep, offset, whence, attr->size);
@@ -858,8 +888,8 @@ pci_llseek_resource(struct file *filep,
* callback routine (pci_legacy_read).
*/
static ssize_t pci_read_legacy_io(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
- loff_t off, size_t count)
+ const struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
{
struct pci_bus *bus = to_pci_bus(kobj_to_dev(kobj));
@@ -883,8 +913,8 @@ static ssize_t pci_read_legacy_io(struct file *filp, struct kobject *kobj,
* callback routine (pci_legacy_write).
*/
static ssize_t pci_write_legacy_io(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
- loff_t off, size_t count)
+ const struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
{
struct pci_bus *bus = to_pci_bus(kobj_to_dev(kobj));
@@ -907,7 +937,7 @@ static ssize_t pci_write_legacy_io(struct file *filp, struct kobject *kobj,
* memory space.
*/
static int pci_mmap_legacy_mem(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
struct vm_area_struct *vma)
{
struct pci_bus *bus = to_pci_bus(kobj_to_dev(kobj));
@@ -927,7 +957,7 @@ static int pci_mmap_legacy_mem(struct file *filp, struct kobject *kobj,
* memory space. Returns -ENOSYS if the operation isn't supported
*/
static int pci_mmap_legacy_io(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
struct vm_area_struct *vma)
{
struct pci_bus *bus = to_pci_bus(kobj_to_dev(kobj));
@@ -974,8 +1004,8 @@ void pci_create_legacy_files(struct pci_bus *b)
b->legacy_io->attr.name = "legacy_io";
b->legacy_io->size = 0xffff;
b->legacy_io->attr.mode = 0600;
- b->legacy_io->read = pci_read_legacy_io;
- b->legacy_io->write = pci_write_legacy_io;
+ b->legacy_io->read_new = pci_read_legacy_io;
+ b->legacy_io->write_new = pci_write_legacy_io;
/* See pci_create_attr() for motivation */
b->legacy_io->llseek = pci_llseek_resource;
b->legacy_io->mmap = pci_mmap_legacy_io;
@@ -1031,7 +1061,7 @@ void pci_remove_legacy_files(struct pci_bus *b)
*
* Use the regular PCI mapping routines to map a PCI resource into userspace.
*/
-static int pci_mmap_resource(struct kobject *kobj, struct bin_attribute *attr,
+static int pci_mmap_resource(struct kobject *kobj, const struct bin_attribute *attr,
struct vm_area_struct *vma, int write_combine)
{
struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
@@ -1056,21 +1086,21 @@ static int pci_mmap_resource(struct kobject *kobj, struct bin_attribute *attr,
}
static int pci_mmap_resource_uc(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
struct vm_area_struct *vma)
{
return pci_mmap_resource(kobj, attr, vma, 0);
}
static int pci_mmap_resource_wc(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
struct vm_area_struct *vma)
{
return pci_mmap_resource(kobj, attr, vma, 1);
}
static ssize_t pci_resource_io(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
+ const struct bin_attribute *attr, char *buf,
loff_t off, size_t count, bool write)
{
#ifdef CONFIG_HAS_IOPORT
@@ -1113,14 +1143,14 @@ static ssize_t pci_resource_io(struct file *filp, struct kobject *kobj,
}
static ssize_t pci_read_resource_io(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
+ const struct bin_attribute *attr, char *buf,
loff_t off, size_t count)
{
return pci_resource_io(filp, kobj, attr, buf, off, count, false);
}
static ssize_t pci_write_resource_io(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
+ const struct bin_attribute *attr, char *buf,
loff_t off, size_t count)
{
int ret;
@@ -1181,8 +1211,8 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
} else {
sprintf(res_attr_name, "resource%d", num);
if (pci_resource_flags(pdev, num) & IORESOURCE_IO) {
- res_attr->read = pci_read_resource_io;
- res_attr->write = pci_write_resource_io;
+ res_attr->read_new = pci_read_resource_io;
+ res_attr->write_new = pci_write_resource_io;
if (arch_can_pci_mmap_io())
res_attr->mmap = pci_mmap_resource_uc;
} else {
@@ -1227,6 +1257,10 @@ static int pci_create_resource_files(struct pci_dev *pdev)
int i;
int retval;
+ /* Skip devices with non-mappable BARs */
+ if (pdev->non_mappable_bars)
+ return 0;
+
/* Expose the PCI resources from this device as files */
for (i = 0; i < PCI_STD_NUM_BARS; i++) {
@@ -1263,7 +1297,7 @@ void __weak pci_remove_resource_files(struct pci_dev *dev) { return; }
* writing anything except 0 enables it
*/
static ssize_t pci_write_rom(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
@@ -1289,7 +1323,7 @@ static ssize_t pci_write_rom(struct file *filp, struct kobject *kobj,
* device corresponding to @kobj.
*/
static ssize_t pci_read_rom(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
@@ -1315,32 +1349,37 @@ static ssize_t pci_read_rom(struct file *filp, struct kobject *kobj,
return count;
}
-static BIN_ATTR(rom, 0600, pci_read_rom, pci_write_rom, 0);
+static const BIN_ATTR(rom, 0600, pci_read_rom, pci_write_rom, 0);
-static struct bin_attribute *pci_dev_rom_attrs[] = {
+static const struct bin_attribute *const pci_dev_rom_attrs[] = {
&bin_attr_rom,
NULL,
};
static umode_t pci_dev_rom_attr_is_visible(struct kobject *kobj,
- struct bin_attribute *a, int n)
+ const struct bin_attribute *a, int n)
{
struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
- size_t rom_size;
/* If the device has a ROM, try to expose it in sysfs. */
- rom_size = pci_resource_len(pdev, PCI_ROM_RESOURCE);
- if (!rom_size)
+ if (!pci_resource_end(pdev, PCI_ROM_RESOURCE))
return 0;
- a->size = rom_size;
-
return a->attr.mode;
}
+static size_t pci_dev_rom_attr_bin_size(struct kobject *kobj,
+ const struct bin_attribute *a, int n)
+{
+ struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
+
+ return pci_resource_len(pdev, PCI_ROM_RESOURCE);
+}
+
static const struct attribute_group pci_dev_rom_attr_group = {
- .bin_attrs = pci_dev_rom_attrs,
+ .bin_attrs_new = pci_dev_rom_attrs,
.is_bin_visible = pci_dev_rom_attr_is_visible,
+ .bin_size = pci_dev_rom_attr_bin_size,
};
static ssize_t reset_store(struct device *dev, struct device_attribute *attr,
@@ -1387,6 +1426,113 @@ static const struct attribute_group pci_dev_reset_attr_group = {
.is_visible = pci_dev_reset_attr_is_visible,
};
+static ssize_t reset_method_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ ssize_t len = 0;
+ int i, m;
+
+ for (i = 0; i < PCI_NUM_RESET_METHODS; i++) {
+ m = pdev->reset_methods[i];
+ if (!m)
+ break;
+
+ len += sysfs_emit_at(buf, len, "%s%s", len ? " " : "",
+ pci_reset_fn_methods[m].name);
+ }
+
+ if (len)
+ len += sysfs_emit_at(buf, len, "\n");
+
+ return len;
+}
+
+static int reset_method_lookup(const char *name)
+{
+ int m;
+
+ for (m = 1; m < PCI_NUM_RESET_METHODS; m++) {
+ if (sysfs_streq(name, pci_reset_fn_methods[m].name))
+ return m;
+ }
+
+ return 0; /* not found */
+}
+
+static ssize_t reset_method_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ char *tmp_options, *name;
+ int m, n;
+ u8 reset_methods[PCI_NUM_RESET_METHODS] = {};
+
+ if (sysfs_streq(buf, "")) {
+ pdev->reset_methods[0] = 0;
+ pci_warn(pdev, "All device reset methods disabled by user");
+ return count;
+ }
+
+ if (sysfs_streq(buf, "default")) {
+ pci_init_reset_methods(pdev);
+ return count;
+ }
+
+ char *options __free(kfree) = kstrndup(buf, count, GFP_KERNEL);
+ if (!options)
+ return -ENOMEM;
+
+ n = 0;
+ tmp_options = options;
+ while ((name = strsep(&tmp_options, " ")) != NULL) {
+ if (sysfs_streq(name, ""))
+ continue;
+
+ name = strim(name);
+
+ /* Leave previous methods unchanged if input is invalid */
+ m = reset_method_lookup(name);
+ if (!m) {
+ pci_err(pdev, "Invalid reset method '%s'", name);
+ return -EINVAL;
+ }
+
+ if (pci_reset_fn_methods[m].reset_fn(pdev, PCI_RESET_PROBE)) {
+ pci_err(pdev, "Unsupported reset method '%s'", name);
+ return -EINVAL;
+ }
+
+ if (n == PCI_NUM_RESET_METHODS - 1) {
+ pci_err(pdev, "Too many reset methods\n");
+ return -EINVAL;
+ }
+
+ reset_methods[n++] = m;
+ }
+
+ reset_methods[n] = 0;
+
+ /* Warn if dev-specific supported but not highest priority */
+ if (pci_reset_fn_methods[1].reset_fn(pdev, PCI_RESET_PROBE) == 0 &&
+ reset_methods[0] != 1)
+ pci_warn(pdev, "Device-specific reset disabled/de-prioritized by user");
+ memcpy(pdev->reset_methods, reset_methods, sizeof(pdev->reset_methods));
+ return count;
+}
+static DEVICE_ATTR_RW(reset_method);
+
+static struct attribute *pci_dev_reset_method_attrs[] = {
+ &dev_attr_reset_method.attr,
+ NULL,
+};
+
+static const struct attribute_group pci_dev_reset_method_attr_group = {
+ .attrs = pci_dev_reset_method_attrs,
+ .is_visible = pci_dev_reset_attr_is_visible,
+};
+
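
An illustrative sysfs session for the attribute (device address hypothetical):

	# cat /sys/bus/pci/devices/0000:03:00.0/reset_method
	flr bus
	# echo bus > /sys/bus/pci/devices/0000:03:00.0/reset_method
	# echo default > /sys/bus/pci/devices/0000:03:00.0/reset_method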
static ssize_t __resource_resize_show(struct device *dev, int n, char *buf)
{
struct pci_dev *pdev = to_pci_dev(dev);
@@ -1414,7 +1560,7 @@ static ssize_t __resource_resize_store(struct device *dev, int n,
return -EINVAL;
device_lock(dev);
- if (dev->driver) {
+ if (dev->driver || pci_num_vf(pdev)) {
ret = -EBUSY;
goto unlock;
}
@@ -1436,7 +1582,7 @@ static ssize_t __resource_resize_store(struct device *dev, int n,
pci_remove_resource_files(pdev);
- for (i = 0; i < PCI_STD_NUM_BARS; i++) {
+ for (i = 0; i < PCI_BRIDGE_RESOURCES; i++) {
if (pci_resource_len(pdev, i) &&
pci_resource_flags(pdev, i) == flags)
pci_release_resource(pdev, i);
@@ -1624,6 +1770,7 @@ const struct attribute_group *pci_dev_groups[] = {
&pci_dev_acpi_attr_group,
#endif
&pci_dev_resource_resize_group,
+ ARCH_PCI_DEV_GROUPS
NULL,
};
@@ -1662,5 +1809,8 @@ const struct attribute_group *pci_dev_attr_groups[] = {
#ifdef CONFIG_PCIEASPM
&aspm_ctrl_attr_group,
#endif
+#ifdef CONFIG_PCI_DOE
+ &pci_doe_sysfs_group,
+#endif
NULL,
};
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index e5f243dd4288..e77d5b53c0ce 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -23,7 +23,6 @@
#include <linux/string.h>
#include <linux/log2.h>
#include <linux/logic_pio.h>
-#include <linux/pm_wakeup.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <linux/pci_hotplug.h>
@@ -142,8 +141,8 @@ enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_DEFAULT;
* the dfl or actual value as it sees fit. Don't forget this is
* measured in 32-bit words, not bytes.
*/
-u8 pci_dfl_cache_line_size = L1_CACHE_BYTES >> 2;
-u8 pci_cache_line_size;
+u8 pci_dfl_cache_line_size __ro_after_init = L1_CACHE_BYTES >> 2;
+u8 pci_cache_line_size __ro_after_init;
/*
* If we set up a device for bus mastering, we need to check the latency
@@ -946,30 +945,71 @@ void pci_request_acs(void)
}
static const char *disable_acs_redir_param;
+static const char *config_acs_param;
-/**
- * pci_disable_acs_redir - disable ACS redirect capabilities
- * @dev: the PCI device
- *
- * For only devices specified in the disable_acs_redir parameter.
- */
-static void pci_disable_acs_redir(struct pci_dev *dev)
+struct pci_acs {
+ u16 cap;
+ u16 ctrl;
+ u16 fw_ctrl;
+};
+
+static void __pci_config_acs(struct pci_dev *dev, struct pci_acs *caps,
+ const char *p, const u16 acs_mask, const u16 acs_flags)
{
+ u16 flags = acs_flags;
+ u16 mask = acs_mask;
+ char *delimit;
int ret = 0;
- const char *p;
- int pos;
- u16 ctrl;
- if (!disable_acs_redir_param)
+ if (!p)
return;
- p = disable_acs_redir_param;
while (*p) {
+ if (!acs_mask) {
+ /* Check for ACS flags */
+ delimit = strstr(p, "@");
+ if (delimit) {
+ int end;
+ u32 shift = 0;
+
+ end = delimit - p - 1;
+ mask = 0;
+ flags = 0;
+
+ while (end > -1) {
+ if (*(p + end) == '0') {
+ mask |= 1 << shift;
+ shift++;
+ end--;
+ } else if (*(p + end) == '1') {
+ mask |= 1 << shift;
+ flags |= 1 << shift;
+ shift++;
+ end--;
+ } else if ((*(p + end) == 'x') || (*(p + end) == 'X')) {
+ shift++;
+ end--;
+ } else {
+ pci_err(dev, "Invalid ACS flags... Ignoring\n");
+ return;
+ }
+ }
+ p = delimit + 1;
+ } else {
+ pci_err(dev, "ACS Flags missing\n");
+ return;
+ }
+ }
+
+ if (mask & ~(PCI_ACS_SV | PCI_ACS_TB | PCI_ACS_RR | PCI_ACS_CR |
+ PCI_ACS_UF | PCI_ACS_EC | PCI_ACS_DT)) {
+ pci_err(dev, "Invalid ACS flags specified\n");
+ return;
+ }
+
ret = pci_dev_str_match(dev, p, &p);
if (ret < 0) {
- pr_info_once("PCI: Can't parse disable_acs_redir parameter: %s\n",
- disable_acs_redir_param);
-
+ pr_info_once("PCI: Can't parse ACS command line parameter\n");
break;
} else if (ret == 1) {
/* Found a match */
@@ -989,56 +1029,42 @@ static void pci_disable_acs_redir(struct pci_dev *dev)
if (!pci_dev_specific_disable_acs_redir(dev))
return;
- pos = dev->acs_cap;
- if (!pos) {
- pci_warn(dev, "cannot disable ACS redirect for this hardware as it does not have ACS capabilities\n");
- return;
- }
-
- pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
+ pci_dbg(dev, "ACS mask = %#06x\n", mask);
+ pci_dbg(dev, "ACS flags = %#06x\n", flags);
+ pci_dbg(dev, "ACS control = %#06x\n", caps->ctrl);
+ pci_dbg(dev, "ACS fw_ctrl = %#06x\n", caps->fw_ctrl);
- /* P2P Request & Completion Redirect */
- ctrl &= ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC);
-
- pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
+ /*
+ * For mask bits that are 0, copy them from the firmware setting
+ * and apply flags for all the mask bits that are 1.
+ */
+ caps->ctrl = (caps->fw_ctrl & ~mask) | (flags & mask);
- pci_info(dev, "disabled ACS redirect\n");
+ pci_info(dev, "Configured ACS to %#06x\n", caps->ctrl);
}
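
The flag string is parsed right to left: the rightmost character controls
bit 0 (Source Validation, per the PCI_ACS_* bits in
include/uapi/linux/pci_regs.h), '1' forces a bit on, '0' forces it off, and
'x' keeps the firmware value. An illustrative boot parameter (device address
hypothetical):

	pci=config_acs=1xx0@0000:00:1c.0

would force P2P Completion Redirect on and Source Validation off for that
port, leaving Translation Blocking and Request Redirect as firmware set them.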
/**
* pci_std_enable_acs - enable ACS on devices using standard ACS capabilities
* @dev: the PCI device
+ * @caps: default ACS controls
*/
-static void pci_std_enable_acs(struct pci_dev *dev)
+static void pci_std_enable_acs(struct pci_dev *dev, struct pci_acs *caps)
{
- int pos;
- u16 cap;
- u16 ctrl;
-
- pos = dev->acs_cap;
- if (!pos)
- return;
-
- pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
- pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
-
/* Source Validation */
- ctrl |= (cap & PCI_ACS_SV);
+ caps->ctrl |= (caps->cap & PCI_ACS_SV);
/* P2P Request Redirect */
- ctrl |= (cap & PCI_ACS_RR);
+ caps->ctrl |= (caps->cap & PCI_ACS_RR);
/* P2P Completion Redirect */
- ctrl |= (cap & PCI_ACS_CR);
+ caps->ctrl |= (caps->cap & PCI_ACS_CR);
/* Upstream Forwarding */
- ctrl |= (cap & PCI_ACS_UF);
+ caps->ctrl |= (caps->cap & PCI_ACS_UF);
/* Enable Translation Blocking for external devices and noats */
if (pci_ats_disabled() || dev->external_facing || dev->untrusted)
- ctrl |= (cap & PCI_ACS_TB);
-
- pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
+ caps->ctrl |= (caps->cap & PCI_ACS_TB);
}
/**
@@ -1047,52 +1073,38 @@ static void pci_std_enable_acs(struct pci_dev *dev)
*/
static void pci_enable_acs(struct pci_dev *dev)
{
- if (!pci_acs_enable)
- goto disable_acs_redir;
-
- if (!pci_dev_specific_enable_acs(dev))
- goto disable_acs_redir;
+ struct pci_acs caps;
+ bool enable_acs = false;
+ int pos;
- pci_std_enable_acs(dev);
+ /* If an IOMMU is present, we start with the kernel default caps */
+ if (pci_acs_enable) {
+ if (pci_dev_specific_enable_acs(dev))
+ enable_acs = true;
+ }
-disable_acs_redir:
- /*
- * Note: pci_disable_acs_redir() must be called even if ACS was not
- * enabled by the kernel because it may have been enabled by
- * platform firmware. So if we are told to disable it, we should
- * always disable it after setting the kernel's default
- * preferences.
- */
- pci_disable_acs_redir(dev);
-}
+ pos = dev->acs_cap;
+ if (!pos)
+ return;
-/**
- * pcie_read_tlp_log - read TLP Header Log
- * @dev: PCIe device
- * @where: PCI Config offset of TLP Header Log
- * @tlp_log: TLP Log structure to fill
- *
- * Fill @tlp_log from TLP Header Log registers, e.g., AER or DPC.
- *
- * Return: 0 on success and filled TLP Log structure, <0 on error.
- */
-int pcie_read_tlp_log(struct pci_dev *dev, int where,
- struct pcie_tlp_log *tlp_log)
-{
- int i, ret;
+ pci_read_config_word(dev, pos + PCI_ACS_CAP, &caps.cap);
+ pci_read_config_word(dev, pos + PCI_ACS_CTRL, &caps.ctrl);
+ caps.fw_ctrl = caps.ctrl;
- memset(tlp_log, 0, sizeof(*tlp_log));
+ if (enable_acs)
+ pci_std_enable_acs(dev, &caps);
- for (i = 0; i < 4; i++) {
- ret = pci_read_config_dword(dev, where + i * 4,
- &tlp_log->dw[i]);
- if (ret)
- return pcibios_err_to_errno(ret);
- }
+ /*
+ * Always apply caps from the command line, even if there is no iommu.
+ * Trust that the admin has a reason to change the ACS settings.
+ */
+ __pci_config_acs(dev, &caps, disable_acs_redir_param,
+ PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC,
+ ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC));
+ __pci_config_acs(dev, &caps, config_acs_param, 0, 0);
- return 0;
+ pci_write_config_word(dev, pos + PCI_ACS_CTRL, caps.ctrl);
}
-EXPORT_SYMBOL_GPL(pcie_read_tlp_log);
/**
* pci_restore_bars - restore a device's BAR values (e.g. after wake-up)
@@ -1254,7 +1266,9 @@ static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout)
{
int delay = 1;
bool retrain = false;
- struct pci_dev *bridge;
+ struct pci_dev *root, *bridge;
+
+ root = pcie_find_root_port(dev);
if (pci_is_pcie(dev)) {
bridge = pci_upstream_bridge(dev);
@@ -1263,23 +1277,41 @@ static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout)
}
/*
- * After reset, the device should not silently discard config
- * requests, but it may still indicate that it needs more time by
- * responding to them with CRS completions. The Root Port will
- * generally synthesize ~0 (PCI_ERROR_RESPONSE) data to complete
- * the read (except when CRS SV is enabled and the read was for the
- * Vendor ID; in that case it synthesizes 0x0001 data).
+ * The caller has already waited long enough after a reset that the
+ * device should respond to config requests, but it may respond
+ * with Request Retry Status (RRS) if it needs more time to
+ * initialize.
+ *
+ * If the device is below a Root Port with Configuration RRS
+ * Software Visibility enabled, reading the Vendor ID returns a
+ * special data value if the device responded with RRS. Read the
+ * Vendor ID until we get non-RRS status.
*
- * Wait for the device to return a non-CRS completion. Read the
- * Command register instead of Vendor ID so we don't have to
- * contend with the CRS SV value.
+ * If there's no Root Port or Configuration RRS Software Visibility
+ * is not enabled, the device may still respond with RRS, but
+ * hardware may retry the config request. If no retries receive
+ * Successful Completion, hardware generally synthesizes ~0
+ * (PCI_ERROR_RESPONSE) data to complete the read. Reading Vendor
+ * ID for VFs and non-existent devices also returns ~0, so read the
+ * Command register until it returns something other than ~0.
*/
for (;;) {
u32 id;
- pci_read_config_dword(dev, PCI_COMMAND, &id);
- if (!PCI_POSSIBLE_ERROR(id))
- break;
+ if (pci_dev_is_disconnected(dev)) {
+ pci_dbg(dev, "disconnected; not waiting\n");
+ return -ENOTTY;
+ }
+
+ if (root && root->config_rrs_sv) {
+ pci_read_config_dword(dev, PCI_VENDOR_ID, &id);
+ if (!pci_bus_rrs_vendor_id(id))
+ break;
+ } else {
+ pci_read_config_dword(dev, PCI_COMMAND, &id);
+ if (!PCI_POSSIBLE_ERROR(id))
+ break;
+ }
if (delay > timeout) {
pci_warn(dev, "not ready %dms after %s; giving up\n",
@@ -1290,7 +1322,7 @@ static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout)
if (delay > PCI_RESET_WAIT) {
if (retrain) {
retrain = false;
- if (pcie_failed_link_retrain(bridge)) {
+ if (pcie_failed_link_retrain(bridge) == 0) {
delay = 1;
continue;
}
@@ -1779,6 +1811,7 @@ int pci_save_state(struct pci_dev *dev)
pci_save_dpc_state(dev);
pci_save_aer_state(dev);
pci_save_ptm_state(dev);
+ pci_save_tph_state(dev);
return pci_save_vc_state(dev);
}
EXPORT_SYMBOL(pci_save_state);
@@ -1846,7 +1879,7 @@ static void pci_restore_rebar_state(struct pci_dev *pdev)
unsigned int pos, nbars, i;
u32 ctrl;
- pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_REBAR);
+ pos = pdev->rebar_cap;
if (!pos)
return;
@@ -1859,7 +1892,7 @@ static void pci_restore_rebar_state(struct pci_dev *pdev)
pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
bar_idx = ctrl & PCI_REBAR_CTRL_BAR_IDX;
- res = pdev->resource + bar_idx;
+ res = pci_resource_n(pdev, bar_idx);
size = pci_rebar_bytes_to_size(resource_size(res));
ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE;
ctrl |= FIELD_PREP(PCI_REBAR_CTRL_BAR_SIZE, size);
@@ -1884,6 +1917,7 @@ void pci_restore_state(struct pci_dev *dev)
pci_restore_rebar_state(dev);
pci_restore_dpc_state(dev);
pci_restore_ptm_state(dev);
+ pci_restore_tph_state(dev);
pci_aer_clear_status(dev);
pci_restore_aer_state(dev);
@@ -2004,6 +2038,28 @@ int __weak pcibios_enable_device(struct pci_dev *dev, int bars)
return pci_enable_resources(dev, bars);
}
+static int pci_host_bridge_enable_device(struct pci_dev *dev)
+{
+ struct pci_host_bridge *host_bridge = pci_find_host_bridge(dev->bus);
+ int err;
+
+ if (host_bridge && host_bridge->enable_device) {
+ err = host_bridge->enable_device(host_bridge, dev);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static void pci_host_bridge_disable_device(struct pci_dev *dev)
+{
+ struct pci_host_bridge *host_bridge = pci_find_host_bridge(dev->bus);
+
+ if (host_bridge && host_bridge->disable_device)
+ host_bridge->disable_device(host_bridge, dev);
+}
+
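
A sketch of a controller driver wiring the new host bridge hooks (the my_*
names are hypothetical, not part of this patch):

	static int my_enable_device(struct pci_host_bridge *hb,
				    struct pci_dev *dev)
	{
		/* e.g. claim a hardware translation slot for this BDF */
		return 0;
	}

	static void my_disable_device(struct pci_host_bridge *hb,
				      struct pci_dev *dev)
	{
		/* undo whatever my_enable_device() claimed */
	}

	/* in the driver's probe: */
	bridge->enable_device = my_enable_device;
	bridge->disable_device = my_disable_device;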
static int do_pci_enable_device(struct pci_dev *dev, int bars)
{
int err;
@@ -2019,9 +2075,13 @@ static int do_pci_enable_device(struct pci_dev *dev, int bars)
if (bridge)
pcie_aspm_powersave_config_link(bridge);
+ err = pci_host_bridge_enable_device(dev);
+ if (err)
+ return err;
+
err = pcibios_enable_device(dev, bars);
if (err < 0)
- return err;
+ goto err_enable;
pci_fixup_device(pci_fixup_enable, dev);
if (dev->msi_enabled || dev->msix_enabled)
@@ -2036,6 +2096,12 @@ static int do_pci_enable_device(struct pci_dev *dev, int bars)
}
return 0;
+
+err_enable:
+ pci_host_bridge_disable_device(dev);
+
+ return err;
}
/**
@@ -2111,20 +2177,6 @@ static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
}
/**
- * pci_enable_device_io - Initialize a device for use with IO space
- * @dev: PCI device to be initialized
- *
- * Initialize device before it's used by a driver. Ask low-level code
- * to enable I/O resources. Wake up the device if it was suspended.
- * Beware, this function can fail.
- */
-int pci_enable_device_io(struct pci_dev *dev)
-{
- return pci_enable_device_flags(dev, IORESOURCE_IO);
-}
-EXPORT_SYMBOL(pci_enable_device_io);
-
-/**
* pci_enable_device_mem - Initialize a device for use with Memory space
* @dev: PCI device to be initialized
*
@@ -2227,18 +2279,14 @@ void pci_disable_enabled_device(struct pci_dev *dev)
*/
void pci_disable_device(struct pci_dev *dev)
{
- struct pci_devres *dr;
-
- dr = find_pci_dr(dev);
- if (dr)
- dr->enabled = 0;
-
dev_WARN_ONCE(&dev->dev, atomic_read(&dev->enable_cnt) <= 0,
"disabling already-disabled device");
if (atomic_dec_return(&dev->enable_cnt) != 0)
return;
+ pci_host_bridge_disable_device(dev);
+
do_pci_disable_device(dev);
dev->is_busmaster = 0;
@@ -2962,6 +3010,18 @@ static const struct dmi_system_id bridge_d3_blacklist[] = {
DMI_MATCH(DMI_BOARD_VERSION, "Continental Z2"),
},
},
+ {
+ /*
+ * Changing power state of root port dGPU is connected fails
+ * https://gitlab.freedesktop.org/drm/amd/-/issues/3229
+ */
+ .ident = "Hewlett-Packard HP Pavilion 17 Notebook PC/1972",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_BOARD_NAME, "1972"),
+ DMI_MATCH(DMI_BOARD_VERSION, "95.33"),
+ },
+ },
#endif
{ }
};
@@ -2971,7 +3031,7 @@ static const struct dmi_system_id bridge_d3_blacklist[] = {
* @bridge: Bridge to check
*
* This function checks if it is possible to move the bridge to D3.
- * Currently we only allow D3 for recent enough PCIe ports and Thunderbolt.
+ * Currently we only allow D3 for some PCIe ports and for Thunderbolt.
*/
bool pci_bridge_d3_possible(struct pci_dev *bridge)
{
@@ -3015,10 +3075,10 @@ bool pci_bridge_d3_possible(struct pci_dev *bridge)
return false;
/*
- * It should be safe to put PCIe ports from 2015 or newer
- * to D3.
+ * Out of caution, we only allow PCIe ports from 2015 or newer
+ * into D3 on x86.
*/
- if (dmi_get_bios_year() >= 2015)
+ if (!IS_ENABLED(CONFIG_X86) || dmi_get_bios_year() >= 2015)
return true;
break;
}
@@ -3666,6 +3726,11 @@ void pci_acs_init(struct pci_dev *dev)
pci_enable_acs(dev);
}
+void pci_rebar_init(struct pci_dev *pdev)
+{
+ pdev->rebar_cap = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_REBAR);
+}
+
/**
* pci_rebar_find_pos - find position of resize ctrl reg for BAR
* @pdev: PCI device
@@ -3680,7 +3745,7 @@ static int pci_rebar_find_pos(struct pci_dev *pdev, int bar)
unsigned int pos, nbars, i;
u32 ctrl;
- pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_REBAR);
+ pos = pdev->rebar_cap;
if (!pos)
return -ENOTSUPP;
@@ -3705,7 +3770,7 @@ static int pci_rebar_find_pos(struct pci_dev *pdev, int bar)
* @bar: BAR to query
*
* Get the possible sizes of a resizable BAR as bitmask defined in the spec
- * (bit 0=1MB, bit 19=512GB). Returns 0 if BAR isn't resizable.
+ * (bit 0=1MB, bit 31=128TB). Returns 0 if BAR isn't resizable.
*/
u32 pci_rebar_get_possible_sizes(struct pci_dev *pdev, int bar)
{
@@ -3753,7 +3818,7 @@ int pci_rebar_get_current_size(struct pci_dev *pdev, int bar)
* pci_rebar_set_size - set a new size for a BAR
* @pdev: PCI device
* @bar: BAR to set size to
- * @size: new size as defined in the spec (0=1MB, 19=512GB)
+ * @size: new size as defined in the spec (0=1MB, 31=128TB)
*
* Set the new size of a BAR as defined in the spec.
* Returns zero if resizing was successful, error code otherwise.
@@ -3869,7 +3934,18 @@ EXPORT_SYMBOL(pci_enable_atomic_ops_to_root);
*/
void pci_release_region(struct pci_dev *pdev, int bar)
{
- struct pci_devres *dr;
+ if (!pci_bar_index_is_valid(bar))
+ return;
+
+ /*
+ * This is done for backwards compatibility, because the old PCI devres
+ * API had a mode in which the function became managed if it had been
+ * enabled with pcim_enable_device() instead of pci_enable_device().
+ */
+ if (pci_is_managed(pdev)) {
+ pcim_release_region(pdev, bar);
+ return;
+ }
if (pci_resource_len(pdev, bar) == 0)
return;
@@ -3879,10 +3955,6 @@ void pci_release_region(struct pci_dev *pdev, int bar)
else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
release_mem_region(pci_resource_start(pdev, bar),
pci_resource_len(pdev, bar));
-
- dr = find_pci_dr(pdev);
- if (dr)
- dr->region_mask &= ~(1 << bar);
}
EXPORT_SYMBOL(pci_release_region);
@@ -3890,13 +3962,14 @@ EXPORT_SYMBOL(pci_release_region);
* __pci_request_region - Reserve PCI I/O and memory resource
* @pdev: PCI device whose resources are to be reserved
* @bar: BAR to be reserved
- * @res_name: Name to be associated with resource.
+ * @name: name of the driver requesting the resource
* @exclusive: whether the region access is exclusive or not
*
- * Mark the PCI region associated with PCI device @pdev BAR @bar as
- * being reserved by owner @res_name. Do not access any
- * address inside the PCI regions unless this call returns
- * successfully.
+ * Returns: 0 on success, negative error code on failure.
+ *
+ * Mark the PCI region associated with PCI device @pdev BAR @bar as being
+ * reserved by owner @name. Do not access any address inside the PCI regions
+ * unless this call returns successfully.
*
* If @exclusive is set, then the region is marked so that userspace
* is explicitly not allowed to map the resource via /dev/mem or
@@ -3906,28 +3979,32 @@ EXPORT_SYMBOL(pci_release_region);
* message is also printed on failure.
*/
static int __pci_request_region(struct pci_dev *pdev, int bar,
- const char *res_name, int exclusive)
+ const char *name, int exclusive)
{
- struct pci_devres *dr;
+ if (!pci_bar_index_is_valid(bar))
+ return -EINVAL;
+
+ if (pci_is_managed(pdev)) {
+ if (exclusive == IORESOURCE_EXCLUSIVE)
+ return pcim_request_region_exclusive(pdev, bar, name);
+
+ return pcim_request_region(pdev, bar, name);
+ }
if (pci_resource_len(pdev, bar) == 0)
return 0;
if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
if (!request_region(pci_resource_start(pdev, bar),
- pci_resource_len(pdev, bar), res_name))
+ pci_resource_len(pdev, bar), name))
goto err_out;
} else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
if (!__request_mem_region(pci_resource_start(pdev, bar),
- pci_resource_len(pdev, bar), res_name,
+ pci_resource_len(pdev, bar), name,
exclusive))
goto err_out;
}
- dr = find_pci_dr(pdev);
- if (dr)
- dr->region_mask |= 1 << bar;
-
return 0;
err_out:
@@ -3940,19 +4017,25 @@ err_out:
* pci_request_region - Reserve PCI I/O and memory resource
* @pdev: PCI device whose resources are to be reserved
* @bar: BAR to be reserved
- * @res_name: Name to be associated with resource
+ * @name: name of the driver requesting the resource
+ *
+ * Returns: 0 on success, negative error code on failure.
*
- * Mark the PCI region associated with PCI device @pdev BAR @bar as
- * being reserved by owner @res_name. Do not access any
- * address inside the PCI regions unless this call returns
- * successfully.
+ * Mark the PCI region associated with PCI device @pdev BAR @bar as being
+ * reserved by owner @name. Do not access any address inside the PCI regions
+ * unless this call returns successfully.
*
+ * NOTE:
+ * This is a "hybrid" function: It's normally unmanaged, but becomes managed
+ * when pcim_enable_device() has been called in advance. This hybrid feature is
+ * DEPRECATED! If you want managed cleanup, use the pcim_* functions instead.
*/
-int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
+int pci_request_region(struct pci_dev *pdev, int bar, const char *name)
{
- return __pci_request_region(pdev, bar, res_name, 0);
+ return __pci_request_region(pdev, bar, name, 0);
}
EXPORT_SYMBOL(pci_request_region);
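
To make the deprecation note concrete, a sketch of the two patterns side by side (error handling elided; "demo" is a placeholder owner name):

	/* DEPRECATED hybrid: the request is managed only as a side effect
	 * of the earlier pcim_enable_device() */
	pcim_enable_device(pdev);
	pci_request_region(pdev, 0, "demo");

	/* Preferred: explicitly managed, released automatically on unbind */
	pcim_enable_device(pdev);
	pcim_request_region(pdev, 0, "demo");
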
@@ -3975,13 +4058,13 @@ void pci_release_selected_regions(struct pci_dev *pdev, int bars)
EXPORT_SYMBOL(pci_release_selected_regions);
static int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
- const char *res_name, int excl)
+ const char *name, int excl)
{
int i;
for (i = 0; i < PCI_STD_NUM_BARS; i++)
if (bars & (1 << i))
- if (__pci_request_region(pdev, i, res_name, excl))
+ if (__pci_request_region(pdev, i, name, excl))
goto err_out;
return 0;
@@ -3998,19 +4081,39 @@ err_out:
* pci_request_selected_regions - Reserve selected PCI I/O and memory resources
* @pdev: PCI device whose resources are to be reserved
* @bars: Bitmask of BARs to be requested
- * @res_name: Name to be associated with resource
+ * @name: Name of the driver requesting the resources
+ *
+ * Returns: 0 on success, negative error code on failure.
+ *
+ * NOTE:
+ * This is a "hybrid" function: It's normally unmanaged, but becomes managed
+ * when pcim_enable_device() has been called in advance. This hybrid feature is
+ * DEPRECATED! If you want managed cleanup, use the pcim_* functions instead.
*/
int pci_request_selected_regions(struct pci_dev *pdev, int bars,
- const char *res_name)
+ const char *name)
{
- return __pci_request_selected_regions(pdev, bars, res_name, 0);
+ return __pci_request_selected_regions(pdev, bars, name, 0);
}
EXPORT_SYMBOL(pci_request_selected_regions);
+/**
+ * pci_request_selected_regions_exclusive - Request regions exclusively
+ * @pdev: PCI device to request regions from
+ * @bars: bit mask of BARs to request
+ * @name: name of the driver requesting the resources
+ *
+ * Returns: 0 on success, negative error code on failure.
+ *
+ * NOTE:
+ * This is a "hybrid" function: It's normally unmanaged, but becomes managed
+ * when pcim_enable_device() has been called in advance. This hybrid feature is
+ * DEPRECATED! If you want managed cleanup, use the pcim_* functions instead.
+ */
int pci_request_selected_regions_exclusive(struct pci_dev *pdev, int bars,
- const char *res_name)
+ const char *name)
{
- return __pci_request_selected_regions(pdev, bars, res_name,
+ return __pci_request_selected_regions(pdev, bars, name,
IORESOURCE_EXCLUSIVE);
}
EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
@@ -4024,7 +4127,6 @@ EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
* successful call to pci_request_regions(). Call this function only
* after all use of the PCI regions has ceased.
*/
-
void pci_release_regions(struct pci_dev *pdev)
{
pci_release_selected_regions(pdev, (1 << PCI_STD_NUM_BARS) - 1);
@@ -4034,30 +4136,36 @@ EXPORT_SYMBOL(pci_release_regions);
/**
* pci_request_regions - Reserve PCI I/O and memory resources
* @pdev: PCI device whose resources are to be reserved
- * @res_name: Name to be associated with resource.
+ * @name: name of the driver requesting the resources
*
- * Mark all PCI regions associated with PCI device @pdev as
- * being reserved by owner @res_name. Do not access any
- * address inside the PCI regions unless this call returns
- * successfully.
+ * Mark all PCI regions associated with PCI device @pdev as being reserved by
+ * owner @name. Do not access any address inside the PCI regions unless this
+ * call returns successfully.
*
* Returns 0 on success, or %EBUSY on error. A warning
* message is also printed on failure.
+ *
+ * NOTE:
+ * This is a "hybrid" function: It's normally unmanaged, but becomes managed
+ * when pcim_enable_device() has been called in advance. This hybrid feature is
+ * DEPRECATED! If you want managed cleanup, use the pcim_* functions instead.
*/
-int pci_request_regions(struct pci_dev *pdev, const char *res_name)
+int pci_request_regions(struct pci_dev *pdev, const char *name)
{
return pci_request_selected_regions(pdev,
- ((1 << PCI_STD_NUM_BARS) - 1), res_name);
+ ((1 << PCI_STD_NUM_BARS) - 1), name);
}
EXPORT_SYMBOL(pci_request_regions);
/**
* pci_request_regions_exclusive - Reserve PCI I/O and memory resources
* @pdev: PCI device whose resources are to be reserved
- * @res_name: Name to be associated with resource.
+ * @name: name of the driver requesting the resources
+ *
+ * Returns: 0 on success, negative error code on failure.
*
* Mark all PCI regions associated with PCI device @pdev as being reserved
- * by owner @res_name. Do not access any address inside the PCI regions
+ * by owner @name. Do not access any address inside the PCI regions
* unless this call returns successfully.
*
* pci_request_regions_exclusive() will mark the region so that /dev/mem
@@ -4065,11 +4173,16 @@ EXPORT_SYMBOL(pci_request_regions);
*
+ * NOTE:
+ * This is a "hybrid" function: It's normally unmanaged, but becomes managed
+ * when pcim_enable_device() has been called in advance. This hybrid feature is
+ * DEPRECATED! If you want managed cleanup, use the pcim_* functions instead.
*/
-int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
+int pci_request_regions_exclusive(struct pci_dev *pdev, const char *name)
{
return pci_request_selected_regions_exclusive(pdev,
- ((1 << PCI_STD_NUM_BARS) - 1), res_name);
+ ((1 << PCI_STD_NUM_BARS) - 1), name);
}
EXPORT_SYMBOL(pci_request_regions_exclusive);
@@ -4077,7 +4190,7 @@ EXPORT_SYMBOL(pci_request_regions_exclusive);
* Record the PCI IO range (expressed as CPU physical address + size).
* Return a negative value if an error has occurred, zero otherwise.
*/
-int pci_register_io_range(struct fwnode_handle *fwnode, phys_addr_t addr,
+int pci_register_io_range(const struct fwnode_handle *fwnode, phys_addr_t addr,
resource_size_t size)
{
int ret = 0;
@@ -4408,17 +4521,10 @@ void pci_intx(struct pci_dev *pdev, int enable)
else
new = pci_command | PCI_COMMAND_INTX_DISABLE;
- if (new != pci_command) {
- struct pci_devres *dr;
-
- pci_write_config_word(pdev, PCI_COMMAND, new);
+ if (new == pci_command)
+ return;
- dr = find_pci_dr(pdev);
- if (dr && !dr->restore_intx) {
- dr->restore_intx = 1;
- dr->orig_intx = !enable;
- }
- }
+ pci_write_config_word(pdev, PCI_COMMAND, new);
}
EXPORT_SYMBOL_GPL(pci_intx);
@@ -4625,11 +4731,12 @@ int pcie_retrain_link(struct pci_dev *pdev, bool use_lt)
/*
* Ensure the updated LNKCTL parameters are used during link
- * training by checking that there is no ongoing link training to
- * avoid LTSSM race as recommended in Implementation Note at the
- * end of PCIe r6.0.1 sec 7.5.3.7.
+ * training by checking that there is no ongoing link training that
+ * may have started before link parameters were changed, so as to
+	 * avoid an LTSSM race, as recommended in the Implementation Note at
+	 * the end of PCIe r6.1 sec 7.5.3.7.
*/
- rc = pcie_wait_for_link_status(pdev, use_lt, !use_lt);
+ rc = pcie_wait_for_link_status(pdev, true, false);
if (rc)
return rc;
@@ -4643,7 +4750,15 @@ int pcie_retrain_link(struct pci_dev *pdev, bool use_lt)
pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_RL);
}
- return pcie_wait_for_link_status(pdev, use_lt, !use_lt);
+ rc = pcie_wait_for_link_status(pdev, use_lt, !use_lt);
+
+ /*
+ * Clear LBMS after a manual retrain so that the bit can be used
+ * to track link speed or width changes made by hardware itself
+	 * in an attempt to correct unreliable link operation.
+ */
+ pcie_reset_lbms_count(pdev);
+ return rc;
}
/**
@@ -4670,7 +4785,7 @@ static bool pcie_wait_for_link_delay(struct pci_dev *pdev, bool active,
/*
* PCIe r4.0 sec 6.6.1, a component must enter LTSSM Detect within 20ms,
- * after which we should expect an link active if the reset was
+ * after which we should expect the link to be active if the reset was
* successful. If so, software must wait a minimum 100ms before sending
* configuration requests to devices downstream this port.
*
@@ -4749,7 +4864,7 @@ static int pci_bus_max_d3cold_delay(const struct pci_bus *bus)
*/
int pci_bridge_wait_for_secondary_bus(struct pci_dev *dev, char *reset_type)
{
- struct pci_dev *child;
+ struct pci_dev *child __free(pci_dev_put) = NULL;
int delay;
if (pci_dev_is_disconnected(dev))
@@ -4778,8 +4893,8 @@ int pci_bridge_wait_for_secondary_bus(struct pci_dev *dev, char *reset_type)
return 0;
}
- child = list_first_entry(&dev->subordinate->devices, struct pci_dev,
- bus_list);
+ child = pci_dev_get(list_first_entry(&dev->subordinate->devices,
+ struct pci_dev, bus_list));
up_read(&pci_bus_sem);
/*
@@ -4879,6 +4994,9 @@ void __weak pcibios_reset_secondary_bus(struct pci_dev *dev)
*/
int pci_bridge_secondary_bus_reset(struct pci_dev *dev)
{
+ if (!dev->block_cfg_access)
+ pci_warn_once(dev, "unlocked secondary bus reset via: %pS\n",
+ __builtin_return_address(0));
pcibios_reset_secondary_bus(dev);
return pci_bridge_wait_for_secondary_bus(dev, "bus reset");
@@ -4927,16 +5045,96 @@ static int pci_dev_reset_slot_function(struct pci_dev *dev, bool probe)
return pci_reset_hotplug_slot(dev->slot->hotplug, probe);
}
+static u16 cxl_port_dvsec(struct pci_dev *dev)
+{
+ return pci_find_dvsec_capability(dev, PCI_VENDOR_ID_CXL,
+ PCI_DVSEC_CXL_PORT);
+}
+
+static bool cxl_sbr_masked(struct pci_dev *dev)
+{
+ u16 dvsec, reg;
+ int rc;
+
+ dvsec = cxl_port_dvsec(dev);
+ if (!dvsec)
+ return false;
+
+ rc = pci_read_config_word(dev, dvsec + PCI_DVSEC_CXL_PORT_CTL, &reg);
+ if (rc || PCI_POSSIBLE_ERROR(reg))
+ return false;
+
+ /*
+ * Per CXL spec r3.1, sec 8.1.5.2, when "Unmask SBR" is 0, the SBR
+ * bit in Bridge Control has no effect. When 1, the Port generates
+ * hot reset when the SBR bit is set to 1.
+ */
+ if (reg & PCI_DVSEC_CXL_PORT_CTL_UNMASK_SBR)
+ return false;
+
+ return true;
+}
+
static int pci_reset_bus_function(struct pci_dev *dev, bool probe)
{
+ struct pci_dev *bridge = pci_upstream_bridge(dev);
int rc;
+ /*
+ * If "dev" is below a CXL port that has SBR control masked, SBR
+ * won't do anything, so return error.
+	 * won't do anything, so return an error.
+ if (bridge && cxl_sbr_masked(bridge)) {
+ if (probe)
+ return 0;
+
+ return -ENOTTY;
+ }
+
rc = pci_dev_reset_slot_function(dev, probe);
if (rc != -ENOTTY)
return rc;
return pci_parent_bus_reset(dev, probe);
}
+static int cxl_reset_bus_function(struct pci_dev *dev, bool probe)
+{
+ struct pci_dev *bridge;
+ u16 dvsec, reg, val;
+ int rc;
+
+ bridge = pci_upstream_bridge(dev);
+ if (!bridge)
+ return -ENOTTY;
+
+ dvsec = cxl_port_dvsec(bridge);
+ if (!dvsec)
+ return -ENOTTY;
+
+ if (probe)
+ return 0;
+
+ rc = pci_read_config_word(bridge, dvsec + PCI_DVSEC_CXL_PORT_CTL, &reg);
+ if (rc)
+ return -ENOTTY;
+
+ if (reg & PCI_DVSEC_CXL_PORT_CTL_UNMASK_SBR) {
+ val = reg;
+ } else {
+ val = reg | PCI_DVSEC_CXL_PORT_CTL_UNMASK_SBR;
+ pci_write_config_word(bridge, dvsec + PCI_DVSEC_CXL_PORT_CTL,
+ val);
+ }
+
+ rc = pci_reset_bus_function(dev, probe);
+
+ if (reg != val)
+ pci_write_config_word(bridge, dvsec + PCI_DVSEC_CXL_PORT_CTL,
+ reg);
+
+ return rc;
+}
+
void pci_dev_lock(struct pci_dev *dev)
{
/* block PM suspend, driver probe, etc. */
@@ -4977,6 +5175,8 @@ static void pci_dev_save_and_disable(struct pci_dev *dev)
*/
if (err_handler && err_handler->reset_prepare)
err_handler->reset_prepare(dev);
+ else if (dev->driver)
+ pci_warn(dev, "resetting");
/*
* Wake-up device prior to save. PM registers default to D0 after
@@ -5010,10 +5210,12 @@ static void pci_dev_restore(struct pci_dev *dev)
*/
if (err_handler && err_handler->reset_done)
err_handler->reset_done(dev);
+ else if (dev->driver)
+ pci_warn(dev, "reset done");
}
/* dev->reset_methods[] is a 0-terminated list of indices into this array */
-static const struct pci_reset_fn_method pci_reset_fn_methods[] = {
+const struct pci_reset_fn_method pci_reset_fn_methods[] = {
{ },
{ pci_dev_specific_reset, .name = "device_specific" },
{ pci_dev_acpi_reset, .name = "acpi" },
@@ -5021,128 +5223,7 @@ static const struct pci_reset_fn_method pci_reset_fn_methods[] = {
{ pci_af_flr, .name = "af_flr" },
{ pci_pm_reset, .name = "pm" },
{ pci_reset_bus_function, .name = "bus" },
-};
-
-static ssize_t reset_method_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- ssize_t len = 0;
- int i, m;
-
- for (i = 0; i < PCI_NUM_RESET_METHODS; i++) {
- m = pdev->reset_methods[i];
- if (!m)
- break;
-
- len += sysfs_emit_at(buf, len, "%s%s", len ? " " : "",
- pci_reset_fn_methods[m].name);
- }
-
- if (len)
- len += sysfs_emit_at(buf, len, "\n");
-
- return len;
-}
-
-static int reset_method_lookup(const char *name)
-{
- int m;
-
- for (m = 1; m < PCI_NUM_RESET_METHODS; m++) {
- if (sysfs_streq(name, pci_reset_fn_methods[m].name))
- return m;
- }
-
- return 0; /* not found */
-}
-
-static ssize_t reset_method_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- char *options, *name;
- int m, n;
- u8 reset_methods[PCI_NUM_RESET_METHODS] = { 0 };
-
- if (sysfs_streq(buf, "")) {
- pdev->reset_methods[0] = 0;
- pci_warn(pdev, "All device reset methods disabled by user");
- return count;
- }
-
- if (sysfs_streq(buf, "default")) {
- pci_init_reset_methods(pdev);
- return count;
- }
-
- options = kstrndup(buf, count, GFP_KERNEL);
- if (!options)
- return -ENOMEM;
-
- n = 0;
- while ((name = strsep(&options, " ")) != NULL) {
- if (sysfs_streq(name, ""))
- continue;
-
- name = strim(name);
-
- m = reset_method_lookup(name);
- if (!m) {
- pci_err(pdev, "Invalid reset method '%s'", name);
- goto error;
- }
-
- if (pci_reset_fn_methods[m].reset_fn(pdev, PCI_RESET_PROBE)) {
- pci_err(pdev, "Unsupported reset method '%s'", name);
- goto error;
- }
-
- if (n == PCI_NUM_RESET_METHODS - 1) {
- pci_err(pdev, "Too many reset methods\n");
- goto error;
- }
-
- reset_methods[n++] = m;
- }
-
- reset_methods[n] = 0;
-
- /* Warn if dev-specific supported but not highest priority */
- if (pci_reset_fn_methods[1].reset_fn(pdev, PCI_RESET_PROBE) == 0 &&
- reset_methods[0] != 1)
- pci_warn(pdev, "Device-specific reset disabled/de-prioritized by user");
- memcpy(pdev->reset_methods, reset_methods, sizeof(pdev->reset_methods));
- kfree(options);
- return count;
-
-error:
- /* Leave previous methods unchanged */
- kfree(options);
- return -EINVAL;
-}
-static DEVICE_ATTR_RW(reset_method);
-
-static struct attribute *pci_dev_reset_method_attrs[] = {
- &dev_attr_reset_method.attr,
- NULL,
-};
-
-static umode_t pci_dev_reset_method_attr_is_visible(struct kobject *kobj,
- struct attribute *a, int n)
-{
- struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
-
- if (!pci_reset_supported(pdev))
- return 0;
-
- return a->mode;
-}
-
-const struct attribute_group pci_dev_reset_method_attr_group = {
- .attrs = pci_dev_reset_method_attrs,
- .is_visible = pci_dev_reset_method_attr_is_visible,
+ { cxl_reset_bus_function, .name = "cxl_bus" },
};
/**
@@ -5168,6 +5249,7 @@ const struct attribute_group pci_dev_reset_method_attr_group = {
int __pci_reset_function_locked(struct pci_dev *dev)
{
int i, m, rc;
+ const struct pci_reset_fn_method *method;
might_sleep();
@@ -5184,9 +5266,13 @@ int __pci_reset_function_locked(struct pci_dev *dev)
if (!m)
return -ENOTTY;
- rc = pci_reset_fn_methods[m].reset_fn(dev, PCI_RESET_DO_RESET);
+ method = &pci_reset_fn_methods[m];
+ pci_dbg(dev, "reset via %s\n", method->name);
+ rc = method->reset_fn(dev, PCI_RESET_DO_RESET);
if (!rc)
return 0;
+
+ pci_dbg(dev, "%s failed with %d\n", method->name, rc);
if (rc != -ENOTTY)
return rc;
}
@@ -5245,11 +5331,20 @@ void pci_init_reset_methods(struct pci_dev *dev)
*/
int pci_reset_function(struct pci_dev *dev)
{
+ struct pci_dev *bridge;
int rc;
if (!pci_reset_supported(dev))
return -ENOTTY;
+ /*
+ * If there's no upstream bridge, no locking is needed since there is
+ * no upstream bridge configuration to hold consistent.
+	 * no upstream bridge configuration to keep consistent.
+ bridge = pci_upstream_bridge(dev);
+ if (bridge)
+ pci_dev_lock(bridge);
+
pci_dev_lock(dev);
pci_dev_save_and_disable(dev);
@@ -5258,6 +5353,9 @@ int pci_reset_function(struct pci_dev *dev)
pci_dev_restore(dev);
pci_dev_unlock(dev);
+ if (bridge)
+ pci_dev_unlock(bridge);
+
return rc;
}
EXPORT_SYMBOL_GPL(pci_reset_function);
@@ -5344,10 +5442,12 @@ static void pci_bus_lock(struct pci_bus *bus)
{
struct pci_dev *dev;
+ pci_dev_lock(bus->self);
list_for_each_entry(dev, &bus->devices, bus_list) {
- pci_dev_lock(dev);
if (dev->subordinate)
pci_bus_lock(dev->subordinate);
+ else
+ pci_dev_lock(dev);
}
}
@@ -5359,8 +5459,10 @@ static void pci_bus_unlock(struct pci_bus *bus)
list_for_each_entry(dev, &bus->devices, bus_list) {
if (dev->subordinate)
pci_bus_unlock(dev->subordinate);
- pci_dev_unlock(dev);
+ else
+ pci_dev_unlock(dev);
}
+ pci_dev_unlock(bus->self);
}
/* Return 1 on successful lock, 0 on contention */
@@ -5368,15 +5470,15 @@ static int pci_bus_trylock(struct pci_bus *bus)
{
struct pci_dev *dev;
+ if (!pci_dev_trylock(bus->self))
+ return 0;
+
list_for_each_entry(dev, &bus->devices, bus_list) {
- if (!pci_dev_trylock(dev))
- goto unlock;
if (dev->subordinate) {
- if (!pci_bus_trylock(dev->subordinate)) {
- pci_dev_unlock(dev);
+ if (!pci_bus_trylock(dev->subordinate))
goto unlock;
- }
- }
+ } else if (!pci_dev_trylock(dev))
+ goto unlock;
}
return 1;
@@ -5384,8 +5486,10 @@ unlock:
list_for_each_entry_continue_reverse(dev, &bus->devices, bus_list) {
if (dev->subordinate)
pci_bus_unlock(dev->subordinate);
- pci_dev_unlock(dev);
+ else
+ pci_dev_unlock(dev);
}
+ pci_dev_unlock(bus->self);
return 0;
}
@@ -5417,9 +5521,10 @@ static void pci_slot_lock(struct pci_slot *slot)
list_for_each_entry(dev, &slot->bus->devices, bus_list) {
if (!dev->slot || dev->slot != slot)
continue;
- pci_dev_lock(dev);
if (dev->subordinate)
pci_bus_lock(dev->subordinate);
+ else
+ pci_dev_lock(dev);
}
}
@@ -5445,14 +5550,13 @@ static int pci_slot_trylock(struct pci_slot *slot)
list_for_each_entry(dev, &slot->bus->devices, bus_list) {
if (!dev->slot || dev->slot != slot)
continue;
- if (!pci_dev_trylock(dev))
- goto unlock;
if (dev->subordinate) {
if (!pci_bus_trylock(dev->subordinate)) {
pci_dev_unlock(dev);
goto unlock;
}
- }
+ } else if (!pci_dev_trylock(dev))
+ goto unlock;
}
return 1;
@@ -5463,7 +5567,8 @@ unlock:
continue;
if (dev->subordinate)
pci_bus_unlock(dev->subordinate);
- pci_dev_unlock(dev);
+ else
+ pci_dev_unlock(dev);
}
return 0;
}
@@ -5494,8 +5599,10 @@ static void pci_bus_restore_locked(struct pci_bus *bus)
list_for_each_entry(dev, &bus->devices, bus_list) {
pci_dev_restore(dev);
- if (dev->subordinate)
+ if (dev->subordinate) {
+ pci_bridge_wait_for_secondary_bus(dev, "bus reset");
pci_bus_restore_locked(dev->subordinate);
+ }
}
}
@@ -5529,8 +5636,10 @@ static void pci_slot_restore_locked(struct pci_slot *slot)
if (!dev->slot || dev->slot != slot)
continue;
pci_dev_restore(dev);
- if (dev->subordinate)
+ if (dev->subordinate) {
+ pci_bridge_wait_for_secondary_bus(dev, "slot reset");
pci_bus_restore_locked(dev->subordinate);
+ }
}
}
@@ -5675,7 +5784,7 @@ EXPORT_SYMBOL_GPL(pci_probe_reset_bus);
*
* Same as above except return -EAGAIN if the bus cannot be locked
*/
-static int __pci_reset_bus(struct pci_bus *bus)
+int __pci_reset_bus(struct pci_bus *bus)
{
int rc;
@@ -5922,24 +6031,7 @@ int pcie_link_speed_mbps(struct pci_dev *pdev)
if (err)
return err;
- switch (to_pcie_link_speed(lnksta)) {
- case PCIE_SPEED_2_5GT:
- return 2500;
- case PCIE_SPEED_5_0GT:
- return 5000;
- case PCIE_SPEED_8_0GT:
- return 8000;
- case PCIE_SPEED_16_0GT:
- return 16000;
- case PCIE_SPEED_32_0GT:
- return 32000;
- case PCIE_SPEED_64_0GT:
- return 64000;
- default:
- break;
- }
-
- return -EINVAL;
+ return pcie_dev_speed_mbps(to_pcie_link_speed(lnksta));
}
EXPORT_SYMBOL(pcie_link_speed_mbps);
@@ -6001,38 +6093,66 @@ u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev,
EXPORT_SYMBOL(pcie_bandwidth_available);
/**
- * pcie_get_speed_cap - query for the PCI device's link speed capability
+ * pcie_get_supported_speeds - query Supported Link Speed Vector
* @dev: PCI device to query
*
- * Query the PCI device speed capability. Return the maximum link speed
- * supported by the device.
+ * Query the link speeds supported by @dev.
+ *
+ * Implementation Note in PCIe r6.0 sec 7.5.3.18 recommends determining
+ * supported link speeds using the Supported Link Speeds Vector in the Link
+ * Capabilities 2 Register (when available).
+ *
+ * Link Capabilities 2 was added in PCIe r3.0, sec 7.8.18.
+ *
+ * Without Link Capabilities 2, i.e., prior to PCIe r3.0, Supported Link
+ * Speeds field in Link Capabilities is used and only 2.5 GT/s and 5.0 GT/s
+ * speeds were defined.
+ *
+ * For a @dev without the Supported Link Speeds Vector, the field is
+ * synthesized from the Max Link Speed field in the Link Capabilities
+ * Register.
+ *
+ * Return: Supported Link Speeds Vector (+ reserved 0 at LSB).
*/
-enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev)
+u8 pcie_get_supported_speeds(struct pci_dev *dev)
{
u32 lnkcap2, lnkcap;
+ u8 speeds;
/*
- * Link Capabilities 2 was added in PCIe r3.0, sec 7.8.18. The
- * implementation note there recommends using the Supported Link
- * Speeds Vector in Link Capabilities 2 when supported.
- *
- * Without Link Capabilities 2, i.e., prior to PCIe r3.0, software
- * should use the Supported Link Speeds field in Link Capabilities,
- * where only 2.5 GT/s and 5.0 GT/s speeds were defined.
+	 * The returned value keeps the reserved 0 at the LSB, matching the
+	 * layout of the Supported Link Speeds Vector, so the SLS Vector bit
+	 * defines can be used on it directly.
*/
pcie_capability_read_dword(dev, PCI_EXP_LNKCAP2, &lnkcap2);
+ speeds = lnkcap2 & PCI_EXP_LNKCAP2_SLS;
+
+ /* Ignore speeds higher than Max Link Speed */
+ pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
+ speeds &= GENMASK(lnkcap & PCI_EXP_LNKCAP_SLS, 0);
/* PCIe r3.0-compliant */
- if (lnkcap2)
- return PCIE_LNKCAP2_SLS2SPEED(lnkcap2);
+ if (speeds)
+ return speeds;
- pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
+ /* Synthesize from the Max Link Speed field */
if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_5_0GB)
- return PCIE_SPEED_5_0GT;
+ speeds = PCI_EXP_LNKCAP2_SLS_5_0GB | PCI_EXP_LNKCAP2_SLS_2_5GB;
else if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_2_5GB)
- return PCIE_SPEED_2_5GT;
+ speeds = PCI_EXP_LNKCAP2_SLS_2_5GB;
- return PCI_SPEED_UNKNOWN;
+ return speeds;
+}
+
+/**
+ * pcie_get_speed_cap - query for the PCI device's link speed capability
+ * @dev: PCI device to query
+ *
+ * Query the PCI device speed capability.
+ *
+ * Return: the maximum link speed supported by the device.
+ */
+enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev)
+{
+ return PCIE_LNKCAP2_SLS2SPEED(dev->supported_speeds);
}
EXPORT_SYMBOL(pcie_get_speed_cap);
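
Because the reserved bit 0 is preserved, callers can apply the LNKCAP2 SLS bit defines and PCIE_LNKCAP2_SLS2SPEED() to the returned vector without any shifting; a brief sketch:

	u8 speeds = pcie_get_supported_speeds(pdev);

	if (speeds & PCI_EXP_LNKCAP2_SLS_16_0GB)
		; /* 16.0 GT/s is in the Supported Link Speeds Vector */

	/* Highest supported speed as an enum pci_bus_speed value */
	enum pci_bus_speed max_speed = PCIE_LNKCAP2_SLS2SPEED(speeds);
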
@@ -6065,8 +6185,9 @@ EXPORT_SYMBOL(pcie_get_width_cap);
* and width, multiplying them, and applying encoding overhead. The result
* is in Mb/s, i.e., megabits/second of raw bandwidth.
*/
-u32 pcie_bandwidth_capable(struct pci_dev *dev, enum pci_bus_speed *speed,
- enum pcie_link_width *width)
+static u32 pcie_bandwidth_capable(struct pci_dev *dev,
+ enum pci_bus_speed *speed,
+ enum pcie_link_width *width)
{
*speed = pcie_get_speed_cap(dev);
*width = pcie_get_width_cap(dev);
@@ -6093,21 +6214,25 @@ void __pcie_print_link_status(struct pci_dev *dev, bool verbose)
enum pci_bus_speed speed, speed_cap;
struct pci_dev *limiting_dev = NULL;
u32 bw_avail, bw_cap;
+ char *flit_mode = "";
bw_cap = pcie_bandwidth_capable(dev, &speed_cap, &width_cap);
bw_avail = pcie_bandwidth_available(dev, &limiting_dev, &speed, &width);
+ if (dev->bus && dev->bus->flit_mode)
+ flit_mode = ", in Flit mode";
+
if (bw_avail >= bw_cap && verbose)
- pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth (%s x%d link)\n",
+ pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth (%s x%d link)%s\n",
bw_cap / 1000, bw_cap % 1000,
- pci_speed_string(speed_cap), width_cap);
+ pci_speed_string(speed_cap), width_cap, flit_mode);
else if (bw_avail < bw_cap)
- pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth, limited by %s x%d link at %s (capable of %u.%03u Gb/s with %s x%d link)\n",
+ pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth, limited by %s x%d link at %s (capable of %u.%03u Gb/s with %s x%d link)%s\n",
bw_avail / 1000, bw_avail % 1000,
pci_speed_string(speed), width,
limiting_dev ? pci_name(limiting_dev) : "<unknown>",
bw_cap / 1000, bw_cap % 1000,
- pci_speed_string(speed_cap), width_cap);
+ pci_speed_string(speed_cap), width_cap, flit_mode);
}
/**
@@ -6460,8 +6585,7 @@ static void pci_request_resource_alignment(struct pci_dev *dev, int bar,
} else {
r->flags &= ~IORESOURCE_SIZEALIGN;
r->flags |= IORESOURCE_STARTALIGN;
- r->start = align;
- r->end = r->start + size - 1;
+ resource_set_range(r, align, size);
}
r->flags |= IORESOURCE_UNSET;
}
@@ -6640,16 +6764,16 @@ static int of_pci_bus_find_domain_nr(struct device *parent)
return ida_alloc(&pci_domain_nr_dynamic_ida, GFP_KERNEL);
}
-static void of_pci_bus_release_domain_nr(struct pci_bus *bus, struct device *parent)
+static void of_pci_bus_release_domain_nr(struct device *parent, int domain_nr)
{
- if (bus->domain_nr < 0)
+ if (domain_nr < 0)
return;
/* Release domain from IDA where it was allocated. */
- if (of_get_pci_domain_nr(parent->of_node) == bus->domain_nr)
- ida_free(&pci_domain_nr_static_ida, bus->domain_nr);
+ if (of_get_pci_domain_nr(parent->of_node) == domain_nr)
+ ida_free(&pci_domain_nr_static_ida, domain_nr);
else
- ida_free(&pci_domain_nr_dynamic_ida, bus->domain_nr);
+ ida_free(&pci_domain_nr_dynamic_ida, domain_nr);
}
int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent)
@@ -6658,11 +6782,11 @@ int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent)
acpi_pci_bus_find_domain_nr(bus);
}
-void pci_bus_release_domain_nr(struct pci_bus *bus, struct device *parent)
+void pci_bus_release_domain_nr(struct device *parent, int domain_nr)
{
if (!acpi_disabled)
return;
- of_pci_bus_release_domain_nr(bus, parent);
+ of_pci_bus_release_domain_nr(parent, domain_nr);
}
#endif
@@ -6707,6 +6831,8 @@ static int __init pci_setup(char *str)
pci_no_domains();
} else if (!strncmp(str, "noari", 5)) {
pcie_ari_disabled = true;
+ } else if (!strncmp(str, "notph", 5)) {
+ pci_no_tph();
} else if (!strncmp(str, "cbiosize=", 9)) {
pci_cardbus_io_size = memparse(str + 9, &str);
} else if (!strncmp(str, "cbmemsize=", 10)) {
@@ -6741,6 +6867,8 @@ static int __init pci_setup(char *str)
pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
} else if (!strncmp(str, "disable_acs_redir=", 18)) {
disable_acs_redir_param = str + 18;
+ } else if (!strncmp(str, "config_acs=", 11)) {
+ config_acs_param = str + 11;
} else {
pr_err("PCI: Unknown option `%s'\n", str);
}
@@ -6765,6 +6893,7 @@ static int __init pci_realloc_setup_params(void)
resource_alignment_param = kstrdup(resource_alignment_param,
GFP_KERNEL);
disable_acs_redir_param = kstrdup(disable_acs_redir_param, GFP_KERNEL);
+ config_acs_param = kstrdup(config_acs_param, GFP_KERNEL);
return 0;
}
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 17fed1846847..39f368d2f26d 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -4,6 +4,8 @@
#include <linux/pci.h>
+struct pcie_tlp_log;
+
/* Number of possible devfns: 0.0 to 1f.7 inclusive */
#define MAX_NR_DEVFNS 256
@@ -13,15 +15,73 @@
#define PCIE_LINK_RETRAIN_TIMEOUT_MS 1000
-/* Power stable to PERST# inactive from PCIe card Electromechanical Spec */
+/*
+ * Power stable to PERST# inactive.
+ *
+ * See the "Power Sequencing and Reset Signal Timings" table of the PCI Express
+ * Card Electromechanical Specification, Revision 5.1, Section 2.9.2, Symbol
+ * "T_PVPERL".
+ */
#define PCIE_T_PVPERL_MS 100
/*
+ * REFCLK stable before PERST# inactive.
+ *
+ * See the "Power Sequencing and Reset Signal Timings" table of the PCI Express
+ * Card Electromechanical Specification, Revision 5.1, Section 2.9.2, Symbol
+ * "T_PERST-CLK".
+ */
+#define PCIE_T_PERST_CLK_US 100
+
+/*
+ * End of conventional reset (PERST# de-asserted) to first configuration
+ * request (device able to respond with a "Request Retry Status" completion),
+ * from PCIe r6.0, sec 6.6.1.
+ */
+#define PCIE_T_RRS_READY_MS 100
+
+/*
* PCIe r6.0, sec 5.3.3.2.1 <PME Synchronization>
* Recommends 1ms to 10ms timeout to check L2 ready.
*/
#define PCIE_PME_TO_L2_TIMEOUT_US 10000
+/*
+ * PCIe r6.0, sec 6.6.1 <Conventional Reset>
+ *
+ * - "With a Downstream Port that does not support Link speeds greater
+ * than 5.0 GT/s, software must wait a minimum of 100 ms following exit
+ * from a Conventional Reset before sending a Configuration Request to
+ * the device immediately below that Port."
+ *
+ * - "With a Downstream Port that supports Link speeds greater than
+ * 5.0 GT/s, software must wait a minimum of 100 ms after Link training
+ * completes before sending a Configuration Request to the device
+ * immediately below that Port."
+ */
+#define PCIE_RESET_CONFIG_DEVICE_WAIT_MS 100
+
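
Taken together, these symbols describe the bring-up ordering a host-bridge driver observes before releasing PERST#; a hypothetical sketch (the regulator, clock, and GPIO handles are illustrative, not from any real driver):

	regulator_bulk_enable(num_supplies, supplies);	/* slot power up */
	clk_prepare_enable(refclk);			/* REFCLK running */
	/* REFCLK and power must be stable before PERST# goes inactive */
	usleep_range(PCIE_T_PERST_CLK_US, PCIE_T_PERST_CLK_US + 50);
	msleep(PCIE_T_PVPERL_MS);
	gpiod_set_value_cansleep(perst_gpio, 0);	/* de-assert PERST# */
	/* device may answer config cycles with RRS for up to this long */
	msleep(PCIE_T_RRS_READY_MS);
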
+/* Message Routing (r[2:0]); PCIe r6.0, sec 2.2.8 */
+#define PCIE_MSG_TYPE_R_RC 0
+#define PCIE_MSG_TYPE_R_ADDR 1
+#define PCIE_MSG_TYPE_R_ID 2
+#define PCIE_MSG_TYPE_R_BC 3
+#define PCIE_MSG_TYPE_R_LOCAL 4
+#define PCIE_MSG_TYPE_R_GATHER 5
+
+/* Power Management Messages; PCIe r6.0, sec 2.2.8.2 */
+#define PCIE_MSG_CODE_PME_TURN_OFF 0x19
+
+/* INTx Mechanism Messages; PCIe r6.0, sec 2.2.8.1 */
+#define PCIE_MSG_CODE_ASSERT_INTA 0x20
+#define PCIE_MSG_CODE_ASSERT_INTB 0x21
+#define PCIE_MSG_CODE_ASSERT_INTC 0x22
+#define PCIE_MSG_CODE_ASSERT_INTD 0x23
+#define PCIE_MSG_CODE_DEASSERT_INTA 0x24
+#define PCIE_MSG_CODE_DEASSERT_INTB 0x25
+#define PCIE_MSG_CODE_DEASSERT_INTC 0x26
+#define PCIE_MSG_CODE_DEASSERT_INTD 0x27
+
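
A hypothetical receiver-side decode using these codes (extraction of msg_code from the TLP header is assumed to have happened already):

	switch (msg_code) {
	case PCIE_MSG_CODE_PME_TURN_OFF:
		/* begin the L2/L3 Ready handshake */
		break;
	case PCIE_MSG_CODE_ASSERT_INTA ... PCIE_MSG_CODE_DEASSERT_INTD:
		/* mirror legacy INTx state to the host */
		break;
	default:
		break;
	}
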
extern const unsigned char pcie_link_speed[];
extern bool pci_early_dump;
@@ -46,6 +106,7 @@ bool pci_reset_supported(struct pci_dev *dev);
void pci_init_reset_methods(struct pci_dev *dev);
int pci_bridge_secondary_bus_reset(struct pci_dev *dev);
int pci_bus_error_reset(struct pci_dev *dev);
+int __pci_reset_bus(struct pci_bus *bus);
struct pci_cap_saved_data {
u16 cap_nr;
@@ -81,7 +142,6 @@ void pcie_clear_device_status(struct pci_dev *dev);
void pcie_clear_root_pme_status(struct pci_dev *dev);
bool pci_check_pme_status(struct pci_dev *dev);
void pci_pme_wakeup_bus(struct pci_bus *bus);
-int __pci_pme_wakeup(struct pci_dev *dev, void *ign);
void pci_pme_restore(struct pci_dev *dev);
bool pci_dev_need_resume(struct pci_dev *dev);
void pci_dev_adjust_pme(struct pci_dev *dev);
@@ -96,12 +156,33 @@ bool pci_bridge_d3_possible(struct pci_dev *dev);
void pci_bridge_d3_update(struct pci_dev *dev);
int pci_bridge_wait_for_secondary_bus(struct pci_dev *dev, char *reset_type);
+static inline bool pci_bus_rrs_vendor_id(u32 l)
+{
+ return (l & 0xffff) == PCI_VENDOR_ID_PCI_SIG;
+}
+
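
pci_bus_rrs_vendor_id() keys off the PCI-SIG Vendor ID (0x0001) that a completer substitutes while it is still answering with Request Retry Status; a schematic probe loop (retry budget and sleep interval are illustrative):

	u32 l;

	do {
		pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, &l);
		if (!pci_bus_rrs_vendor_id(l))
			break;			/* real Vendor ID arrived */
		msleep(10);
	} while (--retries);
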
static inline void pci_wakeup_event(struct pci_dev *dev)
{
/* Wait 100 ms before the system can be put into a sleep state. */
pm_wakeup_event(&dev->dev, 100);
}
+/**
+ * pci_bar_index_is_valid - Check whether a BAR index is within valid range
+ * @bar: BAR index
+ *
+ * Protects against overflowing the &struct pci_dev.resource array.
+ *
+ * Return: true for valid index, false otherwise.
+ */
+static inline bool pci_bar_index_is_valid(int bar)
+{
+ if (bar >= 0 && bar < PCI_NUM_RESOURCES)
+ return true;
+
+ return false;
+}
+
static inline bool pci_has_subordinate(struct pci_dev *pci_dev)
{
return !!(pci_dev->subordinate);
@@ -126,7 +207,6 @@ static inline bool pcie_downstream_port(const struct pci_dev *dev)
}
void pci_vpd_init(struct pci_dev *dev);
-void pci_vpd_release(struct pci_dev *dev);
extern const struct attribute_group pci_dev_vpd_attr_group;
/* PCI Virtual Channel */
@@ -189,6 +269,7 @@ extern const struct attribute_group *pci_dev_groups[];
extern const struct attribute_group *pci_dev_attr_groups[];
extern const struct attribute_group *pcibus_groups[];
extern const struct attribute_group *pci_bus_groups[];
+extern const struct attribute_group pci_doe_sysfs_group;
#else
static inline int pci_create_sysfs_dev_files(struct pci_dev *pdev) { return 0; }
static inline void pci_remove_sysfs_dev_files(struct pci_dev *pdev) { }
@@ -202,6 +283,8 @@ extern unsigned long pci_hotplug_io_size;
extern unsigned long pci_hotplug_mmio_size;
extern unsigned long pci_hotplug_mmio_pref_size;
extern unsigned long pci_hotplug_bus_size;
+extern unsigned long pci_cardbus_io_size;
+extern unsigned long pci_cardbus_mem_size;
/**
* pci_match_one_device - Tell if a PCI device structure has a matching
@@ -245,16 +328,22 @@ enum pci_bar_type {
struct device *pci_get_host_bridge_device(struct pci_dev *dev);
void pci_put_host_bridge_device(struct device *dev);
+unsigned int pci_rescan_bus_bridge_resize(struct pci_dev *bridge);
+int pci_reassign_bridge_resources(struct pci_dev *bridge, unsigned long type);
+int __must_check pci_reassign_resource(struct pci_dev *dev, int i, resource_size_t add_size, resource_size_t align);
+
int pci_configure_extended_tags(struct pci_dev *dev, void *ign);
bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *pl,
- int crs_timeout);
+ int rrs_timeout);
bool pci_bus_generic_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *pl,
- int crs_timeout);
-int pci_idt_bus_quirk(struct pci_bus *bus, int devfn, u32 *pl, int crs_timeout);
+ int rrs_timeout);
+int pci_idt_bus_quirk(struct pci_bus *bus, int devfn, u32 *pl, int rrs_timeout);
int pci_setup_device(struct pci_dev *dev);
+void __pci_size_stdbars(struct pci_dev *dev, int count,
+ unsigned int pos, u32 *sizes);
int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
- struct resource *res, unsigned int reg);
+ struct resource *res, unsigned int reg, u32 *sizes);
void pci_configure_ari(struct pci_dev *dev);
void __pci_bus_size_bridges(struct pci_bus *bus,
struct list_head *realloc_head);
@@ -262,14 +351,51 @@ void __pci_bus_assign_resources(const struct pci_bus *bus,
struct list_head *realloc_head,
struct list_head *fail_head);
bool pci_bus_clip_resource(struct pci_dev *dev, int idx);
+void pci_walk_bus_locked(struct pci_bus *top,
+ int (*cb)(struct pci_dev *, void *),
+ void *userdata);
const char *pci_resource_name(struct pci_dev *dev, unsigned int i);
+bool pci_resource_is_optional(const struct pci_dev *dev, int resno);
+
+/**
+ * pci_resource_num - Reverse lookup resource number from device resources
+ * @dev: PCI device
+ * @res: Resource to look up the index for (MUST be one of @dev's resources)
+ *
+ * Perform a reverse lookup to determine the resource number for @res within
+ * @dev's resource array. NOTE: The caller is responsible for ensuring @res is
+ * among @dev's resources!
+ *
+ * Returns: resource number.
+ */
+static inline int pci_resource_num(const struct pci_dev *dev,
+ const struct resource *res)
+{
+ int resno = res - &dev->resource[0];
+
+ /* Passing a resource that is not among dev's resources? */
+ WARN_ON_ONCE(resno >= PCI_NUM_RESOURCES);
+
+ return resno;
+}
void pci_reassigndev_resource_alignment(struct pci_dev *dev);
void pci_disable_bridge_window(struct pci_dev *dev);
struct pci_bus *pci_bus_get(struct pci_bus *bus);
void pci_bus_put(struct pci_bus *bus);
+#define PCIE_LNKCAP_SLS2SPEED(lnkcap) \
+({ \
+ ((lnkcap) == PCI_EXP_LNKCAP_SLS_64_0GB ? PCIE_SPEED_64_0GT : \
+ (lnkcap) == PCI_EXP_LNKCAP_SLS_32_0GB ? PCIE_SPEED_32_0GT : \
+ (lnkcap) == PCI_EXP_LNKCAP_SLS_16_0GB ? PCIE_SPEED_16_0GT : \
+ (lnkcap) == PCI_EXP_LNKCAP_SLS_8_0GB ? PCIE_SPEED_8_0GT : \
+ (lnkcap) == PCI_EXP_LNKCAP_SLS_5_0GB ? PCIE_SPEED_5_0GT : \
+ (lnkcap) == PCI_EXP_LNKCAP_SLS_2_5GB ? PCIE_SPEED_2_5GT : \
+ PCI_SPEED_UNKNOWN); \
+})
+
/* PCIe link information from Link Capabilities 2 */
#define PCIE_LNKCAP2_SLS2SPEED(lnkcap2) \
((lnkcap2) & PCI_EXP_LNKCAP2_SLS_64_0GB ? PCIE_SPEED_64_0GT : \
@@ -280,6 +406,15 @@ void pci_bus_put(struct pci_bus *bus);
(lnkcap2) & PCI_EXP_LNKCAP2_SLS_2_5GB ? PCIE_SPEED_2_5GT : \
PCI_SPEED_UNKNOWN)
+#define PCIE_LNKCTL2_TLS2SPEED(lnkctl2) \
+ ((lnkctl2) == PCI_EXP_LNKCTL2_TLS_64_0GT ? PCIE_SPEED_64_0GT : \
+ (lnkctl2) == PCI_EXP_LNKCTL2_TLS_32_0GT ? PCIE_SPEED_32_0GT : \
+ (lnkctl2) == PCI_EXP_LNKCTL2_TLS_16_0GT ? PCIE_SPEED_16_0GT : \
+ (lnkctl2) == PCI_EXP_LNKCTL2_TLS_8_0GT ? PCIE_SPEED_8_0GT : \
+ (lnkctl2) == PCI_EXP_LNKCTL2_TLS_5_0GT ? PCIE_SPEED_5_0GT : \
+ (lnkctl2) == PCI_EXP_LNKCTL2_TLS_2_5GT ? PCIE_SPEED_2_5GT : \
+ PCI_SPEED_UNKNOWN)
+
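
Complementing the LNKCAP/LNKCAP2 decoders above, this macro maps the Target Link Speed field; a short usage sketch:

	u16 lnkctl2;

	pcie_capability_read_word(pdev, PCI_EXP_LNKCTL2, &lnkctl2);
	/* Target Link Speed as programmed by software or firmware */
	enum pci_bus_speed target =
		PCIE_LNKCTL2_TLS2SPEED(lnkctl2 & PCI_EXP_LNKCTL2_TLS);
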
/* PCIe speed to Mb/s reduced by encoding overhead */
#define PCIE_SPEED2MBS_ENC(speed) \
((speed) == PCIE_SPEED_64_0GT ? 64000*1/1 : \
@@ -290,14 +425,39 @@ void pci_bus_put(struct pci_bus *bus);
(speed) == PCIE_SPEED_2_5GT ? 2500*8/10 : \
0)
+static inline int pcie_dev_speed_mbps(enum pci_bus_speed speed)
+{
+ switch (speed) {
+ case PCIE_SPEED_2_5GT:
+ return 2500;
+ case PCIE_SPEED_5_0GT:
+ return 5000;
+ case PCIE_SPEED_8_0GT:
+ return 8000;
+ case PCIE_SPEED_16_0GT:
+ return 16000;
+ case PCIE_SPEED_32_0GT:
+ return 32000;
+ case PCIE_SPEED_64_0GT:
+ return 64000;
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
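
Unlike PCIE_SPEED2MBS_ENC() above, this helper returns the raw per-lane signaling rate with no encoding overhead deducted; for example:

	/* 16.0 GT/s x4: 16000 Mb/s per lane raw, 64000 Mb/s across the link */
	int mbps = pcie_dev_speed_mbps(PCIE_SPEED_16_0GT);	/* 16000 */
	u32 raw_link = 4 * mbps;
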
+u8 pcie_get_supported_speeds(struct pci_dev *dev);
const char *pci_speed_string(enum pci_bus_speed speed);
-enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev);
-enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev);
-u32 pcie_bandwidth_capable(struct pci_dev *dev, enum pci_bus_speed *speed,
- enum pcie_link_width *width);
void __pcie_print_link_status(struct pci_dev *dev, bool verbose);
void pcie_report_downtraining(struct pci_dev *dev);
-void pcie_update_link_speed(struct pci_bus *bus, u16 link_status);
+
+static inline void __pcie_update_link_speed(struct pci_bus *bus, u16 linksta, u16 linksta2)
+{
+ bus->cur_bus_speed = pcie_link_speed[linksta & PCI_EXP_LNKSTA_CLS];
+ bus->flit_mode = (linksta2 & PCI_EXP_LNKSTA2_FLIT) ? 1 : 0;
+}
+void pcie_update_link_speed(struct pci_bus *bus);
/* Single Root I/O Virtualization */
struct pci_sriov {
@@ -335,6 +495,22 @@ static inline void pci_doe_destroy(struct pci_dev *pdev) { }
static inline void pci_doe_disconnected(struct pci_dev *pdev) { }
#endif
+#ifdef CONFIG_PCI_NPEM
+void pci_npem_create(struct pci_dev *dev);
+void pci_npem_remove(struct pci_dev *dev);
+#else
+static inline void pci_npem_create(struct pci_dev *dev) { }
+static inline void pci_npem_remove(struct pci_dev *dev) { }
+#endif
+
+#if defined(CONFIG_PCI_DOE) && defined(CONFIG_SYSFS)
+void pci_doe_sysfs_init(struct pci_dev *pci_dev);
+void pci_doe_sysfs_teardown(struct pci_dev *pdev);
+#else
+static inline void pci_doe_sysfs_init(struct pci_dev *pdev) { }
+static inline void pci_doe_sysfs_teardown(struct pci_dev *pdev) { }
+#endif
+
/**
* pci_dev_set_io_state - Set the new error state if possible.
*
@@ -380,10 +556,18 @@ static inline int pci_dev_set_disconnected(struct pci_dev *dev, void *unused)
#define PCI_DEV_ADDED 0
#define PCI_DPC_RECOVERED 1
#define PCI_DPC_RECOVERING 2
+#define PCI_DEV_REMOVED 3
+
+static inline void pci_dev_assign_added(struct pci_dev *dev)
+{
+ smp_mb__before_atomic();
+ set_bit(PCI_DEV_ADDED, &dev->priv_flags);
+ smp_mb__after_atomic();
+}
-static inline void pci_dev_assign_added(struct pci_dev *dev, bool added)
+static inline bool pci_dev_test_and_clear_added(struct pci_dev *dev)
{
- assign_bit(PCI_DEV_ADDED, &dev->priv_flags, added);
+ return test_and_clear_bit(PCI_DEV_ADDED, &dev->priv_flags);
}
static inline bool pci_dev_is_added(const struct pci_dev *dev)
@@ -391,6 +575,11 @@ static inline bool pci_dev_is_added(const struct pci_dev *dev)
return test_bit(PCI_DEV_ADDED, &dev->priv_flags);
}
+static inline bool pci_dev_test_and_set_removed(struct pci_dev *dev)
+{
+ return test_and_set_bit(PCI_DEV_REMOVED, &dev->priv_flags);
+}
+
#ifdef CONFIG_PCIEAER
#include <linux/aer.h>
@@ -417,6 +606,13 @@ struct aer_err_info {
int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info);
void aer_print_error(struct pci_dev *dev, struct aer_err_info *info);
+
+int pcie_read_tlp_log(struct pci_dev *dev, int where, int where2,
+ unsigned int tlp_len, bool flit,
+ struct pcie_tlp_log *log);
+unsigned int aer_tlp_log_len(struct pci_dev *dev, u32 aercc);
+void pcie_print_tlp_log(const struct pci_dev *dev,
+ const struct pcie_tlp_log *log, const char *pfx);
#endif /* CONFIG_PCIEAER */
#ifdef CONFIG_PCIEPORTBUS
@@ -435,6 +631,7 @@ void pci_dpc_init(struct pci_dev *pdev);
void dpc_process_error(struct pci_dev *pdev);
pci_ers_result_t dpc_reset_link(struct pci_dev *pdev);
bool pci_dpc_recovered(struct pci_dev *pdev);
+unsigned int dpc_tlp_log_len(struct pci_dev *dev);
#else
static inline void pci_save_dpc_state(struct pci_dev *dev) { }
static inline void pci_restore_dpc_state(struct pci_dev *dev) { }
@@ -491,6 +688,10 @@ void pci_iov_update_resource(struct pci_dev *dev, int resno);
resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, int resno);
void pci_restore_iov_state(struct pci_dev *dev);
int pci_iov_bus_range(struct pci_bus *bus);
+static inline bool pci_resource_is_iov(int resno)
+{
+ return resno >= PCI_IOV_RESOURCES && resno <= PCI_IOV_RESOURCE_END;
+}
extern const struct attribute_group sriov_pf_dev_attr_group;
extern const struct attribute_group sriov_vf_dev_attr_group;
#else
@@ -500,14 +701,35 @@ static inline int pci_iov_init(struct pci_dev *dev)
}
static inline void pci_iov_release(struct pci_dev *dev) { }
static inline void pci_iov_remove(struct pci_dev *dev) { }
+static inline void pci_iov_update_resource(struct pci_dev *dev, int resno) { }
+static inline resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev,
+ int resno)
+{
+ return 0;
+}
static inline void pci_restore_iov_state(struct pci_dev *dev) { }
static inline int pci_iov_bus_range(struct pci_bus *bus)
{
return 0;
}
-
+static inline bool pci_resource_is_iov(int resno)
+{
+ return false;
+}
#endif /* CONFIG_PCI_IOV */
+#ifdef CONFIG_PCIE_TPH
+void pci_restore_tph_state(struct pci_dev *dev);
+void pci_save_tph_state(struct pci_dev *dev);
+void pci_no_tph(void);
+void pci_tph_init(struct pci_dev *dev);
+#else
+static inline void pci_restore_tph_state(struct pci_dev *dev) { }
+static inline void pci_save_tph_state(struct pci_dev *dev) { }
+static inline void pci_no_tph(void) { }
+static inline void pci_tph_init(struct pci_dev *dev) { }
+#endif
+
#ifdef CONFIG_PCIE_PTM
void pci_ptm_init(struct pci_dev *dev);
void pci_save_ptm_state(struct pci_dev *dev);
@@ -527,12 +749,10 @@ unsigned long pci_cardbus_resource_alignment(struct resource *);
static inline resource_size_t pci_resource_alignment(struct pci_dev *dev,
struct resource *res)
{
-#ifdef CONFIG_PCI_IOV
- int resno = res - dev->resource;
+ int resno = pci_resource_num(dev, res);
- if (resno >= PCI_IOV_RESOURCES && resno <= PCI_IOV_RESOURCE_END)
+ if (pci_resource_is_iov(resno))
return pci_sriov_resource_alignment(dev, resno);
-#endif
if (dev->class >> 8 == PCI_CLASS_BRIDGE_CARDBUS)
return pci_cardbus_resource_alignment(res);
return resource_alignment(res);
@@ -543,7 +763,7 @@ void pci_acs_init(struct pci_dev *dev);
int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags);
int pci_dev_specific_enable_acs(struct pci_dev *dev);
int pci_dev_specific_disable_acs_redir(struct pci_dev *dev);
-bool pcie_failed_link_retrain(struct pci_dev *dev);
+int pcie_failed_link_retrain(struct pci_dev *dev);
#else
static inline int pci_dev_specific_acs_enabled(struct pci_dev *dev,
u16 acs_flags)
@@ -558,9 +778,9 @@ static inline int pci_dev_specific_disable_acs_redir(struct pci_dev *dev)
{
return -ENOTTY;
}
-static inline bool pcie_failed_link_retrain(struct pci_dev *dev)
+static inline int pcie_failed_link_retrain(struct pci_dev *dev)
{
- return false;
+ return -ENOTTY;
}
#endif
@@ -603,6 +823,17 @@ static inline void pcie_set_ecrc_checking(struct pci_dev *dev) { }
static inline void pcie_ecrc_get_policy(char *str) { }
#endif
+#ifdef CONFIG_PCIEPORTBUS
+void pcie_reset_lbms_count(struct pci_dev *port);
+int pcie_lbms_count(struct pci_dev *port, unsigned long *val);
+#else
+static inline void pcie_reset_lbms_count(struct pci_dev *port) {}
+static inline int pcie_lbms_count(struct pci_dev *port, unsigned long *val)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
struct pci_dev_reset_methods {
u16 vendor;
u16 device;
@@ -613,6 +844,7 @@ struct pci_reset_fn_method {
int (*reset_fn)(struct pci_dev *pdev, bool probe);
char *name;
};
+extern const struct pci_reset_fn_method pci_reset_fn_methods[];
#ifdef CONFIG_PCI_QUIRKS
int pci_dev_specific_reset(struct pci_dev *dev, bool probe);
@@ -634,6 +866,7 @@ static inline int acpi_get_rc_resources(struct device *dev, const char *hid,
}
#endif
+void pci_rebar_init(struct pci_dev *pdev);
int pci_rebar_get_current_size(struct pci_dev *pdev, int bar);
int pci_rebar_set_size(struct pci_dev *pdev, int bar, int size);
static inline u64 pci_rebar_size_to_bytes(int size)
@@ -644,27 +877,22 @@ static inline u64 pci_rebar_size_to_bytes(int size)
struct device_node;
#ifdef CONFIG_OF
-int of_pci_parse_bus_range(struct device_node *node, struct resource *res);
int of_get_pci_domain_nr(struct device_node *node);
int of_pci_get_max_link_speed(struct device_node *node);
u32 of_pci_get_slot_power_limit(struct device_node *node,
u8 *slot_power_limit_value,
u8 *slot_power_limit_scale);
+bool of_pci_preserve_config(struct device_node *node);
int pci_set_of_node(struct pci_dev *dev);
void pci_release_of_node(struct pci_dev *dev);
void pci_set_bus_of_node(struct pci_bus *bus);
void pci_release_bus_of_node(struct pci_bus *bus);
int devm_of_pci_bridge_init(struct device *dev, struct pci_host_bridge *bridge);
+bool of_pci_supply_present(struct device_node *np);
#else
static inline int
-of_pci_parse_bus_range(struct device_node *node, struct resource *res)
-{
- return -EINVAL;
-}
-
-static inline int
of_get_pci_domain_nr(struct device_node *node)
{
return -1;
@@ -688,6 +916,11 @@ of_pci_get_slot_power_limit(struct device_node *node,
return 0;
}
+static inline bool of_pci_preserve_config(struct device_node *node)
+{
+ return false;
+}
+
static inline int pci_set_of_node(struct pci_dev *dev) { return 0; }
static inline void pci_release_of_node(struct pci_dev *dev) { }
static inline void pci_set_bus_of_node(struct pci_bus *bus) { }
@@ -698,6 +931,10 @@ static inline int devm_of_pci_bridge_init(struct device *dev, struct pci_host_br
return 0;
}
+static inline bool of_pci_supply_present(struct device_node *np)
+{
+ return false;
+}
#endif /* CONFIG_OF */
struct of_changeset;
@@ -707,9 +944,16 @@ void of_pci_make_dev_node(struct pci_dev *pdev);
void of_pci_remove_node(struct pci_dev *pdev);
int of_pci_add_properties(struct pci_dev *pdev, struct of_changeset *ocs,
struct device_node *np);
+void of_pci_make_host_bridge_node(struct pci_host_bridge *bridge);
+void of_pci_remove_host_bridge_node(struct pci_host_bridge *bridge);
+int of_pci_add_host_bridge_properties(struct pci_host_bridge *bridge,
+ struct of_changeset *ocs,
+ struct device_node *np);
#else
static inline void of_pci_make_dev_node(struct pci_dev *pdev) { }
static inline void of_pci_remove_node(struct pci_dev *pdev) { }
+static inline void of_pci_make_host_bridge_node(struct pci_host_bridge *bridge) { }
+static inline void of_pci_remove_host_bridge_node(struct pci_host_bridge *bridge) { }
#endif
#ifdef CONFIG_PCIEAER
@@ -734,6 +978,7 @@ static inline void pci_restore_aer_state(struct pci_dev *dev) { }
#endif
#ifdef CONFIG_ACPI
+bool pci_acpi_preserve_config(struct pci_host_bridge *bridge);
int pci_acpi_program_hp_params(struct pci_dev *dev);
extern const struct attribute_group pci_dev_acpi_attr_group;
void pci_set_acpi_fwnode(struct pci_dev *dev);
@@ -747,6 +992,10 @@ int acpi_pci_wakeup(struct pci_dev *dev, bool enable);
bool acpi_pci_need_resume(struct pci_dev *dev);
pci_power_t acpi_pci_choose_state(struct pci_dev *pdev);
#else
+static inline bool pci_acpi_preserve_config(struct pci_host_bridge *bridge)
+{
+ return false;
+}
static inline int pci_dev_acpi_reset(struct pci_dev *dev, bool probe)
{
return -ENOTTY;
@@ -791,8 +1040,6 @@ static inline pci_power_t acpi_pci_choose_state(struct pci_dev *pdev)
extern const struct attribute_group aspm_ctrl_attr_group;
#endif
-extern const struct attribute_group pci_dev_reset_method_attr_group;
-
#ifdef CONFIG_X86_INTEL_MID
bool pci_use_mid_pm(void);
int mid_pci_set_power_state(struct pci_dev *pdev, pci_power_t state);
@@ -812,26 +1059,19 @@ static inline pci_power_t mid_pci_get_power_state(struct pci_dev *pdev)
}
#endif
-/*
- * Managed PCI resources. This manages device on/off, INTx/MSI/MSI-X
- * on/off and BAR regions. pci_dev itself records MSI/MSI-X status, so
- * there's no need to track it separately. pci_devres is initialized
- * when a device is enabled using managed PCI device enable interface.
- *
- * TODO: Struct pci_devres and find_pci_dr() only need to be here because
- * they're used in pci.c. Port or move these functions to devres.c and
- * then remove them from here.
- */
-struct pci_devres {
- unsigned int enabled:1;
- unsigned int pinned:1;
- unsigned int orig_intx:1;
- unsigned int restore_intx:1;
- unsigned int mwi:1;
- u32 region_mask;
-};
+int pcim_intx(struct pci_dev *dev, int enable);
+int pcim_request_region_exclusive(struct pci_dev *pdev, int bar,
+ const char *name);
+void pcim_release_region(struct pci_dev *pdev, int bar);
-struct pci_devres *find_pci_dr(struct pci_dev *pdev);
+#ifdef CONFIG_PCI_MSI
+int pci_msix_write_tph_tag(struct pci_dev *pdev, unsigned int index, u16 tag);
+#else
+static inline int pci_msix_write_tph_tag(struct pci_dev *pdev, unsigned int index, u16 tag)
+{
+ return -ENODEV;
+}
+#endif
/*
* Config Address for PCI Configuration Mechanism #1
diff --git a/drivers/pci/pcie/Kconfig b/drivers/pci/pcie/Kconfig
index 8999fcebde6a..17919b99fa66 100644
--- a/drivers/pci/pcie/Kconfig
+++ b/drivers/pci/pcie/Kconfig
@@ -47,7 +47,7 @@ config PCIEAER_INJECT
error injection can fake almost all kinds of errors with the
help of a user space helper tool aer-inject, which can be
gotten from:
- https://git.kernel.org/cgit/linux/kernel/git/gong.chen/aer-inject.git/
+ https://github.com/intel/aer-inject.git
config PCIEAER_CXL
bool "PCI Express CXL RAS support"
diff --git a/drivers/pci/pcie/Makefile b/drivers/pci/pcie/Makefile
index 6461aa93fe76..173829aa02e6 100644
--- a/drivers/pci/pcie/Makefile
+++ b/drivers/pci/pcie/Makefile
@@ -4,10 +4,10 @@
pcieportdrv-y := portdrv.o rcec.o
-obj-$(CONFIG_PCIEPORTBUS) += pcieportdrv.o
+obj-$(CONFIG_PCIEPORTBUS) += pcieportdrv.o bwctrl.o
obj-y += aspm.o
-obj-$(CONFIG_PCIEAER) += aer.o err.o
+obj-$(CONFIG_PCIEAER) += aer.o err.o tlp.o
obj-$(CONFIG_PCIEAER_INJECT) += aer_inject.o
obj-$(CONFIG_PCIE_PME) += pme.o
obj-$(CONFIG_PCIE_DPC) += dpc.o
diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c
index ac6293c24976..a1cf8c7ef628 100644
--- a/drivers/pci/pcie/aer.c
+++ b/drivers/pci/pcie/aer.c
@@ -2,7 +2,7 @@
/*
* Implement the AER root port service driver. The driver registers an IRQ
* handler. When a root port triggers an AER interrupt, the IRQ handler
- * collects root port status and schedules work.
+ * collects Root Port status and schedules work.
*
* Copyright (C) 2006 Intel Corp.
* Tom Long Nguyen (tom.l.nguyen@intel.com)
@@ -17,6 +17,7 @@
#include <linux/bitops.h>
#include <linux/cper.h>
+#include <linux/dev_printk.h>
#include <linux/pci.h>
#include <linux/pci-acpi.h>
#include <linux/sched.h>
@@ -35,6 +36,9 @@
#include "../pci.h"
#include "portdrv.h"
+#define aer_printk(level, pdev, fmt, arg...) \
+ dev_printk(level, &(pdev)->dev, fmt, ##arg)
+
#define AER_ERROR_SOURCES_MAX 128
#define AER_MAX_TYPEOF_COR_ERRS 16 /* as per PCI_ERR_COR_STATUS */
@@ -56,9 +60,9 @@ struct aer_stats {
/*
* Fields for all AER capable devices. They indicate the errors
* "as seen by this device". Note that this may mean that if an
- * end point is causing problems, the AER counters may increment
- * at its link partner (e.g. root port) because the errors will be
- * "seen" by the link partner and not the problematic end point
+ * Endpoint is causing problems, the AER counters may increment
+ * at its link partner (e.g. Root Port) because the errors will be
+ * "seen" by the link partner and not the problematic Endpoint
* itself (which may report all counters as 0 as it never saw any
* problems).
*/
@@ -76,10 +80,10 @@ struct aer_stats {
u64 dev_total_nonfatal_errs;
/*
- * Fields for Root ports & root complex event collectors only, these
+ * Fields for Root Ports & Root Complex Event Collectors only; these
* indicate the total number of ERR_COR, ERR_FATAL, and ERR_NONFATAL
- * messages received by the root port / event collector, INCLUDING the
- * ones that are generated internally (by the rootport itself)
+ * messages received by the Root Port / Event Collector, INCLUDING the
+ * ones that are generated internally (by the Root Port itself)
*/
u64 rootport_total_cor_errs;
u64 rootport_total_fatal_errs;
@@ -138,7 +142,7 @@ static const char * const ecrc_policy_str[] = {
* enable_ecrc_checking - enable PCIe ECRC checking for a device
* @dev: the PCI device
*
- * Returns 0 on success, or negative on failure.
+ * Return: 0 on success, or negative on failure.
*/
static int enable_ecrc_checking(struct pci_dev *dev)
{
@@ -159,10 +163,10 @@ static int enable_ecrc_checking(struct pci_dev *dev)
}
/**
- * disable_ecrc_checking - disables PCIe ECRC checking for a device
+ * disable_ecrc_checking - disable PCIe ECRC checking for a device
* @dev: the PCI device
*
- * Returns 0 on success, or negative on failure.
+ * Return: 0 on success, or negative on failure.
*/
static int disable_ecrc_checking(struct pci_dev *dev)
{
@@ -180,7 +184,8 @@ static int disable_ecrc_checking(struct pci_dev *dev)
}
/**
- * pcie_set_ecrc_checking - set/unset PCIe ECRC checking for a device based on global policy
+ * pcie_set_ecrc_checking - set/unset PCIe ECRC checking for a device based
+ * on global policy
* @dev: the PCI device
*/
void pcie_set_ecrc_checking(struct pci_dev *dev)
@@ -230,7 +235,7 @@ int pcie_aer_is_native(struct pci_dev *dev)
return pcie_ports_native || host->native_aer;
}
-EXPORT_SYMBOL_NS_GPL(pcie_aer_is_native, CXL);
+EXPORT_SYMBOL_NS_GPL(pcie_aer_is_native, "CXL");
static int pci_enable_pcie_error_reporting(struct pci_dev *dev)
{
@@ -282,10 +287,10 @@ void pci_aer_clear_fatal_status(struct pci_dev *dev)
* pci_aer_raw_clear_status - Clear AER error registers.
* @dev: the PCI device
*
- * Clearing AER error status registers unconditionally, regardless of
+ * Clear AER error status registers unconditionally, regardless of
* whether they're owned by firmware or the OS.
*
- * Returns 0 on success, or negative on failure.
+ * Return: 0 on success, or negative on failure.
*/
int pci_aer_raw_clear_status(struct pci_dev *dev)
{
@@ -377,8 +382,8 @@ void pci_aer_init(struct pci_dev *dev)
/*
* We save/restore PCI_ERR_UNCOR_MASK, PCI_ERR_UNCOR_SEVER,
* PCI_ERR_COR_MASK, and PCI_ERR_CAP. Root and Root Complex Event
- * Collectors also implement PCI_ERR_ROOT_COMMAND (PCIe r5.0, sec
- * 7.8.4).
+ * Collectors also implement PCI_ERR_ROOT_COMMAND (PCIe r6.0, sec
+ * 7.8.4.9).
*/
n = pcie_cap_has_rtctl(dev) ? 5 : 4;
pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_ERR, sizeof(u32) * n);
@@ -664,12 +669,6 @@ static void pci_rootport_aer_stats_incr(struct pci_dev *pdev,
}
}
-static void __print_tlp_header(struct pci_dev *dev, struct pcie_tlp_log *t)
-{
- pci_err(dev, " TLP Header: %08x %08x %08x %08x\n",
- t->dw[0], t->dw[1], t->dw[2], t->dw[3]);
-}
-
static void __aer_print_error(struct pci_dev *dev,
struct aer_err_info *info)
{
@@ -691,7 +690,7 @@ static void __aer_print_error(struct pci_dev *dev,
if (!errmsg)
errmsg = "Unknown Error Bit";
- pci_printk(level, dev, " [%2d] %-22s%s\n", i, errmsg,
+ aer_printk(level, dev, " [%2d] %-22s%s\n", i, errmsg,
info->first_error == i ? " (First)" : "");
}
pci_dev_aer_stats_incr(dev, info);
@@ -714,17 +713,17 @@ void aer_print_error(struct pci_dev *dev, struct aer_err_info *info)
level = (info->severity == AER_CORRECTABLE) ? KERN_WARNING : KERN_ERR;
- pci_printk(level, dev, "PCIe Bus Error: severity=%s, type=%s, (%s)\n",
+ aer_printk(level, dev, "PCIe Bus Error: severity=%s, type=%s, (%s)\n",
aer_error_severity_string[info->severity],
aer_error_layer[layer], aer_agent_string[agent]);
- pci_printk(level, dev, " device [%04x:%04x] error status/mask=%08x/%08x\n",
+ aer_printk(level, dev, " device [%04x:%04x] error status/mask=%08x/%08x\n",
dev->vendor, dev->device, info->status, info->mask);
__aer_print_error(dev, info);
if (info->tlp_header_valid)
- __print_tlp_header(dev, &info->tlp);
+ pcie_print_tlp_log(dev, &info->tlp, dev_fmt(" "));
out:
if (info->id && info->error_dev_num > 1 && info->id == id)
@@ -796,12 +795,12 @@ void pci_print_aer(struct pci_dev *dev, int aer_severity,
aer->uncor_severity);
if (tlp_header_valid)
- __print_tlp_header(dev, &aer->header_log);
+ pcie_print_tlp_log(dev, &aer->header_log, dev_fmt(" "));
trace_aer_event(dev_name(&dev->dev), (status & ~mask),
aer_severity, tlp_header_valid, &aer->header_log);
}
-EXPORT_SYMBOL_NS_GPL(pci_print_aer, CXL);
+EXPORT_SYMBOL_NS_GPL(pci_print_aer, "CXL");
/**
* add_error_device - list device to be handled
@@ -830,8 +829,8 @@ static bool is_error_source(struct pci_dev *dev, struct aer_err_info *e_info)
u16 reg16;
/*
- * When bus id is equal to 0, it might be a bad id
- * reported by root port.
+ * When bus ID is equal to 0, it might be a bad ID
+ * reported by the Root Port.
*/
if ((PCI_BUS_NUM(e_info->id) != 0) &&
!(dev->bus->bus_flags & PCI_BUS_FLAGS_NO_AERSID)) {
@@ -839,15 +838,15 @@ static bool is_error_source(struct pci_dev *dev, struct aer_err_info *e_info)
if (e_info->id == pci_dev_id(dev))
return true;
- /* Continue id comparing if there is no multiple error */
+ /* Continue comparing IDs if there is no multiple error */
if (!e_info->multi_error_valid)
return false;
}
/*
* When either
- * 1) bus id is equal to 0. Some ports might lose the bus
- * id of error source id;
+ * 1) bus ID is equal to 0. Some ports might lose the bus
+ * number of the error source ID;
* 2) bus flag PCI_BUS_FLAGS_NO_AERSID is set
* 3) There are multiple errors and prior ID comparing fails;
* We check AER status registers to find possible reporter.
@@ -899,9 +898,9 @@ static int find_device_iter(struct pci_dev *dev, void *data)
/**
* find_source_device - search through device hierarchy for source device
* @parent: pointer to Root Port pci_dev data structure
- * @e_info: including detailed error information such like id
+ * @e_info: including detailed error information such as ID
*
- * Return true if found.
+ * Return: true if found.
*
* Invoked by DPC when error is detected at the Root Port.
* Caller of this function must set id, severity, and multi_error_valid of
@@ -943,9 +942,9 @@ static bool find_source_device(struct pci_dev *parent,
/**
* pci_aer_unmask_internal_errors - unmask internal errors
- * @dev: pointer to the pcie_dev data structure
+ * @dev: pointer to the pci_dev data structure
*
- * Unmasks internal errors in the Uncorrectable and Correctable Error
+ * Unmask internal errors in the Uncorrectable and Correctable Error
* Mask registers.
*
* Note: AER must be enabled and supported by the device which must be
@@ -1008,7 +1007,7 @@ static int cxl_rch_handle_error_iter(struct pci_dev *dev, void *data)
if (!is_cxl_mem_dev(dev) || !cxl_error_is_native(dev))
return 0;
- /* protect dev->driver */
+ /* Protect dev->driver */
device_lock(&dev->dev);
err_handler = dev->driver ? dev->driver->err_handler : NULL;
@@ -1148,14 +1147,16 @@ static void aer_recover_work_func(struct work_struct *work)
continue;
}
pci_print_aer(pdev, entry.severity, entry.regs);
+
/*
- * Memory for aer_capability_regs(entry.regs) is being allocated from the
- * ghes_estatus_pool to protect it from overwriting when multiple sections
- * are present in the error status. Thus free the same after processing
- * the data.
+ * Memory for aer_capability_regs(entry.regs) is being
+ * allocated from the ghes_estatus_pool to protect it from
+ * overwriting when multiple sections are present in the
+ * error status. Thus free the same after processing the
+ * data.
*/
ghes_estatus_pool_region_free((unsigned long)entry.regs,
- sizeof(struct aer_capability_regs));
+ sizeof(struct aer_capability_regs));
if (entry.severity == AER_NONFATAL)
pcie_do_recovery(pdev, pci_channel_io_normal,
@@ -1198,10 +1199,10 @@ EXPORT_SYMBOL_GPL(aer_recover_queue);
/**
* aer_get_device_error_info - read error status from dev and store it to info
- * @dev: pointer to the device expected to have a error record
+ * @dev: pointer to the device expected to have an error record
* @info: pointer to structure to store the error record
*
- * Return 1 on success, 0 on error.
+ * Return: 1 on success, 0 on error.
*
* Note that @info is reused among all error devices. Clear fields properly.
*/
@@ -1245,7 +1246,11 @@ int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info)
if (info->status & AER_LOG_TLP_MASKS) {
info->tlp_header_valid = 1;
- pcie_read_tlp_log(dev, aer + PCI_ERR_HEADER_LOG, &info->tlp);
+ pcie_read_tlp_log(dev, aer + PCI_ERR_HEADER_LOG,
+ aer + PCI_ERR_PREFIX_LOG,
+ aer_tlp_log_len(dev, aercc),
+ aercc & PCI_ERR_CAP_TLP_LOG_FLIT,
+ &info->tlp);
}
}
@@ -1256,7 +1261,7 @@ static inline void aer_process_err_devices(struct aer_err_info *e_info)
{
int i;
- /* Report all before handle them, not to lost records by reset etc. */
+ /* Report all before handling them, to not lose records by reset etc. */
for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) {
if (aer_get_device_error_info(e_info->dev[i], e_info))
aer_print_error(e_info->dev[i], e_info);
@@ -1268,8 +1273,8 @@ static inline void aer_process_err_devices(struct aer_err_info *e_info)
}
/**
- * aer_isr_one_error - consume an error detected by root port
- * @rpc: pointer to the root port which holds an error
+ * aer_isr_one_error - consume an error detected by Root Port
+ * @rpc: pointer to the Root Port which holds an error
* @e_src: pointer to an error source
*/
static void aer_isr_one_error(struct aer_rpc *rpc,
@@ -1319,11 +1324,11 @@ static void aer_isr_one_error(struct aer_rpc *rpc,
}
/**
- * aer_isr - consume errors detected by root port
+ * aer_isr - consume errors detected by Root Port
* @irq: IRQ assigned to Root Port
* @context: pointer to Root Port data structure
*
- * Invoked, as DPC, when root port records new detected error
+ * Invoked, as a deferred procedure call, when the Root Port records a new error
*/
static irqreturn_t aer_isr(int irq, void *context)
{
@@ -1383,7 +1388,7 @@ static void aer_disable_irq(struct pci_dev *pdev)
int aer = pdev->aer_cap;
u32 reg32;
- /* Disable Root's interrupt in response to error messages */
+ /* Disable Root Port's interrupt in response to error messages */
pci_read_config_dword(pdev, aer + PCI_ERR_ROOT_COMMAND, &reg32);
reg32 &= ~ROOT_PORT_INTR_ON_MESG_MASK;
pci_write_config_dword(pdev, aer + PCI_ERR_ROOT_COMMAND, reg32);
@@ -1497,6 +1502,22 @@ static int aer_probe(struct pcie_device *dev)
return 0;
}
+static int aer_suspend(struct pcie_device *dev)
+{
+ struct aer_rpc *rpc = get_service_data(dev);
+
+ aer_disable_rootport(rpc);
+ return 0;
+}
+
+static int aer_resume(struct pcie_device *dev)
+{
+ struct aer_rpc *rpc = get_service_data(dev);
+
+ aer_enable_rootport(rpc);
+ return 0;
+}
+
/**
* aer_root_reset - reset Root Port hierarchy, RCEC, or RCiEP
* @dev: pointer to Root Port, RCEC, or RCiEP
@@ -1561,13 +1582,15 @@ static struct pcie_port_service_driver aerdriver = {
.service = PCIE_PORT_SERVICE_AER,
.probe = aer_probe,
+ .suspend = aer_suspend,
+ .resume = aer_resume,
.remove = aer_remove,
};
/**
- * pcie_aer_init - register AER root service driver
+ * pcie_aer_init - register AER service driver
*
- * Invoked when AER root service driver is loaded.
+ * Invoked when AER service driver is loaded.
*/
int __init pcie_aer_init(void)
{
diff --git a/drivers/pci/pcie/aer_inject.c b/drivers/pci/pcie/aer_inject.c
index 2dab275d252f..91acc7b17f68 100644
--- a/drivers/pci/pcie/aer_inject.c
+++ b/drivers/pci/pcie/aer_inject.c
@@ -6,7 +6,7 @@
* trigger various real hardware errors. Software based error
* injection can fake almost all kinds of errors with the help of a
* user space helper tool aer-inject, which can be gotten from:
- * https://git.kernel.org/cgit/linux/kernel/git/gong.chen/aer-inject.git/
+ * https://github.com/intel/aer-inject.git
*
* Copyright 2009 Intel Corporation.
* Huang Ying <ying.huang@intel.com>
@@ -430,7 +430,7 @@ static int aer_inject(struct aer_error_inj *einj)
else
rperr->root_status |= PCI_ERR_ROOT_COR_RCV;
rperr->source_id &= 0xffff0000;
- rperr->source_id |= (einj->bus << 8) | devfn;
+ rperr->source_id |= PCI_DEVID(einj->bus, devfn);
}
if (einj->uncor_status) {
if (rperr->root_status & PCI_ERR_ROOT_UNCOR_RCV)
@@ -443,7 +443,7 @@ static int aer_inject(struct aer_error_inj *einj)
rperr->root_status |= PCI_ERR_ROOT_NONFATAL_RCV;
rperr->root_status |= PCI_ERR_ROOT_UNCOR_RCV;
rperr->source_id &= 0x0000ffff;
- rperr->source_id |= ((einj->bus << 8) | devfn) << 16;
+ rperr->source_id |= PCI_DEVID(einj->bus, devfn) << 16;
}
spin_unlock_irqrestore(&inject_lock, flags);
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 2428d278e015..29fcb0689a91 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -8,6 +8,8 @@
*/
#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/build_bug.h>
#include <linux/kernel.h>
#include <linux/limits.h>
#include <linux/math.h>
@@ -79,24 +81,44 @@ void pci_configure_aspm_l1ss(struct pci_dev *pdev)
void pci_save_aspm_l1ss_state(struct pci_dev *pdev)
{
+ struct pci_dev *parent = pdev->bus->self;
struct pci_cap_saved_state *save_state;
- u16 l1ss = pdev->l1ss;
u32 *cap;
/*
+ * If this is a Downstream Port, we never restore the L1SS state
+ * directly; we only restore it when we restore the state of the
+ * Upstream Port below it.
+ */
+ if (pcie_downstream_port(pdev) || !parent)
+ return;
+
+ if (!pdev->l1ss || !parent->l1ss)
+ return;
+
+ /*
* Save L1 substate configuration. The ASPM L0s/L1 configuration
* in PCI_EXP_LNKCTL_ASPMC is saved by pci_save_pcie_state().
*/
- if (!l1ss)
+ save_state = pci_find_saved_ext_cap(pdev, PCI_EXT_CAP_ID_L1SS);
+ if (!save_state)
return;
- save_state = pci_find_saved_ext_cap(pdev, PCI_EXT_CAP_ID_L1SS);
+ cap = &save_state->cap.data[0];
+ pci_read_config_dword(pdev, pdev->l1ss + PCI_L1SS_CTL2, cap++);
+ pci_read_config_dword(pdev, pdev->l1ss + PCI_L1SS_CTL1, cap++);
+
+ /*
+ * Save parent's L1 substate configuration so we have it for
+ * pci_restore_aspm_l1ss_state(pdev) to restore.
+ */
+ save_state = pci_find_saved_ext_cap(parent, PCI_EXT_CAP_ID_L1SS);
if (!save_state)
return;
cap = &save_state->cap.data[0];
- pci_read_config_dword(pdev, l1ss + PCI_L1SS_CTL2, cap++);
- pci_read_config_dword(pdev, l1ss + PCI_L1SS_CTL1, cap++);
+ pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CTL2, cap++);
+ pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1, cap++);
}
void pci_restore_aspm_l1ss_state(struct pci_dev *pdev)
@@ -177,8 +199,8 @@ void pci_restore_aspm_l1ss_state(struct pci_dev *pdev)
/* Restore L0s/L1 if they were enabled */
if (FIELD_GET(PCI_EXP_LNKCTL_ASPMC, clnkctl) ||
FIELD_GET(PCI_EXP_LNKCTL_ASPMC, plnkctl)) {
- pcie_capability_write_word(parent, PCI_EXP_LNKCTL, clnkctl);
- pcie_capability_write_word(pdev, PCI_EXP_LNKCTL, plnkctl);
+ pcie_capability_write_word(parent, PCI_EXP_LNKCTL, plnkctl);
+ pcie_capability_write_word(pdev, PCI_EXP_LNKCTL, clnkctl);
}
}
@@ -189,21 +211,18 @@ void pci_restore_aspm_l1ss_state(struct pci_dev *pdev)
#endif
#define MODULE_PARAM_PREFIX "pcie_aspm."
-/* Note: those are not register definitions */
-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
-#define ASPM_STATE_L1 (4) /* L1 state */
-#define ASPM_STATE_L1_1 (8) /* ASPM L1.1 state */
-#define ASPM_STATE_L1_2 (0x10) /* ASPM L1.2 state */
-#define ASPM_STATE_L1_1_PCIPM (0x20) /* PCI PM L1.1 state */
-#define ASPM_STATE_L1_2_PCIPM (0x40) /* PCI PM L1.2 state */
-#define ASPM_STATE_L1_SS_PCIPM (ASPM_STATE_L1_1_PCIPM | ASPM_STATE_L1_2_PCIPM)
-#define ASPM_STATE_L1_2_MASK (ASPM_STATE_L1_2 | ASPM_STATE_L1_2_PCIPM)
-#define ASPM_STATE_L1SS (ASPM_STATE_L1_1 | ASPM_STATE_L1_1_PCIPM |\
- ASPM_STATE_L1_2_MASK)
-#define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
-#define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1 | \
- ASPM_STATE_L1SS)
+/* Note: these are not register definitions */
+#define PCIE_LINK_STATE_L0S_UP BIT(0) /* Upstream direction L0s state */
+#define PCIE_LINK_STATE_L0S_DW BIT(1) /* Downstream direction L0s state */
+static_assert(PCIE_LINK_STATE_L0S == (PCIE_LINK_STATE_L0S_UP | PCIE_LINK_STATE_L0S_DW));
+
+#define PCIE_LINK_STATE_L1_SS_PCIPM (PCIE_LINK_STATE_L1_1_PCIPM |\
+ PCIE_LINK_STATE_L1_2_PCIPM)
+#define PCIE_LINK_STATE_L1_2_MASK (PCIE_LINK_STATE_L1_2 |\
+ PCIE_LINK_STATE_L1_2_PCIPM)
+#define PCIE_LINK_STATE_L1SS (PCIE_LINK_STATE_L1_1 |\
+ PCIE_LINK_STATE_L1_1_PCIPM |\
+ PCIE_LINK_STATE_L1_2_MASK)
struct pcie_link_state {
struct pci_dev *pdev; /* Upstream component of the Link */
@@ -275,10 +294,10 @@ static int policy_to_aspm_state(struct pcie_link_state *link)
return 0;
case POLICY_POWERSAVE:
/* Enable ASPM L0s/L1 */
- return (ASPM_STATE_L0S | ASPM_STATE_L1);
+ return PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1;
case POLICY_POWER_SUPERSAVE:
/* Enable Everything */
- return ASPM_STATE_ALL;
+ return PCIE_LINK_STATE_ASPM_ALL;
case POLICY_DEFAULT:
return link->aspm_default;
}
@@ -581,14 +600,14 @@ static void pcie_aspm_check_latency(struct pci_dev *endpoint)
latency_dw_l1 = calc_l1_latency(lnkcap_dw);
/* Check upstream direction L0s latency */
- if ((link->aspm_capable & ASPM_STATE_L0S_UP) &&
+ if ((link->aspm_capable & PCIE_LINK_STATE_L0S_UP) &&
(latency_up_l0s > acceptable_l0s))
- link->aspm_capable &= ~ASPM_STATE_L0S_UP;
+ link->aspm_capable &= ~PCIE_LINK_STATE_L0S_UP;
/* Check downstream direction L0s latency */
- if ((link->aspm_capable & ASPM_STATE_L0S_DW) &&
+ if ((link->aspm_capable & PCIE_LINK_STATE_L0S_DW) &&
(latency_dw_l0s > acceptable_l0s))
- link->aspm_capable &= ~ASPM_STATE_L0S_DW;
+ link->aspm_capable &= ~PCIE_LINK_STATE_L0S_DW;
/*
* Check L1 latency.
* Every switch on the path to root complex needs 1
@@ -603,9 +622,9 @@ static void pcie_aspm_check_latency(struct pci_dev *endpoint)
* substate latencies (and hence do not do any check).
*/
latency = max_t(u32, latency_up_l1, latency_dw_l1);
- if ((link->aspm_capable & ASPM_STATE_L1) &&
+ if ((link->aspm_capable & PCIE_LINK_STATE_L1) &&
(latency + l1_switch_latency > acceptable_l1))
- link->aspm_capable &= ~ASPM_STATE_L1;
+ link->aspm_capable &= ~PCIE_LINK_STATE_L1;
l1_switch_latency += NSEC_PER_USEC;
link = link->parent;
@@ -741,13 +760,13 @@ static void aspm_l1ss_init(struct pcie_link_state *link)
child_l1ss_cap &= ~PCI_L1SS_CAP_ASPM_L1_2;
if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_ASPM_L1_1)
- link->aspm_support |= ASPM_STATE_L1_1;
+ link->aspm_support |= PCIE_LINK_STATE_L1_1;
if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_ASPM_L1_2)
- link->aspm_support |= ASPM_STATE_L1_2;
+ link->aspm_support |= PCIE_LINK_STATE_L1_2;
if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_PCIPM_L1_1)
- link->aspm_support |= ASPM_STATE_L1_1_PCIPM;
+ link->aspm_support |= PCIE_LINK_STATE_L1_1_PCIPM;
if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_PCIPM_L1_2)
- link->aspm_support |= ASPM_STATE_L1_2_PCIPM;
+ link->aspm_support |= PCIE_LINK_STATE_L1_2_PCIPM;
if (parent_l1ss_cap)
pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
@@ -757,15 +776,15 @@ static void aspm_l1ss_init(struct pcie_link_state *link)
&child_l1ss_ctl1);
if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_ASPM_L1_1)
- link->aspm_enabled |= ASPM_STATE_L1_1;
+ link->aspm_enabled |= PCIE_LINK_STATE_L1_1;
if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_ASPM_L1_2)
- link->aspm_enabled |= ASPM_STATE_L1_2;
+ link->aspm_enabled |= PCIE_LINK_STATE_L1_2;
if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_PCIPM_L1_1)
- link->aspm_enabled |= ASPM_STATE_L1_1_PCIPM;
+ link->aspm_enabled |= PCIE_LINK_STATE_L1_1_PCIPM;
if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_PCIPM_L1_2)
- link->aspm_enabled |= ASPM_STATE_L1_2_PCIPM;
+ link->aspm_enabled |= PCIE_LINK_STATE_L1_2_PCIPM;
- if (link->aspm_support & ASPM_STATE_L1_2_MASK)
+ if (link->aspm_support & PCIE_LINK_STATE_L1_2_MASK)
aspm_calc_l12_info(link, parent_l1ss_cap, child_l1ss_cap);
}
@@ -778,8 +797,8 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
if (blacklist) {
/* Set enabled/disable so that we will disable ASPM later */
- link->aspm_enabled = ASPM_STATE_ALL;
- link->aspm_disable = ASPM_STATE_ALL;
+ link->aspm_enabled = PCIE_LINK_STATE_ASPM_ALL;
+ link->aspm_disable = PCIE_LINK_STATE_ASPM_ALL;
return;
}
@@ -806,6 +825,15 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &parent_lnkctl);
pcie_capability_read_word(child, PCI_EXP_LNKCTL, &child_lnkctl);
+ /* Disable L0s/L1 before updating L1SS config */
+ if (FIELD_GET(PCI_EXP_LNKCTL_ASPMC, child_lnkctl) ||
+ FIELD_GET(PCI_EXP_LNKCTL_ASPMC, parent_lnkctl)) {
+ pcie_capability_write_word(child, PCI_EXP_LNKCTL,
+ child_lnkctl & ~PCI_EXP_LNKCTL_ASPMC);
+ pcie_capability_write_word(parent, PCI_EXP_LNKCTL,
+ parent_lnkctl & ~PCI_EXP_LNKCTL_ASPMC);
+ }
+
/*
* Setup L0s state
*
@@ -814,22 +842,29 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
* support L0s.
*/
if (parent_lnkcap & child_lnkcap & PCI_EXP_LNKCAP_ASPM_L0S)
- link->aspm_support |= ASPM_STATE_L0S;
+ link->aspm_support |= PCIE_LINK_STATE_L0S;
if (child_lnkctl & PCI_EXP_LNKCTL_ASPM_L0S)
- link->aspm_enabled |= ASPM_STATE_L0S_UP;
+ link->aspm_enabled |= PCIE_LINK_STATE_L0S_UP;
if (parent_lnkctl & PCI_EXP_LNKCTL_ASPM_L0S)
- link->aspm_enabled |= ASPM_STATE_L0S_DW;
+ link->aspm_enabled |= PCIE_LINK_STATE_L0S_DW;
/* Setup L1 state */
if (parent_lnkcap & child_lnkcap & PCI_EXP_LNKCAP_ASPM_L1)
- link->aspm_support |= ASPM_STATE_L1;
+ link->aspm_support |= PCIE_LINK_STATE_L1;
if (parent_lnkctl & child_lnkctl & PCI_EXP_LNKCTL_ASPM_L1)
- link->aspm_enabled |= ASPM_STATE_L1;
+ link->aspm_enabled |= PCIE_LINK_STATE_L1;
aspm_l1ss_init(link);
+ /* Restore L0s/L1 if they were enabled */
+ if (FIELD_GET(PCI_EXP_LNKCTL_ASPMC, child_lnkctl) ||
+ FIELD_GET(PCI_EXP_LNKCTL_ASPMC, parent_lnkctl)) {
+ pcie_capability_write_word(parent, PCI_EXP_LNKCTL, parent_lnkctl);
+ pcie_capability_write_word(child, PCI_EXP_LNKCTL, child_lnkctl);
+ }
+
/* Save default state */
link->aspm_default = link->aspm_enabled;
@@ -846,25 +881,28 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
}
}
-/* Configure the ASPM L1 substates */
+/* Configure the ASPM L1 substates. Caller must disable L1 first. */
static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state)
{
- u32 val, enable_req;
+ u32 val;
struct pci_dev *child = link->downstream, *parent = link->pdev;
- enable_req = (link->aspm_enabled ^ state) & state;
+ val = 0;
+ if (state & PCIE_LINK_STATE_L1_1)
+ val |= PCI_L1SS_CTL1_ASPM_L1_1;
+ if (state & PCIE_LINK_STATE_L1_2)
+ val |= PCI_L1SS_CTL1_ASPM_L1_2;
+ if (state & PCIE_LINK_STATE_L1_1_PCIPM)
+ val |= PCI_L1SS_CTL1_PCIPM_L1_1;
+ if (state & PCIE_LINK_STATE_L1_2_PCIPM)
+ val |= PCI_L1SS_CTL1_PCIPM_L1_2;
/*
- * Here are the rules specified in the PCIe spec for enabling L1SS:
- * - When enabling L1.x, enable bit at parent first, then at child
- * - When disabling L1.x, disable bit at child first, then at parent
- * - When enabling ASPM L1.x, need to disable L1
- * (at child followed by parent).
- * - The ASPM/PCIPM L1.2 must be disabled while programming timing
+ * PCIe r6.2, sec 5.5.4, rules for enabling L1 PM Substates:
+ * - Clear L1.x enable bits at child first, then at parent
+ * - Set L1.x enable bits at parent first, then at child
+ * - ASPM/PCIPM L1.2 must be disabled while programming timing
* parameters
- *
- * To keep it simple, disable all L1SS bits first, and later enable
- * what is needed.
*/
/* Disable all L1 substates */
@@ -872,26 +910,6 @@ static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state)
PCI_L1SS_CTL1_L1SS_MASK, 0);
pci_clear_and_set_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
PCI_L1SS_CTL1_L1SS_MASK, 0);
- /*
- * If needed, disable L1, and it gets enabled later
- * in pcie_config_aspm_link().
- */
- if (enable_req & (ASPM_STATE_L1_1 | ASPM_STATE_L1_2)) {
- pcie_capability_clear_word(child, PCI_EXP_LNKCTL,
- PCI_EXP_LNKCTL_ASPM_L1);
- pcie_capability_clear_word(parent, PCI_EXP_LNKCTL,
- PCI_EXP_LNKCTL_ASPM_L1);
- }
-
- val = 0;
- if (state & ASPM_STATE_L1_1)
- val |= PCI_L1SS_CTL1_ASPM_L1_1;
- if (state & ASPM_STATE_L1_2)
- val |= PCI_L1SS_CTL1_ASPM_L1_2;
- if (state & ASPM_STATE_L1_1_PCIPM)
- val |= PCI_L1SS_CTL1_PCIPM_L1_1;
- if (state & ASPM_STATE_L1_2_PCIPM)
- val |= PCI_L1SS_CTL1_PCIPM_L1_2;
/* Enable what we need to enable */
pci_clear_and_set_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
@@ -916,43 +934,52 @@ static void pcie_config_aspm_link(struct pcie_link_state *link, u32 state)
state &= (link->aspm_capable & ~link->aspm_disable);
/* Can't enable any substates if L1 is not enabled */
- if (!(state & ASPM_STATE_L1))
- state &= ~ASPM_STATE_L1SS;
+ if (!(state & PCIE_LINK_STATE_L1))
+ state &= ~PCIE_LINK_STATE_L1SS;
/* Spec says both ports must be in D0 before enabling PCI PM substates */
if (parent->current_state != PCI_D0 || child->current_state != PCI_D0) {
- state &= ~ASPM_STATE_L1_SS_PCIPM;
- state |= (link->aspm_enabled & ASPM_STATE_L1_SS_PCIPM);
+ state &= ~PCIE_LINK_STATE_L1_SS_PCIPM;
+ state |= (link->aspm_enabled & PCIE_LINK_STATE_L1_SS_PCIPM);
}
/* Nothing to do if the link is already in the requested state */
if (link->aspm_enabled == state)
return;
/* Convert ASPM state to upstream/downstream ASPM register state */
- if (state & ASPM_STATE_L0S_UP)
+ if (state & PCIE_LINK_STATE_L0S_UP)
dwstream |= PCI_EXP_LNKCTL_ASPM_L0S;
- if (state & ASPM_STATE_L0S_DW)
+ if (state & PCIE_LINK_STATE_L0S_DW)
upstream |= PCI_EXP_LNKCTL_ASPM_L0S;
- if (state & ASPM_STATE_L1) {
+ if (state & PCIE_LINK_STATE_L1) {
upstream |= PCI_EXP_LNKCTL_ASPM_L1;
dwstream |= PCI_EXP_LNKCTL_ASPM_L1;
}
- if (link->aspm_capable & ASPM_STATE_L1SS)
- pcie_config_aspm_l1ss(link, state);
-
/*
- * Spec 2.0 suggests all functions should be configured the
- * same setting for ASPM. Enabling ASPM L1 should be done in
- * upstream component first and then downstream, and vice
- * versa for disabling ASPM L1. Spec doesn't mention L0S.
+ * Per PCIe r6.2, sec 5.5.4, setting either or both of the enable
+ * bits for ASPM L1 PM Substates must be done while ASPM L1 is
+ * disabled. Disable L1 here and apply new configuration after L1SS
+ * configuration has been completed.
+ *
+ * Per sec 7.5.3.7, when disabling ASPM L1, software must disable
+ * it in the Downstream component prior to disabling it in the
+ * Upstream component, and ASPM L1 must be enabled in the Upstream
+ * component prior to enabling it in the Downstream component.
+ *
+ * Sec 7.5.3.7 also recommends programming the same ASPM Control
+ * value for all functions of a multi-function device.
*/
- if (state & ASPM_STATE_L1)
- pcie_config_aspm_dev(parent, upstream);
+ list_for_each_entry(child, &linkbus->devices, bus_list)
+ pcie_config_aspm_dev(child, 0);
+ pcie_config_aspm_dev(parent, 0);
+
+ if (link->aspm_capable & PCIE_LINK_STATE_L1SS)
+ pcie_config_aspm_l1ss(link, state);
+
+ pcie_config_aspm_dev(parent, upstream);
list_for_each_entry(child, &linkbus->devices, bus_list)
pcie_config_aspm_dev(child, dwstream);
- if (!(state & ASPM_STATE_L1))
- pcie_config_aspm_dev(parent, upstream);
link->aspm_enabled = state;
@@ -1243,16 +1270,16 @@ void pcie_aspm_exit_link_state(struct pci_dev *pdev)
parent_link = link->parent;
/*
- * link->downstream is a pointer to the pci_dev of function 0. If
- * we remove that function, the pci_dev is about to be deallocated,
- * so we can't use link->downstream again. Free the link state to
- * avoid this.
+ * Free the parent link state no later than when function 0 (i.e.
+ * link->downstream) is removed.
*
- * If we're removing a non-0 function, it's possible we could
- * retain the link state, but PCIe r6.0, sec 7.5.3.7, recommends
- * programming the same ASPM Control value for all functions of
- * multi-function devices, so disable ASPM for all of them.
+ * Do not free the link state any earlier. If function 0 is a
+ * switch upstream port, this link state is parent_link to all
+ * subordinate ones.
*/
+ if (pdev != link->downstream)
+ goto out;
+
pcie_config_aspm_link(link, 0);
list_del(&link->sibling);
free_link_state(link);
@@ -1263,6 +1290,7 @@ void pcie_aspm_exit_link_state(struct pci_dev *pdev)
pcie_config_aspm_path(parent_link);
}
+ out:
mutex_unlock(&aspm_lock);
up_read(&pci_bus_sem);
}
@@ -1324,6 +1352,28 @@ static struct pcie_link_state *pcie_aspm_get_link(struct pci_dev *pdev)
return bridge->link_state;
}
+static u8 pci_calc_aspm_disable_mask(int state)
+{
+ state &= ~PCIE_LINK_STATE_CLKPM;
+
+ /* L1 PM substates require L1 */
+ if (state & PCIE_LINK_STATE_L1)
+ state |= PCIE_LINK_STATE_L1SS;
+
+ return state;
+}
+
+static u8 pci_calc_aspm_enable_mask(int state)
+{
+ state &= ~PCIE_LINK_STATE_CLKPM;
+
+ /* L1 PM substates require L1 */
+ if (state & PCIE_LINK_STATE_L1SS)
+ state |= PCIE_LINK_STATE_L1;
+
+ return state;
+}
+
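A quick worked example of the deliberate asymmetry between the two helpers above, using the PCIE_LINK_STATE_* masks from earlier in this patch (sketch only):

	/* Disabling L1 also knocks out every L1 PM substate ... */
	u8 off = pci_calc_aspm_disable_mask(PCIE_LINK_STATE_L1);
	/* off == PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_L1SS */

	/* ... while enabling a substate pulls in L1 itself. */
	u8 on = pci_calc_aspm_enable_mask(PCIE_LINK_STATE_L1_2);
	/* on == PCIE_LINK_STATE_L1_2 | PCIE_LINK_STATE_L1 */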
static int __pci_disable_link_state(struct pci_dev *pdev, int state, bool locked)
{
struct pcie_link_state *link = pcie_aspm_get_link(pdev);
@@ -1346,19 +1396,7 @@ static int __pci_disable_link_state(struct pci_dev *pdev, int state, bool locked
if (!locked)
down_read(&pci_bus_sem);
mutex_lock(&aspm_lock);
- if (state & PCIE_LINK_STATE_L0S)
- link->aspm_disable |= ASPM_STATE_L0S;
- if (state & PCIE_LINK_STATE_L1)
- /* L1 PM substates require L1 */
- link->aspm_disable |= ASPM_STATE_L1 | ASPM_STATE_L1SS;
- if (state & PCIE_LINK_STATE_L1_1)
- link->aspm_disable |= ASPM_STATE_L1_1;
- if (state & PCIE_LINK_STATE_L1_2)
- link->aspm_disable |= ASPM_STATE_L1_2;
- if (state & PCIE_LINK_STATE_L1_1_PCIPM)
- link->aspm_disable |= ASPM_STATE_L1_1_PCIPM;
- if (state & PCIE_LINK_STATE_L1_2_PCIPM)
- link->aspm_disable |= ASPM_STATE_L1_2_PCIPM;
+ link->aspm_disable |= pci_calc_aspm_disable_mask(state);
pcie_config_aspm_link(link, policy_to_aspm_state(link));
if (state & PCIE_LINK_STATE_CLKPM)
@@ -1414,20 +1452,7 @@ static int __pci_enable_link_state(struct pci_dev *pdev, int state, bool locked)
if (!locked)
down_read(&pci_bus_sem);
mutex_lock(&aspm_lock);
- link->aspm_default = 0;
- if (state & PCIE_LINK_STATE_L0S)
- link->aspm_default |= ASPM_STATE_L0S;
- if (state & PCIE_LINK_STATE_L1)
- link->aspm_default |= ASPM_STATE_L1;
- /* L1 PM substates require L1 */
- if (state & PCIE_LINK_STATE_L1_1)
- link->aspm_default |= ASPM_STATE_L1_1 | ASPM_STATE_L1;
- if (state & PCIE_LINK_STATE_L1_2)
- link->aspm_default |= ASPM_STATE_L1_2 | ASPM_STATE_L1;
- if (state & PCIE_LINK_STATE_L1_1_PCIPM)
- link->aspm_default |= ASPM_STATE_L1_1_PCIPM | ASPM_STATE_L1;
- if (state & PCIE_LINK_STATE_L1_2_PCIPM)
- link->aspm_default |= ASPM_STATE_L1_2_PCIPM | ASPM_STATE_L1;
+ link->aspm_default = pci_calc_aspm_enable_mask(state);
pcie_config_aspm_link(link, policy_to_aspm_state(link));
link->clkpm_default = (state & PCIE_LINK_STATE_CLKPM) ? 1 : 0;
@@ -1446,6 +1471,9 @@ static int __pci_enable_link_state(struct pci_dev *pdev, int state, bool locked)
* touch the LNKCTL register. Also note that this does not enable states
* disabled by pci_disable_link_state(). Return 0 or a negative errno.
*
+ * Note: Ensure devices are in D0 before enabling PCI-PM L1 PM Substates, per
+ * PCIe r6.0, sec 5.5.4.
+ *
* @pdev: PCI device
* @state: Mask of ASPM link states to enable
*/
@@ -1462,6 +1490,9 @@ EXPORT_SYMBOL(pci_enable_link_state);
* can't touch the LNKCTL register. Also note that this does not enable states
* disabled by pci_disable_link_state(). Return 0 or a negative errno.
*
+ * Note: Ensure devices are in D0 before enabling PCI-PM L1 PM Substates, per
+ * PCIe r6.0, sec 5.5.4.
+ *
* @pdev: PCI device
* @state: Mask of ASPM link states to enable
*
@@ -1563,12 +1594,12 @@ static ssize_t aspm_attr_store_common(struct device *dev,
if (state_enable) {
link->aspm_disable &= ~state;
/* need to enable L1 for substates */
- if (state & ASPM_STATE_L1SS)
- link->aspm_disable &= ~ASPM_STATE_L1;
+ if (state & PCIE_LINK_STATE_L1SS)
+ link->aspm_disable &= ~PCIE_LINK_STATE_L1;
} else {
link->aspm_disable |= state;
- if (state & ASPM_STATE_L1)
- link->aspm_disable |= ASPM_STATE_L1SS;
+ if (state & PCIE_LINK_STATE_L1)
+ link->aspm_disable |= PCIE_LINK_STATE_L1SS;
}
pcie_config_aspm_link(link, policy_to_aspm_state(link));
@@ -1582,12 +1613,12 @@ static ssize_t aspm_attr_store_common(struct device *dev,
#define ASPM_ATTR(_f, _s) \
static ssize_t _f##_show(struct device *dev, \
struct device_attribute *attr, char *buf) \
-{ return aspm_attr_show_common(dev, attr, buf, ASPM_STATE_##_s); } \
+{ return aspm_attr_show_common(dev, attr, buf, PCIE_LINK_STATE_##_s); } \
\
static ssize_t _f##_store(struct device *dev, \
struct device_attribute *attr, \
const char *buf, size_t len) \
-{ return aspm_attr_store_common(dev, attr, buf, len, ASPM_STATE_##_s); }
+{ return aspm_attr_store_common(dev, attr, buf, len, PCIE_LINK_STATE_##_s); }
ASPM_ATTR(l0s_aspm, L0S)
ASPM_ATTR(l1_aspm, L1)
@@ -1654,12 +1685,12 @@ static umode_t aspm_ctrl_attrs_are_visible(struct kobject *kobj,
struct pci_dev *pdev = to_pci_dev(dev);
struct pcie_link_state *link = pcie_aspm_get_link(pdev);
static const u8 aspm_state_map[] = {
- ASPM_STATE_L0S,
- ASPM_STATE_L1,
- ASPM_STATE_L1_1,
- ASPM_STATE_L1_2,
- ASPM_STATE_L1_1_PCIPM,
- ASPM_STATE_L1_2_PCIPM,
+ PCIE_LINK_STATE_L0S,
+ PCIE_LINK_STATE_L1,
+ PCIE_LINK_STATE_L1_1,
+ PCIE_LINK_STATE_L1_2,
+ PCIE_LINK_STATE_L1_1_PCIPM,
+ PCIE_LINK_STATE_L1_2_PCIPM,
};
if (aspm_disabled || !link)
diff --git a/drivers/pci/pcie/bwctrl.c b/drivers/pci/pcie/bwctrl.c
new file mode 100644
index 000000000000..d8d2aa85a229
--- /dev/null
+++ b/drivers/pci/pcie/bwctrl.c
@@ -0,0 +1,377 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * PCIe bandwidth controller
+ *
+ * Author: Alexandru Gagniuc <mr.nuke.me@gmail.com>
+ *
+ * Copyright (C) 2019 Dell Inc
+ * Copyright (C) 2023-2024 Intel Corporation
+ *
+ * The PCIe bandwidth controller provides a way to alter PCIe Link Speeds
+ * and notify the operating system when the Link Width or Speed changes. The
+ * notification capability is required for all Root Ports and Downstream
+ * Ports supporting Link Width wider than x1 and/or multiple Link Speeds.
+ *
+ * This service port driver hooks into the Bandwidth Notification interrupt
+ * watching for Link Width or Speed changes and for links becoming
+ * degraded in operation. It updates
+ * the cached Current Link Speed that is exposed to user space through sysfs.
+ */
+
+#define dev_fmt(fmt) "bwctrl: " fmt
+
+#include <linux/atomic.h>
+#include <linux/bitops.h>
+#include <linux/bits.h>
+#include <linux/cleanup.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/pci-bwctrl.h>
+#include <linux/rwsem.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include "../pci.h"
+#include "portdrv.h"
+
+/**
+ * struct pcie_bwctrl_data - PCIe bandwidth controller
+ * @set_speed_mutex: Serializes link speed changes
+ * @lbms_count: Count for LBMS (since last reset)
+ * @cdev: Thermal cooling device associated with the port
+ */
+struct pcie_bwctrl_data {
+ struct mutex set_speed_mutex;
+ atomic_t lbms_count;
+ struct thermal_cooling_device *cdev;
+};
+
+/*
+ * Prevent port removal during LBMS count accessor calls and Link Speed
+ * changes.
+ *
+ * These have to be differentiated because pcie_bwctrl_change_speed() calls
+ * pcie_retrain_link(), which uses the LBMS count reset accessor on success
+ * (using just one rwsem triggers "possible recursive locking detected"
+ * warning).
+ */
+static DECLARE_RWSEM(pcie_bwctrl_lbms_rwsem);
+static DECLARE_RWSEM(pcie_bwctrl_setspeed_rwsem);
+
+static bool pcie_valid_speed(enum pci_bus_speed speed)
+{
+ return (speed >= PCIE_SPEED_2_5GT) && (speed <= PCIE_SPEED_64_0GT);
+}
+
+static u16 pci_bus_speed2lnkctl2(enum pci_bus_speed speed)
+{
+ static const u8 speed_conv[] = {
+ [PCIE_SPEED_2_5GT] = PCI_EXP_LNKCTL2_TLS_2_5GT,
+ [PCIE_SPEED_5_0GT] = PCI_EXP_LNKCTL2_TLS_5_0GT,
+ [PCIE_SPEED_8_0GT] = PCI_EXP_LNKCTL2_TLS_8_0GT,
+ [PCIE_SPEED_16_0GT] = PCI_EXP_LNKCTL2_TLS_16_0GT,
+ [PCIE_SPEED_32_0GT] = PCI_EXP_LNKCTL2_TLS_32_0GT,
+ [PCIE_SPEED_64_0GT] = PCI_EXP_LNKCTL2_TLS_64_0GT,
+ };
+
+ if (WARN_ON_ONCE(!pcie_valid_speed(speed)))
+ return 0;
+
+ return speed_conv[speed];
+}
+
+static inline u16 pcie_supported_speeds2target_speed(u8 supported_speeds)
+{
+ return __fls(supported_speeds);
+}
+
+/**
+ * pcie_bwctrl_select_speed - Select Target Link Speed
+ * @port: PCIe Port
+ * @speed_req: Requested PCIe Link Speed
+ *
+ * Select Target Link Speed by taking into account the Supported Link Speeds of
+ * both the Root Port and the Endpoint.
+ *
+ * Return: Target Link Speed (1=2.5GT/s, 2=5GT/s, 3=8GT/s, etc.)
+ */
+static u16 pcie_bwctrl_select_speed(struct pci_dev *port, enum pci_bus_speed speed_req)
+{
+ struct pci_bus *bus = port->subordinate;
+ u8 desired_speeds, supported_speeds;
+ struct pci_dev *dev;
+
+ desired_speeds = GENMASK(pci_bus_speed2lnkctl2(speed_req),
+ __fls(PCI_EXP_LNKCAP2_SLS_2_5GB));
+
+ supported_speeds = port->supported_speeds;
+ if (bus) {
+ down_read(&pci_bus_sem);
+ dev = list_first_entry_or_null(&bus->devices, struct pci_dev, bus_list);
+ if (dev)
+ supported_speeds &= dev->supported_speeds;
+ up_read(&pci_bus_sem);
+ }
+ if (!supported_speeds)
+ supported_speeds = PCI_EXP_LNKCAP2_SLS_2_5GB;
+
+ return pcie_supported_speeds2target_speed(supported_speeds & desired_speeds);
+}
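To make the mask arithmetic concrete, a worked example assuming ->supported_speeds caches the PCI_EXP_LNKCAP2 Supported Link Speeds vector (bit 1 = 2.5GT/s, bit 2 = 5GT/s, and so on):

	/*
	 * speed_req = PCIE_SPEED_8_0GT, pci_bus_speed2lnkctl2() -> 3:
	 *   desired_speeds  = GENMASK(3, 1)  = 0b00001110
	 *   Port supports up to 16GT/s:        0b00011110
	 *   Endpoint supports up to 8GT/s:     0b00001110 (after the AND)
	 * __fls(0b00001110) = 3, i.e. Target Link Speed 3 = 8GT/s.
	 */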
+
+static int pcie_bwctrl_change_speed(struct pci_dev *port, u16 target_speed, bool use_lt)
+{
+ int ret;
+
+ ret = pcie_capability_clear_and_set_word(port, PCI_EXP_LNKCTL2,
+ PCI_EXP_LNKCTL2_TLS, target_speed);
+ if (ret != PCIBIOS_SUCCESSFUL)
+ return pcibios_err_to_errno(ret);
+
+ ret = pcie_retrain_link(port, use_lt);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Ensure the link speed is updated even on platforms that have
+ * problems with notifications.
+ */
+ if (port->subordinate)
+ pcie_update_link_speed(port->subordinate);
+
+ return 0;
+}
+
+/**
+ * pcie_set_target_speed - Set downstream Link Speed for PCIe Port
+ * @port: PCIe Port
+ * @speed_req: Requested PCIe Link Speed
+ * @use_lt: Wait for the LT or DLLLA bit to detect the end of link training
+ *
+ * Attempt to set PCIe Port Link Speed to @speed_req. @speed_req may be
+ * adjusted downwards to the best speed supported by both the Port and PCIe
+ * Device underneath it.
+ *
+ * Return:
+ * * 0 - on success
+ * * -EINVAL - @speed_req is not a PCIe Link Speed
+ * * -ENODEV - @port is not controllable
+ * * -ETIMEDOUT - changing Link Speed took too long
+ * * -EAGAIN - Link Speed was changed but @speed_req was not achieved
+ */
+int pcie_set_target_speed(struct pci_dev *port, enum pci_bus_speed speed_req,
+ bool use_lt)
+{
+ struct pci_bus *bus = port->subordinate;
+ u16 target_speed;
+ int ret;
+
+ if (WARN_ON_ONCE(!pcie_valid_speed(speed_req)))
+ return -EINVAL;
+
+ if (bus && bus->cur_bus_speed == speed_req)
+ return 0;
+
+ target_speed = pcie_bwctrl_select_speed(port, speed_req);
+
+ scoped_guard(rwsem_read, &pcie_bwctrl_setspeed_rwsem) {
+ struct pcie_bwctrl_data *data = port->link_bwctrl;
+
+ /*
+ * port->link_bwctrl is NULL during initial scan when called
+ * e.g. from the Target Speed quirk.
+ */
+ if (data)
+ mutex_lock(&data->set_speed_mutex);
+
+ ret = pcie_bwctrl_change_speed(port, target_speed, use_lt);
+
+ if (data)
+ mutex_unlock(&data->set_speed_mutex);
+ }
+
+ /*
+ * Despite setting a higher speed in the Target Link Speed field, an
+ * empty bus won't train to 5GT+ speeds.
+ */
+ if (!ret && bus && bus->cur_bus_speed != speed_req &&
+ !list_empty(&bus->devices))
+ ret = -EAGAIN;
+
+ return ret;
+}
+
+static void pcie_bwnotif_enable(struct pcie_device *srv)
+{
+ struct pcie_bwctrl_data *data = srv->port->link_bwctrl;
+ struct pci_dev *port = srv->port;
+ u16 link_status;
+ int ret;
+
+ /* Count LBMS seen so far as one */
+ ret = pcie_capability_read_word(port, PCI_EXP_LNKSTA, &link_status);
+ if (ret == PCIBIOS_SUCCESSFUL && link_status & PCI_EXP_LNKSTA_LBMS)
+ atomic_inc(&data->lbms_count);
+
+ pcie_capability_set_word(port, PCI_EXP_LNKCTL,
+ PCI_EXP_LNKCTL_LBMIE | PCI_EXP_LNKCTL_LABIE);
+ pcie_capability_write_word(port, PCI_EXP_LNKSTA,
+ PCI_EXP_LNKSTA_LBMS | PCI_EXP_LNKSTA_LABS);
+
+ /*
+ * Updating after enabling notifications & clearing status bits
+ * ensures the link speed is up to date.
+ */
+ pcie_update_link_speed(port->subordinate);
+}
+
+static void pcie_bwnotif_disable(struct pci_dev *port)
+{
+ pcie_capability_clear_word(port, PCI_EXP_LNKCTL,
+ PCI_EXP_LNKCTL_LBMIE | PCI_EXP_LNKCTL_LABIE);
+}
+
+static irqreturn_t pcie_bwnotif_irq(int irq, void *context)
+{
+ struct pcie_device *srv = context;
+ struct pcie_bwctrl_data *data = srv->port->link_bwctrl;
+ struct pci_dev *port = srv->port;
+ u16 link_status, events;
+ int ret;
+
+ ret = pcie_capability_read_word(port, PCI_EXP_LNKSTA, &link_status);
+ if (ret != PCIBIOS_SUCCESSFUL)
+ return IRQ_NONE;
+
+ events = link_status & (PCI_EXP_LNKSTA_LBMS | PCI_EXP_LNKSTA_LABS);
+ if (!events)
+ return IRQ_NONE;
+
+ if (events & PCI_EXP_LNKSTA_LBMS)
+ atomic_inc(&data->lbms_count);
+
+ pcie_capability_write_word(port, PCI_EXP_LNKSTA, events);
+
+ /*
+ * Interrupts will not be triggered from any further Link Speed
+ * change until LBMS is cleared by the write. Therefore, re-read the
+ * speed (inside pcie_update_link_speed()) after LBMS has been
+ * cleared to avoid missing link speed changes.
+ */
+ pcie_update_link_speed(port->subordinate);
+
+ return IRQ_HANDLED;
+}
+
+void pcie_reset_lbms_count(struct pci_dev *port)
+{
+ struct pcie_bwctrl_data *data;
+
+ guard(rwsem_read)(&pcie_bwctrl_lbms_rwsem);
+ data = port->link_bwctrl;
+ if (data)
+ atomic_set(&data->lbms_count, 0);
+ else
+ pcie_capability_write_word(port, PCI_EXP_LNKSTA,
+ PCI_EXP_LNKSTA_LBMS);
+}
+
+int pcie_lbms_count(struct pci_dev *port, unsigned long *val)
+{
+ struct pcie_bwctrl_data *data;
+
+ guard(rwsem_read)(&pcie_bwctrl_lbms_rwsem);
+ data = port->link_bwctrl;
+ if (!data)
+ return -ENOTTY;
+
+ *val = atomic_read(&data->lbms_count);
+
+ return 0;
+}
+
+static int pcie_bwnotif_probe(struct pcie_device *srv)
+{
+ struct pci_dev *port = srv->port;
+ int ret;
+
+ /* Can happen if we run out of bus numbers during enumeration. */
+ if (!port->subordinate)
+ return -ENODEV;
+
+ struct pcie_bwctrl_data *data = devm_kzalloc(&srv->device,
+ sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ ret = devm_mutex_init(&srv->device, &data->set_speed_mutex);
+ if (ret)
+ return ret;
+
+ scoped_guard(rwsem_write, &pcie_bwctrl_setspeed_rwsem) {
+ scoped_guard(rwsem_write, &pcie_bwctrl_lbms_rwsem) {
+ port->link_bwctrl = data;
+
+ ret = request_irq(srv->irq, pcie_bwnotif_irq,
+ IRQF_SHARED, "PCIe bwctrl", srv);
+ if (ret) {
+ port->link_bwctrl = NULL;
+ return ret;
+ }
+
+ pcie_bwnotif_enable(srv);
+ }
+ }
+
+ pci_dbg(port, "enabled with IRQ %d\n", srv->irq);
+
+ /* Don't fail on errors. Don't leave IS_ERR() "pointer" in ->cdev */
+ port->link_bwctrl->cdev = pcie_cooling_device_register(port);
+ if (IS_ERR(port->link_bwctrl->cdev))
+ port->link_bwctrl->cdev = NULL;
+
+ return 0;
+}
+
+static void pcie_bwnotif_remove(struct pcie_device *srv)
+{
+ struct pcie_bwctrl_data *data = srv->port->link_bwctrl;
+
+ pcie_cooling_device_unregister(data->cdev);
+
+ scoped_guard(rwsem_write, &pcie_bwctrl_setspeed_rwsem) {
+ scoped_guard(rwsem_write, &pcie_bwctrl_lbms_rwsem) {
+ pcie_bwnotif_disable(srv->port);
+
+ free_irq(srv->irq, srv);
+
+ srv->port->link_bwctrl = NULL;
+ }
+ }
+}
+
+static int pcie_bwnotif_suspend(struct pcie_device *srv)
+{
+ pcie_bwnotif_disable(srv->port);
+ return 0;
+}
+
+static int pcie_bwnotif_resume(struct pcie_device *srv)
+{
+ pcie_bwnotif_enable(srv);
+ return 0;
+}
+
+static struct pcie_port_service_driver pcie_bwctrl_driver = {
+ .name = "pcie_bwctrl",
+ .port_type = PCIE_ANY_PORT,
+ .service = PCIE_PORT_SERVICE_BWCTRL,
+ .probe = pcie_bwnotif_probe,
+ .suspend = pcie_bwnotif_suspend,
+ .resume = pcie_bwnotif_resume,
+ .remove = pcie_bwnotif_remove,
+};
+
+int __init pcie_bwctrl_init(void)
+{
+ return pcie_port_service_register(&pcie_bwctrl_driver);
+}
diff --git a/drivers/pci/pcie/dpc.c b/drivers/pci/pcie/dpc.c
index a668820696dc..df42f15c9829 100644
--- a/drivers/pci/pcie/dpc.c
+++ b/drivers/pci/pcie/dpc.c
@@ -190,7 +190,7 @@ out:
static void dpc_process_rp_pio_error(struct pci_dev *pdev)
{
u16 cap = pdev->dpc_cap, dpc_status, first_error;
- u32 status, mask, sev, syserr, exc, log, prefix;
+ u32 status, mask, sev, syserr, exc, log;
struct pcie_tlp_log tlp_log;
int i;
@@ -215,22 +215,20 @@ static void dpc_process_rp_pio_error(struct pci_dev *pdev)
first_error == i ? " (First)" : "");
}
- if (pdev->dpc_rp_log_size < 4)
+ if (pdev->dpc_rp_log_size < PCIE_STD_NUM_TLP_HEADERLOG)
goto clear_status;
- pcie_read_tlp_log(pdev, cap + PCI_EXP_DPC_RP_PIO_HEADER_LOG, &tlp_log);
- pci_err(pdev, "TLP Header: %#010x %#010x %#010x %#010x\n",
- tlp_log.dw[0], tlp_log.dw[1], tlp_log.dw[2], tlp_log.dw[3]);
-
- if (pdev->dpc_rp_log_size < 5)
+ pcie_read_tlp_log(pdev, cap + PCI_EXP_DPC_RP_PIO_HEADER_LOG,
+ cap + PCI_EXP_DPC_RP_PIO_TLPPREFIX_LOG,
+ dpc_tlp_log_len(pdev),
+ pdev->subordinate->flit_mode,
+ &tlp_log);
+ pcie_print_tlp_log(pdev, &tlp_log, dev_fmt(""));
+
+ if (pdev->dpc_rp_log_size < PCIE_STD_NUM_TLP_HEADERLOG + 1)
goto clear_status;
pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_IMPSPEC_LOG, &log);
pci_err(pdev, "RP PIO ImpSpec Log %#010x\n", log);
- for (i = 0; i < pdev->dpc_rp_log_size - 5; i++) {
- pci_read_config_dword(pdev,
- cap + PCI_EXP_DPC_RP_PIO_TLPPREFIX_LOG + i * 4, &prefix);
- pci_err(pdev, "TLP Prefix Header: dw%d, %#010x\n", i, prefix);
- }
clear_status:
pci_write_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_STATUS, status);
}
@@ -402,9 +400,21 @@ void pci_dpc_init(struct pci_dev *pdev)
/* Quirks may set dpc_rp_log_size if device or firmware is buggy */
if (!pdev->dpc_rp_log_size) {
+ u16 flags;
+ int ret;
+
+ ret = pcie_capability_read_word(pdev, PCI_EXP_FLAGS, &flags);
+ if (ret)
+ return;
+
pdev->dpc_rp_log_size =
FIELD_GET(PCI_EXP_DPC_RP_PIO_LOG_SIZE, cap);
- if (pdev->dpc_rp_log_size < 4 || pdev->dpc_rp_log_size > 9) {
+ if (FIELD_GET(PCI_EXP_FLAGS_FLIT, flags))
+ pdev->dpc_rp_log_size += FIELD_GET(PCI_EXP_DPC_RP_PIO_LOG_SIZE4,
+ cap) << 4;
+
+ if (pdev->dpc_rp_log_size < PCIE_STD_NUM_TLP_HEADERLOG ||
+ pdev->dpc_rp_log_size > PCIE_STD_MAX_TLP_HEADERLOG + 1) {
pci_err(pdev, "RP PIO log size %u is invalid\n",
pdev->dpc_rp_log_size);
pdev->dpc_rp_log_size = 0;
@@ -412,13 +422,44 @@ void pci_dpc_init(struct pci_dev *pdev)
}
}
+static void dpc_enable(struct pcie_device *dev)
+{
+ struct pci_dev *pdev = dev->port;
+ int dpc = pdev->dpc_cap;
+ u16 ctl;
+
+ /*
+ * Clear DPC Interrupt Status so we don't get an interrupt for an
+ * old event when setting DPC Interrupt Enable.
+ */
+ pci_write_config_word(pdev, dpc + PCI_EXP_DPC_STATUS,
+ PCI_EXP_DPC_STATUS_INTERRUPT);
+
+ pci_read_config_word(pdev, dpc + PCI_EXP_DPC_CTL, &ctl);
+ ctl &= ~PCI_EXP_DPC_CTL_EN_MASK;
+ ctl |= PCI_EXP_DPC_CTL_EN_FATAL | PCI_EXP_DPC_CTL_INT_EN;
+ pci_write_config_word(pdev, dpc + PCI_EXP_DPC_CTL, ctl);
+}
+
+static void dpc_disable(struct pcie_device *dev)
+{
+ struct pci_dev *pdev = dev->port;
+ int dpc = pdev->dpc_cap;
+ u16 ctl;
+
+ /* Disable DPC triggering and DPC interrupts */
+ pci_read_config_word(pdev, dpc + PCI_EXP_DPC_CTL, &ctl);
+ ctl &= ~(PCI_EXP_DPC_CTL_EN_FATAL | PCI_EXP_DPC_CTL_INT_EN);
+ pci_write_config_word(pdev, dpc + PCI_EXP_DPC_CTL, ctl);
+}
+
#define FLAG(x, y) (((x) & (y)) ? '+' : '-')
static int dpc_probe(struct pcie_device *dev)
{
struct pci_dev *pdev = dev->port;
struct device *device = &dev->device;
int status;
- u16 ctl, cap;
+ u16 cap;
if (!pcie_aer_is_native(pdev) && !pcie_ports_dpc_native)
return -ENOTSUPP;
@@ -433,11 +474,7 @@ static int dpc_probe(struct pcie_device *dev)
}
pci_read_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_CAP, &cap);
-
- pci_read_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_CTL, &ctl);
- ctl &= ~PCI_EXP_DPC_CTL_EN_MASK;
- ctl |= PCI_EXP_DPC_CTL_EN_FATAL | PCI_EXP_DPC_CTL_INT_EN;
- pci_write_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_CTL, ctl);
+ dpc_enable(dev);
pci_info(pdev, "enabled with IRQ %d\n", dev->irq);
pci_info(pdev, "error containment capabilities: Int Msg #%d, RPExt%c PoisonedTLP%c SwTrigger%c RP PIO Log %d, DL_ActiveErr%c\n",
@@ -450,14 +487,21 @@ static int dpc_probe(struct pcie_device *dev)
return status;
}
-static void dpc_remove(struct pcie_device *dev)
+static int dpc_suspend(struct pcie_device *dev)
{
- struct pci_dev *pdev = dev->port;
- u16 ctl;
+ dpc_disable(dev);
+ return 0;
+}
- pci_read_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_CTL, &ctl);
- ctl &= ~(PCI_EXP_DPC_CTL_EN_FATAL | PCI_EXP_DPC_CTL_INT_EN);
- pci_write_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_CTL, ctl);
+static int dpc_resume(struct pcie_device *dev)
+{
+ dpc_enable(dev);
+ return 0;
+}
+
+static void dpc_remove(struct pcie_device *dev)
+{
+ dpc_disable(dev);
}
static struct pcie_port_service_driver dpcdriver = {
@@ -465,6 +509,8 @@ static struct pcie_port_service_driver dpcdriver = {
.port_type = PCIE_ANY_PORT,
.service = PCIE_PORT_SERVICE_DPC,
.probe = dpc_probe,
+ .suspend = dpc_suspend,
+ .resume = dpc_resume,
.remove = dpc_remove,
};
diff --git a/drivers/pci/pcie/edr.c b/drivers/pci/pcie/edr.c
index 5f4914d313a1..e86298dbbcff 100644
--- a/drivers/pci/pcie/edr.c
+++ b/drivers/pci/pcie/edr.c
@@ -32,10 +32,10 @@ static int acpi_enable_dpc(struct pci_dev *pdev)
int status = 0;
/*
- * Behavior when calling unsupported _DSM functions is undefined,
- * so check whether EDR_PORT_DPC_ENABLE_DSM is supported.
+ * Per PCI Firmware r3.3, sec 4.6.12, EDR_PORT_DPC_ENABLE_DSM is
+ * optional. Return success if it's not implemented.
*/
- if (!acpi_check_dsm(adev->handle, &pci_acpi_dsm_guid, 5,
+ if (!acpi_check_dsm(adev->handle, &pci_acpi_dsm_guid, 6,
1ULL << EDR_PORT_DPC_ENABLE_DSM))
return 0;
@@ -46,12 +46,7 @@ static int acpi_enable_dpc(struct pci_dev *pdev)
argv4.package.count = 1;
argv4.package.elements = &req;
- /*
- * Per Downstream Port Containment Related Enhancements ECN to PCI
- * Firmware Specification r3.2, sec 4.6.12, EDR_PORT_DPC_ENABLE_DSM is
- * optional. Return success if it's not implemented.
- */
- obj = acpi_evaluate_dsm(adev->handle, &pci_acpi_dsm_guid, 5,
+ obj = acpi_evaluate_dsm(adev->handle, &pci_acpi_dsm_guid, 6,
EDR_PORT_DPC_ENABLE_DSM, &argv4);
if (!obj)
return 0;
@@ -85,8 +80,9 @@ static struct pci_dev *acpi_dpc_port_get(struct pci_dev *pdev)
u16 port;
/*
- * Behavior when calling unsupported _DSM functions is undefined,
- * so check whether EDR_PORT_DPC_ENABLE_DSM is supported.
+ * If EDR_PORT_LOCATE_DSM is not implemented under the target of
+ * EDR, the target is the port that experienced the containment
+ * event (PCI Firmware r3.3, sec 4.6.13).
*/
if (!acpi_check_dsm(adev->handle, &pci_acpi_dsm_guid, 5,
1ULL << EDR_PORT_LOCATE_DSM))
@@ -104,6 +100,16 @@ static struct pci_dev *acpi_dpc_port_get(struct pci_dev *pdev)
}
/*
+ * Bit 31 represents the success/failure of the operation. If bit
+ * 31 is set, the operation failed.
+ */
+ if (obj->integer.value & BIT(31)) {
+ ACPI_FREE(obj);
+ pci_err(pdev, "Locate Port _DSM failed\n");
+ return NULL;
+ }
+
+ /*
* Firmware returns DPC port BDF details in following format:
* 15:8 = bus
* 7:3 = device
diff --git a/drivers/pci/pcie/err.c b/drivers/pci/pcie/err.c
index 705893b5f7b0..31090770fffc 100644
--- a/drivers/pci/pcie/err.c
+++ b/drivers/pci/pcie/err.c
@@ -116,9 +116,7 @@ static int report_mmio_enabled(struct pci_dev *dev, void *data)
device_lock(&dev->dev);
pdrv = dev->driver;
- if (!pdrv ||
- !pdrv->err_handler ||
- !pdrv->err_handler->mmio_enabled)
+ if (!pdrv || !pdrv->err_handler || !pdrv->err_handler->mmio_enabled)
goto out;
err_handler = pdrv->err_handler;
@@ -137,9 +135,7 @@ static int report_slot_reset(struct pci_dev *dev, void *data)
device_lock(&dev->dev);
pdrv = dev->driver;
- if (!pdrv ||
- !pdrv->err_handler ||
- !pdrv->err_handler->slot_reset)
+ if (!pdrv || !pdrv->err_handler || !pdrv->err_handler->slot_reset)
goto out;
err_handler = pdrv->err_handler;
@@ -158,9 +154,7 @@ static int report_resume(struct pci_dev *dev, void *data)
device_lock(&dev->dev);
pdrv = dev->driver;
if (!pci_dev_set_io_state(dev, pci_channel_io_normal) ||
- !pdrv ||
- !pdrv->err_handler ||
- !pdrv->err_handler->resume)
+ !pdrv || !pdrv->err_handler || !pdrv->err_handler->resume)
goto out;
err_handler = pdrv->err_handler;
diff --git a/drivers/pci/pcie/portdrv.c b/drivers/pci/pcie/portdrv.c
index 14a4b89a3b83..e8318fd5f6ed 100644
--- a/drivers/pci/pcie/portdrv.c
+++ b/drivers/pci/pcie/portdrv.c
@@ -68,7 +68,7 @@ static int pcie_message_numbers(struct pci_dev *dev, int mask,
*/
if (mask & (PCIE_PORT_SERVICE_PME | PCIE_PORT_SERVICE_HP |
- PCIE_PORT_SERVICE_BWNOTIF)) {
+ PCIE_PORT_SERVICE_BWCTRL)) {
pcie_capability_read_word(dev, PCI_EXP_FLAGS, &reg16);
*pme = FIELD_GET(PCI_EXP_FLAGS_IRQ, reg16);
nvec = *pme + 1;
@@ -150,11 +150,11 @@ static int pcie_port_enable_irq_vec(struct pci_dev *dev, int *irqs, int mask)
/* PME, hotplug and bandwidth notification share an MSI/MSI-X vector */
if (mask & (PCIE_PORT_SERVICE_PME | PCIE_PORT_SERVICE_HP |
- PCIE_PORT_SERVICE_BWNOTIF)) {
+ PCIE_PORT_SERVICE_BWCTRL)) {
pcie_irq = pci_irq_vector(dev, pme);
irqs[PCIE_PORT_SERVICE_PME_SHIFT] = pcie_irq;
irqs[PCIE_PORT_SERVICE_HP_SHIFT] = pcie_irq;
- irqs[PCIE_PORT_SERVICE_BWNOTIF_SHIFT] = pcie_irq;
+ irqs[PCIE_PORT_SERVICE_BWCTRL_SHIFT] = pcie_irq;
}
if (mask & PCIE_PORT_SERVICE_AER)
@@ -187,15 +187,15 @@ static int pcie_init_service_irqs(struct pci_dev *dev, int *irqs, int mask)
* interrupt.
*/
if ((mask & PCIE_PORT_SERVICE_PME) && pcie_pme_no_msi())
- goto legacy_irq;
+ goto intx_irq;
/* Try to use MSI-X or MSI if supported */
if (pcie_port_enable_irq_vec(dev, irqs, mask) == 0)
return 0;
-legacy_irq:
- /* fall back to legacy IRQ */
- ret = pci_alloc_irq_vectors(dev, 1, 1, PCI_IRQ_LEGACY);
+intx_irq:
+ /* fall back to INTx IRQ */
+ ret = pci_alloc_irq_vectors(dev, 1, 1, PCI_IRQ_INTX);
if (ret < 0)
return -ENODEV;
@@ -228,10 +228,12 @@ static int get_port_device_capability(struct pci_dev *dev)
/*
* Disable hot-plug interrupts in case they have been enabled
- * by the BIOS and the hot-plug service driver is not loaded.
+ * by the BIOS and the hot-plug service driver won't be loaded
+ * to handle them.
*/
- pcie_capability_clear_word(dev, PCI_EXP_SLTCTL,
- PCI_EXP_SLTCTL_CCIE | PCI_EXP_SLTCTL_HPIE);
+ if (!IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE))
+ pcie_capability_clear_word(dev, PCI_EXP_SLTCTL,
+ PCI_EXP_SLTCTL_CCIE | PCI_EXP_SLTCTL_HPIE);
}
#ifdef CONFIG_PCIEAER
@@ -265,13 +267,15 @@ static int get_port_device_capability(struct pci_dev *dev)
(pcie_ports_dpc_native || (services & PCIE_PORT_SERVICE_AER)))
services |= PCIE_PORT_SERVICE_DPC;
+ /* Enable bandwidth control if more than one speed is supported. */
if (pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM ||
pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) {
u32 linkcap;
pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &linkcap);
- if (linkcap & PCI_EXP_LNKCAP_LBNC)
- services |= PCIE_PORT_SERVICE_BWNOTIF;
+ if (linkcap & PCI_EXP_LNKCAP_LBNC &&
+ hweight8(dev->supported_speeds) > 1)
+ services |= PCIE_PORT_SERVICE_BWCTRL;
}
return services;
@@ -786,7 +790,7 @@ static const struct pci_error_handlers pcie_portdrv_err_handler = {
static struct pci_driver pcie_portdriver = {
.name = "pcieport",
- .id_table = &port_pci_ids[0],
+ .id_table = port_pci_ids,
.probe = pcie_portdrv_probe,
.remove = pcie_portdrv_remove,
@@ -828,6 +832,7 @@ static void __init pcie_init_services(void)
pcie_aer_init();
pcie_pme_init();
pcie_dpc_init();
+ pcie_bwctrl_init();
pcie_hp_init();
}
diff --git a/drivers/pci/pcie/portdrv.h b/drivers/pci/pcie/portdrv.h
index 12c89ea0313b..bd29d1cc7b8b 100644
--- a/drivers/pci/pcie/portdrv.h
+++ b/drivers/pci/pcie/portdrv.h
@@ -20,8 +20,8 @@
#define PCIE_PORT_SERVICE_HP (1 << PCIE_PORT_SERVICE_HP_SHIFT)
#define PCIE_PORT_SERVICE_DPC_SHIFT 3 /* Downstream Port Containment */
#define PCIE_PORT_SERVICE_DPC (1 << PCIE_PORT_SERVICE_DPC_SHIFT)
-#define PCIE_PORT_SERVICE_BWNOTIF_SHIFT 4 /* Bandwidth notification */
-#define PCIE_PORT_SERVICE_BWNOTIF (1 << PCIE_PORT_SERVICE_BWNOTIF_SHIFT)
+#define PCIE_PORT_SERVICE_BWCTRL_SHIFT 4 /* Bandwidth Controller (notifications) */
+#define PCIE_PORT_SERVICE_BWCTRL (1 << PCIE_PORT_SERVICE_BWCTRL_SHIFT)
#define PCIE_PORT_DEVICE_MAXSERVICES 5
@@ -51,6 +51,8 @@ int pcie_dpc_init(void);
static inline int pcie_dpc_init(void) { return 0; }
#endif
+int pcie_bwctrl_init(void);
+
/* Port Type */
#define PCIE_ANY_PORT (~0)
diff --git a/drivers/pci/pcie/tlp.c b/drivers/pci/pcie/tlp.c
new file mode 100644
index 000000000000..890d5391d7f5
--- /dev/null
+++ b/drivers/pci/pcie/tlp.c
@@ -0,0 +1,135 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe TLP Log handling
+ *
+ * Copyright (C) 2024 Intel Corporation
+ */
+
+#include <linux/aer.h>
+#include <linux/array_size.h>
+#include <linux/bitfield.h>
+#include <linux/pci.h>
+#include <linux/string.h>
+
+#include "../pci.h"
+
+/**
+ * aer_tlp_log_len - Calculate AER Capability TLP Header/Prefix Log length
+ * @dev: PCIe device
+ * @aercc: AER Capabilities and Control register value
+ *
+ * Return: TLP Header/Prefix Log length
+ */
+unsigned int aer_tlp_log_len(struct pci_dev *dev, u32 aercc)
+{
+ if (aercc & PCI_ERR_CAP_TLP_LOG_FLIT)
+ return FIELD_GET(PCI_ERR_CAP_TLP_LOG_SIZE, aercc);
+
+ return PCIE_STD_NUM_TLP_HEADERLOG +
+ ((aercc & PCI_ERR_CAP_PREFIX_LOG_PRESENT) ?
+ dev->eetlp_prefix_max : 0);
+}
+
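A worked example of the non-Flit arithmetic, on the assumption that dev->eetlp_prefix_max holds the device's Max End-End TLP Prefixes value:

	/*
	 * Flit bit clear, PCI_ERR_CAP_PREFIX_LOG_PRESENT set,
	 * dev->eetlp_prefix_max == 2:
	 *   aer_tlp_log_len() = 4 header DWORDs + 2 prefix DWORDs = 6
	 * Flit bit set: the length instead comes straight from the
	 * PCI_ERR_CAP_TLP_LOG_SIZE field.
	 */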
+#ifdef CONFIG_PCIE_DPC
+/**
+ * dpc_tlp_log_len - Calculate DPC RP PIO TLP Header/Prefix Log length
+ * @dev: PCIe device
+ *
+ * Return: TLP Header/Prefix Log length
+ */
+unsigned int dpc_tlp_log_len(struct pci_dev *dev)
+{
+ /* Remove ImpSpec Log register from the count */
+ if (dev->dpc_rp_log_size >= PCIE_STD_NUM_TLP_HEADERLOG + 1)
+ return dev->dpc_rp_log_size - 1;
+
+ return dev->dpc_rp_log_size;
+}
+#endif
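The DPC variant differs because the RP PIO Log Size field also counts the ImpSpec Log register; a worked example (values hypothetical):

	/*
	 * dpc_rp_log_size == 9, i.e. 4 header DWORDs + ImpSpec Log +
	 * 4 prefix DWORDs -> dpc_tlp_log_len() == 8, which is the
	 * @tlp_len that pcie_read_tlp_log() consumes.
	 */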
+
+/**
+ * pcie_read_tlp_log - read TLP Header Log
+ * @dev: PCIe device
+ * @where: PCI Config offset of TLP Header Log
+ * @where2: PCI Config offset of TLP Prefix Log
+ * @tlp_len: TLP Log length (Header Log + TLP Prefix Log in DWORDs)
+ * @flit: TLP Logged in Flit mode
+ * @log: TLP Log structure to fill
+ *
+ * Fill @log from TLP Header Log registers, e.g., AER or DPC.
+ *
+ * Return: 0 on success and filled TLP Log structure, <0 on error.
+ */
+int pcie_read_tlp_log(struct pci_dev *dev, int where, int where2,
+ unsigned int tlp_len, bool flit, struct pcie_tlp_log *log)
+{
+ unsigned int i;
+ int off, ret;
+
+ if (tlp_len > ARRAY_SIZE(log->dw))
+ tlp_len = ARRAY_SIZE(log->dw);
+
+ memset(log, 0, sizeof(*log));
+
+ for (i = 0; i < tlp_len; i++) {
+ if (i < PCIE_STD_NUM_TLP_HEADERLOG)
+ off = where + i * 4;
+ else
+ off = where2 + (i - PCIE_STD_NUM_TLP_HEADERLOG) * 4;
+
+ ret = pci_read_config_dword(dev, off, &log->dw[i]);
+ if (ret)
+ return pcibios_err_to_errno(ret);
+ }
+
+ /*
+ * Hard-code non-Flit mode to 4 DWORDs, for now. The exact length
+ * can only be known if the TLP is parsed.
+ */
+ log->header_len = flit ? tlp_len : 4;
+ log->flit = flit;
+
+ return 0;
+}
+
+#define EE_PREFIX_STR " E-E Prefixes:"
+
+/**
+ * pcie_print_tlp_log - Print TLP Header / Prefix Log contents
+ * @dev: PCIe device
+ * @log: TLP Log structure
+ * @pfx: String prefix
+ *
+ * Prints TLP Header and Prefix Log information held by @log.
+ */
+void pcie_print_tlp_log(const struct pci_dev *dev,
+ const struct pcie_tlp_log *log, const char *pfx)
+{
+ /* EE_PREFIX_STR fits the extended DW space needed for the Flit mode */
+ char buf[11 * PCIE_STD_MAX_TLP_HEADERLOG + 1];
+ unsigned int i;
+ int len;
+
+ len = scnprintf(buf, sizeof(buf), "%#010x %#010x %#010x %#010x",
+ log->dw[0], log->dw[1], log->dw[2], log->dw[3]);
+
+ if (log->flit) {
+ for (i = PCIE_STD_NUM_TLP_HEADERLOG; i < log->header_len; i++) {
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ " %#010x", log->dw[i]);
+ }
+ } else {
+ if (log->prefix[0])
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ EE_PREFIX_STR);
+ for (i = 0; i < ARRAY_SIZE(log->prefix); i++) {
+ if (!log->prefix[i])
+ break;
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ " %#010x", log->prefix[i]);
+ }
+ }
+
+ pci_err(dev, "%sTLP Header%s: %s\n", pfx,
+ log->flit ? " (Flit)" : "", buf);
+}
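
Taken together, an AER-side caller would size, read, and print the log roughly
as follows. This is a hedged sketch: the register offsets come from
<uapi/linux/pci_regs.h>, and the real call sites live in the AER/DPC code.

    static void sketch_dump_aer_tlp(struct pci_dev *dev)
    {
            struct pcie_tlp_log log;
            int aer = dev->aer_cap;
            u32 aercc;
            bool flit;

            if (!aer)
                    return;

            pci_read_config_dword(dev, aer + PCI_ERR_CAP, &aercc);
            flit = aercc & PCI_ERR_CAP_TLP_LOG_FLIT;

            if (!pcie_read_tlp_log(dev, aer + PCI_ERR_HEADER_LOG,
                                   aer + PCI_ERR_PREFIX_LOG,
                                   aer_tlp_log_len(dev, aercc), flit, &log))
                    pcie_print_tlp_log(dev, &log, "  ");
    }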
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 1325fbae2f28..364fa2a514f8 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -9,6 +9,8 @@
#include <linux/pci.h>
#include <linux/msi.h>
#include <linux/of_pci.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/pci_hotplug.h>
#include <linux/slab.h>
#include <linux/module.h>
@@ -95,7 +97,7 @@ static void release_pcibus_dev(struct device *dev)
kfree(pci_bus);
}
-static struct class pcibus_class = {
+static const struct class pcibus_class = {
.name = "pci_bus",
.dev_release = &release_pcibus_dev,
.dev_groups = pcibus_groups,
@@ -165,40 +167,66 @@ static inline unsigned long decode_bar(struct pci_dev *dev, u32 bar)
#define PCI_COMMAND_DECODE_ENABLE (PCI_COMMAND_MEMORY | PCI_COMMAND_IO)
/**
+ * __pci_size_bars - Read the raw BAR mask for a range of PCI BARs
+ * @dev: the PCI device
+ * @count: number of BARs to size
+ * @pos: starting config space position
+ * @sizes: array to store mask values
+ * @rom: indicate whether to use ROM mask, which avoids enabling ROM BARs
+ *
+ * The provided @sizes array must be large enough to store results for
+ * @count u32 BARs. The caller is responsible for disabling decode on the
+ * specified BAR range around calls to this function. Sizing the BARs as a
+ * batch avoids toggling decode around each BAR individually, which can
+ * incur non-trivial overhead in virtualized environments with very large
+ * PCI BARs.
+ */
+static void __pci_size_bars(struct pci_dev *dev, int count,
+ unsigned int pos, u32 *sizes, bool rom)
+{
+ u32 orig, mask = rom ? PCI_ROM_ADDRESS_MASK : ~0;
+ int i;
+
+ for (i = 0; i < count; i++, pos += 4, sizes++) {
+ pci_read_config_dword(dev, pos, &orig);
+ pci_write_config_dword(dev, pos, mask);
+ pci_read_config_dword(dev, pos, sizes);
+ pci_write_config_dword(dev, pos, orig);
+ }
+}
+
+void __pci_size_stdbars(struct pci_dev *dev, int count,
+ unsigned int pos, u32 *sizes)
+{
+ __pci_size_bars(dev, count, pos, sizes, false);
+}
+
+static void __pci_size_rom(struct pci_dev *dev, unsigned int pos, u32 *sizes)
+{
+ __pci_size_bars(dev, 1, pos, sizes, true);
+}
+
+/**
* __pci_read_base - Read a PCI BAR
* @dev: the PCI device
* @type: type of the BAR
* @res: resource buffer to be filled in
* @pos: BAR position in the config space
+ * @sizes: array of one or more pre-read BAR masks
*
* Returns 1 if the BAR is 64-bit, or 0 if 32-bit.
*/
int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
- struct resource *res, unsigned int pos)
+ struct resource *res, unsigned int pos, u32 *sizes)
{
- u32 l = 0, sz = 0, mask;
+ u32 l = 0, sz;
u64 l64, sz64, mask64;
- u16 orig_cmd;
struct pci_bus_region region, inverted_region;
const char *res_name = pci_resource_name(dev, res - dev->resource);
- mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
-
- /* No printks while decoding is disabled! */
- if (!dev->mmio_always_on) {
- pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
- if (orig_cmd & PCI_COMMAND_DECODE_ENABLE) {
- pci_write_config_word(dev, PCI_COMMAND,
- orig_cmd & ~PCI_COMMAND_DECODE_ENABLE);
- }
- }
-
res->name = pci_name(dev);
pci_read_config_dword(dev, pos, &l);
- pci_write_config_dword(dev, pos, l | mask);
- pci_read_config_dword(dev, pos, &sz);
- pci_write_config_dword(dev, pos, l);
+ sz = sizes[0];
/*
* All bits set in sz means the device isn't working properly.
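
For reference, decoding a size from the raw mask that __pci_size_stdbars()
captures follows the usual BAR-sizing arithmetic. A rough standalone
illustration for a 32-bit memory BAR (not part of the patch):

    static u32 sketch_bar_size(u32 sz)
    {
            sz &= PCI_BASE_ADDRESS_MEM_MASK;  /* drop the low flag bits */
            return sz ? ~sz + 1 : 0;          /* two's complement == size */
    }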
@@ -238,18 +266,13 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
if (res->flags & IORESOURCE_MEM_64) {
pci_read_config_dword(dev, pos + 4, &l);
- pci_write_config_dword(dev, pos + 4, ~0);
- pci_read_config_dword(dev, pos + 4, &sz);
- pci_write_config_dword(dev, pos + 4, l);
+ sz = sizes[1];
l64 |= ((u64)l << 32);
sz64 |= ((u64)sz << 32);
mask64 |= ((u64)~0 << 32);
}
- if (!dev->mmio_always_on && (orig_cmd & PCI_COMMAND_DECODE_ENABLE))
- pci_write_config_word(dev, PCI_COMMAND, orig_cmd);
-
if (!sz64)
goto fail;
@@ -318,9 +341,14 @@ out:
return (res->flags & IORESOURCE_MEM_64) ? 1 : 0;
}
-static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
+static __always_inline void pci_read_bases(struct pci_dev *dev,
+ unsigned int howmany, int rom)
{
+ u32 rombar, stdbars[PCI_STD_NUM_BARS];
unsigned int pos, reg;
+ u16 orig_cmd;
+
+ BUILD_BUG_ON(statically_true(howmany > PCI_STD_NUM_BARS));
if (dev->non_compliant_bars)
return;
@@ -329,10 +357,28 @@ static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
if (dev->is_virtfn)
return;
+ /* No printks while decoding is disabled! */
+ if (!dev->mmio_always_on) {
+ pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
+ if (orig_cmd & PCI_COMMAND_DECODE_ENABLE) {
+ pci_write_config_word(dev, PCI_COMMAND,
+ orig_cmd & ~PCI_COMMAND_DECODE_ENABLE);
+ }
+ }
+
+ __pci_size_stdbars(dev, howmany, PCI_BASE_ADDRESS_0, stdbars);
+ if (rom)
+ __pci_size_rom(dev, rom, &rombar);
+
+ if (!dev->mmio_always_on &&
+ (orig_cmd & PCI_COMMAND_DECODE_ENABLE))
+ pci_write_config_word(dev, PCI_COMMAND, orig_cmd);
+
for (pos = 0; pos < howmany; pos++) {
struct resource *res = &dev->resource[pos];
reg = PCI_BASE_ADDRESS_0 + (pos << 2);
- pos += __pci_read_base(dev, pci_bar_unknown, res, reg);
+ pos += __pci_read_base(dev, pci_bar_unknown,
+ res, reg, &stdbars[pos]);
}
if (rom) {
@@ -340,7 +386,7 @@ static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
dev->rom_base_reg = rom;
res->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH |
IORESOURCE_READONLY | IORESOURCE_SIZEALIGN;
- __pci_read_base(dev, pci_bar_mem32, res, rom);
+ __pci_read_base(dev, pci_bar_mem32, res, rom, &rombar);
}
}
@@ -543,15 +589,15 @@ void pci_read_bridge_bases(struct pci_bus *child)
pci_read_bridge_mmio(child->self, child->resource[1], false);
pci_read_bridge_mmio_pref(child->self, child->resource[2], false);
- if (dev->transparent) {
- pci_bus_for_each_resource(child->parent, res) {
- if (res && res->flags) {
- pci_bus_add_resource(child, res,
- PCI_SUBTRACTIVE_DECODE);
- pci_info(dev, " bridge window %pR (subtractive decode)\n",
- res);
- }
- }
+ if (!dev->transparent)
+ return;
+
+ pci_bus_for_each_resource(child->parent, res) {
+ if (!res || !res->flags)
+ continue;
+
+ pci_bus_add_resource(child, res);
+ pci_info(dev, " bridge window %pR (subtractive decode)\n", res);
}
}
@@ -742,9 +788,14 @@ const char *pci_speed_string(enum pci_bus_speed speed)
}
EXPORT_SYMBOL_GPL(pci_speed_string);
-void pcie_update_link_speed(struct pci_bus *bus, u16 linksta)
+void pcie_update_link_speed(struct pci_bus *bus)
{
- bus->cur_bus_speed = pcie_link_speed[linksta & PCI_EXP_LNKSTA_CLS];
+ struct pci_dev *bridge = bus->self;
+ u16 linksta, linksta2;
+
+ pcie_capability_read_word(bridge, PCI_EXP_LNKSTA, &linksta);
+ pcie_capability_read_word(bridge, PCI_EXP_LNKSTA2, &linksta2);
+ __pcie_update_link_speed(bus, linksta, linksta2);
}
EXPORT_SYMBOL_GPL(pcie_update_link_speed);
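
__pcie_update_link_speed() is introduced elsewhere in this series; judging by
the removed line above, the current-speed part of the decode amounts to:

    /* sketch: what __pcie_update_link_speed() is expected to reduce to for
     * the current link speed (LNKSTA2 supplies additional rate details) */
    bus->cur_bus_speed = pcie_link_speed[linksta & PCI_EXP_LNKSTA_CLS];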
@@ -827,13 +878,11 @@ static void pci_set_bus_speed(struct pci_bus *bus)
if (pci_is_pcie(bridge)) {
u32 linkcap;
- u16 linksta;
pcie_capability_read_dword(bridge, PCI_EXP_LNKCAP, &linkcap);
bus->max_bus_speed = pcie_link_speed[linkcap & PCI_EXP_LNKCAP_SLS];
- pcie_capability_read_word(bridge, PCI_EXP_LNKSTA, &linksta);
- pcie_update_link_speed(bus, linksta);
+ pcie_update_link_speed(bus);
}
}
@@ -889,6 +938,17 @@ static void pci_set_bus_msi_domain(struct pci_bus *bus)
dev_set_msi_domain(&bus->dev, d);
}
+static bool pci_preserve_config(struct pci_host_bridge *host_bridge)
+{
+ if (pci_acpi_preserve_config(host_bridge))
+ return true;
+
+ if (host_bridge->dev.parent && host_bridge->dev.parent->of_node)
+ return of_pci_preserve_config(host_bridge->dev.parent->of_node);
+
+ return false;
+}
+
static int pci_register_host_bridge(struct pci_host_bridge *bridge)
{
struct device *parent = bridge->dev.parent;
@@ -897,6 +957,7 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge)
resource_size_t offset, next_offset;
LIST_HEAD(resources);
struct resource *res, *next_res;
+ bool bus_registered = false;
char addr[64], *fmt;
const char *name;
int err;
@@ -939,10 +1000,9 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge)
/* Temporarily move resources off the list */
list_splice_init(&bridge->windows, &resources);
err = device_add(&bridge->dev);
- if (err) {
- put_device(&bridge->dev);
+ if (err)
goto free;
- }
+
bus->bridge = get_device(&bridge->dev);
device_enable_async_suspend(bus->bridge);
pci_set_bus_of_node(bus);
@@ -961,6 +1021,7 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge)
name = dev_name(&bus->dev);
err = device_register(&bus->dev);
+ bus_registered = true;
if (err)
goto unregister;
@@ -983,6 +1044,9 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge)
if (nr_node_ids > 1 && pcibus_to_node(bus) == NUMA_NO_NODE)
dev_warn(&bus->dev, "Unknown NUMA node; performance will be reduced\n");
+ /* Check whether the boot configuration done by firmware needs to be preserved */
+ bridge->preserve_config = pci_preserve_config(bridge);
+
/* Coalesce contiguous windows */
resource_list_for_each_entry_safe(window, n, &resources) {
if (list_is_last(&window->node, &resources))
@@ -1018,7 +1082,7 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge)
if (res->flags & IORESOURCE_BUS)
pci_bus_insert_busn_res(bus, bus->number, res->end);
else
- pci_bus_add_resource(bus, res, 0);
+ pci_bus_add_resource(bus, res);
if (offset) {
if (resource_type(res) == IORESOURCE_IO)
@@ -1035,6 +1099,8 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge)
dev_info(&bus->dev, "root bus resource %pR%s\n", res, addr);
}
+ of_pci_make_host_bridge_node(bridge);
+
down_write(&pci_bus_sem);
list_add_tail(&bus->node, &pci_root_buses);
up_write(&pci_bus_sem);
@@ -1044,12 +1110,15 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge)
unregister:
put_device(&bridge->dev);
device_del(&bridge->dev);
-
free:
#ifdef CONFIG_PCI_DOMAINS_GENERIC
- pci_bus_release_domain_nr(bus, parent);
+ pci_bus_release_domain_nr(parent, bus->domain_nr);
#endif
- kfree(bus);
+ if (bus_registered)
+ put_device(&bus->dev);
+ else
+ kfree(bus);
+
return err;
}
@@ -1158,7 +1227,10 @@ static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
add_dev:
pci_set_bus_msi_domain(child);
ret = device_register(&child->dev);
- WARN_ON(ret < 0);
+ if (WARN_ON(ret < 0)) {
+ put_device(&child->dev);
+ return NULL;
+ }
pcibios_add_bus(child);
@@ -1189,15 +1261,17 @@ struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev,
}
EXPORT_SYMBOL(pci_add_new_bus);
-static void pci_enable_crs(struct pci_dev *pdev)
+static void pci_enable_rrs_sv(struct pci_dev *pdev)
{
u16 root_cap = 0;
- /* Enable CRS Software Visibility if supported */
+ /* Enable Configuration RRS Software Visibility if supported */
pcie_capability_read_word(pdev, PCI_EXP_RTCAP, &root_cap);
- if (root_cap & PCI_EXP_RTCAP_CRSVIS)
+ if (root_cap & PCI_EXP_RTCAP_RRS_SV) {
pcie_capability_set_word(pdev, PCI_EXP_RTCTL,
- PCI_EXP_RTCTL_CRSSVE);
+ PCI_EXP_RTCTL_RRS_SVE);
+ pdev->config_rrs_sv = 1;
+ }
}
static unsigned int pci_scan_child_bus_extend(struct pci_bus *bus,
@@ -1312,8 +1386,6 @@ static int pci_scan_bridge_extend(struct pci_bus *bus, struct pci_dev *dev,
pci_write_config_word(dev, PCI_BRIDGE_CONTROL,
bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT);
- pci_enable_crs(dev);
-
if ((secondary || subordinate) && !pcibios_assign_all_busses() &&
!is_cardbus && !broken) {
unsigned int cmax, buses;
@@ -1482,6 +1554,9 @@ static int pci_scan_bridge_extend(struct pci_bus *bus, struct pci_dev *dev,
}
out:
+ /* Clear errors in the Secondary Status Register */
+ pci_write_config_word(dev, PCI_SEC_STATUS, 0xffff);
+
pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bctl);
pm_runtime_put(&dev->dev);
@@ -1551,6 +1626,11 @@ void set_pcie_port_type(struct pci_dev *pdev)
pdev->pcie_cap = pos;
pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
pdev->pcie_flags_reg = reg16;
+
+ type = pci_pcie_type(pdev);
+ if (type == PCI_EXP_TYPE_ROOT_PORT)
+ pci_enable_rrs_sv(pdev);
+
pci_read_config_dword(pdev, pos + PCI_EXP_DEVCAP, &pdev->devcap);
pdev->pcie_mpss = FIELD_GET(PCI_EXP_DEVCAP_PAYLOAD, pdev->devcap);
@@ -1567,7 +1647,6 @@ void set_pcie_port_type(struct pci_dev *pdev)
* correctly so detect impossible configurations here and correct
* the port type accordingly.
*/
- type = pci_pcie_type(pdev);
if (type == PCI_EXP_TYPE_DOWNSTREAM) {
/*
* If pdev claims to be downstream port but the parent
@@ -1614,23 +1693,33 @@ static void set_pcie_thunderbolt(struct pci_dev *dev)
static void set_pcie_untrusted(struct pci_dev *dev)
{
- struct pci_dev *parent;
+ struct pci_dev *parent = pci_upstream_bridge(dev);
+ if (!parent)
+ return;
/*
- * If the upstream bridge is untrusted we treat this device
+ * If the upstream bridge is untrusted we treat this device as
* untrusted as well.
*/
- parent = pci_upstream_bridge(dev);
- if (parent && (parent->untrusted || parent->external_facing))
+ if (parent->untrusted) {
+ dev->untrusted = true;
+ return;
+ }
+
+ if (arch_pci_dev_is_removable(dev)) {
+ pci_dbg(dev, "marking as untrusted\n");
dev->untrusted = true;
+ }
}
static void pci_set_removable(struct pci_dev *dev)
{
struct pci_dev *parent = pci_upstream_bridge(dev);
+ if (!parent)
+ return;
/*
- * We (only) consider everything downstream from an external_facing
+ * We (only) consider everything tunneled below an external_facing
* device to be removable by the user. We're mainly concerned with
* consumer platforms with user accessible thunderbolt ports that are
* vulnerable to DMA attacks, and we expect those ports to be marked by
@@ -1640,9 +1729,15 @@ static void pci_set_removable(struct pci_dev *dev)
* accessible to user / may not be removed by end user, and thus not
* exposed as "removable" to userspace.
*/
- if (parent &&
- (parent->external_facing || dev_is_removable(&parent->dev)))
+ if (dev_is_removable(&parent->dev)) {
+ dev_set_removable(&dev->dev, DEVICE_REMOVABLE);
+ return;
+ }
+
+ if (arch_pci_dev_is_removable(dev)) {
+ pci_dbg(dev, "marking as removable\n");
dev_set_removable(&dev->dev, DEVICE_REMOVABLE);
+ }
}
/**
@@ -1928,6 +2023,9 @@ int pci_setup_device(struct pci_dev *dev)
set_pcie_untrusted(dev);
+ if (pci_is_pcie(dev))
+ dev->supported_speeds = pcie_get_supported_speeds(dev);
+
/* "Unknown power state" */
dev->current_state = PCI_UNKNOWN;
@@ -2211,8 +2309,8 @@ static void pci_configure_relaxed_ordering(struct pci_dev *dev)
static void pci_configure_eetlp_prefix(struct pci_dev *dev)
{
-#ifdef CONFIG_PCI_PASID
struct pci_dev *bridge;
+ unsigned int eetlp_max;
int pcie_type;
u32 cap;
@@ -2224,15 +2322,19 @@ static void pci_configure_eetlp_prefix(struct pci_dev *dev)
return;
pcie_type = pci_pcie_type(dev);
+
+ eetlp_max = FIELD_GET(PCI_EXP_DEVCAP2_EE_PREFIX_MAX, cap);
+ /* 00b means 4 */
+ eetlp_max = eetlp_max ?: 4;
+
if (pcie_type == PCI_EXP_TYPE_ROOT_PORT ||
pcie_type == PCI_EXP_TYPE_RC_END)
- dev->eetlp_prefix_path = 1;
+ dev->eetlp_prefix_max = eetlp_max;
else {
bridge = pci_upstream_bridge(dev);
- if (bridge && bridge->eetlp_prefix_path)
- dev->eetlp_prefix_path = 1;
+ if (bridge && bridge->eetlp_prefix_max)
+ dev->eetlp_prefix_max = eetlp_max;
}
-#endif
}
static void pci_configure_serr(struct pci_dev *dev)
@@ -2326,28 +2428,23 @@ struct pci_dev *pci_alloc_dev(struct pci_bus *bus)
}
EXPORT_SYMBOL(pci_alloc_dev);
-static bool pci_bus_crs_vendor_id(u32 l)
-{
- return (l & 0xffff) == PCI_VENDOR_ID_PCI_SIG;
-}
-
-static bool pci_bus_wait_crs(struct pci_bus *bus, int devfn, u32 *l,
+static bool pci_bus_wait_rrs(struct pci_bus *bus, int devfn, u32 *l,
int timeout)
{
int delay = 1;
- if (!pci_bus_crs_vendor_id(*l))
- return true; /* not a CRS completion */
+ if (!pci_bus_rrs_vendor_id(*l))
+ return true; /* not a Configuration RRS completion */
if (!timeout)
- return false; /* CRS, but caller doesn't want to wait */
+ return false; /* RRS, but caller doesn't want to wait */
/*
* We got the reserved Vendor ID that indicates a completion with
- * Configuration Request Retry Status (CRS). Retry until we get a
+ * Configuration Request Retry Status (RRS). Retry until we get a
* valid Vendor ID or we time out.
*/
- while (pci_bus_crs_vendor_id(*l)) {
+ while (pci_bus_rrs_vendor_id(*l)) {
if (delay > timeout) {
pr_warn("pci %04x:%02x:%02x.%d: not ready after %dms; giving up\n",
pci_domain_nr(bus), bus->number,
@@ -2386,8 +2483,8 @@ bool pci_bus_generic_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l,
*l == 0x0000ffff || *l == 0xffff0000)
return false;
- if (pci_bus_crs_vendor_id(*l))
- return pci_bus_wait_crs(bus, devfn, l, timeout);
+ if (pci_bus_rrs_vendor_id(*l))
+ return pci_bus_wait_rrs(bus, devfn, l, timeout);
return true;
}
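
pci_bus_rrs_vendor_id() is assumed to keep the semantics of the removed
pci_bus_crs_vendor_id() shown below: a config read that completes with
Configuration RRS returns the reserved PCI-SIG vendor ID in the low 16 bits.
In other words:

    static bool sketch_rrs_vendor_id(u32 l)
    {
            return (l & 0xffff) == PCI_VENDOR_ID_PCI_SIG;  /* 0x0001 */
    }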
@@ -2411,6 +2508,36 @@ bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l,
}
EXPORT_SYMBOL(pci_bus_read_dev_vendor_id);
+static struct platform_device *pci_pwrctrl_create_device(struct pci_bus *bus, int devfn)
+{
+ struct pci_host_bridge *host = pci_find_host_bridge(bus);
+ struct platform_device *pdev;
+ struct device_node *np;
+
+ np = of_pci_find_child_device(dev_of_node(&bus->dev), devfn);
+ if (!np || of_find_device_by_node(np))
+ return NULL;
+
+ /*
+ * First check whether the pwrctrl device needs to be created at all:
+ * it does only if at least one power supply is defined in the
+ * devicetree node of the device.
+ */
+ if (!of_pci_supply_present(np)) {
+ pr_debug("PCI/pwrctrl: Skipping OF node: %s\n", np->name);
+ return NULL;
+ }
+
+ /* Now create the pwrctrl device */
+ pdev = of_platform_device_create(np, NULL, &host->dev);
+ if (!pdev) {
+ pr_err("PCI/pwrctrl: Failed to create pwrctrl device for node: %s\n", np->name);
+ return NULL;
+ }
+
+ return pdev;
+}
+
/*
* Read the config data for a PCI device, sanity-check it,
* and fill in the dev structure.
@@ -2420,6 +2547,15 @@ static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
struct pci_dev *dev;
u32 l;
+ /*
+ * Create pwrctrl device (if required) for the PCI device to handle the
+ * power state. If the pwrctrl device is created, then skip scanning
+ * further as the pwrctrl core will rescan the bus after powering on
+ * the device.
+ */
+ if (pci_pwrctrl_create_device(bus, devfn))
+ return NULL;
+
if (!pci_bus_read_dev_vendor_id(bus, devfn, &l, 60*1000))
return NULL;
@@ -2481,6 +2617,8 @@ static void pci_init_capabilities(struct pci_dev *dev)
pci_dpc_init(dev); /* Downstream Port Containment */
pci_rcec_init(dev); /* Root Complex Event Collector */
pci_doe_init(dev); /* Data Object Exchange */
+ pci_tph_init(dev); /* TLP Processing Hints */
+ pci_rebar_init(dev); /* Resizable BAR */
pcie_report_downtraining(dev);
pci_init_reset_methods(dev);
@@ -2576,6 +2714,10 @@ void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
dev->match_driver = false;
ret = device_add(&dev->dev);
WARN_ON(ret < 0);
+
+ pci_npem_create(dev);
+
+ pci_doe_sysfs_init(dev);
}
struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn)
@@ -3066,7 +3208,9 @@ int pci_host_probe(struct pci_host_bridge *bridge)
struct pci_bus *bus, *child;
int ret;
+ pci_lock_rescan_remove();
ret = pci_scan_root_bus_bridge(bridge);
+ pci_unlock_rescan_remove();
if (ret < 0) {
dev_err(bridge->dev.parent, "Scanning root bridge failed");
return ret;
@@ -3074,22 +3218,33 @@ int pci_host_probe(struct pci_host_bridge *bridge)
bus = bridge->bus;
+ /* If we must preserve the resource configuration, claim now */
+ if (bridge->preserve_config)
+ pci_bus_claim_resources(bus);
+
/*
- * We insert PCI resources into the iomem_resource and
- * ioport_resource trees in either pci_bus_claim_resources()
- * or pci_bus_assign_resources().
+ * Assign whatever was left unassigned. If we didn't claim above,
+ * this will reassign everything.
*/
- if (pci_has_flag(PCI_PROBE_ONLY)) {
- pci_bus_claim_resources(bus);
- } else {
- pci_bus_size_bridges(bus);
- pci_bus_assign_resources(bus);
+ pci_assign_unassigned_root_bus_resources(bus);
- list_for_each_entry(child, &bus->children, node)
- pcie_bus_configure_settings(child);
- }
+ list_for_each_entry(child, &bus->children, node)
+ pcie_bus_configure_settings(child);
+ pci_lock_rescan_remove();
pci_bus_add_devices(bus);
+ pci_unlock_rescan_remove();
+
+ /*
+ * Ensure pm_runtime_enable() is called for the controller drivers
+ * before calling pci_host_probe(). The PM framework expects that
+ * if the parent device supports runtime PM, it will be enabled
+ * before child runtime PM is enabled.
+ */
+ pm_runtime_set_active(&bridge->dev);
+ pm_runtime_no_callbacks(&bridge->dev);
+ devm_pm_runtime_enable(&bridge->dev);
+
return 0;
}
EXPORT_SYMBOL_GPL(pci_host_probe);
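
For context, a typical controller-driver entry point funnels into the reworked
pci_host_probe() like this. A hedged sketch, with sketch_pci_ops standing in
for the driver's real config accessors:

    static struct pci_ops sketch_pci_ops;  /* assumed config accessors */

    static int sketch_pcie_probe(struct platform_device *pdev)
    {
            struct pci_host_bridge *bridge;

            bridge = devm_pci_alloc_host_bridge(&pdev->dev, 0);
            if (!bridge)
                    return -ENOMEM;

            bridge->ops = &sketch_pci_ops;

            /* Claims/assigns resources and, per the hunks above, now also
             * enables runtime PM of the bridge device. */
            return pci_host_probe(bridge);
    }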
diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
index f967709082d6..9348a0fb8084 100644
--- a/drivers/pci/proc.c
+++ b/drivers/pci/proc.c
@@ -251,6 +251,10 @@ static int proc_bus_pci_mmap(struct file *file, struct vm_area_struct *vma)
security_locked_down(LOCKDOWN_PCI_ACCESS))
return -EPERM;
+ /* Skip devices with non-mappable BARs */
+ if (dev->non_mappable_bars)
+ return -EINVAL;
+
if (fpriv->mmap_state == pci_mmap_io) {
if (!arch_can_pci_mmap_io())
return -EINVAL;
diff --git a/drivers/pci/pwrctrl/Kconfig b/drivers/pci/pwrctrl/Kconfig
new file mode 100644
index 000000000000..990cab67d413
--- /dev/null
+++ b/drivers/pci/pwrctrl/Kconfig
@@ -0,0 +1,23 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config HAVE_PWRCTL
+ bool
+
+config PCI_PWRCTL
+ tristate
+
+config PCI_PWRCTL_PWRSEQ
+ tristate
+ select POWER_SEQUENCING
+ select PCI_PWRCTL
+
+config PCI_PWRCTL_SLOT
+ tristate "PCI Power Control driver for PCI slots"
+ select PCI_PWRCTL
+ help
+ Say Y here to enable the PCI Power Control driver to control the power
+ state of PCI slots.
+
+ This is a generic driver that controls the power state of different
+ PCI slots. The voltage regulators powering the rails of the PCI slots
+ are expected to be defined in the devicetree node of the PCI bridge.
diff --git a/drivers/pci/pwrctrl/Makefile b/drivers/pci/pwrctrl/Makefile
new file mode 100644
index 000000000000..ddfb12c5aadf
--- /dev/null
+++ b/drivers/pci/pwrctrl/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+obj-$(CONFIG_PCI_PWRCTL) += pci-pwrctrl-core.o
+pci-pwrctrl-core-y := core.o
+
+obj-$(CONFIG_PCI_PWRCTL_PWRSEQ) += pci-pwrctrl-pwrseq.o
+
+obj-$(CONFIG_PCI_PWRCTL_SLOT) += pci-pwrctrl-slot.o
+pci-pwrctrl-slot-y := slot.o
diff --git a/drivers/pci/pwrctrl/core.c b/drivers/pci/pwrctrl/core.c
new file mode 100644
index 000000000000..9cc7e2b7f2b5
--- /dev/null
+++ b/drivers/pci/pwrctrl/core.c
@@ -0,0 +1,148 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2024 Linaro Ltd.
+ */
+
+#include <linux/device.h>
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/pci-pwrctrl.h>
+#include <linux/property.h>
+#include <linux/slab.h>
+
+static int pci_pwrctrl_notify(struct notifier_block *nb, unsigned long action,
+ void *data)
+{
+ struct pci_pwrctrl *pwrctrl = container_of(nb, struct pci_pwrctrl, nb);
+ struct device *dev = data;
+
+ if (dev_fwnode(dev) != dev_fwnode(pwrctrl->dev))
+ return NOTIFY_DONE;
+
+ switch (action) {
+ case BUS_NOTIFY_ADD_DEVICE:
+ /*
+ * We will have two struct device objects bound to two different
+ * drivers on different buses but consuming the same DT node. We
+ * must not bind the pins twice in this case but only once for
+ * the first device to be added.
+ *
+ * If we got here then the PCI device is the second after the
+ * power control platform device. Mark its OF node as reused.
+ */
+ dev->of_node_reused = true;
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static void rescan_work_func(struct work_struct *work)
+{
+ struct pci_pwrctrl *pwrctrl = container_of(work,
+ struct pci_pwrctrl, work);
+
+ pci_lock_rescan_remove();
+ pci_rescan_bus(to_pci_host_bridge(pwrctrl->dev->parent)->bus);
+ pci_unlock_rescan_remove();
+}
+
+/**
+ * pci_pwrctrl_init() - Initialize the PCI power control context struct
+ *
+ * @pwrctrl: PCI power control data
+ * @dev: Parent device
+ */
+void pci_pwrctrl_init(struct pci_pwrctrl *pwrctrl, struct device *dev)
+{
+ pwrctrl->dev = dev;
+ INIT_WORK(&pwrctrl->work, rescan_work_func);
+}
+EXPORT_SYMBOL_GPL(pci_pwrctrl_init);
+
+/**
+ * pci_pwrctrl_device_set_ready() - Notify the pwrctrl subsystem that the PCI
+ * device is powered-up and ready to be detected.
+ *
+ * @pwrctrl: PCI power control data.
+ *
+ * Returns:
+ * 0 on success, negative error number on error.
+ *
+ * Note:
+ * This function returning 0 doesn't mean the device was detected. It means
+ * that the bus rescan was successfully started. The device will get bound to
+ * its PCI driver asynchronously.
+ */
+int pci_pwrctrl_device_set_ready(struct pci_pwrctrl *pwrctrl)
+{
+ int ret;
+
+ if (!pwrctrl->dev)
+ return -ENODEV;
+
+ pwrctrl->nb.notifier_call = pci_pwrctrl_notify;
+ ret = bus_register_notifier(&pci_bus_type, &pwrctrl->nb);
+ if (ret)
+ return ret;
+
+ schedule_work(&pwrctrl->work);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pci_pwrctrl_device_set_ready);
+
+/**
+ * pci_pwrctrl_device_unset_ready() - Notify the pwrctrl subsystem that the PCI
+ * device is about to be powered-down.
+ *
+ * @pwrctrl: PCI power control data.
+ */
+void pci_pwrctrl_device_unset_ready(struct pci_pwrctrl *pwrctrl)
+{
+ /*
+ * We don't have to delete the link here. Typically, this function
+ * is only called when the power control device is being detached. If
+ * it is being detached then the child PCI device must have already
+ * been unbound too or the device core wouldn't let us unbind.
+ */
+ bus_unregister_notifier(&pci_bus_type, &pwrctrl->nb);
+}
+EXPORT_SYMBOL_GPL(pci_pwrctrl_device_unset_ready);
+
+static void devm_pci_pwrctrl_device_unset_ready(void *data)
+{
+ struct pci_pwrctrl *pwrctrl = data;
+
+ pci_pwrctrl_device_unset_ready(pwrctrl);
+}
+
+/**
+ * devm_pci_pwrctrl_device_set_ready - Managed variant of
+ * pci_pwrctrl_device_set_ready().
+ *
+ * @dev: Device managing this pwrctrl provider.
+ * @pwrctrl: PCI power control data.
+ *
+ * Returns:
+ * 0 on success, negative error number on error.
+ */
+int devm_pci_pwrctrl_device_set_ready(struct device *dev,
+ struct pci_pwrctrl *pwrctrl)
+{
+ int ret;
+
+ ret = pci_pwrctrl_device_set_ready(pwrctrl);
+ if (ret)
+ return ret;
+
+ return devm_add_action_or_reset(dev,
+ devm_pci_pwrctrl_device_unset_ready,
+ pwrctrl);
+}
+EXPORT_SYMBOL_GPL(devm_pci_pwrctrl_device_set_ready);
+
+MODULE_AUTHOR("Bartosz Golaszewski <bartosz.golaszewski@linaro.org>");
+MODULE_DESCRIPTION("PCI Device Power Control core driver");
+MODULE_LICENSE("GPL");
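
The expected provider lifecycle, then, is: power the device on, call
pci_pwrctrl_init(), then pci_pwrctrl_device_set_ready() (or its devm variant)
to kick off the bus rescan. A minimal hypothetical provider:

    static int sketch_pwrctrl_probe(struct platform_device *pdev)
    {
            struct pci_pwrctrl *ctx;

            ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
            if (!ctx)
                    return -ENOMEM;

            /* ... enable regulators/clocks for the PCI device here ... */

            pci_pwrctrl_init(ctx, &pdev->dev);

            return devm_pci_pwrctrl_device_set_ready(&pdev->dev, ctx);
    }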
diff --git a/drivers/pci/pwrctrl/pci-pwrctrl-pwrseq.c b/drivers/pci/pwrctrl/pci-pwrctrl-pwrseq.c
new file mode 100644
index 000000000000..4e664e7b8dd2
--- /dev/null
+++ b/drivers/pci/pwrctrl/pci-pwrctrl-pwrseq.c
@@ -0,0 +1,139 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2024 Linaro Ltd.
+ */
+
+#include <linux/device.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/pci-pwrctrl.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/pwrseq/consumer.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+struct pci_pwrctrl_pwrseq_data {
+ struct pci_pwrctrl ctx;
+ struct pwrseq_desc *pwrseq;
+};
+
+struct pci_pwrctrl_pwrseq_pdata {
+ const char *target;
+ /*
+ * Called before doing anything else to perform device-specific
+ * verification between requesting the power sequencing handle.
+ */
+ int (*validate_device)(struct device *dev);
+};
+
+static int pci_pwrctrl_pwrseq_qcm_wcn_validate_device(struct device *dev)
+{
+ /*
+ * Old device trees for some platforms already define wifi nodes for
+ * the WCN family of chips, dating from before power sequencing was
+ * added upstream.
+ *
+ * These nodes don't consume the regulator outputs from the PMU, and
+ * if we allow this driver to bind to one of these "incomplete" nodes,
+ * we'll see a kernel log error about the indefinite probe deferral.
+ *
+ * Check for the regulator supply that is present on all WCN models
+ * before moving forward.
+ */
+ if (!device_property_present(dev, "vddaon-supply"))
+ return -ENODEV;
+
+ return 0;
+}
+
+static const struct pci_pwrctrl_pwrseq_pdata pci_pwrctrl_pwrseq_qcom_wcn_pdata = {
+ .target = "wlan",
+ .validate_device = pci_pwrctrl_pwrseq_qcm_wcn_validate_device,
+};
+
+static void devm_pci_pwrctrl_pwrseq_power_off(void *data)
+{
+ struct pwrseq_desc *pwrseq = data;
+
+ pwrseq_power_off(pwrseq);
+}
+
+static int pci_pwrctrl_pwrseq_probe(struct platform_device *pdev)
+{
+ const struct pci_pwrctrl_pwrseq_pdata *pdata;
+ struct pci_pwrctrl_pwrseq_data *data;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ pdata = device_get_match_data(dev);
+ if (!pdata || !pdata->target)
+ return -EINVAL;
+
+ if (pdata->validate_device) {
+ ret = pdata->validate_device(dev);
+ if (ret)
+ return ret;
+ }
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->pwrseq = devm_pwrseq_get(dev, pdata->target);
+ if (IS_ERR(data->pwrseq))
+ return dev_err_probe(dev, PTR_ERR(data->pwrseq),
+ "Failed to get the power sequencer\n");
+
+ ret = pwrseq_power_on(data->pwrseq);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to power-on the device\n");
+
+ ret = devm_add_action_or_reset(dev, devm_pci_pwrctrl_pwrseq_power_off,
+ data->pwrseq);
+ if (ret)
+ return ret;
+
+ pci_pwrctrl_init(&data->ctx, dev);
+
+ ret = devm_pci_pwrctrl_device_set_ready(dev, &data->ctx);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to register the pwrctrl wrapper\n");
+
+ return 0;
+}
+
+static const struct of_device_id pci_pwrctrl_pwrseq_of_match[] = {
+ {
+ /* ATH11K in QCA6390 package. */
+ .compatible = "pci17cb,1101",
+ .data = &pci_pwrctrl_pwrseq_qcom_wcn_pdata,
+ },
+ {
+ /* ATH11K in WCN6855 package. */
+ .compatible = "pci17cb,1103",
+ .data = &pci_pwrctrl_pwrseq_qcom_wcn_pdata,
+ },
+ {
+ /* ATH12K in WCN7850 package. */
+ .compatible = "pci17cb,1107",
+ .data = &pci_pwrctrl_pwrseq_qcom_wcn_pdata,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, pci_pwrctrl_pwrseq_of_match);
+
+static struct platform_driver pci_pwrctrl_pwrseq_driver = {
+ .driver = {
+ .name = "pci-pwrctrl-pwrseq",
+ .of_match_table = pci_pwrctrl_pwrseq_of_match,
+ },
+ .probe = pci_pwrctrl_pwrseq_probe,
+};
+module_platform_driver(pci_pwrctrl_pwrseq_driver);
+
+MODULE_AUTHOR("Bartosz Golaszewski <bartosz.golaszewski@linaro.org>");
+MODULE_DESCRIPTION("Generic PCI Power Control module for power sequenced devices");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pci/pwrctrl/slot.c b/drivers/pci/pwrctrl/slot.c
new file mode 100644
index 000000000000..18becc144913
--- /dev/null
+++ b/drivers/pci/pwrctrl/slot.c
@@ -0,0 +1,93 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2024 Linaro Ltd.
+ * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+ */
+
+#include <linux/device.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/pci-pwrctrl.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+
+struct pci_pwrctrl_slot_data {
+ struct pci_pwrctrl ctx;
+ struct regulator_bulk_data *supplies;
+ int num_supplies;
+};
+
+static void devm_pci_pwrctrl_slot_power_off(void *data)
+{
+ struct pci_pwrctrl_slot_data *slot = data;
+
+ regulator_bulk_disable(slot->num_supplies, slot->supplies);
+ regulator_bulk_free(slot->num_supplies, slot->supplies);
+}
+
+static int pci_pwrctrl_slot_probe(struct platform_device *pdev)
+{
+ struct pci_pwrctrl_slot_data *slot;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ slot = devm_kzalloc(dev, sizeof(*slot), GFP_KERNEL);
+ if (!slot)
+ return -ENOMEM;
+
+ ret = of_regulator_bulk_get_all(dev, dev_of_node(dev),
+ &slot->supplies);
+ if (ret < 0) {
+ dev_err_probe(dev, ret, "Failed to get slot regulators\n");
+ return ret;
+ }
+
+ slot->num_supplies = ret;
+ ret = regulator_bulk_enable(slot->num_supplies, slot->supplies);
+ if (ret < 0) {
+ dev_err_probe(dev, ret, "Failed to enable slot regulators\n");
+ goto err_regulator_free;
+ }
+
+ ret = devm_add_action_or_reset(dev, devm_pci_pwrctrl_slot_power_off,
+ slot);
+ if (ret)
+ return ret; /* on failure the action already ran and powered off */
+
+ pci_pwrctrl_init(&slot->ctx, dev);
+
+ ret = devm_pci_pwrctrl_device_set_ready(dev, &slot->ctx);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to register pwrctrl driver\n");
+
+ return 0;
+
+err_regulator_free:
+ regulator_bulk_free(slot->num_supplies, slot->supplies);
+
+ return ret;
+}
+
+static const struct of_device_id pci_pwrctrl_slot_of_match[] = {
+ {
+ .compatible = "pciclass,0604",
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, pci_pwrctrl_slot_of_match);
+
+static struct platform_driver pci_pwrctrl_slot_driver = {
+ .driver = {
+ .name = "pci-pwrctrl-slot",
+ .of_match_table = pci_pwrctrl_slot_of_match,
+ },
+ .probe = pci_pwrctrl_slot_probe,
+};
+module_platform_driver(pci_pwrctrl_slot_driver);
+
+MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>");
+MODULE_DESCRIPTION("Generic PCI Power Control driver for PCI Slots");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index eff7f5df08e2..94daca15a096 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -12,6 +12,8 @@
* file, where their drivers can use them.
*/
+#include <linux/aer.h>
+#include <linux/align.h>
#include <linux/bitfield.h>
#include <linux/types.h>
#include <linux/kernel.h>
@@ -29,10 +31,23 @@
#include <linux/nvme.h>
#include <linux/platform_data/x86/apple.h>
#include <linux/pm_runtime.h>
+#include <linux/sizes.h>
#include <linux/suspend.h>
#include <linux/switchtec.h>
#include "pci.h"
+static bool pcie_lbms_seen(struct pci_dev *dev, u16 lnksta)
+{
+ unsigned long count;
+ int ret;
+
+ ret = pcie_lbms_count(dev, &count);
+ if (ret < 0)
+ return lnksta & PCI_EXP_LNKSTA_LBMS;
+
+ return count > 0;
+}
+
/*
* Retrain the link of a downstream PCIe port by hand if necessary.
*
@@ -66,7 +81,7 @@
* apply this erratum workaround to any downstream ports as long as they
* support Link Active reporting and have the Link Control 2 register.
* Restrict the speed to 2.5GT/s then with the Target Link Speed field,
- * request a retrain and wait 200ms for the data link to go up.
+ * request a retrain and check the result.
*
* If this turns out successful and we know by the Vendor:Device ID it is
* safe to do so, then lift the restriction, letting the devices negotiate
@@ -74,33 +89,39 @@
* firmware may have already arranged and lift it with ports that already
* report their data link being up.
*
- * Return TRUE if the link has been successfully retrained, otherwise FALSE.
+ * Otherwise revert the speed to the original setting and request a retrain
+ * again to remove any residual state, ignoring the result as it's supposed
+ * to fail anyway.
+ *
+ * Return 0 if the link has been successfully retrained. Return an error
+ * if retraining was not needed or we attempted a retrain and it failed.
*/
-bool pcie_failed_link_retrain(struct pci_dev *dev)
+int pcie_failed_link_retrain(struct pci_dev *dev)
{
static const struct pci_device_id ids[] = {
{ PCI_VDEVICE(ASMEDIA, 0x2824) }, /* ASMedia ASM2824 */
{}
};
u16 lnksta, lnkctl2;
+ int ret = -ENOTTY;
if (!pci_is_pcie(dev) || !pcie_downstream_port(dev) ||
!pcie_cap_has_lnkctl2(dev) || !dev->link_active_reporting)
- return false;
+ return ret;
pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &lnkctl2);
pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
- if ((lnksta & (PCI_EXP_LNKSTA_LBMS | PCI_EXP_LNKSTA_DLLLA)) ==
- PCI_EXP_LNKSTA_LBMS) {
- pci_info(dev, "broken device, retraining non-functional downstream link at 2.5GT/s\n");
+ if (!(lnksta & PCI_EXP_LNKSTA_DLLLA) && pcie_lbms_seen(dev, lnksta)) {
+ u16 oldlnkctl2 = lnkctl2;
- lnkctl2 &= ~PCI_EXP_LNKCTL2_TLS;
- lnkctl2 |= PCI_EXP_LNKCTL2_TLS_2_5GT;
- pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, lnkctl2);
+ pci_info(dev, "broken device, retraining non-functional downstream link at 2.5GT/s\n");
- if (pcie_retrain_link(dev, false)) {
+ ret = pcie_set_target_speed(dev, PCIE_SPEED_2_5GT, false);
+ if (ret) {
pci_info(dev, "retraining failed\n");
- return false;
+ pcie_set_target_speed(dev, PCIE_LNKCTL2_TLS2SPEED(oldlnkctl2),
+ true);
+ return ret;
}
pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
@@ -113,17 +134,14 @@ bool pcie_failed_link_retrain(struct pci_dev *dev)
pci_info(dev, "removing 2.5GT/s downstream link speed restriction\n");
pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
- lnkctl2 &= ~PCI_EXP_LNKCTL2_TLS;
- lnkctl2 |= lnkcap & PCI_EXP_LNKCAP_SLS;
- pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, lnkctl2);
-
- if (pcie_retrain_link(dev, false)) {
+ ret = pcie_set_target_speed(dev, PCIE_LNKCAP_SLS2SPEED(lnkcap), false);
+ if (ret) {
pci_info(dev, "retraining failed\n");
- return false;
+ return ret;
}
}
- return true;
+ return ret;
}
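
pcie_set_target_speed() and the PCIE_LNKCTL2_TLS2SPEED()/PCIE_LNKCAP_SLS2SPEED()
converters are added elsewhere in this series. Judging from the replaced code,
a speed cap performed by hand reduces to approximately the following
(illustrative only):

    /* cap at 2.5GT/s: program Target Link Speed, then retrain */
    pcie_capability_clear_and_set_word(dev, PCI_EXP_LNKCTL2,
                                       PCI_EXP_LNKCTL2_TLS,
                                       PCI_EXP_LNKCTL2_TLS_2_5GT);
    ret = pcie_retrain_link(dev, false);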
static ktime_t fixup_debug_start(struct pci_dev *dev,
@@ -573,8 +591,7 @@ static void quirk_extend_bar_to_page(struct pci_dev *dev)
const char *r_name = pci_resource_name(dev, i);
if (r->flags & IORESOURCE_MEM && resource_size(r) < PAGE_SIZE) {
- r->end = PAGE_SIZE - 1;
- r->start = 0;
+ resource_set_range(r, 0, PAGE_SIZE);
r->flags |= IORESOURCE_UNSET;
pci_info(dev, "%s %pR: expanded to page size\n",
r_name, r);
@@ -591,10 +608,9 @@ static void quirk_s3_64M(struct pci_dev *dev)
{
struct resource *r = &dev->resource[0];
- if ((r->start & 0x3ffffff) || r->end != r->start + 0x3ffffff) {
+ if (!IS_ALIGNED(r->start, SZ_64M) || resource_size(r) != SZ_64M) {
r->flags |= IORESOURCE_UNSET;
- r->start = 0;
- r->end = 0x3ffffff;
+ resource_set_range(r, 0, SZ_64M);
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_868, quirk_s3_64M);
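
resource_set_range() semantics, inferred from the open-coded pairs it replaces
throughout this file (the helper itself is expected in include/linux/ioport.h):

    static inline void sketch_resource_set_range(struct resource *res,
                                                 resource_size_t start,
                                                 resource_size_t size)
    {
            res->start = start;
            res->end = start + size - 1;
    }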
@@ -605,7 +621,7 @@ static void quirk_io(struct pci_dev *dev, int pos, unsigned int size,
{
u32 region;
struct pci_bus_region bus_region;
- struct resource *res = dev->resource + pos;
+ struct resource *res = pci_resource_n(dev, pos);
const char *res_name = pci_resource_name(dev, pos);
pci_read_config_dword(dev, PCI_BASE_ADDRESS_0 + (pos << 2), &region);
@@ -655,7 +671,7 @@ static void quirk_io_region(struct pci_dev *dev, int port,
{
u16 region;
struct pci_bus_region bus_region;
- struct resource *res = dev->resource + nr;
+ struct resource *res = pci_resource_n(dev, nr);
pci_read_config_word(dev, port, &region);
region &= ~(size - 1);
@@ -1329,8 +1345,7 @@ static void quirk_dunord(struct pci_dev *dev)
struct resource *r = &dev->resource[1];
r->flags |= IORESOURCE_UNSET;
- r->start = 0;
- r->end = 0xffffff;
+ resource_set_range(r, 0, SZ_16M);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_DUNORD, PCI_DEVICE_ID_DUNORD_I3000, quirk_dunord);
@@ -1975,12 +1990,12 @@ static void quirk_huawei_pcie_sva(struct pci_dev *pdev)
device_create_managed_software_node(&pdev->dev, properties, NULL))
pci_warn(pdev, "could not add stall property");
}
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_HUAWEI, 0xa250, quirk_huawei_pcie_sva);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_HUAWEI, 0xa251, quirk_huawei_pcie_sva);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_HUAWEI, 0xa255, quirk_huawei_pcie_sva);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_HUAWEI, 0xa256, quirk_huawei_pcie_sva);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_HUAWEI, 0xa258, quirk_huawei_pcie_sva);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_HUAWEI, 0xa259, quirk_huawei_pcie_sva);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HUAWEI, 0xa250, quirk_huawei_pcie_sva);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HUAWEI, 0xa251, quirk_huawei_pcie_sva);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HUAWEI, 0xa255, quirk_huawei_pcie_sva);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HUAWEI, 0xa256, quirk_huawei_pcie_sva);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HUAWEI, 0xa258, quirk_huawei_pcie_sva);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HUAWEI, 0xa259, quirk_huawei_pcie_sva);
/*
* It's possible for the MSI to get corrupted if SHPC and ACPI are used
@@ -2327,8 +2342,7 @@ static void quirk_tc86c001_ide(struct pci_dev *dev)
if (r->start & 0x8) {
r->flags |= IORESOURCE_UNSET;
- r->start = 0;
- r->end = 0xf;
+ resource_set_range(r, 0, SZ_16);
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TOSHIBA_2,
@@ -2356,8 +2370,7 @@ static void quirk_plx_pci9050(struct pci_dev *dev)
pci_info(dev, "Re-allocating PLX PCI 9050 BAR %u to length 256 to avoid bit 7 bug\n",
bar);
r->flags |= IORESOURCE_UNSET;
- r->start = 0;
- r->end = 0xff;
+ resource_set_range(r, 0, SZ_256);
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050,
@@ -3509,13 +3522,13 @@ static void quirk_intel_ntb(struct pci_dev *dev)
if (rc)
return;
- dev->resource[2].end = dev->resource[2].start + ((u64) 1 << val) - 1;
+ resource_set_size(&dev->resource[2], (resource_size_t)1 << val);
rc = pci_read_config_byte(dev, 0x00D1, &val);
if (rc)
return;
- dev->resource[4].end = dev->resource[4].start + ((u64) 1 << val) - 1;
+ resource_set_size(&dev->resource[4], (resource_size_t)1 << val);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0e08, quirk_intel_ntb);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0e0d, quirk_intel_ntb);
@@ -3608,6 +3621,8 @@ DECLARE_PCI_FIXUP_FINAL(0x1814, 0x0601, /* Ralink RT2800 802.11n PCI */
quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_FINAL(0x1b7c, 0x0004, /* Ceton InfiniTV4 */
quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CREATIVE, PCI_DEVICE_ID_CREATIVE_20K2,
+ quirk_broken_intx_masking);
/*
* Realtek RTL8169 PCI Gigabit Ethernet Controller (rev 10)
@@ -4246,6 +4261,10 @@ static void quirk_dma_func0_alias(struct pci_dev *dev)
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_RICOH, 0xe832, quirk_dma_func0_alias);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_RICOH, 0xe476, quirk_dma_func0_alias);
+/* Some Glenfly chips use function 0 as the PCIe Requester ID for DMA */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_GLENFLY, 0x3d40, quirk_dma_func0_alias);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_GLENFLY, 0x3d41, quirk_dma_func0_alias);
+
static void quirk_dma_func1_alias(struct pci_dev *dev)
{
if (PCI_FUNC(dev->devfn) != 1)
@@ -4977,18 +4996,21 @@ static int pci_quirk_brcm_acs(struct pci_dev *dev, u16 acs_flags)
}
/*
- * Wangxun 10G/1G NICs have no ACS capability, and on multi-function
- * devices, peer-to-peer transactions are not be used between the functions.
- * So add an ACS quirk for below devices to isolate functions.
+ * Wangxun 40G/25G/10G/1G NICs have no ACS capability, but on
+ * multi-function devices, the hardware isolates the functions by
+ * directing all peer-to-peer traffic upstream as though PCI_ACS_RR and
+ * PCI_ACS_CR were set.
* SFxxx 1G NICs(em).
* RP1000/RP2000 10G NICs(sp).
+ * FF5xxx 40G/25G/10G NICs(aml).
*/
static int pci_quirk_wangxun_nic_acs(struct pci_dev *dev, u16 acs_flags)
{
switch (dev->device) {
- case 0x0100 ... 0x010F:
- case 0x1001:
- case 0x2001:
+ case 0x0100 ... 0x010F: /* EM */
+ case 0x1001: case 0x2001: /* SP */
+ case 0x5010: case 0x5025: case 0x5040: /* AML */
+ case 0x5110: case 0x5125: case 0x5140: /* AML */
return pci_acs_ctrl_enabled(acs_flags,
PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
}
@@ -5070,6 +5092,8 @@ static const struct pci_dev_acs_enabled {
/* QCOM QDF2xxx root ports */
{ PCI_VENDOR_ID_QCOM, 0x0400, pci_quirk_qcom_rp_acs },
{ PCI_VENDOR_ID_QCOM, 0x0401, pci_quirk_qcom_rp_acs },
+ /* QCOM SA8775P root port */
+ { PCI_VENDOR_ID_QCOM, 0x0115, pci_quirk_qcom_rp_acs },
/* HXT SD4800 root ports. The ACS design is same as QCOM QDF2xxx */
{ PCI_VENDOR_ID_HXT, 0x0401, pci_quirk_qcom_rp_acs },
/* Intel PCH root ports */
@@ -5099,6 +5123,10 @@ static const struct pci_dev_acs_enabled {
{ PCI_VENDOR_ID_BROADCOM, 0x1750, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_BROADCOM, 0x1751, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_BROADCOM, 0x1752, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_BROADCOM, 0x1760, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_BROADCOM, 0x1761, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_BROADCOM, 0x1762, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_BROADCOM, 0x1763, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_BROADCOM, 0xD714, pci_quirk_brcm_acs },
/* Amazon Annapurna Labs */
{ PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS, 0x0031, pci_quirk_al_acs },
@@ -5494,7 +5522,7 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x443, quirk_intel_qat_vf_cap);
* AMD Matisse USB 3.0 Host Controller 0x149c
* Intel 82579LM Gigabit Ethernet Controller 0x1502
* Intel 82579V Gigabit Ethernet Controller 0x1503
- *
+ * Mediatek MT7922 802.11ax PCI Express Wireless Network Adapter
*/
static void quirk_no_flr(struct pci_dev *dev)
{
@@ -5506,6 +5534,7 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x149c, quirk_no_flr);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x7901, quirk_no_flr);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1502, quirk_no_flr);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1503, quirk_no_flr);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_MEDIATEK, 0x0616, quirk_no_flr);
/* FLR may cause the SolidRun SNET DPU (rev 0x1) to hang */
static void quirk_no_flr_snet(struct pci_dev *dev)
@@ -5957,6 +5986,17 @@ SWITCHTEC_QUIRK(0x5552); /* PAXA 52XG5 */
SWITCHTEC_QUIRK(0x5536); /* PAXA 36XG5 */
SWITCHTEC_QUIRK(0x5528); /* PAXA 28XG5 */
+#define SWITCHTEC_PCI100X_QUIRK(vid) \
+ DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_EFAR, vid, \
+ PCI_CLASS_BRIDGE_OTHER, 8, quirk_switchtec_ntb_dma_alias)
+SWITCHTEC_PCI100X_QUIRK(0x1001); /* PCI1001XG4 */
+SWITCHTEC_PCI100X_QUIRK(0x1002); /* PCI1002XG4 */
+SWITCHTEC_PCI100X_QUIRK(0x1003); /* PCI1003XG4 */
+SWITCHTEC_PCI100X_QUIRK(0x1004); /* PCI1004XG4 */
+SWITCHTEC_PCI100X_QUIRK(0x1005); /* PCI1005XG4 */
+SWITCHTEC_PCI100X_QUIRK(0x1006); /* PCI1006XG4 */
+
/*
* The PLX NTB uses devfn proxy IDs to move TLPs between NT endpoints.
* These IDs are used to forward responses to the originator on the other
@@ -6206,8 +6246,9 @@ static void dpc_log_size(struct pci_dev *dev)
return;
if (FIELD_GET(PCI_EXP_DPC_RP_PIO_LOG_SIZE, val) == 0) {
- pci_info(dev, "Overriding RP PIO Log Size to 4\n");
- dev->dpc_rp_log_size = 4;
+ pci_info(dev, "Overriding RP PIO Log Size to %d\n",
+ PCIE_STD_NUM_TLP_HEADERLOG);
+ dev->dpc_rp_log_size = PCIE_STD_NUM_TLP_HEADERLOG;
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x461f, dpc_log_size);
@@ -6226,6 +6267,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a2b, dpc_log_size);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a2d, dpc_log_size);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a2f, dpc_log_size);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a31, dpc_log_size);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0xa72f, dpc_log_size);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0xa73f, dpc_log_size);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0xa76e, dpc_log_size);
#endif
@@ -6241,6 +6283,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0xa76e, dpc_log_size);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_XILINX, 0x5020, of_pci_make_dev_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_XILINX, 0x5021, of_pci_make_dev_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_REDHAT, 0x0005, of_pci_make_dev_node);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_EFAR, 0x9660, of_pci_make_dev_node);
/*
* Devices known to require a longer delay before first config space access
@@ -6253,3 +6296,23 @@ static void pci_fixup_d3cold_delay_1sec(struct pci_dev *pdev)
pdev->d3cold_delay = 1000;
}
DECLARE_PCI_FIXUP_FINAL(0x5555, 0x0004, pci_fixup_d3cold_delay_1sec);
+
+#ifdef CONFIG_PCIEAER
+static void pci_mask_replay_timer_timeout(struct pci_dev *pdev)
+{
+ struct pci_dev *parent = pci_upstream_bridge(pdev);
+ u32 val;
+
+ if (!parent || !parent->aer_cap)
+ return;
+
+ pci_info(parent, "mask Replay Timer Timeout Correctable Errors due to %s hardware defect",
+ pci_name(pdev));
+
+ pci_read_config_dword(parent, parent->aer_cap + PCI_ERR_COR_MASK, &val);
+ val |= PCI_ERR_COR_REP_TIMER;
+ pci_write_config_dword(parent, parent->aer_cap + PCI_ERR_COR_MASK, val);
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_GLI, 0x9750, pci_mask_replay_timer_timeout);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_GLI, 0x9755, pci_mask_replay_timer_timeout);
+#endif
diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c
index d749ea8250d6..445afdfa6498 100644
--- a/drivers/pci/remove.c
+++ b/drivers/pci/remove.c
@@ -1,6 +1,10 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/pci.h>
#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+
#include "pci.h"
static void pci_free_resources(struct pci_dev *dev)
@@ -13,26 +17,44 @@ static void pci_free_resources(struct pci_dev *dev)
}
}
+static void pci_pwrctrl_unregister(struct device *dev)
+{
+ struct device_node *np;
+ struct platform_device *pdev;
+
+ np = dev_of_node(dev);
+ if (!np)
+ return;
+
+ pdev = of_find_device_by_node(np);
+ if (!pdev)
+ return;
+
+ of_device_unregister(pdev);
+ of_node_clear_flag(np, OF_POPULATED);
+}
+
static void pci_stop_dev(struct pci_dev *dev)
{
pci_pme_active(dev, false);
- if (pci_dev_is_added(dev)) {
-
- device_release_driver(&dev->dev);
- pci_proc_detach_device(dev);
- pci_remove_sysfs_dev_files(dev);
- of_pci_remove_node(dev);
+ if (!pci_dev_test_and_clear_added(dev))
+ return;
- pci_dev_assign_added(dev, false);
- }
+ device_release_driver(&dev->dev);
+ pci_proc_detach_device(dev);
+ pci_remove_sysfs_dev_files(dev);
+ of_pci_remove_node(dev);
}
static void pci_destroy_dev(struct pci_dev *dev)
{
- if (!dev->dev.kobj.parent)
+ if (pci_dev_test_and_set_removed(dev))
return;
+ pci_doe_sysfs_teardown(dev);
+ pci_npem_remove(dev);
+
device_del(&dev->dev);
down_write(&pci_bus_sem);
@@ -42,6 +64,7 @@ static void pci_destroy_dev(struct pci_dev *dev)
pci_doe_destroy(dev);
pcie_aspm_exit_link_state(dev);
pci_bridge_d3_update(dev);
+ pci_pwrctrl_unregister(&dev->dev);
pci_free_resources(dev);
put_device(&dev->dev);
}
@@ -141,6 +164,8 @@ void pci_stop_root_bus(struct pci_bus *bus)
&bus->devices, bus_list)
pci_stop_bus_device(child);
+ of_pci_remove_host_bridge_node(host_bridge);
+
/* stop the host bridge */
device_release_driver(&host_bridge->dev);
}
@@ -162,7 +187,7 @@ void pci_remove_root_bus(struct pci_bus *bus)
#ifdef CONFIG_PCI_DOMAINS_GENERIC
/* Release domain_nr if it was dynamically allocated */
if (host_bridge->domain_nr == PCI_DOMAIN_NR_NOT_SET)
- pci_bus_release_domain_nr(bus, host_bridge->dev.parent);
+ pci_bus_release_domain_nr(host_bridge->dev.parent, bus->domain_nr);
#endif
pci_remove_bus(bus);
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 909e6a7c3cc3..e994c546422c 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -14,6 +14,7 @@
* tighter packing. Prefetchable range support.
*/
+#include <linux/bitops.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -21,6 +22,8 @@
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/cache.h>
+#include <linux/limits.h>
+#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include "pci.h"
@@ -124,13 +127,44 @@ static resource_size_t get_res_add_align(struct list_head *head,
return dev_res ? dev_res->min_align : 0;
}
+static void restore_dev_resource(struct pci_dev_resource *dev_res)
+{
+ struct resource *res = dev_res->res;
+
+ res->start = dev_res->start;
+ res->end = dev_res->end;
+ res->flags = dev_res->flags;
+}
+
+static bool pdev_resources_assignable(struct pci_dev *dev)
+{
+ u16 class = dev->class >> 8, command;
+
+ /* Don't touch classless devices or host bridges or IOAPICs */
+ if (class == PCI_CLASS_NOT_DEFINED || class == PCI_CLASS_BRIDGE_HOST)
+ return false;
+
+ /* Don't touch IOAPIC devices already enabled by firmware */
+ if (class == PCI_CLASS_SYSTEM_PIC) {
+ pci_read_config_word(dev, PCI_COMMAND, &command);
+ if (command & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY))
+ return false;
+ }
+
+ return true;
+}
+
/* Sort resources by alignment */
static void pdev_sort_resources(struct pci_dev *dev, struct list_head *head)
{
struct resource *r;
int i;
+ if (!pdev_resources_assignable(dev))
+ return;
+
pci_dev_for_each_resource(dev, r, i) {
+ const char *r_name = pci_resource_name(dev, i);
struct pci_dev_resource *dev_res, *tmp;
resource_size_t r_align;
struct list_head *n;
@@ -143,8 +177,8 @@ static void pdev_sort_resources(struct pci_dev *dev, struct list_head *head)
r_align = pci_resource_alignment(dev, r);
if (!r_align) {
- pci_warn(dev, "BAR %d: %pR has bogus alignment\n",
- i, r);
+ pci_warn(dev, "%s %pR: alignment must not be zero\n",
+ r_name, r);
continue;
}
@@ -153,6 +187,9 @@ static void pdev_sort_resources(struct pci_dev *dev, struct list_head *head)
panic("%s: kzalloc() failed!\n", __func__);
tmp->res = r;
tmp->dev = dev;
+ tmp->start = r->start;
+ tmp->end = r->end;
+ tmp->flags = r->flags;
/* Fallback is smallest one or list is empty */
n = head;
@@ -172,23 +209,16 @@ static void pdev_sort_resources(struct pci_dev *dev, struct list_head *head)
}
}
-static void __dev_sort_resources(struct pci_dev *dev, struct list_head *head)
+bool pci_resource_is_optional(const struct pci_dev *dev, int resno)
{
- u16 class = dev->class >> 8;
+ const struct resource *res = pci_resource_n(dev, resno);
- /* Don't touch classless devices or host bridges or IOAPICs */
- if (class == PCI_CLASS_NOT_DEFINED || class == PCI_CLASS_BRIDGE_HOST)
- return;
+ if (pci_resource_is_iov(resno))
+ return true;
+ if (resno == PCI_ROM_RESOURCE && !(res->flags & IORESOURCE_ROM_ENABLE))
+ return true;
- /* Don't touch IOAPIC devices already enabled by firmware */
- if (class == PCI_CLASS_SYSTEM_PIC) {
- u16 command;
- pci_read_config_word(dev, PCI_COMMAND, &command);
- if (command & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY))
- return;
- }
-
- pdev_sort_resources(dev, head);
+ return false;
}
static inline void reset_resource(struct resource *res)
@@ -212,10 +242,11 @@ static inline void reset_resource(struct resource *res)
static void reassign_resources_sorted(struct list_head *realloc_head,
struct list_head *head)
{
- struct resource *res;
- const char *res_name;
struct pci_dev_resource *add_res, *tmp;
struct pci_dev_resource *dev_res;
+ struct pci_dev *dev;
+ struct resource *res;
+ const char *res_name;
resource_size_t add_size, align;
int idx;
@@ -223,9 +254,15 @@ static void reassign_resources_sorted(struct list_head *realloc_head,
bool found_match = false;
res = add_res->res;
+ dev = add_res->dev;
+ idx = pci_resource_num(dev, res);
- /* Skip resource that has been reset */
- if (!res->flags)
+ /*
+ * Skip resource that failed the earlier assignment and is
+ * not optional as it would just fail again.
+ */
+ if (!res->parent && resource_size(res) &&
+ !pci_resource_is_optional(dev, idx))
goto out;
/* Skip this resource if not found in head list */
@@ -238,21 +275,22 @@ static void reassign_resources_sorted(struct list_head *realloc_head,
if (!found_match) /* Just skip */
continue;
- idx = res - &add_res->dev->resource[0];
- res_name = pci_resource_name(add_res->dev, idx);
+ res_name = pci_resource_name(dev, idx);
add_size = add_res->add_size;
align = add_res->min_align;
- if (!resource_size(res)) {
- res->start = align;
- res->end = res->start + add_size - 1;
- if (pci_assign_resource(add_res->dev, idx))
- reset_resource(res);
- } else {
+ if (!res->parent) {
+ resource_set_range(res, align,
+ resource_size(res) + add_size);
+ if (pci_assign_resource(dev, idx)) {
+ pci_dbg(dev,
+ "%s %pR: ignoring failure in optional allocation\n",
+ res_name, res);
+ }
+ } else if (add_size > 0) {
res->flags |= add_res->flags &
(IORESOURCE_STARTALIGN|IORESOURCE_SIZEALIGN);
- if (pci_reassign_resource(add_res->dev, idx,
- add_size, align))
- pci_info(add_res->dev, "%s %pR: failed to add %llx\n",
+ if (pci_reassign_resource(dev, idx, add_size, align))
+ pci_info(dev, "%s %pR: failed to add optional %llx\n",
res_name, res,
(unsigned long long) add_size);
}
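
Several hunks in this series replace open-coded start/end arithmetic with resource_set_range() and resource_set_size(). For reference, a rough paraphrase of those include/linux/ioport.h helpers:

	/* Rough paraphrase of the ioport.h helpers used throughout this diff */
	static inline void resource_set_range(struct resource *res,
					      resource_size_t start,
					      resource_size_t size)
	{
		res->start = start;
		res->end = start + size - 1;
	}

	static inline void resource_set_size(struct resource *res,
					     resource_size_t size)
	{
		res->end = res->start + size - 1;
	}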
@@ -268,36 +306,39 @@ out:
* @head: Head of the list tracking requests for resources
* @fail_head: Head of the list tracking requests that could not be
* allocated
+ * @optional: Also assign optional resources
*
* Satisfy resource requests of each element in the list. Add requests that
* could not be satisfied to the failed_list.
*/
static void assign_requested_resources_sorted(struct list_head *head,
- struct list_head *fail_head)
+ struct list_head *fail_head,
+ bool optional)
{
- struct resource *res;
struct pci_dev_resource *dev_res;
+ struct resource *res;
+ struct pci_dev *dev;
+ bool optional_res;
int idx;
list_for_each_entry(dev_res, head, list) {
res = dev_res->res;
- idx = res - &dev_res->dev->resource[0];
- if (resource_size(res) &&
- pci_assign_resource(dev_res->dev, idx)) {
+ dev = dev_res->dev;
+ idx = pci_resource_num(dev, res);
+ optional_res = pci_resource_is_optional(dev, idx);
+
+ if (!resource_size(res))
+ continue;
+
+ if (!optional && optional_res)
+ continue;
+
+ if (pci_assign_resource(dev, idx)) {
if (fail_head) {
- /*
- * If the failed resource is a ROM BAR and
- * it will be enabled later, don't add it
- * to the list.
- */
- if (!((idx == PCI_ROM_RESOURCE) &&
- (!(res->flags & IORESOURCE_ROM_ENABLE))))
- add_to_list(fail_head,
- dev_res->dev, res,
- 0 /* don't care */,
- 0 /* don't care */);
+ add_to_list(fail_head, dev, res,
+ 0 /* don't care */,
+ 0 /* don't care */);
}
- reset_resource(res);
}
}
}
@@ -342,6 +383,20 @@ static bool pci_need_to_release(unsigned long mask, struct resource *res)
return false; /* Should not get here */
}
+/* Return: @true if assignment of a required resource failed. */
+static bool pci_required_resource_failed(struct list_head *fail_head)
+{
+ struct pci_dev_resource *fail_res;
+
+ list_for_each_entry(fail_res, fail_head, list) {
+ int idx = pci_resource_num(fail_res->dev, fail_res->res);
+
+ if (!pci_resource_is_optional(fail_res->dev, idx))
+ return true;
+ }
+ return false;
+}
+
static void __assign_resources_sorted(struct list_head *head,
struct list_head *realloc_head,
struct list_head *fail_head)
@@ -351,9 +406,11 @@ static void __assign_resources_sorted(struct list_head *head,
* adjacent, so later reassign can not reallocate them one by one in
* parent resource window.
*
- * Try to assign requested + add_size at beginning. If could do that,
- * could get out early. If could not do that, we still try to assign
- * requested at first, then try to reassign add_size for some resources.
+ * Try to assign the required and any optional resources at the
+ * beginning (add_size included). If all required resources were
+ * successfully assigned, get out early. If that could not be done, we
+ * still try to assign the required resources first and then try to
+ * reassign some optional resources.
*
* Separate three resource type checking if we need to release
* assigned resource after requested + add_size try.
@@ -369,27 +426,36 @@ static void __assign_resources_sorted(struct list_head *head,
*/
LIST_HEAD(save_head);
LIST_HEAD(local_fail_head);
+ LIST_HEAD(dummy_head);
struct pci_dev_resource *save_res;
struct pci_dev_resource *dev_res, *tmp_res, *dev_res2;
+ struct resource *res;
+ struct pci_dev *dev;
+ const char *res_name;
+ int idx;
unsigned long fail_type;
resource_size_t add_align, align;
+ if (!realloc_head)
+ realloc_head = &dummy_head;
+
/* Check if optional add_size is there */
- if (!realloc_head || list_empty(realloc_head))
- goto requested_and_reassign;
+ if (list_empty(realloc_head))
+ goto assign;
/* Save original start, end, flags etc at first */
list_for_each_entry(dev_res, head, list) {
if (add_to_list(&save_head, dev_res->dev, dev_res->res, 0, 0)) {
free_list(&save_head);
- goto requested_and_reassign;
+ goto assign;
}
}
/* Update res in head list with add_size in realloc_head list */
list_for_each_entry_safe(dev_res, tmp_res, head, list) {
- dev_res->res->end += get_res_add_size(realloc_head,
- dev_res->res);
+ res = dev_res->res;
+
+ res->end += get_res_add_size(realloc_head, res);
/*
* There are two kinds of additional resources in the list:
@@ -397,10 +463,10 @@ static void __assign_resources_sorted(struct list_head *head,
* 2. SR-IOV resource -- IORESOURCE_SIZEALIGN
* Here just fix the additional alignment for bridge
*/
- if (!(dev_res->res->flags & IORESOURCE_STARTALIGN))
+ if (!(res->flags & IORESOURCE_STARTALIGN))
continue;
- add_align = get_res_add_align(realloc_head, dev_res->res);
+ add_align = get_res_add_align(realloc_head, res);
/*
* The "head" list is sorted by alignment so resources with
@@ -409,11 +475,8 @@ static void __assign_resources_sorted(struct list_head *head,
* need to reorder the list by alignment to make it
* consistent.
*/
- if (add_align > dev_res->res->start) {
- resource_size_t r_size = resource_size(dev_res->res);
-
- dev_res->res->start = add_align;
- dev_res->res->end = add_align + r_size - 1;
+ if (add_align > res->start) {
+ resource_set_range(res, add_align, resource_size(res));
list_for_each_entry(dev_res2, head, list) {
align = pci_resource_alignment(dev_res2->dev,
@@ -428,54 +491,95 @@ static void __assign_resources_sorted(struct list_head *head,
}
- /* Try updated head list with add_size added */
- assign_requested_resources_sorted(head, &local_fail_head);
+assign:
+ assign_requested_resources_sorted(head, &local_fail_head, true);
- /* All assigned with add_size? */
+ /* All non-optional resources assigned? */
if (list_empty(&local_fail_head)) {
/* Remove head list from realloc_head list */
list_for_each_entry(dev_res, head, list)
remove_from_list(realloc_head, dev_res->res);
free_list(&save_head);
- free_list(head);
- return;
+ goto out;
+ }
+
+ /* If only optional resources failed and realloc_head is empty, nothing more to do. */
+ if (!pci_required_resource_failed(&local_fail_head) &&
+ list_empty(realloc_head)) {
+ list_for_each_entry(save_res, &save_head, list) {
+ struct resource *res = save_res->res;
+
+ if (res->parent)
+ continue;
+
+ restore_dev_resource(save_res);
+ }
+ free_list(&local_fail_head);
+ free_list(&save_head);
+ goto out;
}
/* Check failed type */
fail_type = pci_fail_res_type_mask(&local_fail_head);
/* Remove not need to be released assigned res from head list etc */
- list_for_each_entry_safe(dev_res, tmp_res, head, list)
- if (dev_res->res->parent &&
- !pci_need_to_release(fail_type, dev_res->res)) {
+ list_for_each_entry_safe(dev_res, tmp_res, head, list) {
+ res = dev_res->res;
+
+ if (res->parent && !pci_need_to_release(fail_type, res)) {
/* Remove it from realloc_head list */
- remove_from_list(realloc_head, dev_res->res);
- remove_from_list(&save_head, dev_res->res);
+ remove_from_list(realloc_head, res);
+ remove_from_list(&save_head, res);
list_del(&dev_res->list);
kfree(dev_res);
}
+ }
free_list(&local_fail_head);
/* Release assigned resource */
- list_for_each_entry(dev_res, head, list)
- if (dev_res->res->parent)
- release_resource(dev_res->res);
- /* Restore start/end/flags from saved list */
- list_for_each_entry(save_res, &save_head, list) {
- struct resource *res = save_res->res;
+ list_for_each_entry(dev_res, head, list) {
+ res = dev_res->res;
+ dev = dev_res->dev;
+
+ if (!res->parent)
+ continue;
+
+ idx = pci_resource_num(dev, res);
+ res_name = pci_resource_name(dev, idx);
+ pci_dbg(dev, "%s %pR: releasing\n", res_name, res);
- res->start = save_res->start;
- res->end = save_res->end;
- res->flags = save_res->flags;
+ release_resource(res);
+ restore_dev_resource(dev_res);
}
+ /* Restore start/end/flags from saved list */
+ list_for_each_entry(save_res, &save_head, list)
+ restore_dev_resource(save_res);
free_list(&save_head);
-requested_and_reassign:
/* Satisfy the must-have resource requests */
- assign_requested_resources_sorted(head, fail_head);
+ assign_requested_resources_sorted(head, NULL, false);
/* Try to satisfy any additional optional resource requests */
- if (realloc_head)
+ if (!list_empty(realloc_head))
reassign_resources_sorted(realloc_head, head);
+
+out:
+ /* Reset any failed resource; fail_head cannot be used here as it can be NULL. */
+ list_for_each_entry(dev_res, head, list) {
+ res = dev_res->res;
+ dev = dev_res->dev;
+
+ if (res->parent)
+ continue;
+
+ if (fail_head) {
+ add_to_list(fail_head, dev, res,
+ 0 /* don't care */,
+ 0 /* don't care */);
+ }
+
+ reset_resource(res);
+ }
+
free_list(head);
}
@@ -485,7 +589,7 @@ static void pdev_assign_resources_sorted(struct pci_dev *dev,
{
LIST_HEAD(head);
- __dev_sort_resources(dev, &head);
+ pdev_sort_resources(dev, &head);
__assign_resources_sorted(&head, add_head, fail_head);
}
@@ -498,7 +602,7 @@ static void pbus_assign_resources_sorted(const struct pci_bus *bus,
LIST_HEAD(head);
list_for_each_entry(dev, &bus->devices, bus_list)
- __dev_sort_resources(dev, &head);
+ pdev_sort_resources(dev, &head);
__assign_resources_sorted(&head, realloc_head, fail_head);
}
@@ -691,7 +795,7 @@ void __weak pcibios_setup_bridge(struct pci_bus *bus, unsigned long type)
{
}
-void pci_setup_bridge(struct pci_bus *bus)
+static void pci_setup_bridge(struct pci_bus *bus)
{
unsigned long type = IORESOURCE_IO | IORESOURCE_MEM |
IORESOURCE_PREFETCH;
@@ -811,11 +915,9 @@ static resource_size_t calculate_iosize(resource_size_t size,
size = (size & 0xff) + ((size & ~0xffUL) << 2);
#endif
size = size + size1;
- if (size < old_size)
- size = old_size;
- size = ALIGN(max(size, add_size) + children_add_size, align);
- return size;
+ size = max(size, add_size) + children_add_size;
+ return ALIGN(max(size, old_size), align);
}
static resource_size_t calculate_memsize(resource_size_t size,
@@ -829,11 +931,9 @@ static resource_size_t calculate_memsize(resource_size_t size,
size = min_size;
if (old_size == 1)
old_size = 0;
- if (size < old_size)
- size = old_size;
- size = ALIGN(max(size, add_size) + children_add_size, align);
- return size;
+ size = max(size, add_size) + children_add_size;
+ return ALIGN(max(size, old_size), align);
}
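
To see how the rewritten sizing arithmetic differs from the old one, a worked example with invented values:

	/*
	 * Invented values: size = 0x300000, add_size = 0x100000,
	 * children_add_size = 0x200000, old_size = 0x600000, align = SZ_1M.
	 *
	 * Old code: size < old_size, so size = 0x600000;
	 *           ALIGN(max(0x600000, 0x100000) + 0x200000, SZ_1M) = 0x800000
	 *
	 * New code: max(0x300000, 0x100000) + 0x200000 = 0x500000;
	 *           ALIGN(max(0x500000, 0x600000), SZ_1M)            = 0x600000
	 *
	 * The old window size now acts as a floor on the final result instead
	 * of a base that add_size and children_add_size are stacked on top of.
	 */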
resource_size_t __weak pcibios_window_alignment(struct pci_bus *bus,
@@ -842,9 +942,9 @@ resource_size_t __weak pcibios_window_alignment(struct pci_bus *bus,
return 1;
}
-#define PCI_P2P_DEFAULT_MEM_ALIGN 0x100000 /* 1MiB */
-#define PCI_P2P_DEFAULT_IO_ALIGN 0x1000 /* 4KiB */
-#define PCI_P2P_DEFAULT_IO_ALIGN_1K 0x400 /* 1KiB */
+#define PCI_P2P_DEFAULT_MEM_ALIGN SZ_1M
+#define PCI_P2P_DEFAULT_IO_ALIGN SZ_4K
+#define PCI_P2P_DEFAULT_IO_ALIGN_1K SZ_1K
static resource_size_t window_alignment(struct pci_bus *bus, unsigned long type)
{
@@ -909,7 +1009,7 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
continue;
r_size = resource_size(r);
- if (r_size < 0x400)
+ if (r_size < SZ_1K)
/* Might be re-aligned for ISA */
size += r_size;
else
@@ -926,9 +1026,14 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
size0 = calculate_iosize(size, min_size, size1, 0, 0,
resource_size(b_res), min_align);
- size1 = (!realloc_head || (realloc_head && !add_size && !children_add_size)) ? size0 :
- calculate_iosize(size, min_size, size1, add_size, children_add_size,
- resource_size(b_res), min_align);
+
+ size1 = size0;
+ if (realloc_head && (add_size > 0 || children_add_size > 0)) {
+ size1 = calculate_iosize(size, min_size, size1, add_size,
+ children_add_size, resource_size(b_res),
+ min_align);
+ }
+
if (!size0 && !size1) {
if (bus->self && (b_res->start || b_res->end))
pci_info(bus->self, "disabling bridge window %pR to %pR (unused)\n",
@@ -937,8 +1042,7 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
return;
}
- b_res->start = min_align;
- b_res->end = b_res->start + size0 - 1;
+ resource_set_range(b_res, min_align, size0);
b_res->flags |= IORESOURCE_STARTALIGN;
if (bus->self && size1 > size0 && realloc_head) {
add_to_list(realloc_head, bus->self, b_res, size1-size0,
@@ -959,7 +1063,7 @@ static inline resource_size_t calculate_mem_align(resource_size_t *aligns,
for (order = 0; order <= max_order; order++) {
resource_size_t align1 = 1;
- align1 <<= (order + 20);
+ align1 <<= order + __ffs(SZ_1M);
if (!align)
min_align = align1;
@@ -972,6 +1076,67 @@ static inline resource_size_t calculate_mem_align(resource_size_t *aligns,
}
/**
+ * pbus_upstream_space_available - Check that no upstream resource limits the allocation
+ * @bus: The bus
+ * @mask: Mask the resource flag, then compare it with type
+ * @type: The type of resource from bridge
+ * @size: The size required from the bridge window
+ * @align: Required alignment for the resource
+ *
+ * Checks that @size can fit inside the upstream bridge resources that are
+ * already assigned.
+ *
+ * Return: %true if enough space is available on all assigned upstream
+ * resources.
+ */
+static bool pbus_upstream_space_available(struct pci_bus *bus, unsigned long mask,
+ unsigned long type, resource_size_t size,
+ resource_size_t align)
+{
+ struct resource_constraint constraint = {
+ .max = RESOURCE_SIZE_MAX,
+ .align = align,
+ };
+ struct pci_bus *downstream = bus;
+ struct resource *r;
+
+ while ((bus = bus->parent)) {
+ if (pci_is_root_bus(bus))
+ break;
+
+ pci_bus_for_each_resource(bus, r) {
+ if (!r || !r->parent || (r->flags & mask) != type)
+ continue;
+
+ if (resource_size(r) >= size) {
+ struct resource gap = {};
+
+ if (find_resource_space(r, &gap, size, &constraint) == 0) {
+ gap.flags = type;
+ pci_dbg(bus->self,
+ "Assigned bridge window %pR to %pR free space at %pR\n",
+ r, &bus->busn_res, &gap);
+ return true;
+ }
+ }
+
+ if (bus->self) {
+ pci_info(bus->self,
+ "Assigned bridge window %pR to %pR cannot fit 0x%llx required for %s bridging to %pR\n",
+ r, &bus->busn_res,
+ (unsigned long long)size,
+ pci_name(downstream->self),
+ &downstream->busn_res);
+ }
+
+ return false;
+ }
+ }
+
+ return true;
+}
+
+/**
* pbus_size_mem() - Size the memory window of a given bus
*
* @bus: The bus
@@ -997,8 +1162,8 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
struct list_head *realloc_head)
{
struct pci_dev *dev;
- resource_size_t min_align, align, size, size0, size1;
- resource_size_t aligns[24]; /* Alignments from 1MB to 8TB */
+ resource_size_t min_align, win_align, align, size, size0, size1 = 0;
+ resource_size_t aligns[28]; /* Alignments from 1MB to 128TB */
int order, max_order;
struct resource *b_res = find_bus_resource_of_type(bus,
mask | IORESOURCE_PREFETCH, type);
@@ -1031,17 +1196,15 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
(r->flags & mask) != type3))
continue;
r_size = resource_size(r);
-#ifdef CONFIG_PCI_IOV
+
/* Put SRIOV requested res to the optional list */
- if (realloc_head && i >= PCI_IOV_RESOURCES &&
- i <= PCI_IOV_RESOURCE_END) {
+ if (realloc_head && pci_resource_is_optional(dev, i)) {
add_align = max(pci_resource_alignment(dev, r), add_align);
- r->end = r->start - 1;
- add_to_list(realloc_head, dev, r, r_size, 0 /* Don't care */);
+ add_to_list(realloc_head, dev, r, 0, 0 /* Don't care */);
children_add_size += r_size;
continue;
}
-#endif
+
/*
* aligns[0] is for 1MB (since bridge memory
* windows are always at least 1MB aligned), so
@@ -1049,7 +1212,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
* resources.
*/
align = pci_resource_alignment(dev, r);
- order = __ffs(align) - 20;
+ order = __ffs(align) - __ffs(SZ_1M);
if (order < 0)
order = 0;
if (order >= ARRAY_SIZE(aligns)) {
@@ -1076,13 +1239,39 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
}
}
+ win_align = window_alignment(bus, b_res->flags);
min_align = calculate_mem_align(aligns, max_order);
- min_align = max(min_align, window_alignment(bus, b_res->flags));
+ min_align = max(min_align, win_align);
size0 = calculate_memsize(size, min_size, 0, 0, resource_size(b_res), min_align);
- add_align = max(min_align, add_align);
- size1 = (!realloc_head || (realloc_head && !add_size && !children_add_size)) ? size0 :
- calculate_memsize(size, min_size, add_size, children_add_size,
- resource_size(b_res), add_align);
+
+ if (bus->self && size0 &&
+ !pbus_upstream_space_available(bus, mask | IORESOURCE_PREFETCH, type,
+ size0, min_align)) {
+ min_align = 1ULL << (max_order + __ffs(SZ_1M));
+ min_align = max(min_align, win_align);
+ size0 = calculate_memsize(size, min_size, 0, 0, resource_size(b_res), win_align);
+ pci_info(bus->self, "bridge window %pR to %pR requires relaxed alignment rules\n",
+ b_res, &bus->busn_res);
+ }
+
+ if (realloc_head && (add_size > 0 || children_add_size > 0)) {
+ add_align = max(min_align, add_align);
+ size1 = calculate_memsize(size, min_size, add_size, children_add_size,
+ resource_size(b_res), add_align);
+
+ if (bus->self && size1 &&
+ !pbus_upstream_space_available(bus, mask | IORESOURCE_PREFETCH, type,
+ size1, add_align)) {
+ min_align = 1ULL << (max_order + __ffs(SZ_1M));
+ min_align = max(min_align, win_align);
+ size1 = calculate_memsize(size, min_size, add_size, children_add_size,
+ resource_size(b_res), win_align);
+ pci_info(bus->self,
+ "bridge window %pR to %pR requires relaxed alignment rules\n",
+ b_res, &bus->busn_res);
+ }
+ }
+
if (!size0 && !size1) {
if (bus->self && (b_res->start || b_res->end))
pci_info(bus->self, "disabling bridge window %pR to %pR (unused)\n",
@@ -1090,8 +1279,8 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
b_res->flags = 0;
return 0;
}
- b_res->start = min_align;
- b_res->end = size0 + min_align - 1;
+
+ resource_set_range(b_res, min_align, size0);
b_res->flags |= IORESOURCE_STARTALIGN;
if (bus->self && size1 > size0 && realloc_head) {
add_to_list(realloc_head, bus->self, b_res, size1-size0, add_align);
@@ -1127,8 +1316,7 @@ static void pci_bus_size_cardbus(struct pci_bus *bus,
* Reserve some resources for CardBus. We reserve a fixed amount
* of bus space for CardBus bridges.
*/
- b_res->start = pci_cardbus_io_size;
- b_res->end = b_res->start + pci_cardbus_io_size - 1;
+ resource_set_range(b_res, pci_cardbus_io_size, pci_cardbus_io_size);
b_res->flags |= IORESOURCE_IO | IORESOURCE_STARTALIGN;
if (realloc_head) {
b_res->end -= pci_cardbus_io_size;
@@ -1140,8 +1328,7 @@ handle_b_res_1:
b_res = &bridge->resource[PCI_CB_BRIDGE_IO_1_WINDOW];
if (b_res->parent)
goto handle_b_res_2;
- b_res->start = pci_cardbus_io_size;
- b_res->end = b_res->start + pci_cardbus_io_size - 1;
+ resource_set_range(b_res, pci_cardbus_io_size, pci_cardbus_io_size);
b_res->flags |= IORESOURCE_IO | IORESOURCE_STARTALIGN;
if (realloc_head) {
b_res->end -= pci_cardbus_io_size;
@@ -1174,8 +1361,8 @@ handle_b_res_2:
* Otherwise, allocate one region of twice the size.
*/
if (ctrl & PCI_CB_BRIDGE_CTL_PREFETCH_MEM0) {
- b_res->start = pci_cardbus_mem_size;
- b_res->end = b_res->start + pci_cardbus_mem_size - 1;
+ resource_set_range(b_res, pci_cardbus_mem_size,
+ pci_cardbus_mem_size);
b_res->flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH |
IORESOURCE_STARTALIGN;
if (realloc_head) {
@@ -1192,8 +1379,7 @@ handle_b_res_3:
b_res = &bridge->resource[PCI_CB_BRIDGE_MEM_1_WINDOW];
if (b_res->parent)
goto handle_done;
- b_res->start = pci_cardbus_mem_size;
- b_res->end = b_res->start + b_res_3_size - 1;
+ resource_set_range(b_res, pci_cardbus_mem_size, b_res_3_size);
b_res->flags |= IORESOURCE_MEM | IORESOURCE_STARTALIGN;
if (realloc_head) {
b_res->end -= b_res_3_size;
@@ -1569,8 +1755,7 @@ static void pci_bridge_release_resources(struct pci_bus *bus,
pci_info(dev, "resource %d %pR released\n",
PCI_BRIDGE_RESOURCES + idx, r);
/* Keep the old size */
- r->end = resource_size(r) - 1;
- r->start = 0;
+ resource_set_range(r, 0, resource_size(r));
r->flags = 0;
/* Avoiding touch the one without PREF */
@@ -1772,7 +1957,7 @@ static void adjust_bridge_window(struct pci_dev *bridge, struct resource *res,
return;
}
- res->end = res->start + new_size - 1;
+ resource_set_size(res, new_size);
/* If the resource is part of the add_list, remove it now */
if (add_list)
@@ -1809,7 +1994,7 @@ static void remove_dev_resources(struct pci_dev *dev, struct resource *io,
* Make sure prefetchable memory is reduced from
* the correct resource. Specifically we put 32-bit
* prefetchable memory in non-prefetchable window
- * if there is an 64-bit prefetchable window.
+ * if there is a 64-bit prefetchable window.
*
* See comments in __pci_bus_size_bridges() for
* more information.
@@ -1824,6 +2009,9 @@ static void remove_dev_resources(struct pci_dev *dev, struct resource *io,
}
}
+#define ALIGN_DOWN_IF_NONZERO(addr, align) \
+ ((align) ? ALIGN_DOWN((addr), (align)) : (addr))
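
A quick illustration of the new macro with invented values:

	/*
	 * ALIGN_DOWN_IF_NONZERO(0x2345, SZ_4K) == 0x2000
	 * ALIGN_DOWN_IF_NONZERO(0x2345, 0)     == 0x2345
	 *
	 * i.e. it matches the open-coded
	 * "align ? ALIGN_DOWN(addr, align) : addr" pattern replaced below.
	 */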
+
/*
* io, mmio and mmio_pref contain the total amount of bridge window space
* available. This includes the minimal space needed to cover all the
@@ -1935,8 +2123,7 @@ static void pci_bus_distribute_available_resources(struct pci_bus *bus,
* what is available).
*/
align = pci_resource_alignment(dev, res);
- io.end = align ? io.start + ALIGN_DOWN(io_per_b, align) - 1
- : io.start + io_per_b - 1;
+ resource_set_size(&io, ALIGN_DOWN_IF_NONZERO(io_per_b, align));
/*
* The x_per_b holds the extra resource space that can be
@@ -1948,15 +2135,14 @@ static void pci_bus_distribute_available_resources(struct pci_bus *bus,
res = &dev->resource[PCI_BRIDGE_MEM_WINDOW];
align = pci_resource_alignment(dev, res);
- mmio.end = align ? mmio.start + ALIGN_DOWN(mmio_per_b, align) - 1
- : mmio.start + mmio_per_b - 1;
+ resource_set_size(&mmio,
+ ALIGN_DOWN_IF_NONZERO(mmio_per_b, align));
mmio.start -= resource_size(res);
res = &dev->resource[PCI_BRIDGE_PREF_MEM_WINDOW];
align = pci_resource_alignment(dev, res);
- mmio_pref.end = align ? mmio_pref.start +
- ALIGN_DOWN(mmio_pref_per_b, align) - 1
- : mmio_pref.start + mmio_pref_per_b - 1;
+ resource_set_size(&mmio_pref,
+ ALIGN_DOWN_IF_NONZERO(mmio_pref_per_b, align));
mmio_pref.start -= resource_size(res);
pci_bus_distribute_available_resources(b, add_list, io, mmio,
@@ -2030,13 +2216,49 @@ pci_root_bus_distribute_available_resources(struct pci_bus *bus,
* in case of root bus.
*/
if (bridge && pci_bridge_resources_not_assigned(dev))
- pci_bridge_distribute_available_resources(bridge,
- add_list);
+ pci_bridge_distribute_available_resources(dev, add_list);
else
pci_root_bus_distribute_available_resources(b, add_list);
}
}
+static void pci_prepare_next_assign_round(struct list_head *fail_head,
+ int tried_times,
+ enum release_type rel_type)
+{
+ struct pci_dev_resource *fail_res;
+
+ pr_info("PCI: No. %d try to assign unassigned res\n", tried_times + 1);
+
+ /*
+ * Try to release leaf bridge's resources that aren't big
+ * enough to contain child device resources.
+ */
+ list_for_each_entry(fail_res, fail_head, list) {
+ pci_bus_release_bridge_resources(fail_res->dev->bus,
+ fail_res->flags & PCI_RES_TYPE_MASK,
+ rel_type);
+ }
+
+ /* Restore size and flags */
+ list_for_each_entry(fail_res, fail_head, list) {
+ struct resource *res = fail_res->res;
+ struct pci_dev *dev = fail_res->dev;
+ int idx = pci_resource_num(dev, res);
+
+ restore_dev_resource(fail_res);
+
+ if (!pci_is_bridge(dev))
+ continue;
+
+ if (idx >= PCI_BRIDGE_RESOURCES &&
+ idx <= PCI_BRIDGE_RESOURCE_END)
+ res->flags = 0;
+ }
+
+ free_list(fail_head);
+}
+
/*
* First try will not touch PCI bridge res.
* Second and later try will clear small leaf bridge res.
@@ -2050,7 +2272,6 @@ void pci_assign_unassigned_root_bus_resources(struct pci_bus *bus)
int tried_times = 0;
enum release_type rel_type = leaf_only;
LIST_HEAD(fail_head);
- struct pci_dev_resource *fail_res;
int pci_try_num = 1;
enum enable_type enable_local;
@@ -2064,78 +2285,50 @@ void pci_assign_unassigned_root_bus_resources(struct pci_bus *bus)
max_depth, pci_try_num);
}
-again:
- /*
- * Last try will use add_list, otherwise will try good to have as must
- * have, so can realloc parent bridge resource
- */
- if (tried_times + 1 == pci_try_num)
- add_list = &realloc_head;
- /*
- * Depth first, calculate sizes and alignments of all subordinate buses.
- */
- __pci_bus_size_bridges(bus, add_list);
-
- pci_root_bus_distribute_available_resources(bus, add_list);
-
- /* Depth last, allocate resources and update the hardware. */
- __pci_bus_assign_resources(bus, add_list, &fail_head);
- if (add_list)
- BUG_ON(!list_empty(add_list));
- tried_times++;
-
- /* Any device complain? */
- if (list_empty(&fail_head))
- goto dump;
-
- if (tried_times >= pci_try_num) {
- if (enable_local == undefined)
- dev_info(&bus->dev, "Some PCI device resources are unassigned, try booting with pci=realloc\n");
- else if (enable_local == auto_enabled)
- dev_info(&bus->dev, "Automatically enabled pci realloc, if you have problem, try booting with pci=realloc=off\n");
-
- free_list(&fail_head);
- goto dump;
- }
+ while (1) {
+ /*
+ * Last try will use add_list, otherwise will try good to
+ * have as must have, so can realloc parent bridge resource
+ */
+ if (tried_times + 1 == pci_try_num)
+ add_list = &realloc_head;
+ /*
+ * Depth first, calculate sizes and alignments of all
+ * subordinate buses.
+ */
+ __pci_bus_size_bridges(bus, add_list);
- dev_info(&bus->dev, "No. %d try to assign unassigned res\n",
- tried_times + 1);
+ pci_root_bus_distribute_available_resources(bus, add_list);
- /* Third times and later will not check if it is leaf */
- if ((tried_times + 1) > 2)
- rel_type = whole_subtree;
+ /* Depth last, allocate resources and update the hardware. */
+ __pci_bus_assign_resources(bus, add_list, &fail_head);
+ if (add_list)
+ BUG_ON(!list_empty(add_list));
+ tried_times++;
- /*
- * Try to release leaf bridge's resources that doesn't fit resource of
- * child device under that bridge.
- */
- list_for_each_entry(fail_res, &fail_head, list)
- pci_bus_release_bridge_resources(fail_res->dev->bus,
- fail_res->flags & PCI_RES_TYPE_MASK,
- rel_type);
+ /* Any device complain? */
+ if (list_empty(&fail_head))
+ break;
- /* Restore size and flags */
- list_for_each_entry(fail_res, &fail_head, list) {
- struct resource *res = fail_res->res;
- int idx;
+ if (tried_times >= pci_try_num) {
+ if (enable_local == undefined) {
+ dev_info(&bus->dev,
+ "Some PCI device resources are unassigned, try booting with pci=realloc\n");
+ } else if (enable_local == auto_enabled) {
+ dev_info(&bus->dev,
+ "Automatically enabled pci realloc, if you have problem, try booting with pci=realloc=off\n");
+ }
+ free_list(&fail_head);
+ break;
+ }
- res->start = fail_res->start;
- res->end = fail_res->end;
- res->flags = fail_res->flags;
+ /* Third times and later will not check if it is leaf */
+ if (tried_times + 1 > 2)
+ rel_type = whole_subtree;
- if (pci_is_bridge(fail_res->dev)) {
- idx = res - &fail_res->dev->resource[0];
- if (idx >= PCI_BRIDGE_RESOURCES &&
- idx <= PCI_BRIDGE_RESOURCE_END)
- res->flags = 0;
- }
+ pci_prepare_next_assign_round(&fail_head, tried_times, rel_type);
}
- free_list(&fail_head);
- goto again;
-
-dump:
- /* Dump the resource on buses */
pci_bus_dump_resources(bus);
}
@@ -2157,71 +2350,40 @@ void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge)
struct pci_bus *parent = bridge->subordinate;
/* List of resources that want additional resources */
LIST_HEAD(add_list);
-
int tried_times = 0;
LIST_HEAD(fail_head);
- struct pci_dev_resource *fail_res;
- int retval;
-
-again:
- __pci_bus_size_bridges(parent, &add_list);
-
- /*
- * Distribute remaining resources (if any) equally between hotplug
- * bridges below. This makes it possible to extend the hierarchy
- * later without running out of resources.
- */
- pci_bridge_distribute_available_resources(bridge, &add_list);
-
- __pci_bridge_assign_resources(bridge, &add_list, &fail_head);
- BUG_ON(!list_empty(&add_list));
- tried_times++;
-
- if (list_empty(&fail_head))
- goto enable_all;
-
- if (tried_times >= 2) {
- /* Still fail, don't need to try more */
- free_list(&fail_head);
- goto enable_all;
- }
+ int ret;
- printk(KERN_DEBUG "PCI: No. %d try to assign unassigned res\n",
- tried_times + 1);
+ while (1) {
+ __pci_bus_size_bridges(parent, &add_list);
- /*
- * Try to release leaf bridge's resources that aren't big enough
- * to contain child device resources.
- */
- list_for_each_entry(fail_res, &fail_head, list)
- pci_bus_release_bridge_resources(fail_res->dev->bus,
- fail_res->flags & PCI_RES_TYPE_MASK,
- whole_subtree);
+ /*
+ * Distribute remaining resources (if any) equally between
+ * hotplug bridges below. This makes it possible to extend
+ * the hierarchy later without running out of resources.
+ */
+ pci_bridge_distribute_available_resources(bridge, &add_list);
- /* Restore size and flags */
- list_for_each_entry(fail_res, &fail_head, list) {
- struct resource *res = fail_res->res;
- int idx;
+ __pci_bridge_assign_resources(bridge, &add_list, &fail_head);
+ BUG_ON(!list_empty(&add_list));
+ tried_times++;
- res->start = fail_res->start;
- res->end = fail_res->end;
- res->flags = fail_res->flags;
+ if (list_empty(&fail_head))
+ break;
- if (pci_is_bridge(fail_res->dev)) {
- idx = res - &fail_res->dev->resource[0];
- if (idx >= PCI_BRIDGE_RESOURCES &&
- idx <= PCI_BRIDGE_RESOURCE_END)
- res->flags = 0;
+ if (tried_times >= 2) {
+ /* Still fail, don't need to try more */
+ free_list(&fail_head);
+ break;
}
- }
- free_list(&fail_head);
- goto again;
+ pci_prepare_next_assign_round(&fail_head, tried_times,
+ whole_subtree);
+ }
-enable_all:
- retval = pci_reenable_device(bridge);
- if (retval)
- pci_err(bridge, "Error reenabling bridge (%d)\n", retval);
+ ret = pci_reenable_device(bridge);
+ if (ret)
+ pci_err(bridge, "Error reenabling bridge (%d)\n", ret);
pci_set_master(bridge);
}
EXPORT_SYMBOL_GPL(pci_assign_unassigned_bridge_resources);
@@ -2301,13 +2463,8 @@ int pci_reassign_bridge_resources(struct pci_dev *bridge, unsigned long type)
cleanup:
/* Restore size and flags */
- list_for_each_entry(dev_res, &failed, list) {
- struct resource *res = dev_res->res;
-
- res->start = dev_res->start;
- res->end = dev_res->end;
- res->flags = dev_res->flags;
- }
+ list_for_each_entry(dev_res, &failed, list)
+ restore_dev_resource(dev_res);
free_list(&failed);
/* Revert to the old configuration */
@@ -2315,11 +2472,9 @@ cleanup:
struct resource *res = dev_res->res;
bridge = dev_res->dev;
- i = res - bridge->resource;
+ i = pci_resource_num(bridge, res);
- res->start = dev_res->start;
- res->end = dev_res->end;
- res->flags = dev_res->flags;
+ restore_dev_resource(dev_res);
pci_claim_resource(bridge, i);
pci_setup_bridge(bridge->subordinate);
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index c6d933ddfd46..c6657cdd06f6 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -29,7 +29,7 @@ static void pci_std_update_resource(struct pci_dev *dev, int resno)
u16 cmd;
u32 new, check, mask;
int reg;
- struct resource *res = dev->resource + resno;
+ struct resource *res = pci_resource_n(dev, resno);
const char *res_name = pci_resource_name(dev, resno);
/* Per SR-IOV spec 3.4.1.11, VF BARs are RO zero */
@@ -127,10 +127,8 @@ void pci_update_resource(struct pci_dev *dev, int resno)
{
if (resno <= PCI_ROM_RESOURCE)
pci_std_update_resource(dev, resno);
-#ifdef CONFIG_PCI_IOV
- else if (resno >= PCI_IOV_RESOURCES && resno <= PCI_IOV_RESOURCE_END)
+ else if (pci_resource_is_iov(resno))
pci_iov_update_resource(dev, resno);
-#endif
}
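
For reference, pci_resource_n() is the accessor these hunks switch to; it is roughly equivalent to the array indexing it replaces:

	/* Rough equivalent of the accessor; see include/linux/pci.h */
	#define pci_resource_n(dev, bar)	(&(dev)->resource[(bar)])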
int pci_claim_resource(struct pci_dev *dev, int resource)
@@ -211,8 +209,7 @@ static int pci_revert_fw_address(struct resource *res, struct pci_dev *dev,
start = res->start;
end = res->end;
- res->start = fw_addr;
- res->end = res->start + size - 1;
+ resource_set_range(res, fw_addr, size);
res->flags &= ~IORESOURCE_UNSET;
root = pci_find_parent_resource(dev, res);
@@ -263,7 +260,7 @@ resource_size_t __weak pcibios_align_resource(void *data,
static int __pci_assign_resource(struct pci_bus *bus, struct pci_dev *dev,
int resno, resource_size_t size, resource_size_t align)
{
- struct resource *res = dev->resource + resno;
+ struct resource *res = pci_resource_n(dev, resno);
resource_size_t min;
int ret;
@@ -326,7 +323,7 @@ static int _pci_assign_resource(struct pci_dev *dev, int resno,
int pci_assign_resource(struct pci_dev *dev, int resno)
{
- struct resource *res = dev->resource + resno;
+ struct resource *res = pci_resource_n(dev, resno);
const char *res_name = pci_resource_name(dev, resno);
resource_size_t align, size;
int ret;
@@ -373,7 +370,7 @@ EXPORT_SYMBOL(pci_assign_resource);
int pci_reassign_resource(struct pci_dev *dev, int resno,
resource_size_t addsize, resource_size_t min_align)
{
- struct resource *res = dev->resource + resno;
+ struct resource *res = pci_resource_n(dev, resno);
const char *res_name = pci_resource_name(dev, resno);
unsigned long flags;
resource_size_t new_size;
@@ -390,7 +387,6 @@ int pci_reassign_resource(struct pci_dev *dev, int resno,
return -EINVAL;
}
- /* already aligned with min_align */
new_size = resource_size(res) + addsize;
ret = _pci_assign_resource(dev, resno, new_size, min_align);
if (ret) {
@@ -412,14 +408,14 @@ int pci_reassign_resource(struct pci_dev *dev, int resno,
void pci_release_resource(struct pci_dev *dev, int resno)
{
- struct resource *res = dev->resource + resno;
+ struct resource *res = pci_resource_n(dev, resno);
const char *res_name = pci_resource_name(dev, resno);
- pci_info(dev, "%s %pR: releasing\n", res_name, res);
-
if (!res->parent)
return;
+ pci_info(dev, "%s %pR: releasing\n", res_name, res);
+
release_resource(res);
res->end = resource_size(res) - 1;
res->start = 0;
@@ -429,7 +425,7 @@ EXPORT_SYMBOL(pci_release_resource);
int pci_resize_resource(struct pci_dev *dev, int resno, int size)
{
- struct resource *res = dev->resource + resno;
+ struct resource *res = pci_resource_n(dev, resno);
struct pci_host_bridge *host;
int old, ret;
u32 sizes;
@@ -463,7 +459,7 @@ int pci_resize_resource(struct pci_dev *dev, int resno, int size)
if (ret)
return ret;
- res->end = res->start + pci_rebar_size_to_bytes(size) - 1;
+ resource_set_size(res, pci_rebar_size_to_bytes(size));
/* Check if the new config works by trying to assign everything. */
if (dev->bus->self) {
@@ -475,7 +471,7 @@ int pci_resize_resource(struct pci_dev *dev, int resno, int size)
error_resize:
pci_rebar_set_size(dev, resno, old);
- res->end = res->start + pci_rebar_size_to_bytes(old) - 1;
+ resource_set_size(res, pci_rebar_size_to_bytes(old));
return ret;
}
EXPORT_SYMBOL(pci_resize_resource);
@@ -498,8 +494,7 @@ int pci_enable_resources(struct pci_dev *dev, int mask)
if (!(r->flags & (IORESOURCE_IO | IORESOURCE_MEM)))
continue;
- if ((i == PCI_ROM_RESOURCE) &&
- (!(r->flags & IORESOURCE_ROM_ENABLE)))
+ if (pci_resource_is_optional(dev, i))
continue;
if (r->flags & IORESOURCE_UNSET) {
diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
index 0f87cade10f7..50fb3eb595fe 100644
--- a/drivers/pci/slot.c
+++ b/drivers/pci/slot.c
@@ -7,7 +7,6 @@
#include <linux/kobject.h>
#include <linux/slab.h>
-#include <linux/module.h>
#include <linux/pci.h>
#include <linux/err.h>
#include "pci.h"
@@ -79,6 +78,7 @@ static void pci_slot_release(struct kobject *kobj)
up_read(&pci_bus_sem);
list_del(&slot->list);
+ pci_bus_put(slot->bus);
kfree(slot);
}
@@ -244,12 +244,13 @@ struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr,
slot = get_slot(parent, slot_nr);
if (slot) {
if (hotplug) {
- if ((err = slot->hotplug ? -EBUSY : 0)
- || (err = rename_slot(slot, name))) {
- kobject_put(&slot->kobj);
- slot = NULL;
- goto err;
+ if (slot->hotplug) {
+ err = -EBUSY;
+ goto put_slot;
}
+ err = rename_slot(slot, name);
+ if (err)
+ goto put_slot;
}
goto out;
}
@@ -261,7 +262,7 @@ placeholder:
goto err;
}
- slot->bus = parent;
+ slot->bus = pci_bus_get(parent);
slot->number = slot_nr;
slot->kobj.kset = pci_slots_kset;
@@ -269,6 +270,7 @@ placeholder:
slot_name = make_slot_name(name);
if (!slot_name) {
err = -ENOMEM;
+ pci_bus_put(slot->bus);
kfree(slot);
goto err;
}
@@ -278,10 +280,8 @@ placeholder:
err = kobject_init_and_add(&slot->kobj, &pci_slot_ktype, NULL,
"%s", slot_name);
- if (err) {
- kobject_put(&slot->kobj);
- goto err;
- }
+ if (err)
+ goto put_slot;
down_read(&pci_bus_sem);
list_for_each_entry(dev, &parent->devices, bus_list)
@@ -296,6 +296,9 @@ out:
kfree(slot_name);
mutex_unlock(&pci_slot_mutex);
return slot;
+
+put_slot:
+ kobject_put(&slot->kobj);
err:
slot = ERR_PTR(err);
goto out;
@@ -321,49 +324,6 @@ void pci_destroy_slot(struct pci_slot *slot)
}
EXPORT_SYMBOL_GPL(pci_destroy_slot);
-#if defined(CONFIG_HOTPLUG_PCI) || defined(CONFIG_HOTPLUG_PCI_MODULE)
-#include <linux/pci_hotplug.h>
-/**
- * pci_hp_create_module_link - create symbolic link to hotplug driver module
- * @pci_slot: struct pci_slot
- *
- * Helper function for pci_hotplug_core.c to create symbolic link to
- * the hotplug driver module.
- */
-void pci_hp_create_module_link(struct pci_slot *pci_slot)
-{
- struct hotplug_slot *slot = pci_slot->hotplug;
- struct kobject *kobj = NULL;
- int ret;
-
- if (!slot || !slot->ops)
- return;
- kobj = kset_find_obj(module_kset, slot->mod_name);
- if (!kobj)
- return;
- ret = sysfs_create_link(&pci_slot->kobj, kobj, "module");
- if (ret)
- dev_err(&pci_slot->bus->dev, "Error creating sysfs link (%d)\n",
- ret);
- kobject_put(kobj);
-}
-EXPORT_SYMBOL_GPL(pci_hp_create_module_link);
-
-/**
- * pci_hp_remove_module_link - remove symbolic link to the hotplug driver
- * module.
- * @pci_slot: struct pci_slot
- *
- * Helper function for pci_hotplug_core.c to remove symbolic link to
- * the hotplug driver module.
- */
-void pci_hp_remove_module_link(struct pci_slot *pci_slot)
-{
- sysfs_remove_link(&pci_slot->kobj, "module");
-}
-EXPORT_SYMBOL_GPL(pci_hp_remove_module_link);
-#endif
-
static int pci_slot_init(void)
{
struct kset *pci_bus_kset;
diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c
index 5a4adf6c04cf..b14dfab04d84 100644
--- a/drivers/pci/switch/switchtec.c
+++ b/drivers/pci/switch/switchtec.c
@@ -37,7 +37,9 @@ MODULE_PARM_DESC(nirqs, "number of interrupts to allocate (more may be useful fo
static dev_t switchtec_devt;
static DEFINE_IDA(switchtec_minor_ida);
-struct class *switchtec_class;
+const struct class switchtec_class = {
+ .name = "switchtec",
+};
EXPORT_SYMBOL_GPL(switchtec_class);
enum mrpc_state {
@@ -1363,7 +1365,7 @@ static struct switchtec_dev *stdev_create(struct pci_dev *pdev)
dev = &stdev->dev;
device_initialize(dev);
- dev->class = switchtec_class;
+ dev->class = &switchtec_class;
dev->parent = &pdev->dev;
dev->groups = switchtec_device_groups;
dev->release = stdev_release;
@@ -1737,6 +1739,26 @@ static void switchtec_pci_remove(struct pci_dev *pdev)
.driver_data = gen, \
}
+#define SWITCHTEC_PCI100X_DEVICE(device_id, gen) \
+ { \
+ .vendor = PCI_VENDOR_ID_EFAR, \
+ .device = device_id, \
+ .subvendor = PCI_ANY_ID, \
+ .subdevice = PCI_ANY_ID, \
+ .class = (PCI_CLASS_MEMORY_OTHER << 8), \
+ .class_mask = 0xFFFFFFFF, \
+ .driver_data = gen, \
+ }, \
+ { \
+ .vendor = PCI_VENDOR_ID_EFAR, \
+ .device = device_id, \
+ .subvendor = PCI_ANY_ID, \
+ .subdevice = PCI_ANY_ID, \
+ .class = (PCI_CLASS_BRIDGE_OTHER << 8), \
+ .class_mask = 0xFFFFFFFF, \
+ .driver_data = gen, \
+ }
+
static const struct pci_device_id switchtec_pci_tbl[] = {
SWITCHTEC_PCI_DEVICE(0x8531, SWITCHTEC_GEN3), /* PFX 24xG3 */
SWITCHTEC_PCI_DEVICE(0x8532, SWITCHTEC_GEN3), /* PFX 32xG3 */
@@ -1831,6 +1853,12 @@ static const struct pci_device_id switchtec_pci_tbl[] = {
SWITCHTEC_PCI_DEVICE(0x5552, SWITCHTEC_GEN5), /* PAXA 52XG5 */
SWITCHTEC_PCI_DEVICE(0x5536, SWITCHTEC_GEN5), /* PAXA 36XG5 */
SWITCHTEC_PCI_DEVICE(0x5528, SWITCHTEC_GEN5), /* PAXA 28XG5 */
+ SWITCHTEC_PCI100X_DEVICE(0x1001, SWITCHTEC_GEN4), /* PCI1001 16XG4 */
+ SWITCHTEC_PCI100X_DEVICE(0x1002, SWITCHTEC_GEN4), /* PCI1002 12XG4 */
+ SWITCHTEC_PCI100X_DEVICE(0x1003, SWITCHTEC_GEN4), /* PCI1003 16XG4 */
+ SWITCHTEC_PCI100X_DEVICE(0x1004, SWITCHTEC_GEN4), /* PCI1004 16XG4 */
+ SWITCHTEC_PCI100X_DEVICE(0x1005, SWITCHTEC_GEN4), /* PCI1005 16XG4 */
+ SWITCHTEC_PCI100X_DEVICE(0x1006, SWITCHTEC_GEN4), /* PCI1006 16XG4 */
{0}
};
MODULE_DEVICE_TABLE(pci, switchtec_pci_tbl);
@@ -1851,11 +1879,9 @@ static int __init switchtec_init(void)
if (rc)
return rc;
- switchtec_class = class_create("switchtec");
- if (IS_ERR(switchtec_class)) {
- rc = PTR_ERR(switchtec_class);
+ rc = class_register(&switchtec_class);
+ if (rc)
goto err_create_class;
- }
rc = pci_register_driver(&switchtec_pci_driver);
if (rc)
@@ -1866,7 +1892,7 @@ static int __init switchtec_init(void)
return 0;
err_pci_register:
- class_destroy(switchtec_class);
+ class_unregister(&switchtec_class);
err_create_class:
unregister_chrdev_region(switchtec_devt, max_devices);
@@ -1878,7 +1904,7 @@ module_init(switchtec_init);
static void __exit switchtec_exit(void)
{
pci_unregister_driver(&switchtec_pci_driver);
- class_destroy(switchtec_class);
+ class_unregister(&switchtec_class);
unregister_chrdev_region(switchtec_devt, max_devices);
ida_destroy(&switchtec_minor_ida);
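
The switchtec conversion follows the driver-core move to statically defined, const struct class objects registered with class_register(). A minimal standalone sketch of the same idiom, with invented names:

	static const struct class example_class = {
		.name = "example",
	};

	static int __init example_init(void)
	{
		/* Register the statically allocated class with the driver core */
		return class_register(&example_class);
	}

	static void __exit example_exit(void)
	{
		class_unregister(&example_class);
	}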
diff --git a/drivers/pci/tph.c b/drivers/pci/tph.c
new file mode 100644
index 000000000000..77fce5e1b830
--- /dev/null
+++ b/drivers/pci/tph.c
@@ -0,0 +1,505 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * TPH (TLP Processing Hints) support
+ *
+ * Copyright (C) 2024 Advanced Micro Devices, Inc.
+ * Eric Van Tassell <Eric.VanTassell@amd.com>
+ * Wei Huang <wei.huang2@amd.com>
+ */
+#include <linux/pci.h>
+#include <linux/pci-acpi.h>
+#include <linux/msi.h>
+#include <linux/bitfield.h>
+#include <linux/pci-tph.h>
+
+#include "pci.h"
+
+/* System-wide TPH disabled */
+static bool pci_tph_disabled;
+
+#ifdef CONFIG_ACPI
+/*
+ * The st_info struct defines the Steering Tag (ST) info returned by the
+ * firmware PCI ACPI _DSM method (rev=0x7, func=0xF, "_DSM to Query Cache
+ * Locality TPH Features"), as specified in the approved ECN for PCI Firmware
+ * Spec and available at https://members.pcisig.com/wg/PCI-SIG/document/15470.
+ *
+ * @vm_st_valid: 8-bit ST for volatile memory is valid
+ * @vm_xst_valid: 16-bit extended ST for volatile memory is valid
+ * @vm_ph_ignore: 1 => PH was and will be ignored, 0 => PH should be supplied
+ * @vm_st: 8-bit ST for volatile mem
+ * @vm_xst: 16-bit extended ST for volatile mem
+ * @pm_st_valid: 8-bit ST for persistent memory is valid
+ * @pm_xst_valid: 16-bit extended ST for persistent memory is valid
+ * @pm_ph_ignore: 1 => PH was and will be ignored, 0 => PH should be supplied
+ * @pm_st: 8-bit ST for persistent mem
+ * @pm_xst: 16-bit extended ST for persistent mem
+ */
+union st_info {
+ struct {
+ u64 vm_st_valid : 1;
+ u64 vm_xst_valid : 1;
+ u64 vm_ph_ignore : 1;
+ u64 rsvd1 : 5;
+ u64 vm_st : 8;
+ u64 vm_xst : 16;
+ u64 pm_st_valid : 1;
+ u64 pm_xst_valid : 1;
+ u64 pm_ph_ignore : 1;
+ u64 rsvd2 : 5;
+ u64 pm_st : 8;
+ u64 pm_xst : 16;
+ };
+ u64 value;
+};
+
+static u16 tph_extract_tag(enum tph_mem_type mem_type, u8 req_type,
+ union st_info *info)
+{
+ switch (req_type) {
+ case PCI_TPH_REQ_TPH_ONLY: /* 8-bit tag */
+ switch (mem_type) {
+ case TPH_MEM_TYPE_VM:
+ if (info->vm_st_valid)
+ return info->vm_st;
+ break;
+ case TPH_MEM_TYPE_PM:
+ if (info->pm_st_valid)
+ return info->pm_st;
+ break;
+ }
+ break;
+ case PCI_TPH_REQ_EXT_TPH: /* 16-bit tag */
+ switch (mem_type) {
+ case TPH_MEM_TYPE_VM:
+ if (info->vm_xst_valid)
+ return info->vm_xst;
+ break;
+ case TPH_MEM_TYPE_PM:
+ if (info->pm_xst_valid)
+ return info->pm_xst;
+ break;
+ }
+ break;
+ default:
+ return 0;
+ }
+
+ return 0;
+}
+
+#define TPH_ST_DSM_FUNC_INDEX 0xF
+static acpi_status tph_invoke_dsm(acpi_handle handle, u32 cpu_uid,
+ union st_info *st_out)
+{
+ union acpi_object arg3[3], in_obj, *out_obj;
+
+ if (!acpi_check_dsm(handle, &pci_acpi_dsm_guid, 7,
+ BIT(TPH_ST_DSM_FUNC_INDEX)))
+ return AE_ERROR;
+
+ /* DWORD: feature ID (0 for processor cache ST query) */
+ arg3[0].integer.type = ACPI_TYPE_INTEGER;
+ arg3[0].integer.value = 0;
+
+ /* DWORD: target UID */
+ arg3[1].integer.type = ACPI_TYPE_INTEGER;
+ arg3[1].integer.value = cpu_uid;
+
+ /* QWORD: properties, all 0's */
+ arg3[2].integer.type = ACPI_TYPE_INTEGER;
+ arg3[2].integer.value = 0;
+
+ in_obj.type = ACPI_TYPE_PACKAGE;
+ in_obj.package.count = ARRAY_SIZE(arg3);
+ in_obj.package.elements = arg3;
+
+ out_obj = acpi_evaluate_dsm(handle, &pci_acpi_dsm_guid, 7,
+ TPH_ST_DSM_FUNC_INDEX, &in_obj);
+ if (!out_obj)
+ return AE_ERROR;
+
+ if (out_obj->type != ACPI_TYPE_BUFFER) {
+ ACPI_FREE(out_obj);
+ return AE_ERROR;
+ }
+
+ st_out->value = *((u64 *)(out_obj->buffer.pointer));
+
+ ACPI_FREE(out_obj);
+
+ return AE_OK;
+}
+#endif
+
+/* Update the TPH Requester Enable field of TPH Control Register */
+static void set_ctrl_reg_req_en(struct pci_dev *pdev, u8 req_type)
+{
+ u32 reg;
+
+ pci_read_config_dword(pdev, pdev->tph_cap + PCI_TPH_CTRL, &reg);
+
+ reg &= ~PCI_TPH_CTRL_REQ_EN_MASK;
+ reg |= FIELD_PREP(PCI_TPH_CTRL_REQ_EN_MASK, req_type);
+
+ pci_write_config_dword(pdev, pdev->tph_cap + PCI_TPH_CTRL, reg);
+}
+
+static u8 get_st_modes(struct pci_dev *pdev)
+{
+ u32 reg;
+
+ pci_read_config_dword(pdev, pdev->tph_cap + PCI_TPH_CAP, &reg);
+ reg &= PCI_TPH_CAP_ST_NS | PCI_TPH_CAP_ST_IV | PCI_TPH_CAP_ST_DS;
+
+ return reg;
+}
+
+static u32 get_st_table_loc(struct pci_dev *pdev)
+{
+ u32 reg;
+
+ pci_read_config_dword(pdev, pdev->tph_cap + PCI_TPH_CAP, &reg);
+
+ return FIELD_GET(PCI_TPH_CAP_LOC_MASK, reg);
+}
+
+/*
+ * Return the size of the ST table. If the ST table is not in the TPH
+ * Requester Extended Capability space, return 0. Otherwise return the
+ * ST Table Size + 1.
+ */
+static u16 get_st_table_size(struct pci_dev *pdev)
+{
+ u32 reg;
+ u32 loc;
+
+ /* Check ST table location first */
+ loc = get_st_table_loc(pdev);
+
+ /* Convert loc to match with PCI_TPH_LOC_* defined in pci_regs.h */
+ loc = FIELD_PREP(PCI_TPH_CAP_LOC_MASK, loc);
+ if (loc != PCI_TPH_LOC_CAP)
+ return 0;
+
+ pci_read_config_dword(pdev, pdev->tph_cap + PCI_TPH_CAP, &reg);
+
+ return FIELD_GET(PCI_TPH_CAP_ST_MASK, reg) + 1;
+}
+
+/* Return device's Root Port completer capability */
+static u8 get_rp_completer_type(struct pci_dev *pdev)
+{
+ struct pci_dev *rp;
+ u32 reg;
+ int ret;
+
+ rp = pcie_find_root_port(pdev);
+ if (!rp)
+ return 0;
+
+ ret = pcie_capability_read_dword(rp, PCI_EXP_DEVCAP2, &reg);
+ if (ret)
+ return 0;
+
+ return FIELD_GET(PCI_EXP_DEVCAP2_TPH_COMP_MASK, reg);
+}
+
+/* Write tag to ST table - Return 0 if OK, otherwise -errno */
+static int write_tag_to_st_table(struct pci_dev *pdev, int index, u16 tag)
+{
+ int st_table_size;
+ int offset;
+
+ /* Check if index is out of bounds */
+ st_table_size = get_st_table_size(pdev);
+ if (index >= st_table_size)
+ return -ENXIO;
+
+ offset = pdev->tph_cap + PCI_TPH_BASE_SIZEOF + index * sizeof(u16);
+
+ return pci_write_config_word(pdev, offset, tag);
+}
+
+/**
+ * pcie_tph_get_cpu_st() - Retrieve Steering Tag for a target memory associated
+ * with a specific CPU
+ * @pdev: PCI device
+ * @mem_type: target memory type (volatile or persistent RAM)
+ * @cpu_uid: associated CPU id
+ * @tag: Steering Tag to be returned
+ *
+ * Return the Steering Tag for a target memory that is associated with a
+ * specific CPU as indicated by cpu_uid.
+ *
+ * Return: 0 if success, otherwise negative value (-errno)
+ */
+int pcie_tph_get_cpu_st(struct pci_dev *pdev, enum tph_mem_type mem_type,
+ unsigned int cpu_uid, u16 *tag)
+{
+#ifdef CONFIG_ACPI
+ struct pci_dev *rp;
+ acpi_handle rp_acpi_handle;
+ union st_info info;
+
+ rp = pcie_find_root_port(pdev);
+ if (!rp || !rp->bus || !rp->bus->bridge)
+ return -ENODEV;
+
+ rp_acpi_handle = ACPI_HANDLE(rp->bus->bridge);
+
+ if (tph_invoke_dsm(rp_acpi_handle, cpu_uid, &info) != AE_OK) {
+ *tag = 0;
+ return -EINVAL;
+ }
+
+ *tag = tph_extract_tag(mem_type, pdev->tph_req_type, &info);
+
+ pci_dbg(pdev, "get steering tag: mem_type=%s, cpu_uid=%d, tag=%#04x\n",
+ (mem_type == TPH_MEM_TYPE_VM) ? "volatile" : "persistent",
+ cpu_uid, *tag);
+
+ return 0;
+#else
+ return -ENODEV;
+#endif
+}
+EXPORT_SYMBOL(pcie_tph_get_cpu_st);
+
+/**
+ * pcie_tph_set_st_entry() - Set Steering Tag in the ST table entry
+ * @pdev: PCI device
+ * @index: ST table entry index
+ * @tag: Steering Tag to be written
+ *
+ * Figure out the proper location of the ST table, either in the MSI-X table
+ * or in the TPH Extended Capability space, and write the Steering Tag into
+ * the ST entry pointed to by index.
+ *
+ * Return: 0 if success, otherwise negative value (-errno)
+ */
+int pcie_tph_set_st_entry(struct pci_dev *pdev, unsigned int index, u16 tag)
+{
+ u32 loc;
+ int err = 0;
+
+ if (!pdev->tph_cap)
+ return -EINVAL;
+
+ if (!pdev->tph_enabled)
+ return -EINVAL;
+
+ /* No need to write tag if device is in "No ST Mode" */
+ if (pdev->tph_mode == PCI_TPH_ST_NS_MODE)
+ return 0;
+
+ /*
+ * Disable TPH before updating ST to avoid potential instability as
+ * cautioned in PCIe r6.2, sec 6.17.3, "ST Modes of Operation"
+ */
+ set_ctrl_reg_req_en(pdev, PCI_TPH_REQ_DISABLE);
+
+ loc = get_st_table_loc(pdev);
+ /* Convert loc to match with PCI_TPH_LOC_* */
+ loc = FIELD_PREP(PCI_TPH_CAP_LOC_MASK, loc);
+
+ switch (loc) {
+ case PCI_TPH_LOC_MSIX:
+ err = pci_msix_write_tph_tag(pdev, index, tag);
+ break;
+ case PCI_TPH_LOC_CAP:
+ err = write_tag_to_st_table(pdev, index, tag);
+ break;
+ default:
+ err = -EINVAL;
+ }
+
+ if (err) {
+ pcie_disable_tph(pdev);
+ return err;
+ }
+
+ set_ctrl_reg_req_en(pdev, pdev->tph_req_type);
+
+ pci_dbg(pdev, "set steering tag: %s table, index=%d, tag=%#04x\n",
+ (loc == PCI_TPH_LOC_MSIX) ? "MSI-X" : "ST", index, tag);
+
+ return 0;
+}
+EXPORT_SYMBOL(pcie_tph_set_st_entry);
+
+/**
+ * pcie_disable_tph - Turn off TPH support for device
+ * @pdev: PCI device
+ *
+ * Return: none
+ */
+void pcie_disable_tph(struct pci_dev *pdev)
+{
+ if (!pdev->tph_cap)
+ return;
+
+ if (!pdev->tph_enabled)
+ return;
+
+ pci_write_config_dword(pdev, pdev->tph_cap + PCI_TPH_CTRL, 0);
+
+ pdev->tph_mode = 0;
+ pdev->tph_req_type = 0;
+ pdev->tph_enabled = 0;
+}
+EXPORT_SYMBOL(pcie_disable_tph);
+
+/**
+ * pcie_enable_tph - Enable TPH support for device using a specific ST mode
+ * @pdev: PCI device
+ * @mode: ST mode to enable. Current supported modes include:
+ *
+ * - PCI_TPH_ST_NS_MODE: NO ST Mode
+ * - PCI_TPH_ST_IV_MODE: Interrupt Vector Mode
+ * - PCI_TPH_ST_DS_MODE: Device Specific Mode
+ *
+ * Check whether the mode is actually supported by the device before enabling
+ * and return an error if not. Additionally determine what types of requests,
+ * TPH or extended TPH, can be issued by the device based on its TPH requester
+ * capability and the Root Port's completer capability.
+ *
+ * Return: 0 on success, otherwise negative value (-errno)
+ */
+int pcie_enable_tph(struct pci_dev *pdev, int mode)
+{
+ u32 reg;
+ u8 dev_modes;
+ u8 rp_req_type;
+
+ /* Honor "notph" kernel parameter */
+ if (pci_tph_disabled)
+ return -EINVAL;
+
+ if (!pdev->tph_cap)
+ return -EINVAL;
+
+ if (pdev->tph_enabled)
+ return -EBUSY;
+
+ /* Sanitize and check ST mode compatibility */
+ mode &= PCI_TPH_CTRL_MODE_SEL_MASK;
+ dev_modes = get_st_modes(pdev);
+ if (!((1 << mode) & dev_modes))
+ return -EINVAL;
+
+ pdev->tph_mode = mode;
+
+ /* Get req_type supported by device and its Root Port */
+ pci_read_config_dword(pdev, pdev->tph_cap + PCI_TPH_CAP, &reg);
+ if (FIELD_GET(PCI_TPH_CAP_EXT_TPH, reg))
+ pdev->tph_req_type = PCI_TPH_REQ_EXT_TPH;
+ else
+ pdev->tph_req_type = PCI_TPH_REQ_TPH_ONLY;
+
+ rp_req_type = get_rp_completer_type(pdev);
+
+ /* Final req_type is the smaller of the two */
+ pdev->tph_req_type = min(pdev->tph_req_type, rp_req_type);
+
+ if (pdev->tph_req_type == PCI_TPH_REQ_DISABLE)
+ return -EINVAL;
+
+ /* Write them into TPH control register */
+ pci_read_config_dword(pdev, pdev->tph_cap + PCI_TPH_CTRL, &reg);
+
+ reg &= ~PCI_TPH_CTRL_MODE_SEL_MASK;
+ reg |= FIELD_PREP(PCI_TPH_CTRL_MODE_SEL_MASK, pdev->tph_mode);
+
+ reg &= ~PCI_TPH_CTRL_REQ_EN_MASK;
+ reg |= FIELD_PREP(PCI_TPH_CTRL_REQ_EN_MASK, pdev->tph_req_type);
+
+ pci_write_config_dword(pdev, pdev->tph_cap + PCI_TPH_CTRL, reg);
+
+ pdev->tph_enabled = 1;
+
+ return 0;
+}
+EXPORT_SYMBOL(pcie_enable_tph);
+
+void pci_restore_tph_state(struct pci_dev *pdev)
+{
+ struct pci_cap_saved_state *save_state;
+ int num_entries, i, offset;
+ u16 *st_entry;
+ u32 *cap;
+
+ if (!pdev->tph_cap)
+ return;
+
+ if (!pdev->tph_enabled)
+ return;
+
+ save_state = pci_find_saved_ext_cap(pdev, PCI_EXT_CAP_ID_TPH);
+ if (!save_state)
+ return;
+
+ /* Restore control register and all ST entries */
+ cap = &save_state->cap.data[0];
+ pci_write_config_dword(pdev, pdev->tph_cap + PCI_TPH_CTRL, *cap++);
+ st_entry = (u16 *)cap;
+ offset = PCI_TPH_BASE_SIZEOF;
+ num_entries = get_st_table_size(pdev);
+ for (i = 0; i < num_entries; i++) {
+ pci_write_config_word(pdev, pdev->tph_cap + offset,
+ *st_entry++);
+ offset += sizeof(u16);
+ }
+}
+
+void pci_save_tph_state(struct pci_dev *pdev)
+{
+ struct pci_cap_saved_state *save_state;
+ int num_entries, i, offset;
+ u16 *st_entry;
+ u32 *cap;
+
+ if (!pdev->tph_cap)
+ return;
+
+ if (!pdev->tph_enabled)
+ return;
+
+ save_state = pci_find_saved_ext_cap(pdev, PCI_EXT_CAP_ID_TPH);
+ if (!save_state)
+ return;
+
+ /* Save control register */
+ cap = &save_state->cap.data[0];
+ pci_read_config_dword(pdev, pdev->tph_cap + PCI_TPH_CTRL, cap++);
+
+ /* Save all ST entries in extended capability structure */
+ st_entry = (u16 *)cap;
+ offset = PCI_TPH_BASE_SIZEOF;
+ num_entries = get_st_table_size(pdev);
+ for (i = 0; i < num_entries; i++) {
+ pci_read_config_word(pdev, pdev->tph_cap + offset,
+ st_entry++);
+ offset += sizeof(u16);
+ }
+}
+
+void pci_no_tph(void)
+{
+ pci_tph_disabled = true;
+
+ pr_info("PCIe TPH is disabled\n");
+}
+
+void pci_tph_init(struct pci_dev *pdev)
+{
+ int num_entries;
+ u32 save_size;
+
+ pdev->tph_cap = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_TPH);
+ if (!pdev->tph_cap)
+ return;
+
+ num_entries = get_st_table_size(pdev);
+ save_size = sizeof(u32) + num_entries * sizeof(u16);
+ pci_add_ext_cap_save_buffer(pdev, PCI_EXT_CAP_ID_TPH, save_size);
+}
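
Putting the new exported TPH API together, an endpoint driver would enable TPH, look up a Steering Tag for the CPU it targets, and program that tag into an ST entry. A minimal sketch, assuming the driver already knows the CPU UID and the ST/MSI-X index (the wrapper function name is invented):

	static int example_setup_tph(struct pci_dev *pdev, unsigned int cpu_uid,
				     unsigned int st_index)
	{
		u16 tag;
		int ret;

		ret = pcie_enable_tph(pdev, PCI_TPH_ST_IV_MODE);
		if (ret)
			return ret;

		ret = pcie_tph_get_cpu_st(pdev, TPH_MEM_TYPE_VM, cpu_uid, &tag);
		if (ret)
			goto disable;

		ret = pcie_tph_set_st_entry(pdev, st_index, tag);
		if (ret)
			goto disable;

		return 0;

	disable:
		pcie_disable_tph(pdev);
		return ret;
	}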
diff --git a/drivers/pci/vpd.c b/drivers/pci/vpd.c
index 485a642b9304..3d29b2602d0f 100644
--- a/drivers/pci/vpd.c
+++ b/drivers/pci/vpd.c
@@ -9,7 +9,7 @@
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/sched/signal.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include "pci.h"
#define PCI_VPD_LRDT_TAG_SIZE 3
@@ -271,8 +271,8 @@ void pci_vpd_init(struct pci_dev *dev)
}
static ssize_t vpd_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf, loff_t off,
- size_t count)
+ const struct bin_attribute *bin_attr, char *buf,
+ loff_t off, size_t count)
{
struct pci_dev *dev = to_pci_dev(kobj_to_dev(kobj));
struct pci_dev *vpd_dev = dev;
@@ -295,8 +295,8 @@ static ssize_t vpd_read(struct file *filp, struct kobject *kobj,
}
static ssize_t vpd_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf, loff_t off,
- size_t count)
+ const struct bin_attribute *bin_attr, char *buf,
+ loff_t off, size_t count)
{
struct pci_dev *dev = to_pci_dev(kobj_to_dev(kobj));
struct pci_dev *vpd_dev = dev;
@@ -317,15 +317,15 @@ static ssize_t vpd_write(struct file *filp, struct kobject *kobj,
return ret;
}
-static BIN_ATTR(vpd, 0600, vpd_read, vpd_write, 0);
+static const BIN_ATTR(vpd, 0600, vpd_read, vpd_write, 0);
-static struct bin_attribute *vpd_attrs[] = {
+static const struct bin_attribute *const vpd_attrs[] = {
&bin_attr_vpd,
NULL,
};
static umode_t vpd_attr_is_visible(struct kobject *kobj,
- struct bin_attribute *a, int n)
+ const struct bin_attribute *a, int n)
{
struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
@@ -336,7 +336,7 @@ static umode_t vpd_attr_is_visible(struct kobject *kobj,
}
const struct attribute_group pci_dev_vpd_attr_group = {
- .bin_attrs = vpd_attrs,
+ .bin_attrs_new = vpd_attrs,
.is_bin_visible = vpd_attr_is_visible,
};