Diffstat (limited to 'drivers/pci')
-rw-r--r--  drivers/pci/Kconfig | 72
-rw-r--r--  drivers/pci/Makefile | 13
-rw-r--r--  drivers/pci/access.c | 92
-rw-r--r--  drivers/pci/ats.c | 79
-rw-r--r--  drivers/pci/bus.c | 175
-rw-r--r--  drivers/pci/controller/Kconfig | 63
-rw-r--r--  drivers/pci/controller/Makefile | 4
-rw-r--r--  drivers/pci/controller/cadence/Kconfig | 51
-rw-r--r--  drivers/pci/controller/cadence/Makefile | 12
-rw-r--r--  drivers/pci/controller/cadence/pci-j721e.c | 310
-rw-r--r--  drivers/pci/controller/cadence/pci-sky1.c | 238
-rw-r--r--  drivers/pci/controller/cadence/pcie-cadence-ep.c | 134
-rw-r--r--  drivers/pci/controller/cadence/pcie-cadence-host-common.c | 288
-rw-r--r--  drivers/pci/controller/cadence/pcie-cadence-host-common.h | 46
-rw-r--r--  drivers/pci/controller/cadence/pcie-cadence-host-hpa.c | 368
-rw-r--r--  drivers/pci/controller/cadence/pcie-cadence-host.c | 388
-rw-r--r--  drivers/pci/controller/cadence/pcie-cadence-hpa-regs.h | 193
-rw-r--r--  drivers/pci/controller/cadence/pcie-cadence-hpa.c | 167
-rw-r--r--  drivers/pci/controller/cadence/pcie-cadence-lga-regs.h | 230
-rw-r--r--  drivers/pci/controller/cadence/pcie-cadence-plat.c | 17
-rw-r--r--  drivers/pci/controller/cadence/pcie-cadence.c | 47
-rw-r--r--  drivers/pci/controller/cadence/pcie-cadence.h | 512
-rw-r--r--  drivers/pci/controller/cadence/pcie-sg2042.c | 131
-rw-r--r--  drivers/pci/controller/dwc/Kconfig | 151
-rw-r--r--  drivers/pci/controller/dwc/Makefile | 14
-rw-r--r--  drivers/pci/controller/dwc/pci-dra7xx.c | 78
-rw-r--r--  drivers/pci/controller/dwc/pci-exynos.c | 131
-rw-r--r--  drivers/pci/controller/dwc/pci-imx6.c | 1932
-rw-r--r--  drivers/pci/controller/dwc/pci-keystone.c | 452
-rw-r--r--  drivers/pci/controller/dwc/pci-layerscape-ep.c | 69
-rw-r--r--  drivers/pci/controller/dwc/pci-layerscape.c | 291
-rw-r--r--  drivers/pci/controller/dwc/pci-meson.c | 40
-rw-r--r--  drivers/pci/controller/dwc/pcie-al.c | 19
-rw-r--r--  drivers/pci/controller/dwc/pcie-amd-mdb.c | 526
-rw-r--r--  drivers/pci/controller/dwc/pcie-armada8k.c | 8
-rw-r--r--  drivers/pci/controller/dwc/pcie-artpec6.c | 48
-rw-r--r--  drivers/pci/controller/dwc/pcie-bt1.c | 6
-rw-r--r--  drivers/pci/controller/dwc/pcie-designware-debugfs.c | 927
-rw-r--r--  drivers/pci/controller/dwc/pcie-designware-ep.c | 882
-rw-r--r--  drivers/pci/controller/dwc/pcie-designware-host.c | 696
-rw-r--r--  drivers/pci/controller/dwc/pcie-designware-plat.c | 27
-rw-r--r--  drivers/pci/controller/dwc/pcie-designware.c | 507
-rw-r--r--  drivers/pci/controller/dwc/pcie-designware.h | 430
-rw-r--r--  drivers/pci/controller/dwc/pcie-dw-rockchip.c | 565
-rw-r--r--  drivers/pci/controller/dwc/pcie-fu740.c | 3
-rw-r--r--  drivers/pci/controller/dwc/pcie-hisi.c | 1
-rw-r--r--  drivers/pci/controller/dwc/pcie-histb.c | 25
-rw-r--r--  drivers/pci/controller/dwc/pcie-intel-gw.c | 18
-rw-r--r--  drivers/pci/controller/dwc/pcie-keembay.c | 55
-rw-r--r--  drivers/pci/controller/dwc/pcie-kirin.c | 205
-rw-r--r--  drivers/pci/controller/dwc/pcie-nxp-s32g.c | 406
-rw-r--r--  drivers/pci/controller/dwc/pcie-qcom-common.c | 88
-rw-r--r--  drivers/pci/controller/dwc/pcie-qcom-common.h | 14
-rw-r--r--  drivers/pci/controller/dwc/pcie-qcom-ep.c | 223
-rw-r--r--  drivers/pci/controller/dwc/pcie-qcom.c | 965
-rw-r--r--  drivers/pci/controller/dwc/pcie-rcar-gen4.c | 804
-rw-r--r--  drivers/pci/controller/dwc/pcie-sophgo.c | 257
-rw-r--r--  drivers/pci/controller/dwc/pcie-spacemit-k1.c | 357
-rw-r--r--  drivers/pci/controller/dwc/pcie-spear13xx.c | 11
-rw-r--r--  drivers/pci/controller/dwc/pcie-stm32-ep.c | 343
-rw-r--r--  drivers/pci/controller/dwc/pcie-stm32.c | 370
-rw-r--r--  drivers/pci/controller/dwc/pcie-stm32.h | 19
-rw-r--r--  drivers/pci/controller/dwc/pcie-tegra194.c | 249
-rw-r--r--  drivers/pci/controller/dwc/pcie-uniphier-ep.c | 45
-rw-r--r--  drivers/pci/controller/dwc/pcie-uniphier.c | 16
-rw-r--r--  drivers/pci/controller/dwc/pcie-visconti.c | 6
-rw-r--r--  drivers/pci/controller/mobiveil/Kconfig | 1
-rw-r--r--  drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c | 14
-rw-r--r--  drivers/pci/controller/mobiveil/pcie-mobiveil-host.c | 61
-rw-r--r--  drivers/pci/controller/mobiveil/pcie-mobiveil.h | 5
-rw-r--r--  drivers/pci/controller/pci-aardvark.c | 142
-rw-r--r--  drivers/pci/controller/pci-ftpci100.c | 7
-rw-r--r--  drivers/pci/controller/pci-host-common.c | 55
-rw-r--r--  drivers/pci/controller/pci-host-common.h | 23
-rw-r--r--  drivers/pci/controller/pci-host-generic.c | 3
-rw-r--r--  drivers/pci/controller/pci-hyperv-intf.c | 1
-rw-r--r--  drivers/pci/controller/pci-hyperv.c | 331
-rw-r--r--  drivers/pci/controller/pci-ixp4xx.c | 9
-rw-r--r--  drivers/pci/controller/pci-loongson.c | 61
-rw-r--r--  drivers/pci/controller/pci-mvebu.c | 59
-rw-r--r--  drivers/pci/controller/pci-rcar-gen2.c | 3
-rw-r--r--  drivers/pci/controller/pci-tegra.c | 182
-rw-r--r--  drivers/pci/controller/pci-thunder-ecam.c | 4
-rw-r--r--  drivers/pci/controller/pci-thunder-pem.c | 5
-rw-r--r--  drivers/pci/controller/pci-v3-semi.c | 6
-rw-r--r--  drivers/pci/controller/pci-xgene-msi.c | 482
-rw-r--r--  drivers/pci/controller/pci-xgene.c | 46
-rw-r--r--  drivers/pci/controller/pcie-altera-msi.c | 55
-rw-r--r--  drivers/pci/controller/pcie-altera.c | 275
-rw-r--r--  drivers/pci/controller/pcie-apple.c | 396
-rw-r--r--  drivers/pci/controller/pcie-brcmstb.c | 1059
-rw-r--r--  drivers/pci/controller/pcie-hisi-error.c | 2
-rw-r--r--  drivers/pci/controller/pcie-iproc-msi.c | 51
-rw-r--r--  drivers/pci/controller/pcie-iproc-platform.c | 4
-rw-r--r--  drivers/pci/controller/pcie-iproc.c | 42
-rw-r--r--  drivers/pci/controller/pcie-mediatek-gen3.c | 557
-rw-r--r--  drivers/pci/controller/pcie-mediatek.c | 201
-rw-r--r--  drivers/pci/controller/pcie-microchip-host.c | 1139
-rw-r--r--  drivers/pci/controller/pcie-mt7621.c | 20
-rw-r--r--  drivers/pci/controller/pcie-rcar-ep.c | 37
-rw-r--r--  drivers/pci/controller/pcie-rcar-host.c | 158
-rw-r--r--  drivers/pci/controller/pcie-rockchip-ep.c | 487
-rw-r--r--  drivers/pci/controller/pcie-rockchip-host.c | 85
-rw-r--r--  drivers/pci/controller/pcie-rockchip.c | 241
-rw-r--r--  drivers/pci/controller/pcie-rockchip.h | 124
-rw-r--r--  drivers/pci/controller/pcie-rzg3s-host.c | 1761
-rw-r--r--  drivers/pci/controller/pcie-xilinx-common.h | 31
-rw-r--r--  drivers/pci/controller/pcie-xilinx-cpm.c | 155
-rw-r--r--  drivers/pci/controller/pcie-xilinx-dma-pl.c | 847
-rw-r--r--  drivers/pci/controller/pcie-xilinx-nwl.c | 266
-rw-r--r--  drivers/pci/controller/pcie-xilinx.c | 68
-rw-r--r--  drivers/pci/controller/plda/Kconfig | 31
-rw-r--r--  drivers/pci/controller/plda/Makefile | 4
-rw-r--r--  drivers/pci/controller/plda/pcie-microchip-host.c | 834
-rw-r--r--  drivers/pci/controller/plda/pcie-plda-host.c | 651
-rw-r--r--  drivers/pci/controller/plda/pcie-plda.h | 274
-rw-r--r--  drivers/pci/controller/plda/pcie-starfive.c | 492
-rw-r--r--  drivers/pci/controller/vmd.c | 389
-rw-r--r--  drivers/pci/devres.c | 864
-rw-r--r--  drivers/pci/doe.c | 273
-rw-r--r--  drivers/pci/ecam.c | 2
-rw-r--r--  drivers/pci/endpoint/Kconfig | 10
-rw-r--r--  drivers/pci/endpoint/Makefile | 1
-rw-r--r--  drivers/pci/endpoint/functions/pci-epf-mhi.c | 603
-rw-r--r--  drivers/pci/endpoint/functions/pci-epf-ntb.c | 12
-rw-r--r--  drivers/pci/endpoint/functions/pci-epf-test.c | 863
-rw-r--r--  drivers/pci/endpoint/functions/pci-epf-vntb.c | 402
-rw-r--r--  drivers/pci/endpoint/pci-ep-cfs.c | 11
-rw-r--r--  drivers/pci/endpoint/pci-ep-msi.c | 100
-rw-r--r--  drivers/pci/endpoint/pci-epc-core.c | 448
-rw-r--r--  drivers/pci/endpoint/pci-epc-mem.c | 19
-rw-r--r--  drivers/pci/endpoint/pci-epf-core.c | 199
-rw-r--r--  drivers/pci/host-bridge.c | 1
-rw-r--r--  drivers/pci/hotplug/Kconfig | 24
-rw-r--r--  drivers/pci/hotplug/Makefile | 2
-rw-r--r--  drivers/pci/hotplug/TODO | 19
-rw-r--r--  drivers/pci/hotplug/acpiphp.h | 1
-rw-r--r--  drivers/pci/hotplug/acpiphp_ampere_altra.c | 128
-rw-r--r--  drivers/pci/hotplug/acpiphp_core.c | 3
-rw-r--r--  drivers/pci/hotplug/acpiphp_glue.c | 5
-rw-r--r--  drivers/pci/hotplug/acpiphp_ibm.c | 4
-rw-r--r--  drivers/pci/hotplug/cpci_hotplug.h | 5
-rw-r--r--  drivers/pci/hotplug/cpci_hotplug_core.c | 17
-rw-r--r--  drivers/pci/hotplug/cpqphp_core.c | 2
-rw-r--r--  drivers/pci/hotplug/cpqphp_ctrl.c | 10
-rw-r--r--  drivers/pci/hotplug/cpqphp_pci.c | 79
-rw-r--r--  drivers/pci/hotplug/cpqphp_sysfs.c | 1
-rw-r--r--  drivers/pci/hotplug/ibmphp.h | 7
-rw-r--r--  drivers/pci/hotplug/ibmphp_hpc.c | 6
-rw-r--r--  drivers/pci/hotplug/ibmphp_pci.c | 12
-rw-r--r--  drivers/pci/hotplug/octep_hp.c | 427
-rw-r--r--  drivers/pci/hotplug/pci_hotplug_core.c | 223
-rw-r--r--  drivers/pci/hotplug/pciehp.h | 5
-rw-r--r--  drivers/pci/hotplug/pciehp_core.c | 19
-rw-r--r--  drivers/pci/hotplug/pciehp_ctrl.c | 5
-rw-r--r--  drivers/pci/hotplug/pciehp_hpc.c | 115
-rw-r--r--  drivers/pci/hotplug/pciehp_pci.c | 4
-rw-r--r--  drivers/pci/hotplug/pnv_php.c | 252
-rw-r--r--  drivers/pci/hotplug/rpaphp_pci.c | 85
-rw-r--r--  drivers/pci/hotplug/s390_pci_hpc.c | 71
-rw-r--r--  drivers/pci/hotplug/shpchp.h | 56
-rw-r--r--  drivers/pci/hotplug/shpchp_core.c | 28
-rw-r--r--  drivers/pci/hotplug/shpchp_ctrl.c | 79
-rw-r--r--  drivers/pci/hotplug/shpchp_hpc.c | 69
-rw-r--r--  drivers/pci/ide.c | 815
-rw-r--r--  drivers/pci/iomap.c | 190
-rw-r--r--  drivers/pci/iov.c | 217
-rw-r--r--  drivers/pci/irq.c | 204
-rw-r--r--  drivers/pci/mmap.c | 29
-rw-r--r--  drivers/pci/msi/api.c | 68
-rw-r--r--  drivers/pci/msi/irqdomain.c | 229
-rw-r--r--  drivers/pci/msi/msi.c | 272
-rw-r--r--  drivers/pci/msi/msi.h | 4
-rw-r--r--  drivers/pci/npem.c | 595
-rw-r--r--  drivers/pci/of.c | 367
-rw-r--r--  drivers/pci/of_property.c | 507
-rw-r--r--  drivers/pci/p2pdma.c | 253
-rw-r--r--  drivers/pci/pci-acpi.c | 228
-rw-r--r--  drivers/pci/pci-bridge-emul.c | 4
-rw-r--r--  drivers/pci/pci-driver.c | 111
-rw-r--r--  drivers/pci/pci-mid.c | 4
-rw-r--r--  drivers/pci/pci-pf-stub.c | 1
-rw-r--r--  drivers/pci/pci-stub.c | 1
-rw-r--r--  drivers/pci/pci-sysfs.c | 480
-rw-r--r--  drivers/pci/pci.c | 1913
-rw-r--r--  drivers/pci/pci.h | 626
-rw-r--r--  drivers/pci/pcie/Kconfig | 11
-rw-r--r--  drivers/pci/pcie/Makefile | 6
-rw-r--r--  drivers/pci/pcie/aer.c | 849
-rw-r--r--  drivers/pci/pcie/aer_inject.c | 6
-rw-r--r--  drivers/pci/pcie/aspm.c | 882
-rw-r--r--  drivers/pci/pcie/bwctrl.c | 329
-rw-r--r--  drivers/pci/pcie/dpc.c | 241
-rw-r--r--  drivers/pci/pcie/edr.c | 28
-rw-r--r--  drivers/pci/pcie/err.c | 71
-rw-r--r--  drivers/pci/pcie/pme.c | 4
-rw-r--r--  drivers/pci/pcie/portdrv.c | 41
-rw-r--r--  drivers/pci/pcie/portdrv.h | 10
-rw-r--r--  drivers/pci/pcie/ptm.c | 330
-rw-r--r--  drivers/pci/pcie/tlp.c | 137
-rw-r--r--  drivers/pci/probe.c | 778
-rw-r--r--  drivers/pci/proc.c | 4
-rw-r--r--  drivers/pci/pwrctrl/Kconfig | 48
-rw-r--r--  drivers/pci/pwrctrl/Makefile | 11
-rw-r--r--  drivers/pci/pwrctrl/core.c | 150
-rw-r--r--  drivers/pci/pwrctrl/pci-pwrctrl-pwrseq.c | 139
-rw-r--r--  drivers/pci/pwrctrl/pci-pwrctrl-tc9563.c | 648
-rw-r--r--  drivers/pci/pwrctrl/slot.c | 95
-rw-r--r--  drivers/pci/quirks.c | 413
-rw-r--r--  drivers/pci/rebar.c | 328
-rw-r--r--  drivers/pci/remove.c | 54
-rw-r--r--  drivers/pci/search.c | 93
-rw-r--r--  drivers/pci/setup-bus.c | 1538
-rw-r--r--  drivers/pci/setup-irq.c | 64
-rw-r--r--  drivers/pci/setup-res.c | 182
-rw-r--r--  drivers/pci/slot.c | 68
-rw-r--r--  drivers/pci/switch/switchtec.c | 254
-rw-r--r--  drivers/pci/syscall.c | 12
-rw-r--r--  drivers/pci/tph.c | 516
-rw-r--r--  drivers/pci/tsm.c | 900
-rw-r--r--  drivers/pci/vc.c | 9
-rw-r--r--  drivers/pci/vgaarb.c | 403
-rw-r--r--  drivers/pci/vpd.c | 50
-rw-r--r--  drivers/pci/xen-pcifront.c | 6
224 files changed, 38526 insertions, 12016 deletions
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index 3c07d8d214b3..00b0210e1f1d 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -13,9 +13,15 @@ config FORCE_PCI
select HAVE_PCI
select PCI
+# select this to provide a generic PCI iomap,
+# without PCI itself having to be defined
+config GENERIC_PCI_IOMAP
+ bool
+
menuconfig PCI
bool "PCI support"
depends on HAVE_PCI
+ depends on MMU
help
This option enables support for the PCI local bus, including
support for PCI-X and the foundations for PCI Express support.
@@ -116,9 +122,30 @@ config XEN_PCIDEV_FRONTEND
config PCI_ATS
bool
-config PCI_DOE
+config PCI_IDE
bool
+config PCI_TSM
+ bool "PCI TSM: Device security protocol support"
+ select PCI_IDE
+ select PCI_DOE
+ select TSM
+ help
+ The TEE (Trusted Execution Environment) Device Interface
+ Security Protocol (TDISP) defines a "TSM" as a platform agent
+ that manages device authentication, link encryption, link
+ integrity protection, and assignment of PCI device functions
+ (virtual or physical) to confidential computing VMs that can
+ access (DMA) guest private memory.
+
+ Enable a platform TSM driver to use this capability.
+
+config PCI_DOE
+ bool "Enable PCI Data Object Exchange (DOE) support"
+ help
+ Say Y here if you want to be able to communicate with PCIe DOE
+ mailboxes.
+
config PCI_ECAM
bool
@@ -138,6 +165,15 @@ config PCI_IOV
If unsure, say N.
+config PCI_NPEM
+ bool "Native PCIe Enclosure Management"
+ depends on LEDS_CLASS=y
+ help
+ Support for Native PCIe Enclosure Management. It allows managing LED
+ indications in storage enclosures. The enclosure must support the
+ following indications: OK, Locate, Fail, and Rebuild; other
+ indications are optional.
+
config PCI_PRI
bool "PCI PRI support"
select PCI_ATS
@@ -159,6 +195,15 @@ config PCI_PASID
If unsure, say N.
+config PCIE_TPH
+ bool "TLP Processing Hints"
+ help
+ This option adds support for PCIe TLP Processing Hints (TPH).
+ TPH allows endpoint devices to provide optimization hints, such as
+ desired caching behavior, for requests that target memory space.
+ These hints, called Steering Tags, can empower the system hardware
+ to optimize the utilization of platform resources.
+
config PCI_P2PDMA
bool "PCI peer-to-peer transfer support"
depends on ZONE_DEVICE
@@ -170,7 +215,7 @@ config PCI_P2PDMA
select GENERIC_ALLOCATOR
select NEED_SG_DMA_FLAGS
help
- Enableѕ drivers to do PCI peer-to-peer transactions to and from
+ Enables drivers to do PCI peer-to-peer transactions to and from
BARs that are exposed in other devices that are the part of
the hierarchy where peer-to-peer DMA is guaranteed by the PCI
specification to work (ie. anything below a single PCI bridge).
@@ -180,6 +225,12 @@ config PCI_P2PDMA
P2P DMA transactions must be between devices behind the same root
port.
+ Enabling this option will reduce the entropy of x86 KASLR memory
+ regions. For example, on a 46-bit system the entropy goes down
+ from 16 bits to 15 bits. The actual reduction in entropy depends
+ on the physical address bits, processor features, the kernel config
+ (5-level page tables), and the physical memory present in the system.
+
If unsure, say N.
config PCI_LABEL
@@ -188,12 +239,25 @@ config PCI_LABEL
config PCI_HYPERV
tristate "Hyper-V PCI Frontend"
- depends on ((X86 && X86_64) || ARM64) && HYPERV && PCI_MSI && SYSFS
+ depends on ((X86 && X86_64) || ARM64) && HYPERV_VMBUS && PCI_MSI && SYSFS
select PCI_HYPERV_INTERFACE
+ select IRQ_MSI_LIB
help
The PCI device frontend driver allows the kernel to import arbitrary
PCI devices from a PCI backend to support PCI driver domains.
+config PCI_DYNAMIC_OF_NODES
+ bool "Create Device tree nodes for PCI devices"
+ depends on OF_IRQ
+ select OF_DYNAMIC
+ help
+ This option enables support for generating device tree nodes for some
+ PCI devices. A driver for such a device can then load and overlay a
+ flattened device tree for its downstream devices.
+
+ Once this option is selected, the device tree nodes will be generated
+ for all PCI bridges.
+
choice
prompt "PCI Express hierarchy optimization setting"
default PCIE_BUS_DEFAULT
@@ -260,6 +324,7 @@ config VGA_ARB
bool "VGA Arbitration" if EXPERT
default y
depends on (PCI && !S390)
+ select SCREEN_INFO if X86
help
Some "legacy" VGA devices implemented on PCI typically have the same
hard-decoded addresses as they did on ISA. When multiple PCI devices
@@ -279,5 +344,6 @@ source "drivers/pci/hotplug/Kconfig"
source "drivers/pci/controller/Kconfig"
source "drivers/pci/endpoint/Kconfig"
source "drivers/pci/switch/Kconfig"
+source "drivers/pci/pwrctrl/Kconfig"
endif
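
Note on the PCI_DOE option made user-visible above: for orientation only, here is a small, hypothetical C sketch of how a driver can talk to a DOE mailbox with the existing pci_find_doe_mailbox()/pci_doe() API. The request/response payloads are placeholders and are not part of this patch.

#include <linux/pci.h>
#include <linux/pci-doe.h>

/*
 * Hypothetical example: query the mandatory DOE Discovery feature
 * (PCI-SIG vendor ID 0x0001, data object type 0).
 */
static int my_query_doe(struct pci_dev *pdev)
{
	struct pci_doe_mb *mb;
	u32 req = 0;	/* placeholder request payload */
	u32 resp;
	int rc;

	mb = pci_find_doe_mailbox(pdev, PCI_VENDOR_ID_PCI_SIG, 0);
	if (!mb)
		return -ENODEV;

	/* pci_doe() returns the number of response bytes or a negative errno */
	rc = pci_doe(mb, PCI_VENDOR_ID_PCI_SIG, 0, &req, sizeof(req),
		     &resp, sizeof(resp));
	if (rc < 0)
		return rc;

	pci_info(pdev, "DOE discovery response: %#x (%d bytes)\n", resp, rc);
	return 0;
}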
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 2680e4c92f0a..e10cfe5a280b 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -4,16 +4,18 @@
obj-$(CONFIG_PCI) += access.o bus.o probe.o host-bridge.o \
remove.o pci.o pci-driver.o search.o \
- pci-sysfs.o rom.o setup-res.o irq.o vpd.o \
- setup-bus.o vc.o mmap.o setup-irq.o
+ rebar.o rom.o setup-res.o irq.o vpd.o \
+ setup-bus.o vc.o mmap.o devres.o
obj-$(CONFIG_PCI) += msi/
obj-$(CONFIG_PCI) += pcie/
+obj-$(CONFIG_PCI) += pwrctrl/
ifdef CONFIG_PCI
obj-$(CONFIG_PROC_FS) += proc.o
-obj-$(CONFIG_SYSFS) += slot.o
+obj-$(CONFIG_SYSFS) += pci-sysfs.o slot.o
obj-$(CONFIG_ACPI) += pci-acpi.o
+obj-$(CONFIG_GENERIC_PCI_IOMAP) += iomap.o
endif
obj-$(CONFIG_OF) += of.o
@@ -32,6 +34,11 @@ obj-$(CONFIG_PCI_P2PDMA) += p2pdma.o
obj-$(CONFIG_XEN_PCIDEV_FRONTEND) += xen-pcifront.o
obj-$(CONFIG_VGA_ARB) += vgaarb.o
obj-$(CONFIG_PCI_DOE) += doe.o
+obj-$(CONFIG_PCI_IDE) += ide.o
+obj-$(CONFIG_PCI_TSM) += tsm.o
+obj-$(CONFIG_PCI_DYNAMIC_OF_NODES) += of_property.o
+obj-$(CONFIG_PCI_NPEM) += npem.o
+obj-$(CONFIG_PCIE_TPH) += tph.o
# Endpoint library must be initialized before its users
obj-$(CONFIG_PCI_ENDPOINT) += endpoint/
diff --git a/drivers/pci/access.c b/drivers/pci/access.c
index 3c230ca3de58..b123da16b63b 100644
--- a/drivers/pci/access.c
+++ b/drivers/pci/access.c
@@ -36,10 +36,13 @@ DEFINE_RAW_SPINLOCK(pci_lock);
int noinline pci_bus_read_config_##size \
(struct pci_bus *bus, unsigned int devfn, int pos, type *value) \
{ \
- int res; \
unsigned long flags; \
u32 data = 0; \
- if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER; \
+ int res; \
+ \
+ if (PCI_##size##_BAD) \
+ return PCIBIOS_BAD_REGISTER_NUMBER; \
+ \
pci_lock_config(flags); \
res = bus->ops->read(bus, devfn, pos, len, &data); \
if (res) \
@@ -47,6 +50,7 @@ int noinline pci_bus_read_config_##size \
else \
*value = (type)data; \
pci_unlock_config(flags); \
+ \
return res; \
}
@@ -54,12 +58,16 @@ int noinline pci_bus_read_config_##size \
int noinline pci_bus_write_config_##size \
(struct pci_bus *bus, unsigned int devfn, int pos, type value) \
{ \
- int res; \
unsigned long flags; \
- if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER; \
+ int res; \
+ \
+ if (PCI_##size##_BAD) \
+ return PCIBIOS_BAD_REGISTER_NUMBER; \
+ \
pci_lock_config(flags); \
res = bus->ops->write(bus, devfn, pos, len, value); \
pci_unlock_config(flags); \
+ \
return res; \
}
@@ -216,24 +224,27 @@ static noinline void pci_wait_cfg(struct pci_dev *dev)
}
/* Returns 0 on success, negative values indicate error. */
-#define PCI_USER_READ_CONFIG(size, type) \
+#define PCI_USER_READ_CONFIG(size, type) \
int pci_user_read_config_##size \
(struct pci_dev *dev, int pos, type *val) \
{ \
- int ret = PCIBIOS_SUCCESSFUL; \
u32 data = -1; \
+ int ret; \
+ \
if (PCI_##size##_BAD) \
return -EINVAL; \
- raw_spin_lock_irq(&pci_lock); \
+ \
+ raw_spin_lock_irq(&pci_lock); \
if (unlikely(dev->block_cfg_access)) \
pci_wait_cfg(dev); \
ret = dev->bus->ops->read(dev->bus, dev->devfn, \
- pos, sizeof(type), &data); \
- raw_spin_unlock_irq(&pci_lock); \
+ pos, sizeof(type), &data); \
+ raw_spin_unlock_irq(&pci_lock); \
if (ret) \
PCI_SET_ERROR_RESPONSE(val); \
else \
*val = (type)data; \
+ \
return pcibios_err_to_errno(ret); \
} \
EXPORT_SYMBOL_GPL(pci_user_read_config_##size);
@@ -243,15 +254,18 @@ EXPORT_SYMBOL_GPL(pci_user_read_config_##size);
int pci_user_write_config_##size \
(struct pci_dev *dev, int pos, type val) \
{ \
- int ret = PCIBIOS_SUCCESSFUL; \
+ int ret; \
+ \
if (PCI_##size##_BAD) \
return -EINVAL; \
- raw_spin_lock_irq(&pci_lock); \
+ \
+ raw_spin_lock_irq(&pci_lock); \
if (unlikely(dev->block_cfg_access)) \
pci_wait_cfg(dev); \
ret = dev->bus->ops->write(dev->bus, dev->devfn, \
- pos, sizeof(type), val); \
- raw_spin_unlock_irq(&pci_lock); \
+ pos, sizeof(type), val); \
+ raw_spin_unlock_irq(&pci_lock); \
+ \
return pcibios_err_to_errno(ret); \
} \
EXPORT_SYMBOL_GPL(pci_user_write_config_##size);
@@ -497,22 +511,35 @@ int pcie_capability_write_dword(struct pci_dev *dev, int pos, u32 val)
}
EXPORT_SYMBOL(pcie_capability_write_dword);
-int pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos,
- u16 clear, u16 set)
+int pcie_capability_clear_and_set_word_unlocked(struct pci_dev *dev, int pos,
+ u16 clear, u16 set)
{
int ret;
u16 val;
ret = pcie_capability_read_word(dev, pos, &val);
- if (!ret) {
- val &= ~clear;
- val |= set;
- ret = pcie_capability_write_word(dev, pos, val);
- }
+ if (ret)
+ return ret;
+
+ val &= ~clear;
+ val |= set;
+ return pcie_capability_write_word(dev, pos, val);
+}
+EXPORT_SYMBOL(pcie_capability_clear_and_set_word_unlocked);
+
+int pcie_capability_clear_and_set_word_locked(struct pci_dev *dev, int pos,
+ u16 clear, u16 set)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&dev->pcie_cap_lock, flags);
+ ret = pcie_capability_clear_and_set_word_unlocked(dev, pos, clear, set);
+ spin_unlock_irqrestore(&dev->pcie_cap_lock, flags);
return ret;
}
-EXPORT_SYMBOL(pcie_capability_clear_and_set_word);
+EXPORT_SYMBOL(pcie_capability_clear_and_set_word_locked);
int pcie_capability_clear_and_set_dword(struct pci_dev *dev, int pos,
u32 clear, u32 set)
@@ -521,13 +548,12 @@ int pcie_capability_clear_and_set_dword(struct pci_dev *dev, int pos,
u32 val;
ret = pcie_capability_read_dword(dev, pos, &val);
- if (!ret) {
- val &= ~clear;
- val |= set;
- ret = pcie_capability_write_dword(dev, pos, val);
- }
+ if (ret)
+ return ret;
- return ret;
+ val &= ~clear;
+ val |= set;
+ return pcie_capability_write_dword(dev, pos, val);
}
EXPORT_SYMBOL(pcie_capability_clear_and_set_dword);
@@ -586,3 +612,15 @@ int pci_write_config_dword(const struct pci_dev *dev, int where,
return pci_bus_write_config_dword(dev->bus, dev->devfn, where, val);
}
EXPORT_SYMBOL(pci_write_config_dword);
+
+void pci_clear_and_set_config_dword(const struct pci_dev *dev, int pos,
+ u32 clear, u32 set)
+{
+ u32 val;
+
+ pci_read_config_dword(dev, pos, &val);
+ val &= ~clear;
+ val |= set;
+ pci_write_config_dword(dev, pos, val);
+}
+EXPORT_SYMBOL(pci_clear_and_set_config_dword);
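
For illustration only, a hypothetical driver snippet using the two read-modify-write helpers introduced above, pcie_capability_clear_and_set_word_locked() and pci_clear_and_set_config_dword(). The vendor register offset and bit are made up; they are not part of this patch.

#include <linux/bits.h>
#include <linux/pci.h>

#define MY_VENDOR_CTRL_REG	0x40	/* hypothetical vendor register */
#define MY_VENDOR_CTRL_EN	BIT(0)	/* hypothetical enable bit */

static void my_driver_tune_link(struct pci_dev *pdev)
{
	/*
	 * RMW of the Link Control register under pdev->pcie_cap_lock, so it
	 * cannot race with other LNKCTL updates (e.g. ASPM or bandwidth
	 * control doing their own read-modify-write).
	 */
	pcie_capability_clear_and_set_word_locked(pdev, PCI_EXP_LNKCTL,
						  PCI_EXP_LNKCTL_ASPMC,
						  PCI_EXP_LNKCTL_ASPM_L1);

	/* Plain config-space RMW helper for a vendor-specific dword. */
	pci_clear_and_set_config_dword(pdev, MY_VENDOR_CTRL_REG,
				       MY_VENDOR_CTRL_EN, 0);
}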
diff --git a/drivers/pci/ats.c b/drivers/pci/ats.c
index f9cc2e10b676..ec6c8dbdc5e9 100644
--- a/drivers/pci/ats.c
+++ b/drivers/pci/ats.c
@@ -9,6 +9,7 @@
* Copyright (C) 2011 Advanced Micro Devices,
*/
+#include <linux/bitfield.h>
#include <linux/export.h>
#include <linux/pci-ats.h>
#include <linux/pci.h>
@@ -47,6 +48,39 @@ bool pci_ats_supported(struct pci_dev *dev)
EXPORT_SYMBOL_GPL(pci_ats_supported);
/**
+ * pci_prepare_ats - Setup the PS for ATS
+ * @dev: the PCI device
+ * @ps: the IOMMU page shift
+ *
+ * This must be done by the IOMMU driver on the PF before any VFs are created to
+ * ensure that the VF can have ATS enabled.
+ *
+ * Returns 0 on success, or negative on failure.
+ */
+int pci_prepare_ats(struct pci_dev *dev, int ps)
+{
+ u16 ctrl;
+
+ if (!pci_ats_supported(dev))
+ return -EINVAL;
+
+ if (WARN_ON(dev->ats_enabled))
+ return -EBUSY;
+
+ if (ps < PCI_ATS_MIN_STU)
+ return -EINVAL;
+
+ if (dev->is_virtfn)
+ return 0;
+
+ dev->ats_stu = ps;
+ ctrl = PCI_ATS_CTRL_STU(dev->ats_stu - PCI_ATS_MIN_STU);
+ pci_write_config_word(dev, dev->ats_cap + PCI_ATS_CTRL, ctrl);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pci_prepare_ats);
+
+/**
* pci_enable_ats - enable the ATS capability
* @dev: the PCI device
* @ps: the IOMMU page shift
@@ -376,7 +410,7 @@ int pci_enable_pasid(struct pci_dev *pdev, int features)
if (WARN_ON(pdev->pasid_enabled))
return -EBUSY;
- if (!pdev->eetlp_prefix_path && !pdev->pasid_no_tlp)
+ if (!pdev->eetlp_prefix_max && !pdev->pasid_no_tlp)
return -EINVAL;
if (!pasid)
@@ -454,8 +488,8 @@ void pci_restore_pasid_state(struct pci_dev *pdev)
* pci_pasid_features - Check which PASID features are supported
* @pdev: PCI device structure
*
- * Returns a negative value when no PASI capability is present.
- * Otherwise is returns a bitmask with supported features. Current
+ * Return a negative value when no PASID capability is present.
+ * Otherwise return a bitmask with supported features. Current
* features reported are:
* PCI_PASID_CAP_EXEC - Execute permission supported
* PCI_PASID_CAP_PRIV - Privileged mode supported
@@ -480,8 +514,6 @@ int pci_pasid_features(struct pci_dev *pdev)
}
EXPORT_SYMBOL_GPL(pci_pasid_features);
-#define PASID_NUMBER_SHIFT 8
-#define PASID_NUMBER_MASK (0x1f << PASID_NUMBER_SHIFT)
/**
* pci_max_pasids - Get maximum number of PASIDs supported by device
* @pdev: PCI device structure
@@ -503,9 +535,40 @@ int pci_max_pasids(struct pci_dev *pdev)
pci_read_config_word(pdev, pasid + PCI_PASID_CAP, &supported);
- supported = (supported & PASID_NUMBER_MASK) >> PASID_NUMBER_SHIFT;
-
- return (1 << supported);
+ return (1 << FIELD_GET(PCI_PASID_CAP_WIDTH, supported));
}
EXPORT_SYMBOL_GPL(pci_max_pasids);
+
+/**
+ * pci_pasid_status - Check the PASID status
+ * @pdev: PCI device structure
+ *
+ * Returns a negative value when no PASID capability is present.
+ * Otherwise the value of the control register is returned.
+ * The status bits reported are:
+ *
+ * PCI_PASID_CTRL_ENABLE - PASID enabled
+ * PCI_PASID_CTRL_EXEC - Execute permission enabled
+ * PCI_PASID_CTRL_PRIV - Privileged mode enabled
+ */
+int pci_pasid_status(struct pci_dev *pdev)
+{
+ int pasid;
+ u16 ctrl;
+
+ if (pdev->is_virtfn)
+ pdev = pci_physfn(pdev);
+
+ pasid = pdev->pasid_cap;
+ if (!pasid)
+ return -EINVAL;
+
+ pci_read_config_word(pdev, pasid + PCI_PASID_CTRL, &ctrl);
+
+ ctrl &= PCI_PASID_CTRL_ENABLE | PCI_PASID_CTRL_EXEC |
+ PCI_PASID_CTRL_PRIV;
+
+ return ctrl;
+}
+EXPORT_SYMBOL_GPL(pci_pasid_status);
#endif /* CONFIG_PCI_PASID */
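
A hypothetical IOMMU-side sketch (not part of the patch) showing how the new pci_prepare_ats() and pci_pasid_status() helpers might be used; the choice of PAGE_SHIFT as the ATS smallest translation unit is just an example value.

#include <linux/pci.h>
#include <linux/pci-ats.h>

static int my_iommu_attach_pf(struct pci_dev *pf)
{
	int ret;

	/*
	 * Program the PF's ATS Smallest Translation Unit before any VFs
	 * are created, so the VFs can enable ATS later.
	 */
	ret = pci_prepare_ats(pf, PAGE_SHIFT);
	if (ret)
		return ret;

	/* Report which PASID controls are currently enabled, if any. */
	ret = pci_pasid_status(pf);
	if (ret >= 0 && (ret & PCI_PASID_CTRL_ENABLE))
		dev_info(&pf->dev, "PASID already enabled (ctrl %#x)\n", ret);

	return 0;
}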
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index 5bc81cc0a2de..4383a36fd6ca 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -8,14 +8,30 @@
*/
#include <linux/module.h>
#include <linux/kernel.h>
+#include <linux/cleanup.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/ioport.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/proc_fs.h>
#include <linux/slab.h>
#include "pci.h"
+/*
+ * The first PCI_BRIDGE_RESOURCE_NUM PCI bus resources (those that correspond
+ * to P2P or CardBus bridge windows) go in a table. Additional ones (for
+ * buses below host bridges or subtractive decode bridges) go in the list.
+ * Use pci_bus_for_each_resource() to iterate through all the resources.
+ */
+
+struct pci_bus_resource {
+ struct list_head list;
+ struct resource *res;
+};
+
void pci_add_resource_offset(struct list_head *resources, struct resource *res,
resource_size_t offset)
{
@@ -44,8 +60,7 @@ void pci_free_resource_list(struct list_head *resources)
}
EXPORT_SYMBOL(pci_free_resource_list);
-void pci_bus_add_resource(struct pci_bus *bus, struct resource *res,
- unsigned int flags)
+void pci_bus_add_resource(struct pci_bus *bus, struct resource *res)
{
struct pci_bus_resource *bus_res;
@@ -56,7 +71,6 @@ void pci_bus_add_resource(struct pci_bus *bus, struct resource *res,
}
bus_res->res = res;
- bus_res->flags = flags;
list_add_tail(&bus_res->list, &bus->resources);
}
@@ -175,10 +189,7 @@ static void pci_clip_resource_to_region(struct pci_bus *bus,
static int pci_bus_alloc_from_region(struct pci_bus *bus, struct resource *res,
resource_size_t size, resource_size_t align,
resource_size_t min, unsigned long type_mask,
- resource_size_t (*alignf)(void *,
- const struct resource *,
- resource_size_t,
- resource_size_t),
+ resource_alignf alignf,
void *alignf_data,
struct pci_bus_region *region)
{
@@ -194,6 +205,9 @@ static int pci_bus_alloc_from_region(struct pci_bus *bus, struct resource *res,
if (!r)
continue;
+ if (r->flags & (IORESOURCE_UNSET|IORESOURCE_DISABLED))
+ continue;
+
/* type_mask must match */
if ((res->flags ^ r->flags) & type_mask)
continue;
@@ -249,10 +263,7 @@ static int pci_bus_alloc_from_region(struct pci_bus *bus, struct resource *res,
int pci_bus_alloc_resource(struct pci_bus *bus, struct resource *res,
resource_size_t size, resource_size_t align,
resource_size_t min, unsigned long type_mask,
- resource_size_t (*alignf)(void *,
- const struct resource *,
- resource_size_t,
- resource_size_t),
+ resource_alignf alignf,
void *alignf_data)
{
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
@@ -332,7 +343,8 @@ void __weak pcibios_bus_add_device(struct pci_dev *pdev) { }
*/
void pci_bus_add_device(struct pci_dev *dev)
{
- int retval;
+ struct device_node *dn = dev->dev.of_node;
+ struct platform_device *pdev;
/*
* Can not put in pci_device_add yet because resources
@@ -340,16 +352,39 @@ void pci_bus_add_device(struct pci_dev *dev)
*/
pcibios_bus_add_device(dev);
pci_fixup_device(pci_fixup_final, dev);
+ if (pci_is_bridge(dev))
+ of_pci_make_dev_node(dev);
pci_create_sysfs_dev_files(dev);
pci_proc_attach_device(dev);
pci_bridge_d3_update(dev);
- dev->match_driver = true;
- retval = device_attach(&dev->dev);
- if (retval < 0 && retval != -EPROBE_DEFER)
- pci_warn(dev, "device attach failed (%d)\n", retval);
+ /* Save config space for error recoverability */
+ pci_save_state(dev);
+
+ /*
+ * If the PCI device is associated with a pwrctrl device with a
+ * power supply, create a device link between the PCI device and
+ * pwrctrl device. This ensures that pwrctrl drivers are probed
+ * before PCI client drivers.
+ */
+ pdev = of_find_device_by_node(dn);
+ if (pdev) {
+ if (of_pci_supply_present(dn)) {
+ if (!device_link_add(&dev->dev, &pdev->dev,
+ DL_FLAG_AUTOREMOVE_CONSUMER)) {
+ pci_err(dev, "failed to add device link to power control device %s\n",
+ pdev->name);
+ }
+ }
+ put_device(&pdev->dev);
+ }
- pci_dev_assign_added(dev, true);
+ if (!dn || of_device_is_available(dn))
+ pci_dev_allow_binding(dev);
+
+ device_initial_probe(&dev->dev);
+
+ pci_dev_assign_added(dev);
}
EXPORT_SYMBOL_GPL(pci_bus_add_device);
@@ -382,10 +417,51 @@ void pci_bus_add_devices(const struct pci_bus *bus)
}
EXPORT_SYMBOL(pci_bus_add_devices);
-/** pci_walk_bus - walk devices on/under bus, calling callback.
- * @top bus whose devices should be walked
- * @cb callback to be called for each device found
- * @userdata arbitrary pointer to be passed to callback.
+static int __pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
+ void *userdata)
+{
+ struct pci_dev *dev;
+ int ret = 0;
+
+ list_for_each_entry(dev, &top->devices, bus_list) {
+ ret = cb(dev, userdata);
+ if (ret)
+ break;
+ if (dev->subordinate) {
+ ret = __pci_walk_bus(dev->subordinate, cb, userdata);
+ if (ret)
+ break;
+ }
+ }
+ return ret;
+}
+
+static int __pci_walk_bus_reverse(struct pci_bus *top,
+ int (*cb)(struct pci_dev *, void *),
+ void *userdata)
+{
+ struct pci_dev *dev;
+ int ret = 0;
+
+ list_for_each_entry_reverse(dev, &top->devices, bus_list) {
+ if (dev->subordinate) {
+ ret = __pci_walk_bus_reverse(dev->subordinate, cb,
+ userdata);
+ if (ret)
+ break;
+ }
+ ret = cb(dev, userdata);
+ if (ret)
+ break;
+ }
+ return ret;
+}
+
+/**
+ * pci_walk_bus - walk devices on/under bus, calling callback.
+ * @top: bus whose devices should be walked
+ * @cb: callback to be called for each device found
+ * @userdata: arbitrary pointer to be passed to callback
*
* Walk the given bus, including any bridged devices
* on buses under this bus. Call the provided callback
@@ -393,44 +469,39 @@ EXPORT_SYMBOL(pci_bus_add_devices);
*
* We check the return of @cb each time. If it returns anything
* other than 0, we break out.
- *
*/
-void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
- void *userdata)
+void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *), void *userdata)
{
- struct pci_dev *dev;
- struct pci_bus *bus;
- struct list_head *next;
- int retval;
-
- bus = top;
down_read(&pci_bus_sem);
- next = top->devices.next;
- for (;;) {
- if (next == &bus->devices) {
- /* end of this bus, go up or finish */
- if (bus == top)
- break;
- next = bus->self->bus_list.next;
- bus = bus->self->bus;
- continue;
- }
- dev = list_entry(next, struct pci_dev, bus_list);
- if (dev->subordinate) {
- /* this is a pci-pci bridge, do its devices next */
- next = dev->subordinate->devices.next;
- bus = dev->subordinate;
- } else
- next = dev->bus_list.next;
-
- retval = cb(dev, userdata);
- if (retval)
- break;
- }
+ __pci_walk_bus(top, cb, userdata);
up_read(&pci_bus_sem);
}
EXPORT_SYMBOL_GPL(pci_walk_bus);
+/**
+ * pci_walk_bus_reverse - walk devices on/under bus, calling callback.
+ * @top: bus whose devices should be walked
+ * @cb: callback to be called for each device found
+ * @userdata: arbitrary pointer to be passed to callback
+ *
+ * Same semantics as pci_walk_bus(), but walks the bus in reverse order.
+ */
+void pci_walk_bus_reverse(struct pci_bus *top,
+ int (*cb)(struct pci_dev *, void *), void *userdata)
+{
+ down_read(&pci_bus_sem);
+ __pci_walk_bus_reverse(top, cb, userdata);
+ up_read(&pci_bus_sem);
+}
+EXPORT_SYMBOL_GPL(pci_walk_bus_reverse);
+
+void pci_walk_bus_locked(struct pci_bus *top, int (*cb)(struct pci_dev *, void *), void *userdata)
+{
+ lockdep_assert_held(&pci_bus_sem);
+
+ __pci_walk_bus(top, cb, userdata);
+}
+
struct pci_bus *pci_bus_get(struct pci_bus *bus)
{
if (bus)
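
A small, hypothetical illustration of the reworked bus-walk helpers above: pci_walk_bus() visits parents before children, while the new pci_walk_bus_reverse() visits children first (useful for teardown ordering). The callback stops the walk by returning non-zero.

#include <linux/pci.h>

static int my_count_devices(struct pci_dev *pdev, void *data)
{
	unsigned int *count = data;

	(*count)++;
	return 0;		/* keep walking */
}

static void my_walk_example(struct pci_bus *bus)
{
	unsigned int count = 0;

	/* Parents first, then children (e.g. while enabling a feature). */
	pci_walk_bus(bus, my_count_devices, &count);

	/* Children first, then parents (e.g. in a teardown path). */
	pci_walk_bus_reverse(bus, my_count_devices, &count);
}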
diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig
index 8d49bad7f847..c254d2b8bf17 100644
--- a/drivers/pci/controller/Kconfig
+++ b/drivers/pci/controller/Kconfig
@@ -3,12 +3,17 @@
menu "PCI controller drivers"
depends on PCI
+config PCI_HOST_COMMON
+ tristate
+ select PCI_ECAM
+
config PCI_AARDVARK
tristate "Aardvark PCIe controller"
depends on (ARCH_MVEBU && ARM64) || COMPILE_TEST
depends on OF
depends on PCI_MSI
select PCI_BRIDGE_EMUL
+ select IRQ_MSI_LIB
help
Add support for Aardvark 64bit PCIe Host Controller. This
controller is part of the South Bridge of the Marvel Armada
@@ -25,6 +30,7 @@ config PCIE_ALTERA_MSI
tristate "Altera PCIe MSI feature"
depends on PCIE_ALTERA
depends on PCI_MSI
+ select IRQ_MSI_LIB
help
Say Y here if you want PCIe MSI support for the Altera FPGA.
This MSI driver supports Altera MSI to GIC controller IP.
@@ -40,6 +46,7 @@ config PCIE_APPLE
depends on OF
depends on PCI_MSI
select PCI_HOST_COMMON
+ select IRQ_MSI_LIB
help
Say Y here if you want to enable PCIe controller support on Apple
system-on-chips, like the Apple M1. This is required for the USB
@@ -57,6 +64,7 @@ config PCIE_BRCMSTB
BMIPS_GENERIC || COMPILE_TEST
depends on OF
depends on PCI_MSI
+ select IRQ_MSI_LIB
default ARCH_BRCMSTB || BMIPS_GENERIC
help
Say Y here to enable PCIe host controller support for
@@ -93,6 +101,7 @@ config PCIE_IPROC_MSI
bool "Broadcom iProc PCIe MSI support"
depends on PCIE_IPROC_PLATFORM || PCIE_IPROC_BCMA
depends on PCI_MSI
+ select IRQ_MSI_LIB
default ARCH_BCM_IPROC
help
Say Y here if you want to enable MSI support for Broadcom's iProc
@@ -119,10 +128,6 @@ config PCI_FTPCI100
depends on OF
default ARCH_GEMINI
-config PCI_HOST_COMMON
- tristate
- select PCI_ECAM
-
config PCI_HOST_GENERIC
tristate "Generic PCI host controller"
depends on OF
@@ -141,7 +146,7 @@ config PCIE_HISI_ERR
config PCI_IXP4XX
bool "Intel IXP4xx PCI controller"
- depends on ARM && OF
+ depends on OF
depends on ARCH_IXP4XX || COMPILE_TEST
default ARCH_IXP4XX
help
@@ -151,6 +156,7 @@ config PCI_IXP4XX
config VMD
depends on PCI_MSI && X86_64 && !UML
tristate "Intel Volume Management Device Driver"
+ select IRQ_MSI_LIB
help
Adds support for the Intel Volume Management Device (VMD). VMD is a
secondary PCI host bridge that allows PCI Express root ports,
@@ -179,7 +185,6 @@ config PCI_MVEBU
depends on MVEBU_MBUS
depends on ARM
depends on OF
- depends on BROKEN
select PCI_BRIDGE_EMUL
help
Add support for Marvell EBU PCIe controller. This PCIe controller
@@ -191,14 +196,16 @@ config PCIE_MEDIATEK
depends on ARCH_AIROHA || ARCH_MEDIATEK || COMPILE_TEST
depends on OF
depends on PCI_MSI
+ select IRQ_MSI_LIB
help
Say Y here if you want to enable PCIe controller support on
MediaTek SoCs.
config PCIE_MEDIATEK_GEN3
tristate "MediaTek Gen3 PCIe controller"
- depends on ARCH_MEDIATEK || COMPILE_TEST
+ depends on ARCH_AIROHA || ARCH_MEDIATEK || COMPILE_TEST
depends on PCI_MSI
+ select IRQ_MSI_LIB
help
Adds support for PCIe Gen3 MAC controller for MediaTek SoCs.
This PCIe controller is compatible with Gen3, Gen2 and Gen1 speed,
@@ -216,14 +223,6 @@ config PCIE_MT7621
help
This selects a driver for the MediaTek MT7621 PCIe Controller.
-config PCIE_MICROCHIP_HOST
- bool "Microchip AXI PCIe controller"
- depends on PCI_MSI && OF
- select PCI_HOST_COMMON
- help
- Say Y here if you want kernel to support the Microchip AXI PCIe
- Host Bridge driver.
-
config PCI_HYPERV_INTERFACE
tristate "Microsoft Hyper-V PCI Interface"
depends on ((X86 && X86_64) || ARM64) && HYPERV && PCI_MSI
@@ -236,6 +235,7 @@ config PCI_TEGRA
bool "NVIDIA Tegra PCIe controller"
depends on ARCH_TEGRA || COMPILE_TEST
depends on PCI_MSI
+ select IRQ_MSI_LIB
help
Say Y here if you want support for the PCIe host controller found
on NVIDIA Tegra SoCs.
@@ -244,6 +244,7 @@ config PCIE_RCAR_HOST
bool "Renesas R-Car PCIe controller (host mode)"
depends on ARCH_RENESAS || COMPILE_TEST
depends on PCI_MSI
+ select IRQ_MSI_LIB
help
Say Y here if you want PCIe controller support on R-Car SoCs in host
mode.
@@ -258,12 +259,20 @@ config PCIE_RCAR_EP
config PCI_RCAR_GEN2
bool "Renesas R-Car Gen2 Internal PCI controller"
- depends on ARCH_RENESAS || COMPILE_TEST
- depends on ARM
+ depends on (ARCH_RENESAS && ARM) || COMPILE_TEST
help
Say Y here if you want internal PCI support on R-Car Gen2 SoC.
- There are 3 internal PCI controllers available with a single
- built-in EHCI/OHCI host controller present on each one.
+ Each internal PCI controller contains a single built-in EHCI/OHCI
+ host controller.
+
+config PCIE_RENESAS_RZG3S_HOST
+ bool "Renesas RZ/G3S PCIe host controller"
+ depends on ARCH_RENESAS || COMPILE_TEST
+ select MFD_SYSCON
+ select IRQ_MSI_LIB
+ help
+ Say Y here if you want PCIe host controller support on Renesas RZ/G3S
+ SoC.
config PCIE_ROCKCHIP
bool
@@ -312,6 +321,7 @@ config PCI_XGENE_MSI
bool "X-Gene v1 PCIe MSI feature"
depends on PCI_XGENE
depends on PCI_MSI
+ select IRQ_MSI_LIB
default y
help
Say Y here if you want PCIe MSI support for the APM X-Gene v1 SoC.
@@ -321,14 +331,28 @@ config PCIE_XILINX
bool "Xilinx AXI PCIe controller"
depends on OF
depends on PCI_MSI
+ select IRQ_MSI_LIB
help
Say 'Y' here if you want kernel to support the Xilinx AXI PCIe
Host Bridge driver.
+config PCIE_XILINX_DMA_PL
+ bool "Xilinx DMA PL PCIe host bridge support"
+ depends on ARCH_ZYNQMP || COMPILE_TEST
+ depends on PCI_MSI
+ select PCI_HOST_COMMON
+ select IRQ_MSI_LIB
+ help
+ Say 'Y' here if you want kernel support for the Xilinx PL DMA
+ PCIe host bridge. The controller is a Soft IP which can act as
+ Root Port. If your system provides Xilinx PCIe host controller
+ bridge DMA as Soft IP say 'Y'; if you are not sure, say 'N'.
+
config PCIE_XILINX_NWL
bool "Xilinx NWL PCIe controller"
depends on ARCH_ZYNQMP || COMPILE_TEST
depends on PCI_MSI
+ select IRQ_MSI_LIB
help
Say 'Y' here if you want kernel support for Xilinx
NWL PCIe controller. The controller can act as Root Port
@@ -346,4 +370,5 @@ config PCIE_XILINX_CPM
source "drivers/pci/controller/cadence/Kconfig"
source "drivers/pci/controller/dwc/Kconfig"
source "drivers/pci/controller/mobiveil/Kconfig"
+source "drivers/pci/controller/plda/Kconfig"
endmenu
diff --git a/drivers/pci/controller/Makefile b/drivers/pci/controller/Makefile
index 37c8663de7fe..229929a945c2 100644
--- a/drivers/pci/controller/Makefile
+++ b/drivers/pci/controller/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_PCI_TEGRA) += pci-tegra.o
obj-$(CONFIG_PCI_RCAR_GEN2) += pci-rcar-gen2.o
obj-$(CONFIG_PCIE_RCAR_HOST) += pcie-rcar.o pcie-rcar-host.o
obj-$(CONFIG_PCIE_RCAR_EP) += pcie-rcar.o pcie-rcar-ep.o
+obj-$(CONFIG_PCIE_RENESAS_RZG3S_HOST) += pcie-rzg3s-host.o
obj-$(CONFIG_PCI_HOST_COMMON) += pci-host-common.o
obj-$(CONFIG_PCI_HOST_GENERIC) += pci-host-generic.o
obj-$(CONFIG_PCI_HOST_THUNDER_ECAM) += pci-thunder-ecam.o
@@ -17,6 +18,7 @@ obj-$(CONFIG_PCI_HOST_THUNDER_PEM) += pci-thunder-pem.o
obj-$(CONFIG_PCIE_XILINX) += pcie-xilinx.o
obj-$(CONFIG_PCIE_XILINX_NWL) += pcie-xilinx-nwl.o
obj-$(CONFIG_PCIE_XILINX_CPM) += pcie-xilinx-cpm.o
+obj-$(CONFIG_PCIE_XILINX_DMA_PL) += pcie-xilinx-dma-pl.o
obj-$(CONFIG_PCI_V3_SEMI) += pci-v3-semi.o
obj-$(CONFIG_PCI_XGENE) += pci-xgene.o
obj-$(CONFIG_PCI_XGENE_MSI) += pci-xgene-msi.o
@@ -32,7 +34,6 @@ obj-$(CONFIG_PCIE_ROCKCHIP_EP) += pcie-rockchip-ep.o
obj-$(CONFIG_PCIE_ROCKCHIP_HOST) += pcie-rockchip-host.o
obj-$(CONFIG_PCIE_MEDIATEK) += pcie-mediatek.o
obj-$(CONFIG_PCIE_MEDIATEK_GEN3) += pcie-mediatek-gen3.o
-obj-$(CONFIG_PCIE_MICROCHIP_HOST) += pcie-microchip-host.o
obj-$(CONFIG_VMD) += vmd.o
obj-$(CONFIG_PCIE_BRCMSTB) += pcie-brcmstb.o
obj-$(CONFIG_PCI_LOONGSON) += pci-loongson.o
@@ -43,6 +44,7 @@ obj-$(CONFIG_PCIE_MT7621) += pcie-mt7621.o
# pcie-hisi.o quirks are needed even without CONFIG_PCIE_DW
obj-y += dwc/
obj-y += mobiveil/
+obj-y += plda/
# The following drivers are for devices that use the generic ACPI
diff --git a/drivers/pci/controller/cadence/Kconfig b/drivers/pci/controller/cadence/Kconfig
index 291d12711363..9e651d545973 100644
--- a/drivers/pci/controller/cadence/Kconfig
+++ b/drivers/pci/controller/cadence/Kconfig
@@ -4,25 +4,25 @@ menu "Cadence-based PCIe controllers"
depends on PCI
config PCIE_CADENCE
- bool
+ tristate
config PCIE_CADENCE_HOST
- bool
+ tristate
depends on OF
select IRQ_DOMAIN
select PCIE_CADENCE
config PCIE_CADENCE_EP
- bool
+ tristate
depends on OF
depends on PCI_ENDPOINT
select PCIE_CADENCE
config PCIE_CADENCE_PLAT
- bool
+ tristate
config PCIE_CADENCE_PLAT_HOST
- bool "Cadence platform PCIe controller (host mode)"
+ tristate "Cadence platform PCIe controller (host mode)"
depends on OF
select PCIE_CADENCE_HOST
select PCIE_CADENCE_PLAT
@@ -32,23 +32,49 @@ config PCIE_CADENCE_PLAT_HOST
vendors SoCs.
config PCIE_CADENCE_PLAT_EP
- bool "Cadence platform PCIe controller (endpoint mode)"
+ tristate "Cadence platform PCIe controller (endpoint mode)"
depends on OF
depends on PCI_ENDPOINT
select PCIE_CADENCE_EP
select PCIE_CADENCE_PLAT
help
- Say Y here if you want to support the Cadence PCIe platform controller in
+ Say Y here if you want to support the Cadence PCIe platform controller in
endpoint mode. This PCIe controller may be embedded into many
different vendors SoCs.
+config PCI_SKY1_HOST
+ tristate "CIX SKY1 PCIe controller (host mode)"
+ depends on OF && (ARCH_CIX || COMPILE_TEST)
+ select PCIE_CADENCE_HOST
+ select PCI_ECAM
+ help
+ Say Y here if you want to support the CIX SKY1 PCIe platform
+ controller in host mode. The CIX SKY1 PCIe controller uses the
+ Cadence HPA (High Performance Architecture) IP, the second
+ generation of the Cadence PCIe IP.
+
+ This driver requires the Cadence PCIe core infrastructure
+ (PCIE_CADENCE_HOST) and a hardware platform adaptation layer
+ to function.
+
+config PCIE_SG2042_HOST
+ tristate "Sophgo SG2042 PCIe controller (host mode)"
+ depends on OF && (ARCH_SOPHGO || COMPILE_TEST)
+ select PCIE_CADENCE_HOST
+ help
+ Say Y here if you want to support the Sophgo SG2042 PCIe platform
+ controller in host mode. Sophgo SG2042 PCIe controller uses Cadence
+ PCIe core.
+
config PCI_J721E
- bool
+ tristate
+ select PCIE_CADENCE_HOST if PCI_J721E_HOST != n
+ select PCIE_CADENCE_EP if PCI_J721E_EP != n
config PCI_J721E_HOST
- bool "TI J721E PCIe controller (host mode)"
+ tristate "TI J721E PCIe controller (host mode)"
+ depends on ARCH_K3 || COMPILE_TEST
depends on OF
- select PCIE_CADENCE_HOST
select PCI_J721E
help
Say Y here if you want to support the TI J721E PCIe platform
@@ -56,13 +82,14 @@ config PCI_J721E_HOST
core.
config PCI_J721E_EP
- bool "TI J721E PCIe controller (endpoint mode)"
+ tristate "TI J721E PCIe controller (endpoint mode)"
+ depends on ARCH_K3 || COMPILE_TEST
depends on OF
depends on PCI_ENDPOINT
- select PCIE_CADENCE_EP
select PCI_J721E
help
Say Y here if you want to support the TI J721E PCIe platform
controller in endpoint mode. TI J721E PCIe controller uses Cadence PCIe
core.
+
endmenu
diff --git a/drivers/pci/controller/cadence/Makefile b/drivers/pci/controller/cadence/Makefile
index 9bac5fb2f13d..b8ec1cecfaa8 100644
--- a/drivers/pci/controller/cadence/Makefile
+++ b/drivers/pci/controller/cadence/Makefile
@@ -1,6 +1,12 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_PCIE_CADENCE) += pcie-cadence.o
-obj-$(CONFIG_PCIE_CADENCE_HOST) += pcie-cadence-host.o
-obj-$(CONFIG_PCIE_CADENCE_EP) += pcie-cadence-ep.o
+pcie-cadence-mod-y := pcie-cadence-hpa.o pcie-cadence.o
+pcie-cadence-host-mod-y := pcie-cadence-host-common.o pcie-cadence-host.o pcie-cadence-host-hpa.o
+pcie-cadence-ep-mod-y := pcie-cadence-ep.o
+
+obj-$(CONFIG_PCIE_CADENCE) += pcie-cadence-mod.o
+obj-$(CONFIG_PCIE_CADENCE_HOST) += pcie-cadence-host-mod.o
+obj-$(CONFIG_PCIE_CADENCE_EP) += pcie-cadence-ep-mod.o
obj-$(CONFIG_PCIE_CADENCE_PLAT) += pcie-cadence-plat.o
obj-$(CONFIG_PCI_J721E) += pci-j721e.o
+obj-$(CONFIG_PCIE_SG2042_HOST) += pcie-sg2042.o
+obj-$(CONFIG_PCI_SKY1_HOST) += pci-sky1.o
diff --git a/drivers/pci/controller/cadence/pci-j721e.c b/drivers/pci/controller/cadence/pci-j721e.c
index e70213c9060a..ecd1b0312400 100644
--- a/drivers/pci/controller/cadence/pci-j721e.c
+++ b/drivers/pci/controller/cadence/pci-j721e.c
@@ -7,22 +7,28 @@
*/
#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/container_of.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/io.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/mfd/syscon.h>
+#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/pci.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include "../../pci.h"
#include "pcie-cadence.h"
+#define cdns_pcie_to_rc(p) container_of(p, struct cdns_pcie_rc, pcie)
+
#define ENABLE_REG_SYS_2 0x108
+#define ENABLE_CLR_REG_SYS_2 0x308
#define STATUS_REG_SYS_2 0x508
#define STATUS_CLR_REG_SYS_2 0x708
#define LINK_DOWN BIT(1)
@@ -42,18 +48,18 @@ enum link_status {
};
#define J721E_MODE_RC BIT(7)
-#define LANE_COUNT_MASK BIT(8)
#define LANE_COUNT(n) ((n) << 8)
+#define ACSPCIE_PAD_DISABLE_MASK GENMASK(1, 0)
#define GENERATION_SEL_MASK GENMASK(1, 0)
-#define MAX_LANES 2
-
struct j721e_pcie {
struct cdns_pcie *cdns_pcie;
struct clk *refclk;
u32 mode;
u32 num_lanes;
+ u32 max_lanes;
+ struct gpio_desc *reset_gpio;
void __iomem *user_cfg_base;
void __iomem *intd_cfg_base;
u32 linkdown_irq_regfield;
@@ -71,6 +77,7 @@ struct j721e_pcie_data {
unsigned int quirk_disable_flr:1;
u32 linkdown_irq_regfield;
unsigned int byte_access_allowed:1;
+ unsigned int max_lanes;
};
static inline u32 j721e_pcie_user_readl(struct j721e_pcie *pcie, u32 offset)
@@ -111,6 +118,15 @@ static irqreturn_t j721e_pcie_link_irq_handler(int irq, void *priv)
return IRQ_HANDLED;
}
+static void j721e_pcie_disable_link_irq(struct j721e_pcie *pcie)
+{
+ u32 reg;
+
+ reg = j721e_pcie_intd_readl(pcie, ENABLE_CLR_REG_SYS_2);
+ reg |= pcie->linkdown_irq_regfield;
+ j721e_pcie_intd_writel(pcie, ENABLE_CLR_REG_SYS_2, reg);
+}
+
static void j721e_pcie_config_link_irq(struct j721e_pcie *pcie)
{
u32 reg;
@@ -148,11 +164,7 @@ static bool j721e_pcie_link_up(struct cdns_pcie *cdns_pcie)
u32 reg;
reg = j721e_pcie_user_readl(pcie, J721E_PCIE_USER_LINKSTATUS);
- reg &= LINK_STATUS;
- if (reg == LINK_UP_DL_COMPLETED)
- return true;
-
- return false;
+ return (reg & LINK_STATUS) == LINK_UP_DL_COMPLETED;
}
static const struct cdns_pcie_ops j721e_pcie_ops = {
@@ -206,17 +218,51 @@ static int j721e_pcie_set_lane_count(struct j721e_pcie *pcie,
{
struct device *dev = pcie->cdns_pcie->dev;
u32 lanes = pcie->num_lanes;
+ u32 mask = BIT(8);
u32 val = 0;
int ret;
+ if (pcie->max_lanes == 4)
+ mask = GENMASK(9, 8);
+
val = LANE_COUNT(lanes - 1);
- ret = regmap_update_bits(syscon, offset, LANE_COUNT_MASK, val);
+ ret = regmap_update_bits(syscon, offset, mask, val);
if (ret)
dev_err(dev, "failed to set link count\n");
return ret;
}
+static int j721e_enable_acspcie_refclk(struct j721e_pcie *pcie,
+ struct regmap *syscon)
+{
+ struct device *dev = pcie->cdns_pcie->dev;
+ struct device_node *node = dev->of_node;
+ u32 mask = ACSPCIE_PAD_DISABLE_MASK;
+ struct of_phandle_args args;
+ u32 val;
+ int ret;
+
+ ret = of_parse_phandle_with_fixed_args(node,
+ "ti,syscon-acspcie-proxy-ctrl",
+ 1, 0, &args);
+ if (ret) {
+ dev_err(dev,
+ "ti,syscon-acspcie-proxy-ctrl has invalid arguments\n");
+ return ret;
+ }
+
+ /* Clear PAD IO disable bits to enable refclk output */
+ val = ~(args.args[0]);
+ ret = regmap_update_bits(syscon, 0, mask, val);
+ if (ret) {
+ dev_err(dev, "failed to enable ACSPCIE refclk: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
static int j721e_pcie_ctrl_init(struct j721e_pcie *pcie)
{
struct device *dev = pcie->cdns_pcie->dev;
@@ -238,6 +284,25 @@ static int j721e_pcie_ctrl_init(struct j721e_pcie *pcie)
if (!ret)
offset = args.args[0];
+ /*
+ * The PCIe Controller's registers have different "reset-values"
+ * depending on the "strap" settings programmed into the PCIEn_CTRL
+ * register within the CTRL_MMR memory-mapped register space.
+ * The registers latch onto a "reset-value" based on the "strap"
+ * settings sampled after the PCIe Controller is powered on.
+ * To ensure that the "reset-values" are sampled accurately, power
+ * off the PCIe Controller before programming the "strap" settings
+ * and power it on after that. The runtime PM APIs namely
+ * pm_runtime_put_sync() and pm_runtime_get_sync() will decrement and
+ * increment the usage counter respectively, causing GENPD to power off
+ * and power on the PCIe Controller.
+ */
+ ret = pm_runtime_put_sync(dev);
+ if (ret < 0) {
+ dev_err(dev, "Failed to power off PCIe Controller\n");
+ return ret;
+ }
+
ret = j721e_pcie_set_mode(pcie, syscon, offset);
if (ret < 0) {
dev_err(dev, "Failed to set pci mode\n");
@@ -256,7 +321,19 @@ static int j721e_pcie_ctrl_init(struct j721e_pcie *pcie)
return ret;
}
- return 0;
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ dev_err(dev, "Failed to power on PCIe Controller\n");
+ return ret;
+ }
+
+ /* Enable ACSPCIE refclk output if the optional property exists */
+ syscon = syscon_regmap_lookup_by_phandle_optional(node,
+ "ti,syscon-acspcie-proxy-ctrl");
+ if (!syscon)
+ return 0;
+
+ return j721e_enable_acspcie_refclk(pcie, syscon);
}
static int cdns_ti_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
@@ -290,11 +367,13 @@ static const struct j721e_pcie_data j721e_pcie_rc_data = {
.quirk_retrain_flag = true,
.byte_access_allowed = false,
.linkdown_irq_regfield = LINK_DOWN,
+ .max_lanes = 2,
};
static const struct j721e_pcie_data j721e_pcie_ep_data = {
.mode = PCI_MODE_EP,
.linkdown_irq_regfield = LINK_DOWN,
+ .max_lanes = 2,
};
static const struct j721e_pcie_data j7200_pcie_rc_data = {
@@ -302,23 +381,49 @@ static const struct j721e_pcie_data j7200_pcie_rc_data = {
.quirk_detect_quiet_flag = true,
.linkdown_irq_regfield = J7200_LINK_DOWN,
.byte_access_allowed = true,
+ .max_lanes = 2,
};
static const struct j721e_pcie_data j7200_pcie_ep_data = {
.mode = PCI_MODE_EP,
.quirk_detect_quiet_flag = true,
+ .linkdown_irq_regfield = J7200_LINK_DOWN,
.quirk_disable_flr = true,
+ .max_lanes = 2,
};
static const struct j721e_pcie_data am64_pcie_rc_data = {
.mode = PCI_MODE_RC,
.linkdown_irq_regfield = J7200_LINK_DOWN,
.byte_access_allowed = true,
+ .max_lanes = 1,
};
static const struct j721e_pcie_data am64_pcie_ep_data = {
.mode = PCI_MODE_EP,
.linkdown_irq_regfield = J7200_LINK_DOWN,
+ .max_lanes = 1,
+};
+
+static const struct j721e_pcie_data j784s4_pcie_rc_data = {
+ .mode = PCI_MODE_RC,
+ .quirk_retrain_flag = true,
+ .byte_access_allowed = false,
+ .linkdown_irq_regfield = J7200_LINK_DOWN,
+ .max_lanes = 4,
+};
+
+static const struct j721e_pcie_data j784s4_pcie_ep_data = {
+ .mode = PCI_MODE_EP,
+ .linkdown_irq_regfield = J7200_LINK_DOWN,
+ .max_lanes = 4,
+};
+
+static const struct j721e_pcie_data j722s_pcie_rc_data = {
+ .mode = PCI_MODE_RC,
+ .linkdown_irq_regfield = J7200_LINK_DOWN,
+ .byte_access_allowed = true,
+ .max_lanes = 1,
};
static const struct of_device_id of_j721e_pcie_match[] = {
@@ -346,8 +451,21 @@ static const struct of_device_id of_j721e_pcie_match[] = {
.compatible = "ti,am64-pcie-ep",
.data = &am64_pcie_ep_data,
},
+ {
+ .compatible = "ti,j784s4-pcie-host",
+ .data = &j784s4_pcie_rc_data,
+ },
+ {
+ .compatible = "ti,j784s4-pcie-ep",
+ .data = &j784s4_pcie_ep_data,
+ },
+ {
+ .compatible = "ti,j722s-pcie-host",
+ .data = &j722s_pcie_rc_data,
+ },
{},
};
+MODULE_DEVICE_TABLE(of, of_j721e_pcie_match);
static int j721e_pcie_probe(struct platform_device *pdev)
{
@@ -359,9 +477,7 @@ static int j721e_pcie_probe(struct platform_device *pdev)
struct j721e_pcie *pcie;
struct cdns_pcie_rc *rc = NULL;
struct cdns_pcie_ep *ep = NULL;
- struct gpio_desc *gpiod;
void __iomem *base;
- struct clk *clk;
u32 num_lanes;
u32 mode;
int ret;
@@ -379,7 +495,7 @@ static int j721e_pcie_probe(struct platform_device *pdev)
switch (mode) {
case PCI_MODE_RC:
- if (!IS_ENABLED(CONFIG_PCIE_CADENCE_HOST))
+ if (!IS_ENABLED(CONFIG_PCI_J721E_HOST))
return -ENODEV;
bridge = devm_pci_alloc_host_bridge(dev, sizeof(*rc));
@@ -398,7 +514,7 @@ static int j721e_pcie_probe(struct platform_device *pdev)
pcie->cdns_pcie = cdns_pcie;
break;
case PCI_MODE_EP:
- if (!IS_ENABLED(CONFIG_PCIE_CADENCE_EP))
+ if (!IS_ENABLED(CONFIG_PCI_J721E_EP))
return -ENODEV;
ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL);
@@ -432,9 +548,13 @@ static int j721e_pcie_probe(struct platform_device *pdev)
pcie->user_cfg_base = base;
ret = of_property_read_u32(node, "num-lanes", &num_lanes);
- if (ret || num_lanes > MAX_LANES)
+ if (ret || num_lanes > data->max_lanes) {
+ dev_warn(dev, "num-lanes property not provided or invalid, setting num-lanes to 1\n");
num_lanes = 1;
+ }
+
pcie->num_lanes = num_lanes;
+ pcie->max_lanes = data->max_lanes;
if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48)))
return -EINVAL;
@@ -447,20 +567,20 @@ static int j721e_pcie_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
ret = pm_runtime_get_sync(dev);
if (ret < 0) {
- dev_err(dev, "pm_runtime_get_sync failed\n");
+ dev_err_probe(dev, ret, "pm_runtime_get_sync failed\n");
goto err_get_sync;
}
ret = j721e_pcie_ctrl_init(pcie);
if (ret < 0) {
- dev_err(dev, "pm_runtime_get_sync failed\n");
+ dev_err_probe(dev, ret, "j721e_pcie_ctrl_init failed\n");
goto err_get_sync;
}
ret = devm_request_irq(dev, irq, j721e_pcie_link_irq_handler, 0,
"j721e-pcie-link-down-irq", pcie);
if (ret < 0) {
- dev_err(dev, "failed to request link state IRQ %d\n", irq);
+ dev_err_probe(dev, ret, "failed to request link state IRQ %d\n", irq);
goto err_get_sync;
}
@@ -468,58 +588,47 @@ static int j721e_pcie_probe(struct platform_device *pdev)
switch (mode) {
case PCI_MODE_RC:
- gpiod = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
- if (IS_ERR(gpiod)) {
- ret = PTR_ERR(gpiod);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "Failed to get reset GPIO\n");
+ pcie->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(pcie->reset_gpio)) {
+ ret = dev_err_probe(dev, PTR_ERR(pcie->reset_gpio),
+ "Failed to get reset GPIO\n");
goto err_get_sync;
}
ret = cdns_pcie_init_phy(dev, cdns_pcie);
if (ret) {
- dev_err(dev, "Failed to init phy\n");
+ dev_err_probe(dev, ret, "Failed to init phy\n");
goto err_get_sync;
}
- clk = devm_clk_get_optional(dev, "pcie_refclk");
- if (IS_ERR(clk)) {
- ret = PTR_ERR(clk);
- dev_err(dev, "failed to get pcie_refclk\n");
- goto err_pcie_setup;
- }
-
- ret = clk_prepare_enable(clk);
- if (ret) {
- dev_err(dev, "failed to enable pcie_refclk\n");
+ pcie->refclk = devm_clk_get_optional_enabled(dev, "pcie_refclk");
+ if (IS_ERR(pcie->refclk)) {
+ ret = dev_err_probe(dev, PTR_ERR(pcie->refclk),
+ "failed to enable pcie_refclk\n");
goto err_pcie_setup;
}
- pcie->refclk = clk;
/*
- * "Power Sequencing and Reset Signal Timings" table in
- * PCI EXPRESS CARD ELECTROMECHANICAL SPECIFICATION, REV. 3.0
- * indicates PERST# should be deasserted after minimum of 100us
- * once REFCLK is stable. The REFCLK to the connector in RC
- * mode is selected while enabling the PHY. So deassert PERST#
- * after 100 us.
+ * Section 2.2 of the PCI Express Card Electromechanical
+ * Specification (Revision 5.1) mandates that the deassertion
+ * of the PERST# signal should be delayed by 100 ms (TPVPERL).
+ * This shall ensure that the power and the reference clock
+ * are stable.
*/
- if (gpiod) {
- usleep_range(100, 200);
- gpiod_set_value_cansleep(gpiod, 1);
+ if (pcie->reset_gpio) {
+ msleep(PCIE_T_PVPERL_MS);
+ gpiod_set_value_cansleep(pcie->reset_gpio, 1);
}
ret = cdns_pcie_host_setup(rc);
- if (ret < 0) {
- clk_disable_unprepare(pcie->refclk);
+ if (ret < 0)
goto err_pcie_setup;
- }
break;
case PCI_MODE_EP:
ret = cdns_pcie_init_phy(dev, cdns_pcie);
if (ret) {
- dev_err(dev, "Failed to init phy\n");
+ dev_err_probe(dev, ret, "Failed to init phy\n");
goto err_get_sync;
}
@@ -547,20 +656,117 @@ static void j721e_pcie_remove(struct platform_device *pdev)
struct j721e_pcie *pcie = platform_get_drvdata(pdev);
struct cdns_pcie *cdns_pcie = pcie->cdns_pcie;
struct device *dev = &pdev->dev;
+ struct cdns_pcie_ep *ep;
+ struct cdns_pcie_rc *rc;
+
+ if (pcie->mode == PCI_MODE_RC) {
+ rc = container_of(cdns_pcie, struct cdns_pcie_rc, pcie);
+ cdns_pcie_host_disable(rc);
+ } else {
+ ep = container_of(cdns_pcie, struct cdns_pcie_ep, pcie);
+ cdns_pcie_ep_disable(ep);
+ }
+
+ gpiod_set_value_cansleep(pcie->reset_gpio, 0);
- clk_disable_unprepare(pcie->refclk);
cdns_pcie_disable_phy(cdns_pcie);
+ j721e_pcie_disable_link_irq(pcie);
pm_runtime_put(dev);
pm_runtime_disable(dev);
}
+static int j721e_pcie_suspend_noirq(struct device *dev)
+{
+ struct j721e_pcie *pcie = dev_get_drvdata(dev);
+
+ if (pcie->mode == PCI_MODE_RC) {
+ gpiod_set_value_cansleep(pcie->reset_gpio, 0);
+ clk_disable_unprepare(pcie->refclk);
+ }
+
+ cdns_pcie_disable_phy(pcie->cdns_pcie);
+
+ return 0;
+}
+
+static int j721e_pcie_resume_noirq(struct device *dev)
+{
+ struct j721e_pcie *pcie = dev_get_drvdata(dev);
+ struct cdns_pcie *cdns_pcie = pcie->cdns_pcie;
+ int ret;
+
+ ret = j721e_pcie_ctrl_init(pcie);
+ if (ret < 0)
+ return ret;
+
+ j721e_pcie_config_link_irq(pcie);
+
+ /*
+	 * cdns_pcie_enable_phy() is not called explicitly in probe;
+	 * cdns_pcie_init_phy() takes care of it there.
+ */
+ ret = cdns_pcie_enable_phy(pcie->cdns_pcie);
+ if (ret < 0)
+ return ret;
+
+ if (pcie->mode == PCI_MODE_RC) {
+ struct cdns_pcie_rc *rc = cdns_pcie_to_rc(cdns_pcie);
+
+ ret = clk_prepare_enable(pcie->refclk);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Section 2.2 of the PCI Express Card Electromechanical
+ * Specification (Revision 5.1) mandates that the deassertion
+ * of the PERST# signal should be delayed by 100 ms (TPVPERL).
+ * This shall ensure that the power and the reference clock
+ * are stable.
+ */
+ if (pcie->reset_gpio) {
+ msleep(PCIE_T_PVPERL_MS);
+ gpiod_set_value_cansleep(pcie->reset_gpio, 1);
+ }
+
+ ret = cdns_pcie_host_link_setup(rc);
+ if (ret < 0) {
+ clk_disable_unprepare(pcie->refclk);
+ return ret;
+ }
+
+ /*
+ * Reset internal status of BARs to force reinitialization in
+ * cdns_pcie_host_init().
+ */
+ for (enum cdns_pcie_rp_bar bar = RP_BAR0; bar <= RP_NO_BAR; bar++)
+ rc->avail_ib_bar[bar] = true;
+
+ ret = cdns_pcie_host_init(rc);
+ if (ret) {
+ clk_disable_unprepare(pcie->refclk);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static DEFINE_NOIRQ_DEV_PM_OPS(j721e_pcie_pm_ops,
+ j721e_pcie_suspend_noirq,
+ j721e_pcie_resume_noirq);
+
static struct platform_driver j721e_pcie_driver = {
.probe = j721e_pcie_probe,
- .remove_new = j721e_pcie_remove,
+ .remove = j721e_pcie_remove,
.driver = {
.name = "j721e-pcie",
.of_match_table = of_j721e_pcie_match,
.suppress_bind_attrs = true,
+ .pm = pm_sleep_ptr(&j721e_pcie_pm_ops),
},
};
-builtin_platform_driver(j721e_pcie_driver);
+module_platform_driver(j721e_pcie_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("PCIe controller driver for TI's J721E and related SoCs");
+MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
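For context on the TPVPERL handling shared by the probe and resume paths above, here is a minimal sketch of the pattern (illustration only, not part of the patch; the helper name is made up). It assumes an optional "reset" GPIO whose logical 1 deasserts PERST# and the PCIE_T_PVPERL_MS constant from drivers/pci/pci.h, which pci-j721e.c pulls in via "../../pci.h":

#include <linux/delay.h>
#include <linux/gpio/consumer.h>

/*
 * Hypothetical helper: PERST# may only be deasserted after power and
 * REFCLK have been stable for TPVPERL (100 ms).
 */
static void example_deassert_perst(struct gpio_desc *reset_gpio)
{
	if (!reset_gpio)
		return;

	msleep(PCIE_T_PVPERL_MS);			/* TPVPERL delay */
	gpiod_set_value_cansleep(reset_gpio, 1);	/* deassert PERST# */
}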
diff --git a/drivers/pci/controller/cadence/pci-sky1.c b/drivers/pci/controller/cadence/pci-sky1.c
new file mode 100644
index 000000000000..d8c216dc120d
--- /dev/null
+++ b/drivers/pci/controller/cadence/pci-sky1.c
@@ -0,0 +1,238 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe controller driver for CIX's sky1 SoCs
+ *
+ * Copyright 2025 Cix Technology Group Co., Ltd.
+ * Author: Hans Zhang <hans.zhang@cixtech.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pci.h>
+#include <linux/pci-ecam.h>
+#include <linux/pci_ids.h>
+
+#include "pcie-cadence.h"
+#include "pcie-cadence-host-common.h"
+
+#define PCI_VENDOR_ID_CIX 0x1f6c
+#define PCI_DEVICE_ID_CIX_SKY1 0x0001
+
+#define STRAP_REG(n) ((n) * 0x04)
+#define STATUS_REG(n) ((n) * 0x04)
+#define LINK_TRAINING_ENABLE BIT(0)
+#define LINK_COMPLETE BIT(0)
+
+#define SKY1_IP_REG_BANK 0x1000
+#define SKY1_IP_CFG_CTRL_REG_BANK 0x4c00
+#define SKY1_IP_AXI_MASTER_COMMON 0xf000
+#define SKY1_AXI_SLAVE 0x9000
+#define SKY1_AXI_MASTER 0xb000
+#define SKY1_AXI_HLS_REGISTERS 0xc000
+#define SKY1_AXI_RAS_REGISTERS 0xe000
+#define SKY1_DTI_REGISTERS 0xd000
+
+#define IP_REG_I_DBG_STS_0 0x420
+
+struct sky1_pcie {
+ struct cdns_pcie *cdns_pcie;
+ struct cdns_pcie_rc *cdns_pcie_rc;
+
+ struct resource *cfg_res;
+ struct resource *msg_res;
+ struct pci_config_window *cfg;
+ void __iomem *strap_base;
+ void __iomem *status_base;
+ void __iomem *reg_base;
+ void __iomem *cfg_base;
+ void __iomem *msg_base;
+};
+
+static int sky1_pcie_resource_get(struct platform_device *pdev,
+ struct sky1_pcie *pcie)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ void __iomem *base;
+
+ base = devm_platform_ioremap_resource_byname(pdev, "reg");
+ if (IS_ERR(base))
+ return dev_err_probe(dev, PTR_ERR(base),
+ "unable to find \"reg\" registers\n");
+ pcie->reg_base = base;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
+ if (!res)
+ return dev_err_probe(dev, -ENODEV, "unable to get \"cfg\" resource\n");
+ pcie->cfg_res = res;
+
+ base = devm_platform_ioremap_resource_byname(pdev, "rcsu_strap");
+ if (IS_ERR(base))
+ return dev_err_probe(dev, PTR_ERR(base),
+ "unable to find \"rcsu_strap\" registers\n");
+ pcie->strap_base = base;
+
+ base = devm_platform_ioremap_resource_byname(pdev, "rcsu_status");
+ if (IS_ERR(base))
+ return dev_err_probe(dev, PTR_ERR(base),
+ "unable to find \"rcsu_status\" registers\n");
+ pcie->status_base = base;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "msg");
+ if (!res)
+ return dev_err_probe(dev, -ENODEV, "unable to get \"msg\" resource\n");
+ pcie->msg_res = res;
+ pcie->msg_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pcie->msg_base)) {
+ return dev_err_probe(dev, PTR_ERR(pcie->msg_base),
+ "unable to ioremap msg resource\n");
+ }
+
+ return 0;
+}
+
+static int sky1_pcie_start_link(struct cdns_pcie *cdns_pcie)
+{
+ struct sky1_pcie *pcie = dev_get_drvdata(cdns_pcie->dev);
+ u32 val;
+
+ val = readl(pcie->strap_base + STRAP_REG(1));
+ val |= LINK_TRAINING_ENABLE;
+ writel(val, pcie->strap_base + STRAP_REG(1));
+
+ return 0;
+}
+
+static void sky1_pcie_stop_link(struct cdns_pcie *cdns_pcie)
+{
+ struct sky1_pcie *pcie = dev_get_drvdata(cdns_pcie->dev);
+ u32 val;
+
+ val = readl(pcie->strap_base + STRAP_REG(1));
+ val &= ~LINK_TRAINING_ENABLE;
+ writel(val, pcie->strap_base + STRAP_REG(1));
+}
+
+static bool sky1_pcie_link_up(struct cdns_pcie *cdns_pcie)
+{
+ u32 val;
+
+ val = cdns_pcie_hpa_readl(cdns_pcie, REG_BANK_IP_REG,
+ IP_REG_I_DBG_STS_0);
+ return val & LINK_COMPLETE;
+}
+
+static const struct cdns_pcie_ops sky1_pcie_ops = {
+ .start_link = sky1_pcie_start_link,
+ .stop_link = sky1_pcie_stop_link,
+ .link_up = sky1_pcie_link_up,
+};
+
+static int sky1_pcie_probe(struct platform_device *pdev)
+{
+ struct cdns_plat_pcie_of_data *reg_off;
+ struct device *dev = &pdev->dev;
+ struct pci_host_bridge *bridge;
+ struct cdns_pcie *cdns_pcie;
+ struct resource_entry *bus;
+ struct cdns_pcie_rc *rc;
+ struct sky1_pcie *pcie;
+ int ret;
+
+ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+ if (!pcie)
+ return -ENOMEM;
+
+ bridge = devm_pci_alloc_host_bridge(dev, sizeof(*rc));
+ if (!bridge)
+ return -ENOMEM;
+
+ ret = sky1_pcie_resource_get(pdev, pcie);
+ if (ret < 0)
+ return ret;
+
+ bus = resource_list_first_type(&bridge->windows, IORESOURCE_BUS);
+ if (!bus)
+ return -ENODEV;
+
+ pcie->cfg = pci_ecam_create(dev, pcie->cfg_res, bus->res,
+ &pci_generic_ecam_ops);
+ if (IS_ERR(pcie->cfg))
+ return PTR_ERR(pcie->cfg);
+
+ bridge->ops = (struct pci_ops *)&pci_generic_ecam_ops.pci_ops;
+ rc = pci_host_bridge_priv(bridge);
+ rc->ecam_supported = 1;
+ rc->cfg_base = pcie->cfg->win;
+ rc->cfg_res = &pcie->cfg->res;
+
+ cdns_pcie = &rc->pcie;
+ cdns_pcie->dev = dev;
+ cdns_pcie->ops = &sky1_pcie_ops;
+ cdns_pcie->reg_base = pcie->reg_base;
+ cdns_pcie->msg_res = pcie->msg_res;
+ cdns_pcie->is_rc = 1;
+
+ reg_off = devm_kzalloc(dev, sizeof(*reg_off), GFP_KERNEL);
+ if (!reg_off)
+ return -ENOMEM;
+
+ reg_off->ip_reg_bank_offset = SKY1_IP_REG_BANK;
+ reg_off->ip_cfg_ctrl_reg_offset = SKY1_IP_CFG_CTRL_REG_BANK;
+ reg_off->axi_mstr_common_offset = SKY1_IP_AXI_MASTER_COMMON;
+ reg_off->axi_slave_offset = SKY1_AXI_SLAVE;
+ reg_off->axi_master_offset = SKY1_AXI_MASTER;
+ reg_off->axi_hls_offset = SKY1_AXI_HLS_REGISTERS;
+ reg_off->axi_ras_offset = SKY1_AXI_RAS_REGISTERS;
+ reg_off->axi_dti_offset = SKY1_DTI_REGISTERS;
+ cdns_pcie->cdns_pcie_reg_offsets = reg_off;
+
+ pcie->cdns_pcie = cdns_pcie;
+ pcie->cdns_pcie_rc = rc;
+ pcie->cfg_base = rc->cfg_base;
+ bridge->sysdata = pcie->cfg;
+
+ rc->vendor_id = PCI_VENDOR_ID_CIX;
+ rc->device_id = PCI_DEVICE_ID_CIX_SKY1;
+ rc->no_inbound_map = 1;
+
+ dev_set_drvdata(dev, pcie);
+
+ ret = cdns_pcie_hpa_host_setup(rc);
+ if (ret < 0) {
+ pci_ecam_free(pcie->cfg);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id of_sky1_pcie_match[] = {
+ { .compatible = "cix,sky1-pcie-host", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, of_sky1_pcie_match);
+
+static void sky1_pcie_remove(struct platform_device *pdev)
+{
+ struct sky1_pcie *pcie = platform_get_drvdata(pdev);
+
+ pci_ecam_free(pcie->cfg);
+}
+
+static struct platform_driver sky1_pcie_driver = {
+ .probe = sky1_pcie_probe,
+ .remove = sky1_pcie_remove,
+ .driver = {
+ .name = "sky1-pcie",
+ .of_match_table = of_sky1_pcie_match,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
+};
+module_platform_driver(sky1_pcie_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("PCIe controller driver for CIX's sky1 SoCs");
+MODULE_AUTHOR("Hans Zhang <hans.zhang@cixtech.com>");
diff --git a/drivers/pci/controller/cadence/pcie-cadence-ep.c b/drivers/pci/controller/cadence/pcie-cadence-ep.c
index b8b655d4047e..c0e1194a936b 100644
--- a/drivers/pci/controller/cadence/pcie-cadence-ep.c
+++ b/drivers/pci/controller/cadence/pcie-cadence-ep.c
@@ -3,14 +3,17 @@
// Cadence PCIe endpoint controller driver.
// Author: Cyrille Pitchen <cyrille.pitchen@free-electrons.com>
+#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/of.h>
#include <linux/pci-epc.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include "pcie-cadence.h"
+#include "../../pci.h"
#define CDNS_PCIE_EP_MIN_APERTURE 128 /* 128 bytes */
#define CDNS_PCIE_EP_IRQ_PCI_ADDR_NONE 0x1
@@ -18,12 +21,13 @@
static u8 cdns_pcie_get_fn_from_vfn(struct cdns_pcie *pcie, u8 fn, u8 vfn)
{
- u32 cap = CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET;
u32 first_vf_offset, stride;
+ u16 cap;
if (vfn == 0)
return fn;
+ cap = cdns_pcie_find_ext_capability(pcie, PCI_EXT_CAP_ID_SRIOV);
first_vf_offset = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_SRIOV_VF_OFFSET);
stride = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_SRIOV_VF_STRIDE);
fn = fn + first_vf_offset + ((vfn - 1) * stride);
@@ -35,10 +39,11 @@ static int cdns_pcie_ep_write_header(struct pci_epc *epc, u8 fn, u8 vfn,
struct pci_epf_header *hdr)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
- u32 cap = CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET;
struct cdns_pcie *pcie = &ep->pcie;
u32 reg;
+ u16 cap;
+ cap = cdns_pcie_find_ext_capability(pcie, PCI_EXT_CAP_ID_SRIOV);
if (vfn > 1) {
dev_err(&epc->dev, "Only Virtual Function #1 has deviceID\n");
return -EINVAL;
@@ -98,14 +103,11 @@ static int cdns_pcie_ep_set_bar(struct pci_epc *epc, u8 fn, u8 vfn,
ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_IO_32BITS;
} else {
bool is_prefetch = !!(flags & PCI_BASE_ADDRESS_MEM_PREFETCH);
- bool is_64bits = sz > SZ_2G;
+ bool is_64bits = !!(flags & PCI_BASE_ADDRESS_MEM_TYPE_64);
if (is_64bits && (bar & 1))
return -EINVAL;
- if (is_64bits && !(flags & PCI_BASE_ADDRESS_MEM_TYPE_64))
- epf_bar->flags |= PCI_BASE_ADDRESS_MEM_TYPE_64;
-
if (is_64bits && is_prefetch)
ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_64BITS;
else if (is_prefetch)
@@ -222,13 +224,15 @@ static void cdns_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn, u8 vfn,
clear_bit(r, &ep->ob_region_map);
}
-static int cdns_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 vfn, u8 mmc)
+static int cdns_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 vfn, u8 nr_irqs)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
struct cdns_pcie *pcie = &ep->pcie;
- u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
+ u8 mmc = order_base_2(nr_irqs);
u16 flags;
+ u8 cap;
+ cap = cdns_pcie_find_capability(pcie, PCI_CAP_ID_MSI);
fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);
/*
@@ -248,9 +252,10 @@ static int cdns_pcie_ep_get_msi(struct pci_epc *epc, u8 fn, u8 vfn)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
struct cdns_pcie *pcie = &ep->pcie;
- u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
u16 flags, mme;
+ u8 cap;
+ cap = cdns_pcie_find_capability(pcie, PCI_CAP_ID_MSI);
fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);
/* Validate that the MSI feature is actually enabled. */
@@ -262,18 +267,19 @@ static int cdns_pcie_ep_get_msi(struct pci_epc *epc, u8 fn, u8 vfn)
* Get the Multiple Message Enable bitfield from the Message Control
* register.
*/
- mme = (flags & PCI_MSI_FLAGS_QSIZE) >> 4;
+ mme = FIELD_GET(PCI_MSI_FLAGS_QSIZE, flags);
- return mme;
+ return 1 << mme;
}
static int cdns_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
struct cdns_pcie *pcie = &ep->pcie;
- u32 cap = CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET;
u32 val, reg;
+ u8 cap;
+ cap = cdns_pcie_find_capability(pcie, PCI_CAP_ID_MSIX);
func_no = cdns_pcie_get_fn_from_vfn(pcie, func_no, vfunc_no);
reg = cap + PCI_MSIX_FLAGS;
@@ -283,34 +289,34 @@ static int cdns_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
val &= PCI_MSIX_FLAGS_QSIZE;
- return val;
+ return val + 1;
}
static int cdns_pcie_ep_set_msix(struct pci_epc *epc, u8 fn, u8 vfn,
- u16 interrupts, enum pci_barno bir,
- u32 offset)
+ u16 nr_irqs, enum pci_barno bir, u32 offset)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
struct cdns_pcie *pcie = &ep->pcie;
- u32 cap = CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET;
u32 val, reg;
+ u8 cap;
+ cap = cdns_pcie_find_capability(pcie, PCI_CAP_ID_MSIX);
fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);
reg = cap + PCI_MSIX_FLAGS;
val = cdns_pcie_ep_fn_readw(pcie, fn, reg);
val &= ~PCI_MSIX_FLAGS_QSIZE;
- val |= interrupts;
+ val |= nr_irqs - 1; /* encoded as N-1 */
cdns_pcie_ep_fn_writew(pcie, fn, reg, val);
- /* Set MSIX BAR and offset */
+ /* Set MSI-X BAR and offset */
reg = cap + PCI_MSIX_TABLE;
val = offset | bir;
cdns_pcie_ep_fn_writel(pcie, fn, reg, val);
- /* Set PBA BAR and offset. BAR must match MSIX BAR */
+ /* Set PBA BAR and offset. BAR must match MSI-X BAR */
reg = cap + PCI_MSIX_PBA;
- val = (offset + (interrupts * PCI_MSIX_ENTRY_SIZE)) | bir;
+ val = (offset + (nr_irqs * PCI_MSIX_ENTRY_SIZE)) | bir;
cdns_pcie_ep_fn_writel(pcie, fn, reg, val);
return 0;
@@ -339,10 +345,10 @@ static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn, u8 intx,
if (is_asserted) {
ep->irq_pending |= BIT(intx);
- msg_code = MSG_CODE_ASSERT_INTA + intx;
+ msg_code = PCIE_MSG_CODE_ASSERT_INTA + intx;
} else {
ep->irq_pending &= ~BIT(intx);
- msg_code = MSG_CODE_DEASSERT_INTA + intx;
+ msg_code = PCIE_MSG_CODE_DEASSERT_INTA + intx;
}
spin_lock_irqsave(&ep->lock, flags);
@@ -353,14 +359,13 @@ static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn, u8 intx,
}
spin_unlock_irqrestore(&ep->lock, flags);
- offset = CDNS_PCIE_NORMAL_MSG_ROUTING(MSG_ROUTING_LOCAL) |
- CDNS_PCIE_NORMAL_MSG_CODE(msg_code) |
- CDNS_PCIE_MSG_NO_DATA;
+ offset = CDNS_PCIE_NORMAL_MSG_ROUTING(PCIE_MSG_TYPE_R_LOCAL) |
+ CDNS_PCIE_NORMAL_MSG_CODE(msg_code);
writel(0, ep->irq_cpu_addr + offset);
}
-static int cdns_pcie_ep_send_legacy_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn,
- u8 intx)
+static int cdns_pcie_ep_send_intx_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn,
+ u8 intx)
{
u16 cmd;
@@ -370,7 +375,7 @@ static int cdns_pcie_ep_send_legacy_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn,
cdns_pcie_ep_assert_intx(ep, fn, intx, true);
/*
- * The mdelay() value was taken from dra7xx_pcie_raise_legacy_irq()
+ * The mdelay() value was taken from dra7xx_pcie_raise_intx_irq()
*/
mdelay(1);
cdns_pcie_ep_assert_intx(ep, fn, intx, false);
@@ -381,11 +386,11 @@ static int cdns_pcie_ep_send_msi_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn,
u8 interrupt_num)
{
struct cdns_pcie *pcie = &ep->pcie;
- u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
u16 flags, mme, data, data_mask;
- u8 msi_count;
u64 pci_addr, pci_addr_mask = 0xff;
+ u8 msi_count, cap;
+ cap = cdns_pcie_find_capability(pcie, PCI_CAP_ID_MSI);
fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);
/* Check whether the MSI feature has been enabled by the PCI host. */
@@ -394,7 +399,7 @@ static int cdns_pcie_ep_send_msi_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn,
return -EINVAL;
/* Get the number of enabled MSIs */
- mme = (flags & PCI_MSI_FLAGS_QSIZE) >> 4;
+ mme = FIELD_GET(PCI_MSI_FLAGS_QSIZE, flags);
msi_count = 1 << mme;
if (!interrupt_num || interrupt_num > msi_count)
return -EINVAL;
@@ -433,14 +438,14 @@ static int cdns_pcie_ep_map_msi_irq(struct pci_epc *epc, u8 fn, u8 vfn,
u32 *msi_addr_offset)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
- u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
struct cdns_pcie *pcie = &ep->pcie;
u64 pci_addr, pci_addr_mask = 0xff;
u16 flags, mme, data, data_mask;
- u8 msi_count;
+ u8 msi_count, cap;
int ret;
int i;
+ cap = cdns_pcie_find_capability(pcie, PCI_CAP_ID_MSI);
fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);
/* Check whether the MSI feature has been enabled by the PCI host. */
@@ -449,7 +454,7 @@ static int cdns_pcie_ep_map_msi_irq(struct pci_epc *epc, u8 fn, u8 vfn,
return -EINVAL;
/* Get the number of enabled MSIs */
- mme = (flags & PCI_MSI_FLAGS_QSIZE) >> 4;
+ mme = FIELD_GET(PCI_MSI_FLAGS_QSIZE, flags);
msi_count = 1 << mme;
if (!interrupt_num || interrupt_num > msi_count)
return -EINVAL;
@@ -483,16 +488,16 @@ static int cdns_pcie_ep_map_msi_irq(struct pci_epc *epc, u8 fn, u8 vfn,
static int cdns_pcie_ep_send_msix_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn,
u16 interrupt_num)
{
- u32 cap = CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET;
u32 tbl_offset, msg_data, reg;
struct cdns_pcie *pcie = &ep->pcie;
struct pci_epf_msix_tbl *msix_tbl;
struct cdns_pcie_epf *epf;
u64 pci_addr_mask = 0xff;
u64 msg_addr;
+ u8 bir, cap;
u16 flags;
- u8 bir;
+ cap = cdns_pcie_find_capability(pcie, PCI_CAP_ID_MSIX);
epf = &ep->epf[fn];
if (vfn > 0)
epf = &epf->epf[vfn - 1];
@@ -506,7 +511,7 @@ static int cdns_pcie_ep_send_msix_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn,
reg = cap + PCI_MSIX_TABLE;
tbl_offset = cdns_pcie_ep_fn_readl(pcie, fn, reg);
- bir = tbl_offset & PCI_MSIX_TABLE_BIR;
+ bir = FIELD_GET(PCI_MSIX_TABLE_BIR, tbl_offset);
tbl_offset &= PCI_MSIX_TABLE_OFFSET;
msix_tbl = epf->epf_bar[bir]->addr + tbl_offset;
@@ -531,25 +536,24 @@ static int cdns_pcie_ep_send_msix_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn,
}
static int cdns_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn, u8 vfn,
- enum pci_epc_irq_type type,
- u16 interrupt_num)
+ unsigned int type, u16 interrupt_num)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
struct cdns_pcie *pcie = &ep->pcie;
struct device *dev = pcie->dev;
switch (type) {
- case PCI_EPC_IRQ_LEGACY:
+ case PCI_IRQ_INTX:
if (vfn > 0) {
- dev_err(dev, "Cannot raise legacy interrupts for VF\n");
+ dev_err(dev, "Cannot raise INTX interrupts for VF\n");
return -EINVAL;
}
- return cdns_pcie_ep_send_legacy_irq(ep, fn, vfn, 0);
+ return cdns_pcie_ep_send_intx_irq(ep, fn, vfn, 0);
- case PCI_EPC_IRQ_MSI:
+ case PCI_IRQ_MSI:
return cdns_pcie_ep_send_msi_irq(ep, fn, vfn, interrupt_num);
- case PCI_EPC_IRQ_MSIX:
+ case PCI_IRQ_MSIX:
return cdns_pcie_ep_send_msix_irq(ep, fn, vfn, interrupt_num);
default:
@@ -565,26 +569,38 @@ static int cdns_pcie_ep_start(struct pci_epc *epc)
struct cdns_pcie *pcie = &ep->pcie;
struct device *dev = pcie->dev;
int max_epfs = sizeof(epc->function_num_map) * 8;
- int ret, value, epf;
+ int ret, epf, last_fn;
+ u32 reg, value;
+ u8 cap;
+ cap = cdns_pcie_find_capability(pcie, PCI_CAP_ID_EXP);
/*
* BIT(0) is hardwired to 1, hence function 0 is always enabled
* and can't be disabled anyway.
*/
cdns_pcie_writel(pcie, CDNS_PCIE_LM_EP_FUNC_CFG, epc->function_num_map);
+ /*
+	 * The Next Function Number field in the ARI Capability and Control
+	 * register of the last function must be 0, so clear it for the last
+	 * function in use.
+ */
+ last_fn = find_last_bit(&epc->function_num_map, BITS_PER_LONG);
+ reg = CDNS_PCIE_CORE_PF_I_ARI_CAP_AND_CTRL(last_fn);
+ value = cdns_pcie_readl(pcie, reg);
+ value &= ~CDNS_PCIE_ARI_CAP_NFN_MASK;
+ cdns_pcie_writel(pcie, reg, value);
+
if (ep->quirk_disable_flr) {
for (epf = 0; epf < max_epfs; epf++) {
if (!(epc->function_num_map & BIT(epf)))
continue;
value = cdns_pcie_ep_fn_readl(pcie, epf,
- CDNS_PCIE_EP_FUNC_DEV_CAP_OFFSET +
- PCI_EXP_DEVCAP);
+ cap + PCI_EXP_DEVCAP);
value &= ~PCI_EXP_DEVCAP_FLR;
cdns_pcie_ep_fn_writel(pcie, epf,
- CDNS_PCIE_EP_FUNC_DEV_CAP_OFFSET +
- PCI_EXP_DEVCAP, value);
+ cap + PCI_EXP_DEVCAP, value);
}
}
@@ -598,14 +614,12 @@ static int cdns_pcie_ep_start(struct pci_epc *epc)
}
static const struct pci_epc_features cdns_pcie_epc_vf_features = {
- .linkup_notifier = false,
.msi_capable = true,
.msix_capable = true,
.align = 65536,
};
static const struct pci_epc_features cdns_pcie_epc_features = {
- .linkup_notifier = false,
.msi_capable = true,
.msix_capable = true,
.align = 256,
@@ -636,6 +650,17 @@ static const struct pci_epc_ops cdns_pcie_epc_ops = {
.get_features = cdns_pcie_ep_get_features,
};
+void cdns_pcie_ep_disable(struct cdns_pcie_ep *ep)
+{
+ struct device *dev = ep->pcie.dev;
+ struct pci_epc *epc = to_pci_epc(dev);
+
+ pci_epc_deinit_notify(epc);
+ pci_epc_mem_free_addr(epc, ep->irq_phys_addr, ep->irq_cpu_addr,
+ SZ_128K);
+ pci_epc_mem_exit(epc);
+}
+EXPORT_SYMBOL_GPL(cdns_pcie_ep_disable);
int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
{
@@ -734,6 +759,8 @@ int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
spin_lock_init(&ep->lock);
+ pci_epc_init_notify(epc);
+
return 0;
free_epc_mem:
@@ -741,3 +768,8 @@ int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
return ret;
}
+EXPORT_SYMBOL_GPL(cdns_pcie_ep_setup);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Cadence PCIe endpoint controller driver");
+MODULE_AUTHOR("Cyrille Pitchen <cyrille.pitchen@free-electrons.com>");
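The MSI/MSI-X reworks above make the EPC callbacks trade in vector counts rather than raw register fields: cdns_pcie_ep_get_msi() now returns 1 << MME and cdns_pcie_ep_get_msix() returns QSIZE + 1. A small userspace sketch of the two encodings (illustration only, not kernel code):

#include <stdio.h>

int main(void)
{
	unsigned int nr_irqs = 8;	/* MSI vector counts are powers of two */
	unsigned int mme = 0, n = nr_irqs;

	while (n >>= 1)		/* log2; order_base_2() for a power-of-two count */
		mme++;

	printf("MSI:   %u vectors -> MME/QSIZE field %u -> decoded as 1 << %u = %u\n",
	       nr_irqs, mme, mme, 1u << mme);
	printf("MSI-X: %u entries -> QSIZE field %u (N-1) -> decoded as %u + 1 = %u\n",
	       nr_irqs, nr_irqs - 1, nr_irqs - 1, nr_irqs);
	return 0;
}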
diff --git a/drivers/pci/controller/cadence/pcie-cadence-host-common.c b/drivers/pci/controller/cadence/pcie-cadence-host-common.c
new file mode 100644
index 000000000000..15415d7f35ee
--- /dev/null
+++ b/drivers/pci/controller/cadence/pcie-cadence-host-common.c
@@ -0,0 +1,288 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Cadence PCIe host controller library.
+ *
+ * Copyright (c) 2017 Cadence
+ * Author: Cyrille Pitchen <cyrille.pitchen@free-electrons.com>
+ */
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/list_sort.h>
+#include <linux/of_address.h>
+#include <linux/of_pci.h>
+#include <linux/platform_device.h>
+
+#include "pcie-cadence.h"
+#include "pcie-cadence-host-common.h"
+
+#define LINK_RETRAIN_TIMEOUT HZ
+
+u64 bar_max_size[] = {
+ [RP_BAR0] = _ULL(128 * SZ_2G),
+ [RP_BAR1] = SZ_2G,
+ [RP_NO_BAR] = _BITULL(63),
+};
+EXPORT_SYMBOL_GPL(bar_max_size);
+
+int cdns_pcie_host_training_complete(struct cdns_pcie *pcie)
+{
+ u32 pcie_cap_off = CDNS_PCIE_RP_CAP_OFFSET;
+ unsigned long end_jiffies;
+ u16 lnk_stat;
+
+ /* Wait for link training to complete. Exit after timeout. */
+ end_jiffies = jiffies + LINK_RETRAIN_TIMEOUT;
+ do {
+ lnk_stat = cdns_pcie_rp_readw(pcie, pcie_cap_off + PCI_EXP_LNKSTA);
+ if (!(lnk_stat & PCI_EXP_LNKSTA_LT))
+ break;
+ usleep_range(0, 1000);
+ } while (time_before(jiffies, end_jiffies));
+
+ if (!(lnk_stat & PCI_EXP_LNKSTA_LT))
+ return 0;
+
+ return -ETIMEDOUT;
+}
+EXPORT_SYMBOL_GPL(cdns_pcie_host_training_complete);
+
+int cdns_pcie_host_wait_for_link(struct cdns_pcie *pcie,
+ cdns_pcie_linkup_func pcie_link_up)
+{
+ struct device *dev = pcie->dev;
+ int retries;
+
+ /* Check if the link is up or not */
+ for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
+ if (pcie_link_up(pcie)) {
+ dev_info(dev, "Link up\n");
+ return 0;
+ }
+ usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
+ }
+
+ return -ETIMEDOUT;
+}
+EXPORT_SYMBOL_GPL(cdns_pcie_host_wait_for_link);
+
+int cdns_pcie_retrain(struct cdns_pcie *pcie,
+ cdns_pcie_linkup_func pcie_link_up)
+{
+ u32 lnk_cap_sls, pcie_cap_off = CDNS_PCIE_RP_CAP_OFFSET;
+ u16 lnk_stat, lnk_ctl;
+ int ret = 0;
+
+ /*
+	 * Set the Retrain Link bit if the current link speed is 2.5 GT/s
+	 * but the PCIe Root Port supports more than 2.5 GT/s.
+ */
+
+ lnk_cap_sls = cdns_pcie_readl(pcie, (CDNS_PCIE_RP_BASE + pcie_cap_off +
+ PCI_EXP_LNKCAP));
+ if ((lnk_cap_sls & PCI_EXP_LNKCAP_SLS) <= PCI_EXP_LNKCAP_SLS_2_5GB)
+ return ret;
+
+ lnk_stat = cdns_pcie_rp_readw(pcie, pcie_cap_off + PCI_EXP_LNKSTA);
+ if ((lnk_stat & PCI_EXP_LNKSTA_CLS) == PCI_EXP_LNKSTA_CLS_2_5GB) {
+ lnk_ctl = cdns_pcie_rp_readw(pcie,
+ pcie_cap_off + PCI_EXP_LNKCTL);
+ lnk_ctl |= PCI_EXP_LNKCTL_RL;
+ cdns_pcie_rp_writew(pcie, pcie_cap_off + PCI_EXP_LNKCTL,
+ lnk_ctl);
+
+ ret = cdns_pcie_host_training_complete(pcie);
+ if (ret)
+ return ret;
+
+ ret = cdns_pcie_host_wait_for_link(pcie, pcie_link_up);
+ }
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cdns_pcie_retrain);
+
+int cdns_pcie_host_start_link(struct cdns_pcie_rc *rc,
+ cdns_pcie_linkup_func pcie_link_up)
+{
+ struct cdns_pcie *pcie = &rc->pcie;
+ int ret;
+
+ ret = cdns_pcie_host_wait_for_link(pcie, pcie_link_up);
+
+ /*
+ * Retrain link for Gen2 training defect
+ * if quirk flag is set.
+ */
+ if (!ret && rc->quirk_retrain_flag)
+ ret = cdns_pcie_retrain(pcie, pcie_link_up);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cdns_pcie_host_start_link);
+
+enum cdns_pcie_rp_bar
+cdns_pcie_host_find_min_bar(struct cdns_pcie_rc *rc, u64 size)
+{
+ enum cdns_pcie_rp_bar bar, sel_bar;
+
+ sel_bar = RP_BAR_UNDEFINED;
+ for (bar = RP_BAR0; bar <= RP_NO_BAR; bar++) {
+ if (!rc->avail_ib_bar[bar])
+ continue;
+
+ if (size <= bar_max_size[bar]) {
+ if (sel_bar == RP_BAR_UNDEFINED) {
+ sel_bar = bar;
+ continue;
+ }
+
+ if (bar_max_size[bar] < bar_max_size[sel_bar])
+ sel_bar = bar;
+ }
+ }
+
+ return sel_bar;
+}
+EXPORT_SYMBOL_GPL(cdns_pcie_host_find_min_bar);
+
+enum cdns_pcie_rp_bar
+cdns_pcie_host_find_max_bar(struct cdns_pcie_rc *rc, u64 size)
+{
+ enum cdns_pcie_rp_bar bar, sel_bar;
+
+ sel_bar = RP_BAR_UNDEFINED;
+ for (bar = RP_BAR0; bar <= RP_NO_BAR; bar++) {
+ if (!rc->avail_ib_bar[bar])
+ continue;
+
+ if (size >= bar_max_size[bar]) {
+ if (sel_bar == RP_BAR_UNDEFINED) {
+ sel_bar = bar;
+ continue;
+ }
+
+ if (bar_max_size[bar] > bar_max_size[sel_bar])
+ sel_bar = bar;
+ }
+ }
+
+ return sel_bar;
+}
+EXPORT_SYMBOL_GPL(cdns_pcie_host_find_max_bar);
+
+int cdns_pcie_host_dma_ranges_cmp(void *priv, const struct list_head *a,
+ const struct list_head *b)
+{
+ struct resource_entry *entry1, *entry2;
+
+ entry1 = container_of(a, struct resource_entry, node);
+ entry2 = container_of(b, struct resource_entry, node);
+
+ return resource_size(entry2->res) - resource_size(entry1->res);
+}
+EXPORT_SYMBOL_GPL(cdns_pcie_host_dma_ranges_cmp);
+
+int cdns_pcie_host_bar_config(struct cdns_pcie_rc *rc,
+ struct resource_entry *entry,
+ cdns_pcie_host_bar_ib_cfg pci_host_ib_config)
+{
+ struct cdns_pcie *pcie = &rc->pcie;
+ struct device *dev = pcie->dev;
+ u64 cpu_addr, size, winsize;
+ enum cdns_pcie_rp_bar bar;
+ unsigned long flags;
+ int ret;
+
+ cpu_addr = entry->res->start;
+ flags = entry->res->flags;
+ size = resource_size(entry->res);
+
+ while (size > 0) {
+ /*
+ * Try to find a minimum BAR whose size is greater than
+ * or equal to the remaining resource_entry size. This will
+ * fail if the size of each of the available BARs is less than
+ * the remaining resource_entry size.
+ *
+	 * If a minimum BAR is found, the IB ATU is configured for it and
+	 * the function returns.
+ */
+ bar = cdns_pcie_host_find_min_bar(rc, size);
+ if (bar != RP_BAR_UNDEFINED) {
+ ret = pci_host_ib_config(rc, bar, cpu_addr, size, flags);
+ if (ret)
+ dev_err(dev, "IB BAR: %d config failed\n", bar);
+ return ret;
+ }
+
+ /*
+ * If the control reaches here, it would mean the remaining
+ * resource_entry size cannot be fitted in a single BAR. So we
+ * find a maximum BAR whose size is less than or equal to the
+ * remaining resource_entry size and split the resource entry
+ * so that part of resource entry is fitted inside the maximum
+ * BAR. The remaining size would be fitted during the next
+ * iteration of the loop.
+ *
+ * If a maximum BAR is not found, there is no way we can fit
+ * this resource_entry, so we error out.
+ */
+ bar = cdns_pcie_host_find_max_bar(rc, size);
+ if (bar == RP_BAR_UNDEFINED) {
+ dev_err(dev, "No free BAR to map cpu_addr %llx\n",
+ cpu_addr);
+ return -EINVAL;
+ }
+
+ winsize = bar_max_size[bar];
+ ret = pci_host_ib_config(rc, bar, cpu_addr, winsize, flags);
+ if (ret) {
+ dev_err(dev, "IB BAR: %d config failed\n", bar);
+ return ret;
+ }
+
+ size -= winsize;
+ cpu_addr += winsize;
+ }
+
+ return 0;
+}
+
+int cdns_pcie_host_map_dma_ranges(struct cdns_pcie_rc *rc,
+ cdns_pcie_host_bar_ib_cfg pci_host_ib_config)
+{
+ struct cdns_pcie *pcie = &rc->pcie;
+ struct device *dev = pcie->dev;
+ struct device_node *np = dev->of_node;
+ struct pci_host_bridge *bridge;
+ struct resource_entry *entry;
+ u32 no_bar_nbits = 32;
+ int err;
+
+ bridge = pci_host_bridge_from_priv(rc);
+ if (!bridge)
+ return -ENOMEM;
+
+ if (list_empty(&bridge->dma_ranges)) {
+ of_property_read_u32(np, "cdns,no-bar-match-nbits",
+ &no_bar_nbits);
+ err = pci_host_ib_config(rc, RP_NO_BAR, 0x0, (u64)1 << no_bar_nbits, 0);
+ if (err)
+ dev_err(dev, "IB BAR: %d config failed\n", RP_NO_BAR);
+ return err;
+ }
+
+ list_sort(NULL, &bridge->dma_ranges, cdns_pcie_host_dma_ranges_cmp);
+
+ resource_list_for_each_entry(entry, &bridge->dma_ranges) {
+ err = cdns_pcie_host_bar_config(rc, entry, pci_host_ib_config);
+ if (err) {
+			dev_err(dev, "Failed to configure IB using dma-ranges\n");
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Cadence PCIe host controller library");
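As a worked example of the two-phase BAR selection in cdns_pcie_host_bar_config() above, the following userspace simulation (illustration only; it reuses the RP_BAR0/RP_BAR1 limits from bar_max_size[] and assumes RP_NO_BAR is already taken) shows a 300 GiB dma-range being split greedily across BAR0 and BAR1 and then failing for the remainder, which is the case where the driver reports that no free BAR is left:

#include <stdint.h>
#include <stdio.h>

#define GiB (1024ULL * 1024 * 1024)

enum { BAR0, BAR1, NBARS };

static const uint64_t bar_max[NBARS] = { 256 * GiB, 2 * GiB };

/* Smallest free BAR that can hold "size", or -1 (cf. cdns_pcie_host_find_min_bar) */
static int find_min_bar(const int *avail, uint64_t size)
{
	int sel = -1;

	for (int b = 0; b < NBARS; b++)
		if (avail[b] && size <= bar_max[b] &&
		    (sel < 0 || bar_max[b] < bar_max[sel]))
			sel = b;
	return sel;
}

/* Largest free BAR not bigger than "size", or -1 (cf. cdns_pcie_host_find_max_bar) */
static int find_max_bar(const int *avail, uint64_t size)
{
	int sel = -1;

	for (int b = 0; b < NBARS; b++)
		if (avail[b] && size >= bar_max[b] &&
		    (sel < 0 || bar_max[b] > bar_max[sel]))
			sel = b;
	return sel;
}

int main(void)
{
	int avail[NBARS] = { 1, 1 };	/* RP_NO_BAR assumed already in use */
	uint64_t size = 300 * GiB;

	while (size > 0) {
		int bar = find_min_bar(avail, size);

		if (bar >= 0) {		/* whole remainder fits in one BAR */
			printf("BAR%d gets the final %llu GiB\n",
			       bar, (unsigned long long)(size / GiB));
			return 0;
		}

		bar = find_max_bar(avail, size);	/* must split */
		if (bar < 0) {
			printf("No free BAR for the remaining %llu GiB\n",
			       (unsigned long long)(size / GiB));
			return 1;
		}

		printf("BAR%d takes a %llu GiB slice\n",
		       bar, (unsigned long long)(bar_max[bar] / GiB));
		avail[bar] = 0;
		size -= bar_max[bar];
	}
	return 0;
}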
diff --git a/drivers/pci/controller/cadence/pcie-cadence-host-common.h b/drivers/pci/controller/cadence/pcie-cadence-host-common.h
new file mode 100644
index 000000000000..fe7d4202a8b6
--- /dev/null
+++ b/drivers/pci/controller/cadence/pcie-cadence-host-common.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Cadence PCIe Host controller driver.
+ *
+ * Copyright (c) 2017 Cadence
+ * Author: Cyrille Pitchen <cyrille.pitchen@free-electrons.com>
+ */
+#ifndef _PCIE_CADENCE_HOST_COMMON_H
+#define _PCIE_CADENCE_HOST_COMMON_H
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+
+extern u64 bar_max_size[];
+
+typedef int (*cdns_pcie_host_bar_ib_cfg)(struct cdns_pcie_rc *,
+ enum cdns_pcie_rp_bar,
+ u64,
+ u64,
+ unsigned long);
+typedef bool (*cdns_pcie_linkup_func)(struct cdns_pcie *);
+
+int cdns_pcie_host_training_complete(struct cdns_pcie *pcie);
+int cdns_pcie_host_wait_for_link(struct cdns_pcie *pcie,
+ cdns_pcie_linkup_func pcie_link_up);
+int cdns_pcie_retrain(struct cdns_pcie *pcie, cdns_pcie_linkup_func pcie_linkup_func);
+int cdns_pcie_host_start_link(struct cdns_pcie_rc *rc,
+ cdns_pcie_linkup_func pcie_link_up);
+enum cdns_pcie_rp_bar
+cdns_pcie_host_find_min_bar(struct cdns_pcie_rc *rc, u64 size);
+enum cdns_pcie_rp_bar
+cdns_pcie_host_find_max_bar(struct cdns_pcie_rc *rc, u64 size);
+int cdns_pcie_host_dma_ranges_cmp(void *priv, const struct list_head *a,
+ const struct list_head *b);
+int cdns_pcie_host_bar_ib_config(struct cdns_pcie_rc *rc,
+ enum cdns_pcie_rp_bar bar,
+ u64 cpu_addr,
+ u64 size,
+ unsigned long flags);
+int cdns_pcie_host_bar_config(struct cdns_pcie_rc *rc,
+ struct resource_entry *entry,
+ cdns_pcie_host_bar_ib_cfg pci_host_ib_config);
+int cdns_pcie_host_map_dma_ranges(struct cdns_pcie_rc *rc,
+ cdns_pcie_host_bar_ib_cfg pci_host_ib_config);
+
+#endif /* _PCIE_CADENCE_HOST_COMMON_H */
diff --git a/drivers/pci/controller/cadence/pcie-cadence-host-hpa.c b/drivers/pci/controller/cadence/pcie-cadence-host-hpa.c
new file mode 100644
index 000000000000..0f540bed58e8
--- /dev/null
+++ b/drivers/pci/controller/cadence/pcie-cadence-host-hpa.c
@@ -0,0 +1,368 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Cadence PCIe host controller driver.
+ *
+ * Copyright (c) 2024, Cadence Design Systems
+ * Author: Manikandan K Pillai <mpillai@cadence.com>
+ */
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/list_sort.h>
+#include <linux/of_address.h>
+#include <linux/of_pci.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+
+#include "pcie-cadence.h"
+#include "pcie-cadence-host-common.h"
+
+static u8 bar_aperture_mask[] = {
+ [RP_BAR0] = 0x3F,
+ [RP_BAR1] = 0x3F,
+};
+
+void __iomem *cdns_pci_hpa_map_bus(struct pci_bus *bus, unsigned int devfn,
+ int where)
+{
+ struct pci_host_bridge *bridge = pci_find_host_bridge(bus);
+ struct cdns_pcie_rc *rc = pci_host_bridge_priv(bridge);
+ struct cdns_pcie *pcie = &rc->pcie;
+ unsigned int busn = bus->number;
+ u32 addr0, desc0, desc1, ctrl0;
+ u32 regval;
+
+ if (pci_is_root_bus(bus)) {
+ /*
+ * Only the root port (devfn == 0) is connected to this bus.
+ * All other PCI devices are behind some bridge hence on another
+ * bus.
+ */
+ if (devfn)
+ return NULL;
+
+ return pcie->reg_base + (where & 0xfff);
+ }
+
+ /* Clear AXI link-down status */
+ regval = cdns_pcie_hpa_readl(pcie, REG_BANK_AXI_SLAVE, CDNS_PCIE_HPA_AT_LINKDOWN);
+ cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE, CDNS_PCIE_HPA_AT_LINKDOWN,
+ (regval & ~GENMASK(0, 0)));
+
+ /* Update Output registers for AXI region 0 */
+ addr0 = CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR0_NBITS(12) |
+ CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR0_DEVFN(devfn) |
+ CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR0_BUS(busn);
+ cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
+ CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR0(0), addr0);
+
+ desc1 = cdns_pcie_hpa_readl(pcie, REG_BANK_AXI_SLAVE,
+ CDNS_PCIE_HPA_AT_OB_REGION_DESC1(0));
+ desc1 &= ~CDNS_PCIE_HPA_AT_OB_REGION_DESC1_DEVFN_MASK;
+ desc1 |= CDNS_PCIE_HPA_AT_OB_REGION_DESC1_DEVFN(0);
+ ctrl0 = CDNS_PCIE_HPA_AT_OB_REGION_CTRL0_SUPPLY_BUS |
+ CDNS_PCIE_HPA_AT_OB_REGION_CTRL0_SUPPLY_DEV_FN;
+
+ if (busn == bridge->busnr + 1)
+ desc0 = CDNS_PCIE_HPA_AT_OB_REGION_DESC0_TYPE_CONF_TYPE0;
+ else
+ desc0 = CDNS_PCIE_HPA_AT_OB_REGION_DESC0_TYPE_CONF_TYPE1;
+
+ cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
+ CDNS_PCIE_HPA_AT_OB_REGION_DESC0(0), desc0);
+ cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
+ CDNS_PCIE_HPA_AT_OB_REGION_DESC1(0), desc1);
+ cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
+ CDNS_PCIE_HPA_AT_OB_REGION_CTRL0(0), ctrl0);
+
+ return rc->cfg_base + (where & 0xfff);
+}
+
+static struct pci_ops cdns_pcie_hpa_host_ops = {
+ .map_bus = cdns_pci_hpa_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+};
+
+static void cdns_pcie_hpa_host_enable_ptm_response(struct cdns_pcie *pcie)
+{
+ u32 val;
+
+ val = cdns_pcie_hpa_readl(pcie, REG_BANK_IP_REG, CDNS_PCIE_HPA_LM_PTM_CTRL);
+ cdns_pcie_hpa_writel(pcie, REG_BANK_IP_REG, CDNS_PCIE_HPA_LM_PTM_CTRL,
+ val | CDNS_PCIE_HPA_LM_PTM_CTRL_PTMRSEN);
+}
+
+static int cdns_pcie_hpa_host_bar_ib_config(struct cdns_pcie_rc *rc,
+ enum cdns_pcie_rp_bar bar,
+ u64 cpu_addr, u64 size,
+ unsigned long flags)
+{
+ struct cdns_pcie *pcie = &rc->pcie;
+ u32 addr0, addr1, aperture, value;
+
+ if (!rc->avail_ib_bar[bar])
+ return -ENODEV;
+
+ rc->avail_ib_bar[bar] = false;
+
+ aperture = ilog2(size);
+ if (bar == RP_NO_BAR) {
+ addr0 = CDNS_PCIE_HPA_AT_IB_RP_BAR_ADDR0_NBITS(aperture) |
+ (lower_32_bits(cpu_addr) & GENMASK(31, 8));
+ addr1 = upper_32_bits(cpu_addr);
+ } else {
+ addr0 = lower_32_bits(cpu_addr);
+ addr1 = upper_32_bits(cpu_addr);
+ }
+ cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_MASTER,
+ CDNS_PCIE_HPA_AT_IB_RP_BAR_ADDR0(bar), addr0);
+ cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_MASTER,
+ CDNS_PCIE_HPA_AT_IB_RP_BAR_ADDR1(bar), addr1);
+
+ if (bar == RP_NO_BAR)
+ bar = (enum cdns_pcie_rp_bar)BAR_0;
+
+ value = cdns_pcie_hpa_readl(pcie, REG_BANK_IP_CFG_CTRL_REG, CDNS_PCIE_HPA_LM_RC_BAR_CFG);
+ value &= ~(HPA_LM_RC_BAR_CFG_CTRL_MEM_64BITS(bar) |
+ HPA_LM_RC_BAR_CFG_CTRL_PREF_MEM_64BITS(bar) |
+ HPA_LM_RC_BAR_CFG_CTRL_MEM_32BITS(bar) |
+ HPA_LM_RC_BAR_CFG_CTRL_PREF_MEM_32BITS(bar) |
+ HPA_LM_RC_BAR_CFG_APERTURE(bar, bar_aperture_mask[bar] + 7));
+ if (size + cpu_addr >= SZ_4G) {
+ value |= HPA_LM_RC_BAR_CFG_CTRL_MEM_64BITS(bar);
+ if ((flags & IORESOURCE_PREFETCH))
+ value |= HPA_LM_RC_BAR_CFG_CTRL_PREF_MEM_64BITS(bar);
+ } else {
+ value |= HPA_LM_RC_BAR_CFG_CTRL_MEM_32BITS(bar);
+ if ((flags & IORESOURCE_PREFETCH))
+ value |= HPA_LM_RC_BAR_CFG_CTRL_PREF_MEM_32BITS(bar);
+ }
+
+ value |= HPA_LM_RC_BAR_CFG_APERTURE(bar, aperture);
+ cdns_pcie_hpa_writel(pcie, REG_BANK_IP_CFG_CTRL_REG, CDNS_PCIE_HPA_LM_RC_BAR_CFG, value);
+
+ return 0;
+}
+
+static int cdns_pcie_hpa_host_init_root_port(struct cdns_pcie_rc *rc)
+{
+ struct cdns_pcie *pcie = &rc->pcie;
+ u32 value, ctrl;
+
+ /*
+ * Set the root port BAR configuration register:
+ * - disable both BAR0 and BAR1
+ * - enable Prefetchable Memory Base and Limit registers in type 1
+ * config space (64 bits)
+ * - enable IO Base and Limit registers in type 1 config
+ * space (32 bits)
+ */
+
+ ctrl = CDNS_PCIE_HPA_LM_BAR_CFG_CTRL_DISABLED;
+ value = CDNS_PCIE_HPA_LM_RC_BAR_CFG_BAR0_CTRL(ctrl) |
+ CDNS_PCIE_HPA_LM_RC_BAR_CFG_BAR1_CTRL(ctrl) |
+ CDNS_PCIE_HPA_LM_RC_BAR_CFG_PREFETCH_MEM_ENABLE |
+ CDNS_PCIE_HPA_LM_RC_BAR_CFG_PREFETCH_MEM_64BITS |
+ CDNS_PCIE_HPA_LM_RC_BAR_CFG_IO_ENABLE |
+ CDNS_PCIE_HPA_LM_RC_BAR_CFG_IO_32BITS;
+ cdns_pcie_hpa_writel(pcie, REG_BANK_IP_CFG_CTRL_REG,
+ CDNS_PCIE_HPA_LM_RC_BAR_CFG, value);
+
+ if (rc->vendor_id != 0xffff)
+ cdns_pcie_hpa_rp_writew(pcie, PCI_VENDOR_ID, rc->vendor_id);
+
+ if (rc->device_id != 0xffff)
+ cdns_pcie_hpa_rp_writew(pcie, PCI_DEVICE_ID, rc->device_id);
+
+ cdns_pcie_hpa_rp_writeb(pcie, PCI_CLASS_REVISION, 0);
+ cdns_pcie_hpa_rp_writeb(pcie, PCI_CLASS_PROG, 0);
+ cdns_pcie_hpa_rp_writew(pcie, PCI_CLASS_DEVICE, PCI_CLASS_BRIDGE_PCI);
+
+ /* Enable bus mastering */
+ value = cdns_pcie_hpa_readl(pcie, REG_BANK_RP, PCI_COMMAND);
+ value |= (PCI_COMMAND_MEMORY | PCI_COMMAND_IO | PCI_COMMAND_MASTER);
+ cdns_pcie_hpa_writel(pcie, REG_BANK_RP, PCI_COMMAND, value);
+ return 0;
+}
+
+static void cdns_pcie_hpa_create_region_for_cfg(struct cdns_pcie_rc *rc)
+{
+ struct cdns_pcie *pcie = &rc->pcie;
+ struct pci_host_bridge *bridge = pci_host_bridge_from_priv(rc);
+ struct resource *cfg_res = rc->cfg_res;
+ struct resource_entry *entry;
+ u64 cpu_addr = cfg_res->start;
+ u32 addr0, addr1, desc1;
+ int busnr = 0;
+
+ entry = resource_list_first_type(&bridge->windows, IORESOURCE_BUS);
+ if (entry)
+ busnr = entry->res->start;
+
+ cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
+ CDNS_PCIE_HPA_TAG_MANAGEMENT, 0x01000000);
+ /*
+	 * Reserve region 0 for PCI config space accesses:
+	 * OB_REGION_PCI_ADDR0 and OB_REGION_DESC0 are updated dynamically by
+	 * cdns_pci_hpa_map_bus(); the other region registers are set here
+	 * once for all.
+ */
+ desc1 = CDNS_PCIE_HPA_AT_OB_REGION_DESC1_BUS(busnr);
+ cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
+ CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR1(0), 0x0);
+ /* Type-1 CFG */
+ cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
+ CDNS_PCIE_HPA_AT_OB_REGION_DESC0(0), 0x05000000);
+ cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
+ CDNS_PCIE_HPA_AT_OB_REGION_DESC1(0), desc1);
+
+ addr0 = CDNS_PCIE_HPA_AT_OB_REGION_CPU_ADDR0_NBITS(12) |
+ (lower_32_bits(cpu_addr) & GENMASK(31, 8));
+ addr1 = upper_32_bits(cpu_addr);
+ cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
+ CDNS_PCIE_HPA_AT_OB_REGION_CPU_ADDR0(0), addr0);
+ cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
+ CDNS_PCIE_HPA_AT_OB_REGION_CPU_ADDR1(0), addr1);
+ cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
+ CDNS_PCIE_HPA_AT_OB_REGION_CTRL0(0), 0x06000000);
+}
+
+static int cdns_pcie_hpa_host_init_address_translation(struct cdns_pcie_rc *rc)
+{
+ struct cdns_pcie *pcie = &rc->pcie;
+ struct pci_host_bridge *bridge = pci_host_bridge_from_priv(rc);
+ struct resource_entry *entry;
+ int r = 0, busnr = 0;
+
+ if (!rc->ecam_supported)
+ cdns_pcie_hpa_create_region_for_cfg(rc);
+
+ entry = resource_list_first_type(&bridge->windows, IORESOURCE_BUS);
+ if (entry)
+ busnr = entry->res->start;
+
+ r++;
+ if (pcie->msg_res) {
+ cdns_pcie_hpa_set_outbound_region_for_normal_msg(pcie, busnr, 0, r,
+ pcie->msg_res->start);
+
+ r++;
+ }
+ resource_list_for_each_entry(entry, &bridge->windows) {
+ struct resource *res = entry->res;
+ u64 pci_addr = res->start - entry->offset;
+
+ if (resource_type(res) == IORESOURCE_IO)
+ cdns_pcie_hpa_set_outbound_region(pcie, busnr, 0, r,
+ true,
+ pci_pio_to_address(res->start),
+ pci_addr,
+ resource_size(res));
+ else
+ cdns_pcie_hpa_set_outbound_region(pcie, busnr, 0, r,
+ false,
+ res->start,
+ pci_addr,
+ resource_size(res));
+
+ r++;
+ }
+
+ if (rc->no_inbound_map)
+ return 0;
+ else
+ return cdns_pcie_host_map_dma_ranges(rc, cdns_pcie_hpa_host_bar_ib_config);
+}
+
+static int cdns_pcie_hpa_host_init(struct cdns_pcie_rc *rc)
+{
+ int err;
+
+ err = cdns_pcie_hpa_host_init_root_port(rc);
+ if (err)
+ return err;
+
+ return cdns_pcie_hpa_host_init_address_translation(rc);
+}
+
+int cdns_pcie_hpa_host_link_setup(struct cdns_pcie_rc *rc)
+{
+ struct cdns_pcie *pcie = &rc->pcie;
+ struct device *dev = rc->pcie.dev;
+ int ret;
+
+ if (rc->quirk_detect_quiet_flag)
+ cdns_pcie_hpa_detect_quiet_min_delay_set(&rc->pcie);
+
+ cdns_pcie_hpa_host_enable_ptm_response(pcie);
+
+ ret = cdns_pcie_start_link(pcie);
+ if (ret) {
+ dev_err(dev, "Failed to start link\n");
+ return ret;
+ }
+
+ ret = cdns_pcie_host_wait_for_link(pcie, cdns_pcie_hpa_link_up);
+ if (ret)
+ dev_dbg(dev, "PCIe link never came up\n");
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cdns_pcie_hpa_host_link_setup);
+
+int cdns_pcie_hpa_host_setup(struct cdns_pcie_rc *rc)
+{
+ struct device *dev = rc->pcie.dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct pci_host_bridge *bridge;
+ enum cdns_pcie_rp_bar bar;
+ struct cdns_pcie *pcie;
+ struct resource *res;
+ int ret;
+
+ bridge = pci_host_bridge_from_priv(rc);
+ if (!bridge)
+ return -ENOMEM;
+
+ pcie = &rc->pcie;
+ pcie->is_rc = true;
+
+ if (!pcie->reg_base) {
+ pcie->reg_base = devm_platform_ioremap_resource_byname(pdev, "reg");
+ if (IS_ERR(pcie->reg_base)) {
+ dev_err(dev, "missing \"reg\"\n");
+ return PTR_ERR(pcie->reg_base);
+ }
+ }
+
+ /* ECAM config space is remapped at glue layer */
+ if (!rc->cfg_base) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
+ rc->cfg_base = devm_pci_remap_cfg_resource(dev, res);
+ if (IS_ERR(rc->cfg_base))
+ return PTR_ERR(rc->cfg_base);
+ rc->cfg_res = res;
+ }
+
+	/* Set the EROM BAR aperture to 0 */
+ cdns_pcie_hpa_writel(pcie, REG_BANK_IP_CFG_CTRL_REG, CDNS_PCIE_EROM, 0x0);
+
+ ret = cdns_pcie_hpa_host_link_setup(rc);
+ if (ret)
+ return ret;
+
+ for (bar = RP_BAR0; bar <= RP_NO_BAR; bar++)
+ rc->avail_ib_bar[bar] = true;
+
+ ret = cdns_pcie_hpa_host_init(rc);
+ if (ret)
+ return ret;
+
+ if (!bridge->ops)
+ bridge->ops = &cdns_pcie_hpa_host_ops;
+
+ return pci_host_probe(bridge);
+}
+EXPORT_SYMBOL_GPL(cdns_pcie_hpa_host_setup);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Cadence PCIe host controller driver");
diff --git a/drivers/pci/controller/cadence/pcie-cadence-host.c b/drivers/pci/controller/cadence/pcie-cadence-host.c
index 5b14f7ee3c79..db3154c1eccb 100644
--- a/drivers/pci/controller/cadence/pcie-cadence-host.c
+++ b/drivers/pci/controller/cadence/pcie-cadence-host.c
@@ -5,20 +5,14 @@
#include <linux/delay.h>
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/list_sort.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/platform_device.h>
#include "pcie-cadence.h"
-
-#define LINK_RETRAIN_TIMEOUT HZ
-
-static u64 bar_max_size[] = {
- [RP_BAR0] = _ULL(128 * SZ_2G),
- [RP_BAR1] = SZ_2G,
- [RP_NO_BAR] = _BITULL(63),
-};
+#include "pcie-cadence-host-common.h"
static u8 bar_aperture_mask[] = {
[RP_BAR0] = 0x1F,
@@ -72,6 +66,7 @@ void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int devfn,
return rc->cfg_base + (where & 0xfff);
}
+EXPORT_SYMBOL_GPL(cdns_pci_map_bus);
static struct pci_ops cdns_pcie_host_ops = {
.map_bus = cdns_pci_map_bus,
@@ -79,75 +74,12 @@ static struct pci_ops cdns_pcie_host_ops = {
.write = pci_generic_config_write,
};
-static int cdns_pcie_host_training_complete(struct cdns_pcie *pcie)
-{
- u32 pcie_cap_off = CDNS_PCIE_RP_CAP_OFFSET;
- unsigned long end_jiffies;
- u16 lnk_stat;
-
- /* Wait for link training to complete. Exit after timeout. */
- end_jiffies = jiffies + LINK_RETRAIN_TIMEOUT;
- do {
- lnk_stat = cdns_pcie_rp_readw(pcie, pcie_cap_off + PCI_EXP_LNKSTA);
- if (!(lnk_stat & PCI_EXP_LNKSTA_LT))
- break;
- usleep_range(0, 1000);
- } while (time_before(jiffies, end_jiffies));
-
- if (!(lnk_stat & PCI_EXP_LNKSTA_LT))
- return 0;
-
- return -ETIMEDOUT;
-}
-
-static int cdns_pcie_host_wait_for_link(struct cdns_pcie *pcie)
-{
- struct device *dev = pcie->dev;
- int retries;
-
- /* Check if the link is up or not */
- for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
- if (cdns_pcie_link_up(pcie)) {
- dev_info(dev, "Link up\n");
- return 0;
- }
- usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
- }
-
- return -ETIMEDOUT;
-}
-
-static int cdns_pcie_retrain(struct cdns_pcie *pcie)
+static void cdns_pcie_host_disable_ptm_response(struct cdns_pcie *pcie)
{
- u32 lnk_cap_sls, pcie_cap_off = CDNS_PCIE_RP_CAP_OFFSET;
- u16 lnk_stat, lnk_ctl;
- int ret = 0;
-
- /*
- * Set retrain bit if current speed is 2.5 GB/s,
- * but the PCIe root port support is > 2.5 GB/s.
- */
-
- lnk_cap_sls = cdns_pcie_readl(pcie, (CDNS_PCIE_RP_BASE + pcie_cap_off +
- PCI_EXP_LNKCAP));
- if ((lnk_cap_sls & PCI_EXP_LNKCAP_SLS) <= PCI_EXP_LNKCAP_SLS_2_5GB)
- return ret;
-
- lnk_stat = cdns_pcie_rp_readw(pcie, pcie_cap_off + PCI_EXP_LNKSTA);
- if ((lnk_stat & PCI_EXP_LNKSTA_CLS) == PCI_EXP_LNKSTA_CLS_2_5GB) {
- lnk_ctl = cdns_pcie_rp_readw(pcie,
- pcie_cap_off + PCI_EXP_LNKCTL);
- lnk_ctl |= PCI_EXP_LNKCTL_RL;
- cdns_pcie_rp_writew(pcie, pcie_cap_off + PCI_EXP_LNKCTL,
- lnk_ctl);
-
- ret = cdns_pcie_host_training_complete(pcie);
- if (ret)
- return ret;
+ u32 val;
- ret = cdns_pcie_host_wait_for_link(pcie);
- }
- return ret;
+ val = cdns_pcie_readl(pcie, CDNS_PCIE_LM_PTM_CTRL);
+ cdns_pcie_writel(pcie, CDNS_PCIE_LM_PTM_CTRL, val & ~CDNS_PCIE_LM_TPM_CTRL_PTMRSEN);
}
static void cdns_pcie_host_enable_ptm_response(struct cdns_pcie *pcie)
@@ -158,21 +90,24 @@ static void cdns_pcie_host_enable_ptm_response(struct cdns_pcie *pcie)
cdns_pcie_writel(pcie, CDNS_PCIE_LM_PTM_CTRL, val | CDNS_PCIE_LM_TPM_CTRL_PTMRSEN);
}
-static int cdns_pcie_host_start_link(struct cdns_pcie_rc *rc)
+static void cdns_pcie_host_deinit_root_port(struct cdns_pcie_rc *rc)
{
struct cdns_pcie *pcie = &rc->pcie;
- int ret;
-
- ret = cdns_pcie_host_wait_for_link(pcie);
-
- /*
- * Retrain link for Gen2 training defect
- * if quirk flag is set.
- */
- if (!ret && rc->quirk_retrain_flag)
- ret = cdns_pcie_retrain(pcie);
+ u32 value, ctrl;
- return ret;
+ cdns_pcie_rp_writew(pcie, PCI_CLASS_DEVICE, 0xffff);
+ cdns_pcie_rp_writeb(pcie, PCI_CLASS_PROG, 0xff);
+ cdns_pcie_rp_writeb(pcie, PCI_CLASS_REVISION, 0xff);
+ cdns_pcie_writel(pcie, CDNS_PCIE_LM_ID, 0xffffffff);
+ cdns_pcie_rp_writew(pcie, PCI_DEVICE_ID, 0xffff);
+ ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED;
+ value = ~(CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL(ctrl) |
+ CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL(ctrl) |
+ CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_ENABLE |
+ CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_64BITS |
+ CDNS_PCIE_LM_RC_BAR_CFG_IO_ENABLE |
+ CDNS_PCIE_LM_RC_BAR_CFG_IO_32BITS);
+ cdns_pcie_writel(pcie, CDNS_PCIE_LM_RC_BAR_CFG, value);
}
static int cdns_pcie_host_init_root_port(struct cdns_pcie_rc *rc)
@@ -215,10 +150,11 @@ static int cdns_pcie_host_init_root_port(struct cdns_pcie_rc *rc)
return 0;
}
-static int cdns_pcie_host_bar_ib_config(struct cdns_pcie_rc *rc,
- enum cdns_pcie_rp_bar bar,
- u64 cpu_addr, u64 size,
- unsigned long flags)
+int cdns_pcie_host_bar_ib_config(struct cdns_pcie_rc *rc,
+ enum cdns_pcie_rp_bar bar,
+ u64 cpu_addr,
+ u64 size,
+ unsigned long flags)
{
struct cdns_pcie *pcie = &rc->pcie;
u32 addr0, addr1, aperture, value;
@@ -260,172 +196,53 @@ static int cdns_pcie_host_bar_ib_config(struct cdns_pcie_rc *rc,
return 0;
}
-static enum cdns_pcie_rp_bar
-cdns_pcie_host_find_min_bar(struct cdns_pcie_rc *rc, u64 size)
+static void cdns_pcie_host_unmap_dma_ranges(struct cdns_pcie_rc *rc)
{
- enum cdns_pcie_rp_bar bar, sel_bar;
+ struct cdns_pcie *pcie = &rc->pcie;
+ enum cdns_pcie_rp_bar bar;
+ u32 value;
- sel_bar = RP_BAR_UNDEFINED;
+ /* Reset inbound configuration for all BARs which were being used */
for (bar = RP_BAR0; bar <= RP_NO_BAR; bar++) {
- if (!rc->avail_ib_bar[bar])
+ if (rc->avail_ib_bar[bar])
continue;
- if (size <= bar_max_size[bar]) {
- if (sel_bar == RP_BAR_UNDEFINED) {
- sel_bar = bar;
- continue;
- }
-
- if (bar_max_size[bar] < bar_max_size[sel_bar])
- sel_bar = bar;
- }
- }
-
- return sel_bar;
-}
+ cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_RP_BAR_ADDR0(bar), 0);
+ cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_RP_BAR_ADDR1(bar), 0);
-static enum cdns_pcie_rp_bar
-cdns_pcie_host_find_max_bar(struct cdns_pcie_rc *rc, u64 size)
-{
- enum cdns_pcie_rp_bar bar, sel_bar;
-
- sel_bar = RP_BAR_UNDEFINED;
- for (bar = RP_BAR0; bar <= RP_NO_BAR; bar++) {
- if (!rc->avail_ib_bar[bar])
+ if (bar == RP_NO_BAR)
continue;
- if (size >= bar_max_size[bar]) {
- if (sel_bar == RP_BAR_UNDEFINED) {
- sel_bar = bar;
- continue;
- }
-
- if (bar_max_size[bar] > bar_max_size[sel_bar])
- sel_bar = bar;
- }
+ value = ~(LM_RC_BAR_CFG_CTRL_MEM_64BITS(bar) |
+ LM_RC_BAR_CFG_CTRL_PREF_MEM_64BITS(bar) |
+ LM_RC_BAR_CFG_CTRL_MEM_32BITS(bar) |
+ LM_RC_BAR_CFG_CTRL_PREF_MEM_32BITS(bar) |
+ LM_RC_BAR_CFG_APERTURE(bar, bar_aperture_mask[bar] + 2));
+ cdns_pcie_writel(pcie, CDNS_PCIE_LM_RC_BAR_CFG, value);
}
-
- return sel_bar;
}
-static int cdns_pcie_host_bar_config(struct cdns_pcie_rc *rc,
- struct resource_entry *entry)
+static void cdns_pcie_host_deinit_address_translation(struct cdns_pcie_rc *rc)
{
- u64 cpu_addr, pci_addr, size, winsize;
struct cdns_pcie *pcie = &rc->pcie;
- struct device *dev = pcie->dev;
- enum cdns_pcie_rp_bar bar;
- unsigned long flags;
- int ret;
-
- cpu_addr = entry->res->start;
- pci_addr = entry->res->start - entry->offset;
- flags = entry->res->flags;
- size = resource_size(entry->res);
-
- if (entry->offset) {
- dev_err(dev, "PCI addr: %llx must be equal to CPU addr: %llx\n",
- pci_addr, cpu_addr);
- return -EINVAL;
- }
-
- while (size > 0) {
- /*
- * Try to find a minimum BAR whose size is greater than
- * or equal to the remaining resource_entry size. This will
- * fail if the size of each of the available BARs is less than
- * the remaining resource_entry size.
- * If a minimum BAR is found, IB ATU will be configured and
- * exited.
- */
- bar = cdns_pcie_host_find_min_bar(rc, size);
- if (bar != RP_BAR_UNDEFINED) {
- ret = cdns_pcie_host_bar_ib_config(rc, bar, cpu_addr,
- size, flags);
- if (ret)
- dev_err(dev, "IB BAR: %d config failed\n", bar);
- return ret;
- }
-
- /*
- * If the control reaches here, it would mean the remaining
- * resource_entry size cannot be fitted in a single BAR. So we
- * find a maximum BAR whose size is less than or equal to the
- * remaining resource_entry size and split the resource entry
- * so that part of resource entry is fitted inside the maximum
- * BAR. The remaining size would be fitted during the next
- * iteration of the loop.
- * If a maximum BAR is not found, there is no way we can fit
- * this resource_entry, so we error out.
- */
- bar = cdns_pcie_host_find_max_bar(rc, size);
- if (bar == RP_BAR_UNDEFINED) {
- dev_err(dev, "No free BAR to map cpu_addr %llx\n",
- cpu_addr);
- return -EINVAL;
- }
-
- winsize = bar_max_size[bar];
- ret = cdns_pcie_host_bar_ib_config(rc, bar, cpu_addr, winsize,
- flags);
- if (ret) {
- dev_err(dev, "IB BAR: %d config failed\n", bar);
- return ret;
- }
-
- size -= winsize;
- cpu_addr += winsize;
- }
-
- return 0;
-}
-
-static int cdns_pcie_host_dma_ranges_cmp(void *priv, const struct list_head *a,
- const struct list_head *b)
-{
- struct resource_entry *entry1, *entry2;
-
- entry1 = container_of(a, struct resource_entry, node);
- entry2 = container_of(b, struct resource_entry, node);
-
- return resource_size(entry2->res) - resource_size(entry1->res);
-}
-
-static int cdns_pcie_host_map_dma_ranges(struct cdns_pcie_rc *rc)
-{
- struct cdns_pcie *pcie = &rc->pcie;
- struct device *dev = pcie->dev;
- struct device_node *np = dev->of_node;
- struct pci_host_bridge *bridge;
+ struct pci_host_bridge *bridge = pci_host_bridge_from_priv(rc);
struct resource_entry *entry;
- u32 no_bar_nbits = 32;
- int err;
+ int r;
- bridge = pci_host_bridge_from_priv(rc);
- if (!bridge)
- return -ENOMEM;
+ cdns_pcie_host_unmap_dma_ranges(rc);
- if (list_empty(&bridge->dma_ranges)) {
- of_property_read_u32(np, "cdns,no-bar-match-nbits",
- &no_bar_nbits);
- err = cdns_pcie_host_bar_ib_config(rc, RP_NO_BAR, 0x0,
- (u64)1 << no_bar_nbits, 0);
- if (err)
- dev_err(dev, "IB BAR: %d config failed\n", RP_NO_BAR);
- return err;
- }
-
- list_sort(NULL, &bridge->dma_ranges, cdns_pcie_host_dma_ranges_cmp);
+ /*
+ * Reset outbound region 0 which was reserved for configuration space
+ * accesses.
+ */
+ cdns_pcie_reset_outbound_region(pcie, 0);
- resource_list_for_each_entry(entry, &bridge->dma_ranges) {
- err = cdns_pcie_host_bar_config(rc, entry);
- if (err) {
- dev_err(dev, "Fail to configure IB using dma-ranges\n");
- return err;
- }
+ /* Reset rest of the outbound regions */
+ r = 1;
+ resource_list_for_each_entry(entry, &bridge->windows) {
+ cdns_pcie_reset_outbound_region(pcie, r);
+ r++;
}
-
- return 0;
}
static int cdns_pcie_host_init_address_translation(struct cdns_pcie_rc *rc)
@@ -452,7 +269,7 @@ static int cdns_pcie_host_init_address_translation(struct cdns_pcie_rc *rc)
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(0), addr1);
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(0), desc1);
- if (pcie->ops->cpu_addr_fixup)
+ if (pcie->ops && pcie->ops->cpu_addr_fixup)
cpu_addr = pcie->ops->cpu_addr_fixup(pcie, cpu_addr);
addr0 = CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(12) |
@@ -482,11 +299,16 @@ static int cdns_pcie_host_init_address_translation(struct cdns_pcie_rc *rc)
r++;
}
- return cdns_pcie_host_map_dma_ranges(rc);
+ return cdns_pcie_host_map_dma_ranges(rc, cdns_pcie_host_bar_ib_config);
+}
+
+static void cdns_pcie_host_deinit(struct cdns_pcie_rc *rc)
+{
+ cdns_pcie_host_deinit_address_translation(rc);
+ cdns_pcie_host_deinit_root_port(rc);
}
-static int cdns_pcie_host_init(struct device *dev,
- struct cdns_pcie_rc *rc)
+int cdns_pcie_host_init(struct cdns_pcie_rc *rc)
{
int err;
@@ -496,6 +318,53 @@ static int cdns_pcie_host_init(struct device *dev,
return cdns_pcie_host_init_address_translation(rc);
}
+EXPORT_SYMBOL_GPL(cdns_pcie_host_init);
+
+static void cdns_pcie_host_link_disable(struct cdns_pcie_rc *rc)
+{
+ struct cdns_pcie *pcie = &rc->pcie;
+
+ cdns_pcie_stop_link(pcie);
+ cdns_pcie_host_disable_ptm_response(pcie);
+}
+
+int cdns_pcie_host_link_setup(struct cdns_pcie_rc *rc)
+{
+ struct cdns_pcie *pcie = &rc->pcie;
+ struct device *dev = rc->pcie.dev;
+ int ret;
+
+ if (rc->quirk_detect_quiet_flag)
+ cdns_pcie_detect_quiet_min_delay_set(&rc->pcie);
+
+ cdns_pcie_host_enable_ptm_response(pcie);
+
+ ret = cdns_pcie_start_link(pcie);
+ if (ret) {
+ dev_err(dev, "Failed to start link\n");
+ return ret;
+ }
+
+ ret = cdns_pcie_host_start_link(rc, cdns_pcie_link_up);
+ if (ret)
+ dev_dbg(dev, "PCIe link never came up\n");
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cdns_pcie_host_link_setup);
+
+void cdns_pcie_host_disable(struct cdns_pcie_rc *rc)
+{
+ struct pci_host_bridge *bridge;
+
+ bridge = pci_host_bridge_from_priv(rc);
+ pci_stop_root_bus(bridge->bus);
+ pci_remove_root_bus(bridge->bus);
+
+ cdns_pcie_host_deinit(rc);
+ cdns_pcie_host_link_disable(rc);
+}
+EXPORT_SYMBOL_GPL(cdns_pcie_host_disable);
int cdns_pcie_host_setup(struct cdns_pcie_rc *rc)
{
@@ -533,39 +402,24 @@ int cdns_pcie_host_setup(struct cdns_pcie_rc *rc)
return PTR_ERR(rc->cfg_base);
rc->cfg_res = res;
- if (rc->quirk_detect_quiet_flag)
- cdns_pcie_detect_quiet_min_delay_set(&rc->pcie);
-
- cdns_pcie_host_enable_ptm_response(pcie);
-
- ret = cdns_pcie_start_link(pcie);
- if (ret) {
- dev_err(dev, "Failed to start link\n");
- return ret;
- }
-
- ret = cdns_pcie_host_start_link(rc);
+ ret = cdns_pcie_host_link_setup(rc);
if (ret)
- dev_dbg(dev, "PCIe link never came up\n");
+ return ret;
for (bar = RP_BAR0; bar <= RP_NO_BAR; bar++)
rc->avail_ib_bar[bar] = true;
- ret = cdns_pcie_host_init(dev, rc);
+ ret = cdns_pcie_host_init(rc);
if (ret)
return ret;
if (!bridge->ops)
bridge->ops = &cdns_pcie_host_ops;
- ret = pci_host_probe(bridge);
- if (ret < 0)
- goto err_init;
-
- return 0;
-
- err_init:
- pm_runtime_put_sync(dev);
-
- return ret;
+ return pci_host_probe(bridge);
}
+EXPORT_SYMBOL_GPL(cdns_pcie_host_setup);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Cadence PCIe host controller driver");
+MODULE_AUTHOR("Cyrille Pitchen <cyrille.pitchen@free-electrons.com>");
diff --git a/drivers/pci/controller/cadence/pcie-cadence-hpa-regs.h b/drivers/pci/controller/cadence/pcie-cadence-hpa-regs.h
new file mode 100644
index 000000000000..026e131600de
--- /dev/null
+++ b/drivers/pci/controller/cadence/pcie-cadence-hpa-regs.h
@@ -0,0 +1,193 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Cadence PCIe controller driver.
+ *
+ * Copyright (c) 2024, Cadence Design Systems
+ * Author: Manikandan K Pillai <mpillai@cadence.com>
+ */
+#ifndef _PCIE_CADENCE_HPA_REGS_H
+#define _PCIE_CADENCE_HPA_REGS_H
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/pci-epf.h>
+#include <linux/phy/phy.h>
+#include <linux/bitfield.h>
+
+/* High Performance Architecture (HPA) PCIe controller registers */
+#define CDNS_PCIE_HPA_IP_REG_BANK 0x01000000
+#define CDNS_PCIE_HPA_IP_CFG_CTRL_REG_BANK 0x01003C00
+#define CDNS_PCIE_HPA_IP_AXI_MASTER_COMMON 0x02020000
+
+/* Address Translation Registers */
+#define CDNS_PCIE_HPA_AXI_SLAVE 0x03000000
+#define CDNS_PCIE_HPA_AXI_MASTER 0x03002000
+
+/* Root Port register base address */
+#define CDNS_PCIE_HPA_RP_BASE 0x0
+
+#define CDNS_PCIE_HPA_LM_ID 0x1420
+
+/* Endpoint Function BARs */
+#define CDNS_PCIE_HPA_LM_EP_FUNC_BAR_CFG(bar, fn) \
+ (((bar) < BAR_3) ? CDNS_PCIE_HPA_LM_EP_FUNC_BAR_CFG0(fn) : \
+ CDNS_PCIE_HPA_LM_EP_FUNC_BAR_CFG1(fn))
+#define CDNS_PCIE_HPA_LM_EP_FUNC_BAR_CFG0(pfn) (0x4000 * (pfn))
+#define CDNS_PCIE_HPA_LM_EP_FUNC_BAR_CFG1(pfn) ((0x4000 * (pfn)) + 0x04)
+#define CDNS_PCIE_HPA_LM_EP_VFUNC_BAR_CFG(bar, fn) \
+ (((bar) < BAR_3) ? CDNS_PCIE_HPA_LM_EP_VFUNC_BAR_CFG0(fn) : \
+ CDNS_PCIE_HPA_LM_EP_VFUNC_BAR_CFG1(fn))
+#define CDNS_PCIE_HPA_LM_EP_VFUNC_BAR_CFG0(vfn) ((0x4000 * (vfn)) + 0x08)
+#define CDNS_PCIE_HPA_LM_EP_VFUNC_BAR_CFG1(vfn) ((0x4000 * (vfn)) + 0x0C)
+#define CDNS_PCIE_HPA_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(f) \
+ (GENMASK(5, 0) << (0x4 + (f) * 10))
+#define CDNS_PCIE_HPA_LM_EP_FUNC_BAR_CFG_BAR_APERTURE(b, a) \
+ (((a) << (4 + ((b) * 10))) & (CDNS_PCIE_HPA_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b)))
+#define CDNS_PCIE_HPA_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(f) \
+ (GENMASK(3, 0) << ((f) * 10))
+#define CDNS_PCIE_HPA_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, c) \
+ (((c) << ((b) * 10)) & (CDNS_PCIE_HPA_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b)))
+
+/* Endpoint Function Configuration Register */
+#define CDNS_PCIE_HPA_LM_EP_FUNC_CFG 0x02C0
+
+/* Root Complex BAR Configuration Register */
+#define CDNS_PCIE_HPA_LM_RC_BAR_CFG 0x14
+#define CDNS_PCIE_HPA_LM_RC_BAR_CFG_BAR0_APERTURE_MASK GENMASK(9, 4)
+#define CDNS_PCIE_HPA_LM_RC_BAR_CFG_BAR0_APERTURE(a) \
+ FIELD_PREP(CDNS_PCIE_HPA_LM_RC_BAR_CFG_BAR0_APERTURE_MASK, a)
+#define CDNS_PCIE_HPA_LM_RC_BAR_CFG_BAR0_CTRL_MASK GENMASK(3, 0)
+#define CDNS_PCIE_HPA_LM_RC_BAR_CFG_BAR0_CTRL(c) \
+ FIELD_PREP(CDNS_PCIE_HPA_LM_RC_BAR_CFG_BAR0_CTRL_MASK, c)
+#define CDNS_PCIE_HPA_LM_RC_BAR_CFG_BAR1_APERTURE_MASK GENMASK(19, 14)
+#define CDNS_PCIE_HPA_LM_RC_BAR_CFG_BAR1_APERTURE(a) \
+ FIELD_PREP(CDNS_PCIE_HPA_LM_RC_BAR_CFG_BAR1_APERTURE_MASK, a)
+#define CDNS_PCIE_HPA_LM_RC_BAR_CFG_BAR1_CTRL_MASK GENMASK(13, 10)
+#define CDNS_PCIE_HPA_LM_RC_BAR_CFG_BAR1_CTRL(c) \
+ FIELD_PREP(CDNS_PCIE_HPA_LM_RC_BAR_CFG_BAR1_CTRL_MASK, c)
+
+#define CDNS_PCIE_HPA_LM_RC_BAR_CFG_PREFETCH_MEM_ENABLE BIT(20)
+#define CDNS_PCIE_HPA_LM_RC_BAR_CFG_PREFETCH_MEM_64BITS BIT(21)
+#define CDNS_PCIE_HPA_LM_RC_BAR_CFG_IO_ENABLE BIT(22)
+#define CDNS_PCIE_HPA_LM_RC_BAR_CFG_IO_32BITS BIT(23)
+
+/* BAR control values applicable to both Endpoint Function and Root Complex */
+#define CDNS_PCIE_HPA_LM_BAR_CFG_CTRL_DISABLED 0x0
+#define CDNS_PCIE_HPA_LM_BAR_CFG_CTRL_IO_32BITS 0x3
+#define CDNS_PCIE_HPA_LM_BAR_CFG_CTRL_MEM_32BITS 0x1
+#define CDNS_PCIE_HPA_LM_BAR_CFG_CTRL_PREFETCH_MEM_32BITS 0x9
+#define CDNS_PCIE_HPA_LM_BAR_CFG_CTRL_MEM_64BITS 0x5
+#define CDNS_PCIE_HPA_LM_BAR_CFG_CTRL_PREFETCH_MEM_64BITS 0xD
+
+#define HPA_LM_RC_BAR_CFG_CTRL_DISABLED(bar) \
+ (CDNS_PCIE_HPA_LM_BAR_CFG_CTRL_DISABLED << ((bar) * 10))
+#define HPA_LM_RC_BAR_CFG_CTRL_IO_32BITS(bar) \
+ (CDNS_PCIE_HPA_LM_BAR_CFG_CTRL_IO_32BITS << ((bar) * 10))
+#define HPA_LM_RC_BAR_CFG_CTRL_MEM_32BITS(bar) \
+ (CDNS_PCIE_HPA_LM_BAR_CFG_CTRL_MEM_32BITS << ((bar) * 10))
+#define HPA_LM_RC_BAR_CFG_CTRL_PREF_MEM_32BITS(bar) \
+ (CDNS_PCIE_HPA_LM_BAR_CFG_CTRL_PREFETCH_MEM_32BITS << ((bar) * 10))
+#define HPA_LM_RC_BAR_CFG_CTRL_MEM_64BITS(bar) \
+ (CDNS_PCIE_HPA_LM_BAR_CFG_CTRL_MEM_64BITS << ((bar) * 10))
+#define HPA_LM_RC_BAR_CFG_CTRL_PREF_MEM_64BITS(bar) \
+ (CDNS_PCIE_HPA_LM_BAR_CFG_CTRL_PREFETCH_MEM_64BITS << ((bar) * 10))
+#define HPA_LM_RC_BAR_CFG_APERTURE(bar, aperture) \
+ (((aperture) - 7) << (((bar) * 10) + 4))
+
+#define CDNS_PCIE_HPA_LM_PTM_CTRL 0x0520
+#define CDNS_PCIE_HPA_LM_PTM_CTRL_PTMRSEN BIT(17)
+
+/* Root Port Registers PCI config space for root port function */
+#define CDNS_PCIE_HPA_RP_CAP_OFFSET 0xC0
+
+/* Region r Outbound AXI to PCIe Address Translation Register 0 */
+#define CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR0(r) (0x1010 + ((r) & 0x1F) * 0x0080)
+#define CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR0_NBITS_MASK GENMASK(5, 0)
+#define CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR0_NBITS(nbits) \
+ (((nbits) - 1) & CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR0_NBITS_MASK)
+#define CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR0_DEVFN_MASK GENMASK(23, 16)
+#define CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR0_DEVFN(devfn) \
+ FIELD_PREP(CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR0_DEVFN_MASK, devfn)
+#define CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR0_BUS_MASK GENMASK(31, 24)
+#define CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR0_BUS(bus) \
+ FIELD_PREP(CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR0_BUS_MASK, bus)
+
+/* Region r Outbound AXI to PCIe Address Translation Register 1 */
+#define CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR1(r) (0x1014 + ((r) & 0x1F) * 0x0080)
+
+/* Region r Outbound PCIe Descriptor Register */
+#define CDNS_PCIE_HPA_AT_OB_REGION_DESC0(r) (0x1008 + ((r) & 0x1F) * 0x0080)
+#define CDNS_PCIE_HPA_AT_OB_REGION_DESC0_TYPE_MASK GENMASK(28, 24)
+#define CDNS_PCIE_HPA_AT_OB_REGION_DESC0_TYPE_MEM \
+ FIELD_PREP(CDNS_PCIE_HPA_AT_OB_REGION_DESC0_TYPE_MASK, 0x0)
+#define CDNS_PCIE_HPA_AT_OB_REGION_DESC0_TYPE_IO \
+ FIELD_PREP(CDNS_PCIE_HPA_AT_OB_REGION_DESC0_TYPE_MASK, 0x2)
+#define CDNS_PCIE_HPA_AT_OB_REGION_DESC0_TYPE_CONF_TYPE0 \
+ FIELD_PREP(CDNS_PCIE_HPA_AT_OB_REGION_DESC0_TYPE_MASK, 0x4)
+#define CDNS_PCIE_HPA_AT_OB_REGION_DESC0_TYPE_CONF_TYPE1 \
+ FIELD_PREP(CDNS_PCIE_HPA_AT_OB_REGION_DESC0_TYPE_MASK, 0x5)
+#define CDNS_PCIE_HPA_AT_OB_REGION_DESC0_TYPE_NORMAL_MSG \
+ FIELD_PREP(CDNS_PCIE_HPA_AT_OB_REGION_DESC0_TYPE_MASK, 0x10)
+
+/* Region r Outbound PCIe Descriptor Register */
+#define CDNS_PCIE_HPA_AT_OB_REGION_DESC1(r) (0x100C + ((r) & 0x1F) * 0x0080)
+#define CDNS_PCIE_HPA_AT_OB_REGION_DESC1_BUS_MASK GENMASK(31, 24)
+#define CDNS_PCIE_HPA_AT_OB_REGION_DESC1_BUS(bus) \
+ FIELD_PREP(CDNS_PCIE_HPA_AT_OB_REGION_DESC1_BUS_MASK, bus)
+#define CDNS_PCIE_HPA_AT_OB_REGION_DESC1_DEVFN_MASK GENMASK(23, 16)
+#define CDNS_PCIE_HPA_AT_OB_REGION_DESC1_DEVFN(devfn) \
+ FIELD_PREP(CDNS_PCIE_HPA_AT_OB_REGION_DESC1_DEVFN_MASK, devfn)
+
+#define CDNS_PCIE_HPA_AT_OB_REGION_CTRL0(r) (0x1018 + ((r) & 0x1F) * 0x0080)
+#define CDNS_PCIE_HPA_AT_OB_REGION_CTRL0_SUPPLY_BUS BIT(26)
+#define CDNS_PCIE_HPA_AT_OB_REGION_CTRL0_SUPPLY_DEV_FN BIT(25)
+
+/* Region r AXI Region Base Address Register 0 */
+#define CDNS_PCIE_HPA_AT_OB_REGION_CPU_ADDR0(r) (0x1000 + ((r) & 0x1F) * 0x0080)
+#define CDNS_PCIE_HPA_AT_OB_REGION_CPU_ADDR0_NBITS_MASK GENMASK(5, 0)
+#define CDNS_PCIE_HPA_AT_OB_REGION_CPU_ADDR0_NBITS(nbits) \
+ (((nbits) - 1) & CDNS_PCIE_HPA_AT_OB_REGION_CPU_ADDR0_NBITS_MASK)
+
+/* Region r AXI Region Base Address Register 1 */
+#define CDNS_PCIE_HPA_AT_OB_REGION_CPU_ADDR1(r) (0x1004 + ((r) & 0x1F) * 0x0080)
+
+/* Root Port BAR Inbound PCIe to AXI Address Translation Register */
+#define CDNS_PCIE_HPA_AT_IB_RP_BAR_ADDR0(bar) (((bar) * 0x0008))
+#define CDNS_PCIE_HPA_AT_IB_RP_BAR_ADDR0_NBITS_MASK GENMASK(5, 0)
+#define CDNS_PCIE_HPA_AT_IB_RP_BAR_ADDR0_NBITS(nbits) \
+ (((nbits) - 1) & CDNS_PCIE_HPA_AT_IB_RP_BAR_ADDR0_NBITS_MASK)
+#define CDNS_PCIE_HPA_AT_IB_RP_BAR_ADDR1(bar) (0x04 + ((bar) * 0x0008))
+
+/* AXI link down register */
+#define CDNS_PCIE_HPA_AT_LINKDOWN 0x04
+
+/*
+ * Physical Layer Configuration Register 0
+ * This register contains the parameters required for functional setup
+ * of the Physical Layer.
+ */
+#define CDNS_PCIE_HPA_PHY_LAYER_CFG0 0x0400
+#define CDNS_PCIE_HPA_DETECT_QUIET_MIN_DELAY_MASK GENMASK(26, 24)
+#define CDNS_PCIE_HPA_DETECT_QUIET_MIN_DELAY(delay) \
+ FIELD_PREP(CDNS_PCIE_HPA_DETECT_QUIET_MIN_DELAY_MASK, delay)
+#define CDNS_PCIE_HPA_LINK_TRNG_EN_MASK GENMASK(27, 27)
+
+#define CDNS_PCIE_HPA_PHY_DBG_STS_REG0 0x0420
+
+#define CDNS_PCIE_HPA_RP_MAX_IB 0x3
+#define CDNS_PCIE_HPA_MAX_OB 15
+
+/* Endpoint Function BAR Inbound PCIe to AXI Address Translation Register */
+#define CDNS_PCIE_HPA_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar) (((fn) * 0x0080) + ((bar) * 0x0008))
+#define CDNS_PCIE_HPA_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar) (0x4 + ((fn) * 0x0080) + ((bar) * 0x0008))
+
+/* Miscellaneous offsets definitions */
+#define CDNS_PCIE_HPA_TAG_MANAGEMENT 0x0
+#define CDNS_PCIE_HPA_SLAVE_RESP 0x100
+
+#define I_ROOT_PORT_REQ_ID_REG 0x141c
+#define LM_HAL_SBSA_CTRL 0x1170
+
+#define I_PCIE_BUS_NUMBERS (CDNS_PCIE_HPA_RP_BASE + 0x18)
+#define CDNS_PCIE_EROM 0x18
+#endif /* _PCIE_CADENCE_HPA_REGS_H */
diff --git a/drivers/pci/controller/cadence/pcie-cadence-hpa.c b/drivers/pci/controller/cadence/pcie-cadence-hpa.c
new file mode 100644
index 000000000000..f60a16938265
--- /dev/null
+++ b/drivers/pci/controller/cadence/pcie-cadence-hpa.c
@@ -0,0 +1,167 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Cadence PCIe controller driver.
+ *
+ * Copyright (c) 2024, Cadence Design Systems
+ * Author: Manikandan K Pillai <mpillai@cadence.com>
+ */
+#include <linux/kernel.h>
+#include <linux/of.h>
+
+#include "pcie-cadence.h"
+
+bool cdns_pcie_hpa_link_up(struct cdns_pcie *pcie)
+{
+ u32 pl_reg_val;
+
+ pl_reg_val = cdns_pcie_hpa_readl(pcie, REG_BANK_IP_REG, CDNS_PCIE_HPA_PHY_DBG_STS_REG0);
+ if (pl_reg_val & GENMASK(0, 0))
+ return true;
+ return false;
+}
+EXPORT_SYMBOL_GPL(cdns_pcie_hpa_link_up);
+
+void cdns_pcie_hpa_detect_quiet_min_delay_set(struct cdns_pcie *pcie)
+{
+ u32 delay = 0x3;
+ u32 ltssm_control_cap;
+
+ /* Set the LTSSM Detect Quiet state min. delay to 2ms */
+ ltssm_control_cap = cdns_pcie_hpa_readl(pcie, REG_BANK_IP_REG,
+ CDNS_PCIE_HPA_PHY_LAYER_CFG0);
+ ltssm_control_cap = ((ltssm_control_cap &
+ ~CDNS_PCIE_HPA_DETECT_QUIET_MIN_DELAY_MASK) |
+ CDNS_PCIE_HPA_DETECT_QUIET_MIN_DELAY(delay));
+
+ cdns_pcie_hpa_writel(pcie, REG_BANK_IP_REG,
+ CDNS_PCIE_HPA_PHY_LAYER_CFG0, ltssm_control_cap);
+}
+EXPORT_SYMBOL_GPL(cdns_pcie_hpa_detect_quiet_min_delay_set);
+
+void cdns_pcie_hpa_set_outbound_region(struct cdns_pcie *pcie, u8 busnr, u8 fn,
+ u32 r, bool is_io,
+ u64 cpu_addr, u64 pci_addr, size_t size)
+{
+	/*
+	 * roundup_pow_of_two() returns an unsigned long, which is not suited
+	 * for 64-bit values
+	 */
+ u64 sz = 1ULL << fls64(size - 1);
+ int nbits = ilog2(sz);
+ u32 addr0, addr1, desc0, desc1, ctrl0;
+
+ if (nbits < 8)
+ nbits = 8;
+
+ /* Set the PCI address */
+ addr0 = CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR0_NBITS(nbits) |
+ (lower_32_bits(pci_addr) & GENMASK(31, 8));
+ addr1 = upper_32_bits(pci_addr);
+
+ cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
+ CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR0(r), addr0);
+ cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
+ CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR1(r), addr1);
+
+ /* Set the PCIe header descriptor */
+ if (is_io)
+ desc0 = CDNS_PCIE_HPA_AT_OB_REGION_DESC0_TYPE_IO;
+ else
+ desc0 = CDNS_PCIE_HPA_AT_OB_REGION_DESC0_TYPE_MEM;
+ desc1 = 0;
+ ctrl0 = 0;
+
+	/*
+	 * Whether or not the "supply" bits are set in CTRL0, the PCI function
+	 * number must be written into Bits [23:16] of DESC1 anyway.
+	 *
+	 * In Root Complex mode, the function number is always 0, but in
+	 * Endpoint mode the PCIe controller may support more than one
+	 * function, so the function number needs to be set properly in the
+	 * outbound PCIe descriptor.
+	 *
+	 * Besides, setting the "supply" bits is mandatory in Root Complex
+	 * mode: the driver must then provide the bus number in Bits [31:24]
+	 * of DESC1 and the device/function number in Bits [23:16] of DESC1.
+	 * Like the function number, the device number is always 0 in Root
+	 * Complex mode.
+	 *
+	 * However, when in Endpoint mode, we can clear these bits in CTRL0 so
+	 * that the PCIe controller uses the captured values for the bus and
+	 * device numbers.
+	 */
+ if (pcie->is_rc) {
+ /* The device and function numbers are always 0 */
+ desc1 = CDNS_PCIE_HPA_AT_OB_REGION_DESC1_BUS(busnr) |
+ CDNS_PCIE_HPA_AT_OB_REGION_DESC1_DEVFN(0);
+ ctrl0 = CDNS_PCIE_HPA_AT_OB_REGION_CTRL0_SUPPLY_BUS |
+ CDNS_PCIE_HPA_AT_OB_REGION_CTRL0_SUPPLY_DEV_FN;
+ } else {
+ /*
+ * Use captured values for bus and device numbers but still
+ * need to set the function number
+ */
+ desc1 |= CDNS_PCIE_HPA_AT_OB_REGION_DESC1_DEVFN(fn);
+ }
+
+ cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
+ CDNS_PCIE_HPA_AT_OB_REGION_DESC0(r), desc0);
+ cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
+ CDNS_PCIE_HPA_AT_OB_REGION_DESC1(r), desc1);
+
+ addr0 = CDNS_PCIE_HPA_AT_OB_REGION_CPU_ADDR0_NBITS(nbits) |
+ (lower_32_bits(cpu_addr) & GENMASK(31, 8));
+ addr1 = upper_32_bits(cpu_addr);
+
+ cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
+ CDNS_PCIE_HPA_AT_OB_REGION_CPU_ADDR0(r), addr0);
+ cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
+ CDNS_PCIE_HPA_AT_OB_REGION_CPU_ADDR1(r), addr1);
+ cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
+ CDNS_PCIE_HPA_AT_OB_REGION_CTRL0(r), ctrl0);
+}
+EXPORT_SYMBOL_GPL(cdns_pcie_hpa_set_outbound_region);
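For reference, a hedged sketch of how a caller might use this helper: it maps a 64 KiB outbound MEM window through region 1; the region index and the CPU/PCI addresses are made up for illustration, and SZ_64K comes from <linux/sizes.h>. With size = SZ_64K, nbits is 16, so the ADDR0 registers carry nbits - 1 = 15 in their low six bits alongside bits [31:8] of the respective addresses.

/* Illustrative only: 64 KiB outbound MEM window via region 1 */
cdns_pcie_hpa_set_outbound_region(pcie, 0, 0, 1, false,
				  0x50000000,	/* example AXI/CPU address */
				  0x00000000,	/* example PCI address */
				  SZ_64K);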
+
+void cdns_pcie_hpa_set_outbound_region_for_normal_msg(struct cdns_pcie *pcie,
+ u8 busnr, u8 fn,
+ u32 r, u64 cpu_addr)
+{
+ u32 addr0, addr1, desc0, desc1, ctrl0;
+
+ desc0 = CDNS_PCIE_HPA_AT_OB_REGION_DESC0_TYPE_NORMAL_MSG;
+ desc1 = 0;
+ ctrl0 = 0;
+
+	/* See cdns_pcie_hpa_set_outbound_region() comments above */
+ if (pcie->is_rc) {
+ desc1 = CDNS_PCIE_HPA_AT_OB_REGION_DESC1_BUS(busnr) |
+ CDNS_PCIE_HPA_AT_OB_REGION_DESC1_DEVFN(0);
+ ctrl0 = CDNS_PCIE_HPA_AT_OB_REGION_CTRL0_SUPPLY_BUS |
+ CDNS_PCIE_HPA_AT_OB_REGION_CTRL0_SUPPLY_DEV_FN;
+ } else {
+ desc1 |= CDNS_PCIE_HPA_AT_OB_REGION_DESC1_DEVFN(fn);
+ }
+
+ addr0 = CDNS_PCIE_HPA_AT_OB_REGION_CPU_ADDR0_NBITS(17) |
+ (lower_32_bits(cpu_addr) & GENMASK(31, 8));
+ addr1 = upper_32_bits(cpu_addr);
+
+ cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
+ CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR0(r), 0);
+ cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
+ CDNS_PCIE_HPA_AT_OB_REGION_PCI_ADDR1(r), 0);
+ cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
+ CDNS_PCIE_HPA_AT_OB_REGION_DESC0(r), desc0);
+ cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
+ CDNS_PCIE_HPA_AT_OB_REGION_DESC1(r), desc1);
+ cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
+ CDNS_PCIE_HPA_AT_OB_REGION_CPU_ADDR0(r), addr0);
+ cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
+ CDNS_PCIE_HPA_AT_OB_REGION_CPU_ADDR1(r), addr1);
+ cdns_pcie_hpa_writel(pcie, REG_BANK_AXI_SLAVE,
+ CDNS_PCIE_HPA_AT_OB_REGION_CTRL0(r), ctrl0);
+}
+EXPORT_SYMBOL_GPL(cdns_pcie_hpa_set_outbound_region_for_normal_msg);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Cadence PCIe controller driver");
diff --git a/drivers/pci/controller/cadence/pcie-cadence-lga-regs.h b/drivers/pci/controller/cadence/pcie-cadence-lga-regs.h
new file mode 100644
index 000000000000..857b2140c5d2
--- /dev/null
+++ b/drivers/pci/controller/cadence/pcie-cadence-lga-regs.h
@@ -0,0 +1,230 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Cadence PCIe controller driver.
+ *
+ * Copyright (c) 2017 Cadence
+ * Author: Cyrille Pitchen <cyrille.pitchen@free-electrons.com>
+ */
+#ifndef _PCIE_CADENCE_LGA_REGS_H
+#define _PCIE_CADENCE_LGA_REGS_H
+
+#include <linux/bitfield.h>
+
+/* Parameters for the waiting for link up routine */
+#define LINK_WAIT_MAX_RETRIES 10
+#define LINK_WAIT_USLEEP_MIN 90000
+#define LINK_WAIT_USLEEP_MAX 100000
+
+/* Local Management Registers */
+#define CDNS_PCIE_LM_BASE 0x00100000
+
+/* Vendor ID Register */
+#define CDNS_PCIE_LM_ID (CDNS_PCIE_LM_BASE + 0x0044)
+#define CDNS_PCIE_LM_ID_VENDOR_MASK GENMASK(15, 0)
+#define CDNS_PCIE_LM_ID_VENDOR_SHIFT 0
+#define CDNS_PCIE_LM_ID_VENDOR(vid) \
+ (((vid) << CDNS_PCIE_LM_ID_VENDOR_SHIFT) & CDNS_PCIE_LM_ID_VENDOR_MASK)
+#define CDNS_PCIE_LM_ID_SUBSYS_MASK GENMASK(31, 16)
+#define CDNS_PCIE_LM_ID_SUBSYS_SHIFT 16
+#define CDNS_PCIE_LM_ID_SUBSYS(sub) \
+ (((sub) << CDNS_PCIE_LM_ID_SUBSYS_SHIFT) & CDNS_PCIE_LM_ID_SUBSYS_MASK)
+
+/* Root Port Requester ID Register */
+#define CDNS_PCIE_LM_RP_RID (CDNS_PCIE_LM_BASE + 0x0228)
+#define CDNS_PCIE_LM_RP_RID_MASK GENMASK(15, 0)
+#define CDNS_PCIE_LM_RP_RID_SHIFT 0
+#define CDNS_PCIE_LM_RP_RID_(rid) \
+ (((rid) << CDNS_PCIE_LM_RP_RID_SHIFT) & CDNS_PCIE_LM_RP_RID_MASK)
+
+/* Endpoint Bus and Device Number Register */
+#define CDNS_PCIE_LM_EP_ID (CDNS_PCIE_LM_BASE + 0x022C)
+#define CDNS_PCIE_LM_EP_ID_DEV_MASK GENMASK(4, 0)
+#define CDNS_PCIE_LM_EP_ID_DEV_SHIFT 0
+#define CDNS_PCIE_LM_EP_ID_BUS_MASK GENMASK(15, 8)
+#define CDNS_PCIE_LM_EP_ID_BUS_SHIFT 8
+
+/* Endpoint Function f BAR b Configuration Registers */
+#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG(bar, fn) \
+ (((bar) < BAR_4) ? CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn) : CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn))
+#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn) \
+ (CDNS_PCIE_LM_BASE + 0x0240 + (fn) * 0x0008)
+#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn) \
+ (CDNS_PCIE_LM_BASE + 0x0244 + (fn) * 0x0008)
+#define CDNS_PCIE_LM_EP_VFUNC_BAR_CFG(bar, fn) \
+ (((bar) < BAR_4) ? CDNS_PCIE_LM_EP_VFUNC_BAR_CFG0(fn) : CDNS_PCIE_LM_EP_VFUNC_BAR_CFG1(fn))
+#define CDNS_PCIE_LM_EP_VFUNC_BAR_CFG0(fn) \
+ (CDNS_PCIE_LM_BASE + 0x0280 + (fn) * 0x0008)
+#define CDNS_PCIE_LM_EP_VFUNC_BAR_CFG1(fn) \
+ (CDNS_PCIE_LM_BASE + 0x0284 + (fn) * 0x0008)
+#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) \
+ (GENMASK(4, 0) << ((b) * 8))
+#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE(b, a) \
+ (((a) << ((b) * 8)) & CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b))
+#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b) \
+ (GENMASK(7, 5) << ((b) * 8))
+#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, c) \
+ (((c) << ((b) * 8 + 5)) & CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b))
+
+/* Endpoint Function Configuration Register */
+#define CDNS_PCIE_LM_EP_FUNC_CFG (CDNS_PCIE_LM_BASE + 0x02C0)
+
+/* Root Complex BAR Configuration Register */
+#define CDNS_PCIE_LM_RC_BAR_CFG (CDNS_PCIE_LM_BASE + 0x0300)
+#define CDNS_PCIE_LM_RC_BAR_CFG_BAR0_APERTURE_MASK GENMASK(5, 0)
+#define CDNS_PCIE_LM_RC_BAR_CFG_BAR0_APERTURE(a) \
+ (((a) << 0) & CDNS_PCIE_LM_RC_BAR_CFG_BAR0_APERTURE_MASK)
+#define CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL_MASK GENMASK(8, 6)
+#define CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL(c) \
+ (((c) << 6) & CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL_MASK)
+#define CDNS_PCIE_LM_RC_BAR_CFG_BAR1_APERTURE_MASK GENMASK(13, 9)
+#define CDNS_PCIE_LM_RC_BAR_CFG_BAR1_APERTURE(a) \
+ (((a) << 9) & CDNS_PCIE_LM_RC_BAR_CFG_BAR1_APERTURE_MASK)
+#define CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL_MASK GENMASK(16, 14)
+#define CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL(c) \
+ (((c) << 14) & CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL_MASK)
+#define CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_ENABLE BIT(17)
+#define CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_32BITS 0
+#define CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_64BITS BIT(18)
+#define CDNS_PCIE_LM_RC_BAR_CFG_IO_ENABLE BIT(19)
+#define CDNS_PCIE_LM_RC_BAR_CFG_IO_16BITS 0
+#define CDNS_PCIE_LM_RC_BAR_CFG_IO_32BITS BIT(20)
+#define CDNS_PCIE_LM_RC_BAR_CFG_CHECK_ENABLE BIT(31)
+
+/* BAR control values applicable to both Endpoint Function and Root Complex */
+#define CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED 0x0
+#define CDNS_PCIE_LM_BAR_CFG_CTRL_IO_32BITS 0x1
+#define CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_32BITS 0x4
+#define CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_32BITS 0x5
+#define CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_64BITS 0x6
+#define CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_64BITS 0x7
+
+#define LM_RC_BAR_CFG_CTRL_DISABLED(bar) \
+ (CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED << (((bar) * 8) + 6))
+#define LM_RC_BAR_CFG_CTRL_IO_32BITS(bar) \
+ (CDNS_PCIE_LM_BAR_CFG_CTRL_IO_32BITS << (((bar) * 8) + 6))
+#define LM_RC_BAR_CFG_CTRL_MEM_32BITS(bar) \
+ (CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_32BITS << (((bar) * 8) + 6))
+#define LM_RC_BAR_CFG_CTRL_PREF_MEM_32BITS(bar) \
+ (CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_32BITS << (((bar) * 8) + 6))
+#define LM_RC_BAR_CFG_CTRL_MEM_64BITS(bar) \
+ (CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_64BITS << (((bar) * 8) + 6))
+#define LM_RC_BAR_CFG_CTRL_PREF_MEM_64BITS(bar) \
+ (CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_64BITS << (((bar) * 8) + 6))
+#define LM_RC_BAR_CFG_APERTURE(bar, aperture) \
+ (((aperture) - 2) << ((bar) * 8))
+
+/* PTM Control Register */
+#define CDNS_PCIE_LM_PTM_CTRL (CDNS_PCIE_LM_BASE + 0x0DA8)
+#define CDNS_PCIE_LM_TPM_CTRL_PTMRSEN BIT(17)
+
+/*
+ * Endpoint Function Registers (PCI configuration space for endpoint functions)
+ */
+#define CDNS_PCIE_EP_FUNC_BASE(fn) (((fn) << 12) & GENMASK(19, 12))
+
+#define CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET 0x90
+#define CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET 0xB0
+#define CDNS_PCIE_EP_FUNC_DEV_CAP_OFFSET 0xC0
+#define CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET 0x200
+
+/* Endpoint PF Registers */
+#define CDNS_PCIE_CORE_PF_I_ARI_CAP_AND_CTRL(fn) (0x144 + (fn) * 0x1000)
+#define CDNS_PCIE_ARI_CAP_NFN_MASK GENMASK(15, 8)
+
+/* Root Port Registers (PCI configuration space for the root port function) */
+#define CDNS_PCIE_RP_BASE 0x00200000
+#define CDNS_PCIE_RP_CAP_OFFSET 0xC0
+
+/* Address Translation Registers */
+#define CDNS_PCIE_AT_BASE 0x00400000
+
+/* Region r Outbound AXI to PCIe Address Translation Register 0 */
+#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(r) \
+ (CDNS_PCIE_AT_BASE + 0x0000 + ((r) & 0x1F) * 0x0020)
+#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS_MASK GENMASK(5, 0)
+#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS(nbits) \
+ (((nbits) - 1) & CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS_MASK)
+#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN_MASK GENMASK(19, 12)
+#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN(devfn) \
+ (((devfn) << 12) & CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN_MASK)
+#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS_MASK GENMASK(27, 20)
+#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS(bus) \
+ (((bus) << 20) & CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS_MASK)
+
+/* Region r Outbound AXI to PCIe Address Translation Register 1 */
+#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(r) \
+ (CDNS_PCIE_AT_BASE + 0x0004 + ((r) & 0x1F) * 0x0020)
+
+/* Region r Outbound PCIe Descriptor Register 0 */
+#define CDNS_PCIE_AT_OB_REGION_DESC0(r) \
+ (CDNS_PCIE_AT_BASE + 0x0008 + ((r) & 0x1F) * 0x0020)
+#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_MASK GENMASK(3, 0)
+#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_MEM 0x2
+#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_IO 0x6
+#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE0 0xA
+#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE1 0xB
+#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_NORMAL_MSG 0xC
+#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_VENDOR_MSG 0xD
+/* Bit 23 MUST be set in RC mode. */
+#define CDNS_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID BIT(23)
+#define CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN_MASK GENMASK(31, 24)
+#define CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(devfn) \
+ (((devfn) << 24) & CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN_MASK)
+
+/* Region r Outbound PCIe Descriptor Register 1 */
+#define CDNS_PCIE_AT_OB_REGION_DESC1(r) \
+ (CDNS_PCIE_AT_BASE + 0x000C + ((r) & 0x1F) * 0x0020)
+#define CDNS_PCIE_AT_OB_REGION_DESC1_BUS_MASK GENMASK(7, 0)
+#define CDNS_PCIE_AT_OB_REGION_DESC1_BUS(bus) \
+ ((bus) & CDNS_PCIE_AT_OB_REGION_DESC1_BUS_MASK)
+
+/* Region r AXI Region Base Address Register 0 */
+#define CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r) \
+ (CDNS_PCIE_AT_BASE + 0x0018 + ((r) & 0x1F) * 0x0020)
+#define CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS_MASK GENMASK(5, 0)
+#define CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(nbits) \
+ (((nbits) - 1) & CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS_MASK)
+
+/* Region r AXI Region Base Address Register 1 */
+#define CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r) \
+ (CDNS_PCIE_AT_BASE + 0x001C + ((r) & 0x1F) * 0x0020)
+
+/* Root Port BAR Inbound PCIe to AXI Address Translation Register */
+#define CDNS_PCIE_AT_IB_RP_BAR_ADDR0(bar) \
+ (CDNS_PCIE_AT_BASE + 0x0800 + (bar) * 0x0008)
+#define CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS_MASK GENMASK(5, 0)
+#define CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS(nbits) \
+ (((nbits) - 1) & CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS_MASK)
+#define CDNS_PCIE_AT_IB_RP_BAR_ADDR1(bar) \
+ (CDNS_PCIE_AT_BASE + 0x0804 + (bar) * 0x0008)
+
+/* AXI link down register */
+#define CDNS_PCIE_AT_LINKDOWN (CDNS_PCIE_AT_BASE + 0x0824)
+
+/* LTSSM Capabilities register */
+#define CDNS_PCIE_LTSSM_CONTROL_CAP (CDNS_PCIE_LM_BASE + 0x0054)
+#define CDNS_PCIE_DETECT_QUIET_MIN_DELAY_MASK GENMASK(2, 1)
+#define CDNS_PCIE_DETECT_QUIET_MIN_DELAY_SHIFT 1
+#define CDNS_PCIE_DETECT_QUIET_MIN_DELAY(delay) \
+ (((delay) << CDNS_PCIE_DETECT_QUIET_MIN_DELAY_SHIFT) & \
+ CDNS_PCIE_DETECT_QUIET_MIN_DELAY_MASK)
+
+#define CDNS_PCIE_RP_MAX_IB 0x3
+#define CDNS_PCIE_MAX_OB 32
+
+/* Endpoint Function BAR Inbound PCIe to AXI Address Translation Register */
+#define CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar) \
+ (CDNS_PCIE_AT_BASE + 0x0840 + (fn) * 0x0040 + (bar) * 0x0008)
+#define CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar) \
+ (CDNS_PCIE_AT_BASE + 0x0844 + (fn) * 0x0040 + (bar) * 0x0008)
+
+/* Normal/Vendor specific message access: offset inside some outbound region */
+#define CDNS_PCIE_NORMAL_MSG_ROUTING_MASK GENMASK(7, 5)
+#define CDNS_PCIE_NORMAL_MSG_ROUTING(route) \
+ (((route) << 5) & CDNS_PCIE_NORMAL_MSG_ROUTING_MASK)
+#define CDNS_PCIE_NORMAL_MSG_CODE_MASK GENMASK(15, 8)
+#define CDNS_PCIE_NORMAL_MSG_CODE(code) \
+ (((code) << 8) & CDNS_PCIE_NORMAL_MSG_CODE_MASK)
+#define CDNS_PCIE_MSG_NO_DATA BIT(16)
+
+#endif /* _PCIE_CADENCE_LGA_REGS_H */
diff --git a/drivers/pci/controller/cadence/pcie-cadence-plat.c b/drivers/pci/controller/cadence/pcie-cadence-plat.c
index bac0541317c1..b067a3296dd3 100644
--- a/drivers/pci/controller/cadence/pcie-cadence-plat.c
+++ b/drivers/pci/controller/cadence/pcie-cadence-plat.c
@@ -6,11 +6,10 @@
* Author: Tom Joseph <tjoseph@cadence.com>
*/
#include <linux/kernel.h>
-#include <linux/of_address.h>
+#include <linux/of.h>
#include <linux/of_pci.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
-#include <linux/of_device.h>
#include "pcie-cadence.h"
#define CDNS_PLAT_CPU_TO_BUS_ADDR 0x0FFFFFFF
@@ -18,16 +17,9 @@
/**
* struct cdns_plat_pcie - private data for this PCIe platform driver
* @pcie: Cadence PCIe controller
- * @is_rc: Set to 1 indicates the PCIe controller mode is Root Complex,
- * if 0 it is in Endpoint mode.
*/
struct cdns_plat_pcie {
struct cdns_pcie *pcie;
- bool is_rc;
-};
-
-struct cdns_plat_pcie_of_data {
- bool is_rc;
};
static const struct of_device_id cdns_plat_pcie_of_match[];
@@ -77,7 +69,6 @@ static int cdns_plat_pcie_probe(struct platform_device *pdev)
rc->pcie.dev = dev;
rc->pcie.ops = &cdns_plat_ops;
cdns_plat_pcie->pcie = &rc->pcie;
- cdns_plat_pcie->is_rc = is_rc;
ret = cdns_pcie_init_phy(dev, cdns_plat_pcie->pcie);
if (ret) {
@@ -105,7 +96,6 @@ static int cdns_plat_pcie_probe(struct platform_device *pdev)
ep->pcie.dev = dev;
ep->pcie.ops = &cdns_plat_ops;
cdns_plat_pcie->pcie = &ep->pcie;
- cdns_plat_pcie->is_rc = is_rc;
ret = cdns_pcie_init_phy(dev, cdns_plat_pcie->pcie);
if (ret) {
@@ -183,4 +173,7 @@ static struct platform_driver cdns_plat_pcie_driver = {
.probe = cdns_plat_pcie_probe,
.shutdown = cdns_plat_pcie_shutdown,
};
-builtin_platform_driver(cdns_plat_pcie_driver);
+module_platform_driver(cdns_plat_pcie_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Cadence PCIe controller platform driver");
diff --git a/drivers/pci/controller/cadence/pcie-cadence.c b/drivers/pci/controller/cadence/pcie-cadence.c
index 13c4032ca379..e6f1a4ac0fb7 100644
--- a/drivers/pci/controller/cadence/pcie-cadence.c
+++ b/drivers/pci/controller/cadence/pcie-cadence.c
@@ -4,8 +4,35 @@
// Author: Cyrille Pitchen <cyrille.pitchen@free-electrons.com>
#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
#include "pcie-cadence.h"
+#include "../../pci.h"
+
+u8 cdns_pcie_find_capability(struct cdns_pcie *pcie, u8 cap)
+{
+ return PCI_FIND_NEXT_CAP(cdns_pcie_read_cfg, PCI_CAPABILITY_LIST,
+ cap, pcie);
+}
+EXPORT_SYMBOL_GPL(cdns_pcie_find_capability);
+
+u16 cdns_pcie_find_ext_capability(struct cdns_pcie *pcie, u8 cap)
+{
+ return PCI_FIND_NEXT_EXT_CAP(cdns_pcie_read_cfg, 0, cap, pcie);
+}
+EXPORT_SYMBOL_GPL(cdns_pcie_find_ext_capability);
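These helpers walk the capability lists of the controller's own configuration space using the cdns_pcie_read_cfg_*() accessors from pcie-cadence.h. A hedged usage sketch; the returned offsets are only meaningful when non-zero:

/* Illustrative only: locate standard and extended capabilities */
u8 exp_off = cdns_pcie_find_capability(pcie, PCI_CAP_ID_EXP);
u16 aer_off = cdns_pcie_find_ext_capability(pcie, PCI_EXT_CAP_ID_ERR);

if (!exp_off || !aer_off)
	dev_dbg(pcie->dev, "missing capability: exp=%#x aer=%#x\n",
		exp_off, aer_off);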
+
+bool cdns_pcie_linkup(struct cdns_pcie *pcie)
+{
+ u32 pl_reg_val;
+
+ pl_reg_val = cdns_pcie_readl(pcie, CDNS_PCIE_LM_BASE);
+ if (pl_reg_val & GENMASK(0, 0))
+ return true;
+ return false;
+}
+EXPORT_SYMBOL_GPL(cdns_pcie_linkup);
void cdns_pcie_detect_quiet_min_delay_set(struct cdns_pcie *pcie)
{
@@ -22,6 +49,7 @@ void cdns_pcie_detect_quiet_min_delay_set(struct cdns_pcie *pcie)
cdns_pcie_writel(pcie, CDNS_PCIE_LTSSM_CONTROL_CAP, ltssm_control_cap);
}
+EXPORT_SYMBOL_GPL(cdns_pcie_detect_quiet_min_delay_set);
void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 busnr, u8 fn,
u32 r, bool is_io,
@@ -89,7 +117,7 @@ void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 busnr, u8 fn,
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(r), desc1);
/* Set the CPU address */
- if (pcie->ops->cpu_addr_fixup)
+ if (pcie->ops && pcie->ops->cpu_addr_fixup)
cpu_addr = pcie->ops->cpu_addr_fixup(pcie, cpu_addr);
addr0 = CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(nbits) |
@@ -99,6 +127,7 @@ void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 busnr, u8 fn,
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r), addr0);
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r), addr1);
}
+EXPORT_SYMBOL_GPL(cdns_pcie_set_outbound_region);
void cdns_pcie_set_outbound_region_for_normal_msg(struct cdns_pcie *pcie,
u8 busnr, u8 fn,
@@ -119,7 +148,7 @@ void cdns_pcie_set_outbound_region_for_normal_msg(struct cdns_pcie *pcie,
}
/* Set the CPU address */
- if (pcie->ops->cpu_addr_fixup)
+ if (pcie->ops && pcie->ops->cpu_addr_fixup)
cpu_addr = pcie->ops->cpu_addr_fixup(pcie, cpu_addr);
addr0 = CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(17) |
@@ -133,6 +162,7 @@ void cdns_pcie_set_outbound_region_for_normal_msg(struct cdns_pcie *pcie,
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r), addr0);
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r), addr1);
}
+EXPORT_SYMBOL_GPL(cdns_pcie_set_outbound_region_for_normal_msg);
void cdns_pcie_reset_outbound_region(struct cdns_pcie *pcie, u32 r)
{
@@ -145,6 +175,7 @@ void cdns_pcie_reset_outbound_region(struct cdns_pcie *pcie, u32 r)
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r), 0);
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r), 0);
}
+EXPORT_SYMBOL_GPL(cdns_pcie_reset_outbound_region);
void cdns_pcie_disable_phy(struct cdns_pcie *pcie)
{
@@ -155,6 +186,7 @@ void cdns_pcie_disable_phy(struct cdns_pcie *pcie)
phy_exit(pcie->phy[i]);
}
}
+EXPORT_SYMBOL_GPL(cdns_pcie_disable_phy);
int cdns_pcie_enable_phy(struct cdns_pcie *pcie)
{
@@ -183,6 +215,7 @@ err_phy:
return ret;
}
+EXPORT_SYMBOL_GPL(cdns_pcie_enable_phy);
int cdns_pcie_init_phy(struct device *dev, struct cdns_pcie *pcie)
{
@@ -196,7 +229,7 @@ int cdns_pcie_init_phy(struct device *dev, struct cdns_pcie *pcie)
phy_count = of_property_count_strings(np, "phy-names");
if (phy_count < 1) {
- dev_err(dev, "no phy-names. PHY will not be initialized\n");
+ dev_info(dev, "no \"phy-names\" property found; PHY will not be initialized\n");
pcie->phy_count = 0;
return 0;
}
@@ -242,6 +275,7 @@ err_phy:
return ret;
}
+EXPORT_SYMBOL_GPL(cdns_pcie_init_phy);
static int cdns_pcie_suspend_noirq(struct device *dev)
{
@@ -259,7 +293,7 @@ static int cdns_pcie_resume_noirq(struct device *dev)
ret = cdns_pcie_enable_phy(pcie);
if (ret) {
- dev_err(dev, "failed to enable phy\n");
+ dev_err(dev, "failed to enable PHY\n");
return ret;
}
@@ -270,3 +304,8 @@ const struct dev_pm_ops cdns_pcie_pm_ops = {
NOIRQ_SYSTEM_SLEEP_PM_OPS(cdns_pcie_suspend_noirq,
cdns_pcie_resume_noirq)
};
+EXPORT_SYMBOL_GPL(cdns_pcie_pm_ops);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Cadence PCIe controller driver");
+MODULE_AUTHOR("Cyrille Pitchen <cyrille.pitchen@free-electrons.com>");
diff --git a/drivers/pci/controller/cadence/pcie-cadence.h b/drivers/pci/controller/cadence/pcie-cadence.h
index 190786e47df9..443033c607d7 100644
--- a/drivers/pci/controller/cadence/pcie-cadence.h
+++ b/drivers/pci/controller/cadence/pcie-cadence.h
@@ -7,210 +7,12 @@
#define _PCIE_CADENCE_H
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pci-epf.h>
#include <linux/phy/phy.h>
-
-/* Parameters for the waiting for link up routine */
-#define LINK_WAIT_MAX_RETRIES 10
-#define LINK_WAIT_USLEEP_MIN 90000
-#define LINK_WAIT_USLEEP_MAX 100000
-
-/*
- * Local Management Registers
- */
-#define CDNS_PCIE_LM_BASE 0x00100000
-
-/* Vendor ID Register */
-#define CDNS_PCIE_LM_ID (CDNS_PCIE_LM_BASE + 0x0044)
-#define CDNS_PCIE_LM_ID_VENDOR_MASK GENMASK(15, 0)
-#define CDNS_PCIE_LM_ID_VENDOR_SHIFT 0
-#define CDNS_PCIE_LM_ID_VENDOR(vid) \
- (((vid) << CDNS_PCIE_LM_ID_VENDOR_SHIFT) & CDNS_PCIE_LM_ID_VENDOR_MASK)
-#define CDNS_PCIE_LM_ID_SUBSYS_MASK GENMASK(31, 16)
-#define CDNS_PCIE_LM_ID_SUBSYS_SHIFT 16
-#define CDNS_PCIE_LM_ID_SUBSYS(sub) \
- (((sub) << CDNS_PCIE_LM_ID_SUBSYS_SHIFT) & CDNS_PCIE_LM_ID_SUBSYS_MASK)
-
-/* Root Port Requestor ID Register */
-#define CDNS_PCIE_LM_RP_RID (CDNS_PCIE_LM_BASE + 0x0228)
-#define CDNS_PCIE_LM_RP_RID_MASK GENMASK(15, 0)
-#define CDNS_PCIE_LM_RP_RID_SHIFT 0
-#define CDNS_PCIE_LM_RP_RID_(rid) \
- (((rid) << CDNS_PCIE_LM_RP_RID_SHIFT) & CDNS_PCIE_LM_RP_RID_MASK)
-
-/* Endpoint Bus and Device Number Register */
-#define CDNS_PCIE_LM_EP_ID (CDNS_PCIE_LM_BASE + 0x022c)
-#define CDNS_PCIE_LM_EP_ID_DEV_MASK GENMASK(4, 0)
-#define CDNS_PCIE_LM_EP_ID_DEV_SHIFT 0
-#define CDNS_PCIE_LM_EP_ID_BUS_MASK GENMASK(15, 8)
-#define CDNS_PCIE_LM_EP_ID_BUS_SHIFT 8
-
-/* Endpoint Function f BAR b Configuration Registers */
-#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG(bar, fn) \
- (((bar) < BAR_4) ? CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn) : CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn))
-#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn) \
- (CDNS_PCIE_LM_BASE + 0x0240 + (fn) * 0x0008)
-#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn) \
- (CDNS_PCIE_LM_BASE + 0x0244 + (fn) * 0x0008)
-#define CDNS_PCIE_LM_EP_VFUNC_BAR_CFG(bar, fn) \
- (((bar) < BAR_4) ? CDNS_PCIE_LM_EP_VFUNC_BAR_CFG0(fn) : CDNS_PCIE_LM_EP_VFUNC_BAR_CFG1(fn))
-#define CDNS_PCIE_LM_EP_VFUNC_BAR_CFG0(fn) \
- (CDNS_PCIE_LM_BASE + 0x0280 + (fn) * 0x0008)
-#define CDNS_PCIE_LM_EP_VFUNC_BAR_CFG1(fn) \
- (CDNS_PCIE_LM_BASE + 0x0284 + (fn) * 0x0008)
-#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) \
- (GENMASK(4, 0) << ((b) * 8))
-#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE(b, a) \
- (((a) << ((b) * 8)) & CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b))
-#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b) \
- (GENMASK(7, 5) << ((b) * 8))
-#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, c) \
- (((c) << ((b) * 8 + 5)) & CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b))
-
-/* Endpoint Function Configuration Register */
-#define CDNS_PCIE_LM_EP_FUNC_CFG (CDNS_PCIE_LM_BASE + 0x02c0)
-
-/* Root Complex BAR Configuration Register */
-#define CDNS_PCIE_LM_RC_BAR_CFG (CDNS_PCIE_LM_BASE + 0x0300)
-#define CDNS_PCIE_LM_RC_BAR_CFG_BAR0_APERTURE_MASK GENMASK(5, 0)
-#define CDNS_PCIE_LM_RC_BAR_CFG_BAR0_APERTURE(a) \
- (((a) << 0) & CDNS_PCIE_LM_RC_BAR_CFG_BAR0_APERTURE_MASK)
-#define CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL_MASK GENMASK(8, 6)
-#define CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL(c) \
- (((c) << 6) & CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL_MASK)
-#define CDNS_PCIE_LM_RC_BAR_CFG_BAR1_APERTURE_MASK GENMASK(13, 9)
-#define CDNS_PCIE_LM_RC_BAR_CFG_BAR1_APERTURE(a) \
- (((a) << 9) & CDNS_PCIE_LM_RC_BAR_CFG_BAR1_APERTURE_MASK)
-#define CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL_MASK GENMASK(16, 14)
-#define CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL(c) \
- (((c) << 14) & CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL_MASK)
-#define CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_ENABLE BIT(17)
-#define CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_32BITS 0
-#define CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_64BITS BIT(18)
-#define CDNS_PCIE_LM_RC_BAR_CFG_IO_ENABLE BIT(19)
-#define CDNS_PCIE_LM_RC_BAR_CFG_IO_16BITS 0
-#define CDNS_PCIE_LM_RC_BAR_CFG_IO_32BITS BIT(20)
-#define CDNS_PCIE_LM_RC_BAR_CFG_CHECK_ENABLE BIT(31)
-
-/* BAR control values applicable to both Endpoint Function and Root Complex */
-#define CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED 0x0
-#define CDNS_PCIE_LM_BAR_CFG_CTRL_IO_32BITS 0x1
-#define CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_32BITS 0x4
-#define CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_32BITS 0x5
-#define CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_64BITS 0x6
-#define CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_64BITS 0x7
-
-#define LM_RC_BAR_CFG_CTRL_DISABLED(bar) \
- (CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED << (((bar) * 8) + 6))
-#define LM_RC_BAR_CFG_CTRL_IO_32BITS(bar) \
- (CDNS_PCIE_LM_BAR_CFG_CTRL_IO_32BITS << (((bar) * 8) + 6))
-#define LM_RC_BAR_CFG_CTRL_MEM_32BITS(bar) \
- (CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_32BITS << (((bar) * 8) + 6))
-#define LM_RC_BAR_CFG_CTRL_PREF_MEM_32BITS(bar) \
- (CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_32BITS << (((bar) * 8) + 6))
-#define LM_RC_BAR_CFG_CTRL_MEM_64BITS(bar) \
- (CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_64BITS << (((bar) * 8) + 6))
-#define LM_RC_BAR_CFG_CTRL_PREF_MEM_64BITS(bar) \
- (CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_64BITS << (((bar) * 8) + 6))
-#define LM_RC_BAR_CFG_APERTURE(bar, aperture) \
- (((aperture) - 2) << ((bar) * 8))
-
-/* PTM Control Register */
-#define CDNS_PCIE_LM_PTM_CTRL (CDNS_PCIE_LM_BASE + 0x0da8)
-#define CDNS_PCIE_LM_TPM_CTRL_PTMRSEN BIT(17)
-
-/*
- * Endpoint Function Registers (PCI configuration space for endpoint functions)
- */
-#define CDNS_PCIE_EP_FUNC_BASE(fn) (((fn) << 12) & GENMASK(19, 12))
-
-#define CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET 0x90
-#define CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET 0xb0
-#define CDNS_PCIE_EP_FUNC_DEV_CAP_OFFSET 0xc0
-#define CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET 0x200
-
-/*
- * Root Port Registers (PCI configuration space for the root port function)
- */
-#define CDNS_PCIE_RP_BASE 0x00200000
-#define CDNS_PCIE_RP_CAP_OFFSET 0xc0
-
-/*
- * Address Translation Registers
- */
-#define CDNS_PCIE_AT_BASE 0x00400000
-
-/* Region r Outbound AXI to PCIe Address Translation Register 0 */
-#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(r) \
- (CDNS_PCIE_AT_BASE + 0x0000 + ((r) & 0x1f) * 0x0020)
-#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS_MASK GENMASK(5, 0)
-#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS(nbits) \
- (((nbits) - 1) & CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS_MASK)
-#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN_MASK GENMASK(19, 12)
-#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN(devfn) \
- (((devfn) << 12) & CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN_MASK)
-#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS_MASK GENMASK(27, 20)
-#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS(bus) \
- (((bus) << 20) & CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS_MASK)
-
-/* Region r Outbound AXI to PCIe Address Translation Register 1 */
-#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(r) \
- (CDNS_PCIE_AT_BASE + 0x0004 + ((r) & 0x1f) * 0x0020)
-
-/* Region r Outbound PCIe Descriptor Register 0 */
-#define CDNS_PCIE_AT_OB_REGION_DESC0(r) \
- (CDNS_PCIE_AT_BASE + 0x0008 + ((r) & 0x1f) * 0x0020)
-#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_MASK GENMASK(3, 0)
-#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_MEM 0x2
-#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_IO 0x6
-#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE0 0xa
-#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE1 0xb
-#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_NORMAL_MSG 0xc
-#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_VENDOR_MSG 0xd
-/* Bit 23 MUST be set in RC mode. */
-#define CDNS_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID BIT(23)
-#define CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN_MASK GENMASK(31, 24)
-#define CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(devfn) \
- (((devfn) << 24) & CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN_MASK)
-
-/* Region r Outbound PCIe Descriptor Register 1 */
-#define CDNS_PCIE_AT_OB_REGION_DESC1(r) \
- (CDNS_PCIE_AT_BASE + 0x000c + ((r) & 0x1f) * 0x0020)
-#define CDNS_PCIE_AT_OB_REGION_DESC1_BUS_MASK GENMASK(7, 0)
-#define CDNS_PCIE_AT_OB_REGION_DESC1_BUS(bus) \
- ((bus) & CDNS_PCIE_AT_OB_REGION_DESC1_BUS_MASK)
-
-/* Region r AXI Region Base Address Register 0 */
-#define CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r) \
- (CDNS_PCIE_AT_BASE + 0x0018 + ((r) & 0x1f) * 0x0020)
-#define CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS_MASK GENMASK(5, 0)
-#define CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(nbits) \
- (((nbits) - 1) & CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS_MASK)
-
-/* Region r AXI Region Base Address Register 1 */
-#define CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r) \
- (CDNS_PCIE_AT_BASE + 0x001c + ((r) & 0x1f) * 0x0020)
-
-/* Root Port BAR Inbound PCIe to AXI Address Translation Register */
-#define CDNS_PCIE_AT_IB_RP_BAR_ADDR0(bar) \
- (CDNS_PCIE_AT_BASE + 0x0800 + (bar) * 0x0008)
-#define CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS_MASK GENMASK(5, 0)
-#define CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS(nbits) \
- (((nbits) - 1) & CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS_MASK)
-#define CDNS_PCIE_AT_IB_RP_BAR_ADDR1(bar) \
- (CDNS_PCIE_AT_BASE + 0x0804 + (bar) * 0x0008)
-
-/* AXI link down register */
-#define CDNS_PCIE_AT_LINKDOWN (CDNS_PCIE_AT_BASE + 0x0824)
-
-/* LTSSM Capabilities register */
-#define CDNS_PCIE_LTSSM_CONTROL_CAP (CDNS_PCIE_LM_BASE + 0x0054)
-#define CDNS_PCIE_DETECT_QUIET_MIN_DELAY_MASK GENMASK(2, 1)
-#define CDNS_PCIE_DETECT_QUIET_MIN_DELAY_SHIFT 1
-#define CDNS_PCIE_DETECT_QUIET_MIN_DELAY(delay) \
- (((delay) << CDNS_PCIE_DETECT_QUIET_MIN_DELAY_SHIFT) & \
- CDNS_PCIE_DETECT_QUIET_MIN_DELAY_MASK)
+#include "pcie-cadence-lga-regs.h"
+#include "pcie-cadence-hpa-regs.h"
enum cdns_pcie_rp_bar {
RP_BAR_UNDEFINED = -1,
@@ -219,73 +21,63 @@ enum cdns_pcie_rp_bar {
RP_NO_BAR
};
-#define CDNS_PCIE_RP_MAX_IB 0x3
-#define CDNS_PCIE_MAX_OB 32
-
struct cdns_pcie_rp_ib_bar {
u64 size;
bool free;
};
-/* Endpoint Function BAR Inbound PCIe to AXI Address Translation Register */
-#define CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar) \
- (CDNS_PCIE_AT_BASE + 0x0840 + (fn) * 0x0040 + (bar) * 0x0008)
-#define CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar) \
- (CDNS_PCIE_AT_BASE + 0x0844 + (fn) * 0x0040 + (bar) * 0x0008)
-
-/* Normal/Vendor specific message access: offset inside some outbound region */
-#define CDNS_PCIE_NORMAL_MSG_ROUTING_MASK GENMASK(7, 5)
-#define CDNS_PCIE_NORMAL_MSG_ROUTING(route) \
- (((route) << 5) & CDNS_PCIE_NORMAL_MSG_ROUTING_MASK)
-#define CDNS_PCIE_NORMAL_MSG_CODE_MASK GENMASK(15, 8)
-#define CDNS_PCIE_NORMAL_MSG_CODE(code) \
- (((code) << 8) & CDNS_PCIE_NORMAL_MSG_CODE_MASK)
-#define CDNS_PCIE_MSG_NO_DATA BIT(16)
-
struct cdns_pcie;
-
-enum cdns_pcie_msg_code {
- MSG_CODE_ASSERT_INTA = 0x20,
- MSG_CODE_ASSERT_INTB = 0x21,
- MSG_CODE_ASSERT_INTC = 0x22,
- MSG_CODE_ASSERT_INTD = 0x23,
- MSG_CODE_DEASSERT_INTA = 0x24,
- MSG_CODE_DEASSERT_INTB = 0x25,
- MSG_CODE_DEASSERT_INTC = 0x26,
- MSG_CODE_DEASSERT_INTD = 0x27,
-};
-
-enum cdns_pcie_msg_routing {
- /* Route to Root Complex */
- MSG_ROUTING_TO_RC,
-
- /* Use Address Routing */
- MSG_ROUTING_BY_ADDR,
-
- /* Use ID Routing */
- MSG_ROUTING_BY_ID,
-
- /* Route as Broadcast Message from Root Complex */
- MSG_ROUTING_BCAST,
-
- /* Local message; terminate at receiver (INTx messages) */
- MSG_ROUTING_LOCAL,
-
- /* Gather & route to Root Complex (PME_TO_Ack message) */
- MSG_ROUTING_GATHER,
+struct cdns_pcie_rc;
+
+enum cdns_pcie_reg_bank {
+ REG_BANK_RP,
+ REG_BANK_IP_REG,
+ REG_BANK_IP_CFG_CTRL_REG,
+ REG_BANK_AXI_MASTER_COMMON,
+ REG_BANK_AXI_MASTER,
+ REG_BANK_AXI_SLAVE,
+ REG_BANK_AXI_HLS,
+ REG_BANK_AXI_RAS,
+ REG_BANK_AXI_DTI,
+ REG_BANKS_MAX,
};
struct cdns_pcie_ops {
- int (*start_link)(struct cdns_pcie *pcie);
- void (*stop_link)(struct cdns_pcie *pcie);
- bool (*link_up)(struct cdns_pcie *pcie);
+ int (*start_link)(struct cdns_pcie *pcie);
+ void (*stop_link)(struct cdns_pcie *pcie);
+ bool (*link_up)(struct cdns_pcie *pcie);
u64 (*cpu_addr_fixup)(struct cdns_pcie *pcie, u64 cpu_addr);
};
/**
+ * struct cdns_plat_pcie_of_data - Register bank offsets for a platform
+ * @is_rc: Set when the controller is a Root Complex
+ * @ip_reg_bank_offset: IP register bank start offset
+ * @ip_cfg_ctrl_reg_offset: IP config control register start offset
+ * @axi_mstr_common_offset: AXI master common register start offset
+ * @axi_slave_offset: AXI slave start offset
+ * @axi_master_offset: AXI master start offset
+ * @axi_hls_offset: AXI HLS start offset
+ * @axi_ras_offset: AXI RAS start offset
+ * @axi_dti_offset: AXI DTI start offset
+ */
+struct cdns_plat_pcie_of_data {
+ u32 is_rc:1;
+ u32 ip_reg_bank_offset;
+ u32 ip_cfg_ctrl_reg_offset;
+ u32 axi_mstr_common_offset;
+ u32 axi_slave_offset;
+ u32 axi_master_offset;
+ u32 axi_hls_offset;
+ u32 axi_ras_offset;
+ u32 axi_dti_offset;
+};
+
+/**
* struct cdns_pcie - private data for Cadence PCIe controller drivers
* @reg_base: IO mapped register base
* @mem_res: start/end offsets in the physical system memory to map PCI accesses
+ * @msg_res: start/end offsets in the physical system memory used to send PCIe messages
* @dev: PCIe controller
* @is_rc: tell whether the PCIe controller mode is Root Complex or Endpoint.
* @phy_count: number of supported PHY devices
@@ -293,22 +85,24 @@ struct cdns_pcie_ops {
* @link: list of pointers to corresponding device link representations
* @ops: Platform-specific ops to control various inputs from Cadence PCIe
* wrapper
+ * @cdns_pcie_reg_offsets: Register bank offsets for different SoCs
*/
struct cdns_pcie {
- void __iomem *reg_base;
- struct resource *mem_res;
- struct device *dev;
- bool is_rc;
- int phy_count;
- struct phy **phy;
- struct device_link **link;
- const struct cdns_pcie_ops *ops;
+ void __iomem *reg_base;
+ struct resource *mem_res;
+ struct resource *msg_res;
+ struct device *dev;
+ bool is_rc;
+ int phy_count;
+ struct phy **phy;
+ struct device_link **link;
+ const struct cdns_pcie_ops *ops;
+ const struct cdns_plat_pcie_of_data *cdns_pcie_reg_offsets;
};
/**
* struct cdns_pcie_rc - private data for this PCIe Root Complex driver
* @pcie: Cadence PCIe controller
- * @dev: pointer to PCIe device
* @cfg_res: start/end offsets in the physical system memory to map PCI
* configuration space accesses
* @cfg_base: IO mapped window to access the PCI configuration space of a
@@ -319,6 +113,8 @@ struct cdns_pcie {
* available
* @quirk_retrain_flag: Retrain link as quirk for PCIe Gen2
* @quirk_detect_quiet_flag: LTSSM Detect Quiet min delay set as quirk
+ * @ecam_supported: Whether ECAM is supported
+ * @no_inbound_map: Set when inbound address mapping is not supported
*/
struct cdns_pcie_rc {
struct cdns_pcie pcie;
@@ -329,6 +125,8 @@ struct cdns_pcie_rc {
bool avail_ib_bar[CDNS_PCIE_RP_MAX_IB];
unsigned int quirk_retrain_flag:1;
unsigned int quirk_detect_quiet_flag:1;
+ unsigned int ecam_supported:1;
+ unsigned int no_inbound_map:1;
};
/**
@@ -347,16 +145,16 @@ struct cdns_pcie_epf {
* @max_regions: maximum number of regions supported by hardware
* @ob_region_map: bitmask of mapped outbound regions
* @ob_addr: base addresses in the AXI bus where the outbound regions start
- * @irq_phys_addr: base address on the AXI bus where the MSI/legacy IRQ
+ * @irq_phys_addr: base address on the AXI bus where the MSI/INTX IRQ
* dedicated outbound regions is mapped.
* @irq_cpu_addr: base address in the CPU space where a write access triggers
- * the sending of a memory write (MSI) / normal message (legacy
+ * the sending of a memory write (MSI) / normal message (INTX
* IRQ) TLP through the PCIe bus.
- * @irq_pci_addr: used to save the current mapping of the MSI/legacy IRQ
+ * @irq_pci_addr: used to save the current mapping of the MSI/INTX IRQ
* dedicated outbound region.
* @irq_pci_fn: the latest PCI function that has updated the mapping of
- * the MSI/legacy IRQ dedicated outbound region.
- * @irq_pending: bitmask of asserted legacy IRQs.
+ * the MSI/INTX IRQ dedicated outbound region.
+ * @irq_pending: bitmask of asserted INTX IRQs.
* @lock: spin lock to disable interrupts while modifying PCIe controller
* registers fields (RMW) accessible by both remote RC and EP to
* minimize time between read and write
@@ -374,13 +172,50 @@ struct cdns_pcie_ep {
u64 irq_pci_addr;
u8 irq_pci_fn;
u8 irq_pending;
- /* protect writing to PCI_STATUS while raising legacy interrupts */
+ /* protect writing to PCI_STATUS while raising INTX interrupts */
spinlock_t lock;
struct cdns_pcie_epf *epf;
unsigned int quirk_detect_quiet_flag:1;
unsigned int quirk_disable_flr:1;
};
+static inline u32 cdns_reg_bank_to_off(struct cdns_pcie *pcie, enum cdns_pcie_reg_bank bank)
+{
+ u32 offset = 0x0;
+
+ switch (bank) {
+ case REG_BANK_RP:
+ offset = 0;
+ break;
+ case REG_BANK_IP_REG:
+ offset = pcie->cdns_pcie_reg_offsets->ip_reg_bank_offset;
+ break;
+ case REG_BANK_IP_CFG_CTRL_REG:
+ offset = pcie->cdns_pcie_reg_offsets->ip_cfg_ctrl_reg_offset;
+ break;
+ case REG_BANK_AXI_MASTER_COMMON:
+ offset = pcie->cdns_pcie_reg_offsets->axi_mstr_common_offset;
+ break;
+ case REG_BANK_AXI_MASTER:
+ offset = pcie->cdns_pcie_reg_offsets->axi_master_offset;
+ break;
+ case REG_BANK_AXI_SLAVE:
+ offset = pcie->cdns_pcie_reg_offsets->axi_slave_offset;
+ break;
+ case REG_BANK_AXI_HLS:
+ offset = pcie->cdns_pcie_reg_offsets->axi_hls_offset;
+ break;
+ case REG_BANK_AXI_RAS:
+ offset = pcie->cdns_pcie_reg_offsets->axi_ras_offset;
+ break;
+ case REG_BANK_AXI_DTI:
+ offset = pcie->cdns_pcie_reg_offsets->axi_dti_offset;
+ break;
+ default:
+ break;
+ }
+ return offset;
+}
/* Register access */
static inline void cdns_pcie_writel(struct cdns_pcie *pcie, u32 reg, u32 value)
@@ -393,6 +228,58 @@ static inline u32 cdns_pcie_readl(struct cdns_pcie *pcie, u32 reg)
return readl(pcie->reg_base + reg);
}
+static inline void cdns_pcie_hpa_writel(struct cdns_pcie *pcie,
+ enum cdns_pcie_reg_bank bank,
+ u32 reg,
+ u32 value)
+{
+ u32 offset = cdns_reg_bank_to_off(pcie, bank);
+
+ reg += offset;
+ writel(value, pcie->reg_base + reg);
+}
+
+static inline u32 cdns_pcie_hpa_readl(struct cdns_pcie *pcie,
+ enum cdns_pcie_reg_bank bank,
+ u32 reg)
+{
+ u32 offset = cdns_reg_bank_to_off(pcie, bank);
+
+ reg += offset;
+ return readl(pcie->reg_base + reg);
+}
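The banked accessors add a per-SoC bank offset (taken from struct cdns_plat_pcie_of_data via cdns_reg_bank_to_off()) to the bank-relative register offset before the MMIO access, so the HPA register definitions stay independent of the SoC's address map. A small sketch, mirroring cdns_pcie_hpa_link_up() earlier in this patch:

/* Illustrative only: bank-relative read of the PHY debug status register */
u32 sts = cdns_pcie_hpa_readl(pcie, REG_BANK_IP_REG,
			      CDNS_PCIE_HPA_PHY_DBG_STS_REG0);
if (sts & BIT(0))
	dev_dbg(pcie->dev, "link is up\n");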
+
+static inline u16 cdns_pcie_readw(struct cdns_pcie *pcie, u32 reg)
+{
+ return readw(pcie->reg_base + reg);
+}
+
+static inline u8 cdns_pcie_readb(struct cdns_pcie *pcie, u32 reg)
+{
+ return readb(pcie->reg_base + reg);
+}
+
+static inline int cdns_pcie_read_cfg_byte(struct cdns_pcie *pcie, int where,
+ u8 *val)
+{
+ *val = cdns_pcie_readb(pcie, where);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static inline int cdns_pcie_read_cfg_word(struct cdns_pcie *pcie, int where,
+ u16 *val)
+{
+ *val = cdns_pcie_readw(pcie, where);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static inline int cdns_pcie_read_cfg_dword(struct cdns_pcie *pcie, int where,
+ u32 *val)
+{
+ *val = cdns_pcie_readl(pcie, where);
+ return PCIBIOS_SUCCESSFUL;
+}
+
static inline u32 cdns_pcie_read_sz(void __iomem *addr, int size)
{
void __iomem *aligned_addr = PTR_ALIGN_DOWN(addr, 0x4);
@@ -457,6 +344,29 @@ static inline u16 cdns_pcie_rp_readw(struct cdns_pcie *pcie, u32 reg)
return cdns_pcie_read_sz(addr, 0x2);
}
+static inline void cdns_pcie_hpa_rp_writeb(struct cdns_pcie *pcie,
+ u32 reg, u8 value)
+{
+ void __iomem *addr = pcie->reg_base + CDNS_PCIE_HPA_RP_BASE + reg;
+
+ cdns_pcie_write_sz(addr, 0x1, value);
+}
+
+static inline void cdns_pcie_hpa_rp_writew(struct cdns_pcie *pcie,
+ u32 reg, u16 value)
+{
+ void __iomem *addr = pcie->reg_base + CDNS_PCIE_HPA_RP_BASE + reg;
+
+ cdns_pcie_write_sz(addr, 0x2, value);
+}
+
+static inline u16 cdns_pcie_hpa_rp_readw(struct cdns_pcie *pcie, u32 reg)
+{
+ void __iomem *addr = pcie->reg_base + CDNS_PCIE_HPA_RP_BASE + reg;
+
+ return cdns_pcie_read_sz(addr, 0x2);
+}
+
/* Endpoint Function register access */
static inline void cdns_pcie_ep_fn_writeb(struct cdns_pcie *pcie, u8 fn,
u32 reg, u8 value)
@@ -494,7 +404,7 @@ static inline u32 cdns_pcie_ep_fn_readl(struct cdns_pcie *pcie, u8 fn, u32 reg)
static inline int cdns_pcie_start_link(struct cdns_pcie *pcie)
{
- if (pcie->ops->start_link)
+ if (pcie->ops && pcie->ops->start_link)
return pcie->ops->start_link(pcie);
return 0;
@@ -502,28 +412,51 @@ static inline int cdns_pcie_start_link(struct cdns_pcie *pcie)
static inline void cdns_pcie_stop_link(struct cdns_pcie *pcie)
{
- if (pcie->ops->stop_link)
+ if (pcie->ops && pcie->ops->stop_link)
pcie->ops->stop_link(pcie);
}
static inline bool cdns_pcie_link_up(struct cdns_pcie *pcie)
{
- if (pcie->ops->link_up)
+ if (pcie->ops && pcie->ops->link_up)
return pcie->ops->link_up(pcie);
return true;
}
-#ifdef CONFIG_PCIE_CADENCE_HOST
+#if IS_ENABLED(CONFIG_PCIE_CADENCE_HOST)
+int cdns_pcie_host_link_setup(struct cdns_pcie_rc *rc);
+int cdns_pcie_host_init(struct cdns_pcie_rc *rc);
int cdns_pcie_host_setup(struct cdns_pcie_rc *rc);
+void cdns_pcie_host_disable(struct cdns_pcie_rc *rc);
void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int devfn,
int where);
+int cdns_pcie_hpa_host_setup(struct cdns_pcie_rc *rc);
#else
+static inline int cdns_pcie_host_link_setup(struct cdns_pcie_rc *rc)
+{
+ return 0;
+}
+
+static inline int cdns_pcie_host_init(struct cdns_pcie_rc *rc)
+{
+ return 0;
+}
+
static inline int cdns_pcie_host_setup(struct cdns_pcie_rc *rc)
{
return 0;
}
+static inline int cdns_pcie_hpa_host_setup(struct cdns_pcie_rc *rc)
+{
+ return 0;
+}
+
+static inline void cdns_pcie_host_disable(struct cdns_pcie_rc *rc)
+{
+}
+
static inline void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int devfn,
int where)
{
@@ -531,15 +464,31 @@ static inline void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int d
}
#endif
-#ifdef CONFIG_PCIE_CADENCE_EP
+#if IS_ENABLED(CONFIG_PCIE_CADENCE_EP)
int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep);
+void cdns_pcie_ep_disable(struct cdns_pcie_ep *ep);
+int cdns_pcie_hpa_ep_setup(struct cdns_pcie_ep *ep);
#else
static inline int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
{
return 0;
}
+
+static inline void cdns_pcie_ep_disable(struct cdns_pcie_ep *ep)
+{
+}
+
+static inline int cdns_pcie_hpa_ep_setup(struct cdns_pcie_ep *ep)
+{
+ return 0;
+}
+
#endif
+u8 cdns_pcie_find_capability(struct cdns_pcie *pcie, u8 cap);
+u16 cdns_pcie_find_ext_capability(struct cdns_pcie *pcie, u8 cap);
+bool cdns_pcie_linkup(struct cdns_pcie *pcie);
+
void cdns_pcie_detect_quiet_min_delay_set(struct cdns_pcie *pcie);
void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 busnr, u8 fn,
@@ -552,8 +501,23 @@ void cdns_pcie_set_outbound_region_for_normal_msg(struct cdns_pcie *pcie,
void cdns_pcie_reset_outbound_region(struct cdns_pcie *pcie, u32 r);
void cdns_pcie_disable_phy(struct cdns_pcie *pcie);
-int cdns_pcie_enable_phy(struct cdns_pcie *pcie);
-int cdns_pcie_init_phy(struct device *dev, struct cdns_pcie *pcie);
+int cdns_pcie_enable_phy(struct cdns_pcie *pcie);
+int cdns_pcie_init_phy(struct device *dev, struct cdns_pcie *pcie);
+void cdns_pcie_hpa_detect_quiet_min_delay_set(struct cdns_pcie *pcie);
+void cdns_pcie_hpa_set_outbound_region(struct cdns_pcie *pcie, u8 busnr, u8 fn,
+ u32 r, bool is_io,
+ u64 cpu_addr, u64 pci_addr, size_t size);
+void cdns_pcie_hpa_set_outbound_region_for_normal_msg(struct cdns_pcie *pcie,
+ u8 busnr, u8 fn,
+ u32 r, u64 cpu_addr);
+int cdns_pcie_hpa_host_link_setup(struct cdns_pcie_rc *rc);
+void __iomem *cdns_pci_hpa_map_bus(struct pci_bus *bus, unsigned int devfn,
+ int where);
+int cdns_pcie_hpa_host_start_link(struct cdns_pcie_rc *rc);
+int cdns_pcie_hpa_start_link(struct cdns_pcie *pcie);
+void cdns_pcie_hpa_stop_link(struct cdns_pcie *pcie);
+bool cdns_pcie_hpa_link_up(struct cdns_pcie *pcie);
+
extern const struct dev_pm_ops cdns_pcie_pm_ops;
#endif /* _PCIE_CADENCE_H */
diff --git a/drivers/pci/controller/cadence/pcie-sg2042.c b/drivers/pci/controller/cadence/pcie-sg2042.c
new file mode 100644
index 000000000000..0c50c74d03ee
--- /dev/null
+++ b/drivers/pci/controller/cadence/pcie-sg2042.c
@@ -0,0 +1,131 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * pcie-sg2042 - PCIe controller driver for Sophgo SG2042 SoC
+ *
+ * Copyright (C) 2025 Sophgo Technology Inc.
+ * Copyright (C) 2025 Chen Wang <unicorn_wang@outlook.com>
+ */
+
+#include <linux/mod_devicetable.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include "pcie-cadence.h"
+
+/*
+ * SG2042 only supports 4-byte aligned accesses to the root bus (i.e. to
+ * read/write the Root Port itself), so read32/write32 is required there.
+ * Non-root buses (i.e. the config space of PCIe peripherals) support
+ * 1/2/4-byte aligned accesses, so the plain read/write accessors are fine.
+ */
+
+static struct pci_ops sg2042_pcie_root_ops = {
+ .map_bus = cdns_pci_map_bus,
+ .read = pci_generic_config_read32,
+ .write = pci_generic_config_write32,
+};
+
+static struct pci_ops sg2042_pcie_child_ops = {
+ .map_bus = cdns_pci_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+};
+
+static int sg2042_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct pci_host_bridge *bridge;
+ struct cdns_pcie *pcie;
+ struct cdns_pcie_rc *rc;
+ int ret;
+
+ bridge = devm_pci_alloc_host_bridge(dev, sizeof(*rc));
+ if (!bridge)
+ return dev_err_probe(dev, -ENOMEM, "Failed to alloc host bridge!\n");
+
+ bridge->ops = &sg2042_pcie_root_ops;
+ bridge->child_ops = &sg2042_pcie_child_ops;
+
+ rc = pci_host_bridge_priv(bridge);
+ pcie = &rc->pcie;
+ pcie->dev = dev;
+
+ platform_set_drvdata(pdev, pcie);
+
+ pm_runtime_set_active(dev);
+ pm_runtime_no_callbacks(dev);
+ devm_pm_runtime_enable(dev);
+
+ ret = cdns_pcie_init_phy(dev, pcie);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to init phy!\n");
+
+ ret = cdns_pcie_host_setup(rc);
+ if (ret) {
+ dev_err_probe(dev, ret, "Failed to setup host!\n");
+ cdns_pcie_disable_phy(pcie);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void sg2042_pcie_remove(struct platform_device *pdev)
+{
+ struct cdns_pcie *pcie = platform_get_drvdata(pdev);
+ struct cdns_pcie_rc *rc;
+
+ rc = container_of(pcie, struct cdns_pcie_rc, pcie);
+ cdns_pcie_host_disable(rc);
+
+ cdns_pcie_disable_phy(pcie);
+}
+
+static int sg2042_pcie_suspend_noirq(struct device *dev)
+{
+ struct cdns_pcie *pcie = dev_get_drvdata(dev);
+
+ cdns_pcie_disable_phy(pcie);
+
+ return 0;
+}
+
+static int sg2042_pcie_resume_noirq(struct device *dev)
+{
+ struct cdns_pcie *pcie = dev_get_drvdata(dev);
+ int ret;
+
+ ret = cdns_pcie_enable_phy(pcie);
+ if (ret) {
+ dev_err(dev, "failed to enable PHY\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static DEFINE_NOIRQ_DEV_PM_OPS(sg2042_pcie_pm_ops,
+ sg2042_pcie_suspend_noirq,
+ sg2042_pcie_resume_noirq);
+
+static const struct of_device_id sg2042_pcie_of_match[] = {
+ { .compatible = "sophgo,sg2042-pcie-host" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, sg2042_pcie_of_match);
+
+static struct platform_driver sg2042_pcie_driver = {
+ .driver = {
+ .name = "sg2042-pcie",
+ .of_match_table = sg2042_pcie_of_match,
+ .pm = pm_sleep_ptr(&sg2042_pcie_pm_ops),
+ },
+ .probe = sg2042_pcie_probe,
+ .remove = sg2042_pcie_remove,
+};
+module_platform_driver(sg2042_pcie_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("PCIe controller driver for SG2042 SoCs");
+MODULE_AUTHOR("Chen Wang <unicorn_wang@outlook.com>");
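For context on the two pci_ops above: pci_generic_config_read32()/write32() always issue an aligned 32-bit access and extract or merge the requested bytes in software, which is what makes them safe on a Root Port that only tolerates 4-byte accesses. A simplified sketch of the read side under that model (illustrative only, not the actual kernel implementation):

static int cfg_read32_sketch(void __iomem *base, int where, int size, u32 *val)
{
	/* always perform the config access as an aligned 32-bit read */
	u32 word = readl(base + (where & ~0x3));

	/* shift/mask out the 1- or 2-byte field the caller asked for */
	*val = word >> (8 * (where & 0x3));
	if (size < 4)
		*val &= (1U << (size * 8)) - 1;

	return PCIBIOS_SUCCESSFUL;
}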
diff --git a/drivers/pci/controller/dwc/Kconfig b/drivers/pci/controller/dwc/Kconfig
index ab96da43e0c2..519b59422b47 100644
--- a/drivers/pci/controller/dwc/Kconfig
+++ b/drivers/pci/controller/dwc/Kconfig
@@ -6,9 +6,21 @@ menu "DesignWare-based PCIe controllers"
config PCIE_DW
bool
+config PCIE_DW_DEBUGFS
+ bool "DesignWare PCIe debugfs entries"
+ depends on DEBUG_FS
+ depends on PCIE_DW_HOST || PCIE_DW_EP
+ help
+ Say Y here to enable debugfs entries for the PCIe controller. These
+ entries provide various debug features related to the controller and
+ expose the RAS DES capabilities such as Silicon Debug, Error Injection
+ and Statistical Counters.
+
config PCIE_DW_HOST
bool
select PCIE_DW
+ select IRQ_MSI_LIB
+ select PCI_HOST_COMMON
config PCIE_DW_EP
bool
@@ -27,6 +39,17 @@ config PCIE_AL
required only for DT-based platforms. ACPI platforms with the
Annapurna Labs PCIe controller don't need to enable this.
+config PCIE_AMD_MDB
+ bool "AMD MDB Versal2 PCIe controller"
+ depends on OF && (ARM64 || COMPILE_TEST)
+ depends on PCI_MSI
+ select PCIE_DW_HOST
+ help
+ Say Y here if you want to enable PCIe controller support on AMD
+ Versal2 SoCs. The AMD MDB Versal2 PCIe controller is based on
+ DesignWare IP and therefore the driver re-uses the DesignWare
+ core functions to implement the driver.
+
config PCI_MESON
tristate "Amlogic Meson PCIe controller"
default m if ARCH_MESON
@@ -233,6 +256,16 @@ config PCIE_TEGRA194_EP
in order to enable device-specific features PCIE_TEGRA194_EP must be
selected. This uses the DesignWare core.
+config PCIE_NXP_S32G
+ bool "NXP S32G PCIe controller (host mode)"
+ depends on ARCH_S32 || COMPILE_TEST
+ select PCIE_DW_HOST
+ help
+ Enable support for the PCIe controller in NXP S32G based boards to
+ work in Host mode. The controller is based on DesignWare IP and
+ can work either as RC or EP. In order to enable host-specific
+ features PCIE_NXP_S32G must be selected.
+
config PCIE_DW_PLAT
bool
@@ -265,12 +298,18 @@ config PCIE_DW_PLAT_EP
order to enable device-specific features PCI_DW_PLAT_EP must be
selected.
+config PCIE_QCOM_COMMON
+ bool
+
config PCIE_QCOM
bool "Qualcomm PCIe controller (host mode)"
depends on OF && (ARCH_QCOM || COMPILE_TEST)
depends on PCI_MSI
select PCIE_DW_HOST
select CRC8
+ select PCIE_QCOM_COMMON
+ select PCI_HOST_COMMON
+ select PCI_PWRCTRL_SLOT
help
Say Y here to enable PCIe controller support on Qualcomm SoCs. The
PCIe controller uses the DesignWare core plus Qualcomm-specific
@@ -281,21 +320,61 @@ config PCIE_QCOM_EP
depends on OF && (ARCH_QCOM || COMPILE_TEST)
depends on PCI_ENDPOINT
select PCIE_DW_EP
+ select PCIE_QCOM_COMMON
help
Say Y here to enable support for the PCIe controllers on Qualcomm SoCs
to work in endpoint mode. The PCIe controller uses the DesignWare core
plus Qualcomm-specific hardware wrappers.
-config PCIE_ROCKCHIP_DW_HOST
- bool "Rockchip DesignWare PCIe controller"
- select PCIE_DW
+config PCIE_RCAR_GEN4
+ tristate
+
+config PCIE_RCAR_GEN4_HOST
+ tristate "Renesas R-Car Gen4 PCIe controller (host mode)"
+ depends on ARCH_RENESAS || COMPILE_TEST
+ depends on PCI_MSI
select PCIE_DW_HOST
+ select PCIE_RCAR_GEN4
+ help
+ Say Y here if you want PCIe controller (host mode) on R-Car Gen4 SoCs.
+ To compile this driver as a module, choose M here: the module will be
+ called pcie-rcar-gen4.ko. This uses the DesignWare core.
+
+config PCIE_RCAR_GEN4_EP
+ tristate "Renesas R-Car Gen4 PCIe controller (endpoint mode)"
+ depends on ARCH_RENESAS || COMPILE_TEST
+ depends on PCI_ENDPOINT
+ select PCIE_DW_EP
+ select PCIE_RCAR_GEN4
+ help
+ Say Y here if you want PCIe controller (endpoint mode) on R-Car Gen4
+ SoCs. To compile this driver as a module, choose M here: the module
+ will be called pcie-rcar-gen4.ko. This uses the DesignWare core.
+
+config PCIE_ROCKCHIP_DW
+ bool
+
+config PCIE_ROCKCHIP_DW_HOST
+ bool "Rockchip DesignWare PCIe controller (host mode)"
depends on PCI_MSI
depends on ARCH_ROCKCHIP || COMPILE_TEST
depends on OF
+ select PCIE_DW_HOST
+ select PCIE_ROCKCHIP_DW
+ help
+ Enables support for the DesignWare PCIe controller in the
+ Rockchip SoC (except RK3399) to work in host mode.
+
+config PCIE_ROCKCHIP_DW_EP
+ bool "Rockchip DesignWare PCIe controller (endpoint mode)"
+ depends on ARCH_ROCKCHIP || COMPILE_TEST
+ depends on OF
+ depends on PCI_ENDPOINT
+ select PCIE_DW_EP
+ select PCIE_ROCKCHIP_DW
help
Enables support for the DesignWare PCIe controller in the
- Rockchip SoC except RK3399.
+ Rockchip SoC (except RK3399) to work in endpoint mode.
config PCI_EXYNOS
tristate "Samsung Exynos PCIe controller"
@@ -311,7 +390,7 @@ config PCI_EXYNOS
config PCIE_FU740
bool "SiFive FU740 PCIe controller"
depends on PCI_MSI
- depends on SOC_SIFIVE || COMPILE_TEST
+ depends on ARCH_SIFIVE || COMPILE_TEST
select PCIE_DW_HOST
help
Say Y here if you want PCIe controller support for the SiFive
@@ -337,6 +416,29 @@ config PCIE_UNIPHIER_EP
Say Y here if you want PCIe endpoint controller support on
UniPhier SoCs. This driver supports Pro5 SoC.
+config PCIE_SOPHGO_DW
+ bool "Sophgo DesignWare PCIe controller (host mode)"
+ depends on ARCH_SOPHGO || COMPILE_TEST
+ depends on PCI_MSI
+ depends on OF
+ select PCIE_DW_HOST
+ help
+ Say Y here if you want PCIe host controller support on
+ Sophgo SoCs.
+
+config PCIE_SPACEMIT_K1
+ tristate "SpacemiT K1 PCIe controller (host mode)"
+ depends on ARCH_SPACEMIT || COMPILE_TEST
+ depends on HAS_IOMEM
+ select PCIE_DW_HOST
+ select PCI_PWRCTRL_SLOT
+ default ARCH_SPACEMIT
+ help
+ Enables support for the DesignWare based PCIe controller in
+ the SpacemiT K1 SoC operating in host mode. Three controllers
+ are available on the K1 SoC; the first of these shares a PHY
+ with a USB 3.0 host controller (one or the other can be used).
+
config PCIE_SPEAR13XX
bool "STMicroelectronics SPEAr PCIe controller"
depends on ARCH_SPEAR13XX || COMPILE_TEST
@@ -345,6 +447,30 @@ config PCIE_SPEAR13XX
help
Say Y here if you want PCIe support on SPEAr13XX SoCs.
+config PCIE_STM32_HOST
+ tristate "STMicroelectronics STM32MP25 PCIe Controller (host mode)"
+ depends on ARCH_STM32 || COMPILE_TEST
+ depends on PCI_MSI
+ select PCIE_DW_HOST
+ help
+ Enables Root Complex (RC) support for the DesignWare core based PCIe
+ controller found in STM32MP25 SoC.
+
+ This driver can also be built as a module. If so, the module
+ will be called pcie-stm32.
+
+config PCIE_STM32_EP
+ tristate "STMicroelectronics STM32MP25 PCIe Controller (endpoint mode)"
+ depends on ARCH_STM32 || COMPILE_TEST
+ depends on PCI_ENDPOINT
+ select PCIE_DW_EP
+ help
+ Enables Endpoint (EP) support for the DesignWare core based PCIe
+ controller found in STM32MP25 SoC.
+
+ This driver can also be built as a module. If so, the module
+ will be called pcie-stm32-ep.
+
config PCI_DRA7XX
tristate
@@ -379,15 +505,21 @@ config PCI_DRA7XX_EP
to enable device-specific features PCI_DRA7XX_EP must be selected.
This uses the DesignWare core.
+# ARM32 platforms use hook_fault_code() and cannot support loadable modules.
config PCI_KEYSTONE
bool
+# On non-ARM32 platforms, loadable modules can be supported.
+config PCI_KEYSTONE_TRISTATE
+ tristate
+
config PCI_KEYSTONE_HOST
- bool "TI Keystone PCIe controller (host mode)"
+ tristate "TI Keystone PCIe controller (host mode)"
depends on ARCH_KEYSTONE || ARCH_K3 || COMPILE_TEST
depends on PCI_MSI
select PCIE_DW_HOST
- select PCI_KEYSTONE
+ select PCI_KEYSTONE if ARM
+ select PCI_KEYSTONE_TRISTATE if !ARM
help
Enables support for the PCIe controller in the Keystone SoC to
work in host mode. The PCI controller on Keystone is based on
@@ -395,11 +527,12 @@ config PCI_KEYSTONE_HOST
DesignWare core functions to implement the driver.
config PCI_KEYSTONE_EP
- bool "TI Keystone PCIe controller (endpoint mode)"
+ tristate "TI Keystone PCIe controller (endpoint mode)"
depends on ARCH_KEYSTONE || ARCH_K3 || COMPILE_TEST
depends on PCI_ENDPOINT
select PCIE_DW_EP
- select PCI_KEYSTONE
+ select PCI_KEYSTONE if ARM
+ select PCI_KEYSTONE_TRISTATE if !ARM
help
Enables support for the PCIe controller in the Keystone SoC to
work in endpoint mode. The PCI controller on Keystone is based
diff --git a/drivers/pci/controller/dwc/Makefile b/drivers/pci/controller/dwc/Makefile
index bf5c311875a1..67ba59c02038 100644
--- a/drivers/pci/controller/dwc/Makefile
+++ b/drivers/pci/controller/dwc/Makefile
@@ -1,22 +1,30 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_PCIE_DW) += pcie-designware.o
+obj-$(CONFIG_PCIE_DW_DEBUGFS) += pcie-designware-debugfs.o
obj-$(CONFIG_PCIE_DW_HOST) += pcie-designware-host.o
obj-$(CONFIG_PCIE_DW_EP) += pcie-designware-ep.o
obj-$(CONFIG_PCIE_DW_PLAT) += pcie-designware-plat.o
+obj-$(CONFIG_PCIE_AMD_MDB) += pcie-amd-mdb.o
obj-$(CONFIG_PCIE_BT1) += pcie-bt1.o
obj-$(CONFIG_PCI_DRA7XX) += pci-dra7xx.o
obj-$(CONFIG_PCI_EXYNOS) += pci-exynos.o
obj-$(CONFIG_PCIE_FU740) += pcie-fu740.o
obj-$(CONFIG_PCI_IMX6) += pci-imx6.o
+obj-$(CONFIG_PCIE_NXP_S32G) += pcie-nxp-s32g.o
obj-$(CONFIG_PCIE_SPEAR13XX) += pcie-spear13xx.o
+# ARM32 platforms use hook_fault_code() and cannot support loadable modules.
obj-$(CONFIG_PCI_KEYSTONE) += pci-keystone.o
+# On non-ARM32 platforms, loadable modules can be supported.
+obj-$(CONFIG_PCI_KEYSTONE_TRISTATE) += pci-keystone.o
obj-$(CONFIG_PCI_LAYERSCAPE) += pci-layerscape.o
obj-$(CONFIG_PCI_LAYERSCAPE_EP) += pci-layerscape-ep.o
+obj-$(CONFIG_PCIE_QCOM_COMMON) += pcie-qcom-common.o
obj-$(CONFIG_PCIE_QCOM) += pcie-qcom.o
obj-$(CONFIG_PCIE_QCOM_EP) += pcie-qcom-ep.o
obj-$(CONFIG_PCIE_ARMADA_8K) += pcie-armada8k.o
obj-$(CONFIG_PCIE_ARTPEC6) += pcie-artpec6.o
-obj-$(CONFIG_PCIE_ROCKCHIP_DW_HOST) += pcie-dw-rockchip.o
+obj-$(CONFIG_PCIE_ROCKCHIP_DW) += pcie-dw-rockchip.o
+obj-$(CONFIG_PCIE_SOPHGO_DW) += pcie-sophgo.o
obj-$(CONFIG_PCIE_INTEL_GW) += pcie-intel-gw.o
obj-$(CONFIG_PCIE_KEEMBAY) += pcie-keembay.o
obj-$(CONFIG_PCIE_KIRIN) += pcie-kirin.o
@@ -26,6 +34,10 @@ obj-$(CONFIG_PCIE_TEGRA194) += pcie-tegra194.o
obj-$(CONFIG_PCIE_UNIPHIER) += pcie-uniphier.o
obj-$(CONFIG_PCIE_UNIPHIER_EP) += pcie-uniphier-ep.o
obj-$(CONFIG_PCIE_VISCONTI_HOST) += pcie-visconti.o
+obj-$(CONFIG_PCIE_RCAR_GEN4) += pcie-rcar-gen4.o
+obj-$(CONFIG_PCIE_SPACEMIT_K1) += pcie-spacemit-k1.o
+obj-$(CONFIG_PCIE_STM32_HOST) += pcie-stm32.o
+obj-$(CONFIG_PCIE_STM32_EP) += pcie-stm32-ep.o
# The following drivers are for devices that use the generic ACPI
# pci_root.c driver but don't support standard ECAM config access.
diff --git a/drivers/pci/controller/dwc/pci-dra7xx.c b/drivers/pci/controller/dwc/pci-dra7xx.c
index 4ae807e7cf79..01cfd9aeb0b8 100644
--- a/drivers/pci/controller/dwc/pci-dra7xx.c
+++ b/drivers/pci/controller/dwc/pci-dra7xx.c
@@ -13,11 +13,11 @@
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
+#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_device.h>
-#include <linux/of_gpio.h>
+#include <linux/of.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
@@ -113,17 +113,17 @@ static inline void dra7xx_pcie_writel(struct dra7xx_pcie *pcie, u32 offset,
writel(value, pcie->base + offset);
}
-static u64 dra7xx_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 pci_addr)
+static u64 dra7xx_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 cpu_addr)
{
- return pci_addr & DRA7XX_CPU_TO_BUS_ADDR;
+ return cpu_addr & DRA7XX_CPU_TO_BUS_ADDR;
}
-static int dra7xx_pcie_link_up(struct dw_pcie *pci)
+static bool dra7xx_pcie_link_up(struct dw_pcie *pci)
{
struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
u32 reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_PHY_CS);
- return !!(reg & LINK_UP);
+ return reg & LINK_UP;
}
static void dra7xx_pcie_stop_link(struct dw_pcie *pci)
@@ -359,8 +359,8 @@ static int dra7xx_pcie_init_irq_domain(struct dw_pcie_rp *pp)
irq_set_chained_handler_and_data(pp->irq, dra7xx_pcie_msi_irq_handler,
pp);
- dra7xx->irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
- &intx_domain_ops, pp);
+ dra7xx->irq_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node),
+ PCI_NUM_INTX, &intx_domain_ops, pp);
of_node_put(pcie_intc_node);
if (!dra7xx->irq_domain) {
dev_err(dev, "Failed to get a INTx IRQ domain\n");
@@ -371,7 +371,7 @@ static int dra7xx_pcie_init_irq_domain(struct dw_pcie_rp *pp)
}
static const struct dw_pcie_host_ops dra7xx_pcie_host_ops = {
- .host_init = dra7xx_pcie_host_init,
+ .init = dra7xx_pcie_host_init,
};
static void dra7xx_pcie_ep_init(struct dw_pcie_ep *ep)
@@ -386,7 +386,7 @@ static void dra7xx_pcie_ep_init(struct dw_pcie_ep *ep)
dra7xx_pcie_enable_wrapper_interrupts(dra7xx);
}
-static void dra7xx_pcie_raise_legacy_irq(struct dra7xx_pcie *dra7xx)
+static void dra7xx_pcie_raise_intx_irq(struct dra7xx_pcie *dra7xx)
{
dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_INTX_ASSERT, 0x1);
mdelay(1);
@@ -404,16 +404,16 @@ static void dra7xx_pcie_raise_msi_irq(struct dra7xx_pcie *dra7xx,
}
static int dra7xx_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
- enum pci_epc_irq_type type, u16 interrupt_num)
+ unsigned int type, u16 interrupt_num)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
switch (type) {
- case PCI_EPC_IRQ_LEGACY:
- dra7xx_pcie_raise_legacy_irq(dra7xx);
+ case PCI_IRQ_INTX:
+ dra7xx_pcie_raise_intx_irq(dra7xx);
break;
- case PCI_EPC_IRQ_MSI:
+ case PCI_IRQ_MSI:
dra7xx_pcie_raise_msi_irq(dra7xx, interrupt_num);
break;
default:
@@ -426,7 +426,6 @@ static int dra7xx_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
static const struct pci_epc_features dra7xx_pcie_epc_features = {
.linkup_notifier = true,
.msi_capable = true,
- .msix_capable = false,
};
static const struct pci_epc_features*
@@ -436,7 +435,7 @@ dra7xx_pcie_get_features(struct dw_pcie_ep *ep)
}
static const struct dw_pcie_ep_ops pcie_ep_ops = {
- .ep_init = dra7xx_pcie_ep_init,
+ .init = dra7xx_pcie_ep_init,
.raise_irq = dra7xx_pcie_raise_irq,
.get_features = dra7xx_pcie_get_features,
};
@@ -467,6 +466,15 @@ static int dra7xx_add_pcie_ep(struct dra7xx_pcie *dra7xx,
return ret;
}
+ ret = dw_pcie_ep_init_registers(ep);
+ if (ret) {
+ dev_err(dev, "Failed to initialize DWC endpoint registers\n");
+ dw_pcie_ep_deinit(ep);
+ return ret;
+ }
+
+ pci_epc_init_notify(ep->epc);
+
return 0;
}
@@ -626,30 +634,20 @@ static int dra7xx_pcie_unaligned_memaccess(struct device *dev)
{
int ret;
struct device_node *np = dev->of_node;
- struct of_phandle_args args;
+ unsigned int args[2];
struct regmap *regmap;
- regmap = syscon_regmap_lookup_by_phandle(np,
- "ti,syscon-unaligned-access");
+ regmap = syscon_regmap_lookup_by_phandle_args(np, "ti,syscon-unaligned-access",
+ 2, args);
if (IS_ERR(regmap)) {
dev_dbg(dev, "can't get ti,syscon-unaligned-access\n");
return -EINVAL;
}
- ret = of_parse_phandle_with_fixed_args(np, "ti,syscon-unaligned-access",
- 2, 0, &args);
- if (ret) {
- dev_err(dev, "failed to parse ti,syscon-unaligned-access\n");
- return ret;
- }
-
- ret = regmap_update_bits(regmap, args.args[0], args.args[1],
- args.args[1]);
+ ret = regmap_update_bits(regmap, args[0], args[1], args[1]);
if (ret)
dev_err(dev, "failed to enable unaligned access\n");
- of_node_put(args.np);
-
return ret;
}
@@ -662,18 +660,13 @@ static int dra7xx_pcie_configure_two_lane(struct device *dev,
u32 mask;
u32 val;
- pcie_syscon = syscon_regmap_lookup_by_phandle(np, "ti,syscon-lane-sel");
+ pcie_syscon = syscon_regmap_lookup_by_phandle_args(np, "ti,syscon-lane-sel",
+ 1, &pcie_reg);
if (IS_ERR(pcie_syscon)) {
dev_err(dev, "unable to get ti,syscon-lane-sel\n");
return -EINVAL;
}
- if (of_property_read_u32_index(np, "ti,syscon-lane-sel", 1,
- &pcie_reg)) {
- dev_err(dev, "couldn't get lane selection reg offset\n");
- return -EINVAL;
- }
-
mask = b1co_mode_sel_mask | PCIE_B0_B1_TSYNCEN;
val = PCIE_B1C0_MODE_SEL | PCIE_B0_B1_TSYNCEN;
regmap_update_bits(pcie_syscon, pcie_reg, mask, val);
@@ -841,14 +834,21 @@ static int dra7xx_pcie_probe(struct platform_device *pdev)
dra7xx->mode = mode;
ret = devm_request_threaded_irq(dev, irq, NULL, dra7xx_pcie_irq_handler,
- IRQF_SHARED, "dra7xx-pcie-main", dra7xx);
+ IRQF_SHARED | IRQF_ONESHOT,
+ "dra7xx-pcie-main", dra7xx);
if (ret) {
dev_err(dev, "failed to request irq\n");
- goto err_gpio;
+ goto err_deinit;
}
return 0;
+err_deinit:
+ if (dra7xx->mode == DW_PCIE_RC_TYPE)
+ dw_pcie_host_deinit(&dra7xx->pci->pp);
+ else
+ dw_pcie_ep_deinit(&dra7xx->pci->ep);
+
err_gpio:
err_get_sync:
pm_runtime_put(dev);
diff --git a/drivers/pci/controller/dwc/pci-exynos.c b/drivers/pci/controller/dwc/pci-exynos.c
index ec5611005566..0bb7d4f5d784 100644
--- a/drivers/pci/controller/dwc/pci-exynos.c
+++ b/drivers/pci/controller/dwc/pci-exynos.c
@@ -14,11 +14,11 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/of_device.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
#include <linux/regulator/consumer.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include "pcie-designware.h"
@@ -53,44 +53,11 @@
struct exynos_pcie {
struct dw_pcie pci;
- void __iomem *elbi_base;
- struct clk *clk;
- struct clk *bus_clk;
+ struct clk_bulk_data *clks;
struct phy *phy;
struct regulator_bulk_data supplies[2];
};
-static int exynos_pcie_init_clk_resources(struct exynos_pcie *ep)
-{
- struct device *dev = ep->pci.dev;
- int ret;
-
- ret = clk_prepare_enable(ep->clk);
- if (ret) {
- dev_err(dev, "cannot enable pcie rc clock");
- return ret;
- }
-
- ret = clk_prepare_enable(ep->bus_clk);
- if (ret) {
- dev_err(dev, "cannot enable pcie bus clock");
- goto err_bus_clk;
- }
-
- return 0;
-
-err_bus_clk:
- clk_disable_unprepare(ep->clk);
-
- return ret;
-}
-
-static void exynos_pcie_deinit_clk_resources(struct exynos_pcie *ep)
-{
- clk_disable_unprepare(ep->bus_clk);
- clk_disable_unprepare(ep->clk);
-}
-
static void exynos_pcie_writel(void __iomem *base, u32 val, u32 reg)
{
writel(val, base + reg);
@@ -103,73 +70,78 @@ static u32 exynos_pcie_readl(void __iomem *base, u32 reg)
static void exynos_pcie_sideband_dbi_w_mode(struct exynos_pcie *ep, bool on)
{
+ struct dw_pcie *pci = &ep->pci;
u32 val;
- val = exynos_pcie_readl(ep->elbi_base, PCIE_ELBI_SLV_AWMISC);
+ val = exynos_pcie_readl(pci->elbi_base, PCIE_ELBI_SLV_AWMISC);
if (on)
val |= PCIE_ELBI_SLV_DBI_ENABLE;
else
val &= ~PCIE_ELBI_SLV_DBI_ENABLE;
- exynos_pcie_writel(ep->elbi_base, val, PCIE_ELBI_SLV_AWMISC);
+ exynos_pcie_writel(pci->elbi_base, val, PCIE_ELBI_SLV_AWMISC);
}
static void exynos_pcie_sideband_dbi_r_mode(struct exynos_pcie *ep, bool on)
{
+ struct dw_pcie *pci = &ep->pci;
u32 val;
- val = exynos_pcie_readl(ep->elbi_base, PCIE_ELBI_SLV_ARMISC);
+ val = exynos_pcie_readl(pci->elbi_base, PCIE_ELBI_SLV_ARMISC);
if (on)
val |= PCIE_ELBI_SLV_DBI_ENABLE;
else
val &= ~PCIE_ELBI_SLV_DBI_ENABLE;
- exynos_pcie_writel(ep->elbi_base, val, PCIE_ELBI_SLV_ARMISC);
+ exynos_pcie_writel(pci->elbi_base, val, PCIE_ELBI_SLV_ARMISC);
}
static void exynos_pcie_assert_core_reset(struct exynos_pcie *ep)
{
+ struct dw_pcie *pci = &ep->pci;
u32 val;
- val = exynos_pcie_readl(ep->elbi_base, PCIE_CORE_RESET);
+ val = exynos_pcie_readl(pci->elbi_base, PCIE_CORE_RESET);
val &= ~PCIE_CORE_RESET_ENABLE;
- exynos_pcie_writel(ep->elbi_base, val, PCIE_CORE_RESET);
- exynos_pcie_writel(ep->elbi_base, 0, PCIE_STICKY_RESET);
- exynos_pcie_writel(ep->elbi_base, 0, PCIE_NONSTICKY_RESET);
+ exynos_pcie_writel(pci->elbi_base, val, PCIE_CORE_RESET);
+ exynos_pcie_writel(pci->elbi_base, 0, PCIE_STICKY_RESET);
+ exynos_pcie_writel(pci->elbi_base, 0, PCIE_NONSTICKY_RESET);
}
static void exynos_pcie_deassert_core_reset(struct exynos_pcie *ep)
{
+ struct dw_pcie *pci = &ep->pci;
u32 val;
- val = exynos_pcie_readl(ep->elbi_base, PCIE_CORE_RESET);
+ val = exynos_pcie_readl(pci->elbi_base, PCIE_CORE_RESET);
val |= PCIE_CORE_RESET_ENABLE;
- exynos_pcie_writel(ep->elbi_base, val, PCIE_CORE_RESET);
- exynos_pcie_writel(ep->elbi_base, 1, PCIE_STICKY_RESET);
- exynos_pcie_writel(ep->elbi_base, 1, PCIE_NONSTICKY_RESET);
- exynos_pcie_writel(ep->elbi_base, 1, PCIE_APP_INIT_RESET);
- exynos_pcie_writel(ep->elbi_base, 0, PCIE_APP_INIT_RESET);
+ exynos_pcie_writel(pci->elbi_base, val, PCIE_CORE_RESET);
+ exynos_pcie_writel(pci->elbi_base, 1, PCIE_STICKY_RESET);
+ exynos_pcie_writel(pci->elbi_base, 1, PCIE_NONSTICKY_RESET);
+ exynos_pcie_writel(pci->elbi_base, 1, PCIE_APP_INIT_RESET);
+ exynos_pcie_writel(pci->elbi_base, 0, PCIE_APP_INIT_RESET);
}
static int exynos_pcie_start_link(struct dw_pcie *pci)
{
- struct exynos_pcie *ep = to_exynos_pcie(pci);
u32 val;
- val = exynos_pcie_readl(ep->elbi_base, PCIE_SW_WAKE);
+ val = exynos_pcie_readl(pci->elbi_base, PCIE_SW_WAKE);
val &= ~PCIE_BUS_EN;
- exynos_pcie_writel(ep->elbi_base, val, PCIE_SW_WAKE);
+ exynos_pcie_writel(pci->elbi_base, val, PCIE_SW_WAKE);
/* assert LTSSM enable */
- exynos_pcie_writel(ep->elbi_base, PCIE_ELBI_LTSSM_ENABLE,
+ exynos_pcie_writel(pci->elbi_base, PCIE_ELBI_LTSSM_ENABLE,
PCIE_APP_LTSSM_ENABLE);
return 0;
}
static void exynos_pcie_clear_irq_pulse(struct exynos_pcie *ep)
{
- u32 val = exynos_pcie_readl(ep->elbi_base, PCIE_IRQ_PULSE);
+ struct dw_pcie *pci = &ep->pci;
- exynos_pcie_writel(ep->elbi_base, val, PCIE_IRQ_PULSE);
+ u32 val = exynos_pcie_readl(pci->elbi_base, PCIE_IRQ_PULSE);
+
+ exynos_pcie_writel(pci->elbi_base, val, PCIE_IRQ_PULSE);
}
static irqreturn_t exynos_pcie_irq_handler(int irq, void *arg)
@@ -182,12 +154,14 @@ static irqreturn_t exynos_pcie_irq_handler(int irq, void *arg)
static void exynos_pcie_enable_irq_pulse(struct exynos_pcie *ep)
{
+ struct dw_pcie *pci = &ep->pci;
+
u32 val = IRQ_INTA_ASSERT | IRQ_INTB_ASSERT |
IRQ_INTC_ASSERT | IRQ_INTD_ASSERT;
- exynos_pcie_writel(ep->elbi_base, val, PCIE_IRQ_EN_PULSE);
- exynos_pcie_writel(ep->elbi_base, 0, PCIE_IRQ_EN_LEVEL);
- exynos_pcie_writel(ep->elbi_base, 0, PCIE_IRQ_EN_SPECIAL);
+ exynos_pcie_writel(pci->elbi_base, val, PCIE_IRQ_EN_PULSE);
+ exynos_pcie_writel(pci->elbi_base, 0, PCIE_IRQ_EN_LEVEL);
+ exynos_pcie_writel(pci->elbi_base, 0, PCIE_IRQ_EN_SPECIAL);
}
static u32 exynos_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base,
@@ -241,12 +215,11 @@ static struct pci_ops exynos_pci_ops = {
.write = exynos_pcie_wr_own_conf,
};
-static int exynos_pcie_link_up(struct dw_pcie *pci)
+static bool exynos_pcie_link_up(struct dw_pcie *pci)
{
- struct exynos_pcie *ep = to_exynos_pcie(pci);
- u32 val = exynos_pcie_readl(ep->elbi_base, PCIE_ELBI_RDLH_LINKUP);
+ u32 val = exynos_pcie_readl(pci->elbi_base, PCIE_ELBI_RDLH_LINKUP);
- return (val & PCIE_ELBI_XMLH_LINKUP);
+ return val & PCIE_ELBI_XMLH_LINKUP;
}
static int exynos_pcie_host_init(struct dw_pcie_rp *pp)
@@ -268,7 +241,7 @@ static int exynos_pcie_host_init(struct dw_pcie_rp *pp)
}
static const struct dw_pcie_host_ops exynos_pcie_host_ops = {
- .host_init = exynos_pcie_host_init,
+ .init = exynos_pcie_host_init,
};
static int exynos_add_pcie_port(struct exynos_pcie *ep,
@@ -327,22 +300,9 @@ static int exynos_pcie_probe(struct platform_device *pdev)
if (IS_ERR(ep->phy))
return PTR_ERR(ep->phy);
- /* External Local Bus interface (ELBI) registers */
- ep->elbi_base = devm_platform_ioremap_resource_byname(pdev, "elbi");
- if (IS_ERR(ep->elbi_base))
- return PTR_ERR(ep->elbi_base);
-
- ep->clk = devm_clk_get(dev, "pcie");
- if (IS_ERR(ep->clk)) {
- dev_err(dev, "Failed to get pcie rc clock\n");
- return PTR_ERR(ep->clk);
- }
-
- ep->bus_clk = devm_clk_get(dev, "pcie_bus");
- if (IS_ERR(ep->bus_clk)) {
- dev_err(dev, "Failed to get pcie bus clock\n");
- return PTR_ERR(ep->bus_clk);
- }
+ ret = devm_clk_bulk_get_all_enabled(dev, &ep->clks);
+ if (ret < 0)
+ return ret;
ep->supplies[0].supply = "vdd18";
ep->supplies[1].supply = "vdd10";
@@ -351,10 +311,6 @@ static int exynos_pcie_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = exynos_pcie_init_clk_resources(ep);
- if (ret)
- return ret;
-
ret = regulator_bulk_enable(ARRAY_SIZE(ep->supplies), ep->supplies);
if (ret)
return ret;
@@ -369,13 +325,12 @@ static int exynos_pcie_probe(struct platform_device *pdev)
fail_probe:
phy_exit(ep->phy);
- exynos_pcie_deinit_clk_resources(ep);
regulator_bulk_disable(ARRAY_SIZE(ep->supplies), ep->supplies);
return ret;
}
-static int __exit exynos_pcie_remove(struct platform_device *pdev)
+static void exynos_pcie_remove(struct platform_device *pdev)
{
struct exynos_pcie *ep = platform_get_drvdata(pdev);
@@ -383,10 +338,7 @@ static int __exit exynos_pcie_remove(struct platform_device *pdev)
exynos_pcie_assert_core_reset(ep);
phy_power_off(ep->phy);
phy_exit(ep->phy);
- exynos_pcie_deinit_clk_resources(ep);
regulator_bulk_disable(ARRAY_SIZE(ep->supplies), ep->supplies);
-
- return 0;
}
static int exynos_pcie_suspend_noirq(struct device *dev)
@@ -431,7 +383,7 @@ static const struct of_device_id exynos_pcie_of_match[] = {
static struct platform_driver exynos_pcie_driver = {
.probe = exynos_pcie_probe,
- .remove = __exit_p(exynos_pcie_remove),
+ .remove = exynos_pcie_remove,
.driver = {
.name = "exynos-pcie",
.of_match_table = exynos_pcie_of_match,
@@ -439,5 +391,6 @@ static struct platform_driver exynos_pcie_driver = {
},
};
module_platform_driver(exynos_pcie_driver);
+MODULE_DESCRIPTION("Samsung Exynos PCIe host controller driver");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(of, exynos_pcie_of_match);
diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
index 27aaa2a6bf39..4668fc9648bf 100644
--- a/drivers/pci/controller/dwc/pci-imx6.c
+++ b/drivers/pci/controller/dwc/pci-imx6.c
@@ -11,14 +11,13 @@
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
#include <linux/mfd/syscon/imx7-iomuxc-gpr.h>
#include <linux/module.h>
-#include <linux/of_gpio.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
@@ -29,10 +28,12 @@
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/reset.h>
+#include <linux/phy/pcie.h>
#include <linux/phy/phy.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
+#include "../../pci.h"
#include "pcie-designware.h"
#define IMX8MQ_GPR_PCIE_REF_USE_PAD BIT(9)
@@ -40,11 +41,47 @@
#define IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE BIT(11)
#define IMX8MQ_GPR_PCIE_VREG_BYPASS BIT(12)
#define IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE GENMASK(11, 8)
-#define IMX8MQ_PCIE2_BASE_ADDR 0x33c00000
-#define to_imx6_pcie(x) dev_get_drvdata((x)->dev)
+#define IMX95_PCIE_PHY_GEN_CTRL 0x0
+#define IMX95_PCIE_REF_USE_PAD BIT(17)
-enum imx6_pcie_variants {
+#define IMX95_PCIE_PHY_MPLLA_CTRL 0x10
+#define IMX95_PCIE_PHY_MPLL_STATE BIT(30)
+
+#define IMX95_PCIE_SS_RW_REG_0 0xf0
+#define IMX95_PCIE_REF_CLKEN BIT(23)
+#define IMX95_PCIE_PHY_CR_PARA_SEL BIT(9)
+#define IMX95_PCIE_SS_RW_REG_1 0xf4
+#define IMX95_PCIE_SYS_AUX_PWR_DET BIT(31)
+
+#define IMX95_PE0_GEN_CTRL_1 0x1050
+#define IMX95_PCIE_DEVICE_TYPE GENMASK(3, 0)
+
+#define IMX95_PE0_GEN_CTRL_3 0x1058
+#define IMX95_PCIE_LTSSM_EN BIT(0)
+
+#define IMX95_PE0_LUT_ACSCTRL 0x1008
+#define IMX95_PEO_LUT_RWA BIT(16)
+#define IMX95_PE0_LUT_ENLOC GENMASK(4, 0)
+
+#define IMX95_PE0_LUT_DATA1 0x100c
+#define IMX95_PE0_LUT_VLD BIT(31)
+#define IMX95_PE0_LUT_DAC_ID GENMASK(10, 8)
+#define IMX95_PE0_LUT_STREAM_ID GENMASK(5, 0)
+
+#define IMX95_PE0_LUT_DATA2 0x1010
+#define IMX95_PE0_LUT_REQID GENMASK(31, 16)
+#define IMX95_PE0_LUT_MASK GENMASK(15, 0)
+
+#define IMX95_SID_MASK GENMASK(5, 0)
+#define IMX95_MAX_LUT 32
+
+#define IMX95_PCIE_RST_CTRL 0x3010
+#define IMX95_PCIE_COLD_RST BIT(0)
+
+#define to_imx_pcie(x) dev_get_drvdata((x)->dev)
+
+enum imx_pcie_variants {
IMX6Q,
IMX6SX,
IMX6QP,
@@ -52,39 +89,71 @@ enum imx6_pcie_variants {
IMX8MQ,
IMX8MM,
IMX8MP,
+ IMX8Q,
+ IMX95,
IMX8MQ_EP,
IMX8MM_EP,
IMX8MP_EP,
+ IMX8Q_EP,
+ IMX95_EP,
};
-#define IMX6_PCIE_FLAG_IMX6_PHY BIT(0)
-#define IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE BIT(1)
-#define IMX6_PCIE_FLAG_SUPPORTS_SUSPEND BIT(2)
+#define IMX_PCIE_FLAG_IMX_PHY BIT(0)
+#define IMX_PCIE_FLAG_SPEED_CHANGE_WORKAROUND BIT(1)
+#define IMX_PCIE_FLAG_SUPPORTS_SUSPEND BIT(2)
+#define IMX_PCIE_FLAG_HAS_PHYDRV BIT(3)
+#define IMX_PCIE_FLAG_HAS_APP_RESET BIT(4)
+#define IMX_PCIE_FLAG_HAS_PHY_RESET BIT(5)
+#define IMX_PCIE_FLAG_HAS_SERDES BIT(6)
+#define IMX_PCIE_FLAG_SUPPORT_64BIT BIT(7)
+#define IMX_PCIE_FLAG_CPU_ADDR_FIXUP BIT(8)
+/*
+ * Because of ERR005723 (PCIe does not support L2 power down) we need to
+ * work around suspend/resume on the devices affected by this erratum.
+ */
+#define IMX_PCIE_FLAG_BROKEN_SUSPEND BIT(9)
+#define IMX_PCIE_FLAG_HAS_LUT BIT(10)
+#define IMX_PCIE_FLAG_8GT_ECN_ERR051586 BIT(11)
+
+#define imx_check_flag(pci, val) (pci->drvdata->flags & val)
-struct imx6_pcie_drvdata {
- enum imx6_pcie_variants variant;
+#define IMX_PCIE_MAX_INSTANCES 2
+
+struct imx_pcie;
+
+struct imx_pcie_drvdata {
+ enum imx_pcie_variants variant;
enum dw_pcie_device_mode mode;
u32 flags;
int dbi_length;
const char *gpr;
+ const u32 ltssm_off;
+ const u32 ltssm_mask;
+ const u32 mode_off[IMX_PCIE_MAX_INSTANCES];
+ const u32 mode_mask[IMX_PCIE_MAX_INSTANCES];
+ const struct pci_epc_features *epc_features;
+ int (*init_phy)(struct imx_pcie *pcie);
+ int (*enable_ref_clk)(struct imx_pcie *pcie, bool enable);
+ int (*core_reset)(struct imx_pcie *pcie, bool assert);
+ int (*wait_pll_lock)(struct imx_pcie *pcie);
+ const struct dw_pcie_host_ops *ops;
+};
+
+struct imx_lut_data {
+ u32 data1;
+ u32 data2;
};
-struct imx6_pcie {
+struct imx_pcie {
struct dw_pcie *pci;
- int reset_gpio;
- bool gpio_active_high;
- bool link_is_up;
- struct clk *pcie_bus;
- struct clk *pcie_phy;
- struct clk *pcie_inbound_axi;
- struct clk *pcie;
- struct clk *pcie_aux;
+ struct gpio_desc *reset_gpiod;
+ struct clk_bulk_data *clks;
+ int num_clks;
struct regmap *iomuxc_gpr;
u16 msi_ctrl;
u32 controller_id;
struct reset_control *pciephy_reset;
struct reset_control *apps_reset;
- struct reset_control *turnoff_reset;
u32 tx_deemph_gen1;
u32 tx_deemph_gen2_3p5db;
u32 tx_deemph_gen2_6db;
@@ -94,12 +163,17 @@ struct imx6_pcie {
struct regulator *vph;
void __iomem *phy_base;
+ /* LUT data for pcie */
+ struct imx_lut_data luts[IMX95_MAX_LUT];
/* power domain for pcie */
struct device *pd_pcie;
/* power domain for pcie phy */
struct device *pd_pcie_phy;
struct phy *phy;
- const struct imx6_pcie_drvdata *drvdata;
+ const struct imx_pcie_drvdata *drvdata;
+
+ /* Ensure that only one device's LUT is configured at any given time */
+ struct mutex lock;
};
/* Parameters for the waiting for PCIe PHY PLL to lock on i.MX7 */
@@ -154,50 +228,77 @@ struct imx6_pcie {
#define PHY_RX_OVRD_IN_LO_RX_DATA_EN BIT(5)
#define PHY_RX_OVRD_IN_LO_RX_PLL_EN BIT(3)
-static unsigned int imx6_pcie_grp_offset(const struct imx6_pcie *imx6_pcie)
+static unsigned int imx_pcie_grp_offset(const struct imx_pcie *imx_pcie)
{
- WARN_ON(imx6_pcie->drvdata->variant != IMX8MQ &&
- imx6_pcie->drvdata->variant != IMX8MQ_EP &&
- imx6_pcie->drvdata->variant != IMX8MM &&
- imx6_pcie->drvdata->variant != IMX8MM_EP &&
- imx6_pcie->drvdata->variant != IMX8MP &&
- imx6_pcie->drvdata->variant != IMX8MP_EP);
- return imx6_pcie->controller_id == 1 ? IOMUXC_GPR16 : IOMUXC_GPR14;
+ WARN_ON(imx_pcie->drvdata->variant != IMX8MQ &&
+ imx_pcie->drvdata->variant != IMX8MQ_EP &&
+ imx_pcie->drvdata->variant != IMX8MM &&
+ imx_pcie->drvdata->variant != IMX8MM_EP &&
+ imx_pcie->drvdata->variant != IMX8MP &&
+ imx_pcie->drvdata->variant != IMX8MP_EP);
+ return imx_pcie->controller_id == 1 ? IOMUXC_GPR16 : IOMUXC_GPR14;
+}
+
+static int imx95_pcie_init_phy(struct imx_pcie *imx_pcie)
+{
+ /*
+ * ERR051624: The Controller Without Vaux Cannot Exit L23 Ready
+ * Through Beacon or PERST# De-assertion
+ *
+ * When the auxiliary power is not available, the controller
+ * cannot exit from L23 Ready with beacon or PERST# de-assertion
+ * when main power is not removed.
+ *
+ * Workaround: Set SS_RW_REG_1[SYS_AUX_PWR_DET] to 1.
+ */
+ regmap_set_bits(imx_pcie->iomuxc_gpr, IMX95_PCIE_SS_RW_REG_1,
+ IMX95_PCIE_SYS_AUX_PWR_DET);
+
+ regmap_update_bits(imx_pcie->iomuxc_gpr,
+ IMX95_PCIE_SS_RW_REG_0,
+ IMX95_PCIE_PHY_CR_PARA_SEL,
+ IMX95_PCIE_PHY_CR_PARA_SEL);
+
+ regmap_update_bits(imx_pcie->iomuxc_gpr,
+ IMX95_PCIE_PHY_GEN_CTRL,
+ IMX95_PCIE_REF_USE_PAD, 0);
+ regmap_update_bits(imx_pcie->iomuxc_gpr,
+ IMX95_PCIE_SS_RW_REG_0,
+ IMX95_PCIE_REF_CLKEN,
+ IMX95_PCIE_REF_CLKEN);
+
+ return 0;
}
-static void imx6_pcie_configure_type(struct imx6_pcie *imx6_pcie)
+static void imx_pcie_configure_type(struct imx_pcie *imx_pcie)
{
- unsigned int mask, val, mode;
+ const struct imx_pcie_drvdata *drvdata = imx_pcie->drvdata;
+ unsigned int mask, val, mode, id;
- if (imx6_pcie->drvdata->mode == DW_PCIE_EP_TYPE)
+ if (drvdata->mode == DW_PCIE_EP_TYPE)
mode = PCI_EXP_TYPE_ENDPOINT;
else
mode = PCI_EXP_TYPE_ROOT_PORT;
- switch (imx6_pcie->drvdata->variant) {
- case IMX8MQ:
- case IMX8MQ_EP:
- if (imx6_pcie->controller_id == 1) {
- mask = IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE;
- val = FIELD_PREP(IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE,
- mode);
- } else {
- mask = IMX6Q_GPR12_DEVICE_TYPE;
- val = FIELD_PREP(IMX6Q_GPR12_DEVICE_TYPE, mode);
- }
- break;
- default:
- mask = IMX6Q_GPR12_DEVICE_TYPE;
- val = FIELD_PREP(IMX6Q_GPR12_DEVICE_TYPE, mode);
- break;
- }
+ id = imx_pcie->controller_id;
+
+ /* If mode_mask is 0, generic PHY driver is used to set the mode */
+ if (!drvdata->mode_mask[0])
+ return;
+
+ /* If mode_mask[id] is 0, each controller has its individual GPR */
+ if (!drvdata->mode_mask[id])
+ id = 0;
+
+ mask = drvdata->mode_mask[id];
+ val = mode << (ffs(mask) - 1);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, mask, val);
+ regmap_update_bits(imx_pcie->iomuxc_gpr, drvdata->mode_off[id], mask, val);
}
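A quick aside on the open-coded field placement above: for a contiguous mask, val = mode << (ffs(mask) - 1) shifts the mode value to the lowest set bit of the mask, i.e. it produces the same result as FIELD_PREP(). A small worked example using the IMX8MQ GPR12 field defined earlier in this file:

	u32 mask = IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE;	/* GENMASK(11, 8) == 0x0f00 */
	u32 mode = PCI_EXP_TYPE_ROOT_PORT;		/* 0x4 */
	u32 val  = mode << (ffs(mask) - 1);		/* ffs(0x0f00) == 9, so val == 0x400 */
	/* same as FIELD_PREP(IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE, mode) for a contiguous mask */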
-static int pcie_phy_poll_ack(struct imx6_pcie *imx6_pcie, bool exp_val)
+static int pcie_phy_poll_ack(struct imx_pcie *imx_pcie, bool exp_val)
{
- struct dw_pcie *pci = imx6_pcie->pci;
+ struct dw_pcie *pci = imx_pcie->pci;
bool val;
u32 max_iterations = 10;
u32 wait_counter = 0;
@@ -216,9 +317,9 @@ static int pcie_phy_poll_ack(struct imx6_pcie *imx6_pcie, bool exp_val)
return -ETIMEDOUT;
}
-static int pcie_phy_wait_ack(struct imx6_pcie *imx6_pcie, int addr)
+static int pcie_phy_wait_ack(struct imx_pcie *imx_pcie, int addr)
{
- struct dw_pcie *pci = imx6_pcie->pci;
+ struct dw_pcie *pci = imx_pcie->pci;
u32 val;
int ret;
@@ -228,24 +329,24 @@ static int pcie_phy_wait_ack(struct imx6_pcie *imx6_pcie, int addr)
val |= PCIE_PHY_CTRL_CAP_ADR;
dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);
- ret = pcie_phy_poll_ack(imx6_pcie, true);
+ ret = pcie_phy_poll_ack(imx_pcie, true);
if (ret)
return ret;
val = PCIE_PHY_CTRL_DATA(addr);
dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);
- return pcie_phy_poll_ack(imx6_pcie, false);
+ return pcie_phy_poll_ack(imx_pcie, false);
}
/* Read from the 16-bit PCIe PHY control registers (not memory-mapped) */
-static int pcie_phy_read(struct imx6_pcie *imx6_pcie, int addr, u16 *data)
+static int pcie_phy_read(struct imx_pcie *imx_pcie, int addr, u16 *data)
{
- struct dw_pcie *pci = imx6_pcie->pci;
+ struct dw_pcie *pci = imx_pcie->pci;
u32 phy_ctl;
int ret;
- ret = pcie_phy_wait_ack(imx6_pcie, addr);
+ ret = pcie_phy_wait_ack(imx_pcie, addr);
if (ret)
return ret;
@@ -253,7 +354,7 @@ static int pcie_phy_read(struct imx6_pcie *imx6_pcie, int addr, u16 *data)
phy_ctl = PCIE_PHY_CTRL_RD;
dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, phy_ctl);
- ret = pcie_phy_poll_ack(imx6_pcie, true);
+ ret = pcie_phy_poll_ack(imx_pcie, true);
if (ret)
return ret;
@@ -262,18 +363,18 @@ static int pcie_phy_read(struct imx6_pcie *imx6_pcie, int addr, u16 *data)
/* deassert Read signal */
dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, 0x00);
- return pcie_phy_poll_ack(imx6_pcie, false);
+ return pcie_phy_poll_ack(imx_pcie, false);
}
-static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, u16 data)
+static int pcie_phy_write(struct imx_pcie *imx_pcie, int addr, u16 data)
{
- struct dw_pcie *pci = imx6_pcie->pci;
+ struct dw_pcie *pci = imx_pcie->pci;
u32 var;
int ret;
/* write addr */
/* cap addr */
- ret = pcie_phy_wait_ack(imx6_pcie, addr);
+ ret = pcie_phy_wait_ack(imx_pcie, addr);
if (ret)
return ret;
@@ -284,7 +385,7 @@ static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, u16 data)
var |= PCIE_PHY_CTRL_CAP_DAT;
dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);
- ret = pcie_phy_poll_ack(imx6_pcie, true);
+ ret = pcie_phy_poll_ack(imx_pcie, true);
if (ret)
return ret;
@@ -293,7 +394,7 @@ static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, u16 data)
dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);
/* wait for ack de-assertion */
- ret = pcie_phy_poll_ack(imx6_pcie, false);
+ ret = pcie_phy_poll_ack(imx_pcie, false);
if (ret)
return ret;
@@ -302,7 +403,7 @@ static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, u16 data)
dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);
/* wait for ack */
- ret = pcie_phy_poll_ack(imx6_pcie, true);
+ ret = pcie_phy_poll_ack(imx_pcie, true);
if (ret)
return ret;
@@ -311,7 +412,7 @@ static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, u16 data)
dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);
/* wait for ack de-assertion */
- ret = pcie_phy_poll_ack(imx6_pcie, false);
+ ret = pcie_phy_poll_ack(imx_pcie, false);
if (ret)
return ret;
@@ -320,84 +421,68 @@ static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, u16 data)
return 0;
}
-static void imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie)
+static int imx8mq_pcie_init_phy(struct imx_pcie *imx_pcie)
{
- switch (imx6_pcie->drvdata->variant) {
- case IMX8MM:
- case IMX8MM_EP:
- case IMX8MP:
- case IMX8MP_EP:
- /*
- * The PHY initialization had been done in the PHY
- * driver, break here directly.
- */
- break;
- case IMX8MQ:
- case IMX8MQ_EP:
- /*
- * TODO: Currently this code assumes external
- * oscillator is being used
- */
- regmap_update_bits(imx6_pcie->iomuxc_gpr,
- imx6_pcie_grp_offset(imx6_pcie),
- IMX8MQ_GPR_PCIE_REF_USE_PAD,
- IMX8MQ_GPR_PCIE_REF_USE_PAD);
- /*
- * Regarding the datasheet, the PCIE_VPH is suggested
- * to be 1.8V. If the PCIE_VPH is supplied by 3.3V, the
- * VREG_BYPASS should be cleared to zero.
- */
- if (imx6_pcie->vph &&
- regulator_get_voltage(imx6_pcie->vph) > 3000000)
- regmap_update_bits(imx6_pcie->iomuxc_gpr,
- imx6_pcie_grp_offset(imx6_pcie),
- IMX8MQ_GPR_PCIE_VREG_BYPASS,
- 0);
- break;
- case IMX7D:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX7D_GPR12_PCIE_PHY_REFCLK_SEL, 0);
- break;
- case IMX6SX:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX6SX_GPR12_PCIE_RX_EQ_MASK,
- IMX6SX_GPR12_PCIE_RX_EQ_2);
- fallthrough;
- default:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ /* TODO: This code assumes external oscillator is being used */
+ regmap_update_bits(imx_pcie->iomuxc_gpr,
+ imx_pcie_grp_offset(imx_pcie),
+ IMX8MQ_GPR_PCIE_REF_USE_PAD,
+ IMX8MQ_GPR_PCIE_REF_USE_PAD);
+ /*
+ * Per the datasheet, the PCIE_VPH is suggested to be 1.8V. If the
+ * PCIE_VPH is supplied by 3.3V, the VREG_BYPASS should be cleared
+ * to zero.
+ */
+ if (imx_pcie->vph && regulator_get_voltage(imx_pcie->vph) > 3000000)
+ regmap_update_bits(imx_pcie->iomuxc_gpr,
+ imx_pcie_grp_offset(imx_pcie),
+ IMX8MQ_GPR_PCIE_VREG_BYPASS,
+ 0);
+
+ return 0;
+}
+
+static int imx_pcie_init_phy(struct imx_pcie *imx_pcie)
+{
+ regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);
- /* configure constant input signal to the pcie ctrl and phy */
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX6Q_GPR12_LOS_LEVEL, 9 << 4);
-
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
- IMX6Q_GPR8_TX_DEEMPH_GEN1,
- imx6_pcie->tx_deemph_gen1 << 0);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
- IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB,
- imx6_pcie->tx_deemph_gen2_3p5db << 6);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
- IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB,
- imx6_pcie->tx_deemph_gen2_6db << 12);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
- IMX6Q_GPR8_TX_SWING_FULL,
- imx6_pcie->tx_swing_full << 18);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
- IMX6Q_GPR8_TX_SWING_LOW,
- imx6_pcie->tx_swing_low << 25);
- break;
- }
+ /* configure constant input signal to the pcie ctrl and phy */
+ regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6Q_GPR12_LOS_LEVEL, 9 << 4);
+
+ regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ IMX6Q_GPR8_TX_DEEMPH_GEN1,
+ imx_pcie->tx_deemph_gen1 << 0);
+ regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB,
+ imx_pcie->tx_deemph_gen2_3p5db << 6);
+ regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB,
+ imx_pcie->tx_deemph_gen2_6db << 12);
+ regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ IMX6Q_GPR8_TX_SWING_FULL,
+ imx_pcie->tx_swing_full << 18);
+ regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ IMX6Q_GPR8_TX_SWING_LOW,
+ imx_pcie->tx_swing_low << 25);
+ return 0;
+}
+
+static int imx6sx_pcie_init_phy(struct imx_pcie *imx_pcie)
+{
+ regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6SX_GPR12_PCIE_RX_EQ_MASK, IMX6SX_GPR12_PCIE_RX_EQ_2);
- imx6_pcie_configure_type(imx6_pcie);
+ return imx_pcie_init_phy(imx_pcie);
}
-static void imx7d_pcie_wait_for_phy_pll_lock(struct imx6_pcie *imx6_pcie)
+static void imx7d_pcie_wait_for_phy_pll_lock(struct imx_pcie *imx_pcie)
{
u32 val;
- struct device *dev = imx6_pcie->pci->dev;
+ struct device *dev = imx_pcie->pci->dev;
- if (regmap_read_poll_timeout(imx6_pcie->iomuxc_gpr,
+ if (regmap_read_poll_timeout(imx_pcie->iomuxc_gpr,
IOMUXC_GPR22, val,
val & IMX7D_GPR22_PCIE_PHY_PLL_LOCKED,
PHY_PLL_LOCK_WAIT_USLEEP_MAX,
@@ -405,15 +490,38 @@ static void imx7d_pcie_wait_for_phy_pll_lock(struct imx6_pcie *imx6_pcie)
dev_err(dev, "PCIe PLL lock timeout\n");
}
-static int imx6_setup_phy_mpll(struct imx6_pcie *imx6_pcie)
+static int imx95_pcie_wait_for_phy_pll_lock(struct imx_pcie *imx_pcie)
{
- unsigned long phy_rate = clk_get_rate(imx6_pcie->pcie_phy);
+ u32 val;
+ struct device *dev = imx_pcie->pci->dev;
+
+ if (regmap_read_poll_timeout(imx_pcie->iomuxc_gpr,
+ IMX95_PCIE_PHY_MPLLA_CTRL, val,
+ val & IMX95_PCIE_PHY_MPLL_STATE,
+ PHY_PLL_LOCK_WAIT_USLEEP_MAX,
+ PHY_PLL_LOCK_WAIT_TIMEOUT)) {
+ dev_err(dev, "PCIe PLL lock timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int imx_setup_phy_mpll(struct imx_pcie *imx_pcie)
+{
+ unsigned long phy_rate = 0;
int mult, div;
u16 val;
+ int i;
+ struct clk_bulk_data *clks = imx_pcie->clks;
- if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_IMX6_PHY))
+ if (!(imx_pcie->drvdata->flags & IMX_PCIE_FLAG_IMX_PHY))
return 0;
+ for (i = 0; i < imx_pcie->num_clks; i++)
+ if (strncmp(clks[i].id, "pcie_phy", 8) == 0)
+ phy_rate = clk_get_rate(clks[i].clk);
+
switch (phy_rate) {
case 125000000:
/*
@@ -430,46 +538,46 @@ static int imx6_setup_phy_mpll(struct imx6_pcie *imx6_pcie)
div = 1;
break;
default:
- dev_err(imx6_pcie->pci->dev,
+ dev_err(imx_pcie->pci->dev,
"Unsupported PHY reference clock rate %lu\n", phy_rate);
return -EINVAL;
}
- pcie_phy_read(imx6_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, &val);
+ pcie_phy_read(imx_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, &val);
val &= ~(PCIE_PHY_MPLL_MULTIPLIER_MASK <<
PCIE_PHY_MPLL_MULTIPLIER_SHIFT);
val |= mult << PCIE_PHY_MPLL_MULTIPLIER_SHIFT;
val |= PCIE_PHY_MPLL_MULTIPLIER_OVRD;
- pcie_phy_write(imx6_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, val);
+ pcie_phy_write(imx_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, val);
- pcie_phy_read(imx6_pcie, PCIE_PHY_ATEOVRD, &val);
+ pcie_phy_read(imx_pcie, PCIE_PHY_ATEOVRD, &val);
val &= ~(PCIE_PHY_ATEOVRD_REF_CLKDIV_MASK <<
PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT);
val |= div << PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT;
val |= PCIE_PHY_ATEOVRD_EN;
- pcie_phy_write(imx6_pcie, PCIE_PHY_ATEOVRD, val);
+ pcie_phy_write(imx_pcie, PCIE_PHY_ATEOVRD, val);
return 0;
}
-static void imx6_pcie_reset_phy(struct imx6_pcie *imx6_pcie)
+static void imx_pcie_reset_phy(struct imx_pcie *imx_pcie)
{
u16 tmp;
- if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_IMX6_PHY))
+ if (!(imx_pcie->drvdata->flags & IMX_PCIE_FLAG_IMX_PHY))
return;
- pcie_phy_read(imx6_pcie, PHY_RX_OVRD_IN_LO, &tmp);
+ pcie_phy_read(imx_pcie, PHY_RX_OVRD_IN_LO, &tmp);
tmp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN |
PHY_RX_OVRD_IN_LO_RX_PLL_EN);
- pcie_phy_write(imx6_pcie, PHY_RX_OVRD_IN_LO, tmp);
+ pcie_phy_write(imx_pcie, PHY_RX_OVRD_IN_LO, tmp);
usleep_range(2000, 3000);
- pcie_phy_read(imx6_pcie, PHY_RX_OVRD_IN_LO, &tmp);
+ pcie_phy_read(imx_pcie, PHY_RX_OVRD_IN_LO, &tmp);
tmp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN |
PHY_RX_OVRD_IN_LO_RX_PLL_EN);
- pcie_phy_write(imx6_pcie, PHY_RX_OVRD_IN_LO, tmp);
+ pcie_phy_write(imx_pcie, PHY_RX_OVRD_IN_LO, tmp);
}
#ifdef CONFIG_ARM
@@ -508,170 +616,112 @@ static int imx6q_pcie_abort_handler(unsigned long addr,
}
#endif
-static int imx6_pcie_attach_pd(struct device *dev)
+static int imx_pcie_attach_pd(struct device *dev)
{
- struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
+ struct imx_pcie *imx_pcie = dev_get_drvdata(dev);
struct device_link *link;
/* Do nothing when in a single power domain */
if (dev->pm_domain)
return 0;
- imx6_pcie->pd_pcie = dev_pm_domain_attach_by_name(dev, "pcie");
- if (IS_ERR(imx6_pcie->pd_pcie))
- return PTR_ERR(imx6_pcie->pd_pcie);
+ imx_pcie->pd_pcie = dev_pm_domain_attach_by_name(dev, "pcie");
+ if (IS_ERR(imx_pcie->pd_pcie))
+ return PTR_ERR(imx_pcie->pd_pcie);
/* Do nothing when power domain missing */
- if (!imx6_pcie->pd_pcie)
+ if (!imx_pcie->pd_pcie)
return 0;
- link = device_link_add(dev, imx6_pcie->pd_pcie,
+ link = device_link_add(dev, imx_pcie->pd_pcie,
DL_FLAG_STATELESS |
DL_FLAG_PM_RUNTIME |
DL_FLAG_RPM_ACTIVE);
if (!link) {
- dev_err(dev, "Failed to add device_link to pcie pd.\n");
+ dev_err(dev, "Failed to add device_link to pcie pd\n");
return -EINVAL;
}
- imx6_pcie->pd_pcie_phy = dev_pm_domain_attach_by_name(dev, "pcie_phy");
- if (IS_ERR(imx6_pcie->pd_pcie_phy))
- return PTR_ERR(imx6_pcie->pd_pcie_phy);
+ imx_pcie->pd_pcie_phy = dev_pm_domain_attach_by_name(dev, "pcie_phy");
+ if (IS_ERR(imx_pcie->pd_pcie_phy))
+ return PTR_ERR(imx_pcie->pd_pcie_phy);
- link = device_link_add(dev, imx6_pcie->pd_pcie_phy,
+ link = device_link_add(dev, imx_pcie->pd_pcie_phy,
DL_FLAG_STATELESS |
DL_FLAG_PM_RUNTIME |
DL_FLAG_RPM_ACTIVE);
if (!link) {
- dev_err(dev, "Failed to add device_link to pcie_phy pd.\n");
+ dev_err(dev, "Failed to add device_link to pcie_phy pd\n");
return -EINVAL;
}
return 0;
}
-static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
+static int imx6sx_pcie_enable_ref_clk(struct imx_pcie *imx_pcie, bool enable)
{
- struct dw_pcie *pci = imx6_pcie->pci;
- struct device *dev = pci->dev;
- unsigned int offset;
- int ret = 0;
-
- switch (imx6_pcie->drvdata->variant) {
- case IMX6SX:
- ret = clk_prepare_enable(imx6_pcie->pcie_inbound_axi);
- if (ret) {
- dev_err(dev, "unable to enable pcie_axi clock\n");
- break;
- }
+ regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6SX_GPR12_PCIE_TEST_POWERDOWN,
+ enable ? 0 : IMX6SX_GPR12_PCIE_TEST_POWERDOWN);
+ return 0;
+}
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX6SX_GPR12_PCIE_TEST_POWERDOWN, 0);
- break;
- case IMX6QP:
- case IMX6Q:
+static int imx6q_pcie_enable_ref_clk(struct imx_pcie *imx_pcie, bool enable)
+{
+ if (enable) {
/* power up core phy and enable ref clock */
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
- IMX6Q_GPR1_PCIE_TEST_PD, 0 << 18);
+ regmap_clear_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_TEST_PD);
/*
- * the async reset input need ref clock to sync internally,
+ * The async reset input needs the ref clock to sync internally;
 * when the ref clock comes after the reset, the internally synced
 * reset time is too short to meet the requirement.
- * add one ~10us delay here.
+ * Add a ~10us delay here.
*/
usleep_range(10, 100);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
- IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16);
- break;
- case IMX7D:
- break;
- case IMX8MM:
- case IMX8MM_EP:
- case IMX8MQ:
- case IMX8MQ_EP:
- case IMX8MP:
- case IMX8MP_EP:
- ret = clk_prepare_enable(imx6_pcie->pcie_aux);
- if (ret) {
- dev_err(dev, "unable to enable pcie_aux clock\n");
- break;
- }
-
- offset = imx6_pcie_grp_offset(imx6_pcie);
- /*
- * Set the over ride low and enabled
- * make sure that REF_CLK is turned on.
- */
- regmap_update_bits(imx6_pcie->iomuxc_gpr, offset,
- IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE,
- 0);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, offset,
- IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN,
- IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN);
- break;
+ regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_REF_CLK_EN);
+ } else {
+ regmap_clear_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_REF_CLK_EN);
+ regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_TEST_PD);
}
- return ret;
+ return 0;
}
-static void imx6_pcie_disable_ref_clk(struct imx6_pcie *imx6_pcie)
+static int imx8mm_pcie_enable_ref_clk(struct imx_pcie *imx_pcie, bool enable)
{
- switch (imx6_pcie->drvdata->variant) {
- case IMX6SX:
- clk_disable_unprepare(imx6_pcie->pcie_inbound_axi);
- break;
- case IMX6QP:
- case IMX6Q:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
- IMX6Q_GPR1_PCIE_REF_CLK_EN, 0);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
- IMX6Q_GPR1_PCIE_TEST_PD,
- IMX6Q_GPR1_PCIE_TEST_PD);
- break;
- case IMX7D:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX7D_GPR12_PCIE_PHY_REFCLK_SEL,
- IMX7D_GPR12_PCIE_PHY_REFCLK_SEL);
- break;
- case IMX8MM:
- case IMX8MM_EP:
- case IMX8MQ:
- case IMX8MQ_EP:
- case IMX8MP:
- case IMX8MP_EP:
- clk_disable_unprepare(imx6_pcie->pcie_aux);
- break;
- default:
- break;
- }
+ int offset = imx_pcie_grp_offset(imx_pcie);
+
+ regmap_update_bits(imx_pcie->iomuxc_gpr, offset,
+ IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE,
+ enable ? 0 : IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE);
+ regmap_update_bits(imx_pcie->iomuxc_gpr, offset,
+ IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN,
+ enable ? IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN : 0);
+ return 0;
}
-static int imx6_pcie_clk_enable(struct imx6_pcie *imx6_pcie)
+static int imx7d_pcie_enable_ref_clk(struct imx_pcie *imx_pcie, bool enable)
{
- struct dw_pcie *pci = imx6_pcie->pci;
+ regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX7D_GPR12_PCIE_PHY_REFCLK_SEL,
+ enable ? 0 : IMX7D_GPR12_PCIE_PHY_REFCLK_SEL);
+ return 0;
+}
+
+static int imx_pcie_clk_enable(struct imx_pcie *imx_pcie)
+{
+ struct dw_pcie *pci = imx_pcie->pci;
struct device *dev = pci->dev;
int ret;
- ret = clk_prepare_enable(imx6_pcie->pcie_phy);
- if (ret) {
- dev_err(dev, "unable to enable pcie_phy clock\n");
+ ret = clk_bulk_prepare_enable(imx_pcie->num_clks, imx_pcie->clks);
+ if (ret)
return ret;
- }
- ret = clk_prepare_enable(imx6_pcie->pcie_bus);
- if (ret) {
- dev_err(dev, "unable to enable pcie_bus clock\n");
- goto err_pcie_bus;
- }
-
- ret = clk_prepare_enable(imx6_pcie->pcie);
- if (ret) {
- dev_err(dev, "unable to enable pcie clock\n");
- goto err_pcie;
- }
-
- ret = imx6_pcie_enable_ref_clk(imx6_pcie);
- if (ret) {
- dev_err(dev, "unable to enable pcie ref clock\n");
- goto err_ref_clk;
+ if (imx_pcie->drvdata->enable_ref_clk) {
+ ret = imx_pcie->drvdata->enable_ref_clk(imx_pcie, true);
+ if (ret) {
+ dev_err(dev, "Failed to enable PCIe REFCLK\n");
+ goto err_ref_clk;
+ }
}
/* allow the clocks to stabilize */
@@ -679,122 +729,156 @@ static int imx6_pcie_clk_enable(struct imx6_pcie *imx6_pcie)
return 0;
err_ref_clk:
- clk_disable_unprepare(imx6_pcie->pcie);
-err_pcie:
- clk_disable_unprepare(imx6_pcie->pcie_bus);
-err_pcie_bus:
- clk_disable_unprepare(imx6_pcie->pcie_phy);
+ clk_bulk_disable_unprepare(imx_pcie->num_clks, imx_pcie->clks);
return ret;
}
-static void imx6_pcie_clk_disable(struct imx6_pcie *imx6_pcie)
+static void imx_pcie_clk_disable(struct imx_pcie *imx_pcie)
{
- imx6_pcie_disable_ref_clk(imx6_pcie);
- clk_disable_unprepare(imx6_pcie->pcie);
- clk_disable_unprepare(imx6_pcie->pcie_bus);
- clk_disable_unprepare(imx6_pcie->pcie_phy);
+ if (imx_pcie->drvdata->enable_ref_clk)
+ imx_pcie->drvdata->enable_ref_clk(imx_pcie, false);
+ clk_bulk_disable_unprepare(imx_pcie->num_clks, imx_pcie->clks);
}
-static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie)
+static int imx6sx_pcie_core_reset(struct imx_pcie *imx_pcie, bool assert)
{
- switch (imx6_pcie->drvdata->variant) {
- case IMX7D:
- case IMX8MQ:
- case IMX8MQ_EP:
- reset_control_assert(imx6_pcie->pciephy_reset);
- fallthrough;
- case IMX8MM:
- case IMX8MM_EP:
- case IMX8MP:
- case IMX8MP_EP:
- reset_control_assert(imx6_pcie->apps_reset);
- break;
- case IMX6SX:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX6SX_GPR12_PCIE_TEST_POWERDOWN,
- IMX6SX_GPR12_PCIE_TEST_POWERDOWN);
- /* Force PCIe PHY reset */
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5,
- IMX6SX_GPR5_PCIE_BTNRST_RESET,
- IMX6SX_GPR5_PCIE_BTNRST_RESET);
- break;
- case IMX6QP:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
- IMX6Q_GPR1_PCIE_SW_RST,
- IMX6Q_GPR1_PCIE_SW_RST);
- break;
- case IMX6Q:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
- IMX6Q_GPR1_PCIE_TEST_PD, 1 << 18);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
- IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16);
- break;
- }
+ if (assert)
+ regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6SX_GPR12_PCIE_TEST_POWERDOWN);
- /* Some boards don't have PCIe reset GPIO. */
- if (gpio_is_valid(imx6_pcie->reset_gpio))
- gpio_set_value_cansleep(imx6_pcie->reset_gpio,
- imx6_pcie->gpio_active_high);
+ /* Force PCIe PHY reset */
+ regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR5, IMX6SX_GPR5_PCIE_BTNRST_RESET,
+ assert ? IMX6SX_GPR5_PCIE_BTNRST_RESET : 0);
+ return 0;
+}
+
+static int imx6qp_pcie_core_reset(struct imx_pcie *imx_pcie, bool assert)
+{
+ regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_SW_RST,
+ assert ? IMX6Q_GPR1_PCIE_SW_RST : 0);
+ if (!assert)
+ usleep_range(200, 500);
+
+ return 0;
+}
+
+static int imx6q_pcie_core_reset(struct imx_pcie *imx_pcie, bool assert)
+{
+ if (!assert)
+ return 0;
+
+ regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_TEST_PD);
+ regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_REF_CLK_EN);
+
+ return 0;
}
-static int imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
+static int imx7d_pcie_core_reset(struct imx_pcie *imx_pcie, bool assert)
{
- struct dw_pcie *pci = imx6_pcie->pci;
+ struct dw_pcie *pci = imx_pcie->pci;
struct device *dev = pci->dev;
- switch (imx6_pcie->drvdata->variant) {
- case IMX8MQ:
- case IMX8MQ_EP:
- reset_control_deassert(imx6_pcie->pciephy_reset);
- break;
- case IMX7D:
- reset_control_deassert(imx6_pcie->pciephy_reset);
+ if (assert)
+ return 0;
- /* Workaround for ERR010728, failure of PCI-e PLL VCO to
- * oscillate, especially when cold. This turns off "Duty-cycle
- * Corrector" and other mysterious undocumented things.
- */
- if (likely(imx6_pcie->phy_base)) {
- /* De-assert DCC_FB_EN */
- writel(PCIE_PHY_CMN_REG4_DCC_FB_EN,
- imx6_pcie->phy_base + PCIE_PHY_CMN_REG4);
- /* Assert RX_EQS and RX_EQS_SEL */
- writel(PCIE_PHY_CMN_REG24_RX_EQ_SEL
- | PCIE_PHY_CMN_REG24_RX_EQ,
- imx6_pcie->phy_base + PCIE_PHY_CMN_REG24);
- /* Assert ATT_MODE */
- writel(PCIE_PHY_CMN_REG26_ATT_MODE,
- imx6_pcie->phy_base + PCIE_PHY_CMN_REG26);
- } else {
- dev_warn(dev, "Unable to apply ERR010728 workaround. DT missing fsl,imx7d-pcie-phy phandle ?\n");
- }
+ /*
+ * Workaround for ERR010728 (IMX7DS_2N09P, Rev. 1.1, 4/2023):
+ *
+ * PCIe: PLL may fail to lock under corner conditions.
+ *
+ * Initial VCO oscillation may fail under corner conditions such as
+ * cold temperature which will cause the PCIe PLL fail to lock in the
+ * initialization phase.
+ *
+ * The Duty-cycle Corrector calibration must be disabled.
+ *
+ * 1. De-assert the G_RST signal by clearing
+ * SRC_PCIEPHY_RCR[PCIEPHY_G_RST].
+ * 2. De-assert DCC_FB_EN by writing data “0x29” to the register
+ * address 0x306d0014 (PCIE_PHY_CMN_REG4).
+ * 3. Assert RX_EQS, RX_EQ_SEL by writing data “0x48” to the register
+ * address 0x306d0090 (PCIE_PHY_CMN_REG24).
+ * 4. Assert ATT_MODE by writing data “0xbc” to the register
+ * address 0x306d0098 (PCIE_PHY_CMN_REG26).
+ * 5. De-assert the CMN_RST signal by clearing register bit
+ * SRC_PCIEPHY_RCR[PCIEPHY_BTN]
+ */
- imx7d_pcie_wait_for_phy_pll_lock(imx6_pcie);
- break;
- case IMX6SX:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5,
- IMX6SX_GPR5_PCIE_BTNRST_RESET, 0);
- break;
- case IMX6QP:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
- IMX6Q_GPR1_PCIE_SW_RST, 0);
+ if (likely(imx_pcie->phy_base)) {
+ /* De-assert DCC_FB_EN */
+ writel(PCIE_PHY_CMN_REG4_DCC_FB_EN, imx_pcie->phy_base + PCIE_PHY_CMN_REG4);
+ /* Assert RX_EQS and RX_EQS_SEL */
+ writel(PCIE_PHY_CMN_REG24_RX_EQ_SEL | PCIE_PHY_CMN_REG24_RX_EQ,
+ imx_pcie->phy_base + PCIE_PHY_CMN_REG24);
+ /* Assert ATT_MODE */
+ writel(PCIE_PHY_CMN_REG26_ATT_MODE, imx_pcie->phy_base + PCIE_PHY_CMN_REG26);
+ } else {
+ dev_warn(dev, "Unable to apply ERR010728 workaround. DT missing fsl,imx7d-pcie-phy phandle ?\n");
+ }
+ imx7d_pcie_wait_for_phy_pll_lock(imx_pcie);
+ return 0;
+}
- usleep_range(200, 500);
- break;
- case IMX6Q: /* Nothing to do */
- case IMX8MM:
- case IMX8MM_EP:
- case IMX8MP:
- case IMX8MP_EP:
- break;
+static int imx95_pcie_core_reset(struct imx_pcie *imx_pcie, bool assert)
+{
+ u32 val;
+
+ if (assert) {
+ /*
+ * From i.MX95 PCIe PHY perspective, the COLD reset toggle
+ * should be complete after power-up by the following sequence.
+ *                 > 10us(at power-up)
+ *                 > 10ns(warm reset)
+ *               |<------------>|
+ *                ______________
+ * phy_reset ____/              \________________
+ *                                   ____________
+ * ref_clk_en_______________________/
+ * Toggle COLD reset aligned with this sequence for i.MX95 PCIe.
+ */
+ regmap_set_bits(imx_pcie->iomuxc_gpr, IMX95_PCIE_RST_CTRL,
+ IMX95_PCIE_COLD_RST);
+ /*
+ * Make sure the write to IMX95_PCIE_RST_CTRL is flushed to the
+ * hardware by doing a read. Otherwise, there is no guarantee
+ * that the write has reached the hardware before udelay().
+ */
+ regmap_read_bypassed(imx_pcie->iomuxc_gpr, IMX95_PCIE_RST_CTRL,
+ &val);
+ udelay(15);
+ regmap_clear_bits(imx_pcie->iomuxc_gpr, IMX95_PCIE_RST_CTRL,
+ IMX95_PCIE_COLD_RST);
+ regmap_read_bypassed(imx_pcie->iomuxc_gpr, IMX95_PCIE_RST_CTRL,
+ &val);
+ udelay(10);
}
+ return 0;
+}
+
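The regmap_read_bypassed() calls here exist only to flush the posted write, so the udelay() is timed from a write that has actually reached the device. A minimal sketch of the same ordering with plain MMIO accessors, purely illustrative and not part of this driver (the register offset and bit name are made up):

    #include <linux/bits.h>
    #include <linux/delay.h>
    #include <linux/io.h>

    #define EXAMPLE_CTRL	0x0	/* hypothetical control register offset */
    #define EXAMPLE_RST		BIT(0)	/* hypothetical reset bit */

    /* Sketch: assert a reset bit, read back to flush the posted write, then delay */
    static void example_pulse_reset(void __iomem *base)
    {
    	writel(readl(base + EXAMPLE_CTRL) | EXAMPLE_RST, base + EXAMPLE_CTRL);
    	(void)readl(base + EXAMPLE_CTRL);	/* write is now visible to the device */
    	udelay(15);				/* the delay starts after a completed write */
    	writel(readl(base + EXAMPLE_CTRL) & ~EXAMPLE_RST, base + EXAMPLE_CTRL);
    }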
+static void imx_pcie_assert_core_reset(struct imx_pcie *imx_pcie)
+{
+ reset_control_assert(imx_pcie->pciephy_reset);
+
+ if (imx_pcie->drvdata->core_reset)
+ imx_pcie->drvdata->core_reset(imx_pcie, true);
+
+ /* Some boards don't have PCIe reset GPIO. */
+ gpiod_set_value_cansleep(imx_pcie->reset_gpiod, 1);
+}
+
+static int imx_pcie_deassert_core_reset(struct imx_pcie *imx_pcie)
+{
+ reset_control_deassert(imx_pcie->pciephy_reset);
+
+ if (imx_pcie->drvdata->core_reset)
+ imx_pcie->drvdata->core_reset(imx_pcie, false);
+
/* Some boards don't have PCIe reset GPIO. */
- if (gpio_is_valid(imx6_pcie->reset_gpio)) {
+ if (imx_pcie->reset_gpiod) {
msleep(100);
- gpio_set_value_cansleep(imx6_pcie->reset_gpio,
- !imx6_pcie->gpio_active_high);
+ gpiod_set_value_cansleep(imx_pcie->reset_gpiod, 0);
/* Wait for 100ms after PERST# deassertion (PCIe r5.0, 6.6.1) */
msleep(100);
}
@@ -802,9 +886,9 @@ static int imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
return 0;
}
-static int imx6_pcie_wait_for_speed_change(struct imx6_pcie *imx6_pcie)
+static int imx_pcie_wait_for_speed_change(struct imx_pcie *imx_pcie)
{
- struct dw_pcie *pci = imx6_pcie->pci;
+ struct dw_pcie *pci = imx_pcie->pci;
struct device *dev = pci->dev;
u32 tmp;
unsigned int retries;
@@ -821,61 +905,49 @@ static int imx6_pcie_wait_for_speed_change(struct imx6_pcie *imx6_pcie)
return -ETIMEDOUT;
}
-static void imx6_pcie_ltssm_enable(struct device *dev)
+static void imx_pcie_ltssm_enable(struct device *dev)
{
- struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
-
- switch (imx6_pcie->drvdata->variant) {
- case IMX6Q:
- case IMX6SX:
- case IMX6QP:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX6Q_GPR12_PCIE_CTL_2,
- IMX6Q_GPR12_PCIE_CTL_2);
- break;
- case IMX7D:
- case IMX8MQ:
- case IMX8MQ_EP:
- case IMX8MM:
- case IMX8MM_EP:
- case IMX8MP:
- case IMX8MP_EP:
- reset_control_deassert(imx6_pcie->apps_reset);
- break;
- }
+ struct imx_pcie *imx_pcie = dev_get_drvdata(dev);
+ const struct imx_pcie_drvdata *drvdata = imx_pcie->drvdata;
+ u8 offset = dw_pcie_find_capability(imx_pcie->pci, PCI_CAP_ID_EXP);
+ u32 tmp;
+
+ tmp = dw_pcie_readl_dbi(imx_pcie->pci, offset + PCI_EXP_LNKCAP);
+ phy_set_speed(imx_pcie->phy, FIELD_GET(PCI_EXP_LNKCAP_SLS, tmp));
+ if (drvdata->ltssm_mask)
+ regmap_update_bits(imx_pcie->iomuxc_gpr, drvdata->ltssm_off, drvdata->ltssm_mask,
+ drvdata->ltssm_mask);
+
+ reset_control_deassert(imx_pcie->apps_reset);
}
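The speed handed to phy_set_speed() above is the raw Supported Link Speeds field of the Link Capabilities register, a small integer encoded per the PCIe spec (1 = 2.5 GT/s, 2 = 5.0 GT/s, 3 = 8.0 GT/s, and so on). A stand-alone sketch of that extraction, with the mask spelled out locally and a made-up register value:

    #include <stdint.h>
    #include <stdio.h>

    #define LNKCAP_SLS	0x0000000f	/* Supported Link Speeds field of PCI_EXP_LNKCAP */

    int main(void)
    {
    	uint32_t lnkcap = 0x00477c83;		/* made-up LNKCAP value */
    	unsigned int gen = lnkcap & LNKCAP_SLS;	/* 3, i.e. 8 GT/s, is what phy_set_speed() gets */

    	printf("max supported speed: Gen%u\n", gen);
    	return 0;
    }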
-static void imx6_pcie_ltssm_disable(struct device *dev)
+static void imx_pcie_ltssm_disable(struct device *dev)
{
- struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
-
- switch (imx6_pcie->drvdata->variant) {
- case IMX6Q:
- case IMX6SX:
- case IMX6QP:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX6Q_GPR12_PCIE_CTL_2, 0);
- break;
- case IMX7D:
- case IMX8MQ:
- case IMX8MQ_EP:
- case IMX8MM:
- case IMX8MM_EP:
- case IMX8MP:
- case IMX8MP_EP:
- reset_control_assert(imx6_pcie->apps_reset);
- break;
- }
+ struct imx_pcie *imx_pcie = dev_get_drvdata(dev);
+ const struct imx_pcie_drvdata *drvdata = imx_pcie->drvdata;
+
+ phy_set_speed(imx_pcie->phy, 0);
+ if (drvdata->ltssm_mask)
+ regmap_update_bits(imx_pcie->iomuxc_gpr, drvdata->ltssm_off,
+ drvdata->ltssm_mask, 0);
+
+ reset_control_assert(imx_pcie->apps_reset);
}
-static int imx6_pcie_start_link(struct dw_pcie *pci)
+static int imx_pcie_start_link(struct dw_pcie *pci)
{
- struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);
+ struct imx_pcie *imx_pcie = to_imx_pcie(pci);
struct device *dev = pci->dev;
u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
u32 tmp;
int ret;
+ if (!(imx_pcie->drvdata->flags &
+ IMX_PCIE_FLAG_SPEED_CHANGE_WORKAROUND)) {
+ imx_pcie_ltssm_enable(dev);
+ return 0;
+ }
+
/*
* Force Gen1 operation when starting the link. In case the link is
* started in Gen2 mode, there is a possibility the devices on the
@@ -889,18 +961,18 @@ static int imx6_pcie_start_link(struct dw_pcie *pci)
dw_pcie_dbi_ro_wr_dis(pci);
/* Start LTSSM. */
- imx6_pcie_ltssm_enable(dev);
+ imx_pcie_ltssm_enable(dev);
- ret = dw_pcie_wait_for_link(pci);
- if (ret)
- goto err_reset_phy;
+ if (pci->max_link_speed > 1) {
+ ret = dw_pcie_wait_for_link(pci);
+ if (ret)
+ goto err_reset_phy;
- if (pci->link_gen > 1) {
/* Allow faster modes after the link is up */
dw_pcie_dbi_ro_wr_en(pci);
tmp = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
tmp &= ~PCI_EXP_LNKCAP_SLS;
- tmp |= pci->link_gen;
+ tmp |= pci->max_link_speed;
dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, tmp);
/*
@@ -912,63 +984,226 @@ static int imx6_pcie_start_link(struct dw_pcie *pci)
dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, tmp);
dw_pcie_dbi_ro_wr_dis(pci);
- if (imx6_pcie->drvdata->flags &
- IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE) {
- /*
- * On i.MX7, DIRECT_SPEED_CHANGE behaves differently
- * from i.MX6 family when no link speed transition
- * occurs and we go Gen1 -> yep, Gen1. The difference
- * is that, in such case, it will not be cleared by HW
- * which will cause the following code to report false
- * failure.
- */
-
- ret = imx6_pcie_wait_for_speed_change(imx6_pcie);
- if (ret) {
- dev_err(dev, "Failed to bring link up!\n");
- goto err_reset_phy;
- }
- }
-
- /* Make sure link training is finished as well! */
- ret = dw_pcie_wait_for_link(pci);
- if (ret)
+ ret = imx_pcie_wait_for_speed_change(imx_pcie);
+ if (ret) {
+ dev_err(dev, "Failed to bring link up!\n");
goto err_reset_phy;
+ }
} else {
dev_info(dev, "Link: Only Gen1 is enabled\n");
}
- imx6_pcie->link_is_up = true;
- tmp = dw_pcie_readw_dbi(pci, offset + PCI_EXP_LNKSTA);
- dev_info(dev, "Link up, Gen%i\n", tmp & PCI_EXP_LNKSTA_CLS);
return 0;
err_reset_phy:
- imx6_pcie->link_is_up = false;
dev_dbg(dev, "PHY DEBUG_R0=0x%08x DEBUG_R1=0x%08x\n",
dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG0),
dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG1));
- imx6_pcie_reset_phy(imx6_pcie);
+ imx_pcie_reset_phy(imx_pcie);
return 0;
}
-static void imx6_pcie_stop_link(struct dw_pcie *pci)
+static void imx_pcie_stop_link(struct dw_pcie *pci)
{
struct device *dev = pci->dev;
/* Turn off PCIe LTSSM */
- imx6_pcie_ltssm_disable(dev);
+ imx_pcie_ltssm_disable(dev);
+}
+
+static int imx_pcie_add_lut(struct imx_pcie *imx_pcie, u16 rid, u8 sid)
+{
+ struct dw_pcie *pci = imx_pcie->pci;
+ struct device *dev = pci->dev;
+ u32 data1, data2;
+ int free = -1;
+ int i;
+
+ if (sid >= 64) {
+ dev_err(dev, "Invalid SID for index %d\n", sid);
+ return -EINVAL;
+ }
+
+ guard(mutex)(&imx_pcie->lock);
+
+ /*
+ * Iterate through all LUT entries to check for duplicate RID and
+ * identify the first available entry. Configure this available entry
+ * immediately after verification to avoid rescanning it.
+ */
+ for (i = 0; i < IMX95_MAX_LUT; i++) {
+ regmap_write(imx_pcie->iomuxc_gpr,
+ IMX95_PE0_LUT_ACSCTRL, IMX95_PEO_LUT_RWA | i);
+ regmap_read(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA1, &data1);
+
+ if (!(data1 & IMX95_PE0_LUT_VLD)) {
+ if (free < 0)
+ free = i;
+ continue;
+ }
+
+ regmap_read(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA2, &data2);
+
+ /* Do not add duplicate RID */
+ if (rid == FIELD_GET(IMX95_PE0_LUT_REQID, data2)) {
+ dev_warn(dev, "Existing LUT entry available for RID (%d)", rid);
+ return 0;
+ }
+ }
+
+ if (free < 0) {
+ dev_err(dev, "LUT entry is not available\n");
+ return -ENOSPC;
+ }
+
+ data1 = FIELD_PREP(IMX95_PE0_LUT_DAC_ID, 0);
+ data1 |= FIELD_PREP(IMX95_PE0_LUT_STREAM_ID, sid);
+ data1 |= IMX95_PE0_LUT_VLD;
+ regmap_write(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA1, data1);
+
+ if (imx_pcie->drvdata->mode == DW_PCIE_EP_TYPE)
+ data2 = 0x7; /* In the EP mode, only 'Device ID' is required */
+ else
+ data2 = IMX95_PE0_LUT_MASK; /* Match all bits of RID */
+ data2 |= FIELD_PREP(IMX95_PE0_LUT_REQID, rid);
+ regmap_write(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA2, data2);
+
+ regmap_write(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_ACSCTRL, free);
+
+ return 0;
+}
+
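The DATA1/DATA2 writes above are plain FIELD_PREP() packing: a 6-bit streamID plus a valid bit in one register, and the Requester ID plus its match mask in the other. The stand-alone sketch below mirrors that packing so the layout is easier to see; the bit positions are illustrative assumptions, not the real IMX95_PE0_LUT_* definitions.

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative layout only; the real masks are the IMX95_PE0_LUT_* macros. */
    #define LUT_VLD			(1u << 31)
    #define LUT_STREAM_ID(x)	(((uint32_t)(x) & 0x3f) << 0)	/* 6-bit streamID */
    #define LUT_REQID(x)		(((uint32_t)(x) & 0xffff) << 16)	/* Requester ID */
    #define LUT_REQID_MASK		0x0000ffffu			/* "match all RID bits" */

    int main(void)
    {
    	uint16_t rid = 0x0100;	/* bus 1, devfn 0 */
    	uint8_t  sid = 0x2a;	/* must fit in 6 bits */

    	uint32_t data1 = LUT_STREAM_ID(sid) | LUT_VLD;
    	uint32_t data2 = LUT_REQID(rid) | LUT_REQID_MASK;

    	printf("DATA1=0x%08x DATA2=0x%08x\n", data1, data2);
    	return 0;
    }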
+static void imx_pcie_remove_lut(struct imx_pcie *imx_pcie, u16 rid)
+{
+ u32 data2;
+ int i;
+
+ guard(mutex)(&imx_pcie->lock);
+
+ for (i = 0; i < IMX95_MAX_LUT; i++) {
+ regmap_write(imx_pcie->iomuxc_gpr,
+ IMX95_PE0_LUT_ACSCTRL, IMX95_PEO_LUT_RWA | i);
+ regmap_read(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA2, &data2);
+ if (FIELD_GET(IMX95_PE0_LUT_REQID, data2) == rid) {
+ regmap_write(imx_pcie->iomuxc_gpr,
+ IMX95_PE0_LUT_DATA1, 0);
+ regmap_write(imx_pcie->iomuxc_gpr,
+ IMX95_PE0_LUT_DATA2, 0);
+ regmap_write(imx_pcie->iomuxc_gpr,
+ IMX95_PE0_LUT_ACSCTRL, i);
+
+ break;
+ }
+ }
+}
+
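Both LUT helpers take the lock with guard(mutex)() from <linux/cleanup.h>, which releases it automatically when the scope ends, so none of the early returns above need an explicit unlock. A minimal sketch of the idiom (the counter and limit are hypothetical):

    #include <linux/cleanup.h>
    #include <linux/errno.h>
    #include <linux/mutex.h>

    static DEFINE_MUTEX(example_lock);
    static int example_count;		/* hypothetical shared state */

    static int example_bump(int limit)
    {
    	guard(mutex)(&example_lock);	/* unlocked automatically on every return path */

    	if (example_count >= limit)
    		return -ENOSPC;

    	example_count++;
    	return 0;
    }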
+static int imx_pcie_add_lut_by_rid(struct imx_pcie *imx_pcie, u32 rid)
+{
+ struct device *dev = imx_pcie->pci->dev;
+ struct device_node *target;
+ u32 sid_i, sid_m;
+ int err_i, err_m;
+ u32 sid = 0;
+
+ target = NULL;
+ err_i = of_map_id(dev->of_node, rid, "iommu-map", "iommu-map-mask",
+ &target, &sid_i);
+ if (target) {
+ of_node_put(target);
+ } else {
+ /*
+ * "target == NULL && err_i == 0" means RID out of map range.
+ * Use 1:1 map RID to streamID. Hardware can't support this
+ * because the streamID is only 6 bits
+ */
+ err_i = -EINVAL;
+ }
+
+ target = NULL;
+ err_m = of_map_id(dev->of_node, rid, "msi-map", "msi-map-mask",
+ &target, &sid_m);
+
+ /*
+ *  err_m   target
+ *      0     NULL     RID out of range. Use 1:1 map RID to
+ *                     streamID. Current hardware can't
+ *                     support it, so return -EINVAL.
+ *   != 0     NULL     msi-map does not exist, use built-in MSI
+ *      0  != NULL     Get correct streamID from RID
+ *   != 0  != NULL     Invalid combination
+ */
+ if (!err_m && !target)
+ return -EINVAL;
+ else if (target)
+ of_node_put(target); /* Find streamID map entry for RID in msi-map */
+
+ /*
+ * msi-map  iommu-map
+ *    N         N      DWC MSI Ctrl
+ *    Y         Y      ITS + SMMU, require the same SID
+ *    Y         N      ITS
+ *    N         Y      DWC MSI Ctrl + SMMU
+ */
+ if (err_i && err_m)
+ return 0;
+
+ if (!err_i && !err_m) {
+ /*
+ *      Glue Layer
+ *      <==========>
+ *   ┌─────┐                   ┌──────────┐
+ *   │ LUT │  6-bit streamID   │          │
+ *   │     │──────────────────►│   MSI    │
+ *   └─────┘   2-bit ctrl ID   │          │
+ *              ┌─────────────►│          │
+ *   (i.MX95)   │              │          │
+ *   00 PCIe0   │              │          │
+ *   01 ENETC   │              │          │
+ *   10 PCIe1   │              │          │
+ *              │              └──────────┘
+ * The MSI glue layer auto adds 2 bits controller ID ahead of
+ * streamID, so mask these 2 bits to get streamID. The
+ * IOMMU glue layer doesn't do that.
+ */
+ if (sid_i != (sid_m & IMX95_SID_MASK)) {
+ dev_err(dev, "iommu-map and msi-map entries mismatch!\n");
+ return -EINVAL;
+ }
+ }
+
+ if (!err_i)
+ sid = sid_i;
+ else if (!err_m)
+ sid = sid_m & IMX95_SID_MASK;
+
+ return imx_pcie_add_lut(imx_pcie, rid, sid);
+}
+
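The two tables above reduce to a simple preference order: use the iommu-map streamID when it exists, otherwise the msi-map one with the two controller-ID bits masked off, and program nothing when neither map is present. A stand-alone restatement of that selection (function name and return codes are illustrative; the mask mirrors IMX95_SID_MASK):

    #include <stdbool.h>
    #include <stdio.h>

    #define SID_MASK 0x3f	/* 6-bit streamID, mirrors IMX95_SID_MASK */

    /* Illustrative only: pick the streamID source the way the driver does. */
    static int pick_sid(bool have_iommu_map, unsigned int sid_i,
    		    bool have_msi_map, unsigned int sid_m, unsigned int *sid)
    {
    	if (!have_iommu_map && !have_msi_map)
    		return 1;	/* no mapping: nothing to program, built-in MSI is used */

    	if (have_iommu_map && have_msi_map && sid_i != (sid_m & SID_MASK))
    		return -1;	/* the two maps must agree on the streamID */

    	*sid = have_iommu_map ? sid_i : (sid_m & SID_MASK);
    	return 0;
    }

    int main(void)
    {
    	unsigned int sid;

    	if (pick_sid(true, 0x11, true, 0x51, &sid) == 0)	/* 0x51 & 0x3f == 0x11 */
    		printf("program LUT with streamID 0x%x\n", sid);
    	return 0;
    }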
+static int imx_pcie_enable_device(struct pci_host_bridge *bridge, struct pci_dev *pdev)
+{
+ struct imx_pcie *imx_pcie = to_imx_pcie(to_dw_pcie_from_pp(bridge->sysdata));
+
+ return imx_pcie_add_lut_by_rid(imx_pcie, pci_dev_id(pdev));
+}
+
+static void imx_pcie_disable_device(struct pci_host_bridge *bridge,
+ struct pci_dev *pdev)
+{
+ struct imx_pcie *imx_pcie;
+
+ imx_pcie = to_imx_pcie(to_dw_pcie_from_pp(bridge->sysdata));
+ imx_pcie_remove_lut(imx_pcie, pci_dev_id(pdev));
}
-static int imx6_pcie_host_init(struct dw_pcie_rp *pp)
+static int imx_pcie_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct device *dev = pci->dev;
- struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);
+ struct imx_pcie *imx_pcie = to_imx_pcie(pci);
int ret;
- if (imx6_pcie->vpcie) {
- ret = regulator_enable(imx6_pcie->vpcie);
+ if (imx_pcie->vpcie) {
+ ret = regulator_enable(imx_pcie->vpcie);
if (ret) {
dev_err(dev, "failed to enable vpcie regulator: %d\n",
ret);
@@ -976,78 +1211,153 @@ static int imx6_pcie_host_init(struct dw_pcie_rp *pp)
}
}
- imx6_pcie_assert_core_reset(imx6_pcie);
- imx6_pcie_init_phy(imx6_pcie);
+ if (pp->bridge && imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_LUT)) {
+ pp->bridge->enable_device = imx_pcie_enable_device;
+ pp->bridge->disable_device = imx_pcie_disable_device;
+ }
+
+ imx_pcie_assert_core_reset(imx_pcie);
+
+ if (imx_pcie->drvdata->init_phy)
+ imx_pcie->drvdata->init_phy(imx_pcie);
- ret = imx6_pcie_clk_enable(imx6_pcie);
+ imx_pcie_configure_type(imx_pcie);
+
+ ret = imx_pcie_clk_enable(imx_pcie);
if (ret) {
dev_err(dev, "unable to enable pcie clocks: %d\n", ret);
goto err_reg_disable;
}
- if (imx6_pcie->phy) {
- ret = phy_init(imx6_pcie->phy);
+ if (imx_pcie->phy) {
+ ret = phy_init(imx_pcie->phy);
if (ret) {
dev_err(dev, "pcie PHY power up failed\n");
goto err_clk_disable;
}
- }
- if (imx6_pcie->phy) {
- ret = phy_power_on(imx6_pcie->phy);
+ ret = phy_set_mode_ext(imx_pcie->phy, PHY_MODE_PCIE,
+ imx_pcie->drvdata->mode == DW_PCIE_EP_TYPE ?
+ PHY_MODE_PCIE_EP : PHY_MODE_PCIE_RC);
+ if (ret) {
+ dev_err(dev, "unable to set PCIe PHY mode\n");
+ goto err_phy_exit;
+ }
+
+ ret = phy_power_on(imx_pcie->phy);
if (ret) {
dev_err(dev, "waiting for PHY ready timeout!\n");
- goto err_phy_off;
+ goto err_phy_exit;
}
}
- ret = imx6_pcie_deassert_core_reset(imx6_pcie);
+ /* Make sure that PCIe LTSSM is cleared */
+ imx_pcie_ltssm_disable(dev);
+
+ ret = imx_pcie_deassert_core_reset(imx_pcie);
if (ret < 0) {
dev_err(dev, "pcie deassert core reset failed: %d\n", ret);
goto err_phy_off;
}
- imx6_setup_phy_mpll(imx6_pcie);
+ if (imx_pcie->drvdata->wait_pll_lock) {
+ ret = imx_pcie->drvdata->wait_pll_lock(imx_pcie);
+ if (ret < 0)
+ goto err_phy_off;
+ }
+
+ imx_setup_phy_mpll(imx_pcie);
return 0;
err_phy_off:
- if (imx6_pcie->phy)
- phy_exit(imx6_pcie->phy);
+ phy_power_off(imx_pcie->phy);
+err_phy_exit:
+ phy_exit(imx_pcie->phy);
err_clk_disable:
- imx6_pcie_clk_disable(imx6_pcie);
+ imx_pcie_clk_disable(imx_pcie);
err_reg_disable:
- if (imx6_pcie->vpcie)
- regulator_disable(imx6_pcie->vpcie);
+ if (imx_pcie->vpcie)
+ regulator_disable(imx_pcie->vpcie);
return ret;
}
-static void imx6_pcie_host_exit(struct dw_pcie_rp *pp)
+static void imx_pcie_host_exit(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);
+ struct imx_pcie *imx_pcie = to_imx_pcie(pci);
- if (imx6_pcie->phy) {
- if (phy_power_off(imx6_pcie->phy))
+ if (imx_pcie->phy) {
+ if (phy_power_off(imx_pcie->phy))
dev_err(pci->dev, "unable to power off PHY\n");
- phy_exit(imx6_pcie->phy);
+ phy_exit(imx_pcie->phy);
}
- imx6_pcie_clk_disable(imx6_pcie);
+ imx_pcie_clk_disable(imx_pcie);
+
+ if (imx_pcie->vpcie)
+ regulator_disable(imx_pcie->vpcie);
+}
+
+static void imx_pcie_host_post_init(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct imx_pcie *imx_pcie = to_imx_pcie(pci);
+ u32 val;
- if (imx6_pcie->vpcie)
- regulator_disable(imx6_pcie->vpcie);
+ if (imx_pcie->drvdata->flags & IMX_PCIE_FLAG_8GT_ECN_ERR051586) {
+ /*
+ * ERR051586: Compliance with 8GT/s Receiver Impedance ECN
+ *
+ * The default value of GEN3_RELATED_OFF[GEN3_ZRXDC_NONCOMPL]
+ * is 1 which makes receiver non-compliant with the ZRX-DC
+ * parameter for 2.5 GT/s when operating at 8 GT/s or higher.
+ * It causes unnecessary timeout in L1.
+ *
+ * Workaround: Program GEN3_RELATED_OFF[GEN3_ZRXDC_NONCOMPL]
+ * to 0.
+ */
+ dw_pcie_dbi_ro_wr_en(pci);
+ val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
+ val &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL;
+ dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
+ dw_pcie_dbi_ro_wr_dis(pci);
+ }
}
-static const struct dw_pcie_host_ops imx6_pcie_host_ops = {
- .host_init = imx6_pcie_host_init,
+/*
+ * In old DWC implementations, PCIE_ATU_INHIBIT_PAYLOAD in iATU Ctrl2
+ * register is reserved, so the generic DWC implementation of sending the
+ * PME_Turn_Off message using a dummy MMIO write cannot be used.
+ */
+static void imx_pcie_pme_turn_off(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct imx_pcie *imx_pcie = to_imx_pcie(pci);
+
+ regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12, IMX6SX_GPR12_PCIE_PM_TURN_OFF);
+ regmap_clear_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12, IMX6SX_GPR12_PCIE_PM_TURN_OFF);
+
+ usleep_range(PCIE_PME_TO_L2_TIMEOUT_US/10, PCIE_PME_TO_L2_TIMEOUT_US);
+}
+
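If PCIE_PME_TO_L2_TIMEOUT_US still carries its usual DWC core value of 10000, this sleeps between 1 ms and 10 ms, the same 1-10 ms window the removed imx6_pcie_pm_turnoff() allowed for downstream devices to answer PME_Turn_Off with PME_TO_Ack before L2 entry is forced.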
+static const struct dw_pcie_host_ops imx_pcie_host_ops = {
+ .init = imx_pcie_host_init,
+ .deinit = imx_pcie_host_exit,
+ .pme_turn_off = imx_pcie_pme_turn_off,
+};
+
+static const struct dw_pcie_host_ops imx_pcie_host_dw_pme_ops = {
+ .init = imx_pcie_host_init,
+ .deinit = imx_pcie_host_exit,
+ .post_init = imx_pcie_host_post_init,
};
static const struct dw_pcie_ops dw_pcie_ops = {
- .start_link = imx6_pcie_start_link,
- .stop_link = imx6_pcie_stop_link,
+ .start_link = imx_pcie_start_link,
+ .stop_link = imx_pcie_stop_link,
};
-static void imx6_pcie_ep_init(struct dw_pcie_ep *ep)
+static void imx_pcie_ep_init(struct dw_pcie_ep *ep)
{
enum pci_barno bar;
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
@@ -1056,18 +1366,17 @@ static void imx6_pcie_ep_init(struct dw_pcie_ep *ep)
dw_pcie_ep_reset_bar(pci, bar);
}
-static int imx6_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
- enum pci_epc_irq_type type,
- u16 interrupt_num)
+static int imx_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
+ unsigned int type, u16 interrupt_num)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
switch (type) {
- case PCI_EPC_IRQ_LEGACY:
- return dw_pcie_ep_raise_legacy_irq(ep, func_no);
- case PCI_EPC_IRQ_MSI:
+ case PCI_IRQ_INTX:
+ return dw_pcie_ep_raise_intx_irq(ep, func_no);
+ case PCI_IRQ_MSI:
return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
- case PCI_EPC_IRQ_MSIX:
+ case PCI_IRQ_MSIX:
return dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num);
default:
dev_err(pci->dev, "UNKNOWN IRQ type\n");
@@ -1078,182 +1387,226 @@ static int imx6_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
}
static const struct pci_epc_features imx8m_pcie_epc_features = {
- .linkup_notifier = false,
.msi_capable = true,
- .msix_capable = false,
- .reserved_bar = 1 << BAR_1 | 1 << BAR_3,
+ .bar[BAR_1] = { .type = BAR_RESERVED, },
+ .bar[BAR_3] = { .type = BAR_RESERVED, },
+ .bar[BAR_4] = { .type = BAR_FIXED, .fixed_size = SZ_256, },
+ .bar[BAR_5] = { .type = BAR_RESERVED, },
+ .align = SZ_64K,
+};
+
+static const struct pci_epc_features imx8q_pcie_epc_features = {
+ .msi_capable = true,
+ .bar[BAR_1] = { .type = BAR_RESERVED, },
+ .bar[BAR_3] = { .type = BAR_RESERVED, },
+ .bar[BAR_5] = { .type = BAR_RESERVED, },
.align = SZ_64K,
};
+/*
+ *      | Default | Default | Default | BAR Sizing
+ * BAR# | Enable? | Type    | Size    | Scheme
+ * =======================================================
+ * BAR0 | Enable  | 64-bit  | 1 MB    | Programmable Size
+ * BAR1 | Disable | 32-bit  | 64 KB   | Fixed Size
+ *        (BAR1 should be disabled if BAR0 is 64-bit)
+ * BAR2 | Enable  | 32-bit  | 1 MB    | Programmable Size
+ * BAR3 | Enable  | 32-bit  | 64 KB   | Programmable Size
+ * BAR4 | Enable  | 32-bit  | 1 MB    | Programmable Size
+ * BAR5 | Enable  | 32-bit  | 64 KB   | Programmable Size
+ */
+static const struct pci_epc_features imx95_pcie_epc_features = {
+ .msi_capable = true,
+ .bar[BAR_1] = { .type = BAR_FIXED, .fixed_size = SZ_64K, },
+ .align = SZ_4K,
+};
+
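The epc_features table is what the endpoint framework consults when an EPF driver requests BAR space, so marking BAR1 as BAR_FIXED at 64 KB above means an endpoint function gets exactly that window whether it asks for it or not. A stand-alone sketch that walks a simplified stand-in for such a table (the struct and names here are local, not the kernel's struct pci_epc_features):

    #include <stdio.h>

    enum bar_type { BAR_PROGRAMMABLE = 0, BAR_FIXED, BAR_RESERVED };

    struct bar_desc {
    	enum bar_type type;
    	unsigned long fixed_size;
    };

    /* Simplified stand-in for imx95_pcie_epc_features: only BAR1 is constrained. */
    static const struct bar_desc imx95_bars[6] = {
    	[1] = { .type = BAR_FIXED, .fixed_size = 64 * 1024 },
    };

    int main(void)
    {
    	static const char * const names[] = { "programmable", "fixed", "reserved" };
    	int i;

    	for (i = 0; i < 6; i++)
    		printf("BAR%d: %s, fixed size %lu bytes\n", i,
    		       names[imx95_bars[i].type], imx95_bars[i].fixed_size);
    	return 0;
    }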
static const struct pci_epc_features*
-imx6_pcie_ep_get_features(struct dw_pcie_ep *ep)
+imx_pcie_ep_get_features(struct dw_pcie_ep *ep)
{
- return &imx8m_pcie_epc_features;
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct imx_pcie *imx_pcie = to_imx_pcie(pci);
+
+ return imx_pcie->drvdata->epc_features;
}
static const struct dw_pcie_ep_ops pcie_ep_ops = {
- .ep_init = imx6_pcie_ep_init,
- .raise_irq = imx6_pcie_ep_raise_irq,
- .get_features = imx6_pcie_ep_get_features,
+ .init = imx_pcie_ep_init,
+ .raise_irq = imx_pcie_ep_raise_irq,
+ .get_features = imx_pcie_ep_get_features,
};
-static int imx6_add_pcie_ep(struct imx6_pcie *imx6_pcie,
+static int imx_add_pcie_ep(struct imx_pcie *imx_pcie,
struct platform_device *pdev)
{
int ret;
- unsigned int pcie_dbi2_offset;
struct dw_pcie_ep *ep;
- struct resource *res;
- struct dw_pcie *pci = imx6_pcie->pci;
+ struct dw_pcie *pci = imx_pcie->pci;
struct dw_pcie_rp *pp = &pci->pp;
struct device *dev = pci->dev;
- imx6_pcie_host_init(pp);
+ imx_pcie_host_init(pp);
ep = &pci->ep;
ep->ops = &pcie_ep_ops;
- switch (imx6_pcie->drvdata->variant) {
- case IMX8MQ_EP:
- case IMX8MM_EP:
- case IMX8MP_EP:
- pcie_dbi2_offset = SZ_1M;
- break;
- default:
- pcie_dbi2_offset = SZ_4K;
- break;
- }
- pci->dbi_base2 = pci->dbi_base + pcie_dbi2_offset;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
- if (!res)
- return -EINVAL;
+ if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_SUPPORT_64BIT))
+ dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
- ep->phys_base = res->start;
- ep->addr_size = resource_size(res);
- ep->page_size = SZ_64K;
+ ep->page_size = imx_pcie->drvdata->epc_features->align;
ret = dw_pcie_ep_init(ep);
if (ret) {
dev_err(dev, "failed to initialize endpoint\n");
return ret;
}
- /* Start LTSSM. */
- imx6_pcie_ltssm_enable(dev);
-
- return 0;
-}
-
-static void imx6_pcie_pm_turnoff(struct imx6_pcie *imx6_pcie)
-{
- struct device *dev = imx6_pcie->pci->dev;
+ imx_pcie_host_post_init(pp);
- /* Some variants have a turnoff reset in DT */
- if (imx6_pcie->turnoff_reset) {
- reset_control_assert(imx6_pcie->turnoff_reset);
- reset_control_deassert(imx6_pcie->turnoff_reset);
- goto pm_turnoff_sleep;
+ ret = dw_pcie_ep_init_registers(ep);
+ if (ret) {
+ dev_err(dev, "Failed to initialize DWC endpoint registers\n");
+ dw_pcie_ep_deinit(ep);
+ return ret;
}
- /* Others poke directly at IOMUXC registers */
- switch (imx6_pcie->drvdata->variant) {
- case IMX6SX:
- case IMX6QP:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX6SX_GPR12_PCIE_PM_TURN_OFF,
- IMX6SX_GPR12_PCIE_PM_TURN_OFF);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX6SX_GPR12_PCIE_PM_TURN_OFF, 0);
- break;
- default:
- dev_err(dev, "PME_Turn_Off not implemented\n");
- return;
- }
+ pci_epc_init_notify(ep->epc);
- /*
- * Components with an upstream port must respond to
- * PME_Turn_Off with PME_TO_Ack but we can't check.
- *
- * The standard recommends a 1-10ms timeout after which to
- * proceed anyway as if acks were received.
- */
-pm_turnoff_sleep:
- usleep_range(1000, 10000);
+ return 0;
}
-static void imx6_pcie_msi_save_restore(struct imx6_pcie *imx6_pcie, bool save)
+static void imx_pcie_msi_save_restore(struct imx_pcie *imx_pcie, bool save)
{
u8 offset;
u16 val;
- struct dw_pcie *pci = imx6_pcie->pci;
+ struct dw_pcie *pci = imx_pcie->pci;
if (pci_msi_enabled()) {
offset = dw_pcie_find_capability(pci, PCI_CAP_ID_MSI);
if (save) {
val = dw_pcie_readw_dbi(pci, offset + PCI_MSI_FLAGS);
- imx6_pcie->msi_ctrl = val;
+ imx_pcie->msi_ctrl = val;
} else {
dw_pcie_dbi_ro_wr_en(pci);
- val = imx6_pcie->msi_ctrl;
+ val = imx_pcie->msi_ctrl;
dw_pcie_writew_dbi(pci, offset + PCI_MSI_FLAGS, val);
dw_pcie_dbi_ro_wr_dis(pci);
}
}
}
-static int imx6_pcie_suspend_noirq(struct device *dev)
+static void imx_pcie_lut_save(struct imx_pcie *imx_pcie)
+{
+ u32 data1, data2;
+ int i;
+
+ for (i = 0; i < IMX95_MAX_LUT; i++) {
+ regmap_write(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_ACSCTRL,
+ IMX95_PEO_LUT_RWA | i);
+ regmap_read(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA1, &data1);
+ regmap_read(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA2, &data2);
+ if (data1 & IMX95_PE0_LUT_VLD) {
+ imx_pcie->luts[i].data1 = data1;
+ imx_pcie->luts[i].data2 = data2;
+ } else {
+ imx_pcie->luts[i].data1 = 0;
+ imx_pcie->luts[i].data2 = 0;
+ }
+ }
+}
+
+static void imx_pcie_lut_restore(struct imx_pcie *imx_pcie)
{
- struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
- struct dw_pcie_rp *pp = &imx6_pcie->pci->pp;
+ int i;
+
+ for (i = 0; i < IMX95_MAX_LUT; i++) {
+ if ((imx_pcie->luts[i].data1 & IMX95_PE0_LUT_VLD) == 0)
+ continue;
+
+ regmap_write(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA1,
+ imx_pcie->luts[i].data1);
+ regmap_write(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA2,
+ imx_pcie->luts[i].data2);
+ regmap_write(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_ACSCTRL, i);
+ }
+}
- if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_SUPPORTS_SUSPEND))
+static int imx_pcie_suspend_noirq(struct device *dev)
+{
+ struct imx_pcie *imx_pcie = dev_get_drvdata(dev);
+
+ if (!(imx_pcie->drvdata->flags & IMX_PCIE_FLAG_SUPPORTS_SUSPEND))
return 0;
- imx6_pcie_msi_save_restore(imx6_pcie, true);
- imx6_pcie_pm_turnoff(imx6_pcie);
- imx6_pcie_stop_link(imx6_pcie->pci);
- imx6_pcie_host_exit(pp);
+ imx_pcie_msi_save_restore(imx_pcie, true);
+ if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_LUT))
+ imx_pcie_lut_save(imx_pcie);
+ if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_BROKEN_SUSPEND)) {
+ /*
+ * The minimum for a workaround would be to set PERST# and to
+ * set the PCIE_TEST_PD flag. However, we can also disable the
+ * clock which saves some power.
+ */
+ imx_pcie_assert_core_reset(imx_pcie);
+ imx_pcie->drvdata->enable_ref_clk(imx_pcie, false);
+ } else {
+ return dw_pcie_suspend_noirq(imx_pcie->pci);
+ }
return 0;
}
-static int imx6_pcie_resume_noirq(struct device *dev)
+static int imx_pcie_resume_noirq(struct device *dev)
{
int ret;
- struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
- struct dw_pcie_rp *pp = &imx6_pcie->pci->pp;
+ struct imx_pcie *imx_pcie = dev_get_drvdata(dev);
- if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_SUPPORTS_SUSPEND))
+ if (!(imx_pcie->drvdata->flags & IMX_PCIE_FLAG_SUPPORTS_SUSPEND))
return 0;
- ret = imx6_pcie_host_init(pp);
- if (ret)
- return ret;
- imx6_pcie_msi_save_restore(imx6_pcie, false);
- dw_pcie_setup_rc(pp);
+ if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_BROKEN_SUSPEND)) {
+ ret = imx_pcie->drvdata->enable_ref_clk(imx_pcie, true);
+ if (ret)
+ return ret;
+ ret = imx_pcie_deassert_core_reset(imx_pcie);
+ if (ret)
+ return ret;
- if (imx6_pcie->link_is_up)
- imx6_pcie_start_link(imx6_pcie->pci);
+ /*
+ * Using PCIE_TEST_PD seems to disable MSI and powers down the
+ * root complex. This is why we have to setup the rc again and
+ * why we have to restore the MSI register.
+ */
+ ret = dw_pcie_setup_rc(&imx_pcie->pci->pp);
+ if (ret)
+ return ret;
+ } else {
+ ret = dw_pcie_resume_noirq(imx_pcie->pci);
+ if (ret)
+ return ret;
+ }
+ if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_LUT))
+ imx_pcie_lut_restore(imx_pcie);
+ imx_pcie_msi_save_restore(imx_pcie, false);
return 0;
}
-static const struct dev_pm_ops imx6_pcie_pm_ops = {
- NOIRQ_SYSTEM_SLEEP_PM_OPS(imx6_pcie_suspend_noirq,
- imx6_pcie_resume_noirq)
+static const struct dev_pm_ops imx_pcie_pm_ops = {
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(imx_pcie_suspend_noirq,
+ imx_pcie_resume_noirq)
};
-static int imx6_pcie_probe(struct platform_device *pdev)
+static int imx_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct dw_pcie *pci;
- struct imx6_pcie *imx6_pcie;
+ struct imx_pcie *imx_pcie;
struct device_node *np;
- struct resource *dbi_base;
struct device_node *node = dev->of_node;
- int ret;
+ int ret, domain;
u16 val;
- imx6_pcie = devm_kzalloc(dev, sizeof(*imx6_pcie), GFP_KERNEL);
- if (!imx6_pcie)
+ imx_pcie = devm_kzalloc(dev, sizeof(*imx_pcie), GFP_KERNEL);
+ if (!imx_pcie)
return -ENOMEM;
pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
@@ -1262,10 +1615,16 @@ static int imx6_pcie_probe(struct platform_device *pdev)
pci->dev = dev;
pci->ops = &dw_pcie_ops;
- pci->pp.ops = &imx6_pcie_host_ops;
- imx6_pcie->pci = pci;
- imx6_pcie->drvdata = of_device_get_match_data(dev);
+ imx_pcie->pci = pci;
+ imx_pcie->drvdata = of_device_get_match_data(dev);
+
+ mutex_init(&imx_pcie->lock);
+
+ if (imx_pcie->drvdata->ops)
+ pci->pp.ops = imx_pcie->drvdata->ops;
+ else
+ pci->pp.ops = &imx_pcie_host_dw_pme_ops;
/* Find the PHY if one is defined, only imx7d uses it */
np = of_parse_phandle(node, "fsl,imx7d-pcie-phy", 0);
@@ -1277,175 +1636,148 @@ static int imx6_pcie_probe(struct platform_device *pdev)
dev_err(dev, "Unable to map PCIe PHY\n");
return ret;
}
- imx6_pcie->phy_base = devm_ioremap_resource(dev, &res);
- if (IS_ERR(imx6_pcie->phy_base))
- return PTR_ERR(imx6_pcie->phy_base);
+ imx_pcie->phy_base = devm_ioremap_resource(dev, &res);
+ if (IS_ERR(imx_pcie->phy_base))
+ return PTR_ERR(imx_pcie->phy_base);
}
- dbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pci->dbi_base = devm_ioremap_resource(dev, dbi_base);
- if (IS_ERR(pci->dbi_base))
- return PTR_ERR(pci->dbi_base);
-
/* Fetch GPIOs */
- imx6_pcie->reset_gpio = of_get_named_gpio(node, "reset-gpio", 0);
- imx6_pcie->gpio_active_high = of_property_read_bool(node,
- "reset-gpio-active-high");
- if (gpio_is_valid(imx6_pcie->reset_gpio)) {
- ret = devm_gpio_request_one(dev, imx6_pcie->reset_gpio,
- imx6_pcie->gpio_active_high ?
- GPIOF_OUT_INIT_HIGH :
- GPIOF_OUT_INIT_LOW,
- "PCIe reset");
- if (ret) {
- dev_err(dev, "unable to get reset gpio\n");
- return ret;
- }
- } else if (imx6_pcie->reset_gpio == -EPROBE_DEFER) {
- return imx6_pcie->reset_gpio;
- }
+ imx_pcie->reset_gpiod = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(imx_pcie->reset_gpiod))
+ return dev_err_probe(dev, PTR_ERR(imx_pcie->reset_gpiod),
+ "unable to get reset gpio\n");
+ gpiod_set_consumer_name(imx_pcie->reset_gpiod, "PCIe reset");
/* Fetch clocks */
- imx6_pcie->pcie_bus = devm_clk_get(dev, "pcie_bus");
- if (IS_ERR(imx6_pcie->pcie_bus))
- return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie_bus),
- "pcie_bus clock source missing or invalid\n");
-
- imx6_pcie->pcie = devm_clk_get(dev, "pcie");
- if (IS_ERR(imx6_pcie->pcie))
- return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie),
- "pcie clock source missing or invalid\n");
-
- switch (imx6_pcie->drvdata->variant) {
- case IMX6SX:
- imx6_pcie->pcie_inbound_axi = devm_clk_get(dev,
- "pcie_inbound_axi");
- if (IS_ERR(imx6_pcie->pcie_inbound_axi))
- return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie_inbound_axi),
- "pcie_inbound_axi clock missing or invalid\n");
- break;
- case IMX8MQ:
- case IMX8MQ_EP:
- imx6_pcie->pcie_aux = devm_clk_get(dev, "pcie_aux");
- if (IS_ERR(imx6_pcie->pcie_aux))
- return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie_aux),
- "pcie_aux clock source missing or invalid\n");
- fallthrough;
- case IMX7D:
- if (dbi_base->start == IMX8MQ_PCIE2_BASE_ADDR)
- imx6_pcie->controller_id = 1;
-
- imx6_pcie->pciephy_reset = devm_reset_control_get_exclusive(dev,
- "pciephy");
- if (IS_ERR(imx6_pcie->pciephy_reset)) {
- dev_err(dev, "Failed to get PCIEPHY reset control\n");
- return PTR_ERR(imx6_pcie->pciephy_reset);
- }
+ imx_pcie->num_clks = devm_clk_bulk_get_all(dev, &imx_pcie->clks);
+ if (imx_pcie->num_clks < 0)
+ return dev_err_probe(dev, imx_pcie->num_clks,
+ "failed to get clocks\n");
+
+ if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_PHYDRV)) {
+ imx_pcie->phy = devm_phy_get(dev, "pcie-phy");
+ if (IS_ERR(imx_pcie->phy))
+ return dev_err_probe(dev, PTR_ERR(imx_pcie->phy),
+ "failed to get pcie phy\n");
+ }
- imx6_pcie->apps_reset = devm_reset_control_get_exclusive(dev,
- "apps");
- if (IS_ERR(imx6_pcie->apps_reset)) {
- dev_err(dev, "Failed to get PCIE APPS reset control\n");
- return PTR_ERR(imx6_pcie->apps_reset);
- }
- break;
- case IMX8MM:
- case IMX8MM_EP:
- case IMX8MP:
- case IMX8MP_EP:
- imx6_pcie->pcie_aux = devm_clk_get(dev, "pcie_aux");
- if (IS_ERR(imx6_pcie->pcie_aux))
- return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie_aux),
- "pcie_aux clock source missing or invalid\n");
- imx6_pcie->apps_reset = devm_reset_control_get_exclusive(dev,
- "apps");
- if (IS_ERR(imx6_pcie->apps_reset))
- return dev_err_probe(dev, PTR_ERR(imx6_pcie->apps_reset),
+ if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_APP_RESET)) {
+ imx_pcie->apps_reset = devm_reset_control_get_exclusive(dev, "apps");
+ if (IS_ERR(imx_pcie->apps_reset))
+ return dev_err_probe(dev, PTR_ERR(imx_pcie->apps_reset),
"failed to get pcie apps reset control\n");
+ }
- imx6_pcie->phy = devm_phy_get(dev, "pcie-phy");
- if (IS_ERR(imx6_pcie->phy))
- return dev_err_probe(dev, PTR_ERR(imx6_pcie->phy),
- "failed to get pcie phy\n");
+ if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_PHY_RESET)) {
+ imx_pcie->pciephy_reset = devm_reset_control_get_exclusive(dev, "pciephy");
+ if (IS_ERR(imx_pcie->pciephy_reset))
+ return dev_err_probe(dev, PTR_ERR(imx_pcie->pciephy_reset),
+ "Failed to get PCIEPHY reset control\n");
+ }
+
+ switch (imx_pcie->drvdata->variant) {
+ case IMX8MQ:
+ case IMX8MQ_EP:
+ domain = of_get_pci_domain_nr(node);
+ if (domain < 0 || domain > 1)
+ return dev_err_probe(dev, -ENODEV, "no \"linux,pci-domain\" property in devicetree\n");
+ imx_pcie->controller_id = domain;
break;
default:
break;
}
- /* Don't fetch the pcie_phy clock, if it has abstract PHY driver */
- if (imx6_pcie->phy == NULL) {
- imx6_pcie->pcie_phy = devm_clk_get(dev, "pcie_phy");
- if (IS_ERR(imx6_pcie->pcie_phy))
- return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie_phy),
- "pcie_phy clock source missing or invalid\n");
+
+ if (imx_pcie->drvdata->gpr) {
+ /* Grab GPR config register range */
+ imx_pcie->iomuxc_gpr =
+ syscon_regmap_lookup_by_compatible(imx_pcie->drvdata->gpr);
+ if (IS_ERR(imx_pcie->iomuxc_gpr))
+ return dev_err_probe(dev, PTR_ERR(imx_pcie->iomuxc_gpr),
+ "unable to find iomuxc registers\n");
}
+ if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_SERDES)) {
+ void __iomem *off = devm_platform_ioremap_resource_byname(pdev, "app");
- /* Grab turnoff reset */
- imx6_pcie->turnoff_reset = devm_reset_control_get_optional_exclusive(dev, "turnoff");
- if (IS_ERR(imx6_pcie->turnoff_reset)) {
- dev_err(dev, "Failed to get TURNOFF reset control\n");
- return PTR_ERR(imx6_pcie->turnoff_reset);
- }
+ if (IS_ERR(off))
+ return dev_err_probe(dev, PTR_ERR(off),
+ "unable to find serdes registers\n");
- /* Grab GPR config register range */
- imx6_pcie->iomuxc_gpr =
- syscon_regmap_lookup_by_compatible(imx6_pcie->drvdata->gpr);
- if (IS_ERR(imx6_pcie->iomuxc_gpr)) {
- dev_err(dev, "unable to find iomuxc registers\n");
- return PTR_ERR(imx6_pcie->iomuxc_gpr);
+ static const struct regmap_config regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ };
+
+ imx_pcie->iomuxc_gpr = devm_regmap_init_mmio(dev, off, &regmap_config);
+ if (IS_ERR(imx_pcie->iomuxc_gpr))
+ return dev_err_probe(dev, PTR_ERR(imx_pcie->iomuxc_gpr),
+ "unable to find iomuxc registers\n");
}
/* Grab PCIe PHY Tx Settings */
if (of_property_read_u32(node, "fsl,tx-deemph-gen1",
- &imx6_pcie->tx_deemph_gen1))
- imx6_pcie->tx_deemph_gen1 = 0;
+ &imx_pcie->tx_deemph_gen1))
+ imx_pcie->tx_deemph_gen1 = 0;
if (of_property_read_u32(node, "fsl,tx-deemph-gen2-3p5db",
- &imx6_pcie->tx_deemph_gen2_3p5db))
- imx6_pcie->tx_deemph_gen2_3p5db = 0;
+ &imx_pcie->tx_deemph_gen2_3p5db))
+ imx_pcie->tx_deemph_gen2_3p5db = 0;
if (of_property_read_u32(node, "fsl,tx-deemph-gen2-6db",
- &imx6_pcie->tx_deemph_gen2_6db))
- imx6_pcie->tx_deemph_gen2_6db = 20;
+ &imx_pcie->tx_deemph_gen2_6db))
+ imx_pcie->tx_deemph_gen2_6db = 20;
if (of_property_read_u32(node, "fsl,tx-swing-full",
- &imx6_pcie->tx_swing_full))
- imx6_pcie->tx_swing_full = 127;
+ &imx_pcie->tx_swing_full))
+ imx_pcie->tx_swing_full = 127;
if (of_property_read_u32(node, "fsl,tx-swing-low",
- &imx6_pcie->tx_swing_low))
- imx6_pcie->tx_swing_low = 127;
+ &imx_pcie->tx_swing_low))
+ imx_pcie->tx_swing_low = 127;
/* Limit link speed */
- pci->link_gen = 1;
- of_property_read_u32(node, "fsl,max-link-speed", &pci->link_gen);
-
- imx6_pcie->vpcie = devm_regulator_get_optional(&pdev->dev, "vpcie");
- if (IS_ERR(imx6_pcie->vpcie)) {
- if (PTR_ERR(imx6_pcie->vpcie) != -ENODEV)
- return PTR_ERR(imx6_pcie->vpcie);
- imx6_pcie->vpcie = NULL;
+ pci->max_link_speed = 1;
+ of_property_read_u32(node, "fsl,max-link-speed", &pci->max_link_speed);
+
+ ret = devm_regulator_get_enable_optional(&pdev->dev, "vpcie3v3aux");
+ if (ret < 0 && ret != -ENODEV)
+ return dev_err_probe(dev, ret, "failed to enable Vaux supply\n");
+
+ imx_pcie->vpcie = devm_regulator_get_optional(&pdev->dev, "vpcie");
+ if (IS_ERR(imx_pcie->vpcie)) {
+ if (PTR_ERR(imx_pcie->vpcie) != -ENODEV)
+ return PTR_ERR(imx_pcie->vpcie);
+ imx_pcie->vpcie = NULL;
}
- imx6_pcie->vph = devm_regulator_get_optional(&pdev->dev, "vph");
- if (IS_ERR(imx6_pcie->vph)) {
- if (PTR_ERR(imx6_pcie->vph) != -ENODEV)
- return PTR_ERR(imx6_pcie->vph);
- imx6_pcie->vph = NULL;
+ imx_pcie->vph = devm_regulator_get_optional(&pdev->dev, "vph");
+ if (IS_ERR(imx_pcie->vph)) {
+ if (PTR_ERR(imx_pcie->vph) != -ENODEV)
+ return PTR_ERR(imx_pcie->vph);
+ imx_pcie->vph = NULL;
}
- platform_set_drvdata(pdev, imx6_pcie);
+ platform_set_drvdata(pdev, imx_pcie);
- ret = imx6_pcie_attach_pd(dev);
+ ret = imx_pcie_attach_pd(dev);
if (ret)
return ret;
- if (imx6_pcie->drvdata->mode == DW_PCIE_EP_TYPE) {
- ret = imx6_add_pcie_ep(imx6_pcie, pdev);
+ pci->use_parent_dt_ranges = true;
+ if (imx_pcie->drvdata->mode == DW_PCIE_EP_TYPE) {
+ ret = imx_add_pcie_ep(imx_pcie, pdev);
if (ret < 0)
return ret;
+
+ /*
+ * FIXME: Only single Device (EPF) is supported due to the
+ * Endpoint framework limitation.
+ */
+ imx_pcie_add_lut_by_rid(imx_pcie, 0);
} else {
+ pci->pp.use_atu_msg = true;
ret = dw_pcie_host_init(&pci->pp);
if (ret < 0)
return ret;
@@ -1462,74 +1794,186 @@ static int imx6_pcie_probe(struct platform_device *pdev)
return 0;
}
-static void imx6_pcie_shutdown(struct platform_device *pdev)
+static void imx_pcie_shutdown(struct platform_device *pdev)
{
- struct imx6_pcie *imx6_pcie = platform_get_drvdata(pdev);
+ struct imx_pcie *imx_pcie = platform_get_drvdata(pdev);
/* bring down link, so bootloader gets clean state in case of reboot */
- imx6_pcie_assert_core_reset(imx6_pcie);
+ imx_pcie_assert_core_reset(imx_pcie);
}
-static const struct imx6_pcie_drvdata drvdata[] = {
+static const struct imx_pcie_drvdata drvdata[] = {
[IMX6Q] = {
.variant = IMX6Q,
- .flags = IMX6_PCIE_FLAG_IMX6_PHY |
- IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE,
+ .flags = IMX_PCIE_FLAG_IMX_PHY |
+ IMX_PCIE_FLAG_SPEED_CHANGE_WORKAROUND |
+ IMX_PCIE_FLAG_BROKEN_SUSPEND |
+ IMX_PCIE_FLAG_SUPPORTS_SUSPEND,
.dbi_length = 0x200,
.gpr = "fsl,imx6q-iomuxc-gpr",
+ .ltssm_off = IOMUXC_GPR12,
+ .ltssm_mask = IMX6Q_GPR12_PCIE_CTL_2,
+ .mode_off[0] = IOMUXC_GPR12,
+ .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
+ .init_phy = imx_pcie_init_phy,
+ .enable_ref_clk = imx6q_pcie_enable_ref_clk,
+ .core_reset = imx6q_pcie_core_reset,
},
[IMX6SX] = {
.variant = IMX6SX,
- .flags = IMX6_PCIE_FLAG_IMX6_PHY |
- IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE |
- IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
+ .flags = IMX_PCIE_FLAG_IMX_PHY |
+ IMX_PCIE_FLAG_SPEED_CHANGE_WORKAROUND |
+ IMX_PCIE_FLAG_SUPPORTS_SUSPEND,
.gpr = "fsl,imx6q-iomuxc-gpr",
+ .ltssm_off = IOMUXC_GPR12,
+ .ltssm_mask = IMX6Q_GPR12_PCIE_CTL_2,
+ .mode_off[0] = IOMUXC_GPR12,
+ .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
+ .init_phy = imx6sx_pcie_init_phy,
+ .enable_ref_clk = imx6sx_pcie_enable_ref_clk,
+ .core_reset = imx6sx_pcie_core_reset,
+ .ops = &imx_pcie_host_ops,
},
[IMX6QP] = {
.variant = IMX6QP,
- .flags = IMX6_PCIE_FLAG_IMX6_PHY |
- IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE |
- IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
+ .flags = IMX_PCIE_FLAG_IMX_PHY |
+ IMX_PCIE_FLAG_SPEED_CHANGE_WORKAROUND |
+ IMX_PCIE_FLAG_SUPPORTS_SUSPEND,
.dbi_length = 0x200,
.gpr = "fsl,imx6q-iomuxc-gpr",
+ .ltssm_off = IOMUXC_GPR12,
+ .ltssm_mask = IMX6Q_GPR12_PCIE_CTL_2,
+ .mode_off[0] = IOMUXC_GPR12,
+ .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
+ .init_phy = imx_pcie_init_phy,
+ .enable_ref_clk = imx6q_pcie_enable_ref_clk,
+ .core_reset = imx6qp_pcie_core_reset,
+ .ops = &imx_pcie_host_ops,
},
[IMX7D] = {
.variant = IMX7D,
- .flags = IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
+ .flags = IMX_PCIE_FLAG_SUPPORTS_SUSPEND |
+ IMX_PCIE_FLAG_HAS_APP_RESET |
+ IMX_PCIE_FLAG_HAS_PHY_RESET,
.gpr = "fsl,imx7d-iomuxc-gpr",
+ .mode_off[0] = IOMUXC_GPR12,
+ .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
+ .enable_ref_clk = imx7d_pcie_enable_ref_clk,
+ .core_reset = imx7d_pcie_core_reset,
},
[IMX8MQ] = {
.variant = IMX8MQ,
+ .flags = IMX_PCIE_FLAG_HAS_APP_RESET |
+ IMX_PCIE_FLAG_HAS_PHY_RESET |
+ IMX_PCIE_FLAG_SUPPORTS_SUSPEND,
.gpr = "fsl,imx8mq-iomuxc-gpr",
+ .mode_off[0] = IOMUXC_GPR12,
+ .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
+ .mode_off[1] = IOMUXC_GPR12,
+ .mode_mask[1] = IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE,
+ .init_phy = imx8mq_pcie_init_phy,
+ .enable_ref_clk = imx8mm_pcie_enable_ref_clk,
},
[IMX8MM] = {
.variant = IMX8MM,
- .flags = IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
+ .flags = IMX_PCIE_FLAG_SUPPORTS_SUSPEND |
+ IMX_PCIE_FLAG_HAS_PHYDRV |
+ IMX_PCIE_FLAG_HAS_APP_RESET,
.gpr = "fsl,imx8mm-iomuxc-gpr",
+ .mode_off[0] = IOMUXC_GPR12,
+ .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
+ .enable_ref_clk = imx8mm_pcie_enable_ref_clk,
},
[IMX8MP] = {
.variant = IMX8MP,
- .flags = IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
+ .flags = IMX_PCIE_FLAG_SUPPORTS_SUSPEND |
+ IMX_PCIE_FLAG_HAS_PHYDRV |
+ IMX_PCIE_FLAG_HAS_APP_RESET,
.gpr = "fsl,imx8mp-iomuxc-gpr",
+ .mode_off[0] = IOMUXC_GPR12,
+ .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
+ .enable_ref_clk = imx8mm_pcie_enable_ref_clk,
+ },
+ [IMX8Q] = {
+ .variant = IMX8Q,
+ .flags = IMX_PCIE_FLAG_HAS_PHYDRV |
+ IMX_PCIE_FLAG_CPU_ADDR_FIXUP |
+ IMX_PCIE_FLAG_SUPPORTS_SUSPEND,
+ },
+ [IMX95] = {
+ .variant = IMX95,
+ .flags = IMX_PCIE_FLAG_HAS_SERDES |
+ IMX_PCIE_FLAG_HAS_LUT |
+ IMX_PCIE_FLAG_8GT_ECN_ERR051586 |
+ IMX_PCIE_FLAG_SUPPORTS_SUSPEND,
+ .ltssm_off = IMX95_PE0_GEN_CTRL_3,
+ .ltssm_mask = IMX95_PCIE_LTSSM_EN,
+ .mode_off[0] = IMX95_PE0_GEN_CTRL_1,
+ .mode_mask[0] = IMX95_PCIE_DEVICE_TYPE,
+ .core_reset = imx95_pcie_core_reset,
+ .init_phy = imx95_pcie_init_phy,
+ .wait_pll_lock = imx95_pcie_wait_for_phy_pll_lock,
},
[IMX8MQ_EP] = {
.variant = IMX8MQ_EP,
+ .flags = IMX_PCIE_FLAG_HAS_APP_RESET |
+ IMX_PCIE_FLAG_HAS_PHY_RESET,
.mode = DW_PCIE_EP_TYPE,
.gpr = "fsl,imx8mq-iomuxc-gpr",
+ .mode_off[0] = IOMUXC_GPR12,
+ .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
+ .mode_off[1] = IOMUXC_GPR12,
+ .mode_mask[1] = IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE,
+ .epc_features = &imx8q_pcie_epc_features,
+ .init_phy = imx8mq_pcie_init_phy,
+ .enable_ref_clk = imx8mm_pcie_enable_ref_clk,
},
[IMX8MM_EP] = {
.variant = IMX8MM_EP,
+ .flags = IMX_PCIE_FLAG_HAS_APP_RESET |
+ IMX_PCIE_FLAG_HAS_PHYDRV,
.mode = DW_PCIE_EP_TYPE,
.gpr = "fsl,imx8mm-iomuxc-gpr",
+ .mode_off[0] = IOMUXC_GPR12,
+ .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
+ .epc_features = &imx8m_pcie_epc_features,
+ .enable_ref_clk = imx8mm_pcie_enable_ref_clk,
},
[IMX8MP_EP] = {
.variant = IMX8MP_EP,
+ .flags = IMX_PCIE_FLAG_HAS_APP_RESET |
+ IMX_PCIE_FLAG_HAS_PHYDRV,
.mode = DW_PCIE_EP_TYPE,
.gpr = "fsl,imx8mp-iomuxc-gpr",
+ .mode_off[0] = IOMUXC_GPR12,
+ .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
+ .epc_features = &imx8m_pcie_epc_features,
+ .enable_ref_clk = imx8mm_pcie_enable_ref_clk,
+ },
+ [IMX8Q_EP] = {
+ .variant = IMX8Q_EP,
+ .flags = IMX_PCIE_FLAG_HAS_PHYDRV,
+ .mode = DW_PCIE_EP_TYPE,
+ .epc_features = &imx8q_pcie_epc_features,
+ },
+ [IMX95_EP] = {
+ .variant = IMX95_EP,
+ .flags = IMX_PCIE_FLAG_HAS_SERDES |
+ IMX_PCIE_FLAG_8GT_ECN_ERR051586 |
+ IMX_PCIE_FLAG_SUPPORT_64BIT,
+ .ltssm_off = IMX95_PE0_GEN_CTRL_3,
+ .ltssm_mask = IMX95_PCIE_LTSSM_EN,
+ .mode_off[0] = IMX95_PE0_GEN_CTRL_1,
+ .mode_mask[0] = IMX95_PCIE_DEVICE_TYPE,
+ .init_phy = imx95_pcie_init_phy,
+ .core_reset = imx95_pcie_core_reset,
+ .wait_pll_lock = imx95_pcie_wait_for_phy_pll_lock,
+ .epc_features = &imx95_pcie_epc_features,
+ .mode = DW_PCIE_EP_TYPE,
},
};
-static const struct of_device_id imx6_pcie_of_match[] = {
+static const struct of_device_id imx_pcie_of_match[] = {
{ .compatible = "fsl,imx6q-pcie", .data = &drvdata[IMX6Q], },
{ .compatible = "fsl,imx6sx-pcie", .data = &drvdata[IMX6SX], },
{ .compatible = "fsl,imx6qp-pcie", .data = &drvdata[IMX6QP], },
@@ -1537,25 +1981,29 @@ static const struct of_device_id imx6_pcie_of_match[] = {
{ .compatible = "fsl,imx8mq-pcie", .data = &drvdata[IMX8MQ], },
{ .compatible = "fsl,imx8mm-pcie", .data = &drvdata[IMX8MM], },
{ .compatible = "fsl,imx8mp-pcie", .data = &drvdata[IMX8MP], },
+ { .compatible = "fsl,imx8q-pcie", .data = &drvdata[IMX8Q], },
+ { .compatible = "fsl,imx95-pcie", .data = &drvdata[IMX95], },
{ .compatible = "fsl,imx8mq-pcie-ep", .data = &drvdata[IMX8MQ_EP], },
{ .compatible = "fsl,imx8mm-pcie-ep", .data = &drvdata[IMX8MM_EP], },
{ .compatible = "fsl,imx8mp-pcie-ep", .data = &drvdata[IMX8MP_EP], },
+ { .compatible = "fsl,imx8q-pcie-ep", .data = &drvdata[IMX8Q_EP], },
+ { .compatible = "fsl,imx95-pcie-ep", .data = &drvdata[IMX95_EP], },
{},
};
-static struct platform_driver imx6_pcie_driver = {
+static struct platform_driver imx_pcie_driver = {
.driver = {
.name = "imx6q-pcie",
- .of_match_table = imx6_pcie_of_match,
+ .of_match_table = imx_pcie_of_match,
.suppress_bind_attrs = true,
- .pm = &imx6_pcie_pm_ops,
+ .pm = &imx_pcie_pm_ops,
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
- .probe = imx6_pcie_probe,
- .shutdown = imx6_pcie_shutdown,
+ .probe = imx_pcie_probe,
+ .shutdown = imx_pcie_shutdown,
};
-static void imx6_pcie_quirk(struct pci_dev *dev)
+static void imx_pcie_quirk(struct pci_dev *dev)
{
struct pci_bus *bus = dev->bus;
struct dw_pcie_rp *pp = bus->sysdata;
@@ -1565,33 +2013,33 @@ static void imx6_pcie_quirk(struct pci_dev *dev)
return;
/* Make sure we only quirk devices associated with this driver */
- if (bus->dev.parent->parent->driver != &imx6_pcie_driver.driver)
+ if (bus->dev.parent->parent->driver != &imx_pcie_driver.driver)
return;
if (pci_is_root_bus(bus)) {
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);
+ struct imx_pcie *imx_pcie = to_imx_pcie(pci);
/*
* Limit config length to avoid the kernel reading beyond
* the register set and causing an abort on i.MX 6Quad
*/
- if (imx6_pcie->drvdata->dbi_length) {
- dev->cfg_size = imx6_pcie->drvdata->dbi_length;
+ if (imx_pcie->drvdata->dbi_length) {
+ dev->cfg_size = imx_pcie->drvdata->dbi_length;
dev_info(&dev->dev, "Limiting cfg_size to %d\n",
dev->cfg_size);
}
}
}
DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_SYNOPSYS, 0xabcd,
- PCI_CLASS_BRIDGE_PCI, 8, imx6_pcie_quirk);
+ PCI_CLASS_BRIDGE_PCI, 8, imx_pcie_quirk);
-static int __init imx6_pcie_init(void)
+static int __init imx_pcie_init(void)
{
#ifdef CONFIG_ARM
struct device_node *np;
- np = of_find_matching_node(NULL, imx6_pcie_of_match);
+ np = of_find_matching_node(NULL, imx_pcie_of_match);
if (!np)
return -ENODEV;
of_node_put(np);
@@ -1607,6 +2055,6 @@ static int __init imx6_pcie_init(void)
"external abort on non-linefetch");
#endif
- return platform_driver_register(&imx6_pcie_driver);
+ return platform_driver_register(&imx_pcie_driver);
}
-device_initcall(imx6_pcie_init);
+device_initcall(imx_pcie_init);
diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c
index 78818853af9e..f86d9111f863 100644
--- a/drivers/pci/controller/dwc/pci-keystone.c
+++ b/drivers/pci/controller/dwc/pci-keystone.c
@@ -17,9 +17,9 @@
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/mfd/syscon.h>
+#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/phy/phy.h>
@@ -35,6 +35,11 @@
#define PCIE_DEVICEID_SHIFT 16
/* Application registers */
+#define PID 0x000
+#define RTL GENMASK(15, 11)
+#define RTL_SHIFT 11
+#define AM6_PCI_PG1_RTL_VER 0x15
+
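The new PID/RTL definitions are presumably used further down the patch to read the AM654x silicon revision out of the application PID register; extracting the field is a plain mask-and-shift. A stand-alone sketch with GENMASK(15, 11) spelled out and a made-up register value:

    #include <stdint.h>
    #include <stdio.h>

    #define RTL_MASK		0x0000f800u	/* GENMASK(15, 11) */
    #define RTL_SHIFT		11
    #define AM6_PCI_PG1_RTL_VER	0x15

    int main(void)
    {
    	uint32_t pid = 0x4e30a800;	/* made-up PID register value */
    	uint32_t rtl = (pid & RTL_MASK) >> RTL_SHIFT;

    	printf("RTL revision 0x%x, PG1? %s\n", rtl,
    	       rtl == AM6_PCI_PG1_RTL_VER ? "yes" : "no");
    	return 0;
    }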
#define CMD_STATUS 0x004
#define LTSSM_EN_VAL BIT(0)
#define OB_XLAT_EN_VAL BIT(1)
@@ -105,6 +110,8 @@
#define to_keystone_pcie(x) dev_get_drvdata((x)->dev)
+#define PCI_DEVICE_ID_TI_AM654X 0xb00c
+
struct ks_pcie_of_data {
enum dw_pcie_device_mode mode;
const struct dw_pcie_host_ops *host_ops;
@@ -116,8 +123,7 @@ struct keystone_pcie {
struct dw_pcie *pci;
/* PCI Device ID */
u32 device_id;
- int legacy_host_irqs[PCI_NUM_INTX];
- struct device_node *legacy_intc_np;
+ int intx_host_irqs[PCI_NUM_INTX];
int msi_host_irq;
int num_lanes;
@@ -125,7 +131,7 @@ struct keystone_pcie {
struct phy **phy;
struct device_link **link;
struct device_node *msi_intc_np;
- struct irq_domain *legacy_irq_domain;
+ struct irq_domain *intx_irq_domain;
struct device_node *np;
/* Application register space */
@@ -184,12 +190,6 @@ static void ks_pcie_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
(int)data->hwirq, msg->address_hi, msg->address_lo);
}
-static int ks_pcie_msi_set_affinity(struct irq_data *irq_data,
- const struct cpumask *mask, bool force)
-{
- return -EINVAL;
-}
-
static void ks_pcie_msi_mask(struct irq_data *data)
{
struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(data);
@@ -242,19 +242,78 @@ static struct irq_chip ks_pcie_msi_irq_chip = {
.name = "KEYSTONE-PCI-MSI",
.irq_ack = ks_pcie_msi_irq_ack,
.irq_compose_msi_msg = ks_pcie_compose_msi_msg,
- .irq_set_affinity = ks_pcie_msi_set_affinity,
.irq_mask = ks_pcie_msi_mask,
.irq_unmask = ks_pcie_msi_unmask,
};
+/**
+ * ks_pcie_set_dbi_mode() - Set DBI mode to access overlaid BAR mask registers
+ * @ks_pcie: A pointer to the keystone_pcie structure which holds the KeyStone
+ * PCIe host controller driver information.
+ *
+ * Since modification of dbi_cs2 involves different clock domain, read the
+ * status back to ensure the transition is complete.
+ */
+static void ks_pcie_set_dbi_mode(struct keystone_pcie *ks_pcie)
+{
+ u32 val;
+
+ val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+ val |= DBI_CS2;
+ ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
+
+ do {
+ val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+ } while (!(val & DBI_CS2));
+}
+
+/**
+ * ks_pcie_clear_dbi_mode() - Disable DBI mode
+ * @ks_pcie: A pointer to the keystone_pcie structure which holds the KeyStone
+ * PCIe host controller driver information.
+ *
+ * Since modification of dbi_cs2 involves different clock domain, read the
+ * status back to ensure the transition is complete.
+ */
+static void ks_pcie_clear_dbi_mode(struct keystone_pcie *ks_pcie)
+{
+ u32 val;
+
+ val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+ val &= ~DBI_CS2;
+ ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
+
+ do {
+ val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+ } while (val & DBI_CS2);
+}
+
static int ks_pcie_msi_host_init(struct dw_pcie_rp *pp)
{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
+
+ /* Configure and set up BAR0 */
+ ks_pcie_set_dbi_mode(ks_pcie);
+
+ /* Enable BAR0 */
+ dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 1);
+ dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, SZ_4K - 1);
+
+ ks_pcie_clear_dbi_mode(ks_pcie);
+
+ /*
+ * For BAR0, just setting bus address for inbound writes (MSI) should
+ * be sufficient. Use physical address to avoid any conflicts.
+ */
+ dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, ks_pcie->app.start);
+
pp->msi_irq_chip = &ks_pcie_msi_irq_chip;
return dw_pcie_allocate_domains(pp);
}
-static void ks_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie,
- int offset)
+static void ks_pcie_handle_intx_irq(struct keystone_pcie *ks_pcie,
+ int offset)
{
struct dw_pcie *pci = ks_pcie->pci;
struct device *dev = pci->dev;
@@ -264,7 +323,7 @@ static void ks_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie,
if (BIT(0) & pending) {
dev_dbg(dev, ": irq: irq_offset %d", offset);
- generic_handle_domain_irq(ks_pcie->legacy_irq_domain, offset);
+ generic_handle_domain_irq(ks_pcie->intx_irq_domain, offset);
}
/* EOI the INTx interrupt */
@@ -308,94 +367,56 @@ static irqreturn_t ks_pcie_handle_error_irq(struct keystone_pcie *ks_pcie)
return IRQ_HANDLED;
}
-static void ks_pcie_ack_legacy_irq(struct irq_data *d)
+static void ks_pcie_ack_intx_irq(struct irq_data *d)
{
}
-static void ks_pcie_mask_legacy_irq(struct irq_data *d)
+static void ks_pcie_mask_intx_irq(struct irq_data *d)
{
}
-static void ks_pcie_unmask_legacy_irq(struct irq_data *d)
+static void ks_pcie_unmask_intx_irq(struct irq_data *d)
{
}
-static struct irq_chip ks_pcie_legacy_irq_chip = {
- .name = "Keystone-PCI-Legacy-IRQ",
- .irq_ack = ks_pcie_ack_legacy_irq,
- .irq_mask = ks_pcie_mask_legacy_irq,
- .irq_unmask = ks_pcie_unmask_legacy_irq,
+static struct irq_chip ks_pcie_intx_irq_chip = {
+ .name = "Keystone-PCI-INTX-IRQ",
+ .irq_ack = ks_pcie_ack_intx_irq,
+ .irq_mask = ks_pcie_mask_intx_irq,
+ .irq_unmask = ks_pcie_unmask_intx_irq,
};
-static int ks_pcie_init_legacy_irq_map(struct irq_domain *d,
- unsigned int irq,
- irq_hw_number_t hw_irq)
+static int ks_pcie_init_intx_irq_map(struct irq_domain *d,
+ unsigned int irq, irq_hw_number_t hw_irq)
{
- irq_set_chip_and_handler(irq, &ks_pcie_legacy_irq_chip,
+ irq_set_chip_and_handler(irq, &ks_pcie_intx_irq_chip,
handle_level_irq);
irq_set_chip_data(irq, d->host_data);
return 0;
}
-static const struct irq_domain_ops ks_pcie_legacy_irq_domain_ops = {
- .map = ks_pcie_init_legacy_irq_map,
+static const struct irq_domain_ops ks_pcie_intx_irq_domain_ops = {
+ .map = ks_pcie_init_intx_irq_map,
.xlate = irq_domain_xlate_onetwocell,
};
-/**
- * ks_pcie_set_dbi_mode() - Set DBI mode to access overlaid BAR mask registers
- * @ks_pcie: A pointer to the keystone_pcie structure which holds the KeyStone
- * PCIe host controller driver information.
- *
- * Since modification of dbi_cs2 involves different clock domain, read the
- * status back to ensure the transition is complete.
- */
-static void ks_pcie_set_dbi_mode(struct keystone_pcie *ks_pcie)
-{
- u32 val;
-
- val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
- val |= DBI_CS2;
- ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
-
- do {
- val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
- } while (!(val & DBI_CS2));
-}
-
-/**
- * ks_pcie_clear_dbi_mode() - Disable DBI mode
- * @ks_pcie: A pointer to the keystone_pcie structure which holds the KeyStone
- * PCIe host controller driver information.
- *
- * Since modification of dbi_cs2 involves different clock domain, read the
- * status back to ensure the transition is complete.
- */
-static void ks_pcie_clear_dbi_mode(struct keystone_pcie *ks_pcie)
-{
- u32 val;
-
- val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
- val &= ~DBI_CS2;
- ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
-
- do {
- val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
- } while (val & DBI_CS2);
-}
-
-static void ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
+static int ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
{
u32 val;
u32 num_viewport = ks_pcie->num_viewport;
struct dw_pcie *pci = ks_pcie->pci;
struct dw_pcie_rp *pp = &pci->pp;
- u64 start, end;
+ struct resource_entry *entry;
struct resource *mem;
+ u64 start, end;
int i;
- mem = resource_list_first_type(&pp->bridge->windows, IORESOURCE_MEM)->res;
+ entry = resource_list_first_type(&pp->bridge->windows, IORESOURCE_MEM);
+ if (!entry)
+ return -ENODEV;
+
+ mem = entry->res;
start = mem->start;
end = mem->end;
@@ -406,7 +427,7 @@ static void ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
ks_pcie_clear_dbi_mode(ks_pcie);
if (ks_pcie->is_am6)
- return;
+ return 0;
val = ilog2(OB_WIN_SIZE);
ks_pcie_app_writel(ks_pcie, OB_SIZE, val);
@@ -423,6 +444,8 @@ static void ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
val |= OB_XLAT_EN_VAL;
ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
+
+ return 0;
}
static void __iomem *ks_pcie_other_map_bus(struct pci_bus *bus,
@@ -433,6 +456,17 @@ static void __iomem *ks_pcie_other_map_bus(struct pci_bus *bus,
struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
u32 reg;
+ /*
+ * Checking whether the link is up here is a last line of defense
+ * against platforms that forward errors on the system bus as
+ * SError upon PCI configuration transactions issued when the link
+ * is down. This check is racy by definition and does not stop
+ * the system from triggering an SError if the link goes down
+ * after this check is performed.
+ */
+ if (!dw_pcie_link_up(pci))
+ return NULL;
+
reg = CFG_BUS(bus->number) | CFG_DEVICE(PCI_SLOT(devfn)) |
CFG_FUNC(PCI_FUNC(devfn));
if (!pci_is_root_bus(bus->parent))
@@ -448,44 +482,10 @@ static struct pci_ops ks_child_pcie_ops = {
.write = pci_generic_config_write,
};
-/**
- * ks_pcie_v3_65_add_bus() - keystone add_bus post initialization
- * @bus: A pointer to the PCI bus structure.
- *
- * This sets BAR0 to enable inbound access for MSI_IRQ register
- */
-static int ks_pcie_v3_65_add_bus(struct pci_bus *bus)
-{
- struct dw_pcie_rp *pp = bus->sysdata;
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
-
- if (!pci_is_root_bus(bus))
- return 0;
-
- /* Configure and set up BAR0 */
- ks_pcie_set_dbi_mode(ks_pcie);
-
- /* Enable BAR0 */
- dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 1);
- dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, SZ_4K - 1);
-
- ks_pcie_clear_dbi_mode(ks_pcie);
-
- /*
- * For BAR0, just setting bus address for inbound writes (MSI) should
- * be sufficient. Use physical address to avoid any conflicts.
- */
- dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, ks_pcie->app.start);
-
- return 0;
-}
-
static struct pci_ops ks_pcie_ops = {
.map_bus = dw_pcie_own_conf_map_bus,
.read = pci_generic_config_read,
.write = pci_generic_config_write,
- .add_bus = ks_pcie_v3_65_add_bus,
};
/**
@@ -493,13 +493,12 @@ static struct pci_ops ks_pcie_ops = {
* @pci: A pointer to the dw_pcie structure which holds the DesignWare PCIe host
* controller driver information.
*/
-static int ks_pcie_link_up(struct dw_pcie *pci)
+static bool ks_pcie_link_up(struct dw_pcie *pci)
{
u32 val;
val = dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG0);
- val &= PORT_LOGIC_LTSSM_STATE_MASK;
- return (val == PORT_LOGIC_LTSSM_STATE_L0);
+ return (val & PORT_LOGIC_LTSSM_STATE_MASK) == PORT_LOGIC_LTSSM_STATE_L0;
}
static void ks_pcie_stop_link(struct dw_pcie *pci)
@@ -528,7 +527,11 @@ static int ks_pcie_start_link(struct dw_pcie *pci)
static void ks_pcie_quirk(struct pci_dev *dev)
{
struct pci_bus *bus = dev->bus;
+ struct keystone_pcie *ks_pcie;
+ struct device *bridge_dev;
struct pci_dev *bridge;
+ u32 val;
+
static const struct pci_device_id rc_pci_devids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2HK),
.class = PCI_CLASS_BRIDGE_PCI_NORMAL, .class_mask = ~0, },
@@ -540,6 +543,11 @@ static void ks_pcie_quirk(struct pci_dev *dev)
.class = PCI_CLASS_BRIDGE_PCI_NORMAL, .class_mask = ~0, },
{ 0, },
};
+ static const struct pci_device_id am6_pci_devids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM654X),
+ .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
+ { 0, },
+ };
if (pci_is_root_bus(bus))
bridge = dev;
@@ -561,10 +569,36 @@ static void ks_pcie_quirk(struct pci_dev *dev)
*/
if (pci_match_id(rc_pci_devids, bridge)) {
if (pcie_get_readrq(dev) > 256) {
- dev_info(&dev->dev, "limiting MRRS to 256\n");
+ dev_info(&dev->dev, "limiting MRRS to 256 bytes\n");
pcie_set_readrq(dev, 256);
}
}
+
+ /*
+ * Memory transactions fail with PCI controller in AM654 PG1.0
+ * when MRRS is set to more than 128 bytes. Force the MRRS to
+ * 128 bytes in all downstream devices.
+ */
+ if (pci_match_id(am6_pci_devids, bridge)) {
+ bridge_dev = pci_get_host_bridge_device(dev);
+ if (!bridge_dev || !bridge_dev->parent)
+ return;
+
+ ks_pcie = dev_get_drvdata(bridge_dev->parent);
+ if (!ks_pcie)
+ return;
+
+ val = ks_pcie_app_readl(ks_pcie, PID);
+ val &= RTL;
+ val >>= RTL_SHIFT;
+ if (val != AM6_PCI_PG1_RTL_VER)
+ return;
+
+ if (pcie_get_readrq(dev) > 128) {
+ dev_info(&dev->dev, "limiting MRRS to 128 bytes\n");
+ pcie_set_readrq(dev, 128);
+ }
+ }
}
DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, ks_pcie_quirk);
@@ -606,22 +640,22 @@ static void ks_pcie_msi_irq_handler(struct irq_desc *desc)
}
/**
- * ks_pcie_legacy_irq_handler() - Handle legacy interrupt
+ * ks_pcie_intx_irq_handler() - Handle INTX interrupt
* @desc: Pointer to irq descriptor
*
- * Traverse through pending legacy interrupts and invoke handler for each. Also
+ * Traverse through pending INTX interrupts and invoke handler for each. Also
* takes care of interrupt controller level mask/ack operation.
*/
-static void ks_pcie_legacy_irq_handler(struct irq_desc *desc)
+static void ks_pcie_intx_irq_handler(struct irq_desc *desc)
{
unsigned int irq = irq_desc_get_irq(desc);
struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
struct dw_pcie *pci = ks_pcie->pci;
struct device *dev = pci->dev;
- u32 irq_offset = irq - ks_pcie->legacy_host_irqs[0];
+ u32 irq_offset = irq - ks_pcie->intx_host_irqs[0];
struct irq_chip *chip = irq_desc_get_chip(desc);
- dev_dbg(dev, ": Handling legacy irq %d\n", irq);
+ dev_dbg(dev, ": Handling INTX irq %d\n", irq);
/*
* The chained irq handler installation would have replaced normal
@@ -629,7 +663,7 @@ static void ks_pcie_legacy_irq_handler(struct irq_desc *desc)
* ack operation.
*/
chained_irq_enter(chip, desc);
- ks_pcie_handle_legacy_irq(ks_pcie, irq_offset);
+ ks_pcie_handle_intx_irq(ks_pcie, irq_offset);
chained_irq_exit(chip, desc);
}
@@ -687,10 +721,10 @@ err:
return ret;
}
-static int ks_pcie_config_legacy_irq(struct keystone_pcie *ks_pcie)
+static int ks_pcie_config_intx_irq(struct keystone_pcie *ks_pcie)
{
struct device *dev = ks_pcie->pci->dev;
- struct irq_domain *legacy_irq_domain;
+ struct irq_domain *intx_irq_domain;
struct device_node *np = ks_pcie->np;
struct device_node *intc_np;
int irq_count, irq, ret = 0, i;
@@ -698,7 +732,7 @@ static int ks_pcie_config_legacy_irq(struct keystone_pcie *ks_pcie)
intc_np = of_get_child_by_name(np, "legacy-interrupt-controller");
if (!intc_np) {
/*
- * Since legacy interrupts are modeled as edge-interrupts in
+ * Since INTX interrupts are modeled as edge-interrupts in
* AM6, keep it disabled for now.
*/
if (ks_pcie->is_am6)
@@ -720,22 +754,21 @@ static int ks_pcie_config_legacy_irq(struct keystone_pcie *ks_pcie)
ret = -EINVAL;
goto err;
}
- ks_pcie->legacy_host_irqs[i] = irq;
+ ks_pcie->intx_host_irqs[i] = irq;
irq_set_chained_handler_and_data(irq,
- ks_pcie_legacy_irq_handler,
+ ks_pcie_intx_irq_handler,
ks_pcie);
}
- legacy_irq_domain =
- irq_domain_add_linear(intc_np, PCI_NUM_INTX,
- &ks_pcie_legacy_irq_domain_ops, NULL);
- if (!legacy_irq_domain) {
- dev_err(dev, "Failed to add irq domain for legacy irqs\n");
+ intx_irq_domain = irq_domain_create_linear(of_fwnode_handle(intc_np), PCI_NUM_INTX,
+ &ks_pcie_intx_irq_domain_ops, NULL);
+ if (!intx_irq_domain) {
+ dev_err(dev, "Failed to add irq domain for INTX irqs\n");
ret = -EINVAL;
goto err;
}
- ks_pcie->legacy_irq_domain = legacy_irq_domain;
+ ks_pcie->intx_irq_domain = intx_irq_domain;
for (i = 0; i < PCI_NUM_INTX; i++)
ks_pcie_app_writel(ks_pcie, IRQ_ENABLE_SET(i), INTx_EN);
@@ -745,29 +778,7 @@ err:
return ret;
}
-#ifdef CONFIG_ARM
-/*
- * When a PCI device does not exist during config cycles, keystone host
- * gets a bus error instead of returning 0xffffffff (PCI_ERROR_RESPONSE).
- * This handler always returns 0 for this kind of fault.
- */
-static int ks_pcie_fault(unsigned long addr, unsigned int fsr,
- struct pt_regs *regs)
-{
- unsigned long instr = *(unsigned long *) instruction_pointer(regs);
-
- if ((instr & 0x0e100090) == 0x00100090) {
- int reg = (instr >> 12) & 15;
-
- regs->uregs[reg] = -1;
- regs->ARM_pc += 4;
- }
-
- return 0;
-}
-#endif
-
-static int __init ks_pcie_init_id(struct keystone_pcie *ks_pcie)
+static int ks_pcie_init_id(struct keystone_pcie *ks_pcie)
{
int ret;
unsigned int id;
@@ -799,7 +810,7 @@ static int __init ks_pcie_init_id(struct keystone_pcie *ks_pcie)
return 0;
}
-static int __init ks_pcie_host_init(struct dw_pcie_rp *pp)
+static int ks_pcie_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
@@ -809,7 +820,7 @@ static int __init ks_pcie_host_init(struct dw_pcie_rp *pp)
if (!ks_pcie->is_am6)
pp->bridge->child_ops = &ks_child_pcie_ops;
- ret = ks_pcie_config_legacy_irq(ks_pcie);
+ ret = ks_pcie_config_intx_irq(ks_pcie);
if (ret)
return ret;
@@ -818,7 +829,10 @@ static int __init ks_pcie_host_init(struct dw_pcie_rp *pp)
return ret;
ks_pcie_stop_link(pci);
- ks_pcie_setup_rc_app_regs(ks_pcie);
+ ret = ks_pcie_setup_rc_app_regs(ks_pcie);
+ if (ret)
+ return ret;
+
writew(PCI_IO_RANGE_TYPE_32 | (PCI_IO_RANGE_TYPE_32 << 8),
pci->dbi_base + PCI_IO_BASE);
@@ -826,25 +840,16 @@ static int __init ks_pcie_host_init(struct dw_pcie_rp *pp)
if (ret < 0)
return ret;
-#ifdef CONFIG_ARM
- /*
- * PCIe access errors that result into OCP errors are caught by ARM as
- * "External aborts"
- */
- hook_fault_code(17, ks_pcie_fault, SIGBUS, 0,
- "Asynchronous external abort");
-#endif
-
return 0;
}
static const struct dw_pcie_host_ops ks_pcie_host_ops = {
- .host_init = ks_pcie_host_init,
- .msi_host_init = ks_pcie_msi_host_init,
+ .init = ks_pcie_host_init,
+ .msi_init = ks_pcie_msi_host_init,
};
static const struct dw_pcie_host_ops ks_pcie_am654_host_ops = {
- .host_init = ks_pcie_host_init,
+ .init = ks_pcie_host_init,
};
static irqreturn_t ks_pcie_err_irq_handler(int irq, void *priv)
@@ -882,7 +887,7 @@ static void ks_pcie_am654_ep_init(struct dw_pcie_ep *ep)
dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, flags);
}
-static void ks_pcie_am654_raise_legacy_irq(struct keystone_pcie *ks_pcie)
+static void ks_pcie_am654_raise_intx_irq(struct keystone_pcie *ks_pcie)
{
struct dw_pcie *pci = ks_pcie->pci;
u8 int_pin;
@@ -901,20 +906,19 @@ static void ks_pcie_am654_raise_legacy_irq(struct keystone_pcie *ks_pcie)
}
static int ks_pcie_am654_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
- enum pci_epc_irq_type type,
- u16 interrupt_num)
+ unsigned int type, u16 interrupt_num)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
switch (type) {
- case PCI_EPC_IRQ_LEGACY:
- ks_pcie_am654_raise_legacy_irq(ks_pcie);
+ case PCI_IRQ_INTX:
+ ks_pcie_am654_raise_intx_irq(ks_pcie);
break;
- case PCI_EPC_IRQ_MSI:
+ case PCI_IRQ_MSI:
dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
break;
- case PCI_EPC_IRQ_MSIX:
+ case PCI_IRQ_MSIX:
dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num);
break;
default:
@@ -926,16 +930,15 @@ static int ks_pcie_am654_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
}
static const struct pci_epc_features ks_pcie_am654_epc_features = {
- .linkup_notifier = false,
.msi_capable = true,
.msix_capable = true,
- .reserved_bar = 1 << BAR_0 | 1 << BAR_1,
- .bar_fixed_64bit = 1 << BAR_0,
- .bar_fixed_size[2] = SZ_1M,
- .bar_fixed_size[3] = SZ_64K,
- .bar_fixed_size[4] = 256,
- .bar_fixed_size[5] = SZ_1M,
- .align = SZ_1M,
+ .bar[BAR_0] = { .type = BAR_RESERVED, },
+ .bar[BAR_1] = { .type = BAR_RESERVED, },
+ .bar[BAR_2] = { .type = BAR_RESIZABLE, },
+ .bar[BAR_3] = { .type = BAR_FIXED, .fixed_size = SZ_64K, },
+ .bar[BAR_4] = { .type = BAR_FIXED, .fixed_size = 256, },
+ .bar[BAR_5] = { .type = BAR_RESIZABLE, },
+ .align = SZ_64K,
};
static const struct pci_epc_features*
@@ -945,7 +948,7 @@ ks_pcie_am654_get_features(struct dw_pcie_ep *ep)
}
static const struct dw_pcie_ep_ops ks_pcie_am654_ep_ops = {
- .ep_init = ks_pcie_am654_ep_init,
+ .init = ks_pcie_am654_ep_init,
.raise_irq = ks_pcie_am654_raise_irq,
.get_features = &ks_pcie_am654_get_features,
};
@@ -1069,6 +1072,7 @@ static int ks_pcie_am654_set_mode(struct device *dev,
static const struct ks_pcie_of_data ks_pcie_rc_of_data = {
.host_ops = &ks_pcie_host_ops,
+ .mode = DW_PCIE_RC_TYPE,
.version = DW_PCIE_VER_365A,
};
@@ -1100,8 +1104,9 @@ static const struct of_device_id ks_pcie_of_match[] = {
},
{ },
};
+MODULE_DEVICE_TABLE(of, ks_pcie_of_match);
-static int __init ks_pcie_probe(struct platform_device *pdev)
+static int ks_pcie_probe(struct platform_device *pdev)
{
const struct dw_pcie_host_ops *host_ops;
const struct dw_pcie_ep_ops *ep_ops;
@@ -1166,8 +1171,8 @@ static int __init ks_pcie_probe(struct platform_device *pdev)
if (irq < 0)
return irq;
- ret = request_irq(irq, ks_pcie_err_irq_handler, IRQF_SHARED,
- "ks-pcie-error-irq", ks_pcie);
+ ret = devm_request_irq(dev, irq, ks_pcie_err_irq_handler, IRQF_SHARED,
+ "ks-pcie-error-irq", ks_pcie);
if (ret < 0) {
dev_err(dev, "failed to request error IRQ %d\n",
irq);
@@ -1178,11 +1183,11 @@ static int __init ks_pcie_probe(struct platform_device *pdev)
if (ret)
num_lanes = 1;
- phy = devm_kzalloc(dev, sizeof(*phy) * num_lanes, GFP_KERNEL);
+ phy = devm_kcalloc(dev, num_lanes, sizeof(*phy), GFP_KERNEL);
if (!phy)
return -ENOMEM;
- link = devm_kzalloc(dev, sizeof(*link) * num_lanes, GFP_KERNEL);
+ link = devm_kcalloc(dev, num_lanes, sizeof(*link), GFP_KERNEL);
if (!link)
return -ENOMEM;
@@ -1219,7 +1224,16 @@ static int __init ks_pcie_probe(struct platform_device *pdev)
goto err_link;
}
+ /* Obtain references to the PHYs */
+ for (i = 0; i < num_lanes; i++)
+ phy_pm_runtime_get_sync(ks_pcie->phy[i]);
+
ret = ks_pcie_enable_phy(ks_pcie);
+
+ /* Release references to the PHYs */
+ for (i = 0; i < num_lanes; i++)
+ phy_pm_runtime_put_sync(ks_pcie->phy[i]);
+
if (ret) {
dev_err(dev, "failed to enable phy\n");
goto err_link;
@@ -1282,15 +1296,28 @@ static int __init ks_pcie_probe(struct platform_device *pdev)
ret = dw_pcie_ep_init(&pci->ep);
if (ret < 0)
goto err_get_sync;
+
+ ret = dw_pcie_ep_init_registers(&pci->ep);
+ if (ret) {
+ dev_err(dev, "Failed to initialize DWC endpoint registers\n");
+ goto err_ep_init;
+ }
+
+ pci_epc_init_notify(pci->ep.epc);
+
break;
default:
dev_err(dev, "INVALID device type %d\n", mode);
+ ret = -EINVAL;
+ goto err_get_sync;
}
ks_pcie_enable_error_irq(ks_pcie);
return 0;
+err_ep_init:
+ dw_pcie_ep_deinit(&pci->ep);
err_get_sync:
pm_runtime_put(dev);
pm_runtime_disable(dev);
@@ -1303,7 +1330,7 @@ err_link:
return ret;
}
-static int __exit ks_pcie_remove(struct platform_device *pdev)
+static void ks_pcie_remove(struct platform_device *pdev)
{
struct keystone_pcie *ks_pcie = platform_get_drvdata(pdev);
struct device_link **link = ks_pcie->link;
@@ -1315,16 +1342,55 @@ static int __exit ks_pcie_remove(struct platform_device *pdev)
ks_pcie_disable_phy(ks_pcie);
while (num_lanes--)
device_link_del(link[num_lanes]);
-
- return 0;
}
-static struct platform_driver ks_pcie_driver __refdata = {
+static struct platform_driver ks_pcie_driver = {
.probe = ks_pcie_probe,
- .remove = __exit_p(ks_pcie_remove),
+ .remove = ks_pcie_remove,
.driver = {
.name = "keystone-pcie",
.of_match_table = ks_pcie_of_match,
},
};
+
+#ifdef CONFIG_ARM
+/*
+ * When a PCI device does not exist during config cycles, keystone host
+ * gets a bus error instead of returning 0xffffffff (PCI_ERROR_RESPONSE).
+ * This handler always returns 0 for this kind of fault.
+ */
+static int ks_pcie_fault(unsigned long addr, unsigned int fsr,
+ struct pt_regs *regs)
+{
+ unsigned long instr = *(unsigned long *)instruction_pointer(regs);
+
+ if ((instr & 0x0e100090) == 0x00100090) {
+ int reg = (instr >> 12) & 15;
+
+ regs->uregs[reg] = -1;
+ regs->ARM_pc += 4;
+ }
+
+ return 0;
+}
+
+static int __init ks_pcie_init(void)
+{
+ /*
+ * PCIe access errors that result into OCP errors are caught by ARM as
+ * "External aborts"
+ */
+ if (of_find_matching_node(NULL, ks_pcie_of_match))
+ hook_fault_code(17, ks_pcie_fault, SIGBUS, 0,
+ "Asynchronous external abort");
+
+ return platform_driver_register(&ks_pcie_driver);
+}
+device_initcall(ks_pcie_init);
+#else
builtin_platform_driver(ks_pcie_driver);
+#endif
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("PCIe controller driver for Texas Instruments Keystone SoCs");
+MODULE_AUTHOR("Murali Karicheri <m-karicheri2@ti.com>");
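
The dbi_cs2 handshake in the keystone hunks above writes CMD_STATUS and then spins until the bit reads back, because the bit lives in a different clock domain. What follows is a rough, stand-alone user-space sketch of that read-back pattern, not driver code: the register is simulated, names such as fake_cmd_status are hypothetical, and a retry bound is added that the driver's own loop does not have.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DBI_CS2 (1u << 5)	/* arbitrary bit position for this sketch */

static volatile uint32_t fake_cmd_status;	/* stands in for CMD_STATUS */

static uint32_t app_readl(void)		{ return fake_cmd_status; }
static void app_writel(uint32_t v)	{ fake_cmd_status = v; }

/* Set DBI_CS2 and wait (with a bound, unlike the driver) for it to read back. */
static bool set_dbi_mode(unsigned int max_tries)
{
	app_writel(app_readl() | DBI_CS2);

	while (max_tries--) {
		if (app_readl() & DBI_CS2)
			return true;	/* transition visible, safe to proceed */
	}
	return false;			/* hardware never reflected the write */
}

int main(void)
{
	printf("dbi mode %s\n", set_dbi_mode(1000) ? "entered" : "timed out");
	return 0;
}
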
diff --git a/drivers/pci/controller/dwc/pci-layerscape-ep.c b/drivers/pci/controller/dwc/pci-layerscape-ep.c
index de4c1758a6c3..a4a800699f89 100644
--- a/drivers/pci/controller/dwc/pci-layerscape-ep.c
+++ b/drivers/pci/controller/dwc/pci-layerscape-ep.c
@@ -45,10 +45,11 @@ struct ls_pcie_ep {
struct pci_epc_features *ls_epc;
const struct ls_pcie_ep_drvdata *drvdata;
int irq;
+ u32 lnkcap;
bool big_endian;
};
-static u32 ls_lut_readl(struct ls_pcie_ep *pcie, u32 offset)
+static u32 ls_pcie_pf_lut_readl(struct ls_pcie_ep *pcie, u32 offset)
{
struct dw_pcie *pci = pcie->pci;
@@ -58,7 +59,7 @@ static u32 ls_lut_readl(struct ls_pcie_ep *pcie, u32 offset)
return ioread32(pci->dbi_base + offset);
}
-static void ls_lut_writel(struct ls_pcie_ep *pcie, u32 offset, u32 value)
+static void ls_pcie_pf_lut_writel(struct ls_pcie_ep *pcie, u32 offset, u32 value)
{
struct dw_pcie *pci = pcie->pci;
@@ -73,22 +74,37 @@ static irqreturn_t ls_pcie_ep_event_handler(int irq, void *dev_id)
struct ls_pcie_ep *pcie = dev_id;
struct dw_pcie *pci = pcie->pci;
u32 val, cfg;
+ u8 offset;
- val = ls_lut_readl(pcie, PEX_PF0_PME_MES_DR);
- ls_lut_writel(pcie, PEX_PF0_PME_MES_DR, val);
+ val = ls_pcie_pf_lut_readl(pcie, PEX_PF0_PME_MES_DR);
+ ls_pcie_pf_lut_writel(pcie, PEX_PF0_PME_MES_DR, val);
if (!val)
return IRQ_NONE;
if (val & PEX_PF0_PME_MES_DR_LUD) {
- cfg = ls_lut_readl(pcie, PEX_PF0_CONFIG);
+
+ offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+
+ /*
+ * The values of the Maximum Link Width and Supported Link
+ * Speed from the Link Capabilities Register will be lost
+ * during link down or hot reset. Restore the initial value
+ * configured by the Reset Configuration Word (RCW).
+ */
+ dw_pcie_dbi_ro_wr_en(pci);
+ dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, pcie->lnkcap);
+ dw_pcie_dbi_ro_wr_dis(pci);
+
+ cfg = ls_pcie_pf_lut_readl(pcie, PEX_PF0_CONFIG);
cfg |= PEX_PF0_CFG_READY;
- ls_lut_writel(pcie, PEX_PF0_CONFIG, cfg);
+ ls_pcie_pf_lut_writel(pcie, PEX_PF0_CONFIG, cfg);
dw_pcie_ep_linkup(&pci->ep);
dev_dbg(pci->dev, "Link up\n");
} else if (val & PEX_PF0_PME_MES_DR_LDD) {
dev_dbg(pci->dev, "Link down\n");
+ dw_pcie_ep_linkdown(&pci->ep);
} else if (val & PEX_PF0_PME_MES_DR_HRD) {
dev_dbg(pci->dev, "Hot reset\n");
}
@@ -114,10 +130,10 @@ static int ls_pcie_ep_interrupt_init(struct ls_pcie_ep *pcie,
}
/* Enable interrupts */
- val = ls_lut_readl(pcie, PEX_PF0_PME_MES_IER);
+ val = ls_pcie_pf_lut_readl(pcie, PEX_PF0_PME_MES_IER);
val |= PEX_PF0_PME_MES_IER_LDDIE | PEX_PF0_PME_MES_IER_HRDIE |
PEX_PF0_PME_MES_IER_LUDIE;
- ls_lut_writel(pcie, PEX_PF0_PME_MES_IER, val);
+ ls_pcie_pf_lut_writel(pcie, PEX_PF0_PME_MES_IER, val);
return 0;
}
@@ -150,16 +166,16 @@ static void ls_pcie_ep_init(struct dw_pcie_ep *ep)
}
static int ls_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
- enum pci_epc_irq_type type, u16 interrupt_num)
+ unsigned int type, u16 interrupt_num)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
switch (type) {
- case PCI_EPC_IRQ_LEGACY:
- return dw_pcie_ep_raise_legacy_irq(ep, func_no);
- case PCI_EPC_IRQ_MSI:
+ case PCI_IRQ_INTX:
+ return dw_pcie_ep_raise_intx_irq(ep, func_no);
+ case PCI_IRQ_MSI:
return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
- case PCI_EPC_IRQ_MSIX:
+ case PCI_IRQ_MSIX:
return dw_pcie_ep_raise_msix_irq_doorbell(ep, func_no,
interrupt_num);
default:
@@ -168,8 +184,7 @@ static int ls_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
}
}
-static unsigned int ls_pcie_ep_func_conf_select(struct dw_pcie_ep *ep,
- u8 func_no)
+static unsigned int ls_pcie_ep_get_dbi_offset(struct dw_pcie_ep *ep, u8 func_no)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
struct ls_pcie_ep *pcie = to_ls_pcie_ep(pci);
@@ -179,10 +194,10 @@ static unsigned int ls_pcie_ep_func_conf_select(struct dw_pcie_ep *ep,
}
static const struct dw_pcie_ep_ops ls_pcie_ep_ops = {
- .ep_init = ls_pcie_ep_init,
+ .init = ls_pcie_ep_init,
.raise_irq = ls_pcie_ep_raise_irq,
.get_features = ls_pcie_ep_get_features,
- .func_conf_select = ls_pcie_ep_func_conf_select,
+ .get_dbi_offset = ls_pcie_ep_get_dbi_offset,
};
static const struct ls_pcie_ep_drvdata ls1_ep_drvdata = {
@@ -215,6 +230,7 @@ static int __init ls_pcie_ep_probe(struct platform_device *pdev)
struct ls_pcie_ep *pcie;
struct pci_epc_features *ls_epc;
struct resource *dbi_base;
+ u8 offset;
int ret;
pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
@@ -234,7 +250,10 @@ static int __init ls_pcie_ep_probe(struct platform_device *pdev)
pci->dev = dev;
pci->ops = pcie->drvdata->dw_pcie_ops;
- ls_epc->bar_fixed_64bit = (1 << BAR_2) | (1 << BAR_4);
+ ls_epc->bar[BAR_2].only_64bit = true;
+ ls_epc->bar[BAR_3].type = BAR_RESERVED;
+ ls_epc->bar[BAR_4].only_64bit = true;
+ ls_epc->bar[BAR_5].type = BAR_RESERVED;
ls_epc->linkup_notifier = true;
pcie->pci = pci;
@@ -249,12 +268,26 @@ static int __init ls_pcie_ep_probe(struct platform_device *pdev)
pcie->big_endian = of_property_read_bool(dev->of_node, "big-endian");
+ dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
+
platform_set_drvdata(pdev, pcie);
+ offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ pcie->lnkcap = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
+
ret = dw_pcie_ep_init(&pci->ep);
if (ret)
return ret;
+ ret = dw_pcie_ep_init_registers(&pci->ep);
+ if (ret) {
+ dev_err(dev, "Failed to initialize DWC endpoint registers\n");
+ dw_pcie_ep_deinit(&pci->ep);
+ return ret;
+ }
+
+ pci_epc_init_notify(pci->ep.epc);
+
return ls_pcie_ep_interrupt_init(pcie, pdev);
}
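
The Layerscape endpoint change above caches PCI_EXP_LNKCAP at probe and rewrites it on the link-up event, because a link-down or hot reset clears the RCW-programmed value. Below is a minimal stand-alone sketch of that save-at-probe/restore-on-event idea in plain C; the fake_ep structure and its fields are hypothetical, not part of the driver.

#include <stdint.h>
#include <stdio.h>

struct fake_ep {
	uint32_t lnkcap_reg;	/* stands in for the Link Capabilities register */
	uint32_t saved_lnkcap;	/* snapshot taken once at probe */
};

static void probe(struct fake_ep *ep)
{
	ep->saved_lnkcap = ep->lnkcap_reg;	/* value programmed by the RCW */
}

static void link_up_event(struct fake_ep *ep)
{
	/* A link-down/hot-reset cycle clobbered the register; restore it. */
	ep->lnkcap_reg = ep->saved_lnkcap;
}

int main(void)
{
	struct fake_ep ep = { .lnkcap_reg = 0x0047ec12 };	/* arbitrary value */

	probe(&ep);
	ep.lnkcap_reg = 0;		/* simulate the loss on hot reset */
	link_up_event(&ep);
	printf("restored LNKCAP: 0x%08x\n", (unsigned int)ep.lnkcap_reg);
	return 0;
}
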
diff --git a/drivers/pci/controller/dwc/pci-layerscape.c b/drivers/pci/controller/dwc/pci-layerscape.c
index ed5fb492fe08..a44b5c256d6e 100644
--- a/drivers/pci/controller/dwc/pci-layerscape.c
+++ b/drivers/pci/controller/dwc/pci-layerscape.c
@@ -8,9 +8,11 @@
* Author: Minghuan Lian <Minghuan.Lian@freescale.com>
*/
+#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/init.h>
+#include <linux/iopoll.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
@@ -20,6 +22,7 @@
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
+#include "../../pci.h"
#include "pcie-designware.h"
/* PEX Internal Configuration Registers */
@@ -27,12 +30,46 @@
#define PCIE_ABSERR 0x8d0 /* Bridge Slave Error Response Register */
#define PCIE_ABSERR_SETTING 0x9401 /* Forward error of non-posted request */
+/* PF Message Command Register */
+#define LS_PCIE_PF_MCR 0x2c
+#define PF_MCR_PTOMR BIT(0)
+#define PF_MCR_EXL2S BIT(1)
+
+/* LS1021A PEXn PM Write Control Register */
+#define SCFG_PEXPMWRCR(idx) (0x5c + (idx) * 0x64)
+#define PMXMTTURNOFF BIT(31)
+#define SCFG_PEXSFTRSTCR 0x190
+#define PEXSR(idx) BIT(idx)
+
+/* LS1043A PEX PME control register */
+#define SCFG_PEXPMECR 0x144
+#define PEXPME(idx) BIT(31 - (idx) * 4)
+
+/* LS1043A PEX LUT debug register */
+#define LS_PCIE_LDBG 0x7fc
+#define LDBG_SR BIT(30)
+#define LDBG_WE BIT(31)
+
#define PCIE_IATU_NUM 6
+struct ls_pcie_drvdata {
+ const u32 pf_lut_off;
+ const struct dw_pcie_host_ops *ops;
+ int (*exit_from_l2)(struct dw_pcie_rp *pp);
+ bool scfg_support;
+ bool pm_support;
+};
+
struct ls_pcie {
struct dw_pcie *pci;
+ const struct ls_pcie_drvdata *drvdata;
+ void __iomem *pf_lut_base;
+ struct regmap *scfg;
+ int index;
+ bool big_endian;
};
+#define ls_pcie_pf_lut_readl_addr(addr) ls_pcie_pf_lut_readl(pcie, addr)
#define to_ls_pcie(x) dev_get_drvdata((x)->dev)
static bool ls_pcie_is_bridge(struct ls_pcie *pcie)
@@ -41,7 +78,7 @@ static bool ls_pcie_is_bridge(struct ls_pcie *pcie)
u32 header_type;
header_type = ioread8(pci->dbi_base + PCI_HEADER_TYPE);
- header_type &= 0x7f;
+ header_type &= PCI_HEADER_TYPE_MASK;
return header_type == PCI_HEADER_TYPE_BRIDGE;
}
@@ -73,6 +110,70 @@ static void ls_pcie_fix_error_response(struct ls_pcie *pcie)
iowrite32(PCIE_ABSERR_SETTING, pci->dbi_base + PCIE_ABSERR);
}
+static u32 ls_pcie_pf_lut_readl(struct ls_pcie *pcie, u32 off)
+{
+ if (pcie->big_endian)
+ return ioread32be(pcie->pf_lut_base + off);
+
+ return ioread32(pcie->pf_lut_base + off);
+}
+
+static void ls_pcie_pf_lut_writel(struct ls_pcie *pcie, u32 off, u32 val)
+{
+ if (pcie->big_endian)
+ iowrite32be(val, pcie->pf_lut_base + off);
+ else
+ iowrite32(val, pcie->pf_lut_base + off);
+}
+
+static void ls_pcie_send_turnoff_msg(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct ls_pcie *pcie = to_ls_pcie(pci);
+ u32 val;
+ int ret;
+
+ val = ls_pcie_pf_lut_readl(pcie, LS_PCIE_PF_MCR);
+ val |= PF_MCR_PTOMR;
+ ls_pcie_pf_lut_writel(pcie, LS_PCIE_PF_MCR, val);
+
+ ret = readx_poll_timeout(ls_pcie_pf_lut_readl_addr, LS_PCIE_PF_MCR,
+ val, !(val & PF_MCR_PTOMR),
+ PCIE_PME_TO_L2_TIMEOUT_US/10,
+ PCIE_PME_TO_L2_TIMEOUT_US);
+ if (ret)
+ dev_err(pcie->pci->dev, "PME_Turn_off timeout\n");
+}
+
+static int ls_pcie_exit_from_l2(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct ls_pcie *pcie = to_ls_pcie(pci);
+ u32 val;
+ int ret;
+
+ /*
+ * Set PF_MCR_EXL2S bit in LS_PCIE_PF_MCR register for the link
+ * to exit L2 state.
+ */
+ val = ls_pcie_pf_lut_readl(pcie, LS_PCIE_PF_MCR);
+ val |= PF_MCR_EXL2S;
+ ls_pcie_pf_lut_writel(pcie, LS_PCIE_PF_MCR, val);
+
+ /*
+ * L2 exit timeout of 10ms is not defined in the specifications;
+ * it was chosen based on empirical observations.
+ */
+ ret = readx_poll_timeout(ls_pcie_pf_lut_readl_addr, LS_PCIE_PF_MCR,
+ val, !(val & PF_MCR_EXL2S),
+ 1000,
+ 10000);
+ if (ret)
+ dev_err(pcie->pci->dev, "L2 exit timeout\n");
+
+ return ret;
+}
+
static int ls_pcie_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
@@ -89,20 +190,135 @@ static int ls_pcie_host_init(struct dw_pcie_rp *pp)
return 0;
}
+static void scfg_pcie_send_turnoff_msg(struct regmap *scfg, u32 reg, u32 mask)
+{
+ /* Send PME_Turn_Off message */
+ regmap_write_bits(scfg, reg, mask, mask);
+
+ /*
+ * There is no specific register to check for PME_To_Ack from endpoint.
+ * So on the safe side, wait for PCIE_PME_TO_L2_TIMEOUT_US.
+ */
+ mdelay(PCIE_PME_TO_L2_TIMEOUT_US/1000);
+
+ /*
+ * Layerscape hardware reference manual recommends clearing the PMXMTTURNOFF bit
+ * to complete the PME_Turn_Off handshake.
+ */
+ regmap_write_bits(scfg, reg, mask, 0);
+}
+
+static void ls1021a_pcie_send_turnoff_msg(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct ls_pcie *pcie = to_ls_pcie(pci);
+
+ scfg_pcie_send_turnoff_msg(pcie->scfg, SCFG_PEXPMWRCR(pcie->index), PMXMTTURNOFF);
+}
+
+static int scfg_pcie_exit_from_l2(struct regmap *scfg, u32 reg, u32 mask)
+{
+ /* Reset the PEX wrapper to bring the link out of L2 */
+ regmap_write_bits(scfg, reg, mask, mask);
+ regmap_write_bits(scfg, reg, mask, 0);
+
+ return 0;
+}
+
+static int ls1021a_pcie_exit_from_l2(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct ls_pcie *pcie = to_ls_pcie(pci);
+
+ return scfg_pcie_exit_from_l2(pcie->scfg, SCFG_PEXSFTRSTCR, PEXSR(pcie->index));
+}
+
+static void ls1043a_pcie_send_turnoff_msg(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct ls_pcie *pcie = to_ls_pcie(pci);
+
+ scfg_pcie_send_turnoff_msg(pcie->scfg, SCFG_PEXPMECR, PEXPME(pcie->index));
+}
+
+static int ls1043a_pcie_exit_from_l2(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct ls_pcie *pcie = to_ls_pcie(pci);
+ u32 val;
+
+ /*
+ * Reset the PEX wrapper to bring the link out of L2.
+ * LDBG_WE: allows the user to have write access to the PEXDBG[SR] for both setting and
+ * clearing the soft reset on the PEX module.
+ * LDBG_SR: When SR is set to 1, the PEX module enters soft reset.
+ */
+ val = ls_pcie_pf_lut_readl(pcie, LS_PCIE_LDBG);
+ val |= LDBG_WE;
+ ls_pcie_pf_lut_writel(pcie, LS_PCIE_LDBG, val);
+
+ val = ls_pcie_pf_lut_readl(pcie, LS_PCIE_LDBG);
+ val |= LDBG_SR;
+ ls_pcie_pf_lut_writel(pcie, LS_PCIE_LDBG, val);
+
+ val = ls_pcie_pf_lut_readl(pcie, LS_PCIE_LDBG);
+ val &= ~LDBG_SR;
+ ls_pcie_pf_lut_writel(pcie, LS_PCIE_LDBG, val);
+
+ val = ls_pcie_pf_lut_readl(pcie, LS_PCIE_LDBG);
+ val &= ~LDBG_WE;
+ ls_pcie_pf_lut_writel(pcie, LS_PCIE_LDBG, val);
+
+ return 0;
+}
+
static const struct dw_pcie_host_ops ls_pcie_host_ops = {
- .host_init = ls_pcie_host_init,
+ .init = ls_pcie_host_init,
+ .pme_turn_off = ls_pcie_send_turnoff_msg,
+};
+
+static const struct dw_pcie_host_ops ls1021a_pcie_host_ops = {
+ .init = ls_pcie_host_init,
+ .pme_turn_off = ls1021a_pcie_send_turnoff_msg,
+};
+
+static const struct ls_pcie_drvdata ls1021a_drvdata = {
+ .pm_support = true,
+ .scfg_support = true,
+ .ops = &ls1021a_pcie_host_ops,
+ .exit_from_l2 = ls1021a_pcie_exit_from_l2,
+};
+
+static const struct dw_pcie_host_ops ls1043a_pcie_host_ops = {
+ .init = ls_pcie_host_init,
+ .pme_turn_off = ls1043a_pcie_send_turnoff_msg,
+};
+
+static const struct ls_pcie_drvdata ls1043a_drvdata = {
+ .pf_lut_off = 0x10000,
+ .pm_support = true,
+ .scfg_support = true,
+ .ops = &ls1043a_pcie_host_ops,
+ .exit_from_l2 = ls1043a_pcie_exit_from_l2,
+};
+
+static const struct ls_pcie_drvdata layerscape_drvdata = {
+ .pf_lut_off = 0xc0000,
+ .pm_support = true,
+ .ops = &ls_pcie_host_ops,
+ .exit_from_l2 = ls_pcie_exit_from_l2,
};
static const struct of_device_id ls_pcie_of_match[] = {
- { .compatible = "fsl,ls1012a-pcie", },
- { .compatible = "fsl,ls1021a-pcie", },
- { .compatible = "fsl,ls1028a-pcie", },
- { .compatible = "fsl,ls1043a-pcie", },
- { .compatible = "fsl,ls1046a-pcie", },
- { .compatible = "fsl,ls2080a-pcie", },
- { .compatible = "fsl,ls2085a-pcie", },
- { .compatible = "fsl,ls2088a-pcie", },
- { .compatible = "fsl,ls1088a-pcie", },
+ { .compatible = "fsl,ls1012a-pcie", .data = &layerscape_drvdata },
+ { .compatible = "fsl,ls1021a-pcie", .data = &ls1021a_drvdata },
+ { .compatible = "fsl,ls1028a-pcie", .data = &layerscape_drvdata },
+ { .compatible = "fsl,ls1043a-pcie", .data = &ls1043a_drvdata },
+ { .compatible = "fsl,ls1046a-pcie", .data = &layerscape_drvdata },
+ { .compatible = "fsl,ls2080a-pcie", .data = &layerscape_drvdata },
+ { .compatible = "fsl,ls2085a-pcie", .data = &layerscape_drvdata },
+ { .compatible = "fsl,ls2088a-pcie", .data = &layerscape_drvdata },
+ { .compatible = "fsl,ls1088a-pcie", .data = &layerscape_drvdata },
{ },
};
@@ -112,6 +328,7 @@ static int ls_pcie_probe(struct platform_device *pdev)
struct dw_pcie *pci;
struct ls_pcie *pcie;
struct resource *dbi_base;
+ u32 index[2];
pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
if (!pcie)
@@ -121,16 +338,34 @@ static int ls_pcie_probe(struct platform_device *pdev)
if (!pci)
return -ENOMEM;
- pci->dev = dev;
- pci->pp.ops = &ls_pcie_host_ops;
+ pcie->drvdata = of_device_get_match_data(dev);
+ pci->dev = dev;
pcie->pci = pci;
+ pci->pp.ops = pcie->drvdata->ops;
dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
pci->dbi_base = devm_pci_remap_cfg_resource(dev, dbi_base);
if (IS_ERR(pci->dbi_base))
return PTR_ERR(pci->dbi_base);
+ pcie->big_endian = of_property_read_bool(dev->of_node, "big-endian");
+
+ pcie->pf_lut_base = pci->dbi_base + pcie->drvdata->pf_lut_off;
+
+ if (pcie->drvdata->scfg_support) {
+ pcie->scfg =
+ syscon_regmap_lookup_by_phandle_args(dev->of_node,
+ "fsl,pcie-scfg", 1,
+ index);
+ if (IS_ERR(pcie->scfg)) {
+ dev_err(dev, "No syscfg phandle specified\n");
+ return PTR_ERR(pcie->scfg);
+ }
+
+ pcie->index = index[1];
+ }
+
if (!ls_pcie_is_bridge(pcie))
return -ENODEV;
@@ -139,12 +374,42 @@ static int ls_pcie_probe(struct platform_device *pdev)
return dw_pcie_host_init(&pci->pp);
}
+static int ls_pcie_suspend_noirq(struct device *dev)
+{
+ struct ls_pcie *pcie = dev_get_drvdata(dev);
+
+ if (!pcie->drvdata->pm_support)
+ return 0;
+
+ return dw_pcie_suspend_noirq(pcie->pci);
+}
+
+static int ls_pcie_resume_noirq(struct device *dev)
+{
+ struct ls_pcie *pcie = dev_get_drvdata(dev);
+ int ret;
+
+ if (!pcie->drvdata->pm_support)
+ return 0;
+
+ ret = pcie->drvdata->exit_from_l2(&pcie->pci->pp);
+ if (ret)
+ return ret;
+
+ return dw_pcie_resume_noirq(pcie->pci);
+}
+
+static const struct dev_pm_ops ls_pcie_pm_ops = {
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(ls_pcie_suspend_noirq, ls_pcie_resume_noirq)
+};
+
static struct platform_driver ls_pcie_driver = {
.probe = ls_pcie_probe,
.driver = {
.name = "layerscape-pcie",
.of_match_table = ls_pcie_of_match,
.suppress_bind_attrs = true,
+ .pm = &ls_pcie_pm_ops,
},
};
builtin_platform_driver(ls_pcie_driver);
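
Both the PME_Turn_Off and L2-exit waits added above use readx_poll_timeout(): read a register, test a condition, sleep briefly, and give up after a deadline. A rough user-space sketch of that bounded-polling shape follows; the reader, condition and timings are made-up stand-ins, not the driver's.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>
#include <errno.h>

static uint64_t now_us(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000 + ts.tv_nsec / 1000;
}

/* Poll read_fn() every sleep_us until cond(*val) holds or timeout_us elapses. */
static int poll_timeout(uint32_t (*read_fn)(void), bool (*cond)(uint32_t),
			uint32_t *val, uint64_t sleep_us, uint64_t timeout_us)
{
	uint64_t deadline = now_us() + timeout_us;

	for (;;) {
		*val = read_fn();
		if (cond(*val))
			return 0;
		if (now_us() > deadline)
			return -ETIMEDOUT;
		struct timespec ts = { .tv_nsec = (long)(sleep_us * 1000) };
		nanosleep(&ts, NULL);
	}
}

static uint32_t read_mcr(void)		{ return 0; }	/* pretend PTOMR already cleared */
static bool ptomr_clear(uint32_t v)	{ return !(v & 0x1); }

int main(void)
{
	uint32_t v;
	int ret = poll_timeout(read_mcr, ptomr_clear, &v, 1000, 10000);

	printf("poll returned %d\n", ret);
	return 0;
}
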
diff --git a/drivers/pci/controller/dwc/pci-meson.c b/drivers/pci/controller/dwc/pci-meson.c
index c1527693bed9..54b6a4196f17 100644
--- a/drivers/pci/controller/dwc/pci-meson.c
+++ b/drivers/pci/controller/dwc/pci-meson.c
@@ -9,14 +9,13 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
-#include <linux/of_device.h>
-#include <linux/of_gpio.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/resource.h>
#include <linux/types.h>
#include <linux/phy/phy.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include "pcie-designware.h"
@@ -109,10 +108,22 @@ static int meson_pcie_get_mems(struct platform_device *pdev,
struct meson_pcie *mp)
{
struct dw_pcie *pci = &mp->pci;
+ struct resource *res;
- pci->dbi_base = devm_platform_ioremap_resource_byname(pdev, "elbi");
- if (IS_ERR(pci->dbi_base))
- return PTR_ERR(pci->dbi_base);
+ /*
+ * For the broken DTs that supply 'dbi' as 'elbi', parse the 'elbi'
+ * region and assign it to both 'pci->elbi_base' and 'pci->dbi_base' so
+ * that the DWC core can skip parsing both regions.
+ */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "elbi");
+ if (res) {
+ pci->elbi_base = devm_pci_remap_cfg_resource(pci->dev, res);
+ if (IS_ERR(pci->elbi_base))
+ return PTR_ERR(pci->elbi_base);
+
+ pci->dbi_base = pci->elbi_base;
+ pci->dbi_phys_addr = res->start;
+ }
mp->cfg_base = devm_platform_ioremap_resource_byname(pdev, "cfg");
if (IS_ERR(mp->cfg_base))
@@ -163,6 +174,13 @@ static int meson_pcie_reset(struct meson_pcie *mp)
return 0;
}
+static inline void meson_pcie_disable_clock(void *data)
+{
+ struct clk *clk = data;
+
+ clk_disable_unprepare(clk);
+}
+
static inline struct clk *meson_pcie_probe_clock(struct device *dev,
const char *id, u64 rate)
{
@@ -187,9 +205,7 @@ static inline struct clk *meson_pcie_probe_clock(struct device *dev,
return ERR_PTR(ret);
}
- devm_add_action_or_reset(dev,
- (void (*) (void *))clk_disable_unprepare,
- clk);
+ devm_add_action_or_reset(dev, meson_pcie_disable_clock, clk);
return clk;
}
@@ -331,7 +347,7 @@ static struct pci_ops meson_pci_ops = {
.write = pci_generic_config_write,
};
-static int meson_pcie_link_up(struct dw_pcie *pci)
+static bool meson_pcie_link_up(struct dw_pcie *pci)
{
struct meson_pcie *mp = to_meson_pcie(pci);
struct device *dev = pci->dev;
@@ -359,7 +375,7 @@ static int meson_pcie_link_up(struct dw_pcie *pci)
dev_dbg(dev, "speed_okay\n");
if (smlh_up && rdlh_up && ltssm_up && speed_okay)
- return 1;
+ return true;
cnt++;
@@ -367,7 +383,7 @@ static int meson_pcie_link_up(struct dw_pcie *pci)
} while (cnt < WAIT_LINKUP_TIMEOUT);
dev_err(dev, "error: wait linkup timeout\n");
- return 0;
+ return false;
}
static int meson_pcie_host_init(struct dw_pcie_rp *pp)
@@ -384,7 +400,7 @@ static int meson_pcie_host_init(struct dw_pcie_rp *pp)
}
static const struct dw_pcie_host_ops meson_pcie_host_ops = {
- .host_init = meson_pcie_host_init,
+ .init = meson_pcie_host_init,
};
static const struct dw_pcie_ops dw_pcie_ops = {
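
The meson hunk above replaces a cast of clk_disable_unprepare() with a small wrapper whose signature matches the devm action callback; calling a function through a pointer of an incompatible type is undefined behaviour even when it happens to work. Here is a stand-alone sketch of the thunk idea in plain C, with a hypothetical clk type and a toy stand-in for the cleanup framework.

#include <stdio.h>

struct clk { const char *name; };

static void clk_disable_unprepare(struct clk *clk)
{
	printf("disabling %s\n", clk->name);
}

/* Thunk with the exact signature the cleanup framework expects. */
static void disable_clock_action(void *data)
{
	clk_disable_unprepare(data);	/* implicit void * -> struct clk * is fine */
}

/* Minimal stand-in for devm_add_action_or_reset(): just invokes the action. */
static void run_cleanup(void (*action)(void *), void *data)
{
	action(data);
}

int main(void)
{
	struct clk pclk = { .name = "pcie_pclk" };

	/* Correct: pass a function whose type matches the callback type. */
	run_cleanup(disable_clock_action, &pclk);

	/*
	 * Incorrect (what the old code did, shown only as a comment):
	 * run_cleanup((void (*)(void *))clk_disable_unprepare, &pclk);
	 */
	return 0;
}
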
diff --git a/drivers/pci/controller/dwc/pcie-al.c b/drivers/pci/controller/dwc/pcie-al.c
index b8cb77c9c4bd..345c281c74fe 100644
--- a/drivers/pci/controller/dwc/pcie-al.c
+++ b/drivers/pci/controller/dwc/pcie-al.c
@@ -242,18 +242,24 @@ static struct pci_ops al_child_pci_ops = {
.write = pci_generic_config_write,
};
-static void al_pcie_config_prepare(struct al_pcie *pcie)
+static int al_pcie_config_prepare(struct al_pcie *pcie)
{
struct al_pcie_target_bus_cfg *target_bus_cfg;
struct dw_pcie_rp *pp = &pcie->pci->pp;
unsigned int ecam_bus_mask;
+ struct resource_entry *ft;
u32 cfg_control_offset;
+ struct resource *bus;
u8 subordinate_bus;
u8 secondary_bus;
u32 cfg_control;
u32 reg;
- struct resource *bus = resource_list_first_type(&pp->bridge->windows, IORESOURCE_BUS)->res;
+ ft = resource_list_first_type(&pp->bridge->windows, IORESOURCE_BUS);
+ if (!ft)
+ return -ENODEV;
+
+ bus = ft->res;
target_bus_cfg = &pcie->target_bus_cfg;
ecam_bus_mask = (pcie->ecam_size >> PCIE_ECAM_BUS_SHIFT) - 1;
@@ -287,6 +293,8 @@ static void al_pcie_config_prepare(struct al_pcie *pcie)
FIELD_PREP(CFG_CONTROL_SEC_BUS_MASK, secondary_bus);
al_pcie_controller_writel(pcie, cfg_control_offset, reg);
+
+ return 0;
}
static int al_pcie_host_init(struct dw_pcie_rp *pp)
@@ -305,13 +313,15 @@ static int al_pcie_host_init(struct dw_pcie_rp *pp)
if (rc)
return rc;
- al_pcie_config_prepare(pcie);
+ rc = al_pcie_config_prepare(pcie);
+ if (rc)
+ return rc;
return 0;
}
static const struct dw_pcie_host_ops al_pcie_host_ops = {
- .host_init = al_pcie_host_init,
+ .init = al_pcie_host_init,
};
static int al_pcie_probe(struct platform_device *pdev)
@@ -342,6 +352,7 @@ static int al_pcie_probe(struct platform_device *pdev)
return -ENOENT;
}
al_pcie->ecam_size = resource_size(ecam_res);
+ pci->pp.native_ecam = true;
controller_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"controller");
diff --git a/drivers/pci/controller/dwc/pcie-amd-mdb.c b/drivers/pci/controller/dwc/pcie-amd-mdb.c
new file mode 100644
index 000000000000..3c6e837465bb
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-amd-mdb.c
@@ -0,0 +1,526 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host controller driver for AMD MDB PCIe Bridge
+ *
+ * Copyright (C) 2024-2025, Advanced Micro Devices, Inc.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/of_device.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/resource.h>
+#include <linux/types.h>
+
+#include "../../pci.h"
+#include "pcie-designware.h"
+
+#define AMD_MDB_TLP_IR_STATUS_MISC 0x4C0
+#define AMD_MDB_TLP_IR_MASK_MISC 0x4C4
+#define AMD_MDB_TLP_IR_ENABLE_MISC 0x4C8
+#define AMD_MDB_TLP_IR_DISABLE_MISC 0x4CC
+
+#define AMD_MDB_TLP_PCIE_INTX_MASK GENMASK(23, 16)
+
+#define AMD_MDB_PCIE_INTR_INTX_ASSERT(x) BIT((x) * 2)
+
+/* Interrupt registers definitions. */
+#define AMD_MDB_PCIE_INTR_CMPL_TIMEOUT 15
+#define AMD_MDB_PCIE_INTR_INTX 16
+#define AMD_MDB_PCIE_INTR_PM_PME_RCVD 24
+#define AMD_MDB_PCIE_INTR_PME_TO_ACK_RCVD 25
+#define AMD_MDB_PCIE_INTR_MISC_CORRECTABLE 26
+#define AMD_MDB_PCIE_INTR_NONFATAL 27
+#define AMD_MDB_PCIE_INTR_FATAL 28
+
+#define IMR(x) BIT(AMD_MDB_PCIE_INTR_ ##x)
+#define AMD_MDB_PCIE_IMR_ALL_MASK \
+ ( \
+ IMR(CMPL_TIMEOUT) | \
+ IMR(PM_PME_RCVD) | \
+ IMR(PME_TO_ACK_RCVD) | \
+ IMR(MISC_CORRECTABLE) | \
+ IMR(NONFATAL) | \
+ IMR(FATAL) | \
+ AMD_MDB_TLP_PCIE_INTX_MASK \
+ )
+
+/**
+ * struct amd_mdb_pcie - PCIe port information
+ * @pci: DesignWare PCIe controller structure
+ * @slcr: MDB System Level Control and Status Register (SLCR) base
+ * @intx_domain: INTx IRQ domain pointer
+ * @mdb_domain: MDB IRQ domain pointer
+ * @perst_gpio: GPIO descriptor for PERST# signal handling
+ * @intx_irq: INTx IRQ interrupt number
+ */
+struct amd_mdb_pcie {
+ struct dw_pcie pci;
+ void __iomem *slcr;
+ struct irq_domain *intx_domain;
+ struct irq_domain *mdb_domain;
+ struct gpio_desc *perst_gpio;
+ int intx_irq;
+};
+
+static const struct dw_pcie_host_ops amd_mdb_pcie_host_ops = {
+};
+
+static void amd_mdb_intx_irq_mask(struct irq_data *data)
+{
+ struct amd_mdb_pcie *pcie = irq_data_get_irq_chip_data(data);
+ struct dw_pcie *pci = &pcie->pci;
+ struct dw_pcie_rp *port = &pci->pp;
+ unsigned long flags;
+ u32 val;
+
+ raw_spin_lock_irqsave(&port->lock, flags);
+ val = FIELD_PREP(AMD_MDB_TLP_PCIE_INTX_MASK,
+ AMD_MDB_PCIE_INTR_INTX_ASSERT(data->hwirq));
+
+ /*
+ * Writing '1' to a bit in AMD_MDB_TLP_IR_DISABLE_MISC disables that
+ * interrupt, writing '0' has no effect.
+ */
+ writel_relaxed(val, pcie->slcr + AMD_MDB_TLP_IR_DISABLE_MISC);
+ raw_spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static void amd_mdb_intx_irq_unmask(struct irq_data *data)
+{
+ struct amd_mdb_pcie *pcie = irq_data_get_irq_chip_data(data);
+ struct dw_pcie *pci = &pcie->pci;
+ struct dw_pcie_rp *port = &pci->pp;
+ unsigned long flags;
+ u32 val;
+
+ raw_spin_lock_irqsave(&port->lock, flags);
+ val = FIELD_PREP(AMD_MDB_TLP_PCIE_INTX_MASK,
+ AMD_MDB_PCIE_INTR_INTX_ASSERT(data->hwirq));
+
+ /*
+ * Writing '1' to a bit in AMD_MDB_TLP_IR_ENABLE_MISC enables that
+ * interrupt, writing '0' has no effect.
+ */
+ writel_relaxed(val, pcie->slcr + AMD_MDB_TLP_IR_ENABLE_MISC);
+ raw_spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static struct irq_chip amd_mdb_intx_irq_chip = {
+ .name = "AMD MDB INTx",
+ .irq_mask = amd_mdb_intx_irq_mask,
+ .irq_unmask = amd_mdb_intx_irq_unmask,
+};
+
+/**
+ * amd_mdb_pcie_intx_map - Set the handler for the INTx and mark IRQ as valid
+ * @domain: IRQ domain
+ * @irq: Virtual IRQ number
+ * @hwirq: Hardware interrupt number
+ *
+ * Return: Always returns '0'.
+ */
+static int amd_mdb_pcie_intx_map(struct irq_domain *domain,
+ unsigned int irq, irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &amd_mdb_intx_irq_chip,
+ handle_level_irq);
+ irq_set_chip_data(irq, domain->host_data);
+ irq_set_status_flags(irq, IRQ_LEVEL);
+
+ return 0;
+}
+
+/* INTx IRQ domain operations. */
+static const struct irq_domain_ops amd_intx_domain_ops = {
+ .map = amd_mdb_pcie_intx_map,
+};
+
+static irqreturn_t dw_pcie_rp_intx(int irq, void *args)
+{
+ struct amd_mdb_pcie *pcie = args;
+ unsigned long val;
+ int i, int_status;
+
+ val = readl_relaxed(pcie->slcr + AMD_MDB_TLP_IR_STATUS_MISC);
+ int_status = FIELD_GET(AMD_MDB_TLP_PCIE_INTX_MASK, val);
+
+ for (i = 0; i < PCI_NUM_INTX; i++) {
+ if (int_status & AMD_MDB_PCIE_INTR_INTX_ASSERT(i))
+ generic_handle_domain_irq(pcie->intx_domain, i);
+ }
+
+ return IRQ_HANDLED;
+}
+
+#define _IC(x, s)[AMD_MDB_PCIE_INTR_ ## x] = { __stringify(x), s }
+
+static const struct {
+ const char *sym;
+ const char *str;
+} intr_cause[32] = {
+ _IC(CMPL_TIMEOUT, "Completion timeout"),
+ _IC(PM_PME_RCVD, "PM_PME message received"),
+ _IC(PME_TO_ACK_RCVD, "PME_TO_ACK message received"),
+ _IC(MISC_CORRECTABLE, "Correctable error message"),
+ _IC(NONFATAL, "Non fatal error message"),
+ _IC(FATAL, "Fatal error message"),
+};
+
+static void amd_mdb_event_irq_mask(struct irq_data *d)
+{
+ struct amd_mdb_pcie *pcie = irq_data_get_irq_chip_data(d);
+ struct dw_pcie *pci = &pcie->pci;
+ struct dw_pcie_rp *port = &pci->pp;
+ unsigned long flags;
+ u32 val;
+
+ raw_spin_lock_irqsave(&port->lock, flags);
+ val = BIT(d->hwirq);
+ writel_relaxed(val, pcie->slcr + AMD_MDB_TLP_IR_DISABLE_MISC);
+ raw_spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static void amd_mdb_event_irq_unmask(struct irq_data *d)
+{
+ struct amd_mdb_pcie *pcie = irq_data_get_irq_chip_data(d);
+ struct dw_pcie *pci = &pcie->pci;
+ struct dw_pcie_rp *port = &pci->pp;
+ unsigned long flags;
+ u32 val;
+
+ raw_spin_lock_irqsave(&port->lock, flags);
+ val = BIT(d->hwirq);
+ writel_relaxed(val, pcie->slcr + AMD_MDB_TLP_IR_ENABLE_MISC);
+ raw_spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static struct irq_chip amd_mdb_event_irq_chip = {
+ .name = "AMD MDB RC-Event",
+ .irq_mask = amd_mdb_event_irq_mask,
+ .irq_unmask = amd_mdb_event_irq_unmask,
+};
+
+static int amd_mdb_pcie_event_map(struct irq_domain *domain,
+ unsigned int irq, irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &amd_mdb_event_irq_chip,
+ handle_level_irq);
+ irq_set_chip_data(irq, domain->host_data);
+ irq_set_status_flags(irq, IRQ_LEVEL);
+
+ return 0;
+}
+
+static const struct irq_domain_ops event_domain_ops = {
+ .map = amd_mdb_pcie_event_map,
+};
+
+static irqreturn_t amd_mdb_pcie_event(int irq, void *args)
+{
+ struct amd_mdb_pcie *pcie = args;
+ unsigned long val;
+ int i;
+
+ val = readl_relaxed(pcie->slcr + AMD_MDB_TLP_IR_STATUS_MISC);
+ val &= ~readl_relaxed(pcie->slcr + AMD_MDB_TLP_IR_MASK_MISC);
+ for_each_set_bit(i, &val, 32)
+ generic_handle_domain_irq(pcie->mdb_domain, i);
+ writel_relaxed(val, pcie->slcr + AMD_MDB_TLP_IR_STATUS_MISC);
+
+ return IRQ_HANDLED;
+}
+
+static void amd_mdb_pcie_free_irq_domains(struct amd_mdb_pcie *pcie)
+{
+ if (pcie->intx_domain) {
+ irq_domain_remove(pcie->intx_domain);
+ pcie->intx_domain = NULL;
+ }
+
+ if (pcie->mdb_domain) {
+ irq_domain_remove(pcie->mdb_domain);
+ pcie->mdb_domain = NULL;
+ }
+}
+
+static int amd_mdb_pcie_init_port(struct amd_mdb_pcie *pcie)
+{
+ unsigned long val;
+
+ /* Disable all TLP interrupts. */
+ writel_relaxed(AMD_MDB_PCIE_IMR_ALL_MASK,
+ pcie->slcr + AMD_MDB_TLP_IR_DISABLE_MISC);
+
+ /* Clear pending TLP interrupts. */
+ val = readl_relaxed(pcie->slcr + AMD_MDB_TLP_IR_STATUS_MISC);
+ val &= AMD_MDB_PCIE_IMR_ALL_MASK;
+ writel_relaxed(val, pcie->slcr + AMD_MDB_TLP_IR_STATUS_MISC);
+
+ /* Enable all TLP interrupts. */
+ writel_relaxed(AMD_MDB_PCIE_IMR_ALL_MASK,
+ pcie->slcr + AMD_MDB_TLP_IR_ENABLE_MISC);
+
+ return 0;
+}
+
+/**
+ * amd_mdb_pcie_init_irq_domains - Initialize IRQ domain
+ * @pcie: PCIe port information
+ * @pdev: Platform device
+ *
+ * Return: Returns '0' on success and error value on failure.
+ */
+static int amd_mdb_pcie_init_irq_domains(struct amd_mdb_pcie *pcie,
+ struct platform_device *pdev)
+{
+ struct dw_pcie *pci = &pcie->pci;
+ struct dw_pcie_rp *pp = &pci->pp;
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct device_node *pcie_intc_node;
+ int err;
+
+ pcie_intc_node = of_get_child_by_name(node, "interrupt-controller");
+ if (!pcie_intc_node) {
+ dev_err(dev, "No PCIe Intc node found\n");
+ return -ENODEV;
+ }
+
+ pcie->mdb_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node), 32,
+ &event_domain_ops, pcie);
+ if (!pcie->mdb_domain) {
+ err = -ENOMEM;
+ dev_err(dev, "Failed to add MDB domain\n");
+ goto out;
+ }
+
+ irq_domain_update_bus_token(pcie->mdb_domain, DOMAIN_BUS_NEXUS);
+
+ pcie->intx_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node),
+ PCI_NUM_INTX, &amd_intx_domain_ops, pcie);
+ if (!pcie->intx_domain) {
+ err = -ENOMEM;
+ dev_err(dev, "Failed to add INTx domain\n");
+ goto mdb_out;
+ }
+
+ of_node_put(pcie_intc_node);
+ irq_domain_update_bus_token(pcie->intx_domain, DOMAIN_BUS_WIRED);
+
+ raw_spin_lock_init(&pp->lock);
+
+ return 0;
+mdb_out:
+ amd_mdb_pcie_free_irq_domains(pcie);
+out:
+ of_node_put(pcie_intc_node);
+ return err;
+}
+
+static irqreturn_t amd_mdb_pcie_intr_handler(int irq, void *args)
+{
+ struct amd_mdb_pcie *pcie = args;
+ struct device *dev;
+ struct irq_data *d;
+
+ dev = pcie->pci.dev;
+
+ /*
+ * In the future, error reporting will be hooked to the AER subsystem.
+ * Currently, the driver prints a warning message to the user.
+ */
+ d = irq_domain_get_irq_data(pcie->mdb_domain, irq);
+ if (intr_cause[d->hwirq].str)
+ dev_warn(dev, "%s\n", intr_cause[d->hwirq].str);
+ else
+ dev_warn_once(dev, "Unknown IRQ %ld\n", d->hwirq);
+
+ return IRQ_HANDLED;
+}
+
+static int amd_mdb_setup_irq(struct amd_mdb_pcie *pcie,
+ struct platform_device *pdev)
+{
+ struct dw_pcie *pci = &pcie->pci;
+ struct dw_pcie_rp *pp = &pci->pp;
+ struct device *dev = &pdev->dev;
+ int i, irq, err;
+
+ amd_mdb_pcie_init_port(pcie);
+
+ pp->irq = platform_get_irq(pdev, 0);
+ if (pp->irq < 0)
+ return pp->irq;
+
+ for (i = 0; i < ARRAY_SIZE(intr_cause); i++) {
+ if (!intr_cause[i].str)
+ continue;
+
+ irq = irq_create_mapping(pcie->mdb_domain, i);
+ if (!irq) {
+ dev_err(dev, "Failed to map MDB domain interrupt\n");
+ return -ENOMEM;
+ }
+
+ err = devm_request_irq(dev, irq, amd_mdb_pcie_intr_handler,
+ IRQF_NO_THREAD, intr_cause[i].sym, pcie);
+ if (err) {
+ dev_err(dev, "Failed to request IRQ %d, err=%d\n",
+ irq, err);
+ return err;
+ }
+ }
+
+ pcie->intx_irq = irq_create_mapping(pcie->mdb_domain,
+ AMD_MDB_PCIE_INTR_INTX);
+ if (!pcie->intx_irq) {
+ dev_err(dev, "Failed to map INTx interrupt\n");
+ return -ENXIO;
+ }
+
+ err = devm_request_irq(dev, pcie->intx_irq, dw_pcie_rp_intx,
+ IRQF_NO_THREAD, NULL, pcie);
+ if (err) {
+ dev_err(dev, "Failed to request INTx IRQ %d, err=%d\n",
+ irq, err);
+ return err;
+ }
+
+ /* Plug the main event handler. */
+ err = devm_request_irq(dev, pp->irq, amd_mdb_pcie_event, IRQF_NO_THREAD,
+ "amd_mdb pcie_irq", pcie);
+ if (err) {
+ dev_err(dev, "Failed to request event IRQ %d, err=%d\n",
+ pp->irq, err);
+ return err;
+ }
+
+ return 0;
+}
+
+static int amd_mdb_parse_pcie_port(struct amd_mdb_pcie *pcie)
+{
+ struct device *dev = pcie->pci.dev;
+ struct device_node *pcie_port_node __maybe_unused;
+
+ /*
+ * This platform currently supports only one Root Port, so the loop
+ * will execute only once.
+ * TODO: Enhance the driver to handle multiple Root Ports in the future.
+ */
+ for_each_child_of_node_with_prefix(dev->of_node, pcie_port_node, "pcie") {
+ pcie->perst_gpio = devm_fwnode_gpiod_get(dev, of_fwnode_handle(pcie_port_node),
+ "reset", GPIOD_OUT_HIGH, NULL);
+ if (IS_ERR(pcie->perst_gpio))
+ return dev_err_probe(dev, PTR_ERR(pcie->perst_gpio),
+ "Failed to request reset GPIO\n");
+ return 0;
+ }
+
+ return -ENODEV;
+}
+
+static int amd_mdb_add_pcie_port(struct amd_mdb_pcie *pcie,
+ struct platform_device *pdev)
+{
+ struct dw_pcie *pci = &pcie->pci;
+ struct dw_pcie_rp *pp = &pci->pp;
+ struct device *dev = &pdev->dev;
+ int err;
+
+ pcie->slcr = devm_platform_ioremap_resource_byname(pdev, "slcr");
+ if (IS_ERR(pcie->slcr))
+ return PTR_ERR(pcie->slcr);
+
+ err = amd_mdb_pcie_init_irq_domains(pcie, pdev);
+ if (err)
+ return err;
+
+ err = amd_mdb_setup_irq(pcie, pdev);
+ if (err) {
+ dev_err(dev, "Failed to set up interrupts, err=%d\n", err);
+ goto out;
+ }
+
+ pp->ops = &amd_mdb_pcie_host_ops;
+
+ if (pcie->perst_gpio) {
+ mdelay(PCIE_T_PVPERL_MS);
+ gpiod_set_value_cansleep(pcie->perst_gpio, 0);
+ mdelay(PCIE_RESET_CONFIG_WAIT_MS);
+ }
+
+ err = dw_pcie_host_init(pp);
+ if (err) {
+ dev_err(dev, "Failed to initialize host, err=%d\n", err);
+ goto out;
+ }
+
+ return 0;
+
+out:
+ amd_mdb_pcie_free_irq_domains(pcie);
+ return err;
+}
+
+static int amd_mdb_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct amd_mdb_pcie *pcie;
+ struct dw_pcie *pci;
+ int ret;
+
+ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+ if (!pcie)
+ return -ENOMEM;
+
+ pci = &pcie->pci;
+ pci->dev = dev;
+
+ platform_set_drvdata(pdev, pcie);
+
+ ret = amd_mdb_parse_pcie_port(pcie);
+ /*
+ * If amd_mdb_parse_pcie_port returns -ENODEV, it indicates that the
+ * PCIe Bridge node was not found in the device tree. This is not
+ * considered a fatal error and will trigger a fallback where the
+ * reset GPIO is acquired directly from the PCIe Host Bridge node.
+ */
+ if (ret) {
+ if (ret != -ENODEV)
+ return ret;
+
+ pcie->perst_gpio = devm_gpiod_get_optional(dev, "reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(pcie->perst_gpio))
+ return dev_err_probe(dev, PTR_ERR(pcie->perst_gpio),
+ "Failed to request reset GPIO\n");
+ }
+
+ return amd_mdb_add_pcie_port(pcie, pdev);
+}
+
+static const struct of_device_id amd_mdb_pcie_of_match[] = {
+ {
+ .compatible = "amd,versal2-mdb-host",
+ },
+ {},
+};
+
+static struct platform_driver amd_mdb_pcie_driver = {
+ .driver = {
+ .name = "amd-mdb-pcie",
+ .of_match_table = amd_mdb_pcie_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = amd_mdb_pcie_probe,
+};
+
+builtin_platform_driver(amd_mdb_pcie_driver);
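
The amd-mdb interrupt helpers above compose register values with GENMASK()/FIELD_PREP() and decode status with FIELD_GET(). The stand-alone sketch below only mimics those kernel macros for illustration (it is not the kernel's implementation), reusing the 23:16 INTx field layout from the driver.

#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l)		(((~0u) << (l)) & (~0u >> (31 - (h))))
#define FIELD_SHIFT(mask)	(__builtin_ctz(mask))	/* GCC/Clang builtin */
#define FIELD_PREP(mask, val)	(((uint32_t)(val) << FIELD_SHIFT(mask)) & (mask))
#define FIELD_GET(mask, reg)	(((uint32_t)(reg) & (mask)) >> FIELD_SHIFT(mask))

#define TLP_PCIE_INTX_MASK	GENMASK(23, 16)
#define INTR_INTX_ASSERT(x)	(1u << ((x) * 2))

int main(void)
{
	/* Compose the enable value for INTx line 2, as the unmask handler does. */
	uint32_t val = FIELD_PREP(TLP_PCIE_INTX_MASK, INTR_INTX_ASSERT(2));

	printf("write value: 0x%08x\n", (unsigned int)val);	/* 0x00100000 */

	/* Decode a status register, as the INTx dispatch loop does. */
	uint32_t status = 0x00050000;
	printf("asserted lines field: 0x%02x\n",
	       (unsigned int)FIELD_GET(TLP_PCIE_INTX_MASK, status));
	return 0;
}
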
diff --git a/drivers/pci/controller/dwc/pcie-armada8k.c b/drivers/pci/controller/dwc/pcie-armada8k.c
index 5c999e15c357..c2650fd0d458 100644
--- a/drivers/pci/controller/dwc/pcie-armada8k.c
+++ b/drivers/pci/controller/dwc/pcie-armada8k.c
@@ -139,7 +139,7 @@ static int armada8k_pcie_setup_phys(struct armada8k_pcie *pcie)
return ret;
}
-static int armada8k_pcie_link_up(struct dw_pcie *pci)
+static bool armada8k_pcie_link_up(struct dw_pcie *pci)
{
u32 reg;
u32 mask = PCIE_GLB_STS_RDLH_LINK_UP | PCIE_GLB_STS_PHY_LINK_UP;
@@ -147,10 +147,10 @@ static int armada8k_pcie_link_up(struct dw_pcie *pci)
reg = dw_pcie_readl_dbi(pci, PCIE_GLOBAL_STATUS_REG);
if ((reg & mask) == mask)
- return 1;
+ return true;
dev_dbg(pci->dev, "No link detected (Global-Status: 0x%08x).\n", reg);
- return 0;
+ return false;
}
static int armada8k_pcie_start_link(struct dw_pcie *pci)
@@ -225,7 +225,7 @@ static irqreturn_t armada8k_pcie_irq_handler(int irq, void *arg)
}
static const struct dw_pcie_host_ops armada8k_pcie_host_ops = {
- .host_init = armada8k_pcie_host_init,
+ .init = armada8k_pcie_host_init,
};
static int armada8k_add_pcie_port(struct armada8k_pcie *pcie,
diff --git a/drivers/pci/controller/dwc/pcie-artpec6.c b/drivers/pci/controller/dwc/pcie-artpec6.c
index 98102079e26d..f4a136ee2daf 100644
--- a/drivers/pci/controller/dwc/pcie-artpec6.c
+++ b/drivers/pci/controller/dwc/pcie-artpec6.c
@@ -10,7 +10,7 @@
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/resource.h>
@@ -94,7 +94,7 @@ static void artpec6_pcie_writel(struct artpec6_pcie *artpec6_pcie, u32 offset, u
regmap_write(artpec6_pcie->regmap, offset, val);
}
-static u64 artpec6_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 pci_addr)
+static u64 artpec6_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 cpu_addr)
{
struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pci);
struct dw_pcie_rp *pp = &pci->pp;
@@ -102,13 +102,13 @@ static u64 artpec6_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 pci_addr)
switch (artpec6_pcie->mode) {
case DW_PCIE_RC_TYPE:
- return pci_addr - pp->cfg0_base;
+ return cpu_addr - pp->cfg0_base;
case DW_PCIE_EP_TYPE:
- return pci_addr - ep->phys_base;
+ return cpu_addr - ep->phys_base;
default:
dev_err(pci->dev, "UNKNOWN device type\n");
}
- return pci_addr;
+ return cpu_addr;
}
static int artpec6_pcie_establish_link(struct dw_pcie *pci)
@@ -333,7 +333,7 @@ static int artpec6_pcie_host_init(struct dw_pcie_rp *pp)
}
static const struct dw_pcie_host_ops artpec6_pcie_host_ops = {
- .host_init = artpec6_pcie_host_init,
+ .init = artpec6_pcie_host_init,
};
static void artpec6_pcie_ep_init(struct dw_pcie_ep *ep)
@@ -352,15 +352,15 @@ static void artpec6_pcie_ep_init(struct dw_pcie_ep *ep)
}
static int artpec6_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
- enum pci_epc_irq_type type, u16 interrupt_num)
+ unsigned int type, u16 interrupt_num)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
switch (type) {
- case PCI_EPC_IRQ_LEGACY:
- dev_err(pci->dev, "EP cannot trigger legacy IRQs\n");
+ case PCI_IRQ_INTX:
+ dev_err(pci->dev, "EP cannot trigger INTx IRQs\n");
return -EINVAL;
- case PCI_EPC_IRQ_MSI:
+ case PCI_IRQ_MSI:
return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
default:
dev_err(pci->dev, "UNKNOWN IRQ type\n");
@@ -369,9 +369,20 @@ static int artpec6_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
return 0;
}
+static const struct pci_epc_features artpec6_pcie_epc_features = {
+ .msi_capable = true,
+};
+
+static const struct pci_epc_features *
+artpec6_pcie_get_features(struct dw_pcie_ep *ep)
+{
+ return &artpec6_pcie_epc_features;
+}
+
static const struct dw_pcie_ep_ops pcie_ep_ops = {
- .ep_init = artpec6_pcie_ep_init,
+ .init = artpec6_pcie_ep_init,
.raise_irq = artpec6_pcie_raise_irq,
+ .get_features = artpec6_pcie_get_features,
};
static int artpec6_pcie_probe(struct platform_device *pdev)
@@ -441,7 +452,20 @@ static int artpec6_pcie_probe(struct platform_device *pdev)
pci->ep.ops = &pcie_ep_ops;
- return dw_pcie_ep_init(&pci->ep);
+ ret = dw_pcie_ep_init(&pci->ep);
+ if (ret)
+ return ret;
+
+ ret = dw_pcie_ep_init_registers(&pci->ep);
+ if (ret) {
+ dev_err(dev, "Failed to initialize DWC endpoint registers\n");
+ dw_pcie_ep_deinit(&pci->ep);
+ return ret;
+ }
+
+ pci_epc_init_notify(pci->ep.epc);
+
+ break;
default:
dev_err(dev, "INVALID device type %d\n", artpec6_pcie->mode);
}
diff --git a/drivers/pci/controller/dwc/pcie-bt1.c b/drivers/pci/controller/dwc/pcie-bt1.c
index 17e696797ff5..1340edc18d12 100644
--- a/drivers/pci/controller/dwc/pcie-bt1.c
+++ b/drivers/pci/controller/dwc/pcie-bt1.c
@@ -559,8 +559,8 @@ static void bt1_pcie_host_deinit(struct dw_pcie_rp *pp)
}
static const struct dw_pcie_host_ops bt1_pcie_host_ops = {
- .host_init = bt1_pcie_host_init,
- .host_deinit = bt1_pcie_host_deinit,
+ .init = bt1_pcie_host_init,
+ .deinit = bt1_pcie_host_deinit,
};
static struct bt1_pcie *bt1_pcie_create_data(struct platform_device *pdev)
@@ -632,7 +632,7 @@ MODULE_DEVICE_TABLE(of, bt1_pcie_of_match);
static struct platform_driver bt1_pcie_driver = {
.probe = bt1_pcie_probe,
- .remove_new = bt1_pcie_remove,
+ .remove = bt1_pcie_remove,
.driver = {
.name = "bt1-pcie",
.of_match_table = bt1_pcie_of_match,
diff --git a/drivers/pci/controller/dwc/pcie-designware-debugfs.c b/drivers/pci/controller/dwc/pcie-designware-debugfs.c
new file mode 100644
index 000000000000..0fbf86c0b97e
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-designware-debugfs.c
@@ -0,0 +1,927 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Synopsys DesignWare PCIe controller debugfs driver
+ *
+ * Copyright (C) 2025 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Author: Shradha Todi <shradha.t@samsung.com>
+ */
+
+#include <linux/debugfs.h>
+
+#include "pcie-designware.h"
+
+#define SD_STATUS_L1LANE_REG 0xb0
+#define PIPE_RXVALID BIT(18)
+#define PIPE_DETECT_LANE BIT(17)
+#define LANE_SELECT GENMASK(3, 0)
+
+#define ERR_INJ0_OFF 0x34
+#define EINJ_VAL_DIFF GENMASK(28, 16)
+#define EINJ_VC_NUM GENMASK(14, 12)
+#define EINJ_TYPE_SHIFT 8
+#define EINJ0_TYPE GENMASK(11, 8)
+#define EINJ1_TYPE BIT(8)
+#define EINJ2_TYPE GENMASK(9, 8)
+#define EINJ3_TYPE GENMASK(10, 8)
+#define EINJ4_TYPE GENMASK(10, 8)
+#define EINJ5_TYPE BIT(8)
+#define EINJ_COUNT GENMASK(7, 0)
+
+#define ERR_INJ_ENABLE_REG 0x30
+
+#define RAS_DES_EVENT_COUNTER_DATA_REG 0xc
+
+#define RAS_DES_EVENT_COUNTER_CTRL_REG 0x8
+#define EVENT_COUNTER_GROUP_SELECT GENMASK(27, 24)
+#define EVENT_COUNTER_EVENT_SELECT GENMASK(23, 16)
+#define EVENT_COUNTER_LANE_SELECT GENMASK(11, 8)
+#define EVENT_COUNTER_STATUS BIT(7)
+#define EVENT_COUNTER_ENABLE GENMASK(4, 2)
+#define PER_EVENT_ON 0x3
+#define PER_EVENT_OFF 0x1
+
+#define DWC_DEBUGFS_BUF_MAX 128
+
+/**
+ * struct dwc_pcie_rasdes_info - Stores information common to the controller
+ * @ras_cap_offset: RAS DES vendor specific extended capability offset
+ * @reg_event_lock: Mutex used for RAS DES shadow event registers
+ *
+ * Any parameter common to all debugfs files of a single controller is stored
+ * in this struct. It is allocated and assigned to the controller-specific
+ * struct dw_pcie during initialization.
+ */
+struct dwc_pcie_rasdes_info {
+ u32 ras_cap_offset;
+ struct mutex reg_event_lock;
+};
+
+/**
+ * struct dwc_pcie_rasdes_priv - Stores file-specific private data
+ * @pci: Reference to the dw_pcie structure
+ * @idx: Index of this file's entry in the corresponding array of structs
+ *
+ * All debugfs files will have this struct as their private data.
+ */
+struct dwc_pcie_rasdes_priv {
+ struct dw_pcie *pci;
+ int idx;
+};
+
+/**
+ * struct dwc_pcie_err_inj - Stores details about each error injection
+ * supported by DWC RAS DES
+ * @name: Name of the error that can be injected
+ * @err_inj_group: Group number to which the error belongs. The value
+ * can range from 0 to 5
+ * @err_inj_type: Error type within the group
+ */
+struct dwc_pcie_err_inj {
+ const char *name;
+ u32 err_inj_group;
+ u32 err_inj_type;
+};
+
+static const struct dwc_pcie_err_inj err_inj_list[] = {
+ {"tx_lcrc", 0x0, 0x0},
+ {"b16_crc_dllp", 0x0, 0x1},
+ {"b16_crc_upd_fc", 0x0, 0x2},
+ {"tx_ecrc", 0x0, 0x3},
+ {"fcrc_tlp", 0x0, 0x4},
+ {"parity_tsos", 0x0, 0x5},
+ {"parity_skpos", 0x0, 0x6},
+ {"rx_lcrc", 0x0, 0x8},
+ {"rx_ecrc", 0x0, 0xb},
+ {"tlp_err_seq", 0x1, 0x0},
+ {"ack_nak_dllp_seq", 0x1, 0x1},
+ {"ack_nak_dllp", 0x2, 0x0},
+ {"upd_fc_dllp", 0x2, 0x1},
+ {"nak_dllp", 0x2, 0x2},
+ {"inv_sync_hdr_sym", 0x3, 0x0},
+ {"com_pad_ts1", 0x3, 0x1},
+ {"com_pad_ts2", 0x3, 0x2},
+ {"com_fts", 0x3, 0x3},
+ {"com_idl", 0x3, 0x4},
+ {"end_edb", 0x3, 0x5},
+ {"stp_sdp", 0x3, 0x6},
+ {"com_skp", 0x3, 0x7},
+ {"posted_tlp_hdr", 0x4, 0x0},
+ {"non_post_tlp_hdr", 0x4, 0x1},
+ {"cmpl_tlp_hdr", 0x4, 0x2},
+ {"posted_tlp_data", 0x4, 0x4},
+ {"non_post_tlp_data", 0x4, 0x5},
+ {"cmpl_tlp_data", 0x4, 0x6},
+ {"duplicate_tlp", 0x5, 0x0},
+ {"nullified_tlp", 0x5, 0x1},
+};
+
+static const u32 err_inj_type_mask[] = {
+ EINJ0_TYPE,
+ EINJ1_TYPE,
+ EINJ2_TYPE,
+ EINJ3_TYPE,
+ EINJ4_TYPE,
+ EINJ5_TYPE,
+};
+
+/**
+ * struct dwc_pcie_event_counter - Stores details about each event counter
+ * supported by DWC RAS DES
+ * @name: Name of the error counter
+ * @group_no: Group number that the event belongs to. The value can range
+ * from 0 to 4
+ * @event_no: Event number of the particular event. The value ranges are:
+ * Group 0: 0 - 10
+ * Group 1: 5 - 13
+ * Group 2: 0 - 7
+ * Group 3: 0 - 5
+ * Group 4: 0 - 1
+ */
+struct dwc_pcie_event_counter {
+ const char *name;
+ u32 group_no;
+ u32 event_no;
+};
+
+static const struct dwc_pcie_event_counter event_list[] = {
+ {"ebuf_overflow", 0x0, 0x0},
+ {"ebuf_underrun", 0x0, 0x1},
+ {"decode_err", 0x0, 0x2},
+ {"running_disparity_err", 0x0, 0x3},
+ {"skp_os_parity_err", 0x0, 0x4},
+ {"sync_header_err", 0x0, 0x5},
+ {"rx_valid_deassertion", 0x0, 0x6},
+ {"ctl_skp_os_parity_err", 0x0, 0x7},
+ {"retimer_parity_err_1st", 0x0, 0x8},
+ {"retimer_parity_err_2nd", 0x0, 0x9},
+ {"margin_crc_parity_err", 0x0, 0xA},
+ {"detect_ei_infer", 0x1, 0x5},
+ {"receiver_err", 0x1, 0x6},
+ {"rx_recovery_req", 0x1, 0x7},
+ {"n_fts_timeout", 0x1, 0x8},
+ {"framing_err", 0x1, 0x9},
+ {"deskew_err", 0x1, 0xa},
+ {"framing_err_in_l0", 0x1, 0xc},
+ {"deskew_uncompleted_err", 0x1, 0xd},
+ {"bad_tlp", 0x2, 0x0},
+ {"lcrc_err", 0x2, 0x1},
+ {"bad_dllp", 0x2, 0x2},
+ {"replay_num_rollover", 0x2, 0x3},
+ {"replay_timeout", 0x2, 0x4},
+ {"rx_nak_dllp", 0x2, 0x5},
+ {"tx_nak_dllp", 0x2, 0x6},
+ {"retry_tlp", 0x2, 0x7},
+ {"fc_timeout", 0x3, 0x0},
+ {"poisoned_tlp", 0x3, 0x1},
+ {"ecrc_error", 0x3, 0x2},
+ {"unsupported_request", 0x3, 0x3},
+ {"completer_abort", 0x3, 0x4},
+ {"completion_timeout", 0x3, 0x5},
+ {"ebuf_skp_add", 0x4, 0x0},
+ {"ebuf_skp_del", 0x4, 0x1},
+};
+
+static ssize_t lane_detect_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct dw_pcie *pci = file->private_data;
+ struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
+ char debugfs_buf[DWC_DEBUGFS_BUF_MAX];
+ ssize_t pos;
+ u32 val;
+
+ val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + SD_STATUS_L1LANE_REG);
+ val = FIELD_GET(PIPE_DETECT_LANE, val);
+ if (val)
+ pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "Lane Detected\n");
+ else
+ pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "Lane Undetected\n");
+
+ return simple_read_from_buffer(buf, count, ppos, debugfs_buf, pos);
+}
+
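+/*
+ * Writes to this file select the lane (LANE_SELECT field) whose status is
+ * reported by subsequent lane_detect and rx_valid reads.
+ */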
+static ssize_t lane_detect_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct dw_pcie *pci = file->private_data;
+ struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
+ u32 lane, val;
+
+ val = kstrtou32_from_user(buf, count, 0, &lane);
+ if (val)
+ return val;
+
+ val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + SD_STATUS_L1LANE_REG);
+ val &= ~(LANE_SELECT);
+ val |= FIELD_PREP(LANE_SELECT, lane);
+ dw_pcie_writel_dbi(pci, rinfo->ras_cap_offset + SD_STATUS_L1LANE_REG, val);
+
+ return count;
+}
+
+static ssize_t rx_valid_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct dw_pcie *pci = file->private_data;
+ struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
+ char debugfs_buf[DWC_DEBUGFS_BUF_MAX];
+ ssize_t pos;
+ u32 val;
+
+ val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + SD_STATUS_L1LANE_REG);
+ val = FIELD_GET(PIPE_RXVALID, val);
+ if (val)
+ pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "RX Valid\n");
+ else
+ pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "RX Invalid\n");
+
+ return simple_read_from_buffer(buf, count, ppos, debugfs_buf, pos);
+}
+
+static ssize_t rx_valid_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return lane_detect_write(file, buf, count, ppos);
+}
+
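+/*
+ * Expected write format depends on the error group of the file:
+ *   group 1:          "<count> <val_diff>"
+ *   group 4:          "<count> <val_diff> <vc_num>"
+ *   all other groups: "<count>"
+ * <val_diff> must be within [-4095, 4095]. A successful write programs the
+ * per-group error injection register and sets the group bit in
+ * ERR_INJ_ENABLE_REG to enable the injection for that group.
+ */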
+static ssize_t err_inj_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct dwc_pcie_rasdes_priv *pdata = file->private_data;
+ struct dw_pcie *pci = pdata->pci;
+ struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
+ u32 val, counter, vc_num, err_group, type_mask;
+ int val_diff = 0;
+ char *kern_buf;
+
+ err_group = err_inj_list[pdata->idx].err_inj_group;
+ type_mask = err_inj_type_mask[err_group];
+
+ kern_buf = memdup_user_nul(buf, count);
+ if (IS_ERR(kern_buf))
+ return PTR_ERR(kern_buf);
+
+ if (err_group == 4) {
+ val = sscanf(kern_buf, "%u %d %u", &counter, &val_diff, &vc_num);
+ if ((val != 3) || (val_diff < -4095 || val_diff > 4095)) {
+ kfree(kern_buf);
+ return -EINVAL;
+ }
+ } else if (err_group == 1) {
+ val = sscanf(kern_buf, "%u %d", &counter, &val_diff);
+ if ((val != 2) || (val_diff < -4095 || val_diff > 4095)) {
+ kfree(kern_buf);
+ return -EINVAL;
+ }
+ } else {
+ val = kstrtou32(kern_buf, 0, &counter);
+ if (val) {
+ kfree(kern_buf);
+ return val;
+ }
+ }
+
+ val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + ERR_INJ0_OFF + (0x4 * err_group));
+ val &= ~(type_mask | EINJ_COUNT);
+ val |= ((err_inj_list[pdata->idx].err_inj_type << EINJ_TYPE_SHIFT) & type_mask);
+ val |= FIELD_PREP(EINJ_COUNT, counter);
+
+ if (err_group == 1 || err_group == 4) {
+ val &= ~(EINJ_VAL_DIFF);
+ val |= FIELD_PREP(EINJ_VAL_DIFF, val_diff);
+ }
+ if (err_group == 4) {
+ val &= ~(EINJ_VC_NUM);
+ val |= FIELD_PREP(EINJ_VC_NUM, vc_num);
+ }
+
+ dw_pcie_writel_dbi(pci, rinfo->ras_cap_offset + ERR_INJ0_OFF + (0x4 * err_group), val);
+ dw_pcie_writel_dbi(pci, rinfo->ras_cap_offset + ERR_INJ_ENABLE_REG, (0x1 << err_group));
+
+ kfree(kern_buf);
+ return count;
+}
+
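+/*
+ * Program the event group and event number for this file's counter into the
+ * control register, so that following control/data register accesses refer to
+ * the selected event. Callers must hold reg_event_lock.
+ */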
+static void set_event_number(struct dwc_pcie_rasdes_priv *pdata,
+ struct dw_pcie *pci, struct dwc_pcie_rasdes_info *rinfo)
+{
+ u32 val;
+
+ val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG);
+ val &= ~EVENT_COUNTER_ENABLE;
+ val &= ~(EVENT_COUNTER_GROUP_SELECT | EVENT_COUNTER_EVENT_SELECT);
+ val |= FIELD_PREP(EVENT_COUNTER_GROUP_SELECT, event_list[pdata->idx].group_no);
+ val |= FIELD_PREP(EVENT_COUNTER_EVENT_SELECT, event_list[pdata->idx].event_no);
+ dw_pcie_writel_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG, val);
+}
+
+static ssize_t counter_enable_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct dwc_pcie_rasdes_priv *pdata = file->private_data;
+ struct dw_pcie *pci = pdata->pci;
+ struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
+ char debugfs_buf[DWC_DEBUGFS_BUF_MAX];
+ ssize_t pos;
+ u32 val;
+
+ mutex_lock(&rinfo->reg_event_lock);
+ set_event_number(pdata, pci, rinfo);
+ val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG);
+ mutex_unlock(&rinfo->reg_event_lock);
+ val = FIELD_GET(EVENT_COUNTER_STATUS, val);
+ if (val)
+ pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "Counter Enabled\n");
+ else
+ pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "Counter Disabled\n");
+
+ return simple_read_from_buffer(buf, count, ppos, debugfs_buf, pos);
+}
+
+static ssize_t counter_enable_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct dwc_pcie_rasdes_priv *pdata = file->private_data;
+ struct dw_pcie *pci = pdata->pci;
+ struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
+ u32 val, enable;
+
+ val = kstrtou32_from_user(buf, count, 0, &enable);
+ if (val)
+ return val;
+
+ mutex_lock(&rinfo->reg_event_lock);
+ set_event_number(pdata, pci, rinfo);
+ val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG);
+ if (enable)
+ val |= FIELD_PREP(EVENT_COUNTER_ENABLE, PER_EVENT_ON);
+ else
+ val |= FIELD_PREP(EVENT_COUNTER_ENABLE, PER_EVENT_OFF);
+
+ dw_pcie_writel_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG, val);
+
+ /*
+ * When enabling the counter, always read the status back to check
+ * whether it actually got enabled. Return an error if it did not, to
+ * let users know that the counter is not supported on this platform.
+ */
+ if (enable) {
+ val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset +
+ RAS_DES_EVENT_COUNTER_CTRL_REG);
+ if (!FIELD_GET(EVENT_COUNTER_STATUS, val)) {
+ mutex_unlock(&rinfo->reg_event_lock);
+ return -EOPNOTSUPP;
+ }
+ }
+
+ mutex_unlock(&rinfo->reg_event_lock);
+
+ return count;
+}
+
+static ssize_t counter_lane_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct dwc_pcie_rasdes_priv *pdata = file->private_data;
+ struct dw_pcie *pci = pdata->pci;
+ struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
+ char debugfs_buf[DWC_DEBUGFS_BUF_MAX];
+ ssize_t pos;
+ u32 val;
+
+ mutex_lock(&rinfo->reg_event_lock);
+ set_event_number(pdata, pci, rinfo);
+ val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG);
+ mutex_unlock(&rinfo->reg_event_lock);
+ val = FIELD_GET(EVENT_COUNTER_LANE_SELECT, val);
+ pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "Lane: %d\n", val);
+
+ return simple_read_from_buffer(buf, count, ppos, debugfs_buf, pos);
+}
+
+static ssize_t counter_lane_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct dwc_pcie_rasdes_priv *pdata = file->private_data;
+ struct dw_pcie *pci = pdata->pci;
+ struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
+ u32 val, lane;
+
+ val = kstrtou32_from_user(buf, count, 0, &lane);
+ if (val)
+ return val;
+
+ mutex_lock(&rinfo->reg_event_lock);
+ set_event_number(pdata, pci, rinfo);
+ val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG);
+ val &= ~(EVENT_COUNTER_LANE_SELECT);
+ val |= FIELD_PREP(EVENT_COUNTER_LANE_SELECT, lane);
+ dw_pcie_writel_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG, val);
+ mutex_unlock(&rinfo->reg_event_lock);
+
+ return count;
+}
+
+static ssize_t counter_value_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct dwc_pcie_rasdes_priv *pdata = file->private_data;
+ struct dw_pcie *pci = pdata->pci;
+ struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
+ char debugfs_buf[DWC_DEBUGFS_BUF_MAX];
+ ssize_t pos;
+ u32 val;
+
+ mutex_lock(&rinfo->reg_event_lock);
+ set_event_number(pdata, pci, rinfo);
+ val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_DATA_REG);
+ mutex_unlock(&rinfo->reg_event_lock);
+ pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "Counter value: %d\n", val);
+
+ return simple_read_from_buffer(buf, count, ppos, debugfs_buf, pos);
+}
+
+static const char *ltssm_status_string(enum dw_pcie_ltssm ltssm)
+{
+ const char *str;
+
+ switch (ltssm) {
+#define DW_PCIE_LTSSM_NAME(n) case n: str = #n; break
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_DETECT_QUIET);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_DETECT_ACT);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_POLL_ACTIVE);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_POLL_COMPLIANCE);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_POLL_CONFIG);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_PRE_DETECT_QUIET);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_DETECT_WAIT);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_CFG_LINKWD_START);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_CFG_LINKWD_ACEPT);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_CFG_LANENUM_WAI);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_CFG_LANENUM_ACEPT);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_CFG_COMPLETE);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_CFG_IDLE);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_RCVRY_LOCK);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_RCVRY_SPEED);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_RCVRY_RCVRCFG);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_RCVRY_IDLE);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_L0);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_L0S);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_L123_SEND_EIDLE);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_L1_IDLE);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_L2_IDLE);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_L2_WAKE);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_DISABLED_ENTRY);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_DISABLED_IDLE);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_DISABLED);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_LPBK_ENTRY);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_LPBK_ACTIVE);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_LPBK_EXIT);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_LPBK_EXIT_TIMEOUT);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_HOT_RESET_ENTRY);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_HOT_RESET);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_RCVRY_EQ0);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_RCVRY_EQ1);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_RCVRY_EQ2);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_RCVRY_EQ3);
+ default:
+ str = "DW_PCIE_LTSSM_UNKNOWN";
+ break;
+ }
+
+ return str + strlen("DW_PCIE_LTSSM_");
+}
+
+static int ltssm_status_show(struct seq_file *s, void *v)
+{
+ struct dw_pcie *pci = s->private;
+ enum dw_pcie_ltssm val;
+
+ val = dw_pcie_get_ltssm(pci);
+ seq_printf(s, "%s (0x%02x)\n", ltssm_status_string(val), val);
+
+ return 0;
+}
+
+static int ltssm_status_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ltssm_status_show, inode->i_private);
+}
+
+#define dwc_debugfs_create(name) \
+debugfs_create_file(#name, 0644, rasdes_debug, pci, \
+ &dbg_ ## name ## _fops)
+
+#define DWC_DEBUGFS_FOPS(name) \
+static const struct file_operations dbg_ ## name ## _fops = { \
+ .open = simple_open, \
+ .read = name ## _read, \
+ .write = name ## _write \
+}
+
+DWC_DEBUGFS_FOPS(lane_detect);
+DWC_DEBUGFS_FOPS(rx_valid);
+
+static const struct file_operations dwc_pcie_err_inj_ops = {
+ .open = simple_open,
+ .write = err_inj_write,
+};
+
+static const struct file_operations dwc_pcie_counter_enable_ops = {
+ .open = simple_open,
+ .read = counter_enable_read,
+ .write = counter_enable_write,
+};
+
+static const struct file_operations dwc_pcie_counter_lane_ops = {
+ .open = simple_open,
+ .read = counter_lane_read,
+ .write = counter_lane_write,
+};
+
+static const struct file_operations dwc_pcie_counter_value_ops = {
+ .open = simple_open,
+ .read = counter_value_read,
+};
+
+static const struct file_operations dwc_pcie_ltssm_status_ops = {
+ .open = ltssm_status_open,
+ .read = seq_read,
+};
+
+static void dwc_pcie_rasdes_debugfs_deinit(struct dw_pcie *pci)
+{
+ struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
+
+ mutex_destroy(&rinfo->reg_event_lock);
+}
+
+static int dwc_pcie_rasdes_debugfs_init(struct dw_pcie *pci, struct dentry *dir)
+{
+ struct dentry *rasdes_debug, *rasdes_err_inj;
+ struct dentry *rasdes_event_counter, *rasdes_events;
+ struct dwc_pcie_rasdes_info *rasdes_info;
+ struct dwc_pcie_rasdes_priv *priv_tmp;
+ struct device *dev = pci->dev;
+ int ras_cap, i, ret;
+
+ /*
+ * If a given SoC has no RAS DES capability, the lookup below will fail.
+ * Treating that as an error would break some existing platforms, so
+ * return 0 here, as this is not necessarily an error.
+ */
+ ras_cap = dw_pcie_find_rasdes_capability(pci);
+ if (!ras_cap) {
+ dev_dbg(dev, "no RAS DES capability available\n");
+ return 0;
+ }
+
+ rasdes_info = devm_kzalloc(dev, sizeof(*rasdes_info), GFP_KERNEL);
+ if (!rasdes_info)
+ return -ENOMEM;
+
+ /* Create subdirectories for Debug, Error Injection, Statistics. */
+ rasdes_debug = debugfs_create_dir("rasdes_debug", dir);
+ rasdes_err_inj = debugfs_create_dir("rasdes_err_inj", dir);
+ rasdes_event_counter = debugfs_create_dir("rasdes_event_counter", dir);
+
+ mutex_init(&rasdes_info->reg_event_lock);
+ rasdes_info->ras_cap_offset = ras_cap;
+ pci->debugfs->rasdes_info = rasdes_info;
+
+ /* Create debugfs files for Debug subdirectory. */
+ dwc_debugfs_create(lane_detect);
+ dwc_debugfs_create(rx_valid);
+
+ /* Create debugfs files for Error Injection subdirectory. */
+ for (i = 0; i < ARRAY_SIZE(err_inj_list); i++) {
+ priv_tmp = devm_kzalloc(dev, sizeof(*priv_tmp), GFP_KERNEL);
+ if (!priv_tmp) {
+ ret = -ENOMEM;
+ goto err_deinit;
+ }
+
+ priv_tmp->idx = i;
+ priv_tmp->pci = pci;
+ debugfs_create_file(err_inj_list[i].name, 0200, rasdes_err_inj, priv_tmp,
+ &dwc_pcie_err_inj_ops);
+ }
+
+ /* Create debugfs files for Statistical Counter subdirectory. */
+ for (i = 0; i < ARRAY_SIZE(event_list); i++) {
+ priv_tmp = devm_kzalloc(dev, sizeof(*priv_tmp), GFP_KERNEL);
+ if (!priv_tmp) {
+ ret = -ENOMEM;
+ goto err_deinit;
+ }
+
+ priv_tmp->idx = i;
+ priv_tmp->pci = pci;
+ rasdes_events = debugfs_create_dir(event_list[i].name, rasdes_event_counter);
+ if (event_list[i].group_no == 0 || event_list[i].group_no == 4) {
+ debugfs_create_file("lane_select", 0644, rasdes_events,
+ priv_tmp, &dwc_pcie_counter_lane_ops);
+ }
+ debugfs_create_file("counter_value", 0444, rasdes_events, priv_tmp,
+ &dwc_pcie_counter_value_ops);
+ debugfs_create_file("counter_enable", 0644, rasdes_events, priv_tmp,
+ &dwc_pcie_counter_enable_ops);
+ }
+
+ return 0;
+
+err_deinit:
+ dwc_pcie_rasdes_debugfs_deinit(pci);
+ return ret;
+}
+
+static void dwc_pcie_ltssm_debugfs_init(struct dw_pcie *pci, struct dentry *dir)
+{
+ debugfs_create_file("ltssm_status", 0444, dir, pci,
+ &dwc_pcie_ltssm_status_ops);
+}
+
+static int dw_pcie_ptm_check_capability(void *drvdata)
+{
+ struct dw_pcie *pci = drvdata;
+
+ pci->ptm_vsec_offset = dw_pcie_find_ptm_capability(pci);
+
+ return pci->ptm_vsec_offset;
+}
+
+static int dw_pcie_ptm_context_update_write(void *drvdata, u8 mode)
+{
+ struct dw_pcie *pci = drvdata;
+ u32 val;
+
+ if (mode == PCIE_PTM_CONTEXT_UPDATE_AUTO) {
+ val = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_RES_REQ_CTRL);
+ val |= PTM_REQ_AUTO_UPDATE_ENABLED;
+ dw_pcie_writel_dbi(pci, pci->ptm_vsec_offset + PTM_RES_REQ_CTRL, val);
+ } else if (mode == PCIE_PTM_CONTEXT_UPDATE_MANUAL) {
+ val = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_RES_REQ_CTRL);
+ val &= ~PTM_REQ_AUTO_UPDATE_ENABLED;
+ val |= PTM_REQ_START_UPDATE;
+ dw_pcie_writel_dbi(pci, pci->ptm_vsec_offset + PTM_RES_REQ_CTRL, val);
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int dw_pcie_ptm_context_update_read(void *drvdata, u8 *mode)
+{
+ struct dw_pcie *pci = drvdata;
+ u32 val;
+
+ val = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_RES_REQ_CTRL);
+ if (FIELD_GET(PTM_REQ_AUTO_UPDATE_ENABLED, val))
+ *mode = PCIE_PTM_CONTEXT_UPDATE_AUTO;
+ else
+ /*
+ * PTM_REQ_START_UPDATE is a self-clearing register bit, so if
+ * PTM_REQ_AUTO_UPDATE_ENABLED is not set, it implies that manual
+ * update is used.
+ */
+ *mode = PCIE_PTM_CONTEXT_UPDATE_MANUAL;
+
+ return 0;
+}
+
+static int dw_pcie_ptm_context_valid_write(void *drvdata, bool valid)
+{
+ struct dw_pcie *pci = drvdata;
+ u32 val;
+
+ if (valid) {
+ val = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_RES_REQ_CTRL);
+ val |= PTM_RES_CCONTEXT_VALID;
+ dw_pcie_writel_dbi(pci, pci->ptm_vsec_offset + PTM_RES_REQ_CTRL, val);
+ } else {
+ val = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_RES_REQ_CTRL);
+ val &= ~PTM_RES_CCONTEXT_VALID;
+ dw_pcie_writel_dbi(pci, pci->ptm_vsec_offset + PTM_RES_REQ_CTRL, val);
+ }
+
+ return 0;
+}
+
+static int dw_pcie_ptm_context_valid_read(void *drvdata, bool *valid)
+{
+ struct dw_pcie *pci = drvdata;
+ u32 val;
+
+ val = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_RES_REQ_CTRL);
+ *valid = !!FIELD_GET(PTM_RES_CCONTEXT_VALID, val);
+
+ return 0;
+}
+
+static int dw_pcie_ptm_local_clock_read(void *drvdata, u64 *clock)
+{
+ struct dw_pcie *pci = drvdata;
+ u32 msb, lsb;
+
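+ /*
+ * Re-read the MSB after the LSB and retry if it changed, so that a
+ * carry between the two 32-bit reads cannot yield a torn 64-bit value.
+ */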
+ do {
+ msb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_LOCAL_MSB);
+ lsb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_LOCAL_LSB);
+ } while (msb != dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_LOCAL_MSB));
+
+ *clock = ((u64) msb) << 32 | lsb;
+
+ return 0;
+}
+
+static int dw_pcie_ptm_master_clock_read(void *drvdata, u64 *clock)
+{
+ struct dw_pcie *pci = drvdata;
+ u32 msb, lsb;
+
+ do {
+ msb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_MASTER_MSB);
+ lsb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_MASTER_LSB);
+ } while (msb != dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_MASTER_MSB));
+
+ *clock = ((u64) msb) << 32 | lsb;
+
+ return 0;
+}
+
+static int dw_pcie_ptm_t1_read(void *drvdata, u64 *clock)
+{
+ struct dw_pcie *pci = drvdata;
+ u32 msb, lsb;
+
+ do {
+ msb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T1_T2_MSB);
+ lsb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T1_T2_LSB);
+ } while (msb != dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T1_T2_MSB));
+
+ *clock = ((u64) msb) << 32 | lsb;
+
+ return 0;
+}
+
+static int dw_pcie_ptm_t2_read(void *drvdata, u64 *clock)
+{
+ struct dw_pcie *pci = drvdata;
+ u32 msb, lsb;
+
+ do {
+ msb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T1_T2_MSB);
+ lsb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T1_T2_LSB);
+ } while (msb != dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T1_T2_MSB));
+
+ *clock = ((u64) msb) << 32 | lsb;
+
+ return 0;
+}
+
+static int dw_pcie_ptm_t3_read(void *drvdata, u64 *clock)
+{
+ struct dw_pcie *pci = drvdata;
+ u32 msb, lsb;
+
+ do {
+ msb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T3_T4_MSB);
+ lsb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T3_T4_LSB);
+ } while (msb != dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T3_T4_MSB));
+
+ *clock = ((u64) msb) << 32 | lsb;
+
+ return 0;
+}
+
+static int dw_pcie_ptm_t4_read(void *drvdata, u64 *clock)
+{
+ struct dw_pcie *pci = drvdata;
+ u32 msb, lsb;
+
+ do {
+ msb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T3_T4_MSB);
+ lsb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T3_T4_LSB);
+ } while (msb != dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T3_T4_MSB));
+
+ *clock = ((u64) msb) << 32 | lsb;
+
+ return 0;
+}
+
+static bool dw_pcie_ptm_context_update_visible(void *drvdata)
+{
+ struct dw_pcie *pci = drvdata;
+
+ return pci->mode == DW_PCIE_EP_TYPE;
+}
+
+static bool dw_pcie_ptm_context_valid_visible(void *drvdata)
+{
+ struct dw_pcie *pci = drvdata;
+
+ return pci->mode == DW_PCIE_RC_TYPE;
+}
+
+static bool dw_pcie_ptm_local_clock_visible(void *drvdata)
+{
+ /* PTM local clock is always visible */
+ return true;
+}
+
+static bool dw_pcie_ptm_master_clock_visible(void *drvdata)
+{
+ struct dw_pcie *pci = drvdata;
+
+ return pci->mode == DW_PCIE_EP_TYPE;
+}
+
+static bool dw_pcie_ptm_t1_visible(void *drvdata)
+{
+ struct dw_pcie *pci = drvdata;
+
+ return pci->mode == DW_PCIE_EP_TYPE;
+}
+
+static bool dw_pcie_ptm_t2_visible(void *drvdata)
+{
+ struct dw_pcie *pci = drvdata;
+
+ return pci->mode == DW_PCIE_RC_TYPE;
+}
+
+static bool dw_pcie_ptm_t3_visible(void *drvdata)
+{
+ struct dw_pcie *pci = drvdata;
+
+ return pci->mode == DW_PCIE_RC_TYPE;
+}
+
+static bool dw_pcie_ptm_t4_visible(void *drvdata)
+{
+ struct dw_pcie *pci = drvdata;
+
+ return pci->mode == DW_PCIE_EP_TYPE;
+}
+
+static const struct pcie_ptm_ops dw_pcie_ptm_ops = {
+ .check_capability = dw_pcie_ptm_check_capability,
+ .context_update_write = dw_pcie_ptm_context_update_write,
+ .context_update_read = dw_pcie_ptm_context_update_read,
+ .context_valid_write = dw_pcie_ptm_context_valid_write,
+ .context_valid_read = dw_pcie_ptm_context_valid_read,
+ .local_clock_read = dw_pcie_ptm_local_clock_read,
+ .master_clock_read = dw_pcie_ptm_master_clock_read,
+ .t1_read = dw_pcie_ptm_t1_read,
+ .t2_read = dw_pcie_ptm_t2_read,
+ .t3_read = dw_pcie_ptm_t3_read,
+ .t4_read = dw_pcie_ptm_t4_read,
+ .context_update_visible = dw_pcie_ptm_context_update_visible,
+ .context_valid_visible = dw_pcie_ptm_context_valid_visible,
+ .local_clock_visible = dw_pcie_ptm_local_clock_visible,
+ .master_clock_visible = dw_pcie_ptm_master_clock_visible,
+ .t1_visible = dw_pcie_ptm_t1_visible,
+ .t2_visible = dw_pcie_ptm_t2_visible,
+ .t3_visible = dw_pcie_ptm_t3_visible,
+ .t4_visible = dw_pcie_ptm_t4_visible,
+};
+
+void dwc_pcie_debugfs_deinit(struct dw_pcie *pci)
+{
+ if (!pci->debugfs)
+ return;
+
+ pcie_ptm_destroy_debugfs(pci->ptm_debugfs);
+ dwc_pcie_rasdes_debugfs_deinit(pci);
+ debugfs_remove_recursive(pci->debugfs->debug_dir);
+}
+
+void dwc_pcie_debugfs_init(struct dw_pcie *pci, enum dw_pcie_device_mode mode)
+{
+ char dirname[DWC_DEBUGFS_BUF_MAX];
+ struct device *dev = pci->dev;
+ struct debugfs_info *debugfs;
+ struct dentry *dir;
+ int err;
+
+ /* Create main directory for each platform driver. */
+ snprintf(dirname, DWC_DEBUGFS_BUF_MAX, "dwc_pcie_%s", dev_name(dev));
+ dir = debugfs_create_dir(dirname, NULL);
+ debugfs = devm_kzalloc(dev, sizeof(*debugfs), GFP_KERNEL);
+ if (!debugfs)
+ return;
+
+ debugfs->debug_dir = dir;
+ pci->debugfs = debugfs;
+ err = dwc_pcie_rasdes_debugfs_init(pci, dir);
+ if (err)
+ dev_err(dev, "failed to initialize RAS DES debugfs, err=%d\n",
+ err);
+
+ dwc_pcie_ltssm_debugfs_init(pci, dir);
+
+ pci->mode = mode;
+ pci->ptm_debugfs = pcie_ptm_create_debugfs(pci->dev, pci,
+ &dw_pcie_ptm_ops);
+}
diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
index f9182f8d552f..19571ac2b961 100644
--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
+++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
@@ -6,6 +6,8 @@
* Author: Kishon Vijay Abraham I <kishon@ti.com>
*/
+#include <linux/align.h>
+#include <linux/bitfield.h>
#include <linux/of.h>
#include <linux/platform_device.h>
@@ -13,22 +15,14 @@
#include <linux/pci-epc.h>
#include <linux/pci-epf.h>
-void dw_pcie_ep_linkup(struct dw_pcie_ep *ep)
-{
- struct pci_epc *epc = ep->epc;
-
- pci_epc_linkup(epc);
-}
-EXPORT_SYMBOL_GPL(dw_pcie_ep_linkup);
-
-void dw_pcie_ep_init_notify(struct dw_pcie_ep *ep)
-{
- struct pci_epc *epc = ep->epc;
-
- pci_epc_init_notify(epc);
-}
-EXPORT_SYMBOL_GPL(dw_pcie_ep_init_notify);
-
+/**
+ * dw_pcie_ep_get_func_from_ep - Get the struct dw_pcie_ep_func corresponding to
+ * the endpoint function
+ * @ep: DWC EP device
+ * @func_no: Function number of the endpoint device
+ *
+ * Return: struct dw_pcie_ep_func if success, NULL otherwise.
+ */
struct dw_pcie_ep_func *
dw_pcie_ep_get_func_from_ep(struct dw_pcie_ep *ep, u8 func_no)
{
@@ -42,36 +36,28 @@ dw_pcie_ep_get_func_from_ep(struct dw_pcie_ep *ep, u8 func_no)
return NULL;
}
-static unsigned int dw_pcie_ep_func_select(struct dw_pcie_ep *ep, u8 func_no)
-{
- unsigned int func_offset = 0;
-
- if (ep->ops->func_conf_select)
- func_offset = ep->ops->func_conf_select(ep, func_no);
-
- return func_offset;
-}
-
static void __dw_pcie_ep_reset_bar(struct dw_pcie *pci, u8 func_no,
enum pci_barno bar, int flags)
{
- u32 reg;
- unsigned int func_offset = 0;
struct dw_pcie_ep *ep = &pci->ep;
+ u32 reg;
- func_offset = dw_pcie_ep_func_select(ep, func_no);
-
- reg = func_offset + PCI_BASE_ADDRESS_0 + (4 * bar);
+ reg = PCI_BASE_ADDRESS_0 + (4 * bar);
dw_pcie_dbi_ro_wr_en(pci);
- dw_pcie_writel_dbi2(pci, reg, 0x0);
- dw_pcie_writel_dbi(pci, reg, 0x0);
+ dw_pcie_ep_writel_dbi2(ep, func_no, reg, 0x0);
+ dw_pcie_ep_writel_dbi(ep, func_no, reg, 0x0);
if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) {
- dw_pcie_writel_dbi2(pci, reg + 4, 0x0);
- dw_pcie_writel_dbi(pci, reg + 4, 0x0);
+ dw_pcie_ep_writel_dbi2(ep, func_no, reg + 4, 0x0);
+ dw_pcie_ep_writel_dbi(ep, func_no, reg + 4, 0x0);
}
dw_pcie_dbi_ro_wr_dis(pci);
}
+/**
+ * dw_pcie_ep_reset_bar - Reset endpoint BAR
+ * @pci: DWC PCI device
+ * @bar: BAR number of the endpoint
+ */
void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)
{
u8 func_no, funcs;
@@ -83,77 +69,79 @@ void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)
}
EXPORT_SYMBOL_GPL(dw_pcie_ep_reset_bar);
-static u8 __dw_pcie_ep_find_next_cap(struct dw_pcie_ep *ep, u8 func_no,
- u8 cap_ptr, u8 cap)
+static u8 dw_pcie_ep_find_capability(struct dw_pcie_ep *ep, u8 func_no, u8 cap)
{
- struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- unsigned int func_offset = 0;
- u8 cap_id, next_cap_ptr;
- u16 reg;
-
- if (!cap_ptr)
- return 0;
-
- func_offset = dw_pcie_ep_func_select(ep, func_no);
+ return PCI_FIND_NEXT_CAP(dw_pcie_ep_read_cfg, PCI_CAPABILITY_LIST,
+ cap, ep, func_no);
+}
- reg = dw_pcie_readw_dbi(pci, func_offset + cap_ptr);
- cap_id = (reg & 0x00ff);
+/**
+ * dw_pcie_ep_hide_ext_capability - Hide a capability from the linked list
+ * @pci: DWC PCI device
+ * @prev_cap: Capability preceding the capability that should be hidden
+ * @cap: Capability that should be hidden
+ *
+ * Return: 0 if success, errno otherwise.
+ */
+int dw_pcie_ep_hide_ext_capability(struct dw_pcie *pci, u8 prev_cap, u8 cap)
+{
+ u16 prev_cap_offset, cap_offset;
+ u32 prev_cap_header, cap_header;
- if (cap_id > PCI_CAP_ID_MAX)
- return 0;
+ prev_cap_offset = dw_pcie_find_ext_capability(pci, prev_cap);
+ if (!prev_cap_offset)
+ return -EINVAL;
- if (cap_id == cap)
- return cap_ptr;
+ prev_cap_header = dw_pcie_readl_dbi(pci, prev_cap_offset);
+ cap_offset = PCI_EXT_CAP_NEXT(prev_cap_header);
+ cap_header = dw_pcie_readl_dbi(pci, cap_offset);
- next_cap_ptr = (reg & 0xff00) >> 8;
- return __dw_pcie_ep_find_next_cap(ep, func_no, next_cap_ptr, cap);
-}
+ /* cap must immediately follow prev_cap. */
+ if (PCI_EXT_CAP_ID(cap_header) != cap)
+ return -EINVAL;
-static u8 dw_pcie_ep_find_capability(struct dw_pcie_ep *ep, u8 func_no, u8 cap)
-{
- struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- unsigned int func_offset = 0;
- u8 next_cap_ptr;
- u16 reg;
+ /* Clear next ptr. */
+ prev_cap_header &= ~GENMASK(31, 20);
- func_offset = dw_pcie_ep_func_select(ep, func_no);
+ /* Set next ptr to next ptr of cap. */
+ prev_cap_header |= cap_header & GENMASK(31, 20);
- reg = dw_pcie_readw_dbi(pci, func_offset + PCI_CAPABILITY_LIST);
- next_cap_ptr = (reg & 0x00ff);
+ dw_pcie_dbi_ro_wr_en(pci);
+ dw_pcie_writel_dbi(pci, prev_cap_offset, prev_cap_header);
+ dw_pcie_dbi_ro_wr_dis(pci);
- return __dw_pcie_ep_find_next_cap(ep, func_no, next_cap_ptr, cap);
+ return 0;
}
+EXPORT_SYMBOL_GPL(dw_pcie_ep_hide_ext_capability);
static int dw_pcie_ep_write_header(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct pci_epf_header *hdr)
{
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- unsigned int func_offset = 0;
-
- func_offset = dw_pcie_ep_func_select(ep, func_no);
dw_pcie_dbi_ro_wr_en(pci);
- dw_pcie_writew_dbi(pci, func_offset + PCI_VENDOR_ID, hdr->vendorid);
- dw_pcie_writew_dbi(pci, func_offset + PCI_DEVICE_ID, hdr->deviceid);
- dw_pcie_writeb_dbi(pci, func_offset + PCI_REVISION_ID, hdr->revid);
- dw_pcie_writeb_dbi(pci, func_offset + PCI_CLASS_PROG, hdr->progif_code);
- dw_pcie_writew_dbi(pci, func_offset + PCI_CLASS_DEVICE,
- hdr->subclass_code | hdr->baseclass_code << 8);
- dw_pcie_writeb_dbi(pci, func_offset + PCI_CACHE_LINE_SIZE,
- hdr->cache_line_size);
- dw_pcie_writew_dbi(pci, func_offset + PCI_SUBSYSTEM_VENDOR_ID,
- hdr->subsys_vendor_id);
- dw_pcie_writew_dbi(pci, func_offset + PCI_SUBSYSTEM_ID, hdr->subsys_id);
- dw_pcie_writeb_dbi(pci, func_offset + PCI_INTERRUPT_PIN,
- hdr->interrupt_pin);
+ dw_pcie_ep_writew_dbi(ep, func_no, PCI_VENDOR_ID, hdr->vendorid);
+ dw_pcie_ep_writew_dbi(ep, func_no, PCI_DEVICE_ID, hdr->deviceid);
+ dw_pcie_ep_writeb_dbi(ep, func_no, PCI_REVISION_ID, hdr->revid);
+ dw_pcie_ep_writeb_dbi(ep, func_no, PCI_CLASS_PROG, hdr->progif_code);
+ dw_pcie_ep_writew_dbi(ep, func_no, PCI_CLASS_DEVICE,
+ hdr->subclass_code | hdr->baseclass_code << 8);
+ dw_pcie_ep_writeb_dbi(ep, func_no, PCI_CACHE_LINE_SIZE,
+ hdr->cache_line_size);
+ dw_pcie_ep_writew_dbi(ep, func_no, PCI_SUBSYSTEM_VENDOR_ID,
+ hdr->subsys_vendor_id);
+ dw_pcie_ep_writew_dbi(ep, func_no, PCI_SUBSYSTEM_ID, hdr->subsys_id);
+ dw_pcie_ep_writeb_dbi(ep, func_no, PCI_INTERRUPT_PIN,
+ hdr->interrupt_pin);
dw_pcie_dbi_ro_wr_dis(pci);
return 0;
}
static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, u8 func_no, int type,
- dma_addr_t cpu_addr, enum pci_barno bar)
+ dma_addr_t parent_bus_addr, enum pci_barno bar,
+ size_t size)
{
int ret;
u32 free_win;
@@ -162,7 +150,7 @@ static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, u8 func_no, int type,
if (!ep->bar_to_atu[bar])
free_win = find_first_zero_bit(ep->ib_window_map, pci->num_ib_windows);
else
- free_win = ep->bar_to_atu[bar];
+ free_win = ep->bar_to_atu[bar] - 1;
if (free_win >= pci->num_ib_windows) {
dev_err(pci->dev, "No free inbound window\n");
@@ -170,21 +158,24 @@ static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, u8 func_no, int type,
}
ret = dw_pcie_prog_ep_inbound_atu(pci, func_no, free_win, type,
- cpu_addr, bar);
+ parent_bus_addr, bar, size);
if (ret < 0) {
dev_err(pci->dev, "Failed to program IB window\n");
return ret;
}
- ep->bar_to_atu[bar] = free_win;
+ /*
+ * Always increment free_win before assignment, since the value 0 is
+ * used to identify an unallocated mapping.
+ */
+ ep->bar_to_atu[bar] = free_win + 1;
set_bit(free_win, ep->ib_window_map);
return 0;
}
-static int dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep, u8 func_no,
- phys_addr_t phys_addr,
- u64 pci_addr, size_t size)
+static int dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep,
+ struct dw_pcie_ob_atu_cfg *atu)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
u32 free_win;
@@ -196,13 +187,13 @@ static int dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep, u8 func_no,
return -EINVAL;
}
- ret = dw_pcie_prog_ep_outbound_atu(pci, func_no, free_win, PCIE_ATU_TYPE_MEM,
- phys_addr, pci_addr, size);
+ atu->index = free_win;
+ ret = dw_pcie_prog_outbound_atu(pci, atu);
if (ret)
return ret;
set_bit(free_win, ep->ob_window_map);
- ep->outbound_addr[free_win] = phys_addr;
+ ep->outbound_addr[free_win] = atu->parent_bus_addr;
return 0;
}
@@ -213,7 +204,10 @@ static void dw_pcie_ep_clear_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
enum pci_barno bar = epf_bar->barno;
- u32 atu_index = ep->bar_to_atu[bar];
+ u32 atu_index = ep->bar_to_atu[bar] - 1;
+
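+ /* A zero entry means no inbound ATU window was assigned to this BAR. */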
+ if (!ep->bar_to_atu[bar])
+ return;
__dw_pcie_ep_reset_bar(pci, func_no, bar, epf_bar->flags);
@@ -223,6 +217,125 @@ static void dw_pcie_ep_clear_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
ep->bar_to_atu[bar] = 0;
}
+static unsigned int dw_pcie_ep_get_rebar_offset(struct dw_pcie *pci,
+ enum pci_barno bar)
+{
+ u32 reg, bar_index;
+ unsigned int offset, nbars;
+ int i;
+
+ offset = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_REBAR);
+ if (!offset)
+ return offset;
+
+ reg = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL);
+ nbars = FIELD_GET(PCI_REBAR_CTRL_NBAR_MASK, reg);
+
+ for (i = 0; i < nbars; i++, offset += PCI_REBAR_CTRL) {
+ reg = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL);
+ bar_index = FIELD_GET(PCI_REBAR_CTRL_BAR_IDX, reg);
+ if (bar_index == bar)
+ return offset;
+ }
+
+ return 0;
+}
+
+static int dw_pcie_ep_set_bar_resizable(struct dw_pcie_ep *ep, u8 func_no,
+ struct pci_epf_bar *epf_bar)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ enum pci_barno bar = epf_bar->barno;
+ size_t size = epf_bar->size;
+ int flags = epf_bar->flags;
+ u32 reg = PCI_BASE_ADDRESS_0 + (4 * bar);
+ unsigned int rebar_offset;
+ u32 rebar_cap, rebar_ctrl;
+ int ret;
+
+ rebar_offset = dw_pcie_ep_get_rebar_offset(pci, bar);
+ if (!rebar_offset)
+ return -EINVAL;
+
+ ret = pci_epc_bar_size_to_rebar_cap(size, &rebar_cap);
+ if (ret)
+ return ret;
+
+ dw_pcie_dbi_ro_wr_en(pci);
+
+ /*
+ * A BAR mask should not be written for a resizable BAR. The BAR mask
+ * is automatically derived by the controller every time the "selected
+ * size" bits are updated, see "Figure 3-26 Resizable BAR Example for
+ * 32-bit Memory BAR0" in DWC EP databook 5.96a. We simply need to write
+ * BIT(0) to set the BAR enable bit.
+ */
+ dw_pcie_ep_writel_dbi2(ep, func_no, reg, BIT(0));
+ dw_pcie_ep_writel_dbi(ep, func_no, reg, flags);
+
+ if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) {
+ dw_pcie_ep_writel_dbi2(ep, func_no, reg + 4, 0);
+ dw_pcie_ep_writel_dbi(ep, func_no, reg + 4, 0);
+ }
+
+ /*
+ * Bits 31:0 in PCI_REBAR_CAP define "supported sizes" bits for sizes
+ * 1 MB to 128 TB. Bits 31:16 in PCI_REBAR_CTRL define "supported sizes"
+ * bits for sizes 256 TB to 8 EB. Disallow sizes 256 TB to 8 EB.
+ */
+ rebar_ctrl = dw_pcie_readl_dbi(pci, rebar_offset + PCI_REBAR_CTRL);
+ rebar_ctrl &= ~GENMASK(31, 16);
+ dw_pcie_writel_dbi(pci, rebar_offset + PCI_REBAR_CTRL, rebar_ctrl);
+
+ /*
+ * The "selected size" (bits 13:8) in PCI_REBAR_CTRL are automatically
+ * updated when writing PCI_REBAR_CAP, see "Figure 3-26 Resizable BAR
+ * Example for 32-bit Memory BAR0" in DWC EP databook 5.96a.
+ */
+ dw_pcie_writel_dbi(pci, rebar_offset + PCI_REBAR_CAP, rebar_cap);
+
+ dw_pcie_dbi_ro_wr_dis(pci);
+
+ return 0;
+}
+
+static int dw_pcie_ep_set_bar_programmable(struct dw_pcie_ep *ep, u8 func_no,
+ struct pci_epf_bar *epf_bar)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ enum pci_barno bar = epf_bar->barno;
+ size_t size = epf_bar->size;
+ int flags = epf_bar->flags;
+ u32 reg = PCI_BASE_ADDRESS_0 + (4 * bar);
+
+ dw_pcie_dbi_ro_wr_en(pci);
+
+ dw_pcie_ep_writel_dbi2(ep, func_no, reg, lower_32_bits(size - 1));
+ dw_pcie_ep_writel_dbi(ep, func_no, reg, flags);
+
+ if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) {
+ dw_pcie_ep_writel_dbi2(ep, func_no, reg + 4, upper_32_bits(size - 1));
+ dw_pcie_ep_writel_dbi(ep, func_no, reg + 4, 0);
+ }
+
+ dw_pcie_dbi_ro_wr_dis(pci);
+
+ return 0;
+}
+
+static enum pci_epc_bar_type dw_pcie_ep_get_bar_type(struct dw_pcie_ep *ep,
+ enum pci_barno bar)
+{
+ const struct pci_epc_features *epc_features;
+
+ if (!ep->ops->get_features)
+ return BAR_PROGRAMMABLE;
+
+ epc_features = ep->ops->get_features(ep);
+
+ return epc_features->bar[bar].type;
+}
+
static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct pci_epf_bar *epf_bar)
{
@@ -230,39 +343,77 @@ static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
enum pci_barno bar = epf_bar->barno;
size_t size = epf_bar->size;
+ enum pci_epc_bar_type bar_type;
int flags = epf_bar->flags;
- unsigned int func_offset = 0;
int ret, type;
- u32 reg;
- func_offset = dw_pcie_ep_func_select(ep, func_no);
+ /*
+ * DWC does not allow BAR pairs to overlap, e.g. you cannot combine BARs
+ * 1 and 2 to form a 64-bit BAR.
+ */
+ if ((flags & PCI_BASE_ADDRESS_MEM_TYPE_64) && (bar & 1))
+ return -EINVAL;
+
+ /*
+ * Certain EPF drivers dynamically change the physical address of a BAR
+ * (i.e. they call set_bar() twice, without ever calling clear_bar(), as
+ * calling clear_bar() would clear the BAR's PCI address assigned by the
+ * host).
+ */
+ if (ep->epf_bar[bar]) {
+ /*
+ * We can only dynamically change a BAR if the new BAR size and
+ * BAR flags do not differ from the existing configuration.
+ */
+ if (ep->epf_bar[bar]->barno != bar ||
+ ep->epf_bar[bar]->size != size ||
+ ep->epf_bar[bar]->flags != flags)
+ return -EINVAL;
+
+ /*
+ * When dynamically changing a BAR, skip writing the BAR reg, as
+ * that would clear the BAR's PCI address assigned by the host.
+ */
+ goto config_atu;
+ }
+
+ bar_type = dw_pcie_ep_get_bar_type(ep, bar);
+ switch (bar_type) {
+ case BAR_FIXED:
+ /*
+ * There is no need to write a BAR mask for a fixed BAR (except
+ * to write 1 to the LSB of the BAR mask register, to enable the
+ * BAR). Write the BAR mask regardless. (The fixed bits in the
+ * BAR mask register will be read-only anyway.)
+ */
+ fallthrough;
+ case BAR_PROGRAMMABLE:
+ ret = dw_pcie_ep_set_bar_programmable(ep, func_no, epf_bar);
+ break;
+ case BAR_RESIZABLE:
+ ret = dw_pcie_ep_set_bar_resizable(ep, func_no, epf_bar);
+ break;
+ default:
+ ret = -EINVAL;
+ dev_err(pci->dev, "Invalid BAR type\n");
+ break;
+ }
- reg = PCI_BASE_ADDRESS_0 + (4 * bar) + func_offset;
+ if (ret)
+ return ret;
+config_atu:
if (!(flags & PCI_BASE_ADDRESS_SPACE))
type = PCIE_ATU_TYPE_MEM;
else
type = PCIE_ATU_TYPE_IO;
- ret = dw_pcie_ep_inbound_atu(ep, func_no, type, epf_bar->phys_addr, bar);
+ ret = dw_pcie_ep_inbound_atu(ep, func_no, type, epf_bar->phys_addr, bar,
+ size);
if (ret)
return ret;
- if (ep->epf_bar[bar])
- return 0;
-
- dw_pcie_dbi_ro_wr_en(pci);
-
- dw_pcie_writel_dbi2(pci, reg, lower_32_bits(size - 1));
- dw_pcie_writel_dbi(pci, reg, flags);
-
- if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) {
- dw_pcie_writel_dbi2(pci, reg + 4, upper_32_bits(size - 1));
- dw_pcie_writel_dbi(pci, reg + 4, 0);
- }
-
ep->epf_bar[bar] = epf_bar;
- dw_pcie_dbi_ro_wr_dis(pci);
return 0;
}
@@ -273,7 +424,7 @@ static int dw_pcie_find_index(struct dw_pcie_ep *ep, phys_addr_t addr,
u32 index;
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- for (index = 0; index < pci->num_ob_windows; index++) {
+ for_each_set_bit(index, ep->ob_window_map, pci->num_ob_windows) {
if (ep->outbound_addr[index] != addr)
continue;
*atu_index = index;
@@ -283,6 +434,20 @@ static int dw_pcie_find_index(struct dw_pcie_ep *ep, phys_addr_t addr,
return -EINVAL;
}
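+/*
+ * Align the PCI address down to the outbound ATU alignment (region_align).
+ * Returns the aligned base address, stores the offset into the window in
+ * *offset, and rounds *pci_size up to a page_size multiple large enough to
+ * cover the offset plus the original size.
+ */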
+static u64 dw_pcie_ep_align_addr(struct pci_epc *epc, u64 pci_addr,
+ size_t *pci_size, size_t *offset)
+{
+ struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ u64 mask = pci->region_align - 1;
+ size_t ofst = pci_addr & mask;
+
+ *pci_size = ALIGN(ofst + *pci_size, epc->mem->window.page_size);
+ *offset = ofst;
+
+ return pci_addr & ~mask;
+}
+
static void dw_pcie_ep_unmap_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
phys_addr_t addr)
{
@@ -291,10 +456,12 @@ static void dw_pcie_ep_unmap_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- ret = dw_pcie_find_index(ep, addr, &atu_index);
+ ret = dw_pcie_find_index(ep, addr - pci->parent_bus_offset,
+ &atu_index);
if (ret < 0)
return;
+ ep->outbound_addr[atu_index] = 0;
dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_OB, atu_index);
clear_bit(atu_index, ep->ob_window_map);
}
@@ -305,8 +472,14 @@ static int dw_pcie_ep_map_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
int ret;
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
-
- ret = dw_pcie_ep_outbound_atu(ep, func_no, addr, pci_addr, size);
+ struct dw_pcie_ob_atu_cfg atu = { 0 };
+
+ atu.func_no = func_no;
+ atu.type = PCIE_ATU_TYPE_MEM;
+ atu.parent_bus_addr = addr - pci->parent_bus_offset;
+ atu.pci_addr = pci_addr;
+ atu.size = size;
+ ret = dw_pcie_ep_outbound_atu(ep, &atu);
if (ret) {
dev_err(pci->dev, "Failed to enable address\n");
return ret;
@@ -318,48 +491,42 @@ static int dw_pcie_ep_map_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
static int dw_pcie_ep_get_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
- struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- u32 val, reg;
- unsigned int func_offset = 0;
struct dw_pcie_ep_func *ep_func;
+ u32 val, reg;
ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
if (!ep_func || !ep_func->msi_cap)
return -EINVAL;
- func_offset = dw_pcie_ep_func_select(ep, func_no);
-
- reg = ep_func->msi_cap + func_offset + PCI_MSI_FLAGS;
- val = dw_pcie_readw_dbi(pci, reg);
+ reg = ep_func->msi_cap + PCI_MSI_FLAGS;
+ val = dw_pcie_ep_readw_dbi(ep, func_no, reg);
if (!(val & PCI_MSI_FLAGS_ENABLE))
return -EINVAL;
- val = (val & PCI_MSI_FLAGS_QSIZE) >> 4;
+ val = FIELD_GET(PCI_MSI_FLAGS_QSIZE, val);
- return val;
+ return 1 << val;
}
static int dw_pcie_ep_set_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
- u8 interrupts)
+ u8 nr_irqs)
{
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- u32 val, reg;
- unsigned int func_offset = 0;
struct dw_pcie_ep_func *ep_func;
+ u8 mmc = order_base_2(nr_irqs);
+ u32 val, reg;
ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
if (!ep_func || !ep_func->msi_cap)
return -EINVAL;
- func_offset = dw_pcie_ep_func_select(ep, func_no);
-
- reg = ep_func->msi_cap + func_offset + PCI_MSI_FLAGS;
- val = dw_pcie_readw_dbi(pci, reg);
+ reg = ep_func->msi_cap + PCI_MSI_FLAGS;
+ val = dw_pcie_ep_readw_dbi(ep, func_no, reg);
val &= ~PCI_MSI_FLAGS_QMASK;
- val |= (interrupts << 1) & PCI_MSI_FLAGS_QMASK;
+ val |= FIELD_PREP(PCI_MSI_FLAGS_QMASK, mmc);
dw_pcie_dbi_ro_wr_en(pci);
- dw_pcie_writew_dbi(pci, reg, val);
+ dw_pcie_ep_writew_dbi(ep, func_no, reg, val);
dw_pcie_dbi_ro_wr_dis(pci);
return 0;
@@ -368,35 +535,30 @@ static int dw_pcie_ep_set_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
static int dw_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
- struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- u32 val, reg;
- unsigned int func_offset = 0;
struct dw_pcie_ep_func *ep_func;
+ u32 val, reg;
ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
if (!ep_func || !ep_func->msix_cap)
return -EINVAL;
- func_offset = dw_pcie_ep_func_select(ep, func_no);
-
- reg = ep_func->msix_cap + func_offset + PCI_MSIX_FLAGS;
- val = dw_pcie_readw_dbi(pci, reg);
+ reg = ep_func->msix_cap + PCI_MSIX_FLAGS;
+ val = dw_pcie_ep_readw_dbi(ep, func_no, reg);
if (!(val & PCI_MSIX_FLAGS_ENABLE))
return -EINVAL;
val &= PCI_MSIX_FLAGS_QSIZE;
- return val;
+ return val + 1;
}
static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
- u16 interrupts, enum pci_barno bir, u32 offset)
+ u16 nr_irqs, enum pci_barno bir, u32 offset)
{
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- u32 val, reg;
- unsigned int func_offset = 0;
struct dw_pcie_ep_func *ep_func;
+ u32 val, reg;
ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
if (!ep_func || !ep_func->msix_cap)
@@ -404,21 +566,19 @@ static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
dw_pcie_dbi_ro_wr_en(pci);
- func_offset = dw_pcie_ep_func_select(ep, func_no);
-
- reg = ep_func->msix_cap + func_offset + PCI_MSIX_FLAGS;
- val = dw_pcie_readw_dbi(pci, reg);
+ reg = ep_func->msix_cap + PCI_MSIX_FLAGS;
+ val = dw_pcie_ep_readw_dbi(ep, func_no, reg);
val &= ~PCI_MSIX_FLAGS_QSIZE;
- val |= interrupts;
+ val |= nr_irqs - 1; /* encoded as N-1 */
dw_pcie_writew_dbi(pci, reg, val);
- reg = ep_func->msix_cap + func_offset + PCI_MSIX_TABLE;
+ reg = ep_func->msix_cap + PCI_MSIX_TABLE;
val = offset | bir;
- dw_pcie_writel_dbi(pci, reg, val);
+ dw_pcie_ep_writel_dbi(ep, func_no, reg, val);
- reg = ep_func->msix_cap + func_offset + PCI_MSIX_PBA;
- val = (offset + (interrupts * PCI_MSIX_ENTRY_SIZE)) | bir;
- dw_pcie_writel_dbi(pci, reg, val);
+ reg = ep_func->msix_cap + PCI_MSIX_PBA;
+ val = (offset + (nr_irqs * PCI_MSIX_ENTRY_SIZE)) | bir;
+ dw_pcie_ep_writel_dbi(ep, func_no, reg, val);
dw_pcie_dbi_ro_wr_dis(pci);
@@ -426,7 +586,7 @@ static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
}
static int dw_pcie_ep_raise_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
- enum pci_epc_irq_type type, u16 interrupt_num)
+ unsigned int type, u16 interrupt_num)
{
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
@@ -467,6 +627,7 @@ static const struct pci_epc_ops epc_ops = {
.write_header = dw_pcie_ep_write_header,
.set_bar = dw_pcie_ep_set_bar,
.clear_bar = dw_pcie_ep_clear_bar,
+ .align_addr = dw_pcie_ep_align_addr,
.map_addr = dw_pcie_ep_map_addr,
.unmap_addr = dw_pcie_ep_unmap_addr,
.set_msi = dw_pcie_ep_set_msi,
@@ -479,62 +640,74 @@ static const struct pci_epc_ops epc_ops = {
.get_features = dw_pcie_ep_get_features,
};
-int dw_pcie_ep_raise_legacy_irq(struct dw_pcie_ep *ep, u8 func_no)
+/**
+ * dw_pcie_ep_raise_intx_irq - Raise INTx IRQ to the host
+ * @ep: DWC EP device
+ * @func_no: Function number of the endpoint
+ *
+ * Return: 0 if success, errno otherwise.
+ */
+int dw_pcie_ep_raise_intx_irq(struct dw_pcie_ep *ep, u8 func_no)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
struct device *dev = pci->dev;
- dev_err(dev, "EP cannot trigger legacy IRQs\n");
+ dev_err(dev, "EP cannot raise INTX IRQs\n");
return -EINVAL;
}
-EXPORT_SYMBOL_GPL(dw_pcie_ep_raise_legacy_irq);
+EXPORT_SYMBOL_GPL(dw_pcie_ep_raise_intx_irq);
+/**
+ * dw_pcie_ep_raise_msi_irq - Raise MSI IRQ to the host
+ * @ep: DWC EP device
+ * @func_no: Function number of the endpoint
+ * @interrupt_num: Interrupt number to be raised
+ *
+ * Return: 0 if success, errno otherwise.
+ */
int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
u8 interrupt_num)
{
- struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ u32 msg_addr_lower, msg_addr_upper, reg;
struct dw_pcie_ep_func *ep_func;
struct pci_epc *epc = ep->epc;
- unsigned int aligned_offset;
- unsigned int func_offset = 0;
+ size_t map_size = sizeof(u32);
+ size_t offset;
u16 msg_ctrl, msg_data;
- u32 msg_addr_lower, msg_addr_upper, reg;
- u64 msg_addr;
bool has_upper;
+ u64 msg_addr;
int ret;
ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
if (!ep_func || !ep_func->msi_cap)
return -EINVAL;
- func_offset = dw_pcie_ep_func_select(ep, func_no);
-
/* Raise MSI per the PCI Local Bus Specification Revision 3.0, 6.8.1. */
- reg = ep_func->msi_cap + func_offset + PCI_MSI_FLAGS;
- msg_ctrl = dw_pcie_readw_dbi(pci, reg);
+ reg = ep_func->msi_cap + PCI_MSI_FLAGS;
+ msg_ctrl = dw_pcie_ep_readw_dbi(ep, func_no, reg);
has_upper = !!(msg_ctrl & PCI_MSI_FLAGS_64BIT);
- reg = ep_func->msi_cap + func_offset + PCI_MSI_ADDRESS_LO;
- msg_addr_lower = dw_pcie_readl_dbi(pci, reg);
+ reg = ep_func->msi_cap + PCI_MSI_ADDRESS_LO;
+ msg_addr_lower = dw_pcie_ep_readl_dbi(ep, func_no, reg);
if (has_upper) {
- reg = ep_func->msi_cap + func_offset + PCI_MSI_ADDRESS_HI;
- msg_addr_upper = dw_pcie_readl_dbi(pci, reg);
- reg = ep_func->msi_cap + func_offset + PCI_MSI_DATA_64;
- msg_data = dw_pcie_readw_dbi(pci, reg);
+ reg = ep_func->msi_cap + PCI_MSI_ADDRESS_HI;
+ msg_addr_upper = dw_pcie_ep_readl_dbi(ep, func_no, reg);
+ reg = ep_func->msi_cap + PCI_MSI_DATA_64;
+ msg_data = dw_pcie_ep_readw_dbi(ep, func_no, reg);
} else {
msg_addr_upper = 0;
- reg = ep_func->msi_cap + func_offset + PCI_MSI_DATA_32;
- msg_data = dw_pcie_readw_dbi(pci, reg);
+ reg = ep_func->msi_cap + PCI_MSI_DATA_32;
+ msg_data = dw_pcie_ep_readw_dbi(ep, func_no, reg);
}
- aligned_offset = msg_addr_lower & (epc->mem->window.page_size - 1);
- msg_addr = ((u64)msg_addr_upper) << 32 |
- (msg_addr_lower & ~aligned_offset);
+ msg_addr = ((u64)msg_addr_upper) << 32 | msg_addr_lower;
+
+ msg_addr = dw_pcie_ep_align_addr(epc, msg_addr, &map_size, &offset);
ret = dw_pcie_ep_map_addr(epc, func_no, 0, ep->msi_mem_phys, msg_addr,
- epc->mem->window.page_size);
+ map_size);
if (ret)
return ret;
- writel(msg_data | (interrupt_num - 1), ep->msi_mem + aligned_offset);
+ writel(msg_data | (interrupt_num - 1), ep->msi_mem + offset);
dw_pcie_ep_unmap_addr(epc, func_no, 0, ep->msi_mem_phys);
@@ -542,6 +715,15 @@ int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
}
EXPORT_SYMBOL_GPL(dw_pcie_ep_raise_msi_irq);
+/**
+ * dw_pcie_ep_raise_msix_irq_doorbell - Raise MSI-X to the host using the
+ * doorbell method
+ * @ep: DWC EP device
+ * @func_no: Function number of the endpoint device
+ * @interrupt_num: Interrupt number to be raised
+ *
+ * Return: 0 if success, errno otherwise.
+ */
int dw_pcie_ep_raise_msix_irq_doorbell(struct dw_pcie_ep *ep, u8 func_no,
u16 interrupt_num)
{
@@ -561,16 +743,24 @@ int dw_pcie_ep_raise_msix_irq_doorbell(struct dw_pcie_ep *ep, u8 func_no,
return 0;
}
+/**
+ * dw_pcie_ep_raise_msix_irq - Raise MSI-X to the host
+ * @ep: DWC EP device
+ * @func_no: Function number of the endpoint device
+ * @interrupt_num: Interrupt number to be raised
+ *
+ * Return: 0 if success, errno otherwise.
+ */
int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
u16 interrupt_num)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- struct dw_pcie_ep_func *ep_func;
struct pci_epf_msix_tbl *msix_tbl;
+ struct dw_pcie_ep_func *ep_func;
struct pci_epc *epc = ep->epc;
- unsigned int func_offset = 0;
+ size_t map_size = sizeof(u32);
+ size_t offset;
u32 reg, msg_data, vec_ctrl;
- unsigned int aligned_offset;
u32 tbl_offset;
u64 msg_addr;
int ret;
@@ -580,11 +770,9 @@ int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
if (!ep_func || !ep_func->msix_cap)
return -EINVAL;
- func_offset = dw_pcie_ep_func_select(ep, func_no);
-
- reg = ep_func->msix_cap + func_offset + PCI_MSIX_TABLE;
- tbl_offset = dw_pcie_readl_dbi(pci, reg);
- bir = (tbl_offset & PCI_MSIX_TABLE_BIR);
+ reg = ep_func->msix_cap + PCI_MSIX_TABLE;
+ tbl_offset = dw_pcie_ep_readl_dbi(ep, func_no, reg);
+ bir = FIELD_GET(PCI_MSIX_TABLE_BIR, tbl_offset);
tbl_offset &= PCI_MSIX_TABLE_OFFSET;
msix_tbl = ep->epf_bar[bir]->addr + tbl_offset;
@@ -597,58 +785,125 @@ int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
return -EPERM;
}
- aligned_offset = msg_addr & (epc->mem->window.page_size - 1);
+ msg_addr = dw_pcie_ep_align_addr(epc, msg_addr, &map_size, &offset);
ret = dw_pcie_ep_map_addr(epc, func_no, 0, ep->msi_mem_phys, msg_addr,
- epc->mem->window.page_size);
+ map_size);
if (ret)
return ret;
- writel(msg_data, ep->msi_mem + aligned_offset);
+ writel(msg_data, ep->msi_mem + offset);
dw_pcie_ep_unmap_addr(epc, func_no, 0, ep->msi_mem_phys);
return 0;
}
+EXPORT_SYMBOL_GPL(dw_pcie_ep_raise_msix_irq);
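For reference, each MSI-X table entry is four dwords (message address low/high, message data, vector control), and the code above bails out when the per-vector mask bit is set. A self-contained sketch of that lookup with an illustrative entry layout (names are not the in-tree struct):

#include <stdint.h>

struct example_msix_entry {
	uint64_t msg_addr;	/* message address, low dword then high dword */
	uint32_t msg_data;
	uint32_t vector_ctrl;	/* bit 0: entry masked */
};

/* Look up the entry for a 1-based interrupt number; NULL if masked. */
static const struct example_msix_entry *
example_msix_lookup(const struct example_msix_entry *tbl, uint16_t interrupt_num)
{
	const struct example_msix_entry *e = &tbl[interrupt_num - 1];

	return (e->vector_ctrl & 0x1) ? (const struct example_msix_entry *)0 : e;
}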
-void dw_pcie_ep_exit(struct dw_pcie_ep *ep)
+/**
+ * dw_pcie_ep_cleanup - Cleanup DWC EP resources after fundamental reset
+ * @ep: DWC EP device
+ *
+ * Cleans up the DWC EP specific resources (eDMA, debugfs, etc.) after a
+ * fundamental reset such as PERST#. Note that this API is only applicable to
+ * drivers supporting PERST# or other methods of fundamental reset.
+ */
+void dw_pcie_ep_cleanup(struct dw_pcie_ep *ep)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- struct pci_epc *epc = ep->epc;
+ dwc_pcie_debugfs_deinit(pci);
dw_pcie_edma_remove(pci);
+}
+EXPORT_SYMBOL_GPL(dw_pcie_ep_cleanup);
+
+/**
+ * dw_pcie_ep_deinit - Deinitialize the endpoint device
+ * @ep: DWC EP device
+ *
+ * Deinitialize the endpoint device. The EPC device is not destroyed here since
+ * that is taken care of by devres.
+ */
+void dw_pcie_ep_deinit(struct dw_pcie_ep *ep)
+{
+ struct pci_epc *epc = ep->epc;
+
+ dw_pcie_ep_cleanup(ep);
pci_epc_mem_free_addr(epc, ep->msi_mem_phys, ep->msi_mem,
epc->mem->window.page_size);
pci_epc_mem_exit(epc);
}
+EXPORT_SYMBOL_GPL(dw_pcie_ep_deinit);
-static unsigned int dw_pcie_ep_find_ext_capability(struct dw_pcie *pci, int cap)
+static void dw_pcie_ep_init_non_sticky_registers(struct dw_pcie *pci)
{
- u32 header;
- int pos = PCI_CFG_SPACE_SIZE;
+ struct dw_pcie_ep *ep = &pci->ep;
+ unsigned int offset;
+ unsigned int nbars;
+ enum pci_barno bar;
+ u32 reg, i, val;
+
+ offset = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_REBAR);
- while (pos) {
- header = dw_pcie_readl_dbi(pci, pos);
- if (PCI_EXT_CAP_ID(header) == cap)
- return pos;
+ dw_pcie_dbi_ro_wr_en(pci);
- pos = PCI_EXT_CAP_NEXT(header);
- if (!pos)
- break;
+ if (offset) {
+ reg = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL);
+ nbars = FIELD_GET(PCI_REBAR_CTRL_NBAR_MASK, reg);
+
+ /*
+ * PCIe r6.0, sec 7.8.6.2 requires us to support at least one
+ * size in the range from 1 MB to 512 GB. Advertise support
+ * for 1 MB BAR size only.
+ *
+ * For a BAR that has been configured via dw_pcie_ep_set_bar(),
+ * advertise support for only that size instead.
+ */
+ for (i = 0; i < nbars; i++, offset += PCI_REBAR_CTRL) {
+ /*
+ * While the RESBAR_CAP_REG_* fields are sticky, the
+ * RESBAR_CTRL_REG_BAR_SIZE field is non-sticky (it is
+ * sticky in certain versions of DWC PCIe, but not all).
+ *
+ * RESBAR_CTRL_REG_BAR_SIZE is updated automatically by
+ * the controller when RESBAR_CAP_REG is written, which
+ * is why RESBAR_CAP_REG is written here.
+ */
+ val = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL);
+ bar = FIELD_GET(PCI_REBAR_CTRL_BAR_IDX, val);
+ if (ep->epf_bar[bar])
+ pci_epc_bar_size_to_rebar_cap(ep->epf_bar[bar]->size, &val);
+ else
+ val = BIT(4);
+
+ dw_pcie_writel_dbi(pci, offset + PCI_REBAR_CAP, val);
+ }
}
- return 0;
+ dw_pcie_setup(pci);
+ dw_pcie_dbi_ro_wr_dis(pci);
}
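The Resizable BAR Capability register encodes supported sizes one bit per power of two, with bit 4 meaning 1 MB and each higher bit doubling the size, which is why BIT(4) above advertises 1 MB only. A hedged, self-contained sketch of that encoding (not the in-tree pci_epc_bar_size_to_rebar_cap()):

#include <stdint.h>

/*
 * Map a BAR size in bytes to a Resizable BAR capability bit:
 * bit 4 = 1 MB, bit 5 = 2 MB, ... (sizes below 1 MB round up to 1 MB).
 */
static uint32_t example_size_to_rebar_cap(uint64_t size)
{
	unsigned int order = 20;	/* 2^20 = 1 MB */

	while (((uint64_t)1 << order) < size && order < 47)
		order++;		/* bits 4..31 cover 1 MB .. 128 TB */

	return (uint32_t)1 << (order - 20 + 4);
}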
-int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep)
+/**
+ * dw_pcie_ep_init_registers - Initialize DWC EP specific registers
+ * @ep: DWC EP device
+ *
+ * Initialize the registers (CSRs) specific to DWC EP. This API should be called
+ * only when the endpoint receives an active refclk (either from host or
+ * generated locally).
+ *
+ * Return: 0 if success, errno otherwise.
+ */
+int dw_pcie_ep_init_registers(struct dw_pcie_ep *ep)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- unsigned int offset, ptm_cap_base;
- unsigned int nbars;
+ struct dw_pcie_ep_func *ep_func;
+ struct device *dev = pci->dev;
+ struct pci_epc *epc = ep->epc;
+ u32 ptm_cap_base, reg;
u8 hdr_type;
- u32 reg;
- int i;
+ u8 func_no;
+ void *addr;
+ int ret;
hdr_type = dw_pcie_readb_dbi(pci, PCI_HEADER_TYPE) &
PCI_HEADER_TYPE_MASK;
@@ -659,20 +914,61 @@ int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep)
return -EIO;
}
- offset = dw_pcie_ep_find_ext_capability(pci, PCI_EXT_CAP_ID_REBAR);
- ptm_cap_base = dw_pcie_ep_find_ext_capability(pci, PCI_EXT_CAP_ID_PTM);
+ dw_pcie_version_detect(pci);
- dw_pcie_dbi_ro_wr_en(pci);
+ dw_pcie_iatu_detect(pci);
- if (offset) {
- reg = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL);
- nbars = (reg & PCI_REBAR_CTRL_NBAR_MASK) >>
- PCI_REBAR_CTRL_NBAR_SHIFT;
+ ret = dw_pcie_edma_detect(pci);
+ if (ret)
+ return ret;
- for (i = 0; i < nbars; i++, offset += PCI_REBAR_CTRL)
- dw_pcie_writel_dbi(pci, offset + PCI_REBAR_CAP, 0x0);
+ ret = -ENOMEM;
+ if (!ep->ib_window_map) {
+ ep->ib_window_map = devm_bitmap_zalloc(dev, pci->num_ib_windows,
+ GFP_KERNEL);
+ if (!ep->ib_window_map)
+ goto err_remove_edma;
}
+ if (!ep->ob_window_map) {
+ ep->ob_window_map = devm_bitmap_zalloc(dev, pci->num_ob_windows,
+ GFP_KERNEL);
+ if (!ep->ob_window_map)
+ goto err_remove_edma;
+ }
+
+ if (!ep->outbound_addr) {
+ addr = devm_kcalloc(dev, pci->num_ob_windows, sizeof(phys_addr_t),
+ GFP_KERNEL);
+ if (!addr)
+ goto err_remove_edma;
+ ep->outbound_addr = addr;
+ }
+
+ for (func_no = 0; func_no < epc->max_functions; func_no++) {
+
+ ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
+ if (ep_func)
+ continue;
+
+ ep_func = devm_kzalloc(dev, sizeof(*ep_func), GFP_KERNEL);
+ if (!ep_func)
+ goto err_remove_edma;
+
+ ep_func->func_no = func_no;
+ ep_func->msi_cap = dw_pcie_ep_find_capability(ep, func_no,
+ PCI_CAP_ID_MSI);
+ ep_func->msix_cap = dw_pcie_ep_find_capability(ep, func_no,
+ PCI_CAP_ID_MSIX);
+
+ list_add_tail(&ep_func->list, &ep->func_list);
+ }
+
+ if (ep->ops->init)
+ ep->ops->init(ep);
+
+ ptm_cap_base = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_PTM);
+
/*
* PTM responder capability can be disabled only after disabling
* PTM root capability.
@@ -689,28 +985,65 @@ int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep)
dw_pcie_dbi_ro_wr_dis(pci);
}
- dw_pcie_setup(pci);
- dw_pcie_dbi_ro_wr_dis(pci);
+ dw_pcie_ep_init_non_sticky_registers(pci);
+
+ dwc_pcie_debugfs_init(pci, DW_PCIE_EP_TYPE);
return 0;
+
+err_remove_edma:
+ dw_pcie_edma_remove(pci);
+
+ return ret;
}
-EXPORT_SYMBOL_GPL(dw_pcie_ep_init_complete);
+EXPORT_SYMBOL_GPL(dw_pcie_ep_init_registers);
-int dw_pcie_ep_init(struct dw_pcie_ep *ep)
+/**
+ * dw_pcie_ep_linkup - Notify EPF drivers about Link Up event
+ * @ep: DWC EP device
+ */
+void dw_pcie_ep_linkup(struct dw_pcie_ep *ep)
+{
+ struct pci_epc *epc = ep->epc;
+
+ pci_epc_linkup(epc);
+}
+EXPORT_SYMBOL_GPL(dw_pcie_ep_linkup);
+
+/**
+ * dw_pcie_ep_linkdown - Notify EPF drivers about Link Down event
+ * @ep: DWC EP device
+ *
+ * Non-sticky registers are also initialized before sending the notification to
+ * the EPF drivers. This is needed since those registers have to be
+ * reinitialized before the link comes back up again.
+ */
+void dw_pcie_ep_linkdown(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct pci_epc *epc = ep->epc;
+
+ /*
+ * Initialize the non-sticky DWC registers as they would have been reset
+ * after Link Down. This is specifically needed for drivers not supporting
+ * PERST#, as they have no other way to reinitialize the registers before
+ * the link comes back up again.
+ */
+ dw_pcie_ep_init_non_sticky_registers(pci);
+
+ pci_epc_linkdown(epc);
+}
+EXPORT_SYMBOL_GPL(dw_pcie_ep_linkdown);
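A glue driver with a controller-specific link event interrupt would typically forward it to these notifiers. A hypothetical handler is sketched below; the EXAMPLE_LINK_* register and bit names are made up for illustration and assume kernel context:

static irqreturn_t example_pcie_link_irq(int irq, void *arg)
{
	struct dw_pcie_ep *ep = arg;
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	u32 status;

	/* EXAMPLE_LINK_STATUS and its bits are hypothetical */
	status = dw_pcie_readl_dbi(pci, EXAMPLE_LINK_STATUS);

	if (status & EXAMPLE_LINK_UP)
		dw_pcie_ep_linkup(ep);
	else if (status & EXAMPLE_LINK_DOWN)
		dw_pcie_ep_linkdown(ep);

	return IRQ_HANDLED;
}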
+
+static int dw_pcie_ep_get_resources(struct dw_pcie_ep *ep)
{
- int ret;
- void *addr;
- u8 func_no;
- struct resource *res;
- struct pci_epc *epc;
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
struct device *dev = pci->dev;
struct platform_device *pdev = to_platform_device(dev);
struct device_node *np = dev->of_node;
- const struct pci_epc_features *epc_features;
- struct dw_pcie_ep_func *ep_func;
-
- INIT_LIST_HEAD(&ep->func_list);
+ struct pci_epc *epc = ep->epc;
+ struct resource *res;
+ int ret;
ret = dw_pcie_get_resources(pci);
if (ret)
@@ -723,25 +1056,37 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
ep->phys_base = res->start;
ep->addr_size = resource_size(res);
- dw_pcie_version_detect(pci);
+ /*
+ * artpec6_pcie_cpu_addr_fixup() uses ep->phys_base, so call
+ * dw_pcie_parent_bus_offset() after setting ep->phys_base.
+ */
+ pci->parent_bus_offset = dw_pcie_parent_bus_offset(pci, "addr_space",
+ ep->phys_base);
- dw_pcie_iatu_detect(pci);
+ ret = of_property_read_u8(np, "max-functions", &epc->max_functions);
+ if (ret < 0)
+ epc->max_functions = 1;
- ep->ib_window_map = devm_bitmap_zalloc(dev, pci->num_ib_windows,
- GFP_KERNEL);
- if (!ep->ib_window_map)
- return -ENOMEM;
+ return 0;
+}
- ep->ob_window_map = devm_bitmap_zalloc(dev, pci->num_ob_windows,
- GFP_KERNEL);
- if (!ep->ob_window_map)
- return -ENOMEM;
+/**
+ * dw_pcie_ep_init - Initialize the endpoint device
+ * @ep: DWC EP device
+ *
+ * Initialize the endpoint device. Allocate resources and create the EPC
+ * device with the endpoint framework.
+ *
+ * Return: 0 if success, errno otherwise.
+ */
+int dw_pcie_ep_init(struct dw_pcie_ep *ep)
+{
+ int ret;
+ struct pci_epc *epc;
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct device *dev = pci->dev;
- addr = devm_kcalloc(dev, pci->num_ob_windows, sizeof(phys_addr_t),
- GFP_KERNEL);
- if (!addr)
- return -ENOMEM;
- ep->outbound_addr = addr;
+ INIT_LIST_HEAD(&ep->func_list);
epc = devm_pci_epc_create(dev, &epc_ops);
if (IS_ERR(epc)) {
@@ -752,26 +1097,12 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
ep->epc = epc;
epc_set_drvdata(epc, ep);
- ret = of_property_read_u8(np, "max-functions", &epc->max_functions);
- if (ret < 0)
- epc->max_functions = 1;
-
- for (func_no = 0; func_no < epc->max_functions; func_no++) {
- ep_func = devm_kzalloc(dev, sizeof(*ep_func), GFP_KERNEL);
- if (!ep_func)
- return -ENOMEM;
-
- ep_func->func_no = func_no;
- ep_func->msi_cap = dw_pcie_ep_find_capability(ep, func_no,
- PCI_CAP_ID_MSI);
- ep_func->msix_cap = dw_pcie_ep_find_capability(ep, func_no,
- PCI_CAP_ID_MSIX);
-
- list_add_tail(&ep_func->list, &ep->func_list);
- }
+ ret = dw_pcie_ep_get_resources(ep);
+ if (ret)
+ return ret;
- if (ep->ops->ep_init)
- ep->ops->ep_init(ep);
+ if (ep->ops->pre_init)
+ ep->ops->pre_init(ep);
ret = pci_epc_mem_init(epc, ep->phys_base, ep->addr_size,
ep->page_size);
@@ -788,29 +1119,8 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
goto err_exit_epc_mem;
}
- ret = dw_pcie_edma_detect(pci);
- if (ret)
- goto err_free_epc_mem;
-
- if (ep->ops->get_features) {
- epc_features = ep->ops->get_features(ep);
- if (epc_features->core_init_notifier)
- return 0;
- }
-
- ret = dw_pcie_ep_init_complete(ep);
- if (ret)
- goto err_remove_edma;
-
return 0;
-err_remove_edma:
- dw_pcie_edma_remove(pci);
-
-err_free_epc_mem:
- pci_epc_mem_free_addr(epc, ep->msi_mem_phys, ep->msi_mem,
- epc->mem->window.page_size);
-
err_exit_epc_mem:
pci_epc_mem_exit(epc);
diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
index cf61733bf78d..372207c33a85 100644
--- a/drivers/pci/controller/dwc/pcie-designware-host.c
+++ b/drivers/pci/controller/dwc/pcie-designware-host.c
@@ -8,7 +8,10 @@
* Author: Jingoo Han <jg1.han@samsung.com>
*/
+#include <linux/align.h>
+#include <linux/iopoll.h>
#include <linux/irqchip/chained_irq.h>
+#include <linux/irqchip/irq-msi-lib.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>
#include <linux/of_address.h>
@@ -16,39 +19,30 @@
#include <linux/pci_regs.h>
#include <linux/platform_device.h>
+#include "../../pci.h"
#include "pcie-designware.h"
static struct pci_ops dw_pcie_ops;
+static struct pci_ops dw_pcie_ecam_ops;
static struct pci_ops dw_child_pcie_ops;
-static void dw_msi_ack_irq(struct irq_data *d)
-{
- irq_chip_ack_parent(d);
-}
-
-static void dw_msi_mask_irq(struct irq_data *d)
-{
- pci_msi_mask_irq(d);
- irq_chip_mask_parent(d);
-}
-
-static void dw_msi_unmask_irq(struct irq_data *d)
-{
- pci_msi_unmask_irq(d);
- irq_chip_unmask_parent(d);
-}
-
-static struct irq_chip dw_pcie_msi_irq_chip = {
- .name = "PCI-MSI",
- .irq_ack = dw_msi_ack_irq,
- .irq_mask = dw_msi_mask_irq,
- .irq_unmask = dw_msi_unmask_irq,
-};
-
-static struct msi_domain_info dw_pcie_msi_domain_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI),
- .chip = &dw_pcie_msi_irq_chip,
+#define DW_PCIE_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
+ MSI_FLAG_USE_DEF_CHIP_OPS | \
+ MSI_FLAG_NO_AFFINITY | \
+ MSI_FLAG_PCI_MSI_MASK_PARENT)
+#define DW_PCIE_MSI_FLAGS_SUPPORTED (MSI_FLAG_MULTI_PCI_MSI | \
+ MSI_FLAG_PCI_MSIX | \
+ MSI_GENERIC_FLAGS_MASK)
+
+#define IS_256MB_ALIGNED(x) IS_ALIGNED(x, SZ_256M)
+
+static const struct msi_parent_ops dw_pcie_msi_parent_ops = {
+ .required_flags = DW_PCIE_MSI_FLAGS_REQUIRED,
+ .supported_flags = DW_PCIE_MSI_FLAGS_SUPPORTED,
+ .bus_select_token = DOMAIN_BUS_PCI_MSI,
+ .chip_flags = MSI_CHIP_FLAG_SET_ACK,
+ .prefix = "DW-",
+ .init_dev_msi_info = msi_lib_init_dev_msi_info,
};
/* MSI int handler */
@@ -114,12 +108,6 @@ static void dw_pci_setup_msi_msg(struct irq_data *d, struct msi_msg *msg)
(int)d->hwirq, msg->address_hi, msg->address_lo);
}
-static int dw_pci_msi_set_affinity(struct irq_data *d,
- const struct cpumask *mask, bool force)
-{
- return -EINVAL;
-}
-
static void dw_pci_bottom_mask(struct irq_data *d)
{
struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
@@ -175,7 +163,6 @@ static struct irq_chip dw_pci_msi_bottom_irq_chip = {
.name = "DWPCI-MSI",
.irq_ack = dw_pci_bottom_ack,
.irq_compose_msi_msg = dw_pci_setup_msi_msg,
- .irq_set_affinity = dw_pci_msi_set_affinity,
.irq_mask = dw_pci_bottom_mask,
.irq_unmask = dw_pci_bottom_unmask,
};
@@ -231,30 +218,24 @@ static const struct irq_domain_ops dw_pcie_msi_domain_ops = {
int dw_pcie_allocate_domains(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct fwnode_handle *fwnode = of_node_to_fwnode(pci->dev->of_node);
-
- pp->irq_domain = irq_domain_create_linear(fwnode, pp->num_vectors,
- &dw_pcie_msi_domain_ops, pp);
+ struct irq_domain_info info = {
+ .fwnode = dev_fwnode(pci->dev),
+ .ops = &dw_pcie_msi_domain_ops,
+ .size = pp->num_vectors,
+ .host_data = pp,
+ };
+
+ pp->irq_domain = msi_create_parent_irq_domain(&info, &dw_pcie_msi_parent_ops);
if (!pp->irq_domain) {
dev_err(pci->dev, "Failed to create IRQ domain\n");
return -ENOMEM;
}
- irq_domain_update_bus_token(pp->irq_domain, DOMAIN_BUS_NEXUS);
-
- pp->msi_domain = pci_msi_create_irq_domain(fwnode,
- &dw_pcie_msi_domain_info,
- pp->irq_domain);
- if (!pp->msi_domain) {
- dev_err(pci->dev, "Failed to create MSI domain\n");
- irq_domain_remove(pp->irq_domain);
- return -ENOMEM;
- }
-
return 0;
}
+EXPORT_SYMBOL_GPL(dw_pcie_allocate_domains);
-static void dw_pcie_free_msi(struct dw_pcie_rp *pp)
+void dw_pcie_free_msi(struct dw_pcie_rp *pp)
{
u32 ctrl;
@@ -264,22 +245,36 @@ static void dw_pcie_free_msi(struct dw_pcie_rp *pp)
NULL, NULL);
}
- irq_domain_remove(pp->msi_domain);
irq_domain_remove(pp->irq_domain);
}
+EXPORT_SYMBOL_GPL(dw_pcie_free_msi);
-static void dw_pcie_msi_init(struct dw_pcie_rp *pp)
+void dw_pcie_msi_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
u64 msi_target = (u64)pp->msi_data;
+ u32 ctrl, num_ctrls;
if (!pci_msi_enabled() || !pp->has_msi_ctrl)
return;
+ num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
+
+ /* Initialize IRQ Status array */
+ for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
+ dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK +
+ (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
+ pp->irq_mask[ctrl]);
+ dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_ENABLE +
+ (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
+ ~0);
+ }
+
/* Program the msi_data */
dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_LO, lower_32_bits(msi_target));
dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_HI, upper_32_bits(msi_target));
}
+EXPORT_SYMBOL_GPL(dw_pcie_msi_init);
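Each MSI controller serves a fixed block of vectors, and its ENABLE/MASK/STATUS registers sit in consecutive register blocks, which is what the loop above walks. A self-contained sketch of the offset arithmetic; the base offset, the 32-vectors-per-controller count, and the 0xC block stride are assumptions based on the usual DWC register map:

#include <stdint.h>

#define EXAMPLE_MSI_INTR0_MASK		0x82C	/* assumed base offset */
#define EXAMPLE_MSI_IRQS_PER_CTRL	32	/* assumed vectors per controller */
#define EXAMPLE_MSI_CTRL_BLOCK_SIZE	0xC	/* assumed register block stride */

/* Register offset of the MASK register serving a given MSI vector */
static uint32_t example_msi_mask_offset(unsigned int hwirq)
{
	unsigned int ctrl = hwirq / EXAMPLE_MSI_IRQS_PER_CTRL;

	return EXAMPLE_MSI_INTR0_MASK + ctrl * EXAMPLE_MSI_CTRL_BLOCK_SIZE;
}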
static int dw_pcie_parse_split_msi_irq(struct dw_pcie_rp *pp)
{
@@ -321,12 +316,12 @@ static int dw_pcie_parse_split_msi_irq(struct dw_pcie_rp *pp)
return 0;
}
-static int dw_pcie_msi_host_init(struct dw_pcie_rp *pp)
+int dw_pcie_msi_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct device *dev = pci->dev;
struct platform_device *pdev = to_platform_device(dev);
- u64 *msi_vaddr;
+ u64 *msi_vaddr = NULL;
int ret;
u32 ctrl, num_ctrls;
@@ -377,78 +372,230 @@ static int dw_pcie_msi_host_init(struct dw_pcie_rp *pp)
* memory.
*/
ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
- if (ret)
- dev_warn(dev, "Failed to set DMA mask to 32-bit. Devices with only 32-bit MSI support may not work properly\n");
+ if (!ret)
+ msi_vaddr = dmam_alloc_coherent(dev, sizeof(u64), &pp->msi_data,
+ GFP_KERNEL);
- msi_vaddr = dmam_alloc_coherent(dev, sizeof(u64), &pp->msi_data,
- GFP_KERNEL);
if (!msi_vaddr) {
- dev_err(dev, "Failed to alloc and map MSI data\n");
- dw_pcie_free_msi(pp);
- return -ENOMEM;
+ dev_warn(dev, "Failed to allocate 32-bit MSI address\n");
+ dma_set_coherent_mask(dev, DMA_BIT_MASK(64));
+ msi_vaddr = dmam_alloc_coherent(dev, sizeof(u64), &pp->msi_data,
+ GFP_KERNEL);
+ if (!msi_vaddr) {
+ dev_err(dev, "Failed to allocate MSI address\n");
+ dw_pcie_free_msi(pp);
+ return -ENOMEM;
+ }
}
return 0;
}
+EXPORT_SYMBOL_GPL(dw_pcie_msi_host_init);
-int dw_pcie_host_init(struct dw_pcie_rp *pp)
+static void dw_pcie_host_request_msg_tlp_res(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct device *dev = pci->dev;
- struct device_node *np = dev->of_node;
- struct platform_device *pdev = to_platform_device(dev);
struct resource_entry *win;
- struct pci_host_bridge *bridge;
struct resource *res;
+
+ win = resource_list_first_type(&pp->bridge->windows, IORESOURCE_MEM);
+ if (win) {
+ res = devm_kzalloc(pci->dev, sizeof(*res), GFP_KERNEL);
+ if (!res)
+ return;
+
+ /*
+ * Allocate MSG TLP region of size 'region_align' at the end of
+ * the host bridge window.
+ */
+ res->start = win->res->end - pci->region_align + 1;
+ res->end = win->res->end;
+ res->name = "msg";
+ res->flags = win->res->flags | IORESOURCE_BUSY;
+
+ if (!devm_request_resource(pci->dev, win->res, res))
+ pp->msg_res = res;
+ }
+}
+
+static int dw_pcie_config_ecam_iatu(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct dw_pcie_ob_atu_cfg atu = {0};
+ resource_size_t bus_range_max;
+ struct resource_entry *bus;
int ret;
- raw_spin_lock_init(&pp->lock);
+ bus = resource_list_first_type(&pp->bridge->windows, IORESOURCE_BUS);
- ret = dw_pcie_get_resources(pci);
+ /*
+ * The root bus under the host bridge doesn't require any iATU configuration,
+ * as the DBI region is used to access the root bus config space.
+ * The bus immediately below the root bus needs a type 0 iATU configuration,
+ * and the remaining buses need a type 1 iATU configuration.
+ */
+ atu.index = 0;
+ atu.type = PCIE_ATU_TYPE_CFG0;
+ atu.parent_bus_addr = pp->cfg0_base + SZ_1M;
+ /* 1 MiB covers 1 bus * 32 devices * 8 functions * 4 KiB of config space */
+ atu.size = SZ_1M;
+ atu.ctrl2 = PCIE_ATU_CFG_SHIFT_MODE_ENABLE;
+ ret = dw_pcie_prog_outbound_atu(pci, &atu);
if (ret)
return ret;
+ bus_range_max = resource_size(bus->res);
+
+ if (bus_range_max < 2)
+ return 0;
+
+ /* Configure remaining buses in type 1 iATU configuration */
+ atu.index = 1;
+ atu.type = PCIE_ATU_TYPE_CFG1;
+ atu.parent_bus_addr = pp->cfg0_base + SZ_2M;
+ atu.size = (SZ_1M * bus_range_max) - SZ_2M;
+ atu.ctrl2 = PCIE_ATU_CFG_SHIFT_MODE_ENABLE;
+
+ return dw_pcie_prog_outbound_atu(pci, &atu);
+}
+
+static int dw_pcie_create_ecam_window(struct dw_pcie_rp *pp, struct resource *res)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct device *dev = pci->dev;
+ struct resource_entry *bus;
+
+ bus = resource_list_first_type(&pp->bridge->windows, IORESOURCE_BUS);
+ if (!bus)
+ return -ENODEV;
+
+ pp->cfg = pci_ecam_create(dev, res, bus->res, &pci_generic_ecam_ops);
+ if (IS_ERR(pp->cfg))
+ return PTR_ERR(pp->cfg);
+
+ return 0;
+}
+
+static bool dw_pcie_ecam_enabled(struct dw_pcie_rp *pp, struct resource *config_res)
+{
+ struct resource *bus_range;
+ u64 nr_buses;
+
+ /* Vendor glue drivers may implement their own ECAM mechanism */
+ if (pp->native_ecam)
+ return false;
+
+ /*
+ * PCIe spec r6.0, sec 7.2.2 mandates the base address used for ECAM to
+ * be aligned on a 2^(n+20) byte boundary, where n is the number of bits
+ * used for representing 'bus' in BDF. Since the DWC cores always use 8
+ * bits for representing 'bus', the base address has to be aligned to
+ * 2^28 byte boundary, which is 256 MiB.
+ */
+ if (!IS_256MB_ALIGNED(config_res->start))
+ return false;
+
+ bus_range = resource_list_first_type(&pp->bridge->windows, IORESOURCE_BUS)->res;
+ if (!bus_range)
+ return false;
+
+ nr_buses = resource_size(config_res) >> PCIE_ECAM_BUS_SHIFT;
+
+ return nr_buses >= resource_size(bus_range);
+}
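Put differently: with 8 bus-number bits each bus consumes 1 MiB of ECAM space (32 devices x 8 functions x 4 KiB), so the window must start on a 256 MiB boundary and must be large enough for every bus in the bus range. A self-contained sketch of that check (illustrative names, not the in-tree helper):

#include <stdbool.h>
#include <stdint.h>

#define EXAMPLE_ECAM_BUS_SHIFT	20	/* 1 MiB of config space per bus */

static bool example_ecam_window_ok(uint64_t base, uint64_t size,
				   unsigned int nr_buses)
{
	/* PCIe r6.0, sec 7.2.2: base aligned to 2^(n+20), n = 8 bus bits */
	if (base & ((1ULL << 28) - 1))
		return false;

	return (size >> EXAMPLE_ECAM_BUS_SHIFT) >= nr_buses;
}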
+
+static int dw_pcie_host_get_resources(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct device *dev = pci->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct resource_entry *win;
+ struct resource *res;
+ int ret;
+
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
- if (res) {
- pp->cfg0_size = resource_size(res);
- pp->cfg0_base = res->start;
+ if (!res) {
+ dev_err(dev, "Missing \"config\" reg space\n");
+ return -ENODEV;
+ }
+
+ pp->cfg0_size = resource_size(res);
+ pp->cfg0_base = res->start;
+
+ pp->ecam_enabled = dw_pcie_ecam_enabled(pp, res);
+ if (pp->ecam_enabled) {
+ ret = dw_pcie_create_ecam_window(pp, res);
+ if (ret)
+ return ret;
+ pp->bridge->ops = &dw_pcie_ecam_ops;
+ pp->bridge->sysdata = pp->cfg;
+ pp->cfg->priv = pp;
+ } else {
pp->va_cfg0_base = devm_pci_remap_cfg_resource(dev, res);
if (IS_ERR(pp->va_cfg0_base))
return PTR_ERR(pp->va_cfg0_base);
- } else {
- dev_err(dev, "Missing *config* reg space\n");
- return -ENODEV;
- }
- bridge = devm_pci_alloc_host_bridge(dev, 0);
- if (!bridge)
- return -ENOMEM;
+ /* Set default bus ops */
+ pp->bridge->ops = &dw_pcie_ops;
+ pp->bridge->child_ops = &dw_child_pcie_ops;
+ pp->bridge->sysdata = pp;
+ }
- pp->bridge = bridge;
+ ret = dw_pcie_get_resources(pci);
+ if (ret) {
+ if (pp->cfg)
+ pci_ecam_free(pp->cfg);
+ return ret;
+ }
/* Get the I/O range from DT */
- win = resource_list_first_type(&bridge->windows, IORESOURCE_IO);
+ win = resource_list_first_type(&pp->bridge->windows, IORESOURCE_IO);
if (win) {
pp->io_size = resource_size(win->res);
pp->io_bus_addr = win->res->start - win->offset;
pp->io_base = pci_pio_to_address(win->res->start);
}
- /* Set default bus ops */
- bridge->ops = &dw_pcie_ops;
- bridge->child_ops = &dw_child_pcie_ops;
+ /*
+ * visconti_pcie_cpu_addr_fixup() uses pp->io_base, so we have to
+ * call dw_pcie_parent_bus_offset() after setting pp->io_base.
+ */
+ pci->parent_bus_offset = dw_pcie_parent_bus_offset(pci, "config",
+ pp->cfg0_base);
+ return 0;
+}
- if (pp->ops->host_init) {
- ret = pp->ops->host_init(pp);
+int dw_pcie_host_init(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct device *dev = pci->dev;
+ struct device_node *np = dev->of_node;
+ struct pci_host_bridge *bridge;
+ int ret;
+
+ raw_spin_lock_init(&pp->lock);
+
+ bridge = devm_pci_alloc_host_bridge(dev, 0);
+ if (!bridge)
+ return -ENOMEM;
+
+ pp->bridge = bridge;
+
+ ret = dw_pcie_host_get_resources(pp);
+ if (ret)
+ return ret;
+
+ if (pp->ops->init) {
+ ret = pp->ops->init(pp);
if (ret)
- return ret;
+ goto err_free_ecam;
}
if (pci_msi_enabled()) {
- pp->has_msi_ctrl = !(pp->ops->msi_host_init ||
- of_property_read_bool(np, "msi-parent") ||
- of_property_read_bool(np, "msi-map"));
+ pp->has_msi_ctrl = !(pp->ops->msi_init ||
+ of_property_present(np, "msi-parent") ||
+ of_property_present(np, "msi-map"));
/*
* For the has_msi_ctrl case the default assignment is handled
@@ -462,8 +609,8 @@ int dw_pcie_host_init(struct dw_pcie_rp *pp)
goto err_deinit_host;
}
- if (pp->ops->msi_host_init) {
- ret = pp->ops->msi_host_init(pp);
+ if (pp->ops->msi_init) {
+ ret = pp->ops->msi_init(pp);
if (ret < 0)
goto err_deinit_host;
} else if (pp->has_msi_ctrl) {
@@ -477,6 +624,33 @@ int dw_pcie_host_init(struct dw_pcie_rp *pp)
dw_pcie_iatu_detect(pci);
+ if (pci->num_lanes < 1)
+ pci->num_lanes = dw_pcie_link_get_max_link_width(pci);
+
+ ret = of_pci_get_equalization_presets(dev, &pp->presets, pci->num_lanes);
+ if (ret)
+ goto err_free_msi;
+
+ if (pp->ecam_enabled) {
+ ret = dw_pcie_config_ecam_iatu(pp);
+ if (ret) {
+ dev_err(dev, "Failed to configure iATU in ECAM mode\n");
+ goto err_free_msi;
+ }
+ }
+
+ /*
+ * Allocate the resource for MSG TLP before programming the iATU
+ * outbound window in dw_pcie_setup_rc(). Since the allocation depends
+ * on the value of 'region_align', this has to be done after
+ * dw_pcie_iatu_detect().
+ *
+ * Glue drivers need to set 'use_atu_msg' before dw_pcie_host_init() to
+ * make use of the generic MSG TLP implementation.
+ */
+ if (pp->use_atu_msg)
+ dw_pcie_host_request_msg_tlp_res(pp);
+
ret = dw_pcie_edma_detect(pci);
if (ret)
goto err_free_msi;
@@ -485,26 +659,30 @@ int dw_pcie_host_init(struct dw_pcie_rp *pp)
if (ret)
goto err_remove_edma;
- if (dw_pcie_link_up(pci)) {
- dw_pcie_print_link_status(pci);
- } else {
+ if (!dw_pcie_link_up(pci)) {
ret = dw_pcie_start_link(pci);
if (ret)
goto err_remove_edma;
-
- if (pci->ops && pci->ops->start_link) {
- ret = dw_pcie_wait_for_link(pci);
- if (ret)
- goto err_stop_link;
- }
}
- bridge->sysdata = pp;
+ /*
+ * Note: Skip the link up delay only when a Link Up IRQ is present.
+ * If there is no Link Up IRQ, we should not bypass the delay
+ * because that would require users to manually rescan for devices.
+ */
+ if (!pp->use_linkup_irq)
+ /* Ignore errors, the link may come up later */
+ dw_pcie_wait_for_link(pci);
ret = pci_host_probe(bridge);
if (ret)
goto err_stop_link;
+ if (pp->ops->post_init)
+ pp->ops->post_init(pp);
+
+ dwc_pcie_debugfs_init(pci, DW_PCIE_RC_TYPE);
+
return 0;
err_stop_link:
@@ -518,8 +696,12 @@ err_free_msi:
dw_pcie_free_msi(pp);
err_deinit_host:
- if (pp->ops->host_deinit)
- pp->ops->host_deinit(pp);
+ if (pp->ops->deinit)
+ pp->ops->deinit(pp);
+
+err_free_ecam:
+ if (pp->cfg)
+ pci_ecam_free(pp->cfg);
return ret;
}
@@ -529,6 +711,8 @@ void dw_pcie_host_deinit(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ dwc_pcie_debugfs_deinit(pci);
+
pci_stop_root_bus(pp->bridge->bus);
pci_remove_root_bus(pp->bridge->bus);
@@ -539,8 +723,11 @@ void dw_pcie_host_deinit(struct dw_pcie_rp *pp)
if (pp->has_msi_ctrl)
dw_pcie_free_msi(pp);
- if (pp->ops->host_deinit)
- pp->ops->host_deinit(pp);
+ if (pp->ops->deinit)
+ pp->ops->deinit(pp);
+
+ if (pp->cfg)
+ pci_ecam_free(pp->cfg);
}
EXPORT_SYMBOL_GPL(dw_pcie_host_deinit);
@@ -549,6 +736,7 @@ static void __iomem *dw_pcie_other_conf_map_bus(struct pci_bus *bus,
{
struct dw_pcie_rp *pp = bus->sysdata;
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct dw_pcie_ob_atu_cfg atu = { 0 };
int type, ret;
u32 busdev;
@@ -571,8 +759,12 @@ static void __iomem *dw_pcie_other_conf_map_bus(struct pci_bus *bus,
else
type = PCIE_ATU_TYPE_CFG1;
- ret = dw_pcie_prog_outbound_atu(pci, 0, type, pp->cfg0_base, busdev,
- pp->cfg0_size);
+ atu.type = type;
+ atu.parent_bus_addr = pp->cfg0_base - pci->parent_bus_offset;
+ atu.pci_addr = busdev;
+ atu.size = pp->cfg0_size;
+
+ ret = dw_pcie_prog_outbound_atu(pci, &atu);
if (ret)
return NULL;
@@ -584,6 +776,7 @@ static int dw_pcie_rd_other_conf(struct pci_bus *bus, unsigned int devfn,
{
struct dw_pcie_rp *pp = bus->sysdata;
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct dw_pcie_ob_atu_cfg atu = { 0 };
int ret;
ret = pci_generic_config_read(bus, devfn, where, size, val);
@@ -591,9 +784,12 @@ static int dw_pcie_rd_other_conf(struct pci_bus *bus, unsigned int devfn,
return ret;
if (pp->cfg0_io_shared) {
- ret = dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO,
- pp->io_base, pp->io_bus_addr,
- pp->io_size);
+ atu.type = PCIE_ATU_TYPE_IO;
+ atu.parent_bus_addr = pp->io_base - pci->parent_bus_offset;
+ atu.pci_addr = pp->io_bus_addr;
+ atu.size = pp->io_size;
+
+ ret = dw_pcie_prog_outbound_atu(pci, &atu);
if (ret)
return PCIBIOS_SET_FAILED;
}
@@ -606,6 +802,7 @@ static int dw_pcie_wr_other_conf(struct pci_bus *bus, unsigned int devfn,
{
struct dw_pcie_rp *pp = bus->sysdata;
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct dw_pcie_ob_atu_cfg atu = { 0 };
int ret;
ret = pci_generic_config_write(bus, devfn, where, size, val);
@@ -613,9 +810,12 @@ static int dw_pcie_wr_other_conf(struct pci_bus *bus, unsigned int devfn,
return ret;
if (pp->cfg0_io_shared) {
- ret = dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO,
- pp->io_base, pp->io_bus_addr,
- pp->io_size);
+ atu.type = PCIE_ATU_TYPE_IO;
+ atu.parent_bus_addr = pp->io_base - pci->parent_bus_offset;
+ atu.pci_addr = pp->io_bus_addr;
+ atu.size = pp->io_size;
+
+ ret = dw_pcie_prog_outbound_atu(pci, &atu);
if (ret)
return PCIBIOS_SET_FAILED;
}
@@ -641,15 +841,47 @@ void __iomem *dw_pcie_own_conf_map_bus(struct pci_bus *bus, unsigned int devfn,
}
EXPORT_SYMBOL_GPL(dw_pcie_own_conf_map_bus);
+static void __iomem *dw_pcie_ecam_conf_map_bus(struct pci_bus *bus, unsigned int devfn, int where)
+{
+ struct pci_config_window *cfg = bus->sysdata;
+ struct dw_pcie_rp *pp = cfg->priv;
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ unsigned int busn = bus->number;
+
+ if (busn > 0)
+ return pci_ecam_map_bus(bus, devfn, where);
+
+ if (PCI_SLOT(devfn) > 0)
+ return NULL;
+
+ return pci->dbi_base + where;
+}
+
+static int dw_pcie_op_assert_perst(struct pci_bus *bus, bool assert)
+{
+ struct dw_pcie_rp *pp = bus->sysdata;
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+
+ return dw_pcie_assert_perst(pci, assert);
+}
+
static struct pci_ops dw_pcie_ops = {
.map_bus = dw_pcie_own_conf_map_bus,
.read = pci_generic_config_read,
.write = pci_generic_config_write,
+ .assert_perst = dw_pcie_op_assert_perst,
+};
+
+static struct pci_ops dw_pcie_ecam_ops = {
+ .map_bus = dw_pcie_ecam_conf_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
};
static int dw_pcie_iatu_setup(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct dw_pcie_ob_atu_cfg atu = { 0 };
struct resource_entry *entry;
int i, ret;
@@ -677,10 +909,19 @@ static int dw_pcie_iatu_setup(struct dw_pcie_rp *pp)
if (pci->num_ob_windows <= ++i)
break;
- ret = dw_pcie_prog_outbound_atu(pci, i, PCIE_ATU_TYPE_MEM,
- entry->res->start,
- entry->res->start - entry->offset,
- resource_size(entry->res));
+ atu.index = i;
+ atu.type = PCIE_ATU_TYPE_MEM;
+ atu.parent_bus_addr = entry->res->start - pci->parent_bus_offset;
+ atu.pci_addr = entry->res->start - entry->offset;
+
+ /* Adjust iATU size if MSG TLP region was allocated before */
+ if (pp->msg_res && pp->msg_res->parent == entry->res)
+ atu.size = resource_size(entry->res) -
+ resource_size(pp->msg_res);
+ else
+ atu.size = resource_size(entry->res);
+
+ ret = dw_pcie_prog_outbound_atu(pci, &atu);
if (ret) {
dev_err(pci->dev, "Failed to set MEM range %pr\n",
entry->res);
@@ -690,10 +931,13 @@ static int dw_pcie_iatu_setup(struct dw_pcie_rp *pp)
if (pp->io_size) {
if (pci->num_ob_windows > ++i) {
- ret = dw_pcie_prog_outbound_atu(pci, i, PCIE_ATU_TYPE_IO,
- pp->io_base,
- pp->io_bus_addr,
- pp->io_size);
+ atu.index = i;
+ atu.type = PCIE_ATU_TYPE_IO;
+ atu.parent_bus_addr = pp->io_base - pci->parent_bus_offset;
+ atu.pci_addr = pp->io_bus_addr;
+ atu.size = pp->io_size;
+
+ ret = dw_pcie_prog_outbound_atu(pci, &atu);
if (ret) {
dev_err(pci->dev, "Failed to set IO range %pr\n",
entry->res);
@@ -708,6 +952,8 @@ static int dw_pcie_iatu_setup(struct dw_pcie_rp *pp)
dev_warn(pci->dev, "Ranges exceed outbound iATU size (%d)\n",
pci->num_ob_windows);
+ pp->msg_atu_index = i;
+
i = 0;
resource_list_for_each_entry(entry, &pp->bridge->dma_ranges) {
if (resource_type(entry->res) != IORESOURCE_MEM)
@@ -734,10 +980,81 @@ static int dw_pcie_iatu_setup(struct dw_pcie_rp *pp)
return 0;
}
+static void dw_pcie_program_presets(struct dw_pcie_rp *pp, enum pci_bus_speed speed)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ u8 lane_eq_offset, lane_reg_size, cap_id;
+ u8 *presets;
+ u32 cap;
+ int i;
+
+ if (speed == PCIE_SPEED_8_0GT) {
+ presets = (u8 *)pp->presets.eq_presets_8gts;
+ lane_eq_offset = PCI_SECPCI_LE_CTRL;
+ cap_id = PCI_EXT_CAP_ID_SECPCI;
+ /* For the 8 GT/s data rate, each lane equalization control is 16 bits wide */
+ lane_reg_size = 0x2;
+ } else if (speed == PCIE_SPEED_16_0GT) {
+ presets = pp->presets.eq_presets_Ngts[EQ_PRESET_TYPE_16GTS - 1];
+ lane_eq_offset = PCI_PL_16GT_LE_CTRL;
+ cap_id = PCI_EXT_CAP_ID_PL_16GT;
+ lane_reg_size = 0x1;
+ } else if (speed == PCIE_SPEED_32_0GT) {
+ presets = pp->presets.eq_presets_Ngts[EQ_PRESET_TYPE_32GTS - 1];
+ lane_eq_offset = PCI_PL_32GT_LE_CTRL;
+ cap_id = PCI_EXT_CAP_ID_PL_32GT;
+ lane_reg_size = 0x1;
+ } else if (speed == PCIE_SPEED_64_0GT) {
+ presets = pp->presets.eq_presets_Ngts[EQ_PRESET_TYPE_64GTS - 1];
+ lane_eq_offset = PCI_PL_64GT_LE_CTRL;
+ cap_id = PCI_EXT_CAP_ID_PL_64GT;
+ lane_reg_size = 0x1;
+ } else {
+ return;
+ }
+
+ if (presets[0] == PCI_EQ_RESV)
+ return;
+
+ cap = dw_pcie_find_ext_capability(pci, cap_id);
+ if (!cap)
+ return;
+
+ /*
+ * Write preset values to the registers byte-by-byte for the given
+ * number of lanes and register size.
+ */
+ for (i = 0; i < pci->num_lanes * lane_reg_size; i++)
+ dw_pcie_writeb_dbi(pci, cap + lane_eq_offset + i, presets[i]);
+}
+
+static void dw_pcie_config_presets(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ enum pci_bus_speed speed = pcie_link_speed[pci->max_link_speed];
+
+ /*
+ * Lane equalization settings need to be applied for all data rates the
+ * controller supports and for all supported lanes.
+ */
+
+ if (speed >= PCIE_SPEED_8_0GT)
+ dw_pcie_program_presets(pp, PCIE_SPEED_8_0GT);
+
+ if (speed >= PCIE_SPEED_16_0GT)
+ dw_pcie_program_presets(pp, PCIE_SPEED_16_0GT);
+
+ if (speed >= PCIE_SPEED_32_0GT)
+ dw_pcie_program_presets(pp, PCIE_SPEED_32_0GT);
+
+ if (speed >= PCIE_SPEED_64_0GT)
+ dw_pcie_program_presets(pp, PCIE_SPEED_64_0GT);
+}
+
int dw_pcie_setup_rc(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- u32 val, ctrl, num_ctrls;
+ u32 val;
int ret;
/*
@@ -748,20 +1065,6 @@ int dw_pcie_setup_rc(struct dw_pcie_rp *pp)
dw_pcie_setup(pci);
- if (pp->has_msi_ctrl) {
- num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
-
- /* Initialize IRQ Status array */
- for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
- dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK +
- (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
- pp->irq_mask[ctrl]);
- dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_ENABLE +
- (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
- ~0);
- }
- }
-
dw_pcie_msi_init(pp);
/* Setup RC BARs */
@@ -787,6 +1090,9 @@ int dw_pcie_setup_rc(struct dw_pcie_rp *pp)
PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
dw_pcie_writel_dbi(pci, PCI_COMMAND, val);
+ dw_pcie_hide_unsupported_l1ss(pci);
+
+ dw_pcie_config_presets(pp);
/*
* If the platform provides its own child bus config accesses, it means
* the platform uses its own address translation component rather than
@@ -812,3 +1118,119 @@ int dw_pcie_setup_rc(struct dw_pcie_rp *pp)
return 0;
}
EXPORT_SYMBOL_GPL(dw_pcie_setup_rc);
+
+static int dw_pcie_pme_turn_off(struct dw_pcie *pci)
+{
+ struct dw_pcie_ob_atu_cfg atu = { 0 };
+ void __iomem *mem;
+ int ret;
+
+ if (pci->num_ob_windows <= pci->pp.msg_atu_index)
+ return -ENOSPC;
+
+ if (!pci->pp.msg_res)
+ return -ENOSPC;
+
+ atu.code = PCIE_MSG_CODE_PME_TURN_OFF;
+ atu.routing = PCIE_MSG_TYPE_R_BC;
+ atu.type = PCIE_ATU_TYPE_MSG;
+ atu.size = resource_size(pci->pp.msg_res);
+ atu.index = pci->pp.msg_atu_index;
+
+ atu.parent_bus_addr = pci->pp.msg_res->start - pci->parent_bus_offset;
+
+ ret = dw_pcie_prog_outbound_atu(pci, &atu);
+ if (ret)
+ return ret;
+
+ mem = ioremap(pci->pp.msg_res->start, pci->region_align);
+ if (!mem)
+ return -ENOMEM;
+
+ /* A dummy write is converted to a Msg TLP */
+ writel(0, mem);
+
+ iounmap(mem);
+
+ return 0;
+}
+
+int dw_pcie_suspend_noirq(struct dw_pcie *pci)
+{
+ u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ u32 val;
+ int ret;
+
+ /*
+ * If L1SS is supported, then do not put the link into L2 as some
+ * devices such as NVMe expect low resume latency.
+ */
+ if (dw_pcie_readw_dbi(pci, offset + PCI_EXP_LNKCTL) & PCI_EXP_LNKCTL_ASPM_L1)
+ return 0;
+
+ if (pci->pp.ops->pme_turn_off) {
+ pci->pp.ops->pme_turn_off(&pci->pp);
+ } else {
+ ret = dw_pcie_pme_turn_off(pci);
+ if (ret)
+ return ret;
+ }
+
+ ret = read_poll_timeout(dw_pcie_get_ltssm, val,
+ val == DW_PCIE_LTSSM_L2_IDLE ||
+ val <= DW_PCIE_LTSSM_DETECT_WAIT,
+ PCIE_PME_TO_L2_TIMEOUT_US/10,
+ PCIE_PME_TO_L2_TIMEOUT_US, false, pci);
+ if (ret) {
+ /* Only log a message when the LTSSM isn't in DETECT or POLL */
+ dev_err(pci->dev, "Timeout waiting for L2 entry! LTSSM: 0x%x\n", val);
+ return ret;
+ }
+
+ /*
+ * Per PCIe r6.0, sec 5.3.3.2.1, software should wait at least
+ * 100ns after L2/L3 Ready before turning off refclock and
+ * main power. This is harmless when no endpoint is connected.
+ */
+ udelay(1);
+
+ dw_pcie_stop_link(pci);
+ if (pci->pp.ops->deinit)
+ pci->pp.ops->deinit(&pci->pp);
+
+ pci->suspended = true;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dw_pcie_suspend_noirq);
+
+int dw_pcie_resume_noirq(struct dw_pcie *pci)
+{
+ int ret;
+
+ if (!pci->suspended)
+ return 0;
+
+ pci->suspended = false;
+
+ if (pci->pp.ops->init) {
+ ret = pci->pp.ops->init(&pci->pp);
+ if (ret) {
+ dev_err(pci->dev, "Host init failed: %d\n", ret);
+ return ret;
+ }
+ }
+
+ dw_pcie_setup_rc(&pci->pp);
+
+ ret = dw_pcie_start_link(pci);
+ if (ret)
+ return ret;
+
+ ret = dw_pcie_wait_for_link(pci);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dw_pcie_resume_noirq);
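Glue drivers are expected to call these from their own noirq system PM callbacks. A hypothetical wiring is sketched below; struct example_pcie and the callback names are illustrative, and the sketch assumes the usual pattern of embedding struct dw_pcie in the glue driver's private data:

/* Hypothetical glue-driver private data embedding the DWC core */
struct example_pcie {
	struct dw_pcie pci;
};

static int example_pcie_suspend_noirq(struct device *dev)
{
	struct example_pcie *priv = dev_get_drvdata(dev);

	return dw_pcie_suspend_noirq(&priv->pci);
}

static int example_pcie_resume_noirq(struct device *dev)
{
	struct example_pcie *priv = dev_get_drvdata(dev);

	return dw_pcie_resume_noirq(&priv->pci);
}

static const struct dev_pm_ops example_pcie_pm_ops = {
	NOIRQ_SYSTEM_SLEEP_PM_OPS(example_pcie_suspend_noirq,
				  example_pcie_resume_noirq)
};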
diff --git a/drivers/pci/controller/dwc/pcie-designware-plat.c b/drivers/pci/controller/dwc/pcie-designware-plat.c
index 1fcfb840f238..12f41886c65d 100644
--- a/drivers/pci/controller/dwc/pcie-designware-plat.c
+++ b/drivers/pci/controller/dwc/pcie-designware-plat.c
@@ -12,7 +12,7 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/resource.h>
@@ -42,17 +42,16 @@ static void dw_plat_pcie_ep_init(struct dw_pcie_ep *ep)
}
static int dw_plat_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
- enum pci_epc_irq_type type,
- u16 interrupt_num)
+ unsigned int type, u16 interrupt_num)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
switch (type) {
- case PCI_EPC_IRQ_LEGACY:
- return dw_pcie_ep_raise_legacy_irq(ep, func_no);
- case PCI_EPC_IRQ_MSI:
+ case PCI_IRQ_INTX:
+ return dw_pcie_ep_raise_intx_irq(ep, func_no);
+ case PCI_IRQ_MSI:
return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
- case PCI_EPC_IRQ_MSIX:
+ case PCI_IRQ_MSIX:
return dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num);
default:
dev_err(pci->dev, "UNKNOWN IRQ type\n");
@@ -62,7 +61,6 @@ static int dw_plat_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
}
static const struct pci_epc_features dw_plat_pcie_epc_features = {
- .linkup_notifier = false,
.msi_capable = true,
.msix_capable = true,
};
@@ -74,7 +72,7 @@ dw_plat_pcie_get_features(struct dw_pcie_ep *ep)
}
static const struct dw_pcie_ep_ops pcie_ep_ops = {
- .ep_init = dw_plat_pcie_ep_init,
+ .init = dw_plat_pcie_ep_init,
.raise_irq = dw_plat_pcie_ep_raise_irq,
.get_features = dw_plat_pcie_get_features,
};
@@ -146,6 +144,17 @@ static int dw_plat_pcie_probe(struct platform_device *pdev)
pci->ep.ops = &pcie_ep_ops;
ret = dw_pcie_ep_init(&pci->ep);
+ if (ret)
+ return ret;
+
+ ret = dw_pcie_ep_init_registers(&pci->ep);
+ if (ret) {
+ dev_err(dev, "Failed to initialize DWC endpoint registers\n");
+ dw_pcie_ep_deinit(&pci->ep);
+ }
+
+ pci_epc_init_notify(pci->ep.epc);
+
break;
default:
dev_err(dev, "INVALID device type %d\n", dw_plat_pcie->mode);
diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c
index c87848cd8686..75fc8b767fcc 100644
--- a/drivers/pci/controller/dwc/pcie-designware.c
+++ b/drivers/pci/controller/dwc/pcie-designware.c
@@ -16,7 +16,9 @@
#include <linux/gpio/consumer.h>
#include <linux/ioport.h>
#include <linux/of.h>
-#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/pcie-dwc.h>
+#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/types.h>
@@ -52,6 +54,14 @@ static const char * const dw_pcie_core_rsts[DW_PCIE_NUM_CORE_RSTS] = {
[DW_PCIE_PWR_RST] = "pwr",
};
+static const struct dwc_pcie_vsec_id dwc_pcie_ptm_vsec_ids[] = {
+ { .vendor_id = PCI_VENDOR_ID_QCOM, /* EP */
+ .vsec_id = 0x03, .vsec_rev = 0x1 },
+ { .vendor_id = PCI_VENDOR_ID_QCOM, /* RC */
+ .vsec_id = 0x04, .vsec_rev = 0x1 },
+ { }
+};
+
static int dw_pcie_get_clocks(struct dw_pcie *pci)
{
int i, ret;
@@ -112,6 +122,7 @@ int dw_pcie_get_resources(struct dw_pcie *pci)
pci->dbi_base = devm_pci_remap_cfg_resource(pci->dev, res);
if (IS_ERR(pci->dbi_base))
return PTR_ERR(pci->dbi_base);
+ pci->dbi_phys_addr = res->start;
}
/* DBI2 is mainly useful for the endpoint controller */
@@ -134,6 +145,7 @@ int dw_pcie_get_resources(struct dw_pcie *pci)
pci->atu_base = devm_ioremap_resource(pci->dev, res);
if (IS_ERR(pci->atu_base))
return PTR_ERR(pci->atu_base);
+ pci->atu_phys_addr = res->start;
} else {
pci->atu_base = pci->dbi_base + DEFAULT_DBI_ATU_OFFSET;
}
@@ -155,6 +167,16 @@ int dw_pcie_get_resources(struct dw_pcie *pci)
}
}
+ /* ELBI is an optional resource */
+ if (!pci->elbi_base) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "elbi");
+ if (res) {
+ pci->elbi_base = devm_ioremap_resource(pci->dev, res);
+ if (IS_ERR(pci->elbi_base))
+ return PTR_ERR(pci->elbi_base);
+ }
+ }
+
/* LLDD is supposed to manually switch the clocks and resets state */
if (dw_pcie_cap_is(pci, REQ_RES)) {
ret = dw_pcie_get_clocks(pci);
@@ -166,8 +188,8 @@ int dw_pcie_get_resources(struct dw_pcie *pci)
return ret;
}
- if (pci->link_gen < 1)
- pci->link_gen = of_pci_get_max_link_speed(np);
+ if (pci->max_link_speed < 1)
+ pci->max_link_speed = of_pci_get_max_link_speed(np);
of_property_read_u32(np, "num-lanes", &pci->num_lanes);
@@ -201,85 +223,69 @@ void dw_pcie_version_detect(struct dw_pcie *pci)
pci->type = ver;
}
-/*
- * These interfaces resemble the pci_find_*capability() interfaces, but these
- * are for configuring host controllers, which are bridges *to* PCI devices but
- * are not PCI devices themselves.
- */
-static u8 __dw_pcie_find_next_cap(struct dw_pcie *pci, u8 cap_ptr,
- u8 cap)
+u8 dw_pcie_find_capability(struct dw_pcie *pci, u8 cap)
{
- u8 cap_id, next_cap_ptr;
- u16 reg;
-
- if (!cap_ptr)
- return 0;
-
- reg = dw_pcie_readw_dbi(pci, cap_ptr);
- cap_id = (reg & 0x00ff);
-
- if (cap_id > PCI_CAP_ID_MAX)
- return 0;
-
- if (cap_id == cap)
- return cap_ptr;
-
- next_cap_ptr = (reg & 0xff00) >> 8;
- return __dw_pcie_find_next_cap(pci, next_cap_ptr, cap);
+ return PCI_FIND_NEXT_CAP(dw_pcie_read_cfg, PCI_CAPABILITY_LIST, cap,
+ pci);
}
+EXPORT_SYMBOL_GPL(dw_pcie_find_capability);
-u8 dw_pcie_find_capability(struct dw_pcie *pci, u8 cap)
+u16 dw_pcie_find_ext_capability(struct dw_pcie *pci, u8 cap)
{
- u8 next_cap_ptr;
- u16 reg;
-
- reg = dw_pcie_readw_dbi(pci, PCI_CAPABILITY_LIST);
- next_cap_ptr = (reg & 0x00ff);
-
- return __dw_pcie_find_next_cap(pci, next_cap_ptr, cap);
+ return PCI_FIND_NEXT_EXT_CAP(dw_pcie_read_cfg, 0, cap, pci);
}
-EXPORT_SYMBOL_GPL(dw_pcie_find_capability);
+EXPORT_SYMBOL_GPL(dw_pcie_find_ext_capability);
-static u16 dw_pcie_find_next_ext_capability(struct dw_pcie *pci, u16 start,
- u8 cap)
+static u16 __dw_pcie_find_vsec_capability(struct dw_pcie *pci, u16 vendor_id,
+ u16 vsec_id)
{
+ u16 vsec = 0;
u32 header;
- int ttl;
- int pos = PCI_CFG_SPACE_SIZE;
-
- /* minimum 8 bytes per capability */
- ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
- if (start)
- pos = start;
-
- header = dw_pcie_readl_dbi(pci, pos);
- /*
- * If we have no capabilities, this is indicated by cap ID,
- * cap version and next pointer all being 0.
- */
- if (header == 0)
+ if (vendor_id != dw_pcie_readw_dbi(pci, PCI_VENDOR_ID))
return 0;
- while (ttl-- > 0) {
- if (PCI_EXT_CAP_ID(header) == cap && pos != start)
- return pos;
+ while ((vsec = PCI_FIND_NEXT_EXT_CAP(dw_pcie_read_cfg, vsec,
+ PCI_EXT_CAP_ID_VNDR, pci))) {
+ header = dw_pcie_readl_dbi(pci, vsec + PCI_VNDR_HEADER);
+ if (PCI_VNDR_HEADER_ID(header) == vsec_id)
+ return vsec;
+ }
- pos = PCI_EXT_CAP_NEXT(header);
- if (pos < PCI_CFG_SPACE_SIZE)
- break;
+ return 0;
+}
+
+static u16 dw_pcie_find_vsec_capability(struct dw_pcie *pci,
+ const struct dwc_pcie_vsec_id *vsec_ids)
+{
+ const struct dwc_pcie_vsec_id *vid;
+ u16 vsec;
+ u32 header;
- header = dw_pcie_readl_dbi(pci, pos);
+ for (vid = vsec_ids; vid->vendor_id; vid++) {
+ vsec = __dw_pcie_find_vsec_capability(pci, vid->vendor_id,
+ vid->vsec_id);
+ if (vsec) {
+ header = dw_pcie_readl_dbi(pci, vsec + PCI_VNDR_HEADER);
+ if (PCI_VNDR_HEADER_REV(header) == vid->vsec_rev)
+ return vsec;
+ }
}
return 0;
}
-u16 dw_pcie_find_ext_capability(struct dw_pcie *pci, u8 cap)
+u16 dw_pcie_find_rasdes_capability(struct dw_pcie *pci)
{
- return dw_pcie_find_next_ext_capability(pci, 0, cap);
+ return dw_pcie_find_vsec_capability(pci, dwc_pcie_rasdes_vsec_ids);
}
-EXPORT_SYMBOL_GPL(dw_pcie_find_ext_capability);
+EXPORT_SYMBOL_GPL(dw_pcie_find_rasdes_capability);
+
+u16 dw_pcie_find_ptm_capability(struct dw_pcie *pci)
+{
+ return dw_pcie_find_vsec_capability(pci, dwc_pcie_ptm_vsec_ids);
+}
+EXPORT_SYMBOL_GPL(dw_pcie_find_ptm_capability);
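The PCI_FIND_NEXT_EXT_CAP() helper used above hides the usual extended-capability walk through config space starting at offset 0x100. A self-contained sketch of that walk, given only a 32-bit config read callback (illustrative names, not the in-tree macro):

#include <stdint.h>

#define EXAMPLE_CFG_SPACE_SIZE	0x100
#define EXAMPLE_EXT_CAP_ID(h)	((h) & 0xffff)
#define EXAMPLE_EXT_CAP_NEXT(h)	(((h) >> 20) & 0xffc)

typedef uint32_t (*cfg_read_fn)(void *priv, int where);

static uint16_t example_find_ext_cap(cfg_read_fn read, void *priv, uint16_t cap)
{
	int pos = EXAMPLE_CFG_SPACE_SIZE;
	int ttl = (4096 - 256) / 8;	/* minimum 8 bytes per capability */
	uint32_t header;

	while (ttl-- > 0) {
		header = read(priv, pos);
		/* ID, version and next pointer all zero: no capabilities */
		if (!header)
			return 0;
		if (EXAMPLE_EXT_CAP_ID(header) == cap)
			return pos;
		pos = EXAMPLE_EXT_CAP_NEXT(header);
		if (pos < EXAMPLE_CFG_SPACE_SIZE)
			return 0;
	}

	return 0;
}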
int dw_pcie_read(void __iomem *addr, int size, u32 *val)
{
@@ -365,6 +371,7 @@ void dw_pcie_write_dbi2(struct dw_pcie *pci, u32 reg, size_t size, u32 val)
if (ret)
dev_err(pci->dev, "write DBI address failed\n");
}
+EXPORT_SYMBOL_GPL(dw_pcie_write_dbi2);
static inline void __iomem *dw_pcie_select_atu(struct dw_pcie *pci, u32 dir,
u32 index)
@@ -464,56 +471,58 @@ static inline u32 dw_pcie_enable_ecrc(u32 val)
return val | PCIE_ATU_TD;
}
-static int __dw_pcie_prog_outbound_atu(struct dw_pcie *pci, u8 func_no,
- int index, int type, u64 cpu_addr,
- u64 pci_addr, u64 size)
+int dw_pcie_prog_outbound_atu(struct dw_pcie *pci,
+ const struct dw_pcie_ob_atu_cfg *atu)
{
+ u64 parent_bus_addr = atu->parent_bus_addr;
u32 retries, val;
u64 limit_addr;
- if (pci->ops && pci->ops->cpu_addr_fixup)
- cpu_addr = pci->ops->cpu_addr_fixup(pci, cpu_addr);
-
- limit_addr = cpu_addr + size - 1;
+ limit_addr = parent_bus_addr + atu->size - 1;
- if ((limit_addr & ~pci->region_limit) != (cpu_addr & ~pci->region_limit) ||
- !IS_ALIGNED(cpu_addr, pci->region_align) ||
- !IS_ALIGNED(pci_addr, pci->region_align) || !size) {
+ if ((limit_addr & ~pci->region_limit) != (parent_bus_addr & ~pci->region_limit) ||
+ !IS_ALIGNED(parent_bus_addr, pci->region_align) ||
+ !IS_ALIGNED(atu->pci_addr, pci->region_align) || !atu->size) {
return -EINVAL;
}
- dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_LOWER_BASE,
- lower_32_bits(cpu_addr));
- dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_UPPER_BASE,
- upper_32_bits(cpu_addr));
+ dw_pcie_writel_atu_ob(pci, atu->index, PCIE_ATU_LOWER_BASE,
+ lower_32_bits(parent_bus_addr));
+ dw_pcie_writel_atu_ob(pci, atu->index, PCIE_ATU_UPPER_BASE,
+ upper_32_bits(parent_bus_addr));
- dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_LIMIT,
+ dw_pcie_writel_atu_ob(pci, atu->index, PCIE_ATU_LIMIT,
lower_32_bits(limit_addr));
if (dw_pcie_ver_is_ge(pci, 460A))
- dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_UPPER_LIMIT,
+ dw_pcie_writel_atu_ob(pci, atu->index, PCIE_ATU_UPPER_LIMIT,
upper_32_bits(limit_addr));
- dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_LOWER_TARGET,
- lower_32_bits(pci_addr));
- dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_UPPER_TARGET,
- upper_32_bits(pci_addr));
+ dw_pcie_writel_atu_ob(pci, atu->index, PCIE_ATU_LOWER_TARGET,
+ lower_32_bits(atu->pci_addr));
+ dw_pcie_writel_atu_ob(pci, atu->index, PCIE_ATU_UPPER_TARGET,
+ upper_32_bits(atu->pci_addr));
- val = type | PCIE_ATU_FUNC_NUM(func_no);
- if (upper_32_bits(limit_addr) > upper_32_bits(cpu_addr) &&
+ val = atu->type | atu->routing | PCIE_ATU_FUNC_NUM(atu->func_no);
+ if (upper_32_bits(limit_addr) > upper_32_bits(parent_bus_addr) &&
dw_pcie_ver_is_ge(pci, 460A))
val |= PCIE_ATU_INCREASE_REGION_SIZE;
if (dw_pcie_ver_is(pci, 490A))
val = dw_pcie_enable_ecrc(val);
- dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_REGION_CTRL1, val);
+ dw_pcie_writel_atu_ob(pci, atu->index, PCIE_ATU_REGION_CTRL1, val);
- dw_pcie_writel_atu_ob(pci, index, PCIE_ATU_REGION_CTRL2, PCIE_ATU_ENABLE);
+ val = PCIE_ATU_ENABLE | atu->ctrl2;
+ if (atu->type == PCIE_ATU_TYPE_MSG) {
+ /* Only data-less messages are supported for now */
+ val |= PCIE_ATU_INHIBIT_PAYLOAD | atu->code;
+ }
+ dw_pcie_writel_atu_ob(pci, atu->index, PCIE_ATU_REGION_CTRL2, val);
/*
* Make sure ATU enable takes effect before any subsequent config
* and I/O accesses.
*/
for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
- val = dw_pcie_readl_atu_ob(pci, index, PCIE_ATU_REGION_CTRL2);
+ val = dw_pcie_readl_atu_ob(pci, atu->index, PCIE_ATU_REGION_CTRL2);
if (val & PCIE_ATU_ENABLE)
return 0;
@@ -525,21 +534,6 @@ static int __dw_pcie_prog_outbound_atu(struct dw_pcie *pci, u8 func_no,
return -ETIMEDOUT;
}
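Callers now describe the outbound window in a dw_pcie_ob_atu_cfg and pass it by pointer instead of a long argument list. A hypothetical caller setting up a plain MEM window might look like the fragment below (cpu_addr, pci_addr and size are placeholders the caller already has; this assumes kernel context):

	struct dw_pcie_ob_atu_cfg atu = { 0 };
	int ret;

	atu.index = 0;
	atu.type = PCIE_ATU_TYPE_MEM;
	atu.parent_bus_addr = cpu_addr - pci->parent_bus_offset;
	atu.pci_addr = pci_addr;
	atu.size = size;

	ret = dw_pcie_prog_outbound_atu(pci, &atu);
	if (ret)
		dev_err(pci->dev, "Failed to program MEM window\n");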
-int dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
- u64 cpu_addr, u64 pci_addr, u64 size)
-{
- return __dw_pcie_prog_outbound_atu(pci, 0, index, type,
- cpu_addr, pci_addr, size);
-}
-
-int dw_pcie_prog_ep_outbound_atu(struct dw_pcie *pci, u8 func_no, int index,
- int type, u64 cpu_addr, u64 pci_addr,
- u64 size)
-{
- return __dw_pcie_prog_outbound_atu(pci, func_no, index, type,
- cpu_addr, pci_addr, size);
-}
-
static inline u32 dw_pcie_readl_atu_ib(struct dw_pcie *pci, u32 index, u32 reg)
{
return dw_pcie_readl_atu(pci, PCIE_ATU_REGION_DIR_IB, index, reg);
@@ -552,13 +546,13 @@ static inline void dw_pcie_writel_atu_ib(struct dw_pcie *pci, u32 index, u32 reg
}
int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int type,
- u64 cpu_addr, u64 pci_addr, u64 size)
+ u64 parent_bus_addr, u64 pci_addr, u64 size)
{
u64 limit_addr = pci_addr + size - 1;
u32 retries, val;
if ((limit_addr & ~pci->region_limit) != (pci_addr & ~pci->region_limit) ||
- !IS_ALIGNED(cpu_addr, pci->region_align) ||
+ !IS_ALIGNED(parent_bus_addr, pci->region_align) ||
!IS_ALIGNED(pci_addr, pci->region_align) || !size) {
return -EINVAL;
}
@@ -575,9 +569,9 @@ int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int type,
upper_32_bits(limit_addr));
dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_LOWER_TARGET,
- lower_32_bits(cpu_addr));
+ lower_32_bits(parent_bus_addr));
dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_UPPER_TARGET,
- upper_32_bits(cpu_addr));
+ upper_32_bits(parent_bus_addr));
val = type;
if (upper_32_bits(limit_addr) > upper_32_bits(pci_addr) &&
@@ -604,17 +598,18 @@ int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int type,
}
int dw_pcie_prog_ep_inbound_atu(struct dw_pcie *pci, u8 func_no, int index,
- int type, u64 cpu_addr, u8 bar)
+ int type, u64 parent_bus_addr, u8 bar, size_t size)
{
u32 retries, val;
- if (!IS_ALIGNED(cpu_addr, pci->region_align))
+ if (!IS_ALIGNED(parent_bus_addr, pci->region_align) ||
+ !IS_ALIGNED(parent_bus_addr, size))
return -EINVAL;
dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_LOWER_TARGET,
- lower_32_bits(cpu_addr));
+ lower_32_bits(parent_bus_addr));
dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_UPPER_TARGET,
- upper_32_bits(cpu_addr));
+ upper_32_bits(parent_bus_addr));
dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_REGION_CTRL1, type |
PCIE_ATU_FUNC_NUM(func_no));
@@ -644,42 +639,44 @@ void dw_pcie_disable_atu(struct dw_pcie *pci, u32 dir, int index)
dw_pcie_writel_atu(pci, dir, index, PCIE_ATU_REGION_CTRL2, 0);
}
-void dw_pcie_print_link_status(struct dw_pcie *pci)
-{
- u32 offset, val;
-
- offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
- val = dw_pcie_readw_dbi(pci, offset + PCI_EXP_LNKSTA);
-
- dev_info(pci->dev, "PCIe Gen.%u x%u link up\n",
- FIELD_GET(PCI_EXP_LNKSTA_CLS, val),
- FIELD_GET(PCI_EXP_LNKSTA_NLW, val));
-}
-
int dw_pcie_wait_for_link(struct dw_pcie *pci)
{
+ u32 offset, val;
int retries;
/* Check if the link is up or not */
- for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
+ for (retries = 0; retries < PCIE_LINK_WAIT_MAX_RETRIES; retries++) {
if (dw_pcie_link_up(pci))
break;
- usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
+ msleep(PCIE_LINK_WAIT_SLEEP_MS);
}
- if (retries >= LINK_WAIT_MAX_RETRIES) {
+ if (retries >= PCIE_LINK_WAIT_MAX_RETRIES) {
dev_info(pci->dev, "Phy link never came up\n");
return -ETIMEDOUT;
}
- dw_pcie_print_link_status(pci);
+ /*
+ * As per PCIe r6.0, sec 6.6.1, a Downstream Port that supports Link
+ * speeds greater than 5.0 GT/s, software must wait a minimum of 100 ms
+ * after Link training completes before sending a Configuration Request.
+ */
+ if (pci->max_link_speed > 2)
+ msleep(PCIE_RESET_CONFIG_WAIT_MS);
+
+ offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ val = dw_pcie_readw_dbi(pci, offset + PCI_EXP_LNKSTA);
+
+ dev_info(pci->dev, "PCIe Gen.%u x%u link up\n",
+ FIELD_GET(PCI_EXP_LNKSTA_CLS, val),
+ FIELD_GET(PCI_EXP_LNKSTA_NLW, val));
return 0;
}
EXPORT_SYMBOL_GPL(dw_pcie_wait_for_link);
-int dw_pcie_link_up(struct dw_pcie *pci)
+bool dw_pcie_link_up(struct dw_pcie *pci)
{
u32 val;
@@ -702,16 +699,27 @@ void dw_pcie_upconfig_setup(struct dw_pcie *pci)
}
EXPORT_SYMBOL_GPL(dw_pcie_upconfig_setup);
-static void dw_pcie_link_set_max_speed(struct dw_pcie *pci, u32 link_gen)
+static void dw_pcie_link_set_max_speed(struct dw_pcie *pci)
{
u32 cap, ctrl2, link_speed;
u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
cap = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
+
+ /*
+ * Even if the platform doesn't want to limit the maximum link speed,
+ * just cache the hardware default value so that the vendor drivers can
+ * use it to do any link-specific configuration.
+ */
+ if (pci->max_link_speed < 1) {
+ pci->max_link_speed = FIELD_GET(PCI_EXP_LNKCAP_SLS, cap);
+ return;
+ }
+
ctrl2 = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCTL2);
ctrl2 &= ~PCI_EXP_LNKCTL2_TLS;
- switch (pcie_link_speed[link_gen]) {
+ switch (pcie_link_speed[pci->max_link_speed]) {
case PCIE_SPEED_2_5GT:
link_speed = PCI_EXP_LNKCTL2_TLS_2_5GT;
break;
@@ -738,6 +746,61 @@ static void dw_pcie_link_set_max_speed(struct dw_pcie *pci, u32 link_gen)
}
+int dw_pcie_link_get_max_link_width(struct dw_pcie *pci)
+{
+ u8 cap = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ u32 lnkcap = dw_pcie_readl_dbi(pci, cap + PCI_EXP_LNKCAP);
+
+ return FIELD_GET(PCI_EXP_LNKCAP_MLW, lnkcap);
+}
+
+static void dw_pcie_link_set_max_link_width(struct dw_pcie *pci, u32 num_lanes)
+{
+ u32 lnkcap, lwsc, plc;
+ u8 cap;
+
+ if (!num_lanes)
+ return;
+
+ /* Set the number of lanes */
+ plc = dw_pcie_readl_dbi(pci, PCIE_PORT_LINK_CONTROL);
+ plc &= ~PORT_LINK_FAST_LINK_MODE;
+ plc &= ~PORT_LINK_MODE_MASK;
+
+ /* Set link width speed control register */
+ lwsc = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
+ lwsc &= ~PORT_LOGIC_LINK_WIDTH_MASK;
+ lwsc |= PORT_LOGIC_LINK_WIDTH_1_LANES;
+ switch (num_lanes) {
+ case 1:
+ plc |= PORT_LINK_MODE_1_LANES;
+ break;
+ case 2:
+ plc |= PORT_LINK_MODE_2_LANES;
+ break;
+ case 4:
+ plc |= PORT_LINK_MODE_4_LANES;
+ break;
+ case 8:
+ plc |= PORT_LINK_MODE_8_LANES;
+ break;
+ case 16:
+ plc |= PORT_LINK_MODE_16_LANES;
+ break;
+ default:
+ dev_err(pci->dev, "num-lanes %u: invalid value\n", num_lanes);
+ return;
+ }
+ dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, plc);
+ dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, lwsc);
+
+ cap = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ lnkcap = dw_pcie_readl_dbi(pci, cap + PCI_EXP_LNKCAP);
+ lnkcap &= ~PCI_EXP_LNKCAP_MLW;
+ lnkcap |= FIELD_PREP(PCI_EXP_LNKCAP_MLW, num_lanes);
+ dw_pcie_writel_dbi(pci, cap + PCI_EXP_LNKCAP, lnkcap);
+}
+
void dw_pcie_iatu_detect(struct dw_pcie *pci)
{
int max_region, ob, ib;
@@ -838,11 +901,29 @@ static struct dw_edma_plat_ops dw_pcie_edma_ops = {
.irq_vector = dw_pcie_edma_irq_vector,
};
-static int dw_pcie_edma_find_chip(struct dw_pcie *pci)
+static void dw_pcie_edma_init_data(struct dw_pcie *pci)
+{
+ pci->edma.dev = pci->dev;
+
+ if (!pci->edma.ops)
+ pci->edma.ops = &dw_pcie_edma_ops;
+
+ pci->edma.flags |= DW_EDMA_CHIP_LOCAL;
+}
+
+static int dw_pcie_edma_find_mf(struct dw_pcie *pci)
{
u32 val;
/*
+ * Bail out finding the mapping format if it is already set by the glue
+ * driver. Also ensure that the edma.reg_base is pointing to a valid
+ * memory region.
+ */
+ if (pci->edma.mf != EDMA_MF_EDMA_LEGACY)
+ return pci->edma.reg_base ? 0 : -ENODEV;
+
+ /*
* Indirect eDMA CSRs access has been completely removed since v5.40a
* thus no space is now reserved for the eDMA channels viewport and
* former DMA CTRL register is no longer fixed to FFs.
@@ -854,8 +935,6 @@ static int dw_pcie_edma_find_chip(struct dw_pcie *pci)
if (val == 0xFFFFFFFF && pci->edma.reg_base) {
pci->edma.mf = EDMA_MF_EDMA_UNROLL;
-
- val = dw_pcie_readl_dma(pci, PCIE_DMA_CTRL);
} else if (val != 0xFFFFFFFF) {
pci->edma.mf = EDMA_MF_EDMA_LEGACY;
@@ -864,15 +943,25 @@ static int dw_pcie_edma_find_chip(struct dw_pcie *pci)
return -ENODEV;
}
- pci->edma.dev = pci->dev;
+ return 0;
+}
- if (!pci->edma.ops)
- pci->edma.ops = &dw_pcie_edma_ops;
+static int dw_pcie_edma_find_channels(struct dw_pcie *pci)
+{
+ u32 val;
- pci->edma.flags |= DW_EDMA_CHIP_LOCAL;
+ /*
+ * Autodetect the read/write channels count only for non-HDMA platforms.
+ * HDMA platforms with native CSR mapping don't support autodetection,
+ * so the glue drivers should've passed the valid count already. If not,
+ * the below sanity check will catch it.
+ */
+ if (pci->edma.mf != EDMA_MF_HDMA_NATIVE) {
+ val = dw_pcie_readl_dma(pci, PCIE_DMA_CTRL);
- pci->edma.ll_wr_cnt = FIELD_GET(PCIE_DMA_NUM_WR_CHAN, val);
- pci->edma.ll_rd_cnt = FIELD_GET(PCIE_DMA_NUM_RD_CHAN, val);
+ pci->edma.ll_wr_cnt = FIELD_GET(PCIE_DMA_NUM_WR_CHAN, val);
+ pci->edma.ll_rd_cnt = FIELD_GET(PCIE_DMA_NUM_RD_CHAN, val);
+ }
/* Sanity check the channels count if the mapping was incorrect */
if (!pci->edma.ll_wr_cnt || pci->edma.ll_wr_cnt > EDMA_MAX_WR_CH ||
@@ -882,16 +971,27 @@ static int dw_pcie_edma_find_chip(struct dw_pcie *pci)
return 0;
}
+static int dw_pcie_edma_find_chip(struct dw_pcie *pci)
+{
+ int ret;
+
+ dw_pcie_edma_init_data(pci);
+
+ ret = dw_pcie_edma_find_mf(pci);
+ if (ret)
+ return ret;
+
+ return dw_pcie_edma_find_channels(pci);
+}
+
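For reference, the comments above assume that HDMA-based glue drivers describe the eDMA block themselves before dw_pcie_edma_detect() runs, since the mapping format and channel counts cannot be probed there. A minimal sketch under that assumption (the function name, register offset and channel counts are placeholders, not taken from this patch):

static void example_pcie_init_hdma(struct dw_pcie *pci)
{
	/* Placeholder offset: where the HDMA CSRs sit is SoC specific. */
	pci->edma.mf = EDMA_MF_HDMA_NATIVE;
	pci->edma.reg_base = pci->dbi_base + 0x70000;
	pci->edma.ll_wr_cnt = 8;
	pci->edma.ll_rd_cnt = 8;
}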
static int dw_pcie_edma_irq_verify(struct dw_pcie *pci)
{
struct platform_device *pdev = to_platform_device(pci->dev);
u16 ch_cnt = pci->edma.ll_wr_cnt + pci->edma.ll_rd_cnt;
- char name[6];
+ char name[15];
int ret;
- if (pci->edma.nr_irqs == 1)
- return 0;
- else if (pci->edma.nr_irqs > 1)
+ if (pci->edma.nr_irqs > 1)
return pci->edma.nr_irqs != ch_cnt ? -EINVAL : 0;
ret = platform_get_irq_byname_optional(pdev, "dma");
@@ -983,12 +1083,35 @@ void dw_pcie_edma_remove(struct dw_pcie *pci)
dw_edma_remove(&pci->edma);
}
+void dw_pcie_hide_unsupported_l1ss(struct dw_pcie *pci)
+{
+ u16 l1ss;
+ u32 l1ss_cap;
+
+ if (pci->l1ss_support)
+ return;
+
+ l1ss = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_L1SS);
+ if (!l1ss)
+ return;
+
+ /*
+ * Unless the driver claims "l1ss_support", don't advertise L1 PM
+ * Substates because they require CLKREQ# and possibly other
+ * device-specific configuration.
+ */
+ l1ss_cap = dw_pcie_readl_dbi(pci, l1ss + PCI_L1SS_CAP);
+ l1ss_cap &= ~(PCI_L1SS_CAP_PCIPM_L1_1 | PCI_L1SS_CAP_ASPM_L1_1 |
+ PCI_L1SS_CAP_PCIPM_L1_2 | PCI_L1SS_CAP_ASPM_L1_2 |
+ PCI_L1SS_CAP_L1_PM_SS);
+ dw_pcie_writel_dbi(pci, l1ss + PCI_L1SS_CAP, l1ss_cap);
+}
+
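For reference, dw_pcie_hide_unsupported_l1ss() leaves the L1 PM Substates capability visible only when the glue driver claims support. A minimal sketch of that opt-in from a host .init callback, mirroring the rockchip_pcie_configure_l1ss() change later in this patch (the function name and DT property handling here are illustrative only):

static int example_pcie_host_init(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	/* Claim L1SS support only when CLKREQ# is wired up on the board. */
	if (of_property_read_bool(pci->dev->of_node, "supports-clkreq"))
		pci->l1ss_support = true;

	return 0;
}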
void dw_pcie_setup(struct dw_pcie *pci)
{
u32 val;
- if (pci->link_gen > 0)
- dw_pcie_link_set_max_speed(pci, pci->link_gen);
+ dw_pcie_link_set_max_speed(pci);
/* Configure Gen1 N_FTS */
if (pci->n_fts[0]) {
@@ -1019,49 +1142,65 @@ void dw_pcie_setup(struct dw_pcie *pci)
val |= PORT_LINK_DLL_LINK_EN;
dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val);
- if (!pci->num_lanes) {
- dev_dbg(pci->dev, "Using h/w default number of lanes\n");
- return;
+ dw_pcie_link_set_max_link_width(pci, pci->num_lanes);
+}
+
+resource_size_t dw_pcie_parent_bus_offset(struct dw_pcie *pci,
+ const char *reg_name,
+ resource_size_t cpu_phys_addr)
+{
+ struct device *dev = pci->dev;
+ struct device_node *np = dev->of_node;
+ int index;
+ u64 reg_addr, fixup_addr;
+ u64 (*fixup)(struct dw_pcie *pcie, u64 cpu_addr);
+
+ /* Look up reg_name address on parent bus */
+ index = of_property_match_string(np, "reg-names", reg_name);
+
+ if (index < 0) {
+ dev_err(dev, "No %s in devicetree \"reg\" property\n", reg_name);
+ return 0;
}
- /* Set the number of lanes */
- val &= ~PORT_LINK_FAST_LINK_MODE;
- val &= ~PORT_LINK_MODE_MASK;
- switch (pci->num_lanes) {
- case 1:
- val |= PORT_LINK_MODE_1_LANES;
- break;
- case 2:
- val |= PORT_LINK_MODE_2_LANES;
- break;
- case 4:
- val |= PORT_LINK_MODE_4_LANES;
- break;
- case 8:
- val |= PORT_LINK_MODE_8_LANES;
- break;
- default:
- dev_err(pci->dev, "num-lanes %u: invalid value\n", pci->num_lanes);
- return;
+ of_property_read_reg(np, index, &reg_addr, NULL);
+
+ fixup = pci->ops ? pci->ops->cpu_addr_fixup : NULL;
+ if (fixup) {
+ fixup_addr = fixup(pci, cpu_phys_addr);
+ if (reg_addr == fixup_addr) {
+ dev_info(dev, "%s reg[%d] %#010llx == %#010llx == fixup(cpu %#010llx); %ps is redundant with this devicetree\n",
+ reg_name, index, reg_addr, fixup_addr,
+ (unsigned long long) cpu_phys_addr, fixup);
+ } else {
+ dev_warn(dev, "%s reg[%d] %#010llx != %#010llx == fixup(cpu %#010llx); devicetree is broken\n",
+ reg_name, index, reg_addr, fixup_addr,
+ (unsigned long long) cpu_phys_addr);
+ reg_addr = fixup_addr;
+ }
+
+ return cpu_phys_addr - reg_addr;
}
- dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val);
- /* Set link width speed control register */
- val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
- val &= ~PORT_LOGIC_LINK_WIDTH_MASK;
- switch (pci->num_lanes) {
- case 1:
- val |= PORT_LOGIC_LINK_WIDTH_1_LANES;
- break;
- case 2:
- val |= PORT_LOGIC_LINK_WIDTH_2_LANES;
- break;
- case 4:
- val |= PORT_LOGIC_LINK_WIDTH_4_LANES;
- break;
- case 8:
- val |= PORT_LOGIC_LINK_WIDTH_8_LANES;
- break;
+ if (pci->use_parent_dt_ranges) {
+
+ /*
+ * This platform once had a fixup, presumably because it
+ * translates between CPU and PCI controller addresses.
+ * Log a note if devicetree didn't describe a translation.
+ */
+ if (reg_addr == cpu_phys_addr)
+ dev_info(dev, "%s reg[%d] %#010llx == cpu %#010llx\n; no fixup was ever needed for this devicetree\n",
+ reg_name, index, reg_addr,
+ (unsigned long long) cpu_phys_addr);
+ } else {
+ if (reg_addr != cpu_phys_addr) {
+ dev_warn(dev, "%s reg[%d] %#010llx != cpu %#010llx; no fixup and devicetree \"ranges\" is broken, assuming no translation\n",
+ reg_name, index, reg_addr,
+ (unsigned long long) cpu_phys_addr);
+ return 0;
+ }
}
- dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
+
+ return cpu_phys_addr - reg_addr;
}
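For context, glue drivers whose devicetree "reg"/"ranges" already describe the CPU-to-controller address translation can drop the legacy .cpu_addr_fixup() path entirely; the pcie-intel-gw.c hunk later in this patch does exactly this. A minimal sketch of the opt-in (driver name and probe structure are illustrative only, not part of this patch):

static int example_pcie_probe(struct platform_device *pdev)
{
	struct dw_pcie *pci;

	pci = devm_kzalloc(&pdev->dev, sizeof(*pci), GFP_KERNEL);
	if (!pci)
		return -ENOMEM;

	pci->dev = &pdev->dev;
	/*
	 * Trust the translation described by the parent bus ranges in
	 * devicetree instead of supplying a .cpu_addr_fixup(), so any
	 * non-zero offset found above is used without a warning.
	 */
	pci->use_parent_dt_ranges = true;

	return dw_pcie_host_init(&pci->pp);
}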
diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h
index 615660640801..31685951a080 100644
--- a/drivers/pci/controller/dwc/pcie-designware.h
+++ b/drivers/pci/controller/dwc/pcie-designware.h
@@ -20,11 +20,14 @@
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/pci.h>
+#include <linux/pci-ecam.h>
#include <linux/reset.h>
#include <linux/pci-epc.h>
#include <linux/pci-epf.h>
+#include "../../pci.h"
+
/* DWC PCIe IP-core versions (native support since v4.70a) */
#define DW_PCIE_VER_365A 0x3336352a
#define DW_PCIE_VER_460A 0x3436302a
@@ -60,16 +63,14 @@
#define dw_pcie_cap_set(_pci, _cap) \
set_bit(DW_PCIE_CAP_ ## _cap, &(_pci)->caps)
-/* Parameters for the waiting for link up routine */
-#define LINK_WAIT_MAX_RETRIES 10
-#define LINK_WAIT_USLEEP_MIN 90000
-#define LINK_WAIT_USLEEP_MAX 100000
-
/* Parameters for the waiting for iATU enabled routine */
#define LINK_WAIT_MAX_IATU_RETRIES 5
#define LINK_WAIT_IATU 9
/* Synopsys-specific PCIe configuration registers */
+#define PCIE_PORT_FORCE 0x708
+#define PORT_FORCE_DO_DESKEW_FOR_SRIS BIT(23)
+
#define PCIE_PORT_AFR 0x70C
#define PORT_AFR_N_FTS_MASK GENMASK(15, 8)
#define PORT_AFR_N_FTS(n) FIELD_PREP(PORT_AFR_N_FTS_MASK, n)
@@ -90,9 +91,13 @@
#define PORT_LINK_MODE_2_LANES PORT_LINK_MODE(0x3)
#define PORT_LINK_MODE_4_LANES PORT_LINK_MODE(0x7)
#define PORT_LINK_MODE_8_LANES PORT_LINK_MODE(0xf)
+#define PORT_LINK_MODE_16_LANES PORT_LINK_MODE(0x1f)
+
+#define PCIE_PORT_LANE_SKEW 0x714
+#define PORT_LANE_SKEW_INSERT_MASK GENMASK(23, 0)
#define PCIE_PORT_DEBUG0 0x728
-#define PORT_LOGIC_LTSSM_STATE_MASK 0x1f
+#define PORT_LOGIC_LTSSM_STATE_MASK 0x3f
#define PORT_LOGIC_LTSSM_STATE_L0 0x11
#define PCIE_PORT_DEBUG1 0x72C
#define PCIE_PORT_DEBUG1_LINK_UP BIT(4)
@@ -116,11 +121,31 @@
#define GEN3_RELATED_OFF 0x890
#define GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL BIT(0)
+#define GEN3_RELATED_OFF_EQ_PHASE_2_3 BIT(9)
#define GEN3_RELATED_OFF_RXEQ_RGRDLESS_RXTS BIT(13)
#define GEN3_RELATED_OFF_GEN3_EQ_DISABLE BIT(16)
#define GEN3_RELATED_OFF_RATE_SHADOW_SEL_SHIFT 24
#define GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK GENMASK(25, 24)
+#define GEN3_EQ_CONTROL_OFF 0x8A8
+#define GEN3_EQ_CONTROL_OFF_FB_MODE GENMASK(3, 0)
+#define GEN3_EQ_CONTROL_OFF_PHASE23_EXIT_MODE BIT(4)
+#define GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC GENMASK(23, 8)
+#define GEN3_EQ_CONTROL_OFF_FOM_INC_INITIAL_EVAL BIT(24)
+
+#define GEN3_EQ_FB_MODE_DIR_CHANGE_OFF 0x8AC
+#define GEN3_EQ_FMDC_T_MIN_PHASE23 GENMASK(4, 0)
+#define GEN3_EQ_FMDC_N_EVALS GENMASK(9, 5)
+#define GEN3_EQ_FMDC_MAX_PRE_CURSOR_DELTA GENMASK(13, 10)
+#define GEN3_EQ_FMDC_MAX_POST_CURSOR_DELTA GENMASK(17, 14)
+
+#define COHERENCY_CONTROL_1_OFF 0x8E0
+#define CFG_MEMTYPE_BOUNDARY_LOW_ADDR_MASK GENMASK(31, 2)
+#define CFG_MEMTYPE_VALUE BIT(0)
+
+#define COHERENCY_CONTROL_2_OFF 0x8E4
+#define COHERENCY_CONTROL_3_OFF 0x8E8
+
#define PCIE_PORT_MULTI_LANE_CTRL 0x8C0
#define PORT_MLTI_UPCFG_SUPPORT BIT(7)
@@ -147,11 +172,14 @@
#define PCIE_ATU_TYPE_IO 0x2
#define PCIE_ATU_TYPE_CFG0 0x4
#define PCIE_ATU_TYPE_CFG1 0x5
+#define PCIE_ATU_TYPE_MSG 0x10
#define PCIE_ATU_TD BIT(8)
#define PCIE_ATU_FUNC_NUM(pf) ((pf) << 20)
#define PCIE_ATU_REGION_CTRL2 0x004
#define PCIE_ATU_ENABLE BIT(31)
#define PCIE_ATU_BAR_MODE_ENABLE BIT(30)
+#define PCIE_ATU_CFG_SHIFT_MODE_ENABLE BIT(28)
+#define PCIE_ATU_INHIBIT_PAYLOAD BIT(22)
#define PCIE_ATU_FUNC_NUM_MATCH_EN BIT(19)
#define PCIE_ATU_LOWER_BASE 0x008
#define PCIE_ATU_UPPER_BASE 0x00C
@@ -191,6 +219,24 @@
#define PCIE_PL_CHK_REG_ERR_ADDR 0xB28
/*
+ * 16.0 GT/s (Gen 4) lane margining register definitions
+ */
+#define GEN4_LANE_MARGINING_1_OFF 0xB80
+#define MARGINING_MAX_VOLTAGE_OFFSET GENMASK(29, 24)
+#define MARGINING_NUM_VOLTAGE_STEPS GENMASK(22, 16)
+#define MARGINING_MAX_TIMING_OFFSET GENMASK(13, 8)
+#define MARGINING_NUM_TIMING_STEPS GENMASK(5, 0)
+
+#define GEN4_LANE_MARGINING_2_OFF 0xB84
+#define MARGINING_IND_ERROR_SAMPLER BIT(28)
+#define MARGINING_SAMPLE_REPORTING_METHOD BIT(27)
+#define MARGINING_IND_LEFT_RIGHT_TIMING BIT(26)
+#define MARGINING_IND_UP_DOWN_VOLTAGE BIT(25)
+#define MARGINING_VOLTAGE_SUPPORTED BIT(24)
+#define MARGINING_MAXLANES GENMASK(20, 16)
+#define MARGINING_SAMPLE_RATE_TIMING GENMASK(13, 8)
+#define MARGINING_SAMPLE_RATE_VOLTAGE GENMASK(5, 0)
+/*
* iATU Unroll-specific register definitions
* From 4.80 core version the address translation will be made by unroll
*/
@@ -222,6 +268,21 @@
#define PCIE_RAS_DES_EVENT_COUNTER_DATA 0xc
+/* PTM register definitions */
+#define PTM_RES_REQ_CTRL 0x8
+#define PTM_RES_CCONTEXT_VALID BIT(0)
+#define PTM_REQ_AUTO_UPDATE_ENABLED BIT(0)
+#define PTM_REQ_START_UPDATE BIT(1)
+
+#define PTM_LOCAL_LSB 0x10
+#define PTM_LOCAL_MSB 0x14
+#define PTM_T1_T2_LSB 0x18
+#define PTM_T1_T2_MSB 0x1c
+#define PTM_T3_T4_LSB 0x28
+#define PTM_T3_T4_MSB 0x2c
+#define PTM_MASTER_LSB 0x38
+#define PTM_MASTER_MSB 0x3c
+
/*
* The default address offset between dbi_base and atu_base. Root controller
* drivers are not required to initialize atu_base if the offset matches this
@@ -288,10 +349,66 @@ enum dw_pcie_core_rst {
DW_PCIE_NUM_CORE_RSTS
};
+enum dw_pcie_ltssm {
+ /* Need to align with PCIE_PORT_DEBUG0 bits 0:5 */
+ DW_PCIE_LTSSM_DETECT_QUIET = 0x0,
+ DW_PCIE_LTSSM_DETECT_ACT = 0x1,
+ DW_PCIE_LTSSM_POLL_ACTIVE = 0x2,
+ DW_PCIE_LTSSM_POLL_COMPLIANCE = 0x3,
+ DW_PCIE_LTSSM_POLL_CONFIG = 0x4,
+ DW_PCIE_LTSSM_PRE_DETECT_QUIET = 0x5,
+ DW_PCIE_LTSSM_DETECT_WAIT = 0x6,
+ DW_PCIE_LTSSM_CFG_LINKWD_START = 0x7,
+ DW_PCIE_LTSSM_CFG_LINKWD_ACEPT = 0x8,
+ DW_PCIE_LTSSM_CFG_LANENUM_WAI = 0x9,
+ DW_PCIE_LTSSM_CFG_LANENUM_ACEPT = 0xa,
+ DW_PCIE_LTSSM_CFG_COMPLETE = 0xb,
+ DW_PCIE_LTSSM_CFG_IDLE = 0xc,
+ DW_PCIE_LTSSM_RCVRY_LOCK = 0xd,
+ DW_PCIE_LTSSM_RCVRY_SPEED = 0xe,
+ DW_PCIE_LTSSM_RCVRY_RCVRCFG = 0xf,
+ DW_PCIE_LTSSM_RCVRY_IDLE = 0x10,
+ DW_PCIE_LTSSM_L0 = 0x11,
+ DW_PCIE_LTSSM_L0S = 0x12,
+ DW_PCIE_LTSSM_L123_SEND_EIDLE = 0x13,
+ DW_PCIE_LTSSM_L1_IDLE = 0x14,
+ DW_PCIE_LTSSM_L2_IDLE = 0x15,
+ DW_PCIE_LTSSM_L2_WAKE = 0x16,
+ DW_PCIE_LTSSM_DISABLED_ENTRY = 0x17,
+ DW_PCIE_LTSSM_DISABLED_IDLE = 0x18,
+ DW_PCIE_LTSSM_DISABLED = 0x19,
+ DW_PCIE_LTSSM_LPBK_ENTRY = 0x1a,
+ DW_PCIE_LTSSM_LPBK_ACTIVE = 0x1b,
+ DW_PCIE_LTSSM_LPBK_EXIT = 0x1c,
+ DW_PCIE_LTSSM_LPBK_EXIT_TIMEOUT = 0x1d,
+ DW_PCIE_LTSSM_HOT_RESET_ENTRY = 0x1e,
+ DW_PCIE_LTSSM_HOT_RESET = 0x1f,
+ DW_PCIE_LTSSM_RCVRY_EQ0 = 0x20,
+ DW_PCIE_LTSSM_RCVRY_EQ1 = 0x21,
+ DW_PCIE_LTSSM_RCVRY_EQ2 = 0x22,
+ DW_PCIE_LTSSM_RCVRY_EQ3 = 0x23,
+
+ DW_PCIE_LTSSM_UNKNOWN = 0xFFFFFFFF,
+};
+
+struct dw_pcie_ob_atu_cfg {
+ int index;
+ int type;
+ u8 func_no;
+ u8 code;
+ u8 routing;
+ u32 ctrl2;
+ u64 parent_bus_addr;
+ u64 pci_addr;
+ u64 size;
+};
+
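For reference, a minimal sketch of how the new outbound iATU config struct is intended to be filled and handed to dw_pcie_prog_outbound_atu(); the addresses and window size are placeholders, and PCIE_ATU_TYPE_MEM is the pre-existing memory TLP type define in this header:

static int example_map_mem_window(struct dw_pcie *pci)
{
	struct dw_pcie_ob_atu_cfg atu = {
		.index = 0,
		.type = PCIE_ATU_TYPE_MEM,
		.parent_bus_addr = 0x40000000,	/* placeholder addresses */
		.pci_addr = 0x0,
		.size = SZ_1M,
	};

	return dw_pcie_prog_outbound_atu(pci, &atu);
}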
struct dw_pcie_host_ops {
- int (*host_init)(struct dw_pcie_rp *pp);
- void (*host_deinit)(struct dw_pcie_rp *pp);
- int (*msi_host_init)(struct dw_pcie_rp *pp);
+ int (*init)(struct dw_pcie_rp *pp);
+ void (*deinit)(struct dw_pcie_rp *pp);
+ void (*post_init)(struct dw_pcie_rp *pp);
+ int (*msi_init)(struct dw_pcie_rp *pp);
+ void (*pme_turn_off)(struct dw_pcie_rp *pp);
};
struct dw_pcie_rp {
@@ -307,7 +424,6 @@ struct dw_pcie_rp {
const struct dw_pcie_host_ops *ops;
int msi_irq[MAX_MSI_CTRLS];
struct irq_domain *irq_domain;
- struct irq_domain *msi_domain;
dma_addr_t msi_data;
struct irq_chip *msi_irq_chip;
u32 num_vectors;
@@ -315,12 +431,21 @@ struct dw_pcie_rp {
struct pci_host_bridge *bridge;
raw_spinlock_t lock;
DECLARE_BITMAP(msi_irq_in_use, MAX_MSI_IRQS);
+ bool use_atu_msg;
+ int msg_atu_index;
+ struct resource *msg_res;
+ bool use_linkup_irq;
+ struct pci_eq_presets presets;
+ struct pci_config_window *cfg;
+ bool ecam_enabled;
+ bool native_ecam;
};
struct dw_pcie_ep_ops {
- void (*ep_init)(struct dw_pcie_ep *ep);
+ void (*pre_init)(struct dw_pcie_ep *ep);
+ void (*init)(struct dw_pcie_ep *ep);
int (*raise_irq)(struct dw_pcie_ep *ep, u8 func_no,
- enum pci_epc_irq_type type, u16 interrupt_num);
+ unsigned int type, u16 interrupt_num);
const struct pci_epc_features* (*get_features)(struct dw_pcie_ep *ep);
/*
* Provide a method to implement the different func config space
@@ -329,7 +454,8 @@ struct dw_pcie_ep_ops {
* return a 0, and implement code in callback function of platform
* driver.
*/
- unsigned int (*func_conf_select)(struct dw_pcie_ep *ep, u8 func_no);
+ unsigned int (*get_dbi_offset)(struct dw_pcie_ep *ep, u8 func_no);
+ unsigned int (*get_dbi2_offset)(struct dw_pcie_ep *ep, u8 func_no);
};
struct dw_pcie_ep_func {
@@ -363,17 +489,28 @@ struct dw_pcie_ops {
size_t size, u32 val);
void (*write_dbi2)(struct dw_pcie *pcie, void __iomem *base, u32 reg,
size_t size, u32 val);
- int (*link_up)(struct dw_pcie *pcie);
+ bool (*link_up)(struct dw_pcie *pcie);
+ enum dw_pcie_ltssm (*get_ltssm)(struct dw_pcie *pcie);
int (*start_link)(struct dw_pcie *pcie);
void (*stop_link)(struct dw_pcie *pcie);
+ int (*assert_perst)(struct dw_pcie *pcie, bool assert);
+};
+
+struct debugfs_info {
+ struct dentry *debug_dir;
+ void *rasdes_info;
};
struct dw_pcie {
struct device *dev;
void __iomem *dbi_base;
+ resource_size_t dbi_phys_addr;
void __iomem *dbi_base2;
void __iomem *atu_base;
+ void __iomem *elbi_base;
+ resource_size_t atu_phys_addr;
size_t atu_size;
+ resource_size_t parent_bus_offset;
u32 num_ib_windows;
u32 num_ob_windows;
u32 region_align;
@@ -385,14 +522,33 @@ struct dw_pcie {
u32 type;
unsigned long caps;
int num_lanes;
- int link_gen;
+ int max_link_speed;
u8 n_fts[2];
struct dw_edma_chip edma;
+ bool l1ss_support; /* L1 PM Substates support */
struct clk_bulk_data app_clks[DW_PCIE_NUM_APP_CLKS];
struct clk_bulk_data core_clks[DW_PCIE_NUM_CORE_CLKS];
struct reset_control_bulk_data app_rsts[DW_PCIE_NUM_APP_RSTS];
struct reset_control_bulk_data core_rsts[DW_PCIE_NUM_CORE_RSTS];
struct gpio_desc *pe_rst;
+ bool suspended;
+ struct debugfs_info *debugfs;
+ enum dw_pcie_device_mode mode;
+ u16 ptm_vsec_offset;
+ struct pci_ptm_debugfs *ptm_debugfs;
+
+ /*
+ * If iATU input addresses are offset from CPU physical addresses,
+ * we previously required .cpu_addr_fixup() to convert them. We
+ * now rely on the devicetree instead. If .cpu_addr_fixup()
+ * exists, we compare its results with devicetree.
+ *
+ * If .cpu_addr_fixup() does not exist, we assume the offset is
+ * zero and warn if devicetree claims otherwise. If we know all
+ * devicetrees correctly describe the offset, set
+ * use_parent_dt_ranges to true to avoid this warning.
+ */
+ bool use_parent_dt_ranges;
};
#define to_dw_pcie_from_pp(port) container_of((port), struct dw_pcie, pp)
@@ -406,6 +562,8 @@ void dw_pcie_version_detect(struct dw_pcie *pci);
u8 dw_pcie_find_capability(struct dw_pcie *pci, u8 cap);
u16 dw_pcie_find_ext_capability(struct dw_pcie *pci, u8 cap);
+u16 dw_pcie_find_rasdes_capability(struct dw_pcie *pci);
+u16 dw_pcie_find_ptm_capability(struct dw_pcie *pci);
int dw_pcie_read(void __iomem *addr, int size, u32 *val);
int dw_pcie_write(void __iomem *addr, int size, u32 val);
@@ -413,23 +571,26 @@ int dw_pcie_write(void __iomem *addr, int size, u32 val);
u32 dw_pcie_read_dbi(struct dw_pcie *pci, u32 reg, size_t size);
void dw_pcie_write_dbi(struct dw_pcie *pci, u32 reg, size_t size, u32 val);
void dw_pcie_write_dbi2(struct dw_pcie *pci, u32 reg, size_t size, u32 val);
-int dw_pcie_link_up(struct dw_pcie *pci);
+bool dw_pcie_link_up(struct dw_pcie *pci);
void dw_pcie_upconfig_setup(struct dw_pcie *pci);
int dw_pcie_wait_for_link(struct dw_pcie *pci);
-int dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
- u64 cpu_addr, u64 pci_addr, u64 size);
-int dw_pcie_prog_ep_outbound_atu(struct dw_pcie *pci, u8 func_no, int index,
- int type, u64 cpu_addr, u64 pci_addr, u64 size);
+int dw_pcie_link_get_max_link_width(struct dw_pcie *pci);
+int dw_pcie_prog_outbound_atu(struct dw_pcie *pci,
+ const struct dw_pcie_ob_atu_cfg *atu);
int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int type,
- u64 cpu_addr, u64 pci_addr, u64 size);
+ u64 parent_bus_addr, u64 pci_addr, u64 size);
int dw_pcie_prog_ep_inbound_atu(struct dw_pcie *pci, u8 func_no, int index,
- int type, u64 cpu_addr, u8 bar);
+ int type, u64 parent_bus_addr,
+ u8 bar, size_t size);
void dw_pcie_disable_atu(struct dw_pcie *pci, u32 dir, int index);
+void dw_pcie_hide_unsupported_l1ss(struct dw_pcie *pci);
void dw_pcie_setup(struct dw_pcie *pci);
void dw_pcie_iatu_detect(struct dw_pcie *pci);
int dw_pcie_edma_detect(struct dw_pcie *pci);
void dw_pcie_edma_remove(struct dw_pcie *pci);
-void dw_pcie_print_link_status(struct dw_pcie *pci);
+resource_size_t dw_pcie_parent_bus_offset(struct dw_pcie *pci,
+ const char *reg_name,
+ resource_size_t cpu_phy_addr);
static inline void dw_pcie_writel_dbi(struct dw_pcie *pci, u32 reg, u32 val)
{
@@ -466,6 +627,141 @@ static inline void dw_pcie_writel_dbi2(struct dw_pcie *pci, u32 reg, u32 val)
dw_pcie_write_dbi2(pci, reg, 0x4, val);
}
+static inline int dw_pcie_read_cfg_byte(struct dw_pcie *pci, int where,
+ u8 *val)
+{
+ *val = dw_pcie_readb_dbi(pci, where);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static inline int dw_pcie_read_cfg_word(struct dw_pcie *pci, int where,
+ u16 *val)
+{
+ *val = dw_pcie_readw_dbi(pci, where);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static inline int dw_pcie_read_cfg_dword(struct dw_pcie *pci, int where,
+ u32 *val)
+{
+ *val = dw_pcie_readl_dbi(pci, where);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static inline unsigned int dw_pcie_ep_get_dbi_offset(struct dw_pcie_ep *ep,
+ u8 func_no)
+{
+ unsigned int dbi_offset = 0;
+
+ if (ep->ops->get_dbi_offset)
+ dbi_offset = ep->ops->get_dbi_offset(ep, func_no);
+
+ return dbi_offset;
+}
+
+static inline u32 dw_pcie_ep_read_dbi(struct dw_pcie_ep *ep, u8 func_no,
+ u32 reg, size_t size)
+{
+ unsigned int offset = dw_pcie_ep_get_dbi_offset(ep, func_no);
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+ return dw_pcie_read_dbi(pci, offset + reg, size);
+}
+
+static inline void dw_pcie_ep_write_dbi(struct dw_pcie_ep *ep, u8 func_no,
+ u32 reg, size_t size, u32 val)
+{
+ unsigned int offset = dw_pcie_ep_get_dbi_offset(ep, func_no);
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+ dw_pcie_write_dbi(pci, offset + reg, size, val);
+}
+
+static inline void dw_pcie_ep_writel_dbi(struct dw_pcie_ep *ep, u8 func_no,
+ u32 reg, u32 val)
+{
+ dw_pcie_ep_write_dbi(ep, func_no, reg, 0x4, val);
+}
+
+static inline u32 dw_pcie_ep_readl_dbi(struct dw_pcie_ep *ep, u8 func_no,
+ u32 reg)
+{
+ return dw_pcie_ep_read_dbi(ep, func_no, reg, 0x4);
+}
+
+static inline void dw_pcie_ep_writew_dbi(struct dw_pcie_ep *ep, u8 func_no,
+ u32 reg, u16 val)
+{
+ dw_pcie_ep_write_dbi(ep, func_no, reg, 0x2, val);
+}
+
+static inline u16 dw_pcie_ep_readw_dbi(struct dw_pcie_ep *ep, u8 func_no,
+ u32 reg)
+{
+ return dw_pcie_ep_read_dbi(ep, func_no, reg, 0x2);
+}
+
+static inline void dw_pcie_ep_writeb_dbi(struct dw_pcie_ep *ep, u8 func_no,
+ u32 reg, u8 val)
+{
+ dw_pcie_ep_write_dbi(ep, func_no, reg, 0x1, val);
+}
+
+static inline u8 dw_pcie_ep_readb_dbi(struct dw_pcie_ep *ep, u8 func_no,
+ u32 reg)
+{
+ return dw_pcie_ep_read_dbi(ep, func_no, reg, 0x1);
+}
+
+static inline int dw_pcie_ep_read_cfg_byte(struct dw_pcie_ep *ep, u8 func_no,
+ int where, u8 *val)
+{
+ *val = dw_pcie_ep_readb_dbi(ep, func_no, where);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static inline int dw_pcie_ep_read_cfg_word(struct dw_pcie_ep *ep, u8 func_no,
+ int where, u16 *val)
+{
+ *val = dw_pcie_ep_readw_dbi(ep, func_no, where);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static inline int dw_pcie_ep_read_cfg_dword(struct dw_pcie_ep *ep, u8 func_no,
+ int where, u32 *val)
+{
+ *val = dw_pcie_ep_readl_dbi(ep, func_no, where);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static inline unsigned int dw_pcie_ep_get_dbi2_offset(struct dw_pcie_ep *ep,
+ u8 func_no)
+{
+ unsigned int dbi2_offset = 0;
+
+ if (ep->ops->get_dbi2_offset)
+ dbi2_offset = ep->ops->get_dbi2_offset(ep, func_no);
+ else if (ep->ops->get_dbi_offset) /* for backward compatibility */
+ dbi2_offset = ep->ops->get_dbi_offset(ep, func_no);
+
+ return dbi2_offset;
+}
+
+static inline void dw_pcie_ep_write_dbi2(struct dw_pcie_ep *ep, u8 func_no,
+ u32 reg, size_t size, u32 val)
+{
+ unsigned int offset = dw_pcie_ep_get_dbi2_offset(ep, func_no);
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+ dw_pcie_write_dbi2(pci, offset + reg, size, val);
+}
+
+static inline void dw_pcie_ep_writel_dbi2(struct dw_pcie_ep *ep, u8 func_no,
+ u32 reg, u32 val)
+{
+ dw_pcie_ep_write_dbi2(ep, func_no, reg, 0x4, val);
+}
+
static inline void dw_pcie_dbi_ro_wr_en(struct dw_pcie *pci)
{
u32 reg;
@@ -502,8 +798,33 @@ static inline void dw_pcie_stop_link(struct dw_pcie *pci)
pci->ops->stop_link(pci);
}
+static inline int dw_pcie_assert_perst(struct dw_pcie *pci, bool assert)
+{
+ if (pci->ops && pci->ops->assert_perst)
+ return pci->ops->assert_perst(pci, assert);
+
+ return 0;
+}
+
+static inline enum dw_pcie_ltssm dw_pcie_get_ltssm(struct dw_pcie *pci)
+{
+ u32 val;
+
+ if (pci->ops && pci->ops->get_ltssm)
+ return pci->ops->get_ltssm(pci);
+
+ val = dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG0);
+
+ return (enum dw_pcie_ltssm)FIELD_GET(PORT_LOGIC_LTSSM_STATE_MASK, val);
+}
+
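For reference, a minimal sketch of how a glue driver might use dw_pcie_get_ltssm() to poll for L0, for instance from its .start_link() path; the retry budget and delay are placeholders, not part of this patch:

static int example_wait_for_l0(struct dw_pcie *pci)
{
	unsigned int retries;

	/* Placeholder retry budget; real drivers pick their own timeout. */
	for (retries = 0; retries < 100; retries++) {
		if (dw_pcie_get_ltssm(pci) == DW_PCIE_LTSSM_L0)
			return 0;
		usleep_range(1000, 2000);
	}

	return -ETIMEDOUT;
}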
#ifdef CONFIG_PCIE_DW_HOST
+int dw_pcie_suspend_noirq(struct dw_pcie *pci);
+int dw_pcie_resume_noirq(struct dw_pcie *pci);
irqreturn_t dw_handle_msi_irq(struct dw_pcie_rp *pp);
+void dw_pcie_msi_init(struct dw_pcie_rp *pp);
+int dw_pcie_msi_host_init(struct dw_pcie_rp *pp);
+void dw_pcie_free_msi(struct dw_pcie_rp *pp);
int dw_pcie_setup_rc(struct dw_pcie_rp *pp);
int dw_pcie_host_init(struct dw_pcie_rp *pp);
void dw_pcie_host_deinit(struct dw_pcie_rp *pp);
@@ -511,11 +832,32 @@ int dw_pcie_allocate_domains(struct dw_pcie_rp *pp);
void __iomem *dw_pcie_own_conf_map_bus(struct pci_bus *bus, unsigned int devfn,
int where);
#else
+static inline int dw_pcie_suspend_noirq(struct dw_pcie *pci)
+{
+ return 0;
+}
+
+static inline int dw_pcie_resume_noirq(struct dw_pcie *pci)
+{
+ return 0;
+}
+
static inline irqreturn_t dw_handle_msi_irq(struct dw_pcie_rp *pp)
{
return IRQ_NONE;
}
+static inline void dw_pcie_msi_init(struct dw_pcie_rp *pp)
+{ }
+
+static inline int dw_pcie_msi_host_init(struct dw_pcie_rp *pp)
+{
+ return -ENODEV;
+}
+
+static inline void dw_pcie_free_msi(struct dw_pcie_rp *pp)
+{ }
+
static inline int dw_pcie_setup_rc(struct dw_pcie_rp *pp)
{
return 0;
@@ -544,11 +886,12 @@ static inline void __iomem *dw_pcie_own_conf_map_bus(struct pci_bus *bus,
#ifdef CONFIG_PCIE_DW_EP
void dw_pcie_ep_linkup(struct dw_pcie_ep *ep);
+void dw_pcie_ep_linkdown(struct dw_pcie_ep *ep);
int dw_pcie_ep_init(struct dw_pcie_ep *ep);
-int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep);
-void dw_pcie_ep_init_notify(struct dw_pcie_ep *ep);
-void dw_pcie_ep_exit(struct dw_pcie_ep *ep);
-int dw_pcie_ep_raise_legacy_irq(struct dw_pcie_ep *ep, u8 func_no);
+int dw_pcie_ep_init_registers(struct dw_pcie_ep *ep);
+void dw_pcie_ep_deinit(struct dw_pcie_ep *ep);
+void dw_pcie_ep_cleanup(struct dw_pcie_ep *ep);
+int dw_pcie_ep_raise_intx_irq(struct dw_pcie_ep *ep, u8 func_no);
int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
u8 interrupt_num);
int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
@@ -556,6 +899,7 @@ int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
int dw_pcie_ep_raise_msix_irq_doorbell(struct dw_pcie_ep *ep, u8 func_no,
u16 interrupt_num);
void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar);
+int dw_pcie_ep_hide_ext_capability(struct dw_pcie *pci, u8 prev_cap, u8 cap);
struct dw_pcie_ep_func *
dw_pcie_ep_get_func_from_ep(struct dw_pcie_ep *ep, u8 func_no);
#else
@@ -563,25 +907,29 @@ static inline void dw_pcie_ep_linkup(struct dw_pcie_ep *ep)
{
}
+static inline void dw_pcie_ep_linkdown(struct dw_pcie_ep *ep)
+{
+}
+
static inline int dw_pcie_ep_init(struct dw_pcie_ep *ep)
{
return 0;
}
-static inline int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep)
+static inline int dw_pcie_ep_init_registers(struct dw_pcie_ep *ep)
{
return 0;
}
-static inline void dw_pcie_ep_init_notify(struct dw_pcie_ep *ep)
+static inline void dw_pcie_ep_deinit(struct dw_pcie_ep *ep)
{
}
-static inline void dw_pcie_ep_exit(struct dw_pcie_ep *ep)
+static inline void dw_pcie_ep_cleanup(struct dw_pcie_ep *ep)
{
}
-static inline int dw_pcie_ep_raise_legacy_irq(struct dw_pcie_ep *ep, u8 func_no)
+static inline int dw_pcie_ep_raise_intx_irq(struct dw_pcie_ep *ep, u8 func_no)
{
return 0;
}
@@ -609,10 +957,30 @@ static inline void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)
{
}
+static inline int dw_pcie_ep_hide_ext_capability(struct dw_pcie *pci,
+ u8 prev_cap, u8 cap)
+{
+ return 0;
+}
+
static inline struct dw_pcie_ep_func *
dw_pcie_ep_get_func_from_ep(struct dw_pcie_ep *ep, u8 func_no)
{
return NULL;
}
#endif
+
+#ifdef CONFIG_PCIE_DW_DEBUGFS
+void dwc_pcie_debugfs_init(struct dw_pcie *pci, enum dw_pcie_device_mode mode);
+void dwc_pcie_debugfs_deinit(struct dw_pcie *pci);
+#else
+static inline void dwc_pcie_debugfs_init(struct dw_pcie *pci,
+ enum dw_pcie_device_mode mode)
+{
+}
+static inline void dwc_pcie_debugfs_deinit(struct dw_pcie *pci)
+{
+}
+#endif
+
#endif /* _PCIE_DESIGNWARE_H */
diff --git a/drivers/pci/controller/dwc/pcie-dw-rockchip.c b/drivers/pci/controller/dwc/pcie-dw-rockchip.c
index c1e7653e508e..f8605fe61a41 100644
--- a/drivers/pci/controller/dwc/pcie-dw-rockchip.c
+++ b/drivers/pci/controller/dwc/pcie-dw-rockchip.c
@@ -8,71 +8,108 @@
* Author: Simon Xue <xxm@rock-chips.com>
*/
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/gpio/consumer.h>
+#include <linux/hw_bitfield.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/reset.h>
+#include "../../pci.h"
#include "pcie-designware.h"
/*
* The upper 16 bits of PCIE_CLIENT_CONFIG are a write
* mask for the lower 16 bits.
*/
-#define HIWORD_UPDATE(mask, val) (((mask) << 16) | (val))
-#define HIWORD_UPDATE_BIT(val) HIWORD_UPDATE(val, val)
-#define HIWORD_DISABLE_BIT(val) HIWORD_UPDATE(val, ~val)
#define to_rockchip_pcie(x) dev_get_drvdata((x)->dev)
-#define PCIE_CLIENT_RC_MODE HIWORD_UPDATE_BIT(0x40)
-#define PCIE_CLIENT_ENABLE_LTSSM HIWORD_UPDATE_BIT(0xc)
-#define PCIE_SMLH_LINKUP BIT(16)
-#define PCIE_RDLH_LINKUP BIT(17)
-#define PCIE_LINKUP (PCIE_SMLH_LINKUP | PCIE_RDLH_LINKUP)
-#define PCIE_L0S_ENTRY 0x11
-#define PCIE_CLIENT_GENERAL_CONTROL 0x0
+/* General Control Register */
+#define PCIE_CLIENT_GENERAL_CON 0x0
+#define PCIE_CLIENT_MODE_MASK GENMASK(7, 4)
+#define PCIE_CLIENT_MODE_EP 0x0UL
+#define PCIE_CLIENT_MODE_RC 0x4UL
+#define PCIE_CLIENT_SET_MODE(x) FIELD_PREP_WM16(PCIE_CLIENT_MODE_MASK, (x))
+#define PCIE_CLIENT_LD_RQ_RST_GRT FIELD_PREP_WM16(BIT(3), 1)
+#define PCIE_CLIENT_ENABLE_LTSSM FIELD_PREP_WM16(BIT(2), 1)
+#define PCIE_CLIENT_DISABLE_LTSSM FIELD_PREP_WM16(BIT(2), 0)
+
+/* Interrupt Status Register Related to Legacy Interrupt */
#define PCIE_CLIENT_INTR_STATUS_LEGACY 0x8
+
+/* Interrupt Status Register Related to Miscellaneous Operation */
+#define PCIE_CLIENT_INTR_STATUS_MISC 0x10
+#define PCIE_RDLH_LINK_UP_CHGED BIT(1)
+#define PCIE_LINK_REQ_RST_NOT_INT BIT(2)
+
+/* Interrupt Mask Register Related to Legacy Interrupt */
#define PCIE_CLIENT_INTR_MASK_LEGACY 0x1c
-#define PCIE_CLIENT_GENERAL_DEBUG 0x104
+#define PCIE_INTR_MASK GENMASK(7, 0)
+#define PCIE_INTR_CLAMP(_x) ((BIT((_x)) & PCIE_INTR_MASK))
+#define PCIE_INTR_LEGACY_MASK(x) (PCIE_INTR_CLAMP((x)) | \
+ (PCIE_INTR_CLAMP((x)) << 16))
+#define PCIE_INTR_LEGACY_UNMASK(x) (PCIE_INTR_CLAMP((x)) << 16)
+
+/* Interrupt Mask Register Related to Miscellaneous Operation */
+#define PCIE_CLIENT_INTR_MASK_MISC 0x24
+
+/* Power Management Control Register */
+#define PCIE_CLIENT_POWER_CON 0x2c
+#define PCIE_CLKREQ_READY FIELD_PREP_WM16(BIT(0), 1)
+#define PCIE_CLKREQ_NOT_READY FIELD_PREP_WM16(BIT(0), 0)
+#define PCIE_CLKREQ_PULL_DOWN FIELD_PREP_WM16(GENMASK(13, 12), 1)
+
+/* Hot Reset Control Register */
#define PCIE_CLIENT_HOT_RESET_CTRL 0x180
+#define PCIE_LTSSM_APP_DLY2_EN BIT(1)
+#define PCIE_LTSSM_APP_DLY2_DONE BIT(3)
+#define PCIE_LTSSM_ENABLE_ENHANCE BIT(4)
+
+/* LTSSM Status Register */
#define PCIE_CLIENT_LTSSM_STATUS 0x300
-#define PCIE_LTSSM_ENABLE_ENHANCE BIT(4)
-#define PCIE_LTSSM_STATUS_MASK GENMASK(5, 0)
+#define PCIE_LINKUP 0x3
+#define PCIE_LINKUP_MASK GENMASK(17, 16)
+#define PCIE_LTSSM_STATUS_MASK GENMASK(5, 0)
struct rockchip_pcie {
- struct dw_pcie pci;
- void __iomem *apb_base;
- struct phy *phy;
- struct clk_bulk_data *clks;
- unsigned int clk_cnt;
- struct reset_control *rst;
- struct gpio_desc *rst_gpio;
- struct regulator *vpcie3v3;
- struct irq_domain *irq_domain;
+ struct dw_pcie pci;
+ void __iomem *apb_base;
+ struct phy *phy;
+ struct clk_bulk_data *clks;
+ unsigned int clk_cnt;
+ struct reset_control *rst;
+ struct gpio_desc *rst_gpio;
+ struct irq_domain *irq_domain;
+ const struct rockchip_pcie_of_data *data;
+ bool supports_clkreq;
};
-static int rockchip_pcie_readl_apb(struct rockchip_pcie *rockchip,
- u32 reg)
+struct rockchip_pcie_of_data {
+ enum dw_pcie_device_mode mode;
+ const struct pci_epc_features *epc_features;
+};
+
+static int rockchip_pcie_readl_apb(struct rockchip_pcie *rockchip, u32 reg)
{
return readl_relaxed(rockchip->apb_base + reg);
}
-static void rockchip_pcie_writel_apb(struct rockchip_pcie *rockchip,
- u32 val, u32 reg)
+static void rockchip_pcie_writel_apb(struct rockchip_pcie *rockchip, u32 val,
+ u32 reg)
{
writel_relaxed(val, rockchip->apb_base + reg);
}
-static void rockchip_pcie_legacy_int_handler(struct irq_desc *desc)
+static void rockchip_pcie_intx_handler(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
struct rockchip_pcie *rockchip = irq_desc_get_handler_data(desc);
@@ -91,14 +128,14 @@ static void rockchip_pcie_legacy_int_handler(struct irq_desc *desc)
static void rockchip_intx_mask(struct irq_data *data)
{
rockchip_pcie_writel_apb(irq_data_get_irq_chip_data(data),
- HIWORD_UPDATE_BIT(BIT(data->hwirq)),
+ PCIE_INTR_LEGACY_MASK(data->hwirq),
PCIE_CLIENT_INTR_MASK_LEGACY);
};
static void rockchip_intx_unmask(struct irq_data *data)
{
rockchip_pcie_writel_apb(irq_data_get_irq_chip_data(data),
- HIWORD_DISABLE_BIT(BIT(data->hwirq)),
+ PCIE_INTR_LEGACY_UNMASK(data->hwirq),
PCIE_CLIENT_INTR_MASK_LEGACY);
};
@@ -133,8 +170,8 @@ static int rockchip_pcie_init_irq_domain(struct rockchip_pcie *rockchip)
return -EINVAL;
}
- rockchip->irq_domain = irq_domain_add_linear(intc, PCI_NUM_INTX,
- &intx_domain_ops, rockchip);
+ rockchip->irq_domain = irq_domain_create_linear(of_fwnode_handle(intc), PCI_NUM_INTX,
+ &intx_domain_ops, rockchip);
of_node_put(intc);
if (!rockchip->irq_domain) {
dev_err(dev, "failed to get a INTx IRQ domain\n");
@@ -144,22 +181,73 @@ static int rockchip_pcie_init_irq_domain(struct rockchip_pcie *rockchip)
return 0;
}
+static u32 rockchip_pcie_get_ltssm(struct rockchip_pcie *rockchip)
+{
+ return rockchip_pcie_readl_apb(rockchip, PCIE_CLIENT_LTSSM_STATUS);
+}
+
static void rockchip_pcie_enable_ltssm(struct rockchip_pcie *rockchip)
{
rockchip_pcie_writel_apb(rockchip, PCIE_CLIENT_ENABLE_LTSSM,
- PCIE_CLIENT_GENERAL_CONTROL);
+ PCIE_CLIENT_GENERAL_CON);
+}
+
+static void rockchip_pcie_disable_ltssm(struct rockchip_pcie *rockchip)
+{
+ rockchip_pcie_writel_apb(rockchip, PCIE_CLIENT_DISABLE_LTSSM,
+ PCIE_CLIENT_GENERAL_CON);
}
-static int rockchip_pcie_link_up(struct dw_pcie *pci)
+static bool rockchip_pcie_link_up(struct dw_pcie *pci)
{
struct rockchip_pcie *rockchip = to_rockchip_pcie(pci);
- u32 val = rockchip_pcie_readl_apb(rockchip, PCIE_CLIENT_LTSSM_STATUS);
+ u32 val = rockchip_pcie_get_ltssm(rockchip);
- if ((val & PCIE_LINKUP) == PCIE_LINKUP &&
- (val & PCIE_LTSSM_STATUS_MASK) == PCIE_L0S_ENTRY)
- return 1;
+ return FIELD_GET(PCIE_LINKUP_MASK, val) == PCIE_LINKUP;
+}
- return 0;
+/*
+ * See e.g. section '11.6.6.4 L1 Substate' in the RK3588 TRM V1.0 for the steps
+ * needed to support L1 substates. Currently, just enable L1 substates for RC
+ * mode if CLKREQ# is properly connected and supports-clkreq is present in DT.
+ * For EP mode, more work is needed to actually save power in L1 substates,
+ * so keep L1 substates disabled until there is proper support.
+ */
+static void rockchip_pcie_configure_l1ss(struct dw_pcie *pci)
+{
+ struct rockchip_pcie *rockchip = to_rockchip_pcie(pci);
+
+ /* Enable L1 substates if CLKREQ# is properly connected */
+ if (rockchip->supports_clkreq) {
+ rockchip_pcie_writel_apb(rockchip, PCIE_CLKREQ_READY,
+ PCIE_CLIENT_POWER_CON);
+ pci->l1ss_support = true;
+ return;
+ }
+
+ /*
+ * Otherwise, assert CLKREQ# unconditionally. Since
+ * pci->l1ss_support is not set, the DWC core will prevent L1
+ * Substates support from being advertised.
+ */
+ rockchip_pcie_writel_apb(rockchip,
+ PCIE_CLKREQ_PULL_DOWN | PCIE_CLKREQ_NOT_READY,
+ PCIE_CLIENT_POWER_CON);
+}
+
+static void rockchip_pcie_enable_l0s(struct dw_pcie *pci)
+{
+ u32 cap, lnkcap;
+
+ /* Enable L0S capability for all SoCs */
+ cap = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ if (cap) {
+ lnkcap = dw_pcie_readl_dbi(pci, cap + PCI_EXP_LNKCAP);
+ lnkcap |= PCI_EXP_LNKCAP_ASPM_L0S;
+ dw_pcie_dbi_ro_wr_en(pci);
+ dw_pcie_writel_dbi(pci, cap + PCI_EXP_LNKCAP, lnkcap);
+ dw_pcie_dbi_ro_wr_dis(pci);
+ }
}
static int rockchip_pcie_start_link(struct dw_pcie *pci)
@@ -180,18 +268,24 @@ static int rockchip_pcie_start_link(struct dw_pcie *pci)
* We need more extra time as before, rather than setting just
* 100us, as we don't know how long the device needs to reset.
*/
- msleep(100);
+ msleep(PCIE_T_PVPERL_MS);
gpiod_set_value_cansleep(rockchip->rst_gpio, 1);
return 0;
}
+static void rockchip_pcie_stop_link(struct dw_pcie *pci)
+{
+ struct rockchip_pcie *rockchip = to_rockchip_pcie(pci);
+
+ rockchip_pcie_disable_ltssm(rockchip);
+}
+
static int rockchip_pcie_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct rockchip_pcie *rockchip = to_rockchip_pcie(pci);
struct device *dev = rockchip->pci.dev;
- u32 val = HIWORD_UPDATE_BIT(PCIE_LTSSM_ENABLE_ENHANCE);
int irq, ret;
irq = of_irq_get_byname(dev->of_node, "legacy");
@@ -202,20 +296,119 @@ static int rockchip_pcie_host_init(struct dw_pcie_rp *pp)
if (ret < 0)
dev_err(dev, "failed to init irq domain\n");
- irq_set_chained_handler_and_data(irq, rockchip_pcie_legacy_int_handler,
+ irq_set_chained_handler_and_data(irq, rockchip_pcie_intx_handler,
rockchip);
- /* LTSSM enable control mode */
- rockchip_pcie_writel_apb(rockchip, val, PCIE_CLIENT_HOT_RESET_CTRL);
-
- rockchip_pcie_writel_apb(rockchip, PCIE_CLIENT_RC_MODE,
- PCIE_CLIENT_GENERAL_CONTROL);
+ rockchip_pcie_configure_l1ss(pci);
+ rockchip_pcie_enable_l0s(pci);
return 0;
}
static const struct dw_pcie_host_ops rockchip_pcie_host_ops = {
- .host_init = rockchip_pcie_host_init,
+ .init = rockchip_pcie_host_init,
+};
+
+/*
+ * ATS does not work on RK3588 when running in EP mode.
+ *
+ * After the host has enabled ATS on the EP side, it will send an IOTLB
+ * invalidation request to the EP side. However, the RK3588 will never send
+ * a completion back and eventually the host will print an IOTLB_INV_TIMEOUT
+ * error, and the EP will not be operational. If we hide the ATS capability,
+ * things work as expected.
+ */
+static void rockchip_pcie_ep_hide_broken_ats_cap_rk3588(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct device *dev = pci->dev;
+
+ /* Only hide the ATS capability for RK3588 running in EP mode. */
+ if (!of_device_is_compatible(dev->of_node, "rockchip,rk3588-pcie-ep"))
+ return;
+
+ if (dw_pcie_ep_hide_ext_capability(pci, PCI_EXT_CAP_ID_SECPCI,
+ PCI_EXT_CAP_ID_ATS))
+ dev_err(dev, "failed to hide ATS capability\n");
+}
+
+static void rockchip_pcie_ep_init(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ enum pci_barno bar;
+
+ rockchip_pcie_enable_l0s(pci);
+ rockchip_pcie_ep_hide_broken_ats_cap_rk3588(ep);
+
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
+ dw_pcie_ep_reset_bar(pci, bar);
+};
+
+static int rockchip_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
+ unsigned int type, u16 interrupt_num)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+ switch (type) {
+ case PCI_IRQ_INTX:
+ return dw_pcie_ep_raise_intx_irq(ep, func_no);
+ case PCI_IRQ_MSI:
+ return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
+ case PCI_IRQ_MSIX:
+ return dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num);
+ default:
+ dev_err(pci->dev, "UNKNOWN IRQ type\n");
+ }
+
+ return 0;
+}
+
+static const struct pci_epc_features rockchip_pcie_epc_features_rk3568 = {
+ .linkup_notifier = true,
+ .msi_capable = true,
+ .msix_capable = true,
+ .align = SZ_64K,
+ .bar[BAR_0] = { .type = BAR_RESIZABLE, },
+ .bar[BAR_1] = { .type = BAR_RESIZABLE, },
+ .bar[BAR_2] = { .type = BAR_RESIZABLE, },
+ .bar[BAR_3] = { .type = BAR_RESIZABLE, },
+ .bar[BAR_4] = { .type = BAR_RESIZABLE, },
+ .bar[BAR_5] = { .type = BAR_RESIZABLE, },
+};
+
+/*
+ * BAR4 on rk3588 exposes the ATU Port Logic Structure to the host regardless of
+ * iATU settings for BAR4. This means that BAR4 cannot be used by an EPF driver,
+ * so mark it as RESERVED. (rockchip_pcie_ep_init() will disable all BARs by
+ * default.) If the host could write to BAR4, the iATU settings (for all other
+ * BARs) would be overwritten, resulting in (all other BARs) no longer working.
+ */
+static const struct pci_epc_features rockchip_pcie_epc_features_rk3588 = {
+ .linkup_notifier = true,
+ .msi_capable = true,
+ .msix_capable = true,
+ .align = SZ_64K,
+ .bar[BAR_0] = { .type = BAR_RESIZABLE, },
+ .bar[BAR_1] = { .type = BAR_RESIZABLE, },
+ .bar[BAR_2] = { .type = BAR_RESIZABLE, },
+ .bar[BAR_3] = { .type = BAR_RESIZABLE, },
+ .bar[BAR_4] = { .type = BAR_RESERVED, },
+ .bar[BAR_5] = { .type = BAR_RESIZABLE, },
+};
+
+static const struct pci_epc_features *
+rockchip_pcie_get_features(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct rockchip_pcie *rockchip = to_rockchip_pcie(pci);
+
+ return rockchip->data->epc_features;
+}
+
+static const struct dw_pcie_ep_ops rockchip_pcie_ep_ops = {
+ .init = rockchip_pcie_ep_init,
+ .raise_irq = rockchip_pcie_raise_irq,
+ .get_features = rockchip_pcie_get_features,
};
static int rockchip_pcie_clk_init(struct rockchip_pcie *rockchip)
@@ -225,11 +418,15 @@ static int rockchip_pcie_clk_init(struct rockchip_pcie *rockchip)
ret = devm_clk_bulk_get_all(dev, &rockchip->clks);
if (ret < 0)
- return ret;
+ return dev_err_probe(dev, ret, "failed to get clocks\n");
rockchip->clk_cnt = ret;
- return clk_bulk_prepare_enable(rockchip->clk_cnt, rockchip->clks);
+ ret = clk_bulk_prepare_enable(rockchip->clk_cnt, rockchip->clks);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to enable clocks\n");
+
+ return 0;
}
static int rockchip_pcie_resource_get(struct platform_device *pdev,
@@ -237,18 +434,23 @@ static int rockchip_pcie_resource_get(struct platform_device *pdev,
{
rockchip->apb_base = devm_platform_ioremap_resource_byname(pdev, "apb");
if (IS_ERR(rockchip->apb_base))
- return PTR_ERR(rockchip->apb_base);
+ return dev_err_probe(&pdev->dev, PTR_ERR(rockchip->apb_base),
+ "failed to map apb registers\n");
rockchip->rst_gpio = devm_gpiod_get_optional(&pdev->dev, "reset",
- GPIOD_OUT_HIGH);
+ GPIOD_OUT_LOW);
if (IS_ERR(rockchip->rst_gpio))
- return PTR_ERR(rockchip->rst_gpio);
+ return dev_err_probe(&pdev->dev, PTR_ERR(rockchip->rst_gpio),
+ "failed to get reset gpio\n");
rockchip->rst = devm_reset_control_array_get_exclusive(&pdev->dev);
if (IS_ERR(rockchip->rst))
return dev_err_probe(&pdev->dev, PTR_ERR(rockchip->rst),
"failed to get reset lines\n");
+ rockchip->supports_clkreq = of_property_read_bool(pdev->dev.of_node,
+ "supports-clkreq");
+
return 0;
}
@@ -275,22 +477,197 @@ static int rockchip_pcie_phy_init(struct rockchip_pcie *rockchip)
static void rockchip_pcie_phy_deinit(struct rockchip_pcie *rockchip)
{
- phy_exit(rockchip->phy);
phy_power_off(rockchip->phy);
+ phy_exit(rockchip->phy);
}
static const struct dw_pcie_ops dw_pcie_ops = {
.link_up = rockchip_pcie_link_up,
.start_link = rockchip_pcie_start_link,
+ .stop_link = rockchip_pcie_stop_link,
};
+static irqreturn_t rockchip_pcie_rc_sys_irq_thread(int irq, void *arg)
+{
+ struct rockchip_pcie *rockchip = arg;
+ struct dw_pcie *pci = &rockchip->pci;
+ struct dw_pcie_rp *pp = &pci->pp;
+ struct device *dev = pci->dev;
+ u32 reg;
+
+ reg = rockchip_pcie_readl_apb(rockchip, PCIE_CLIENT_INTR_STATUS_MISC);
+ rockchip_pcie_writel_apb(rockchip, reg, PCIE_CLIENT_INTR_STATUS_MISC);
+
+ dev_dbg(dev, "PCIE_CLIENT_INTR_STATUS_MISC: %#x\n", reg);
+ dev_dbg(dev, "LTSSM_STATUS: %#x\n", rockchip_pcie_get_ltssm(rockchip));
+
+ if (reg & PCIE_RDLH_LINK_UP_CHGED) {
+ if (rockchip_pcie_link_up(pci)) {
+ msleep(PCIE_RESET_CONFIG_WAIT_MS);
+ dev_dbg(dev, "Received Link up event. Starting enumeration!\n");
+ /* Rescan the bus to enumerate endpoint devices */
+ pci_lock_rescan_remove();
+ pci_rescan_bus(pp->bridge->bus);
+ pci_unlock_rescan_remove();
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t rockchip_pcie_ep_sys_irq_thread(int irq, void *arg)
+{
+ struct rockchip_pcie *rockchip = arg;
+ struct dw_pcie *pci = &rockchip->pci;
+ struct device *dev = pci->dev;
+ u32 reg, val;
+
+ reg = rockchip_pcie_readl_apb(rockchip, PCIE_CLIENT_INTR_STATUS_MISC);
+ rockchip_pcie_writel_apb(rockchip, reg, PCIE_CLIENT_INTR_STATUS_MISC);
+
+ dev_dbg(dev, "PCIE_CLIENT_INTR_STATUS_MISC: %#x\n", reg);
+ dev_dbg(dev, "LTSSM_STATUS: %#x\n", rockchip_pcie_get_ltssm(rockchip));
+
+ if (reg & PCIE_LINK_REQ_RST_NOT_INT) {
+ dev_dbg(dev, "hot reset or link-down reset\n");
+ dw_pcie_ep_linkdown(&pci->ep);
+ /* Stop delaying link training. */
+ val = FIELD_PREP_WM16(PCIE_LTSSM_APP_DLY2_DONE, 1);
+ rockchip_pcie_writel_apb(rockchip, val,
+ PCIE_CLIENT_HOT_RESET_CTRL);
+ }
+
+ if (reg & PCIE_RDLH_LINK_UP_CHGED) {
+ if (rockchip_pcie_link_up(pci)) {
+ dev_dbg(dev, "link up\n");
+ dw_pcie_ep_linkup(&pci->ep);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int rockchip_pcie_configure_rc(struct platform_device *pdev,
+ struct rockchip_pcie *rockchip)
+{
+ struct device *dev = &pdev->dev;
+ struct dw_pcie_rp *pp;
+ int irq, ret;
+ u32 val;
+
+ if (!IS_ENABLED(CONFIG_PCIE_ROCKCHIP_DW_HOST))
+ return -ENODEV;
+
+ irq = platform_get_irq_byname(pdev, "sys");
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_threaded_irq(dev, irq, NULL,
+ rockchip_pcie_rc_sys_irq_thread,
+ IRQF_ONESHOT, "pcie-sys-rc", rockchip);
+ if (ret) {
+ dev_err(dev, "failed to request PCIe sys IRQ\n");
+ return ret;
+ }
+
+ /* LTSSM enable control mode */
+ val = FIELD_PREP_WM16(PCIE_LTSSM_ENABLE_ENHANCE, 1);
+ rockchip_pcie_writel_apb(rockchip, val, PCIE_CLIENT_HOT_RESET_CTRL);
+
+ rockchip_pcie_writel_apb(rockchip,
+ PCIE_CLIENT_SET_MODE(PCIE_CLIENT_MODE_RC),
+ PCIE_CLIENT_GENERAL_CON);
+
+ pp = &rockchip->pci.pp;
+ pp->ops = &rockchip_pcie_host_ops;
+ pp->use_linkup_irq = true;
+
+ ret = dw_pcie_host_init(pp);
+ if (ret) {
+ dev_err(dev, "failed to initialize host\n");
+ return ret;
+ }
+
+ /* unmask DLL up/down indicator */
+ val = FIELD_PREP_WM16(PCIE_RDLH_LINK_UP_CHGED, 0);
+ rockchip_pcie_writel_apb(rockchip, val, PCIE_CLIENT_INTR_MASK_MISC);
+
+ return ret;
+}
+
+static int rockchip_pcie_configure_ep(struct platform_device *pdev,
+ struct rockchip_pcie *rockchip)
+{
+ struct device *dev = &pdev->dev;
+ int irq, ret;
+ u32 val;
+
+ if (!IS_ENABLED(CONFIG_PCIE_ROCKCHIP_DW_EP))
+ return -ENODEV;
+
+ irq = platform_get_irq_byname(pdev, "sys");
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_threaded_irq(dev, irq, NULL,
+ rockchip_pcie_ep_sys_irq_thread,
+ IRQF_ONESHOT, "pcie-sys-ep", rockchip);
+ if (ret) {
+ dev_err(dev, "failed to request PCIe sys IRQ\n");
+ return ret;
+ }
+
+ /*
+ * LTSSM enable control mode, and automatically delay link training on
+ * hot reset/link-down reset.
+ */
+ val = FIELD_PREP_WM16(PCIE_LTSSM_ENABLE_ENHANCE, 1) |
+ FIELD_PREP_WM16(PCIE_LTSSM_APP_DLY2_EN, 1);
+ rockchip_pcie_writel_apb(rockchip, val, PCIE_CLIENT_HOT_RESET_CTRL);
+
+ rockchip_pcie_writel_apb(rockchip,
+ PCIE_CLIENT_SET_MODE(PCIE_CLIENT_MODE_EP),
+ PCIE_CLIENT_GENERAL_CON);
+
+ rockchip->pci.ep.ops = &rockchip_pcie_ep_ops;
+ rockchip->pci.ep.page_size = SZ_64K;
+
+ dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
+
+ ret = dw_pcie_ep_init(&rockchip->pci.ep);
+ if (ret) {
+ dev_err(dev, "failed to initialize endpoint\n");
+ return ret;
+ }
+
+ ret = dw_pcie_ep_init_registers(&rockchip->pci.ep);
+ if (ret) {
+ dev_err(dev, "failed to initialize DWC endpoint registers\n");
+ dw_pcie_ep_deinit(&rockchip->pci.ep);
+ return ret;
+ }
+
+ pci_epc_init_notify(rockchip->pci.ep.epc);
+
+ /* unmask DLL up/down indicator and hot reset/link-down reset */
+ val = FIELD_PREP_WM16(PCIE_RDLH_LINK_UP_CHGED, 0) |
+ FIELD_PREP_WM16(PCIE_LINK_REQ_RST_NOT_INT, 0);
+ rockchip_pcie_writel_apb(rockchip, val, PCIE_CLIENT_INTR_MASK_MISC);
+
+ return ret;
+}
+
static int rockchip_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct rockchip_pcie *rockchip;
- struct dw_pcie_rp *pp;
+ const struct rockchip_pcie_of_data *data;
int ret;
+ data = of_device_get_match_data(dev);
+ if (!data)
+ return -EINVAL;
+
rockchip = devm_kzalloc(dev, sizeof(*rockchip), GFP_KERNEL);
if (!rockchip)
return -ENOMEM;
@@ -299,9 +676,11 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
rockchip->pci.dev = dev;
rockchip->pci.ops = &dw_pcie_ops;
+ rockchip->data = data;
- pp = &rockchip->pci.pp;
- pp->ops = &rockchip_pcie_host_ops;
+ /* Default N_FTS value (210) is broken, override it to 255 */
+ rockchip->pci.n_fts[0] = 255; /* Gen1 */
+ rockchip->pci.n_fts[1] = 255; /* Gen2+ */
ret = rockchip_pcie_resource_get(pdev, rockchip);
if (ret)
@@ -312,23 +691,15 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
return ret;
/* DON'T MOVE ME: must be enable before PHY init */
- rockchip->vpcie3v3 = devm_regulator_get_optional(dev, "vpcie3v3");
- if (IS_ERR(rockchip->vpcie3v3)) {
- if (PTR_ERR(rockchip->vpcie3v3) != -ENODEV)
- return dev_err_probe(dev, PTR_ERR(rockchip->vpcie3v3),
- "failed to get vpcie3v3 regulator\n");
- rockchip->vpcie3v3 = NULL;
- } else {
- ret = regulator_enable(rockchip->vpcie3v3);
- if (ret) {
- dev_err(dev, "failed to enable vpcie3v3 regulator\n");
- return ret;
- }
- }
+ ret = devm_regulator_get_enable_optional(dev, "vpcie3v3");
+ if (ret < 0 && ret != -ENODEV)
+ return dev_err_probe(dev, ret,
+ "failed to enable vpcie3v3 regulator\n");
ret = rockchip_pcie_phy_init(rockchip);
if (ret)
- goto disable_regulator;
+ return dev_err_probe(dev, ret,
+ "failed to initialize the phy\n");
ret = reset_control_deassert(rockchip->rst);
if (ret)
@@ -338,22 +709,60 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
if (ret)
goto deinit_phy;
- ret = dw_pcie_host_init(pp);
- if (!ret)
- return 0;
+ switch (data->mode) {
+ case DW_PCIE_RC_TYPE:
+ ret = rockchip_pcie_configure_rc(pdev, rockchip);
+ if (ret)
+ goto deinit_clk;
+ break;
+ case DW_PCIE_EP_TYPE:
+ ret = rockchip_pcie_configure_ep(pdev, rockchip);
+ if (ret)
+ goto deinit_clk;
+ break;
+ default:
+ dev_err(dev, "INVALID device type %d\n", data->mode);
+ ret = -EINVAL;
+ goto deinit_clk;
+ }
+
+ return 0;
+deinit_clk:
clk_bulk_disable_unprepare(rockchip->clk_cnt, rockchip->clks);
deinit_phy:
rockchip_pcie_phy_deinit(rockchip);
-disable_regulator:
- if (rockchip->vpcie3v3)
- regulator_disable(rockchip->vpcie3v3);
return ret;
}
+static const struct rockchip_pcie_of_data rockchip_pcie_rc_of_data_rk3568 = {
+ .mode = DW_PCIE_RC_TYPE,
+};
+
+static const struct rockchip_pcie_of_data rockchip_pcie_ep_of_data_rk3568 = {
+ .mode = DW_PCIE_EP_TYPE,
+ .epc_features = &rockchip_pcie_epc_features_rk3568,
+};
+
+static const struct rockchip_pcie_of_data rockchip_pcie_ep_of_data_rk3588 = {
+ .mode = DW_PCIE_EP_TYPE,
+ .epc_features = &rockchip_pcie_epc_features_rk3588,
+};
+
static const struct of_device_id rockchip_pcie_of_match[] = {
- { .compatible = "rockchip,rk3568-pcie", },
+ {
+ .compatible = "rockchip,rk3568-pcie",
+ .data = &rockchip_pcie_rc_of_data_rk3568,
+ },
+ {
+ .compatible = "rockchip,rk3568-pcie-ep",
+ .data = &rockchip_pcie_ep_of_data_rk3568,
+ },
+ {
+ .compatible = "rockchip,rk3588-pcie-ep",
+ .data = &rockchip_pcie_ep_of_data_rk3588,
+ },
{},
};
diff --git a/drivers/pci/controller/dwc/pcie-fu740.c b/drivers/pci/controller/dwc/pcie-fu740.c
index 0c90583c078b..66367252032b 100644
--- a/drivers/pci/controller/dwc/pcie-fu740.c
+++ b/drivers/pci/controller/dwc/pcie-fu740.c
@@ -279,7 +279,7 @@ static int fu740_pcie_host_init(struct dw_pcie_rp *pp)
}
static const struct dw_pcie_host_ops fu740_pcie_host_ops = {
- .host_init = fu740_pcie_host_init,
+ .init = fu740_pcie_host_init,
};
static const struct dw_pcie_ops dw_pcie_ops = {
@@ -299,6 +299,7 @@ static int fu740_pcie_probe(struct platform_device *pdev)
pci->dev = dev;
pci->ops = &dw_pcie_ops;
pci->pp.ops = &fu740_pcie_host_ops;
+ pci->pp.num_vectors = MAX_MSI_IRQS;
/* SiFive specific region: mgmt */
afp->mgmt_base = devm_platform_ioremap_resource_byname(pdev, "mgmt");
diff --git a/drivers/pci/controller/dwc/pcie-hisi.c b/drivers/pci/controller/dwc/pcie-hisi.c
index 8904b5b85ee5..3c17897e56fc 100644
--- a/drivers/pci/controller/dwc/pcie-hisi.c
+++ b/drivers/pci/controller/dwc/pcie-hisi.c
@@ -15,6 +15,7 @@
#include <linux/pci-acpi.h>
#include <linux/pci-ecam.h>
#include "../../pci.h"
+#include "../pci-host-common.h"
#if defined(CONFIG_PCI_HISI) || (defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS))
diff --git a/drivers/pci/controller/dwc/pcie-histb.c b/drivers/pci/controller/dwc/pcie-histb.c
index fd484cc7c481..a52071589377 100644
--- a/drivers/pci/controller/dwc/pcie-histb.c
+++ b/drivers/pci/controller/dwc/pcie-histb.c
@@ -151,7 +151,7 @@ static struct pci_ops histb_pci_ops = {
.write = histb_pcie_wr_own_conf,
};
-static int histb_pcie_link_up(struct dw_pcie *pci)
+static bool histb_pcie_link_up(struct dw_pcie *pci)
{
struct histb_pcie *hipcie = to_histb_pcie(pci);
u32 regval;
@@ -160,11 +160,8 @@ static int histb_pcie_link_up(struct dw_pcie *pci)
regval = histb_pcie_readl(hipcie, PCIE_SYS_STAT0);
status = histb_pcie_readl(hipcie, PCIE_SYS_STAT4);
status &= PCIE_LTSSM_STATE_MASK;
- if ((regval & PCIE_XMLH_LINK_UP) && (regval & PCIE_RDLH_LINK_UP) &&
- (status == PCIE_LTSSM_STATE_ACTIVE))
- return 1;
-
- return 0;
+ return ((regval & PCIE_XMLH_LINK_UP) && (regval & PCIE_RDLH_LINK_UP) &&
+ (status == PCIE_LTSSM_STATE_ACTIVE));
}
static int histb_pcie_start_link(struct dw_pcie *pci)
@@ -198,7 +195,7 @@ static int histb_pcie_host_init(struct dw_pcie_rp *pp)
}
static const struct dw_pcie_host_ops histb_pcie_host_ops = {
- .host_init = histb_pcie_host_init,
+ .init = histb_pcie_host_init,
};
static void histb_pcie_host_disable(struct histb_pcie *hipcie)
@@ -409,16 +406,21 @@ static int histb_pcie_probe(struct platform_device *pdev)
ret = histb_pcie_host_enable(pp);
if (ret) {
dev_err(dev, "failed to enable host\n");
- return ret;
+ goto err_exit_phy;
}
ret = dw_pcie_host_init(pp);
if (ret) {
dev_err(dev, "failed to initialize host\n");
- return ret;
+ goto err_exit_phy;
}
return 0;
+
+err_exit_phy:
+ phy_exit(hipcie->phy);
+
+ return ret;
}
static void histb_pcie_remove(struct platform_device *pdev)
@@ -427,8 +429,7 @@ static void histb_pcie_remove(struct platform_device *pdev)
histb_pcie_host_disable(hipcie);
- if (hipcie->phy)
- phy_exit(hipcie->phy);
+ phy_exit(hipcie->phy);
}
static const struct of_device_id histb_pcie_of_match[] = {
@@ -439,7 +440,7 @@ MODULE_DEVICE_TABLE(of, histb_pcie_of_match);
static struct platform_driver histb_pcie_platform_driver = {
.probe = histb_pcie_probe,
- .remove_new = histb_pcie_remove,
+ .remove = histb_pcie_remove,
.driver = {
.name = "histb-pcie",
.of_match_table = histb_pcie_of_match,
diff --git a/drivers/pci/controller/dwc/pcie-intel-gw.c b/drivers/pci/controller/dwc/pcie-intel-gw.c
index 9c7caed9e706..c21906eced61 100644
--- a/drivers/pci/controller/dwc/pcie-intel-gw.c
+++ b/drivers/pci/controller/dwc/pcie-intel-gw.c
@@ -9,9 +9,11 @@
#include <linux/clk.h>
#include <linux/gpio/consumer.h>
#include <linux/iopoll.h>
+#include <linux/mod_devicetable.h>
#include <linux/pci_regs.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/reset.h>
#include "../../pci.h"
@@ -55,7 +57,6 @@
PCIE_APP_IRN_INTA | PCIE_APP_IRN_INTB | \
PCIE_APP_IRN_INTC | PCIE_APP_IRN_INTD)
-#define BUS_IATU_OFFSET SZ_256M
#define RESET_INTERVAL_MS 100
struct intel_pcie {
@@ -130,7 +131,7 @@ static void intel_pcie_link_setup(struct intel_pcie *pcie)
static void intel_pcie_init_n_fts(struct dw_pcie *pci)
{
- switch (pci->link_gen) {
+ switch (pci->max_link_speed) {
case 3:
pci->n_fts[1] = PORT_AFR_N_FTS_GEN3;
break;
@@ -250,7 +251,7 @@ static int intel_pcie_wait_l2(struct intel_pcie *pcie)
int ret;
struct dw_pcie *pci = &pcie->pci;
- if (pci->link_gen < 3)
+ if (pci->max_link_speed < 3)
return 0;
/* Send PME_TURN_OFF message */
@@ -379,17 +380,11 @@ static int intel_pcie_rc_init(struct dw_pcie_rp *pp)
return intel_pcie_host_setup(pcie);
}
-static u64 intel_pcie_cpu_addr(struct dw_pcie *pcie, u64 cpu_addr)
-{
- return cpu_addr + BUS_IATU_OFFSET;
-}
-
static const struct dw_pcie_ops intel_pcie_ops = {
- .cpu_addr_fixup = intel_pcie_cpu_addr,
};
static const struct dw_pcie_host_ops intel_pcie_dw_ops = {
- .host_init = intel_pcie_rc_init,
+ .init = intel_pcie_rc_init,
};
static int intel_pcie_probe(struct platform_device *pdev)
@@ -407,6 +402,7 @@ static int intel_pcie_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pcie);
pci = &pcie->pci;
pci->dev = dev;
+ pci->use_parent_dt_ranges = true;
pp = &pci->pp;
ret = intel_pcie_get_resources(pdev);
@@ -441,7 +437,7 @@ static const struct of_device_id of_intel_pcie_match[] = {
static struct platform_driver intel_pcie_driver = {
.probe = intel_pcie_probe,
- .remove_new = intel_pcie_remove,
+ .remove = intel_pcie_remove,
.driver = {
.name = "intel-gw-pcie",
.of_match_table = of_intel_pcie_match,
diff --git a/drivers/pci/controller/dwc/pcie-keembay.c b/drivers/pci/controller/dwc/pcie-keembay.c
index f90f36bac018..60e74ac782af 100644
--- a/drivers/pci/controller/dwc/pcie-keembay.c
+++ b/drivers/pci/controller/dwc/pcie-keembay.c
@@ -101,7 +101,7 @@ static void keembay_pcie_ltssm_set(struct keembay_pcie *pcie, bool enable)
writel(val, pcie->apb_base + PCIE_REGS_PCIE_APP_CNTRL);
}
-static int keembay_pcie_link_up(struct dw_pcie *pci)
+static bool keembay_pcie_link_up(struct dw_pcie *pci)
{
struct keembay_pcie *pcie = dev_get_drvdata(pci->dev);
u32 val;
@@ -148,6 +148,13 @@ static const struct dw_pcie_ops keembay_pcie_ops = {
.stop_link = keembay_pcie_stop_link,
};
+static inline void keembay_pcie_disable_clock(void *data)
+{
+ struct clk *clk = data;
+
+ clk_disable_unprepare(clk);
+}
+
static inline struct clk *keembay_pcie_probe_clock(struct device *dev,
const char *id, u64 rate)
{
@@ -168,9 +175,7 @@ static inline struct clk *keembay_pcie_probe_clock(struct device *dev,
if (ret)
return ERR_PTR(ret);
- ret = devm_add_action_or_reset(dev,
- (void(*)(void *))clk_disable_unprepare,
- clk);
+ ret = devm_add_action_or_reset(dev, keembay_pcie_disable_clock, clk);
if (ret)
return ERR_PTR(ret);
@@ -284,19 +289,18 @@ static void keembay_pcie_ep_init(struct dw_pcie_ep *ep)
}
static int keembay_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
- enum pci_epc_irq_type type,
- u16 interrupt_num)
+ unsigned int type, u16 interrupt_num)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
switch (type) {
- case PCI_EPC_IRQ_LEGACY:
- /* Legacy interrupts are not supported in Keem Bay */
- dev_err(pci->dev, "Legacy IRQ is not supported\n");
+ case PCI_IRQ_INTX:
+ /* INTx interrupts are not supported in Keem Bay */
+ dev_err(pci->dev, "INTx IRQ is not supported\n");
return -EINVAL;
- case PCI_EPC_IRQ_MSI:
+ case PCI_IRQ_MSI:
return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
- case PCI_EPC_IRQ_MSIX:
+ case PCI_IRQ_MSIX:
return dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num);
default:
dev_err(pci->dev, "Unknown IRQ type %d\n", type);
@@ -305,11 +309,14 @@ static int keembay_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
}
static const struct pci_epc_features keembay_pcie_epc_features = {
- .linkup_notifier = false,
.msi_capable = true,
.msix_capable = true,
- .reserved_bar = BIT(BAR_1) | BIT(BAR_3) | BIT(BAR_5),
- .bar_fixed_64bit = BIT(BAR_0) | BIT(BAR_2) | BIT(BAR_4),
+ .bar[BAR_0] = { .only_64bit = true, },
+ .bar[BAR_1] = { .type = BAR_RESERVED, },
+ .bar[BAR_2] = { .only_64bit = true, },
+ .bar[BAR_3] = { .type = BAR_RESERVED, },
+ .bar[BAR_4] = { .only_64bit = true, },
+ .bar[BAR_5] = { .type = BAR_RESERVED, },
.align = SZ_16K,
};
@@ -320,7 +327,7 @@ keembay_pcie_get_features(struct dw_pcie_ep *ep)
}
static const struct dw_pcie_ep_ops keembay_pcie_ep_ops = {
- .ep_init = keembay_pcie_ep_init,
+ .init = keembay_pcie_ep_init,
.raise_irq = keembay_pcie_ep_raise_irq,
.get_features = keembay_pcie_get_features,
};
@@ -388,6 +395,7 @@ static int keembay_pcie_probe(struct platform_device *pdev)
struct keembay_pcie *pcie;
struct dw_pcie *pci;
enum dw_pcie_device_mode mode;
+ int ret;
data = device_get_match_data(dev);
if (!data)
@@ -422,11 +430,26 @@ static int keembay_pcie_probe(struct platform_device *pdev)
return -ENODEV;
pci->ep.ops = &keembay_pcie_ep_ops;
- return dw_pcie_ep_init(&pci->ep);
+ ret = dw_pcie_ep_init(&pci->ep);
+ if (ret)
+ return ret;
+
+ ret = dw_pcie_ep_init_registers(&pci->ep);
+ if (ret) {
+ dev_err(dev, "Failed to initialize DWC endpoint registers\n");
+ dw_pcie_ep_deinit(&pci->ep);
+ return ret;
+ }
+
+ pci_epc_init_notify(pci->ep.epc);
+
+ break;
default:
dev_err(dev, "Invalid device type %d\n", pcie->mode);
return -ENODEV;
}
+
+ return 0;
}
static const struct keembay_pcie_of_data keembay_pcie_rc_of_data = {
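The devm_add_action_or_reset() change above replaces a function-pointer cast of clk_disable_unprepare() with a small typed wrapper: calling a function through a pointer of a different prototype is undefined behaviour and trips kernel CFI, so devm actions must genuinely take a void * argument. A minimal sketch of the pattern, assuming the usual clk and device headers and hypothetical example_* names:

	static void example_disable_clk(void *data)
	{
		clk_disable_unprepare(data);
	}

	static int example_get_enabled_clk(struct device *dev, struct clk **out)
	{
		struct clk *clk;
		int ret;

		clk = devm_clk_get(dev, NULL);
		if (IS_ERR(clk))
			return PTR_ERR(clk);

		ret = clk_prepare_enable(clk);
		if (ret)
			return ret;

		/* Automatically undo the enable when the device is unbound */
		ret = devm_add_action_or_reset(dev, example_disable_clk, clk);
		if (ret)
			return ret;

		*out = clk;

		return 0;
	}

Where no extra setup (such as a fixed clock rate) is needed, devm_clk_get_enabled() folds the three steps into a single call.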
diff --git a/drivers/pci/controller/dwc/pcie-kirin.c b/drivers/pci/controller/dwc/pcie-kirin.c
index d09507f822a7..91559c8b1866 100644
--- a/drivers/pci/controller/dwc/pcie-kirin.c
+++ b/drivers/pci/controller/dwc/pcie-kirin.c
@@ -12,13 +12,10 @@
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/err.h>
-#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/mfd/syscon.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
-#include <linux/of_gpio.h>
+#include <linux/of.h>
#include <linux/of_pci.h>
#include <linux/phy/phy.h>
#include <linux/pci.h>
@@ -79,16 +76,16 @@ struct kirin_pcie {
void *phy_priv; /* only for PCIE_KIRIN_INTERNAL_PHY */
/* DWC PERST# */
- int gpio_id_dwc_perst;
+ struct gpio_desc *id_dwc_perst_gpio;
/* Per-slot PERST# */
int num_slots;
- int gpio_id_reset[MAX_PCI_SLOTS];
+ struct gpio_desc *id_reset_gpio[MAX_PCI_SLOTS];
const char *reset_names[MAX_PCI_SLOTS];
/* Per-slot clkreq */
int n_gpio_clkreq;
- int gpio_id_clkreq[MAX_PCI_SLOTS];
+ struct gpio_desc *id_clkreq_gpio[MAX_PCI_SLOTS];
const char *clkreq_names[MAX_PCI_SLOTS];
};
@@ -219,10 +216,9 @@ static int hi3660_pcie_phy_start(struct hi3660_pcie_phy *phy)
usleep_range(PIPE_CLK_WAIT_MIN, PIPE_CLK_WAIT_MAX);
reg_val = kirin_apb_phy_readl(phy, PCIE_APB_PHY_STATUS0);
- if (reg_val & PIPE_CLK_STABLE) {
- dev_err(dev, "PIPE clk is not stable\n");
- return -EINVAL;
- }
+ if (reg_val & PIPE_CLK_STABLE)
+ return dev_err_probe(dev, -ETIMEDOUT,
+ "PIPE clk is not stable\n");
return 0;
}
@@ -367,7 +363,6 @@ static int kirin_pcie_get_gpio_enable(struct kirin_pcie *pcie,
struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- char name[32];
int ret, i;
/* This is an optional property */
@@ -375,24 +370,27 @@ static int kirin_pcie_get_gpio_enable(struct kirin_pcie *pcie,
if (ret < 0)
return 0;
- if (ret > MAX_PCI_SLOTS) {
- dev_err(dev, "Too many GPIO clock requests!\n");
- return -EINVAL;
- }
+ if (ret > MAX_PCI_SLOTS)
+ return dev_err_probe(dev, -EINVAL,
+ "Too many GPIO clock requests!\n");
pcie->n_gpio_clkreq = ret;
for (i = 0; i < pcie->n_gpio_clkreq; i++) {
- pcie->gpio_id_clkreq[i] = of_get_named_gpio(dev->of_node,
- "hisilicon,clken-gpios", i);
- if (pcie->gpio_id_clkreq[i] < 0)
- return pcie->gpio_id_clkreq[i];
-
- sprintf(name, "pcie_clkreq_%d", i);
- pcie->clkreq_names[i] = devm_kstrdup_const(dev, name,
- GFP_KERNEL);
+ pcie->id_clkreq_gpio[i] = devm_gpiod_get_index(dev,
+ "hisilicon,clken", i,
+ GPIOD_OUT_LOW);
+ if (IS_ERR(pcie->id_clkreq_gpio[i]))
+ return dev_err_probe(dev, PTR_ERR(pcie->id_clkreq_gpio[i]),
+ "unable to get a valid clken gpio\n");
+
+ pcie->clkreq_names[i] = devm_kasprintf(dev, GFP_KERNEL,
+ "pcie_clkreq_%d", i);
if (!pcie->clkreq_names[i])
return -ENOMEM;
+
+ gpiod_set_consumer_name(pcie->id_clkreq_gpio[i],
+ pcie->clkreq_names[i]);
}
return 0;
@@ -403,57 +401,55 @@ static int kirin_pcie_parse_port(struct kirin_pcie *pcie,
struct device_node *node)
{
struct device *dev = &pdev->dev;
- struct device_node *parent, *child;
int ret, slot, i;
- char name[32];
- for_each_available_child_of_node(node, parent) {
- for_each_available_child_of_node(parent, child) {
+ for_each_available_child_of_node_scoped(node, parent) {
+ for_each_available_child_of_node_scoped(parent, child) {
i = pcie->num_slots;
- pcie->gpio_id_reset[i] = of_get_named_gpio(child,
- "reset-gpios", 0);
- if (pcie->gpio_id_reset[i] < 0)
- continue;
+ pcie->id_reset_gpio[i] = devm_fwnode_gpiod_get_index(dev,
+ of_fwnode_handle(child),
+ "reset", 0, GPIOD_OUT_LOW,
+ NULL);
+ if (IS_ERR(pcie->id_reset_gpio[i])) {
+ if (PTR_ERR(pcie->id_reset_gpio[i]) == -ENOENT)
+ continue;
+ return dev_err_probe(dev, PTR_ERR(pcie->id_reset_gpio[i]),
+ "unable to get a valid reset gpio\n");
+ }
+
+ if (pcie->num_slots + 1 >= MAX_PCI_SLOTS)
+ return dev_err_probe(dev, -EINVAL,
+ "Too many PCI slots!\n");
pcie->num_slots++;
- if (pcie->num_slots > MAX_PCI_SLOTS) {
- dev_err(dev, "Too many PCI slots!\n");
- ret = -EINVAL;
- goto put_node;
- }
ret = of_pci_get_devfn(child);
- if (ret < 0) {
- dev_err(dev, "failed to parse devfn: %d\n", ret);
- goto put_node;
- }
+ if (ret < 0)
+ return dev_err_probe(dev, ret,
+ "failed to parse devfn\n");
slot = PCI_SLOT(ret);
- sprintf(name, "pcie_perst_%d", slot);
- pcie->reset_names[i] = devm_kstrdup_const(dev, name,
- GFP_KERNEL);
- if (!pcie->reset_names[i]) {
- ret = -ENOMEM;
- goto put_node;
- }
+ pcie->reset_names[i] = devm_kasprintf(dev, GFP_KERNEL,
+ "pcie_perst_%d",
+ slot);
+ if (!pcie->reset_names[i])
+ return -ENOMEM;
+
+ gpiod_set_consumer_name(pcie->id_reset_gpio[i],
+ pcie->reset_names[i]);
}
}
return 0;
-
-put_node:
- of_node_put(child);
- of_node_put(parent);
- return ret;
}
static long kirin_pcie_get_resource(struct kirin_pcie *kirin_pcie,
struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct device_node *child, *node = dev->of_node;
+ struct device_node *node = dev->of_node;
void __iomem *apb_base;
int ret;
@@ -467,31 +463,24 @@ static long kirin_pcie_get_resource(struct kirin_pcie *kirin_pcie,
return PTR_ERR(kirin_pcie->apb);
/* pcie internal PERST# gpio */
- kirin_pcie->gpio_id_dwc_perst = of_get_named_gpio(dev->of_node,
- "reset-gpios", 0);
- if (kirin_pcie->gpio_id_dwc_perst == -EPROBE_DEFER) {
- return -EPROBE_DEFER;
- } else if (!gpio_is_valid(kirin_pcie->gpio_id_dwc_perst)) {
- dev_err(dev, "unable to get a valid gpio pin\n");
- return -ENODEV;
- }
+ kirin_pcie->id_dwc_perst_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(kirin_pcie->id_dwc_perst_gpio))
+ return dev_err_probe(dev, PTR_ERR(kirin_pcie->id_dwc_perst_gpio),
+ "unable to get a valid gpio pin\n");
+ gpiod_set_consumer_name(kirin_pcie->id_dwc_perst_gpio, "pcie_perst_bridge");
ret = kirin_pcie_get_gpio_enable(kirin_pcie, pdev);
if (ret)
return ret;
/* Parse OF children */
- for_each_available_child_of_node(node, child) {
+ for_each_available_child_of_node_scoped(node, child) {
ret = kirin_pcie_parse_port(kirin_pcie, pdev, child);
if (ret)
- goto put_node;
+ return ret;
}
return 0;
-
-put_node:
- of_node_put(child);
- return ret;
}
static void kirin_pcie_sideband_dbi_w_mode(struct kirin_pcie *kirin_pcie,
@@ -557,7 +546,7 @@ static int kirin_pcie_add_bus(struct pci_bus *bus)
/* Send PERST# to each slot */
for (i = 0; i < kirin_pcie->num_slots; i++) {
- ret = gpio_direction_output(kirin_pcie->gpio_id_reset[i], 1);
+ ret = gpiod_direction_output_raw(kirin_pcie->id_reset_gpio[i], 1);
if (ret) {
dev_err(pci->dev, "PERST# %s error: %d\n",
kirin_pcie->reset_names[i], ret);
@@ -597,16 +586,13 @@ static void kirin_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base,
kirin_pcie_sideband_dbi_w_mode(kirin_pcie, false);
}
-static int kirin_pcie_link_up(struct dw_pcie *pci)
+static bool kirin_pcie_link_up(struct dw_pcie *pci)
{
struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci);
u32 val;
regmap_read(kirin_pcie->apb, PCIE_APB_PHY_STATUS0, &val);
- if ((val & PCIE_LINKUP_ENABLE) == PCIE_LINKUP_ENABLE)
- return 1;
-
- return 0;
+ return (val & PCIE_LINKUP_ENABLE) == PCIE_LINKUP_ENABLE;
}
static int kirin_pcie_start_link(struct dw_pcie *pci)
@@ -627,44 +613,6 @@ static int kirin_pcie_host_init(struct dw_pcie_rp *pp)
return 0;
}
-static int kirin_pcie_gpio_request(struct kirin_pcie *kirin_pcie,
- struct device *dev)
-{
- int ret, i;
-
- for (i = 0; i < kirin_pcie->num_slots; i++) {
- if (!gpio_is_valid(kirin_pcie->gpio_id_reset[i])) {
- dev_err(dev, "unable to get a valid %s gpio\n",
- kirin_pcie->reset_names[i]);
- return -ENODEV;
- }
-
- ret = devm_gpio_request(dev, kirin_pcie->gpio_id_reset[i],
- kirin_pcie->reset_names[i]);
- if (ret)
- return ret;
- }
-
- for (i = 0; i < kirin_pcie->n_gpio_clkreq; i++) {
- if (!gpio_is_valid(kirin_pcie->gpio_id_clkreq[i])) {
- dev_err(dev, "unable to get a valid %s gpio\n",
- kirin_pcie->clkreq_names[i]);
- return -ENODEV;
- }
-
- ret = devm_gpio_request(dev, kirin_pcie->gpio_id_clkreq[i],
- kirin_pcie->clkreq_names[i]);
- if (ret)
- return ret;
-
- ret = gpio_direction_output(kirin_pcie->gpio_id_clkreq[i], 0);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
static const struct dw_pcie_ops kirin_dw_pcie_ops = {
.read_dbi = kirin_pcie_read_dbi,
.write_dbi = kirin_pcie_write_dbi,
@@ -673,7 +621,7 @@ static const struct dw_pcie_ops kirin_dw_pcie_ops = {
};
static const struct dw_pcie_host_ops kirin_pcie_host_ops = {
- .host_init = kirin_pcie_host_init,
+ .init = kirin_pcie_host_init,
};
static int kirin_pcie_power_off(struct kirin_pcie *kirin_pcie)
@@ -684,7 +632,7 @@ static int kirin_pcie_power_off(struct kirin_pcie *kirin_pcie)
return hi3660_pcie_phy_power_off(kirin_pcie);
for (i = 0; i < kirin_pcie->n_gpio_clkreq; i++)
- gpio_direction_output(kirin_pcie->gpio_id_clkreq[i], 1);
+ gpiod_direction_output_raw(kirin_pcie->id_clkreq_gpio[i], 1);
phy_power_off(kirin_pcie->phy);
phy_exit(kirin_pcie->phy);
@@ -711,10 +659,6 @@ static int kirin_pcie_power_on(struct platform_device *pdev,
if (IS_ERR(kirin_pcie->phy))
return PTR_ERR(kirin_pcie->phy);
- ret = kirin_pcie_gpio_request(kirin_pcie, dev);
- if (ret)
- return ret;
-
ret = phy_init(kirin_pcie->phy);
if (ret)
goto err;
@@ -727,11 +671,9 @@ static int kirin_pcie_power_on(struct platform_device *pdev,
/* perst assert Endpoint */
usleep_range(REF_2_PERST_MIN, REF_2_PERST_MAX);
- if (!gpio_request(kirin_pcie->gpio_id_dwc_perst, "pcie_perst_bridge")) {
- ret = gpio_direction_output(kirin_pcie->gpio_id_dwc_perst, 1);
- if (ret)
- goto err;
- }
+ ret = gpiod_direction_output_raw(kirin_pcie->id_dwc_perst_gpio, 1);
+ if (ret)
+ goto err;
usleep_range(PERST_2_ACCESS_MIN, PERST_2_ACCESS_MAX);
@@ -742,15 +684,13 @@ err:
return ret;
}
-static int __exit kirin_pcie_remove(struct platform_device *pdev)
+static void kirin_pcie_remove(struct platform_device *pdev)
{
struct kirin_pcie *kirin_pcie = platform_get_drvdata(pdev);
dw_pcie_host_deinit(&kirin_pcie->pci->pp);
kirin_pcie_power_off(kirin_pcie);
-
- return 0;
}
struct kirin_pcie_data {
@@ -779,16 +719,9 @@ static int kirin_pcie_probe(struct platform_device *pdev)
struct dw_pcie *pci;
int ret;
- if (!dev->of_node) {
- dev_err(dev, "NULL node\n");
- return -EINVAL;
- }
-
data = of_device_get_match_data(dev);
- if (!data) {
- dev_err(dev, "OF data missing\n");
- return -EINVAL;
- }
+ if (!data)
+ return dev_err_probe(dev, -EINVAL, "OF data missing\n");
kirin_pcie = devm_kzalloc(dev, sizeof(struct kirin_pcie), GFP_KERNEL);
if (!kirin_pcie)
@@ -819,7 +752,7 @@ static int kirin_pcie_probe(struct platform_device *pdev)
static struct platform_driver kirin_pcie_driver = {
.probe = kirin_pcie_probe,
- .remove = __exit_p(kirin_pcie_remove),
+ .remove = kirin_pcie_remove,
.driver = {
.name = "kirin-pcie",
.of_match_table = kirin_pcie_match,
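The Kirin changes above migrate from the legacy integer GPIO API (of_get_named_gpio() plus gpio_request()/gpio_direction_output()) to GPIO descriptors, which combine lookup, request and initial direction in one devm-managed call. gpiod_direction_output_raw() is then used so the written value keeps the same meaning as with the old, polarity-unaware gpio_direction_output(). A minimal sketch of the descriptor-based PERST# handling, with hypothetical example_* names:

	static int example_get_perst(struct device *dev, struct gpio_desc **out)
	{
		struct gpio_desc *perst;

		/* Looks up "reset-gpios", requests it and drives it low */
		perst = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
		if (IS_ERR(perst))
			return dev_err_probe(dev, PTR_ERR(perst),
					     "unable to get a valid gpio pin\n");

		gpiod_set_consumer_name(perst, "pcie_perst_bridge");
		*out = perst;

		return 0;
	}

	/* Later, deassert PERST# at the raw pin level: */
	/* gpiod_direction_output_raw(perst, 1); */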
diff --git a/drivers/pci/controller/dwc/pcie-nxp-s32g.c b/drivers/pci/controller/dwc/pcie-nxp-s32g.c
new file mode 100644
index 000000000000..47745749f75c
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-nxp-s32g.c
@@ -0,0 +1,406 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host controller driver for NXP S32G SoCs
+ *
+ * Copyright 2019-2025 NXP
+ */
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/pci.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/sizes.h>
+#include <linux/types.h>
+
+#include "pcie-designware.h"
+
+/* PCIe controller Sub-System */
+
+/* PCIe controller 0 General Control 1 */
+#define PCIE_S32G_PE0_GEN_CTRL_1 0x50
+#define DEVICE_TYPE_MASK GENMASK(3, 0)
+#define SRIS_MODE BIT(8)
+
+/* PCIe controller 0 General Control 3 */
+#define PCIE_S32G_PE0_GEN_CTRL_3 0x58
+#define LTSSM_EN BIT(0)
+
+/* PCIe Controller 0 Interrupt Status */
+#define PCIE_S32G_PE0_INT_STS 0xE8
+#define HP_INT_STS BIT(6)
+
+/* Boundary between peripheral space and physical memory space */
+#define S32G_MEMORY_BOUNDARY_ADDR 0x80000000
+
+struct s32g_pcie_port {
+ struct list_head list;
+ struct phy *phy;
+};
+
+struct s32g_pcie {
+ struct dw_pcie pci;
+ void __iomem *ctrl_base;
+ struct list_head ports;
+};
+
+#define to_s32g_from_dw_pcie(x) \
+ container_of(x, struct s32g_pcie, pci)
+
+static void s32g_pcie_writel_ctrl(struct s32g_pcie *s32g_pp, u32 reg, u32 val)
+{
+ writel(val, s32g_pp->ctrl_base + reg);
+}
+
+static u32 s32g_pcie_readl_ctrl(struct s32g_pcie *s32g_pp, u32 reg)
+{
+ return readl(s32g_pp->ctrl_base + reg);
+}
+
+static void s32g_pcie_enable_ltssm(struct s32g_pcie *s32g_pp)
+{
+ u32 reg;
+
+ reg = s32g_pcie_readl_ctrl(s32g_pp, PCIE_S32G_PE0_GEN_CTRL_3);
+ reg |= LTSSM_EN;
+ s32g_pcie_writel_ctrl(s32g_pp, PCIE_S32G_PE0_GEN_CTRL_3, reg);
+}
+
+static void s32g_pcie_disable_ltssm(struct s32g_pcie *s32g_pp)
+{
+ u32 reg;
+
+ reg = s32g_pcie_readl_ctrl(s32g_pp, PCIE_S32G_PE0_GEN_CTRL_3);
+ reg &= ~LTSSM_EN;
+ s32g_pcie_writel_ctrl(s32g_pp, PCIE_S32G_PE0_GEN_CTRL_3, reg);
+}
+
+static int s32g_pcie_start_link(struct dw_pcie *pci)
+{
+ struct s32g_pcie *s32g_pp = to_s32g_from_dw_pcie(pci);
+
+ s32g_pcie_enable_ltssm(s32g_pp);
+
+ return 0;
+}
+
+static void s32g_pcie_stop_link(struct dw_pcie *pci)
+{
+ struct s32g_pcie *s32g_pp = to_s32g_from_dw_pcie(pci);
+
+ s32g_pcie_disable_ltssm(s32g_pp);
+}
+
+static struct dw_pcie_ops s32g_pcie_ops = {
+ .start_link = s32g_pcie_start_link,
+ .stop_link = s32g_pcie_stop_link,
+};
+
+/* Configure the AMBA AXI Coherency Extensions (ACE) interface */
+static void s32g_pcie_reset_mstr_ace(struct dw_pcie *pci)
+{
+ u32 ddr_base_low = lower_32_bits(S32G_MEMORY_BOUNDARY_ADDR);
+ u32 ddr_base_high = upper_32_bits(S32G_MEMORY_BOUNDARY_ADDR);
+
+ dw_pcie_dbi_ro_wr_en(pci);
+ dw_pcie_writel_dbi(pci, COHERENCY_CONTROL_3_OFF, 0x0);
+
+ /*
+ * Ncore is a cache-coherent interconnect module that enables the
+ * integration of heterogeneous coherent and non-coherent agents in
+ * the chip. Ncore transactions to peripherals should be non-coherent,
+ * otherwise Ncore might drop them.
+ *
+ * One example where this is needed are PCIe MSIs, which use NoSnoop=0
+ * and might end up routed to Ncore. PCIe coherent traffic (e.g. MSIs)
+ * that targets peripheral space will be dropped by Ncore because
+ * peripherals on S32G are not coherent as slaves. We add a hard
+ * boundary in the PCIe controller coherency control registers to
+ * separate physical memory space from peripheral space.
+ *
+ * Define the start of DDR as seen by Linux as this boundary between
+ * "memory" and "peripherals", with peripherals being below.
+ */
+ dw_pcie_writel_dbi(pci, COHERENCY_CONTROL_1_OFF,
+ (ddr_base_low & CFG_MEMTYPE_BOUNDARY_LOW_ADDR_MASK));
+ dw_pcie_writel_dbi(pci, COHERENCY_CONTROL_2_OFF, ddr_base_high);
+ dw_pcie_dbi_ro_wr_dis(pci);
+}
+
+static int s32g_init_pcie_controller(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct s32g_pcie *s32g_pp = to_s32g_from_dw_pcie(pci);
+ u32 val;
+
+ /* Set RP mode */
+ val = s32g_pcie_readl_ctrl(s32g_pp, PCIE_S32G_PE0_GEN_CTRL_1);
+ val &= ~DEVICE_TYPE_MASK;
+ val |= FIELD_PREP(DEVICE_TYPE_MASK, PCI_EXP_TYPE_ROOT_PORT);
+
+ /* Use default CRNS */
+ val &= ~SRIS_MODE;
+
+ s32g_pcie_writel_ctrl(s32g_pp, PCIE_S32G_PE0_GEN_CTRL_1, val);
+
+ /*
+ * Make sure we use the coherency defaults (just in case the settings
+ * have been changed from their reset values)
+ */
+ s32g_pcie_reset_mstr_ace(pci);
+
+ dw_pcie_dbi_ro_wr_en(pci);
+
+ val = dw_pcie_readl_dbi(pci, PCIE_PORT_FORCE);
+ val |= PORT_FORCE_DO_DESKEW_FOR_SRIS;
+ dw_pcie_writel_dbi(pci, PCIE_PORT_FORCE, val);
+
+ val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
+ val |= GEN3_RELATED_OFF_EQ_PHASE_2_3;
+ dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
+
+ dw_pcie_dbi_ro_wr_dis(pci);
+
+ return 0;
+}
+
+static const struct dw_pcie_host_ops s32g_pcie_host_ops = {
+ .init = s32g_init_pcie_controller,
+};
+
+static int s32g_init_pcie_phy(struct s32g_pcie *s32g_pp)
+{
+ struct dw_pcie *pci = &s32g_pp->pci;
+ struct device *dev = pci->dev;
+ struct s32g_pcie_port *port, *tmp;
+ int ret;
+
+ list_for_each_entry(port, &s32g_pp->ports, list) {
+ ret = phy_init(port->phy);
+ if (ret) {
+ dev_err(dev, "Failed to init serdes PHY\n");
+ goto err_phy_revert;
+ }
+
+ ret = phy_set_mode_ext(port->phy, PHY_MODE_PCIE, 0);
+ if (ret) {
+ dev_err(dev, "Failed to set mode on serdes PHY\n");
+ goto err_phy_exit;
+ }
+
+ ret = phy_power_on(port->phy);
+ if (ret) {
+ dev_err(dev, "Failed to power on serdes PHY\n");
+ goto err_phy_exit;
+ }
+ }
+
+ return 0;
+
+err_phy_exit:
+ phy_exit(port->phy);
+
+err_phy_revert:
+ list_for_each_entry_continue_reverse(port, &s32g_pp->ports, list) {
+ phy_power_off(port->phy);
+ phy_exit(port->phy);
+ }
+
+ list_for_each_entry_safe(port, tmp, &s32g_pp->ports, list)
+ list_del(&port->list);
+
+ return ret;
+}
+
+static void s32g_deinit_pcie_phy(struct s32g_pcie *s32g_pp)
+{
+ struct s32g_pcie_port *port, *tmp;
+
+ list_for_each_entry_safe(port, tmp, &s32g_pp->ports, list) {
+ phy_power_off(port->phy);
+ phy_exit(port->phy);
+ list_del(&port->list);
+ }
+}
+
+static int s32g_pcie_init(struct device *dev, struct s32g_pcie *s32g_pp)
+{
+ s32g_pcie_disable_ltssm(s32g_pp);
+
+ return s32g_init_pcie_phy(s32g_pp);
+}
+
+static void s32g_pcie_deinit(struct s32g_pcie *s32g_pp)
+{
+ s32g_pcie_disable_ltssm(s32g_pp);
+
+ s32g_deinit_pcie_phy(s32g_pp);
+}
+
+static int s32g_pcie_parse_port(struct s32g_pcie *s32g_pp, struct device_node *node)
+{
+ struct device *dev = s32g_pp->pci.dev;
+ struct s32g_pcie_port *port;
+ int num_lanes;
+
+ port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
+ if (!port)
+ return -ENOMEM;
+
+ port->phy = devm_of_phy_get(dev, node, NULL);
+ if (IS_ERR(port->phy))
+ return dev_err_probe(dev, PTR_ERR(port->phy),
+ "Failed to get serdes PHY\n");
+
+ INIT_LIST_HEAD(&port->list);
+ list_add_tail(&port->list, &s32g_pp->ports);
+
+ /*
+ * The DWC core initialization code cannot yet parse the num-lanes
+ * attribute in the Root Port node. The S32G only supports one Root
+ * Port for now so its driver can parse the node and set the num_lanes
+ * field of struct dw_pcie before calling dw_pcie_host_init().
+ */
+ if (!of_property_read_u32(node, "num-lanes", &num_lanes))
+ s32g_pp->pci.num_lanes = num_lanes;
+
+ return 0;
+}
+
+static int s32g_pcie_parse_ports(struct device *dev, struct s32g_pcie *s32g_pp)
+{
+ struct s32g_pcie_port *port, *tmp;
+ int ret = -ENOENT;
+
+ for_each_available_child_of_node_scoped(dev->of_node, of_port) {
+ if (!of_node_is_type(of_port, "pci"))
+ continue;
+
+ ret = s32g_pcie_parse_port(s32g_pp, of_port);
+ if (ret)
+ goto err_port;
+	}
+
+	if (!ret)
+		return 0;
+
+err_port:
+ list_for_each_entry_safe(port, tmp, &s32g_pp->ports, list)
+ list_del(&port->list);
+
+ return ret;
+}
+
+static int s32g_pcie_get_resources(struct platform_device *pdev,
+ struct s32g_pcie *s32g_pp)
+{
+ struct dw_pcie *pci = &s32g_pp->pci;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ pci->dev = dev;
+ pci->ops = &s32g_pcie_ops;
+
+ s32g_pp->ctrl_base = devm_platform_ioremap_resource_byname(pdev, "ctrl");
+ if (IS_ERR(s32g_pp->ctrl_base))
+ return PTR_ERR(s32g_pp->ctrl_base);
+
+ INIT_LIST_HEAD(&s32g_pp->ports);
+
+ ret = s32g_pcie_parse_ports(dev, s32g_pp);
+ if (ret)
+		return dev_err_probe(dev, ret,
+				     "Failed to parse Root Port\n");
+
+ platform_set_drvdata(pdev, s32g_pp);
+
+ return 0;
+}
+
+static int s32g_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct s32g_pcie *s32g_pp;
+ struct dw_pcie_rp *pp;
+ int ret;
+
+ s32g_pp = devm_kzalloc(dev, sizeof(*s32g_pp), GFP_KERNEL);
+ if (!s32g_pp)
+ return -ENOMEM;
+
+ ret = s32g_pcie_get_resources(pdev, s32g_pp);
+ if (ret)
+ return ret;
+
+ pm_runtime_no_callbacks(dev);
+ devm_pm_runtime_enable(dev);
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0)
+ goto err_pm_runtime_put;
+
+ ret = s32g_pcie_init(dev, s32g_pp);
+ if (ret)
+ goto err_pm_runtime_put;
+
+ pp = &s32g_pp->pci.pp;
+ pp->ops = &s32g_pcie_host_ops;
+ pp->use_atu_msg = true;
+
+ ret = dw_pcie_host_init(pp);
+ if (ret)
+ goto err_pcie_deinit;
+
+ return 0;
+
+err_pcie_deinit:
+ s32g_pcie_deinit(s32g_pp);
+err_pm_runtime_put:
+ pm_runtime_put(dev);
+
+ return ret;
+}
+
+static int s32g_pcie_suspend_noirq(struct device *dev)
+{
+ struct s32g_pcie *s32g_pp = dev_get_drvdata(dev);
+ struct dw_pcie *pci = &s32g_pp->pci;
+
+ return dw_pcie_suspend_noirq(pci);
+}
+
+static int s32g_pcie_resume_noirq(struct device *dev)
+{
+ struct s32g_pcie *s32g_pp = dev_get_drvdata(dev);
+ struct dw_pcie *pci = &s32g_pp->pci;
+
+ return dw_pcie_resume_noirq(pci);
+}
+
+static const struct dev_pm_ops s32g_pcie_pm_ops = {
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(s32g_pcie_suspend_noirq,
+ s32g_pcie_resume_noirq)
+};
+
+static const struct of_device_id s32g_pcie_of_match[] = {
+ { .compatible = "nxp,s32g2-pcie" },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, s32g_pcie_of_match);
+
+static struct platform_driver s32g_pcie_driver = {
+ .driver = {
+ .name = "s32g-pcie",
+ .of_match_table = s32g_pcie_of_match,
+ .suppress_bind_attrs = true,
+ .pm = pm_sleep_ptr(&s32g_pcie_pm_ops),
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
+ .probe = s32g_pcie_probe,
+};
+
+builtin_platform_driver(s32g_pcie_driver);
+
+MODULE_AUTHOR("Ionut Vicovan <Ionut.Vicovan@nxp.com>");
+MODULE_DESCRIPTION("NXP S32G PCIe Host controller driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pci/controller/dwc/pcie-qcom-common.c b/drivers/pci/controller/dwc/pcie-qcom-common.c
new file mode 100644
index 000000000000..01c5387e53bf
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-qcom-common.c
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/pci.h>
+
+#include "pcie-designware.h"
+#include "pcie-qcom-common.h"
+
+void qcom_pcie_common_set_equalization(struct dw_pcie *pci)
+{
+ struct device *dev = pci->dev;
+ u32 reg;
+ u16 speed;
+
+ /*
+ * GEN3_RELATED_OFF register is repurposed to apply equalization
+ * settings at various data transmission rates through registers namely
+ * GEN3_EQ_*. The RATE_SHADOW_SEL bit field of GEN3_RELATED_OFF
+ * determines the data rate for which these equalization settings are
+ * applied.
+ */
+
+ for (speed = PCIE_SPEED_8_0GT; speed <= pcie_link_speed[pci->max_link_speed]; speed++) {
+ if (speed > PCIE_SPEED_32_0GT) {
+ dev_warn(dev, "Skipped equalization settings for unsupported data rate\n");
+ break;
+ }
+
+ reg = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
+ reg &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL;
+ reg &= ~GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK;
+ reg |= FIELD_PREP(GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK,
+ speed - PCIE_SPEED_8_0GT);
+ dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, reg);
+
+ reg = dw_pcie_readl_dbi(pci, GEN3_EQ_FB_MODE_DIR_CHANGE_OFF);
+ reg &= ~(GEN3_EQ_FMDC_T_MIN_PHASE23 |
+ GEN3_EQ_FMDC_N_EVALS |
+ GEN3_EQ_FMDC_MAX_PRE_CURSOR_DELTA |
+ GEN3_EQ_FMDC_MAX_POST_CURSOR_DELTA);
+ reg |= FIELD_PREP(GEN3_EQ_FMDC_T_MIN_PHASE23, 0x1) |
+ FIELD_PREP(GEN3_EQ_FMDC_N_EVALS, 0xd) |
+ FIELD_PREP(GEN3_EQ_FMDC_MAX_PRE_CURSOR_DELTA, 0x5) |
+ FIELD_PREP(GEN3_EQ_FMDC_MAX_POST_CURSOR_DELTA, 0x5);
+ dw_pcie_writel_dbi(pci, GEN3_EQ_FB_MODE_DIR_CHANGE_OFF, reg);
+
+ reg = dw_pcie_readl_dbi(pci, GEN3_EQ_CONTROL_OFF);
+ reg &= ~(GEN3_EQ_CONTROL_OFF_FB_MODE |
+ GEN3_EQ_CONTROL_OFF_PHASE23_EXIT_MODE |
+ GEN3_EQ_CONTROL_OFF_FOM_INC_INITIAL_EVAL |
+ GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC);
+ dw_pcie_writel_dbi(pci, GEN3_EQ_CONTROL_OFF, reg);
+ }
+}
+EXPORT_SYMBOL_GPL(qcom_pcie_common_set_equalization);
+
+void qcom_pcie_common_set_16gt_lane_margining(struct dw_pcie *pci)
+{
+ u32 reg;
+
+ reg = dw_pcie_readl_dbi(pci, GEN4_LANE_MARGINING_1_OFF);
+ reg &= ~(MARGINING_MAX_VOLTAGE_OFFSET |
+ MARGINING_NUM_VOLTAGE_STEPS |
+ MARGINING_MAX_TIMING_OFFSET |
+ MARGINING_NUM_TIMING_STEPS);
+ reg |= FIELD_PREP(MARGINING_MAX_VOLTAGE_OFFSET, 0x24) |
+ FIELD_PREP(MARGINING_NUM_VOLTAGE_STEPS, 0x78) |
+ FIELD_PREP(MARGINING_MAX_TIMING_OFFSET, 0x32) |
+ FIELD_PREP(MARGINING_NUM_TIMING_STEPS, 0x10);
+ dw_pcie_writel_dbi(pci, GEN4_LANE_MARGINING_1_OFF, reg);
+
+ reg = dw_pcie_readl_dbi(pci, GEN4_LANE_MARGINING_2_OFF);
+ reg |= MARGINING_IND_ERROR_SAMPLER |
+ MARGINING_SAMPLE_REPORTING_METHOD |
+ MARGINING_IND_LEFT_RIGHT_TIMING |
+ MARGINING_VOLTAGE_SUPPORTED;
+ reg &= ~(MARGINING_IND_UP_DOWN_VOLTAGE |
+ MARGINING_MAXLANES |
+ MARGINING_SAMPLE_RATE_TIMING |
+ MARGINING_SAMPLE_RATE_VOLTAGE);
+ reg |= FIELD_PREP(MARGINING_MAXLANES, pci->num_lanes) |
+ FIELD_PREP(MARGINING_SAMPLE_RATE_TIMING, 0x3f) |
+ FIELD_PREP(MARGINING_SAMPLE_RATE_VOLTAGE, 0x3f);
+ dw_pcie_writel_dbi(pci, GEN4_LANE_MARGINING_2_OFF, reg);
+}
+EXPORT_SYMBOL_GPL(qcom_pcie_common_set_16gt_lane_margining);
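qcom_pcie_common_set_equalization() programs the GEN3_EQ_* settings once per supported data rate, from 8.0 GT/s up to the controller's maximum, selecting each rate through RATE_SHADOW_SEL before writing. A sketch of the intended call site around link start, mirroring how pcie-qcom.c and pcie-qcom-ep.c use these helpers later in this series (example_* name is illustrative):

	static int example_start_link(struct dw_pcie *pci)
	{
		qcom_pcie_common_set_equalization(pci);

		/* Gen4 (16.0 GT/s) additionally needs the lane margining parameters */
		if (pcie_link_speed[pci->max_link_speed] == PCIE_SPEED_16_0GT)
			qcom_pcie_common_set_16gt_lane_margining(pci);

		/* ...then let the SoC glue enable LTSSM / link training */
		return 0;
	}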
diff --git a/drivers/pci/controller/dwc/pcie-qcom-common.h b/drivers/pci/controller/dwc/pcie-qcom-common.h
new file mode 100644
index 000000000000..7f5ca2fd9a72
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-qcom-common.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _PCIE_QCOM_COMMON_H
+#define _PCIE_QCOM_COMMON_H
+
+struct dw_pcie;
+
+void qcom_pcie_common_set_equalization(struct dw_pcie *pci);
+void qcom_pcie_common_set_16gt_lane_margining(struct dw_pcie *pci);
+
+#endif
diff --git a/drivers/pci/controller/dwc/pcie-qcom-ep.c b/drivers/pci/controller/dwc/pcie-qcom-ep.c
index 0fe7f06f2102..f1bc0ac81a92 100644
--- a/drivers/pci/controller/dwc/pcie-qcom-ep.c
+++ b/drivers/pci/controller/dwc/pcie-qcom-ep.c
@@ -13,6 +13,7 @@
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
+#include <linux/interconnect.h>
#include <linux/mfd/syscon.h>
#include <linux/phy/pcie.h>
#include <linux/phy/phy.h>
@@ -22,7 +23,9 @@
#include <linux/reset.h>
#include <linux/module.h>
+#include "../../pci.h"
#include "pcie-designware.h"
+#include "pcie-qcom-common.h"
/* PARF registers */
#define PARF_SYS_CTRL 0x00
@@ -45,6 +48,7 @@
#define PARF_DBI_BASE_ADDR_HI 0x354
#define PARF_SLV_ADDR_SPACE_SIZE 0x358
#define PARF_SLV_ADDR_SPACE_SIZE_HI 0x35c
+#define PARF_NO_SNOOP_OVERRIDE 0x3d4
#define PARF_ATU_BASE_ADDR 0x634
#define PARF_ATU_BASE_ADDR_HI 0x638
#define PARF_SRIS_MODE 0x644
@@ -55,6 +59,8 @@
#define PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L2 0xc88
#define PARF_DEVICE_TYPE 0x1000
#define PARF_BDF_TO_SID_CFG 0x2c00
+#define PARF_INT_ALL_5_MASK 0x2dcc
+#define PARF_INT_ALL_3_MASK 0x2e18
/* PARF_INT_ALL_{STATUS/CLEAR/MASK} register fields */
#define PARF_INT_ALL_LINK_DOWN BIT(1)
@@ -74,6 +80,7 @@
#define PARF_INT_ALL_PLS_ERR BIT(15)
#define PARF_INT_ALL_PME_LEGACY BIT(16)
#define PARF_INT_ALL_PLS_PME BIT(17)
+#define PARF_INT_ALL_EDMA BIT(22)
/* PARF_BDF_TO_SID_CFG register fields */
#define PARF_BDF_TO_SID_BYPASS BIT(0)
@@ -83,6 +90,10 @@
#define PARF_DEBUG_INT_CFG_BUS_MASTER_EN BIT(2)
#define PARF_DEBUG_INT_RADM_PM_TURNOFF BIT(3)
+/* PARF_NO_SNOOP_OVERRIDE register fields */
+#define WR_NO_SNOOP_OVERRIDE_EN BIT(1)
+#define RD_NO_SNOOP_OVERRIDE_EN BIT(3)
+
/* PARF_DEVICE_TYPE register fields */
#define PARF_DEVICE_TYPE_EP 0x0
@@ -119,8 +130,15 @@
/* PARF_CFG_BITS register fields */
#define PARF_CFG_BITS_REQ_EXIT_L1SS_MSI_LTR_EN BIT(1)
+/* PARF_INT_ALL_5_MASK fields */
+#define PARF_INT_ALL_5_MHI_RAM_DATA_PARITY_ERR BIT(0)
+
+/* PARF_INT_ALL_3_MASK fields */
+#define PARF_INT_ALL_3_PTM_UPDATING BIT(4)
+
/* ELBI registers */
#define ELBI_SYS_STTS 0x08
+#define ELBI_CS2_ENABLE 0xa4
/* DBI registers */
#define DBI_CON_STATUS 0x44
@@ -133,6 +151,9 @@
#define CORE_RESET_TIME_US_MAX 1005
#define WAKE_DELAY_US 2000 /* 2 ms */
+#define QCOM_PCIE_LINK_SPEED_TO_BW(speed) \
+ Mbps_to_icc(PCIE_SPEED2MBS_ENC(pcie_link_speed[speed]))
+
#define to_pcie_ep(x) dev_get_drvdata((x)->dev)
enum qcom_pcie_ep_link_status {
@@ -143,10 +164,21 @@ enum qcom_pcie_ep_link_status {
};
/**
+ * struct qcom_pcie_ep_cfg - Per SoC config struct
+ * @hdma_support: HDMA support on this SoC
+ * @override_no_snoop: Override NO_SNOOP attribute in TLP to enable cache snooping
+ * @disable_mhi_ram_parity_check: Disable MHI RAM data parity error check
+ */
+struct qcom_pcie_ep_cfg {
+ bool hdma_support;
+ bool override_no_snoop;
+ bool disable_mhi_ram_parity_check;
+};
+
+/**
* struct qcom_pcie_ep - Qualcomm PCIe Endpoint Controller
* @pci: Designware PCIe controller struct
* @parf: Qualcomm PCIe specific PARF register base
- * @elbi: Designware PCIe specific ELBI register base
* @mmio: MMIO register base
* @perst_map: PERST regmap
* @mmio_res: MMIO region resource
@@ -155,10 +187,12 @@ enum qcom_pcie_ep_link_status {
* @wake: WAKE# GPIO
* @phy: PHY controller block
* @debugfs: PCIe Endpoint Debugfs directory
+ * @icc_mem: Handle to an interconnect path between PCIe and MEM
* @clks: PCIe clocks
* @num_clks: PCIe clocks count
* @perst_en: Flag for PERST enable
* @perst_sep_en: Flag for PERST separation enable
+ * @cfg: PCIe EP config struct
* @link_status: PCIe Link status
* @global_irq: Qualcomm PCIe specific Global IRQ
* @perst_irq: PERST# IRQ
@@ -167,7 +201,6 @@ struct qcom_pcie_ep {
struct dw_pcie pci;
void __iomem *parf;
- void __iomem *elbi;
void __iomem *mmio;
struct regmap *perst_map;
struct resource *mmio_res;
@@ -178,12 +211,15 @@ struct qcom_pcie_ep {
struct phy *phy;
struct dentry *debugfs;
+ struct icc_path *icc_mem;
+
struct clk_bulk_data *clks;
int num_clks;
u32 perst_en;
u32 perst_sep_en;
+ const struct qcom_pcie_ep_cfg *cfg;
enum qcom_pcie_ep_link_status link_status;
int global_irq;
int perst_irq;
@@ -227,12 +263,11 @@ static void qcom_pcie_ep_configure_tcsr(struct qcom_pcie_ep *pcie_ep)
}
}
-static int qcom_pcie_dw_link_up(struct dw_pcie *pci)
+static bool qcom_pcie_dw_link_up(struct dw_pcie *pci)
{
- struct qcom_pcie_ep *pcie_ep = to_pcie_ep(pci);
u32 reg;
- reg = readl_relaxed(pcie_ep->elbi + ELBI_SYS_STTS);
+ reg = readl_relaxed(pci->elbi_base + ELBI_SYS_STTS);
return reg & XMLH_LINK_UP;
}
@@ -253,8 +288,45 @@ static void qcom_pcie_dw_stop_link(struct dw_pcie *pci)
disable_irq(pcie_ep->perst_irq);
}
+static void qcom_pcie_dw_write_dbi2(struct dw_pcie *pci, void __iomem *base,
+ u32 reg, size_t size, u32 val)
+{
+ int ret;
+
+ writel(1, pci->elbi_base + ELBI_CS2_ENABLE);
+
+ ret = dw_pcie_write(pci->dbi_base2 + reg, size, val);
+ if (ret)
+ dev_err(pci->dev, "Failed to write DBI2 register (0x%x): %d\n", reg, ret);
+
+ writel(0, pci->elbi_base + ELBI_CS2_ENABLE);
+}
+
+static void qcom_pcie_ep_icc_update(struct qcom_pcie_ep *pcie_ep)
+{
+ struct dw_pcie *pci = &pcie_ep->pci;
+ u32 offset, status;
+ int speed, width;
+ int ret;
+
+ if (!pcie_ep->icc_mem)
+ return;
+
+ offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ status = readw(pci->dbi_base + offset + PCI_EXP_LNKSTA);
+
+ speed = FIELD_GET(PCI_EXP_LNKSTA_CLS, status);
+ width = FIELD_GET(PCI_EXP_LNKSTA_NLW, status);
+
+ ret = icc_set_bw(pcie_ep->icc_mem, 0, width * QCOM_PCIE_LINK_SPEED_TO_BW(speed));
+ if (ret)
+ dev_err(pci->dev, "failed to set interconnect bandwidth: %d\n",
+ ret);
+}
+
static int qcom_pcie_enable_resources(struct qcom_pcie_ep *pcie_ep)
{
+ struct dw_pcie *pci = &pcie_ep->pci;
int ret;
ret = clk_bulk_prepare_enable(pcie_ep->num_clks, pcie_ep->clks);
@@ -277,8 +349,24 @@ static int qcom_pcie_enable_resources(struct qcom_pcie_ep *pcie_ep)
if (ret)
goto err_phy_exit;
+ /*
+ * Some Qualcomm platforms require interconnect bandwidth constraints
+ * to be set before enabling interconnect clocks.
+ *
+ * Set an initial peak bandwidth corresponding to single-lane Gen 1
+ * for the pcie-mem path.
+ */
+ ret = icc_set_bw(pcie_ep->icc_mem, 0, QCOM_PCIE_LINK_SPEED_TO_BW(1));
+ if (ret) {
+ dev_err(pci->dev, "failed to set interconnect bandwidth: %d\n",
+ ret);
+ goto err_phy_off;
+ }
+
return 0;
+err_phy_off:
+ phy_power_off(pcie_ep->phy);
err_phy_exit:
phy_exit(pcie_ep->phy);
err_disable_clk:
@@ -289,6 +377,7 @@ err_disable_clk:
static void qcom_pcie_disable_resources(struct qcom_pcie_ep *pcie_ep)
{
+ icc_set_bw(pcie_ep->icc_mem, 0, 0);
phy_power_off(pcie_ep->phy);
phy_exit(pcie_ep->phy);
clk_bulk_disable_unprepare(pcie_ep->num_clks, pcie_ep->clks);
@@ -307,6 +396,10 @@ static int qcom_pcie_perst_deassert(struct dw_pcie *pci)
return ret;
}
+ /* Perform cleanup that requires refclk */
+ pci_epc_deinit_notify(pci->ep.epc);
+ dw_pcie_ep_cleanup(&pci->ep);
+
/* Assert WAKE# to RC to indicate device is ready */
gpiod_set_value_cansleep(pcie_ep->wake, 1);
usleep_range(WAKE_DELAY_US, WAKE_DELAY_US + 500);
@@ -395,15 +488,30 @@ static int qcom_pcie_perst_deassert(struct dw_pcie *pci)
writel_relaxed(0, pcie_ep->parf + PARF_INT_ALL_MASK);
val = PARF_INT_ALL_LINK_DOWN | PARF_INT_ALL_BME |
PARF_INT_ALL_PM_TURNOFF | PARF_INT_ALL_DSTATE_CHANGE |
- PARF_INT_ALL_LINK_UP;
+ PARF_INT_ALL_LINK_UP | PARF_INT_ALL_EDMA;
writel_relaxed(val, pcie_ep->parf + PARF_INT_ALL_MASK);
- ret = dw_pcie_ep_init_complete(&pcie_ep->pci.ep);
+ if (pcie_ep->cfg && pcie_ep->cfg->disable_mhi_ram_parity_check) {
+ val = readl_relaxed(pcie_ep->parf + PARF_INT_ALL_5_MASK);
+ val &= ~PARF_INT_ALL_5_MHI_RAM_DATA_PARITY_ERR;
+ writel_relaxed(val, pcie_ep->parf + PARF_INT_ALL_5_MASK);
+ }
+
+ val = readl_relaxed(pcie_ep->parf + PARF_INT_ALL_3_MASK);
+ val &= ~PARF_INT_ALL_3_PTM_UPDATING;
+ writel_relaxed(val, pcie_ep->parf + PARF_INT_ALL_3_MASK);
+
+ ret = dw_pcie_ep_init_registers(&pcie_ep->pci.ep);
if (ret) {
dev_err(dev, "Failed to complete initialization: %d\n", ret);
goto err_disable_resources;
}
+ qcom_pcie_common_set_equalization(pci);
+
+ if (pcie_link_speed[pci->max_link_speed] == PCIE_SPEED_16_0GT)
+ qcom_pcie_common_set_16gt_lane_margining(pci);
+
/*
* The physical address of the MMIO region which is exposed as the BAR
* should be written to MHI BASE registers.
@@ -415,15 +523,19 @@ static int qcom_pcie_perst_deassert(struct dw_pcie *pci)
/* Gate Master AXI clock to MHI bus during L1SS */
val = readl_relaxed(pcie_ep->parf + PARF_MHI_CLOCK_RESET_CTRL);
val &= ~PARF_MSTR_AXI_CLK_EN;
- val = readl_relaxed(pcie_ep->parf + PARF_MHI_CLOCK_RESET_CTRL);
+ writel_relaxed(val, pcie_ep->parf + PARF_MHI_CLOCK_RESET_CTRL);
- dw_pcie_ep_init_notify(&pcie_ep->pci.ep);
+ pci_epc_init_notify(pcie_ep->pci.ep.epc);
/* Enable LTSSM */
val = readl_relaxed(pcie_ep->parf + PARF_LTSSM);
val |= BIT(8);
writel_relaxed(val, pcie_ep->parf + PARF_LTSSM);
+ if (pcie_ep->cfg && pcie_ep->cfg->override_no_snoop)
+ writel_relaxed(WR_NO_SNOOP_OVERRIDE_EN | RD_NO_SNOOP_OVERRIDE_EN,
+ pcie_ep->parf + PARF_NO_SNOOP_OVERRIDE);
+
return 0;
err_disable_resources:
@@ -435,12 +547,6 @@ err_disable_resources:
static void qcom_pcie_perst_assert(struct dw_pcie *pci)
{
struct qcom_pcie_ep *pcie_ep = to_pcie_ep(pci);
- struct device *dev = pci->dev;
-
- if (pcie_ep->link_status == QCOM_PCIE_EP_LINK_DISABLED) {
- dev_dbg(dev, "Link is already disabled\n");
- return;
- }
qcom_pcie_disable_resources(pcie_ep);
pcie_ep->link_status = QCOM_PCIE_EP_LINK_DISABLED;
@@ -451,6 +557,7 @@ static const struct dw_pcie_ops pci_ops = {
.link_up = qcom_pcie_dw_link_up,
.start_link = qcom_pcie_dw_start_link,
.stop_link = qcom_pcie_dw_stop_link,
+ .write_dbi2 = qcom_pcie_dw_write_dbi2,
};
static int qcom_pcie_ep_get_io_resources(struct platform_device *pdev,
@@ -472,11 +579,6 @@ static int qcom_pcie_ep_get_io_resources(struct platform_device *pdev,
return PTR_ERR(pci->dbi_base);
pci->dbi_base2 = pci->dbi_base;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "elbi");
- pcie_ep->elbi = devm_pci_remap_cfg_resource(dev, res);
- if (IS_ERR(pcie_ep->elbi))
- return PTR_ERR(pcie_ep->elbi);
-
pcie_ep->mmio_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"mmio");
if (!pcie_ep->mmio_res) {
@@ -550,6 +652,10 @@ static int qcom_pcie_ep_get_resources(struct platform_device *pdev,
if (IS_ERR(pcie_ep->phy))
ret = PTR_ERR(pcie_ep->phy);
+ pcie_ep->icc_mem = devm_of_icc_get(dev, "pcie-mem");
+ if (IS_ERR(pcie_ep->icc_mem))
+ ret = PTR_ERR(pcie_ep->icc_mem);
+
return ret;
}
@@ -560,20 +666,19 @@ static irqreturn_t qcom_pcie_ep_global_irq_thread(int irq, void *data)
struct dw_pcie *pci = &pcie_ep->pci;
struct device *dev = pci->dev;
u32 status = readl_relaxed(pcie_ep->parf + PARF_INT_ALL_STATUS);
- u32 mask = readl_relaxed(pcie_ep->parf + PARF_INT_ALL_MASK);
u32 dstate, val;
writel_relaxed(status, pcie_ep->parf + PARF_INT_ALL_CLEAR);
- status &= mask;
if (FIELD_GET(PARF_INT_ALL_LINK_DOWN, status)) {
dev_dbg(dev, "Received Linkdown event\n");
pcie_ep->link_status = QCOM_PCIE_EP_LINK_DOWN;
- pci_epc_linkdown(pci->ep.epc);
+ dw_pcie_ep_linkdown(&pci->ep);
} else if (FIELD_GET(PARF_INT_ALL_BME, status)) {
- dev_dbg(dev, "Received BME event. Link is enabled!\n");
+ dev_dbg(dev, "Received Bus Master Enable event\n");
pcie_ep->link_status = QCOM_PCIE_EP_LINK_ENABLED;
- pci_epc_bme_notify(pci->ep.epc);
+ qcom_pcie_ep_icc_update(pcie_ep);
+ pci_epc_bus_master_enable_notify(pci->ep.epc);
} else if (FIELD_GET(PARF_INT_ALL_PM_TURNOFF, status)) {
dev_dbg(dev, "Received PM Turn-off event! Entering L23\n");
val = readl_relaxed(pcie_ep->parf + PARF_PM_CTRL);
@@ -593,7 +698,8 @@ static irqreturn_t qcom_pcie_ep_global_irq_thread(int irq, void *data)
dw_pcie_ep_linkup(&pci->ep);
pcie_ep->link_status = QCOM_PCIE_EP_LINK_UP;
} else {
- dev_dbg(dev, "Received unknown event: %d\n", status);
+ dev_WARN_ONCE(dev, 1, "Received unknown event. INT_STATUS: 0x%08x\n",
+ status);
}
return IRQ_HANDLED;
@@ -624,8 +730,15 @@ static irqreturn_t qcom_pcie_ep_perst_irq_thread(int irq, void *data)
static int qcom_pcie_ep_enable_irq_resources(struct platform_device *pdev,
struct qcom_pcie_ep *pcie_ep)
{
+ struct device *dev = pcie_ep->pci.dev;
+ char *name;
int ret;
+ name = devm_kasprintf(dev, GFP_KERNEL, "qcom_pcie_ep_global_irq%d",
+ pcie_ep->pci.ep.epc->domain_nr);
+ if (!name)
+ return -ENOMEM;
+
pcie_ep->global_irq = platform_get_irq_byname(pdev, "global");
if (pcie_ep->global_irq < 0)
return pcie_ep->global_irq;
@@ -633,18 +746,23 @@ static int qcom_pcie_ep_enable_irq_resources(struct platform_device *pdev,
ret = devm_request_threaded_irq(&pdev->dev, pcie_ep->global_irq, NULL,
qcom_pcie_ep_global_irq_thread,
IRQF_ONESHOT,
- "global_irq", pcie_ep);
+ name, pcie_ep);
if (ret) {
dev_err(&pdev->dev, "Failed to request Global IRQ\n");
return ret;
}
+ name = devm_kasprintf(dev, GFP_KERNEL, "qcom_pcie_ep_perst_irq%d",
+ pcie_ep->pci.ep.epc->domain_nr);
+ if (!name)
+ return -ENOMEM;
+
pcie_ep->perst_irq = gpiod_to_irq(pcie_ep->reset);
irq_set_status_flags(pcie_ep->perst_irq, IRQ_NOAUTOEN);
ret = devm_request_threaded_irq(&pdev->dev, pcie_ep->perst_irq, NULL,
qcom_pcie_ep_perst_irq_thread,
IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
- "perst_irq", pcie_ep);
+ name, pcie_ep);
if (ret) {
dev_err(&pdev->dev, "Failed to request PERST IRQ\n");
disable_irq(pcie_ep->global_irq);
@@ -655,14 +773,14 @@ static int qcom_pcie_ep_enable_irq_resources(struct platform_device *pdev,
}
static int qcom_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
- enum pci_epc_irq_type type, u16 interrupt_num)
+ unsigned int type, u16 interrupt_num)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
switch (type) {
- case PCI_EPC_IRQ_LEGACY:
- return dw_pcie_ep_raise_legacy_irq(ep, func_no);
- case PCI_EPC_IRQ_MSI:
+ case PCI_IRQ_INTX:
+ return dw_pcie_ep_raise_intx_irq(ep, func_no);
+ case PCI_IRQ_MSI:
return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
default:
dev_err(pci->dev, "Unknown IRQ type\n");
@@ -703,9 +821,12 @@ static void qcom_pcie_ep_init_debugfs(struct qcom_pcie_ep *pcie_ep)
static const struct pci_epc_features qcom_pcie_epc_features = {
.linkup_notifier = true,
- .core_init_notifier = true,
.msi_capable = true,
- .msix_capable = false,
+ .align = SZ_4K,
+ .bar[BAR_0] = { .only_64bit = true, },
+ .bar[BAR_1] = { .type = BAR_RESERVED, },
+ .bar[BAR_2] = { .only_64bit = true, },
+ .bar[BAR_3] = { .type = BAR_RESERVED, },
};
static const struct pci_epc_features *
@@ -724,7 +845,7 @@ static void qcom_pcie_ep_init(struct dw_pcie_ep *ep)
}
static const struct dw_pcie_ep_ops pci_ep_ops = {
- .ep_init = qcom_pcie_ep_init,
+ .init = qcom_pcie_ep_init,
.raise_irq = qcom_pcie_ep_raise_irq,
.get_features = qcom_pcie_epc_get_features,
};
@@ -743,27 +864,29 @@ static int qcom_pcie_ep_probe(struct platform_device *pdev)
pcie_ep->pci.dev = dev;
pcie_ep->pci.ops = &pci_ops;
pcie_ep->pci.ep.ops = &pci_ep_ops;
+
+ pcie_ep->cfg = of_device_get_match_data(dev);
+ if (pcie_ep->cfg && pcie_ep->cfg->hdma_support) {
+ pcie_ep->pci.edma.ll_wr_cnt = 8;
+ pcie_ep->pci.edma.ll_rd_cnt = 8;
+ pcie_ep->pci.edma.mf = EDMA_MF_HDMA_NATIVE;
+ }
+
platform_set_drvdata(pdev, pcie_ep);
ret = qcom_pcie_ep_get_resources(pdev, pcie_ep);
if (ret)
return ret;
- ret = qcom_pcie_enable_resources(pcie_ep);
- if (ret) {
- dev_err(dev, "Failed to enable resources: %d\n", ret);
- return ret;
- }
-
ret = dw_pcie_ep_init(&pcie_ep->pci.ep);
if (ret) {
dev_err(dev, "Failed to initialize endpoint: %d\n", ret);
- goto err_disable_resources;
+ return ret;
}
ret = qcom_pcie_ep_enable_irq_resources(pdev, pcie_ep);
if (ret)
- goto err_disable_resources;
+ goto err_ep_deinit;
name = devm_kasprintf(dev, GFP_KERNEL, "%pOFP", dev->of_node);
if (!name) {
@@ -780,8 +903,8 @@ err_disable_irqs:
disable_irq(pcie_ep->global_irq);
disable_irq(pcie_ep->perst_irq);
-err_disable_resources:
- qcom_pcie_disable_resources(pcie_ep);
+err_ep_deinit:
+ dw_pcie_ep_deinit(&pcie_ep->pci.ep);
return ret;
}
@@ -801,16 +924,24 @@ static void qcom_pcie_ep_remove(struct platform_device *pdev)
qcom_pcie_disable_resources(pcie_ep);
}
+static const struct qcom_pcie_ep_cfg cfg_1_34_0 = {
+ .hdma_support = true,
+ .override_no_snoop = true,
+ .disable_mhi_ram_parity_check = true,
+};
+
static const struct of_device_id qcom_pcie_ep_match[] = {
+ { .compatible = "qcom,sa8775p-pcie-ep", .data = &cfg_1_34_0},
{ .compatible = "qcom,sdx55-pcie-ep", },
{ .compatible = "qcom,sm8450-pcie-ep", },
+ { .compatible = "qcom,sar2130p-pcie-ep", },
{ }
};
MODULE_DEVICE_TABLE(of, qcom_pcie_ep_match);
static struct platform_driver qcom_pcie_ep_driver = {
.probe = qcom_pcie_ep_probe,
- .remove_new = qcom_pcie_ep_remove,
+ .remove = qcom_pcie_ep_remove,
.driver = {
.name = "qcom-pcie-ep",
.of_match_table = qcom_pcie_ep_match,
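Both the Keem Bay and Qualcomm endpoint conversions in this series follow the same split bring-up: dw_pcie_ep_init() at probe time, dw_pcie_ep_init_registers() only once the core is clocked and DBI is accessible (for Qualcomm, after PERST# deassert), then pci_epc_init_notify() so the EPF drivers can configure the function. A condensed sketch for a controller whose DBI is already accessible at probe, with a hypothetical example_ prefix:

	static int example_ep_setup(struct dw_pcie *pci)
	{
		int ret;

		ret = dw_pcie_ep_init(&pci->ep);
		if (ret)
			return ret;

		/* Requires refclk running and DBI accessible */
		ret = dw_pcie_ep_init_registers(&pci->ep);
		if (ret) {
			dw_pcie_ep_deinit(&pci->ep);
			return ret;
		}

		/* Let pci-epf-* drivers know the controller is ready */
		pci_epc_init_notify(pci->ep.epc);

		return 0;
	}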
diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
index 7a87a47eb7ed..7b92e7a1c0d9 100644
--- a/drivers/pci/controller/dwc/pcie-qcom.c
+++ b/drivers/pci/controller/dwc/pcie-qcom.c
@@ -18,10 +18,13 @@
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
+#include <linux/limits.h>
#include <linux/init.h>
-#include <linux/of_device.h>
-#include <linux/of_gpio.h>
+#include <linux/of.h>
+#include <linux/of_pci.h>
#include <linux/pci.h>
+#include <linux/pci-ecam.h>
+#include <linux/pm_opp.h>
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
#include <linux/phy/pcie.h>
@@ -30,9 +33,12 @@
#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/types.h>
+#include <linux/units.h>
#include "../../pci.h"
+#include "../pci-host-common.h"
#include "pcie-designware.h"
+#include "pcie-qcom-common.h"
/* PARF registers */
#define PARF_SYS_CTRL 0x00
@@ -43,17 +49,27 @@
#define PARF_PHY_REFCLK 0x4c
#define PARF_CONFIG_BITS 0x50
#define PARF_DBI_BASE_ADDR 0x168
-#define PARF_SLV_ADDR_SPACE_SIZE_2_3_3 0x16c /* Register offset specific to IP ver 2.3.3 */
+#define PARF_SLV_ADDR_SPACE_SIZE 0x16c
#define PARF_MHI_CLOCK_RESET_CTRL 0x174
#define PARF_AXI_MSTR_WR_ADDR_HALT 0x178
#define PARF_AXI_MSTR_WR_ADDR_HALT_V2 0x1a8
#define PARF_Q2A_FLUSH 0x1ac
#define PARF_LTSSM 0x1b0
+#define PARF_INT_ALL_STATUS 0x224
+#define PARF_INT_ALL_CLEAR 0x228
+#define PARF_INT_ALL_MASK 0x22c
#define PARF_SID_OFFSET 0x234
#define PARF_BDF_TRANSLATE_CFG 0x24c
-#define PARF_SLV_ADDR_SPACE_SIZE 0x358
+#define PARF_DBI_BASE_ADDR_V2 0x350
+#define PARF_DBI_BASE_ADDR_V2_HI 0x354
+#define PARF_SLV_ADDR_SPACE_SIZE_V2 0x358
+#define PARF_SLV_ADDR_SPACE_SIZE_V2_HI 0x35c
+#define PARF_NO_SNOOP_OVERRIDE 0x3d4
+#define PARF_ATU_BASE_ADDR 0x634
+#define PARF_ATU_BASE_ADDR_HI 0x638
#define PARF_DEVICE_TYPE 0x1000
#define PARF_BDF_TO_SID_TABLE_N 0x2000
+#define PARF_BDF_TO_SID_CFG 0x2c00
/* ELBI registers */
#define ELBI_SYS_CTRL 0x04
@@ -105,7 +121,7 @@
#define PHY_RX0_EQ(x) FIELD_PREP(GENMASK(26, 24), x)
/* PARF_SLV_ADDR_SPACE_SIZE register value */
-#define SLV_ADDR_SPACE_SZ 0x10000000
+#define SLV_ADDR_SPACE_SZ 0x80000000
/* PARF_MHI_CLOCK_RESET_CTRL register fields */
#define AHB_CLK_EN BIT(0)
@@ -118,9 +134,20 @@
/* PARF_LTSSM register fields */
#define LTSSM_EN BIT(8)
+/* PARF_INT_ALL_{STATUS/CLEAR/MASK} register fields */
+#define PARF_INT_ALL_LINK_UP BIT(13)
+#define PARF_INT_MSI_DEV_0_7 GENMASK(30, 23)
+
+/* PARF_NO_SNOOP_OVERRIDE register fields */
+#define WR_NO_SNOOP_OVERRIDE_EN BIT(1)
+#define RD_NO_SNOOP_OVERRIDE_EN BIT(3)
+
/* PARF_DEVICE_TYPE register fields */
#define DEVICE_TYPE_RC 0x4
+/* PARF_BDF_TO_SID_CFG fields */
+#define BDF_TO_SID_BYPASS BIT(0)
+
/* ELBI_SYS_CTRL register fields */
#define ELBI_SYS_CTRL_LT_ENABLE BIT(0)
@@ -148,58 +175,59 @@
#define QCOM_PCIE_CRC8_POLYNOMIAL (BIT(2) | BIT(1) | BIT(0))
-#define QCOM_PCIE_1_0_0_MAX_CLOCKS 4
+#define QCOM_PCIE_LINK_SPEED_TO_BW(speed) \
+ Mbps_to_icc(PCIE_SPEED2MBS_ENC(pcie_link_speed[speed]))
+
struct qcom_pcie_resources_1_0_0 {
- struct clk_bulk_data clks[QCOM_PCIE_1_0_0_MAX_CLOCKS];
+ struct clk_bulk_data *clks;
+ int num_clks;
struct reset_control *core;
struct regulator *vdda;
};
-#define QCOM_PCIE_2_1_0_MAX_CLOCKS 5
#define QCOM_PCIE_2_1_0_MAX_RESETS 6
#define QCOM_PCIE_2_1_0_MAX_SUPPLY 3
struct qcom_pcie_resources_2_1_0 {
- struct clk_bulk_data clks[QCOM_PCIE_2_1_0_MAX_CLOCKS];
+ struct clk_bulk_data *clks;
+ int num_clks;
struct reset_control_bulk_data resets[QCOM_PCIE_2_1_0_MAX_RESETS];
int num_resets;
struct regulator_bulk_data supplies[QCOM_PCIE_2_1_0_MAX_SUPPLY];
};
-#define QCOM_PCIE_2_3_2_MAX_CLOCKS 4
#define QCOM_PCIE_2_3_2_MAX_SUPPLY 2
struct qcom_pcie_resources_2_3_2 {
- struct clk_bulk_data clks[QCOM_PCIE_2_3_2_MAX_CLOCKS];
+ struct clk_bulk_data *clks;
+ int num_clks;
struct regulator_bulk_data supplies[QCOM_PCIE_2_3_2_MAX_SUPPLY];
};
-#define QCOM_PCIE_2_3_3_MAX_CLOCKS 5
#define QCOM_PCIE_2_3_3_MAX_RESETS 7
struct qcom_pcie_resources_2_3_3 {
- struct clk_bulk_data clks[QCOM_PCIE_2_3_3_MAX_CLOCKS];
+ struct clk_bulk_data *clks;
+ int num_clks;
struct reset_control_bulk_data rst[QCOM_PCIE_2_3_3_MAX_RESETS];
};
-#define QCOM_PCIE_2_4_0_MAX_CLOCKS 4
#define QCOM_PCIE_2_4_0_MAX_RESETS 12
struct qcom_pcie_resources_2_4_0 {
- struct clk_bulk_data clks[QCOM_PCIE_2_4_0_MAX_CLOCKS];
+ struct clk_bulk_data *clks;
int num_clks;
struct reset_control_bulk_data resets[QCOM_PCIE_2_4_0_MAX_RESETS];
int num_resets;
};
-#define QCOM_PCIE_2_7_0_MAX_CLOCKS 15
#define QCOM_PCIE_2_7_0_MAX_SUPPLIES 2
struct qcom_pcie_resources_2_7_0 {
- struct clk_bulk_data clks[QCOM_PCIE_2_7_0_MAX_CLOCKS];
+ struct clk_bulk_data *clks;
int num_clks;
struct regulator_bulk_data supplies[QCOM_PCIE_2_7_0_MAX_SUPPLIES];
struct reset_control *rst;
};
-#define QCOM_PCIE_2_9_0_MAX_CLOCKS 5
struct qcom_pcie_resources_2_9_0 {
- struct clk_bulk_data clks[QCOM_PCIE_2_9_0_MAX_CLOCKS];
+ struct clk_bulk_data *clks;
+ int num_clks;
struct reset_control *rst;
};
@@ -219,49 +247,80 @@ struct qcom_pcie_ops {
int (*get_resources)(struct qcom_pcie *pcie);
int (*init)(struct qcom_pcie *pcie);
int (*post_init)(struct qcom_pcie *pcie);
+ void (*host_post_init)(struct qcom_pcie *pcie);
void (*deinit)(struct qcom_pcie *pcie);
void (*ltssm_enable)(struct qcom_pcie *pcie);
int (*config_sid)(struct qcom_pcie *pcie);
};
+ /**
+ * struct qcom_pcie_cfg - Per SoC config struct
+ * @ops: qcom PCIe ops structure
+ * @override_no_snoop: Override NO_SNOOP attribute in TLP to enable cache
+ * snooping
+ * @firmware_managed: Set if the Root Complex is firmware managed
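+ * @no_l0s: Set if ASPM L0s support should be cleared from the Link Capabilities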
+ */
struct qcom_pcie_cfg {
const struct qcom_pcie_ops *ops;
+ bool override_no_snoop;
+ bool firmware_managed;
+ bool no_l0s;
+};
+
+struct qcom_pcie_port {
+ struct list_head list;
+ struct gpio_desc *reset;
+ struct phy *phy;
};
struct qcom_pcie {
struct dw_pcie *pci;
void __iomem *parf; /* DT parf */
- void __iomem *elbi; /* DT elbi */
void __iomem *mhi;
union qcom_pcie_resources res;
- struct phy *phy;
- struct gpio_desc *reset;
struct icc_path *icc_mem;
+ struct icc_path *icc_cpu;
const struct qcom_pcie_cfg *cfg;
struct dentry *debugfs;
+ struct list_head ports;
bool suspended;
+ bool use_pm_opp;
};
#define to_qcom_pcie(x) dev_get_drvdata((x)->dev)
-static void qcom_ep_reset_assert(struct qcom_pcie *pcie)
+static void qcom_perst_assert(struct qcom_pcie *pcie, bool assert)
{
- gpiod_set_value_cansleep(pcie->reset, 1);
+ struct qcom_pcie_port *port;
+ int val = assert ? 1 : 0;
+
+ list_for_each_entry(port, &pcie->ports, list)
+ gpiod_set_value_cansleep(port->reset, val);
+
usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
}
+static void qcom_ep_reset_assert(struct qcom_pcie *pcie)
+{
+ qcom_perst_assert(pcie, true);
+}
+
static void qcom_ep_reset_deassert(struct qcom_pcie *pcie)
{
/* Ensure that PERST has been asserted for at least 100 ms */
- msleep(100);
- gpiod_set_value_cansleep(pcie->reset, 0);
- usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
+ msleep(PCIE_T_PVPERL_MS);
+ qcom_perst_assert(pcie, false);
}
static int qcom_pcie_start_link(struct dw_pcie *pci)
{
struct qcom_pcie *pcie = to_qcom_pcie(pci);
+ qcom_pcie_common_set_equalization(pci);
+
+ if (pcie_link_speed[pci->max_link_speed] == PCIE_SPEED_16_0GT)
+ qcom_pcie_common_set_16gt_lane_margining(pci);
+
/* Enable Link Training state machine */
if (pcie->cfg->ops->ltssm_enable)
pcie->cfg->ops->ltssm_enable(pcie);
@@ -269,6 +328,26 @@ static int qcom_pcie_start_link(struct dw_pcie *pci)
return 0;
}
+static void qcom_pcie_clear_aspm_l0s(struct dw_pcie *pci)
+{
+ struct qcom_pcie *pcie = to_qcom_pcie(pci);
+ u16 offset;
+ u32 val;
+
+ if (!pcie->cfg->no_l0s)
+ return;
+
+ offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+
+ dw_pcie_dbi_ro_wr_en(pci);
+
+ val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP);
+ val &= ~PCI_EXP_LNKCAP_ASPM_L0S;
+ writel(val, pci->dbi_base + offset + PCI_EXP_LNKCAP);
+
+ dw_pcie_dbi_ro_wr_dis(pci);
+}
+
static void qcom_pcie_clear_hpc(struct dw_pcie *pci)
{
u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
@@ -283,14 +362,63 @@ static void qcom_pcie_clear_hpc(struct dw_pcie *pci)
dw_pcie_dbi_ro_wr_dis(pci);
}
+static void qcom_pcie_configure_dbi_base(struct qcom_pcie *pcie)
+{
+ struct dw_pcie *pci = pcie->pci;
+
+ if (pci->dbi_phys_addr) {
+ /*
+ * The PARF_DBI_BASE_ADDR register is in the CPU domain and must
+ * be programmed with the CPU physical address.
+ */
+ writel(lower_32_bits(pci->dbi_phys_addr), pcie->parf +
+ PARF_DBI_BASE_ADDR);
+ writel(SLV_ADDR_SPACE_SZ, pcie->parf +
+ PARF_SLV_ADDR_SPACE_SIZE);
+ }
+}
+
+static void qcom_pcie_configure_dbi_atu_base(struct qcom_pcie *pcie)
+{
+ struct dw_pcie *pci = pcie->pci;
+
+ if (pci->dbi_phys_addr) {
+ /*
+ * PARF_DBI_BASE_ADDR_V2 and PARF_ATU_BASE_ADDR registers are
+ * in the CPU domain and must be programmed with CPU
+ * physical addresses.
+ */
+ writel(lower_32_bits(pci->dbi_phys_addr), pcie->parf +
+ PARF_DBI_BASE_ADDR_V2);
+ writel(upper_32_bits(pci->dbi_phys_addr), pcie->parf +
+ PARF_DBI_BASE_ADDR_V2_HI);
+
+ if (pci->atu_phys_addr) {
+ writel(lower_32_bits(pci->atu_phys_addr), pcie->parf +
+ PARF_ATU_BASE_ADDR);
+ writel(upper_32_bits(pci->atu_phys_addr), pcie->parf +
+ PARF_ATU_BASE_ADDR_HI);
+ }
+
+ writel(0x0, pcie->parf + PARF_SLV_ADDR_SPACE_SIZE_V2);
+ writel(SLV_ADDR_SPACE_SZ, pcie->parf +
+ PARF_SLV_ADDR_SPACE_SIZE_V2_HI);
+ }
+}
+
static void qcom_pcie_2_1_0_ltssm_enable(struct qcom_pcie *pcie)
{
+ struct dw_pcie *pci = pcie->pci;
u32 val;
+ if (!pci->elbi_base) {
+ dev_err(pci->dev, "ELBI is not present\n");
+ return;
+ }
/* enable link training */
- val = readl(pcie->elbi + ELBI_SYS_CTRL);
+ val = readl(pci->elbi_base + ELBI_SYS_CTRL);
val |= ELBI_SYS_CTRL_LT_ENABLE;
- writel(val, pcie->elbi + ELBI_SYS_CTRL);
+ writel(val, pci->elbi_base + ELBI_SYS_CTRL);
}
static int qcom_pcie_get_resources_2_1_0(struct qcom_pcie *pcie)
@@ -309,21 +437,11 @@ static int qcom_pcie_get_resources_2_1_0(struct qcom_pcie *pcie)
if (ret)
return ret;
- res->clks[0].id = "iface";
- res->clks[1].id = "core";
- res->clks[2].id = "phy";
- res->clks[3].id = "aux";
- res->clks[4].id = "ref";
-
- /* iface, core, phy are required */
- ret = devm_clk_bulk_get(dev, 3, res->clks);
- if (ret < 0)
- return ret;
-
- /* aux, ref are optional */
- ret = devm_clk_bulk_get_optional(dev, 2, res->clks + 3);
- if (ret < 0)
- return ret;
+ res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
+ if (res->num_clks < 0) {
+ dev_err(dev, "Failed to get clocks\n");
+ return res->num_clks;
+ }
res->resets[0].id = "pci";
res->resets[1].id = "axi";
@@ -345,7 +463,7 @@ static void qcom_pcie_deinit_2_1_0(struct qcom_pcie *pcie)
{
struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
- clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
+ clk_bulk_disable_unprepare(res->num_clks, res->clks);
reset_control_bulk_assert(res->num_resets, res->resets);
writel(1, pcie->parf + PARF_PHY_CTRL);
@@ -397,7 +515,7 @@ static int qcom_pcie_post_init_2_1_0(struct qcom_pcie *pcie)
val &= ~PHY_TEST_PWR_DOWN;
writel(val, pcie->parf + PARF_PHY_CTRL);
- ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
+ ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
if (ret)
return ret;
@@ -448,20 +566,16 @@ static int qcom_pcie_get_resources_1_0_0(struct qcom_pcie *pcie)
struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
struct dw_pcie *pci = pcie->pci;
struct device *dev = pci->dev;
- int ret;
res->vdda = devm_regulator_get(dev, "vdda");
if (IS_ERR(res->vdda))
return PTR_ERR(res->vdda);
- res->clks[0].id = "iface";
- res->clks[1].id = "aux";
- res->clks[2].id = "master_bus";
- res->clks[3].id = "slave_bus";
-
- ret = devm_clk_bulk_get(dev, ARRAY_SIZE(res->clks), res->clks);
- if (ret < 0)
- return ret;
+ res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
+ if (res->num_clks < 0) {
+ dev_err(dev, "Failed to get clocks\n");
+ return res->num_clks;
+ }
res->core = devm_reset_control_get_exclusive(dev, "core");
return PTR_ERR_OR_ZERO(res->core);
@@ -472,7 +586,7 @@ static void qcom_pcie_deinit_1_0_0(struct qcom_pcie *pcie)
struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
reset_control_assert(res->core);
- clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
+ clk_bulk_disable_unprepare(res->num_clks, res->clks);
regulator_disable(res->vdda);
}
@@ -489,7 +603,7 @@ static int qcom_pcie_init_1_0_0(struct qcom_pcie *pcie)
return ret;
}
- ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
+ ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
if (ret) {
dev_err(dev, "cannot prepare/enable clocks\n");
goto err_assert_reset;
@@ -504,7 +618,7 @@ static int qcom_pcie_init_1_0_0(struct qcom_pcie *pcie)
return 0;
err_disable_clks:
- clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
+ clk_bulk_disable_unprepare(res->num_clks, res->clks);
err_assert_reset:
reset_control_assert(res->core);
@@ -513,8 +627,7 @@ err_assert_reset:
static int qcom_pcie_post_init_1_0_0(struct qcom_pcie *pcie)
{
- /* change DBI base address */
- writel(0, pcie->parf + PARF_DBI_BASE_ADDR);
+ qcom_pcie_configure_dbi_base(pcie);
if (IS_ENABLED(CONFIG_PCI_MSI)) {
u32 val = readl(pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT);
@@ -528,6 +641,18 @@ static int qcom_pcie_post_init_1_0_0(struct qcom_pcie *pcie)
return 0;
}
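+/* dw_pcie_ops callback mapping PERST# assert/deassert onto the Qcom EP reset helpers */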
+static int qcom_pcie_assert_perst(struct dw_pcie *pci, bool assert)
+{
+ struct qcom_pcie *pcie = to_qcom_pcie(pci);
+
+ if (assert)
+ qcom_ep_reset_assert(pcie);
+ else
+ qcom_ep_reset_deassert(pcie);
+
+ return 0;
+}
+
static void qcom_pcie_2_3_2_ltssm_enable(struct qcom_pcie *pcie)
{
u32 val;
@@ -552,14 +677,11 @@ static int qcom_pcie_get_resources_2_3_2(struct qcom_pcie *pcie)
if (ret)
return ret;
- res->clks[0].id = "aux";
- res->clks[1].id = "cfg";
- res->clks[2].id = "bus_master";
- res->clks[3].id = "bus_slave";
-
- ret = devm_clk_bulk_get(dev, ARRAY_SIZE(res->clks), res->clks);
- if (ret < 0)
- return ret;
+ res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
+ if (res->num_clks < 0) {
+ dev_err(dev, "Failed to get clocks\n");
+ return res->num_clks;
+ }
return 0;
}
@@ -568,7 +690,7 @@ static void qcom_pcie_deinit_2_3_2(struct qcom_pcie *pcie)
{
struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
- clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
+ clk_bulk_disable_unprepare(res->num_clks, res->clks);
regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
}
@@ -585,7 +707,7 @@ static int qcom_pcie_init_2_3_2(struct qcom_pcie *pcie)
return ret;
}
- ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
+ ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
if (ret) {
dev_err(dev, "cannot prepare/enable clocks\n");
regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
@@ -604,8 +726,7 @@ static int qcom_pcie_post_init_2_3_2(struct qcom_pcie *pcie)
val &= ~PHY_TEST_PWR_DOWN;
writel(val, pcie->parf + PARF_PHY_CTRL);
- /* change DBI base address */
- writel(0, pcie->parf + PARF_DBI_BASE_ADDR);
+ qcom_pcie_configure_dbi_base(pcie);
/* MAC PHY_POWERDOWN MUX DISABLE */
val = readl(pcie->parf + PARF_SYS_CTRL);
@@ -633,17 +754,11 @@ static int qcom_pcie_get_resources_2_4_0(struct qcom_pcie *pcie)
bool is_ipq = of_device_is_compatible(dev->of_node, "qcom,pcie-ipq4019");
int ret;
- res->clks[0].id = "aux";
- res->clks[1].id = "master_bus";
- res->clks[2].id = "slave_bus";
- res->clks[3].id = "iface";
-
- /* qcom,pcie-ipq4019 is defined without "iface" */
- res->num_clks = is_ipq ? 3 : 4;
-
- ret = devm_clk_bulk_get(dev, res->num_clks, res->clks);
- if (ret < 0)
- return ret;
+ res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
+ if (res->num_clks < 0) {
+ dev_err(dev, "Failed to get clocks\n");
+ return res->num_clks;
+ }
res->resets[0].id = "axi_m";
res->resets[1].id = "axi_s";
@@ -714,15 +829,11 @@ static int qcom_pcie_get_resources_2_3_3(struct qcom_pcie *pcie)
struct device *dev = pci->dev;
int ret;
- res->clks[0].id = "iface";
- res->clks[1].id = "axi_m";
- res->clks[2].id = "axi_s";
- res->clks[3].id = "ahb";
- res->clks[4].id = "aux";
-
- ret = devm_clk_bulk_get(dev, ARRAY_SIZE(res->clks), res->clks);
- if (ret < 0)
- return ret;
+ res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
+ if (res->num_clks < 0) {
+ dev_err(dev, "Failed to get clocks\n");
+ return res->num_clks;
+ }
res->rst[0].id = "axi_m";
res->rst[1].id = "axi_s";
@@ -743,7 +854,7 @@ static void qcom_pcie_deinit_2_3_3(struct qcom_pcie *pcie)
{
struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
- clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
+ clk_bulk_disable_unprepare(res->num_clks, res->clks);
}
static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie)
@@ -773,7 +884,7 @@ static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie)
*/
usleep_range(2000, 2500);
- ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
+ ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
if (ret) {
dev_err(dev, "cannot prepare/enable clocks\n");
goto err_assert_resets;
@@ -797,14 +908,11 @@ static int qcom_pcie_post_init_2_3_3(struct qcom_pcie *pcie)
u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
u32 val;
- writel(SLV_ADDR_SPACE_SZ,
- pcie->parf + PARF_SLV_ADDR_SPACE_SIZE_2_3_3);
-
val = readl(pcie->parf + PARF_PHY_CTRL);
val &= ~PHY_TEST_PWR_DOWN;
writel(val, pcie->parf + PARF_PHY_CTRL);
- writel(0, pcie->parf + PARF_DBI_BASE_ADDR);
+ qcom_pcie_configure_dbi_atu_base(pcie);
writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS
| SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS |
@@ -835,8 +943,6 @@ static int qcom_pcie_get_resources_2_7_0(struct qcom_pcie *pcie)
struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
struct dw_pcie *pci = pcie->pci;
struct device *dev = pci->dev;
- unsigned int num_clks, num_opt_clks;
- unsigned int idx;
int ret;
res->rst = devm_reset_control_array_get_exclusive(dev);
@@ -850,36 +956,11 @@ static int qcom_pcie_get_resources_2_7_0(struct qcom_pcie *pcie)
if (ret)
return ret;
- idx = 0;
- res->clks[idx++].id = "aux";
- res->clks[idx++].id = "cfg";
- res->clks[idx++].id = "bus_master";
- res->clks[idx++].id = "bus_slave";
- res->clks[idx++].id = "slave_q2a";
-
- num_clks = idx;
-
- ret = devm_clk_bulk_get(dev, num_clks, res->clks);
- if (ret < 0)
- return ret;
-
- res->clks[idx++].id = "tbu";
- res->clks[idx++].id = "ddrss_sf_tbu";
- res->clks[idx++].id = "aggre0";
- res->clks[idx++].id = "aggre1";
- res->clks[idx++].id = "noc_aggr";
- res->clks[idx++].id = "noc_aggr_4";
- res->clks[idx++].id = "noc_aggr_south_sf";
- res->clks[idx++].id = "cnoc_qx";
- res->clks[idx++].id = "sleep";
- res->clks[idx++].id = "cnoc_sf_axi";
-
- num_opt_clks = idx - num_clks;
- res->num_clks = idx;
-
- ret = devm_clk_bulk_get_optional(dev, num_opt_clks, res->clks + num_clks);
- if (ret < 0)
- return ret;
+ res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
+ if (res->num_clks < 0) {
+ dev_err(dev, "Failed to get clocks\n");
+ return res->num_clks;
+ }
return 0;
}
@@ -927,8 +1008,7 @@ static int qcom_pcie_init_2_7_0(struct qcom_pcie *pcie)
val &= ~PHY_TEST_PWR_DOWN;
writel(val, pcie->parf + PARF_PHY_CTRL);
- /* change DBI base address */
- writel(0, pcie->parf + PARF_DBI_BASE_ADDR);
+ qcom_pcie_configure_dbi_atu_base(pcie);
/* MAC PHY_POWERDOWN MUX DISABLE */
val = readl(pcie->parf + PARF_SYS_CTRL);
@@ -944,6 +1024,8 @@ static int qcom_pcie_init_2_7_0(struct qcom_pcie *pcie)
val &= ~REQ_NOT_ENTR_L1;
writel(val, pcie->parf + PARF_PM_CTRL);
+ pci->l1ss_support = true;
+
val = readl(pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2);
val |= EN;
writel(val, pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2);
@@ -959,11 +1041,37 @@ err_disable_regulators:
static int qcom_pcie_post_init_2_7_0(struct qcom_pcie *pcie)
{
+ const struct qcom_pcie_cfg *pcie_cfg = pcie->cfg;
+
+ if (pcie_cfg->override_no_snoop)
+ writel(WR_NO_SNOOP_OVERRIDE_EN | RD_NO_SNOOP_OVERRIDE_EN,
+ pcie->parf + PARF_NO_SNOOP_OVERRIDE);
+
+ qcom_pcie_clear_aspm_l0s(pcie->pci);
qcom_pcie_clear_hpc(pcie->pci);
return 0;
}
+static int qcom_pcie_enable_aspm(struct pci_dev *pdev, void *userdata)
+{
+ /*
+ * Downstream devices need to be in D0 state before enabling PCI PM
+ * substates.
+ */
+ pci_set_power_state_locked(pdev, PCI_D0);
+ pci_enable_link_state_locked(pdev, PCIE_LINK_STATE_ALL);
+
+ return 0;
+}
+
+static void qcom_pcie_host_post_init_2_7_0(struct qcom_pcie *pcie)
+{
+ struct dw_pcie_rp *pp = &pcie->pci->pp;
+
+ pci_walk_bus(pp->bridge->bus, qcom_pcie_enable_aspm, NULL);
+}
+
static void qcom_pcie_deinit_2_7_0(struct qcom_pcie *pcie)
{
struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
@@ -987,11 +1095,17 @@ static int qcom_pcie_config_sid_1_9_0(struct qcom_pcie *pcie)
u8 qcom_pcie_crc8_table[CRC8_TABLE_SIZE];
int i, nr_map, size = 0;
u32 smmu_sid_base;
+ u32 val;
of_get_property(dev->of_node, "iommu-map", &size);
if (!size)
return 0;
+	/* Enable BDF to SID translation by disabling the (default) bypass mode */
+ val = readl(pcie->parf + PARF_BDF_TO_SID_CFG);
+ val &= ~BDF_TO_SID_BYPASS;
+ writel(val, pcie->parf + PARF_BDF_TO_SID_CFG);
+
map = kzalloc(size, GFP_KERNEL);
if (!map)
return -ENOMEM;
@@ -1048,17 +1162,12 @@ static int qcom_pcie_get_resources_2_9_0(struct qcom_pcie *pcie)
struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0;
struct dw_pcie *pci = pcie->pci;
struct device *dev = pci->dev;
- int ret;
-
- res->clks[0].id = "iface";
- res->clks[1].id = "axi_m";
- res->clks[2].id = "axi_s";
- res->clks[3].id = "axi_bridge";
- res->clks[4].id = "rchng";
- ret = devm_clk_bulk_get(dev, ARRAY_SIZE(res->clks), res->clks);
- if (ret < 0)
- return ret;
+ res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
+ if (res->num_clks < 0) {
+ dev_err(dev, "Failed to get clocks\n");
+ return res->num_clks;
+ }
res->rst = devm_reset_control_array_get_exclusive(dev);
if (IS_ERR(res->rst))
@@ -1071,7 +1180,7 @@ static void qcom_pcie_deinit_2_9_0(struct qcom_pcie *pcie)
{
struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0;
- clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks);
+ clk_bulk_disable_unprepare(res->num_clks, res->clks);
}
static int qcom_pcie_init_2_9_0(struct qcom_pcie *pcie)
@@ -1100,7 +1209,7 @@ static int qcom_pcie_init_2_9_0(struct qcom_pcie *pcie)
usleep_range(2000, 2500);
- return clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks);
+ return clk_bulk_prepare_enable(res->num_clks, res->clks);
}
static int qcom_pcie_post_init_2_9_0(struct qcom_pcie *pcie)
@@ -1110,14 +1219,11 @@ static int qcom_pcie_post_init_2_9_0(struct qcom_pcie *pcie)
u32 val;
int i;
- writel(SLV_ADDR_SPACE_SZ,
- pcie->parf + PARF_SLV_ADDR_SPACE_SIZE);
-
val = readl(pcie->parf + PARF_PHY_CTRL);
val &= ~PHY_TEST_PWR_DOWN;
writel(val, pcie->parf + PARF_PHY_CTRL);
- writel(0, pcie->parf + PARF_DBI_BASE_ADDR);
+ qcom_pcie_configure_dbi_atu_base(pcie);
writel(DEVICE_TYPE_RC, pcie->parf + PARF_DEVICE_TYPE);
writel(BYPASS | MSTR_AXI_CLK_EN | AHB_CLK_EN,
@@ -1152,12 +1258,40 @@ static int qcom_pcie_post_init_2_9_0(struct qcom_pcie *pcie)
return 0;
}
-static int qcom_pcie_link_up(struct dw_pcie *pci)
+static bool qcom_pcie_link_up(struct dw_pcie *pci)
{
u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
u16 val = readw(pci->dbi_base + offset + PCI_EXP_LNKSTA);
- return !!(val & PCI_EXP_LNKSTA_DLLLA);
+ return val & PCI_EXP_LNKSTA_DLLLA;
+}
+
+static void qcom_pcie_phy_power_off(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_port *port;
+
+ list_for_each_entry(port, &pcie->ports, list)
+ phy_power_off(port->phy);
+}
+
+static int qcom_pcie_phy_power_on(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_port *port;
+ int ret;
+
+ list_for_each_entry(port, &pcie->ports, list) {
+ ret = phy_set_mode_ext(port->phy, PHY_MODE_PCIE, PHY_MODE_PCIE_RC);
+ if (ret)
+ return ret;
+
+ ret = phy_power_on(port->phy);
+ if (ret) {
+ qcom_pcie_phy_power_off(pcie);
+ return ret;
+ }
+ }
+
+ return 0;
}
static int qcom_pcie_host_init(struct dw_pcie_rp *pp)
@@ -1172,11 +1306,7 @@ static int qcom_pcie_host_init(struct dw_pcie_rp *pp)
if (ret)
return ret;
- ret = phy_set_mode_ext(pcie->phy, PHY_MODE_PCIE, PHY_MODE_PCIE_RC);
- if (ret)
- goto err_deinit;
-
- ret = phy_power_on(pcie->phy);
+ ret = qcom_pcie_phy_power_on(pcie);
if (ret)
goto err_deinit;
@@ -1199,7 +1329,7 @@ static int qcom_pcie_host_init(struct dw_pcie_rp *pp)
err_assert_reset:
qcom_ep_reset_assert(pcie);
err_disable_phy:
- phy_power_off(pcie->phy);
+ qcom_pcie_phy_power_off(pcie);
err_deinit:
pcie->cfg->ops->deinit(pcie);
@@ -1212,13 +1342,23 @@ static void qcom_pcie_host_deinit(struct dw_pcie_rp *pp)
struct qcom_pcie *pcie = to_qcom_pcie(pci);
qcom_ep_reset_assert(pcie);
- phy_power_off(pcie->phy);
+ qcom_pcie_phy_power_off(pcie);
pcie->cfg->ops->deinit(pcie);
}
+static void qcom_pcie_host_post_init(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct qcom_pcie *pcie = to_qcom_pcie(pci);
+
+ if (pcie->cfg->ops->host_post_init)
+ pcie->cfg->ops->host_post_init(pcie);
+}
+
static const struct dw_pcie_host_ops qcom_pcie_dw_ops = {
- .host_init = qcom_pcie_host_init,
- .host_deinit = qcom_pcie_host_deinit,
+ .init = qcom_pcie_host_init,
+ .deinit = qcom_pcie_host_deinit,
+ .post_init = qcom_pcie_host_post_init,
};
/* Qcom IP rev.: 2.1.0 Synopsys IP rev.: 4.01a */
@@ -1280,11 +1420,22 @@ static const struct qcom_pcie_ops ops_1_9_0 = {
.get_resources = qcom_pcie_get_resources_2_7_0,
.init = qcom_pcie_init_2_7_0,
.post_init = qcom_pcie_post_init_2_7_0,
+ .host_post_init = qcom_pcie_host_post_init_2_7_0,
.deinit = qcom_pcie_deinit_2_7_0,
.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
.config_sid = qcom_pcie_config_sid_1_9_0,
};
+/* Qcom IP rev.: 1.21.0 Synopsys IP rev.: 5.60a */
+static const struct qcom_pcie_ops ops_1_21_0 = {
+ .get_resources = qcom_pcie_get_resources_2_7_0,
+ .init = qcom_pcie_init_2_7_0,
+ .post_init = qcom_pcie_post_init_2_7_0,
+ .host_post_init = qcom_pcie_host_post_init_2_7_0,
+ .deinit = qcom_pcie_deinit_2_7_0,
+ .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
+};
+
/* Qcom IP rev.: 2.9.0 Synopsys IP rev.: 5.00a */
static const struct qcom_pcie_ops ops_2_9_0 = {
.get_resources = qcom_pcie_get_resources_2_9_0,
@@ -1302,6 +1453,11 @@ static const struct qcom_pcie_cfg cfg_1_9_0 = {
.ops = &ops_1_9_0,
};
+static const struct qcom_pcie_cfg cfg_1_34_0 = {
+ .ops = &ops_1_9_0,
+ .override_no_snoop = true,
+};
+
static const struct qcom_pcie_cfg cfg_2_1_0 = {
.ops = &ops_2_1_0,
};
@@ -1326,9 +1482,19 @@ static const struct qcom_pcie_cfg cfg_2_9_0 = {
.ops = &ops_2_9_0,
};
+static const struct qcom_pcie_cfg cfg_sc8280xp = {
+ .ops = &ops_1_21_0,
+ .no_l0s = true,
+};
+
+static const struct qcom_pcie_cfg cfg_fw_managed = {
+ .firmware_managed = true,
+};
+
static const struct dw_pcie_ops dw_pcie_ops = {
.link_up = qcom_pcie_link_up,
.start_link = qcom_pcie_start_link,
+ .assert_perst = qcom_pcie_assert_perst,
};
static int qcom_pcie_icc_init(struct qcom_pcie *pcie)
@@ -1340,6 +1506,9 @@ static int qcom_pcie_icc_init(struct qcom_pcie *pcie)
if (IS_ERR(pcie->icc_mem))
return PTR_ERR(pcie->icc_mem);
+ pcie->icc_cpu = devm_of_icc_get(pci->dev, "cpu-pcie");
+ if (IS_ERR(pcie->icc_cpu))
+ return PTR_ERR(pcie->icc_cpu);
/*
* Some Qualcomm platforms require interconnect bandwidth constraints
* to be set before enabling interconnect clocks.
@@ -1347,25 +1516,38 @@ static int qcom_pcie_icc_init(struct qcom_pcie *pcie)
* Set an initial peak bandwidth corresponding to single-lane Gen 1
* for the pcie-mem path.
*/
- ret = icc_set_bw(pcie->icc_mem, 0, MBps_to_icc(250));
+ ret = icc_set_bw(pcie->icc_mem, 0, QCOM_PCIE_LINK_SPEED_TO_BW(1));
if (ret) {
- dev_err(pci->dev, "failed to set interconnect bandwidth: %d\n",
+ dev_err(pci->dev, "Failed to set bandwidth for PCIe-MEM interconnect path: %d\n",
ret);
return ret;
}
+ /*
+ * Since the CPU-PCIe path is only used for activities like register
+ * access of the host controller and endpoint Config/BAR space access,
+	 * the HW team recommends using a minimal bandwidth of 1 KBps just to
+	 * keep the path active.
+ */
+ ret = icc_set_bw(pcie->icc_cpu, 0, kBps_to_icc(1));
+ if (ret) {
+ dev_err(pci->dev, "Failed to set bandwidth for CPU-PCIe interconnect path: %d\n",
+ ret);
+ icc_set_bw(pcie->icc_mem, 0, 0);
+ return ret;
+ }
+
return 0;
}
-static void qcom_pcie_icc_update(struct qcom_pcie *pcie)
+static void qcom_pcie_icc_opp_update(struct qcom_pcie *pcie)
{
+ u32 offset, status, width, speed;
struct dw_pcie *pci = pcie->pci;
- u32 offset, status, bw;
- int speed, width;
- int ret;
-
- if (!pcie->icc_mem)
- return;
+ struct dev_pm_opp_key key = {};
+ unsigned long freq_kbps;
+ struct dev_pm_opp *opp;
+ int ret, freq_mbps;
offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
status = readw(pci->dbi_base + offset + PCI_EXP_LNKSTA);
@@ -1377,25 +1559,40 @@ static void qcom_pcie_icc_update(struct qcom_pcie *pcie)
speed = FIELD_GET(PCI_EXP_LNKSTA_CLS, status);
width = FIELD_GET(PCI_EXP_LNKSTA_NLW, status);
- switch (speed) {
- case 1:
- bw = MBps_to_icc(250);
- break;
- case 2:
- bw = MBps_to_icc(500);
- break;
- default:
- WARN_ON_ONCE(1);
- fallthrough;
- case 3:
- bw = MBps_to_icc(985);
- break;
- }
-
- ret = icc_set_bw(pcie->icc_mem, 0, width * bw);
- if (ret) {
- dev_err(pci->dev, "failed to set interconnect bandwidth: %d\n",
- ret);
+ if (pcie->icc_mem) {
+ ret = icc_set_bw(pcie->icc_mem, 0,
+ width * QCOM_PCIE_LINK_SPEED_TO_BW(speed));
+ if (ret) {
+ dev_err(pci->dev, "Failed to set bandwidth for PCIe-MEM interconnect path: %d\n",
+ ret);
+ }
+ } else if (pcie->use_pm_opp) {
+ freq_mbps = pcie_dev_speed_mbps(pcie_link_speed[speed]);
+ if (freq_mbps < 0)
+ return;
+
+ freq_kbps = freq_mbps * KILO;
+ opp = dev_pm_opp_find_level_exact(pci->dev, speed);
+ if (IS_ERR(opp)) {
+			/* opp-level is not defined; use only the frequency */
+ opp = dev_pm_opp_find_freq_exact(pci->dev, freq_kbps * width,
+ true);
+ } else {
+			/* Drop the reference to the level-based OPP before the combined lookup */
+ dev_pm_opp_put(opp);
+
+ key.freq = freq_kbps * width;
+ key.level = speed;
+ key.bw = 0;
+ opp = dev_pm_opp_find_key_exact(pci->dev, &key, true);
+ }
+ if (!IS_ERR(opp)) {
+ ret = dev_pm_opp_set_opp(pci->dev, opp);
+ if (ret)
+ dev_err(pci->dev, "Failed to set OPP for freq (%lu): %d\n",
+ freq_kbps * width, ret);
+ dev_pm_opp_put(opp);
+ }
}
}
@@ -1436,35 +1633,238 @@ static void qcom_pcie_init_debugfs(struct qcom_pcie *pcie)
qcom_pcie_link_transition_count);
}
+static irqreturn_t qcom_pcie_global_irq_thread(int irq, void *data)
+{
+ struct qcom_pcie *pcie = data;
+ struct dw_pcie_rp *pp = &pcie->pci->pp;
+ struct device *dev = pcie->pci->dev;
+ u32 status = readl_relaxed(pcie->parf + PARF_INT_ALL_STATUS);
+
+ writel_relaxed(status, pcie->parf + PARF_INT_ALL_CLEAR);
+
+ if (FIELD_GET(PARF_INT_ALL_LINK_UP, status)) {
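+		/* Give the downstream device time to become ready for config accesses after Link Up */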
+ msleep(PCIE_RESET_CONFIG_WAIT_MS);
+ dev_dbg(dev, "Received Link up event. Starting enumeration!\n");
+ /* Rescan the bus to enumerate endpoint devices */
+ pci_lock_rescan_remove();
+ pci_rescan_bus(pp->bridge->bus);
+ pci_unlock_rescan_remove();
+
+ qcom_pcie_icc_opp_update(pcie);
+ } else {
+ dev_WARN_ONCE(dev, 1, "Received unknown event. INT_STATUS: 0x%08x\n",
+ status);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void qcom_pci_free_msi(void *ptr)
+{
+ struct dw_pcie_rp *pp = (struct dw_pcie_rp *)ptr;
+
+ if (pp && pp->has_msi_ctrl)
+ dw_pcie_free_msi(pp);
+}
+
+static int qcom_pcie_ecam_host_init(struct pci_config_window *cfg)
+{
+ struct device *dev = cfg->parent;
+ struct dw_pcie_rp *pp;
+ struct dw_pcie *pci;
+ int ret;
+
+ pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
+ if (!pci)
+ return -ENOMEM;
+
+ pci->dev = dev;
+ pp = &pci->pp;
+ pci->dbi_base = cfg->win;
+ pp->num_vectors = MSI_DEF_NUM_VECTORS;
+
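+	/* Config accesses go through ECAM; only the integrated DWC MSI controller needs setup here */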
+ ret = dw_pcie_msi_host_init(pp);
+ if (ret)
+ return ret;
+
+ pp->has_msi_ctrl = true;
+ dw_pcie_msi_init(pp);
+
+ return devm_add_action_or_reset(dev, qcom_pci_free_msi, pp);
+}
+
+static const struct pci_ecam_ops pci_qcom_ecam_ops = {
+ .init = qcom_pcie_ecam_host_init,
+ .pci_ops = {
+ .map_bus = pci_ecam_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+ }
+};
+
+static int qcom_pcie_parse_port(struct qcom_pcie *pcie, struct device_node *node)
+{
+ struct device *dev = pcie->pci->dev;
+ struct qcom_pcie_port *port;
+ struct gpio_desc *reset;
+ struct phy *phy;
+ int ret;
+
+ reset = devm_fwnode_gpiod_get(dev, of_fwnode_handle(node),
+ "reset", GPIOD_OUT_HIGH, "PERST#");
+ if (IS_ERR(reset))
+ return PTR_ERR(reset);
+
+ phy = devm_of_phy_get(dev, node, NULL);
+ if (IS_ERR(phy))
+ return PTR_ERR(phy);
+
+ port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
+ if (!port)
+ return -ENOMEM;
+
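+	/* Initialize the PHY here; it is powered on later from qcom_pcie_host_init() */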
+ ret = phy_init(phy);
+ if (ret)
+ return ret;
+
+ port->reset = reset;
+ port->phy = phy;
+ INIT_LIST_HEAD(&port->list);
+ list_add_tail(&port->list, &pcie->ports);
+
+ return 0;
+}
+
+static int qcom_pcie_parse_ports(struct qcom_pcie *pcie)
+{
+ struct device *dev = pcie->pci->dev;
+ struct qcom_pcie_port *port, *tmp;
+ int ret = -ENOENT;
+
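+	/* ret stays -ENOENT if no "pci" child node is found, making probe fall back to the legacy binding */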
+ for_each_available_child_of_node_scoped(dev->of_node, of_port) {
+ if (!of_node_is_type(of_port, "pci"))
+ continue;
+ ret = qcom_pcie_parse_port(pcie, of_port);
+ if (ret)
+ goto err_port_del;
+ }
+
+ return ret;
+
+err_port_del:
+ list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
+ phy_exit(port->phy);
+ list_del(&port->list);
+ }
+
+ return ret;
+}
+
+static int qcom_pcie_parse_legacy_binding(struct qcom_pcie *pcie)
+{
+ struct device *dev = pcie->pci->dev;
+ struct qcom_pcie_port *port;
+ struct gpio_desc *reset;
+ struct phy *phy;
+ int ret;
+
+ phy = devm_phy_optional_get(dev, "pciephy");
+ if (IS_ERR(phy))
+ return PTR_ERR(phy);
+
+ reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_HIGH);
+ if (IS_ERR(reset))
+ return PTR_ERR(reset);
+
+ ret = phy_init(phy);
+ if (ret)
+ return ret;
+
+ port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
+ if (!port)
+ return -ENOMEM;
+
+ port->reset = reset;
+ port->phy = phy;
+ INIT_LIST_HEAD(&port->list);
+ list_add_tail(&port->list, &pcie->ports);
+
+ return 0;
+}
+
static int qcom_pcie_probe(struct platform_device *pdev)
{
const struct qcom_pcie_cfg *pcie_cfg;
+ unsigned long max_freq = ULONG_MAX;
+ struct qcom_pcie_port *port, *tmp;
struct device *dev = &pdev->dev;
+ struct dev_pm_opp *opp;
struct qcom_pcie *pcie;
struct dw_pcie_rp *pp;
struct resource *res;
struct dw_pcie *pci;
- int ret;
+ int ret, irq;
+ char *name;
pcie_cfg = of_device_get_match_data(dev);
- if (!pcie_cfg || !pcie_cfg->ops) {
- dev_err(dev, "Invalid platform data\n");
- return -EINVAL;
+ if (!pcie_cfg) {
+ dev_err(dev, "No platform data\n");
+ return -ENODATA;
}
- pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
- if (!pcie)
- return -ENOMEM;
-
- pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
- if (!pci)
- return -ENOMEM;
+ if (!pcie_cfg->firmware_managed && !pcie_cfg->ops) {
+ dev_err(dev, "No platform ops\n");
+ return -ENODATA;
+ }
pm_runtime_enable(dev);
ret = pm_runtime_get_sync(dev);
if (ret < 0)
goto err_pm_runtime_put;
+ if (pcie_cfg->firmware_managed) {
+ struct pci_host_bridge *bridge;
+ struct pci_config_window *cfg;
+
+ bridge = devm_pci_alloc_host_bridge(dev, 0);
+ if (!bridge) {
+ ret = -ENOMEM;
+ goto err_pm_runtime_put;
+ }
+
+ /* Parse and map our ECAM configuration space area */
+ cfg = pci_host_common_ecam_create(dev, bridge,
+ &pci_qcom_ecam_ops);
+ if (IS_ERR(cfg)) {
+ ret = PTR_ERR(cfg);
+ goto err_pm_runtime_put;
+ }
+
+ bridge->sysdata = cfg;
+ bridge->ops = (struct pci_ops *)&pci_qcom_ecam_ops.pci_ops;
+ bridge->msi_domain = true;
+
+ ret = pci_host_probe(bridge);
+ if (ret)
+ goto err_pm_runtime_put;
+
+ return 0;
+ }
+
+ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+ if (!pcie) {
+ ret = -ENOMEM;
+ goto err_pm_runtime_put;
+ }
+
+ pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
+ if (!pci) {
+ ret = -ENOMEM;
+ goto err_pm_runtime_put;
+ }
+
+ INIT_LIST_HEAD(&pcie->ports);
+
pci->dev = dev;
pci->ops = &dw_pcie_ops;
pp = &pci->pp;
@@ -1473,24 +1873,12 @@ static int qcom_pcie_probe(struct platform_device *pdev)
pcie->cfg = pcie_cfg;
- pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_HIGH);
- if (IS_ERR(pcie->reset)) {
- ret = PTR_ERR(pcie->reset);
- goto err_pm_runtime_put;
- }
-
pcie->parf = devm_platform_ioremap_resource_byname(pdev, "parf");
if (IS_ERR(pcie->parf)) {
ret = PTR_ERR(pcie->parf);
goto err_pm_runtime_put;
}
- pcie->elbi = devm_platform_ioremap_resource_byname(pdev, "elbi");
- if (IS_ERR(pcie->elbi)) {
- ret = PTR_ERR(pcie->elbi);
- goto err_pm_runtime_put;
- }
-
/* MHI region is optional */
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mhi");
if (res) {
@@ -1501,15 +1889,45 @@ static int qcom_pcie_probe(struct platform_device *pdev)
}
}
- pcie->phy = devm_phy_optional_get(dev, "pciephy");
- if (IS_ERR(pcie->phy)) {
- ret = PTR_ERR(pcie->phy);
+ /* OPP table is optional */
+ ret = devm_pm_opp_of_add_table(dev);
+ if (ret && ret != -ENODEV) {
+ dev_err_probe(dev, ret, "Failed to add OPP table\n");
goto err_pm_runtime_put;
}
- ret = qcom_pcie_icc_init(pcie);
- if (ret)
- goto err_pm_runtime_put;
+ /*
+	 * Before the PCIe link is initialized, vote for the highest OPP in the
+	 * OPP table, so that the maximum voltage corner is requested and the
+	 * link can come up at the maximum supported speed. At the end of
+	 * probe(), the OPP is updated using qcom_pcie_icc_opp_update().
+ */
+ if (!ret) {
+ opp = dev_pm_opp_find_freq_floor(dev, &max_freq);
+ if (IS_ERR(opp)) {
+ ret = PTR_ERR(opp);
+ dev_err_probe(pci->dev, ret,
+ "Unable to find max freq OPP\n");
+ goto err_pm_runtime_put;
+ } else {
+ ret = dev_pm_opp_set_opp(dev, opp);
+ }
+
+ dev_pm_opp_put(opp);
+ if (ret) {
+ dev_err_probe(pci->dev, ret,
+ "Failed to set OPP for freq %lu\n",
+ max_freq);
+ goto err_pm_runtime_put;
+ }
+
+ pcie->use_pm_opp = true;
+ } else {
+		/* ICC init is only needed here; when OPP is supported, bandwidth votes are handled through OPP */
+ ret = qcom_pcie_icc_init(pcie);
+ if (ret)
+ goto err_pm_runtime_put;
+ }
ret = pcie->cfg->ops->get_resources(pcie);
if (ret)
@@ -1517,27 +1935,71 @@ static int qcom_pcie_probe(struct platform_device *pdev)
pp->ops = &qcom_pcie_dw_ops;
- ret = phy_init(pcie->phy);
- if (ret)
- goto err_pm_runtime_put;
+ ret = qcom_pcie_parse_ports(pcie);
+ if (ret) {
+ if (ret != -ENOENT) {
+ dev_err_probe(pci->dev, ret,
+ "Failed to parse Root Port: %d\n", ret);
+ goto err_pm_runtime_put;
+ }
+
+ /*
+		 * If the properties are not populated in the Root Port node,
+		 * fall back to the legacy method of parsing the Host Bridge
+		 * node to maintain DT backwards compatibility.
+ */
+ ret = qcom_pcie_parse_legacy_binding(pcie);
+ if (ret)
+ goto err_pm_runtime_put;
+ }
platform_set_drvdata(pdev, pcie);
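+	/* An optional "global" IRQ signals Link Up asynchronously; enumeration is then deferred to its handler */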
+ irq = platform_get_irq_byname_optional(pdev, "global");
+ if (irq > 0)
+ pp->use_linkup_irq = true;
+
ret = dw_pcie_host_init(pp);
if (ret) {
dev_err(dev, "cannot initialize host\n");
goto err_phy_exit;
}
- qcom_pcie_icc_update(pcie);
+ name = devm_kasprintf(dev, GFP_KERNEL, "qcom_pcie_global_irq%d",
+ pci_domain_nr(pp->bridge->bus));
+ if (!name) {
+ ret = -ENOMEM;
+ goto err_host_deinit;
+ }
+
+ if (irq > 0) {
+ ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+ qcom_pcie_global_irq_thread,
+ IRQF_ONESHOT, name, pcie);
+ if (ret) {
+ dev_err_probe(&pdev->dev, ret,
+ "Failed to request Global IRQ\n");
+ goto err_host_deinit;
+ }
+
+ writel_relaxed(PARF_INT_ALL_LINK_UP | PARF_INT_MSI_DEV_0_7,
+ pcie->parf + PARF_INT_ALL_MASK);
+ }
+
+ qcom_pcie_icc_opp_update(pcie);
if (pcie->mhi)
qcom_pcie_init_debugfs(pcie);
return 0;
+err_host_deinit:
+ dw_pcie_host_deinit(pp);
err_phy_exit:
- phy_exit(pcie->phy);
+ list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
+ phy_exit(port->phy);
+ list_del(&port->list);
+ }
err_pm_runtime_put:
pm_runtime_put(dev);
pm_runtime_disable(dev);
@@ -1547,17 +2009,25 @@ err_pm_runtime_put:
static int qcom_pcie_suspend_noirq(struct device *dev)
{
- struct qcom_pcie *pcie = dev_get_drvdata(dev);
- int ret;
+ struct qcom_pcie *pcie;
+ int ret = 0;
+
+ pcie = dev_get_drvdata(dev);
+ if (!pcie)
+ return 0;
/*
* Set minimum bandwidth required to keep data path functional during
* suspend.
*/
- ret = icc_set_bw(pcie->icc_mem, 0, kBps_to_icc(1));
- if (ret) {
- dev_err(dev, "Failed to set interconnect bandwidth: %d\n", ret);
- return ret;
+ if (pcie->icc_mem) {
+ ret = icc_set_bw(pcie->icc_mem, 0, kBps_to_icc(1));
+ if (ret) {
+ dev_err(dev,
+ "Failed to set bandwidth for PCIe-MEM interconnect path: %d\n",
+ ret);
+ return ret;
+ }
}
/*
@@ -1580,14 +2050,40 @@ static int qcom_pcie_suspend_noirq(struct device *dev)
pcie->suspended = true;
}
- return 0;
+ /*
+	 * Only disable the CPU-PCIe interconnect path if the suspend is not
+	 * Suspend-to-RAM (S2RAM), because on some platforms DBI access can
+	 * happen very late during S2RAM and an inactive CPU-PCIe interconnect
+	 * path may lead to a NoC error.
+ */
+ if (pm_suspend_target_state != PM_SUSPEND_MEM) {
+ ret = icc_disable(pcie->icc_cpu);
+ if (ret)
+ dev_err(dev, "Failed to disable CPU-PCIe interconnect path: %d\n", ret);
+
+ if (pcie->use_pm_opp)
+ dev_pm_opp_set_opp(pcie->pci->dev, NULL);
+ }
+ return ret;
}
static int qcom_pcie_resume_noirq(struct device *dev)
{
- struct qcom_pcie *pcie = dev_get_drvdata(dev);
+ struct qcom_pcie *pcie;
int ret;
+ pcie = dev_get_drvdata(dev);
+ if (!pcie)
+ return 0;
+
+ if (pm_suspend_target_state != PM_SUSPEND_MEM) {
+ ret = icc_enable(pcie->icc_cpu);
+ if (ret) {
+ dev_err(dev, "Failed to enable CPU-PCIe interconnect path: %d\n", ret);
+ return ret;
+ }
+ }
+
if (pcie->suspended) {
ret = qcom_pcie_host_init(&pcie->pci->pp);
if (ret)
@@ -1596,7 +2092,7 @@ static int qcom_pcie_resume_noirq(struct device *dev)
pcie->suspended = false;
}
- qcom_pcie_icc_update(pcie);
+ qcom_pcie_icc_opp_update(pcie);
return 0;
}
@@ -1605,17 +2101,21 @@ static const struct of_device_id qcom_pcie_match[] = {
{ .compatible = "qcom,pcie-apq8064", .data = &cfg_2_1_0 },
{ .compatible = "qcom,pcie-apq8084", .data = &cfg_1_0_0 },
{ .compatible = "qcom,pcie-ipq4019", .data = &cfg_2_4_0 },
+ { .compatible = "qcom,pcie-ipq5018", .data = &cfg_2_9_0 },
{ .compatible = "qcom,pcie-ipq6018", .data = &cfg_2_9_0 },
{ .compatible = "qcom,pcie-ipq8064", .data = &cfg_2_1_0 },
{ .compatible = "qcom,pcie-ipq8064-v2", .data = &cfg_2_1_0 },
{ .compatible = "qcom,pcie-ipq8074", .data = &cfg_2_3_3 },
{ .compatible = "qcom,pcie-ipq8074-gen3", .data = &cfg_2_9_0 },
+ { .compatible = "qcom,pcie-ipq9574", .data = &cfg_2_9_0 },
{ .compatible = "qcom,pcie-msm8996", .data = &cfg_2_3_2 },
{ .compatible = "qcom,pcie-qcs404", .data = &cfg_2_4_0 },
- { .compatible = "qcom,pcie-sa8540p", .data = &cfg_1_9_0 },
+ { .compatible = "qcom,pcie-sa8255p", .data = &cfg_fw_managed },
+ { .compatible = "qcom,pcie-sa8540p", .data = &cfg_sc8280xp },
+	{ .compatible = "qcom,pcie-sa8775p", .data = &cfg_1_34_0 },
{ .compatible = "qcom,pcie-sc7280", .data = &cfg_1_9_0 },
{ .compatible = "qcom,pcie-sc8180x", .data = &cfg_1_9_0 },
- { .compatible = "qcom,pcie-sc8280xp", .data = &cfg_1_9_0 },
+ { .compatible = "qcom,pcie-sc8280xp", .data = &cfg_sc8280xp },
{ .compatible = "qcom,pcie-sdm845", .data = &cfg_2_7_0 },
{ .compatible = "qcom,pcie-sdx55", .data = &cfg_1_9_0 },
{ .compatible = "qcom,pcie-sm8150", .data = &cfg_1_9_0 },
@@ -1624,6 +2124,7 @@ static const struct of_device_id qcom_pcie_match[] = {
{ .compatible = "qcom,pcie-sm8450-pcie0", .data = &cfg_1_9_0 },
{ .compatible = "qcom,pcie-sm8450-pcie1", .data = &cfg_1_9_0 },
{ .compatible = "qcom,pcie-sm8550", .data = &cfg_1_9_0 },
+ { .compatible = "qcom,pcie-x1e80100", .data = &cfg_sc8280xp },
{ }
};
diff --git a/drivers/pci/controller/dwc/pcie-rcar-gen4.c b/drivers/pci/controller/dwc/pcie-rcar-gen4.c
new file mode 100644
index 000000000000..80778917d2dd
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-rcar-gen4.c
@@ -0,0 +1,804 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * PCIe controller driver for Renesas R-Car Gen4 Series SoCs
+ * Copyright (C) 2022-2023 Renesas Electronics Corporation
+ *
+ * The r8a779g0 (R-Car V4H) controller requires specific firmware to be
+ * provided to initialize the PHY; otherwise, the PCIe controller will not
+ * work.
+ */
+
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+
+#include "../../pci.h"
+#include "pcie-designware.h"
+
+/* Renesas-specific */
+/* PCIe Mode Setting Register 0 */
+#define PCIEMSR0 0x0000
+#define APP_SRIS_MODE BIT(6)
+#define DEVICE_TYPE_EP 0
+#define DEVICE_TYPE_RC BIT(4)
+#define BIFUR_MOD_SET_ON BIT(0)
+
+/* PCIe Interrupt Status 0 */
+#define PCIEINTSTS0 0x0084
+
+/* PCIe Interrupt Status 0 Enable */
+#define PCIEINTSTS0EN 0x0310
+#define MSI_CTRL_INT BIT(26)
+#define SMLH_LINK_UP BIT(7)
+#define RDLH_LINK_UP BIT(6)
+
+/* PCIe DMA Interrupt Status Enable */
+#define PCIEDMAINTSTSEN 0x0314
+#define PCIEDMAINTSTSEN_INIT GENMASK(15, 0)
+
+/* Port Logic Registers 89 */
+#define PRTLGC89 0x0b70
+
+/* Port Logic Registers 90 */
+#define PRTLGC90 0x0b74
+
+/* PCIe Reset Control Register 1 */
+#define PCIERSTCTRL1 0x0014
+#define APP_HOLD_PHY_RST BIT(16)
+#define APP_LTSSM_ENABLE BIT(0)
+
+/* PCIe Power Management Control */
+#define PCIEPWRMNGCTRL 0x0070
+#define APP_CLK_REQ_N BIT(11)
+#define APP_CLK_PM_EN BIT(10)
+
+#define RCAR_NUM_SPEED_CHANGE_RETRIES 10
+#define RCAR_MAX_LINK_SPEED 4
+
+#define RCAR_GEN4_PCIE_EP_FUNC_DBI_OFFSET 0x1000
+#define RCAR_GEN4_PCIE_EP_FUNC_DBI2_OFFSET 0x800
+
+#define RCAR_GEN4_PCIE_FIRMWARE_NAME "rcar_gen4_pcie.bin"
+#define RCAR_GEN4_PCIE_FIRMWARE_BASE_ADDR 0xc000
+MODULE_FIRMWARE(RCAR_GEN4_PCIE_FIRMWARE_NAME);
+
+struct rcar_gen4_pcie;
+struct rcar_gen4_pcie_drvdata {
+ void (*additional_common_init)(struct rcar_gen4_pcie *rcar);
+ int (*ltssm_control)(struct rcar_gen4_pcie *rcar, bool enable);
+ enum dw_pcie_device_mode mode;
+};
+
+struct rcar_gen4_pcie {
+ struct dw_pcie dw;
+ void __iomem *base;
+ void __iomem *phy_base;
+ struct platform_device *pdev;
+ const struct rcar_gen4_pcie_drvdata *drvdata;
+};
+#define to_rcar_gen4_pcie(_dw) container_of(_dw, struct rcar_gen4_pcie, dw)
+
+/* Common */
+static bool rcar_gen4_pcie_link_up(struct dw_pcie *dw)
+{
+ struct rcar_gen4_pcie *rcar = to_rcar_gen4_pcie(dw);
+ u32 val, mask;
+
+ val = readl(rcar->base + PCIEINTSTS0);
+ mask = RDLH_LINK_UP | SMLH_LINK_UP;
+
+ return (val & mask) == mask;
+}
+
+/*
+ * Manually initiate the speed change. Return 0 if change succeeded; otherwise
+ * -ETIMEDOUT.
+ */
+static int rcar_gen4_pcie_speed_change(struct dw_pcie *dw)
+{
+ u32 val;
+ int i;
+
+ val = dw_pcie_readl_dbi(dw, PCIE_LINK_WIDTH_SPEED_CONTROL);
+ val &= ~PORT_LOGIC_SPEED_CHANGE;
+ dw_pcie_writel_dbi(dw, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
+
+ val = dw_pcie_readl_dbi(dw, PCIE_LINK_WIDTH_SPEED_CONTROL);
+ val |= PORT_LOGIC_SPEED_CHANGE;
+ dw_pcie_writel_dbi(dw, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
+
+ for (i = 0; i < RCAR_NUM_SPEED_CHANGE_RETRIES; i++) {
+ val = dw_pcie_readl_dbi(dw, PCIE_LINK_WIDTH_SPEED_CONTROL);
+ if (!(val & PORT_LOGIC_SPEED_CHANGE))
+ return 0;
+ usleep_range(10000, 11000);
+ }
+
+ return -ETIMEDOUT;
+}
+
+/*
+ * Enable LTSSM of this controller and manually initiate the speed change.
+ * Always return 0.
+ */
+static int rcar_gen4_pcie_start_link(struct dw_pcie *dw)
+{
+ struct rcar_gen4_pcie *rcar = to_rcar_gen4_pcie(dw);
+ int i, changes, ret;
+
+ if (rcar->drvdata->ltssm_control) {
+ ret = rcar->drvdata->ltssm_control(rcar, true);
+ if (ret)
+ return ret;
+ }
+
+ /*
+	 * A direct speed change, retried if necessary, is required here if
+	 * max_link_speed is PCIe Gen2 or higher.
+ */
+ changes = min_not_zero(dw->max_link_speed, RCAR_MAX_LINK_SPEED) - 1;
+
+ /*
+	 * Since dw_pcie_setup_rc() already initiates one speed change, PCIe
+	 * Gen2 will be trained there. So, in RC mode, only the remaining
+	 * changes up to PCIe Gen4 are needed here.
+ */
+ if (changes && rcar->drvdata->mode == DW_PCIE_RC_TYPE)
+ changes--;
+
+ for (i = 0; i < changes; i++) {
+		/* In EP mode the link partner may not be connected yet, so break the loop */
+ if (rcar_gen4_pcie_speed_change(dw))
+ break;
+ }
+
+ return 0;
+}
+
+static void rcar_gen4_pcie_stop_link(struct dw_pcie *dw)
+{
+ struct rcar_gen4_pcie *rcar = to_rcar_gen4_pcie(dw);
+
+ if (rcar->drvdata->ltssm_control)
+ rcar->drvdata->ltssm_control(rcar, false);
+}
+
+static int rcar_gen4_pcie_common_init(struct rcar_gen4_pcie *rcar)
+{
+ struct dw_pcie *dw = &rcar->dw;
+ u32 val;
+ int ret;
+
+ ret = clk_bulk_prepare_enable(DW_PCIE_NUM_CORE_CLKS, dw->core_clks);
+ if (ret) {
+ dev_err(dw->dev, "Enabling core clocks failed\n");
+ return ret;
+ }
+
+ if (!reset_control_status(dw->core_rsts[DW_PCIE_PWR_RST].rstc)) {
+ reset_control_assert(dw->core_rsts[DW_PCIE_PWR_RST].rstc);
+ /*
+ * R-Car V4H Reference Manual R19UH0186EJ0130 Rev.1.30 Apr.
+ * 21, 2025 page 585 Figure 9.3.2 Software Reset flow (B)
+		 * indicates that for peripherals in the HSC domain, after
+		 * reset has been asserted by writing the matching reset bit
+		 * into the SRCR register, it is mandatory to wait 1 ms.
+ */
+ fsleep(1000);
+ }
+
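+	/* Select RC or EP operation and lane bifurcation via the mode setting register */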
+ val = readl(rcar->base + PCIEMSR0);
+ if (rcar->drvdata->mode == DW_PCIE_RC_TYPE) {
+ val |= DEVICE_TYPE_RC;
+ } else if (rcar->drvdata->mode == DW_PCIE_EP_TYPE) {
+ val |= DEVICE_TYPE_EP;
+ } else {
+ ret = -EINVAL;
+ goto err_unprepare;
+ }
+
+ if (dw->num_lanes < 4)
+ val |= BIFUR_MOD_SET_ON;
+
+ writel(val, rcar->base + PCIEMSR0);
+
+ ret = reset_control_deassert(dw->core_rsts[DW_PCIE_PWR_RST].rstc);
+ if (ret)
+ goto err_unprepare;
+
+ /*
+	 * Ensure the reset is latched and the core is ready for DBI access.
+ * On R-Car V4H, the PCIe reset is asynchronous and does not take
+ * effect immediately, but needs a short time to complete. In case
+ * DBI access happens in that short time, that access generates an
+ * SError. To make sure that condition can never happen, read back the
+ * state of the reset, which should turn the asynchronous reset into
+	 * state of the reset, which should turn the asynchronous reset into a
+	 * synchronous one, and wait a little over 1 ms to add an additional
+ */
+ reset_control_status(dw->core_rsts[DW_PCIE_PWR_RST].rstc);
+ fsleep(1000);
+
+ if (rcar->drvdata->additional_common_init)
+ rcar->drvdata->additional_common_init(rcar);
+
+ return 0;
+
+err_unprepare:
+ clk_bulk_disable_unprepare(DW_PCIE_NUM_CORE_CLKS, dw->core_clks);
+
+ return ret;
+}
+
+static void rcar_gen4_pcie_common_deinit(struct rcar_gen4_pcie *rcar)
+{
+ struct dw_pcie *dw = &rcar->dw;
+
+ reset_control_assert(dw->core_rsts[DW_PCIE_PWR_RST].rstc);
+ clk_bulk_disable_unprepare(DW_PCIE_NUM_CORE_CLKS, dw->core_clks);
+}
+
+static int rcar_gen4_pcie_prepare(struct rcar_gen4_pcie *rcar)
+{
+ struct device *dev = rcar->dw.dev;
+ int err;
+
+ pm_runtime_enable(dev);
+ err = pm_runtime_resume_and_get(dev);
+ if (err < 0) {
+ dev_err(dev, "Runtime resume failed\n");
+ pm_runtime_disable(dev);
+ }
+
+ return err;
+}
+
+static void rcar_gen4_pcie_unprepare(struct rcar_gen4_pcie *rcar)
+{
+ struct device *dev = rcar->dw.dev;
+
+ pm_runtime_put(dev);
+ pm_runtime_disable(dev);
+}
+
+static int rcar_gen4_pcie_get_resources(struct rcar_gen4_pcie *rcar)
+{
+ rcar->phy_base = devm_platform_ioremap_resource_byname(rcar->pdev, "phy");
+ if (IS_ERR(rcar->phy_base))
+ return PTR_ERR(rcar->phy_base);
+
+ /* Renesas-specific registers */
+ rcar->base = devm_platform_ioremap_resource_byname(rcar->pdev, "app");
+
+ return PTR_ERR_OR_ZERO(rcar->base);
+}
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+ .start_link = rcar_gen4_pcie_start_link,
+ .stop_link = rcar_gen4_pcie_stop_link,
+ .link_up = rcar_gen4_pcie_link_up,
+};
+
+static struct rcar_gen4_pcie *rcar_gen4_pcie_alloc(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rcar_gen4_pcie *rcar;
+
+ rcar = devm_kzalloc(dev, sizeof(*rcar), GFP_KERNEL);
+ if (!rcar)
+ return ERR_PTR(-ENOMEM);
+
+ rcar->dw.ops = &dw_pcie_ops;
+ rcar->dw.dev = dev;
+ rcar->pdev = pdev;
+ rcar->dw.edma.mf = EDMA_MF_EDMA_UNROLL;
+ dw_pcie_cap_set(&rcar->dw, REQ_RES);
+ platform_set_drvdata(pdev, rcar);
+
+ return rcar;
+}
+
+/* Host mode */
+static int rcar_gen4_pcie_host_init(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *dw = to_dw_pcie_from_pp(pp);
+ struct rcar_gen4_pcie *rcar = to_rcar_gen4_pcie(dw);
+ int ret;
+ u32 val;
+
+ gpiod_set_value_cansleep(dw->pe_rst, 1);
+
+ ret = rcar_gen4_pcie_common_init(rcar);
+ if (ret)
+ return ret;
+
+ /*
+	 * According to section 3.5.7.2 "RC Mode" in the DWC PCIe Dual Mode
+	 * databook Rev.5.20a and section 3.5.6.1 "RC mode" in the DWC PCIe RC
+	 * databook v5.20a, these two BARs should be disabled to avoid
+	 * unnecessary memory assignment during device enumeration.
+ */
+ dw_pcie_writel_dbi2(dw, PCI_BASE_ADDRESS_0, 0x0);
+ dw_pcie_writel_dbi2(dw, PCI_BASE_ADDRESS_1, 0x0);
+
+ /* Enable MSI interrupt signal */
+ val = readl(rcar->base + PCIEINTSTS0EN);
+ val |= MSI_CTRL_INT;
+ writel(val, rcar->base + PCIEINTSTS0EN);
+
+	msleep(PCIE_T_PVPERL_MS);	/* pe_rst (PERST#) requires a 100 ms delay */
+
+ gpiod_set_value_cansleep(dw->pe_rst, 0);
+
+ return 0;
+}
+
+static void rcar_gen4_pcie_host_deinit(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *dw = to_dw_pcie_from_pp(pp);
+ struct rcar_gen4_pcie *rcar = to_rcar_gen4_pcie(dw);
+
+ gpiod_set_value_cansleep(dw->pe_rst, 1);
+ rcar_gen4_pcie_common_deinit(rcar);
+}
+
+static const struct dw_pcie_host_ops rcar_gen4_pcie_host_ops = {
+ .init = rcar_gen4_pcie_host_init,
+ .deinit = rcar_gen4_pcie_host_deinit,
+};
+
+static int rcar_gen4_add_dw_pcie_rp(struct rcar_gen4_pcie *rcar)
+{
+ struct dw_pcie_rp *pp = &rcar->dw.pp;
+
+ if (!IS_ENABLED(CONFIG_PCIE_RCAR_GEN4_HOST))
+ return -ENODEV;
+
+ pp->num_vectors = MAX_MSI_IRQS;
+ pp->ops = &rcar_gen4_pcie_host_ops;
+
+ return dw_pcie_host_init(pp);
+}
+
+static void rcar_gen4_remove_dw_pcie_rp(struct rcar_gen4_pcie *rcar)
+{
+ dw_pcie_host_deinit(&rcar->dw.pp);
+}
+
+/* Endpoint mode */
+static void rcar_gen4_pcie_ep_pre_init(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *dw = to_dw_pcie_from_ep(ep);
+ struct rcar_gen4_pcie *rcar = to_rcar_gen4_pcie(dw);
+ int ret;
+
+ ret = rcar_gen4_pcie_common_init(rcar);
+ if (ret)
+ return;
+
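+	/* Enable the eDMA interrupt status bits */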
+ writel(PCIEDMAINTSTSEN_INIT, rcar->base + PCIEDMAINTSTSEN);
+}
+
+static void rcar_gen4_pcie_ep_init(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ enum pci_barno bar;
+
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
+ dw_pcie_ep_reset_bar(pci, bar);
+}
+
+static void rcar_gen4_pcie_ep_deinit(struct rcar_gen4_pcie *rcar)
+{
+ writel(0, rcar->base + PCIEDMAINTSTSEN);
+ rcar_gen4_pcie_common_deinit(rcar);
+}
+
+static int rcar_gen4_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
+ unsigned int type, u16 interrupt_num)
+{
+ struct dw_pcie *dw = to_dw_pcie_from_ep(ep);
+
+ switch (type) {
+ case PCI_IRQ_INTX:
+ return dw_pcie_ep_raise_intx_irq(ep, func_no);
+ case PCI_IRQ_MSI:
+ return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
+ default:
+ dev_err(dw->dev, "Unknown IRQ type\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct pci_epc_features rcar_gen4_pcie_epc_features = {
+ .msi_capable = true,
+ .bar[BAR_1] = { .type = BAR_RESERVED, },
+ .bar[BAR_3] = { .type = BAR_RESERVED, },
+ .bar[BAR_4] = { .type = BAR_FIXED, .fixed_size = 256 },
+ .bar[BAR_5] = { .type = BAR_RESERVED, },
+ .align = SZ_1M,
+};
+
+static const struct pci_epc_features*
+rcar_gen4_pcie_ep_get_features(struct dw_pcie_ep *ep)
+{
+ return &rcar_gen4_pcie_epc_features;
+}
+
+static unsigned int rcar_gen4_pcie_ep_get_dbi_offset(struct dw_pcie_ep *ep,
+ u8 func_no)
+{
+ return func_no * RCAR_GEN4_PCIE_EP_FUNC_DBI_OFFSET;
+}
+
+static unsigned int rcar_gen4_pcie_ep_get_dbi2_offset(struct dw_pcie_ep *ep,
+ u8 func_no)
+{
+ return func_no * RCAR_GEN4_PCIE_EP_FUNC_DBI2_OFFSET;
+}
+
+static const struct dw_pcie_ep_ops pcie_ep_ops = {
+ .pre_init = rcar_gen4_pcie_ep_pre_init,
+ .init = rcar_gen4_pcie_ep_init,
+ .raise_irq = rcar_gen4_pcie_ep_raise_irq,
+ .get_features = rcar_gen4_pcie_ep_get_features,
+ .get_dbi_offset = rcar_gen4_pcie_ep_get_dbi_offset,
+ .get_dbi2_offset = rcar_gen4_pcie_ep_get_dbi2_offset,
+};
+
+static int rcar_gen4_add_dw_pcie_ep(struct rcar_gen4_pcie *rcar)
+{
+ struct dw_pcie_ep *ep = &rcar->dw.ep;
+ struct device *dev = rcar->dw.dev;
+ int ret;
+
+ if (!IS_ENABLED(CONFIG_PCIE_RCAR_GEN4_EP))
+ return -ENODEV;
+
+ ep->ops = &pcie_ep_ops;
+
+ ret = dw_pcie_ep_init(ep);
+ if (ret) {
+ rcar_gen4_pcie_ep_deinit(rcar);
+ return ret;
+ }
+
+ ret = dw_pcie_ep_init_registers(ep);
+ if (ret) {
+ dev_err(dev, "Failed to initialize DWC endpoint registers\n");
+ dw_pcie_ep_deinit(ep);
+ rcar_gen4_pcie_ep_deinit(rcar);
+ }
+
+ pci_epc_init_notify(ep->epc);
+
+ return ret;
+}
+
+static void rcar_gen4_remove_dw_pcie_ep(struct rcar_gen4_pcie *rcar)
+{
+ dw_pcie_ep_deinit(&rcar->dw.ep);
+ rcar_gen4_pcie_ep_deinit(rcar);
+}
+
+/* Common */
+static int rcar_gen4_add_dw_pcie(struct rcar_gen4_pcie *rcar)
+{
+ rcar->drvdata = of_device_get_match_data(&rcar->pdev->dev);
+ if (!rcar->drvdata)
+ return -EINVAL;
+
+ switch (rcar->drvdata->mode) {
+ case DW_PCIE_RC_TYPE:
+ return rcar_gen4_add_dw_pcie_rp(rcar);
+ case DW_PCIE_EP_TYPE:
+ return rcar_gen4_add_dw_pcie_ep(rcar);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int rcar_gen4_pcie_probe(struct platform_device *pdev)
+{
+ struct rcar_gen4_pcie *rcar;
+ int err;
+
+ rcar = rcar_gen4_pcie_alloc(pdev);
+ if (IS_ERR(rcar))
+ return PTR_ERR(rcar);
+
+ err = rcar_gen4_pcie_get_resources(rcar);
+ if (err)
+ return err;
+
+ err = rcar_gen4_pcie_prepare(rcar);
+ if (err)
+ return err;
+
+ err = rcar_gen4_add_dw_pcie(rcar);
+ if (err)
+ goto err_unprepare;
+
+ return 0;
+
+err_unprepare:
+ rcar_gen4_pcie_unprepare(rcar);
+
+ return err;
+}
+
+static void rcar_gen4_remove_dw_pcie(struct rcar_gen4_pcie *rcar)
+{
+ switch (rcar->drvdata->mode) {
+ case DW_PCIE_RC_TYPE:
+ rcar_gen4_remove_dw_pcie_rp(rcar);
+ break;
+ case DW_PCIE_EP_TYPE:
+ rcar_gen4_remove_dw_pcie_ep(rcar);
+ break;
+ default:
+ break;
+ }
+}
+
+static void rcar_gen4_pcie_remove(struct platform_device *pdev)
+{
+ struct rcar_gen4_pcie *rcar = platform_get_drvdata(pdev);
+
+ rcar_gen4_remove_dw_pcie(rcar);
+ rcar_gen4_pcie_unprepare(rcar);
+}
+
+static int r8a779f0_pcie_ltssm_control(struct rcar_gen4_pcie *rcar, bool enable)
+{
+ u32 val;
+
+ val = readl(rcar->base + PCIERSTCTRL1);
+ if (enable) {
+ val |= APP_LTSSM_ENABLE;
+ val &= ~APP_HOLD_PHY_RST;
+ } else {
+ /*
+		 * Since the R-Car datasheet doesn't mention how to assert
+		 * APP_HOLD_PHY_RST, don't assert it again. Otherwise, a
+		 * hang-up occurs in dw_edma_core_off() when the controller
+		 * didn't detect a PCI device.
+ */
+ val &= ~APP_LTSSM_ENABLE;
+ }
+ writel(val, rcar->base + PCIERSTCTRL1);
+
+ return 0;
+}
+
+static void rcar_gen4_pcie_additional_common_init(struct rcar_gen4_pcie *rcar)
+{
+ struct dw_pcie *dw = &rcar->dw;
+ u32 val;
+
+ val = dw_pcie_readl_dbi(dw, PCIE_PORT_LANE_SKEW);
+ val &= ~PORT_LANE_SKEW_INSERT_MASK;
+ if (dw->num_lanes < 4)
+ val |= BIT(6);
+ dw_pcie_writel_dbi(dw, PCIE_PORT_LANE_SKEW, val);
+
+ val = readl(rcar->base + PCIEPWRMNGCTRL);
+ val |= APP_CLK_REQ_N | APP_CLK_PM_EN;
+ writel(val, rcar->base + PCIEPWRMNGCTRL);
+}
+
+static void rcar_gen4_pcie_phy_reg_update_bits(struct rcar_gen4_pcie *rcar,
+ u32 offset, u32 mask, u32 val)
+{
+ u32 tmp;
+
+ tmp = readl(rcar->phy_base + offset);
+ tmp &= ~mask;
+ tmp |= val;
+ writel(tmp, rcar->phy_base + offset);
+}
+
+/*
+ * The SoC datasheet suggests checking the port logic register bits during a
+ * firmware write. If the read returns a non-zero value, this function returns
+ * -EAGAIN, indicating that the write needs to be done again. If the read
+ * returns zero, it returns 0 to indicate success.
+ */
+static int rcar_gen4_pcie_reg_test_bit(struct rcar_gen4_pcie *rcar,
+ u32 offset, u32 mask)
+{
+ struct dw_pcie *dw = &rcar->dw;
+
+ if (dw_pcie_readl_dbi(dw, offset) & mask)
+ return -EAGAIN;
+
+ return 0;
+}
+
+static int rcar_gen4_pcie_download_phy_firmware(struct rcar_gen4_pcie *rcar)
+{
+	/* The check_addr values are magic numbers taken from the datasheet */
+ static const u32 check_addr[] = {
+ 0x00101018,
+ 0x00101118,
+ 0x00101021,
+ 0x00101121,
+ };
+ struct dw_pcie *dw = &rcar->dw;
+ const struct firmware *fw;
+ unsigned int i, timeout;
+ u32 data;
+ int ret;
+
+ ret = request_firmware(&fw, RCAR_GEN4_PCIE_FIRMWARE_NAME, dw->dev);
+ if (ret) {
+ dev_err(dw->dev, "Failed to load firmware (%s): %d\n",
+ RCAR_GEN4_PCIE_FIRMWARE_NAME, ret);
+ return ret;
+ }
+
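+	/* The firmware image is a stream of little-endian 16-bit words, written one word at a time */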
+ for (i = 0; i < (fw->size / 2); i++) {
+ data = fw->data[(i * 2) + 1] << 8 | fw->data[i * 2];
+ timeout = 100;
+ do {
+ dw_pcie_writel_dbi(dw, PRTLGC89, RCAR_GEN4_PCIE_FIRMWARE_BASE_ADDR + i);
+ dw_pcie_writel_dbi(dw, PRTLGC90, data);
+ if (!rcar_gen4_pcie_reg_test_bit(rcar, PRTLGC89, BIT(30)))
+ break;
+ if (!(--timeout)) {
+ ret = -ETIMEDOUT;
+ goto exit;
+ }
+ usleep_range(100, 200);
+ } while (1);
+ }
+
+ rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x0f8, BIT(17), BIT(17));
+
+ for (i = 0; i < ARRAY_SIZE(check_addr); i++) {
+ timeout = 100;
+ do {
+ dw_pcie_writel_dbi(dw, PRTLGC89, check_addr[i]);
+ ret = rcar_gen4_pcie_reg_test_bit(rcar, PRTLGC89, BIT(30));
+ ret |= rcar_gen4_pcie_reg_test_bit(rcar, PRTLGC90, BIT(0));
+ if (!ret)
+ break;
+ if (!(--timeout)) {
+ ret = -ETIMEDOUT;
+ goto exit;
+ }
+ usleep_range(100, 200);
+ } while (1);
+ }
+
+exit:
+ release_firmware(fw);
+
+ return ret;
+}
+
+static int rcar_gen4_pcie_ltssm_control(struct rcar_gen4_pcie *rcar, bool enable)
+{
+ struct dw_pcie *dw = &rcar->dw;
+ u32 val;
+ int ret;
+
+ if (!enable) {
+ val = readl(rcar->base + PCIERSTCTRL1);
+ val &= ~APP_LTSSM_ENABLE;
+ writel(val, rcar->base + PCIERSTCTRL1);
+
+ return 0;
+ }
+
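+	/* Configure SRIS (Separate Refclk Independent SSC) mode before releasing the PHY reset */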
+ val = dw_pcie_readl_dbi(dw, PCIE_PORT_FORCE);
+ val |= PORT_FORCE_DO_DESKEW_FOR_SRIS;
+ dw_pcie_writel_dbi(dw, PCIE_PORT_FORCE, val);
+
+ val = readl(rcar->base + PCIEMSR0);
+ val |= APP_SRIS_MODE;
+ writel(val, rcar->base + PCIEMSR0);
+
+ /*
+	 * The R-Car Gen4 datasheet doesn't name the PHY registers, but the
+	 * initialization procedure does list these offsets, so this driver
+	 * uses magic offset numbers.
+ */
+ rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x700, BIT(28), 0);
+ rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x700, BIT(20), 0);
+ rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x700, BIT(12), 0);
+ rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x700, BIT(4), 0);
+
+ rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x148, GENMASK(23, 22), BIT(22));
+ rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x148, GENMASK(18, 16), GENMASK(17, 16));
+ rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x148, GENMASK(7, 6), BIT(6));
+ rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x148, GENMASK(2, 0), GENMASK(1, 0));
+ rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x1d4, GENMASK(16, 15), GENMASK(16, 15));
+ rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x514, BIT(26), BIT(26));
+ rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x0f8, BIT(16), 0);
+ rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x0f8, BIT(19), BIT(19));
+
+ val = readl(rcar->base + PCIERSTCTRL1);
+ val &= ~APP_HOLD_PHY_RST;
+ writel(val, rcar->base + PCIERSTCTRL1);
+
+ ret = readl_poll_timeout(rcar->phy_base + 0x0f8, val, val & BIT(18), 100, 10000);
+ if (ret < 0)
+ return ret;
+
+ ret = rcar_gen4_pcie_download_phy_firmware(rcar);
+ if (ret)
+ return ret;
+
+ val = readl(rcar->base + PCIERSTCTRL1);
+ val |= APP_LTSSM_ENABLE;
+ writel(val, rcar->base + PCIERSTCTRL1);
+
+ return 0;
+}
+
+static struct rcar_gen4_pcie_drvdata drvdata_r8a779f0_pcie = {
+ .ltssm_control = r8a779f0_pcie_ltssm_control,
+ .mode = DW_PCIE_RC_TYPE,
+};
+
+static struct rcar_gen4_pcie_drvdata drvdata_r8a779f0_pcie_ep = {
+ .ltssm_control = r8a779f0_pcie_ltssm_control,
+ .mode = DW_PCIE_EP_TYPE,
+};
+
+static struct rcar_gen4_pcie_drvdata drvdata_rcar_gen4_pcie = {
+ .additional_common_init = rcar_gen4_pcie_additional_common_init,
+ .ltssm_control = rcar_gen4_pcie_ltssm_control,
+ .mode = DW_PCIE_RC_TYPE,
+};
+
+static struct rcar_gen4_pcie_drvdata drvdata_rcar_gen4_pcie_ep = {
+ .additional_common_init = rcar_gen4_pcie_additional_common_init,
+ .ltssm_control = rcar_gen4_pcie_ltssm_control,
+ .mode = DW_PCIE_EP_TYPE,
+};
+
+static const struct of_device_id rcar_gen4_pcie_of_match[] = {
+ {
+ .compatible = "renesas,r8a779f0-pcie",
+ .data = &drvdata_r8a779f0_pcie,
+ },
+ {
+ .compatible = "renesas,r8a779f0-pcie-ep",
+ .data = &drvdata_r8a779f0_pcie_ep,
+ },
+ {
+ .compatible = "renesas,rcar-gen4-pcie",
+ .data = &drvdata_rcar_gen4_pcie,
+ },
+ {
+ .compatible = "renesas,rcar-gen4-pcie-ep",
+ .data = &drvdata_rcar_gen4_pcie_ep,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, rcar_gen4_pcie_of_match);
+
+static struct platform_driver rcar_gen4_pcie_driver = {
+ .driver = {
+ .name = "pcie-rcar-gen4",
+ .of_match_table = rcar_gen4_pcie_of_match,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
+ .probe = rcar_gen4_pcie_probe,
+ .remove = rcar_gen4_pcie_remove,
+};
+module_platform_driver(rcar_gen4_pcie_driver);
+
+MODULE_DESCRIPTION("Renesas R-Car Gen4 PCIe controller driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pci/controller/dwc/pcie-sophgo.c b/drivers/pci/controller/dwc/pcie-sophgo.c
new file mode 100644
index 000000000000..ad4baaa34ffa
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-sophgo.c
@@ -0,0 +1,257 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Sophgo DesignWare based PCIe host controller driver
+ */
+
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/module.h>
+#include <linux/property.h>
+#include <linux/platform_device.h>
+
+#include "pcie-designware.h"
+
+#define to_sophgo_pcie(x) dev_get_drvdata((x)->dev)
+
+#define PCIE_INT_SIGNAL 0xc48
+#define PCIE_INT_EN 0xca0
+
+#define PCIE_INT_SIGNAL_INTX GENMASK(8, 5)
+
+#define PCIE_INT_EN_INTX GENMASK(4, 1)
+#define PCIE_INT_EN_INT_MSI BIT(5)
+
+struct sophgo_pcie {
+ struct dw_pcie pci;
+ void __iomem *app_base;
+ struct clk_bulk_data *clks;
+ unsigned int clk_cnt;
+ struct irq_domain *irq_domain;
+};
+
+static int sophgo_pcie_readl_app(struct sophgo_pcie *sophgo, u32 reg)
+{
+ return readl_relaxed(sophgo->app_base + reg);
+}
+
+static void sophgo_pcie_writel_app(struct sophgo_pcie *sophgo, u32 val, u32 reg)
+{
+ writel_relaxed(val, sophgo->app_base + reg);
+}
+
+static void sophgo_pcie_intx_handler(struct irq_desc *desc)
+{
+ struct dw_pcie_rp *pp = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct sophgo_pcie *sophgo = to_sophgo_pcie(pci);
+ unsigned long hwirq, reg;
+
+ chained_irq_enter(chip, desc);
+
+ reg = sophgo_pcie_readl_app(sophgo, PCIE_INT_SIGNAL);
+ reg = FIELD_GET(PCIE_INT_SIGNAL_INTX, reg);
+
+ for_each_set_bit(hwirq, &reg, PCI_NUM_INTX)
+ generic_handle_domain_irq(sophgo->irq_domain, hwirq);
+
+ chained_irq_exit(chip, desc);
+}
+
+static void sophgo_intx_irq_mask(struct irq_data *d)
+{
+ struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct sophgo_pcie *sophgo = to_sophgo_pcie(pci);
+ unsigned long flags;
+ u32 val;
+
+ raw_spin_lock_irqsave(&pp->lock, flags);
+
+ val = sophgo_pcie_readl_app(sophgo, PCIE_INT_EN);
+ val &= ~FIELD_PREP(PCIE_INT_EN_INTX, BIT(d->hwirq));
+ sophgo_pcie_writel_app(sophgo, val, PCIE_INT_EN);
+
+ raw_spin_unlock_irqrestore(&pp->lock, flags);
+};
+
+static void sophgo_intx_irq_unmask(struct irq_data *d)
+{
+ struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct sophgo_pcie *sophgo = to_sophgo_pcie(pci);
+ unsigned long flags;
+ u32 val;
+
+ raw_spin_lock_irqsave(&pp->lock, flags);
+
+ val = sophgo_pcie_readl_app(sophgo, PCIE_INT_EN);
+ val |= FIELD_PREP(PCIE_INT_EN_INTX, BIT(d->hwirq));
+ sophgo_pcie_writel_app(sophgo, val, PCIE_INT_EN);
+
+ raw_spin_unlock_irqrestore(&pp->lock, flags);
+};
+
+static struct irq_chip sophgo_intx_irq_chip = {
+ .name = "INTx",
+ .irq_mask = sophgo_intx_irq_mask,
+ .irq_unmask = sophgo_intx_irq_unmask,
+};
+
+static int sophgo_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &sophgo_intx_irq_chip, handle_level_irq);
+ irq_set_chip_data(irq, domain->host_data);
+
+ return 0;
+}
+
+static const struct irq_domain_ops intx_domain_ops = {
+ .map = sophgo_pcie_intx_map,
+};
+
+static int sophgo_pcie_init_irq_domain(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct sophgo_pcie *sophgo = to_sophgo_pcie(pci);
+ struct device *dev = sophgo->pci.dev;
+ struct fwnode_handle *intc;
+ int irq;
+
+ intc = device_get_named_child_node(dev, "interrupt-controller");
+ if (!intc) {
+ dev_err(dev, "missing child interrupt-controller node\n");
+ return -ENODEV;
+ }
+
+ irq = fwnode_irq_get(intc, 0);
+ if (irq < 0) {
+ dev_err(dev, "failed to get INTx irq number\n");
+ fwnode_handle_put(intc);
+ return irq;
+ }
+
+ sophgo->irq_domain = irq_domain_create_linear(intc, PCI_NUM_INTX,
+ &intx_domain_ops, pp);
+ fwnode_handle_put(intc);
+ if (!sophgo->irq_domain) {
+ dev_err(dev, "failed to get a INTx irq domain\n");
+ return -EINVAL;
+ }
+
+ return irq;
+}
+
+static void sophgo_pcie_msi_enable(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct sophgo_pcie *sophgo = to_sophgo_pcie(pci);
+ unsigned long flags;
+ u32 val;
+
+ raw_spin_lock_irqsave(&pp->lock, flags);
+
+ val = sophgo_pcie_readl_app(sophgo, PCIE_INT_EN);
+ val |= PCIE_INT_EN_INT_MSI;
+ sophgo_pcie_writel_app(sophgo, val, PCIE_INT_EN);
+
+ raw_spin_unlock_irqrestore(&pp->lock, flags);
+}
+
+static int sophgo_pcie_host_init(struct dw_pcie_rp *pp)
+{
+ int irq;
+
+ irq = sophgo_pcie_init_irq_domain(pp);
+ if (irq < 0)
+ return irq;
+
+ irq_set_chained_handler_and_data(irq, sophgo_pcie_intx_handler, pp);
+
+ sophgo_pcie_msi_enable(pp);
+
+ return 0;
+}
+
+static const struct dw_pcie_host_ops sophgo_pcie_host_ops = {
+ .init = sophgo_pcie_host_init,
+};
+
+static int sophgo_pcie_clk_init(struct sophgo_pcie *sophgo)
+{
+ struct device *dev = sophgo->pci.dev;
+ int ret;
+
+ ret = devm_clk_bulk_get_all_enabled(dev, &sophgo->clks);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "failed to get clocks\n");
+
+ sophgo->clk_cnt = ret;
+
+ return 0;
+}
+
+static int sophgo_pcie_resource_get(struct platform_device *pdev,
+ struct sophgo_pcie *sophgo)
+{
+ sophgo->app_base = devm_platform_ioremap_resource_byname(pdev, "app");
+ if (IS_ERR(sophgo->app_base))
+ return dev_err_probe(&pdev->dev, PTR_ERR(sophgo->app_base),
+ "failed to map app registers\n");
+
+ return 0;
+}
+
+static int sophgo_pcie_configure_rc(struct sophgo_pcie *sophgo)
+{
+ struct dw_pcie_rp *pp;
+
+ pp = &sophgo->pci.pp;
+ pp->ops = &sophgo_pcie_host_ops;
+
+ return dw_pcie_host_init(pp);
+}
+
+static int sophgo_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct sophgo_pcie *sophgo;
+ int ret;
+
+ sophgo = devm_kzalloc(dev, sizeof(*sophgo), GFP_KERNEL);
+ if (!sophgo)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, sophgo);
+
+ sophgo->pci.dev = dev;
+
+ ret = sophgo_pcie_resource_get(pdev, sophgo);
+ if (ret)
+ return ret;
+
+ ret = sophgo_pcie_clk_init(sophgo);
+ if (ret)
+ return ret;
+
+ return sophgo_pcie_configure_rc(sophgo);
+}
+
+static const struct of_device_id sophgo_pcie_of_match[] = {
+ { .compatible = "sophgo,sg2044-pcie" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sophgo_pcie_of_match);
+
+static struct platform_driver sophgo_pcie_driver = {
+ .driver = {
+ .name = "sophgo-pcie",
+ .of_match_table = sophgo_pcie_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = sophgo_pcie_probe,
+};
+builtin_platform_driver(sophgo_pcie_driver);
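For reference on the bit math in pcie-sophgo.c above: PCIE_INT_EN_INTX is GENMASK(4, 1), so FIELD_PREP(PCIE_INT_EN_INTX, BIT(d->hwirq)) lands the enable bit for hwirq 0..3 (INTA..INTD) in register bits 1..4, while the handler's FIELD_GET(PCIE_INT_SIGNAL_INTX, reg) shifts the status field down so for_each_set_bit() walks hwirqs 0..3 directly. A minimal stand-alone sketch of that mapping, using simplified re-implementations of the kernel macros purely for illustration:

/*
 * Illustrative only: stand-alone re-derivation of the INTx enable-bit
 * mapping used by pcie-sophgo.c. BIT/GENMASK/field_prep below are
 * simplified stand-ins for the kernel macros in <linux/bits.h> and
 * <linux/bitfield.h>, not the real definitions.
 */
#include <stdio.h>

#define BIT(n)		(1u << (n))
#define GENMASK(h, l)	((~0u << (l)) & (~0u >> (31 - (h))))

#define PCIE_INT_EN_INTX	GENMASK(4, 1)

/* Shift a value into a mask's field, like the kernel's FIELD_PREP() */
static unsigned int field_prep(unsigned int mask, unsigned int val)
{
	return (val << __builtin_ctz(mask)) & mask;
}

int main(void)
{
	for (unsigned int hwirq = 0; hwirq < 4; hwirq++)
		printf("hwirq %u (INT%c) -> PCIE_INT_EN bit %d\n",
		       hwirq, 'A' + (int)hwirq,
		       __builtin_ctz(field_prep(PCIE_INT_EN_INTX, BIT(hwirq))));
	return 0;
}

Built with any C99 compiler, this prints hwirq 0 -> bit 1 through hwirq 3 -> bit 4, matching the mask/unmask helpers above.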
diff --git a/drivers/pci/controller/dwc/pcie-spacemit-k1.c b/drivers/pci/controller/dwc/pcie-spacemit-k1.c
new file mode 100644
index 000000000000..be20a520255b
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-spacemit-k1.c
@@ -0,0 +1,357 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SpacemiT K1 PCIe host driver
+ *
+ * Copyright (C) 2025 by RISCstar Solutions Corporation. All rights reserved.
+ * Copyright (c) 2023, spacemit Corporation.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/gfp.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mod_devicetable.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
+#include <linux/types.h>
+
+#include "pcie-designware.h"
+
+#define PCI_VENDOR_ID_SPACEMIT 0x201f
+#define PCI_DEVICE_ID_SPACEMIT_K1 0x0001
+
+/* Offsets and field definitions for link management registers */
+#define K1_PHY_AHB_IRQ_EN 0x0000
+#define PCIE_INTERRUPT_EN BIT(0)
+
+#define K1_PHY_AHB_LINK_STS 0x0004
+#define SMLH_LINK_UP BIT(1)
+#define RDLH_LINK_UP BIT(12)
+
+#define INTR_ENABLE 0x0014
+#define MSI_CTRL_INT BIT(11)
+
+/* Some controls require APMU regmap access */
+#define SYSCON_APMU "spacemit,apmu"
+
+/* Offsets and field definitions for APMU registers */
+#define PCIE_CLK_RESET_CONTROL 0x0000
+#define LTSSM_EN BIT(6)
+#define PCIE_AUX_PWR_DET BIT(9)
+#define PCIE_RC_PERST BIT(12) /* 1: assert PERST# */
+#define APP_HOLD_PHY_RST BIT(30)
+#define DEVICE_TYPE_RC BIT(31) /* 0: endpoint; 1: RC */
+
+#define PCIE_CONTROL_LOGIC 0x0004
+#define PCIE_SOFT_RESET BIT(0)
+
+struct k1_pcie {
+ struct dw_pcie pci;
+ struct phy *phy;
+ void __iomem *link;
+ struct regmap *pmu; /* Errors ignored; MMIO-backed regmap */
+ u32 pmu_off;
+};
+
+#define to_k1_pcie(dw_pcie) \
+ platform_get_drvdata(to_platform_device((dw_pcie)->dev))
+
+static void k1_pcie_toggle_soft_reset(struct k1_pcie *k1)
+{
+ u32 offset;
+ u32 val;
+
+ /*
+ * Write, then read back to guarantee it has reached the device
+ * before we start the delay.
+ */
+ offset = k1->pmu_off + PCIE_CONTROL_LOGIC;
+ regmap_set_bits(k1->pmu, offset, PCIE_SOFT_RESET);
+ regmap_read(k1->pmu, offset, &val);
+
+ mdelay(2);
+
+ regmap_clear_bits(k1->pmu, offset, PCIE_SOFT_RESET);
+}
+
+/* Enable app clocks, deassert resets */
+static int k1_pcie_enable_resources(struct k1_pcie *k1)
+{
+ struct dw_pcie *pci = &k1->pci;
+ int ret;
+
+ ret = clk_bulk_prepare_enable(ARRAY_SIZE(pci->app_clks), pci->app_clks);
+ if (ret)
+ return ret;
+
+ ret = reset_control_bulk_deassert(ARRAY_SIZE(pci->app_rsts),
+ pci->app_rsts);
+ if (ret)
+ goto err_disable_clks;
+
+ return 0;
+
+err_disable_clks:
+ clk_bulk_disable_unprepare(ARRAY_SIZE(pci->app_clks), pci->app_clks);
+
+ return ret;
+}
+
+/* Assert resets, disable app clocks */
+static void k1_pcie_disable_resources(struct k1_pcie *k1)
+{
+ struct dw_pcie *pci = &k1->pci;
+
+ reset_control_bulk_assert(ARRAY_SIZE(pci->app_rsts), pci->app_rsts);
+ clk_bulk_disable_unprepare(ARRAY_SIZE(pci->app_clks), pci->app_clks);
+}
+
+/* FIXME: Disable ASPM L1 to avoid errors reported on some NVMe drives */
+static void k1_pcie_disable_aspm_l1(struct k1_pcie *k1)
+{
+ struct dw_pcie *pci = &k1->pci;
+ u8 offset;
+ u32 val;
+
+ offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ offset += PCI_EXP_LNKCAP;
+
+ dw_pcie_dbi_ro_wr_en(pci);
+ val = dw_pcie_readl_dbi(pci, offset);
+ val &= ~PCI_EXP_LNKCAP_ASPM_L1;
+ dw_pcie_writel_dbi(pci, offset, val);
+ dw_pcie_dbi_ro_wr_dis(pci);
+}
+
+static int k1_pcie_init(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct k1_pcie *k1 = to_k1_pcie(pci);
+ u32 reset_ctrl;
+ u32 val;
+ int ret;
+
+ k1_pcie_toggle_soft_reset(k1);
+
+ ret = k1_pcie_enable_resources(k1);
+ if (ret)
+ return ret;
+
+ /* Set the PCI vendor and device ID */
+ dw_pcie_dbi_ro_wr_en(pci);
+ dw_pcie_writew_dbi(pci, PCI_VENDOR_ID, PCI_VENDOR_ID_SPACEMIT);
+ dw_pcie_writew_dbi(pci, PCI_DEVICE_ID, PCI_DEVICE_ID_SPACEMIT_K1);
+ dw_pcie_dbi_ro_wr_dis(pci);
+
+ /*
+ * Start by asserting fundamental reset (drive PERST# low). The
+ * PCI CEM spec says that PERST# should be deasserted at least
+ * 100ms after the power becomes stable, so we'll insert that
+ * delay first. Write, then read it back to guarantee the write
+ * reaches the device before we start the delay.
+ */
+ reset_ctrl = k1->pmu_off + PCIE_CLK_RESET_CONTROL;
+ regmap_set_bits(k1->pmu, reset_ctrl, PCIE_RC_PERST);
+ regmap_read(k1->pmu, reset_ctrl, &val);
+ mdelay(PCIE_T_PVPERL_MS);
+
+ /*
+ * Put the controller in root complex mode, and indicate that
+ * Vaux (3.3v) is present.
+ */
+ regmap_set_bits(k1->pmu, reset_ctrl, DEVICE_TYPE_RC | PCIE_AUX_PWR_DET);
+
+ ret = phy_init(k1->phy);
+ if (ret) {
+ k1_pcie_disable_resources(k1);
+
+ return ret;
+ }
+
+ /* Deassert fundamental reset (drive PERST# high) */
+ regmap_clear_bits(k1->pmu, reset_ctrl, PCIE_RC_PERST);
+
+ /* Finally, as a workaround, disable ASPM L1 */
+ k1_pcie_disable_aspm_l1(k1);
+
+ return 0;
+}
+
+static void k1_pcie_deinit(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct k1_pcie *k1 = to_k1_pcie(pci);
+
+ /* Assert fundamental reset (drive PERST# low) */
+ regmap_set_bits(k1->pmu, k1->pmu_off + PCIE_CLK_RESET_CONTROL,
+ PCIE_RC_PERST);
+
+ phy_exit(k1->phy);
+
+ k1_pcie_disable_resources(k1);
+}
+
+static const struct dw_pcie_host_ops k1_pcie_host_ops = {
+ .init = k1_pcie_init,
+ .deinit = k1_pcie_deinit,
+};
+
+static bool k1_pcie_link_up(struct dw_pcie *pci)
+{
+ struct k1_pcie *k1 = to_k1_pcie(pci);
+ u32 val;
+
+ val = readl_relaxed(k1->link + K1_PHY_AHB_LINK_STS);
+
+ return (val & RDLH_LINK_UP) && (val & SMLH_LINK_UP);
+}
+
+static int k1_pcie_start_link(struct dw_pcie *pci)
+{
+ struct k1_pcie *k1 = to_k1_pcie(pci);
+ u32 val;
+
+ /* Stop holding the PHY in reset, and enable link training */
+ regmap_update_bits(k1->pmu, k1->pmu_off + PCIE_CLK_RESET_CONTROL,
+ APP_HOLD_PHY_RST | LTSSM_EN, LTSSM_EN);
+
+ /* Enable the MSI interrupt */
+ writel_relaxed(MSI_CTRL_INT, k1->link + INTR_ENABLE);
+
+ /* Top-level interrupt enable */
+ val = readl_relaxed(k1->link + K1_PHY_AHB_IRQ_EN);
+ val |= PCIE_INTERRUPT_EN;
+ writel_relaxed(val, k1->link + K1_PHY_AHB_IRQ_EN);
+
+ return 0;
+}
+
+static void k1_pcie_stop_link(struct dw_pcie *pci)
+{
+ struct k1_pcie *k1 = to_k1_pcie(pci);
+ u32 val;
+
+ /* Disable interrupts */
+ val = readl_relaxed(k1->link + K1_PHY_AHB_IRQ_EN);
+ val &= ~PCIE_INTERRUPT_EN;
+ writel_relaxed(val, k1->link + K1_PHY_AHB_IRQ_EN);
+
+ writel_relaxed(0, k1->link + INTR_ENABLE);
+
+ /* Disable the link and hold the PHY in reset */
+ regmap_update_bits(k1->pmu, k1->pmu_off + PCIE_CLK_RESET_CONTROL,
+ APP_HOLD_PHY_RST | LTSSM_EN, APP_HOLD_PHY_RST);
+}
+
+static const struct dw_pcie_ops k1_pcie_ops = {
+ .link_up = k1_pcie_link_up,
+ .start_link = k1_pcie_start_link,
+ .stop_link = k1_pcie_stop_link,
+};
+
+static int k1_pcie_parse_port(struct k1_pcie *k1)
+{
+ struct device *dev = k1->pci.dev;
+ struct device_node *root_port;
+ struct phy *phy;
+
+ /* We assume only one root port */
+ root_port = of_get_next_available_child(dev_of_node(dev), NULL);
+ if (!root_port)
+ return -EINVAL;
+
+ phy = devm_of_phy_get(dev, root_port, NULL);
+
+ of_node_put(root_port);
+
+ if (IS_ERR(phy))
+ return PTR_ERR(phy);
+
+ k1->phy = phy;
+
+ return 0;
+}
+
+static int k1_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct k1_pcie *k1;
+ int ret;
+
+ k1 = devm_kzalloc(dev, sizeof(*k1), GFP_KERNEL);
+ if (!k1)
+ return -ENOMEM;
+
+ k1->pmu = syscon_regmap_lookup_by_phandle_args(dev_of_node(dev),
+ SYSCON_APMU, 1,
+ &k1->pmu_off);
+ if (IS_ERR(k1->pmu))
+ return dev_err_probe(dev, PTR_ERR(k1->pmu),
+ "failed to lookup PMU registers\n");
+
+ k1->link = devm_platform_ioremap_resource_byname(pdev, "link");
+ if (IS_ERR(k1->link))
+ return dev_err_probe(dev, PTR_ERR(k1->link),
+ "failed to map \"link\" registers\n");
+
+ k1->pci.dev = dev;
+ k1->pci.ops = &k1_pcie_ops;
+ k1->pci.pp.num_vectors = MAX_MSI_IRQS;
+ dw_pcie_cap_set(&k1->pci, REQ_RES);
+
+ k1->pci.pp.ops = &k1_pcie_host_ops;
+
+ /* Hold the PHY in reset until we start the link */
+ regmap_set_bits(k1->pmu, k1->pmu_off + PCIE_CLK_RESET_CONTROL,
+ APP_HOLD_PHY_RST);
+
+ ret = devm_regulator_get_enable(dev, "vpcie3v3");
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to get \"vpcie3v3\" supply\n");
+
+ pm_runtime_set_active(dev);
+ pm_runtime_no_callbacks(dev);
+ devm_pm_runtime_enable(dev);
+
+ platform_set_drvdata(pdev, k1);
+
+ ret = k1_pcie_parse_port(k1);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to parse root port\n");
+
+ ret = dw_pcie_host_init(&k1->pci.pp);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to initialize host\n");
+
+ return 0;
+}
+
+static void k1_pcie_remove(struct platform_device *pdev)
+{
+ struct k1_pcie *k1 = platform_get_drvdata(pdev);
+
+ dw_pcie_host_deinit(&k1->pci.pp);
+}
+
+static const struct of_device_id k1_pcie_of_match_table[] = {
+ { .compatible = "spacemit,k1-pcie", },
+ { }
+};
+
+static struct platform_driver k1_pcie_driver = {
+ .probe = k1_pcie_probe,
+ .remove = k1_pcie_remove,
+ .driver = {
+ .name = "spacemit-k1-pcie",
+ .of_match_table = k1_pcie_of_match_table,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
+};
+module_platform_driver(k1_pcie_driver);
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("SpacemiT K1 PCIe host driver");
diff --git a/drivers/pci/controller/dwc/pcie-spear13xx.c b/drivers/pci/controller/dwc/pcie-spear13xx.c
index 99d47ae80331..01794a9d3ad2 100644
--- a/drivers/pci/controller/dwc/pcie-spear13xx.c
+++ b/drivers/pci/controller/dwc/pcie-spear13xx.c
@@ -110,15 +110,12 @@ static void spear13xx_pcie_enable_interrupts(struct spear13xx_pcie *spear13xx_pc
MSI_CTRL_INT, &app_reg->int_mask);
}
-static int spear13xx_pcie_link_up(struct dw_pcie *pci)
+static bool spear13xx_pcie_link_up(struct dw_pcie *pci)
{
struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pci);
struct pcie_app_reg __iomem *app_reg = spear13xx_pcie->app_base;
- if (readl(&app_reg->app_status_1) & XMLH_LINK_UP)
- return 1;
-
- return 0;
+ return readl(&app_reg->app_status_1) & XMLH_LINK_UP;
}
static int spear13xx_pcie_host_init(struct dw_pcie_rp *pp)
@@ -148,7 +145,7 @@ static int spear13xx_pcie_host_init(struct dw_pcie_rp *pp)
}
static const struct dw_pcie_host_ops spear13xx_pcie_host_ops = {
- .host_init = spear13xx_pcie_host_init,
+ .init = spear13xx_pcie_host_init,
};
static int spear13xx_add_pcie_port(struct spear13xx_pcie *spear13xx_pcie,
@@ -233,7 +230,7 @@ static int spear13xx_pcie_probe(struct platform_device *pdev)
}
if (of_property_read_bool(np, "st,pcie-is-gen1"))
- pci->link_gen = 1;
+ pci->max_link_speed = 1;
platform_set_drvdata(pdev, spear13xx_pcie);
diff --git a/drivers/pci/controller/dwc/pcie-stm32-ep.c b/drivers/pci/controller/dwc/pcie-stm32-ep.c
new file mode 100644
index 000000000000..2cecf32d2b0f
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-stm32-ep.c
@@ -0,0 +1,343 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * STMicroelectronics STM32MP25 PCIe endpoint driver.
+ *
+ * Copyright (C) 2025 STMicroelectronics
+ * Author: Christian Bruel <christian.bruel@foss.st.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/gpio/consumer.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of_platform.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include "pcie-designware.h"
+#include "pcie-stm32.h"
+
+struct stm32_pcie {
+ struct dw_pcie pci;
+ struct regmap *regmap;
+ struct reset_control *rst;
+ struct phy *phy;
+ struct clk *clk;
+ struct gpio_desc *perst_gpio;
+ unsigned int perst_irq;
+};
+
+static void stm32_pcie_ep_init(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ enum pci_barno bar;
+
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
+ dw_pcie_ep_reset_bar(pci, bar);
+}
+
+static int stm32_pcie_start_link(struct dw_pcie *pci)
+{
+ struct stm32_pcie *stm32_pcie = to_stm32_pcie(pci);
+
+ enable_irq(stm32_pcie->perst_irq);
+
+ return 0;
+}
+
+static void stm32_pcie_stop_link(struct dw_pcie *pci)
+{
+ struct stm32_pcie *stm32_pcie = to_stm32_pcie(pci);
+
+ disable_irq(stm32_pcie->perst_irq);
+}
+
+static int stm32_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
+ unsigned int type, u16 interrupt_num)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+ switch (type) {
+ case PCI_IRQ_INTX:
+ return dw_pcie_ep_raise_intx_irq(ep, func_no);
+ case PCI_IRQ_MSI:
+ return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
+ default:
+ dev_err(pci->dev, "UNKNOWN IRQ type\n");
+ return -EINVAL;
+ }
+}
+
+static const struct pci_epc_features stm32_pcie_epc_features = {
+ .msi_capable = true,
+ .align = SZ_64K,
+};
+
+static const struct pci_epc_features*
+stm32_pcie_get_features(struct dw_pcie_ep *ep)
+{
+ return &stm32_pcie_epc_features;
+}
+
+static const struct dw_pcie_ep_ops stm32_pcie_ep_ops = {
+ .init = stm32_pcie_ep_init,
+ .raise_irq = stm32_pcie_raise_irq,
+ .get_features = stm32_pcie_get_features,
+};
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+ .start_link = stm32_pcie_start_link,
+ .stop_link = stm32_pcie_stop_link,
+};
+
+static int stm32_pcie_enable_resources(struct stm32_pcie *stm32_pcie)
+{
+ int ret;
+
+ ret = phy_init(stm32_pcie->phy);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(stm32_pcie->clk);
+ if (ret)
+ phy_exit(stm32_pcie->phy);
+
+ return ret;
+}
+
+static void stm32_pcie_disable_resources(struct stm32_pcie *stm32_pcie)
+{
+ clk_disable_unprepare(stm32_pcie->clk);
+
+ phy_exit(stm32_pcie->phy);
+}
+
+static void stm32_pcie_perst_assert(struct dw_pcie *pci)
+{
+ struct stm32_pcie *stm32_pcie = to_stm32_pcie(pci);
+ struct dw_pcie_ep *ep = &stm32_pcie->pci.ep;
+ struct device *dev = pci->dev;
+
+ dev_dbg(dev, "PERST asserted by host\n");
+
+ regmap_update_bits(stm32_pcie->regmap, SYSCFG_PCIECR,
+ STM32MP25_PCIECR_LTSSM_EN, 0);
+
+ pci_epc_deinit_notify(ep->epc);
+
+ stm32_pcie_disable_resources(stm32_pcie);
+
+ pm_runtime_put_sync(dev);
+}
+
+static void stm32_pcie_perst_deassert(struct dw_pcie *pci)
+{
+ struct stm32_pcie *stm32_pcie = to_stm32_pcie(pci);
+ struct device *dev = pci->dev;
+ struct dw_pcie_ep *ep = &pci->ep;
+ int ret;
+
+ dev_dbg(dev, "PERST de-asserted by host\n");
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0) {
+ dev_err(dev, "Failed to resume runtime PM: %d\n", ret);
+ return;
+ }
+
+ ret = stm32_pcie_enable_resources(stm32_pcie);
+ if (ret) {
+ dev_err(dev, "Failed to enable resources: %d\n", ret);
+ goto err_pm_put_sync;
+ }
+
+ /*
+ * Reprogram the configuration space registers here because the DBI
+ * registers were reset by the PHY RCC during phy_init().
+ */
+ ret = dw_pcie_ep_init_registers(ep);
+ if (ret) {
+ dev_err(dev, "Failed to complete initialization: %d\n", ret);
+ goto err_disable_resources;
+ }
+
+ pci_epc_init_notify(ep->epc);
+
+ /* Enable link training */
+ regmap_update_bits(stm32_pcie->regmap, SYSCFG_PCIECR,
+ STM32MP25_PCIECR_LTSSM_EN,
+ STM32MP25_PCIECR_LTSSM_EN);
+
+ return;
+
+err_disable_resources:
+ stm32_pcie_disable_resources(stm32_pcie);
+
+err_pm_put_sync:
+ pm_runtime_put_sync(dev);
+}
+
+static irqreturn_t stm32_pcie_ep_perst_irq_thread(int irq, void *data)
+{
+ struct stm32_pcie *stm32_pcie = data;
+ struct dw_pcie *pci = &stm32_pcie->pci;
+ u32 perst;
+
+ perst = gpiod_get_value(stm32_pcie->perst_gpio);
+ if (perst)
+ stm32_pcie_perst_assert(pci);
+ else
+ stm32_pcie_perst_deassert(pci);
+
+ irq_set_irq_type(gpiod_to_irq(stm32_pcie->perst_gpio),
+ (perst ? IRQF_TRIGGER_HIGH : IRQF_TRIGGER_LOW));
+
+ return IRQ_HANDLED;
+}
+
+static int stm32_add_pcie_ep(struct stm32_pcie *stm32_pcie,
+ struct platform_device *pdev)
+{
+ struct dw_pcie_ep *ep = &stm32_pcie->pci.ep;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ ret = regmap_update_bits(stm32_pcie->regmap, SYSCFG_PCIECR,
+ STM32MP25_PCIECR_TYPE_MASK,
+ STM32MP25_PCIECR_EP);
+ if (ret)
+ return ret;
+
+ reset_control_assert(stm32_pcie->rst);
+ reset_control_deassert(stm32_pcie->rst);
+
+ ep->ops = &stm32_pcie_ep_ops;
+
+ ep->page_size = stm32_pcie_epc_features.align;
+
+ ret = dw_pcie_ep_init(ep);
+ if (ret) {
+ dev_err(dev, "Failed to initialize ep: %d\n", ret);
+ return ret;
+ }
+
+ ret = stm32_pcie_enable_resources(stm32_pcie);
+ if (ret) {
+ dev_err(dev, "Failed to enable resources: %d\n", ret);
+ dw_pcie_ep_deinit(ep);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int stm32_pcie_probe(struct platform_device *pdev)
+{
+ struct stm32_pcie *stm32_pcie;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ stm32_pcie = devm_kzalloc(dev, sizeof(*stm32_pcie), GFP_KERNEL);
+ if (!stm32_pcie)
+ return -ENOMEM;
+
+ stm32_pcie->pci.dev = dev;
+ stm32_pcie->pci.ops = &dw_pcie_ops;
+
+ stm32_pcie->regmap = syscon_regmap_lookup_by_compatible("st,stm32mp25-syscfg");
+ if (IS_ERR(stm32_pcie->regmap))
+ return dev_err_probe(dev, PTR_ERR(stm32_pcie->regmap),
+ "No syscfg specified\n");
+
+ stm32_pcie->phy = devm_phy_get(dev, NULL);
+ if (IS_ERR(stm32_pcie->phy))
+ return dev_err_probe(dev, PTR_ERR(stm32_pcie->phy),
+ "failed to get pcie-phy\n");
+
+ stm32_pcie->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(stm32_pcie->clk))
+ return dev_err_probe(dev, PTR_ERR(stm32_pcie->clk),
+ "Failed to get PCIe clock source\n");
+
+ stm32_pcie->rst = devm_reset_control_get_exclusive(dev, NULL);
+ if (IS_ERR(stm32_pcie->rst))
+ return dev_err_probe(dev, PTR_ERR(stm32_pcie->rst),
+ "Failed to get PCIe reset\n");
+
+ stm32_pcie->perst_gpio = devm_gpiod_get(dev, "reset", GPIOD_IN);
+ if (IS_ERR(stm32_pcie->perst_gpio))
+ return dev_err_probe(dev, PTR_ERR(stm32_pcie->perst_gpio),
+ "Failed to get reset GPIO\n");
+
+ ret = phy_set_mode(stm32_pcie->phy, PHY_MODE_PCIE);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, stm32_pcie);
+
+ pm_runtime_get_noresume(dev);
+
+ ret = devm_pm_runtime_enable(dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(&pdev->dev);
+ return dev_err_probe(dev, ret, "Failed to enable runtime PM\n");
+ }
+
+ stm32_pcie->perst_irq = gpiod_to_irq(stm32_pcie->perst_gpio);
+
+ /* Will be enabled in start_link when device is initialized. */
+ irq_set_status_flags(stm32_pcie->perst_irq, IRQ_NOAUTOEN);
+
+ ret = devm_request_threaded_irq(dev, stm32_pcie->perst_irq, NULL,
+ stm32_pcie_ep_perst_irq_thread,
+ IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+ "perst_irq", stm32_pcie);
+ if (ret) {
+ pm_runtime_put_noidle(&pdev->dev);
+ return dev_err_probe(dev, ret, "Failed to request PERST IRQ\n");
+ }
+
+ ret = stm32_add_pcie_ep(stm32_pcie, pdev);
+ if (ret)
+ pm_runtime_put_noidle(&pdev->dev);
+
+ return ret;
+}
+
+static void stm32_pcie_remove(struct platform_device *pdev)
+{
+ struct stm32_pcie *stm32_pcie = platform_get_drvdata(pdev);
+ struct dw_pcie *pci = &stm32_pcie->pci;
+ struct dw_pcie_ep *ep = &pci->ep;
+
+ dw_pcie_stop_link(pci);
+
+ pci_epc_deinit_notify(ep->epc);
+ dw_pcie_ep_deinit(ep);
+
+ stm32_pcie_disable_resources(stm32_pcie);
+
+ pm_runtime_put_sync(&pdev->dev);
+}
+
+static const struct of_device_id stm32_pcie_ep_of_match[] = {
+ { .compatible = "st,stm32mp25-pcie-ep" },
+ {},
+};
+
+static struct platform_driver stm32_pcie_ep_driver = {
+ .probe = stm32_pcie_probe,
+ .remove = stm32_pcie_remove,
+ .driver = {
+ .name = "stm32-ep-pcie",
+ .of_match_table = stm32_pcie_ep_of_match,
+ },
+};
+
+module_platform_driver(stm32_pcie_ep_driver);
+
+MODULE_AUTHOR("Christian Bruel <christian.bruel@foss.st.com>");
+MODULE_DESCRIPTION("STM32MP25 PCIe Endpoint Controller driver");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(of, stm32_pcie_ep_of_match);
diff --git a/drivers/pci/controller/dwc/pcie-stm32.c b/drivers/pci/controller/dwc/pcie-stm32.c
new file mode 100644
index 000000000000..a9e77478443b
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-stm32.c
@@ -0,0 +1,370 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * STMicroelectronics STM32MP25 PCIe root complex driver.
+ *
+ * Copyright (C) 2025 STMicroelectronics
+ * Author: Christian Bruel <christian.bruel@foss.st.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/irq.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/phy/phy.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/pm_wakeirq.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/stddef.h>
+
+#include "../../pci.h"
+
+#include "pcie-designware.h"
+#include "pcie-stm32.h"
+
+struct stm32_pcie {
+ struct dw_pcie pci;
+ struct regmap *regmap;
+ struct reset_control *rst;
+ struct phy *phy;
+ struct clk *clk;
+ struct gpio_desc *perst_gpio;
+ struct gpio_desc *wake_gpio;
+};
+
+static void stm32_pcie_deassert_perst(struct stm32_pcie *stm32_pcie)
+{
+ if (stm32_pcie->perst_gpio) {
+ msleep(PCIE_T_PVPERL_MS);
+ gpiod_set_value(stm32_pcie->perst_gpio, 0);
+ }
+
+ msleep(PCIE_RESET_CONFIG_WAIT_MS);
+}
+
+static void stm32_pcie_assert_perst(struct stm32_pcie *stm32_pcie)
+{
+ gpiod_set_value(stm32_pcie->perst_gpio, 1);
+}
+
+static int stm32_pcie_start_link(struct dw_pcie *pci)
+{
+ struct stm32_pcie *stm32_pcie = to_stm32_pcie(pci);
+
+ return regmap_update_bits(stm32_pcie->regmap, SYSCFG_PCIECR,
+ STM32MP25_PCIECR_LTSSM_EN,
+ STM32MP25_PCIECR_LTSSM_EN);
+}
+
+static void stm32_pcie_stop_link(struct dw_pcie *pci)
+{
+ struct stm32_pcie *stm32_pcie = to_stm32_pcie(pci);
+
+ regmap_update_bits(stm32_pcie->regmap, SYSCFG_PCIECR,
+ STM32MP25_PCIECR_LTSSM_EN, 0);
+}
+
+static int stm32_pcie_suspend_noirq(struct device *dev)
+{
+ struct stm32_pcie *stm32_pcie = dev_get_drvdata(dev);
+ int ret;
+
+ ret = dw_pcie_suspend_noirq(&stm32_pcie->pci);
+ if (ret)
+ return ret;
+
+ stm32_pcie_assert_perst(stm32_pcie);
+
+ clk_disable_unprepare(stm32_pcie->clk);
+
+ if (!device_wakeup_path(dev))
+ phy_exit(stm32_pcie->phy);
+
+ return pinctrl_pm_select_sleep_state(dev);
+}
+
+static int stm32_pcie_resume_noirq(struct device *dev)
+{
+ struct stm32_pcie *stm32_pcie = dev_get_drvdata(dev);
+ int ret;
+
+ /*
+ * The core clock is gated with CLKREQ# from the COMBOPHY REFCLK,
+ * so if no device is present, CLKREQ# must be deasserted with a GPIO
+ * from the pinctrl pinmux before the DBI registers can be accessed.
+ */
+ ret = pinctrl_pm_select_init_state(dev);
+ if (ret) {
+ dev_err(dev, "Failed to activate pinctrl pm state: %d\n", ret);
+ return ret;
+ }
+
+ if (!device_wakeup_path(dev)) {
+ ret = phy_init(stm32_pcie->phy);
+ if (ret) {
+ pinctrl_pm_select_default_state(dev);
+ return ret;
+ }
+ }
+
+ ret = clk_prepare_enable(stm32_pcie->clk);
+ if (ret)
+ goto err_phy_exit;
+
+ stm32_pcie_deassert_perst(stm32_pcie);
+
+ ret = dw_pcie_resume_noirq(&stm32_pcie->pci);
+ if (ret)
+ goto err_disable_clk;
+
+ pinctrl_pm_select_default_state(dev);
+
+ return 0;
+
+err_disable_clk:
+ stm32_pcie_assert_perst(stm32_pcie);
+ clk_disable_unprepare(stm32_pcie->clk);
+
+err_phy_exit:
+ phy_exit(stm32_pcie->phy);
+ pinctrl_pm_select_default_state(dev);
+
+ return ret;
+}
+
+static const struct dev_pm_ops stm32_pcie_pm_ops = {
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(stm32_pcie_suspend_noirq,
+ stm32_pcie_resume_noirq)
+};
+
+static const struct dw_pcie_host_ops stm32_pcie_host_ops = {
+};
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+ .start_link = stm32_pcie_start_link,
+ .stop_link = stm32_pcie_stop_link
+};
+
+static int stm32_add_pcie_port(struct stm32_pcie *stm32_pcie)
+{
+ struct device *dev = stm32_pcie->pci.dev;
+ unsigned int wake_irq;
+ int ret;
+
+ ret = phy_set_mode(stm32_pcie->phy, PHY_MODE_PCIE);
+ if (ret)
+ return ret;
+
+ ret = phy_init(stm32_pcie->phy);
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(stm32_pcie->regmap, SYSCFG_PCIECR,
+ STM32MP25_PCIECR_TYPE_MASK,
+ STM32MP25_PCIECR_RC);
+ if (ret)
+ goto err_phy_exit;
+
+ stm32_pcie_deassert_perst(stm32_pcie);
+
+ if (stm32_pcie->wake_gpio) {
+ wake_irq = gpiod_to_irq(stm32_pcie->wake_gpio);
+ ret = dev_pm_set_dedicated_wake_irq(dev, wake_irq);
+ if (ret) {
+ dev_err(dev, "Failed to enable wakeup irq %d\n", ret);
+ goto err_assert_perst;
+ }
+ irq_set_irq_type(wake_irq, IRQ_TYPE_EDGE_FALLING);
+ }
+
+ return 0;
+
+err_assert_perst:
+ stm32_pcie_assert_perst(stm32_pcie);
+
+err_phy_exit:
+ phy_exit(stm32_pcie->phy);
+
+ return ret;
+}
+
+static void stm32_remove_pcie_port(struct stm32_pcie *stm32_pcie)
+{
+ dev_pm_clear_wake_irq(stm32_pcie->pci.dev);
+
+ stm32_pcie_assert_perst(stm32_pcie);
+
+ phy_exit(stm32_pcie->phy);
+}
+
+static int stm32_pcie_parse_port(struct stm32_pcie *stm32_pcie)
+{
+ struct device *dev = stm32_pcie->pci.dev;
+ struct device_node *root_port;
+
+ root_port = of_get_next_available_child(dev->of_node, NULL);
+
+ stm32_pcie->phy = devm_of_phy_get(dev, root_port, NULL);
+ if (IS_ERR(stm32_pcie->phy)) {
+ of_node_put(root_port);
+ return dev_err_probe(dev, PTR_ERR(stm32_pcie->phy),
+ "Failed to get pcie-phy\n");
+ }
+
+ stm32_pcie->perst_gpio = devm_fwnode_gpiod_get(dev, of_fwnode_handle(root_port),
+ "reset", GPIOD_OUT_HIGH, NULL);
+ if (IS_ERR(stm32_pcie->perst_gpio)) {
+ if (PTR_ERR(stm32_pcie->perst_gpio) != -ENOENT) {
+ of_node_put(root_port);
+ return dev_err_probe(dev, PTR_ERR(stm32_pcie->perst_gpio),
+ "Failed to get reset GPIO\n");
+ }
+ stm32_pcie->perst_gpio = NULL;
+ }
+
+ stm32_pcie->wake_gpio = devm_fwnode_gpiod_get(dev, of_fwnode_handle(root_port),
+ "wake", GPIOD_IN, NULL);
+
+ if (IS_ERR(stm32_pcie->wake_gpio)) {
+ if (PTR_ERR(stm32_pcie->wake_gpio) != -ENOENT) {
+ of_node_put(root_port);
+ return dev_err_probe(dev, PTR_ERR(stm32_pcie->wake_gpio),
+ "Failed to get wake GPIO\n");
+ }
+ stm32_pcie->wake_gpio = NULL;
+ }
+
+ of_node_put(root_port);
+
+ return 0;
+}
+
+static int stm32_pcie_probe(struct platform_device *pdev)
+{
+ struct stm32_pcie *stm32_pcie;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ stm32_pcie = devm_kzalloc(dev, sizeof(*stm32_pcie), GFP_KERNEL);
+ if (!stm32_pcie)
+ return -ENOMEM;
+
+ stm32_pcie->pci.dev = dev;
+ stm32_pcie->pci.ops = &dw_pcie_ops;
+ stm32_pcie->pci.pp.ops = &stm32_pcie_host_ops;
+
+ stm32_pcie->regmap = syscon_regmap_lookup_by_compatible("st,stm32mp25-syscfg");
+ if (IS_ERR(stm32_pcie->regmap))
+ return dev_err_probe(dev, PTR_ERR(stm32_pcie->regmap),
+ "No syscfg specified\n");
+
+ stm32_pcie->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(stm32_pcie->clk))
+ return dev_err_probe(dev, PTR_ERR(stm32_pcie->clk),
+ "Failed to get PCIe clock source\n");
+
+ stm32_pcie->rst = devm_reset_control_get_exclusive(dev, NULL);
+ if (IS_ERR(stm32_pcie->rst))
+ return dev_err_probe(dev, PTR_ERR(stm32_pcie->rst),
+ "Failed to get PCIe reset\n");
+
+ ret = stm32_pcie_parse_port(stm32_pcie);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, stm32_pcie);
+
+ ret = stm32_add_pcie_port(stm32_pcie);
+ if (ret)
+ return ret;
+
+ reset_control_assert(stm32_pcie->rst);
+ reset_control_deassert(stm32_pcie->rst);
+
+ ret = clk_prepare_enable(stm32_pcie->clk);
+ if (ret) {
+ dev_err(dev, "Core clock enable failed %d\n", ret);
+ goto err_remove_port;
+ }
+
+ ret = pm_runtime_set_active(dev);
+ if (ret < 0) {
+ dev_err_probe(dev, ret, "Failed to activate runtime PM\n");
+ goto err_disable_clk;
+ }
+
+ pm_runtime_no_callbacks(dev);
+
+ ret = devm_pm_runtime_enable(dev);
+ if (ret < 0) {
+ dev_err_probe(dev, ret, "Failed to enable runtime PM\n");
+ goto err_disable_clk;
+ }
+
+ ret = dw_pcie_host_init(&stm32_pcie->pci.pp);
+ if (ret)
+ goto err_disable_clk;
+
+ if (stm32_pcie->wake_gpio)
+ device_init_wakeup(dev, true);
+
+ return 0;
+
+err_disable_clk:
+ clk_disable_unprepare(stm32_pcie->clk);
+
+err_remove_port:
+ stm32_remove_pcie_port(stm32_pcie);
+
+ return ret;
+}
+
+static void stm32_pcie_remove(struct platform_device *pdev)
+{
+ struct stm32_pcie *stm32_pcie = platform_get_drvdata(pdev);
+ struct dw_pcie_rp *pp = &stm32_pcie->pci.pp;
+
+ if (stm32_pcie->wake_gpio)
+ device_init_wakeup(&pdev->dev, false);
+
+ dw_pcie_host_deinit(pp);
+
+ clk_disable_unprepare(stm32_pcie->clk);
+
+ stm32_remove_pcie_port(stm32_pcie);
+
+ pm_runtime_put_noidle(&pdev->dev);
+}
+
+static const struct of_device_id stm32_pcie_of_match[] = {
+ { .compatible = "st,stm32mp25-pcie-rc" },
+ {},
+};
+
+static struct platform_driver stm32_pcie_driver = {
+ .probe = stm32_pcie_probe,
+ .remove = stm32_pcie_remove,
+ .driver = {
+ .name = "stm32-pcie",
+ .of_match_table = stm32_pcie_of_match,
+ .pm = &stm32_pcie_pm_ops,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
+};
+
+module_platform_driver(stm32_pcie_driver);
+
+MODULE_AUTHOR("Christian Bruel <christian.bruel@foss.st.com>");
+MODULE_DESCRIPTION("STM32MP25 PCIe Controller driver");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(of, stm32_pcie_of_match);
diff --git a/drivers/pci/controller/dwc/pcie-stm32.h b/drivers/pci/controller/dwc/pcie-stm32.h
new file mode 100644
index 000000000000..419cf1ff669d
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-stm32.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * ST PCIe driver definitions for STM32-MP25 SoC
+ *
+ * Copyright (C) 2025 STMicroelectronics - All Rights Reserved
+ * Author: Christian Bruel <christian.bruel@foss.st.com>
+ */
+
+#include <linux/bits.h>
+#include <linux/device.h>
+
+#define to_stm32_pcie(x) dev_get_drvdata((x)->dev)
+
+#define STM32MP25_PCIECR_TYPE_MASK GENMASK(11, 8)
+#define STM32MP25_PCIECR_EP 0
+#define STM32MP25_PCIECR_LTSSM_EN BIT(2)
+#define STM32MP25_PCIECR_RC BIT(10)
+
+#define SYSCFG_PCIECR 0x6000
diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c
index e1db909f53ec..0ddeef70726d 100644
--- a/drivers/pci/controller/dwc/pcie-tegra194.c
+++ b/drivers/pci/controller/dwc/pcie-tegra194.c
@@ -9,10 +9,10 @@
* Author: Vidya Sagar <vidyas@nvidia.com>
*/
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
-#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/interconnect.h>
#include <linux/interrupt.h>
@@ -20,8 +20,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/of_gpio.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
@@ -126,7 +124,7 @@
#define APPL_LTR_MSG_1 0xC4
#define LTR_MSG_REQ BIT(15)
-#define LTR_MST_NO_SNOOP_SHIFT 16
+#define LTR_NOSNOOP_MSG_REQ BIT(31)
#define APPL_LTR_MSG_2 0xC8
#define APPL_LTR_MSG_2_LTR_MSG_REQ_STATE BIT(3)
@@ -179,17 +177,12 @@
#define N_FTS_VAL 52
#define FTS_VAL 52
-#define GEN3_EQ_CONTROL_OFF 0x8a8
-#define GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT 8
-#define GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK GENMASK(23, 8)
-#define GEN3_EQ_CONTROL_OFF_FB_MODE_MASK GENMASK(3, 0)
-
#define PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT 0x8D0
-#define AMBA_ERROR_RESPONSE_CRS_SHIFT 3
-#define AMBA_ERROR_RESPONSE_CRS_MASK GENMASK(1, 0)
-#define AMBA_ERROR_RESPONSE_CRS_OKAY 0
-#define AMBA_ERROR_RESPONSE_CRS_OKAY_FFFFFFFF 1
-#define AMBA_ERROR_RESPONSE_CRS_OKAY_FFFF0001 2
+#define AMBA_ERROR_RESPONSE_RRS_SHIFT 3
+#define AMBA_ERROR_RESPONSE_RRS_MASK GENMASK(1, 0)
+#define AMBA_ERROR_RESPONSE_RRS_OKAY 0
+#define AMBA_ERROR_RESPONSE_RRS_OKAY_FFFFFFFF 1
+#define AMBA_ERROR_RESPONSE_RRS_OKAY_FFFF0001 2
#define MSIX_ADDR_MATCH_LOW_OFF 0x940
#define MSIX_ADDR_MATCH_LOW_OFF_EN BIT(0)
@@ -267,7 +260,6 @@ struct tegra_pcie_dw {
u32 msi_ctrl_int;
u32 num_lanes;
u32 cid;
- u32 cfg_link_cap_l1sub;
u32 ras_des_cap;
u32 pcie_cap_base;
u32 aspm_cmrt;
@@ -308,10 +300,6 @@ static inline u32 appl_readl(struct tegra_pcie_dw *pcie, const u32 reg)
return readl_relaxed(pcie->appl_base + reg);
}
-struct tegra_pcie_soc {
- enum dw_pcie_device_mode mode;
-};
-
static void tegra_pcie_icc_set(struct tegra_pcie_dw *pcie)
{
struct dw_pcie *pci = &pcie->pci;
@@ -322,9 +310,9 @@ static void tegra_pcie_icc_set(struct tegra_pcie_dw *pcie)
speed = FIELD_GET(PCI_EXP_LNKSTA_CLS, val);
width = FIELD_GET(PCI_EXP_LNKSTA_NLW, val);
- val = width * (PCIE_SPEED2MBS_ENC(pcie_link_speed[speed]) / BITS_PER_BYTE);
+ val = width * PCIE_SPEED2MBS_ENC(pcie_link_speed[speed]);
- if (icc_set_bw(pcie->icc_path, MBps_to_icc(val), 0))
+ if (icc_set_bw(pcie->icc_path, Mbps_to_icc(val), 0))
dev_err(pcie->dev, "can't set bw[%u]\n", val);
if (speed >= ARRAY_SIZE(pcie_gen_freq))
@@ -347,8 +335,7 @@ static void apply_bad_link_workaround(struct dw_pcie_rp *pp)
*/
val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA);
if (val & PCI_EXP_LNKSTA_LBMS) {
- current_link_width = (val & PCI_EXP_LNKSTA_NLW) >>
- PCI_EXP_LNKSTA_NLW_SHIFT;
+ current_link_width = FIELD_GET(PCI_EXP_LNKSTA_NLW, val);
if (pcie->init_link_width > current_link_width) {
dev_warn(pci->dev, "PCIe link is bad, width reduced\n");
val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
@@ -487,8 +474,7 @@ static irqreturn_t tegra_pcie_ep_irq_thread(int irq, void *arg)
return IRQ_HANDLED;
/* If EP doesn't advertise L1SS, just return */
- val = dw_pcie_readl_dbi(pci, pcie->cfg_link_cap_l1sub);
- if (!(val & (PCI_L1SS_CAP_ASPM_L1_1 | PCI_L1SS_CAP_ASPM_L1_2)))
+ if (!pci->l1ss_support)
return IRQ_HANDLED;
/* Check if BME is set to '1' */
@@ -497,8 +483,12 @@ static irqreturn_t tegra_pcie_ep_irq_thread(int irq, void *arg)
ktime_t timeout;
/* 110us for both snoop and no-snoop */
- val = 110 | (2 << PCI_LTR_SCALE_SHIFT) | LTR_MSG_REQ;
- val |= (val << LTR_MST_NO_SNOOP_SHIFT);
+ val = FIELD_PREP(PCI_LTR_VALUE_MASK, 110) |
+ FIELD_PREP(PCI_LTR_SCALE_MASK, 2) |
+ LTR_MSG_REQ |
+ FIELD_PREP(PCI_LTR_NOSNOOP_VALUE, 110) |
+ FIELD_PREP(PCI_LTR_NOSNOOP_SCALE, 2) |
+ LTR_NOSNOOP_MSG_REQ;
appl_writel(pcie, val, APPL_LTR_MSG_1);
/* Send LTR upstream */
@@ -616,24 +606,6 @@ static struct pci_ops tegra_pci_ops = {
};
#if defined(CONFIG_PCIEASPM)
-static void disable_aspm_l11(struct tegra_pcie_dw *pcie)
-{
- u32 val;
-
- val = dw_pcie_readl_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub);
- val &= ~PCI_L1SS_CAP_ASPM_L1_1;
- dw_pcie_writel_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub, val);
-}
-
-static void disable_aspm_l12(struct tegra_pcie_dw *pcie)
-{
- u32 val;
-
- val = dw_pcie_readl_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub);
- val &= ~PCI_L1SS_CAP_ASPM_L1_2;
- dw_pcie_writel_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub, val);
-}
-
static inline u32 event_counter_prog(struct tegra_pcie_dw *pcie, u32 event)
{
u32 val;
@@ -690,10 +662,9 @@ static int aspm_state_cnt(struct seq_file *s, void *data)
static void init_host_aspm(struct tegra_pcie_dw *pcie)
{
struct dw_pcie *pci = &pcie->pci;
- u32 val;
+ u32 l1ss, val;
- val = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_L1SS);
- pcie->cfg_link_cap_l1sub = val + PCI_L1SS_CAP;
+ l1ss = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_L1SS);
pcie->ras_des_cap = dw_pcie_find_ext_capability(&pcie->pci,
PCI_EXT_CAP_ID_VNDR);
@@ -705,11 +676,14 @@ static void init_host_aspm(struct tegra_pcie_dw *pcie)
PCIE_RAS_DES_EVENT_COUNTER_CONTROL, val);
/* Program T_cmrt and T_pwr_on values */
- val = dw_pcie_readl_dbi(pci, pcie->cfg_link_cap_l1sub);
+ val = dw_pcie_readl_dbi(pci, l1ss + PCI_L1SS_CAP);
val &= ~(PCI_L1SS_CAP_CM_RESTORE_TIME | PCI_L1SS_CAP_P_PWR_ON_VALUE);
val |= (pcie->aspm_cmrt << 8);
val |= (pcie->aspm_pwr_on_t << 19);
- dw_pcie_writel_dbi(pci, pcie->cfg_link_cap_l1sub, val);
+ dw_pcie_writel_dbi(pci, l1ss + PCI_L1SS_CAP, val);
+
+ if (pcie->supports_clkreq)
+ pci->l1ss_support = true;
/* Program L0s and L1 entrance latencies */
val = dw_pcie_readl_dbi(pci, PCIE_PORT_AFR);
@@ -721,12 +695,19 @@ static void init_host_aspm(struct tegra_pcie_dw *pcie)
static void init_debugfs(struct tegra_pcie_dw *pcie)
{
- debugfs_create_devm_seqfile(pcie->dev, "aspm_state_cnt", pcie->debugfs,
+ struct device *dev = pcie->dev;
+ char *name;
+
+ name = devm_kasprintf(dev, GFP_KERNEL, "%pOFP", dev->of_node);
+ if (!name)
+ return;
+
+ pcie->debugfs = debugfs_create_dir(name, NULL);
+
+ debugfs_create_devm_seqfile(dev, "aspm_state_cnt", pcie->debugfs,
aspm_state_cnt);
}
#else
-static inline void disable_aspm_l12(struct tegra_pcie_dw *pcie) { return; }
-static inline void disable_aspm_l11(struct tegra_pcie_dw *pcie) { return; }
static inline void init_host_aspm(struct tegra_pcie_dw *pcie) { return; }
static inline void init_debugfs(struct tegra_pcie_dw *pcie) { return; }
#endif
@@ -761,8 +742,7 @@ static void tegra_pcie_enable_system_interrupts(struct dw_pcie_rp *pp)
val_w = dw_pcie_readw_dbi(&pcie->pci, pcie->pcie_cap_base +
PCI_EXP_LNKSTA);
- pcie->init_link_width = (val_w & PCI_EXP_LNKSTA_NLW) >>
- PCI_EXP_LNKSTA_NLW_SHIFT;
+ pcie->init_link_width = FIELD_GET(PCI_EXP_LNKSTA_NLW, val_w);
val_w = dw_pcie_readw_dbi(&pcie->pci, pcie->pcie_cap_base +
PCI_EXP_LNKCTL);
@@ -771,13 +751,13 @@ static void tegra_pcie_enable_system_interrupts(struct dw_pcie_rp *pp)
val_w);
}
-static void tegra_pcie_enable_legacy_interrupts(struct dw_pcie_rp *pp)
+static void tegra_pcie_enable_intx_interrupts(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
u32 val;
- /* Enable legacy interrupt generation */
+ /* Enable INTX interrupt generation */
val = appl_readl(pcie, APPL_INTR_EN_L0_0);
val |= APPL_INTR_EN_L0_0_SYS_INTR_EN;
val |= APPL_INTR_EN_L0_0_INT_INT_EN;
@@ -828,7 +808,7 @@ static void tegra_pcie_enable_interrupts(struct dw_pcie_rp *pp)
appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_17);
tegra_pcie_enable_system_interrupts(pp);
- tegra_pcie_enable_legacy_interrupts(pp);
+ tegra_pcie_enable_intx_interrupts(pp);
if (IS_ENABLED(CONFIG_PCI_MSI))
tegra_pcie_enable_msi_interrupts(pp);
}
@@ -865,9 +845,9 @@ static void config_gen3_gen4_eq_presets(struct tegra_pcie_dw *pcie)
dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
val = dw_pcie_readl_dbi(pci, GEN3_EQ_CONTROL_OFF);
- val &= ~GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK;
- val |= (0x3ff << GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT);
- val &= ~GEN3_EQ_CONTROL_OFF_FB_MODE_MASK;
+ val &= ~GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC;
+ val |= FIELD_PREP(GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC, 0x3ff);
+ val &= ~GEN3_EQ_CONTROL_OFF_FB_MODE;
dw_pcie_writel_dbi(pci, GEN3_EQ_CONTROL_OFF, val);
val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
@@ -876,10 +856,10 @@ static void config_gen3_gen4_eq_presets(struct tegra_pcie_dw *pcie)
dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
val = dw_pcie_readl_dbi(pci, GEN3_EQ_CONTROL_OFF);
- val &= ~GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK;
- val |= (pcie->of_data->gen4_preset_vec <<
- GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT);
- val &= ~GEN3_EQ_CONTROL_OFF_FB_MODE_MASK;
+ val &= ~GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC;
+ val |= FIELD_PREP(GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC,
+ pcie->of_data->gen4_preset_vec);
+ val &= ~GEN3_EQ_CONTROL_OFF_FB_MODE;
dw_pcie_writel_dbi(pci, GEN3_EQ_CONTROL_OFF, val);
val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
@@ -900,11 +880,6 @@ static int tegra_pcie_dw_host_init(struct dw_pcie_rp *pp)
pcie->pcie_cap_base = dw_pcie_find_capability(&pcie->pci,
PCI_CAP_ID_EXP);
- val_16 = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_DEVCTL);
- val_16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
- val_16 |= PCI_EXP_DEVCTL_PAYLOAD_256B;
- dw_pcie_writew_dbi(pci, pcie->pcie_cap_base + PCI_EXP_DEVCTL, val_16);
-
val = dw_pcie_readl_dbi(pci, PCI_IO_BASE);
val &= ~(IO_BASE_IO_DECODE | IO_BASE_IO_DECODE_BIT8);
dw_pcie_writel_dbi(pci, PCI_IO_BASE, val);
@@ -916,19 +891,13 @@ static int tegra_pcie_dw_host_init(struct dw_pcie_rp *pp)
dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);
- /* Enable as 0xFFFF0001 response for CRS */
+ /* Enable as 0xFFFF0001 response for RRS */
val = dw_pcie_readl_dbi(pci, PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT);
- val &= ~(AMBA_ERROR_RESPONSE_CRS_MASK << AMBA_ERROR_RESPONSE_CRS_SHIFT);
- val |= (AMBA_ERROR_RESPONSE_CRS_OKAY_FFFF0001 <<
- AMBA_ERROR_RESPONSE_CRS_SHIFT);
+ val &= ~(AMBA_ERROR_RESPONSE_RRS_MASK << AMBA_ERROR_RESPONSE_RRS_SHIFT);
+ val |= (AMBA_ERROR_RESPONSE_RRS_OKAY_FFFF0001 <<
+ AMBA_ERROR_RESPONSE_RRS_SHIFT);
dw_pcie_writel_dbi(pci, PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT, val);
- /* Configure Max lane width from DT */
- val = dw_pcie_readl_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKCAP);
- val &= ~PCI_EXP_LNKCAP_MLW;
- val |= (pcie->num_lanes << PCI_EXP_LNKSTA_NLW_SHIFT);
- dw_pcie_writel_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKCAP, val);
-
/* Clear Slot Clock Configuration bit if SRNS configuration */
if (pcie->enable_srns) {
val_16 = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
@@ -942,12 +911,6 @@ static int tegra_pcie_dw_host_init(struct dw_pcie_rp *pp)
init_host_aspm(pcie);
- /* Disable ASPM-L1SS advertisement if there is no CLKREQ routing */
- if (!pcie->supports_clkreq) {
- disable_aspm_l11(pcie);
- disable_aspm_l12(pcie);
- }
-
if (!pcie->of_data->has_l1ss_exit_fix) {
val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
val &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL;
@@ -1047,12 +1010,12 @@ retry_link:
return 0;
}
-static int tegra_pcie_dw_link_up(struct dw_pcie *pci)
+static bool tegra_pcie_dw_link_up(struct dw_pcie *pci)
{
struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
u32 val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA);
- return !!(val & PCI_EXP_LNKSTA_DLLLA);
+ return val & PCI_EXP_LNKSTA_DLLLA;
}
static void tegra_pcie_dw_stop_link(struct dw_pcie *pci)
@@ -1069,7 +1032,7 @@ static const struct dw_pcie_ops tegra_dw_pcie_ops = {
};
static const struct dw_pcie_host_ops tegra_pcie_dw_host_ops = {
- .host_init = tegra_pcie_dw_host_init,
+ .init = tegra_pcie_dw_host_init,
};
static void tegra_pcie_disable_phy(struct tegra_pcie_dw *pcie)
@@ -1225,6 +1188,7 @@ static int tegra_pcie_bpmp_set_ctrl_state(struct tegra_pcie_dw *pcie,
struct mrq_uphy_response resp;
struct tegra_bpmp_message msg;
struct mrq_uphy_request req;
+ int err;
/*
* Controller-5 doesn't need to have its state set by BPMP-FW in
@@ -1247,7 +1211,13 @@ static int tegra_pcie_bpmp_set_ctrl_state(struct tegra_pcie_dw *pcie,
msg.rx.data = &resp;
msg.rx.size = sizeof(resp);
- return tegra_bpmp_transfer(pcie->bpmp, &msg);
+ err = tegra_bpmp_transfer(pcie->bpmp, &msg);
+ if (err)
+ return err;
+ if (msg.rx.ret)
+ return -EINVAL;
+
+ return 0;
}
static int tegra_pcie_bpmp_set_pll_state(struct tegra_pcie_dw *pcie,
@@ -1256,6 +1226,7 @@ static int tegra_pcie_bpmp_set_pll_state(struct tegra_pcie_dw *pcie,
struct mrq_uphy_response resp;
struct tegra_bpmp_message msg;
struct mrq_uphy_request req;
+ int err;
memset(&req, 0, sizeof(req));
memset(&resp, 0, sizeof(resp));
@@ -1275,13 +1246,19 @@ static int tegra_pcie_bpmp_set_pll_state(struct tegra_pcie_dw *pcie,
msg.rx.data = &resp;
msg.rx.size = sizeof(resp);
- return tegra_bpmp_transfer(pcie->bpmp, &msg);
+ err = tegra_bpmp_transfer(pcie->bpmp, &msg);
+ if (err)
+ return err;
+ if (msg.rx.ret)
+ return -EINVAL;
+
+ return 0;
}
static void tegra_pcie_downstream_dev_to_D0(struct tegra_pcie_dw *pcie)
{
struct dw_pcie_rp *pp = &pcie->pci.pp;
- struct pci_bus *child, *root_bus = NULL;
+ struct pci_bus *child, *root_port_bus = NULL;
struct pci_dev *pdev;
/*
@@ -1294,19 +1271,19 @@ static void tegra_pcie_downstream_dev_to_D0(struct tegra_pcie_dw *pcie)
*/
list_for_each_entry(child, &pp->bridge->bus->children, node) {
- /* Bring downstream devices to D0 if they are not already in */
if (child->parent == pp->bridge->bus) {
- root_bus = child;
+ root_port_bus = child;
break;
}
}
- if (!root_bus) {
- dev_err(pcie->dev, "Failed to find downstream devices\n");
+ if (!root_port_bus) {
+ dev_err(pcie->dev, "Failed to find downstream bus of Root Port\n");
return;
}
- list_for_each_entry(pdev, &root_bus->devices, bus_list) {
+ /* Bring downstream devices to D0 if they are not already in */
+ list_for_each_entry(pdev, &root_port_bus->devices, bus_list) {
if (PCI_SLOT(pdev->devfn) == 0) {
if (pci_set_power_state(pdev, PCI_D0))
dev_err(pcie->dev,
@@ -1654,7 +1631,6 @@ static void tegra_pcie_deinit_controller(struct tegra_pcie_dw *pcie)
static int tegra_pcie_config_rp(struct tegra_pcie_dw *pcie)
{
struct device *dev = pcie->dev;
- char *name;
int ret;
pm_runtime_enable(dev);
@@ -1684,13 +1660,6 @@ static int tegra_pcie_config_rp(struct tegra_pcie_dw *pcie)
goto fail_host_init;
}
- name = devm_kasprintf(dev, GFP_KERNEL, "%pOFP", dev->of_node);
- if (!name) {
- ret = -ENOMEM;
- goto fail_host_init;
- }
-
- pcie->debugfs = debugfs_create_dir(name, NULL);
init_debugfs(pcie);
return ret;
@@ -1741,9 +1710,9 @@ static void pex_ep_event_pex_rst_assert(struct tegra_pcie_dw *pcie)
ret);
}
- ret = tegra_pcie_bpmp_set_pll_state(pcie, false);
+ ret = tegra_pcie_bpmp_set_ctrl_state(pcie, false);
if (ret)
- dev_err(pcie->dev, "Failed to turn off UPHY: %d\n", ret);
+ dev_err(pcie->dev, "Failed to disable controller: %d\n", ret);
pcie->ep_state = EP_STATE_DISABLED;
dev_dbg(pcie->dev, "Uninitialization of endpoint is completed\n");
@@ -1802,6 +1771,10 @@ static void pex_ep_event_pex_rst_deassert(struct tegra_pcie_dw *pcie)
goto fail_phy;
}
+ /* Perform cleanup that requires refclk */
+ pci_epc_deinit_notify(pcie->pci.ep.epc);
+ dw_pcie_ep_cleanup(&pcie->pci.ep);
+
/* Clear any stale interrupt statuses */
appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L0);
appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_0_0);
@@ -1872,12 +1845,6 @@ static void pex_ep_event_pex_rst_deassert(struct tegra_pcie_dw *pcie)
init_host_aspm(pcie);
- /* Disable ASPM-L1SS advertisement if there is no CLKREQ routing */
- if (!pcie->supports_clkreq) {
- disable_aspm_l11(pcie);
- disable_aspm_l12(pcie);
- }
-
if (!pcie->of_data->has_l1ss_exit_fix) {
val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
val &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL;
@@ -1887,11 +1854,6 @@ static void pex_ep_event_pex_rst_deassert(struct tegra_pcie_dw *pcie)
pcie->pcie_cap_base = dw_pcie_find_capability(&pcie->pci,
PCI_CAP_ID_EXP);
- val_16 = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_DEVCTL);
- val_16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
- val_16 |= PCI_EXP_DEVCTL_PAYLOAD_256B;
- dw_pcie_writew_dbi(pci, pcie->pcie_cap_base + PCI_EXP_DEVCTL, val_16);
-
/* Clear Slot Clock Configuration bit if SRNS configuration */
if (pcie->enable_srns) {
val_16 = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
@@ -1909,13 +1871,13 @@ static void pex_ep_event_pex_rst_deassert(struct tegra_pcie_dw *pcie)
val = (upper_32_bits(ep->msi_mem_phys) & MSIX_ADDR_MATCH_HIGH_OFF_MASK);
dw_pcie_writel_dbi(pci, MSIX_ADDR_MATCH_HIGH_OFF, val);
- ret = dw_pcie_ep_init_complete(ep);
+ ret = dw_pcie_ep_init_registers(ep);
if (ret) {
dev_err(dev, "Failed to complete initialization: %d\n", ret);
goto fail_init_complete;
}
- dw_pcie_ep_init_notify(ep);
+ pci_epc_init_notify(ep->epc);
/* Program the private control to allow sending LTR upstream */
if (pcie->of_data->has_ltr_req_fix) {
@@ -1961,7 +1923,16 @@ static irqreturn_t tegra_pcie_ep_pex_rst_irq(int irq, void *arg)
return IRQ_HANDLED;
}
-static int tegra_pcie_ep_raise_legacy_irq(struct tegra_pcie_dw *pcie, u16 irq)
+static void tegra_pcie_ep_init(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ enum pci_barno bar;
+
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
+ dw_pcie_ep_reset_bar(pci, bar);
+}
+
+static int tegra_pcie_ep_raise_intx_irq(struct tegra_pcie_dw *pcie, u16 irq)
{
/* Tegra194 supports only INTA */
if (irq > 1)
@@ -1975,10 +1946,10 @@ static int tegra_pcie_ep_raise_legacy_irq(struct tegra_pcie_dw *pcie, u16 irq)
static int tegra_pcie_ep_raise_msi_irq(struct tegra_pcie_dw *pcie, u16 irq)
{
- if (unlikely(irq > 31))
+ if (unlikely(irq > 32))
return -EINVAL;
- appl_writel(pcie, BIT(irq), APPL_MSI_CTRL_1);
+ appl_writel(pcie, BIT(irq - 1), APPL_MSI_CTRL_1);
return 0;
}
@@ -1993,20 +1964,19 @@ static int tegra_pcie_ep_raise_msix_irq(struct tegra_pcie_dw *pcie, u16 irq)
}
static int tegra_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
- enum pci_epc_irq_type type,
- u16 interrupt_num)
+ unsigned int type, u16 interrupt_num)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
switch (type) {
- case PCI_EPC_IRQ_LEGACY:
- return tegra_pcie_ep_raise_legacy_irq(pcie, interrupt_num);
+ case PCI_IRQ_INTX:
+ return tegra_pcie_ep_raise_intx_irq(pcie, interrupt_num);
- case PCI_EPC_IRQ_MSI:
+ case PCI_IRQ_MSI:
return tegra_pcie_ep_raise_msi_irq(pcie, interrupt_num);
- case PCI_EPC_IRQ_MSIX:
+ case PCI_IRQ_MSIX:
return tegra_pcie_ep_raise_msix_irq(pcie, interrupt_num);
default:
@@ -2019,12 +1989,15 @@ static int tegra_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
static const struct pci_epc_features tegra_pcie_epc_features = {
.linkup_notifier = true,
- .core_init_notifier = true,
- .msi_capable = false,
- .msix_capable = false,
- .reserved_bar = 1 << BAR_2 | 1 << BAR_3 | 1 << BAR_4 | 1 << BAR_5,
- .bar_fixed_64bit = 1 << BAR_0,
- .bar_fixed_size[0] = SZ_1M,
+ .msi_capable = true,
+ .bar[BAR_0] = { .type = BAR_FIXED, .fixed_size = SZ_1M,
+ .only_64bit = true, },
+ .bar[BAR_1] = { .type = BAR_RESERVED, },
+ .bar[BAR_2] = { .type = BAR_RESERVED, },
+ .bar[BAR_3] = { .type = BAR_RESERVED, },
+ .bar[BAR_4] = { .type = BAR_RESERVED, },
+ .bar[BAR_5] = { .type = BAR_RESERVED, },
+ .align = SZ_64K,
};
static const struct pci_epc_features*
@@ -2034,6 +2007,7 @@ tegra_pcie_ep_get_features(struct dw_pcie_ep *ep)
}
static const struct dw_pcie_ep_ops pcie_ep_ops = {
+ .init = tegra_pcie_ep_init,
.raise_irq = tegra_pcie_ep_raise_irq,
.get_features = tegra_pcie_ep_get_features,
};
@@ -2284,11 +2258,14 @@ static int tegra_pcie_dw_probe(struct platform_device *pdev)
ret = tegra_pcie_config_ep(pcie, pdev);
if (ret < 0)
goto fail;
+ else
+ return 0;
break;
default:
dev_err(dev, "Invalid PCIe device type %d\n",
pcie->of_data->mode);
+ ret = -EINVAL;
}
fail:
@@ -2509,7 +2486,7 @@ static const struct dev_pm_ops tegra_pcie_dw_pm_ops = {
static struct platform_driver tegra_pcie_dw_driver = {
.probe = tegra_pcie_dw_probe,
- .remove_new = tegra_pcie_dw_remove,
+ .remove = tegra_pcie_dw_remove,
.shutdown = tegra_pcie_dw_shutdown,
.driver = {
.name = "tegra194-pcie",
diff --git a/drivers/pci/controller/dwc/pcie-uniphier-ep.c b/drivers/pci/controller/dwc/pcie-uniphier-ep.c
index 4d0a587c0ba5..d6e73811216e 100644
--- a/drivers/pci/controller/dwc/pcie-uniphier-ep.c
+++ b/drivers/pci/controller/dwc/pcie-uniphier-ep.c
@@ -11,7 +11,7 @@
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/iopoll.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
@@ -212,7 +212,7 @@ static void uniphier_pcie_ep_init(struct dw_pcie_ep *ep)
dw_pcie_ep_reset_bar(pci, bar);
}
-static int uniphier_pcie_ep_raise_legacy_irq(struct dw_pcie_ep *ep)
+static int uniphier_pcie_ep_raise_intx_irq(struct dw_pcie_ep *ep)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
struct uniphier_pcie_ep_priv *priv = to_uniphier_pcie(pci);
@@ -256,15 +256,14 @@ static int uniphier_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep,
}
static int uniphier_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
- enum pci_epc_irq_type type,
- u16 interrupt_num)
+ unsigned int type, u16 interrupt_num)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
switch (type) {
- case PCI_EPC_IRQ_LEGACY:
- return uniphier_pcie_ep_raise_legacy_irq(ep);
- case PCI_EPC_IRQ_MSI:
+ case PCI_IRQ_INTX:
+ return uniphier_pcie_ep_raise_intx_irq(ep);
+ case PCI_IRQ_MSI:
return uniphier_pcie_ep_raise_msi_irq(ep, func_no,
interrupt_num);
default:
@@ -284,7 +283,7 @@ uniphier_pcie_get_features(struct dw_pcie_ep *ep)
}
static const struct dw_pcie_ep_ops uniphier_pcie_ep_ops = {
- .ep_init = uniphier_pcie_ep_init,
+ .init = uniphier_pcie_ep_init,
.raise_irq = uniphier_pcie_ep_raise_irq,
.get_features = uniphier_pcie_get_features,
};
@@ -400,7 +399,20 @@ static int uniphier_pcie_ep_probe(struct platform_device *pdev)
return ret;
priv->pci.ep.ops = &uniphier_pcie_ep_ops;
- return dw_pcie_ep_init(&priv->pci.ep);
+ ret = dw_pcie_ep_init(&priv->pci.ep);
+ if (ret)
+ return ret;
+
+ ret = dw_pcie_ep_init_registers(&priv->pci.ep);
+ if (ret) {
+ dev_err(dev, "Failed to initialize DWC endpoint registers\n");
+ dw_pcie_ep_deinit(&priv->pci.ep);
+ return ret;
+ }
+
+ pci_epc_init_notify(priv->pci.ep.epc);
+
+ return 0;
}
static const struct uniphier_pcie_ep_soc_data uniphier_pro5_data = {
@@ -412,8 +424,12 @@ static const struct uniphier_pcie_ep_soc_data uniphier_pro5_data = {
.msi_capable = true,
.msix_capable = false,
.align = 1 << 16,
- .bar_fixed_64bit = BIT(BAR_0) | BIT(BAR_2) | BIT(BAR_4),
- .reserved_bar = BIT(BAR_4),
+ .bar[BAR_0] = { .only_64bit = true, },
+ .bar[BAR_1] = { .type = BAR_RESERVED, },
+ .bar[BAR_2] = { .only_64bit = true, },
+ .bar[BAR_3] = { .type = BAR_RESERVED, },
+ .bar[BAR_4] = { .type = BAR_RESERVED, },
+ .bar[BAR_5] = { .type = BAR_RESERVED, },
},
};
@@ -426,7 +442,12 @@ static const struct uniphier_pcie_ep_soc_data uniphier_nx1_data = {
.msi_capable = true,
.msix_capable = false,
.align = 1 << 12,
- .bar_fixed_64bit = BIT(BAR_0) | BIT(BAR_2) | BIT(BAR_4),
+ .bar[BAR_0] = { .only_64bit = true, },
+ .bar[BAR_1] = { .type = BAR_RESERVED, },
+ .bar[BAR_2] = { .only_64bit = true, },
+ .bar[BAR_3] = { .type = BAR_RESERVED, },
+ .bar[BAR_4] = { .only_64bit = true, },
+ .bar[BAR_5] = { .type = BAR_RESERVED, },
},
};
diff --git a/drivers/pci/controller/dwc/pcie-uniphier.c b/drivers/pci/controller/dwc/pcie-uniphier.c
index 48c3eba817b4..297e7a3d9b36 100644
--- a/drivers/pci/controller/dwc/pcie-uniphier.c
+++ b/drivers/pci/controller/dwc/pcie-uniphier.c
@@ -67,7 +67,7 @@ struct uniphier_pcie {
struct clk *clk;
struct reset_control *rst;
struct phy *phy;
- struct irq_domain *legacy_irq_domain;
+ struct irq_domain *intx_irq_domain;
};
#define to_uniphier_pcie(x) dev_get_drvdata((x)->dev)
@@ -135,7 +135,7 @@ static int uniphier_pcie_wait_rc(struct uniphier_pcie *pcie)
return 0;
}
-static int uniphier_pcie_link_up(struct dw_pcie *pci)
+static bool uniphier_pcie_link_up(struct dw_pcie *pci)
{
struct uniphier_pcie *pcie = to_uniphier_pcie(pci);
u32 val, mask;
@@ -253,12 +253,12 @@ static void uniphier_pcie_irq_handler(struct irq_desc *desc)
reg = FIELD_GET(PCL_RCV_INTX_ALL_STATUS, val);
for_each_set_bit(bit, &reg, PCI_NUM_INTX)
- generic_handle_domain_irq(pcie->legacy_irq_domain, bit);
+ generic_handle_domain_irq(pcie->intx_irq_domain, bit);
chained_irq_exit(chip, desc);
}
-static int uniphier_pcie_config_legacy_irq(struct dw_pcie_rp *pp)
+static int uniphier_pcie_config_intx_irq(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct uniphier_pcie *pcie = to_uniphier_pcie(pci);
@@ -279,9 +279,9 @@ static int uniphier_pcie_config_legacy_irq(struct dw_pcie_rp *pp)
goto out_put_node;
}
- pcie->legacy_irq_domain = irq_domain_add_linear(np_intc, PCI_NUM_INTX,
+ pcie->intx_irq_domain = irq_domain_create_linear(of_fwnode_handle(np_intc), PCI_NUM_INTX,
&uniphier_intx_domain_ops, pp);
- if (!pcie->legacy_irq_domain) {
+ if (!pcie->intx_irq_domain) {
dev_err(pci->dev, "Failed to get INTx domain\n");
ret = -ENODEV;
goto out_put_node;
@@ -301,7 +301,7 @@ static int uniphier_pcie_host_init(struct dw_pcie_rp *pp)
struct uniphier_pcie *pcie = to_uniphier_pcie(pci);
int ret;
- ret = uniphier_pcie_config_legacy_irq(pp);
+ ret = uniphier_pcie_config_intx_irq(pp);
if (ret)
return ret;
@@ -311,7 +311,7 @@ static int uniphier_pcie_host_init(struct dw_pcie_rp *pp)
}
static const struct dw_pcie_host_ops uniphier_pcie_host_ops = {
- .host_init = uniphier_pcie_host_init,
+ .init = uniphier_pcie_host_init,
};
static int uniphier_pcie_host_enable(struct uniphier_pcie *pcie)
diff --git a/drivers/pci/controller/dwc/pcie-visconti.c b/drivers/pci/controller/dwc/pcie-visconti.c
index 71026fefa366..cdeac6177143 100644
--- a/drivers/pci/controller/dwc/pcie-visconti.c
+++ b/drivers/pci/controller/dwc/pcie-visconti.c
@@ -121,13 +121,13 @@ static u32 visconti_mpu_readl(struct visconti_pcie *pcie, u32 reg)
return readl_relaxed(pcie->mpu_base + reg);
}
-static int visconti_pcie_link_up(struct dw_pcie *pci)
+static bool visconti_pcie_link_up(struct dw_pcie *pci)
{
struct visconti_pcie *pcie = dev_get_drvdata(pci->dev);
void __iomem *addr = pcie->ulreg_base;
u32 val = readl_relaxed(addr + PCIE_UL_REG_V_PHY_ST_02);
- return !!(val & PCIE_UL_S_L0);
+ return val & PCIE_UL_S_L0;
}
static int visconti_pcie_start_link(struct dw_pcie *pci)
@@ -236,7 +236,7 @@ static int visconti_pcie_host_init(struct dw_pcie_rp *pp)
}
static const struct dw_pcie_host_ops visconti_pcie_host_ops = {
- .host_init = visconti_pcie_host_init,
+ .init = visconti_pcie_host_init,
};
static int visconti_get_resources(struct platform_device *pdev,
diff --git a/drivers/pci/controller/mobiveil/Kconfig b/drivers/pci/controller/mobiveil/Kconfig
index 58ce034f701a..c50c4625937f 100644
--- a/drivers/pci/controller/mobiveil/Kconfig
+++ b/drivers/pci/controller/mobiveil/Kconfig
@@ -9,6 +9,7 @@ config PCIE_MOBIVEIL
config PCIE_MOBIVEIL_HOST
bool
depends on PCI_MSI
+ select IRQ_MSI_LIB
select PCIE_MOBIVEIL
config PCIE_LAYERSCAPE_GEN4
diff --git a/drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c b/drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c
index d7b7350f02dd..4919b27eaf44 100644
--- a/drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c
+++ b/drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c
@@ -53,18 +53,13 @@ static inline void ls_g4_pcie_pf_writel(struct ls_g4_pcie *pcie,
iowrite32(val, pcie->pci.csr_axi_slave_base + PCIE_PF_OFF + off);
}
-static int ls_g4_pcie_link_up(struct mobiveil_pcie *pci)
+static bool ls_g4_pcie_link_up(struct mobiveil_pcie *pci)
{
struct ls_g4_pcie *pcie = to_ls_g4_pcie(pci);
u32 state;
state = ls_g4_pcie_pf_readl(pcie, PCIE_PF_DBG);
- state = state & PF_DBG_LTSSM_MASK;
-
- if (state == PF_DBG_LTSSM_L0)
- return 1;
-
- return 0;
+ return (state & PF_DBG_LTSSM_MASK) == PF_DBG_LTSSM_L0;
}
static void ls_g4_pcie_disable_interrupt(struct ls_g4_pcie *pcie)
@@ -174,8 +169,7 @@ static int ls_g4_pcie_interrupt_init(struct mobiveil_pcie *mv_pci)
static void ls_g4_pcie_reset(struct work_struct *work)
{
- struct delayed_work *dwork = container_of(work, struct delayed_work,
- work);
+ struct delayed_work *dwork = to_delayed_work(work);
struct ls_g4_pcie *pcie = container_of(dwork, struct ls_g4_pcie, dwork);
struct mobiveil_pcie *mv_pci = &pcie->pci;
u16 ctrl;
@@ -190,7 +184,7 @@ static void ls_g4_pcie_reset(struct work_struct *work)
ls_g4_pcie_enable_interrupt(pcie);
}
-static struct mobiveil_rp_ops ls_g4_pcie_rp_ops = {
+static const struct mobiveil_rp_ops ls_g4_pcie_rp_ops = {
.interrupt_init = ls_g4_pcie_interrupt_init,
};
diff --git a/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c b/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c
index 31a7bdebe540..dbc72c73fd0a 100644
--- a/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c
+++ b/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c
@@ -12,14 +12,12 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
+#include <linux/irqchip/irq-msi-lib.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/msi.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
-#include <linux/of_platform.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
@@ -356,16 +354,19 @@ static const struct irq_domain_ops intx_domain_ops = {
.map = mobiveil_pcie_intx_map,
};
-static struct irq_chip mobiveil_msi_irq_chip = {
- .name = "Mobiveil PCIe MSI",
- .irq_mask = pci_msi_mask_irq,
- .irq_unmask = pci_msi_unmask_irq,
-};
+#define MOBIVEIL_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
+ MSI_FLAG_USE_DEF_CHIP_OPS | \
+ MSI_FLAG_NO_AFFINITY)
+
+#define MOBIVEIL_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \
+ MSI_FLAG_PCI_MSIX)
-static struct msi_domain_info mobiveil_msi_domain_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_PCI_MSIX),
- .chip = &mobiveil_msi_irq_chip,
+static const struct msi_parent_ops mobiveil_msi_parent_ops = {
+ .required_flags = MOBIVEIL_MSI_FLAGS_REQUIRED,
+ .supported_flags = MOBIVEIL_MSI_FLAGS_SUPPORTED,
+ .bus_select_token = DOMAIN_BUS_PCI_MSI,
+ .prefix = "Mobiveil-",
+ .init_dev_msi_info = msi_lib_init_dev_msi_info,
};
static void mobiveil_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
@@ -381,16 +382,9 @@ static void mobiveil_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
(int)data->hwirq, msg->address_hi, msg->address_lo);
}
-static int mobiveil_msi_set_affinity(struct irq_data *irq_data,
- const struct cpumask *mask, bool force)
-{
- return -EINVAL;
-}
-
static struct irq_chip mobiveil_msi_bottom_irq_chip = {
.name = "Mobiveil MSI",
.irq_compose_msi_msg = mobiveil_compose_msi_msg,
- .irq_set_affinity = mobiveil_msi_set_affinity,
};
static int mobiveil_irq_msi_domain_alloc(struct irq_domain *domain,
@@ -445,23 +439,20 @@ static const struct irq_domain_ops msi_domain_ops = {
static int mobiveil_allocate_msi_domains(struct mobiveil_pcie *pcie)
{
struct device *dev = &pcie->pdev->dev;
- struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node);
struct mobiveil_msi *msi = &pcie->rp.msi;
mutex_init(&msi->lock);
- msi->dev_domain = irq_domain_add_linear(NULL, msi->num_of_vectors,
- &msi_domain_ops, pcie);
- if (!msi->dev_domain) {
- dev_err(dev, "failed to create IRQ domain\n");
- return -ENOMEM;
- }
- msi->msi_domain = pci_msi_create_irq_domain(fwnode,
- &mobiveil_msi_domain_info,
- msi->dev_domain);
- if (!msi->msi_domain) {
+ struct irq_domain_info info = {
+ .fwnode = dev_fwnode(dev),
+ .ops = &msi_domain_ops,
+ .host_data = pcie,
+ .size = msi->num_of_vectors,
+ };
+
+ msi->dev_domain = msi_create_parent_irq_domain(&info, &mobiveil_msi_parent_ops);
+ if (!msi->dev_domain) {
dev_err(dev, "failed to create MSI domain\n");
- irq_domain_remove(msi->dev_domain);
return -ENOMEM;
}
@@ -471,13 +462,11 @@ static int mobiveil_allocate_msi_domains(struct mobiveil_pcie *pcie)
static int mobiveil_pcie_init_irq_domain(struct mobiveil_pcie *pcie)
{
struct device *dev = &pcie->pdev->dev;
- struct device_node *node = dev->of_node;
struct mobiveil_root_port *rp = &pcie->rp;
/* setup INTx */
- rp->intx_domain = irq_domain_add_linear(node, PCI_NUM_INTX,
- &intx_domain_ops, pcie);
-
+ rp->intx_domain = irq_domain_create_linear(dev_fwnode(dev), PCI_NUM_INTX, &intx_domain_ops,
+ pcie);
if (!rp->intx_domain) {
dev_err(dev, "Failed to get a INTx IRQ domain\n");
return -ENOMEM;
@@ -542,7 +531,7 @@ static bool mobiveil_pcie_is_bridge(struct mobiveil_pcie *pcie)
u32 header_type;
header_type = mobiveil_csr_readb(pcie, PCI_HEADER_TYPE);
- header_type &= 0x7f;
+ header_type &= PCI_HEADER_TYPE_MASK;
return header_type == PCI_HEADER_TYPE_BRIDGE;
}
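
[Illustrative note, not part of the patch] The pcie-mobiveil-host.c hunks above (and the aardvark, Hyper-V and Tegra changes later in this diff) follow the same conversion: the top-level MSI irq_chip and the pci_msi_create_irq_domain() call are dropped, and the driver's low-level domain is registered as an MSI parent so the core builds per-device domains from the advertised flags. A condensed sketch of the pattern, with example_* names standing in for driver specifics:

#include <linux/irqchip/irq-msi-lib.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>

#define EXAMPLE_MSI_FLAGS_REQUIRED	(MSI_FLAG_USE_DEF_DOM_OPS | \
					 MSI_FLAG_USE_DEF_CHIP_OPS)
#define EXAMPLE_MSI_FLAGS_SUPPORTED	(MSI_GENERIC_FLAGS_MASK | \
					 MSI_FLAG_PCI_MSIX)

static const struct msi_parent_ops example_msi_parent_ops = {
	.required_flags		= EXAMPLE_MSI_FLAGS_REQUIRED,
	.supported_flags	= EXAMPLE_MSI_FLAGS_SUPPORTED,
	.bus_select_token	= DOMAIN_BUS_PCI_MSI,
	.prefix			= "example-",
	.init_dev_msi_info	= msi_lib_init_dev_msi_info,
};

/* Replaces the old inner-domain + pci_msi_create_irq_domain() pair */
static struct irq_domain *example_create_msi_domain(struct device *dev,
						    const struct irq_domain_ops *ops,
						    unsigned int nr_vectors,
						    void *host_data)
{
	struct irq_domain_info info = {
		.fwnode		= dev_fwnode(dev),
		.ops		= ops,
		.host_data	= host_data,
		.size		= nr_vectors,
	};

	return msi_create_parent_irq_domain(&info, &example_msi_parent_ops);
}
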
diff --git a/drivers/pci/controller/mobiveil/pcie-mobiveil.h b/drivers/pci/controller/mobiveil/pcie-mobiveil.h
index 6082b8afbc31..7246de6a7176 100644
--- a/drivers/pci/controller/mobiveil/pcie-mobiveil.h
+++ b/drivers/pci/controller/mobiveil/pcie-mobiveil.h
@@ -135,7 +135,6 @@
struct mobiveil_msi { /* MSI information */
struct mutex lock; /* protect bitmap variable */
- struct irq_domain *msi_domain;
struct irq_domain *dev_domain;
phys_addr_t msi_pages_phys;
int num_of_vectors;
@@ -151,7 +150,7 @@ struct mobiveil_rp_ops {
struct mobiveil_root_port {
void __iomem *config_axi_slave_base; /* endpoint config base */
struct resource *ob_io_res;
- struct mobiveil_rp_ops *ops;
+ const struct mobiveil_rp_ops *ops;
int irq;
raw_spinlock_t intx_mask_lock;
struct irq_domain *intx_domain;
@@ -160,7 +159,7 @@ struct mobiveil_root_port {
};
struct mobiveil_pab_ops {
- int (*link_up)(struct mobiveil_pcie *pcie);
+ bool (*link_up)(struct mobiveil_pcie *pcie);
};
struct mobiveil_pcie {
diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c
index 71ecd7ddcc8a..e34bea1ff0ac 100644
--- a/drivers/pci/controller/pci-aardvark.c
+++ b/drivers/pci/controller/pci-aardvark.c
@@ -13,6 +13,7 @@
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
+#include <linux/irqchip/irq-msi-lib.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -23,7 +24,6 @@
#include <linux/platform_device.h>
#include <linux/msi.h>
#include <linux/of_address.h>
-#include <linux/of_gpio.h>
#include <linux/of_pci.h>
#include "../pci.h"
@@ -51,7 +51,7 @@
#define PIO_COMPLETION_STATUS_MASK GENMASK(9, 7)
#define PIO_COMPLETION_STATUS_OK 0
#define PIO_COMPLETION_STATUS_UR 1
-#define PIO_COMPLETION_STATUS_CRS 2
+#define PIO_COMPLETION_STATUS_RRS 2
#define PIO_COMPLETION_STATUS_CA 4
#define PIO_NON_POSTED_REQ BIT(10)
#define PIO_ERR_STATUS BIT(11)
@@ -263,7 +263,7 @@ enum {
#define MSI_IRQ_NUM 32
-#define CFG_RD_CRS_VAL 0xffff0001
+#define CFG_RD_RRS_VAL 0xffff0001
struct advk_pcie {
struct platform_device *pdev;
@@ -279,7 +279,6 @@ struct advk_pcie {
struct irq_domain *irq_domain;
struct irq_chip irq_chip;
raw_spinlock_t irq_lock;
- struct irq_domain *msi_domain;
struct irq_domain *msi_inner_domain;
raw_spinlock_t msi_irq_lock;
DECLARE_BITMAP(msi_used, MSI_IRQ_NUM);
@@ -650,7 +649,7 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
advk_pcie_train_link(pcie);
}
-static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u32 *val)
+static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_rrs, u32 *val)
{
struct device *dev = &pcie->pdev->dev;
u32 reg;
@@ -670,7 +669,7 @@ static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u3
* 2) value Unsupported Request(1) of COMPLETION_STATUS(bit9:7) only
* means a PIO write error, and for PIO read it is successful with
* a read value of 0xFFFFFFFF.
- * 3) value Completion Retry Status(CRS) of COMPLETION_STATUS(bit9:7)
+ * 3) value Config Request Retry Status(RRS) of COMPLETION_STATUS(bit9:7)
* only means a PIO write error, and for PIO read it is successful
* with a read value of 0xFFFF0001.
* 4) value Completer Abort (CA) of COMPLETION_STATUS(bit9:7) means
@@ -695,10 +694,10 @@ static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u3
strcomp_status = "UR";
ret = -EOPNOTSUPP;
break;
- case PIO_COMPLETION_STATUS_CRS:
- if (allow_crs && val) {
- /* PCIe r4.0, sec 2.3.2, says:
- * If CRS Software Visibility is enabled:
+ case PIO_COMPLETION_STATUS_RRS:
+ if (allow_rrs && val) {
+ /* PCIe r6.0, sec 2.3.2, says:
+ * If Configuration RRS Software Visibility is enabled:
* For a Configuration Read Request that includes both
* bytes of the Vendor ID field of a device Function's
* Configuration Space Header, the Root Complex must
@@ -707,22 +706,22 @@ static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u3
* all '1's for any additional bytes included in the
* request.
*
- * So CRS in this case is not an error status.
+ * So RRS in this case is not an error status.
*/
- *val = CFG_RD_CRS_VAL;
+ *val = CFG_RD_RRS_VAL;
strcomp_status = NULL;
ret = 0;
break;
}
- /* PCIe r4.0, sec 2.3.2, says:
- * If CRS Software Visibility is not enabled, the Root Complex
+ /* PCIe r6.0, sec 2.3.2, says:
+ * If RRS Software Visibility is not enabled, the Root Complex
* must re-issue the Configuration Request as a new Request.
- * If CRS Software Visibility is enabled: For a Configuration
+ * If RRS Software Visibility is enabled: For a Configuration
* Write Request or for any other Configuration Read Request,
* the Root Complex must re-issue the Configuration Request as
* a new Request.
* A Root Complex implementation may choose to limit the number
- * of Configuration Request/CRS Completion Status loops before
+ * of Configuration Request/RRS Completion Status loops before
* determining that something is wrong with the target of the
* Request and taking appropriate action, e.g., complete the
* Request to the host as a failed transaction.
@@ -730,7 +729,7 @@ static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u3
* So return -EAGAIN and caller (pci-aardvark.c driver) will
* re-issue request again up to the PIO_RETRY_CNT retries.
*/
- strcomp_status = "CRS";
+ strcomp_status = "RRS";
ret = -EAGAIN;
break;
case PIO_COMPLETION_STATUS_CA:
@@ -921,8 +920,8 @@ advk_pci_bridge_emul_pcie_conf_write(struct pci_bridge_emul *bridge,
case PCI_EXP_RTCTL: {
u16 rootctl = le16_to_cpu(bridge->pcie_conf.rootctl);
- /* Only emulation of PMEIE and CRSSVE bits is provided */
- rootctl &= PCI_EXP_RTCTL_PMEIE | PCI_EXP_RTCTL_CRSSVE;
+ /* Only emulation of PMEIE and RRS_SVE bits is provided */
+ rootctl &= PCI_EXP_RTCTL_PMEIE | PCI_EXP_RTCTL_RRS_SVE;
bridge->pcie_conf.rootctl = cpu_to_le16(rootctl);
break;
}
@@ -1076,7 +1075,7 @@ static int advk_sw_pci_bridge_init(struct advk_pcie *pcie)
bridge->pcie_conf.slotsta = cpu_to_le16(PCI_EXP_SLTSTA_PDS);
/* Indicates supports for Completion Retry Status */
- bridge->pcie_conf.rootcap = cpu_to_le16(PCI_EXP_RTCAP_CRSVIS);
+ bridge->pcie_conf.rootcap = cpu_to_le16(PCI_EXP_RTCAP_RRS_SV);
bridge->subsystem_vendor_id = advk_readl(pcie, PCIE_CORE_SSDEV_ID_REG) & 0xffff;
bridge->subsystem_id = advk_readl(pcie, PCIE_CORE_SSDEV_ID_REG) >> 16;
@@ -1142,7 +1141,7 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
{
struct advk_pcie *pcie = bus->sysdata;
int retry_count;
- bool allow_crs;
+ bool allow_rrs;
u32 reg;
int ret;
@@ -1154,16 +1153,16 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
size, val);
/*
- * Completion Retry Status is possible to return only when reading all
- * 4 bytes from PCI_VENDOR_ID and PCI_DEVICE_ID registers at once and
- * CRSSVE flag on Root Bridge is enabled.
+ * Configuration Request Retry Status (RRS) is possible to return
+ * only when reading both bytes from PCI_VENDOR_ID at once and
+ * RRS_SVE flag on Root Port is enabled.
*/
- allow_crs = (where == PCI_VENDOR_ID) && (size == 4) &&
+ allow_rrs = (where == PCI_VENDOR_ID) && (size >= 2) &&
(le16_to_cpu(pcie->bridge.pcie_conf.rootctl) &
- PCI_EXP_RTCTL_CRSSVE);
+ PCI_EXP_RTCTL_RRS_SVE);
if (advk_pcie_pio_is_running(pcie))
- goto try_crs;
+ goto try_rrs;
/* Program the control register */
reg = advk_readl(pcie, PIO_CTRL);
@@ -1190,12 +1189,12 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
ret = advk_pcie_wait_pio(pcie);
if (ret < 0)
- goto try_crs;
+ goto try_rrs;
retry_count += ret;
/* Check PIO status and get the read result */
- ret = advk_pcie_check_pio_status(pcie, allow_crs, val);
+ ret = advk_pcie_check_pio_status(pcie, allow_rrs, val);
} while (ret == -EAGAIN && retry_count < PIO_RETRY_CNT);
if (ret < 0)
@@ -1208,13 +1207,13 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
return PCIBIOS_SUCCESSFUL;
-try_crs:
+try_rrs:
/*
- * If it is possible, return Completion Retry Status so that caller
- * tries to issue the request again instead of failing.
+ * If it is possible, return Configuration Request Retry Status so
+ * that caller tries to issue the request again instead of failing.
*/
- if (allow_crs) {
- *val = CFG_RD_CRS_VAL;
+ if (allow_rrs) {
+ *val = CFG_RD_RRS_VAL;
return PCIBIOS_SUCCESSFUL;
}
@@ -1305,12 +1304,6 @@ static void advk_msi_irq_compose_msi_msg(struct irq_data *data,
msg->data = data->hwirq;
}
-static int advk_msi_set_affinity(struct irq_data *irq_data,
- const struct cpumask *mask, bool force)
-{
- return -EINVAL;
-}
-
static void advk_msi_irq_mask(struct irq_data *d)
{
struct advk_pcie *pcie = d->domain->host_data;
@@ -1339,22 +1332,9 @@ static void advk_msi_irq_unmask(struct irq_data *d)
raw_spin_unlock_irqrestore(&pcie->msi_irq_lock, flags);
}
-static void advk_msi_top_irq_mask(struct irq_data *d)
-{
- pci_msi_mask_irq(d);
- irq_chip_mask_parent(d);
-}
-
-static void advk_msi_top_irq_unmask(struct irq_data *d)
-{
- pci_msi_unmask_irq(d);
- irq_chip_unmask_parent(d);
-}
-
static struct irq_chip advk_msi_bottom_irq_chip = {
.name = "MSI",
.irq_compose_msi_msg = advk_msi_irq_compose_msi_msg,
- .irq_set_affinity = advk_msi_set_affinity,
.irq_mask = advk_msi_irq_mask,
.irq_unmask = advk_msi_irq_unmask,
};
@@ -1444,16 +1424,20 @@ static const struct irq_domain_ops advk_pcie_irq_domain_ops = {
.xlate = irq_domain_xlate_onecell,
};
-static struct irq_chip advk_msi_irq_chip = {
- .name = "advk-MSI",
- .irq_mask = advk_msi_top_irq_mask,
- .irq_unmask = advk_msi_top_irq_unmask,
-};
-
-static struct msi_domain_info advk_msi_domain_info = {
- .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX,
- .chip = &advk_msi_irq_chip,
+#define ADVK_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
+ MSI_FLAG_USE_DEF_CHIP_OPS | \
+ MSI_FLAG_PCI_MSI_MASK_PARENT | \
+ MSI_FLAG_NO_AFFINITY)
+#define ADVK_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \
+ MSI_FLAG_PCI_MSIX | \
+ MSI_FLAG_MULTI_PCI_MSI)
+
+static const struct msi_parent_ops advk_msi_parent_ops = {
+ .required_flags = ADVK_MSI_FLAGS_REQUIRED,
+ .supported_flags = ADVK_MSI_FLAGS_SUPPORTED,
+ .bus_select_token = DOMAIN_BUS_PCI_MSI,
+ .prefix = "advk-",
+ .init_dev_msi_info = msi_lib_init_dev_msi_info,
};
static int advk_pcie_init_msi_irq_domain(struct advk_pcie *pcie)
@@ -1463,27 +1447,22 @@ static int advk_pcie_init_msi_irq_domain(struct advk_pcie *pcie)
raw_spin_lock_init(&pcie->msi_irq_lock);
mutex_init(&pcie->msi_used_lock);
- pcie->msi_inner_domain =
- irq_domain_add_linear(NULL, MSI_IRQ_NUM,
- &advk_msi_domain_ops, pcie);
- if (!pcie->msi_inner_domain)
- return -ENOMEM;
+ struct irq_domain_info info = {
+ .fwnode = dev_fwnode(dev),
+ .ops = &advk_msi_domain_ops,
+ .host_data = pcie,
+ .size = MSI_IRQ_NUM,
+ };
- pcie->msi_domain =
- pci_msi_create_irq_domain(dev_fwnode(dev),
- &advk_msi_domain_info,
- pcie->msi_inner_domain);
- if (!pcie->msi_domain) {
- irq_domain_remove(pcie->msi_inner_domain);
+ pcie->msi_inner_domain = msi_create_parent_irq_domain(&info, &advk_msi_parent_ops);
+ if (!pcie->msi_inner_domain)
return -ENOMEM;
- }
return 0;
}
static void advk_pcie_remove_msi_irq_domain(struct advk_pcie *pcie)
{
- irq_domain_remove(pcie->msi_domain);
irq_domain_remove(pcie->msi_inner_domain);
}
@@ -1515,9 +1494,8 @@ static int advk_pcie_init_irq_domain(struct advk_pcie *pcie)
irq_chip->irq_mask = advk_pcie_irq_mask;
irq_chip->irq_unmask = advk_pcie_irq_unmask;
- pcie->irq_domain =
- irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
- &advk_pcie_irq_domain_ops, pcie);
+ pcie->irq_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node), PCI_NUM_INTX,
+ &advk_pcie_irq_domain_ops, pcie);
if (!pcie->irq_domain) {
dev_err(dev, "Failed to get a INTx IRQ domain\n");
ret = -ENOMEM;
@@ -1556,9 +1534,7 @@ static const struct irq_domain_ops advk_pcie_rp_irq_domain_ops = {
static int advk_pcie_init_rp_irq_domain(struct advk_pcie *pcie)
{
- pcie->rp_irq_domain = irq_domain_add_linear(NULL, 1,
- &advk_pcie_rp_irq_domain_ops,
- pcie);
+ pcie->rp_irq_domain = irq_domain_create_linear(NULL, 1, &advk_pcie_rp_irq_domain_ops, pcie);
if (!pcie->rp_irq_domain) {
dev_err(&pcie->pdev->dev, "Failed to add Root Port IRQ domain\n");
return -ENOMEM;
@@ -2003,7 +1979,7 @@ static struct platform_driver advk_pcie_driver = {
.of_match_table = advk_pcie_of_match_table,
},
.probe = advk_pcie_probe,
- .remove_new = advk_pcie_remove,
+ .remove = advk_pcie_remove,
};
module_platform_driver(advk_pcie_driver);
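
[Illustrative note, not part of the patch] CFG_RD_RRS_VAL (0xffff0001), which the aardvark driver returns above when a config read completes with Retry Status, is Vendor ID 0x0001 with all remaining bytes set to one — the value PCIe defines for RRS Software Visibility, and the value callers polling the Vendor ID treat as "not ready yet, retry". A generic caller-side sketch of that retry loop (not the PCI core's actual helper):

#include <linux/delay.h>
#include <linux/pci.h>

/* Sketch: poll the Vendor/Device ID dword until the device stops returning RRS */
static bool example_wait_vendor_id(struct pci_bus *bus, unsigned int devfn,
				   u32 *id, int timeout_ms)
{
	if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, id))
		return false;

	/* 0xffff0001: Vendor ID 0x0001, upper bytes all ones => retry */
	while (*id == 0xffff0001 && timeout_ms > 0) {
		msleep(10);
		timeout_ms -= 10;
		if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, id))
			return false;
	}

	return *id != 0xffff0001 && *id != 0xffffffff;
}
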
diff --git a/drivers/pci/controller/pci-ftpci100.c b/drivers/pci/controller/pci-ftpci100.c
index 6e7981d2ed5e..28e43831c0f1 100644
--- a/drivers/pci/controller/pci-ftpci100.c
+++ b/drivers/pci/controller/pci-ftpci100.c
@@ -15,8 +15,7 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
@@ -346,8 +345,8 @@ static int faraday_pci_setup_cascaded_irq(struct faraday_pci *p)
return irq ?: -EINVAL;
}
- p->irqdomain = irq_domain_add_linear(intc, PCI_NUM_INTX,
- &faraday_pci_irqdomain_ops, p);
+ p->irqdomain = irq_domain_create_linear(of_fwnode_handle(intc), PCI_NUM_INTX,
+ &faraday_pci_irqdomain_ops, p);
of_node_put(intc);
if (!p->irqdomain) {
dev_err(p->dev, "failed to create Gemini PCI IRQ domain\n");
diff --git a/drivers/pci/controller/pci-host-common.c b/drivers/pci/controller/pci-host-common.c
index d3924a44db02..c473e7c03bac 100644
--- a/drivers/pci/controller/pci-host-common.c
+++ b/drivers/pci/controller/pci-host-common.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Generic PCI host driver common code
+ * Common library for PCI host controller drivers
*
* Copyright (C) 2014 ARM Limited
*
@@ -9,18 +9,20 @@
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/of_device.h>
#include <linux/of_pci.h>
#include <linux/pci-ecam.h>
#include <linux/platform_device.h>
+#include "pci-host-common.h"
+
static void gen_pci_unmap_cfg(void *ptr)
{
pci_ecam_free((struct pci_config_window *)ptr);
}
-static struct pci_config_window *gen_pci_init(struct device *dev,
+struct pci_config_window *pci_host_common_ecam_create(struct device *dev,
struct pci_host_bridge *bridge, const struct pci_ecam_ops *ops)
{
int err;
@@ -48,44 +50,52 @@ static struct pci_config_window *gen_pci_init(struct device *dev,
return cfg;
}
+EXPORT_SYMBOL_GPL(pci_host_common_ecam_create);
-int pci_host_common_probe(struct platform_device *pdev)
+int pci_host_common_init(struct platform_device *pdev,
+ struct pci_host_bridge *bridge,
+ const struct pci_ecam_ops *ops)
{
struct device *dev = &pdev->dev;
- struct pci_host_bridge *bridge;
struct pci_config_window *cfg;
- const struct pci_ecam_ops *ops;
-
- ops = of_device_get_match_data(&pdev->dev);
- if (!ops)
- return -ENODEV;
- bridge = devm_pci_alloc_host_bridge(dev, 0);
- if (!bridge)
- return -ENOMEM;
+ of_pci_check_probe_only();
platform_set_drvdata(pdev, bridge);
- of_pci_check_probe_only();
-
/* Parse and map our Configuration Space windows */
- cfg = gen_pci_init(dev, bridge, ops);
+ cfg = pci_host_common_ecam_create(dev, bridge, ops);
if (IS_ERR(cfg))
return PTR_ERR(cfg);
- /* Do not reassign resources if probe only */
- if (!pci_has_flag(PCI_PROBE_ONLY))
- pci_add_flags(PCI_REASSIGN_ALL_BUS);
-
bridge->sysdata = cfg;
bridge->ops = (struct pci_ops *)&ops->pci_ops;
+ bridge->enable_device = ops->enable_device;
+ bridge->disable_device = ops->disable_device;
bridge->msi_domain = true;
return pci_host_probe(bridge);
}
+EXPORT_SYMBOL_GPL(pci_host_common_init);
+
+int pci_host_common_probe(struct platform_device *pdev)
+{
+ const struct pci_ecam_ops *ops;
+ struct pci_host_bridge *bridge;
+
+ ops = of_device_get_match_data(&pdev->dev);
+ if (!ops)
+ return -ENODEV;
+
+ bridge = devm_pci_alloc_host_bridge(&pdev->dev, 0);
+ if (!bridge)
+ return -ENOMEM;
+
+ return pci_host_common_init(pdev, bridge, ops);
+}
EXPORT_SYMBOL_GPL(pci_host_common_probe);
-int pci_host_common_remove(struct platform_device *pdev)
+void pci_host_common_remove(struct platform_device *pdev)
{
struct pci_host_bridge *bridge = platform_get_drvdata(pdev);
@@ -93,9 +103,8 @@ int pci_host_common_remove(struct platform_device *pdev)
pci_stop_root_bus(bridge->bus);
pci_remove_root_bus(bridge->bus);
pci_unlock_rescan_remove();
-
- return 0;
}
EXPORT_SYMBOL_GPL(pci_host_common_remove);
+MODULE_DESCRIPTION("Common library for PCI host controller drivers");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/pci-host-common.h b/drivers/pci/controller/pci-host-common.h
new file mode 100644
index 000000000000..b5075d4bd7eb
--- /dev/null
+++ b/drivers/pci/controller/pci-host-common.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Common library for PCI host controller drivers
+ *
+ * Copyright (C) 2014 ARM Limited
+ *
+ * Author: Will Deacon <will.deacon@arm.com>
+ */
+
+#ifndef _PCI_HOST_COMMON_H
+#define _PCI_HOST_COMMON_H
+
+struct pci_ecam_ops;
+
+int pci_host_common_probe(struct platform_device *pdev);
+int pci_host_common_init(struct platform_device *pdev,
+ struct pci_host_bridge *bridge,
+ const struct pci_ecam_ops *ops);
+void pci_host_common_remove(struct platform_device *pdev);
+
+struct pci_config_window *pci_host_common_ecam_create(struct device *dev,
+ struct pci_host_bridge *bridge, const struct pci_ecam_ops *ops);
+#endif
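
[Illustrative note, not part of the patch] With gen_pci_init() exported as pci_host_common_ecam_create() and the probe path split into pci_host_common_init(), a controller driver that needs to allocate its own host bridge can now do so and still reuse the common ECAM setup. A minimal sketch; example_ecam_ops and the probe function are placeholders, and the config accessors are the stock generic ones:

#include <linux/pci-ecam.h>
#include <linux/platform_device.h>

#include "pci-host-common.h"

static const struct pci_ecam_ops example_ecam_ops = {
	.pci_ops = {
		.map_bus	= pci_ecam_map_bus,
		.read		= pci_generic_config_read,
		.write		= pci_generic_config_write,
	},
};

static int example_host_probe(struct platform_device *pdev)
{
	struct pci_host_bridge *bridge;

	bridge = devm_pci_alloc_host_bridge(&pdev->dev, 0);
	if (!bridge)
		return -ENOMEM;

	/* driver-specific setup of the bridge could go here */

	return pci_host_common_init(pdev, bridge, &example_ecam_ops);
}
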
diff --git a/drivers/pci/controller/pci-host-generic.c b/drivers/pci/controller/pci-host-generic.c
index 63865aeb636b..c1bc0d34348f 100644
--- a/drivers/pci/controller/pci-host-generic.c
+++ b/drivers/pci/controller/pci-host-generic.c
@@ -14,6 +14,8 @@
#include <linux/pci-ecam.h>
#include <linux/platform_device.h>
+#include "pci-host-common.h"
+
static const struct pci_ecam_ops gen_pci_cfg_cam_bus_ops = {
.bus_shift = 16,
.pci_ops = {
@@ -86,4 +88,5 @@ static struct platform_driver gen_pci_driver = {
};
module_platform_driver(gen_pci_driver);
+MODULE_DESCRIPTION("Generic PCI host controller driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/pci-hyperv-intf.c b/drivers/pci/controller/pci-hyperv-intf.c
index cc96be450360..28b3e93d31c0 100644
--- a/drivers/pci/controller/pci-hyperv-intf.c
+++ b/drivers/pci/controller/pci-hyperv-intf.c
@@ -14,6 +14,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/hyperv.h>
+#include <linux/export.h>
struct hyperv_pci_block_ops hvpci_block_ops;
EXPORT_SYMBOL_GPL(hvpci_block_ops);
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
index 2d93d0c4f10d..1e237d3538f9 100644
--- a/drivers/pci/controller/pci-hyperv.c
+++ b/drivers/pci/controller/pci-hyperv.c
@@ -44,11 +44,14 @@
#include <linux/delay.h>
#include <linux/semaphore.h>
#include <linux/irq.h>
+#include <linux/irqchip/irq-msi-lib.h>
#include <linux/msi.h>
#include <linux/hyperv.h>
#include <linux/refcount.h>
#include <linux/irqdomain.h>
#include <linux/acpi.h>
+#include <linux/sizes.h>
+#include <linux/of_irq.h>
#include <asm/mshyperv.h>
/*
@@ -308,8 +311,6 @@ struct pci_packet {
void (*completion_func)(void *context, struct pci_response *resp,
int resp_packet_size);
void *compl_ctxt;
-
- struct pci_message message[];
};
/*
@@ -465,7 +466,7 @@ struct pci_eject_response {
u32 status;
} __packed;
-static int pci_ring_size = (4 * PAGE_SIZE);
+static int pci_ring_size = VMBUS_RING_SIZE(SZ_16K);
/*
* Driver specific state.
@@ -508,7 +509,6 @@ struct hv_pcibus_device {
struct list_head children;
struct list_head dr_list;
- struct msi_domain_info msi_info;
struct irq_domain *irq_domain;
struct workqueue_struct *wq;
@@ -545,7 +545,7 @@ struct hv_pcidev_description {
struct hv_dr_state {
struct list_head list_entry;
u32 device_count;
- struct hv_pcidev_description func[];
+ struct hv_pcidev_description func[] __counted_by(device_count);
};
struct hv_pci_dev {
@@ -576,9 +576,8 @@ struct hv_pci_compl {
static void hv_pci_onchannelcallback(void *context);
#ifdef CONFIG_X86
-#define DELIVERY_MODE APIC_DELIVERY_MODE_FIXED
-#define FLOW_HANDLER handle_edge_irq
-#define FLOW_NAME "edge"
+#define DELIVERY_MODE APIC_DELIVERY_MODE_FIXED
+#define HV_MSI_CHIP_FLAGS MSI_CHIP_FLAG_SET_ACK
static int hv_pci_irqchip_init(void)
{
@@ -600,7 +599,7 @@ static unsigned int hv_msi_get_int_vector(struct irq_data *data)
#define hv_msi_prepare pci_msi_prepare
/**
- * hv_arch_irq_unmask() - "Unmask" the IRQ by setting its current
+ * hv_irq_retarget_interrupt() - "Unmask" the IRQ by setting its current
* affinity.
* @data: Describes the IRQ
*
@@ -609,7 +608,7 @@ static unsigned int hv_msi_get_int_vector(struct irq_data *data)
* is built out of this PCI bus's instance GUID and the function
* number of the device.
*/
-static void hv_arch_irq_unmask(struct irq_data *data)
+static void hv_irq_retarget_interrupt(struct irq_data *data)
{
struct msi_desc *msi_desc = irq_data_get_msi_desc(data);
struct hv_retarget_device_interrupt *params;
@@ -650,13 +649,6 @@ static void hv_arch_irq_unmask(struct irq_data *data)
PCI_FUNC(pdev->devfn);
params->int_target.vector = hv_msi_get_int_vector(data);
- /*
- * Honoring apic->delivery_mode set to APIC_DELIVERY_MODE_FIXED by
- * setting the HV_DEVICE_INTERRUPT_TARGET_MULTICAST flag results in a
- * spurious interrupt storm. Not doing so does not seem to have a
- * negative effect (yet?).
- */
-
if (hbus->protocol_version >= PCI_PROTOCOL_VERSION_1_2) {
/*
* PCI_PROTOCOL_VERSION_1_2 supports the VP_SET version of the
@@ -721,6 +713,20 @@ out:
dev_err(&hbus->hdev->device,
"%s() failed: %#llx", __func__, res);
}
+
+static void hv_arch_irq_unmask(struct irq_data *data)
+{
+ if (hv_root_partition())
+ /*
+ * In case of the nested root partition, the nested hypervisor
+ * is taking care of interrupt remapping and thus the
+ * MAP_DEVICE_INTERRUPT hypercall is required instead of
+ * RETARGET_INTERRUPT.
+ */
+ (void)hv_map_msi_interrupt(data, NULL);
+ else
+ hv_irq_retarget_interrupt(data);
+}
#elif defined(CONFIG_ARM64)
/*
* SPI vectors to use for vPCI; arch SPIs range is [32, 1019], but leaving a bit
@@ -730,8 +736,7 @@ out:
#define HV_PCI_MSI_SPI_START 64
#define HV_PCI_MSI_SPI_NR (1020 - HV_PCI_MSI_SPI_START)
#define DELIVERY_MODE 0
-#define FLOW_HANDLER NULL
-#define FLOW_NAME NULL
+#define HV_MSI_CHIP_FLAGS MSI_CHIP_FLAG_SET_EOI
#define hv_msi_prepare NULL
struct hv_pci_chip_data {
@@ -823,9 +828,17 @@ static int hv_pci_vec_irq_gic_domain_alloc(struct irq_domain *domain,
int ret;
fwspec.fwnode = domain->parent->fwnode;
- fwspec.param_count = 2;
- fwspec.param[0] = hwirq;
- fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
+ if (is_of_node(fwspec.fwnode)) {
+ /* SPI lines for OF translations start at offset 32 */
+ fwspec.param_count = 3;
+ fwspec.param[0] = 0;
+ fwspec.param[1] = hwirq - 32;
+ fwspec.param[2] = IRQ_TYPE_EDGE_RISING;
+ } else {
+ fwspec.param_count = 2;
+ fwspec.param[0] = hwirq;
+ fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
+ }
ret = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
if (ret)
@@ -893,10 +906,44 @@ static const struct irq_domain_ops hv_pci_domain_ops = {
.activate = hv_pci_vec_irq_domain_activate,
};
+#ifdef CONFIG_OF
+
+static struct irq_domain *hv_pci_of_irq_domain_parent(void)
+{
+ struct device_node *parent;
+ struct irq_domain *domain;
+
+ parent = of_irq_find_parent(hv_get_vmbus_root_device()->of_node);
+ if (!parent)
+ return NULL;
+ domain = irq_find_host(parent);
+ of_node_put(parent);
+
+ return domain;
+}
+
+#endif
+
+#ifdef CONFIG_ACPI
+
+static struct irq_domain *hv_pci_acpi_irq_domain_parent(void)
+{
+ acpi_gsi_domain_disp_fn gsi_domain_disp_fn;
+
+ gsi_domain_disp_fn = acpi_get_gsi_dispatcher();
+ if (!gsi_domain_disp_fn)
+ return NULL;
+ return irq_find_matching_fwnode(gsi_domain_disp_fn(0),
+ DOMAIN_BUS_ANY);
+}
+
+#endif
+
static int hv_pci_irqchip_init(void)
{
static struct hv_pci_chip_data *chip_data;
struct fwnode_handle *fn = NULL;
+ struct irq_domain *irq_domain_parent = NULL;
int ret = -ENOMEM;
chip_data = kzalloc(sizeof(*chip_data), GFP_KERNEL);
@@ -913,9 +960,24 @@ static int hv_pci_irqchip_init(void)
* way to ensure that all the corresponding devices are also gone and
* no interrupts will be generated.
*/
- hv_msi_gic_irq_domain = acpi_irq_create_hierarchy(0, HV_PCI_MSI_SPI_NR,
- fn, &hv_pci_domain_ops,
- chip_data);
+#ifdef CONFIG_ACPI
+ if (!acpi_disabled)
+ irq_domain_parent = hv_pci_acpi_irq_domain_parent();
+#endif
+#ifdef CONFIG_OF
+ if (!irq_domain_parent)
+ irq_domain_parent = hv_pci_of_irq_domain_parent();
+#endif
+ if (!irq_domain_parent) {
+ WARN_ONCE(1, "Invalid firmware configuration for VMBus interrupts\n");
+ ret = -EINVAL;
+ goto free_chip;
+ }
+
+ hv_msi_gic_irq_domain = irq_domain_create_hierarchy(irq_domain_parent, 0,
+ HV_PCI_MSI_SPI_NR,
+ fn, &hv_pci_domain_ops,
+ chip_data);
if (!hv_msi_gic_irq_domain) {
pr_err("Failed to create Hyper-V arm64 vPCI MSI IRQ domain\n");
@@ -1136,8 +1198,8 @@ static void _hv_pcifront_read_config(struct hv_pci_dev *hpdev, int where,
PCI_CAPABILITY_LIST) {
/* ROM BARs are unimplemented */
*val = 0;
- } else if (where >= PCI_INTERRUPT_LINE && where + size <=
- PCI_INTERRUPT_PIN) {
+ } else if ((where >= PCI_INTERRUPT_LINE && where + size <= PCI_INTERRUPT_PIN) ||
+ (where >= PCI_INTERRUPT_PIN && where + size <= PCI_MIN_GNT)) {
/*
* Interrupt Line and Interrupt PIN are hard-wired to zero
* because this front-end only supports message-signaled
@@ -1362,7 +1424,7 @@ static struct pci_ops hv_pcifront_ops = {
*
* If the PF driver wishes to initiate communication, it can "invalidate" one or
* more of the first 64 blocks. This invalidation is delivered via a callback
- * supplied by the VF driver by this driver.
+ * supplied to the VF driver by this driver.
*
* No protocol is implied, except that supplied by the PF and VF drivers.
*/
@@ -1444,7 +1506,7 @@ static int hv_read_config_block(struct pci_dev *pdev, void *buf,
memset(&pkt, 0, sizeof(pkt));
pkt.pkt.completion_func = hv_pci_read_config_compl;
pkt.pkt.compl_ctxt = &comp_pkt;
- read_blk = (struct pci_read_block *)&pkt.pkt.message;
+ read_blk = (struct pci_read_block *)pkt.buf;
read_blk->message_type.type = PCI_READ_BLOCK;
read_blk->wslot.slot = devfn_to_wslot(pdev->devfn);
read_blk->block_id = block_id;
@@ -1524,7 +1586,7 @@ static int hv_write_config_block(struct pci_dev *pdev, void *buf,
memset(&pkt, 0, sizeof(pkt));
pkt.pkt.completion_func = hv_pci_write_config_compl;
pkt.pkt.compl_ctxt = &comp_pkt;
- write_blk = (struct pci_write_block *)&pkt.pkt.message;
+ write_blk = (struct pci_write_block *)pkt.buf;
write_blk->message_type.type = PCI_WRITE_BLOCK;
write_blk->wslot.slot = devfn_to_wslot(pdev->devfn);
write_blk->block_id = block_id;
@@ -1605,7 +1667,7 @@ static void hv_int_desc_free(struct hv_pci_dev *hpdev,
return;
}
memset(&ctxt, 0, sizeof(ctxt));
- int_pkt = (struct pci_delete_interrupt *)&ctxt.pkt.message;
+ int_pkt = (struct pci_delete_interrupt *)ctxt.buffer;
int_pkt->message_type.type =
PCI_DELETE_INTERRUPT_MESSAGE;
int_pkt->wslot.slot = hpdev->desc.win_slot.slot;
@@ -1618,7 +1680,6 @@ static void hv_int_desc_free(struct hv_pci_dev *hpdev,
/**
* hv_msi_free() - Free the MSI.
* @domain: The interrupt domain pointer
- * @info: Extra MSI-related context
* @irq: Identifies the IRQ.
*
* The Hyper-V parent partition and hypervisor are tracking the
@@ -1626,8 +1687,7 @@ static void hv_int_desc_free(struct hv_pci_dev *hpdev,
* table up to date. This callback sends a message that frees
* the IRT entry and related tracking nonsense.
*/
-static void hv_msi_free(struct irq_domain *domain, struct msi_domain_info *info,
- unsigned int irq)
+static void hv_msi_free(struct irq_domain *domain, unsigned int irq)
{
struct hv_pcibus_device *hbus;
struct hv_pci_dev *hpdev;
@@ -1637,7 +1697,7 @@ static void hv_msi_free(struct irq_domain *domain, struct msi_domain_info *info,
struct msi_desc *msi = irq_data_get_msi_desc(irq_data);
pdev = msi_desc_to_pci_dev(msi);
- hbus = info->data;
+ hbus = domain->host_data;
int_desc = irq_data_get_irq_chip_data(irq_data);
if (!int_desc)
return;
@@ -1655,7 +1715,6 @@ static void hv_msi_free(struct irq_domain *domain, struct msi_domain_info *info,
static void hv_irq_mask(struct irq_data *data)
{
- pci_msi_mask_irq(data);
if (data->parent_data->chip->irq_mask)
irq_chip_mask_parent(data);
}
@@ -1666,7 +1725,6 @@ static void hv_irq_unmask(struct irq_data *data)
if (data->parent_data->chip->irq_unmask)
irq_chip_unmask_parent(data);
- pci_msi_unmask_irq(data);
}
struct compose_comp_ctxt {
@@ -1763,8 +1821,7 @@ static int hv_compose_multi_msi_req_get_cpu(void)
spin_lock_irqsave(&multi_msi_cpu_lock, flags);
- cpu_next = cpumask_next_wrap(cpu_next, cpu_online_mask, nr_cpu_ids,
- false);
+ cpu_next = cpumask_next_wrap(cpu_next, cpu_online_mask);
cpu = cpu_next;
spin_unlock_irqrestore(&multi_msi_cpu_lock, flags);
@@ -2052,23 +2109,85 @@ return_null_message:
msg->data = 0;
}
+static bool hv_pcie_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
+ struct irq_domain *real_parent, struct msi_domain_info *info)
+{
+ struct irq_chip *chip = info->chip;
+
+ if (!msi_lib_init_dev_msi_info(dev, domain, real_parent, info))
+ return false;
+
+ info->ops->msi_prepare = hv_msi_prepare;
+
+ chip->irq_set_affinity = irq_chip_set_affinity_parent;
+
+ if (IS_ENABLED(CONFIG_X86))
+ chip->flags |= IRQCHIP_MOVE_DEFERRED;
+
+ return true;
+}
+
+#define HV_PCIE_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
+ MSI_FLAG_USE_DEF_CHIP_OPS | \
+ MSI_FLAG_PCI_MSI_MASK_PARENT)
+#define HV_PCIE_MSI_FLAGS_SUPPORTED (MSI_FLAG_MULTI_PCI_MSI | \
+ MSI_FLAG_PCI_MSIX | \
+ MSI_FLAG_PCI_MSIX_ALLOC_DYN | \
+ MSI_GENERIC_FLAGS_MASK)
+
+static const struct msi_parent_ops hv_pcie_msi_parent_ops = {
+ .required_flags = HV_PCIE_MSI_FLAGS_REQUIRED,
+ .supported_flags = HV_PCIE_MSI_FLAGS_SUPPORTED,
+ .bus_select_token = DOMAIN_BUS_PCI_MSI,
+ .chip_flags = HV_MSI_CHIP_FLAGS,
+ .prefix = "HV-",
+ .init_dev_msi_info = hv_pcie_init_dev_msi_info,
+};
+
/* HW Interrupt Chip Descriptor */
static struct irq_chip hv_msi_irq_chip = {
.name = "Hyper-V PCIe MSI",
.irq_compose_msi_msg = hv_compose_msi_msg,
.irq_set_affinity = irq_chip_set_affinity_parent,
-#ifdef CONFIG_X86
.irq_ack = irq_chip_ack_parent,
-#elif defined(CONFIG_ARM64)
.irq_eoi = irq_chip_eoi_parent,
-#endif
.irq_mask = hv_irq_mask,
.irq_unmask = hv_irq_unmask,
};
-static struct msi_domain_ops hv_msi_ops = {
- .msi_prepare = hv_msi_prepare,
- .msi_free = hv_msi_free,
+static int hv_pcie_domain_alloc(struct irq_domain *d, unsigned int virq, unsigned int nr_irqs,
+ void *arg)
+{
+ /*
+ * TODO: Allocating and populating struct tran_int_desc in hv_compose_msi_msg()
+ * should be moved here.
+ */
+ int ret;
+
+ ret = irq_domain_alloc_irqs_parent(d, virq, nr_irqs, arg);
+ if (ret < 0)
+ return ret;
+
+ for (int i = 0; i < nr_irqs; i++) {
+ irq_domain_set_hwirq_and_chip(d, virq + i, 0, &hv_msi_irq_chip, NULL);
+ if (IS_ENABLED(CONFIG_X86))
+ __irq_set_handler(virq + i, handle_edge_irq, 0, "edge");
+ }
+
+ return 0;
+}
+
+static void hv_pcie_domain_free(struct irq_domain *d, unsigned int virq, unsigned int nr_irqs)
+{
+ for (int i = 0; i < nr_irqs; i++)
+ hv_msi_free(d, virq + i);
+
+ irq_domain_free_irqs_top(d, virq, nr_irqs);
+}
+
+static const struct irq_domain_ops hv_pcie_domain_ops = {
+ .alloc = hv_pcie_domain_alloc,
+ .free = hv_pcie_domain_free,
};
/**
@@ -2086,17 +2205,14 @@ static struct msi_domain_ops hv_msi_ops = {
*/
static int hv_pcie_init_irq_domain(struct hv_pcibus_device *hbus)
{
- hbus->msi_info.chip = &hv_msi_irq_chip;
- hbus->msi_info.ops = &hv_msi_ops;
- hbus->msi_info.flags = (MSI_FLAG_USE_DEF_DOM_OPS |
- MSI_FLAG_USE_DEF_CHIP_OPS | MSI_FLAG_MULTI_PCI_MSI |
- MSI_FLAG_PCI_MSIX);
- hbus->msi_info.handler = FLOW_HANDLER;
- hbus->msi_info.handler_name = FLOW_NAME;
- hbus->msi_info.data = hbus;
- hbus->irq_domain = pci_msi_create_irq_domain(hbus->fwnode,
- &hbus->msi_info,
- hv_pci_get_root_domain());
+ struct irq_domain_info info = {
+ .fwnode = hbus->fwnode,
+ .ops = &hv_pcie_domain_ops,
+ .host_data = hbus,
+ .parent = hv_pci_get_root_domain(),
+ };
+
+ hbus->irq_domain = msi_create_parent_irq_domain(&info, &hv_pcie_msi_parent_ops);
if (!hbus->irq_domain) {
dev_err(&hbus->hdev->device,
"Failed to build an MSI IRQ domain\n");
@@ -2488,7 +2604,7 @@ static struct hv_pci_dev *new_pcichild_device(struct hv_pcibus_device *hbus,
comp_pkt.hpdev = hpdev;
pkt.init_packet.compl_ctxt = &comp_pkt;
pkt.init_packet.completion_func = q_resource_requirements;
- res_req = (struct pci_child_message *)&pkt.init_packet.message;
+ res_req = (struct pci_child_message *)pkt.buffer;
res_req->message_type.type = PCI_QUERY_RESOURCE_REQUIREMENTS;
res_req->wslot.slot = desc->win_slot.slot;
@@ -2866,7 +2982,7 @@ static void hv_eject_device_work(struct work_struct *work)
pci_destroy_slot(hpdev->pci_slot);
memset(&ctxt, 0, sizeof(ctxt));
- ejct_pkt = (struct pci_eject_response *)&ctxt.pkt.message;
+ ejct_pkt = (struct pci_eject_response *)ctxt.buffer;
ejct_pkt->message_type.type = PCI_EJECTION_COMPLETE;
ejct_pkt->wslot.slot = hpdev->desc.win_slot.slot;
vmbus_sendpacket(hbus->hdev->channel, ejct_pkt,
@@ -3124,7 +3240,7 @@ static int hv_pci_protocol_negotiation(struct hv_device *hdev,
init_completion(&comp_pkt.host_event);
pkt->completion_func = hv_pci_generic_compl;
pkt->compl_ctxt = &comp_pkt;
- version_req = (struct pci_version_request *)&pkt->message;
+ version_req = (struct pci_version_request *)(pkt + 1);
version_req->message_type.type = PCI_QUERY_PROTOCOL_VERSION;
for (i = 0; i < num_version; i++) {
@@ -3346,7 +3462,7 @@ enter_d0_retry:
init_completion(&comp_pkt.host_event);
pkt->completion_func = hv_pci_generic_compl;
pkt->compl_ctxt = &comp_pkt;
- d0_entry = (struct pci_bus_d0_entry *)&pkt->message;
+ d0_entry = (struct pci_bus_d0_entry *)(pkt + 1);
d0_entry->message_type.type = PCI_BUS_D0ENTRY;
d0_entry->mmio_base = hbus->mem_config->start;
@@ -3504,20 +3620,20 @@ static int hv_send_resources_allocated(struct hv_device *hdev)
if (hbus->protocol_version < PCI_PROTOCOL_VERSION_1_2) {
res_assigned =
- (struct pci_resources_assigned *)&pkt->message;
+ (struct pci_resources_assigned *)(pkt + 1);
res_assigned->message_type.type =
PCI_RESOURCES_ASSIGNED;
res_assigned->wslot.slot = hpdev->desc.win_slot.slot;
} else {
res_assigned2 =
- (struct pci_resources_assigned2 *)&pkt->message;
+ (struct pci_resources_assigned2 *)(pkt + 1);
res_assigned2->message_type.type =
PCI_RESOURCES_ASSIGNED2;
res_assigned2->wslot.slot = hpdev->desc.win_slot.slot;
}
put_pcichild(hpdev);
- ret = vmbus_sendpacket(hdev->channel, &pkt->message,
+ ret = vmbus_sendpacket(hdev->channel, pkt + 1,
size_res, (unsigned long)pkt,
VM_PKT_DATA_INBAND,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
@@ -3580,48 +3696,6 @@ static int hv_send_resources_released(struct hv_device *hdev)
return 0;
}
-#define HVPCI_DOM_MAP_SIZE (64 * 1024)
-static DECLARE_BITMAP(hvpci_dom_map, HVPCI_DOM_MAP_SIZE);
-
-/*
- * PCI domain number 0 is used by emulated devices on Gen1 VMs, so define 0
- * as invalid for passthrough PCI devices of this driver.
- */
-#define HVPCI_DOM_INVALID 0
-
-/**
- * hv_get_dom_num() - Get a valid PCI domain number
- * Check if the PCI domain number is in use, and return another number if
- * it is in use.
- *
- * @dom: Requested domain number
- *
- * return: domain number on success, HVPCI_DOM_INVALID on failure
- */
-static u16 hv_get_dom_num(u16 dom)
-{
- unsigned int i;
-
- if (test_and_set_bit(dom, hvpci_dom_map) == 0)
- return dom;
-
- for_each_clear_bit(i, hvpci_dom_map, HVPCI_DOM_MAP_SIZE) {
- if (test_and_set_bit(i, hvpci_dom_map) == 0)
- return i;
- }
-
- return HVPCI_DOM_INVALID;
-}
-
-/**
- * hv_put_dom_num() - Mark the PCI domain number as free
- * @dom: Domain number to be freed
- */
-static void hv_put_dom_num(u16 dom)
-{
- clear_bit(dom, hvpci_dom_map);
-}
-
/**
* hv_pci_probe() - New VMBus channel probe, for a root PCI bus
* @hdev: VMBus's tracking struct for this root PCI bus
@@ -3634,9 +3708,9 @@ static int hv_pci_probe(struct hv_device *hdev,
{
struct pci_host_bridge *bridge;
struct hv_pcibus_device *hbus;
- u16 dom_req, dom;
+ int ret, dom;
+ u16 dom_req;
char *name;
- int ret;
bridge = devm_pci_alloc_host_bridge(&hdev->device, 0);
if (!bridge)
@@ -3663,11 +3737,14 @@ static int hv_pci_probe(struct hv_device *hdev,
* PCI bus (which is actually emulated by the hypervisor) is domain 0.
* (2) There will be no overlap between domains (after fixing possible
* collisions) in the same VM.
+ *
+ * Because Gen1 VMs use domain 0, don't allow picking domain 0 here,
+ * even if bytes 4 and 5 of the instance GUID are both zero. For wider
+ * userspace compatibility, limit the domain ID to a 16-bit value.
*/
dom_req = hdev->dev_instance.b[5] << 8 | hdev->dev_instance.b[4];
- dom = hv_get_dom_num(dom_req);
-
- if (dom == HVPCI_DOM_INVALID) {
+ dom = pci_bus_find_emul_domain_nr(dom_req, 1, U16_MAX);
+ if (dom < 0) {
dev_err(&hdev->device,
"Unable to use dom# 0x%x or other numbers", dom_req);
ret = -EINVAL;
@@ -3801,7 +3878,7 @@ close:
destroy_wq:
destroy_workqueue(hbus->wq);
free_dom:
- hv_put_dom_num(hbus->bridge->domain_nr);
+ pci_bus_release_emul_domain_nr(hbus->bridge->domain_nr);
free_bus:
kfree(hbus);
return ret;
@@ -3815,6 +3892,7 @@ static int hv_pci_bus_exit(struct hv_device *hdev, bool keep_devs)
struct pci_packet teardown_packet;
u8 buffer[sizeof(struct pci_message)];
} pkt;
+ struct pci_message *msg;
struct hv_pci_compl comp_pkt;
struct hv_pci_dev *hpdev, *tmp;
unsigned long flags;
@@ -3860,10 +3938,10 @@ static int hv_pci_bus_exit(struct hv_device *hdev, bool keep_devs)
init_completion(&comp_pkt.host_event);
pkt.teardown_packet.completion_func = hv_pci_generic_compl;
pkt.teardown_packet.compl_ctxt = &comp_pkt;
- pkt.teardown_packet.message[0].type = PCI_BUS_D0EXIT;
+ msg = (struct pci_message *)pkt.buffer;
+ msg->type = PCI_BUS_D0EXIT;
- ret = vmbus_sendpacket_getid(chan, &pkt.teardown_packet.message,
- sizeof(struct pci_message),
+ ret = vmbus_sendpacket_getid(chan, msg, sizeof(*msg),
(unsigned long)&pkt.teardown_packet,
&trans_id, VM_PKT_DATA_INBAND,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
@@ -3925,8 +4003,6 @@ static void hv_pci_remove(struct hv_device *hdev)
irq_domain_remove(hbus->irq_domain);
irq_domain_free_fwnode(hbus->fwnode);
- hv_put_dom_num(hbus->bridge->domain_nr);
-
kfree(hbus);
}
@@ -3981,21 +4057,18 @@ static int hv_pci_restore_msi_msg(struct pci_dev *pdev, void *arg)
{
struct irq_data *irq_data;
struct msi_desc *entry;
- int ret = 0;
- msi_lock_descs(&pdev->dev);
+ if (!pdev->msi_enabled && !pdev->msix_enabled)
+ return 0;
+
+ guard(msi_descs_lock)(&pdev->dev);
msi_for_each_desc(entry, &pdev->dev, MSI_DESC_ASSOCIATED) {
irq_data = irq_get_irq_data(entry->irq);
- if (WARN_ON_ONCE(!irq_data)) {
- ret = -EINVAL;
- break;
- }
-
+ if (WARN_ON_ONCE(!irq_data))
+ return -EINVAL;
hv_compose_msi_msg(irq_data, &entry->msg);
}
- msi_unlock_descs(&pdev->dev);
-
- return ret;
+ return 0;
}
/*
@@ -4096,13 +4169,13 @@ static int __init init_hv_pci_drv(void)
if (!hv_is_hyperv_initialized())
return -ENODEV;
+ if (hv_root_partition() && !hv_nested)
+ return -ENODEV;
+
ret = hv_pci_irqchip_init();
if (ret)
return ret;
- /* Set the invalid domain number's bit, so it will not be used */
- set_bit(HVPCI_DOM_INVALID, hvpci_dom_map);
-
/* Initialize PCI block r/w interface */
hvpci_block_ops.read_block = hv_read_config_block;
hvpci_block_ops.write_block = hv_write_config_block;
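
[Illustrative note, not part of the patch] The pci-hyperv casts rewritten above ((pkt + 1), ctxt.buffer, pkt.buffer) all rely on the same layout: struct pci_packet lost its trailing message[] flexible array, so callers now declare the payload buffer immediately after the packet header and address it either by name or as "one past the header". A sketch of that layout; the wrapper struct is hypothetical, while the pci_packet/pci_message names come from the driver:

struct example_teardown_ctxt {
	struct pci_packet pkt;			/* completion_func / compl_ctxt */
	u8 buffer[sizeof(struct pci_message)];	/* payload, directly after it */
};

static void example_prepare_teardown(struct example_teardown_ctxt *ctxt)
{
	/*
	 * Same bytes as (struct pci_message *)(&ctxt->pkt + 1), since no
	 * padding separates the two members here.
	 */
	struct pci_message *msg = (struct pci_message *)ctxt->buffer;

	msg->type = PCI_BUS_D0EXIT;
}
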
diff --git a/drivers/pci/controller/pci-ixp4xx.c b/drivers/pci/controller/pci-ixp4xx.c
index e44252db6085..9fd401838bad 100644
--- a/drivers/pci/controller/pci-ixp4xx.c
+++ b/drivers/pci/controller/pci-ixp4xx.c
@@ -19,8 +19,7 @@
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
@@ -215,6 +214,7 @@ static u32 ixp4xx_crp_byte_lane_enable_bits(u32 n, int size)
return 0xffffffff;
}
+#ifdef CONFIG_ARM
static int ixp4xx_crp_read_config(struct ixp4xx_pci *p, int where, int size,
u32 *value)
{
@@ -252,6 +252,7 @@ static int ixp4xx_crp_read_config(struct ixp4xx_pci *p, int where, int size,
return PCIBIOS_SUCCESSFUL;
}
+#endif
static int ixp4xx_crp_write_config(struct ixp4xx_pci *p, int where, int size,
u32 value)
@@ -471,6 +472,7 @@ static int ixp4xx_pci_parse_map_dma_ranges(struct ixp4xx_pci *p)
return 0;
}
+#ifdef CONFIG_ARM
/* Only used to get context for abort handling */
static struct ixp4xx_pci *ixp4xx_pci_abort_singleton;
@@ -510,6 +512,7 @@ static int ixp4xx_pci_abort_handler(unsigned long addr, unsigned int fsr,
return 0;
}
+#endif
static int __init ixp4xx_pci_probe(struct platform_device *pdev)
{
@@ -556,10 +559,12 @@ static int __init ixp4xx_pci_probe(struct platform_device *pdev)
dev_info(dev, "controller is in %s mode\n",
p->host_mode ? "host" : "option");
+#ifdef CONFIG_ARM
/* Hook in our fault handler for PCI errors */
ixp4xx_pci_abort_singleton = p;
hook_fault_code(16+6, ixp4xx_pci_abort_handler, SIGBUS, 0,
"imprecise external abort");
+#endif
ret = ixp4xx_pci_parse_map_ranges(p);
if (ret)
diff --git a/drivers/pci/controller/pci-loongson.c b/drivers/pci/controller/pci-loongson.c
index fe0f732f6e43..bc630ab8a283 100644
--- a/drivers/pci/controller/pci-loongson.c
+++ b/drivers/pci/controller/pci-loongson.c
@@ -5,7 +5,7 @@
* Copyright (C) 2020 Jiaxun Yang <jiaxun.yang@flygoat.com>
*/
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
@@ -80,13 +80,49 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON,
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON,
DEV_LS7A_LPC, system_bus_quirk);
+/*
+ * Some Loongson PCIe ports have hardware limitations on their Maximum Read
+ * Request Size. They can't handle anything larger than this. Sane
+ * firmware will set proper MRRS at boot, so we only need no_inc_mrrs for
+ * bridges. However, some MIPS Loongson firmware doesn't set MRRS properly,
+ * so we have to enforce maximum safe MRRS, which is 256 bytes.
+ */
+#ifdef CONFIG_MIPS
+static void loongson_set_min_mrrs_quirk(struct pci_dev *pdev)
+{
+ struct pci_bus *bus = pdev->bus;
+ struct pci_dev *bridge;
+ static const struct pci_device_id bridge_devids[] = {
+ { PCI_VDEVICE(LOONGSON, DEV_LS2K_PCIE_PORT0) },
+ { PCI_VDEVICE(LOONGSON, DEV_LS7A_PCIE_PORT0) },
+ { PCI_VDEVICE(LOONGSON, DEV_LS7A_PCIE_PORT1) },
+ { PCI_VDEVICE(LOONGSON, DEV_LS7A_PCIE_PORT2) },
+ { PCI_VDEVICE(LOONGSON, DEV_LS7A_PCIE_PORT3) },
+ { PCI_VDEVICE(LOONGSON, DEV_LS7A_PCIE_PORT4) },
+ { PCI_VDEVICE(LOONGSON, DEV_LS7A_PCIE_PORT5) },
+ { PCI_VDEVICE(LOONGSON, DEV_LS7A_PCIE_PORT6) },
+ { 0, },
+ };
+
+ /* look for the matching bridge */
+ while (!pci_is_root_bus(bus)) {
+ bridge = bus->self;
+ bus = bus->parent;
+
+ if (pci_match_id(bridge_devids, bridge)) {
+ if (pcie_get_readrq(pdev) > 256) {
+ pci_info(pdev, "limiting MRRS to 256\n");
+ pcie_set_readrq(pdev, 256);
+ }
+ break;
+ }
+ }
+}
+DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, loongson_set_min_mrrs_quirk);
+#endif
+
static void loongson_mrrs_quirk(struct pci_dev *pdev)
{
- /*
- * Some Loongson PCIe ports have h/w limitations of maximum read
- * request size. They can't handle anything larger than this. So
- * force this limit on any devices attached under these ports.
- */
struct pci_host_bridge *bridge = pci_find_host_bridge(pdev->bus);
bridge->no_inc_mrrs = 1;
@@ -127,6 +163,19 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON,
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON,
DEV_LS7A_HDMI, loongson_pci_pin_quirk);
+static void loongson_pci_msi_quirk(struct pci_dev *dev)
+{
+ u16 val, class = dev->class >> 8;
+
+ if (class != PCI_CLASS_BRIDGE_HOST)
+ return;
+
+ pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &val);
+ val |= PCI_MSI_FLAGS_ENABLE;
+ pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, val);
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON, DEV_LS7A_PCIE_PORT5, loongson_pci_msi_quirk);
+
static struct loongson_pci *pci_bus_to_loongson_pci(struct pci_bus *bus)
{
struct pci_config_window *cfg;
diff --git a/drivers/pci/controller/pci-mvebu.c b/drivers/pci/controller/pci-mvebu.c
index c931b1b07b1d..a72aa57591c0 100644
--- a/drivers/pci/controller/pci-mvebu.c
+++ b/drivers/pci/controller/pci-mvebu.c
@@ -87,7 +87,6 @@ struct mvebu_pcie {
struct resource io;
struct resource realio;
struct resource mem;
- struct resource busn;
int nports;
};
@@ -265,7 +264,7 @@ static void mvebu_pcie_setup_hw(struct mvebu_pcie_port *port)
*/
lnkcap = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_LNKCAP);
lnkcap &= ~PCI_EXP_LNKCAP_MLW;
- lnkcap |= (port->is_x4 ? 4 : 1) << 4;
+ lnkcap |= FIELD_PREP(PCI_EXP_LNKCAP_MLW, port->is_x4 ? 4 : 1);
mvebu_writel(port, lnkcap, PCIE_CAP_PCIEXP + PCI_EXP_LNKCAP);
/* Disable Root Bridge I/O space, memory space and bus mastering. */
@@ -1079,9 +1078,9 @@ static int mvebu_pcie_init_irq_domain(struct mvebu_pcie_port *port)
return -ENODEV;
}
- port->intx_irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
- &mvebu_pcie_intx_irq_domain_ops,
- port);
+ port->intx_irq_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node),
+ PCI_NUM_INTX,
+ &mvebu_pcie_intx_irq_domain_ops, port);
of_node_put(pcie_intc_node);
if (!port->intx_irq_domain) {
dev_err(dev, "Failed to get INTx IRQ domain for %s\n", port->name);
@@ -1169,48 +1168,27 @@ static void __iomem *mvebu_pcie_map_registers(struct platform_device *pdev,
return devm_ioremap_resource(&pdev->dev, &port->regs);
}
-#define DT_FLAGS_TO_TYPE(flags) (((flags) >> 24) & 0x03)
-#define DT_TYPE_IO 0x1
-#define DT_TYPE_MEM32 0x2
-#define DT_CPUADDR_TO_TARGET(cpuaddr) (((cpuaddr) >> 56) & 0xFF)
-#define DT_CPUADDR_TO_ATTR(cpuaddr) (((cpuaddr) >> 48) & 0xFF)
-
static int mvebu_get_tgt_attr(struct device_node *np, int devfn,
unsigned long type,
unsigned int *tgt,
unsigned int *attr)
{
- const int na = 3, ns = 2;
- const __be32 *range;
- int rlen, nranges, rangesz, pna, i;
+ struct of_range range;
+ struct of_range_parser parser;
*tgt = -1;
*attr = -1;
- range = of_get_property(np, "ranges", &rlen);
- if (!range)
+ if (of_pci_range_parser_init(&parser, np))
return -EINVAL;
- pna = of_n_addr_cells(np);
- rangesz = pna + na + ns;
- nranges = rlen / sizeof(__be32) / rangesz;
-
- for (i = 0; i < nranges; i++, range += rangesz) {
- u32 flags = of_read_number(range, 1);
- u32 slot = of_read_number(range + 1, 1);
- u64 cpuaddr = of_read_number(range + na, pna);
- unsigned long rtype;
-
- if (DT_FLAGS_TO_TYPE(flags) == DT_TYPE_IO)
- rtype = IORESOURCE_IO;
- else if (DT_FLAGS_TO_TYPE(flags) == DT_TYPE_MEM32)
- rtype = IORESOURCE_MEM;
- else
- continue;
+ for_each_of_range(&parser, &range) {
+ u32 slot = upper_32_bits(range.bus_addr);
- if (slot == PCI_SLOT(devfn) && type == rtype) {
- *tgt = DT_CPUADDR_TO_TARGET(cpuaddr);
- *attr = DT_CPUADDR_TO_ATTR(cpuaddr);
+ if (slot == PCI_SLOT(devfn) &&
+ type == (range.flags & IORESOURCE_TYPE_BITS)) {
+ *tgt = (range.parent_bus_addr >> 56) & 0xFF;
+ *attr = (range.parent_bus_addr >> 48) & 0xFF;
return 0;
}
}
@@ -1362,11 +1340,9 @@ static int mvebu_pcie_parse_port(struct mvebu_pcie *pcie,
goto skip;
}
- ret = devm_add_action(dev, mvebu_pcie_port_clk_put, port);
- if (ret < 0) {
- clk_put(port->clk);
+ ret = devm_add_action_or_reset(dev, mvebu_pcie_port_clk_put, port);
+ if (ret < 0)
goto err;
- }
return 1;
@@ -1423,7 +1399,7 @@ static void mvebu_pcie_powerdown(struct mvebu_pcie_port *port)
}
/*
- * devm_of_pci_get_host_bridge_resources() only sets up translateable resources,
+ * devm_of_pci_get_host_bridge_resources() only sets up translatable resources,
* so we need extra resource setup parsing our special DT properties encoding
* the MEM and IO apertures.
*/
@@ -1716,6 +1692,7 @@ static const struct of_device_id mvebu_pcie_of_match_table[] = {
{ .compatible = "marvell,kirkwood-pcie", },
{},
};
+MODULE_DEVICE_TABLE(of, mvebu_pcie_of_match_table);
static const struct dev_pm_ops mvebu_pcie_pm_ops = {
NOIRQ_SYSTEM_SLEEP_PM_OPS(mvebu_pcie_suspend, mvebu_pcie_resume)
@@ -1728,7 +1705,7 @@ static struct platform_driver mvebu_pcie_driver = {
.pm = &mvebu_pcie_pm_ops,
},
.probe = mvebu_pcie_probe,
- .remove_new = mvebu_pcie_remove,
+ .remove = mvebu_pcie_remove,
};
module_platform_driver(mvebu_pcie_driver);
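The rewritten mvebu_get_tgt_attr() above extracts the Marvell MBus target and attribute from the upper bytes of the parent (CPU) bus address returned by the generic ranges parser. A minimal sketch of that extraction using FIELD_GET(); the mask names are hypothetical, chosen only to match the open-coded ">> 56" and ">> 48" shifts:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

/* Hypothetical masks matching (addr >> 56) & 0xFF and (addr >> 48) & 0xFF */
#define EXAMPLE_MBUS_TARGET	GENMASK_ULL(63, 56)
#define EXAMPLE_MBUS_ATTR	GENMASK_ULL(55, 48)

static void example_decode_parent_addr(u64 parent_bus_addr,
				       unsigned int *tgt, unsigned int *attr)
{
	*tgt  = FIELD_GET(EXAMPLE_MBUS_TARGET, parent_bus_addr);
	*attr = FIELD_GET(EXAMPLE_MBUS_ATTR, parent_bus_addr);
}

The same FIELD_PREP()/FIELD_GET() style is what replaces the open-coded link-width shift in mvebu_pcie_setup_hw() earlier in this file's diff.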
diff --git a/drivers/pci/controller/pci-rcar-gen2.c b/drivers/pci/controller/pci-rcar-gen2.c
index 839695791757..d29866485361 100644
--- a/drivers/pci/controller/pci-rcar-gen2.c
+++ b/drivers/pci/controller/pci-rcar-gen2.c
@@ -290,8 +290,7 @@ static int rcar_pci_probe(struct platform_device *pdev)
priv = pci_host_bridge_priv(bridge);
bridge->sysdata = priv;
- cfg_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- reg = devm_ioremap_resource(dev, cfg_res);
+ reg = devm_platform_get_and_ioremap_resource(pdev, 0, &cfg_res);
if (IS_ERR(reg))
return PTR_ERR(reg);
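devm_platform_get_and_ioremap_resource() folds the platform_get_resource()/devm_ioremap_resource() pair into one call while still handing back the struct resource, so callers that need the physical address keep it; pci-v3-semi.c and pci-xgene-msi.c below get the same treatment. A short sketch of the pattern (the probe function and resource index are placeholders):

#include <linux/err.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	/* One call replaces platform_get_resource() + devm_ioremap_resource() */
	base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/* res->start remains available, e.g. to program a doorbell address */
	return 0;
}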
diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c
index 038d974a318e..942ddfca3bf6 100644
--- a/drivers/pci/controller/pci-tegra.c
+++ b/drivers/pci/controller/pci-tegra.c
@@ -14,6 +14,7 @@
*/
#include <linux/clk.h>
+#include <linux/cleanup.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/export.h>
@@ -22,6 +23,7 @@
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
+#include <linux/irqchip/irq-msi-lib.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -269,7 +271,7 @@ struct tegra_msi {
DECLARE_BITMAP(used, INT_PCI_MSI_NR);
struct irq_domain *domain;
struct mutex map_lock;
- spinlock_t mask_lock;
+ raw_spinlock_t mask_lock;
void *virt;
dma_addr_t phys;
int irq;
@@ -1343,7 +1345,7 @@ static int tegra_pcie_port_get_phys(struct tegra_pcie_port *port)
unsigned int i;
int err;
- port->phys = devm_kcalloc(dev, sizeof(phy), port->lanes, GFP_KERNEL);
+ port->phys = devm_kcalloc(dev, port->lanes, sizeof(phy), GFP_KERNEL);
if (!port->phys)
return -ENOMEM;
@@ -1460,7 +1462,7 @@ static int tegra_pcie_get_resources(struct tegra_pcie *pcie)
pcie->cs = *res;
/* constrain configuration space to 4 KiB */
- pcie->cs.end = pcie->cs.start + SZ_4K - 1;
+ resource_set_size(&pcie->cs, SZ_4K);
pcie->cfg = devm_ioremap_resource(dev, &pcie->cs);
if (IS_ERR(pcie->cfg)) {
@@ -1547,7 +1549,7 @@ static void tegra_pcie_msi_irq(struct irq_desc *desc)
unsigned int index = i * 32 + offset;
int ret;
- ret = generic_handle_domain_irq(msi->domain->parent, index);
+ ret = generic_handle_domain_irq(msi->domain, index);
if (ret) {
/*
* that's weird who triggered this?
@@ -1565,30 +1567,6 @@ static void tegra_pcie_msi_irq(struct irq_desc *desc)
chained_irq_exit(chip, desc);
}
-static void tegra_msi_top_irq_ack(struct irq_data *d)
-{
- irq_chip_ack_parent(d);
-}
-
-static void tegra_msi_top_irq_mask(struct irq_data *d)
-{
- pci_msi_mask_irq(d);
- irq_chip_mask_parent(d);
-}
-
-static void tegra_msi_top_irq_unmask(struct irq_data *d)
-{
- pci_msi_unmask_irq(d);
- irq_chip_unmask_parent(d);
-}
-
-static struct irq_chip tegra_msi_top_chip = {
- .name = "Tegra PCIe MSI",
- .irq_ack = tegra_msi_top_irq_ack,
- .irq_mask = tegra_msi_top_irq_mask,
- .irq_unmask = tegra_msi_top_irq_unmask,
-};
-
static void tegra_msi_irq_ack(struct irq_data *d)
{
struct tegra_msi *msi = irq_data_get_irq_chip_data(d);
@@ -1604,14 +1582,13 @@ static void tegra_msi_irq_mask(struct irq_data *d)
struct tegra_msi *msi = irq_data_get_irq_chip_data(d);
struct tegra_pcie *pcie = msi_to_pcie(msi);
unsigned int index = d->hwirq / 32;
- unsigned long flags;
u32 value;
- spin_lock_irqsave(&msi->mask_lock, flags);
- value = afi_readl(pcie, AFI_MSI_EN_VEC(index));
- value &= ~BIT(d->hwirq % 32);
- afi_writel(pcie, value, AFI_MSI_EN_VEC(index));
- spin_unlock_irqrestore(&msi->mask_lock, flags);
+ scoped_guard(raw_spinlock_irqsave, &msi->mask_lock) {
+ value = afi_readl(pcie, AFI_MSI_EN_VEC(index));
+ value &= ~BIT(d->hwirq % 32);
+ afi_writel(pcie, value, AFI_MSI_EN_VEC(index));
+ }
}
static void tegra_msi_irq_unmask(struct irq_data *d)
@@ -1619,19 +1596,13 @@ static void tegra_msi_irq_unmask(struct irq_data *d)
struct tegra_msi *msi = irq_data_get_irq_chip_data(d);
struct tegra_pcie *pcie = msi_to_pcie(msi);
unsigned int index = d->hwirq / 32;
- unsigned long flags;
u32 value;
- spin_lock_irqsave(&msi->mask_lock, flags);
- value = afi_readl(pcie, AFI_MSI_EN_VEC(index));
- value |= BIT(d->hwirq % 32);
- afi_writel(pcie, value, AFI_MSI_EN_VEC(index));
- spin_unlock_irqrestore(&msi->mask_lock, flags);
-}
-
-static int tegra_msi_set_affinity(struct irq_data *d, const struct cpumask *mask, bool force)
-{
- return -EINVAL;
+ scoped_guard(raw_spinlock_irqsave, &msi->mask_lock) {
+ value = afi_readl(pcie, AFI_MSI_EN_VEC(index));
+ value |= BIT(d->hwirq % 32);
+ afi_writel(pcie, value, AFI_MSI_EN_VEC(index));
+ }
}
static void tegra_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
@@ -1648,7 +1619,6 @@ static struct irq_chip tegra_msi_bottom_chip = {
.irq_ack = tegra_msi_irq_ack,
.irq_mask = tegra_msi_irq_mask,
.irq_unmask = tegra_msi_irq_unmask,
- .irq_set_affinity = tegra_msi_set_affinity,
.irq_compose_msi_msg = tegra_compose_msi_msg,
};
@@ -1696,42 +1666,40 @@ static const struct irq_domain_ops tegra_msi_domain_ops = {
.free = tegra_msi_domain_free,
};
-static struct msi_domain_info tegra_msi_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_PCI_MSIX),
- .chip = &tegra_msi_top_chip,
+static const struct msi_parent_ops tegra_msi_parent_ops = {
+ .supported_flags = (MSI_GENERIC_FLAGS_MASK |
+ MSI_FLAG_PCI_MSIX),
+ .required_flags = (MSI_FLAG_USE_DEF_DOM_OPS |
+ MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_PCI_MSI_MASK_PARENT |
+ MSI_FLAG_NO_AFFINITY),
+ .chip_flags = MSI_CHIP_FLAG_SET_ACK,
+ .bus_select_token = DOMAIN_BUS_PCI_MSI,
+ .init_dev_msi_info = msi_lib_init_dev_msi_info,
};
static int tegra_allocate_domains(struct tegra_msi *msi)
{
struct tegra_pcie *pcie = msi_to_pcie(msi);
struct fwnode_handle *fwnode = dev_fwnode(pcie->dev);
- struct irq_domain *parent;
-
- parent = irq_domain_create_linear(fwnode, INT_PCI_MSI_NR,
- &tegra_msi_domain_ops, msi);
- if (!parent) {
- dev_err(pcie->dev, "failed to create IRQ domain\n");
- return -ENOMEM;
- }
- irq_domain_update_bus_token(parent, DOMAIN_BUS_NEXUS);
+ struct irq_domain_info info = {
+ .fwnode = fwnode,
+ .ops = &tegra_msi_domain_ops,
+ .size = INT_PCI_MSI_NR,
+ .host_data = msi,
+ };
- msi->domain = pci_msi_create_irq_domain(fwnode, &tegra_msi_info, parent);
+ msi->domain = msi_create_parent_irq_domain(&info, &tegra_msi_parent_ops);
if (!msi->domain) {
dev_err(pcie->dev, "failed to create MSI domain\n");
- irq_domain_remove(parent);
return -ENOMEM;
}
-
return 0;
}
static void tegra_free_domains(struct tegra_msi *msi)
{
- struct irq_domain *parent = msi->domain->parent;
-
irq_domain_remove(msi->domain);
- irq_domain_remove(parent);
}
static int tegra_pcie_msi_setup(struct tegra_pcie *pcie)
@@ -1742,7 +1710,7 @@ static int tegra_pcie_msi_setup(struct tegra_pcie *pcie)
int err;
mutex_init(&msi->map_lock);
- spin_lock_init(&msi->mask_lock);
+ raw_spin_lock_init(&msi->mask_lock);
if (IS_ENABLED(CONFIG_PCI_MSI)) {
err = tegra_allocate_domains(msi);
@@ -2112,47 +2080,39 @@ static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask)
static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
{
struct device *dev = pcie->dev;
- struct device_node *np = dev->of_node, *port;
+ struct device_node *np = dev->of_node;
const struct tegra_pcie_soc *soc = pcie->soc;
u32 lanes = 0, mask = 0;
unsigned int lane = 0;
int err;
/* parse root ports */
- for_each_child_of_node(np, port) {
+ for_each_child_of_node_scoped(np, port) {
struct tegra_pcie_port *rp;
unsigned int index;
u32 value;
char *label;
err = of_pci_get_devfn(port);
- if (err < 0) {
- dev_err(dev, "failed to parse address: %d\n", err);
- goto err_node_put;
- }
+ if (err < 0)
+ return dev_err_probe(dev, err, "failed to parse address\n");
index = PCI_SLOT(err);
- if (index < 1 || index > soc->num_ports) {
- dev_err(dev, "invalid port number: %d\n", index);
- err = -EINVAL;
- goto err_node_put;
- }
+ if (index < 1 || index > soc->num_ports)
+ return dev_err_probe(dev, -EINVAL,
+ "invalid port number: %d\n", index);
index--;
err = of_property_read_u32(port, "nvidia,num-lanes", &value);
- if (err < 0) {
- dev_err(dev, "failed to parse # of lanes: %d\n",
- err);
- goto err_node_put;
- }
+ if (err < 0)
+ return dev_err_probe(dev, err,
+ "failed to parse # of lanes\n");
- if (value > 16) {
- dev_err(dev, "invalid # of lanes: %u\n", value);
- err = -EINVAL;
- goto err_node_put;
- }
+ if (value > 16)
+ return dev_err_probe(dev, -EINVAL,
+ "invalid # of lanes: %u\n", value);
lanes |= value << (index << 3);
@@ -2165,16 +2125,12 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
lane += value;
rp = devm_kzalloc(dev, sizeof(*rp), GFP_KERNEL);
- if (!rp) {
- err = -ENOMEM;
- goto err_node_put;
- }
+ if (!rp)
+ return -ENOMEM;
err = of_address_to_resource(port, 0, &rp->regs);
- if (err < 0) {
- dev_err(dev, "failed to parse address: %d\n", err);
- goto err_node_put;
- }
+ if (err < 0)
+ return dev_err_probe(dev, err, "failed to parse address\n");
INIT_LIST_HEAD(&rp->list);
rp->index = index;
@@ -2183,16 +2139,12 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
rp->np = port;
rp->base = devm_pci_remap_cfg_resource(dev, &rp->regs);
- if (IS_ERR(rp->base)) {
- err = PTR_ERR(rp->base);
- goto err_node_put;
- }
+ if (IS_ERR(rp->base))
+ return PTR_ERR(rp->base);
label = devm_kasprintf(dev, GFP_KERNEL, "pex-reset-%u", index);
- if (!label) {
- err = -ENOMEM;
- goto err_node_put;
- }
+ if (!label)
+ return -ENOMEM;
/*
* Returns -ENOENT if reset-gpios property is not populated
@@ -2205,34 +2157,26 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
GPIOD_OUT_LOW,
label);
if (IS_ERR(rp->reset_gpio)) {
- if (PTR_ERR(rp->reset_gpio) == -ENOENT) {
+ if (PTR_ERR(rp->reset_gpio) == -ENOENT)
rp->reset_gpio = NULL;
- } else {
- dev_err(dev, "failed to get reset GPIO: %ld\n",
- PTR_ERR(rp->reset_gpio));
- err = PTR_ERR(rp->reset_gpio);
- goto err_node_put;
- }
+ else
+ return dev_err_probe(dev, PTR_ERR(rp->reset_gpio),
+ "failed to get reset GPIO\n");
}
list_add_tail(&rp->list, &pcie->ports);
}
err = tegra_pcie_get_xbar_config(pcie, lanes, &pcie->xbar_config);
- if (err < 0) {
- dev_err(dev, "invalid lane configuration\n");
- return err;
- }
+ if (err < 0)
+ return dev_err_probe(dev, err,
+ "invalid lane configuration\n");
err = tegra_pcie_get_regulators(pcie, mask);
if (err < 0)
return err;
return 0;
-
-err_node_put:
- of_node_put(port);
- return err;
}
/*
@@ -2806,6 +2750,6 @@ static struct platform_driver tegra_pcie_driver = {
.pm = &tegra_pcie_pm_ops,
},
.probe = tegra_pcie_probe,
- .remove_new = tegra_pcie_remove,
+ .remove = tegra_pcie_remove,
};
module_platform_driver(tegra_pcie_driver);
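The Tegra MSI mask/unmask paths now take the raw spinlock through scoped_guard(), which drops the lock automatically when the block is left, so the explicit irqsave/irqrestore bookkeeping disappears. A minimal sketch of the idiom, with a hypothetical lock and register shadow:

#include <linux/bits.h>
#include <linux/cleanup.h>
#include <linux/spinlock.h>
#include <linux/types.h>

static DEFINE_RAW_SPINLOCK(example_lock);	/* hypothetical lock */
static u32 example_shadow;			/* hypothetical register shadow */

static void example_mask_bit(unsigned int bit)
{
	/* the lock is released automatically at the end of the scope */
	scoped_guard(raw_spinlock_irqsave, &example_lock) {
		example_shadow &= ~BIT(bit);
	}
}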
diff --git a/drivers/pci/controller/pci-thunder-ecam.c b/drivers/pci/controller/pci-thunder-ecam.c
index b5bd10a62adb..b5b4a958e6a2 100644
--- a/drivers/pci/controller/pci-thunder-ecam.c
+++ b/drivers/pci/controller/pci-thunder-ecam.c
@@ -11,6 +11,8 @@
#include <linux/pci-ecam.h>
#include <linux/platform_device.h>
+#include "pci-host-common.h"
+
#if defined(CONFIG_PCI_HOST_THUNDER_ECAM) || (defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS))
static void set_val(u32 v, int where, int size, u32 *val)
@@ -204,7 +206,7 @@ static int thunder_ecam_config_read(struct pci_bus *bus, unsigned int devfn,
v = readl(addr);
if (v & 0xff00)
- pr_err("Bad MSIX cap header: %08x\n", v);
+ pr_err("Bad MSI-X cap header: %08x\n", v);
v |= 0xbc00; /* next capability is EA at 0xbc */
set_val(v, where, size, val);
return PCIBIOS_SUCCESSFUL;
diff --git a/drivers/pci/controller/pci-thunder-pem.c b/drivers/pci/controller/pci-thunder-pem.c
index 06a9855cb431..5fa037fb61dc 100644
--- a/drivers/pci/controller/pci-thunder-pem.c
+++ b/drivers/pci/controller/pci-thunder-pem.c
@@ -14,6 +14,7 @@
#include <linux/platform_device.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include "../pci.h"
+#include "pci-host-common.h"
#if defined(CONFIG_PCI_HOST_THUNDER_PEM) || (defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS))
@@ -400,9 +401,9 @@ static int thunder_pem_acpi_init(struct pci_config_window *cfg)
* Reserve 64K size PEM specific resources. The full 16M range
* size is required for thunder_pem_init() call.
*/
- res_pem->end = res_pem->start + SZ_64K - 1;
+ resource_set_size(res_pem, SZ_64K);
thunder_pem_reserve_range(dev, root->segment, res_pem);
- res_pem->end = res_pem->start + SZ_16M - 1;
+ resource_set_size(res_pem, SZ_16M);
/* Reserve PCI configuration space as well. */
thunder_pem_reserve_range(dev, root->segment, &cfg->res);
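resource_set_size() computes res->end from res->start plus a size, replacing the open-coded "end = start + SZ_x - 1" both here and in the pci-tegra.c hunk above. A tiny sketch, assuming the resource's start has already been filled in:

#include <linux/ioport.h>
#include <linux/sizes.h>

static void example_trim_to_64k(struct resource *res)
{
	/* equivalent to: res->end = res->start + SZ_64K - 1; */
	resource_set_size(res, SZ_64K);
}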
diff --git a/drivers/pci/controller/pci-v3-semi.c b/drivers/pci/controller/pci-v3-semi.c
index ca44b0c83d1b..460a825325dd 100644
--- a/drivers/pci/controller/pci-v3-semi.c
+++ b/drivers/pci/controller/pci-v3-semi.c
@@ -20,8 +20,7 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
@@ -736,8 +735,7 @@ static int v3_pci_probe(struct platform_device *pdev)
return ret;
}
- regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- v3->base = devm_ioremap_resource(dev, regs);
+ v3->base = devm_platform_get_and_ioremap_resource(pdev, 0, &regs);
if (IS_ERR(v3->base))
return PTR_ERR(v3->base);
/*
diff --git a/drivers/pci/controller/pci-xgene-msi.c b/drivers/pci/controller/pci-xgene-msi.c
index 0234e528b9a5..654639bccd10 100644
--- a/drivers/pci/controller/pci-xgene-msi.c
+++ b/drivers/pci/controller/pci-xgene-msi.c
@@ -6,12 +6,14 @@
* Author: Tanmay Inamdar <tinamdar@apm.com>
* Duc Dang <dhdang@apm.com>
*/
+#include <linux/bitfield.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/irqchip/chained_irq.h>
+#include <linux/irqchip/irq-msi-lib.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/of_pci.h>
@@ -21,46 +23,49 @@
#define IDX_PER_GROUP 8
#define IRQS_PER_IDX 16
#define NR_HW_IRQS 16
-#define NR_MSI_VEC (IDX_PER_GROUP * IRQS_PER_IDX * NR_HW_IRQS)
+#define NR_MSI_BITS (IDX_PER_GROUP * IRQS_PER_IDX * NR_HW_IRQS)
+#define NR_MSI_VEC (NR_MSI_BITS / num_possible_cpus())
-struct xgene_msi_group {
- struct xgene_msi *msi;
- int gic_irq;
- u32 msi_grp;
-};
+#define MSI_GROUP_MASK GENMASK(22, 19)
+#define MSI_INDEX_MASK GENMASK(18, 16)
+#define MSI_INTR_MASK GENMASK(19, 16)
+
+#define MSInRx_HWIRQ_MASK GENMASK(6, 4)
+#define DATA_HWIRQ_MASK GENMASK(3, 0)
struct xgene_msi {
- struct device_node *node;
struct irq_domain *inner_domain;
- struct irq_domain *msi_domain;
u64 msi_addr;
void __iomem *msi_regs;
unsigned long *bitmap;
struct mutex bitmap_lock;
- struct xgene_msi_group *msi_groups;
- int num_cpus;
+ unsigned int gic_irq[NR_HW_IRQS];
};
/* Global data */
-static struct xgene_msi xgene_msi_ctrl;
-
-static struct irq_chip xgene_msi_top_irq_chip = {
- .name = "X-Gene1 MSI",
- .irq_enable = pci_msi_unmask_irq,
- .irq_disable = pci_msi_mask_irq,
- .irq_mask = pci_msi_mask_irq,
- .irq_unmask = pci_msi_unmask_irq,
-};
-
-static struct msi_domain_info xgene_msi_domain_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_PCI_MSIX),
- .chip = &xgene_msi_top_irq_chip,
-};
+static struct xgene_msi *xgene_msi_ctrl;
/*
- * X-Gene v1 has 16 groups of MSI termination registers MSInIRx, where
- * n is group number (0..F), x is index of registers in each group (0..7)
+ * X-Gene v1 has 16 frames of MSI termination registers MSInIRx, where n is
+ * frame number (0..15), x is index of registers in each frame (0..7). Each
+ * 32b register is at the beginning of a 64kB region, each frame occupying
+ * 512kB (and the whole thing 8MB of PA space).
+ *
+ * Each register supports 16 MSI vectors (0..15) to generate interrupts. A
+ * write to the MSInIRx from the PCI side generates an interrupt. A read
+ * from the MSInRx on the CPU side returns a bitmap of the pending MSIs in
+ * the lower 16 bits. A side effect of this read is that all pending
+ * interrupts are acknowledged and cleared.
+ *
+ * Additionally, each MSI termination frame has 1 MSIINTn register (n is
+ * 0..15) to indicate the MSI pending status caused by any of its 8
+ * termination registers, reported as a bitmap in the lower 8 bits. Each 32b
+ * register is at the beginning of a 64kB region (and overall occupying an
+ * extra 1MB).
+ *
+ * There is one GIC IRQ assigned for each MSI termination frame, 16 in
+ * total.
+ *
* The register layout is as follows:
* MSI0IR0 base_addr
* MSI0IR1 base_addr + 0x10000
@@ -81,107 +86,74 @@ static struct msi_domain_info xgene_msi_domain_info = {
* MSIINT1 base_addr + 0x810000
* ... ...
* MSIINTF base_addr + 0x8F0000
- *
- * Each index register supports 16 MSI vectors (0..15) to generate interrupt.
- * There are total 16 GIC IRQs assigned for these 16 groups of MSI termination
- * registers.
- *
- * Each MSI termination group has 1 MSIINTn register (n is 0..15) to indicate
- * the MSI pending status caused by 1 of its 8 index registers.
*/
/* MSInIRx read helper */
-static u32 xgene_msi_ir_read(struct xgene_msi *msi,
- u32 msi_grp, u32 msir_idx)
+static u32 xgene_msi_ir_read(struct xgene_msi *msi, u32 msi_grp, u32 msir_idx)
{
return readl_relaxed(msi->msi_regs + MSI_IR0 +
- (msi_grp << 19) + (msir_idx << 16));
+ (FIELD_PREP(MSI_GROUP_MASK, msi_grp) |
+ FIELD_PREP(MSI_INDEX_MASK, msir_idx)));
}
/* MSIINTn read helper */
static u32 xgene_msi_int_read(struct xgene_msi *msi, u32 msi_grp)
{
- return readl_relaxed(msi->msi_regs + MSI_INT0 + (msi_grp << 16));
+ return readl_relaxed(msi->msi_regs + MSI_INT0 +
+ FIELD_PREP(MSI_INTR_MASK, msi_grp));
}
/*
- * With 2048 MSI vectors supported, the MSI message can be constructed using
- * following scheme:
- * - Divide into 8 256-vector groups
- * Group 0: 0-255
- * Group 1: 256-511
- * Group 2: 512-767
- * ...
- * Group 7: 1792-2047
- * - Each 256-vector group is divided into 16 16-vector groups
- * As an example: 16 16-vector groups for 256-vector group 0-255 is
- * Group 0: 0-15
- * Group 1: 16-32
- * ...
- * Group 15: 240-255
- * - The termination address of MSI vector in 256-vector group n and 16-vector
- * group x is the address of MSIxIRn
- * - The data for MSI vector in 16-vector group x is x
+ * In order to allow an MSI to be moved from one CPU to another without
+ * having to repaint both the address and the data (which cannot be done
+ * atomically), we statically partition the MSI frames between CPUs. Given
+ * that XGene-1 has 8 CPUs, each CPU gets two frames assigned to it.
+ *
+ * We adopt the convention that when an MSI is moved, it is configured to
+ * target the same register number in the congruent frame assigned to the
+ * new target CPU. This reserves a given MSI across all CPUs, and reduces
+ * the MSI capacity from 2048 to 256.
+ *
+ * Effectively, this amounts to:
+ * - hwirq[7]::cpu[2:0] is the target frame number (n in MSInIRx)
+ * - hwirq[6:4] is the register index in any given frame (x in MSInIRx)
+ * - hwirq[3:0] is the MSI data
*/
-static u32 hwirq_to_reg_set(unsigned long hwirq)
-{
- return (hwirq / (NR_HW_IRQS * IRQS_PER_IDX));
-}
-
-static u32 hwirq_to_group(unsigned long hwirq)
-{
- return (hwirq % NR_HW_IRQS);
-}
-
-static u32 hwirq_to_msi_data(unsigned long hwirq)
+static irq_hw_number_t compute_hwirq(u8 frame, u8 index, u8 data)
{
- return ((hwirq / NR_HW_IRQS) % IRQS_PER_IDX);
+ return (FIELD_PREP(BIT(7), FIELD_GET(BIT(3), frame)) |
+ FIELD_PREP(MSInRx_HWIRQ_MASK, index) |
+ FIELD_PREP(DATA_HWIRQ_MASK, data));
}
static void xgene_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
struct xgene_msi *msi = irq_data_get_irq_chip_data(data);
- u32 reg_set = hwirq_to_reg_set(data->hwirq);
- u32 group = hwirq_to_group(data->hwirq);
- u64 target_addr = msi->msi_addr + (((8 * group) + reg_set) << 16);
+ u64 target_addr;
+ u32 frame, msir;
+ int cpu;
- msg->address_hi = upper_32_bits(target_addr);
- msg->address_lo = lower_32_bits(target_addr);
- msg->data = hwirq_to_msi_data(data->hwirq);
-}
+ cpu = cpumask_first(irq_data_get_effective_affinity_mask(data));
+ msir = FIELD_GET(MSInRx_HWIRQ_MASK, data->hwirq);
+ frame = FIELD_PREP(BIT(3), FIELD_GET(BIT(7), data->hwirq)) | cpu;
-/*
- * X-Gene v1 only has 16 MSI GIC IRQs for 2048 MSI vectors. To maintain
- * the expected behaviour of .set_affinity for each MSI interrupt, the 16
- * MSI GIC IRQs are statically allocated to 8 X-Gene v1 cores (2 GIC IRQs
- * for each core). The MSI vector is moved fom 1 MSI GIC IRQ to another
- * MSI GIC IRQ to steer its MSI interrupt to correct X-Gene v1 core. As a
- * consequence, the total MSI vectors that X-Gene v1 supports will be
- * reduced to 256 (2048/8) vectors.
- */
-static int hwirq_to_cpu(unsigned long hwirq)
-{
- return (hwirq % xgene_msi_ctrl.num_cpus);
-}
+ target_addr = msi->msi_addr;
+ target_addr += (FIELD_PREP(MSI_GROUP_MASK, frame) |
+ FIELD_PREP(MSI_INTR_MASK, msir));
-static unsigned long hwirq_to_canonical_hwirq(unsigned long hwirq)
-{
- return (hwirq - hwirq_to_cpu(hwirq));
+ msg->address_hi = upper_32_bits(target_addr);
+ msg->address_lo = lower_32_bits(target_addr);
+ msg->data = FIELD_GET(DATA_HWIRQ_MASK, data->hwirq);
}
static int xgene_msi_set_affinity(struct irq_data *irqdata,
const struct cpumask *mask, bool force)
{
int target_cpu = cpumask_first(mask);
- int curr_cpu;
-
- curr_cpu = hwirq_to_cpu(irqdata->hwirq);
- if (curr_cpu == target_cpu)
- return IRQ_SET_MASK_OK_DONE;
- /* Update MSI number to target the new CPU */
- irqdata->hwirq = hwirq_to_canonical_hwirq(irqdata->hwirq) + target_cpu;
+ irq_data_update_effective_affinity(irqdata, cpumask_of(target_cpu));
+ /* Force the core code to regenerate the message */
return IRQ_SET_MASK_OK;
}
@@ -195,25 +167,23 @@ static int xgene_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
unsigned int nr_irqs, void *args)
{
struct xgene_msi *msi = domain->host_data;
- int msi_irq;
+ irq_hw_number_t hwirq;
mutex_lock(&msi->bitmap_lock);
- msi_irq = bitmap_find_next_zero_area(msi->bitmap, NR_MSI_VEC, 0,
- msi->num_cpus, 0);
- if (msi_irq < NR_MSI_VEC)
- bitmap_set(msi->bitmap, msi_irq, msi->num_cpus);
- else
- msi_irq = -ENOSPC;
+ hwirq = find_first_zero_bit(msi->bitmap, NR_MSI_VEC);
+ if (hwirq < NR_MSI_VEC)
+ set_bit(hwirq, msi->bitmap);
mutex_unlock(&msi->bitmap_lock);
- if (msi_irq < 0)
- return msi_irq;
+ if (hwirq >= NR_MSI_VEC)
+ return -ENOSPC;
- irq_domain_set_info(domain, virq, msi_irq,
+ irq_domain_set_info(domain, virq, hwirq,
&xgene_msi_bottom_irq_chip, domain->host_data,
handle_simple_irq, NULL, NULL);
+ irqd_set_resend_when_in_progress(irq_get_irq_data(virq));
return 0;
}
@@ -223,204 +193,149 @@ static void xgene_irq_domain_free(struct irq_domain *domain,
{
struct irq_data *d = irq_domain_get_irq_data(domain, virq);
struct xgene_msi *msi = irq_data_get_irq_chip_data(d);
- u32 hwirq;
mutex_lock(&msi->bitmap_lock);
- hwirq = hwirq_to_canonical_hwirq(d->hwirq);
- bitmap_clear(msi->bitmap, hwirq, msi->num_cpus);
+ clear_bit(d->hwirq, msi->bitmap);
mutex_unlock(&msi->bitmap_lock);
irq_domain_free_irqs_parent(domain, virq, nr_irqs);
}
-static const struct irq_domain_ops msi_domain_ops = {
+static const struct irq_domain_ops xgene_msi_domain_ops = {
.alloc = xgene_irq_domain_alloc,
.free = xgene_irq_domain_free,
};
-static int xgene_allocate_domains(struct xgene_msi *msi)
-{
- msi->inner_domain = irq_domain_add_linear(NULL, NR_MSI_VEC,
- &msi_domain_ops, msi);
- if (!msi->inner_domain)
- return -ENOMEM;
-
- msi->msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(msi->node),
- &xgene_msi_domain_info,
- msi->inner_domain);
-
- if (!msi->msi_domain) {
- irq_domain_remove(msi->inner_domain);
- return -ENOMEM;
- }
-
- return 0;
-}
+static const struct msi_parent_ops xgene_msi_parent_ops = {
+ .supported_flags = (MSI_GENERIC_FLAGS_MASK |
+ MSI_FLAG_PCI_MSIX),
+ .required_flags = (MSI_FLAG_USE_DEF_DOM_OPS |
+ MSI_FLAG_USE_DEF_CHIP_OPS),
+ .bus_select_token = DOMAIN_BUS_PCI_MSI,
+ .init_dev_msi_info = msi_lib_init_dev_msi_info,
+};
-static void xgene_free_domains(struct xgene_msi *msi)
+static int xgene_allocate_domains(struct device_node *node,
+ struct xgene_msi *msi)
{
- if (msi->msi_domain)
- irq_domain_remove(msi->msi_domain);
- if (msi->inner_domain)
- irq_domain_remove(msi->inner_domain);
+ struct irq_domain_info info = {
+ .fwnode = of_fwnode_handle(node),
+ .ops = &xgene_msi_domain_ops,
+ .size = NR_MSI_VEC,
+ .host_data = msi,
+ };
+
+ msi->inner_domain = msi_create_parent_irq_domain(&info, &xgene_msi_parent_ops);
+ return msi->inner_domain ? 0 : -ENOMEM;
}
-static int xgene_msi_init_allocator(struct xgene_msi *xgene_msi)
+static int xgene_msi_init_allocator(struct device *dev)
{
- xgene_msi->bitmap = bitmap_zalloc(NR_MSI_VEC, GFP_KERNEL);
- if (!xgene_msi->bitmap)
+ xgene_msi_ctrl->bitmap = devm_bitmap_zalloc(dev, NR_MSI_VEC, GFP_KERNEL);
+ if (!xgene_msi_ctrl->bitmap)
return -ENOMEM;
- mutex_init(&xgene_msi->bitmap_lock);
-
- xgene_msi->msi_groups = kcalloc(NR_HW_IRQS,
- sizeof(struct xgene_msi_group),
- GFP_KERNEL);
- if (!xgene_msi->msi_groups)
- return -ENOMEM;
+ mutex_init(&xgene_msi_ctrl->bitmap_lock);
return 0;
}
static void xgene_msi_isr(struct irq_desc *desc)
{
+ unsigned int *irqp = irq_desc_get_handler_data(desc);
struct irq_chip *chip = irq_desc_get_chip(desc);
- struct xgene_msi_group *msi_groups;
- struct xgene_msi *xgene_msi;
- int msir_index, msir_val, hw_irq, ret;
- u32 intr_index, grp_select, msi_grp;
+ struct xgene_msi *xgene_msi = xgene_msi_ctrl;
+ unsigned long grp_pending;
+ int msir_idx;
+ u32 msi_grp;
chained_irq_enter(chip, desc);
- msi_groups = irq_desc_get_handler_data(desc);
- xgene_msi = msi_groups->msi;
- msi_grp = msi_groups->msi_grp;
-
- /*
- * MSIINTn (n is 0..F) indicates if there is a pending MSI interrupt
- * If bit x of this register is set (x is 0..7), one or more interrupts
- * corresponding to MSInIRx is set.
- */
- grp_select = xgene_msi_int_read(xgene_msi, msi_grp);
- while (grp_select) {
- msir_index = ffs(grp_select) - 1;
- /*
- * Calculate MSInIRx address to read to check for interrupts
- * (refer to termination address and data assignment
- * described in xgene_compose_msi_msg() )
- */
- msir_val = xgene_msi_ir_read(xgene_msi, msi_grp, msir_index);
- while (msir_val) {
- intr_index = ffs(msir_val) - 1;
- /*
- * Calculate MSI vector number (refer to the termination
- * address and data assignment described in
- * xgene_compose_msi_msg function)
- */
- hw_irq = (((msir_index * IRQS_PER_IDX) + intr_index) *
- NR_HW_IRQS) + msi_grp;
- /*
- * As we have multiple hw_irq that maps to single MSI,
- * always look up the virq using the hw_irq as seen from
- * CPU0
- */
- hw_irq = hwirq_to_canonical_hwirq(hw_irq);
- ret = generic_handle_domain_irq(xgene_msi->inner_domain, hw_irq);
+ msi_grp = irqp - xgene_msi->gic_irq;
+
+ grp_pending = xgene_msi_int_read(xgene_msi, msi_grp);
+
+ for_each_set_bit(msir_idx, &grp_pending, IDX_PER_GROUP) {
+ unsigned long msir;
+ int intr_idx;
+
+ msir = xgene_msi_ir_read(xgene_msi, msi_grp, msir_idx);
+
+ for_each_set_bit(intr_idx, &msir, IRQS_PER_IDX) {
+ irq_hw_number_t hwirq;
+ int ret;
+
+ hwirq = compute_hwirq(msi_grp, msir_idx, intr_idx);
+ ret = generic_handle_domain_irq(xgene_msi->inner_domain,
+ hwirq);
WARN_ON_ONCE(ret);
- msir_val &= ~(1 << intr_index);
- }
- grp_select &= ~(1 << msir_index);
-
- if (!grp_select) {
- /*
- * We handled all interrupts happened in this group,
- * resample this group MSI_INTx register in case
- * something else has been made pending in the meantime
- */
- grp_select = xgene_msi_int_read(xgene_msi, msi_grp);
}
}
chained_irq_exit(chip, desc);
}
-static enum cpuhp_state pci_xgene_online;
-
static void xgene_msi_remove(struct platform_device *pdev)
{
- struct xgene_msi *msi = platform_get_drvdata(pdev);
-
- if (pci_xgene_online)
- cpuhp_remove_state(pci_xgene_online);
- cpuhp_remove_state(CPUHP_PCI_XGENE_DEAD);
-
- kfree(msi->msi_groups);
-
- bitmap_free(msi->bitmap);
- msi->bitmap = NULL;
+ for (int i = 0; i < NR_HW_IRQS; i++) {
+ unsigned int irq = xgene_msi_ctrl->gic_irq[i];
+ if (!irq)
+ continue;
+ irq_set_chained_handler_and_data(irq, NULL, NULL);
+ }
- xgene_free_domains(msi);
+ if (xgene_msi_ctrl->inner_domain)
+ irq_domain_remove(xgene_msi_ctrl->inner_domain);
}
-static int xgene_msi_hwirq_alloc(unsigned int cpu)
+static int xgene_msi_handler_setup(struct platform_device *pdev)
{
- struct xgene_msi *msi = &xgene_msi_ctrl;
- struct xgene_msi_group *msi_group;
- cpumask_var_t mask;
+ struct xgene_msi *xgene_msi = xgene_msi_ctrl;
int i;
- int err;
- for (i = cpu; i < NR_HW_IRQS; i += msi->num_cpus) {
- msi_group = &msi->msi_groups[i];
- if (!msi_group->gic_irq)
- continue;
+ for (i = 0; i < NR_HW_IRQS; i++) {
+ u32 msi_val;
+ int irq, err;
+
+ /*
+ * MSInIRx registers are read-to-clear; before registering
+ * interrupt handlers, read all of them to clear spurious
+ * interrupts that may occur before the driver is probed.
+ */
+ for (int msi_idx = 0; msi_idx < IDX_PER_GROUP; msi_idx++)
+ xgene_msi_ir_read(xgene_msi, i, msi_idx);
+
+ /* Read MSIINTn to confirm */
+ msi_val = xgene_msi_int_read(xgene_msi, i);
+ if (msi_val) {
+ dev_err(&pdev->dev, "Failed to clear spurious IRQ\n");
+ return -EINVAL;
+ }
- irq_set_chained_handler_and_data(msi_group->gic_irq,
- xgene_msi_isr, msi_group);
+ irq = platform_get_irq(pdev, i);
+ if (irq < 0)
+ return irq;
+
+ xgene_msi->gic_irq[i] = irq;
/*
* Statically allocate MSI GIC IRQs to each CPU core.
* With 8-core X-Gene v1, 2 MSI GIC IRQs are allocated
* to each core.
*/
- if (alloc_cpumask_var(&mask, GFP_KERNEL)) {
- cpumask_clear(mask);
- cpumask_set_cpu(cpu, mask);
- err = irq_set_affinity(msi_group->gic_irq, mask);
- if (err)
- pr_err("failed to set affinity for GIC IRQ");
- free_cpumask_var(mask);
- } else {
- pr_err("failed to alloc CPU mask for affinity\n");
- err = -EINVAL;
- }
-
+ irq_set_status_flags(irq, IRQ_NO_BALANCING);
+ err = irq_set_affinity(irq, cpumask_of(i % num_possible_cpus()));
if (err) {
- irq_set_chained_handler_and_data(msi_group->gic_irq,
- NULL, NULL);
+ pr_err("failed to set affinity for GIC IRQ");
return err;
}
- }
- return 0;
-}
-
-static int xgene_msi_hwirq_free(unsigned int cpu)
-{
- struct xgene_msi *msi = &xgene_msi_ctrl;
- struct xgene_msi_group *msi_group;
- int i;
-
- for (i = cpu; i < NR_HW_IRQS; i += msi->num_cpus) {
- msi_group = &msi->msi_groups[i];
- if (!msi_group->gic_irq)
- continue;
-
- irq_set_chained_handler_and_data(msi_group->gic_irq, NULL,
- NULL);
+ irq_set_chained_handler_and_data(irq, xgene_msi_isr,
+ &xgene_msi_ctrl->gic_irq[i]);
}
+
return 0;
}
@@ -432,82 +347,42 @@ static const struct of_device_id xgene_msi_match_table[] = {
static int xgene_msi_probe(struct platform_device *pdev)
{
struct resource *res;
- int rc, irq_index;
struct xgene_msi *xgene_msi;
- int virt_msir;
- u32 msi_val, msi_idx;
+ int rc;
- xgene_msi = &xgene_msi_ctrl;
+ xgene_msi_ctrl = devm_kzalloc(&pdev->dev, sizeof(*xgene_msi_ctrl),
+ GFP_KERNEL);
+ if (!xgene_msi_ctrl)
+ return -ENOMEM;
- platform_set_drvdata(pdev, xgene_msi);
+ xgene_msi = xgene_msi_ctrl;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- xgene_msi->msi_regs = devm_ioremap_resource(&pdev->dev, res);
+ xgene_msi->msi_regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(xgene_msi->msi_regs)) {
rc = PTR_ERR(xgene_msi->msi_regs);
goto error;
}
xgene_msi->msi_addr = res->start;
- xgene_msi->node = pdev->dev.of_node;
- xgene_msi->num_cpus = num_possible_cpus();
- rc = xgene_msi_init_allocator(xgene_msi);
+ rc = xgene_msi_init_allocator(&pdev->dev);
if (rc) {
dev_err(&pdev->dev, "Error allocating MSI bitmap\n");
goto error;
}
- rc = xgene_allocate_domains(xgene_msi);
+ rc = xgene_allocate_domains(dev_of_node(&pdev->dev), xgene_msi);
if (rc) {
dev_err(&pdev->dev, "Failed to allocate MSI domain\n");
goto error;
}
- for (irq_index = 0; irq_index < NR_HW_IRQS; irq_index++) {
- virt_msir = platform_get_irq(pdev, irq_index);
- if (virt_msir < 0) {
- rc = virt_msir;
- goto error;
- }
- xgene_msi->msi_groups[irq_index].gic_irq = virt_msir;
- xgene_msi->msi_groups[irq_index].msi_grp = irq_index;
- xgene_msi->msi_groups[irq_index].msi = xgene_msi;
- }
-
- /*
- * MSInIRx registers are read-to-clear; before registering
- * interrupt handlers, read all of them to clear spurious
- * interrupts that may occur before the driver is probed.
- */
- for (irq_index = 0; irq_index < NR_HW_IRQS; irq_index++) {
- for (msi_idx = 0; msi_idx < IDX_PER_GROUP; msi_idx++)
- xgene_msi_ir_read(xgene_msi, irq_index, msi_idx);
-
- /* Read MSIINTn to confirm */
- msi_val = xgene_msi_int_read(xgene_msi, irq_index);
- if (msi_val) {
- dev_err(&pdev->dev, "Failed to clear spurious IRQ\n");
- rc = -EINVAL;
- goto error;
- }
- }
-
- rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "pci/xgene:online",
- xgene_msi_hwirq_alloc, NULL);
- if (rc < 0)
- goto err_cpuhp;
- pci_xgene_online = rc;
- rc = cpuhp_setup_state(CPUHP_PCI_XGENE_DEAD, "pci/xgene:dead", NULL,
- xgene_msi_hwirq_free);
+ rc = xgene_msi_handler_setup(pdev);
if (rc)
- goto err_cpuhp;
+ goto error;
dev_info(&pdev->dev, "APM X-Gene PCIe MSI driver loaded\n");
return 0;
-
-err_cpuhp:
- dev_err(&pdev->dev, "failed to add CPU MSI notifier\n");
error:
xgene_msi_remove(pdev);
return rc;
@@ -519,11 +394,6 @@ static struct platform_driver xgene_msi_driver = {
.of_match_table = xgene_msi_match_table,
},
.probe = xgene_msi_probe,
- .remove_new = xgene_msi_remove,
+ .remove = xgene_msi_remove,
};
-
-static int __init xgene_pcie_msi_init(void)
-{
- return platform_driver_register(&xgene_msi_driver);
-}
-subsys_initcall(xgene_pcie_msi_init);
+builtin_platform_driver(xgene_msi_driver);
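With the reworked X-Gene scheme, a hardware IRQ number packs the register index into bits [6:4], the MSI data into bits [3:0] and the top bit of the frame number into bit [7]; the low three frame bits come from the target CPU when the message is composed. A worked decode as plain, runnable C (the hwirq and CPU values are illustrative only):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t hwirq = 0x9a;	/* example hwirq handed out by the domain */
	uint32_t cpu = 5;	/* first CPU in the effective affinity mask */

	uint32_t msir  = (hwirq >> 4) & 0x7;		/* MSInRx_HWIRQ_MASK */
	uint32_t data  = hwirq & 0xf;			/* DATA_HWIRQ_MASK   */
	uint32_t frame = (((hwirq >> 7) & 0x1) << 3) | cpu;

	/* MSInIRx offset: frame goes to bits [22:19], register index to [18:16] */
	uint32_t offset = (frame << 19) | (msir << 16);

	printf("frame=%u msir=%u data=%#x -> MSI%XIR%u at base + %#x\n",
	       frame, msir, data, frame, msir, offset);
	return 0;
}

For hwirq 0x9a targeting CPU 5 this yields frame 13, register index 1 and data 0xa, i.e. the endpoint writes 0xa to MSIDIR1 at base + 0x690000.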
diff --git a/drivers/pci/controller/pci-xgene.c b/drivers/pci/controller/pci-xgene.c
index 887b4941ff32..b95afa35201d 100644
--- a/drivers/pci/controller/pci-xgene.c
+++ b/drivers/pci/controller/pci-xgene.c
@@ -12,6 +12,7 @@
#include <linux/jiffies.h>
#include <linux/memblock.h>
#include <linux/init.h>
+#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
@@ -53,11 +54,9 @@
#define XGENE_V1_PCI_EXP_CAP 0x40
/* PCIe IP version */
-#define XGENE_PCIE_IP_VER_UNKN 0
#define XGENE_PCIE_IP_VER_1 1
#define XGENE_PCIE_IP_VER_2 2
-#if defined(CONFIG_PCI_XGENE) || (defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS))
struct xgene_pcie {
struct device_node *node;
struct device *dev;
@@ -163,31 +162,31 @@ static int xgene_pcie_config_read32(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 *val)
{
struct xgene_pcie *port = pcie_bus_to_port(bus);
+ int ret;
- if (pci_generic_config_read32(bus, devfn, where & ~0x3, 4, val) !=
- PCIBIOS_SUCCESSFUL)
- return PCIBIOS_DEVICE_NOT_FOUND;
+ ret = pci_generic_config_read32(bus, devfn, where & ~0x3, 4, val);
+ if (ret != PCIBIOS_SUCCESSFUL)
+ return ret;
/*
* The v1 controller has a bug in its Configuration Request Retry
- * Status (CRS) logic: when CRS Software Visibility is enabled and
+ * Status (RRS) logic: when RRS Software Visibility is enabled and
* we read the Vendor and Device ID of a non-existent device, the
* controller fabricates return data of 0xFFFF0001 ("device exists
* but is not ready") instead of 0xFFFFFFFF (PCI_ERROR_RESPONSE)
* ("device does not exist"). This causes the PCI core to retry
* the read until it times out. Avoid this by not claiming to
- * support CRS SV.
+ * support RRS SV.
*/
if (pci_is_root_bus(bus) && (port->version == XGENE_PCIE_IP_VER_1) &&
((where & ~0x3) == XGENE_V1_PCI_EXP_CAP + PCI_EXP_RTCTL))
- *val &= ~(PCI_EXP_RTCAP_CRSVIS << 16);
+ *val &= ~(PCI_EXP_RTCAP_RRS_SV << 16);
if (size <= 2)
*val = (*val >> (8 * (where & 3))) & ((1 << (size * 8)) - 1);
return PCIBIOS_SUCCESSFUL;
}
-#endif
#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)
static int xgene_get_csr_resource(struct acpi_device *adev,
@@ -278,7 +277,6 @@ const struct pci_ecam_ops xgene_v2_pcie_ecam_ops = {
};
#endif
-#if defined(CONFIG_PCI_XGENE)
static u64 xgene_pcie_set_ib_mask(struct xgene_pcie *port, u32 addr,
u32 flags, u64 size)
{
@@ -593,6 +591,24 @@ static struct pci_ops xgene_pcie_ops = {
.write = pci_generic_config_write32,
};
+static bool xgene_check_pcie_msi_ready(void)
+{
+ struct device_node *np;
+ struct irq_domain *d;
+
+ if (!IS_ENABLED(CONFIG_PCI_XGENE_MSI))
+ return true;
+
+ np = of_find_compatible_node(NULL, NULL, "apm,xgene1-msi");
+ if (!np)
+ return true;
+
+ d = irq_find_matching_host(np, DOMAIN_BUS_PCI_MSI);
+ of_node_put(np);
+
+ return d && irq_domain_is_msi_parent(d);
+}
+
static int xgene_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -601,6 +617,10 @@ static int xgene_pcie_probe(struct platform_device *pdev)
struct pci_host_bridge *bridge;
int ret;
+ if (!xgene_check_pcie_msi_ready())
+ return dev_err_probe(&pdev->dev, -EPROBE_DEFER,
+ "MSI driver not ready\n");
+
bridge = devm_pci_alloc_host_bridge(dev, sizeof(*port));
if (!bridge)
return -ENOMEM;
@@ -609,10 +629,7 @@ static int xgene_pcie_probe(struct platform_device *pdev)
port->node = of_node_get(dn);
port->dev = dev;
-
- port->version = XGENE_PCIE_IP_VER_UNKN;
- if (of_device_is_compatible(port->node, "apm,xgene-pcie"))
- port->version = XGENE_PCIE_IP_VER_1;
+ port->version = XGENE_PCIE_IP_VER_1;
ret = xgene_pcie_map_reg(port, pdev);
if (ret)
@@ -646,4 +663,3 @@ static struct platform_driver xgene_pcie_driver = {
.probe = xgene_pcie_probe,
};
builtin_platform_driver(xgene_pcie_driver);
-#endif
diff --git a/drivers/pci/controller/pcie-altera-msi.c b/drivers/pci/controller/pcie-altera-msi.c
index 6ad5427490b5..ea2ca2e70f20 100644
--- a/drivers/pci/controller/pcie-altera-msi.c
+++ b/drivers/pci/controller/pcie-altera-msi.c
@@ -9,6 +9,7 @@
#include <linux/interrupt.h>
#include <linux/irqchip/chained_irq.h>
+#include <linux/irqchip/irq-msi-lib.h>
#include <linux/irqdomain.h>
#include <linux/init.h>
#include <linux/module.h>
@@ -29,7 +30,6 @@ struct altera_msi {
DECLARE_BITMAP(used, MAX_MSI_VECTORS);
struct mutex lock; /* protect "used" bitmap */
struct platform_device *pdev;
- struct irq_domain *msi_domain;
struct irq_domain *inner_domain;
void __iomem *csr_base;
void __iomem *vector_base;
@@ -74,18 +74,20 @@ static void altera_msi_isr(struct irq_desc *desc)
chained_irq_exit(chip, desc);
}
-static struct irq_chip altera_msi_irq_chip = {
- .name = "Altera PCIe MSI",
- .irq_mask = pci_msi_mask_irq,
- .irq_unmask = pci_msi_unmask_irq,
-};
+#define ALTERA_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
+ MSI_FLAG_USE_DEF_CHIP_OPS | \
+ MSI_FLAG_NO_AFFINITY)
-static struct msi_domain_info altera_msi_domain_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_PCI_MSIX),
- .chip = &altera_msi_irq_chip,
-};
+#define ALTERA_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \
+ MSI_FLAG_PCI_MSIX)
+static const struct msi_parent_ops altera_msi_parent_ops = {
+ .required_flags = ALTERA_MSI_FLAGS_REQUIRED,
+ .supported_flags = ALTERA_MSI_FLAGS_SUPPORTED,
+ .bus_select_token = DOMAIN_BUS_PCI_MSI,
+ .prefix = "Altera-",
+ .init_dev_msi_info = msi_lib_init_dev_msi_info,
+};
static void altera_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
struct altera_msi *msi = irq_data_get_irq_chip_data(data);
@@ -99,16 +101,9 @@ static void altera_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
(int)data->hwirq, msg->address_hi, msg->address_lo);
}
-static int altera_msi_set_affinity(struct irq_data *irq_data,
- const struct cpumask *mask, bool force)
-{
- return -EINVAL;
-}
-
static struct irq_chip altera_msi_bottom_irq_chip = {
.name = "Altera MSI",
.irq_compose_msi_msg = altera_compose_msi_msg,
- .irq_set_affinity = altera_msi_set_affinity,
};
static int altera_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
@@ -171,20 +166,16 @@ static const struct irq_domain_ops msi_domain_ops = {
static int altera_allocate_domains(struct altera_msi *msi)
{
- struct fwnode_handle *fwnode = of_node_to_fwnode(msi->pdev->dev.of_node);
-
- msi->inner_domain = irq_domain_add_linear(NULL, msi->num_of_vectors,
- &msi_domain_ops, msi);
+ struct irq_domain_info info = {
+ .fwnode = dev_fwnode(&msi->pdev->dev),
+ .ops = &msi_domain_ops,
+ .host_data = msi,
+ .size = msi->num_of_vectors,
+ };
+
+ msi->inner_domain = msi_create_parent_irq_domain(&info, &altera_msi_parent_ops);
if (!msi->inner_domain) {
- dev_err(&msi->pdev->dev, "failed to create IRQ domain\n");
- return -ENOMEM;
- }
-
- msi->msi_domain = pci_msi_create_irq_domain(fwnode,
- &altera_msi_domain_info, msi->inner_domain);
- if (!msi->msi_domain) {
dev_err(&msi->pdev->dev, "failed to create MSI domain\n");
- irq_domain_remove(msi->inner_domain);
return -ENOMEM;
}
@@ -193,7 +184,6 @@ static int altera_allocate_domains(struct altera_msi *msi)
static void altera_free_domains(struct altera_msi *msi)
{
- irq_domain_remove(msi->msi_domain);
irq_domain_remove(msi->inner_domain);
}
@@ -274,7 +264,7 @@ static struct platform_driver altera_msi_driver = {
.of_match_table = altera_msi_of_match,
},
.probe = altera_msi_probe,
- .remove_new = altera_msi_remove,
+ .remove = altera_msi_remove,
};
static int __init altera_msi_init(void)
@@ -290,4 +280,5 @@ static void __exit altera_msi_exit(void)
subsys_initcall(altera_msi_init);
MODULE_DEVICE_TABLE(of, altera_msi_of_match);
module_exit(altera_msi_exit);
+MODULE_DESCRIPTION("Altera PCIe MSI support driver");
MODULE_LICENSE("GPL v2");
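Like the Tegra and X-Gene MSI changes above, the Altera driver drops its separate top-level PCI/MSI domain and creates a single MSI parent domain instead. A minimal sketch of the shape of such a conversion; the names here are placeholders, not the driver's own:

#include <linux/device.h>
#include <linux/irqchip/irq-msi-lib.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>

static const struct msi_parent_ops example_msi_parent_ops = {
	.required_flags		= MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS,
	.supported_flags	= MSI_GENERIC_FLAGS_MASK | MSI_FLAG_PCI_MSIX,
	.bus_select_token	= DOMAIN_BUS_PCI_MSI,
	.init_dev_msi_info	= msi_lib_init_dev_msi_info,
};

static struct irq_domain *example_create_msi_domain(struct device *dev,
						    const struct irq_domain_ops *ops,
						    void *host_data,
						    unsigned int nr_vecs)
{
	struct irq_domain_info info = {
		.fwnode		= dev_fwnode(dev),
		.ops		= ops,
		.size		= nr_vecs,
		.host_data	= host_data,
	};

	/* one parent domain replaces the old inner domain + PCI/MSI domain pair */
	return msi_create_parent_irq_domain(&info, &example_msi_parent_ops);
}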
diff --git a/drivers/pci/controller/pcie-altera.c b/drivers/pci/controller/pcie-altera.c
index c95a29fff8bf..3dbb7adc421c 100644
--- a/drivers/pci/controller/pcie-altera.c
+++ b/drivers/pci/controller/pcie-altera.c
@@ -6,14 +6,14 @@
* Description: Altera PCIe host controller driver
*/
+#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
#include <linux/init.h>
#include <linux/module.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
-#include <linux/of_irq.h>
+#include <linux/of.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
@@ -56,12 +56,11 @@
#define TLP_READ_TAG 0x1d
#define TLP_WRITE_TAG 0x10
#define RP_DEVFN 0
-#define TLP_REQ_ID(bus, devfn) (((bus) << 8) | (devfn))
#define TLP_CFG_DW0(pcie, cfg) \
(((cfg) << 24) | \
TLP_PAYLOAD_SIZE)
#define TLP_CFG_DW1(pcie, tag, be) \
- (((TLP_REQ_ID(pcie->root_bus_nr, RP_DEVFN)) << 16) | (tag << 8) | (be))
+ (((PCI_DEVID(pcie->root_bus_nr, RP_DEVFN)) << 16) | (tag << 8) | (be))
#define TLP_CFG_DW2(bus, devfn, offset) \
(((bus) << 24) | ((devfn) << 16) | (offset))
#define TLP_COMP_STATUS(s) (((s) >> 13) & 7)
@@ -79,9 +78,25 @@
#define S10_TLP_FMTTYPE_CFGWR0 0x45
#define S10_TLP_FMTTYPE_CFGWR1 0x44
+#define AGLX_RP_CFG_ADDR(pcie, reg) (((pcie)->hip_base) + (reg))
+#define AGLX_RP_SECONDARY(pcie) \
+ readb(AGLX_RP_CFG_ADDR(pcie, PCI_SECONDARY_BUS))
+
+#define AGLX_BDF_REG 0x00002004
+#define AGLX_ROOT_PORT_IRQ_STATUS 0x14c
+#define AGLX_ROOT_PORT_IRQ_ENABLE 0x150
+#define CFG_AER BIT(4)
+
+#define AGLX_CFG_TARGET GENMASK(13, 12)
+#define AGLX_CFG_TARGET_TYPE0 0
+#define AGLX_CFG_TARGET_TYPE1 1
+#define AGLX_CFG_TARGET_LOCAL_2000 2
+#define AGLX_CFG_TARGET_LOCAL_3000 3
+
enum altera_pcie_version {
ALTERA_PCIE_V1 = 0,
ALTERA_PCIE_V2,
+ ALTERA_PCIE_V3,
};
struct altera_pcie {
@@ -104,6 +119,11 @@ struct altera_pcie_ops {
int size, u32 *value);
int (*rp_write_cfg)(struct altera_pcie *pcie, u8 busno,
int where, int size, u32 value);
+ int (*ep_read_cfg)(struct altera_pcie *pcie, u8 busno,
+ unsigned int devfn, int where, int size, u32 *value);
+ int (*ep_write_cfg)(struct altera_pcie *pcie, u8 busno,
+ unsigned int devfn, int where, int size, u32 value);
+ void (*rp_isr)(struct irq_desc *desc);
};
struct altera_pcie_data {
@@ -114,6 +134,9 @@ struct altera_pcie_data {
u32 cfgrd1;
u32 cfgwr0;
u32 cfgwr1;
+ u32 port_conf_offset;
+ u32 port_irq_status_offset;
+ u32 port_irq_enable_offset;
};
struct tlp_rp_regpair_t {
@@ -133,6 +156,28 @@ static inline u32 cra_readl(struct altera_pcie *pcie, const u32 reg)
return readl_relaxed(pcie->cra_base + reg);
}
+static inline void cra_writew(struct altera_pcie *pcie, const u32 value,
+ const u32 reg)
+{
+ writew_relaxed(value, pcie->cra_base + reg);
+}
+
+static inline u32 cra_readw(struct altera_pcie *pcie, const u32 reg)
+{
+ return readw_relaxed(pcie->cra_base + reg);
+}
+
+static inline void cra_writeb(struct altera_pcie *pcie, const u32 value,
+ const u32 reg)
+{
+ writeb_relaxed(value, pcie->cra_base + reg);
+}
+
+static inline u32 cra_readb(struct altera_pcie *pcie, const u32 reg)
+{
+ return readb_relaxed(pcie->cra_base + reg);
+}
+
static bool altera_pcie_link_up(struct altera_pcie *pcie)
{
return !!((cra_readl(pcie, RP_LTSSM) & RP_LTSSM_MASK) == LTSSM_L0);
@@ -147,11 +192,20 @@ static bool s10_altera_pcie_link_up(struct altera_pcie *pcie)
return !!(readw(addr) & PCI_EXP_LNKSTA_DLLLA);
}
+static bool aglx_altera_pcie_link_up(struct altera_pcie *pcie)
+{
+ void __iomem *addr = AGLX_RP_CFG_ADDR(pcie,
+ pcie->pcie_data->cap_offset +
+ PCI_EXP_LNKSTA);
+
+ return (readw_relaxed(addr) & PCI_EXP_LNKSTA_DLLLA);
+}
+
/*
* Altera PCIe port uses BAR0 of RC's configuration space as the translation
* from PCI bus to native BUS. Entire DDR region is mapped into PCIe space
* using these registers, so it can be reached by DMA from EP devices.
- * This BAR0 will also access to MSI vector when receiving MSI/MSIX interrupt
+ * This BAR0 will also access to MSI vector when receiving MSI/MSI-X interrupt
* from EP devices, eventually trigger interrupt to GIC. The BAR0 of bridge
* should be hidden during enumeration to avoid the sizing and resource
* allocation by PCIe core.
@@ -427,6 +481,103 @@ static int s10_rp_write_cfg(struct altera_pcie *pcie, u8 busno,
return PCIBIOS_SUCCESSFUL;
}
+static int aglx_rp_read_cfg(struct altera_pcie *pcie, int where,
+ int size, u32 *value)
+{
+ void __iomem *addr = AGLX_RP_CFG_ADDR(pcie, where);
+
+ switch (size) {
+ case 1:
+ *value = readb_relaxed(addr);
+ break;
+ case 2:
+ *value = readw_relaxed(addr);
+ break;
+ default:
+ *value = readl_relaxed(addr);
+ break;
+ }
+
+ /* Interrupt PIN not programmed in hardware, set to INTA. */
+ if (where == PCI_INTERRUPT_PIN && size == 1 && !(*value))
+ *value = 0x01;
+ else if (where == PCI_INTERRUPT_LINE && !(*value & 0xff00))
+ *value |= 0x0100;
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int aglx_rp_write_cfg(struct altera_pcie *pcie, u8 busno,
+ int where, int size, u32 value)
+{
+ void __iomem *addr = AGLX_RP_CFG_ADDR(pcie, where);
+
+ switch (size) {
+ case 1:
+ writeb_relaxed(value, addr);
+ break;
+ case 2:
+ writew_relaxed(value, addr);
+ break;
+ default:
+ writel_relaxed(value, addr);
+ break;
+ }
+
+ /*
+ * Monitor changes to PCI_PRIMARY_BUS register on Root Port
+ * and update local copy of root bus number accordingly.
+ */
+ if (busno == pcie->root_bus_nr && where == PCI_PRIMARY_BUS)
+ pcie->root_bus_nr = value & 0xff;
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int aglx_ep_write_cfg(struct altera_pcie *pcie, u8 busno,
+ unsigned int devfn, int where, int size, u32 value)
+{
+ cra_writel(pcie, ((busno << 8) | devfn), AGLX_BDF_REG);
+ if (busno > AGLX_RP_SECONDARY(pcie))
+ where |= FIELD_PREP(AGLX_CFG_TARGET, AGLX_CFG_TARGET_TYPE1);
+
+ switch (size) {
+ case 1:
+ cra_writeb(pcie, value, where);
+ break;
+ case 2:
+ cra_writew(pcie, value, where);
+ break;
+ default:
+ cra_writel(pcie, value, where);
+ break;
+ }
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int aglx_ep_read_cfg(struct altera_pcie *pcie, u8 busno,
+ unsigned int devfn, int where, int size, u32 *value)
+{
+ cra_writel(pcie, ((busno << 8) | devfn), AGLX_BDF_REG);
+ if (busno > AGLX_RP_SECONDARY(pcie))
+ where |= FIELD_PREP(AGLX_CFG_TARGET, AGLX_CFG_TARGET_TYPE1);
+
+ switch (size) {
+ case 1:
+ *value = cra_readb(pcie, where);
+ break;
+ case 2:
+ *value = cra_readw(pcie, where);
+ break;
+ default:
+ *value = cra_readl(pcie, where);
+ break;
+ }
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
static int _altera_pcie_cfg_read(struct altera_pcie *pcie, u8 busno,
unsigned int devfn, int where, int size,
u32 *value)
@@ -439,6 +590,10 @@ static int _altera_pcie_cfg_read(struct altera_pcie *pcie, u8 busno,
return pcie->pcie_data->ops->rp_read_cfg(pcie, where,
size, value);
+ if (pcie->pcie_data->ops->ep_read_cfg)
+ return pcie->pcie_data->ops->ep_read_cfg(pcie, busno, devfn,
+ where, size, value);
+
switch (size) {
case 1:
byte_en = 1 << (where & 3);
@@ -483,6 +638,10 @@ static int _altera_pcie_cfg_write(struct altera_pcie *pcie, u8 busno,
return pcie->pcie_data->ops->rp_write_cfg(pcie, busno,
where, size, value);
+ if (pcie->pcie_data->ops->ep_write_cfg)
+ return pcie->pcie_data->ops->ep_write_cfg(pcie, busno, devfn,
+ where, size, value);
+
switch (size) {
case 1:
data32 = (value & 0xff) << shift;
@@ -661,17 +820,41 @@ static void altera_pcie_isr(struct irq_desc *desc)
dev_err_ratelimited(dev, "unexpected IRQ, INT%d\n", bit);
}
}
+ chained_irq_exit(chip, desc);
+}
+
+static void aglx_isr(struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct altera_pcie *pcie;
+ struct device *dev;
+ u32 status;
+ int ret;
+
+ chained_irq_enter(chip, desc);
+ pcie = irq_desc_get_handler_data(desc);
+ dev = &pcie->pdev->dev;
+
+ status = readl(pcie->hip_base + pcie->pcie_data->port_conf_offset +
+ pcie->pcie_data->port_irq_status_offset);
+ if (status & CFG_AER) {
+ writel(CFG_AER, (pcie->hip_base + pcie->pcie_data->port_conf_offset +
+ pcie->pcie_data->port_irq_status_offset));
+
+ ret = generic_handle_domain_irq(pcie->irq_domain, 0);
+ if (ret)
+ dev_err_ratelimited(dev, "unexpected IRQ %d\n", pcie->irq);
+ }
chained_irq_exit(chip, desc);
}
static int altera_pcie_init_irq_domain(struct altera_pcie *pcie)
{
struct device *dev = &pcie->pdev->dev;
- struct device_node *node = dev->of_node;
/* Setup INTx */
- pcie->irq_domain = irq_domain_add_linear(node, PCI_NUM_INTX,
+ pcie->irq_domain = irq_domain_create_linear(dev_fwnode(dev), PCI_NUM_INTX,
&intx_domain_ops, pcie);
if (!pcie->irq_domain) {
dev_err(dev, "Failed to get a INTx IRQ domain\n");
@@ -696,9 +879,9 @@ static int altera_pcie_parse_dt(struct altera_pcie *pcie)
if (IS_ERR(pcie->cra_base))
return PTR_ERR(pcie->cra_base);
- if (pcie->pcie_data->version == ALTERA_PCIE_V2) {
- pcie->hip_base =
- devm_platform_ioremap_resource_byname(pdev, "Hip");
+ if (pcie->pcie_data->version == ALTERA_PCIE_V2 ||
+ pcie->pcie_data->version == ALTERA_PCIE_V3) {
+ pcie->hip_base = devm_platform_ioremap_resource_byname(pdev, "Hip");
if (IS_ERR(pcie->hip_base))
return PTR_ERR(pcie->hip_base);
}
@@ -708,7 +891,7 @@ static int altera_pcie_parse_dt(struct altera_pcie *pcie)
if (pcie->irq < 0)
return pcie->irq;
- irq_set_chained_handler_and_data(pcie->irq, altera_pcie_isr, pcie);
+ irq_set_chained_handler_and_data(pcie->irq, pcie->pcie_data->ops->rp_isr, pcie);
return 0;
}
@@ -721,6 +904,7 @@ static const struct altera_pcie_ops altera_pcie_ops_1_0 = {
.tlp_read_pkt = tlp_read_packet,
.tlp_write_pkt = tlp_write_packet,
.get_link_status = altera_pcie_link_up,
+ .rp_isr = altera_pcie_isr,
};
static const struct altera_pcie_ops altera_pcie_ops_2_0 = {
@@ -729,6 +913,16 @@ static const struct altera_pcie_ops altera_pcie_ops_2_0 = {
.get_link_status = s10_altera_pcie_link_up,
.rp_read_cfg = s10_rp_read_cfg,
.rp_write_cfg = s10_rp_write_cfg,
+ .rp_isr = altera_pcie_isr,
+};
+
+static const struct altera_pcie_ops altera_pcie_ops_3_0 = {
+ .rp_read_cfg = aglx_rp_read_cfg,
+ .rp_write_cfg = aglx_rp_write_cfg,
+ .get_link_status = aglx_altera_pcie_link_up,
+ .ep_read_cfg = aglx_ep_read_cfg,
+ .ep_write_cfg = aglx_ep_write_cfg,
+ .rp_isr = aglx_isr,
};
static const struct altera_pcie_data altera_pcie_1_0_data = {
@@ -751,11 +945,44 @@ static const struct altera_pcie_data altera_pcie_2_0_data = {
.cfgwr1 = S10_TLP_FMTTYPE_CFGWR1,
};
+static const struct altera_pcie_data altera_pcie_3_0_f_tile_data = {
+ .ops = &altera_pcie_ops_3_0,
+ .version = ALTERA_PCIE_V3,
+ .cap_offset = 0x70,
+ .port_conf_offset = 0x14000,
+ .port_irq_status_offset = AGLX_ROOT_PORT_IRQ_STATUS,
+ .port_irq_enable_offset = AGLX_ROOT_PORT_IRQ_ENABLE,
+};
+
+static const struct altera_pcie_data altera_pcie_3_0_p_tile_data = {
+ .ops = &altera_pcie_ops_3_0,
+ .version = ALTERA_PCIE_V3,
+ .cap_offset = 0x70,
+ .port_conf_offset = 0x104000,
+ .port_irq_status_offset = AGLX_ROOT_PORT_IRQ_STATUS,
+ .port_irq_enable_offset = AGLX_ROOT_PORT_IRQ_ENABLE,
+};
+
+static const struct altera_pcie_data altera_pcie_3_0_r_tile_data = {
+ .ops = &altera_pcie_ops_3_0,
+ .version = ALTERA_PCIE_V3,
+ .cap_offset = 0x70,
+ .port_conf_offset = 0x1300,
+ .port_irq_status_offset = 0x0,
+ .port_irq_enable_offset = 0x4,
+};
+
static const struct of_device_id altera_pcie_of_match[] = {
{.compatible = "altr,pcie-root-port-1.0",
.data = &altera_pcie_1_0_data },
{.compatible = "altr,pcie-root-port-2.0",
.data = &altera_pcie_2_0_data },
+ {.compatible = "altr,pcie-root-port-3.0-f-tile",
+ .data = &altera_pcie_3_0_f_tile_data },
+ {.compatible = "altr,pcie-root-port-3.0-p-tile",
+ .data = &altera_pcie_3_0_p_tile_data },
+ {.compatible = "altr,pcie-root-port-3.0-r-tile",
+ .data = &altera_pcie_3_0_r_tile_data },
{},
};
@@ -793,11 +1020,18 @@ static int altera_pcie_probe(struct platform_device *pdev)
return ret;
}
- /* clear all interrupts */
- cra_writel(pcie, P2A_INT_STS_ALL, P2A_INT_STATUS);
- /* enable all interrupts */
- cra_writel(pcie, P2A_INT_ENA_ALL, P2A_INT_ENABLE);
- altera_pcie_host_init(pcie);
+ if (pcie->pcie_data->version == ALTERA_PCIE_V1 ||
+ pcie->pcie_data->version == ALTERA_PCIE_V2) {
+ /* clear all interrupts */
+ cra_writel(pcie, P2A_INT_STS_ALL, P2A_INT_STATUS);
+ /* enable all interrupts */
+ cra_writel(pcie, P2A_INT_ENA_ALL, P2A_INT_ENABLE);
+ altera_pcie_host_init(pcie);
+ } else if (pcie->pcie_data->version == ALTERA_PCIE_V3) {
+ writel(CFG_AER,
+ pcie->hip_base + pcie->pcie_data->port_conf_offset +
+ pcie->pcie_data->port_irq_enable_offset);
+ }
bridge->sysdata = pcie;
bridge->busnr = pcie->root_bus_nr;
@@ -817,14 +1051,15 @@ static void altera_pcie_remove(struct platform_device *pdev)
}
static struct platform_driver altera_pcie_driver = {
- .probe = altera_pcie_probe,
- .remove_new = altera_pcie_remove,
+ .probe = altera_pcie_probe,
+ .remove = altera_pcie_remove,
.driver = {
- .name = "altera-pcie",
+ .name = "altera-pcie",
.of_match_table = altera_pcie_of_match,
},
};
MODULE_DEVICE_TABLE(of, altera_pcie_of_match);
module_platform_driver(altera_pcie_driver);
+MODULE_DESCRIPTION("Altera PCIe host controller driver");
MODULE_LICENSE("GPL v2");
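On the new Agilex (V3) ports, endpoint config cycles are generated by writing the bus/devfn pair to AGLX_BDF_REG and then accessing the config offset through the CRA window, with the access type (type 0 vs type 1) folded into bits [13:12] of that offset. A worked example of the encoding as plain C (bus, devfn and offset values are illustrative):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t busno = 2;		/* behind the Root Port's secondary bus */
	uint32_t devfn = 0x08;		/* device 1, function 0 */
	uint32_t where = 0x10;		/* BAR0 in config space */

	uint32_t bdf = (busno << 8) | devfn;	/* value written to AGLX_BDF_REG  */
	uint32_t off = where | (1u << 12);	/* AGLX_CFG_TARGET = TYPE1 access */

	printf("AGLX_BDF_REG = %#x, CRA offset = %#x\n", bdf, off);
	return 0;
}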
diff --git a/drivers/pci/controller/pcie-apple.c b/drivers/pci/controller/pcie-apple.c
index 66f37e403a09..2d92fc79f6dd 100644
--- a/drivers/pci/controller/pcie-apple.c
+++ b/drivers/pci/controller/pcie-apple.c
@@ -18,18 +18,22 @@
* Author: Marc Zyngier <maz@kernel.org>
*/
+#include <linux/bitfield.h>
#include <linux/gpio/consumer.h>
#include <linux/kernel.h>
#include <linux/iopoll.h>
#include <linux/irqchip/chained_irq.h>
+#include <linux/irqchip/irq-msi-lib.h>
#include <linux/irqdomain.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/msi.h>
-#include <linux/notifier.h>
#include <linux/of_irq.h>
#include <linux/pci-ecam.h>
+#include "pci-host-common.h"
+
+/* T8103 (original M1) and related SoCs */
#define CORE_RC_PHYIF_CTL 0x00024
#define CORE_RC_PHYIF_CTL_RUN BIT(0)
#define CORE_RC_PHYIF_STAT 0x00028
@@ -40,14 +44,18 @@
#define CORE_RC_STAT_READY BIT(0)
#define CORE_FABRIC_STAT 0x04000
#define CORE_FABRIC_STAT_MASK 0x001F001F
-#define CORE_LANE_CFG(port) (0x84000 + 0x4000 * (port))
-#define CORE_LANE_CFG_REFCLK0REQ BIT(0)
-#define CORE_LANE_CFG_REFCLK1REQ BIT(1)
-#define CORE_LANE_CFG_REFCLK0ACK BIT(2)
-#define CORE_LANE_CFG_REFCLK1ACK BIT(3)
-#define CORE_LANE_CFG_REFCLKEN (BIT(9) | BIT(10))
-#define CORE_LANE_CTL(port) (0x84004 + 0x4000 * (port))
-#define CORE_LANE_CTL_CFGACC BIT(15)
+
+#define CORE_PHY_DEFAULT_BASE(port) (0x84000 + 0x4000 * (port))
+
+#define PHY_LANE_CFG 0x00000
+#define PHY_LANE_CFG_REFCLK0REQ BIT(0)
+#define PHY_LANE_CFG_REFCLK1REQ BIT(1)
+#define PHY_LANE_CFG_REFCLK0ACK BIT(2)
+#define PHY_LANE_CFG_REFCLK1ACK BIT(3)
+#define PHY_LANE_CFG_REFCLKEN (BIT(9) | BIT(10))
+#define PHY_LANE_CFG_REFCLKCGEN (BIT(30) | BIT(31))
+#define PHY_LANE_CTL 0x00004
+#define PHY_LANE_CTL_CFGACC BIT(15)
#define PORT_LTSSMCTL 0x00080
#define PORT_LTSSMCTL_START BIT(0)
@@ -101,7 +109,7 @@
#define PORT_REFCLK_CGDIS BIT(8)
#define PORT_PERST 0x00814
#define PORT_PERST_OFF BIT(0)
-#define PORT_RID2SID(i16) (0x00828 + 4 * (i16))
+#define PORT_RID2SID 0x00828
#define PORT_RID2SID_VALID BIT(31)
#define PORT_RID2SID_SID_SHIFT 16
#define PORT_RID2SID_BUS_SHIFT 8
@@ -119,7 +127,15 @@
#define PORT_TUNSTAT_PERST_ACK_PEND BIT(1)
#define PORT_PREFMEM_ENABLE 0x00994
-#define MAX_RID2SID 64
+/* T602x (M2-pro and co) */
+#define PORT_T602X_MSIADDR 0x016c
+#define PORT_T602X_MSIADDR_HI 0x0170
+#define PORT_T602X_PERST 0x082c
+#define PORT_T602X_RID2SID 0x3000
+#define PORT_T602X_MSIMAP 0x3800
+
+#define PORT_MSIMAP_ENABLE BIT(31)
+#define PORT_MSIMAP_TARGET GENMASK(7, 0)
/*
* The doorbell address is set to 0xfffff000, which by convention
@@ -130,11 +146,45 @@
*/
#define DOORBELL_ADDR CONFIG_PCIE_APPLE_MSI_DOORBELL_ADDR
+struct hw_info {
+ u32 phy_lane_ctl;
+ u32 port_msiaddr;
+ u32 port_msiaddr_hi;
+ u32 port_refclk;
+ u32 port_perst;
+ u32 port_rid2sid;
+ u32 port_msimap;
+ u32 max_rid2sid;
+};
+
+static const struct hw_info t8103_hw = {
+ .phy_lane_ctl = PHY_LANE_CTL,
+ .port_msiaddr = PORT_MSIADDR,
+ .port_msiaddr_hi = 0,
+ .port_refclk = PORT_REFCLK,
+ .port_perst = PORT_PERST,
+ .port_rid2sid = PORT_RID2SID,
+ .port_msimap = 0,
+ .max_rid2sid = 64,
+};
+
+static const struct hw_info t602x_hw = {
+ .phy_lane_ctl = 0,
+ .port_msiaddr = PORT_T602X_MSIADDR,
+ .port_msiaddr_hi = PORT_T602X_MSIADDR_HI,
+ .port_refclk = 0,
+ .port_perst = PORT_T602X_PERST,
+ .port_rid2sid = PORT_T602X_RID2SID,
+ .port_msimap = PORT_T602X_MSIMAP,
+ /* 16 on t602x, guess for autodetect on future HW */
+ .max_rid2sid = 512,
+};
+
struct apple_pcie {
struct mutex lock;
struct device *dev;
void __iomem *base;
- struct irq_domain *domain;
+ const struct hw_info *hw;
unsigned long *bitmap;
struct list_head ports;
struct completion event;
@@ -143,12 +193,14 @@ struct apple_pcie {
};
struct apple_pcie_port {
+ raw_spinlock_t lock;
struct apple_pcie *pcie;
struct device_node *np;
void __iomem *base;
+ void __iomem *phy;
struct irq_domain *domain;
struct list_head entry;
- DECLARE_BITMAP(sid_map, MAX_RID2SID);
+ unsigned long *sid_map;
int sid_map_sz;
int idx;
};
@@ -163,27 +215,6 @@ static void rmw_clear(u32 clr, void __iomem *addr)
writel_relaxed(readl_relaxed(addr) & ~clr, addr);
}
-static void apple_msi_top_irq_mask(struct irq_data *d)
-{
- pci_msi_mask_irq(d);
- irq_chip_mask_parent(d);
-}
-
-static void apple_msi_top_irq_unmask(struct irq_data *d)
-{
- pci_msi_unmask_irq(d);
- irq_chip_unmask_parent(d);
-}
-
-static struct irq_chip apple_msi_top_chip = {
- .name = "PCIe MSI",
- .irq_mask = apple_msi_top_irq_mask,
- .irq_unmask = apple_msi_top_irq_unmask,
- .irq_eoi = irq_chip_eoi_parent,
- .irq_set_affinity = irq_chip_set_affinity_parent,
- .irq_set_type = irq_chip_set_type_parent,
-};
-
static void apple_msi_compose_msg(struct irq_data *data, struct msi_msg *msg)
{
msg->address_hi = upper_32_bits(DOORBELL_ADDR);
@@ -227,8 +258,7 @@ static int apple_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
for (i = 0; i < nr_irqs; i++) {
irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
- &apple_msi_bottom_chip,
- domain->host_data);
+ &apple_msi_bottom_chip, pcie);
}
return 0;
@@ -252,24 +282,20 @@ static const struct irq_domain_ops apple_msi_domain_ops = {
.free = apple_msi_domain_free,
};
-static struct msi_domain_info apple_msi_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX),
- .chip = &apple_msi_top_chip,
-};
-
static void apple_port_irq_mask(struct irq_data *data)
{
struct apple_pcie_port *port = irq_data_get_irq_chip_data(data);
- writel_relaxed(BIT(data->hwirq), port->base + PORT_INTMSKSET);
+ guard(raw_spinlock_irqsave)(&port->lock);
+ rmw_set(BIT(data->hwirq), port->base + PORT_INTMSK);
}
static void apple_port_irq_unmask(struct irq_data *data)
{
struct apple_pcie_port *port = irq_data_get_irq_chip_data(data);
- writel_relaxed(BIT(data->hwirq), port->base + PORT_INTMSKCLR);
+ guard(raw_spinlock_irqsave)(&port->lock);
+ rmw_clear(BIT(data->hwirq), port->base + PORT_INTMSK);
}
static bool hwirq_is_intx(unsigned int hwirq)
@@ -373,7 +399,9 @@ static void apple_port_irq_handler(struct irq_desc *desc)
static int apple_pcie_port_setup_irq(struct apple_pcie_port *port)
{
struct fwnode_handle *fwnode = &port->np->fwnode;
+ struct apple_pcie *pcie = port->pcie;
unsigned int irq;
+ u32 val = 0;
/* FIXME: consider moving each interrupt under each port */
irq = irq_of_parse_and_map(to_of_node(dev_fwnode(port->pcie->dev)),
@@ -388,20 +416,31 @@ static int apple_pcie_port_setup_irq(struct apple_pcie_port *port)
return -ENOMEM;
/* Disable all interrupts */
- writel_relaxed(~0, port->base + PORT_INTMSKSET);
+ writel_relaxed(~0, port->base + PORT_INTMSK);
writel_relaxed(~0, port->base + PORT_INTSTAT);
+ writel_relaxed(~0, port->base + PORT_LINKCMDSTS);
irq_set_chained_handler_and_data(irq, apple_port_irq_handler, port);
/* Configure MSI base address */
BUILD_BUG_ON(upper_32_bits(DOORBELL_ADDR));
- writel_relaxed(lower_32_bits(DOORBELL_ADDR), port->base + PORT_MSIADDR);
+ writel_relaxed(lower_32_bits(DOORBELL_ADDR),
+ port->base + pcie->hw->port_msiaddr);
+ if (pcie->hw->port_msiaddr_hi)
+ writel_relaxed(0, port->base + pcie->hw->port_msiaddr_hi);
/* Enable MSIs, shared between all ports */
- writel_relaxed(0, port->base + PORT_MSIBASE);
- writel_relaxed((ilog2(port->pcie->nvecs) << PORT_MSICFG_L2MSINUM_SHIFT) |
- PORT_MSICFG_EN, port->base + PORT_MSICFG);
+ if (pcie->hw->port_msimap) {
+ for (int i = 0; i < pcie->nvecs; i++)
+ writel_relaxed(FIELD_PREP(PORT_MSIMAP_TARGET, i) |
+ PORT_MSIMAP_ENABLE,
+ port->base + pcie->hw->port_msimap + 4 * i);
+ } else {
+ writel_relaxed(0, port->base + PORT_MSIBASE);
+ val = ilog2(pcie->nvecs) << PORT_MSICFG_L2MSINUM_SHIFT;
+ }
+ writel_relaxed(val | PORT_MSICFG_EN, port->base + PORT_MSICFG);
return 0;
}
@@ -468,43 +507,47 @@ static int apple_pcie_setup_refclk(struct apple_pcie *pcie,
u32 stat;
int res;
- res = readl_relaxed_poll_timeout(pcie->base + CORE_RC_PHYIF_STAT, stat,
- stat & CORE_RC_PHYIF_STAT_REFCLK,
- 100, 50000);
- if (res < 0)
- return res;
+ if (pcie->hw->phy_lane_ctl)
+ rmw_set(PHY_LANE_CTL_CFGACC, port->phy + pcie->hw->phy_lane_ctl);
- rmw_set(CORE_LANE_CTL_CFGACC, pcie->base + CORE_LANE_CTL(port->idx));
- rmw_set(CORE_LANE_CFG_REFCLK0REQ, pcie->base + CORE_LANE_CFG(port->idx));
+ rmw_set(PHY_LANE_CFG_REFCLK0REQ, port->phy + PHY_LANE_CFG);
- res = readl_relaxed_poll_timeout(pcie->base + CORE_LANE_CFG(port->idx),
- stat, stat & CORE_LANE_CFG_REFCLK0ACK,
+ res = readl_relaxed_poll_timeout(port->phy + PHY_LANE_CFG,
+ stat, stat & PHY_LANE_CFG_REFCLK0ACK,
100, 50000);
if (res < 0)
return res;
- rmw_set(CORE_LANE_CFG_REFCLK1REQ, pcie->base + CORE_LANE_CFG(port->idx));
- res = readl_relaxed_poll_timeout(pcie->base + CORE_LANE_CFG(port->idx),
- stat, stat & CORE_LANE_CFG_REFCLK1ACK,
+ rmw_set(PHY_LANE_CFG_REFCLK1REQ, port->phy + PHY_LANE_CFG);
+ res = readl_relaxed_poll_timeout(port->phy + PHY_LANE_CFG,
+ stat, stat & PHY_LANE_CFG_REFCLK1ACK,
100, 50000);
if (res < 0)
return res;
- rmw_clear(CORE_LANE_CTL_CFGACC, pcie->base + CORE_LANE_CTL(port->idx));
+ if (pcie->hw->phy_lane_ctl)
+ rmw_clear(PHY_LANE_CTL_CFGACC, port->phy + pcie->hw->phy_lane_ctl);
+
+ rmw_set(PHY_LANE_CFG_REFCLKEN, port->phy + PHY_LANE_CFG);
- rmw_set(CORE_LANE_CFG_REFCLKEN, pcie->base + CORE_LANE_CFG(port->idx));
- rmw_set(PORT_REFCLK_EN, port->base + PORT_REFCLK);
+ if (pcie->hw->port_refclk)
+ rmw_set(PORT_REFCLK_EN, port->base + pcie->hw->port_refclk);
return 0;
}
+static void __iomem *port_rid2sid_addr(struct apple_pcie_port *port, int idx)
+{
+ return port->base + port->pcie->hw->port_rid2sid + 4 * idx;
+}
+
static u32 apple_pcie_rid2sid_write(struct apple_pcie_port *port,
int idx, u32 val)
{
- writel_relaxed(val, port->base + PORT_RID2SID(idx));
+ writel_relaxed(val, port_rid2sid_addr(port, idx));
/* Read back to ensure completion of the write */
- return readl_relaxed(port->base + PORT_RID2SID(idx));
+ return readl_relaxed(port_rid2sid_addr(port, idx));
}
static int apple_pcie_setup_port(struct apple_pcie *pcie,
@@ -513,6 +556,8 @@ static int apple_pcie_setup_port(struct apple_pcie *pcie,
struct platform_device *platform = to_platform_device(pcie->dev);
struct apple_pcie_port *port;
struct gpio_desc *reset;
+ struct resource *res;
+ char name[16];
u32 stat, idx;
int ret, i;
@@ -525,6 +570,10 @@ static int apple_pcie_setup_port(struct apple_pcie *pcie,
if (!port)
return -ENOMEM;
+ port->sid_map = devm_bitmap_zalloc(pcie->dev, pcie->hw->max_rid2sid, GFP_KERNEL);
+ if (!port->sid_map)
+ return -ENOMEM;
+
ret = of_property_read_u32_index(np, "reg", 0, &idx);
if (ret)
return ret;
@@ -534,14 +583,28 @@ static int apple_pcie_setup_port(struct apple_pcie *pcie,
port->pcie = pcie;
port->np = np;
- port->base = devm_platform_ioremap_resource(platform, port->idx + 2);
+ raw_spin_lock_init(&port->lock);
+
+ snprintf(name, sizeof(name), "port%d", port->idx);
+ res = platform_get_resource_byname(platform, IORESOURCE_MEM, name);
+ if (!res)
+ res = platform_get_resource(platform, IORESOURCE_MEM, port->idx + 2);
+
+ port->base = devm_ioremap_resource(&platform->dev, res);
if (IS_ERR(port->base))
return PTR_ERR(port->base);
+ snprintf(name, sizeof(name), "phy%d", port->idx);
+ res = platform_get_resource_byname(platform, IORESOURCE_MEM, name);
+ if (res)
+ port->phy = devm_ioremap_resource(&platform->dev, res);
+ else
+ port->phy = pcie->base + CORE_PHY_DEFAULT_BASE(port->idx);
+
rmw_set(PORT_APPCLK_EN, port->base + PORT_APPCLK);
/* Assert PERST# before setting up the clock */
- gpiod_set_value(reset, 1);
+ gpiod_set_value_cansleep(reset, 1);
ret = apple_pcie_setup_refclk(pcie, port);
if (ret < 0)
@@ -551,8 +614,8 @@ static int apple_pcie_setup_port(struct apple_pcie *pcie,
usleep_range(100, 200);
/* Deassert PERST# */
- rmw_set(PORT_PERST_OFF, port->base + PORT_PERST);
- gpiod_set_value(reset, 0);
+ rmw_set(PORT_PERST_OFF, port->base + pcie->hw->port_perst);
+ gpiod_set_value_cansleep(reset, 0);
/* Wait for 100ms after PERST# deassertion (PCIe r5.0, 6.6.1) */
msleep(100);
@@ -564,7 +627,11 @@ static int apple_pcie_setup_port(struct apple_pcie *pcie,
return ret;
}
- rmw_clear(PORT_REFCLK_CGDIS, port->base + PORT_REFCLK);
+ if (pcie->hw->port_refclk)
+ rmw_clear(PORT_REFCLK_CGDIS, port->base + pcie->hw->port_refclk);
+ else
+ rmw_set(PHY_LANE_CFG_REFCLKCGEN, port->phy + PHY_LANE_CFG);
+
rmw_clear(PORT_APPCLK_CGDIS, port->base + PORT_APPCLK);
ret = apple_pcie_port_setup_irq(port);
@@ -572,7 +639,7 @@ static int apple_pcie_setup_port(struct apple_pcie *pcie,
return ret;
/* Reset all RID/SID mappings, and check for RAZ/WI registers */
- for (i = 0; i < MAX_RID2SID; i++) {
+ for (i = 0; i < pcie->hw->max_rid2sid; i++) {
if (apple_pcie_rid2sid_write(port, i, 0xbad1d) != 0xbad1d)
break;
apple_pcie_rid2sid_write(port, i, 0);
@@ -585,6 +652,9 @@ static int apple_pcie_setup_port(struct apple_pcie *pcie,
list_add_tail(&port->entry, &pcie->ports);
init_completion(&pcie->event);
+ /* In the success path, we keep a reference to np around */
+ of_node_get(np);
+
ret = apple_pcie_port_register_irqs(port);
WARN_ON(ret);
@@ -596,11 +666,28 @@ static int apple_pcie_setup_port(struct apple_pcie *pcie,
return 0;
}
+static const struct msi_parent_ops apple_msi_parent_ops = {
+ .supported_flags = (MSI_GENERIC_FLAGS_MASK |
+ MSI_FLAG_PCI_MSIX |
+ MSI_FLAG_MULTI_PCI_MSI),
+ .required_flags = (MSI_FLAG_USE_DEF_DOM_OPS |
+ MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_PCI_MSI_MASK_PARENT),
+ .chip_flags = MSI_CHIP_FLAG_SET_EOI,
+ .bus_select_token = DOMAIN_BUS_PCI_MSI,
+ .init_dev_msi_info = msi_lib_init_dev_msi_info,
+};
+
static int apple_msi_init(struct apple_pcie *pcie)
{
struct fwnode_handle *fwnode = dev_fwnode(pcie->dev);
+ struct irq_domain_info info = {
+ .fwnode = fwnode,
+ .ops = &apple_msi_domain_ops,
+ .size = pcie->nvecs,
+ .host_data = pcie,
+ };
struct of_phandle_args args = {};
- struct irq_domain *parent;
int ret;
ret = of_parse_phandle_with_args(to_of_node(fwnode), "msi-ranges",
@@ -620,38 +707,35 @@ static int apple_msi_init(struct apple_pcie *pcie)
if (!pcie->bitmap)
return -ENOMEM;
- parent = irq_find_matching_fwspec(&pcie->fwspec, DOMAIN_BUS_WIRED);
- if (!parent) {
+ info.parent = irq_find_matching_fwspec(&pcie->fwspec, DOMAIN_BUS_WIRED);
+ if (!info.parent) {
dev_err(pcie->dev, "failed to find parent domain\n");
return -ENXIO;
}
- parent = irq_domain_create_hierarchy(parent, 0, pcie->nvecs, fwnode,
- &apple_msi_domain_ops, pcie);
- if (!parent) {
+ if (!msi_create_parent_irq_domain(&info, &apple_msi_parent_ops)) {
dev_err(pcie->dev, "failed to create IRQ domain\n");
return -ENOMEM;
}
- irq_domain_update_bus_token(parent, DOMAIN_BUS_NEXUS);
-
- pcie->domain = pci_msi_create_irq_domain(fwnode, &apple_msi_info,
- parent);
- if (!pcie->domain) {
- dev_err(pcie->dev, "failed to create MSI domain\n");
- irq_domain_remove(parent);
- return -ENOMEM;
- }
-
return 0;
}
+static struct apple_pcie *apple_pcie_lookup(struct device *dev)
+{
+ return pci_host_bridge_priv(dev_get_drvdata(dev));
+}
+
static struct apple_pcie_port *apple_pcie_get_port(struct pci_dev *pdev)
{
struct pci_config_window *cfg = pdev->sysdata;
- struct apple_pcie *pcie = cfg->priv;
+ struct apple_pcie *pcie;
struct pci_dev *port_pdev;
struct apple_pcie_port *port;
+ pcie = apple_pcie_lookup(cfg->parent);
+ if (WARN_ON(!pcie))
+ return NULL;
+
/* Find the root port this device is on */
port_pdev = pcie_find_root_port(pdev);
@@ -667,12 +751,16 @@ static struct apple_pcie_port *apple_pcie_get_port(struct pci_dev *pdev)
return NULL;
}
-static int apple_pcie_add_device(struct apple_pcie_port *port,
- struct pci_dev *pdev)
+static int apple_pcie_enable_device(struct pci_host_bridge *bridge, struct pci_dev *pdev)
{
- u32 sid, rid = PCI_DEVID(pdev->bus->number, pdev->devfn);
+ u32 sid, rid = pci_dev_id(pdev);
+ struct apple_pcie_port *port;
int idx, err;
+ port = apple_pcie_get_port(pdev);
+ if (!port)
+ return 0;
+
dev_dbg(&pdev->dev, "added to bus %s, index %d\n",
pci_name(pdev->bus->self), port->idx);
@@ -698,18 +786,22 @@ static int apple_pcie_add_device(struct apple_pcie_port *port,
return idx >= 0 ? 0 : -ENOSPC;
}
-static void apple_pcie_release_device(struct apple_pcie_port *port,
- struct pci_dev *pdev)
+static void apple_pcie_disable_device(struct pci_host_bridge *bridge, struct pci_dev *pdev)
{
- u32 rid = PCI_DEVID(pdev->bus->number, pdev->devfn);
+ struct apple_pcie_port *port;
+ u32 rid = pci_dev_id(pdev);
int idx;
+ port = apple_pcie_get_port(pdev);
+ if (!port)
+ return;
+
mutex_lock(&port->pcie->lock);
for_each_set_bit(idx, port->sid_map, port->sid_map_sz) {
u32 val;
- val = readl_relaxed(port->base + PORT_RID2SID(idx));
+ val = readl_relaxed(port_rid2sid_addr(port, idx));
if ((val & 0xffff) == rid) {
apple_pcie_rid2sid_write(port, idx, 0);
bitmap_release_region(port->sid_map, idx, 0);
@@ -721,106 +813,71 @@ static void apple_pcie_release_device(struct apple_pcie_port *port,
mutex_unlock(&port->pcie->lock);
}
-static int apple_pcie_bus_notifier(struct notifier_block *nb,
- unsigned long action,
- void *data)
+static int apple_pcie_init(struct pci_config_window *cfg)
{
- struct device *dev = data;
- struct pci_dev *pdev = to_pci_dev(dev);
- struct apple_pcie_port *port;
- int err;
+ struct device *dev = cfg->parent;
+ struct apple_pcie *pcie;
+ int ret;
- /*
- * This is a bit ugly. We assume that if we get notified for
- * any PCI device, we must be in charge of it, and that there
- * is no other PCI controller in the whole system. It probably
- * holds for now, but who knows for how long?
- */
- port = apple_pcie_get_port(pdev);
- if (!port)
- return NOTIFY_DONE;
+ pcie = apple_pcie_lookup(dev);
+ if (WARN_ON(!pcie))
+ return -ENOENT;
- switch (action) {
- case BUS_NOTIFY_ADD_DEVICE:
- err = apple_pcie_add_device(port, pdev);
- if (err)
- return notifier_from_errno(err);
- break;
- case BUS_NOTIFY_DEL_DEVICE:
- apple_pcie_release_device(port, pdev);
- break;
- default:
- return NOTIFY_DONE;
+ for_each_available_child_of_node_scoped(dev->of_node, of_port) {
+ ret = apple_pcie_setup_port(pcie, of_port);
+ if (ret) {
+ dev_err(dev, "Port %pOF setup fail: %d\n", of_port, ret);
+ return ret;
+ }
}
- return NOTIFY_OK;
+ return 0;
}
-static struct notifier_block apple_pcie_nb = {
- .notifier_call = apple_pcie_bus_notifier,
+static const struct pci_ecam_ops apple_pcie_cfg_ecam_ops = {
+ .init = apple_pcie_init,
+ .enable_device = apple_pcie_enable_device,
+ .disable_device = apple_pcie_disable_device,
+ .pci_ops = {
+ .map_bus = pci_ecam_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+ }
};
-static int apple_pcie_init(struct pci_config_window *cfg)
+static int apple_pcie_probe(struct platform_device *pdev)
{
- struct device *dev = cfg->parent;
- struct platform_device *platform = to_platform_device(dev);
- struct device_node *of_port;
+ struct device *dev = &pdev->dev;
+ struct pci_host_bridge *bridge;
struct apple_pcie *pcie;
int ret;
- pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
- if (!pcie)
+ bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
+ if (!bridge)
return -ENOMEM;
+ pcie = pci_host_bridge_priv(bridge);
pcie->dev = dev;
-
- mutex_init(&pcie->lock);
-
- pcie->base = devm_platform_ioremap_resource(platform, 1);
+ pcie->hw = of_device_get_match_data(dev);
+ if (!pcie->hw)
+ return -ENODEV;
+ pcie->base = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(pcie->base))
return PTR_ERR(pcie->base);
- cfg->priv = pcie;
+ mutex_init(&pcie->lock);
INIT_LIST_HEAD(&pcie->ports);
- for_each_child_of_node(dev->of_node, of_port) {
- ret = apple_pcie_setup_port(pcie, of_port);
- if (ret) {
- dev_err(pcie->dev, "Port %pOF setup fail: %d\n", of_port, ret);
- of_node_put(of_port);
- return ret;
- }
- }
-
- return apple_msi_init(pcie);
-}
-
-static int apple_pcie_probe(struct platform_device *pdev)
-{
- int ret;
-
- ret = bus_register_notifier(&pci_bus_type, &apple_pcie_nb);
+ ret = apple_msi_init(pcie);
if (ret)
return ret;
- ret = pci_host_common_probe(pdev);
- if (ret)
- bus_unregister_notifier(&pci_bus_type, &apple_pcie_nb);
-
- return ret;
+ return pci_host_common_init(pdev, bridge, &apple_pcie_cfg_ecam_ops);
}
-static const struct pci_ecam_ops apple_pcie_cfg_ecam_ops = {
- .init = apple_pcie_init,
- .pci_ops = {
- .map_bus = pci_ecam_map_bus,
- .read = pci_generic_config_read,
- .write = pci_generic_config_write,
- }
-};
-
static const struct of_device_id apple_pcie_of_match[] = {
- { .compatible = "apple,pcie", .data = &apple_pcie_cfg_ecam_ops },
+ { .compatible = "apple,t6020-pcie", .data = &t602x_hw },
+ { .compatible = "apple,pcie", .data = &t8103_hw },
{ }
};
MODULE_DEVICE_TABLE(of, apple_pcie_of_match);
@@ -835,4 +892,5 @@ static struct platform_driver apple_pcie_driver = {
};
module_platform_driver(apple_pcie_driver);
+MODULE_DESCRIPTION("Apple PCIe host bridge driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/pcie-brcmstb.c b/drivers/pci/controller/pcie-brcmstb.c
index f593a422bd63..062f55690012 100644
--- a/drivers/pci/controller/pcie-brcmstb.c
+++ b/drivers/pci/controller/pcie-brcmstb.c
@@ -12,16 +12,20 @@
#include <linux/iopoll.h>
#include <linux/ioport.h>
#include <linux/irqchip/chained_irq.h>
+#include <linux/irqchip/irq-msi-lib.h>
#include <linux/irqdomain.h>
+#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/msi.h>
+#include <linux/notifier.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
+#include <linux/panic_notifier.h>
#include <linux/pci.h>
#include <linux/pci-ecam.h>
#include <linux/printk.h>
@@ -29,7 +33,9 @@
#include <linux/reset.h>
#include <linux/sizes.h>
#include <linux/slab.h>
+#include <linux/spinlock.h>
#include <linux/string.h>
+#include <linux/string_choices.h>
#include <linux/types.h>
#include "../pci.h"
@@ -40,18 +46,28 @@
/* Broadcom STB PCIe Register Offsets */
#define PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1 0x0188
#define PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1_ENDIAN_MODE_BAR2_MASK 0xc
-#define PCIE_RC_CFG_VENDOR_SPCIFIC_REG1_LITTLE_ENDIAN 0x0
+#define PCIE_RC_CFG_VENDOR_SPECIFIC_REG1_LITTLE_ENDIAN 0x0
#define PCIE_RC_CFG_PRIV1_ID_VAL3 0x043c
#define PCIE_RC_CFG_PRIV1_ID_VAL3_CLASS_CODE_MASK 0xffffff
#define PCIE_RC_CFG_PRIV1_LINK_CAPABILITY 0x04dc
-#define PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_ASPM_SUPPORT_MASK 0xc00
+#define PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_MAX_LINK_WIDTH_MASK 0x1f0
+
+#define PCIE_RC_CFG_PRIV1_ROOT_CAP 0x4f8
+#define PCIE_RC_CFG_PRIV1_ROOT_CAP_L1SS_MODE_MASK 0xf8
#define PCIE_RC_DL_MDIO_ADDR 0x1100
#define PCIE_RC_DL_MDIO_WR_DATA 0x1104
#define PCIE_RC_DL_MDIO_RD_DATA 0x1108
+#define PCIE_RC_PL_REG_PHY_CTL_1 0x1804
+#define PCIE_RC_PL_REG_PHY_CTL_1_REG_P2_POWERDOWN_ENA_NOSYNC_MASK 0x8
+
+#define PCIE_RC_PL_PHY_CTL_15 0x184c
+#define PCIE_RC_PL_PHY_CTL_15_DIS_PLL_PD_MASK 0x400000
+#define PCIE_RC_PL_PHY_CTL_15_PM_CLK_PERIOD_MASK 0xff
+
#define PCIE_MISC_MISC_CTRL 0x4008
#define PCIE_MISC_MISC_CTRL_PCIE_RCB_64B_MODE_MASK 0x80
#define PCIE_MISC_MISC_CTRL_PCIE_RCB_MPS_MODE_MASK 0x400
@@ -72,15 +88,19 @@
#define PCIE_MEM_WIN0_HI(win) \
PCIE_MISC_CPU_2_PCIE_MEM_WIN0_HI + ((win) * 8)
+/*
+ * NOTE: You may see the term "BAR" in a number of register names used by
+ * this driver. The term is an artifact of when the HW core was an
+ * endpoint device (EP). Now it is a root complex (RC), and wherever a
+ * register name contains the term "BAR" it refers to an inbound window.
+ */
+
+#define PCIE_BRCM_MAX_INBOUND_WINS 16
#define PCIE_MISC_RC_BAR1_CONFIG_LO 0x402c
#define PCIE_MISC_RC_BAR1_CONFIG_LO_SIZE_MASK 0x1f
-#define PCIE_MISC_RC_BAR2_CONFIG_LO 0x4034
-#define PCIE_MISC_RC_BAR2_CONFIG_LO_SIZE_MASK 0x1f
-#define PCIE_MISC_RC_BAR2_CONFIG_HI 0x4038
+#define PCIE_MISC_RC_BAR4_CONFIG_LO 0x40d4
-#define PCIE_MISC_RC_BAR3_CONFIG_LO 0x403c
-#define PCIE_MISC_RC_BAR3_CONFIG_LO_SIZE_MASK 0x1f
#define PCIE_MISC_MSI_BAR_CONFIG_LO 0x4044
#define PCIE_MISC_MSI_BAR_CONFIG_HI 0x4048
@@ -119,25 +139,60 @@
#define PCIE_MEM_WIN0_LIMIT_HI(win) \
PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LIMIT_HI + ((win) * 8)
-#define PCIE_MISC_HARD_PCIE_HARD_DEBUG 0x4204
#define PCIE_MISC_HARD_PCIE_HARD_DEBUG_CLKREQ_DEBUG_ENABLE_MASK 0x2
+#define PCIE_MISC_HARD_PCIE_HARD_DEBUG_L1SS_ENABLE_MASK 0x200000
#define PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK 0x08000000
#define PCIE_BMIPS_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK 0x00800000
+#define PCIE_CLKREQ_MASK \
+ (PCIE_MISC_HARD_PCIE_HARD_DEBUG_CLKREQ_DEBUG_ENABLE_MASK | \
+ PCIE_MISC_HARD_PCIE_HARD_DEBUG_L1SS_ENABLE_MASK)
+#define PCIE_MISC_UBUS_BAR1_CONFIG_REMAP 0x40ac
+#define PCIE_MISC_UBUS_BAR1_CONFIG_REMAP_ACCESS_EN_MASK BIT(0)
+#define PCIE_MISC_UBUS_BAR4_CONFIG_REMAP 0x410c
-#define PCIE_INTR2_CPU_BASE 0x4300
#define PCIE_MSI_INTR2_BASE 0x4500
-/* Offsets from PCIE_INTR2_CPU_BASE and PCIE_MSI_INTR2_BASE */
+
+/* Offsets from INTR2_CPU and MSI_INTR2 BASE offsets */
#define MSI_INT_STATUS 0x0
#define MSI_INT_CLR 0x8
#define MSI_INT_MASK_SET 0x10
#define MSI_INT_MASK_CLR 0x14
-#define PCIE_EXT_CFG_DATA 0x8000
-#define PCIE_EXT_CFG_INDEX 0x9000
+/* Error report registers */
+#define PCIE_OUTB_ERR_TREAT 0x6000
+#define PCIE_OUTB_ERR_TREAT_CONFIG 0x1
+#define PCIE_OUTB_ERR_TREAT_MEM 0x2
+#define PCIE_OUTB_ERR_VALID 0x6004
+#define PCIE_OUTB_ERR_CLEAR 0x6008
+#define PCIE_OUTB_ERR_ACC_INFO 0x600c
+#define PCIE_OUTB_ERR_ACC_INFO_CFG_ERR BIT(0)
+#define PCIE_OUTB_ERR_ACC_INFO_MEM_ERR BIT(1)
+#define PCIE_OUTB_ERR_ACC_INFO_TYPE_64 BIT(2)
+#define PCIE_OUTB_ERR_ACC_INFO_DIR_WRITE BIT(4)
+#define PCIE_OUTB_ERR_ACC_INFO_BYTE_LANES 0xff00
+#define PCIE_OUTB_ERR_ACC_ADDR 0x6010
+#define PCIE_OUTB_ERR_ACC_ADDR_BUS 0xff00000
+#define PCIE_OUTB_ERR_ACC_ADDR_DEV 0xf8000
+#define PCIE_OUTB_ERR_ACC_ADDR_FUNC 0x7000
+#define PCIE_OUTB_ERR_ACC_ADDR_REG 0xfff
+#define PCIE_OUTB_ERR_CFG_CAUSE 0x6014
+#define PCIE_OUTB_ERR_CFG_CAUSE_TIMEOUT BIT(6)
+#define PCIE_OUTB_ERR_CFG_CAUSE_ABORT BIT(5)
+#define PCIE_OUTB_ERR_CFG_CAUSE_UNSUPP_REQ BIT(4)
+#define PCIE_OUTB_ERR_CFG_CAUSE_ACC_TIMEOUT BIT(2)
+#define PCIE_OUTB_ERR_CFG_CAUSE_ACC_DISABLED BIT(1)
+#define PCIE_OUTB_ERR_CFG_CAUSE_ACC_64BIT BIT(0)
+#define PCIE_OUTB_ERR_MEM_ADDR_LO 0x6018
+#define PCIE_OUTB_ERR_MEM_ADDR_HI 0x601c
+#define PCIE_OUTB_ERR_MEM_CAUSE 0x6020
+#define PCIE_OUTB_ERR_MEM_CAUSE_TIMEOUT BIT(6)
+#define PCIE_OUTB_ERR_MEM_CAUSE_ABORT BIT(5)
+#define PCIE_OUTB_ERR_MEM_CAUSE_UNSUPP_REQ BIT(4)
+#define PCIE_OUTB_ERR_MEM_CAUSE_ACC_DISABLED BIT(1)
+#define PCIE_OUTB_ERR_MEM_CAUSE_BAD_ADDR BIT(0)
#define PCIE_RGR1_SW_INIT_1_PERST_MASK 0x1
-#define PCIE_RGR1_SW_INIT_1_PERST_SHIFT 0x0
#define RGR1_SW_INIT_1_INIT_GENERIC_MASK 0x2
#define RGR1_SW_INIT_1_INIT_GENERIC_SHIFT 0x1
@@ -161,8 +216,9 @@
#define MDIO_PORT0 0x0
#define MDIO_DATA_MASK 0x7fffffff
#define MDIO_PORT_MASK 0xf0000
+#define MDIO_PORT_EXT_MASK 0x200000
#define MDIO_REGAD_MASK 0xffff
-#define MDIO_CMD_MASK 0xfff00000
+#define MDIO_CMD_MASK 0x00100000
#define MDIO_CMD_READ 0x1
#define MDIO_CMD_WRITE 0x0
#define MDIO_DATA_DONE_MASK 0x80000000
@@ -178,9 +234,11 @@
#define SSC_STATUS_PLL_LOCK_MASK 0x800
#define PCIE_BRCM_MAX_MEMC 3
-#define IDX_ADDR(pcie) (pcie->reg_offsets[EXT_CFG_INDEX])
-#define DATA_ADDR(pcie) (pcie->reg_offsets[EXT_CFG_DATA])
-#define PCIE_RGR1_SW_INIT_1(pcie) (pcie->reg_offsets[RGR1_SW_INIT_1])
+#define IDX_ADDR(pcie) ((pcie)->cfg->offsets[EXT_CFG_INDEX])
+#define DATA_ADDR(pcie) ((pcie)->cfg->offsets[EXT_CFG_DATA])
+#define PCIE_RGR1_SW_INIT_1(pcie) ((pcie)->cfg->offsets[RGR1_SW_INIT_1])
+#define HARD_DEBUG(pcie) ((pcie)->cfg->offsets[PCIE_HARD_DEBUG])
+#define INTR2_CPU_BASE(pcie) ((pcie)->cfg->offsets[PCIE_INTR2_CPU_BASE])
/* Rescal registers */
#define PCIE_DVT_PMU_PCIE_PHY_CTRL 0xc700
@@ -199,27 +257,45 @@ enum {
RGR1_SW_INIT_1,
EXT_CFG_INDEX,
EXT_CFG_DATA,
+ PCIE_HARD_DEBUG,
+ PCIE_INTR2_CPU_BASE,
};
-enum {
- RGR1_SW_INIT_1_INIT_MASK,
- RGR1_SW_INIT_1_INIT_SHIFT,
-};
-
-enum pcie_type {
+enum pcie_soc_base {
GENERIC,
- BCM7425,
- BCM7435,
+ BCM2711,
BCM4908,
BCM7278,
- BCM2711,
+ BCM7425,
+ BCM7435,
+ BCM7712,
+};
+
+struct inbound_win {
+ u64 size;
+ u64 pci_offset;
+ u64 cpu_addr;
};
+/*
+ * The RESCAL block is tied to PCIe controller #1, regardless of the number of
+ * controllers. Turning off PCIe controller #1 prevents access to the RESCAL
+ * register blocks, so no other controller can access this register space;
+ * depending upon the bus fabric, we may get a timeout (UBUS/GISB) or a
+ * hang (AXI).
+ */
+#define CFG_QUIRK_AVOID_BRIDGE_SHUTDOWN BIT(0)
+
struct pcie_cfg_data {
const int *offsets;
- const enum pcie_type type;
- void (*perst_set)(struct brcm_pcie *pcie, u32 val);
- void (*bridge_sw_init_set)(struct brcm_pcie *pcie, u32 val);
+ const enum pcie_soc_base soc_base;
+ const bool has_phy;
+ const u32 quirks;
+ u8 num_inbound_wins;
+ int (*perst_set)(struct brcm_pcie *pcie, u32 val);
+ int (*bridge_sw_init_set)(struct brcm_pcie *pcie, u32 val);
+ int (*post_setup)(struct brcm_pcie *pcie);
+ bool has_err_report;
};
struct subdev_regulators {
@@ -231,7 +307,6 @@ struct brcm_msi {
struct device *dev;
void __iomem *base;
struct device_node *np;
- struct irq_domain *msi_domain;
struct irq_domain *inner_domain;
struct mutex lock; /* guards the alloc/free operations */
u64 target_addr;
@@ -255,22 +330,43 @@ struct brcm_pcie {
int gen;
u64 msi_target_addr;
struct brcm_msi *msi;
- const int *reg_offsets;
- enum pcie_type type;
struct reset_control *rescal;
struct reset_control *perst_reset;
+ struct reset_control *bridge_reset;
+ struct reset_control *swinit_reset;
int num_memc;
u64 memc_size[PCIE_BRCM_MAX_MEMC];
u32 hw_rev;
- void (*perst_set)(struct brcm_pcie *pcie, u32 val);
- void (*bridge_sw_init_set)(struct brcm_pcie *pcie, u32 val);
struct subdev_regulators *sr;
bool ep_wakeup_capable;
+ const struct pcie_cfg_data *cfg;
+ bool bridge_in_reset;
+ struct notifier_block die_notifier;
+ struct notifier_block panic_notifier;
+ spinlock_t bridge_lock;
};
static inline bool is_bmips(const struct brcm_pcie *pcie)
{
- return pcie->type == BCM7435 || pcie->type == BCM7425;
+ return pcie->cfg->soc_base == BCM7435 || pcie->cfg->soc_base == BCM7425;
+}
+
+static int brcm_pcie_bridge_sw_init_set(struct brcm_pcie *pcie, u32 val)
+{
+ unsigned long flags;
+ int ret;
+
+ if (pcie->cfg->has_err_report)
+ spin_lock_irqsave(&pcie->bridge_lock, flags);
+
+ ret = pcie->cfg->bridge_sw_init_set(pcie, val);
+ /* If we fail, assume the bridge is in reset (off) */
+ pcie->bridge_in_reset = ret ? true : val;
+
+ if (pcie->cfg->has_err_report)
+ spin_unlock_irqrestore(&pcie->bridge_lock, flags);
+
+ return ret;
}
/*
@@ -284,8 +380,8 @@ static int brcm_pcie_encode_ibar_size(u64 size)
if (log2_in >= 12 && log2_in <= 15)
/* Covers 4KB to 32KB (inclusive) */
return (log2_in - 12) + 0x1c;
- else if (log2_in >= 16 && log2_in <= 35)
- /* Covers 64KB to 32GB, (inclusive) */
+ else if (log2_in >= 16 && log2_in <= 36)
+		/* Covers 64KB to 64GB (inclusive) */
return log2_in - 15;
/* Something is awry so disable */
return 0;
@@ -295,6 +391,7 @@ static u32 brcm_pcie_mdio_form_pkt(int port, int regad, int cmd)
{
u32 pkt = 0;
+ pkt |= FIELD_PREP(MDIO_PORT_EXT_MASK, port >> 4);
pkt |= FIELD_PREP(MDIO_PORT_MASK, port);
pkt |= FIELD_PREP(MDIO_REGAD_MASK, regad);
pkt |= FIELD_PREP(MDIO_CMD_MASK, cmd);
@@ -330,7 +427,7 @@ static int brcm_pcie_mdio_write(void __iomem *base, u8 port,
readl(base + PCIE_RC_DL_MDIO_ADDR);
writel(MDIO_DATA_DONE_MASK | wrdata, base + PCIE_RC_DL_MDIO_WR_DATA);
- err = readw_poll_timeout_atomic(base + PCIE_RC_DL_MDIO_WR_DATA, data,
+ err = readl_poll_timeout_atomic(base + PCIE_RC_DL_MDIO_WR_DATA, data,
MDIO_WT_DONE(data), 10, 100);
return err;
}
@@ -378,17 +475,17 @@ static int brcm_pcie_set_ssc(struct brcm_pcie *pcie)
static void brcm_pcie_set_gen(struct brcm_pcie *pcie, int gen)
{
u16 lnkctl2 = readw(pcie->base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKCTL2);
- u32 lnkcap = readl(pcie->base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKCAP);
+ u32 lnkcap = readl(pcie->base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY);
- lnkcap = (lnkcap & ~PCI_EXP_LNKCAP_SLS) | gen;
- writel(lnkcap, pcie->base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKCAP);
+ u32p_replace_bits(&lnkcap, gen, PCI_EXP_LNKCAP_SLS);
+ writel(lnkcap, pcie->base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY);
- lnkctl2 = (lnkctl2 & ~0xf) | gen;
+ u16p_replace_bits(&lnkctl2, gen, PCI_EXP_LNKCTL2_TLS);
writew(lnkctl2, pcie->base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKCTL2);
}
static void brcm_pcie_set_outbound_win(struct brcm_pcie *pcie,
- unsigned int win, u64 cpu_addr,
+ u8 win, u64 cpu_addr,
u64 pcie_addr, u64 size)
{
u32 cpu_addr_mb_high, limit_addr_mb_high;
@@ -431,18 +528,20 @@ static void brcm_pcie_set_outbound_win(struct brcm_pcie *pcie,
writel(tmp, pcie->base + PCIE_MEM_WIN0_LIMIT_HI(win));
}
-static struct irq_chip brcm_msi_irq_chip = {
- .name = "BRCM STB PCIe MSI",
- .irq_ack = irq_chip_ack_parent,
- .irq_mask = pci_msi_mask_irq,
- .irq_unmask = pci_msi_unmask_irq,
-};
+#define BRCM_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
+ MSI_FLAG_USE_DEF_CHIP_OPS | \
+ MSI_FLAG_NO_AFFINITY)
+
+#define BRCM_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \
+ MSI_FLAG_MULTI_PCI_MSI)
-static struct msi_domain_info brcm_msi_domain_info = {
- /* Multi MSI is supported by the controller, but not by this driver */
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_MULTI_PCI_MSI),
- .chip = &brcm_msi_irq_chip,
+static const struct msi_parent_ops brcm_msi_parent_ops = {
+ .required_flags = BRCM_MSI_FLAGS_REQUIRED,
+ .supported_flags = BRCM_MSI_FLAGS_SUPPORTED,
+ .bus_select_token = DOMAIN_BUS_PCI_MSI,
+ .chip_flags = MSI_CHIP_FLAG_SET_ACK,
+ .prefix = "BRCM-",
+ .init_dev_msi_info = msi_lib_init_dev_msi_info,
};
static void brcm_pcie_msi_isr(struct irq_desc *desc)
@@ -479,12 +578,6 @@ static void brcm_msi_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
msg->data = (0xffff & PCIE_MISC_MSI_DATA_CONFIG_VAL_32) | data->hwirq;
}
-static int brcm_msi_set_affinity(struct irq_data *irq_data,
- const struct cpumask *mask, bool force)
-{
- return -EINVAL;
-}
-
static void brcm_msi_ack_irq(struct irq_data *data)
{
struct brcm_msi *msi = irq_data_get_irq_chip_data(data);
@@ -497,7 +590,6 @@ static void brcm_msi_ack_irq(struct irq_data *data)
static struct irq_chip brcm_msi_bottom_irq_chip = {
.name = "BRCM STB MSI",
.irq_compose_msi_msg = brcm_msi_compose_msi_msg,
- .irq_set_affinity = brcm_msi_set_affinity,
.irq_ack = brcm_msi_ack_irq,
};
@@ -533,7 +625,7 @@ static int brcm_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
return hwirq;
for (i = 0; i < nr_irqs; i++)
- irq_domain_set_info(domain, virq + i, hwirq + i,
+ irq_domain_set_info(domain, virq + i, (irq_hw_number_t)hwirq + i,
&brcm_msi_bottom_irq_chip, domain->host_data,
handle_edge_irq, NULL, NULL);
return 0;
@@ -555,21 +647,18 @@ static const struct irq_domain_ops msi_domain_ops = {
static int brcm_allocate_domains(struct brcm_msi *msi)
{
- struct fwnode_handle *fwnode = of_node_to_fwnode(msi->np);
struct device *dev = msi->dev;
- msi->inner_domain = irq_domain_add_linear(NULL, msi->nr, &msi_domain_ops, msi);
- if (!msi->inner_domain) {
- dev_err(dev, "failed to create IRQ domain\n");
- return -ENOMEM;
- }
+ struct irq_domain_info info = {
+ .fwnode = of_fwnode_handle(msi->np),
+ .ops = &msi_domain_ops,
+ .host_data = msi,
+ .size = msi->nr,
+ };
- msi->msi_domain = pci_msi_create_irq_domain(fwnode,
- &brcm_msi_domain_info,
- msi->inner_domain);
- if (!msi->msi_domain) {
+ msi->inner_domain = msi_create_parent_irq_domain(&info, &brcm_msi_parent_ops);
+ if (!msi->inner_domain) {
dev_err(dev, "failed to create MSI domain\n");
- irq_domain_remove(msi->inner_domain);
return -ENOMEM;
}
@@ -578,7 +667,6 @@ static int brcm_allocate_domains(struct brcm_msi *msi)
static void brcm_free_domains(struct brcm_msi *msi)
{
- irq_domain_remove(msi->msi_domain);
irq_domain_remove(msi->inner_domain);
}
@@ -644,7 +732,7 @@ static int brcm_pcie_enable_msi(struct brcm_pcie *pcie)
BUILD_BUG_ON(BRCM_INT_PCI_MSI_LEGACY_NR > BRCM_INT_PCI_MSI_NR);
if (msi->legacy) {
- msi->intr_base = msi->base + PCIE_INTR2_CPU_BASE;
+ msi->intr_base = msi->base + INTR2_CPU_BASE(pcie);
msi->nr = BRCM_INT_PCI_MSI_LEGACY_NR;
msi->legacy_shift = 24;
} else {
@@ -700,8 +788,8 @@ static void __iomem *brcm_pcie_map_bus(struct pci_bus *bus,
/* For devices, write to the config space index register */
idx = PCIE_ECAM_OFFSET(bus->number, devfn, 0);
- writel(idx, pcie->base + PCIE_EXT_CFG_INDEX);
- return base + PCIE_EXT_CFG_DATA + PCIE_ECAM_REG(where);
+ writel(idx, base + IDX_ADDR(pcie));
+ return base + DATA_ADDR(pcie) + PCIE_ECAM_REG(where);
}
static void __iomem *brcm7425_pcie_map_bus(struct pci_bus *bus,
@@ -725,17 +813,33 @@ static void __iomem *brcm7425_pcie_map_bus(struct pci_bus *bus,
return base + DATA_ADDR(pcie);
}
-static void brcm_pcie_bridge_sw_init_set_generic(struct brcm_pcie *pcie, u32 val)
+static int brcm_pcie_bridge_sw_init_set_generic(struct brcm_pcie *pcie, u32 val)
{
- u32 tmp, mask = RGR1_SW_INIT_1_INIT_GENERIC_MASK;
+ u32 tmp, mask = RGR1_SW_INIT_1_INIT_GENERIC_MASK;
u32 shift = RGR1_SW_INIT_1_INIT_GENERIC_SHIFT;
+ int ret = 0;
+
+ if (pcie->bridge_reset) {
+ if (val)
+ ret = reset_control_assert(pcie->bridge_reset);
+ else
+ ret = reset_control_deassert(pcie->bridge_reset);
+
+ if (ret)
+ dev_err(pcie->dev, "failed to %s 'bridge' reset, err=%d\n",
+ val ? "assert" : "deassert", ret);
+
+ return ret;
+ }
tmp = readl(pcie->base + PCIE_RGR1_SW_INIT_1(pcie));
tmp = (tmp & ~mask) | ((val << shift) & mask);
writel(tmp, pcie->base + PCIE_RGR1_SW_INIT_1(pcie));
+
+ return ret;
}
-static void brcm_pcie_bridge_sw_init_set_7278(struct brcm_pcie *pcie, u32 val)
+static int brcm_pcie_bridge_sw_init_set_7278(struct brcm_pcie *pcie, u32 val)
{
u32 tmp, mask = RGR1_SW_INIT_1_INIT_7278_MASK;
u32 shift = RGR1_SW_INIT_1_INIT_7278_SHIFT;
@@ -743,20 +847,29 @@ static void brcm_pcie_bridge_sw_init_set_7278(struct brcm_pcie *pcie, u32 val)
tmp = readl(pcie->base + PCIE_RGR1_SW_INIT_1(pcie));
tmp = (tmp & ~mask) | ((val << shift) & mask);
writel(tmp, pcie->base + PCIE_RGR1_SW_INIT_1(pcie));
+
+ return 0;
}
-static void brcm_pcie_perst_set_4908(struct brcm_pcie *pcie, u32 val)
+static int brcm_pcie_perst_set_4908(struct brcm_pcie *pcie, u32 val)
{
+ int ret;
+
if (WARN_ONCE(!pcie->perst_reset, "missing PERST# reset controller\n"))
- return;
+ return -EINVAL;
if (val)
- reset_control_assert(pcie->perst_reset);
+ ret = reset_control_assert(pcie->perst_reset);
else
- reset_control_deassert(pcie->perst_reset);
+ ret = reset_control_deassert(pcie->perst_reset);
+
+ if (ret)
+ dev_err(pcie->dev, "failed to %s 'perst' reset, err=%d\n",
+ val ? "assert" : "deassert", ret);
+ return ret;
}
-static void brcm_pcie_perst_set_7278(struct brcm_pcie *pcie, u32 val)
+static int brcm_pcie_perst_set_7278(struct brcm_pcie *pcie, u32 val)
{
u32 tmp;
@@ -764,34 +877,110 @@ static void brcm_pcie_perst_set_7278(struct brcm_pcie *pcie, u32 val)
tmp = readl(pcie->base + PCIE_MISC_PCIE_CTRL);
u32p_replace_bits(&tmp, !val, PCIE_MISC_PCIE_CTRL_PCIE_PERSTB_MASK);
writel(tmp, pcie->base + PCIE_MISC_PCIE_CTRL);
+
+ return 0;
}
-static void brcm_pcie_perst_set_generic(struct brcm_pcie *pcie, u32 val)
+static int brcm_pcie_perst_set_generic(struct brcm_pcie *pcie, u32 val)
{
u32 tmp;
tmp = readl(pcie->base + PCIE_RGR1_SW_INIT_1(pcie));
u32p_replace_bits(&tmp, val, PCIE_RGR1_SW_INIT_1_PERST_MASK);
writel(tmp, pcie->base + PCIE_RGR1_SW_INIT_1(pcie));
+
+ return 0;
+}
+
+static int brcm_pcie_post_setup_bcm2712(struct brcm_pcie *pcie)
+{
+ static const u16 data[] = { 0x50b9, 0xbda1, 0x0094, 0x97b4, 0x5030,
+ 0x5030, 0x0007 };
+ static const u8 regs[] = { 0x16, 0x17, 0x18, 0x19, 0x1b, 0x1c, 0x1e };
+ int ret, i;
+ u32 tmp;
+
+ /* Allow a 54MHz (xosc) refclk source */
+ ret = brcm_pcie_mdio_write(pcie->base, MDIO_PORT0, SET_ADDR_OFFSET, 0x1600);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < ARRAY_SIZE(regs); i++) {
+ ret = brcm_pcie_mdio_write(pcie->base, MDIO_PORT0, regs[i], data[i]);
+ if (ret < 0)
+ return ret;
+ }
+
+ usleep_range(100, 200);
+
+ /*
+	 * Set L1SS sub-state timers to avoid lengthy state transitions;
+	 * the PM clock period is 18.52ns (1/54MHz, rounded down).
+ */
+ tmp = readl(pcie->base + PCIE_RC_PL_PHY_CTL_15);
+ tmp &= ~PCIE_RC_PL_PHY_CTL_15_PM_CLK_PERIOD_MASK;
+ tmp |= 0x12;
+ writel(tmp, pcie->base + PCIE_RC_PL_PHY_CTL_15);
+
+ return 0;
+}
+
+static void add_inbound_win(struct inbound_win *b, u8 *count, u64 size,
+ u64 cpu_addr, u64 pci_offset)
+{
+ b->size = size;
+ b->cpu_addr = cpu_addr;
+ b->pci_offset = pci_offset;
+ (*count)++;
}
-static int brcm_pcie_get_rc_bar2_size_and_offset(struct brcm_pcie *pcie,
- u64 *rc_bar2_size,
- u64 *rc_bar2_offset)
+static int brcm_pcie_get_inbound_wins(struct brcm_pcie *pcie,
+ struct inbound_win inbound_wins[])
{
struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
+ u64 pci_offset, cpu_addr, size = 0, tot_size = 0;
struct resource_entry *entry;
struct device *dev = pcie->dev;
u64 lowest_pcie_addr = ~(u64)0;
int ret, i = 0;
- u64 size = 0;
+ u8 n = 0;
+
+ /*
+	 * The HW registers (and PCIe) use one-based numbering for BARs. As such,
+ * we have inbound_wins[0] unused and BAR1 starts at inbound_wins[1].
+ */
+ struct inbound_win *b_begin = &inbound_wins[1];
+ struct inbound_win *b = b_begin;
+
+ /*
+	 * STB chips other than the 7712 disable the first inbound window by
+	 * default. Rather than being mapped to system memory, it is mapped to
+	 * the internal registers of the SoC. This feature is deprecated, has
+ * security considerations, and is not implemented in our modern
+ * SoCs.
+ */
+ if (pcie->cfg->soc_base != BCM7712)
+ add_inbound_win(b++, &n, 0, 0, 0);
resource_list_for_each_entry(entry, &bridge->dma_ranges) {
- u64 pcie_beg = entry->res->start - entry->offset;
+ u64 pcie_start = entry->res->start - entry->offset;
+ u64 cpu_start = entry->res->start;
- size += entry->res->end - entry->res->start + 1;
- if (pcie_beg < lowest_pcie_addr)
- lowest_pcie_addr = pcie_beg;
+ size = resource_size(entry->res);
+ tot_size += size;
+ if (pcie_start < lowest_pcie_addr)
+ lowest_pcie_addr = pcie_start;
+ /*
+ * 7712 and newer chips may have many BARs, with each
+ * offering a non-overlapping viewport to system memory.
+		 * That being said, each BAR's size must still be a power of
+ * two.
+ */
+ if (pcie->cfg->soc_base == BCM7712)
+ add_inbound_win(b++, &n, size, cpu_start, pcie_start);
+
+ if (n > pcie->cfg->num_inbound_wins)
+ break;
}
if (lowest_pcie_addr == ~(u64)0) {
@@ -799,13 +988,20 @@ static int brcm_pcie_get_rc_bar2_size_and_offset(struct brcm_pcie *pcie,
return -EINVAL;
}
+ /*
+ * 7712 and newer chips do not have an internal memory mapping system
+	 * that enables multiple memory controllers. As such, we can return
+	 * now without doing any special configuration.
+ */
+ if (pcie->cfg->soc_base == BCM7712)
+ return n;
+
ret = of_property_read_variable_u64_array(pcie->np, "brcm,scb-sizes", pcie->memc_size, 1,
PCIE_BRCM_MAX_MEMC);
-
if (ret <= 0) {
/* Make an educated guess */
pcie->num_memc = 1;
- pcie->memc_size[0] = 1ULL << fls64(size - 1);
+ pcie->memc_size[0] = 1ULL << fls64(tot_size - 1);
} else {
pcie->num_memc = ret;
}
@@ -814,10 +1010,15 @@ static int brcm_pcie_get_rc_bar2_size_and_offset(struct brcm_pcie *pcie,
for (i = 0, size = 0; i < pcie->num_memc; i++)
size += pcie->memc_size[i];
- /* System memory starts at this address in PCIe-space */
- *rc_bar2_offset = lowest_pcie_addr;
- /* The sum of all memc views must also be a power of 2 */
- *rc_bar2_size = 1ULL << fls64(size - 1);
+ /* Our HW mandates that the window size must be a power of 2 */
+ size = 1ULL << fls64(size - 1);
+
+ /*
+ * For STB chips, the BAR2 cpu_addr is hardwired to the start
+ * of system memory, so we set it to 0.
+ */
+ cpu_addr = 0;
+ pci_offset = lowest_pcie_addr;
/*
* We validate the inbound memory view even though we should trust
@@ -831,7 +1032,7 @@ static int brcm_pcie_get_rc_bar2_size_and_offset(struct brcm_pcie *pcie,
*
* The PCIe host controller by design must set the inbound viewport to
* be a contiguous arrangement of all of the system's memory. In
- * addition, its size mut be a power of two. To further complicate
+ * addition, its size must be a power of two. To further complicate
* matters, the viewport must start on a pcie-address that is aligned
* on a multiple of its size. If a portion of the viewport does not
* represent system memory -- e.g. 3GB of memory requires a 4GB
@@ -852,39 +1053,119 @@ static int brcm_pcie_get_rc_bar2_size_and_offset(struct brcm_pcie *pcie,
* outbound memory @ 3GB). So instead it will start at the 1x
* multiple of its size
*/
- if (!*rc_bar2_size || (*rc_bar2_offset & (*rc_bar2_size - 1)) ||
- (*rc_bar2_offset < SZ_4G && *rc_bar2_offset > SZ_2G)) {
- dev_err(dev, "Invalid rc_bar2_offset/size: size 0x%llx, off 0x%llx\n",
- *rc_bar2_size, *rc_bar2_offset);
+ if (!size || (pci_offset & (size - 1)) ||
+ (pci_offset < SZ_4G && pci_offset > SZ_2G)) {
+ dev_err(dev, "Invalid inbound_win2_offset/size: size 0x%llx, off 0x%llx\n",
+ size, pci_offset);
return -EINVAL;
}
- return 0;
+ /* Enable inbound window 2, the main inbound window for STB chips */
+ add_inbound_win(b++, &n, size, cpu_addr, pci_offset);
+
+ /*
+	 * Disable inbound window 3. On some chips it presents the same
+	 * window as #2, but the data appears with a settable endianness.
+ */
+ add_inbound_win(b++, &n, 0, 0, 0);
+
+ return n;
+}
+
+static u32 brcm_bar_reg_offset(int bar)
+{
+ if (bar <= 3)
+ return PCIE_MISC_RC_BAR1_CONFIG_LO + 8 * (bar - 1);
+ else
+ return PCIE_MISC_RC_BAR4_CONFIG_LO + 8 * (bar - 4);
+}
+
+static u32 brcm_ubus_reg_offset(int bar)
+{
+ if (bar <= 3)
+ return PCIE_MISC_UBUS_BAR1_CONFIG_REMAP + 8 * (bar - 1);
+ else
+ return PCIE_MISC_UBUS_BAR4_CONFIG_REMAP + 8 * (bar - 4);
+}
+
+static void set_inbound_win_registers(struct brcm_pcie *pcie,
+ const struct inbound_win *inbound_wins,
+ u8 num_inbound_wins)
+{
+ void __iomem *base = pcie->base;
+ int i;
+
+ for (i = 1; i <= num_inbound_wins; i++) {
+ u64 pci_offset = inbound_wins[i].pci_offset;
+ u64 cpu_addr = inbound_wins[i].cpu_addr;
+ u64 size = inbound_wins[i].size;
+ u32 reg_offset = brcm_bar_reg_offset(i);
+ u32 tmp = lower_32_bits(pci_offset);
+
+ u32p_replace_bits(&tmp, brcm_pcie_encode_ibar_size(size),
+ PCIE_MISC_RC_BAR1_CONFIG_LO_SIZE_MASK);
+
+ /* Write low */
+ writel_relaxed(tmp, base + reg_offset);
+ /* Write high */
+ writel_relaxed(upper_32_bits(pci_offset), base + reg_offset + 4);
+
+ /*
+ * Most STB chips:
+ * Do nothing.
+ * 7712:
+ * All of their BARs need to be set.
+ */
+ if (pcie->cfg->soc_base == BCM7712) {
+ /* BUS remap register settings */
+ reg_offset = brcm_ubus_reg_offset(i);
+ tmp = lower_32_bits(cpu_addr) & ~0xfff;
+ tmp |= PCIE_MISC_UBUS_BAR1_CONFIG_REMAP_ACCESS_EN_MASK;
+ writel_relaxed(tmp, base + reg_offset);
+ tmp = upper_32_bits(cpu_addr);
+ writel_relaxed(tmp, base + reg_offset + 4);
+ }
+ }
}
static int brcm_pcie_setup(struct brcm_pcie *pcie)
{
- u64 rc_bar2_offset, rc_bar2_size;
+ struct inbound_win inbound_wins[PCIE_BRCM_MAX_INBOUND_WINS];
void __iomem *base = pcie->base;
struct pci_host_bridge *bridge;
struct resource_entry *entry;
- u32 tmp, burst, aspm_support;
- int num_out_wins = 0;
- int ret, memc;
+ u32 tmp, burst, num_lanes, num_lanes_cap;
+ u8 num_out_wins = 0;
+ int num_inbound_wins = 0;
+ int memc, ret;
/* Reset the bridge */
- pcie->bridge_sw_init_set(pcie, 1);
+ ret = brcm_pcie_bridge_sw_init_set(pcie, 1);
+ if (ret)
+ return ret;
+
+ /* Ensure that PERST# is asserted; some bootloaders may deassert it. */
+ if (pcie->cfg->soc_base == BCM2711) {
+ ret = pcie->cfg->perst_set(pcie, 1);
+ if (ret) {
+ pcie->cfg->bridge_sw_init_set(pcie, 0);
+ return ret;
+ }
+ }
+
usleep_range(100, 200);
/* Take the bridge out of reset */
- pcie->bridge_sw_init_set(pcie, 0);
+ ret = brcm_pcie_bridge_sw_init_set(pcie, 0);
+ if (ret)
+ return ret;
- tmp = readl(base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
+ tmp = readl(base + HARD_DEBUG(pcie));
if (is_bmips(pcie))
tmp &= ~PCIE_BMIPS_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK;
else
tmp &= ~PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK;
- writel(tmp, base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
+ writel(tmp, base + HARD_DEBUG(pcie));
/* Wait for SerDes to be stable */
usleep_range(100, 200);
@@ -895,9 +1176,9 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie)
*/
if (is_bmips(pcie))
burst = 0x1; /* 256 bytes */
- else if (pcie->type == BCM2711)
+ else if (pcie->cfg->soc_base == BCM2711)
burst = 0x0; /* 128 bytes */
- else if (pcie->type == BCM7278)
+ else if (pcie->cfg->soc_base == BCM7278)
burst = 0x3; /* 512 bytes */
else
burst = 0x2; /* 512 bytes */
@@ -914,17 +1195,16 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie)
u32p_replace_bits(&tmp, 1, PCIE_MISC_MISC_CTRL_PCIE_RCB_64B_MODE_MASK);
writel(tmp, base + PCIE_MISC_MISC_CTRL);
- ret = brcm_pcie_get_rc_bar2_size_and_offset(pcie, &rc_bar2_size,
- &rc_bar2_offset);
- if (ret)
- return ret;
+ num_inbound_wins = brcm_pcie_get_inbound_wins(pcie, inbound_wins);
+ if (num_inbound_wins < 0)
+ return num_inbound_wins;
+
+ set_inbound_win_registers(pcie, inbound_wins, num_inbound_wins);
- tmp = lower_32_bits(rc_bar2_offset);
- u32p_replace_bits(&tmp, brcm_pcie_encode_ibar_size(rc_bar2_size),
- PCIE_MISC_RC_BAR2_CONFIG_LO_SIZE_MASK);
- writel(tmp, base + PCIE_MISC_RC_BAR2_CONFIG_LO);
- writel(upper_32_bits(rc_bar2_offset),
- base + PCIE_MISC_RC_BAR2_CONFIG_HI);
+ if (!brcm_pcie_rc_mode(pcie)) {
+ dev_err(pcie->dev, "PCIe RC controller misconfigured as Endpoint\n");
+ return -EINVAL;
+ }
tmp = readl(base + PCIE_MISC_MISC_CTRL);
for (memc = 0; memc < pcie->num_memc; memc++) {
@@ -946,35 +1226,40 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie)
* 4GB or when the inbound area is smaller than 4GB (taking into
* account the rounding-up we're forced to perform).
*/
- if (rc_bar2_offset >= SZ_4G || (rc_bar2_size + rc_bar2_offset) < SZ_4G)
+ if (inbound_wins[2].pci_offset >= SZ_4G ||
+ (inbound_wins[2].size + inbound_wins[2].pci_offset) < SZ_4G)
pcie->msi_target_addr = BRCM_MSI_TARGET_ADDR_LT_4GB;
else
pcie->msi_target_addr = BRCM_MSI_TARGET_ADDR_GT_4GB;
- if (!brcm_pcie_rc_mode(pcie)) {
- dev_err(pcie->dev, "PCIe RC controller misconfigured as Endpoint\n");
- return -EINVAL;
- }
-
- /* disable the PCIe->GISB memory window (RC_BAR1) */
- tmp = readl(base + PCIE_MISC_RC_BAR1_CONFIG_LO);
- tmp &= ~PCIE_MISC_RC_BAR1_CONFIG_LO_SIZE_MASK;
- writel(tmp, base + PCIE_MISC_RC_BAR1_CONFIG_LO);
-
- /* disable the PCIe->SCB memory window (RC_BAR3) */
- tmp = readl(base + PCIE_MISC_RC_BAR3_CONFIG_LO);
- tmp &= ~PCIE_MISC_RC_BAR3_CONFIG_LO_SIZE_MASK;
- writel(tmp, base + PCIE_MISC_RC_BAR3_CONFIG_LO);
/* Don't advertise L0s capability if 'aspm-no-l0s' */
- aspm_support = PCIE_LINK_STATE_L1;
- if (!of_property_read_bool(pcie->np, "aspm-no-l0s"))
- aspm_support |= PCIE_LINK_STATE_L0S;
tmp = readl(base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY);
- u32p_replace_bits(&tmp, aspm_support,
- PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_ASPM_SUPPORT_MASK);
+ if (of_property_read_bool(pcie->np, "aspm-no-l0s"))
+ tmp &= ~PCI_EXP_LNKCAP_ASPM_L0S;
writel(tmp, base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY);
+ /* 'tmp' still holds the contents of PRIV1_LINK_CAPABILITY */
+ num_lanes_cap = u32_get_bits(tmp, PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_MAX_LINK_WIDTH_MASK);
+ num_lanes = 0;
+
+ /*
+ * Use hardware negotiated Max Link Width value by default. If the
+ * "num-lanes" DT property is present, assume that the chip's default
+ * link width capability information is incorrect/undesired and use the
+ * specified value instead.
+ */
+ if (!of_property_read_u32(pcie->np, "num-lanes", &num_lanes) &&
+ num_lanes && num_lanes <= 4 && num_lanes_cap != num_lanes) {
+ u32p_replace_bits(&tmp, num_lanes,
+ PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_MAX_LINK_WIDTH_MASK);
+ writel(tmp, base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY);
+ tmp = readl(base + PCIE_RC_PL_REG_PHY_CTL_1);
+ u32p_replace_bits(&tmp, 1,
+ PCIE_RC_PL_REG_PHY_CTL_1_REG_P2_POWERDOWN_ENA_NOSYNC_MASK);
+ writel(tmp, base + PCIE_RC_PL_REG_PHY_CTL_1);
+ }
+
/*
* For config space accesses on the RC, show the right class for
* a PCIe-PCIe bridge (the default setting is to be EP mode).
@@ -1015,32 +1300,120 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie)
num_out_wins++;
}
- /* PCIe->SCB endian mode for BAR */
+ /* PCIe->SCB endian mode for inbound window */
tmp = readl(base + PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1);
- u32p_replace_bits(&tmp, PCIE_RC_CFG_VENDOR_SPCIFIC_REG1_LITTLE_ENDIAN,
+ u32p_replace_bits(&tmp, PCIE_RC_CFG_VENDOR_SPECIFIC_REG1_LITTLE_ENDIAN,
PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1_ENDIAN_MODE_BAR2_MASK);
writel(tmp, base + PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1);
+ if (pcie->cfg->post_setup) {
+ ret = pcie->cfg->post_setup(pcie);
+ if (ret < 0)
+ return ret;
+ }
+
return 0;
}
+/*
+ * This extends the timeout period for an access to an internal bus. This
+ * access timeout may occur during L1SS sleep periods, even without the
+ * presence of a PCIe access.
+ */
+static void brcm_extend_rbus_timeout(struct brcm_pcie *pcie)
+{
+ /* TIMEOUT register is two registers before RGR1_SW_INIT_1 */
+ const unsigned int REG_OFFSET = PCIE_RGR1_SW_INIT_1(pcie) - 8;
+ u32 timeout_us = 4000000; /* 4 seconds, our setting for L1SS */
+
+ /* 7712 does not have this (RGR1) timer */
+ if (pcie->cfg->soc_base == BCM7712)
+ return;
+
+ /* Each unit in timeout register is 1/216,000,000 seconds */
+ writel(216 * timeout_us, pcie->base + REG_OFFSET);
+}
+
+static void brcm_config_clkreq(struct brcm_pcie *pcie)
+{
+ static const char err_msg[] = "invalid 'brcm,clkreq-mode' DT string\n";
+ const char *mode = "default";
+ u32 clkreq_cntl;
+ int ret, tmp;
+
+ ret = of_property_read_string(pcie->np, "brcm,clkreq-mode", &mode);
+ if (ret && ret != -EINVAL) {
+ dev_err(pcie->dev, err_msg);
+ mode = "safe";
+ }
+
+ /* Start out assuming safe mode (both mode bits cleared) */
+ clkreq_cntl = readl(pcie->base + HARD_DEBUG(pcie));
+ clkreq_cntl &= ~PCIE_CLKREQ_MASK;
+
+ if (strcmp(mode, "no-l1ss") == 0) {
+ /*
+ * "no-l1ss" -- Provides Clock Power Management, L0s, and
+ * L1, but cannot provide L1 substate (L1SS) power
+ * savings. If the downstream device connected to the RC is
+ * L1SS capable AND the OS enables L1SS, all PCIe traffic
+ * may abruptly halt, potentially hanging the system.
+ */
+ clkreq_cntl |= PCIE_MISC_HARD_PCIE_HARD_DEBUG_CLKREQ_DEBUG_ENABLE_MASK;
+ /*
+ * We want to un-advertise L1 substates because if the OS
+ * tries to configure the controller into using L1 substate
+ * power savings it may fail or hang when the RC HW is in
+ * "no-l1ss" mode.
+ */
+ tmp = readl(pcie->base + PCIE_RC_CFG_PRIV1_ROOT_CAP);
+ u32p_replace_bits(&tmp, 2, PCIE_RC_CFG_PRIV1_ROOT_CAP_L1SS_MODE_MASK);
+ writel(tmp, pcie->base + PCIE_RC_CFG_PRIV1_ROOT_CAP);
+
+ } else if (strcmp(mode, "default") == 0) {
+ /*
+		 * "default" -- Provides L0s, L1, and L1SS, but is not
+		 * compliant for Clock Power Management; specifically, it
+		 * may not be able to meet the Tclron max
+ * timing of 400ns as specified in "Dynamic Clock Control",
+ * section 3.2.5.2.2 of the PCIe spec. This situation is
+ * atypical and should happen only with older devices.
+ */
+ clkreq_cntl |= PCIE_MISC_HARD_PCIE_HARD_DEBUG_L1SS_ENABLE_MASK;
+ brcm_extend_rbus_timeout(pcie);
+
+ } else {
+ /*
+ * "safe" -- No power savings; refclk is driven by RC
+ * unconditionally.
+ */
+ if (strcmp(mode, "safe") != 0)
+ dev_err(pcie->dev, err_msg);
+ mode = "safe";
+ }
+ writel(clkreq_cntl, pcie->base + HARD_DEBUG(pcie));
+
+ dev_info(pcie->dev, "clkreq-mode set to %s\n", mode);
+}
+
static int brcm_pcie_start_link(struct brcm_pcie *pcie)
{
struct device *dev = pcie->dev;
void __iomem *base = pcie->base;
u16 nlw, cls, lnksta;
bool ssc_good = false;
- u32 tmp;
int ret, i;
+ /* Limit the generation if specified */
+ if (pcie->gen)
+ brcm_pcie_set_gen(pcie, pcie->gen);
+
/* Unassert the fundamental reset */
- pcie->perst_set(pcie, 0);
+ ret = pcie->cfg->perst_set(pcie, 0);
+ if (ret)
+ return ret;
- /*
- * Wait for 100ms after PERST# deassertion; see PCIe CEM specification
- * sections 2.2, PCIe r5.0, 6.6.1.
- */
- msleep(100);
+ msleep(PCIE_RESET_CONFIG_WAIT_MS);
/*
* Give the RC/EP even more time to wake up, before trying to
@@ -1055,8 +1428,7 @@ static int brcm_pcie_start_link(struct brcm_pcie *pcie)
return -ENODEV;
}
- if (pcie->gen)
- brcm_pcie_set_gen(pcie, pcie->gen);
+ brcm_config_clkreq(pcie);
if (pcie->ssc) {
ret = brcm_pcie_set_ssc(pcie);
@@ -1073,14 +1445,6 @@ static int brcm_pcie_start_link(struct brcm_pcie *pcie)
pci_speed_string(pcie_link_speed[cls]), nlw,
ssc_good ? "(SSC)" : "(!SSC)");
- /*
- * Refclk from RC should be gated with CLKREQ# input when ASPM L0s,L1
- * is enabled => setting the CLKREQ_DEBUG_ENABLE field to 1.
- */
- tmp = readl(base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
- tmp |= PCIE_MISC_HARD_PCIE_HARD_DEBUG_CLKREQ_DEBUG_ENABLE_MASK;
- writel(tmp, base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
-
return 0;
}
@@ -1128,7 +1492,8 @@ static int brcm_pcie_add_bus(struct pci_bus *bus)
ret = regulator_bulk_get(dev, sr->num_supplies, sr->supplies);
if (ret) {
- dev_info(dev, "No regulators for downstream device\n");
+ dev_info(dev, "Did not get regulators, err=%d\n", ret);
+ pcie->sr = NULL;
goto no_regulators;
}
@@ -1151,7 +1516,7 @@ static void brcm_pcie_remove_bus(struct pci_bus *bus)
struct subdev_regulators *sr = pcie->sr;
struct device *dev = &bus->dev;
- if (!sr)
+ if (!sr || !bus->parent || !pci_is_root_bus(bus->parent))
return;
if (regulator_bulk_disable(sr->num_supplies, sr->supplies))
@@ -1224,23 +1589,25 @@ static int brcm_phy_cntl(struct brcm_pcie *pcie, const int start)
static inline int brcm_phy_start(struct brcm_pcie *pcie)
{
- return pcie->rescal ? brcm_phy_cntl(pcie, 1) : 0;
+ return pcie->cfg->has_phy ? brcm_phy_cntl(pcie, 1) : 0;
}
static inline int brcm_phy_stop(struct brcm_pcie *pcie)
{
- return pcie->rescal ? brcm_phy_cntl(pcie, 0) : 0;
+ return pcie->cfg->has_phy ? brcm_phy_cntl(pcie, 0) : 0;
}
-static void brcm_pcie_turn_off(struct brcm_pcie *pcie)
+static int brcm_pcie_turn_off(struct brcm_pcie *pcie)
{
void __iomem *base = pcie->base;
- int tmp;
+ int tmp, ret;
if (brcm_pcie_link_up(pcie))
brcm_pcie_enter_l23(pcie);
/* Assert fundamental reset */
- pcie->perst_set(pcie, 1);
+ ret = pcie->cfg->perst_set(pcie, 1);
+ if (ret)
+ return ret;
/* Deassert request for L23 in case it was asserted */
tmp = readl(base + PCIE_MISC_PCIE_CTRL);
@@ -1248,12 +1615,15 @@ static void brcm_pcie_turn_off(struct brcm_pcie *pcie)
writel(tmp, base + PCIE_MISC_PCIE_CTRL);
/* Turn off SerDes */
- tmp = readl(base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
+ tmp = readl(base + HARD_DEBUG(pcie));
u32p_replace_bits(&tmp, 1, PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK);
- writel(tmp, base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
+ writel(tmp, base + HARD_DEBUG(pcie));
+
+ if (!(pcie->cfg->quirks & CFG_QUIRK_AVOID_BRIDGE_SHUTDOWN))
+ /* Shutdown PCIe bridge */
+ ret = brcm_pcie_bridge_sw_init_set(pcie, 1);
- /* Shutdown PCIe bridge */
- pcie->bridge_sw_init_set(pcie, 1);
+ return ret;
}
static int pci_dev_may_wakeup(struct pci_dev *dev, void *data)
@@ -1271,9 +1641,12 @@ static int brcm_pcie_suspend_noirq(struct device *dev)
{
struct brcm_pcie *pcie = dev_get_drvdata(dev);
struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
- int ret;
+ int ret, rret;
+
+ ret = brcm_pcie_turn_off(pcie);
+ if (ret)
+ return ret;
- brcm_pcie_turn_off(pcie);
/*
* If brcm_phy_stop() returns an error, just dev_err(). If we
* return the error it will cause the suspend to fail and this is a
@@ -1302,7 +1675,10 @@ static int brcm_pcie_suspend_noirq(struct device *dev)
pcie->sr->supplies);
if (ret) {
dev_err(dev, "Could not turn off regulators\n");
- reset_control_reset(pcie->rescal);
+ rret = reset_control_reset(pcie->rescal);
+ if (rret)
+ dev_err(dev, "failed to reset 'rascal' controller ret=%d\n",
+ rret);
return ret;
}
}
@@ -1317,7 +1693,7 @@ static int brcm_pcie_resume_noirq(struct device *dev)
struct brcm_pcie *pcie = dev_get_drvdata(dev);
void __iomem *base;
u32 tmp;
- int ret;
+ int ret, rret;
base = pcie->base;
ret = clk_prepare_enable(pcie->clk);
@@ -1333,12 +1709,14 @@ static int brcm_pcie_resume_noirq(struct device *dev)
goto err_reset;
/* Take bridge out of reset so we can access the SERDES reg */
- pcie->bridge_sw_init_set(pcie, 0);
+ ret = brcm_pcie_bridge_sw_init_set(pcie, 0);
+ if (ret)
+ goto err_reset;
/* SERDES_IDDQ = 0 */
- tmp = readl(base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
+ tmp = readl(base + HARD_DEBUG(pcie));
u32p_replace_bits(&tmp, 0, PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK);
- writel(tmp, base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
+ writel(tmp, base + HARD_DEBUG(pcie));
/* wait for serdes to be stable */
udelay(100);
@@ -1379,12 +1757,127 @@ err_regulator:
if (pcie->sr)
regulator_bulk_disable(pcie->sr->num_supplies, pcie->sr->supplies);
err_reset:
- reset_control_rearm(pcie->rescal);
+ rret = reset_control_rearm(pcie->rescal);
+ if (rret)
+ dev_err(pcie->dev, "failed to rearm 'rescal' reset, err=%d\n", rret);
err_disable_clk:
clk_disable_unprepare(pcie->clk);
return ret;
}
+/* Dump out PCIe errors on die or panic */
+static int brcm_pcie_dump_err(struct brcm_pcie *pcie,
+ const char *type)
+{
+ void __iomem *base = pcie->base;
+ int i, is_cfg_err, is_mem_err, lanes;
+ const char *width_str, *direction_str;
+ u32 info, cfg_addr, cfg_cause, mem_cause, lo, hi;
+ struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
+ unsigned long flags;
+ char lanes_str[9];
+
+ spin_lock_irqsave(&pcie->bridge_lock, flags);
+ /* Don't access registers when the bridge is off */
+ if (pcie->bridge_in_reset || readl(base + PCIE_OUTB_ERR_VALID) == 0) {
+ spin_unlock_irqrestore(&pcie->bridge_lock, flags);
+ return NOTIFY_DONE;
+ }
+
+ /* Read all necessary registers so we can release the spinlock ASAP */
+ info = readl(base + PCIE_OUTB_ERR_ACC_INFO);
+ is_cfg_err = !!(info & PCIE_OUTB_ERR_ACC_INFO_CFG_ERR);
+ is_mem_err = !!(info & PCIE_OUTB_ERR_ACC_INFO_MEM_ERR);
+ if (is_cfg_err) {
+ cfg_addr = readl(base + PCIE_OUTB_ERR_ACC_ADDR);
+ cfg_cause = readl(base + PCIE_OUTB_ERR_CFG_CAUSE);
+ }
+ if (is_mem_err) {
+ mem_cause = readl(base + PCIE_OUTB_ERR_MEM_CAUSE);
+ lo = readl(base + PCIE_OUTB_ERR_MEM_ADDR_LO);
+ hi = readl(base + PCIE_OUTB_ERR_MEM_ADDR_HI);
+ }
+ /* We've got all of the info, clear the error */
+ writel(1, base + PCIE_OUTB_ERR_CLEAR);
+ spin_unlock_irqrestore(&pcie->bridge_lock, flags);
+
+ dev_err(pcie->dev, "reporting PCIe info which may be related to %s error\n",
+ type);
+ width_str = (info & PCIE_OUTB_ERR_ACC_INFO_TYPE_64) ? "64bit" : "32bit";
+ direction_str = str_read_write(!(info & PCIE_OUTB_ERR_ACC_INFO_DIR_WRITE));
+ lanes = FIELD_GET(PCIE_OUTB_ERR_ACC_INFO_BYTE_LANES, info);
+ for (i = 0, lanes_str[8] = 0; i < 8; i++)
+ lanes_str[i] = (lanes & (1 << i)) ? '1' : '0';
+
+ if (is_cfg_err) {
+ int bus = FIELD_GET(PCIE_OUTB_ERR_ACC_ADDR_BUS, cfg_addr);
+ int dev = FIELD_GET(PCIE_OUTB_ERR_ACC_ADDR_DEV, cfg_addr);
+ int func = FIELD_GET(PCIE_OUTB_ERR_ACC_ADDR_FUNC, cfg_addr);
+ int reg = FIELD_GET(PCIE_OUTB_ERR_ACC_ADDR_REG, cfg_addr);
+
+ dev_err(pcie->dev, "Error: CFG Acc, %s, %s (%04x:%02x:%02x.%d) reg=0x%x, lanes=%s\n",
+ width_str, direction_str, bridge->domain_nr, bus, dev,
+ func, reg, lanes_str);
+ dev_err(pcie->dev, " Type: TO=%d Abt=%d UnsupReq=%d AccTO=%d AccDsbld=%d Acc64bit=%d\n",
+ !!(cfg_cause & PCIE_OUTB_ERR_CFG_CAUSE_TIMEOUT),
+ !!(cfg_cause & PCIE_OUTB_ERR_CFG_CAUSE_ABORT),
+ !!(cfg_cause & PCIE_OUTB_ERR_CFG_CAUSE_UNSUPP_REQ),
+ !!(cfg_cause & PCIE_OUTB_ERR_CFG_CAUSE_ACC_TIMEOUT),
+ !!(cfg_cause & PCIE_OUTB_ERR_CFG_CAUSE_ACC_DISABLED),
+ !!(cfg_cause & PCIE_OUTB_ERR_CFG_CAUSE_ACC_64BIT));
+ }
+
+ if (is_mem_err) {
+ u64 addr = ((u64)hi << 32) | (u64)lo;
+
+ dev_err(pcie->dev, "Error: Mem Acc, %s, %s, @0x%llx, lanes=%s\n",
+ width_str, direction_str, addr, lanes_str);
+ dev_err(pcie->dev, " Type: TO=%d Abt=%d UnsupReq=%d AccDsble=%d BadAddr=%d\n",
+ !!(mem_cause & PCIE_OUTB_ERR_MEM_CAUSE_TIMEOUT),
+ !!(mem_cause & PCIE_OUTB_ERR_MEM_CAUSE_ABORT),
+ !!(mem_cause & PCIE_OUTB_ERR_MEM_CAUSE_UNSUPP_REQ),
+ !!(mem_cause & PCIE_OUTB_ERR_MEM_CAUSE_ACC_DISABLED),
+ !!(mem_cause & PCIE_OUTB_ERR_MEM_CAUSE_BAD_ADDR));
+ }
+
+ return NOTIFY_DONE;
+}
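
brcm_pcie_dump_err() unpacks the access-info register with FIELD_GET() and renders the byte-lane bitmap as a string of '0'/'1' characters. The same idea in a self-contained sketch; the field layout below is made up for illustration and is not the real PCIE_OUTB_ERR_* layout:

#include <stdio.h>
#include <stdint.h>

/* Illustrative only: extract a field described by a contiguous mask. */
static unsigned int field_get(uint32_t mask, uint32_t reg)
{
	unsigned int shift = 0;

	while (!(mask & 1)) {       /* find the mask's lowest set bit */
		mask >>= 1;
		shift++;
	}
	return (reg >> shift) & mask;
}

int main(void)
{
	/* Hypothetical layout: bus[31:24] dev[23:19] func[18:16] reg[15:0] */
	uint32_t cfg_addr = 0x01083010;
	unsigned int lanes = 0xcf;          /* example byte-lane bitmap */
	char lanes_str[9] = { 0 };

	for (int i = 0; i < 8; i++)
		lanes_str[i] = (lanes & (1u << i)) ? '1' : '0';

	printf("bus=%u dev=%u func=%u reg=0x%x lanes=%s\n",
	       field_get(0xff000000, cfg_addr), field_get(0x00f80000, cfg_addr),
	       field_get(0x00070000, cfg_addr), field_get(0x0000ffff, cfg_addr),
	       lanes_str);
	return 0;
}
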
+
+static int brcm_pcie_die_notify_cb(struct notifier_block *self,
+ unsigned long v, void *p)
+{
+ struct brcm_pcie *pcie =
+ container_of(self, struct brcm_pcie, die_notifier);
+
+ return brcm_pcie_dump_err(pcie, "Die");
+}
+
+static int brcm_pcie_panic_notify_cb(struct notifier_block *self,
+ unsigned long v, void *p)
+{
+ struct brcm_pcie *pcie =
+ container_of(self, struct brcm_pcie, panic_notifier);
+
+ return brcm_pcie_dump_err(pcie, "Panic");
+}
+
+static void brcm_register_die_notifiers(struct brcm_pcie *pcie)
+{
+ pcie->panic_notifier.notifier_call = brcm_pcie_panic_notify_cb;
+ atomic_notifier_chain_register(&panic_notifier_list,
+ &pcie->panic_notifier);
+
+ pcie->die_notifier.notifier_call = brcm_pcie_die_notify_cb;
+ register_die_notifier(&pcie->die_notifier);
+}
+
+static void brcm_unregister_die_notifiers(struct brcm_pcie *pcie)
+{
+ unregister_die_notifier(&pcie->die_notifier);
+ atomic_notifier_chain_unregister(&panic_notifier_list,
+ &pcie->panic_notifier);
+}
+
static void __brcm_pcie_remove(struct brcm_pcie *pcie)
{
brcm_msi_remove(pcie);
@@ -1403,78 +1896,131 @@ static void brcm_pcie_remove(struct platform_device *pdev)
pci_stop_root_bus(bridge->bus);
pci_remove_root_bus(bridge->bus);
+ if (pcie->cfg->has_err_report)
+ brcm_unregister_die_notifiers(pcie);
+
__brcm_pcie_remove(pcie);
}
static const int pcie_offsets[] = {
- [RGR1_SW_INIT_1] = 0x9210,
- [EXT_CFG_INDEX] = 0x9000,
- [EXT_CFG_DATA] = 0x9004,
+ [RGR1_SW_INIT_1] = 0x9210,
+ [EXT_CFG_INDEX] = 0x9000,
+ [EXT_CFG_DATA] = 0x8000,
+ [PCIE_HARD_DEBUG] = 0x4204,
+ [PCIE_INTR2_CPU_BASE] = 0x4300,
+};
+
+static const int pcie_offsets_bcm7278[] = {
+ [RGR1_SW_INIT_1] = 0xc010,
+ [EXT_CFG_INDEX] = 0x9000,
+ [EXT_CFG_DATA] = 0x8000,
+ [PCIE_HARD_DEBUG] = 0x4204,
+ [PCIE_INTR2_CPU_BASE] = 0x4300,
+};
+
+static const int pcie_offsets_bcm7425[] = {
+ [RGR1_SW_INIT_1] = 0x8010,
+ [EXT_CFG_INDEX] = 0x8300,
+ [EXT_CFG_DATA] = 0x8304,
+ [PCIE_HARD_DEBUG] = 0x4204,
+ [PCIE_INTR2_CPU_BASE] = 0x4300,
};
-static const int pcie_offsets_bmips_7425[] = {
- [RGR1_SW_INIT_1] = 0x8010,
- [EXT_CFG_INDEX] = 0x8300,
- [EXT_CFG_DATA] = 0x8304,
+static const int pcie_offsets_bcm7712[] = {
+ [RGR1_SW_INIT_1] = 0x9210,
+ [EXT_CFG_INDEX] = 0x9000,
+ [EXT_CFG_DATA] = 0x8000,
+ [PCIE_HARD_DEBUG] = 0x4304,
+ [PCIE_INTR2_CPU_BASE] = 0x4400,
};
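
The per-SoC offset tables above exist because the same register blocks live at different addresses on different chips; accessors such as HARD_DEBUG(pcie) presumably resolve through the active table instead of a hard-coded constant. A minimal sketch of that lookup pattern, reusing the generic table's values:

#include <stdio.h>

enum { RGR1_SW_INIT_1, EXT_CFG_INDEX, EXT_CFG_DATA, PCIE_HARD_DEBUG, NUM_REGS };

struct soc_cfg {
	const char *name;
	int offsets[NUM_REGS];
};

static const struct soc_cfg generic = {
	.name = "generic",
	.offsets = {
		[RGR1_SW_INIT_1]  = 0x9210,
		[EXT_CFG_INDEX]   = 0x9000,
		[EXT_CFG_DATA]    = 0x8000,
		[PCIE_HARD_DEBUG] = 0x4204,
	},
};

int main(void)
{
	/* The driver's HARD_DEBUG(pcie) likely resolves the same way. */
	printf("%s: HARD_DEBUG register at offset 0x%x\n",
	       generic.name, generic.offsets[PCIE_HARD_DEBUG]);
	return 0;
}
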
static const struct pcie_cfg_data generic_cfg = {
.offsets = pcie_offsets,
- .type = GENERIC,
+ .soc_base = GENERIC,
.perst_set = brcm_pcie_perst_set_generic,
.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
+ .num_inbound_wins = 3,
};
-static const struct pcie_cfg_data bcm7425_cfg = {
- .offsets = pcie_offsets_bmips_7425,
- .type = BCM7425,
+static const struct pcie_cfg_data bcm2711_cfg = {
+ .offsets = pcie_offsets,
+ .soc_base = BCM2711,
.perst_set = brcm_pcie_perst_set_generic,
.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
+ .num_inbound_wins = 3,
};
-static const struct pcie_cfg_data bcm7435_cfg = {
- .offsets = pcie_offsets,
- .type = BCM7435,
- .perst_set = brcm_pcie_perst_set_generic,
+static const struct pcie_cfg_data bcm2712_cfg = {
+ .offsets = pcie_offsets_bcm7712,
+ .soc_base = BCM7712,
+ .perst_set = brcm_pcie_perst_set_7278,
.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
+ .post_setup = brcm_pcie_post_setup_bcm2712,
+ .quirks = CFG_QUIRK_AVOID_BRIDGE_SHUTDOWN,
+ .num_inbound_wins = 10,
};
static const struct pcie_cfg_data bcm4908_cfg = {
.offsets = pcie_offsets,
- .type = BCM4908,
+ .soc_base = BCM4908,
.perst_set = brcm_pcie_perst_set_4908,
.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
-};
-
-static const int pcie_offset_bcm7278[] = {
- [RGR1_SW_INIT_1] = 0xc010,
- [EXT_CFG_INDEX] = 0x9000,
- [EXT_CFG_DATA] = 0x9004,
+ .num_inbound_wins = 3,
};
static const struct pcie_cfg_data bcm7278_cfg = {
- .offsets = pcie_offset_bcm7278,
- .type = BCM7278,
+ .offsets = pcie_offsets_bcm7278,
+ .soc_base = BCM7278,
.perst_set = brcm_pcie_perst_set_7278,
.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_7278,
+ .num_inbound_wins = 3,
};
-static const struct pcie_cfg_data bcm2711_cfg = {
+static const struct pcie_cfg_data bcm7425_cfg = {
+ .offsets = pcie_offsets_bcm7425,
+ .soc_base = BCM7425,
+ .perst_set = brcm_pcie_perst_set_generic,
+ .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
+ .num_inbound_wins = 3,
+};
+
+static const struct pcie_cfg_data bcm7435_cfg = {
.offsets = pcie_offsets,
- .type = BCM2711,
+ .soc_base = BCM7435,
.perst_set = brcm_pcie_perst_set_generic,
.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
+ .num_inbound_wins = 3,
+};
+
+static const struct pcie_cfg_data bcm7216_cfg = {
+ .offsets = pcie_offsets_bcm7278,
+ .soc_base = BCM7278,
+ .perst_set = brcm_pcie_perst_set_7278,
+ .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_7278,
+ .has_phy = true,
+ .num_inbound_wins = 3,
+ .has_err_report = true,
+};
+
+static const struct pcie_cfg_data bcm7712_cfg = {
+ .offsets = pcie_offsets_bcm7712,
+ .perst_set = brcm_pcie_perst_set_7278,
+ .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
+ .soc_base = BCM7712,
+ .num_inbound_wins = 10,
};
static const struct of_device_id brcm_pcie_match[] = {
{ .compatible = "brcm,bcm2711-pcie", .data = &bcm2711_cfg },
+ { .compatible = "brcm,bcm2712-pcie", .data = &bcm2712_cfg },
{ .compatible = "brcm,bcm4908-pcie", .data = &bcm4908_cfg },
{ .compatible = "brcm,bcm7211-pcie", .data = &generic_cfg },
+ { .compatible = "brcm,bcm7216-pcie", .data = &bcm7216_cfg },
{ .compatible = "brcm,bcm7278-pcie", .data = &bcm7278_cfg },
- { .compatible = "brcm,bcm7216-pcie", .data = &bcm7278_cfg },
- { .compatible = "brcm,bcm7445-pcie", .data = &generic_cfg },
- { .compatible = "brcm,bcm7435-pcie", .data = &bcm7435_cfg },
{ .compatible = "brcm,bcm7425-pcie", .data = &bcm7425_cfg },
+ { .compatible = "brcm,bcm7435-pcie", .data = &bcm7435_cfg },
+ { .compatible = "brcm,bcm7445-pcie", .data = &generic_cfg },
+ { .compatible = "brcm,bcm7712-pcie", .data = &bcm7712_cfg },
{},
};
@@ -1496,7 +2042,7 @@ static struct pci_ops brcm7425_pcie_ops = {
static int brcm_pcie_probe(struct platform_device *pdev)
{
- struct device_node *np = pdev->dev.of_node, *msi_np;
+ struct device_node *np = pdev->dev.of_node;
struct pci_host_bridge *bridge;
const struct pcie_cfg_data *data;
struct brcm_pcie *pcie;
@@ -1515,10 +2061,7 @@ static int brcm_pcie_probe(struct platform_device *pdev)
pcie = pci_host_bridge_priv(bridge);
pcie->dev = &pdev->dev;
pcie->np = np;
- pcie->reg_offsets = data->offsets;
- pcie->type = data->type;
- pcie->perst_set = data->perst_set;
- pcie->bridge_sw_init_set = data->bridge_sw_init_set;
+ pcie->cfg = data;
pcie->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pcie->base))
@@ -1533,25 +2076,55 @@ static int brcm_pcie_probe(struct platform_device *pdev)
pcie->ssc = of_property_read_bool(np, "brcm,enable-ssc");
- ret = clk_prepare_enable(pcie->clk);
- if (ret) {
- dev_err(&pdev->dev, "could not enable clock\n");
- return ret;
- }
pcie->rescal = devm_reset_control_get_optional_shared(&pdev->dev, "rescal");
- if (IS_ERR(pcie->rescal)) {
- clk_disable_unprepare(pcie->clk);
+ if (IS_ERR(pcie->rescal))
return PTR_ERR(pcie->rescal);
- }
+
pcie->perst_reset = devm_reset_control_get_optional_exclusive(&pdev->dev, "perst");
- if (IS_ERR(pcie->perst_reset)) {
- clk_disable_unprepare(pcie->clk);
+ if (IS_ERR(pcie->perst_reset))
return PTR_ERR(pcie->perst_reset);
+
+ pcie->bridge_reset = devm_reset_control_get_optional_exclusive(&pdev->dev, "bridge");
+ if (IS_ERR(pcie->bridge_reset))
+ return PTR_ERR(pcie->bridge_reset);
+
+ pcie->swinit_reset = devm_reset_control_get_optional_exclusive(&pdev->dev, "swinit");
+ if (IS_ERR(pcie->swinit_reset))
+ return PTR_ERR(pcie->swinit_reset);
+
+ ret = clk_prepare_enable(pcie->clk);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "could not enable clock\n");
+
+ ret = brcm_pcie_bridge_sw_init_set(pcie, 0);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "could not de-assert bridge reset\n");
+
+ if (pcie->swinit_reset) {
+ ret = reset_control_assert(pcie->swinit_reset);
+ if (ret) {
+ clk_disable_unprepare(pcie->clk);
+ return dev_err_probe(&pdev->dev, ret,
+ "could not assert reset 'swinit'\n");
+ }
+
+ /* HW team recommends 1us for proper sync and propagation of reset */
+ udelay(1);
+
+ ret = reset_control_deassert(pcie->swinit_reset);
+ if (ret) {
+ clk_disable_unprepare(pcie->clk);
+ return dev_err_probe(&pdev->dev, ret,
+ "could not de-assert reset 'swinit'\n");
+ }
}
ret = reset_control_reset(pcie->rescal);
- if (ret)
- dev_err(&pdev->dev, "failed to deassert 'rescal'\n");
+ if (ret) {
+ clk_disable_unprepare(pcie->clk);
+ return dev_err_probe(&pdev->dev, ret, "failed to deassert 'rescal'\n");
+ }
ret = brcm_phy_start(pcie);
if (ret) {
@@ -1565,22 +2138,29 @@ static int brcm_pcie_probe(struct platform_device *pdev)
goto fail;
pcie->hw_rev = readl(pcie->base + PCIE_MISC_REVISION);
- if (pcie->type == BCM4908 && pcie->hw_rev >= BRCM_PCIE_HW_REV_3_20) {
+ if (pcie->cfg->soc_base == BCM4908 &&
+ pcie->hw_rev >= BRCM_PCIE_HW_REV_3_20) {
dev_err(pcie->dev, "hardware revision with unsupported PERST# setup\n");
ret = -ENODEV;
goto fail;
}
- msi_np = of_parse_phandle(pcie->np, "msi-parent", 0);
- if (pci_msi_enabled() && msi_np == pcie->np) {
- ret = brcm_pcie_enable_msi(pcie);
+ if (pci_msi_enabled()) {
+ struct device_node *msi_np = of_parse_phandle(pcie->np, "msi-parent", 0);
+
+ if (msi_np == pcie->np)
+ ret = brcm_pcie_enable_msi(pcie);
+
+ of_node_put(msi_np);
+
if (ret) {
dev_err(pcie->dev, "probe of internal MSI failed");
goto fail;
}
}
- bridge->ops = pcie->type == BCM7425 ? &brcm7425_pcie_ops : &brcm_pcie_ops;
+ bridge->ops = pcie->cfg->soc_base == BCM7425 ?
+ &brcm7425_pcie_ops : &brcm_pcie_ops;
bridge->sysdata = pcie;
platform_set_drvdata(pdev, pcie);
@@ -1594,10 +2174,16 @@ static int brcm_pcie_probe(struct platform_device *pdev)
return ret;
}
+ if (pcie->cfg->has_err_report) {
+ spin_lock_init(&pcie->bridge_lock);
+ brcm_register_die_notifiers(pcie);
+ }
+
return 0;
fail:
__brcm_pcie_remove(pcie);
+
return ret;
}
@@ -1610,7 +2196,7 @@ static const struct dev_pm_ops brcm_pcie_pm_ops = {
static struct platform_driver brcm_pcie_driver = {
.probe = brcm_pcie_probe,
- .remove_new = brcm_pcie_remove,
+ .remove = brcm_pcie_remove,
.driver = {
.name = "brcm-pcie",
.of_match_table = brcm_pcie_match,
@@ -1622,3 +2208,4 @@ module_platform_driver(brcm_pcie_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Broadcom STB PCIe RC driver");
MODULE_AUTHOR("Broadcom");
+MODULE_SOFTDEP("pre: irq_bcm2712_mip");
diff --git a/drivers/pci/controller/pcie-hisi-error.c b/drivers/pci/controller/pcie-hisi-error.c
index ad9d5ffcd9e3..aaf1ed2b6e59 100644
--- a/drivers/pci/controller/pcie-hisi-error.c
+++ b/drivers/pci/controller/pcie-hisi-error.c
@@ -317,7 +317,7 @@ static struct platform_driver hisi_pcie_error_handler_driver = {
.acpi_match_table = hisi_pcie_acpi_match,
},
.probe = hisi_pcie_error_handler_probe,
- .remove_new = hisi_pcie_error_handler_remove,
+ .remove = hisi_pcie_error_handler_remove,
};
module_platform_driver(hisi_pcie_error_handler_driver);
diff --git a/drivers/pci/controller/pcie-iproc-msi.c b/drivers/pci/controller/pcie-iproc-msi.c
index fee036b07cd4..9ba242ab9596 100644
--- a/drivers/pci/controller/pcie-iproc-msi.c
+++ b/drivers/pci/controller/pcie-iproc-msi.c
@@ -5,6 +5,7 @@
#include <linux/interrupt.h>
#include <linux/irqchip/chained_irq.h>
+#include <linux/irqchip/irq-msi-lib.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>
#include <linux/of_irq.h>
@@ -81,7 +82,6 @@ struct iproc_msi_grp {
* @bitmap_lock: lock to protect access to the MSI bitmap
* @nr_msi_vecs: total number of MSI vectors
* @inner_domain: inner IRQ domain
- * @msi_domain: MSI IRQ domain
* @nr_eq_region: required number of 4K aligned memory region for MSI event
* queues
* @nr_msi_region: required number of 4K aligned address region for MSI posted
@@ -101,7 +101,6 @@ struct iproc_msi {
struct mutex bitmap_lock;
unsigned int nr_msi_vecs;
struct irq_domain *inner_domain;
- struct irq_domain *msi_domain;
unsigned int nr_eq_region;
unsigned int nr_msi_region;
void *eq_cpu;
@@ -165,16 +164,18 @@ static inline unsigned int iproc_msi_eq_offset(struct iproc_msi *msi, u32 eq)
return eq * EQ_LEN * sizeof(u32);
}
-static struct irq_chip iproc_msi_irq_chip = {
- .name = "iProc-MSI",
+#define IPROC_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
+ MSI_FLAG_USE_DEF_CHIP_OPS)
+#define IPROC_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \
+ MSI_FLAG_PCI_MSIX)
+
+static struct msi_parent_ops iproc_msi_parent_ops = {
+ .required_flags = IPROC_MSI_FLAGS_REQUIRED,
+ .supported_flags = IPROC_MSI_FLAGS_SUPPORTED,
+ .bus_select_token = DOMAIN_BUS_PCI_MSI,
+ .prefix = "iProc-",
+ .init_dev_msi_info = msi_lib_init_dev_msi_info,
};
-
-static struct msi_domain_info iproc_msi_domain_info = {
- .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_PCI_MSIX,
- .chip = &iproc_msi_irq_chip,
-};
-
/*
* In iProc PCIe core, each MSI group is serviced by a GIC interrupt and a
* dedicated event queue. Each MSI group can support up to 64 MSI vectors.
@@ -446,27 +447,22 @@ static void iproc_msi_disable(struct iproc_msi *msi)
static int iproc_msi_alloc_domains(struct device_node *node,
struct iproc_msi *msi)
{
- msi->inner_domain = irq_domain_add_linear(NULL, msi->nr_msi_vecs,
- &msi_domain_ops, msi);
+ struct irq_domain_info info = {
+ .fwnode = of_fwnode_handle(node),
+ .ops = &msi_domain_ops,
+ .host_data = msi,
+ .size = msi->nr_msi_vecs,
+ };
+
+ msi->inner_domain = msi_create_parent_irq_domain(&info, &iproc_msi_parent_ops);
if (!msi->inner_domain)
return -ENOMEM;
- msi->msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(node),
- &iproc_msi_domain_info,
- msi->inner_domain);
- if (!msi->msi_domain) {
- irq_domain_remove(msi->inner_domain);
- return -ENOMEM;
- }
-
return 0;
}
static void iproc_msi_free_domains(struct iproc_msi *msi)
{
- if (msi->msi_domain)
- irq_domain_remove(msi->msi_domain);
-
if (msi->inner_domain)
irq_domain_remove(msi->inner_domain);
}
@@ -525,7 +521,7 @@ int iproc_msi_init(struct iproc_pcie *pcie, struct device_node *node)
if (!of_device_is_compatible(node, "brcm,iproc-msi"))
return -ENODEV;
- if (!of_find_property(node, "msi-controller", NULL))
+ if (!of_property_read_bool(node, "msi-controller"))
return -ENODEV;
if (pcie->msi)
@@ -542,7 +538,7 @@ int iproc_msi_init(struct iproc_pcie *pcie, struct device_node *node)
msi->nr_cpus = num_possible_cpus();
if (msi->nr_cpus == 1)
- iproc_msi_domain_info.flags |= MSI_FLAG_MULTI_PCI_MSI;
+ iproc_msi_parent_ops.supported_flags |= MSI_FLAG_MULTI_PCI_MSI;
msi->nr_irqs = of_irq_count(node);
if (!msi->nr_irqs) {
@@ -585,8 +581,7 @@ int iproc_msi_init(struct iproc_pcie *pcie, struct device_node *node)
return -EINVAL;
}
- if (of_find_property(node, "brcm,pcie-msi-inten", NULL))
- msi->has_inten_reg = true;
+ msi->has_inten_reg = of_property_read_bool(node, "brcm,pcie-msi-inten");
msi->nr_msi_vecs = msi->nr_irqs * EQ_LEN;
msi->bitmap = devm_bitmap_zalloc(pcie->dev, msi->nr_msi_vecs,
diff --git a/drivers/pci/controller/pcie-iproc-platform.c b/drivers/pci/controller/pcie-iproc-platform.c
index acdc583d2980..0cb78c583c7e 100644
--- a/drivers/pci/controller/pcie-iproc-platform.c
+++ b/drivers/pci/controller/pcie-iproc-platform.c
@@ -52,7 +52,7 @@ static int iproc_pltfm_pcie_probe(struct platform_device *pdev)
pcie = pci_host_bridge_priv(bridge);
pcie->dev = dev;
- pcie->type = (enum iproc_pcie_type) of_device_get_match_data(dev);
+ pcie->type = (uintptr_t)of_device_get_match_data(dev);
ret = of_address_to_resource(np, 0, &reg);
if (ret < 0) {
@@ -134,7 +134,7 @@ static struct platform_driver iproc_pltfm_pcie_driver = {
.of_match_table = of_match_ptr(iproc_pcie_of_match_table),
},
.probe = iproc_pltfm_pcie_probe,
- .remove_new = iproc_pltfm_pcie_remove,
+ .remove = iproc_pltfm_pcie_remove,
.shutdown = iproc_pltfm_pcie_shutdown,
};
module_platform_driver(iproc_pltfm_pcie_driver);
diff --git a/drivers/pci/controller/pcie-iproc.c b/drivers/pci/controller/pcie-iproc.c
index bd1c98b68851..ccf71993ea35 100644
--- a/drivers/pci/controller/pcie-iproc.c
+++ b/drivers/pci/controller/pcie-iproc.c
@@ -17,6 +17,7 @@
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/platform_device.h>
#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/phy/phy.h>
@@ -54,7 +55,7 @@
#define CFG_RD_SUCCESS 0
#define CFG_RD_UR 1
-#define CFG_RD_CRS 2
+#define CFG_RD_RRS 2
#define CFG_RD_CA 3
#define CFG_RETRY_STATUS 0xffff0001
#define CFG_RETRY_STATUS_TIMEOUT_US 500000 /* 500 milliseconds */
@@ -485,31 +486,31 @@ static unsigned int iproc_pcie_cfg_retry(struct iproc_pcie *pcie,
u32 status;
/*
- * As per PCIe spec r3.1, sec 2.3.2, CRS Software Visibility only
+ * As per PCIe r6.0, sec 2.3.2, Config RRS Software Visibility only
* affects config reads of the Vendor ID. For config writes or any
* other config reads, the Root may automatically reissue the
* configuration request again as a new request.
*
* For config reads, this hardware returns CFG_RETRY_STATUS data
- * when it receives a CRS completion, regardless of the address of
- * the read or the CRS Software Visibility Enable bit. As a
+ * when it receives an RRS completion, regardless of the address of
+ * the read or the RRS Software Visibility Enable bit. As a
* partial workaround for this, we retry in software any read that
* returns CFG_RETRY_STATUS.
*
* Note that a non-Vendor ID config register may have a value of
* CFG_RETRY_STATUS. If we read that, we can't distinguish it from
- * a CRS completion, so we will incorrectly retry the read and
+ * an RRS completion, so we will incorrectly retry the read and
* eventually return the wrong data (0xffffffff).
*/
data = readl(cfg_data_p);
while (data == CFG_RETRY_STATUS && timeout--) {
/*
- * CRS state is set in CFG_RD status register
+ * RRS state is set in CFG_RD status register
* This will handle the case where CFG_RETRY_STATUS is
* valid config data.
*/
status = iproc_pcie_read_reg(pcie, IPROC_PCIE_CFG_RD_STATUS);
- if (status != CFG_RD_CRS)
+ if (status != CFG_RD_RRS)
return data;
udelay(1);
@@ -556,8 +557,8 @@ static void iproc_pcie_fix_cap(struct iproc_pcie *pcie, int where, u32 *val)
break;
case IPROC_PCI_EXP_CAP + PCI_EXP_RTCTL:
- /* Don't advertise CRS SV support */
- *val &= ~(PCI_EXP_RTCAP_CRSVIS << 16);
+ /* Don't advertise RRS SV support */
+ *val &= ~(PCI_EXP_RTCAP_RRS_SV << 16);
break;
default:
@@ -783,7 +784,7 @@ static int iproc_pcie_check_link(struct iproc_pcie *pcie)
/* make sure we are not in EP mode */
iproc_pci_raw_config_read32(pcie, 0, PCI_HEADER_TYPE, 1, &hdr_type);
- if ((hdr_type & 0x7f) != PCI_HEADER_TYPE_BRIDGE) {
+ if ((hdr_type & PCI_HEADER_TYPE_MASK) != PCI_HEADER_TYPE_BRIDGE) {
dev_err(dev, "in EP mode, hdr=%#02x\n", hdr_type);
return -EFAULT;
}
@@ -1337,29 +1338,16 @@ static int iproc_pcie_msi_steer(struct iproc_pcie *pcie,
static int iproc_pcie_msi_enable(struct iproc_pcie *pcie)
{
- struct device_node *msi_node;
+ struct device_node *msi_node = NULL;
int ret;
/*
* Either the "msi-parent" or the "msi-map" phandle needs to exist
* for us to obtain the MSI node.
*/
-
- msi_node = of_parse_phandle(pcie->dev->of_node, "msi-parent", 0);
- if (!msi_node) {
- const __be32 *msi_map = NULL;
- int len;
- u32 phandle;
-
- msi_map = of_get_property(pcie->dev->of_node, "msi-map", &len);
- if (!msi_map)
- return -ENODEV;
-
- phandle = be32_to_cpup(msi_map + 1);
- msi_node = of_find_node_by_phandle(phandle);
- if (!msi_node)
- return -ENODEV;
- }
+ of_msi_xlate(pcie->dev, &msi_node, 0);
+ if (!msi_node)
+ return -ENODEV;
/*
* Certain revisions of the iProc PCIe controller require additional
diff --git a/drivers/pci/controller/pcie-mediatek-gen3.c b/drivers/pci/controller/pcie-mediatek-gen3.c
index e0e27645fdf4..75ddb8bee168 100644
--- a/drivers/pci/controller/pcie-mediatek-gen3.c
+++ b/drivers/pci/controller/pcie-mediatek-gen3.c
@@ -6,29 +6,47 @@
* Author: Jianjun Wang <jianjun.wang@mediatek.com>
*/
+#include <linux/bitfield.h>
#include <linux/clk.h>
+#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
+#include <linux/irqchip/irq-msi-lib.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/msi.h>
+#include <linux/of_device.h>
+#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
#include <linux/reset.h>
#include "../pci.h"
+#define PCIE_BASE_CFG_REG 0x14
+#define PCIE_BASE_CFG_SPEED GENMASK(15, 8)
+
#define PCIE_SETTING_REG 0x80
+#define PCIE_SETTING_LINK_WIDTH GENMASK(11, 8)
+#define PCIE_SETTING_GEN_SUPPORT GENMASK(14, 12)
#define PCIE_PCI_IDS_1 0x9c
#define PCI_CLASS(class) (class << 8)
#define PCIE_RC_MODE BIT(0)
+#define PCIE_EQ_PRESET_01_REG 0x100
+#define PCIE_VAL_LN0_DOWNSTREAM GENMASK(6, 0)
+#define PCIE_VAL_LN0_UPSTREAM GENMASK(14, 8)
+#define PCIE_VAL_LN1_DOWNSTREAM GENMASK(22, 16)
+#define PCIE_VAL_LN1_UPSTREAM GENMASK(30, 24)
+
#define PCIE_CFGNUM_REG 0x140
#define PCIE_CFG_DEVFN(devfn) ((devfn) & GENMASK(7, 0))
#define PCIE_CFG_BUS(bus) (((bus) << 8) & GENMASK(15, 8))
@@ -68,6 +86,14 @@
#define PCIE_MSI_SET_ENABLE_REG 0x190
#define PCIE_MSI_SET_ENABLE GENMASK(PCIE_MSI_SET_NUM - 1, 0)
+#define PCIE_PIPE4_PIE8_REG 0x338
+#define PCIE_K_FINETUNE_MAX GENMASK(5, 0)
+#define PCIE_K_FINETUNE_ERR GENMASK(7, 6)
+#define PCIE_K_PRESET_TO_USE GENMASK(18, 8)
+#define PCIE_K_PHYPARAM_QUERY BIT(19)
+#define PCIE_K_QUERY_TIMEOUT BIT(20)
+#define PCIE_K_PRESET_TO_USE_16G GENMASK(31, 21)
+
#define PCIE_MSI_SET_BASE_REG 0xc00
#define PCIE_MSI_SET_OFFSET 0x10
#define PCIE_MSI_SET_STATUS_OFFSET 0x04
@@ -76,6 +102,9 @@
#define PCIE_MSI_SET_ADDR_HI_BASE 0xc80
#define PCIE_MSI_SET_ADDR_HI_OFFSET 0x04
+#define PCIE_RESOURCE_CTRL_REG 0xd2c
+#define PCIE_RSRC_SYS_CLK_RDY_TIME_MASK GENMASK(7, 0)
+
#define PCIE_ICMD_PM_REG 0x198
#define PCIE_TURN_OFF_LINK BIT(4)
@@ -100,6 +129,42 @@
#define PCIE_ATR_TLP_TYPE_MEM PCIE_ATR_TLP_TYPE(0)
#define PCIE_ATR_TLP_TYPE_IO PCIE_ATR_TLP_TYPE(2)
+#define MAX_NUM_PHY_RESETS 3
+
+#define PCIE_MTK_RESET_TIME_US 10
+
+/* Time in ms needed to complete PCIe reset on EN7581 SoC */
+#define PCIE_EN7581_RESET_TIME_MS 100
+
+struct mtk_gen3_pcie;
+
+#define PCIE_CONF_LINK2_CTL_STS (PCIE_CFG_OFFSET_ADDR + 0xb0)
+#define PCIE_CONF_LINK2_LCR2_LINK_SPEED GENMASK(3, 0)
+
+enum mtk_gen3_pcie_flags {
+ SKIP_PCIE_RSTB = BIT(0), /* Skip PERST# assertion during device
+ * probing or suspend/resume phase to
+ * avoid hw bugs/issues.
+ */
+};
+
+/**
+ * struct mtk_gen3_pcie_pdata - differentiate between host generations
+ * @power_up: pcie power_up callback
+ * @phy_resets: phy reset lines SoC data.
+ * @sys_clk_rdy_time_us: System clock ready time override (microseconds)
+ * @flags: pcie device flags.
+ */
+struct mtk_gen3_pcie_pdata {
+ int (*power_up)(struct mtk_gen3_pcie *pcie);
+ struct {
+ const char *id[MAX_NUM_PHY_RESETS];
+ int num_resets;
+ } phy_resets;
+ u8 sys_clk_rdy_time_us;
+ u32 flags;
+};
+
/**
* struct mtk_msi_set - MSI information for each set
* @base: IO mapped register base
@@ -118,39 +183,44 @@ struct mtk_msi_set {
* @base: IO mapped register base
* @reg_base: physical register base
* @mac_reset: MAC reset control
- * @phy_reset: PHY reset control
+ * @phy_resets: PHY reset controllers
* @phy: PHY controller block
* @clks: PCIe clocks
* @num_clks: PCIe clocks count for this port
+ * @max_link_speed: Maximum link speed (PCIe Gen) for this port
+ * @num_lanes: Number of PCIe lanes for this port
* @irq: PCIe controller interrupt number
* @saved_irq_state: IRQ enable state saved at suspend time
* @irq_lock: lock protecting IRQ register access
* @intx_domain: legacy INTx IRQ domain
- * @msi_domain: MSI IRQ domain
* @msi_bottom_domain: MSI IRQ bottom domain
* @msi_sets: MSI sets information
* @lock: lock protecting IRQ bit map
* @msi_irq_in_use: bit map for assigned MSI IRQ
+ * @soc: pointer to SoC-dependent operations
*/
struct mtk_gen3_pcie {
struct device *dev;
void __iomem *base;
phys_addr_t reg_base;
struct reset_control *mac_reset;
- struct reset_control *phy_reset;
+ struct reset_control_bulk_data phy_resets[MAX_NUM_PHY_RESETS];
struct phy *phy;
struct clk_bulk_data *clks;
int num_clks;
+ u8 max_link_speed;
+ u8 num_lanes;
int irq;
u32 saved_irq_state;
raw_spinlock_t irq_lock;
struct irq_domain *intx_domain;
- struct irq_domain *msi_domain;
struct irq_domain *msi_bottom_domain;
struct mtk_msi_set msi_sets[PCIE_MSI_SET_NUM];
struct mutex lock;
DECLARE_BITMAP(msi_irq_in_use, PCIE_MSI_IRQS_NUM);
+
+ const struct mtk_gen3_pcie_pdata *soc;
};
/* LTSSM state in PCIE_LTSSM_STATUS_REG bit[28:24] */
@@ -245,35 +315,61 @@ static int mtk_pcie_set_trans_table(struct mtk_gen3_pcie *pcie,
resource_size_t cpu_addr,
resource_size_t pci_addr,
resource_size_t size,
- unsigned long type, int num)
+ unsigned long type, int *num)
{
+ resource_size_t remaining = size;
+ resource_size_t table_size;
+ resource_size_t addr_align;
+ const char *range_type;
void __iomem *table;
u32 val;
- if (num >= PCIE_MAX_TRANS_TABLES) {
- dev_err(pcie->dev, "not enough translate table for addr: %#llx, limited to [%d]\n",
- (unsigned long long)cpu_addr, PCIE_MAX_TRANS_TABLES);
- return -ENODEV;
- }
+ while (remaining && (*num < PCIE_MAX_TRANS_TABLES)) {
+ /* Table size needs to be a power of 2 */
+ table_size = BIT(fls(remaining) - 1);
- table = pcie->base + PCIE_TRANS_TABLE_BASE_REG +
- num * PCIE_ATR_TLB_SET_OFFSET;
+ if (cpu_addr > 0) {
+ addr_align = BIT(ffs(cpu_addr) - 1);
+ table_size = min(table_size, addr_align);
+ }
- writel_relaxed(lower_32_bits(cpu_addr) | PCIE_ATR_SIZE(fls(size) - 1),
- table);
- writel_relaxed(upper_32_bits(cpu_addr),
- table + PCIE_ATR_SRC_ADDR_MSB_OFFSET);
- writel_relaxed(lower_32_bits(pci_addr),
- table + PCIE_ATR_TRSL_ADDR_LSB_OFFSET);
- writel_relaxed(upper_32_bits(pci_addr),
- table + PCIE_ATR_TRSL_ADDR_MSB_OFFSET);
+ /* Minimum size of translate table is 4KiB */
+ if (table_size < 0x1000) {
+ dev_err(pcie->dev, "illegal table size %#llx\n",
+ (unsigned long long)table_size);
+ return -EINVAL;
+ }
- if (type == IORESOURCE_IO)
- val = PCIE_ATR_TYPE_IO | PCIE_ATR_TLP_TYPE_IO;
- else
- val = PCIE_ATR_TYPE_MEM | PCIE_ATR_TLP_TYPE_MEM;
+ table = pcie->base + PCIE_TRANS_TABLE_BASE_REG + *num * PCIE_ATR_TLB_SET_OFFSET;
+ writel_relaxed(lower_32_bits(cpu_addr) | PCIE_ATR_SIZE(fls(table_size) - 1), table);
+ writel_relaxed(upper_32_bits(cpu_addr), table + PCIE_ATR_SRC_ADDR_MSB_OFFSET);
+ writel_relaxed(lower_32_bits(pci_addr), table + PCIE_ATR_TRSL_ADDR_LSB_OFFSET);
+ writel_relaxed(upper_32_bits(pci_addr), table + PCIE_ATR_TRSL_ADDR_MSB_OFFSET);
- writel_relaxed(val, table + PCIE_ATR_TRSL_PARAM_OFFSET);
+ if (type == IORESOURCE_IO) {
+ val = PCIE_ATR_TYPE_IO | PCIE_ATR_TLP_TYPE_IO;
+ range_type = "IO";
+ } else {
+ val = PCIE_ATR_TYPE_MEM | PCIE_ATR_TLP_TYPE_MEM;
+ range_type = "MEM";
+ }
+
+ writel_relaxed(val, table + PCIE_ATR_TRSL_PARAM_OFFSET);
+
+ dev_dbg(pcie->dev, "set %s trans window[%d]: cpu_addr = %#llx, pci_addr = %#llx, size = %#llx\n",
+ range_type, *num, (unsigned long long)cpu_addr,
+ (unsigned long long)pci_addr,
+ (unsigned long long)table_size);
+
+ cpu_addr += table_size;
+ pci_addr += table_size;
+ remaining -= table_size;
+ (*num)++;
+ }
+
+ if (remaining)
+ dev_warn(pcie->dev, "not enough translate table for addr: %#llx, limited to [%d]\n",
+ (unsigned long long)cpu_addr, PCIE_MAX_TRANS_TABLES);
return 0;
}
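
mtk_pcie_set_trans_table() now carves an arbitrarily sized window into chunks that are simultaneously a power of two and aligned to the running CPU address, down to the 4 KiB minimum one translation table entry can cover. The chunking arithmetic on its own, as a plain-C sketch with made-up addresses:

#include <stdio.h>
#include <stdint.h>

#define MAX_TABLES 8   /* stand-in for PCIE_MAX_TRANS_TABLES */

/* Largest power-of-two chunk that fits in 'remaining' and keeps 'addr' aligned. */
static uint64_t chunk_size(uint64_t addr, uint64_t remaining)
{
	uint64_t size = 1ULL << (63 - __builtin_clzll(remaining)); /* BIT(fls() - 1) */

	if (addr)
		size = size < (addr & -addr) ? size : (addr & -addr); /* min(size, alignment) */
	return size;
}

int main(void)
{
	uint64_t cpu = 0x20300000, remaining = 0x00d00000;  /* example 13 MiB window */
	int num = 0;

	while (remaining && num < MAX_TABLES) {
		uint64_t size = chunk_size(cpu, remaining);

		if (size < 0x1000)          /* hardware minimum is 4 KiB */
			break;
		printf("table[%d]: cpu=%#llx size=%#llx\n", num,
		       (unsigned long long)cpu, (unsigned long long)size);
		cpu += size;
		remaining -= size;
		num++;
	}
	return 0;
}

This splits the example window into a 1 MiB, a 4 MiB and an 8 MiB entry, exactly because each chunk has to respect the alignment of the address it starts at.
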
@@ -315,11 +411,43 @@ static int mtk_pcie_startup_port(struct mtk_gen3_pcie *pcie)
int err;
u32 val;
- /* Set as RC mode */
+ /* Set as RC mode and set controller PCIe Gen speed restriction, if any */
val = readl_relaxed(pcie->base + PCIE_SETTING_REG);
val |= PCIE_RC_MODE;
+ if (pcie->max_link_speed) {
+ val &= ~PCIE_SETTING_GEN_SUPPORT;
+
+ /* Can enable link speed support only from Gen2 onwards */
+ if (pcie->max_link_speed >= 2)
+ val |= FIELD_PREP(PCIE_SETTING_GEN_SUPPORT,
+ GENMASK(pcie->max_link_speed - 2, 0));
+ }
+ if (pcie->num_lanes) {
+ val &= ~PCIE_SETTING_LINK_WIDTH;
+
+ /* Zero means one lane, each bit activates x2/x4/x8/x16 */
+ if (pcie->num_lanes > 1)
+ val |= FIELD_PREP(PCIE_SETTING_LINK_WIDTH,
+ GENMASK(fls(pcie->num_lanes >> 2), 0));
+ }
writel_relaxed(val, pcie->base + PCIE_SETTING_REG);
+ /* Set Link Control 2 (LNKCTL2) speed restriction, if any */
+ if (pcie->max_link_speed) {
+ val = readl_relaxed(pcie->base + PCIE_CONF_LINK2_CTL_STS);
+ val &= ~PCIE_CONF_LINK2_LCR2_LINK_SPEED;
+ val |= FIELD_PREP(PCIE_CONF_LINK2_LCR2_LINK_SPEED, pcie->max_link_speed);
+ writel_relaxed(val, pcie->base + PCIE_CONF_LINK2_CTL_STS);
+ }
+
+ /* If parameter is present, adjust SYS_CLK_RDY_TIME to avoid glitching */
+ if (pcie->soc->sys_clk_rdy_time_us) {
+ val = readl_relaxed(pcie->base + PCIE_RESOURCE_CTRL_REG);
+ FIELD_MODIFY(PCIE_RSRC_SYS_CLK_RDY_TIME_MASK, &val,
+ pcie->soc->sys_clk_rdy_time_us);
+ writel_relaxed(val, pcie->base + PCIE_RESOURCE_CTRL_REG);
+ }
+
/* Set class code */
val = readl_relaxed(pcie->base + PCIE_PCI_IDS_1);
val &= ~GENMASK(31, 8);
@@ -336,22 +464,33 @@ static int mtk_pcie_startup_port(struct mtk_gen3_pcie *pcie)
val |= PCIE_DISABLE_DVFSRC_VLT_REQ;
writel_relaxed(val, pcie->base + PCIE_MISC_CTRL_REG);
- /* Assert all reset signals */
- val = readl_relaxed(pcie->base + PCIE_RST_CTRL_REG);
- val |= PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB | PCIE_PE_RSTB;
- writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);
-
/*
- * Described in PCIe CEM specification sections 2.2 (PERST# Signal)
- * and 2.2.1 (Initial Power-Up (G3 to S0)).
- * The deassertion of PERST# should be delayed 100ms (TPVPERL)
- * for the power and clock to become stable.
+ * Airoha EN7581 has a hw bug asserting/releasing PCIE_PE_RSTB signal
+ * causing occasional PCIe link down. In order to overcome the issue,
+ * PCIE_RSTB signals are not asserted/released at this stage and the
+ * PCIe block is reset using en7523_reset_assert() and
+ * en7581_pci_enable().
*/
- msleep(100);
-
- /* De-assert reset signals */
- val &= ~(PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB | PCIE_PE_RSTB);
- writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);
+ if (!(pcie->soc->flags & SKIP_PCIE_RSTB)) {
+ /* Assert all reset signals */
+ val = readl_relaxed(pcie->base + PCIE_RST_CTRL_REG);
+ val |= PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB |
+ PCIE_PE_RSTB;
+ writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);
+
+ /*
+ * Described in PCIe CEM specification revision 6.0.
+ *
+ * The deassertion of PERST# should be delayed 100ms (TPVPERL)
+ * for the power and clock to become stable.
+ */
+ msleep(PCIE_T_PVPERL_MS);
+
+ /* De-assert reset signals */
+ val &= ~(PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB |
+ PCIE_PE_RSTB);
+ writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);
+ }
/* Check if the link is up or not */
err = readl_poll_timeout(pcie->base + PCIE_LINK_STATUS_REG, val,
@@ -380,64 +519,41 @@ static int mtk_pcie_startup_port(struct mtk_gen3_pcie *pcie)
resource_size_t cpu_addr;
resource_size_t pci_addr;
resource_size_t size;
- const char *range_type;
- if (type == IORESOURCE_IO) {
+ if (type == IORESOURCE_IO)
cpu_addr = pci_pio_to_address(res->start);
- range_type = "IO";
- } else if (type == IORESOURCE_MEM) {
+ else if (type == IORESOURCE_MEM)
cpu_addr = res->start;
- range_type = "MEM";
- } else {
+ else
continue;
- }
pci_addr = res->start - entry->offset;
size = resource_size(res);
err = mtk_pcie_set_trans_table(pcie, cpu_addr, pci_addr, size,
- type, table_index);
+ type, &table_index);
if (err)
return err;
-
- dev_dbg(pcie->dev, "set %s trans window[%d]: cpu_addr = %#llx, pci_addr = %#llx, size = %#llx\n",
- range_type, table_index, (unsigned long long)cpu_addr,
- (unsigned long long)pci_addr, (unsigned long long)size);
-
- table_index++;
}
return 0;
}
-static int mtk_pcie_set_affinity(struct irq_data *data,
- const struct cpumask *mask, bool force)
-{
- return -EINVAL;
-}
-
-static void mtk_pcie_msi_irq_mask(struct irq_data *data)
-{
- pci_msi_mask_irq(data);
- irq_chip_mask_parent(data);
-}
-
-static void mtk_pcie_msi_irq_unmask(struct irq_data *data)
-{
- pci_msi_unmask_irq(data);
- irq_chip_unmask_parent(data);
-}
-
-static struct irq_chip mtk_msi_irq_chip = {
- .irq_ack = irq_chip_ack_parent,
- .irq_mask = mtk_pcie_msi_irq_mask,
- .irq_unmask = mtk_pcie_msi_irq_unmask,
- .name = "MSI",
-};
-
-static struct msi_domain_info mtk_msi_domain_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI),
- .chip = &mtk_msi_irq_chip,
+#define MTK_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
+ MSI_FLAG_USE_DEF_CHIP_OPS | \
+ MSI_FLAG_NO_AFFINITY | \
+ MSI_FLAG_PCI_MSI_MASK_PARENT)
+
+#define MTK_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \
+ MSI_FLAG_PCI_MSIX | \
+ MSI_FLAG_MULTI_PCI_MSI)
+
+static const struct msi_parent_ops mtk_msi_parent_ops = {
+ .required_flags = MTK_MSI_FLAGS_REQUIRED,
+ .supported_flags = MTK_MSI_FLAGS_SUPPORTED,
+ .bus_select_token = DOMAIN_BUS_PCI_MSI,
+ .chip_flags = MSI_CHIP_FLAG_SET_ACK,
+ .prefix = "MTK3-",
+ .init_dev_msi_info = msi_lib_init_dev_msi_info,
};
static void mtk_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
@@ -502,7 +618,6 @@ static struct irq_chip mtk_msi_bottom_irq_chip = {
.irq_mask = mtk_msi_bottom_irq_mask,
.irq_unmask = mtk_msi_bottom_irq_unmask,
.irq_compose_msi_msg = mtk_compose_msi_msg,
- .irq_set_affinity = mtk_pcie_set_affinity,
.name = "MSI",
};
@@ -603,7 +718,6 @@ static struct irq_chip mtk_intx_irq_chip = {
.irq_mask = mtk_intx_mask,
.irq_unmask = mtk_intx_unmask,
.irq_eoi = mtk_intx_eoi,
- .irq_set_affinity = mtk_pcie_set_affinity,
.name = "INTx",
};
@@ -635,8 +749,8 @@ static int mtk_pcie_init_irq_domains(struct mtk_gen3_pcie *pcie)
return -ENODEV;
}
- pcie->intx_domain = irq_domain_add_linear(intc_node, PCI_NUM_INTX,
- &intx_domain_ops, pcie);
+ pcie->intx_domain = irq_domain_create_linear(of_fwnode_handle(intc_node), PCI_NUM_INTX,
+ &intx_domain_ops, pcie);
if (!pcie->intx_domain) {
dev_err(dev, "failed to create INTx IRQ domain\n");
ret = -ENODEV;
@@ -646,28 +760,23 @@ static int mtk_pcie_init_irq_domains(struct mtk_gen3_pcie *pcie)
/* Setup MSI */
mutex_init(&pcie->lock);
- pcie->msi_bottom_domain = irq_domain_add_linear(node, PCIE_MSI_IRQS_NUM,
- &mtk_msi_bottom_domain_ops, pcie);
+ struct irq_domain_info info = {
+ .fwnode = dev_fwnode(dev),
+ .ops = &mtk_msi_bottom_domain_ops,
+ .host_data = pcie,
+ .size = PCIE_MSI_IRQS_NUM,
+ };
+
+ pcie->msi_bottom_domain = msi_create_parent_irq_domain(&info, &mtk_msi_parent_ops);
if (!pcie->msi_bottom_domain) {
dev_err(dev, "failed to create MSI bottom domain\n");
ret = -ENODEV;
goto err_msi_bottom_domain;
}
- pcie->msi_domain = pci_msi_create_irq_domain(dev->fwnode,
- &mtk_msi_domain_info,
- pcie->msi_bottom_domain);
- if (!pcie->msi_domain) {
- dev_err(dev, "failed to create MSI domain\n");
- ret = -ENODEV;
- goto err_msi_domain;
- }
-
of_node_put(intc_node);
return 0;
-err_msi_domain:
- irq_domain_remove(pcie->msi_bottom_domain);
err_msi_bottom_domain:
irq_domain_remove(pcie->intx_domain);
out_put_node:
@@ -682,9 +791,6 @@ static void mtk_pcie_irq_teardown(struct mtk_gen3_pcie *pcie)
if (pcie->intx_domain)
irq_domain_remove(pcie->intx_domain);
- if (pcie->msi_domain)
- irq_domain_remove(pcie->msi_domain);
-
if (pcie->msi_bottom_domain)
irq_domain_remove(pcie->msi_bottom_domain);
@@ -760,10 +866,11 @@ static int mtk_pcie_setup_irq(struct mtk_gen3_pcie *pcie)
static int mtk_pcie_parse_port(struct mtk_gen3_pcie *pcie)
{
+ int i, ret, num_resets = pcie->soc->phy_resets.num_resets;
struct device *dev = pcie->dev;
struct platform_device *pdev = to_platform_device(dev);
struct resource *regs;
- int ret;
+ u32 num_lanes;
regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcie-mac");
if (!regs)
@@ -776,12 +883,13 @@ static int mtk_pcie_parse_port(struct mtk_gen3_pcie *pcie)
pcie->reg_base = regs->start;
- pcie->phy_reset = devm_reset_control_get_optional_exclusive(dev, "phy");
- if (IS_ERR(pcie->phy_reset)) {
- ret = PTR_ERR(pcie->phy_reset);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "failed to get PHY reset\n");
+ for (i = 0; i < num_resets; i++)
+ pcie->phy_resets[i].id = pcie->soc->phy_resets.id[i];
+ ret = devm_reset_control_bulk_get_optional_shared(dev, num_resets,
+ pcie->phy_resets);
+ if (ret) {
+ dev_err(dev, "failed to get PHY bulk reset\n");
return ret;
}
@@ -809,16 +917,151 @@ static int mtk_pcie_parse_port(struct mtk_gen3_pcie *pcie)
return pcie->num_clks;
}
+ ret = of_property_read_u32(dev->of_node, "num-lanes", &num_lanes);
+ if (ret == 0) {
+ if (num_lanes == 0 || num_lanes > 16 ||
+ (num_lanes != 1 && num_lanes % 2))
+ dev_warn(dev, "invalid num-lanes, using controller defaults\n");
+ else
+ pcie->num_lanes = num_lanes;
+ }
+
return 0;
}
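
The num-lanes value read here is later encoded into PCIE_SETTING_REG: zero in the link-width field means one lane and each further bit enables x2/x4/x8/x16, while the Gen-support field carries one bit per generation above Gen1. A quick stand-alone check of both encodings, using local stand-ins for GENMASK() and fls():

#include <stdio.h>

/* Behaves like GENMASK(h, l) from include/linux/bits.h for 32-bit values. */
#define GENMASK(h, l)  (((~0u) << (l)) & (~0u >> (31 - (h))))

static unsigned int fls_u(unsigned int x)
{
	return x ? 32 - __builtin_clz(x) : 0;
}

int main(void)
{
	unsigned int lanes[] = { 1, 2, 4, 8, 16 };
	unsigned int gens[]  = { 2, 3, 4 };
	unsigned int i;

	for (i = 0; i < 5; i++) {
		unsigned int n = lanes[i];
		unsigned int field = (n > 1) ? GENMASK(fls_u(n >> 2), 0) : 0;

		printf("num-lanes=%2u -> LINK_WIDTH field 0x%x\n", n, field);
	}
	for (i = 0; i < 3; i++)
		printf("max gen %u -> GEN_SUPPORT field 0x%x\n", gens[i],
		       GENMASK(gens[i] - 2, 0));
	return 0;
}
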
+static int mtk_pcie_en7581_power_up(struct mtk_gen3_pcie *pcie)
+{
+ struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
+ struct device *dev = pcie->dev;
+ struct resource_entry *entry;
+ struct regmap *pbus_regmap;
+ u32 val, args[2], size;
+ resource_size_t addr;
+ int err;
+
+ /*
+ * The controller may have been left out of reset by the bootloader
+ * so make sure that we get a clean start by asserting resets here.
+ */
+ reset_control_bulk_assert(pcie->soc->phy_resets.num_resets,
+ pcie->phy_resets);
+
+ /* Wait for the time needed for the reset line assertion to complete. */
+ msleep(PCIE_EN7581_RESET_TIME_MS);
+
+ /*
+ * Configure PBus base address and base address mask to allow the
+ * hw to detect if a given address is accessible on PCIe controller.
+ */
+ pbus_regmap = syscon_regmap_lookup_by_phandle_args(dev->of_node,
+ "mediatek,pbus-csr",
+ ARRAY_SIZE(args),
+ args);
+ if (IS_ERR(pbus_regmap))
+ return PTR_ERR(pbus_regmap);
+
+ entry = resource_list_first_type(&host->windows, IORESOURCE_MEM);
+ if (!entry)
+ return -ENODEV;
+
+ addr = entry->res->start - entry->offset;
+ regmap_write(pbus_regmap, args[0], lower_32_bits(addr));
+ size = lower_32_bits(resource_size(entry->res));
+ regmap_write(pbus_regmap, args[1], GENMASK(31, __fls(size)));
+
+ /*
+ * Unlike the other MediaTek Gen3 controllers, the Airoha EN7581
+ * requires PHY initialization and power-on before PHY reset deassert.
+ */
+ err = phy_init(pcie->phy);
+ if (err) {
+ dev_err(dev, "failed to initialize PHY\n");
+ return err;
+ }
+
+ err = phy_power_on(pcie->phy);
+ if (err) {
+ dev_err(dev, "failed to power on PHY\n");
+ goto err_phy_on;
+ }
+
+ err = reset_control_bulk_deassert(pcie->soc->phy_resets.num_resets,
+ pcie->phy_resets);
+ if (err) {
+ dev_err(dev, "failed to deassert PHYs\n");
+ goto err_phy_deassert;
+ }
+
+ /*
+ * Wait for the time needed to complete the bulk de-assert above.
+ * This time is specific for EN7581 SoC.
+ */
+ msleep(PCIE_EN7581_RESET_TIME_MS);
+
+ pm_runtime_enable(dev);
+ pm_runtime_get_sync(dev);
+
+ val = FIELD_PREP(PCIE_VAL_LN0_DOWNSTREAM, 0x47) |
+ FIELD_PREP(PCIE_VAL_LN1_DOWNSTREAM, 0x47) |
+ FIELD_PREP(PCIE_VAL_LN0_UPSTREAM, 0x41) |
+ FIELD_PREP(PCIE_VAL_LN1_UPSTREAM, 0x41);
+ writel_relaxed(val, pcie->base + PCIE_EQ_PRESET_01_REG);
+
+ val = PCIE_K_PHYPARAM_QUERY | PCIE_K_QUERY_TIMEOUT |
+ FIELD_PREP(PCIE_K_PRESET_TO_USE_16G, 0x80) |
+ FIELD_PREP(PCIE_K_PRESET_TO_USE, 0x2) |
+ FIELD_PREP(PCIE_K_FINETUNE_MAX, 0xf);
+ writel_relaxed(val, pcie->base + PCIE_PIPE4_PIE8_REG);
+
+ err = clk_bulk_prepare_enable(pcie->num_clks, pcie->clks);
+ if (err) {
+ dev_err(dev, "failed to prepare clock\n");
+ goto err_clk_prepare_enable;
+ }
+
+ /*
+ * Airoha EN7581 performs PCIe reset via clk callbacks since it has a
+ * hw issue with the PCIE_PE_RSTB signal. Wait for the time needed to
+ * complete the PCIe reset.
+ */
+ msleep(PCIE_T_PVPERL_MS);
+
+ return 0;
+
+err_clk_prepare_enable:
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
+ reset_control_bulk_assert(pcie->soc->phy_resets.num_resets,
+ pcie->phy_resets);
+err_phy_deassert:
+ phy_power_off(pcie->phy);
+err_phy_on:
+ phy_exit(pcie->phy);
+
+ return err;
+}
+
static int mtk_pcie_power_up(struct mtk_gen3_pcie *pcie)
{
struct device *dev = pcie->dev;
int err;
+ /*
+ * The controller may have been left out of reset by the bootloader
+ * so make sure that we get a clean start by asserting resets here.
+ */
+ reset_control_bulk_assert(pcie->soc->phy_resets.num_resets,
+ pcie->phy_resets);
+ reset_control_assert(pcie->mac_reset);
+ usleep_range(PCIE_MTK_RESET_TIME_US, 2 * PCIE_MTK_RESET_TIME_US);
+
/* PHY power on and enable pipe clock */
- reset_control_deassert(pcie->phy_reset);
+ err = reset_control_bulk_deassert(pcie->soc->phy_resets.num_resets,
+ pcie->phy_resets);
+ if (err) {
+ dev_err(dev, "failed to deassert PHYs\n");
+ return err;
+ }
err = phy_init(pcie->phy);
if (err) {
@@ -854,7 +1097,8 @@ err_clk_init:
err_phy_on:
phy_exit(pcie->phy);
err_phy_init:
- reset_control_assert(pcie->phy_reset);
+ reset_control_bulk_assert(pcie->soc->phy_resets.num_resets,
+ pcie->phy_resets);
return err;
}
@@ -869,30 +1113,56 @@ static void mtk_pcie_power_down(struct mtk_gen3_pcie *pcie)
phy_power_off(pcie->phy);
phy_exit(pcie->phy);
- reset_control_assert(pcie->phy_reset);
+ reset_control_bulk_assert(pcie->soc->phy_resets.num_resets,
+ pcie->phy_resets);
+}
+
+static int mtk_pcie_get_controller_max_link_speed(struct mtk_gen3_pcie *pcie)
+{
+ u32 val;
+ int ret;
+
+ val = readl_relaxed(pcie->base + PCIE_BASE_CFG_REG);
+ val = FIELD_GET(PCIE_BASE_CFG_SPEED, val);
+ ret = fls(val);
+
+ return ret > 0 ? ret : -EINVAL;
}
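
mtk_pcie_get_controller_max_link_speed() treats PCIE_BASE_CFG_SPEED as a bitmask with one bit per supported generation, so fls() of the extracted field yields the controller's ceiling, which the setup path then weighs against any max-link-speed property. The decode in isolation, with an assumed register value:

#include <stdio.h>

static int fls_u(unsigned int x)
{
	return x ? 32 - __builtin_clz(x) : 0;
}

int main(void)
{
	unsigned int base_cfg = 0x0700;              /* assumed sample: bits 10:8 set   */
	unsigned int speed = (base_cfg >> 8) & 0xff; /* FIELD_GET(GENMASK(15, 8), ...)  */
	int max_gen = fls_u(speed);

	if (max_gen > 0)
		printf("controller supports up to Gen%d\n", max_gen); /* Gen3 here */
	else
		printf("speed field empty: the driver returns -EINVAL\n");
	return 0;
}
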
static int mtk_pcie_setup(struct mtk_gen3_pcie *pcie)
{
- int err;
+ int err, max_speed;
err = mtk_pcie_parse_port(pcie);
if (err)
return err;
/*
- * The controller may have been left out of reset by the bootloader
- * so make sure that we get a clean start by asserting resets here.
+ * Deassert the lines here to avoid an unbalanced deassert_count,
+ * since the bulk reset is shared.
*/
- reset_control_assert(pcie->phy_reset);
- reset_control_assert(pcie->mac_reset);
- usleep_range(10, 20);
+ reset_control_bulk_deassert(pcie->soc->phy_resets.num_resets,
+ pcie->phy_resets);
/* Don't touch the hardware registers before power up */
- err = mtk_pcie_power_up(pcie);
+ err = pcie->soc->power_up(pcie);
if (err)
return err;
+ err = of_pci_get_max_link_speed(pcie->dev->of_node);
+ if (err) {
+ /* Get the maximum speed supported by the controller */
+ max_speed = mtk_pcie_get_controller_max_link_speed(pcie);
+
+ /* Set max_link_speed only if the controller supports it */
+ if (max_speed >= 0 && max_speed <= err) {
+ pcie->max_link_speed = err;
+ dev_info(pcie->dev,
+ "maximum controller link speed Gen%d, overriding to Gen%u",
+ max_speed, pcie->max_link_speed);
+ }
+ }
+
/* Try link up */
err = mtk_pcie_startup_port(pcie);
if (err)
@@ -924,6 +1194,7 @@ static int mtk_pcie_probe(struct platform_device *pdev)
pcie = pci_host_bridge_priv(host);
pcie->dev = dev;
+ pcie->soc = device_get_match_data(dev);
platform_set_drvdata(pdev, pcie);
err = mtk_pcie_setup(pcie);
@@ -1021,10 +1292,12 @@ static int mtk_pcie_suspend_noirq(struct device *dev)
return err;
}
- /* Pull down the PERST# pin */
- val = readl_relaxed(pcie->base + PCIE_RST_CTRL_REG);
- val |= PCIE_PE_RSTB;
- writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);
+ if (!(pcie->soc->flags & SKIP_PCIE_RSTB)) {
+ /* Assert the PERST# pin */
+ val = readl_relaxed(pcie->base + PCIE_RST_CTRL_REG);
+ val |= PCIE_PE_RSTB;
+ writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);
+ }
dev_dbg(pcie->dev, "entered L2 states successfully");
@@ -1039,7 +1312,7 @@ static int mtk_pcie_resume_noirq(struct device *dev)
struct mtk_gen3_pcie *pcie = dev_get_drvdata(dev);
int err;
- err = mtk_pcie_power_up(pcie);
+ err = pcie->soc->power_up(pcie);
if (err)
return err;
@@ -1059,21 +1332,53 @@ static const struct dev_pm_ops mtk_pcie_pm_ops = {
mtk_pcie_resume_noirq)
};
+static const struct mtk_gen3_pcie_pdata mtk_pcie_soc_mt8192 = {
+ .power_up = mtk_pcie_power_up,
+ .phy_resets = {
+ .id[0] = "phy",
+ .num_resets = 1,
+ },
+};
+
+static const struct mtk_gen3_pcie_pdata mtk_pcie_soc_mt8196 = {
+ .power_up = mtk_pcie_power_up,
+ .phy_resets = {
+ .id[0] = "phy",
+ .num_resets = 1,
+ },
+ .sys_clk_rdy_time_us = 10,
+};
+
+static const struct mtk_gen3_pcie_pdata mtk_pcie_soc_en7581 = {
+ .power_up = mtk_pcie_en7581_power_up,
+ .phy_resets = {
+ .id[0] = "phy-lane0",
+ .id[1] = "phy-lane1",
+ .id[2] = "phy-lane2",
+ .num_resets = 3,
+ },
+ .flags = SKIP_PCIE_RSTB,
+};
+
static const struct of_device_id mtk_pcie_of_match[] = {
- { .compatible = "mediatek,mt8192-pcie" },
+ { .compatible = "airoha,en7581-pcie", .data = &mtk_pcie_soc_en7581 },
+ { .compatible = "mediatek,mt8192-pcie", .data = &mtk_pcie_soc_mt8192 },
+ { .compatible = "mediatek,mt8196-pcie", .data = &mtk_pcie_soc_mt8196 },
{},
};
MODULE_DEVICE_TABLE(of, mtk_pcie_of_match);
static struct platform_driver mtk_pcie_driver = {
.probe = mtk_pcie_probe,
- .remove_new = mtk_pcie_remove,
+ .remove = mtk_pcie_remove,
.driver = {
.name = "mtk-pcie-gen3",
.of_match_table = mtk_pcie_of_match,
.pm = &mtk_pcie_pm_ops,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
};
module_platform_driver(mtk_pcie_driver);
+MODULE_DESCRIPTION("MediaTek Gen3 PCIe host controller driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/pcie-mediatek.c b/drivers/pci/controller/pcie-mediatek.c
index 66a8f73296fc..4b78b6528f9f 100644
--- a/drivers/pci/controller/pcie-mediatek.c
+++ b/drivers/pci/controller/pcie-mediatek.c
@@ -12,6 +12,7 @@
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
+#include <linux/irqchip/irq-msi-lib.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
@@ -142,23 +143,33 @@
struct mtk_pcie_port;
/**
+ * enum mtk_pcie_quirks - MTK PCIe quirks
+ * @MTK_PCIE_FIX_CLASS_ID: host's class ID needed to be fixed
+ * @MTK_PCIE_FIX_DEVICE_ID: host's device ID needed to be fixed
+ * @MTK_PCIE_NO_MSI: Bridge has no MSI support, and relies on an external block
+ * @MTK_PCIE_SKIP_RSTB: Skip toggling the RSTB bits during PCIe probe
+ */
+enum mtk_pcie_quirks {
+ MTK_PCIE_FIX_CLASS_ID = BIT(0),
+ MTK_PCIE_FIX_DEVICE_ID = BIT(1),
+ MTK_PCIE_NO_MSI = BIT(2),
+ MTK_PCIE_SKIP_RSTB = BIT(3),
+};
+
+/**
* struct mtk_pcie_soc - differentiate between host generations
- * @need_fix_class_id: whether this host's class ID needed to be fixed or not
- * @need_fix_device_id: whether this host's device ID needed to be fixed or not
- * @no_msi: Bridge has no MSI support, and relies on an external block
* @device_id: device ID which this host need to be fixed
* @ops: pointer to configuration access functions
* @startup: pointer to controller setting functions
* @setup_irq: pointer to initialize IRQ functions
+ * @quirks: PCIe device quirks.
*/
struct mtk_pcie_soc {
- bool need_fix_class_id;
- bool need_fix_device_id;
- bool no_msi;
unsigned int device_id;
struct pci_ops *ops;
int (*startup)(struct mtk_pcie_port *port);
int (*setup_irq)(struct mtk_pcie_port *port, struct device_node *node);
+ enum mtk_pcie_quirks quirks;
};
/**
@@ -180,7 +191,6 @@ struct mtk_pcie_soc {
* @irq: GIC irq
* @irq_domain: legacy INTx IRQ domain
* @inner_domain: inner IRQ domain
- * @msi_domain: MSI IRQ domain
* @lock: protect the msi_irq_in_use bitmap
* @msi_irq_in_use: bit map for assigned MSI IRQ
*/
@@ -200,7 +210,6 @@ struct mtk_pcie_port {
int irq;
struct irq_domain *irq_domain;
struct irq_domain *inner_domain;
- struct irq_domain *msi_domain;
struct mutex lock;
DECLARE_BITMAP(msi_irq_in_use, MTK_MSI_IRQS_NUM);
};
@@ -211,7 +220,6 @@ struct mtk_pcie_port {
* @base: IO mapped register base
* @cfg: IO mapped register map for PCIe config
* @free_ck: free-run reference clock
- * @mem: non-prefetchable memory resource
* @ports: pointer to PCIe port information
* @soc: pointer to SoC-dependent operations
*/
@@ -407,12 +415,6 @@ static void mtk_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
(int)data->hwirq, msg->address_hi, msg->address_lo);
}
-static int mtk_msi_set_affinity(struct irq_data *irq_data,
- const struct cpumask *mask, bool force)
-{
- return -EINVAL;
-}
-
static void mtk_msi_ack_irq(struct irq_data *data)
{
struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data);
@@ -424,7 +426,6 @@ static void mtk_msi_ack_irq(struct irq_data *data)
static struct irq_chip mtk_msi_bottom_irq_chip = {
.name = "MTK MSI",
.irq_compose_msi_msg = mtk_compose_msi_msg,
- .irq_set_affinity = mtk_msi_set_affinity,
.irq_ack = mtk_msi_ack_irq,
};
@@ -478,40 +479,39 @@ static const struct irq_domain_ops msi_domain_ops = {
.free = mtk_pcie_irq_domain_free,
};
-static struct irq_chip mtk_msi_irq_chip = {
- .name = "MTK PCIe MSI",
- .irq_ack = irq_chip_ack_parent,
- .irq_mask = pci_msi_mask_irq,
- .irq_unmask = pci_msi_unmask_irq,
-};
+#define MTK_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
+ MSI_FLAG_USE_DEF_CHIP_OPS | \
+ MSI_FLAG_NO_AFFINITY)
+
+#define MTK_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \
+ MSI_FLAG_PCI_MSIX)
-static struct msi_domain_info mtk_msi_domain_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_PCI_MSIX),
- .chip = &mtk_msi_irq_chip,
+static const struct msi_parent_ops mtk_msi_parent_ops = {
+ .required_flags = MTK_MSI_FLAGS_REQUIRED,
+ .supported_flags = MTK_MSI_FLAGS_SUPPORTED,
+ .bus_select_token = DOMAIN_BUS_PCI_MSI,
+ .chip_flags = MSI_CHIP_FLAG_SET_ACK,
+ .prefix = "MTK-",
+ .init_dev_msi_info = msi_lib_init_dev_msi_info,
};
static int mtk_pcie_allocate_msi_domains(struct mtk_pcie_port *port)
{
- struct fwnode_handle *fwnode = of_node_to_fwnode(port->pcie->dev->of_node);
-
mutex_init(&port->lock);
- port->inner_domain = irq_domain_create_linear(fwnode, MTK_MSI_IRQS_NUM,
- &msi_domain_ops, port);
+ struct irq_domain_info info = {
+ .fwnode = dev_fwnode(port->pcie->dev),
+ .ops = &msi_domain_ops,
+ .host_data = port,
+ .size = MTK_MSI_IRQS_NUM,
+ };
+
+ port->inner_domain = msi_create_parent_irq_domain(&info, &mtk_msi_parent_ops);
if (!port->inner_domain) {
dev_err(port->pcie->dev, "failed to create IRQ domain\n");
return -ENOMEM;
}
- port->msi_domain = pci_msi_create_irq_domain(fwnode, &mtk_msi_domain_info,
- port->inner_domain);
- if (!port->msi_domain) {
- dev_err(port->pcie->dev, "failed to create MSI domain\n");
- irq_domain_remove(port->inner_domain);
- return -ENOMEM;
- }
-
return 0;
}
@@ -540,8 +540,6 @@ static void mtk_pcie_irq_teardown(struct mtk_pcie *pcie)
irq_domain_remove(port->irq_domain);
if (IS_ENABLED(CONFIG_PCI_MSI)) {
- if (port->msi_domain)
- irq_domain_remove(port->msi_domain);
if (port->inner_domain)
irq_domain_remove(port->inner_domain);
}
@@ -577,8 +575,8 @@ static int mtk_pcie_init_irq_domain(struct mtk_pcie_port *port,
return -ENODEV;
}
- port->irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
- &intx_domain_ops, port);
+ port->irq_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node), PCI_NUM_INTX,
+ &intx_domain_ops, port);
of_node_put(pcie_intc_node);
if (!port->irq_domain) {
dev_err(dev, "failed to get INTx IRQ domain\n");
@@ -617,12 +615,18 @@ static void mtk_pcie_intr_handler(struct irq_desc *desc)
if (status & MSI_STATUS){
unsigned long imsi_status;
+ /*
+ * The interrupt status can be cleared even if the
+ * MSI status remains pending. As such, given the
+ * edge-triggered interrupt type, its status should
+ * be cleared before being dispatched to the
+ * handler of the underlying device.
+ */
+ writel(MSI_STATUS, port->base + PCIE_INT_STATUS);
while ((imsi_status = readl(port->base + PCIE_IMSI_STATUS))) {
for_each_set_bit(bit, &imsi_status, MTK_MSI_IRQS_NUM)
generic_handle_domain_irq(port->inner_domain, bit);
}
- /* Clear MSI interrupt status */
- writel(MSI_STATUS, port->base + PCIE_INT_STATUS);
}
}
@@ -685,31 +689,28 @@ static int mtk_pcie_startup_port_v2(struct mtk_pcie_port *port)
regmap_update_bits(pcie->cfg, PCIE_SYS_CFG_V2, val, val);
}
- /* Assert all reset signals */
- writel(0, port->base + PCIE_RST_CTRL);
+ if (!(soc->quirks & MTK_PCIE_SKIP_RSTB)) {
+ /* Assert all reset signals */
+ writel(0, port->base + PCIE_RST_CTRL);
- /*
- * Enable PCIe link down reset, if link status changed from link up to
- * link down, this will reset MAC control registers and configuration
- * space.
- */
- writel(PCIE_LINKDOWN_RST_EN, port->base + PCIE_RST_CTRL);
+ /*
+ * Enable PCIe link down reset, if link status changed from
+ * link up to link down, this will reset MAC control registers
+ * and configuration space.
+ */
+ writel(PCIE_LINKDOWN_RST_EN, port->base + PCIE_RST_CTRL);
- /*
- * Described in PCIe CEM specification sections 2.2 (PERST# Signal) and
- * 2.2.1 (Initial Power-Up (G3 to S0)). The deassertion of PERST# should
- * be delayed 100ms (TPVPERL) for the power and clock to become stable.
- */
- msleep(100);
+ msleep(PCIE_T_PVPERL_MS);
- /* De-assert PHY, PE, PIPE, MAC and configuration reset */
- val = readl(port->base + PCIE_RST_CTRL);
- val |= PCIE_PHY_RSTB | PCIE_PERSTB | PCIE_PIPE_SRSTB |
- PCIE_MAC_SRSTB | PCIE_CRSTB;
- writel(val, port->base + PCIE_RST_CTRL);
+ /* De-assert PHY, PE, PIPE, MAC and configuration reset */
+ val = readl(port->base + PCIE_RST_CTRL);
+ val |= PCIE_PHY_RSTB | PCIE_PERSTB | PCIE_PIPE_SRSTB |
+ PCIE_MAC_SRSTB | PCIE_CRSTB;
+ writel(val, port->base + PCIE_RST_CTRL);
+ }
/* Set up vendor ID and class code */
- if (soc->need_fix_class_id) {
+ if (soc->quirks & MTK_PCIE_FIX_CLASS_ID) {
val = PCI_VENDOR_ID_MEDIATEK;
writew(val, port->base + PCIE_CONF_VEND_ID);
@@ -717,7 +718,7 @@ static int mtk_pcie_startup_port_v2(struct mtk_pcie_port *port)
writew(val, port->base + PCIE_CONF_CLASS_ID);
}
- if (soc->need_fix_device_id)
+ if (soc->quirks & MTK_PCIE_FIX_DEVICE_ID)
writew(soc->device_id, port->base + PCIE_CONF_DEVICE_ID);
/* 100ms timeout value should be enough for Gen1/2 training */
@@ -827,6 +828,41 @@ static int mtk_pcie_startup_port(struct mtk_pcie_port *port)
return 0;
}
+static int mtk_pcie_startup_port_an7583(struct mtk_pcie_port *port)
+{
+ struct mtk_pcie *pcie = port->pcie;
+ struct device *dev = pcie->dev;
+ struct pci_host_bridge *host;
+ struct resource_entry *entry;
+ struct regmap *pbus_regmap;
+ resource_size_t addr;
+ u32 args[2], size;
+
+	/*
+	 * Configure the PBus base address and base address mask so that
+	 * the hardware can detect whether a given address is accessible
+	 * through the PCIe controller.
+	 */
+ pbus_regmap = syscon_regmap_lookup_by_phandle_args(dev->of_node,
+ "mediatek,pbus-csr",
+ ARRAY_SIZE(args),
+ args);
+ if (IS_ERR(pbus_regmap))
+ return PTR_ERR(pbus_regmap);
+
+ host = pci_host_bridge_from_priv(pcie);
+ entry = resource_list_first_type(&host->windows, IORESOURCE_MEM);
+ if (!entry)
+ return -ENODEV;
+
+ addr = entry->res->start - entry->offset;
+ regmap_write(pbus_regmap, args[0], lower_32_bits(addr));
+ size = lower_32_bits(resource_size(entry->res));
+ regmap_write(pbus_regmap, args[1], GENMASK(31, __fls(size)));
+
+ return mtk_pcie_startup_port_v2(port);
+}
+
static void mtk_pcie_enable_port(struct mtk_pcie_port *port)
{
struct mtk_pcie *pcie = port->pcie;
@@ -1043,24 +1079,22 @@ err_free_ck:
static int mtk_pcie_setup(struct mtk_pcie *pcie)
{
struct device *dev = pcie->dev;
- struct device_node *node = dev->of_node, *child;
+ struct device_node *node = dev->of_node;
struct mtk_pcie_port *port, *tmp;
int err, slot;
slot = of_get_pci_domain_nr(dev->of_node);
if (slot < 0) {
- for_each_available_child_of_node(node, child) {
+ for_each_available_child_of_node_scoped(node, child) {
err = of_pci_get_devfn(child);
- if (err < 0) {
- dev_err(dev, "failed to get devfn: %d\n", err);
- goto error_put_node;
- }
+ if (err < 0)
+ return dev_err_probe(dev, err, "failed to get devfn\n");
slot = PCI_SLOT(err);
err = mtk_pcie_parse_port(pcie, child, slot);
if (err)
- goto error_put_node;
+ return err;
}
} else {
err = mtk_pcie_parse_port(pcie, node, slot);
@@ -1081,9 +1115,6 @@ static int mtk_pcie_setup(struct mtk_pcie *pcie)
mtk_pcie_subsys_powerdown(pcie);
return 0;
-error_put_node:
- of_node_put(child);
- return err;
}
static int mtk_pcie_probe(struct platform_device *pdev)
@@ -1110,7 +1141,7 @@ static int mtk_pcie_probe(struct platform_device *pdev)
host->ops = pcie->soc->ops;
host->sysdata = pcie;
- host->msi_domain = pcie->soc->no_msi;
+ host->msi_domain = !!(pcie->soc->quirks & MTK_PCIE_NO_MSI);
err = pci_host_probe(host);
if (err)
@@ -1198,9 +1229,9 @@ static const struct dev_pm_ops mtk_pcie_pm_ops = {
};
static const struct mtk_pcie_soc mtk_pcie_soc_v1 = {
- .no_msi = true,
.ops = &mtk_pcie_ops,
.startup = mtk_pcie_startup_port,
+ .quirks = MTK_PCIE_NO_MSI,
};
static const struct mtk_pcie_soc mtk_pcie_soc_mt2712 = {
@@ -1210,22 +1241,29 @@ static const struct mtk_pcie_soc mtk_pcie_soc_mt2712 = {
};
static const struct mtk_pcie_soc mtk_pcie_soc_mt7622 = {
- .need_fix_class_id = true,
.ops = &mtk_pcie_ops_v2,
.startup = mtk_pcie_startup_port_v2,
.setup_irq = mtk_pcie_setup_irq,
+ .quirks = MTK_PCIE_FIX_CLASS_ID,
+};
+
+static const struct mtk_pcie_soc mtk_pcie_soc_an7583 = {
+ .ops = &mtk_pcie_ops_v2,
+ .startup = mtk_pcie_startup_port_an7583,
+ .setup_irq = mtk_pcie_setup_irq,
+ .quirks = MTK_PCIE_FIX_CLASS_ID | MTK_PCIE_SKIP_RSTB,
};
static const struct mtk_pcie_soc mtk_pcie_soc_mt7629 = {
- .need_fix_class_id = true,
- .need_fix_device_id = true,
.device_id = PCI_DEVICE_ID_MEDIATEK_7629,
.ops = &mtk_pcie_ops_v2,
.startup = mtk_pcie_startup_port_v2,
.setup_irq = mtk_pcie_setup_irq,
+ .quirks = MTK_PCIE_FIX_CLASS_ID | MTK_PCIE_FIX_DEVICE_ID,
};
static const struct of_device_id mtk_pcie_ids[] = {
+ { .compatible = "airoha,an7583-pcie", .data = &mtk_pcie_soc_an7583 },
{ .compatible = "mediatek,mt2701-pcie", .data = &mtk_pcie_soc_v1 },
{ .compatible = "mediatek,mt7623-pcie", .data = &mtk_pcie_soc_v1 },
{ .compatible = "mediatek,mt2712-pcie", .data = &mtk_pcie_soc_mt2712 },
@@ -1237,7 +1275,7 @@ MODULE_DEVICE_TABLE(of, mtk_pcie_ids);
static struct platform_driver mtk_pcie_driver = {
.probe = mtk_pcie_probe,
- .remove_new = mtk_pcie_remove,
+ .remove = mtk_pcie_remove,
.driver = {
.name = "mtk-pcie",
.of_match_table = mtk_pcie_ids,
@@ -1246,4 +1284,5 @@ static struct platform_driver mtk_pcie_driver = {
},
};
module_platform_driver(mtk_pcie_driver);
+MODULE_DESCRIPTION("MediaTek PCIe host controller driver");
MODULE_LICENSE("GPL v2");
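
The MediaTek conversion above follows the generic per-device MSI parent-domain pattern: the driver keeps only its low-level irq_domain_ops plus a static msi_parent_ops, and a single msi_create_parent_irq_domain() call replaces the old inner-domain/pci_msi_create_irq_domain() pair. A minimal illustrative sketch of that pattern is shown below; all foo_* names are hypothetical placeholders, not symbols from this driver.

#include <linux/irqchip/irq-msi-lib.h>
#include <linux/irqdomain.h>

static const struct msi_parent_ops foo_msi_parent_ops = {
	.required_flags		= MSI_FLAG_USE_DEF_DOM_OPS |
				  MSI_FLAG_USE_DEF_CHIP_OPS |
				  MSI_FLAG_NO_AFFINITY,
	.supported_flags	= MSI_GENERIC_FLAGS_MASK | MSI_FLAG_PCI_MSIX,
	.bus_select_token	= DOMAIN_BUS_PCI_MSI,
	.chip_flags		= MSI_CHIP_FLAG_SET_ACK,
	.prefix			= "FOO-",
	.init_dev_msi_info	= msi_lib_init_dev_msi_info,
};

static int foo_allocate_msi_domain(struct foo_port *port)
{
	struct irq_domain_info info = {
		.fwnode		= dev_fwnode(port->dev),
		.ops		= &foo_msi_domain_ops,	/* driver's .alloc/.free callbacks */
		.host_data	= port,
		.size		= FOO_NUM_MSI_IRQS,
	};

	/* One call creates the parent domain; no separate top-level MSI domain is needed */
	port->msi_domain = msi_create_parent_irq_domain(&info, &foo_msi_parent_ops);
	return port->msi_domain ? 0 : -ENOMEM;
}
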
diff --git a/drivers/pci/controller/pcie-microchip-host.c b/drivers/pci/controller/pcie-microchip-host.c
deleted file mode 100644
index 5e710e485464..000000000000
--- a/drivers/pci/controller/pcie-microchip-host.c
+++ /dev/null
@@ -1,1139 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Microchip AXI PCIe Bridge host controller driver
- *
- * Copyright (c) 2018 - 2020 Microchip Corporation. All rights reserved.
- *
- * Author: Daire McNamara <daire.mcnamara@microchip.com>
- */
-
-#include <linux/clk.h>
-#include <linux/irqchip/chained_irq.h>
-#include <linux/irqdomain.h>
-#include <linux/module.h>
-#include <linux/msi.h>
-#include <linux/of_address.h>
-#include <linux/of_pci.h>
-#include <linux/pci-ecam.h>
-#include <linux/platform_device.h>
-
-#include "../pci.h"
-
-/* Number of MSI IRQs */
-#define MC_NUM_MSI_IRQS 32
-#define MC_NUM_MSI_IRQS_CODED 5
-
-/* PCIe Bridge Phy and Controller Phy offsets */
-#define MC_PCIE1_BRIDGE_ADDR 0x00008000u
-#define MC_PCIE1_CTRL_ADDR 0x0000a000u
-
-#define MC_PCIE_BRIDGE_ADDR (MC_PCIE1_BRIDGE_ADDR)
-#define MC_PCIE_CTRL_ADDR (MC_PCIE1_CTRL_ADDR)
-
-/* PCIe Controller Phy Regs */
-#define SEC_ERROR_CNT 0x20
-#define DED_ERROR_CNT 0x24
-#define SEC_ERROR_INT 0x28
-#define SEC_ERROR_INT_TX_RAM_SEC_ERR_INT GENMASK(3, 0)
-#define SEC_ERROR_INT_RX_RAM_SEC_ERR_INT GENMASK(7, 4)
-#define SEC_ERROR_INT_PCIE2AXI_RAM_SEC_ERR_INT GENMASK(11, 8)
-#define SEC_ERROR_INT_AXI2PCIE_RAM_SEC_ERR_INT GENMASK(15, 12)
-#define NUM_SEC_ERROR_INTS (4)
-#define SEC_ERROR_INT_MASK 0x2c
-#define DED_ERROR_INT 0x30
-#define DED_ERROR_INT_TX_RAM_DED_ERR_INT GENMASK(3, 0)
-#define DED_ERROR_INT_RX_RAM_DED_ERR_INT GENMASK(7, 4)
-#define DED_ERROR_INT_PCIE2AXI_RAM_DED_ERR_INT GENMASK(11, 8)
-#define DED_ERROR_INT_AXI2PCIE_RAM_DED_ERR_INT GENMASK(15, 12)
-#define NUM_DED_ERROR_INTS (4)
-#define DED_ERROR_INT_MASK 0x34
-#define ECC_CONTROL 0x38
-#define ECC_CONTROL_TX_RAM_INJ_ERROR_0 BIT(0)
-#define ECC_CONTROL_TX_RAM_INJ_ERROR_1 BIT(1)
-#define ECC_CONTROL_TX_RAM_INJ_ERROR_2 BIT(2)
-#define ECC_CONTROL_TX_RAM_INJ_ERROR_3 BIT(3)
-#define ECC_CONTROL_RX_RAM_INJ_ERROR_0 BIT(4)
-#define ECC_CONTROL_RX_RAM_INJ_ERROR_1 BIT(5)
-#define ECC_CONTROL_RX_RAM_INJ_ERROR_2 BIT(6)
-#define ECC_CONTROL_RX_RAM_INJ_ERROR_3 BIT(7)
-#define ECC_CONTROL_PCIE2AXI_RAM_INJ_ERROR_0 BIT(8)
-#define ECC_CONTROL_PCIE2AXI_RAM_INJ_ERROR_1 BIT(9)
-#define ECC_CONTROL_PCIE2AXI_RAM_INJ_ERROR_2 BIT(10)
-#define ECC_CONTROL_PCIE2AXI_RAM_INJ_ERROR_3 BIT(11)
-#define ECC_CONTROL_AXI2PCIE_RAM_INJ_ERROR_0 BIT(12)
-#define ECC_CONTROL_AXI2PCIE_RAM_INJ_ERROR_1 BIT(13)
-#define ECC_CONTROL_AXI2PCIE_RAM_INJ_ERROR_2 BIT(14)
-#define ECC_CONTROL_AXI2PCIE_RAM_INJ_ERROR_3 BIT(15)
-#define ECC_CONTROL_TX_RAM_ECC_BYPASS BIT(24)
-#define ECC_CONTROL_RX_RAM_ECC_BYPASS BIT(25)
-#define ECC_CONTROL_PCIE2AXI_RAM_ECC_BYPASS BIT(26)
-#define ECC_CONTROL_AXI2PCIE_RAM_ECC_BYPASS BIT(27)
-#define LTSSM_STATE 0x5c
-#define LTSSM_L0_STATE 0x10
-#define PCIE_EVENT_INT 0x14c
-#define PCIE_EVENT_INT_L2_EXIT_INT BIT(0)
-#define PCIE_EVENT_INT_HOTRST_EXIT_INT BIT(1)
-#define PCIE_EVENT_INT_DLUP_EXIT_INT BIT(2)
-#define PCIE_EVENT_INT_MASK GENMASK(2, 0)
-#define PCIE_EVENT_INT_L2_EXIT_INT_MASK BIT(16)
-#define PCIE_EVENT_INT_HOTRST_EXIT_INT_MASK BIT(17)
-#define PCIE_EVENT_INT_DLUP_EXIT_INT_MASK BIT(18)
-#define PCIE_EVENT_INT_ENB_MASK GENMASK(18, 16)
-#define PCIE_EVENT_INT_ENB_SHIFT 16
-#define NUM_PCIE_EVENTS (3)
-
-/* PCIe Bridge Phy Regs */
-#define PCIE_PCI_IDS_DW1 0x9c
-
-/* PCIe Config space MSI capability structure */
-#define MC_MSI_CAP_CTRL_OFFSET 0xe0u
-#define MC_MSI_MAX_Q_AVAIL (MC_NUM_MSI_IRQS_CODED << 1)
-#define MC_MSI_Q_SIZE (MC_NUM_MSI_IRQS_CODED << 4)
-
-#define IMASK_LOCAL 0x180
-#define DMA_END_ENGINE_0_MASK 0x00000000u
-#define DMA_END_ENGINE_0_SHIFT 0
-#define DMA_END_ENGINE_1_MASK 0x00000000u
-#define DMA_END_ENGINE_1_SHIFT 1
-#define DMA_ERROR_ENGINE_0_MASK 0x00000100u
-#define DMA_ERROR_ENGINE_0_SHIFT 8
-#define DMA_ERROR_ENGINE_1_MASK 0x00000200u
-#define DMA_ERROR_ENGINE_1_SHIFT 9
-#define A_ATR_EVT_POST_ERR_MASK 0x00010000u
-#define A_ATR_EVT_POST_ERR_SHIFT 16
-#define A_ATR_EVT_FETCH_ERR_MASK 0x00020000u
-#define A_ATR_EVT_FETCH_ERR_SHIFT 17
-#define A_ATR_EVT_DISCARD_ERR_MASK 0x00040000u
-#define A_ATR_EVT_DISCARD_ERR_SHIFT 18
-#define A_ATR_EVT_DOORBELL_MASK 0x00000000u
-#define A_ATR_EVT_DOORBELL_SHIFT 19
-#define P_ATR_EVT_POST_ERR_MASK 0x00100000u
-#define P_ATR_EVT_POST_ERR_SHIFT 20
-#define P_ATR_EVT_FETCH_ERR_MASK 0x00200000u
-#define P_ATR_EVT_FETCH_ERR_SHIFT 21
-#define P_ATR_EVT_DISCARD_ERR_MASK 0x00400000u
-#define P_ATR_EVT_DISCARD_ERR_SHIFT 22
-#define P_ATR_EVT_DOORBELL_MASK 0x00000000u
-#define P_ATR_EVT_DOORBELL_SHIFT 23
-#define PM_MSI_INT_INTA_MASK 0x01000000u
-#define PM_MSI_INT_INTA_SHIFT 24
-#define PM_MSI_INT_INTB_MASK 0x02000000u
-#define PM_MSI_INT_INTB_SHIFT 25
-#define PM_MSI_INT_INTC_MASK 0x04000000u
-#define PM_MSI_INT_INTC_SHIFT 26
-#define PM_MSI_INT_INTD_MASK 0x08000000u
-#define PM_MSI_INT_INTD_SHIFT 27
-#define PM_MSI_INT_INTX_MASK 0x0f000000u
-#define PM_MSI_INT_INTX_SHIFT 24
-#define PM_MSI_INT_MSI_MASK 0x10000000u
-#define PM_MSI_INT_MSI_SHIFT 28
-#define PM_MSI_INT_AER_EVT_MASK 0x20000000u
-#define PM_MSI_INT_AER_EVT_SHIFT 29
-#define PM_MSI_INT_EVENTS_MASK 0x40000000u
-#define PM_MSI_INT_EVENTS_SHIFT 30
-#define PM_MSI_INT_SYS_ERR_MASK 0x80000000u
-#define PM_MSI_INT_SYS_ERR_SHIFT 31
-#define NUM_LOCAL_EVENTS 15
-#define ISTATUS_LOCAL 0x184
-#define IMASK_HOST 0x188
-#define ISTATUS_HOST 0x18c
-#define MSI_ADDR 0x190
-#define ISTATUS_MSI 0x194
-
-/* PCIe Master table init defines */
-#define ATR0_PCIE_WIN0_SRCADDR_PARAM 0x600u
-#define ATR0_PCIE_ATR_SIZE 0x25
-#define ATR0_PCIE_ATR_SIZE_SHIFT 1
-#define ATR0_PCIE_WIN0_SRC_ADDR 0x604u
-#define ATR0_PCIE_WIN0_TRSL_ADDR_LSB 0x608u
-#define ATR0_PCIE_WIN0_TRSL_ADDR_UDW 0x60cu
-#define ATR0_PCIE_WIN0_TRSL_PARAM 0x610u
-
-/* PCIe AXI slave table init defines */
-#define ATR0_AXI4_SLV0_SRCADDR_PARAM 0x800u
-#define ATR_SIZE_SHIFT 1
-#define ATR_IMPL_ENABLE 1
-#define ATR0_AXI4_SLV0_SRC_ADDR 0x804u
-#define ATR0_AXI4_SLV0_TRSL_ADDR_LSB 0x808u
-#define ATR0_AXI4_SLV0_TRSL_ADDR_UDW 0x80cu
-#define ATR0_AXI4_SLV0_TRSL_PARAM 0x810u
-#define PCIE_TX_RX_INTERFACE 0x00000000u
-#define PCIE_CONFIG_INTERFACE 0x00000001u
-
-#define ATR_ENTRY_SIZE 32
-
-#define EVENT_PCIE_L2_EXIT 0
-#define EVENT_PCIE_HOTRST_EXIT 1
-#define EVENT_PCIE_DLUP_EXIT 2
-#define EVENT_SEC_TX_RAM_SEC_ERR 3
-#define EVENT_SEC_RX_RAM_SEC_ERR 4
-#define EVENT_SEC_AXI2PCIE_RAM_SEC_ERR 5
-#define EVENT_SEC_PCIE2AXI_RAM_SEC_ERR 6
-#define EVENT_DED_TX_RAM_DED_ERR 7
-#define EVENT_DED_RX_RAM_DED_ERR 8
-#define EVENT_DED_AXI2PCIE_RAM_DED_ERR 9
-#define EVENT_DED_PCIE2AXI_RAM_DED_ERR 10
-#define EVENT_LOCAL_DMA_END_ENGINE_0 11
-#define EVENT_LOCAL_DMA_END_ENGINE_1 12
-#define EVENT_LOCAL_DMA_ERROR_ENGINE_0 13
-#define EVENT_LOCAL_DMA_ERROR_ENGINE_1 14
-#define EVENT_LOCAL_A_ATR_EVT_POST_ERR 15
-#define EVENT_LOCAL_A_ATR_EVT_FETCH_ERR 16
-#define EVENT_LOCAL_A_ATR_EVT_DISCARD_ERR 17
-#define EVENT_LOCAL_A_ATR_EVT_DOORBELL 18
-#define EVENT_LOCAL_P_ATR_EVT_POST_ERR 19
-#define EVENT_LOCAL_P_ATR_EVT_FETCH_ERR 20
-#define EVENT_LOCAL_P_ATR_EVT_DISCARD_ERR 21
-#define EVENT_LOCAL_P_ATR_EVT_DOORBELL 22
-#define EVENT_LOCAL_PM_MSI_INT_INTX 23
-#define EVENT_LOCAL_PM_MSI_INT_MSI 24
-#define EVENT_LOCAL_PM_MSI_INT_AER_EVT 25
-#define EVENT_LOCAL_PM_MSI_INT_EVENTS 26
-#define EVENT_LOCAL_PM_MSI_INT_SYS_ERR 27
-#define NUM_EVENTS 28
-
-#define PCIE_EVENT_CAUSE(x, s) \
- [EVENT_PCIE_ ## x] = { __stringify(x), s }
-
-#define SEC_ERROR_CAUSE(x, s) \
- [EVENT_SEC_ ## x] = { __stringify(x), s }
-
-#define DED_ERROR_CAUSE(x, s) \
- [EVENT_DED_ ## x] = { __stringify(x), s }
-
-#define LOCAL_EVENT_CAUSE(x, s) \
- [EVENT_LOCAL_ ## x] = { __stringify(x), s }
-
-#define PCIE_EVENT(x) \
- .base = MC_PCIE_CTRL_ADDR, \
- .offset = PCIE_EVENT_INT, \
- .mask_offset = PCIE_EVENT_INT, \
- .mask_high = 1, \
- .mask = PCIE_EVENT_INT_ ## x ## _INT, \
- .enb_mask = PCIE_EVENT_INT_ENB_MASK
-
-#define SEC_EVENT(x) \
- .base = MC_PCIE_CTRL_ADDR, \
- .offset = SEC_ERROR_INT, \
- .mask_offset = SEC_ERROR_INT_MASK, \
- .mask = SEC_ERROR_INT_ ## x ## _INT, \
- .mask_high = 1, \
- .enb_mask = 0
-
-#define DED_EVENT(x) \
- .base = MC_PCIE_CTRL_ADDR, \
- .offset = DED_ERROR_INT, \
- .mask_offset = DED_ERROR_INT_MASK, \
- .mask_high = 1, \
- .mask = DED_ERROR_INT_ ## x ## _INT, \
- .enb_mask = 0
-
-#define LOCAL_EVENT(x) \
- .base = MC_PCIE_BRIDGE_ADDR, \
- .offset = ISTATUS_LOCAL, \
- .mask_offset = IMASK_LOCAL, \
- .mask_high = 0, \
- .mask = x ## _MASK, \
- .enb_mask = 0
-
-#define PCIE_EVENT_TO_EVENT_MAP(x) \
- { PCIE_EVENT_INT_ ## x ## _INT, EVENT_PCIE_ ## x }
-
-#define SEC_ERROR_TO_EVENT_MAP(x) \
- { SEC_ERROR_INT_ ## x ## _INT, EVENT_SEC_ ## x }
-
-#define DED_ERROR_TO_EVENT_MAP(x) \
- { DED_ERROR_INT_ ## x ## _INT, EVENT_DED_ ## x }
-
-#define LOCAL_STATUS_TO_EVENT_MAP(x) \
- { x ## _MASK, EVENT_LOCAL_ ## x }
-
-struct event_map {
- u32 reg_mask;
- u32 event_bit;
-};
-
-struct mc_msi {
- struct mutex lock; /* Protect used bitmap */
- struct irq_domain *msi_domain;
- struct irq_domain *dev_domain;
- u32 num_vectors;
- u64 vector_phy;
- DECLARE_BITMAP(used, MC_NUM_MSI_IRQS);
-};
-
-struct mc_pcie {
- void __iomem *axi_base_addr;
- struct device *dev;
- struct irq_domain *intx_domain;
- struct irq_domain *event_domain;
- raw_spinlock_t lock;
- struct mc_msi msi;
-};
-
-struct cause {
- const char *sym;
- const char *str;
-};
-
-static const struct cause event_cause[NUM_EVENTS] = {
- PCIE_EVENT_CAUSE(L2_EXIT, "L2 exit event"),
- PCIE_EVENT_CAUSE(HOTRST_EXIT, "Hot reset exit event"),
- PCIE_EVENT_CAUSE(DLUP_EXIT, "DLUP exit event"),
- SEC_ERROR_CAUSE(TX_RAM_SEC_ERR, "sec error in tx buffer"),
- SEC_ERROR_CAUSE(RX_RAM_SEC_ERR, "sec error in rx buffer"),
- SEC_ERROR_CAUSE(PCIE2AXI_RAM_SEC_ERR, "sec error in pcie2axi buffer"),
- SEC_ERROR_CAUSE(AXI2PCIE_RAM_SEC_ERR, "sec error in axi2pcie buffer"),
- DED_ERROR_CAUSE(TX_RAM_DED_ERR, "ded error in tx buffer"),
- DED_ERROR_CAUSE(RX_RAM_DED_ERR, "ded error in rx buffer"),
- DED_ERROR_CAUSE(PCIE2AXI_RAM_DED_ERR, "ded error in pcie2axi buffer"),
- DED_ERROR_CAUSE(AXI2PCIE_RAM_DED_ERR, "ded error in axi2pcie buffer"),
- LOCAL_EVENT_CAUSE(DMA_ERROR_ENGINE_0, "dma engine 0 error"),
- LOCAL_EVENT_CAUSE(DMA_ERROR_ENGINE_1, "dma engine 1 error"),
- LOCAL_EVENT_CAUSE(A_ATR_EVT_POST_ERR, "axi write request error"),
- LOCAL_EVENT_CAUSE(A_ATR_EVT_FETCH_ERR, "axi read request error"),
- LOCAL_EVENT_CAUSE(A_ATR_EVT_DISCARD_ERR, "axi read timeout"),
- LOCAL_EVENT_CAUSE(P_ATR_EVT_POST_ERR, "pcie write request error"),
- LOCAL_EVENT_CAUSE(P_ATR_EVT_FETCH_ERR, "pcie read request error"),
- LOCAL_EVENT_CAUSE(P_ATR_EVT_DISCARD_ERR, "pcie read timeout"),
- LOCAL_EVENT_CAUSE(PM_MSI_INT_AER_EVT, "aer event"),
- LOCAL_EVENT_CAUSE(PM_MSI_INT_EVENTS, "pm/ltr/hotplug event"),
- LOCAL_EVENT_CAUSE(PM_MSI_INT_SYS_ERR, "system error"),
-};
-
-static struct event_map pcie_event_to_event[] = {
- PCIE_EVENT_TO_EVENT_MAP(L2_EXIT),
- PCIE_EVENT_TO_EVENT_MAP(HOTRST_EXIT),
- PCIE_EVENT_TO_EVENT_MAP(DLUP_EXIT),
-};
-
-static struct event_map sec_error_to_event[] = {
- SEC_ERROR_TO_EVENT_MAP(TX_RAM_SEC_ERR),
- SEC_ERROR_TO_EVENT_MAP(RX_RAM_SEC_ERR),
- SEC_ERROR_TO_EVENT_MAP(PCIE2AXI_RAM_SEC_ERR),
- SEC_ERROR_TO_EVENT_MAP(AXI2PCIE_RAM_SEC_ERR),
-};
-
-static struct event_map ded_error_to_event[] = {
- DED_ERROR_TO_EVENT_MAP(TX_RAM_DED_ERR),
- DED_ERROR_TO_EVENT_MAP(RX_RAM_DED_ERR),
- DED_ERROR_TO_EVENT_MAP(PCIE2AXI_RAM_DED_ERR),
- DED_ERROR_TO_EVENT_MAP(AXI2PCIE_RAM_DED_ERR),
-};
-
-static struct event_map local_status_to_event[] = {
- LOCAL_STATUS_TO_EVENT_MAP(DMA_END_ENGINE_0),
- LOCAL_STATUS_TO_EVENT_MAP(DMA_END_ENGINE_1),
- LOCAL_STATUS_TO_EVENT_MAP(DMA_ERROR_ENGINE_0),
- LOCAL_STATUS_TO_EVENT_MAP(DMA_ERROR_ENGINE_1),
- LOCAL_STATUS_TO_EVENT_MAP(A_ATR_EVT_POST_ERR),
- LOCAL_STATUS_TO_EVENT_MAP(A_ATR_EVT_FETCH_ERR),
- LOCAL_STATUS_TO_EVENT_MAP(A_ATR_EVT_DISCARD_ERR),
- LOCAL_STATUS_TO_EVENT_MAP(A_ATR_EVT_DOORBELL),
- LOCAL_STATUS_TO_EVENT_MAP(P_ATR_EVT_POST_ERR),
- LOCAL_STATUS_TO_EVENT_MAP(P_ATR_EVT_FETCH_ERR),
- LOCAL_STATUS_TO_EVENT_MAP(P_ATR_EVT_DISCARD_ERR),
- LOCAL_STATUS_TO_EVENT_MAP(P_ATR_EVT_DOORBELL),
- LOCAL_STATUS_TO_EVENT_MAP(PM_MSI_INT_INTX),
- LOCAL_STATUS_TO_EVENT_MAP(PM_MSI_INT_MSI),
- LOCAL_STATUS_TO_EVENT_MAP(PM_MSI_INT_AER_EVT),
- LOCAL_STATUS_TO_EVENT_MAP(PM_MSI_INT_EVENTS),
- LOCAL_STATUS_TO_EVENT_MAP(PM_MSI_INT_SYS_ERR),
-};
-
-static struct {
- u32 base;
- u32 offset;
- u32 mask;
- u32 shift;
- u32 enb_mask;
- u32 mask_high;
- u32 mask_offset;
-} event_descs[] = {
- { PCIE_EVENT(L2_EXIT) },
- { PCIE_EVENT(HOTRST_EXIT) },
- { PCIE_EVENT(DLUP_EXIT) },
- { SEC_EVENT(TX_RAM_SEC_ERR) },
- { SEC_EVENT(RX_RAM_SEC_ERR) },
- { SEC_EVENT(PCIE2AXI_RAM_SEC_ERR) },
- { SEC_EVENT(AXI2PCIE_RAM_SEC_ERR) },
- { DED_EVENT(TX_RAM_DED_ERR) },
- { DED_EVENT(RX_RAM_DED_ERR) },
- { DED_EVENT(PCIE2AXI_RAM_DED_ERR) },
- { DED_EVENT(AXI2PCIE_RAM_DED_ERR) },
- { LOCAL_EVENT(DMA_END_ENGINE_0) },
- { LOCAL_EVENT(DMA_END_ENGINE_1) },
- { LOCAL_EVENT(DMA_ERROR_ENGINE_0) },
- { LOCAL_EVENT(DMA_ERROR_ENGINE_1) },
- { LOCAL_EVENT(A_ATR_EVT_POST_ERR) },
- { LOCAL_EVENT(A_ATR_EVT_FETCH_ERR) },
- { LOCAL_EVENT(A_ATR_EVT_DISCARD_ERR) },
- { LOCAL_EVENT(A_ATR_EVT_DOORBELL) },
- { LOCAL_EVENT(P_ATR_EVT_POST_ERR) },
- { LOCAL_EVENT(P_ATR_EVT_FETCH_ERR) },
- { LOCAL_EVENT(P_ATR_EVT_DISCARD_ERR) },
- { LOCAL_EVENT(P_ATR_EVT_DOORBELL) },
- { LOCAL_EVENT(PM_MSI_INT_INTX) },
- { LOCAL_EVENT(PM_MSI_INT_MSI) },
- { LOCAL_EVENT(PM_MSI_INT_AER_EVT) },
- { LOCAL_EVENT(PM_MSI_INT_EVENTS) },
- { LOCAL_EVENT(PM_MSI_INT_SYS_ERR) },
-};
-
-static char poss_clks[][5] = { "fic0", "fic1", "fic2", "fic3" };
-
-static void mc_pcie_enable_msi(struct mc_pcie *port, void __iomem *base)
-{
- struct mc_msi *msi = &port->msi;
- u32 cap_offset = MC_MSI_CAP_CTRL_OFFSET;
- u16 msg_ctrl = readw_relaxed(base + cap_offset + PCI_MSI_FLAGS);
-
- msg_ctrl |= PCI_MSI_FLAGS_ENABLE;
- msg_ctrl &= ~PCI_MSI_FLAGS_QMASK;
- msg_ctrl |= MC_MSI_MAX_Q_AVAIL;
- msg_ctrl &= ~PCI_MSI_FLAGS_QSIZE;
- msg_ctrl |= MC_MSI_Q_SIZE;
- msg_ctrl |= PCI_MSI_FLAGS_64BIT;
-
- writew_relaxed(msg_ctrl, base + cap_offset + PCI_MSI_FLAGS);
-
- writel_relaxed(lower_32_bits(msi->vector_phy),
- base + cap_offset + PCI_MSI_ADDRESS_LO);
- writel_relaxed(upper_32_bits(msi->vector_phy),
- base + cap_offset + PCI_MSI_ADDRESS_HI);
-}
-
-static void mc_handle_msi(struct irq_desc *desc)
-{
- struct mc_pcie *port = irq_desc_get_handler_data(desc);
- struct irq_chip *chip = irq_desc_get_chip(desc);
- struct device *dev = port->dev;
- struct mc_msi *msi = &port->msi;
- void __iomem *bridge_base_addr =
- port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
- unsigned long status;
- u32 bit;
- int ret;
-
- chained_irq_enter(chip, desc);
-
- status = readl_relaxed(bridge_base_addr + ISTATUS_LOCAL);
- if (status & PM_MSI_INT_MSI_MASK) {
- writel_relaxed(status & PM_MSI_INT_MSI_MASK, bridge_base_addr + ISTATUS_LOCAL);
- status = readl_relaxed(bridge_base_addr + ISTATUS_MSI);
- for_each_set_bit(bit, &status, msi->num_vectors) {
- ret = generic_handle_domain_irq(msi->dev_domain, bit);
- if (ret)
- dev_err_ratelimited(dev, "bad MSI IRQ %d\n",
- bit);
- }
- }
-
- chained_irq_exit(chip, desc);
-}
-
-static void mc_msi_bottom_irq_ack(struct irq_data *data)
-{
- struct mc_pcie *port = irq_data_get_irq_chip_data(data);
- void __iomem *bridge_base_addr =
- port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
- u32 bitpos = data->hwirq;
-
- writel_relaxed(BIT(bitpos), bridge_base_addr + ISTATUS_MSI);
-}
-
-static void mc_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
-{
- struct mc_pcie *port = irq_data_get_irq_chip_data(data);
- phys_addr_t addr = port->msi.vector_phy;
-
- msg->address_lo = lower_32_bits(addr);
- msg->address_hi = upper_32_bits(addr);
- msg->data = data->hwirq;
-
- dev_dbg(port->dev, "msi#%x address_hi %#x address_lo %#x\n",
- (int)data->hwirq, msg->address_hi, msg->address_lo);
-}
-
-static int mc_msi_set_affinity(struct irq_data *irq_data,
- const struct cpumask *mask, bool force)
-{
- return -EINVAL;
-}
-
-static struct irq_chip mc_msi_bottom_irq_chip = {
- .name = "Microchip MSI",
- .irq_ack = mc_msi_bottom_irq_ack,
- .irq_compose_msi_msg = mc_compose_msi_msg,
- .irq_set_affinity = mc_msi_set_affinity,
-};
-
-static int mc_irq_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
- unsigned int nr_irqs, void *args)
-{
- struct mc_pcie *port = domain->host_data;
- struct mc_msi *msi = &port->msi;
- void __iomem *bridge_base_addr =
- port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
- unsigned long bit;
- u32 val;
-
- mutex_lock(&msi->lock);
- bit = find_first_zero_bit(msi->used, msi->num_vectors);
- if (bit >= msi->num_vectors) {
- mutex_unlock(&msi->lock);
- return -ENOSPC;
- }
-
- set_bit(bit, msi->used);
-
- irq_domain_set_info(domain, virq, bit, &mc_msi_bottom_irq_chip,
- domain->host_data, handle_edge_irq, NULL, NULL);
-
- /* Enable MSI interrupts */
- val = readl_relaxed(bridge_base_addr + IMASK_LOCAL);
- val |= PM_MSI_INT_MSI_MASK;
- writel_relaxed(val, bridge_base_addr + IMASK_LOCAL);
-
- mutex_unlock(&msi->lock);
-
- return 0;
-}
-
-static void mc_irq_msi_domain_free(struct irq_domain *domain, unsigned int virq,
- unsigned int nr_irqs)
-{
- struct irq_data *d = irq_domain_get_irq_data(domain, virq);
- struct mc_pcie *port = irq_data_get_irq_chip_data(d);
- struct mc_msi *msi = &port->msi;
-
- mutex_lock(&msi->lock);
-
- if (test_bit(d->hwirq, msi->used))
- __clear_bit(d->hwirq, msi->used);
- else
- dev_err(port->dev, "trying to free unused MSI%lu\n", d->hwirq);
-
- mutex_unlock(&msi->lock);
-}
-
-static const struct irq_domain_ops msi_domain_ops = {
- .alloc = mc_irq_msi_domain_alloc,
- .free = mc_irq_msi_domain_free,
-};
-
-static struct irq_chip mc_msi_irq_chip = {
- .name = "Microchip PCIe MSI",
- .irq_ack = irq_chip_ack_parent,
- .irq_mask = pci_msi_mask_irq,
- .irq_unmask = pci_msi_unmask_irq,
-};
-
-static struct msi_domain_info mc_msi_domain_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_PCI_MSIX),
- .chip = &mc_msi_irq_chip,
-};
-
-static int mc_allocate_msi_domains(struct mc_pcie *port)
-{
- struct device *dev = port->dev;
- struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node);
- struct mc_msi *msi = &port->msi;
-
- mutex_init(&port->msi.lock);
-
- msi->dev_domain = irq_domain_add_linear(NULL, msi->num_vectors,
- &msi_domain_ops, port);
- if (!msi->dev_domain) {
- dev_err(dev, "failed to create IRQ domain\n");
- return -ENOMEM;
- }
-
- msi->msi_domain = pci_msi_create_irq_domain(fwnode, &mc_msi_domain_info,
- msi->dev_domain);
- if (!msi->msi_domain) {
- dev_err(dev, "failed to create MSI domain\n");
- irq_domain_remove(msi->dev_domain);
- return -ENOMEM;
- }
-
- return 0;
-}
-
-static void mc_handle_intx(struct irq_desc *desc)
-{
- struct mc_pcie *port = irq_desc_get_handler_data(desc);
- struct irq_chip *chip = irq_desc_get_chip(desc);
- struct device *dev = port->dev;
- void __iomem *bridge_base_addr =
- port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
- unsigned long status;
- u32 bit;
- int ret;
-
- chained_irq_enter(chip, desc);
-
- status = readl_relaxed(bridge_base_addr + ISTATUS_LOCAL);
- if (status & PM_MSI_INT_INTX_MASK) {
- status &= PM_MSI_INT_INTX_MASK;
- status >>= PM_MSI_INT_INTX_SHIFT;
- for_each_set_bit(bit, &status, PCI_NUM_INTX) {
- ret = generic_handle_domain_irq(port->intx_domain, bit);
- if (ret)
- dev_err_ratelimited(dev, "bad INTx IRQ %d\n",
- bit);
- }
- }
-
- chained_irq_exit(chip, desc);
-}
-
-static void mc_ack_intx_irq(struct irq_data *data)
-{
- struct mc_pcie *port = irq_data_get_irq_chip_data(data);
- void __iomem *bridge_base_addr =
- port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
- u32 mask = BIT(data->hwirq + PM_MSI_INT_INTX_SHIFT);
-
- writel_relaxed(mask, bridge_base_addr + ISTATUS_LOCAL);
-}
-
-static void mc_mask_intx_irq(struct irq_data *data)
-{
- struct mc_pcie *port = irq_data_get_irq_chip_data(data);
- void __iomem *bridge_base_addr =
- port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
- unsigned long flags;
- u32 mask = BIT(data->hwirq + PM_MSI_INT_INTX_SHIFT);
- u32 val;
-
- raw_spin_lock_irqsave(&port->lock, flags);
- val = readl_relaxed(bridge_base_addr + IMASK_LOCAL);
- val &= ~mask;
- writel_relaxed(val, bridge_base_addr + IMASK_LOCAL);
- raw_spin_unlock_irqrestore(&port->lock, flags);
-}
-
-static void mc_unmask_intx_irq(struct irq_data *data)
-{
- struct mc_pcie *port = irq_data_get_irq_chip_data(data);
- void __iomem *bridge_base_addr =
- port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
- unsigned long flags;
- u32 mask = BIT(data->hwirq + PM_MSI_INT_INTX_SHIFT);
- u32 val;
-
- raw_spin_lock_irqsave(&port->lock, flags);
- val = readl_relaxed(bridge_base_addr + IMASK_LOCAL);
- val |= mask;
- writel_relaxed(val, bridge_base_addr + IMASK_LOCAL);
- raw_spin_unlock_irqrestore(&port->lock, flags);
-}
-
-static struct irq_chip mc_intx_irq_chip = {
- .name = "Microchip PCIe INTx",
- .irq_ack = mc_ack_intx_irq,
- .irq_mask = mc_mask_intx_irq,
- .irq_unmask = mc_unmask_intx_irq,
-};
-
-static int mc_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
- irq_hw_number_t hwirq)
-{
- irq_set_chip_and_handler(irq, &mc_intx_irq_chip, handle_level_irq);
- irq_set_chip_data(irq, domain->host_data);
-
- return 0;
-}
-
-static const struct irq_domain_ops intx_domain_ops = {
- .map = mc_pcie_intx_map,
-};
-
-static inline u32 reg_to_event(u32 reg, struct event_map field)
-{
- return (reg & field.reg_mask) ? BIT(field.event_bit) : 0;
-}
-
-static u32 pcie_events(void __iomem *addr)
-{
- u32 reg = readl_relaxed(addr);
- u32 val = 0;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(pcie_event_to_event); i++)
- val |= reg_to_event(reg, pcie_event_to_event[i]);
-
- return val;
-}
-
-static u32 sec_errors(void __iomem *addr)
-{
- u32 reg = readl_relaxed(addr);
- u32 val = 0;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(sec_error_to_event); i++)
- val |= reg_to_event(reg, sec_error_to_event[i]);
-
- return val;
-}
-
-static u32 ded_errors(void __iomem *addr)
-{
- u32 reg = readl_relaxed(addr);
- u32 val = 0;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(ded_error_to_event); i++)
- val |= reg_to_event(reg, ded_error_to_event[i]);
-
- return val;
-}
-
-static u32 local_events(void __iomem *addr)
-{
- u32 reg = readl_relaxed(addr);
- u32 val = 0;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(local_status_to_event); i++)
- val |= reg_to_event(reg, local_status_to_event[i]);
-
- return val;
-}
-
-static u32 get_events(struct mc_pcie *port)
-{
- void __iomem *bridge_base_addr =
- port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
- void __iomem *ctrl_base_addr = port->axi_base_addr + MC_PCIE_CTRL_ADDR;
- u32 events = 0;
-
- events |= pcie_events(ctrl_base_addr + PCIE_EVENT_INT);
- events |= sec_errors(ctrl_base_addr + SEC_ERROR_INT);
- events |= ded_errors(ctrl_base_addr + DED_ERROR_INT);
- events |= local_events(bridge_base_addr + ISTATUS_LOCAL);
-
- return events;
-}
-
-static irqreturn_t mc_event_handler(int irq, void *dev_id)
-{
- struct mc_pcie *port = dev_id;
- struct device *dev = port->dev;
- struct irq_data *data;
-
- data = irq_domain_get_irq_data(port->event_domain, irq);
-
- if (event_cause[data->hwirq].str)
- dev_err_ratelimited(dev, "%s\n", event_cause[data->hwirq].str);
- else
- dev_err_ratelimited(dev, "bad event IRQ %ld\n", data->hwirq);
-
- return IRQ_HANDLED;
-}
-
-static void mc_handle_event(struct irq_desc *desc)
-{
- struct mc_pcie *port = irq_desc_get_handler_data(desc);
- unsigned long events;
- u32 bit;
- struct irq_chip *chip = irq_desc_get_chip(desc);
-
- chained_irq_enter(chip, desc);
-
- events = get_events(port);
-
- for_each_set_bit(bit, &events, NUM_EVENTS)
- generic_handle_domain_irq(port->event_domain, bit);
-
- chained_irq_exit(chip, desc);
-}
-
-static void mc_ack_event_irq(struct irq_data *data)
-{
- struct mc_pcie *port = irq_data_get_irq_chip_data(data);
- u32 event = data->hwirq;
- void __iomem *addr;
- u32 mask;
-
- addr = port->axi_base_addr + event_descs[event].base +
- event_descs[event].offset;
- mask = event_descs[event].mask;
- mask |= event_descs[event].enb_mask;
-
- writel_relaxed(mask, addr);
-}
-
-static void mc_mask_event_irq(struct irq_data *data)
-{
- struct mc_pcie *port = irq_data_get_irq_chip_data(data);
- u32 event = data->hwirq;
- void __iomem *addr;
- u32 mask;
- u32 val;
-
- addr = port->axi_base_addr + event_descs[event].base +
- event_descs[event].mask_offset;
- mask = event_descs[event].mask;
- if (event_descs[event].enb_mask) {
- mask <<= PCIE_EVENT_INT_ENB_SHIFT;
- mask &= PCIE_EVENT_INT_ENB_MASK;
- }
-
- if (!event_descs[event].mask_high)
- mask = ~mask;
-
- raw_spin_lock(&port->lock);
- val = readl_relaxed(addr);
- if (event_descs[event].mask_high)
- val |= mask;
- else
- val &= mask;
-
- writel_relaxed(val, addr);
- raw_spin_unlock(&port->lock);
-}
-
-static void mc_unmask_event_irq(struct irq_data *data)
-{
- struct mc_pcie *port = irq_data_get_irq_chip_data(data);
- u32 event = data->hwirq;
- void __iomem *addr;
- u32 mask;
- u32 val;
-
- addr = port->axi_base_addr + event_descs[event].base +
- event_descs[event].mask_offset;
- mask = event_descs[event].mask;
-
- if (event_descs[event].enb_mask)
- mask <<= PCIE_EVENT_INT_ENB_SHIFT;
-
- if (event_descs[event].mask_high)
- mask = ~mask;
-
- if (event_descs[event].enb_mask)
- mask &= PCIE_EVENT_INT_ENB_MASK;
-
- raw_spin_lock(&port->lock);
- val = readl_relaxed(addr);
- if (event_descs[event].mask_high)
- val &= mask;
- else
- val |= mask;
- writel_relaxed(val, addr);
- raw_spin_unlock(&port->lock);
-}
-
-static struct irq_chip mc_event_irq_chip = {
- .name = "Microchip PCIe EVENT",
- .irq_ack = mc_ack_event_irq,
- .irq_mask = mc_mask_event_irq,
- .irq_unmask = mc_unmask_event_irq,
-};
-
-static int mc_pcie_event_map(struct irq_domain *domain, unsigned int irq,
- irq_hw_number_t hwirq)
-{
- irq_set_chip_and_handler(irq, &mc_event_irq_chip, handle_level_irq);
- irq_set_chip_data(irq, domain->host_data);
-
- return 0;
-}
-
-static const struct irq_domain_ops event_domain_ops = {
- .map = mc_pcie_event_map,
-};
-
-static inline struct clk *mc_pcie_init_clk(struct device *dev, const char *id)
-{
- struct clk *clk;
- int ret;
-
- clk = devm_clk_get_optional(dev, id);
- if (IS_ERR(clk))
- return clk;
- if (!clk)
- return clk;
-
- ret = clk_prepare_enable(clk);
- if (ret)
- return ERR_PTR(ret);
-
- devm_add_action_or_reset(dev, (void (*) (void *))clk_disable_unprepare,
- clk);
-
- return clk;
-}
-
-static int mc_pcie_init_clks(struct device *dev)
-{
- int i;
- struct clk *fic;
-
- /*
- * PCIe may be clocked via Fabric Interface using between 1 and 4
- * clocks. Scan DT for clocks and enable them if present
- */
- for (i = 0; i < ARRAY_SIZE(poss_clks); i++) {
- fic = mc_pcie_init_clk(dev, poss_clks[i]);
- if (IS_ERR(fic))
- return PTR_ERR(fic);
- }
-
- return 0;
-}
-
-static int mc_pcie_init_irq_domains(struct mc_pcie *port)
-{
- struct device *dev = port->dev;
- struct device_node *node = dev->of_node;
- struct device_node *pcie_intc_node;
-
- /* Setup INTx */
- pcie_intc_node = of_get_next_child(node, NULL);
- if (!pcie_intc_node) {
- dev_err(dev, "failed to find PCIe Intc node\n");
- return -EINVAL;
- }
-
- port->event_domain = irq_domain_add_linear(pcie_intc_node, NUM_EVENTS,
- &event_domain_ops, port);
- if (!port->event_domain) {
- dev_err(dev, "failed to get event domain\n");
- of_node_put(pcie_intc_node);
- return -ENOMEM;
- }
-
- irq_domain_update_bus_token(port->event_domain, DOMAIN_BUS_NEXUS);
-
- port->intx_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
- &intx_domain_ops, port);
- if (!port->intx_domain) {
- dev_err(dev, "failed to get an INTx IRQ domain\n");
- of_node_put(pcie_intc_node);
- return -ENOMEM;
- }
-
- irq_domain_update_bus_token(port->intx_domain, DOMAIN_BUS_WIRED);
-
- of_node_put(pcie_intc_node);
- raw_spin_lock_init(&port->lock);
-
- return mc_allocate_msi_domains(port);
-}
-
-static void mc_pcie_setup_window(void __iomem *bridge_base_addr, u32 index,
- phys_addr_t axi_addr, phys_addr_t pci_addr,
- size_t size)
-{
- u32 atr_sz = ilog2(size) - 1;
- u32 val;
-
- if (index == 0)
- val = PCIE_CONFIG_INTERFACE;
- else
- val = PCIE_TX_RX_INTERFACE;
-
- writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
- ATR0_AXI4_SLV0_TRSL_PARAM);
-
- val = lower_32_bits(axi_addr) | (atr_sz << ATR_SIZE_SHIFT) |
- ATR_IMPL_ENABLE;
- writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
- ATR0_AXI4_SLV0_SRCADDR_PARAM);
-
- val = upper_32_bits(axi_addr);
- writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
- ATR0_AXI4_SLV0_SRC_ADDR);
-
- val = lower_32_bits(pci_addr);
- writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
- ATR0_AXI4_SLV0_TRSL_ADDR_LSB);
-
- val = upper_32_bits(pci_addr);
- writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
- ATR0_AXI4_SLV0_TRSL_ADDR_UDW);
-
- val = readl(bridge_base_addr + ATR0_PCIE_WIN0_SRCADDR_PARAM);
- val |= (ATR0_PCIE_ATR_SIZE << ATR0_PCIE_ATR_SIZE_SHIFT);
- writel(val, bridge_base_addr + ATR0_PCIE_WIN0_SRCADDR_PARAM);
- writel(0, bridge_base_addr + ATR0_PCIE_WIN0_SRC_ADDR);
-}
-
-static int mc_pcie_setup_windows(struct platform_device *pdev,
- struct mc_pcie *port)
-{
- void __iomem *bridge_base_addr =
- port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
- struct pci_host_bridge *bridge = platform_get_drvdata(pdev);
- struct resource_entry *entry;
- u64 pci_addr;
- u32 index = 1;
-
- resource_list_for_each_entry(entry, &bridge->windows) {
- if (resource_type(entry->res) == IORESOURCE_MEM) {
- pci_addr = entry->res->start - entry->offset;
- mc_pcie_setup_window(bridge_base_addr, index,
- entry->res->start, pci_addr,
- resource_size(entry->res));
- index++;
- }
- }
-
- return 0;
-}
-
-static int mc_platform_init(struct pci_config_window *cfg)
-{
- struct device *dev = cfg->parent;
- struct platform_device *pdev = to_platform_device(dev);
- struct mc_pcie *port;
- void __iomem *bridge_base_addr;
- void __iomem *ctrl_base_addr;
- int ret;
- int irq;
- int i, intx_irq, msi_irq, event_irq;
- u32 val;
- int err;
-
- port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
- if (!port)
- return -ENOMEM;
- port->dev = dev;
-
- ret = mc_pcie_init_clks(dev);
- if (ret) {
- dev_err(dev, "failed to get clock resources, error %d\n", ret);
- return -ENODEV;
- }
-
- port->axi_base_addr = devm_platform_ioremap_resource(pdev, 1);
- if (IS_ERR(port->axi_base_addr))
- return PTR_ERR(port->axi_base_addr);
-
- bridge_base_addr = port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
- ctrl_base_addr = port->axi_base_addr + MC_PCIE_CTRL_ADDR;
-
- port->msi.vector_phy = MSI_ADDR;
- port->msi.num_vectors = MC_NUM_MSI_IRQS;
- ret = mc_pcie_init_irq_domains(port);
- if (ret) {
- dev_err(dev, "failed creating IRQ domains\n");
- return ret;
- }
-
- irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- return -ENODEV;
-
- for (i = 0; i < NUM_EVENTS; i++) {
- event_irq = irq_create_mapping(port->event_domain, i);
- if (!event_irq) {
- dev_err(dev, "failed to map hwirq %d\n", i);
- return -ENXIO;
- }
-
- err = devm_request_irq(dev, event_irq, mc_event_handler,
- 0, event_cause[i].sym, port);
- if (err) {
- dev_err(dev, "failed to request IRQ %d\n", event_irq);
- return err;
- }
- }
-
- intx_irq = irq_create_mapping(port->event_domain,
- EVENT_LOCAL_PM_MSI_INT_INTX);
- if (!intx_irq) {
- dev_err(dev, "failed to map INTx interrupt\n");
- return -ENXIO;
- }
-
- /* Plug the INTx chained handler */
- irq_set_chained_handler_and_data(intx_irq, mc_handle_intx, port);
-
- msi_irq = irq_create_mapping(port->event_domain,
- EVENT_LOCAL_PM_MSI_INT_MSI);
- if (!msi_irq)
- return -ENXIO;
-
- /* Plug the MSI chained handler */
- irq_set_chained_handler_and_data(msi_irq, mc_handle_msi, port);
-
- /* Plug the main event chained handler */
- irq_set_chained_handler_and_data(irq, mc_handle_event, port);
-
- /* Hardware doesn't setup MSI by default */
- mc_pcie_enable_msi(port, cfg->win);
-
- val = readl_relaxed(bridge_base_addr + IMASK_LOCAL);
- val |= PM_MSI_INT_INTX_MASK;
- writel_relaxed(val, bridge_base_addr + IMASK_LOCAL);
-
- writel_relaxed(val, ctrl_base_addr + ECC_CONTROL);
-
- val = PCIE_EVENT_INT_L2_EXIT_INT |
- PCIE_EVENT_INT_HOTRST_EXIT_INT |
- PCIE_EVENT_INT_DLUP_EXIT_INT;
- writel_relaxed(val, ctrl_base_addr + PCIE_EVENT_INT);
-
- val = SEC_ERROR_INT_TX_RAM_SEC_ERR_INT |
- SEC_ERROR_INT_RX_RAM_SEC_ERR_INT |
- SEC_ERROR_INT_PCIE2AXI_RAM_SEC_ERR_INT |
- SEC_ERROR_INT_AXI2PCIE_RAM_SEC_ERR_INT;
- writel_relaxed(val, ctrl_base_addr + SEC_ERROR_INT);
- writel_relaxed(0, ctrl_base_addr + SEC_ERROR_INT_MASK);
- writel_relaxed(0, ctrl_base_addr + SEC_ERROR_CNT);
-
- val = DED_ERROR_INT_TX_RAM_DED_ERR_INT |
- DED_ERROR_INT_RX_RAM_DED_ERR_INT |
- DED_ERROR_INT_PCIE2AXI_RAM_DED_ERR_INT |
- DED_ERROR_INT_AXI2PCIE_RAM_DED_ERR_INT;
- writel_relaxed(val, ctrl_base_addr + DED_ERROR_INT);
- writel_relaxed(0, ctrl_base_addr + DED_ERROR_INT_MASK);
- writel_relaxed(0, ctrl_base_addr + DED_ERROR_CNT);
-
- writel_relaxed(0, bridge_base_addr + IMASK_HOST);
- writel_relaxed(GENMASK(31, 0), bridge_base_addr + ISTATUS_HOST);
-
- /* Configure Address Translation Table 0 for PCIe config space */
- mc_pcie_setup_window(bridge_base_addr, 0, cfg->res.start & 0xffffffff,
- cfg->res.start, resource_size(&cfg->res));
-
- return mc_pcie_setup_windows(pdev, port);
-}
-
-static const struct pci_ecam_ops mc_ecam_ops = {
- .init = mc_platform_init,
- .pci_ops = {
- .map_bus = pci_ecam_map_bus,
- .read = pci_generic_config_read,
- .write = pci_generic_config_write,
- }
-};
-
-static const struct of_device_id mc_pcie_of_match[] = {
- {
- .compatible = "microchip,pcie-host-1.0",
- .data = &mc_ecam_ops,
- },
- {},
-};
-
-MODULE_DEVICE_TABLE(of, mc_pcie_of_match);
-
-static struct platform_driver mc_pcie_driver = {
- .probe = pci_host_common_probe,
- .driver = {
- .name = "microchip-pcie",
- .of_match_table = mc_pcie_of_match,
- .suppress_bind_attrs = true,
- },
-};
-
-builtin_platform_driver(mc_pcie_driver);
-MODULE_DESCRIPTION("Microchip PCIe host controller driver");
-MODULE_AUTHOR("Daire McNamara <daire.mcnamara@microchip.com>");
diff --git a/drivers/pci/controller/pcie-mt7621.c b/drivers/pci/controller/pcie-mt7621.c
index 79e225edb42a..01ead2f92e87 100644
--- a/drivers/pci/controller/pcie-mt7621.c
+++ b/drivers/pci/controller/pcie-mt7621.c
@@ -202,7 +202,7 @@ static int mt7621_pcie_parse_port(struct mt7621_pcie *pcie,
struct mt7621_pcie_port *port;
struct device *dev = pcie->dev;
struct platform_device *pdev = to_platform_device(dev);
- char name[10];
+ char name[11];
int err;
port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
@@ -258,30 +258,25 @@ static int mt7621_pcie_parse_dt(struct mt7621_pcie *pcie)
{
struct device *dev = pcie->dev;
struct platform_device *pdev = to_platform_device(dev);
- struct device_node *node = dev->of_node, *child;
+ struct device_node *node = dev->of_node;
int err;
pcie->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pcie->base))
return PTR_ERR(pcie->base);
- for_each_available_child_of_node(node, child) {
+ for_each_available_child_of_node_scoped(node, child) {
int slot;
err = of_pci_get_devfn(child);
- if (err < 0) {
- of_node_put(child);
- dev_err(dev, "failed to parse devfn: %d\n", err);
- return err;
- }
+ if (err < 0)
+ return dev_err_probe(dev, err, "failed to parse devfn\n");
slot = PCI_SLOT(err);
err = mt7621_pcie_parse_port(pcie, child, slot);
- if (err) {
- of_node_put(child);
+ if (err)
return err;
- }
}
return 0;
@@ -541,7 +536,7 @@ MODULE_DEVICE_TABLE(of, mt7621_pcie_ids);
static struct platform_driver mt7621_pcie_driver = {
.probe = mt7621_pcie_probe,
- .remove_new = mt7621_pcie_remove,
+ .remove = mt7621_pcie_remove,
.driver = {
.name = "mt7621-pci",
.of_match_table = mt7621_pcie_ids,
@@ -549,4 +544,5 @@ static struct platform_driver mt7621_pcie_driver = {
};
builtin_platform_driver(mt7621_pcie_driver);
+MODULE_DESCRIPTION("MediaTek MT7621 PCIe host controller driver");
MODULE_LICENSE("GPL v2");
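
Both the MediaTek and MT7621 hunks above drop the manual of_node_put() error paths by switching to the scoped child-node iterator. A minimal sketch of the idiom follows, assuming a hypothetical bar_parse_port() helper (not a symbol from these drivers):

static int bar_parse_ports(struct device *dev)
{
	for_each_available_child_of_node_scoped(dev->of_node, child) {
		int err = of_pci_get_devfn(child);

		if (err < 0)
			return dev_err_probe(dev, err, "failed to parse devfn\n");

		/*
		 * 'child' is put automatically on every exit path, so the
		 * error returns need no explicit of_node_put().
		 */
		err = bar_parse_port(dev, child, PCI_SLOT(err));
		if (err)
			return err;
	}

	return 0;
}
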
diff --git a/drivers/pci/controller/pcie-rcar-ep.c b/drivers/pci/controller/pcie-rcar-ep.c
index f9682df1da61..657875ef4657 100644
--- a/drivers/pci/controller/pcie-rcar-ep.c
+++ b/drivers/pci/controller/pcie-rcar-ep.c
@@ -43,7 +43,7 @@ static void rcar_pcie_ep_hw_init(struct rcar_pcie *pcie)
rcar_rmw32(pcie, REXPCAP(0), 0xff, PCI_CAP_ID_EXP);
rcar_rmw32(pcie, REXPCAP(PCI_EXP_FLAGS),
PCI_EXP_FLAGS_TYPE, PCI_EXP_TYPE_ENDPOINT << 4);
- rcar_rmw32(pcie, RCONF(PCI_HEADER_TYPE), 0x7f,
+ rcar_rmw32(pcie, RCONF(PCI_HEADER_TYPE), PCI_HEADER_TYPE_MASK,
PCI_HEADER_TYPE_NORMAL);
/* Write out the physical slot number = 0 */
@@ -107,7 +107,7 @@ static int rcar_pcie_parse_outbound_ranges(struct rcar_pcie_endpoint *ep,
}
if (!devm_request_mem_region(&pdev->dev, res->start,
resource_size(res),
- outbound_name)) {
+ res->name)) {
dev_err(pcie->dev, "Cannot request memory region %s.\n",
outbound_name);
return -EIO;
@@ -256,15 +256,15 @@ static void rcar_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn, u8 vfn,
clear_bit(atu_index + 1, ep->ib_window_map);
}
-static int rcar_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 vfn,
- u8 interrupts)
+static int rcar_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 vfn, u8 nr_irqs)
{
struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
struct rcar_pcie *pcie = &ep->pcie;
+ u8 mmc = order_base_2(nr_irqs);
u32 flags;
flags = rcar_pci_read_reg(pcie, MSICAP(fn));
- flags |= interrupts << MSICAP0_MMESCAP_OFFSET;
+ flags |= mmc << MSICAP0_MMESCAP_OFFSET;
rcar_pci_write_reg(pcie, flags, MSICAP(fn));
return 0;
@@ -280,7 +280,7 @@ static int rcar_pcie_ep_get_msi(struct pci_epc *epc, u8 fn, u8 vfn)
if (!(flags & MSICAP0_MSIE))
return -EINVAL;
- return ((flags & MSICAP0_MMESE_MASK) >> MSICAP0_MMESE_OFFSET);
+ return 1 << ((flags & MSICAP0_MMESE_MASK) >> MSICAP0_MMESE_OFFSET);
}
static int rcar_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, u8 vfn,
@@ -402,16 +402,15 @@ static int rcar_pcie_ep_assert_msi(struct rcar_pcie *pcie,
}
static int rcar_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn, u8 vfn,
- enum pci_epc_irq_type type,
- u16 interrupt_num)
+ unsigned int type, u16 interrupt_num)
{
struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc);
switch (type) {
- case PCI_EPC_IRQ_LEGACY:
+ case PCI_IRQ_INTX:
return rcar_pcie_ep_assert_intx(ep, fn, 0);
- case PCI_EPC_IRQ_MSI:
+ case PCI_IRQ_MSI:
return rcar_pcie_ep_assert_msi(&ep->pcie, fn, interrupt_num);
default:
@@ -437,15 +436,17 @@ static void rcar_pcie_ep_stop(struct pci_epc *epc)
}
static const struct pci_epc_features rcar_pcie_epc_features = {
- .linkup_notifier = false,
.msi_capable = true,
- .msix_capable = false,
/* use 64-bit BARs so mark BAR[1,3,5] as reserved */
- .reserved_bar = 1 << BAR_1 | 1 << BAR_3 | 1 << BAR_5,
- .bar_fixed_64bit = 1 << BAR_0 | 1 << BAR_2 | 1 << BAR_4,
- .bar_fixed_size[0] = 128,
- .bar_fixed_size[2] = 256,
- .bar_fixed_size[4] = 256,
+ .bar[BAR_0] = { .type = BAR_FIXED, .fixed_size = 128,
+ .only_64bit = true, },
+ .bar[BAR_1] = { .type = BAR_RESERVED, },
+ .bar[BAR_2] = { .type = BAR_FIXED, .fixed_size = 256,
+ .only_64bit = true, },
+ .bar[BAR_3] = { .type = BAR_RESERVED, },
+ .bar[BAR_4] = { .type = BAR_FIXED, .fixed_size = 256,
+ .only_64bit = true, },
+ .bar[BAR_5] = { .type = BAR_RESERVED, },
};
static const struct pci_epc_features*
@@ -539,6 +540,8 @@ static int rcar_pcie_ep_probe(struct platform_device *pdev)
goto err_pm_put;
}
+ pci_epc_init_notify(epc);
+
return 0;
err_pm_put:
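
The endpoint-feature change above replaces the old reserved_bar/bar_fixed_64bit/bar_fixed_size bitmasks with the per-BAR description array. A small illustrative example of that layout (the size here is made up, not an R-Car value):

static const struct pci_epc_features example_epc_features = {
	.msi_capable = true,
	/* A fixed 64-bit BAR0 consumes the BAR1 slot, so BAR1 is marked reserved */
	.bar[BAR_0] = { .type = BAR_FIXED, .fixed_size = SZ_1M,
			.only_64bit = true, },
	.bar[BAR_1] = { .type = BAR_RESERVED, },
};
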
diff --git a/drivers/pci/controller/pcie-rcar-host.c b/drivers/pci/controller/pcie-rcar-host.c
index 88975e40ee2f..213028052aa5 100644
--- a/drivers/pci/controller/pcie-rcar-host.c
+++ b/drivers/pci/controller/pcie-rcar-host.c
@@ -12,11 +12,13 @@
*/
#include <linux/bitops.h>
+#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
+#include <linux/irqchip/irq-msi-lib.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -29,6 +31,7 @@
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
#include "pcie-rcar.h"
@@ -36,7 +39,7 @@ struct rcar_msi {
DECLARE_BITMAP(used, INT_PCI_MSI_NR);
struct irq_domain *domain;
struct mutex map_lock;
- spinlock_t mask_lock;
+ raw_spinlock_t mask_lock;
int irq1;
int irq2;
};
@@ -50,20 +53,13 @@ struct rcar_pcie_host {
int (*phy_init_fn)(struct rcar_pcie_host *host);
};
-static DEFINE_SPINLOCK(pmsr_lock);
-
static int rcar_pcie_wakeup(struct device *pcie_dev, void __iomem *pcie_base)
{
- unsigned long flags;
u32 pmsr, val;
int ret = 0;
- spin_lock_irqsave(&pmsr_lock, flags);
-
- if (!pcie_base || pm_runtime_suspended(pcie_dev)) {
- ret = -EINVAL;
- goto unlock_exit;
- }
+ if (!pcie_base || pm_runtime_suspended(pcie_dev))
+ return -EINVAL;
pmsr = readl(pcie_base + PMSR);
@@ -77,12 +73,14 @@ static int rcar_pcie_wakeup(struct device *pcie_dev, void __iomem *pcie_base)
writel(L1IATN, pcie_base + PMCTLR);
ret = readl_poll_timeout_atomic(pcie_base + PMSR, val,
val & L1FAEG, 10, 1000);
- WARN(ret, "Timeout waiting for L1 link state, ret=%d\n", ret);
+ if (ret) {
+ dev_warn_ratelimited(pcie_dev,
+ "Timeout waiting for L1 link state, ret=%d\n",
+ ret);
+ }
writel(L1FAEG | PMEL1RX, pcie_base + PMSR);
}
-unlock_exit:
- spin_unlock_irqrestore(&pmsr_lock, flags);
return ret;
}
@@ -173,8 +171,8 @@ static int rcar_pcie_config_access(struct rcar_pcie_host *host,
* space, it's generally only accessible when in endpoint mode.
* When in root complex mode, the controller is unable to target
* itself with either type 0 or type 1 accesses, and indeed, any
- * controller initiated target transfer to its own config space
- * result in a completer abort.
+ * controller-initiated target transfer to its own config space
+ * results in a completer abort.
*
* Each channel effectively only supports a single device, but as
* the same channel <-> device access works for any PCI_SLOT()
@@ -460,7 +458,7 @@ static int rcar_pcie_hw_init(struct rcar_pcie *pcie)
rcar_rmw32(pcie, REXPCAP(0), 0xff, PCI_CAP_ID_EXP);
rcar_rmw32(pcie, REXPCAP(PCI_EXP_FLAGS),
PCI_EXP_FLAGS_TYPE, PCI_EXP_TYPE_ROOT_PORT << 4);
- rcar_rmw32(pcie, RCONF(PCI_HEADER_TYPE), 0x7f,
+ rcar_rmw32(pcie, RCONF(PCI_HEADER_TYPE), PCI_HEADER_TYPE_MASK,
PCI_HEADER_TYPE_BRIDGE);
/* Enable data link layer active state reporting */
@@ -578,7 +576,7 @@ static irqreturn_t rcar_pcie_msi_irq(int irq, void *data)
unsigned int index = find_first_bit(&reg, 32);
int ret;
- ret = generic_handle_domain_irq(msi->domain->parent, index);
+ ret = generic_handle_domain_irq(msi->domain, index);
if (ret) {
/* Unknown MSI, just clear it */
dev_dbg(dev, "unexpected MSI\n");
@@ -592,30 +590,6 @@ static irqreturn_t rcar_pcie_msi_irq(int irq, void *data)
return IRQ_HANDLED;
}
-static void rcar_msi_top_irq_ack(struct irq_data *d)
-{
- irq_chip_ack_parent(d);
-}
-
-static void rcar_msi_top_irq_mask(struct irq_data *d)
-{
- pci_msi_mask_irq(d);
- irq_chip_mask_parent(d);
-}
-
-static void rcar_msi_top_irq_unmask(struct irq_data *d)
-{
- pci_msi_unmask_irq(d);
- irq_chip_unmask_parent(d);
-}
-
-static struct irq_chip rcar_msi_top_chip = {
- .name = "PCIe MSI",
- .irq_ack = rcar_msi_top_irq_ack,
- .irq_mask = rcar_msi_top_irq_mask,
- .irq_unmask = rcar_msi_top_irq_unmask,
-};
-
static void rcar_msi_irq_ack(struct irq_data *d)
{
struct rcar_msi *msi = irq_data_get_irq_chip_data(d);
@@ -629,33 +603,26 @@ static void rcar_msi_irq_mask(struct irq_data *d)
{
struct rcar_msi *msi = irq_data_get_irq_chip_data(d);
struct rcar_pcie *pcie = &msi_to_host(msi)->pcie;
- unsigned long flags;
u32 value;
- spin_lock_irqsave(&msi->mask_lock, flags);
- value = rcar_pci_read_reg(pcie, PCIEMSIIER);
- value &= ~BIT(d->hwirq);
- rcar_pci_write_reg(pcie, value, PCIEMSIIER);
- spin_unlock_irqrestore(&msi->mask_lock, flags);
+ scoped_guard(raw_spinlock_irqsave, &msi->mask_lock) {
+ value = rcar_pci_read_reg(pcie, PCIEMSIIER);
+ value &= ~BIT(d->hwirq);
+ rcar_pci_write_reg(pcie, value, PCIEMSIIER);
+ }
}
static void rcar_msi_irq_unmask(struct irq_data *d)
{
struct rcar_msi *msi = irq_data_get_irq_chip_data(d);
struct rcar_pcie *pcie = &msi_to_host(msi)->pcie;
- unsigned long flags;
u32 value;
- spin_lock_irqsave(&msi->mask_lock, flags);
- value = rcar_pci_read_reg(pcie, PCIEMSIIER);
- value |= BIT(d->hwirq);
- rcar_pci_write_reg(pcie, value, PCIEMSIIER);
- spin_unlock_irqrestore(&msi->mask_lock, flags);
-}
-
-static int rcar_msi_set_affinity(struct irq_data *d, const struct cpumask *mask, bool force)
-{
- return -EINVAL;
+ scoped_guard(raw_spinlock_irqsave, &msi->mask_lock) {
+ value = rcar_pci_read_reg(pcie, PCIEMSIIER);
+ value |= BIT(d->hwirq);
+ rcar_pci_write_reg(pcie, value, PCIEMSIIER);
+ }
}
static void rcar_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
@@ -673,7 +640,6 @@ static struct irq_chip rcar_msi_bottom_chip = {
.irq_ack = rcar_msi_irq_ack,
.irq_mask = rcar_msi_irq_mask,
.irq_unmask = rcar_msi_irq_unmask,
- .irq_set_affinity = rcar_msi_set_affinity,
.irq_compose_msi_msg = rcar_compose_msi_msg,
};
@@ -719,30 +685,36 @@ static const struct irq_domain_ops rcar_msi_domain_ops = {
.free = rcar_msi_domain_free,
};
-static struct msi_domain_info rcar_msi_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_MULTI_PCI_MSI),
- .chip = &rcar_msi_top_chip,
+#define RCAR_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
+ MSI_FLAG_USE_DEF_CHIP_OPS | \
+ MSI_FLAG_PCI_MSI_MASK_PARENT | \
+ MSI_FLAG_NO_AFFINITY)
+
+#define RCAR_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \
+ MSI_FLAG_MULTI_PCI_MSI)
+
+static const struct msi_parent_ops rcar_msi_parent_ops = {
+ .required_flags = RCAR_MSI_FLAGS_REQUIRED,
+ .supported_flags = RCAR_MSI_FLAGS_SUPPORTED,
+ .bus_select_token = DOMAIN_BUS_PCI_MSI,
+ .chip_flags = MSI_CHIP_FLAG_SET_ACK,
+ .prefix = "RCAR-",
+ .init_dev_msi_info = msi_lib_init_dev_msi_info,
};
static int rcar_allocate_domains(struct rcar_msi *msi)
{
struct rcar_pcie *pcie = &msi_to_host(msi)->pcie;
- struct fwnode_handle *fwnode = dev_fwnode(pcie->dev);
- struct irq_domain *parent;
-
- parent = irq_domain_create_linear(fwnode, INT_PCI_MSI_NR,
- &rcar_msi_domain_ops, msi);
- if (!parent) {
- dev_err(pcie->dev, "failed to create IRQ domain\n");
- return -ENOMEM;
- }
- irq_domain_update_bus_token(parent, DOMAIN_BUS_NEXUS);
-
- msi->domain = pci_msi_create_irq_domain(fwnode, &rcar_msi_info, parent);
+ struct irq_domain_info info = {
+ .fwnode = dev_fwnode(pcie->dev),
+ .ops = &rcar_msi_domain_ops,
+ .host_data = msi,
+ .size = INT_PCI_MSI_NR,
+ };
+
+ msi->domain = msi_create_parent_irq_domain(&info, &rcar_msi_parent_ops);
if (!msi->domain) {
- dev_err(pcie->dev, "failed to create MSI domain\n");
- irq_domain_remove(parent);
+ dev_err(pcie->dev, "failed to create IRQ domain\n");
return -ENOMEM;
}
@@ -751,10 +723,7 @@ static int rcar_allocate_domains(struct rcar_msi *msi)
static void rcar_free_domains(struct rcar_msi *msi)
{
- struct irq_domain *parent = msi->domain->parent;
-
irq_domain_remove(msi->domain);
- irq_domain_remove(parent);
}
static int rcar_pcie_enable_msi(struct rcar_pcie_host *host)
@@ -766,7 +735,7 @@ static int rcar_pcie_enable_msi(struct rcar_pcie_host *host)
int err;
mutex_init(&msi->map_lock);
- spin_lock_init(&msi->mask_lock);
+ raw_spin_lock_init(&msi->mask_lock);
err = of_address_to_resource(dev->of_node, 0, &res);
if (err)
@@ -776,7 +745,7 @@ static int rcar_pcie_enable_msi(struct rcar_pcie_host *host)
if (err)
return err;
- /* Two irqs are for MSI, but they are also used for non-MSI irqs */
+ /* Two IRQs are for MSI, but they are also used for non-MSI IRQs */
err = devm_request_irq(dev, msi->irq1, rcar_pcie_msi_irq,
IRQF_SHARED | IRQF_NO_THREAD,
rcar_msi_bottom_chip.name, host);
@@ -793,12 +762,12 @@ static int rcar_pcie_enable_msi(struct rcar_pcie_host *host)
goto err;
}
- /* disable all MSIs */
+ /* Disable all MSIs */
rcar_pci_write_reg(pcie, 0, PCIEMSIIER);
/*
- * Setup MSI data target using RC base address address, which
- * is guaranteed to be in the low 32bit range on any R-Car HW.
+ * Setup MSI data target using RC base address, which is guaranteed
+ * to be in the low 32bit range on any R-Car HW.
*/
rcar_pci_write_reg(pcie, lower_32_bits(res.start) | MSIFE, PCIEMSIALR);
rcar_pci_write_reg(pcie, upper_32_bits(res.start), PCIEMSIAUR);
@@ -893,6 +862,7 @@ static int rcar_pcie_inbound_ranges(struct rcar_pcie *pcie,
dev_err(pcie->dev, "Failed to map inbound regions!\n");
return -EINVAL;
}
+
/*
* If the size of the range is larger than the alignment of
* the start address, we have to use multiple entries to
@@ -904,6 +874,7 @@ static int rcar_pcie_inbound_ranges(struct rcar_pcie *pcie,
size = min(size, alignment);
}
+
/* Hardware supports max 4GiB inbound region */
size = min(size, 1ULL << 32);
@@ -953,14 +924,22 @@ static const struct of_device_id rcar_pcie_of_match[] = {
{},
};
+/* Design note 346 from Linear Technology says order is not important. */
+static const char * const rcar_pcie_supplies[] = {
+ "vpcie1v5",
+ "vpcie3v3",
+ "vpcie12v",
+};
+
static int rcar_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ struct pci_host_bridge *bridge;
struct rcar_pcie_host *host;
struct rcar_pcie *pcie;
+ unsigned int i;
u32 data;
int err;
- struct pci_host_bridge *bridge;
bridge = devm_pci_alloc_host_bridge(dev, sizeof(*host));
if (!bridge)
@@ -971,6 +950,13 @@ static int rcar_pcie_probe(struct platform_device *pdev)
pcie->dev = dev;
platform_set_drvdata(pdev, host);
+ for (i = 0; i < ARRAY_SIZE(rcar_pcie_supplies); i++) {
+ err = devm_regulator_get_enable_optional(dev, rcar_pcie_supplies[i]);
+ if (err < 0 && err != -ENODEV)
+ return dev_err_probe(dev, err, "failed to enable regulator: %s\n",
+ rcar_pcie_supplies[i]);
+ }
+
pm_runtime_enable(pcie->dev);
err = pm_runtime_get_sync(pcie->dev);
if (err < 0) {
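
The R-Car host hunks above also convert the MSI mask/unmask paths from open-coded spin_lock_irqsave()/spin_unlock_irqrestore() pairs to scoped_guard() on a raw spinlock (hence the new <linux/cleanup.h> include). A minimal sketch of that idiom, with hypothetical baz_* register helpers:

static void baz_msi_irq_mask(struct irq_data *d)
{
	struct baz_msi *msi = irq_data_get_irq_chip_data(d);
	u32 val;

	/* The raw spinlock is released automatically when the scope ends */
	scoped_guard(raw_spinlock_irqsave, &msi->mask_lock) {
		val = baz_read(msi, BAZ_MSI_IER);
		val &= ~BIT(d->hwirq);
		baz_write(msi, val, BAZ_MSI_IER);
	}
}
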
diff --git a/drivers/pci/controller/pcie-rockchip-ep.c b/drivers/pci/controller/pcie-rockchip-ep.c
index 0af0e965fb57..799461335762 100644
--- a/drivers/pci/controller/pcie-rockchip-ep.c
+++ b/drivers/pci/controller/pcie-rockchip-ep.c
@@ -10,12 +10,16 @@
#include <linux/configfs.h>
#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/iopoll.h>
#include <linux/kernel.h>
+#include <linux/irq.h>
#include <linux/of.h>
#include <linux/pci-epc.h>
#include <linux/platform_device.h>
#include <linux/pci-epf.h>
#include <linux/sizes.h>
+#include <linux/workqueue.h>
#include "pcie-rockchip.h"
@@ -26,16 +30,20 @@
* @max_regions: maximum number of regions supported by hardware
* @ob_region_map: bitmask of mapped outbound regions
* @ob_addr: base addresses in the AXI bus where the outbound regions start
- * @irq_phys_addr: base address on the AXI bus where the MSI/legacy IRQ
+ * @irq_phys_addr: base address on the AXI bus where the MSI/INTX IRQ
 *		dedicated outbound region is mapped.
* @irq_cpu_addr: base address in the CPU space where a write access triggers
- * the sending of a memory write (MSI) / normal message (legacy
+ * the sending of a memory write (MSI) / normal message (INTX
* IRQ) TLP through the PCIe bus.
- * @irq_pci_addr: used to save the current mapping of the MSI/legacy IRQ
+ * @irq_pci_addr: used to save the current mapping of the MSI/INTX IRQ
* dedicated outbound region.
* @irq_pci_fn: the latest PCI function that has updated the mapping of
- * the MSI/legacy IRQ dedicated outbound region.
- * @irq_pending: bitmask of asserted legacy IRQs.
+ * the MSI/INTX IRQ dedicated outbound region.
+ * @irq_pending: bitmask of asserted INTX IRQs.
+ * @perst_irq: IRQ used for the PERST# signal.
+ * @perst_asserted: True if the PERST# signal was asserted.
+ * @link_up: True if the PCI link is up.
+ * @link_training: Work item to execute PCI link training.
*/
struct rockchip_pcie_ep {
struct rockchip_pcie rockchip;
@@ -48,6 +56,10 @@ struct rockchip_pcie_ep {
u64 irq_pci_addr;
u8 irq_pci_fn;
u8 irq_pending;
+ int perst_irq;
+ bool perst_asserted;
+ bool link_up;
+ struct delayed_work link_training;
};
static void rockchip_pcie_clear_ep_ob_atu(struct rockchip_pcie *rockchip,
@@ -63,15 +75,25 @@ static void rockchip_pcie_clear_ep_ob_atu(struct rockchip_pcie *rockchip,
ROCKCHIP_PCIE_AT_OB_REGION_DESC1(region));
}
+static int rockchip_pcie_ep_ob_atu_num_bits(struct rockchip_pcie *rockchip,
+ u64 pci_addr, size_t size)
+{
+ int num_pass_bits = fls64(pci_addr ^ (pci_addr + size - 1));
+
+ return clamp(num_pass_bits,
+ ROCKCHIP_PCIE_AT_MIN_NUM_BITS,
+ ROCKCHIP_PCIE_AT_MAX_NUM_BITS);
+}
+
static void rockchip_pcie_prog_ep_ob_atu(struct rockchip_pcie *rockchip, u8 fn,
u32 r, u64 cpu_addr, u64 pci_addr,
size_t size)
{
- int num_pass_bits = fls64(size - 1);
+ int num_pass_bits;
u32 addr0, addr1, desc0;
- if (num_pass_bits < 8)
- num_pass_bits = 8;
+ num_pass_bits = rockchip_pcie_ep_ob_atu_num_bits(rockchip,
+ pci_addr, size);
addr0 = ((num_pass_bits - 1) & PCIE_CORE_OB_REGION_ADDR0_NUM_BITS) |
(lower_32_bits(pci_addr) & PCIE_CORE_OB_REGION_ADDR0_LO_ADDR);
@@ -98,10 +120,8 @@ static int rockchip_pcie_ep_write_header(struct pci_epc *epc, u8 fn, u8 vfn,
/* All functions share the same vendor ID with function 0 */
if (fn == 0) {
- u32 vid_regs = (hdr->vendorid & GENMASK(15, 0)) |
- (hdr->subsys_vendor_id & GENMASK(31, 16)) << 16;
-
- rockchip_pcie_write(rockchip, vid_regs,
+ rockchip_pcie_write(rockchip,
+ hdr->vendorid | hdr->subsys_vendor_id << 16,
PCIE_CORE_CONFIG_VENDOR);
}
@@ -153,7 +173,7 @@ static int rockchip_pcie_ep_set_bar(struct pci_epc *epc, u8 fn, u8 vfn,
ctrl = ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_IO_32BITS;
} else {
bool is_prefetch = !!(flags & PCI_BASE_ADDRESS_MEM_PREFETCH);
- bool is_64bits = sz > SZ_2G;
+ bool is_64bits = !!(flags & PCI_BASE_ADDRESS_MEM_TYPE_64);
if (is_64bits && (bar & 1))
return -EINVAL;
@@ -230,6 +250,28 @@ static inline u32 rockchip_ob_region(phys_addr_t addr)
return (addr >> ilog2(SZ_1M)) & 0x1f;
}
+static u64 rockchip_pcie_ep_align_addr(struct pci_epc *epc, u64 pci_addr,
+ size_t *pci_size, size_t *addr_offset)
+{
+ struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
+ size_t size = *pci_size;
+ u64 offset, mask;
+ int num_bits;
+
+ num_bits = rockchip_pcie_ep_ob_atu_num_bits(&ep->rockchip,
+ pci_addr, size);
+ mask = (1ULL << num_bits) - 1;
+
+ offset = pci_addr & mask;
+ if (size + offset > SZ_1M)
+ size = SZ_1M - offset;
+
+ *pci_size = ALIGN(offset + size, ROCKCHIP_PCIE_AT_SIZE_ALIGN);
+ *addr_offset = offset;
+
+ return pci_addr & ~mask;
+}
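+
+/*
+ * For illustration only (hypothetical values, not part of the change): with
+ * pci_addr = 0x12345678 and size = 0x100, rockchip_pcie_ep_ob_atu_num_bits()
+ * returns 9 (fls64(0x12345678 ^ 0x12345777) = 9, within the [8, 20] clamp),
+ * so mask = 0x1ff. The aligned base returned is then 0x12345600, with
+ * *addr_offset = 0x78 and *pci_size rounded up to 0x200.
+ */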
+
static int rockchip_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, u8 vfn,
phys_addr_t addr, u64 pci_addr,
size_t size)
@@ -238,6 +280,9 @@ static int rockchip_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, u8 vfn,
struct rockchip_pcie *pcie = &ep->rockchip;
u32 r = rockchip_ob_region(addr);
+ if (test_bit(r, &ep->ob_region_map))
+ return -EBUSY;
+
rockchip_pcie_prog_ep_ob_atu(pcie, fn, r, addr, pci_addr, size);
set_bit(r, &ep->ob_region_map);
@@ -251,13 +296,9 @@ static void rockchip_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn, u8 vfn,
{
struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
struct rockchip_pcie *rockchip = &ep->rockchip;
- u32 r;
-
- for (r = 0; r < ep->max_regions; r++)
- if (ep->ob_addr[r] == addr)
- break;
+ u32 r = rockchip_ob_region(addr);
- if (r == ep->max_regions)
+ if (addr != ep->ob_addr[r] || !test_bit(r, &ep->ob_region_map))
return;
rockchip_pcie_clear_ep_ob_atu(rockchip, r);
@@ -267,10 +308,11 @@ static void rockchip_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn, u8 vfn,
}
static int rockchip_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 vfn,
- u8 multi_msg_cap)
+ u8 nr_irqs)
{
struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
struct rockchip_pcie *rockchip = &ep->rockchip;
+ u8 mmc = order_base_2(nr_irqs);
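+	/* e.g. nr_irqs = 16 gives mmc = 4; the MMC field encodes log2 of the requested vectors */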
u32 flags;
flags = rockchip_pcie_read(rockchip,
@@ -278,7 +320,7 @@ static int rockchip_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 vfn,
ROCKCHIP_PCIE_EP_MSI_CTRL_REG);
flags &= ~ROCKCHIP_PCIE_EP_MSI_CTRL_MMC_MASK;
flags |=
- (multi_msg_cap << ROCKCHIP_PCIE_EP_MSI_CTRL_MMC_OFFSET) |
+ (mmc << ROCKCHIP_PCIE_EP_MSI_CTRL_MMC_OFFSET) |
(PCI_MSI_FLAGS_64BIT << ROCKCHIP_PCIE_EP_MSI_FLAGS_OFFSET);
flags &= ~ROCKCHIP_PCIE_EP_MSI_CTRL_MASK_MSI_CAP;
rockchip_pcie_write(rockchip, flags,
@@ -299,8 +341,8 @@ static int rockchip_pcie_ep_get_msi(struct pci_epc *epc, u8 fn, u8 vfn)
if (!(flags & ROCKCHIP_PCIE_EP_MSI_CTRL_ME))
return -EINVAL;
- return ((flags & ROCKCHIP_PCIE_EP_MSI_CTRL_MME_MASK) >>
- ROCKCHIP_PCIE_EP_MSI_CTRL_MME_OFFSET);
+ return 1 << ((flags & ROCKCHIP_PCIE_EP_MSI_CTRL_MME_MASK) >>
+ ROCKCHIP_PCIE_EP_MSI_CTRL_MME_OFFSET);
}
static void rockchip_pcie_ep_assert_intx(struct rockchip_pcie_ep *ep, u8 fn,
@@ -325,8 +367,8 @@ static void rockchip_pcie_ep_assert_intx(struct rockchip_pcie_ep *ep, u8 fn,
}
}
-static int rockchip_pcie_ep_send_legacy_irq(struct rockchip_pcie_ep *ep, u8 fn,
- u8 intx)
+static int rockchip_pcie_ep_send_intx_irq(struct rockchip_pcie_ep *ep, u8 fn,
+ u8 intx)
{
u16 cmd;
@@ -353,9 +395,10 @@ static int rockchip_pcie_ep_send_msi_irq(struct rockchip_pcie_ep *ep, u8 fn,
{
struct rockchip_pcie *rockchip = &ep->rockchip;
u32 flags, mme, data, data_mask;
+ size_t irq_pci_size, offset;
+ u64 irq_pci_addr;
u8 msi_count;
u64 pci_addr;
- u32 r;
/* Check MSI enable bit */
flags = rockchip_pcie_read(&ep->rockchip,
@@ -391,31 +434,33 @@ static int rockchip_pcie_ep_send_msi_irq(struct rockchip_pcie_ep *ep, u8 fn,
PCI_MSI_ADDRESS_LO);
/* Set the outbound region if needed. */
- if (unlikely(ep->irq_pci_addr != (pci_addr & PCIE_ADDR_MASK) ||
+ irq_pci_size = ~PCIE_ADDR_MASK + 1;
+ irq_pci_addr = rockchip_pcie_ep_align_addr(ep->epc,
+ pci_addr & PCIE_ADDR_MASK,
+ &irq_pci_size, &offset);
+ if (unlikely(ep->irq_pci_addr != irq_pci_addr ||
ep->irq_pci_fn != fn)) {
- r = rockchip_ob_region(ep->irq_phys_addr);
- rockchip_pcie_prog_ep_ob_atu(rockchip, fn, r,
- ep->irq_phys_addr,
- pci_addr & PCIE_ADDR_MASK,
- ~PCIE_ADDR_MASK + 1);
- ep->irq_pci_addr = (pci_addr & PCIE_ADDR_MASK);
+ rockchip_pcie_prog_ep_ob_atu(rockchip, fn,
+ rockchip_ob_region(ep->irq_phys_addr),
+ ep->irq_phys_addr,
+ irq_pci_addr, irq_pci_size);
+ ep->irq_pci_addr = irq_pci_addr;
ep->irq_pci_fn = fn;
}
- writew(data, ep->irq_cpu_addr + (pci_addr & ~PCIE_ADDR_MASK));
+ writew(data, ep->irq_cpu_addr + offset + (pci_addr & ~PCIE_ADDR_MASK));
return 0;
}
static int rockchip_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn, u8 vfn,
- enum pci_epc_irq_type type,
- u16 interrupt_num)
+ unsigned int type, u16 interrupt_num)
{
struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
switch (type) {
- case PCI_EPC_IRQ_LEGACY:
- return rockchip_pcie_ep_send_legacy_irq(ep, fn, 0);
- case PCI_EPC_IRQ_MSI:
+ case PCI_IRQ_INTX:
+ return rockchip_pcie_ep_send_intx_irq(ep, fn, 0);
+ case PCI_IRQ_MSI:
return rockchip_pcie_ep_send_msi_irq(ep, fn, interrupt_num);
default:
return -EINVAL;
@@ -435,14 +480,222 @@ static int rockchip_pcie_ep_start(struct pci_epc *epc)
rockchip_pcie_write(rockchip, cfg, PCIE_CORE_PHY_FUNC_CFG);
+ if (rockchip->perst_gpio)
+ enable_irq(ep->perst_irq);
+
+ /* Enable configuration and start link training */
+ rockchip_pcie_write(rockchip,
+ PCIE_CLIENT_LINK_TRAIN_ENABLE |
+ PCIE_CLIENT_CONF_ENABLE,
+ PCIE_CLIENT_CONFIG);
+
+ if (!rockchip->perst_gpio)
+ schedule_delayed_work(&ep->link_training, 0);
+
+ return 0;
+}
+
+static void rockchip_pcie_ep_stop(struct pci_epc *epc)
+{
+ struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
+ struct rockchip_pcie *rockchip = &ep->rockchip;
+
+ if (rockchip->perst_gpio) {
+ ep->perst_asserted = true;
+ disable_irq(ep->perst_irq);
+ }
+
+ cancel_delayed_work_sync(&ep->link_training);
+
+ /* Stop link training and disable configuration */
+ rockchip_pcie_write(rockchip,
+ PCIE_CLIENT_CONF_DISABLE |
+ PCIE_CLIENT_LINK_TRAIN_DISABLE,
+ PCIE_CLIENT_CONFIG);
+}
+
+static void rockchip_pcie_ep_retrain_link(struct rockchip_pcie *rockchip)
+{
+ u32 status;
+
+ status = rockchip_pcie_read(rockchip, PCIE_EP_CONFIG_BASE + PCI_EXP_LNKCTL);
+ status |= PCI_EXP_LNKCTL_RL;
+ rockchip_pcie_write(rockchip, status, PCIE_EP_CONFIG_BASE + PCI_EXP_LNKCTL);
+}
+
+static bool rockchip_pcie_ep_link_up(struct rockchip_pcie *rockchip)
+{
+ u32 val = rockchip_pcie_read(rockchip, PCIE_CLIENT_BASIC_STATUS1);
+
+ return PCIE_LINK_UP(val);
+}
+
+static void rockchip_pcie_ep_link_training(struct work_struct *work)
+{
+ struct rockchip_pcie_ep *ep =
+ container_of(work, struct rockchip_pcie_ep, link_training.work);
+ struct rockchip_pcie *rockchip = &ep->rockchip;
+ struct device *dev = rockchip->dev;
+ u32 val;
+ int ret;
+
+ /* Enable Gen1 training and wait for its completion */
+ ret = readl_poll_timeout(rockchip->apb_base + PCIE_CORE_CTRL,
+ val, PCIE_LINK_TRAINING_DONE(val), 50,
+ LINK_TRAIN_TIMEOUT);
+ if (ret)
+ goto again;
+
+ /* Make sure that the link is up */
+ ret = readl_poll_timeout(rockchip->apb_base + PCIE_CLIENT_BASIC_STATUS1,
+ val, PCIE_LINK_UP(val), 50,
+ LINK_TRAIN_TIMEOUT);
+ if (ret)
+ goto again;
+
+ /*
+ * Check the current speed: if gen2 speed was requested and we are not
+ * at gen2 speed yet, retrain again for gen2.
+ */
+ val = rockchip_pcie_read(rockchip, PCIE_CORE_CTRL);
+ if (!PCIE_LINK_IS_GEN2(val) && rockchip->link_gen == 2) {
+ /* Enable retrain for gen2 */
+ rockchip_pcie_ep_retrain_link(rockchip);
+ readl_poll_timeout(rockchip->apb_base + PCIE_CORE_CTRL,
+ val, PCIE_LINK_IS_GEN2(val), 50,
+ LINK_TRAIN_TIMEOUT);
+ }
+
+ /* Check again that the link is up */
+ if (!rockchip_pcie_ep_link_up(rockchip))
+ goto again;
+
+ /*
+ * If PERST# was asserted while polling the link, do not notify
+ * the function.
+ */
+ if (ep->perst_asserted)
+ return;
+
+ val = rockchip_pcie_read(rockchip, PCIE_CLIENT_BASIC_STATUS0);
+ dev_info(dev,
+ "link up (negotiated speed: %sGT/s, width: x%lu)\n",
+ (val & PCIE_CLIENT_NEG_LINK_SPEED) ? "5" : "2.5",
+ ((val & PCIE_CLIENT_NEG_LINK_WIDTH_MASK) >>
+ PCIE_CLIENT_NEG_LINK_WIDTH_SHIFT) << 1);
+
+ /* Notify the function */
+ pci_epc_linkup(ep->epc);
+ ep->link_up = true;
+
+ return;
+
+again:
+ schedule_delayed_work(&ep->link_training, msecs_to_jiffies(5));
+}
+
+static void rockchip_pcie_ep_perst_assert(struct rockchip_pcie_ep *ep)
+{
+ struct rockchip_pcie *rockchip = &ep->rockchip;
+
+ dev_dbg(rockchip->dev, "PERST# asserted, link down\n");
+
+ if (ep->perst_asserted)
+ return;
+
+ ep->perst_asserted = true;
+
+ cancel_delayed_work_sync(&ep->link_training);
+
+ if (ep->link_up) {
+ pci_epc_linkdown(ep->epc);
+ ep->link_up = false;
+ }
+}
+
+static void rockchip_pcie_ep_perst_deassert(struct rockchip_pcie_ep *ep)
+{
+ struct rockchip_pcie *rockchip = &ep->rockchip;
+
+ dev_dbg(rockchip->dev, "PERST# de-asserted, starting link training\n");
+
+ if (!ep->perst_asserted)
+ return;
+
+ ep->perst_asserted = false;
+
+ /* Enable link re-training */
+ rockchip_pcie_ep_retrain_link(rockchip);
+
+ /* Start link training */
+ schedule_delayed_work(&ep->link_training, 0);
+}
+
+static irqreturn_t rockchip_pcie_ep_perst_irq_thread(int irq, void *data)
+{
+ struct pci_epc *epc = data;
+ struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
+ struct rockchip_pcie *rockchip = &ep->rockchip;
+ u32 perst = gpiod_get_value(rockchip->perst_gpio);
+
+ if (perst)
+ rockchip_pcie_ep_perst_assert(ep);
+ else
+ rockchip_pcie_ep_perst_deassert(ep);
+
+ irq_set_irq_type(ep->perst_irq,
+ (perst ? IRQF_TRIGGER_HIGH : IRQF_TRIGGER_LOW));
+
+ return IRQ_HANDLED;
+}
+
+static int rockchip_pcie_ep_setup_irq(struct pci_epc *epc)
+{
+ struct rockchip_pcie_ep *ep = epc_get_drvdata(epc);
+ struct rockchip_pcie *rockchip = &ep->rockchip;
+ struct device *dev = rockchip->dev;
+ int ret;
+
+ if (!rockchip->perst_gpio)
+ return 0;
+
+ /* PCIe reset interrupt */
+ ep->perst_irq = gpiod_to_irq(rockchip->perst_gpio);
+ if (ep->perst_irq < 0) {
+ dev_err(dev,
+ "failed to get IRQ for PERST# GPIO: %d\n",
+ ep->perst_irq);
+
+ return ep->perst_irq;
+ }
+
+ /*
+ * The perst_gpio is active low, so when it is inactive on start, it
+ * is high and will trigger the perst_irq handler. So treat this initial
+ * IRQ as a dummy one by faking the host asserting PERST#.
+ */
+ ep->perst_asserted = true;
+ irq_set_status_flags(ep->perst_irq, IRQ_NOAUTOEN);
+ ret = devm_request_threaded_irq(dev, ep->perst_irq, NULL,
+ rockchip_pcie_ep_perst_irq_thread,
+ IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+ "pcie-ep-perst", epc);
+ if (ret) {
+ dev_err(dev,
+ "failed to request IRQ for PERST# GPIO: %d\n",
+ ret);
+
+ return ret;
+ }
+
return 0;
}
static const struct pci_epc_features rockchip_pcie_epc_features = {
- .linkup_notifier = false,
+ .linkup_notifier = true,
.msi_capable = true,
- .msix_capable = false,
- .align = 256,
+ .intx_capable = true,
+ .align = ROCKCHIP_PCIE_AT_SIZE_ALIGN,
};
static const struct pci_epc_features*
@@ -455,17 +708,19 @@ static const struct pci_epc_ops rockchip_pcie_epc_ops = {
.write_header = rockchip_pcie_ep_write_header,
.set_bar = rockchip_pcie_ep_set_bar,
.clear_bar = rockchip_pcie_ep_clear_bar,
+ .align_addr = rockchip_pcie_ep_align_addr,
.map_addr = rockchip_pcie_ep_map_addr,
.unmap_addr = rockchip_pcie_ep_unmap_addr,
.set_msi = rockchip_pcie_ep_set_msi,
.get_msi = rockchip_pcie_ep_get_msi,
.raise_irq = rockchip_pcie_ep_raise_irq,
.start = rockchip_pcie_ep_start,
+ .stop = rockchip_pcie_ep_stop,
.get_features = rockchip_pcie_ep_get_features,
};
-static int rockchip_pcie_parse_ep_dt(struct rockchip_pcie *rockchip,
- struct rockchip_pcie_ep *ep)
+static int rockchip_pcie_ep_get_resources(struct rockchip_pcie *rockchip,
+ struct rockchip_pcie_ep *ep)
{
struct device *dev = rockchip->dev;
int err;
@@ -499,82 +754,38 @@ static const struct of_device_id rockchip_pcie_ep_of_match[] = {
{},
};
-static int rockchip_pcie_ep_probe(struct platform_device *pdev)
+static int rockchip_pcie_ep_init_ob_mem(struct rockchip_pcie_ep *ep)
{
- struct device *dev = &pdev->dev;
- struct rockchip_pcie_ep *ep;
- struct rockchip_pcie *rockchip;
- struct pci_epc *epc;
- size_t max_regions;
+ struct rockchip_pcie *rockchip = &ep->rockchip;
+ struct device *dev = rockchip->dev;
struct pci_epc_mem_window *windows = NULL;
int err, i;
- u32 cfg_msi, cfg_msix_cp;
-
- ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL);
- if (!ep)
- return -ENOMEM;
-
- rockchip = &ep->rockchip;
- rockchip->is_rc = false;
- rockchip->dev = dev;
-
- epc = devm_pci_epc_create(dev, &rockchip_pcie_epc_ops);
- if (IS_ERR(epc)) {
- dev_err(dev, "failed to create epc device\n");
- return PTR_ERR(epc);
- }
-
- ep->epc = epc;
- epc_set_drvdata(epc, ep);
-
- err = rockchip_pcie_parse_ep_dt(rockchip, ep);
- if (err)
- return err;
-
- err = rockchip_pcie_enable_clocks(rockchip);
- if (err)
- return err;
-
- err = rockchip_pcie_init_port(rockchip);
- if (err)
- goto err_disable_clocks;
-
- /* Establish the link automatically */
- rockchip_pcie_write(rockchip, PCIE_CLIENT_LINK_TRAIN_ENABLE,
- PCIE_CLIENT_CONFIG);
- max_regions = ep->max_regions;
- ep->ob_addr = devm_kcalloc(dev, max_regions, sizeof(*ep->ob_addr),
+ ep->ob_addr = devm_kcalloc(dev, ep->max_regions, sizeof(*ep->ob_addr),
GFP_KERNEL);
- if (!ep->ob_addr) {
- err = -ENOMEM;
- goto err_uninit_port;
- }
-
- /* Only enable function 0 by default */
- rockchip_pcie_write(rockchip, BIT(0), PCIE_CORE_PHY_FUNC_CFG);
+ if (!ep->ob_addr)
+ return -ENOMEM;
windows = devm_kcalloc(dev, ep->max_regions,
sizeof(struct pci_epc_mem_window), GFP_KERNEL);
- if (!windows) {
- err = -ENOMEM;
- goto err_uninit_port;
- }
+ if (!windows)
+ return -ENOMEM;
+
for (i = 0; i < ep->max_regions; i++) {
windows[i].phys_base = rockchip->mem_res->start + (SZ_1M * i);
windows[i].size = SZ_1M;
windows[i].page_size = SZ_1M;
}
- err = pci_epc_multi_mem_init(epc, windows, ep->max_regions);
+ err = pci_epc_multi_mem_init(ep->epc, windows, ep->max_regions);
devm_kfree(dev, windows);
if (err < 0) {
dev_err(dev, "failed to initialize the memory space\n");
- goto err_uninit_port;
+ return err;
}
- ep->irq_cpu_addr = pci_epc_mem_alloc_addr(epc, &ep->irq_phys_addr,
+ ep->irq_cpu_addr = pci_epc_mem_alloc_addr(ep->epc, &ep->irq_phys_addr,
SZ_1M);
if (!ep->irq_cpu_addr) {
dev_err(dev, "failed to reserve memory space for MSI\n");
@@ -584,6 +795,23 @@ static int rockchip_pcie_ep_probe(struct platform_device *pdev)
ep->irq_pci_addr = ROCKCHIP_PCIE_EP_DUMMY_IRQ_ADDR;
+ return 0;
+
+err_epc_mem_exit:
+ pci_epc_mem_exit(ep->epc);
+
+ return err;
+}
+
+static void rockchip_pcie_ep_exit_ob_mem(struct rockchip_pcie_ep *ep)
+{
+ pci_epc_mem_exit(ep->epc);
+}
+
+static void rockchip_pcie_ep_hide_broken_msix_cap(struct rockchip_pcie *rockchip)
+{
+ u32 cfg_msi, cfg_msix_cp;
+
/*
* MSI-X is not supported but the controller still advertises the MSI-X
* capability by default, which can lead to the Root Complex side
@@ -606,17 +834,68 @@ static int rockchip_pcie_ep_probe(struct platform_device *pdev)
rockchip_pcie_write(rockchip, cfg_msi,
PCIE_EP_CONFIG_BASE + ROCKCHIP_PCIE_EP_MSI_CTRL_REG);
+}
- rockchip_pcie_write(rockchip, PCIE_CLIENT_CONF_ENABLE,
- PCIE_CLIENT_CONFIG);
+static int rockchip_pcie_ep_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rockchip_pcie_ep *ep;
+ struct rockchip_pcie *rockchip;
+ struct pci_epc *epc;
+ int err;
+
+ ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL);
+ if (!ep)
+ return -ENOMEM;
+
+ rockchip = &ep->rockchip;
+ rockchip->is_rc = false;
+ rockchip->dev = dev;
+ INIT_DELAYED_WORK(&ep->link_training, rockchip_pcie_ep_link_training);
+
+ epc = devm_pci_epc_create(dev, &rockchip_pcie_epc_ops);
+ if (IS_ERR(epc)) {
+ dev_err(dev, "failed to create EPC device\n");
+ return PTR_ERR(epc);
+ }
+
+ ep->epc = epc;
+ epc_set_drvdata(epc, ep);
+
+ err = rockchip_pcie_ep_get_resources(rockchip, ep);
+ if (err)
+ return err;
+
+ err = rockchip_pcie_ep_init_ob_mem(ep);
+ if (err)
+ return err;
+
+ err = rockchip_pcie_enable_clocks(rockchip);
+ if (err)
+ goto err_exit_ob_mem;
+
+ err = rockchip_pcie_init_port(rockchip);
+ if (err)
+ goto err_disable_clocks;
+
+ rockchip_pcie_ep_hide_broken_msix_cap(rockchip);
+
+ /* Only enable function 0 by default */
+ rockchip_pcie_write(rockchip, BIT(0), PCIE_CORE_PHY_FUNC_CFG);
+
+ pci_epc_init_notify(epc);
+
+ err = rockchip_pcie_ep_setup_irq(epc);
+ if (err < 0)
+ goto err_uninit_port;
return 0;
-err_epc_mem_exit:
- pci_epc_mem_exit(epc);
err_uninit_port:
rockchip_pcie_deinit_phys(rockchip);
err_disable_clocks:
rockchip_pcie_disable_clocks(rockchip);
+err_exit_ob_mem:
+ rockchip_pcie_ep_exit_ob_mem(ep);
return err;
}
diff --git a/drivers/pci/controller/pcie-rockchip-host.c b/drivers/pci/controller/pcie-rockchip-host.c
index 2438bc9b3a1a..ee1822ca01db 100644
--- a/drivers/pci/controller/pcie-rockchip-host.c
+++ b/drivers/pci/controller/pcie-rockchip-host.c
@@ -11,29 +11,19 @@
* ARM PCI Host generic driver.
*/
+#include <linux/bitfield.h>
#include <linux/bitrev.h>
-#include <linux/clk.h>
-#include <linux/delay.h>
#include <linux/gpio/consumer.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
-#include <linux/kernel.h>
-#include <linux/mfd/syscon.h>
#include <linux/module.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_pci.h>
-#include <linux/of_platform.h>
-#include <linux/pci.h>
-#include <linux/pci_ids.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
-#include <linux/reset.h>
-#include <linux/regmap.h>
#include "../pci.h"
#include "pcie-rockchip.h"
@@ -42,18 +32,18 @@ static void rockchip_pcie_enable_bw_int(struct rockchip_pcie *rockchip)
{
u32 status;
- status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
+ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCTL);
status |= (PCI_EXP_LNKCTL_LBMIE | PCI_EXP_LNKCTL_LABIE);
- rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
+ rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCTL);
}
static void rockchip_pcie_clr_bw_int(struct rockchip_pcie *rockchip)
{
u32 status;
- status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
+ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCTL);
status |= (PCI_EXP_LNKSTA_LBMS | PCI_EXP_LNKSTA_LABS) << 16;
- rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
+ rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCTL);
}
static void rockchip_pcie_update_txcredit_mui(struct rockchip_pcie *rockchip)
@@ -271,7 +261,7 @@ static void rockchip_pcie_set_power_limit(struct rockchip_pcie *rockchip)
scale = 3; /* 0.001x */
curr = curr / 1000; /* convert to mA */
power = (curr * 3300) / 1000; /* milliwatt */
- while (power > PCIE_RC_CONFIG_DCR_CSPL_LIMIT) {
+ while (power > FIELD_MAX(PCI_EXP_DEVCAP_PWR_VAL)) {
if (!scale) {
dev_warn(rockchip->dev, "invalid power supply\n");
return;
@@ -280,10 +270,10 @@ static void rockchip_pcie_set_power_limit(struct rockchip_pcie *rockchip)
power = power / 10;
}
- status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_DCR);
- status |= (power << PCIE_RC_CONFIG_DCR_CSPL_SHIFT) |
- (scale << PCIE_RC_CONFIG_DCR_CPLS_SHIFT);
- rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_DCR);
+ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_CR + PCI_EXP_DEVCAP);
+ status |= FIELD_PREP(PCI_EXP_DEVCAP_PWR_VAL, power);
+ status |= FIELD_PREP(PCI_EXP_DEVCAP_PWR_SCL, scale);
+ rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_CR + PCI_EXP_DEVCAP);
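+
+	/*
+	 * For illustration (hypothetical supply): a 1.2 A regulator at 3.3 V
+	 * yields 3960 mW, which the loop above reduces to value 39 with
+	 * scale 1 (0.1x), i.e. a Captured Slot Power Limit of about 3.9 W.
+	 */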
}
/**
@@ -296,7 +286,7 @@ static int rockchip_pcie_host_init_port(struct rockchip_pcie *rockchip)
int err, i = MAX_LANE_NUM;
u32 status;
- gpiod_set_value_cansleep(rockchip->ep_gpio, 0);
+ gpiod_set_value_cansleep(rockchip->perst_gpio, 0);
err = rockchip_pcie_init_port(rockchip);
if (err)
@@ -311,20 +301,23 @@ static int rockchip_pcie_host_init_port(struct rockchip_pcie *rockchip)
rockchip_pcie_set_power_limit(rockchip);
/* Set RC's clock architecture as common clock */
- status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
+ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCTL);
status |= PCI_EXP_LNKSTA_SLC << 16;
- rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
+ rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCTL);
/* Set RC's RCB to 128 */
- status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
+ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCTL);
status |= PCI_EXP_LNKCTL_RCB;
- rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
+ rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCTL);
/* Enable Gen1 training */
rockchip_pcie_write(rockchip, PCIE_CLIENT_LINK_TRAIN_ENABLE,
PCIE_CLIENT_CONFIG);
- gpiod_set_value_cansleep(rockchip->ep_gpio, 1);
+ msleep(PCIE_T_PVPERL_MS);
+ gpiod_set_value_cansleep(rockchip->perst_gpio, 1);
+
+ msleep(PCIE_RESET_CONFIG_WAIT_MS);
/* 500ms timeout value should be enough for Gen1/2 training */
err = readl_poll_timeout(rockchip->apb_base + PCIE_CLIENT_BASIC_STATUS1,
@@ -340,9 +333,13 @@ static int rockchip_pcie_host_init_port(struct rockchip_pcie *rockchip)
* Enable retrain for gen2. This should be configured only after
* gen1 finished.
*/
- status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
+ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCTL2);
+ status &= ~PCI_EXP_LNKCTL2_TLS;
+ status |= PCI_EXP_LNKCTL2_TLS_5_0GT;
+ rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCTL2);
+ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCTL);
status |= PCI_EXP_LNKCTL_RL;
- rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);
+ rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCTL);
err = readl_poll_timeout(rockchip->apb_base + PCIE_CORE_CTRL,
status, PCIE_LINK_IS_GEN2(status), 20,
@@ -366,7 +363,7 @@ static int rockchip_pcie_host_init_port(struct rockchip_pcie *rockchip)
}
}
- rockchip_pcie_write(rockchip, ROCKCHIP_VENDOR_ID,
+ rockchip_pcie_write(rockchip, PCI_VENDOR_ID_ROCKCHIP,
PCIE_CORE_CONFIG_VENDOR);
rockchip_pcie_write(rockchip,
PCI_CLASS_BRIDGE_PCI_NORMAL << 8,
@@ -379,15 +376,15 @@ static int rockchip_pcie_host_init_port(struct rockchip_pcie *rockchip)
/* Clear L0s from RC's link cap */
if (of_property_read_bool(dev->of_node, "aspm-no-l0s")) {
- status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LINK_CAP);
- status &= ~PCIE_RC_CONFIG_LINK_CAP_L0S;
- rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LINK_CAP);
+ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCAP);
+ status &= ~PCI_EXP_LNKCAP_ASPM_L0S;
+ rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCAP);
}
- status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_DCSR);
- status &= ~PCIE_RC_CONFIG_DCSR_MPS_MASK;
- status |= PCIE_RC_CONFIG_DCSR_MPS_256;
- rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_DCSR);
+ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_CR + PCI_EXP_DEVCTL);
+ status &= ~PCI_EXP_DEVCTL_PAYLOAD;
+ status |= PCI_EXP_DEVCTL_PAYLOAD_256B;
+ rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_CR + PCI_EXP_DEVCTL);
return 0;
err_power_off_phy:
@@ -438,7 +435,7 @@ static irqreturn_t rockchip_pcie_subsys_irq_handler(int irq, void *arg)
dev_dbg(dev, "malformed TLP received from the link\n");
if (sub_reg & PCIE_CORE_INT_UCR)
- dev_dbg(dev, "malformed TLP received from the link\n");
+ dev_dbg(dev, "Unexpected Completion received from the link\n");
if (sub_reg & PCIE_CORE_INT_FCE)
dev_dbg(dev, "an error was observed in the flow control advertisements from the other side\n");
@@ -488,7 +485,7 @@ static irqreturn_t rockchip_pcie_client_irq_handler(int irq, void *arg)
dev_dbg(dev, "fatal error interrupt received\n");
if (reg & PCIE_CLIENT_INT_NFATAL_ERR)
- dev_dbg(dev, "no fatal error interrupt received\n");
+ dev_dbg(dev, "non fatal error interrupt received\n");
if (reg & PCIE_CLIENT_INT_CORR_ERR)
dev_dbg(dev, "correctable error interrupt received\n");
@@ -507,7 +504,7 @@ static irqreturn_t rockchip_pcie_client_irq_handler(int irq, void *arg)
return IRQ_HANDLED;
}
-static void rockchip_pcie_legacy_int_handler(struct irq_desc *desc)
+static void rockchip_pcie_intx_handler(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
struct rockchip_pcie *rockchip = irq_desc_get_handler_data(desc);
@@ -555,7 +552,7 @@ static int rockchip_pcie_setup_irq(struct rockchip_pcie *rockchip)
return irq;
irq_set_chained_handler_and_data(irq,
- rockchip_pcie_legacy_int_handler,
+ rockchip_pcie_intx_handler,
rockchip);
irq = platform_get_irq_byname(pdev, "client");
@@ -692,8 +689,8 @@ static int rockchip_pcie_init_irq_domain(struct rockchip_pcie *rockchip)
return -EINVAL;
}
- rockchip->irq_domain = irq_domain_add_linear(intc, PCI_NUM_INTX,
- &intx_domain_ops, rockchip);
+ rockchip->irq_domain = irq_domain_create_linear(of_fwnode_handle(intc), PCI_NUM_INTX,
+ &intx_domain_ops, rockchip);
of_node_put(intc);
if (!rockchip->irq_domain) {
dev_err(dev, "failed to get a INTx IRQ domain\n");
@@ -1049,7 +1046,7 @@ static struct platform_driver rockchip_pcie_driver = {
.pm = &rockchip_pcie_pm_ops,
},
.probe = rockchip_pcie_probe,
- .remove_new = rockchip_pcie_remove,
+ .remove = rockchip_pcie_remove,
};
module_platform_driver(rockchip_pcie_driver);
diff --git a/drivers/pci/controller/pcie-rockchip.c b/drivers/pci/controller/pcie-rockchip.c
index 1aa84035a8bc..0f88da378805 100644
--- a/drivers/pci/controller/pcie-rockchip.c
+++ b/drivers/pci/controller/pcie-rockchip.c
@@ -15,6 +15,7 @@
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/iopoll.h>
+#include <linux/of.h>
#include <linux/of_pci.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
@@ -29,7 +30,7 @@ int rockchip_pcie_parse_dt(struct rockchip_pcie *rockchip)
struct platform_device *pdev = to_platform_device(dev);
struct device_node *node = dev->of_node;
struct resource *regs;
- int err;
+ int err, i;
if (rockchip->is_rc) {
regs = platform_get_resource_byname(pdev,
@@ -68,87 +69,38 @@ int rockchip_pcie_parse_dt(struct rockchip_pcie *rockchip)
if (rockchip->link_gen < 0 || rockchip->link_gen > 2)
rockchip->link_gen = 2;
- rockchip->core_rst = devm_reset_control_get_exclusive(dev, "core");
- if (IS_ERR(rockchip->core_rst)) {
- if (PTR_ERR(rockchip->core_rst) != -EPROBE_DEFER)
- dev_err(dev, "missing core reset property in node\n");
- return PTR_ERR(rockchip->core_rst);
- }
-
- rockchip->mgmt_rst = devm_reset_control_get_exclusive(dev, "mgmt");
- if (IS_ERR(rockchip->mgmt_rst)) {
- if (PTR_ERR(rockchip->mgmt_rst) != -EPROBE_DEFER)
- dev_err(dev, "missing mgmt reset property in node\n");
- return PTR_ERR(rockchip->mgmt_rst);
- }
-
- rockchip->mgmt_sticky_rst = devm_reset_control_get_exclusive(dev,
- "mgmt-sticky");
- if (IS_ERR(rockchip->mgmt_sticky_rst)) {
- if (PTR_ERR(rockchip->mgmt_sticky_rst) != -EPROBE_DEFER)
- dev_err(dev, "missing mgmt-sticky reset property in node\n");
- return PTR_ERR(rockchip->mgmt_sticky_rst);
- }
-
- rockchip->pipe_rst = devm_reset_control_get_exclusive(dev, "pipe");
- if (IS_ERR(rockchip->pipe_rst)) {
- if (PTR_ERR(rockchip->pipe_rst) != -EPROBE_DEFER)
- dev_err(dev, "missing pipe reset property in node\n");
- return PTR_ERR(rockchip->pipe_rst);
- }
-
- rockchip->pm_rst = devm_reset_control_get_exclusive(dev, "pm");
- if (IS_ERR(rockchip->pm_rst)) {
- if (PTR_ERR(rockchip->pm_rst) != -EPROBE_DEFER)
- dev_err(dev, "missing pm reset property in node\n");
- return PTR_ERR(rockchip->pm_rst);
- }
+ for (i = 0; i < ROCKCHIP_NUM_PM_RSTS; i++)
+ rockchip->pm_rsts[i].id = rockchip_pci_pm_rsts[i];
- rockchip->pclk_rst = devm_reset_control_get_exclusive(dev, "pclk");
- if (IS_ERR(rockchip->pclk_rst)) {
- if (PTR_ERR(rockchip->pclk_rst) != -EPROBE_DEFER)
- dev_err(dev, "missing pclk reset property in node\n");
- return PTR_ERR(rockchip->pclk_rst);
- }
-
- rockchip->aclk_rst = devm_reset_control_get_exclusive(dev, "aclk");
- if (IS_ERR(rockchip->aclk_rst)) {
- if (PTR_ERR(rockchip->aclk_rst) != -EPROBE_DEFER)
- dev_err(dev, "missing aclk reset property in node\n");
- return PTR_ERR(rockchip->aclk_rst);
- }
-
- if (rockchip->is_rc) {
- rockchip->ep_gpio = devm_gpiod_get_optional(dev, "ep",
- GPIOD_OUT_HIGH);
- if (IS_ERR(rockchip->ep_gpio))
- return dev_err_probe(dev, PTR_ERR(rockchip->ep_gpio),
- "failed to get ep GPIO\n");
- }
+ err = devm_reset_control_bulk_get_exclusive(dev,
+ ROCKCHIP_NUM_PM_RSTS,
+ rockchip->pm_rsts);
+ if (err)
+ return dev_err_probe(dev, err, "Cannot get the PM reset\n");
- rockchip->aclk_pcie = devm_clk_get(dev, "aclk");
- if (IS_ERR(rockchip->aclk_pcie)) {
- dev_err(dev, "aclk clock not found\n");
- return PTR_ERR(rockchip->aclk_pcie);
- }
+ for (i = 0; i < ROCKCHIP_NUM_CORE_RSTS; i++)
+ rockchip->core_rsts[i].id = rockchip_pci_core_rsts[i];
- rockchip->aclk_perf_pcie = devm_clk_get(dev, "aclk-perf");
- if (IS_ERR(rockchip->aclk_perf_pcie)) {
- dev_err(dev, "aclk_perf clock not found\n");
- return PTR_ERR(rockchip->aclk_perf_pcie);
- }
+ err = devm_reset_control_bulk_get_exclusive(dev,
+ ROCKCHIP_NUM_CORE_RSTS,
+ rockchip->core_rsts);
+ if (err)
+ return dev_err_probe(dev, err, "Cannot get the Core resets\n");
- rockchip->hclk_pcie = devm_clk_get(dev, "hclk");
- if (IS_ERR(rockchip->hclk_pcie)) {
- dev_err(dev, "hclk clock not found\n");
- return PTR_ERR(rockchip->hclk_pcie);
- }
+ if (rockchip->is_rc)
+ rockchip->perst_gpio = devm_gpiod_get_optional(dev, "ep",
+ GPIOD_OUT_LOW);
+ else
+ rockchip->perst_gpio = devm_gpiod_get_optional(dev, "reset",
+ GPIOD_IN);
+ if (IS_ERR(rockchip->perst_gpio))
+ return dev_err_probe(dev, PTR_ERR(rockchip->perst_gpio),
+ "failed to get PERST# GPIO\n");
- rockchip->clk_pcie_pm = devm_clk_get(dev, "pm");
- if (IS_ERR(rockchip->clk_pcie_pm)) {
- dev_err(dev, "pm clock not found\n");
- return PTR_ERR(rockchip->clk_pcie_pm);
- }
+ rockchip->num_clks = devm_clk_bulk_get_all(dev, &rockchip->clks);
+ if (rockchip->num_clks < 0)
+ return dev_err_probe(dev, rockchip->num_clks,
+ "failed to get clocks\n");
return 0;
}
@@ -166,23 +118,10 @@ int rockchip_pcie_init_port(struct rockchip_pcie *rockchip)
int err, i;
u32 regs;
- err = reset_control_assert(rockchip->aclk_rst);
- if (err) {
- dev_err(dev, "assert aclk_rst err %d\n", err);
- return err;
- }
-
- err = reset_control_assert(rockchip->pclk_rst);
- if (err) {
- dev_err(dev, "assert pclk_rst err %d\n", err);
- return err;
- }
-
- err = reset_control_assert(rockchip->pm_rst);
- if (err) {
- dev_err(dev, "assert pm_rst err %d\n", err);
- return err;
- }
+ err = reset_control_bulk_assert(ROCKCHIP_NUM_PM_RSTS,
+ rockchip->pm_rsts);
+ if (err)
+ return dev_err_probe(dev, err, "Couldn't assert PM resets\n");
for (i = 0; i < MAX_LANE_NUM; i++) {
err = phy_init(rockchip->phys[i]);
@@ -192,47 +131,19 @@ int rockchip_pcie_init_port(struct rockchip_pcie *rockchip)
}
}
- err = reset_control_assert(rockchip->core_rst);
- if (err) {
- dev_err(dev, "assert core_rst err %d\n", err);
- goto err_exit_phy;
- }
-
- err = reset_control_assert(rockchip->mgmt_rst);
- if (err) {
- dev_err(dev, "assert mgmt_rst err %d\n", err);
- goto err_exit_phy;
- }
-
- err = reset_control_assert(rockchip->mgmt_sticky_rst);
+ err = reset_control_bulk_assert(ROCKCHIP_NUM_CORE_RSTS,
+ rockchip->core_rsts);
if (err) {
- dev_err(dev, "assert mgmt_sticky_rst err %d\n", err);
- goto err_exit_phy;
- }
-
- err = reset_control_assert(rockchip->pipe_rst);
- if (err) {
- dev_err(dev, "assert pipe_rst err %d\n", err);
+ dev_err_probe(dev, err, "Couldn't assert Core resets\n");
goto err_exit_phy;
}
udelay(10);
- err = reset_control_deassert(rockchip->pm_rst);
- if (err) {
- dev_err(dev, "deassert pm_rst err %d\n", err);
- goto err_exit_phy;
- }
-
- err = reset_control_deassert(rockchip->aclk_rst);
+ err = reset_control_bulk_deassert(ROCKCHIP_NUM_PM_RSTS,
+ rockchip->pm_rsts);
if (err) {
- dev_err(dev, "deassert aclk_rst err %d\n", err);
- goto err_exit_phy;
- }
-
- err = reset_control_deassert(rockchip->pclk_rst);
- if (err) {
- dev_err(dev, "deassert pclk_rst err %d\n", err);
+ dev_err(dev, "Couldn't deassert PM resets %d\n", err);
goto err_exit_phy;
}
@@ -243,11 +154,12 @@ int rockchip_pcie_init_port(struct rockchip_pcie *rockchip)
rockchip_pcie_write(rockchip, PCIE_CLIENT_GEN_SEL_1,
PCIE_CLIENT_CONFIG);
- regs = PCIE_CLIENT_LINK_TRAIN_ENABLE | PCIE_CLIENT_ARI_ENABLE |
+ regs = PCIE_CLIENT_ARI_ENABLE |
PCIE_CLIENT_CONF_LANE_NUM(rockchip->lanes);
if (rockchip->is_rc)
- regs |= PCIE_CLIENT_CONF_ENABLE | PCIE_CLIENT_MODE_RC;
+ regs |= PCIE_CLIENT_LINK_TRAIN_ENABLE |
+ PCIE_CLIENT_CONF_ENABLE | PCIE_CLIENT_MODE_RC;
else
regs |= PCIE_CLIENT_CONF_DISABLE | PCIE_CLIENT_MODE_EP;
@@ -271,31 +183,10 @@ int rockchip_pcie_init_port(struct rockchip_pcie *rockchip)
goto err_power_off_phy;
}
- /*
- * Please don't reorder the deassert sequence of the following
- * four reset pins.
- */
- err = reset_control_deassert(rockchip->mgmt_sticky_rst);
- if (err) {
- dev_err(dev, "deassert mgmt_sticky_rst err %d\n", err);
- goto err_power_off_phy;
- }
-
- err = reset_control_deassert(rockchip->core_rst);
- if (err) {
- dev_err(dev, "deassert core_rst err %d\n", err);
- goto err_power_off_phy;
- }
-
- err = reset_control_deassert(rockchip->mgmt_rst);
- if (err) {
- dev_err(dev, "deassert mgmt_rst err %d\n", err);
- goto err_power_off_phy;
- }
-
- err = reset_control_deassert(rockchip->pipe_rst);
+ err = reset_control_bulk_deassert(ROCKCHIP_NUM_CORE_RSTS,
+ rockchip->core_rsts);
if (err) {
- dev_err(dev, "deassert pipe_rst err %d\n", err);
+ dev_err(dev, "Couldn't deassert Core reset %d\n", err);
goto err_power_off_phy;
}
@@ -371,50 +262,18 @@ int rockchip_pcie_enable_clocks(struct rockchip_pcie *rockchip)
struct device *dev = rockchip->dev;
int err;
- err = clk_prepare_enable(rockchip->aclk_pcie);
- if (err) {
- dev_err(dev, "unable to enable aclk_pcie clock\n");
- return err;
- }
-
- err = clk_prepare_enable(rockchip->aclk_perf_pcie);
- if (err) {
- dev_err(dev, "unable to enable aclk_perf_pcie clock\n");
- goto err_aclk_perf_pcie;
- }
-
- err = clk_prepare_enable(rockchip->hclk_pcie);
- if (err) {
- dev_err(dev, "unable to enable hclk_pcie clock\n");
- goto err_hclk_pcie;
- }
-
- err = clk_prepare_enable(rockchip->clk_pcie_pm);
- if (err) {
- dev_err(dev, "unable to enable clk_pcie_pm clock\n");
- goto err_clk_pcie_pm;
- }
+ err = clk_bulk_prepare_enable(rockchip->num_clks, rockchip->clks);
+ if (err)
+ return dev_err_probe(dev, err, "failed to enable clocks\n");
return 0;
-
-err_clk_pcie_pm:
- clk_disable_unprepare(rockchip->hclk_pcie);
-err_hclk_pcie:
- clk_disable_unprepare(rockchip->aclk_perf_pcie);
-err_aclk_perf_pcie:
- clk_disable_unprepare(rockchip->aclk_pcie);
- return err;
}
EXPORT_SYMBOL_GPL(rockchip_pcie_enable_clocks);
-void rockchip_pcie_disable_clocks(void *data)
+void rockchip_pcie_disable_clocks(struct rockchip_pcie *rockchip)
{
- struct rockchip_pcie *rockchip = data;
- clk_disable_unprepare(rockchip->clk_pcie_pm);
- clk_disable_unprepare(rockchip->hclk_pcie);
- clk_disable_unprepare(rockchip->aclk_perf_pcie);
- clk_disable_unprepare(rockchip->aclk_pcie);
+ clk_bulk_disable_unprepare(rockchip->num_clks, rockchip->clks);
}
EXPORT_SYMBOL_GPL(rockchip_pcie_disable_clocks);
diff --git a/drivers/pci/controller/pcie-rockchip.h b/drivers/pci/controller/pcie-rockchip.h
index fe0333778fd9..3e82a69b9c00 100644
--- a/drivers/pci/controller/pcie-rockchip.h
+++ b/drivers/pci/controller/pcie-rockchip.h
@@ -11,44 +11,53 @@
#ifndef _PCIE_ROCKCHIP_H
#define _PCIE_ROCKCHIP_H
+#include <linux/clk.h>
+#include <linux/hw_bitfield.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/pci-ecam.h>
+#include <linux/reset.h>
/*
* The upper 16 bits of PCIE_CLIENT_CONFIG are a write mask for the lower 16
* bits. This allows atomic updates of the register without locking.
*/
-#define HIWORD_UPDATE(mask, val) (((mask) << 16) | (val))
-#define HIWORD_UPDATE_BIT(val) HIWORD_UPDATE(val, val)
+#define HWORD_SET_BIT(val) (FIELD_PREP_WM16_CONST((val), 1))
+#define HWORD_CLR_BIT(val) (FIELD_PREP_WM16_CONST((val), 0))
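+/*
+ * For illustration, assuming FIELD_PREP_WM16 places the field mask in the
+ * upper 16 bits as described above: HWORD_SET_BIT(0x0002) evaluates to
+ * 0x00020002 and HWORD_CLR_BIT(0x0002) to 0x00020000, so a write updates
+ * only bit 1 of the register.
+ */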
-#define ENCODE_LANES(x) ((((x) >> 1) & 3) << 4)
+#define ENCODE_LANES(x) ((((x) >> 1) & 3))
#define MAX_LANE_NUM 4
#define MAX_REGION_LIMIT 32
#define MIN_EP_APERTURE 28
+#define LINK_TRAIN_TIMEOUT (500 * USEC_PER_MSEC)
#define PCIE_CLIENT_BASE 0x0
#define PCIE_CLIENT_CONFIG (PCIE_CLIENT_BASE + 0x00)
-#define PCIE_CLIENT_CONF_ENABLE HIWORD_UPDATE_BIT(0x0001)
-#define PCIE_CLIENT_CONF_DISABLE HIWORD_UPDATE(0x0001, 0)
-#define PCIE_CLIENT_LINK_TRAIN_ENABLE HIWORD_UPDATE_BIT(0x0002)
-#define PCIE_CLIENT_ARI_ENABLE HIWORD_UPDATE_BIT(0x0008)
-#define PCIE_CLIENT_CONF_LANE_NUM(x) HIWORD_UPDATE(0x0030, ENCODE_LANES(x))
-#define PCIE_CLIENT_MODE_RC HIWORD_UPDATE_BIT(0x0040)
-#define PCIE_CLIENT_MODE_EP HIWORD_UPDATE(0x0040, 0)
-#define PCIE_CLIENT_GEN_SEL_1 HIWORD_UPDATE(0x0080, 0)
-#define PCIE_CLIENT_GEN_SEL_2 HIWORD_UPDATE_BIT(0x0080)
+#define PCIE_CLIENT_CONF_ENABLE HWORD_SET_BIT(0x0001)
+#define PCIE_CLIENT_CONF_DISABLE HWORD_CLR_BIT(0x0001)
+#define PCIE_CLIENT_LINK_TRAIN_ENABLE HWORD_SET_BIT(0x0002)
+#define PCIE_CLIENT_LINK_TRAIN_DISABLE HWORD_CLR_BIT(0x0002)
+#define PCIE_CLIENT_ARI_ENABLE HWORD_SET_BIT(0x0008)
+#define PCIE_CLIENT_CONF_LANE_NUM(x) FIELD_PREP_WM16(0x0030, ENCODE_LANES(x))
+#define PCIE_CLIENT_MODE_RC HWORD_SET_BIT(0x0040)
+#define PCIE_CLIENT_MODE_EP HWORD_CLR_BIT(0x0040)
+#define PCIE_CLIENT_GEN_SEL_1 HWORD_CLR_BIT(0x0080)
+#define PCIE_CLIENT_GEN_SEL_2 HWORD_SET_BIT(0x0080)
#define PCIE_CLIENT_LEGACY_INT_CTRL (PCIE_CLIENT_BASE + 0x0c)
-#define PCIE_CLIENT_INT_IN_ASSERT HIWORD_UPDATE_BIT(0x0002)
-#define PCIE_CLIENT_INT_IN_DEASSERT HIWORD_UPDATE(0x0002, 0)
-#define PCIE_CLIENT_INT_PEND_ST_PEND HIWORD_UPDATE_BIT(0x0001)
-#define PCIE_CLIENT_INT_PEND_ST_NORMAL HIWORD_UPDATE(0x0001, 0)
+#define PCIE_CLIENT_INT_IN_ASSERT HWORD_SET_BIT(0x0002)
+#define PCIE_CLIENT_INT_IN_DEASSERT HWORD_CLR_BIT(0x0002)
+#define PCIE_CLIENT_INT_PEND_ST_PEND HWORD_SET_BIT(0x0001)
+#define PCIE_CLIENT_INT_PEND_ST_NORMAL HWORD_CLR_BIT(0x0001)
#define PCIE_CLIENT_SIDE_BAND_STATUS (PCIE_CLIENT_BASE + 0x20)
#define PCIE_CLIENT_PHY_ST BIT(12)
#define PCIE_CLIENT_DEBUG_OUT_0 (PCIE_CLIENT_BASE + 0x3c)
#define PCIE_CLIENT_DEBUG_LTSSM_MASK GENMASK(5, 0)
#define PCIE_CLIENT_DEBUG_LTSSM_L1 0x18
#define PCIE_CLIENT_DEBUG_LTSSM_L2 0x19
+#define PCIE_CLIENT_BASIC_STATUS0 (PCIE_CLIENT_BASE + 0x44)
+#define PCIE_CLIENT_NEG_LINK_WIDTH_MASK GENMASK(7, 6)
+#define PCIE_CLIENT_NEG_LINK_WIDTH_SHIFT 6
+#define PCIE_CLIENT_NEG_LINK_SPEED BIT(5)
#define PCIE_CLIENT_BASIC_STATUS1 (PCIE_CLIENT_BASE + 0x48)
#define PCIE_CLIENT_LINK_STATUS_UP 0x00300000
#define PCIE_CLIENT_LINK_STATUS_MASK 0x00300000
@@ -86,6 +95,8 @@
#define PCIE_CORE_CTRL_MGMT_BASE 0x900000
#define PCIE_CORE_CTRL (PCIE_CORE_CTRL_MGMT_BASE + 0x000)
+#define PCIE_CORE_PL_CONF_LS_MASK 0x00000001
+#define PCIE_CORE_PL_CONF_LS_READY 0x00000001
#define PCIE_CORE_PL_CONF_SPEED_5G 0x00000008
#define PCIE_CORE_PL_CONF_SPEED_MASK 0x00000018
#define PCIE_CORE_PL_CONF_LANE_MASK 0x00000006
@@ -143,22 +154,16 @@
#define PCIE_RC_CONFIG_BASE 0xa00000
#define PCIE_EP_CONFIG_BASE 0xa00000
#define PCIE_EP_CONFIG_DID_VID (PCIE_EP_CONFIG_BASE + 0x00)
+#define PCIE_EP_CONFIG_LCS (PCIE_EP_CONFIG_BASE + 0xd0)
#define PCIE_RC_CONFIG_RID_CCR (PCIE_RC_CONFIG_BASE + 0x08)
-#define PCIE_RC_CONFIG_DCR (PCIE_RC_CONFIG_BASE + 0xc4)
-#define PCIE_RC_CONFIG_DCR_CSPL_SHIFT 18
-#define PCIE_RC_CONFIG_DCR_CSPL_LIMIT 0xff
-#define PCIE_RC_CONFIG_DCR_CPLS_SHIFT 26
-#define PCIE_RC_CONFIG_DCSR (PCIE_RC_CONFIG_BASE + 0xc8)
-#define PCIE_RC_CONFIG_DCSR_MPS_MASK GENMASK(7, 5)
-#define PCIE_RC_CONFIG_DCSR_MPS_256 (0x1 << 5)
-#define PCIE_RC_CONFIG_LINK_CAP (PCIE_RC_CONFIG_BASE + 0xcc)
-#define PCIE_RC_CONFIG_LINK_CAP_L0S BIT(10)
-#define PCIE_RC_CONFIG_LCS (PCIE_RC_CONFIG_BASE + 0xd0)
+#define PCIE_RC_CONFIG_CR (PCIE_RC_CONFIG_BASE + 0xc0)
#define PCIE_RC_CONFIG_L1_SUBSTATE_CTRL2 (PCIE_RC_CONFIG_BASE + 0x90c)
#define PCIE_RC_CONFIG_THP_CAP (PCIE_RC_CONFIG_BASE + 0x274)
#define PCIE_RC_CONFIG_THP_CAP_NEXT_MASK GENMASK(31, 20)
-#define PCIE_ADDR_MASK 0xffffff00
+#define MAX_AXI_IB_ROOTPORT_REGION_NUM 3
+#define MIN_AXI_ADDR_BITS_PASSED 8
+#define PCIE_ADDR_MASK GENMASK_ULL(63, MIN_AXI_ADDR_BITS_PASSED)
#define PCIE_CORE_AXI_CONF_BASE 0xc00000
#define PCIE_CORE_OB_REGION_ADDR0 (PCIE_CORE_AXI_CONF_BASE + 0x0)
#define PCIE_CORE_OB_REGION_ADDR0_NUM_BITS 0x3f
@@ -185,12 +190,11 @@
#define AXI_WRAPPER_TYPE1_CFG 0xb
#define AXI_WRAPPER_NOR_MSG 0xc
-#define MAX_AXI_IB_ROOTPORT_REGION_NUM 3
-#define MIN_AXI_ADDR_BITS_PASSED 8
#define PCIE_RC_SEND_PME_OFF 0x11960
-#define ROCKCHIP_VENDOR_ID 0x1d87
#define PCIE_LINK_IS_L2(x) \
(((x) & PCIE_CLIENT_DEBUG_LTSSM_MASK) == PCIE_CLIENT_DEBUG_LTSSM_L2)
+#define PCIE_LINK_TRAINING_DONE(x) \
+ (((x) & PCIE_CORE_PL_CONF_LS_MASK) == PCIE_CORE_PL_CONF_LS_READY)
#define PCIE_LINK_UP(x) \
(((x) & PCIE_CLIENT_LINK_STATUS_MASK) == PCIE_CLIENT_LINK_STATUS_UP)
#define PCIE_LINK_IS_GEN2(x) \
@@ -202,20 +206,6 @@
#define RC_REGION_0_TYPE_MASK GENMASK(3, 0)
#define MAX_AXI_WRAPPER_REGION_NUM 33
-#define ROCKCHIP_PCIE_MSG_ROUTING_TO_RC 0x0
-#define ROCKCHIP_PCIE_MSG_ROUTING_VIA_ADDR 0x1
-#define ROCKCHIP_PCIE_MSG_ROUTING_VIA_ID 0x2
-#define ROCKCHIP_PCIE_MSG_ROUTING_BROADCAST 0x3
-#define ROCKCHIP_PCIE_MSG_ROUTING_LOCAL_INTX 0x4
-#define ROCKCHIP_PCIE_MSG_ROUTING_PME_ACK 0x5
-#define ROCKCHIP_PCIE_MSG_CODE_ASSERT_INTA 0x20
-#define ROCKCHIP_PCIE_MSG_CODE_ASSERT_INTB 0x21
-#define ROCKCHIP_PCIE_MSG_CODE_ASSERT_INTC 0x22
-#define ROCKCHIP_PCIE_MSG_CODE_ASSERT_INTD 0x23
-#define ROCKCHIP_PCIE_MSG_CODE_DEASSERT_INTA 0x24
-#define ROCKCHIP_PCIE_MSG_CODE_DEASSERT_INTB 0x25
-#define ROCKCHIP_PCIE_MSG_CODE_DEASSERT_INTC 0x26
-#define ROCKCHIP_PCIE_MSG_CODE_DEASSERT_INTD 0x27
#define ROCKCHIP_PCIE_MSG_ROUTING_MASK GENMASK(7, 5)
#define ROCKCHIP_PCIE_MSG_ROUTING(route) \
(((route) << 5) & ROCKCHIP_PCIE_MSG_ROUTING_MASK)
@@ -241,10 +231,20 @@
#define ROCKCHIP_PCIE_EP_MSIX_CAP_CP_MASK GENMASK(15, 8)
#define ROCKCHIP_PCIE_EP_DUMMY_IRQ_ADDR 0x1
#define ROCKCHIP_PCIE_EP_PCI_LEGACY_IRQ_ADDR 0x3
+
+#define ROCKCHIP_PCIE_AT_MIN_NUM_BITS 8
+#define ROCKCHIP_PCIE_AT_MAX_NUM_BITS 20
+#define ROCKCHIP_PCIE_AT_SIZE_ALIGN (1UL << ROCKCHIP_PCIE_AT_MIN_NUM_BITS)
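+/* i.e. ATU mappings are aligned to at least 256 bytes (1UL << 8) */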
+
#define ROCKCHIP_PCIE_EP_FUNC_BASE(fn) \
(PCIE_EP_PF_CONFIG_REGS_BASE + (((fn) << 12) & GENMASK(19, 12)))
#define ROCKCHIP_PCIE_EP_VIRT_FUNC_BASE(fn) \
(PCIE_EP_PF_CONFIG_REGS_BASE + 0x10000 + (((fn) << 12) & GENMASK(19, 12)))
#define ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar) \
(PCIE_CORE_AXI_CONF_BASE + 0x0828 + (fn) * 0x0040 + (bar) * 0x0008)
#define ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar) \
@@ -287,27 +287,37 @@
(((c) << ((b) * 8 + 5)) & \
ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b))
+#define ROCKCHIP_NUM_PM_RSTS ARRAY_SIZE(rockchip_pci_pm_rsts)
+#define ROCKCHIP_NUM_CORE_RSTS ARRAY_SIZE(rockchip_pci_core_rsts)
+
+static const char * const rockchip_pci_pm_rsts[] = {
+ "pm",
+ "pclk",
+ "aclk",
+};
+
+/* NOTE: Do not reorder the deassert sequence of the following reset pins */
+static const char * const rockchip_pci_core_rsts[] = {
+ "pipe",
+ "mgmt",
+ "core",
+ "mgmt-sticky",
+};
+
struct rockchip_pcie {
void __iomem *reg_base; /* DT axi-base */
void __iomem *apb_base; /* DT apb-base */
bool legacy_phy;
struct phy *phys[MAX_LANE_NUM];
- struct reset_control *core_rst;
- struct reset_control *mgmt_rst;
- struct reset_control *mgmt_sticky_rst;
- struct reset_control *pipe_rst;
- struct reset_control *pm_rst;
- struct reset_control *aclk_rst;
- struct reset_control *pclk_rst;
- struct clk *aclk_pcie;
- struct clk *aclk_perf_pcie;
- struct clk *hclk_pcie;
- struct clk *clk_pcie_pm;
+ struct reset_control_bulk_data pm_rsts[ROCKCHIP_NUM_PM_RSTS];
+ struct reset_control_bulk_data core_rsts[ROCKCHIP_NUM_CORE_RSTS];
+ struct clk_bulk_data *clks;
+ int num_clks;
struct regulator *vpcie12v; /* 12V power supply */
struct regulator *vpcie3v3; /* 3.3V power supply */
struct regulator *vpcie1v8; /* 1.8V power supply */
struct regulator *vpcie0v9; /* 0.9V power supply */
- struct gpio_desc *ep_gpio;
+ struct gpio_desc *perst_gpio;
u32 lanes;
u8 lanes_map;
int link_gen;
@@ -336,7 +346,7 @@ int rockchip_pcie_init_port(struct rockchip_pcie *rockchip);
int rockchip_pcie_get_phys(struct rockchip_pcie *rockchip);
void rockchip_pcie_deinit_phys(struct rockchip_pcie *rockchip);
int rockchip_pcie_enable_clocks(struct rockchip_pcie *rockchip);
-void rockchip_pcie_disable_clocks(void *data);
+void rockchip_pcie_disable_clocks(struct rockchip_pcie *rockchip);
void rockchip_pcie_cfg_configuration_accesses(
struct rockchip_pcie *rockchip, u32 type);
diff --git a/drivers/pci/controller/pcie-rzg3s-host.c b/drivers/pci/controller/pcie-rzg3s-host.c
new file mode 100644
index 000000000000..667e6d629474
--- /dev/null
+++ b/drivers/pci/controller/pcie-rzg3s-host.c
@@ -0,0 +1,1761 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe driver for Renesas RZ/G3S SoCs
+ *
+ * Copyright (C) 2025 Renesas Electronics Corp.
+ *
+ * Based on:
+ * drivers/pci/controller/pcie-rcar-host.c
+ * Copyright (C) 2009 - 2011 Paul Mundt
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bitmap.h>
+#include <linux/bitops.h>
+#include <linux/cleanup.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/iopoll.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqchip/irq-msi-lib.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mutex.h>
+#include <linux/msi.h>
+#include <linux/of_irq.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/units.h>
+
+#include "../pci.h"
+
+/* AXI registers */
+#define RZG3S_PCI_REQDATA(id) (0x80 + (id) * 0x4)
+#define RZG3S_PCI_REQRCVDAT 0x8c
+
+#define RZG3S_PCI_REQADR1 0x90
+#define RZG3S_PCI_REQADR1_BUS GENMASK(31, 24)
+#define RZG3S_PCI_REQADR1_DEV GENMASK(23, 19)
+#define RZG3S_PCI_REQADR1_FUNC GENMASK(18, 16)
+#define RZG3S_PCI_REQADR1_REG GENMASK(11, 0)
+
+#define RZG3S_PCI_REQBE 0x98
+#define RZG3S_PCI_REQBE_BYTE_EN GENMASK(3, 0)
+
+#define RZG3S_PCI_REQISS 0x9c
+#define RZG3S_PCI_REQISS_MOR_STATUS GENMASK(18, 16)
+#define RZG3S_PCI_REQISS_TR_TYPE GENMASK(11, 8)
+#define RZG3S_PCI_REQISS_TR_TP0_RD FIELD_PREP(RZG3S_PCI_REQISS_TR_TYPE, 0x4)
+#define RZG3S_PCI_REQISS_TR_TP0_WR FIELD_PREP(RZG3S_PCI_REQISS_TR_TYPE, 0x5)
+#define RZG3S_PCI_REQISS_TR_TP1_RD FIELD_PREP(RZG3S_PCI_REQISS_TR_TYPE, 0x6)
+#define RZG3S_PCI_REQISS_TR_TP1_WR FIELD_PREP(RZG3S_PCI_REQISS_TR_TYPE, 0x7)
+#define RZG3S_PCI_REQISS_REQ_ISSUE BIT(0)
+
+#define RZG3S_PCI_MSIRCVWADRL 0x100
+#define RZG3S_PCI_MSIRCVWADRL_MASK GENMASK(31, 3)
+#define RZG3S_PCI_MSIRCVWADRL_MSG_DATA_ENA BIT(1)
+#define RZG3S_PCI_MSIRCVWADRL_ENA BIT(0)
+
+#define RZG3S_PCI_MSIRCVWADRU 0x104
+
+#define RZG3S_PCI_MSIRCVWMSKL 0x108
+#define RZG3S_PCI_MSIRCVWMSKL_MASK GENMASK(31, 2)
+
+#define RZG3S_PCI_PINTRCVIE 0x110
+#define RZG3S_PCI_PINTRCVIE_INTX(i) BIT(i)
+#define RZG3S_PCI_PINTRCVIE_MSI BIT(4)
+
+#define RZG3S_PCI_PINTRCVIS 0x114
+#define RZG3S_PCI_PINTRCVIS_INTX(i) BIT(i)
+#define RZG3S_PCI_PINTRCVIS_MSI BIT(4)
+
+#define RZG3S_PCI_MSGRCVIE 0x120
+#define RZG3S_PCI_MSGRCVIE_MSG_RCV BIT(24)
+
+#define RZG3S_PCI_MSGRCVIS 0x124
+#define RZG3S_PCI_MSGRCVIS_MRI BIT(24)
+
+#define RZG3S_PCI_PEIE0 0x200
+
+#define RZG3S_PCI_PEIS0 0x204
+#define RZG3S_PCI_PEIS0_RX_DLLP_PM_ENTER BIT(12)
+#define RZG3S_PCI_PEIS0_DL_UPDOWN BIT(9)
+
+#define RZG3S_PCI_PEIE1 0x208
+#define RZG3S_PCI_PEIS1 0x20c
+#define RZG3S_PCI_AMEIS 0x214
+#define RZG3S_PCI_ASEIS1 0x224
+
+#define RZG3S_PCI_PCSTAT1 0x408
+#define RZG3S_PCI_PCSTAT1_LTSSM_STATE GENMASK(14, 10)
+#define RZG3S_PCI_PCSTAT1_DL_DOWN_STS BIT(0)
+
+#define RZG3S_PCI_PCCTRL2 0x410
+#define RZG3S_PCI_PCCTRL2_LS_CHG GENMASK(9, 8)
+#define RZG3S_PCI_PCCTRL2_LS_CHG_REQ BIT(0)
+
+#define RZG3S_PCI_PCSTAT2 0x414
+#define RZG3S_PCI_PCSTAT2_LS_CHG_DONE BIT(28)
+#define RZG3S_PCI_PCSTAT2_SDRIRE GENMASK(7, 1)
+
+#define RZG3S_PCI_PERM 0x300
+#define RZG3S_PCI_PERM_CFG_HWINIT_EN BIT(2)
+#define RZG3S_PCI_PERM_PIPE_PHY_REG_EN BIT(1)
+
+#define RZG3S_PCI_MSIRE(id) (0x600 + (id) * 0x10)
+#define RZG3S_PCI_MSIRE_ENA BIT(0)
+
+#define RZG3S_PCI_MSIRM(id) (0x608 + (id) * 0x10)
+#define RZG3S_PCI_MSIRS(id) (0x60c + (id) * 0x10)
+
+#define RZG3S_PCI_AWBASEL(id) (0x1000 + (id) * 0x20)
+#define RZG3S_PCI_AWBASEL_WIN_ENA BIT(0)
+
+#define RZG3S_PCI_AWBASEU(id) (0x1004 + (id) * 0x20)
+#define RZG3S_PCI_AWMASKL(id) (0x1008 + (id) * 0x20)
+#define RZG3S_PCI_AWMASKU(id) (0x100c + (id) * 0x20)
+#define RZG3S_PCI_ADESTL(id) (0x1010 + (id) * 0x20)
+#define RZG3S_PCI_ADESTU(id) (0x1014 + (id) * 0x20)
+
+#define RZG3S_PCI_PWBASEL(id) (0x1100 + (id) * 0x20)
+#define RZG3S_PCI_PWBASEL_ENA BIT(0)
+
+#define RZG3S_PCI_PWBASEU(id) (0x1104 + (id) * 0x20)
+#define RZG3S_PCI_PDESTL(id) (0x1110 + (id) * 0x20)
+#define RZG3S_PCI_PDESTU(id) (0x1114 + (id) * 0x20)
+#define RZG3S_PCI_PWMASKL(id) (0x1108 + (id) * 0x20)
+#define RZG3S_PCI_PWMASKU(id) (0x110c + (id) * 0x20)
+
+/* PHY control registers */
+#define RZG3S_PCI_PHY_XCFGD(id) (0x2000 + (id) * 0x10)
+#define RZG3S_PCI_PHY_XCFGD_NUM 39
+
+#define RZG3S_PCI_PHY_XCFGA_CMN(id) (0x2400 + (id) * 0x10)
+#define RZG3S_PCI_PHY_XCFGA_CMN_NUM 16
+
+#define RZG3S_PCI_PHY_XCFGA_RX(id) (0x2500 + (id) * 0x10)
+#define RZG3S_PCI_PHY_XCFGA_RX_NUM 13
+
+#define RZG3S_PCI_PHY_XCFGA_TX 0x25d0
+
+#define RZG3S_PCI_PHY_XCFG_CTRL 0x2a20
+#define RZG3S_PCI_PHY_XCFG_CTRL_PHYREG_SEL BIT(0)
+
+/* PCIe registers */
+#define RZG3S_PCI_CFG_BASE 0x6000
+#define RZG3S_PCI_CFG_BARMSK00L 0xa0
+#define RZG3S_PCI_CFG_BARMSK00U 0xa4
+
+#define RZG3S_PCI_CFG_PCIEC 0x60
+
+/* System controller registers */
+#define RZG3S_SYS_PCIE_RST_RSM_B 0xd74
+#define RZG3S_SYS_PCIE_RST_RSM_B_MASK BIT(0)
+
+/* Maximum number of windows */
+#define RZG3S_MAX_WINDOWS 8
+
+/* Number of MSI interrupts per register */
+#define RZG3S_PCI_MSI_INT_PER_REG 32
+/* The number of MSI interrupts */
+#define RZG3S_PCI_MSI_INT_NR RZG3S_PCI_MSI_INT_PER_REG
+
+/* Timeouts experimentally determined */
+#define RZG3S_REQ_ISSUE_TIMEOUT_US 2500
+
+/**
+ * struct rzg3s_pcie_msi - RZ/G3S PCIe MSI data structure
+ * @domain: IRQ domain
+ * @map: bitmap with the allocated MSIs
+ * @dma_addr: address of the allocated MSI window
+ * @window_base: base address of the MSI window
+ * @pages: allocated pages for MSI window mapping
+ * @map_lock: lock for bitmap with the allocated MSIs
+ * @irq: MSI interrupt
+ */
+struct rzg3s_pcie_msi {
+ struct irq_domain *domain;
+ DECLARE_BITMAP(map, RZG3S_PCI_MSI_INT_NR);
+ dma_addr_t dma_addr;
+ dma_addr_t window_base;
+ unsigned long pages;
+ struct mutex map_lock;
+ int irq;
+};
+
+struct rzg3s_pcie_host;
+
+/**
+ * struct rzg3s_pcie_soc_data - SoC specific data
+ * @init_phy: PHY initialization function
+ * @power_resets: array with the resets that need to be de-asserted after
+ * power-on
+ * @cfg_resets: array with the resets that need to be de-asserted after
+ * configuration
+ * @num_power_resets: number of power resets
+ * @num_cfg_resets: number of configuration resets
+ */
+struct rzg3s_pcie_soc_data {
+ int (*init_phy)(struct rzg3s_pcie_host *host);
+ const char * const *power_resets;
+ const char * const *cfg_resets;
+ u8 num_power_resets;
+ u8 num_cfg_resets;
+};
+
+/**
+ * struct rzg3s_pcie_port - RZ/G3S PCIe Root Port data structure
+ * @refclk: PCIe reference clock
+ * @vendor_id: Vendor ID
+ * @device_id: Device ID
+ */
+struct rzg3s_pcie_port {
+ struct clk *refclk;
+ u32 vendor_id;
+ u32 device_id;
+};
+
+/**
+ * struct rzg3s_pcie_host - RZ/G3S PCIe data structure
+ * @axi: base address for AXI registers
+ * @pcie: base address for PCIe registers
+ * @dev: struct device
+ * @power_resets: reset control signals that need to be de-asserted after power up
+ * @cfg_resets: reset control signals that need to be de-asserted after configuration
+ * @sysc: SYSC regmap
+ * @intx_domain: INTx IRQ domain
+ * @data: SoC specific data
+ * @msi: MSI data structure
+ * @port: PCIe Root Port
+ * @hw_lock: lock for access to the HW resources
+ * @intx_irqs: INTx interrupts
+ * @max_link_speed: maximum supported link speed
+ */
+struct rzg3s_pcie_host {
+ void __iomem *axi;
+ void __iomem *pcie;
+ struct device *dev;
+ struct reset_control_bulk_data *power_resets;
+ struct reset_control_bulk_data *cfg_resets;
+ struct regmap *sysc;
+ struct irq_domain *intx_domain;
+ const struct rzg3s_pcie_soc_data *data;
+ struct rzg3s_pcie_msi msi;
+ struct rzg3s_pcie_port port;
+ raw_spinlock_t hw_lock;
+ int intx_irqs[PCI_NUM_INTX];
+ int max_link_speed;
+};
+
+#define rzg3s_msi_to_host(_msi) container_of(_msi, struct rzg3s_pcie_host, msi)
+
+static void rzg3s_pcie_update_bits(void __iomem *base, u32 offset, u32 mask,
+ u32 val)
+{
+ u32 tmp;
+
+ tmp = readl_relaxed(base + offset);
+ tmp &= ~mask;
+ tmp |= val & mask;
+ writel_relaxed(tmp, base + offset);
+}
+
+static int rzg3s_pcie_child_issue_request(struct rzg3s_pcie_host *host)
+{
+ u32 val;
+ int ret;
+
+ rzg3s_pcie_update_bits(host->axi, RZG3S_PCI_REQISS,
+ RZG3S_PCI_REQISS_REQ_ISSUE,
+ RZG3S_PCI_REQISS_REQ_ISSUE);
+ ret = readl_poll_timeout_atomic(host->axi + RZG3S_PCI_REQISS, val,
+ !(val & RZG3S_PCI_REQISS_REQ_ISSUE),
+ 5, RZG3S_REQ_ISSUE_TIMEOUT_US);
+
+ if (val & RZG3S_PCI_REQISS_MOR_STATUS)
+ return -EIO;
+
+ return ret;
+}
+
+static void rzg3s_pcie_child_prepare_bus(struct pci_bus *bus,
+ unsigned int devfn, int where)
+{
+ struct rzg3s_pcie_host *host = bus->sysdata;
+ unsigned int dev, func, reg;
+
+ dev = PCI_SLOT(devfn);
+ func = PCI_FUNC(devfn);
+ reg = where & ~0x3;
+
+ /* Set the destination */
+ writel_relaxed(FIELD_PREP(RZG3S_PCI_REQADR1_BUS, bus->number) |
+ FIELD_PREP(RZG3S_PCI_REQADR1_DEV, dev) |
+ FIELD_PREP(RZG3S_PCI_REQADR1_FUNC, func) |
+ FIELD_PREP(RZG3S_PCI_REQADR1_REG, reg),
+ host->axi + RZG3S_PCI_REQADR1);
+
+ /* Set byte enable */
+ writel_relaxed(RZG3S_PCI_REQBE_BYTE_EN, host->axi + RZG3S_PCI_REQBE);
+}
+
+static int rzg3s_pcie_child_read_conf(struct rzg3s_pcie_host *host,
+ struct pci_bus *bus, unsigned int devfn,
+ int where, u32 *data)
+{
+ bool type0 = pci_is_root_bus(bus->parent);
+ int ret;
+
+ rzg3s_pcie_child_prepare_bus(bus, devfn, where);
+
+ /* Set the type of request */
+ writel_relaxed(type0 ? RZG3S_PCI_REQISS_TR_TP0_RD :
+ RZG3S_PCI_REQISS_TR_TP1_RD,
+ host->axi + RZG3S_PCI_REQISS);
+
+ /* Issue the request and wait to finish */
+ ret = rzg3s_pcie_child_issue_request(host);
+ if (ret)
+ return PCIBIOS_SET_FAILED;
+
+ /* Read the data */
+ *data = readl_relaxed(host->axi + RZG3S_PCI_REQRCVDAT);
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+/* Serialization is provided by 'pci_lock' in drivers/pci/access.c */
+static int rzg3s_pcie_child_read(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val)
+{
+ struct rzg3s_pcie_host *host = bus->sysdata;
+ int ret;
+
+ ret = rzg3s_pcie_child_read_conf(host, bus, devfn, where, val);
+ if (ret != PCIBIOS_SUCCESSFUL)
+ return ret;
+
+ if (size <= 2)
+ *val = (*val >> (8 * (where & 3))) & ((1 << (size * 8)) - 1);
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int rzg3s_pcie_child_write_conf(struct rzg3s_pcie_host *host,
+ struct pci_bus *bus, unsigned int devfn,
+ int where, u32 data)
+{
+ bool type0 = pci_is_root_bus(bus->parent);
+ int ret;
+
+ rzg3s_pcie_child_prepare_bus(bus, devfn, where);
+
+ /* Set the write data */
+ writel_relaxed(0, host->axi + RZG3S_PCI_REQDATA(0));
+ writel_relaxed(0, host->axi + RZG3S_PCI_REQDATA(1));
+ writel_relaxed(data, host->axi + RZG3S_PCI_REQDATA(2));
+
+ /* Set the type of request */
+ writel_relaxed(type0 ? RZG3S_PCI_REQISS_TR_TP0_WR :
+ RZG3S_PCI_REQISS_TR_TP1_WR,
+ host->axi + RZG3S_PCI_REQISS);
+
+ /* Issue the request and wait to finish */
+ ret = rzg3s_pcie_child_issue_request(host);
+ if (ret)
+ return PCIBIOS_SET_FAILED;
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+/* Serialization is provided by 'pci_lock' in drivers/pci/access.c */
+static int rzg3s_pcie_child_write(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val)
+{
+ struct rzg3s_pcie_host *host = bus->sysdata;
+ u32 data, shift;
+ int ret;
+
+ if (size == 4)
+ return rzg3s_pcie_child_write_conf(host, bus, devfn, where, val);
+
+ /*
+ * The controller only does 32-bit accesses, so sub-word writes must be
+ * done as read/modify/write. This has potential side effects. For
+ * example, for a 16-bit write we must do a 32-bit read, merge in the
+ * 16 bits we intend to write, then do a 32-bit write. If the 16 bits
+ * we *don't* intend to write happen to have any RW1C
+ * (write-one-to-clear) bits set, we just inadvertently cleared
+ * something we shouldn't have.
+ */
+ if (!bus->unsafe_warn) {
+ dev_warn(&bus->dev, "%d-byte config write to %04x:%02x:%02x.%d offset %#x may corrupt adjacent RW1C bits\n",
+ size, pci_domain_nr(bus), bus->number,
+ PCI_SLOT(devfn), PCI_FUNC(devfn), where);
+ bus->unsafe_warn = 1;
+ }
+
+ ret = rzg3s_pcie_child_read_conf(host, bus, devfn, where, &data);
+ if (ret != PCIBIOS_SUCCESSFUL)
+ return ret;
+
+ if (size == 1) {
+ shift = BITS_PER_BYTE * (where & 3);
+ data &= ~(0xff << shift);
+ data |= ((val & 0xff) << shift);
+ } else if (size == 2) {
+ shift = BITS_PER_BYTE * (where & 2);
+ data &= ~(0xffff << shift);
+ data |= ((val & 0xffff) << shift);
+ } else {
+ data = val;
+ }
+
+ return rzg3s_pcie_child_write_conf(host, bus, devfn, where, data);
+}
+
+static struct pci_ops rzg3s_pcie_child_ops = {
+ .read = rzg3s_pcie_child_read,
+ .write = rzg3s_pcie_child_write,
+};
+
+static void __iomem *rzg3s_pcie_root_map_bus(struct pci_bus *bus,
+ unsigned int devfn, int where)
+{
+ struct rzg3s_pcie_host *host = bus->sysdata;
+
+ if (devfn)
+ return NULL;
+
+ return host->pcie + where;
+}
+
+/* Serialized by 'pci_lock' */
+static int rzg3s_pcie_root_write(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val)
+{
+ struct rzg3s_pcie_host *host = bus->sysdata;
+ int ret;
+
+ /* Enable access control to the CFGU */
+ writel_relaxed(RZG3S_PCI_PERM_CFG_HWINIT_EN,
+ host->axi + RZG3S_PCI_PERM);
+
+ ret = pci_generic_config_write(bus, devfn, where, size, val);
+
+ /* Disable access control to the CFGU */
+ writel_relaxed(0, host->axi + RZG3S_PCI_PERM);
+
+ return ret;
+}
+
+static struct pci_ops rzg3s_pcie_root_ops = {
+ .read = pci_generic_config_read,
+ .write = rzg3s_pcie_root_write,
+ .map_bus = rzg3s_pcie_root_map_bus,
+};
+
+static void rzg3s_pcie_intx_irq_handler(struct irq_desc *desc)
+{
+ struct rzg3s_pcie_host *host = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ unsigned int irq = irq_desc_get_irq(desc);
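+ /* INTA..INTD IRQs are contiguous, so the offset from INTA gives the hwirq */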
+ u32 intx = irq - host->intx_irqs[0];
+
+ chained_irq_enter(chip, desc);
+ generic_handle_domain_irq(host->intx_domain, intx);
+ chained_irq_exit(chip, desc);
+}
+
+static irqreturn_t rzg3s_pcie_msi_irq(int irq, void *data)
+{
+ u8 regs = RZG3S_PCI_MSI_INT_NR / RZG3S_PCI_MSI_INT_PER_REG;
+ DECLARE_BITMAP(bitmap, RZG3S_PCI_MSI_INT_NR);
+ struct rzg3s_pcie_host *host = data;
+ struct rzg3s_pcie_msi *msi = &host->msi;
+ unsigned long bit;
+ u32 status;
+
+ status = readl_relaxed(host->axi + RZG3S_PCI_PINTRCVIS);
+ if (!(status & RZG3S_PCI_PINTRCVIS_MSI))
+ return IRQ_NONE;
+
+ /* Clear the MSI */
+ rzg3s_pcie_update_bits(host->axi, RZG3S_PCI_PINTRCVIS,
+ RZG3S_PCI_PINTRCVIS_MSI,
+ RZG3S_PCI_PINTRCVIS_MSI);
+ rzg3s_pcie_update_bits(host->axi, RZG3S_PCI_MSGRCVIS,
+ RZG3S_PCI_MSGRCVIS_MRI, RZG3S_PCI_MSGRCVIS_MRI);
+
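+ /* Gather the per-register MSI status bits into a single bitmap of hwirqs */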
+ for (u8 reg_id = 0; reg_id < regs; reg_id++) {
+ status = readl_relaxed(host->axi + RZG3S_PCI_MSIRS(reg_id));
+ bitmap_write(bitmap, status, reg_id * RZG3S_PCI_MSI_INT_PER_REG,
+ RZG3S_PCI_MSI_INT_PER_REG);
+ }
+
+ for_each_set_bit(bit, bitmap, RZG3S_PCI_MSI_INT_NR) {
+ int ret;
+
+ ret = generic_handle_domain_irq(msi->domain, bit);
+ if (ret) {
+ u8 reg_bit = bit % RZG3S_PCI_MSI_INT_PER_REG;
+ u8 reg_id = bit / RZG3S_PCI_MSI_INT_PER_REG;
+
+ /* Unknown MSI, just clear it */
+ writel_relaxed(BIT(reg_bit),
+ host->axi + RZG3S_PCI_MSIRS(reg_id));
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void rzg3s_pcie_msi_irq_ack(struct irq_data *d)
+{
+ struct rzg3s_pcie_msi *msi = irq_data_get_irq_chip_data(d);
+ struct rzg3s_pcie_host *host = rzg3s_msi_to_host(msi);
+ u8 reg_bit = d->hwirq % RZG3S_PCI_MSI_INT_PER_REG;
+ u8 reg_id = d->hwirq / RZG3S_PCI_MSI_INT_PER_REG;
+
+ guard(raw_spinlock_irqsave)(&host->hw_lock);
+
+ writel_relaxed(BIT(reg_bit), host->axi + RZG3S_PCI_MSIRS(reg_id));
+}
+
+static void rzg3s_pcie_msi_irq_mask(struct irq_data *d)
+{
+ struct rzg3s_pcie_msi *msi = irq_data_get_irq_chip_data(d);
+ struct rzg3s_pcie_host *host = rzg3s_msi_to_host(msi);
+ u8 reg_bit = d->hwirq % RZG3S_PCI_MSI_INT_PER_REG;
+ u8 reg_id = d->hwirq / RZG3S_PCI_MSI_INT_PER_REG;
+
+ guard(raw_spinlock_irqsave)(&host->hw_lock);
+
+ rzg3s_pcie_update_bits(host->axi, RZG3S_PCI_MSIRM(reg_id), BIT(reg_bit),
+ BIT(reg_bit));
+}
+
+static void rzg3s_pcie_msi_irq_unmask(struct irq_data *d)
+{
+ struct rzg3s_pcie_msi *msi = irq_data_get_irq_chip_data(d);
+ struct rzg3s_pcie_host *host = rzg3s_msi_to_host(msi);
+ u8 reg_bit = d->hwirq % RZG3S_PCI_MSI_INT_PER_REG;
+ u8 reg_id = d->hwirq / RZG3S_PCI_MSI_INT_PER_REG;
+
+ guard(raw_spinlock_irqsave)(&host->hw_lock);
+
+ rzg3s_pcie_update_bits(host->axi, RZG3S_PCI_MSIRM(reg_id), BIT(reg_bit),
+ 0);
+}
+
+static void rzg3s_pcie_irq_compose_msi_msg(struct irq_data *data,
+ struct msi_msg *msg)
+{
+ struct rzg3s_pcie_msi *msi = irq_data_get_irq_chip_data(data);
+ struct rzg3s_pcie_host *host = rzg3s_msi_to_host(msi);
+ u32 lo, hi;
+
+ /*
+ * The enable and message data enable bits are part of the low address
+ * word. Drop them, along with the unused bit.
+ */
+ lo = readl_relaxed(host->axi + RZG3S_PCI_MSIRCVWADRL) &
+ RZG3S_PCI_MSIRCVWADRL_MASK;
+ hi = readl_relaxed(host->axi + RZG3S_PCI_MSIRCVWADRU);
+
+ msg->address_lo = lo;
+ msg->address_hi = hi;
+ msg->data = data->hwirq;
+}
+
+static struct irq_chip rzg3s_pcie_msi_bottom_chip = {
+ .name = "rzg3s-pcie-msi",
+ .irq_ack = rzg3s_pcie_msi_irq_ack,
+ .irq_mask = rzg3s_pcie_msi_irq_mask,
+ .irq_unmask = rzg3s_pcie_msi_irq_unmask,
+ .irq_compose_msi_msg = rzg3s_pcie_irq_compose_msi_msg,
+};
+
+static int rzg3s_pcie_msi_domain_alloc(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs,
+ void *args)
+{
+ struct rzg3s_pcie_msi *msi = domain->host_data;
+ int hwirq;
+
+ scoped_guard(mutex, &msi->map_lock) {
+ hwirq = bitmap_find_free_region(msi->map, RZG3S_PCI_MSI_INT_NR,
+ order_base_2(nr_irqs));
+ }
+
+ if (hwirq < 0)
+ return -ENOSPC;
+
+ for (unsigned int i = 0; i < nr_irqs; i++) {
+ irq_domain_set_info(domain, virq + i, hwirq + i,
+ &rzg3s_pcie_msi_bottom_chip,
+ domain->host_data, handle_edge_irq, NULL,
+ NULL);
+ }
+
+ return 0;
+}
+
+static void rzg3s_pcie_msi_domain_free(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs)
+{
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+ struct rzg3s_pcie_msi *msi = domain->host_data;
+
+ guard(mutex)(&msi->map_lock);
+
+ bitmap_release_region(msi->map, d->hwirq, order_base_2(nr_irqs));
+}
+
+static const struct irq_domain_ops rzg3s_pcie_msi_domain_ops = {
+ .alloc = rzg3s_pcie_msi_domain_alloc,
+ .free = rzg3s_pcie_msi_domain_free,
+};
+
+#define RZG3S_PCIE_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
+ MSI_FLAG_USE_DEF_CHIP_OPS | \
+ MSI_FLAG_NO_AFFINITY | \
+ MSI_FLAG_PCI_MSI_MASK_PARENT)
+
+#define RZG3S_PCIE_MSI_FLAGS_SUPPORTED (MSI_FLAG_MULTI_PCI_MSI | \
+ MSI_GENERIC_FLAGS_MASK)
+
+static const struct msi_parent_ops rzg3s_pcie_msi_parent_ops = {
+ .required_flags = RZG3S_PCIE_MSI_FLAGS_REQUIRED,
+ .supported_flags = RZG3S_PCIE_MSI_FLAGS_SUPPORTED,
+ .bus_select_token = DOMAIN_BUS_PCI_MSI,
+ .chip_flags = MSI_CHIP_FLAG_SET_ACK,
+ .prefix = "RZG3S-",
+ .init_dev_msi_info = msi_lib_init_dev_msi_info,
+};
+
+static int rzg3s_pcie_msi_allocate_domains(struct rzg3s_pcie_msi *msi)
+{
+ struct rzg3s_pcie_host *host = rzg3s_msi_to_host(msi);
+ struct device *dev = host->dev;
+ struct irq_domain_info info = {
+ .fwnode = dev_fwnode(dev),
+ .ops = &rzg3s_pcie_msi_domain_ops,
+ .size = RZG3S_PCI_MSI_INT_NR,
+ .host_data = msi,
+ };
+
+ msi->domain = msi_create_parent_irq_domain(&info,
+ &rzg3s_pcie_msi_parent_ops);
+ if (!msi->domain)
+ return dev_err_probe(dev, -ENOMEM,
+ "failed to create IRQ domain\n");
+
+ return 0;
+}
+
+static int rzg3s_pcie_msi_hw_setup(struct rzg3s_pcie_host *host)
+{
+ u8 regs = RZG3S_PCI_MSI_INT_NR / RZG3S_PCI_MSI_INT_PER_REG;
+ struct rzg3s_pcie_msi *msi = &host->msi;
+
+ /*
+ * Set MSI window size. HW will set the window to
+ * RZG3S_PCI_MSI_INT_NR * 4 bytes.
+ */
+ writel_relaxed(FIELD_PREP(RZG3S_PCI_MSIRCVWMSKL_MASK,
+ RZG3S_PCI_MSI_INT_NR - 1),
+ host->axi + RZG3S_PCI_MSIRCVWMSKL);
+
+ /* Set MSI window address and enable MSI window */
+ writel_relaxed(upper_32_bits(msi->window_base),
+ host->axi + RZG3S_PCI_MSIRCVWADRU);
+ writel_relaxed(lower_32_bits(msi->window_base) |
+ RZG3S_PCI_MSIRCVWADRL_ENA |
+ RZG3S_PCI_MSIRCVWADRL_MSG_DATA_ENA,
+ host->axi + RZG3S_PCI_MSIRCVWADRL);
+
+ /* Set MSI receive enable */
+ for (u8 reg_id = 0; reg_id < regs; reg_id++) {
+ writel_relaxed(RZG3S_PCI_MSIRE_ENA,
+ host->axi + RZG3S_PCI_MSIRE(reg_id));
+ }
+
+ /* Enable message receive interrupts */
+ writel_relaxed(RZG3S_PCI_MSGRCVIE_MSG_RCV,
+ host->axi + RZG3S_PCI_MSGRCVIE);
+
+ /* Enable MSI */
+ rzg3s_pcie_update_bits(host->axi, RZG3S_PCI_PINTRCVIE,
+ RZG3S_PCI_PINTRCVIE_MSI,
+ RZG3S_PCI_PINTRCVIE_MSI);
+
+ return 0;
+}
+
+static int rzg3s_pcie_msi_setup(struct rzg3s_pcie_host *host)
+{
+ size_t size = RZG3S_PCI_MSI_INT_NR * sizeof(u32);
+ struct rzg3s_pcie_msi *msi = &host->msi;
+ struct device *dev = host->dev;
+ int id, ret;
+
+ msi->pages = __get_free_pages(GFP_KERNEL | GFP_DMA, 0);
+ if (!msi->pages)
+ return -ENOMEM;
+
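+ /*
+ * Map twice the MSI window size so that a window of 'size' bytes,
+ * aligned to 'size', always fits within the mapped area.
+ */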
+ msi->dma_addr = dma_map_single(dev, (void *)msi->pages, size * 2,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, msi->dma_addr)) {
+ ret = -ENOMEM;
+ goto free_pages;
+ }
+
+ /*
+ * According to the RZ/G3S HW manual (Rev.1.10, section 34.4.5.2 Setting
+ * the MSI Window) the MSI window needs to fall within one of the
+ * enabled AXI windows. Find an enabled AXI window that contains the
+ * MSI window.
+ */
+ for (id = 0; id < RZG3S_MAX_WINDOWS; id++) {
+ u64 base, basel, baseu;
+ u64 mask, maskl, masku;
+
+ basel = readl_relaxed(host->axi + RZG3S_PCI_AWBASEL(id));
+ /* Skip checking this AXI window if it's not enabled */
+ if (!(basel & RZG3S_PCI_AWBASEL_WIN_ENA))
+ continue;
+
+ baseu = readl_relaxed(host->axi + RZG3S_PCI_AWBASEU(id));
+ base = baseu << 32 | basel;
+
+ maskl = readl_relaxed(host->axi + RZG3S_PCI_AWMASKL(id));
+ masku = readl_relaxed(host->axi + RZG3S_PCI_AWMASKU(id));
+ mask = masku << 32 | maskl;
+
+ if (msi->dma_addr < base || msi->dma_addr > base + mask)
+ continue;
+
+ break;
+ }
+
+ if (id == RZG3S_MAX_WINDOWS) {
+ ret = -EINVAL;
+ goto dma_unmap;
+ }
+
+ /* The MSI base address must be aligned to the MSI size */
+ msi->window_base = ALIGN(msi->dma_addr, size);
+ if (msi->window_base < msi->dma_addr) {
+ ret = -EINVAL;
+ goto dma_unmap;
+ }
+
+ rzg3s_pcie_msi_hw_setup(host);
+
+ return 0;
+
+dma_unmap:
+ dma_unmap_single(dev, msi->dma_addr, size * 2, DMA_BIDIRECTIONAL);
+free_pages:
+ free_pages(msi->pages, 0);
+ return ret;
+}
+
+static void rzg3s_pcie_msi_hw_teardown(struct rzg3s_pcie_host *host)
+{
+ u8 regs = RZG3S_PCI_MSI_INT_NR / RZG3S_PCI_MSI_INT_PER_REG;
+
+ /* Disable MSI */
+ rzg3s_pcie_update_bits(host->axi, RZG3S_PCI_PINTRCVIE,
+ RZG3S_PCI_PINTRCVIE_MSI, 0);
+
+ /* Disable message receive interrupts */
+ rzg3s_pcie_update_bits(host->axi, RZG3S_PCI_MSGRCVIE,
+ RZG3S_PCI_MSGRCVIE_MSG_RCV, 0);
+
+ /* Disable MSI receive enable */
+ for (u8 reg_id = 0; reg_id < regs; reg_id++)
+ writel_relaxed(0, host->axi + RZG3S_PCI_MSIRE(reg_id));
+
+ /* Disable MSI window */
+ writel_relaxed(0, host->axi + RZG3S_PCI_MSIRCVWADRL);
+}
+
+static void rzg3s_pcie_teardown_msi(struct rzg3s_pcie_host *host)
+{
+ size_t size = RZG3S_PCI_MSI_INT_NR * sizeof(u32);
+ struct rzg3s_pcie_msi *msi = &host->msi;
+
+ rzg3s_pcie_msi_hw_teardown(host);
+
+ free_irq(msi->irq, host);
+ irq_domain_remove(msi->domain);
+
+ /* Unmap and free the MSI window memory */
+ dma_unmap_single(host->dev, msi->dma_addr, size * 2, DMA_BIDIRECTIONAL);
+ free_pages(msi->pages, 0);
+}
+
+static int rzg3s_pcie_init_msi(struct rzg3s_pcie_host *host)
+{
+ struct platform_device *pdev = to_platform_device(host->dev);
+ struct rzg3s_pcie_msi *msi = &host->msi;
+ struct device *dev = host->dev;
+ const char *devname;
+ int ret;
+
+ ret = devm_mutex_init(dev, &msi->map_lock);
+ if (ret)
+ return ret;
+
+ msi->irq = platform_get_irq_byname(pdev, "msi");
+ if (msi->irq < 0)
+ return dev_err_probe(dev, msi->irq, "Failed to get MSI IRQ!\n");
+
+ devname = devm_kasprintf(dev, GFP_KERNEL, "%s-msi", dev_name(dev));
+ if (!devname)
+ return -ENOMEM;
+
+ ret = rzg3s_pcie_msi_allocate_domains(msi);
+ if (ret)
+ return ret;
+
+ /*
+ * Don't use devm_request_irq() as the driver uses non-devm helpers
+ * to control clocks. Mixing them may lead to subtle bugs.
+ */
+ ret = request_irq(msi->irq, rzg3s_pcie_msi_irq, 0, devname, host);
+ if (ret) {
+ dev_err_probe(dev, ret, "Failed to request MSI IRQ\n");
+ goto free_domains;
+ }
+
+ ret = rzg3s_pcie_msi_setup(host);
+ if (ret) {
+ dev_err_probe(dev, ret, "Failed to setup MSI!\n");
+ goto free_irq;
+ }
+
+ return 0;
+
+free_irq:
+ free_irq(msi->irq, host);
+free_domains:
+ irq_domain_remove(msi->domain);
+ return ret;
+}
+
+static void rzg3s_pcie_intx_irq_ack(struct irq_data *d)
+{
+ struct rzg3s_pcie_host *host = irq_data_get_irq_chip_data(d);
+
+ guard(raw_spinlock_irqsave)(&host->hw_lock);
+
+ rzg3s_pcie_update_bits(host->axi, RZG3S_PCI_PINTRCVIS,
+ RZG3S_PCI_PINTRCVIS_INTX(d->hwirq),
+ RZG3S_PCI_PINTRCVIS_INTX(d->hwirq));
+}
+
+static void rzg3s_pcie_intx_irq_mask(struct irq_data *d)
+{
+ struct rzg3s_pcie_host *host = irq_data_get_irq_chip_data(d);
+
+ guard(raw_spinlock_irqsave)(&host->hw_lock);
+
+ rzg3s_pcie_update_bits(host->axi, RZG3S_PCI_PINTRCVIE,
+ RZG3S_PCI_PINTRCVIE_INTX(d->hwirq), 0);
+}
+
+static void rzg3s_pcie_intx_irq_unmask(struct irq_data *d)
+{
+ struct rzg3s_pcie_host *host = irq_data_get_irq_chip_data(d);
+
+ guard(raw_spinlock_irqsave)(&host->hw_lock);
+
+ rzg3s_pcie_update_bits(host->axi, RZG3S_PCI_PINTRCVIE,
+ RZG3S_PCI_PINTRCVIE_INTX(d->hwirq),
+ RZG3S_PCI_PINTRCVIE_INTX(d->hwirq));
+}
+
+static struct irq_chip rzg3s_pcie_intx_irq_chip = {
+ .name = "PCIe INTx",
+ .irq_ack = rzg3s_pcie_intx_irq_ack,
+ .irq_mask = rzg3s_pcie_intx_irq_mask,
+ .irq_unmask = rzg3s_pcie_intx_irq_unmask,
+};
+
+static int rzg3s_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &rzg3s_pcie_intx_irq_chip,
+ handle_level_irq);
+ irq_set_chip_data(irq, domain->host_data);
+
+ return 0;
+}
+
+static const struct irq_domain_ops rzg3s_pcie_intx_domain_ops = {
+ .map = rzg3s_pcie_intx_map,
+ .xlate = irq_domain_xlate_onetwocell,
+};
+
+static int rzg3s_pcie_init_irqdomain(struct rzg3s_pcie_host *host)
+{
+ struct device *dev = host->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+
+ for (int i = 0; i < PCI_NUM_INTX; i++) {
+ char irq_name[5] = {0};
+ int irq;
+
+ scnprintf(irq_name, ARRAY_SIZE(irq_name), "int%c", 'a' + i);
+
+ irq = platform_get_irq_byname(pdev, irq_name);
+ if (irq < 0)
+ return dev_err_probe(dev, -EINVAL,
+ "Failed to parse and map INT%c IRQ\n",
+ 'A' + i);
+
+ host->intx_irqs[i] = irq;
+ irq_set_chained_handler_and_data(irq,
+ rzg3s_pcie_intx_irq_handler,
+ host);
+ }
+
+ host->intx_domain = irq_domain_create_linear(dev_fwnode(dev),
+ PCI_NUM_INTX,
+ &rzg3s_pcie_intx_domain_ops,
+ host);
+ if (!host->intx_domain)
+ return dev_err_probe(dev, -EINVAL,
+ "Failed to add irq domain for INTx IRQs\n");
+ irq_domain_update_bus_token(host->intx_domain, DOMAIN_BUS_WIRED);
+
+ if (IS_ENABLED(CONFIG_PCI_MSI)) {
+ int ret = rzg3s_pcie_init_msi(host);
+
+ if (ret) {
+ irq_domain_remove(host->intx_domain);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void rzg3s_pcie_teardown_irqdomain(struct rzg3s_pcie_host *host)
+{
+ if (IS_ENABLED(CONFIG_PCI_MSI))
+ rzg3s_pcie_teardown_msi(host);
+
+ irq_domain_remove(host->intx_domain);
+}
+
+static int rzg3s_pcie_set_max_link_speed(struct rzg3s_pcie_host *host)
+{
+ u32 remote_supported_link_speeds, max_supported_link_speeds;
+ u32 cs2, tmp, pcie_cap = RZG3S_PCI_CFG_PCIEC;
+ u32 cur_link_speed, link_speed;
+ u8 ltssm_state_l0 = 0xc;
+ int ret;
+ u16 ls;
+
+ /*
+ * According to the RZ/G3S HW manual (Rev.1.10, section 34.6.3 Caution
+ * when Changing the Speed Spontaneously) link speed change can be done
+ * only when the LTSSM is in L0.
+ */
+ ret = readl_poll_timeout(host->axi + RZG3S_PCI_PCSTAT1, tmp,
+ FIELD_GET(RZG3S_PCI_PCSTAT1_LTSSM_STATE, tmp) == ltssm_state_l0,
+ PCIE_LINK_WAIT_SLEEP_MS * MILLI,
+ PCIE_LINK_WAIT_SLEEP_MS * MILLI *
+ PCIE_LINK_WAIT_MAX_RETRIES);
+ if (ret)
+ return ret;
+
+ ls = readw_relaxed(host->pcie + pcie_cap + PCI_EXP_LNKSTA);
+ cs2 = readl_relaxed(host->axi + RZG3S_PCI_PCSTAT2);
+
+ switch (pcie_link_speed[host->max_link_speed]) {
+ case PCIE_SPEED_5_0GT:
+ max_supported_link_speeds = GENMASK(PCI_EXP_LNKSTA_CLS_5_0GB - 1, 0);
+ link_speed = PCI_EXP_LNKCTL2_TLS_5_0GT;
+ break;
+ default:
+ /* Should not happen */
+ return -EINVAL;
+ }
+
+ cur_link_speed = FIELD_GET(PCI_EXP_LNKSTA_CLS, ls);
+ remote_supported_link_speeds = FIELD_GET(RZG3S_PCI_PCSTAT2_SDRIRE, cs2);
+ /* Drop reserved bits */
+ remote_supported_link_speeds &= max_supported_link_speeds;
+
+ /*
+ * Return if max link speed is already set or the connected device
+ * doesn't support it.
+ */
+ if (cur_link_speed == host->max_link_speed ||
+ remote_supported_link_speeds != max_supported_link_speeds)
+ return 0;
+
+ /* Set target Link speed */
+ rzg3s_pcie_update_bits(host->pcie, pcie_cap + PCI_EXP_LNKCTL2,
+ PCI_EXP_LNKCTL2_TLS,
+ FIELD_PREP(PCI_EXP_LNKCTL2_TLS, link_speed));
+
+ /* Request link speed change */
+ rzg3s_pcie_update_bits(host->axi, RZG3S_PCI_PCCTRL2,
+ RZG3S_PCI_PCCTRL2_LS_CHG_REQ |
+ RZG3S_PCI_PCCTRL2_LS_CHG,
+ RZG3S_PCI_PCCTRL2_LS_CHG_REQ |
+ FIELD_PREP(RZG3S_PCI_PCCTRL2_LS_CHG,
+ link_speed - 1));
+
+ ret = readl_poll_timeout(host->axi + RZG3S_PCI_PCSTAT2, cs2,
+ (cs2 & RZG3S_PCI_PCSTAT2_LS_CHG_DONE),
+ PCIE_LINK_WAIT_SLEEP_MS * MILLI,
+ PCIE_LINK_WAIT_SLEEP_MS * MILLI *
+ PCIE_LINK_WAIT_MAX_RETRIES);
+
+ /*
+ * According to the RZ/G3S HW manual (Rev.1.10, section 34.6.3 Caution
+ * when Changing the Speed Spontaneously) the PCI_PCCTRL2_LS_CHG_REQ
+ * should be de-asserted after checking for PCI_PCSTAT2_LS_CHG_DONE.
+ */
+ rzg3s_pcie_update_bits(host->axi, RZG3S_PCI_PCCTRL2,
+ RZG3S_PCI_PCCTRL2_LS_CHG_REQ, 0);
+
+ return ret;
+}
+
+static int rzg3s_pcie_config_init(struct rzg3s_pcie_host *host)
+{
+ struct pci_host_bridge *bridge = pci_host_bridge_from_priv(host);
+ struct resource_entry *ft;
+ struct resource *bus;
+ u8 subordinate_bus;
+ u8 secondary_bus;
+ u8 primary_bus;
+
+ ft = resource_list_first_type(&bridge->windows, IORESOURCE_BUS);
+ if (!ft)
+ return -ENODEV;
+
+ bus = ft->res;
+ primary_bus = bus->start;
+ secondary_bus = bus->start + 1;
+ subordinate_bus = bus->end;
+
+ /* Enable access control to the CFGU */
+ writel_relaxed(RZG3S_PCI_PERM_CFG_HWINIT_EN,
+ host->axi + RZG3S_PCI_PERM);
+
+ /* The HW manual recommends writing 0xffffffff on initialization */
+ writel_relaxed(0xffffffff, host->pcie + RZG3S_PCI_CFG_BARMSK00L);
+ writel_relaxed(0xffffffff, host->pcie + RZG3S_PCI_CFG_BARMSK00U);
+
+ /* Update bus info */
+ writeb_relaxed(primary_bus, host->pcie + PCI_PRIMARY_BUS);
+ writeb_relaxed(secondary_bus, host->pcie + PCI_SECONDARY_BUS);
+ writeb_relaxed(subordinate_bus, host->pcie + PCI_SUBORDINATE_BUS);
+
+ /* Disable access control to the CFGU */
+ writel_relaxed(0, host->axi + RZG3S_PCI_PERM);
+
+ return 0;
+}
+
+static void rzg3s_pcie_irq_init(struct rzg3s_pcie_host *host)
+{
+ /*
+ * According to the RZ/G3S HW manual (Rev.1.10, sections corresponding
+ * to all registers written with ~0U), the hardware ignores values
+ * written to unused bits, so writing ~0U to these registers is safe.
+ */
+
+ /* Clear the link state and PM transitions */
+ writel_relaxed(RZG3S_PCI_PEIS0_DL_UPDOWN |
+ RZG3S_PCI_PEIS0_RX_DLLP_PM_ENTER,
+ host->axi + RZG3S_PCI_PEIS0);
+
+ /* Disable all interrupts */
+ writel_relaxed(0, host->axi + RZG3S_PCI_PEIE0);
+
+ /* Clear all parity and ecc error interrupts */
+ writel_relaxed(~0U, host->axi + RZG3S_PCI_PEIS1);
+
+ /* Disable all parity and ecc error interrupts */
+ writel_relaxed(0, host->axi + RZG3S_PCI_PEIE1);
+
+ /* Clear all AXI master error interrupts */
+ writel_relaxed(~0U, host->axi + RZG3S_PCI_AMEIS);
+
+ /* Clear all AXI slave error interrupts */
+ writel_relaxed(~0U, host->axi + RZG3S_PCI_ASEIS1);
+
+ /* Clear all message receive interrupts */
+ writel_relaxed(~0U, host->axi + RZG3S_PCI_MSGRCVIS);
+}
+
+static int rzg3s_pcie_power_resets_deassert(struct rzg3s_pcie_host *host)
+{
+ const struct rzg3s_pcie_soc_data *data = host->data;
+
+ /*
+ * According to the RZ/G3S HW manual (Rev.1.10, section
+ * 34.5.1.2 De-asserting the Reset) the PCIe IP needs to wait 5ms from
+ * power on to the de-assertion of reset.
+ */
+ fsleep(5000);
+ return reset_control_bulk_deassert(data->num_power_resets,
+ host->power_resets);
+}
+
+static int rzg3s_pcie_resets_prepare_and_get(struct rzg3s_pcie_host *host)
+{
+ const struct rzg3s_pcie_soc_data *data = host->data;
+ unsigned int i;
+ int ret;
+
+ host->power_resets = devm_kmalloc_array(host->dev,
+ data->num_power_resets,
+ sizeof(*host->power_resets),
+ GFP_KERNEL);
+ if (!host->power_resets)
+ return -ENOMEM;
+
+ for (i = 0; i < data->num_power_resets; i++)
+ host->power_resets[i].id = data->power_resets[i];
+
+ host->cfg_resets = devm_kmalloc_array(host->dev,
+ data->num_cfg_resets,
+ sizeof(*host->cfg_resets),
+ GFP_KERNEL);
+ if (!host->cfg_resets)
+ return -ENOMEM;
+
+ for (i = 0; i < data->num_cfg_resets; i++)
+ host->cfg_resets[i].id = data->cfg_resets[i];
+
+ ret = devm_reset_control_bulk_get_exclusive(host->dev,
+ data->num_power_resets,
+ host->power_resets);
+ if (ret)
+ return ret;
+
+ return devm_reset_control_bulk_get_exclusive(host->dev,
+ data->num_cfg_resets,
+ host->cfg_resets);
+}
+
+static int rzg3s_pcie_host_parse_port(struct rzg3s_pcie_host *host)
+{
+ struct device_node *of_port = of_get_next_child(host->dev->of_node, NULL);
+ struct rzg3s_pcie_port *port = &host->port;
+ int ret;
+
+ ret = of_property_read_u32(of_port, "vendor-id", &port->vendor_id);
+ if (ret)
+ return ret;
+
+ ret = of_property_read_u32(of_port, "device-id", &port->device_id);
+ if (ret)
+ return ret;
+
+ port->refclk = of_clk_get_by_name(of_port, "ref");
+ if (IS_ERR(port->refclk))
+ return PTR_ERR(port->refclk);
+
+ return 0;
+}
+
+static int rzg3s_pcie_host_init_port(struct rzg3s_pcie_host *host)
+{
+ struct rzg3s_pcie_port *port = &host->port;
+ struct device *dev = host->dev;
+ int ret;
+
+ /* Enable access control to the CFGU */
+ writel_relaxed(RZG3S_PCI_PERM_CFG_HWINIT_EN,
+ host->axi + RZG3S_PCI_PERM);
+
+ /* Update vendor ID and device ID */
+ writew_relaxed(port->vendor_id, host->pcie + PCI_VENDOR_ID);
+ writew_relaxed(port->device_id, host->pcie + PCI_DEVICE_ID);
+
+ /* Disable access control to the CFGU */
+ writel_relaxed(0, host->axi + RZG3S_PCI_PERM);
+
+ ret = clk_prepare_enable(port->refclk);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to enable refclk!\n");
+
+ /* Set the PHY, if any */
+ if (host->data->init_phy) {
+ ret = host->data->init_phy(host);
+ if (ret) {
+ dev_err_probe(dev, ret, "Failed to set the PHY!\n");
+ goto refclk_disable;
+ }
+ }
+
+ return 0;
+
+refclk_disable:
+ clk_disable_unprepare(port->refclk);
+ return ret;
+}
+
+static int rzg3s_pcie_host_init(struct rzg3s_pcie_host *host)
+{
+ u32 val;
+ int ret;
+
+ /* Initialize the PCIe related registers */
+ ret = rzg3s_pcie_config_init(host);
+ if (ret)
+ return ret;
+
+ ret = rzg3s_pcie_host_init_port(host);
+ if (ret)
+ return ret;
+
+ /* Initialize the interrupts */
+ rzg3s_pcie_irq_init(host);
+
+ ret = reset_control_bulk_deassert(host->data->num_cfg_resets,
+ host->cfg_resets);
+ if (ret)
+ goto disable_port_refclk;
+
+ /* Wait for link up */
+ ret = readl_poll_timeout(host->axi + RZG3S_PCI_PCSTAT1, val,
+ !(val & RZG3S_PCI_PCSTAT1_DL_DOWN_STS),
+ PCIE_LINK_WAIT_SLEEP_MS * MILLI,
+ PCIE_LINK_WAIT_SLEEP_MS * MILLI *
+ PCIE_LINK_WAIT_MAX_RETRIES);
+ if (ret)
+ goto cfg_resets_deassert;
+
+ val = readl_relaxed(host->axi + RZG3S_PCI_PCSTAT2);
+ dev_info(host->dev, "PCIe link status [0x%x]\n", val);
+
+ return 0;
+
+cfg_resets_deassert:
+ reset_control_bulk_assert(host->data->num_cfg_resets,
+ host->cfg_resets);
+disable_port_refclk:
+ clk_disable_unprepare(host->port.refclk);
+ return ret;
+}
+
+static void rzg3s_pcie_set_inbound_window(struct rzg3s_pcie_host *host,
+ u64 cpu_addr, u64 pci_addr, u64 size,
+ int id)
+{
+ /* Set CPU window base address */
+ writel_relaxed(upper_32_bits(cpu_addr),
+ host->axi + RZG3S_PCI_ADESTU(id));
+ writel_relaxed(lower_32_bits(cpu_addr),
+ host->axi + RZG3S_PCI_ADESTL(id));
+
+ /* Set window size */
+ writel_relaxed(upper_32_bits(size), host->axi + RZG3S_PCI_AWMASKU(id));
+ writel_relaxed(lower_32_bits(size), host->axi + RZG3S_PCI_AWMASKL(id));
+
+ /* Set PCIe window base address and enable the window */
+ writel_relaxed(upper_32_bits(pci_addr),
+ host->axi + RZG3S_PCI_AWBASEU(id));
+ writel_relaxed(lower_32_bits(pci_addr) | RZG3S_PCI_AWBASEL_WIN_ENA,
+ host->axi + RZG3S_PCI_AWBASEL(id));
+}
+
+static int rzg3s_pcie_set_inbound_windows(struct rzg3s_pcie_host *host,
+ struct resource_entry *entry,
+ int *index)
+{
+ u64 pci_addr = entry->res->start - entry->offset;
+ u64 cpu_addr = entry->res->start;
+ u64 cpu_end = entry->res->end;
+ u64 size_id = 0;
+ int id = *index;
+ u64 size;
+
+ while (cpu_addr < cpu_end) {
+ if (id >= RZG3S_MAX_WINDOWS)
+ return dev_err_probe(host->dev, -ENOSPC,
+ "Failed to map inbound window for resource (%s)\n",
+ entry->res->name);
+
+ size = resource_size(entry->res) - size_id;
+
+ /*
+ * According to the RZ/G3S HW manual (Rev.1.10,
+ * section 34.3.1.71 AXI Window Mask (Lower) Registers) the min
+ * size is 4K.
+ */
+ size = max(size, SZ_4K);
+
+ /*
+ * According to the RZ/G3S HW manual (Rev.1.10, sections:
+ * - 34.3.1.69 AXI Window Base (Lower) Registers
+ * - 34.3.1.71 AXI Window Mask (Lower) Registers
+ * - 34.3.1.73 AXI Destination (Lower) Registers)
+ * the CPU address, PCIe address and size must be 4K aligned, and the
+ * size must be a power of 2.
+ */
+ size = ALIGN(size, SZ_4K);
+ size = roundup_pow_of_two(size);
+
+ cpu_addr = ALIGN(cpu_addr, SZ_4K);
+ pci_addr = ALIGN(pci_addr, SZ_4K);
+
+ /*
+ * According to the RZ/G3S HW manual (Rev.1.10, section
+ * 34.3.1.71 AXI Window Mask (Lower) Registers) the HW expects the
+ * lower 12 bits of the mask to be 0xfff, hence size - 1 is programmed.
+ */
+ rzg3s_pcie_set_inbound_window(host, cpu_addr, pci_addr,
+ size - 1, id);
+
+ pci_addr += size;
+ cpu_addr += size;
+ size_id = size;
+ id++;
+ }
+ *index = id;
+
+ return 0;
+}
+
+static int rzg3s_pcie_parse_map_dma_ranges(struct rzg3s_pcie_host *host)
+{
+ struct pci_host_bridge *bridge = pci_host_bridge_from_priv(host);
+ struct resource_entry *entry;
+ int i = 0, ret;
+
+ resource_list_for_each_entry(entry, &bridge->dma_ranges) {
+ ret = rzg3s_pcie_set_inbound_windows(host, entry, &i);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void rzg3s_pcie_set_outbound_window(struct rzg3s_pcie_host *host,
+ struct resource_entry *win, int id)
+{
+ struct resource *res = win->res;
+ resource_size_t size = resource_size(res);
+ resource_size_t res_start;
+
+ if (res->flags & IORESOURCE_IO)
+ res_start = pci_pio_to_address(res->start) - win->offset;
+ else
+ res_start = res->start - win->offset;
+
+ /*
+ * According to the RZ/G3S HW manual (Rev.1.10, section 34.3.1.75 PCIe
+ * Window Base (Lower) Registers) the window base address needs to be
+ * 4K aligned.
+ */
+ res_start = ALIGN(res_start, SZ_4K);
+
+ size = ALIGN(size, SZ_4K);
+ size = roundup_pow_of_two(size) - 1;
+
+ /* Set PCIe destination */
+ writel_relaxed(upper_32_bits(res_start),
+ host->axi + RZG3S_PCI_PDESTU(id));
+ writel_relaxed(lower_32_bits(res_start),
+ host->axi + RZG3S_PCI_PDESTL(id));
+
+ /* Set PCIe window mask */
+ writel_relaxed(upper_32_bits(size), host->axi + RZG3S_PCI_PWMASKU(id));
+ writel_relaxed(lower_32_bits(size), host->axi + RZG3S_PCI_PWMASKL(id));
+
+ /* Set PCIe window base and enable the window */
+ writel_relaxed(upper_32_bits(res_start),
+ host->axi + RZG3S_PCI_PWBASEU(id));
+ writel_relaxed(lower_32_bits(res_start) | RZG3S_PCI_PWBASEL_ENA,
+ host->axi + RZG3S_PCI_PWBASEL(id));
+}
+
+static int rzg3s_pcie_parse_map_ranges(struct rzg3s_pcie_host *host)
+{
+ struct pci_host_bridge *bridge = pci_host_bridge_from_priv(host);
+ struct resource_entry *win;
+ int i = 0;
+
+ resource_list_for_each_entry(win, &bridge->windows) {
+ struct resource *res = win->res;
+
+ if (i >= RZG3S_MAX_WINDOWS)
+ return dev_err_probe(host->dev, -ENOSPC,
+ "Failed to map outbound window for resource (%s)\n",
+ res->name);
+
+ if (!res->flags)
+ continue;
+
+ switch (resource_type(res)) {
+ case IORESOURCE_IO:
+ case IORESOURCE_MEM:
+ rzg3s_pcie_set_outbound_window(host, win, i);
+ i++;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int rzg3s_soc_pcie_init_phy(struct rzg3s_pcie_host *host)
+{
+ static const u32 xcfgd_settings[RZG3S_PCI_PHY_XCFGD_NUM] = {
+ [8] = 0xe0006801, 0x007f7e30, 0x183e0000, 0x978ff500,
+ 0xec000000, 0x009f1400, 0x0000d009,
+ [17] = 0x78000000,
+ [19] = 0x00880000, 0x000005c0, 0x07000000, 0x00780920,
+ 0xc9400ce2, 0x90000c0c, 0x000c1414, 0x00005034,
+ 0x00006000, 0x00000001,
+ };
+ static const u32 xcfga_cmn_settings[RZG3S_PCI_PHY_XCFGA_CMN_NUM] = {
+ 0x00000d10, 0x08310100, 0x00c21404, 0x013c0010, 0x01874440,
+ 0x1a216082, 0x00103440, 0x00000080, 0x00000010, 0x0c1000c1,
+ 0x1000c100, 0x0222000c, 0x00640019, 0x00a00028, 0x01d11228,
+ 0x0201001d,
+ };
+ static const u32 xcfga_rx_settings[RZG3S_PCI_PHY_XCFGA_RX_NUM] = {
+ 0x07d55000, 0x030e3f00, 0x00000288, 0x102c5880, 0x0000000b,
+ 0x04141441, 0x00641641, 0x00d63d63, 0x00641641, 0x01970377,
+ 0x00190287, 0x00190028, 0x00000028,
+ };
+ unsigned int i;
+
+ /*
+ * Enable access permission for physical layer control and status
+ * registers.
+ */
+ writel_relaxed(RZG3S_PCI_PERM_PIPE_PHY_REG_EN,
+ host->axi + RZG3S_PCI_PERM);
+
+ for (i = 0; i < RZG3S_PCI_PHY_XCFGD_NUM; i++) {
+ writel_relaxed(xcfgd_settings[i],
+ host->axi + RZG3S_PCI_PHY_XCFGD(i));
+ }
+
+ for (i = 0; i < RZG3S_PCI_PHY_XCFGA_CMN_NUM; i++) {
+ writel_relaxed(xcfga_cmn_settings[i],
+ host->axi + RZG3S_PCI_PHY_XCFGA_CMN(i));
+ }
+
+ for (i = 0; i < RZG3S_PCI_PHY_XCFGA_RX_NUM; i++) {
+ writel_relaxed(xcfga_rx_settings[i],
+ host->axi + RZG3S_PCI_PHY_XCFGA_RX(i));
+ }
+
+ writel_relaxed(0x107, host->axi + RZG3S_PCI_PHY_XCFGA_TX);
+
+ /* Select PHY settings values */
+ writel_relaxed(RZG3S_PCI_PHY_XCFG_CTRL_PHYREG_SEL,
+ host->axi + RZG3S_PCI_PHY_XCFG_CTRL);
+
+ /*
+ * Disable access permission for physical layer control and status
+ * registers.
+ */
+ writel_relaxed(0, host->axi + RZG3S_PCI_PERM);
+
+ return 0;
+}
+
+static int
+rzg3s_pcie_host_setup(struct rzg3s_pcie_host *host,
+ int (*init_irqdomain)(struct rzg3s_pcie_host *host),
+ void (*teardown_irqdomain)(struct rzg3s_pcie_host *host))
+{
+ struct device *dev = host->dev;
+ int ret;
+
+ /* Set inbound windows */
+ ret = rzg3s_pcie_parse_map_dma_ranges(host);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to set inbound windows!\n");
+
+ /* Set outbound windows */
+ ret = rzg3s_pcie_parse_map_ranges(host);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to set outbound windows!\n");
+
+ ret = init_irqdomain(host);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to init IRQ domain\n");
+
+ ret = rzg3s_pcie_host_init(host);
+ if (ret) {
+ dev_err_probe(dev, ret, "Failed to initialize the HW!\n");
+ goto teardown_irqdomain;
+ }
+
+ ret = rzg3s_pcie_set_max_link_speed(host);
+ if (ret)
+ dev_info(dev, "Failed to set max link speed\n");
+
+ msleep(PCIE_RESET_CONFIG_WAIT_MS);
+
+ return 0;
+
+teardown_irqdomain:
+ teardown_irqdomain(host);
+
+ return ret;
+}
+
+static int rzg3s_pcie_probe(struct platform_device *pdev)
+{
+ struct pci_host_bridge *bridge;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct device_node *sysc_np __free(device_node) =
+ of_parse_phandle(np, "renesas,sysc", 0);
+ struct rzg3s_pcie_host *host;
+ int ret;
+
+ bridge = devm_pci_alloc_host_bridge(dev, sizeof(*host));
+ if (!bridge)
+ return -ENOMEM;
+
+ host = pci_host_bridge_priv(bridge);
+ host->dev = dev;
+ host->data = device_get_match_data(dev);
+ platform_set_drvdata(pdev, host);
+
+ host->axi = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(host->axi))
+ return PTR_ERR(host->axi);
+ host->pcie = host->axi + RZG3S_PCI_CFG_BASE;
+
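+ /* Fall back to Gen2 when the DT does not limit the link speed */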
+ host->max_link_speed = of_pci_get_max_link_speed(np);
+ if (host->max_link_speed < 0)
+ host->max_link_speed = 2;
+
+ ret = rzg3s_pcie_host_parse_port(host);
+ if (ret)
+ return ret;
+
+ host->sysc = syscon_node_to_regmap(sysc_np);
+ if (IS_ERR(host->sysc)) {
+ ret = PTR_ERR(host->sysc);
+ goto port_refclk_put;
+ }
+
+ ret = regmap_update_bits(host->sysc, RZG3S_SYS_PCIE_RST_RSM_B,
+ RZG3S_SYS_PCIE_RST_RSM_B_MASK,
+ FIELD_PREP(RZG3S_SYS_PCIE_RST_RSM_B_MASK, 1));
+ if (ret)
+ goto port_refclk_put;
+
+ ret = rzg3s_pcie_resets_prepare_and_get(host);
+ if (ret)
+ goto sysc_signal_restore;
+
+ ret = rzg3s_pcie_power_resets_deassert(host);
+ if (ret)
+ goto sysc_signal_restore;
+
+ pm_runtime_enable(dev);
+
+ /*
+ * Controller clocks are part of a clock power domain. Enable them
+ * through runtime PM.
+ */
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret)
+ goto rpm_disable;
+
+ raw_spin_lock_init(&host->hw_lock);
+
+ ret = rzg3s_pcie_host_setup(host, rzg3s_pcie_init_irqdomain,
+ rzg3s_pcie_teardown_irqdomain);
+ if (ret)
+ goto rpm_put;
+
+ bridge->sysdata = host;
+ bridge->ops = &rzg3s_pcie_root_ops;
+ bridge->child_ops = &rzg3s_pcie_child_ops;
+ ret = pci_host_probe(bridge);
+ if (ret)
+ goto host_probe_teardown;
+
+ return 0;
+
+host_probe_teardown:
+ rzg3s_pcie_teardown_irqdomain(host);
+ reset_control_bulk_assert(host->data->num_cfg_resets,
+ host->cfg_resets);
+rpm_put:
+ pm_runtime_put_sync(dev);
+rpm_disable:
+ pm_runtime_disable(dev);
+ reset_control_bulk_assert(host->data->num_power_resets,
+ host->power_resets);
+sysc_signal_restore:
+ /*
+ * The SYSC RST_RSM_B signal needs to be asserted before turning off
+ * the power to the PHY.
+ */
+ regmap_update_bits(host->sysc, RZG3S_SYS_PCIE_RST_RSM_B,
+ RZG3S_SYS_PCIE_RST_RSM_B_MASK,
+ FIELD_PREP(RZG3S_SYS_PCIE_RST_RSM_B_MASK, 0));
+port_refclk_put:
+ clk_put(host->port.refclk);
+
+ return ret;
+}
+
+static int rzg3s_pcie_suspend_noirq(struct device *dev)
+{
+ struct rzg3s_pcie_host *host = dev_get_drvdata(dev);
+ const struct rzg3s_pcie_soc_data *data = host->data;
+ struct rzg3s_pcie_port *port = &host->port;
+ struct regmap *sysc = host->sysc;
+ int ret;
+
+ ret = pm_runtime_put_sync(dev);
+ if (ret)
+ return ret;
+
+ clk_disable_unprepare(port->refclk);
+
+ ret = reset_control_bulk_assert(data->num_power_resets,
+ host->power_resets);
+ if (ret)
+ goto refclk_restore;
+
+ ret = reset_control_bulk_assert(data->num_cfg_resets,
+ host->cfg_resets);
+ if (ret)
+ goto power_resets_restore;
+
+ ret = regmap_update_bits(sysc, RZG3S_SYS_PCIE_RST_RSM_B,
+ RZG3S_SYS_PCIE_RST_RSM_B_MASK,
+ FIELD_PREP(RZG3S_SYS_PCIE_RST_RSM_B_MASK, 0));
+ if (ret)
+ goto cfg_resets_restore;
+
+ return 0;
+
+ /* Restore the previous state if any error happens */
+cfg_resets_restore:
+ reset_control_bulk_deassert(data->num_cfg_resets,
+ host->cfg_resets);
+power_resets_restore:
+ reset_control_bulk_deassert(data->num_power_resets,
+ host->power_resets);
+refclk_restore:
+ clk_prepare_enable(port->refclk);
+ pm_runtime_resume_and_get(dev);
+ return ret;
+}
+
+static int rzg3s_pcie_resume_noirq(struct device *dev)
+{
+ struct rzg3s_pcie_host *host = dev_get_drvdata(dev);
+ const struct rzg3s_pcie_soc_data *data = host->data;
+ struct regmap *sysc = host->sysc;
+ int ret;
+
+ ret = regmap_update_bits(sysc, RZG3S_SYS_PCIE_RST_RSM_B,
+ RZG3S_SYS_PCIE_RST_RSM_B_MASK,
+ FIELD_PREP(RZG3S_SYS_PCIE_RST_RSM_B_MASK, 1));
+ if (ret)
+ return ret;
+
+ ret = rzg3s_pcie_power_resets_deassert(host);
+ if (ret)
+ goto assert_rst_rsm_b;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret)
+ goto assert_power_resets;
+
+ ret = rzg3s_pcie_host_setup(host, rzg3s_pcie_msi_hw_setup,
+ rzg3s_pcie_msi_hw_teardown);
+ if (ret)
+ goto rpm_put;
+
+ return 0;
+
+ /*
+ * If any error happens there is no way to recover the IP. Put it in the
+ * lowest possible power state.
+ */
+rpm_put:
+ pm_runtime_put_sync(dev);
+assert_power_resets:
+ reset_control_bulk_assert(data->num_power_resets,
+ host->power_resets);
+assert_rst_rsm_b:
+ regmap_update_bits(sysc, RZG3S_SYS_PCIE_RST_RSM_B,
+ RZG3S_SYS_PCIE_RST_RSM_B_MASK,
+ FIELD_PREP(RZG3S_SYS_PCIE_RST_RSM_B_MASK, 0));
+ return ret;
+}
+
+static const struct dev_pm_ops rzg3s_pcie_pm_ops = {
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(rzg3s_pcie_suspend_noirq,
+ rzg3s_pcie_resume_noirq)
+};
+
+static const char * const rzg3s_soc_power_resets[] = {
+ "aresetn", "rst_cfg_b", "rst_load_b",
+};
+
+static const char * const rzg3s_soc_cfg_resets[] = {
+ "rst_b", "rst_ps_b", "rst_gp_b", "rst_rsm_b",
+};
+
+static const struct rzg3s_pcie_soc_data rzg3s_soc_data = {
+ .power_resets = rzg3s_soc_power_resets,
+ .num_power_resets = ARRAY_SIZE(rzg3s_soc_power_resets),
+ .cfg_resets = rzg3s_soc_cfg_resets,
+ .num_cfg_resets = ARRAY_SIZE(rzg3s_soc_cfg_resets),
+ .init_phy = rzg3s_soc_pcie_init_phy,
+};
+
+static const struct of_device_id rzg3s_pcie_of_match[] = {
+ {
+ .compatible = "renesas,r9a08g045-pcie",
+ .data = &rzg3s_soc_data,
+ },
+ {}
+};
+
+static struct platform_driver rzg3s_pcie_driver = {
+ .driver = {
+ .name = "rzg3s-pcie-host",
+ .of_match_table = rzg3s_pcie_of_match,
+ .pm = pm_ptr(&rzg3s_pcie_pm_ops),
+ .suppress_bind_attrs = true,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
+ .probe = rzg3s_pcie_probe,
+};
+builtin_platform_driver(rzg3s_pcie_driver);
diff --git a/drivers/pci/controller/pcie-xilinx-common.h b/drivers/pci/controller/pcie-xilinx-common.h
new file mode 100644
index 000000000000..1832770f3308
--- /dev/null
+++ b/drivers/pci/controller/pcie-xilinx-common.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * (C) Copyright 2023, Xilinx, Inc.
+ */
+
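+/* Definitions shared by the Xilinx CPM and XDMA PCIe controller drivers */
+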
+#include <linux/pci.h>
+#include <linux/pci-ecam.h>
+#include <linux/platform_device.h>
+
+/* Interrupt registers definitions */
+#define XILINX_PCIE_INTR_LINK_DOWN 0
+#define XILINX_PCIE_INTR_HOT_RESET 3
+#define XILINX_PCIE_INTR_CFG_PCIE_TIMEOUT 4
+#define XILINX_PCIE_INTR_CFG_TIMEOUT 8
+#define XILINX_PCIE_INTR_CORRECTABLE 9
+#define XILINX_PCIE_INTR_NONFATAL 10
+#define XILINX_PCIE_INTR_FATAL 11
+#define XILINX_PCIE_INTR_CFG_ERR_POISON 12
+#define XILINX_PCIE_INTR_PME_TO_ACK_RCVD 15
+#define XILINX_PCIE_INTR_INTX 16
+#define XILINX_PCIE_INTR_PM_PME_RCVD 17
+#define XILINX_PCIE_INTR_MSI 17
+#define XILINX_PCIE_INTR_SLV_UNSUPP 20
+#define XILINX_PCIE_INTR_SLV_UNEXP 21
+#define XILINX_PCIE_INTR_SLV_COMPL 22
+#define XILINX_PCIE_INTR_SLV_ERRP 23
+#define XILINX_PCIE_INTR_SLV_CMPABT 24
+#define XILINX_PCIE_INTR_SLV_ILLBUR 25
+#define XILINX_PCIE_INTR_MST_DECERR 26
+#define XILINX_PCIE_INTR_MST_SLVERR 27
+#define XILINX_PCIE_INTR_SLV_PCIE_TIMEOUT 28
diff --git a/drivers/pci/controller/pcie-xilinx-cpm.c b/drivers/pci/controller/pcie-xilinx-cpm.c
index 4a787a941674..d38f27e20761 100644
--- a/drivers/pci/controller/pcie-xilinx-cpm.c
+++ b/drivers/pci/controller/pcie-xilinx-cpm.c
@@ -16,11 +16,9 @@
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
-#include <linux/pci.h>
-#include <linux/platform_device.h>
-#include <linux/pci-ecam.h>
#include "../pci.h"
+#include "pcie-xilinx-common.h"
/* Register definitions */
#define XILINX_CPM_PCIE_REG_IDR 0x00000E10
@@ -32,35 +30,16 @@
#define XILINX_CPM_PCIE_REG_IDRN_MASK 0x00000E3C
#define XILINX_CPM_PCIE_MISC_IR_STATUS 0x00000340
#define XILINX_CPM_PCIE_MISC_IR_ENABLE 0x00000348
-#define XILINX_CPM_PCIE_MISC_IR_LOCAL BIT(1)
-
-#define XILINX_CPM_PCIE_IR_STATUS 0x000002A0
-#define XILINX_CPM_PCIE_IR_ENABLE 0x000002A8
-#define XILINX_CPM_PCIE_IR_LOCAL BIT(0)
-
-/* Interrupt registers definitions */
-#define XILINX_CPM_PCIE_INTR_LINK_DOWN 0
-#define XILINX_CPM_PCIE_INTR_HOT_RESET 3
-#define XILINX_CPM_PCIE_INTR_CFG_PCIE_TIMEOUT 4
-#define XILINX_CPM_PCIE_INTR_CFG_TIMEOUT 8
-#define XILINX_CPM_PCIE_INTR_CORRECTABLE 9
-#define XILINX_CPM_PCIE_INTR_NONFATAL 10
-#define XILINX_CPM_PCIE_INTR_FATAL 11
-#define XILINX_CPM_PCIE_INTR_CFG_ERR_POISON 12
-#define XILINX_CPM_PCIE_INTR_PME_TO_ACK_RCVD 15
-#define XILINX_CPM_PCIE_INTR_INTX 16
-#define XILINX_CPM_PCIE_INTR_PM_PME_RCVD 17
-#define XILINX_CPM_PCIE_INTR_SLV_UNSUPP 20
-#define XILINX_CPM_PCIE_INTR_SLV_UNEXP 21
-#define XILINX_CPM_PCIE_INTR_SLV_COMPL 22
-#define XILINX_CPM_PCIE_INTR_SLV_ERRP 23
-#define XILINX_CPM_PCIE_INTR_SLV_CMPABT 24
-#define XILINX_CPM_PCIE_INTR_SLV_ILLBUR 25
-#define XILINX_CPM_PCIE_INTR_MST_DECERR 26
-#define XILINX_CPM_PCIE_INTR_MST_SLVERR 27
-#define XILINX_CPM_PCIE_INTR_SLV_PCIE_TIMEOUT 28
-
-#define IMR(x) BIT(XILINX_CPM_PCIE_INTR_ ##x)
+#define XILINX_CPM_PCIE0_MISC_IR_LOCAL BIT(1)
+#define XILINX_CPM_PCIE1_MISC_IR_LOCAL BIT(2)
+
+#define XILINX_CPM_PCIE0_IR_STATUS 0x000002A0
+#define XILINX_CPM_PCIE1_IR_STATUS 0x000002B4
+#define XILINX_CPM_PCIE0_IR_ENABLE 0x000002A8
+#define XILINX_CPM_PCIE1_IR_ENABLE 0x000002BC
+#define XILINX_CPM_PCIE_IR_LOCAL BIT(0)
+
+#define IMR(x) BIT(XILINX_PCIE_INTR_ ##x)
#define XILINX_CPM_PCIE_IMR_ALL_MASK \
( \
@@ -104,14 +83,22 @@
enum xilinx_cpm_version {
CPM,
CPM5,
+ CPM5_HOST1,
+ CPM5NC_HOST,
};
/**
* struct xilinx_cpm_variant - CPM variant information
* @version: CPM version
+ * @ir_status: Offset for the error interrupt status register
+ * @ir_enable: Offset for the CPM5 local error interrupt enable register
+ * @ir_misc_value: A bitmask for the miscellaneous interrupt status
*/
struct xilinx_cpm_variant {
enum xilinx_cpm_version version;
+ u32 ir_status;
+ u32 ir_enable;
+ u32 ir_misc_value;
};
/**
@@ -293,6 +280,7 @@ static void xilinx_cpm_pcie_event_flow(struct irq_desc *desc)
{
struct xilinx_cpm_pcie *port = irq_desc_get_handler_data(desc);
struct irq_chip *chip = irq_desc_get_chip(desc);
+ const struct xilinx_cpm_variant *variant = port->variant;
unsigned long val;
int i;
@@ -303,11 +291,11 @@ static void xilinx_cpm_pcie_event_flow(struct irq_desc *desc)
generic_handle_domain_irq(port->cpm_domain, i);
pcie_write(port, val, XILINX_CPM_PCIE_REG_IDR);
- if (port->variant->version == CPM5) {
- val = readl_relaxed(port->cpm_base + XILINX_CPM_PCIE_IR_STATUS);
+ if (variant->ir_status) {
+ val = readl_relaxed(port->cpm_base + variant->ir_status);
if (val)
writel_relaxed(val, port->cpm_base +
- XILINX_CPM_PCIE_IR_STATUS);
+ variant->ir_status);
}
/*
@@ -323,7 +311,7 @@ static void xilinx_cpm_pcie_event_flow(struct irq_desc *desc)
}
#define _IC(x, s) \
- [XILINX_CPM_PCIE_INTR_ ## x] = { __stringify(x), s }
+ [XILINX_PCIE_INTR_ ## x] = { __stringify(x), s }
static const struct {
const char *sym;
@@ -359,9 +347,9 @@ static irqreturn_t xilinx_cpm_pcie_intr_handler(int irq, void *dev_id)
d = irq_domain_get_irq_data(port->cpm_domain, irq);
switch (d->hwirq) {
- case XILINX_CPM_PCIE_INTR_CORRECTABLE:
- case XILINX_CPM_PCIE_INTR_NONFATAL:
- case XILINX_CPM_PCIE_INTR_FATAL:
+ case XILINX_PCIE_INTR_CORRECTABLE:
+ case XILINX_PCIE_INTR_NONFATAL:
+ case XILINX_PCIE_INTR_FATAL:
cpm_pcie_clear_err_interrupts(port);
fallthrough;
@@ -407,17 +395,15 @@ static int xilinx_cpm_pcie_init_irq_domain(struct xilinx_cpm_pcie *port)
return -EINVAL;
}
- port->cpm_domain = irq_domain_add_linear(pcie_intc_node, 32,
- &event_domain_ops,
- port);
+ port->cpm_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node), 32,
+ &event_domain_ops, port);
if (!port->cpm_domain)
goto out;
irq_domain_update_bus_token(port->cpm_domain, DOMAIN_BUS_NEXUS);
- port->intx_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
- &intx_domain_ops,
- port);
+ port->intx_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node), PCI_NUM_INTX,
+ &intx_domain_ops, port);
if (!port->intx_domain)
goto out;
@@ -466,7 +452,7 @@ static int xilinx_cpm_setup_irq(struct xilinx_cpm_pcie *port)
}
port->intx_irq = irq_create_mapping(port->cpm_domain,
- XILINX_CPM_PCIE_INTR_INTX);
+ XILINX_PCIE_INTR_INTX);
if (!port->intx_irq) {
dev_err(dev, "Failed to map INTx interrupt\n");
return -ENXIO;
@@ -489,6 +475,11 @@ static int xilinx_cpm_setup_irq(struct xilinx_cpm_pcie *port)
*/
static void xilinx_cpm_pcie_init_port(struct xilinx_cpm_pcie *port)
{
+ const struct xilinx_cpm_variant *variant = port->variant;
+
+ if (variant->version == CPM5NC_HOST)
+ return;
+
if (cpm_pcie_link_up(port))
dev_info(port->dev, "PCIe Link is UP\n");
else
@@ -507,15 +498,15 @@ static void xilinx_cpm_pcie_init_port(struct xilinx_cpm_pcie *port)
* XILINX_CPM_PCIE_MISC_IR_ENABLE register is mapped to
* CPM SLCR block.
*/
- writel(XILINX_CPM_PCIE_MISC_IR_LOCAL,
+ writel(variant->ir_misc_value,
port->cpm_base + XILINX_CPM_PCIE_MISC_IR_ENABLE);
- if (port->variant->version == CPM5) {
+ if (variant->ir_enable) {
writel(XILINX_CPM_PCIE_IR_LOCAL,
- port->cpm_base + XILINX_CPM_PCIE_IR_ENABLE);
+ port->cpm_base + variant->ir_enable);
}
- /* Enable the Bridge enable bit */
+ /* Set Bridge enable bit */
pcie_write(port, pcie_read(port, XILINX_CPM_PCIE_REG_RPSC) |
XILINX_CPM_PCIE_REG_RPSC_BEN,
XILINX_CPM_PCIE_REG_RPSC);
@@ -549,7 +540,8 @@ static int xilinx_cpm_pcie_parse_dt(struct xilinx_cpm_pcie *port,
if (IS_ERR(port->cfg))
return PTR_ERR(port->cfg);
- if (port->variant->version == CPM5) {
+ if (port->variant->version == CPM5 ||
+ port->variant->version == CPM5_HOST1) {
port->reg_base = devm_platform_ioremap_resource_byname(pdev,
"cpm_csr");
if (IS_ERR(port->reg_base))
@@ -589,28 +581,34 @@ static int xilinx_cpm_pcie_probe(struct platform_device *pdev)
port->dev = dev;
- err = xilinx_cpm_pcie_init_irq_domain(port);
- if (err)
- return err;
+ port->variant = of_device_get_match_data(dev);
- bus = resource_list_first_type(&bridge->windows, IORESOURCE_BUS);
- if (!bus)
- return -ENODEV;
+ if (port->variant->version != CPM5NC_HOST) {
+ err = xilinx_cpm_pcie_init_irq_domain(port);
+ if (err)
+ return err;
+ }
- port->variant = of_device_get_match_data(dev);
+ bus = resource_list_first_type(&bridge->windows, IORESOURCE_BUS);
+ if (!bus) {
+ err = -ENODEV;
+ goto err_free_irq_domains;
+ }
err = xilinx_cpm_pcie_parse_dt(port, bus->res);
if (err) {
dev_err(dev, "Parsing DT failed\n");
- goto err_parse_dt;
+ goto err_free_irq_domains;
}
xilinx_cpm_pcie_init_port(port);
- err = xilinx_cpm_setup_irq(port);
- if (err) {
- dev_err(dev, "Failed to set up interrupts\n");
- goto err_setup_irq;
+ if (port->variant->version != CPM5NC_HOST) {
+ err = xilinx_cpm_setup_irq(port);
+ if (err) {
+ dev_err(dev, "Failed to set up interrupts\n");
+ goto err_setup_irq;
+ }
}
bridge->sysdata = port->cfg;
@@ -623,20 +621,37 @@ static int xilinx_cpm_pcie_probe(struct platform_device *pdev)
return 0;
err_host_bridge:
- xilinx_cpm_free_interrupts(port);
+ if (port->variant->version != CPM5NC_HOST)
+ xilinx_cpm_free_interrupts(port);
err_setup_irq:
pci_ecam_free(port->cfg);
-err_parse_dt:
- xilinx_cpm_free_irq_domains(port);
+err_free_irq_domains:
+ if (port->variant->version != CPM5NC_HOST)
+ xilinx_cpm_free_irq_domains(port);
return err;
}
static const struct xilinx_cpm_variant cpm_host = {
.version = CPM,
+ .ir_misc_value = XILINX_CPM_PCIE0_MISC_IR_LOCAL,
};
static const struct xilinx_cpm_variant cpm5_host = {
.version = CPM5,
+ .ir_misc_value = XILINX_CPM_PCIE0_MISC_IR_LOCAL,
+ .ir_status = XILINX_CPM_PCIE0_IR_STATUS,
+ .ir_enable = XILINX_CPM_PCIE0_IR_ENABLE,
+};
+
+static const struct xilinx_cpm_variant cpm5_host1 = {
+ .version = CPM5_HOST1,
+ .ir_misc_value = XILINX_CPM_PCIE1_MISC_IR_LOCAL,
+ .ir_status = XILINX_CPM_PCIE1_IR_STATUS,
+ .ir_enable = XILINX_CPM_PCIE1_IR_ENABLE,
+};
+
+static const struct xilinx_cpm_variant cpm5n_host = {
+ .version = CPM5NC_HOST,
};
static const struct of_device_id xilinx_cpm_pcie_of_match[] = {
@@ -648,6 +663,14 @@ static const struct of_device_id xilinx_cpm_pcie_of_match[] = {
.compatible = "xlnx,versal-cpm5-host",
.data = &cpm5_host,
},
+ {
+ .compatible = "xlnx,versal-cpm5-host1",
+ .data = &cpm5_host1,
+ },
+ {
+ .compatible = "xlnx,versal-cpm5nc-host",
+ .data = &cpm5n_host,
+ },
{}
};
diff --git a/drivers/pci/controller/pcie-xilinx-dma-pl.c b/drivers/pci/controller/pcie-xilinx-dma-pl.c
new file mode 100644
index 000000000000..b037c8f315e4
--- /dev/null
+++ b/drivers/pci/controller/pcie-xilinx-dma-pl.c
@@ -0,0 +1,847 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * PCIe host controller driver for Xilinx XDMA PCIe Bridge
+ *
+ * Copyright (C) 2023 Xilinx, Inc. All rights reserved.
+ */
+#include <linux/bitfield.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqchip/irq-msi-lib.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/of_address.h>
+#include <linux/of_pci.h>
+
+#include "../pci.h"
+#include "pcie-xilinx-common.h"
+
+/* Register definitions */
+#define XILINX_PCIE_DMA_REG_IDR 0x00000138
+#define XILINX_PCIE_DMA_REG_IMR 0x0000013c
+#define XILINX_PCIE_DMA_REG_PSCR 0x00000144
+#define XILINX_PCIE_DMA_REG_RPSC 0x00000148
+#define XILINX_PCIE_DMA_REG_MSIBASE1 0x0000014c
+#define XILINX_PCIE_DMA_REG_MSIBASE2 0x00000150
+#define XILINX_PCIE_DMA_REG_RPEFR 0x00000154
+#define XILINX_PCIE_DMA_REG_IDRN 0x00000160
+#define XILINX_PCIE_DMA_REG_IDRN_MASK 0x00000164
+#define XILINX_PCIE_DMA_REG_MSI_LOW 0x00000170
+#define XILINX_PCIE_DMA_REG_MSI_HI 0x00000174
+#define XILINX_PCIE_DMA_REG_MSI_LOW_MASK 0x00000178
+#define XILINX_PCIE_DMA_REG_MSI_HI_MASK 0x0000017c
+
+#define IMR(x) BIT(XILINX_PCIE_INTR_ ##x)
+
+#define XILINX_PCIE_INTR_IMR_ALL_MASK \
+ ( \
+ IMR(LINK_DOWN) | \
+ IMR(HOT_RESET) | \
+ IMR(CFG_TIMEOUT) | \
+ IMR(CORRECTABLE) | \
+ IMR(NONFATAL) | \
+ IMR(FATAL) | \
+ IMR(INTX) | \
+ IMR(MSI) | \
+ IMR(SLV_UNSUPP) | \
+ IMR(SLV_UNEXP) | \
+ IMR(SLV_COMPL) | \
+ IMR(SLV_ERRP) | \
+ IMR(SLV_CMPABT) | \
+ IMR(SLV_ILLBUR) | \
+ IMR(MST_DECERR) | \
+ IMR(MST_SLVERR) \
+ )
+
+#define XILINX_PCIE_DMA_IMR_ALL_MASK 0x0ff30fe9
+#define XILINX_PCIE_DMA_IDR_ALL_MASK 0xffffffff
+#define XILINX_PCIE_DMA_IDRN_MASK GENMASK(19, 16)
+
+/* Root Port Error Register definitions */
+#define XILINX_PCIE_DMA_RPEFR_ERR_VALID BIT(18)
+#define XILINX_PCIE_DMA_RPEFR_REQ_ID GENMASK(15, 0)
+#define XILINX_PCIE_DMA_RPEFR_ALL_MASK 0xffffffff
+
+/* Root Port Interrupt Register definitions */
+#define XILINX_PCIE_DMA_IDRN_SHIFT 16
+
+/* Root Port Status/control Register definitions */
+#define XILINX_PCIE_DMA_REG_RPSC_BEN BIT(0)
+
+/* Phy Status/Control Register definitions */
+#define XILINX_PCIE_DMA_REG_PSCR_LNKUP BIT(11)
+#define QDMA_BRIDGE_BASE_OFF 0xcd8
+
+/* Number of MSI IRQs */
+#define XILINX_NUM_MSI_IRQS 64
+
+enum xilinx_pl_dma_version {
+ XDMA,
+ QDMA,
+};
+
+/**
+ * struct xilinx_pl_dma_variant - PL DMA PCIe variant information
+ * @version: DMA version
+ */
+struct xilinx_pl_dma_variant {
+ enum xilinx_pl_dma_version version;
+};
+
+struct xilinx_msi {
+ unsigned long *bitmap;
+ struct irq_domain *dev_domain;
+ struct mutex lock; /* Protect bitmap variable */
+ int irq_msi0;
+ int irq_msi1;
+};
+
+/**
+ * struct pl_dma_pcie - PCIe port information
+ * @dev: Device pointer
+ * @reg_base: IO Mapped Register Base
+ * @cfg_base: IO Mapped Configuration Base
+ * @irq: Interrupt number
+ * @cfg: Holds mappings of config space window
+ * @phys_reg_base: Physical address of reg base
+ * @intx_domain: Legacy IRQ domain pointer
+ * @pldma_domain: PL DMA IRQ domain pointer
+ * @resources: Bus Resources
+ * @msi: MSI information
+ * @intx_irq: INTx error interrupt number
+ * @lock: Lock protecting shared register access
+ * @variant: PL DMA PCIe version check pointer
+ */
+struct pl_dma_pcie {
+ struct device *dev;
+ void __iomem *reg_base;
+ void __iomem *cfg_base;
+ int irq;
+ struct pci_config_window *cfg;
+ phys_addr_t phys_reg_base;
+ struct irq_domain *intx_domain;
+ struct irq_domain *pldma_domain;
+ struct list_head resources;
+ struct xilinx_msi msi;
+ int intx_irq;
+ raw_spinlock_t lock;
+ const struct xilinx_pl_dma_variant *variant;
+};
+
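+/*
+ * Bridge register accessors: the QDMA variant exposes the bridge registers
+ * at QDMA_BRIDGE_BASE_OFF within the register space, the XDMA variant at
+ * offset 0.
+ */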
+static inline u32 pcie_read(struct pl_dma_pcie *port, u32 reg)
+{
+ if (port->variant->version == QDMA)
+ return readl(port->reg_base + reg + QDMA_BRIDGE_BASE_OFF);
+
+ return readl(port->reg_base + reg);
+}
+
+static inline void pcie_write(struct pl_dma_pcie *port, u32 val, u32 reg)
+{
+ if (port->variant->version == QDMA)
+ writel(val, port->reg_base + reg + QDMA_BRIDGE_BASE_OFF);
+ else
+ writel(val, port->reg_base + reg);
+}
+
+static inline bool xilinx_pl_dma_pcie_link_up(struct pl_dma_pcie *port)
+{
+ return (pcie_read(port, XILINX_PCIE_DMA_REG_PSCR) &
+ XILINX_PCIE_DMA_REG_PSCR_LNKUP) ? true : false;
+}
+
+static void xilinx_pl_dma_pcie_clear_err_interrupts(struct pl_dma_pcie *port)
+{
+ unsigned long val = pcie_read(port, XILINX_PCIE_DMA_REG_RPEFR);
+
+ if (val & XILINX_PCIE_DMA_RPEFR_ERR_VALID) {
+ dev_dbg(port->dev, "Requester ID %lu\n",
+ val & XILINX_PCIE_DMA_RPEFR_REQ_ID);
+ pcie_write(port, XILINX_PCIE_DMA_RPEFR_ALL_MASK,
+ XILINX_PCIE_DMA_REG_RPEFR);
+ }
+}
+
+static bool xilinx_pl_dma_pcie_valid_device(struct pci_bus *bus,
+ unsigned int devfn)
+{
+ struct pl_dma_pcie *port = bus->sysdata;
+
+ if (!pci_is_root_bus(bus)) {
+ /*
+ * Checking whether the link is up is the last line of
+ * defense, and this check is inherently racy by definition.
+ * Sending a PIO request to a downstream device when the link is
+ * down causes an unrecoverable error, and a reset of the entire
+ * PCIe controller will be needed. We can reduce the likelihood
+ * of that unrecoverable error by checking whether the link is
+ * up, but we can't completely prevent it because the link may
+ * go down between the link-up check and the PIO request.
+ */
+ if (!xilinx_pl_dma_pcie_link_up(port))
+ return false;
+ } else if (devfn > 0)
+ /* Only one device down on each root port */
+ return false;
+
+ return true;
+}
+
+static void __iomem *xilinx_pl_dma_pcie_map_bus(struct pci_bus *bus,
+ unsigned int devfn, int where)
+{
+ struct pl_dma_pcie *port = bus->sysdata;
+
+ if (!xilinx_pl_dma_pcie_valid_device(bus, devfn))
+ return NULL;
+
+ if (port->variant->version == QDMA)
+ return port->cfg_base + PCIE_ECAM_OFFSET(bus->number, devfn, where);
+
+ return port->reg_base + PCIE_ECAM_OFFSET(bus->number, devfn, where);
+}
+
+/* PCIe operations */
+static struct pci_ecam_ops xilinx_pl_dma_pcie_ops = {
+ .pci_ops = {
+ .map_bus = xilinx_pl_dma_pcie_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+ }
+};
+
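+/*
+ * Program the bridge register base as the MSI target address (used in MSI
+ * decode mode); the same address is advertised to endpoints via
+ * xilinx_compose_msi_msg().
+ */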
+static void xilinx_pl_dma_pcie_enable_msi(struct pl_dma_pcie *port)
+{
+ phys_addr_t msi_addr = port->phys_reg_base;
+
+ pcie_write(port, upper_32_bits(msi_addr), XILINX_PCIE_DMA_REG_MSIBASE1);
+ pcie_write(port, lower_32_bits(msi_addr), XILINX_PCIE_DMA_REG_MSIBASE2);
+}
+
+static void xilinx_mask_intx_irq(struct irq_data *data)
+{
+ struct pl_dma_pcie *port = irq_data_get_irq_chip_data(data);
+ unsigned long flags;
+ u32 mask, val;
+
+ mask = BIT(data->hwirq + XILINX_PCIE_DMA_IDRN_SHIFT);
+ raw_spin_lock_irqsave(&port->lock, flags);
+ val = pcie_read(port, XILINX_PCIE_DMA_REG_IDRN_MASK);
+ pcie_write(port, (val & (~mask)), XILINX_PCIE_DMA_REG_IDRN_MASK);
+ raw_spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static void xilinx_unmask_intx_irq(struct irq_data *data)
+{
+ struct pl_dma_pcie *port = irq_data_get_irq_chip_data(data);
+ unsigned long flags;
+ u32 mask, val;
+
+ mask = BIT(data->hwirq + XILINX_PCIE_DMA_IDRN_SHIFT);
+ raw_spin_lock_irqsave(&port->lock, flags);
+ val = pcie_read(port, XILINX_PCIE_DMA_REG_IDRN_MASK);
+ pcie_write(port, (val | mask), XILINX_PCIE_DMA_REG_IDRN_MASK);
+ raw_spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static struct irq_chip xilinx_leg_irq_chip = {
+ .name = "pl_dma:INTx",
+ .irq_mask = xilinx_mask_intx_irq,
+ .irq_unmask = xilinx_unmask_intx_irq,
+};
+
+static int xilinx_pl_dma_pcie_intx_map(struct irq_domain *domain,
+ unsigned int irq, irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &xilinx_leg_irq_chip, handle_level_irq);
+ irq_set_chip_data(irq, domain->host_data);
+ irq_set_status_flags(irq, IRQ_LEVEL);
+
+ return 0;
+}
+
+/* INTx IRQ Domain operations */
+static const struct irq_domain_ops intx_domain_ops = {
+ .map = xilinx_pl_dma_pcie_intx_map,
+};
+
+static irqreturn_t xilinx_pl_dma_pcie_msi_handler_high(int irq, void *args)
+{
+ struct xilinx_msi *msi;
+ unsigned long status;
+ u32 bit, virq;
+ struct pl_dma_pcie *port = args;
+
+ msi = &port->msi;
+
+ while ((status = pcie_read(port, XILINX_PCIE_DMA_REG_MSI_HI)) != 0) {
+ for_each_set_bit(bit, &status, 32) {
+ pcie_write(port, 1 << bit, XILINX_PCIE_DMA_REG_MSI_HI);
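+ /* The high status register covers MSI vectors 32-63, so offset the hwirq */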
+ bit = bit + 32;
+ virq = irq_find_mapping(msi->dev_domain, bit);
+ if (virq)
+ generic_handle_irq(virq);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t xilinx_pl_dma_pcie_msi_handler_low(int irq, void *args)
+{
+ struct pl_dma_pcie *port = args;
+ struct xilinx_msi *msi;
+ unsigned long status;
+ u32 bit, virq;
+
+ msi = &port->msi;
+
+ while ((status = pcie_read(port, XILINX_PCIE_DMA_REG_MSI_LOW)) != 0) {
+ for_each_set_bit(bit, &status, 32) {
+ pcie_write(port, 1 << bit, XILINX_PCIE_DMA_REG_MSI_LOW);
+ virq = irq_find_mapping(msi->dev_domain, bit);
+ if (virq)
+ generic_handle_irq(virq);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t xilinx_pl_dma_pcie_event_flow(int irq, void *args)
+{
+ struct pl_dma_pcie *port = args;
+ unsigned long val;
+ int i;
+
+ val = pcie_read(port, XILINX_PCIE_DMA_REG_IDR);
+ val &= pcie_read(port, XILINX_PCIE_DMA_REG_IMR);
+ for_each_set_bit(i, &val, 32)
+ generic_handle_domain_irq(port->pldma_domain, i);
+
+ pcie_write(port, val, XILINX_PCIE_DMA_REG_IDR);
+
+ return IRQ_HANDLED;
+}
+
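+/*
+ * _IC() builds a designated initializer indexed by the XILINX_PCIE_INTR_*
+ * value, pairing the symbolic name with a human-readable description.
+ */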
+#define _IC(x, s) \
+ [XILINX_PCIE_INTR_ ## x] = { __stringify(x), s }
+
+static const struct {
+ const char *sym;
+ const char *str;
+} intr_cause[32] = {
+ _IC(LINK_DOWN, "Link Down"),
+ _IC(HOT_RESET, "Hot reset"),
+ _IC(CFG_TIMEOUT, "ECAM access timeout"),
+ _IC(CORRECTABLE, "Correctable error message"),
+ _IC(NONFATAL, "Non fatal error message"),
+ _IC(FATAL, "Fatal error message"),
+ _IC(SLV_UNSUPP, "Slave unsupported request"),
+ _IC(SLV_UNEXP, "Slave unexpected completion"),
+ _IC(SLV_COMPL, "Slave completion timeout"),
+ _IC(SLV_ERRP, "Slave Error Poison"),
+ _IC(SLV_CMPABT, "Slave Completer Abort"),
+ _IC(SLV_ILLBUR, "Slave Illegal Burst"),
+ _IC(MST_DECERR, "Master decode error"),
+ _IC(MST_SLVERR, "Master slave error"),
+};
+
+static irqreturn_t xilinx_pl_dma_pcie_intr_handler(int irq, void *dev_id)
+{
+ struct pl_dma_pcie *port = (struct pl_dma_pcie *)dev_id;
+ struct device *dev = port->dev;
+ struct irq_data *d;
+
+ d = irq_domain_get_irq_data(port->pldma_domain, irq);
+ switch (d->hwirq) {
+ case XILINX_PCIE_INTR_CORRECTABLE:
+ case XILINX_PCIE_INTR_NONFATAL:
+ case XILINX_PCIE_INTR_FATAL:
+ xilinx_pl_dma_pcie_clear_err_interrupts(port);
+ fallthrough;
+
+ default:
+ if (intr_cause[d->hwirq].str)
+ dev_warn(dev, "%s\n", intr_cause[d->hwirq].str);
+ else
+ dev_warn(dev, "Unknown IRQ %ld\n", d->hwirq);
+ }
+
+ return IRQ_HANDLED;
+}
+
+#define XILINX_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
+ MSI_FLAG_USE_DEF_CHIP_OPS | \
+ MSI_FLAG_NO_AFFINITY)
+
+#define XILINX_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \
+ MSI_FLAG_MULTI_PCI_MSI)
+
+static const struct msi_parent_ops xilinx_msi_parent_ops = {
+ .required_flags = XILINX_MSI_FLAGS_REQUIRED,
+ .supported_flags = XILINX_MSI_FLAGS_SUPPORTED,
+ .bus_select_token = DOMAIN_BUS_PCI_MSI,
+ .prefix = "pl_dma-",
+ .init_dev_msi_info = msi_lib_init_dev_msi_info,
+};
+
+static void xilinx_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
+{
+ struct pl_dma_pcie *pcie = irq_data_get_irq_chip_data(data);
+ phys_addr_t msi_addr = pcie->phys_reg_base;
+
+ msg->address_lo = lower_32_bits(msi_addr);
+ msg->address_hi = upper_32_bits(msi_addr);
+ msg->data = data->hwirq;
+}
+
+static struct irq_chip xilinx_irq_chip = {
+ .name = "pl_dma:MSI",
+ .irq_compose_msi_msg = xilinx_compose_msi_msg,
+};
+
+static int xilinx_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *args)
+{
+ struct pl_dma_pcie *pcie = domain->host_data;
+ struct xilinx_msi *msi = &pcie->msi;
+ int bit, i;
+
+ mutex_lock(&msi->lock);
+ bit = bitmap_find_free_region(msi->bitmap, XILINX_NUM_MSI_IRQS,
+ get_count_order(nr_irqs));
+ if (bit < 0) {
+ mutex_unlock(&msi->lock);
+ return -ENOSPC;
+ }
+
+ for (i = 0; i < nr_irqs; i++) {
+ irq_domain_set_info(domain, virq + i, bit + i, &xilinx_irq_chip,
+ domain->host_data, handle_simple_irq,
+ NULL, NULL);
+ }
+ mutex_unlock(&msi->lock);
+
+ return 0;
+}
+
+static void xilinx_irq_domain_free(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs)
+{
+ struct irq_data *data = irq_domain_get_irq_data(domain, virq);
+ struct pl_dma_pcie *pcie = irq_data_get_irq_chip_data(data);
+ struct xilinx_msi *msi = &pcie->msi;
+
+ mutex_lock(&msi->lock);
+ bitmap_release_region(msi->bitmap, data->hwirq,
+ get_count_order(nr_irqs));
+ mutex_unlock(&msi->lock);
+}
+
+static const struct irq_domain_ops dev_msi_domain_ops = {
+ .alloc = xilinx_irq_domain_alloc,
+ .free = xilinx_irq_domain_free,
+};
+
+static void xilinx_pl_dma_pcie_free_irq_domains(struct pl_dma_pcie *port)
+{
+ struct xilinx_msi *msi = &port->msi;
+
+ if (port->intx_domain) {
+ irq_domain_remove(port->intx_domain);
+ port->intx_domain = NULL;
+ }
+
+ if (msi->dev_domain) {
+ irq_domain_remove(msi->dev_domain);
+ msi->dev_domain = NULL;
+ }
+}
+
+static int xilinx_pl_dma_pcie_init_msi_irq_domain(struct pl_dma_pcie *port)
+{
+ struct device *dev = port->dev;
+ struct xilinx_msi *msi = &port->msi;
+ int size = BITS_TO_LONGS(XILINX_NUM_MSI_IRQS) * sizeof(long);
+ struct irq_domain_info info = {
+ .fwnode = dev_fwnode(port->dev),
+ .ops = &dev_msi_domain_ops,
+ .host_data = port,
+ .size = XILINX_NUM_MSI_IRQS,
+ };
+
+ msi->dev_domain = msi_create_parent_irq_domain(&info, &xilinx_msi_parent_ops);
+ if (!msi->dev_domain)
+ goto out;
+
+ mutex_init(&msi->lock);
+ msi->bitmap = kzalloc(size, GFP_KERNEL);
+ if (!msi->bitmap)
+ goto out;
+
+ raw_spin_lock_init(&port->lock);
+ xilinx_pl_dma_pcie_enable_msi(port);
+
+ return 0;
+
+out:
+ xilinx_pl_dma_pcie_free_irq_domains(port);
+ dev_err(dev, "Failed to allocate MSI IRQ domains\n");
+
+ return -ENOMEM;
+}
+
+/*
+ * INTx error interrupts are Xilinx controller-specific interrupts, used to
+ * notify the user about errors such as cfg timeouts, slave unsupported
+ * requests, fatal and non-fatal errors, etc.
+ */
+
+static irqreturn_t xilinx_pl_dma_pcie_intx_flow(int irq, void *args)
+{
+ unsigned long val;
+ int i;
+ struct pl_dma_pcie *port = args;
+
+ val = FIELD_GET(XILINX_PCIE_DMA_IDRN_MASK,
+ pcie_read(port, XILINX_PCIE_DMA_REG_IDRN));
+
+ for_each_set_bit(i, &val, PCI_NUM_INTX)
+ generic_handle_domain_irq(port->intx_domain, i);
+ return IRQ_HANDLED;
+}
+
+static void xilinx_pl_dma_pcie_mask_event_irq(struct irq_data *d)
+{
+ struct pl_dma_pcie *port = irq_data_get_irq_chip_data(d);
+ u32 val;
+
+ raw_spin_lock(&port->lock);
+ val = pcie_read(port, XILINX_PCIE_DMA_REG_IMR);
+ val &= ~BIT(d->hwirq);
+ pcie_write(port, val, XILINX_PCIE_DMA_REG_IMR);
+ raw_spin_unlock(&port->lock);
+}
+
+static void xilinx_pl_dma_pcie_unmask_event_irq(struct irq_data *d)
+{
+ struct pl_dma_pcie *port = irq_data_get_irq_chip_data(d);
+ u32 val;
+
+ raw_spin_lock(&port->lock);
+ val = pcie_read(port, XILINX_PCIE_DMA_REG_IMR);
+ val |= BIT(d->hwirq);
+ pcie_write(port, val, XILINX_PCIE_DMA_REG_IMR);
+ raw_spin_unlock(&port->lock);
+}
+
+static struct irq_chip xilinx_pl_dma_pcie_event_irq_chip = {
+ .name = "pl_dma:RC-Event",
+ .irq_mask = xilinx_pl_dma_pcie_mask_event_irq,
+ .irq_unmask = xilinx_pl_dma_pcie_unmask_event_irq,
+};
+
+static int xilinx_pl_dma_pcie_event_map(struct irq_domain *domain,
+ unsigned int irq, irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &xilinx_pl_dma_pcie_event_irq_chip,
+ handle_level_irq);
+ irq_set_chip_data(irq, domain->host_data);
+ irq_set_status_flags(irq, IRQ_LEVEL);
+
+ return 0;
+}
+
+static const struct irq_domain_ops event_domain_ops = {
+ .map = xilinx_pl_dma_pcie_event_map,
+};
+
+/**
+ * xilinx_pl_dma_pcie_init_irq_domain - Initialize IRQ domain
+ * @port: PCIe port information
+ *
+ * Return: '0' on success and error value on failure.
+ */
+static int xilinx_pl_dma_pcie_init_irq_domain(struct pl_dma_pcie *port)
+{
+ struct device *dev = port->dev;
+ struct device_node *node = dev->of_node;
+ struct device_node *pcie_intc_node;
+ int ret;
+
+ /* Setup INTx */
+ pcie_intc_node = of_get_child_by_name(node, "interrupt-controller");
+ if (!pcie_intc_node) {
+ dev_err(dev, "No PCIe Intc node found\n");
+ return -EINVAL;
+ }
+
+ port->pldma_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node), 32,
+ &event_domain_ops, port);
+ if (!port->pldma_domain)
+ return -ENOMEM;
+
+ irq_domain_update_bus_token(port->pldma_domain, DOMAIN_BUS_NEXUS);
+
+ port->intx_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node), PCI_NUM_INTX,
+ &intx_domain_ops, port);
+ if (!port->intx_domain) {
+ dev_err(dev, "Failed to get a INTx IRQ domain\n");
+ return -ENOMEM;
+ }
+
+ irq_domain_update_bus_token(port->intx_domain, DOMAIN_BUS_WIRED);
+
+ ret = xilinx_pl_dma_pcie_init_msi_irq_domain(port);
+ if (ret != 0) {
+ irq_domain_remove(port->intx_domain);
+ return -ENOMEM;
+ }
+
+ of_node_put(pcie_intc_node);
+ raw_spin_lock_init(&port->lock);
+
+ return 0;
+}
+
+static int xilinx_pl_dma_pcie_setup_irq(struct pl_dma_pcie *port)
+{
+ struct device *dev = port->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ int i, irq, err;
+
+ port->irq = platform_get_irq(pdev, 0);
+ if (port->irq < 0)
+ return port->irq;
+
+ for (i = 0; i < ARRAY_SIZE(intr_cause); i++) {
+ int err;
+
+ if (!intr_cause[i].str)
+ continue;
+
+ irq = irq_create_mapping(port->pldma_domain, i);
+ if (!irq) {
+ dev_err(dev, "Failed to map interrupt\n");
+ return -ENXIO;
+ }
+
+ err = devm_request_irq(dev, irq,
+ xilinx_pl_dma_pcie_intr_handler,
+ IRQF_SHARED | IRQF_NO_THREAD,
+ intr_cause[i].sym, port);
+ if (err) {
+ dev_err(dev, "Failed to request IRQ %d\n", irq);
+ return err;
+ }
+ }
+
+ port->intx_irq = irq_create_mapping(port->pldma_domain,
+ XILINX_PCIE_INTR_INTX);
+ if (!port->intx_irq) {
+ dev_err(dev, "Failed to map INTx interrupt\n");
+ return -ENXIO;
+ }
+
+ err = devm_request_irq(dev, port->intx_irq, xilinx_pl_dma_pcie_intx_flow,
+ IRQF_SHARED | IRQF_NO_THREAD, NULL, port);
+ if (err) {
+ dev_err(dev, "Failed to request INTx IRQ %d\n", port->intx_irq);
+ return err;
+ }
+
+ err = devm_request_irq(dev, port->irq, xilinx_pl_dma_pcie_event_flow,
+ IRQF_SHARED | IRQF_NO_THREAD, NULL, port);
+ if (err) {
+ dev_err(dev, "Failed to request event IRQ %d\n", port->irq);
+ return err;
+ }
+
+ return 0;
+}
+
+static void xilinx_pl_dma_pcie_init_port(struct pl_dma_pcie *port)
+{
+ if (xilinx_pl_dma_pcie_link_up(port))
+ dev_info(port->dev, "PCIe Link is UP\n");
+ else
+ dev_info(port->dev, "PCIe Link is DOWN\n");
+
+ /* Disable all interrupts */
+ pcie_write(port, ~XILINX_PCIE_DMA_IDR_ALL_MASK,
+ XILINX_PCIE_DMA_REG_IMR);
+
+ /* Clear pending interrupts */
+ pcie_write(port, pcie_read(port, XILINX_PCIE_DMA_REG_IDR) &
+ XILINX_PCIE_DMA_IMR_ALL_MASK,
+ XILINX_PCIE_DMA_REG_IDR);
+
+ /* Needed for MSI DECODE MODE */
+ pcie_write(port, XILINX_PCIE_DMA_IDR_ALL_MASK,
+ XILINX_PCIE_DMA_REG_MSI_LOW_MASK);
+ pcie_write(port, XILINX_PCIE_DMA_IDR_ALL_MASK,
+ XILINX_PCIE_DMA_REG_MSI_HI_MASK);
+
+ /* Set the Bridge enable bit */
+ pcie_write(port, pcie_read(port, XILINX_PCIE_DMA_REG_RPSC) |
+ XILINX_PCIE_DMA_REG_RPSC_BEN,
+ XILINX_PCIE_DMA_REG_RPSC);
+}
+
+static int xilinx_request_msi_irq(struct pl_dma_pcie *port)
+{
+ struct device *dev = port->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ int ret;
+
+ port->msi.irq_msi0 = platform_get_irq_byname(pdev, "msi0");
+ if (port->msi.irq_msi0 <= 0)
+ return port->msi.irq_msi0;
+
+ ret = devm_request_irq(dev, port->msi.irq_msi0, xilinx_pl_dma_pcie_msi_handler_low,
+ IRQF_SHARED | IRQF_NO_THREAD, "xlnx-pcie-dma-pl",
+ port);
+ if (ret) {
+ dev_err(dev, "Failed to register interrupt\n");
+ return ret;
+ }
+
+ port->msi.irq_msi1 = platform_get_irq_byname(pdev, "msi1");
+ if (port->msi.irq_msi1 <= 0)
+ return port->msi.irq_msi1;
+
+ ret = devm_request_irq(dev, port->msi.irq_msi1, xilinx_pl_dma_pcie_msi_handler_high,
+ IRQF_SHARED | IRQF_NO_THREAD, "xlnx-pcie-dma-pl",
+ port);
+ if (ret) {
+ dev_err(dev, "Failed to register interrupt\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int xilinx_pl_dma_pcie_parse_dt(struct pl_dma_pcie *port,
+ struct resource *bus_range)
+{
+ struct device *dev = port->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct resource *res;
+ int err;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "Missing \"reg\" property\n");
+ return -ENXIO;
+ }
+ port->phys_reg_base = res->start;
+
+ port->cfg = pci_ecam_create(dev, res, bus_range, &xilinx_pl_dma_pcie_ops);
+ if (IS_ERR(port->cfg))
+ return PTR_ERR(port->cfg);
+
+ port->reg_base = port->cfg->win;
+
+ if (port->variant->version == QDMA) {
+ port->cfg_base = port->cfg->win;
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "breg");
+ port->reg_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(port->reg_base))
+ return PTR_ERR(port->reg_base);
+ port->phys_reg_base = res->start;
+ }
+
+ err = xilinx_request_msi_irq(port);
+ if (err) {
+ pci_ecam_free(port->cfg);
+ return err;
+ }
+
+ return 0;
+}
+
+static int xilinx_pl_dma_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct pl_dma_pcie *port;
+ struct pci_host_bridge *bridge;
+ struct resource_entry *bus;
+ int err;
+
+ bridge = devm_pci_alloc_host_bridge(dev, sizeof(*port));
+ if (!bridge)
+ return -ENODEV;
+
+ port = pci_host_bridge_priv(bridge);
+
+ port->dev = dev;
+
+ bus = resource_list_first_type(&bridge->windows, IORESOURCE_BUS);
+ if (!bus)
+ return -ENODEV;
+
+ port->variant = of_device_get_match_data(dev);
+
+ err = xilinx_pl_dma_pcie_parse_dt(port, bus->res);
+ if (err) {
+ dev_err(dev, "Parsing DT failed\n");
+ return err;
+ }
+
+ xilinx_pl_dma_pcie_init_port(port);
+
+ err = xilinx_pl_dma_pcie_init_irq_domain(port);
+ if (err)
+ goto err_irq_domain;
+
+ err = xilinx_pl_dma_pcie_setup_irq(port);
+ if (err)
+ goto err_host_bridge;
+
+ bridge->sysdata = port;
+ bridge->ops = &xilinx_pl_dma_pcie_ops.pci_ops;
+
+ err = pci_host_probe(bridge);
+ if (err < 0)
+ goto err_host_bridge;
+
+ return 0;
+
+err_host_bridge:
+ xilinx_pl_dma_pcie_free_irq_domains(port);
+
+err_irq_domain:
+ pci_ecam_free(port->cfg);
+ return err;
+}
+
+static const struct xilinx_pl_dma_variant xdma_host = {
+ .version = XDMA,
+};
+
+static const struct xilinx_pl_dma_variant qdma_host = {
+ .version = QDMA,
+};
+
+static const struct of_device_id xilinx_pl_dma_pcie_of_match[] = {
+ {
+ .compatible = "xlnx,xdma-host-3.00",
+ .data = &xdma_host,
+ },
+ {
+ .compatible = "xlnx,qdma-host-3.00",
+ .data = &qdma_host,
+ },
+ {}
+};
+
+static struct platform_driver xilinx_pl_dma_pcie_driver = {
+ .driver = {
+ .name = "xilinx-xdma-pcie",
+ .of_match_table = xilinx_pl_dma_pcie_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = xilinx_pl_dma_pcie_probe,
+};
+
+builtin_platform_driver(xilinx_pl_dma_pcie_driver);
diff --git a/drivers/pci/controller/pcie-xilinx-nwl.c b/drivers/pci/controller/pcie-xilinx-nwl.c
index 176686bdb15c..7db2c96c6cec 100644
--- a/drivers/pci/controller/pcie-xilinx-nwl.c
+++ b/drivers/pci/controller/pcie-xilinx-nwl.c
@@ -10,6 +10,7 @@
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
+#include <linux/irqchip/irq-msi-lib.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -19,6 +20,7 @@
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/pci-ecam.h>
+#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/irqchip/chained_irq.h>
@@ -80,8 +82,8 @@
#define MSGF_MISC_SR_NON_FATAL_DEV BIT(22)
#define MSGF_MISC_SR_FATAL_DEV BIT(23)
#define MSGF_MISC_SR_LINK_DOWN BIT(24)
-#define MSGF_MSIC_SR_LINK_AUTO_BWIDTH BIT(25)
-#define MSGF_MSIC_SR_LINK_BWIDTH BIT(26)
+#define MSGF_MISC_SR_LINK_AUTO_BWIDTH BIT(25)
+#define MSGF_MISC_SR_LINK_BWIDTH BIT(26)
#define MSGF_MISC_SR_MASKALL (MSGF_MISC_SR_RXMSG_AVAIL | \
MSGF_MISC_SR_RXMSG_OVER | \
@@ -96,8 +98,8 @@
MSGF_MISC_SR_NON_FATAL_DEV | \
MSGF_MISC_SR_FATAL_DEV | \
MSGF_MISC_SR_LINK_DOWN | \
- MSGF_MSIC_SR_LINK_AUTO_BWIDTH | \
- MSGF_MSIC_SR_LINK_BWIDTH)
+ MSGF_MISC_SR_LINK_AUTO_BWIDTH | \
+ MSGF_MISC_SR_LINK_BWIDTH)
/* Legacy interrupt status mask bits */
#define MSGF_LEG_SR_INTA BIT(0)
@@ -126,7 +128,7 @@
#define E_ECAM_CR_ENABLE BIT(0)
#define E_ECAM_SIZE_LOC GENMASK(20, 16)
#define E_ECAM_SIZE_SHIFT 16
-#define NWL_ECAM_VALUE_DEFAULT 12
+#define NWL_ECAM_MAX_SIZE 16
#define CFG_DMA_REG_BAR GENMASK(2, 0)
#define CFG_PCIE_CACHE GENMASK(7, 0)
@@ -144,7 +146,6 @@
#define LINK_WAIT_USLEEP_MAX 100000
struct nwl_msi { /* MSI information */
- struct irq_domain *msi_domain;
DECLARE_BITMAP(bitmap, INT_PCI_MSI_NR);
struct irq_domain *dev_domain;
struct mutex lock; /* protect bitmap variable */
@@ -157,6 +158,7 @@ struct nwl_pcie {
void __iomem *breg_base;
void __iomem *pcireg_base;
void __iomem *ecam_base;
+ struct phy *phy[4];
phys_addr_t phys_breg_base; /* Physical Bridge Register Base */
phys_addr_t phys_pcie_reg_base; /* Physical PCIe Controller Base */
phys_addr_t phys_ecam_base; /* Physical Configuration Base */
@@ -165,10 +167,8 @@ struct nwl_pcie {
u32 ecam_size;
int irq_intx;
int irq_misc;
- u32 ecam_value;
- u8 last_busno;
struct nwl_msi msi;
- struct irq_domain *legacy_irq_domain;
+ struct irq_domain *intx_irq_domain;
struct clk *clk;
raw_spinlock_t leg_mask_lock;
};
@@ -269,42 +269,42 @@ static irqreturn_t nwl_pcie_misc_handler(int irq, void *data)
return IRQ_NONE;
if (misc_stat & MSGF_MISC_SR_RXMSG_OVER)
- dev_err(dev, "Received Message FIFO Overflow\n");
+ dev_err_ratelimited(dev, "Received Message FIFO Overflow\n");
if (misc_stat & MSGF_MISC_SR_SLAVE_ERR)
- dev_err(dev, "Slave error\n");
+ dev_err_ratelimited(dev, "Slave error\n");
if (misc_stat & MSGF_MISC_SR_MASTER_ERR)
- dev_err(dev, "Master error\n");
+ dev_err_ratelimited(dev, "Master error\n");
if (misc_stat & MSGF_MISC_SR_I_ADDR_ERR)
- dev_err(dev, "In Misc Ingress address translation error\n");
+ dev_err_ratelimited(dev, "In Misc Ingress address translation error\n");
if (misc_stat & MSGF_MISC_SR_E_ADDR_ERR)
- dev_err(dev, "In Misc Egress address translation error\n");
+ dev_err_ratelimited(dev, "In Misc Egress address translation error\n");
if (misc_stat & MSGF_MISC_SR_FATAL_AER)
- dev_err(dev, "Fatal Error in AER Capability\n");
+ dev_err_ratelimited(dev, "Fatal Error in AER Capability\n");
if (misc_stat & MSGF_MISC_SR_NON_FATAL_AER)
- dev_err(dev, "Non-Fatal Error in AER Capability\n");
+ dev_err_ratelimited(dev, "Non-Fatal Error in AER Capability\n");
if (misc_stat & MSGF_MISC_SR_CORR_AER)
- dev_err(dev, "Correctable Error in AER Capability\n");
+ dev_err_ratelimited(dev, "Correctable Error in AER Capability\n");
if (misc_stat & MSGF_MISC_SR_UR_DETECT)
- dev_err(dev, "Unsupported request Detected\n");
+ dev_err_ratelimited(dev, "Unsupported request Detected\n");
if (misc_stat & MSGF_MISC_SR_NON_FATAL_DEV)
- dev_err(dev, "Non-Fatal Error Detected\n");
+ dev_err_ratelimited(dev, "Non-Fatal Error Detected\n");
if (misc_stat & MSGF_MISC_SR_FATAL_DEV)
- dev_err(dev, "Fatal Error Detected\n");
+ dev_err_ratelimited(dev, "Fatal Error Detected\n");
- if (misc_stat & MSGF_MSIC_SR_LINK_AUTO_BWIDTH)
+ if (misc_stat & MSGF_MISC_SR_LINK_AUTO_BWIDTH)
dev_info(dev, "Link Autonomous Bandwidth Management Status bit set\n");
- if (misc_stat & MSGF_MSIC_SR_LINK_BWIDTH)
+ if (misc_stat & MSGF_MISC_SR_LINK_BWIDTH)
dev_info(dev, "Link Bandwidth Management Status bit set\n");
/* Clear misc interrupt status */
@@ -326,7 +326,7 @@ static void nwl_pcie_leg_handler(struct irq_desc *desc)
while ((status = nwl_bridge_readl(pcie, MSGF_LEG_STATUS) &
MSGF_LEG_SR_MASKALL) != 0) {
for_each_set_bit(bit, &status, PCI_NUM_INTX)
- generic_handle_domain_irq(pcie->legacy_irq_domain, bit);
+ generic_handle_domain_irq(pcie->intx_irq_domain, bit);
}
chained_irq_exit(chip, desc);
@@ -366,71 +366,74 @@ static void nwl_pcie_msi_handler_low(struct irq_desc *desc)
chained_irq_exit(chip, desc);
}
-static void nwl_mask_leg_irq(struct irq_data *data)
+static void nwl_mask_intx_irq(struct irq_data *data)
{
struct nwl_pcie *pcie = irq_data_get_irq_chip_data(data);
unsigned long flags;
u32 mask;
u32 val;
- mask = 1 << (data->hwirq - 1);
+ mask = 1 << data->hwirq;
raw_spin_lock_irqsave(&pcie->leg_mask_lock, flags);
val = nwl_bridge_readl(pcie, MSGF_LEG_MASK);
nwl_bridge_writel(pcie, (val & (~mask)), MSGF_LEG_MASK);
raw_spin_unlock_irqrestore(&pcie->leg_mask_lock, flags);
}
-static void nwl_unmask_leg_irq(struct irq_data *data)
+static void nwl_unmask_intx_irq(struct irq_data *data)
{
struct nwl_pcie *pcie = irq_data_get_irq_chip_data(data);
unsigned long flags;
u32 mask;
u32 val;
- mask = 1 << (data->hwirq - 1);
+ mask = 1 << data->hwirq;
raw_spin_lock_irqsave(&pcie->leg_mask_lock, flags);
val = nwl_bridge_readl(pcie, MSGF_LEG_MASK);
nwl_bridge_writel(pcie, (val | mask), MSGF_LEG_MASK);
raw_spin_unlock_irqrestore(&pcie->leg_mask_lock, flags);
}
-static struct irq_chip nwl_leg_irq_chip = {
+static struct irq_chip nwl_intx_irq_chip = {
.name = "nwl_pcie:legacy",
- .irq_enable = nwl_unmask_leg_irq,
- .irq_disable = nwl_mask_leg_irq,
- .irq_mask = nwl_mask_leg_irq,
- .irq_unmask = nwl_unmask_leg_irq,
+ .irq_enable = nwl_unmask_intx_irq,
+ .irq_disable = nwl_mask_intx_irq,
+ .irq_mask = nwl_mask_intx_irq,
+ .irq_unmask = nwl_unmask_intx_irq,
};
-static int nwl_legacy_map(struct irq_domain *domain, unsigned int irq,
- irq_hw_number_t hwirq)
+static int nwl_intx_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
{
- irq_set_chip_and_handler(irq, &nwl_leg_irq_chip, handle_level_irq);
+ irq_set_chip_and_handler(irq, &nwl_intx_irq_chip, handle_level_irq);
irq_set_chip_data(irq, domain->host_data);
irq_set_status_flags(irq, IRQ_LEVEL);
return 0;
}
-static const struct irq_domain_ops legacy_domain_ops = {
- .map = nwl_legacy_map,
+static const struct irq_domain_ops intx_domain_ops = {
+ .map = nwl_intx_map,
.xlate = pci_irqd_intx_xlate,
};
#ifdef CONFIG_PCI_MSI
-static struct irq_chip nwl_msi_irq_chip = {
- .name = "nwl_pcie:msi",
- .irq_enable = pci_msi_unmask_irq,
- .irq_disable = pci_msi_mask_irq,
- .irq_mask = pci_msi_mask_irq,
- .irq_unmask = pci_msi_unmask_irq,
-};
-static struct msi_domain_info nwl_msi_domain_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_MULTI_PCI_MSI),
- .chip = &nwl_msi_irq_chip,
+#define NWL_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
+ MSI_FLAG_USE_DEF_CHIP_OPS | \
+ MSI_FLAG_NO_AFFINITY)
+
+#define NWL_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \
+ MSI_FLAG_MULTI_PCI_MSI)
+
+static const struct msi_parent_ops nwl_msi_parent_ops = {
+ .required_flags = NWL_MSI_FLAGS_REQUIRED,
+ .supported_flags = NWL_MSI_FLAGS_SUPPORTED,
+ .bus_select_token = DOMAIN_BUS_PCI_MSI,
+ .prefix = "nwl-",
+ .init_dev_msi_info = msi_lib_init_dev_msi_info,
};
+
#endif
static void nwl_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
@@ -443,16 +446,9 @@ static void nwl_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
msg->data = data->hwirq;
}
-static int nwl_msi_set_affinity(struct irq_data *irq_data,
- const struct cpumask *mask, bool force)
-{
- return -EINVAL;
-}
-
static struct irq_chip nwl_irq_chip = {
.name = "Xilinx MSI",
.irq_compose_msi_msg = nwl_compose_msi_msg,
- .irq_set_affinity = nwl_msi_set_affinity,
};
static int nwl_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
@@ -502,45 +498,93 @@ static int nwl_pcie_init_msi_irq_domain(struct nwl_pcie *pcie)
{
#ifdef CONFIG_PCI_MSI
struct device *dev = pcie->dev;
- struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node);
struct nwl_msi *msi = &pcie->msi;
-
- msi->dev_domain = irq_domain_add_linear(NULL, INT_PCI_MSI_NR,
- &dev_msi_domain_ops, pcie);
+ struct irq_domain_info info = {
+ .fwnode = dev_fwnode(dev),
+ .ops = &dev_msi_domain_ops,
+ .host_data = pcie,
+ .size = INT_PCI_MSI_NR,
+ };
+
+ msi->dev_domain = msi_create_parent_irq_domain(&info, &nwl_msi_parent_ops);
if (!msi->dev_domain) {
dev_err(dev, "failed to create dev IRQ domain\n");
return -ENOMEM;
}
- msi->msi_domain = pci_msi_create_irq_domain(fwnode,
- &nwl_msi_domain_info,
- msi->dev_domain);
- if (!msi->msi_domain) {
- dev_err(dev, "failed to create msi IRQ domain\n");
- irq_domain_remove(msi->dev_domain);
- return -ENOMEM;
- }
#endif
return 0;
}
+static void nwl_pcie_phy_power_off(struct nwl_pcie *pcie, int i)
+{
+ int err = phy_power_off(pcie->phy[i]);
+
+ if (err)
+ dev_err(pcie->dev, "could not power off phy %d (err=%d)\n", i,
+ err);
+}
+
+static void nwl_pcie_phy_exit(struct nwl_pcie *pcie, int i)
+{
+ int err = phy_exit(pcie->phy[i]);
+
+ if (err)
+ dev_err(pcie->dev, "could not exit phy %d (err=%d)\n", i, err);
+}
+
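+/*
+ * Initialise and power on all PHYs; on failure, power off and exit the ones
+ * already brought up.
+ */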
+static int nwl_pcie_phy_enable(struct nwl_pcie *pcie)
+{
+ int i, ret;
+
+ for (i = 0; i < ARRAY_SIZE(pcie->phy); i++) {
+ ret = phy_init(pcie->phy[i]);
+ if (ret)
+ goto err;
+
+ ret = phy_power_on(pcie->phy[i]);
+ if (ret) {
+ nwl_pcie_phy_exit(pcie, i);
+ goto err;
+ }
+ }
+
+ return 0;
+
+err:
+ while (i--) {
+ nwl_pcie_phy_power_off(pcie, i);
+ nwl_pcie_phy_exit(pcie, i);
+ }
+
+ return ret;
+}
+
+static void nwl_pcie_phy_disable(struct nwl_pcie *pcie)
+{
+ int i;
+
+ for (i = ARRAY_SIZE(pcie->phy); i--;) {
+ nwl_pcie_phy_power_off(pcie, i);
+ nwl_pcie_phy_exit(pcie, i);
+ }
+}
+
static int nwl_pcie_init_irq_domain(struct nwl_pcie *pcie)
{
struct device *dev = pcie->dev;
struct device_node *node = dev->of_node;
- struct device_node *legacy_intc_node;
+ struct device_node *intc_node;
- legacy_intc_node = of_get_next_child(node, NULL);
- if (!legacy_intc_node) {
+ intc_node = of_get_next_child(node, NULL);
+ if (!intc_node) {
dev_err(dev, "No legacy intc node found\n");
return -EINVAL;
}
- pcie->legacy_irq_domain = irq_domain_add_linear(legacy_intc_node,
- PCI_NUM_INTX,
- &legacy_domain_ops,
- pcie);
- of_node_put(legacy_intc_node);
- if (!pcie->legacy_irq_domain) {
+ pcie->intx_irq_domain = irq_domain_create_linear(of_fwnode_handle(intc_node), PCI_NUM_INTX,
+ &intx_domain_ops, pcie);
+ of_node_put(intc_node);
+ if (!pcie->intx_irq_domain) {
dev_err(dev, "failed to create IRQ domain\n");
return -ENOMEM;
}
@@ -625,7 +669,7 @@ static int nwl_pcie_bridge_init(struct nwl_pcie *pcie)
{
struct device *dev = pcie->dev;
struct platform_device *pdev = to_platform_device(dev);
- u32 breg_val, ecam_val, first_busno = 0;
+ u32 breg_val, ecam_val;
int err;
breg_val = nwl_bridge_readl(pcie, E_BREG_CAPABILITIES) & BREG_PRESENT;
@@ -674,24 +718,16 @@ static int nwl_pcie_bridge_init(struct nwl_pcie *pcie)
nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, E_ECAM_CONTROL) |
E_ECAM_CR_ENABLE, E_ECAM_CONTROL);
- nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, E_ECAM_CONTROL) |
- (pcie->ecam_value << E_ECAM_SIZE_SHIFT),
- E_ECAM_CONTROL);
+ ecam_val = nwl_bridge_readl(pcie, E_ECAM_CONTROL);
+ ecam_val &= ~E_ECAM_SIZE_LOC;
+ ecam_val |= NWL_ECAM_MAX_SIZE << E_ECAM_SIZE_SHIFT;
+ nwl_bridge_writel(pcie, ecam_val, E_ECAM_CONTROL);
nwl_bridge_writel(pcie, lower_32_bits(pcie->phys_ecam_base),
E_ECAM_BASE_LO);
nwl_bridge_writel(pcie, upper_32_bits(pcie->phys_ecam_base),
E_ECAM_BASE_HI);
- /* Get bus range */
- ecam_val = nwl_bridge_readl(pcie, E_ECAM_CONTROL);
- pcie->last_busno = (ecam_val & E_ECAM_SIZE_LOC) >> E_ECAM_SIZE_SHIFT;
- /* Write primary, secondary and subordinate bus numbers */
- ecam_val = first_busno;
- ecam_val |= (first_busno + 1) << 8;
- ecam_val |= (pcie->last_busno << E_ECAM_SIZE_SHIFT);
- writel(ecam_val, (pcie->ecam_base + PCI_PRIMARY_BUS));
-
if (nwl_pcie_link_up(pcie))
dev_info(dev, "Link is UP\n");
else
@@ -721,14 +757,14 @@ static int nwl_pcie_bridge_init(struct nwl_pcie *pcie)
/* Enable all misc interrupts */
nwl_bridge_writel(pcie, MSGF_MISC_SR_MASKALL, MSGF_MISC_MASK);
- /* Disable all legacy interrupts */
+ /* Disable all INTX interrupts */
nwl_bridge_writel(pcie, (u32)~MSGF_LEG_SR_MASKALL, MSGF_LEG_MASK);
- /* Clear pending legacy interrupts */
+ /* Clear pending INTX interrupts */
nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, MSGF_LEG_STATUS) &
MSGF_LEG_SR_MASKALL, MSGF_LEG_STATUS);
- /* Enable all legacy interrupts */
+ /* Enable all INTX interrupts */
nwl_bridge_writel(pcie, MSGF_LEG_SR_MASKALL, MSGF_LEG_MASK);
/* Enable the bridge config interrupt */
@@ -743,6 +779,7 @@ static int nwl_pcie_parse_dt(struct nwl_pcie *pcie,
{
struct device *dev = pcie->dev;
struct resource *res;
+ int i;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "breg");
pcie->breg_base = devm_ioremap_resource(dev, res);
@@ -770,6 +807,18 @@ static int nwl_pcie_parse_dt(struct nwl_pcie *pcie,
irq_set_chained_handler_and_data(pcie->irq_intx,
nwl_pcie_leg_handler, pcie);
+
+ for (i = 0; i < ARRAY_SIZE(pcie->phy); i++) {
+ pcie->phy[i] = devm_of_phy_get_by_index(dev, dev->of_node, i);
+ if (PTR_ERR(pcie->phy[i]) == -ENODEV) {
+ pcie->phy[i] = NULL;
+ break;
+ }
+
+ if (IS_ERR(pcie->phy[i]))
+ return PTR_ERR(pcie->phy[i]);
+ }
+
return 0;
}
@@ -790,9 +839,9 @@ static int nwl_pcie_probe(struct platform_device *pdev)
return -ENODEV;
pcie = pci_host_bridge_priv(bridge);
+ platform_set_drvdata(pdev, pcie);
pcie->dev = dev;
- pcie->ecam_value = NWL_ECAM_VALUE_DEFAULT;
err = nwl_pcie_parse_dt(pcie, pdev);
if (err) {
@@ -810,16 +859,22 @@ static int nwl_pcie_probe(struct platform_device *pdev)
return err;
}
+ err = nwl_pcie_phy_enable(pcie);
+ if (err) {
+ dev_err(dev, "could not enable PHYs\n");
+ goto err_clk;
+ }
+
err = nwl_pcie_bridge_init(pcie);
if (err) {
dev_err(dev, "HW Initialization failed\n");
- return err;
+ goto err_phy;
}
err = nwl_pcie_init_irq_domain(pcie);
if (err) {
dev_err(dev, "Failed creating IRQ Domain\n");
- return err;
+ goto err_phy;
}
bridge->sysdata = pcie;
@@ -829,11 +884,27 @@ static int nwl_pcie_probe(struct platform_device *pdev)
err = nwl_pcie_enable_msi(pcie);
if (err < 0) {
dev_err(dev, "failed to enable MSI support: %d\n", err);
- return err;
+ goto err_phy;
}
}
- return pci_host_probe(bridge);
+ err = pci_host_probe(bridge);
+ if (!err)
+ return 0;
+
+err_phy:
+ nwl_pcie_phy_disable(pcie);
+err_clk:
+ clk_disable_unprepare(pcie->clk);
+ return err;
+}
+
+static void nwl_pcie_remove(struct platform_device *pdev)
+{
+ struct nwl_pcie *pcie = platform_get_drvdata(pdev);
+
+ nwl_pcie_phy_disable(pcie);
+ clk_disable_unprepare(pcie->clk);
}
static struct platform_driver nwl_pcie_driver = {
@@ -843,5 +914,6 @@ static struct platform_driver nwl_pcie_driver = {
.of_match_table = nwl_pcie_of_match,
},
.probe = nwl_pcie_probe,
+ .remove = nwl_pcie_remove,
};
builtin_platform_driver(nwl_pcie_driver);
diff --git a/drivers/pci/controller/pcie-xilinx.c b/drivers/pci/controller/pcie-xilinx.c
index cb6e9f7b0152..937ea6ae1ac4 100644
--- a/drivers/pci/controller/pcie-xilinx.c
+++ b/drivers/pci/controller/pcie-xilinx.c
@@ -12,6 +12,7 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
+#include <linux/irqchip/irq-msi-lib.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -203,16 +204,6 @@ static void xilinx_msi_top_irq_ack(struct irq_data *d)
*/
}
-static struct irq_chip xilinx_msi_top_chip = {
- .name = "PCIe MSI",
- .irq_ack = xilinx_msi_top_irq_ack,
-};
-
-static int xilinx_msi_set_affinity(struct irq_data *d, const struct cpumask *mask, bool force)
-{
- return -EINVAL;
-}
-
static void xilinx_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
struct xilinx_pcie *pcie = irq_data_get_irq_chip_data(data);
@@ -225,7 +216,6 @@ static void xilinx_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
static struct irq_chip xilinx_msi_bottom_chip = {
.name = "Xilinx MSI",
- .irq_set_affinity = xilinx_msi_set_affinity,
.irq_compose_msi_msg = xilinx_compose_msi_msg,
};
@@ -270,28 +260,42 @@ static const struct irq_domain_ops xilinx_msi_domain_ops = {
.free = xilinx_msi_domain_free,
};
-static struct msi_domain_info xilinx_msi_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS),
- .chip = &xilinx_msi_top_chip,
+static bool xilinx_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
+ struct irq_domain *real_parent, struct msi_domain_info *info)
+{
+ struct irq_chip *chip = info->chip;
+
+ if (!msi_lib_init_dev_msi_info(dev, domain, real_parent, info))
+ return false;
+
+ chip->irq_ack = xilinx_msi_top_irq_ack;
+ return true;
+}
+
+#define XILINX_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
+ MSI_FLAG_USE_DEF_CHIP_OPS | \
+ MSI_FLAG_NO_AFFINITY)
+
+static const struct msi_parent_ops xilinx_msi_parent_ops = {
+ .required_flags = XILINX_MSI_FLAGS_REQUIRED,
+ .supported_flags = MSI_GENERIC_FLAGS_MASK,
+ .bus_select_token = DOMAIN_BUS_PCI_MSI,
+ .prefix = "xilinx-",
+ .init_dev_msi_info = xilinx_init_dev_msi_info,
};
static int xilinx_allocate_msi_domains(struct xilinx_pcie *pcie)
{
- struct fwnode_handle *fwnode = dev_fwnode(pcie->dev);
- struct irq_domain *parent;
-
- parent = irq_domain_create_linear(fwnode, XILINX_NUM_MSI_IRQS,
- &xilinx_msi_domain_ops, pcie);
- if (!parent) {
- dev_err(pcie->dev, "failed to create IRQ domain\n");
- return -ENOMEM;
- }
- irq_domain_update_bus_token(parent, DOMAIN_BUS_NEXUS);
-
- pcie->msi_domain = pci_msi_create_irq_domain(fwnode, &xilinx_msi_info, parent);
+ struct irq_domain_info info = {
+ .fwnode = dev_fwnode(pcie->dev),
+ .ops = &xilinx_msi_domain_ops,
+ .host_data = pcie,
+ .size = XILINX_NUM_MSI_IRQS,
+ };
+
+ pcie->msi_domain = msi_create_parent_irq_domain(&info, &xilinx_msi_parent_ops);
if (!pcie->msi_domain) {
dev_err(pcie->dev, "failed to create MSI domain\n");
- irq_domain_remove(parent);
return -ENOMEM;
}
@@ -300,10 +304,7 @@ static int xilinx_allocate_msi_domains(struct xilinx_pcie *pcie)
static void xilinx_free_msi_domains(struct xilinx_pcie *pcie)
{
- struct irq_domain *parent = pcie->msi_domain->parent;
-
irq_domain_remove(pcie->msi_domain);
- irq_domain_remove(parent);
}
/* INTx Functions */
@@ -399,7 +400,7 @@ static irqreturn_t xilinx_pcie_intr_handler(int irq, void *data)
if (val & XILINX_PCIE_RPIFR1_MSI_INTR) {
val = pcie_read(pcie, XILINX_PCIE_REG_RPIFR2) &
XILINX_PCIE_RPIFR2_MSG_DATA;
- domain = pcie->msi_domain->parent;
+ domain = pcie->msi_domain;
} else {
val = (val & XILINX_PCIE_RPIFR1_INTR_MASK) >>
XILINX_PCIE_RPIFR1_INTR_SHIFT;
@@ -466,9 +467,8 @@ static int xilinx_pcie_init_irq_domain(struct xilinx_pcie *pcie)
return -ENODEV;
}
- pcie->leg_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
- &intx_domain_ops,
- pcie);
+ pcie->leg_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node), PCI_NUM_INTX,
+ &intx_domain_ops, pcie);
of_node_put(pcie_intc_node);
if (!pcie->leg_domain) {
dev_err(dev, "Failed to get a INTx IRQ domain\n");
diff --git a/drivers/pci/controller/plda/Kconfig b/drivers/pci/controller/plda/Kconfig
new file mode 100644
index 000000000000..62120101139c
--- /dev/null
+++ b/drivers/pci/controller/plda/Kconfig
@@ -0,0 +1,31 @@
+# SPDX-License-Identifier: GPL-2.0
+
+menu "PLDA-based PCIe controllers"
+ depends on PCI
+
+config PCIE_PLDA_HOST
+ bool
+ select IRQ_MSI_LIB
+
+config PCIE_MICROCHIP_HOST
+ tristate "Microchip AXI PCIe controller"
+ depends on PCI_MSI && OF
+ select PCI_HOST_COMMON
+ select PCIE_PLDA_HOST
+ help
+ Say Y here if you want the kernel to support the Microchip AXI PCIe
+ Host Bridge driver.
+
+config PCIE_STARFIVE_HOST
+ tristate "StarFive PCIe host controller"
+ depends on PCI_MSI && OF
+ depends on ARCH_STARFIVE || COMPILE_TEST
+ select PCIE_PLDA_HOST
+ help
+ Say Y here if you want to support the StarFive PCIe controller in
+ host mode. StarFive PCIe controller uses PLDA PCIe core.
+
+ If you choose to build this driver as a module it will be dynamically
+ linked and the module will be called pcie-starfive.ko.
+
+endmenu
diff --git a/drivers/pci/controller/plda/Makefile b/drivers/pci/controller/plda/Makefile
new file mode 100644
index 000000000000..0ac6851bed48
--- /dev/null
+++ b/drivers/pci/controller/plda/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_PCIE_PLDA_HOST) += pcie-plda-host.o
+obj-$(CONFIG_PCIE_MICROCHIP_HOST) += pcie-microchip-host.o
+obj-$(CONFIG_PCIE_STARFIVE_HOST) += pcie-starfive.o
diff --git a/drivers/pci/controller/plda/pcie-microchip-host.c b/drivers/pci/controller/plda/pcie-microchip-host.c
new file mode 100644
index 000000000000..24bbf93b8051
--- /dev/null
+++ b/drivers/pci/controller/plda/pcie-microchip-host.c
@@ -0,0 +1,834 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Microchip AXI PCIe Bridge host controller driver
+ *
+ * Copyright (c) 2018 - 2020 Microchip Corporation. All rights reserved.
+ *
+ * Author: Daire McNamara <daire.mcnamara@microchip.com>
+ */
+
+#include <linux/align.h>
+#include <linux/bits.h>
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/log2.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/of_address.h>
+#include <linux/of_pci.h>
+#include <linux/pci-ecam.h>
+#include <linux/platform_device.h>
+#include <linux/wordpart.h>
+
+#include "../../pci.h"
+#include "../pci-host-common.h"
+#include "pcie-plda.h"
+
+#define MC_MAX_NUM_INBOUND_WINDOWS 8
+#define MPFS_NC_BOUNCE_ADDR 0x80000000
+
+/* PCIe Bridge Phy and Controller Phy offsets */
+#define MC_PCIE1_BRIDGE_ADDR 0x00008000u
+#define MC_PCIE1_CTRL_ADDR 0x0000a000u
+
+/* PCIe Controller Phy Regs */
+#define SEC_ERROR_EVENT_CNT 0x20
+#define DED_ERROR_EVENT_CNT 0x24
+#define SEC_ERROR_INT 0x28
+#define SEC_ERROR_INT_TX_RAM_SEC_ERR_INT GENMASK(3, 0)
+#define SEC_ERROR_INT_RX_RAM_SEC_ERR_INT GENMASK(7, 4)
+#define SEC_ERROR_INT_PCIE2AXI_RAM_SEC_ERR_INT GENMASK(11, 8)
+#define SEC_ERROR_INT_AXI2PCIE_RAM_SEC_ERR_INT GENMASK(15, 12)
+#define SEC_ERROR_INT_ALL_RAM_SEC_ERR_INT GENMASK(15, 0)
+#define NUM_SEC_ERROR_INTS (4)
+#define SEC_ERROR_INT_MASK 0x2c
+#define DED_ERROR_INT 0x30
+#define DED_ERROR_INT_TX_RAM_DED_ERR_INT GENMASK(3, 0)
+#define DED_ERROR_INT_RX_RAM_DED_ERR_INT GENMASK(7, 4)
+#define DED_ERROR_INT_PCIE2AXI_RAM_DED_ERR_INT GENMASK(11, 8)
+#define DED_ERROR_INT_AXI2PCIE_RAM_DED_ERR_INT GENMASK(15, 12)
+#define DED_ERROR_INT_ALL_RAM_DED_ERR_INT GENMASK(15, 0)
+#define NUM_DED_ERROR_INTS (4)
+#define DED_ERROR_INT_MASK 0x34
+#define ECC_CONTROL 0x38
+#define ECC_CONTROL_TX_RAM_INJ_ERROR_0 BIT(0)
+#define ECC_CONTROL_TX_RAM_INJ_ERROR_1 BIT(1)
+#define ECC_CONTROL_TX_RAM_INJ_ERROR_2 BIT(2)
+#define ECC_CONTROL_TX_RAM_INJ_ERROR_3 BIT(3)
+#define ECC_CONTROL_RX_RAM_INJ_ERROR_0 BIT(4)
+#define ECC_CONTROL_RX_RAM_INJ_ERROR_1 BIT(5)
+#define ECC_CONTROL_RX_RAM_INJ_ERROR_2 BIT(6)
+#define ECC_CONTROL_RX_RAM_INJ_ERROR_3 BIT(7)
+#define ECC_CONTROL_PCIE2AXI_RAM_INJ_ERROR_0 BIT(8)
+#define ECC_CONTROL_PCIE2AXI_RAM_INJ_ERROR_1 BIT(9)
+#define ECC_CONTROL_PCIE2AXI_RAM_INJ_ERROR_2 BIT(10)
+#define ECC_CONTROL_PCIE2AXI_RAM_INJ_ERROR_3 BIT(11)
+#define ECC_CONTROL_AXI2PCIE_RAM_INJ_ERROR_0 BIT(12)
+#define ECC_CONTROL_AXI2PCIE_RAM_INJ_ERROR_1 BIT(13)
+#define ECC_CONTROL_AXI2PCIE_RAM_INJ_ERROR_2 BIT(14)
+#define ECC_CONTROL_AXI2PCIE_RAM_INJ_ERROR_3 BIT(15)
+#define ECC_CONTROL_TX_RAM_ECC_BYPASS BIT(24)
+#define ECC_CONTROL_RX_RAM_ECC_BYPASS BIT(25)
+#define ECC_CONTROL_PCIE2AXI_RAM_ECC_BYPASS BIT(26)
+#define ECC_CONTROL_AXI2PCIE_RAM_ECC_BYPASS BIT(27)
+#define PCIE_EVENT_INT 0x14c
+#define PCIE_EVENT_INT_L2_EXIT_INT BIT(0)
+#define PCIE_EVENT_INT_HOTRST_EXIT_INT BIT(1)
+#define PCIE_EVENT_INT_DLUP_EXIT_INT BIT(2)
+#define PCIE_EVENT_INT_MASK GENMASK(2, 0)
+#define PCIE_EVENT_INT_L2_EXIT_INT_MASK BIT(16)
+#define PCIE_EVENT_INT_HOTRST_EXIT_INT_MASK BIT(17)
+#define PCIE_EVENT_INT_DLUP_EXIT_INT_MASK BIT(18)
+#define PCIE_EVENT_INT_ENB_MASK GENMASK(18, 16)
+#define PCIE_EVENT_INT_ENB_SHIFT 16
+#define NUM_PCIE_EVENTS (3)
+
+/* PCIe Config space MSI capability structure */
+#define MC_MSI_CAP_CTRL_OFFSET 0xe0u
+
+/* Events */
+#define EVENT_PCIE_L2_EXIT 0
+#define EVENT_PCIE_HOTRST_EXIT 1
+#define EVENT_PCIE_DLUP_EXIT 2
+#define EVENT_SEC_TX_RAM_SEC_ERR 3
+#define EVENT_SEC_RX_RAM_SEC_ERR 4
+#define EVENT_SEC_PCIE2AXI_RAM_SEC_ERR 5
+#define EVENT_SEC_AXI2PCIE_RAM_SEC_ERR 6
+#define EVENT_DED_TX_RAM_DED_ERR 7
+#define EVENT_DED_RX_RAM_DED_ERR 8
+#define EVENT_DED_PCIE2AXI_RAM_DED_ERR 9
+#define EVENT_DED_AXI2PCIE_RAM_DED_ERR 10
+#define EVENT_LOCAL_DMA_END_ENGINE_0 11
+#define EVENT_LOCAL_DMA_END_ENGINE_1 12
+#define EVENT_LOCAL_DMA_ERROR_ENGINE_0 13
+#define EVENT_LOCAL_DMA_ERROR_ENGINE_1 14
+#define NUM_MC_EVENTS 15
+#define EVENT_LOCAL_A_ATR_EVT_POST_ERR (NUM_MC_EVENTS + PLDA_AXI_POST_ERR)
+#define EVENT_LOCAL_A_ATR_EVT_FETCH_ERR (NUM_MC_EVENTS + PLDA_AXI_FETCH_ERR)
+#define EVENT_LOCAL_A_ATR_EVT_DISCARD_ERR (NUM_MC_EVENTS + PLDA_AXI_DISCARD_ERR)
+#define EVENT_LOCAL_A_ATR_EVT_DOORBELL (NUM_MC_EVENTS + PLDA_AXI_DOORBELL)
+#define EVENT_LOCAL_P_ATR_EVT_POST_ERR (NUM_MC_EVENTS + PLDA_PCIE_POST_ERR)
+#define EVENT_LOCAL_P_ATR_EVT_FETCH_ERR (NUM_MC_EVENTS + PLDA_PCIE_FETCH_ERR)
+#define EVENT_LOCAL_P_ATR_EVT_DISCARD_ERR (NUM_MC_EVENTS + PLDA_PCIE_DISCARD_ERR)
+#define EVENT_LOCAL_P_ATR_EVT_DOORBELL (NUM_MC_EVENTS + PLDA_PCIE_DOORBELL)
+#define EVENT_LOCAL_PM_MSI_INT_INTX (NUM_MC_EVENTS + PLDA_INTX)
+#define EVENT_LOCAL_PM_MSI_INT_MSI (NUM_MC_EVENTS + PLDA_MSI)
+#define EVENT_LOCAL_PM_MSI_INT_AER_EVT (NUM_MC_EVENTS + PLDA_AER_EVENT)
+#define EVENT_LOCAL_PM_MSI_INT_EVENTS (NUM_MC_EVENTS + PLDA_MISC_EVENTS)
+#define EVENT_LOCAL_PM_MSI_INT_SYS_ERR (NUM_MC_EVENTS + PLDA_SYS_ERR)
+#define NUM_EVENTS (NUM_MC_EVENTS + PLDA_INT_EVENT_NUM)
+
+#define PCIE_EVENT_CAUSE(x, s) \
+ [EVENT_PCIE_ ## x] = { __stringify(x), s }
+
+#define SEC_ERROR_CAUSE(x, s) \
+ [EVENT_SEC_ ## x] = { __stringify(x), s }
+
+#define DED_ERROR_CAUSE(x, s) \
+ [EVENT_DED_ ## x] = { __stringify(x), s }
+
+#define LOCAL_EVENT_CAUSE(x, s) \
+ [EVENT_LOCAL_ ## x] = { __stringify(x), s }
+
+#define PCIE_EVENT(x) \
+ .offset = PCIE_EVENT_INT, \
+ .mask_offset = PCIE_EVENT_INT, \
+ .mask_high = 1, \
+ .mask = PCIE_EVENT_INT_ ## x ## _INT, \
+ .enb_mask = PCIE_EVENT_INT_ENB_MASK
+
+#define SEC_EVENT(x) \
+ .offset = SEC_ERROR_INT, \
+ .mask_offset = SEC_ERROR_INT_MASK, \
+ .mask = SEC_ERROR_INT_ ## x ## _INT, \
+ .mask_high = 1, \
+ .enb_mask = 0
+
+#define DED_EVENT(x) \
+ .offset = DED_ERROR_INT, \
+ .mask_offset = DED_ERROR_INT_MASK, \
+ .mask_high = 1, \
+ .mask = DED_ERROR_INT_ ## x ## _INT, \
+ .enb_mask = 0
+
+#define LOCAL_EVENT(x) \
+ .offset = ISTATUS_LOCAL, \
+ .mask_offset = IMASK_LOCAL, \
+ .mask_high = 0, \
+ .mask = x ## _MASK, \
+ .enb_mask = 0
+
+#define PCIE_EVENT_TO_EVENT_MAP(x) \
+ { PCIE_EVENT_INT_ ## x ## _INT, EVENT_PCIE_ ## x }
+
+#define SEC_ERROR_TO_EVENT_MAP(x) \
+ { SEC_ERROR_INT_ ## x ## _INT, EVENT_SEC_ ## x }
+
+#define DED_ERROR_TO_EVENT_MAP(x) \
+ { DED_ERROR_INT_ ## x ## _INT, EVENT_DED_ ## x }
+
+#define LOCAL_STATUS_TO_EVENT_MAP(x) \
+ { x ## _MASK, EVENT_LOCAL_ ## x }
+
+struct event_map {
+ u32 reg_mask;
+ u32 event_bit;
+};
+
+struct mc_pcie {
+ struct plda_pcie_rp plda;
+ void __iomem *bridge_base_addr;
+ void __iomem *ctrl_base_addr;
+};
+
+struct cause {
+ const char *sym;
+ const char *str;
+};
+
+static const struct cause event_cause[NUM_EVENTS] = {
+ PCIE_EVENT_CAUSE(L2_EXIT, "L2 exit event"),
+ PCIE_EVENT_CAUSE(HOTRST_EXIT, "Hot reset exit event"),
+ PCIE_EVENT_CAUSE(DLUP_EXIT, "DLUP exit event"),
+ SEC_ERROR_CAUSE(TX_RAM_SEC_ERR, "sec error in tx buffer"),
+ SEC_ERROR_CAUSE(RX_RAM_SEC_ERR, "sec error in rx buffer"),
+ SEC_ERROR_CAUSE(PCIE2AXI_RAM_SEC_ERR, "sec error in pcie2axi buffer"),
+ SEC_ERROR_CAUSE(AXI2PCIE_RAM_SEC_ERR, "sec error in axi2pcie buffer"),
+ DED_ERROR_CAUSE(TX_RAM_DED_ERR, "ded error in tx buffer"),
+ DED_ERROR_CAUSE(RX_RAM_DED_ERR, "ded error in rx buffer"),
+ DED_ERROR_CAUSE(PCIE2AXI_RAM_DED_ERR, "ded error in pcie2axi buffer"),
+ DED_ERROR_CAUSE(AXI2PCIE_RAM_DED_ERR, "ded error in axi2pcie buffer"),
+ LOCAL_EVENT_CAUSE(DMA_ERROR_ENGINE_0, "dma engine 0 error"),
+ LOCAL_EVENT_CAUSE(DMA_ERROR_ENGINE_1, "dma engine 1 error"),
+ LOCAL_EVENT_CAUSE(A_ATR_EVT_POST_ERR, "axi write request error"),
+ LOCAL_EVENT_CAUSE(A_ATR_EVT_FETCH_ERR, "axi read request error"),
+ LOCAL_EVENT_CAUSE(A_ATR_EVT_DISCARD_ERR, "axi read timeout"),
+ LOCAL_EVENT_CAUSE(P_ATR_EVT_POST_ERR, "pcie write request error"),
+ LOCAL_EVENT_CAUSE(P_ATR_EVT_FETCH_ERR, "pcie read request error"),
+ LOCAL_EVENT_CAUSE(P_ATR_EVT_DISCARD_ERR, "pcie read timeout"),
+ LOCAL_EVENT_CAUSE(PM_MSI_INT_AER_EVT, "aer event"),
+ LOCAL_EVENT_CAUSE(PM_MSI_INT_EVENTS, "pm/ltr/hotplug event"),
+ LOCAL_EVENT_CAUSE(PM_MSI_INT_SYS_ERR, "system error"),
+};
+
+static struct event_map pcie_event_to_event[] = {
+ PCIE_EVENT_TO_EVENT_MAP(L2_EXIT),
+ PCIE_EVENT_TO_EVENT_MAP(HOTRST_EXIT),
+ PCIE_EVENT_TO_EVENT_MAP(DLUP_EXIT),
+};
+
+static struct event_map sec_error_to_event[] = {
+ SEC_ERROR_TO_EVENT_MAP(TX_RAM_SEC_ERR),
+ SEC_ERROR_TO_EVENT_MAP(RX_RAM_SEC_ERR),
+ SEC_ERROR_TO_EVENT_MAP(PCIE2AXI_RAM_SEC_ERR),
+ SEC_ERROR_TO_EVENT_MAP(AXI2PCIE_RAM_SEC_ERR),
+};
+
+static struct event_map ded_error_to_event[] = {
+ DED_ERROR_TO_EVENT_MAP(TX_RAM_DED_ERR),
+ DED_ERROR_TO_EVENT_MAP(RX_RAM_DED_ERR),
+ DED_ERROR_TO_EVENT_MAP(PCIE2AXI_RAM_DED_ERR),
+ DED_ERROR_TO_EVENT_MAP(AXI2PCIE_RAM_DED_ERR),
+};
+
+static struct event_map local_status_to_event[] = {
+ LOCAL_STATUS_TO_EVENT_MAP(DMA_END_ENGINE_0),
+ LOCAL_STATUS_TO_EVENT_MAP(DMA_END_ENGINE_1),
+ LOCAL_STATUS_TO_EVENT_MAP(DMA_ERROR_ENGINE_0),
+ LOCAL_STATUS_TO_EVENT_MAP(DMA_ERROR_ENGINE_1),
+ LOCAL_STATUS_TO_EVENT_MAP(A_ATR_EVT_POST_ERR),
+ LOCAL_STATUS_TO_EVENT_MAP(A_ATR_EVT_FETCH_ERR),
+ LOCAL_STATUS_TO_EVENT_MAP(A_ATR_EVT_DISCARD_ERR),
+ LOCAL_STATUS_TO_EVENT_MAP(A_ATR_EVT_DOORBELL),
+ LOCAL_STATUS_TO_EVENT_MAP(P_ATR_EVT_POST_ERR),
+ LOCAL_STATUS_TO_EVENT_MAP(P_ATR_EVT_FETCH_ERR),
+ LOCAL_STATUS_TO_EVENT_MAP(P_ATR_EVT_DISCARD_ERR),
+ LOCAL_STATUS_TO_EVENT_MAP(P_ATR_EVT_DOORBELL),
+ LOCAL_STATUS_TO_EVENT_MAP(PM_MSI_INT_INTX),
+ LOCAL_STATUS_TO_EVENT_MAP(PM_MSI_INT_MSI),
+ LOCAL_STATUS_TO_EVENT_MAP(PM_MSI_INT_AER_EVT),
+ LOCAL_STATUS_TO_EVENT_MAP(PM_MSI_INT_EVENTS),
+ LOCAL_STATUS_TO_EVENT_MAP(PM_MSI_INT_SYS_ERR),
+};
+
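+/*
+ * Per-event register description: when @mask_high is set, writing a 1 to the
+ * mask register masks the event; when it is clear, the register is an
+ * interrupt-enable mask and the bit is cleared to mask the event.
+ */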
+static struct {
+ u32 offset;
+ u32 mask;
+ u32 shift;
+ u32 enb_mask;
+ u32 mask_high;
+ u32 mask_offset;
+} event_descs[] = {
+ { PCIE_EVENT(L2_EXIT) },
+ { PCIE_EVENT(HOTRST_EXIT) },
+ { PCIE_EVENT(DLUP_EXIT) },
+ { SEC_EVENT(TX_RAM_SEC_ERR) },
+ { SEC_EVENT(RX_RAM_SEC_ERR) },
+ { SEC_EVENT(PCIE2AXI_RAM_SEC_ERR) },
+ { SEC_EVENT(AXI2PCIE_RAM_SEC_ERR) },
+ { DED_EVENT(TX_RAM_DED_ERR) },
+ { DED_EVENT(RX_RAM_DED_ERR) },
+ { DED_EVENT(PCIE2AXI_RAM_DED_ERR) },
+ { DED_EVENT(AXI2PCIE_RAM_DED_ERR) },
+ { LOCAL_EVENT(DMA_END_ENGINE_0) },
+ { LOCAL_EVENT(DMA_END_ENGINE_1) },
+ { LOCAL_EVENT(DMA_ERROR_ENGINE_0) },
+ { LOCAL_EVENT(DMA_ERROR_ENGINE_1) },
+ { LOCAL_EVENT(A_ATR_EVT_POST_ERR) },
+ { LOCAL_EVENT(A_ATR_EVT_FETCH_ERR) },
+ { LOCAL_EVENT(A_ATR_EVT_DISCARD_ERR) },
+ { LOCAL_EVENT(A_ATR_EVT_DOORBELL) },
+ { LOCAL_EVENT(P_ATR_EVT_POST_ERR) },
+ { LOCAL_EVENT(P_ATR_EVT_FETCH_ERR) },
+ { LOCAL_EVENT(P_ATR_EVT_DISCARD_ERR) },
+ { LOCAL_EVENT(P_ATR_EVT_DOORBELL) },
+ { LOCAL_EVENT(PM_MSI_INT_INTX) },
+ { LOCAL_EVENT(PM_MSI_INT_MSI) },
+ { LOCAL_EVENT(PM_MSI_INT_AER_EVT) },
+ { LOCAL_EVENT(PM_MSI_INT_EVENTS) },
+ { LOCAL_EVENT(PM_MSI_INT_SYS_ERR) },
+};
+
+static char poss_clks[][5] = { "fic0", "fic1", "fic2", "fic3" };
+
+static struct mc_pcie *port;
+
+static void mc_pcie_enable_msi(struct mc_pcie *port, void __iomem *ecam)
+{
+ struct plda_msi *msi = &port->plda.msi;
+ u16 reg;
+ u8 queue_size;
+
+ /* Fixup MSI enable flag */
+ reg = readw_relaxed(ecam + MC_MSI_CAP_CTRL_OFFSET + PCI_MSI_FLAGS);
+ reg |= PCI_MSI_FLAGS_ENABLE;
+ writew_relaxed(reg, ecam + MC_MSI_CAP_CTRL_OFFSET + PCI_MSI_FLAGS);
+
+ /* Fixup PCI MSI queue flags */
+ queue_size = FIELD_GET(PCI_MSI_FLAGS_QMASK, reg);
+ reg |= FIELD_PREP(PCI_MSI_FLAGS_QSIZE, queue_size);
+ writew_relaxed(reg, ecam + MC_MSI_CAP_CTRL_OFFSET + PCI_MSI_FLAGS);
+
+ /* Fixup MSI addr fields */
+ writel_relaxed(lower_32_bits(msi->vector_phy),
+ ecam + MC_MSI_CAP_CTRL_OFFSET + PCI_MSI_ADDRESS_LO);
+ writel_relaxed(upper_32_bits(msi->vector_phy),
+ ecam + MC_MSI_CAP_CTRL_OFFSET + PCI_MSI_ADDRESS_HI);
+}
+
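+/* Translate a raw status register bit into the corresponding event bit */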
+static inline u32 reg_to_event(u32 reg, struct event_map field)
+{
+ return (reg & field.reg_mask) ? BIT(field.event_bit) : 0;
+}
+
+static u32 pcie_events(struct mc_pcie *port)
+{
+ u32 reg = readl_relaxed(port->ctrl_base_addr + PCIE_EVENT_INT);
+ u32 val = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(pcie_event_to_event); i++)
+ val |= reg_to_event(reg, pcie_event_to_event[i]);
+
+ return val;
+}
+
+static u32 sec_errors(struct mc_pcie *port)
+{
+ u32 reg = readl_relaxed(port->ctrl_base_addr + SEC_ERROR_INT);
+ u32 val = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sec_error_to_event); i++)
+ val |= reg_to_event(reg, sec_error_to_event[i]);
+
+ return val;
+}
+
+static u32 ded_errors(struct mc_pcie *port)
+{
+ u32 reg = readl_relaxed(port->ctrl_base_addr + DED_ERROR_INT);
+ u32 val = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ded_error_to_event); i++)
+ val |= reg_to_event(reg, ded_error_to_event[i]);
+
+ return val;
+}
+
+static u32 local_events(struct mc_pcie *port)
+{
+ u32 reg = readl_relaxed(port->bridge_base_addr + ISTATUS_LOCAL);
+ u32 val = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(local_status_to_event); i++)
+ val |= reg_to_event(reg, local_status_to_event[i]);
+
+ return val;
+}
+
+static u32 mc_get_events(struct plda_pcie_rp *port)
+{
+ struct mc_pcie *mc_port = container_of(port, struct mc_pcie, plda);
+ u32 events = 0;
+
+ events |= pcie_events(mc_port);
+ events |= sec_errors(mc_port);
+ events |= ded_errors(mc_port);
+ events |= local_events(mc_port);
+
+ return events;
+}
+
+static irqreturn_t mc_event_handler(int irq, void *dev_id)
+{
+ struct plda_pcie_rp *port = dev_id;
+ struct device *dev = port->dev;
+ struct irq_data *data;
+
+ data = irq_domain_get_irq_data(port->event_domain, irq);
+
+ if (event_cause[data->hwirq].str)
+ dev_err_ratelimited(dev, "%s\n", event_cause[data->hwirq].str);
+ else
+ dev_err_ratelimited(dev, "bad event IRQ %ld\n", data->hwirq);
+
+ return IRQ_HANDLED;
+}
+
+static void mc_ack_event_irq(struct irq_data *data)
+{
+ struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
+ struct mc_pcie *mc_port = container_of(port, struct mc_pcie, plda);
+ u32 event = data->hwirq;
+ void __iomem *addr;
+ u32 mask;
+
+ if (event_descs[event].offset == ISTATUS_LOCAL)
+ addr = mc_port->bridge_base_addr;
+ else
+ addr = mc_port->ctrl_base_addr;
+
+ addr += event_descs[event].offset;
+ mask = event_descs[event].mask;
+ mask |= event_descs[event].enb_mask;
+
+ writel_relaxed(mask, addr);
+}
+
+static void mc_mask_event_irq(struct irq_data *data)
+{
+ struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
+ struct mc_pcie *mc_port = container_of(port, struct mc_pcie, plda);
+ u32 event = data->hwirq;
+ void __iomem *addr;
+ u32 mask;
+ u32 val;
+
+ if (event_descs[event].offset == ISTATUS_LOCAL)
+ addr = mc_port->bridge_base_addr;
+ else
+ addr = mc_port->ctrl_base_addr;
+
+ addr += event_descs[event].mask_offset;
+ mask = event_descs[event].mask;
+ if (event_descs[event].enb_mask) {
+ mask <<= PCIE_EVENT_INT_ENB_SHIFT;
+ mask &= PCIE_EVENT_INT_ENB_MASK;
+ }
+
+ if (!event_descs[event].mask_high)
+ mask = ~mask;
+
+ raw_spin_lock(&port->lock);
+ val = readl_relaxed(addr);
+ if (event_descs[event].mask_high)
+ val |= mask;
+ else
+ val &= mask;
+
+ writel_relaxed(val, addr);
+ raw_spin_unlock(&port->lock);
+}
+
+static void mc_unmask_event_irq(struct irq_data *data)
+{
+ struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
+ struct mc_pcie *mc_port = container_of(port, struct mc_pcie, plda);
+ u32 event = data->hwirq;
+ void __iomem *addr;
+ u32 mask;
+ u32 val;
+
+ if (event_descs[event].offset == ISTATUS_LOCAL)
+ addr = mc_port->bridge_base_addr;
+ else
+ addr = mc_port->ctrl_base_addr;
+
+ addr += event_descs[event].mask_offset;
+ mask = event_descs[event].mask;
+
+ if (event_descs[event].enb_mask)
+ mask <<= PCIE_EVENT_INT_ENB_SHIFT;
+
+ if (event_descs[event].mask_high)
+ mask = ~mask;
+
+ if (event_descs[event].enb_mask)
+ mask &= PCIE_EVENT_INT_ENB_MASK;
+
+ raw_spin_lock(&port->lock);
+ val = readl_relaxed(addr);
+ if (event_descs[event].mask_high)
+ val &= mask;
+ else
+ val |= mask;
+ writel_relaxed(val, addr);
+ raw_spin_unlock(&port->lock);
+}
+
+static struct irq_chip mc_event_irq_chip = {
+ .name = "Microchip PCIe EVENT",
+ .irq_ack = mc_ack_event_irq,
+ .irq_mask = mc_mask_event_irq,
+ .irq_unmask = mc_unmask_event_irq,
+};
+
+static inline void mc_pcie_deinit_clk(void *data)
+{
+ struct clk *clk = data;
+
+ clk_disable_unprepare(clk);
+}
+
+static inline struct clk *mc_pcie_init_clk(struct device *dev, const char *id)
+{
+ struct clk *clk;
+ int ret;
+
+ clk = devm_clk_get_optional(dev, id);
+ if (IS_ERR(clk))
+ return clk;
+ if (!clk)
+ return clk;
+
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ return ERR_PTR(ret);
+
+ devm_add_action_or_reset(dev, mc_pcie_deinit_clk, clk);
+
+ return clk;
+}
+
+static int mc_pcie_init_clks(struct device *dev)
+{
+ int i;
+ struct clk *fic;
+
+ /*
+ * PCIe may be clocked via the Fabric Interface using between 1 and 4
+ * clocks. Scan the DT for clocks and enable any that are present.
+ */
+ for (i = 0; i < ARRAY_SIZE(poss_clks); i++) {
+ fic = mc_pcie_init_clk(dev, poss_clks[i]);
+ if (IS_ERR(fic))
+ return PTR_ERR(fic);
+ }
+
+ return 0;
+}
+
+static int mc_request_event_irq(struct plda_pcie_rp *plda, int event_irq,
+ int event)
+{
+ return devm_request_irq(plda->dev, event_irq, mc_event_handler,
+ 0, event_cause[event].sym, plda);
+}
+
+static const struct plda_event_ops mc_event_ops = {
+ .get_events = mc_get_events,
+};
+
+static const struct plda_event mc_event = {
+ .request_event_irq = mc_request_event_irq,
+ .intx_event = EVENT_LOCAL_PM_MSI_INT_INTX,
+ .msi_event = EVENT_LOCAL_PM_MSI_INT_MSI,
+};
+
+static inline void mc_clear_secs(struct mc_pcie *port)
+{
+ writel_relaxed(SEC_ERROR_INT_ALL_RAM_SEC_ERR_INT,
+ port->ctrl_base_addr + SEC_ERROR_INT);
+ writel_relaxed(0, port->ctrl_base_addr + SEC_ERROR_EVENT_CNT);
+}
+
+static inline void mc_clear_deds(struct mc_pcie *port)
+{
+ writel_relaxed(DED_ERROR_INT_ALL_RAM_DED_ERR_INT,
+ port->ctrl_base_addr + DED_ERROR_INT);
+ writel_relaxed(0, port->ctrl_base_addr + DED_ERROR_EVENT_CNT);
+}
+
+static void mc_disable_interrupts(struct mc_pcie *port)
+{
+ u32 val;
+
+ /* Ensure ECC bypass is enabled */
+ val = ECC_CONTROL_TX_RAM_ECC_BYPASS |
+ ECC_CONTROL_RX_RAM_ECC_BYPASS |
+ ECC_CONTROL_PCIE2AXI_RAM_ECC_BYPASS |
+ ECC_CONTROL_AXI2PCIE_RAM_ECC_BYPASS;
+ writel_relaxed(val, port->ctrl_base_addr + ECC_CONTROL);
+
+ /* Disable SEC errors and clear any outstanding */
+ writel_relaxed(SEC_ERROR_INT_ALL_RAM_SEC_ERR_INT,
+ port->ctrl_base_addr + SEC_ERROR_INT_MASK);
+ mc_clear_secs(port);
+
+ /* Disable DED errors and clear any outstanding */
+ writel_relaxed(DED_ERROR_INT_ALL_RAM_DED_ERR_INT,
+ port->ctrl_base_addr + DED_ERROR_INT_MASK);
+ mc_clear_deds(port);
+
+ /* Disable local interrupts and clear any outstanding */
+ writel_relaxed(0, port->bridge_base_addr + IMASK_LOCAL);
+ writel_relaxed(GENMASK(31, 0), port->bridge_base_addr + ISTATUS_LOCAL);
+ writel_relaxed(GENMASK(31, 0), port->bridge_base_addr + ISTATUS_MSI);
+
+ /* Disable PCIe events and clear any outstanding */
+ val = PCIE_EVENT_INT_L2_EXIT_INT |
+ PCIE_EVENT_INT_HOTRST_EXIT_INT |
+ PCIE_EVENT_INT_DLUP_EXIT_INT |
+ PCIE_EVENT_INT_L2_EXIT_INT_MASK |
+ PCIE_EVENT_INT_HOTRST_EXIT_INT_MASK |
+ PCIE_EVENT_INT_DLUP_EXIT_INT_MASK;
+ writel_relaxed(val, port->ctrl_base_addr + PCIE_EVENT_INT);
+
+ /* Disable host interrupts and clear any outstanding */
+ writel_relaxed(0, port->bridge_base_addr + IMASK_HOST);
+ writel_relaxed(GENMASK(31, 0), port->bridge_base_addr + ISTATUS_HOST);
+}
+
+static void mc_pcie_setup_inbound_atr(struct mc_pcie *port, int window_index,
+ u64 axi_addr, u64 pcie_addr, u64 size)
+{
+ u32 table_offset = window_index * ATR_ENTRY_SIZE;
+ void __iomem *table_addr = port->bridge_base_addr + table_offset;
+ u32 atr_sz;
+ u32 val;
+
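+	/* The ATR size field encodes the window as log2(size) - 1, e.g. SZ_4K -> 11 */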
+ atr_sz = ilog2(size) - 1;
+
+ val = ALIGN_DOWN(lower_32_bits(pcie_addr), SZ_4K);
+ val |= FIELD_PREP(ATR_SIZE_MASK, atr_sz);
+ val |= ATR_IMPL_ENABLE;
+
+ writel(val, table_addr + ATR0_PCIE_WIN0_SRCADDR_PARAM);
+
+ writel(upper_32_bits(pcie_addr), table_addr + ATR0_PCIE_WIN0_SRC_ADDR);
+
+ writel(lower_32_bits(axi_addr), table_addr + ATR0_PCIE_WIN0_TRSL_ADDR_LSB);
+ writel(upper_32_bits(axi_addr), table_addr + ATR0_PCIE_WIN0_TRSL_ADDR_UDW);
+
+ writel(TRSL_ID_AXI4_MASTER_0, table_addr + ATR0_PCIE_WIN0_TRSL_PARAM);
+}
+
+static int mc_pcie_setup_inbound_ranges(struct platform_device *pdev,
+ struct mc_pcie *port)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *dn = dev->of_node;
+ struct of_range_parser parser;
+ struct of_range range;
+ int atr_index = 0;
+
+ /*
+ * MPFS PCIe Root Port is 32-bit only, behind a Fabric Interface
+ * Controller FPGA logic block which contains the AXI-S interface.
+ *
+ * From the point of view of the PCIe Root Port, there are only two
+ * supported Root Port configurations:
+ *
+ * Configuration 1: for use with fully coherent designs; supports a
+ * window from 0x0 (CPU space) to specified PCIe space.
+ *
+ * Configuration 2: for use with non-coherent designs; supports two
+ * 1 GB windows to CPU space; one mapping CPU space 0 to PCIe space
+ * 0x80000000 and a second mapping CPU space 0x40000000 to PCIe
+ * space 0xc0000000. This cfg needs two windows because of how the
+ * MSI space is allocated in the AXI-S range on MPFS.
+ *
+ * The FIC interface outside the PCIe block *must* complete the
+ * inbound address translation as per MCHP MPFS FPGA design
+ * guidelines.
+ */
+ if (device_property_read_bool(dev, "dma-noncoherent")) {
+ /*
+ * Always need same two tables in this case. Need two tables
+ * due to hardware interactions between address and size.
+ */
+ mc_pcie_setup_inbound_atr(port, 0, 0,
+ MPFS_NC_BOUNCE_ADDR, SZ_1G);
+ mc_pcie_setup_inbound_atr(port, 1, SZ_1G,
+ MPFS_NC_BOUNCE_ADDR + SZ_1G, SZ_1G);
+ } else {
+ /* Find any DMA ranges */
+ if (of_pci_dma_range_parser_init(&parser, dn)) {
+ /* No DMA range property - setup default */
+ mc_pcie_setup_inbound_atr(port, 0, 0, 0, SZ_4G);
+ return 0;
+ }
+
+ for_each_of_range(&parser, &range) {
+ if (atr_index >= MC_MAX_NUM_INBOUND_WINDOWS) {
+ dev_err(dev, "too many inbound ranges; %d available tables\n",
+ MC_MAX_NUM_INBOUND_WINDOWS);
+ return -EINVAL;
+ }
+ mc_pcie_setup_inbound_atr(port, atr_index, 0,
+ range.pci_addr, range.size);
+ atr_index++;
+ }
+ }
+
+ return 0;
+}
+
+static int mc_platform_init(struct pci_config_window *cfg)
+{
+ struct device *dev = cfg->parent;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct pci_host_bridge *bridge = platform_get_drvdata(pdev);
+ int ret;
+
+ /* Configure address translation table 0 for PCIe config space */
+ plda_pcie_setup_window(port->bridge_base_addr, 0, cfg->res.start,
+ cfg->res.start,
+ resource_size(&cfg->res));
+
+ /* Need some fixups in config space */
+ mc_pcie_enable_msi(port, cfg->win);
+
+ /* Configure non-config space outbound ranges */
+ ret = plda_pcie_setup_iomems(bridge, &port->plda);
+ if (ret)
+ return ret;
+
+ ret = mc_pcie_setup_inbound_ranges(pdev, port);
+ if (ret)
+ return ret;
+
+ port->plda.event_ops = &mc_event_ops;
+ port->plda.event_irq_chip = &mc_event_irq_chip;
+ port->plda.events_bitmap = GENMASK(NUM_EVENTS - 1, 0);
+
+ /* Address translation is up; safe to enable interrupts */
+ ret = plda_init_interrupts(pdev, &port->plda, &mc_event);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int mc_host_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ void __iomem *apb_base_addr;
+ struct plda_pcie_rp *plda;
+ int ret;
+ u32 val;
+
+ port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
+ if (!port)
+ return -ENOMEM;
+
+ plda = &port->plda;
+ plda->dev = dev;
+
+ port->bridge_base_addr = devm_platform_ioremap_resource_byname(pdev,
+ "bridge");
+ port->ctrl_base_addr = devm_platform_ioremap_resource_byname(pdev,
+ "ctrl");
+ if (!IS_ERR(port->bridge_base_addr) && !IS_ERR(port->ctrl_base_addr))
+ goto addrs_set;
+
+ /*
+	 * The original, incorrect binding that lumped the control and
+	 * bridge addresses together still needs to be handled by the driver.
+ */
+ apb_base_addr = devm_platform_ioremap_resource_byname(pdev, "apb");
+ if (IS_ERR(apb_base_addr))
+ return dev_err_probe(dev, PTR_ERR(apb_base_addr),
+ "both legacy apb register and ctrl/bridge regions missing");
+
+ port->bridge_base_addr = apb_base_addr + MC_PCIE1_BRIDGE_ADDR;
+ port->ctrl_base_addr = apb_base_addr + MC_PCIE1_CTRL_ADDR;
+
+addrs_set:
+ mc_disable_interrupts(port);
+
+ plda->bridge_addr = port->bridge_base_addr;
+ plda->num_events = NUM_EVENTS;
+
+ /* Allow enabling MSI by disabling MSI-X */
+ val = readl(port->bridge_base_addr + PCIE_PCI_IRQ_DW0);
+ val &= ~MSIX_CAP_MASK;
+ writel(val, port->bridge_base_addr + PCIE_PCI_IRQ_DW0);
+
+ /* Pick num vectors from bitfile programmed onto FPGA fabric */
+ val = readl(port->bridge_base_addr + PCIE_PCI_IRQ_DW0);
+ val &= NUM_MSI_MSGS_MASK;
+ val >>= NUM_MSI_MSGS_SHIFT;
+
+ plda->msi.num_vectors = 1 << val;
+
+ /* Pick vector address from design */
+ plda->msi.vector_phy = readl_relaxed(port->bridge_base_addr + IMSI_ADDR);
+
+ ret = mc_pcie_init_clks(dev);
+ if (ret) {
+ dev_err(dev, "failed to get clock resources, error %d\n", ret);
+ return -ENODEV;
+ }
+
+ return pci_host_common_probe(pdev);
+}
+
+static const struct pci_ecam_ops mc_ecam_ops = {
+ .init = mc_platform_init,
+ .pci_ops = {
+ .map_bus = pci_ecam_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+ }
+};
+
+static const struct of_device_id mc_pcie_of_match[] = {
+ {
+ .compatible = "microchip,pcie-host-1.0",
+ .data = &mc_ecam_ops,
+ },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, mc_pcie_of_match);
+
+static struct platform_driver mc_pcie_driver = {
+ .probe = mc_host_probe,
+ .driver = {
+ .name = "microchip-pcie",
+ .of_match_table = mc_pcie_of_match,
+ .suppress_bind_attrs = true,
+ },
+};
+
+builtin_platform_driver(mc_pcie_driver);
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Microchip PCIe host controller driver");
+MODULE_AUTHOR("Daire McNamara <daire.mcnamara@microchip.com>");
diff --git a/drivers/pci/controller/plda/pcie-plda-host.c b/drivers/pci/controller/plda/pcie-plda-host.c
new file mode 100644
index 000000000000..3c2f68383010
--- /dev/null
+++ b/drivers/pci/controller/plda/pcie-plda-host.c
@@ -0,0 +1,651 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PLDA PCIe XpressRich host controller driver
+ *
+ * Copyright (C) 2023 Microchip Co. Ltd
+ * StarFive Co. Ltd
+ *
+ * Author: Daire McNamara <daire.mcnamara@microchip.com>
+ */
+
+#include <linux/align.h>
+#include <linux/bitfield.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqchip/irq-msi-lib.h>
+#include <linux/irqdomain.h>
+#include <linux/msi.h>
+#include <linux/pci_regs.h>
+#include <linux/pci-ecam.h>
+#include <linux/wordpart.h>
+
+#include "pcie-plda.h"
+
+void __iomem *plda_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
+ int where)
+{
+ struct plda_pcie_rp *pcie = bus->sysdata;
+
+ return pcie->config_base + PCIE_ECAM_OFFSET(bus->number, devfn, where);
+}
+EXPORT_SYMBOL_GPL(plda_pcie_map_bus);
+
+static void plda_handle_msi(struct irq_desc *desc)
+{
+ struct plda_pcie_rp *port = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct device *dev = port->dev;
+ struct plda_msi *msi = &port->msi;
+ void __iomem *bridge_base_addr = port->bridge_addr;
+ unsigned long status;
+ u32 bit;
+ int ret;
+
+ chained_irq_enter(chip, desc);
+
+ status = readl_relaxed(bridge_base_addr + ISTATUS_LOCAL);
+ if (status & PM_MSI_INT_MSI_MASK) {
+ writel_relaxed(status & PM_MSI_INT_MSI_MASK,
+ bridge_base_addr + ISTATUS_LOCAL);
+ status = readl_relaxed(bridge_base_addr + ISTATUS_MSI);
+ for_each_set_bit(bit, &status, msi->num_vectors) {
+ ret = generic_handle_domain_irq(msi->dev_domain, bit);
+ if (ret)
+ dev_err_ratelimited(dev, "bad MSI IRQ %d\n",
+ bit);
+ }
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static void plda_msi_bottom_irq_ack(struct irq_data *data)
+{
+ struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
+ void __iomem *bridge_base_addr = port->bridge_addr;
+ u32 bitpos = data->hwirq;
+
+ writel_relaxed(BIT(bitpos), bridge_base_addr + ISTATUS_MSI);
+}
+
+static void plda_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
+{
+ struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
+ phys_addr_t addr = port->msi.vector_phy;
+
+ msg->address_lo = lower_32_bits(addr);
+ msg->address_hi = upper_32_bits(addr);
+ msg->data = data->hwirq;
+
+ dev_dbg(port->dev, "msi#%x address_hi %#x address_lo %#x\n",
+ (int)data->hwirq, msg->address_hi, msg->address_lo);
+}
+
+static struct irq_chip plda_msi_bottom_irq_chip = {
+ .name = "PLDA MSI",
+ .irq_ack = plda_msi_bottom_irq_ack,
+ .irq_compose_msi_msg = plda_compose_msi_msg,
+};
+
+static int plda_irq_msi_domain_alloc(struct irq_domain *domain,
+ unsigned int virq,
+ unsigned int nr_irqs,
+ void *args)
+{
+ struct plda_pcie_rp *port = domain->host_data;
+ struct plda_msi *msi = &port->msi;
+ unsigned long bit;
+
+ mutex_lock(&msi->lock);
+ bit = find_first_zero_bit(msi->used, msi->num_vectors);
+ if (bit >= msi->num_vectors) {
+ mutex_unlock(&msi->lock);
+ return -ENOSPC;
+ }
+
+ set_bit(bit, msi->used);
+
+ irq_domain_set_info(domain, virq, bit, &plda_msi_bottom_irq_chip,
+ domain->host_data, handle_edge_irq, NULL, NULL);
+
+ mutex_unlock(&msi->lock);
+
+ return 0;
+}
+
+static void plda_irq_msi_domain_free(struct irq_domain *domain,
+ unsigned int virq,
+ unsigned int nr_irqs)
+{
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+ struct plda_pcie_rp *port = irq_data_get_irq_chip_data(d);
+ struct plda_msi *msi = &port->msi;
+
+ mutex_lock(&msi->lock);
+
+ if (test_bit(d->hwirq, msi->used))
+ __clear_bit(d->hwirq, msi->used);
+ else
+ dev_err(port->dev, "trying to free unused MSI%lu\n", d->hwirq);
+
+ mutex_unlock(&msi->lock);
+}
+
+static const struct irq_domain_ops msi_domain_ops = {
+ .alloc = plda_irq_msi_domain_alloc,
+ .free = plda_irq_msi_domain_free,
+};
+
+#define PLDA_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
+ MSI_FLAG_USE_DEF_CHIP_OPS | \
+ MSI_FLAG_NO_AFFINITY)
+#define PLDA_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \
+ MSI_FLAG_PCI_MSIX)
+
+static const struct msi_parent_ops plda_msi_parent_ops = {
+ .required_flags = PLDA_MSI_FLAGS_REQUIRED,
+ .supported_flags = PLDA_MSI_FLAGS_SUPPORTED,
+ .chip_flags = MSI_CHIP_FLAG_SET_ACK,
+ .bus_select_token = DOMAIN_BUS_PCI_MSI,
+ .prefix = "PLDA-",
+ .init_dev_msi_info = msi_lib_init_dev_msi_info,
+};
+
+static int plda_allocate_msi_domains(struct plda_pcie_rp *port)
+{
+ struct device *dev = port->dev;
+ struct plda_msi *msi = &port->msi;
+
+ mutex_init(&port->msi.lock);
+
+ struct irq_domain_info info = {
+ .fwnode = dev_fwnode(dev),
+ .ops = &msi_domain_ops,
+ .host_data = port,
+ .size = msi->num_vectors,
+ };
+
+ msi->dev_domain = msi_create_parent_irq_domain(&info, &plda_msi_parent_ops);
+ if (!msi->dev_domain) {
+ dev_err(dev, "failed to create IRQ domain\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void plda_handle_intx(struct irq_desc *desc)
+{
+ struct plda_pcie_rp *port = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct device *dev = port->dev;
+ void __iomem *bridge_base_addr = port->bridge_addr;
+ unsigned long status;
+ u32 bit;
+ int ret;
+
+ chained_irq_enter(chip, desc);
+
+ status = readl_relaxed(bridge_base_addr + ISTATUS_LOCAL);
+ if (status & PM_MSI_INT_INTX_MASK) {
+ status &= PM_MSI_INT_INTX_MASK;
+ status >>= PM_MSI_INT_INTX_SHIFT;
+ for_each_set_bit(bit, &status, PCI_NUM_INTX) {
+ ret = generic_handle_domain_irq(port->intx_domain, bit);
+ if (ret)
+ dev_err_ratelimited(dev, "bad INTx IRQ %d\n",
+ bit);
+ }
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static void plda_ack_intx_irq(struct irq_data *data)
+{
+ struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
+ void __iomem *bridge_base_addr = port->bridge_addr;
+ u32 mask = BIT(data->hwirq + PM_MSI_INT_INTX_SHIFT);
+
+ writel_relaxed(mask, bridge_base_addr + ISTATUS_LOCAL);
+}
+
+static void plda_mask_intx_irq(struct irq_data *data)
+{
+ struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
+ void __iomem *bridge_base_addr = port->bridge_addr;
+ unsigned long flags;
+ u32 mask = BIT(data->hwirq + PM_MSI_INT_INTX_SHIFT);
+ u32 val;
+
+ raw_spin_lock_irqsave(&port->lock, flags);
+ val = readl_relaxed(bridge_base_addr + IMASK_LOCAL);
+ val &= ~mask;
+ writel_relaxed(val, bridge_base_addr + IMASK_LOCAL);
+ raw_spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static void plda_unmask_intx_irq(struct irq_data *data)
+{
+ struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
+ void __iomem *bridge_base_addr = port->bridge_addr;
+ unsigned long flags;
+ u32 mask = BIT(data->hwirq + PM_MSI_INT_INTX_SHIFT);
+ u32 val;
+
+ raw_spin_lock_irqsave(&port->lock, flags);
+ val = readl_relaxed(bridge_base_addr + IMASK_LOCAL);
+ val |= mask;
+ writel_relaxed(val, bridge_base_addr + IMASK_LOCAL);
+ raw_spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static struct irq_chip plda_intx_irq_chip = {
+ .name = "PLDA PCIe INTx",
+ .irq_ack = plda_ack_intx_irq,
+ .irq_mask = plda_mask_intx_irq,
+ .irq_unmask = plda_unmask_intx_irq,
+};
+
+static int plda_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &plda_intx_irq_chip, handle_level_irq);
+ irq_set_chip_data(irq, domain->host_data);
+
+ return 0;
+}
+
+static const struct irq_domain_ops intx_domain_ops = {
+ .map = plda_pcie_intx_map,
+};
+
+static u32 plda_get_events(struct plda_pcie_rp *port)
+{
+ u32 events, val, origin;
+
+ origin = readl_relaxed(port->bridge_addr + ISTATUS_LOCAL);
+
+	/* MSI and sys events: bits 28-31 of ISTATUS_LOCAL become event bits 25-28 */
+ val = (origin & SYS_AND_MSI_MASK) >> PM_MSI_INT_MSI_SHIFT;
+ events = val << (PM_MSI_INT_MSI_SHIFT - PCI_NUM_INTX + 1);
+
+ /* INTx events */
+ if (origin & PM_MSI_INT_INTX_MASK)
+ events |= BIT(PM_MSI_INT_INTX_SHIFT);
+
+	/* The remaining events map directly to the register bits */
+ events |= origin & GENMASK(P_ATR_EVT_DOORBELL_SHIFT, 0);
+
+ return events;
+}
+
+static irqreturn_t plda_event_handler(int irq, void *dev_id)
+{
+ return IRQ_HANDLED;
+}
+
+static void plda_handle_event(struct irq_desc *desc)
+{
+ struct plda_pcie_rp *port = irq_desc_get_handler_data(desc);
+ unsigned long events;
+ u32 bit;
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+
+ chained_irq_enter(chip, desc);
+
+ events = port->event_ops->get_events(port);
+
+ events &= port->events_bitmap;
+ for_each_set_bit(bit, &events, port->num_events)
+ generic_handle_domain_irq(port->event_domain, bit);
+
+ chained_irq_exit(chip, desc);
+}
+
+static u32 plda_hwirq_to_mask(int hwirq)
+{
+ u32 mask;
+
+	/* hwirqs 0 - 23 map directly to register bits */
+ if (hwirq < EVENT_PM_MSI_INT_INTX)
+ mask = BIT(hwirq);
+ else if (hwirq == EVENT_PM_MSI_INT_INTX)
+ mask = PM_MSI_INT_INTX_MASK;
+ else
+ mask = BIT(hwirq + PCI_NUM_INTX - 1);
+
+ return mask;
+}
+
+static void plda_ack_event_irq(struct irq_data *data)
+{
+ struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
+
+ writel_relaxed(plda_hwirq_to_mask(data->hwirq),
+ port->bridge_addr + ISTATUS_LOCAL);
+}
+
+static void plda_mask_event_irq(struct irq_data *data)
+{
+ struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
+ u32 mask, val;
+
+ mask = plda_hwirq_to_mask(data->hwirq);
+
+ raw_spin_lock(&port->lock);
+ val = readl_relaxed(port->bridge_addr + IMASK_LOCAL);
+ val &= ~mask;
+ writel_relaxed(val, port->bridge_addr + IMASK_LOCAL);
+ raw_spin_unlock(&port->lock);
+}
+
+static void plda_unmask_event_irq(struct irq_data *data)
+{
+ struct plda_pcie_rp *port = irq_data_get_irq_chip_data(data);
+ u32 mask, val;
+
+ mask = plda_hwirq_to_mask(data->hwirq);
+
+ raw_spin_lock(&port->lock);
+ val = readl_relaxed(port->bridge_addr + IMASK_LOCAL);
+ val |= mask;
+ writel_relaxed(val, port->bridge_addr + IMASK_LOCAL);
+ raw_spin_unlock(&port->lock);
+}
+
+static struct irq_chip plda_event_irq_chip = {
+ .name = "PLDA PCIe EVENT",
+ .irq_ack = plda_ack_event_irq,
+ .irq_mask = plda_mask_event_irq,
+ .irq_unmask = plda_unmask_event_irq,
+};
+
+static const struct plda_event_ops plda_event_ops = {
+ .get_events = plda_get_events,
+};
+
+static int plda_pcie_event_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ struct plda_pcie_rp *port = (void *)domain->host_data;
+
+ irq_set_chip_and_handler(irq, port->event_irq_chip, handle_level_irq);
+ irq_set_chip_data(irq, domain->host_data);
+
+ return 0;
+}
+
+static const struct irq_domain_ops plda_event_domain_ops = {
+ .map = plda_pcie_event_map,
+};
+
+static int plda_pcie_init_irq_domains(struct plda_pcie_rp *port)
+{
+ struct device *dev = port->dev;
+ struct device_node *node = dev->of_node;
+ struct device_node *pcie_intc_node;
+
+ /* Setup INTx */
+ pcie_intc_node = of_get_next_child(node, NULL);
+ if (!pcie_intc_node) {
+ dev_err(dev, "failed to find PCIe Intc node\n");
+ return -EINVAL;
+ }
+
+ port->event_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node),
+ port->num_events, &plda_event_domain_ops,
+ port);
+ if (!port->event_domain) {
+ dev_err(dev, "failed to get event domain\n");
+ of_node_put(pcie_intc_node);
+ return -ENOMEM;
+ }
+
+ irq_domain_update_bus_token(port->event_domain, DOMAIN_BUS_NEXUS);
+
+ port->intx_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node), PCI_NUM_INTX,
+ &intx_domain_ops, port);
+ if (!port->intx_domain) {
+ dev_err(dev, "failed to get an INTx IRQ domain\n");
+ of_node_put(pcie_intc_node);
+ return -ENOMEM;
+ }
+
+ irq_domain_update_bus_token(port->intx_domain, DOMAIN_BUS_WIRED);
+
+ of_node_put(pcie_intc_node);
+ raw_spin_lock_init(&port->lock);
+
+ return plda_allocate_msi_domains(port);
+}
+
+int plda_init_interrupts(struct platform_device *pdev,
+ struct plda_pcie_rp *port,
+ const struct plda_event *event)
+{
+ struct device *dev = &pdev->dev;
+ int event_irq, ret;
+ u32 i;
+
+ if (!port->event_ops)
+ port->event_ops = &plda_event_ops;
+
+ if (!port->event_irq_chip)
+ port->event_irq_chip = &plda_event_irq_chip;
+
+ ret = plda_pcie_init_irq_domains(port);
+ if (ret) {
+ dev_err(dev, "failed creating IRQ domains\n");
+ return ret;
+ }
+
+ port->irq = platform_get_irq(pdev, 0);
+ if (port->irq < 0)
+ return -ENODEV;
+
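+	/* Map and request an interrupt for each event enabled in events_bitmap */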
+ for_each_set_bit(i, &port->events_bitmap, port->num_events) {
+ event_irq = irq_create_mapping(port->event_domain, i);
+ if (!event_irq) {
+ dev_err(dev, "failed to map hwirq %d\n", i);
+ return -ENXIO;
+ }
+
+ if (event->request_event_irq)
+ ret = event->request_event_irq(port, event_irq, i);
+ else
+ ret = devm_request_irq(dev, event_irq,
+ plda_event_handler,
+ 0, NULL, port);
+
+ if (ret) {
+ dev_err(dev, "failed to request IRQ %d\n", event_irq);
+ return ret;
+ }
+ }
+
+ port->intx_irq = irq_create_mapping(port->event_domain,
+ event->intx_event);
+ if (!port->intx_irq) {
+ dev_err(dev, "failed to map INTx interrupt\n");
+ return -ENXIO;
+ }
+
+ /* Plug the INTx chained handler */
+ irq_set_chained_handler_and_data(port->intx_irq, plda_handle_intx, port);
+
+ port->msi_irq = irq_create_mapping(port->event_domain,
+ event->msi_event);
+ if (!port->msi_irq)
+ return -ENXIO;
+
+ /* Plug the MSI chained handler */
+ irq_set_chained_handler_and_data(port->msi_irq, plda_handle_msi, port);
+
+ /* Plug the main event chained handler */
+ irq_set_chained_handler_and_data(port->irq, plda_handle_event, port);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(plda_init_interrupts);
+
+void plda_pcie_setup_window(void __iomem *bridge_base_addr, u32 index,
+ phys_addr_t axi_addr, phys_addr_t pci_addr,
+ size_t size)
+{
+ u32 atr_sz = ilog2(size) - 1;
+ u32 val;
+
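+	/* Window 0 targets the configuration interface; other windows target the TX/RX (memory) interface */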
+ if (index == 0)
+ val = PCIE_CONFIG_INTERFACE;
+ else
+ val = PCIE_TX_RX_INTERFACE;
+
+ writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
+ ATR0_AXI4_SLV0_TRSL_PARAM);
+
+ val = ALIGN_DOWN(lower_32_bits(axi_addr), SZ_4K);
+ val |= FIELD_PREP(ATR_SIZE_MASK, atr_sz);
+ val |= ATR_IMPL_ENABLE;
+ writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
+ ATR0_AXI4_SLV0_SRCADDR_PARAM);
+
+ val = upper_32_bits(axi_addr);
+ writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
+ ATR0_AXI4_SLV0_SRC_ADDR);
+
+ val = lower_32_bits(pci_addr);
+ writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
+ ATR0_AXI4_SLV0_TRSL_ADDR_LSB);
+
+ val = upper_32_bits(pci_addr);
+ writel(val, bridge_base_addr + (index * ATR_ENTRY_SIZE) +
+ ATR0_AXI4_SLV0_TRSL_ADDR_UDW);
+}
+EXPORT_SYMBOL_GPL(plda_pcie_setup_window);
+
+void plda_pcie_setup_inbound_address_translation(struct plda_pcie_rp *port)
+{
+ void __iomem *bridge_base_addr = port->bridge_addr;
+ u32 val;
+
+ val = readl(bridge_base_addr + ATR0_PCIE_WIN0_SRCADDR_PARAM);
+ val |= (ATR0_PCIE_ATR_SIZE << ATR0_PCIE_ATR_SIZE_SHIFT);
+ writel(val, bridge_base_addr + ATR0_PCIE_WIN0_SRCADDR_PARAM);
+ writel(0, bridge_base_addr + ATR0_PCIE_WIN0_SRC_ADDR);
+}
+EXPORT_SYMBOL_GPL(plda_pcie_setup_inbound_address_translation);
+
+int plda_pcie_setup_iomems(struct pci_host_bridge *bridge,
+ struct plda_pcie_rp *port)
+{
+ void __iomem *bridge_base_addr = port->bridge_addr;
+ struct resource_entry *entry;
+ u64 pci_addr;
+ u32 index = 1;
+
+ resource_list_for_each_entry(entry, &bridge->windows) {
+ if (resource_type(entry->res) == IORESOURCE_MEM) {
+ pci_addr = entry->res->start - entry->offset;
+ plda_pcie_setup_window(bridge_base_addr, index,
+ entry->res->start, pci_addr,
+ resource_size(entry->res));
+ index++;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(plda_pcie_setup_iomems);
+
+static void plda_pcie_irq_domain_deinit(struct plda_pcie_rp *pcie)
+{
+ irq_set_chained_handler_and_data(pcie->irq, NULL, NULL);
+ irq_set_chained_handler_and_data(pcie->msi_irq, NULL, NULL);
+ irq_set_chained_handler_and_data(pcie->intx_irq, NULL, NULL);
+
+ irq_domain_remove(pcie->msi.dev_domain);
+
+ irq_domain_remove(pcie->intx_domain);
+ irq_domain_remove(pcie->event_domain);
+}
+
+int plda_pcie_host_init(struct plda_pcie_rp *port, struct pci_ops *ops,
+ const struct plda_event *plda_event)
+{
+ struct device *dev = port->dev;
+ struct pci_host_bridge *bridge;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct resource *cfg_res;
+ int ret;
+
+ port->bridge_addr =
+ devm_platform_ioremap_resource_byname(pdev, "apb");
+
+ if (IS_ERR(port->bridge_addr))
+ return dev_err_probe(dev, PTR_ERR(port->bridge_addr),
+ "failed to map reg memory\n");
+
+ cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
+ if (!cfg_res)
+ return dev_err_probe(dev, -ENODEV,
+ "failed to get config memory\n");
+
+ port->config_base = devm_ioremap_resource(dev, cfg_res);
+ if (IS_ERR(port->config_base))
+ return dev_err_probe(dev, PTR_ERR(port->config_base),
+ "failed to map config memory\n");
+
+ bridge = devm_pci_alloc_host_bridge(dev, 0);
+ if (!bridge)
+ return -ENOMEM;
+
+ if (port->host_ops && port->host_ops->host_init) {
+ ret = port->host_ops->host_init(port);
+ if (ret)
+ return ret;
+ }
+
+ port->bridge = bridge;
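+	/* Window 0 maps the 'cfg' region to PCI address 0 for configuration accesses */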
+ plda_pcie_setup_window(port->bridge_addr, 0, cfg_res->start, 0,
+ resource_size(cfg_res));
+ plda_pcie_setup_iomems(bridge, port);
+ plda_set_default_msi(&port->msi);
+ ret = plda_init_interrupts(pdev, port, plda_event);
+ if (ret)
+ goto err_host;
+
+ /* Set default bus ops */
+ bridge->ops = ops;
+ bridge->sysdata = port;
+
+ ret = pci_host_probe(bridge);
+ if (ret < 0) {
+ dev_err_probe(dev, ret, "failed to probe pci host\n");
+ goto err_probe;
+ }
+
+ return ret;
+
+err_probe:
+ plda_pcie_irq_domain_deinit(port);
+err_host:
+ if (port->host_ops && port->host_ops->host_deinit)
+ port->host_ops->host_deinit(port);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(plda_pcie_host_init);
+
+void plda_pcie_host_deinit(struct plda_pcie_rp *port)
+{
+ pci_stop_root_bus(port->bridge->bus);
+ pci_remove_root_bus(port->bridge->bus);
+
+ plda_pcie_irq_domain_deinit(port);
+
+ if (port->host_ops && port->host_ops->host_deinit)
+ port->host_ops->host_deinit(port);
+}
+EXPORT_SYMBOL_GPL(plda_pcie_host_deinit);
diff --git a/drivers/pci/controller/plda/pcie-plda.h b/drivers/pci/controller/plda/pcie-plda.h
new file mode 100644
index 000000000000..6b8665df7bf0
--- /dev/null
+++ b/drivers/pci/controller/plda/pcie-plda.h
@@ -0,0 +1,274 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * PLDA PCIe host controller driver
+ */
+
+#ifndef _PCIE_PLDA_H
+#define _PCIE_PLDA_H
+
+/* Number of MSI IRQs */
+#define PLDA_MAX_NUM_MSI_IRQS 32
+
+/* PCIe Bridge Phy Regs */
+#define GEN_SETTINGS 0x80
+#define RP_ENABLE 1
+#define PCIE_PCI_IDS_DW1 0x9c
+#define IDS_CLASS_CODE_SHIFT 16
+#define REVISION_ID_MASK GENMASK(7, 0)
+#define CLASS_CODE_ID_MASK GENMASK(31, 8)
+#define PCIE_PCI_IRQ_DW0 0xa8
+#define MSIX_CAP_MASK BIT(31)
+#define NUM_MSI_MSGS_MASK GENMASK(6, 4)
+#define NUM_MSI_MSGS_SHIFT 4
+#define PCI_MISC 0xb4
+#define PHY_FUNCTION_DIS BIT(15)
+#define PCIE_WINROM 0xfc
+#define PREF_MEM_WIN_64_SUPPORT BIT(3)
+
+#define IMASK_LOCAL 0x180
+#define DMA_END_ENGINE_0_MASK 0x00000000u
+#define DMA_END_ENGINE_0_SHIFT 0
+#define DMA_END_ENGINE_1_MASK 0x00000000u
+#define DMA_END_ENGINE_1_SHIFT 1
+#define DMA_ERROR_ENGINE_0_MASK 0x00000100u
+#define DMA_ERROR_ENGINE_0_SHIFT 8
+#define DMA_ERROR_ENGINE_1_MASK 0x00000200u
+#define DMA_ERROR_ENGINE_1_SHIFT 9
+#define A_ATR_EVT_POST_ERR_MASK 0x00010000u
+#define A_ATR_EVT_POST_ERR_SHIFT 16
+#define A_ATR_EVT_FETCH_ERR_MASK 0x00020000u
+#define A_ATR_EVT_FETCH_ERR_SHIFT 17
+#define A_ATR_EVT_DISCARD_ERR_MASK 0x00040000u
+#define A_ATR_EVT_DISCARD_ERR_SHIFT 18
+#define A_ATR_EVT_DOORBELL_MASK 0x00000000u
+#define A_ATR_EVT_DOORBELL_SHIFT 19
+#define P_ATR_EVT_POST_ERR_MASK 0x00100000u
+#define P_ATR_EVT_POST_ERR_SHIFT 20
+#define P_ATR_EVT_FETCH_ERR_MASK 0x00200000u
+#define P_ATR_EVT_FETCH_ERR_SHIFT 21
+#define P_ATR_EVT_DISCARD_ERR_MASK 0x00400000u
+#define P_ATR_EVT_DISCARD_ERR_SHIFT 22
+#define P_ATR_EVT_DOORBELL_MASK 0x00000000u
+#define P_ATR_EVT_DOORBELL_SHIFT 23
+#define PM_MSI_INT_INTA_MASK 0x01000000u
+#define PM_MSI_INT_INTA_SHIFT 24
+#define PM_MSI_INT_INTB_MASK 0x02000000u
+#define PM_MSI_INT_INTB_SHIFT 25
+#define PM_MSI_INT_INTC_MASK 0x04000000u
+#define PM_MSI_INT_INTC_SHIFT 26
+#define PM_MSI_INT_INTD_MASK 0x08000000u
+#define PM_MSI_INT_INTD_SHIFT 27
+#define PM_MSI_INT_INTX_MASK 0x0f000000u
+#define PM_MSI_INT_INTX_SHIFT 24
+#define PM_MSI_INT_MSI_MASK 0x10000000u
+#define PM_MSI_INT_MSI_SHIFT 28
+#define PM_MSI_INT_AER_EVT_MASK 0x20000000u
+#define PM_MSI_INT_AER_EVT_SHIFT 29
+#define PM_MSI_INT_EVENTS_MASK 0x40000000u
+#define PM_MSI_INT_EVENTS_SHIFT 30
+#define PM_MSI_INT_SYS_ERR_MASK 0x80000000u
+#define PM_MSI_INT_SYS_ERR_SHIFT 31
+#define SYS_AND_MSI_MASK GENMASK(31, 28)
+#define NUM_LOCAL_EVENTS 15
+#define ISTATUS_LOCAL 0x184
+#define IMASK_HOST 0x188
+#define ISTATUS_HOST 0x18c
+#define IMSI_ADDR 0x190
+#define ISTATUS_MSI 0x194
+#define PMSG_SUPPORT_RX 0x3f0
+#define PMSG_LTR_SUPPORT BIT(2)
+
+/* PCIe Master table init defines */
+#define ATR0_PCIE_WIN0_SRCADDR_PARAM 0x600u
+#define ATR0_PCIE_ATR_SIZE 0x25
+#define ATR0_PCIE_ATR_SIZE_SHIFT 1
+#define ATR0_PCIE_WIN0_SRC_ADDR 0x604u
+#define ATR0_PCIE_WIN0_TRSL_ADDR_LSB 0x608u
+#define ATR0_PCIE_WIN0_TRSL_ADDR_UDW 0x60cu
+#define ATR0_PCIE_WIN0_TRSL_PARAM 0x610u
+
+/* PCIe AXI slave table init defines */
+#define ATR0_AXI4_SLV0_SRCADDR_PARAM 0x800u
+#define ATR_SIZE_MASK GENMASK(6, 1)
+#define ATR_IMPL_ENABLE BIT(0)
+#define ATR0_AXI4_SLV0_SRC_ADDR 0x804u
+#define ATR0_AXI4_SLV0_TRSL_ADDR_LSB 0x808u
+#define ATR0_AXI4_SLV0_TRSL_ADDR_UDW 0x80cu
+#define ATR0_AXI4_SLV0_TRSL_PARAM 0x810u
+#define PCIE_TX_RX_INTERFACE 0x00000000u
+#define PCIE_CONFIG_INTERFACE 0x00000001u
+#define TRSL_ID_AXI4_MASTER_0 0x00000004u
+
+#define CONFIG_SPACE_ADDR_OFFSET 0x1000u
+
+#define ATR_ENTRY_SIZE 32
+
+enum plda_int_event {
+ PLDA_AXI_POST_ERR,
+ PLDA_AXI_FETCH_ERR,
+ PLDA_AXI_DISCARD_ERR,
+ PLDA_AXI_DOORBELL,
+ PLDA_PCIE_POST_ERR,
+ PLDA_PCIE_FETCH_ERR,
+ PLDA_PCIE_DISCARD_ERR,
+ PLDA_PCIE_DOORBELL,
+ PLDA_INTX,
+ PLDA_MSI,
+ PLDA_AER_EVENT,
+ PLDA_MISC_EVENTS,
+ PLDA_SYS_ERR,
+ PLDA_INT_EVENT_NUM
+};
+
+#define PLDA_NUM_DMA_EVENTS 16
+
+#define EVENT_PM_MSI_INT_INTX (PLDA_NUM_DMA_EVENTS + PLDA_INTX)
+#define EVENT_PM_MSI_INT_MSI (PLDA_NUM_DMA_EVENTS + PLDA_MSI)
+#define PLDA_MAX_EVENT_NUM (PLDA_NUM_DMA_EVENTS + PLDA_INT_EVENT_NUM)
+
+/*
+ * PLDA interrupt register
+ *
+ * 31 27 23 15 7 0
+ * +--+--+--+-+------+-+-+-+-+-+-+-+-+-----------+-----------+
+ * |12|11|10|9| intx |7|6|5|4|3|2|1|0| DMA error | DMA end |
+ * +--+--+--+-+------+-+-+-+-+-+-+-+-+-----------+-----------+
+ * event bit
+ * 0-7 (0-7) DMA interrupt end : reserved for vendor implementation
+ * 8-15 (8-15) DMA error : reserved for vendor implementation
+ * 16 (16) AXI post error (PLDA_AXI_POST_ERR)
+ * 17 (17) AXI fetch error (PLDA_AXI_FETCH_ERR)
+ * 18 (18) AXI discard error (PLDA_AXI_DISCARD_ERR)
+ * 19 (19) AXI doorbell (PLDA_AXI_DOORBELL)
+ * 20 (20) PCIe post error (PLDA_PCIE_POST_ERR)
+ * 21 (21) PCIe fetch error (PLDA_PCIE_FETCH_ERR)
+ * 22 (22) PCIe discard error (PLDA_PCIE_DISCARD_ERR)
+ * 23 (23) PCIe doorbell (PLDA_PCIE_DOORBELL)
+ * 24 (27-24) INTx interrupts (PLDA_INTX)
+ * 25 (28) MSI interrupt (PLDA_MSI)
+ * 26 (29) AER event (PLDA_AER_EVENT)
+ * 27 (30) PM/LTR/Hotplug (PLDA_MISC_EVENTS)
+ * 28 (31) System error (PLDA_SYS_ERR)
+ */
+
+struct plda_pcie_rp;
+
+struct plda_event_ops {
+ u32 (*get_events)(struct plda_pcie_rp *pcie);
+};
+
+struct plda_pcie_host_ops {
+ int (*host_init)(struct plda_pcie_rp *pcie);
+ void (*host_deinit)(struct plda_pcie_rp *pcie);
+};
+
+struct plda_msi {
+ struct mutex lock; /* Protect used bitmap */
+ struct irq_domain *dev_domain;
+ u32 num_vectors;
+ u64 vector_phy;
+ DECLARE_BITMAP(used, PLDA_MAX_NUM_MSI_IRQS);
+};
+
+struct plda_pcie_rp {
+ struct device *dev;
+ struct pci_host_bridge *bridge;
+ struct irq_domain *intx_domain;
+ struct irq_domain *event_domain;
+ raw_spinlock_t lock;
+ struct plda_msi msi;
+ const struct plda_event_ops *event_ops;
+ const struct irq_chip *event_irq_chip;
+ const struct plda_pcie_host_ops *host_ops;
+ void __iomem *bridge_addr;
+ void __iomem *config_base;
+ unsigned long events_bitmap;
+ int irq;
+ int msi_irq;
+ int intx_irq;
+ int num_events;
+};
+
+struct plda_event {
+ int (*request_event_irq)(struct plda_pcie_rp *pcie,
+ int event_irq, int event);
+ int intx_event;
+ int msi_event;
+};
+
+void __iomem *plda_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
+ int where);
+int plda_init_interrupts(struct platform_device *pdev,
+ struct plda_pcie_rp *port,
+ const struct plda_event *event);
+void plda_pcie_setup_window(void __iomem *bridge_base_addr, u32 index,
+ phys_addr_t axi_addr, phys_addr_t pci_addr,
+ size_t size);
+void plda_pcie_setup_inbound_address_translation(struct plda_pcie_rp *port);
+int plda_pcie_setup_iomems(struct pci_host_bridge *bridge,
+ struct plda_pcie_rp *port);
+int plda_pcie_host_init(struct plda_pcie_rp *port, struct pci_ops *ops,
+ const struct plda_event *plda_event);
+void plda_pcie_host_deinit(struct plda_pcie_rp *pcie);
+
+static inline void plda_set_default_msi(struct plda_msi *msi)
+{
+ msi->vector_phy = IMSI_ADDR;
+ msi->num_vectors = PLDA_MAX_NUM_MSI_IRQS;
+}
+
+static inline void plda_pcie_enable_root_port(struct plda_pcie_rp *plda)
+{
+ u32 value;
+
+ value = readl_relaxed(plda->bridge_addr + GEN_SETTINGS);
+ value |= RP_ENABLE;
+ writel_relaxed(value, plda->bridge_addr + GEN_SETTINGS);
+}
+
+static inline void plda_pcie_set_standard_class(struct plda_pcie_rp *plda)
+{
+ u32 value;
+
+ /* set class code and reserve revision id */
+ value = readl_relaxed(plda->bridge_addr + PCIE_PCI_IDS_DW1);
+ value &= REVISION_ID_MASK;
+ value |= (PCI_CLASS_BRIDGE_PCI << IDS_CLASS_CODE_SHIFT);
+ writel_relaxed(value, plda->bridge_addr + PCIE_PCI_IDS_DW1);
+}
+
+static inline void plda_pcie_set_pref_win_64bit(struct plda_pcie_rp *plda)
+{
+ u32 value;
+
+ value = readl_relaxed(plda->bridge_addr + PCIE_WINROM);
+ value |= PREF_MEM_WIN_64_SUPPORT;
+ writel_relaxed(value, plda->bridge_addr + PCIE_WINROM);
+}
+
+static inline void plda_pcie_disable_ltr(struct plda_pcie_rp *plda)
+{
+ u32 value;
+
+ value = readl_relaxed(plda->bridge_addr + PMSG_SUPPORT_RX);
+ value &= ~PMSG_LTR_SUPPORT;
+ writel_relaxed(value, plda->bridge_addr + PMSG_SUPPORT_RX);
+}
+
+static inline void plda_pcie_disable_func(struct plda_pcie_rp *plda)
+{
+ u32 value;
+
+ value = readl_relaxed(plda->bridge_addr + PCI_MISC);
+ value |= PHY_FUNCTION_DIS;
+ writel_relaxed(value, plda->bridge_addr + PCI_MISC);
+}
+
+static inline void plda_pcie_write_rc_bar(struct plda_pcie_rp *plda, u64 val)
+{
+ void __iomem *addr = plda->bridge_addr + CONFIG_SPACE_ADDR_OFFSET;
+
+ writel_relaxed(lower_32_bits(val), addr + PCI_BASE_ADDRESS_0);
+ writel_relaxed(upper_32_bits(val), addr + PCI_BASE_ADDRESS_1);
+}
+#endif /* _PCIE_PLDA_H */
diff --git a/drivers/pci/controller/plda/pcie-starfive.c b/drivers/pci/controller/plda/pcie-starfive.c
new file mode 100644
index 000000000000..3caf53c6c082
--- /dev/null
+++ b/drivers/pci/controller/plda/pcie-starfive.c
@@ -0,0 +1,492 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * PCIe host controller driver for the StarFive JH7110 SoC.
+ *
+ * Copyright (C) 2023 StarFive Technology Co., Ltd.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_pci.h>
+#include <linux/pci.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include "../../pci.h"
+
+#include "pcie-plda.h"
+
+#define PCIE_FUNC_NUM 4
+
+/* system control */
+#define STG_SYSCON_PCIE0_BASE 0x48
+#define STG_SYSCON_PCIE1_BASE 0x1f8
+
+#define STG_SYSCON_AR_OFFSET 0x78
+#define STG_SYSCON_AXI4_SLVL_AR_MASK GENMASK(22, 8)
+#define STG_SYSCON_AXI4_SLVL_PHY_AR(x) FIELD_PREP(GENMASK(20, 17), x)
+#define STG_SYSCON_AW_OFFSET 0x7c
+#define STG_SYSCON_AXI4_SLVL_AW_MASK GENMASK(14, 0)
+#define STG_SYSCON_AXI4_SLVL_PHY_AW(x) FIELD_PREP(GENMASK(12, 9), x)
+#define STG_SYSCON_CLKREQ BIT(22)
+#define STG_SYSCON_CKREF_SRC_MASK GENMASK(19, 18)
+#define STG_SYSCON_RP_NEP_OFFSET 0xe8
+#define STG_SYSCON_K_RP_NEP BIT(8)
+#define STG_SYSCON_LNKSTA_OFFSET 0x170
+#define DATA_LINK_ACTIVE BIT(5)
+
+/* Parameters for the waiting for link up routine */
+#define LINK_WAIT_MAX_RETRIES 10
+#define LINK_WAIT_USLEEP_MIN 90000
+#define LINK_WAIT_USLEEP_MAX 100000
+
+struct starfive_jh7110_pcie {
+ struct plda_pcie_rp plda;
+ struct reset_control *resets;
+ struct clk_bulk_data *clks;
+ struct regmap *reg_syscon;
+ struct gpio_desc *power_gpio;
+ struct gpio_desc *reset_gpio;
+ struct phy *phy;
+
+ unsigned int stg_pcie_base;
+ int num_clks;
+};
+
+/*
+ * JH7110 PCIe port BAR0/1 can be configured as 64-bit prefetchable memory
+ * space. PCIe read and write requests targeting BAR0/1 are routed to so called
+ * 'Bridge Configuration space' in PLDA IP datasheet, which contains the bridge
+ * internal registers, such as interrupt, DMA and ATU registers...
+ * JH7110 can access the Bridge Configuration space by local bus, and don`t
+ * want the bridge internal registers accessed by the DMA from EP devices.
+ * Thus, they are unimplemented and should be hidden here.
+ */
+static bool starfive_pcie_hide_rc_bar(struct pci_bus *bus, unsigned int devfn,
+ int offset)
+{
+ if (pci_is_root_bus(bus) && !devfn &&
+ (offset == PCI_BASE_ADDRESS_0 || offset == PCI_BASE_ADDRESS_1))
+ return true;
+
+ return false;
+}
+
+static int starfive_pcie_config_write(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 value)
+{
+ if (starfive_pcie_hide_rc_bar(bus, devfn, where))
+ return PCIBIOS_SUCCESSFUL;
+
+ return pci_generic_config_write(bus, devfn, where, size, value);
+}
+
+static int starfive_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *value)
+{
+ if (starfive_pcie_hide_rc_bar(bus, devfn, where)) {
+ *value = 0;
+ return PCIBIOS_SUCCESSFUL;
+ }
+
+ return pci_generic_config_read(bus, devfn, where, size, value);
+}
+
+static int starfive_pcie_parse_dt(struct starfive_jh7110_pcie *pcie,
+ struct device *dev)
+{
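+	/* Ack the MSI summary bit in ISTATUS_LOCAL, then demux the per-vector bits in ISTATUS_MSI */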
+ int domain_nr;
+
+ pcie->num_clks = devm_clk_bulk_get_all(dev, &pcie->clks);
+ if (pcie->num_clks < 0)
+ return dev_err_probe(dev, pcie->num_clks,
+ "failed to get pcie clocks\n");
+
+ pcie->resets = devm_reset_control_array_get_exclusive(dev);
+ if (IS_ERR(pcie->resets))
+ return dev_err_probe(dev, PTR_ERR(pcie->resets),
+ "failed to get pcie resets");
+
+ pcie->reg_syscon =
+ syscon_regmap_lookup_by_phandle(dev->of_node,
+ "starfive,stg-syscon");
+
+ if (IS_ERR(pcie->reg_syscon))
+ return dev_err_probe(dev, PTR_ERR(pcie->reg_syscon),
+ "failed to parse starfive,stg-syscon\n");
+
+ pcie->phy = devm_phy_optional_get(dev, NULL);
+ if (IS_ERR(pcie->phy))
+ return dev_err_probe(dev, PTR_ERR(pcie->phy),
+ "failed to get pcie phy\n");
+
+ /*
+	 * The PCIe domain numbers are statically assigned in the JH7110 DTS.
+	 * Since the STG system controller defines different register bases for
+	 * PCIe RP0 and RP1, the domain number is used to identify which
+	 * controller is doing the hardware initialization.
+ */
+ domain_nr = of_get_pci_domain_nr(dev->of_node);
+
+ if (domain_nr < 0 || domain_nr > 1)
+ return dev_err_probe(dev, -ENODEV,
+ "failed to get valid pcie domain\n");
+
+ if (domain_nr == 0)
+ pcie->stg_pcie_base = STG_SYSCON_PCIE0_BASE;
+ else
+ pcie->stg_pcie_base = STG_SYSCON_PCIE1_BASE;
+
+ pcie->reset_gpio = devm_gpiod_get_optional(dev, "perst",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(pcie->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(pcie->reset_gpio),
+ "failed to get perst-gpio\n");
+
+ pcie->power_gpio = devm_gpiod_get_optional(dev, "enable",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(pcie->power_gpio))
+ return dev_err_probe(dev, PTR_ERR(pcie->power_gpio),
+ "failed to get power-gpio\n");
+
+ return 0;
+}
+
+static struct pci_ops starfive_pcie_ops = {
+ .map_bus = plda_pcie_map_bus,
+ .read = starfive_pcie_config_read,
+ .write = starfive_pcie_config_write,
+};
+
+static int starfive_pcie_clk_rst_init(struct starfive_jh7110_pcie *pcie)
+{
+ struct device *dev = pcie->plda.dev;
+ int ret;
+
+ ret = clk_bulk_prepare_enable(pcie->num_clks, pcie->clks);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to enable clocks\n");
+
+ ret = reset_control_deassert(pcie->resets);
+ if (ret) {
+ clk_bulk_disable_unprepare(pcie->num_clks, pcie->clks);
+ dev_err_probe(dev, ret, "failed to deassert resets\n");
+ }
+
+ return ret;
+}
+
+static void starfive_pcie_clk_rst_deinit(struct starfive_jh7110_pcie *pcie)
+{
+ reset_control_assert(pcie->resets);
+ clk_bulk_disable_unprepare(pcie->num_clks, pcie->clks);
+}
+
+static bool starfive_pcie_link_up(struct plda_pcie_rp *plda)
+{
+ struct starfive_jh7110_pcie *pcie =
+ container_of(plda, struct starfive_jh7110_pcie, plda);
+ int ret;
+ u32 stg_reg_val;
+
+ ret = regmap_read(pcie->reg_syscon,
+ pcie->stg_pcie_base + STG_SYSCON_LNKSTA_OFFSET,
+ &stg_reg_val);
+ if (ret) {
+ dev_err(pcie->plda.dev, "failed to read link status\n");
+ return false;
+ }
+
+ return !!(stg_reg_val & DATA_LINK_ACTIVE);
+}
+
+static int starfive_pcie_host_wait_for_link(struct starfive_jh7110_pcie *pcie)
+{
+ int retries;
+
+ /* Check if the link is up or not */
+ for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
+ if (starfive_pcie_link_up(&pcie->plda)) {
+ dev_info(pcie->plda.dev, "port link up\n");
+ return 0;
+ }
+ usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int starfive_pcie_enable_phy(struct device *dev,
+ struct starfive_jh7110_pcie *pcie)
+{
+ int ret;
+
+ if (!pcie->phy)
+ return 0;
+
+ ret = phy_init(pcie->phy);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to initialize pcie phy\n");
+
+ ret = phy_set_mode(pcie->phy, PHY_MODE_PCIE);
+ if (ret) {
+ dev_err_probe(dev, ret, "failed to set pcie mode\n");
+ goto err_phy_on;
+ }
+
+ ret = phy_power_on(pcie->phy);
+ if (ret) {
+ dev_err_probe(dev, ret, "failed to power on pcie phy\n");
+ goto err_phy_on;
+ }
+
+ return 0;
+
+err_phy_on:
+ phy_exit(pcie->phy);
+ return ret;
+}
+
+static void starfive_pcie_disable_phy(struct starfive_jh7110_pcie *pcie)
+{
+ phy_power_off(pcie->phy);
+ phy_exit(pcie->phy);
+}
+
+static void starfive_pcie_host_deinit(struct plda_pcie_rp *plda)
+{
+ struct starfive_jh7110_pcie *pcie =
+ container_of(plda, struct starfive_jh7110_pcie, plda);
+
+ starfive_pcie_clk_rst_deinit(pcie);
+ if (pcie->power_gpio)
+ gpiod_set_value_cansleep(pcie->power_gpio, 0);
+ starfive_pcie_disable_phy(pcie);
+}
+
+static int starfive_pcie_host_init(struct plda_pcie_rp *plda)
+{
+ struct starfive_jh7110_pcie *pcie =
+ container_of(plda, struct starfive_jh7110_pcie, plda);
+ struct device *dev = plda->dev;
+ int ret;
+ int i;
+
+ ret = starfive_pcie_enable_phy(dev, pcie);
+ if (ret)
+ return ret;
+
+ regmap_update_bits(pcie->reg_syscon,
+ pcie->stg_pcie_base + STG_SYSCON_RP_NEP_OFFSET,
+ STG_SYSCON_K_RP_NEP, STG_SYSCON_K_RP_NEP);
+
+ regmap_update_bits(pcie->reg_syscon,
+ pcie->stg_pcie_base + STG_SYSCON_AW_OFFSET,
+ STG_SYSCON_CKREF_SRC_MASK,
+ FIELD_PREP(STG_SYSCON_CKREF_SRC_MASK, 2));
+
+ regmap_update_bits(pcie->reg_syscon,
+ pcie->stg_pcie_base + STG_SYSCON_AW_OFFSET,
+ STG_SYSCON_CLKREQ, STG_SYSCON_CLKREQ);
+
+ ret = starfive_pcie_clk_rst_init(pcie);
+ if (ret)
+ return ret;
+
+ if (pcie->power_gpio)
+ gpiod_set_value_cansleep(pcie->power_gpio, 1);
+
+ if (pcie->reset_gpio)
+ gpiod_set_value_cansleep(pcie->reset_gpio, 1);
+
+ /* Disable physical functions except #0 */
+ for (i = 1; i < PCIE_FUNC_NUM; i++) {
+ regmap_update_bits(pcie->reg_syscon,
+ pcie->stg_pcie_base + STG_SYSCON_AR_OFFSET,
+ STG_SYSCON_AXI4_SLVL_AR_MASK,
+ STG_SYSCON_AXI4_SLVL_PHY_AR(i));
+
+ regmap_update_bits(pcie->reg_syscon,
+ pcie->stg_pcie_base + STG_SYSCON_AW_OFFSET,
+ STG_SYSCON_AXI4_SLVL_AW_MASK,
+ STG_SYSCON_AXI4_SLVL_PHY_AW(i));
+
+ plda_pcie_disable_func(plda);
+ }
+
+ regmap_update_bits(pcie->reg_syscon,
+ pcie->stg_pcie_base + STG_SYSCON_AR_OFFSET,
+ STG_SYSCON_AXI4_SLVL_AR_MASK, 0);
+ regmap_update_bits(pcie->reg_syscon,
+ pcie->stg_pcie_base + STG_SYSCON_AW_OFFSET,
+ STG_SYSCON_AXI4_SLVL_AW_MASK, 0);
+
+ plda_pcie_enable_root_port(plda);
+ plda_pcie_write_rc_bar(plda, 0);
+
+ /* PCIe PCI Standard Configuration Identification Settings. */
+ plda_pcie_set_standard_class(plda);
+
+ /*
+	 * LTR message reception is enabled by default via the "PCIe Message
+	 * Reception" register, but the forwarding ID and address are left
+	 * uninitialized. If LTR message forwarding is neither disabled here
+	 * nor given a legal forwarding address, the kernel gets stuck.
+	 * As a workaround, disable LTR message forwarding here before using
+	 * this feature.
+ */
+ plda_pcie_disable_ltr(plda);
+
+ /*
+	 * Enable 64-bit addressing for the prefetchable memory window on the
+	 * JH7110. The 64-bit prefetchable address translation configuration in
+	 * the ATU only takes effect once this register setting is enabled.
+ */
+ plda_pcie_set_pref_win_64bit(plda);
+
+ /*
+	 * Ensure that PERST has been asserted for at least 100 ms;
+	 * the sleep value is T_PVPERL from PCIe CEM spec r2.0 (Table 2-4).
+ */
+ msleep(100);
+ if (pcie->reset_gpio)
+ gpiod_set_value_cansleep(pcie->reset_gpio, 0);
+
+ /*
+ * With a Downstream Port (<=5GT/s), software must wait a minimum
+ * of 100ms following exit from a conventional reset before
+ * sending a configuration request to the device.
+ */
+ msleep(PCIE_RESET_CONFIG_WAIT_MS);
+
+ if (starfive_pcie_host_wait_for_link(pcie))
+ dev_info(dev, "port link down\n");
+
+ return 0;
+}
+
+static const struct plda_pcie_host_ops sf_host_ops = {
+ .host_init = starfive_pcie_host_init,
+ .host_deinit = starfive_pcie_host_deinit,
+};
+
+static const struct plda_event stf_pcie_event = {
+ .intx_event = EVENT_PM_MSI_INT_INTX,
+ .msi_event = EVENT_PM_MSI_INT_MSI
+};
+
+static int starfive_pcie_probe(struct platform_device *pdev)
+{
+ struct starfive_jh7110_pcie *pcie;
+ struct device *dev = &pdev->dev;
+ struct plda_pcie_rp *plda;
+ int ret;
+
+ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+ if (!pcie)
+ return -ENOMEM;
+
+ plda = &pcie->plda;
+ plda->dev = dev;
+
+ ret = starfive_pcie_parse_dt(pcie, dev);
+ if (ret)
+ return ret;
+
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_get_sync(&pdev->dev);
+
+ plda->host_ops = &sf_host_ops;
+ plda->num_events = PLDA_MAX_EVENT_NUM;
+	/* Enable all interrupt events except the AXI and PCIe doorbells */
+ plda->events_bitmap = GENMASK(PLDA_INT_EVENT_NUM - 1, 0)
+ & ~BIT(PLDA_AXI_DOORBELL)
+ & ~BIT(PLDA_PCIE_DOORBELL);
+ plda->events_bitmap <<= PLDA_NUM_DMA_EVENTS;
+ ret = plda_pcie_host_init(&pcie->plda, &starfive_pcie_ops,
+ &stf_pcie_event);
+ if (ret) {
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, pcie);
+
+ return 0;
+}
+
+static void starfive_pcie_remove(struct platform_device *pdev)
+{
+ struct starfive_jh7110_pcie *pcie = platform_get_drvdata(pdev);
+
+ pm_runtime_put(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ plda_pcie_host_deinit(&pcie->plda);
+ platform_set_drvdata(pdev, NULL);
+}
+
+static int starfive_pcie_suspend_noirq(struct device *dev)
+{
+ struct starfive_jh7110_pcie *pcie = dev_get_drvdata(dev);
+
+ clk_bulk_disable_unprepare(pcie->num_clks, pcie->clks);
+ starfive_pcie_disable_phy(pcie);
+
+ return 0;
+}
+
+static int starfive_pcie_resume_noirq(struct device *dev)
+{
+ struct starfive_jh7110_pcie *pcie = dev_get_drvdata(dev);
+ int ret;
+
+ ret = starfive_pcie_enable_phy(dev, pcie);
+ if (ret)
+ return ret;
+
+ ret = clk_bulk_prepare_enable(pcie->num_clks, pcie->clks);
+ if (ret) {
+ dev_err(dev, "failed to enable clocks\n");
+ starfive_pcie_disable_phy(pcie);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops starfive_pcie_pm_ops = {
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(starfive_pcie_suspend_noirq,
+ starfive_pcie_resume_noirq)
+};
+
+static const struct of_device_id starfive_pcie_of_match[] = {
+ { .compatible = "starfive,jh7110-pcie", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, starfive_pcie_of_match);
+
+static struct platform_driver starfive_pcie_driver = {
+ .driver = {
+ .name = "pcie-starfive",
+ .of_match_table = of_match_ptr(starfive_pcie_of_match),
+ .pm = pm_sleep_ptr(&starfive_pcie_pm_ops),
+ },
+ .probe = starfive_pcie_probe,
+ .remove = starfive_pcie_remove,
+};
+module_platform_driver(starfive_pcie_driver);
+
+MODULE_DESCRIPTION("StarFive JH7110 PCIe host driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
index e718a816d481..ec6afc38e898 100644
--- a/drivers/pci/controller/vmd.c
+++ b/drivers/pci/controller/vmd.c
@@ -7,6 +7,7 @@
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
+#include <linux/irqchip/irq-msi-lib.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/msi.h>
@@ -17,6 +18,8 @@
#include <linux/rculist.h>
#include <linux/rcupdate.h>
+#include <xen/xen.h>
+
#include <asm/irqdomain.h>
#define VMD_CFGBAR 0
@@ -125,7 +128,7 @@ struct vmd_irq_list {
struct vmd_dev {
struct pci_dev *dev;
- spinlock_t cfg_lock;
+ raw_spinlock_t cfg_lock;
void __iomem *cfgbar;
int msix_count;
@@ -172,69 +175,63 @@ static void vmd_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
msg->arch_addr_lo.destid_0_7 = index_from_irqs(vmd, irq);
}
-/*
- * We rely on MSI_FLAG_USE_DEF_CHIP_OPS to set the IRQ mask/unmask ops.
- */
static void vmd_irq_enable(struct irq_data *data)
{
struct vmd_irq *vmdirq = data->chip_data;
- unsigned long flags;
- raw_spin_lock_irqsave(&list_lock, flags);
- WARN_ON(vmdirq->enabled);
- list_add_tail_rcu(&vmdirq->node, &vmdirq->irq->irq_list);
- vmdirq->enabled = true;
- raw_spin_unlock_irqrestore(&list_lock, flags);
+ scoped_guard(raw_spinlock_irqsave, &list_lock) {
+ WARN_ON(vmdirq->enabled);
+ list_add_tail_rcu(&vmdirq->node, &vmdirq->irq->irq_list);
+ vmdirq->enabled = true;
+ }
+}
+static void vmd_pci_msi_enable(struct irq_data *data)
+{
+ vmd_irq_enable(data->parent_data);
data->chip->irq_unmask(data);
}
+static unsigned int vmd_pci_msi_startup(struct irq_data *data)
+{
+ vmd_pci_msi_enable(data);
+ return 0;
+}
+
static void vmd_irq_disable(struct irq_data *data)
{
struct vmd_irq *vmdirq = data->chip_data;
- unsigned long flags;
- data->chip->irq_mask(data);
-
- raw_spin_lock_irqsave(&list_lock, flags);
- if (vmdirq->enabled) {
- list_del_rcu(&vmdirq->node);
- vmdirq->enabled = false;
+ scoped_guard(raw_spinlock_irqsave, &list_lock) {
+ if (vmdirq->enabled) {
+ list_del_rcu(&vmdirq->node);
+ vmdirq->enabled = false;
+ }
}
- raw_spin_unlock_irqrestore(&list_lock, flags);
}
-/*
- * XXX: Stubbed until we develop acceptable way to not create conflicts with
- * other devices sharing the same vector.
- */
-static int vmd_irq_set_affinity(struct irq_data *data,
- const struct cpumask *dest, bool force)
+static void vmd_pci_msi_disable(struct irq_data *data)
+{
+ data->chip->irq_mask(data);
+ vmd_irq_disable(data->parent_data);
+}
+
+static void vmd_pci_msi_shutdown(struct irq_data *data)
{
- return -EINVAL;
+ vmd_pci_msi_disable(data);
}
static struct irq_chip vmd_msi_controller = {
.name = "VMD-MSI",
- .irq_enable = vmd_irq_enable,
- .irq_disable = vmd_irq_disable,
.irq_compose_msi_msg = vmd_compose_msi_msg,
- .irq_set_affinity = vmd_irq_set_affinity,
};
-static irq_hw_number_t vmd_get_hwirq(struct msi_domain_info *info,
- msi_alloc_info_t *arg)
-{
- return 0;
-}
-
/*
* XXX: We can be even smarter selecting the best IRQ once we solve the
* affinity problem.
*/
static struct vmd_irq_list *vmd_next_irq(struct vmd_dev *vmd, struct msi_desc *desc)
{
- unsigned long flags;
int i, best;
if (vmd->msix_count == 1 + vmd->first_vec)
@@ -251,113 +248,130 @@ static struct vmd_irq_list *vmd_next_irq(struct vmd_dev *vmd, struct msi_desc *d
return &vmd->irqs[vmd->first_vec];
}
- raw_spin_lock_irqsave(&list_lock, flags);
- best = vmd->first_vec + 1;
- for (i = best; i < vmd->msix_count; i++)
- if (vmd->irqs[i].count < vmd->irqs[best].count)
- best = i;
- vmd->irqs[best].count++;
- raw_spin_unlock_irqrestore(&list_lock, flags);
+ scoped_guard(raw_spinlock_irq, &list_lock) {
+ best = vmd->first_vec + 1;
+ for (i = best; i < vmd->msix_count; i++)
+ if (vmd->irqs[i].count < vmd->irqs[best].count)
+ best = i;
+ vmd->irqs[best].count++;
+ }
return &vmd->irqs[best];
}
-static int vmd_msi_init(struct irq_domain *domain, struct msi_domain_info *info,
- unsigned int virq, irq_hw_number_t hwirq,
- msi_alloc_info_t *arg)
+static void vmd_msi_free(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs);
+
+static int vmd_msi_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *arg)
{
- struct msi_desc *desc = arg->desc;
- struct vmd_dev *vmd = vmd_from_bus(msi_desc_to_pci_dev(desc)->bus);
- struct vmd_irq *vmdirq = kzalloc(sizeof(*vmdirq), GFP_KERNEL);
+ struct msi_desc *desc = ((msi_alloc_info_t *)arg)->desc;
+ struct vmd_dev *vmd = domain->host_data;
+ struct vmd_irq *vmdirq;
- if (!vmdirq)
- return -ENOMEM;
+ for (int i = 0; i < nr_irqs; ++i) {
+ vmdirq = kzalloc(sizeof(*vmdirq), GFP_KERNEL);
+ if (!vmdirq) {
+ vmd_msi_free(domain, virq, i);
+ return -ENOMEM;
+ }
+
+ INIT_LIST_HEAD(&vmdirq->node);
+ vmdirq->irq = vmd_next_irq(vmd, desc);
+ vmdirq->virq = virq + i;
- INIT_LIST_HEAD(&vmdirq->node);
- vmdirq->irq = vmd_next_irq(vmd, desc);
- vmdirq->virq = virq;
+ irq_domain_set_info(domain, virq + i, vmdirq->irq->virq,
+ &vmd_msi_controller, vmdirq,
+ handle_untracked_irq, vmd, NULL);
+ }
- irq_domain_set_info(domain, virq, vmdirq->irq->virq, info->chip, vmdirq,
- handle_untracked_irq, vmd, NULL);
return 0;
}
-static void vmd_msi_free(struct irq_domain *domain,
- struct msi_domain_info *info, unsigned int virq)
+static void vmd_msi_free(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs)
{
- struct vmd_irq *vmdirq = irq_get_chip_data(virq);
- unsigned long flags;
-
- synchronize_srcu(&vmdirq->irq->srcu);
-
- /* XXX: Potential optimization to rebalance */
- raw_spin_lock_irqsave(&list_lock, flags);
- vmdirq->irq->count--;
- raw_spin_unlock_irqrestore(&list_lock, flags);
+ struct irq_data *irq_data;
+ struct vmd_irq *vmdirq;
- kfree(vmdirq);
-}
+ for (int i = 0; i < nr_irqs; ++i) {
+ irq_data = irq_domain_get_irq_data(domain, virq + i);
+ vmdirq = irq_data->chip_data;
-static int vmd_msi_prepare(struct irq_domain *domain, struct device *dev,
- int nvec, msi_alloc_info_t *arg)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct vmd_dev *vmd = vmd_from_bus(pdev->bus);
+ synchronize_srcu(&vmdirq->irq->srcu);
- if (nvec > vmd->msix_count)
- return vmd->msix_count;
+ /* XXX: Potential optimization to rebalance */
+ scoped_guard(raw_spinlock_irq, &list_lock)
+ vmdirq->irq->count--;
- memset(arg, 0, sizeof(*arg));
- return 0;
+ kfree(vmdirq);
+ }
}
-static void vmd_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc)
+static const struct irq_domain_ops vmd_msi_domain_ops = {
+ .alloc = vmd_msi_alloc,
+ .free = vmd_msi_free,
+};
+
+static bool vmd_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
+ struct irq_domain *real_parent,
+ struct msi_domain_info *info)
{
- arg->desc = desc;
+ if (!msi_lib_init_dev_msi_info(dev, domain, real_parent, info))
+ return false;
+
+ info->chip->irq_startup = vmd_pci_msi_startup;
+ info->chip->irq_shutdown = vmd_pci_msi_shutdown;
+ info->chip->irq_enable = vmd_pci_msi_enable;
+ info->chip->irq_disable = vmd_pci_msi_disable;
+ return true;
}
-static struct msi_domain_ops vmd_msi_domain_ops = {
- .get_hwirq = vmd_get_hwirq,
- .msi_init = vmd_msi_init,
- .msi_free = vmd_msi_free,
- .msi_prepare = vmd_msi_prepare,
- .set_desc = vmd_set_desc,
-};
+#define VMD_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | MSI_FLAG_PCI_MSIX)
+#define VMD_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_NO_AFFINITY)
-static struct msi_domain_info vmd_msi_domain_info = {
- .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_PCI_MSIX,
- .ops = &vmd_msi_domain_ops,
- .chip = &vmd_msi_controller,
+static const struct msi_parent_ops vmd_msi_parent_ops = {
+ .supported_flags = VMD_MSI_FLAGS_SUPPORTED,
+ .required_flags = VMD_MSI_FLAGS_REQUIRED,
+ .bus_select_token = DOMAIN_BUS_VMD_MSI,
+ .bus_select_mask = MATCH_PCI_MSI,
+ .prefix = "VMD-",
+ .init_dev_msi_info = vmd_init_dev_msi_info,
};
-static void vmd_set_msi_remapping(struct vmd_dev *vmd, bool enable)
-{
- u16 reg;
-
- pci_read_config_word(vmd->dev, PCI_REG_VMCONFIG, &reg);
- reg = enable ? (reg & ~VMCONFIG_MSI_REMAP) :
- (reg | VMCONFIG_MSI_REMAP);
- pci_write_config_word(vmd->dev, PCI_REG_VMCONFIG, reg);
-}
-
static int vmd_create_irq_domain(struct vmd_dev *vmd)
{
- struct fwnode_handle *fn;
+ struct irq_domain_info info = {
+ .size = vmd->msix_count,
+ .ops = &vmd_msi_domain_ops,
+ .host_data = vmd,
+ };
- fn = irq_domain_alloc_named_id_fwnode("VMD-MSI", vmd->sysdata.domain);
- if (!fn)
+ info.fwnode = irq_domain_alloc_named_id_fwnode("VMD-MSI",
+ vmd->sysdata.domain);
+ if (!info.fwnode)
return -ENODEV;
- vmd->irq_domain = pci_msi_create_irq_domain(fn, &vmd_msi_domain_info, NULL);
+ vmd->irq_domain = msi_create_parent_irq_domain(&info,
+ &vmd_msi_parent_ops);
if (!vmd->irq_domain) {
- irq_domain_free_fwnode(fn);
+ irq_domain_free_fwnode(info.fwnode);
return -ENODEV;
}
return 0;
}
+static void vmd_set_msi_remapping(struct vmd_dev *vmd, bool enable)
+{
+ u16 reg;
+
+ pci_read_config_word(vmd->dev, PCI_REG_VMCONFIG, &reg);
+ reg = enable ? (reg & ~VMCONFIG_MSI_REMAP) :
+ (reg | VMCONFIG_MSI_REMAP);
+ pci_write_config_word(vmd->dev, PCI_REG_VMCONFIG, reg);
+}
+
static void vmd_remove_irq_domain(struct vmd_dev *vmd)
{
/*
@@ -396,29 +410,24 @@ static int vmd_pci_read(struct pci_bus *bus, unsigned int devfn, int reg,
{
struct vmd_dev *vmd = vmd_from_bus(bus);
void __iomem *addr = vmd_cfg_addr(vmd, bus, devfn, reg, len);
- unsigned long flags;
- int ret = 0;
if (!addr)
return -EFAULT;
- spin_lock_irqsave(&vmd->cfg_lock, flags);
+ guard(raw_spinlock_irqsave)(&vmd->cfg_lock);
switch (len) {
case 1:
*value = readb(addr);
- break;
+ return 0;
case 2:
*value = readw(addr);
- break;
+ return 0;
case 4:
*value = readl(addr);
- break;
+ return 0;
default:
- ret = -EINVAL;
- break;
+ return -EINVAL;
}
- spin_unlock_irqrestore(&vmd->cfg_lock, flags);
- return ret;
}
/*
@@ -431,32 +440,27 @@ static int vmd_pci_write(struct pci_bus *bus, unsigned int devfn, int reg,
{
struct vmd_dev *vmd = vmd_from_bus(bus);
void __iomem *addr = vmd_cfg_addr(vmd, bus, devfn, reg, len);
- unsigned long flags;
- int ret = 0;
if (!addr)
return -EFAULT;
- spin_lock_irqsave(&vmd->cfg_lock, flags);
+ guard(raw_spinlock_irqsave)(&vmd->cfg_lock);
switch (len) {
case 1:
writeb(value, addr);
readb(addr);
- break;
+ return 0;
case 2:
writew(value, addr);
readw(addr);
- break;
+ return 0;
case 4:
writel(value, addr);
readl(addr);
- break;
+ return 0;
default:
- ret = -EINVAL;
- break;
+ return -EINVAL;
}
- spin_unlock_irqrestore(&vmd->cfg_lock, flags);
- return ret;
}
static struct pci_ops vmd_ops = {
@@ -525,10 +529,9 @@ static void vmd_domain_reset(struct vmd_dev *vmd)
base = vmd->cfgbar + PCIE_ECAM_OFFSET(bus,
PCI_DEVFN(dev, 0), 0);
- hdr_type = readb(base + PCI_HEADER_TYPE) &
- PCI_HEADER_TYPE_MASK;
+ hdr_type = readb(base + PCI_HEADER_TYPE);
- functions = (hdr_type & 0x80) ? 8 : 1;
+ functions = (hdr_type & PCI_HEADER_TYPE_MFD) ? 8 : 1;
for (fn = 0; fn < functions; fn++) {
base = vmd->cfgbar + PCIE_ECAM_OFFSET(bus,
PCI_DEVFN(dev, fn), 0);
@@ -541,8 +544,23 @@ static void vmd_domain_reset(struct vmd_dev *vmd)
PCI_CLASS_BRIDGE_PCI))
continue;
- memset_io(base + PCI_IO_BASE, 0,
- PCI_ROM_ADDRESS1 - PCI_IO_BASE);
+ /*
+ * Temporarily disable the I/O range before updating
+ * PCI_IO_BASE.
+ */
+ writel(0x0000ffff, base + PCI_IO_BASE_UPPER16);
+ /* Update lower 16 bits of I/O base/limit */
+ writew(0x00f0, base + PCI_IO_BASE);
+ /* Update upper 16 bits of I/O base/limit */
+ writel(0, base + PCI_IO_BASE_UPPER16);
+
+ /* MMIO Base/Limit */
+ writel(0x0000fff0, base + PCI_MEMORY_BASE);
+
+ /* Prefetchable MMIO Base/Limit */
+ writel(0, base + PCI_PREF_LIMIT_UPPER32);
+ writel(0x0000fff0, base + PCI_PREF_MEMORY_BASE);
+ writel(0xffffffff, base + PCI_PREF_BASE_UPPER32);
}
}
}
@@ -560,22 +578,6 @@ static void vmd_detach_resources(struct vmd_dev *vmd)
vmd->dev->resource[VMD_MEMBAR2].child = NULL;
}
-/*
- * VMD domains start at 0x10000 to not clash with ACPI _SEG domains.
- * Per ACPI r6.0, sec 6.5.6, _SEG returns an integer, of which the lower
- * 16 bits are the PCI Segment Group (domain) number. Other bits are
- * currently reserved.
- */
-static int vmd_find_free_domain(void)
-{
- int domain = 0xffff;
- struct pci_bus *bus = NULL;
-
- while ((bus = pci_find_next_bus(bus)) != NULL)
- domain = max_t(int, domain, pci_domain_nr(bus));
- return domain + 1;
-}
-
static int vmd_get_phys_offsets(struct vmd_dev *vmd, bool native_hint,
resource_size_t *offset1,
resource_size_t *offset2)
@@ -737,11 +739,9 @@ static int vmd_pm_enable_quirk(struct pci_dev *pdev, void *userdata)
if (!(features & VMD_FEAT_BIOS_PM_QUIRK))
return 0;
- pci_enable_link_state(pdev, PCIE_LINK_STATE_ALL);
-
pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_LTR);
if (!pos)
- return 0;
+ goto out_state_change;
/*
* Skip if the max snoop LTR is non-zero, indicating BIOS has set it
@@ -749,7 +749,7 @@ static int vmd_pm_enable_quirk(struct pci_dev *pdev, void *userdata)
*/
pci_read_config_dword(pdev, pos + PCI_LTR_MAX_SNOOP_LAT, &ltr_reg);
if (!!(ltr_reg & (PCI_LTR_VALUE_MASK | PCI_LTR_SCALE_MASK)))
- return 0;
+ goto out_state_change;
/*
* Set the default values to the maximum required by the platform to
@@ -761,6 +761,13 @@ static int vmd_pm_enable_quirk(struct pci_dev *pdev, void *userdata)
pci_write_config_dword(pdev, pos + PCI_LTR_MAX_SNOOP_LAT, ltr_reg);
pci_info(pdev, "VMD: Default LTR value set by driver\n");
+out_state_change:
+ /*
+ * Ensure devices are in D0 before enabling PCI-PM L1 PM Substates, per
+ * PCIe r6.0, sec 5.5.4.
+ */
+ pci_set_power_state_locked(pdev, PCI_D0);
+ pci_enable_link_state_locked(pdev, PCIE_LINK_STATE_ALL);
return 0;
}
@@ -855,13 +862,6 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
.parent = res,
};
- sd->vmd_dev = vmd->dev;
- sd->domain = vmd_find_free_domain();
- if (sd->domain < 0)
- return sd->domain;
-
- sd->node = pcibus_to_node(vmd->dev->bus);
-
/*
* Currently MSI remapping must be enabled in guest passthrough mode
* due to some missing interrupt remapping plumbing. This is probably
@@ -879,12 +879,6 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
ret = vmd_create_irq_domain(vmd);
if (ret)
return ret;
-
- /*
- * Override the IRQ domain bus token so the domain can be
- * distinguished from a regular PCI/MSI domain.
- */
- irq_domain_update_bus_token(vmd->irq_domain, DOMAIN_BUS_VMD_MSI);
} else {
vmd_set_msi_remapping(vmd, false);
}
@@ -893,9 +887,24 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
pci_add_resource_offset(&resources, &vmd->resources[1], offset[0]);
pci_add_resource_offset(&resources, &vmd->resources[2], offset[1]);
+ sd->vmd_dev = vmd->dev;
+
+ /*
+ * Emulated domains start at 0x10000 to not clash with ACPI _SEG
+ * domains. Per ACPI r6.0, sec 6.5.6, _SEG returns an integer, of
+ * which the lower 16 bits are the PCI Segment Group (domain) number.
+ * Other bits are currently reserved.
+ */
+ sd->domain = pci_bus_find_emul_domain_nr(0, 0x10000, INT_MAX);
+ if (sd->domain < 0)
+ return sd->domain;
+
+ sd->node = pcibus_to_node(vmd->dev->bus);
+
vmd->bus = pci_create_root_bus(&vmd->dev->dev, vmd->busn_start,
&vmd_ops, sd, &resources);
if (!vmd->bus) {
+ pci_bus_release_emul_domain_nr(sd->domain);
pci_free_resource_list(&resources);
vmd_remove_irq_domain(vmd);
return -ENODEV;
@@ -911,6 +920,9 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
dev_set_msi_domain(&vmd->bus->dev,
dev_get_msi_domain(&vmd->dev->dev));
+ WARN(sysfs_create_link(&vmd->dev->dev.kobj, &vmd->bus->dev.kobj,
+ "domain"), "Can't create symlink to domain\n");
+
vmd_acpi_begin();
pci_scan_child_bus(vmd->bus);
@@ -950,9 +962,6 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
pci_bus_add_devices(vmd->bus);
vmd_acpi_end();
-
- WARN(sysfs_create_link(&vmd->dev->dev.kobj, &vmd->bus->dev.kobj,
- "domain"), "Can't create symlink to domain\n");
return 0;
}
@@ -962,6 +971,24 @@ static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id)
struct vmd_dev *vmd;
int err;
+ if (xen_domain()) {
+ /*
+ * Xen has no knowledge of the devices on the VMD bus: the config
+ * space of devices behind the VMD bridge is not visible to Xen,
+ * so Xen cannot discover or configure them in any way.
+ *
+ * Bypassing MSI remapping won't work in that case, because direct
+ * writes by Linux to the MSI entries won't produce functional
+ * interrupts; Xen is the entity that manages the host interrupt
+ * controller and must configure all interrupts. However,
+ * multiplexing of interrupts by the VMD bridge does work under
+ * Xen, so force that mode, which every VMD bridge must support.
+ */
+ features &= ~VMD_FEAT_CAN_BYPASS_MSI_REMAP;
+ }
+
if (resource_size(&dev->resource[VMD_CFGBAR]) < (1 << 20))
return -ENOMEM;
@@ -970,7 +997,8 @@ static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id)
return -ENOMEM;
vmd->dev = dev;
- vmd->instance = ida_simple_get(&vmd_instance_ida, 0, 0, GFP_KERNEL);
+ vmd->sysdata.domain = PCI_DOMAIN_NR_NOT_SET;
+ vmd->instance = ida_alloc(&vmd_instance_ida, GFP_KERNEL);
if (vmd->instance < 0)
return vmd->instance;
@@ -1001,7 +1029,7 @@ static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id)
if (features & VMD_FEAT_OFFSET_FIRST_VECTOR)
vmd->first_vec = 1;
- spin_lock_init(&vmd->cfg_lock);
+ raw_spin_lock_init(&vmd->cfg_lock);
pci_set_drvdata(dev, vmd);
err = vmd_enable_domain(vmd, features);
if (err)
@@ -1012,7 +1040,7 @@ static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id)
return 0;
out_release_instance:
- ida_simple_remove(&vmd_instance_ida, vmd->instance);
+ ida_free(&vmd_instance_ida, vmd->instance);
return err;
}
@@ -1028,20 +1056,21 @@ static void vmd_remove(struct pci_dev *dev)
{
struct vmd_dev *vmd = pci_get_drvdata(dev);
- sysfs_remove_link(&vmd->dev->dev.kobj, "domain");
pci_stop_root_bus(vmd->bus);
+ sysfs_remove_link(&vmd->dev->dev.kobj, "domain");
pci_remove_root_bus(vmd->bus);
vmd_cleanup_srcu(vmd);
vmd_detach_resources(vmd);
vmd_remove_irq_domain(vmd);
- ida_simple_remove(&vmd_instance_ida, vmd->instance);
+ ida_free(&vmd_instance_ida, vmd->instance);
+ pci_bus_release_emul_domain_nr(vmd->sysdata.domain);
}
static void vmd_shutdown(struct pci_dev *dev)
{
- struct vmd_dev *vmd = pci_get_drvdata(dev);
+ struct vmd_dev *vmd = pci_get_drvdata(dev);
- vmd_remove_irq_domain(vmd);
+ vmd_remove_irq_domain(vmd);
}
#ifdef CONFIG_PM_SLEEP
@@ -1063,10 +1092,7 @@ static int vmd_resume(struct device *dev)
struct vmd_dev *vmd = pci_get_drvdata(pdev);
int err, i;
- if (vmd->irq_domain)
- vmd_set_msi_remapping(vmd, true);
- else
- vmd_set_msi_remapping(vmd, false);
+ vmd_set_msi_remapping(vmd, !!vmd->irq_domain);
for (i = 0; i < vmd->msix_count; i++) {
err = devm_request_irq(dev, vmd->irqs[i].virq,
@@ -1100,6 +1126,12 @@ static const struct pci_device_id vmd_ids[] = {
.driver_data = VMD_FEATS_CLIENT,},
{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_VMD_9A0B),
.driver_data = VMD_FEATS_CLIENT,},
+ {PCI_VDEVICE(INTEL, 0xb60b),
+ .driver_data = VMD_FEATS_CLIENT,},
+ {PCI_VDEVICE(INTEL, 0xb06f),
+ .driver_data = VMD_FEATS_CLIENT,},
+ {PCI_VDEVICE(INTEL, 0xb07f),
+ .driver_data = VMD_FEATS_CLIENT,},
{0,}
};
MODULE_DEVICE_TABLE(pci, vmd_ids);
@@ -1117,5 +1149,6 @@ static struct pci_driver vmd_drv = {
module_pci_driver(vmd_drv);
MODULE_AUTHOR("Intel Corporation");
+MODULE_DESCRIPTION("Volume Management Device driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION("0.6");
diff --git a/drivers/pci/devres.c b/drivers/pci/devres.c
new file mode 100644
index 000000000000..9f4190501395
--- /dev/null
+++ b/drivers/pci/devres.c
@@ -0,0 +1,864 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/device.h>
+#include <linux/pci.h>
+#include "pci.h"
+
+/*
+ * On the state of PCI's devres implementation:
+ *
+ * The older PCI devres API has one significant problem:
+ *
+ * It is very strongly tied to the statically allocated mapping table in struct
+ * pcim_iomap_devres below. The newer pcim_ functions in this file largely
+ * avoid this by bypassing the table (providing, for example, ranged
+ * mappings), whereas the functions carried over from the old API still
+ * record their mapping addresses in the table for old-API users.
+ *
+ * TODO:
+ * Remove the legacy table entirely once all calls to pcim_iomap_table() in
+ * the kernel have been removed.
+ */
+
+/*
+ * Legacy struct storing addresses to whole mapped BARs.
+ */
+struct pcim_iomap_devres {
+ void __iomem *table[PCI_NUM_RESOURCES];
+};
+
+/* Used to restore the old INTx state on driver detach. */
+struct pcim_intx_devres {
+ int orig_intx;
+};
+
+enum pcim_addr_devres_type {
+ /* Default initializer. */
+ PCIM_ADDR_DEVRES_TYPE_INVALID,
+
+ /* A requested region spanning an entire BAR. */
+ PCIM_ADDR_DEVRES_TYPE_REGION,
+
+ /*
+ * A requested region spanning an entire BAR, and a mapping for
+ * the entire BAR.
+ */
+ PCIM_ADDR_DEVRES_TYPE_REGION_MAPPING,
+
+ /*
+ * A mapping within a BAR, either spanning the whole BAR or just a
+ * range. Without a requested region.
+ */
+ PCIM_ADDR_DEVRES_TYPE_MAPPING,
+};
+
+/*
+ * This struct envelops IO or MEM addresses, i.e., mappings and region
+ * requests, because those are very frequently requested and released
+ * together.
+ */
+struct pcim_addr_devres {
+ enum pcim_addr_devres_type type;
+ void __iomem *baseaddr;
+ unsigned long offset;
+ unsigned long len;
+ int bar;
+};
+
+static inline void pcim_addr_devres_clear(struct pcim_addr_devres *res)
+{
+ memset(res, 0, sizeof(*res));
+ res->bar = -1;
+}
+
+static void pcim_addr_resource_release(struct device *dev, void *resource_raw)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct pcim_addr_devres *res = resource_raw;
+
+ switch (res->type) {
+ case PCIM_ADDR_DEVRES_TYPE_REGION:
+ pci_release_region(pdev, res->bar);
+ break;
+ case PCIM_ADDR_DEVRES_TYPE_REGION_MAPPING:
+ pci_iounmap(pdev, res->baseaddr);
+ pci_release_region(pdev, res->bar);
+ break;
+ case PCIM_ADDR_DEVRES_TYPE_MAPPING:
+ pci_iounmap(pdev, res->baseaddr);
+ break;
+ default:
+ break;
+ }
+}
+
+static struct pcim_addr_devres *pcim_addr_devres_alloc(struct pci_dev *pdev)
+{
+ struct pcim_addr_devres *res;
+
+ res = devres_alloc_node(pcim_addr_resource_release, sizeof(*res),
+ GFP_KERNEL, dev_to_node(&pdev->dev));
+ if (res)
+ pcim_addr_devres_clear(res);
+ return res;
+}
+
+/* Just for consistency and readability. */
+static inline void pcim_addr_devres_free(struct pcim_addr_devres *res)
+{
+ devres_free(res);
+}
+
+/*
+ * Used by devres to identify a pcim_addr_devres.
+ */
+static int pcim_addr_resources_match(struct device *dev,
+ void *a_raw, void *b_raw)
+{
+ struct pcim_addr_devres *a, *b;
+
+ a = a_raw;
+ b = b_raw;
+
+ if (a->type != b->type)
+ return 0;
+
+ switch (a->type) {
+ case PCIM_ADDR_DEVRES_TYPE_REGION:
+ case PCIM_ADDR_DEVRES_TYPE_REGION_MAPPING:
+ return a->bar == b->bar;
+ case PCIM_ADDR_DEVRES_TYPE_MAPPING:
+ return a->baseaddr == b->baseaddr;
+ default:
+ return 0;
+ }
+}
+
+static void devm_pci_unmap_iospace(struct device *dev, void *ptr)
+{
+ struct resource **res = ptr;
+
+ pci_unmap_iospace(*res);
+}
+
+/**
+ * devm_pci_remap_iospace - Managed pci_remap_iospace()
+ * @dev: Generic device to remap IO address for
+ * @res: Resource describing the I/O space
+ * @phys_addr: physical address of range to be mapped
+ *
+ * Managed pci_remap_iospace(). Map is automatically unmapped on driver
+ * detach.
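+ *
+ * A minimal usage sketch; the resource and physical address would normally
+ * come from the host bridge driver and are only illustrative here::
+ *
+ *	err = devm_pci_remap_iospace(dev, &bridge_io_res, io_phys_addr);
+ *	if (err)
+ *		return err;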
+ */
+int devm_pci_remap_iospace(struct device *dev, const struct resource *res,
+ phys_addr_t phys_addr)
+{
+ const struct resource **ptr;
+ int error;
+
+ ptr = devres_alloc(devm_pci_unmap_iospace, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ error = pci_remap_iospace(res, phys_addr);
+ if (error) {
+ devres_free(ptr);
+ } else {
+ *ptr = res;
+ devres_add(dev, ptr);
+ }
+
+ return error;
+}
+EXPORT_SYMBOL(devm_pci_remap_iospace);
+
+/**
+ * devm_pci_remap_cfgspace - Managed pci_remap_cfgspace()
+ * @dev: Generic device to remap IO address for
+ * @offset: Resource address to map
+ * @size: Size of map
+ *
+ * Managed pci_remap_cfgspace(). Map is automatically unmapped on driver
+ * detach.
+ */
+void __iomem *devm_pci_remap_cfgspace(struct device *dev,
+ resource_size_t offset,
+ resource_size_t size)
+{
+ void __iomem **ptr, *addr;
+
+ ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return NULL;
+
+ addr = pci_remap_cfgspace(offset, size);
+ if (addr) {
+ *ptr = addr;
+ devres_add(dev, ptr);
+ } else
+ devres_free(ptr);
+
+ return addr;
+}
+EXPORT_SYMBOL(devm_pci_remap_cfgspace);
+
+/**
+ * devm_pci_remap_cfg_resource - check, request region and ioremap cfg resource
+ * @dev: generic device to handle the resource for
+ * @res: configuration space resource to be handled
+ *
+ * Checks that a resource is a valid memory region, requests the memory
+ * region and ioremaps it with the pci_remap_cfgspace() API, which ensures
+ * that the proper PCI configuration space memory attributes are used.
+ *
+ * All operations are managed and will be undone on driver detach.
+ *
+ * Returns a pointer to the remapped memory or an IOMEM_ERR_PTR() encoded error
+ * code on failure. Usage example::
+ *
+ * res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ * base = devm_pci_remap_cfg_resource(&pdev->dev, res);
+ * if (IS_ERR(base))
+ * return PTR_ERR(base);
+ */
+void __iomem *devm_pci_remap_cfg_resource(struct device *dev,
+ struct resource *res)
+{
+ resource_size_t size;
+ const char *name;
+ void __iomem *dest_ptr;
+
+ BUG_ON(!dev);
+
+ if (!res || resource_type(res) != IORESOURCE_MEM) {
+ dev_err(dev, "invalid resource\n");
+ return IOMEM_ERR_PTR(-EINVAL);
+ }
+
+ size = resource_size(res);
+
+ if (res->name)
+ name = devm_kasprintf(dev, GFP_KERNEL, "%s %s", dev_name(dev),
+ res->name);
+ else
+ name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL);
+ if (!name)
+ return IOMEM_ERR_PTR(-ENOMEM);
+
+ if (!devm_request_mem_region(dev, res->start, size, name)) {
+ dev_err(dev, "can't request region for resource %pR\n", res);
+ return IOMEM_ERR_PTR(-EBUSY);
+ }
+
+ dest_ptr = devm_pci_remap_cfgspace(dev, res->start, size);
+ if (!dest_ptr) {
+ dev_err(dev, "ioremap failed for resource %pR\n", res);
+ devm_release_mem_region(dev, res->start, size);
+ dest_ptr = IOMEM_ERR_PTR(-ENOMEM);
+ }
+
+ return dest_ptr;
+}
+EXPORT_SYMBOL(devm_pci_remap_cfg_resource);
+
+static void __pcim_clear_mwi(void *pdev_raw)
+{
+ struct pci_dev *pdev = pdev_raw;
+
+ pci_clear_mwi(pdev);
+}
+
+/**
+ * pcim_set_mwi - a device-managed pci_set_mwi()
+ * @pdev: the PCI device for which MWI is enabled
+ *
+ * Managed pci_set_mwi().
+ *
+ * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
+ */
+int pcim_set_mwi(struct pci_dev *pdev)
+{
+ int ret;
+
+ ret = devm_add_action(&pdev->dev, __pcim_clear_mwi, pdev);
+ if (ret != 0)
+ return ret;
+
+ ret = pci_set_mwi(pdev);
+ if (ret != 0)
+ devm_remove_action(&pdev->dev, __pcim_clear_mwi, pdev);
+
+ return ret;
+}
+EXPORT_SYMBOL(pcim_set_mwi);
+
+static inline bool mask_contains_bar(int mask, int bar)
+{
+ return mask & BIT(bar);
+}
+
+static void pcim_intx_restore(struct device *dev, void *data)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct pcim_intx_devres *res = data;
+
+ pci_intx(pdev, res->orig_intx);
+}
+
+static void save_orig_intx(struct pci_dev *pdev, struct pcim_intx_devres *res)
+{
+ u16 pci_command;
+
+ pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
+ res->orig_intx = !(pci_command & PCI_COMMAND_INTX_DISABLE);
+}
+
+/**
+ * pcim_intx - managed pci_intx()
+ * @pdev: the PCI device to operate on
+ * @enable: boolean: whether to enable or disable PCI INTx
+ *
+ * Returns: 0 on success, -ENOMEM on error.
+ *
+ * Enable/disable PCI INTx for device @pdev.
+ * Restore the original state on driver detach.
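+ *
+ * A minimal usage sketch (typically called from probe, e.g. to quiesce
+ * legacy interrupts before MSI is set up; purely illustrative)::
+ *
+ *	ret = pcim_intx(pdev, 0);	/* disable INTx, restored on detach */
+ *	if (ret)
+ *		return ret;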
+ */
+int pcim_intx(struct pci_dev *pdev, int enable)
+{
+ struct pcim_intx_devres *res;
+ struct device *dev = &pdev->dev;
+
+ /*
+ * pcim_intx() must only restore the INTx value that existed before the
+ * driver was loaded, i.e., before it called pcim_intx() for the
+ * first time.
+ */
+ res = devres_find(dev, pcim_intx_restore, NULL, NULL);
+ if (!res) {
+ res = devres_alloc(pcim_intx_restore, sizeof(*res), GFP_KERNEL);
+ if (!res)
+ return -ENOMEM;
+
+ save_orig_intx(pdev, res);
+ devres_add(dev, res);
+ }
+
+ pci_intx(pdev, enable);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pcim_intx);
+
+static void pcim_disable_device(void *pdev_raw)
+{
+ struct pci_dev *pdev = pdev_raw;
+
+ if (!pdev->pinned)
+ pci_disable_device(pdev);
+
+ pdev->is_managed = false;
+}
+
+/**
+ * pcim_enable_device - Managed pci_enable_device()
+ * @pdev: PCI device to be initialized
+ *
+ * Returns: 0 on success, negative error code on failure.
+ *
+ * Managed pci_enable_device(). Device will automatically be disabled on
+ * driver detach.
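+ *
+ * A minimal usage sketch from a driver's probe path (illustrative only)::
+ *
+ *	ret = pcim_enable_device(pdev);
+ *	if (ret)
+ *		return ret;
+ *	/* The device is disabled automatically on detach or probe failure. */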
+ */
+int pcim_enable_device(struct pci_dev *pdev)
+{
+ int ret;
+
+ ret = devm_add_action(&pdev->dev, pcim_disable_device, pdev);
+ if (ret != 0)
+ return ret;
+
+ /*
+ * We prefer removing the action in case of an error over
+ * devm_add_action_or_reset() because the latter could theoretically be
+ * disturbed by users having pinned the device too soon.
+ */
+ ret = pci_enable_device(pdev);
+ if (ret != 0) {
+ devm_remove_action(&pdev->dev, pcim_disable_device, pdev);
+ return ret;
+ }
+
+ pdev->is_managed = true;
+
+ return ret;
+}
+EXPORT_SYMBOL(pcim_enable_device);
+
+/**
+ * pcim_pin_device - Pin managed PCI device
+ * @pdev: PCI device to pin
+ *
+ * Pin managed PCI device @pdev. Pinned device won't be disabled on driver
+ * detach. @pdev must have been enabled with pcim_enable_device().
+ */
+void pcim_pin_device(struct pci_dev *pdev)
+{
+ pdev->pinned = true;
+}
+EXPORT_SYMBOL(pcim_pin_device);
+
+static void pcim_iomap_release(struct device *gendev, void *res)
+{
+ /*
+ * Do nothing. This is legacy code.
+ *
+ * Cleanup of the mappings is now done directly through the callbacks
+ * registered when creating them.
+ */
+}
+
+/**
+ * pcim_iomap_table - access iomap allocation table (DEPRECATED)
+ * @pdev: PCI device to access iomap table for
+ *
+ * Returns:
+ * Const pointer to array of __iomem pointers on success, NULL on failure.
+ *
+ * Access iomap allocation table for @pdev. If iomap table doesn't
+ * exist and @pdev is managed, it will be allocated. All iomaps
+ * recorded in the iomap table are automatically unmapped on driver
+ * detach.
+ *
+ * This function might sleep when the table is first allocated, but once
+ * the table exists it can safely be called from any context and is
+ * guaranteed to succeed.
+ *
+ * This function is DEPRECATED. Do not use it in new code. Instead, obtain a
+ * mapping's address directly from one of the pcim_* mapping functions. For
+ * example:
+ * void __iomem \*mappy = pcim_iomap(pdev, bar, length);
+ */
+void __iomem * const *pcim_iomap_table(struct pci_dev *pdev)
+{
+ struct pcim_iomap_devres *dr, *new_dr;
+
+ dr = devres_find(&pdev->dev, pcim_iomap_release, NULL, NULL);
+ if (dr)
+ return dr->table;
+
+ new_dr = devres_alloc_node(pcim_iomap_release, sizeof(*new_dr), GFP_KERNEL,
+ dev_to_node(&pdev->dev));
+ if (!new_dr)
+ return NULL;
+ dr = devres_get(&pdev->dev, new_dr, NULL, NULL);
+ return dr->table;
+}
+EXPORT_SYMBOL(pcim_iomap_table);
+
+/*
+ * Fill the legacy mapping-table, so that drivers using the old API can
+ * still get a BAR's mapping address through pcim_iomap_table().
+ */
+static int pcim_add_mapping_to_legacy_table(struct pci_dev *pdev,
+ void __iomem *mapping, int bar)
+{
+ void __iomem **legacy_iomap_table;
+
+ if (!pci_bar_index_is_valid(bar))
+ return -EINVAL;
+
+ legacy_iomap_table = (void __iomem **)pcim_iomap_table(pdev);
+ if (!legacy_iomap_table)
+ return -ENOMEM;
+
+ /* The legacy mechanism doesn't allow for duplicate mappings. */
+ WARN_ON(legacy_iomap_table[bar]);
+
+ legacy_iomap_table[bar] = mapping;
+
+ return 0;
+}
+
+/*
+ * Remove a mapping. The table only contains whole-BAR mappings, so this will
+ * never interfere with ranged mappings.
+ */
+static void pcim_remove_mapping_from_legacy_table(struct pci_dev *pdev,
+ void __iomem *addr)
+{
+ int bar;
+ void __iomem **legacy_iomap_table;
+
+ legacy_iomap_table = (void __iomem **)pcim_iomap_table(pdev);
+ if (!legacy_iomap_table)
+ return;
+
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
+ if (legacy_iomap_table[bar] == addr) {
+ legacy_iomap_table[bar] = NULL;
+ return;
+ }
+ }
+}
+
+/*
+ * The same as pcim_remove_mapping_from_legacy_table(), but identifies the
+ * mapping by its BAR index.
+ */
+static void pcim_remove_bar_from_legacy_table(struct pci_dev *pdev, int bar)
+{
+ void __iomem **legacy_iomap_table;
+
+ if (!pci_bar_index_is_valid(bar))
+ return;
+
+ legacy_iomap_table = (void __iomem **)pcim_iomap_table(pdev);
+ if (!legacy_iomap_table)
+ return;
+
+ legacy_iomap_table[bar] = NULL;
+}
+
+/**
+ * pcim_iomap - Managed pcim_iomap()
+ * @pdev: PCI device to iomap for
+ * @bar: BAR to iomap
+ * @maxlen: Maximum length of iomap
+ *
+ * Returns: __iomem pointer on success, NULL on failure.
+ *
+ * Managed pci_iomap(). Map is automatically unmapped on driver detach. If
+ * desired, unmap manually only with pcim_iounmap().
+ *
+ * This SHOULD only be used once per BAR.
+ *
+ * NOTE:
+ * Contrary to the other pcim_* functions, this function does not return an
+ * IOMEM_ERR_PTR() on failure, but a simple NULL. This is done for backwards
+ * compatibility.
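+ *
+ * A minimal usage sketch; the BAR index is illustrative, and passing 0 for
+ * @maxlen maps the whole BAR::
+ *
+ *	regs = pcim_iomap(pdev, 0, 0);
+ *	if (!regs)
+ *		return -ENOMEM;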
+ */
+void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen)
+{
+ void __iomem *mapping;
+ struct pcim_addr_devres *res;
+
+ if (!pci_bar_index_is_valid(bar))
+ return NULL;
+
+ res = pcim_addr_devres_alloc(pdev);
+ if (!res)
+ return NULL;
+ res->type = PCIM_ADDR_DEVRES_TYPE_MAPPING;
+
+ mapping = pci_iomap(pdev, bar, maxlen);
+ if (!mapping)
+ goto err_iomap;
+ res->baseaddr = mapping;
+
+ if (pcim_add_mapping_to_legacy_table(pdev, mapping, bar) != 0)
+ goto err_table;
+
+ devres_add(&pdev->dev, res);
+ return mapping;
+
+err_table:
+ pci_iounmap(pdev, mapping);
+err_iomap:
+ pcim_addr_devres_free(res);
+ return NULL;
+}
+EXPORT_SYMBOL(pcim_iomap);
+
+/**
+ * pcim_iounmap - Managed pci_iounmap()
+ * @pdev: PCI device to iounmap for
+ * @addr: Address to unmap
+ *
+ * Managed pci_iounmap(). @addr must have been mapped using a pcim_* mapping
+ * function.
+ */
+void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr)
+{
+ struct pcim_addr_devres res_searched;
+
+ pcim_addr_devres_clear(&res_searched);
+ res_searched.type = PCIM_ADDR_DEVRES_TYPE_MAPPING;
+ res_searched.baseaddr = addr;
+
+ if (devres_release(&pdev->dev, pcim_addr_resource_release,
+ pcim_addr_resources_match, &res_searched) != 0) {
+ /* Doesn't exist. User passed nonsense. */
+ return;
+ }
+
+ pcim_remove_mapping_from_legacy_table(pdev, addr);
+}
+EXPORT_SYMBOL(pcim_iounmap);
+
+/**
+ * pcim_iomap_region - Request and iomap a PCI BAR
+ * @pdev: PCI device to map IO resources for
+ * @bar: Index of a BAR to map
+ * @name: Name of the driver requesting the resource
+ *
+ * Returns: __iomem pointer on success, an IOMEM_ERR_PTR on failure.
+ *
+ * Mapping and region will get automatically released on driver detach. If
+ * desired, release manually only with pcim_iounmap_region().
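+ *
+ * A minimal usage sketch; the BAR index and name are illustrative::
+ *
+ *	base = pcim_iomap_region(pdev, 0, "mydriver");
+ *	if (IS_ERR(base))
+ *		return PTR_ERR(base);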
+ */
+void __iomem *pcim_iomap_region(struct pci_dev *pdev, int bar,
+ const char *name)
+{
+ int ret;
+ struct pcim_addr_devres *res;
+
+ if (!pci_bar_index_is_valid(bar))
+ return IOMEM_ERR_PTR(-EINVAL);
+
+ res = pcim_addr_devres_alloc(pdev);
+ if (!res)
+ return IOMEM_ERR_PTR(-ENOMEM);
+
+ res->type = PCIM_ADDR_DEVRES_TYPE_REGION_MAPPING;
+ res->bar = bar;
+
+ ret = pci_request_region(pdev, bar, name);
+ if (ret != 0)
+ goto err_region;
+
+ res->baseaddr = pci_iomap(pdev, bar, 0);
+ if (!res->baseaddr) {
+ ret = -EINVAL;
+ goto err_iomap;
+ }
+
+ devres_add(&pdev->dev, res);
+ return res->baseaddr;
+
+err_iomap:
+ pci_release_region(pdev, bar);
+err_region:
+ pcim_addr_devres_free(res);
+
+ return IOMEM_ERR_PTR(ret);
+}
+EXPORT_SYMBOL(pcim_iomap_region);
+
+/**
+ * pcim_iounmap_region - Unmap and release a PCI BAR
+ * @pdev: PCI device to operate on
+ * @bar: Index of BAR to unmap and release
+ *
+ * Unmap a BAR and release its region manually. Only pass BARs that were
+ * previously mapped by pcim_iomap_region().
+ */
+void pcim_iounmap_region(struct pci_dev *pdev, int bar)
+{
+ struct pcim_addr_devres res_searched;
+
+ pcim_addr_devres_clear(&res_searched);
+ res_searched.type = PCIM_ADDR_DEVRES_TYPE_REGION_MAPPING;
+ res_searched.bar = bar;
+
+ devres_release(&pdev->dev, pcim_addr_resource_release,
+ pcim_addr_resources_match, &res_searched);
+}
+EXPORT_SYMBOL(pcim_iounmap_region);
+
+/**
+ * pcim_iomap_regions - Request and iomap PCI BARs (DEPRECATED)
+ * @pdev: PCI device to map IO resources for
+ * @mask: Mask of BARs to request and iomap
+ * @name: Name of the driver requesting the resources
+ *
+ * Returns: 0 on success, negative error code on failure.
+ *
+ * Request and iomap regions specified by @mask.
+ *
+ * This function is DEPRECATED. Do not use it in new code.
+ * Use pcim_iomap_region() instead.
+ */
+int pcim_iomap_regions(struct pci_dev *pdev, int mask, const char *name)
+{
+ int ret;
+ int bar;
+ void __iomem *mapping;
+
+ for (bar = 0; bar < DEVICE_COUNT_RESOURCE; bar++) {
+ if (!mask_contains_bar(mask, bar))
+ continue;
+
+ mapping = pcim_iomap_region(pdev, bar, name);
+ if (IS_ERR(mapping)) {
+ ret = PTR_ERR(mapping);
+ goto err;
+ }
+ ret = pcim_add_mapping_to_legacy_table(pdev, mapping, bar);
+ if (ret != 0)
+ goto err;
+ }
+
+ return 0;
+
+err:
+ while (--bar >= 0) {
+ pcim_iounmap_region(pdev, bar);
+ pcim_remove_bar_from_legacy_table(pdev, bar);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(pcim_iomap_regions);
+
+/**
+ * pcim_request_region - Request a PCI BAR
+ * @pdev: PCI device to request region for
+ * @bar: Index of BAR to request
+ * @name: Name of the driver requesting the resource
+ *
+ * Returns: 0 on success, a negative error code on failure.
+ *
+ * Request region specified by @bar.
+ *
+ * The region will automatically be released on driver detach. If desired,
+ * release manually only with pcim_release_region().
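+ *
+ * A minimal usage sketch; the BAR index and name are illustrative::
+ *
+ *	ret = pcim_request_region(pdev, 2, "mydriver");
+ *	if (ret)
+ *		return ret;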
+ */
+int pcim_request_region(struct pci_dev *pdev, int bar, const char *name)
+{
+ int ret;
+ struct pcim_addr_devres *res;
+
+ if (!pci_bar_index_is_valid(bar))
+ return -EINVAL;
+
+ res = pcim_addr_devres_alloc(pdev);
+ if (!res)
+ return -ENOMEM;
+ res->type = PCIM_ADDR_DEVRES_TYPE_REGION;
+ res->bar = bar;
+
+ ret = pci_request_region(pdev, bar, name);
+ if (ret != 0) {
+ pcim_addr_devres_free(res);
+ return ret;
+ }
+
+ devres_add(&pdev->dev, res);
+ return 0;
+}
+EXPORT_SYMBOL(pcim_request_region);
+
+/**
+ * pcim_release_region - Release a PCI BAR
+ * @pdev: PCI device to operate on
+ * @bar: Index of BAR to release
+ *
+ * Release a region manually that was previously requested by
+ * pcim_request_region().
+ */
+static void pcim_release_region(struct pci_dev *pdev, int bar)
+{
+ struct pcim_addr_devres res_searched;
+
+ pcim_addr_devres_clear(&res_searched);
+ res_searched.type = PCIM_ADDR_DEVRES_TYPE_REGION;
+ res_searched.bar = bar;
+
+ devres_release(&pdev->dev, pcim_addr_resource_release,
+ pcim_addr_resources_match, &res_searched);
+}
+
+/**
+ * pcim_release_all_regions - Release all regions of a PCI-device
+ * @pdev: the PCI device
+ *
+ * Release all regions previously requested through pcim_request_region()
+ * or pcim_request_all_regions().
+ *
+ * Can be called from any context, i.e., not necessarily as a counterpart to
+ * pcim_request_all_regions().
+ */
+static void pcim_release_all_regions(struct pci_dev *pdev)
+{
+ int bar;
+
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
+ pcim_release_region(pdev, bar);
+}
+
+/**
+ * pcim_request_all_regions - Request all regions
+ * @pdev: PCI device to map IO resources for
+ * @name: name of the driver requesting the resources
+ *
+ * Returns: 0 on success, negative error code on failure.
+ *
+ * Requested regions will automatically be released at driver detach. If
+ * desired, release individual regions with pcim_release_region() or all of
+ * them at once with pcim_release_all_regions().
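+ *
+ * A minimal usage sketch, combined with a ranged mapping (the BAR index,
+ * offset and length are illustrative)::
+ *
+ *	ret = pcim_request_all_regions(pdev, "mydriver");
+ *	if (ret)
+ *		return ret;
+ *	regs = pcim_iomap_range(pdev, 0, 0x1000, 0x100);
+ *	if (IS_ERR(regs))
+ *		return PTR_ERR(regs);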
+ */
+int pcim_request_all_regions(struct pci_dev *pdev, const char *name)
+{
+ int ret;
+ int bar;
+
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
+ ret = pcim_request_region(pdev, bar, name);
+ if (ret != 0)
+ goto err;
+ }
+
+ return 0;
+
+err:
+ pcim_release_all_regions(pdev);
+
+ return ret;
+}
+EXPORT_SYMBOL(pcim_request_all_regions);
+
+/**
+ * pcim_iomap_range - Create a ranged __iomap mapping within a PCI BAR
+ * @pdev: PCI device to map IO resources for
+ * @bar: Index of the BAR
+ * @offset: Offset from the begin of the BAR
+ * @len: Length in bytes for the mapping
+ *
+ * Returns: __iomem pointer on success, an IOMEM_ERR_PTR on failure.
+ *
+ * Creates a new IO-Mapping within the specified @bar, ranging from @offset to
+ * @offset + @len.
+ *
+ * The mapping will automatically get unmapped on driver detach. If desired,
+ * release manually only with pcim_iounmap().
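+ *
+ * A minimal usage sketch; note that the region itself must have been
+ * requested separately (the BAR index, offset and length are illustrative)::
+ *
+ *	doorbell = pcim_iomap_range(pdev, 0, 0x2000, SZ_4K);
+ *	if (IS_ERR(doorbell))
+ *		return PTR_ERR(doorbell);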
+ */
+void __iomem *pcim_iomap_range(struct pci_dev *pdev, int bar,
+ unsigned long offset, unsigned long len)
+{
+ void __iomem *mapping;
+ struct pcim_addr_devres *res;
+
+ if (!pci_bar_index_is_valid(bar))
+ return IOMEM_ERR_PTR(-EINVAL);
+
+ res = pcim_addr_devres_alloc(pdev);
+ if (!res)
+ return IOMEM_ERR_PTR(-ENOMEM);
+
+ mapping = pci_iomap_range(pdev, bar, offset, len);
+ if (!mapping) {
+ pcim_addr_devres_free(res);
+ return IOMEM_ERR_PTR(-EINVAL);
+ }
+
+ res->type = PCIM_ADDR_DEVRES_TYPE_MAPPING;
+ res->baseaddr = mapping;
+
+ /*
+ * Ranged mappings don't get added to the legacy-table, since the table
+ * only ever keeps track of whole BARs.
+ */
+
+ devres_add(&pdev->dev, res);
+ return mapping;
+}
+EXPORT_SYMBOL(pcim_iomap_range);
diff --git a/drivers/pci/doe.c b/drivers/pci/doe.c
index 1b97a5ab71a9..62be9c8dbc52 100644
--- a/drivers/pci/doe.c
+++ b/drivers/pci/doe.c
@@ -14,16 +14,16 @@
#include <linux/bitfield.h>
#include <linux/delay.h>
+#include <linux/device.h>
#include <linux/jiffies.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/pci-doe.h>
+#include <linux/sysfs.h>
#include <linux/workqueue.h>
#include "pci.h"
-#define PCI_DOE_PROTOCOL_DISCOVERY 0
-
/* Timeout of 1 second from 6.30.2 Operation, PCI Spec r6.0 */
#define PCI_DOE_TIMEOUT HZ
#define PCI_DOE_POLL_INTERVAL (PCI_DOE_TIMEOUT / 128)
@@ -43,22 +43,27 @@
*
* @pdev: PCI device this mailbox belongs to
* @cap_offset: Capability offset
- * @prots: Array of protocols supported (encoded as long values)
+ * @feats: Array of features supported (encoded as long values)
* @wq: Wait queue for work item
* @work_queue: Queue of pci_doe_work items
* @flags: Bit array of PCI_DOE_FLAG_* flags
+ * @sysfs_attrs: Array of sysfs device attributes
*/
struct pci_doe_mb {
struct pci_dev *pdev;
u16 cap_offset;
- struct xarray prots;
+ struct xarray feats;
wait_queue_head_t wq;
struct workqueue_struct *work_queue;
unsigned long flags;
+
+#ifdef CONFIG_SYSFS
+ struct device_attribute *sysfs_attrs;
+#endif
};
-struct pci_doe_protocol {
+struct pci_doe_feature {
u16 vid;
u8 type;
};
@@ -66,7 +71,7 @@ struct pci_doe_protocol {
/**
* struct pci_doe_task - represents a single query/response
*
- * @prot: DOE Protocol
+ * @feat: DOE Feature
* @request_pl: The request payload
* @request_pl_sz: Size of the request payload (bytes)
* @response_pl: The response payload
@@ -78,7 +83,7 @@ struct pci_doe_protocol {
* @doe_mb: Used internally by the mailbox
*/
struct pci_doe_task {
- struct pci_doe_protocol prot;
+ struct pci_doe_feature feat;
const __le32 *request_pl;
size_t request_pl_sz;
__le32 *response_pl;
@@ -92,6 +97,152 @@ struct pci_doe_task {
struct pci_doe_mb *doe_mb;
};
+#ifdef CONFIG_SYSFS
+static ssize_t doe_discovery_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "0001:00\n");
+}
+static DEVICE_ATTR_RO(doe_discovery);
+
+static struct attribute *pci_doe_sysfs_feature_attrs[] = {
+ &dev_attr_doe_discovery.attr,
+ NULL
+};
+
+static bool pci_doe_features_sysfs_group_visible(struct kobject *kobj)
+{
+ struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
+
+ return !xa_empty(&pdev->doe_mbs);
+}
+DEFINE_SIMPLE_SYSFS_GROUP_VISIBLE(pci_doe_features_sysfs)
+
+const struct attribute_group pci_doe_sysfs_group = {
+ .name = "doe_features",
+ .attrs = pci_doe_sysfs_feature_attrs,
+ .is_visible = SYSFS_GROUP_VISIBLE(pci_doe_features_sysfs),
+};
+
+static ssize_t pci_doe_sysfs_feature_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "%s\n", attr->attr.name);
+}
+
+static void pci_doe_sysfs_feature_remove(struct pci_dev *pdev,
+ struct pci_doe_mb *doe_mb)
+{
+ struct device_attribute *attrs = doe_mb->sysfs_attrs;
+ struct device *dev = &pdev->dev;
+ unsigned long i;
+ void *entry;
+
+ if (!attrs)
+ return;
+
+ doe_mb->sysfs_attrs = NULL;
+ xa_for_each(&doe_mb->feats, i, entry) {
+ if (attrs[i].show)
+ sysfs_remove_file_from_group(&dev->kobj, &attrs[i].attr,
+ pci_doe_sysfs_group.name);
+ kfree(attrs[i].attr.name);
+ }
+ kfree(attrs);
+}
+
+static int pci_doe_sysfs_feature_populate(struct pci_dev *pdev,
+ struct pci_doe_mb *doe_mb)
+{
+ struct device *dev = &pdev->dev;
+ struct device_attribute *attrs;
+ unsigned long num_features = 0;
+ unsigned long vid, type;
+ unsigned long i;
+ void *entry;
+ int ret;
+
+ xa_for_each(&doe_mb->feats, i, entry)
+ num_features++;
+
+ attrs = kcalloc(num_features, sizeof(*attrs), GFP_KERNEL);
+ if (!attrs) {
+ pci_warn(pdev, "Failed allocating the device_attribute array\n");
+ return -ENOMEM;
+ }
+
+ doe_mb->sysfs_attrs = attrs;
+ xa_for_each(&doe_mb->feats, i, entry) {
+ sysfs_attr_init(&attrs[i].attr);
+ vid = xa_to_value(entry) >> 8;
+ type = xa_to_value(entry) & 0xFF;
+
+ if (vid == PCI_VENDOR_ID_PCI_SIG &&
+ type == PCI_DOE_FEATURE_DISCOVERY) {
+
+ /*
+ * DOE Discovery, manually displayed by
+ * `dev_attr_doe_discovery`
+ */
+ continue;
+ }
+
+ attrs[i].attr.name = kasprintf(GFP_KERNEL,
+ "%04lx:%02lx", vid, type);
+ if (!attrs[i].attr.name) {
+ ret = -ENOMEM;
+ pci_warn(pdev, "Failed allocating the attribute name\n");
+ goto fail;
+ }
+
+ attrs[i].attr.mode = 0444;
+ attrs[i].show = pci_doe_sysfs_feature_show;
+
+ ret = sysfs_add_file_to_group(&dev->kobj, &attrs[i].attr,
+ pci_doe_sysfs_group.name);
+ if (ret) {
+ attrs[i].show = NULL;
+ if (ret != -EEXIST) {
+ pci_warn(pdev, "Failed adding %s to sysfs group\n",
+ attrs[i].attr.name);
+ goto fail;
+ } else
+ kfree(attrs[i].attr.name);
+ }
+ }
+
+ return 0;
+
+fail:
+ pci_doe_sysfs_feature_remove(pdev, doe_mb);
+ return ret;
+}
+
+void pci_doe_sysfs_teardown(struct pci_dev *pdev)
+{
+ struct pci_doe_mb *doe_mb;
+ unsigned long index;
+
+ xa_for_each(&pdev->doe_mbs, index, doe_mb)
+ pci_doe_sysfs_feature_remove(pdev, doe_mb);
+}
+
+void pci_doe_sysfs_init(struct pci_dev *pdev)
+{
+ struct pci_doe_mb *doe_mb;
+ unsigned long index;
+ int ret;
+
+ xa_for_each(&pdev->doe_mbs, index, doe_mb) {
+ ret = pci_doe_sysfs_feature_populate(pdev, doe_mb);
+ if (ret)
+ return;
+ }
+}
+#endif
+
static int pci_doe_wait(struct pci_doe_mb *doe_mb, unsigned long timeout)
{
if (wait_event_timeout(doe_mb->wq,
@@ -146,6 +297,7 @@ static int pci_doe_send_req(struct pci_doe_mb *doe_mb,
{
struct pci_dev *pdev = doe_mb->pdev;
int offset = doe_mb->cap_offset;
+ unsigned long timeout_jiffies;
size_t length, remainder;
u32 val;
int i;
@@ -155,8 +307,19 @@ static int pci_doe_send_req(struct pci_doe_mb *doe_mb,
* someone other than Linux (e.g. firmware) is using the mailbox. Note
* it is expected that firmware and OS will negotiate access rights via
* an, as yet to be defined, method.
+ *
+ * Wait up to one PCI_DOE_TIMEOUT period to allow the prior command to
+ * finish. Otherwise, simply error out as unable to field the request.
+ *
+ * PCIe r6.2 sec 6.30.3 states no interrupt is raised when the DOE Busy
+ * bit is cleared, so polling here is our best option for the moment.
*/
- pci_read_config_dword(pdev, offset + PCI_DOE_STATUS, &val);
+ timeout_jiffies = jiffies + PCI_DOE_TIMEOUT;
+ do {
+ pci_read_config_dword(pdev, offset + PCI_DOE_STATUS, &val);
+ } while (FIELD_GET(PCI_DOE_STATUS_BUSY, val) &&
+ !time_after(jiffies, timeout_jiffies));
+
if (FIELD_GET(PCI_DOE_STATUS_BUSY, val))
return -EBUSY;
@@ -171,8 +334,8 @@ static int pci_doe_send_req(struct pci_doe_mb *doe_mb,
length = 0;
/* Write DOE Header */
- val = FIELD_PREP(PCI_DOE_DATA_OBJECT_HEADER_1_VID, task->prot.vid) |
- FIELD_PREP(PCI_DOE_DATA_OBJECT_HEADER_1_TYPE, task->prot.type);
+ val = FIELD_PREP(PCI_DOE_DATA_OBJECT_HEADER_1_VID, task->feat.vid) |
+ FIELD_PREP(PCI_DOE_DATA_OBJECT_HEADER_1_TYPE, task->feat.type);
pci_write_config_dword(pdev, offset + PCI_DOE_WRITE, val);
pci_write_config_dword(pdev, offset + PCI_DOE_WRITE,
FIELD_PREP(PCI_DOE_DATA_OBJECT_HEADER_2_LENGTH,
@@ -217,12 +380,12 @@ static int pci_doe_recv_resp(struct pci_doe_mb *doe_mb, struct pci_doe_task *tas
int i = 0;
u32 val;
- /* Read the first dword to get the protocol */
+ /* Read the first dword to get the feature */
pci_read_config_dword(pdev, offset + PCI_DOE_READ, &val);
- if ((FIELD_GET(PCI_DOE_DATA_OBJECT_HEADER_1_VID, val) != task->prot.vid) ||
- (FIELD_GET(PCI_DOE_DATA_OBJECT_HEADER_1_TYPE, val) != task->prot.type)) {
- dev_err_ratelimited(&pdev->dev, "[%x] expected [VID, Protocol] = [%04x, %02x], got [%04x, %02x]\n",
- doe_mb->cap_offset, task->prot.vid, task->prot.type,
+ if ((FIELD_GET(PCI_DOE_DATA_OBJECT_HEADER_1_VID, val) != task->feat.vid) ||
+ (FIELD_GET(PCI_DOE_DATA_OBJECT_HEADER_1_TYPE, val) != task->feat.type)) {
+ dev_err_ratelimited(&pdev->dev, "[%x] expected [VID, Feature] = [%04x, %02x], got [%04x, %02x]\n",
+ doe_mb->cap_offset, task->feat.vid, task->feat.type,
FIELD_GET(PCI_DOE_DATA_OBJECT_HEADER_1_VID, val),
FIELD_GET(PCI_DOE_DATA_OBJECT_HEADER_1_TYPE, val));
return -EIO;
@@ -293,8 +456,8 @@ static int pci_doe_recv_resp(struct pci_doe_mb *doe_mb, struct pci_doe_task *tas
static void signal_task_complete(struct pci_doe_task *task, int rv)
{
task->rv = rv;
- task->complete(task);
destroy_work_on_stack(&task->work);
+ task->complete(task);
}
static void signal_task_abort(struct pci_doe_task *task, int rv)
@@ -383,17 +546,19 @@ static void pci_doe_task_complete(struct pci_doe_task *task)
complete(task->private);
}
-static int pci_doe_discovery(struct pci_doe_mb *doe_mb, u8 *index, u16 *vid,
- u8 *protocol)
+static int pci_doe_discovery(struct pci_doe_mb *doe_mb, u8 capver, u8 *index, u16 *vid,
+ u8 *feature)
{
u32 request_pl = FIELD_PREP(PCI_DOE_DATA_OBJECT_DISC_REQ_3_INDEX,
- *index);
+ *index) |
+ FIELD_PREP(PCI_DOE_DATA_OBJECT_DISC_REQ_3_VER,
+ (capver >= 2) ? 2 : 0);
__le32 request_pl_le = cpu_to_le32(request_pl);
__le32 response_pl_le;
u32 response_pl;
int rc;
- rc = pci_doe(doe_mb, PCI_VENDOR_ID_PCI_SIG, PCI_DOE_PROTOCOL_DISCOVERY,
+ rc = pci_doe(doe_mb, PCI_VENDOR_ID_PCI_SIG, PCI_DOE_FEATURE_DISCOVERY,
&request_pl_le, sizeof(request_pl_le),
&response_pl_le, sizeof(response_pl_le));
if (rc < 0)
@@ -404,7 +569,7 @@ static int pci_doe_discovery(struct pci_doe_mb *doe_mb, u8 *index, u16 *vid,
response_pl = le32_to_cpu(response_pl_le);
*vid = FIELD_GET(PCI_DOE_DATA_OBJECT_DISC_RSP_3_VID, response_pl);
- *protocol = FIELD_GET(PCI_DOE_DATA_OBJECT_DISC_RSP_3_PROTOCOL,
+ *feature = FIELD_GET(PCI_DOE_DATA_OBJECT_DISC_RSP_3_TYPE,
response_pl);
*index = FIELD_GET(PCI_DOE_DATA_OBJECT_DISC_RSP_3_NEXT_INDEX,
response_pl);
@@ -412,31 +577,35 @@ static int pci_doe_discovery(struct pci_doe_mb *doe_mb, u8 *index, u16 *vid,
return 0;
}
-static void *pci_doe_xa_prot_entry(u16 vid, u8 prot)
+static void *pci_doe_xa_feat_entry(u16 vid, u8 type)
{
- return xa_mk_value((vid << 8) | prot);
+ return xa_mk_value((vid << 8) | type);
}
-static int pci_doe_cache_protocols(struct pci_doe_mb *doe_mb)
+static int pci_doe_cache_features(struct pci_doe_mb *doe_mb)
{
u8 index = 0;
u8 xa_idx = 0;
+ u32 hdr = 0;
+
+ pci_read_config_dword(doe_mb->pdev, doe_mb->cap_offset, &hdr);
do {
int rc;
u16 vid;
- u8 prot;
+ u8 type;
- rc = pci_doe_discovery(doe_mb, &index, &vid, &prot);
+ rc = pci_doe_discovery(doe_mb, PCI_EXT_CAP_VER(hdr), &index,
+ &vid, &type);
if (rc)
return rc;
pci_dbg(doe_mb->pdev,
- "[%x] Found protocol %d vid: %x prot: %x\n",
- doe_mb->cap_offset, xa_idx, vid, prot);
+ "[%x] Found feature %d vid: %x type: %x\n",
+ doe_mb->cap_offset, xa_idx, vid, type);
- rc = xa_insert(&doe_mb->prots, xa_idx++,
- pci_doe_xa_prot_entry(vid, prot), GFP_KERNEL);
+ rc = xa_insert(&doe_mb->feats, xa_idx++,
+ pci_doe_xa_feat_entry(vid, type), GFP_KERNEL);
if (rc)
return rc;
} while (index);
@@ -460,7 +629,7 @@ static void pci_doe_cancel_tasks(struct pci_doe_mb *doe_mb)
* @pdev: PCI device to create the DOE mailbox for
* @cap_offset: Offset of the DOE mailbox
*
- * Create a single mailbox object to manage the mailbox protocol at the
+ * Create a single mailbox object to manage the mailbox feature at the
* cap_offset specified.
*
* RETURNS: created mailbox object on success
@@ -479,7 +648,7 @@ static struct pci_doe_mb *pci_doe_create_mb(struct pci_dev *pdev,
doe_mb->pdev = pdev;
doe_mb->cap_offset = cap_offset;
init_waitqueue_head(&doe_mb->wq);
- xa_init(&doe_mb->prots);
+ xa_init(&doe_mb->feats);
doe_mb->work_queue = alloc_ordered_workqueue("%s %s DOE [%x]", 0,
dev_bus_name(&pdev->dev),
@@ -502,11 +671,11 @@ static struct pci_doe_mb *pci_doe_create_mb(struct pci_dev *pdev,
/*
* The state machine and the mailbox should be in sync now;
- * Use the mailbox to query protocols.
+ * Use the mailbox to query features.
*/
- rc = pci_doe_cache_protocols(doe_mb);
+ rc = pci_doe_cache_features(doe_mb);
if (rc) {
- pci_err(pdev, "[%x] failed to cache protocols : %d\n",
+ pci_err(pdev, "[%x] failed to cache features : %d\n",
doe_mb->cap_offset, rc);
goto err_cancel;
}
@@ -515,7 +684,7 @@ static struct pci_doe_mb *pci_doe_create_mb(struct pci_dev *pdev,
err_cancel:
pci_doe_cancel_tasks(doe_mb);
- xa_destroy(&doe_mb->prots);
+ xa_destroy(&doe_mb->feats);
err_destroy_wq:
destroy_workqueue(doe_mb->work_queue);
err_free:
@@ -533,31 +702,31 @@ err_free:
static void pci_doe_destroy_mb(struct pci_doe_mb *doe_mb)
{
pci_doe_cancel_tasks(doe_mb);
- xa_destroy(&doe_mb->prots);
+ xa_destroy(&doe_mb->feats);
destroy_workqueue(doe_mb->work_queue);
kfree(doe_mb);
}
/**
- * pci_doe_supports_prot() - Return if the DOE instance supports the given
- * protocol
+ * pci_doe_supports_feat() - Return if the DOE instance supports the given
+ * feature
* @doe_mb: DOE mailbox capability to query
- * @vid: Protocol Vendor ID
- * @type: Protocol type
+ * @vid: Feature Vendor ID
+ * @type: Feature type
*
- * RETURNS: True if the DOE mailbox supports the protocol specified
+ * RETURNS: True if the DOE mailbox supports the feature specified
*/
-static bool pci_doe_supports_prot(struct pci_doe_mb *doe_mb, u16 vid, u8 type)
+static bool pci_doe_supports_feat(struct pci_doe_mb *doe_mb, u16 vid, u8 type)
{
unsigned long index;
void *entry;
- /* The discovery protocol must always be supported */
- if (vid == PCI_VENDOR_ID_PCI_SIG && type == PCI_DOE_PROTOCOL_DISCOVERY)
+ /* The discovery feature must always be supported */
+ if (vid == PCI_VENDOR_ID_PCI_SIG && type == PCI_DOE_FEATURE_DISCOVERY)
return true;
- xa_for_each(&doe_mb->prots, index, entry)
- if (entry == pci_doe_xa_prot_entry(vid, type))
+ xa_for_each(&doe_mb->feats, index, entry)
+ if (entry == pci_doe_xa_feat_entry(vid, type))
return true;
return false;
@@ -585,7 +754,7 @@ static bool pci_doe_supports_prot(struct pci_doe_mb *doe_mb, u16 vid, u8 type)
static int pci_doe_submit_task(struct pci_doe_mb *doe_mb,
struct pci_doe_task *task)
{
- if (!pci_doe_supports_prot(doe_mb, task->prot.vid, task->prot.type))
+ if (!pci_doe_supports_feat(doe_mb, task->feat.vid, task->feat.type))
return -EINVAL;
if (test_bit(PCI_DOE_FLAG_DEAD, &doe_mb->flags))
@@ -631,8 +800,8 @@ int pci_doe(struct pci_doe_mb *doe_mb, u16 vendor, u8 type,
{
DECLARE_COMPLETION_ONSTACK(c);
struct pci_doe_task task = {
- .prot.vid = vendor,
- .prot.type = type,
+ .feat.vid = vendor,
+ .feat.type = type,
.request_pl = request,
.request_pl_sz = request_sz,
.response_pl = response,
@@ -659,7 +828,7 @@ EXPORT_SYMBOL_GPL(pci_doe);
* @vendor: Vendor ID
* @type: Data Object Type
*
- * Find first DOE mailbox of a PCI device which supports the given protocol.
+ * Find first DOE mailbox of a PCI device which supports the given feature.
*
* RETURNS: Pointer to the DOE mailbox or NULL if none was found.
*/
@@ -670,7 +839,7 @@ struct pci_doe_mb *pci_find_doe_mailbox(struct pci_dev *pdev, u16 vendor,
unsigned long index;
xa_for_each(&pdev->doe_mbs, index, doe_mb)
- if (pci_doe_supports_prot(doe_mb, vendor, type))
+ if (pci_doe_supports_feat(doe_mb, vendor, type))
return doe_mb;
return NULL;
diff --git a/drivers/pci/ecam.c b/drivers/pci/ecam.c
index 1c40d2506aef..260b7de2dbd5 100644
--- a/drivers/pci/ecam.c
+++ b/drivers/pci/ecam.c
@@ -55,7 +55,7 @@ struct pci_config_window *pci_ecam_create(struct device *dev,
bus_range_max = resource_size(cfgres) >> bus_shift;
if (bus_range > bus_range_max) {
bus_range = bus_range_max;
- cfg->busr.end = busr->start + bus_range - 1;
+ resource_set_size(&cfg->busr, bus_range);
dev_warn(dev, "ECAM area %pR can only accommodate %pR (reduced from %pR desired)\n",
cfgres, &cfg->busr, busr);
}
diff --git a/drivers/pci/endpoint/Kconfig b/drivers/pci/endpoint/Kconfig
index 17bbdc9bbde0..8dad291be8b8 100644
--- a/drivers/pci/endpoint/Kconfig
+++ b/drivers/pci/endpoint/Kconfig
@@ -26,7 +26,15 @@ config PCI_ENDPOINT_CONFIGFS
help
This will enable the configfs entry that can be used to
configure the endpoint function and used to bind the
- function with a endpoint controller.
+ function with an endpoint controller.
+
+config PCI_ENDPOINT_MSI_DOORBELL
+ bool "PCI Endpoint MSI Doorbell Support"
+ depends on PCI_ENDPOINT && GENERIC_MSI_IRQ
+ help
+ This enables the EP's MSI interrupt controller to function as a
+	  doorbell. The RC can trigger a doorbell in the EP by writing data to a
+ dedicated BAR, which the EP maps to the controller's message address.
source "drivers/pci/endpoint/functions/Kconfig"
diff --git a/drivers/pci/endpoint/Makefile b/drivers/pci/endpoint/Makefile
index 95b2fe47e3b0..b4869d52053a 100644
--- a/drivers/pci/endpoint/Makefile
+++ b/drivers/pci/endpoint/Makefile
@@ -6,3 +6,4 @@
obj-$(CONFIG_PCI_ENDPOINT_CONFIGFS) += pci-ep-cfs.o
obj-$(CONFIG_PCI_ENDPOINT) += pci-epc-core.o pci-epf-core.o\
pci-epc-mem.o functions/
+obj-$(CONFIG_PCI_ENDPOINT_MSI_DOORBELL) += pci-ep-msi.o
diff --git a/drivers/pci/endpoint/functions/pci-epf-mhi.c b/drivers/pci/endpoint/functions/pci-epf-mhi.c
index 9c1f5a154fbd..6643a88c7a0c 100644
--- a/drivers/pci/endpoint/functions/pci-epf-mhi.c
+++ b/drivers/pci/endpoint/functions/pci-epf-mhi.c
@@ -6,8 +6,10 @@
* Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
*/
+#include <linux/dmaengine.h>
#include <linux/mhi_ep.h>
#include <linux/module.h>
+#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pci-epc.h>
#include <linux/pci-epf.h>
@@ -16,6 +18,18 @@
#define to_epf_mhi(cntrl) container_of(cntrl, struct pci_epf_mhi, cntrl)
+/* Platform specific flags */
+#define MHI_EPF_USE_DMA BIT(0)
+
+struct pci_epf_mhi_dma_transfer {
+ struct pci_epf_mhi *epf_mhi;
+ struct mhi_ep_buf_info buf_info;
+ struct list_head node;
+ dma_addr_t paddr;
+ enum dma_data_direction dir;
+ size_t size;
+};
+
struct pci_epf_mhi_ep_info {
const struct mhi_ep_cntrl_config *config;
struct pci_epf_header *epf_header;
@@ -23,6 +37,7 @@ struct pci_epf_mhi_ep_info {
u32 epf_flags;
u32 msi_count;
u32 mru;
+ u32 flags;
};
#define MHI_EP_CHANNEL_CONFIG(ch_num, ch_name, direction) \
@@ -91,17 +106,63 @@ static const struct pci_epf_mhi_ep_info sdx55_info = {
.mru = 0x8000,
};
+static struct pci_epf_header sm8450_header = {
+ .vendorid = PCI_VENDOR_ID_QCOM,
+ .deviceid = 0x0306,
+ .baseclass_code = PCI_CLASS_OTHERS,
+ .interrupt_pin = PCI_INTERRUPT_INTA,
+};
+
+static const struct pci_epf_mhi_ep_info sm8450_info = {
+ .config = &mhi_v1_config,
+ .epf_header = &sm8450_header,
+ .bar_num = BAR_0,
+ .epf_flags = PCI_BASE_ADDRESS_MEM_TYPE_32,
+ .msi_count = 32,
+ .mru = 0x8000,
+ .flags = MHI_EPF_USE_DMA,
+};
+
+static struct pci_epf_header sa8775p_header = {
+ .vendorid = PCI_VENDOR_ID_QCOM,
+ .deviceid = 0x0116,
+ .baseclass_code = PCI_CLASS_OTHERS,
+ .interrupt_pin = PCI_INTERRUPT_INTA,
+};
+
+static const struct pci_epf_mhi_ep_info sa8775p_info = {
+ .config = &mhi_v1_config,
+ .epf_header = &sa8775p_header,
+ .bar_num = BAR_0,
+ .epf_flags = PCI_BASE_ADDRESS_MEM_TYPE_32,
+ .msi_count = 32,
+ .mru = 0x8000,
+ .flags = MHI_EPF_USE_DMA,
+};
+
struct pci_epf_mhi {
+ const struct pci_epc_features *epc_features;
const struct pci_epf_mhi_ep_info *info;
struct mhi_ep_cntrl mhi_cntrl;
struct pci_epf *epf;
struct mutex lock;
void __iomem *mmio;
resource_size_t mmio_phys;
+ struct dma_chan *dma_chan_tx;
+ struct dma_chan *dma_chan_rx;
+ struct workqueue_struct *dma_wq;
+ struct work_struct dma_work;
+ struct list_head dma_list;
+ spinlock_t list_lock;
u32 mmio_size;
int irq;
};
+static size_t get_align_offset(struct pci_epf_mhi *epf_mhi, u64 addr)
+{
+	return addr & (epf_mhi->epc_features->align - 1);
+}
+
static int __pci_epf_mhi_alloc_map(struct mhi_ep_cntrl *mhi_cntrl, u64 pci_addr,
phys_addr_t *paddr, void __iomem **vaddr,
size_t offset, size_t size)
@@ -133,8 +194,7 @@ static int pci_epf_mhi_alloc_map(struct mhi_ep_cntrl *mhi_cntrl, u64 pci_addr,
size_t size)
{
struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
- struct pci_epc *epc = epf_mhi->epf->epc;
- size_t offset = pci_addr & (epc->mem->window.page_size - 1);
+ size_t offset = get_align_offset(epf_mhi, pci_addr);
return __pci_epf_mhi_alloc_map(mhi_cntrl, pci_addr, paddr, vaddr,
offset, size);
@@ -159,9 +219,7 @@ static void pci_epf_mhi_unmap_free(struct mhi_ep_cntrl *mhi_cntrl, u64 pci_addr,
size_t size)
{
struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
- struct pci_epf *epf = epf_mhi->epf;
- struct pci_epc *epc = epf->epc;
- size_t offset = pci_addr & (epc->mem->window.page_size - 1);
+ size_t offset = get_align_offset(epf_mhi, pci_addr);
__pci_epf_mhi_unmap_free(mhi_cntrl, pci_addr, paddr, vaddr, offset,
size);
@@ -177,67 +235,489 @@ static void pci_epf_mhi_raise_irq(struct mhi_ep_cntrl *mhi_cntrl, u32 vector)
* MHI supplies 0 based MSI vectors but the API expects the vector
* number to start from 1, so we need to increment the vector by 1.
*/
- pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no, PCI_EPC_IRQ_MSI,
+ pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no, PCI_IRQ_MSI,
vector + 1);
}
-static int pci_epf_mhi_read_from_host(struct mhi_ep_cntrl *mhi_cntrl, u64 from,
- void *to, size_t size)
+static int pci_epf_mhi_iatu_read(struct mhi_ep_cntrl *mhi_cntrl,
+ struct mhi_ep_buf_info *buf_info)
{
struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
- size_t offset = from % SZ_4K;
+ size_t offset = get_align_offset(epf_mhi, buf_info->host_addr);
void __iomem *tre_buf;
phys_addr_t tre_phys;
int ret;
mutex_lock(&epf_mhi->lock);
- ret = __pci_epf_mhi_alloc_map(mhi_cntrl, from, &tre_phys, &tre_buf,
- offset, size);
+ ret = __pci_epf_mhi_alloc_map(mhi_cntrl, buf_info->host_addr, &tre_phys,
+ &tre_buf, offset, buf_info->size);
if (ret) {
mutex_unlock(&epf_mhi->lock);
return ret;
}
- memcpy_fromio(to, tre_buf, size);
+ memcpy_fromio(buf_info->dev_addr, tre_buf, buf_info->size);
- __pci_epf_mhi_unmap_free(mhi_cntrl, from, tre_phys, tre_buf, offset,
- size);
+ __pci_epf_mhi_unmap_free(mhi_cntrl, buf_info->host_addr, tre_phys,
+ tre_buf, offset, buf_info->size);
mutex_unlock(&epf_mhi->lock);
+ if (buf_info->cb)
+ buf_info->cb(buf_info);
+
return 0;
}
-static int pci_epf_mhi_write_to_host(struct mhi_ep_cntrl *mhi_cntrl,
- void *from, u64 to, size_t size)
+static int pci_epf_mhi_iatu_write(struct mhi_ep_cntrl *mhi_cntrl,
+ struct mhi_ep_buf_info *buf_info)
{
struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
- size_t offset = to % SZ_4K;
+ size_t offset = get_align_offset(epf_mhi, buf_info->host_addr);
void __iomem *tre_buf;
phys_addr_t tre_phys;
int ret;
mutex_lock(&epf_mhi->lock);
- ret = __pci_epf_mhi_alloc_map(mhi_cntrl, to, &tre_phys, &tre_buf,
- offset, size);
+ ret = __pci_epf_mhi_alloc_map(mhi_cntrl, buf_info->host_addr, &tre_phys,
+ &tre_buf, offset, buf_info->size);
if (ret) {
mutex_unlock(&epf_mhi->lock);
return ret;
}
- memcpy_toio(tre_buf, from, size);
+ memcpy_toio(tre_buf, buf_info->dev_addr, buf_info->size);
- __pci_epf_mhi_unmap_free(mhi_cntrl, to, tre_phys, tre_buf, offset,
- size);
+ __pci_epf_mhi_unmap_free(mhi_cntrl, buf_info->host_addr, tre_phys,
+ tre_buf, offset, buf_info->size);
+
+ mutex_unlock(&epf_mhi->lock);
+
+ if (buf_info->cb)
+ buf_info->cb(buf_info);
+
+ return 0;
+}
+
+static void pci_epf_mhi_dma_callback(void *param)
+{
+ complete(param);
+}
+
+static int pci_epf_mhi_edma_read(struct mhi_ep_cntrl *mhi_cntrl,
+ struct mhi_ep_buf_info *buf_info)
+{
+ struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
+ struct device *dma_dev = epf_mhi->epf->epc->dev.parent;
+ struct dma_chan *chan = epf_mhi->dma_chan_rx;
+ struct device *dev = &epf_mhi->epf->dev;
+ DECLARE_COMPLETION_ONSTACK(complete);
+ struct dma_async_tx_descriptor *desc;
+ struct dma_slave_config config = {};
+ dma_cookie_t cookie;
+ dma_addr_t dst_addr;
+ int ret;
+
+ if (buf_info->size < SZ_4K)
+ return pci_epf_mhi_iatu_read(mhi_cntrl, buf_info);
+
+ mutex_lock(&epf_mhi->lock);
+
+ config.direction = DMA_DEV_TO_MEM;
+ config.src_addr = buf_info->host_addr;
+
+ ret = dmaengine_slave_config(chan, &config);
+ if (ret) {
+ dev_err(dev, "Failed to configure DMA channel\n");
+ goto err_unlock;
+ }
+
+ dst_addr = dma_map_single(dma_dev, buf_info->dev_addr, buf_info->size,
+ DMA_FROM_DEVICE);
+ ret = dma_mapping_error(dma_dev, dst_addr);
+ if (ret) {
+ dev_err(dev, "Failed to map remote memory\n");
+ goto err_unlock;
+ }
+
+ desc = dmaengine_prep_slave_single(chan, dst_addr, buf_info->size,
+ DMA_DEV_TO_MEM,
+ DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
+ if (!desc) {
+ dev_err(dev, "Failed to prepare DMA\n");
+ ret = -EIO;
+ goto err_unmap;
+ }
+
+ desc->callback = pci_epf_mhi_dma_callback;
+ desc->callback_param = &complete;
+
+ cookie = dmaengine_submit(desc);
+ ret = dma_submit_error(cookie);
+ if (ret) {
+ dev_err(dev, "Failed to do DMA submit\n");
+ goto err_unmap;
+ }
+
+ dma_async_issue_pending(chan);
+ ret = wait_for_completion_timeout(&complete, msecs_to_jiffies(1000));
+ if (!ret) {
+ dev_err(dev, "DMA transfer timeout\n");
+ dmaengine_terminate_sync(chan);
+ ret = -ETIMEDOUT;
+ }
+
+err_unmap:
+ dma_unmap_single(dma_dev, dst_addr, buf_info->size, DMA_FROM_DEVICE);
+err_unlock:
+ mutex_unlock(&epf_mhi->lock);
+
+ return ret;
+}
+
+static int pci_epf_mhi_edma_write(struct mhi_ep_cntrl *mhi_cntrl,
+ struct mhi_ep_buf_info *buf_info)
+{
+ struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
+ struct device *dma_dev = epf_mhi->epf->epc->dev.parent;
+ struct dma_chan *chan = epf_mhi->dma_chan_tx;
+ struct device *dev = &epf_mhi->epf->dev;
+ DECLARE_COMPLETION_ONSTACK(complete);
+ struct dma_async_tx_descriptor *desc;
+ struct dma_slave_config config = {};
+ dma_cookie_t cookie;
+ dma_addr_t src_addr;
+ int ret;
+
+ if (buf_info->size < SZ_4K)
+ return pci_epf_mhi_iatu_write(mhi_cntrl, buf_info);
+
+ mutex_lock(&epf_mhi->lock);
+
+ config.direction = DMA_MEM_TO_DEV;
+ config.dst_addr = buf_info->host_addr;
+
+ ret = dmaengine_slave_config(chan, &config);
+ if (ret) {
+ dev_err(dev, "Failed to configure DMA channel\n");
+ goto err_unlock;
+ }
+
+ src_addr = dma_map_single(dma_dev, buf_info->dev_addr, buf_info->size,
+ DMA_TO_DEVICE);
+ ret = dma_mapping_error(dma_dev, src_addr);
+ if (ret) {
+ dev_err(dev, "Failed to map remote memory\n");
+ goto err_unlock;
+ }
+
+ desc = dmaengine_prep_slave_single(chan, src_addr, buf_info->size,
+ DMA_MEM_TO_DEV,
+ DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
+ if (!desc) {
+ dev_err(dev, "Failed to prepare DMA\n");
+ ret = -EIO;
+ goto err_unmap;
+ }
+
+ desc->callback = pci_epf_mhi_dma_callback;
+ desc->callback_param = &complete;
+
+ cookie = dmaengine_submit(desc);
+ ret = dma_submit_error(cookie);
+ if (ret) {
+ dev_err(dev, "Failed to do DMA submit\n");
+ goto err_unmap;
+ }
+
+ dma_async_issue_pending(chan);
+ ret = wait_for_completion_timeout(&complete, msecs_to_jiffies(1000));
+ if (!ret) {
+ dev_err(dev, "DMA transfer timeout\n");
+ dmaengine_terminate_sync(chan);
+ ret = -ETIMEDOUT;
+ }
+
+err_unmap:
+ dma_unmap_single(dma_dev, src_addr, buf_info->size, DMA_TO_DEVICE);
+err_unlock:
+ mutex_unlock(&epf_mhi->lock);
+
+ return ret;
+}
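/*
 * Editorial note, not part of the patch: both eDMA helpers above follow the
 * usual dmaengine slave sequence -- dmaengine_slave_config() with the host
 * PCI address as the slave address, dma_map_single() on the local buffer,
 * dmaengine_prep_slave_single(), dmaengine_submit(), dma_async_issue_pending()
 * and a bounded wait_for_completion_timeout(); on timeout the channel is torn
 * down with dmaengine_terminate_sync(). Buffers smaller than SZ_4K fall back
 * to the iATU path, presumably because descriptor setup costs outweigh the
 * gain for small transfers.
 */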
+
+static void pci_epf_mhi_dma_worker(struct work_struct *work)
+{
+ struct pci_epf_mhi *epf_mhi = container_of(work, struct pci_epf_mhi, dma_work);
+ struct device *dma_dev = epf_mhi->epf->epc->dev.parent;
+ struct pci_epf_mhi_dma_transfer *itr, *tmp;
+ struct mhi_ep_buf_info *buf_info;
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&epf_mhi->list_lock, flags);
+ list_splice_tail_init(&epf_mhi->dma_list, &head);
+ spin_unlock_irqrestore(&epf_mhi->list_lock, flags);
+
+ list_for_each_entry_safe(itr, tmp, &head, node) {
+ list_del(&itr->node);
+ dma_unmap_single(dma_dev, itr->paddr, itr->size, itr->dir);
+ buf_info = &itr->buf_info;
+ buf_info->cb(buf_info);
+ kfree(itr);
+ }
+}
+
+static void pci_epf_mhi_dma_async_callback(void *param)
+{
+ struct pci_epf_mhi_dma_transfer *transfer = param;
+ struct pci_epf_mhi *epf_mhi = transfer->epf_mhi;
+
+ spin_lock(&epf_mhi->list_lock);
+ list_add_tail(&transfer->node, &epf_mhi->dma_list);
+ spin_unlock(&epf_mhi->list_lock);
+
+ queue_work(epf_mhi->dma_wq, &epf_mhi->dma_work);
+}
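/*
 * Editorial note, not part of the patch: the async completion callback above
 * runs from the DMA engine's completion context, so it only queues the
 * finished transfer onto dma_list under list_lock and kicks dma_wq; the
 * dma_unmap_single() and the client's buf_info->cb() invocation are deferred
 * to pci_epf_mhi_dma_worker(), which runs in process context.
 */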
+
+static int pci_epf_mhi_edma_read_async(struct mhi_ep_cntrl *mhi_cntrl,
+ struct mhi_ep_buf_info *buf_info)
+{
+ struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
+ struct device *dma_dev = epf_mhi->epf->epc->dev.parent;
+ struct pci_epf_mhi_dma_transfer *transfer = NULL;
+ struct dma_chan *chan = epf_mhi->dma_chan_rx;
+ struct device *dev = &epf_mhi->epf->dev;
+ DECLARE_COMPLETION_ONSTACK(complete);
+ struct dma_async_tx_descriptor *desc;
+ struct dma_slave_config config = {};
+ dma_cookie_t cookie;
+ dma_addr_t dst_addr;
+ int ret;
+
+ mutex_lock(&epf_mhi->lock);
+
+ config.direction = DMA_DEV_TO_MEM;
+ config.src_addr = buf_info->host_addr;
+
+ ret = dmaengine_slave_config(chan, &config);
+ if (ret) {
+ dev_err(dev, "Failed to configure DMA channel\n");
+ goto err_unlock;
+ }
+
+ dst_addr = dma_map_single(dma_dev, buf_info->dev_addr, buf_info->size,
+ DMA_FROM_DEVICE);
+ ret = dma_mapping_error(dma_dev, dst_addr);
+ if (ret) {
+ dev_err(dev, "Failed to map remote memory\n");
+ goto err_unlock;
+ }
+
+ desc = dmaengine_prep_slave_single(chan, dst_addr, buf_info->size,
+ DMA_DEV_TO_MEM,
+ DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
+ if (!desc) {
+ dev_err(dev, "Failed to prepare DMA\n");
+ ret = -EIO;
+ goto err_unmap;
+ }
+
+ transfer = kzalloc(sizeof(*transfer), GFP_KERNEL);
+ if (!transfer) {
+ ret = -ENOMEM;
+ goto err_unmap;
+ }
+
+ transfer->epf_mhi = epf_mhi;
+ transfer->paddr = dst_addr;
+ transfer->size = buf_info->size;
+ transfer->dir = DMA_FROM_DEVICE;
+ memcpy(&transfer->buf_info, buf_info, sizeof(*buf_info));
+
+ desc->callback = pci_epf_mhi_dma_async_callback;
+ desc->callback_param = transfer;
+
+ cookie = dmaengine_submit(desc);
+ ret = dma_submit_error(cookie);
+ if (ret) {
+ dev_err(dev, "Failed to do DMA submit\n");
+ goto err_free_transfer;
+ }
+
+ dma_async_issue_pending(chan);
+
+ goto err_unlock;
+
+err_free_transfer:
+ kfree(transfer);
+err_unmap:
+ dma_unmap_single(dma_dev, dst_addr, buf_info->size, DMA_FROM_DEVICE);
+err_unlock:
+ mutex_unlock(&epf_mhi->lock);
+
+ return ret;
+}
+
+static int pci_epf_mhi_edma_write_async(struct mhi_ep_cntrl *mhi_cntrl,
+ struct mhi_ep_buf_info *buf_info)
+{
+ struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
+ struct device *dma_dev = epf_mhi->epf->epc->dev.parent;
+ struct pci_epf_mhi_dma_transfer *transfer = NULL;
+ struct dma_chan *chan = epf_mhi->dma_chan_tx;
+ struct device *dev = &epf_mhi->epf->dev;
+ DECLARE_COMPLETION_ONSTACK(complete);
+ struct dma_async_tx_descriptor *desc;
+ struct dma_slave_config config = {};
+ dma_cookie_t cookie;
+ dma_addr_t src_addr;
+ int ret;
+
+ mutex_lock(&epf_mhi->lock);
+
+ config.direction = DMA_MEM_TO_DEV;
+ config.dst_addr = buf_info->host_addr;
+
+ ret = dmaengine_slave_config(chan, &config);
+ if (ret) {
+ dev_err(dev, "Failed to configure DMA channel\n");
+ goto err_unlock;
+ }
+
+ src_addr = dma_map_single(dma_dev, buf_info->dev_addr, buf_info->size,
+ DMA_TO_DEVICE);
+ ret = dma_mapping_error(dma_dev, src_addr);
+ if (ret) {
+ dev_err(dev, "Failed to map remote memory\n");
+ goto err_unlock;
+ }
+
+ desc = dmaengine_prep_slave_single(chan, src_addr, buf_info->size,
+ DMA_MEM_TO_DEV,
+ DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
+ if (!desc) {
+ dev_err(dev, "Failed to prepare DMA\n");
+ ret = -EIO;
+ goto err_unmap;
+ }
+
+ transfer = kzalloc(sizeof(*transfer), GFP_KERNEL);
+ if (!transfer) {
+ ret = -ENOMEM;
+ goto err_unmap;
+ }
+
+ transfer->epf_mhi = epf_mhi;
+ transfer->paddr = src_addr;
+ transfer->size = buf_info->size;
+ transfer->dir = DMA_TO_DEVICE;
+ memcpy(&transfer->buf_info, buf_info, sizeof(*buf_info));
+
+ desc->callback = pci_epf_mhi_dma_async_callback;
+ desc->callback_param = transfer;
+
+ cookie = dmaengine_submit(desc);
+ ret = dma_submit_error(cookie);
+ if (ret) {
+ dev_err(dev, "Failed to do DMA submit\n");
+ goto err_free_transfer;
+ }
+
+ dma_async_issue_pending(chan);
+ goto err_unlock;
+
+err_free_transfer:
+ kfree(transfer);
+err_unmap:
+ dma_unmap_single(dma_dev, src_addr, buf_info->size, DMA_TO_DEVICE);
+err_unlock:
mutex_unlock(&epf_mhi->lock);
+ return ret;
+}
+
+struct epf_dma_filter {
+ struct device *dev;
+ u32 dma_mask;
+};
+
+static bool pci_epf_mhi_filter(struct dma_chan *chan, void *node)
+{
+ struct epf_dma_filter *filter = node;
+ struct dma_slave_caps caps;
+
+ memset(&caps, 0, sizeof(caps));
+ dma_get_slave_caps(chan, &caps);
+
+ return chan->device->dev == filter->dev && filter->dma_mask &
+ caps.directions;
+}
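/*
 * Editorial note, not part of the patch: the filter selects a channel owned
 * by the endpoint controller's parent device (typically the controller's
 * embedded eDMA) whose slave capabilities include the requested direction,
 * so dma_chan_tx ends up DMA_MEM_TO_DEV capable and dma_chan_rx
 * DMA_DEV_TO_MEM capable.
 */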
+
+static int pci_epf_mhi_dma_init(struct pci_epf_mhi *epf_mhi)
+{
+ struct device *dma_dev = epf_mhi->epf->epc->dev.parent;
+ struct device *dev = &epf_mhi->epf->dev;
+ struct epf_dma_filter filter;
+ dma_cap_mask_t mask;
+ int ret;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ filter.dev = dma_dev;
+ filter.dma_mask = BIT(DMA_MEM_TO_DEV);
+ epf_mhi->dma_chan_tx = dma_request_channel(mask, pci_epf_mhi_filter,
+ &filter);
+ if (IS_ERR_OR_NULL(epf_mhi->dma_chan_tx)) {
+ dev_err(dev, "Failed to request tx channel\n");
+ return -ENODEV;
+ }
+
+ filter.dma_mask = BIT(DMA_DEV_TO_MEM);
+ epf_mhi->dma_chan_rx = dma_request_channel(mask, pci_epf_mhi_filter,
+ &filter);
+ if (IS_ERR_OR_NULL(epf_mhi->dma_chan_rx)) {
+ dev_err(dev, "Failed to request rx channel\n");
+ ret = -ENODEV;
+ goto err_release_tx;
+ }
+
+ epf_mhi->dma_wq = alloc_workqueue("pci_epf_mhi_dma_wq", 0, 0);
+ if (!epf_mhi->dma_wq) {
+ ret = -ENOMEM;
+ goto err_release_rx;
+ }
+
+ INIT_LIST_HEAD(&epf_mhi->dma_list);
+ INIT_WORK(&epf_mhi->dma_work, pci_epf_mhi_dma_worker);
+ spin_lock_init(&epf_mhi->list_lock);
+
return 0;
+
+err_release_rx:
+ dma_release_channel(epf_mhi->dma_chan_rx);
+ epf_mhi->dma_chan_rx = NULL;
+err_release_tx:
+ dma_release_channel(epf_mhi->dma_chan_tx);
+ epf_mhi->dma_chan_tx = NULL;
+
+ return ret;
+}
+
+static void pci_epf_mhi_dma_deinit(struct pci_epf_mhi *epf_mhi)
+{
+ destroy_workqueue(epf_mhi->dma_wq);
+ dma_release_channel(epf_mhi->dma_chan_tx);
+ dma_release_channel(epf_mhi->dma_chan_rx);
+ epf_mhi->dma_chan_tx = NULL;
+ epf_mhi->dma_chan_rx = NULL;
}
-static int pci_epf_mhi_core_init(struct pci_epf *epf)
+static int pci_epf_mhi_epc_init(struct pci_epf *epf)
{
struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf);
const struct pci_epf_mhi_ep_info *info = epf_mhi->info;
@@ -270,9 +750,39 @@ static int pci_epf_mhi_core_init(struct pci_epf *epf)
return ret;
}
+ epf_mhi->epc_features = pci_epc_get_features(epc, epf->func_no, epf->vfunc_no);
+ if (!epf_mhi->epc_features)
+ return -ENODATA;
+
+ if (info->flags & MHI_EPF_USE_DMA) {
+ ret = pci_epf_mhi_dma_init(epf_mhi);
+ if (ret) {
+ dev_err(dev, "Failed to initialize DMA: %d\n", ret);
+ return ret;
+ }
+ }
+
return 0;
}
+static void pci_epf_mhi_epc_deinit(struct pci_epf *epf)
+{
+ struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf);
+ const struct pci_epf_mhi_ep_info *info = epf_mhi->info;
+ struct pci_epf_bar *epf_bar = &epf->bar[info->bar_num];
+ struct mhi_ep_cntrl *mhi_cntrl = &epf_mhi->mhi_cntrl;
+ struct pci_epc *epc = epf->epc;
+
+ if (mhi_cntrl->mhi_dev) {
+ mhi_ep_power_down(mhi_cntrl);
+ if (info->flags & MHI_EPF_USE_DMA)
+ pci_epf_mhi_dma_deinit(epf_mhi);
+ mhi_ep_unregister_controller(mhi_cntrl);
+ }
+
+ pci_epc_clear_bar(epc, epf->func_no, epf->vfunc_no, epf_bar);
+}
+
static int pci_epf_mhi_link_up(struct pci_epf *epf)
{
struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf);
@@ -291,13 +801,21 @@ static int pci_epf_mhi_link_up(struct pci_epf *epf)
mhi_cntrl->raise_irq = pci_epf_mhi_raise_irq;
mhi_cntrl->alloc_map = pci_epf_mhi_alloc_map;
mhi_cntrl->unmap_free = pci_epf_mhi_unmap_free;
- mhi_cntrl->read_from_host = pci_epf_mhi_read_from_host;
- mhi_cntrl->write_to_host = pci_epf_mhi_write_to_host;
+ mhi_cntrl->read_sync = mhi_cntrl->read_async = pci_epf_mhi_iatu_read;
+ mhi_cntrl->write_sync = mhi_cntrl->write_async = pci_epf_mhi_iatu_write;
+ if (info->flags & MHI_EPF_USE_DMA) {
+ mhi_cntrl->read_sync = pci_epf_mhi_edma_read;
+ mhi_cntrl->write_sync = pci_epf_mhi_edma_write;
+ mhi_cntrl->read_async = pci_epf_mhi_edma_read_async;
+ mhi_cntrl->write_async = pci_epf_mhi_edma_write_async;
+ }
/* Register the MHI EP controller */
ret = mhi_ep_register_controller(mhi_cntrl, info->config);
if (ret) {
dev_err(dev, "Failed to register MHI EP controller: %d\n", ret);
+ if (info->flags & MHI_EPF_USE_DMA)
+ pci_epf_mhi_dma_deinit(epf_mhi);
return ret;
}
@@ -307,19 +825,23 @@ static int pci_epf_mhi_link_up(struct pci_epf *epf)
static int pci_epf_mhi_link_down(struct pci_epf *epf)
{
struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf);
+ const struct pci_epf_mhi_ep_info *info = epf_mhi->info;
struct mhi_ep_cntrl *mhi_cntrl = &epf_mhi->mhi_cntrl;
if (mhi_cntrl->mhi_dev) {
mhi_ep_power_down(mhi_cntrl);
+ if (info->flags & MHI_EPF_USE_DMA)
+ pci_epf_mhi_dma_deinit(epf_mhi);
mhi_ep_unregister_controller(mhi_cntrl);
}
return 0;
}
-static int pci_epf_mhi_bme(struct pci_epf *epf)
+static int pci_epf_mhi_bus_master_enable(struct pci_epf *epf)
{
struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf);
+ const struct pci_epf_mhi_ep_info *info = epf_mhi->info;
struct mhi_ep_cntrl *mhi_cntrl = &epf_mhi->mhi_cntrl;
struct device *dev = &epf->dev;
int ret;
@@ -332,6 +854,8 @@ static int pci_epf_mhi_bme(struct pci_epf *epf)
ret = mhi_ep_power_up(mhi_cntrl);
if (ret) {
dev_err(dev, "Failed to power up MHI EP: %d\n", ret);
+ if (info->flags & MHI_EPF_USE_DMA)
+ pci_epf_mhi_dma_deinit(epf_mhi);
mhi_ep_unregister_controller(mhi_cntrl);
}
}
@@ -343,12 +867,18 @@ static int pci_epf_mhi_bind(struct pci_epf *epf)
{
struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf);
struct pci_epc *epc = epf->epc;
+ struct device *dev = &epf->dev;
struct platform_device *pdev = to_platform_device(epc->dev.parent);
struct resource *res;
int ret;
/* Get MMIO base address from Endpoint controller */
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mmio");
+ if (!res) {
+ dev_err(dev, "Failed to get \"mmio\" resource\n");
+ return -ENODEV;
+ }
+
epf_mhi->mmio_phys = res->start;
epf_mhi->mmio_size = resource_size(res);
@@ -377,11 +907,13 @@ static void pci_epf_mhi_unbind(struct pci_epf *epf)
/*
* Forcefully power down the MHI EP stack. Only way to bring the MHI EP
- * stack back to working state after successive bind is by getting BME
- * from host.
+ * stack back to working state after successive bind is by getting Bus
+ * Master Enable event from host.
*/
if (mhi_cntrl->mhi_dev) {
mhi_ep_power_down(mhi_cntrl);
+ if (info->flags & MHI_EPF_USE_DMA)
+ pci_epf_mhi_dma_deinit(epf_mhi);
mhi_ep_unregister_controller(mhi_cntrl);
}
@@ -389,11 +921,12 @@ static void pci_epf_mhi_unbind(struct pci_epf *epf)
pci_epc_clear_bar(epc, epf->func_no, epf->vfunc_no, epf_bar);
}
-static struct pci_epc_event_ops pci_epf_mhi_event_ops = {
- .core_init = pci_epf_mhi_core_init,
+static const struct pci_epc_event_ops pci_epf_mhi_event_ops = {
+ .epc_init = pci_epf_mhi_epc_init,
+ .epc_deinit = pci_epf_mhi_epc_deinit,
.link_up = pci_epf_mhi_link_up,
.link_down = pci_epf_mhi_link_down,
- .bme = pci_epf_mhi_bme,
+ .bus_master_enable = pci_epf_mhi_bus_master_enable,
};
static int pci_epf_mhi_probe(struct pci_epf *epf,
@@ -422,13 +955,13 @@ static int pci_epf_mhi_probe(struct pci_epf *epf,
}
static const struct pci_epf_device_id pci_epf_mhi_ids[] = {
- {
- .name = "sdx55", .driver_data = (kernel_ulong_t)&sdx55_info,
- },
+ { .name = "pci_epf_mhi_sa8775p", .driver_data = (kernel_ulong_t)&sa8775p_info },
+ { .name = "pci_epf_mhi_sdx55", .driver_data = (kernel_ulong_t)&sdx55_info },
+ { .name = "pci_epf_mhi_sm8450", .driver_data = (kernel_ulong_t)&sm8450_info },
{},
};
-static struct pci_epf_ops pci_epf_mhi_ops = {
+static const struct pci_epf_ops pci_epf_mhi_ops = {
.unbind = pci_epf_mhi_unbind,
.bind = pci_epf_mhi_bind,
};
diff --git a/drivers/pci/endpoint/functions/pci-epf-ntb.c b/drivers/pci/endpoint/functions/pci-epf-ntb.c
index 9aac2c6f3bb9..e01a98e74d21 100644
--- a/drivers/pci/endpoint/functions/pci-epf-ntb.c
+++ b/drivers/pci/endpoint/functions/pci-epf-ntb.c
@@ -140,9 +140,9 @@ static struct pci_epf_header epf_ntb_header = {
static int epf_ntb_link_up(struct epf_ntb *ntb, bool link_up)
{
enum pci_epc_interface_type type;
- enum pci_epc_irq_type irq_type;
struct epf_ntb_epc *ntb_epc;
struct epf_ntb_ctrl *ctrl;
+ unsigned int irq_type;
struct pci_epc *epc;
u8 func_no, vfunc_no;
bool is_msix;
@@ -159,7 +159,7 @@ static int epf_ntb_link_up(struct epf_ntb *ntb, bool link_up)
ctrl->link_status |= LINK_STATUS_UP;
else
ctrl->link_status &= ~LINK_STATUS_UP;
- irq_type = is_msix ? PCI_EPC_IRQ_MSIX : PCI_EPC_IRQ_MSI;
+ irq_type = is_msix ? PCI_IRQ_MSIX : PCI_IRQ_MSI;
ret = pci_epc_raise_irq(epc, func_no, vfunc_no, irq_type, 1);
if (ret) {
dev_err(&epc->dev,
@@ -1012,13 +1012,13 @@ static int epf_ntb_config_spad_bar_alloc(struct epf_ntb *ntb,
epc_features = ntb_epc->epc_features;
barno = ntb_epc->epf_ntb_bar[BAR_CONFIG];
- size = epc_features->bar_fixed_size[barno];
+ size = epc_features->bar[barno].fixed_size;
align = epc_features->align;
peer_ntb_epc = ntb->epc[!type];
peer_epc_features = peer_ntb_epc->epc_features;
peer_barno = ntb_epc->epf_ntb_bar[BAR_PEER_SPAD];
- peer_size = peer_epc_features->bar_fixed_size[peer_barno];
+ peer_size = peer_epc_features->bar[peer_barno].fixed_size;
/* Check if epc_features is populated incorrectly */
if ((!IS_ALIGNED(size, align)))
@@ -1067,7 +1067,7 @@ static int epf_ntb_config_spad_bar_alloc(struct epf_ntb *ntb,
else if (size < ctrl_size + spad_size)
return -EINVAL;
- base = pci_epf_alloc_space(epf, size, barno, align, type);
+ base = pci_epf_alloc_space(epf, size, barno, epc_features, type);
if (!base) {
dev_err(dev, "%s intf: Config/Status/SPAD alloc region fail\n",
pci_epc_interface_string(type));
@@ -2099,7 +2099,7 @@ static int epf_ntb_probe(struct pci_epf *epf,
return 0;
}
-static struct pci_epf_ops epf_ntb_ops = {
+static const struct pci_epf_ops epf_ntb_ops = {
.bind = epf_ntb_bind,
.unbind = epf_ntb_unbind,
.add_cfs = epf_ntb_add_cfs,
diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c
index 1f0d2b84296a..debd235253c5 100644
--- a/drivers/pci/endpoint/functions/pci-epf-test.c
+++ b/drivers/pci/endpoint/functions/pci-epf-test.c
@@ -11,24 +11,28 @@
#include <linux/dmaengine.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/msi.h>
#include <linux/slab.h>
#include <linux/pci_ids.h>
#include <linux/random.h>
#include <linux/pci-epc.h>
#include <linux/pci-epf.h>
+#include <linux/pci-ep-msi.h>
#include <linux/pci_regs.h>
-#define IRQ_TYPE_LEGACY 0
+#define IRQ_TYPE_INTX 0
#define IRQ_TYPE_MSI 1
#define IRQ_TYPE_MSIX 2
-#define COMMAND_RAISE_LEGACY_IRQ BIT(0)
+#define COMMAND_RAISE_INTX_IRQ BIT(0)
#define COMMAND_RAISE_MSI_IRQ BIT(1)
#define COMMAND_RAISE_MSIX_IRQ BIT(2)
#define COMMAND_READ BIT(3)
#define COMMAND_WRITE BIT(4)
#define COMMAND_COPY BIT(5)
+#define COMMAND_ENABLE_DOORBELL BIT(6)
+#define COMMAND_DISABLE_DOORBELL BIT(7)
#define STATUS_READ_SUCCESS BIT(0)
#define STATUS_READ_FAIL BIT(1)
@@ -39,11 +43,21 @@
#define STATUS_IRQ_RAISED BIT(6)
#define STATUS_SRC_ADDR_INVALID BIT(7)
#define STATUS_DST_ADDR_INVALID BIT(8)
+#define STATUS_DOORBELL_SUCCESS BIT(9)
+#define STATUS_DOORBELL_ENABLE_SUCCESS BIT(10)
+#define STATUS_DOORBELL_ENABLE_FAIL BIT(11)
+#define STATUS_DOORBELL_DISABLE_SUCCESS BIT(12)
+#define STATUS_DOORBELL_DISABLE_FAIL BIT(13)
#define FLAG_USE_DMA BIT(0)
#define TIMER_RESOLUTION 1
+#define CAP_UNALIGNED_ACCESS BIT(0)
+#define CAP_MSI BIT(1)
+#define CAP_MSIX BIT(2)
+#define CAP_INTX BIT(3)
+
static struct workqueue_struct *kpcitest_workqueue;
struct pci_epf_test {
@@ -61,19 +75,24 @@ struct pci_epf_test {
bool dma_supported;
bool dma_private;
const struct pci_epc_features *epc_features;
+ struct pci_epf_bar db_bar;
};
struct pci_epf_test_reg {
- u32 magic;
- u32 command;
- u32 status;
- u64 src_addr;
- u64 dst_addr;
- u32 size;
- u32 checksum;
- u32 irq_type;
- u32 irq_number;
- u32 flags;
+ __le32 magic;
+ __le32 command;
+ __le32 status;
+ __le64 src_addr;
+ __le64 dst_addr;
+ __le32 size;
+ __le32 checksum;
+ __le32 irq_type;
+ __le32 irq_number;
+ __le32 flags;
+ __le32 caps;
+ __le32 doorbell_bar;
+ __le32 doorbell_offset;
+ __le32 doorbell_data;
} __packed;
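/*
 * Editorial note, not part of the patch: the register block is now declared
 * with explicit __le32/__le64 types, so the endpoint side converts with
 * le32_to_cpu()/cpu_to_le32() (as the reworked handlers below do) and both
 * ends of the link agree on byte order even on a big-endian endpoint. The
 * new doorbell_bar/doorbell_offset/doorbell_data fields are populated by
 * pci_epf_test_enable_doorbell() further down.
 */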
static struct pci_epf_header test_header = {
@@ -251,7 +270,7 @@ static int pci_epf_test_init_dma_chan(struct pci_epf_test *epf_test)
fail_back_rx:
dma_release_channel(epf_test->dma_chan_rx);
- epf_test->dma_chan_tx = NULL;
+ epf_test->dma_chan_rx = NULL;
fail_back_tx:
dma_cap_zero(mask);
@@ -282,17 +301,20 @@ static void pci_epf_test_clean_dma_chan(struct pci_epf_test *epf_test)
if (!epf_test->dma_supported)
return;
- dma_release_channel(epf_test->dma_chan_tx);
- if (epf_test->dma_chan_tx == epf_test->dma_chan_rx) {
+ if (epf_test->dma_chan_tx) {
+ dma_release_channel(epf_test->dma_chan_tx);
+ if (epf_test->dma_chan_tx == epf_test->dma_chan_rx) {
+ epf_test->dma_chan_tx = NULL;
+ epf_test->dma_chan_rx = NULL;
+ return;
+ }
epf_test->dma_chan_tx = NULL;
- epf_test->dma_chan_rx = NULL;
- return;
}
- dma_release_channel(epf_test->dma_chan_rx);
- epf_test->dma_chan_rx = NULL;
-
- return;
+ if (epf_test->dma_chan_rx) {
+ dma_release_channel(epf_test->dma_chan_rx);
+ epf_test->dma_chan_rx = NULL;
+ }
}
static void pci_epf_test_print_rate(struct pci_epf_test *epf_test,
@@ -309,258 +331,288 @@ static void pci_epf_test_print_rate(struct pci_epf_test *epf_test,
rate = div64_u64(size * NSEC_PER_SEC, ns * 1000);
dev_info(&epf_test->epf->dev,
- "%s => Size: %llu B, DMA: %s, Time: %llu.%09u s, Rate: %llu KB/s\n",
- op, size, dma ? "YES" : "NO",
- (u64)ts.tv_sec, (u32)ts.tv_nsec, rate);
+ "%s => Size: %llu B, DMA: %s, Time: %ptSp s, Rate: %llu KB/s\n",
+ op, size, dma ? "YES" : "NO", &ts, rate);
}
static void pci_epf_test_copy(struct pci_epf_test *epf_test,
struct pci_epf_test_reg *reg)
{
- int ret;
- void __iomem *src_addr;
- void __iomem *dst_addr;
- phys_addr_t src_phys_addr;
- phys_addr_t dst_phys_addr;
+ int ret = 0;
struct timespec64 start, end;
struct pci_epf *epf = epf_test->epf;
- struct device *dev = &epf->dev;
struct pci_epc *epc = epf->epc;
-
- src_addr = pci_epc_mem_alloc_addr(epc, &src_phys_addr, reg->size);
- if (!src_addr) {
- dev_err(dev, "Failed to allocate source address\n");
- reg->status = STATUS_SRC_ADDR_INVALID;
- ret = -ENOMEM;
- goto err;
- }
-
- ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, src_phys_addr,
- reg->src_addr, reg->size);
- if (ret) {
- dev_err(dev, "Failed to map source address\n");
- reg->status = STATUS_SRC_ADDR_INVALID;
- goto err_src_addr;
- }
-
- dst_addr = pci_epc_mem_alloc_addr(epc, &dst_phys_addr, reg->size);
- if (!dst_addr) {
- dev_err(dev, "Failed to allocate destination address\n");
- reg->status = STATUS_DST_ADDR_INVALID;
- ret = -ENOMEM;
- goto err_src_map_addr;
+ struct device *dev = &epf->dev;
+ struct pci_epc_map src_map, dst_map;
+ u64 src_addr = le64_to_cpu(reg->src_addr);
+ u64 dst_addr = le64_to_cpu(reg->dst_addr);
+ size_t orig_size, copy_size;
+ ssize_t map_size = 0;
+ u32 flags = le32_to_cpu(reg->flags);
+ u32 status = 0;
+ void *copy_buf = NULL, *buf;
+
+ orig_size = copy_size = le32_to_cpu(reg->size);
+
+ if (flags & FLAG_USE_DMA) {
+ if (!dma_has_cap(DMA_MEMCPY, epf_test->dma_chan_tx->device->cap_mask)) {
+ dev_err(dev, "DMA controller doesn't support MEMCPY\n");
+ ret = -EINVAL;
+ goto set_status;
+ }
+ } else {
+ copy_buf = kzalloc(copy_size, GFP_KERNEL);
+ if (!copy_buf) {
+ ret = -ENOMEM;
+ goto set_status;
+ }
+ buf = copy_buf;
}
- ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, dst_phys_addr,
- reg->dst_addr, reg->size);
- if (ret) {
- dev_err(dev, "Failed to map destination address\n");
- reg->status = STATUS_DST_ADDR_INVALID;
- goto err_dst_addr;
- }
+ while (copy_size) {
+ ret = pci_epc_mem_map(epc, epf->func_no, epf->vfunc_no,
+ src_addr, copy_size, &src_map);
+ if (ret) {
+ dev_err(dev, "Failed to map source address\n");
+ status = STATUS_SRC_ADDR_INVALID;
+ goto free_buf;
+ }
- ktime_get_ts64(&start);
- if (reg->flags & FLAG_USE_DMA) {
- if (epf_test->dma_private) {
- dev_err(dev, "Cannot transfer data using DMA\n");
- ret = -EINVAL;
- goto err_map_addr;
+ ret = pci_epc_mem_map(epf->epc, epf->func_no, epf->vfunc_no,
+ dst_addr, copy_size, &dst_map);
+ if (ret) {
+ dev_err(dev, "Failed to map destination address\n");
+ status = STATUS_DST_ADDR_INVALID;
+ pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no,
+ &src_map);
+ goto free_buf;
}
- ret = pci_epf_test_data_transfer(epf_test, dst_phys_addr,
- src_phys_addr, reg->size, 0,
- DMA_MEM_TO_MEM);
- if (ret)
- dev_err(dev, "Data transfer failed\n");
- } else {
- void *buf;
+ map_size = min_t(size_t, dst_map.pci_size, src_map.pci_size);
- buf = kzalloc(reg->size, GFP_KERNEL);
- if (!buf) {
- ret = -ENOMEM;
- goto err_map_addr;
+ ktime_get_ts64(&start);
+ if (flags & FLAG_USE_DMA) {
+ ret = pci_epf_test_data_transfer(epf_test,
+ dst_map.phys_addr, src_map.phys_addr,
+ map_size, 0, DMA_MEM_TO_MEM);
+ if (ret) {
+ dev_err(dev, "Data transfer failed\n");
+ goto unmap;
+ }
+ } else {
+ memcpy_fromio(buf, src_map.virt_addr, map_size);
+ memcpy_toio(dst_map.virt_addr, buf, map_size);
+ buf += map_size;
}
+ ktime_get_ts64(&end);
- memcpy_fromio(buf, src_addr, reg->size);
- memcpy_toio(dst_addr, buf, reg->size);
- kfree(buf);
- }
- ktime_get_ts64(&end);
- pci_epf_test_print_rate(epf_test, "COPY", reg->size, &start, &end,
- reg->flags & FLAG_USE_DMA);
+ copy_size -= map_size;
+ src_addr += map_size;
+ dst_addr += map_size;
-err_map_addr:
- pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, dst_phys_addr);
+ pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &dst_map);
+ pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &src_map);
+ map_size = 0;
+ }
-err_dst_addr:
- pci_epc_mem_free_addr(epc, dst_phys_addr, dst_addr, reg->size);
+ pci_epf_test_print_rate(epf_test, "COPY", orig_size, &start, &end,
+ flags & FLAG_USE_DMA);
-err_src_map_addr:
- pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, src_phys_addr);
+unmap:
+ if (map_size) {
+ pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &dst_map);
+ pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &src_map);
+ }
-err_src_addr:
- pci_epc_mem_free_addr(epc, src_phys_addr, src_addr, reg->size);
+free_buf:
+ kfree(copy_buf);
-err:
+set_status:
if (!ret)
- reg->status |= STATUS_COPY_SUCCESS;
+ status |= STATUS_COPY_SUCCESS;
else
- reg->status |= STATUS_COPY_FAIL;
+ status |= STATUS_COPY_FAIL;
+ reg->status = cpu_to_le32(status);
}
static void pci_epf_test_read(struct pci_epf_test *epf_test,
struct pci_epf_test_reg *reg)
{
- int ret;
- void __iomem *src_addr;
- void *buf;
+ int ret = 0;
+ void *src_buf, *buf;
u32 crc32;
- phys_addr_t phys_addr;
+ struct pci_epc_map map;
phys_addr_t dst_phys_addr;
struct timespec64 start, end;
struct pci_epf *epf = epf_test->epf;
- struct device *dev = &epf->dev;
struct pci_epc *epc = epf->epc;
+ struct device *dev = &epf->dev;
struct device *dma_dev = epf->epc->dev.parent;
+ u64 src_addr = le64_to_cpu(reg->src_addr);
+ size_t orig_size, src_size;
+ ssize_t map_size = 0;
+ u32 flags = le32_to_cpu(reg->flags);
+ u32 checksum = le32_to_cpu(reg->checksum);
+ u32 status = 0;
- src_addr = pci_epc_mem_alloc_addr(epc, &phys_addr, reg->size);
- if (!src_addr) {
- dev_err(dev, "Failed to allocate address\n");
- reg->status = STATUS_SRC_ADDR_INVALID;
- ret = -ENOMEM;
- goto err;
- }
+ orig_size = src_size = le32_to_cpu(reg->size);
- ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, phys_addr,
- reg->src_addr, reg->size);
- if (ret) {
- dev_err(dev, "Failed to map address\n");
- reg->status = STATUS_SRC_ADDR_INVALID;
- goto err_addr;
- }
-
- buf = kzalloc(reg->size, GFP_KERNEL);
- if (!buf) {
+ src_buf = kzalloc(src_size, GFP_KERNEL);
+ if (!src_buf) {
ret = -ENOMEM;
- goto err_map_addr;
+ goto set_status;
}
+ buf = src_buf;
- if (reg->flags & FLAG_USE_DMA) {
- dst_phys_addr = dma_map_single(dma_dev, buf, reg->size,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(dma_dev, dst_phys_addr)) {
- dev_err(dev, "Failed to map destination buffer addr\n");
- ret = -ENOMEM;
- goto err_dma_map;
+ while (src_size) {
+ ret = pci_epc_mem_map(epc, epf->func_no, epf->vfunc_no,
+ src_addr, src_size, &map);
+ if (ret) {
+ dev_err(dev, "Failed to map address\n");
+ status = STATUS_SRC_ADDR_INVALID;
+ goto free_buf;
}
- ktime_get_ts64(&start);
- ret = pci_epf_test_data_transfer(epf_test, dst_phys_addr,
- phys_addr, reg->size,
- reg->src_addr, DMA_DEV_TO_MEM);
- if (ret)
- dev_err(dev, "Data transfer failed\n");
- ktime_get_ts64(&end);
+ map_size = map.pci_size;
+ if (flags & FLAG_USE_DMA) {
+ dst_phys_addr = dma_map_single(dma_dev, buf, map_size,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(dma_dev, dst_phys_addr)) {
+ dev_err(dev,
+ "Failed to map destination buffer addr\n");
+ ret = -ENOMEM;
+ goto unmap;
+ }
+
+ ktime_get_ts64(&start);
+ ret = pci_epf_test_data_transfer(epf_test,
+ dst_phys_addr, map.phys_addr,
+ map_size, src_addr, DMA_DEV_TO_MEM);
+ if (ret)
+ dev_err(dev, "Data transfer failed\n");
+ ktime_get_ts64(&end);
+
+ dma_unmap_single(dma_dev, dst_phys_addr, map_size,
+ DMA_FROM_DEVICE);
+
+ if (ret)
+ goto unmap;
+ } else {
+ ktime_get_ts64(&start);
+ memcpy_fromio(buf, map.virt_addr, map_size);
+ ktime_get_ts64(&end);
+ }
- dma_unmap_single(dma_dev, dst_phys_addr, reg->size,
- DMA_FROM_DEVICE);
- } else {
- ktime_get_ts64(&start);
- memcpy_fromio(buf, src_addr, reg->size);
- ktime_get_ts64(&end);
+ src_size -= map_size;
+ src_addr += map_size;
+ buf += map_size;
+
+ pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &map);
+ map_size = 0;
}
- pci_epf_test_print_rate(epf_test, "READ", reg->size, &start, &end,
- reg->flags & FLAG_USE_DMA);
+ pci_epf_test_print_rate(epf_test, "READ", orig_size, &start, &end,
+ flags & FLAG_USE_DMA);
- crc32 = crc32_le(~0, buf, reg->size);
- if (crc32 != reg->checksum)
+ crc32 = crc32_le(~0, src_buf, orig_size);
+ if (crc32 != checksum)
ret = -EIO;
-err_dma_map:
- kfree(buf);
-
-err_map_addr:
- pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, phys_addr);
+unmap:
+ if (map_size)
+ pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &map);
-err_addr:
- pci_epc_mem_free_addr(epc, phys_addr, src_addr, reg->size);
+free_buf:
+ kfree(src_buf);
-err:
+set_status:
if (!ret)
- reg->status |= STATUS_READ_SUCCESS;
+ status |= STATUS_READ_SUCCESS;
else
- reg->status |= STATUS_READ_FAIL;
+ status |= STATUS_READ_FAIL;
+ reg->status = cpu_to_le32(status);
}
static void pci_epf_test_write(struct pci_epf_test *epf_test,
struct pci_epf_test_reg *reg)
{
- int ret;
- void __iomem *dst_addr;
- void *buf;
- phys_addr_t phys_addr;
+ int ret = 0;
+ void *dst_buf, *buf;
+ struct pci_epc_map map;
phys_addr_t src_phys_addr;
struct timespec64 start, end;
struct pci_epf *epf = epf_test->epf;
- struct device *dev = &epf->dev;
struct pci_epc *epc = epf->epc;
+ struct device *dev = &epf->dev;
struct device *dma_dev = epf->epc->dev.parent;
+ u64 dst_addr = le64_to_cpu(reg->dst_addr);
+ size_t orig_size, dst_size;
+ ssize_t map_size = 0;
+ u32 flags = le32_to_cpu(reg->flags);
+ u32 status = 0;
- dst_addr = pci_epc_mem_alloc_addr(epc, &phys_addr, reg->size);
- if (!dst_addr) {
- dev_err(dev, "Failed to allocate address\n");
- reg->status = STATUS_DST_ADDR_INVALID;
- ret = -ENOMEM;
- goto err;
- }
+ orig_size = dst_size = le32_to_cpu(reg->size);
- ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, phys_addr,
- reg->dst_addr, reg->size);
- if (ret) {
- dev_err(dev, "Failed to map address\n");
- reg->status = STATUS_DST_ADDR_INVALID;
- goto err_addr;
- }
-
- buf = kzalloc(reg->size, GFP_KERNEL);
- if (!buf) {
+ dst_buf = kzalloc(dst_size, GFP_KERNEL);
+ if (!dst_buf) {
ret = -ENOMEM;
- goto err_map_addr;
+ goto set_status;
}
+ get_random_bytes(dst_buf, dst_size);
+ reg->checksum = cpu_to_le32(crc32_le(~0, dst_buf, dst_size));
+ buf = dst_buf;
- get_random_bytes(buf, reg->size);
- reg->checksum = crc32_le(~0, buf, reg->size);
-
- if (reg->flags & FLAG_USE_DMA) {
- src_phys_addr = dma_map_single(dma_dev, buf, reg->size,
- DMA_TO_DEVICE);
- if (dma_mapping_error(dma_dev, src_phys_addr)) {
- dev_err(dev, "Failed to map source buffer addr\n");
- ret = -ENOMEM;
- goto err_dma_map;
+ while (dst_size) {
+ ret = pci_epc_mem_map(epc, epf->func_no, epf->vfunc_no,
+ dst_addr, dst_size, &map);
+ if (ret) {
+ dev_err(dev, "Failed to map address\n");
+ status = STATUS_DST_ADDR_INVALID;
+ goto free_buf;
}
- ktime_get_ts64(&start);
+ map_size = map.pci_size;
+ if (flags & FLAG_USE_DMA) {
+ src_phys_addr = dma_map_single(dma_dev, buf, map_size,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dma_dev, src_phys_addr)) {
+ dev_err(dev,
+ "Failed to map source buffer addr\n");
+ ret = -ENOMEM;
+ goto unmap;
+ }
+
+ ktime_get_ts64(&start);
+
+ ret = pci_epf_test_data_transfer(epf_test,
+ map.phys_addr, src_phys_addr,
+ map_size, dst_addr,
+ DMA_MEM_TO_DEV);
+ if (ret)
+ dev_err(dev, "Data transfer failed\n");
+ ktime_get_ts64(&end);
+
+ dma_unmap_single(dma_dev, src_phys_addr, map_size,
+ DMA_TO_DEVICE);
+
+ if (ret)
+ goto unmap;
+ } else {
+ ktime_get_ts64(&start);
+ memcpy_toio(map.virt_addr, buf, map_size);
+ ktime_get_ts64(&end);
+ }
- ret = pci_epf_test_data_transfer(epf_test, phys_addr,
- src_phys_addr, reg->size,
- reg->dst_addr,
- DMA_MEM_TO_DEV);
- if (ret)
- dev_err(dev, "Data transfer failed\n");
- ktime_get_ts64(&end);
+ dst_size -= map_size;
+ dst_addr += map_size;
+ buf += map_size;
- dma_unmap_single(dma_dev, src_phys_addr, reg->size,
- DMA_TO_DEVICE);
- } else {
- ktime_get_ts64(&start);
- memcpy_toio(dst_addr, buf, reg->size);
- ktime_get_ts64(&end);
+ pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &map);
+ map_size = 0;
}
- pci_epf_test_print_rate(epf_test, "WRITE", reg->size, &start, &end,
- reg->flags & FLAG_USE_DMA);
+ pci_epf_test_print_rate(epf_test, "WRITE", orig_size, &start, &end,
+ flags & FLAG_USE_DMA);
/*
 * wait 1ms in order for the write to complete. Without this delay L3
@@ -568,20 +620,19 @@ static void pci_epf_test_write(struct pci_epf_test *epf_test,
*/
usleep_range(1000, 2000);
-err_dma_map:
- kfree(buf);
+unmap:
+ if (map_size)
+ pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &map);
-err_map_addr:
- pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, phys_addr);
+free_buf:
+ kfree(dst_buf);
-err_addr:
- pci_epc_mem_free_addr(epc, phys_addr, dst_addr, reg->size);
-
-err:
+set_status:
if (!ret)
- reg->status |= STATUS_WRITE_SUCCESS;
+ status |= STATUS_WRITE_SUCCESS;
else
- reg->status |= STATUS_WRITE_FAIL;
+ status |= STATUS_WRITE_FAIL;
+ reg->status = cpu_to_le32(status);
}
static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test,
@@ -590,39 +641,42 @@ static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test,
struct pci_epf *epf = epf_test->epf;
struct device *dev = &epf->dev;
struct pci_epc *epc = epf->epc;
- u32 status = reg->status | STATUS_IRQ_RAISED;
+ u32 status = le32_to_cpu(reg->status);
+ u32 irq_number = le32_to_cpu(reg->irq_number);
+ u32 irq_type = le32_to_cpu(reg->irq_type);
int count;
/*
* Set the status before raising the IRQ to ensure that the host sees
* the updated value when it gets the IRQ.
*/
- WRITE_ONCE(reg->status, status);
+ status |= STATUS_IRQ_RAISED;
+ WRITE_ONCE(reg->status, cpu_to_le32(status));
- switch (reg->irq_type) {
- case IRQ_TYPE_LEGACY:
+ switch (irq_type) {
+ case IRQ_TYPE_INTX:
pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
- PCI_EPC_IRQ_LEGACY, 0);
+ PCI_IRQ_INTX, 0);
break;
case IRQ_TYPE_MSI:
count = pci_epc_get_msi(epc, epf->func_no, epf->vfunc_no);
- if (reg->irq_number > count || count <= 0) {
+ if (irq_number > count || count <= 0) {
dev_err(dev, "Invalid MSI IRQ number %d / %d\n",
- reg->irq_number, count);
+ irq_number, count);
return;
}
pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
- PCI_EPC_IRQ_MSI, reg->irq_number);
+ PCI_IRQ_MSI, irq_number);
break;
case IRQ_TYPE_MSIX:
count = pci_epc_get_msix(epc, epf->func_no, epf->vfunc_no);
- if (reg->irq_number > count || count <= 0) {
- dev_err(dev, "Invalid MSIX IRQ number %d / %d\n",
- reg->irq_number, count);
+ if (irq_number > count || count <= 0) {
+ dev_err(dev, "Invalid MSI-X IRQ number %d / %d\n",
+ irq_number, count);
return;
}
pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
- PCI_EPC_IRQ_MSIX, reg->irq_number);
+ PCI_IRQ_MSIX, irq_number);
break;
default:
dev_err(dev, "Failed to raise IRQ, unknown type\n");
@@ -630,6 +684,128 @@ static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test,
}
}
+static irqreturn_t pci_epf_test_doorbell_handler(int irq, void *data)
+{
+ struct pci_epf_test *epf_test = data;
+ enum pci_barno test_reg_bar = epf_test->test_reg_bar;
+ struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];
+ u32 status = le32_to_cpu(reg->status);
+
+ status |= STATUS_DOORBELL_SUCCESS;
+ reg->status = cpu_to_le32(status);
+ pci_epf_test_raise_irq(epf_test, reg);
+
+ return IRQ_HANDLED;
+}
+
+static void pci_epf_test_doorbell_cleanup(struct pci_epf_test *epf_test)
+{
+ struct pci_epf_test_reg *reg = epf_test->reg[epf_test->test_reg_bar];
+ struct pci_epf *epf = epf_test->epf;
+
+ free_irq(epf->db_msg[0].virq, epf_test);
+ reg->doorbell_bar = cpu_to_le32(NO_BAR);
+
+ pci_epf_free_doorbell(epf);
+}
+
+static void pci_epf_test_enable_doorbell(struct pci_epf_test *epf_test,
+ struct pci_epf_test_reg *reg)
+{
+ u32 status = le32_to_cpu(reg->status);
+ struct pci_epf *epf = epf_test->epf;
+ struct pci_epc *epc = epf->epc;
+ struct msi_msg *msg;
+ enum pci_barno bar;
+ size_t offset;
+ int ret;
+
+ ret = pci_epf_alloc_doorbell(epf, 1);
+ if (ret)
+ goto set_status_err;
+
+ msg = &epf->db_msg[0].msg;
+ bar = pci_epc_get_next_free_bar(epf_test->epc_features, epf_test->test_reg_bar + 1);
+ if (bar < BAR_0)
+ goto err_doorbell_cleanup;
+
+ ret = request_threaded_irq(epf->db_msg[0].virq, NULL,
+ pci_epf_test_doorbell_handler, IRQF_ONESHOT,
+ "pci-ep-test-doorbell", epf_test);
+ if (ret) {
+ dev_err(&epf->dev,
+ "Failed to request doorbell IRQ: %d\n",
+ epf->db_msg[0].virq);
+ goto err_doorbell_cleanup;
+ }
+
+ reg->doorbell_data = cpu_to_le32(msg->data);
+ reg->doorbell_bar = cpu_to_le32(bar);
+
+ msg = &epf->db_msg[0].msg;
+ ret = pci_epf_align_inbound_addr(epf, bar, ((u64)msg->address_hi << 32) | msg->address_lo,
+ &epf_test->db_bar.phys_addr, &offset);
+
+ if (ret)
+ goto err_doorbell_cleanup;
+
+ reg->doorbell_offset = cpu_to_le32(offset);
+
+ epf_test->db_bar.barno = bar;
+ epf_test->db_bar.size = epf->bar[bar].size;
+ epf_test->db_bar.flags = epf->bar[bar].flags;
+
+ ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no, &epf_test->db_bar);
+ if (ret)
+ goto err_doorbell_cleanup;
+
+ status |= STATUS_DOORBELL_ENABLE_SUCCESS;
+ reg->status = cpu_to_le32(status);
+ return;
+
+err_doorbell_cleanup:
+ pci_epf_test_doorbell_cleanup(epf_test);
+set_status_err:
+ status |= STATUS_DOORBELL_ENABLE_FAIL;
+ reg->status = cpu_to_le32(status);
+}
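/*
 * Editorial sketch, not part of the patch: after COMMAND_ENABLE_DOORBELL
 * completes, the host is expected to ring the doorbell by writing
 * doorbell_data at doorbell_offset within the BAR reported in doorbell_bar;
 * the write lands on the MSI target address programmed above and triggers
 * pci_epf_test_doorbell_handler() on the endpoint. The helper and its
 * 'db_bar_va' mapping below are hypothetical host-side code, shown only to
 * illustrate the new register fields.
 */
static void example_host_ring_doorbell(void __iomem *db_bar_va,
				       u32 doorbell_offset, u32 doorbell_data)
{
	writel(doorbell_data, db_bar_va + doorbell_offset);
}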
+
+static void pci_epf_test_disable_doorbell(struct pci_epf_test *epf_test,
+ struct pci_epf_test_reg *reg)
+{
+ enum pci_barno bar = le32_to_cpu(reg->doorbell_bar);
+ u32 status = le32_to_cpu(reg->status);
+ struct pci_epf *epf = epf_test->epf;
+ struct pci_epc *epc = epf->epc;
+ int ret;
+
+ if (bar < BAR_0)
+ goto set_status_err;
+
+ pci_epf_test_doorbell_cleanup(epf_test);
+
+ /*
+ * The doorbell feature temporarily overrides the inbound translation
+ * to point to the address stored in epf_test->db_bar.phys_addr, i.e.,
+ * it calls set_bar() twice without ever calling clear_bar(), as
+ * calling clear_bar() would clear the BAR's PCI address assigned by
+ * the host. Thus, when disabling the doorbell, restore the inbound
+ * translation to point to the memory allocated for the BAR.
+ */
+ ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no, &epf->bar[bar]);
+ if (ret)
+ goto set_status_err;
+
+ status |= STATUS_DOORBELL_DISABLE_SUCCESS;
+ reg->status = cpu_to_le32(status);
+
+ return;
+
+set_status_err:
+ status |= STATUS_DOORBELL_DISABLE_FAIL;
+ reg->status = cpu_to_le32(status);
+}
+
static void pci_epf_test_cmd_handler(struct work_struct *work)
{
u32 command;
@@ -639,27 +815,28 @@ static void pci_epf_test_cmd_handler(struct work_struct *work)
struct device *dev = &epf->dev;
enum pci_barno test_reg_bar = epf_test->test_reg_bar;
struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];
+ u32 irq_type = le32_to_cpu(reg->irq_type);
- command = READ_ONCE(reg->command);
+ command = le32_to_cpu(READ_ONCE(reg->command));
if (!command)
goto reset_handler;
WRITE_ONCE(reg->command, 0);
WRITE_ONCE(reg->status, 0);
- if ((READ_ONCE(reg->flags) & FLAG_USE_DMA) &&
+ if ((le32_to_cpu(READ_ONCE(reg->flags)) & FLAG_USE_DMA) &&
!epf_test->dma_supported) {
dev_err(dev, "Cannot transfer data using DMA\n");
goto reset_handler;
}
- if (reg->irq_type > IRQ_TYPE_MSIX) {
+ if (irq_type > IRQ_TYPE_MSIX) {
dev_err(dev, "Failed to detect IRQ type\n");
goto reset_handler;
}
switch (command) {
- case COMMAND_RAISE_LEGACY_IRQ:
+ case COMMAND_RAISE_INTX_IRQ:
case COMMAND_RAISE_MSI_IRQ:
case COMMAND_RAISE_MSIX_IRQ:
pci_epf_test_raise_irq(epf_test, reg);
@@ -676,6 +853,14 @@ static void pci_epf_test_cmd_handler(struct work_struct *work)
pci_epf_test_copy(epf_test, reg);
pci_epf_test_raise_irq(epf_test, reg);
break;
+ case COMMAND_ENABLE_DOORBELL:
+ pci_epf_test_enable_doorbell(epf_test, reg);
+ pci_epf_test_raise_irq(epf_test, reg);
+ break;
+ case COMMAND_DISABLE_DOORBELL:
+ pci_epf_test_disable_doorbell(epf_test, reg);
+ pci_epf_test_raise_irq(epf_test, reg);
+ break;
default:
dev_err(dev, "Invalid command 0x%x\n", command);
break;
@@ -686,57 +871,24 @@ reset_handler:
msecs_to_jiffies(1));
}
-static void pci_epf_test_unbind(struct pci_epf *epf)
-{
- struct pci_epf_test *epf_test = epf_get_drvdata(epf);
- struct pci_epc *epc = epf->epc;
- struct pci_epf_bar *epf_bar;
- int bar;
-
- cancel_delayed_work(&epf_test->cmd_handler);
- pci_epf_test_clean_dma_chan(epf_test);
- for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
- epf_bar = &epf->bar[bar];
-
- if (epf_test->reg[bar]) {
- pci_epc_clear_bar(epc, epf->func_no, epf->vfunc_no,
- epf_bar);
- pci_epf_free_space(epf, epf_test->reg[bar], bar,
- PRIMARY_INTERFACE);
- }
- }
-}
-
static int pci_epf_test_set_bar(struct pci_epf *epf)
{
- int bar, add;
- int ret;
- struct pci_epf_bar *epf_bar;
+ int bar, ret;
struct pci_epc *epc = epf->epc;
struct device *dev = &epf->dev;
struct pci_epf_test *epf_test = epf_get_drvdata(epf);
enum pci_barno test_reg_bar = epf_test->test_reg_bar;
- const struct pci_epc_features *epc_features;
- epc_features = epf_test->epc_features;
-
- for (bar = 0; bar < PCI_STD_NUM_BARS; bar += add) {
- epf_bar = &epf->bar[bar];
- /*
- * pci_epc_set_bar() sets PCI_BASE_ADDRESS_MEM_TYPE_64
- * if the specific implementation required a 64-bit BAR,
- * even if we only requested a 32-bit BAR.
- */
- add = (epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ? 2 : 1;
-
- if (!!(epc_features->reserved_bar & (1 << bar)))
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
+ if (!epf_test->reg[bar])
continue;
ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no,
- epf_bar);
+ &epf->bar[bar]);
if (ret) {
pci_epf_free_space(epf, epf_test->reg[bar], bar,
PRIMARY_INTERFACE);
+ epf_test->reg[bar] = NULL;
dev_err(dev, "Failed to set BAR%d\n", bar);
if (bar == test_reg_bar)
return ret;
@@ -746,22 +898,59 @@ static int pci_epf_test_set_bar(struct pci_epf *epf)
return 0;
}
-static int pci_epf_test_core_init(struct pci_epf *epf)
+static void pci_epf_test_clear_bar(struct pci_epf *epf)
+{
+ struct pci_epf_test *epf_test = epf_get_drvdata(epf);
+ struct pci_epc *epc = epf->epc;
+ int bar;
+
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
+ if (!epf_test->reg[bar])
+ continue;
+
+ pci_epc_clear_bar(epc, epf->func_no, epf->vfunc_no,
+ &epf->bar[bar]);
+ }
+}
+
+static void pci_epf_test_set_capabilities(struct pci_epf *epf)
+{
+ struct pci_epf_test *epf_test = epf_get_drvdata(epf);
+ enum pci_barno test_reg_bar = epf_test->test_reg_bar;
+ struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];
+ struct pci_epc *epc = epf->epc;
+ u32 caps = 0;
+
+ if (epc->ops->align_addr)
+ caps |= CAP_UNALIGNED_ACCESS;
+
+ if (epf_test->epc_features->msi_capable)
+ caps |= CAP_MSI;
+
+ if (epf_test->epc_features->msix_capable)
+ caps |= CAP_MSIX;
+
+ if (epf_test->epc_features->intx_capable)
+ caps |= CAP_INTX;
+
+ reg->caps = cpu_to_le32(caps);
+}
+
+static int pci_epf_test_epc_init(struct pci_epf *epf)
{
struct pci_epf_test *epf_test = epf_get_drvdata(epf);
struct pci_epf_header *header = epf->header;
- const struct pci_epc_features *epc_features;
+ const struct pci_epc_features *epc_features = epf_test->epc_features;
struct pci_epc *epc = epf->epc;
struct device *dev = &epf->dev;
- bool msix_capable = false;
- bool msi_capable = true;
+ bool linkup_notifier = false;
int ret;
- epc_features = pci_epc_get_features(epc, epf->func_no, epf->vfunc_no);
- if (epc_features) {
- msix_capable = epc_features->msix_capable;
- msi_capable = epc_features->msi_capable;
- }
+ epf_test->dma_supported = true;
+
+ ret = pci_epf_test_init_dma_chan(epf_test);
+ if (ret)
+ epf_test->dma_supported = false;
if (epf->vfunc_no <= 1) {
ret = pci_epc_write_header(epc, epf->func_no, epf->vfunc_no, header);
@@ -771,11 +960,13 @@ static int pci_epf_test_core_init(struct pci_epf *epf)
}
}
+ pci_epf_test_set_capabilities(epf);
+
ret = pci_epf_test_set_bar(epf);
if (ret)
return ret;
- if (msi_capable) {
+ if (epc_features->msi_capable) {
ret = pci_epc_set_msi(epc, epf->func_no, epf->vfunc_no,
epf->msi_interrupts);
if (ret) {
@@ -784,7 +975,7 @@ static int pci_epf_test_core_init(struct pci_epf *epf)
}
}
- if (msix_capable) {
+ if (epc_features->msix_capable) {
ret = pci_epc_set_msix(epc, epf->func_no, epf->vfunc_no,
epf->msix_interrupts,
epf_test->test_reg_bar,
@@ -795,9 +986,22 @@ static int pci_epf_test_core_init(struct pci_epf *epf)
}
}
+ linkup_notifier = epc_features->linkup_notifier;
+ if (!linkup_notifier)
+ queue_work(kpcitest_workqueue, &epf_test->cmd_handler.work);
+
return 0;
}
+static void pci_epf_test_epc_deinit(struct pci_epf *epf)
+{
+ struct pci_epf_test *epf_test = epf_get_drvdata(epf);
+
+ cancel_delayed_work_sync(&epf_test->cmd_handler);
+ pci_epf_test_clean_dma_chan(epf_test);
+ pci_epf_test_clear_bar(epf);
+}
+
static int pci_epf_test_link_up(struct pci_epf *epf)
{
struct pci_epf_test *epf_test = epf_get_drvdata(epf);
@@ -808,32 +1012,38 @@ static int pci_epf_test_link_up(struct pci_epf *epf)
return 0;
}
+static int pci_epf_test_link_down(struct pci_epf *epf)
+{
+ struct pci_epf_test *epf_test = epf_get_drvdata(epf);
+
+ cancel_delayed_work_sync(&epf_test->cmd_handler);
+
+ return 0;
+}
+
static const struct pci_epc_event_ops pci_epf_test_event_ops = {
- .core_init = pci_epf_test_core_init,
+ .epc_init = pci_epf_test_epc_init,
+ .epc_deinit = pci_epf_test_epc_deinit,
.link_up = pci_epf_test_link_up,
+ .link_down = pci_epf_test_link_down,
};
static int pci_epf_test_alloc_space(struct pci_epf *epf)
{
struct pci_epf_test *epf_test = epf_get_drvdata(epf);
struct device *dev = &epf->dev;
- struct pci_epf_bar *epf_bar;
size_t msix_table_size = 0;
size_t test_reg_bar_size;
size_t pba_size = 0;
- bool msix_capable;
void *base;
- int bar, add;
enum pci_barno test_reg_bar = epf_test->test_reg_bar;
- const struct pci_epc_features *epc_features;
+ enum pci_barno bar;
+ const struct pci_epc_features *epc_features = epf_test->epc_features;
size_t test_reg_size;
- epc_features = epf_test->epc_features;
-
test_reg_bar_size = ALIGN(sizeof(struct pci_epf_test_reg), 128);
- msix_capable = epc_features->msix_capable;
- if (msix_capable) {
+ if (epc_features->msix_capable) {
msix_table_size = PCI_MSIX_ENTRY_SIZE * epf->msix_interrupts;
epf_test->msix_table_offset = test_reg_bar_size;
/* Align to QWORD or 8 Bytes */
@@ -841,33 +1051,29 @@ static int pci_epf_test_alloc_space(struct pci_epf *epf)
}
test_reg_size = test_reg_bar_size + msix_table_size + pba_size;
- if (epc_features->bar_fixed_size[test_reg_bar]) {
- if (test_reg_size > bar_size[test_reg_bar])
- return -ENOMEM;
- test_reg_size = bar_size[test_reg_bar];
- }
-
base = pci_epf_alloc_space(epf, test_reg_size, test_reg_bar,
- epc_features->align, PRIMARY_INTERFACE);
+ epc_features, PRIMARY_INTERFACE);
if (!base) {
dev_err(dev, "Failed to allocated register space\n");
return -ENOMEM;
}
epf_test->reg[test_reg_bar] = base;
- for (bar = 0; bar < PCI_STD_NUM_BARS; bar += add) {
- epf_bar = &epf->bar[bar];
- add = (epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ? 2 : 1;
+ for (bar = BAR_0; bar < PCI_STD_NUM_BARS; bar++) {
+ bar = pci_epc_get_next_free_bar(epc_features, bar);
+ if (bar == NO_BAR)
+ break;
if (bar == test_reg_bar)
continue;
- if (!!(epc_features->reserved_bar & (1 << bar)))
- continue;
+ if (epc_features->bar[bar].type == BAR_FIXED)
+ test_reg_size = epc_features->bar[bar].fixed_size;
+ else
+ test_reg_size = bar_size[bar];
- base = pci_epf_alloc_space(epf, bar_size[bar], bar,
- epc_features->align,
- PRIMARY_INTERFACE);
+ base = pci_epf_alloc_space(epf, test_reg_size, bar,
+ epc_features, PRIMARY_INTERFACE);
if (!base)
dev_err(dev, "Failed to allocate space for BAR%d\n",
bar);
@@ -877,20 +1083,18 @@ static int pci_epf_test_alloc_space(struct pci_epf *epf)
return 0;
}
-static void pci_epf_configure_bar(struct pci_epf *epf,
- const struct pci_epc_features *epc_features)
+static void pci_epf_test_free_space(struct pci_epf *epf)
{
- struct pci_epf_bar *epf_bar;
- bool bar_fixed_64bit;
- int i;
-
- for (i = 0; i < PCI_STD_NUM_BARS; i++) {
- epf_bar = &epf->bar[i];
- bar_fixed_64bit = !!(epc_features->bar_fixed_64bit & (1 << i));
- if (bar_fixed_64bit)
- epf_bar->flags |= PCI_BASE_ADDRESS_MEM_TYPE_64;
- if (epc_features->bar_fixed_size[i])
- bar_size[i] = epc_features->bar_fixed_size[i];
+ struct pci_epf_test *epf_test = epf_get_drvdata(epf);
+ int bar;
+
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
+ if (!epf_test->reg[bar])
+ continue;
+
+ pci_epf_free_space(epf, epf_test->reg[bar], bar,
+ PRIMARY_INTERFACE);
+ epf_test->reg[bar] = NULL;
}
}
@@ -901,8 +1105,6 @@ static int pci_epf_test_bind(struct pci_epf *epf)
const struct pci_epc_features *epc_features;
enum pci_barno test_reg_bar = BAR_0;
struct pci_epc *epc = epf->epc;
- bool linkup_notifier = false;
- bool core_init_notifier = false;
if (WARN_ON_ONCE(!epc))
return -EINVAL;
@@ -913,12 +1115,9 @@ static int pci_epf_test_bind(struct pci_epf *epf)
return -EOPNOTSUPP;
}
- linkup_notifier = epc_features->linkup_notifier;
- core_init_notifier = epc_features->core_init_notifier;
test_reg_bar = pci_epc_get_first_free_bar(epc_features);
if (test_reg_bar < 0)
return -EINVAL;
- pci_epf_configure_bar(epf, epc_features);
epf_test->test_reg_bar = test_reg_bar;
epf_test->epc_features = epc_features;
@@ -927,22 +1126,20 @@ static int pci_epf_test_bind(struct pci_epf *epf)
if (ret)
return ret;
- if (!core_init_notifier) {
- ret = pci_epf_test_core_init(epf);
- if (ret)
- return ret;
- }
-
- epf_test->dma_supported = true;
-
- ret = pci_epf_test_init_dma_chan(epf_test);
- if (ret)
- epf_test->dma_supported = false;
+ return 0;
+}
- if (!linkup_notifier && !core_init_notifier)
- queue_work(kpcitest_workqueue, &epf_test->cmd_handler.work);
+static void pci_epf_test_unbind(struct pci_epf *epf)
+{
+ struct pci_epf_test *epf_test = epf_get_drvdata(epf);
+ struct pci_epc *epc = epf->epc;
- return 0;
+ cancel_delayed_work_sync(&epf_test->cmd_handler);
+ if (epc->init_complete) {
+ pci_epf_test_clean_dma_chan(epf_test);
+ pci_epf_test_clear_bar(epf);
+ }
+ pci_epf_test_free_space(epf);
}
static const struct pci_epf_device_id pci_epf_test_ids[] = {
@@ -973,7 +1170,7 @@ static int pci_epf_test_probe(struct pci_epf *epf,
return 0;
}
-static struct pci_epf_ops ops = {
+static const struct pci_epf_ops ops = {
.unbind = pci_epf_test_unbind,
.bind = pci_epf_test_bind,
};
diff --git a/drivers/pci/endpoint/functions/pci-epf-vntb.c b/drivers/pci/endpoint/functions/pci-epf-vntb.c
index c8b423c3c26e..3ecc5059f92b 100644
--- a/drivers/pci/endpoint/functions/pci-epf-vntb.c
+++ b/drivers/pci/endpoint/functions/pci-epf-vntb.c
@@ -36,11 +36,13 @@
* PCIe Root Port PCI EP
*/
+#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/pci-ep-msi.h>
#include <linux/pci-epc.h>
#include <linux/pci-epf.h>
#include <linux/ntb.h>
@@ -70,9 +72,11 @@ static struct workqueue_struct *kpcintb_workqueue;
enum epf_ntb_bar {
BAR_CONFIG,
BAR_DB,
- BAR_MW0,
BAR_MW1,
BAR_MW2,
+ BAR_MW3,
+ BAR_MW4,
+ VNTB_BAR_NUM,
};
/*
@@ -124,15 +128,16 @@ struct epf_ntb {
u32 db_count;
u32 spad_count;
u64 mws_size[MAX_MW];
- u64 db;
+ atomic64_t db;
u32 vbus_number;
u16 vntb_pid;
u16 vntb_vid;
bool linkup;
+ bool msi_doorbell;
u32 spad_size;
- enum pci_barno epf_ntb_bar[6];
+ enum pci_barno epf_ntb_bar[VNTB_BAR_NUM];
struct epf_ntb_ctrl *reg;
@@ -256,9 +261,9 @@ static void epf_ntb_cmd_handler(struct work_struct *work)
ntb = container_of(work, struct epf_ntb, cmd_handler.work);
- for (i = 1; i < ntb->db_count; i++) {
+ for (i = 1; i < ntb->db_count && !ntb->msi_doorbell; i++) {
if (ntb->epf_db[i]) {
- ntb->db |= 1 << (i - 1);
+ atomic64_or(1 << (i - 1), &ntb->db);
ntb_db_event(&ntb->ntb, i);
ntb->epf_db[i] = 0;
}
@@ -317,7 +322,21 @@ static void epf_ntb_cmd_handler(struct work_struct *work)
reset_handler:
queue_delayed_work(kpcintb_workqueue, &ntb->cmd_handler,
- msecs_to_jiffies(5));
+ ntb->msi_doorbell ? msecs_to_jiffies(500) : msecs_to_jiffies(5));
+}
+
+static irqreturn_t epf_ntb_doorbell_handler(int irq, void *data)
+{
+ struct epf_ntb *ntb = data;
+ int i;
+
+ for (i = 1; i < ntb->db_count; i++)
+ if (irq == ntb->epf->db_msg[i].virq) {
+ atomic64_or(1 << (i - 1), &ntb->db);
+ ntb_db_event(&ntb->ntb, i);
+ }
+
+ return IRQ_HANDLED;
}
/**
@@ -408,11 +427,9 @@ static void epf_ntb_config_spad_bar_free(struct epf_ntb *ntb)
*/
static int epf_ntb_config_spad_bar_alloc(struct epf_ntb *ntb)
{
- size_t align;
enum pci_barno barno;
struct epf_ntb_ctrl *ctrl;
u32 spad_size, ctrl_size;
- u64 size;
struct pci_epf *epf = ntb->epf;
struct device *dev = &epf->dev;
u32 spad_count;
@@ -422,31 +439,13 @@ static int epf_ntb_config_spad_bar_alloc(struct epf_ntb *ntb)
epf->func_no,
epf->vfunc_no);
barno = ntb->epf_ntb_bar[BAR_CONFIG];
- size = epc_features->bar_fixed_size[barno];
- align = epc_features->align;
-
- if ((!IS_ALIGNED(size, align)))
- return -EINVAL;
-
spad_count = ntb->spad_count;
- ctrl_size = sizeof(struct epf_ntb_ctrl);
+ ctrl_size = ALIGN(sizeof(struct epf_ntb_ctrl), sizeof(u32));
spad_size = 2 * spad_count * sizeof(u32);
- if (!align) {
- ctrl_size = roundup_pow_of_two(ctrl_size);
- spad_size = roundup_pow_of_two(spad_size);
- } else {
- ctrl_size = ALIGN(ctrl_size, align);
- spad_size = ALIGN(spad_size, align);
- }
-
- if (!size)
- size = ctrl_size + spad_size;
- else if (size < ctrl_size + spad_size)
- return -EINVAL;
-
- base = pci_epf_alloc_space(epf, size, barno, align, 0);
+ base = pci_epf_alloc_space(epf, ctrl_size + spad_size,
+ barno, epc_features, 0);
if (!base) {
dev_err(dev, "Config/Status/SPAD alloc region fail\n");
return -ENOMEM;
@@ -518,6 +517,94 @@ static int epf_ntb_configure_interrupt(struct epf_ntb *ntb)
return 0;
}
+static int epf_ntb_db_bar_init_msi_doorbell(struct epf_ntb *ntb,
+ struct pci_epf_bar *db_bar,
+ const struct pci_epc_features *epc_features,
+ enum pci_barno barno)
+{
+ struct pci_epf *epf = ntb->epf;
+ dma_addr_t low, high;
+ struct msi_msg *msg;
+ size_t sz;
+ int ret;
+ int i;
+
+ ret = pci_epf_alloc_doorbell(epf, ntb->db_count);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < ntb->db_count; i++) {
+ ret = request_irq(epf->db_msg[i].virq, epf_ntb_doorbell_handler,
+ 0, "pci_epf_vntb_db", ntb);
+
+ if (ret) {
+ dev_err(&epf->dev,
+ "Failed to request doorbell IRQ: %d\n",
+ epf->db_msg[i].virq);
+ goto err_free_irq;
+ }
+ }
+
+ msg = &epf->db_msg[0].msg;
+
+ high = 0;
+ low = (u64)msg->address_hi << 32 | msg->address_lo;
+
+ for (i = 0; i < ntb->db_count; i++) {
+ struct msi_msg *msg = &epf->db_msg[i].msg;
+ dma_addr_t addr = (u64)msg->address_hi << 32 | msg->address_lo;
+
+ low = min(low, addr);
+ high = max(high, addr);
+ }
+
+ sz = high - low + sizeof(u32);
+
+ ret = pci_epf_assign_bar_space(epf, sz, barno, epc_features, 0, low);
+ if (ret) {
+ dev_err(&epf->dev, "Failed to assign Doorbell BAR space\n");
+ goto err_free_irq;
+ }
+
+ ret = pci_epc_set_bar(ntb->epf->epc, ntb->epf->func_no,
+ ntb->epf->vfunc_no, db_bar);
+ if (ret) {
+ dev_err(&epf->dev, "Failed to set Doorbell BAR\n");
+ goto err_free_irq;
+ }
+
+ for (i = 0; i < ntb->db_count; i++) {
+ struct msi_msg *msg = &epf->db_msg[i].msg;
+ dma_addr_t addr;
+ size_t offset;
+
+ ret = pci_epf_align_inbound_addr(epf, db_bar->barno,
+ ((u64)msg->address_hi << 32) | msg->address_lo,
+ &addr, &offset);
+
+ if (ret) {
+ ntb->msi_doorbell = false;
+ goto err_free_irq;
+ }
+
+ ntb->reg->db_data[i] = msg->data;
+ ntb->reg->db_offset[i] = offset;
+ }
+
+ ntb->reg->db_entry_size = 0;
+
+ ntb->msi_doorbell = true;
+
+ return 0;
+
+err_free_irq:
+ for (i--; i >= 0; i--)
+ free_irq(epf->db_msg[i].virq, ntb);
+
+ pci_epf_free_doorbell(ntb->epf);
+ return ret;
+}
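/*
 * Editorial note, not part of the patch: the doorbell BAR is sized to span
 * the MSI target addresses of all allocated doorbell vectors
 * ([low, high + sizeof(u32)]) and its inbound window is pointed at 'low',
 * so a peer write of db_data[i] at db_offset[i] hits vector i's MSI target
 * address and raises the corresponding virq, which is handled by
 * epf_ntb_doorbell_handler().
 */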
+
/**
* epf_ntb_db_bar_init() - Configure Doorbell window BARs
* @ntb: NTB device that facilitates communication between HOST and VHOST
@@ -527,43 +614,36 @@ static int epf_ntb_configure_interrupt(struct epf_ntb *ntb)
static int epf_ntb_db_bar_init(struct epf_ntb *ntb)
{
const struct pci_epc_features *epc_features;
- u32 align;
struct device *dev = &ntb->epf->dev;
int ret;
struct pci_epf_bar *epf_bar;
- void __iomem *mw_addr;
+ void *mw_addr;
enum pci_barno barno;
size_t size = sizeof(u32) * ntb->db_count;
epc_features = pci_epc_get_features(ntb->epf->epc,
ntb->epf->func_no,
ntb->epf->vfunc_no);
- align = epc_features->align;
-
- if (size < 128)
- size = 128;
-
- if (align)
- size = ALIGN(size, align);
- else
- size = roundup_pow_of_two(size);
-
barno = ntb->epf_ntb_bar[BAR_DB];
-
- mw_addr = pci_epf_alloc_space(ntb->epf, size, barno, align, 0);
- if (!mw_addr) {
- dev_err(dev, "Failed to allocate OB address\n");
- return -ENOMEM;
- }
-
- ntb->epf_db = mw_addr;
-
epf_bar = &ntb->epf->bar[barno];
- ret = pci_epc_set_bar(ntb->epf->epc, ntb->epf->func_no, ntb->epf->vfunc_no, epf_bar);
+ ret = epf_ntb_db_bar_init_msi_doorbell(ntb, epf_bar, epc_features, barno);
if (ret) {
- dev_err(dev, "Doorbell BAR set failed\n");
+ /* fall back to polling mode */
+ mw_addr = pci_epf_alloc_space(ntb->epf, size, barno, epc_features, 0);
+ if (!mw_addr) {
+ dev_err(dev, "Failed to allocate OB address\n");
+ return -ENOMEM;
+ }
+
+ ntb->epf_db = mw_addr;
+
+ ret = pci_epc_set_bar(ntb->epf->epc, ntb->epf->func_no,
+ ntb->epf->vfunc_no, epf_bar);
+ if (ret) {
+ dev_err(dev, "Doorbell BAR set failed\n");
goto err_alloc_peer_mem;
+ }
}
return ret;
@@ -583,6 +663,16 @@ static void epf_ntb_db_bar_clear(struct epf_ntb *ntb)
{
enum pci_barno barno;
+ if (ntb->msi_doorbell) {
+ int i;
+
+ for (i = 0; i < ntb->db_count; i++)
+ free_irq(ntb->epf->db_msg[i].virq, ntb);
+ }
+
+ if (ntb->epf->db_msg)
+ pci_epf_free_doorbell(ntb->epf);
+
barno = ntb->epf_ntb_bar[BAR_DB];
pci_epf_free_space(ntb->epf, ntb->epf_db, barno, 0);
pci_epc_clear_bar(ntb->epf->epc,
@@ -607,7 +697,7 @@ static int epf_ntb_mw_bar_init(struct epf_ntb *ntb)
for (i = 0; i < ntb->num_mws; i++) {
size = ntb->mws_size[i];
- barno = ntb->epf_ntb_bar[BAR_MW0 + i];
+ barno = ntb->epf_ntb_bar[BAR_MW1 + i];
ntb->epf->bar[barno].barno = barno;
ntb->epf->bar[barno].size = size;
@@ -660,7 +750,7 @@ static void epf_ntb_mw_bar_clear(struct epf_ntb *ntb, int num_mws)
int i;
for (i = 0; i < num_mws; i++) {
- barno = ntb->epf_ntb_bar[BAR_MW0 + i];
+ barno = ntb->epf_ntb_bar[BAR_MW1 + i];
pci_epc_clear_bar(ntb->epf->epc,
ntb->epf->func_no,
ntb->epf->vfunc_no,
@@ -685,6 +775,63 @@ static void epf_ntb_epc_destroy(struct epf_ntb *ntb)
pci_epc_put(ntb->epf->epc);
}
+
+/**
+ * epf_ntb_is_bar_used() - Check if a BAR is used in the NTB configuration
+ * @ntb: NTB device that facilitates communication between HOST and VHOST
+ * @barno: BAR number to check
+ *
+ * Returns: true if used, false if free.
+ */
+static bool epf_ntb_is_bar_used(struct epf_ntb *ntb,
+ enum pci_barno barno)
+{
+ int i;
+
+ for (i = 0; i < VNTB_BAR_NUM; i++) {
+ if (ntb->epf_ntb_bar[i] == barno)
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * epf_ntb_find_bar() - Assign BAR number when no configuration is provided
+ * @ntb: NTB device that facilitates communication between HOST and VHOST
+ * @epc_features: The features provided by the EPC specific to this EPF
+ * @bar: NTB BAR index
+ * @barno: BAR number to start the search from
+ *
+ * When no BAR configuration is provided through the userspace configuration,
+ * automatically assign BARs in the order historically used by this endpoint
+ * function.
+ *
+ * Returns: the BAR number found, or a negative value if none is available
+ */
+static int epf_ntb_find_bar(struct epf_ntb *ntb,
+ const struct pci_epc_features *epc_features,
+ enum epf_ntb_bar bar,
+ enum pci_barno barno)
+{
+ while (ntb->epf_ntb_bar[bar] < 0) {
+ barno = pci_epc_get_next_free_bar(epc_features, barno);
+ if (barno < 0)
+ break; /* No more BAR available */
+
+ /*
+ * Verify if the BAR found is not already assigned
+ * through the provided configuration
+ */
+ if (!epf_ntb_is_bar_used(ntb, barno))
+ ntb->epf_ntb_bar[bar] = barno;
+
+ barno += 1;
+ }
+
+ return barno;
+}
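/*
 * Illustrative example (not part of the patch), with hypothetical values:
 * if the EPC reserves BAR_0 and userspace pre-assigned mw1_bar = BAR_2, the
 * search above assigns BAR_CONFIG = BAR_1, skips BAR_2 because
 * epf_ntb_is_bar_used() reports it taken, assigns BAR_DB = BAR_3, and leaves
 * the remaining free BARs for further memory windows.
 */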
+
/**
* epf_ntb_init_epc_bar() - Identify BARs to be used for each of the NTB
* constructs (scratchpad region, doorbell, memorywindow)
@@ -707,23 +854,21 @@ static int epf_ntb_init_epc_bar(struct epf_ntb *ntb)
epc_features = pci_epc_get_features(ntb->epf->epc, ntb->epf->func_no, ntb->epf->vfunc_no);
/* These are required BARs which are mandatory for NTB functionality */
- for (bar = BAR_CONFIG; bar <= BAR_MW0; bar++, barno++) {
- barno = pci_epc_get_next_free_bar(epc_features, barno);
+ for (bar = BAR_CONFIG; bar <= BAR_MW1; bar++) {
+ barno = epf_ntb_find_bar(ntb, epc_features, bar, barno);
if (barno < 0) {
dev_err(dev, "Fail to get NTB function BAR\n");
- return barno;
+ return -ENOENT;
}
- ntb->epf_ntb_bar[bar] = barno;
}
/* These are optional BARs which don't impact NTB functionality */
- for (bar = BAR_MW1, i = 1; i < num_mws; bar++, barno++, i++) {
- barno = pci_epc_get_next_free_bar(epc_features, barno);
+ for (bar = BAR_MW1, i = 1; i < num_mws; bar++, i++) {
+ barno = epf_ntb_find_bar(ntb, epc_features, bar, barno);
if (barno < 0) {
ntb->num_mws = i;
dev_dbg(dev, "BAR not available for > MW%d\n", i + 1);
}
- ntb->epf_ntb_bar[bar] = barno;
}
return 0;
@@ -810,8 +955,9 @@ err_config_interrupt:
*/
static void epf_ntb_epc_cleanup(struct epf_ntb *ntb)
{
- epf_ntb_db_bar_clear(ntb);
epf_ntb_mw_bar_clear(ntb, ntb->num_mws);
+ epf_ntb_db_bar_clear(ntb);
+ epf_ntb_config_sspad_bar_clear(ntb);
}
#define EPF_NTB_R(_name) \
@@ -890,6 +1036,37 @@ static ssize_t epf_ntb_##_name##_store(struct config_item *item, \
return len; \
}
+#define EPF_NTB_BAR_R(_name, _id) \
+ static ssize_t epf_ntb_##_name##_show(struct config_item *item, \
+ char *page) \
+ { \
+ struct config_group *group = to_config_group(item); \
+ struct epf_ntb *ntb = to_epf_ntb(group); \
+ \
+ return sprintf(page, "%d\n", ntb->epf_ntb_bar[_id]); \
+ }
+
+#define EPF_NTB_BAR_W(_name, _id) \
+ static ssize_t epf_ntb_##_name##_store(struct config_item *item, \
+ const char *page, size_t len) \
+ { \
+ struct config_group *group = to_config_group(item); \
+ struct epf_ntb *ntb = to_epf_ntb(group); \
+ int val; \
+ int ret; \
+ \
+ ret = kstrtoint(page, 0, &val); \
+ if (ret) \
+ return ret; \
+ \
+ if (val < NO_BAR || val > BAR_5) \
+ return -EINVAL; \
+ \
+ ntb->epf_ntb_bar[_id] = val; \
+ \
+ return len; \
+ }
+
static ssize_t epf_ntb_num_mws_store(struct config_item *item,
const char *page, size_t len)
{
@@ -929,6 +1106,18 @@ EPF_NTB_MW_R(mw3)
EPF_NTB_MW_W(mw3)
EPF_NTB_MW_R(mw4)
EPF_NTB_MW_W(mw4)
+EPF_NTB_BAR_R(ctrl_bar, BAR_CONFIG)
+EPF_NTB_BAR_W(ctrl_bar, BAR_CONFIG)
+EPF_NTB_BAR_R(db_bar, BAR_DB)
+EPF_NTB_BAR_W(db_bar, BAR_DB)
+EPF_NTB_BAR_R(mw1_bar, BAR_MW1)
+EPF_NTB_BAR_W(mw1_bar, BAR_MW1)
+EPF_NTB_BAR_R(mw2_bar, BAR_MW2)
+EPF_NTB_BAR_W(mw2_bar, BAR_MW2)
+EPF_NTB_BAR_R(mw3_bar, BAR_MW3)
+EPF_NTB_BAR_W(mw3_bar, BAR_MW3)
+EPF_NTB_BAR_R(mw4_bar, BAR_MW4)
+EPF_NTB_BAR_W(mw4_bar, BAR_MW4)
CONFIGFS_ATTR(epf_ntb_, spad_count);
CONFIGFS_ATTR(epf_ntb_, db_count);
@@ -940,6 +1129,12 @@ CONFIGFS_ATTR(epf_ntb_, mw4);
CONFIGFS_ATTR(epf_ntb_, vbus_number);
CONFIGFS_ATTR(epf_ntb_, vntb_pid);
CONFIGFS_ATTR(epf_ntb_, vntb_vid);
+CONFIGFS_ATTR(epf_ntb_, ctrl_bar);
+CONFIGFS_ATTR(epf_ntb_, db_bar);
+CONFIGFS_ATTR(epf_ntb_, mw1_bar);
+CONFIGFS_ATTR(epf_ntb_, mw2_bar);
+CONFIGFS_ATTR(epf_ntb_, mw3_bar);
+CONFIGFS_ATTR(epf_ntb_, mw4_bar);
static struct configfs_attribute *epf_ntb_attrs[] = {
&epf_ntb_attr_spad_count,
@@ -952,6 +1147,12 @@ static struct configfs_attribute *epf_ntb_attrs[] = {
&epf_ntb_attr_vbus_number,
&epf_ntb_attr_vntb_pid,
&epf_ntb_attr_vntb_vid,
+ &epf_ntb_attr_ctrl_bar,
+ &epf_ntb_attr_db_bar,
+ &epf_ntb_attr_mw1_bar,
+ &epf_ntb_attr_mw2_bar,
+ &epf_ntb_attr_mw3_bar,
+ &epf_ntb_attr_mw4_bar,
NULL,
};
@@ -986,22 +1187,22 @@ static struct config_group *epf_ntb_add_cfs(struct pci_epf *epf,
/*==== virtual PCI bus driver, which only load virtual NTB PCI driver ====*/
static u32 pci_space[] = {
- 0xffffffff, /*DeviceID, Vendor ID*/
- 0, /*Status, Command*/
- 0xffffffff, /*Class code, subclass, prog if, revision id*/
- 0x40, /*bist, header type, latency Timer, cache line size*/
- 0, /*BAR 0*/
- 0, /*BAR 1*/
- 0, /*BAR 2*/
- 0, /*BAR 3*/
- 0, /*BAR 4*/
- 0, /*BAR 5*/
- 0, /*Cardbus cis point*/
- 0, /*Subsystem ID Subystem vendor id*/
- 0, /*ROM Base Address*/
- 0, /*Reserved, Cap. Point*/
- 0, /*Reserved,*/
- 0, /*Max Lat, Min Gnt, interrupt pin, interrupt line*/
+ 0xffffffff, /* Device ID, Vendor ID */
+ 0, /* Status, Command */
+ 0xffffffff, /* Base Class, Subclass, Prog Intf, Revision ID */
+ 0x40, /* BIST, Header Type, Latency Timer, Cache Line Size */
+ 0, /* BAR 0 */
+ 0, /* BAR 1 */
+ 0, /* BAR 2 */
+ 0, /* BAR 3 */
+ 0, /* BAR 4 */
+ 0, /* BAR 5 */
+ 0, /* Cardbus CIS Pointer */
+ 0, /* Subsystem ID, Subsystem Vendor ID */
+ 0, /* ROM Base Address */
+ 0, /* Reserved, Capabilities Pointer */
+ 0, /* Reserved */
+ 0, /* Max_Lat, Min_Gnt, Interrupt Pin, Interrupt Line */
};
static int pci_read(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val)
@@ -1029,8 +1230,10 @@ static int vpci_scan_bus(void *sysdata)
struct epf_ntb *ndev = sysdata;
vpci_bus = pci_scan_bus(ndev->vbus_number, &vpci_ops, sysdata);
- if (vpci_bus)
- pr_err("create pci bus\n");
+ if (!vpci_bus) {
+ pr_err("create pci bus failed\n");
+ return -EINVAL;
+ }
pci_bus_add_devices(vpci_bus);
@@ -1076,7 +1279,7 @@ static int vntb_epf_mw_set_trans(struct ntb_dev *ndev, int pidx, int idx,
struct device *dev;
dev = &ntb->ntb.dev;
- barno = ntb->epf_ntb_bar[BAR_MW0 + idx];
+ barno = ntb->epf_ntb_bar[BAR_MW1 + idx];
epf_bar = &ntb->epf->bar[barno];
epf_bar->phys_addr = addr;
epf_bar->barno = barno;
@@ -1172,11 +1375,8 @@ static int vntb_epf_peer_db_set(struct ntb_dev *ndev, u64 db_bits)
func_no = ntb->epf->func_no;
vfunc_no = ntb->epf->vfunc_no;
- ret = pci_epc_raise_irq(ntb->epf->epc,
- func_no,
- vfunc_no,
- PCI_EPC_IRQ_MSI,
- interrupt_num + 1);
+ ret = pci_epc_raise_irq(ntb->epf->epc, func_no, vfunc_no,
+ PCI_IRQ_MSI, interrupt_num + 1);
if (ret)
dev_err(&ntb->ntb.dev, "Failed to raise IRQ\n");
@@ -1187,7 +1387,7 @@ static u64 vntb_epf_db_read(struct ntb_dev *ndev)
{
struct epf_ntb *ntb = ntb_ndev(ndev);
- return ntb->db;
+ return atomic64_read(&ntb->db);
}
static int vntb_epf_mw_get_align(struct ntb_dev *ndev, int pidx, int idx,
@@ -1227,7 +1427,7 @@ static int vntb_epf_db_clear(struct ntb_dev *ndev, u64 db_bits)
{
struct epf_ntb *ntb = ntb_ndev(ndev);
- ntb->db &= ~db_bits;
+ atomic64_and(~db_bits, &ntb->db);
return 0;
}
@@ -1272,21 +1472,17 @@ static int pci_vntb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
if (ret) {
dev_err(dev, "Cannot set DMA mask\n");
- return -EINVAL;
+ return ret;
}
ret = ntb_register_device(&ndev->ntb);
if (ret) {
dev_err(dev, "Failed to register NTB device\n");
- goto err_register_dev;
+ return ret;
}
dev_dbg(dev, "PCI Virtual NTB driver loaded\n");
return 0;
-
-err_register_dev:
- put_device(&ndev->ntb.dev);
- return -EINVAL;
}
static struct pci_device_id pci_vntb_table[] = {
@@ -1353,13 +1549,19 @@ static int epf_ntb_bind(struct pci_epf *epf)
ret = pci_register_driver(&vntb_pci_driver);
if (ret) {
dev_err(dev, "failure register vntb pci driver\n");
- goto err_bar_alloc;
+ goto err_epc_cleanup;
}
- vpci_scan_bus(ntb);
+ ret = vpci_scan_bus(ntb);
+ if (ret)
+ goto err_unregister;
return 0;
+err_unregister:
+ pci_unregister_driver(&vntb_pci_driver);
+err_epc_cleanup:
+ epf_ntb_epc_cleanup(ntb);
err_bar_alloc:
epf_ntb_config_spad_bar_free(ntb);
@@ -1387,7 +1589,7 @@ static void epf_ntb_unbind(struct pci_epf *epf)
}
// EPF driver probe
-static struct pci_epf_ops epf_ntb_ops = {
+static const struct pci_epf_ops epf_ntb_ops = {
.bind = epf_ntb_bind,
.unbind = epf_ntb_unbind,
.add_cfs = epf_ntb_add_cfs,
@@ -1408,6 +1610,7 @@ static int epf_ntb_probe(struct pci_epf *epf,
{
struct epf_ntb *ntb;
struct device *dev;
+ int i;
dev = &epf->dev;
@@ -1418,6 +1621,11 @@ static int epf_ntb_probe(struct pci_epf *epf,
epf->header = &epf_ntb_header;
ntb->epf = epf;
ntb->vbus_number = 0xff;
+
+ /* Initially, no bar is assigned */
+ for (i = 0; i < VNTB_BAR_NUM; i++)
+ ntb->epf_ntb_bar[i] = NO_BAR;
+
epf_set_drvdata(epf, ntb);
dev_info(dev, "pci-ep epf driver loaded\n");
diff --git a/drivers/pci/endpoint/pci-ep-cfs.c b/drivers/pci/endpoint/pci-ep-cfs.c
index 0ea64e24ed61..ef50c82e647f 100644
--- a/drivers/pci/endpoint/pci-ep-cfs.c
+++ b/drivers/pci/endpoint/pci-ep-cfs.c
@@ -23,7 +23,6 @@ struct pci_epf_group {
struct config_group group;
struct config_group primary_epc_group;
struct config_group secondary_epc_group;
- struct config_group *type_group;
struct delayed_work cfs_work;
struct pci_epf *epf;
int index;
@@ -64,6 +63,9 @@ static int pci_secondary_epc_epf_link(struct config_item *epf_item,
return ret;
}
+ /* Send any pending EPC initialization complete to the EPF driver */
+ pci_epc_notify_pending_init(epc, epf);
+
return 0;
}
@@ -125,6 +127,9 @@ static int pci_primary_epc_epf_link(struct config_item *epf_item,
return ret;
}
+ /* Send any pending EPC initialization complete to the EPF driver */
+ pci_epc_notify_pending_init(epc, epf);
+
return 0;
}
@@ -230,6 +235,9 @@ static int pci_epc_epf_link(struct config_item *epc_item,
return ret;
}
+ /* Send any pending EPC initialization complete to the EPF driver */
+ pci_epc_notify_pending_init(epc, epf);
+
return 0;
}
@@ -683,6 +691,7 @@ void pci_ep_cfs_remove_epf_group(struct config_group *group)
if (IS_ERR_OR_NULL(group))
return;
+ list_del(&group->group_entry);
configfs_unregister_default_group(group);
}
EXPORT_SYMBOL(pci_ep_cfs_remove_epf_group);
diff --git a/drivers/pci/endpoint/pci-ep-msi.c b/drivers/pci/endpoint/pci-ep-msi.c
new file mode 100644
index 000000000000..1b58357b905f
--- /dev/null
+++ b/drivers/pci/endpoint/pci-ep-msi.c
@@ -0,0 +1,100 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCI Endpoint *Controller* (EPC) MSI library
+ *
+ * Copyright (C) 2025 NXP
+ * Author: Frank Li <Frank.Li@nxp.com>
+ */
+
+#include <linux/device.h>
+#include <linux/export.h>
+#include <linux/irqdomain.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/of_irq.h>
+#include <linux/pci-epc.h>
+#include <linux/pci-epf.h>
+#include <linux/pci-ep-cfs.h>
+#include <linux/pci-ep-msi.h>
+#include <linux/slab.h>
+
+static void pci_epf_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
+{
+ struct pci_epc *epc;
+ struct pci_epf *epf;
+
+ epc = pci_epc_get(dev_name(msi_desc_to_dev(desc)));
+ if (IS_ERR(epc))
+ return;
+
+ epf = list_first_entry_or_null(&epc->pci_epf, struct pci_epf, list);
+
+ if (epf && epf->db_msg && desc->msi_index < epf->num_db)
+ memcpy(&epf->db_msg[desc->msi_index].msg, msg, sizeof(*msg));
+
+ pci_epc_put(epc);
+}
+
+int pci_epf_alloc_doorbell(struct pci_epf *epf, u16 num_db)
+{
+ struct pci_epc *epc = epf->epc;
+ struct device *dev = &epf->dev;
+ struct irq_domain *domain;
+ void *msg;
+ int ret;
+ int i;
+
+ /* TODO: Multi-EPF support */
+ if (list_first_entry_or_null(&epc->pci_epf, struct pci_epf, list) != epf) {
+ dev_err(dev, "MSI doorbell doesn't support multiple EPF\n");
+ return -EINVAL;
+ }
+
+ domain = of_msi_map_get_device_domain(epc->dev.parent, 0,
+ DOMAIN_BUS_PLATFORM_MSI);
+ if (!domain) {
+ dev_err(dev, "Can't find MSI domain for EPC\n");
+ return -ENODEV;
+ }
+
+ if (!irq_domain_is_msi_parent(domain))
+ return -ENODEV;
+
+ if (!irq_domain_is_msi_immutable(domain)) {
+ dev_err(dev, "Mutable MSI controller not supported\n");
+ return -ENODEV;
+ }
+
+ dev_set_msi_domain(epc->dev.parent, domain);
+
+ msg = kcalloc(num_db, sizeof(struct pci_epf_doorbell_msg), GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ epf->num_db = num_db;
+ epf->db_msg = msg;
+
+ ret = platform_device_msi_init_and_alloc_irqs(epc->dev.parent, num_db,
+ pci_epf_write_msi_msg);
+ if (ret) {
+ dev_err(dev, "Failed to allocate MSI\n");
+ kfree(msg);
+ return ret;
+ }
+
+ for (i = 0; i < num_db; i++)
+ epf->db_msg[i].virq = msi_get_virq(epc->dev.parent, i);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pci_epf_alloc_doorbell);
+
+void pci_epf_free_doorbell(struct pci_epf *epf)
+{
+ platform_device_msi_free_irqs_all(epf->epc->dev.parent);
+
+ kfree(epf->db_msg);
+ epf->db_msg = NULL;
+ epf->num_db = 0;
+}
+EXPORT_SYMBOL_GPL(pci_epf_free_doorbell);
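/*
 * Illustrative sketch (not part of the patch): typical use of the doorbell
 * helpers above from an EPF driver. The function name, handler and data
 * pointer are hypothetical; error unwinding is shortened for brevity.
 */
#include <linux/interrupt.h>
#include <linux/pci-epf.h>
#include <linux/pci-ep-msi.h>

static int example_epf_setup_doorbells(struct pci_epf *epf, irq_handler_t handler,
					void *data, u16 num_db)
{
	int ret, i;

	ret = pci_epf_alloc_doorbell(epf, num_db);
	if (ret)
		return ret;

	/* Each doorbell is backed by one platform MSI vector */
	for (i = 0; i < num_db; i++) {
		ret = request_irq(epf->db_msg[i].virq, handler, 0,
				  "example-epf-db", data);
		if (ret)
			goto err;
	}

	return 0;

err:
	while (--i >= 0)
		free_irq(epf->db_msg[i].virq, data);
	pci_epf_free_doorbell(epf);
	return ret;
}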
diff --git a/drivers/pci/endpoint/pci-epc-core.c b/drivers/pci/endpoint/pci-epc-core.c
index 6c54fa5684d2..ca7f19cc973a 100644
--- a/drivers/pci/endpoint/pci-epc-core.c
+++ b/drivers/pci/endpoint/pci-epc-core.c
@@ -9,13 +9,14 @@
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
-#include <linux/of_device.h>
#include <linux/pci-epc.h>
#include <linux/pci-epf.h>
#include <linux/pci-ep-cfs.h>
-static struct class *pci_epc_class;
+static const struct class pci_epc_class = {
+ .name = "pci_epc",
+};
static void devm_pci_epc_release(struct device *dev, void *res)
{
@@ -24,13 +25,6 @@ static void devm_pci_epc_release(struct device *dev, void *res)
pci_epc_destroy(epc);
}
-static int devm_pci_epc_match(struct device *dev, void *res, void *match_data)
-{
- struct pci_epc **epc = res;
-
- return *epc == match_data;
-}
-
/**
* pci_epc_put() - release the PCI endpoint controller
* @epc: epc returned by pci_epc_get()
@@ -39,7 +33,7 @@ static int devm_pci_epc_match(struct device *dev, void *res, void *match_data)
*/
void pci_epc_put(struct pci_epc *epc)
{
- if (!epc || IS_ERR(epc))
+ if (IS_ERR_OR_NULL(epc))
return;
module_put(epc->ops->owner);
@@ -59,26 +53,17 @@ struct pci_epc *pci_epc_get(const char *epc_name)
int ret = -EINVAL;
struct pci_epc *epc;
struct device *dev;
- struct class_dev_iter iter;
- class_dev_iter_init(&iter, pci_epc_class, NULL, NULL);
- while ((dev = class_dev_iter_next(&iter))) {
- if (strcmp(epc_name, dev_name(dev)))
- continue;
+ dev = class_find_device_by_name(&pci_epc_class, epc_name);
+ if (!dev)
+ goto err;
- epc = to_pci_epc(dev);
- if (!try_module_get(epc->ops->owner)) {
- ret = -EINVAL;
- goto err;
- }
-
- class_dev_iter_exit(&iter);
- get_device(&epc->dev);
+ epc = to_pci_epc(dev);
+ if (try_module_get(epc->ops->owner))
return epc;
- }
err:
- class_dev_iter_exit(&iter);
+ put_device(dev);
return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(pci_epc_get);
@@ -88,7 +73,7 @@ EXPORT_SYMBOL_GPL(pci_epc_get);
* @epc_features: pci_epc_features structure that holds the reserved bar bitmap
*
* Invoke to get the first unreserved BAR that can be used by the endpoint
- * function. For any incorrect value in reserved_bar return '0'.
+ * function.
*/
enum pci_barno
pci_epc_get_first_free_bar(const struct pci_epc_features *epc_features)
@@ -103,34 +88,41 @@ EXPORT_SYMBOL_GPL(pci_epc_get_first_free_bar);
* @bar: the starting BAR number from where unreserved BAR should be searched
*
* Invoke to get the next unreserved BAR starting from @bar that can be used
- * for endpoint function. For any incorrect value in reserved_bar return '0'.
+ * for an endpoint function.
*/
enum pci_barno pci_epc_get_next_free_bar(const struct pci_epc_features
*epc_features, enum pci_barno bar)
{
- unsigned long free_bar;
+ int i;
if (!epc_features)
return BAR_0;
/* If 'bar - 1' is a 64-bit BAR, move to the next BAR */
- if ((epc_features->bar_fixed_64bit << 1) & 1 << bar)
+ if (bar > 0 && epc_features->bar[bar - 1].only_64bit)
bar++;
- /* Find if the reserved BAR is also a 64-bit BAR */
- free_bar = epc_features->reserved_bar & epc_features->bar_fixed_64bit;
+ for (i = bar; i < PCI_STD_NUM_BARS; i++) {
+ /* If the BAR is not reserved, return it. */
+ if (epc_features->bar[i].type != BAR_RESERVED)
+ return i;
+ }
+
+ return NO_BAR;
+}
+EXPORT_SYMBOL_GPL(pci_epc_get_next_free_bar);
- /* Set the adjacent bit if the reserved BAR is also a 64-bit BAR */
- free_bar <<= 1;
- free_bar |= epc_features->reserved_bar;
+static bool pci_epc_function_is_valid(struct pci_epc *epc,
+ u8 func_no, u8 vfunc_no)
+{
+ if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
+ return false;
- free_bar = find_next_zero_bit(&free_bar, 6, bar);
- if (free_bar > 5)
- return NO_BAR;
+ if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
+ return false;
- return free_bar;
+ return true;
}
-EXPORT_SYMBOL_GPL(pci_epc_get_next_free_bar);
/**
* pci_epc_get_features() - get the features supported by EPC
@@ -149,10 +141,7 @@ const struct pci_epc_features *pci_epc_get_features(struct pci_epc *epc,
{
const struct pci_epc_features *epc_features;
- if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
- return NULL;
-
- if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
+ if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
return NULL;
if (!epc->ops->get_features)
@@ -212,20 +201,17 @@ EXPORT_SYMBOL_GPL(pci_epc_start);
* @epc: the EPC device which has to interrupt the host
* @func_no: the physical endpoint function number in the EPC device
* @vfunc_no: the virtual endpoint function number in the physical function
- * @type: specify the type of interrupt; legacy, MSI or MSI-X
+ * @type: specify the type of interrupt; INTX, MSI or MSI-X
* @interrupt_num: the MSI or MSI-X interrupt number with range (1-N)
*
- * Invoke to raise an legacy, MSI or MSI-X interrupt
+ * Invoke to raise an INTX, MSI or MSI-X interrupt
*/
int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
- enum pci_epc_irq_type type, u16 interrupt_num)
+ unsigned int type, u16 interrupt_num)
{
int ret;
- if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
- return -EINVAL;
-
- if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
+ if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
return -EINVAL;
if (!epc->ops->raise_irq)
@@ -266,10 +252,7 @@ int pci_epc_map_msi_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
{
int ret;
- if (IS_ERR_OR_NULL(epc))
- return -EINVAL;
-
- if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
+ if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
return -EINVAL;
if (!epc->ops->map_msi_irq)
@@ -297,10 +280,7 @@ int pci_epc_get_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
int interrupt;
- if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
- return 0;
-
- if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
+ if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
return 0;
if (!epc->ops->get_msi)
@@ -313,8 +293,6 @@ int pci_epc_get_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
if (interrupt < 0)
return 0;
- interrupt = 1 << interrupt;
-
return interrupt;
}
EXPORT_SYMBOL_GPL(pci_epc_get_msi);
@@ -324,29 +302,25 @@ EXPORT_SYMBOL_GPL(pci_epc_get_msi);
* @epc: the EPC device on which MSI has to be configured
* @func_no: the physical endpoint function number in the EPC device
* @vfunc_no: the virtual endpoint function number in the physical function
- * @interrupts: number of MSI interrupts required by the EPF
+ * @nr_irqs: number of MSI interrupts required by the EPF
*
* Invoke to set the required number of MSI interrupts.
*/
-int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no, u8 interrupts)
+int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no, u8 nr_irqs)
{
int ret;
- u8 encode_int;
- if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
- interrupts < 1 || interrupts > 32)
+ if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
return -EINVAL;
- if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
+ if (nr_irqs < 1 || nr_irqs > 32)
return -EINVAL;
if (!epc->ops->set_msi)
return 0;
- encode_int = order_base_2(interrupts);
-
mutex_lock(&epc->lock);
- ret = epc->ops->set_msi(epc, func_no, vfunc_no, encode_int);
+ ret = epc->ops->set_msi(epc, func_no, vfunc_no, nr_irqs);
mutex_unlock(&epc->lock);
return ret;
@@ -365,10 +339,7 @@ int pci_epc_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
int interrupt;
- if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
- return 0;
-
- if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
+ if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
return 0;
if (!epc->ops->get_msix)
@@ -381,7 +352,7 @@ int pci_epc_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
if (interrupt < 0)
return 0;
- return interrupt + 1;
+ return interrupt;
}
EXPORT_SYMBOL_GPL(pci_epc_get_msix);
@@ -390,30 +361,28 @@ EXPORT_SYMBOL_GPL(pci_epc_get_msix);
* @epc: the EPC device on which MSI-X has to be configured
* @func_no: the physical endpoint function number in the EPC device
* @vfunc_no: the virtual endpoint function number in the physical function
- * @interrupts: number of MSI-X interrupts required by the EPF
+ * @nr_irqs: number of MSI-X interrupts required by the EPF
* @bir: BAR where the MSI-X table resides
* @offset: Offset pointing to the start of MSI-X table
*
* Invoke to set the required number of MSI-X interrupts.
*/
-int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
- u16 interrupts, enum pci_barno bir, u32 offset)
+int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no, u16 nr_irqs,
+ enum pci_barno bir, u32 offset)
{
int ret;
- if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
- interrupts < 1 || interrupts > 2048)
+ if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
return -EINVAL;
- if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
+ if (nr_irqs < 1 || nr_irqs > 2048)
return -EINVAL;
if (!epc->ops->set_msix)
return 0;
mutex_lock(&epc->lock);
- ret = epc->ops->set_msix(epc, func_no, vfunc_no, interrupts - 1, bir,
- offset);
+ ret = epc->ops->set_msix(epc, func_no, vfunc_no, nr_irqs, bir, offset);
mutex_unlock(&epc->lock);
return ret;
@@ -432,10 +401,7 @@ EXPORT_SYMBOL_GPL(pci_epc_set_msix);
void pci_epc_unmap_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
phys_addr_t phys_addr)
{
- if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
- return;
-
- if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
+ if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
return;
if (!epc->ops->unmap_addr)
@@ -463,10 +429,7 @@ int pci_epc_map_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
{
int ret;
- if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
- return -EINVAL;
-
- if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
+ if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
return -EINVAL;
if (!epc->ops->map_addr)
@@ -482,6 +445,109 @@ int pci_epc_map_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
EXPORT_SYMBOL_GPL(pci_epc_map_addr);
/**
+ * pci_epc_mem_map() - allocate and map a PCI address to a CPU address
+ * @epc: the EPC device on which the CPU address is to be allocated and mapped
+ * @func_no: the physical endpoint function number in the EPC device
+ * @vfunc_no: the virtual endpoint function number in the physical function
+ * @pci_addr: PCI address to which the CPU address should be mapped
+ * @pci_size: the number of bytes to map starting from @pci_addr
+ * @map: where to return the mapping information
+ *
+ * Allocate a controller memory address region and map it to a RC PCI address
+ * region, taking into account the controller physical address mapping
+ * constraints using the controller operation align_addr(). If this operation is
+ * not defined, we assume that there are no alignment constraints for the
+ * mapping.
+ *
+ * The effective size of the PCI address range mapped from @pci_addr is
+ * indicated by @map->pci_size. This size may be less than the requested
+ * @pci_size. The local virtual CPU address for the mapping is indicated by
+ * @map->virt_addr (@map->phys_addr indicates the physical address).
+ * The size and CPU address of the controller memory allocated and mapped are
+ * respectively indicated by @map->map_size and @map->virt_base (and
+ * @map->phys_base for the physical address of @map->virt_base).
+ *
+ * Returns 0 on success and a negative error code in case of error.
+ */
+int pci_epc_mem_map(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ u64 pci_addr, size_t pci_size, struct pci_epc_map *map)
+{
+ size_t map_size = pci_size;
+ size_t map_offset = 0;
+ int ret;
+
+ if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
+ return -EINVAL;
+
+ if (!pci_size || !map)
+ return -EINVAL;
+
+ /*
+ * Align the PCI address to map. If the controller defines the
+ * .align_addr() operation, use it to determine the PCI address to map
+ * and the size of the mapping. Otherwise, assume that the controller
+ * has no alignment constraint.
+ */
+ memset(map, 0, sizeof(*map));
+ map->pci_addr = pci_addr;
+ if (epc->ops->align_addr)
+ map->map_pci_addr =
+ epc->ops->align_addr(epc, pci_addr,
+ &map_size, &map_offset);
+ else
+ map->map_pci_addr = pci_addr;
+ map->map_size = map_size;
+ if (map->map_pci_addr + map->map_size < pci_addr + pci_size)
+ map->pci_size = map->map_pci_addr + map->map_size - pci_addr;
+ else
+ map->pci_size = pci_size;
+
+ map->virt_base = pci_epc_mem_alloc_addr(epc, &map->phys_base,
+ map->map_size);
+ if (!map->virt_base)
+ return -ENOMEM;
+
+ map->phys_addr = map->phys_base + map_offset;
+ map->virt_addr = map->virt_base + map_offset;
+
+ ret = pci_epc_map_addr(epc, func_no, vfunc_no, map->phys_base,
+ map->map_pci_addr, map->map_size);
+ if (ret) {
+ pci_epc_mem_free_addr(epc, map->phys_base, map->virt_base,
+ map->map_size);
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pci_epc_mem_map);
+
+/**
+ * pci_epc_mem_unmap() - unmap and free a CPU address region
+ * @epc: the EPC device on which the CPU address is allocated and mapped
+ * @func_no: the physical endpoint function number in the EPC device
+ * @vfunc_no: the virtual endpoint function number in the physical function
+ * @map: the mapping information
+ *
+ * Unmap and free a CPU address region that was allocated and mapped with
+ * pci_epc_mem_map().
+ */
+void pci_epc_mem_unmap(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ struct pci_epc_map *map)
+{
+ if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
+ return;
+
+ if (!map || !map->virt_base)
+ return;
+
+ pci_epc_unmap_addr(epc, func_no, vfunc_no, map->phys_base);
+ pci_epc_mem_free_addr(epc, map->phys_base, map->virt_base,
+ map->map_size);
+}
+EXPORT_SYMBOL_GPL(pci_epc_mem_unmap);
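/*
 * Illustrative sketch (not part of the patch): copying a buffer to a host (RC)
 * PCI address using the map/unmap helpers above. All names are hypothetical;
 * a real caller would loop, advancing by the number of bytes actually mapped.
 */
#include <linux/io.h>
#include <linux/pci-epc.h>
#include <linux/types.h>

static ssize_t example_write_to_host(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
				     u64 host_addr, const void *buf, size_t len)
{
	struct pci_epc_map map;
	int ret;

	ret = pci_epc_mem_map(epc, func_no, vfunc_no, host_addr, len, &map);
	if (ret)
		return ret;

	/* map.pci_size may be smaller than len if alignment constraints apply */
	memcpy_toio(map.virt_addr, buf, map.pci_size);

	pci_epc_mem_unmap(epc, func_no, vfunc_no, &map);

	/* Return the number of bytes written from 'buf' */
	return map.pci_size;
}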
+
+/**
* pci_epc_clear_bar() - reset the BAR
* @epc: the EPC device for which the BAR has to be cleared
* @func_no: the physical endpoint function number in the EPC device
@@ -493,12 +559,11 @@ EXPORT_SYMBOL_GPL(pci_epc_map_addr);
void pci_epc_clear_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct pci_epf_bar *epf_bar)
{
- if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
- (epf_bar->barno == BAR_5 &&
- epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64))
+ if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
return;
- if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
+ if (epf_bar->barno == BAR_5 &&
+ epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64)
return;
if (!epc->ops->clear_bar)
@@ -522,21 +587,33 @@ EXPORT_SYMBOL_GPL(pci_epc_clear_bar);
int pci_epc_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct pci_epf_bar *epf_bar)
{
- int ret;
+ const struct pci_epc_features *epc_features;
+ enum pci_barno bar = epf_bar->barno;
int flags = epf_bar->flags;
+ int ret;
+
+ epc_features = pci_epc_get_features(epc, func_no, vfunc_no);
+ if (!epc_features)
+ return -EINVAL;
+
+ if (epc_features->bar[bar].type == BAR_RESIZABLE &&
+ (epf_bar->size < SZ_1M || (u64)epf_bar->size > (SZ_128G * 1024)))
+ return -EINVAL;
- if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
- (epf_bar->barno == BAR_5 &&
- flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ||
+ if (epc_features->bar[bar].type == BAR_FIXED &&
+ (epc_features->bar[bar].fixed_size != epf_bar->size))
+ return -EINVAL;
+
+ if (!is_power_of_2(epf_bar->size))
+ return -EINVAL;
+
+ if ((epf_bar->barno == BAR_5 && flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ||
(flags & PCI_BASE_ADDRESS_SPACE_IO &&
flags & PCI_BASE_ADDRESS_IO_MASK) ||
(upper_32_bits(epf_bar->size) &&
!(flags & PCI_BASE_ADDRESS_MEM_TYPE_64)))
return -EINVAL;
- if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
- return -EINVAL;
-
if (!epc->ops->set_bar)
return 0;
@@ -549,6 +626,33 @@ int pci_epc_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
EXPORT_SYMBOL_GPL(pci_epc_set_bar);
/**
+ * pci_epc_bar_size_to_rebar_cap() - convert a size to the representation used
+ * by the Resizable BAR Capability Register
+ * @size: the size to convert
+ * @cap: where to store the result
+ *
+ * Returns 0 on success and a negative error code in case of error.
+ */
+int pci_epc_bar_size_to_rebar_cap(size_t size, u32 *cap)
+{
+ /*
+ * As per PCIe r6.0, sec 7.8.6.2, min size for a resizable BAR is 1 MB,
+ * thus disallow a requested BAR size smaller than 1 MB.
+ * Disallow a requested BAR size larger than 128 TB.
+ */
+ if (size < SZ_1M || (u64)size > (SZ_128G * 1024))
+ return -EINVAL;
+
+ *cap = ilog2(size) - ilog2(SZ_1M);
+
+ /* Sizes in REBAR_CAP start at BIT(4). */
+ *cap = BIT(*cap + 4);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pci_epc_bar_size_to_rebar_cap);
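/*
 * Illustrative worked example (not part of the patch): BAR sizes map to single
 * bits in the Resizable BAR Capability register, starting at BIT(4) for 1 MB.
 */
#include <linux/sizes.h>
#include <linux/pci-epc.h>

static void example_rebar_cap_encoding(void)
{
	u32 cap;

	pci_epc_bar_size_to_rebar_cap(SZ_1M, &cap);	/* cap == BIT(4)  */
	pci_epc_bar_size_to_rebar_cap(SZ_2M, &cap);	/* cap == BIT(5)  */
	pci_epc_bar_size_to_rebar_cap(SZ_256M, &cap);	/* cap == BIT(12) */
}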
+
+/**
* pci_epc_write_header() - write standard configuration header
* @epc: the EPC device to which the configuration header should be written
* @func_no: the physical endpoint function number in the EPC device
@@ -565,10 +669,7 @@ int pci_epc_write_header(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
{
int ret;
- if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
- return -EINVAL;
-
- if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
+ if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
return -EINVAL;
/* Only Virtual Function #1 has deviceID */
@@ -661,21 +762,21 @@ void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf,
struct list_head *list;
u32 func_no = 0;
- if (!epc || IS_ERR(epc) || !epf)
+ if (IS_ERR_OR_NULL(epc) || !epf)
return;
+ mutex_lock(&epc->list_lock);
if (type == PRIMARY_INTERFACE) {
func_no = epf->func_no;
list = &epf->list;
+ epf->epc = NULL;
} else {
func_no = epf->sec_epc_func_no;
list = &epf->sec_epc_list;
+ epf->sec_epc = NULL;
}
-
- mutex_lock(&epc->list_lock);
clear_bit(func_no, &epc->function_num_map);
list_del(list);
- epf->epc = NULL;
mutex_unlock(&epc->list_lock);
}
EXPORT_SYMBOL_GPL(pci_epc_remove_epf);
@@ -692,7 +793,7 @@ void pci_epc_linkup(struct pci_epc *epc)
{
struct pci_epf *epf;
- if (!epc || IS_ERR(epc))
+ if (IS_ERR_OR_NULL(epc))
return;
mutex_lock(&epc->list_lock);
@@ -718,7 +819,7 @@ void pci_epc_linkdown(struct pci_epc *epc)
{
struct pci_epf *epf;
- if (!epc || IS_ERR(epc))
+ if (IS_ERR_OR_NULL(epc))
return;
mutex_lock(&epc->list_lock);
@@ -733,9 +834,9 @@ void pci_epc_linkdown(struct pci_epc *epc)
EXPORT_SYMBOL_GPL(pci_epc_linkdown);
/**
- * pci_epc_init_notify() - Notify the EPF device that EPC device's core
- * initialization is completed.
- * @epc: the EPC device whose core initialization is completed
+ * pci_epc_init_notify() - Notify the EPF device that EPC device initialization
+ * is completed.
+ * @epc: the EPC device whose initialization is completed
*
* Invoke to Notify the EPF device that the EPC device's initialization
* is completed.
@@ -744,76 +845,109 @@ void pci_epc_init_notify(struct pci_epc *epc)
{
struct pci_epf *epf;
- if (!epc || IS_ERR(epc))
+ if (IS_ERR_OR_NULL(epc))
return;
mutex_lock(&epc->list_lock);
list_for_each_entry(epf, &epc->pci_epf, list) {
mutex_lock(&epf->lock);
- if (epf->event_ops && epf->event_ops->core_init)
- epf->event_ops->core_init(epf);
+ if (epf->event_ops && epf->event_ops->epc_init)
+ epf->event_ops->epc_init(epf);
mutex_unlock(&epf->lock);
}
+ epc->init_complete = true;
mutex_unlock(&epc->list_lock);
}
EXPORT_SYMBOL_GPL(pci_epc_init_notify);
/**
- * pci_epc_bme_notify() - Notify the EPF device that the EPC device has received
- * the BME event from the Root complex
- * @epc: the EPC device that received the BME event
+ * pci_epc_notify_pending_init() - Notify the EPF device of a pending EPC
+ * initialization complete event
+ * @epc: the EPC device whose completed initialization is pending notification
+ * @epf: the EPF device to be notified
*
- * Invoke to Notify the EPF device that the EPC device has received the Bus
- * Master Enable (BME) event from the Root complex
+ * Invoke to deliver a pending EPC initialization complete notification to the
+ * EPF device. This is used when the EPC initialization completed before the
+ * EPF driver was bound.
*/
-void pci_epc_bme_notify(struct pci_epc *epc)
+void pci_epc_notify_pending_init(struct pci_epc *epc, struct pci_epf *epf)
+{
+ if (epc->init_complete) {
+ mutex_lock(&epf->lock);
+ if (epf->event_ops && epf->event_ops->epc_init)
+ epf->event_ops->epc_init(epf);
+ mutex_unlock(&epf->lock);
+ }
+}
+EXPORT_SYMBOL_GPL(pci_epc_notify_pending_init);
+
+/**
+ * pci_epc_deinit_notify() - Notify the EPF device about EPC deinitialization
+ * @epc: the EPC device whose deinitialization is completed
+ *
+ * Invoke to notify the EPF device that the EPC deinitialization is completed.
+ */
+void pci_epc_deinit_notify(struct pci_epc *epc)
{
struct pci_epf *epf;
- if (!epc || IS_ERR(epc))
+ if (IS_ERR_OR_NULL(epc))
return;
mutex_lock(&epc->list_lock);
list_for_each_entry(epf, &epc->pci_epf, list) {
mutex_lock(&epf->lock);
- if (epf->event_ops && epf->event_ops->bme)
- epf->event_ops->bme(epf);
+ if (epf->event_ops && epf->event_ops->epc_deinit)
+ epf->event_ops->epc_deinit(epf);
mutex_unlock(&epf->lock);
}
+ epc->init_complete = false;
mutex_unlock(&epc->list_lock);
}
-EXPORT_SYMBOL_GPL(pci_epc_bme_notify);
+EXPORT_SYMBOL_GPL(pci_epc_deinit_notify);
/**
- * pci_epc_destroy() - destroy the EPC device
- * @epc: the EPC device that has to be destroyed
+ * pci_epc_bus_master_enable_notify() - Notify the EPF device that the EPC
+ * device has received the Bus Master
+ * Enable event from the Root complex
+ * @epc: the EPC device that received the Bus Master Enable event
*
- * Invoke to destroy the PCI EPC device
+ * Notify the EPF device that the EPC device has generated the Bus Master Enable
+ * event due to the host setting the Bus Master Enable bit in the Command register.
*/
-void pci_epc_destroy(struct pci_epc *epc)
+void pci_epc_bus_master_enable_notify(struct pci_epc *epc)
{
- pci_ep_cfs_remove_epc_group(epc->group);
- device_unregister(&epc->dev);
+ struct pci_epf *epf;
+
+ if (IS_ERR_OR_NULL(epc))
+ return;
+
+ mutex_lock(&epc->list_lock);
+ list_for_each_entry(epf, &epc->pci_epf, list) {
+ mutex_lock(&epf->lock);
+ if (epf->event_ops && epf->event_ops->bus_master_enable)
+ epf->event_ops->bus_master_enable(epf);
+ mutex_unlock(&epf->lock);
+ }
+ mutex_unlock(&epc->list_lock);
}
-EXPORT_SYMBOL_GPL(pci_epc_destroy);
+EXPORT_SYMBOL_GPL(pci_epc_bus_master_enable_notify);
/**
- * devm_pci_epc_destroy() - destroy the EPC device
- * @dev: device that wants to destroy the EPC
+ * pci_epc_destroy() - destroy the EPC device
* @epc: the EPC device that has to be destroyed
*
- * Invoke to destroy the devres associated with this
- * pci_epc and destroy the EPC device.
+ * Invoke to destroy the PCI EPC device
*/
-void devm_pci_epc_destroy(struct device *dev, struct pci_epc *epc)
+void pci_epc_destroy(struct pci_epc *epc)
{
- int r;
-
- r = devres_destroy(dev, devm_pci_epc_release, devm_pci_epc_match,
- epc);
- dev_WARN_ONCE(dev, r, "couldn't find PCI EPC resource\n");
+ pci_ep_cfs_remove_epc_group(epc->group);
+#ifdef CONFIG_PCI_DOMAINS_GENERIC
+ pci_bus_release_domain_nr(epc->dev.parent, epc->domain_nr);
+#endif
+ device_unregister(&epc->dev);
}
-EXPORT_SYMBOL_GPL(devm_pci_epc_destroy);
+EXPORT_SYMBOL_GPL(pci_epc_destroy);
static void pci_epc_release(struct device *dev)
{
@@ -851,11 +985,21 @@ __pci_epc_create(struct device *dev, const struct pci_epc_ops *ops,
INIT_LIST_HEAD(&epc->pci_epf);
device_initialize(&epc->dev);
- epc->dev.class = pci_epc_class;
+ epc->dev.class = &pci_epc_class;
epc->dev.parent = dev;
epc->dev.release = pci_epc_release;
epc->ops = ops;
+#ifdef CONFIG_PCI_DOMAINS_GENERIC
+ epc->domain_nr = pci_bus_find_domain_nr(NULL, dev);
+#else
+ /*
+ * TODO: If the architecture doesn't support generic PCI
+ * domains, then a custom implementation has to be used.
+ */
+ WARN_ONCE(1, "This architecture doesn't support generic PCI domains\n");
+#endif
+
ret = dev_set_name(&epc->dev, "%s", dev_name(dev));
if (ret)
goto put_dev;
@@ -870,7 +1014,6 @@ __pci_epc_create(struct device *dev, const struct pci_epc_ops *ops,
put_dev:
put_device(&epc->dev);
- kfree(epc);
err_ret:
return ERR_PTR(ret);
@@ -912,20 +1055,13 @@ EXPORT_SYMBOL_GPL(__devm_pci_epc_create);
static int __init pci_epc_init(void)
{
- pci_epc_class = class_create("pci_epc");
- if (IS_ERR(pci_epc_class)) {
- pr_err("failed to create pci epc class --> %ld\n",
- PTR_ERR(pci_epc_class));
- return PTR_ERR(pci_epc_class);
- }
-
- return 0;
+ return class_register(&pci_epc_class);
}
module_init(pci_epc_init);
static void __exit pci_epc_exit(void)
{
- class_destroy(pci_epc_class);
+ class_unregister(&pci_epc_class);
}
module_exit(pci_epc_exit);
diff --git a/drivers/pci/endpoint/pci-epc-mem.c b/drivers/pci/endpoint/pci-epc-mem.c
index 7dcf6f480b82..218a60e945db 100644
--- a/drivers/pci/endpoint/pci-epc-mem.c
+++ b/drivers/pci/endpoint/pci-epc-mem.c
@@ -115,6 +115,16 @@ err_mem:
}
EXPORT_SYMBOL_GPL(pci_epc_multi_mem_init);
+/**
+ * pci_epc_mem_init() - Initialize the pci_epc_mem structure
+ * @epc: the EPC device that invoked pci_epc_mem_init
+ * @base: Physical address of the window region
+ * @size: Total Size of the window region
+ * @page_size: Page size of the window region
+ *
+ * Invoke to initialize a single pci_epc_mem structure used by the
+ * endpoint functions to allocate memory for mapping the PCI host memory
+ */
int pci_epc_mem_init(struct pci_epc *epc, phys_addr_t base,
size_t size, size_t page_size)
{
@@ -168,7 +178,7 @@ EXPORT_SYMBOL_GPL(pci_epc_mem_exit);
void __iomem *pci_epc_mem_alloc_addr(struct pci_epc *epc,
phys_addr_t *phys_addr, size_t size)
{
- void __iomem *virt_addr = NULL;
+ void __iomem *virt_addr;
struct pci_epc_mem *mem;
unsigned int page_shift;
size_t align_size;
@@ -178,10 +188,13 @@ void __iomem *pci_epc_mem_alloc_addr(struct pci_epc *epc,
for (i = 0; i < epc->num_windows; i++) {
mem = epc->windows[i];
- mutex_lock(&mem->lock);
+ if (size > mem->window.size)
+ continue;
+
align_size = ALIGN(size, mem->window.page_size);
order = pci_epc_mem_get_order(mem, align_size);
+ mutex_lock(&mem->lock);
pageno = bitmap_find_free_region(mem->bitmap, mem->pages,
order);
if (pageno >= 0) {
@@ -201,7 +214,7 @@ void __iomem *pci_epc_mem_alloc_addr(struct pci_epc *epc,
mutex_unlock(&mem->lock);
}
- return virt_addr;
+ return NULL;
}
EXPORT_SYMBOL_GPL(pci_epc_mem_alloc_addr);
diff --git a/drivers/pci/endpoint/pci-epf-core.c b/drivers/pci/endpoint/pci-epf-core.c
index 2c32de667937..9a505c796370 100644
--- a/drivers/pci/endpoint/pci-epf-core.c
+++ b/drivers/pci/endpoint/pci-epf-core.c
@@ -17,7 +17,7 @@
static DEFINE_MUTEX(pci_epf_mutex);
-static struct bus_type pci_epf_bus_type;
+static const struct bus_type pci_epf_bus_type;
static const struct device_type pci_epf_type;
/**
@@ -202,11 +202,54 @@ void pci_epf_remove_vepf(struct pci_epf *epf_pf, struct pci_epf *epf_vf)
mutex_lock(&epf_pf->lock);
clear_bit(epf_vf->vfunc_no, &epf_pf->vfunction_num_map);
+ epf_vf->epf_pf = NULL;
list_del(&epf_vf->list);
mutex_unlock(&epf_pf->lock);
}
EXPORT_SYMBOL_GPL(pci_epf_remove_vepf);
+static int pci_epf_get_required_bar_size(struct pci_epf *epf, size_t *bar_size,
+ size_t *aligned_mem_size,
+ enum pci_barno bar,
+ const struct pci_epc_features *epc_features,
+ enum pci_epc_interface_type type)
+{
+ u64 bar_fixed_size = epc_features->bar[bar].fixed_size;
+ size_t align = epc_features->align;
+ size_t size = *bar_size;
+
+ if (size < 128)
+ size = 128;
+
+ /* According to PCIe base spec, min size for a resizable BAR is 1 MB. */
+ if (epc_features->bar[bar].type == BAR_RESIZABLE && size < SZ_1M)
+ size = SZ_1M;
+
+ if (epc_features->bar[bar].type == BAR_FIXED && bar_fixed_size) {
+ if (size > bar_fixed_size) {
+ dev_err(&epf->dev,
+ "requested BAR size is larger than fixed size\n");
+ return -ENOMEM;
+ }
+ size = bar_fixed_size;
+ } else {
+ /* BAR size must be power of two */
+ size = roundup_pow_of_two(size);
+ }
+
+ *bar_size = size;
+
+ /*
+ * The EPC's BAR start address must meet alignment requirements. In most
+ * cases, the alignment will match the BAR size. However, differences
+ * can occur, for example, when the fixed BAR size (e.g., 128 bytes) is
+ * smaller than the required alignment (e.g., 4 KB).
+ */
+ *aligned_mem_size = align ? ALIGN(size, align) : size;
+
+ return 0;
+}
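/*
 * Illustrative worked example (not part of the patch), assuming align = 4 KB:
 *  - a 200-byte request on a programmable BAR rounds up to 256 bytes (power of
 *    two), with aligned_mem_size = 4096
 *  - a BAR_FIXED BAR of 128 bytes keeps size = 128, with aligned_mem_size = 4096
 *  - a 64 KB request on a BAR_RESIZABLE BAR is raised to the 1 MB spec minimum
 */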
+
/**
* pci_epf_free_space() - free the allocated PCI EPF register space
* @epf: the EPF device from whom to free the memory
@@ -235,12 +278,13 @@ void pci_epf_free_space(struct pci_epf *epf, void *addr, enum pci_barno bar,
}
dev = epc->dev.parent;
- dma_free_coherent(dev, epf_bar[bar].size, addr,
+ dma_free_coherent(dev, epf_bar[bar].mem_size, addr,
epf_bar[bar].phys_addr);
epf_bar[bar].phys_addr = 0;
epf_bar[bar].addr = NULL;
epf_bar[bar].size = 0;
+ epf_bar[bar].mem_size = 0;
epf_bar[bar].barno = 0;
epf_bar[bar].flags = 0;
}
@@ -251,27 +295,27 @@ EXPORT_SYMBOL_GPL(pci_epf_free_space);
* @epf: the EPF device to whom allocate the memory
* @size: the size of the memory that has to be allocated
* @bar: the BAR number corresponding to the allocated register space
- * @align: alignment size for the allocation region
+ * @epc_features: the features provided by the EPC specific to this EPF
* @type: Identifies if the allocation is for primary EPC or secondary EPC
*
* Invoke to allocate memory for the PCI EPF register space.
+ * Flag PCI_BASE_ADDRESS_MEM_TYPE_64 will automatically get set if the BAR
+ * can only be a 64-bit BAR, or if the requested size is larger than 2 GB.
*/
void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar,
- size_t align, enum pci_epc_interface_type type)
+ const struct pci_epc_features *epc_features,
+ enum pci_epc_interface_type type)
{
struct pci_epf_bar *epf_bar;
dma_addr_t phys_addr;
struct pci_epc *epc;
struct device *dev;
+ size_t mem_size;
void *space;
- if (size < 128)
- size = 128;
-
- if (align)
- size = ALIGN(size, align);
- else
- size = roundup_pow_of_two(size);
+ if (pci_epf_get_required_bar_size(epf, &size, &mem_size, bar,
+ epc_features, type))
+ return NULL;
if (type == PRIMARY_INTERFACE) {
epc = epf->epc;
@@ -282,7 +326,7 @@ void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar,
}
dev = epc->dev.parent;
- space = dma_alloc_coherent(dev, size, &phys_addr, GFP_KERNEL);
+ space = dma_alloc_coherent(dev, mem_size, &phys_addr, GFP_KERNEL);
if (!space) {
dev_err(dev, "failed to allocate mem space\n");
return NULL;
@@ -291,15 +335,94 @@ void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar,
epf_bar[bar].phys_addr = phys_addr;
epf_bar[bar].addr = space;
epf_bar[bar].size = size;
+ epf_bar[bar].mem_size = mem_size;
epf_bar[bar].barno = bar;
- epf_bar[bar].flags |= upper_32_bits(size) ?
- PCI_BASE_ADDRESS_MEM_TYPE_64 :
- PCI_BASE_ADDRESS_MEM_TYPE_32;
+ if (upper_32_bits(size) || epc_features->bar[bar].only_64bit)
+ epf_bar[bar].flags |= PCI_BASE_ADDRESS_MEM_TYPE_64;
+ else
+ epf_bar[bar].flags |= PCI_BASE_ADDRESS_MEM_TYPE_32;
return space;
}
EXPORT_SYMBOL_GPL(pci_epf_alloc_space);
+/**
+ * pci_epf_assign_bar_space() - Assign PCI EPF BAR space
+ * @epf: EPF device to assign the BAR memory
+ * @size: Size of the memory that has to be assigned
+ * @bar: BAR number for which the memory is assigned
+ * @epc_features: Features provided by the EPC specific to this EPF
+ * @type: Identifies if the assignment is for primary EPC or secondary EPC
+ * @bar_addr: Address to be assigned for the @bar
+ *
+ * Invoke to assign memory for the PCI EPF BAR.
+ * Flag PCI_BASE_ADDRESS_MEM_TYPE_64 will automatically get set if the BAR
+ * can only be a 64-bit BAR, or if the requested size is larger than 2 GB.
+ */
+int pci_epf_assign_bar_space(struct pci_epf *epf, size_t size,
+ enum pci_barno bar,
+ const struct pci_epc_features *epc_features,
+ enum pci_epc_interface_type type,
+ dma_addr_t bar_addr)
+{
+ size_t bar_size, aligned_mem_size;
+ struct pci_epf_bar *epf_bar;
+ dma_addr_t limit;
+ int pos;
+
+ if (!size)
+ return -EINVAL;
+
+ limit = bar_addr + size - 1;
+
+ /*
+ * Bits: 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0
+ * bar_addr: U U U U U U 0 X X X X X X X X X
+ * limit: U U U U U U 1 X X X X X X X X X
+ *
+ * bar_addr^limit 0 0 0 0 0 0 1 X X X X X X X X X
+ *
+ * U: unchanged address bits in range [bar_addr, limit]
+ * X: bit 0 or 1
+ *
+ * Checking (bar_addr ^ limit) & BIT_ULL(pos) from the MSB down finds the
+ * highest differing bit (pos), so a window of size 2^(pos + 1) covers the
+ * whole BAR range.
+ */
+ for (pos = 8 * sizeof(dma_addr_t) - 1; pos > 0; pos--)
+ if ((limit ^ bar_addr) & BIT_ULL(pos))
+ break;
+
+ if (pos == 8 * sizeof(dma_addr_t) - 1)
+ return -EINVAL;
+
+ bar_size = BIT_ULL(pos + 1);
+ if (pci_epf_get_required_bar_size(epf, &bar_size, &aligned_mem_size,
+ bar, epc_features, type))
+ return -ENOMEM;
+
+ if (type == PRIMARY_INTERFACE)
+ epf_bar = epf->bar;
+ else
+ epf_bar = epf->sec_epc_bar;
+
+ epf_bar[bar].phys_addr = ALIGN_DOWN(bar_addr, aligned_mem_size);
+
+ if (epf_bar[bar].phys_addr + bar_size < limit)
+ return -ENOMEM;
+
+ epf_bar[bar].addr = NULL;
+ epf_bar[bar].size = bar_size;
+ epf_bar[bar].mem_size = aligned_mem_size;
+ epf_bar[bar].barno = bar;
+ if (upper_32_bits(size) || epc_features->bar[bar].only_64bit)
+ epf_bar[bar].flags |= PCI_BASE_ADDRESS_MEM_TYPE_64;
+ else
+ epf_bar[bar].flags |= PCI_BASE_ADDRESS_MEM_TYPE_32;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pci_epf_assign_bar_space);
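/*
 * Illustrative worked example (not part of the patch), with hypothetical
 * values: bar_addr = 0xfee00040 and size = 64 give limit = 0xfee0007f.
 * bar_addr ^ limit = 0x3f, so the highest differing bit is pos = 5 and
 * bar_size = BIT(6) = 64 bytes. The 128-byte minimum applied by
 * pci_epf_get_required_bar_size() then rounds the allocation up, phys_addr is
 * aligned down to 0xfee00000, and the resulting window still covers the whole
 * [bar_addr, limit] range.
 */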
+
static void pci_epf_remove_cfs(struct pci_epf_driver *driver)
{
struct config_group *group, *tmp;
@@ -310,7 +433,7 @@ static void pci_epf_remove_cfs(struct pci_epf_driver *driver)
mutex_lock(&pci_epf_mutex);
list_for_each_entry_safe(group, tmp, &driver->epf_group, group_entry)
pci_ep_cfs_remove_epf_group(group);
- list_del(&driver->epf_group);
+ WARN_ON(!list_empty(&driver->epf_group));
mutex_unlock(&pci_epf_mutex);
}
@@ -449,6 +572,44 @@ struct pci_epf *pci_epf_create(const char *name)
}
EXPORT_SYMBOL_GPL(pci_epf_create);
+/**
+ * pci_epf_align_inbound_addr() - Align the given address based on the BAR
+ * alignment requirement
+ * @epf: the EPF device
+ * @addr: inbound address to be aligned
+ * @bar: the BAR number corresponding to the given addr
+ * @base: base address matching the @bar alignment requirement
+ * @off: offset to be added to the @base address
+ *
+ * Helper function to align input @addr based on BAR's alignment requirement.
+ * The aligned base address and offset are returned via @base and @off.
+ *
+ * NOTE: The pci_epf_alloc_space() function already accounts for alignment.
+ * This API is primarily intended for use with other memory regions not
+ * allocated by pci_epf_alloc_space(), such as peripheral register spaces or
+ * the message address of a platform MSI controller.
+ *
+ * Return: 0 on success, errno otherwise.
+ */
+int pci_epf_align_inbound_addr(struct pci_epf *epf, enum pci_barno bar,
+ u64 addr, dma_addr_t *base, size_t *off)
+{
+ /*
+ * Most EP controllers require the BAR start address to be aligned to
+ * the BAR size, because they mask off the lower bits.
+ *
+ * Alignment to BAR size also works for controllers that support
+ * unaligned addresses.
+ */
+ u64 align = epf->bar[bar].size;
+
+ *base = round_down(addr, align);
+ *off = addr & (align - 1);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pci_epf_align_inbound_addr);
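/*
 * Illustrative worked example (not part of the patch): with a 4 KB BAR
 * (epf->bar[bar].size = 0x1000) and addr = 0xfe601234, the helper returns
 * base = 0xfe601000 and off = 0x234, so the BAR can be programmed to point at
 * base while the caller accesses the data at base + off.
 */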
+
static void pci_epf_dev_release(struct device *dev)
{
struct pci_epf *epf = to_pci_epf(dev);
@@ -473,10 +634,10 @@ pci_epf_match_id(const struct pci_epf_device_id *id, const struct pci_epf *epf)
return NULL;
}
-static int pci_epf_device_match(struct device *dev, struct device_driver *drv)
+static int pci_epf_device_match(struct device *dev, const struct device_driver *drv)
{
struct pci_epf *epf = to_pci_epf(dev);
- struct pci_epf_driver *driver = to_pci_epf_driver(drv);
+ const struct pci_epf_driver *driver = to_pci_epf_driver(drv);
if (driver->id_table)
return !!pci_epf_match_id(driver->id_table, epf);
@@ -507,7 +668,7 @@ static void pci_epf_device_remove(struct device *dev)
epf->driver = NULL;
}
-static struct bus_type pci_epf_bus_type = {
+static const struct bus_type pci_epf_bus_type = {
.name = "pci-epf",
.match = pci_epf_device_match,
.probe = pci_epf_device_probe,
diff --git a/drivers/pci/host-bridge.c b/drivers/pci/host-bridge.c
index afa50b446567..be5ef6516cff 100644
--- a/drivers/pci/host-bridge.c
+++ b/drivers/pci/host-bridge.c
@@ -33,6 +33,7 @@ struct device *pci_get_host_bridge_device(struct pci_dev *dev)
kobject_get(&bridge->kobj);
return bridge;
}
+EXPORT_SYMBOL_GPL(pci_get_host_bridge_device);
void pci_put_host_bridge_device(struct device *dev)
{
diff --git a/drivers/pci/hotplug/Kconfig b/drivers/pci/hotplug/Kconfig
index 48113b210cf9..3207860b52e4 100644
--- a/drivers/pci/hotplug/Kconfig
+++ b/drivers/pci/hotplug/Kconfig
@@ -61,6 +61,18 @@ config HOTPLUG_PCI_ACPI
When in doubt, say N.
+config HOTPLUG_PCI_ACPI_AMPERE_ALTRA
+ tristate "ACPI PCI Hotplug driver Ampere Altra extensions"
+ depends on HOTPLUG_PCI_ACPI
+ depends on HAVE_ARM_SMCCC_DISCOVERY
+ help
+ Say Y here if you have an Ampere Altra system.
+
+ To compile this driver as a module, choose M here: the
+ module will be called acpiphp_ampere_altra.
+
+ When in doubt, say N.
+
config HOTPLUG_PCI_ACPI_IBM
tristate "ACPI PCI Hotplug driver IBM extensions"
depends on HOTPLUG_PCI_ACPI
@@ -85,7 +97,7 @@ config HOTPLUG_PCI_CPCI_ZT5550
tristate "Ziatech ZT5550 CompactPCI Hotplug driver"
depends on HOTPLUG_PCI_CPCI && X86
help
- Say Y here if you have an Performance Technologies (formerly Intel,
+ Say Y here if you have a Performance Technologies (formerly Intel,
formerly just Ziatech) Ziatech ZT5550 CompactPCI system card.
To compile this driver as a module, choose M here: the
@@ -106,6 +118,16 @@ config HOTPLUG_PCI_CPCI_GENERIC
When in doubt, say N.
+config HOTPLUG_PCI_OCTEONEP
+ bool "Marvell OCTEON PCI Hotplug driver"
+ depends on HOTPLUG_PCI
+ help
+ Say Y here if you have an OCTEON PCIe device with a hotplug
+ controller. This driver enables the non-controller functions of the
+ device to be registered as hotplug slots.
+
+ When in doubt, say N.
+
config HOTPLUG_PCI_SHPC
bool "SHPC PCI Hotplug driver"
help
diff --git a/drivers/pci/hotplug/Makefile b/drivers/pci/hotplug/Makefile
index 5196983220df..40aaf31fe338 100644
--- a/drivers/pci/hotplug/Makefile
+++ b/drivers/pci/hotplug/Makefile
@@ -20,9 +20,11 @@ obj-$(CONFIG_HOTPLUG_PCI_RPA) += rpaphp.o
obj-$(CONFIG_HOTPLUG_PCI_RPA_DLPAR) += rpadlpar_io.o
obj-$(CONFIG_HOTPLUG_PCI_ACPI) += acpiphp.o
obj-$(CONFIG_HOTPLUG_PCI_S390) += s390_pci_hpc.o
+obj-$(CONFIG_HOTPLUG_PCI_OCTEONEP) += octep_hp.o
# acpiphp_ibm extends acpiphp, so should be linked afterwards.
+obj-$(CONFIG_HOTPLUG_PCI_ACPI_AMPERE_ALTRA) += acpiphp_ampere_altra.o
obj-$(CONFIG_HOTPLUG_PCI_ACPI_IBM) += acpiphp_ibm.o
pci_hotplug-objs := pci_hotplug_core.o
diff --git a/drivers/pci/hotplug/TODO b/drivers/pci/hotplug/TODO
index fdb8dd6ea24d..7397374af171 100644
--- a/drivers/pci/hotplug/TODO
+++ b/drivers/pci/hotplug/TODO
@@ -2,9 +2,7 @@ Contributions are solicited in particular to remedy the following issues:
cpcihp:
-* There are no implementations of the ->hardware_test, ->get_power and
- ->set_power callbacks in struct cpci_hp_controller_ops. Why were they
- introduced? Can they be removed from the struct?
+* The return code from pci_hp_add_bridge() is not checked.
cpqphp:
@@ -16,6 +14,8 @@ cpqphp:
* A large portion of cpqphp_ctrl.c and cpqphp_pci.c concerns resource
management. Doesn't this duplicate functionality in the core?
+* The return code from pci_hp_add_bridge() is not checked.
+
ibmphp:
* Implementations of hotplug_slot_ops callbacks such as get_adapter_present()
@@ -43,21 +43,10 @@ ibmphp:
* A large portion of ibmphp_res.c and ibmphp_pci.c concerns resource
management. Doesn't this duplicate functionality in the core?
-sgi_hotplug:
-
-* Several functions access the pci_slot member in struct hotplug_slot even
- though pci_hotplug.h declares it private. See sn_hp_destroy() for an
- example. Either the pci_slot member should no longer be declared private
- or sgi_hotplug should store a pointer to it in struct slot. Probably the
- former.
+* The return code from pci_hp_add_bridge() is not checked.
shpchp:
-* There is only a single implementation of struct hpc_ops. Can the struct be
- removed and its functions invoked directly? This has already been done in
- pciehp with commit 82a9e79ef132 ("PCI: pciehp: remove hpc_ops"). Clarify
- if there was a specific reason not to apply the same change to shpchp.
-
* The hardirq handler shpc_isr() queues events on a workqueue. It can be
simplified by converting it to threaded IRQ handling. Use pciehp as a
template.
diff --git a/drivers/pci/hotplug/acpiphp.h b/drivers/pci/hotplug/acpiphp.h
index 1f8ab4377ad8..5745be6018e1 100644
--- a/drivers/pci/hotplug/acpiphp.h
+++ b/drivers/pci/hotplug/acpiphp.h
@@ -178,7 +178,6 @@ void acpiphp_unregister_hotplug_slot(struct acpiphp_slot *slot);
int acpiphp_enable_slot(struct acpiphp_slot *slot);
int acpiphp_disable_slot(struct acpiphp_slot *slot);
u8 acpiphp_get_power_status(struct acpiphp_slot *slot);
-u8 acpiphp_get_attention_status(struct acpiphp_slot *slot);
u8 acpiphp_get_latch_status(struct acpiphp_slot *slot);
u8 acpiphp_get_adapter_status(struct acpiphp_slot *slot);
diff --git a/drivers/pci/hotplug/acpiphp_ampere_altra.c b/drivers/pci/hotplug/acpiphp_ampere_altra.c
new file mode 100644
index 000000000000..70dbc0431fc6
--- /dev/null
+++ b/drivers/pci/hotplug/acpiphp_ampere_altra.c
@@ -0,0 +1,128 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ACPI PCI Hot Plug Extension for Ampere Altra. Allows control of
+ * attention LEDs via requests to system firmware.
+ *
+ * Copyright (C) 2023 Ampere Computing LLC
+ */
+
+#define pr_fmt(fmt) "acpiphp_ampere_altra: " fmt
+
+#include <linux/arm-smccc.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/pci_hotplug.h>
+#include <linux/platform_device.h>
+
+#include "acpiphp.h"
+
+#define HANDLE_OPEN 0xb0200000
+#define HANDLE_CLOSE 0xb0300000
+#define REQUEST 0xf0700000
+#define LED_CMD 0x00000004
+#define LED_ATTENTION 0x00000002
+#define LED_SET_ON 0x00000001
+#define LED_SET_OFF 0x00000002
+#define LED_SET_BLINK 0x00000003
+
+static u32 led_service_id[4];
+
+static int led_status(u8 status)
+{
+ switch (status) {
+ case 1: return LED_SET_ON;
+ case 2: return LED_SET_BLINK;
+ default: return LED_SET_OFF;
+ }
+}
+
+static int set_attention_status(struct hotplug_slot *slot, u8 status)
+{
+ struct arm_smccc_res res;
+ struct pci_bus *bus;
+ struct pci_dev *root_port;
+ unsigned long flags;
+ u32 handle;
+ int ret = 0;
+
+ bus = slot->pci_slot->bus;
+ root_port = pcie_find_root_port(bus->self);
+ if (!root_port)
+ return -ENODEV;
+
+ local_irq_save(flags);
+ arm_smccc_smc(HANDLE_OPEN, led_service_id[0], led_service_id[1],
+ led_service_id[2], led_service_id[3], 0, 0, 0, &res);
+ if (res.a0) {
+ ret = -ENODEV;
+ goto out;
+ }
+ handle = res.a1 & 0xffff0000;
+
+ arm_smccc_smc(REQUEST, LED_CMD, led_status(status), LED_ATTENTION,
+ (PCI_SLOT(root_port->devfn) << 4) | (pci_domain_nr(bus) & 0xf),
+ 0, 0, handle, &res);
+ if (res.a0)
+ ret = -ENODEV;
+
+ arm_smccc_smc(HANDLE_CLOSE, handle, 0, 0, 0, 0, 0, 0, &res);
+
+ out:
+ local_irq_restore(flags);
+ return ret;
+}
+
+static int get_attention_status(struct hotplug_slot *slot, u8 *status)
+{
+ return -EINVAL;
+}
+
+static struct acpiphp_attention_info ampere_altra_attn = {
+ .set_attn = set_attention_status,
+ .get_attn = get_attention_status,
+ .owner = THIS_MODULE,
+};
+
+static int altra_led_probe(struct platform_device *pdev)
+{
+ struct fwnode_handle *fwnode = dev_fwnode(&pdev->dev);
+ int ret;
+
+ ret = fwnode_property_read_u32_array(fwnode, "uuid", led_service_id, 4);
+ if (ret) {
+ dev_err(&pdev->dev, "can't find uuid\n");
+ return ret;
+ }
+
+ ret = acpiphp_register_attention(&ampere_altra_attn);
+ if (ret) {
+ dev_err(&pdev->dev, "can't register driver\n");
+ return ret;
+ }
+ return 0;
+}
+
+static void altra_led_remove(struct platform_device *pdev)
+{
+ acpiphp_unregister_attention(&ampere_altra_attn);
+}
+
+static const struct acpi_device_id altra_led_ids[] = {
+ { "AMPC0008", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, altra_led_ids);
+
+static struct platform_driver altra_led_driver = {
+ .driver = {
+ .name = "ampere-altra-leds",
+ .acpi_match_table = altra_led_ids,
+ },
+ .probe = altra_led_probe,
+ .remove = altra_led_remove,
+};
+module_platform_driver(altra_led_driver);
+
+MODULE_AUTHOR("D Scott Phillips <scott@os.amperecomputing.com>");
+MODULE_DESCRIPTION("ACPI PCI Hot Plug Extension for Ampere Altra");
+MODULE_LICENSE("GPL");
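Note on the new driver: it registers attention callbacks with acpiphp and drives the slot LED through SiP firmware calls, opening a handle, issuing one LED request that identifies the slot, and closing the handle again. A hedged sketch of the slot-argument packing used in set_attention_status() above (the helper name is mine, not part of the patch):

/* Root-port device number goes in bits [7:4], PCI domain in bits [3:0]. */
static u32 altra_led_slot_arg(struct pci_dev *root_port, struct pci_bus *bus)
{
	return (PCI_SLOT(root_port->devfn) << 4) | (pci_domain_nr(bus) & 0xf);
}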
diff --git a/drivers/pci/hotplug/acpiphp_core.c b/drivers/pci/hotplug/acpiphp_core.c
index c02257f4b61c..9dad14e80bcf 100644
--- a/drivers/pci/hotplug/acpiphp_core.c
+++ b/drivers/pci/hotplug/acpiphp_core.c
@@ -78,8 +78,7 @@ int acpiphp_register_attention(struct acpiphp_attention_info *info)
{
int retval = -EINVAL;
- if (info && info->owner && info->set_attn &&
- info->get_attn && !attention_info) {
+ if (info && info->set_attn && info->get_attn && !attention_info) {
retval = 0;
attention_info = info;
}
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index 328d1e416014..5b1f271c6034 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -498,6 +498,7 @@ static void enable_slot(struct acpiphp_slot *slot, bool bridge)
acpiphp_native_scan_bridge(dev);
}
} else {
+ LIST_HEAD(add_list);
int max, pass;
acpiphp_rescan_slot(slot);
@@ -511,10 +512,12 @@ static void enable_slot(struct acpiphp_slot *slot, bool bridge)
if (pass && dev->subordinate) {
check_hotplug_bridge(slot, dev);
pcibios_resource_survey_bus(dev->subordinate);
+ __pci_bus_size_bridges(dev->subordinate,
+ &add_list);
}
}
}
- pci_assign_unassigned_bridge_resources(bus->self);
+ __pci_bus_assign_resources(bus, &add_list, NULL);
}
acpiphp_sanitize_bus(bus);
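The enable_slot() rework above replaces the per-bridge pci_assign_unassigned_bridge_resources() call with the two-phase pattern the PCI core uses internally: each hotplug bridge below the slot first records its extra resource needs on a local add_list, then the whole bus is assigned in one pass. A condensed sketch of that pattern, assuming a helper name of my own (the real code also surveys resources and checks hotplug bridges per device):

static void example_size_and_assign(struct pci_bus *bus)
{
	LIST_HEAD(add_list);
	struct pci_dev *dev;

	/* phase 1: collect additional requests for every subordinate bus */
	list_for_each_entry(dev, &bus->devices, bus_list)
		if (dev->subordinate)
			__pci_bus_size_bridges(dev->subordinate, &add_list);

	/* phase 2: assign everything on the slot's bus in one pass */
	__pci_bus_assign_resources(bus, &add_list, NULL);
}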
diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
index 8f3a0a33f362..18e01cd55a8e 100644
--- a/drivers/pci/hotplug/acpiphp_ibm.c
+++ b/drivers/pci/hotplug/acpiphp_ibm.c
@@ -84,7 +84,7 @@ static int ibm_get_attention_status(struct hotplug_slot *slot, u8 *status);
static void ibm_handle_events(acpi_handle handle, u32 event, void *context);
static int ibm_get_table_from_acpi(char **bufp);
static ssize_t ibm_read_apci_table(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buffer, loff_t pos, size_t size);
static acpi_status __init ibm_find_acpi_device(acpi_handle handle,
u32 lvl, void *context, void **rv);
@@ -353,7 +353,7 @@ read_table_done:
* our solution is to only allow reading the table in all at once.
*/
static ssize_t ibm_read_apci_table(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
+ const struct bin_attribute *bin_attr,
char *buffer, loff_t pos, size_t size)
{
int bytes_read = -EINVAL;
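The two hunks above only adapt ibm_read_apci_table() to the constified sysfs binary-attribute callback prototype; the attribute descriptor is now passed as const and must not be modified from the callback. A minimal sketch of the new signature (attribute and function name are hypothetical, not from the patch):

static ssize_t example_bin_read(struct file *filp, struct kobject *kobj,
				const struct bin_attribute *attr,
				char *buf, loff_t off, size_t count)
{
	/* copy at most "count" bytes of backing data into "buf" */
	return 0;
}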
diff --git a/drivers/pci/hotplug/cpci_hotplug.h b/drivers/pci/hotplug/cpci_hotplug.h
index 3fdd1b9bd8c3..a31d6b62f523 100644
--- a/drivers/pci/hotplug/cpci_hotplug.h
+++ b/drivers/pci/hotplug/cpci_hotplug.h
@@ -44,9 +44,6 @@ struct cpci_hp_controller_ops {
int (*enable_irq)(void);
int (*disable_irq)(void);
int (*check_irq)(void *dev_id);
- int (*hardware_test)(struct slot *slot, u32 value);
- u8 (*get_power)(struct slot *slot);
- int (*set_power)(struct slot *slot, int value);
};
struct cpci_hp_controller {
@@ -83,8 +80,6 @@ extern int cpci_debug;
* board/chassis drivers.
*/
u8 cpci_get_attention_status(struct slot *slot);
-u8 cpci_get_latch_status(struct slot *slot);
-u8 cpci_get_adapter_status(struct slot *slot);
u16 cpci_get_hs_csr(struct slot *slot);
int cpci_set_attention_status(struct slot *slot, int status);
int cpci_check_and_clear_ins(struct slot *slot);
diff --git a/drivers/pci/hotplug/cpci_hotplug_core.c b/drivers/pci/hotplug/cpci_hotplug_core.c
index d0559d2faf50..dd93e53ea7c2 100644
--- a/drivers/pci/hotplug/cpci_hotplug_core.c
+++ b/drivers/pci/hotplug/cpci_hotplug_core.c
@@ -71,13 +71,10 @@ static int
enable_slot(struct hotplug_slot *hotplug_slot)
{
struct slot *slot = to_slot(hotplug_slot);
- int retval = 0;
dbg("%s - physical_slot = %s", __func__, slot_name(slot));
- if (controller->ops->set_power)
- retval = controller->ops->set_power(slot, 1);
- return retval;
+ return 0;
}
static int
@@ -109,12 +106,6 @@ disable_slot(struct hotplug_slot *hotplug_slot)
}
cpci_led_on(slot);
- if (controller->ops->set_power) {
- retval = controller->ops->set_power(slot, 0);
- if (retval)
- goto disable_error;
- }
-
slot->adapter_status = 0;
if (slot->extracting) {
@@ -129,11 +120,7 @@ disable_error:
static u8
cpci_get_power_status(struct slot *slot)
{
- u8 power = 1;
-
- if (controller->ops->get_power)
- power = controller->ops->get_power(slot);
- return power;
+ return 1;
}
static int
diff --git a/drivers/pci/hotplug/cpqphp_core.c b/drivers/pci/hotplug/cpqphp_core.c
index c94b40e64baf..47a3ed16159a 100644
--- a/drivers/pci/hotplug/cpqphp_core.c
+++ b/drivers/pci/hotplug/cpqphp_core.c
@@ -328,7 +328,7 @@ get_slot_mapping(struct pci_bus *bus, u8 bus_num, u8 dev_num, u8 *slot)
} else {
/* Did not get a match on the target PCI device. Check
* if the current IRQ table entry is a PCI-to-PCI
- * bridge device. If so, and it's secondary bus
+ * bridge device. If so, and its secondary bus
* matches the bus number for the target device, I need
* to save the bridge's slot number. If I can not find
* an entry for the target device, I will have to
diff --git a/drivers/pci/hotplug/cpqphp_ctrl.c b/drivers/pci/hotplug/cpqphp_ctrl.c
index e429ecddc8fe..760a5dec0431 100644
--- a/drivers/pci/hotplug/cpqphp_ctrl.c
+++ b/drivers/pci/hotplug/cpqphp_ctrl.c
@@ -1794,7 +1794,7 @@ static void interrupt_event_handler(struct controller *ctrl)
} else if (ctrl->event_queue[loop].event_type ==
INT_BUTTON_CANCEL) {
dbg("button cancel\n");
- del_timer(&p_slot->task_event);
+ timer_delete(&p_slot->task_event);
mutex_lock(&ctrl->crit_sect);
@@ -1883,7 +1883,7 @@ void cpqhp_pushbutton_thread(struct timer_list *t)
{
u8 hp_slot;
struct pci_func *func;
- struct slot *p_slot = from_timer(p_slot, t, task_event);
+ struct slot *p_slot = timer_container_of(p_slot, t, task_event);
struct controller *ctrl = (struct controller *) p_slot->ctrl;
pushbutton_pending = NULL;
@@ -2059,7 +2059,7 @@ int cpqhp_process_SS(struct controller *ctrl, struct pci_func *func)
return rc;
/* If it's a bridge, check the VGA Enable bit */
- if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
+ if ((header_type & PCI_HEADER_TYPE_MASK) == PCI_HEADER_TYPE_BRIDGE) {
rc = pci_bus_read_config_byte(pci_bus, devfn, PCI_BRIDGE_CONTROL, &BCR);
if (rc)
return rc;
@@ -2342,7 +2342,7 @@ static int configure_new_function(struct controller *ctrl, struct pci_func *func
if (rc)
return rc;
- if ((temp_byte & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
+ if ((temp_byte & PCI_HEADER_TYPE_MASK) == PCI_HEADER_TYPE_BRIDGE) {
/* set Primary bus */
dbg("set Primary bus = %d\n", func->bus);
rc = pci_bus_write_config_byte(pci_bus, devfn, PCI_PRIMARY_BUS, func->bus);
@@ -2739,7 +2739,7 @@ static int configure_new_function(struct controller *ctrl, struct pci_func *func
* PCI_BRIDGE_CTL_SERR |
* PCI_BRIDGE_CTL_NO_ISA */
rc = pci_bus_write_config_word(pci_bus, devfn, PCI_BRIDGE_CONTROL, command);
- } else if ((temp_byte & 0x7F) == PCI_HEADER_TYPE_NORMAL) {
+ } else if ((temp_byte & PCI_HEADER_TYPE_MASK) == PCI_HEADER_TYPE_NORMAL) {
/* Standard device */
rc = pci_bus_read_config_byte(pci_bus, devfn, 0x0B, &class_code);
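Throughout cpqphp the open-coded masks 0x7F and 0x80 are replaced with the named constants from <uapi/linux/pci_regs.h>. A short sketch of decoding a header-type byte with those constants (helper name is mine, not part of the patch):

static bool example_is_bridge(struct pci_bus *bus, unsigned int devfn, bool *mfd)
{
	u8 hdr;

	pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr);
	*mfd = hdr & PCI_HEADER_TYPE_MFD;		/* bit 7: multi-function */
	return (hdr & PCI_HEADER_TYPE_MASK) ==		/* bits 6:0: layout */
	       PCI_HEADER_TYPE_BRIDGE;
}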
diff --git a/drivers/pci/hotplug/cpqphp_pci.c b/drivers/pci/hotplug/cpqphp_pci.c
index 3b248426a9f4..88929360fe77 100644
--- a/drivers/pci/hotplug/cpqphp_pci.c
+++ b/drivers/pci/hotplug/cpqphp_pci.c
@@ -12,8 +12,11 @@
*
*/
+#define pr_fmt(fmt) "cpqphp: " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
+#include <linux/printk.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
@@ -132,18 +135,6 @@ int cpqhp_unconfigure_device(struct pci_func *func)
return 0;
}
-static int PCI_RefinedAccessConfig(struct pci_bus *bus, unsigned int devfn, u8 offset, u32 *value)
-{
- u32 vendID = 0;
-
- if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, &vendID) == -1)
- return -1;
- if (vendID == 0xffffffff)
- return -1;
- return pci_bus_read_config_dword(bus, devfn, offset, value);
-}
-
-
/*
* cpqhp_set_irq
*
@@ -202,13 +193,16 @@ static int PCI_ScanBusForNonBridge(struct controller *ctrl, u8 bus_num, u8 *dev_
{
u16 tdevice;
u32 work;
- u8 tbus;
+ int ret = -1;
ctrl->pci_bus->number = bus_num;
for (tdevice = 0; tdevice < 0xFF; tdevice++) {
/* Scan for access first */
- if (PCI_RefinedAccessConfig(ctrl->pci_bus, tdevice, 0x08, &work) == -1)
+ if (!pci_bus_read_dev_vendor_id(ctrl->pci_bus, tdevice, &work, 0))
+ continue;
+ ret = pci_bus_read_config_dword(ctrl->pci_bus, tdevice, PCI_CLASS_REVISION, &work);
+ if (ret)
continue;
dbg("Looking for nonbridge bus_num %d dev_num %d\n", bus_num, tdevice);
/* Yep we got one. Not a bridge ? */
@@ -216,23 +210,20 @@ static int PCI_ScanBusForNonBridge(struct controller *ctrl, u8 bus_num, u8 *dev_
*dev_num = tdevice;
dbg("found it !\n");
return 0;
- }
- }
- for (tdevice = 0; tdevice < 0xFF; tdevice++) {
- /* Scan for access first */
- if (PCI_RefinedAccessConfig(ctrl->pci_bus, tdevice, 0x08, &work) == -1)
- continue;
- dbg("Looking for bridge bus_num %d dev_num %d\n", bus_num, tdevice);
- /* Yep we got one. bridge ? */
- if ((work >> 8) == PCI_TO_PCI_BRIDGE_CLASS) {
- pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(tdevice, 0), PCI_SECONDARY_BUS, &tbus);
- /* XXX: no recursion, wtf? */
- dbg("Recurse on bus_num %d tdevice %d\n", tbus, tdevice);
- return 0;
+ } else {
+ /*
+ * XXX: Code whose debug printout indicated
+ * recursion to buses underneath bridges might be
+ * necessary was removed because it never did
+ * any recursion.
+ */
+ ret = 0;
+ pr_warn("missing feature: bridge scan recursion not implemented\n");
}
}
- return -1;
+
+ return ret;
}
@@ -253,7 +244,7 @@ static int PCI_GetBusDevHelper(struct controller *ctrl, u8 *bus_num, u8 *dev_num
*dev_num = tdevice;
ctrl->pci_bus->number = tbus;
pci_bus_read_config_dword(ctrl->pci_bus, *dev_num, PCI_VENDOR_ID, &work);
- if (!nobridge || (work == 0xffffffff))
+ if (!nobridge || PCI_POSSIBLE_ERROR(work))
return 0;
dbg("bus_num %d devfn %d\n", *bus_num, *dev_num);
@@ -363,7 +354,7 @@ int cpqhp_save_config(struct controller *ctrl, int busnumber, int is_hot_plug)
return rc;
/* If multi-function device, set max_functions to 8 */
- if (header_type & 0x80)
+ if (header_type & PCI_HEADER_TYPE_MFD)
max_functions = 8;
else
max_functions = 1;
@@ -372,7 +363,7 @@ int cpqhp_save_config(struct controller *ctrl, int busnumber, int is_hot_plug)
do {
DevError = 0;
- if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
+ if ((header_type & PCI_HEADER_TYPE_MASK) == PCI_HEADER_TYPE_BRIDGE) {
/* Recurse the subordinate bus
* get the subordinate bus number
*/
@@ -487,13 +478,13 @@ int cpqhp_save_slot_config(struct controller *ctrl, struct pci_func *new_slot)
pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(new_slot->device, 0), 0x0B, &class_code);
pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(new_slot->device, 0), PCI_HEADER_TYPE, &header_type);
- if (header_type & 0x80) /* Multi-function device */
+ if (header_type & PCI_HEADER_TYPE_MFD)
max_functions = 8;
else
max_functions = 1;
while (function < max_functions) {
- if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
+ if ((header_type & PCI_HEADER_TYPE_MASK) == PCI_HEADER_TYPE_BRIDGE) {
/* Recurse the subordinate bus */
pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(new_slot->device, function), PCI_SECONDARY_BUS, &secondary_bus);
@@ -571,7 +562,7 @@ int cpqhp_save_base_addr_length(struct controller *ctrl, struct pci_func *func)
/* Check for Bridge */
pci_bus_read_config_byte(pci_bus, devfn, PCI_HEADER_TYPE, &header_type);
- if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
+ if ((header_type & PCI_HEADER_TYPE_MASK) == PCI_HEADER_TYPE_BRIDGE) {
pci_bus_read_config_byte(pci_bus, devfn, PCI_SECONDARY_BUS, &secondary_bus);
sub_bus = (int) secondary_bus;
@@ -625,7 +616,7 @@ int cpqhp_save_base_addr_length(struct controller *ctrl, struct pci_func *func)
} /* End of base register loop */
- } else if ((header_type & 0x7F) == 0x00) {
+ } else if ((header_type & PCI_HEADER_TYPE_MASK) == PCI_HEADER_TYPE_NORMAL) {
/* Figure out IO and memory base lengths */
for (cloop = 0x10; cloop <= 0x24; cloop += 4) {
temp_register = 0xFFFFFFFF;
@@ -723,7 +714,7 @@ int cpqhp_save_used_resources(struct controller *ctrl, struct pci_func *func)
/* Check for Bridge */
pci_bus_read_config_byte(pci_bus, devfn, PCI_HEADER_TYPE, &header_type);
- if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
+ if ((header_type & PCI_HEADER_TYPE_MASK) == PCI_HEADER_TYPE_BRIDGE) {
/* Clear Bridge Control Register */
command = 0x00;
pci_bus_write_config_word(pci_bus, devfn, PCI_BRIDGE_CONTROL, command);
@@ -858,7 +849,7 @@ int cpqhp_save_used_resources(struct controller *ctrl, struct pci_func *func)
}
} /* End of base register loop */
/* Standard header */
- } else if ((header_type & 0x7F) == 0x00) {
+ } else if ((header_type & PCI_HEADER_TYPE_MASK) == PCI_HEADER_TYPE_NORMAL) {
/* Figure out IO and memory base lengths */
for (cloop = 0x10; cloop <= 0x24; cloop += 4) {
pci_bus_read_config_dword(pci_bus, devfn, cloop, &save_base);
@@ -975,7 +966,7 @@ int cpqhp_configure_board(struct controller *ctrl, struct pci_func *func)
pci_bus_read_config_byte(pci_bus, devfn, PCI_HEADER_TYPE, &header_type);
/* If this is a bridge device, restore subordinate devices */
- if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
+ if ((header_type & PCI_HEADER_TYPE_MASK) == PCI_HEADER_TYPE_BRIDGE) {
pci_bus_read_config_byte(pci_bus, devfn, PCI_SECONDARY_BUS, &secondary_bus);
sub_bus = (int) secondary_bus;
@@ -1067,7 +1058,7 @@ int cpqhp_valid_replace(struct controller *ctrl, struct pci_func *func)
/* Check for Bridge */
pci_bus_read_config_byte(pci_bus, devfn, PCI_HEADER_TYPE, &header_type);
- if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
+ if ((header_type & PCI_HEADER_TYPE_MASK) == PCI_HEADER_TYPE_BRIDGE) {
/* In order to continue checking, we must program the
* bus registers in the bridge to respond to accesses
* for its subordinate bus(es)
@@ -1090,7 +1081,7 @@ int cpqhp_valid_replace(struct controller *ctrl, struct pci_func *func)
}
/* Check to see if it is a standard config header */
- else if ((header_type & 0x7F) == PCI_HEADER_TYPE_NORMAL) {
+ else if ((header_type & PCI_HEADER_TYPE_MASK) == PCI_HEADER_TYPE_NORMAL) {
/* Check subsystem vendor and ID */
pci_bus_read_config_dword(pci_bus, devfn, PCI_SUBSYSTEM_VENDOR_ID, &temp_register);
@@ -1311,7 +1302,7 @@ int cpqhp_find_available_resources(struct controller *ctrl, void __iomem *rom_st
dbg("found io_node(base, length) = %x, %x\n",
io_node->base, io_node->length);
- dbg("populated slot =%d \n", populated_slot);
+ dbg("populated slot = %d\n", populated_slot);
if (!populated_slot) {
io_node->next = ctrl->io_head;
ctrl->io_head = io_node;
@@ -1334,7 +1325,7 @@ int cpqhp_find_available_resources(struct controller *ctrl, void __iomem *rom_st
dbg("found mem_node(base, length) = %x, %x\n",
mem_node->base, mem_node->length);
- dbg("populated slot =%d \n", populated_slot);
+ dbg("populated slot = %d\n", populated_slot);
if (!populated_slot) {
mem_node->next = ctrl->mem_head;
ctrl->mem_head = mem_node;
@@ -1358,7 +1349,7 @@ int cpqhp_find_available_resources(struct controller *ctrl, void __iomem *rom_st
p_mem_node->length = pre_mem_length << 16;
dbg("found p_mem_node(base, length) = %x, %x\n",
p_mem_node->base, p_mem_node->length);
- dbg("populated slot =%d \n", populated_slot);
+ dbg("populated slot = %d\n", populated_slot);
if (!populated_slot) {
p_mem_node->next = ctrl->p_mem_head;
@@ -1382,7 +1373,7 @@ int cpqhp_find_available_resources(struct controller *ctrl, void __iomem *rom_st
bus_node->length = max_bus - secondary_bus + 1;
dbg("found bus_node(base, length) = %x, %x\n",
bus_node->base, bus_node->length);
- dbg("populated slot =%d \n", populated_slot);
+ dbg("populated slot = %d\n", populated_slot);
if (!populated_slot) {
bus_node->next = ctrl->bus_head;
ctrl->bus_head = bus_node;
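PCI_RefinedAccessConfig() is dropped above in favour of the core helper pci_bus_read_dev_vendor_id(), which already handles absent devices (all-ones or error responses) and retries. A hedged sketch of the resulting probe step in PCI_ScanBusForNonBridge() (function name hypothetical):

static bool example_probe_devfn(struct pci_bus *bus, unsigned int devfn, u32 *class)
{
	u32 id;

	if (!pci_bus_read_dev_vendor_id(bus, devfn, &id, 0))
		return false;	/* nothing responds at this devfn */

	/* device present: fetch class/revision for the bridge check */
	return !pci_bus_read_config_dword(bus, devfn, PCI_CLASS_REVISION, class);
}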
diff --git a/drivers/pci/hotplug/cpqphp_sysfs.c b/drivers/pci/hotplug/cpqphp_sysfs.c
index fed1360ee9b1..6143ebf71f21 100644
--- a/drivers/pci/hotplug/cpqphp_sysfs.c
+++ b/drivers/pci/hotplug/cpqphp_sysfs.c
@@ -123,7 +123,6 @@ static int spew_debug_info(struct controller *ctrl, char *data, int size)
struct ctrl_dbg {
int size;
char *data;
- struct controller *ctrl;
};
#define MAX_OUTPUT (4*PAGE_SIZE)
diff --git a/drivers/pci/hotplug/ibmphp.h b/drivers/pci/hotplug/ibmphp.h
index 0399c60d2ec1..c248a09be7b5 100644
--- a/drivers/pci/hotplug/ibmphp.h
+++ b/drivers/pci/hotplug/ibmphp.h
@@ -17,6 +17,7 @@
*/
#include <linux/pci_hotplug.h>
+#include <linux/pci_regs.h>
extern int ibmphp_debug;
@@ -264,8 +265,6 @@ extern struct list_head ibmphp_slot_head;
void ibmphp_free_ebda_hpc_queue(void);
int ibmphp_access_ebda(void);
struct slot *ibmphp_get_slot_from_physical_num(u8);
-int ibmphp_get_total_hp_slots(void);
-void ibmphp_free_ibm_slot(struct slot *);
void ibmphp_free_bus_info_queue(void);
void ibmphp_free_ebda_pci_rsrc_queue(void);
struct bus_info *ibmphp_find_same_bus_num(u32);
@@ -288,8 +287,8 @@ int ibmphp_register_pci(void);
/* pci specific defines */
#define PCI_VENDOR_ID_NOTVALID 0xFFFF
-#define PCI_HEADER_TYPE_MULTIDEVICE 0x80
-#define PCI_HEADER_TYPE_MULTIBRIDGE 0x81
+#define PCI_HEADER_TYPE_MULTIDEVICE (PCI_HEADER_TYPE_MFD|PCI_HEADER_TYPE_NORMAL)
+#define PCI_HEADER_TYPE_MULTIBRIDGE (PCI_HEADER_TYPE_MFD|PCI_HEADER_TYPE_BRIDGE)
#define LATENCY 0x64
#define CACHE 64
diff --git a/drivers/pci/hotplug/ibmphp_hpc.c b/drivers/pci/hotplug/ibmphp_hpc.c
index a5720d12e573..2324167656a6 100644
--- a/drivers/pci/hotplug/ibmphp_hpc.c
+++ b/drivers/pci/hotplug/ibmphp_hpc.c
@@ -124,7 +124,7 @@ static u8 i2c_ctrl_read(struct controller *ctlr_ptr, void __iomem *WPGBbar, u8 i
unsigned long ultemp;
unsigned long data; // actual data HILO format
- debug_polling("%s - Entry WPGBbar[%p] index[%x] \n", __func__, WPGBbar, index);
+ debug_polling("%s - Entry WPGBbar[%p] index[%x]\n", __func__, WPGBbar, index);
//--------------------------------------------------------------------
// READ - step 1
@@ -147,7 +147,7 @@ static u8 i2c_ctrl_read(struct controller *ctlr_ptr, void __iomem *WPGBbar, u8 i
ultemp = ultemp << 8;
data |= ultemp;
} else {
- err("this controller type is not supported \n");
+ err("this controller type is not supported\n");
return HPC_ERROR;
}
@@ -258,7 +258,7 @@ static u8 i2c_ctrl_write(struct controller *ctlr_ptr, void __iomem *WPGBbar, u8
ultemp = ultemp << 8;
data |= ultemp;
} else {
- err("this controller type is not supported \n");
+ err("this controller type is not supported\n");
return HPC_ERROR;
}
diff --git a/drivers/pci/hotplug/ibmphp_pci.c b/drivers/pci/hotplug/ibmphp_pci.c
index 754c3f23282e..eeb412cbd9fe 100644
--- a/drivers/pci/hotplug/ibmphp_pci.c
+++ b/drivers/pci/hotplug/ibmphp_pci.c
@@ -329,7 +329,7 @@ error:
static int configure_device(struct pci_func *func)
{
u32 bar[6];
- u32 address[] = {
+ static const u32 address[] = {
PCI_BASE_ADDRESS_0,
PCI_BASE_ADDRESS_1,
PCI_BASE_ADDRESS_2,
@@ -564,7 +564,7 @@ static int configure_bridge(struct pci_func **func_passed, u8 slotno)
struct resource_node *pfmem = NULL;
struct resource_node *bus_pfmem[2] = {NULL, NULL};
struct bus_node *bus;
- u32 address[] = {
+ static const u32 address[] = {
PCI_BASE_ADDRESS_0,
PCI_BASE_ADDRESS_1,
0
@@ -1053,7 +1053,7 @@ static struct res_needed *scan_behind_bridge(struct pci_func *func, u8 busno)
int howmany = 0; /*this is to see if there are any devices behind the bridge */
u32 bar[6], class;
- u32 address[] = {
+ static const u32 address[] = {
PCI_BASE_ADDRESS_0,
PCI_BASE_ADDRESS_1,
PCI_BASE_ADDRESS_2,
@@ -1087,7 +1087,7 @@ static struct res_needed *scan_behind_bridge(struct pci_func *func, u8 busno)
pci_bus_read_config_dword(ibmphp_pci_bus, devfn, PCI_CLASS_REVISION, &class);
debug("hdr_type behind the bridge is %x\n", hdr_type);
- if ((hdr_type & 0x7f) == PCI_HEADER_TYPE_BRIDGE) {
+ if ((hdr_type & PCI_HEADER_TYPE_MASK) == PCI_HEADER_TYPE_BRIDGE) {
err("embedded bridges not supported for hot-plugging.\n");
amount->not_correct = 1;
return amount;
@@ -1182,7 +1182,7 @@ static struct res_needed *scan_behind_bridge(struct pci_func *func, u8 busno)
static int unconfigure_boot_device(u8 busno, u8 device, u8 function)
{
u32 start_address;
- u32 address[] = {
+ static const u32 address[] = {
PCI_BASE_ADDRESS_0,
PCI_BASE_ADDRESS_1,
PCI_BASE_ADDRESS_2,
@@ -1310,7 +1310,7 @@ static int unconfigure_boot_bridge(u8 busno, u8 device, u8 function)
struct resource_node *mem = NULL;
struct resource_node *pfmem = NULL;
struct bus_node *bus;
- u32 address[] = {
+ static const u32 address[] = {
PCI_BASE_ADDRESS_0,
PCI_BASE_ADDRESS_1,
0
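The address[] tables above never change at run time, so marking them static const moves them into read-only data instead of rebuilding them on the stack on every call. A tiny before/after sketch of the change:

/* before: initialized on the stack on each call */
u32 address_old[] = { PCI_BASE_ADDRESS_0, PCI_BASE_ADDRESS_1, 0 };

/* after: emitted once into .rodata and shared by all calls */
static const u32 address_new[] = { PCI_BASE_ADDRESS_0, PCI_BASE_ADDRESS_1, 0 };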
diff --git a/drivers/pci/hotplug/octep_hp.c b/drivers/pci/hotplug/octep_hp.c
new file mode 100644
index 000000000000..2bce7296c050
--- /dev/null
+++ b/drivers/pci/hotplug/octep_hp.c
@@ -0,0 +1,427 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2024 Marvell. */
+
+#include <linux/cleanup.h>
+#include <linux/container_of.h>
+#include <linux/delay.h>
+#include <linux/dev_printk.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/pci_hotplug.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+
+#define OCTEP_HP_INTR_OFFSET(x) (0x20400 + ((x) << 4))
+#define OCTEP_HP_INTR_VECTOR(x) (16 + (x))
+#define OCTEP_HP_DRV_NAME "octep_hp"
+
+/*
+ * Type of MSI-X interrupts. OCTEP_HP_INTR_VECTOR() and
+ * OCTEP_HP_INTR_OFFSET() generate the vector and offset for an interrupt
+ * type.
+ */
+enum octep_hp_intr_type {
+ OCTEP_HP_INTR_INVALID = -1,
+ OCTEP_HP_INTR_ENA = 0,
+ OCTEP_HP_INTR_DIS = 1,
+ OCTEP_HP_INTR_MAX = 2,
+};
+
+struct octep_hp_cmd {
+ struct list_head list;
+ enum octep_hp_intr_type intr_type;
+ u64 intr_val;
+};
+
+struct octep_hp_slot {
+ struct list_head list;
+ struct hotplug_slot slot;
+ u16 slot_number;
+ struct pci_dev *hp_pdev;
+ unsigned int hp_devfn;
+ struct octep_hp_controller *ctrl;
+};
+
+struct octep_hp_intr_info {
+ enum octep_hp_intr_type type;
+ int number;
+ char name[16];
+};
+
+struct octep_hp_controller {
+ void __iomem *base;
+ struct pci_dev *pdev;
+ struct octep_hp_intr_info intr[OCTEP_HP_INTR_MAX];
+ struct work_struct work;
+ struct list_head slot_list;
+ struct mutex slot_lock; /* Protects slot_list */
+ struct list_head hp_cmd_list;
+ spinlock_t hp_cmd_lock; /* Protects hp_cmd_list */
+};
+
+static void octep_hp_enable_pdev(struct octep_hp_controller *hp_ctrl,
+ struct octep_hp_slot *hp_slot)
+{
+ guard(mutex)(&hp_ctrl->slot_lock);
+ if (hp_slot->hp_pdev) {
+ pci_dbg(hp_slot->hp_pdev, "Slot %s is already enabled\n",
+ hotplug_slot_name(&hp_slot->slot));
+ return;
+ }
+
+ /* Scan the device and add it to the bus */
+ hp_slot->hp_pdev = pci_scan_single_device(hp_ctrl->pdev->bus,
+ hp_slot->hp_devfn);
+ pci_bus_assign_resources(hp_ctrl->pdev->bus);
+ pci_bus_add_device(hp_slot->hp_pdev);
+
+ dev_dbg(&hp_slot->hp_pdev->dev, "Enabled slot %s\n",
+ hotplug_slot_name(&hp_slot->slot));
+}
+
+static void octep_hp_disable_pdev(struct octep_hp_controller *hp_ctrl,
+ struct octep_hp_slot *hp_slot)
+{
+ guard(mutex)(&hp_ctrl->slot_lock);
+ if (!hp_slot->hp_pdev) {
+ pci_dbg(hp_ctrl->pdev, "Slot %s is already disabled\n",
+ hotplug_slot_name(&hp_slot->slot));
+ return;
+ }
+
+ pci_dbg(hp_slot->hp_pdev, "Disabling slot %s\n",
+ hotplug_slot_name(&hp_slot->slot));
+
+ /* Remove the device from the bus */
+ pci_stop_and_remove_bus_device_locked(hp_slot->hp_pdev);
+ hp_slot->hp_pdev = NULL;
+}
+
+static int octep_hp_enable_slot(struct hotplug_slot *slot)
+{
+ struct octep_hp_slot *hp_slot =
+ container_of(slot, struct octep_hp_slot, slot);
+
+ octep_hp_enable_pdev(hp_slot->ctrl, hp_slot);
+ return 0;
+}
+
+static int octep_hp_disable_slot(struct hotplug_slot *slot)
+{
+ struct octep_hp_slot *hp_slot =
+ container_of(slot, struct octep_hp_slot, slot);
+
+ octep_hp_disable_pdev(hp_slot->ctrl, hp_slot);
+ return 0;
+}
+
+static struct hotplug_slot_ops octep_hp_slot_ops = {
+ .enable_slot = octep_hp_enable_slot,
+ .disable_slot = octep_hp_disable_slot,
+};
+
+#define SLOT_NAME_SIZE 16
+static struct octep_hp_slot *
+octep_hp_register_slot(struct octep_hp_controller *hp_ctrl,
+ struct pci_dev *pdev, u16 slot_number)
+{
+ char slot_name[SLOT_NAME_SIZE];
+ struct octep_hp_slot *hp_slot;
+ int ret;
+
+ hp_slot = kzalloc(sizeof(*hp_slot), GFP_KERNEL);
+ if (!hp_slot)
+ return ERR_PTR(-ENOMEM);
+
+ hp_slot->ctrl = hp_ctrl;
+ hp_slot->hp_pdev = pdev;
+ hp_slot->hp_devfn = pdev->devfn;
+ hp_slot->slot_number = slot_number;
+ hp_slot->slot.ops = &octep_hp_slot_ops;
+
+ snprintf(slot_name, sizeof(slot_name), "octep_hp_%u", slot_number);
+ ret = pci_hp_register(&hp_slot->slot, hp_ctrl->pdev->bus,
+ PCI_SLOT(pdev->devfn), slot_name);
+ if (ret) {
+ kfree(hp_slot);
+ return ERR_PTR(ret);
+ }
+
+ pci_info(pdev, "Registered slot %s for device %s\n",
+ slot_name, pci_name(pdev));
+
+ list_add_tail(&hp_slot->list, &hp_ctrl->slot_list);
+ octep_hp_disable_pdev(hp_ctrl, hp_slot);
+
+ return hp_slot;
+}
+
+static void octep_hp_deregister_slot(void *data)
+{
+ struct octep_hp_slot *hp_slot = data;
+ struct octep_hp_controller *hp_ctrl = hp_slot->ctrl;
+
+ pci_hp_deregister(&hp_slot->slot);
+ octep_hp_enable_pdev(hp_ctrl, hp_slot);
+ list_del(&hp_slot->list);
+ kfree(hp_slot);
+}
+
+static const char *octep_hp_cmd_name(enum octep_hp_intr_type type)
+{
+ switch (type) {
+ case OCTEP_HP_INTR_ENA:
+ return "hotplug enable";
+ case OCTEP_HP_INTR_DIS:
+ return "hotplug disable";
+ default:
+ return "invalid";
+ }
+}
+
+static void octep_hp_cmd_handler(struct octep_hp_controller *hp_ctrl,
+ struct octep_hp_cmd *hp_cmd)
+{
+ struct octep_hp_slot *hp_slot;
+
+ /*
+ * Enable or disable the slots based on the slot mask.
+ * intr_val is a bit mask where each bit represents a slot.
+ */
+ list_for_each_entry(hp_slot, &hp_ctrl->slot_list, list) {
+ if (!(hp_cmd->intr_val & BIT(hp_slot->slot_number)))
+ continue;
+
+ pci_info(hp_ctrl->pdev, "Received %s command for slot %s\n",
+ octep_hp_cmd_name(hp_cmd->intr_type),
+ hotplug_slot_name(&hp_slot->slot));
+
+ switch (hp_cmd->intr_type) {
+ case OCTEP_HP_INTR_ENA:
+ octep_hp_enable_pdev(hp_ctrl, hp_slot);
+ break;
+ case OCTEP_HP_INTR_DIS:
+ octep_hp_disable_pdev(hp_ctrl, hp_slot);
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+static void octep_hp_work_handler(struct work_struct *work)
+{
+ struct octep_hp_controller *hp_ctrl;
+ struct octep_hp_cmd *hp_cmd;
+ unsigned long flags;
+
+ hp_ctrl = container_of(work, struct octep_hp_controller, work);
+
+ /* Process all the hotplug commands */
+ spin_lock_irqsave(&hp_ctrl->hp_cmd_lock, flags);
+ while (!list_empty(&hp_ctrl->hp_cmd_list)) {
+ hp_cmd = list_first_entry(&hp_ctrl->hp_cmd_list,
+ struct octep_hp_cmd, list);
+ list_del(&hp_cmd->list);
+ spin_unlock_irqrestore(&hp_ctrl->hp_cmd_lock, flags);
+
+ octep_hp_cmd_handler(hp_ctrl, hp_cmd);
+ kfree(hp_cmd);
+
+ spin_lock_irqsave(&hp_ctrl->hp_cmd_lock, flags);
+ }
+ spin_unlock_irqrestore(&hp_ctrl->hp_cmd_lock, flags);
+}
+
+static enum octep_hp_intr_type octep_hp_intr_type(struct octep_hp_intr_info *intr,
+ int irq)
+{
+ enum octep_hp_intr_type type;
+
+ for (type = OCTEP_HP_INTR_ENA; type < OCTEP_HP_INTR_MAX; type++) {
+ if (intr[type].number == irq)
+ return type;
+ }
+
+ return OCTEP_HP_INTR_INVALID;
+}
+
+static irqreturn_t octep_hp_intr_handler(int irq, void *data)
+{
+ struct octep_hp_controller *hp_ctrl = data;
+ struct pci_dev *pdev = hp_ctrl->pdev;
+ enum octep_hp_intr_type type;
+ struct octep_hp_cmd *hp_cmd;
+ u64 intr_val;
+
+ type = octep_hp_intr_type(hp_ctrl->intr, irq);
+ if (type == OCTEP_HP_INTR_INVALID) {
+ pci_err(pdev, "Invalid interrupt %d\n", irq);
+ return IRQ_HANDLED;
+ }
+
+ /* Read and clear the interrupt */
+ intr_val = readq(hp_ctrl->base + OCTEP_HP_INTR_OFFSET(type));
+ writeq(intr_val, hp_ctrl->base + OCTEP_HP_INTR_OFFSET(type));
+
+ hp_cmd = kzalloc(sizeof(*hp_cmd), GFP_ATOMIC);
+ if (!hp_cmd)
+ return IRQ_HANDLED;
+
+ hp_cmd->intr_val = intr_val;
+ hp_cmd->intr_type = type;
+
+ /* Add the command to the list and schedule the work */
+ spin_lock(&hp_ctrl->hp_cmd_lock);
+ list_add_tail(&hp_cmd->list, &hp_ctrl->hp_cmd_list);
+ spin_unlock(&hp_ctrl->hp_cmd_lock);
+ schedule_work(&hp_ctrl->work);
+
+ return IRQ_HANDLED;
+}
+
+static void octep_hp_irq_cleanup(void *data)
+{
+ struct octep_hp_controller *hp_ctrl = data;
+
+ pci_free_irq_vectors(hp_ctrl->pdev);
+ flush_work(&hp_ctrl->work);
+}
+
+static int octep_hp_request_irq(struct octep_hp_controller *hp_ctrl,
+ enum octep_hp_intr_type type)
+{
+ struct pci_dev *pdev = hp_ctrl->pdev;
+ struct octep_hp_intr_info *intr;
+ int irq;
+
+ irq = pci_irq_vector(pdev, OCTEP_HP_INTR_VECTOR(type));
+ if (irq < 0)
+ return irq;
+
+ intr = &hp_ctrl->intr[type];
+ intr->number = irq;
+ intr->type = type;
+ snprintf(intr->name, sizeof(intr->name), "octep_hp_%d", type);
+
+ return devm_request_irq(&pdev->dev, irq, octep_hp_intr_handler,
+ IRQF_SHARED, intr->name, hp_ctrl);
+}
+
+static int octep_hp_controller_setup(struct pci_dev *pdev,
+ struct octep_hp_controller *hp_ctrl)
+{
+ struct device *dev = &pdev->dev;
+ enum octep_hp_intr_type type;
+ int ret;
+
+ ret = pcim_enable_device(pdev);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to enable PCI device\n");
+
+ hp_ctrl->base = pcim_iomap_region(pdev, 0, OCTEP_HP_DRV_NAME);
+ if (IS_ERR(hp_ctrl->base))
+ return dev_err_probe(dev, PTR_ERR(hp_ctrl->base),
+ "Failed to map PCI device region\n");
+
+ pci_set_master(pdev);
+ pci_set_drvdata(pdev, hp_ctrl);
+
+ INIT_LIST_HEAD(&hp_ctrl->slot_list);
+ INIT_LIST_HEAD(&hp_ctrl->hp_cmd_list);
+ mutex_init(&hp_ctrl->slot_lock);
+ spin_lock_init(&hp_ctrl->hp_cmd_lock);
+ INIT_WORK(&hp_ctrl->work, octep_hp_work_handler);
+ hp_ctrl->pdev = pdev;
+
+ ret = pci_alloc_irq_vectors(pdev, 1,
+ OCTEP_HP_INTR_VECTOR(OCTEP_HP_INTR_MAX),
+ PCI_IRQ_MSIX);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to alloc MSI-X vectors\n");
+
+ ret = devm_add_action(&pdev->dev, octep_hp_irq_cleanup, hp_ctrl);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "Failed to add IRQ cleanup action\n");
+
+ for (type = OCTEP_HP_INTR_ENA; type < OCTEP_HP_INTR_MAX; type++) {
+ ret = octep_hp_request_irq(hp_ctrl, type);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to request IRQ for vector %d\n",
+ OCTEP_HP_INTR_VECTOR(type));
+ }
+
+ return 0;
+}
+
+static int octep_hp_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct octep_hp_controller *hp_ctrl;
+ struct pci_dev *tmp_pdev, *next;
+ struct octep_hp_slot *hp_slot;
+ u16 slot_number = 0;
+ int ret;
+
+ hp_ctrl = devm_kzalloc(&pdev->dev, sizeof(*hp_ctrl), GFP_KERNEL);
+ if (!hp_ctrl)
+ return -ENOMEM;
+
+ ret = octep_hp_controller_setup(pdev, hp_ctrl);
+ if (ret)
+ return ret;
+
+ /*
+ * Register all hotplug slots. Hotplug controller is the first function
+ * of the PCI device. The hotplug slots are the remaining functions of
+ * the PCI device. The hotplug slot functions are logically removed from
+ * the bus during probing and are re-enabled by the driver when a
+ * hotplug event is received.
+ */
+ list_for_each_entry_safe(tmp_pdev, next, &pdev->bus->devices, bus_list) {
+ if (tmp_pdev == pdev)
+ continue;
+
+ hp_slot = octep_hp_register_slot(hp_ctrl, tmp_pdev, slot_number);
+ if (IS_ERR(hp_slot))
+ return dev_err_probe(&pdev->dev, PTR_ERR(hp_slot),
+ "Failed to register hotplug slot %u\n",
+ slot_number);
+
+ ret = devm_add_action(&pdev->dev, octep_hp_deregister_slot,
+ hp_slot);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "Failed to add action for deregistering slot %u\n",
+ slot_number);
+ slot_number++;
+ }
+
+ return 0;
+}
+
+#define PCI_DEVICE_ID_CAVIUM_OCTEP_HP_CTLR 0xa0e3
+static struct pci_device_id octep_hp_pci_map[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_CAVIUM_OCTEP_HP_CTLR) },
+ { },
+};
+
+static struct pci_driver octep_hp = {
+ .name = OCTEP_HP_DRV_NAME,
+ .id_table = octep_hp_pci_map,
+ .probe = octep_hp_pci_probe,
+};
+
+module_pci_driver(octep_hp);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Marvell");
+MODULE_DESCRIPTION("Marvell OCTEON PCI Hotplug driver");
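Usage note on the new driver: pci_hp_register() exposes each slot as /sys/bus/pci/slots/octep_hp_<n>/ whose power attribute is backed by octep_hp_enable_slot()/octep_hp_disable_slot(), while firmware-initiated events arrive on the two MSI-X vectors and are deferred to the workqueue because pci_scan_single_device() and pci_stop_and_remove_bus_device_locked() may sleep. A condensed, hedged sketch of how a queued command is applied, assuming the octep_hp structures defined above:

static void example_apply_cmd(struct octep_hp_controller *hp_ctrl,
			      struct octep_hp_cmd *hp_cmd)
{
	struct octep_hp_slot *hp_slot;

	/* bit n of intr_val selects the slot whose ->slot_number is n */
	list_for_each_entry(hp_slot, &hp_ctrl->slot_list, list) {
		if (!(hp_cmd->intr_val & BIT(hp_slot->slot_number)))
			continue;
		if (hp_cmd->intr_type == OCTEP_HP_INTR_ENA)
			octep_hp_enable_pdev(hp_ctrl, hp_slot);	 /* rescan + add */
		else
			octep_hp_disable_pdev(hp_ctrl, hp_slot); /* stop + remove */
	}
}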
diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
index 058d5937d8a9..fadcf98a8a66 100644
--- a/drivers/pci/hotplug/pci_hotplug_core.c
+++ b/drivers/pci/hotplug/pci_hotplug_core.c
@@ -14,21 +14,15 @@
* Scott Murray <scottm@somanetworks.com>
*/
-#include <linux/module.h> /* try_module_get & module_put */
+#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
-#include <linux/list.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>
-#include <linux/pagemap.h>
#include <linux/init.h>
-#include <linux/mount.h>
-#include <linux/namei.h>
-#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/pci_hotplug.h>
-#include <linux/uaccess.h>
#include "../pci.h"
#include "cpci_hotplug.h"
@@ -42,20 +36,14 @@
/* local variables */
static bool debug;
-static LIST_HEAD(pci_hotplug_slot_list);
-static DEFINE_MUTEX(pci_hp_mutex);
-
/* Weee, fun with macros... */
#define GET_STATUS(name, type) \
static int get_##name(struct hotplug_slot *slot, type *value) \
{ \
const struct hotplug_slot_ops *ops = slot->ops; \
int retval = 0; \
- if (!try_module_get(slot->owner)) \
- return -ENODEV; \
if (ops->get_##name) \
retval = ops->get_##name(slot, value); \
- module_put(slot->owner); \
return retval; \
}
@@ -88,10 +76,6 @@ static ssize_t power_write_file(struct pci_slot *pci_slot, const char *buf,
power = (u8)(lpower & 0xff);
dbg("power = %d\n", power);
- if (!try_module_get(slot->owner)) {
- retval = -ENODEV;
- goto exit;
- }
switch (power) {
case 0:
if (slot->ops->disable_slot)
@@ -107,9 +91,7 @@ static ssize_t power_write_file(struct pci_slot *pci_slot, const char *buf,
err("Illegal value specified for power\n");
retval = -EINVAL;
}
- module_put(slot->owner);
-exit:
if (retval)
return retval;
return count;
@@ -146,15 +128,9 @@ static ssize_t attention_write_file(struct pci_slot *pci_slot, const char *buf,
attention = (u8)(lattention & 0xff);
dbg(" - attention = %d\n", attention);
- if (!try_module_get(slot->owner)) {
- retval = -ENODEV;
- goto exit;
- }
if (ops->set_attention_status)
retval = ops->set_attention_status(slot, attention);
- module_put(slot->owner);
-exit:
if (retval)
return retval;
return count;
@@ -212,15 +188,9 @@ static ssize_t test_write_file(struct pci_slot *pci_slot, const char *buf,
test = (u32)(ltest & 0xffffffff);
dbg("test = %d\n", test);
- if (!try_module_get(slot->owner)) {
- retval = -ENODEV;
- goto exit;
- }
if (slot->ops->hardware_test)
retval = slot->ops->hardware_test(slot, test);
- module_put(slot->owner);
-exit:
if (retval)
return retval;
return count;
@@ -231,12 +201,8 @@ static struct pci_slot_attribute hotplug_slot_attr_test = {
.store = test_write_file
};
-static bool has_power_file(struct pci_slot *pci_slot)
+static bool has_power_file(struct hotplug_slot *slot)
{
- struct hotplug_slot *slot = pci_slot->hotplug;
-
- if ((!slot) || (!slot->ops))
- return false;
if ((slot->ops->enable_slot) ||
(slot->ops->disable_slot) ||
(slot->ops->get_power_status))
@@ -244,87 +210,79 @@ static bool has_power_file(struct pci_slot *pci_slot)
return false;
}
-static bool has_attention_file(struct pci_slot *pci_slot)
+static bool has_attention_file(struct hotplug_slot *slot)
{
- struct hotplug_slot *slot = pci_slot->hotplug;
-
- if ((!slot) || (!slot->ops))
- return false;
if ((slot->ops->set_attention_status) ||
(slot->ops->get_attention_status))
return true;
return false;
}
-static bool has_latch_file(struct pci_slot *pci_slot)
+static bool has_latch_file(struct hotplug_slot *slot)
{
- struct hotplug_slot *slot = pci_slot->hotplug;
-
- if ((!slot) || (!slot->ops))
- return false;
if (slot->ops->get_latch_status)
return true;
return false;
}
-static bool has_adapter_file(struct pci_slot *pci_slot)
+static bool has_adapter_file(struct hotplug_slot *slot)
{
- struct hotplug_slot *slot = pci_slot->hotplug;
-
- if ((!slot) || (!slot->ops))
- return false;
if (slot->ops->get_adapter_status)
return true;
return false;
}
-static bool has_test_file(struct pci_slot *pci_slot)
+static bool has_test_file(struct hotplug_slot *slot)
{
- struct hotplug_slot *slot = pci_slot->hotplug;
-
- if ((!slot) || (!slot->ops))
- return false;
if (slot->ops->hardware_test)
return true;
return false;
}
-static int fs_add_slot(struct pci_slot *pci_slot)
+static int fs_add_slot(struct hotplug_slot *slot, struct pci_slot *pci_slot)
{
+ struct kobject *kobj;
int retval = 0;
/* Create symbolic link to the hotplug driver module */
- pci_hp_create_module_link(pci_slot);
+ kobj = kset_find_obj(module_kset, slot->mod_name);
+ if (kobj) {
+ retval = sysfs_create_link(&pci_slot->kobj, kobj, "module");
+ if (retval)
+ dev_err(&pci_slot->bus->dev,
+ "Error creating sysfs link (%d)\n", retval);
+ kobject_put(kobj);
+ }
- if (has_power_file(pci_slot)) {
+ if (has_power_file(slot)) {
retval = sysfs_create_file(&pci_slot->kobj,
&hotplug_slot_attr_power.attr);
if (retval)
goto exit_power;
}
- if (has_attention_file(pci_slot)) {
+ if (has_attention_file(slot)) {
retval = sysfs_create_file(&pci_slot->kobj,
&hotplug_slot_attr_attention.attr);
if (retval)
goto exit_attention;
}
- if (has_latch_file(pci_slot)) {
+ if (has_latch_file(slot)) {
retval = sysfs_create_file(&pci_slot->kobj,
&hotplug_slot_attr_latch.attr);
if (retval)
goto exit_latch;
}
- if (has_adapter_file(pci_slot)) {
+ if (has_adapter_file(slot)) {
retval = sysfs_create_file(&pci_slot->kobj,
&hotplug_slot_attr_presence.attr);
if (retval)
goto exit_adapter;
}
- if (has_test_file(pci_slot)) {
+ if (has_test_file(slot)) {
retval = sysfs_create_file(&pci_slot->kobj,
&hotplug_slot_attr_test.attr);
if (retval)
@@ -334,62 +292,51 @@ static int fs_add_slot(struct pci_slot *pci_slot)
goto exit;
exit_test:
- if (has_adapter_file(pci_slot))
+ if (has_adapter_file(slot))
sysfs_remove_file(&pci_slot->kobj,
&hotplug_slot_attr_presence.attr);
exit_adapter:
- if (has_latch_file(pci_slot))
+ if (has_latch_file(slot))
sysfs_remove_file(&pci_slot->kobj, &hotplug_slot_attr_latch.attr);
exit_latch:
- if (has_attention_file(pci_slot))
+ if (has_attention_file(slot))
sysfs_remove_file(&pci_slot->kobj,
&hotplug_slot_attr_attention.attr);
exit_attention:
- if (has_power_file(pci_slot))
+ if (has_power_file(slot))
sysfs_remove_file(&pci_slot->kobj, &hotplug_slot_attr_power.attr);
exit_power:
- pci_hp_remove_module_link(pci_slot);
+ sysfs_remove_link(&pci_slot->kobj, "module");
exit:
return retval;
}
-static void fs_remove_slot(struct pci_slot *pci_slot)
+static void fs_remove_slot(struct hotplug_slot *slot, struct pci_slot *pci_slot)
{
- if (has_power_file(pci_slot))
+ if (has_power_file(slot))
sysfs_remove_file(&pci_slot->kobj, &hotplug_slot_attr_power.attr);
- if (has_attention_file(pci_slot))
+ if (has_attention_file(slot))
sysfs_remove_file(&pci_slot->kobj,
&hotplug_slot_attr_attention.attr);
- if (has_latch_file(pci_slot))
+ if (has_latch_file(slot))
sysfs_remove_file(&pci_slot->kobj, &hotplug_slot_attr_latch.attr);
- if (has_adapter_file(pci_slot))
+ if (has_adapter_file(slot))
sysfs_remove_file(&pci_slot->kobj,
&hotplug_slot_attr_presence.attr);
- if (has_test_file(pci_slot))
+ if (has_test_file(slot))
sysfs_remove_file(&pci_slot->kobj, &hotplug_slot_attr_test.attr);
- pci_hp_remove_module_link(pci_slot);
-}
-
-static struct hotplug_slot *get_slot_from_name(const char *name)
-{
- struct hotplug_slot *slot;
-
- list_for_each_entry(slot, &pci_hotplug_slot_list, slot_list) {
- if (strcmp(hotplug_slot_name(slot), name) == 0)
- return slot;
- }
- return NULL;
+ sysfs_remove_link(&pci_slot->kobj, "module");
}
/**
* __pci_hp_register - register a hotplug_slot with the PCI hotplug subsystem
- * @bus: bus this slot is on
* @slot: pointer to the &struct hotplug_slot to register
+ * @bus: bus this slot is on
* @devnr: device number
* @name: name registered with kobject core
* @owner: caller module owner
@@ -476,18 +423,19 @@ EXPORT_SYMBOL_GPL(__pci_hp_initialize);
*/
int pci_hp_add(struct hotplug_slot *slot)
{
- struct pci_slot *pci_slot = slot->pci_slot;
+ struct pci_slot *pci_slot;
int result;
- result = fs_add_slot(pci_slot);
+ if (WARN_ON(!slot))
+ return -EINVAL;
+
+ pci_slot = slot->pci_slot;
+
+ result = fs_add_slot(slot, pci_slot);
if (result)
return result;
kobject_uevent(&pci_slot->kobj, KOBJ_ADD);
- mutex_lock(&pci_hp_mutex);
- list_add(&slot->slot_list, &pci_hotplug_slot_list);
- mutex_unlock(&pci_hp_mutex);
- dbg("Added slot %s to the list\n", hotplug_slot_name(slot));
return 0;
}
EXPORT_SYMBOL_GPL(pci_hp_add);
@@ -498,8 +446,6 @@ EXPORT_SYMBOL_GPL(pci_hp_add);
*
* The @slot must have been registered with the pci hotplug subsystem
* previously with a call to pci_hp_register().
- *
- * Returns 0 if successful, anything else for an error.
*/
void pci_hp_deregister(struct hotplug_slot *slot)
{
@@ -513,27 +459,13 @@ EXPORT_SYMBOL_GPL(pci_hp_deregister);
* @slot: pointer to the &struct hotplug_slot to unpublish
*
* Remove a hotplug slot's sysfs interface.
- *
- * Returns 0 on success or a negative int on error.
*/
void pci_hp_del(struct hotplug_slot *slot)
{
- struct hotplug_slot *temp;
-
if (WARN_ON(!slot))
return;
- mutex_lock(&pci_hp_mutex);
- temp = get_slot_from_name(hotplug_slot_name(slot));
- if (WARN_ON(temp != slot)) {
- mutex_unlock(&pci_hp_mutex);
- return;
- }
-
- list_del(&slot->slot_list);
- mutex_unlock(&pci_hp_mutex);
- dbg("Removed slot %s from the list\n", hotplug_slot_name(slot));
- fs_remove_slot(slot->pci_slot);
+ fs_remove_slot(slot, slot->pci_slot);
}
EXPORT_SYMBOL_GPL(pci_hp_del);
@@ -545,8 +477,6 @@ EXPORT_SYMBOL_GPL(pci_hp_del);
* the driver may no longer invoke hotplug_slot_name() to get the slot's
* unique name. The driver no longer needs to handle a ->reset_slot callback
* from this point on.
- *
- * Returns 0 on success or a negative int on error.
*/
void pci_hp_destroy(struct hotplug_slot *slot)
{
@@ -558,6 +488,75 @@ void pci_hp_destroy(struct hotplug_slot *slot)
}
EXPORT_SYMBOL_GPL(pci_hp_destroy);
+static DECLARE_WAIT_QUEUE_HEAD(pci_hp_link_change_wq);
+
+/**
+ * pci_hp_ignore_link_change - begin code section causing spurious link changes
+ * @pdev: PCI hotplug bridge
+ *
+ * Mark the beginning of a code section causing spurious link changes on the
+ * Secondary Bus of @pdev, e.g. as a side effect of a Secondary Bus Reset,
+ * D3cold transition, firmware update or FPGA reconfiguration.
+ *
+ * Hotplug drivers can thus check whether such a code section is executing
+ * concurrently, await it with pci_hp_spurious_link_change() and ignore the
+ * resulting link change events.
+ *
+ * Must be paired with pci_hp_unignore_link_change(). May be called both
+ * from the PCI core and from Endpoint drivers. May be called for bridges
+ * which are not hotplug-capable, in which case it has no effect because
+ * no hotplug driver is bound to the bridge.
+ */
+void pci_hp_ignore_link_change(struct pci_dev *pdev)
+{
+ set_bit(PCI_LINK_CHANGING, &pdev->priv_flags);
+ smp_mb__after_atomic(); /* pairs with implied barrier of wait_event() */
+}
+
+/**
+ * pci_hp_unignore_link_change - end code section causing spurious link changes
+ * @pdev: PCI hotplug bridge
+ *
+ * Mark the end of a code section causing spurious link changes on the
+ * Secondary Bus of @pdev. Must be paired with pci_hp_ignore_link_change().
+ */
+void pci_hp_unignore_link_change(struct pci_dev *pdev)
+{
+ set_bit(PCI_LINK_CHANGED, &pdev->priv_flags);
+ mb(); /* ensure pci_hp_spurious_link_change() sees either bit set */
+ clear_bit(PCI_LINK_CHANGING, &pdev->priv_flags);
+ wake_up_all(&pci_hp_link_change_wq);
+}
+
+/**
+ * pci_hp_spurious_link_change - check for spurious link changes
+ * @pdev: PCI hotplug bridge
+ *
+ * Check whether a code section is executing concurrently which is causing
+ * spurious link changes on the Secondary Bus of @pdev. Await the end of the
+ * code section if so.
+ *
+ * May be called by hotplug drivers to check whether a link change is spurious
+ * and can be ignored.
+ *
+ * Because a genuine link change may have occurred in-between a spurious link
+ * change and the invocation of this function, hotplug drivers should perform
+ * sanity checks such as retrieving the current link state and bringing down
+ * the slot if the link is down.
+ *
+ * Return: %true if such a code section has been executing concurrently,
+ * otherwise %false. Also return %true if such a code section has not been
+ * executing concurrently, but at least once since the last invocation of this
+ * function.
+ */
+bool pci_hp_spurious_link_change(struct pci_dev *pdev)
+{
+ wait_event(pci_hp_link_change_wq,
+ !test_bit(PCI_LINK_CHANGING, &pdev->priv_flags));
+
+ return test_and_clear_bit(PCI_LINK_CHANGED, &pdev->priv_flags);
+}
+
static int __init pci_hotplug_init(void)
{
int result;
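The three helpers added above let code that knowingly bounces the link (Secondary Bus Reset, D3cold, firmware update) bracket that section, and let hotplug drivers distinguish the resulting events from a genuine removal. A minimal usage sketch, mirroring how pciehp_reset_slot() uses the pair later in this series (function name hypothetical):

static int example_reset_downstream_bus(struct pci_dev *bridge)
{
	int ret;

	pci_hp_ignore_link_change(bridge);	/* mark the noisy section */
	ret = pci_bridge_secondary_bus_reset(bridge);
	pci_hp_unignore_link_change(bridge);	/* wake waiters, flag the event */

	return ret;
}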
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index e0a614acee05..debc79b0adfb 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -46,6 +46,9 @@ extern int pciehp_poll_time;
/**
* struct controller - PCIe hotplug controller
* @pcie: pointer to the controller's PCIe port service device
+ * @dsn: cached copy of Device Serial Number of Function 0 in the hotplug slot
+ * (PCIe r6.2 sec 7.9.3); used to determine whether a hotplugged device
+ * was replaced with a different one during system sleep
* @slot_cap: cached copy of the Slot Capabilities register
* @inband_presence_disabled: In-Band Presence Detect Disable supported by
* controller and disabled per spec recommendation (PCIe r5.0, appendix I
@@ -87,6 +90,7 @@ extern int pciehp_poll_time;
*/
struct controller {
struct pcie_device *pcie;
+ u64 dsn;
u32 slot_cap; /* capabilities and quirks */
unsigned int inband_presence_disabled:1;
@@ -183,6 +187,7 @@ int pciehp_card_present(struct controller *ctrl);
int pciehp_card_present_or_link_active(struct controller *ctrl);
int pciehp_check_link_status(struct controller *ctrl);
int pciehp_check_link_active(struct controller *ctrl);
+bool pciehp_device_replaced(struct controller *ctrl);
void pciehp_release_ctrl(struct controller *ctrl);
int pciehp_sysfs_enable_slot(struct hotplug_slot *hotplug_slot);
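The new @dsn field caches the Device Serial Number of the device at slot position 0 so that, after system sleep, pciehp can notice the card was swapped even though presence never appeared to change. A hedged sketch of how the cached value is obtained (pci_get_dsn() returns 0 when the capability is absent; helper name hypothetical):

static u64 example_cache_slot_dsn(struct pci_bus *subordinate)
{
	struct pci_dev *pdev = pci_get_slot(subordinate, PCI_DEVFN(0, 0));
	u64 dsn = pdev ? pci_get_dsn(pdev) : 0;

	pci_dev_put(pdev);
	return dsn;
}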
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index 4042d87d539d..f59baa912970 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -20,6 +20,7 @@
#define pr_fmt(fmt) "pciehp: " fmt
#define dev_fmt pr_fmt
+#include <linux/bitfield.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/slab.h>
@@ -103,7 +104,7 @@ static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 status)
struct pci_dev *pdev = ctrl->pcie->port;
if (status)
- status <<= PCI_EXP_SLTCTL_ATTN_IND_SHIFT;
+ status = FIELD_PREP(PCI_EXP_SLTCTL_AIC, status);
else
status = PCI_EXP_SLTCTL_ATTN_IND_OFF;
@@ -292,9 +293,23 @@ static int pciehp_resume_noirq(struct pcie_device *dev)
ctrl->cmd_busy = true;
/* clear spurious events from rediscovery of inserted card */
- if (ctrl->state == ON_STATE || ctrl->state == BLINKINGOFF_STATE)
+ if (ctrl->state == ON_STATE || ctrl->state == BLINKINGOFF_STATE) {
pcie_clear_hotplug_events(ctrl);
+ /*
+ * If hotplugged device was replaced with a different one
+ * during system sleep, mark the old device disconnected
+ * (to prevent its driver from accessing the new device)
+ * and synthesize a Presence Detect Changed event.
+ */
+ if (pciehp_device_replaced(ctrl)) {
+ ctrl_dbg(ctrl, "device replaced during system sleep\n");
+ pci_walk_bus(ctrl->pcie->port->subordinate,
+ pci_dev_set_disconnected, NULL);
+ pciehp_request(ctrl, PCI_EXP_SLTSTA_PDC);
+ }
+ }
+
return 0;
}
#endif
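Once the resume path above decides the card was replaced, the stale device objects must not be touched by their drivers again: the subtree is marked disconnected and a Presence Detect Changed event is synthesized so the normal pciehp state machine tears the slot down and brings the new card up. A condensed sketch of those two steps, assuming pciehp's struct controller:

static void example_handle_replaced(struct controller *ctrl)
{
	/* keep stale drivers from issuing config/MMIO to the new card */
	pci_walk_bus(ctrl->pcie->port->subordinate,
		     pci_dev_set_disconnected, NULL);
	/* let the IRQ thread re-run the slot state machine */
	pciehp_request(ctrl, PCI_EXP_SLTSTA_PDC);
}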
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
index dcdbfcf404dd..bcc938d4420f 100644
--- a/drivers/pci/hotplug/pciehp_ctrl.c
+++ b/drivers/pci/hotplug/pciehp_ctrl.c
@@ -19,6 +19,8 @@
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
+
+#include "../pci.h"
#include "pciehp.h"
/* The following routines constitute the bulk of the
@@ -127,6 +129,9 @@ static void remove_board(struct controller *ctrl, bool safe_removal)
pciehp_set_indicators(ctrl, PCI_EXP_SLTCTL_PWR_IND_OFF,
INDICATOR_NOOP);
+
+ /* Don't carry LBMS indications across */
+ pcie_reset_lbms(ctrl->pcie->port);
}
static int pciehp_enable_slot(struct controller *ctrl);
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 8711325605f0..bcc51b26d03d 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -14,6 +14,7 @@
#define dev_fmt(fmt) "pciehp: " fmt
+#include <linux/bitfield.h>
#include <linux/dmi.h>
#include <linux/kernel.h>
#include <linux/types.h>
@@ -291,7 +292,7 @@ int pciehp_check_link_status(struct controller *ctrl)
{
struct pci_dev *pdev = ctrl_dev(ctrl);
bool found;
- u16 lnk_status;
+ u16 lnk_status, linksta2;
if (!pcie_wait_for_link(pdev, true)) {
ctrl_info(ctrl, "Slot(%s): No link\n", slot_name(ctrl));
@@ -318,7 +319,8 @@ int pciehp_check_link_status(struct controller *ctrl)
return -1;
}
- pcie_update_link_speed(ctrl->pcie->port->subordinate, lnk_status);
+ pcie_capability_read_word(pdev, PCI_EXP_LNKSTA2, &linksta2);
+ __pcie_update_link_speed(ctrl->pcie->port->subordinate, lnk_status, linksta2);
if (!found) {
ctrl_info(ctrl, "Slot(%s): No device found\n",
@@ -332,17 +334,11 @@ int pciehp_check_link_status(struct controller *ctrl)
static int __pciehp_link_set(struct controller *ctrl, bool enable)
{
struct pci_dev *pdev = ctrl_dev(ctrl);
- u16 lnk_ctrl;
-
- pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &lnk_ctrl);
- if (enable)
- lnk_ctrl &= ~PCI_EXP_LNKCTL_LD;
- else
- lnk_ctrl |= PCI_EXP_LNKCTL_LD;
+ pcie_capability_clear_and_set_word(pdev, PCI_EXP_LNKCTL,
+ PCI_EXP_LNKCTL_LD,
+ enable ? 0 : PCI_EXP_LNKCTL_LD);
- pcie_capability_write_word(pdev, PCI_EXP_LNKCTL, lnk_ctrl);
- ctrl_dbg(ctrl, "%s: lnk_ctrl = %x\n", __func__, lnk_ctrl);
return 0;
}
@@ -435,7 +431,7 @@ void pciehp_get_latch_status(struct controller *ctrl, u8 *status)
* removed immediately after the check so the caller may need to take
* this into account.
*
- * It the hotplug controller itself is not available anymore returns
+ * If the hotplug controller itself is not available anymore returns
* %-ENODEV.
*/
int pciehp_card_present(struct controller *ctrl)
@@ -490,7 +486,9 @@ int pciehp_set_raw_indicator_status(struct hotplug_slot *hotplug_slot,
struct pci_dev *pdev = ctrl_dev(ctrl);
pci_config_pm_runtime_get(pdev);
- pcie_write_cmd_nowait(ctrl, status << 6,
+
+ /* Attention and Power Indicator Control bits are supported */
+ pcie_write_cmd_nowait(ctrl, FIELD_PREP(PCI_EXP_SLTCTL_AIC | PCI_EXP_SLTCTL_PIC, status),
PCI_EXP_SLTCTL_AIC | PCI_EXP_SLTCTL_PIC);
pci_config_pm_runtime_put(pdev);
return 0;
@@ -565,20 +563,50 @@ void pciehp_power_off_slot(struct controller *ctrl)
PCI_EXP_SLTCTL_PWR_OFF);
}
-static void pciehp_ignore_dpc_link_change(struct controller *ctrl,
- struct pci_dev *pdev, int irq)
+bool pciehp_device_replaced(struct controller *ctrl)
+{
+ struct pci_dev *pdev __free(pci_dev_put) = NULL;
+ u32 reg;
+
+ if (pci_dev_is_disconnected(ctrl->pcie->port))
+ return false;
+
+ pdev = pci_get_slot(ctrl->pcie->port->subordinate, PCI_DEVFN(0, 0));
+ if (!pdev)
+ return true;
+
+ if (pci_read_config_dword(pdev, PCI_VENDOR_ID, &reg) ||
+ reg != (pdev->vendor | (pdev->device << 16)) ||
+ pci_read_config_dword(pdev, PCI_CLASS_REVISION, &reg) ||
+ reg != (pdev->revision | (pdev->class << 8)))
+ return true;
+
+ if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
+ (pci_read_config_dword(pdev, PCI_SUBSYSTEM_VENDOR_ID, &reg) ||
+ reg != (pdev->subsystem_vendor | (pdev->subsystem_device << 16))))
+ return true;
+
+ if (pci_get_dsn(pdev) != ctrl->dsn)
+ return true;
+
+ return false;
+}
+
+static void pciehp_ignore_link_change(struct controller *ctrl,
+ struct pci_dev *pdev, int irq,
+ u16 ignored_events)
{
/*
* Ignore link changes which occurred while waiting for DPC recovery.
* Could be several if DPC triggered multiple times consecutively.
+ * Also ignore link changes caused by Secondary Bus Reset, etc.
*/
synchronize_hardirq(irq);
- atomic_and(~PCI_EXP_SLTSTA_DLLSC, &ctrl->pending_events);
+ atomic_and(~ignored_events, &ctrl->pending_events);
if (pciehp_poll_mode)
pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
- PCI_EXP_SLTSTA_DLLSC);
- ctrl_info(ctrl, "Slot(%s): Link Down/Up ignored (recovered by DPC)\n",
- slot_name(ctrl));
+ ignored_events);
+ ctrl_info(ctrl, "Slot(%s): Link Down/Up ignored\n", slot_name(ctrl));
/*
* If the link is unexpectedly down after successful recovery,
@@ -586,8 +614,8 @@ static void pciehp_ignore_dpc_link_change(struct controller *ctrl,
* Synthesize it to ensure that it is acted on.
*/
down_read_nested(&ctrl->reset_lock, ctrl->depth);
- if (!pciehp_check_link_active(ctrl))
- pciehp_request(ctrl, PCI_EXP_SLTSTA_DLLSC);
+ if (!pciehp_check_link_active(ctrl) || pciehp_device_replaced(ctrl))
+ pciehp_request(ctrl, ignored_events);
up_read(&ctrl->reset_lock);
}
@@ -734,12 +762,19 @@ static irqreturn_t pciehp_ist(int irq, void *dev_id)
/*
* Ignore Link Down/Up events caused by Downstream Port Containment
- * if recovery from the error succeeded.
+ * if recovery succeeded, or caused by Secondary Bus Reset,
+ * suspend to D3cold, firmware update, FPGA reconfiguration, etc.
*/
- if ((events & PCI_EXP_SLTSTA_DLLSC) && pci_dpc_recovered(pdev) &&
+ if ((events & (PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_DLLSC)) &&
+ (pci_dpc_recovered(pdev) || pci_hp_spurious_link_change(pdev)) &&
ctrl->state == ON_STATE) {
- events &= ~PCI_EXP_SLTSTA_DLLSC;
- pciehp_ignore_dpc_link_change(ctrl, pdev, irq);
+ u16 ignored_events = PCI_EXP_SLTSTA_DLLSC;
+
+ if (!ctrl->inband_presence_disabled)
+ ignored_events |= PCI_EXP_SLTSTA_PDC;
+
+ events &= ~ignored_events;
+ pciehp_ignore_link_change(ctrl, pdev, irq, ignored_events);
}
/*
@@ -845,7 +880,9 @@ void pcie_enable_interrupt(struct controller *ctrl)
{
u16 mask;
- mask = PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_DLLSCE;
+ mask = PCI_EXP_SLTCTL_DLLSCE;
+ if (!pciehp_poll_mode)
+ mask |= PCI_EXP_SLTCTL_HPIE;
pcie_write_cmd(ctrl, mask, mask);
}
@@ -902,7 +939,6 @@ int pciehp_reset_slot(struct hotplug_slot *hotplug_slot, bool probe)
{
struct controller *ctrl = to_ctrl(hotplug_slot);
struct pci_dev *pdev = ctrl_dev(ctrl);
- u16 stat_mask = 0, ctrl_mask = 0;
int rc;
if (probe)
@@ -910,23 +946,11 @@ int pciehp_reset_slot(struct hotplug_slot *hotplug_slot, bool probe)
down_write_nested(&ctrl->reset_lock, ctrl->depth);
- if (!ATTN_BUTTN(ctrl)) {
- ctrl_mask |= PCI_EXP_SLTCTL_PDCE;
- stat_mask |= PCI_EXP_SLTSTA_PDC;
- }
- ctrl_mask |= PCI_EXP_SLTCTL_DLLSCE;
- stat_mask |= PCI_EXP_SLTSTA_DLLSC;
-
- pcie_write_cmd(ctrl, 0, ctrl_mask);
- ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
- pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, 0);
+ pci_hp_ignore_link_change(pdev);
rc = pci_bridge_secondary_bus_reset(ctrl->pcie->port);
- pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, stat_mask);
- pcie_write_cmd_nowait(ctrl, ctrl_mask, ctrl_mask);
- ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
- pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, ctrl_mask);
+ pci_hp_unignore_link_change(pdev);
up_write(&ctrl->reset_lock);
return rc;
@@ -971,7 +995,7 @@ static inline int pcie_hotplug_depth(struct pci_dev *dev)
while (bus->parent) {
bus = bus->parent;
- if (bus->self && bus->self->is_hotplug_bridge)
+ if (bus->self && bus->self->is_pciehp)
depth++;
}
@@ -1034,7 +1058,7 @@ struct controller *pcie_init(struct pcie_device *dev)
PCI_EXP_SLTSTA_DLLSC | PCI_EXP_SLTSTA_PDC);
ctrl_info(ctrl, "Slot #%d AttnBtn%c PwrCtrl%c MRL%c AttnInd%c PwrInd%c HotPlug%c Surprise%c Interlock%c NoCompl%c IbPresDis%c LLActRep%c%s\n",
- (slot_cap & PCI_EXP_SLTCAP_PSN) >> 19,
+ FIELD_GET(PCI_EXP_SLTCAP_PSN, slot_cap),
FLAG(slot_cap, PCI_EXP_SLTCAP_ABP),
FLAG(slot_cap, PCI_EXP_SLTCAP_PCP),
FLAG(slot_cap, PCI_EXP_SLTCAP_MRLSP),
@@ -1060,6 +1084,11 @@ struct controller *pcie_init(struct pcie_device *dev)
}
}
+ pdev = pci_get_slot(subordinate, PCI_DEVFN(0, 0));
+ if (pdev)
+ ctrl->dsn = pci_get_dsn(pdev);
+ pci_dev_put(pdev);
+
return ctrl;
}
diff --git a/drivers/pci/hotplug/pciehp_pci.c b/drivers/pci/hotplug/pciehp_pci.c
index ad12515a4a12..65e50bee1a8c 100644
--- a/drivers/pci/hotplug/pciehp_pci.c
+++ b/drivers/pci/hotplug/pciehp_pci.c
@@ -72,6 +72,10 @@ int pciehp_configure_device(struct controller *ctrl)
pci_bus_add_devices(parent);
down_read_nested(&ctrl->reset_lock, ctrl->depth);
+ dev = pci_get_slot(parent, PCI_DEVFN(0, 0));
+ ctrl->dsn = pci_get_dsn(dev);
+ pci_dev_put(dev);
+
out:
pci_unlock_rescan_remove();
return ret;
diff --git a/drivers/pci/hotplug/pnv_php.c b/drivers/pci/hotplug/pnv_php.c
index 881d420637bf..c5345bff9a55 100644
--- a/drivers/pci/hotplug/pnv_php.c
+++ b/drivers/pci/hotplug/pnv_php.c
@@ -3,11 +3,15 @@
* PCI Hotplug Driver for PowerPC PowerNV platform.
*
* Copyright Gavin Shan, IBM Corporation 2016.
+ * Copyright (C) 2025 Raptor Engineering, LLC
+ * Copyright (C) 2025 Raptor Computing Systems, LLC
*/
+#include <linux/bitfield.h>
#include <linux/libfdt.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/delay.h>
#include <linux/pci_hotplug.h>
#include <linux/of_fdt.h>
@@ -35,11 +39,12 @@ static void pnv_php_register(struct device_node *dn);
static void pnv_php_unregister_one(struct device_node *dn);
static void pnv_php_unregister(struct device_node *dn);
+static void pnv_php_enable_irq(struct pnv_php_slot *php_slot);
+
static void pnv_php_disable_irq(struct pnv_php_slot *php_slot,
- bool disable_device)
+ bool disable_device, bool disable_msi)
{
struct pci_dev *pdev = php_slot->pdev;
- int irq = php_slot->irq;
u16 ctrl;
if (php_slot->irq > 0) {
@@ -53,19 +58,15 @@ static void pnv_php_disable_irq(struct pnv_php_slot *php_slot,
php_slot->irq = 0;
}
- if (php_slot->wq) {
- destroy_workqueue(php_slot->wq);
- php_slot->wq = NULL;
- }
-
- if (disable_device || irq > 0) {
+ if (disable_device || disable_msi) {
if (pdev->msix_enabled)
pci_disable_msix(pdev);
else if (pdev->msi_enabled)
pci_disable_msi(pdev);
+ }
+ if (disable_device)
pci_disable_device(pdev);
- }
}
static void pnv_php_free_slot(struct kref *kref)
@@ -74,7 +75,8 @@ static void pnv_php_free_slot(struct kref *kref)
struct pnv_php_slot, kref);
WARN_ON(!list_empty(&php_slot->children));
- pnv_php_disable_irq(php_slot, false);
+ pnv_php_disable_irq(php_slot, false, false);
+ destroy_workqueue(php_slot->wq);
kfree(php_slot->name);
kfree(php_slot);
}
@@ -391,6 +393,20 @@ static int pnv_php_get_power_state(struct hotplug_slot *slot, u8 *state)
return 0;
}
+static int pcie_check_link_active(struct pci_dev *pdev)
+{
+ u16 lnk_status;
+ int ret;
+
+ ret = pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
+ if (ret == PCIBIOS_DEVICE_NOT_FOUND || PCI_POSSIBLE_ERROR(lnk_status))
+ return -ENODEV;
+
+ ret = !!(lnk_status & PCI_EXP_LNKSTA_DLLLA);
+
+ return ret;
+}
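
As a side note, PCI_EXP_LNKSTA_DLLLA is the Data Link Layer Link Active bit (bit 13) of the Link Status register. A tiny stand-alone sketch of the same test, with the mask hard-coded here as an assumption rather than pulled from pci_regs.h:

#include <stdint.h>
#include <stdio.h>

#define LNKSTA_DLLLA 0x2000	/* Data Link Layer Link Active, Link Status bit 13 */

/* Returns 1 if the link is up, 0 if down, mirroring the helper above. */
static int link_active(uint16_t lnk_status)
{
	return !!(lnk_status & LNKSTA_DLLLA);
}

int main(void)
{
	printf("0x2041 -> %d, 0x0041 -> %d\n",
	       link_active(0x2041), link_active(0x0041));
	return 0;
}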
+
static int pnv_php_get_adapter_state(struct hotplug_slot *slot, u8 *state)
{
struct pnv_php_slot *php_slot = to_pnv_php_slot(slot);
@@ -403,6 +419,19 @@ static int pnv_php_get_adapter_state(struct hotplug_slot *slot, u8 *state)
*/
ret = pnv_pci_get_presence_state(php_slot->id, &presence);
if (ret >= 0) {
+ if (pci_pcie_type(php_slot->pdev) == PCI_EXP_TYPE_DOWNSTREAM &&
+ presence == OPAL_PCI_SLOT_EMPTY) {
+ /*
+ * Similar to pciehp_hpc, check whether the Link Active
+ * bit is set to account for broken downstream bridges
+ * that don't properly assert Presence Detect State, as
+ * was observed on the Microsemi Switchtec PM8533 PFX
+ * [11f8:8533].
+ */
+ if (pcie_check_link_active(php_slot->pdev) > 0)
+ presence = OPAL_PCI_SLOT_PRESENT;
+ }
+
*state = presence;
ret = 0;
} else {
@@ -412,10 +441,23 @@ static int pnv_php_get_adapter_state(struct hotplug_slot *slot, u8 *state)
return ret;
}
+static int pnv_php_get_raw_indicator_status(struct hotplug_slot *slot, u8 *state)
+{
+ struct pnv_php_slot *php_slot = to_pnv_php_slot(slot);
+ struct pci_dev *bridge = php_slot->pdev;
+ u16 status;
+
+ pcie_capability_read_word(bridge, PCI_EXP_SLTCTL, &status);
+ *state = (status & (PCI_EXP_SLTCTL_AIC | PCI_EXP_SLTCTL_PIC)) >> 6;
+ return 0;
+}
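
For reference, the >> 6 above packs the two indicator fields (Attention Indicator Control in Slot Control bits 7:6, Power Indicator Control in bits 9:8) into one nibble. A small sketch with the masks written out as illustrative constants:

#include <stdint.h>
#include <stdio.h>

#define SLTCTL_AIC 0x00c0	/* Attention Indicator Control, bits 7:6 */
#define SLTCTL_PIC 0x0300	/* Power Indicator Control, bits 9:8 */

/* Pack both indicator fields into the 4-bit "raw indicator" value,
 * using the same shift as the helper above. */
static uint8_t raw_indicator(uint16_t sltctl)
{
	return (sltctl & (SLTCTL_AIC | SLTCTL_PIC)) >> 6;
}

int main(void)
{
	/* attention = 01b (on), power = 10b (blink) -> 0b1001 */
	printf("0x%x\n", raw_indicator((1 << 6) | (2 << 8)));
	return 0;
}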
+
+
static int pnv_php_get_attention_state(struct hotplug_slot *slot, u8 *state)
{
struct pnv_php_slot *php_slot = to_pnv_php_slot(slot);
+ pnv_php_get_raw_indicator_status(slot, &php_slot->attention_state);
*state = php_slot->attention_state;
return 0;
}
@@ -433,7 +475,7 @@ static int pnv_php_set_attention_state(struct hotplug_slot *slot, u8 state)
mask = PCI_EXP_SLTCTL_AIC;
if (state)
- new = PCI_EXP_SLTCTL_ATTN_IND_ON;
+ new = FIELD_PREP(PCI_EXP_SLTCTL_AIC, state);
else
new = PCI_EXP_SLTCTL_ATTN_IND_OFF;
@@ -442,6 +484,61 @@ static int pnv_php_set_attention_state(struct hotplug_slot *slot, u8 state)
return 0;
}
+static int pnv_php_activate_slot(struct pnv_php_slot *php_slot,
+ struct hotplug_slot *slot)
+{
+ int ret, i;
+
+ /*
+	 * Issue the initial slot activation command to firmware.
+	 *
+	 * Firmware will power the slot on, attempt to train the link, and
+	 * discover any downstream devices. If this process fails, firmware
+	 * will return an error code and an invalid device tree. Failure
+	 * can occur for multiple reasons, including a faulty downstream
+	 * device, a poor connection to the downstream device, or a
+	 * previously latched PHB fence. On failure, issue a fundamental
+	 * reset up to three times before aborting.
+ */
+ ret = pnv_php_set_slot_power_state(slot, OPAL_PCI_SLOT_POWER_ON);
+ if (ret) {
+ SLOT_WARN(
+ php_slot,
+			"PCI slot activation failed with error code %d, possible frozen PHB\n",
+ ret);
+ SLOT_WARN(
+ php_slot,
+ "Attempting complete PHB reset before retrying slot activation\n");
+ for (i = 0; i < 3; i++) {
+ /*
+ * Slot activation failed, PHB may be fenced from a
+ * prior device failure.
+ *
+ * Use the OPAL fundamental reset call to both try a
+ * device reset and clear any potentially active PHB
+ * fence / freeze.
+ */
+ SLOT_WARN(php_slot, "Try %d...\n", i + 1);
+ pci_set_pcie_reset_state(php_slot->pdev,
+ pcie_warm_reset);
+ msleep(250);
+ pci_set_pcie_reset_state(php_slot->pdev,
+ pcie_deassert_reset);
+
+ ret = pnv_php_set_slot_power_state(
+ slot, OPAL_PCI_SLOT_POWER_ON);
+ if (!ret)
+ break;
+ }
+
+ if (i >= 3)
+ SLOT_WARN(php_slot,
+ "Failed to bring slot online, aborting!\n");
+ }
+
+ return ret;
+}
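
The activation path above is essentially a bounded retry loop around the firmware call. A compilable sketch of that control flow, where the two helpers are hypothetical stand-ins rather than OPAL APIs:

#include <stdio.h>

/* Hypothetical stand-ins for the firmware power-on call and the warm reset. */
static int try_power_on(int attempt) { return attempt < 3 ? -1 : 0; }
static void warm_reset(void) { /* reset + settle delay would go here */ }

/* Same shape as the activation path above: one attempt, then up to
 * three reset-and-retry cycles before giving up. */
static int activate_with_retries(void)
{
	int ret = try_power_on(0);
	int i;

	if (!ret)
		return 0;
	for (i = 0; i < 3; i++) {
		warm_reset();
		ret = try_power_on(i + 1);
		if (!ret)
			break;
	}
	if (i >= 3)
		fprintf(stderr, "failed to bring slot online\n");
	return ret;
}

int main(void)
{
	return activate_with_retries();
}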
+
static int pnv_php_enable(struct pnv_php_slot *php_slot, bool rescan)
{
struct hotplug_slot *slot = &php_slot->slot;
@@ -504,7 +601,7 @@ static int pnv_php_enable(struct pnv_php_slot *php_slot, bool rescan)
goto scan;
/* Power is off, turn it on and then scan the slot */
- ret = pnv_php_set_slot_power_state(slot, OPAL_PCI_SLOT_POWER_ON);
+ ret = pnv_php_activate_slot(php_slot, slot);
if (ret)
return ret;
@@ -561,8 +658,58 @@ static int pnv_php_reset_slot(struct hotplug_slot *slot, bool probe)
static int pnv_php_enable_slot(struct hotplug_slot *slot)
{
struct pnv_php_slot *php_slot = to_pnv_php_slot(slot);
+ u32 prop32;
+ int ret;
+
+ ret = pnv_php_enable(php_slot, true);
+ if (ret)
+ return ret;
+
+ /* (Re-)enable interrupt if the slot supports surprise hotplug */
+ ret = of_property_read_u32(php_slot->dn, "ibm,slot-surprise-pluggable",
+ &prop32);
+ if (!ret && prop32)
+ pnv_php_enable_irq(php_slot);
+
+ return 0;
+}
+
+/*
+ * Disable any hotplug interrupts for all slots on the provided bus, as well as
+ * all downstream slots in preparation for a hot unplug.
+ */
+static int pnv_php_disable_all_irqs(struct pci_bus *bus)
+{
+ struct pci_bus *child_bus;
+ struct pci_slot *slot;
+
+ /* First go down child buses */
+ list_for_each_entry(child_bus, &bus->children, node)
+ pnv_php_disable_all_irqs(child_bus);
+
+ /* Disable IRQs for all pnv_php slots on this bus */
+ list_for_each_entry(slot, &bus->slots, list) {
+ struct pnv_php_slot *php_slot = to_pnv_php_slot(slot->hotplug);
- return pnv_php_enable(php_slot, true);
+ pnv_php_disable_irq(php_slot, false, true);
+ }
+
+ return 0;
+}
+
+/*
+ * Disable any hotplug interrupts for all downstream slots on the provided
+ * bus in preparation for a hot unplug.
+ */
+static int pnv_php_disable_all_downstream_irqs(struct pci_bus *bus)
+{
+ struct pci_bus *child_bus;
+
+ /* Go down child buses, recursively deactivating their IRQs */
+ list_for_each_entry(child_bus, &bus->children, node)
+ pnv_php_disable_all_irqs(child_bus);
+
+ return 0;
}
static int pnv_php_disable_slot(struct hotplug_slot *slot)
@@ -579,6 +726,13 @@ static int pnv_php_disable_slot(struct hotplug_slot *slot)
php_slot->state != PNV_PHP_STATE_REGISTERED)
return 0;
+ /*
+	 * Free all IRQ resources from all child slots before removal.
+ * Note that we do not disable the root slot IRQ here as that
+ * would also deactivate the slot hot (re)plug interrupt!
+ */
+ pnv_php_disable_all_downstream_irqs(php_slot->bus);
+
/* Remove all devices behind the slot */
pci_lock_rescan_remove();
pci_hp_remove_devices(php_slot->bus);
@@ -647,6 +801,15 @@ static struct pnv_php_slot *pnv_php_alloc_slot(struct device_node *dn)
return NULL;
}
+ /* Allocate workqueue for this slot's interrupt handling */
+ php_slot->wq = alloc_workqueue("pciehp-%s", 0, 0, php_slot->name);
+ if (!php_slot->wq) {
+ SLOT_WARN(php_slot, "Cannot alloc workqueue\n");
+ kfree(php_slot->name);
+ kfree(php_slot);
+ return NULL;
+ }
+
if (dn->child && PCI_DN(dn->child))
php_slot->slot_no = PCI_SLOT(PCI_DN(dn->child)->devfn);
else
@@ -731,7 +894,7 @@ static int pnv_php_enable_msix(struct pnv_php_slot *php_slot)
/* Check hotplug MSIx entry is in range */
pcie_capability_read_word(pdev, PCI_EXP_FLAGS, &pcie_flag);
- entry.entry = (pcie_flag & PCI_EXP_FLAGS_IRQ) >> 9;
+ entry.entry = FIELD_GET(PCI_EXP_FLAGS_IRQ, pcie_flag);
if (entry.entry >= nr_entries)
return -ERANGE;
@@ -745,16 +908,63 @@ static int pnv_php_enable_msix(struct pnv_php_slot *php_slot)
return entry.vector;
}
+static void
+pnv_php_detect_clear_surprise_removal_freeze(struct pnv_php_slot *php_slot)
+{
+ struct pci_dev *pdev = php_slot->pdev;
+ struct eeh_dev *edev;
+ struct eeh_pe *pe;
+ int i, rc;
+
+ /*
+ * When a device is surprise removed from a downstream bridge slot,
+ * the upstream bridge port can still end up frozen due to related EEH
+ * events, which will in turn block the MSI interrupts for slot hotplug
+ * detection.
+ *
+ * Detect and thaw any frozen upstream PE after slot deactivation.
+ */
+ edev = pci_dev_to_eeh_dev(pdev);
+ pe = edev ? edev->pe : NULL;
+ rc = eeh_pe_get_state(pe);
+ if ((rc == -ENODEV) || (rc == -ENOENT)) {
+ SLOT_WARN(
+ php_slot,
+ "Upstream bridge PE state unknown, hotplug detect may fail\n");
+ } else {
+ if (pe->state & EEH_PE_ISOLATED) {
+ SLOT_WARN(
+ php_slot,
+ "Upstream bridge PE %02x frozen, thawing...\n",
+ pe->addr);
+ for (i = 0; i < 3; i++)
+ if (!eeh_unfreeze_pe(pe))
+ break;
+ if (i >= 3)
+ SLOT_WARN(
+ php_slot,
+ "Unable to thaw PE %02x, hotplug detect will fail!\n",
+ pe->addr);
+ else
+ SLOT_WARN(php_slot,
+ "PE %02x thawed successfully\n",
+ pe->addr);
+ }
+ }
+}
+
static void pnv_php_event_handler(struct work_struct *work)
{
struct pnv_php_event *event =
container_of(work, struct pnv_php_event, work);
struct pnv_php_slot *php_slot = event->php_slot;
- if (event->added)
+ if (event->added) {
pnv_php_enable_slot(&php_slot->slot);
- else
+ } else {
pnv_php_disable_slot(&php_slot->slot);
+		pnv_php_detect_clear_surprise_removal_freeze(php_slot);
+ }
kfree(event);
}
@@ -843,14 +1053,6 @@ static void pnv_php_init_irq(struct pnv_php_slot *php_slot, int irq)
u16 sts, ctrl;
int ret;
- /* Allocate workqueue */
- php_slot->wq = alloc_workqueue("pciehp-%s", 0, 0, php_slot->name);
- if (!php_slot->wq) {
- SLOT_WARN(php_slot, "Cannot alloc workqueue\n");
- pnv_php_disable_irq(php_slot, true);
- return;
- }
-
/* Check PDC (Presence Detection Change) is broken or not */
ret = of_property_read_u32(php_slot->dn, "ibm,slot-broken-pdc",
&broken_pdc);
@@ -869,7 +1071,7 @@ static void pnv_php_init_irq(struct pnv_php_slot *php_slot, int irq)
ret = request_irq(irq, pnv_php_interrupt, IRQF_SHARED,
php_slot->name, php_slot);
if (ret) {
- pnv_php_disable_irq(php_slot, true);
+ pnv_php_disable_irq(php_slot, true, true);
SLOT_WARN(php_slot, "Error %d enabling IRQ %d\n", ret, irq);
return;
}
diff --git a/drivers/pci/hotplug/rpaphp_pci.c b/drivers/pci/hotplug/rpaphp_pci.c
index 630f77057c23..bcfd26ec6d30 100644
--- a/drivers/pci/hotplug/rpaphp_pci.c
+++ b/drivers/pci/hotplug/rpaphp_pci.c
@@ -19,12 +19,92 @@
#include "../pci.h" /* for pci_add_new_bus */
#include "rpaphp.h"
+/*
+ * RTAS call get-sensor-state(DR_ENTITY_SENSE) return values as per PAPR:
+ * -- generic return codes ---
+ * -1: Hardware Error
+ * -2: RTAS_BUSY
+ * -3: Invalid sensor. RTAS Parameter Error.
+ * -- rtas_get_sensor function specific return codes ---
+ * -9000: Need DR entity to be powered up and unisolated before RTAS call
+ * -9001: Need DR entity to be powered up, but not unisolated, before RTAS call
+ * -9002: DR entity unusable
+ * 990x: Extended delay - where x is a number in the range of 0-5
+ */
+#define RTAS_SLOT_UNISOLATED -9000
+#define RTAS_SLOT_NOT_UNISOLATED -9001
+#define RTAS_SLOT_NOT_USABLE -9002
+
+static int rtas_get_sensor_errno(int rtas_rc)
+{
+ switch (rtas_rc) {
+ case 0:
+ /* Success case */
+ return 0;
+ case RTAS_SLOT_UNISOLATED:
+ case RTAS_SLOT_NOT_UNISOLATED:
+ return -EFAULT;
+ case RTAS_SLOT_NOT_USABLE:
+ return -ENODEV;
+ case RTAS_BUSY:
+ case RTAS_EXTENDED_DELAY_MIN...RTAS_EXTENDED_DELAY_MAX:
+ return -EBUSY;
+ default:
+ return rtas_error_rc(rtas_rc);
+ }
+}
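
A stand-alone illustration of the same mapping, using <errno.h> values and hard-coding the PAPR codes from the comment above; the 990x extended-delay range is written out as 9900..9905, and unknown codes are simplified to -EIO here rather than routed through rtas_error_rc():

#include <errno.h>
#include <stdio.h>

/* Hypothetical user-space mirror of rtas_get_sensor_errno() above. */
static int sensor_rc_to_errno(int rc)
{
	if (rc == 0)
		return 0;
	if (rc == -9000 || rc == -9001)	/* needs power-up/unisolate first */
		return -EFAULT;
	if (rc == -9002)		/* DR entity unusable */
		return -ENODEV;
	if (rc == -2 || (rc >= 9900 && rc <= 9905)) /* busy or extended delay */
		return -EBUSY;
	return -EIO;			/* anything else: treat as hard error */
}

int main(void)
{
	printf("%d %d %d\n", sensor_rc_to_errno(9902),
	       sensor_rc_to_errno(-9002), sensor_rc_to_errno(0));
	return 0;
}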
+
+/*
+ * get_adapter_status() can be called by the EEH handler during EEH recovery.
+ * On certain PHB failures, the RTAS call rtas_call(get-sensor-state) returns
+ * extended busy error (9902) until PHB is recovered by pHyp. The RTAS call
+ * interface rtas_get_sensor() loops over the RTAS call on extended delay
+ * return code (9902) until the return value is either success (0) or error
+ * (-1). This causes the EEH handler to get stuck for ~6 seconds before it
+ * can notify that the PCI error has been detected and stop any active
+ * operations. This sometimes causes EEH recovery to fail. To avoid this issue,
+ * invoke rtas_call(get-sensor-state) directly if the respective PE is in EEH
+ * recovery state and return -EBUSY error based on RTAS return status. This
+ * will help the EEH handler to notify the driver about the PCI error
+ * immediately and successfully proceed with EEH recovery steps.
+ */
+
+static int __rpaphp_get_sensor_state(struct slot *slot, int *state)
+{
+ int rc;
+ int token = rtas_token("get-sensor-state");
+ struct pci_dn *pdn;
+ struct eeh_pe *pe;
+ struct pci_controller *phb = PCI_DN(slot->dn)->phb;
+
+ if (token == RTAS_UNKNOWN_SERVICE)
+ return -ENOENT;
+
+ /*
+	 * Fall back to the existing method if the slot is empty or the PE
+	 * isn't in EEH recovery.
+ */
+ pdn = list_first_entry_or_null(&PCI_DN(phb->dn)->child_list,
+ struct pci_dn, list);
+ if (!pdn)
+ goto fallback;
+
+ pe = eeh_dev_to_pe(pdn->edev);
+ if (pe && (pe->state & EEH_PE_RECOVERING)) {
+ rc = rtas_call(token, 2, 2, state, DR_ENTITY_SENSE,
+ slot->index);
+ return rtas_get_sensor_errno(rc);
+ }
+fallback:
+ return rtas_get_sensor(DR_ENTITY_SENSE, slot->index, state);
+}
+
int rpaphp_get_sensor_state(struct slot *slot, int *state)
{
int rc;
int setlevel;
- rc = rtas_get_sensor(DR_ENTITY_SENSE, slot->index, state);
+ rc = __rpaphp_get_sensor_state(slot, state);
if (rc < 0) {
if (rc == -EFAULT || rc == -EEXIST) {
@@ -40,8 +120,7 @@ int rpaphp_get_sensor_state(struct slot *slot, int *state)
dbg("%s: power on slot[%s] failed rc=%d.\n",
__func__, slot->name, rc);
} else {
- rc = rtas_get_sensor(DR_ENTITY_SENSE,
- slot->index, state);
+ rc = __rpaphp_get_sensor_state(slot, state);
}
} else if (rc == -ENODEV)
info("%s: slot is unusable\n", __func__);
diff --git a/drivers/pci/hotplug/s390_pci_hpc.c b/drivers/pci/hotplug/s390_pci_hpc.c
index a89b7de72dcf..a55967082ef6 100644
--- a/drivers/pci/hotplug/s390_pci_hpc.c
+++ b/drivers/pci/hotplug/s390_pci_hpc.c
@@ -8,8 +8,7 @@
* Jan Glauber <jang@linux.vnet.ibm.com>
*/
-#define KMSG_COMPONENT "zpci"
-#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+#define pr_fmt(fmt) "zpci: " fmt
#include <linux/kernel.h>
#include <linux/slab.h>
@@ -26,58 +25,78 @@ static int enable_slot(struct hotplug_slot *hotplug_slot)
hotplug_slot);
int rc;
- if (zdev->state != ZPCI_FN_STATE_STANDBY)
- return -EIO;
+ mutex_lock(&zdev->state_lock);
+ if (zdev->state != ZPCI_FN_STATE_STANDBY) {
+ rc = -EIO;
+ goto out;
+ }
rc = sclp_pci_configure(zdev->fid);
zpci_dbg(3, "conf fid:%x, rc:%d\n", zdev->fid, rc);
if (rc)
- return rc;
+ goto out;
zdev->state = ZPCI_FN_STATE_CONFIGURED;
- return zpci_scan_configured_device(zdev, zdev->fh);
+ rc = zpci_scan_configured_device(zdev, zdev->fh);
+out:
+ mutex_unlock(&zdev->state_lock);
+ return rc;
}
static int disable_slot(struct hotplug_slot *hotplug_slot)
{
struct zpci_dev *zdev = container_of(hotplug_slot, struct zpci_dev,
hotplug_slot);
- struct pci_dev *pdev;
+ struct pci_dev *pdev = NULL;
+ int rc;
- if (zdev->state != ZPCI_FN_STATE_CONFIGURED)
- return -EIO;
+ mutex_lock(&zdev->state_lock);
+ if (zdev->state != ZPCI_FN_STATE_CONFIGURED) {
+ rc = -EIO;
+ goto out;
+ }
pdev = pci_get_slot(zdev->zbus->bus, zdev->devfn);
if (pdev && pci_num_vf(pdev)) {
- pci_dev_put(pdev);
- return -EBUSY;
+ rc = -EBUSY;
+ goto out;
}
- pci_dev_put(pdev);
- return zpci_deconfigure_device(zdev);
+ rc = zpci_deconfigure_device(zdev);
+out:
+ if (pdev)
+ pci_dev_put(pdev);
+ mutex_unlock(&zdev->state_lock);
+ return rc;
}
static int reset_slot(struct hotplug_slot *hotplug_slot, bool probe)
{
struct zpci_dev *zdev = container_of(hotplug_slot, struct zpci_dev,
hotplug_slot);
+ int rc = -EIO;
- if (zdev->state != ZPCI_FN_STATE_CONFIGURED)
- return -EIO;
/*
- * We can't take the zdev->lock as reset_slot may be called during
- * probing and/or device removal which already happens under the
- * zdev->lock. Instead the user should use the higher level
- * pci_reset_function() or pci_bus_reset() which hold the PCI device
- * lock preventing concurrent removal. If not using these functions
- * holding the PCI device lock is required.
+ * If we can't get the zdev->state_lock the device state is
+ * currently undergoing a transition and we bail out - just
+ * the same as if the device's state is not configured at all.
*/
+ if (!mutex_trylock(&zdev->state_lock))
+ return rc;
- /* As long as the function is configured we can reset */
- if (probe)
- return 0;
+ /* We can reset only if the function is configured */
+ if (zdev->state != ZPCI_FN_STATE_CONFIGURED)
+ goto out;
+
+ if (probe) {
+ rc = 0;
+ goto out;
+ }
- return zpci_hot_reset_device(zdev);
+ rc = zpci_hot_reset_device(zdev);
+out:
+ mutex_unlock(&zdev->state_lock);
+ return rc;
}
static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
@@ -91,7 +110,7 @@ static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
- /* if the slot exits it always contains a function */
+ /* if the slot exists it always contains a function */
*value = 1;
return 0;
}
diff --git a/drivers/pci/hotplug/shpchp.h b/drivers/pci/hotplug/shpchp.h
index 3a97f455336e..a425530e0939 100644
--- a/drivers/pci/hotplug/shpchp.h
+++ b/drivers/pci/hotplug/shpchp.h
@@ -33,24 +33,8 @@ extern bool shpchp_poll_mode;
extern int shpchp_poll_time;
extern bool shpchp_debug;
-#define dbg(format, arg...) \
-do { \
- if (shpchp_debug) \
- printk(KERN_DEBUG "%s: " format, MY_NAME, ## arg); \
-} while (0)
-#define err(format, arg...) \
- printk(KERN_ERR "%s: " format, MY_NAME, ## arg)
-#define info(format, arg...) \
- printk(KERN_INFO "%s: " format, MY_NAME, ## arg)
-#define warn(format, arg...) \
- printk(KERN_WARNING "%s: " format, MY_NAME, ## arg)
-
#define ctrl_dbg(ctrl, format, arg...) \
- do { \
- if (shpchp_debug) \
- pci_printk(KERN_DEBUG, ctrl->pci_dev, \
- format, ## arg); \
- } while (0)
+ pci_dbg(ctrl->pci_dev, format, ## arg)
#define ctrl_err(ctrl, format, arg...) \
pci_err(ctrl->pci_dev, format, ## arg)
#define ctrl_info(ctrl, format, arg...) \
@@ -72,7 +56,6 @@ struct slot {
u8 latch_save;
u8 pwr_save;
struct controller *ctrl;
- const struct hpc_ops *hpc_ops;
struct hotplug_slot hotplug_slot;
struct list_head slot_list;
struct delayed_work work; /* work for button event */
@@ -94,7 +77,6 @@ struct controller {
int slot_num_inc; /* 1 or -1 */
struct pci_dev *pci_dev;
struct list_head slot_list;
- const struct hpc_ops *hpc_ops;
wait_queue_head_t queue; /* sleep & wake process */
u8 slot_device_offset;
u32 pcix_misc2_reg; /* for amd pogo errata */
@@ -300,24 +282,22 @@ static inline void amd_pogo_errata_restore_misc_reg(struct slot *p_slot)
pci_write_config_dword(p_slot->ctrl->pci_dev, PCIX_MISCII_OFFSET, pcix_misc2_temp);
}
-struct hpc_ops {
- int (*power_on_slot)(struct slot *slot);
- int (*slot_enable)(struct slot *slot);
- int (*slot_disable)(struct slot *slot);
- int (*set_bus_speed_mode)(struct slot *slot, enum pci_bus_speed speed);
- int (*get_power_status)(struct slot *slot, u8 *status);
- int (*get_attention_status)(struct slot *slot, u8 *status);
- int (*set_attention_status)(struct slot *slot, u8 status);
- int (*get_latch_status)(struct slot *slot, u8 *status);
- int (*get_adapter_status)(struct slot *slot, u8 *status);
- int (*get_adapter_speed)(struct slot *slot, enum pci_bus_speed *speed);
- int (*get_prog_int)(struct slot *slot, u8 *prog_int);
- int (*query_power_fault)(struct slot *slot);
- void (*green_led_on)(struct slot *slot);
- void (*green_led_off)(struct slot *slot);
- void (*green_led_blink)(struct slot *slot);
- void (*release_ctlr)(struct controller *ctrl);
- int (*check_cmd_status)(struct controller *ctrl);
-};
+int shpchp_power_on_slot(struct slot *slot);
+int shpchp_slot_enable(struct slot *slot);
+int shpchp_slot_disable(struct slot *slot);
+int shpchp_set_bus_speed_mode(struct slot *slot, enum pci_bus_speed speed);
+int shpchp_get_power_status(struct slot *slot, u8 *status);
+int shpchp_get_attention_status(struct slot *slot, u8 *status);
+int shpchp_set_attention_status(struct slot *slot, u8 status);
+int shpchp_get_latch_status(struct slot *slot, u8 *status);
+int shpchp_get_adapter_status(struct slot *slot, u8 *status);
+int shpchp_get_adapter_speed(struct slot *slot, enum pci_bus_speed *speed);
+int shpchp_get_prog_int(struct slot *slot, u8 *prog_int);
+int shpchp_query_power_fault(struct slot *slot);
+void shpchp_green_led_on(struct slot *slot);
+void shpchp_green_led_off(struct slot *slot);
+void shpchp_green_led_blink(struct slot *slot);
+void shpchp_release_ctlr(struct controller *ctrl);
+int shpchp_check_cmd_status(struct controller *ctrl);
#endif /* _SHPCHP_H */
diff --git a/drivers/pci/hotplug/shpchp_core.c b/drivers/pci/hotplug/shpchp_core.c
index 56c7795ed890..0c341453afc6 100644
--- a/drivers/pci/hotplug/shpchp_core.c
+++ b/drivers/pci/hotplug/shpchp_core.c
@@ -22,7 +22,6 @@
#include "shpchp.h"
/* Global variables */
-bool shpchp_debug;
bool shpchp_poll_mode;
int shpchp_poll_time;
@@ -33,10 +32,8 @@ int shpchp_poll_time;
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
-module_param(shpchp_debug, bool, 0644);
module_param(shpchp_poll_mode, bool, 0644);
module_param(shpchp_poll_time, int, 0644);
-MODULE_PARM_DESC(shpchp_debug, "Debugging mode enabled or not");
MODULE_PARM_DESC(shpchp_poll_mode, "Using polling mechanism for hot-plug events or not");
MODULE_PARM_DESC(shpchp_poll_time, "Polling mechanism frequency, in seconds");
@@ -81,7 +78,6 @@ static int init_slots(struct controller *ctrl)
slot->ctrl = ctrl;
slot->bus = ctrl->pci_dev->subordinate->number;
slot->device = ctrl->slot_device_offset + i;
- slot->hpc_ops = ctrl->hpc_ops;
slot->number = ctrl->first_slot + (ctrl->slot_num_inc * i);
slot->wq = alloc_workqueue("shpchp-%d", 0, 0, slot->number);
@@ -150,7 +146,7 @@ static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 status)
__func__, slot_name(slot));
slot->attention_save = status;
- slot->hpc_ops->set_attention_status(slot, status);
+ shpchp_set_attention_status(slot, status);
return 0;
}
@@ -183,7 +179,7 @@ static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
__func__, slot_name(slot));
- retval = slot->hpc_ops->get_power_status(slot, value);
+ retval = shpchp_get_power_status(slot, value);
if (retval < 0)
*value = slot->pwr_save;
@@ -198,7 +194,7 @@ static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value)
ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
__func__, slot_name(slot));
- retval = slot->hpc_ops->get_attention_status(slot, value);
+ retval = shpchp_get_attention_status(slot, value);
if (retval < 0)
*value = slot->attention_save;
@@ -213,7 +209,7 @@ static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value)
ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
__func__, slot_name(slot));
- retval = slot->hpc_ops->get_latch_status(slot, value);
+ retval = shpchp_get_latch_status(slot, value);
if (retval < 0)
*value = slot->latch_save;
@@ -228,7 +224,7 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
__func__, slot_name(slot));
- retval = slot->hpc_ops->get_adapter_status(slot, value);
+ retval = shpchp_get_adapter_status(slot, value);
if (retval < 0)
*value = slot->presence_save;
@@ -293,7 +289,7 @@ static int shpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err_cleanup_slots:
cleanup_slots(ctrl);
err_out_release_ctlr:
- ctrl->hpc_ops->release_ctlr(ctrl);
+ shpchp_release_ctlr(ctrl);
err_out_free_ctrl:
kfree(ctrl);
err_out_none:
@@ -306,7 +302,7 @@ static void shpc_remove(struct pci_dev *dev)
dev->shpc_managed = 0;
shpchp_remove_ctrl_files(ctrl);
- ctrl->hpc_ops->release_ctlr(ctrl);
+ shpchp_release_ctlr(ctrl);
kfree(ctrl);
}
@@ -325,20 +321,12 @@ static struct pci_driver shpc_driver = {
static int __init shpcd_init(void)
{
- int retval;
-
- retval = pci_register_driver(&shpc_driver);
- dbg("%s: pci_register_driver = %d\n", __func__, retval);
- info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
-
- return retval;
+ return pci_register_driver(&shpc_driver);
}
static void __exit shpcd_cleanup(void)
{
- dbg("unload_shpchpd()\n");
pci_unregister_driver(&shpc_driver);
- info(DRIVER_DESC " version: " DRIVER_VERSION " unloaded\n");
}
module_init(shpcd_init);
diff --git a/drivers/pci/hotplug/shpchp_ctrl.c b/drivers/pci/hotplug/shpchp_ctrl.c
index 6a6705e0cf17..e6c6f23bae27 100644
--- a/drivers/pci/hotplug/shpchp_ctrl.c
+++ b/drivers/pci/hotplug/shpchp_ctrl.c
@@ -51,7 +51,7 @@ u8 shpchp_handle_attention_button(u8 hp_slot, struct controller *ctrl)
ctrl_dbg(ctrl, "Attention button interrupt received\n");
p_slot = shpchp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset);
- p_slot->hpc_ops->get_adapter_status(p_slot, &(p_slot->presence_save));
+ shpchp_get_adapter_status(p_slot, &p_slot->presence_save);
/*
* Button pressed - See if need to TAKE ACTION!!!
@@ -75,8 +75,8 @@ u8 shpchp_handle_switch_change(u8 hp_slot, struct controller *ctrl)
ctrl_dbg(ctrl, "Switch interrupt received\n");
p_slot = shpchp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset);
- p_slot->hpc_ops->get_adapter_status(p_slot, &(p_slot->presence_save));
- p_slot->hpc_ops->get_latch_status(p_slot, &getstatus);
+ shpchp_get_adapter_status(p_slot, &p_slot->presence_save);
+ shpchp_get_latch_status(p_slot, &getstatus);
ctrl_dbg(ctrl, "Card present %x Power status %x\n",
p_slot->presence_save, p_slot->pwr_save);
@@ -116,7 +116,7 @@ u8 shpchp_handle_presence_change(u8 hp_slot, struct controller *ctrl)
/*
* Save the presence state
*/
- p_slot->hpc_ops->get_adapter_status(p_slot, &(p_slot->presence_save));
+ shpchp_get_adapter_status(p_slot, &p_slot->presence_save);
if (p_slot->presence_save) {
/*
* Card Present
@@ -148,7 +148,7 @@ u8 shpchp_handle_power_fault(u8 hp_slot, struct controller *ctrl)
p_slot = shpchp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset);
- if (!(p_slot->hpc_ops->query_power_fault(p_slot))) {
+ if (!(shpchp_query_power_fault(p_slot))) {
/*
* Power fault Cleared
*/
@@ -181,7 +181,7 @@ static int change_bus_speed(struct controller *ctrl, struct slot *p_slot,
int rc = 0;
ctrl_dbg(ctrl, "Change speed to %d\n", speed);
- rc = p_slot->hpc_ops->set_bus_speed_mode(p_slot, speed);
+ rc = shpchp_set_bus_speed_mode(p_slot, speed);
if (rc) {
ctrl_err(ctrl, "%s: Issue of set bus speed mode command failed\n",
__func__);
@@ -241,14 +241,14 @@ static int board_added(struct slot *p_slot)
__func__, p_slot->device, ctrl->slot_device_offset, hp_slot);
/* Power on slot without connecting to bus */
- rc = p_slot->hpc_ops->power_on_slot(p_slot);
+ rc = shpchp_power_on_slot(p_slot);
if (rc) {
ctrl_err(ctrl, "Failed to power on slot\n");
return -1;
}
if ((ctrl->pci_dev->vendor == 0x8086) && (ctrl->pci_dev->device == 0x0332)) {
- rc = p_slot->hpc_ops->set_bus_speed_mode(p_slot, PCI_SPEED_33MHz);
+ rc = shpchp_set_bus_speed_mode(p_slot, PCI_SPEED_33MHz);
if (rc) {
ctrl_err(ctrl, "%s: Issue of set bus speed mode command failed\n",
__func__);
@@ -256,14 +256,14 @@ static int board_added(struct slot *p_slot)
}
/* turn on board, blink green LED, turn off Amber LED */
- rc = p_slot->hpc_ops->slot_enable(p_slot);
+ rc = shpchp_slot_enable(p_slot);
if (rc) {
ctrl_err(ctrl, "Issue of Slot Enable command failed\n");
return rc;
}
}
- rc = p_slot->hpc_ops->get_adapter_speed(p_slot, &asp);
+ rc = shpchp_get_adapter_speed(p_slot, &asp);
if (rc) {
ctrl_err(ctrl, "Can't get adapter speed or bus mode mismatch\n");
return WRONG_BUS_FREQUENCY;
@@ -285,7 +285,7 @@ static int board_added(struct slot *p_slot)
return rc;
/* turn on board, blink green LED, turn off Amber LED */
- rc = p_slot->hpc_ops->slot_enable(p_slot);
+ rc = shpchp_slot_enable(p_slot);
if (rc) {
ctrl_err(ctrl, "Issue of Slot Enable command failed\n");
return rc;
@@ -313,13 +313,13 @@ static int board_added(struct slot *p_slot)
p_slot->is_a_board = 0x01;
p_slot->pwr_save = 1;
- p_slot->hpc_ops->green_led_on(p_slot);
+ shpchp_green_led_on(p_slot);
return 0;
err_exit:
/* turn off slot, turn on Amber LED, turn off Green LED */
- rc = p_slot->hpc_ops->slot_disable(p_slot);
+ rc = shpchp_slot_disable(p_slot);
if (rc) {
ctrl_err(ctrl, "%s: Issue of Slot Disable command failed\n",
__func__);
@@ -352,14 +352,14 @@ static int remove_board(struct slot *p_slot)
p_slot->status = 0x01;
/* turn off slot, turn on Amber LED, turn off Green LED */
- rc = p_slot->hpc_ops->slot_disable(p_slot);
+ rc = shpchp_slot_disable(p_slot);
if (rc) {
ctrl_err(ctrl, "%s: Issue of Slot Disable command failed\n",
__func__);
return rc;
}
- rc = p_slot->hpc_ops->set_attention_status(p_slot, 0);
+ rc = shpchp_set_attention_status(p_slot, 0);
if (rc) {
ctrl_err(ctrl, "Issue of Set Attention command failed\n");
return rc;
@@ -401,7 +401,7 @@ static void shpchp_pushbutton_thread(struct work_struct *work)
case POWERON_STATE:
mutex_unlock(&p_slot->lock);
if (shpchp_enable_slot(p_slot))
- p_slot->hpc_ops->green_led_off(p_slot);
+ shpchp_green_led_off(p_slot);
mutex_lock(&p_slot->lock);
p_slot->state = STATIC_STATE;
break;
@@ -446,10 +446,10 @@ void shpchp_queue_pushbutton_work(struct work_struct *work)
static void update_slot_info(struct slot *slot)
{
- slot->hpc_ops->get_power_status(slot, &slot->pwr_save);
- slot->hpc_ops->get_attention_status(slot, &slot->attention_save);
- slot->hpc_ops->get_latch_status(slot, &slot->latch_save);
- slot->hpc_ops->get_adapter_status(slot, &slot->presence_save);
+ shpchp_get_power_status(slot, &slot->pwr_save);
+ shpchp_get_attention_status(slot, &slot->attention_save);
+ shpchp_get_latch_status(slot, &slot->latch_save);
+ shpchp_get_adapter_status(slot, &slot->presence_save);
}
/*
@@ -462,7 +462,7 @@ static void handle_button_press_event(struct slot *p_slot)
switch (p_slot->state) {
case STATIC_STATE:
- p_slot->hpc_ops->get_power_status(p_slot, &getstatus);
+ shpchp_get_power_status(p_slot, &getstatus);
if (getstatus) {
p_slot->state = BLINKINGOFF_STATE;
ctrl_info(ctrl, "PCI slot #%s - powering off due to button press\n",
@@ -473,8 +473,8 @@ static void handle_button_press_event(struct slot *p_slot)
slot_name(p_slot));
}
/* blink green LED and turn off amber */
- p_slot->hpc_ops->green_led_blink(p_slot);
- p_slot->hpc_ops->set_attention_status(p_slot, 0);
+ shpchp_green_led_blink(p_slot);
+ shpchp_set_attention_status(p_slot, 0);
queue_delayed_work(p_slot->wq, &p_slot->work, 5*HZ);
break;
@@ -489,10 +489,10 @@ static void handle_button_press_event(struct slot *p_slot)
slot_name(p_slot));
cancel_delayed_work(&p_slot->work);
if (p_slot->state == BLINKINGOFF_STATE)
- p_slot->hpc_ops->green_led_on(p_slot);
+ shpchp_green_led_on(p_slot);
else
- p_slot->hpc_ops->green_led_off(p_slot);
- p_slot->hpc_ops->set_attention_status(p_slot, 0);
+ shpchp_green_led_off(p_slot);
+ shpchp_set_attention_status(p_slot, 0);
ctrl_info(ctrl, "PCI slot #%s - action canceled due to button press\n",
slot_name(p_slot));
p_slot->state = STATIC_STATE;
@@ -526,8 +526,8 @@ static void interrupt_event_handler(struct work_struct *work)
break;
case INT_POWER_FAULT:
ctrl_dbg(p_slot->ctrl, "%s: Power fault\n", __func__);
- p_slot->hpc_ops->set_attention_status(p_slot, 1);
- p_slot->hpc_ops->green_led_off(p_slot);
+ shpchp_set_attention_status(p_slot, 1);
+ shpchp_green_led_off(p_slot);
break;
default:
update_slot_info(p_slot);
@@ -547,17 +547,17 @@ static int shpchp_enable_slot (struct slot *p_slot)
/* Check to see if (latch closed, card present, power off) */
mutex_lock(&p_slot->ctrl->crit_sect);
- rc = p_slot->hpc_ops->get_adapter_status(p_slot, &getstatus);
+ rc = shpchp_get_adapter_status(p_slot, &getstatus);
if (rc || !getstatus) {
ctrl_info(ctrl, "No adapter on slot(%s)\n", slot_name(p_slot));
goto out;
}
- rc = p_slot->hpc_ops->get_latch_status(p_slot, &getstatus);
+ rc = shpchp_get_latch_status(p_slot, &getstatus);
if (rc || getstatus) {
ctrl_info(ctrl, "Latch open on slot(%s)\n", slot_name(p_slot));
goto out;
}
- rc = p_slot->hpc_ops->get_power_status(p_slot, &getstatus);
+ rc = shpchp_get_power_status(p_slot, &getstatus);
if (rc || getstatus) {
ctrl_info(ctrl, "Already enabled on slot(%s)\n",
slot_name(p_slot));
@@ -567,10 +567,10 @@ static int shpchp_enable_slot (struct slot *p_slot)
p_slot->is_a_board = 1;
/* We have to save the presence info for these slots */
- p_slot->hpc_ops->get_adapter_status(p_slot, &(p_slot->presence_save));
- p_slot->hpc_ops->get_power_status(p_slot, &(p_slot->pwr_save));
+ shpchp_get_adapter_status(p_slot, &p_slot->presence_save);
+ shpchp_get_power_status(p_slot, &p_slot->pwr_save);
ctrl_dbg(ctrl, "%s: p_slot->pwr_save %x\n", __func__, p_slot->pwr_save);
- p_slot->hpc_ops->get_latch_status(p_slot, &getstatus);
+ shpchp_get_latch_status(p_slot, &getstatus);
if ((p_slot->ctrl->pci_dev->vendor == PCI_VENDOR_ID_AMD &&
p_slot->ctrl->pci_dev->device == PCI_DEVICE_ID_AMD_POGO_7458)
@@ -584,9 +584,8 @@ static int shpchp_enable_slot (struct slot *p_slot)
retval = board_added(p_slot);
if (retval) {
- p_slot->hpc_ops->get_adapter_status(p_slot,
- &(p_slot->presence_save));
- p_slot->hpc_ops->get_latch_status(p_slot, &getstatus);
+ shpchp_get_adapter_status(p_slot, &p_slot->presence_save);
+ shpchp_get_latch_status(p_slot, &getstatus);
}
update_slot_info(p_slot);
@@ -608,17 +607,17 @@ static int shpchp_disable_slot (struct slot *p_slot)
/* Check to see if (latch closed, card present, power on) */
mutex_lock(&p_slot->ctrl->crit_sect);
- rc = p_slot->hpc_ops->get_adapter_status(p_slot, &getstatus);
+ rc = shpchp_get_adapter_status(p_slot, &getstatus);
if (rc || !getstatus) {
ctrl_info(ctrl, "No adapter on slot(%s)\n", slot_name(p_slot));
goto out;
}
- rc = p_slot->hpc_ops->get_latch_status(p_slot, &getstatus);
+ rc = shpchp_get_latch_status(p_slot, &getstatus);
if (rc || getstatus) {
ctrl_info(ctrl, "Latch open on slot(%s)\n", slot_name(p_slot));
goto out;
}
- rc = p_slot->hpc_ops->get_power_status(p_slot, &getstatus);
+ rc = shpchp_get_power_status(p_slot, &getstatus);
if (rc || !getstatus) {
ctrl_info(ctrl, "Already disabled on slot(%s)\n",
slot_name(p_slot));
diff --git a/drivers/pci/hotplug/shpchp_hpc.c b/drivers/pci/hotplug/shpchp_hpc.c
index 48e4daefc44a..183bf43510a1 100644
--- a/drivers/pci/hotplug/shpchp_hpc.c
+++ b/drivers/pci/hotplug/shpchp_hpc.c
@@ -167,7 +167,6 @@
static irqreturn_t shpc_isr(int irq, void *dev_id);
static void start_int_poll_timer(struct controller *ctrl, int sec);
-static int hpc_check_cmd_status(struct controller *ctrl);
static inline u8 shpc_readb(struct controller *ctrl, int reg)
{
@@ -212,7 +211,7 @@ static inline int shpc_indirect_read(struct controller *ctrl, int index,
*/
static void int_poll_timeout(struct timer_list *t)
{
- struct controller *ctrl = from_timer(ctrl, t, poll_timer);
+ struct controller *ctrl = timer_container_of(ctrl, t, poll_timer);
/* Poll for interrupt events. regs == NULL => polling */
shpc_isr(0, ctrl);
@@ -317,7 +316,7 @@ static int shpc_write_cmd(struct slot *slot, u8 t_slot, u8 cmd)
if (retval)
goto out;
- cmd_status = hpc_check_cmd_status(slot->ctrl);
+ cmd_status = shpchp_check_cmd_status(slot->ctrl);
if (cmd_status) {
ctrl_err(ctrl, "Failed to issued command 0x%x (error code = %d)\n",
cmd, cmd_status);
@@ -328,7 +327,7 @@ static int shpc_write_cmd(struct slot *slot, u8 t_slot, u8 cmd)
return retval;
}
-static int hpc_check_cmd_status(struct controller *ctrl)
+int shpchp_check_cmd_status(struct controller *ctrl)
{
int retval = 0;
u16 cmd_status = shpc_readw(ctrl, CMD_STATUS) & 0x000F;
@@ -357,7 +356,7 @@ static int hpc_check_cmd_status(struct controller *ctrl)
}
-static int hpc_get_attention_status(struct slot *slot, u8 *status)
+int shpchp_get_attention_status(struct slot *slot, u8 *status)
{
struct controller *ctrl = slot->ctrl;
u32 slot_reg = shpc_readl(ctrl, SLOT_REG(slot->hp_slot));
@@ -381,7 +380,7 @@ static int hpc_get_attention_status(struct slot *slot, u8 *status)
return 0;
}
-static int hpc_get_power_status(struct slot *slot, u8 *status)
+int shpchp_get_power_status(struct slot *slot, u8 *status)
{
struct controller *ctrl = slot->ctrl;
u32 slot_reg = shpc_readl(ctrl, SLOT_REG(slot->hp_slot));
@@ -406,7 +405,7 @@ static int hpc_get_power_status(struct slot *slot, u8 *status)
}
-static int hpc_get_latch_status(struct slot *slot, u8 *status)
+int shpchp_get_latch_status(struct slot *slot, u8 *status)
{
struct controller *ctrl = slot->ctrl;
u32 slot_reg = shpc_readl(ctrl, SLOT_REG(slot->hp_slot));
@@ -416,7 +415,7 @@ static int hpc_get_latch_status(struct slot *slot, u8 *status)
return 0;
}
-static int hpc_get_adapter_status(struct slot *slot, u8 *status)
+int shpchp_get_adapter_status(struct slot *slot, u8 *status)
{
struct controller *ctrl = slot->ctrl;
u32 slot_reg = shpc_readl(ctrl, SLOT_REG(slot->hp_slot));
@@ -427,7 +426,7 @@ static int hpc_get_adapter_status(struct slot *slot, u8 *status)
return 0;
}
-static int hpc_get_prog_int(struct slot *slot, u8 *prog_int)
+int shpchp_get_prog_int(struct slot *slot, u8 *prog_int)
{
struct controller *ctrl = slot->ctrl;
@@ -436,7 +435,7 @@ static int hpc_get_prog_int(struct slot *slot, u8 *prog_int)
return 0;
}
-static int hpc_get_adapter_speed(struct slot *slot, enum pci_bus_speed *value)
+int shpchp_get_adapter_speed(struct slot *slot, enum pci_bus_speed *value)
{
int retval = 0;
struct controller *ctrl = slot->ctrl;
@@ -444,7 +443,7 @@ static int hpc_get_adapter_speed(struct slot *slot, enum pci_bus_speed *value)
u8 m66_cap = !!(slot_reg & MHZ66_CAP);
u8 pi, pcix_cap;
- retval = hpc_get_prog_int(slot, &pi);
+ retval = shpchp_get_prog_int(slot, &pi);
if (retval)
return retval;
@@ -489,7 +488,7 @@ static int hpc_get_adapter_speed(struct slot *slot, enum pci_bus_speed *value)
return retval;
}
-static int hpc_query_power_fault(struct slot *slot)
+int shpchp_query_power_fault(struct slot *slot)
{
struct controller *ctrl = slot->ctrl;
u32 slot_reg = shpc_readl(ctrl, SLOT_REG(slot->hp_slot));
@@ -498,7 +497,7 @@ static int hpc_query_power_fault(struct slot *slot)
return !(slot_reg & POWER_FAULT);
}
-static int hpc_set_attention_status(struct slot *slot, u8 value)
+int shpchp_set_attention_status(struct slot *slot, u8 value)
{
u8 slot_cmd = 0;
@@ -520,22 +519,22 @@ static int hpc_set_attention_status(struct slot *slot, u8 value)
}
-static void hpc_set_green_led_on(struct slot *slot)
+void shpchp_green_led_on(struct slot *slot)
{
shpc_write_cmd(slot, slot->hp_slot, SET_PWR_ON);
}
-static void hpc_set_green_led_off(struct slot *slot)
+void shpchp_green_led_off(struct slot *slot)
{
shpc_write_cmd(slot, slot->hp_slot, SET_PWR_OFF);
}
-static void hpc_set_green_led_blink(struct slot *slot)
+void shpchp_green_led_blink(struct slot *slot)
{
shpc_write_cmd(slot, slot->hp_slot, SET_PWR_BLINK);
}
-static void hpc_release_ctlr(struct controller *ctrl)
+void shpchp_release_ctlr(struct controller *ctrl)
{
int i;
u32 slot_reg, serr_int;
@@ -565,7 +564,7 @@ static void hpc_release_ctlr(struct controller *ctrl)
shpc_writel(ctrl, SERR_INTR_ENABLE, serr_int);
if (shpchp_poll_mode)
- del_timer(&ctrl->poll_timer);
+ timer_delete(&ctrl->poll_timer);
else {
free_irq(ctrl->pci_dev->irq, ctrl);
pci_disable_msi(ctrl->pci_dev);
@@ -575,7 +574,7 @@ static void hpc_release_ctlr(struct controller *ctrl)
release_mem_region(ctrl->mmio_base, ctrl->mmio_size);
}
-static int hpc_power_on_slot(struct slot *slot)
+int shpchp_power_on_slot(struct slot *slot)
{
int retval;
@@ -586,7 +585,7 @@ static int hpc_power_on_slot(struct slot *slot)
return retval;
}
-static int hpc_slot_enable(struct slot *slot)
+int shpchp_slot_enable(struct slot *slot)
{
int retval;
@@ -599,7 +598,7 @@ static int hpc_slot_enable(struct slot *slot)
return retval;
}
-static int hpc_slot_disable(struct slot *slot)
+int shpchp_slot_disable(struct slot *slot)
{
int retval;
@@ -676,12 +675,12 @@ static int shpc_get_cur_bus_speed(struct controller *ctrl)
out:
bus->cur_bus_speed = bus_speed;
- dbg("Current bus speed = %d\n", bus_speed);
+ ctrl_dbg(ctrl, "Current bus speed = %d\n", bus_speed);
return retval;
}
-static int hpc_set_bus_speed_mode(struct slot *slot, enum pci_bus_speed value)
+int shpchp_set_bus_speed_mode(struct slot *slot, enum pci_bus_speed value)
{
int retval;
struct controller *ctrl = slot->ctrl;
@@ -871,28 +870,6 @@ static int shpc_get_max_bus_speed(struct controller *ctrl)
return retval;
}
-static const struct hpc_ops shpchp_hpc_ops = {
- .power_on_slot = hpc_power_on_slot,
- .slot_enable = hpc_slot_enable,
- .slot_disable = hpc_slot_disable,
- .set_bus_speed_mode = hpc_set_bus_speed_mode,
- .set_attention_status = hpc_set_attention_status,
- .get_power_status = hpc_get_power_status,
- .get_attention_status = hpc_get_attention_status,
- .get_latch_status = hpc_get_latch_status,
- .get_adapter_status = hpc_get_adapter_status,
-
- .get_adapter_speed = hpc_get_adapter_speed,
- .get_prog_int = hpc_get_prog_int,
-
- .query_power_fault = hpc_query_power_fault,
- .green_led_on = hpc_set_green_led_on,
- .green_led_off = hpc_set_green_led_off,
- .green_led_blink = hpc_set_green_led_blink,
-
- .release_ctlr = hpc_release_ctlr,
-};
-
int shpc_init(struct controller *ctrl, struct pci_dev *pdev)
{
int rc = -1, num_slots = 0;
@@ -978,8 +955,6 @@ int shpc_init(struct controller *ctrl, struct pci_dev *pdev)
/* Setup wait queue */
init_waitqueue_head(&ctrl->queue);
- ctrl->hpc_ops = &shpchp_hpc_ops;
-
/* Return PCI Controller Info */
slot_config = shpc_readl(ctrl, SLOT_CONFIG);
ctrl->slot_device_offset = (slot_config & FIRST_DEV_NUM) >> 8;
diff --git a/drivers/pci/ide.c b/drivers/pci/ide.c
new file mode 100644
index 000000000000..f0ef474e1a0d
--- /dev/null
+++ b/drivers/pci/ide.c
@@ -0,0 +1,815 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2024-2025 Intel Corporation. All rights reserved. */
+
+/* PCIe r7.0 section 6.33 Integrity & Data Encryption (IDE) */
+
+#define dev_fmt(fmt) "PCI/IDE: " fmt
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/pci.h>
+#include <linux/pci-ide.h>
+#include <linux/pci_regs.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/tsm.h>
+
+#include "pci.h"
+
+static int __sel_ide_offset(u16 ide_cap, u8 nr_link_ide, u8 stream_index,
+ u8 nr_ide_mem)
+{
+ u32 offset = ide_cap + PCI_IDE_LINK_STREAM_0 +
+ nr_link_ide * PCI_IDE_LINK_BLOCK_SIZE;
+
+ /*
+ * Assume a constant number of address association resources per stream
+ * index
+ */
+ return offset + stream_index * PCI_IDE_SEL_BLOCK_SIZE(nr_ide_mem);
+}
+
+static int sel_ide_offset(struct pci_dev *pdev,
+ struct pci_ide_partner *settings)
+{
+ return __sel_ide_offset(pdev->ide_cap, pdev->nr_link_ide,
+ settings->stream_index, pdev->nr_ide_mem);
+}
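
To make the arithmetic above concrete, here is a self-contained sketch of the offset computation; the block sizes and the Link Stream base offset below are illustrative placeholders, not the values defined for the IDE Extended Capability in pci_regs.h:

#include <stdio.h>

/* Illustrative layout constants only. */
#define LINK_STREAM_0		0x18	/* start of Link IDE register blocks */
#define LINK_BLOCK_SIZE		0x08	/* one Link IDE stream block */
#define SEL_BLOCK_SIZE(nr_mem)	(0x14 + (nr_mem) * 0x0c)	/* one Selective block */

/* Offset of Selective Stream register block @stream_index, assuming every
 * stream advertises the same number of address association registers. */
static int sel_ide_offset(int ide_cap, int nr_link_ide, int stream_index,
			  int nr_ide_mem)
{
	int offset = ide_cap + LINK_STREAM_0 + nr_link_ide * LINK_BLOCK_SIZE;

	return offset + stream_index * SEL_BLOCK_SIZE(nr_ide_mem);
}

int main(void)
{
	/* e.g. capability at 0x400, two Link IDE streams, two address blocks */
	printf("stream 3 at 0x%x\n", sel_ide_offset(0x400, 2, 3, 2));
	return 0;
}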
+
+static bool reserve_stream_index(struct pci_dev *pdev, u8 idx)
+{
+ int ret;
+
+ ret = ida_alloc_range(&pdev->ide_stream_ida, idx, idx, GFP_KERNEL);
+ return ret >= 0;
+}
+
+static bool reserve_stream_id(struct pci_host_bridge *hb, u8 id)
+{
+ int ret;
+
+ ret = ida_alloc_range(&hb->ide_stream_ids_ida, id, id, GFP_KERNEL);
+ return ret >= 0;
+}
+
+static bool claim_stream(struct pci_host_bridge *hb, u8 stream_id,
+ struct pci_dev *pdev, u8 stream_idx)
+{
+ dev_info(&hb->dev, "Stream ID %d active at init\n", stream_id);
+ if (!reserve_stream_id(hb, stream_id)) {
+ dev_info(&hb->dev, "Failed to claim %s Stream ID %d\n",
+ stream_id == PCI_IDE_RESERVED_STREAM_ID ? "reserved" :
+ "active",
+ stream_id);
+ return false;
+ }
+
+ /* No stream index to reserve in the Link IDE case */
+ if (!pdev)
+ return true;
+
+ if (!reserve_stream_index(pdev, stream_idx)) {
+ pci_info(pdev, "Failed to claim active Selective Stream %d\n",
+ stream_idx);
+ return false;
+ }
+
+ return true;
+}
+
+void pci_ide_init(struct pci_dev *pdev)
+{
+ struct pci_host_bridge *hb = pci_find_host_bridge(pdev->bus);
+ u16 nr_link_ide, nr_ide_mem, nr_streams;
+ u16 ide_cap;
+ u32 val;
+
+ /*
+ * Unconditionally init so that ida idle state is consistent with
+ * pdev->ide_cap.
+ */
+ ida_init(&pdev->ide_stream_ida);
+
+ if (!pci_is_pcie(pdev))
+ return;
+
+ ide_cap = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_IDE);
+ if (!ide_cap)
+ return;
+
+ pci_read_config_dword(pdev, ide_cap + PCI_IDE_CAP, &val);
+ if ((val & PCI_IDE_CAP_SELECTIVE) == 0)
+ return;
+
+ /*
+	 * Require the Endpoint IDE capability to be paired with a Root Port
+	 * IDE capability.
+ */
+ if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ENDPOINT) {
+ struct pci_dev *rp = pcie_find_root_port(pdev);
+
+ if (!rp->ide_cap)
+ return;
+ }
+
+ pdev->ide_cfg = FIELD_GET(PCI_IDE_CAP_SEL_CFG, val);
+ pdev->ide_tee_limit = FIELD_GET(PCI_IDE_CAP_TEE_LIMITED, val);
+
+ if (val & PCI_IDE_CAP_LINK)
+ nr_link_ide = 1 + FIELD_GET(PCI_IDE_CAP_LINK_TC_NUM, val);
+ else
+ nr_link_ide = 0;
+
+ nr_ide_mem = 0;
+ nr_streams = 1 + FIELD_GET(PCI_IDE_CAP_SEL_NUM, val);
+ for (u16 i = 0; i < nr_streams; i++) {
+ int pos = __sel_ide_offset(ide_cap, nr_link_ide, i, nr_ide_mem);
+ int nr_assoc;
+ u32 val;
+ u8 id;
+
+ pci_read_config_dword(pdev, pos + PCI_IDE_SEL_CAP, &val);
+
+ /*
+ * Let's not entertain streams that do not have a constant
+ * number of address association blocks
+ */
+ nr_assoc = FIELD_GET(PCI_IDE_SEL_CAP_ASSOC_NUM, val);
+ if (i && (nr_assoc != nr_ide_mem)) {
+			pci_info(pdev, "Unsupported Selective Stream %d capability, skipping the rest\n", i);
+ nr_streams = i;
+ break;
+ }
+
+ nr_ide_mem = nr_assoc;
+
+ /*
+ * Claim Stream IDs and Selective Stream blocks that are already
+ * active on the device
+ */
+ pci_read_config_dword(pdev, pos + PCI_IDE_SEL_CTL, &val);
+ id = FIELD_GET(PCI_IDE_SEL_CTL_ID, val);
+ if ((val & PCI_IDE_SEL_CTL_EN) &&
+ !claim_stream(hb, id, pdev, i))
+ return;
+ }
+
+ /* Reserve link stream-ids that are already active on the device */
+ for (u16 i = 0; i < nr_link_ide; ++i) {
+ int pos = ide_cap + PCI_IDE_LINK_STREAM_0 + i * PCI_IDE_LINK_BLOCK_SIZE;
+ u8 id;
+
+ pci_read_config_dword(pdev, pos + PCI_IDE_LINK_CTL_0, &val);
+ id = FIELD_GET(PCI_IDE_LINK_CTL_ID, val);
+ if ((val & PCI_IDE_LINK_CTL_EN) &&
+ !claim_stream(hb, id, NULL, -1))
+ return;
+ }
+
+ for (u16 i = 0; i < nr_streams; i++) {
+ int pos = __sel_ide_offset(ide_cap, nr_link_ide, i, nr_ide_mem);
+
+		pci_read_config_dword(pdev, pos + PCI_IDE_SEL_CTL, &val);
+ if (val & PCI_IDE_SEL_CTL_EN)
+ continue;
+ val &= ~PCI_IDE_SEL_CTL_ID;
+ val |= FIELD_PREP(PCI_IDE_SEL_CTL_ID, PCI_IDE_RESERVED_STREAM_ID);
+ pci_write_config_dword(pdev, pos + PCI_IDE_SEL_CTL, val);
+ }
+
+ for (u16 i = 0; i < nr_link_ide; ++i) {
+ int pos = ide_cap + PCI_IDE_LINK_STREAM_0 +
+ i * PCI_IDE_LINK_BLOCK_SIZE;
+
+ pci_read_config_dword(pdev, pos, &val);
+ if (val & PCI_IDE_LINK_CTL_EN)
+ continue;
+ val &= ~PCI_IDE_LINK_CTL_ID;
+ val |= FIELD_PREP(PCI_IDE_LINK_CTL_ID, PCI_IDE_RESERVED_STREAM_ID);
+ pci_write_config_dword(pdev, pos, val);
+ }
+
+ pdev->ide_cap = ide_cap;
+ pdev->nr_link_ide = nr_link_ide;
+ pdev->nr_sel_ide = nr_streams;
+ pdev->nr_ide_mem = nr_ide_mem;
+}
+
+struct stream_index {
+ struct ida *ida;
+ u8 stream_index;
+};
+
+static void free_stream_index(struct stream_index *stream)
+{
+ ida_free(stream->ida, stream->stream_index);
+}
+
+DEFINE_FREE(free_stream, struct stream_index *, if (_T) free_stream_index(_T))
+static struct stream_index *alloc_stream_index(struct ida *ida, u16 max,
+ struct stream_index *stream)
+{
+ int id;
+
+ if (!max)
+ return NULL;
+
+ id = ida_alloc_max(ida, max - 1, GFP_KERNEL);
+ if (id < 0)
+ return NULL;
+
+ *stream = (struct stream_index) {
+ .ida = ida,
+ .stream_index = id,
+ };
+ return stream;
+}
+
+/**
+ * pci_ide_stream_alloc() - Reserve stream indices and probe for settings
+ * @pdev: IDE capable PCIe Endpoint Physical Function
+ *
+ * Retrieve the Requester ID range of @pdev for programming its Root
+ * Port IDE RID Association registers, and conversely retrieve the
+ * Requester ID of the Root Port for programming @pdev's IDE RID
+ * Association registers.
+ *
+ * Allocate a Selective IDE Stream Register Block instance per port.
+ *
+ * Allocate a platform stream resource from the associated host bridge.
+ * Retrieve stream association parameters for Requester ID range and
+ * address range restrictions for the stream.
+ */
+struct pci_ide *pci_ide_stream_alloc(struct pci_dev *pdev)
+{
+ /* EP, RP, + HB Stream allocation */
+ struct stream_index __stream[PCI_IDE_HB + 1];
+ struct pci_bus_region pref_assoc = { 0, -1 };
+ struct pci_bus_region mem_assoc = { 0, -1 };
+ struct resource *mem, *pref;
+ struct pci_host_bridge *hb;
+ struct pci_dev *rp, *br;
+ int num_vf, rid_end;
+
+ if (!pci_is_pcie(pdev))
+ return NULL;
+
+ if (pci_pcie_type(pdev) != PCI_EXP_TYPE_ENDPOINT)
+ return NULL;
+
+ if (!pdev->ide_cap)
+ return NULL;
+
+ struct pci_ide *ide __free(kfree) = kzalloc(sizeof(*ide), GFP_KERNEL);
+ if (!ide)
+ return NULL;
+
+ hb = pci_find_host_bridge(pdev->bus);
+ struct stream_index *hb_stream __free(free_stream) = alloc_stream_index(
+ &hb->ide_stream_ida, hb->nr_ide_streams, &__stream[PCI_IDE_HB]);
+ if (!hb_stream)
+ return NULL;
+
+ rp = pcie_find_root_port(pdev);
+ struct stream_index *rp_stream __free(free_stream) = alloc_stream_index(
+ &rp->ide_stream_ida, rp->nr_sel_ide, &__stream[PCI_IDE_RP]);
+ if (!rp_stream)
+ return NULL;
+
+ struct stream_index *ep_stream __free(free_stream) = alloc_stream_index(
+ &pdev->ide_stream_ida, pdev->nr_sel_ide, &__stream[PCI_IDE_EP]);
+ if (!ep_stream)
+ return NULL;
+
+ /* for SR-IOV case, cover all VFs */
+ num_vf = pci_num_vf(pdev);
+ if (num_vf)
+ rid_end = PCI_DEVID(pci_iov_virtfn_bus(pdev, num_vf),
+ pci_iov_virtfn_devfn(pdev, num_vf));
+ else
+ rid_end = pci_dev_id(pdev);
+
+ br = pci_upstream_bridge(pdev);
+ if (!br)
+ return NULL;
+
+ /*
+	 * Check if the device consumes memory and/or prefetchable memory.
+	 * Set up downstream address association ranges for each.
+ */
+ mem = pci_resource_n(br, PCI_BRIDGE_MEM_WINDOW);
+ pref = pci_resource_n(br, PCI_BRIDGE_PREF_MEM_WINDOW);
+ if (resource_assigned(mem))
+ pcibios_resource_to_bus(br->bus, &mem_assoc, mem);
+ if (resource_assigned(pref))
+ pcibios_resource_to_bus(br->bus, &pref_assoc, pref);
+
+ *ide = (struct pci_ide) {
+ .pdev = pdev,
+ .partner = {
+ [PCI_IDE_EP] = {
+ .rid_start = pci_dev_id(rp),
+ .rid_end = pci_dev_id(rp),
+ .stream_index = no_free_ptr(ep_stream)->stream_index,
+ /* Disable upstream address association */
+ .mem_assoc = { 0, -1 },
+ .pref_assoc = { 0, -1 },
+ },
+ [PCI_IDE_RP] = {
+ .rid_start = pci_dev_id(pdev),
+ .rid_end = rid_end,
+ .stream_index = no_free_ptr(rp_stream)->stream_index,
+ .mem_assoc = mem_assoc,
+ .pref_assoc = pref_assoc,
+ },
+ },
+ .host_bridge_stream = no_free_ptr(hb_stream)->stream_index,
+ .stream_id = -1,
+ };
+
+ return_ptr(ide);
+}
+EXPORT_SYMBOL_GPL(pci_ide_stream_alloc);
+
+/**
+ * pci_ide_stream_free() - unwind pci_ide_stream_alloc()
+ * @ide: idle IDE settings descriptor
+ *
+ * Free all of the stream index (register block) allocations acquired by
+ * pci_ide_stream_alloc(). The stream represented by @ide is assumed to
+ * be unregistered and not instantiated in any device.
+ */
+void pci_ide_stream_free(struct pci_ide *ide)
+{
+ struct pci_dev *pdev = ide->pdev;
+ struct pci_dev *rp = pcie_find_root_port(pdev);
+ struct pci_host_bridge *hb = pci_find_host_bridge(pdev->bus);
+
+ ida_free(&pdev->ide_stream_ida, ide->partner[PCI_IDE_EP].stream_index);
+ ida_free(&rp->ide_stream_ida, ide->partner[PCI_IDE_RP].stream_index);
+ ida_free(&hb->ide_stream_ida, ide->host_bridge_stream);
+ kfree(ide);
+}
+EXPORT_SYMBOL_GPL(pci_ide_stream_free);
+
+/**
+ * pci_ide_stream_release() - unwind and release an @ide context
+ * @ide: partially or fully registered IDE settings descriptor
+ *
+ * In support of automatic cleanup of IDE setup routines, perform IDE
+ * teardown in the reverse order of setup, taking into account which
+ * aspects of the setup completed successfully.
+ *
+ * Be careful that setup order mirrors this shutdown order. Otherwise,
+ * open code releasing the IDE context.
+ */
+void pci_ide_stream_release(struct pci_ide *ide)
+{
+ struct pci_dev *pdev = ide->pdev;
+ struct pci_dev *rp = pcie_find_root_port(pdev);
+
+ if (ide->partner[PCI_IDE_RP].enable)
+ pci_ide_stream_disable(rp, ide);
+
+ if (ide->partner[PCI_IDE_EP].enable)
+ pci_ide_stream_disable(pdev, ide);
+
+ if (ide->tsm_dev)
+ tsm_ide_stream_unregister(ide);
+
+ if (ide->partner[PCI_IDE_RP].setup)
+ pci_ide_stream_teardown(rp, ide);
+
+ if (ide->partner[PCI_IDE_EP].setup)
+ pci_ide_stream_teardown(pdev, ide);
+
+ if (ide->name)
+ pci_ide_stream_unregister(ide);
+
+ pci_ide_stream_free(ide);
+}
+EXPORT_SYMBOL_GPL(pci_ide_stream_release);
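+
+/*
+ * Illustrative sketch of the setup sequence that pci_ide_stream_release()
+ * unwinds. It is not lifted from a real caller: the Stream ID value and the
+ * omission of TSM registration and key programming are simplifications.
+ *
+ *	struct pci_dev *rp = pcie_find_root_port(pdev);
+ *	struct pci_ide *ide = pci_ide_stream_alloc(pdev);
+ *	int rc;
+ *
+ *	if (!ide)
+ *		return -ENOMEM;
+ *	ide->stream_id = 0;	(Stream ID as negotiated with the TSM)
+ *	rc = pci_ide_stream_register(ide);
+ *	if (rc)
+ *		goto err;
+ *	pci_ide_stream_setup(rp, ide);
+ *	pci_ide_stream_setup(pdev, ide);
+ *	(... key programming, then pci_ide_stream_enable() on both ends ...)
+ *	return 0;
+ * err:
+ *	pci_ide_stream_release(ide);
+ *	return rc;
+ */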
+
+struct pci_ide_stream_id {
+ struct pci_host_bridge *hb;
+ u8 stream_id;
+};
+
+static struct pci_ide_stream_id *
+request_stream_id(struct pci_host_bridge *hb, u8 stream_id,
+ struct pci_ide_stream_id *sid)
+{
+ if (!reserve_stream_id(hb, stream_id))
+ return NULL;
+
+ *sid = (struct pci_ide_stream_id) {
+ .hb = hb,
+ .stream_id = stream_id,
+ };
+
+ return sid;
+}
+DEFINE_FREE(free_stream_id, struct pci_ide_stream_id *,
+ if (_T) ida_free(&_T->hb->ide_stream_ids_ida, _T->stream_id))
+
+/**
+ * pci_ide_stream_register() - Prepare to activate an IDE Stream
+ * @ide: IDE settings descriptor
+ *
+ * After a Stream ID has been acquired for @ide, record the presence of
+ * the stream in sysfs. The expectation is that @ide is immutable while
+ * registered.
+ */
+int pci_ide_stream_register(struct pci_ide *ide)
+{
+ struct pci_dev *pdev = ide->pdev;
+ struct pci_host_bridge *hb = pci_find_host_bridge(pdev->bus);
+ struct pci_ide_stream_id __sid;
+ u8 ep_stream, rp_stream;
+ int rc;
+
+ if (ide->stream_id < 0 || ide->stream_id > U8_MAX) {
+ pci_err(pdev, "Setup fail: Invalid Stream ID: %d\n", ide->stream_id);
+ return -ENXIO;
+ }
+
+ struct pci_ide_stream_id *sid __free(free_stream_id) =
+ request_stream_id(hb, ide->stream_id, &__sid);
+ if (!sid) {
+ pci_err(pdev, "Setup fail: Stream ID %d in use\n", ide->stream_id);
+ return -EBUSY;
+ }
+
+ ep_stream = ide->partner[PCI_IDE_EP].stream_index;
+ rp_stream = ide->partner[PCI_IDE_RP].stream_index;
+ const char *name __free(kfree) = kasprintf(GFP_KERNEL, "stream%d.%d.%d",
+ ide->host_bridge_stream,
+ rp_stream, ep_stream);
+ if (!name)
+ return -ENOMEM;
+
+ rc = sysfs_create_link(&hb->dev.kobj, &pdev->dev.kobj, name);
+ if (rc)
+ return rc;
+
+ ide->name = no_free_ptr(name);
+
+ /* Stream ID reservation recorded in @ide is now successfully registered */
+ retain_and_null_ptr(sid);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pci_ide_stream_register);
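+
+/*
+ * The registered stream shows up as a symlink under the host bridge device,
+ * named "stream<hb>.<rp>.<ep>" after the three allocated stream indices and
+ * pointing at the endpoint's device directory, e.g. (indices illustrative):
+ * stream0.1.2.
+ */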
+
+/**
+ * pci_ide_stream_unregister() - unwind pci_ide_stream_register()
+ * @ide: idle IDE settings descriptor
+ *
+ * In preparation for freeing @ide, remove sysfs enumeration for the
+ * stream.
+ */
+void pci_ide_stream_unregister(struct pci_ide *ide)
+{
+ struct pci_dev *pdev = ide->pdev;
+ struct pci_host_bridge *hb = pci_find_host_bridge(pdev->bus);
+
+ sysfs_remove_link(&hb->dev.kobj, ide->name);
+ kfree(ide->name);
+ ida_free(&hb->ide_stream_ids_ida, ide->stream_id);
+ ide->name = NULL;
+}
+EXPORT_SYMBOL_GPL(pci_ide_stream_unregister);
+
+static int pci_ide_domain(struct pci_dev *pdev)
+{
+ if (pdev->fm_enabled)
+ return pci_domain_nr(pdev->bus);
+ return 0;
+}
+
+struct pci_ide_partner *pci_ide_to_settings(struct pci_dev *pdev, struct pci_ide *ide)
+{
+ if (!pci_is_pcie(pdev)) {
+ pci_warn_once(pdev, "not a PCIe device\n");
+ return NULL;
+ }
+
+ switch (pci_pcie_type(pdev)) {
+ case PCI_EXP_TYPE_ENDPOINT:
+ if (pdev != ide->pdev) {
+ pci_warn_once(pdev, "setup expected Endpoint: %s\n", pci_name(ide->pdev));
+ return NULL;
+ }
+ return &ide->partner[PCI_IDE_EP];
+ case PCI_EXP_TYPE_ROOT_PORT: {
+ struct pci_dev *rp = pcie_find_root_port(ide->pdev);
+
+ if (pdev != rp) {
+ pci_warn_once(pdev, "setup expected Root Port: %s\n",
+ pci_name(rp));
+ return NULL;
+ }
+ return &ide->partner[PCI_IDE_RP];
+ }
+ default:
+ pci_warn_once(pdev, "invalid device type\n");
+ return NULL;
+ }
+}
+EXPORT_SYMBOL_GPL(pci_ide_to_settings);
+
+static void set_ide_sel_ctl(struct pci_dev *pdev, struct pci_ide *ide,
+ struct pci_ide_partner *settings, int pos,
+ bool enable)
+{
+ u32 val = FIELD_PREP(PCI_IDE_SEL_CTL_ID, ide->stream_id) |
+ FIELD_PREP(PCI_IDE_SEL_CTL_DEFAULT, settings->default_stream) |
+ FIELD_PREP(PCI_IDE_SEL_CTL_CFG_EN, pdev->ide_cfg) |
+ FIELD_PREP(PCI_IDE_SEL_CTL_TEE_LIMITED, pdev->ide_tee_limit) |
+ FIELD_PREP(PCI_IDE_SEL_CTL_EN, enable);
+
+ pci_write_config_dword(pdev, pos + PCI_IDE_SEL_CTL, val);
+}
+
+#define SEL_ADDR1_LOWER GENMASK(31, 20)
+#define SEL_ADDR_UPPER GENMASK_ULL(63, 32)
+#define PREP_PCI_IDE_SEL_ADDR1(base, limit) \
+ (FIELD_PREP(PCI_IDE_SEL_ADDR_1_VALID, 1) | \
+ FIELD_PREP(PCI_IDE_SEL_ADDR_1_BASE_LOW, \
+ FIELD_GET(SEL_ADDR1_LOWER, (base))) | \
+ FIELD_PREP(PCI_IDE_SEL_ADDR_1_LIMIT_LOW, \
+ FIELD_GET(SEL_ADDR1_LOWER, (limit))))
+
+static void mem_assoc_to_regs(struct pci_bus_region *region,
+ struct pci_ide_regs *regs, int idx)
+{
+ /* convert to u64 range for bitfield size checks */
+ struct range r = { region->start, region->end };
+
+ regs->addr[idx].assoc1 = PREP_PCI_IDE_SEL_ADDR1(r.start, r.end);
+ regs->addr[idx].assoc2 = FIELD_GET(SEL_ADDR_UPPER, r.end);
+ regs->addr[idx].assoc3 = FIELD_GET(SEL_ADDR_UPPER, r.start);
+}
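+
+/*
+ * Worked example with illustrative values: a bridge window of
+ * [0xa0000000, 0xbfffffff] packs as
+ *
+ *	assoc1: valid = 1, base[31:20] = 0xa00, limit[31:20] = 0xbff
+ *	assoc2: limit[63:32] = 0x0
+ *	assoc3: base[63:32]  = 0x0
+ *
+ * i.e. the first register carries only bits 31:20 of base and limit, while
+ * the upper 32 bits of limit and base fill the second and third registers.
+ */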
+
+/**
+ * pci_ide_stream_to_regs() - convert IDE settings to association register values
+ * @pdev: PCIe device object for either a Root Port or Endpoint Partner Port
+ * @ide: registered IDE settings descriptor
+ * @regs: output register values
+ */
+static void pci_ide_stream_to_regs(struct pci_dev *pdev, struct pci_ide *ide,
+ struct pci_ide_regs *regs)
+{
+ struct pci_ide_partner *settings = pci_ide_to_settings(pdev, ide);
+ int assoc_idx = 0;
+
+ memset(regs, 0, sizeof(*regs));
+
+ if (!settings)
+ return;
+
+ regs->rid1 = FIELD_PREP(PCI_IDE_SEL_RID_1_LIMIT, settings->rid_end);
+
+ regs->rid2 = FIELD_PREP(PCI_IDE_SEL_RID_2_VALID, 1) |
+ FIELD_PREP(PCI_IDE_SEL_RID_2_BASE, settings->rid_start) |
+ FIELD_PREP(PCI_IDE_SEL_RID_2_SEG, pci_ide_domain(pdev));
+
+ if (pdev->nr_ide_mem && pci_bus_region_size(&settings->mem_assoc)) {
+ mem_assoc_to_regs(&settings->mem_assoc, regs, assoc_idx);
+ assoc_idx++;
+ }
+
+ if (pdev->nr_ide_mem > assoc_idx &&
+ pci_bus_region_size(&settings->pref_assoc)) {
+ mem_assoc_to_regs(&settings->pref_assoc, regs, assoc_idx);
+ assoc_idx++;
+ }
+
+ regs->nr_addr = assoc_idx;
+}
+
+/**
+ * pci_ide_stream_setup() - program settings to Selective IDE Stream registers
+ * @pdev: PCIe device object for either a Root Port or Endpoint Partner Port
+ * @ide: registered IDE settings descriptor
+ *
+ * When @pdev is a PCI_EXP_TYPE_ENDPOINT then the PCI_IDE_EP partner
+ * settings are written to @pdev's Selective IDE Stream register block,
+ * and when @pdev is a PCI_EXP_TYPE_ROOT_PORT, the PCI_IDE_RP settings
+ * are selected.
+ */
+void pci_ide_stream_setup(struct pci_dev *pdev, struct pci_ide *ide)
+{
+ struct pci_ide_partner *settings = pci_ide_to_settings(pdev, ide);
+ struct pci_ide_regs regs;
+ int pos;
+
+ if (!settings)
+ return;
+
+ pci_ide_stream_to_regs(pdev, ide, &regs);
+
+ pos = sel_ide_offset(pdev, settings);
+
+ pci_write_config_dword(pdev, pos + PCI_IDE_SEL_RID_1, regs.rid1);
+ pci_write_config_dword(pdev, pos + PCI_IDE_SEL_RID_2, regs.rid2);
+
+ for (int i = 0; i < regs.nr_addr; i++) {
+ pci_write_config_dword(pdev, pos + PCI_IDE_SEL_ADDR_1(i),
+ regs.addr[i].assoc1);
+ pci_write_config_dword(pdev, pos + PCI_IDE_SEL_ADDR_2(i),
+ regs.addr[i].assoc2);
+ pci_write_config_dword(pdev, pos + PCI_IDE_SEL_ADDR_3(i),
+ regs.addr[i].assoc3);
+ }
+
+ /* clear extra unused address association blocks */
+ for (int i = regs.nr_addr; i < pdev->nr_ide_mem; i++) {
+ pci_write_config_dword(pdev, pos + PCI_IDE_SEL_ADDR_1(i), 0);
+ pci_write_config_dword(pdev, pos + PCI_IDE_SEL_ADDR_2(i), 0);
+ pci_write_config_dword(pdev, pos + PCI_IDE_SEL_ADDR_3(i), 0);
+ }
+
+ /*
+	 * Set up the control register early for devices that expect
+	 * the Stream ID to be set during key programming.
+ */
+ set_ide_sel_ctl(pdev, ide, settings, pos, false);
+ settings->setup = 1;
+}
+EXPORT_SYMBOL_GPL(pci_ide_stream_setup);
+
+/**
+ * pci_ide_stream_teardown() - disable the stream and clear all settings
+ * @pdev: PCIe device object for either a Root Port or Endpoint Partner Port
+ * @ide: registered IDE settings descriptor
+ *
+ * For stream destruction, zero all registers that may have been written
+ * by pci_ide_stream_setup(). Consider pci_ide_stream_disable() to leave
+ * settings in place while temporarily disabling the stream.
+ */
+void pci_ide_stream_teardown(struct pci_dev *pdev, struct pci_ide *ide)
+{
+ struct pci_ide_partner *settings = pci_ide_to_settings(pdev, ide);
+ int pos, i;
+
+ if (!settings)
+ return;
+
+ pos = sel_ide_offset(pdev, settings);
+
+ pci_write_config_dword(pdev, pos + PCI_IDE_SEL_CTL, 0);
+
+ for (i = 0; i < pdev->nr_ide_mem; i++) {
+ pci_write_config_dword(pdev, pos + PCI_IDE_SEL_ADDR_1(i), 0);
+ pci_write_config_dword(pdev, pos + PCI_IDE_SEL_ADDR_2(i), 0);
+ pci_write_config_dword(pdev, pos + PCI_IDE_SEL_ADDR_3(i), 0);
+ }
+
+ pci_write_config_dword(pdev, pos + PCI_IDE_SEL_RID_2, 0);
+ pci_write_config_dword(pdev, pos + PCI_IDE_SEL_RID_1, 0);
+ settings->setup = 0;
+}
+EXPORT_SYMBOL_GPL(pci_ide_stream_teardown);
+
+/**
+ * pci_ide_stream_enable() - enable a Selective IDE Stream
+ * @pdev: PCIe device object for either a Root Port or Endpoint Partner Port
+ * @ide: registered and setup IDE settings descriptor
+ *
+ * Activate the stream by writing to the Selective IDE Stream Control
+ * Register.
+ *
+ * Return: 0 if the stream successfully entered the "secure" state, -EINVAL
+ * if @ide is invalid, or -ENXIO if the stream fails to enter the secure state.
+ *
+ * Note that the state may go "insecure" at any point after returning 0, but
+ * those events are equivalent to a "link down" event and handled via
+ * asynchronous error reporting.
+ *
+ * The caller is responsible for clearing the enable bit in the -ENXIO case.
+ */
+int pci_ide_stream_enable(struct pci_dev *pdev, struct pci_ide *ide)
+{
+ struct pci_ide_partner *settings = pci_ide_to_settings(pdev, ide);
+ int pos;
+ u32 val;
+
+ if (!settings)
+ return -EINVAL;
+
+ pos = sel_ide_offset(pdev, settings);
+
+ set_ide_sel_ctl(pdev, ide, settings, pos, true);
+ settings->enable = 1;
+
+ pci_read_config_dword(pdev, pos + PCI_IDE_SEL_STS, &val);
+ if (FIELD_GET(PCI_IDE_SEL_STS_STATE, val) !=
+ PCI_IDE_SEL_STS_STATE_SECURE)
+ return -ENXIO;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pci_ide_stream_enable);
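+
+/*
+ * Sketch of the error unwinding asked for above, assuming @rp and @pdev are
+ * the Root Port and Endpoint partners already set up for @ide:
+ *
+ *	rc = pci_ide_stream_enable(rp, ide);
+ *	if (rc) {
+ *		pci_ide_stream_disable(rp, ide);
+ *		return rc;
+ *	}
+ *	rc = pci_ide_stream_enable(pdev, ide);
+ *	if (rc) {
+ *		pci_ide_stream_disable(pdev, ide);
+ *		pci_ide_stream_disable(rp, ide);
+ *		return rc;
+ *	}
+ */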
+
+/**
+ * pci_ide_stream_disable() - disable a Selective IDE Stream
+ * @pdev: PCIe device object for either a Root Port or Endpoint Partner Port
+ * @ide: registered and setup IDE settings descriptor
+ *
+ * Clear the Selective IDE Stream Control Register, but leave all other
+ * registers untouched.
+ */
+void pci_ide_stream_disable(struct pci_dev *pdev, struct pci_ide *ide)
+{
+ struct pci_ide_partner *settings = pci_ide_to_settings(pdev, ide);
+ int pos;
+
+ if (!settings)
+ return;
+
+ pos = sel_ide_offset(pdev, settings);
+
+ pci_write_config_dword(pdev, pos + PCI_IDE_SEL_CTL, 0);
+ settings->enable = 0;
+}
+EXPORT_SYMBOL_GPL(pci_ide_stream_disable);
+
+void pci_ide_init_host_bridge(struct pci_host_bridge *hb)
+{
+ hb->nr_ide_streams = 256;
+ ida_init(&hb->ide_stream_ida);
+ ida_init(&hb->ide_stream_ids_ida);
+ reserve_stream_id(hb, PCI_IDE_RESERVED_STREAM_ID);
+}
+
+static ssize_t available_secure_streams_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct pci_host_bridge *hb = to_pci_host_bridge(dev);
+ int nr = READ_ONCE(hb->nr_ide_streams);
+ int avail = nr;
+
+ if (!nr)
+ return -ENXIO;
+
+ /*
+ * Yes, this is inefficient and racy, but it is only for occasional
+ * platform resource surveys. Worst case is bounded to 256 streams.
+ */
+ for (int i = 0; i < nr; i++)
+ if (ida_exists(&hb->ide_stream_ida, i))
+ avail--;
+ return sysfs_emit(buf, "%d\n", avail);
+}
+static DEVICE_ATTR_RO(available_secure_streams);
+
+static struct attribute *pci_ide_attrs[] = {
+ &dev_attr_available_secure_streams.attr,
+ NULL
+};
+
+static umode_t pci_ide_attr_visible(struct kobject *kobj, struct attribute *a, int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct pci_host_bridge *hb = to_pci_host_bridge(dev);
+
+ if (a == &dev_attr_available_secure_streams.attr)
+ if (!hb->nr_ide_streams)
+ return 0;
+
+ return a->mode;
+}
+
+const struct attribute_group pci_ide_attr_group = {
+ .attrs = pci_ide_attrs,
+ .is_visible = pci_ide_attr_visible,
+};
+
+/**
+ * pci_ide_set_nr_streams() - sets size of the pool of IDE Stream resources
+ * @hb: host bridge boundary for the stream pool
+ * @nr: number of streams
+ *
+ * Platform PCI init and/or expert test module use only. Limit IDE
+ * Stream establishment by setting the number of stream resources
+ * available at the host bridge. Platform init code must set this before
+ * the first pci_ide_stream_alloc() call if the platform has less than the
+ * default of 256 streams per host-bridge.
+ *
+ * The "PCI_IDE" symbol namespace is required because this is typically
+ * a detail that is settled in early PCI init. I.e. this export is not
+ * for endpoint drivers.
+ */
+void pci_ide_set_nr_streams(struct pci_host_bridge *hb, u16 nr)
+{
+ hb->nr_ide_streams = min(nr, 256);
+ WARN_ON_ONCE(!ida_is_empty(&hb->ide_stream_ida));
+ sysfs_update_group(&hb->dev.kobj, &pci_ide_attr_group);
+}
+EXPORT_SYMBOL_NS_GPL(pci_ide_set_nr_streams, "PCI_IDE");
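+
+/*
+ * Minimal sketch of platform init usage; the host bridge lookup and the
+ * stream count of 16 are illustrative only:
+ *
+ *	struct pci_host_bridge *hb = pci_find_host_bridge(root_bus);
+ *
+ *	pci_ide_set_nr_streams(hb, 16);
+ */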
+
+void pci_ide_destroy(struct pci_dev *pdev)
+{
+ ida_destroy(&pdev->ide_stream_ida);
+}
diff --git a/drivers/pci/iomap.c b/drivers/pci/iomap.c
new file mode 100644
index 000000000000..ea86c282a386
--- /dev/null
+++ b/drivers/pci/iomap.c
@@ -0,0 +1,190 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Implement the default iomap interfaces
+ *
+ * (C) Copyright 2004 Linus Torvalds
+ */
+#include <linux/pci.h>
+#include <linux/io.h>
+
+#include <linux/export.h>
+
+#include "pci.h" /* for pci_bar_index_is_valid() */
+
+/**
+ * pci_iomap_range - create a virtual mapping cookie for a PCI BAR
+ * @dev: PCI device that owns the BAR
+ * @bar: BAR number
+ * @offset: map memory at the given offset in BAR
+ * @maxlen: max length of the memory to map
+ *
+ * Using this function you will get a __iomem address to your device BAR.
+ * You can access it using ioread*() and iowrite*(). These functions hide
+ * the details of whether this is an MMIO or PIO address space and will just
+ * do what you expect from them in the correct way.
+ *
+ * @maxlen specifies the maximum length to map. If you want to get access to
+ * the complete BAR from offset to the end, pass %0 here.
+ */
+void __iomem *pci_iomap_range(struct pci_dev *dev,
+ int bar,
+ unsigned long offset,
+ unsigned long maxlen)
+{
+ resource_size_t start, len;
+ unsigned long flags;
+
+ if (!pci_bar_index_is_valid(bar))
+ return NULL;
+
+ start = pci_resource_start(dev, bar);
+ len = pci_resource_len(dev, bar);
+ flags = pci_resource_flags(dev, bar);
+
+ if (len <= offset || !start)
+ return NULL;
+
+ len -= offset;
+ start += offset;
+ if (maxlen && len > maxlen)
+ len = maxlen;
+ if (flags & IORESOURCE_IO)
+ return __pci_ioport_map(dev, start, len);
+ if (flags & IORESOURCE_MEM)
+ return ioremap(start, len);
+ /* What? */
+ return NULL;
+}
+EXPORT_SYMBOL(pci_iomap_range);
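+
+/*
+ * Usage sketch (BAR number, offset, length and the CTRL_REG offset are all
+ * illustrative): map 4K of registers starting at offset 0x1000 of BAR 0,
+ * then unmap when done.
+ *
+ *	void __iomem *regs = pci_iomap_range(pdev, 0, 0x1000, SZ_4K);
+ *
+ *	if (!regs)
+ *		return -ENOMEM;
+ *	writel(1, regs + CTRL_REG);
+ *	...
+ *	pci_iounmap(pdev, regs);
+ */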
+
+/**
+ * pci_iomap_wc_range - create a virtual WC mapping cookie for a PCI BAR
+ * @dev: PCI device that owns the BAR
+ * @bar: BAR number
+ * @offset: map memory at the given offset in BAR
+ * @maxlen: max length of the memory to map
+ *
+ * Using this function you will get a __iomem address to your device BAR.
+ * You can access it using ioread*() and iowrite*(). These functions hide
+ * the details of whether this is an MMIO or PIO address space and will just
+ * do what you expect from them in the correct way. When possible, write
+ * combining is used.
+ *
+ * @maxlen specifies the maximum length to map. If you want to get access to
+ * the complete BAR from offset to the end, pass %0 here.
+ */
+void __iomem *pci_iomap_wc_range(struct pci_dev *dev,
+ int bar,
+ unsigned long offset,
+ unsigned long maxlen)
+{
+ resource_size_t start, len;
+ unsigned long flags;
+
+ if (!pci_bar_index_is_valid(bar))
+ return NULL;
+
+ start = pci_resource_start(dev, bar);
+ len = pci_resource_len(dev, bar);
+ flags = pci_resource_flags(dev, bar);
+
+ if (len <= offset || !start)
+ return NULL;
+ if (flags & IORESOURCE_IO)
+ return NULL;
+
+ len -= offset;
+ start += offset;
+ if (maxlen && len > maxlen)
+ len = maxlen;
+
+ if (flags & IORESOURCE_MEM)
+ return ioremap_wc(start, len);
+
+ /* What? */
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(pci_iomap_wc_range);
+
+/**
+ * pci_iomap - create a virtual mapping cookie for a PCI BAR
+ * @dev: PCI device that owns the BAR
+ * @bar: BAR number
+ * @maxlen: length of the memory to map
+ *
+ * Using this function you will get a __iomem address to your device BAR.
+ * You can access it using ioread*() and iowrite*(). These functions hide
+ * the details of whether this is an MMIO or PIO address space and will just
+ * do what you expect from them in the correct way.
+ *
+ * @maxlen specifies the maximum length to map. If you want to get access to
+ * the complete BAR without checking for its length first, pass %0 here.
+ */
+void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
+{
+ return pci_iomap_range(dev, bar, 0, maxlen);
+}
+EXPORT_SYMBOL(pci_iomap);
+
+/**
+ * pci_iomap_wc - create a virtual WC mapping cookie for a PCI BAR
+ * @dev: PCI device that owns the BAR
+ * @bar: BAR number
+ * @maxlen: length of the memory to map
+ *
+ * Using this function you will get a __iomem address to your device BAR.
+ * You can access it using ioread*() and iowrite*(). These functions hide
+ * the details of whether this is an MMIO or PIO address space and will just
+ * do what you expect from them in the correct way. When possible, write
+ * combining is used.
+ *
+ * @maxlen specifies the maximum length to map. If you want to get access to
+ * the complete BAR without checking for its length first, pass %0 here.
+ */
+void __iomem *pci_iomap_wc(struct pci_dev *dev, int bar, unsigned long maxlen)
+{
+ return pci_iomap_wc_range(dev, bar, 0, maxlen);
+}
+EXPORT_SYMBOL_GPL(pci_iomap_wc);
+
+/*
+ * pci_iounmap() somewhat illogically comes from lib/iomap.c for the
+ * CONFIG_GENERIC_IOMAP case, because that's the code that knows about
+ * the different IOMAP ranges.
+ *
+ * But if the architecture does not use the generic iomap code, and if
+ * it has _not_ defined its own private pci_iounmap function, we define
+ * it here.
+ *
+ * NOTE! This default implementation assumes that if the architecture
+ * supports ioport mapping (HAS_IOPORT_MAP), the ioport mapping will
+ * be fixed to the range [ PCI_IOBASE, PCI_IOBASE+IO_SPACE_LIMIT [,
+ * and does not need unmapping with 'ioport_unmap()'.
+ *
+ * If you have different rules for your architecture, you need to
+ * implement your own pci_iounmap() that knows the rules for where
+ * and how IO vs MEM get mapped.
+ *
+ * This code is odd, and the ARCH_HAS/ARCH_WANTS #define logic comes
+ * from legacy <asm-generic/io.h> header file behavior. In particular,
+ * it would seem to make sense to do the iounmap(p) for the non-IO-space
+ * case here regardless, but that's not what the old header file code
+ * did. Probably incorrectly, but this is meant to be bug-for-bug
+ * compatible.
+ */
+#if defined(ARCH_WANTS_GENERIC_PCI_IOUNMAP)
+
+void pci_iounmap(struct pci_dev *dev, void __iomem *p)
+{
+#ifdef ARCH_HAS_GENERIC_IOPORT_MAP
+ uintptr_t start = (uintptr_t) PCI_IOBASE;
+ uintptr_t addr = (uintptr_t) p;
+
+ if (addr >= start && addr < start + IO_SPACE_LIMIT)
+ return;
+#endif
+ iounmap(p);
+}
+EXPORT_SYMBOL(pci_iounmap);
+
+#endif /* ARCH_WANTS_GENERIC_PCI_IOUNMAP */
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index b2e8322755c1..00784a60ba80 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -7,11 +7,16 @@
* Copyright (C) 2009 Intel Corporation, Yu Zhao <yu.zhao@intel.com>
*/
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/log2.h>
#include <linux/pci.h>
+#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/delay.h>
+#include <asm/div64.h>
#include "pci.h"
#define VIRTFN_ID_LEN 17 /* "virtfn%u\0" for 2^32 - 1 */
@@ -41,8 +46,7 @@ int pci_iov_vf_id(struct pci_dev *dev)
return -EINVAL;
pf = pci_physfn(dev);
- return (((dev->bus->number << 8) + dev->devfn) -
- ((pf->bus->number << 8) + pf->devfn + pf->sriov->offset)) /
+ return (pci_dev_id(dev) - (pci_dev_id(pf) + pf->sriov->offset)) /
pf->sriov->stride;
}
EXPORT_SYMBOL_GPL(pci_iov_vf_id);
@@ -151,7 +155,28 @@ resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno)
if (!dev->is_physfn)
return 0;
- return dev->sriov->barsz[resno - PCI_IOV_RESOURCES];
+ return dev->sriov->barsz[pci_resource_num_to_vf_bar(resno)];
+}
+
+void pci_iov_resource_set_size(struct pci_dev *dev, int resno, int size)
+{
+ if (!pci_resource_is_iov(resno)) {
+ pci_warn(dev, "%s is not an IOV resource\n",
+ pci_resource_name(dev, resno));
+ return;
+ }
+
+ resno = pci_resource_num_to_vf_bar(resno);
+ dev->sriov->barsz[resno] = pci_rebar_size_to_bytes(size);
+}
+
+bool pci_iov_is_memory_decoding_enabled(struct pci_dev *dev)
+{
+ u16 cmd;
+
+ pci_read_config_word(dev, dev->sriov->pos + PCI_SRIOV_CTRL, &cmd);
+
+ return cmd & PCI_SRIOV_CTRL_MSE;
}
static void pci_read_vf_config_common(struct pci_dev *virtfn)
@@ -286,23 +311,16 @@ const struct attribute_group sriov_vf_dev_attr_group = {
.is_visible = sriov_vf_attrs_are_visible,
};
-int pci_iov_add_virtfn(struct pci_dev *dev, int id)
+static struct pci_dev *pci_iov_scan_device(struct pci_dev *dev, int id,
+ struct pci_bus *bus)
{
- int i;
- int rc = -ENOMEM;
- u64 size;
- struct pci_dev *virtfn;
- struct resource *res;
struct pci_sriov *iov = dev->sriov;
- struct pci_bus *bus;
-
- bus = virtfn_add_bus(dev->bus, pci_iov_virtfn_bus(dev, id));
- if (!bus)
- goto failed;
+ struct pci_dev *virtfn;
+ int rc;
virtfn = pci_alloc_dev(bus);
if (!virtfn)
- goto failed0;
+ return ERR_PTR(-ENOMEM);
virtfn->devfn = pci_iov_virtfn_devfn(dev, id);
virtfn->vendor = dev->vendor;
@@ -315,21 +333,50 @@ int pci_iov_add_virtfn(struct pci_dev *dev, int id)
pci_read_vf_config_common(virtfn);
rc = pci_setup_device(virtfn);
- if (rc)
- goto failed1;
+ if (rc) {
+ pci_dev_put(dev);
+ pci_bus_put(virtfn->bus);
+ kfree(virtfn);
+ return ERR_PTR(rc);
+ }
+
+ return virtfn;
+}
+
+int pci_iov_add_virtfn(struct pci_dev *dev, int id)
+{
+ struct pci_bus *bus;
+ struct pci_dev *virtfn;
+ struct resource *res;
+ int rc, i;
+ u64 size;
+
+ bus = virtfn_add_bus(dev->bus, pci_iov_virtfn_bus(dev, id));
+ if (!bus) {
+ rc = -ENOMEM;
+ goto failed;
+ }
+
+ virtfn = pci_iov_scan_device(dev, id, bus);
+ if (IS_ERR(virtfn)) {
+ rc = PTR_ERR(virtfn);
+ goto failed0;
+ }
virtfn->dev.parent = dev->dev.parent;
virtfn->multifunction = 0;
for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
- res = &dev->resource[i + PCI_IOV_RESOURCES];
+ int idx = pci_resource_num_from_vf_bar(i);
+
+ res = &dev->resource[idx];
if (!res->parent)
continue;
virtfn->resource[i].name = pci_name(virtfn);
virtfn->resource[i].flags = res->flags;
- size = pci_iov_resource_size(dev, i + PCI_IOV_RESOURCES);
- virtfn->resource[i].start = res->start + size * id;
- virtfn->resource[i].end = virtfn->resource[i].start + size - 1;
+ size = pci_iov_resource_size(dev, idx);
+ resource_set_range(&virtfn->resource[i],
+ res->start + size * id, size);
rc = request_resource(res, &virtfn->resource[i]);
BUG_ON(rc);
}
@@ -582,15 +629,18 @@ static int sriov_add_vfs(struct pci_dev *dev, u16 num_vfs)
if (dev->no_vf_scan)
return 0;
+ pci_lock_rescan_remove();
for (i = 0; i < num_vfs; i++) {
rc = pci_iov_add_virtfn(dev, i);
if (rc)
goto failed;
}
+ pci_unlock_rescan_remove();
return 0;
failed:
while (i--)
pci_iov_remove_virtfn(dev, i);
+ pci_unlock_rescan_remove();
return rc;
}
@@ -624,8 +674,13 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
nres = 0;
for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
- bars |= (1 << (i + PCI_IOV_RESOURCES));
- res = &dev->resource[i + PCI_IOV_RESOURCES];
+ int idx = pci_resource_num_from_vf_bar(i);
+ resource_size_t vf_bar_sz = pci_iov_resource_size(dev, idx);
+
+ bars |= (1 << idx);
+ res = &dev->resource[idx];
+ if (vf_bar_sz * nr_virtfn > resource_size(res))
+ continue;
if (res->parent)
nres++;
}
@@ -710,8 +765,10 @@ static void sriov_del_vfs(struct pci_dev *dev)
struct pci_sriov *iov = dev->sriov;
int i;
+ pci_lock_rescan_remove();
for (i = 0; i < iov->num_VFs; i++)
pci_iov_remove_virtfn(dev, i);
+ pci_unlock_rescan_remove();
}
static void sriov_disable(struct pci_dev *dev)
@@ -746,7 +803,9 @@ static int sriov_init(struct pci_dev *dev, int pos)
u16 ctrl, total;
struct pci_sriov *iov;
struct resource *res;
+ const char *res_name;
struct pci_dev *pdev;
+ u32 sriovbars[PCI_SRIOV_NUM_BARS];
pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &ctrl);
if (ctrl & PCI_SRIOV_CTRL_VFE) {
@@ -783,9 +842,17 @@ found:
if (!iov)
return -ENOMEM;
+ /* Sizing SR-IOV BARs with VF Enable cleared - no decode */
+ __pci_size_stdbars(dev, PCI_SRIOV_NUM_BARS,
+ pos + PCI_SRIOV_BAR, sriovbars);
+
nres = 0;
for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
- res = &dev->resource[i + PCI_IOV_RESOURCES];
+ int idx = pci_resource_num_from_vf_bar(i);
+
+ res = &dev->resource[idx];
+ res_name = pci_resource_name(dev, idx);
+
/*
* If it is already FIXED, don't change it, something
* (perhaps EA or header fixups) wants it this way.
@@ -794,7 +861,8 @@ found:
bar64 = (res->flags & IORESOURCE_MEM_64) ? 1 : 0;
else
bar64 = __pci_read_base(dev, pci_bar_unknown, res,
- pos + PCI_SRIOV_BAR + i * 4);
+ pos + PCI_SRIOV_BAR + i * 4,
+ &sriovbars[i]);
if (!res->flags)
continue;
if (resource_size(res) & (PAGE_SIZE - 1)) {
@@ -802,9 +870,9 @@ found:
goto failed;
}
iov->barsz[i] = resource_size(res);
- res->end = res->start + resource_size(res) * total - 1;
- pci_info(dev, "VF(n) BAR%d space: %pR (contains BAR%d for %d VFs)\n",
- i, res, i, total);
+ resource_set_size(res, resource_size(res) * total);
+ pci_info(dev, "%s %pR: contains BAR %d for %d VFs\n",
+ res_name, res, i, total);
i += bar64;
nres++;
}
@@ -822,6 +890,7 @@ found:
pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);
if (pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END)
iov->link = PCI_DEVFN(PCI_SLOT(dev->devfn), iov->link);
+ iov->vf_rebar_cap = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_VF_REBAR);
if (pdev)
iov->dev = pci_dev_get(pdev);
@@ -841,7 +910,7 @@ fail_max_buses:
dev->is_physfn = 0;
failed:
for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
- res = &dev->resource[i + PCI_IOV_RESOURCES];
+ res = &dev->resource[pci_resource_num_from_vf_bar(i)];
res->flags = 0;
}
@@ -860,6 +929,30 @@ static void sriov_release(struct pci_dev *dev)
dev->sriov = NULL;
}
+static void sriov_restore_vf_rebar_state(struct pci_dev *dev)
+{
+ unsigned int pos, nbars, i;
+ u32 ctrl;
+
+ pos = pci_iov_vf_rebar_cap(dev);
+ if (!pos)
+ return;
+
+ pci_read_config_dword(dev, pos + PCI_VF_REBAR_CTRL, &ctrl);
+ nbars = FIELD_GET(PCI_VF_REBAR_CTRL_NBAR_MASK, ctrl);
+
+ for (i = 0; i < nbars; i++, pos += 8) {
+ int bar_idx, size;
+
+ pci_read_config_dword(dev, pos + PCI_VF_REBAR_CTRL, &ctrl);
+ bar_idx = FIELD_GET(PCI_VF_REBAR_CTRL_BAR_IDX, ctrl);
+ size = pci_rebar_bytes_to_size(dev->sriov->barsz[bar_idx]);
+ ctrl &= ~PCI_VF_REBAR_CTRL_BAR_SIZE;
+ ctrl |= FIELD_PREP(PCI_VF_REBAR_CTRL_BAR_SIZE, size);
+ pci_write_config_dword(dev, pos + PCI_VF_REBAR_CTRL, ctrl);
+ }
+}
+
static void sriov_restore_state(struct pci_dev *dev)
{
int i;
@@ -879,7 +972,7 @@ static void sriov_restore_state(struct pci_dev *dev)
pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, ctrl);
for (i = 0; i < PCI_SRIOV_NUM_BARS; i++)
- pci_update_resource(dev, i + PCI_IOV_RESOURCES);
+ pci_update_resource(dev, pci_resource_num_from_vf_bar(i));
pci_write_config_dword(dev, iov->pos + PCI_SRIOV_SYS_PGSIZE, iov->pgsz);
pci_iov_set_numvfs(dev, iov->num_VFs);
@@ -944,8 +1037,8 @@ void pci_iov_remove(struct pci_dev *dev)
void pci_iov_update_resource(struct pci_dev *dev, int resno)
{
struct pci_sriov *iov = dev->is_physfn ? dev->sriov : NULL;
- struct resource *res = dev->resource + resno;
- int vf_bar = resno - PCI_IOV_RESOURCES;
+ struct resource *res = pci_resource_n(dev, resno);
+ int vf_bar = pci_resource_num_to_vf_bar(resno);
struct pci_bus_region region;
u16 cmd;
u32 new;
@@ -1019,8 +1112,10 @@ resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, int resno)
*/
void pci_restore_iov_state(struct pci_dev *dev)
{
- if (dev->is_physfn)
+ if (dev->is_physfn) {
+ sriov_restore_vf_rebar_state(dev);
sriov_restore_state(dev);
+ }
}
/**
@@ -1227,3 +1322,59 @@ int pci_sriov_configure_simple(struct pci_dev *dev, int nr_virtfn)
return nr_virtfn;
}
EXPORT_SYMBOL_GPL(pci_sriov_configure_simple);
+
+/**
+ * pci_iov_vf_bar_set_size - set a new size for a VF BAR
+ * @dev: the PCI device
+ * @resno: the resource number
+ * @size: new size as defined in the spec (0=1MB, 31=128TB)
+ *
+ * Set the new size of a VF BAR that supports the VF Resizable BAR capability.
+ * Unlike pci_resize_resource(), this does not cause the resource that
+ * reserves the MMIO space (originally sized for total_VFs) to be resized, which
+ * means that following calls to pci_enable_sriov() can fail if the resources
+ * no longer fit.
+ *
+ * Return: 0 on success, or negative on failure.
+ */
+int pci_iov_vf_bar_set_size(struct pci_dev *dev, int resno, int size)
+{
+ if (!pci_resource_is_iov(resno))
+ return -EINVAL;
+
+ if (pci_iov_is_memory_decoding_enabled(dev))
+ return -EBUSY;
+
+ if (!pci_rebar_size_supported(dev, resno, size))
+ return -EINVAL;
+
+ return pci_rebar_set_size(dev, resno, size);
+}
+EXPORT_SYMBOL_GPL(pci_iov_vf_bar_set_size);
+
+/**
+ * pci_iov_vf_bar_get_sizes - get VF BAR sizes allowing to create up to num_vfs
+ * @dev: the PCI device
+ * @resno: the resource number
+ * @num_vfs: number of VFs
+ *
+ * Get the sizes of a VF resizable BAR that can accommodate @num_vfs within
+ * the currently assigned size of the resource @resno.
+ *
+ * Return: A bitmask of sizes in format defined in the spec (bit 0=1MB,
+ * bit 31=128TB).
+ */
+u32 pci_iov_vf_bar_get_sizes(struct pci_dev *dev, int resno, int num_vfs)
+{
+ u64 vf_len = pci_resource_len(dev, resno);
+ u64 sizes;
+
+ if (!num_vfs)
+ return 0;
+
+ do_div(vf_len, num_vfs);
+ sizes = (roundup_pow_of_two(vf_len + 1) - 1) >> ilog2(SZ_1M);
+
+ return sizes & pci_rebar_get_possible_sizes(dev, resno);
+}
+EXPORT_SYMBOL_GPL(pci_iov_vf_bar_get_sizes);
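+
+/*
+ * Sketch of how a PF driver might size VF BAR 0 before enabling SR-IOV; the
+ * BAR choice and VF count are illustrative and error handling is elided:
+ *
+ *	int resno = PCI_IOV_RESOURCES;		(resource number of VF BAR 0)
+ *	u32 sizes = pci_iov_vf_bar_get_sizes(dev, resno, num_vfs);
+ *
+ *	if (sizes)
+ *		pci_iov_vf_bar_set_size(dev, resno, __fls(sizes));
+ *	...
+ *	pci_enable_sriov(dev, num_vfs);
+ */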
diff --git a/drivers/pci/irq.c b/drivers/pci/irq.c
index 0050e8f6814e..4555630be9ec 100644
--- a/drivers/pci/irq.c
+++ b/drivers/pci/irq.c
@@ -8,9 +8,13 @@
#include <linux/device.h>
#include <linux/kernel.h>
+#include <linux/errno.h>
#include <linux/export.h>
+#include <linux/interrupt.h>
#include <linux/pci.h>
+#include "pci.h"
+
/**
* pci_request_irq - allocate an interrupt line for a PCI device
* @dev: PCI device to operate on
@@ -74,3 +78,203 @@ void pci_free_irq(struct pci_dev *dev, unsigned int nr, void *dev_id)
kfree(free_irq(pci_irq_vector(dev, nr), dev_id));
}
EXPORT_SYMBOL(pci_free_irq);
+
+/**
+ * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
+ * @dev: the PCI device
+ * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD)
+ *
+ * Perform INTx swizzling for a device behind one level of bridge. This is
+ * required by section 9.1 of the PCI-to-PCI bridge specification for devices
+ * behind bridges on add-in cards. For devices with ARI enabled, the slot
+ * number is always 0 (see the Implementation Note in section 2.2.8.1 of
+ * the PCI Express Base Specification, Revision 2.1)
+ */
+u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin)
+{
+ int slot;
+
+ if (pci_ari_enabled(dev->bus))
+ slot = 0;
+ else
+ slot = PCI_SLOT(dev->devfn);
+
+ return (((pin - 1) + slot) % 4) + 1;
+}
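+
+/*
+ * Worked example: a device in slot 2 asserting INTB (pin 2) appears one
+ * bridge up as (((2 - 1) + 2) % 4) + 1 = 4, i.e. INTD.
+ */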
+
+int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
+{
+ u8 pin;
+
+ pin = dev->pin;
+ if (!pin)
+ return -1;
+
+ while (!pci_is_root_bus(dev->bus)) {
+ pin = pci_swizzle_interrupt_pin(dev, pin);
+ dev = dev->bus->self;
+ }
+ *bridge = dev;
+ return pin;
+}
+
+/**
+ * pci_common_swizzle - swizzle INTx all the way to root bridge
+ * @dev: the PCI device
+ * @pinp: pointer to the INTx pin value (1=INTA, 2=INTB, 3=INTC, 4=INTD)
+ *
+ * Perform INTx swizzling for a device. This traverses through all PCI-to-PCI
+ * bridges all the way up to a PCI root bus.
+ */
+u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
+{
+ u8 pin = *pinp;
+
+ while (!pci_is_root_bus(dev->bus)) {
+ pin = pci_swizzle_interrupt_pin(dev, pin);
+ dev = dev->bus->self;
+ }
+ *pinp = pin;
+ return PCI_SLOT(dev->devfn);
+}
+EXPORT_SYMBOL_GPL(pci_common_swizzle);
+
+void pci_assign_irq(struct pci_dev *dev)
+{
+ u8 pin;
+ u8 slot = -1;
+ int irq = 0;
+ struct pci_host_bridge *hbrg = pci_find_host_bridge(dev->bus);
+
+ if (!(hbrg->map_irq)) {
+ pci_dbg(dev, "runtime IRQ mapping not provided by arch\n");
+ return;
+ }
+
+ /*
+ * If this device is not on the primary bus, we need to figure out
+ * which interrupt pin it will come in on. We know which slot it
+ * will come in on because that slot is where the bridge is. Each
+ * time the interrupt line passes through a PCI-PCI bridge we must
+ * apply the swizzle function.
+ */
+ pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
+ /* Cope with illegal. */
+ if (pin > 4)
+ pin = 1;
+
+ if (pin) {
+ /* Follow the chain of bridges, swizzling as we go. */
+ if (hbrg->swizzle_irq)
+ slot = (*(hbrg->swizzle_irq))(dev, &pin);
+
+ /*
+ * If a swizzling function is not used, map_irq() must
+ * ignore slot.
+ */
+ irq = (*(hbrg->map_irq))(dev, slot, pin);
+ if (irq == -1)
+ irq = 0;
+ }
+ dev->irq = irq;
+
+ pci_dbg(dev, "assign IRQ: got %d\n", dev->irq);
+
+ /*
+	 * Always tell the device, so the driver knows the real IRQ
+	 * to use; the device itself does not use it.
+ */
+ pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
+}
+
+static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask)
+{
+ struct pci_bus *bus = dev->bus;
+ bool mask_updated = true;
+ u32 cmd_status_dword;
+ u16 origcmd, newcmd;
+ unsigned long flags;
+ bool irq_pending;
+
+ /*
+ * We do a single dword read to retrieve both command and status.
+ * Document assumptions that make this possible.
+ */
+ BUILD_BUG_ON(PCI_COMMAND % 4);
+ BUILD_BUG_ON(PCI_COMMAND + 2 != PCI_STATUS);
+
+ raw_spin_lock_irqsave(&pci_lock, flags);
+
+ bus->ops->read(bus, dev->devfn, PCI_COMMAND, 4, &cmd_status_dword);
+
+ irq_pending = (cmd_status_dword >> 16) & PCI_STATUS_INTERRUPT;
+
+ /*
+ * Check interrupt status register to see whether our device
+ * triggered the interrupt (when masking) or the next IRQ is
+ * already pending (when unmasking).
+ */
+ if (mask != irq_pending) {
+ mask_updated = false;
+ goto done;
+ }
+
+ origcmd = cmd_status_dword;
+ newcmd = origcmd & ~PCI_COMMAND_INTX_DISABLE;
+ if (mask)
+ newcmd |= PCI_COMMAND_INTX_DISABLE;
+ if (newcmd != origcmd)
+ bus->ops->write(bus, dev->devfn, PCI_COMMAND, 2, newcmd);
+
+done:
+ raw_spin_unlock_irqrestore(&pci_lock, flags);
+
+ return mask_updated;
+}
+
+/**
+ * pci_check_and_mask_intx - mask INTx on pending interrupt
+ * @dev: the PCI device to operate on
+ *
+ * Check if the device dev has its INTx line asserted, mask it and return
+ * true in that case. False is returned if no interrupt was pending.
+ */
+bool pci_check_and_mask_intx(struct pci_dev *dev)
+{
+ return pci_check_and_set_intx_mask(dev, true);
+}
+EXPORT_SYMBOL_GPL(pci_check_and_mask_intx);
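+
+/*
+ * Usage sketch for a driver that defers INTx handling (the foo structure
+ * and workqueue are hypothetical): mask the line on a pending interrupt and
+ * unmask it later with pci_check_and_unmask_intx().
+ *
+ *	static irqreturn_t foo_intx_handler(int irq, void *arg)
+ *	{
+ *		struct foo *foo = arg;
+ *
+ *		if (!pci_check_and_mask_intx(foo->pdev))
+ *			return IRQ_NONE;
+ *
+ *		queue_work(foo->wq, &foo->work);
+ *		return IRQ_HANDLED;
+ *	}
+ */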
+
+/**
+ * pci_check_and_unmask_intx - unmask INTx if no interrupt is pending
+ * @dev: the PCI device to operate on
+ *
+ * Check if the device dev has its INTx line asserted, unmask it if not and
+ * return true. False is returned and the mask remains active if there was
+ * still an interrupt pending.
+ */
+bool pci_check_and_unmask_intx(struct pci_dev *dev)
+{
+ return pci_check_and_set_intx_mask(dev, false);
+}
+EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx);
+
+/**
+ * pcibios_penalize_isa_irq - penalize an ISA IRQ
+ * @irq: ISA IRQ to penalize
+ * @active: IRQ active or not
+ *
+ * Permits the platform to provide architecture-specific functionality when
+ * penalizing ISA IRQs. This is the default implementation. Architecture
+ * implementations can override this.
+ */
+void __weak pcibios_penalize_isa_irq(int irq, int active) {}
+
+int __weak pcibios_alloc_irq(struct pci_dev *dev)
+{
+ return 0;
+}
+
+void __weak pcibios_free_irq(struct pci_dev *dev)
+{
+}
diff --git a/drivers/pci/mmap.c b/drivers/pci/mmap.c
index 4504039056d1..8da3347a95c4 100644
--- a/drivers/pci/mmap.c
+++ b/drivers/pci/mmap.c
@@ -11,6 +11,8 @@
#include <linux/mm.h>
#include <linux/pci.h>
+#include "pci.h"
+
#ifdef ARCH_GENERIC_PCI_MMAP_RESOURCE
static const struct vm_operations_struct pci_phys_vm_ops = {
@@ -50,3 +52,30 @@ int pci_mmap_resource_range(struct pci_dev *pdev, int bar,
}
#endif
+
+#if (defined(CONFIG_SYSFS) || defined(CONFIG_PROC_FS)) && \
+ (defined(HAVE_PCI_MMAP) || defined(ARCH_GENERIC_PCI_MMAP_RESOURCE))
+
+int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vma,
+ enum pci_mmap_api mmap_api)
+{
+ resource_size_t pci_start = 0, pci_end;
+ unsigned long nr, start, size;
+
+ if (pci_resource_len(pdev, resno) == 0)
+ return 0;
+ nr = vma_pages(vma);
+ start = vma->vm_pgoff;
+ size = ((pci_resource_len(pdev, resno) - 1) >> PAGE_SHIFT) + 1;
+ if (mmap_api == PCI_MMAP_PROCFS) {
+ pci_resource_to_user(pdev, resno, &pdev->resource[resno],
+ &pci_start, &pci_end);
+ pci_start >>= PAGE_SHIFT;
+ }
+ if (start >= pci_start && start < pci_start + size &&
+ start + nr <= pci_start + size)
+ return 1;
+ return 0;
+}
+
+#endif
diff --git a/drivers/pci/msi/api.c b/drivers/pci/msi/api.c
index be679aa5db64..818d55fbad0d 100644
--- a/drivers/pci/msi/api.c
+++ b/drivers/pci/msi/api.c
@@ -53,10 +53,9 @@ void pci_disable_msi(struct pci_dev *dev)
if (!pci_msi_enabled() || !dev || !dev->msi_enabled)
return;
- msi_lock_descs(&dev->dev);
+ guard(msi_descs_lock)(&dev->dev);
pci_msi_shutdown(dev);
pci_free_msi_irqs(dev);
- msi_unlock_descs(&dev->dev);
}
EXPORT_SYMBOL(pci_disable_msi);
@@ -162,7 +161,7 @@ struct msi_map pci_msix_alloc_irq_at(struct pci_dev *dev, unsigned int index,
EXPORT_SYMBOL_GPL(pci_msix_alloc_irq_at);
/**
- * pci_msix_free_irq - Free an interrupt on a PCI/MSIX interrupt domain
+ * pci_msix_free_irq - Free an interrupt on a PCI/MSI-X interrupt domain
*
* @dev: The PCI device to operate on
* @map: A struct msi_map describing the interrupt to free
@@ -196,10 +195,9 @@ void pci_disable_msix(struct pci_dev *dev)
if (!pci_msi_enabled() || !dev || !dev->msix_enabled)
return;
- msi_lock_descs(&dev->dev);
+ guard(msi_descs_lock)(&dev->dev);
pci_msix_shutdown(dev);
pci_free_msi_irqs(dev);
- msi_unlock_descs(&dev->dev);
}
EXPORT_SYMBOL(pci_disable_msix);
@@ -213,8 +211,8 @@ EXPORT_SYMBOL(pci_disable_msix);
* * %PCI_IRQ_MSIX Allow trying MSI-X vector allocations
* * %PCI_IRQ_MSI Allow trying MSI vector allocations
*
- * * %PCI_IRQ_LEGACY Allow trying legacy INTx interrupts, if
- * and only if @min_vecs == 1
+ * * %PCI_IRQ_INTX Allow trying INTx interrupts, if and
+ * only if @min_vecs == 1
*
* * %PCI_IRQ_AFFINITY Auto-manage IRQs affinity by spreading
* the vectors around available CPUs
@@ -279,8 +277,8 @@ int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
return nvecs;
}
- /* use legacy IRQ if allowed */
- if (flags & PCI_IRQ_LEGACY) {
+ /* use INTx IRQ if allowed */
+ if (flags & PCI_IRQ_INTX) {
if (min_vecs == 1 && dev->irq) {
/*
* Invoke the affinity spreading logic to ensure that
@@ -366,56 +364,6 @@ const struct cpumask *pci_irq_get_affinity(struct pci_dev *dev, int nr)
EXPORT_SYMBOL(pci_irq_get_affinity);
/**
- * pci_ims_alloc_irq - Allocate an interrupt on a PCI/IMS interrupt domain
- * @dev: The PCI device to operate on
- * @icookie: Pointer to an IMS implementation specific cookie for this
- * IMS instance (PASID, queue ID, pointer...).
- * The cookie content is copied into the MSI descriptor for the
- * interrupt chip callbacks or domain specific setup functions.
- * @affdesc: Optional pointer to an interrupt affinity descriptor
- *
- * There is no index for IMS allocations as IMS is an implementation
- * specific storage and does not have any direct associations between
- * index, which might be a pure software construct, and device
- * functionality. This association is established by the driver either via
- * the index - if there is a hardware table - or in case of purely software
- * managed IMS implementation the association happens via the
- * irq_write_msi_msg() callback of the implementation specific interrupt
- * chip, which utilizes the provided @icookie to store the MSI message in
- * the appropriate place.
- *
- * Return: A struct msi_map
- *
- * On success msi_map::index contains the allocated index (>= 0) and
- * msi_map::virq the allocated Linux interrupt number (> 0).
- *
- * On fail msi_map::index contains the error code and msi_map::virq
- * is set to 0.
- */
-struct msi_map pci_ims_alloc_irq(struct pci_dev *dev, union msi_instance_cookie *icookie,
- const struct irq_affinity_desc *affdesc)
-{
- return msi_domain_alloc_irq_at(&dev->dev, MSI_SECONDARY_DOMAIN, MSI_ANY_INDEX,
- affdesc, icookie);
-}
-EXPORT_SYMBOL_GPL(pci_ims_alloc_irq);
-
-/**
- * pci_ims_free_irq - Allocate an interrupt on a PCI/IMS interrupt domain
- * which was allocated via pci_ims_alloc_irq()
- * @dev: The PCI device to operate on
- * @map: A struct msi_map describing the interrupt to free as
- * returned from pci_ims_alloc_irq()
- */
-void pci_ims_free_irq(struct pci_dev *dev, struct msi_map map)
-{
- if (WARN_ON_ONCE(map.index < 0 || map.virq <= 0))
- return;
- msi_domain_free_irqs_range(&dev->dev, MSI_SECONDARY_DOMAIN, map.index, map.index);
-}
-EXPORT_SYMBOL_GPL(pci_ims_free_irq);
-
-/**
* pci_free_irq_vectors() - Free previously allocated IRQs for a device
* @dev: the PCI device to operate on
*
@@ -451,7 +399,7 @@ EXPORT_SYMBOL_GPL(pci_restore_msi_state);
* Return: true if MSI has not been globally disabled through ACPI FADT,
* PCI bridge quirks, or the "pci=nomsi" kernel command-line option.
*/
-int pci_msi_enabled(void)
+bool pci_msi_enabled(void)
{
return pci_msi_enable;
}
diff --git a/drivers/pci/msi/irqdomain.c b/drivers/pci/msi/irqdomain.c
index e33bcc872699..a329060287b5 100644
--- a/drivers/pci/msi/irqdomain.c
+++ b/drivers/pci/msi/irqdomain.c
@@ -49,103 +49,52 @@ static void pci_msi_domain_write_msg(struct irq_data *irq_data, struct msi_msg *
__pci_write_msi_msg(desc, msg);
}
-/**
- * pci_msi_domain_calc_hwirq - Generate a unique ID for an MSI source
- * @desc: Pointer to the MSI descriptor
- *
- * The ID number is only used within the irqdomain.
+/*
+ * Per device MSI[-X] domain functionality
*/
-static irq_hw_number_t pci_msi_domain_calc_hwirq(struct msi_desc *desc)
-{
- struct pci_dev *dev = msi_desc_to_pci_dev(desc);
-
- return (irq_hw_number_t)desc->msi_index |
- pci_dev_id(dev) << 11 |
- (pci_domain_nr(dev->bus) & 0xFFFFFFFF) << 27;
-}
-
-static void pci_msi_domain_set_desc(msi_alloc_info_t *arg,
- struct msi_desc *desc)
+static void pci_device_domain_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc)
{
arg->desc = desc;
- arg->hwirq = pci_msi_domain_calc_hwirq(desc);
+ arg->hwirq = desc->msi_index;
}
-static struct msi_domain_ops pci_msi_domain_ops_default = {
- .set_desc = pci_msi_domain_set_desc,
-};
-
-static void pci_msi_domain_update_dom_ops(struct msi_domain_info *info)
+static void cond_shutdown_parent(struct irq_data *data)
{
- struct msi_domain_ops *ops = info->ops;
-
- if (ops == NULL) {
- info->ops = &pci_msi_domain_ops_default;
- } else {
- if (ops->set_desc == NULL)
- ops->set_desc = pci_msi_domain_set_desc;
- }
-}
+ struct msi_domain_info *info = data->domain->host_data;
-static void pci_msi_domain_update_chip_ops(struct msi_domain_info *info)
-{
- struct irq_chip *chip = info->chip;
-
- BUG_ON(!chip);
- if (!chip->irq_write_msi_msg)
- chip->irq_write_msi_msg = pci_msi_domain_write_msg;
- if (!chip->irq_mask)
- chip->irq_mask = pci_msi_mask_irq;
- if (!chip->irq_unmask)
- chip->irq_unmask = pci_msi_unmask_irq;
+ if (unlikely(info->flags & MSI_FLAG_PCI_MSI_STARTUP_PARENT))
+ irq_chip_shutdown_parent(data);
+ else if (unlikely(info->flags & MSI_FLAG_PCI_MSI_MASK_PARENT))
+ irq_chip_mask_parent(data);
}
-/**
- * pci_msi_create_irq_domain - Create a MSI interrupt domain
- * @fwnode: Optional fwnode of the interrupt controller
- * @info: MSI domain info
- * @parent: Parent irq domain
- *
- * Updates the domain and chip ops and creates a MSI interrupt domain.
- *
- * Returns:
- * A domain pointer or NULL in case of failure.
- */
-struct irq_domain *pci_msi_create_irq_domain(struct fwnode_handle *fwnode,
- struct msi_domain_info *info,
- struct irq_domain *parent)
+static unsigned int cond_startup_parent(struct irq_data *data)
{
- if (WARN_ON(info->flags & MSI_FLAG_LEVEL_CAPABLE))
- info->flags &= ~MSI_FLAG_LEVEL_CAPABLE;
-
- if (info->flags & MSI_FLAG_USE_DEF_DOM_OPS)
- pci_msi_domain_update_dom_ops(info);
- if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
- pci_msi_domain_update_chip_ops(info);
+ struct msi_domain_info *info = data->domain->host_data;
- /* Let the core code free MSI descriptors when freeing interrupts */
- info->flags |= MSI_FLAG_FREE_MSI_DESCS;
+ if (unlikely(info->flags & MSI_FLAG_PCI_MSI_STARTUP_PARENT))
+ return irq_chip_startup_parent(data);
+ else if (unlikely(info->flags & MSI_FLAG_PCI_MSI_MASK_PARENT))
+ irq_chip_unmask_parent(data);
- info->flags |= MSI_FLAG_ACTIVATE_EARLY | MSI_FLAG_DEV_SYSFS;
- if (IS_ENABLED(CONFIG_GENERIC_IRQ_RESERVATION_MODE))
- info->flags |= MSI_FLAG_MUST_REACTIVATE;
+ return 0;
+}
- /* PCI-MSI is oneshot-safe */
- info->chip->flags |= IRQCHIP_ONESHOT_SAFE;
- /* Let the core update the bus token */
- info->bus_token = DOMAIN_BUS_PCI_MSI;
+static void pci_irq_shutdown_msi(struct irq_data *data)
+{
+ struct msi_desc *desc = irq_data_get_msi_desc(data);
- return msi_create_irq_domain(fwnode, info, parent);
+ pci_msi_mask(desc, BIT(data->irq - desc->irq));
+ cond_shutdown_parent(data);
}
-EXPORT_SYMBOL_GPL(pci_msi_create_irq_domain);
-/*
- * Per device MSI[-X] domain functionality
- */
-static void pci_device_domain_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc)
+static unsigned int pci_irq_startup_msi(struct irq_data *data)
{
- arg->desc = desc;
- arg->hwirq = desc->msi_index;
+ struct msi_desc *desc = irq_data_get_msi_desc(data);
+ unsigned int ret = cond_startup_parent(data);
+
+ pci_msi_unmask(desc, BIT(data->irq - desc->irq));
+ return ret;
}
static void pci_irq_mask_msi(struct irq_data *data)
@@ -176,6 +125,8 @@ static void pci_irq_unmask_msi(struct irq_data *data)
static const struct msi_domain_template pci_msi_template = {
.chip = {
.name = "PCI-MSI",
+ .irq_startup = pci_irq_startup_msi,
+ .irq_shutdown = pci_irq_shutdown_msi,
.irq_mask = pci_irq_mask_msi,
.irq_unmask = pci_irq_unmask_msi,
.irq_write_msi_msg = pci_msi_domain_write_msg,
@@ -192,6 +143,20 @@ static const struct msi_domain_template pci_msi_template = {
},
};
+static void pci_irq_shutdown_msix(struct irq_data *data)
+{
+ pci_msix_mask(irq_data_get_msi_desc(data));
+ cond_shutdown_parent(data);
+}
+
+static unsigned int pci_irq_startup_msix(struct irq_data *data)
+{
+ unsigned int ret = cond_startup_parent(data);
+
+ pci_msix_unmask(irq_data_get_msi_desc(data));
+ return ret;
+}
+
static void pci_irq_mask_msix(struct irq_data *data)
{
pci_msix_mask(irq_data_get_msi_desc(data));
@@ -202,17 +167,20 @@ static void pci_irq_unmask_msix(struct irq_data *data)
pci_msix_unmask(irq_data_get_msi_desc(data));
}
-static void pci_msix_prepare_desc(struct irq_domain *domain, msi_alloc_info_t *arg,
- struct msi_desc *desc)
+void pci_msix_prepare_desc(struct irq_domain *domain, msi_alloc_info_t *arg,
+ struct msi_desc *desc)
{
/* Don't fiddle with preallocated MSI descriptors */
if (!desc->pci.mask_base)
msix_prepare_msi_desc(to_pci_dev(desc->dev), desc);
}
+EXPORT_SYMBOL_GPL(pci_msix_prepare_desc);
static const struct msi_domain_template pci_msix_template = {
.chip = {
.name = "PCI-MSIX",
+ .irq_startup = pci_irq_startup_msix,
+ .irq_shutdown = pci_irq_shutdown_msix,
.irq_mask = pci_irq_mask_msix,
.irq_unmask = pci_irq_unmask_msix,
.irq_write_msi_msg = pci_msi_domain_write_msg,
@@ -251,6 +219,7 @@ static bool pci_create_device_domain(struct pci_dev *pdev, const struct msi_doma
/**
* pci_setup_msi_device_domain - Setup a device MSI interrupt domain
* @pdev: The PCI device to create the domain on
+ * @hwsize: The maximum number of MSI vectors
*
* Return:
* True when:
@@ -267,7 +236,7 @@ static bool pci_create_device_domain(struct pci_dev *pdev, const struct msi_doma
* - The device is removed
* - MSI is disabled and a MSI-X domain is created
*/
-bool pci_setup_msi_device_domain(struct pci_dev *pdev)
+bool pci_setup_msi_device_domain(struct pci_dev *pdev, unsigned int hwsize)
{
if (WARN_ON_ONCE(pdev->msix_enabled))
return false;
@@ -277,7 +246,7 @@ bool pci_setup_msi_device_domain(struct pci_dev *pdev)
if (pci_match_device_domain(pdev, DOMAIN_BUS_PCI_DEVICE_MSIX))
msi_remove_device_irq_domain(&pdev->dev, MSI_DEFAULT_DOMAIN);
- return pci_create_device_domain(pdev, &pci_msi_template, 1);
+ return pci_create_device_domain(pdev, &pci_msi_template, hwsize);
}
/**
@@ -330,13 +299,16 @@ bool pci_msi_domain_supports(struct pci_dev *pdev, unsigned int feature_mask,
domain = dev_get_msi_domain(&pdev->dev);
- if (!domain || !irq_domain_is_hierarchy(domain))
- return mode == ALLOW_LEGACY;
+ if (!domain || !irq_domain_is_hierarchy(domain)) {
+ if (IS_ENABLED(CONFIG_PCI_MSI_ARCH_FALLBACKS))
+ return mode == ALLOW_LEGACY;
+ return false;
+ }
if (!irq_domain_is_msi_parent(domain)) {
/*
* For "global" PCI/MSI interrupt domains the associated
- * msi_domain_info::flags is the authoritive source of
+ * msi_domain_info::flags is the authoritative source of
* information.
*/
info = domain->host_data;
@@ -344,7 +316,7 @@ bool pci_msi_domain_supports(struct pci_dev *pdev, unsigned int feature_mask,
} else {
/*
* For MSI parent domains the supported feature set
- * is avaliable in the parent ops. This makes checks
+ * is available in the parent ops. This makes checks
* possible before actually instantiating the
* per device domain because the parent is never
* expanding the PCI/MSI functionality.
@@ -355,65 +327,6 @@ bool pci_msi_domain_supports(struct pci_dev *pdev, unsigned int feature_mask,
return (supported & feature_mask) == feature_mask;
}
-/**
- * pci_create_ims_domain - Create a secondary IMS domain for a PCI device
- * @pdev: The PCI device to operate on
- * @template: The MSI info template which describes the domain
- * @hwsize: The size of the hardware entry table or 0 if the domain
- * is purely software managed
- * @data: Optional pointer to domain specific data to be stored
- * in msi_domain_info::data
- *
- * Return: True on success, false otherwise
- *
- * An IMS domain is expected to have the following constraints:
- * - The index space is managed by the core code
- *
- * - There is no requirement for consecutive index ranges
- *
- * - The interrupt chip must provide the following callbacks:
- * - irq_mask()
- * - irq_unmask()
- * - irq_write_msi_msg()
- *
- * - The interrupt chip must provide the following optional callbacks
- * when the irq_mask(), irq_unmask() and irq_write_msi_msg() callbacks
- * cannot operate directly on hardware, e.g. in the case that the
- * interrupt message store is in queue memory:
- * - irq_bus_lock()
- * - irq_bus_unlock()
- *
- * These callbacks are invoked from preemptible task context and are
- * allowed to sleep. In this case the mandatory callbacks above just
- * store the information. The irq_bus_unlock() callback is supposed
- * to make the change effective before returning.
- *
- * - Interrupt affinity setting is handled by the underlying parent
- * interrupt domain and communicated to the IMS domain via
- * irq_write_msi_msg().
- *
- * The domain is automatically destroyed when the PCI device is removed.
- */
-bool pci_create_ims_domain(struct pci_dev *pdev, const struct msi_domain_template *template,
- unsigned int hwsize, void *data)
-{
- struct irq_domain *domain = dev_get_msi_domain(&pdev->dev);
-
- if (!domain || !irq_domain_is_msi_parent(domain))
- return false;
-
- if (template->info.bus_token != DOMAIN_BUS_PCI_DEVICE_IMS ||
- !(template->info.flags & MSI_FLAG_ALLOC_SIMPLE_MSI_DESCS) ||
- !(template->info.flags & MSI_FLAG_FREE_MSI_DESCS) ||
- !template->chip.irq_mask || !template->chip.irq_unmask ||
- !template->chip.irq_write_msi_msg || template->chip.irq_set_affinity)
- return false;
-
- return msi_create_device_irq_domain(&pdev->dev, MSI_SECONDARY_DOMAIN, template,
- hwsize, data, NULL);
-}
-EXPORT_SYMBOL_GPL(pci_create_ims_domain);
-
/*
* Users of the generic MSI infrastructure expect a device to have a single ID,
* so with DMA aliases we have to pick the least-worst compromise. Devices with
@@ -456,13 +369,33 @@ u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev)
pci_for_each_dma_alias(pdev, get_msi_id_cb, &rid);
of_node = irq_domain_get_of_node(domain);
- rid = of_node ? of_msi_map_id(&pdev->dev, of_node, rid) :
+ rid = of_node ? of_msi_xlate(&pdev->dev, &of_node, rid) :
iort_msi_map_id(&pdev->dev, rid);
return rid;
}
/**
+ * pci_msi_map_rid_ctlr_node - Get the MSI controller node and MSI requester id (RID)
+ * @pdev: The PCI device
+ * @node: Pointer to store the MSI controller device node
+ *
+ * Use the firmware data to find the MSI controller node for @pdev.
+ * If found, map the RID and initialize @node with it. The @node value must
+ * be set to NULL on entry.
+ *
+ * Returns: The RID.
+ */
+u32 pci_msi_map_rid_ctlr_node(struct pci_dev *pdev, struct device_node **node)
+{
+ u32 rid = pci_dev_id(pdev);
+
+ pci_for_each_dma_alias(pdev, get_msi_id_cb, &rid);
+
+ return of_msi_xlate(&pdev->dev, node, rid);
+}
+
+/**
* pci_msi_get_device_domain - Get the MSI domain for a given PCI device
* @pdev: The PCI device
*
diff --git a/drivers/pci/msi/msi.c b/drivers/pci/msi/msi.c
index ef1d8857a51b..34d664139f48 100644
--- a/drivers/pci/msi/msi.c
+++ b/drivers/pci/msi/msi.c
@@ -6,15 +6,16 @@
* Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com)
* Copyright (C) 2016 Christoph Hellwig.
*/
+#include <linux/bitfield.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include "../pci.h"
#include "msi.h"
-int pci_msi_enable = 1;
-int pci_msi_ignore_mask;
+bool pci_msi_enable = true;
/**
* pci_msi_supported - check whether MSI may be enabled on a device
@@ -85,9 +86,11 @@ static int pcim_setup_msi_release(struct pci_dev *dev)
return 0;
ret = devm_add_action(&dev->dev, pcim_msi_release, dev);
- if (!ret)
- dev->is_msi_managed = true;
- return ret;
+ if (ret)
+ return ret;
+
+ dev->is_msi_managed = true;
+ return 0;
}
/*
@@ -98,9 +101,10 @@ static int pci_setup_msi_context(struct pci_dev *dev)
{
int ret = msi_setup_device_data(&dev->dev);
- if (!ret)
- ret = pcim_setup_msi_release(dev);
- return ret;
+ if (ret)
+ return ret;
+
+ return pcim_setup_msi_release(dev);
}
/*
@@ -109,7 +113,8 @@ static int pci_setup_msi_context(struct pci_dev *dev)
void pci_msi_update_mask(struct msi_desc *desc, u32 clear, u32 set)
{
- raw_spinlock_t *lock = &to_pci_dev(desc->dev)->msi_lock;
+ struct pci_dev *dev = msi_desc_to_pci_dev(desc);
+ raw_spinlock_t *lock = &dev->msi_lock;
unsigned long flags;
if (!desc->pci.msi_attrib.can_mask)
@@ -118,8 +123,7 @@ void pci_msi_update_mask(struct msi_desc *desc, u32 clear, u32 set)
raw_spin_lock_irqsave(lock, flags);
desc->pci.msi_mask &= ~clear;
desc->pci.msi_mask |= set;
- pci_write_config_dword(msi_desc_to_pci_dev(desc), desc->pci.mask_pos,
- desc->pci.msi_mask);
+ pci_write_config_dword(dev, desc->pci.mask_pos, desc->pci.msi_mask);
raw_spin_unlock_irqrestore(lock, flags);
}
@@ -188,7 +192,7 @@ static inline void pci_write_msg_msi(struct pci_dev *dev, struct msi_desc *desc,
pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &msgctl);
msgctl &= ~PCI_MSI_FLAGS_QSIZE;
- msgctl |= desc->pci.msi_attrib.multiple << 4;
+ msgctl |= FIELD_PREP(PCI_MSI_FLAGS_QSIZE, desc->pci.msi_attrib.multiple);
pci_write_config_word(dev, pos + PCI_MSI_FLAGS, msgctl);
pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_LO, msg->address_lo);
@@ -291,15 +295,14 @@ static int msi_setup_msi_desc(struct pci_dev *dev, int nvec,
/* Lies, damned lies, and MSIs */
if (dev->dev_flags & PCI_DEV_FLAGS_HAS_MSI_MASKING)
control |= PCI_MSI_FLAGS_MASKBIT;
- /* Respect XEN's mask disabling */
- if (pci_msi_ignore_mask)
+ if (pci_msi_domain_supports(dev, MSI_FLAG_NO_MASK, DENY_LEGACY))
control &= ~PCI_MSI_FLAGS_MASKBIT;
desc.nvec_used = nvec;
desc.pci.msi_attrib.is_64 = !!(control & PCI_MSI_FLAGS_64BIT);
desc.pci.msi_attrib.can_mask = !!(control & PCI_MSI_FLAGS_MASKBIT);
desc.pci.msi_attrib.default_irq = dev->irq;
- desc.pci.msi_attrib.multi_cap = (control & PCI_MSI_FLAGS_QMASK) >> 1;
+ desc.pci.msi_attrib.multi_cap = FIELD_GET(PCI_MSI_FLAGS_QMASK, control);
desc.pci.msi_attrib.multiple = ilog2(__roundup_pow_of_two(nvec));
desc.affinity = masks;
@@ -332,47 +335,23 @@ static int msi_verify_entries(struct pci_dev *dev)
return !entry ? 0 : -EIO;
}
-/**
- * msi_capability_init - configure device's MSI capability structure
- * @dev: pointer to the pci_dev data structure of MSI device function
- * @nvec: number of interrupts to allocate
- * @affd: description of automatic IRQ affinity assignments (may be %NULL)
- *
- * Setup the MSI capability structure of the device with the requested
- * number of interrupts. A return value of zero indicates the successful
- * setup of an entry with the new MSI IRQ. A negative return value indicates
- * an error, and a positive return value indicates the number of interrupts
- * which could have been allocated.
- */
-static int msi_capability_init(struct pci_dev *dev, int nvec,
- struct irq_affinity *affd)
+static int __msi_capability_init(struct pci_dev *dev, int nvec, struct irq_affinity_desc *masks)
{
- struct irq_affinity_desc *masks = NULL;
- struct msi_desc *entry;
- int ret;
-
- /* Reject multi-MSI early on irq domain enabled architectures */
- if (nvec > 1 && !pci_msi_domain_supports(dev, MSI_FLAG_MULTI_PCI_MSI, ALLOW_LEGACY))
- return 1;
-
- /*
- * Disable MSI during setup in the hardware, but mark it enabled
- * so that setup code can evaluate it.
- */
- pci_msi_set_enable(dev, 0);
- dev->msi_enabled = 1;
-
- if (affd)
- masks = irq_create_affinity_masks(nvec, affd);
+ int ret = msi_setup_msi_desc(dev, nvec, masks);
+ struct msi_desc *entry, desc;
- msi_lock_descs(&dev->dev);
- ret = msi_setup_msi_desc(dev, nvec, masks);
if (ret)
- goto fail;
+ return ret;
/* All MSIs are unmasked by default; mask them all */
entry = msi_first_desc(&dev->dev, MSI_DESC_ALL);
pci_msi_mask(entry, msi_multi_mask(entry));
+ /*
+ * Copy the MSI descriptor for the error path because
+ * pci_msi_setup_msi_irqs() will free it for the hierarchical
+ * interrupt domain case.
+ */
+ memcpy(&desc, entry, sizeof(desc));
/* Configure MSI capability structure */
ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI);
@@ -384,24 +363,51 @@ static int msi_capability_init(struct pci_dev *dev, int nvec,
goto err;
/* Set MSI enabled bits */
+ dev->msi_enabled = 1;
pci_intx_for_msi(dev, 0);
pci_msi_set_enable(dev, 1);
pcibios_free_irq(dev);
dev->irq = entry->irq;
- goto unlock;
-
+ return 0;
err:
- pci_msi_unmask(entry, msi_multi_mask(entry));
+ pci_msi_unmask(&desc, msi_multi_mask(&desc));
pci_free_msi_irqs(dev);
-fail:
- dev->msi_enabled = 0;
-unlock:
- msi_unlock_descs(&dev->dev);
- kfree(masks);
return ret;
}
+/**
+ * msi_capability_init - configure device's MSI capability structure
+ * @dev: pointer to the pci_dev data structure of MSI device function
+ * @nvec: number of interrupts to allocate
+ * @affd: description of automatic IRQ affinity assignments (may be %NULL)
+ *
+ * Setup the MSI capability structure of the device with the requested
+ * number of interrupts. A return value of zero indicates the successful
+ * setup of an entry with the new MSI IRQ. A negative return value indicates
+ * an error, and a positive return value indicates the number of interrupts
+ * which could have been allocated.
+ */
+static int msi_capability_init(struct pci_dev *dev, int nvec,
+ struct irq_affinity *affd)
+{
+ /* Reject multi-MSI early on irq domain enabled architectures */
+ if (nvec > 1 && !pci_msi_domain_supports(dev, MSI_FLAG_MULTI_PCI_MSI, ALLOW_LEGACY))
+ return 1;
+
+ /*
+ * Disable MSI during setup in the hardware, but mark it enabled
+ * so that setup code can evaluate it.
+ */
+ pci_msi_set_enable(dev, 0);
+
+ struct irq_affinity_desc *masks __free(kfree) =
+ affd ? irq_create_affinity_masks(nvec, affd) : NULL;
+
+ guard(msi_descs_lock)(&dev->dev);
+ return __msi_capability_init(dev, nvec, masks);
+}
+
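The rewritten msi_capability_init() relies on scope-based cleanup from <linux/cleanup.h>: __free(kfree) releases the affinity masks and guard(msi_descs_lock) drops the descriptor lock on every return path. A minimal sketch of the __free() pattern, using an illustrative function that is not part of this patch:

#include <linux/cleanup.h>
#include <linux/slab.h>

static int example_alloc_and_use(size_t n)
{
	/* Freed automatically when the variable goes out of scope */
	void *buf __free(kfree) = kzalloc(n, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	/* ... use buf; no explicit kfree() on any return path ... */
	return 0;
}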
int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
struct irq_affinity *affd)
{
@@ -423,22 +429,26 @@ int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
if (WARN_ON_ONCE(dev->msi_enabled))
return -EINVAL;
+ /* Test for the availability of MSI support */
+ if (!pci_msi_domain_supports(dev, 0, ALLOW_LEGACY))
+ return -ENOTSUPP;
+
nvec = pci_msi_vec_count(dev);
if (nvec < 0)
return nvec;
if (nvec < minvec)
return -ENOSPC;
- if (nvec > maxvec)
- nvec = maxvec;
-
rc = pci_setup_msi_context(dev);
if (rc)
return rc;
- if (!pci_setup_msi_device_domain(dev))
+ if (!pci_setup_msi_device_domain(dev, nvec))
return -ENODEV;
+ if (nvec > maxvec)
+ nvec = maxvec;
+
for (;;) {
if (affd) {
nvec = irq_calc_affinity_vectors(minvec, nvec, affd);
@@ -478,7 +488,7 @@ int pci_msi_vec_count(struct pci_dev *dev)
return -EINVAL;
pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &msgctl);
- ret = 1 << ((msgctl & PCI_MSI_FLAGS_QMASK) >> 1);
+ ret = 1 << FIELD_GET(PCI_MSI_FLAGS_QMASK, msgctl);
return ret;
}
@@ -511,7 +521,8 @@ void __pci_restore_msi_state(struct pci_dev *dev)
pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);
pci_msi_update_mask(entry, 0, 0);
control &= ~PCI_MSI_FLAGS_QSIZE;
- control |= (entry->pci.msi_attrib.multiple << 4) | PCI_MSI_FLAGS_ENABLE;
+ control |= PCI_MSI_FLAGS_ENABLE |
+ FIELD_PREP(PCI_MSI_FLAGS_QSIZE, entry->pci.msi_attrib.multiple);
pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control);
}
@@ -594,12 +605,16 @@ void msix_prepare_msi_desc(struct pci_dev *dev, struct msi_desc *desc)
desc->pci.msi_attrib.is_64 = 1;
desc->pci.msi_attrib.default_irq = dev->irq;
desc->pci.mask_base = dev->msix_base;
- desc->pci.msi_attrib.can_mask = !pci_msi_ignore_mask &&
- !desc->pci.msi_attrib.is_virtual;
- if (desc->pci.msi_attrib.can_mask) {
+
+ if (!pci_msi_domain_supports(dev, MSI_FLAG_NO_MASK, DENY_LEGACY) &&
+ !desc->pci.msi_attrib.is_virtual) {
void __iomem *addr = pci_msix_desc_addr(desc);
+ desc->pci.msi_attrib.can_mask = 1;
+ /* Workaround for SUN NIU insanity, which requires write before read */
+ if (dev->dev_flags & PCI_DEV_FLAGS_MSIX_TOUCH_ENTRY_DATA_FIRST)
+ writel(0, addr + PCI_MSIX_ENTRY_DATA);
desc->pci.msix_ctrl = readl(addr + PCI_MSIX_ENTRY_VECTOR_CTRL);
}
}
@@ -644,45 +659,43 @@ static void msix_mask_all(void __iomem *base, int tsize)
u32 ctrl = PCI_MSIX_ENTRY_CTRL_MASKBIT;
int i;
- if (pci_msi_ignore_mask)
- return;
-
for (i = 0; i < tsize; i++, base += PCI_MSIX_ENTRY_SIZE)
writel(ctrl, base + PCI_MSIX_ENTRY_VECTOR_CTRL);
}
-static int msix_setup_interrupts(struct pci_dev *dev, struct msix_entry *entries,
- int nvec, struct irq_affinity *affd)
-{
- struct irq_affinity_desc *masks = NULL;
- int ret;
+DEFINE_FREE(free_msi_irqs, struct pci_dev *, if (_T) pci_free_msi_irqs(_T));
- if (affd)
- masks = irq_create_affinity_masks(nvec, affd);
+static int __msix_setup_interrupts(struct pci_dev *__dev, struct msix_entry *entries,
+ int nvec, struct irq_affinity_desc *masks)
+{
+ struct pci_dev *dev __free(free_msi_irqs) = __dev;
- msi_lock_descs(&dev->dev);
- ret = msix_setup_msi_descs(dev, entries, nvec, masks);
+ int ret = msix_setup_msi_descs(dev, entries, nvec, masks);
if (ret)
- goto out_free;
+ return ret;
ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX);
if (ret)
- goto out_free;
+ return ret;
/* Check if all MSI entries honor device restrictions */
ret = msi_verify_entries(dev);
if (ret)
- goto out_free;
+ return ret;
msix_update_entries(dev, entries);
- goto out_unlock;
+ retain_and_null_ptr(dev);
+ return 0;
+}
-out_free:
- pci_free_msi_irqs(dev);
-out_unlock:
- msi_unlock_descs(&dev->dev);
- kfree(masks);
- return ret;
+static int msix_setup_interrupts(struct pci_dev *dev, struct msix_entry *entries,
+ int nvec, struct irq_affinity *affd)
+{
+ struct irq_affinity_desc *masks __free(kfree) =
+ affd ? irq_create_affinity_masks(nvec, affd) : NULL;
+
+ guard(msi_descs_lock)(&dev->dev);
+ return __msix_setup_interrupts(dev, entries, nvec, masks);
}
/**
@@ -729,15 +742,17 @@ static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries,
/* Disable INTX */
pci_intx_for_msi(dev, 0);
- /*
- * Ensure that all table entries are masked to prevent
- * stale entries from firing in a crash kernel.
- *
- * Done late to deal with a broken Marvell NVME device
- * which takes the MSI-X mask bits into account even
- * when MSI-X is disabled, which prevents MSI delivery.
- */
- msix_mask_all(dev->msix_base, tsize);
+ if (!pci_msi_domain_supports(dev, MSI_FLAG_NO_MASK, DENY_LEGACY)) {
+ /*
+ * Ensure that all table entries are masked to prevent
+ * stale entries from firing in a crash kernel.
+ *
+ * Done late to deal with a broken Marvell NVME device
+ * which takes the MSI-X mask bits into account even
+ * when MSI-X is disabled, which prevents MSI delivery.
+ */
+ msix_mask_all(dev->msix_base, tsize);
+ }
pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0);
pcibios_free_irq(dev);
@@ -856,13 +871,13 @@ void __pci_restore_msix_state(struct pci_dev *dev)
write_msg = arch_restore_msi_irqs(dev);
- msi_lock_descs(&dev->dev);
- msi_for_each_desc(entry, &dev->dev, MSI_DESC_ALL) {
- if (write_msg)
- __pci_write_msi_msg(entry, &entry->msg);
- pci_msix_write_vector_ctrl(entry, entry->pci.msix_ctrl);
+ scoped_guard (msi_descs_lock, &dev->dev) {
+ msi_for_each_desc(entry, &dev->dev, MSI_DESC_ALL) {
+ if (write_msg)
+ __pci_write_msi_msg(entry, &entry->msg);
+ pci_msix_write_vector_ctrl(entry, entry->pci.msix_ctrl);
+ }
}
- msi_unlock_descs(&dev->dev);
pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0);
}
@@ -901,6 +916,55 @@ void pci_free_msi_irqs(struct pci_dev *dev)
}
}
+#ifdef CONFIG_PCIE_TPH
+/**
+ * pci_msix_write_tph_tag - Update the TPH tag for a given MSI-X vector
+ * @pdev: The PCIe device to update
+ * @index: The MSI-X index to update
+ * @tag: The tag to write
+ *
+ * Returns: 0 on success, error code on failure
+ */
+int pci_msix_write_tph_tag(struct pci_dev *pdev, unsigned int index, u16 tag)
+{
+ struct msi_desc *msi_desc;
+ struct irq_desc *irq_desc;
+ unsigned int virq;
+
+ if (!pdev->msix_enabled)
+ return -ENXIO;
+
+ virq = msi_get_virq(&pdev->dev, index);
+ if (!virq)
+ return -ENXIO;
+
+ guard(msi_descs_lock)(&pdev->dev);
+
+ /*
+ * This is a horrible hack, but short of implementing a PCI
+ * specific interrupt chip callback and a huge pile of
+ * infrastructure, this is the minor nuisance. It provides the
+ * protection against concurrent operations on this entry and keeps
+ * the control word cache in sync.
+ */
+ irq_desc = irq_to_desc(virq);
+ if (!irq_desc)
+ return -ENXIO;
+
+ guard(raw_spinlock_irq)(&irq_desc->lock);
+ msi_desc = irq_data_get_msi_desc(&irq_desc->irq_data);
+ if (!msi_desc || msi_desc->pci.msi_attrib.is_virtual)
+ return -ENXIO;
+
+ msi_desc->pci.msix_ctrl &= ~PCI_MSIX_ENTRY_CTRL_ST;
+ msi_desc->pci.msix_ctrl |= FIELD_PREP(PCI_MSIX_ENTRY_CTRL_ST, tag);
+ pci_msix_write_vector_ctrl(msi_desc, msi_desc->pci.msix_ctrl);
+ /* Flush the write */
+ readl(pci_msix_desc_addr(msi_desc));
+ return 0;
+}
+#endif
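A hypothetical caller of the new interface, purely illustrative and only meaningful under CONFIG_PCIE_TPH (the wrapper name and the warning below are not part of this patch): a TPH-capable driver updates the steering tag of an already allocated MSI-X vector.

static int example_update_steering_tag(struct pci_dev *pdev,
				       unsigned int msix_index, u16 tag)
{
	int ret = pci_msix_write_tph_tag(pdev, msix_index, tag);

	if (ret)
		pci_warn(pdev, "failed to update TPH steering tag: %d\n", ret);
	return ret;
}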
+
/* Misc. infrastructure */
struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc)
@@ -911,5 +975,5 @@ EXPORT_SYMBOL(msi_desc_to_pci_dev);
void pci_no_msi(void)
{
- pci_msi_enable = 0;
+ pci_msi_enable = false;
}
diff --git a/drivers/pci/msi/msi.h b/drivers/pci/msi/msi.h
index ee53cf079f4e..0b420b319f50 100644
--- a/drivers/pci/msi/msi.h
+++ b/drivers/pci/msi/msi.h
@@ -87,7 +87,7 @@ static inline __attribute_const__ u32 msi_multi_mask(struct msi_desc *desc)
void msix_prepare_msi_desc(struct pci_dev *dev, struct msi_desc *desc);
/* Subsystem variables */
-extern int pci_msi_enable;
+extern bool pci_msi_enable;
/* MSI internal functions invoked from the public APIs */
void pci_msi_shutdown(struct pci_dev *dev);
@@ -107,7 +107,7 @@ enum support_mode {
};
bool pci_msi_domain_supports(struct pci_dev *dev, unsigned int feature_mask, enum support_mode mode);
-bool pci_setup_msi_device_domain(struct pci_dev *pdev);
+bool pci_setup_msi_device_domain(struct pci_dev *pdev, unsigned int hwsize);
bool pci_setup_msix_device_domain(struct pci_dev *pdev, unsigned int hwsize);
/* Legacy (!IRQDOMAIN) fallbacks */
diff --git a/drivers/pci/npem.c b/drivers/pci/npem.c
new file mode 100644
index 000000000000..97507e0df769
--- /dev/null
+++ b/drivers/pci/npem.c
@@ -0,0 +1,595 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe Enclosure Management driver providing LED interfaces based on
+ * indications. It controls *which indications* blink but does not specify
+ * *how* they blink - that is hardware defined.
+ *
+ * The driver name refers to Native PCIe Enclosure Management. It is the
+ * first indication-oriented standard with a specification.
+ *
+ * Native PCIe Enclosure Management (NPEM)
+ * PCIe Base Specification r6.1 sec 6.28, 7.9.19
+ *
+ * _DSM Definitions for PCIe SSD Status LED
+ * PCI Firmware Specification, r3.3 sec 4.7
+ *
+ * Two backends are supported to manipulate indications: Direct NPEM register
+ * access (npem_ops) and indirect access through the ACPI _DSM (dsm_ops).
+ * _DSM is used if supported, else NPEM.
+ *
+ * Copyright (c) 2021-2022 Dell Inc.
+ * Copyright (c) 2023-2024 Intel Corporation
+ * Mariusz Tkaczyk <mariusz.tkaczyk@linux.intel.com>
+ */
+
+#include <linux/acpi.h>
+#include <linux/bitops.h>
+#include <linux/errno.h>
+#include <linux/iopoll.h>
+#include <linux/leds.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/pci_regs.h>
+#include <linux/types.h>
+#include <linux/uleds.h>
+
+#include "pci.h"
+
+struct indication {
+ u32 bit;
+ const char *name;
+};
+
+static const struct indication npem_indications[] = {
+ {PCI_NPEM_IND_OK, "enclosure:ok"},
+ {PCI_NPEM_IND_LOCATE, "enclosure:locate"},
+ {PCI_NPEM_IND_FAIL, "enclosure:fail"},
+ {PCI_NPEM_IND_REBUILD, "enclosure:rebuild"},
+ {PCI_NPEM_IND_PFA, "enclosure:pfa"},
+ {PCI_NPEM_IND_HOTSPARE, "enclosure:hotspare"},
+ {PCI_NPEM_IND_ICA, "enclosure:ica"},
+ {PCI_NPEM_IND_IFA, "enclosure:ifa"},
+ {PCI_NPEM_IND_IDT, "enclosure:idt"},
+ {PCI_NPEM_IND_DISABLED, "enclosure:disabled"},
+ {PCI_NPEM_IND_SPEC_0, "enclosure:specific_0"},
+ {PCI_NPEM_IND_SPEC_1, "enclosure:specific_1"},
+ {PCI_NPEM_IND_SPEC_2, "enclosure:specific_2"},
+ {PCI_NPEM_IND_SPEC_3, "enclosure:specific_3"},
+ {PCI_NPEM_IND_SPEC_4, "enclosure:specific_4"},
+ {PCI_NPEM_IND_SPEC_5, "enclosure:specific_5"},
+ {PCI_NPEM_IND_SPEC_6, "enclosure:specific_6"},
+ {PCI_NPEM_IND_SPEC_7, "enclosure:specific_7"},
+ {0, NULL}
+};
+
+/* _DSM PCIe SSD LED States correspond to NPEM register values */
+static const struct indication dsm_indications[] = {
+ {PCI_NPEM_IND_OK, "enclosure:ok"},
+ {PCI_NPEM_IND_LOCATE, "enclosure:locate"},
+ {PCI_NPEM_IND_FAIL, "enclosure:fail"},
+ {PCI_NPEM_IND_REBUILD, "enclosure:rebuild"},
+ {PCI_NPEM_IND_PFA, "enclosure:pfa"},
+ {PCI_NPEM_IND_HOTSPARE, "enclosure:hotspare"},
+ {PCI_NPEM_IND_ICA, "enclosure:ica"},
+ {PCI_NPEM_IND_IFA, "enclosure:ifa"},
+ {PCI_NPEM_IND_IDT, "enclosure:idt"},
+ {PCI_NPEM_IND_DISABLED, "enclosure:disabled"},
+ {0, NULL}
+};
+
+#define for_each_indication(ind, inds) \
+ for (ind = inds; ind->bit; ind++)
+
+/*
+ * The driver has an internal list of supported indications. Ideally, the
+ * driver should not touch bits that are not defined and for which no LED
+ * devices are exposed, but in reality it needs to turn them off.
+ *
+ * Otherwise, it would be impossible to turn off indications enabled by other
+ * utilities or enabled by default, which leads to a bad user experience.
+ *
+ * Additionally, this excludes NPEM commands like RESET or ENABLE.
+ */
+static u32 reg_to_indications(u32 caps, const struct indication *inds)
+{
+ const struct indication *ind;
+ u32 supported_indications = 0;
+
+ for_each_indication(ind, inds)
+ supported_indications |= ind->bit;
+
+ return caps & supported_indications;
+}
+
+/**
+ * struct npem_led - LED details
+ * @indication: indication details
+ * @npem: NPEM device
+ * @name: LED name
+ * @led: LED device
+ */
+struct npem_led {
+ const struct indication *indication;
+ struct npem *npem;
+ char name[LED_MAX_NAME_SIZE];
+ struct led_classdev led;
+};
+
+/**
+ * struct npem_ops - backend specific callbacks
+ * @get_active_indications: get active indications
+ * npem: NPEM device
+ * inds: response buffer
+ * @set_active_indications: set new indications
+ * npem: NPEM device
+ * inds: bit mask to set
+ * @inds: array of supported indications; the set is backend specific
+ * @name: backend name
+ */
+struct npem_ops {
+ int (*get_active_indications)(struct npem *npem, u32 *inds);
+ int (*set_active_indications)(struct npem *npem, u32 inds);
+ const struct indication *inds;
+ const char *name;
+};
+
+/**
+ * struct npem - NPEM device properties
+ * @dev: PCI device this driver is attached to
+ * @ops: backend specific callbacks
+ * @lock: serializes concurrent access to NPEM device by multiple LED devices
+ * @pos: cached offset of NPEM Capability Register in Configuration Space;
+ * only used if NPEM registers are accessed directly and not through _DSM
+ * @supported_indications: cached bit mask of supported indications;
+ * non-indication and reserved bits in the NPEM Capability Register are
+ * cleared in this bit mask
+ * @active_indications: cached bit mask of active indications;
+ * non-indication and reserved bits in the NPEM Control Register are
+ * cleared in this bit mask
+ * @active_inds_initialized: whether @active_indications has been initialized;
+ * On Dell platforms, it is required that IPMI drivers are loaded before
+ * the GET_STATE_DSM method is invoked: They use an IPMI OpRegion to
+ * get/set the active LEDs. By initializing @active_indications lazily
+ * (on first access to an LED), IPMI drivers are given a chance to load.
+ * If they are not loaded in time, users will see various errors on LED
+ * access in dmesg. Once they are loaded, the errors go away and LED
+ * access becomes possible.
+ * @led_cnt: size of @leds array
+ * @leds: array containing LED class devices of all supported LEDs
+ */
+struct npem {
+ struct pci_dev *dev;
+ const struct npem_ops *ops;
+ struct mutex lock;
+ u16 pos;
+ u32 supported_indications;
+ u32 active_indications;
+ unsigned int active_inds_initialized:1;
+ int led_cnt;
+ struct npem_led leds[];
+};
+
+static int npem_read_reg(struct npem *npem, u16 reg, u32 *val)
+{
+ int ret = pci_read_config_dword(npem->dev, npem->pos + reg, val);
+
+ return pcibios_err_to_errno(ret);
+}
+
+static int npem_write_ctrl(struct npem *npem, u32 reg)
+{
+ int pos = npem->pos + PCI_NPEM_CTRL;
+ int ret = pci_write_config_dword(npem->dev, pos, reg);
+
+ return pcibios_err_to_errno(ret);
+}
+
+static int npem_get_active_indications(struct npem *npem, u32 *inds)
+{
+ u32 ctrl;
+ int ret;
+
+ ret = npem_read_reg(npem, PCI_NPEM_CTRL, &ctrl);
+ if (ret)
+ return ret;
+
+ /* If PCI_NPEM_CTRL_ENABLE is not set then no indication should blink */
+ if (!(ctrl & PCI_NPEM_CTRL_ENABLE)) {
+ *inds = 0;
+ return 0;
+ }
+
+ *inds = ctrl & npem->supported_indications;
+
+ return 0;
+}
+
+static int npem_set_active_indications(struct npem *npem, u32 inds)
+{
+ int ctrl, ret, ret_val;
+ u32 cc_status;
+
+ lockdep_assert_held(&npem->lock);
+
+ /* This bit is always required */
+ ctrl = inds | PCI_NPEM_CTRL_ENABLE;
+
+ ret = npem_write_ctrl(npem, ctrl);
+ if (ret)
+ return ret;
+
+ /*
+ * For the case where an NPEM command has not completed immediately,
+ * it is recommended that software not continuously "spin" on polling
+ * the status register, but rather poll under interrupt at a reduced
+ * rate; for example at 10 ms intervals.
+ *
+ * PCIe r6.1 sec 6.28 "Implementation Note: Software Polling of NPEM
+ * Command Completed"
+ */
+ ret = read_poll_timeout(npem_read_reg, ret_val,
+ ret_val || (cc_status & PCI_NPEM_STATUS_CC),
+ 10 * USEC_PER_MSEC, USEC_PER_SEC, false, npem,
+ PCI_NPEM_STATUS, &cc_status);
+ if (ret)
+ return ret;
+ if (ret_val)
+ return ret_val;
+
+ /*
+ * All writes to control register, including writes that do not change
+ * the register value, are NPEM commands and should eventually result
+ * in a command completion indication in the NPEM Status Register.
+ *
+ * PCIe Base Specification r6.1 sec 7.9.19.3
+ *
+ * The register may not be updated, or other conflicting bits may be
+ * cleared. The spec is not strict here. Read the NPEM Control register
+ * after the write to keep the cache in sync.
+ */
+ return npem_get_active_indications(npem, &npem->active_indications);
+}
+
+static const struct npem_ops npem_ops = {
+ .get_active_indications = npem_get_active_indications,
+ .set_active_indications = npem_set_active_indications,
+ .name = "Native PCIe Enclosure Management",
+ .inds = npem_indications,
+};
+
+#define DSM_GUID GUID_INIT(0x5d524d9d, 0xfff9, 0x4d4b, 0x8c, 0xb7, 0x74, 0x7e,\
+ 0xd5, 0x1e, 0x19, 0x4d)
+#define GET_SUPPORTED_STATES_DSM 1
+#define GET_STATE_DSM 2
+#define SET_STATE_DSM 3
+
+static const guid_t dsm_guid = DSM_GUID;
+
+static bool npem_has_dsm(struct pci_dev *pdev)
+{
+ acpi_handle handle;
+
+ handle = ACPI_HANDLE(&pdev->dev);
+ if (!handle)
+ return false;
+
+ return acpi_check_dsm(handle, &dsm_guid, 0x1,
+ BIT(GET_SUPPORTED_STATES_DSM) |
+ BIT(GET_STATE_DSM) | BIT(SET_STATE_DSM));
+}
+
+struct dsm_output {
+ u16 status;
+ u8 function_specific_err;
+ u8 vendor_specific_err;
+ u32 state;
+};
+
+/**
+ * dsm_evaluate() - send DSM PCIe SSD Status LED command
+ * @pdev: PCI device
+ * @dsm_func: DSM LED Function
+ * @output: buffer to copy DSM Response
+ * @value_to_set: value for SET_STATE_DSM function
+ *
+ * To avoid exposing the ACPI context to the caller, the returned _DSM Output
+ * Buffer is copied.
+ */
+static int dsm_evaluate(struct pci_dev *pdev, u64 dsm_func,
+ struct dsm_output *output, u32 value_to_set)
+{
+ acpi_handle handle = ACPI_HANDLE(&pdev->dev);
+ union acpi_object *out_obj, arg3[2];
+ union acpi_object *arg3_p = NULL;
+
+ if (dsm_func == SET_STATE_DSM) {
+ arg3[0].type = ACPI_TYPE_PACKAGE;
+ arg3[0].package.count = 1;
+ arg3[0].package.elements = &arg3[1];
+
+ arg3[1].type = ACPI_TYPE_BUFFER;
+ arg3[1].buffer.length = 4;
+ arg3[1].buffer.pointer = (u8 *)&value_to_set;
+
+ arg3_p = arg3;
+ }
+
+ out_obj = acpi_evaluate_dsm_typed(handle, &dsm_guid, 0x1, dsm_func,
+ arg3_p, ACPI_TYPE_BUFFER);
+ if (!out_obj)
+ return -EIO;
+
+ if (out_obj->buffer.length < sizeof(struct dsm_output)) {
+ ACPI_FREE(out_obj);
+ return -EIO;
+ }
+
+ memcpy(output, out_obj->buffer.pointer, sizeof(struct dsm_output));
+
+ ACPI_FREE(out_obj);
+ return 0;
+}
+
+static int dsm_get(struct pci_dev *pdev, u64 dsm_func, u32 *buf)
+{
+ struct dsm_output output;
+ int ret = dsm_evaluate(pdev, dsm_func, &output, 0);
+
+ if (ret)
+ return ret;
+
+ if (output.status != 0)
+ return -EIO;
+
+ *buf = output.state;
+ return 0;
+}
+
+static int dsm_get_active_indications(struct npem *npem, u32 *buf)
+{
+ int ret = dsm_get(npem->dev, GET_STATE_DSM, buf);
+
+ /* Filter out unsupported indications from the response */
+ *buf &= npem->supported_indications;
+ return ret;
+}
+
+static int dsm_set_active_indications(struct npem *npem, u32 value)
+{
+ struct dsm_output output;
+ int ret = dsm_evaluate(npem->dev, SET_STATE_DSM, &output, value);
+
+ if (ret)
+ return ret;
+
+ switch (output.status) {
+ case 4:
+ /*
+ * Not all bits are set. If this bit is set, the platform
+ * disregarded some or all of the requested state changes. OSPM
+ * should check the resulting PCIe SSD Status LED States to see
+ * what, if anything, has changed.
+ *
+ * PCI Firmware Specification, r3.3 Table 4-19.
+ */
+ if (output.function_specific_err != 1)
+ return -EIO;
+ fallthrough;
+ case 0:
+ break;
+ default:
+ return -EIO;
+ }
+
+ npem->active_indications = output.state;
+
+ return 0;
+}
+
+static const struct npem_ops dsm_ops = {
+ .get_active_indications = dsm_get_active_indications,
+ .set_active_indications = dsm_set_active_indications,
+ .name = "_DSM PCIe SSD Status LED Management",
+ .inds = dsm_indications,
+};
+
+static int npem_initialize_active_indications(struct npem *npem)
+{
+ int ret;
+
+ lockdep_assert_held(&npem->lock);
+
+ if (npem->active_inds_initialized)
+ return 0;
+
+ ret = npem->ops->get_active_indications(npem,
+ &npem->active_indications);
+ if (ret)
+ return ret;
+
+ npem->active_inds_initialized = true;
+ return 0;
+}
+
+/*
+ * The status of each indicator is cached the first time its brightness is
+ * read or written and is updated at write time. brightness_get() only
+ * reflects the last written/cached value.
+ */
+static enum led_brightness brightness_get(struct led_classdev *led)
+{
+ struct npem_led *nled = container_of(led, struct npem_led, led);
+ struct npem *npem = nled->npem;
+ int ret, val = 0;
+
+ ret = mutex_lock_interruptible(&npem->lock);
+ if (ret)
+ return ret;
+
+ ret = npem_initialize_active_indications(npem);
+ if (ret)
+ goto out;
+
+ if (npem->active_indications & nled->indication->bit)
+ val = 1;
+
+out:
+ mutex_unlock(&npem->lock);
+ return val;
+}
+
+static int brightness_set(struct led_classdev *led,
+ enum led_brightness brightness)
+{
+ struct npem_led *nled = container_of(led, struct npem_led, led);
+ struct npem *npem = nled->npem;
+ u32 indications;
+ int ret;
+
+ ret = mutex_lock_interruptible(&npem->lock);
+ if (ret)
+ return ret;
+
+ ret = npem_initialize_active_indications(npem);
+ if (ret)
+ goto out;
+
+ if (brightness == 0)
+ indications = npem->active_indications & ~(nled->indication->bit);
+ else
+ indications = npem->active_indications | nled->indication->bit;
+
+ ret = npem->ops->set_active_indications(npem, indications);
+
+out:
+ mutex_unlock(&npem->lock);
+ return ret;
+}
+
+static void npem_free(struct npem *npem)
+{
+ struct npem_led *nled;
+ int cnt;
+
+ if (!npem)
+ return;
+
+ for (cnt = 0; cnt < npem->led_cnt; cnt++) {
+ nled = &npem->leds[cnt];
+
+ if (nled->name[0])
+ led_classdev_unregister(&nled->led);
+ }
+
+ mutex_destroy(&npem->lock);
+ kfree(npem);
+}
+
+static int pci_npem_set_led_classdev(struct npem *npem, struct npem_led *nled)
+{
+ struct led_classdev *led = &nled->led;
+ struct led_init_data init_data = {};
+ char *name = nled->name;
+ int ret;
+
+ init_data.devicename = pci_name(npem->dev);
+ init_data.default_label = nled->indication->name;
+
+ ret = led_compose_name(&npem->dev->dev, &init_data, name);
+ if (ret)
+ return ret;
+
+ led->name = name;
+ led->brightness_set_blocking = brightness_set;
+ led->brightness_get = brightness_get;
+ led->max_brightness = 1;
+ led->default_trigger = "none";
+ led->flags = 0;
+
+ ret = led_classdev_register(&npem->dev->dev, led);
+ if (ret)
+ /* Clear the name to indicate that it is not registered. */
+ name[0] = 0;
+ return ret;
+}
+
+static int pci_npem_init(struct pci_dev *dev, const struct npem_ops *ops,
+ int pos, u32 caps)
+{
+ u32 supported = reg_to_indications(caps, ops->inds);
+ int supported_cnt = hweight32(supported);
+ const struct indication *indication;
+ struct npem_led *nled;
+ struct npem *npem;
+ int led_idx = 0;
+ int ret;
+
+ npem = kzalloc(struct_size(npem, leds, supported_cnt), GFP_KERNEL);
+ if (!npem)
+ return -ENOMEM;
+
+ npem->supported_indications = supported;
+ npem->led_cnt = supported_cnt;
+ npem->pos = pos;
+ npem->dev = dev;
+ npem->ops = ops;
+
+ mutex_init(&npem->lock);
+
+ for_each_indication(indication, npem_indications) {
+ if (!(npem->supported_indications & indication->bit))
+ continue;
+
+ nled = &npem->leds[led_idx++];
+ nled->indication = indication;
+ nled->npem = npem;
+
+ ret = pci_npem_set_led_classdev(npem, nled);
+ if (ret) {
+ npem_free(npem);
+ return ret;
+ }
+ }
+
+ dev->npem = npem;
+ return 0;
+}
+
+void pci_npem_remove(struct pci_dev *dev)
+{
+ npem_free(dev->npem);
+}
+
+void pci_npem_create(struct pci_dev *dev)
+{
+ const struct npem_ops *ops = &npem_ops;
+ int pos = 0, ret;
+ u32 cap;
+
+ if (npem_has_dsm(dev)) {
+ /*
+ * The OS should use the _DSM for LED control if it is available;
+ * see PCI Firmware Spec r3.3 sec 4.7.
+ */
+ ret = dsm_get(dev, GET_SUPPORTED_STATES_DSM, &cap);
+ if (ret)
+ return;
+
+ ops = &dsm_ops;
+ } else {
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_NPEM);
+ if (pos == 0)
+ return;
+
+ if (pci_read_config_dword(dev, pos + PCI_NPEM_CAP, &cap) != 0 ||
+ (cap & PCI_NPEM_CAP_CAPABLE) == 0)
+ return;
+ }
+
+ pci_info(dev, "Configuring %s\n", ops->name);
+
+ ret = pci_npem_init(dev, ops, pos, cap);
+ if (ret)
+ pci_err(dev, "Failed to register %s, err: %d\n", ops->name,
+ ret);
+}
diff --git a/drivers/pci/of.c b/drivers/pci/of.c
index e51219f9f523..3579265f1198 100644
--- a/drivers/pci/of.c
+++ b/drivers/pci/of.c
@@ -6,6 +6,7 @@
*/
#define pr_fmt(fmt) "PCI: OF: " fmt
+#include <linux/cleanup.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/pci.h>
@@ -13,6 +14,7 @@
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
+#include <linux/platform_device.h>
#include "pci.h"
#ifdef CONFIG_PCI
@@ -25,21 +27,20 @@
*/
int pci_set_of_node(struct pci_dev *dev)
{
- struct device_node *node;
-
if (!dev->bus->dev.of_node)
return 0;
- node = of_pci_find_child_device(dev->bus->dev.of_node, dev->devfn);
+ struct device_node *node __free(device_node) =
+ of_pci_find_child_device(dev->bus->dev.of_node, dev->devfn);
if (!node)
return 0;
- if (!of_device_is_available(node)) {
- of_node_put(node);
- return -ENODEV;
- }
+ struct device *pdev __free(put_device) =
+ bus_find_device_by_of_node(&platform_bus_type, node);
+ if (pdev)
+ dev->bus->dev.of_node_reused = true;
- device_set_node(&dev->dev, of_fwnode_handle(node));
+ device_set_node(&dev->dev, of_fwnode_handle(no_free_ptr(node)));
return 0;
}
@@ -189,7 +190,8 @@ EXPORT_SYMBOL_GPL(of_pci_get_devfn);
*
* Returns 0 on success or a negative error-code on failure.
*/
-int of_pci_parse_bus_range(struct device_node *node, struct resource *res)
+static int of_pci_parse_bus_range(struct device_node *node,
+ struct resource *res)
{
u32 bus_range[2];
int error;
@@ -206,7 +208,6 @@ int of_pci_parse_bus_range(struct device_node *node, struct resource *res)
return 0;
}
-EXPORT_SYMBOL_GPL(of_pci_parse_bus_range);
/**
* of_get_pci_domain_nr - Find the host bridge domain number
@@ -239,27 +240,61 @@ int of_get_pci_domain_nr(struct device_node *node)
EXPORT_SYMBOL_GPL(of_get_pci_domain_nr);
/**
- * of_pci_check_probe_only - Setup probe only mode if linux,pci-probe-only
- * is present and valid
+ * of_pci_preserve_config - Return true if the boot configuration needs to
+ * be preserved
+ * @node: Device tree node.
+ *
+ * Look for "linux,pci-probe-only" property for a given PCI controller's
+ * node and return true if found. Also look in the chosen node if the
+ * property is not found in the given controller's node. Having this
+ * property ensures that the kernel doesn't reconfigure the BARs and bridge
+ * windows that are already done by the platform firmware.
+ *
+ * Return: true if the property exists; false otherwise.
*/
-void of_pci_check_probe_only(void)
+bool of_pci_preserve_config(struct device_node *node)
{
- u32 val;
+ u32 val = 0;
int ret;
- ret = of_property_read_u32(of_chosen, "linux,pci-probe-only", &val);
+ if (!node) {
+ pr_warn("device node is NULL, trying with of_chosen\n");
+ node = of_chosen;
+ }
+
+retry:
+ ret = of_property_read_u32(node, "linux,pci-probe-only", &val);
if (ret) {
- if (ret == -ENODATA || ret == -EOVERFLOW)
- pr_warn("linux,pci-probe-only without valid value, ignoring\n");
- return;
+ if (ret == -ENODATA || ret == -EOVERFLOW) {
+ pr_warn("Incorrect value for linux,pci-probe-only in %pOF, ignoring\n",
+ node);
+ return false;
+ }
+ if (ret == -EINVAL) {
+ if (node == of_chosen)
+ return false;
+
+ node = of_chosen;
+ goto retry;
+ }
}
if (val)
+ return true;
+ else
+ return false;
+}
+
+/**
+ * of_pci_check_probe_only - Setup probe only mode if linux,pci-probe-only
+ * is present and valid
+ */
+void of_pci_check_probe_only(void)
+{
+ if (of_pci_preserve_config(of_chosen))
pci_add_flags(PCI_PROBE_ONLY);
else
pci_clear_flags(PCI_PROBE_ONLY);
-
- pr_info("PROBE_ONLY %s\n", val ? "enabled" : "disabled");
}
EXPORT_SYMBOL_GPL(of_pci_check_probe_only);
@@ -267,8 +302,6 @@ EXPORT_SYMBOL_GPL(of_pci_check_probe_only);
* devm_of_pci_get_host_bridge_resources() - Resource-managed parsing of PCI
* host bridge resources from DT
* @dev: host bridge device
- * @busno: bus number associated with the bridge root bus
- * @bus_max: maximum number of buses for this bridge
* @resources: list where the range of resources will be added after DT parsing
* @ib_resources: list where the range of inbound resources (with addresses
* from 'dma-ranges') will be added after DT parsing
@@ -284,7 +317,6 @@ EXPORT_SYMBOL_GPL(of_pci_check_probe_only);
* value if it failed.
*/
static int devm_of_pci_get_host_bridge_resources(struct device *dev,
- unsigned char busno, unsigned char bus_max,
struct list_head *resources,
struct list_head *ib_resources,
resource_size_t *io_base)
@@ -308,14 +340,15 @@ static int devm_of_pci_get_host_bridge_resources(struct device *dev,
err = of_pci_parse_bus_range(dev_node, bus_range);
if (err) {
- bus_range->start = busno;
- bus_range->end = bus_max;
+ bus_range->start = 0;
+ bus_range->end = 0xff;
bus_range->flags = IORESOURCE_BUS;
- dev_info(dev, " No bus range found for %pOF, using %pR\n",
- dev_node, bus_range);
} else {
- if (bus_range->end > bus_range->start + bus_max)
- bus_range->end = bus_range->start + bus_max;
+ if (bus_range->end > 0xff) {
+ dev_warn(dev, " Invalid end bus number in %pR, defaulting to 0xff\n",
+ bus_range);
+ bus_range->end = 0xff;
+ }
}
pci_add_resource(resources, bus_range);
@@ -422,9 +455,9 @@ failed:
* @out_irq: structure of_phandle_args filled by this function
*
* This function resolves the PCI interrupt for a given PCI device. If a
- * device-node exists for a given pci_dev, it will use normal OF tree
+ * device node exists for a given pci_dev, it will use normal OF tree
* walking. If not, it will implement standard swizzling and walk up the
- * PCI tree until an device-node is found, at which point it will finish
+ * PCI tree until a device node is found, at which point it will finish
* resolving using the OF tree walking.
*/
static int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq)
@@ -484,13 +517,16 @@ static int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *
}
/*
- * Ok, we have found a parent with a device-node, hand over to
+ * Ok, we have found a parent with a device node, hand over to
* the OF parsing code.
+ *
* We build a unit address from the linux device to be used for
* resolution. Note that we use the linux bus number which may
* not match your firmware bus numbering.
+ *
* Fortunately, in most cases, interrupt-map-mask doesn't
* include the bus number as part of the matching.
+ *
* You should still be careful about that though if you intend
* to rely on this function (you ship a firmware that doesn't
* create device nodes for all PCI devices).
@@ -562,7 +598,7 @@ static int pci_parse_request_of_pci_ranges(struct device *dev,
INIT_LIST_HEAD(&bridge->windows);
INIT_LIST_HEAD(&bridge->dma_ranges);
- err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, &bridge->windows,
+ err = devm_of_pci_get_host_bridge_resources(dev, &bridge->windows,
&bridge->dma_ranges, &iobase);
if (err)
return err;
@@ -611,6 +647,227 @@ int devm_of_pci_bridge_init(struct device *dev, struct pci_host_bridge *bridge)
return pci_parse_request_of_pci_ranges(dev, bridge);
}
+#ifdef CONFIG_PCI_DYNAMIC_OF_NODES
+
+void of_pci_remove_node(struct pci_dev *pdev)
+{
+ struct device_node *np;
+
+ np = pci_device_to_OF_node(pdev);
+ if (!np || !of_node_check_flag(np, OF_DYNAMIC))
+ return;
+
+ device_remove_of_node(&pdev->dev);
+ of_changeset_revert(np->data);
+ of_changeset_destroy(np->data);
+ of_node_put(np);
+}
+
+void of_pci_make_dev_node(struct pci_dev *pdev)
+{
+ struct device_node *ppnode, *np = NULL;
+ const char *pci_type;
+ struct of_changeset *cset;
+ const char *name;
+ int ret;
+
+ /*
+ * If there is already a device tree node linked to this device,
+ * return immediately.
+ */
+ if (pci_device_to_OF_node(pdev))
+ return;
+
+ /* Check if there is device tree node for parent device */
+ if (!pdev->bus->self)
+ ppnode = pdev->bus->dev.of_node;
+ else
+ ppnode = pdev->bus->self->dev.of_node;
+ if (!ppnode)
+ return;
+
+ if (pci_is_bridge(pdev))
+ pci_type = "pci";
+ else
+ pci_type = "dev";
+
+ name = kasprintf(GFP_KERNEL, "%s@%x,%x", pci_type,
+ PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
+ if (!name)
+ return;
+
+ cset = kmalloc(sizeof(*cset), GFP_KERNEL);
+ if (!cset)
+ goto out_free_name;
+ of_changeset_init(cset);
+
+ np = of_changeset_create_node(cset, ppnode, name);
+ if (!np)
+ goto out_destroy_cset;
+
+ ret = of_pci_add_properties(pdev, cset, np);
+ if (ret)
+ goto out_free_node;
+
+ ret = of_changeset_apply(cset);
+ if (ret)
+ goto out_free_node;
+
+ np->data = cset;
+
+ ret = device_add_of_node(&pdev->dev, np);
+ if (ret)
+ goto out_revert_cset;
+
+ kfree(name);
+
+ return;
+
+out_revert_cset:
+ np->data = NULL;
+ of_changeset_revert(cset);
+out_free_node:
+ of_node_put(np);
+out_destroy_cset:
+ of_changeset_destroy(cset);
+ kfree(cset);
+out_free_name:
+ kfree(name);
+}
+
+void of_pci_remove_host_bridge_node(struct pci_host_bridge *bridge)
+{
+ struct device_node *np;
+
+ np = pci_bus_to_OF_node(bridge->bus);
+ if (!np || !of_node_check_flag(np, OF_DYNAMIC))
+ return;
+
+ device_remove_of_node(&bridge->bus->dev);
+ device_remove_of_node(&bridge->dev);
+ of_changeset_revert(np->data);
+ of_changeset_destroy(np->data);
+ of_node_put(np);
+}
+
+void of_pci_make_host_bridge_node(struct pci_host_bridge *bridge)
+{
+ struct device_node *np = NULL;
+ struct of_changeset *cset;
+ const char *name;
+ int ret;
+
+ /*
+ * If there is already a device tree node linked to the PCI bus handled
+ * by this bridge (i.e. the PCI root bus), nothing to do.
+ */
+ if (pci_bus_to_OF_node(bridge->bus))
+ return;
+
+ /*
+ * The root bus has no node. Check that the host bridge has no node
+ * either.
+ */
+ if (bridge->dev.of_node) {
+ dev_err(&bridge->dev, "PCI host bridge of_node already set");
+ return;
+ }
+
+ /* Check if there is a DT root node to attach the created node */
+ if (!of_root) {
+ pr_err("of_root node is NULL, cannot create PCI host bridge node\n");
+ return;
+ }
+
+ name = kasprintf(GFP_KERNEL, "pci@%x,%x", pci_domain_nr(bridge->bus),
+ bridge->bus->number);
+ if (!name)
+ return;
+
+ cset = kmalloc(sizeof(*cset), GFP_KERNEL);
+ if (!cset)
+ goto out_free_name;
+ of_changeset_init(cset);
+
+ np = of_changeset_create_node(cset, of_root, name);
+ if (!np)
+ goto out_destroy_cset;
+
+ ret = of_pci_add_host_bridge_properties(bridge, cset, np);
+ if (ret)
+ goto out_free_node;
+
+ /*
+ * This of_node will be added to an existing device. The of_node parent
+ * is the root OF node and so this node will be handled by the platform
+ * bus. Avoid any new device creation.
+ */
+ of_node_set_flag(np, OF_POPULATED);
+ np->fwnode.dev = &bridge->dev;
+ fwnode_dev_initialized(&np->fwnode, true);
+
+ ret = of_changeset_apply(cset);
+ if (ret)
+ goto out_free_node;
+
+ np->data = cset;
+
+ /* Add the of_node to host bridge and the root bus */
+ ret = device_add_of_node(&bridge->dev, np);
+ if (ret)
+ goto out_revert_cset;
+
+ ret = device_add_of_node(&bridge->bus->dev, np);
+ if (ret)
+ goto out_remove_bridge_dev_of_node;
+
+ kfree(name);
+
+ return;
+
+out_remove_bridge_dev_of_node:
+ device_remove_of_node(&bridge->dev);
+out_revert_cset:
+ np->data = NULL;
+ of_changeset_revert(cset);
+out_free_node:
+ of_node_put(np);
+out_destroy_cset:
+ of_changeset_destroy(cset);
+ kfree(cset);
+out_free_name:
+ kfree(name);
+}
+
+#endif /* CONFIG_PCI_DYNAMIC_OF_NODES */
+
+/**
+ * of_pci_supply_present() - Check if the power supply is present for the PCI
+ * device
+ * @np: Device tree node
+ *
+ * Check if the power supply for the PCI device is present in the device tree
+ * node or not.
+ *
+ * Return: true if at least one power supply exists; false otherwise.
+ */
+bool of_pci_supply_present(struct device_node *np)
+{
+ struct property *prop;
+ char *supply;
+
+ if (!np)
+ return false;
+
+ for_each_property_of_node(np, prop) {
+ supply = strrchr(prop->name, '-');
+ if (supply && !strcmp(supply, "-supply"))
+ return true;
+ }
+
+ return false;
+}
+
#endif /* CONFIG_PCI */
/**
@@ -709,3 +966,47 @@ u32 of_pci_get_slot_power_limit(struct device_node *node,
return slot_power_limit_mw;
}
EXPORT_SYMBOL_GPL(of_pci_get_slot_power_limit);
+
+/**
+ * of_pci_get_equalization_presets - Parse the "eq-presets-Ngts" property.
+ *
+ * @dev: Device containing the properties.
+ * @presets: Pointer to store the parsed data.
+ * @num_lanes: Maximum number of lanes supported.
+ *
+ * If the property is present, read and store the data in the @presets structure.
+ * Else, assign a default value of PCI_EQ_RESV.
+ *
+ * Return: 0 if the property is absent or was parsed successfully, a negative
+ * errno otherwise.
+ */
+int of_pci_get_equalization_presets(struct device *dev,
+ struct pci_eq_presets *presets,
+ int num_lanes)
+{
+ char name[20];
+ int ret;
+
+ presets->eq_presets_8gts[0] = PCI_EQ_RESV;
+ ret = of_property_read_u16_array(dev->of_node, "eq-presets-8gts",
+ presets->eq_presets_8gts, num_lanes);
+ if (ret && ret != -EINVAL) {
+ dev_err(dev, "Error reading eq-presets-8gts: %d\n", ret);
+ return ret;
+ }
+
+ for (int i = 0; i < EQ_PRESET_TYPE_MAX - 1; i++) {
+ presets->eq_presets_Ngts[i][0] = PCI_EQ_RESV;
+ snprintf(name, sizeof(name), "eq-presets-%dgts", 8 << (i + 1));
+ ret = of_property_read_u8_array(dev->of_node, name,
+ presets->eq_presets_Ngts[i],
+ num_lanes);
+ if (ret && ret != -EINVAL) {
+ dev_err(dev, "Error reading %s: %d\n", name, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(of_pci_get_equalization_presets);
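A hypothetical consumer of the helper, assuming a controller driver with a 4-lane link (the function below is illustrative and not part of this patch); per-lane entries left unset in the device tree come back as PCI_EQ_RESV:

static int example_parse_eq_presets(struct device *dev)
{
	struct pci_eq_presets presets;
	int ret;

	ret = of_pci_get_equalization_presets(dev, &presets, 4);
	if (ret)
		return ret;

	/* presets.eq_presets_8gts[0..3] now hold the 8 GT/s lane presets */
	return 0;
}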
diff --git a/drivers/pci/of_property.c b/drivers/pci/of_property.c
new file mode 100644
index 000000000000..7aae46f333d9
--- /dev/null
+++ b/drivers/pci/of_property.c
@@ -0,0 +1,507 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2022-2023, Advanced Micro Devices, Inc.
+ */
+
+#include <linux/pci.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include "pci.h"
+
+#define OF_PCI_ADDRESS_CELLS 3
+#define OF_PCI_SIZE_CELLS 2
+#define OF_PCI_MAX_INT_PIN 4
+
+struct of_pci_addr_pair {
+ u32 phys_addr[OF_PCI_ADDRESS_CELLS];
+ u32 size[OF_PCI_SIZE_CELLS];
+};
+
+/*
+ * Each entry in the ranges table is a tuple containing the child address,
+ * the parent address, and the size of the region in the child address space.
+ * Thus, for PCI, in each entry the parent address is an address on the primary
+ * side and the child address is the corresponding address on the secondary
+ * side.
+ */
+struct of_pci_range_entry {
+ u32 child_addr[OF_PCI_ADDRESS_CELLS];
+ u32 parent_addr[OF_PCI_ADDRESS_CELLS];
+ u32 size[OF_PCI_SIZE_CELLS];
+};
+
+#define OF_PCI_ADDR_SPACE_IO 0x1
+#define OF_PCI_ADDR_SPACE_MEM32 0x2
+#define OF_PCI_ADDR_SPACE_MEM64 0x3
+
+#define OF_PCI_ADDR_FIELD_NONRELOC BIT(31)
+#define OF_PCI_ADDR_FIELD_SS GENMASK(25, 24)
+#define OF_PCI_ADDR_FIELD_PREFETCH BIT(30)
+#define OF_PCI_ADDR_FIELD_BUS GENMASK(23, 16)
+#define OF_PCI_ADDR_FIELD_DEV GENMASK(15, 11)
+#define OF_PCI_ADDR_FIELD_FUNC GENMASK(10, 8)
+#define OF_PCI_ADDR_FIELD_REG GENMASK(7, 0)
+
+enum of_pci_prop_compatible {
+ PROP_COMPAT_PCI_VVVV_DDDD,
+ PROP_COMPAT_PCICLASS_CCSSPP,
+ PROP_COMPAT_PCICLASS_CCSS,
+ PROP_COMPAT_NUM,
+};
+
+static void of_pci_set_address(struct pci_dev *pdev, u32 *prop, u64 addr,
+ u32 reg_num, u32 flags, bool reloc)
+{
+ if (pdev) {
+ prop[0] = FIELD_PREP(OF_PCI_ADDR_FIELD_BUS, pdev->bus->number) |
+ FIELD_PREP(OF_PCI_ADDR_FIELD_DEV, PCI_SLOT(pdev->devfn)) |
+ FIELD_PREP(OF_PCI_ADDR_FIELD_FUNC, PCI_FUNC(pdev->devfn));
+ } else
+ prop[0] = 0;
+
+ prop[0] |= flags | reg_num;
+ if (!reloc) {
+ prop[0] |= OF_PCI_ADDR_FIELD_NONRELOC;
+ prop[1] = upper_32_bits(addr);
+ prop[2] = lower_32_bits(addr);
+ }
+}
+
+static int of_pci_get_addr_flags(const struct resource *res, u32 *flags)
+{
+ u32 ss;
+
+ if (res->flags & IORESOURCE_IO)
+ ss = OF_PCI_ADDR_SPACE_IO;
+ else if (res->flags & IORESOURCE_MEM_64)
+ ss = OF_PCI_ADDR_SPACE_MEM64;
+ else if (res->flags & IORESOURCE_MEM)
+ ss = OF_PCI_ADDR_SPACE_MEM32;
+ else
+ return -EINVAL;
+
+ *flags = 0;
+ if (res->flags & IORESOURCE_PREFETCH)
+ *flags |= OF_PCI_ADDR_FIELD_PREFETCH;
+
+ *flags |= FIELD_PREP(OF_PCI_ADDR_FIELD_SS, ss);
+
+ return 0;
+}
+
+static int of_pci_prop_bus_range(struct pci_dev *pdev,
+ struct of_changeset *ocs,
+ struct device_node *np)
+{
+ u32 bus_range[] = { pdev->subordinate->busn_res.start,
+ pdev->subordinate->busn_res.end };
+
+ return of_changeset_add_prop_u32_array(ocs, np, "bus-range", bus_range,
+ ARRAY_SIZE(bus_range));
+}
+
+static int of_pci_prop_ranges(struct pci_dev *pdev, struct of_changeset *ocs,
+ struct device_node *np)
+{
+ struct of_pci_range_entry *rp;
+ struct resource *res;
+ int i, j, ret;
+ u32 flags, num;
+ u64 val64;
+
+ if (pci_is_bridge(pdev)) {
+ num = PCI_BRIDGE_RESOURCE_NUM;
+ res = &pdev->resource[PCI_BRIDGE_RESOURCES];
+ } else {
+ num = PCI_STD_NUM_BARS;
+ res = &pdev->resource[PCI_STD_RESOURCES];
+ }
+
+ rp = kcalloc(num, sizeof(*rp), GFP_KERNEL);
+ if (!rp)
+ return -ENOMEM;
+
+ for (i = 0, j = 0; j < num; j++) {
+ if (!resource_size(&res[j]))
+ continue;
+
+ if (of_pci_get_addr_flags(&res[j], &flags))
+ continue;
+
+ val64 = pci_bus_address(pdev, &res[j] - pdev->resource);
+ of_pci_set_address(pdev, rp[i].parent_addr, val64, 0, flags,
+ false);
+ if (pci_is_bridge(pdev)) {
+ memcpy(rp[i].child_addr, rp[i].parent_addr,
+ sizeof(rp[i].child_addr));
+ } else {
+ /*
+ * For an endpoint device, the lower 64 bits of the
+ * child address are always zero.
+ */
+ rp[i].child_addr[0] = j;
+ }
+
+ val64 = resource_size(&res[j]);
+ rp[i].size[0] = upper_32_bits(val64);
+ rp[i].size[1] = lower_32_bits(val64);
+
+ i++;
+ }
+
+ ret = of_changeset_add_prop_u32_array(ocs, np, "ranges", (u32 *)rp,
+ i * sizeof(*rp) / sizeof(u32));
+ kfree(rp);
+
+ return ret;
+}
+
+static int of_pci_prop_reg(struct pci_dev *pdev, struct of_changeset *ocs,
+ struct device_node *np)
+{
+ struct of_pci_addr_pair reg = { 0 };
+
+ /* configuration space */
+ of_pci_set_address(pdev, reg.phys_addr, 0, 0, 0, true);
+
+ return of_changeset_add_prop_u32_array(ocs, np, "reg", (u32 *)&reg,
+ sizeof(reg) / sizeof(u32));
+}
+
+static int of_pci_prop_interrupts(struct pci_dev *pdev,
+ struct of_changeset *ocs,
+ struct device_node *np)
+{
+ int ret;
+ u8 pin;
+
+ ret = pci_read_config_byte(pdev, PCI_INTERRUPT_PIN, &pin);
+ if (ret != 0)
+ return ret;
+
+ if (!pin)
+ return 0;
+
+ return of_changeset_add_prop_u32(ocs, np, "interrupts", (u32)pin);
+}
+
+static int of_pci_prop_intr_ctrl(struct pci_dev *pdev, struct of_changeset *ocs,
+ struct device_node *np)
+{
+ int ret;
+ u8 pin;
+
+ ret = pci_read_config_byte(pdev, PCI_INTERRUPT_PIN, &pin);
+ if (ret != 0)
+ return ret;
+
+ if (!pin)
+ return 0;
+
+ ret = of_changeset_add_prop_u32(ocs, np, "#interrupt-cells", 1);
+ if (ret)
+ return ret;
+
+ return of_changeset_add_prop_bool(ocs, np, "interrupt-controller");
+}
+
+static int of_pci_prop_intr_map(struct pci_dev *pdev, struct of_changeset *ocs,
+ struct device_node *np)
+{
+ u32 i, addr_sz[OF_PCI_MAX_INT_PIN] = { 0 }, map_sz = 0;
+ struct of_phandle_args out_irq[OF_PCI_MAX_INT_PIN];
+ __be32 laddr[OF_PCI_ADDRESS_CELLS] = { 0 };
+ u32 int_map_mask[] = { 0xffff00, 0, 0, 7 };
+ struct device_node *pnode;
+ struct pci_dev *child;
+ u32 *int_map, *mapp;
+ int ret;
+ u8 pin;
+
+ pnode = pci_device_to_OF_node(pdev->bus->self);
+ if (!pnode)
+ pnode = pci_bus_to_OF_node(pdev->bus);
+
+ if (!pnode) {
+ pci_err(pdev, "failed to get parent device node");
+ return -EINVAL;
+ }
+
+ laddr[0] = cpu_to_be32((pdev->bus->number << 16) | (pdev->devfn << 8));
+ for (pin = 1; pin <= OF_PCI_MAX_INT_PIN; pin++) {
+ i = pin - 1;
+ out_irq[i].np = pnode;
+ out_irq[i].args_count = 1;
+ out_irq[i].args[0] = pin;
+ ret = of_irq_parse_raw(laddr, &out_irq[i]);
+ if (ret) {
+ out_irq[i].np = NULL;
+ pci_dbg(pdev, "parse irq %d failed, ret %d", pin, ret);
+ continue;
+ }
+ of_property_read_u32(out_irq[i].np, "#address-cells",
+ &addr_sz[i]);
+ }
+
+ list_for_each_entry(child, &pdev->subordinate->devices, bus_list) {
+ for (pin = 1; pin <= OF_PCI_MAX_INT_PIN; pin++) {
+ i = pci_swizzle_interrupt_pin(child, pin) - 1;
+ if (!out_irq[i].np)
+ continue;
+ map_sz += 5 + addr_sz[i] + out_irq[i].args_count;
+ }
+ }
+
+ /*
+ * Interrupt parsing failed for all pins. In this case, there is no
+ * need to generate an interrupt-map property.
+ */
+ if (!map_sz)
+ return 0;
+
+ int_map = kcalloc(map_sz, sizeof(u32), GFP_KERNEL);
+ if (!int_map)
+ return -ENOMEM;
+ mapp = int_map;
+
+ list_for_each_entry(child, &pdev->subordinate->devices, bus_list) {
+ for (pin = 1; pin <= OF_PCI_MAX_INT_PIN; pin++) {
+ i = pci_swizzle_interrupt_pin(child, pin) - 1;
+ if (!out_irq[i].np)
+ continue;
+
+ *mapp = (child->bus->number << 16) |
+ (child->devfn << 8);
+ mapp += OF_PCI_ADDRESS_CELLS;
+ *mapp = pin;
+ mapp++;
+ *mapp = out_irq[i].np->phandle;
+ mapp++;
+
+ /*
+ * A device address does not affect the device <->
+ * interrupt-controller HW connection for all
+ * modern interrupt controllers; moreover, the
+ * kernel (i.e., of_irq_parse_raw()) ignores the
+ * values in the parent unit address cells while
+ * parsing the interrupt-map property because they
+ * are irrelevant for interrupt mapping in modern
+ * systems.
+ *
+ * Leave the parent unit address initialized to 0 --
+ * just take into account the #address-cells size
+ * to build the property properly.
+ */
+ mapp += addr_sz[i];
+ memcpy(mapp, out_irq[i].args,
+ out_irq[i].args_count * sizeof(u32));
+ mapp += out_irq[i].args_count;
+ }
+ }
+
+ ret = of_changeset_add_prop_u32_array(ocs, np, "interrupt-map", int_map,
+ map_sz);
+ if (ret)
+ goto failed;
+
+ ret = of_changeset_add_prop_u32(ocs, np, "#interrupt-cells", 1);
+ if (ret)
+ goto failed;
+
+ ret = of_changeset_add_prop_u32_array(ocs, np, "interrupt-map-mask",
+ int_map_mask,
+ ARRAY_SIZE(int_map_mask));
+ if (ret)
+ goto failed;
+
+ kfree(int_map);
+ return 0;
+
+failed:
+ kfree(int_map);
+ return ret;
+}
+
+static int of_pci_prop_compatible(struct pci_dev *pdev,
+ struct of_changeset *ocs,
+ struct device_node *np)
+{
+ const char *compat_strs[PROP_COMPAT_NUM] = { 0 };
+ int i, ret;
+
+ compat_strs[PROP_COMPAT_PCI_VVVV_DDDD] =
+ kasprintf(GFP_KERNEL, "pci%x,%x", pdev->vendor, pdev->device);
+ compat_strs[PROP_COMPAT_PCICLASS_CCSSPP] =
+ kasprintf(GFP_KERNEL, "pciclass,%06x", pdev->class);
+ compat_strs[PROP_COMPAT_PCICLASS_CCSS] =
+ kasprintf(GFP_KERNEL, "pciclass,%04x", pdev->class >> 8);
+
+ ret = of_changeset_add_prop_string_array(ocs, np, "compatible",
+ compat_strs, PROP_COMPAT_NUM);
+ for (i = 0; i < PROP_COMPAT_NUM; i++)
+ kfree(compat_strs[i]);
+
+ return ret;
+}
+
+int of_pci_add_properties(struct pci_dev *pdev, struct of_changeset *ocs,
+ struct device_node *np)
+{
+ int ret;
+
+ /*
+ * The added properties will be released when the
+ * changeset is destroyed.
+ */
+ if (pci_is_bridge(pdev)) {
+ ret = of_changeset_add_prop_string(ocs, np, "device_type",
+ "pci");
+ if (ret)
+ return ret;
+
+ ret = of_pci_prop_bus_range(pdev, ocs, np);
+ if (ret)
+ return ret;
+
+ ret = of_pci_prop_intr_map(pdev, ocs, np);
+ if (ret)
+ return ret;
+ } else {
+ ret = of_pci_prop_intr_ctrl(pdev, ocs, np);
+ if (ret)
+ return ret;
+ }
+
+ ret = of_pci_prop_ranges(pdev, ocs, np);
+ if (ret)
+ return ret;
+
+ ret = of_changeset_add_prop_u32(ocs, np, "#address-cells",
+ OF_PCI_ADDRESS_CELLS);
+ if (ret)
+ return ret;
+
+ ret = of_changeset_add_prop_u32(ocs, np, "#size-cells",
+ OF_PCI_SIZE_CELLS);
+ if (ret)
+ return ret;
+
+ ret = of_pci_prop_reg(pdev, ocs, np);
+ if (ret)
+ return ret;
+
+ ret = of_pci_prop_compatible(pdev, ocs, np);
+ if (ret)
+ return ret;
+
+ ret = of_pci_prop_interrupts(pdev, ocs, np);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static bool of_pci_is_range_resource(const struct resource *res, u32 *flags)
+{
+ if (!(resource_type(res) & IORESOURCE_MEM) &&
+ !(resource_type(res) & IORESOURCE_MEM_64))
+ return false;
+
+ if (of_pci_get_addr_flags(res, flags))
+ return false;
+
+ return true;
+}
+
+static int of_pci_host_bridge_prop_ranges(struct pci_host_bridge *bridge,
+ struct of_changeset *ocs,
+ struct device_node *np)
+{
+ struct resource_entry *window;
+ unsigned int ranges_sz = 0;
+ unsigned int n_range = 0;
+ struct resource *res;
+ int n_addr_cells;
+ u32 *ranges;
+ u64 val64;
+ u32 flags;
+ int ret;
+
+ n_addr_cells = of_n_addr_cells(np);
+ if (n_addr_cells <= 0 || n_addr_cells > 2)
+ return -EINVAL;
+
+ resource_list_for_each_entry(window, &bridge->windows) {
+ res = window->res;
+ if (!of_pci_is_range_resource(res, &flags))
+ continue;
+ n_range++;
+ }
+
+ if (!n_range)
+ return 0;
+
+ ranges = kcalloc(n_range,
+ (OF_PCI_ADDRESS_CELLS + OF_PCI_SIZE_CELLS +
+ n_addr_cells) * sizeof(*ranges),
+ GFP_KERNEL);
+ if (!ranges)
+ return -ENOMEM;
+
+ resource_list_for_each_entry(window, &bridge->windows) {
+ res = window->res;
+ if (!of_pci_is_range_resource(res, &flags))
+ continue;
+
+ /* PCI bus address */
+ val64 = res->start;
+ of_pci_set_address(NULL, &ranges[ranges_sz],
+ val64 - window->offset, 0, flags, false);
+ ranges_sz += OF_PCI_ADDRESS_CELLS;
+
+ /* Host bus address */
+ if (n_addr_cells == 2)
+ ranges[ranges_sz++] = upper_32_bits(val64);
+ ranges[ranges_sz++] = lower_32_bits(val64);
+
+ /* Size */
+ val64 = resource_size(res);
+ ranges[ranges_sz] = upper_32_bits(val64);
+ ranges[ranges_sz + 1] = lower_32_bits(val64);
+ ranges_sz += OF_PCI_SIZE_CELLS;
+ }
+
+ ret = of_changeset_add_prop_u32_array(ocs, np, "ranges", ranges,
+ ranges_sz);
+ kfree(ranges);
+ return ret;
+}
+
+int of_pci_add_host_bridge_properties(struct pci_host_bridge *bridge,
+ struct of_changeset *ocs,
+ struct device_node *np)
+{
+ int ret;
+
+ ret = of_changeset_add_prop_string(ocs, np, "device_type", "pci");
+ if (ret)
+ return ret;
+
+ ret = of_changeset_add_prop_u32(ocs, np, "#address-cells",
+ OF_PCI_ADDRESS_CELLS);
+ if (ret)
+ return ret;
+
+ ret = of_changeset_add_prop_u32(ocs, np, "#size-cells",
+ OF_PCI_SIZE_CELLS);
+ if (ret)
+ return ret;
+
+ ret = of_pci_host_bridge_prop_ranges(bridge, ocs, np);
+ if (ret)
+ return ret;
+
+ return 0;
+}
diff --git a/drivers/pci/p2pdma.c b/drivers/pci/p2pdma.c
index 6cd98ffca198..4a2fc7ab42c3 100644
--- a/drivers/pci/p2pdma.c
+++ b/drivers/pci/p2pdma.c
@@ -25,12 +25,12 @@ struct pci_p2pdma {
struct gen_pool *pool;
bool p2pmem_published;
struct xarray map_types;
+ struct p2pdma_provider mem[PCI_STD_NUM_BARS];
};
struct pci_p2pdma_pagemap {
struct dev_pagemap pgmap;
- struct pci_dev *provider;
- u64 bus_offset;
+ struct p2pdma_provider *mem;
};
static struct pci_p2pdma_pagemap *to_p2p_pgmap(struct dev_pagemap *pgmap)
@@ -90,7 +90,7 @@ static ssize_t published_show(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR_RO(published);
static int p2pmem_alloc_mmap(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, struct vm_area_struct *vma)
+ const struct bin_attribute *attr, struct vm_area_struct *vma)
{
struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
size_t len = vma->vm_end - vma->vm_start;
@@ -140,13 +140,22 @@ static int p2pmem_alloc_mmap(struct file *filp, struct kobject *kobj,
rcu_read_unlock();
for (vaddr = vma->vm_start; vaddr < vma->vm_end; vaddr += PAGE_SIZE) {
- ret = vm_insert_page(vma, vaddr, virt_to_page(kaddr));
+ struct page *page = virt_to_page(kaddr);
+
+ /*
+ * Initialise the refcount for the freshly allocated page. As
+ * we have just allocated the page, no one else should be
+ * using it.
+ */
+ VM_WARN_ON_ONCE_PAGE(!page_ref_count(page), page);
+ set_page_count(page, 1);
+ ret = vm_insert_page(vma, vaddr, page);
if (ret) {
gen_pool_free(p2pdma->pool, (uintptr_t)kaddr, len);
return ret;
}
percpu_ref_get(ref);
- put_page(virt_to_page(kaddr));
+ put_page(page);
kaddr += PAGE_SIZE;
len -= PAGE_SIZE;
}
@@ -161,7 +170,7 @@ out:
return ret;
}
-static struct bin_attribute p2pmem_alloc_attr = {
+static const struct bin_attribute p2pmem_alloc_attr = {
.attr = { .name = "allocate", .mode = 0660 },
.mmap = p2pmem_alloc_mmap,
/*
@@ -180,7 +189,7 @@ static struct attribute *p2pmem_attrs[] = {
NULL,
};
-static struct bin_attribute *p2pmem_bin_attrs[] = {
+static const struct bin_attribute *const p2pmem_bin_attrs[] = {
&p2pmem_alloc_attr,
NULL,
};
@@ -191,12 +200,13 @@ static const struct attribute_group p2pmem_group = {
.name = "p2pmem",
};
-static void p2pdma_page_free(struct page *page)
+static void p2pdma_folio_free(struct folio *folio)
{
- struct pci_p2pdma_pagemap *pgmap = to_p2p_pgmap(page->pgmap);
+ struct page *page = &folio->page;
+ struct pci_p2pdma_pagemap *pgmap = to_p2p_pgmap(page_pgmap(page));
/* safe to dereference while a reference is held to the percpu ref */
- struct pci_p2pdma *p2pdma =
- rcu_dereference_protected(pgmap->provider->p2pdma, 1);
+ struct pci_p2pdma *p2pdma = rcu_dereference_protected(
+ to_pci_dev(pgmap->mem->owner)->p2pdma, 1);
struct percpu_ref *ref;
gen_pool_free_owner(p2pdma->pool, (uintptr_t)page_to_virt(page),
@@ -205,7 +215,7 @@ static void p2pdma_page_free(struct page *page)
}
static const struct dev_pagemap_ops p2pdma_pgmap_ops = {
- .page_free = p2pdma_page_free,
+ .folio_free = p2pdma_folio_free,
};
static void pci_p2pdma_release(void *data)
@@ -219,56 +229,136 @@ static void pci_p2pdma_release(void *data)
/* Flush and disable pci_alloc_p2p_mem() */
pdev->p2pdma = NULL;
- synchronize_rcu();
+ if (p2pdma->pool)
+ synchronize_rcu();
+ xa_destroy(&p2pdma->map_types);
+
+ if (!p2pdma->pool)
+ return;
gen_pool_destroy(p2pdma->pool);
sysfs_remove_group(&pdev->dev.kobj, &p2pmem_group);
- xa_destroy(&p2pdma->map_types);
}
-static int pci_p2pdma_setup(struct pci_dev *pdev)
+/**
+ * pcim_p2pdma_init - Initialise peer-to-peer DMA providers
+ * @pdev: The PCI device to enable P2PDMA for
+ *
+ * This function initializes the peer-to-peer DMA infrastructure
+ * for a PCI device. It allocates and sets up the necessary data
+ * structures to support P2PDMA operations, including mapping type
+ * tracking.
+ */
+int pcim_p2pdma_init(struct pci_dev *pdev)
{
- int error = -ENOMEM;
struct pci_p2pdma *p2p;
+ int i, ret;
+
+ p2p = rcu_dereference_protected(pdev->p2pdma, 1);
+ if (p2p)
+ return 0;
p2p = devm_kzalloc(&pdev->dev, sizeof(*p2p), GFP_KERNEL);
if (!p2p)
return -ENOMEM;
xa_init(&p2p->map_types);
+ /*
+ * Iterate over all standard PCI BARs and record only those that
+ * correspond to MMIO regions. Skip non-memory resources (e.g. I/O
+ * port BARs) since they cannot be used for peer-to-peer (P2P)
+ * transactions.
+ */
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
+ if (!(pci_resource_flags(pdev, i) & IORESOURCE_MEM))
+ continue;
- p2p->pool = gen_pool_create(PAGE_SHIFT, dev_to_node(&pdev->dev));
- if (!p2p->pool)
- goto out;
+ p2p->mem[i].owner = &pdev->dev;
+ p2p->mem[i].bus_offset =
+ pci_bus_address(pdev, i) - pci_resource_start(pdev, i);
+ }
- error = devm_add_action_or_reset(&pdev->dev, pci_p2pdma_release, pdev);
- if (error)
- goto out_pool_destroy;
+ ret = devm_add_action_or_reset(&pdev->dev, pci_p2pdma_release, pdev);
+ if (ret)
+ goto out_p2p;
- error = sysfs_create_group(&pdev->dev.kobj, &p2pmem_group);
- if (error)
+ rcu_assign_pointer(pdev->p2pdma, p2p);
+ return 0;
+
+out_p2p:
+ devm_kfree(&pdev->dev, p2p);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pcim_p2pdma_init);
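A short usage sketch (not part of the patch): a hypothetical driver probe path would call pcim_p2pdma_init() once before any per-BAR provider lookups; the function is idempotent and devres-managed, so no explicit teardown is shown.

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int ret;

	ret = pcim_p2pdma_init(pdev);	/* idempotent, devres-managed */
	if (ret)
		return ret;

	/* per-BAR providers can now be fetched, see pcim_p2pdma_provider() */
	return 0;
}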
+
+/**
+ * pcim_p2pdma_provider - Get peer-to-peer DMA provider
+ * @pdev: The PCI device to get the P2PDMA provider for
+ * @bar: BAR index of the provider to get
+ *
+ * This function gets the peer-to-peer DMA provider for a PCI device BAR. The
+ * lifetime of the provider (and of the underlying MMIO) is bound to the
+ * lifetime of the driver. A driver calling this function must ensure that all
+ * references to the provider, and any DMA mappings created for the MMIO, are
+ * cleaned up before the driver's remove() completes.
+ *
+ * Since P2P memory is almost always shared with a second driver, some
+ * mechanism to notify, invalidate and revoke the MMIO's DMA mappings must be
+ * in place before using this function. For example, a revoke can be built
+ * using DMABUF.
+ */
+struct p2pdma_provider *pcim_p2pdma_provider(struct pci_dev *pdev, int bar)
+{
+ struct pci_p2pdma *p2p;
+
+ if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM))
+ return NULL;
+
+ p2p = rcu_dereference_protected(pdev->p2pdma, 1);
+ if (WARN_ON(!p2p))
+ /* Someone forgot to call pcim_p2pdma_init() first */
+ return NULL;
+
+ return &p2p->mem[bar];
+}
+EXPORT_SYMBOL_GPL(pcim_p2pdma_provider);
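Illustrative sketch only (not part of the patch): fetching the provider for one MMIO BAR after initialization; the BAR index is an assumption for the example.

static struct p2pdma_provider *example_get_provider(struct pci_dev *pdev)
{
	struct p2pdma_provider *mem;

	if (pcim_p2pdma_init(pdev))
		return NULL;

	/* BAR 2 is only an example; any MMIO BAR works */
	mem = pcim_p2pdma_provider(pdev, 2);
	if (!mem)
		return NULL;	/* non-MMIO BAR */

	return mem;
}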
+
+static int pci_p2pdma_setup_pool(struct pci_dev *pdev)
+{
+ struct pci_p2pdma *p2pdma;
+ int ret;
+
+ p2pdma = rcu_dereference_protected(pdev->p2pdma, 1);
+ if (p2pdma->pool)
+ /* We already set up the pool, do nothing */
+ return 0;
+
+ p2pdma->pool = gen_pool_create(PAGE_SHIFT, dev_to_node(&pdev->dev));
+ if (!p2pdma->pool)
+ return -ENOMEM;
+
+ ret = sysfs_create_group(&pdev->dev.kobj, &p2pmem_group);
+ if (ret)
goto out_pool_destroy;
- rcu_assign_pointer(pdev->p2pdma, p2p);
return 0;
out_pool_destroy:
- gen_pool_destroy(p2p->pool);
-out:
- devm_kfree(&pdev->dev, p2p);
- return error;
+ gen_pool_destroy(p2pdma->pool);
+ p2pdma->pool = NULL;
+ return ret;
}
static void pci_p2pdma_unmap_mappings(void *data)
{
- struct pci_dev *pdev = data;
+ struct pci_p2pdma_pagemap *p2p_pgmap = data;
/*
* Removing the alloc attribute from sysfs will call
* unmap_mapping_range() on the inode, teardown any existing userspace
* mappings and prevent new ones from being created.
*/
- sysfs_remove_file_from_group(&pdev->dev.kobj, &p2pmem_alloc_attr.attr,
+ sysfs_remove_file_from_group(&p2p_pgmap->mem->owner->kobj,
+ &p2pmem_alloc_attr.attr,
p2pmem_group.name);
}
@@ -286,6 +376,7 @@ int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size,
u64 offset)
{
struct pci_p2pdma_pagemap *p2p_pgmap;
+ struct p2pdma_provider *mem;
struct dev_pagemap *pgmap;
struct pci_p2pdma *p2pdma;
void *addr;
@@ -303,11 +394,21 @@ int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size,
if (size + offset > pci_resource_len(pdev, bar))
return -EINVAL;
- if (!pdev->p2pdma) {
- error = pci_p2pdma_setup(pdev);
- if (error)
- return error;
- }
+ error = pcim_p2pdma_init(pdev);
+ if (error)
+ return error;
+
+ error = pci_p2pdma_setup_pool(pdev);
+ if (error)
+ return error;
+
+ mem = pcim_p2pdma_provider(pdev, bar);
+ /*
+ * We validated the BAR before calling pcim_p2pdma_provider(),
+ * so it should never return NULL.
+ */
+ if (WARN_ON(!mem))
+ return -EINVAL;
p2p_pgmap = devm_kzalloc(&pdev->dev, sizeof(*p2p_pgmap), GFP_KERNEL);
if (!p2p_pgmap)
@@ -319,10 +420,7 @@ int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size,
pgmap->nr_range = 1;
pgmap->type = MEMORY_DEVICE_PCI_P2PDMA;
pgmap->ops = &p2pdma_pgmap_ops;
-
- p2p_pgmap->provider = pdev;
- p2p_pgmap->bus_offset = pci_bus_address(pdev, bar) -
- pci_resource_start(pdev, bar);
+ p2p_pgmap->mem = mem;
addr = devm_memremap_pages(&pdev->dev, pgmap);
if (IS_ERR(addr)) {
@@ -331,7 +429,7 @@ int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size,
}
error = devm_add_action_or_reset(&pdev->dev, pci_p2pdma_unmap_mappings,
- pdev);
+ p2p_pgmap);
if (error)
goto pages_free;
@@ -351,7 +449,7 @@ int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size,
pages_free:
devm_memunmap_pages(&pdev->dev, pgmap);
pgmap_free:
- devm_kfree(&pdev->dev, pgmap);
+ devm_kfree(&pdev->dev, p2p_pgmap);
return error;
}
EXPORT_SYMBOL_GPL(pci_p2pdma_add_resource);
@@ -435,7 +533,7 @@ static const struct pci_p2pdma_whitelist_entry {
/* Intel Xeon E7 v3/Xeon E5 v3/Core i7 */
{PCI_VENDOR_ID_INTEL, 0x2f00, REQ_SAME_HOST_BRIDGE},
{PCI_VENDOR_ID_INTEL, 0x2f01, REQ_SAME_HOST_BRIDGE},
- /* Intel SkyLake-E */
+ /* Intel Skylake-E */
{PCI_VENDOR_ID_INTEL, 0x2030, 0},
{PCI_VENDOR_ID_INTEL, 0x2031, 0},
{PCI_VENDOR_ID_INTEL, 0x2032, 0},
@@ -532,8 +630,7 @@ static bool host_bridge_whitelist(struct pci_dev *a, struct pci_dev *b,
static unsigned long map_types_idx(struct pci_dev *client)
{
- return (pci_domain_nr(client->bus) << 16) |
- (client->bus->number << 8) | client->devfn;
+ return (pci_domain_nr(client->bus) << 16) | pci_dev_id(client);
}
/*
@@ -662,7 +759,7 @@ done:
p2pdma = rcu_dereference(provider->p2pdma);
if (p2pdma)
xa_store(&p2pdma->map_types, map_types_idx(client),
- xa_mk_value(map_type), GFP_KERNEL);
+ xa_mk_value(map_type), GFP_ATOMIC);
rcu_read_unlock();
return map_type;
}
@@ -730,7 +827,7 @@ EXPORT_SYMBOL_GPL(pci_p2pdma_distance_many);
* pci_has_p2pmem - check if a given PCI device has published any p2pmem
* @pdev: PCI device to check
*/
-bool pci_has_p2pmem(struct pci_dev *pdev)
+static bool pci_has_p2pmem(struct pci_dev *pdev)
{
struct pci_p2pdma *p2pdma;
bool res;
@@ -742,7 +839,6 @@ bool pci_has_p2pmem(struct pci_dev *pdev)
return res;
}
-EXPORT_SYMBOL_GPL(pci_has_p2pmem);
/**
* pci_p2pmem_find_many - find a peer-to-peer DMA memory device compatible with
@@ -838,7 +934,6 @@ void *pci_alloc_p2pmem(struct pci_dev *pdev, size_t size)
if (unlikely(!percpu_ref_tryget_live_rcu(ref))) {
gen_pool_free(p2pdma->pool, (unsigned long) ret, size);
ret = NULL;
- goto out;
}
out:
rcu_read_unlock();
@@ -966,16 +1061,26 @@ void pci_p2pmem_publish(struct pci_dev *pdev, bool publish)
}
EXPORT_SYMBOL_GPL(pci_p2pmem_publish);
-static enum pci_p2pdma_map_type pci_p2pdma_map_type(struct dev_pagemap *pgmap,
- struct device *dev)
+/**
+ * pci_p2pdma_map_type - Determine the mapping type for P2PDMA transfers
+ * @provider: P2PDMA provider structure
+ * @dev: Target device for the transfer
+ *
+ * Determines how peer-to-peer DMA transfers should be mapped between
+ * the provider and the target device. The mapping type indicates whether
+ * the transfer can be done directly through PCI switches or must go
+ * through the host bridge.
+ */
+enum pci_p2pdma_map_type pci_p2pdma_map_type(struct p2pdma_provider *provider,
+ struct device *dev)
{
enum pci_p2pdma_map_type type = PCI_P2PDMA_MAP_NOT_SUPPORTED;
- struct pci_dev *provider = to_p2p_pgmap(pgmap)->provider;
+ struct pci_dev *pdev = to_pci_dev(provider->owner);
struct pci_dev *client;
struct pci_p2pdma *p2pdma;
int dist;
- if (!provider->p2pdma)
+ if (!pdev->p2pdma)
return PCI_P2PDMA_MAP_NOT_SUPPORTED;
if (!dev_is_pci(dev))
@@ -984,7 +1089,7 @@ static enum pci_p2pdma_map_type pci_p2pdma_map_type(struct dev_pagemap *pgmap,
client = to_pci_dev(dev);
rcu_read_lock();
- p2pdma = rcu_dereference(provider->p2pdma);
+ p2pdma = rcu_dereference(pdev->p2pdma);
if (p2pdma)
type = xa_to_value(xa_load(&p2pdma->map_types,
@@ -992,45 +1097,21 @@ static enum pci_p2pdma_map_type pci_p2pdma_map_type(struct dev_pagemap *pgmap,
rcu_read_unlock();
if (type == PCI_P2PDMA_MAP_UNKNOWN)
- return calc_map_type_and_dist(provider, client, &dist, true);
+ return calc_map_type_and_dist(pdev, client, &dist, true);
return type;
}
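Illustrative sketch only (not part of the patch): a caller deciding whether a P2P transfer can proceed based on the mapping type returned above; the helper name is hypothetical.

static bool example_can_map_p2p(struct p2pdma_provider *provider,
				struct device *dma_dev)
{
	switch (pci_p2pdma_map_type(provider, dma_dev)) {
	case PCI_P2PDMA_MAP_BUS_ADDR:		/* mapped via PCI bus address */
	case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:	/* mapped like host memory */
		return true;
	default:				/* unsupported or unknown */
		return false;
	}
}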
-/**
- * pci_p2pdma_map_segment - map an sg segment determining the mapping type
- * @state: State structure that should be declared outside of the for_each_sg()
- * loop and initialized to zero.
- * @dev: DMA device that's doing the mapping operation
- * @sg: scatterlist segment to map
- *
- * This is a helper to be used by non-IOMMU dma_map_sg() implementations where
- * the sg segment is the same for the page_link and the dma_address.
- *
- * Attempt to map a single segment in an SGL with the PCI bus address.
- * The segment must point to a PCI P2PDMA page and thus must be
- * wrapped in a is_pci_p2pdma_page(sg_page(sg)) check.
- *
- * Returns the type of mapping used and maps the page if the type is
- * PCI_P2PDMA_MAP_BUS_ADDR.
- */
-enum pci_p2pdma_map_type
-pci_p2pdma_map_segment(struct pci_p2pdma_map_state *state, struct device *dev,
- struct scatterlist *sg)
+void __pci_p2pdma_update_state(struct pci_p2pdma_map_state *state,
+ struct device *dev, struct page *page)
{
- if (state->pgmap != sg_page(sg)->pgmap) {
- state->pgmap = sg_page(sg)->pgmap;
- state->map = pci_p2pdma_map_type(state->pgmap, dev);
- state->bus_off = to_p2p_pgmap(state->pgmap)->bus_offset;
- }
+ struct pci_p2pdma_pagemap *p2p_pgmap = to_p2p_pgmap(page_pgmap(page));
- if (state->map == PCI_P2PDMA_MAP_BUS_ADDR) {
- sg->dma_address = sg_phys(sg) + state->bus_off;
- sg_dma_len(sg) = sg->length;
- sg_dma_mark_bus_address(sg);
- }
+ if (state->mem == p2p_pgmap->mem)
+ return;
- return state->map;
+ state->mem = p2p_pgmap->mem;
+ state->map = pci_p2pdma_map_type(p2p_pgmap->mem, dev);
}
/**
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index a05350a4e49c..9369377725fa 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -15,6 +15,7 @@
#include <linux/pci_hotplug.h>
#include <linux/module.h>
#include <linux/pci-acpi.h>
+#include <linux/pci-ecam.h>
#include <linux/pm_runtime.h>
#include <linux/pm_qos.h>
#include <linux/rwsem.h>
@@ -119,6 +120,30 @@ phys_addr_t acpi_pci_root_get_mcfg_addr(acpi_handle handle)
return (phys_addr_t)mcfg_addr;
}
+bool pci_acpi_preserve_config(struct pci_host_bridge *host_bridge)
+{
+ bool ret = false;
+
+ if (ACPI_HANDLE(&host_bridge->dev)) {
+ union acpi_object *obj;
+
+ /*
+ * Evaluate the "PCI Boot Configuration" _DSM Function. If it
+ * exists and returns 0, we must preserve any PCI resource
+ * assignments made by firmware for this host bridge.
+ */
+ obj = acpi_evaluate_dsm_typed(ACPI_HANDLE(&host_bridge->dev),
+ &pci_acpi_dsm_guid,
+ 1, DSM_PCI_PRESERVE_BOOT_CONFIG,
+ NULL, ACPI_TYPE_INTEGER);
+ if (obj && obj->integer.value == 0)
+ ret = true;
+ ACPI_FREE(obj);
+ }
+
+ return ret;
+}
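Hedged sketch (not part of the patch): a host bridge setup path would typically use the result to set the bridge's preserve_config flag, which the ECAM scan code later in this patch checks before claiming resources.

static void example_set_preserve_config(struct pci_host_bridge *host_bridge)
{
	if (pci_acpi_preserve_config(host_bridge))
		host_bridge->preserve_config = 1;
}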
+
/* _HPX PCI Setting Record (Type 0); same as _HPP */
struct hpx_type0 {
u32 revision; /* Not present in _HPP */
@@ -793,15 +818,10 @@ int pci_acpi_program_hp_params(struct pci_dev *dev)
bool pciehp_is_native(struct pci_dev *bridge)
{
const struct pci_host_bridge *host;
- u32 slot_cap;
if (!IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE))
return false;
- pcie_capability_read_dword(bridge, PCI_EXP_SLTCAP, &slot_cap);
- if (!(slot_cap & PCI_EXP_SLTCAP_HPC))
- return false;
-
if (pcie_ports_native)
return true;
@@ -911,7 +931,7 @@ pci_power_t acpi_pci_choose_state(struct pci_dev *pdev)
{
int acpi_state, d_max;
- if (pdev->no_d3cold)
+ if (pdev->no_d3cold || !pdev->d3cold_allowed)
d_max = ACPI_STATE_D3_HOT;
else
d_max = ACPI_STATE_D3_COLD;
@@ -979,7 +999,7 @@ bool acpi_pci_bridge_d3(struct pci_dev *dev)
struct acpi_device *adev, *rpadev;
const union acpi_object *obj;
- if (acpi_pci_disabled || !dev->is_hotplug_bridge)
+ if (acpi_pci_disabled || !dev->is_pciehp)
return false;
adev = ACPI_COMPANION(&dev->dev);
@@ -1215,12 +1235,12 @@ void acpi_pci_add_bus(struct pci_bus *bus)
if (!pci_is_root_bus(bus))
return;
- obj = acpi_evaluate_dsm(ACPI_HANDLE(bus->bridge), &pci_acpi_dsm_guid, 3,
- DSM_PCI_POWER_ON_RESET_DELAY, NULL);
+ obj = acpi_evaluate_dsm_typed(ACPI_HANDLE(bus->bridge), &pci_acpi_dsm_guid, 3,
+ DSM_PCI_POWER_ON_RESET_DELAY, NULL, ACPI_TYPE_INTEGER);
if (!obj)
return;
- if (obj->type == ACPI_TYPE_INTEGER && obj->integer.value == 1) {
+ if (obj->integer.value == 1) {
bridge = pci_find_host_bridge(bus);
bridge->ignore_reset_delay = 1;
}
@@ -1376,12 +1396,13 @@ static void pci_acpi_optimize_delay(struct pci_dev *pdev,
if (bridge->ignore_reset_delay)
pdev->d3cold_delay = 0;
- obj = acpi_evaluate_dsm(handle, &pci_acpi_dsm_guid, 3,
- DSM_PCI_DEVICE_READINESS_DURATIONS, NULL);
+ obj = acpi_evaluate_dsm_typed(handle, &pci_acpi_dsm_guid, 3,
+ DSM_PCI_DEVICE_READINESS_DURATIONS, NULL,
+ ACPI_TYPE_PACKAGE);
if (!obj)
return;
- if (obj->type == ACPI_TYPE_PACKAGE && obj->package.count == 5) {
+ if (obj->package.count == 5) {
elements = obj->package.elements;
if (elements[0].type == ACPI_TYPE_INTEGER) {
value = (int)elements[0].integer.value / 1000;
@@ -1518,3 +1539,184 @@ static int __init acpi_pci_init(void)
return 0;
}
arch_initcall(acpi_pci_init);
+
+#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
+
+/*
+ * Try to assign the IRQ number when probing a new device
+ */
+int pcibios_alloc_irq(struct pci_dev *dev)
+{
+ if (!acpi_disabled)
+ acpi_pci_irq_enable(dev);
+
+ return 0;
+}
+
+struct acpi_pci_generic_root_info {
+ struct acpi_pci_root_info common;
+ struct pci_config_window *cfg; /* config space mapping */
+};
+
+int acpi_pci_bus_find_domain_nr(struct pci_bus *bus)
+{
+ struct pci_config_window *cfg = bus->sysdata;
+ struct acpi_device *adev = to_acpi_device(cfg->parent);
+ struct acpi_pci_root *root = acpi_driver_data(adev);
+
+ return root->segment;
+}
+
+int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
+{
+ struct pci_config_window *cfg;
+ struct acpi_device *adev;
+ struct device *bus_dev;
+
+ if (acpi_disabled)
+ return 0;
+
+ cfg = bridge->bus->sysdata;
+
+ /*
+ * On Hyper-V there is no corresponding ACPI device for a root bridge,
+ * so the driver sets ->parent to NULL. Set 'adev' to NULL in that case
+ * as well, because there is no proper ACPI device to associate.
+ */
+ if (!cfg->parent)
+ adev = NULL;
+ else
+ adev = to_acpi_device(cfg->parent);
+
+ bus_dev = &bridge->bus->dev;
+
+ ACPI_COMPANION_SET(&bridge->dev, adev);
+ set_dev_node(bus_dev, acpi_get_node(acpi_device_handle(adev)));
+
+ return 0;
+}
+
+static int pci_acpi_root_prepare_resources(struct acpi_pci_root_info *ci)
+{
+ struct resource_entry *entry, *tmp;
+ int status;
+
+ status = acpi_pci_probe_root_resources(ci);
+ resource_list_for_each_entry_safe(entry, tmp, &ci->resources) {
+ if (!(entry->res->flags & IORESOURCE_WINDOW))
+ resource_list_destroy_entry(entry);
+ }
+ return status;
+}
+
+/*
+ * Look up the bus range for the domain in MCFG, and set up config space
+ * mapping.
+ */
+static struct pci_config_window *
+pci_acpi_setup_ecam_mapping(struct acpi_pci_root *root)
+{
+ struct device *dev = &root->device->dev;
+ struct resource *bus_res = &root->secondary;
+ u16 seg = root->segment;
+ const struct pci_ecam_ops *ecam_ops;
+ struct resource cfgres;
+ struct acpi_device *adev;
+ struct pci_config_window *cfg;
+ int ret;
+
+ ret = pci_mcfg_lookup(root, &cfgres, &ecam_ops);
+ if (ret) {
+ dev_err(dev, "%04x:%pR ECAM region not found\n", seg, bus_res);
+ return NULL;
+ }
+
+ adev = acpi_resource_consumer(&cfgres);
+ if (adev)
+ dev_info(dev, "ECAM area %pR reserved by %s\n", &cfgres,
+ dev_name(&adev->dev));
+ else
+ dev_warn(dev, FW_BUG "ECAM area %pR not reserved in ACPI namespace\n",
+ &cfgres);
+
+ cfg = pci_ecam_create(dev, &cfgres, bus_res, ecam_ops);
+ if (IS_ERR(cfg)) {
+ dev_err(dev, "%04x:%pR error %ld mapping ECAM\n", seg, bus_res,
+ PTR_ERR(cfg));
+ return NULL;
+ }
+
+ return cfg;
+}
+
+/* release_info: free resources allocated by init_info */
+static void pci_acpi_generic_release_info(struct acpi_pci_root_info *ci)
+{
+ struct acpi_pci_generic_root_info *ri;
+
+ ri = container_of(ci, struct acpi_pci_generic_root_info, common);
+ pci_ecam_free(ri->cfg);
+ kfree(ci->ops);
+ kfree(ri);
+}
+
+/* Interface called from ACPI code to set up a PCI host controller */
+struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
+{
+ struct acpi_pci_generic_root_info *ri;
+ struct pci_bus *bus, *child;
+ struct acpi_pci_root_ops *root_ops;
+ struct pci_host_bridge *host;
+
+ ri = kzalloc(sizeof(*ri), GFP_KERNEL);
+ if (!ri)
+ return NULL;
+
+ root_ops = kzalloc(sizeof(*root_ops), GFP_KERNEL);
+ if (!root_ops) {
+ kfree(ri);
+ return NULL;
+ }
+
+ ri->cfg = pci_acpi_setup_ecam_mapping(root);
+ if (!ri->cfg) {
+ kfree(ri);
+ kfree(root_ops);
+ return NULL;
+ }
+
+ root_ops->release_info = pci_acpi_generic_release_info;
+ root_ops->prepare_resources = pci_acpi_root_prepare_resources;
+ root_ops->pci_ops = (struct pci_ops *)&ri->cfg->ops->pci_ops;
+ bus = acpi_pci_root_create(root, root_ops, &ri->common, ri->cfg);
+ if (!bus)
+ return NULL;
+
+ /* If we must preserve the resource configuration, claim now */
+ host = pci_find_host_bridge(bus);
+ if (host->preserve_config)
+ pci_bus_claim_resources(bus);
+
+ /*
+ * Assign whatever was left unassigned. If we didn't claim above,
+ * this will reassign everything.
+ */
+ pci_assign_unassigned_root_bus_resources(bus);
+
+ list_for_each_entry(child, &bus->children, node)
+ pcie_bus_configure_settings(child);
+
+ return bus;
+}
+
+void pcibios_add_bus(struct pci_bus *bus)
+{
+ acpi_pci_add_bus(bus);
+}
+
+void pcibios_remove_bus(struct pci_bus *bus)
+{
+ acpi_pci_remove_bus(bus);
+}
+
+#endif
diff --git a/drivers/pci/pci-bridge-emul.c b/drivers/pci/pci-bridge-emul.c
index 9334b2dd4764..6658c1edd464 100644
--- a/drivers/pci/pci-bridge-emul.c
+++ b/drivers/pci/pci-bridge-emul.c
@@ -257,8 +257,8 @@ struct pci_bridge_reg_behavior pcie_cap_regs_behavior[PCI_CAP_PCIE_SIZEOF / 4] =
*/
.rw = (PCI_EXP_RTCTL_SECEE | PCI_EXP_RTCTL_SENFEE |
PCI_EXP_RTCTL_SEFEE | PCI_EXP_RTCTL_PMEIE |
- PCI_EXP_RTCTL_CRSSVE),
- .ro = PCI_EXP_RTCAP_CRSVIS << 16,
+ PCI_EXP_RTCTL_RRS_SVE),
+ .ro = PCI_EXP_RTCAP_RRS_SV << 16,
},
[PCI_EXP_RTSTA / 4] = {
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index ae9baf801681..7c2d9d596258 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -193,7 +193,7 @@ static ssize_t new_id_store(struct device_driver *driver, const char *buf,
u32 vendor, device, subvendor = PCI_ANY_ID,
subdevice = PCI_ANY_ID, class = 0, class_mask = 0;
unsigned long driver_data = 0;
- int fields = 0;
+ int fields;
int retval = 0;
fields = sscanf(buf, "%x %x %x %x %x %x %lx",
@@ -260,7 +260,7 @@ static ssize_t remove_id_store(struct device_driver *driver, const char *buf,
struct pci_driver *pdrv = to_pci_driver(driver);
u32 vendor, device, subvendor = PCI_ANY_ID,
subdevice = PCI_ANY_ID, class = 0, class_mask = 0;
- int fields = 0;
+ int fields;
size_t retval = -ENODEV;
fields = sscanf(buf, "%x %x %x %x %x %x",
@@ -419,15 +419,6 @@ static int __pci_device_probe(struct pci_driver *drv, struct pci_dev *pci_dev)
return error;
}
-int __weak pcibios_alloc_irq(struct pci_dev *dev)
-{
- return 0;
-}
-
-void __weak pcibios_free_irq(struct pci_dev *dev)
-{
-}
-
#ifdef CONFIG_PCI_IOV
static inline bool pci_device_can_probe(struct pci_dev *pdev)
{
@@ -473,6 +464,13 @@ static void pci_device_remove(struct device *dev)
if (drv->remove) {
pm_runtime_get_sync(dev);
+ /*
+ * If the driver provides a .runtime_idle() callback and it has
+ * started to run already, it may continue to run in parallel
+ * with the code below, so wait until all of the runtime PM
+ * activity has completed.
+ */
+ pm_runtime_barrier(dev);
drv->remove(pci_dev);
pm_runtime_put_noidle(dev);
}
@@ -557,12 +555,6 @@ static void pci_pm_default_resume(struct pci_dev *pci_dev)
pci_enable_wake(pci_dev, PCI_D0, false);
}
-static void pci_pm_power_up_and_verify_state(struct pci_dev *pci_dev)
-{
- pci_power_up(pci_dev);
- pci_update_current_state(pci_dev, PCI_D0);
-}
-
static void pci_pm_default_resume_early(struct pci_dev *pci_dev)
{
pci_pm_power_up_and_verify_state(pci_dev);
@@ -572,7 +564,19 @@ static void pci_pm_default_resume_early(struct pci_dev *pci_dev)
static void pci_pm_bridge_power_up_actions(struct pci_dev *pci_dev)
{
- pci_bridge_wait_for_secondary_bus(pci_dev, "resume");
+ int ret;
+
+ ret = pci_bridge_wait_for_secondary_bus(pci_dev, "resume");
+ if (ret) {
+ /*
+ * The downstream link failed to come up, so mark the
+ * devices below as disconnected to make sure we don't
+ * attempt to resume them.
+ */
+ pci_walk_bus(pci_dev->subordinate, pci_dev_set_disconnected,
+ NULL);
+ return;
+ }
/*
* When powering on a bridge from D3cold, the whole hierarchy may be
@@ -625,6 +629,8 @@ static int pci_legacy_suspend(struct device *dev, pm_message_t state)
struct pci_dev *pci_dev = to_pci_dev(dev);
struct pci_driver *drv = pci_dev->driver;
+ pci_dev->state_saved = false;
+
if (drv && drv->suspend) {
pci_power_t prev = pci_dev->current_state;
int error;
@@ -704,6 +710,8 @@ static int pci_pm_prepare(struct device *dev)
struct pci_dev *pci_dev = to_pci_dev(dev);
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+ dev_pm_set_strict_midlayer(dev, true);
+
if (pm && pm->prepare) {
int error = pm->prepare(dev);
if (error < 0)
@@ -745,6 +753,8 @@ static void pci_pm_complete(struct device *dev)
if (pci_dev->current_state < pre_sleep_state)
pm_request_resume(dev);
}
+
+ dev_pm_set_strict_midlayer(dev, false);
}
#else /* !CONFIG_PM_SLEEP */
@@ -802,8 +812,7 @@ static int pci_pm_suspend(struct device *dev)
* suspend callbacks can cope with runtime-suspended devices, it is
* better to resume the device from runtime suspend here.
*/
- if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) ||
- pci_dev_need_resume(pci_dev)) {
+ if (!dev_pm_smart_suspend(dev) || pci_dev_need_resume(pci_dev)) {
pm_runtime_resume(dev);
pci_dev->state_saved = false;
} else {
@@ -1029,6 +1038,8 @@ static int pci_pm_freeze(struct device *dev)
if (!pm) {
pci_pm_default_suspend(pci_dev);
+ if (!pm_runtime_suspended(dev))
+ pci_dev->state_saved = false;
return 0;
}
@@ -1122,8 +1133,6 @@ static int pci_pm_thaw(struct device *dev)
pci_pm_reenable_device(pci_dev);
}
- pci_dev->state_saved = false;
-
return error;
}
@@ -1141,8 +1150,7 @@ static int pci_pm_poweroff(struct device *dev)
}
/* The reason to do that is the same as in pci_pm_suspend(). */
- if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) ||
- pci_dev_need_resume(pci_dev)) {
+ if (!dev_pm_smart_suspend(dev) || pci_dev_need_resume(pci_dev)) {
pm_runtime_resume(dev);
pci_dev->state_saved = false;
} else {
@@ -1370,10 +1378,7 @@ static int pci_pm_runtime_idle(struct device *dev)
if (!pci_dev->driver)
return 0;
- if (!pm)
- return -ENOSYS;
-
- if (pm->runtime_idle)
+ if (pm && pm->runtime_idle)
return pm->runtime_idle(dev);
return 0;
@@ -1474,14 +1479,15 @@ static struct pci_driver pci_compat_driver = {
*/
struct pci_driver *pci_dev_driver(const struct pci_dev *dev)
{
+ int i;
+
if (dev->driver)
return dev->driver;
- else {
- int i;
- for (i = 0; i <= PCI_ROM_RESOURCE; i++)
- if (dev->resource[i].flags & IORESOURCE_BUSY)
- return &pci_compat_driver;
- }
+
+ for (i = 0; i <= PCI_ROM_RESOURCE; i++)
+ if (dev->resource[i].flags & IORESOURCE_BUSY)
+ return &pci_compat_driver;
+
return NULL;
}
EXPORT_SYMBOL(pci_dev_driver);
@@ -1495,16 +1501,16 @@ EXPORT_SYMBOL(pci_dev_driver);
* system is in its list of supported devices. Returns the matching
* pci_device_id structure or %NULL if there is no match.
*/
-static int pci_bus_match(struct device *dev, struct device_driver *drv)
+static int pci_bus_match(struct device *dev, const struct device_driver *drv)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
struct pci_driver *pci_drv;
const struct pci_device_id *found_id;
- if (!pci_dev->match_driver)
+ if (pci_dev_binding_disallowed(pci_dev))
return 0;
- pci_drv = to_pci_driver(drv);
+ pci_drv = (struct pci_driver *)to_pci_driver(drv);
found_id = pci_match_device(pci_drv, pci_dev);
if (found_id)
return 1;
@@ -1578,7 +1584,7 @@ static int pci_uevent(const struct device *dev, struct kobj_uevent_env *env)
return 0;
}
-#if defined(CONFIG_PCIEAER) || defined(CONFIG_EEH)
+#if defined(CONFIG_PCIEAER) || defined(CONFIG_EEH) || defined(CONFIG_S390)
/**
* pci_uevent_ers - emit a uevent during recovery path of PCI device
* @pdev: PCI device undergoing error recovery
@@ -1592,6 +1598,7 @@ void pci_uevent_ers(struct pci_dev *pdev, enum pci_ers_result err_type)
switch (err_type) {
case PCI_ERS_RESULT_NONE:
case PCI_ERS_RESULT_CAN_RECOVER:
+ case PCI_ERS_RESULT_NEED_RESET:
envp[idx++] = "ERROR_EVENT=BEGIN_RECOVERY";
envp[idx++] = "DEVICE_ONLINE=0";
break;
@@ -1628,7 +1635,7 @@ static int pci_bus_num_vf(struct device *dev)
*/
static int pci_dma_configure(struct device *dev)
{
- struct pci_driver *driver = to_pci_driver(dev->driver);
+ const struct device_driver *drv = READ_ONCE(dev->driver);
struct device *bridge;
int ret = 0;
@@ -1645,7 +1652,8 @@ static int pci_dma_configure(struct device *dev)
pci_put_host_bridge_device(bridge);
- if (!ret && !driver->driver_managed_dma) {
+ /* @drv may not be valid when we're called from the IOMMU layer */
+ if (!ret && drv && !to_pci_driver(drv)->driver_managed_dma) {
ret = iommu_device_use_default_domain(dev);
if (ret)
arch_teardown_dma_ops(dev);
@@ -1662,13 +1670,27 @@ static void pci_dma_cleanup(struct device *dev)
iommu_device_unuse_default_domain(dev);
}
-struct bus_type pci_bus_type = {
+/*
+ * pci_device_irq_get_affinity - get IRQ affinity mask for device
+ * @dev: pointer to the device structure
+ * @irq_vec: interrupt vector number
+ *
+ * Return the CPU affinity mask for @dev and @irq_vec.
+ */
+static const struct cpumask *pci_device_irq_get_affinity(struct device *dev,
+ unsigned int irq_vec)
+{
+ return pci_irq_get_affinity(to_pci_dev(dev), irq_vec);
+}
+
+const struct bus_type pci_bus_type = {
.name = "pci",
.match = pci_bus_match,
.uevent = pci_uevent,
.probe = pci_device_probe,
.remove = pci_device_remove,
.shutdown = pci_device_shutdown,
+ .irq_get_affinity = pci_device_irq_get_affinity,
.dev_groups = pci_dev_groups,
.bus_groups = pci_bus_groups,
.drv_groups = pci_drv_groups,
@@ -1680,10 +1702,10 @@ struct bus_type pci_bus_type = {
EXPORT_SYMBOL(pci_bus_type);
#ifdef CONFIG_PCIEPORTBUS
-static int pcie_port_bus_match(struct device *dev, struct device_driver *drv)
+static int pcie_port_bus_match(struct device *dev, const struct device_driver *drv)
{
struct pcie_device *pciedev;
- struct pcie_port_service_driver *driver;
+ const struct pcie_port_service_driver *driver;
if (drv->bus != &pcie_port_bus_type || dev->bus != &pcie_port_bus_type)
return 0;
@@ -1701,11 +1723,10 @@ static int pcie_port_bus_match(struct device *dev, struct device_driver *drv)
return 1;
}
-struct bus_type pcie_port_bus_type = {
+const struct bus_type pcie_port_bus_type = {
.name = "pci_express",
.match = pcie_port_bus_match,
};
-EXPORT_SYMBOL_GPL(pcie_port_bus_type);
#endif
static int __init pci_driver_init(void)
diff --git a/drivers/pci/pci-mid.c b/drivers/pci/pci-mid.c
index fbfd78127123..bed9f0755271 100644
--- a/drivers/pci/pci-mid.c
+++ b/drivers/pci/pci-mid.c
@@ -38,8 +38,8 @@ pci_power_t mid_pci_get_power_state(struct pci_dev *pdev)
* arch/x86/platform/intel-mid/pwr.c.
*/
static const struct x86_cpu_id lpss_cpu_ids[] = {
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_SALTWELL_MID, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT_MID, NULL),
+ X86_MATCH_VFM(INTEL_ATOM_SALTWELL_MID, NULL),
+ X86_MATCH_VFM(INTEL_ATOM_SILVERMONT_MID, NULL),
{}
};
diff --git a/drivers/pci/pci-pf-stub.c b/drivers/pci/pci-pf-stub.c
index 45855a5e9fca..da4db4928907 100644
--- a/drivers/pci/pci-pf-stub.c
+++ b/drivers/pci/pci-pf-stub.c
@@ -39,4 +39,5 @@ static struct pci_driver pf_stub_driver = {
};
module_pci_driver(pf_stub_driver);
+MODULE_DESCRIPTION("SR-IOV PF stub driver with no functionality");
MODULE_LICENSE("GPL");
diff --git a/drivers/pci/pci-stub.c b/drivers/pci/pci-stub.c
index d1f4c1ce7bd1..9bc478df4e8f 100644
--- a/drivers/pci/pci-stub.c
+++ b/drivers/pci/pci-stub.c
@@ -92,5 +92,6 @@ static void __exit pci_stub_exit(void)
module_init(pci_stub_init);
module_exit(pci_stub_exit);
+MODULE_DESCRIPTION("VM device assignment stub driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Chris Wright <chrisw@sous-sol.org>");
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index ab32a91f287b..c2df915ad2d2 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -12,7 +12,8 @@
* Modeled after usb's driverfs.c
*/
-
+#include <linux/bitfield.h>
+#include <linux/cleanup.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/pci.h>
@@ -29,8 +30,13 @@
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/aperture.h>
+#include <linux/unaligned.h>
#include "pci.h"
+#ifndef ARCH_PCI_DEV_GROUPS
+#define ARCH_PCI_DEV_GROUPS
+#endif
+
static int sysfs_initialized; /* = 0 */
/* show configuration fields */
@@ -172,6 +178,13 @@ static ssize_t resource_show(struct device *dev, struct device_attribute *attr,
for (i = 0; i < max; i++) {
struct resource *res = &pci_dev->resource[i];
+ struct resource zerores = {};
+
+ /* For backwards compatibility */
+ if (i >= PCI_BRIDGE_RESOURCES && i <= PCI_BRIDGE_RESOURCE_END &&
+ res->flags & (IORESOURCE_UNSET | IORESOURCE_DISABLED))
+ res = &zerores;
+
pci_resource_to_user(pci_dev, i, res, &start, &end);
len += sysfs_emit_at(buf, len, "0x%016llx 0x%016llx 0x%016llx\n",
(unsigned long long)start,
@@ -196,8 +209,14 @@ static ssize_t max_link_width_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct pci_dev *pdev = to_pci_dev(dev);
+ ssize_t ret;
+
+ /* We read PCI_EXP_LNKCAP, so we need the device to be accessible. */
+ pci_config_pm_runtime_get(pdev);
+ ret = sysfs_emit(buf, "%u\n", pcie_get_width_cap(pdev));
+ pci_config_pm_runtime_put(pdev);
- return sysfs_emit(buf, "%u\n", pcie_get_width_cap(pdev));
+ return ret;
}
static DEVICE_ATTR_RO(max_link_width);
@@ -209,7 +228,10 @@ static ssize_t current_link_speed_show(struct device *dev,
int err;
enum pci_bus_speed speed;
+ pci_config_pm_runtime_get(pci_dev);
err = pcie_capability_read_word(pci_dev, PCI_EXP_LNKSTA, &linkstat);
+ pci_config_pm_runtime_put(pci_dev);
+
if (err)
return -EINVAL;
@@ -226,12 +248,14 @@ static ssize_t current_link_width_show(struct device *dev,
u16 linkstat;
int err;
+ pci_config_pm_runtime_get(pci_dev);
err = pcie_capability_read_word(pci_dev, PCI_EXP_LNKSTA, &linkstat);
+ pci_config_pm_runtime_put(pci_dev);
+
if (err)
return -EINVAL;
- return sysfs_emit(buf, "%u\n",
- (linkstat & PCI_EXP_LNKSTA_NLW) >> PCI_EXP_LNKSTA_NLW_SHIFT);
+ return sysfs_emit(buf, "%u\n", FIELD_GET(PCI_EXP_LNKSTA_NLW, linkstat));
}
static DEVICE_ATTR_RO(current_link_width);
@@ -243,7 +267,10 @@ static ssize_t secondary_bus_number_show(struct device *dev,
u8 sec_bus;
int err;
+ pci_config_pm_runtime_get(pci_dev);
err = pci_read_config_byte(pci_dev, PCI_SECONDARY_BUS, &sec_bus);
+ pci_config_pm_runtime_put(pci_dev);
+
if (err)
return -EINVAL;
@@ -259,7 +286,10 @@ static ssize_t subordinate_bus_number_show(struct device *dev,
u8 sub_bus;
int err;
+ pci_config_pm_runtime_get(pci_dev);
err = pci_read_config_byte(pci_dev, PCI_SUBORDINATE_BUS, &sub_bus);
+ pci_config_pm_runtime_put(pci_dev);
+
if (err)
return -EINVAL;
@@ -518,6 +548,31 @@ static ssize_t bus_rescan_store(struct device *dev,
static struct device_attribute dev_attr_bus_rescan = __ATTR(rescan, 0200, NULL,
bus_rescan_store);
+static ssize_t reset_subordinate_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct pci_bus *bus = pdev->subordinate;
+ unsigned long val;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (kstrtoul(buf, 0, &val) < 0)
+ return -EINVAL;
+
+ if (val) {
+ int ret = __pci_reset_bus(bus);
+
+ if (ret)
+ return ret;
+ }
+
+ return count;
+}
+static DEVICE_ATTR_WO(reset_subordinate);
+
#if defined(CONFIG_PM) && defined(CONFIG_ACPI)
static ssize_t d3cold_allowed_store(struct device *dev,
struct device_attribute *attr,
@@ -530,10 +585,7 @@ static ssize_t d3cold_allowed_store(struct device *dev,
return -EINVAL;
pdev->d3cold_allowed = !!val;
- if (pdev->d3cold_allowed)
- pci_d3cold_enable(pdev);
- else
- pci_d3cold_disable(pdev);
+ pci_bridge_d3_update(pdev);
pm_runtime_resume(dev);
@@ -625,6 +677,7 @@ static struct attribute *pci_dev_attrs[] = {
static struct attribute *pci_bridge_attrs[] = {
&dev_attr_subordinate_bus_number.attr,
&dev_attr_secondary_bus_number.attr,
+ &dev_attr_reset_subordinate.attr,
NULL,
};
@@ -667,8 +720,24 @@ static ssize_t boot_vga_show(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR_RO(boot_vga);
+static ssize_t serial_number_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ u64 dsn;
+ u8 bytes[8];
+
+ dsn = pci_get_dsn(pci_dev);
+ if (!dsn)
+ return -EIO;
+
+ put_unaligned_be64(dsn, bytes);
+ return sysfs_emit(buf, "%8phD\n", bytes);
+}
+static DEVICE_ATTR_ADMIN_RO(serial_number);
+
static ssize_t pci_read_config(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct pci_dev *dev = to_pci_dev(kobj_to_dev(kobj));
@@ -743,7 +812,7 @@ static ssize_t pci_read_config(struct file *filp, struct kobject *kobj,
}
static ssize_t pci_write_config(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct pci_dev *dev = to_pci_dev(kobj_to_dev(kobj));
@@ -811,30 +880,42 @@ static ssize_t pci_write_config(struct file *filp, struct kobject *kobj,
return count;
}
-static BIN_ATTR(config, 0644, pci_read_config, pci_write_config, 0);
+static const BIN_ATTR(config, 0644, pci_read_config, pci_write_config, 0);
-static struct bin_attribute *pci_dev_config_attrs[] = {
+static const struct bin_attribute *const pci_dev_config_attrs[] = {
&bin_attr_config,
NULL,
};
-static umode_t pci_dev_config_attr_is_visible(struct kobject *kobj,
- struct bin_attribute *a, int n)
+static size_t pci_dev_config_attr_bin_size(struct kobject *kobj,
+ const struct bin_attribute *a,
+ int n)
{
struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
- a->size = PCI_CFG_SPACE_SIZE;
if (pdev->cfg_size > PCI_CFG_SPACE_SIZE)
- a->size = PCI_CFG_SPACE_EXP_SIZE;
-
- return a->attr.mode;
+ return PCI_CFG_SPACE_EXP_SIZE;
+ return PCI_CFG_SPACE_SIZE;
}
static const struct attribute_group pci_dev_config_attr_group = {
.bin_attrs = pci_dev_config_attrs,
- .is_bin_visible = pci_dev_config_attr_is_visible,
+ .bin_size = pci_dev_config_attr_bin_size,
};
+/*
+ * llseek operation for mmappable PCI resources.
+ * May be left unused if the arch doesn't provide them.
+ */
+static __maybe_unused loff_t
+pci_llseek_resource(struct file *filep,
+ struct kobject *kobj __always_unused,
+ const struct bin_attribute *attr,
+ loff_t offset, int whence)
+{
+ return fixed_size_llseek(filep, offset, whence, attr->size);
+}
+
#ifdef HAVE_PCI_LEGACY
/**
* pci_read_legacy_io - read byte(s) from legacy I/O port space
@@ -849,8 +930,8 @@ static const struct attribute_group pci_dev_config_attr_group = {
* callback routine (pci_legacy_read).
*/
static ssize_t pci_read_legacy_io(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
- loff_t off, size_t count)
+ const struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
{
struct pci_bus *bus = to_pci_bus(kobj_to_dev(kobj));
@@ -874,8 +955,8 @@ static ssize_t pci_read_legacy_io(struct file *filp, struct kobject *kobj,
* callback routine (pci_legacy_write).
*/
static ssize_t pci_write_legacy_io(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
- loff_t off, size_t count)
+ const struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
{
struct pci_bus *bus = to_pci_bus(kobj_to_dev(kobj));
@@ -898,7 +979,7 @@ static ssize_t pci_write_legacy_io(struct file *filp, struct kobject *kobj,
* memory space.
*/
static int pci_mmap_legacy_mem(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
struct vm_area_struct *vma)
{
struct pci_bus *bus = to_pci_bus(kobj_to_dev(kobj));
@@ -918,7 +999,7 @@ static int pci_mmap_legacy_mem(struct file *filp, struct kobject *kobj,
* memory space. Returns -ENOSYS if the operation isn't supported
*/
static int pci_mmap_legacy_io(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
struct vm_area_struct *vma)
{
struct pci_bus *bus = to_pci_bus(kobj_to_dev(kobj));
@@ -967,6 +1048,8 @@ void pci_create_legacy_files(struct pci_bus *b)
b->legacy_io->attr.mode = 0600;
b->legacy_io->read = pci_read_legacy_io;
b->legacy_io->write = pci_write_legacy_io;
+ /* See pci_create_attr() for motivation */
+ b->legacy_io->llseek = pci_llseek_resource;
b->legacy_io->mmap = pci_mmap_legacy_io;
b->legacy_io->f_mapping = iomem_get_mapping;
pci_adjust_legacy_attr(b, pci_mmap_io);
@@ -981,6 +1064,8 @@ void pci_create_legacy_files(struct pci_bus *b)
b->legacy_mem->size = 1024*1024;
b->legacy_mem->attr.mode = 0600;
b->legacy_mem->mmap = pci_mmap_legacy_mem;
+ /* See pci_create_attr() for motivation */
+ b->legacy_mem->llseek = pci_llseek_resource;
b->legacy_mem->f_mapping = iomem_get_mapping;
pci_adjust_legacy_attr(b, pci_mmap_mem);
error = device_create_bin_file(&b->dev, b->legacy_mem);
@@ -1009,29 +1094,6 @@ void pci_remove_legacy_files(struct pci_bus *b)
#endif /* HAVE_PCI_LEGACY */
#if defined(HAVE_PCI_MMAP) || defined(ARCH_GENERIC_PCI_MMAP_RESOURCE)
-
-int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vma,
- enum pci_mmap_api mmap_api)
-{
- unsigned long nr, start, size;
- resource_size_t pci_start = 0, pci_end;
-
- if (pci_resource_len(pdev, resno) == 0)
- return 0;
- nr = vma_pages(vma);
- start = vma->vm_pgoff;
- size = ((pci_resource_len(pdev, resno) - 1) >> PAGE_SHIFT) + 1;
- if (mmap_api == PCI_MMAP_PROCFS) {
- pci_resource_to_user(pdev, resno, &pdev->resource[resno],
- &pci_start, &pci_end);
- pci_start >>= PAGE_SHIFT;
- }
- if (start >= pci_start && start < pci_start + size &&
- start + nr <= pci_start + size)
- return 1;
- return 0;
-}
-
/**
* pci_mmap_resource - map a PCI resource into user memory space
* @kobj: kobject for mapping
@@ -1041,7 +1103,7 @@ int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vma,
*
* Use the regular PCI mapping routines to map a PCI resource into userspace.
*/
-static int pci_mmap_resource(struct kobject *kobj, struct bin_attribute *attr,
+static int pci_mmap_resource(struct kobject *kobj, const struct bin_attribute *attr,
struct vm_area_struct *vma, int write_combine)
{
struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
@@ -1066,23 +1128,24 @@ static int pci_mmap_resource(struct kobject *kobj, struct bin_attribute *attr,
}
static int pci_mmap_resource_uc(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
struct vm_area_struct *vma)
{
return pci_mmap_resource(kobj, attr, vma, 0);
}
static int pci_mmap_resource_wc(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
+ const struct bin_attribute *attr,
struct vm_area_struct *vma)
{
return pci_mmap_resource(kobj, attr, vma, 1);
}
static ssize_t pci_resource_io(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
+ const struct bin_attribute *attr, char *buf,
loff_t off, size_t count, bool write)
{
+#ifdef CONFIG_HAS_IOPORT
struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
int bar = (unsigned long)attr->private;
unsigned long port = off;
@@ -1116,17 +1179,20 @@ static ssize_t pci_resource_io(struct file *filp, struct kobject *kobj,
return 4;
}
return -EINVAL;
+#else
+ return -ENXIO;
+#endif
}
static ssize_t pci_read_resource_io(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
+ const struct bin_attribute *attr, char *buf,
loff_t off, size_t count)
{
return pci_resource_io(filp, kobj, attr, buf, off, count, false);
}
static ssize_t pci_write_resource_io(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
+ const struct bin_attribute *attr, char *buf,
loff_t off, size_t count)
{
int ret;
@@ -1195,8 +1261,15 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
res_attr->mmap = pci_mmap_resource_uc;
}
}
- if (res_attr->mmap)
+ if (res_attr->mmap) {
res_attr->f_mapping = iomem_get_mapping;
+ /*
+ * generic_file_llseek() consults f_mapping->host to determine
+ * the file size. As iomem_inode knows nothing about the
+ * attribute, it's not going to work, so override it as well.
+ */
+ res_attr->llseek = pci_llseek_resource;
+ }
res_attr->attr.name = res_attr_name;
res_attr->attr.mode = 0600;
res_attr->size = pci_resource_len(pdev, num);
@@ -1226,6 +1299,10 @@ static int pci_create_resource_files(struct pci_dev *pdev)
int i;
int retval;
+ /* Skip devices with non-mappable BARs */
+ if (pdev->non_mappable_bars)
+ return 0;
+
/* Expose the PCI resources from this device as files */
for (i = 0; i < PCI_STD_NUM_BARS; i++) {
@@ -1262,7 +1339,7 @@ void __weak pci_remove_resource_files(struct pci_dev *dev) { return; }
* writing anything except 0 enables it
*/
static ssize_t pci_write_rom(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
@@ -1288,7 +1365,7 @@ static ssize_t pci_write_rom(struct file *filp, struct kobject *kobj,
* device corresponding to @kobj.
*/
static ssize_t pci_read_rom(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
@@ -1314,32 +1391,37 @@ static ssize_t pci_read_rom(struct file *filp, struct kobject *kobj,
return count;
}
-static BIN_ATTR(rom, 0600, pci_read_rom, pci_write_rom, 0);
+static const BIN_ATTR(rom, 0600, pci_read_rom, pci_write_rom, 0);
-static struct bin_attribute *pci_dev_rom_attrs[] = {
+static const struct bin_attribute *const pci_dev_rom_attrs[] = {
&bin_attr_rom,
NULL,
};
static umode_t pci_dev_rom_attr_is_visible(struct kobject *kobj,
- struct bin_attribute *a, int n)
+ const struct bin_attribute *a, int n)
{
struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
- size_t rom_size;
/* If the device has a ROM, try to expose it in sysfs. */
- rom_size = pci_resource_len(pdev, PCI_ROM_RESOURCE);
- if (!rom_size)
+ if (!pci_resource_end(pdev, PCI_ROM_RESOURCE))
return 0;
- a->size = rom_size;
-
return a->attr.mode;
}
+static size_t pci_dev_rom_attr_bin_size(struct kobject *kobj,
+ const struct bin_attribute *a, int n)
+{
+ struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
+
+ return pci_resource_len(pdev, PCI_ROM_RESOURCE);
+}
+
static const struct attribute_group pci_dev_rom_attr_group = {
.bin_attrs = pci_dev_rom_attrs,
.is_bin_visible = pci_dev_rom_attr_is_visible,
+ .bin_size = pci_dev_rom_attr_bin_size,
};
static ssize_t reset_store(struct device *dev, struct device_attribute *attr,
@@ -1386,79 +1468,193 @@ static const struct attribute_group pci_dev_reset_attr_group = {
.is_visible = pci_dev_reset_attr_is_visible,
};
+static ssize_t reset_method_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ ssize_t len = 0;
+ int i, m;
+
+ for (i = 0; i < PCI_NUM_RESET_METHODS; i++) {
+ m = pdev->reset_methods[i];
+ if (!m)
+ break;
+
+ len += sysfs_emit_at(buf, len, "%s%s", len ? " " : "",
+ pci_reset_fn_methods[m].name);
+ }
+
+ if (len)
+ len += sysfs_emit_at(buf, len, "\n");
+
+ return len;
+}
+
+static int reset_method_lookup(const char *name)
+{
+ int m;
+
+ for (m = 1; m < PCI_NUM_RESET_METHODS; m++) {
+ if (sysfs_streq(name, pci_reset_fn_methods[m].name))
+ return m;
+ }
+
+ return 0; /* not found */
+}
+
+static ssize_t reset_method_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ char *tmp_options, *name;
+ int m, n;
+ u8 reset_methods[PCI_NUM_RESET_METHODS] = {};
+
+ if (sysfs_streq(buf, "")) {
+ pdev->reset_methods[0] = 0;
+ pci_warn(pdev, "All device reset methods disabled by user");
+ return count;
+ }
+
+ PM_RUNTIME_ACQUIRE(dev, pm);
+ if (PM_RUNTIME_ACQUIRE_ERR(&pm))
+ return -ENXIO;
+
+ if (sysfs_streq(buf, "default")) {
+ pci_init_reset_methods(pdev);
+ return count;
+ }
+
+ char *options __free(kfree) = kstrndup(buf, count, GFP_KERNEL);
+ if (!options)
+ return -ENOMEM;
+
+ n = 0;
+ tmp_options = options;
+ while ((name = strsep(&tmp_options, " ")) != NULL) {
+ if (sysfs_streq(name, ""))
+ continue;
+
+ name = strim(name);
+
+ /* Leave previous methods unchanged if input is invalid */
+ m = reset_method_lookup(name);
+ if (!m) {
+ pci_err(pdev, "Invalid reset method '%s'", name);
+ return -EINVAL;
+ }
+
+ if (pci_reset_fn_methods[m].reset_fn(pdev, PCI_RESET_PROBE)) {
+ pci_err(pdev, "Unsupported reset method '%s'", name);
+ return -EINVAL;
+ }
+
+ if (n == PCI_NUM_RESET_METHODS - 1) {
+ pci_err(pdev, "Too many reset methods\n");
+ return -EINVAL;
+ }
+
+ reset_methods[n++] = m;
+ }
+
+ reset_methods[n] = 0;
+
+ /* Warn if dev-specific supported but not highest priority */
+ if (pci_reset_fn_methods[1].reset_fn(pdev, PCI_RESET_PROBE) == 0 &&
+ reset_methods[0] != 1)
+ pci_warn(pdev, "Device-specific reset disabled/de-prioritized by user");
+ memcpy(pdev->reset_methods, reset_methods, sizeof(pdev->reset_methods));
+ return count;
+}
+static DEVICE_ATTR_RW(reset_method);
+
+static struct attribute *pci_dev_reset_method_attrs[] = {
+ &dev_attr_reset_method.attr,
+ NULL,
+};
+
+static const struct attribute_group pci_dev_reset_method_attr_group = {
+ .attrs = pci_dev_reset_method_attrs,
+ .is_visible = pci_dev_reset_attr_is_visible,
+};
+
+static ssize_t __resource_resize_show(struct device *dev, int n, char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ ssize_t ret;
+
+ pci_config_pm_runtime_get(pdev);
+
+ ret = sysfs_emit(buf, "%016llx\n",
+ pci_rebar_get_possible_sizes(pdev, n));
+
+ pci_config_pm_runtime_put(pdev);
+
+ return ret;
+}
+
+static ssize_t __resource_resize_store(struct device *dev, int n,
+ const char *buf, size_t count)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct pci_bus *bus = pdev->bus;
+ unsigned long size;
+ int ret;
+ u16 cmd;
+
+ if (kstrtoul(buf, 0, &size) < 0)
+ return -EINVAL;
+
+ device_lock(dev);
+ if (dev->driver || pci_num_vf(pdev)) {
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ pci_config_pm_runtime_get(pdev);
+
+ if ((pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA) {
+ ret = aperture_remove_conflicting_pci_devices(pdev,
+ "resourceN_resize");
+ if (ret)
+ goto pm_put;
+ }
+
+ pci_read_config_word(pdev, PCI_COMMAND, &cmd);
+ pci_write_config_word(pdev, PCI_COMMAND,
+ cmd & ~PCI_COMMAND_MEMORY);
+
+ pci_remove_resource_files(pdev);
+
+ ret = pci_resize_resource(pdev, n, size, 0);
+
+ pci_assign_unassigned_bus_resources(bus);
+
+ if (pci_create_resource_files(pdev))
+ pci_warn(pdev, "Failed to recreate resource files after BAR resizing\n");
+
+ pci_write_config_word(pdev, PCI_COMMAND, cmd);
+pm_put:
+ pci_config_pm_runtime_put(pdev);
+unlock:
+ device_unlock(dev);
+
+ return ret ? ret : count;
+}
+
#define pci_dev_resource_resize_attr(n) \
static ssize_t resource##n##_resize_show(struct device *dev, \
struct device_attribute *attr, \
- char * buf) \
+ char *buf) \
{ \
- struct pci_dev *pdev = to_pci_dev(dev); \
- ssize_t ret; \
- \
- pci_config_pm_runtime_get(pdev); \
- \
- ret = sysfs_emit(buf, "%016llx\n", \
- (u64)pci_rebar_get_possible_sizes(pdev, n)); \
- \
- pci_config_pm_runtime_put(pdev); \
- \
- return ret; \
+ return __resource_resize_show(dev, n, buf); \
} \
- \
static ssize_t resource##n##_resize_store(struct device *dev, \
struct device_attribute *attr,\
const char *buf, size_t count)\
{ \
- struct pci_dev *pdev = to_pci_dev(dev); \
- unsigned long size, flags; \
- int ret, i; \
- u16 cmd; \
- \
- if (kstrtoul(buf, 0, &size) < 0) \
- return -EINVAL; \
- \
- device_lock(dev); \
- if (dev->driver) { \
- ret = -EBUSY; \
- goto unlock; \
- } \
- \
- pci_config_pm_runtime_get(pdev); \
- \
- if ((pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA) { \
- ret = aperture_remove_conflicting_pci_devices(pdev, \
- "resourceN_resize"); \
- if (ret) \
- goto pm_put; \
- } \
- \
- pci_read_config_word(pdev, PCI_COMMAND, &cmd); \
- pci_write_config_word(pdev, PCI_COMMAND, \
- cmd & ~PCI_COMMAND_MEMORY); \
- \
- flags = pci_resource_flags(pdev, n); \
- \
- pci_remove_resource_files(pdev); \
- \
- for (i = 0; i < PCI_STD_NUM_BARS; i++) { \
- if (pci_resource_len(pdev, i) && \
- pci_resource_flags(pdev, i) == flags) \
- pci_release_resource(pdev, i); \
- } \
- \
- ret = pci_resize_resource(pdev, n, size); \
- \
- pci_assign_unassigned_bus_resources(pdev->bus); \
- \
- if (pci_create_resource_files(pdev)) \
- pci_warn(pdev, "Failed to recreate resource files after BAR resizing\n");\
- \
- pci_write_config_word(pdev, PCI_COMMAND, cmd); \
-pm_put: \
- pci_config_pm_runtime_put(pdev); \
-unlock: \
- device_unlock(dev); \
- \
- return ret ? ret : count; \
+ return __resource_resize_store(dev, n, buf, count); \
} \
static DEVICE_ATTR_RW(resource##n##_resize)
@@ -1538,6 +1734,7 @@ late_initcall(pci_sysfs_init);
static struct attribute *pci_dev_dev_attrs[] = {
&dev_attr_boot_vga.attr,
+ &dev_attr_serial_number.attr,
NULL,
};
@@ -1547,11 +1744,13 @@ static umode_t pci_dev_attrs_are_visible(struct kobject *kobj,
struct device *dev = kobj_to_dev(kobj);
struct pci_dev *pdev = to_pci_dev(dev);
- if (a == &dev_attr_boot_vga.attr)
- if ((pdev->class >> 8) != PCI_CLASS_DISPLAY_VGA)
- return 0;
+ if (a == &dev_attr_boot_vga.attr && pci_is_vga(pdev))
+ return a->mode;
- return a->mode;
+ if (a == &dev_attr_serial_number.attr && pci_get_dsn(pdev))
+ return a->mode;
+
+ return 0;
}
static struct attribute *pci_dev_hp_attrs[] = {
@@ -1614,6 +1813,7 @@ const struct attribute_group *pci_dev_groups[] = {
&pci_dev_acpi_attr_group,
#endif
&pci_dev_resource_resize_group,
+ ARCH_PCI_DEV_GROUPS
NULL,
};
@@ -1637,7 +1837,7 @@ static const struct attribute_group pcie_dev_attr_group = {
.is_visible = pcie_dev_attrs_are_visible,
};
-static const struct attribute_group *pci_dev_attr_groups[] = {
+const struct attribute_group *pci_dev_attr_groups[] = {
&pci_dev_attr_group,
&pci_dev_hp_attr_group,
#ifdef CONFIG_PCI_IOV
@@ -1648,13 +1848,17 @@ static const struct attribute_group *pci_dev_attr_groups[] = {
&pcie_dev_attr_group,
#ifdef CONFIG_PCIEAER
&aer_stats_attr_group,
+ &aer_attr_group,
#endif
#ifdef CONFIG_PCIEASPM
&aspm_ctrl_attr_group,
#endif
+#ifdef CONFIG_PCI_DOE
+ &pci_doe_sysfs_group,
+#endif
+#ifdef CONFIG_PCI_TSM
+ &pci_tsm_auth_attr_group,
+ &pci_tsm_attr_group,
+#endif
NULL,
};
-
-const struct device_type pci_dev_type = {
- .groups = pci_dev_attr_groups,
-};
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 60230da957e0..13dbb405dc31 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -23,8 +23,6 @@
#include <linux/string.h>
#include <linux/log2.h>
#include <linux/logic_pio.h>
-#include <linux/pm_wakeup.h>
-#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <linux/pci_hotplug.h>
@@ -143,8 +141,8 @@ enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_DEFAULT;
* the dfl or actual value as it sees fit. Don't forget this is
* measured in 32-bit words, not bytes.
*/
-u8 pci_dfl_cache_line_size = L1_CACHE_BYTES >> 2;
-u8 pci_cache_line_size;
+u8 pci_dfl_cache_line_size __ro_after_init = L1_CACHE_BYTES >> 2;
+u8 pci_cache_line_size __ro_after_init;
/*
* If we set up a device for bus mastering, we need to check the latency
@@ -425,36 +423,10 @@ found:
return 1;
}
-static u8 __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
- u8 pos, int cap, int *ttl)
-{
- u8 id;
- u16 ent;
-
- pci_bus_read_config_byte(bus, devfn, pos, &pos);
-
- while ((*ttl)--) {
- if (pos < 0x40)
- break;
- pos &= ~3;
- pci_bus_read_config_word(bus, devfn, pos, &ent);
-
- id = ent & 0xff;
- if (id == 0xff)
- break;
- if (id == cap)
- return pos;
- pos = (ent >> 8);
- }
- return 0;
-}
-
static u8 __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
u8 pos, int cap)
{
- int ttl = PCI_FIND_CAP_TTL;
-
- return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
+ return PCI_FIND_NEXT_CAP(pci_bus_read_config, pos, cap, bus, devfn);
}
u8 pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
@@ -534,7 +506,7 @@ u8 pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);
- pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & 0x7f);
+ pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & PCI_HEADER_TYPE_MASK);
if (pos)
pos = __pci_find_next_cap(bus, devfn, pos, cap);
@@ -555,42 +527,11 @@ EXPORT_SYMBOL(pci_bus_find_capability);
*/
u16 pci_find_next_ext_capability(struct pci_dev *dev, u16 start, int cap)
{
- u32 header;
- int ttl;
- u16 pos = PCI_CFG_SPACE_SIZE;
-
- /* minimum 8 bytes per capability */
- ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
-
if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
return 0;
- if (start)
- pos = start;
-
- if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
- return 0;
-
- /*
- * If we have no capabilities, this is indicated by cap ID,
- * cap version and next pointer all being 0.
- */
- if (header == 0)
- return 0;
-
- while (ttl-- > 0) {
- if (PCI_EXT_CAP_ID(header) == cap && pos != start)
- return pos;
-
- pos = PCI_EXT_CAP_NEXT(header);
- if (pos < PCI_CFG_SPACE_SIZE)
- break;
-
- if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
- break;
- }
-
- return 0;
+ return PCI_FIND_NEXT_EXT_CAP(pci_bus_read_config, start, cap,
+ dev->bus, dev->devfn);
}
EXPORT_SYMBOL_GPL(pci_find_next_ext_capability);
@@ -650,7 +591,7 @@ EXPORT_SYMBOL_GPL(pci_get_dsn);
static u8 __pci_find_next_ht_cap(struct pci_dev *dev, u8 pos, int ht_cap)
{
- int rc, ttl = PCI_FIND_CAP_TTL;
+ int rc;
u8 cap, mask;
if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
@@ -658,8 +599,8 @@ static u8 __pci_find_next_ht_cap(struct pci_dev *dev, u8 pos, int ht_cap)
else
mask = HT_5BIT_CAP_MASK;
- pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos,
- PCI_CAP_ID_HT, &ttl);
+ pos = PCI_FIND_NEXT_CAP(pci_bus_read_config, pos,
+ PCI_CAP_ID_HT, dev->bus, dev->devfn);
while (pos) {
rc = pci_read_config_byte(dev, pos + 3, &cap);
if (rc != PCIBIOS_SUCCESSFUL)
@@ -668,9 +609,10 @@ static u8 __pci_find_next_ht_cap(struct pci_dev *dev, u8 pos, int ht_cap)
if ((cap & mask) == ht_cap)
return pos;
- pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
- pos + PCI_CAP_LIST_NEXT,
- PCI_CAP_ID_HT, &ttl);
+ pos = PCI_FIND_NEXT_CAP(pci_bus_read_config,
+ pos + PCI_CAP_LIST_NEXT,
+ PCI_CAP_ID_HT, dev->bus,
+ dev->devfn);
}
return 0;
@@ -732,15 +674,18 @@ u16 pci_find_vsec_capability(struct pci_dev *dev, u16 vendor, int cap)
{
u16 vsec = 0;
u32 header;
+ int ret;
if (vendor != dev->vendor)
return 0;
while ((vsec = pci_find_next_ext_capability(dev, vsec,
PCI_EXT_CAP_ID_VNDR))) {
- if (pci_read_config_dword(dev, vsec + PCI_VNDR_HEADER,
- &header) == PCIBIOS_SUCCESSFUL &&
- PCI_VNDR_HEADER_ID(header) == cap)
+ ret = pci_read_config_dword(dev, vsec + PCI_VNDR_HEADER, &header);
+ if (ret != PCIBIOS_SUCCESSFUL)
+ continue;
+
+ if (PCI_VNDR_HEADER_ID(header) == cap)
return vsec;
}
@@ -848,6 +793,66 @@ struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res)
EXPORT_SYMBOL(pci_find_resource);
/**
+ * pci_resource_name - Return the name of the PCI resource
+ * @dev: PCI device to query
+ * @i: index of the resource
+ *
+ * Return the standard PCI resource (BAR) name according to its index.
+ */
+const char *pci_resource_name(struct pci_dev *dev, unsigned int i)
+{
+ static const char * const bar_name[] = {
+ "BAR 0",
+ "BAR 1",
+ "BAR 2",
+ "BAR 3",
+ "BAR 4",
+ "BAR 5",
+ "ROM",
+#ifdef CONFIG_PCI_IOV
+ "VF BAR 0",
+ "VF BAR 1",
+ "VF BAR 2",
+ "VF BAR 3",
+ "VF BAR 4",
+ "VF BAR 5",
+#endif
+ "bridge window", /* "io" included in %pR */
+ "bridge window", /* "mem" included in %pR */
+ "bridge window", /* "mem pref" included in %pR */
+ };
+ static const char * const cardbus_name[] = {
+ "BAR 1",
+ "unknown",
+ "unknown",
+ "unknown",
+ "unknown",
+ "unknown",
+#ifdef CONFIG_PCI_IOV
+ "unknown",
+ "unknown",
+ "unknown",
+ "unknown",
+ "unknown",
+ "unknown",
+#endif
+ "CardBus bridge window 0", /* I/O */
+ "CardBus bridge window 1", /* I/O */
+ "CardBus bridge window 0", /* mem */
+ "CardBus bridge window 1", /* mem */
+ };
+
+ if (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS &&
+ i < ARRAY_SIZE(cardbus_name))
+ return cardbus_name[i];
+
+ if (i < ARRAY_SIZE(bar_name))
+ return bar_name[i];
+
+ return "unknown";
+}
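
pci_resource_name() centralizes the "BAR 0" / "ROM" / "VF BAR n" / "bridge window" strings that were previously open-coded in log messages, so callers print a resource with a consistent prefix. A hedged sketch of the intended call pattern, mirroring how pci_ea_read() uses it later in this patch (the loop itself is illustrative):

    /* print every populated resource of a device with its canonical name */
    static void dump_dev_resources(struct pci_dev *dev)
    {
            unsigned int i;

            for (i = 0; i < PCI_NUM_RESOURCES; i++) {
                    struct resource *res = &dev->resource[i];

                    if (!res->flags)
                            continue;
                    pci_info(dev, "%s %pR\n", pci_resource_name(dev, i), res);
            }
    }
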
+
+/**
* pci_wait_for_pending - wait for @mask bit(s) to clear in status word @pos
* @dev: the PCI device to operate on
* @pos: config space offset of status word
@@ -884,30 +889,71 @@ void pci_request_acs(void)
}
static const char *disable_acs_redir_param;
+static const char *config_acs_param;
-/**
- * pci_disable_acs_redir - disable ACS redirect capabilities
- * @dev: the PCI device
- *
- * For only devices specified in the disable_acs_redir parameter.
- */
-static void pci_disable_acs_redir(struct pci_dev *dev)
+struct pci_acs {
+ u16 cap;
+ u16 ctrl;
+ u16 fw_ctrl;
+};
+
+static void __pci_config_acs(struct pci_dev *dev, struct pci_acs *caps,
+ const char *p, const u16 acs_mask, const u16 acs_flags)
{
+ u16 flags = acs_flags;
+ u16 mask = acs_mask;
+ char *delimit;
int ret = 0;
- const char *p;
- int pos;
- u16 ctrl;
- if (!disable_acs_redir_param)
+ if (!p)
return;
- p = disable_acs_redir_param;
while (*p) {
+ if (!acs_mask) {
+ /* Check for ACS flags */
+ delimit = strstr(p, "@");
+ if (delimit) {
+ int end;
+ u32 shift = 0;
+
+ end = delimit - p - 1;
+ mask = 0;
+ flags = 0;
+
+ while (end > -1) {
+ if (*(p + end) == '0') {
+ mask |= 1 << shift;
+ shift++;
+ end--;
+ } else if (*(p + end) == '1') {
+ mask |= 1 << shift;
+ flags |= 1 << shift;
+ shift++;
+ end--;
+ } else if ((*(p + end) == 'x') || (*(p + end) == 'X')) {
+ shift++;
+ end--;
+ } else {
+ pci_err(dev, "Invalid ACS flags... Ignoring\n");
+ return;
+ }
+ }
+ p = delimit + 1;
+ } else {
+ pci_err(dev, "ACS Flags missing\n");
+ return;
+ }
+ }
+
+ if (mask & ~(PCI_ACS_SV | PCI_ACS_TB | PCI_ACS_RR | PCI_ACS_CR |
+ PCI_ACS_UF | PCI_ACS_EC | PCI_ACS_DT)) {
+ pci_err(dev, "Invalid ACS flags specified\n");
+ return;
+ }
+
ret = pci_dev_str_match(dev, p, &p);
if (ret < 0) {
- pr_info_once("PCI: Can't parse disable_acs_redir parameter: %s\n",
- disable_acs_redir_param);
-
+ pr_info_once("PCI: Can't parse ACS command line parameter\n");
break;
} else if (ret == 1) {
/* Found a match */
@@ -927,56 +973,42 @@ static void pci_disable_acs_redir(struct pci_dev *dev)
if (!pci_dev_specific_disable_acs_redir(dev))
return;
- pos = dev->acs_cap;
- if (!pos) {
- pci_warn(dev, "cannot disable ACS redirect for this hardware as it does not have ACS capabilities\n");
- return;
- }
-
- pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
+ pci_dbg(dev, "ACS mask = %#06x\n", mask);
+ pci_dbg(dev, "ACS flags = %#06x\n", flags);
+ pci_dbg(dev, "ACS control = %#06x\n", caps->ctrl);
+ pci_dbg(dev, "ACS fw_ctrl = %#06x\n", caps->fw_ctrl);
- /* P2P Request & Completion Redirect */
- ctrl &= ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC);
-
- pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
+ /*
+ * For mask bits that are 0, copy them from the firmware setting
+ * and apply flags for all the mask bits that are 1.
+ */
+ caps->ctrl = (caps->fw_ctrl & ~mask) | (flags & mask);
- pci_info(dev, "disabled ACS redirect\n");
+ pci_info(dev, "Configured ACS to %#06x\n", caps->ctrl);
}
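
__pci_config_acs() folds both command-line parameters into one rule: an optional bit string in front of '@' is parsed right to left into a mask of bits the admin wants to own and the values to force ('1' sets a bit, '0' clears it, 'x' keeps the firmware value), and the result is merged as caps->ctrl = (fw_ctrl & ~mask) | (flags & mask). A small stand-alone demo of that parse-and-merge, detached from the PCI plumbing (the example bit values are illustrative):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* parse "101x..." right to left into (mask, flags); returns 0 on bad input */
    static int parse_acs_bits(const char *s, uint16_t *mask, uint16_t *flags)
    {
            int end = (int)strlen(s) - 1;
            unsigned int shift = 0;

            *mask = 0;
            *flags = 0;
            while (end > -1) {
                    char c = s[end];

                    if (c == '0') {
                            *mask |= 1u << shift;
                    } else if (c == '1') {
                            *mask |= 1u << shift;
                            *flags |= 1u << shift;
                    } else if (c != 'x' && c != 'X') {
                            return 0;
                    }
                    shift++;
                    end--;
            }
            return 1;
    }

    int main(void)
    {
            uint16_t fw_ctrl = 0x001d;  /* pretend firmware enabled SV/RR/CR/UF */
            uint16_t mask, flags, ctrl;

            if (!parse_acs_bits("x1x0", &mask, &flags))
                    return 1;

            /* masked bits come from the admin, the rest stay at firmware values */
            ctrl = (uint16_t)((fw_ctrl & ~mask) | (flags & mask));
            printf("mask=%#06x flags=%#06x ctrl=%#06x\n", mask, flags, ctrl);
            return 0;
    }
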
/**
* pci_std_enable_acs - enable ACS on devices using standard ACS capabilities
* @dev: the PCI device
+ * @caps: default ACS controls
*/
-static void pci_std_enable_acs(struct pci_dev *dev)
+static void pci_std_enable_acs(struct pci_dev *dev, struct pci_acs *caps)
{
- int pos;
- u16 cap;
- u16 ctrl;
-
- pos = dev->acs_cap;
- if (!pos)
- return;
-
- pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
- pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
-
/* Source Validation */
- ctrl |= (cap & PCI_ACS_SV);
+ caps->ctrl |= (caps->cap & PCI_ACS_SV);
/* P2P Request Redirect */
- ctrl |= (cap & PCI_ACS_RR);
+ caps->ctrl |= (caps->cap & PCI_ACS_RR);
/* P2P Completion Redirect */
- ctrl |= (cap & PCI_ACS_CR);
+ caps->ctrl |= (caps->cap & PCI_ACS_CR);
/* Upstream Forwarding */
- ctrl |= (cap & PCI_ACS_UF);
+ caps->ctrl |= (caps->cap & PCI_ACS_UF);
/* Enable Translation Blocking for external devices and noats */
if (pci_ats_disabled() || dev->external_facing || dev->untrusted)
- ctrl |= (cap & PCI_ACS_TB);
-
- pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
+ caps->ctrl |= (caps->cap & PCI_ACS_TB);
}
/**
@@ -985,23 +1017,37 @@ static void pci_std_enable_acs(struct pci_dev *dev)
*/
static void pci_enable_acs(struct pci_dev *dev)
{
- if (!pci_acs_enable)
- goto disable_acs_redir;
+ struct pci_acs caps;
+ bool enable_acs = false;
+ int pos;
+
+ /* If an iommu is present we start with kernel default caps */
+ if (pci_acs_enable) {
+ if (pci_dev_specific_enable_acs(dev))
+ enable_acs = true;
+ }
- if (!pci_dev_specific_enable_acs(dev))
- goto disable_acs_redir;
+ pos = dev->acs_cap;
+ if (!pos)
+ return;
- pci_std_enable_acs(dev);
+ pci_read_config_word(dev, pos + PCI_ACS_CAP, &caps.cap);
+ pci_read_config_word(dev, pos + PCI_ACS_CTRL, &caps.ctrl);
+ caps.fw_ctrl = caps.ctrl;
+
+ if (enable_acs)
+ pci_std_enable_acs(dev, &caps);
-disable_acs_redir:
/*
- * Note: pci_disable_acs_redir() must be called even if ACS was not
- * enabled by the kernel because it may have been enabled by
- * platform firmware. So if we are told to disable it, we should
- * always disable it after setting the kernel's default
- * preferences.
+ * Always apply caps from the command line, even if there is no iommu.
+ * Trust that the admin has a reason to change the ACS settings.
*/
- pci_disable_acs_redir(dev);
+ __pci_config_acs(dev, &caps, disable_acs_redir_param,
+ PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC,
+ ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC));
+ __pci_config_acs(dev, &caps, config_acs_param, 0, 0);
+
+ pci_write_config_word(dev, pos + PCI_ACS_CTRL, caps.ctrl);
}
/**
@@ -1164,7 +1210,9 @@ static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout)
{
int delay = 1;
bool retrain = false;
- struct pci_dev *bridge;
+ struct pci_dev *root, *bridge;
+
+ root = pcie_find_root_port(dev);
if (pci_is_pcie(dev)) {
bridge = pci_upstream_bridge(dev);
@@ -1173,23 +1221,41 @@ static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout)
}
/*
- * After reset, the device should not silently discard config
- * requests, but it may still indicate that it needs more time by
- * responding to them with CRS completions. The Root Port will
- * generally synthesize ~0 (PCI_ERROR_RESPONSE) data to complete
- * the read (except when CRS SV is enabled and the read was for the
- * Vendor ID; in that case it synthesizes 0x0001 data).
+ * The caller has already waited long enough after a reset that the
+ * device should respond to config requests, but it may respond
+ * with Request Retry Status (RRS) if it needs more time to
+ * initialize.
+ *
+ * If the device is below a Root Port with Configuration RRS
+ * Software Visibility enabled, reading the Vendor ID returns a
+ * special data value if the device responded with RRS. Read the
+ * Vendor ID until we get non-RRS status.
*
- * Wait for the device to return a non-CRS completion. Read the
- * Command register instead of Vendor ID so we don't have to
- * contend with the CRS SV value.
+ * If there's no Root Port or Configuration RRS Software Visibility
+ * is not enabled, the device may still respond with RRS, but
+ * hardware may retry the config request. If no retries receive
+ * Successful Completion, hardware generally synthesizes ~0
+ * (PCI_ERROR_RESPONSE) data to complete the read. Reading Vendor
+ * ID for VFs and non-existent devices also returns ~0, so read the
+ * Command register until it returns something other than ~0.
*/
for (;;) {
u32 id;
- pci_read_config_dword(dev, PCI_COMMAND, &id);
- if (!PCI_POSSIBLE_ERROR(id))
- break;
+ if (pci_dev_is_disconnected(dev)) {
+ pci_dbg(dev, "disconnected; not waiting\n");
+ return -ENOTTY;
+ }
+
+ if (root && root->config_rrs_sv) {
+ pci_read_config_dword(dev, PCI_VENDOR_ID, &id);
+ if (!pci_bus_rrs_vendor_id(id))
+ break;
+ } else {
+ pci_read_config_dword(dev, PCI_COMMAND, &id);
+ if (!PCI_POSSIBLE_ERROR(id))
+ break;
+ }
if (delay > timeout) {
pci_warn(dev, "not ready %dms after %s; giving up\n",
@@ -1200,7 +1266,7 @@ static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout)
if (delay > PCI_RESET_WAIT) {
if (retrain) {
retrain = false;
- if (pcie_failed_link_retrain(bridge)) {
+ if (pcie_failed_link_retrain(bridge) == 0) {
delay = 1;
continue;
}
@@ -1216,6 +1282,9 @@ static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout)
if (delay > PCI_RESET_WAIT)
pci_info(dev, "ready %dms after %s\n", delay - 1,
reset_type);
+ else
+ pci_dbg(dev, "ready %dms after %s\n", delay - 1,
+ reset_type);
return 0;
}
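
The rewritten wait loop distinguishes two cases after a reset: if the device sits below a Root Port with Configuration RRS Software Visibility, reading the Vendor ID returns a special value (0x0001 in the Vendor ID field, per the behaviour described in the removed comment) while the device keeps answering with Request Retry Status, so the Vendor ID is polled; otherwise the Command register is polled until it stops reading as ~0. A compact user-space model of that polling decision (the helper is reimplemented from its documented meaning; the sample register values are invented):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PCI_ERROR_RESPONSE 0xffffffffu
    #define RRS_VENDOR_ID      0x0001u  /* special Vendor ID while device retries */

    static bool rrs_vendor_id(uint32_t id)
    {
            return (id & 0xffff) == RRS_VENDOR_ID;
    }

    /*
     * One polling step: returns true once the device is considered ready.
     * 'rrs_sv' says whether the Root Port has RRS Software Visibility enabled.
     */
    static bool dev_ready(bool rrs_sv, uint32_t vendor_id_read, uint32_t command_read)
    {
            if (rrs_sv)
                    return !rrs_vendor_id(vendor_id_read);

            return command_read != PCI_ERROR_RESPONSE;
    }

    int main(void)
    {
            /* still initializing: RRS Vendor ID / all-ones Command register */
            printf("busy:  %d %d\n", dev_ready(true, 0x00000001, 0),
                   dev_ready(false, 0, PCI_ERROR_RESPONSE));
            /* done: real Vendor ID / sane Command register */
            printf("ready: %d %d\n", dev_ready(true, 0x908610ee, 0),
                   dev_ready(false, 0, 0x00100006));
            return 0;
    }
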
@@ -1226,6 +1295,10 @@ static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout)
*
* On success, return 0 or 1, depending on whether or not it is necessary to
* restore the device's BARs subsequently (1 is returned in that case).
+ *
+ * On failure, return a negative error code. Always return failure if @dev
+ * lacks a Power Management Capability, even if the platform was able to
+ * put the device in D0 via non-PCI means.
*/
int pci_power_up(struct pci_dev *dev)
{
@@ -1242,9 +1315,11 @@ int pci_power_up(struct pci_dev *dev)
else
dev->current_state = state;
- if (state == PCI_D0)
- return 0;
+ return -EIO;
+ }
+ if (pci_dev_is_disconnected(dev)) {
+ dev->current_state = PCI_D3cold;
return -EIO;
}
@@ -1287,23 +1362,28 @@ end:
/**
* pci_set_full_power_state - Put a PCI device into D0 and update its state
* @dev: PCI device to power up
+ * @locked: whether pci_bus_sem is held
*
* Call pci_power_up() to put @dev into D0, read from its PCI_PM_CTRL register
* to confirm the state change, restore its BARs if they might be lost and
- * reconfigure ASPM in acordance with the new power state.
+ * reconfigure ASPM in accordance with the new power state.
*
* If pci_restore_state() is going to be called right after a power state change
* to D0, it is more efficient to use pci_power_up() directly instead of this
* function.
*/
-static int pci_set_full_power_state(struct pci_dev *dev)
+static int pci_set_full_power_state(struct pci_dev *dev, bool locked)
{
u16 pmcsr;
int ret;
ret = pci_power_up(dev);
- if (ret < 0)
+ if (ret < 0) {
+ if (dev->current_state == PCI_D0)
+ return 0;
+
return ret;
+ }
pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
dev->current_state = pmcsr & PCI_PM_CTRL_STATE_MASK;
@@ -1327,6 +1407,9 @@ static int pci_set_full_power_state(struct pci_dev *dev)
pci_restore_bars(dev);
}
+ if (dev->bus->self)
+ pcie_aspm_pm_state_change(dev->bus->self, locked);
+
return 0;
}
@@ -1354,10 +1437,22 @@ void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state)
pci_walk_bus(bus, __pci_dev_set_current_state, &state);
}
+static void __pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state, bool locked)
+{
+ if (!bus)
+ return;
+
+ if (locked)
+ pci_walk_bus_locked(bus, __pci_dev_set_current_state, &state);
+ else
+ pci_walk_bus(bus, __pci_dev_set_current_state, &state);
+}
+
/**
* pci_set_low_power_state - Put a PCI device into a low-power state.
* @dev: PCI device to handle.
* @state: PCI power state (D1, D2, D3hot) to put the device into.
+ * @locked: whether pci_bus_sem is held
*
* Use the device's PCI_PM_CTRL register to put it into a low-power state.
*
@@ -1368,7 +1463,7 @@ void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state)
* 0 if device already is in the requested state.
* 0 if device's power state has been successfully changed.
*/
-static int pci_set_low_power_state(struct pci_dev *dev, pci_power_t state)
+static int pci_set_low_power_state(struct pci_dev *dev, pci_power_t state, bool locked)
{
u16 pmcsr;
@@ -1421,27 +1516,13 @@ static int pci_set_low_power_state(struct pci_dev *dev, pci_power_t state)
pci_power_name(dev->current_state),
pci_power_name(state));
+ if (dev->bus->self)
+ pcie_aspm_pm_state_change(dev->bus->self, locked);
+
return 0;
}
-/**
- * pci_set_power_state - Set the power state of a PCI device
- * @dev: PCI device to handle.
- * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
- *
- * Transition a device to a new power state, using the platform firmware and/or
- * the device's PCI PM registers.
- *
- * RETURN VALUE:
- * -EINVAL if the requested state is invalid.
- * -EIO if device does not support PCI PM or its PM capabilities register has a
- * wrong version, or device doesn't support the requested state.
- * 0 if the transition is to D1 or D2 but D1 and D2 are not supported.
- * 0 if device already is in the requested state.
- * 0 if the transition is to D3 but D3 is not supported.
- * 0 if device's power state has been successfully changed.
- */
-int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
+static int __pci_set_power_state(struct pci_dev *dev, pci_power_t state, bool locked)
{
int error;
@@ -1465,7 +1546,7 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
return 0;
if (state == PCI_D0)
- return pci_set_full_power_state(dev);
+ return pci_set_full_power_state(dev, locked);
/*
* This device is quirked not to be put into D3, so don't put it in
@@ -1479,16 +1560,16 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
* To put the device in D3cold, put it into D3hot in the native
* way, then put it into D3cold using platform ops.
*/
- error = pci_set_low_power_state(dev, PCI_D3hot);
+ error = pci_set_low_power_state(dev, PCI_D3hot, locked);
if (pci_platform_power_transition(dev, PCI_D3cold))
return error;
/* Powering off a bridge may power off the whole hierarchy */
if (dev->current_state == PCI_D3cold)
- pci_bus_set_current_state(dev->subordinate, PCI_D3cold);
+ __pci_bus_set_current_state(dev->subordinate, PCI_D3cold, locked);
} else {
- error = pci_set_low_power_state(dev, state);
+ error = pci_set_low_power_state(dev, state, locked);
if (pci_platform_power_transition(dev, state))
return error;
@@ -1496,8 +1577,38 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
return 0;
}
+
+/**
+ * pci_set_power_state - Set the power state of a PCI device
+ * @dev: PCI device to handle.
+ * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
+ *
+ * Transition a device to a new power state, using the platform firmware and/or
+ * the device's PCI PM registers.
+ *
+ * RETURN VALUE:
+ * -EINVAL if the requested state is invalid.
+ * -EIO if device does not support PCI PM or its PM capabilities register has a
+ * wrong version, or device doesn't support the requested state.
+ * 0 if the transition is to D1 or D2 but D1 and D2 are not supported.
+ * 0 if device already is in the requested state.
+ * 0 if the transition is to D3 but D3 is not supported.
+ * 0 if device's power state has been successfully changed.
+ */
+int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
+{
+ return __pci_set_power_state(dev, state, false);
+}
EXPORT_SYMBOL(pci_set_power_state);
+int pci_set_power_state_locked(struct pci_dev *dev, pci_power_t state)
+{
+ lockdep_assert_held(&pci_bus_sem);
+
+ return __pci_set_power_state(dev, state, true);
+}
+EXPORT_SYMBOL(pci_set_power_state_locked);
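
pci_set_power_state_locked() exists for callers that already hold pci_bus_sem, for instance from within a pci_walk_bus_locked() callback; it asserts the lock and then takes the same path as pci_set_power_state(), only telling the helpers to use the *_locked bus-walk variants so the semaphore is not taken twice. A hedged sketch of such a caller, with the function names invented for the example:

    /* put a subtree into D3hot; the caller of this helper holds pci_bus_sem */
    static int __suspend_one(struct pci_dev *pdev, void *data)
    {
            /* safe: pci_walk_bus_locked() requires pci_bus_sem to be held */
            pci_set_power_state_locked(pdev, PCI_D3hot);
            return 0;
    }

    static void suspend_subtree_locked(struct pci_bus *bus)
    {
            pci_walk_bus_locked(bus, __suspend_one, NULL);
    }
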
+
#define PCI_EXP_SAVE_REGS 7
static struct pci_cap_saved_state *_pci_find_saved_cap(struct pci_dev *pci_dev,
@@ -1546,25 +1657,10 @@ static int pci_save_pcie_state(struct pci_dev *dev)
pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &cap[i++]);
pcie_capability_read_word(dev, PCI_EXP_SLTCTL2, &cap[i++]);
- return 0;
-}
-
-void pci_bridge_reconfigure_ltr(struct pci_dev *dev)
-{
-#ifdef CONFIG_PCIEASPM
- struct pci_dev *bridge;
- u32 ctl;
+ pci_save_aspm_l1ss_state(dev);
+ pci_save_ltr_state(dev);
- bridge = pci_upstream_bridge(dev);
- if (bridge && bridge->ltr_path) {
- pcie_capability_read_dword(bridge, PCI_EXP_DEVCTL2, &ctl);
- if (!(ctl & PCI_EXP_DEVCTL2_LTR_EN)) {
- pci_dbg(bridge, "re-enabling LTR\n");
- pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2,
- PCI_EXP_DEVCTL2_LTR_EN);
- }
- }
-#endif
+ return 0;
}
static void pci_restore_pcie_state(struct pci_dev *dev)
@@ -1573,6 +1669,13 @@ static void pci_restore_pcie_state(struct pci_dev *dev)
struct pci_cap_saved_state *save_state;
u16 *cap;
+ /*
+ * Restore max latencies (in the LTR capability) before enabling
+ * LTR itself in PCI_EXP_DEVCTL2.
+ */
+ pci_restore_ltr_state(dev);
+ pci_restore_aspm_l1ss_state(dev);
+
save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
if (!save_state)
return;
@@ -1630,46 +1733,6 @@ static void pci_restore_pcix_state(struct pci_dev *dev)
pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
}
-static void pci_save_ltr_state(struct pci_dev *dev)
-{
- int ltr;
- struct pci_cap_saved_state *save_state;
- u32 *cap;
-
- if (!pci_is_pcie(dev))
- return;
-
- ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
- if (!ltr)
- return;
-
- save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR);
- if (!save_state) {
- pci_err(dev, "no suspend buffer for LTR; ASPM issues possible after resume\n");
- return;
- }
-
- /* Some broken devices only support dword access to LTR */
- cap = &save_state->cap.data[0];
- pci_read_config_dword(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, cap);
-}
-
-static void pci_restore_ltr_state(struct pci_dev *dev)
-{
- struct pci_cap_saved_state *save_state;
- int ltr;
- u32 *cap;
-
- save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR);
- ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
- if (!save_state || !ltr)
- return;
-
- /* Some broken devices only support dword access to LTR */
- cap = &save_state->cap.data[0];
- pci_write_config_dword(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, *cap);
-}
-
/**
* pci_save_state - save the PCI configuration space of a device before
* suspending
@@ -1681,7 +1744,7 @@ int pci_save_state(struct pci_dev *dev)
/* XXX: 100% dword access ok here? */
for (i = 0; i < 16; i++) {
pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
- pci_dbg(dev, "saving config space at offset %#x (reading %#x)\n",
+ pci_dbg(dev, "save config %#04x: %#010x\n",
i * 4, dev->saved_config_space[i]);
}
dev->state_saved = true;
@@ -1694,10 +1757,10 @@ int pci_save_state(struct pci_dev *dev)
if (i != 0)
return i;
- pci_save_ltr_state(dev);
pci_save_dpc_state(dev);
pci_save_aer_state(dev);
pci_save_ptm_state(dev);
+ pci_save_tph_state(dev);
return pci_save_vc_state(dev);
}
EXPORT_SYMBOL(pci_save_state);
@@ -1712,7 +1775,7 @@ static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
return;
for (;;) {
- pci_dbg(pdev, "restoring config space at offset %#x (was %#x, writing %#x)\n",
+ pci_dbg(pdev, "restore config %#04x: %#010x -> %#010x\n",
offset, val, saved_val);
pci_write_config_dword(pdev, offset, saved_val);
if (retry-- <= 0)
@@ -1760,48 +1823,12 @@ static void pci_restore_config_space(struct pci_dev *pdev)
}
}
-static void pci_restore_rebar_state(struct pci_dev *pdev)
-{
- unsigned int pos, nbars, i;
- u32 ctrl;
-
- pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_REBAR);
- if (!pos)
- return;
-
- pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
- nbars = (ctrl & PCI_REBAR_CTRL_NBAR_MASK) >>
- PCI_REBAR_CTRL_NBAR_SHIFT;
-
- for (i = 0; i < nbars; i++, pos += 8) {
- struct resource *res;
- int bar_idx, size;
-
- pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
- bar_idx = ctrl & PCI_REBAR_CTRL_BAR_IDX;
- res = pdev->resource + bar_idx;
- size = pci_rebar_bytes_to_size(resource_size(res));
- ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE;
- ctrl |= size << PCI_REBAR_CTRL_BAR_SHIFT;
- pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl);
- }
-}
-
/**
* pci_restore_state - Restore the saved state of a PCI device
* @dev: PCI device that we're dealing with
*/
void pci_restore_state(struct pci_dev *dev)
{
- if (!dev->state_saved)
- return;
-
- /*
- * Restore max latencies (in the LTR capability) before enabling
- * LTR itself (in the PCIe capability).
- */
- pci_restore_ltr_state(dev);
-
pci_restore_pcie_state(dev);
pci_restore_pasid_state(dev);
pci_restore_pri_state(dev);
@@ -1810,6 +1837,7 @@ void pci_restore_state(struct pci_dev *dev)
pci_restore_rebar_state(dev);
pci_restore_dpc_state(dev);
pci_restore_ptm_state(dev);
+ pci_restore_tph_state(dev);
pci_aer_clear_status(dev);
pci_restore_aer_state(dev);
@@ -1930,6 +1958,28 @@ int __weak pcibios_enable_device(struct pci_dev *dev, int bars)
return pci_enable_resources(dev, bars);
}
+static int pci_host_bridge_enable_device(struct pci_dev *dev)
+{
+ struct pci_host_bridge *host_bridge = pci_find_host_bridge(dev->bus);
+ int err;
+
+ if (host_bridge && host_bridge->enable_device) {
+ err = host_bridge->enable_device(host_bridge, dev);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static void pci_host_bridge_disable_device(struct pci_dev *dev)
+{
+ struct pci_host_bridge *host_bridge = pci_find_host_bridge(dev->bus);
+
+ if (host_bridge && host_bridge->disable_device)
+ host_bridge->disable_device(host_bridge, dev);
+}
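
pci_host_bridge_enable_device() and pci_host_bridge_disable_device() give host bridge drivers a hook that runs on every pci_enable_device()/pci_disable_device() of a child device, e.g. to program per-device filters or requester-ID mappings before the device starts issuing transactions. A hedged sketch of a controller driver wiring up the callbacks (the callback bodies and names are invented for the example):

    /* illustrative host controller callbacks; the real work is hardware specific */
    static int my_hb_enable_device(struct pci_host_bridge *hb, struct pci_dev *pdev)
    {
            /* e.g. allocate/program a translation entry for pdev's requester ID */
            return 0;
    }

    static void my_hb_disable_device(struct pci_host_bridge *hb, struct pci_dev *pdev)
    {
            /* undo whatever my_hb_enable_device() set up */
    }

    static int my_host_probe(struct platform_device *pdev)
    {
            struct pci_host_bridge *bridge;

            bridge = devm_pci_alloc_host_bridge(&pdev->dev, 0);
            if (!bridge)
                    return -ENOMEM;

            bridge->enable_device = my_hb_enable_device;
            bridge->disable_device = my_hb_disable_device;

            /* ... fill in ops/sysdata and register the bridge as usual ... */
            return pci_host_probe(bridge);
    }
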
+
static int do_pci_enable_device(struct pci_dev *dev, int bars)
{
int err;
@@ -1945,9 +1995,13 @@ static int do_pci_enable_device(struct pci_dev *dev, int bars)
if (bridge)
pcie_aspm_powersave_config_link(bridge);
+ err = pci_host_bridge_enable_device(dev);
+ if (err)
+ return err;
+
err = pcibios_enable_device(dev, bars);
if (err < 0)
- return err;
+ goto err_enable;
pci_fixup_device(pci_fixup_enable, dev);
if (dev->msi_enabled || dev->msix_enabled)
@@ -1962,6 +2016,12 @@ static int do_pci_enable_device(struct pci_dev *dev, int bars)
}
return 0;
+
+err_enable:
+ pci_host_bridge_disable_device(dev);
+
+ return err;
+
}
/**
@@ -2037,20 +2097,6 @@ static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
}
/**
- * pci_enable_device_io - Initialize a device for use with IO space
- * @dev: PCI device to be initialized
- *
- * Initialize device before it's used by a driver. Ask low-level code
- * to enable I/O resources. Wake up the device if it was suspended.
- * Beware, this function can fail.
- */
-int pci_enable_device_io(struct pci_dev *dev)
-{
- return pci_enable_device_flags(dev, IORESOURCE_IO);
-}
-EXPORT_SYMBOL(pci_enable_device_io);
-
-/**
* pci_enable_device_mem - Initialize a device for use with Memory space
* @dev: PCI device to be initialized
*
@@ -2082,107 +2128,6 @@ int pci_enable_device(struct pci_dev *dev)
EXPORT_SYMBOL(pci_enable_device);
/*
- * Managed PCI resources. This manages device on/off, INTx/MSI/MSI-X
- * on/off and BAR regions. pci_dev itself records MSI/MSI-X status, so
- * there's no need to track it separately. pci_devres is initialized
- * when a device is enabled using managed PCI device enable interface.
- */
-struct pci_devres {
- unsigned int enabled:1;
- unsigned int pinned:1;
- unsigned int orig_intx:1;
- unsigned int restore_intx:1;
- unsigned int mwi:1;
- u32 region_mask;
-};
-
-static void pcim_release(struct device *gendev, void *res)
-{
- struct pci_dev *dev = to_pci_dev(gendev);
- struct pci_devres *this = res;
- int i;
-
- for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
- if (this->region_mask & (1 << i))
- pci_release_region(dev, i);
-
- if (this->mwi)
- pci_clear_mwi(dev);
-
- if (this->restore_intx)
- pci_intx(dev, this->orig_intx);
-
- if (this->enabled && !this->pinned)
- pci_disable_device(dev);
-}
-
-static struct pci_devres *get_pci_dr(struct pci_dev *pdev)
-{
- struct pci_devres *dr, *new_dr;
-
- dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
- if (dr)
- return dr;
-
- new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
- if (!new_dr)
- return NULL;
- return devres_get(&pdev->dev, new_dr, NULL, NULL);
-}
-
-static struct pci_devres *find_pci_dr(struct pci_dev *pdev)
-{
- if (pci_is_managed(pdev))
- return devres_find(&pdev->dev, pcim_release, NULL, NULL);
- return NULL;
-}
-
-/**
- * pcim_enable_device - Managed pci_enable_device()
- * @pdev: PCI device to be initialized
- *
- * Managed pci_enable_device().
- */
-int pcim_enable_device(struct pci_dev *pdev)
-{
- struct pci_devres *dr;
- int rc;
-
- dr = get_pci_dr(pdev);
- if (unlikely(!dr))
- return -ENOMEM;
- if (dr->enabled)
- return 0;
-
- rc = pci_enable_device(pdev);
- if (!rc) {
- pdev->is_managed = 1;
- dr->enabled = 1;
- }
- return rc;
-}
-EXPORT_SYMBOL(pcim_enable_device);
-
-/**
- * pcim_pin_device - Pin managed PCI device
- * @pdev: PCI device to pin
- *
- * Pin managed PCI device @pdev. Pinned device won't be disabled on
- * driver detach. @pdev must have been enabled with
- * pcim_enable_device().
- */
-void pcim_pin_device(struct pci_dev *pdev)
-{
- struct pci_devres *dr;
-
- dr = find_pci_dr(pdev);
- WARN_ON(!dr || !dr->enabled);
- if (dr)
- dr->pinned = 1;
-}
-EXPORT_SYMBOL(pcim_pin_device);
-
-/*
* pcibios_device_add - provide arch specific hooks when adding device dev
* @dev: the PCI device being added
*
@@ -2216,17 +2161,6 @@ void __weak pcibios_release_device(struct pci_dev *dev) {}
*/
void __weak pcibios_disable_device(struct pci_dev *dev) {}
-/**
- * pcibios_penalize_isa_irq - penalize an ISA IRQ
- * @irq: ISA IRQ to penalize
- * @active: IRQ active or not
- *
- * Permits the platform to provide architecture-specific functionality when
- * penalizing ISA IRQs. This is the default implementation. Architecture
- * implementations can override this.
- */
-void __weak pcibios_penalize_isa_irq(int irq, int active) {}
-
static void do_pci_disable_device(struct pci_dev *dev)
{
u16 pci_command;
@@ -2265,18 +2199,14 @@ void pci_disable_enabled_device(struct pci_dev *dev)
*/
void pci_disable_device(struct pci_dev *dev)
{
- struct pci_devres *dr;
-
- dr = find_pci_dr(dev);
- if (dr)
- dr->enabled = 0;
-
dev_WARN_ONCE(&dev->dev, atomic_read(&dev->enable_cnt) <= 0,
"disabling already-disabled device");
if (atomic_dec_return(&dev->enable_cnt) != 0)
return;
+ pci_host_bridge_disable_device(dev);
+
do_pci_disable_device(dev);
dev->is_busmaster = 0;
@@ -2415,25 +2345,41 @@ static void pci_pme_list_scan(struct work_struct *work)
mutex_lock(&pci_pme_list_mutex);
list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) {
- if (pme_dev->dev->pme_poll) {
- struct pci_dev *bridge;
+ struct pci_dev *pdev = pme_dev->dev;
+
+ if (pdev->pme_poll) {
+ struct pci_dev *bridge = pdev->bus->self;
+ struct device *dev = &pdev->dev;
+ struct device *bdev = bridge ? &bridge->dev : NULL;
+ int bref = 0;
- bridge = pme_dev->dev->bus->self;
/*
- * If bridge is in low power state, the
- * configuration space of subordinate devices
- * may be not accessible
+ * If we have a bridge, it should be in an active/D0
+ * state or the configuration space of subordinate
+ * devices may not be accessible or stable over the
+ * course of the call.
*/
- if (bridge && bridge->current_state != PCI_D0)
- continue;
+ if (bdev) {
+ bref = pm_runtime_get_if_active(bdev);
+ if (!bref)
+ continue;
+
+ if (bridge->current_state != PCI_D0)
+ goto put_bridge;
+ }
+
/*
- * If the device is in D3cold it should not be
- * polled either.
+ * The device itself should be suspended but config
+ * space must be accessible, therefore it cannot be in
+ * D3cold.
*/
- if (pme_dev->dev->current_state == PCI_D3cold)
- continue;
+ if (pm_runtime_suspended(dev) &&
+ pdev->current_state != PCI_D3cold)
+ pci_pme_wakeup(pdev, NULL);
- pci_pme_wakeup(pme_dev->dev, NULL);
+put_bridge:
+ if (bref > 0)
+ pm_runtime_put(bdev);
} else {
list_del(&pme_dev->list);
kfree(pme_dev);
@@ -2984,6 +2930,18 @@ static const struct dmi_system_id bridge_d3_blacklist[] = {
DMI_MATCH(DMI_BOARD_VERSION, "Continental Z2"),
},
},
+ {
+ /*
+ * Changing the power state of the Root Port the dGPU is connected to fails
+ * https://gitlab.freedesktop.org/drm/amd/-/issues/3229
+ */
+ .ident = "Hewlett-Packard HP Pavilion 17 Notebook PC/1972",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_BOARD_NAME, "1972"),
+ DMI_MATCH(DMI_BOARD_VERSION, "95.33"),
+ },
+ },
#endif
{ }
};
@@ -2992,8 +2950,12 @@ static const struct dmi_system_id bridge_d3_blacklist[] = {
* pci_bridge_d3_possible - Is it possible to put the bridge into D3
* @bridge: Bridge to check
*
- * This function checks if it is possible to move the bridge to D3.
- * Currently we only allow D3 for recent enough PCIe ports and Thunderbolt.
+ * Currently we only allow D3 for some PCIe ports and for Thunderbolt.
+ *
+ * Return: Whether it is possible to move the bridge to D3.
+ *
+ * The return value is guaranteed to be constant across the entire lifetime
+ * of the bridge, including its hot-removal.
*/
bool pci_bridge_d3_possible(struct pci_dev *bridge)
{
@@ -3008,10 +2970,14 @@ bool pci_bridge_d3_possible(struct pci_dev *bridge)
return false;
/*
- * Hotplug ports handled by firmware in System Management Mode
- * may not be put into D3 by the OS (Thunderbolt on non-Macs).
+ * Hotplug ports handled by platform firmware may not be put
+ * into D3 by the OS, e.g. ACPI slots ...
*/
- if (bridge->is_hotplug_bridge && !pciehp_is_native(bridge))
+ if (bridge->is_hotplug_bridge && !bridge->is_pciehp)
+ return false;
+
+ /* ... or PCIe hotplug ports not handled natively by the OS. */
+ if (bridge->is_pciehp && !pciehp_is_native(bridge))
return false;
if (pci_bridge_d3_force)
@@ -3030,17 +2996,17 @@ bool pci_bridge_d3_possible(struct pci_dev *bridge)
* by vendors for runtime D3 at least until 2018 because there
* was no OS support.
*/
- if (bridge->is_hotplug_bridge)
+ if (bridge->is_pciehp)
return false;
if (dmi_check_system(bridge_d3_blacklist))
return false;
/*
- * It should be safe to put PCIe ports from 2015 or newer
- * to D3.
+ * Out of caution, we only allow PCIe ports from 2015 or newer
+ * into D3 on x86.
*/
- if (dmi_get_bios_year() >= 2015)
+ if (!IS_ENABLED(CONFIG_X86) || dmi_get_bios_year() >= 2015)
return true;
break;
}
@@ -3154,6 +3120,12 @@ void pci_d3cold_disable(struct pci_dev *dev)
}
EXPORT_SYMBOL_GPL(pci_d3cold_disable);
+void pci_pm_power_up_and_verify_state(struct pci_dev *pci_dev)
+{
+ pci_power_up(pci_dev);
+ pci_update_current_state(pci_dev, PCI_D0);
+}
+
/**
* pci_pm_init - Initialize PM functions of given PCI device
* @dev: PCI device to handle.
@@ -3161,12 +3133,8 @@ EXPORT_SYMBOL_GPL(pci_d3cold_disable);
void pci_pm_init(struct pci_dev *dev)
{
int pm;
- u16 status;
u16 pmc;
- pm_runtime_forbid(&dev->dev);
- pm_runtime_set_active(&dev->dev);
- pm_runtime_enable(&dev->dev);
device_enable_async_suspend(&dev->dev);
dev->wakeup_prepared = false;
@@ -3176,14 +3144,14 @@ void pci_pm_init(struct pci_dev *dev)
/* find PCI PM capability in list */
pm = pci_find_capability(dev, PCI_CAP_ID_PM);
if (!pm)
- return;
+ goto poweron;
/* Check device's ability to generate PME# */
pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
pci_err(dev, "unsupported PM cap regs version (%u)\n",
pmc & PCI_PM_CAP_VER_MASK);
- return;
+ goto poweron;
}
dev->pm_cap = pm;
@@ -3214,7 +3182,7 @@ void pci_pm_init(struct pci_dev *dev)
(pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
(pmc & PCI_PM_CAP_PME_D3hot) ? " D3hot" : "",
(pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
- dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT;
+ dev->pme_support = FIELD_GET(PCI_PM_CAP_PME_MASK, pmc);
dev->pme_poll = true;
/*
* Make device's PM flags reflect the wake-up capability, but
@@ -3225,9 +3193,11 @@ void pci_pm_init(struct pci_dev *dev)
pci_pme_active(dev, false);
}
- pci_read_config_word(dev, PCI_STATUS, &status);
- if (status & PCI_STATUS_IMM_READY)
- dev->imm_ready = 1;
+poweron:
+ pci_pm_power_up_and_verify_state(dev);
+ pm_runtime_forbid(&dev->dev);
+ pm_runtime_set_active(&dev->dev);
+ pm_runtime_enable(&dev->dev);
}
static unsigned long pci_ea_flags(struct pci_dev *dev, u8 prop)
@@ -3274,6 +3244,7 @@ static struct resource *pci_ea_get_resource(struct pci_dev *dev, u8 bei,
static int pci_ea_read(struct pci_dev *dev, int offset)
{
struct resource *res;
+ const char *res_name;
int ent_size, ent_offset = offset;
resource_size_t start, end;
unsigned long flags;
@@ -3285,24 +3256,25 @@ static int pci_ea_read(struct pci_dev *dev, int offset)
ent_offset += 4;
/* Entry size field indicates DWORDs after 1st */
- ent_size = ((dw0 & PCI_EA_ES) + 1) << 2;
+ ent_size = (FIELD_GET(PCI_EA_ES, dw0) + 1) << 2;
if (!(dw0 & PCI_EA_ENABLE)) /* Entry not enabled */
goto out;
- bei = (dw0 & PCI_EA_BEI) >> 4;
- prop = (dw0 & PCI_EA_PP) >> 8;
+ bei = FIELD_GET(PCI_EA_BEI, dw0);
+ prop = FIELD_GET(PCI_EA_PP, dw0);
/*
* If the Property is in the reserved range, try the Secondary
* Property instead.
*/
if (prop > PCI_EA_P_BRIDGE_IO && prop < PCI_EA_P_MEM_RESERVED)
- prop = (dw0 & PCI_EA_SP) >> 16;
+ prop = FIELD_GET(PCI_EA_SP, dw0);
if (prop > PCI_EA_P_BRIDGE_IO)
goto out;
res = pci_ea_get_resource(dev, bei, prop);
+ res_name = pci_resource_name(dev, bei);
if (!res) {
pci_err(dev, "Unsupported EA entry BEI: %u\n", bei);
goto out;
@@ -3376,16 +3348,16 @@ static int pci_ea_read(struct pci_dev *dev, int offset)
res->flags = flags;
if (bei <= PCI_EA_BEI_BAR5)
- pci_info(dev, "BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
- bei, res, prop);
+ pci_info(dev, "%s %pR: from Enhanced Allocation, properties %#02x\n",
+ res_name, res, prop);
else if (bei == PCI_EA_BEI_ROM)
- pci_info(dev, "ROM: %pR (from Enhanced Allocation, properties %#02x)\n",
- res, prop);
+ pci_info(dev, "%s %pR: from Enhanced Allocation, properties %#02x\n",
+ res_name, res, prop);
else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5)
- pci_info(dev, "VF BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
- bei - PCI_EA_BEI_VF_BAR0, res, prop);
+ pci_info(dev, "%s %pR: from Enhanced Allocation, properties %#02x\n",
+ res_name, res, prop);
else
- pci_info(dev, "BEI %d res: %pR (from Enhanced Allocation, properties %#02x)\n",
+ pci_info(dev, "BEI %d %pR: from Enhanced Allocation, properties %#02x\n",
bei, res, prop);
out:
@@ -3687,115 +3659,6 @@ void pci_acs_init(struct pci_dev *dev)
}
/**
- * pci_rebar_find_pos - find position of resize ctrl reg for BAR
- * @pdev: PCI device
- * @bar: BAR to find
- *
- * Helper to find the position of the ctrl register for a BAR.
- * Returns -ENOTSUPP if resizable BARs are not supported at all.
- * Returns -ENOENT if no ctrl register for the BAR could be found.
- */
-static int pci_rebar_find_pos(struct pci_dev *pdev, int bar)
-{
- unsigned int pos, nbars, i;
- u32 ctrl;
-
- pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_REBAR);
- if (!pos)
- return -ENOTSUPP;
-
- pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
- nbars = (ctrl & PCI_REBAR_CTRL_NBAR_MASK) >>
- PCI_REBAR_CTRL_NBAR_SHIFT;
-
- for (i = 0; i < nbars; i++, pos += 8) {
- int bar_idx;
-
- pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
- bar_idx = ctrl & PCI_REBAR_CTRL_BAR_IDX;
- if (bar_idx == bar)
- return pos;
- }
-
- return -ENOENT;
-}
-
-/**
- * pci_rebar_get_possible_sizes - get possible sizes for BAR
- * @pdev: PCI device
- * @bar: BAR to query
- *
- * Get the possible sizes of a resizable BAR as bitmask defined in the spec
- * (bit 0=1MB, bit 19=512GB). Returns 0 if BAR isn't resizable.
- */
-u32 pci_rebar_get_possible_sizes(struct pci_dev *pdev, int bar)
-{
- int pos;
- u32 cap;
-
- pos = pci_rebar_find_pos(pdev, bar);
- if (pos < 0)
- return 0;
-
- pci_read_config_dword(pdev, pos + PCI_REBAR_CAP, &cap);
- cap &= PCI_REBAR_CAP_SIZES;
-
- /* Sapphire RX 5600 XT Pulse has an invalid cap dword for BAR 0 */
- if (pdev->vendor == PCI_VENDOR_ID_ATI && pdev->device == 0x731f &&
- bar == 0 && cap == 0x7000)
- cap = 0x3f000;
-
- return cap >> 4;
-}
-EXPORT_SYMBOL(pci_rebar_get_possible_sizes);
-
-/**
- * pci_rebar_get_current_size - get the current size of a BAR
- * @pdev: PCI device
- * @bar: BAR to set size to
- *
- * Read the size of a BAR from the resizable BAR config.
- * Returns size if found or negative error code.
- */
-int pci_rebar_get_current_size(struct pci_dev *pdev, int bar)
-{
- int pos;
- u32 ctrl;
-
- pos = pci_rebar_find_pos(pdev, bar);
- if (pos < 0)
- return pos;
-
- pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
- return (ctrl & PCI_REBAR_CTRL_BAR_SIZE) >> PCI_REBAR_CTRL_BAR_SHIFT;
-}
-
-/**
- * pci_rebar_set_size - set a new size for a BAR
- * @pdev: PCI device
- * @bar: BAR to set size to
- * @size: new size as defined in the spec (0=1MB, 19=512GB)
- *
- * Set the new size of a BAR as defined in the spec.
- * Returns zero if resizing was successful, error code otherwise.
- */
-int pci_rebar_set_size(struct pci_dev *pdev, int bar, int size)
-{
- int pos;
- u32 ctrl;
-
- pos = pci_rebar_find_pos(pdev, bar);
- if (pos < 0)
- return pos;
-
- pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
- ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE;
- ctrl |= size << PCI_REBAR_CTRL_BAR_SHIFT;
- pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl);
- return 0;
-}
-
-/**
* pci_enable_atomic_ops_to_root - enable AtomicOp requests to root port
* @dev: the PCI device
* @cap_mask: mask of desired AtomicOp sizes, including one or more of:
@@ -3879,66 +3742,6 @@ int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask)
EXPORT_SYMBOL(pci_enable_atomic_ops_to_root);
/**
- * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
- * @dev: the PCI device
- * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD)
- *
- * Perform INTx swizzling for a device behind one level of bridge. This is
- * required by section 9.1 of the PCI-to-PCI bridge specification for devices
- * behind bridges on add-in cards. For devices with ARI enabled, the slot
- * number is always 0 (see the Implementation Note in section 2.2.8.1 of
- * the PCI Express Base Specification, Revision 2.1)
- */
-u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin)
-{
- int slot;
-
- if (pci_ari_enabled(dev->bus))
- slot = 0;
- else
- slot = PCI_SLOT(dev->devfn);
-
- return (((pin - 1) + slot) % 4) + 1;
-}
-
-int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
-{
- u8 pin;
-
- pin = dev->pin;
- if (!pin)
- return -1;
-
- while (!pci_is_root_bus(dev->bus)) {
- pin = pci_swizzle_interrupt_pin(dev, pin);
- dev = dev->bus->self;
- }
- *bridge = dev;
- return pin;
-}
-
-/**
- * pci_common_swizzle - swizzle INTx all the way to root bridge
- * @dev: the PCI device
- * @pinp: pointer to the INTx pin value (1=INTA, 2=INTB, 3=INTD, 4=INTD)
- *
- * Perform INTx swizzling for a device. This traverses through all PCI-to-PCI
- * bridges all the way up to a PCI root bus.
- */
-u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
-{
- u8 pin = *pinp;
-
- while (!pci_is_root_bus(dev->bus)) {
- pin = pci_swizzle_interrupt_pin(dev, pin);
- dev = dev->bus->self;
- }
- *pinp = pin;
- return PCI_SLOT(dev->devfn);
-}
-EXPORT_SYMBOL_GPL(pci_common_swizzle);
-
-/**
* pci_release_region - Release a PCI bar
* @pdev: PCI device whose resources were previously reserved by
* pci_request_region()
@@ -3950,7 +3753,8 @@ EXPORT_SYMBOL_GPL(pci_common_swizzle);
*/
void pci_release_region(struct pci_dev *pdev, int bar)
{
- struct pci_devres *dr;
+ if (!pci_bar_index_is_valid(bar))
+ return;
if (pci_resource_len(pdev, bar) == 0)
return;
@@ -3960,10 +3764,6 @@ void pci_release_region(struct pci_dev *pdev, int bar)
else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
release_mem_region(pci_resource_start(pdev, bar),
pci_resource_len(pdev, bar));
-
- dr = find_pci_dr(pdev);
- if (dr)
- dr->region_mask &= ~(1 << bar);
}
EXPORT_SYMBOL(pci_release_region);
@@ -3971,13 +3771,14 @@ EXPORT_SYMBOL(pci_release_region);
* __pci_request_region - Reserved PCI I/O and memory resource
* @pdev: PCI device whose resources are to be reserved
* @bar: BAR to be reserved
- * @res_name: Name to be associated with resource.
+ * @name: name of the driver requesting the resource
* @exclusive: whether the region access is exclusive or not
*
- * Mark the PCI region associated with PCI device @pdev BAR @bar as
- * being reserved by owner @res_name. Do not access any
- * address inside the PCI regions unless this call returns
- * successfully.
+ * Returns: 0 on success, negative error code on failure.
+ *
+ * Mark the PCI region associated with PCI device @pdev BAR @bar as being
+ * reserved by owner @name. Do not access any address inside the PCI regions
+ * unless this call returns successfully.
*
* If @exclusive is set, then the region is marked so that userspace
* is explicitly not allowed to map the resource via /dev/mem or
@@ -3987,28 +3788,25 @@ EXPORT_SYMBOL(pci_release_region);
* message is also printed on failure.
*/
static int __pci_request_region(struct pci_dev *pdev, int bar,
- const char *res_name, int exclusive)
+ const char *name, int exclusive)
{
- struct pci_devres *dr;
+ if (!pci_bar_index_is_valid(bar))
+ return -EINVAL;
if (pci_resource_len(pdev, bar) == 0)
return 0;
if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
if (!request_region(pci_resource_start(pdev, bar),
- pci_resource_len(pdev, bar), res_name))
+ pci_resource_len(pdev, bar), name))
goto err_out;
} else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
if (!__request_mem_region(pci_resource_start(pdev, bar),
- pci_resource_len(pdev, bar), res_name,
+ pci_resource_len(pdev, bar), name,
exclusive))
goto err_out;
}
- dr = find_pci_dr(pdev);
- if (dr)
- dr->region_mask |= 1 << bar;
-
return 0;
err_out:
@@ -4021,19 +3819,20 @@ err_out:
* pci_request_region - Reserve PCI I/O and memory resource
* @pdev: PCI device whose resources are to be reserved
* @bar: BAR to be reserved
- * @res_name: Name to be associated with resource
+ * @name: name of the driver requesting the resource
+ *
+ * Returns: 0 on success, negative error code on failure.
*
- * Mark the PCI region associated with PCI device @pdev BAR @bar as
- * being reserved by owner @res_name. Do not access any
- * address inside the PCI regions unless this call returns
- * successfully.
+ * Mark the PCI region associated with PCI device @pdev BAR @bar as being
+ * reserved by owner @name. Do not access any address inside the PCI regions
+ * unless this call returns successfully.
*
* Returns 0 on success, or %EBUSY on error. A warning
* message is also printed on failure.
*/
-int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
+int pci_request_region(struct pci_dev *pdev, int bar, const char *name)
{
- return __pci_request_region(pdev, bar, res_name, 0);
+ return __pci_request_region(pdev, bar, name, 0);
}
EXPORT_SYMBOL(pci_request_region);
@@ -4056,13 +3855,13 @@ void pci_release_selected_regions(struct pci_dev *pdev, int bars)
EXPORT_SYMBOL(pci_release_selected_regions);
static int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
- const char *res_name, int excl)
+ const char *name, int excl)
{
int i;
for (i = 0; i < PCI_STD_NUM_BARS; i++)
if (bars & (1 << i))
- if (__pci_request_region(pdev, i, res_name, excl))
+ if (__pci_request_region(pdev, i, name, excl))
goto err_out;
return 0;
@@ -4079,19 +3878,29 @@ err_out:
* pci_request_selected_regions - Reserve selected PCI I/O and memory resources
* @pdev: PCI device whose resources are to be reserved
* @bars: Bitmask of BARs to be requested
- * @res_name: Name to be associated with resource
+ * @name: Name of the driver requesting the resources
+ *
+ * Returns: 0 on success, negative error code on failure.
*/
int pci_request_selected_regions(struct pci_dev *pdev, int bars,
- const char *res_name)
+ const char *name)
{
- return __pci_request_selected_regions(pdev, bars, res_name, 0);
+ return __pci_request_selected_regions(pdev, bars, name, 0);
}
EXPORT_SYMBOL(pci_request_selected_regions);
+/**
+ * pci_request_selected_regions_exclusive - Request regions exclusively
+ * @pdev: PCI device to request regions from
+ * @bars: bit mask of BARs to request
+ * @name: name of the driver requesting the resources
+ *
+ * Returns: 0 on success, negative error code on failure.
+ */
int pci_request_selected_regions_exclusive(struct pci_dev *pdev, int bars,
- const char *res_name)
+ const char *name)
{
- return __pci_request_selected_regions(pdev, bars, res_name,
+ return __pci_request_selected_regions(pdev, bars, name,
IORESOURCE_EXCLUSIVE);
}
EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
@@ -4105,7 +3914,6 @@ EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
* successful call to pci_request_regions(). Call this function only
* after all use of the PCI regions has ceased.
*/
-
void pci_release_regions(struct pci_dev *pdev)
{
pci_release_selected_regions(pdev, (1 << PCI_STD_NUM_BARS) - 1);
@@ -4115,30 +3923,31 @@ EXPORT_SYMBOL(pci_release_regions);
/**
* pci_request_regions - Reserve PCI I/O and memory resources
* @pdev: PCI device whose resources are to be reserved
- * @res_name: Name to be associated with resource.
+ * @name: name of the driver requesting the resources
*
- * Mark all PCI regions associated with PCI device @pdev as
- * being reserved by owner @res_name. Do not access any
- * address inside the PCI regions unless this call returns
- * successfully.
+ * Mark all PCI regions associated with PCI device @pdev as being reserved by
+ * owner @name. Do not access any address inside the PCI regions unless this
+ * call returns successfully.
*
* Returns 0 on success, or %EBUSY on error. A warning
* message is also printed on failure.
*/
-int pci_request_regions(struct pci_dev *pdev, const char *res_name)
+int pci_request_regions(struct pci_dev *pdev, const char *name)
{
return pci_request_selected_regions(pdev,
- ((1 << PCI_STD_NUM_BARS) - 1), res_name);
+ ((1 << PCI_STD_NUM_BARS) - 1), name);
}
EXPORT_SYMBOL(pci_request_regions);
/**
* pci_request_regions_exclusive - Reserve PCI I/O and memory resources
* @pdev: PCI device whose resources are to be reserved
- * @res_name: Name to be associated with resource.
+ * @name: name of the driver requesting the resources
+ *
+ * Returns: 0 on success, negative error code on failure.
*
* Mark all PCI regions associated with PCI device @pdev as being reserved
- * by owner @res_name. Do not access any address inside the PCI regions
+ * by owner @name. Do not access any address inside the PCI regions
* unless this call returns successfully.
*
* pci_request_regions_exclusive() will mark the region so that /dev/mem
@@ -4147,10 +3956,10 @@ EXPORT_SYMBOL(pci_request_regions);
* Returns 0 on success, or %EBUSY on error. A warning message is also
* printed on failure.
*/
-int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
+int pci_request_regions_exclusive(struct pci_dev *pdev, const char *name)
{
return pci_request_selected_regions_exclusive(pdev,
- ((1 << PCI_STD_NUM_BARS) - 1), res_name);
+ ((1 << PCI_STD_NUM_BARS) - 1), name);
}
EXPORT_SYMBOL(pci_request_regions_exclusive);
@@ -4158,7 +3967,7 @@ EXPORT_SYMBOL(pci_request_regions_exclusive);
* Record the PCI IO range (expressed as CPU physical address + size).
* Return a negative value if an error has occurred, zero otherwise
*/
-int pci_register_io_range(struct fwnode_handle *fwnode, phys_addr_t addr,
+int pci_register_io_range(const struct fwnode_handle *fwnode, phys_addr_t addr,
resource_size_t size)
{
int ret = 0;
@@ -4191,16 +4000,12 @@ int pci_register_io_range(struct fwnode_handle *fwnode, phys_addr_t addr,
phys_addr_t pci_pio_to_address(unsigned long pio)
{
- phys_addr_t address = (phys_addr_t)OF_BAD_ADDR;
-
#ifdef PCI_IOBASE
- if (pio >= MMIO_UPPER_LIMIT)
- return address;
-
- address = logic_pio_to_hwaddr(pio);
+ if (pio < MMIO_UPPER_LIMIT)
+ return logic_pio_to_hwaddr(pio);
#endif
- return address;
+ return (phys_addr_t) OF_BAD_ADDR;
}
EXPORT_SYMBOL_GPL(pci_pio_to_address);
@@ -4229,7 +4034,7 @@ unsigned long __weak pci_address_to_pio(phys_addr_t address)
#ifndef pci_remap_iospace
int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
{
-#if defined(PCI_IOBASE) && defined(CONFIG_MMU)
+#if defined(PCI_IOBASE)
unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
if (!(res->flags & IORESOURCE_IO))
@@ -4238,8 +4043,8 @@ int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
if (res->end > IO_SPACE_LIMIT)
return -EINVAL;
- return ioremap_page_range(vaddr, vaddr + resource_size(res), phys_addr,
- pgprot_device(PAGE_KERNEL));
+ return vmap_page_range(vaddr, vaddr + resource_size(res), phys_addr,
+ pgprot_device(PAGE_KERNEL));
#else
/*
* This architecture does not have memory mapped I/O space,
@@ -4262,7 +4067,7 @@ EXPORT_SYMBOL(pci_remap_iospace);
*/
void pci_unmap_iospace(struct resource *res)
{
-#if defined(PCI_IOBASE) && defined(CONFIG_MMU)
+#if defined(PCI_IOBASE)
unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
vunmap_range(vaddr, vaddr + resource_size(res));
@@ -4270,133 +4075,6 @@ void pci_unmap_iospace(struct resource *res)
}
EXPORT_SYMBOL(pci_unmap_iospace);
-static void devm_pci_unmap_iospace(struct device *dev, void *ptr)
-{
- struct resource **res = ptr;
-
- pci_unmap_iospace(*res);
-}
-
-/**
- * devm_pci_remap_iospace - Managed pci_remap_iospace()
- * @dev: Generic device to remap IO address for
- * @res: Resource describing the I/O space
- * @phys_addr: physical address of range to be mapped
- *
- * Managed pci_remap_iospace(). Map is automatically unmapped on driver
- * detach.
- */
-int devm_pci_remap_iospace(struct device *dev, const struct resource *res,
- phys_addr_t phys_addr)
-{
- const struct resource **ptr;
- int error;
-
- ptr = devres_alloc(devm_pci_unmap_iospace, sizeof(*ptr), GFP_KERNEL);
- if (!ptr)
- return -ENOMEM;
-
- error = pci_remap_iospace(res, phys_addr);
- if (error) {
- devres_free(ptr);
- } else {
- *ptr = res;
- devres_add(dev, ptr);
- }
-
- return error;
-}
-EXPORT_SYMBOL(devm_pci_remap_iospace);
-
-/**
- * devm_pci_remap_cfgspace - Managed pci_remap_cfgspace()
- * @dev: Generic device to remap IO address for
- * @offset: Resource address to map
- * @size: Size of map
- *
- * Managed pci_remap_cfgspace(). Map is automatically unmapped on driver
- * detach.
- */
-void __iomem *devm_pci_remap_cfgspace(struct device *dev,
- resource_size_t offset,
- resource_size_t size)
-{
- void __iomem **ptr, *addr;
-
- ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
- if (!ptr)
- return NULL;
-
- addr = pci_remap_cfgspace(offset, size);
- if (addr) {
- *ptr = addr;
- devres_add(dev, ptr);
- } else
- devres_free(ptr);
-
- return addr;
-}
-EXPORT_SYMBOL(devm_pci_remap_cfgspace);
-
-/**
- * devm_pci_remap_cfg_resource - check, request region and ioremap cfg resource
- * @dev: generic device to handle the resource for
- * @res: configuration space resource to be handled
- *
- * Checks that a resource is a valid memory region, requests the memory
- * region and ioremaps with pci_remap_cfgspace() API that ensures the
- * proper PCI configuration space memory attributes are guaranteed.
- *
- * All operations are managed and will be undone on driver detach.
- *
- * Returns a pointer to the remapped memory or an ERR_PTR() encoded error code
- * on failure. Usage example::
- *
- * res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- * base = devm_pci_remap_cfg_resource(&pdev->dev, res);
- * if (IS_ERR(base))
- * return PTR_ERR(base);
- */
-void __iomem *devm_pci_remap_cfg_resource(struct device *dev,
- struct resource *res)
-{
- resource_size_t size;
- const char *name;
- void __iomem *dest_ptr;
-
- BUG_ON(!dev);
-
- if (!res || resource_type(res) != IORESOURCE_MEM) {
- dev_err(dev, "invalid resource\n");
- return IOMEM_ERR_PTR(-EINVAL);
- }
-
- size = resource_size(res);
-
- if (res->name)
- name = devm_kasprintf(dev, GFP_KERNEL, "%s %s", dev_name(dev),
- res->name);
- else
- name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL);
- if (!name)
- return IOMEM_ERR_PTR(-ENOMEM);
-
- if (!devm_request_mem_region(dev, res->start, size, name)) {
- dev_err(dev, "can't request region for resource %pR\n", res);
- return IOMEM_ERR_PTR(-EBUSY);
- }
-
- dest_ptr = devm_pci_remap_cfgspace(dev, res->start, size);
- if (!dest_ptr) {
- dev_err(dev, "ioremap failed for resource %pR\n", res);
- devm_release_mem_region(dev, res->start, size);
- dest_ptr = IOMEM_ERR_PTR(-ENOMEM);
- }
-
- return dest_ptr;
-}
-EXPORT_SYMBOL(devm_pci_remap_cfg_resource);
-
static void __pci_set_master(struct pci_dev *dev, bool enable)
{
u16 old_cmd, cmd;
@@ -4547,27 +4225,6 @@ int pci_set_mwi(struct pci_dev *dev)
EXPORT_SYMBOL(pci_set_mwi);
/**
- * pcim_set_mwi - a device-managed pci_set_mwi()
- * @dev: the PCI device for which MWI is enabled
- *
- * Managed pci_set_mwi().
- *
- * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
- */
-int pcim_set_mwi(struct pci_dev *dev)
-{
- struct pci_devres *dr;
-
- dr = find_pci_dr(dev);
- if (!dr)
- return -ENOMEM;
-
- dr->mwi = 1;
- return pci_set_mwi(dev);
-}
-EXPORT_SYMBOL(pcim_set_mwi);
-
-/**
* pci_try_set_mwi - enables memory-write-invalidate PCI transaction
* @dev: the PCI device for which MWI is enabled
*
@@ -4641,92 +4298,13 @@ void pci_intx(struct pci_dev *pdev, int enable)
else
new = pci_command | PCI_COMMAND_INTX_DISABLE;
- if (new != pci_command) {
- struct pci_devres *dr;
-
- pci_write_config_word(pdev, PCI_COMMAND, new);
+ if (new == pci_command)
+ return;
- dr = find_pci_dr(pdev);
- if (dr && !dr->restore_intx) {
- dr->restore_intx = 1;
- dr->orig_intx = !enable;
- }
- }
+ pci_write_config_word(pdev, PCI_COMMAND, new);
}
EXPORT_SYMBOL_GPL(pci_intx);
-static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask)
-{
- struct pci_bus *bus = dev->bus;
- bool mask_updated = true;
- u32 cmd_status_dword;
- u16 origcmd, newcmd;
- unsigned long flags;
- bool irq_pending;
-
- /*
- * We do a single dword read to retrieve both command and status.
- * Document assumptions that make this possible.
- */
- BUILD_BUG_ON(PCI_COMMAND % 4);
- BUILD_BUG_ON(PCI_COMMAND + 2 != PCI_STATUS);
-
- raw_spin_lock_irqsave(&pci_lock, flags);
-
- bus->ops->read(bus, dev->devfn, PCI_COMMAND, 4, &cmd_status_dword);
-
- irq_pending = (cmd_status_dword >> 16) & PCI_STATUS_INTERRUPT;
-
- /*
- * Check interrupt status register to see whether our device
- * triggered the interrupt (when masking) or the next IRQ is
- * already pending (when unmasking).
- */
- if (mask != irq_pending) {
- mask_updated = false;
- goto done;
- }
-
- origcmd = cmd_status_dword;
- newcmd = origcmd & ~PCI_COMMAND_INTX_DISABLE;
- if (mask)
- newcmd |= PCI_COMMAND_INTX_DISABLE;
- if (newcmd != origcmd)
- bus->ops->write(bus, dev->devfn, PCI_COMMAND, 2, newcmd);
-
-done:
- raw_spin_unlock_irqrestore(&pci_lock, flags);
-
- return mask_updated;
-}
-
-/**
- * pci_check_and_mask_intx - mask INTx on pending interrupt
- * @dev: the PCI device to operate on
- *
- * Check if the device dev has its INTx line asserted, mask it and return
- * true in that case. False is returned if no interrupt was pending.
- */
-bool pci_check_and_mask_intx(struct pci_dev *dev)
-{
- return pci_check_and_set_intx_mask(dev, true);
-}
-EXPORT_SYMBOL_GPL(pci_check_and_mask_intx);
-
-/**
- * pci_check_and_unmask_intx - unmask INTx if no interrupt is pending
- * @dev: the PCI device to operate on
- *
- * Check if the device dev has its INTx line asserted, unmask it if not and
- * return true. False is returned and the mask remains active if there was
- * still an interrupt pending.
- */
-bool pci_check_and_unmask_intx(struct pci_dev *dev)
-{
- return pci_check_and_set_intx_mask(dev, false);
-}
-EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx);
-
/**
* pci_wait_for_pending_transaction - wait for pending transaction
* @dev: the PCI device to operate on
@@ -4917,6 +4495,11 @@ static int pcie_wait_for_link_status(struct pci_dev *pdev,
* @pdev: Device whose link to retrain.
* @use_lt: Use the LT bit if TRUE, or the DLLLA bit if FALSE, for status.
*
+ * Trigger retraining of the PCIe Link and wait for the completion of the
+ * retraining. As link retraining is known to assert LBMS and may change
+ * the Link Speed, LBMS is cleared after the retraining and the Link Speed
+ * of the subordinate bus is updated.
+ *
* Retrain completion status is retrieved from the Link Status Register
* according to @use_lt. It is not verified whether the use of the DLLLA
* bit is valid.
@@ -4927,32 +4510,49 @@ static int pcie_wait_for_link_status(struct pci_dev *pdev,
int pcie_retrain_link(struct pci_dev *pdev, bool use_lt)
{
int rc;
- u16 lnkctl;
/*
* Ensure the updated LNKCTL parameters are used during link
- * training by checking that there is no ongoing link training to
- * avoid LTSSM race as recommended in Implementation Note at the
- * end of PCIe r6.0.1 sec 7.5.3.7.
+ * training by checking that there is no ongoing link training that
+ * may have started before link parameters were changed, so as to
+ * avoid LTSSM race as recommended in Implementation Note at the end
+ * of PCIe r6.1 sec 7.5.3.7.
*/
- rc = pcie_wait_for_link_status(pdev, use_lt, !use_lt);
+ rc = pcie_wait_for_link_status(pdev, true, false);
if (rc)
return rc;
- pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &lnkctl);
- lnkctl |= PCI_EXP_LNKCTL_RL;
- pcie_capability_write_word(pdev, PCI_EXP_LNKCTL, lnkctl);
+ pcie_capability_set_word(pdev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_RL);
if (pdev->clear_retrain_link) {
/*
* Due to an erratum in some devices the Retrain Link bit
* needs to be cleared again manually to allow the link
* training to succeed.
*/
- lnkctl &= ~PCI_EXP_LNKCTL_RL;
- pcie_capability_write_word(pdev, PCI_EXP_LNKCTL, lnkctl);
+ pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_RL);
}
- return pcie_wait_for_link_status(pdev, use_lt, !use_lt);
+ rc = pcie_wait_for_link_status(pdev, use_lt, !use_lt);
+
+ /*
+ * Clear LBMS after a manual retrain so that the bit can be used
+ * to track link speed or width changes made by the hardware itself
+ * in an attempt to correct unreliable link operation.
+ */
+ pcie_reset_lbms(pdev);
+
+ /*
+ * Ensure the Link Speed updates after retraining in case the Link
+ * Speed was changed because of the retraining. While the bwctrl's
+ * IRQ handler normally picks up the new Link Speed, clearing LBMS
+ * races with the IRQ handler reading the Link Status register and
+ * can result in the handler returning early without updating the
+ * Link Speed.
+ */
+ if (pdev->subordinate)
+ pcie_update_link_speed(pdev->subordinate);
+
+ return rc;
}
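
/*
 * Illustrative sketch, not part of this patch: a Downstream Port driver
 * that has just changed link parameters could retrain and report failure
 * like this; the caller and its error handling are assumptions.
 */
static int example_retrain_port(struct pci_dev *port)
{
	int ret;

	/* LNKCTL/LNKCTL2 updates would already have been written here */
	ret = pcie_retrain_link(port, true);	/* poll the LT bit for status */
	if (ret)
		pci_err(port, "link retraining failed: %d\n", ret);

	return ret;
}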
/**
@@ -4979,7 +4579,7 @@ static bool pcie_wait_for_link_delay(struct pci_dev *pdev, bool active,
/*
* PCIe r4.0 sec 6.6.1, a component must enter LTSSM Detect within 20ms,
- * after which we should expect an link active if the reset was
+ * after which we should expect the link to be active if the reset was
* successful. If so, software must wait a minimum 100ms before sending
* configuration requests to devices downstream this port.
*
@@ -5058,7 +4658,7 @@ static int pci_bus_max_d3cold_delay(const struct pci_bus *bus)
*/
int pci_bridge_wait_for_secondary_bus(struct pci_dev *dev, char *reset_type)
{
- struct pci_dev *child;
+ struct pci_dev *child __free(pci_dev_put) = NULL;
int delay;
if (pci_dev_is_disconnected(dev))
@@ -5087,8 +4687,8 @@ int pci_bridge_wait_for_secondary_bus(struct pci_dev *dev, char *reset_type)
return 0;
}
- child = list_first_entry(&dev->subordinate->devices, struct pci_dev,
- bus_list);
+ child = pci_dev_get(list_first_entry(&dev->subordinate->devices,
+ struct pci_dev, bus_list));
up_read(&pci_bus_sem);
/*
@@ -5148,7 +4748,7 @@ int pci_bridge_wait_for_secondary_bus(struct pci_dev *dev, char *reset_type)
delay);
if (!pcie_wait_for_link_delay(dev, true, delay)) {
/* Did not train, no need to wait any further */
- pci_info(dev, "Data Link Layer Link Active not set in 1000 msec\n");
+ pci_info(dev, "Data Link Layer Link Active not set in %d msec\n", delay);
return -ENOTTY;
}
@@ -5188,6 +4788,9 @@ void __weak pcibios_reset_secondary_bus(struct pci_dev *dev)
*/
int pci_bridge_secondary_bus_reset(struct pci_dev *dev)
{
+ if (!dev->block_cfg_access)
+ pci_warn_once(dev, "unlocked secondary bus reset via: %pS\n",
+ __builtin_return_address(0));
pcibios_reset_secondary_bus(dev);
return pci_bridge_wait_for_secondary_bus(dev, "bus reset");
@@ -5236,16 +4839,96 @@ static int pci_dev_reset_slot_function(struct pci_dev *dev, bool probe)
return pci_reset_hotplug_slot(dev->slot->hotplug, probe);
}
+static u16 cxl_port_dvsec(struct pci_dev *dev)
+{
+ return pci_find_dvsec_capability(dev, PCI_VENDOR_ID_CXL,
+ PCI_DVSEC_CXL_PORT);
+}
+
+static bool cxl_sbr_masked(struct pci_dev *dev)
+{
+ u16 dvsec, reg;
+ int rc;
+
+ dvsec = cxl_port_dvsec(dev);
+ if (!dvsec)
+ return false;
+
+ rc = pci_read_config_word(dev, dvsec + PCI_DVSEC_CXL_PORT_CTL, &reg);
+ if (rc || PCI_POSSIBLE_ERROR(reg))
+ return false;
+
+ /*
+ * Per CXL spec r3.1, sec 8.1.5.2, when "Unmask SBR" is 0, the SBR
+ * bit in Bridge Control has no effect. When 1, the Port generates
+ * hot reset when the SBR bit is set to 1.
+ */
+ if (reg & PCI_DVSEC_CXL_PORT_CTL_UNMASK_SBR)
+ return false;
+
+ return true;
+}
+
static int pci_reset_bus_function(struct pci_dev *dev, bool probe)
{
+ struct pci_dev *bridge = pci_upstream_bridge(dev);
int rc;
+ /*
+ * If "dev" is below a CXL port that has SBR control masked, SBR
+ * won't do anything, so return error.
+ */
+ if (bridge && cxl_sbr_masked(bridge)) {
+ if (probe)
+ return 0;
+
+ return -ENOTTY;
+ }
+
rc = pci_dev_reset_slot_function(dev, probe);
if (rc != -ENOTTY)
return rc;
return pci_parent_bus_reset(dev, probe);
}
+static int cxl_reset_bus_function(struct pci_dev *dev, bool probe)
+{
+ struct pci_dev *bridge;
+ u16 dvsec, reg, val;
+ int rc;
+
+ bridge = pci_upstream_bridge(dev);
+ if (!bridge)
+ return -ENOTTY;
+
+ dvsec = cxl_port_dvsec(bridge);
+ if (!dvsec)
+ return -ENOTTY;
+
+ if (probe)
+ return 0;
+
+ rc = pci_read_config_word(bridge, dvsec + PCI_DVSEC_CXL_PORT_CTL, &reg);
+ if (rc)
+ return -ENOTTY;
+
+ if (reg & PCI_DVSEC_CXL_PORT_CTL_UNMASK_SBR) {
+ val = reg;
+ } else {
+ val = reg | PCI_DVSEC_CXL_PORT_CTL_UNMASK_SBR;
+ pci_write_config_word(bridge, dvsec + PCI_DVSEC_CXL_PORT_CTL,
+ val);
+ }
+
+ rc = pci_reset_bus_function(dev, probe);
+
+ if (reg != val)
+ pci_write_config_word(bridge, dvsec + PCI_DVSEC_CXL_PORT_CTL,
+ reg);
+
+ return rc;
+}
+
void pci_dev_lock(struct pci_dev *dev)
{
/* block PM suspend, driver probe, etc. */
@@ -5286,6 +4969,8 @@ static void pci_dev_save_and_disable(struct pci_dev *dev)
*/
if (err_handler && err_handler->reset_prepare)
err_handler->reset_prepare(dev);
+ else if (dev->driver)
+ pci_warn(dev, "resetting");
/*
* Wake-up device prior to save. PM registers default to D0 after
@@ -5319,10 +5004,12 @@ static void pci_dev_restore(struct pci_dev *dev)
*/
if (err_handler && err_handler->reset_done)
err_handler->reset_done(dev);
+ else if (dev->driver)
+ pci_warn(dev, "reset done");
}
/* dev->reset_methods[] is a 0-terminated list of indices into this array */
-static const struct pci_reset_fn_method pci_reset_fn_methods[] = {
+const struct pci_reset_fn_method pci_reset_fn_methods[] = {
{ },
{ pci_dev_specific_reset, .name = "device_specific" },
{ pci_dev_acpi_reset, .name = "acpi" },
@@ -5330,128 +5017,7 @@ static const struct pci_reset_fn_method pci_reset_fn_methods[] = {
{ pci_af_flr, .name = "af_flr" },
{ pci_pm_reset, .name = "pm" },
{ pci_reset_bus_function, .name = "bus" },
-};
-
-static ssize_t reset_method_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- ssize_t len = 0;
- int i, m;
-
- for (i = 0; i < PCI_NUM_RESET_METHODS; i++) {
- m = pdev->reset_methods[i];
- if (!m)
- break;
-
- len += sysfs_emit_at(buf, len, "%s%s", len ? " " : "",
- pci_reset_fn_methods[m].name);
- }
-
- if (len)
- len += sysfs_emit_at(buf, len, "\n");
-
- return len;
-}
-
-static int reset_method_lookup(const char *name)
-{
- int m;
-
- for (m = 1; m < PCI_NUM_RESET_METHODS; m++) {
- if (sysfs_streq(name, pci_reset_fn_methods[m].name))
- return m;
- }
-
- return 0; /* not found */
-}
-
-static ssize_t reset_method_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- char *options, *name;
- int m, n;
- u8 reset_methods[PCI_NUM_RESET_METHODS] = { 0 };
-
- if (sysfs_streq(buf, "")) {
- pdev->reset_methods[0] = 0;
- pci_warn(pdev, "All device reset methods disabled by user");
- return count;
- }
-
- if (sysfs_streq(buf, "default")) {
- pci_init_reset_methods(pdev);
- return count;
- }
-
- options = kstrndup(buf, count, GFP_KERNEL);
- if (!options)
- return -ENOMEM;
-
- n = 0;
- while ((name = strsep(&options, " ")) != NULL) {
- if (sysfs_streq(name, ""))
- continue;
-
- name = strim(name);
-
- m = reset_method_lookup(name);
- if (!m) {
- pci_err(pdev, "Invalid reset method '%s'", name);
- goto error;
- }
-
- if (pci_reset_fn_methods[m].reset_fn(pdev, PCI_RESET_PROBE)) {
- pci_err(pdev, "Unsupported reset method '%s'", name);
- goto error;
- }
-
- if (n == PCI_NUM_RESET_METHODS - 1) {
- pci_err(pdev, "Too many reset methods\n");
- goto error;
- }
-
- reset_methods[n++] = m;
- }
-
- reset_methods[n] = 0;
-
- /* Warn if dev-specific supported but not highest priority */
- if (pci_reset_fn_methods[1].reset_fn(pdev, PCI_RESET_PROBE) == 0 &&
- reset_methods[0] != 1)
- pci_warn(pdev, "Device-specific reset disabled/de-prioritized by user");
- memcpy(pdev->reset_methods, reset_methods, sizeof(pdev->reset_methods));
- kfree(options);
- return count;
-
-error:
- /* Leave previous methods unchanged */
- kfree(options);
- return -EINVAL;
-}
-static DEVICE_ATTR_RW(reset_method);
-
-static struct attribute *pci_dev_reset_method_attrs[] = {
- &dev_attr_reset_method.attr,
- NULL,
-};
-
-static umode_t pci_dev_reset_method_attr_is_visible(struct kobject *kobj,
- struct attribute *a, int n)
-{
- struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
-
- if (!pci_reset_supported(pdev))
- return 0;
-
- return a->mode;
-}
-
-const struct attribute_group pci_dev_reset_method_attr_group = {
- .attrs = pci_dev_reset_method_attrs,
- .is_visible = pci_dev_reset_method_attr_is_visible,
+ { cxl_reset_bus_function, .name = "cxl_bus" },
};
/**
@@ -5477,6 +5043,7 @@ const struct attribute_group pci_dev_reset_method_attr_group = {
int __pci_reset_function_locked(struct pci_dev *dev)
{
int i, m, rc;
+ const struct pci_reset_fn_method *method;
might_sleep();
@@ -5493,9 +5060,13 @@ int __pci_reset_function_locked(struct pci_dev *dev)
if (!m)
return -ENOTTY;
- rc = pci_reset_fn_methods[m].reset_fn(dev, PCI_RESET_DO_RESET);
+ method = &pci_reset_fn_methods[m];
+ pci_dbg(dev, "reset via %s\n", method->name);
+ rc = method->reset_fn(dev, PCI_RESET_DO_RESET);
if (!rc)
return 0;
+
+ pci_dbg(dev, "%s failed with %d\n", method->name, rc);
if (rc != -ENOTTY)
return rc;
}
@@ -5554,11 +5125,20 @@ void pci_init_reset_methods(struct pci_dev *dev)
*/
int pci_reset_function(struct pci_dev *dev)
{
+ struct pci_dev *bridge;
int rc;
if (!pci_reset_supported(dev))
return -ENOTTY;
+ /*
+ * If there's no upstream bridge, no locking is needed since there is
+ * no upstream bridge configuration to hold consistent.
+ */
+ bridge = pci_upstream_bridge(dev);
+ if (bridge)
+ pci_dev_lock(bridge);
+
pci_dev_lock(dev);
pci_dev_save_and_disable(dev);
@@ -5567,6 +5147,9 @@ int pci_reset_function(struct pci_dev *dev)
pci_dev_restore(dev);
pci_dev_unlock(dev);
+ if (bridge)
+ pci_dev_unlock(bridge);
+
return rc;
}
EXPORT_SYMBOL_GPL(pci_reset_function);
@@ -5631,7 +5214,7 @@ int pci_try_reset_function(struct pci_dev *dev)
EXPORT_SYMBOL_GPL(pci_try_reset_function);
/* Do any devices on or below this bus prevent a bus reset? */
-static bool pci_bus_resetable(struct pci_bus *bus)
+static bool pci_bus_resettable(struct pci_bus *bus)
{
struct pci_dev *dev;
@@ -5641,7 +5224,7 @@ static bool pci_bus_resetable(struct pci_bus *bus)
list_for_each_entry(dev, &bus->devices, bus_list) {
if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
- (dev->subordinate && !pci_bus_resetable(dev->subordinate)))
+ (dev->subordinate && !pci_bus_resettable(dev->subordinate)))
return false;
}
@@ -5653,10 +5236,12 @@ static void pci_bus_lock(struct pci_bus *bus)
{
struct pci_dev *dev;
+ pci_dev_lock(bus->self);
list_for_each_entry(dev, &bus->devices, bus_list) {
- pci_dev_lock(dev);
if (dev->subordinate)
pci_bus_lock(dev->subordinate);
+ else
+ pci_dev_lock(dev);
}
}
@@ -5668,8 +5253,10 @@ static void pci_bus_unlock(struct pci_bus *bus)
list_for_each_entry(dev, &bus->devices, bus_list) {
if (dev->subordinate)
pci_bus_unlock(dev->subordinate);
- pci_dev_unlock(dev);
+ else
+ pci_dev_unlock(dev);
}
+ pci_dev_unlock(bus->self);
}
/* Return 1 on successful lock, 0 on contention */
@@ -5677,15 +5264,15 @@ static int pci_bus_trylock(struct pci_bus *bus)
{
struct pci_dev *dev;
+ if (!pci_dev_trylock(bus->self))
+ return 0;
+
list_for_each_entry(dev, &bus->devices, bus_list) {
- if (!pci_dev_trylock(dev))
- goto unlock;
if (dev->subordinate) {
- if (!pci_bus_trylock(dev->subordinate)) {
- pci_dev_unlock(dev);
+ if (!pci_bus_trylock(dev->subordinate))
goto unlock;
- }
- }
+ } else if (!pci_dev_trylock(dev))
+ goto unlock;
}
return 1;
@@ -5693,13 +5280,15 @@ unlock:
list_for_each_entry_continue_reverse(dev, &bus->devices, bus_list) {
if (dev->subordinate)
pci_bus_unlock(dev->subordinate);
- pci_dev_unlock(dev);
+ else
+ pci_dev_unlock(dev);
}
+ pci_dev_unlock(bus->self);
return 0;
}
/* Do any devices on or below this slot prevent a bus reset? */
-static bool pci_slot_resetable(struct pci_slot *slot)
+static bool pci_slot_resettable(struct pci_slot *slot)
{
struct pci_dev *dev;
@@ -5711,7 +5300,7 @@ static bool pci_slot_resetable(struct pci_slot *slot)
if (!dev->slot || dev->slot != slot)
continue;
if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
- (dev->subordinate && !pci_bus_resetable(dev->subordinate)))
+ (dev->subordinate && !pci_bus_resettable(dev->subordinate)))
return false;
}
@@ -5726,9 +5315,10 @@ static void pci_slot_lock(struct pci_slot *slot)
list_for_each_entry(dev, &slot->bus->devices, bus_list) {
if (!dev->slot || dev->slot != slot)
continue;
- pci_dev_lock(dev);
if (dev->subordinate)
pci_bus_lock(dev->subordinate);
+ else
+ pci_dev_lock(dev);
}
}
@@ -5742,7 +5332,8 @@ static void pci_slot_unlock(struct pci_slot *slot)
continue;
if (dev->subordinate)
pci_bus_unlock(dev->subordinate);
- pci_dev_unlock(dev);
+ else
+ pci_dev_unlock(dev);
}
}
@@ -5754,14 +5345,13 @@ static int pci_slot_trylock(struct pci_slot *slot)
list_for_each_entry(dev, &slot->bus->devices, bus_list) {
if (!dev->slot || dev->slot != slot)
continue;
- if (!pci_dev_trylock(dev))
- goto unlock;
if (dev->subordinate) {
if (!pci_bus_trylock(dev->subordinate)) {
pci_dev_unlock(dev);
goto unlock;
}
- }
+ } else if (!pci_dev_trylock(dev))
+ goto unlock;
}
return 1;
@@ -5772,7 +5362,8 @@ unlock:
continue;
if (dev->subordinate)
pci_bus_unlock(dev->subordinate);
- pci_dev_unlock(dev);
+ else
+ pci_dev_unlock(dev);
}
return 0;
}
@@ -5803,8 +5394,10 @@ static void pci_bus_restore_locked(struct pci_bus *bus)
list_for_each_entry(dev, &bus->devices, bus_list) {
pci_dev_restore(dev);
- if (dev->subordinate)
+ if (dev->subordinate) {
+ pci_bridge_wait_for_secondary_bus(dev, "bus reset");
pci_bus_restore_locked(dev->subordinate);
+ }
}
}
@@ -5838,8 +5431,10 @@ static void pci_slot_restore_locked(struct pci_slot *slot)
if (!dev->slot || dev->slot != slot)
continue;
pci_dev_restore(dev);
- if (dev->subordinate)
+ if (dev->subordinate) {
+ pci_bridge_wait_for_secondary_bus(dev, "slot reset");
pci_bus_restore_locked(dev->subordinate);
+ }
}
}
@@ -5847,7 +5442,7 @@ static int pci_slot_reset(struct pci_slot *slot, bool probe)
{
int rc;
- if (!slot || !pci_slot_resetable(slot))
+ if (!slot || !pci_slot_resettable(slot))
return -ENOTTY;
if (!probe)
@@ -5914,7 +5509,7 @@ static int pci_bus_reset(struct pci_bus *bus, bool probe)
{
int ret;
- if (!bus->self || !pci_bus_resetable(bus))
+ if (!bus->self || !pci_bus_resettable(bus))
return -ENOTTY;
if (probe)
@@ -5984,7 +5579,7 @@ EXPORT_SYMBOL_GPL(pci_probe_reset_bus);
*
* Same as above except return -EAGAIN if the bus cannot be locked
*/
-static int __pci_reset_bus(struct pci_bus *bus)
+int __pci_reset_bus(struct pci_bus *bus)
{
int rc;
@@ -6036,7 +5631,7 @@ int pcix_get_max_mmrbc(struct pci_dev *dev)
if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
return -EINVAL;
- return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21);
+ return 512 << FIELD_GET(PCI_X_STATUS_MAX_READ, stat);
}
EXPORT_SYMBOL(pcix_get_max_mmrbc);
@@ -6059,7 +5654,7 @@ int pcix_get_mmrbc(struct pci_dev *dev)
if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
return -EINVAL;
- return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
+ return 512 << FIELD_GET(PCI_X_CMD_MAX_READ, cmd);
}
EXPORT_SYMBOL(pcix_get_mmrbc);
@@ -6090,19 +5685,19 @@ int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
return -EINVAL;
- if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21)
+ if (v > FIELD_GET(PCI_X_STATUS_MAX_READ, stat))
return -E2BIG;
if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
return -EINVAL;
- o = (cmd & PCI_X_CMD_MAX_READ) >> 2;
+ o = FIELD_GET(PCI_X_CMD_MAX_READ, cmd);
if (o != v) {
if (v > o && (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC))
return -EIO;
cmd &= ~PCI_X_CMD_MAX_READ;
- cmd |= v << 2;
+ cmd |= FIELD_PREP(PCI_X_CMD_MAX_READ, v);
if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd))
return -EIO;
}
@@ -6122,7 +5717,7 @@ int pcie_get_readrq(struct pci_dev *dev)
pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
- return 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);
+ return 128 << FIELD_GET(PCI_EXP_DEVCTL_READRQ, ctl);
}
EXPORT_SYMBOL(pcie_get_readrq);
@@ -6138,6 +5733,7 @@ int pcie_set_readrq(struct pci_dev *dev, int rq)
{
u16 v;
int ret;
+ unsigned int firstbit;
struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
@@ -6155,7 +5751,10 @@ int pcie_set_readrq(struct pci_dev *dev, int rq)
rq = mps;
}
- v = (ffs(rq) - 8) << 12;
+ firstbit = ffs(rq);
+ if (firstbit < 8)
+ return -EINVAL;
+ v = FIELD_PREP(PCI_EXP_DEVCTL_READRQ, firstbit - 8);
if (bridge->no_inc_mrrs) {
int max_mrrs = pcie_get_readrq(dev);
@@ -6185,7 +5784,7 @@ int pcie_get_mps(struct pci_dev *dev)
pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
- return 128 << ((ctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
+ return 128 << FIELD_GET(PCI_EXP_DEVCTL_PAYLOAD, ctl);
}
EXPORT_SYMBOL(pcie_get_mps);
@@ -6208,7 +5807,7 @@ int pcie_set_mps(struct pci_dev *dev, int mps)
v = ffs(mps) - 8;
if (v > dev->pcie_mpss)
return -EINVAL;
- v <<= 5;
+ v = FIELD_PREP(PCI_EXP_DEVCTL_PAYLOAD, v);
ret = pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
PCI_EXP_DEVCTL_PAYLOAD, v);
@@ -6217,6 +5816,24 @@ int pcie_set_mps(struct pci_dev *dev, int mps)
}
EXPORT_SYMBOL(pcie_set_mps);
+static enum pci_bus_speed to_pcie_link_speed(u16 lnksta)
+{
+ return pcie_link_speed[FIELD_GET(PCI_EXP_LNKSTA_CLS, lnksta)];
+}
+
+int pcie_link_speed_mbps(struct pci_dev *pdev)
+{
+ u16 lnksta;
+ int err;
+
+ err = pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnksta);
+ if (err)
+ return err;
+
+ return pcie_dev_speed_mbps(to_pcie_link_speed(lnksta));
+}
+EXPORT_SYMBOL(pcie_link_speed_mbps);
+
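+
Illustrative sketch, not part of this patch: a driver could log its negotiated
rate with the new helper; the wrapper function below is an assumption.

static void example_log_link_rate(struct pci_dev *pdev)
{
	int mbps = pcie_link_speed_mbps(pdev);

	if (mbps < 0)
		pci_warn(pdev, "failed to read link speed: %d\n", mbps);
	else
		pci_info(pdev, "negotiated link rate: %d Mb/s\n", mbps);
}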
/**
* pcie_bandwidth_available - determine minimum link settings of a PCIe
* device and its bandwidth limitation
@@ -6250,9 +5867,8 @@ u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev,
while (dev) {
pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
- next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS];
- next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >>
- PCI_EXP_LNKSTA_NLW_SHIFT;
+ next_speed = to_pcie_link_speed(lnksta);
+ next_width = FIELD_GET(PCI_EXP_LNKSTA_NLW, lnksta);
next_bw = next_width * PCIE_SPEED2MBS_ENC(next_speed);
@@ -6276,38 +5892,66 @@ u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev,
EXPORT_SYMBOL(pcie_bandwidth_available);
/**
- * pcie_get_speed_cap - query for the PCI device's link speed capability
+ * pcie_get_supported_speeds - query Supported Link Speed Vector
* @dev: PCI device to query
*
- * Query the PCI device speed capability. Return the maximum link speed
- * supported by the device.
+ * Query @dev supported link speeds.
+ *
+ * Implementation Note in PCIe r6.0 sec 7.5.3.18 recommends determining
+ * supported link speeds using the Supported Link Speeds Vector in the Link
+ * Capabilities 2 Register (when available).
+ *
+ * Link Capabilities 2 was added in PCIe r3.0, sec 7.8.18.
+ *
+ * Without Link Capabilities 2, i.e., prior to PCIe r3.0, the Supported Link
+ * Speeds field in Link Capabilities is used, and only 2.5 GT/s and 5.0 GT/s
+ * speeds were defined.
+ *
+ * For @dev without the Supported Link Speeds Vector, the field is synthesized
+ * from the Max Link Speed field in the Link Capabilities Register.
+ *
+ * Return: Supported Link Speeds Vector (+ reserved 0 at LSB).
*/
-enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev)
+u8 pcie_get_supported_speeds(struct pci_dev *dev)
{
u32 lnkcap2, lnkcap;
+ u8 speeds;
/*
- * Link Capabilities 2 was added in PCIe r3.0, sec 7.8.18. The
- * implementation note there recommends using the Supported Link
- * Speeds Vector in Link Capabilities 2 when supported.
- *
- * Without Link Capabilities 2, i.e., prior to PCIe r3.0, software
- * should use the Supported Link Speeds field in Link Capabilities,
- * where only 2.5 GT/s and 5.0 GT/s speeds were defined.
+ * Speeds retain the reserved 0 at LSB before PCIe Supported Link
+ * Speeds Vector to allow using SLS Vector bit defines directly.
*/
pcie_capability_read_dword(dev, PCI_EXP_LNKCAP2, &lnkcap2);
+ speeds = lnkcap2 & PCI_EXP_LNKCAP2_SLS;
+
+ /* Ignore speeds higher than Max Link Speed */
+ pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
+ speeds &= GENMASK(lnkcap & PCI_EXP_LNKCAP_SLS, 0);
/* PCIe r3.0-compliant */
- if (lnkcap2)
- return PCIE_LNKCAP2_SLS2SPEED(lnkcap2);
+ if (speeds)
+ return speeds;
- pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
+ /* Synthesize from the Max Link Speed field */
if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_5_0GB)
- return PCIE_SPEED_5_0GT;
+ speeds = PCI_EXP_LNKCAP2_SLS_5_0GB | PCI_EXP_LNKCAP2_SLS_2_5GB;
else if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_2_5GB)
- return PCIE_SPEED_2_5GT;
+ speeds = PCI_EXP_LNKCAP2_SLS_2_5GB;
- return PCI_SPEED_UNKNOWN;
+ return speeds;
+}
+
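+
Illustrative sketch, not part of this patch: because the returned vector keeps
the reserved bit 0, the LNKCAP2 SLS bit defines can be tested against it
directly; the caller below is an assumption.

static bool example_supports_16gt(struct pci_dev *pdev)
{
	return pcie_get_supported_speeds(pdev) & PCI_EXP_LNKCAP2_SLS_16_0GB;
}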
+/**
+ * pcie_get_speed_cap - query for the PCI device's link speed capability
+ * @dev: PCI device to query
+ *
+ * Query the PCI device speed capability.
+ *
+ * Return: the maximum link speed supported by the device.
+ */
+enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev)
+{
+ return PCIE_LNKCAP2_SLS2SPEED(dev->supported_speeds);
}
EXPORT_SYMBOL(pcie_get_speed_cap);
@@ -6324,7 +5968,7 @@ enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev)
pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
if (lnkcap)
- return (lnkcap & PCI_EXP_LNKCAP_MLW) >> 4;
+ return FIELD_GET(PCI_EXP_LNKCAP_MLW, lnkcap);
return PCIE_LNK_WIDTH_UNKNOWN;
}
@@ -6340,8 +5984,9 @@ EXPORT_SYMBOL(pcie_get_width_cap);
* and width, multiplying them, and applying encoding overhead. The result
* is in Mb/s, i.e., megabits/second of raw bandwidth.
*/
-u32 pcie_bandwidth_capable(struct pci_dev *dev, enum pci_bus_speed *speed,
- enum pcie_link_width *width)
+static u32 pcie_bandwidth_capable(struct pci_dev *dev,
+ enum pci_bus_speed *speed,
+ enum pcie_link_width *width)
{
*speed = pcie_get_speed_cap(dev);
*width = pcie_get_width_cap(dev);
@@ -6368,21 +6013,25 @@ void __pcie_print_link_status(struct pci_dev *dev, bool verbose)
enum pci_bus_speed speed, speed_cap;
struct pci_dev *limiting_dev = NULL;
u32 bw_avail, bw_cap;
+ char *flit_mode = "";
bw_cap = pcie_bandwidth_capable(dev, &speed_cap, &width_cap);
bw_avail = pcie_bandwidth_available(dev, &limiting_dev, &speed, &width);
+ if (dev->bus && dev->bus->flit_mode)
+ flit_mode = ", in Flit mode";
+
if (bw_avail >= bw_cap && verbose)
- pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth (%s x%d link)\n",
+ pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth (%s x%d link)%s\n",
bw_cap / 1000, bw_cap % 1000,
- pci_speed_string(speed_cap), width_cap);
+ pci_speed_string(speed_cap), width_cap, flit_mode);
else if (bw_avail < bw_cap)
- pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth, limited by %s x%d link at %s (capable of %u.%03u Gb/s with %s x%d link)\n",
+ pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth, limited by %s x%d link at %s (capable of %u.%03u Gb/s with %s x%d link)%s\n",
bw_avail / 1000, bw_avail % 1000,
pci_speed_string(speed), width,
limiting_dev ? pci_name(limiting_dev) : "<unknown>",
bw_cap / 1000, bw_cap % 1000,
- pci_speed_string(speed_cap), width_cap);
+ pci_speed_string(speed_cap), width_cap, flit_mode);
}
/**
@@ -6682,14 +6331,15 @@ static void pci_request_resource_alignment(struct pci_dev *dev, int bar,
resource_size_t align, bool resize)
{
struct resource *r = &dev->resource[bar];
+ const char *r_name = pci_resource_name(dev, bar);
resource_size_t size;
if (!(r->flags & IORESOURCE_MEM))
return;
if (r->flags & IORESOURCE_PCI_FIXED) {
- pci_info(dev, "BAR%d %pR: ignoring requested alignment %#llx\n",
- bar, r, (unsigned long long)align);
+ pci_info(dev, "%s %pR: ignoring requested alignment %#llx\n",
+ r_name, r, (unsigned long long)align);
return;
}
@@ -6725,8 +6375,8 @@ static void pci_request_resource_alignment(struct pci_dev *dev, int bar,
* devices and we use the second.
*/
- pci_info(dev, "BAR%d %pR: requesting alignment to %#llx\n",
- bar, r, (unsigned long long)align);
+ pci_info(dev, "%s %pR: requesting alignment to %#llx\n",
+ r_name, r, (unsigned long long)align);
if (resize) {
r->start = 0;
@@ -6734,8 +6384,7 @@ static void pci_request_resource_alignment(struct pci_dev *dev, int bar,
} else {
r->flags &= ~IORESOURCE_SIZEALIGN;
r->flags |= IORESOURCE_STARTALIGN;
- r->start = align;
- r->end = r->start + size - 1;
+ resource_set_range(r, align, size);
}
r->flags |= IORESOURCE_UNSET;
}
@@ -6859,9 +6508,31 @@ static void pci_no_domains(void)
#endif
}
+#ifdef CONFIG_PCI_DOMAINS
+static DEFINE_IDA(pci_domain_nr_dynamic_ida);
+
+/**
+ * pci_bus_find_emul_domain_nr() - allocate a PCI domain number per constraints
+ * @hint: desired domain, 0 if any ID in the range of @min to @max is acceptable
+ * @min: minimum allowable domain
+ * @max: maximum allowable domain, no IDs higher than INT_MAX will be returned
+ */
+int pci_bus_find_emul_domain_nr(u32 hint, u32 min, u32 max)
+{
+ return ida_alloc_range(&pci_domain_nr_dynamic_ida, max(hint, min), max,
+ GFP_KERNEL);
+}
+EXPORT_SYMBOL_GPL(pci_bus_find_emul_domain_nr);
+
+void pci_bus_release_emul_domain_nr(int domain_nr)
+{
+ ida_free(&pci_domain_nr_dynamic_ida, domain_nr);
+}
+EXPORT_SYMBOL_GPL(pci_bus_release_emul_domain_nr);
+#endif
+
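+
Illustrative sketch, not part of this patch: an emulated host-bridge driver
could pair the two helpers like this; the 0x10000 floor is an assumption,
chosen only to stay clear of firmware-assigned domains.

static int example_alloc_emul_domain(void)
{
	int domain = pci_bus_find_emul_domain_nr(0, 0x10000, INT_MAX);

	if (domain < 0)
		return domain;

	/* ... create the emulated bus in "domain", then on teardown: ... */
	pci_bus_release_emul_domain_nr(domain);
	return 0;
}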
#ifdef CONFIG_PCI_DOMAINS_GENERIC
static DEFINE_IDA(pci_domain_nr_static_ida);
-static DEFINE_IDA(pci_domain_nr_dynamic_ida);
static void of_pci_reserve_static_domain_nr(void)
{
@@ -6914,16 +6585,16 @@ static int of_pci_bus_find_domain_nr(struct device *parent)
return ida_alloc(&pci_domain_nr_dynamic_ida, GFP_KERNEL);
}
-static void of_pci_bus_release_domain_nr(struct pci_bus *bus, struct device *parent)
+static void of_pci_bus_release_domain_nr(struct device *parent, int domain_nr)
{
- if (bus->domain_nr < 0)
+ if (domain_nr < 0)
return;
/* Release domain from IDA where it was allocated. */
- if (of_get_pci_domain_nr(parent->of_node) == bus->domain_nr)
- ida_free(&pci_domain_nr_static_ida, bus->domain_nr);
+ if (of_get_pci_domain_nr(parent->of_node) == domain_nr)
+ ida_free(&pci_domain_nr_static_ida, domain_nr);
else
- ida_free(&pci_domain_nr_dynamic_ida, bus->domain_nr);
+ ida_free(&pci_domain_nr_dynamic_ida, domain_nr);
}
int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent)
@@ -6932,11 +6603,11 @@ int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent)
acpi_pci_bus_find_domain_nr(bus);
}
-void pci_bus_release_domain_nr(struct pci_bus *bus, struct device *parent)
+void pci_bus_release_domain_nr(struct device *parent, int domain_nr)
{
if (!acpi_disabled)
return;
- of_pci_bus_release_domain_nr(bus, parent);
+ of_pci_bus_release_domain_nr(parent, domain_nr);
}
#endif
@@ -6952,11 +6623,6 @@ int __weak pci_ext_cfg_avail(void)
return 1;
}
-void __weak pci_fixup_cardbus(struct pci_bus *bus)
-{
-}
-EXPORT_SYMBOL(pci_fixup_cardbus);
-
static int __init pci_setup(char *str)
{
while (str) {
@@ -6981,6 +6647,8 @@ static int __init pci_setup(char *str)
pci_no_domains();
} else if (!strncmp(str, "noari", 5)) {
pcie_ari_disabled = true;
+ } else if (!strncmp(str, "notph", 5)) {
+ pci_no_tph();
} else if (!strncmp(str, "cbiosize=", 9)) {
pci_cardbus_io_size = memparse(str + 9, &str);
} else if (!strncmp(str, "cbmemsize=", 10)) {
@@ -7015,6 +6683,8 @@ static int __init pci_setup(char *str)
pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
} else if (!strncmp(str, "disable_acs_redir=", 18)) {
disable_acs_redir_param = str + 18;
+ } else if (!strncmp(str, "config_acs=", 11)) {
+ config_acs_param = str + 11;
} else {
pr_err("PCI: Unknown option `%s'\n", str);
}
@@ -7039,6 +6709,7 @@ static int __init pci_realloc_setup_params(void)
resource_alignment_param = kstrdup(resource_alignment_param,
GFP_KERNEL);
disable_acs_redir_param = kstrdup(disable_acs_redir_param, GFP_KERNEL);
+ config_acs_param = kstrdup(config_acs_param, GFP_KERNEL);
return 0;
}
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index a4c397434057..0e67014aa001 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -2,10 +2,17 @@
#ifndef DRIVERS_PCI_H
#define DRIVERS_PCI_H
+#include <linux/align.h>
+#include <linux/bitfield.h>
#include <linux/pci.h>
+struct pcie_tlp_log;
+
/* Number of possible devfns: 0.0 to 1f.7 inclusive */
#define MAX_NR_DEVFNS 256
+#define PCI_MAX_NR_DEVS 32
+
+#define MAX_NR_LANES 16
#define PCI_FIND_CAP_TTL 48
@@ -13,18 +20,168 @@
#define PCIE_LINK_RETRAIN_TIMEOUT_MS 1000
+/*
+ * Power stable to PERST# inactive.
+ *
+ * See the "Power Sequencing and Reset Signal Timings" table of the PCI Express
+ * Card Electromechanical Specification, Revision 5.1, Section 2.9.2, Symbol
+ * "T_PVPERL".
+ */
+#define PCIE_T_PVPERL_MS 100
+
+/*
+ * REFCLK stable before PERST# inactive.
+ *
+ * See the "Power Sequencing and Reset Signal Timings" table of the PCI Express
+ * Card Electromechanical Specification, Revision 5.1, Section 2.9.2, Symbol
+ * "T_PERST-CLK".
+ */
+#define PCIE_T_PERST_CLK_US 100
+
+/*
+ * PCIe r6.0, sec 5.3.3.2.1 <PME Synchronization>
+ * Recommends 1ms to 10ms timeout to check L2 ready.
+ */
+#define PCIE_PME_TO_L2_TIMEOUT_US 10000
+
+/*
+ * PCIe r6.0, sec 6.6.1 <Conventional Reset>
+ *
+ * - "With a Downstream Port that does not support Link speeds greater
+ * than 5.0 GT/s, software must wait a minimum of 100 ms following exit
+ * from a Conventional Reset before sending a Configuration Request to
+ * the device immediately below that Port."
+ *
+ * - "With a Downstream Port that supports Link speeds greater than
+ * 5.0 GT/s, software must wait a minimum of 100 ms after Link training
+ * completes before sending a Configuration Request to the device
+ * immediately below that Port."
+ */
+#define PCIE_RESET_CONFIG_WAIT_MS 100
+
+/* Parameters for the waiting for link up routine */
+#define PCIE_LINK_WAIT_MAX_RETRIES 10
+#define PCIE_LINK_WAIT_SLEEP_MS 90
+
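+
Illustrative sketch, not part of this patch: how a host controller driver
might honour the CEM timings above around PERST# deassertion. The GPIO
handle, its polarity, and the power-up step are assumptions.

static void example_deassert_perst(struct gpio_desc *perst)
{
	/* Power to the slot is assumed to be enabled just before this */
	msleep(PCIE_T_PVPERL_MS);		/* power stable -> PERST# inactive */
	gpiod_set_value_cansleep(perst, 0);	/* release PERST# */
	msleep(PCIE_RESET_CONFIG_WAIT_MS);	/* wait before config requests */
}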
+/* Message Routing (r[2:0]); PCIe r6.0, sec 2.2.8 */
+#define PCIE_MSG_TYPE_R_RC 0
+#define PCIE_MSG_TYPE_R_ADDR 1
+#define PCIE_MSG_TYPE_R_ID 2
+#define PCIE_MSG_TYPE_R_BC 3
+#define PCIE_MSG_TYPE_R_LOCAL 4
+#define PCIE_MSG_TYPE_R_GATHER 5
+
+/* Power Management Messages; PCIe r6.0, sec 2.2.8.2 */
+#define PCIE_MSG_CODE_PME_TURN_OFF 0x19
+
+/* INTx Mechanism Messages; PCIe r6.0, sec 2.2.8.1 */
+#define PCIE_MSG_CODE_ASSERT_INTA 0x20
+#define PCIE_MSG_CODE_ASSERT_INTB 0x21
+#define PCIE_MSG_CODE_ASSERT_INTC 0x22
+#define PCIE_MSG_CODE_ASSERT_INTD 0x23
+#define PCIE_MSG_CODE_DEASSERT_INTA 0x24
+#define PCIE_MSG_CODE_DEASSERT_INTB 0x25
+#define PCIE_MSG_CODE_DEASSERT_INTC 0x26
+#define PCIE_MSG_CODE_DEASSERT_INTD 0x27
+
+#define PCI_BUS_BRIDGE_IO_WINDOW 0
+#define PCI_BUS_BRIDGE_MEM_WINDOW 1
+#define PCI_BUS_BRIDGE_PREF_MEM_WINDOW 2
+
extern const unsigned char pcie_link_speed[];
extern bool pci_early_dump;
+extern struct mutex pci_rescan_remove_lock;
+
bool pcie_cap_has_lnkctl(const struct pci_dev *dev);
bool pcie_cap_has_lnkctl2(const struct pci_dev *dev);
bool pcie_cap_has_rtctl(const struct pci_dev *dev);
+/* Standard Capability finder */
+/**
+ * PCI_FIND_NEXT_CAP - Find a PCI standard capability
+ * @read_cfg: Name of the config space read accessor (token-pasted with _byte/_word)
+ * @start: Starting position to begin search
+ * @cap: Capability ID to find
+ * @args: Arguments to pass to read_cfg function
+ *
+ * Search the capability list in PCI config space to find @cap.
+ * Implements TTL (time-to-live) protection against infinite loops.
+ *
+ * Return: Position of the capability if found, 0 otherwise.
+ */
+#define PCI_FIND_NEXT_CAP(read_cfg, start, cap, args...) \
+({ \
+ int __ttl = PCI_FIND_CAP_TTL; \
+ u8 __id, __found_pos = 0; \
+ u8 __pos = (start); \
+ u16 __ent; \
+ \
+ read_cfg##_byte(args, __pos, &__pos); \
+ \
+ while (__ttl--) { \
+ if (__pos < PCI_STD_HEADER_SIZEOF) \
+ break; \
+ \
+ __pos = ALIGN_DOWN(__pos, 4); \
+ read_cfg##_word(args, __pos, &__ent); \
+ \
+ __id = FIELD_GET(PCI_CAP_ID_MASK, __ent); \
+ if (__id == 0xff) \
+ break; \
+ \
+ if (__id == (cap)) { \
+ __found_pos = __pos; \
+ break; \
+ } \
+ \
+ __pos = FIELD_GET(PCI_CAP_LIST_NEXT_MASK, __ent); \
+ } \
+ __found_pos; \
+})
+
+/* Extended Capability finder */
+/**
+ * PCI_FIND_NEXT_EXT_CAP - Find a PCI extended capability
+ * @read_cfg: Name of the config space read accessor (token-pasted with _dword)
+ * @start: Starting position to begin search (0 for initial search)
+ * @cap: Extended capability ID to find
+ * @args: Arguments to pass to read_cfg function
+ *
+ * Search the extended capability list in PCI config space to find @cap.
+ * Implements TTL protection against infinite loops using a calculated
+ * maximum search count.
+ *
+ * Return: Position of the capability if found, 0 otherwise.
+ */
+#define PCI_FIND_NEXT_EXT_CAP(read_cfg, start, cap, args...) \
+({ \
+ u16 __pos = (start) ?: PCI_CFG_SPACE_SIZE; \
+ u16 __found_pos = 0; \
+ int __ttl, __ret; \
+ u32 __header; \
+ \
+ __ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8; \
+ while (__ttl-- > 0 && __pos >= PCI_CFG_SPACE_SIZE) { \
+ __ret = read_cfg##_dword(args, __pos, &__header); \
+ if (__ret != PCIBIOS_SUCCESSFUL) \
+ break; \
+ \
+ if (__header == 0) \
+ break; \
+ \
+ if (PCI_EXT_CAP_ID(__header) == (cap) && __pos != start) {\
+ __found_pos = __pos; \
+ break; \
+ } \
+ \
+ __pos = PCI_EXT_CAP_NEXT(__header); \
+ } \
+ __found_pos; \
+})
+
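+
Illustrative sketch, not part of this patch: a caller holding only a
(bus, devfn) pair could invoke the finders with the generic bus accessors;
both lookups below are assumptions derived from the macro bodies above.

static void example_find_caps(struct pci_bus *bus, unsigned int devfn)
{
	u8 pcie_pos = PCI_FIND_NEXT_CAP(pci_bus_read_config, PCI_CAPABILITY_LIST,
					PCI_CAP_ID_EXP, bus, devfn);
	u16 aer_pos = PCI_FIND_NEXT_EXT_CAP(pci_bus_read_config, 0,
					    PCI_EXT_CAP_ID_ERR, bus, devfn);

	if (pcie_pos)
		pr_debug("PCIe capability at %#x\n", pcie_pos);
	if (aer_pos)
		pr_debug("AER extended capability at %#x\n", aer_pos);
}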
/* Functions internal to the PCI core code */
-int pci_create_sysfs_dev_files(struct pci_dev *pdev);
-void pci_remove_sysfs_dev_files(struct pci_dev *pdev);
-void pci_cleanup_rom(struct pci_dev *dev);
#ifdef CONFIG_DMI
extern const struct attribute_group pci_dev_smbios_attr_group;
#endif
@@ -40,6 +197,7 @@ bool pci_reset_supported(struct pci_dev *dev);
void pci_init_reset_methods(struct pci_dev *dev);
int pci_bridge_secondary_bus_reset(struct pci_dev *dev);
int pci_bus_error_reset(struct pci_dev *dev);
+int __pci_reset_bus(struct pci_bus *bus);
struct pci_cap_saved_data {
u16 cap_nr;
@@ -75,28 +233,48 @@ void pcie_clear_device_status(struct pci_dev *dev);
void pcie_clear_root_pme_status(struct pci_dev *dev);
bool pci_check_pme_status(struct pci_dev *dev);
void pci_pme_wakeup_bus(struct pci_bus *bus);
-int __pci_pme_wakeup(struct pci_dev *dev, void *ign);
void pci_pme_restore(struct pci_dev *dev);
bool pci_dev_need_resume(struct pci_dev *dev);
void pci_dev_adjust_pme(struct pci_dev *dev);
void pci_dev_complete_resume(struct pci_dev *pci_dev);
void pci_config_pm_runtime_get(struct pci_dev *dev);
void pci_config_pm_runtime_put(struct pci_dev *dev);
+void pci_pm_power_up_and_verify_state(struct pci_dev *pci_dev);
void pci_pm_init(struct pci_dev *dev);
void pci_ea_init(struct pci_dev *dev);
void pci_msi_init(struct pci_dev *dev);
void pci_msix_init(struct pci_dev *dev);
bool pci_bridge_d3_possible(struct pci_dev *dev);
void pci_bridge_d3_update(struct pci_dev *dev);
-void pci_bridge_reconfigure_ltr(struct pci_dev *dev);
int pci_bridge_wait_for_secondary_bus(struct pci_dev *dev, char *reset_type);
+static inline bool pci_bus_rrs_vendor_id(u32 l)
+{
+ return (l & 0xffff) == PCI_VENDOR_ID_PCI_SIG;
+}
+
static inline void pci_wakeup_event(struct pci_dev *dev)
{
/* Wait 100 ms before the system can be put into a sleep state. */
pm_wakeup_event(&dev->dev, 100);
}
+/**
+ * pci_bar_index_is_valid - Check whether a BAR index is within valid range
+ * @bar: BAR index
+ *
+ * Protects against overflowing the &struct pci_dev.resource array.
+ *
+ * Return: true for valid index, false otherwise.
+ */
+static inline bool pci_bar_index_is_valid(int bar)
+{
+ if (bar >= 0 && bar < PCI_NUM_RESOURCES)
+ return true;
+
+ return false;
+}
+
static inline bool pci_has_subordinate(struct pci_dev *pci_dev)
{
return !!(pci_dev->subordinate);
@@ -121,7 +299,6 @@ static inline bool pcie_downstream_port(const struct pci_dev *dev)
}
void pci_vpd_init(struct pci_dev *dev);
-void pci_vpd_release(struct pci_dev *dev);
extern const struct attribute_group pci_dev_vpd_attr_group;
/* PCI Virtual Channel */
@@ -142,13 +319,14 @@ static inline int pci_proc_detach_bus(struct pci_bus *bus) { return 0; }
/* Functions for PCI Hotplug drivers to use */
int pci_hp_add_bridge(struct pci_dev *dev);
+bool pci_hp_spurious_link_change(struct pci_dev *pdev);
-#ifdef HAVE_PCI_LEGACY
+#if defined(CONFIG_SYSFS) && defined(HAVE_PCI_LEGACY)
void pci_create_legacy_files(struct pci_bus *bus);
void pci_remove_legacy_files(struct pci_bus *bus);
#else
-static inline void pci_create_legacy_files(struct pci_bus *bus) { return; }
-static inline void pci_remove_legacy_files(struct pci_bus *bus) { return; }
+static inline void pci_create_legacy_files(struct pci_bus *bus) { }
+static inline void pci_remove_legacy_files(struct pci_bus *bus) { }
#endif
/* Lock for read/write access to pci device and bus lists */
@@ -176,15 +354,30 @@ static inline int pci_no_d1d2(struct pci_dev *dev)
return (dev->no_d1d2 || parent_dstates);
}
+
+#ifdef CONFIG_SYSFS
+int pci_create_sysfs_dev_files(struct pci_dev *pdev);
+void pci_remove_sysfs_dev_files(struct pci_dev *pdev);
extern const struct attribute_group *pci_dev_groups[];
+extern const struct attribute_group *pci_dev_attr_groups[];
extern const struct attribute_group *pcibus_groups[];
-extern const struct device_type pci_dev_type;
extern const struct attribute_group *pci_bus_groups[];
+extern const struct attribute_group pci_doe_sysfs_group;
+#else
+static inline int pci_create_sysfs_dev_files(struct pci_dev *pdev) { return 0; }
+static inline void pci_remove_sysfs_dev_files(struct pci_dev *pdev) { }
+#define pci_dev_groups NULL
+#define pci_dev_attr_groups NULL
+#define pcibus_groups NULL
+#define pci_bus_groups NULL
+#endif
extern unsigned long pci_hotplug_io_size;
extern unsigned long pci_hotplug_mmio_size;
extern unsigned long pci_hotplug_mmio_pref_size;
extern unsigned long pci_hotplug_bus_size;
+extern unsigned long pci_cardbus_io_size;
+extern unsigned long pci_cardbus_mem_size;
/**
* pci_match_one_device - Tell if a PCI device structure has a matching
@@ -228,16 +421,24 @@ enum pci_bar_type {
struct device *pci_get_host_bridge_device(struct pci_dev *dev);
void pci_put_host_bridge_device(struct device *dev);
+void pci_resize_resource_set_size(struct pci_dev *dev, int resno, int size);
+int pci_do_resource_release_and_resize(struct pci_dev *dev, int resno, int size,
+ int exclude_bars);
+unsigned int pci_rescan_bus_bridge_resize(struct pci_dev *bridge);
+int __must_check pci_reassign_resource(struct pci_dev *dev, int i, resource_size_t add_size, resource_size_t align);
+
int pci_configure_extended_tags(struct pci_dev *dev, void *ign);
bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *pl,
- int crs_timeout);
+ int rrs_timeout);
bool pci_bus_generic_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *pl,
- int crs_timeout);
-int pci_idt_bus_quirk(struct pci_bus *bus, int devfn, u32 *pl, int crs_timeout);
+ int rrs_timeout);
+int pci_idt_bus_quirk(struct pci_bus *bus, int devfn, u32 *pl, int rrs_timeout);
int pci_setup_device(struct pci_dev *dev);
+void __pci_size_stdbars(struct pci_dev *dev, int count,
+ unsigned int pos, u32 *sizes);
int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
- struct resource *res, unsigned int reg);
+ struct resource *res, unsigned int reg, u32 *sizes);
void pci_configure_ari(struct pci_dev *dev);
void __pci_bus_size_bridges(struct pci_bus *bus,
struct list_head *realloc_head);
@@ -245,12 +446,55 @@ void __pci_bus_assign_resources(const struct pci_bus *bus,
struct list_head *realloc_head,
struct list_head *fail_head);
bool pci_bus_clip_resource(struct pci_dev *dev, int idx);
+void pci_walk_bus_locked(struct pci_bus *top,
+ int (*cb)(struct pci_dev *, void *),
+ void *userdata);
+
+const char *pci_resource_name(struct pci_dev *dev, unsigned int i);
+bool pci_resource_is_optional(const struct pci_dev *dev, int resno);
+
+/**
+ * pci_resource_num - Reverse lookup resource number from device resources
+ * @dev: PCI device
+ * @res: Resource to look up the index for (MUST be one of @dev's resources)
+ *
+ * Perform a reverse lookup to determine the resource number for @res within
+ * the @dev resource array. NOTE: The caller is responsible for ensuring @res is
+ * among @dev's resources!
+ *
+ * Returns: resource number.
+ */
+static inline int pci_resource_num(const struct pci_dev *dev,
+ const struct resource *res)
+{
+ int resno = res - &dev->resource[0];
+
+ /* Passing a resource that is not among dev's resources? */
+ WARN_ON_ONCE(resno >= PCI_NUM_RESOURCES);
+
+ return resno;
+}
+struct resource *pbus_select_window(struct pci_bus *bus,
+ const struct resource *res);
void pci_reassigndev_resource_alignment(struct pci_dev *dev);
void pci_disable_bridge_window(struct pci_dev *dev);
struct pci_bus *pci_bus_get(struct pci_bus *bus);
void pci_bus_put(struct pci_bus *bus);
+#define PCIE_LNKCAP_SLS2SPEED(lnkcap) \
+({ \
+ u32 lnkcap_sls = (lnkcap) & PCI_EXP_LNKCAP_SLS; \
+ \
+ (lnkcap_sls == PCI_EXP_LNKCAP_SLS_64_0GB ? PCIE_SPEED_64_0GT : \
+ lnkcap_sls == PCI_EXP_LNKCAP_SLS_32_0GB ? PCIE_SPEED_32_0GT : \
+ lnkcap_sls == PCI_EXP_LNKCAP_SLS_16_0GB ? PCIE_SPEED_16_0GT : \
+ lnkcap_sls == PCI_EXP_LNKCAP_SLS_8_0GB ? PCIE_SPEED_8_0GT : \
+ lnkcap_sls == PCI_EXP_LNKCAP_SLS_5_0GB ? PCIE_SPEED_5_0GT : \
+ lnkcap_sls == PCI_EXP_LNKCAP_SLS_2_5GB ? PCIE_SPEED_2_5GT : \
+ PCI_SPEED_UNKNOWN); \
+})
+
/* PCIe link information from Link Capabilities 2 */
#define PCIE_LNKCAP2_SLS2SPEED(lnkcap2) \
((lnkcap2) & PCI_EXP_LNKCAP2_SLS_64_0GB ? PCIE_SPEED_64_0GT : \
@@ -261,9 +505,22 @@ void pci_bus_put(struct pci_bus *bus);
(lnkcap2) & PCI_EXP_LNKCAP2_SLS_2_5GB ? PCIE_SPEED_2_5GT : \
PCI_SPEED_UNKNOWN)
+#define PCIE_LNKCTL2_TLS2SPEED(lnkctl2) \
+({ \
+ u16 lnkctl2_tls = (lnkctl2) & PCI_EXP_LNKCTL2_TLS; \
+ \
+ (lnkctl2_tls == PCI_EXP_LNKCTL2_TLS_64_0GT ? PCIE_SPEED_64_0GT : \
+ lnkctl2_tls == PCI_EXP_LNKCTL2_TLS_32_0GT ? PCIE_SPEED_32_0GT : \
+ lnkctl2_tls == PCI_EXP_LNKCTL2_TLS_16_0GT ? PCIE_SPEED_16_0GT : \
+ lnkctl2_tls == PCI_EXP_LNKCTL2_TLS_8_0GT ? PCIE_SPEED_8_0GT : \
+ lnkctl2_tls == PCI_EXP_LNKCTL2_TLS_5_0GT ? PCIE_SPEED_5_0GT : \
+ lnkctl2_tls == PCI_EXP_LNKCTL2_TLS_2_5GT ? PCIE_SPEED_2_5GT : \
+ PCI_SPEED_UNKNOWN); \
+})
+
/* PCIe speed to Mb/s reduced by encoding overhead */
#define PCIE_SPEED2MBS_ENC(speed) \
- ((speed) == PCIE_SPEED_64_0GT ? 64000*128/130 : \
+ ((speed) == PCIE_SPEED_64_0GT ? 64000*1/1 : \
(speed) == PCIE_SPEED_32_0GT ? 32000*128/130 : \
(speed) == PCIE_SPEED_16_0GT ? 16000*128/130 : \
(speed) == PCIE_SPEED_8_0GT ? 8000*128/130 : \
@@ -271,14 +528,39 @@ void pci_bus_put(struct pci_bus *bus);
(speed) == PCIE_SPEED_2_5GT ? 2500*8/10 : \
0)
+static inline int pcie_dev_speed_mbps(enum pci_bus_speed speed)
+{
+ switch (speed) {
+ case PCIE_SPEED_2_5GT:
+ return 2500;
+ case PCIE_SPEED_5_0GT:
+ return 5000;
+ case PCIE_SPEED_8_0GT:
+ return 8000;
+ case PCIE_SPEED_16_0GT:
+ return 16000;
+ case PCIE_SPEED_32_0GT:
+ return 32000;
+ case PCIE_SPEED_64_0GT:
+ return 64000;
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
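+
Illustrative sketch, not part of this patch: combining the helpers above to
get a device's maximum per-lane rate in Mb/s; the wrapper is an assumption.

static int example_max_lane_rate_mbps(struct pci_dev *pdev)
{
	u32 lnkcap;

	pcie_capability_read_dword(pdev, PCI_EXP_LNKCAP, &lnkcap);
	return pcie_dev_speed_mbps(PCIE_LNKCAP_SLS2SPEED(lnkcap));
}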
+u8 pcie_get_supported_speeds(struct pci_dev *dev);
const char *pci_speed_string(enum pci_bus_speed speed);
-enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev);
-enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev);
-u32 pcie_bandwidth_capable(struct pci_dev *dev, enum pci_bus_speed *speed,
- enum pcie_link_width *width);
void __pcie_print_link_status(struct pci_dev *dev, bool verbose);
void pcie_report_downtraining(struct pci_dev *dev);
-void pcie_update_link_speed(struct pci_bus *bus, u16 link_status);
+
+static inline void __pcie_update_link_speed(struct pci_bus *bus, u16 linksta, u16 linksta2)
+{
+ bus->cur_bus_speed = pcie_link_speed[linksta & PCI_EXP_LNKSTA_CLS];
+ bus->flit_mode = (linksta2 & PCI_EXP_LNKSTA2_FLIT) ? 1 : 0;
+}
+void pcie_update_link_speed(struct pci_bus *bus);
/* Single Root I/O Virtualization */
struct pci_sriov {
@@ -303,6 +585,7 @@ struct pci_sriov {
u16 subsystem_vendor; /* VF subsystem vendor */
u16 subsystem_device; /* VF subsystem device */
resource_size_t barsz[PCI_SRIOV_NUM_BARS]; /* VF BAR size */
+ u16 vf_rebar_cap; /* VF Resizable BAR capability offset */
bool drivers_autoprobe; /* Auto probing of VFs by driver */
};
@@ -316,6 +599,43 @@ static inline void pci_doe_destroy(struct pci_dev *pdev) { }
static inline void pci_doe_disconnected(struct pci_dev *pdev) { }
#endif
+#ifdef CONFIG_PCI_NPEM
+void pci_npem_create(struct pci_dev *dev);
+void pci_npem_remove(struct pci_dev *dev);
+#else
+static inline void pci_npem_create(struct pci_dev *dev) { }
+static inline void pci_npem_remove(struct pci_dev *dev) { }
+#endif
+
+#if defined(CONFIG_PCI_DOE) && defined(CONFIG_SYSFS)
+void pci_doe_sysfs_init(struct pci_dev *pci_dev);
+void pci_doe_sysfs_teardown(struct pci_dev *pdev);
+#else
+static inline void pci_doe_sysfs_init(struct pci_dev *pdev) { }
+static inline void pci_doe_sysfs_teardown(struct pci_dev *pdev) { }
+#endif
+
+#ifdef CONFIG_PCI_IDE
+void pci_ide_init(struct pci_dev *dev);
+void pci_ide_init_host_bridge(struct pci_host_bridge *hb);
+void pci_ide_destroy(struct pci_dev *dev);
+extern const struct attribute_group pci_ide_attr_group;
+#else
+static inline void pci_ide_init(struct pci_dev *dev) { }
+static inline void pci_ide_init_host_bridge(struct pci_host_bridge *hb) { }
+static inline void pci_ide_destroy(struct pci_dev *dev) { }
+#endif
+
+#ifdef CONFIG_PCI_TSM
+void pci_tsm_init(struct pci_dev *pdev);
+void pci_tsm_destroy(struct pci_dev *pdev);
+extern const struct attribute_group pci_tsm_attr_group;
+extern const struct attribute_group pci_tsm_auth_attr_group;
+#else
+static inline void pci_tsm_init(struct pci_dev *pdev) { }
+static inline void pci_tsm_destroy(struct pci_dev *pdev) { }
+#endif
+
/**
* pci_dev_set_io_state - Set the new error state if possible.
*
@@ -357,19 +677,26 @@ static inline int pci_dev_set_disconnected(struct pci_dev *dev, void *unused)
return 0;
}
-static inline bool pci_dev_is_disconnected(const struct pci_dev *dev)
-{
- return dev->error_state == pci_channel_io_perm_failure;
-}
-
/* pci_dev priv_flags */
#define PCI_DEV_ADDED 0
#define PCI_DPC_RECOVERED 1
#define PCI_DPC_RECOVERING 2
+#define PCI_DEV_REMOVED 3
+#define PCI_LINK_CHANGED 4
+#define PCI_LINK_CHANGING 5
+#define PCI_LINK_LBMS_SEEN 6
+#define PCI_DEV_ALLOW_BINDING 7
+
+static inline void pci_dev_assign_added(struct pci_dev *dev)
+{
+ smp_mb__before_atomic();
+ set_bit(PCI_DEV_ADDED, &dev->priv_flags);
+ smp_mb__after_atomic();
+}
-static inline void pci_dev_assign_added(struct pci_dev *dev, bool added)
+static inline bool pci_dev_test_and_clear_added(struct pci_dev *dev)
{
- assign_bit(PCI_DEV_ADDED, &dev->priv_flags, added);
+ return test_and_clear_bit(PCI_DEV_ADDED, &dev->priv_flags);
}
static inline bool pci_dev_is_added(const struct pci_dev *dev)
@@ -377,6 +704,21 @@ static inline bool pci_dev_is_added(const struct pci_dev *dev)
return test_bit(PCI_DEV_ADDED, &dev->priv_flags);
}
+static inline bool pci_dev_test_and_set_removed(struct pci_dev *dev)
+{
+ return test_and_set_bit(PCI_DEV_REMOVED, &dev->priv_flags);
+}
+
+static inline void pci_dev_allow_binding(struct pci_dev *dev)
+{
+ set_bit(PCI_DEV_ALLOW_BINDING, &dev->priv_flags);
+}
+
+static inline bool pci_dev_binding_disallowed(struct pci_dev *dev)
+{
+ return !test_bit(PCI_DEV_ALLOW_BINDING, &dev->priv_flags);
+}
+
#ifdef CONFIG_PCIEAER
#include <linux/aer.h>
@@ -384,12 +726,15 @@ static inline bool pci_dev_is_added(const struct pci_dev *dev)
struct aer_err_info {
struct pci_dev *dev[AER_MAX_MULTI_ERR_DEVICES];
+ int ratelimit_print[AER_MAX_MULTI_ERR_DEVICES];
int error_dev_num;
+ const char *level; /* printk level */
unsigned int id:16;
unsigned int severity:2; /* 0:NONFATAL | 1:FATAL | 2:COR */
- unsigned int __pad1:5;
+ unsigned int root_ratelimit_print:1; /* 0=skip, 1=print */
+ unsigned int __pad1:4;
unsigned int multi_error_valid:1;
unsigned int first_error:5;
@@ -398,11 +743,19 @@ struct aer_err_info {
unsigned int status; /* COR/UNCOR Error Status */
unsigned int mask; /* COR/UNCOR Error Mask */
- struct aer_header_log_regs tlp; /* TLP Header */
+ struct pcie_tlp_log tlp; /* TLP Header */
};
-int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info);
-void aer_print_error(struct pci_dev *dev, struct aer_err_info *info);
+int aer_get_device_error_info(struct aer_err_info *info, int i);
+void aer_print_error(struct aer_err_info *info, int i);
+
+int pcie_read_tlp_log(struct pci_dev *dev, int where, int where2,
+ unsigned int tlp_len, bool flit,
+ struct pcie_tlp_log *log);
+unsigned int aer_tlp_log_len(struct pci_dev *dev, u32 aercc);
+void pcie_print_tlp_log(const struct pci_dev *dev,
+ const struct pcie_tlp_log *log, const char *level,
+ const char *pfx);
#endif /* CONFIG_PCIEAER */
#ifdef CONFIG_PCIEPORTBUS
@@ -421,10 +774,11 @@ void pci_dpc_init(struct pci_dev *pdev);
void dpc_process_error(struct pci_dev *pdev);
pci_ers_result_t dpc_reset_link(struct pci_dev *pdev);
bool pci_dpc_recovered(struct pci_dev *pdev);
+unsigned int dpc_tlp_log_len(struct pci_dev *dev);
#else
-static inline void pci_save_dpc_state(struct pci_dev *dev) {}
-static inline void pci_restore_dpc_state(struct pci_dev *dev) {}
-static inline void pci_dpc_init(struct pci_dev *pdev) {}
+static inline void pci_save_dpc_state(struct pci_dev *dev) { }
+static inline void pci_restore_dpc_state(struct pci_dev *dev) { }
+static inline void pci_dpc_init(struct pci_dev *pdev) { }
static inline bool pci_dpc_recovered(struct pci_dev *pdev) { return false; }
#endif
@@ -436,12 +790,12 @@ void pcie_walk_rcec(struct pci_dev *rcec,
int (*cb)(struct pci_dev *, void *),
void *userdata);
#else
-static inline void pci_rcec_init(struct pci_dev *dev) {}
-static inline void pci_rcec_exit(struct pci_dev *dev) {}
-static inline void pcie_link_rcec(struct pci_dev *rcec) {}
+static inline void pci_rcec_init(struct pci_dev *dev) { }
+static inline void pci_rcec_exit(struct pci_dev *dev) { }
+static inline void pcie_link_rcec(struct pci_dev *rcec) { }
static inline void pcie_walk_rcec(struct pci_dev *rcec,
int (*cb)(struct pci_dev *, void *),
- void *userdata) {}
+ void *userdata) { }
#endif
#ifdef CONFIG_PCI_ATS
@@ -477,6 +831,27 @@ void pci_iov_update_resource(struct pci_dev *dev, int resno);
resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, int resno);
void pci_restore_iov_state(struct pci_dev *dev);
int pci_iov_bus_range(struct pci_bus *bus);
+void pci_iov_resource_set_size(struct pci_dev *dev, int resno, int size);
+bool pci_iov_is_memory_decoding_enabled(struct pci_dev *dev);
+static inline u16 pci_iov_vf_rebar_cap(struct pci_dev *dev)
+{
+ if (!dev->is_physfn)
+ return 0;
+
+ return dev->sriov->vf_rebar_cap;
+}
+static inline bool pci_resource_is_iov(int resno)
+{
+ return resno >= PCI_IOV_RESOURCES && resno <= PCI_IOV_RESOURCE_END;
+}
+static inline int pci_resource_num_from_vf_bar(int resno)
+{
+ return resno + PCI_IOV_RESOURCES;
+}
+static inline int pci_resource_num_to_vf_bar(int resno)
+{
+ return resno - PCI_IOV_RESOURCES;
+}
extern const struct attribute_group sriov_pf_dev_attr_group;
extern const struct attribute_group sriov_vf_dev_attr_group;
#else
@@ -484,23 +859,57 @@ static inline int pci_iov_init(struct pci_dev *dev)
{
return -ENODEV;
}
-static inline void pci_iov_release(struct pci_dev *dev)
-
+static inline void pci_iov_release(struct pci_dev *dev) { }
+static inline void pci_iov_remove(struct pci_dev *dev) { }
+static inline void pci_iov_update_resource(struct pci_dev *dev, int resno) { }
+static inline resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev,
+ int resno)
{
+ return 0;
}
-static inline void pci_iov_remove(struct pci_dev *dev)
+static inline void pci_restore_iov_state(struct pci_dev *dev) { }
+static inline int pci_iov_bus_range(struct pci_bus *bus)
{
+ return 0;
}
-static inline void pci_restore_iov_state(struct pci_dev *dev)
+static inline void pci_iov_resource_set_size(struct pci_dev *dev, int resno,
+ int size) { }
+static inline bool pci_iov_is_memory_decoding_enabled(struct pci_dev *dev)
{
+ return false;
}
-static inline int pci_iov_bus_range(struct pci_bus *bus)
+static inline u16 pci_iov_vf_rebar_cap(struct pci_dev *dev)
{
return 0;
}
-
+static inline bool pci_resource_is_iov(int resno)
+{
+ return false;
+}
+static inline int pci_resource_num_from_vf_bar(int resno)
+{
+ WARN_ON_ONCE(1);
+ return -ENODEV;
+}
+static inline int pci_resource_num_to_vf_bar(int resno)
+{
+ WARN_ON_ONCE(1);
+ return -ENODEV;
+}
#endif /* CONFIG_PCI_IOV */
+#ifdef CONFIG_PCIE_TPH
+void pci_restore_tph_state(struct pci_dev *dev);
+void pci_save_tph_state(struct pci_dev *dev);
+void pci_no_tph(void);
+void pci_tph_init(struct pci_dev *dev);
+#else
+static inline void pci_restore_tph_state(struct pci_dev *dev) { }
+static inline void pci_save_tph_state(struct pci_dev *dev) { }
+static inline void pci_no_tph(void) { }
+static inline void pci_tph_init(struct pci_dev *dev) { }
+#endif
+
#ifdef CONFIG_PCIE_PTM
void pci_ptm_init(struct pci_dev *dev);
void pci_save_ptm_state(struct pci_dev *dev);
@@ -520,12 +929,10 @@ unsigned long pci_cardbus_resource_alignment(struct resource *);
static inline resource_size_t pci_resource_alignment(struct pci_dev *dev,
struct resource *res)
{
-#ifdef CONFIG_PCI_IOV
- int resno = res - dev->resource;
+ int resno = pci_resource_num(dev, res);
- if (resno >= PCI_IOV_RESOURCES && resno <= PCI_IOV_RESOURCE_END)
+ if (pci_resource_is_iov(resno))
return pci_sriov_resource_alignment(dev, resno);
-#endif
if (dev->class >> 8 == PCI_CLASS_BRIDGE_CARDBUS)
return pci_cardbus_resource_alignment(res);
return resource_alignment(res);
@@ -536,7 +943,7 @@ void pci_acs_init(struct pci_dev *dev);
int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags);
int pci_dev_specific_enable_acs(struct pci_dev *dev);
int pci_dev_specific_disable_acs_redir(struct pci_dev *dev);
-bool pcie_failed_link_retrain(struct pci_dev *dev);
+int pcie_failed_link_retrain(struct pci_dev *dev);
#else
static inline int pci_dev_specific_acs_enabled(struct pci_dev *dev,
u16 acs_flags)
@@ -551,9 +958,9 @@ static inline int pci_dev_specific_disable_acs_redir(struct pci_dev *dev)
{
return -ENOTTY;
}
-static inline bool pcie_failed_link_retrain(struct pci_dev *dev)
+static inline int pcie_failed_link_retrain(struct pci_dev *dev)
{
- return false;
+ return -ENOTTY;
}
#endif
@@ -564,14 +971,30 @@ pci_ers_result_t pcie_do_recovery(struct pci_dev *dev,
bool pcie_wait_for_link(struct pci_dev *pdev, bool active);
int pcie_retrain_link(struct pci_dev *pdev, bool use_lt);
+
+/* ASPM-related functionality we need even without CONFIG_PCIEASPM */
+void pci_save_ltr_state(struct pci_dev *dev);
+void pci_restore_ltr_state(struct pci_dev *dev);
+void pci_configure_aspm_l1ss(struct pci_dev *dev);
+void pci_save_aspm_l1ss_state(struct pci_dev *dev);
+void pci_restore_aspm_l1ss_state(struct pci_dev *dev);
+
#ifdef CONFIG_PCIEASPM
+void pcie_aspm_remove_cap(struct pci_dev *pdev, u32 lnkcap);
void pcie_aspm_init_link_state(struct pci_dev *pdev);
void pcie_aspm_exit_link_state(struct pci_dev *pdev);
+void pcie_aspm_pm_state_change(struct pci_dev *pdev, bool locked);
void pcie_aspm_powersave_config_link(struct pci_dev *pdev);
+void pci_configure_ltr(struct pci_dev *pdev);
+void pci_bridge_reconfigure_ltr(struct pci_dev *pdev);
#else
+static inline void pcie_aspm_remove_cap(struct pci_dev *pdev, u32 lnkcap) { }
static inline void pcie_aspm_init_link_state(struct pci_dev *pdev) { }
static inline void pcie_aspm_exit_link_state(struct pci_dev *pdev) { }
+static inline void pcie_aspm_pm_state_change(struct pci_dev *pdev, bool locked) { }
static inline void pcie_aspm_powersave_config_link(struct pci_dev *pdev) { }
+static inline void pci_configure_ltr(struct pci_dev *pdev) { }
+static inline void pci_bridge_reconfigure_ltr(struct pci_dev *pdev) { }
#endif
#ifdef CONFIG_PCIE_ECRC
@@ -582,6 +1005,12 @@ static inline void pcie_set_ecrc_checking(struct pci_dev *dev) { }
static inline void pcie_ecrc_get_policy(char *str) { }
#endif
+#ifdef CONFIG_PCIEPORTBUS
+void pcie_reset_lbms(struct pci_dev *port);
+#else
+static inline void pcie_reset_lbms(struct pci_dev *port) {}
+#endif
+
struct pci_dev_reset_methods {
u16 vendor;
u16 device;
@@ -592,6 +1021,7 @@ struct pci_reset_fn_method {
int (*reset_fn)(struct pci_dev *pdev, bool probe);
char *name;
};
+extern const struct pci_reset_fn_method pci_reset_fn_methods[];
#ifdef CONFIG_PCI_QUIRKS
int pci_dev_specific_reset(struct pci_dev *dev, bool probe);
@@ -613,37 +1043,47 @@ static inline int acpi_get_rc_resources(struct device *dev, const char *hid,
}
#endif
+void pci_rebar_init(struct pci_dev *pdev);
+void pci_restore_rebar_state(struct pci_dev *pdev);
int pci_rebar_get_current_size(struct pci_dev *pdev, int bar);
int pci_rebar_set_size(struct pci_dev *pdev, int bar, int size);
-static inline u64 pci_rebar_size_to_bytes(int size)
-{
- return 1ULL << (size + 20);
-}
struct device_node;
+#define PCI_EQ_RESV 0xff
+
+enum equalization_preset_type {
+ EQ_PRESET_TYPE_8GTS,
+ EQ_PRESET_TYPE_16GTS,
+ EQ_PRESET_TYPE_32GTS,
+ EQ_PRESET_TYPE_64GTS,
+ EQ_PRESET_TYPE_MAX
+};
+
+struct pci_eq_presets {
+ u16 eq_presets_8gts[MAX_NR_LANES];
+ u8 eq_presets_Ngts[EQ_PRESET_TYPE_MAX - 1][MAX_NR_LANES];
+};
+
#ifdef CONFIG_OF
-int of_pci_parse_bus_range(struct device_node *node, struct resource *res);
int of_get_pci_domain_nr(struct device_node *node);
int of_pci_get_max_link_speed(struct device_node *node);
u32 of_pci_get_slot_power_limit(struct device_node *node,
u8 *slot_power_limit_value,
u8 *slot_power_limit_scale);
+bool of_pci_preserve_config(struct device_node *node);
int pci_set_of_node(struct pci_dev *dev);
void pci_release_of_node(struct pci_dev *dev);
void pci_set_bus_of_node(struct pci_bus *bus);
void pci_release_bus_of_node(struct pci_bus *bus);
int devm_of_pci_bridge_init(struct device *dev, struct pci_host_bridge *bridge);
-
+bool of_pci_supply_present(struct device_node *np);
+int of_pci_get_equalization_presets(struct device *dev,
+ struct pci_eq_presets *presets,
+ int num_lanes);
#else
static inline int
-of_pci_parse_bus_range(struct device_node *node, struct resource *res)
-{
- return -EINVAL;
-}
-
-static inline int
of_get_pci_domain_nr(struct device_node *node)
{
return -1;
@@ -667,6 +1107,11 @@ of_pci_get_slot_power_limit(struct device_node *node,
return 0;
}
+static inline bool of_pci_preserve_config(struct device_node *node)
+{
+ return false;
+}
+
static inline int pci_set_of_node(struct pci_dev *dev) { return 0; }
static inline void pci_release_of_node(struct pci_dev *dev) { }
static inline void pci_set_bus_of_node(struct pci_bus *bus) { }
@@ -677,13 +1122,48 @@ static inline int devm_of_pci_bridge_init(struct device *dev, struct pci_host_br
return 0;
}
+static inline bool of_pci_supply_present(struct device_node *np)
+{
+ return false;
+}
+
+static inline int of_pci_get_equalization_presets(struct device *dev,
+ struct pci_eq_presets *presets,
+ int num_lanes)
+{
+ presets->eq_presets_8gts[0] = PCI_EQ_RESV;
+ for (int i = 0; i < EQ_PRESET_TYPE_MAX - 1; i++)
+ presets->eq_presets_Ngts[i][0] = PCI_EQ_RESV;
+
+ return 0;
+}
#endif /* CONFIG_OF */
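As a usage illustration for the preset helpers above, a controller driver might fetch the per-lane presets at probe time. The sketch below is hypothetical (the function name and the dev_dbg reporting are invented); only of_pci_get_equalization_presets(), struct pci_eq_presets, PCI_EQ_RESV and MAX_NR_LANES come from the declarations above:

static int example_fetch_eq_presets(struct device *dev, int num_lanes)
{
	struct pci_eq_presets presets;
	int ret, lane;

	/* num_lanes is expected to be at most MAX_NR_LANES */
	ret = of_pci_get_equalization_presets(dev, &presets, num_lanes);
	if (ret)
		return ret;

	for (lane = 0; lane < num_lanes; lane++) {
		/* PCI_EQ_RESV (0xff) marks a lane with no preset given in DT */
		if (presets.eq_presets_8gts[lane] != PCI_EQ_RESV)
			dev_dbg(dev, "lane %d 8 GT/s preset: %#x\n",
				lane, presets.eq_presets_8gts[lane]);
	}

	return 0;
}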
+struct of_changeset;
+
+#ifdef CONFIG_PCI_DYNAMIC_OF_NODES
+void of_pci_make_dev_node(struct pci_dev *pdev);
+void of_pci_remove_node(struct pci_dev *pdev);
+int of_pci_add_properties(struct pci_dev *pdev, struct of_changeset *ocs,
+ struct device_node *np);
+void of_pci_make_host_bridge_node(struct pci_host_bridge *bridge);
+void of_pci_remove_host_bridge_node(struct pci_host_bridge *bridge);
+int of_pci_add_host_bridge_properties(struct pci_host_bridge *bridge,
+ struct of_changeset *ocs,
+ struct device_node *np);
+#else
+static inline void of_pci_make_dev_node(struct pci_dev *pdev) { }
+static inline void of_pci_remove_node(struct pci_dev *pdev) { }
+static inline void of_pci_make_host_bridge_node(struct pci_host_bridge *bridge) { }
+static inline void of_pci_remove_host_bridge_node(struct pci_host_bridge *bridge) { }
+#endif
+
#ifdef CONFIG_PCIEAER
void pci_no_aer(void);
void pci_aer_init(struct pci_dev *dev);
void pci_aer_exit(struct pci_dev *dev);
extern const struct attribute_group aer_stats_attr_group;
+extern const struct attribute_group aer_attr_group;
void pci_aer_clear_fatal_status(struct pci_dev *dev);
int pci_aer_clear_status(struct pci_dev *dev);
int pci_aer_raw_clear_status(struct pci_dev *dev);
@@ -701,6 +1181,7 @@ static inline void pci_restore_aer_state(struct pci_dev *dev) { }
#endif
#ifdef CONFIG_ACPI
+bool pci_acpi_preserve_config(struct pci_host_bridge *bridge);
int pci_acpi_program_hp_params(struct pci_dev *dev);
extern const struct attribute_group pci_dev_acpi_attr_group;
void pci_set_acpi_fwnode(struct pci_dev *dev);
@@ -714,11 +1195,15 @@ int acpi_pci_wakeup(struct pci_dev *dev, bool enable);
bool acpi_pci_need_resume(struct pci_dev *dev);
pci_power_t acpi_pci_choose_state(struct pci_dev *pdev);
#else
+static inline bool pci_acpi_preserve_config(struct pci_host_bridge *bridge)
+{
+ return false;
+}
static inline int pci_dev_acpi_reset(struct pci_dev *dev, bool probe)
{
return -ENOTTY;
}
-static inline void pci_set_acpi_fwnode(struct pci_dev *dev) {}
+static inline void pci_set_acpi_fwnode(struct pci_dev *dev) { }
static inline int pci_acpi_program_hp_params(struct pci_dev *dev)
{
return -ENODEV;
@@ -739,7 +1224,7 @@ static inline pci_power_t acpi_pci_get_power_state(struct pci_dev *dev)
{
return PCI_UNKNOWN;
}
-static inline void acpi_pci_refresh_power_state(struct pci_dev *dev) {}
+static inline void acpi_pci_refresh_power_state(struct pci_dev *dev) { }
static inline int acpi_pci_wakeup(struct pci_dev *dev, bool enable)
{
return -ENODEV;
@@ -758,8 +1243,6 @@ static inline pci_power_t acpi_pci_choose_state(struct pci_dev *pdev)
extern const struct attribute_group aspm_ctrl_attr_group;
#endif
-extern const struct attribute_group pci_dev_reset_method_attr_group;
-
#ifdef CONFIG_X86_INTEL_MID
bool pci_use_mid_pm(void);
int mid_pci_set_power_state(struct pci_dev *pdev, pci_power_t state);
@@ -779,6 +1262,15 @@ static inline pci_power_t mid_pci_get_power_state(struct pci_dev *pdev)
}
#endif
+#ifdef CONFIG_PCI_MSI
+int pci_msix_write_tph_tag(struct pci_dev *pdev, unsigned int index, u16 tag);
+#else
+static inline int pci_msix_write_tph_tag(struct pci_dev *pdev, unsigned int index, u16 tag)
+{
+ return -ENODEV;
+}
+#endif
+
/*
* Config Address for PCI Configuration Mechanism #1
*
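The comment opening here introduces the Configuration Mechanism #1 address encoding (enable bit, bus, devfn, register) that the constants following it in pci.h spell out. Purely as an illustration of that layout, taken from the PCI Local Bus specification rather than from the header itself:

/*
 * Illustration only: the Mechanism #1 address is written to I/O port
 * 0xCF8 and data is then transferred through port 0xCFC.
 *   bit  31     enable
 *   bits 23-16  bus number
 *   bits 15-8   devfn (device << 3 | function)
 *   bits 7-2    dword-aligned register offset
 */
static inline u32 example_conf1_address(u8 bus, u8 devfn, u8 reg)
{
	return 0x80000000 | (bus << 16) | (devfn << 8) | (reg & 0xFC);
}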
diff --git a/drivers/pci/pcie/Kconfig b/drivers/pci/pcie/Kconfig
index 228652a59f27..17919b99fa66 100644
--- a/drivers/pci/pcie/Kconfig
+++ b/drivers/pci/pcie/Kconfig
@@ -47,7 +47,16 @@ config PCIEAER_INJECT
error injection can fake almost all kinds of errors with the
help of a user space helper tool aer-inject, which can be
gotten from:
- https://git.kernel.org/cgit/linux/kernel/git/gong.chen/aer-inject.git/
+ https://github.com/intel/aer-inject.git
+
+config PCIEAER_CXL
+ bool "PCI Express CXL RAS support"
+ default y
+ depends on PCIEAER && CXL_PCI
+ help
+ Enables CXL error handling.
+
+ If unsure, say Y.
#
# PCI Express ECRC
diff --git a/drivers/pci/pcie/Makefile b/drivers/pci/pcie/Makefile
index 8de4ed5f98f1..173829aa02e6 100644
--- a/drivers/pci/pcie/Makefile
+++ b/drivers/pci/pcie/Makefile
@@ -4,10 +4,10 @@
pcieportdrv-y := portdrv.o rcec.o
-obj-$(CONFIG_PCIEPORTBUS) += pcieportdrv.o
+obj-$(CONFIG_PCIEPORTBUS) += pcieportdrv.o bwctrl.o
-obj-$(CONFIG_PCIEASPM) += aspm.o
-obj-$(CONFIG_PCIEAER) += aer.o err.o
+obj-y += aspm.o
+obj-$(CONFIG_PCIEAER) += aer.o err.o tlp.o
obj-$(CONFIG_PCIEAER_INJECT) += aer_inject.o
obj-$(CONFIG_PCIE_PME) += pme.o
obj-$(CONFIG_PCIE_DPC) += dpc.o
diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c
index f6c24ded134c..e0bcaa896803 100644
--- a/drivers/pci/pcie/aer.c
+++ b/drivers/pci/pcie/aer.c
@@ -2,7 +2,7 @@
/*
* Implement the AER root port service driver. The driver registers an IRQ
* handler. When a root port triggers an AER interrupt, the IRQ handler
- * collects root port status and schedules work.
+ * collects Root Port status and schedules work.
*
* Copyright (C) 2006 Intel Corp.
* Tom Long Nguyen (tom.l.nguyen@intel.com)
@@ -17,6 +17,7 @@
#include <linux/bitops.h>
#include <linux/cper.h>
+#include <linux/dev_printk.h>
#include <linux/pci.h>
#include <linux/pci-acpi.h>
#include <linux/sched.h>
@@ -27,21 +28,27 @@
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/kfifo.h>
+#include <linux/ratelimit.h>
#include <linux/slab.h>
+#include <linux/vmcore_info.h>
#include <acpi/apei.h>
+#include <acpi/ghes.h>
#include <ras/ras_event.h>
#include "../pci.h"
#include "portdrv.h"
+#define aer_printk(level, pdev, fmt, arg...) \
+ dev_printk(level, &(pdev)->dev, fmt, ##arg)
+
#define AER_ERROR_SOURCES_MAX 128
#define AER_MAX_TYPEOF_COR_ERRS 16 /* as per PCI_ERR_COR_STATUS */
-#define AER_MAX_TYPEOF_UNCOR_ERRS 27 /* as per PCI_ERR_UNCOR_STATUS*/
+#define AER_MAX_TYPEOF_UNCOR_ERRS 32 /* as per PCI_ERR_UNCOR_STATUS */
struct aer_err_source {
- unsigned int status;
- unsigned int id;
+ u32 status; /* PCI_ERR_ROOT_STATUS */
+ u32 id; /* PCI_ERR_ROOT_ERR_SRC */
};
struct aer_rpc {
@@ -49,15 +56,15 @@ struct aer_rpc {
DECLARE_KFIFO(aer_fifo, struct aer_err_source, AER_ERROR_SOURCES_MAX);
};
-/* AER stats for the device */
-struct aer_stats {
+/* AER info for the device */
+struct aer_info {
/*
* Fields for all AER capable devices. They indicate the errors
* "as seen by this device". Note that this may mean that if an
- * end point is causing problems, the AER counters may increment
- * at its link partner (e.g. root port) because the errors will be
- * "seen" by the link partner and not the problematic end point
+ * Endpoint is causing problems, the AER counters may increment
+ * at its link partner (e.g. Root Port) because the errors will be
+ * "seen" by the link partner and not the problematic Endpoint
* itself (which may report all counters as 0 as it never saw any
* problems).
*/
@@ -75,22 +82,36 @@ struct aer_stats {
u64 dev_total_nonfatal_errs;
/*
- * Fields for Root ports & root complex event collectors only, these
+ * Fields for Root Ports & Root Complex Event Collectors only; these
* indicate the total number of ERR_COR, ERR_FATAL, and ERR_NONFATAL
- * messages received by the root port / event collector, INCLUDING the
- * ones that are generated internally (by the rootport itself)
+ * messages received by the Root Port / Event Collector, INCLUDING the
+ * ones that are generated internally (by the Root Port itself)
*/
u64 rootport_total_cor_errs;
u64 rootport_total_fatal_errs;
u64 rootport_total_nonfatal_errs;
+
+ /* Ratelimits for errors */
+ struct ratelimit_state correctable_ratelimit;
+ struct ratelimit_state nonfatal_ratelimit;
};
#define AER_LOG_TLP_MASKS (PCI_ERR_UNC_POISON_TLP| \
+ PCI_ERR_UNC_POISON_BLK | \
PCI_ERR_UNC_ECRC| \
PCI_ERR_UNC_UNSUP| \
PCI_ERR_UNC_COMP_ABORT| \
PCI_ERR_UNC_UNX_COMP| \
- PCI_ERR_UNC_MALF_TLP)
+ PCI_ERR_UNC_ACSV | \
+ PCI_ERR_UNC_MCBTLP | \
+ PCI_ERR_UNC_ATOMEG | \
+ PCI_ERR_UNC_DMWR_BLK | \
+ PCI_ERR_UNC_XLAT_BLK | \
+ PCI_ERR_UNC_TLPPRE | \
+ PCI_ERR_UNC_MALF_TLP | \
+ PCI_ERR_UNC_IDE_CHECK | \
+ PCI_ERR_UNC_MISR_IDE | \
+ PCI_ERR_UNC_PCRC_CHECK)
#define SYSTEM_ERROR_INTR_ON_MESG_MASK (PCI_EXP_RTCTL_SECEE| \
PCI_EXP_RTCTL_SENFEE| \
@@ -106,12 +127,12 @@ struct aer_stats {
PCI_ERR_ROOT_MULTI_COR_RCV | \
PCI_ERR_ROOT_MULTI_UNCOR_RCV)
-static int pcie_aer_disable;
+static bool pcie_aer_disable;
static pci_ers_result_t aer_root_reset(struct pci_dev *dev);
void pci_no_aer(void)
{
- pcie_aer_disable = 1;
+ pcie_aer_disable = true;
}
bool pci_aer_available(void)
@@ -137,7 +158,7 @@ static const char * const ecrc_policy_str[] = {
* enable_ecrc_checking - enable PCIe ECRC checking for a device
* @dev: the PCI device
*
- * Returns 0 on success, or negative on failure.
+ * Return: 0 on success, or negative on failure.
*/
static int enable_ecrc_checking(struct pci_dev *dev)
{
@@ -158,10 +179,10 @@ static int enable_ecrc_checking(struct pci_dev *dev)
}
/**
- * disable_ecrc_checking - disables PCIe ECRC checking for a device
+ * disable_ecrc_checking - disable PCIe ECRC checking for a device
* @dev: the PCI device
*
- * Returns 0 on success, or negative on failure.
+ * Return: 0 on success, or negative on failure.
*/
static int disable_ecrc_checking(struct pci_dev *dev)
{
@@ -179,7 +200,8 @@ static int disable_ecrc_checking(struct pci_dev *dev)
}
/**
- * pcie_set_ecrc_checking - set/unset PCIe ECRC checking for a device based on global policy
+ * pcie_set_ecrc_checking - set/unset PCIe ECRC checking for a device based
+ * on global policy
* @dev: the PCI device
*/
void pcie_set_ecrc_checking(struct pci_dev *dev)
@@ -229,8 +251,9 @@ int pcie_aer_is_native(struct pci_dev *dev)
return pcie_ports_native || host->native_aer;
}
+EXPORT_SYMBOL_NS_GPL(pcie_aer_is_native, "CXL");
-int pci_enable_pcie_error_reporting(struct pci_dev *dev)
+static int pci_enable_pcie_error_reporting(struct pci_dev *dev)
{
int rc;
@@ -240,19 +263,6 @@ int pci_enable_pcie_error_reporting(struct pci_dev *dev)
rc = pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_AER_FLAGS);
return pcibios_err_to_errno(rc);
}
-EXPORT_SYMBOL_GPL(pci_enable_pcie_error_reporting);
-
-int pci_disable_pcie_error_reporting(struct pci_dev *dev)
-{
- int rc;
-
- if (!pcie_aer_is_native(dev))
- return -EIO;
-
- rc = pcie_capability_clear_word(dev, PCI_EXP_DEVCTL, PCI_EXP_AER_FLAGS);
- return pcibios_err_to_errno(rc);
-}
-EXPORT_SYMBOL_GPL(pci_disable_pcie_error_reporting);
int pci_aer_clear_nonfatal_status(struct pci_dev *dev)
{
@@ -293,10 +303,10 @@ void pci_aer_clear_fatal_status(struct pci_dev *dev)
* pci_aer_raw_clear_status - Clear AER error registers.
* @dev: the PCI device
*
- * Clearing AER error status registers unconditionally, regardless of
+ * Clear AER error status registers unconditionally, regardless of
* whether they're owned by firmware or the OS.
*
- * Returns 0 on success, or negative on failure.
+ * Return: 0 on success, or negative on failure.
*/
int pci_aer_raw_clear_status(struct pci_dev *dev)
{
@@ -383,13 +393,22 @@ void pci_aer_init(struct pci_dev *dev)
if (!dev->aer_cap)
return;
- dev->aer_stats = kzalloc(sizeof(struct aer_stats), GFP_KERNEL);
+ dev->aer_info = kzalloc(sizeof(*dev->aer_info), GFP_KERNEL);
+ if (!dev->aer_info) {
+ dev->aer_cap = 0;
+ return;
+ }
+
+ ratelimit_state_init(&dev->aer_info->correctable_ratelimit,
+ DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST);
+ ratelimit_state_init(&dev->aer_info->nonfatal_ratelimit,
+ DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST);
/*
* We save/restore PCI_ERR_UNCOR_MASK, PCI_ERR_UNCOR_SEVER,
* PCI_ERR_COR_MASK, and PCI_ERR_CAP. Root and Root Complex Event
- * Collectors also implement PCI_ERR_ROOT_COMMAND (PCIe r5.0, sec
- * 7.8.4).
+ * Collectors also implement PCI_ERR_ROOT_COMMAND (PCIe r6.0, sec
+ * 7.8.4.9).
*/
n = pcie_cap_has_rtctl(dev) ? 5 : 4;
pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_ERR, sizeof(u32) * n);
@@ -404,8 +423,8 @@ void pci_aer_init(struct pci_dev *dev)
void pci_aer_exit(struct pci_dev *dev)
{
- kfree(dev->aer_stats);
- dev->aer_stats = NULL;
+ kfree(dev->aer_info);
+ dev->aer_info = NULL;
}
#define AER_AGENT_RECEIVER 0
@@ -446,10 +465,10 @@ void pci_aer_exit(struct pci_dev *dev)
/*
* AER error strings
*/
-static const char *aer_error_severity_string[] = {
- "Uncorrected (Non-Fatal)",
- "Uncorrected (Fatal)",
- "Corrected"
+static const char * const aer_error_severity_string[] = {
+ "Uncorrectable (Non-Fatal)",
+ "Uncorrectable (Fatal)",
+ "Correctable"
};
static const char *aer_error_layer[] = {
@@ -521,11 +540,11 @@ static const char *aer_uncorrectable_error_string[] = {
"AtomicOpBlocked", /* Bit Position 24 */
"TLPBlockedErr", /* Bit Position 25 */
"PoisonTLPBlocked", /* Bit Position 26 */
- NULL, /* Bit Position 27 */
- NULL, /* Bit Position 28 */
- NULL, /* Bit Position 29 */
- NULL, /* Bit Position 30 */
- NULL, /* Bit Position 31 */
+ "DMWrReqBlocked", /* Bit Position 27 */
+ "IDECheck", /* Bit Position 28 */
+ "MisIDETLP", /* Bit Position 29 */
+ "PCRC_CHECK", /* Bit Position 30 */
+ "TLPXlatBlocked", /* Bit Position 31 */
};
static const char *aer_agent_string[] = {
@@ -543,10 +562,10 @@ static const char *aer_agent_string[] = {
{ \
unsigned int i; \
struct pci_dev *pdev = to_pci_dev(dev); \
- u64 *stats = pdev->aer_stats->stats_array; \
+ u64 *stats = pdev->aer_info->stats_array; \
size_t len = 0; \
\
- for (i = 0; i < ARRAY_SIZE(pdev->aer_stats->stats_array); i++) {\
+ for (i = 0; i < ARRAY_SIZE(pdev->aer_info->stats_array); i++) { \
if (strings_array[i]) \
len += sysfs_emit_at(buf, len, "%s %llu\n", \
strings_array[i], \
@@ -557,7 +576,7 @@ static const char *aer_agent_string[] = {
i, stats[i]); \
} \
len += sysfs_emit_at(buf, len, "TOTAL_%s %llu\n", total_string, \
- pdev->aer_stats->total_field); \
+ pdev->aer_info->total_field); \
return len; \
} \
static DEVICE_ATTR_RO(name)
@@ -578,7 +597,7 @@ aer_stats_dev_attr(aer_dev_nonfatal, dev_nonfatal_errs,
char *buf) \
{ \
struct pci_dev *pdev = to_pci_dev(dev); \
- return sysfs_emit(buf, "%llu\n", pdev->aer_stats->field); \
+ return sysfs_emit(buf, "%llu\n", pdev->aer_info->field); \
} \
static DEVICE_ATTR_RO(name)
@@ -605,7 +624,7 @@ static umode_t aer_stats_attrs_are_visible(struct kobject *kobj,
struct device *dev = kobj_to_dev(kobj);
struct pci_dev *pdev = to_pci_dev(dev);
- if (!pdev->aer_stats)
+ if (!pdev->aer_info)
return 0;
if ((a == &dev_attr_aer_rootport_total_err_cor.attr ||
@@ -623,31 +642,137 @@ const struct attribute_group aer_stats_attr_group = {
.is_visible = aer_stats_attrs_are_visible,
};
+/*
+ * Ratelimit interval
+ * <=0: disabled with ratelimit.interval = 0
+ * >0: enabled with ratelimit.interval in ms
+ */
+#define aer_ratelimit_interval_attr(name, ratelimit) \
+ static ssize_t \
+ name##_show(struct device *dev, struct device_attribute *attr, \
+ char *buf) \
+ { \
+ struct pci_dev *pdev = to_pci_dev(dev); \
+ \
+ return sysfs_emit(buf, "%d\n", \
+ pdev->aer_info->ratelimit.interval); \
+ } \
+ \
+ static ssize_t \
+ name##_store(struct device *dev, struct device_attribute *attr, \
+ const char *buf, size_t count) \
+ { \
+ struct pci_dev *pdev = to_pci_dev(dev); \
+ int interval; \
+ \
+ if (!capable(CAP_SYS_ADMIN)) \
+ return -EPERM; \
+ \
+ if (kstrtoint(buf, 0, &interval) < 0) \
+ return -EINVAL; \
+ \
+ if (interval <= 0) \
+ interval = 0; \
+ else \
+ interval = msecs_to_jiffies(interval); \
+ \
+ pdev->aer_info->ratelimit.interval = interval; \
+ \
+ return count; \
+ } \
+ static DEVICE_ATTR_RW(name);
+
+#define aer_ratelimit_burst_attr(name, ratelimit) \
+ static ssize_t \
+ name##_show(struct device *dev, struct device_attribute *attr, \
+ char *buf) \
+ { \
+ struct pci_dev *pdev = to_pci_dev(dev); \
+ \
+ return sysfs_emit(buf, "%d\n", \
+ pdev->aer_info->ratelimit.burst); \
+ } \
+ \
+ static ssize_t \
+ name##_store(struct device *dev, struct device_attribute *attr, \
+ const char *buf, size_t count) \
+ { \
+ struct pci_dev *pdev = to_pci_dev(dev); \
+ int burst; \
+ \
+ if (!capable(CAP_SYS_ADMIN)) \
+ return -EPERM; \
+ \
+ if (kstrtoint(buf, 0, &burst) < 0) \
+ return -EINVAL; \
+ \
+ pdev->aer_info->ratelimit.burst = burst; \
+ \
+ return count; \
+ } \
+ static DEVICE_ATTR_RW(name);
+
+#define aer_ratelimit_attrs(name) \
+ aer_ratelimit_interval_attr(name##_ratelimit_interval_ms, \
+ name##_ratelimit) \
+ aer_ratelimit_burst_attr(name##_ratelimit_burst, \
+ name##_ratelimit)
+
+aer_ratelimit_attrs(correctable)
+aer_ratelimit_attrs(nonfatal)
+
+static struct attribute *aer_attrs[] = {
+ &dev_attr_correctable_ratelimit_interval_ms.attr,
+ &dev_attr_correctable_ratelimit_burst.attr,
+ &dev_attr_nonfatal_ratelimit_interval_ms.attr,
+ &dev_attr_nonfatal_ratelimit_burst.attr,
+ NULL
+};
+
+static umode_t aer_attrs_are_visible(struct kobject *kobj,
+ struct attribute *a, int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ if (!pdev->aer_info)
+ return 0;
+
+ return a->mode;
+}
+
+const struct attribute_group aer_attr_group = {
+ .name = "aer",
+ .attrs = aer_attrs,
+ .is_visible = aer_attrs_are_visible,
+};
+
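For context on what these attributes tune: pci_aer_init() seeds both ratelimit_state objects with DEFAULT_RATELIMIT_INTERVAL (5 * HZ) and DEFAULT_RATELIMIT_BURST (10), and __ratelimit() then allows at most "burst" messages per "interval". A minimal stand-alone sketch of that pattern, illustrative rather than taken from this patch:

#include <linux/pci.h>
#include <linux/ratelimit.h>

static struct ratelimit_state example_rs;

static void example_init(void)
{
	/* Allow at most 10 messages every 5 seconds by default */
	ratelimit_state_init(&example_rs, DEFAULT_RATELIMIT_INTERVAL,
			     DEFAULT_RATELIMIT_BURST);
}

static void example_log(struct pci_dev *dev)
{
	/* __ratelimit() returns non-zero while logging is still permitted */
	if (__ratelimit(&example_rs))
		pci_warn(dev, "correctable error (example)\n");
}

The *_ratelimit_interval_ms and *_ratelimit_burst attributes above simply let user space adjust those two fields per device after boot.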
static void pci_dev_aer_stats_incr(struct pci_dev *pdev,
struct aer_err_info *info)
{
unsigned long status = info->status & ~info->mask;
int i, max = -1;
u64 *counter = NULL;
- struct aer_stats *aer_stats = pdev->aer_stats;
+ struct aer_info *aer_info = pdev->aer_info;
- if (!aer_stats)
+ if (!aer_info)
return;
switch (info->severity) {
case AER_CORRECTABLE:
- aer_stats->dev_total_cor_errs++;
- counter = &aer_stats->dev_cor_errs[0];
+ aer_info->dev_total_cor_errs++;
+ counter = &aer_info->dev_cor_errs[0];
max = AER_MAX_TYPEOF_COR_ERRS;
break;
case AER_NONFATAL:
- aer_stats->dev_total_nonfatal_errs++;
- counter = &aer_stats->dev_nonfatal_errs[0];
+ aer_info->dev_total_nonfatal_errs++;
+ hwerr_log_error_type(HWERR_RECOV_PCI);
+ counter = &aer_info->dev_nonfatal_errs[0];
max = AER_MAX_TYPEOF_UNCOR_ERRS;
break;
case AER_FATAL:
- aer_stats->dev_total_fatal_errs++;
- counter = &aer_stats->dev_fatal_errs[0];
+ aer_info->dev_total_fatal_errs++;
+ counter = &aer_info->dev_fatal_errs[0];
max = AER_MAX_TYPEOF_UNCOR_ERRS;
break;
}
@@ -659,61 +784,105 @@ static void pci_dev_aer_stats_incr(struct pci_dev *pdev,
static void pci_rootport_aer_stats_incr(struct pci_dev *pdev,
struct aer_err_source *e_src)
{
- struct aer_stats *aer_stats = pdev->aer_stats;
+ struct aer_info *aer_info = pdev->aer_info;
- if (!aer_stats)
+ if (!aer_info)
return;
if (e_src->status & PCI_ERR_ROOT_COR_RCV)
- aer_stats->rootport_total_cor_errs++;
+ aer_info->rootport_total_cor_errs++;
if (e_src->status & PCI_ERR_ROOT_UNCOR_RCV) {
if (e_src->status & PCI_ERR_ROOT_FATAL_RCV)
- aer_stats->rootport_total_fatal_errs++;
+ aer_info->rootport_total_fatal_errs++;
else
- aer_stats->rootport_total_nonfatal_errs++;
+ aer_info->rootport_total_nonfatal_errs++;
}
}
-static void __print_tlp_header(struct pci_dev *dev,
- struct aer_header_log_regs *t)
+static int aer_ratelimit(struct pci_dev *dev, unsigned int severity)
{
- pci_err(dev, " TLP Header: %08x %08x %08x %08x\n",
- t->dw0, t->dw1, t->dw2, t->dw3);
+ if (!dev->aer_info)
+ return 1;
+
+ switch (severity) {
+ case AER_NONFATAL:
+ return __ratelimit(&dev->aer_info->nonfatal_ratelimit);
+ case AER_CORRECTABLE:
+ return __ratelimit(&dev->aer_info->correctable_ratelimit);
+ default:
+ return 1; /* Don't ratelimit fatal errors */
+ }
}
-static void __aer_print_error(struct pci_dev *dev,
- struct aer_err_info *info)
+static bool tlp_header_logged(u32 status, u32 capctl)
+{
+ /* Errors for which a header is always logged (PCIe r7.0 sec 6.2.7) */
+ if (status & AER_LOG_TLP_MASKS)
+ return true;
+
+ /* Completion Timeout header is only logged on capable devices */
+ if (status & PCI_ERR_UNC_COMP_TIME &&
+ capctl & PCI_ERR_CAP_COMP_TIME_LOG)
+ return true;
+
+ return false;
+}
+
+static void __aer_print_error(struct pci_dev *dev, struct aer_err_info *info)
{
const char **strings;
unsigned long status = info->status & ~info->mask;
- const char *level, *errmsg;
+ const char *level = info->level;
+ const char *errmsg;
int i;
- if (info->severity == AER_CORRECTABLE) {
+ if (info->severity == AER_CORRECTABLE)
strings = aer_correctable_error_string;
- level = KERN_WARNING;
- } else {
+ else
strings = aer_uncorrectable_error_string;
- level = KERN_ERR;
- }
for_each_set_bit(i, &status, 32) {
errmsg = strings[i];
if (!errmsg)
errmsg = "Unknown Error Bit";
- pci_printk(level, dev, " [%2d] %-22s%s\n", i, errmsg,
+ aer_printk(level, dev, " [%2d] %-22s%s\n", i, errmsg,
info->first_error == i ? " (First)" : "");
}
- pci_dev_aer_stats_incr(dev, info);
}
-void aer_print_error(struct pci_dev *dev, struct aer_err_info *info)
+static void aer_print_source(struct pci_dev *dev, struct aer_err_info *info,
+ bool found)
{
- int layer, agent;
- int id = ((dev->bus->number << 8) | dev->devfn);
- const char *level;
+ u16 source = info->id;
+
+ pci_info(dev, "%s%s error message received from %04x:%02x:%02x.%d%s\n",
+ info->multi_error_valid ? "Multiple " : "",
+ aer_error_severity_string[info->severity],
+ pci_domain_nr(dev->bus), PCI_BUS_NUM(source),
+ PCI_SLOT(source), PCI_FUNC(source),
+ found ? "" : " (no details found)");
+}
+
+void aer_print_error(struct aer_err_info *info, int i)
+{
+ struct pci_dev *dev;
+ int layer, agent, id;
+ const char *level = info->level;
+
+ if (WARN_ON_ONCE(i >= AER_MAX_MULTI_ERR_DEVICES))
+ return;
+
+ dev = info->dev[i];
+ id = pci_dev_id(dev);
+
+ pci_dev_aer_stats_incr(dev, info);
+ trace_aer_event(pci_name(dev), (info->status & ~info->mask),
+ info->severity, info->tlp_header_valid, &info->tlp);
+
+ if (!info->ratelimit_print[i])
+ return;
if (!info->status) {
pci_err(dev, "PCIe Bus Error: severity=%s, type=Inaccessible, (Unregistered Agent ID)\n",
@@ -724,38 +893,21 @@ void aer_print_error(struct pci_dev *dev, struct aer_err_info *info)
layer = AER_GET_LAYER_ERROR(info->severity, info->status);
agent = AER_GET_AGENT(info->severity, info->status);
- level = (info->severity == AER_CORRECTABLE) ? KERN_WARNING : KERN_ERR;
-
- pci_printk(level, dev, "PCIe Bus Error: severity=%s, type=%s, (%s)\n",
+ aer_printk(level, dev, "PCIe Bus Error: severity=%s, type=%s, (%s)\n",
aer_error_severity_string[info->severity],
aer_error_layer[layer], aer_agent_string[agent]);
- pci_printk(level, dev, " device [%04x:%04x] error status/mask=%08x/%08x\n",
+ aer_printk(level, dev, " device [%04x:%04x] error status/mask=%08x/%08x\n",
dev->vendor, dev->device, info->status, info->mask);
__aer_print_error(dev, info);
if (info->tlp_header_valid)
- __print_tlp_header(dev, &info->tlp);
+ pcie_print_tlp_log(dev, &info->tlp, level, dev_fmt(" "));
out:
if (info->id && info->error_dev_num > 1 && info->id == id)
pci_err(dev, " Error of this Agent is reported first\n");
-
- trace_aer_event(dev_name(&dev->dev), (info->status & ~info->mask),
- info->severity, info->tlp_header_valid, &info->tlp);
-}
-
-static void aer_print_port_info(struct pci_dev *dev, struct aer_err_info *info)
-{
- u8 bus = info->id >> 8;
- u8 devfn = info->id & 0xff;
-
- pci_info(dev, "%s%s error received: %04x:%02x:%02x.%d\n",
- info->multi_error_valid ? "Multiple " : "",
- aer_error_severity_string[info->severity],
- pci_domain_nr(dev->bus), bus, PCI_SLOT(devfn),
- PCI_FUNC(devfn));
}
#ifdef CONFIG_ACPI_APEI_PCIEAER
@@ -771,48 +923,57 @@ int cper_severity_to_aer(int cper_severity)
}
}
EXPORT_SYMBOL_GPL(cper_severity_to_aer);
+#endif
-void cper_print_aer(struct pci_dev *dev, int aer_severity,
- struct aer_capability_regs *aer)
+void pci_print_aer(struct pci_dev *dev, int aer_severity,
+ struct aer_capability_regs *aer)
{
int layer, agent, tlp_header_valid = 0;
u32 status, mask;
- struct aer_err_info info;
+ struct aer_err_info info = {
+ .severity = aer_severity,
+ .first_error = PCI_ERR_CAP_FEP(aer->cap_control),
+ };
if (aer_severity == AER_CORRECTABLE) {
status = aer->cor_status;
mask = aer->cor_mask;
+ info.level = KERN_WARNING;
} else {
status = aer->uncor_status;
mask = aer->uncor_mask;
- tlp_header_valid = status & AER_LOG_TLP_MASKS;
+ info.level = KERN_ERR;
+ tlp_header_valid = tlp_header_logged(status, aer->cap_control);
}
- layer = AER_GET_LAYER_ERROR(aer_severity, status);
- agent = AER_GET_AGENT(aer_severity, status);
-
- memset(&info, 0, sizeof(info));
- info.severity = aer_severity;
info.status = status;
info.mask = mask;
- info.first_error = PCI_ERR_CAP_FEP(aer->cap_control);
- pci_err(dev, "aer_status: 0x%08x, aer_mask: 0x%08x\n", status, mask);
+ pci_dev_aer_stats_incr(dev, &info);
+ trace_aer_event(pci_name(dev), (status & ~mask),
+ aer_severity, tlp_header_valid, &aer->header_log);
+
+ if (!aer_ratelimit(dev, info.severity))
+ return;
+
+ layer = AER_GET_LAYER_ERROR(aer_severity, status);
+ agent = AER_GET_AGENT(aer_severity, status);
+
+ aer_printk(info.level, dev, "aer_status: 0x%08x, aer_mask: 0x%08x\n",
+ status, mask);
__aer_print_error(dev, &info);
- pci_err(dev, "aer_layer=%s, aer_agent=%s\n",
- aer_error_layer[layer], aer_agent_string[agent]);
+ aer_printk(info.level, dev, "aer_layer=%s, aer_agent=%s\n",
+ aer_error_layer[layer], aer_agent_string[agent]);
if (aer_severity != AER_CORRECTABLE)
- pci_err(dev, "aer_uncor_severity: 0x%08x\n",
- aer->uncor_severity);
+ aer_printk(info.level, dev, "aer_uncor_severity: 0x%08x\n",
+ aer->uncor_severity);
if (tlp_header_valid)
- __print_tlp_header(dev, &aer->header_log);
-
- trace_aer_event(dev_name(&dev->dev), (status & ~mask),
- aer_severity, tlp_header_valid, &aer->header_log);
+ pcie_print_tlp_log(dev, &aer->header_log, info.level,
+ dev_fmt(" "));
}
-#endif
+EXPORT_SYMBOL_NS_GPL(pci_print_aer, "CXL");
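With pci_print_aer() now exported into the "CXL" symbol namespace, an external consumer (the CXL drivers being the intended users of that namespace) would import it and hand over a severity plus the raw capability registers. A hedged sketch of such a caller, with invented names:

#include <linux/aer.h>
#include <linux/module.h>
#include <linux/pci.h>

MODULE_IMPORT_NS("CXL");

/* Illustration only: log RAS data captured from a CXL downstream port */
static void example_report_cxl_ras(struct pci_dev *pdev,
				   struct aer_capability_regs *regs,
				   bool fatal)
{
	pci_print_aer(pdev, fatal ? AER_FATAL : AER_NONFATAL, regs);
}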
/**
* add_error_device - list device to be handled
@@ -821,12 +982,27 @@ void cper_print_aer(struct pci_dev *dev, int aer_severity,
*/
static int add_error_device(struct aer_err_info *e_info, struct pci_dev *dev)
{
- if (e_info->error_dev_num < AER_MAX_MULTI_ERR_DEVICES) {
- e_info->dev[e_info->error_dev_num] = pci_dev_get(dev);
- e_info->error_dev_num++;
- return 0;
+ int i = e_info->error_dev_num;
+
+ if (i >= AER_MAX_MULTI_ERR_DEVICES)
+ return -ENOSPC;
+
+ e_info->dev[i] = pci_dev_get(dev);
+ e_info->error_dev_num++;
+
+ /*
+ * Ratelimit AER log messages. "dev" is either the source
+ * identified by the root's Error Source ID or it has an unmasked
+ * error logged in its own AER Capability. Messages are emitted
+ * when "ratelimit_print[i]" is non-zero. If we will print detail
+ * for a downstream device, make sure we print the Error Source ID
+ * from the root as well.
+ */
+ if (aer_ratelimit(dev, e_info->severity)) {
+ e_info->ratelimit_print[i] = 1;
+ e_info->root_ratelimit_print = 1;
}
- return -ENOSPC;
+ return 0;
}
/**
@@ -841,24 +1017,24 @@ static bool is_error_source(struct pci_dev *dev, struct aer_err_info *e_info)
u16 reg16;
/*
- * When bus id is equal to 0, it might be a bad id
- * reported by root port.
+ * When bus ID is equal to 0, it might be a bad ID
+ * reported by Root Port.
*/
if ((PCI_BUS_NUM(e_info->id) != 0) &&
!(dev->bus->bus_flags & PCI_BUS_FLAGS_NO_AERSID)) {
/* Device ID match? */
- if (e_info->id == ((dev->bus->number << 8) | dev->devfn))
+ if (e_info->id == pci_dev_id(dev))
return true;
- /* Continue id comparing if there is no multiple error */
+ /* Continue ID comparing if there is no multiple error */
if (!e_info->multi_error_valid)
return false;
}
/*
* When either
- * 1) bus id is equal to 0. Some ports might lose the bus
- * id of error source id;
+ * 1) bus ID is equal to 0. Some ports might lose the bus
+ * ID of the Error Source ID;
* 2) bus flag PCI_BUS_FLAGS_NO_AERSID is set
* 3) There are multiple errors and prior ID comparing fails;
* We check AER status registers to find possible reporter.
@@ -896,7 +1072,8 @@ static int find_device_iter(struct pci_dev *dev, void *data)
/* List this device */
if (add_error_device(e_info, dev)) {
/* We cannot handle more... Stop iteration */
- /* TODO: Should print error message here? */
+ pci_err(dev, "Exceeded max supported (%d) devices with errors logged\n",
+ AER_MAX_MULTI_ERR_DEVICES);
return 1;
}
@@ -910,9 +1087,9 @@ static int find_device_iter(struct pci_dev *dev, void *data)
/**
* find_source_device - search through device hierarchy for source device
* @parent: pointer to Root Port pci_dev data structure
- * @e_info: including detailed error information such like id
+ * @e_info: including detailed error information such as ID
*
- * Return true if found.
+ * Return: true if found.
*
* Invoked by DPC when error is detected at the Root Port.
* Caller of this function must set id, severity, and multi_error_valid of
@@ -920,7 +1097,7 @@ static int find_device_iter(struct pci_dev *dev, void *data)
* e_info->error_dev_num and e_info->dev[], based on the given information.
*/
static bool find_source_device(struct pci_dev *parent,
- struct aer_err_info *e_info)
+ struct aer_err_info *e_info)
{
struct pci_dev *dev = parent;
int result;
@@ -938,21 +1115,158 @@ static bool find_source_device(struct pci_dev *parent,
else
pci_walk_bus(parent->subordinate, find_device_iter, e_info);
- if (!e_info->error_dev_num) {
- pci_info(parent, "can't find device of ID%04x\n", e_info->id);
+ if (!e_info->error_dev_num)
return false;
- }
return true;
}
+#ifdef CONFIG_PCIEAER_CXL
+
+/**
+ * pci_aer_unmask_internal_errors - unmask internal errors
+ * @dev: pointer to the pci_dev data structure
+ *
+ * Unmask internal errors in the Uncorrectable and Correctable Error
+ * Mask registers.
+ *
+ * Note: AER must be enabled and supported by the device, which must be
+ * checked in advance, e.g. with pcie_aer_is_native().
+ */
+static void pci_aer_unmask_internal_errors(struct pci_dev *dev)
+{
+ int aer = dev->aer_cap;
+ u32 mask;
+
+ pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_MASK, &mask);
+ mask &= ~PCI_ERR_UNC_INTN;
+ pci_write_config_dword(dev, aer + PCI_ERR_UNCOR_MASK, mask);
+
+ pci_read_config_dword(dev, aer + PCI_ERR_COR_MASK, &mask);
+ mask &= ~PCI_ERR_COR_INTERNAL;
+ pci_write_config_dword(dev, aer + PCI_ERR_COR_MASK, mask);
+}
+
+static bool is_cxl_mem_dev(struct pci_dev *dev)
+{
+ /*
+ * The capability, status, and control fields in Device 0,
+ * Function 0 DVSEC control the CXL functionality of the
+ * entire device (CXL 3.0, 8.1.3).
+ */
+ if (dev->devfn != PCI_DEVFN(0, 0))
+ return false;
+
+ /*
+ * CXL Memory Devices must have the 502h class code set (CXL
+ * 3.0, 8.1.12.1).
+ */
+ if ((dev->class >> 8) != PCI_CLASS_MEMORY_CXL)
+ return false;
+
+ return true;
+}
+
+static bool cxl_error_is_native(struct pci_dev *dev)
+{
+ struct pci_host_bridge *host = pci_find_host_bridge(dev->bus);
+
+ return (pcie_ports_native || host->native_aer);
+}
+
+static bool is_internal_error(struct aer_err_info *info)
+{
+ if (info->severity == AER_CORRECTABLE)
+ return info->status & PCI_ERR_COR_INTERNAL;
+
+ return info->status & PCI_ERR_UNC_INTN;
+}
+
+static int cxl_rch_handle_error_iter(struct pci_dev *dev, void *data)
+{
+ struct aer_err_info *info = (struct aer_err_info *)data;
+ const struct pci_error_handlers *err_handler;
+
+ if (!is_cxl_mem_dev(dev) || !cxl_error_is_native(dev))
+ return 0;
+
+ /* Protect dev->driver */
+ device_lock(&dev->dev);
+
+ err_handler = dev->driver ? dev->driver->err_handler : NULL;
+ if (!err_handler)
+ goto out;
+
+ if (info->severity == AER_CORRECTABLE) {
+ if (err_handler->cor_error_detected)
+ err_handler->cor_error_detected(dev);
+ } else if (err_handler->error_detected) {
+ if (info->severity == AER_NONFATAL)
+ err_handler->error_detected(dev, pci_channel_io_normal);
+ else if (info->severity == AER_FATAL)
+ err_handler->error_detected(dev, pci_channel_io_frozen);
+ }
+out:
+ device_unlock(&dev->dev);
+ return 0;
+}
+
+static void cxl_rch_handle_error(struct pci_dev *dev, struct aer_err_info *info)
+{
+ /*
+ * Internal errors of an RCEC indicate an AER error in an
+ * RCH's downstream port. Check and handle them in the CXL.mem
+ * device driver.
+ */
+ if (pci_pcie_type(dev) == PCI_EXP_TYPE_RC_EC &&
+ is_internal_error(info))
+ pcie_walk_rcec(dev, cxl_rch_handle_error_iter, info);
+}
+
+static int handles_cxl_error_iter(struct pci_dev *dev, void *data)
+{
+ bool *handles_cxl = data;
+
+ if (!*handles_cxl)
+ *handles_cxl = is_cxl_mem_dev(dev) && cxl_error_is_native(dev);
+
+ /* Non-zero terminates iteration */
+ return *handles_cxl;
+}
+
+static bool handles_cxl_errors(struct pci_dev *rcec)
+{
+ bool handles_cxl = false;
+
+ if (pci_pcie_type(rcec) == PCI_EXP_TYPE_RC_EC &&
+ pcie_aer_is_native(rcec))
+ pcie_walk_rcec(rcec, handles_cxl_error_iter, &handles_cxl);
+
+ return handles_cxl;
+}
+
+static void cxl_rch_enable_rcec(struct pci_dev *rcec)
+{
+ if (!handles_cxl_errors(rcec))
+ return;
+
+ pci_aer_unmask_internal_errors(rcec);
+ pci_info(rcec, "CXL: Internal errors unmasked\n");
+}
+
+#else
+static inline void cxl_rch_enable_rcec(struct pci_dev *dev) { }
+static inline void cxl_rch_handle_error(struct pci_dev *dev,
+ struct aer_err_info *info) { }
+#endif
+
/**
- * handle_error_source - handle logging error into an event log
+ * pci_aer_handle_error - handle logging error into an event log
* @dev: pointer to pci_dev data structure of error source device
* @info: comprehensive error information
*
* Invoked when an error being detected by Root Port.
*/
-static void handle_error_source(struct pci_dev *dev, struct aer_err_info *info)
+static void pci_aer_handle_error(struct pci_dev *dev, struct aer_err_info *info)
{
int aer = dev->aer_cap;
@@ -976,13 +1290,18 @@ static void handle_error_source(struct pci_dev *dev, struct aer_err_info *info)
pcie_do_recovery(dev, pci_channel_io_normal, aer_root_reset);
else if (info->severity == AER_FATAL)
pcie_do_recovery(dev, pci_channel_io_frozen, aer_root_reset);
+}
+
+static void handle_error_source(struct pci_dev *dev, struct aer_err_info *info)
+{
+ cxl_rch_handle_error(dev, info);
+ pci_aer_handle_error(dev, info);
pci_dev_put(dev);
}
#ifdef CONFIG_ACPI_APEI_PCIEAER
-#define AER_RECOVER_RING_ORDER 4
-#define AER_RECOVER_RING_SIZE (1 << AER_RECOVER_RING_ORDER)
+#define AER_RECOVER_RING_SIZE 16
struct aer_recover_entry {
u8 bus;
@@ -1004,12 +1323,24 @@ static void aer_recover_work_func(struct work_struct *work)
pdev = pci_get_domain_bus_and_slot(entry.domain, entry.bus,
entry.devfn);
if (!pdev) {
- pr_err("no pci_dev for %04x:%02x:%02x.%x\n",
- entry.domain, entry.bus,
- PCI_SLOT(entry.devfn), PCI_FUNC(entry.devfn));
+ pr_err_ratelimited("%04x:%02x:%02x.%x: no pci_dev found\n",
+ entry.domain, entry.bus,
+ PCI_SLOT(entry.devfn),
+ PCI_FUNC(entry.devfn));
continue;
}
- cper_print_aer(pdev, entry.severity, entry.regs);
+ pci_print_aer(pdev, entry.severity, entry.regs);
+
+ /*
+ * Memory for aer_capability_regs(entry.regs) is being
+ * allocated from the ghes_estatus_pool to protect it from
+ * overwriting when multiple sections are present in the
+ * error status. Thus free the same after processing the
+ * data.
+ */
+ ghes_estatus_pool_region_free((unsigned long)entry.regs,
+ sizeof(struct aer_capability_regs));
+
if (entry.severity == AER_NONFATAL)
pcie_do_recovery(pdev, pci_channel_io_normal,
aer_root_reset);
@@ -1051,18 +1382,25 @@ EXPORT_SYMBOL_GPL(aer_recover_queue);
/**
* aer_get_device_error_info - read error status from dev and store it to info
- * @dev: pointer to the device expected to have a error record
* @info: pointer to structure to store the error record
+ * @i: index into info->dev[]
*
- * Return 1 on success, 0 on error.
+ * Return: 1 on success, 0 on error.
*
* Note that @info is reused among all error devices. Clear fields properly.
*/
-int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info)
+int aer_get_device_error_info(struct aer_err_info *info, int i)
{
- int type = pci_pcie_type(dev);
- int aer = dev->aer_cap;
- int temp;
+ struct pci_dev *dev;
+ int type, aer;
+ u32 aercc;
+
+ if (i >= AER_MAX_MULTI_ERR_DEVICES)
+ return 0;
+
+ dev = info->dev[i];
+ aer = dev->aer_cap;
+ type = pci_pcie_type(dev);
/* Must reset in this function */
info->status = 0;
@@ -1093,19 +1431,16 @@ int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info)
return 0;
/* Get First Error Pointer */
- pci_read_config_dword(dev, aer + PCI_ERR_CAP, &temp);
- info->first_error = PCI_ERR_CAP_FEP(temp);
+ pci_read_config_dword(dev, aer + PCI_ERR_CAP, &aercc);
+ info->first_error = PCI_ERR_CAP_FEP(aercc);
- if (info->status & AER_LOG_TLP_MASKS) {
+ if (tlp_header_logged(info->status, aercc)) {
info->tlp_header_valid = 1;
- pci_read_config_dword(dev,
- aer + PCI_ERR_HEADER_LOG, &info->tlp.dw0);
- pci_read_config_dword(dev,
- aer + PCI_ERR_HEADER_LOG + 4, &info->tlp.dw1);
- pci_read_config_dword(dev,
- aer + PCI_ERR_HEADER_LOG + 8, &info->tlp.dw2);
- pci_read_config_dword(dev,
- aer + PCI_ERR_HEADER_LOG + 12, &info->tlp.dw3);
+ pcie_read_tlp_log(dev, aer + PCI_ERR_HEADER_LOG,
+ aer + PCI_ERR_PREFIX_LOG,
+ aer_tlp_log_len(dev, aercc),
+ aercc & PCI_ERR_CAP_TLP_LOG_FLIT,
+ &info->tlp);
}
}
@@ -1116,74 +1451,98 @@ static inline void aer_process_err_devices(struct aer_err_info *e_info)
{
int i;
- /* Report all before handle them, not to lost records by reset etc. */
+ /* Report all before handling them, to not lose records by reset etc. */
for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) {
- if (aer_get_device_error_info(e_info->dev[i], e_info))
- aer_print_error(e_info->dev[i], e_info);
+ if (aer_get_device_error_info(e_info, i))
+ aer_print_error(e_info, i);
}
for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) {
- if (aer_get_device_error_info(e_info->dev[i], e_info))
+ if (aer_get_device_error_info(e_info, i))
handle_error_source(e_info->dev[i], e_info);
}
}
/**
- * aer_isr_one_error - consume an error detected by root port
- * @rpc: pointer to the root port which holds an error
- * @e_src: pointer to an error source
+ * aer_isr_one_error_type - consume a Correctable or Uncorrectable Error
+ * detected by Root Port or RCEC
+ * @root: pointer to Root Port or RCEC that signaled AER interrupt
+ * @info: pointer to AER error info
*/
-static void aer_isr_one_error(struct aer_rpc *rpc,
- struct aer_err_source *e_src)
+static void aer_isr_one_error_type(struct pci_dev *root,
+ struct aer_err_info *info)
{
- struct pci_dev *pdev = rpc->rpd;
- struct aer_err_info e_info;
+ bool found;
- pci_rootport_aer_stats_incr(pdev, e_src);
+ found = find_source_device(root, info);
/*
- * There is a possibility that both correctable error and
- * uncorrectable error being logged. Report correctable error first.
+ * If we're going to log error messages, we've already set
+ * "info->root_ratelimit_print" and "info->ratelimit_print[i]" to
+ * non-zero (which enables printing) because this is either an
+ * ERR_FATAL or we found a device with an error logged in its AER
+ * Capability.
+ *
+ * If we didn't find the Error Source device, at least log the
+ * Requester ID from the ERR_* Message received by the Root Port or
+ * RCEC, ratelimited by the RP or RCEC.
*/
- if (e_src->status & PCI_ERR_ROOT_COR_RCV) {
- e_info.id = ERR_COR_ID(e_src->id);
- e_info.severity = AER_CORRECTABLE;
-
- if (e_src->status & PCI_ERR_ROOT_MULTI_COR_RCV)
- e_info.multi_error_valid = 1;
- else
- e_info.multi_error_valid = 0;
- aer_print_port_info(pdev, &e_info);
+ if (info->root_ratelimit_print ||
+ (!found && aer_ratelimit(root, info->severity)))
+ aer_print_source(root, info, found);
- if (find_source_device(pdev, &e_info))
- aer_process_err_devices(&e_info);
- }
-
- if (e_src->status & PCI_ERR_ROOT_UNCOR_RCV) {
- e_info.id = ERR_UNCOR_ID(e_src->id);
+ if (found)
+ aer_process_err_devices(info);
+}
- if (e_src->status & PCI_ERR_ROOT_FATAL_RCV)
- e_info.severity = AER_FATAL;
- else
- e_info.severity = AER_NONFATAL;
+/**
+ * aer_isr_one_error - consume error(s) signaled by an AER interrupt from
+ * Root Port or RCEC
+ * @root: pointer to Root Port or RCEC that signaled AER interrupt
+ * @e_src: pointer to an error source
+ */
+static void aer_isr_one_error(struct pci_dev *root,
+ struct aer_err_source *e_src)
+{
+ u32 status = e_src->status;
- if (e_src->status & PCI_ERR_ROOT_MULTI_UNCOR_RCV)
- e_info.multi_error_valid = 1;
- else
- e_info.multi_error_valid = 0;
+ pci_rootport_aer_stats_incr(root, e_src);
- aer_print_port_info(pdev, &e_info);
+ /*
+ * There is a possibility that both a correctable and an
+ * uncorrectable error are logged. Report the correctable error first.
+ */
+ if (status & PCI_ERR_ROOT_COR_RCV) {
+ int multi = status & PCI_ERR_ROOT_MULTI_COR_RCV;
+ struct aer_err_info e_info = {
+ .id = ERR_COR_ID(e_src->id),
+ .severity = AER_CORRECTABLE,
+ .level = KERN_WARNING,
+ .multi_error_valid = multi ? 1 : 0,
+ };
+
+ aer_isr_one_error_type(root, &e_info);
+ }
- if (find_source_device(pdev, &e_info))
- aer_process_err_devices(&e_info);
+ if (status & PCI_ERR_ROOT_UNCOR_RCV) {
+ int fatal = status & PCI_ERR_ROOT_FATAL_RCV;
+ int multi = status & PCI_ERR_ROOT_MULTI_UNCOR_RCV;
+ struct aer_err_info e_info = {
+ .id = ERR_UNCOR_ID(e_src->id),
+ .severity = fatal ? AER_FATAL : AER_NONFATAL,
+ .level = KERN_ERR,
+ .multi_error_valid = multi ? 1 : 0,
+ };
+
+ aer_isr_one_error_type(root, &e_info);
}
}
/**
- * aer_isr - consume errors detected by root port
+ * aer_isr - consume errors detected by Root Port
* @irq: IRQ assigned to Root Port
* @context: pointer to Root Port data structure
*
- * Invoked, as DPC, when root port records new detected error
+ * Invoked, as DPC, when Root Port records new detected error
*/
static irqreturn_t aer_isr(int irq, void *context)
{
@@ -1195,7 +1554,7 @@ static irqreturn_t aer_isr(int irq, void *context)
return IRQ_NONE;
while (kfifo_get(&rpc->aer_fifo, &e_src))
- aer_isr_one_error(rpc, &e_src);
+ aer_isr_one_error(rpc->rpd, &e_src);
return IRQ_HANDLED;
}
@@ -1227,6 +1586,28 @@ static irqreturn_t aer_irq(int irq, void *context)
return IRQ_WAKE_THREAD;
}
+static void aer_enable_irq(struct pci_dev *pdev)
+{
+ int aer = pdev->aer_cap;
+ u32 reg32;
+
+ /* Enable Root Port's interrupt in response to error messages */
+ pci_read_config_dword(pdev, aer + PCI_ERR_ROOT_COMMAND, &reg32);
+ reg32 |= ROOT_PORT_INTR_ON_MESG_MASK;
+ pci_write_config_dword(pdev, aer + PCI_ERR_ROOT_COMMAND, reg32);
+}
+
+static void aer_disable_irq(struct pci_dev *pdev)
+{
+ int aer = pdev->aer_cap;
+ u32 reg32;
+
+ /* Disable Root Port's interrupt in response to error messages */
+ pci_read_config_dword(pdev, aer + PCI_ERR_ROOT_COMMAND, &reg32);
+ reg32 &= ~ROOT_PORT_INTR_ON_MESG_MASK;
+ pci_write_config_dword(pdev, aer + PCI_ERR_ROOT_COMMAND, reg32);
+}
+
/**
* aer_enable_rootport - enable Root Port's interrupts when receiving messages
* @rpc: pointer to a Root Port data structure
@@ -1256,10 +1637,7 @@ static void aer_enable_rootport(struct aer_rpc *rpc)
pci_read_config_dword(pdev, aer + PCI_ERR_UNCOR_STATUS, &reg32);
pci_write_config_dword(pdev, aer + PCI_ERR_UNCOR_STATUS, reg32);
- /* Enable Root Port's interrupt in response to error messages */
- pci_read_config_dword(pdev, aer + PCI_ERR_ROOT_COMMAND, &reg32);
- reg32 |= ROOT_PORT_INTR_ON_MESG_MASK;
- pci_write_config_dword(pdev, aer + PCI_ERR_ROOT_COMMAND, reg32);
+ aer_enable_irq(pdev);
}
/**
@@ -1274,10 +1652,7 @@ static void aer_disable_rootport(struct aer_rpc *rpc)
int aer = pdev->aer_cap;
u32 reg32;
- /* Disable Root's interrupt in response to error messages */
- pci_read_config_dword(pdev, aer + PCI_ERR_ROOT_COMMAND, &reg32);
- reg32 &= ~ROOT_PORT_INTR_ON_MESG_MASK;
- pci_write_config_dword(pdev, aer + PCI_ERR_ROOT_COMMAND, reg32);
+ aer_disable_irq(pdev);
/* Clear Root's error status reg */
pci_read_config_dword(pdev, aer + PCI_ERR_ROOT_STATUS, &reg32);
@@ -1335,11 +1710,28 @@ static int aer_probe(struct pcie_device *dev)
return status;
}
+ cxl_rch_enable_rcec(port);
aer_enable_rootport(rpc);
pci_info(port, "enabled with IRQ %d\n", dev->irq);
return 0;
}
+static int aer_suspend(struct pcie_device *dev)
+{
+ struct aer_rpc *rpc = get_service_data(dev);
+
+ aer_disable_rootport(rpc);
+ return 0;
+}
+
+static int aer_resume(struct pcie_device *dev)
+{
+ struct aer_rpc *rpc = get_service_data(dev);
+
+ aer_enable_rootport(rpc);
+ return 0;
+}
+
/**
* aer_root_reset - reset Root Port hierarchy, RCEC, or RCiEP
* @dev: pointer to Root Port, RCEC, or RCiEP
@@ -1372,12 +1764,8 @@ static pci_ers_result_t aer_root_reset(struct pci_dev *dev)
*/
aer = root ? root->aer_cap : 0;
- if ((host->native_aer || pcie_ports_native) && aer) {
- /* Disable Root's interrupt in response to error messages */
- pci_read_config_dword(root, aer + PCI_ERR_ROOT_COMMAND, &reg32);
- reg32 &= ~ROOT_PORT_INTR_ON_MESG_MASK;
- pci_write_config_dword(root, aer + PCI_ERR_ROOT_COMMAND, reg32);
- }
+ if ((host->native_aer || pcie_ports_native) && aer)
+ aer_disable_irq(root);
if (type == PCI_EXP_TYPE_RC_EC || type == PCI_EXP_TYPE_RC_END) {
rc = pcie_reset_flr(dev, PCI_RESET_DO_RESET);
@@ -1396,10 +1784,7 @@ static pci_ers_result_t aer_root_reset(struct pci_dev *dev)
pci_read_config_dword(root, aer + PCI_ERR_ROOT_STATUS, &reg32);
pci_write_config_dword(root, aer + PCI_ERR_ROOT_STATUS, reg32);
- /* Enable Root Port's interrupt in response to error messages */
- pci_read_config_dword(root, aer + PCI_ERR_ROOT_COMMAND, &reg32);
- reg32 |= ROOT_PORT_INTR_ON_MESG_MASK;
- pci_write_config_dword(root, aer + PCI_ERR_ROOT_COMMAND, reg32);
+ aer_enable_irq(root);
}
return rc ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
@@ -1411,13 +1796,15 @@ static struct pcie_port_service_driver aerdriver = {
.service = PCIE_PORT_SERVICE_AER,
.probe = aer_probe,
+ .suspend = aer_suspend,
+ .resume = aer_resume,
.remove = aer_remove,
};
/**
- * pcie_aer_init - register AER root service driver
+ * pcie_aer_init - register AER service driver
*
- * Invoked when AER root service driver is loaded.
+ * Invoked when AER service driver is loaded.
*/
int __init pcie_aer_init(void)
{
diff --git a/drivers/pci/pcie/aer_inject.c b/drivers/pci/pcie/aer_inject.c
index 2dab275d252f..91acc7b17f68 100644
--- a/drivers/pci/pcie/aer_inject.c
+++ b/drivers/pci/pcie/aer_inject.c
@@ -6,7 +6,7 @@
* trigger various real hardware errors. Software based error
* injection can fake almost all kinds of errors with the help of a
* user space helper tool aer-inject, which can be gotten from:
- * https://git.kernel.org/cgit/linux/kernel/git/gong.chen/aer-inject.git/
+ * https://github.com/intel/aer-inject.git
*
* Copyright 2009 Intel Corporation.
* Huang Ying <ying.huang@intel.com>
@@ -430,7 +430,7 @@ static int aer_inject(struct aer_error_inj *einj)
else
rperr->root_status |= PCI_ERR_ROOT_COR_RCV;
rperr->source_id &= 0xffff0000;
- rperr->source_id |= (einj->bus << 8) | devfn;
+ rperr->source_id |= PCI_DEVID(einj->bus, devfn);
}
if (einj->uncor_status) {
if (rperr->root_status & PCI_ERR_ROOT_UNCOR_RCV)
@@ -443,7 +443,7 @@ static int aer_inject(struct aer_error_inj *einj)
rperr->root_status |= PCI_ERR_ROOT_NONFATAL_RCV;
rperr->root_status |= PCI_ERR_ROOT_UNCOR_RCV;
rperr->source_id &= 0x0000ffff;
- rperr->source_id |= ((einj->bus << 8) | devfn) << 16;
+ rperr->source_id |= PCI_DEVID(einj->bus, devfn) << 16;
}
spin_unlock_irqrestore(&inject_lock, flags);
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 3dafba0b5f41..cedea47a3547 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -7,40 +7,223 @@
* Copyright (C) Shaohua Li (shaohua.li@intel.com)
*/
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/build_bug.h>
#include <linux/kernel.h>
+#include <linux/limits.h>
#include <linux/math.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
+#include <linux/of.h>
#include <linux/pci.h>
#include <linux/pci_regs.h>
#include <linux/errno.h>
#include <linux/pm.h>
#include <linux/init.h>
+#include <linux/printk.h>
#include <linux/slab.h>
-#include <linux/jiffies.h>
-#include <linux/delay.h>
+#include <linux/time.h>
+
#include "../pci.h"
+void pci_save_ltr_state(struct pci_dev *dev)
+{
+ int ltr;
+ struct pci_cap_saved_state *save_state;
+ u32 *cap;
+
+ if (!pci_is_pcie(dev))
+ return;
+
+ ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
+ if (!ltr)
+ return;
+
+ save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR);
+ if (!save_state) {
+ pci_err(dev, "no suspend buffer for LTR; ASPM issues possible after resume\n");
+ return;
+ }
+
+ /* Some broken devices only support dword access to LTR */
+ cap = &save_state->cap.data[0];
+ pci_read_config_dword(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, cap);
+}
+
+void pci_restore_ltr_state(struct pci_dev *dev)
+{
+ struct pci_cap_saved_state *save_state;
+ int ltr;
+ u32 *cap;
+
+ save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR);
+ ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
+ if (!save_state || !ltr)
+ return;
+
+ /* Some broken devices only support dword access to LTR */
+ cap = &save_state->cap.data[0];
+ pci_write_config_dword(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, *cap);
+}
+
+void pci_configure_aspm_l1ss(struct pci_dev *pdev)
+{
+ int rc;
+
+ pdev->l1ss = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS);
+
+ rc = pci_add_ext_cap_save_buffer(pdev, PCI_EXT_CAP_ID_L1SS,
+ 2 * sizeof(u32));
+ if (rc)
+ pci_err(pdev, "unable to allocate ASPM L1SS save buffer (%pe)\n",
+ ERR_PTR(rc));
+}
+
+void pci_save_aspm_l1ss_state(struct pci_dev *pdev)
+{
+ struct pci_dev *parent = pdev->bus->self;
+ struct pci_cap_saved_state *save_state;
+ u32 *cap;
+
+ /*
+ * If this is a Downstream Port, we never restore the L1SS state
+ * directly; we only restore it when we restore the state of the
+ * Upstream Port below it.
+ */
+ if (pcie_downstream_port(pdev) || !parent)
+ return;
+
+ if (!pdev->l1ss || !parent->l1ss)
+ return;
+
+ /*
+ * Save L1 substate configuration. The ASPM L0s/L1 configuration
+ * in PCI_EXP_LNKCTL_ASPMC is saved by pci_save_pcie_state().
+ */
+ save_state = pci_find_saved_ext_cap(pdev, PCI_EXT_CAP_ID_L1SS);
+ if (!save_state)
+ return;
+
+ cap = &save_state->cap.data[0];
+ pci_read_config_dword(pdev, pdev->l1ss + PCI_L1SS_CTL2, cap++);
+ pci_read_config_dword(pdev, pdev->l1ss + PCI_L1SS_CTL1, cap++);
+
+ /*
+ * Save parent's L1 substate configuration so we have it for
+ * pci_restore_aspm_l1ss_state(pdev) to restore.
+ */
+ save_state = pci_find_saved_ext_cap(parent, PCI_EXT_CAP_ID_L1SS);
+ if (!save_state)
+ return;
+
+ cap = &save_state->cap.data[0];
+ pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CTL2, cap++);
+ pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1, cap++);
+}
+
+void pci_restore_aspm_l1ss_state(struct pci_dev *pdev)
+{
+ struct pci_cap_saved_state *pl_save_state, *cl_save_state;
+ struct pci_dev *parent = pdev->bus->self;
+ u32 *cap, pl_ctl1, pl_ctl2, pl_l1_2_enable;
+ u32 cl_ctl1, cl_ctl2, cl_l1_2_enable;
+ u16 clnkctl, plnkctl;
+
+ /*
+ * In case BIOS enabled L1.2 when resuming, we need to disable it first
+ * on the downstream component before the upstream. So, don't attempt to
+ * restore either until we are at the downstream component.
+ */
+ if (pcie_downstream_port(pdev) || !parent)
+ return;
+
+ if (!pdev->l1ss || !parent->l1ss)
+ return;
+
+ cl_save_state = pci_find_saved_ext_cap(pdev, PCI_EXT_CAP_ID_L1SS);
+ pl_save_state = pci_find_saved_ext_cap(parent, PCI_EXT_CAP_ID_L1SS);
+ if (!cl_save_state || !pl_save_state)
+ return;
+
+ cap = &cl_save_state->cap.data[0];
+ cl_ctl2 = *cap++;
+ cl_ctl1 = *cap;
+ cap = &pl_save_state->cap.data[0];
+ pl_ctl2 = *cap++;
+ pl_ctl1 = *cap;
+
+ /* Make sure L0s/L1 are disabled before updating L1SS config */
+ pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &clnkctl);
+ pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &plnkctl);
+ if (FIELD_GET(PCI_EXP_LNKCTL_ASPMC, clnkctl) ||
+ FIELD_GET(PCI_EXP_LNKCTL_ASPMC, plnkctl)) {
+ pcie_capability_write_word(pdev, PCI_EXP_LNKCTL,
+ clnkctl & ~PCI_EXP_LNKCTL_ASPMC);
+ pcie_capability_write_word(parent, PCI_EXP_LNKCTL,
+ plnkctl & ~PCI_EXP_LNKCTL_ASPMC);
+ }
+
+ /*
+ * Disable L1.2 on this downstream endpoint device first, followed
+ * by the upstream
+ */
+ pci_clear_and_set_config_dword(pdev, pdev->l1ss + PCI_L1SS_CTL1,
+ PCI_L1SS_CTL1_L1_2_MASK, 0);
+ pci_clear_and_set_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
+ PCI_L1SS_CTL1_L1_2_MASK, 0);
+
+ /*
+ * In addition, Common_Mode_Restore_Time and LTR_L1.2_THRESHOLD
+ * in PCI_L1SS_CTL1 must be programmed *before* setting the L1.2
+ * enable bits, even though they're all in PCI_L1SS_CTL1.
+ */
+ pl_l1_2_enable = pl_ctl1 & PCI_L1SS_CTL1_L1_2_MASK;
+ pl_ctl1 &= ~PCI_L1SS_CTL1_L1_2_MASK;
+ cl_l1_2_enable = cl_ctl1 & PCI_L1SS_CTL1_L1_2_MASK;
+ cl_ctl1 &= ~PCI_L1SS_CTL1_L1_2_MASK;
+
+ /* Write back without enables first (above we cleared them in ctl1) */
+ pci_write_config_dword(parent, parent->l1ss + PCI_L1SS_CTL2, pl_ctl2);
+ pci_write_config_dword(pdev, pdev->l1ss + PCI_L1SS_CTL2, cl_ctl2);
+ pci_write_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1, pl_ctl1);
+ pci_write_config_dword(pdev, pdev->l1ss + PCI_L1SS_CTL1, cl_ctl1);
+
+ /* Then write back the enables */
+ if (pl_l1_2_enable || cl_l1_2_enable) {
+ pci_write_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
+ pl_ctl1 | pl_l1_2_enable);
+ pci_write_config_dword(pdev, pdev->l1ss + PCI_L1SS_CTL1,
+ cl_ctl1 | cl_l1_2_enable);
+ }
+
+ /* Restore L0s/L1 if they were enabled */
+ if (FIELD_GET(PCI_EXP_LNKCTL_ASPMC, clnkctl) ||
+ FIELD_GET(PCI_EXP_LNKCTL_ASPMC, plnkctl)) {
+ pcie_capability_write_word(parent, PCI_EXP_LNKCTL, plnkctl);
+ pcie_capability_write_word(pdev, PCI_EXP_LNKCTL, clnkctl);
+ }
+}
+
+#ifdef CONFIG_PCIEASPM
+
#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "pcie_aspm."
-/* Note: those are not register definitions */
-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
-#define ASPM_STATE_L1 (4) /* L1 state */
-#define ASPM_STATE_L1_1 (8) /* ASPM L1.1 state */
-#define ASPM_STATE_L1_2 (0x10) /* ASPM L1.2 state */
-#define ASPM_STATE_L1_1_PCIPM (0x20) /* PCI PM L1.1 state */
-#define ASPM_STATE_L1_2_PCIPM (0x40) /* PCI PM L1.2 state */
-#define ASPM_STATE_L1_SS_PCIPM (ASPM_STATE_L1_1_PCIPM | ASPM_STATE_L1_2_PCIPM)
-#define ASPM_STATE_L1_2_MASK (ASPM_STATE_L1_2 | ASPM_STATE_L1_2_PCIPM)
-#define ASPM_STATE_L1SS (ASPM_STATE_L1_1 | ASPM_STATE_L1_1_PCIPM |\
- ASPM_STATE_L1_2_MASK)
-#define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
-#define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1 | \
- ASPM_STATE_L1SS)
+/* Note: these are not register definitions */
+#define PCIE_LINK_STATE_L0S_UP BIT(0) /* Upstream direction L0s state */
+#define PCIE_LINK_STATE_L0S_DW BIT(1) /* Downstream direction L0s state */
+static_assert(PCIE_LINK_STATE_L0S == (PCIE_LINK_STATE_L0S_UP | PCIE_LINK_STATE_L0S_DW));
+
+#define PCIE_LINK_STATE_L1_SS_PCIPM (PCIE_LINK_STATE_L1_1_PCIPM |\
+ PCIE_LINK_STATE_L1_2_PCIPM)
+#define PCIE_LINK_STATE_L1_2_MASK (PCIE_LINK_STATE_L1_2 |\
+ PCIE_LINK_STATE_L1_2_PCIPM)
+#define PCIE_LINK_STATE_L1SS (PCIE_LINK_STATE_L1_1 |\
+ PCIE_LINK_STATE_L1_1_PCIPM |\
+ PCIE_LINK_STATE_L1_2_MASK)
struct pcie_link_state {
struct pci_dev *pdev; /* Upstream component of the Link */
@@ -53,7 +236,8 @@ struct pcie_link_state {
u32 aspm_support:7; /* Supported ASPM state */
u32 aspm_enabled:7; /* Enabled ASPM state */
u32 aspm_capable:7; /* Capable ASPM state with latency */
- u32 aspm_default:7; /* Default ASPM state by BIOS */
+ u32 aspm_default:7; /* Default ASPM state by BIOS or
+ override */
u32 aspm_disable:7; /* Disabled ASPM state */
/* Clock PM state */
@@ -63,7 +247,7 @@ struct pcie_link_state {
u32 clkpm_disable:1; /* Clock PM disabled */
};
-static int aspm_disabled, aspm_force;
+static bool aspm_disabled, aspm_force;
static bool aspm_support_enabled = true;
static DEFINE_MUTEX(aspm_lock);
static LIST_HEAD(link_list);
@@ -112,10 +296,10 @@ static int policy_to_aspm_state(struct pcie_link_state *link)
return 0;
case POLICY_POWERSAVE:
/* Enable ASPM L0s/L1 */
- return (ASPM_STATE_L0S | ASPM_STATE_L1);
+ return PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1;
case POLICY_POWER_SUPERSAVE:
/* Enable Everything */
- return ASPM_STATE_ALL;
+ return PCIE_LINK_STATE_ASPM_ALL;
case POLICY_DEFAULT:
return link->aspm_default;
}
@@ -138,16 +322,42 @@ static int policy_to_clkpm_state(struct pcie_link_state *link)
return 0;
}
+static void pci_update_aspm_saved_state(struct pci_dev *dev)
+{
+ struct pci_cap_saved_state *save_state;
+ u16 *cap, lnkctl, aspm_ctl;
+
+ save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
+ if (!save_state)
+ return;
+
+ pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &lnkctl);
+
+ /*
+ * Update ASPM and CLKREQ bits of LNKCTL in save_state. We only
+ * write PCI_EXP_LNKCTL_CCC during enumeration, so it shouldn't
+ * change after being captured in save_state.
+ */
+ aspm_ctl = lnkctl & (PCI_EXP_LNKCTL_ASPMC | PCI_EXP_LNKCTL_CLKREQ_EN);
+ lnkctl &= ~(PCI_EXP_LNKCTL_ASPMC | PCI_EXP_LNKCTL_CLKREQ_EN);
+
+ /* Depends on pci_save_pcie_state(): cap[1] is LNKCTL */
+ cap = (u16 *)&save_state->cap.data[0];
+ cap[1] = lnkctl | aspm_ctl;
+}
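For reference, the cap[1] index relies on the order in which pci_save_pcie_state() fills the save buffer (DEVCTL, LNKCTL, SLTCTL, RTCTL, DEVCTL2, LNKCTL2, SLTCTL2 in recent kernels, so LNKCTL lands in slot 1); worth re-checking against drivers/pci/pci.c if that order ever changes.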
+
static void pcie_set_clkpm_nocheck(struct pcie_link_state *link, int enable)
{
struct pci_dev *child;
struct pci_bus *linkbus = link->pdev->subordinate;
u32 val = enable ? PCI_EXP_LNKCTL_CLKREQ_EN : 0;
- list_for_each_entry(child, &linkbus->devices, bus_list)
+ list_for_each_entry(child, &linkbus->devices, bus_list) {
pcie_capability_clear_and_set_word(child, PCI_EXP_LNKCTL,
PCI_EXP_LNKCTL_CLKREQ_EN,
val);
+ pci_update_aspm_saved_state(child);
+ }
link->clkpm_enabled = !!enable;
}
@@ -199,7 +409,7 @@ static void pcie_clkpm_cap_init(struct pcie_link_state *link, int blacklist)
static void pcie_aspm_configure_common_clock(struct pcie_link_state *link)
{
int same_clock = 1;
- u16 reg16, parent_reg, child_reg[8];
+ u16 reg16, ccc, parent_old_ccc, child_old_ccc[8];
struct pci_dev *child, *parent = link->pdev;
struct pci_bus *linkbus = parent->subordinate;
/*
@@ -221,6 +431,7 @@ static void pcie_aspm_configure_common_clock(struct pcie_link_state *link)
/* Port might be already in common clock mode */
pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &reg16);
+ parent_old_ccc = reg16 & PCI_EXP_LNKCTL_CCC;
if (same_clock && (reg16 & PCI_EXP_LNKCTL_CCC)) {
bool consistent = true;
@@ -237,44 +448,39 @@ static void pcie_aspm_configure_common_clock(struct pcie_link_state *link)
pci_info(parent, "ASPM: current common clock configuration is inconsistent, reconfiguring\n");
}
+ ccc = same_clock ? PCI_EXP_LNKCTL_CCC : 0;
/* Configure downstream component, all functions */
list_for_each_entry(child, &linkbus->devices, bus_list) {
pcie_capability_read_word(child, PCI_EXP_LNKCTL, &reg16);
- child_reg[PCI_FUNC(child->devfn)] = reg16;
- if (same_clock)
- reg16 |= PCI_EXP_LNKCTL_CCC;
- else
- reg16 &= ~PCI_EXP_LNKCTL_CCC;
- pcie_capability_write_word(child, PCI_EXP_LNKCTL, reg16);
+ child_old_ccc[PCI_FUNC(child->devfn)] = reg16 & PCI_EXP_LNKCTL_CCC;
+ pcie_capability_clear_and_set_word(child, PCI_EXP_LNKCTL,
+ PCI_EXP_LNKCTL_CCC, ccc);
}
/* Configure upstream component */
- pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &reg16);
- parent_reg = reg16;
- if (same_clock)
- reg16 |= PCI_EXP_LNKCTL_CCC;
- else
- reg16 &= ~PCI_EXP_LNKCTL_CCC;
- pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16);
+ pcie_capability_clear_and_set_word(parent, PCI_EXP_LNKCTL,
+ PCI_EXP_LNKCTL_CCC, ccc);
if (pcie_retrain_link(link->pdev, true)) {
/* Training failed. Restore common clock configurations */
pci_err(parent, "ASPM: Could not configure common clock\n");
list_for_each_entry(child, &linkbus->devices, bus_list)
- pcie_capability_write_word(child, PCI_EXP_LNKCTL,
- child_reg[PCI_FUNC(child->devfn)]);
- pcie_capability_write_word(parent, PCI_EXP_LNKCTL, parent_reg);
+ pcie_capability_clear_and_set_word(child, PCI_EXP_LNKCTL,
+ PCI_EXP_LNKCTL_CCC,
+ child_old_ccc[PCI_FUNC(child->devfn)]);
+ pcie_capability_clear_and_set_word(parent, PCI_EXP_LNKCTL,
+ PCI_EXP_LNKCTL_CCC, parent_old_ccc);
}
}
/* Convert L0s latency encoding to ns */
static u32 calc_l0s_latency(u32 lnkcap)
{
- u32 encoding = (lnkcap & PCI_EXP_LNKCAP_L0SEL) >> 12;
+ u32 encoding = FIELD_GET(PCI_EXP_LNKCAP_L0SEL, lnkcap);
if (encoding == 0x7)
- return (5 * 1000); /* > 4us */
+ return 5 * NSEC_PER_USEC; /* > 4us */
return (64 << encoding);
}
@@ -282,26 +488,26 @@ static u32 calc_l0s_latency(u32 lnkcap)
static u32 calc_l0s_acceptable(u32 encoding)
{
if (encoding == 0x7)
- return -1U;
+ return U32_MAX;
return (64 << encoding);
}
/* Convert L1 latency encoding to ns */
static u32 calc_l1_latency(u32 lnkcap)
{
- u32 encoding = (lnkcap & PCI_EXP_LNKCAP_L1EL) >> 15;
+ u32 encoding = FIELD_GET(PCI_EXP_LNKCAP_L1EL, lnkcap);
if (encoding == 0x7)
- return (65 * 1000); /* > 64us */
- return (1000 << encoding);
+ return 65 * NSEC_PER_USEC; /* > 64us */
+ return NSEC_PER_USEC << encoding;
}
/* Convert L1 acceptable latency encoding to ns */
static u32 calc_l1_acceptable(u32 encoding)
{
if (encoding == 0x7)
- return -1U;
- return (1000 << encoding);
+ return U32_MAX;
+ return NSEC_PER_USEC << encoding;
}
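A standalone userspace sketch of these conversions, assuming only the encodings handled above (not kernel code):

#include <stdio.h>

/* Mirrors the LNKCAP L0s/L1 Exit Latency encodings converted above. */
static unsigned int l0s_ns(unsigned int enc) { return enc == 0x7 ? 5000 : 64u << enc; }
static unsigned int l1_ns(unsigned int enc)  { return enc == 0x7 ? 65000 : 1000u << enc; }

int main(void)
{
	/* Encoding 3: 512 ns for L0s, 8 us for L1; 0x7 means "more than the maximum". */
	printf("L0s enc 3 -> %u ns, enc 7 -> %u ns\n", l0s_ns(3), l0s_ns(0x7));
	printf("L1  enc 3 -> %u ns, enc 7 -> %u ns\n", l1_ns(3), l1_ns(0x7));
	return 0;
}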
/* Convert L1SS T_pwr encoding to usec */
@@ -329,33 +535,33 @@ static u32 calc_l12_pwron(struct pci_dev *pdev, u32 scale, u32 val)
*/
static void encode_l12_threshold(u32 threshold_us, u32 *scale, u32 *value)
{
- u64 threshold_ns = (u64) threshold_us * 1000;
+ u64 threshold_ns = (u64)threshold_us * NSEC_PER_USEC;
/*
* LTR_L1.2_THRESHOLD_Value ("value") is a 10-bit field with max
* value of 0x3ff.
*/
- if (threshold_ns <= 0x3ff * 1) {
+ if (threshold_ns <= 1 * FIELD_MAX(PCI_L1SS_CTL1_LTR_L12_TH_VALUE)) {
*scale = 0; /* Value times 1ns */
*value = threshold_ns;
- } else if (threshold_ns <= 0x3ff * 32) {
+ } else if (threshold_ns <= 32 * FIELD_MAX(PCI_L1SS_CTL1_LTR_L12_TH_VALUE)) {
*scale = 1; /* Value times 32ns */
*value = roundup(threshold_ns, 32) / 32;
- } else if (threshold_ns <= 0x3ff * 1024) {
+ } else if (threshold_ns <= 1024 * FIELD_MAX(PCI_L1SS_CTL1_LTR_L12_TH_VALUE)) {
*scale = 2; /* Value times 1024ns */
*value = roundup(threshold_ns, 1024) / 1024;
- } else if (threshold_ns <= 0x3ff * 32768) {
+ } else if (threshold_ns <= 32768 * FIELD_MAX(PCI_L1SS_CTL1_LTR_L12_TH_VALUE)) {
*scale = 3; /* Value times 32768ns */
*value = roundup(threshold_ns, 32768) / 32768;
- } else if (threshold_ns <= 0x3ff * 1048576) {
+ } else if (threshold_ns <= 1048576 * FIELD_MAX(PCI_L1SS_CTL1_LTR_L12_TH_VALUE)) {
*scale = 4; /* Value times 1048576ns */
*value = roundup(threshold_ns, 1048576) / 1048576;
- } else if (threshold_ns <= 0x3ff * (u64) 33554432) {
+ } else if (threshold_ns <= (u64)33554432 * FIELD_MAX(PCI_L1SS_CTL1_LTR_L12_TH_VALUE)) {
*scale = 5; /* Value times 33554432ns */
*value = roundup(threshold_ns, 33554432) / 33554432;
} else {
*scale = 5;
- *value = 0x3ff; /* Max representable value */
+ *value = FIELD_MAX(PCI_L1SS_CTL1_LTR_L12_TH_VALUE);
}
}
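To see the scale/value rounding concretely, a userspace sketch of the same encoding (the 0x3ff ceiling mirrors FIELD_MAX(PCI_L1SS_CTL1_LTR_L12_TH_VALUE); everything else is illustrative):

#include <stdio.h>
#include <stdint.h>

#define TH_VALUE_MAX 0x3ff	/* 10-bit LTR_L1.2_THRESHOLD_Value field */

static void encode(uint32_t threshold_us, uint32_t *scale, uint32_t *value)
{
	/* unit for scale i is 2^(5 * i) ns, as in the kernel helper above */
	static const uint64_t unit[] = { 1, 32, 1024, 32768, 1048576, 33554432 };
	uint64_t ns = (uint64_t)threshold_us * 1000;
	int i;

	for (i = 0; i < 6; i++) {
		if (ns <= TH_VALUE_MAX * unit[i]) {
			*scale = i;
			*value = (ns + unit[i] - 1) / unit[i];	/* round up */
			return;
		}
	}
	*scale = 5;
	*value = TH_VALUE_MAX;	/* clamp to the max representable value */
}

int main(void)
{
	uint32_t s, v;

	encode(55, &s, &v);	/* 55 us -> scale 2 (1024 ns units), value 54 (~55.3 us) */
	printf("scale=%u value=%u\n", s, v);
	return 0;
}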
@@ -375,11 +581,11 @@ static void pcie_aspm_check_latency(struct pci_dev *endpoint)
link = endpoint->bus->self->link_state;
/* Calculate endpoint L0s acceptable latency */
- encoding = (endpoint->devcap & PCI_EXP_DEVCAP_L0S) >> 6;
+ encoding = FIELD_GET(PCI_EXP_DEVCAP_L0S, endpoint->devcap);
acceptable_l0s = calc_l0s_acceptable(encoding);
/* Calculate endpoint L1 acceptable latency */
- encoding = (endpoint->devcap & PCI_EXP_DEVCAP_L1) >> 9;
+ encoding = FIELD_GET(PCI_EXP_DEVCAP_L1, endpoint->devcap);
acceptable_l1 = calc_l1_acceptable(encoding);
while (link) {
@@ -396,14 +602,14 @@ static void pcie_aspm_check_latency(struct pci_dev *endpoint)
latency_dw_l1 = calc_l1_latency(lnkcap_dw);
/* Check upstream direction L0s latency */
- if ((link->aspm_capable & ASPM_STATE_L0S_UP) &&
+ if ((link->aspm_capable & PCIE_LINK_STATE_L0S_UP) &&
(latency_up_l0s > acceptable_l0s))
- link->aspm_capable &= ~ASPM_STATE_L0S_UP;
+ link->aspm_capable &= ~PCIE_LINK_STATE_L0S_UP;
/* Check downstream direction L0s latency */
- if ((link->aspm_capable & ASPM_STATE_L0S_DW) &&
+ if ((link->aspm_capable & PCIE_LINK_STATE_L0S_DW) &&
(latency_dw_l0s > acceptable_l0s))
- link->aspm_capable &= ~ASPM_STATE_L0S_DW;
+ link->aspm_capable &= ~PCIE_LINK_STATE_L0S_DW;
/*
* Check L1 latency.
* Every switch on the path to root complex need 1
@@ -418,26 +624,15 @@ static void pcie_aspm_check_latency(struct pci_dev *endpoint)
* substate latencies (and hence do not do any check).
*/
latency = max_t(u32, latency_up_l1, latency_dw_l1);
- if ((link->aspm_capable & ASPM_STATE_L1) &&
+ if ((link->aspm_capable & PCIE_LINK_STATE_L1) &&
(latency + l1_switch_latency > acceptable_l1))
- link->aspm_capable &= ~ASPM_STATE_L1;
- l1_switch_latency += 1000;
+ link->aspm_capable &= ~PCIE_LINK_STATE_L1;
+ l1_switch_latency += NSEC_PER_USEC;
link = link->parent;
}
}
-static void pci_clear_and_set_dword(struct pci_dev *pdev, int pos,
- u32 clear, u32 set)
-{
- u32 val;
-
- pci_read_config_dword(pdev, pos, &val);
- val &= ~clear;
- val |= set;
- pci_write_config_dword(pdev, pos, val);
-}
-
/* Calculate L1.2 PM substate timing parameters */
static void aspm_calc_l12_info(struct pcie_link_state *link,
u32 parent_l1ss_cap, u32 child_l1ss_cap)
@@ -450,22 +645,24 @@ static void aspm_calc_l12_info(struct pcie_link_state *link,
u32 pl1_2_enables, cl1_2_enables;
/* Choose the greater of the two Port Common_Mode_Restore_Times */
- val1 = (parent_l1ss_cap & PCI_L1SS_CAP_CM_RESTORE_TIME) >> 8;
- val2 = (child_l1ss_cap & PCI_L1SS_CAP_CM_RESTORE_TIME) >> 8;
+ val1 = FIELD_GET(PCI_L1SS_CAP_CM_RESTORE_TIME, parent_l1ss_cap);
+ val2 = FIELD_GET(PCI_L1SS_CAP_CM_RESTORE_TIME, child_l1ss_cap);
t_common_mode = max(val1, val2);
/* Choose the greater of the two Port T_POWER_ON times */
- val1 = (parent_l1ss_cap & PCI_L1SS_CAP_P_PWR_ON_VALUE) >> 19;
- scale1 = (parent_l1ss_cap & PCI_L1SS_CAP_P_PWR_ON_SCALE) >> 16;
- val2 = (child_l1ss_cap & PCI_L1SS_CAP_P_PWR_ON_VALUE) >> 19;
- scale2 = (child_l1ss_cap & PCI_L1SS_CAP_P_PWR_ON_SCALE) >> 16;
+ val1 = FIELD_GET(PCI_L1SS_CAP_P_PWR_ON_VALUE, parent_l1ss_cap);
+ scale1 = FIELD_GET(PCI_L1SS_CAP_P_PWR_ON_SCALE, parent_l1ss_cap);
+ val2 = FIELD_GET(PCI_L1SS_CAP_P_PWR_ON_VALUE, child_l1ss_cap);
+ scale2 = FIELD_GET(PCI_L1SS_CAP_P_PWR_ON_SCALE, child_l1ss_cap);
if (calc_l12_pwron(parent, scale1, val1) >
calc_l12_pwron(child, scale2, val2)) {
- ctl2 |= scale1 | (val1 << 3);
+ ctl2 |= FIELD_PREP(PCI_L1SS_CTL2_T_PWR_ON_SCALE, scale1) |
+ FIELD_PREP(PCI_L1SS_CTL2_T_PWR_ON_VALUE, val1);
t_power_on = calc_l12_pwron(parent, scale1, val1);
} else {
- ctl2 |= scale2 | (val2 << 3);
+ ctl2 |= FIELD_PREP(PCI_L1SS_CTL2_T_PWR_ON_SCALE, scale2) |
+ FIELD_PREP(PCI_L1SS_CTL2_T_PWR_ON_VALUE, val2);
t_power_on = calc_l12_pwron(child, scale2, val2);
}
@@ -481,7 +678,9 @@ static void aspm_calc_l12_info(struct pcie_link_state *link,
*/
l1_2_threshold = 2 + 4 + t_common_mode + t_power_on;
encode_l12_threshold(l1_2_threshold, &scale, &value);
- ctl1 |= t_common_mode << 8 | scale << 29 | value << 16;
+ ctl1 |= FIELD_PREP(PCI_L1SS_CTL1_CM_RESTORE_TIME, t_common_mode) |
+ FIELD_PREP(PCI_L1SS_CTL1_LTR_L12_TH_VALUE, value) |
+ FIELD_PREP(PCI_L1SS_CTL1_LTR_L12_TH_SCALE, scale);
/* Some broken devices only support dword access to L1 SS */
pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1, &pctl1);
@@ -498,10 +697,12 @@ static void aspm_calc_l12_info(struct pcie_link_state *link,
cl1_2_enables = cctl1 & PCI_L1SS_CTL1_L1_2_MASK;
if (pl1_2_enables || cl1_2_enables) {
- pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1,
- PCI_L1SS_CTL1_L1_2_MASK, 0);
- pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
- PCI_L1SS_CTL1_L1_2_MASK, 0);
+ pci_clear_and_set_config_dword(child,
+ child->l1ss + PCI_L1SS_CTL1,
+ PCI_L1SS_CTL1_L1_2_MASK, 0);
+ pci_clear_and_set_config_dword(parent,
+ parent->l1ss + PCI_L1SS_CTL1,
+ PCI_L1SS_CTL1_L1_2_MASK, 0);
}
/* Program T_POWER_ON times in both ports */
@@ -509,22 +710,26 @@ static void aspm_calc_l12_info(struct pcie_link_state *link,
pci_write_config_dword(child, child->l1ss + PCI_L1SS_CTL2, ctl2);
/* Program Common_Mode_Restore_Time in upstream device */
- pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
- PCI_L1SS_CTL1_CM_RESTORE_TIME, ctl1);
+ pci_clear_and_set_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
+ PCI_L1SS_CTL1_CM_RESTORE_TIME, ctl1);
/* Program LTR_L1.2_THRESHOLD time in both ports */
- pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
- PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
- PCI_L1SS_CTL1_LTR_L12_TH_SCALE, ctl1);
- pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1,
- PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
- PCI_L1SS_CTL1_LTR_L12_TH_SCALE, ctl1);
+ pci_clear_and_set_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
+ PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
+ PCI_L1SS_CTL1_LTR_L12_TH_SCALE,
+ ctl1);
+ pci_clear_and_set_config_dword(child, child->l1ss + PCI_L1SS_CTL1,
+ PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
+ PCI_L1SS_CTL1_LTR_L12_TH_SCALE,
+ ctl1);
if (pl1_2_enables || cl1_2_enables) {
- pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1, 0,
- pl1_2_enables);
- pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1, 0,
- cl1_2_enables);
+ pci_clear_and_set_config_dword(parent,
+ parent->l1ss + PCI_L1SS_CTL1, 0,
+ pl1_2_enables);
+ pci_clear_and_set_config_dword(child,
+ child->l1ss + PCI_L1SS_CTL1, 0,
+ cl1_2_enables);
}
}
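Putting the timing pieces together with assumed example numbers: if the larger Common_Mode_Restore_Time is 10 us and the larger T_POWER_ON works out to 40 us, the LTR_L1.2_THRESHOLD computed above is 2 + 4 + 10 + 40 = 56 us, which encode_l12_threshold() stores as scale 2, value 55 (55 * 1024 ns, roughly 56.3 us).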
@@ -557,13 +762,13 @@ static void aspm_l1ss_init(struct pcie_link_state *link)
child_l1ss_cap &= ~PCI_L1SS_CAP_ASPM_L1_2;
if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_ASPM_L1_1)
- link->aspm_support |= ASPM_STATE_L1_1;
+ link->aspm_support |= PCIE_LINK_STATE_L1_1;
if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_ASPM_L1_2)
- link->aspm_support |= ASPM_STATE_L1_2;
+ link->aspm_support |= PCIE_LINK_STATE_L1_2;
if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_PCIPM_L1_1)
- link->aspm_support |= ASPM_STATE_L1_1_PCIPM;
+ link->aspm_support |= PCIE_LINK_STATE_L1_1_PCIPM;
if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_PCIPM_L1_2)
- link->aspm_support |= ASPM_STATE_L1_2_PCIPM;
+ link->aspm_support |= PCIE_LINK_STATE_L1_2_PCIPM;
if (parent_l1ss_cap)
pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
@@ -573,29 +778,49 @@ static void aspm_l1ss_init(struct pcie_link_state *link)
&child_l1ss_ctl1);
if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_ASPM_L1_1)
- link->aspm_enabled |= ASPM_STATE_L1_1;
+ link->aspm_enabled |= PCIE_LINK_STATE_L1_1;
if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_ASPM_L1_2)
- link->aspm_enabled |= ASPM_STATE_L1_2;
+ link->aspm_enabled |= PCIE_LINK_STATE_L1_2;
if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_PCIPM_L1_1)
- link->aspm_enabled |= ASPM_STATE_L1_1_PCIPM;
+ link->aspm_enabled |= PCIE_LINK_STATE_L1_1_PCIPM;
if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_PCIPM_L1_2)
- link->aspm_enabled |= ASPM_STATE_L1_2_PCIPM;
+ link->aspm_enabled |= PCIE_LINK_STATE_L1_2_PCIPM;
- if (link->aspm_support & ASPM_STATE_L1_2_MASK)
+ if (link->aspm_support & PCIE_LINK_STATE_L1_2_MASK)
aspm_calc_l12_info(link, parent_l1ss_cap, child_l1ss_cap);
}
+#define FLAG(x, y, d) (((x) & (PCIE_LINK_STATE_##y)) ? d : "")
+
+static void pcie_aspm_override_default_link_state(struct pcie_link_state *link)
+{
+ struct pci_dev *pdev = link->downstream;
+ u32 override;
+
+ /* For devicetree platforms, enable L0s and L1 by default */
+ if (of_have_populated_dt()) {
+ if (link->aspm_support & PCIE_LINK_STATE_L0S)
+ link->aspm_default |= PCIE_LINK_STATE_L0S;
+ if (link->aspm_support & PCIE_LINK_STATE_L1)
+ link->aspm_default |= PCIE_LINK_STATE_L1;
+ override = link->aspm_default & ~link->aspm_enabled;
+ if (override)
+ pci_info(pdev, "ASPM: default states%s%s\n",
+ FLAG(override, L0S, " L0s"),
+ FLAG(override, L1, " L1"));
+ }
+}
+
static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
{
struct pci_dev *child = link->downstream, *parent = link->pdev;
- u32 parent_lnkcap, child_lnkcap;
u16 parent_lnkctl, child_lnkctl;
struct pci_bus *linkbus = parent->subordinate;
if (blacklist) {
/* Set enabled/disable so that we will disable ASPM later */
- link->aspm_enabled = ASPM_STATE_ALL;
- link->aspm_disable = ASPM_STATE_ALL;
+ link->aspm_enabled = PCIE_LINK_STATE_ASPM_ALL;
+ link->aspm_disable = PCIE_LINK_STATE_ASPM_ALL;
return;
}
@@ -603,9 +828,8 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
* If ASPM not supported, don't mess with the clocks and link,
* bail out now.
*/
- pcie_capability_read_dword(parent, PCI_EXP_LNKCAP, &parent_lnkcap);
- pcie_capability_read_dword(child, PCI_EXP_LNKCAP, &child_lnkcap);
- if (!(parent_lnkcap & child_lnkcap & PCI_EXP_LNKCAP_ASPMS))
+ if (!(parent->aspm_l0s_support && child->aspm_l0s_support) &&
+ !(parent->aspm_l1_support && child->aspm_l1_support))
return;
/* Configure common clock before checking latencies */
@@ -617,11 +841,18 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
* read-only Link Capabilities may change depending on common clock
* configuration (PCIe r5.0, sec 7.5.3.6).
*/
- pcie_capability_read_dword(parent, PCI_EXP_LNKCAP, &parent_lnkcap);
- pcie_capability_read_dword(child, PCI_EXP_LNKCAP, &child_lnkcap);
pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &parent_lnkctl);
pcie_capability_read_word(child, PCI_EXP_LNKCTL, &child_lnkctl);
+ /* Disable L0s/L1 before updating L1SS config */
+ if (FIELD_GET(PCI_EXP_LNKCTL_ASPMC, child_lnkctl) ||
+ FIELD_GET(PCI_EXP_LNKCTL_ASPMC, parent_lnkctl)) {
+ pcie_capability_write_word(child, PCI_EXP_LNKCTL,
+ child_lnkctl & ~PCI_EXP_LNKCTL_ASPMC);
+ pcie_capability_write_word(parent, PCI_EXP_LNKCTL,
+ parent_lnkctl & ~PCI_EXP_LNKCTL_ASPMC);
+ }
+
/*
* Setup L0s state
*
@@ -629,26 +860,35 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
* given link unless components on both sides of the link each
* support L0s.
*/
- if (parent_lnkcap & child_lnkcap & PCI_EXP_LNKCAP_ASPM_L0S)
- link->aspm_support |= ASPM_STATE_L0S;
+ if (parent->aspm_l0s_support && child->aspm_l0s_support)
+ link->aspm_support |= PCIE_LINK_STATE_L0S;
if (child_lnkctl & PCI_EXP_LNKCTL_ASPM_L0S)
- link->aspm_enabled |= ASPM_STATE_L0S_UP;
+ link->aspm_enabled |= PCIE_LINK_STATE_L0S_UP;
if (parent_lnkctl & PCI_EXP_LNKCTL_ASPM_L0S)
- link->aspm_enabled |= ASPM_STATE_L0S_DW;
+ link->aspm_enabled |= PCIE_LINK_STATE_L0S_DW;
/* Setup L1 state */
- if (parent_lnkcap & child_lnkcap & PCI_EXP_LNKCAP_ASPM_L1)
- link->aspm_support |= ASPM_STATE_L1;
+ if (parent->aspm_l1_support && child->aspm_l1_support)
+ link->aspm_support |= PCIE_LINK_STATE_L1;
if (parent_lnkctl & child_lnkctl & PCI_EXP_LNKCTL_ASPM_L1)
- link->aspm_enabled |= ASPM_STATE_L1;
+ link->aspm_enabled |= PCIE_LINK_STATE_L1;
aspm_l1ss_init(link);
+ /* Restore L0s/L1 if they were enabled */
+ if (FIELD_GET(PCI_EXP_LNKCTL_ASPMC, child_lnkctl) ||
+ FIELD_GET(PCI_EXP_LNKCTL_ASPMC, parent_lnkctl)) {
+ pcie_capability_write_word(parent, PCI_EXP_LNKCTL, parent_lnkctl);
+ pcie_capability_write_word(child, PCI_EXP_LNKCTL, child_lnkctl);
+ }
+
/* Save default state */
link->aspm_default = link->aspm_enabled;
+ pcie_aspm_override_default_link_state(link);
+
/* Setup initial capable state. Will be updated later */
link->aspm_capable = link->aspm_support;
@@ -662,58 +902,40 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
}
}
-/* Configure the ASPM L1 substates */
+/* Configure the ASPM L1 substates. Caller must disable L1 first. */
static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state)
{
- u32 val, enable_req;
+ u32 val = 0;
struct pci_dev *child = link->downstream, *parent = link->pdev;
- enable_req = (link->aspm_enabled ^ state) & state;
+ if (state & PCIE_LINK_STATE_L1_1)
+ val |= PCI_L1SS_CTL1_ASPM_L1_1;
+ if (state & PCIE_LINK_STATE_L1_2)
+ val |= PCI_L1SS_CTL1_ASPM_L1_2;
+ if (state & PCIE_LINK_STATE_L1_1_PCIPM)
+ val |= PCI_L1SS_CTL1_PCIPM_L1_1;
+ if (state & PCIE_LINK_STATE_L1_2_PCIPM)
+ val |= PCI_L1SS_CTL1_PCIPM_L1_2;
/*
- * Here are the rules specified in the PCIe spec for enabling L1SS:
- * - When enabling L1.x, enable bit at parent first, then at child
- * - When disabling L1.x, disable bit at child first, then at parent
- * - When enabling ASPM L1.x, need to disable L1
- * (at child followed by parent).
- * - The ASPM/PCIPM L1.2 must be disabled while programming timing
+ * PCIe r6.2, sec 5.5.4, rules for enabling L1 PM Substates:
+ * - Clear L1.x enable bits at child first, then at parent
+ * - Set L1.x enable bits at parent first, then at child
+ * - ASPM/PCIPM L1.2 must be disabled while programming timing
* parameters
- *
- * To keep it simple, disable all L1SS bits first, and later enable
- * what is needed.
*/
/* Disable all L1 substates */
- pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1,
- PCI_L1SS_CTL1_L1SS_MASK, 0);
- pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
- PCI_L1SS_CTL1_L1SS_MASK, 0);
- /*
- * If needed, disable L1, and it gets enabled later
- * in pcie_config_aspm_link().
- */
- if (enable_req & (ASPM_STATE_L1_1 | ASPM_STATE_L1_2)) {
- pcie_capability_clear_and_set_word(child, PCI_EXP_LNKCTL,
- PCI_EXP_LNKCTL_ASPM_L1, 0);
- pcie_capability_clear_and_set_word(parent, PCI_EXP_LNKCTL,
- PCI_EXP_LNKCTL_ASPM_L1, 0);
- }
-
- val = 0;
- if (state & ASPM_STATE_L1_1)
- val |= PCI_L1SS_CTL1_ASPM_L1_1;
- if (state & ASPM_STATE_L1_2)
- val |= PCI_L1SS_CTL1_ASPM_L1_2;
- if (state & ASPM_STATE_L1_1_PCIPM)
- val |= PCI_L1SS_CTL1_PCIPM_L1_1;
- if (state & ASPM_STATE_L1_2_PCIPM)
- val |= PCI_L1SS_CTL1_PCIPM_L1_2;
+ pci_clear_and_set_config_dword(child, child->l1ss + PCI_L1SS_CTL1,
+ PCI_L1SS_CTL1_L1SS_MASK, 0);
+ pci_clear_and_set_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
+ PCI_L1SS_CTL1_L1SS_MASK, 0);
/* Enable what we need to enable */
- pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
- PCI_L1SS_CTL1_L1SS_MASK, val);
- pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1,
- PCI_L1SS_CTL1_L1SS_MASK, val);
+ pci_clear_and_set_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
+ PCI_L1SS_CTL1_L1SS_MASK, val);
+ pci_clear_and_set_config_dword(child, child->l1ss + PCI_L1SS_CTL1,
+ PCI_L1SS_CTL1_L1SS_MASK, val);
}
static void pcie_config_aspm_dev(struct pci_dev *pdev, u32 val)
@@ -732,45 +954,60 @@ static void pcie_config_aspm_link(struct pcie_link_state *link, u32 state)
state &= (link->aspm_capable & ~link->aspm_disable);
/* Can't enable any substates if L1 is not enabled */
- if (!(state & ASPM_STATE_L1))
- state &= ~ASPM_STATE_L1SS;
+ if (!(state & PCIE_LINK_STATE_L1))
+ state &= ~PCIE_LINK_STATE_L1SS;
/* Spec says both ports must be in D0 before enabling PCI PM substates*/
if (parent->current_state != PCI_D0 || child->current_state != PCI_D0) {
- state &= ~ASPM_STATE_L1_SS_PCIPM;
- state |= (link->aspm_enabled & ASPM_STATE_L1_SS_PCIPM);
+ state &= ~PCIE_LINK_STATE_L1_SS_PCIPM;
+ state |= (link->aspm_enabled & PCIE_LINK_STATE_L1_SS_PCIPM);
}
/* Nothing to do if the link is already in the requested state */
if (link->aspm_enabled == state)
return;
/* Convert ASPM state to upstream/downstream ASPM register state */
- if (state & ASPM_STATE_L0S_UP)
+ if (state & PCIE_LINK_STATE_L0S_UP)
dwstream |= PCI_EXP_LNKCTL_ASPM_L0S;
- if (state & ASPM_STATE_L0S_DW)
+ if (state & PCIE_LINK_STATE_L0S_DW)
upstream |= PCI_EXP_LNKCTL_ASPM_L0S;
- if (state & ASPM_STATE_L1) {
+ if (state & PCIE_LINK_STATE_L1) {
upstream |= PCI_EXP_LNKCTL_ASPM_L1;
dwstream |= PCI_EXP_LNKCTL_ASPM_L1;
}
- if (link->aspm_capable & ASPM_STATE_L1SS)
- pcie_config_aspm_l1ss(link, state);
-
/*
- * Spec 2.0 suggests all functions should be configured the
- * same setting for ASPM. Enabling ASPM L1 should be done in
- * upstream component first and then downstream, and vice
- * versa for disabling ASPM L1. Spec doesn't mention L0S.
+ * Per PCIe r6.2, sec 5.5.4, setting either or both of the enable
+ * bits for ASPM L1 PM Substates must be done while ASPM L1 is
+ * disabled. Disable L1 here and apply new configuration after L1SS
+ * configuration has been completed.
+ *
+ * Per sec 7.5.3.7, when disabling ASPM L1, software must disable
+ * it in the Downstream component prior to disabling it in the
+ * Upstream component, and ASPM L1 must be enabled in the Upstream
+ * component prior to enabling it in the Downstream component.
+ *
+ * Sec 7.5.3.7 also recommends programming the same ASPM Control
+ * value for all functions of a multi-function device.
*/
- if (state & ASPM_STATE_L1)
- pcie_config_aspm_dev(parent, upstream);
+ list_for_each_entry(child, &linkbus->devices, bus_list)
+ pcie_config_aspm_dev(child, 0);
+ pcie_config_aspm_dev(parent, 0);
+
+ if (link->aspm_capable & PCIE_LINK_STATE_L1SS)
+ pcie_config_aspm_l1ss(link, state);
+
+ pcie_config_aspm_dev(parent, upstream);
list_for_each_entry(child, &linkbus->devices, bus_list)
pcie_config_aspm_dev(child, dwstream);
- if (!(state & ASPM_STATE_L1))
- pcie_config_aspm_dev(parent, upstream);
link->aspm_enabled = state;
+
+ /* Update latest ASPM configuration in saved context */
+ pci_save_aspm_l1ss_state(link->downstream);
+ pci_update_aspm_saved_state(link->downstream);
+ pci_save_aspm_l1ss_state(parent);
+ pci_update_aspm_saved_state(parent);
}
static void pcie_config_aspm_path(struct pcie_link_state *link)
@@ -940,6 +1177,78 @@ out:
up_read(&pci_bus_sem);
}
+void pci_bridge_reconfigure_ltr(struct pci_dev *pdev)
+{
+ struct pci_dev *bridge;
+ u32 ctl;
+
+ bridge = pci_upstream_bridge(pdev);
+ if (bridge && bridge->ltr_path) {
+ pcie_capability_read_dword(bridge, PCI_EXP_DEVCTL2, &ctl);
+ if (!(ctl & PCI_EXP_DEVCTL2_LTR_EN)) {
+ pci_dbg(bridge, "re-enabling LTR\n");
+ pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2,
+ PCI_EXP_DEVCTL2_LTR_EN);
+ }
+ }
+}
+
+void pci_configure_ltr(struct pci_dev *pdev)
+{
+ struct pci_host_bridge *host = pci_find_host_bridge(pdev->bus);
+ struct pci_dev *bridge;
+ u32 cap, ctl;
+
+ if (!pci_is_pcie(pdev))
+ return;
+
+ pcie_capability_read_dword(pdev, PCI_EXP_DEVCAP2, &cap);
+ if (!(cap & PCI_EXP_DEVCAP2_LTR))
+ return;
+
+ pcie_capability_read_dword(pdev, PCI_EXP_DEVCTL2, &ctl);
+ if (ctl & PCI_EXP_DEVCTL2_LTR_EN) {
+ if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT) {
+ pdev->ltr_path = 1;
+ return;
+ }
+
+ bridge = pci_upstream_bridge(pdev);
+ if (bridge && bridge->ltr_path)
+ pdev->ltr_path = 1;
+
+ return;
+ }
+
+ if (!host->native_ltr)
+ return;
+
+ /*
+ * Software must not enable LTR in an Endpoint unless the Root
+ * Complex and all intermediate Switches indicate support for LTR.
+ * PCIe r4.0, sec 6.18.
+ */
+ if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT) {
+ pcie_capability_set_word(pdev, PCI_EXP_DEVCTL2,
+ PCI_EXP_DEVCTL2_LTR_EN);
+ pdev->ltr_path = 1;
+ return;
+ }
+
+ /*
+ * If we're configuring a hot-added device, LTR was likely
+ * disabled in the upstream bridge, so re-enable it before enabling
+ * it in the new device.
+ */
+ bridge = pci_upstream_bridge(pdev);
+ if (bridge && bridge->ltr_path) {
+ pci_bridge_reconfigure_ltr(pdev);
+ pcie_capability_set_word(pdev, PCI_EXP_DEVCTL2,
+ PCI_EXP_DEVCTL2_LTR_EN);
+ pdev->ltr_path = 1;
+ }
+}
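As an illustration of the hierarchy walk above (topology assumed): for Root Port -> Switch Upstream Port -> Switch Downstream Port -> Endpoint, LTR is enabled top-down, each device setting ltr_path only after its upstream bridge has it, so the Endpoint's LTR_EN is set only once the whole path back to the Root Complex advertises LTR support.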
+
/* Recheck latencies and update aspm_capable for links under the root */
static void pcie_update_aspm_capable(struct pcie_link_state *root)
{
@@ -981,16 +1290,16 @@ void pcie_aspm_exit_link_state(struct pci_dev *pdev)
parent_link = link->parent;
/*
- * link->downstream is a pointer to the pci_dev of function 0. If
- * we remove that function, the pci_dev is about to be deallocated,
- * so we can't use link->downstream again. Free the link state to
- * avoid this.
+	 * Free the parent link state no later than when function 0 (i.e.
+	 * link->downstream) is removed.
*
- * If we're removing a non-0 function, it's possible we could
- * retain the link state, but PCIe r6.0, sec 7.5.3.7, recommends
- * programming the same ASPM Control value for all functions of
- * multi-function devices, so disable ASPM for all of them.
+ * Do not free the link state any earlier. If function 0 is a
+ * switch upstream port, this link state is parent_link to all
+ * subordinate ones.
*/
+ if (pdev != link->downstream)
+ goto out;
+
pcie_config_aspm_link(link, 0);
list_del(&link->sibling);
free_link_state(link);
@@ -1001,10 +1310,35 @@ void pcie_aspm_exit_link_state(struct pci_dev *pdev)
pcie_config_aspm_path(parent_link);
}
+ out:
mutex_unlock(&aspm_lock);
up_read(&pci_bus_sem);
}
+/*
+ * @pdev: the root port or switch downstream port
+ * @locked: whether pci_bus_sem is held
+ */
+void pcie_aspm_pm_state_change(struct pci_dev *pdev, bool locked)
+{
+ struct pcie_link_state *link = pdev->link_state;
+
+ if (aspm_disabled || !link)
+ return;
+ /*
+	 * Devices changed PM state; recheck whether the latency still
+	 * meets all functions' requirements.
+ */
+ if (!locked)
+ down_read(&pci_bus_sem);
+ mutex_lock(&aspm_lock);
+ pcie_update_aspm_capable(link->root);
+ pcie_config_aspm_path(link);
+ mutex_unlock(&aspm_lock);
+ if (!locked)
+ up_read(&pci_bus_sem);
+}
+
void pcie_aspm_powersave_config_link(struct pci_dev *pdev)
{
struct pcie_link_state *link = pdev->link_state;
@@ -1038,7 +1372,29 @@ static struct pcie_link_state *pcie_aspm_get_link(struct pci_dev *pdev)
return bridge->link_state;
}
-static int __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem)
+static u8 pci_calc_aspm_disable_mask(int state)
+{
+ state &= ~PCIE_LINK_STATE_CLKPM;
+
+ /* L1 PM substates require L1 */
+ if (state & PCIE_LINK_STATE_L1)
+ state |= PCIE_LINK_STATE_L1SS;
+
+ return state;
+}
+
+static u8 pci_calc_aspm_enable_mask(int state)
+{
+ state &= ~PCIE_LINK_STATE_CLKPM;
+
+ /* L1 PM substates require L1 */
+ if (state & PCIE_LINK_STATE_L1SS)
+ state |= PCIE_LINK_STATE_L1;
+
+ return state;
+}
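For example, pci_disable_link_state(pdev, PCIE_LINK_STATE_L1) now also records every L1 PM substate as disabled (the substates depend on L1), while pci_enable_link_state(pdev, PCIE_LINK_STATE_L1_2) folds PCIE_LINK_STATE_L1 into the default mask, since L1.2 cannot be entered without L1; PCIE_LINK_STATE_CLKPM is masked out in both helpers because the callers handle Clock PM separately.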
+
+static int __pci_disable_link_state(struct pci_dev *pdev, int state, bool locked)
{
struct pcie_link_state *link = pcie_aspm_get_link(pdev);
@@ -1057,28 +1413,17 @@ static int __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem)
return -EPERM;
}
- if (sem)
+ if (!locked)
down_read(&pci_bus_sem);
mutex_lock(&aspm_lock);
- if (state & PCIE_LINK_STATE_L0S)
- link->aspm_disable |= ASPM_STATE_L0S;
- if (state & PCIE_LINK_STATE_L1)
- link->aspm_disable |= ASPM_STATE_L1;
- if (state & PCIE_LINK_STATE_L1_1)
- link->aspm_disable |= ASPM_STATE_L1_1;
- if (state & PCIE_LINK_STATE_L1_2)
- link->aspm_disable |= ASPM_STATE_L1_2;
- if (state & PCIE_LINK_STATE_L1_1_PCIPM)
- link->aspm_disable |= ASPM_STATE_L1_1_PCIPM;
- if (state & PCIE_LINK_STATE_L1_2_PCIPM)
- link->aspm_disable |= ASPM_STATE_L1_2_PCIPM;
+ link->aspm_disable |= pci_calc_aspm_disable_mask(state);
pcie_config_aspm_link(link, policy_to_aspm_state(link));
if (state & PCIE_LINK_STATE_CLKPM)
link->clkpm_disable = 1;
pcie_set_clkpm(link, policy_to_clkpm_state(link));
mutex_unlock(&aspm_lock);
- if (sem)
+ if (!locked)
up_read(&pci_bus_sem);
return 0;
@@ -1086,7 +1431,9 @@ static int __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem)
int pci_disable_link_state_locked(struct pci_dev *pdev, int state)
{
- return __pci_disable_link_state(pdev, state, false);
+ lockdep_assert_held_read(&pci_bus_sem);
+
+ return __pci_disable_link_state(pdev, state, true);
}
EXPORT_SYMBOL(pci_disable_link_state_locked);
@@ -1101,21 +1448,11 @@ EXPORT_SYMBOL(pci_disable_link_state_locked);
*/
int pci_disable_link_state(struct pci_dev *pdev, int state)
{
- return __pci_disable_link_state(pdev, state, true);
+ return __pci_disable_link_state(pdev, state, false);
}
EXPORT_SYMBOL(pci_disable_link_state);
-/**
- * pci_enable_link_state - Clear and set the default device link state so that
- * the link may be allowed to enter the specified states. Note that if the
- * BIOS didn't grant ASPM control to the OS, this does nothing because we can't
- * touch the LNKCTL register. Also note that this does not enable states
- * disabled by pci_disable_link_state(). Return 0 or a negative errno.
- *
- * @pdev: PCI device
- * @state: Mask of ASPM link states to enable
- */
-int pci_enable_link_state(struct pci_dev *pdev, int state)
+static int __pci_enable_link_state(struct pci_dev *pdev, int state, bool locked)
{
struct pcie_link_state *link = pcie_aspm_get_link(pdev);
@@ -1132,33 +1469,76 @@ int pci_enable_link_state(struct pci_dev *pdev, int state)
return -EPERM;
}
- down_read(&pci_bus_sem);
+ if (!locked)
+ down_read(&pci_bus_sem);
mutex_lock(&aspm_lock);
- link->aspm_default = 0;
- if (state & PCIE_LINK_STATE_L0S)
- link->aspm_default |= ASPM_STATE_L0S;
- if (state & PCIE_LINK_STATE_L1)
- link->aspm_default |= ASPM_STATE_L1;
- /* L1 PM substates require L1 */
- if (state & PCIE_LINK_STATE_L1_1)
- link->aspm_default |= ASPM_STATE_L1_1 | ASPM_STATE_L1;
- if (state & PCIE_LINK_STATE_L1_2)
- link->aspm_default |= ASPM_STATE_L1_2 | ASPM_STATE_L1;
- if (state & PCIE_LINK_STATE_L1_1_PCIPM)
- link->aspm_default |= ASPM_STATE_L1_1_PCIPM | ASPM_STATE_L1;
- if (state & PCIE_LINK_STATE_L1_2_PCIPM)
- link->aspm_default |= ASPM_STATE_L1_2_PCIPM | ASPM_STATE_L1;
+ link->aspm_default = pci_calc_aspm_enable_mask(state);
pcie_config_aspm_link(link, policy_to_aspm_state(link));
link->clkpm_default = (state & PCIE_LINK_STATE_CLKPM) ? 1 : 0;
pcie_set_clkpm(link, policy_to_clkpm_state(link));
mutex_unlock(&aspm_lock);
- up_read(&pci_bus_sem);
+ if (!locked)
+ up_read(&pci_bus_sem);
return 0;
}
+
+/**
+ * pci_enable_link_state - Clear and set the default device link state so that
+ * the link may be allowed to enter the specified states. Note that if the
+ * BIOS didn't grant ASPM control to the OS, this does nothing because we can't
+ * touch the LNKCTL register. Also note that this does not enable states
+ * disabled by pci_disable_link_state(). Return 0 or a negative errno.
+ *
+ * Note: Ensure devices are in D0 before enabling PCI-PM L1 PM Substates, per
+ * PCIe r6.0, sec 5.5.4.
+ *
+ * @pdev: PCI device
+ * @state: Mask of ASPM link states to enable
+ */
+int pci_enable_link_state(struct pci_dev *pdev, int state)
+{
+ return __pci_enable_link_state(pdev, state, false);
+}
EXPORT_SYMBOL(pci_enable_link_state);
+/**
+ * pci_enable_link_state_locked - Clear and set the default device link state
+ * so that the link may be allowed to enter the specified states. Note that if
+ * the BIOS didn't grant ASPM control to the OS, this does nothing because we
+ * can't touch the LNKCTL register. Also note that this does not enable states
+ * disabled by pci_disable_link_state(). Return 0 or a negative errno.
+ *
+ * Note: Ensure devices are in D0 before enabling PCI-PM L1 PM Substates, per
+ * PCIe r6.0, sec 5.5.4.
+ *
+ * @pdev: PCI device
+ * @state: Mask of ASPM link states to enable
+ *
+ * Context: Caller holds pci_bus_sem read lock.
+ */
+int pci_enable_link_state_locked(struct pci_dev *pdev, int state)
+{
+ lockdep_assert_held_read(&pci_bus_sem);
+
+ return __pci_enable_link_state(pdev, state, true);
+}
+EXPORT_SYMBOL(pci_enable_link_state_locked);
+
+void pcie_aspm_remove_cap(struct pci_dev *pdev, u32 lnkcap)
+{
+ if (lnkcap & PCI_EXP_LNKCAP_ASPM_L0S)
+ pdev->aspm_l0s_support = 0;
+ if (lnkcap & PCI_EXP_LNKCAP_ASPM_L1)
+ pdev->aspm_l1_support = 0;
+
+ pci_info(pdev, "ASPM: Link Capabilities%s%s treated as unsupported to avoid device defect\n",
+ lnkcap & PCI_EXP_LNKCAP_ASPM_L0S ? " L0s" : "",
+ lnkcap & PCI_EXP_LNKCAP_ASPM_L1 ? " L1" : "");
+}
+
static int pcie_aspm_set_policy(const char *val,
const struct kernel_param *kp)
{
@@ -1247,10 +1627,12 @@ static ssize_t aspm_attr_store_common(struct device *dev,
if (state_enable) {
link->aspm_disable &= ~state;
/* need to enable L1 for substates */
- if (state & ASPM_STATE_L1SS)
- link->aspm_disable &= ~ASPM_STATE_L1;
+ if (state & PCIE_LINK_STATE_L1SS)
+ link->aspm_disable &= ~PCIE_LINK_STATE_L1;
} else {
link->aspm_disable |= state;
+ if (state & PCIE_LINK_STATE_L1)
+ link->aspm_disable |= PCIE_LINK_STATE_L1SS;
}
pcie_config_aspm_link(link, policy_to_aspm_state(link));
@@ -1264,12 +1646,12 @@ static ssize_t aspm_attr_store_common(struct device *dev,
#define ASPM_ATTR(_f, _s) \
static ssize_t _f##_show(struct device *dev, \
struct device_attribute *attr, char *buf) \
-{ return aspm_attr_show_common(dev, attr, buf, ASPM_STATE_##_s); } \
+{ return aspm_attr_show_common(dev, attr, buf, PCIE_LINK_STATE_##_s); } \
\
static ssize_t _f##_store(struct device *dev, \
struct device_attribute *attr, \
const char *buf, size_t len) \
-{ return aspm_attr_store_common(dev, attr, buf, len, ASPM_STATE_##_s); }
+{ return aspm_attr_store_common(dev, attr, buf, len, PCIE_LINK_STATE_##_s); }
ASPM_ATTR(l0s_aspm, L0S)
ASPM_ATTR(l1_aspm, L1)
@@ -1336,12 +1718,12 @@ static umode_t aspm_ctrl_attrs_are_visible(struct kobject *kobj,
struct pci_dev *pdev = to_pci_dev(dev);
struct pcie_link_state *link = pcie_aspm_get_link(pdev);
static const u8 aspm_state_map[] = {
- ASPM_STATE_L0S,
- ASPM_STATE_L1,
- ASPM_STATE_L1_1,
- ASPM_STATE_L1_2,
- ASPM_STATE_L1_1_PCIPM,
- ASPM_STATE_L1_2_PCIPM,
+ PCIE_LINK_STATE_L0S,
+ PCIE_LINK_STATE_L1,
+ PCIE_LINK_STATE_L1_1,
+ PCIE_LINK_STATE_L1_2,
+ PCIE_LINK_STATE_L1_1_PCIPM,
+ PCIE_LINK_STATE_L1_2_PCIPM,
};
if (aspm_disabled || !link)
@@ -1363,12 +1745,12 @@ static int __init pcie_aspm_disable(char *str)
{
if (!strcmp(str, "off")) {
aspm_policy = POLICY_DEFAULT;
- aspm_disabled = 1;
+ aspm_disabled = true;
aspm_support_enabled = false;
- printk(KERN_INFO "PCIe ASPM is disabled\n");
+ pr_info("PCIe ASPM is disabled\n");
} else if (!strcmp(str, "force")) {
- aspm_force = 1;
- printk(KERN_INFO "PCIe ASPM is forcibly enabled\n");
+ aspm_force = true;
+ pr_info("PCIe ASPM is forcibly enabled\n");
}
return 1;
}
@@ -1385,7 +1767,7 @@ void pcie_no_aspm(void)
*/
if (!aspm_force) {
aspm_policy = POLICY_DEFAULT;
- aspm_disabled = 1;
+ aspm_disabled = true;
}
}
@@ -1393,3 +1775,5 @@ bool pcie_aspm_support_enabled(void)
{
return aspm_support_enabled;
}
+
+#endif /* CONFIG_PCIEASPM */
diff --git a/drivers/pci/pcie/bwctrl.c b/drivers/pci/pcie/bwctrl.c
new file mode 100644
index 000000000000..36f939f23d34
--- /dev/null
+++ b/drivers/pci/pcie/bwctrl.c
@@ -0,0 +1,329 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * PCIe bandwidth controller
+ *
+ * Author: Alexandru Gagniuc <mr.nuke.me@gmail.com>
+ *
+ * Copyright (C) 2019 Dell Inc
+ * Copyright (C) 2023-2024 Intel Corporation
+ *
+ * The PCIe bandwidth controller provides a way to alter PCIe Link Speeds
+ * and notify the operating system when the Link Width or Speed changes. The
+ * notification capability is required for all Root Ports and Downstream
+ * Ports supporting Link Width wider than x1 and/or multiple Link Speeds.
+ *
+ * This service port driver hooks into the Bandwidth Notification interrupt,
+ * watching for changes or links becoming degraded in operation. It updates
+ * the cached Current Link Speed that is exposed to user space through sysfs.
+ */
+
+#define dev_fmt(fmt) "bwctrl: " fmt
+
+#include <linux/atomic.h>
+#include <linux/bitops.h>
+#include <linux/bits.h>
+#include <linux/cleanup.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/pci-bwctrl.h>
+#include <linux/rwsem.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include "../pci.h"
+#include "portdrv.h"
+
+/**
+ * struct pcie_bwctrl_data - PCIe bandwidth controller
+ * @set_speed_mutex: Serializes link speed changes
+ * @cdev: Thermal cooling device associated with the port
+ */
+struct pcie_bwctrl_data {
+ struct mutex set_speed_mutex;
+ struct thermal_cooling_device *cdev;
+};
+
+/* Prevent port removal during Link Speed changes. */
+static DECLARE_RWSEM(pcie_bwctrl_setspeed_rwsem);
+
+static bool pcie_valid_speed(enum pci_bus_speed speed)
+{
+ return (speed >= PCIE_SPEED_2_5GT) && (speed <= PCIE_SPEED_64_0GT);
+}
+
+static u16 pci_bus_speed2lnkctl2(enum pci_bus_speed speed)
+{
+ static const u8 speed_conv[] = {
+ [PCIE_SPEED_2_5GT] = PCI_EXP_LNKCTL2_TLS_2_5GT,
+ [PCIE_SPEED_5_0GT] = PCI_EXP_LNKCTL2_TLS_5_0GT,
+ [PCIE_SPEED_8_0GT] = PCI_EXP_LNKCTL2_TLS_8_0GT,
+ [PCIE_SPEED_16_0GT] = PCI_EXP_LNKCTL2_TLS_16_0GT,
+ [PCIE_SPEED_32_0GT] = PCI_EXP_LNKCTL2_TLS_32_0GT,
+ [PCIE_SPEED_64_0GT] = PCI_EXP_LNKCTL2_TLS_64_0GT,
+ };
+
+ if (WARN_ON_ONCE(!pcie_valid_speed(speed)))
+ return 0;
+
+ return speed_conv[speed];
+}
+
+static inline u16 pcie_supported_speeds2target_speed(u8 supported_speeds)
+{
+ return __fls(supported_speeds);
+}
+
+/**
+ * pcie_bwctrl_select_speed - Select Target Link Speed
+ * @port: PCIe Port
+ * @speed_req: Requested PCIe Link Speed
+ *
+ * Select Target Link Speed by taking into account the Supported Link Speeds of
+ * both the Root Port and the Endpoint.
+ *
+ * Return: Target Link Speed (1=2.5GT/s, 2=5GT/s, 3=8GT/s, etc.)
+ */
+static u16 pcie_bwctrl_select_speed(struct pci_dev *port, enum pci_bus_speed speed_req)
+{
+ struct pci_bus *bus = port->subordinate;
+ u8 desired_speeds, supported_speeds;
+ struct pci_dev *dev;
+
+ desired_speeds = GENMASK(pci_bus_speed2lnkctl2(speed_req),
+ __fls(PCI_EXP_LNKCAP2_SLS_2_5GB));
+
+ supported_speeds = port->supported_speeds;
+ if (bus) {
+ down_read(&pci_bus_sem);
+ dev = list_first_entry_or_null(&bus->devices, struct pci_dev, bus_list);
+ if (dev)
+ supported_speeds &= dev->supported_speeds;
+ up_read(&pci_bus_sem);
+ }
+ if (!supported_speeds)
+ supported_speeds = PCI_EXP_LNKCAP2_SLS_2_5GB;
+
+ return pcie_supported_speeds2target_speed(supported_speeds & desired_speeds);
+}
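A userspace sketch of the same selection arithmetic (the speed vectors are made-up examples; bit 1 = 2.5 GT/s, bit 2 = 5 GT/s, and so on, as in the Supported Link Speeds Vector):

#include <stdio.h>

int main(void)
{
	unsigned int port_speeds = 0x1e;	/* port: 2.5/5/8/16 GT/s */
	unsigned int dev_speeds  = 0x0e;	/* endpoint tops out at 8 GT/s */
	unsigned int lnkctl2_req = 4;		/* request 16 GT/s (LNKCTL2 TLS encoding) */

	/* GENMASK(req, 1): every speed from 2.5 GT/s up to the request */
	unsigned int desired = ((1u << (lnkctl2_req + 1)) - 1) & ~1u;
	unsigned int common = port_speeds & dev_speeds & desired;

	/* __fls(): the highest speed all three masks agree on (3 -> 8 GT/s here) */
	printf("Target Link Speed = %d\n", common ? 31 - __builtin_clz(common) : 1);
	return 0;
}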
+
+static int pcie_bwctrl_change_speed(struct pci_dev *port, u16 target_speed, bool use_lt)
+{
+ int ret;
+
+ ret = pcie_capability_clear_and_set_word(port, PCI_EXP_LNKCTL2,
+ PCI_EXP_LNKCTL2_TLS, target_speed);
+ if (ret != PCIBIOS_SUCCESSFUL)
+ return pcibios_err_to_errno(ret);
+
+ return pcie_retrain_link(port, use_lt);
+}
+
+/**
+ * pcie_set_target_speed - Set downstream Link Speed for PCIe Port
+ * @port: PCIe Port
+ * @speed_req: Requested PCIe Link Speed
+ * @use_lt: Wait for the LT or DLLLA bit to detect the end of link training
+ *
+ * Attempt to set PCIe Port Link Speed to @speed_req. @speed_req may be
+ * adjusted downwards to the best speed supported by both the Port and PCIe
+ * Device underneath it.
+ *
+ * Return:
+ * * 0 - on success
+ * * -EINVAL - @speed_req is not a PCIe Link Speed
+ * * -ENODEV - @port is not controllable
+ * * -ETIMEDOUT - changing Link Speed took too long
+ * * -EAGAIN - Link Speed was changed but @speed_req was not achieved
+ */
+int pcie_set_target_speed(struct pci_dev *port, enum pci_bus_speed speed_req,
+ bool use_lt)
+{
+ struct pci_bus *bus = port->subordinate;
+ u16 target_speed;
+ int ret;
+
+ if (WARN_ON_ONCE(!pcie_valid_speed(speed_req)))
+ return -EINVAL;
+
+ if (bus && bus->cur_bus_speed == speed_req)
+ return 0;
+
+ target_speed = pcie_bwctrl_select_speed(port, speed_req);
+
+ scoped_guard(rwsem_read, &pcie_bwctrl_setspeed_rwsem) {
+ struct pcie_bwctrl_data *data = port->link_bwctrl;
+
+ /*
+ * port->link_bwctrl is NULL during initial scan when called
+ * e.g. from the Target Speed quirk.
+ */
+ if (data)
+ mutex_lock(&data->set_speed_mutex);
+
+ ret = pcie_bwctrl_change_speed(port, target_speed, use_lt);
+
+ if (data)
+ mutex_unlock(&data->set_speed_mutex);
+ }
+
+ /*
+	 * Despite setting a higher Target Link Speed, an empty bus won't
+	 * train to 5GT+ speeds.
+ */
+ if (!ret && bus && bus->cur_bus_speed != speed_req &&
+ !list_empty(&bus->devices))
+ ret = -EAGAIN;
+
+ return ret;
+}
+
+static void pcie_bwnotif_enable(struct pcie_device *srv)
+{
+ struct pci_dev *port = srv->port;
+ u16 link_status;
+ int ret;
+
+ /* Note if LBMS has been seen so far */
+ ret = pcie_capability_read_word(port, PCI_EXP_LNKSTA, &link_status);
+ if (ret == PCIBIOS_SUCCESSFUL && link_status & PCI_EXP_LNKSTA_LBMS)
+ set_bit(PCI_LINK_LBMS_SEEN, &port->priv_flags);
+
+ pcie_capability_set_word(port, PCI_EXP_LNKCTL,
+ PCI_EXP_LNKCTL_LBMIE | PCI_EXP_LNKCTL_LABIE);
+ pcie_capability_write_word(port, PCI_EXP_LNKSTA,
+ PCI_EXP_LNKSTA_LBMS | PCI_EXP_LNKSTA_LABS);
+
+ /*
+	 * Updating after enabling notifications and clearing the status bits
+	 * ensures the link speed is up to date.
+ */
+ pcie_update_link_speed(port->subordinate);
+}
+
+static void pcie_bwnotif_disable(struct pci_dev *port)
+{
+ pcie_capability_clear_word(port, PCI_EXP_LNKCTL,
+ PCI_EXP_LNKCTL_LBMIE | PCI_EXP_LNKCTL_LABIE);
+}
+
+static irqreturn_t pcie_bwnotif_irq(int irq, void *context)
+{
+ struct pcie_device *srv = context;
+ struct pci_dev *port = srv->port;
+ u16 link_status, events;
+ int ret;
+
+ ret = pcie_capability_read_word(port, PCI_EXP_LNKSTA, &link_status);
+ if (ret != PCIBIOS_SUCCESSFUL)
+ return IRQ_NONE;
+
+ events = link_status & (PCI_EXP_LNKSTA_LBMS | PCI_EXP_LNKSTA_LABS);
+ if (!events)
+ return IRQ_NONE;
+
+ if (events & PCI_EXP_LNKSTA_LBMS)
+ set_bit(PCI_LINK_LBMS_SEEN, &port->priv_flags);
+
+ pcie_capability_write_word(port, PCI_EXP_LNKSTA, events);
+
+ /*
+ * Interrupts will not be triggered from any further Link Speed
+ * change until LBMS is cleared by the write. Therefore, re-read the
+ * speed (inside pcie_update_link_speed()) after LBMS has been
+ * cleared to avoid missing link speed changes.
+ */
+ pcie_update_link_speed(port->subordinate);
+
+ return IRQ_HANDLED;
+}
+
+void pcie_reset_lbms(struct pci_dev *port)
+{
+ clear_bit(PCI_LINK_LBMS_SEEN, &port->priv_flags);
+ pcie_capability_write_word(port, PCI_EXP_LNKSTA, PCI_EXP_LNKSTA_LBMS);
+}
+
+static int pcie_bwnotif_probe(struct pcie_device *srv)
+{
+ struct pci_dev *port = srv->port;
+ int ret;
+
+ /* Can happen if we run out of bus numbers during enumeration. */
+ if (!port->subordinate)
+ return -ENODEV;
+
+ struct pcie_bwctrl_data *data = devm_kzalloc(&srv->device,
+ sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ ret = devm_mutex_init(&srv->device, &data->set_speed_mutex);
+ if (ret)
+ return ret;
+
+ scoped_guard(rwsem_write, &pcie_bwctrl_setspeed_rwsem) {
+ port->link_bwctrl = data;
+
+ ret = request_irq(srv->irq, pcie_bwnotif_irq,
+ IRQF_SHARED, "PCIe bwctrl", srv);
+ if (ret) {
+ port->link_bwctrl = NULL;
+ return ret;
+ }
+
+ pcie_bwnotif_enable(srv);
+ }
+
+ pci_dbg(port, "enabled with IRQ %d\n", srv->irq);
+
+	/* Don't fail on errors. Don't leave an IS_ERR() "pointer" in ->cdev */
+ port->link_bwctrl->cdev = pcie_cooling_device_register(port);
+ if (IS_ERR(port->link_bwctrl->cdev))
+ port->link_bwctrl->cdev = NULL;
+
+ return 0;
+}
+
+static void pcie_bwnotif_remove(struct pcie_device *srv)
+{
+ struct pcie_bwctrl_data *data = srv->port->link_bwctrl;
+
+ pcie_cooling_device_unregister(data->cdev);
+
+ scoped_guard(rwsem_write, &pcie_bwctrl_setspeed_rwsem) {
+ pcie_bwnotif_disable(srv->port);
+
+ free_irq(srv->irq, srv);
+
+ srv->port->link_bwctrl = NULL;
+ }
+}
+
+static int pcie_bwnotif_suspend(struct pcie_device *srv)
+{
+ pcie_bwnotif_disable(srv->port);
+ return 0;
+}
+
+static int pcie_bwnotif_resume(struct pcie_device *srv)
+{
+ pcie_bwnotif_enable(srv);
+ return 0;
+}
+
+static struct pcie_port_service_driver pcie_bwctrl_driver = {
+ .name = "pcie_bwctrl",
+ .port_type = PCIE_ANY_PORT,
+ .service = PCIE_PORT_SERVICE_BWCTRL,
+ .probe = pcie_bwnotif_probe,
+ .suspend = pcie_bwnotif_suspend,
+ .resume = pcie_bwnotif_resume,
+ .remove = pcie_bwnotif_remove,
+};
+
+int __init pcie_bwctrl_init(void)
+{
+ return pcie_port_service_register(&pcie_bwctrl_driver);
+}
diff --git a/drivers/pci/pcie/dpc.c b/drivers/pci/pcie/dpc.c
index 3ceed8e3de41..fc18349614d7 100644
--- a/drivers/pci/pcie/dpc.c
+++ b/drivers/pci/pcie/dpc.c
@@ -9,6 +9,7 @@
#define dev_fmt(fmt) "DPC: " fmt
#include <linux/aer.h>
+#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/init.h>
@@ -17,6 +18,9 @@
#include "portdrv.h"
#include "../pci.h"
+#define PCI_EXP_DPC_CTL_EN_MASK (PCI_EXP_DPC_CTL_EN_FATAL | \
+ PCI_EXP_DPC_CTL_EN_NONFATAL)
+
static const char * const rp_pio_error_string[] = {
"Configuration Request received UR Completion", /* Bit Position 0 */
"Configuration Request received CA Completion", /* Bit Position 1 */
@@ -186,7 +190,8 @@ out:
static void dpc_process_rp_pio_error(struct pci_dev *pdev)
{
u16 cap = pdev->dpc_cap, dpc_status, first_error;
- u32 status, mask, sev, syserr, exc, dw0, dw1, dw2, dw3, log, prefix;
+ u32 status, mask, sev, syserr, exc, log;
+ struct pcie_tlp_log tlp_log;
int i;
pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_STATUS, &status);
@@ -202,7 +207,7 @@ static void dpc_process_rp_pio_error(struct pci_dev *pdev)
/* Get First Error Pointer */
pci_read_config_word(pdev, cap + PCI_EXP_DPC_STATUS, &dpc_status);
- first_error = (dpc_status & 0x1f00) >> 8;
+ first_error = FIELD_GET(PCI_EXP_DPC_RP_PIO_FEP, dpc_status);
for (i = 0; i < ARRAY_SIZE(rp_pio_error_string); i++) {
if ((status & ~mask) & (1 << i))
@@ -210,29 +215,20 @@ static void dpc_process_rp_pio_error(struct pci_dev *pdev)
first_error == i ? " (First)" : "");
}
- if (pdev->dpc_rp_log_size < 4)
+ if (pdev->dpc_rp_log_size < PCIE_STD_NUM_TLP_HEADERLOG)
goto clear_status;
- pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_HEADER_LOG,
- &dw0);
- pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_HEADER_LOG + 4,
- &dw1);
- pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_HEADER_LOG + 8,
- &dw2);
- pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_HEADER_LOG + 12,
- &dw3);
- pci_err(pdev, "TLP Header: %#010x %#010x %#010x %#010x\n",
- dw0, dw1, dw2, dw3);
-
- if (pdev->dpc_rp_log_size < 5)
+ pcie_read_tlp_log(pdev, cap + PCI_EXP_DPC_RP_PIO_HEADER_LOG,
+ cap + PCI_EXP_DPC_RP_PIO_TLPPREFIX_LOG,
+ dpc_tlp_log_len(pdev),
+ pdev->subordinate->flit_mode,
+ &tlp_log);
+ pcie_print_tlp_log(pdev, &tlp_log, KERN_ERR, dev_fmt(""));
+
+ if (pdev->dpc_rp_log_size < PCIE_STD_NUM_TLP_HEADERLOG + 1)
goto clear_status;
pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_IMPSPEC_LOG, &log);
pci_err(pdev, "RP PIO ImpSpec Log %#010x\n", log);
- for (i = 0; i < pdev->dpc_rp_log_size - 5; i++) {
- pci_read_config_dword(pdev,
- cap + PCI_EXP_DPC_RP_PIO_TLPPREFIX_LOG, &prefix);
- pci_err(pdev, "TLP Prefix Header: dw%d, %#010x\n", i, prefix);
- }
clear_status:
pci_write_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_STATUS, status);
}
@@ -256,46 +252,126 @@ static int dpc_get_aer_uncorrect_severity(struct pci_dev *dev,
else
info->severity = AER_NONFATAL;
+ info->level = KERN_ERR;
+
+ info->dev[0] = dev;
+ info->error_dev_num = 1;
+
return 1;
}
void dpc_process_error(struct pci_dev *pdev)
{
u16 cap = pdev->dpc_cap, status, source, reason, ext_reason;
- struct aer_err_info info;
+ struct aer_err_info info = {};
pci_read_config_word(pdev, cap + PCI_EXP_DPC_STATUS, &status);
- pci_read_config_word(pdev, cap + PCI_EXP_DPC_SOURCE_ID, &source);
-
- pci_info(pdev, "containment event, status:%#06x source:%#06x\n",
- status, source);
-
- reason = (status & PCI_EXP_DPC_STATUS_TRIGGER_RSN) >> 1;
- ext_reason = (status & PCI_EXP_DPC_STATUS_TRIGGER_RSN_EXT) >> 5;
- pci_warn(pdev, "%s detected\n",
- (reason == 0) ? "unmasked uncorrectable error" :
- (reason == 1) ? "ERR_NONFATAL" :
- (reason == 2) ? "ERR_FATAL" :
- (ext_reason == 0) ? "RP PIO error" :
- (ext_reason == 1) ? "software trigger" :
- "reserved error");
-
- /* show RP PIO error detail information */
- if (pdev->dpc_rp_extensions && reason == 3 && ext_reason == 0)
- dpc_process_rp_pio_error(pdev);
- else if (reason == 0 &&
- dpc_get_aer_uncorrect_severity(pdev, &info) &&
- aer_get_device_error_info(pdev, &info)) {
- aer_print_error(pdev, &info);
- pci_aer_clear_nonfatal_status(pdev);
- pci_aer_clear_fatal_status(pdev);
+
+ reason = status & PCI_EXP_DPC_STATUS_TRIGGER_RSN;
+
+ switch (reason) {
+ case PCI_EXP_DPC_STATUS_TRIGGER_RSN_UNCOR:
+ pci_warn(pdev, "containment event, status:%#06x: unmasked uncorrectable error detected\n",
+ status);
+ if (dpc_get_aer_uncorrect_severity(pdev, &info) &&
+ aer_get_device_error_info(&info, 0)) {
+ aer_print_error(&info, 0);
+ pci_aer_clear_nonfatal_status(pdev);
+ pci_aer_clear_fatal_status(pdev);
+ }
+ break;
+ case PCI_EXP_DPC_STATUS_TRIGGER_RSN_NFE:
+ case PCI_EXP_DPC_STATUS_TRIGGER_RSN_FE:
+ pci_read_config_word(pdev, cap + PCI_EXP_DPC_SOURCE_ID,
+ &source);
+ pci_warn(pdev, "containment event, status:%#06x, %s received from %04x:%02x:%02x.%d\n",
+ status,
+ (reason == PCI_EXP_DPC_STATUS_TRIGGER_RSN_FE) ?
+ "ERR_FATAL" : "ERR_NONFATAL",
+ pci_domain_nr(pdev->bus), PCI_BUS_NUM(source),
+ PCI_SLOT(source), PCI_FUNC(source));
+ break;
+ case PCI_EXP_DPC_STATUS_TRIGGER_RSN_IN_EXT:
+ ext_reason = status & PCI_EXP_DPC_STATUS_TRIGGER_RSN_EXT;
+ pci_warn(pdev, "containment event, status:%#06x: %s detected\n",
+ status,
+ (ext_reason == PCI_EXP_DPC_STATUS_TRIGGER_RSN_RP_PIO) ?
+ "RP PIO error" :
+ (ext_reason == PCI_EXP_DPC_STATUS_TRIGGER_RSN_SW_TRIGGER) ?
+ "software trigger" :
+ "reserved error");
+ /* show RP PIO error detail information */
+ if (ext_reason == PCI_EXP_DPC_STATUS_TRIGGER_RSN_RP_PIO &&
+ pdev->dpc_rp_extensions)
+ dpc_process_rp_pio_error(pdev);
+ break;
+ }
+}
+
+static void pci_clear_surpdn_errors(struct pci_dev *pdev)
+{
+ if (pdev->dpc_rp_extensions)
+ pci_write_config_dword(pdev, pdev->dpc_cap +
+ PCI_EXP_DPC_RP_PIO_STATUS, ~0);
+
+ /*
+ * In practice, Surprise Down errors have been observed to also set
+ * error bits in the Status Register as well as the Fatal Error
+ * Detected bit in the Device Status Register.
+ */
+ pci_write_config_word(pdev, PCI_STATUS, 0xffff);
+
+ pcie_capability_write_word(pdev, PCI_EXP_DEVSTA, PCI_EXP_DEVSTA_FED);
+}
+
+static void dpc_handle_surprise_removal(struct pci_dev *pdev)
+{
+ if (!pcie_wait_for_link(pdev, false)) {
+ pci_info(pdev, "Data Link Layer Link Active not cleared in 1000 msec\n");
+ goto out;
}
+
+ if (pdev->dpc_rp_extensions && dpc_wait_rp_inactive(pdev))
+ goto out;
+
+ pci_aer_raw_clear_status(pdev);
+ pci_clear_surpdn_errors(pdev);
+
+ pci_write_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_STATUS,
+ PCI_EXP_DPC_STATUS_TRIGGER);
+
+out:
+ clear_bit(PCI_DPC_RECOVERED, &pdev->priv_flags);
+ wake_up_all(&dpc_completed_waitqueue);
+}
+
+static bool dpc_is_surprise_removal(struct pci_dev *pdev)
+{
+ u16 status;
+
+ if (!pdev->is_hotplug_bridge)
+ return false;
+
+ if (pci_read_config_word(pdev, pdev->aer_cap + PCI_ERR_UNCOR_STATUS,
+ &status))
+ return false;
+
+ return status & PCI_ERR_UNC_SURPDN;
}
static irqreturn_t dpc_handler(int irq, void *context)
{
struct pci_dev *pdev = context;
+ /*
+ * According to PCIe r6.0 sec 6.7.6, errors are an expected side effect
+ * of async removal and should be ignored by software.
+ */
+ if (dpc_is_surprise_removal(pdev)) {
+ dpc_handle_surprise_removal(pdev);
+ return IRQ_HANDLED;
+ }
+
dpc_process_error(pdev);
/* We configure DPC so it only triggers on ERR_FATAL */
@@ -337,9 +413,21 @@ void pci_dpc_init(struct pci_dev *pdev)
/* Quirks may set dpc_rp_log_size if device or firmware is buggy */
if (!pdev->dpc_rp_log_size) {
+ u16 flags;
+ int ret;
+
+ ret = pcie_capability_read_word(pdev, PCI_EXP_FLAGS, &flags);
+ if (ret)
+ return;
+
pdev->dpc_rp_log_size =
- (cap & PCI_EXP_DPC_RP_PIO_LOG_SIZE) >> 8;
- if (pdev->dpc_rp_log_size < 4 || pdev->dpc_rp_log_size > 9) {
+ FIELD_GET(PCI_EXP_DPC_RP_PIO_LOG_SIZE, cap);
+ if (FIELD_GET(PCI_EXP_FLAGS_FLIT, flags))
+ pdev->dpc_rp_log_size += FIELD_GET(PCI_EXP_DPC_RP_PIO_LOG_SIZE4,
+ cap) << 4;
+
+ if (pdev->dpc_rp_log_size < PCIE_STD_NUM_TLP_HEADERLOG ||
+ pdev->dpc_rp_log_size > PCIE_STD_MAX_TLP_HEADERLOG + 1) {
pci_err(pdev, "RP PIO log size %u is invalid\n",
pdev->dpc_rp_log_size);
pdev->dpc_rp_log_size = 0;
@@ -347,13 +435,44 @@ void pci_dpc_init(struct pci_dev *pdev)
}
}
+static void dpc_enable(struct pcie_device *dev)
+{
+ struct pci_dev *pdev = dev->port;
+ int dpc = pdev->dpc_cap;
+ u16 ctl;
+
+ /*
+ * Clear DPC Interrupt Status so we don't get an interrupt for an
+ * old event when setting DPC Interrupt Enable.
+ */
+ pci_write_config_word(pdev, dpc + PCI_EXP_DPC_STATUS,
+ PCI_EXP_DPC_STATUS_INTERRUPT);
+
+ pci_read_config_word(pdev, dpc + PCI_EXP_DPC_CTL, &ctl);
+ ctl &= ~PCI_EXP_DPC_CTL_EN_MASK;
+ ctl |= PCI_EXP_DPC_CTL_EN_FATAL | PCI_EXP_DPC_CTL_INT_EN;
+ pci_write_config_word(pdev, dpc + PCI_EXP_DPC_CTL, ctl);
+}
+
+static void dpc_disable(struct pcie_device *dev)
+{
+ struct pci_dev *pdev = dev->port;
+ int dpc = pdev->dpc_cap;
+ u16 ctl;
+
+ /* Disable DPC triggering and DPC interrupts */
+ pci_read_config_word(pdev, dpc + PCI_EXP_DPC_CTL, &ctl);
+ ctl &= ~(PCI_EXP_DPC_CTL_EN_FATAL | PCI_EXP_DPC_CTL_INT_EN);
+ pci_write_config_word(pdev, dpc + PCI_EXP_DPC_CTL, ctl);
+}
+
#define FLAG(x, y) (((x) & (y)) ? '+' : '-')
static int dpc_probe(struct pcie_device *dev)
{
struct pci_dev *pdev = dev->port;
struct device *device = &dev->device;
int status;
- u16 ctl, cap;
+ u16 cap;
if (!pcie_aer_is_native(pdev) && !pcie_ports_dpc_native)
return -ENOTSUPP;
@@ -368,12 +487,9 @@ static int dpc_probe(struct pcie_device *dev)
}
pci_read_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_CAP, &cap);
- pci_read_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_CTL, &ctl);
+ dpc_enable(dev);
- ctl = (ctl & 0xfff4) | PCI_EXP_DPC_CTL_EN_FATAL | PCI_EXP_DPC_CTL_INT_EN;
- pci_write_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_CTL, ctl);
pci_info(pdev, "enabled with IRQ %d\n", dev->irq);
-
pci_info(pdev, "error containment capabilities: Int Msg #%d, RPExt%c PoisonedTLP%c SwTrigger%c RP PIO Log %d, DL_ActiveErr%c\n",
cap & PCI_EXP_DPC_IRQ, FLAG(cap, PCI_EXP_DPC_CAP_RP_EXT),
FLAG(cap, PCI_EXP_DPC_CAP_POISONED_TLP),
@@ -384,14 +500,21 @@ static int dpc_probe(struct pcie_device *dev)
return status;
}
-static void dpc_remove(struct pcie_device *dev)
+static int dpc_suspend(struct pcie_device *dev)
{
- struct pci_dev *pdev = dev->port;
- u16 ctl;
+ dpc_disable(dev);
+ return 0;
+}
- pci_read_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_CTL, &ctl);
- ctl &= ~(PCI_EXP_DPC_CTL_EN_FATAL | PCI_EXP_DPC_CTL_INT_EN);
- pci_write_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_CTL, ctl);
+static int dpc_resume(struct pcie_device *dev)
+{
+ dpc_enable(dev);
+ return 0;
+}
+
+static void dpc_remove(struct pcie_device *dev)
+{
+ dpc_disable(dev);
}
static struct pcie_port_service_driver dpcdriver = {
@@ -399,6 +522,8 @@ static struct pcie_port_service_driver dpcdriver = {
.port_type = PCIE_ANY_PORT,
.service = PCIE_PORT_SERVICE_DPC,
.probe = dpc_probe,
+ .suspend = dpc_suspend,
+ .resume = dpc_resume,
.remove = dpc_remove,
};
diff --git a/drivers/pci/pcie/edr.c b/drivers/pci/pcie/edr.c
index 5f4914d313a1..e86298dbbcff 100644
--- a/drivers/pci/pcie/edr.c
+++ b/drivers/pci/pcie/edr.c
@@ -32,10 +32,10 @@ static int acpi_enable_dpc(struct pci_dev *pdev)
int status = 0;
/*
- * Behavior when calling unsupported _DSM functions is undefined,
- * so check whether EDR_PORT_DPC_ENABLE_DSM is supported.
+ * Per PCI Firmware r3.3, sec 4.6.12, EDR_PORT_DPC_ENABLE_DSM is
+ * optional. Return success if it's not implemented.
*/
- if (!acpi_check_dsm(adev->handle, &pci_acpi_dsm_guid, 5,
+ if (!acpi_check_dsm(adev->handle, &pci_acpi_dsm_guid, 6,
1ULL << EDR_PORT_DPC_ENABLE_DSM))
return 0;
@@ -46,12 +46,7 @@ static int acpi_enable_dpc(struct pci_dev *pdev)
argv4.package.count = 1;
argv4.package.elements = &req;
- /*
- * Per Downstream Port Containment Related Enhancements ECN to PCI
- * Firmware Specification r3.2, sec 4.6.12, EDR_PORT_DPC_ENABLE_DSM is
- * optional. Return success if it's not implemented.
- */
- obj = acpi_evaluate_dsm(adev->handle, &pci_acpi_dsm_guid, 5,
+ obj = acpi_evaluate_dsm(adev->handle, &pci_acpi_dsm_guid, 6,
EDR_PORT_DPC_ENABLE_DSM, &argv4);
if (!obj)
return 0;
@@ -85,8 +80,9 @@ static struct pci_dev *acpi_dpc_port_get(struct pci_dev *pdev)
u16 port;
/*
- * Behavior when calling unsupported _DSM functions is undefined,
- * so check whether EDR_PORT_DPC_ENABLE_DSM is supported.
+ * If EDR_PORT_LOCATE_DSM is not implemented under the target of
+ * EDR, the target is the port that experienced the containment
+ * event (PCI Firmware r3.3, sec 4.6.13).
*/
if (!acpi_check_dsm(adev->handle, &pci_acpi_dsm_guid, 5,
1ULL << EDR_PORT_LOCATE_DSM))
@@ -104,6 +100,16 @@ static struct pci_dev *acpi_dpc_port_get(struct pci_dev *pdev)
}
/*
+ * Bit 31 represents the success/failure of the operation. If bit
+ * 31 is set, the operation failed.
+ */
+ if (obj->integer.value & BIT(31)) {
+ ACPI_FREE(obj);
+ pci_err(pdev, "Locate Port _DSM failed\n");
+ return NULL;
+ }
+
+ /*
* Firmware returns DPC port BDF details in following format:
* 15:8 = bus
* 7:3 = device
diff --git a/drivers/pci/pcie/err.c b/drivers/pci/pcie/err.c
index 59c90d04a609..bebe4bc111d7 100644
--- a/drivers/pci/pcie/err.c
+++ b/drivers/pci/pcie/err.c
@@ -13,6 +13,7 @@
#define dev_fmt(fmt) "AER: " fmt
#include <linux/pci.h>
+#include <linux/pm_runtime.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
@@ -85,6 +86,18 @@ static int report_error_detected(struct pci_dev *dev,
return 0;
}
+static int pci_pm_runtime_get_sync(struct pci_dev *pdev, void *data)
+{
+ pm_runtime_get_sync(&pdev->dev);
+ return 0;
+}
+
+static int pci_pm_runtime_put(struct pci_dev *pdev, void *data)
+{
+ pm_runtime_put(&pdev->dev);
+ return 0;
+}
+
static int report_frozen_detected(struct pci_dev *dev, void *data)
{
return report_error_detected(dev, pci_channel_io_frozen, data);
@@ -95,6 +108,24 @@ static int report_normal_detected(struct pci_dev *dev, void *data)
return report_error_detected(dev, pci_channel_io_normal, data);
}
+static int report_perm_failure_detected(struct pci_dev *dev, void *data)
+{
+ struct pci_driver *pdrv;
+ const struct pci_error_handlers *err_handler;
+
+ device_lock(&dev->dev);
+ pdrv = dev->driver;
+ if (!pdrv || !pdrv->err_handler || !pdrv->err_handler->error_detected)
+ goto out;
+
+ err_handler = pdrv->err_handler;
+ err_handler->error_detected(dev, pci_channel_io_perm_failure);
+out:
+ pci_uevent_ers(dev, PCI_ERS_RESULT_DISCONNECT);
+ device_unlock(&dev->dev);
+ return 0;
+}
+
static int report_mmio_enabled(struct pci_dev *dev, void *data)
{
struct pci_driver *pdrv;
@@ -103,9 +134,7 @@ static int report_mmio_enabled(struct pci_dev *dev, void *data)
device_lock(&dev->dev);
pdrv = dev->driver;
- if (!pdrv ||
- !pdrv->err_handler ||
- !pdrv->err_handler->mmio_enabled)
+ if (!pdrv || !pdrv->err_handler || !pdrv->err_handler->mmio_enabled)
goto out;
err_handler = pdrv->err_handler;
@@ -124,9 +153,8 @@ static int report_slot_reset(struct pci_dev *dev, void *data)
device_lock(&dev->dev);
pdrv = dev->driver;
- if (!pdrv ||
- !pdrv->err_handler ||
- !pdrv->err_handler->slot_reset)
+ if (!pci_dev_set_io_state(dev, pci_channel_io_normal) ||
+ !pdrv || !pdrv->err_handler || !pdrv->err_handler->slot_reset)
goto out;
err_handler = pdrv->err_handler;
@@ -145,9 +173,7 @@ static int report_resume(struct pci_dev *dev, void *data)
device_lock(&dev->dev);
pdrv = dev->driver;
if (!pci_dev_set_io_state(dev, pci_channel_io_normal) ||
- !pdrv ||
- !pdrv->err_handler ||
- !pdrv->err_handler->resume)
+ !pdrv || !pdrv->err_handler || !pdrv->err_handler->resume)
goto out;
err_handler = pdrv->err_handler;
@@ -207,16 +233,13 @@ pci_ers_result_t pcie_do_recovery(struct pci_dev *dev,
else
bridge = pci_upstream_bridge(dev);
+ pci_walk_bridge(bridge, pci_pm_runtime_get_sync, NULL);
+
pci_dbg(bridge, "broadcast error_detected message\n");
- if (state == pci_channel_io_frozen) {
+ if (state == pci_channel_io_frozen)
pci_walk_bridge(bridge, report_frozen_detected, &status);
- if (reset_subordinates(bridge) != PCI_ERS_RESULT_RECOVERED) {
- pci_warn(bridge, "subordinate device reset failed\n");
- goto failed;
- }
- } else {
+ else
pci_walk_bridge(bridge, report_normal_detected, &status);
- }
if (status == PCI_ERS_RESULT_CAN_RECOVER) {
status = PCI_ERS_RESULT_RECOVERED;
@@ -224,6 +247,14 @@ pci_ers_result_t pcie_do_recovery(struct pci_dev *dev,
pci_walk_bridge(bridge, report_mmio_enabled, &status);
}
+ if (status == PCI_ERS_RESULT_NEED_RESET ||
+ state == pci_channel_io_frozen) {
+ if (reset_subordinates(bridge) != PCI_ERS_RESULT_RECOVERED) {
+ pci_warn(bridge, "subordinate device reset failed\n");
+ goto failed;
+ }
+ }
+
if (status == PCI_ERS_RESULT_NEED_RESET) {
/*
* TODO: Should call platform-specific
@@ -251,13 +282,17 @@ pci_ers_result_t pcie_do_recovery(struct pci_dev *dev,
pcie_clear_device_status(dev);
pci_aer_clear_nonfatal_status(dev);
}
+
+ pci_walk_bridge(bridge, pci_pm_runtime_put, NULL);
+
pci_info(bridge, "device recovery successful\n");
return status;
failed:
- pci_uevent_ers(bridge, PCI_ERS_RESULT_DISCONNECT);
+ pci_walk_bridge(bridge, pci_pm_runtime_put, NULL);
+
+ pci_walk_bridge(bridge, report_perm_failure_detected, NULL);
- /* TODO: Should kernel panic here? */
pci_info(bridge, "device recovery failed\n");
return status;
diff --git a/drivers/pci/pcie/pme.c b/drivers/pci/pcie/pme.c
index ef8ce436ead9..a2daebd9806c 100644
--- a/drivers/pci/pcie/pme.c
+++ b/drivers/pci/pcie/pme.c
@@ -9,6 +9,7 @@
#define dev_fmt(fmt) "PME: " fmt
+#include <linux/bitfield.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/errno.h>
@@ -235,7 +236,8 @@ static void pcie_pme_work_fn(struct work_struct *work)
pcie_clear_root_pme_status(port);
spin_unlock_irq(&data->lock);
- pcie_pme_handle_request(port, rtsta & 0xffff);
+ pcie_pme_handle_request(port,
+ FIELD_GET(PCI_EXP_RTSTA_PME_RQ_ID, rtsta));
spin_lock_irq(&data->lock);
continue;
diff --git a/drivers/pci/pcie/portdrv.c b/drivers/pci/pcie/portdrv.c
index 46fad0d813b2..38a41ccf79b9 100644
--- a/drivers/pci/pcie/portdrv.c
+++ b/drivers/pci/pcie/portdrv.c
@@ -6,6 +6,7 @@
* Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com)
*/
+#include <linux/bitfield.h>
#include <linux/dmi.h>
#include <linux/init.h>
#include <linux/module.h>
@@ -67,9 +68,9 @@ static int pcie_message_numbers(struct pci_dev *dev, int mask,
*/
if (mask & (PCIE_PORT_SERVICE_PME | PCIE_PORT_SERVICE_HP |
- PCIE_PORT_SERVICE_BWNOTIF)) {
+ PCIE_PORT_SERVICE_BWCTRL)) {
pcie_capability_read_word(dev, PCI_EXP_FLAGS, &reg16);
- *pme = (reg16 & PCI_EXP_FLAGS_IRQ) >> 9;
+ *pme = FIELD_GET(PCI_EXP_FLAGS_IRQ, reg16);
nvec = *pme + 1;
}
@@ -81,7 +82,7 @@ static int pcie_message_numbers(struct pci_dev *dev, int mask,
if (pos) {
pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS,
&reg32);
- *aer = (reg32 & PCI_ERR_ROOT_AER_IRQ) >> 27;
+ *aer = FIELD_GET(PCI_ERR_ROOT_AER_IRQ, reg32);
nvec = max(nvec, *aer + 1);
}
}
@@ -92,7 +93,7 @@ static int pcie_message_numbers(struct pci_dev *dev, int mask,
if (pos) {
pci_read_config_word(dev, pos + PCI_EXP_DPC_CAP,
&reg16);
- *dpc = reg16 & PCI_EXP_DPC_IRQ;
+ *dpc = FIELD_GET(PCI_EXP_DPC_IRQ, reg16);
nvec = max(nvec, *dpc + 1);
}
}
@@ -149,11 +150,11 @@ static int pcie_port_enable_irq_vec(struct pci_dev *dev, int *irqs, int mask)
/* PME, hotplug and bandwidth notification share an MSI/MSI-X vector */
if (mask & (PCIE_PORT_SERVICE_PME | PCIE_PORT_SERVICE_HP |
- PCIE_PORT_SERVICE_BWNOTIF)) {
+ PCIE_PORT_SERVICE_BWCTRL)) {
pcie_irq = pci_irq_vector(dev, pme);
irqs[PCIE_PORT_SERVICE_PME_SHIFT] = pcie_irq;
irqs[PCIE_PORT_SERVICE_HP_SHIFT] = pcie_irq;
- irqs[PCIE_PORT_SERVICE_BWNOTIF_SHIFT] = pcie_irq;
+ irqs[PCIE_PORT_SERVICE_BWCTRL_SHIFT] = pcie_irq;
}
if (mask & PCIE_PORT_SERVICE_AER)
@@ -186,15 +187,15 @@ static int pcie_init_service_irqs(struct pci_dev *dev, int *irqs, int mask)
* interrupt.
*/
if ((mask & PCIE_PORT_SERVICE_PME) && pcie_pme_no_msi())
- goto legacy_irq;
+ goto intx_irq;
/* Try to use MSI-X or MSI if supported */
if (pcie_port_enable_irq_vec(dev, irqs, mask) == 0)
return 0;
-legacy_irq:
- /* fall back to legacy IRQ */
- ret = pci_alloc_irq_vectors(dev, 1, 1, PCI_IRQ_LEGACY);
+intx_irq:
+ /* fall back to INTX IRQ */
+ ret = pci_alloc_irq_vectors(dev, 1, 1, PCI_IRQ_INTX);
if (ret < 0)
return -ENODEV;
@@ -219,7 +220,7 @@ static int get_port_device_capability(struct pci_dev *dev)
struct pci_host_bridge *host = pci_find_host_bridge(dev->bus);
int services = 0;
- if (dev->is_hotplug_bridge &&
+ if (dev->is_pciehp &&
(pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT ||
pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) &&
(pcie_ports_native || host->native_pcie_hotplug)) {
@@ -227,10 +228,12 @@ static int get_port_device_capability(struct pci_dev *dev)
/*
* Disable hot-plug interrupts in case they have been enabled
- * by the BIOS and the hot-plug service driver is not loaded.
+ * by the BIOS and the hot-plug service driver won't be loaded
+ * to handle them.
*/
- pcie_capability_clear_word(dev, PCI_EXP_SLTCTL,
- PCI_EXP_SLTCTL_CCIE | PCI_EXP_SLTCTL_HPIE);
+ if (!IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE))
+ pcie_capability_clear_word(dev, PCI_EXP_SLTCTL,
+ PCI_EXP_SLTCTL_CCIE | PCI_EXP_SLTCTL_HPIE);
}
#ifdef CONFIG_PCIEAER
@@ -264,13 +267,15 @@ static int get_port_device_capability(struct pci_dev *dev)
(pcie_ports_dpc_native || (services & PCIE_PORT_SERVICE_AER)))
services |= PCIE_PORT_SERVICE_DPC;
+ /* Enable bandwidth control if more than one speed is supported. */
if (pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM ||
pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) {
u32 linkcap;
pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &linkcap);
- if (linkcap & PCI_EXP_LNKCAP_LBNC)
- services |= PCIE_PORT_SERVICE_BWNOTIF;
+ if (linkcap & PCI_EXP_LNKCAP_LBNC &&
+ hweight8(dev->supported_speeds) > 1)
+ services |= PCIE_PORT_SERVICE_BWCTRL;
}
return services;
@@ -755,7 +760,6 @@ static pci_ers_result_t pcie_portdrv_slot_reset(struct pci_dev *dev)
device_for_each_child(&dev->dev, &off, pcie_port_device_iter);
pci_restore_state(dev);
- pci_save_state(dev);
return PCI_ERS_RESULT_RECOVERED;
}
@@ -785,7 +789,7 @@ static const struct pci_error_handlers pcie_portdrv_err_handler = {
static struct pci_driver pcie_portdriver = {
.name = "pcieport",
- .id_table = &port_pci_ids[0],
+ .id_table = port_pci_ids,
.probe = pcie_portdrv_probe,
.remove = pcie_portdrv_remove,
@@ -827,6 +831,7 @@ static void __init pcie_init_services(void)
pcie_aer_init();
pcie_pme_init();
pcie_dpc_init();
+ pcie_bwctrl_init();
pcie_hp_init();
}
diff --git a/drivers/pci/pcie/portdrv.h b/drivers/pci/pcie/portdrv.h
index 58a2b1a1cae4..bd29d1cc7b8b 100644
--- a/drivers/pci/pcie/portdrv.h
+++ b/drivers/pci/pcie/portdrv.h
@@ -20,8 +20,8 @@
#define PCIE_PORT_SERVICE_HP (1 << PCIE_PORT_SERVICE_HP_SHIFT)
#define PCIE_PORT_SERVICE_DPC_SHIFT 3 /* Downstream Port Containment */
#define PCIE_PORT_SERVICE_DPC (1 << PCIE_PORT_SERVICE_DPC_SHIFT)
-#define PCIE_PORT_SERVICE_BWNOTIF_SHIFT 4 /* Bandwidth notification */
-#define PCIE_PORT_SERVICE_BWNOTIF (1 << PCIE_PORT_SERVICE_BWNOTIF_SHIFT)
+#define PCIE_PORT_SERVICE_BWCTRL_SHIFT 4 /* Bandwidth Controller (notifications) */
+#define PCIE_PORT_SERVICE_BWCTRL (1 << PCIE_PORT_SERVICE_BWCTRL_SHIFT)
#define PCIE_PORT_DEVICE_MAXSERVICES 5
@@ -29,10 +29,8 @@ extern bool pcie_ports_dpc_native;
#ifdef CONFIG_PCIEAER
int pcie_aer_init(void);
-int pcie_aer_is_native(struct pci_dev *dev);
#else
static inline int pcie_aer_init(void) { return 0; }
-static inline int pcie_aer_is_native(struct pci_dev *dev) { return 0; }
#endif
#ifdef CONFIG_HOTPLUG_PCI_PCIE
@@ -53,6 +51,8 @@ int pcie_dpc_init(void);
static inline int pcie_dpc_init(void) { return 0; }
#endif
+int pcie_bwctrl_init(void);
+
/* Port Type */
#define PCIE_ANY_PORT (~0)
@@ -98,7 +98,7 @@ struct pcie_port_service_driver {
int pcie_port_service_register(struct pcie_port_service_driver *new);
void pcie_port_service_unregister(struct pcie_port_service_driver *new);
-extern struct bus_type pcie_port_bus_type;
+extern const struct bus_type pcie_port_bus_type;
struct pci_dev;
diff --git a/drivers/pci/pcie/ptm.c b/drivers/pci/pcie/ptm.c
index b4e5f553467c..ed0f9691e7d1 100644
--- a/drivers/pci/pcie/ptm.c
+++ b/drivers/pci/pcie/ptm.c
@@ -4,6 +4,8 @@
* Copyright (c) 2016, Intel Corporation.
*/
+#include <linux/bitfield.h>
+#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
@@ -53,7 +55,7 @@ void pci_ptm_init(struct pci_dev *dev)
pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_PTM, sizeof(u32));
pci_read_config_dword(dev, ptm + PCI_PTM_CAP, &cap);
- dev->ptm_granularity = (cap & PCI_PTM_GRANULARITY_MASK) >> 8;
+ dev->ptm_granularity = FIELD_GET(PCI_PTM_GRANULARITY_MASK, cap);
/*
* Per the spec recommendation (PCIe r6.0, sec 7.9.15.3), select the
@@ -79,6 +81,11 @@ void pci_ptm_init(struct pci_dev *dev)
dev->ptm_granularity = 0;
}
+ if (cap & PCI_PTM_CAP_RES)
+ dev->ptm_responder = 1;
+ if (cap & PCI_PTM_CAP_REQ)
+ dev->ptm_requester = 1;
+
if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT ||
pci_pcie_type(dev) == PCI_EXP_TYPE_UPSTREAM)
pci_enable_ptm(dev, NULL);
@@ -142,11 +149,29 @@ static int __pci_enable_ptm(struct pci_dev *dev)
return -EINVAL;
}
+ switch (pci_pcie_type(dev)) {
+ case PCI_EXP_TYPE_ROOT_PORT:
+ if (!dev->ptm_root)
+ return -EINVAL;
+ break;
+ case PCI_EXP_TYPE_UPSTREAM:
+ if (!dev->ptm_responder)
+ return -EINVAL;
+ break;
+ case PCI_EXP_TYPE_ENDPOINT:
+ case PCI_EXP_TYPE_LEG_END:
+ if (!dev->ptm_requester)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
pci_read_config_dword(dev, ptm + PCI_PTM_CTRL, &ctrl);
ctrl |= PCI_PTM_CTRL_ENABLE;
ctrl &= ~PCI_PTM_GRANULARITY_MASK;
- ctrl |= dev->ptm_granularity << 8;
+ ctrl |= FIELD_PREP(PCI_PTM_GRANULARITY_MASK, dev->ptm_granularity);
if (dev->ptm_root)
ctrl |= PCI_PTM_CTRL_ROOT;
@@ -251,3 +276,304 @@ bool pcie_ptm_enabled(struct pci_dev *dev)
return dev->ptm_enabled;
}
EXPORT_SYMBOL(pcie_ptm_enabled);
+
+#if IS_ENABLED(CONFIG_DEBUG_FS)
+static ssize_t context_update_write(struct file *file, const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct pci_ptm_debugfs *ptm_debugfs = file->private_data;
+ char buf[7];
+ int ret;
+ u8 mode;
+
+ if (!ptm_debugfs->ops->context_update_write)
+ return -EOPNOTSUPP;
+
+ if (count < 1 || count >= sizeof(buf))
+ return -EINVAL;
+
+ ret = copy_from_user(buf, ubuf, count);
+ if (ret)
+ return -EFAULT;
+
+ buf[count] = '\0';
+
+ if (sysfs_streq(buf, "auto"))
+ mode = PCIE_PTM_CONTEXT_UPDATE_AUTO;
+ else if (sysfs_streq(buf, "manual"))
+ mode = PCIE_PTM_CONTEXT_UPDATE_MANUAL;
+ else
+ return -EINVAL;
+
+ mutex_lock(&ptm_debugfs->lock);
+ ret = ptm_debugfs->ops->context_update_write(ptm_debugfs->pdata, mode);
+ mutex_unlock(&ptm_debugfs->lock);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static ssize_t context_update_read(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct pci_ptm_debugfs *ptm_debugfs = file->private_data;
+ char buf[8]; /* Extra space for NULL termination at the end */
+ ssize_t pos;
+ u8 mode;
+
+ if (!ptm_debugfs->ops->context_update_read)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&ptm_debugfs->lock);
+ ptm_debugfs->ops->context_update_read(ptm_debugfs->pdata, &mode);
+ mutex_unlock(&ptm_debugfs->lock);
+
+ if (mode == PCIE_PTM_CONTEXT_UPDATE_AUTO)
+ pos = scnprintf(buf, sizeof(buf), "auto\n");
+ else
+ pos = scnprintf(buf, sizeof(buf), "manual\n");
+
+ return simple_read_from_buffer(ubuf, count, ppos, buf, pos);
+}
+
+static const struct file_operations context_update_fops = {
+ .open = simple_open,
+ .read = context_update_read,
+ .write = context_update_write,
+};
+
+static int context_valid_get(void *data, u64 *val)
+{
+ struct pci_ptm_debugfs *ptm_debugfs = data;
+ bool valid;
+ int ret;
+
+ if (!ptm_debugfs->ops->context_valid_read)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&ptm_debugfs->lock);
+ ret = ptm_debugfs->ops->context_valid_read(ptm_debugfs->pdata, &valid);
+ mutex_unlock(&ptm_debugfs->lock);
+ if (ret)
+ return ret;
+
+ *val = valid;
+
+ return 0;
+}
+
+static int context_valid_set(void *data, u64 val)
+{
+ struct pci_ptm_debugfs *ptm_debugfs = data;
+ int ret;
+
+ if (!ptm_debugfs->ops->context_valid_write)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&ptm_debugfs->lock);
+ ret = ptm_debugfs->ops->context_valid_write(ptm_debugfs->pdata, !!val);
+ mutex_unlock(&ptm_debugfs->lock);
+
+ return ret;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(context_valid_fops, context_valid_get,
+ context_valid_set, "%llu\n");
+
+static int local_clock_get(void *data, u64 *val)
+{
+ struct pci_ptm_debugfs *ptm_debugfs = data;
+ u64 clock;
+ int ret;
+
+ if (!ptm_debugfs->ops->local_clock_read)
+ return -EOPNOTSUPP;
+
+ ret = ptm_debugfs->ops->local_clock_read(ptm_debugfs->pdata, &clock);
+ if (ret)
+ return ret;
+
+ *val = clock;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(local_clock_fops, local_clock_get, NULL, "%llu\n");
+
+static int master_clock_get(void *data, u64 *val)
+{
+ struct pci_ptm_debugfs *ptm_debugfs = data;
+ u64 clock;
+ int ret;
+
+ if (!ptm_debugfs->ops->master_clock_read)
+ return -EOPNOTSUPP;
+
+ ret = ptm_debugfs->ops->master_clock_read(ptm_debugfs->pdata, &clock);
+ if (ret)
+ return ret;
+
+ *val = clock;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(master_clock_fops, master_clock_get, NULL, "%llu\n");
+
+static int t1_get(void *data, u64 *val)
+{
+ struct pci_ptm_debugfs *ptm_debugfs = data;
+ u64 clock;
+ int ret;
+
+ if (!ptm_debugfs->ops->t1_read)
+ return -EOPNOTSUPP;
+
+ ret = ptm_debugfs->ops->t1_read(ptm_debugfs->pdata, &clock);
+ if (ret)
+ return ret;
+
+ *val = clock;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(t1_fops, t1_get, NULL, "%llu\n");
+
+static int t2_get(void *data, u64 *val)
+{
+ struct pci_ptm_debugfs *ptm_debugfs = data;
+ u64 clock;
+ int ret;
+
+ if (!ptm_debugfs->ops->t2_read)
+ return -EOPNOTSUPP;
+
+ ret = ptm_debugfs->ops->t2_read(ptm_debugfs->pdata, &clock);
+ if (ret)
+ return ret;
+
+ *val = clock;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(t2_fops, t2_get, NULL, "%llu\n");
+
+static int t3_get(void *data, u64 *val)
+{
+ struct pci_ptm_debugfs *ptm_debugfs = data;
+ u64 clock;
+ int ret;
+
+ if (!ptm_debugfs->ops->t3_read)
+ return -EOPNOTSUPP;
+
+ ret = ptm_debugfs->ops->t3_read(ptm_debugfs->pdata, &clock);
+ if (ret)
+ return ret;
+
+ *val = clock;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(t3_fops, t3_get, NULL, "%llu\n");
+
+static int t4_get(void *data, u64 *val)
+{
+ struct pci_ptm_debugfs *ptm_debugfs = data;
+ u64 clock;
+ int ret;
+
+ if (!ptm_debugfs->ops->t4_read)
+ return -EOPNOTSUPP;
+
+ ret = ptm_debugfs->ops->t4_read(ptm_debugfs->pdata, &clock);
+ if (ret)
+ return ret;
+
+ *val = clock;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(t4_fops, t4_get, NULL, "%llu\n");
+
+#define pcie_ptm_create_debugfs_file(pdata, mode, attr) \
+ do { \
+ if (ops->attr##_visible && ops->attr##_visible(pdata)) \
+ debugfs_create_file(#attr, mode, ptm_debugfs->debugfs, \
+ ptm_debugfs, &attr##_fops); \
+ } while (0)
+
+/*
+ * pcie_ptm_create_debugfs() - Create debugfs entries for the PTM context
+ * @dev: PTM capable component device
+ * @pdata: Private data of the PTM capable component device
+ * @ops: PTM callback structure
+ *
+ * Create debugfs entries for exposing the PTM context of the PTM capable
+ * components such as Root Complex and Endpoint controllers.
+ *
+ * Return: Pointer to 'struct pci_ptm_debugfs' if success, NULL otherwise.
+ */
+struct pci_ptm_debugfs *pcie_ptm_create_debugfs(struct device *dev, void *pdata,
+ const struct pcie_ptm_ops *ops)
+{
+ struct pci_ptm_debugfs *ptm_debugfs;
+ char *dirname;
+ int ret;
+
+ /* Caller must provide check_capability() callback */
+ if (!ops->check_capability)
+ return NULL;
+
+ /* Check for PTM capability before creating debugfs attributes */
+ ret = ops->check_capability(pdata);
+ if (!ret) {
+ dev_dbg(dev, "PTM capability not present\n");
+ return NULL;
+ }
+
+ ptm_debugfs = kzalloc(sizeof(*ptm_debugfs), GFP_KERNEL);
+ if (!ptm_debugfs)
+ return NULL;
+
+ dirname = devm_kasprintf(dev, GFP_KERNEL, "pcie_ptm_%s", dev_name(dev));
+ if (!dirname)
+ return NULL;
+
+ ptm_debugfs->debugfs = debugfs_create_dir(dirname, NULL);
+ ptm_debugfs->pdata = pdata;
+ ptm_debugfs->ops = ops;
+ mutex_init(&ptm_debugfs->lock);
+
+ pcie_ptm_create_debugfs_file(pdata, 0644, context_update);
+ pcie_ptm_create_debugfs_file(pdata, 0644, context_valid);
+ pcie_ptm_create_debugfs_file(pdata, 0444, local_clock);
+ pcie_ptm_create_debugfs_file(pdata, 0444, master_clock);
+ pcie_ptm_create_debugfs_file(pdata, 0444, t1);
+ pcie_ptm_create_debugfs_file(pdata, 0444, t2);
+ pcie_ptm_create_debugfs_file(pdata, 0444, t3);
+ pcie_ptm_create_debugfs_file(pdata, 0444, t4);
+
+ return ptm_debugfs;
+}
+EXPORT_SYMBOL_GPL(pcie_ptm_create_debugfs);
+
+/*
+ * pcie_ptm_destroy_debugfs() - Destroy debugfs entries for the PTM context
+ * @ptm_debugfs: Pointer to the PTM debugfs struct
+ */
+void pcie_ptm_destroy_debugfs(struct pci_ptm_debugfs *ptm_debugfs)
+{
+ if (!ptm_debugfs)
+ return;
+
+ mutex_destroy(&ptm_debugfs->lock);
+ debugfs_remove_recursive(ptm_debugfs->debugfs);
+}
+EXPORT_SYMBOL_GPL(pcie_ptm_destroy_debugfs);
+#endif
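
A minimal usage sketch of the PTM debugfs interface added above, assuming a hypothetical controller driver: the my_ptm_* helpers and "priv" are made up for illustration, and the struct pcie_ptm_ops field types are inferred from how the ops are invoked by pcie_ptm_create_debugfs() and the attribute getters.

	static int my_ptm_check_capability(void *pdata)
	{
		return 1;	/* pretend the controller implements a PTM context */
	}

	static int my_ptm_local_clock_read(void *pdata, u64 *clock)
	{
		*clock = 0;	/* read the controller's PTM local clock here */
		return 0;
	}

	static bool my_ptm_local_clock_visible(void *pdata)
	{
		return true;	/* expose the "local_clock" debugfs attribute */
	}

	static const struct pcie_ptm_ops my_ptm_ops = {
		.check_capability	= my_ptm_check_capability,
		.local_clock_read	= my_ptm_local_clock_read,
		.local_clock_visible	= my_ptm_local_clock_visible,
	};

	/* probe path */
	priv->ptm_debugfs = pcie_ptm_create_debugfs(dev, priv, &my_ptm_ops);

	/* remove path */
	pcie_ptm_destroy_debugfs(priv->ptm_debugfs);
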
diff --git a/drivers/pci/pcie/tlp.c b/drivers/pci/pcie/tlp.c
new file mode 100644
index 000000000000..71f8fc9ea2ed
--- /dev/null
+++ b/drivers/pci/pcie/tlp.c
@@ -0,0 +1,137 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe TLP Log handling
+ *
+ * Copyright (C) 2024 Intel Corporation
+ */
+
+#include <linux/aer.h>
+#include <linux/array_size.h>
+#include <linux/bitfield.h>
+#include <linux/pci.h>
+#include <linux/string.h>
+
+#include "../pci.h"
+
+/**
+ * aer_tlp_log_len - Calculate AER Capability TLP Header/Prefix Log length
+ * @dev: PCIe device
+ * @aercc: AER Capabilities and Control register value
+ *
+ * Return: TLP Header/Prefix Log length
+ */
+unsigned int aer_tlp_log_len(struct pci_dev *dev, u32 aercc)
+{
+ if (aercc & PCI_ERR_CAP_TLP_LOG_FLIT)
+ return FIELD_GET(PCI_ERR_CAP_TLP_LOG_SIZE, aercc);
+
+ return PCIE_STD_NUM_TLP_HEADERLOG +
+ ((aercc & PCI_ERR_CAP_PREFIX_LOG_PRESENT) ?
+ dev->eetlp_prefix_max : 0);
+}
+
+#ifdef CONFIG_PCIE_DPC
+/**
+ * dpc_tlp_log_len - Calculate DPC RP PIO TLP Header/Prefix Log length
+ * @dev: PCIe device
+ *
+ * Return: TLP Header/Prefix Log length
+ */
+unsigned int dpc_tlp_log_len(struct pci_dev *dev)
+{
+ /* Remove ImpSpec Log register from the count */
+ if (dev->dpc_rp_log_size >= PCIE_STD_NUM_TLP_HEADERLOG + 1)
+ return dev->dpc_rp_log_size - 1;
+
+ return dev->dpc_rp_log_size;
+}
+#endif
+
+/**
+ * pcie_read_tlp_log - read TLP Header Log
+ * @dev: PCIe device
+ * @where: PCI Config offset of TLP Header Log
+ * @where2: PCI Config offset of TLP Prefix Log
+ * @tlp_len: TLP Log length (Header Log + TLP Prefix Log in DWORDs)
+ * @flit: TLP Logged in Flit mode
+ * @log: TLP Log structure to fill
+ *
+ * Fill @log from TLP Header Log registers, e.g., AER or DPC.
+ *
+ * Return: 0 on success and filled TLP Log structure, <0 on error.
+ */
+int pcie_read_tlp_log(struct pci_dev *dev, int where, int where2,
+ unsigned int tlp_len, bool flit, struct pcie_tlp_log *log)
+{
+ unsigned int i;
+ int off, ret;
+
+ if (tlp_len > ARRAY_SIZE(log->dw))
+ tlp_len = ARRAY_SIZE(log->dw);
+
+ memset(log, 0, sizeof(*log));
+
+ for (i = 0; i < tlp_len; i++) {
+ if (i < PCIE_STD_NUM_TLP_HEADERLOG)
+ off = where + i * 4;
+ else
+ off = where2 + (i - PCIE_STD_NUM_TLP_HEADERLOG) * 4;
+
+ ret = pci_read_config_dword(dev, off, &log->dw[i]);
+ if (ret)
+ return pcibios_err_to_errno(ret);
+ }
+
+ /*
+ * Hard-code non-Flit mode to 4 DWORDs, for now. The exact length
+ * can only be known if the TLP is parsed.
+ */
+ log->header_len = flit ? tlp_len : 4;
+ log->flit = flit;
+
+ return 0;
+}
+
+#define EE_PREFIX_STR " E-E Prefixes:"
+
+/**
+ * pcie_print_tlp_log - Print TLP Header / Prefix Log contents
+ * @dev: PCIe device
+ * @log: TLP Log structure
+ * @level: Printk log level
+ * @pfx: String prefix
+ *
+ * Prints TLP Header and Prefix Log information held by @log.
+ */
+void pcie_print_tlp_log(const struct pci_dev *dev,
+ const struct pcie_tlp_log *log, const char *level,
+ const char *pfx)
+{
+ /* EE_PREFIX_STR fits the extended DW space needed for the Flit mode */
+ char buf[11 * PCIE_STD_MAX_TLP_HEADERLOG + 1];
+ unsigned int i;
+ int len;
+
+ len = scnprintf(buf, sizeof(buf), "%#010x %#010x %#010x %#010x",
+ log->dw[0], log->dw[1], log->dw[2], log->dw[3]);
+
+ if (log->flit) {
+ for (i = PCIE_STD_NUM_TLP_HEADERLOG; i < log->header_len; i++) {
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ " %#010x", log->dw[i]);
+ }
+ } else {
+ if (log->prefix[0])
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ EE_PREFIX_STR);
+ for (i = 0; i < ARRAY_SIZE(log->prefix); i++) {
+ if (!log->prefix[i])
+ break;
+ len += scnprintf(buf + len, sizeof(buf) - len,
+ " %#010x", log->prefix[i]);
+ }
+ }
+
+ dev_printk(level, &dev->dev, "%sTLP Header%s: %s\n", pfx,
+ log->flit ? " (Flit)" : "", buf);
+}
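
A rough sketch of how the new TLP Log helpers combine when dumping an AER Header Log; "aer" stands for the device's AER capability offset, the register offsets are the standard PCI_ERR_* ones, and error handling is omitted for brevity.

	struct pcie_tlp_log log;
	unsigned int len;
	u32 aercc;

	pci_read_config_dword(dev, aer + PCI_ERR_CAP, &aercc);
	len = aer_tlp_log_len(dev, aercc);

	if (!pcie_read_tlp_log(dev, aer + PCI_ERR_HEADER_LOG,
			       aer + PCI_ERR_PREFIX_LOG, len,
			       aercc & PCI_ERR_CAP_TLP_LOG_FLIT, &log))
		pcie_print_tlp_log(dev, &log, KERN_ERR, "AER: ");
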
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 8bac3ce02609..41183aed8f5d 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -3,13 +3,15 @@
* PCI detection and setup code
*/
+#include <linux/array_size.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/msi.h>
-#include <linux/of_device.h>
#include <linux/of_pci.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/pci_hotplug.h>
#include <linux/slab.h>
#include <linux/module.h>
@@ -96,7 +98,7 @@ static void release_pcibus_dev(struct device *dev)
kfree(pci_bus);
}
-static struct class pcibus_class = {
+static const struct class pcibus_class = {
.name = "pci_bus",
.dev_release = &release_pcibus_dev,
.dev_groups = pcibus_groups,
@@ -166,39 +168,66 @@ static inline unsigned long decode_bar(struct pci_dev *dev, u32 bar)
#define PCI_COMMAND_DECODE_ENABLE (PCI_COMMAND_MEMORY | PCI_COMMAND_IO)
/**
+ * __pci_size_bars - Read the raw BAR mask for a range of PCI BARs
+ * @dev: the PCI device
+ * @count: number of BARs to size
+ * @pos: starting config space position
+ * @sizes: array to store mask values
+ * @rom: indicate whether to use ROM mask, which avoids enabling ROM BARs
+ *
+ * Provided @sizes array must be sufficiently sized to store results for
+ * @count u32 BARs. Caller is responsible for disabling decode to specified
+ * BAR range around calling this function. This function is intended to avoid
+ * disabling decode around sizing each BAR individually, which can result in
+ * non-trivial overhead in virtualized environments with very large PCI BARs.
+ */
+static void __pci_size_bars(struct pci_dev *dev, int count,
+ unsigned int pos, u32 *sizes, bool rom)
+{
+ u32 orig, mask = rom ? PCI_ROM_ADDRESS_MASK : ~0;
+ int i;
+
+ for (i = 0; i < count; i++, pos += 4, sizes++) {
+ pci_read_config_dword(dev, pos, &orig);
+ pci_write_config_dword(dev, pos, mask);
+ pci_read_config_dword(dev, pos, sizes);
+ pci_write_config_dword(dev, pos, orig);
+ }
+}
+
+void __pci_size_stdbars(struct pci_dev *dev, int count,
+ unsigned int pos, u32 *sizes)
+{
+ __pci_size_bars(dev, count, pos, sizes, false);
+}
+
+static void __pci_size_rom(struct pci_dev *dev, unsigned int pos, u32 *sizes)
+{
+ __pci_size_bars(dev, 1, pos, sizes, true);
+}
+
+/**
* __pci_read_base - Read a PCI BAR
* @dev: the PCI device
* @type: type of the BAR
* @res: resource buffer to be filled in
* @pos: BAR position in the config space
+ * @sizes: array of one or more pre-read BAR masks
*
* Returns 1 if the BAR is 64-bit, or 0 if 32-bit.
*/
int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
- struct resource *res, unsigned int pos)
+ struct resource *res, unsigned int pos, u32 *sizes)
{
- u32 l = 0, sz = 0, mask;
+ u32 l = 0, sz;
u64 l64, sz64, mask64;
- u16 orig_cmd;
struct pci_bus_region region, inverted_region;
-
- mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
-
- /* No printks while decoding is disabled! */
- if (!dev->mmio_always_on) {
- pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
- if (orig_cmd & PCI_COMMAND_DECODE_ENABLE) {
- pci_write_config_word(dev, PCI_COMMAND,
- orig_cmd & ~PCI_COMMAND_DECODE_ENABLE);
- }
- }
+ const char *res_name = pci_resource_name(dev, res - dev->resource);
res->name = pci_name(dev);
pci_read_config_dword(dev, pos, &l);
- pci_write_config_dword(dev, pos, l | mask);
- pci_read_config_dword(dev, pos, &sz);
- pci_write_config_dword(dev, pos, l);
+ sz = sizes[0];
/*
* All bits set in sz means the device isn't working properly.
@@ -238,25 +267,19 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
if (res->flags & IORESOURCE_MEM_64) {
pci_read_config_dword(dev, pos + 4, &l);
- pci_write_config_dword(dev, pos + 4, ~0);
- pci_read_config_dword(dev, pos + 4, &sz);
- pci_write_config_dword(dev, pos + 4, l);
+ sz = sizes[1];
l64 |= ((u64)l << 32);
sz64 |= ((u64)sz << 32);
mask64 |= ((u64)~0 << 32);
}
- if (!dev->mmio_always_on && (orig_cmd & PCI_COMMAND_DECODE_ENABLE))
- pci_write_config_word(dev, PCI_COMMAND, orig_cmd);
-
if (!sz64)
goto fail;
sz64 = pci_size(l64, sz64, mask64);
if (!sz64) {
- pci_info(dev, FW_BUG "reg 0x%x: invalid BAR (can't size)\n",
- pos);
+ pci_info(dev, FW_BUG "%s: invalid; can't size\n", res_name);
goto fail;
}
@@ -266,8 +289,8 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
res->flags |= IORESOURCE_UNSET | IORESOURCE_DISABLED;
res->start = 0;
res->end = 0;
- pci_err(dev, "reg 0x%x: can't handle BAR larger than 4GB (size %#010llx)\n",
- pos, (unsigned long long)sz64);
+ pci_err(dev, "%s: can't handle BAR larger than 4GB (size %#010llx)\n",
+ res_name, (unsigned long long)sz64);
goto out;
}
@@ -276,8 +299,8 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
res->flags |= IORESOURCE_UNSET;
res->start = 0;
res->end = sz64 - 1;
- pci_info(dev, "reg 0x%x: can't handle BAR above 4GB (bus address %#010llx)\n",
- pos, (unsigned long long)l64);
+ pci_info(dev, "%s: can't handle BAR above 4GB (bus address %#010llx)\n",
+ res_name, (unsigned long long)l64);
goto out;
}
}
@@ -303,8 +326,8 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
res->flags |= IORESOURCE_UNSET;
res->start = 0;
res->end = region.end - region.start;
- pci_info(dev, "reg 0x%x: initial BAR value %#010llx invalid\n",
- pos, (unsigned long long)region.start);
+ pci_info(dev, "%s: initial BAR value %#010llx invalid\n",
+ res_name, (unsigned long long)region.start);
}
goto out;
@@ -314,14 +337,19 @@ fail:
res->flags = 0;
out:
if (res->flags)
- pci_info(dev, "reg 0x%x: %pR\n", pos, res);
+ pci_info(dev, "%s %pR\n", res_name, res);
return (res->flags & IORESOURCE_MEM_64) ? 1 : 0;
}
-static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
+static __always_inline void pci_read_bases(struct pci_dev *dev,
+ unsigned int howmany, int rom)
{
+ u32 rombar, stdbars[PCI_STD_NUM_BARS];
unsigned int pos, reg;
+ u16 orig_cmd;
+
+ BUILD_BUG_ON(statically_true(howmany > PCI_STD_NUM_BARS));
if (dev->non_compliant_bars)
return;
@@ -330,10 +358,28 @@ static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
if (dev->is_virtfn)
return;
+ /* No printks while decoding is disabled! */
+ if (!dev->mmio_always_on) {
+ pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
+ if (orig_cmd & PCI_COMMAND_DECODE_ENABLE) {
+ pci_write_config_word(dev, PCI_COMMAND,
+ orig_cmd & ~PCI_COMMAND_DECODE_ENABLE);
+ }
+ }
+
+ __pci_size_stdbars(dev, howmany, PCI_BASE_ADDRESS_0, stdbars);
+ if (rom)
+ __pci_size_rom(dev, rom, &rombar);
+
+ if (!dev->mmio_always_on &&
+ (orig_cmd & PCI_COMMAND_DECODE_ENABLE))
+ pci_write_config_word(dev, PCI_COMMAND, orig_cmd);
+
for (pos = 0; pos < howmany; pos++) {
struct resource *res = &dev->resource[pos];
reg = PCI_BASE_ADDRESS_0 + (pos << 2);
- pos += __pci_read_base(dev, pci_bar_unknown, res, reg);
+ pos += __pci_read_base(dev, pci_bar_unknown,
+ res, reg, &stdbars[pos]);
}
if (rom) {
@@ -341,68 +387,16 @@ static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
dev->rom_base_reg = rom;
res->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH |
IORESOURCE_READONLY | IORESOURCE_SIZEALIGN;
- __pci_read_base(dev, pci_bar_mem32, res, rom);
+ __pci_read_base(dev, pci_bar_mem32, res, rom, &rombar);
}
}
-static void pci_read_bridge_windows(struct pci_dev *bridge)
+static void pci_read_bridge_io(struct pci_dev *dev, struct resource *res,
+ bool log)
{
- u16 io;
- u32 pmem, tmp;
-
- pci_read_config_word(bridge, PCI_IO_BASE, &io);
- if (!io) {
- pci_write_config_word(bridge, PCI_IO_BASE, 0xe0f0);
- pci_read_config_word(bridge, PCI_IO_BASE, &io);
- pci_write_config_word(bridge, PCI_IO_BASE, 0x0);
- }
- if (io)
- bridge->io_window = 1;
-
- /*
- * DECchip 21050 pass 2 errata: the bridge may miss an address
- * disconnect boundary by one PCI data phase. Workaround: do not
- * use prefetching on this device.
- */
- if (bridge->vendor == PCI_VENDOR_ID_DEC && bridge->device == 0x0001)
- return;
-
- pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem);
- if (!pmem) {
- pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE,
- 0xffe0fff0);
- pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem);
- pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, 0x0);
- }
- if (!pmem)
- return;
-
- bridge->pref_window = 1;
-
- if ((pmem & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) {
-
- /*
- * Bridge claims to have a 64-bit prefetchable memory
- * window; verify that the upper bits are actually
- * writable.
- */
- pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32, &pmem);
- pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32,
- 0xffffffff);
- pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32, &tmp);
- pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, pmem);
- if (tmp)
- bridge->pref_64_window = 1;
- }
-}
-
-static void pci_read_bridge_io(struct pci_bus *child)
-{
- struct pci_dev *dev = child->self;
u8 io_base_lo, io_limit_lo;
unsigned long io_mask, io_granularity, base, limit;
struct pci_bus_region region;
- struct resource *res;
io_mask = PCI_IO_RANGE_MASK;
io_granularity = 0x1000;
@@ -412,7 +406,6 @@ static void pci_read_bridge_io(struct pci_bus *child)
io_granularity = 0x400;
}
- res = child->resource[0];
pci_read_config_byte(dev, PCI_IO_BASE, &io_base_lo);
pci_read_config_byte(dev, PCI_IO_LIMIT, &io_limit_lo);
base = (io_base_lo & io_mask) << 8;
@@ -427,47 +420,54 @@ static void pci_read_bridge_io(struct pci_bus *child)
limit |= ((unsigned long) io_limit_hi << 16);
}
+ res->flags = (io_base_lo & PCI_IO_RANGE_TYPE_MASK) | IORESOURCE_IO;
+
if (base <= limit) {
- res->flags = (io_base_lo & PCI_IO_RANGE_TYPE_MASK) | IORESOURCE_IO;
region.start = base;
region.end = limit + io_granularity - 1;
pcibios_bus_to_resource(dev->bus, res, &region);
- pci_info(dev, " bridge window %pR\n", res);
+ if (log)
+ pci_info(dev, " bridge window %pR\n", res);
+ } else {
+ resource_set_range(res, 0, 0);
+ res->flags |= IORESOURCE_UNSET | IORESOURCE_DISABLED;
}
}
-static void pci_read_bridge_mmio(struct pci_bus *child)
+static void pci_read_bridge_mmio(struct pci_dev *dev, struct resource *res,
+ bool log)
{
- struct pci_dev *dev = child->self;
u16 mem_base_lo, mem_limit_lo;
unsigned long base, limit;
struct pci_bus_region region;
- struct resource *res;
- res = child->resource[1];
pci_read_config_word(dev, PCI_MEMORY_BASE, &mem_base_lo);
pci_read_config_word(dev, PCI_MEMORY_LIMIT, &mem_limit_lo);
base = ((unsigned long) mem_base_lo & PCI_MEMORY_RANGE_MASK) << 16;
limit = ((unsigned long) mem_limit_lo & PCI_MEMORY_RANGE_MASK) << 16;
+
+ res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM;
+
if (base <= limit) {
- res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM;
region.start = base;
region.end = limit + 0xfffff;
pcibios_bus_to_resource(dev->bus, res, &region);
- pci_info(dev, " bridge window %pR\n", res);
+ if (log)
+ pci_info(dev, " bridge window %pR\n", res);
+ } else {
+ resource_set_range(res, 0, 0);
+ res->flags |= IORESOURCE_UNSET | IORESOURCE_DISABLED;
}
}
-static void pci_read_bridge_mmio_pref(struct pci_bus *child)
+static void pci_read_bridge_mmio_pref(struct pci_dev *dev, struct resource *res,
+ bool log)
{
- struct pci_dev *dev = child->self;
u16 mem_base_lo, mem_limit_lo;
u64 base64, limit64;
pci_bus_addr_t base, limit;
struct pci_bus_region region;
- struct resource *res;
- res = child->resource[2];
pci_read_config_word(dev, PCI_PREF_MEMORY_BASE, &mem_base_lo);
pci_read_config_word(dev, PCI_PREF_MEMORY_LIMIT, &mem_limit_lo);
base64 = (mem_base_lo & PCI_PREF_RANGE_MASK) << 16;
@@ -499,16 +499,87 @@ static void pci_read_bridge_mmio_pref(struct pci_bus *child)
return;
}
+ res->flags = (mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) | IORESOURCE_MEM |
+ IORESOURCE_PREFETCH;
+ if (res->flags & PCI_PREF_RANGE_TYPE_64)
+ res->flags |= IORESOURCE_MEM_64;
+
if (base <= limit) {
- res->flags = (mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) |
- IORESOURCE_MEM | IORESOURCE_PREFETCH;
- if (res->flags & PCI_PREF_RANGE_TYPE_64)
- res->flags |= IORESOURCE_MEM_64;
region.start = base;
region.end = limit + 0xfffff;
pcibios_bus_to_resource(dev->bus, res, &region);
- pci_info(dev, " bridge window %pR\n", res);
+ if (log)
+ pci_info(dev, " bridge window %pR\n", res);
+ } else {
+ resource_set_range(res, 0, 0);
+ res->flags |= IORESOURCE_UNSET | IORESOURCE_DISABLED;
+ }
+}
+
+static void pci_read_bridge_windows(struct pci_dev *bridge)
+{
+ u32 buses;
+ u16 io;
+ u32 pmem, tmp;
+ struct resource res;
+
+ pci_read_config_dword(bridge, PCI_PRIMARY_BUS, &buses);
+ res.flags = IORESOURCE_BUS;
+ res.start = (buses >> 8) & 0xff;
+ res.end = (buses >> 16) & 0xff;
+ pci_info(bridge, "PCI bridge to %pR%s\n", &res,
+ bridge->transparent ? " (subtractive decode)" : "");
+
+ pci_read_config_word(bridge, PCI_IO_BASE, &io);
+ if (!io) {
+ pci_write_config_word(bridge, PCI_IO_BASE, 0xe0f0);
+ pci_read_config_word(bridge, PCI_IO_BASE, &io);
+ pci_write_config_word(bridge, PCI_IO_BASE, 0x0);
}
+ if (io) {
+ bridge->io_window = 1;
+ pci_read_bridge_io(bridge, &res, true);
+ }
+
+ pci_read_bridge_mmio(bridge, &res, true);
+
+ /*
+ * DECchip 21050 pass 2 errata: the bridge may miss an address
+ * disconnect boundary by one PCI data phase. Workaround: do not
+ * use prefetching on this device.
+ */
+ if (bridge->vendor == PCI_VENDOR_ID_DEC && bridge->device == 0x0001)
+ return;
+
+ pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem);
+ if (!pmem) {
+ pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE,
+ 0xffe0fff0);
+ pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem);
+ pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, 0x0);
+ }
+ if (!pmem)
+ return;
+
+ bridge->pref_window = 1;
+
+ if ((pmem & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) {
+
+ /*
+ * Bridge claims to have a 64-bit prefetchable memory
+ * window; verify that the upper bits are actually
+ * writable.
+ */
+ pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32, &pmem);
+ pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32,
+ 0xffffffff);
+ pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32, &tmp);
+ pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, pmem);
+ if (tmp)
+ bridge->pref_64_window = 1;
+ }
+
+ pci_read_bridge_mmio_pref(bridge, &res, true);
}
void pci_read_bridge_bases(struct pci_bus *child)
@@ -528,19 +599,23 @@ void pci_read_bridge_bases(struct pci_bus *child)
for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++)
child->resource[i] = &dev->resource[PCI_BRIDGE_RESOURCES+i];
- pci_read_bridge_io(child);
- pci_read_bridge_mmio(child);
- pci_read_bridge_mmio_pref(child);
-
- if (dev->transparent) {
- pci_bus_for_each_resource(child->parent, res) {
- if (res && res->flags) {
- pci_bus_add_resource(child, res,
- PCI_SUBTRACTIVE_DECODE);
- pci_info(dev, " bridge window %pR (subtractive decode)\n",
- res);
- }
- }
+ pci_read_bridge_io(child->self,
+ child->resource[PCI_BUS_BRIDGE_IO_WINDOW], false);
+ pci_read_bridge_mmio(child->self,
+ child->resource[PCI_BUS_BRIDGE_MEM_WINDOW], false);
+ pci_read_bridge_mmio_pref(child->self,
+ child->resource[PCI_BUS_BRIDGE_PREF_MEM_WINDOW],
+ false);
+
+ if (!dev->transparent)
+ return;
+
+ pci_bus_for_each_resource(child->parent, res) {
+ if (!res || !res->flags)
+ continue;
+
+ pci_bus_add_resource(child, res);
+ pci_info(dev, " bridge window %pR (subtractive decode)\n", res);
}
}
@@ -575,9 +650,26 @@ static void pci_release_host_bridge_dev(struct device *dev)
pci_free_resource_list(&bridge->windows);
pci_free_resource_list(&bridge->dma_ranges);
+
+ /* Host bridges only have domain_nr set in the emulation case */
+ if (bridge->domain_nr != PCI_DOMAIN_NR_NOT_SET)
+ pci_bus_release_emul_domain_nr(bridge->domain_nr);
+
kfree(bridge);
}
+static const struct attribute_group *pci_host_bridge_groups[] = {
+#ifdef CONFIG_PCI_IDE
+ &pci_ide_attr_group,
+#endif
+ NULL
+};
+
+static const struct device_type pci_host_bridge_type = {
+ .groups = pci_host_bridge_groups,
+ .release = pci_release_host_bridge_dev,
+};
+
static void pci_init_host_bridge(struct pci_host_bridge *bridge)
{
INIT_LIST_HEAD(&bridge->windows);
@@ -597,6 +689,8 @@ static void pci_init_host_bridge(struct pci_host_bridge *bridge)
bridge->native_dpc = 1;
bridge->domain_nr = PCI_DOMAIN_NR_NOT_SET;
bridge->native_cxl_error = 1;
+ bridge->dev.type = &pci_host_bridge_type;
+ pci_ide_init_host_bridge(bridge);
device_initialize(&bridge->dev);
}
@@ -610,7 +704,6 @@ struct pci_host_bridge *pci_alloc_host_bridge(size_t priv)
return NULL;
pci_init_host_bridge(bridge);
- bridge->dev.release = pci_release_host_bridge_dev;
return bridge;
}
@@ -731,9 +824,14 @@ const char *pci_speed_string(enum pci_bus_speed speed)
}
EXPORT_SYMBOL_GPL(pci_speed_string);
-void pcie_update_link_speed(struct pci_bus *bus, u16 linksta)
+void pcie_update_link_speed(struct pci_bus *bus)
{
- bus->cur_bus_speed = pcie_link_speed[linksta & PCI_EXP_LNKSTA_CLS];
+ struct pci_dev *bridge = bus->self;
+ u16 linksta, linksta2;
+
+ pcie_capability_read_word(bridge, PCI_EXP_LNKSTA, &linksta);
+ pcie_capability_read_word(bridge, PCI_EXP_LNKSTA2, &linksta2);
+ __pcie_update_link_speed(bus, linksta, linksta2);
}
EXPORT_SYMBOL_GPL(pcie_update_link_speed);
@@ -808,21 +906,19 @@ static void pci_set_bus_speed(struct pci_bus *bus)
}
bus->max_bus_speed = max;
- bus->cur_bus_speed = pcix_bus_speed[
- (status & PCI_X_SSTATUS_FREQ) >> 6];
+ bus->cur_bus_speed =
+ pcix_bus_speed[FIELD_GET(PCI_X_SSTATUS_FREQ, status)];
return;
}
if (pci_is_pcie(bridge)) {
u32 linkcap;
- u16 linksta;
pcie_capability_read_dword(bridge, PCI_EXP_LNKCAP, &linkcap);
bus->max_bus_speed = pcie_link_speed[linkcap & PCI_EXP_LNKCAP_SLS];
- pcie_capability_read_word(bridge, PCI_EXP_LNKSTA, &linksta);
- pcie_update_link_speed(bus, linksta);
+ pcie_update_link_speed(bus);
}
}
@@ -878,6 +974,17 @@ static void pci_set_bus_msi_domain(struct pci_bus *bus)
dev_set_msi_domain(&bus->dev, d);
}
+static bool pci_preserve_config(struct pci_host_bridge *host_bridge)
+{
+ if (pci_acpi_preserve_config(host_bridge))
+ return true;
+
+ if (host_bridge->dev.parent && host_bridge->dev.parent->of_node)
+ return of_pci_preserve_config(host_bridge->dev.parent->of_node);
+
+ return false;
+}
+
static int pci_register_host_bridge(struct pci_host_bridge *bridge)
{
struct device *parent = bridge->dev.parent;
@@ -886,6 +993,7 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge)
resource_size_t offset, next_offset;
LIST_HEAD(resources);
struct resource *res, *next_res;
+ bool bus_registered = false;
char addr[64], *fmt;
const char *name;
int err;
@@ -928,10 +1036,9 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge)
/* Temporarily move resources off the list */
list_splice_init(&bridge->windows, &resources);
err = device_add(&bridge->dev);
- if (err) {
- put_device(&bridge->dev);
+ if (err)
goto free;
- }
+
bus->bridge = get_device(&bridge->dev);
device_enable_async_suspend(bus->bridge);
pci_set_bus_of_node(bus);
@@ -950,6 +1057,7 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge)
name = dev_name(&bus->dev);
err = device_register(&bus->dev);
+ bus_registered = true;
if (err)
goto unregister;
@@ -972,6 +1080,9 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge)
if (nr_node_ids > 1 && pcibus_to_node(bus) == NUMA_NO_NODE)
dev_warn(&bus->dev, "Unknown NUMA node; performance will be reduced\n");
+ /* Check if the boot configuration by FW needs to be preserved */
+ bridge->preserve_config = pci_preserve_config(bridge);
+
/* Coalesce contiguous windows */
resource_list_for_each_entry_safe(window, n, &resources) {
if (list_is_last(&window->node, &resources))
@@ -998,6 +1109,7 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge)
res = window->res;
if (!res->flags && !res->start && !res->end) {
release_resource(res);
+ resource_list_destroy_entry(window);
continue;
}
@@ -1006,7 +1118,7 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge)
if (res->flags & IORESOURCE_BUS)
pci_bus_insert_busn_res(bus, bus->number, res->end);
else
- pci_bus_add_resource(bus, res, 0);
+ pci_bus_add_resource(bus, res);
if (offset) {
if (resource_type(res) == IORESOURCE_IO)
@@ -1023,6 +1135,8 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge)
dev_info(&bus->dev, "root bus resource %pR%s\n", res, addr);
}
+ of_pci_make_host_bridge_node(bridge);
+
down_write(&pci_bus_sem);
list_add_tail(&bus->node, &pci_root_buses);
up_write(&pci_bus_sem);
@@ -1032,12 +1146,16 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge)
unregister:
put_device(&bridge->dev);
device_del(&bridge->dev);
-
free:
#ifdef CONFIG_PCI_DOMAINS_GENERIC
- pci_bus_release_domain_nr(bus, parent);
+ if (bridge->domain_nr == PCI_DOMAIN_NR_NOT_SET)
+ pci_bus_release_domain_nr(parent, bus->domain_nr);
#endif
- kfree(bus);
+ if (bus_registered)
+ put_device(&bus->dev);
+ else
+ kfree(bus);
+
return err;
}
@@ -1146,7 +1264,10 @@ static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
add_dev:
pci_set_bus_msi_domain(child);
ret = device_register(&child->dev);
- WARN_ON(ret < 0);
+ if (WARN_ON(ret < 0)) {
+ put_device(&child->dev);
+ return NULL;
+ }
pcibios_add_bus(child);
@@ -1177,15 +1298,17 @@ struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev,
}
EXPORT_SYMBOL(pci_add_new_bus);
-static void pci_enable_crs(struct pci_dev *pdev)
+static void pci_enable_rrs_sv(struct pci_dev *pdev)
{
u16 root_cap = 0;
- /* Enable CRS Software Visibility if supported */
+ /* Enable Configuration RRS Software Visibility if supported */
pcie_capability_read_word(pdev, PCI_EXP_RTCAP, &root_cap);
- if (root_cap & PCI_EXP_RTCAP_CRSVIS)
+ if (root_cap & PCI_EXP_RTCAP_RRS_SV) {
pcie_capability_set_word(pdev, PCI_EXP_RTCTL,
- PCI_EXP_RTCTL_CRSSVE);
+ PCI_EXP_RTCTL_RRS_SVE);
+ pdev->config_rrs_sv = 1;
+ }
}
static unsigned int pci_scan_child_bus_extend(struct pci_bus *bus,
@@ -1217,8 +1340,8 @@ static bool pci_ea_fixed_busnrs(struct pci_dev *dev, u8 *sec, u8 *sub)
offset = ea + PCI_EA_FIRST_ENT;
pci_read_config_dword(dev, offset, &dw);
- ea_sec = dw & PCI_EA_SEC_BUS_MASK;
- ea_sub = (dw & PCI_EA_SUB_BUS_MASK) >> PCI_EA_SUB_BUS_SHIFT;
+ ea_sec = FIELD_GET(PCI_EA_SEC_BUS_MASK, dw);
+ ea_sub = FIELD_GET(PCI_EA_SUB_BUS_MASK, dw);
if (ea_sec == 0 || ea_sub < ea_sec)
return false;
@@ -1300,8 +1423,6 @@ static int pci_scan_bridge_extend(struct pci_bus *bus, struct pci_dev *dev,
pci_write_config_word(dev, PCI_BRIDGE_CONTROL,
bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT);
- pci_enable_crs(dev);
-
if ((secondary || subordinate) && !pcibios_assign_all_busses() &&
!is_cardbus && !broken) {
unsigned int cmax, buses;
@@ -1470,6 +1591,9 @@ static int pci_scan_bridge_extend(struct pci_bus *bus, struct pci_dev *dev,
}
out:
+ /* Clear errors in the Secondary Status Register */
+ pci_write_config_word(dev, PCI_SEC_STATUS, 0xffff);
+
pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bctl);
pm_runtime_put(&dev->dev);
@@ -1539,6 +1663,11 @@ void set_pcie_port_type(struct pci_dev *pdev)
pdev->pcie_cap = pos;
pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
pdev->pcie_flags_reg = reg16;
+
+ type = pci_pcie_type(pdev);
+ if (type == PCI_EXP_TYPE_ROOT_PORT)
+ pci_enable_rrs_sv(pdev);
+
pci_read_config_dword(pdev, pos + PCI_EXP_DEVCAP, &pdev->devcap);
pdev->pcie_mpss = FIELD_GET(PCI_EXP_DEVCAP_PAYLOAD, pdev->devcap);
@@ -1546,6 +1675,13 @@ void set_pcie_port_type(struct pci_dev *pdev)
if (reg32 & PCI_EXP_LNKCAP_DLLLARC)
pdev->link_active_reporting = 1;
+#ifdef CONFIG_PCIEASPM
+ if (reg32 & PCI_EXP_LNKCAP_ASPM_L0S)
+ pdev->aspm_l0s_support = 1;
+ if (reg32 & PCI_EXP_LNKCAP_ASPM_L1)
+ pdev->aspm_l1_support = 1;
+#endif
+
parent = pci_upstream_bridge(pdev);
if (!parent)
return;
@@ -1555,7 +1691,6 @@ void set_pcie_port_type(struct pci_dev *pdev)
* correctly so detect impossible configurations here and correct
* the port type accordingly.
*/
- type = pci_pcie_type(pdev);
if (type == PCI_EXP_TYPE_DOWNSTREAM) {
/*
* If pdev claims to be downstream port but the parent
@@ -1587,7 +1722,7 @@ void set_pcie_hotplug_bridge(struct pci_dev *pdev)
pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &reg32);
if (reg32 & PCI_EXP_SLTCAP_HPC)
- pdev->is_hotplug_bridge = 1;
+ pdev->is_hotplug_bridge = pdev->is_pciehp = 1;
}
static void set_pcie_thunderbolt(struct pci_dev *dev)
@@ -1602,23 +1737,33 @@ static void set_pcie_thunderbolt(struct pci_dev *dev)
static void set_pcie_untrusted(struct pci_dev *dev)
{
- struct pci_dev *parent;
+ struct pci_dev *parent = pci_upstream_bridge(dev);
+ if (!parent)
+ return;
/*
- * If the upstream bridge is untrusted we treat this device
+ * If the upstream bridge is untrusted we treat this device as
* untrusted as well.
*/
- parent = pci_upstream_bridge(dev);
- if (parent && (parent->untrusted || parent->external_facing))
+ if (parent->untrusted) {
+ dev->untrusted = true;
+ return;
+ }
+
+ if (arch_pci_dev_is_removable(dev)) {
+ pci_dbg(dev, "marking as untrusted\n");
dev->untrusted = true;
+ }
}
static void pci_set_removable(struct pci_dev *dev)
{
struct pci_dev *parent = pci_upstream_bridge(dev);
+ if (!parent)
+ return;
/*
- * We (only) consider everything downstream from an external_facing
+ * We (only) consider everything tunneled below an external_facing
* device to be removable by the user. We're mainly concerned with
* consumer platforms with user accessible thunderbolt ports that are
* vulnerable to DMA attacks, and we expect those ports to be marked by
@@ -1628,9 +1773,15 @@ static void pci_set_removable(struct pci_dev *dev)
* accessible to user / may not be removed by end user, and thus not
* exposed as "removable" to userspace.
*/
- if (parent &&
- (parent->external_facing || dev_is_removable(&parent->dev)))
+ if (dev_is_removable(&parent->dev)) {
+ dev_set_removable(&dev->dev, DEVICE_REMOVABLE);
+ return;
+ }
+
+ if (arch_pci_dev_is_removable(dev)) {
+ pci_dbg(dev, "marking as removable\n");
dev_set_removable(&dev->dev, DEVICE_REMOVABLE);
+ }
}
/**
@@ -1652,15 +1803,15 @@ static void pci_set_removable(struct pci_dev *dev)
static bool pci_ext_cfg_is_aliased(struct pci_dev *dev)
{
#ifdef CONFIG_PCI_QUIRKS
- int pos;
+ int pos, ret;
u32 header, tmp;
pci_read_config_dword(dev, PCI_VENDOR_ID, &header);
for (pos = PCI_CFG_SPACE_SIZE;
pos < PCI_CFG_SPACE_EXP_SIZE; pos += PCI_CFG_SPACE_SIZE) {
- if (pci_read_config_dword(dev, pos, &tmp) != PCIBIOS_SUCCESSFUL
- || header != tmp)
+ ret = pci_read_config_dword(dev, pos, &tmp);
+ if ((ret != PCIBIOS_SUCCESSFUL) || (header != tmp))
return false;
}
@@ -1805,16 +1956,53 @@ static int pci_intx_mask_broken(struct pci_dev *dev)
static void early_dump_pci_device(struct pci_dev *pdev)
{
- u32 value[256 / 4];
+ u32 value[PCI_CFG_SPACE_SIZE / sizeof(u32)];
int i;
pci_info(pdev, "config space:\n");
- for (i = 0; i < 256; i += 4)
- pci_read_config_dword(pdev, i, &value[i / 4]);
+ for (i = 0; i < ARRAY_SIZE(value); i++)
+ pci_read_config_dword(pdev, i * sizeof(u32), &value[i]);
print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 1,
- value, 256, false);
+ value, ARRAY_SIZE(value) * sizeof(u32), false);
+}
+
+static const char *pci_type_str(struct pci_dev *dev)
+{
+ static const char * const str[] = {
+ "PCIe Endpoint",
+ "PCIe Legacy Endpoint",
+ "PCIe unknown",
+ "PCIe unknown",
+ "PCIe Root Port",
+ "PCIe Switch Upstream Port",
+ "PCIe Switch Downstream Port",
+ "PCIe to PCI/PCI-X bridge",
+ "PCI/PCI-X to PCIe bridge",
+ "PCIe Root Complex Integrated Endpoint",
+ "PCIe Root Complex Event Collector",
+ };
+ int type;
+
+ if (pci_is_pcie(dev)) {
+ type = pci_pcie_type(dev);
+ if (type < ARRAY_SIZE(str))
+ return str[type];
+
+ return "PCIe unknown";
+ }
+
+ switch (dev->hdr_type) {
+ case PCI_HEADER_TYPE_NORMAL:
+ return "conventional PCI endpoint";
+ case PCI_HEADER_TYPE_BRIDGE:
+ return "conventional PCI bridge";
+ case PCI_HEADER_TYPE_CARDBUS:
+ return "CardBus bridge";
+ default:
+ return "conventional PCI";
+ }
}
/**
@@ -1841,8 +2029,8 @@ int pci_setup_device(struct pci_dev *dev)
dev->sysdata = dev->bus->sysdata;
dev->dev.parent = dev->bus->bridge;
dev->dev.bus = &pci_bus_type;
- dev->hdr_type = hdr_type & 0x7f;
- dev->multifunction = !!(hdr_type & 0x80);
+ dev->hdr_type = FIELD_GET(PCI_HEADER_TYPE_MASK, hdr_type);
+ dev->multifunction = FIELD_GET(PCI_HEADER_TYPE_MFD, hdr_type);
dev->error_state = pci_channel_io_normal;
set_pcie_port_type(dev);
@@ -1879,6 +2067,9 @@ int pci_setup_device(struct pci_dev *dev)
set_pcie_untrusted(dev);
+ if (pci_is_pcie(dev))
+ dev->supported_speeds = pcie_get_supported_speeds(dev);
+
/* "Unknown power state" */
dev->current_state = PCI_UNKNOWN;
@@ -1887,8 +2078,9 @@ int pci_setup_device(struct pci_dev *dev)
pci_set_removable(dev);
- pci_info(dev, "[%04x:%04x] type %02x class %#08x\n",
- dev->vendor, dev->device, dev->hdr_type, dev->class);
+ pci_info(dev, "[%04x:%04x] type %02x class %#08x %s\n",
+ dev->vendor, dev->device, dev->hdr_type, dev->class,
+ pci_type_str(dev));
/* Device class may be changed after fixup */
class = dev->class >> 8;
@@ -1910,7 +2102,7 @@ int pci_setup_device(struct pci_dev *dev)
if (class == PCI_CLASS_BRIDGE_PCI)
goto bad;
pci_read_irq(dev);
- pci_read_bases(dev, 6, PCI_ROM_ADDRESS);
+ pci_read_bases(dev, PCI_STD_NUM_BARS, PCI_ROM_ADDRESS);
pci_subsystem_ids(dev, &dev->subsystem_vendor, &dev->subsystem_device);
@@ -1929,14 +2121,14 @@ int pci_setup_device(struct pci_dev *dev)
res = &dev->resource[0];
res->flags = LEGACY_IO_RESOURCE;
pcibios_bus_to_resource(dev->bus, res, &region);
- pci_info(dev, "legacy IDE quirk: reg 0x10: %pR\n",
+ pci_info(dev, "BAR 0 %pR: legacy IDE quirk\n",
res);
region.start = 0x3F6;
region.end = 0x3F6;
res = &dev->resource[1];
res->flags = LEGACY_IO_RESOURCE;
pcibios_bus_to_resource(dev->bus, res, &region);
- pci_info(dev, "legacy IDE quirk: reg 0x14: %pR\n",
+ pci_info(dev, "BAR 1 %pR: legacy IDE quirk\n",
res);
}
if ((progif & 4) == 0) {
@@ -1945,14 +2137,14 @@ int pci_setup_device(struct pci_dev *dev)
res = &dev->resource[2];
res->flags = LEGACY_IO_RESOURCE;
pcibios_bus_to_resource(dev->bus, res, &region);
- pci_info(dev, "legacy IDE quirk: reg 0x18: %pR\n",
+ pci_info(dev, "BAR 2 %pR: legacy IDE quirk\n",
res);
region.start = 0x376;
region.end = 0x376;
res = &dev->resource[3];
res->flags = LEGACY_IO_RESOURCE;
pcibios_bus_to_resource(dev->bus, res, &region);
- pci_info(dev, "legacy IDE quirk: reg 0x1c: %pR\n",
+ pci_info(dev, "BAR 3 %pR: legacy IDE quirk\n",
res);
}
}
@@ -2117,6 +2309,17 @@ int pci_configure_extended_tags(struct pci_dev *dev, void *ign)
return 0;
}
+static void pci_dev3_init(struct pci_dev *pdev)
+{
+ u16 cap = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DEV3);
+ u32 val = 0;
+
+ if (!cap)
+ return;
+ pci_read_config_dword(pdev, cap + PCI_DEV3_STA, &val);
+ pdev->fm_enabled = !!(val & PCI_DEV3_STA_SEGMENT);
+}
+
/**
* pcie_relaxed_ordering_enabled - Probe for PCIe relaxed ordering enable
* @dev: PCI device to query
@@ -2137,7 +2340,7 @@ static void pci_configure_relaxed_ordering(struct pci_dev *dev)
{
struct pci_dev *root;
- /* PCI_EXP_DEVICE_RELAX_EN is RsvdP in VFs */
+ /* PCI_EXP_DEVCTL_RELAX_EN is RsvdP in VFs */
if (dev->is_virtfn)
return;
@@ -2159,71 +2362,10 @@ static void pci_configure_relaxed_ordering(struct pci_dev *dev)
}
}
-static void pci_configure_ltr(struct pci_dev *dev)
-{
-#ifdef CONFIG_PCIEASPM
- struct pci_host_bridge *host = pci_find_host_bridge(dev->bus);
- struct pci_dev *bridge;
- u32 cap, ctl;
-
- if (!pci_is_pcie(dev))
- return;
-
- /* Read L1 PM substate capabilities */
- dev->l1ss = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_L1SS);
-
- pcie_capability_read_dword(dev, PCI_EXP_DEVCAP2, &cap);
- if (!(cap & PCI_EXP_DEVCAP2_LTR))
- return;
-
- pcie_capability_read_dword(dev, PCI_EXP_DEVCTL2, &ctl);
- if (ctl & PCI_EXP_DEVCTL2_LTR_EN) {
- if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) {
- dev->ltr_path = 1;
- return;
- }
-
- bridge = pci_upstream_bridge(dev);
- if (bridge && bridge->ltr_path)
- dev->ltr_path = 1;
-
- return;
- }
-
- if (!host->native_ltr)
- return;
-
- /*
- * Software must not enable LTR in an Endpoint unless the Root
- * Complex and all intermediate Switches indicate support for LTR.
- * PCIe r4.0, sec 6.18.
- */
- if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) {
- pcie_capability_set_word(dev, PCI_EXP_DEVCTL2,
- PCI_EXP_DEVCTL2_LTR_EN);
- dev->ltr_path = 1;
- return;
- }
-
- /*
- * If we're configuring a hot-added device, LTR was likely
- * disabled in the upstream bridge, so re-enable it before enabling
- * it in the new device.
- */
- bridge = pci_upstream_bridge(dev);
- if (bridge && bridge->ltr_path) {
- pci_bridge_reconfigure_ltr(dev);
- pcie_capability_set_word(dev, PCI_EXP_DEVCTL2,
- PCI_EXP_DEVCTL2_LTR_EN);
- dev->ltr_path = 1;
- }
-#endif
-}
-
static void pci_configure_eetlp_prefix(struct pci_dev *dev)
{
-#ifdef CONFIG_PCI_PASID
struct pci_dev *bridge;
+ unsigned int eetlp_max;
int pcie_type;
u32 cap;
@@ -2235,15 +2377,19 @@ static void pci_configure_eetlp_prefix(struct pci_dev *dev)
return;
pcie_type = pci_pcie_type(dev);
+
+ eetlp_max = FIELD_GET(PCI_EXP_DEVCAP2_EE_PREFIX_MAX, cap);
+ /* 00b means 4 */
+ eetlp_max = eetlp_max ?: 4;
+
if (pcie_type == PCI_EXP_TYPE_ROOT_PORT ||
pcie_type == PCI_EXP_TYPE_RC_END)
- dev->eetlp_prefix_path = 1;
+ dev->eetlp_prefix_max = eetlp_max;
else {
bridge = pci_upstream_bridge(dev);
- if (bridge && bridge->eetlp_prefix_path)
- dev->eetlp_prefix_path = 1;
+ if (bridge && bridge->eetlp_prefix_max)
+ dev->eetlp_prefix_max = eetlp_max;
}
-#endif
}
static void pci_configure_serr(struct pci_dev *dev)
@@ -2270,6 +2416,7 @@ static void pci_configure_device(struct pci_dev *dev)
pci_configure_extended_tags(dev, NULL);
pci_configure_relaxed_ordering(dev);
pci_configure_ltr(dev);
+ pci_configure_aspm_l1ss(dev);
pci_configure_eetlp_prefix(dev);
pci_configure_serr(dev);
@@ -2307,6 +2454,10 @@ static void pci_release_dev(struct device *dev)
kfree(pci_dev);
}
+static const struct device_type pci_dev_type = {
+ .groups = pci_dev_attr_groups,
+};
+
struct pci_dev *pci_alloc_dev(struct pci_bus *bus)
{
struct pci_dev *dev;
@@ -2324,6 +2475,7 @@ struct pci_dev *pci_alloc_dev(struct pci_bus *bus)
.end = -1,
};
+ spin_lock_init(&dev->pcie_cap_lock);
#ifdef CONFIG_PCI_MSI
raw_spin_lock_init(&dev->msi_lock);
#endif
@@ -2331,28 +2483,23 @@ struct pci_dev *pci_alloc_dev(struct pci_bus *bus)
}
EXPORT_SYMBOL(pci_alloc_dev);
-static bool pci_bus_crs_vendor_id(u32 l)
-{
- return (l & 0xffff) == PCI_VENDOR_ID_PCI_SIG;
-}
-
-static bool pci_bus_wait_crs(struct pci_bus *bus, int devfn, u32 *l,
+static bool pci_bus_wait_rrs(struct pci_bus *bus, int devfn, u32 *l,
int timeout)
{
int delay = 1;
- if (!pci_bus_crs_vendor_id(*l))
- return true; /* not a CRS completion */
+ if (!pci_bus_rrs_vendor_id(*l))
+ return true; /* not a Configuration RRS completion */
if (!timeout)
- return false; /* CRS, but caller doesn't want to wait */
+ return false; /* RRS, but caller doesn't want to wait */
/*
* We got the reserved Vendor ID that indicates a completion with
- * Configuration Request Retry Status (CRS). Retry until we get a
+ * Configuration Request Retry Status (RRS). Retry until we get a
* valid Vendor ID or we time out.
*/
- while (pci_bus_crs_vendor_id(*l)) {
+ while (pci_bus_rrs_vendor_id(*l)) {
if (delay > timeout) {
pr_warn("pci %04x:%02x:%02x.%d: not ready after %dms; giving up\n",
pci_domain_nr(bus), bus->number,
@@ -2391,8 +2538,8 @@ bool pci_bus_generic_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l,
*l == 0x0000ffff || *l == 0xffff0000)
return false;
- if (pci_bus_crs_vendor_id(*l))
- return pci_bus_wait_crs(bus, devfn, l, timeout);
+ if (pci_bus_rrs_vendor_id(*l))
+ return pci_bus_wait_rrs(bus, devfn, l, timeout);
return true;
}
@@ -2416,6 +2563,56 @@ bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l,
}
EXPORT_SYMBOL(pci_bus_read_dev_vendor_id);
+#if IS_ENABLED(CONFIG_PCI_PWRCTRL)
+static struct platform_device *pci_pwrctrl_create_device(struct pci_bus *bus, int devfn)
+{
+ struct pci_host_bridge *host = pci_find_host_bridge(bus);
+ struct platform_device *pdev;
+ struct device_node *np;
+
+ np = of_pci_find_child_device(dev_of_node(&bus->dev), devfn);
+ if (!np)
+ return NULL;
+
+ pdev = of_find_device_by_node(np);
+ if (pdev) {
+ put_device(&pdev->dev);
+ goto err_put_of_node;
+ }
+
+ /*
+ * First check whether the pwrctrl device really needs to be created or
+ * not. This is decided based on at least one of the power supplies
+ * being defined in the devicetree node of the device.
+ */
+ if (!of_pci_supply_present(np)) {
+ pr_debug("PCI/pwrctrl: Skipping OF node: %s\n", np->name);
+ goto err_put_of_node;
+ }
+
+ /* Now create the pwrctrl device */
+ pdev = of_platform_device_create(np, NULL, &host->dev);
+ if (!pdev) {
+ pr_err("PCI/pwrctrl: Failed to create pwrctrl device for node: %s\n", np->name);
+ goto err_put_of_node;
+ }
+
+ of_node_put(np);
+
+ return pdev;
+
+err_put_of_node:
+ of_node_put(np);
+
+ return NULL;
+}
+#else
+static struct platform_device *pci_pwrctrl_create_device(struct pci_bus *bus, int devfn)
+{
+ return NULL;
+}
+#endif
+
/*
* Read the config data for a PCI device, sanity-check it,
* and fill in the dev structure.
@@ -2425,6 +2622,15 @@ static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
struct pci_dev *dev;
u32 l;
+ /*
+ * Create pwrctrl device (if required) for the PCI device to handle the
+ * power state. If the pwrctrl device is created, then skip scanning
+ * further as the pwrctrl core will rescan the bus after powering on
+ * the device.
+ */
+ if (pci_pwrctrl_create_device(bus, devfn))
+ return NULL;
+
if (!pci_bus_read_dev_vendor_id(bus, devfn, &l, 60*1000))
return NULL;
@@ -2464,6 +2670,15 @@ void pcie_report_downtraining(struct pci_dev *dev)
__pcie_print_link_status(dev, false);
}
+static void pci_imm_ready_init(struct pci_dev *dev)
+{
+ u16 status;
+
+ pci_read_config_word(dev, PCI_STATUS, &status);
+ if (status & PCI_STATUS_IMM_READY)
+ dev->imm_ready = 1;
+}
+
static void pci_init_capabilities(struct pci_dev *dev)
{
pci_ea_init(dev); /* Enhanced Allocation */
@@ -2473,6 +2688,7 @@ static void pci_init_capabilities(struct pci_dev *dev)
/* Buffers for saving PCIe and PCI-X capabilities */
pci_allocate_cap_save_buffers(dev);
+ pci_imm_ready_init(dev); /* Immediate Readiness */
pci_pm_init(dev); /* Power Management */
pci_vpd_init(dev); /* Vital Product Data */
pci_configure_ari(dev); /* Alternative Routing-ID Forwarding */
@@ -2486,6 +2702,10 @@ static void pci_init_capabilities(struct pci_dev *dev)
pci_dpc_init(dev); /* Downstream Port Containment */
pci_rcec_init(dev); /* Root Complex Event Collector */
pci_doe_init(dev); /* Data Object Exchange */
+ pci_tph_init(dev); /* TLP Processing Hints */
+ pci_rebar_init(dev); /* Resizable BAR */
+ pci_dev3_init(dev); /* Device 3 capabilities */
+ pci_ide_init(dev); /* Link Integrity and Data Encryption */
pcie_report_downtraining(dev);
pci_init_reset_methods(dev);
@@ -2559,8 +2779,6 @@ void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
pci_reassigndev_resource_alignment(dev);
- dev->state_saved = false;
-
pci_init_capabilities(dev);
/*
@@ -2578,9 +2796,15 @@ void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
pci_set_msi_domain(dev);
/* Notifier could use PCI capabilities */
- dev->match_driver = false;
ret = device_add(&dev->dev);
WARN_ON(ret < 0);
+
+ /* Establish pdev->tsm for newly added (e.g. new SR-IOV VFs) */
+ pci_tsm_init(dev);
+
+ pci_npem_create(dev);
+
+ pci_doe_sysfs_init(dev);
}
struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn)
@@ -2892,14 +3116,14 @@ static unsigned int pci_scan_child_bus_extend(struct pci_bus *bus,
{
unsigned int used_buses, normal_bridges = 0, hotplug_bridges = 0;
unsigned int start = bus->busn_res.start;
- unsigned int devfn, cmax, max = start;
+ unsigned int devnr, cmax, max = start;
struct pci_dev *dev;
dev_dbg(&bus->dev, "scanning bus\n");
/* Go find them, Rover! */
- for (devfn = 0; devfn < 256; devfn += 8)
- pci_scan_slot(bus, devfn);
+ for (devnr = 0; devnr < PCI_MAX_NR_DEVS; devnr++)
+ pci_scan_slot(bus, PCI_DEVFN(devnr, 0));
/* Reserve buses for SR-IOV capability */
used_buses = pci_iov_bus_range(bus);
@@ -2979,8 +3203,7 @@ static unsigned int pci_scan_child_bus_extend(struct pci_bus *bus,
* bus number if there is room.
*/
if (bus->self && bus->self->is_hotplug_bridge) {
- used_buses = max_t(unsigned int, available_buses,
- pci_hotplug_bus_size - 1);
+ used_buses = max(available_buses, pci_hotplug_bus_size - 1);
if (max - start < used_buses) {
max = start + used_buses;
@@ -3071,7 +3294,9 @@ int pci_host_probe(struct pci_host_bridge *bridge)
struct pci_bus *bus, *child;
int ret;
+ pci_lock_rescan_remove();
ret = pci_scan_root_bus_bridge(bridge);
+ pci_unlock_rescan_remove();
if (ret < 0) {
dev_err(bridge->dev.parent, "Scanning root bridge failed");
return ret;
@@ -3079,22 +3304,33 @@ int pci_host_probe(struct pci_host_bridge *bridge)
bus = bridge->bus;
+ /* If we must preserve the resource configuration, claim now */
+ if (bridge->preserve_config)
+ pci_bus_claim_resources(bus);
+
/*
- * We insert PCI resources into the iomem_resource and
- * ioport_resource trees in either pci_bus_claim_resources()
- * or pci_bus_assign_resources().
+ * Assign whatever was left unassigned. If we didn't claim above,
+ * this will reassign everything.
*/
- if (pci_has_flag(PCI_PROBE_ONLY)) {
- pci_bus_claim_resources(bus);
- } else {
- pci_bus_size_bridges(bus);
- pci_bus_assign_resources(bus);
+ pci_assign_unassigned_root_bus_resources(bus);
- list_for_each_entry(child, &bus->children, node)
- pcie_bus_configure_settings(child);
- }
+ list_for_each_entry(child, &bus->children, node)
+ pcie_bus_configure_settings(child);
+ pci_lock_rescan_remove();
pci_bus_add_devices(bus);
+ pci_unlock_rescan_remove();
+
+ /*
+ * Ensure pm_runtime_enable() is called for the controller drivers
+ * before calling pci_host_probe(). The PM framework expects that
+ * if the parent device supports runtime PM, it will be enabled
+ * before child runtime PM is enabled.
+ */
+ pm_runtime_set_active(&bridge->dev);
+ pm_runtime_no_callbacks(&bridge->dev);
+ devm_pm_runtime_enable(&bridge->dev);
+
return 0;
}
EXPORT_SYMBOL_GPL(pci_host_probe);
@@ -3303,7 +3539,7 @@ EXPORT_SYMBOL_GPL(pci_rescan_bus);
* pci_rescan_bus(), pci_rescan_bus_bridge_resize() and PCI device removal
* routines should always be executed under this mutex.
*/
-static DEFINE_MUTEX(pci_rescan_remove_lock);
+DEFINE_MUTEX(pci_rescan_remove_lock);
void pci_lock_rescan_remove(void)
{
diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
index f967709082d6..9348a0fb8084 100644
--- a/drivers/pci/proc.c
+++ b/drivers/pci/proc.c
@@ -251,6 +251,10 @@ static int proc_bus_pci_mmap(struct file *file, struct vm_area_struct *vma)
security_locked_down(LOCKDOWN_PCI_ACCESS))
return -EPERM;
+ /* Skip devices with non-mappable BARs */
+ if (dev->non_mappable_bars)
+ return -EINVAL;
+
if (fpriv->mmap_state == pci_mmap_io) {
if (!arch_can_pci_mmap_io())
return -EINVAL;
diff --git a/drivers/pci/pwrctrl/Kconfig b/drivers/pci/pwrctrl/Kconfig
new file mode 100644
index 000000000000..e0f999f299bb
--- /dev/null
+++ b/drivers/pci/pwrctrl/Kconfig
@@ -0,0 +1,48 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config HAVE_PWRCTRL
+ bool
+
+config PCI_PWRCTRL
+ tristate
+
+config PCI_PWRCTRL_PWRSEQ
+ tristate
+ select POWER_SEQUENCING
+ select PCI_PWRCTRL
+
+config PCI_PWRCTRL_SLOT
+ tristate "PCI Power Control driver for PCI slots"
+ select PCI_PWRCTRL
+ help
+ Say Y here to enable the PCI Power Control driver to control the power
+ state of PCI slots.
+
+ This is a generic driver that controls the power state of different
+ PCI slots. The voltage regulators powering the rails of the PCI slots
+ are expected to be defined in the devicetree node of the PCI bridge.
+
+config PCI_PWRCTRL_TC9563
+ tristate "PCI Power Control driver for TC9563 PCIe switch"
+ select PCI_PWRCTRL
+ default m if ARCH_QCOM
+ depends on I2C
+ help
+ Say Y here to enable the PCI Power Control driver for the TC9563
+ PCIe switch.
+
+ This driver powers up and configures the TC9563 PCIe switch through
+ I2C. The TC9563 has one upstream port and three downstream ports. An
+ integrated Ethernet MAC is connected to one downstream port as an
+ endpoint device; the other two downstream ports connect to external
+ devices.
+
+# deprecated
+config HAVE_PWRCTL
+ bool
+ select HAVE_PWRCTRL
+
+# deprecated
+config PCI_PWRCTL_PWRSEQ
+ tristate
+ select PCI_PWRCTRL_PWRSEQ
diff --git a/drivers/pci/pwrctrl/Makefile b/drivers/pci/pwrctrl/Makefile
new file mode 100644
index 000000000000..13b02282106c
--- /dev/null
+++ b/drivers/pci/pwrctrl/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+obj-$(CONFIG_PCI_PWRCTRL) += pci-pwrctrl-core.o
+pci-pwrctrl-core-y := core.o
+
+obj-$(CONFIG_PCI_PWRCTRL_PWRSEQ) += pci-pwrctrl-pwrseq.o
+
+obj-$(CONFIG_PCI_PWRCTRL_SLOT) += pci-pwrctrl-slot.o
+pci-pwrctrl-slot-y := slot.o
+
+obj-$(CONFIG_PCI_PWRCTRL_TC9563) += pci-pwrctrl-tc9563.o
diff --git a/drivers/pci/pwrctrl/core.c b/drivers/pci/pwrctrl/core.c
new file mode 100644
index 000000000000..6bdbfed584d6
--- /dev/null
+++ b/drivers/pci/pwrctrl/core.c
@@ -0,0 +1,150 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2024 Linaro Ltd.
+ */
+
+#include <linux/device.h>
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/pci-pwrctrl.h>
+#include <linux/property.h>
+#include <linux/slab.h>
+
+static int pci_pwrctrl_notify(struct notifier_block *nb, unsigned long action,
+ void *data)
+{
+ struct pci_pwrctrl *pwrctrl = container_of(nb, struct pci_pwrctrl, nb);
+ struct device *dev = data;
+
+ if (dev_fwnode(dev) != dev_fwnode(pwrctrl->dev))
+ return NOTIFY_DONE;
+
+ switch (action) {
+ case BUS_NOTIFY_ADD_DEVICE:
+ /*
+ * We will have two struct device objects bound to two different
+ * drivers on different buses but consuming the same DT node. We
+ * must not bind the pins twice in this case but only once for
+ * the first device to be added.
+ *
+ * If we got here then the PCI device is the second after the
+ * power control platform device. Mark its OF node as reused.
+ */
+ dev->of_node_reused = true;
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static void rescan_work_func(struct work_struct *work)
+{
+ struct pci_pwrctrl *pwrctrl = container_of(work,
+ struct pci_pwrctrl, work);
+
+ pci_lock_rescan_remove();
+ pci_rescan_bus(to_pci_host_bridge(pwrctrl->dev->parent)->bus);
+ pci_unlock_rescan_remove();
+}
+
+/**
+ * pci_pwrctrl_init() - Initialize the PCI power control context struct
+ *
+ * @pwrctrl: PCI power control data
+ * @dev: Parent device
+ */
+void pci_pwrctrl_init(struct pci_pwrctrl *pwrctrl, struct device *dev)
+{
+ pwrctrl->dev = dev;
+ INIT_WORK(&pwrctrl->work, rescan_work_func);
+}
+EXPORT_SYMBOL_GPL(pci_pwrctrl_init);
+
+/**
+ * pci_pwrctrl_device_set_ready() - Notify the pwrctrl subsystem that the PCI
+ * device is powered-up and ready to be detected.
+ *
+ * @pwrctrl: PCI power control data.
+ *
+ * Returns:
+ * 0 on success, negative error number on error.
+ *
+ * Note:
+ * This function returning 0 doesn't mean the device was detected. It means
+ * that the bus rescan was successfully started. The device will get bound to
+ * its PCI driver asynchronously.
+ */
+int pci_pwrctrl_device_set_ready(struct pci_pwrctrl *pwrctrl)
+{
+ int ret;
+
+ if (!pwrctrl->dev)
+ return -ENODEV;
+
+ pwrctrl->nb.notifier_call = pci_pwrctrl_notify;
+ ret = bus_register_notifier(&pci_bus_type, &pwrctrl->nb);
+ if (ret)
+ return ret;
+
+ schedule_work(&pwrctrl->work);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pci_pwrctrl_device_set_ready);
+
+/**
+ * pci_pwrctrl_device_unset_ready() - Notify the pwrctrl subsystem that the PCI
+ * device is about to be powered-down.
+ *
+ * @pwrctrl: PCI power control data.
+ */
+void pci_pwrctrl_device_unset_ready(struct pci_pwrctrl *pwrctrl)
+{
+ cancel_work_sync(&pwrctrl->work);
+
+ /*
+ * We don't have to delete the link here. Typically, this function
+ * is only called when the power control device is being detached. If
+ * it is being detached then the child PCI device must have already
+ * been unbound too or the device core wouldn't let us unbind.
+ */
+ bus_unregister_notifier(&pci_bus_type, &pwrctrl->nb);
+}
+EXPORT_SYMBOL_GPL(pci_pwrctrl_device_unset_ready);
+
+static void devm_pci_pwrctrl_device_unset_ready(void *data)
+{
+ struct pci_pwrctrl *pwrctrl = data;
+
+ pci_pwrctrl_device_unset_ready(pwrctrl);
+}
+
+/**
+ * devm_pci_pwrctrl_device_set_ready - Managed variant of
+ * pci_pwrctrl_device_set_ready().
+ *
+ * @dev: Device managing this pwrctrl provider.
+ * @pwrctrl: PCI power control data.
+ *
+ * Returns:
+ * 0 on success, negative error number on error.
+ */
+int devm_pci_pwrctrl_device_set_ready(struct device *dev,
+ struct pci_pwrctrl *pwrctrl)
+{
+ int ret;
+
+ ret = pci_pwrctrl_device_set_ready(pwrctrl);
+ if (ret)
+ return ret;
+
+ return devm_add_action_or_reset(dev,
+ devm_pci_pwrctrl_device_unset_ready,
+ pwrctrl);
+}
+EXPORT_SYMBOL_GPL(devm_pci_pwrctrl_device_set_ready);
+
+MODULE_AUTHOR("Bartosz Golaszewski <bartosz.golaszewski@linaro.org>");
+MODULE_DESCRIPTION("PCI Device Power Control core driver");
+MODULE_LICENSE("GPL");
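
For orientation, the provider-side flow of the pwrctrl core API added above boils down to: power the device up, initialize the context with pci_pwrctrl_init(), then hand it to devm_pci_pwrctrl_device_set_ready(), which schedules the bus rescan. The sketch below is illustrative only; the foo_* names and the wrapper struct are assumptions made for the example, not part of this patch. The pwrseq and slot drivers in the following hunks are the actual in-tree users of this pattern.

/*
 * Minimal sketch of a pwrctrl provider probe (illustrative; assumes
 * <linux/pci-pwrctrl.h>, <linux/platform_device.h> and <linux/slab.h>
 * are included).
 */
struct foo_pwrctrl {
	struct pci_pwrctrl ctx;
};

static int foo_pwrctrl_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct foo_pwrctrl *foo;

	foo = devm_kzalloc(dev, sizeof(*foo), GFP_KERNEL);
	if (!foo)
		return -ENOMEM;

	/* Power up the device here (regulators, clocks, reset), then: */
	pci_pwrctrl_init(&foo->ctx, dev);

	/* Schedules a bus rescan; the PCI device binds asynchronously. */
	return devm_pci_pwrctrl_device_set_ready(dev, &foo->ctx);
}
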
diff --git a/drivers/pci/pwrctrl/pci-pwrctrl-pwrseq.c b/drivers/pci/pwrctrl/pci-pwrctrl-pwrseq.c
new file mode 100644
index 000000000000..4e664e7b8dd2
--- /dev/null
+++ b/drivers/pci/pwrctrl/pci-pwrctrl-pwrseq.c
@@ -0,0 +1,139 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2024 Linaro Ltd.
+ */
+
+#include <linux/device.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/pci-pwrctrl.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/pwrseq/consumer.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+struct pci_pwrctrl_pwrseq_data {
+ struct pci_pwrctrl ctx;
+ struct pwrseq_desc *pwrseq;
+};
+
+struct pci_pwrctrl_pwrseq_pdata {
+ const char *target;
+ /*
+ * Called first to perform device-specific verification before
+ * requesting the power sequencing handle.
+ */
+ int (*validate_device)(struct device *dev);
+};
+
+static int pci_pwrctrl_pwrseq_qcm_wcn_validate_device(struct device *dev)
+{
+ /*
+ * Old device trees for some platforms already define wifi nodes for
+ * the WCN family of chips since before power sequencing was added
+ * upstream.
+ *
+ * These nodes don't consume the regulator outputs from the PMU, and
+ * if we allow this driver to bind to one of such "incomplete" nodes,
+ * we'll see a kernel log error about the indefinite probe deferral.
+ *
+ * Check the existence of the regulator supply that exists on all
+ * WCN models before moving forward.
+ */
+ if (!device_property_present(dev, "vddaon-supply"))
+ return -ENODEV;
+
+ return 0;
+}
+
+static const struct pci_pwrctrl_pwrseq_pdata pci_pwrctrl_pwrseq_qcom_wcn_pdata = {
+ .target = "wlan",
+ .validate_device = pci_pwrctrl_pwrseq_qcm_wcn_validate_device,
+};
+
+static void devm_pci_pwrctrl_pwrseq_power_off(void *data)
+{
+ struct pwrseq_desc *pwrseq = data;
+
+ pwrseq_power_off(pwrseq);
+}
+
+static int pci_pwrctrl_pwrseq_probe(struct platform_device *pdev)
+{
+ const struct pci_pwrctrl_pwrseq_pdata *pdata;
+ struct pci_pwrctrl_pwrseq_data *data;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ pdata = device_get_match_data(dev);
+ if (!pdata || !pdata->target)
+ return -EINVAL;
+
+ if (pdata->validate_device) {
+ ret = pdata->validate_device(dev);
+ if (ret)
+ return ret;
+ }
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->pwrseq = devm_pwrseq_get(dev, pdata->target);
+ if (IS_ERR(data->pwrseq))
+ return dev_err_probe(dev, PTR_ERR(data->pwrseq),
+ "Failed to get the power sequencer\n");
+
+ ret = pwrseq_power_on(data->pwrseq);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to power-on the device\n");
+
+ ret = devm_add_action_or_reset(dev, devm_pci_pwrctrl_pwrseq_power_off,
+ data->pwrseq);
+ if (ret)
+ return ret;
+
+ pci_pwrctrl_init(&data->ctx, dev);
+
+ ret = devm_pci_pwrctrl_device_set_ready(dev, &data->ctx);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to register the pwrctrl wrapper\n");
+
+ return 0;
+}
+
+static const struct of_device_id pci_pwrctrl_pwrseq_of_match[] = {
+ {
+ /* ATH11K in QCA6390 package. */
+ .compatible = "pci17cb,1101",
+ .data = &pci_pwrctrl_pwrseq_qcom_wcn_pdata,
+ },
+ {
+ /* ATH11K in WCN6855 package. */
+ .compatible = "pci17cb,1103",
+ .data = &pci_pwrctrl_pwrseq_qcom_wcn_pdata,
+ },
+ {
+ /* ATH12K in WCN7850 package. */
+ .compatible = "pci17cb,1107",
+ .data = &pci_pwrctrl_pwrseq_qcom_wcn_pdata,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, pci_pwrctrl_pwrseq_of_match);
+
+static struct platform_driver pci_pwrctrl_pwrseq_driver = {
+ .driver = {
+ .name = "pci-pwrctrl-pwrseq",
+ .of_match_table = pci_pwrctrl_pwrseq_of_match,
+ },
+ .probe = pci_pwrctrl_pwrseq_probe,
+};
+module_platform_driver(pci_pwrctrl_pwrseq_driver);
+
+MODULE_AUTHOR("Bartosz Golaszewski <bartosz.golaszewski@linaro.org>");
+MODULE_DESCRIPTION("Generic PCI Power Control module for power sequenced devices");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pci/pwrctrl/pci-pwrctrl-tc9563.c b/drivers/pci/pwrctrl/pci-pwrctrl-tc9563.c
new file mode 100644
index 000000000000..ec423432ac65
--- /dev/null
+++ b/drivers/pci/pwrctrl/pci-pwrctrl-tc9563.c
@@ -0,0 +1,648 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/array_size.h>
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/pci.h>
+#include <linux/pci-pwrctrl.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/unaligned.h>
+
+#include "../pci.h"
+
+#define TC9563_GPIO_CONFIG 0x801208
+#define TC9563_RESET_GPIO 0x801210
+
+#define TC9563_PORT_L0S_DELAY 0x82496c
+#define TC9563_PORT_L1_DELAY 0x824970
+
+#define TC9563_EMBEDDED_ETH_DELAY 0x8200d8
+#define TC9563_ETH_L1_DELAY_MASK GENMASK(27, 18)
+#define TC9563_ETH_L1_DELAY_VALUE(x) FIELD_PREP(TC9563_ETH_L1_DELAY_MASK, x)
+#define TC9563_ETH_L0S_DELAY_MASK GENMASK(17, 13)
+#define TC9563_ETH_L0S_DELAY_VALUE(x) FIELD_PREP(TC9563_ETH_L0S_DELAY_MASK, x)
+
+#define TC9563_NFTS_2_5_GT 0x824978
+#define TC9563_NFTS_5_GT 0x82497c
+
+#define TC9563_PORT_LANE_ACCESS_ENABLE 0x828000
+
+#define TC9563_PHY_RATE_CHANGE_OVERRIDE 0x828040
+#define TC9563_PHY_RATE_CHANGE 0x828050
+
+#define TC9563_TX_MARGIN 0x828234
+
+#define TC9563_DFE_ENABLE 0x828a04
+#define TC9563_DFE_EQ0_MODE 0x828a08
+#define TC9563_DFE_EQ1_MODE 0x828a0c
+#define TC9563_DFE_EQ2_MODE 0x828a14
+#define TC9563_DFE_PD_MASK 0x828254
+
+#define TC9563_PORT_SELECT 0x82c02c
+#define TC9563_PORT_ACCESS_ENABLE 0x82c030
+
+#define TC9563_POWER_CONTROL 0x82b09c
+#define TC9563_POWER_CONTROL_OVREN 0x82b2c8
+
+#define TC9563_GPIO_MASK 0xfffffff3
+#define TC9563_GPIO_DEASSERT_BITS 0xc /* Bits to set to deassert the reset GPIOs */
+
+#define TC9563_TX_MARGIN_MIN_UA 400000
+
+/*
+ * From TC9563 PORSYS rev 0.2, figure 1.1 (POR boot sequence):
+ * wait 10 ms for the internal osc frequency to stabilize.
+ */
+#define TC9563_OSC_STAB_DELAY_US (10 * USEC_PER_MSEC)
+
+#define TC9563_L0S_L1_DELAY_UNIT_NS 256 /* Each unit represents 256 nanoseconds */
+
+struct tc9563_pwrctrl_reg_setting {
+ unsigned int offset;
+ unsigned int val;
+};
+
+enum tc9563_pwrctrl_ports {
+ TC9563_USP,
+ TC9563_DSP1,
+ TC9563_DSP2,
+ TC9563_DSP3,
+ TC9563_ETHERNET,
+ TC9563_MAX
+};
+
+struct tc9563_pwrctrl_cfg {
+ u32 l0s_delay;
+ u32 l1_delay;
+ u32 tx_amp;
+ u8 nfts[2]; /* GEN1 & GEN2 */
+ bool disable_dfe;
+ bool disable_port;
+};
+
+#define TC9563_PWRCTL_MAX_SUPPLY 6
+
+static const char *const tc9563_supply_names[TC9563_PWRCTL_MAX_SUPPLY] = {
+ "vddc",
+ "vdd18",
+ "vdd09",
+ "vddio1",
+ "vddio2",
+ "vddio18",
+};
+
+struct tc9563_pwrctrl_ctx {
+ struct regulator_bulk_data supplies[TC9563_PWRCTL_MAX_SUPPLY];
+ struct tc9563_pwrctrl_cfg cfg[TC9563_MAX];
+ struct gpio_desc *reset_gpio;
+ struct i2c_adapter *adapter;
+ struct i2c_client *client;
+ struct pci_pwrctrl pwrctrl;
+};
+
+/*
+ * Downstream port power-off sequence. The register addresses are
+ * hardcoded because the register names for these offsets are unknown.
+ */
+static const struct tc9563_pwrctrl_reg_setting common_pwroff_seq[] = {
+ {0x82900c, 0x1},
+ {0x829010, 0x1},
+ {0x829018, 0x0},
+ {0x829020, 0x1},
+ {0x82902c, 0x1},
+ {0x829030, 0x1},
+ {0x82903c, 0x1},
+ {0x829058, 0x0},
+ {0x82905c, 0x1},
+ {0x829060, 0x1},
+ {0x8290cc, 0x1},
+ {0x8290d0, 0x1},
+ {0x8290d8, 0x1},
+ {0x8290e0, 0x1},
+ {0x8290e8, 0x1},
+ {0x8290ec, 0x1},
+ {0x8290f4, 0x1},
+ {0x82910c, 0x1},
+ {0x829110, 0x1},
+ {0x829114, 0x1},
+};
+
+static const struct tc9563_pwrctrl_reg_setting dsp1_pwroff_seq[] = {
+ {TC9563_PORT_ACCESS_ENABLE, 0x2},
+ {TC9563_PORT_LANE_ACCESS_ENABLE, 0x3},
+ {TC9563_POWER_CONTROL, 0x014f4804},
+ {TC9563_POWER_CONTROL_OVREN, 0x1},
+ {TC9563_PORT_ACCESS_ENABLE, 0x4},
+};
+
+static const struct tc9563_pwrctrl_reg_setting dsp2_pwroff_seq[] = {
+ {TC9563_PORT_ACCESS_ENABLE, 0x8},
+ {TC9563_PORT_LANE_ACCESS_ENABLE, 0x1},
+ {TC9563_POWER_CONTROL, 0x014f4804},
+ {TC9563_POWER_CONTROL_OVREN, 0x1},
+ {TC9563_PORT_ACCESS_ENABLE, 0x8},
+};
+
+/*
+ * Since all transfers are initiated by the probe, no locks are necessary,
+ * as there are no concurrent calls.
+ */
+static int tc9563_pwrctrl_i2c_write(struct i2c_client *client,
+ u32 reg_addr, u32 reg_val)
+{
+ struct i2c_msg msg;
+ u8 msg_buf[7];
+ int ret;
+
+ msg.addr = client->addr;
+ msg.len = 7;
+ msg.flags = 0;
+
+ /* Big Endian for reg addr */
+ put_unaligned_be24(reg_addr, &msg_buf[0]);
+
+ /* Little Endian for reg val */
+ put_unaligned_le32(reg_val, &msg_buf[3]);
+
+ msg.buf = msg_buf;
+ ret = i2c_transfer(client->adapter, &msg, 1);
+ return ret == 1 ? 0 : ret;
+}
+
+static int tc9563_pwrctrl_i2c_read(struct i2c_client *client,
+ u32 reg_addr, u32 *reg_val)
+{
+ struct i2c_msg msg[2];
+ u8 wr_data[3];
+ u32 rd_data;
+ int ret;
+
+ msg[0].addr = client->addr;
+ msg[0].len = 3;
+ msg[0].flags = 0;
+
+ /* Big Endian for reg addr */
+ put_unaligned_be24(reg_addr, &wr_data[0]);
+
+ msg[0].buf = wr_data;
+
+ msg[1].addr = client->addr;
+ msg[1].len = 4;
+ msg[1].flags = I2C_M_RD;
+
+ msg[1].buf = (u8 *)&rd_data;
+
+ ret = i2c_transfer(client->adapter, &msg[0], 2);
+ if (ret == 2) {
+ *reg_val = get_unaligned_le32(&rd_data);
+ return 0;
+ }
+
+ /* If only one message successfully completed, return -EIO */
+ return ret == 1 ? -EIO : ret;
+}
+
+static int tc9563_pwrctrl_i2c_bulk_write(struct i2c_client *client,
+ const struct tc9563_pwrctrl_reg_setting *seq, int len)
+{
+ int ret, i;
+
+ for (i = 0; i < len; i++) {
+ ret = tc9563_pwrctrl_i2c_write(client, seq[i].offset, seq[i].val);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int tc9563_pwrctrl_disable_port(struct tc9563_pwrctrl_ctx *ctx,
+ enum tc9563_pwrctrl_ports port)
+{
+ struct tc9563_pwrctrl_cfg *cfg = &ctx->cfg[port];
+ const struct tc9563_pwrctrl_reg_setting *seq;
+ int ret, len;
+
+ if (!cfg->disable_port)
+ return 0;
+
+ if (port == TC9563_DSP1) {
+ seq = dsp1_pwroff_seq;
+ len = ARRAY_SIZE(dsp1_pwroff_seq);
+ } else {
+ seq = dsp2_pwroff_seq;
+ len = ARRAY_SIZE(dsp2_pwroff_seq);
+ }
+
+ ret = tc9563_pwrctrl_i2c_bulk_write(ctx->client, seq, len);
+ if (ret)
+ return ret;
+
+ return tc9563_pwrctrl_i2c_bulk_write(ctx->client,
+ common_pwroff_seq, ARRAY_SIZE(common_pwroff_seq));
+}
+
+static int tc9563_pwrctrl_set_l0s_l1_entry_delay(struct tc9563_pwrctrl_ctx *ctx,
+ enum tc9563_pwrctrl_ports port, bool is_l1, u32 ns)
+{
+ u32 rd_val, units;
+ int ret;
+
+ if (ns < TC9563_L0S_L1_DELAY_UNIT_NS)
+ return 0;
+
+ /* convert to units of 256ns */
+ units = ns / TC9563_L0S_L1_DELAY_UNIT_NS;
+
+ if (port == TC9563_ETHERNET) {
+ ret = tc9563_pwrctrl_i2c_read(ctx->client, TC9563_EMBEDDED_ETH_DELAY, &rd_val);
+ if (ret)
+ return ret;
+
+ if (is_l1)
+ rd_val = u32_replace_bits(rd_val, units, TC9563_ETH_L1_DELAY_MASK);
+ else
+ rd_val = u32_replace_bits(rd_val, units, TC9563_ETH_L0S_DELAY_MASK);
+
+ return tc9563_pwrctrl_i2c_write(ctx->client, TC9563_EMBEDDED_ETH_DELAY, rd_val);
+ }
+
+ ret = tc9563_pwrctrl_i2c_write(ctx->client, TC9563_PORT_SELECT, BIT(port));
+ if (ret)
+ return ret;
+
+ return tc9563_pwrctrl_i2c_write(ctx->client,
+ is_l1 ? TC9563_PORT_L1_DELAY : TC9563_PORT_L0S_DELAY, units);
+}
+
+static int tc9563_pwrctrl_set_tx_amplitude(struct tc9563_pwrctrl_ctx *ctx,
+ enum tc9563_pwrctrl_ports port)
+{
+ u32 amp = ctx->cfg[port].tx_amp;
+ int port_access;
+
+ if (amp < TC9563_TX_MARGIN_MIN_UA)
+ return 0;
+
+ /* txmargin = (Amp(uV) - 400000) / 3125 */
+ amp = (amp - TC9563_TX_MARGIN_MIN_UA) / 3125;
+
+ switch (port) {
+ case TC9563_USP:
+ port_access = 0x1;
+ break;
+ case TC9563_DSP1:
+ port_access = 0x2;
+ break;
+ case TC9563_DSP2:
+ port_access = 0x8;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ struct tc9563_pwrctrl_reg_setting tx_amp_seq[] = {
+ {TC9563_PORT_ACCESS_ENABLE, port_access},
+ {TC9563_PORT_LANE_ACCESS_ENABLE, 0x3},
+ {TC9563_TX_MARGIN, amp},
+ };
+
+ return tc9563_pwrctrl_i2c_bulk_write(ctx->client, tx_amp_seq, ARRAY_SIZE(tx_amp_seq));
+}
+
+static int tc9563_pwrctrl_disable_dfe(struct tc9563_pwrctrl_ctx *ctx,
+ enum tc9563_pwrctrl_ports port)
+{
+ struct tc9563_pwrctrl_cfg *cfg = &ctx->cfg[port];
+ int port_access, lane_access = 0x3;
+ u32 phy_rate = 0x21;
+
+ if (!cfg->disable_dfe)
+ return 0;
+
+ switch (port) {
+ case TC9563_USP:
+ phy_rate = 0x1;
+ port_access = 0x1;
+ break;
+ case TC9563_DSP1:
+ port_access = 0x2;
+ break;
+ case TC9563_DSP2:
+ port_access = 0x8;
+ lane_access = 0x1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ struct tc9563_pwrctrl_reg_setting disable_dfe_seq[] = {
+ {TC9563_PORT_ACCESS_ENABLE, port_access},
+ {TC9563_PORT_LANE_ACCESS_ENABLE, lane_access},
+ {TC9563_DFE_ENABLE, 0x0},
+ {TC9563_DFE_EQ0_MODE, 0x411},
+ {TC9563_DFE_EQ1_MODE, 0x11},
+ {TC9563_DFE_EQ2_MODE, 0x11},
+ {TC9563_DFE_PD_MASK, 0x7},
+ {TC9563_PHY_RATE_CHANGE_OVERRIDE, 0x10},
+ {TC9563_PHY_RATE_CHANGE, phy_rate},
+ {TC9563_PHY_RATE_CHANGE, 0x0},
+ {TC9563_PHY_RATE_CHANGE_OVERRIDE, 0x0},
+ };
+
+ return tc9563_pwrctrl_i2c_bulk_write(ctx->client,
+ disable_dfe_seq, ARRAY_SIZE(disable_dfe_seq));
+}
+
+static int tc9563_pwrctrl_set_nfts(struct tc9563_pwrctrl_ctx *ctx,
+ enum tc9563_pwrctrl_ports port)
+{
+ u8 *nfts = ctx->cfg[port].nfts;
+ struct tc9563_pwrctrl_reg_setting nfts_seq[] = {
+ {TC9563_NFTS_2_5_GT, nfts[0]},
+ {TC9563_NFTS_5_GT, nfts[1]},
+ };
+ int ret;
+
+ if (!nfts[0])
+ return 0;
+
+ ret = tc9563_pwrctrl_i2c_write(ctx->client, TC9563_PORT_SELECT, BIT(port));
+ if (ret)
+ return ret;
+
+ return tc9563_pwrctrl_i2c_bulk_write(ctx->client, nfts_seq, ARRAY_SIZE(nfts_seq));
+}
+
+static int tc9563_pwrctrl_assert_deassert_reset(struct tc9563_pwrctrl_ctx *ctx, bool deassert)
+{
+ int ret, val;
+
+ ret = tc9563_pwrctrl_i2c_write(ctx->client, TC9563_GPIO_CONFIG, TC9563_GPIO_MASK);
+ if (ret)
+ return ret;
+
+ val = deassert ? TC9563_GPIO_DEASSERT_BITS : 0;
+
+ return tc9563_pwrctrl_i2c_write(ctx->client, TC9563_RESET_GPIO, val);
+}
+
+static int tc9563_pwrctrl_parse_device_dt(struct tc9563_pwrctrl_ctx *ctx, struct device_node *node,
+ enum tc9563_pwrctrl_ports port)
+{
+ struct tc9563_pwrctrl_cfg *cfg = &ctx->cfg[port];
+ int ret;
+
+ /* Disable the port if its DT node status is "disabled". */
+ if (!of_device_is_available(node)) {
+ cfg->disable_port = true;
+ return 0;
+ }
+
+ ret = of_property_read_u32(node, "aspm-l0s-entry-delay-ns", &cfg->l0s_delay);
+ if (ret && ret != -EINVAL)
+ return ret;
+
+ ret = of_property_read_u32(node, "aspm-l1-entry-delay-ns", &cfg->l1_delay);
+ if (ret && ret != -EINVAL)
+ return ret;
+
+ ret = of_property_read_u32(node, "toshiba,tx-amplitude-microvolt", &cfg->tx_amp);
+ if (ret && ret != -EINVAL)
+ return ret;
+
+ ret = of_property_read_u8_array(node, "n-fts", cfg->nfts, ARRAY_SIZE(cfg->nfts));
+ if (ret && ret != -EINVAL)
+ return ret;
+
+ cfg->disable_dfe = of_property_read_bool(node, "toshiba,no-dfe-support");
+
+ return 0;
+}
+
+static void tc9563_pwrctrl_power_off(struct tc9563_pwrctrl_ctx *ctx)
+{
+ gpiod_set_value(ctx->reset_gpio, 1);
+
+ regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+}
+
+static int tc9563_pwrctrl_bring_up(struct tc9563_pwrctrl_ctx *ctx)
+{
+ struct tc9563_pwrctrl_cfg *cfg;
+ int ret, i;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+ if (ret < 0)
+ return dev_err_probe(ctx->pwrctrl.dev, ret, "cannot enable regulators\n");
+
+ gpiod_set_value(ctx->reset_gpio, 0);
+
+ fsleep(TC9563_OSC_STAB_DELAY_US);
+
+ ret = tc9563_pwrctrl_assert_deassert_reset(ctx, false);
+ if (ret)
+ goto power_off;
+
+ for (i = 0; i < TC9563_MAX; i++) {
+ cfg = &ctx->cfg[i];
+ ret = tc9563_pwrctrl_disable_port(ctx, i);
+ if (ret) {
+ dev_err(ctx->pwrctrl.dev, "Disabling port failed\n");
+ goto power_off;
+ }
+
+ ret = tc9563_pwrctrl_set_l0s_l1_entry_delay(ctx, i, false, cfg->l0s_delay);
+ if (ret) {
+ dev_err(ctx->pwrctrl.dev, "Setting L0s entry delay failed\n");
+ goto power_off;
+ }
+
+ ret = tc9563_pwrctrl_set_l0s_l1_entry_delay(ctx, i, true, cfg->l1_delay);
+ if (ret) {
+ dev_err(ctx->pwrctrl.dev, "Setting L1 entry delay failed\n");
+ goto power_off;
+ }
+
+ ret = tc9563_pwrctrl_set_tx_amplitude(ctx, i);
+ if (ret) {
+ dev_err(ctx->pwrctrl.dev, "Setting Tx amplitude failed\n");
+ goto power_off;
+ }
+
+ ret = tc9563_pwrctrl_set_nfts(ctx, i);
+ if (ret) {
+ dev_err(ctx->pwrctrl.dev, "Setting N_FTS failed\n");
+ goto power_off;
+ }
+
+ ret = tc9563_pwrctrl_disable_dfe(ctx, i);
+ if (ret) {
+ dev_err(ctx->pwrctrl.dev, "Disabling DFE failed\n");
+ goto power_off;
+ }
+ }
+
+ ret = tc9563_pwrctrl_assert_deassert_reset(ctx, true);
+ if (!ret)
+ return 0;
+
+power_off:
+ tc9563_pwrctrl_power_off(ctx);
+ return ret;
+}
+
+static int tc9563_pwrctrl_probe(struct platform_device *pdev)
+{
+ struct pci_host_bridge *bridge = to_pci_host_bridge(pdev->dev.parent);
+ struct pci_bus *bus = bridge->bus;
+ struct device *dev = &pdev->dev;
+ enum tc9563_pwrctrl_ports port;
+ struct tc9563_pwrctrl_ctx *ctx;
+ struct device_node *i2c_node;
+ int ret, addr;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ret = of_property_read_u32_index(pdev->dev.of_node, "i2c-parent", 1, &addr);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to read i2c-parent property\n");
+
+ i2c_node = of_parse_phandle(dev->of_node, "i2c-parent", 0);
+ ctx->adapter = of_find_i2c_adapter_by_node(i2c_node);
+ of_node_put(i2c_node);
+ if (!ctx->adapter)
+ return dev_err_probe(dev, -EPROBE_DEFER, "Failed to find I2C adapter\n");
+
+ ctx->client = i2c_new_dummy_device(ctx->adapter, addr);
+ if (IS_ERR(ctx->client)) {
+ dev_err(dev, "Failed to create I2C client\n");
+ i2c_put_adapter(ctx->adapter);
+ return PTR_ERR(ctx->client);
+ }
+
+ for (int i = 0; i < ARRAY_SIZE(tc9563_supply_names); i++)
+ ctx->supplies[i].supply = tc9563_supply_names[i];
+
+ ret = devm_regulator_bulk_get(dev, TC9563_PWRCTL_MAX_SUPPLY, ctx->supplies);
+ if (ret) {
+ dev_err_probe(dev, ret, "failed to get supply regulator\n");
+ goto remove_i2c;
+ }
+
+ ctx->reset_gpio = devm_gpiod_get(dev, "resx", GPIOD_OUT_HIGH);
+ if (IS_ERR(ctx->reset_gpio)) {
+ ret = dev_err_probe(dev, PTR_ERR(ctx->reset_gpio), "failed to get resx GPIO\n");
+ goto remove_i2c;
+ }
+
+ pci_pwrctrl_init(&ctx->pwrctrl, dev);
+
+ port = TC9563_USP;
+ ret = tc9563_pwrctrl_parse_device_dt(ctx, pdev->dev.of_node, port);
+ if (ret) {
+ dev_err(dev, "failed to parse device tree properties: %d\n", ret);
+ goto remove_i2c;
+ }
+
+ /*
+ * Downstream ports are always children of the upstream port.
+ * The first node represents DSP1, the second node represents DSP2, and so on.
+ */
+ for_each_child_of_node_scoped(pdev->dev.of_node, child) {
+ port++;
+ ret = tc9563_pwrctrl_parse_device_dt(ctx, child, port);
+ if (ret)
+ break;
+ /* The embedded Ethernet device is under DSP3 */
+ if (port == TC9563_DSP3) {
+ for_each_child_of_node_scoped(child, child1) {
+ port++;
+ ret = tc9563_pwrctrl_parse_device_dt(ctx, child1, port);
+ if (ret)
+ break;
+ }
+ }
+ }
+ if (ret) {
+ dev_err(dev, "failed to parse device tree properties: %d\n", ret);
+ goto remove_i2c;
+ }
+
+ if (bridge->ops->assert_perst) {
+ ret = bridge->ops->assert_perst(bus, true);
+ if (ret)
+ goto remove_i2c;
+ }
+
+ ret = tc9563_pwrctrl_bring_up(ctx);
+ if (ret)
+ goto remove_i2c;
+
+ if (bridge->ops->assert_perst) {
+ ret = bridge->ops->assert_perst(bus, false);
+ if (ret)
+ goto power_off;
+ }
+
+ ret = devm_pci_pwrctrl_device_set_ready(dev, &ctx->pwrctrl);
+ if (ret)
+ goto power_off;
+
+ platform_set_drvdata(pdev, ctx);
+
+ return 0;
+
+power_off:
+ tc9563_pwrctrl_power_off(ctx);
+remove_i2c:
+ i2c_unregister_device(ctx->client);
+ i2c_put_adapter(ctx->adapter);
+ return ret;
+}
+
+static void tc9563_pwrctrl_remove(struct platform_device *pdev)
+{
+ struct tc9563_pwrctrl_ctx *ctx = platform_get_drvdata(pdev);
+
+ tc9563_pwrctrl_power_off(ctx);
+ i2c_unregister_device(ctx->client);
+ i2c_put_adapter(ctx->adapter);
+}
+
+static const struct of_device_id tc9563_pwrctrl_of_match[] = {
+ { .compatible = "pci1179,0623"},
+ { }
+};
+MODULE_DEVICE_TABLE(of, tc9563_pwrctrl_of_match);
+
+static struct platform_driver tc9563_pwrctrl_driver = {
+ .driver = {
+ .name = "pwrctrl-tc9563",
+ .of_match_table = tc9563_pwrctrl_of_match,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
+ .probe = tc9563_pwrctrl_probe,
+ .remove = tc9563_pwrctrl_remove,
+};
+module_platform_driver(tc9563_pwrctrl_driver);
+
+MODULE_AUTHOR("Krishna chaitanya chundru <quic_krichai@quicinc.com>");
+MODULE_DESCRIPTION("TC956x power control driver");
+MODULE_LICENSE("GPL");
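
As a quick illustration of the I2C register framing used by the tc9563_pwrctrl_i2c_write()/read() helpers above: each write is a single 7-byte message consisting of a 3-byte big-endian register address followed by a 4-byte little-endian value, while reads send the 3-byte address and read 4 little-endian bytes back. The snippet below is a sketch with made-up values (the address happens to be TC9563_PORT_L1_DELAY from this file), not code taken from the driver. Relatedly, the Tx margin conversion above maps, for example, an 800000 uV amplitude to (800000 - 400000) / 3125 = 128.

/* Illustrative byte layout of a TC9563 register write (not driver code). */
u8 buf[7];
u32 reg_addr = 0x824970;	/* TC9563_PORT_L1_DELAY */
u32 reg_val = 0x10;		/* 16 units of 256 ns, roughly 4.1 us L1 entry delay */

put_unaligned_be24(reg_addr, &buf[0]);	/* buf[0..2] = 0x82 0x49 0x70 */
put_unaligned_le32(reg_val, &buf[3]);	/* buf[3..6] = 0x10 0x00 0x00 0x00 */
/* buf is then sent as one i2c_msg of length 7 to the switch's I2C address. */
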
diff --git a/drivers/pci/pwrctrl/slot.c b/drivers/pci/pwrctrl/slot.c
new file mode 100644
index 000000000000..3320494b62d8
--- /dev/null
+++ b/drivers/pci/pwrctrl/slot.c
@@ -0,0 +1,95 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2024 Linaro Ltd.
+ * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/pci-pwrctrl.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+
+struct pci_pwrctrl_slot_data {
+ struct pci_pwrctrl ctx;
+ struct regulator_bulk_data *supplies;
+ int num_supplies;
+};
+
+static void devm_pci_pwrctrl_slot_power_off(void *data)
+{
+ struct pci_pwrctrl_slot_data *slot = data;
+
+ regulator_bulk_disable(slot->num_supplies, slot->supplies);
+ regulator_bulk_free(slot->num_supplies, slot->supplies);
+}
+
+static int pci_pwrctrl_slot_probe(struct platform_device *pdev)
+{
+ struct pci_pwrctrl_slot_data *slot;
+ struct device *dev = &pdev->dev;
+ struct clk *clk;
+ int ret;
+
+ slot = devm_kzalloc(dev, sizeof(*slot), GFP_KERNEL);
+ if (!slot)
+ return -ENOMEM;
+
+ ret = of_regulator_bulk_get_all(dev, dev_of_node(dev),
+ &slot->supplies);
+ if (ret < 0) {
+ dev_err_probe(dev, ret, "Failed to get slot regulators\n");
+ return ret;
+ }
+
+ slot->num_supplies = ret;
+ ret = regulator_bulk_enable(slot->num_supplies, slot->supplies);
+ if (ret < 0) {
+ dev_err_probe(dev, ret, "Failed to enable slot regulators\n");
+ regulator_bulk_free(slot->num_supplies, slot->supplies);
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(dev, devm_pci_pwrctrl_slot_power_off,
+ slot);
+ if (ret)
+ return ret;
+
+ clk = devm_clk_get_optional_enabled(dev, NULL);
+ if (IS_ERR(clk)) {
+ return dev_err_probe(dev, PTR_ERR(clk),
+ "Failed to enable slot clock\n");
+ }
+
+ pci_pwrctrl_init(&slot->ctx, dev);
+
+ ret = devm_pci_pwrctrl_device_set_ready(dev, &slot->ctx);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to register pwrctrl driver\n");
+
+ return 0;
+}
+
+static const struct of_device_id pci_pwrctrl_slot_of_match[] = {
+ {
+ .compatible = "pciclass,0604",
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, pci_pwrctrl_slot_of_match);
+
+static struct platform_driver pci_pwrctrl_slot_driver = {
+ .driver = {
+ .name = "pci-pwrctrl-slot",
+ .of_match_table = pci_pwrctrl_slot_of_match,
+ },
+ .probe = pci_pwrctrl_slot_probe,
+};
+module_platform_driver(pci_pwrctrl_slot_driver);
+
+MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>");
+MODULE_DESCRIPTION("Generic PCI Power Control driver for PCI Slots");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 321156ca273d..b9c252aa6fe0 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -12,6 +12,8 @@
* file, where their drivers can use them.
*/
+#include <linux/aer.h>
+#include <linux/align.h>
#include <linux/bitfield.h>
#include <linux/types.h>
#include <linux/kernel.h>
@@ -29,10 +31,19 @@
#include <linux/nvme.h>
#include <linux/platform_data/x86/apple.h>
#include <linux/pm_runtime.h>
+#include <linux/sizes.h>
#include <linux/suspend.h>
#include <linux/switchtec.h>
#include "pci.h"
+static bool pcie_lbms_seen(struct pci_dev *dev, u16 lnksta)
+{
+ if (test_bit(PCI_LINK_LBMS_SEEN, &dev->priv_flags))
+ return true;
+
+ return lnksta & PCI_EXP_LNKSTA_LBMS;
+}
+
/*
* Retrain the link of a downstream PCIe port by hand if necessary.
*
@@ -66,7 +77,7 @@
* apply this erratum workaround to any downstream ports as long as they
* support Link Active reporting and have the Link Control 2 register.
* Restrict the speed to 2.5GT/s then with the Target Link Speed field,
- * request a retrain and wait 200ms for the data link to go up.
+ * request a retrain and check the result.
*
* If this turns out successful and we know by the Vendor:Device ID it is
* safe to do so, then lift the restriction, letting the devices negotiate
@@ -74,38 +85,46 @@
* firmware may have already arranged and lift it with ports that already
* report their data link being up.
*
- * Return TRUE if the link has been successfully retrained, otherwise FALSE.
+ * Otherwise revert the speed to the original setting and request a retrain
+ * again to remove any residual state, ignoring the result as it's supposed
+ * to fail anyway.
+ *
+ * Return 0 if the link has been successfully retrained. Return an error
+ * if retraining was not needed or we attempted a retrain and it failed.
*/
-bool pcie_failed_link_retrain(struct pci_dev *dev)
+int pcie_failed_link_retrain(struct pci_dev *dev)
{
static const struct pci_device_id ids[] = {
{ PCI_VDEVICE(ASMEDIA, 0x2824) }, /* ASMedia ASM2824 */
{}
};
u16 lnksta, lnkctl2;
+ int ret = -ENOTTY;
if (!pci_is_pcie(dev) || !pcie_downstream_port(dev) ||
!pcie_cap_has_lnkctl2(dev) || !dev->link_active_reporting)
- return false;
+ return ret;
- pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &lnkctl2);
pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
- if ((lnksta & (PCI_EXP_LNKSTA_LBMS | PCI_EXP_LNKSTA_DLLLA)) ==
- PCI_EXP_LNKSTA_LBMS) {
- pci_info(dev, "broken device, retraining non-functional downstream link at 2.5GT/s\n");
+ if (!(lnksta & PCI_EXP_LNKSTA_DLLLA) && pcie_lbms_seen(dev, lnksta)) {
+ u16 oldlnkctl2;
- lnkctl2 &= ~PCI_EXP_LNKCTL2_TLS;
- lnkctl2 |= PCI_EXP_LNKCTL2_TLS_2_5GT;
- pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, lnkctl2);
+ pci_info(dev, "broken device, retraining non-functional downstream link at 2.5GT/s\n");
- if (pcie_retrain_link(dev, false)) {
+ pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &oldlnkctl2);
+ ret = pcie_set_target_speed(dev, PCIE_SPEED_2_5GT, false);
+ if (ret) {
pci_info(dev, "retraining failed\n");
- return false;
+ pcie_set_target_speed(dev, PCIE_LNKCTL2_TLS2SPEED(oldlnkctl2),
+ true);
+ return ret;
}
pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
}
+ pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &lnkctl2);
+
if ((lnksta & PCI_EXP_LNKSTA_DLLLA) &&
(lnkctl2 & PCI_EXP_LNKCTL2_TLS) == PCI_EXP_LNKCTL2_TLS_2_5GT &&
pci_match_id(ids, dev)) {
@@ -113,17 +132,14 @@ bool pcie_failed_link_retrain(struct pci_dev *dev)
pci_info(dev, "removing 2.5GT/s downstream link speed restriction\n");
pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
- lnkctl2 &= ~PCI_EXP_LNKCTL2_TLS;
- lnkctl2 |= lnkcap & PCI_EXP_LNKCAP_SLS;
- pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, lnkctl2);
-
- if (pcie_retrain_link(dev, false)) {
+ ret = pcie_set_target_speed(dev, PCIE_LNKCAP_SLS2SPEED(lnkcap), false);
+ if (ret) {
pci_info(dev, "retraining failed\n");
- return false;
+ return ret;
}
}
- return true;
+ return ret;
}
static ktime_t fixup_debug_start(struct pci_dev *dev,
@@ -361,8 +377,9 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_2, quirk_isa_d
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_3, quirk_isa_dma_hangs);
#endif
+#ifdef CONFIG_HAS_IOPORT
/*
- * Intel NM10 "TigerPoint" LPC PM1a_STS.BM_STS must be clear
+ * Intel NM10 "Tiger Point" LPC PM1a_STS.BM_STS must be clear
* for some HT machines to use C4 w/o hanging.
*/
static void quirk_tigerpoint_bm_sts(struct pci_dev *dev)
@@ -375,11 +392,12 @@ static void quirk_tigerpoint_bm_sts(struct pci_dev *dev)
pm1a = inw(pmbase);
if (pm1a & 0x10) {
- pci_info(dev, FW_BUG "TigerPoint LPC.BM_STS cleared\n");
+ pci_info(dev, FW_BUG "Tiger Point LPC.BM_STS cleared\n");
outw(0x10, pmbase);
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TGP_LPC, quirk_tigerpoint_bm_sts);
+#endif
/* Chipsets where PCI->PCI transfers vanish or hang */
static void quirk_nopcipci(struct pci_dev *dev)
@@ -568,13 +586,13 @@ static void quirk_extend_bar_to_page(struct pci_dev *dev)
for (i = 0; i < PCI_STD_NUM_BARS; i++) {
struct resource *r = &dev->resource[i];
+ const char *r_name = pci_resource_name(dev, i);
if (r->flags & IORESOURCE_MEM && resource_size(r) < PAGE_SIZE) {
- r->end = PAGE_SIZE - 1;
- r->start = 0;
+ resource_set_range(r, 0, PAGE_SIZE);
r->flags |= IORESOURCE_UNSET;
- pci_info(dev, "expanded BAR %d to page size: %pR\n",
- i, r);
+ pci_info(dev, "%s %pR: expanded to page size\n",
+ r_name, r);
}
}
}
@@ -588,10 +606,9 @@ static void quirk_s3_64M(struct pci_dev *dev)
{
struct resource *r = &dev->resource[0];
- if ((r->start & 0x3ffffff) || r->end != r->start + 0x3ffffff) {
+ if (!IS_ALIGNED(r->start, SZ_64M) || resource_size(r) != SZ_64M) {
r->flags |= IORESOURCE_UNSET;
- r->start = 0;
- r->end = 0x3ffffff;
+ resource_set_range(r, 0, SZ_64M);
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_868, quirk_s3_64M);
@@ -602,7 +619,8 @@ static void quirk_io(struct pci_dev *dev, int pos, unsigned int size,
{
u32 region;
struct pci_bus_region bus_region;
- struct resource *res = dev->resource + pos;
+ struct resource *res = pci_resource_n(dev, pos);
+ const char *res_name = pci_resource_name(dev, pos);
pci_read_config_dword(dev, PCI_BASE_ADDRESS_0 + (pos << 2), &region);
@@ -620,8 +638,7 @@ static void quirk_io(struct pci_dev *dev, int pos, unsigned int size,
bus_region.end = region + size - 1;
pcibios_bus_to_resource(dev->bus, res, &bus_region);
- pci_info(dev, FW_BUG "%s quirk: reg 0x%x: %pR\n",
- name, PCI_BASE_ADDRESS_0 + (pos << 2), res);
+ pci_info(dev, FW_BUG "%s %pR: %s quirk\n", res_name, res, name);
}
/*
@@ -652,7 +669,7 @@ static void quirk_io_region(struct pci_dev *dev, int port,
{
u16 region;
struct pci_bus_region bus_region;
- struct resource *res = dev->resource + nr;
+ struct resource *res = pci_resource_n(dev, nr);
pci_read_config_word(dev, port, &region);
region &= ~(size - 1);
@@ -668,6 +685,12 @@ static void quirk_io_region(struct pci_dev *dev, int port,
bus_region.end = region + size - 1;
pcibios_bus_to_resource(dev->bus, res, &bus_region);
+ /*
+ * "res" is typically a bridge window resource that's not being
+ * used for a bridge window, so it's just a place to stash this
+ * non-standard resource. Printing "nr" or pci_resource_name() of
+ * it doesn't really make sense.
+ */
if (!pci_claim_resource(dev, nr))
pci_info(dev, "quirk: %pR claimed by %s\n", res, name);
}
@@ -688,7 +711,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS100, quirk_ati_
/*
* In the AMD NL platform, this device ([1022:7912]) has a class code of
* PCI_CLASS_SERIAL_USB_XHCI (0x0c0330), which means the xhci driver will
- * claim it.
+ * claim it. The same applies to the VanGogh platform device ([1022:163a]).
*
* But the dwc3 driver is a more specific driver for this device, and we'd
* prefer to use it instead of xhci. To prevent xhci from claiming the
@@ -696,17 +719,22 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS100, quirk_ati_
* defines as "USB device (not host controller)". The dwc3 driver can then
* claim it based on its Vendor and Device ID.
*/
-static void quirk_amd_nl_class(struct pci_dev *pdev)
+static void quirk_amd_dwc_class(struct pci_dev *pdev)
{
u32 class = pdev->class;
- /* Use "USB Device (not host controller)" class */
- pdev->class = PCI_CLASS_SERIAL_USB_DEVICE;
- pci_info(pdev, "PCI class overridden (%#08x -> %#08x) so dwc3 driver can claim this instead of xhci\n",
- class, pdev->class);
+ if (class != PCI_CLASS_SERIAL_USB_DEVICE) {
+ /* Use "USB Device (not host controller)" class */
+ pdev->class = PCI_CLASS_SERIAL_USB_DEVICE;
+ pci_info(pdev,
+ "PCI class overridden (%#08x -> %#08x) so dwc3 driver can claim this instead of xhci\n",
+ class, pdev->class);
+ }
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB,
- quirk_amd_nl_class);
+ quirk_amd_dwc_class);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_VANGOGH_USB,
+ quirk_amd_dwc_class);
/*
* Synopsys USB 3.x host HAPS platform has a class code of
@@ -1315,8 +1343,7 @@ static void quirk_dunord(struct pci_dev *dev)
struct resource *r = &dev->resource[1];
r->flags |= IORESOURCE_UNSET;
- r->start = 0;
- r->end = 0xffffff;
+ resource_set_range(r, 0, SZ_16M);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_DUNORD, PCI_DEVICE_ID_DUNORD_I3000, quirk_dunord);
@@ -1842,8 +1869,8 @@ static void quirk_jmicron_ata(struct pci_dev *pdev)
/* Update pdev accordingly */
pci_read_config_byte(pdev, PCI_HEADER_TYPE, &hdr);
- pdev->hdr_type = hdr & 0x7f;
- pdev->multifunction = !!(hdr & 0x80);
+ pdev->hdr_type = hdr & PCI_HEADER_TYPE_MASK;
+ pdev->multifunction = FIELD_GET(PCI_HEADER_TYPE_MFD, hdr);
pci_read_config_dword(pdev, PCI_CLASS_REVISION, &class);
pdev->class = class >> 8;
@@ -1961,12 +1988,12 @@ static void quirk_huawei_pcie_sva(struct pci_dev *pdev)
device_create_managed_software_node(&pdev->dev, properties, NULL))
pci_warn(pdev, "could not add stall property");
}
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_HUAWEI, 0xa250, quirk_huawei_pcie_sva);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_HUAWEI, 0xa251, quirk_huawei_pcie_sva);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_HUAWEI, 0xa255, quirk_huawei_pcie_sva);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_HUAWEI, 0xa256, quirk_huawei_pcie_sva);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_HUAWEI, 0xa258, quirk_huawei_pcie_sva);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_HUAWEI, 0xa259, quirk_huawei_pcie_sva);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HUAWEI, 0xa250, quirk_huawei_pcie_sva);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HUAWEI, 0xa251, quirk_huawei_pcie_sva);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HUAWEI, 0xa255, quirk_huawei_pcie_sva);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HUAWEI, 0xa256, quirk_huawei_pcie_sva);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HUAWEI, 0xa258, quirk_huawei_pcie_sva);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HUAWEI, 0xa259, quirk_huawei_pcie_sva);
/*
* It's possible for the MSI to get corrupted if SHPC and ACPI are used
@@ -2313,8 +2340,7 @@ static void quirk_tc86c001_ide(struct pci_dev *dev)
if (r->start & 0x8) {
r->flags |= IORESOURCE_UNSET;
- r->start = 0;
- r->end = 0xf;
+ resource_set_range(r, 0, SZ_16);
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TOSHIBA_2,
@@ -2342,8 +2368,7 @@ static void quirk_plx_pci9050(struct pci_dev *dev)
pci_info(dev, "Re-allocating PLX PCI 9050 BAR %u to length 256 to avoid bit 7 bug\n",
bar);
r->flags |= IORESOURCE_UNSET;
- r->start = 0;
- r->end = 0xff;
+ resource_set_range(r, 0, SZ_256);
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050,
@@ -2469,28 +2494,27 @@ DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
*/
static void quirk_disable_aspm_l0s(struct pci_dev *dev)
{
- pci_info(dev, "Disabling L0s\n");
- pci_disable_link_state(dev, PCIE_LINK_STATE_L0S);
-}
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10a7, quirk_disable_aspm_l0s);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10a9, quirk_disable_aspm_l0s);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10b6, quirk_disable_aspm_l0s);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10c6, quirk_disable_aspm_l0s);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10c7, quirk_disable_aspm_l0s);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10c8, quirk_disable_aspm_l0s);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10d6, quirk_disable_aspm_l0s);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10db, quirk_disable_aspm_l0s);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10dd, quirk_disable_aspm_l0s);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10e1, quirk_disable_aspm_l0s);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10ec, quirk_disable_aspm_l0s);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f1, quirk_disable_aspm_l0s);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f4, quirk_disable_aspm_l0s);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1508, quirk_disable_aspm_l0s);
+ pcie_aspm_remove_cap(dev, PCI_EXP_LNKCAP_ASPM_L0S);
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10a7, quirk_disable_aspm_l0s);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10a9, quirk_disable_aspm_l0s);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10b6, quirk_disable_aspm_l0s);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10c6, quirk_disable_aspm_l0s);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10c7, quirk_disable_aspm_l0s);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10c8, quirk_disable_aspm_l0s);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10d6, quirk_disable_aspm_l0s);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10db, quirk_disable_aspm_l0s);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10dd, quirk_disable_aspm_l0s);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10e1, quirk_disable_aspm_l0s);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10ec, quirk_disable_aspm_l0s);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10f1, quirk_disable_aspm_l0s);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10f4, quirk_disable_aspm_l0s);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1508, quirk_disable_aspm_l0s);
static void quirk_disable_aspm_l0s_l1(struct pci_dev *dev)
{
- pci_info(dev, "Disabling ASPM L0s/L1\n");
- pci_disable_link_state(dev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
+ pcie_aspm_remove_cap(dev,
+ PCI_EXP_LNKCAP_ASPM_L0S | PCI_EXP_LNKCAP_ASPM_L1);
}
/*
@@ -2498,7 +2522,10 @@ static void quirk_disable_aspm_l0s_l1(struct pci_dev *dev)
* upstream PCIe root port when ASPM is enabled. At least L0s mode is affected;
* disable both L0s and L1 for now to be safe.
*/
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ASMEDIA, 0x1080, quirk_disable_aspm_l0s_l1);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ASMEDIA, 0x1080, quirk_disable_aspm_l0s_l1);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_FREESCALE, 0x0451, quirk_disable_aspm_l0s_l1);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_PASEMI, 0xa002, quirk_disable_aspm_l0s_l1);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HUAWEI, 0x1105, quirk_disable_aspm_l0s_l1);
/*
* Some Pericom PCIe-to-PCI bridges in reverse mode need the PCIe Retrain
@@ -2692,6 +2719,7 @@ static void quirk_disable_msi(struct pci_dev *dev)
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_disable_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, 0xa238, quirk_disable_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x5a3f, quirk_disable_msi);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_RDC, 0x1031, quirk_disable_msi);
/*
* The APC bridge device in AMD 780 family northbridges has some random
@@ -3073,7 +3101,7 @@ static void __nv_msi_ht_cap_quirk(struct pci_dev *dev, int all)
/*
* HT MSI mapping should be disabled on devices that are below
- * a non-Hypertransport host bridge. Locate the host bridge...
+ * a non-HyperTransport host bridge. Locate the host bridge.
*/
host_bridge = pci_get_domain_bus_and_slot(pci_domain_nr(dev->bus), 0,
PCI_DEVFN(0, 0));
@@ -3495,13 +3523,13 @@ static void quirk_intel_ntb(struct pci_dev *dev)
if (rc)
return;
- dev->resource[2].end = dev->resource[2].start + ((u64) 1 << val) - 1;
+ resource_set_size(&dev->resource[2], (resource_size_t)1 << val);
rc = pci_read_config_byte(dev, 0x00D1, &val);
if (rc)
return;
- dev->resource[4].end = dev->resource[4].start + ((u64) 1 << val) - 1;
+ resource_set_size(&dev->resource[4], (resource_size_t)1 << val);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0e08, quirk_intel_ntb);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0e0d, quirk_intel_ntb);
@@ -3594,6 +3622,8 @@ DECLARE_PCI_FIXUP_FINAL(0x1814, 0x0601, /* Ralink RT2800 802.11n PCI */
quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_FINAL(0x1b7c, 0x0004, /* Ceton InfiniTV4 */
quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CREATIVE, PCI_DEVICE_ID_CREATIVE_20K2,
+ quirk_broken_intx_masking);
/*
* Realtek RTL8169 PCI Gigabit Ethernet Controller (rev 10)
@@ -3783,13 +3813,26 @@ DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_ATI, PCI_ANY_ID,
PCI_CLASS_DISPLAY_VGA, 8, quirk_no_pm_reset);
/*
+ * Spectrum-{1,2,3,4} devices report that a D3hot->D0 transition causes a reset
+ * (i.e., they advertise NoSoftRst-). However, this transition does not have
+ * any effect on the device: It continues to be operational and network ports
+ * remain up. Advertising this support makes it seem as if a PM reset is viable
+ * for these devices. Mark it as unavailable to skip it when testing reset
+ * methods.
+ */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MELLANOX, 0xcb84, quirk_no_pm_reset);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MELLANOX, 0xcf6c, quirk_no_pm_reset);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MELLANOX, 0xcf70, quirk_no_pm_reset);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MELLANOX, 0xcf80, quirk_no_pm_reset);
+
+/*
* Thunderbolt controllers with broken MSI hotplug signaling:
* Entire 1st generation (Light Ridge, Eagle Ridge, Light Peak) and part
* of the 2nd generation (Cactus Ridge 4C up to revision 1, Port Ridge).
*/
static void quirk_thunderbolt_hotplug_msi(struct pci_dev *pdev)
{
- if (pdev->is_hotplug_bridge &&
+ if (pdev->is_pciehp &&
(pdev->device != PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C ||
pdev->revision <= 1))
pdev->no_msi = 1;
@@ -4219,6 +4262,10 @@ static void quirk_dma_func0_alias(struct pci_dev *dev)
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_RICOH, 0xe832, quirk_dma_func0_alias);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_RICOH, 0xe476, quirk_dma_func0_alias);
+/* Some Glenfly chips use function 0 as the PCIe Requester ID for DMA */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_GLENFLY, 0x3d40, quirk_dma_func0_alias);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_GLENFLY, 0x3d41, quirk_dma_func0_alias);
+
static void quirk_dma_func1_alias(struct pci_dev *dev)
{
if (PCI_FUNC(dev->devfn) != 1)
@@ -4551,9 +4598,9 @@ static void quirk_disable_root_port_attributes(struct pci_dev *pdev)
pci_info(root_port, "Disabling No Snoop/Relaxed Ordering Attributes to avoid PCIe Completion erratum in %s\n",
dev_name(&pdev->dev));
- pcie_capability_clear_and_set_word(root_port, PCI_EXP_DEVCTL,
- PCI_EXP_DEVCTL_RELAX_EN |
- PCI_EXP_DEVCTL_NOSNOOP_EN, 0);
+ pcie_capability_clear_word(root_port, PCI_EXP_DEVCTL,
+ PCI_EXP_DEVCTL_RELAX_EN |
+ PCI_EXP_DEVCTL_NOSNOOP_EN);
}
/*
@@ -4695,17 +4742,21 @@ static int pci_quirk_xgene_acs(struct pci_dev *dev, u16 acs_flags)
* But the implementation could block peer-to-peer transactions between them
* and provide ACS-like functionality.
*/
-static int pci_quirk_zhaoxin_pcie_ports_acs(struct pci_dev *dev, u16 acs_flags)
+static int pci_quirk_zhaoxin_pcie_ports_acs(struct pci_dev *dev, u16 acs_flags)
{
if (!pci_is_pcie(dev) ||
((pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT) &&
(pci_pcie_type(dev) != PCI_EXP_TYPE_DOWNSTREAM)))
return -ENOTTY;
+ /*
+ * Future Zhaoxin Root Ports and Switch Downstream Ports will
+ * implement ACS capability in accordance with the PCIe Spec.
+ */
switch (dev->device) {
case 0x0710 ... 0x071e:
case 0x0721:
- case 0x0723 ... 0x0732:
+ case 0x0723 ... 0x0752:
return pci_acs_ctrl_enabled(acs_flags,
PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
}
@@ -4945,19 +4996,34 @@ static int pci_quirk_brcm_acs(struct pci_dev *dev, u16 acs_flags)
PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
}
+static int pci_quirk_loongson_acs(struct pci_dev *dev, u16 acs_flags)
+{
+ /*
+ * Loongson PCIe Root Ports don't advertise an ACS capability, but
+ * they do not allow peer-to-peer transactions between Root Ports.
+ * Allow each Root Port to be in a separate IOMMU group by masking
+ * SV/RR/CR/UF bits.
+ */
+ return pci_acs_ctrl_enabled(acs_flags,
+ PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
+}
+
/*
- * Wangxun 10G/1G NICs have no ACS capability, and on multi-function
- * devices, peer-to-peer transactions are not be used between the functions.
- * So add an ACS quirk for below devices to isolate functions.
+ * Wangxun 40G/25G/10G/1G NICs have no ACS capability, but on
+ * multi-function devices, the hardware isolates the functions by
+ * directing all peer-to-peer traffic upstream as though PCI_ACS_RR and
+ * PCI_ACS_CR were set.
* SFxxx 1G NICs(em).
* RP1000/RP2000 10G NICs(sp).
+ * FF5xxx 40G/25G/10G NICs(aml).
*/
static int pci_quirk_wangxun_nic_acs(struct pci_dev *dev, u16 acs_flags)
{
switch (dev->device) {
- case 0x0100 ... 0x010F:
- case 0x1001:
- case 0x2001:
+ case 0x0100 ... 0x010F: /* EM */
+ case 0x1001: case 0x2001: /* SP */
+ case 0x5010: case 0x5025: case 0x5040: /* AML */
+ case 0x5110: case 0x5125: case 0x5140: /* AML */
return pci_acs_ctrl_enabled(acs_flags,
PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
}
@@ -5039,6 +5105,8 @@ static const struct pci_dev_acs_enabled {
/* QCOM QDF2xxx root ports */
{ PCI_VENDOR_ID_QCOM, 0x0400, pci_quirk_qcom_rp_acs },
{ PCI_VENDOR_ID_QCOM, 0x0401, pci_quirk_qcom_rp_acs },
+ /* QCOM SA8775P root port */
+ { PCI_VENDOR_ID_QCOM, 0x0115, pci_quirk_qcom_rp_acs },
/* HXT SD4800 root ports. The ACS design is same as QCOM QDF2xxx */
{ PCI_VENDOR_ID_HXT, 0x0401, pci_quirk_qcom_rp_acs },
/* Intel PCH root ports */
@@ -5068,7 +5136,22 @@ static const struct pci_dev_acs_enabled {
{ PCI_VENDOR_ID_BROADCOM, 0x1750, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_BROADCOM, 0x1751, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_BROADCOM, 0x1752, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_BROADCOM, 0x1760, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_BROADCOM, 0x1761, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_BROADCOM, 0x1762, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_BROADCOM, 0x1763, pci_quirk_mf_endpoint_acs },
{ PCI_VENDOR_ID_BROADCOM, 0xD714, pci_quirk_brcm_acs },
+ /* Loongson PCIe Root Ports */
+ { PCI_VENDOR_ID_LOONGSON, 0x3C09, pci_quirk_loongson_acs },
+ { PCI_VENDOR_ID_LOONGSON, 0x3C19, pci_quirk_loongson_acs },
+ { PCI_VENDOR_ID_LOONGSON, 0x3C29, pci_quirk_loongson_acs },
+ { PCI_VENDOR_ID_LOONGSON, 0x7A09, pci_quirk_loongson_acs },
+ { PCI_VENDOR_ID_LOONGSON, 0x7A19, pci_quirk_loongson_acs },
+ { PCI_VENDOR_ID_LOONGSON, 0x7A29, pci_quirk_loongson_acs },
+ { PCI_VENDOR_ID_LOONGSON, 0x7A39, pci_quirk_loongson_acs },
+ { PCI_VENDOR_ID_LOONGSON, 0x7A49, pci_quirk_loongson_acs },
+ { PCI_VENDOR_ID_LOONGSON, 0x7A59, pci_quirk_loongson_acs },
+ { PCI_VENDOR_ID_LOONGSON, 0x7A69, pci_quirk_loongson_acs },
/* Amazon Annapurna Labs */
{ PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS, 0x0031, pci_quirk_al_acs },
/* Zhaoxin multi-function devices */
@@ -5381,7 +5464,7 @@ int pci_dev_specific_disable_acs_redir(struct pci_dev *dev)
*/
static void quirk_intel_qat_vf_cap(struct pci_dev *pdev)
{
- int pos, i = 0;
+ int pos, i = 0, ret;
u8 next_cap;
u16 reg16, *cap;
struct pci_cap_saved_state *state;
@@ -5427,8 +5510,8 @@ static void quirk_intel_qat_vf_cap(struct pci_dev *pdev)
pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD;
pdev->cfg_size = PCI_CFG_SPACE_EXP_SIZE;
- if (pci_read_config_dword(pdev, PCI_CFG_SPACE_SIZE, &status) !=
- PCIBIOS_SUCCESSFUL || (status == 0xffffffff))
+ ret = pci_read_config_dword(pdev, PCI_CFG_SPACE_SIZE, &status);
+ if ((ret != PCIBIOS_SUCCESSFUL) || (PCI_POSSIBLE_ERROR(status)))
pdev->cfg_size = PCI_CFG_SPACE_SIZE;
if (pci_find_saved_cap(pdev, PCI_CAP_ID_EXP))
@@ -5463,7 +5546,7 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x443, quirk_intel_qat_vf_cap);
* AMD Matisse USB 3.0 Host Controller 0x149c
* Intel 82579LM Gigabit Ethernet Controller 0x1502
* Intel 82579V Gigabit Ethernet Controller 0x1503
- *
+ * Mediatek MT7922 802.11ax PCI Express Wireless Network Adapter
*/
static void quirk_no_flr(struct pci_dev *dev)
{
@@ -5475,6 +5558,7 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x149c, quirk_no_flr);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x7901, quirk_no_flr);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1502, quirk_no_flr);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1503, quirk_no_flr);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_MEDIATEK, 0x0616, quirk_no_flr);
/* FLR may cause the SolidRun SNET DPU (rev 0x1) to hang */
static void quirk_no_flr_snet(struct pci_dev *dev)
@@ -5496,6 +5580,7 @@ static void quirk_no_ext_tags(struct pci_dev *pdev)
pci_walk_bus(bridge->bus, pci_configure_extended_tags, NULL);
}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_3WARE, 0x1004, quirk_no_ext_tags);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0132, quirk_no_ext_tags);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0140, quirk_no_ext_tags);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0141, quirk_no_ext_tags);
@@ -5505,6 +5590,12 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0420, quirk_no_ext_tags);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0422, quirk_no_ext_tags);
#ifdef CONFIG_PCI_ATS
+static void quirk_no_ats(struct pci_dev *pdev)
+{
+ pci_info(pdev, "disabling ATS\n");
+ pdev->ats_cap = 0;
+}
+
/*
* Some devices require additional driver setup to enable ATS. Don't use
* ATS for those devices as ATS will be enabled before the driver has had a
@@ -5518,14 +5609,10 @@ static void quirk_amd_harvest_no_ats(struct pci_dev *pdev)
(pdev->subsystem_device == 0xce19 ||
pdev->subsystem_device == 0xcc10 ||
pdev->subsystem_device == 0xcc08))
- goto no_ats;
- else
- return;
+ quirk_no_ats(pdev);
+ } else {
+ quirk_no_ats(pdev);
}
-
-no_ats:
- pci_info(pdev, "disabling ATS\n");
- pdev->ats_cap = 0;
}
/* AMD Stoney platform GPU */
@@ -5548,6 +5635,25 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7347, quirk_amd_harvest_no_ats);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x734f, quirk_amd_harvest_no_ats);
/* AMD Raven platform iGPU */
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x15d8, quirk_amd_harvest_no_ats);
+
+/*
+ * Intel IPU E2000 revisions before C0 implement incorrect endianness
+ * in ATS Invalidate Request message body. Disable ATS for those devices.
+ */
+static void quirk_intel_e2000_no_ats(struct pci_dev *pdev)
+{
+ if (pdev->revision < 0x20)
+ quirk_no_ats(pdev);
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1451, quirk_intel_e2000_no_ats);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1452, quirk_intel_e2000_no_ats);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1453, quirk_intel_e2000_no_ats);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1454, quirk_intel_e2000_no_ats);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1455, quirk_intel_e2000_no_ats);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1457, quirk_intel_e2000_no_ats);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1459, quirk_intel_e2000_no_ats);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x145a, quirk_intel_e2000_no_ats);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x145c, quirk_intel_e2000_no_ats);
#endif /* CONFIG_PCI_ATS */
/* Freescale PCIe doesn't support MSI in RC mode */
@@ -5664,7 +5770,7 @@ static void quirk_nvidia_hda(struct pci_dev *gpu)
/* The GPU becomes a multi-function device when the HDA is enabled */
pci_read_config_byte(gpu, PCI_HEADER_TYPE, &hdr_type);
- gpu->multifunction = !!(hdr_type & 0x80);
+ gpu->multifunction = FIELD_GET(PCI_HEADER_TYPE_MFD, hdr_type);
}
DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
PCI_BASE_CLASS_DISPLAY, 16, quirk_nvidia_hda);
@@ -5729,7 +5835,7 @@ int pci_idt_bus_quirk(struct pci_bus *bus, int devfn, u32 *l, int timeout)
/*
* Microsemi Switchtec NTB uses devfn proxy IDs to move TLPs between
* NT endpoints via the internal switch fabric. These IDs replace the
- * originating requestor ID TLPs which access host memory on peer NTB
+ * originating Requester ID TLPs which access host memory on peer NTB
* ports. Therefore, all proxy IDs must be aliased to the NTB device
* to permit access when the IOMMU is turned on.
*/
@@ -5867,6 +5973,53 @@ SWITCHTEC_QUIRK(0x4428); /* PSXA 28XG4 */
SWITCHTEC_QUIRK(0x4552); /* PAXA 52XG4 */
SWITCHTEC_QUIRK(0x4536); /* PAXA 36XG4 */
SWITCHTEC_QUIRK(0x4528); /* PAXA 28XG4 */
+SWITCHTEC_QUIRK(0x5000); /* PFX 100XG5 */
+SWITCHTEC_QUIRK(0x5084); /* PFX 84XG5 */
+SWITCHTEC_QUIRK(0x5068); /* PFX 68XG5 */
+SWITCHTEC_QUIRK(0x5052); /* PFX 52XG5 */
+SWITCHTEC_QUIRK(0x5036); /* PFX 36XG5 */
+SWITCHTEC_QUIRK(0x5028); /* PFX 28XG5 */
+SWITCHTEC_QUIRK(0x5100); /* PSX 100XG5 */
+SWITCHTEC_QUIRK(0x5184); /* PSX 84XG5 */
+SWITCHTEC_QUIRK(0x5168); /* PSX 68XG5 */
+SWITCHTEC_QUIRK(0x5152); /* PSX 52XG5 */
+SWITCHTEC_QUIRK(0x5136); /* PSX 36XG5 */
+SWITCHTEC_QUIRK(0x5128); /* PSX 28XG5 */
+SWITCHTEC_QUIRK(0x5200); /* PAX 100XG5 */
+SWITCHTEC_QUIRK(0x5284); /* PAX 84XG5 */
+SWITCHTEC_QUIRK(0x5268); /* PAX 68XG5 */
+SWITCHTEC_QUIRK(0x5252); /* PAX 52XG5 */
+SWITCHTEC_QUIRK(0x5236); /* PAX 36XG5 */
+SWITCHTEC_QUIRK(0x5228); /* PAX 28XG5 */
+SWITCHTEC_QUIRK(0x5300); /* PFXA 100XG5 */
+SWITCHTEC_QUIRK(0x5384); /* PFXA 84XG5 */
+SWITCHTEC_QUIRK(0x5368); /* PFXA 68XG5 */
+SWITCHTEC_QUIRK(0x5352); /* PFXA 52XG5 */
+SWITCHTEC_QUIRK(0x5336); /* PFXA 36XG5 */
+SWITCHTEC_QUIRK(0x5328); /* PFXA 28XG5 */
+SWITCHTEC_QUIRK(0x5400); /* PSXA 100XG5 */
+SWITCHTEC_QUIRK(0x5484); /* PSXA 84XG5 */
+SWITCHTEC_QUIRK(0x5468); /* PSXA 68XG5 */
+SWITCHTEC_QUIRK(0x5452); /* PSXA 52XG5 */
+SWITCHTEC_QUIRK(0x5436); /* PSXA 36XG5 */
+SWITCHTEC_QUIRK(0x5428); /* PSXA 28XG5 */
+SWITCHTEC_QUIRK(0x5500); /* PAXA 100XG5 */
+SWITCHTEC_QUIRK(0x5584); /* PAXA 84XG5 */
+SWITCHTEC_QUIRK(0x5568); /* PAXA 68XG5 */
+SWITCHTEC_QUIRK(0x5552); /* PAXA 52XG5 */
+SWITCHTEC_QUIRK(0x5536); /* PAXA 36XG5 */
+SWITCHTEC_QUIRK(0x5528); /* PAXA 28XG5 */
+
+#define SWITCHTEC_PCI100X_QUIRK(vid) \
+ DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_EFAR, vid, \
+ PCI_CLASS_BRIDGE_OTHER, 8, quirk_switchtec_ntb_dma_alias)
+SWITCHTEC_PCI100X_QUIRK(0x1001); /* PCI1001XG4 */
+SWITCHTEC_PCI100X_QUIRK(0x1002); /* PCI1002XG4 */
+SWITCHTEC_PCI100X_QUIRK(0x1003); /* PCI1003XG4 */
+SWITCHTEC_PCI100X_QUIRK(0x1004); /* PCI1004XG4 */
+SWITCHTEC_PCI100X_QUIRK(0x1005); /* PCI1005XG4 */
+SWITCHTEC_PCI100X_QUIRK(0x1006); /* PCI1006XG4 */
+
/*
* The PLX NTB uses devfn proxy IDs to move TLPs between NT endpoints.
@@ -6116,9 +6269,10 @@ static void dpc_log_size(struct pci_dev *dev)
if (!(val & PCI_EXP_DPC_CAP_RP_EXT))
return;
- if (!((val & PCI_EXP_DPC_RP_PIO_LOG_SIZE) >> 8)) {
- pci_info(dev, "Overriding RP PIO Log Size to 4\n");
- dev->dpc_rp_log_size = 4;
+ if (FIELD_GET(PCI_EXP_DPC_RP_PIO_LOG_SIZE, val) == 0) {
+ pci_info(dev, "Overriding RP PIO Log Size to %d\n",
+ PCIE_STD_NUM_TLP_HEADERLOG);
+ dev->dpc_rp_log_size = PCIE_STD_NUM_TLP_HEADERLOG;
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x461f, dpc_log_size);
@@ -6137,4 +6291,53 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a2b, dpc_log_size);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a2d, dpc_log_size);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a2f, dpc_log_size);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a31, dpc_log_size);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0xa72f, dpc_log_size);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0xa73f, dpc_log_size);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0xa76e, dpc_log_size);
+#endif
+
+/*
+ * For a PCI device with multiple downstream devices, its driver may use
+ * a flattened device tree to describe the downstream devices.
+ * To overlay the flattened device tree, the PCI device and all its ancestor
+ * devices need to have device tree nodes on the system base device tree. Thus,
+ * before driver probing, it might need to add a device tree node as the final
+ * fixup.
+ */
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_XILINX, 0x5020, of_pci_make_dev_node);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_XILINX, 0x5021, of_pci_make_dev_node);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_REDHAT, 0x0005, of_pci_make_dev_node);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_EFAR, 0x9660, of_pci_make_dev_node);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_RPI, PCI_DEVICE_ID_RPI_RP1_C0, of_pci_make_dev_node);
+
+/*
+ * Devices known to require a longer delay before first config space access
+ * after reset recovery or resume from D3cold:
+ *
+ * VideoPropulsion (aka Genroco) Torrent QN16e MPEG QAM Modulator
+ */
+static void pci_fixup_d3cold_delay_1sec(struct pci_dev *pdev)
+{
+ pdev->d3cold_delay = 1000;
+}
+DECLARE_PCI_FIXUP_FINAL(0x5555, 0x0004, pci_fixup_d3cold_delay_1sec);
+
+#ifdef CONFIG_PCIEAER
+static void pci_mask_replay_timer_timeout(struct pci_dev *pdev)
+{
+ struct pci_dev *parent = pci_upstream_bridge(pdev);
+ u32 val;
+
+ if (!parent || !parent->aer_cap)
+ return;
+
+ pci_info(parent, "mask Replay Timer Timeout Correctable Errors due to %s hardware defect",
+ pci_name(pdev));
+
+ pci_read_config_dword(parent, parent->aer_cap + PCI_ERR_COR_MASK, &val);
+ val |= PCI_ERR_COR_REP_TIMER;
+ pci_write_config_dword(parent, parent->aer_cap + PCI_ERR_COR_MASK, val);
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_GLI, 0x9750, pci_mask_replay_timer_timeout);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_GLI, 0x9755, pci_mask_replay_timer_timeout);
#endif
diff --git a/drivers/pci/rebar.c b/drivers/pci/rebar.c
new file mode 100644
index 000000000000..ecdebdeb2dff
--- /dev/null
+++ b/drivers/pci/rebar.c
@@ -0,0 +1,328 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCI Resizable BAR Extended Capability handling.
+ */
+
+#include <linux/bits.h>
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/errno.h>
+#include <linux/export.h>
+#include <linux/ioport.h>
+#include <linux/log2.h>
+#include <linux/pci.h>
+#include <linux/sizes.h>
+#include <linux/types.h>
+
+#include "pci.h"
+
+#define PCI_REBAR_MIN_SIZE ((resource_size_t)SZ_1M)
+
+/**
+ * pci_rebar_bytes_to_size - Convert size in bytes to PCI BAR Size
+ * @bytes: size in bytes
+ *
+ * Convert size in bytes to encoded BAR Size in Resizable BAR Capability
+ * (PCIe r6.2, sec. 7.8.6.3).
+ *
+ * Return: encoded BAR Size as defined in the PCIe spec (0=1MB, 31=128TB)
+ */
+int pci_rebar_bytes_to_size(u64 bytes)
+{
+ int rebar_minsize = ilog2(PCI_REBAR_MIN_SIZE);
+
+ bytes = roundup_pow_of_two(bytes);
+
+ return max(ilog2(bytes), rebar_minsize) - rebar_minsize;
+}
+EXPORT_SYMBOL_GPL(pci_rebar_bytes_to_size);
+
+/**
+ * pci_rebar_size_to_bytes - Convert encoded BAR Size to size in bytes
+ * @size: encoded BAR Size as defined in the PCIe spec (0=1MB, 31=128TB)
+ *
+ * Return: BAR size in bytes
+ */
+resource_size_t pci_rebar_size_to_bytes(int size)
+{
+ return 1ULL << (size + ilog2(PCI_REBAR_MIN_SIZE));
+}
+EXPORT_SYMBOL_GPL(pci_rebar_size_to_bytes);
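
/*
 * Illustrative sketch, not part of the patch above: it only demonstrates the
 * encoding implemented by the two conversion helpers, assuming their
 * prototypes are visible through <linux/pci.h>.  The encoded BAR Size is
 * log2 of the size relative to the 1 MB minimum, and byte counts are rounded
 * up to the next power of two.
 */
#include <linux/pci.h>
#include <linux/sizes.h>

static void rebar_encoding_demo(void)
{
	int s_1m   = pci_rebar_bytes_to_size(SZ_1M);		/* 1 MB   -> 0 */
	int s_256m = pci_rebar_bytes_to_size(SZ_256M);		/* 256 MB -> 8 */
	int s_300m = pci_rebar_bytes_to_size(300 * SZ_1M);	/* rounds up to 512 MB -> 9 */
	resource_size_t bytes = pci_rebar_size_to_bytes(s_256m); /* back to 256 MB */

	pr_info("ReBAR encoding demo: %d %d %d %pa\n", s_1m, s_256m, s_300m, &bytes);
}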
+
+void pci_rebar_init(struct pci_dev *pdev)
+{
+ pdev->rebar_cap = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_REBAR);
+}
+
+/**
+ * pci_rebar_find_pos - find position of resize control reg for BAR
+ * @pdev: PCI device
+ * @bar: BAR to find
+ *
+ * Helper to find the position of the control register for a BAR.
+ *
+ * Return:
+ * * %-ENOTSUPP if resizable BARs are not supported at all,
+ * * %-ENOENT if no control register for the BAR could be found.
+ */
+static int pci_rebar_find_pos(struct pci_dev *pdev, int bar)
+{
+ unsigned int pos, nbars, i;
+ u32 ctrl;
+
+ if (pci_resource_is_iov(bar)) {
+ pos = pci_iov_vf_rebar_cap(pdev);
+ bar = pci_resource_num_to_vf_bar(bar);
+ } else {
+ pos = pdev->rebar_cap;
+ }
+
+ if (!pos)
+ return -ENOTSUPP;
+
+ pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
+ nbars = FIELD_GET(PCI_REBAR_CTRL_NBAR_MASK, ctrl);
+
+ for (i = 0; i < nbars; i++, pos += 8) {
+ int bar_idx;
+
+ pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
+ bar_idx = FIELD_GET(PCI_REBAR_CTRL_BAR_IDX, ctrl);
+ if (bar_idx == bar)
+ return pos;
+ }
+
+ return -ENOENT;
+}
+
+/**
+ * pci_rebar_get_possible_sizes - get possible sizes for Resizable BAR
+ * @pdev: PCI device
+ * @bar: BAR to query
+ *
+ * Get the possible sizes of a resizable BAR as bitmask.
+ *
+ * Return: A bitmask of possible sizes (bit 0=1MB, bit 31=128TB), or %0 if
+ * the BAR isn't resizable.
+ */
+u64 pci_rebar_get_possible_sizes(struct pci_dev *pdev, int bar)
+{
+ int pos;
+ u32 cap;
+
+ pos = pci_rebar_find_pos(pdev, bar);
+ if (pos < 0)
+ return 0;
+
+ pci_read_config_dword(pdev, pos + PCI_REBAR_CAP, &cap);
+ cap = FIELD_GET(PCI_REBAR_CAP_SIZES, cap);
+
+ /* Sapphire RX 5600 XT Pulse has an invalid cap dword for BAR 0 */
+ if (pdev->vendor == PCI_VENDOR_ID_ATI && pdev->device == 0x731f &&
+ bar == 0 && cap == 0x700)
+ return 0x3f00;
+
+ return cap;
+}
+EXPORT_SYMBOL(pci_rebar_get_possible_sizes);
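
/*
 * Illustrative sketch: decoding the bitmask returned by
 * pci_rebar_get_possible_sizes().  Each set bit N corresponds to a supported
 * size of 2^N MB (bit 0 = 1 MB).  @pdev and @bar are placeholders supplied
 * by the caller.
 */
#include <linux/bits.h>
#include <linux/pci.h>

static void rebar_dump_possible_sizes(struct pci_dev *pdev, int bar)
{
	u64 sizes = pci_rebar_get_possible_sizes(pdev, bar);
	unsigned int n;

	for (n = 0; n < 64; n++)
		if (sizes & BIT_ULL(n))
			pci_info(pdev, "BAR %d supports %llu MB\n",
				 bar, 1ULL << n);
}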
+
+/**
+ * pci_rebar_size_supported - check if size is supported for BAR
+ * @pdev: PCI device
+ * @bar: BAR to check
+ * @size: encoded size as defined in the PCIe spec (0=1MB, 31=128TB)
+ *
+ * Return: %true if @bar is resizable and @size is supported, otherwise
+ * %false.
+ */
+bool pci_rebar_size_supported(struct pci_dev *pdev, int bar, int size)
+{
+ u64 sizes = pci_rebar_get_possible_sizes(pdev, bar);
+
+ if (size < 0 || size > ilog2(SZ_128T) - ilog2(PCI_REBAR_MIN_SIZE))
+ return false;
+
+ return BIT(size) & sizes;
+}
+EXPORT_SYMBOL_GPL(pci_rebar_size_supported);
+
+/**
+ * pci_rebar_get_max_size - get the maximum supported size of a BAR
+ * @pdev: PCI device
+ * @bar: BAR to query
+ *
+ * Get the largest supported size of a resizable BAR as a size.
+ *
+ * Return: the encoded maximum BAR size as defined in the PCIe spec
+ * (0=1MB, 31=128TB), or %-ENOENT on error.
+ */
+int pci_rebar_get_max_size(struct pci_dev *pdev, int bar)
+{
+ u64 sizes;
+
+ sizes = pci_rebar_get_possible_sizes(pdev, bar);
+ if (!sizes)
+ return -ENOENT;
+
+ return __fls(sizes);
+}
+EXPORT_SYMBOL_GPL(pci_rebar_get_max_size);
+
+/**
+ * pci_rebar_get_current_size - get the current size of a Resizable BAR
+ * @pdev: PCI device
+ * @bar: BAR to get the size from
+ *
+ * Read the current size of a BAR from the Resizable BAR config.
+ *
+ * Return: BAR Size if @bar is resizable (0=1MB, 31=128TB), or negative on
+ * error.
+ */
+int pci_rebar_get_current_size(struct pci_dev *pdev, int bar)
+{
+ int pos;
+ u32 ctrl;
+
+ pos = pci_rebar_find_pos(pdev, bar);
+ if (pos < 0)
+ return pos;
+
+ pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
+ return FIELD_GET(PCI_REBAR_CTRL_BAR_SIZE, ctrl);
+}
+
+/**
+ * pci_rebar_set_size - set a new size for a Resizable BAR
+ * @pdev: PCI device
+ * @bar: BAR to set size to
+ * @size: new size as defined in the PCIe spec (0=1MB, 31=128TB)
+ *
+ * Set the new size of a BAR as defined in the spec.
+ *
+ * Return: %0 if resizing was successful, or negative on error.
+ */
+int pci_rebar_set_size(struct pci_dev *pdev, int bar, int size)
+{
+ int pos;
+ u32 ctrl;
+
+ pos = pci_rebar_find_pos(pdev, bar);
+ if (pos < 0)
+ return pos;
+
+ pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
+ ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE;
+ ctrl |= FIELD_PREP(PCI_REBAR_CTRL_BAR_SIZE, size);
+ pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl);
+
+ if (pci_resource_is_iov(bar))
+ pci_iov_resource_set_size(pdev, bar, size);
+
+ return 0;
+}
+
+void pci_restore_rebar_state(struct pci_dev *pdev)
+{
+ unsigned int pos, nbars, i;
+ u32 ctrl;
+
+ pos = pdev->rebar_cap;
+ if (!pos)
+ return;
+
+ pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
+ nbars = FIELD_GET(PCI_REBAR_CTRL_NBAR_MASK, ctrl);
+
+ for (i = 0; i < nbars; i++, pos += 8) {
+ struct resource *res;
+ int bar_idx, size;
+
+ pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
+ bar_idx = ctrl & PCI_REBAR_CTRL_BAR_IDX;
+ res = pci_resource_n(pdev, bar_idx);
+ size = pci_rebar_bytes_to_size(resource_size(res));
+ ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE;
+ ctrl |= FIELD_PREP(PCI_REBAR_CTRL_BAR_SIZE, size);
+ pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl);
+ }
+}
+
+static bool pci_resize_is_memory_decoding_enabled(struct pci_dev *dev,
+ int resno)
+{
+ u16 cmd;
+
+ if (pci_resource_is_iov(resno))
+ return pci_iov_is_memory_decoding_enabled(dev);
+
+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
+
+ return cmd & PCI_COMMAND_MEMORY;
+}
+
+void pci_resize_resource_set_size(struct pci_dev *dev, int resno, int size)
+{
+ resource_size_t res_size = pci_rebar_size_to_bytes(size);
+ struct resource *res = pci_resource_n(dev, resno);
+
+ if (pci_resource_is_iov(resno))
+ res_size *= pci_sriov_get_totalvfs(dev);
+
+ resource_set_size(res, res_size);
+}
+
+/**
+ * pci_resize_resource - reconfigure a Resizable BAR and resources
+ * @dev: the PCI device
+ * @resno: index of the BAR to be resized
+ * @size: new size as defined in the spec (0=1MB, 31=128TB)
+ * @exclude_bars: a mask of BARs that should not be released
+ *
+ * Reconfigure @resno to @size and re-run resource assignment algorithm
+ * with the new size.
+ *
+ * Prior to resize, release @dev resources that share a bridge window with
+ * @resno. This unpins the bridge window resource to allow changing it.
+ *
+ * The caller may prevent releasing a particular BAR by providing
+ * @exclude_bars mask, but this may result in the resize operation failing
+ * due to insufficient space.
+ *
+ * Return: 0 on success, or negative on error. In case of an error, the
+ * resources are restored to their original places.
+ */
+int pci_resize_resource(struct pci_dev *dev, int resno, int size,
+ int exclude_bars)
+{
+ struct pci_host_bridge *host;
+ int old, ret;
+
+ /* Check if we must preserve the firmware's resource assignment */
+ host = pci_find_host_bridge(dev->bus);
+ if (host->preserve_config)
+ return -ENOTSUPP;
+
+ if (pci_resize_is_memory_decoding_enabled(dev, resno))
+ return -EBUSY;
+
+ if (!pci_rebar_size_supported(dev, resno, size))
+ return -EINVAL;
+
+ old = pci_rebar_get_current_size(dev, resno);
+ if (old < 0)
+ return old;
+
+ ret = pci_rebar_set_size(dev, resno, size);
+ if (ret)
+ return ret;
+
+ ret = pci_do_resource_release_and_resize(dev, resno, size, exclude_bars);
+ if (ret)
+ goto error_resize;
+ return 0;
+
+error_resize:
+ pci_rebar_set_size(dev, resno, old);
+ return ret;
+}
+EXPORT_SYMBOL(pci_resize_resource);
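
/*
 * Hedged usage sketch from a driver's perspective, not taken from any real
 * driver: verify that the target size is supported, then let
 * pci_resize_resource() release and reassign the affected resources.  BAR 0
 * and the 256 MB target are arbitrary example values.
 */
#include <linux/pci.h>
#include <linux/sizes.h>

static int demo_resize_bar0_to_256m(struct pci_dev *pdev)
{
	int size = pci_rebar_bytes_to_size(SZ_256M);

	if (!pci_rebar_size_supported(pdev, 0, size))
		return -EOPNOTSUPP;

	/*
	 * Memory decoding must be disabled while resizing;
	 * pci_resize_resource() enforces this and returns -EBUSY otherwise.
	 */
	return pci_resize_resource(pdev, 0, size, 0);
}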
diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c
index d68aee29386b..417a9ea59117 100644
--- a/drivers/pci/remove.c
+++ b/drivers/pci/remove.c
@@ -1,6 +1,10 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/pci.h>
#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+
#include "pci.h"
static void pci_free_resources(struct pci_dev *dev)
@@ -13,25 +17,52 @@ static void pci_free_resources(struct pci_dev *dev)
}
}
+static void pci_pwrctrl_unregister(struct device *dev)
+{
+ struct device_node *np;
+ struct platform_device *pdev;
+
+ np = dev_of_node(dev);
+ if (!np)
+ return;
+
+ pdev = of_find_device_by_node(np);
+ if (!pdev)
+ return;
+
+ of_device_unregister(pdev);
+ put_device(&pdev->dev);
+
+ of_node_clear_flag(np, OF_POPULATED);
+}
+
static void pci_stop_dev(struct pci_dev *dev)
{
pci_pme_active(dev, false);
- if (pci_dev_is_added(dev)) {
-
- device_release_driver(&dev->dev);
- pci_proc_detach_device(dev);
- pci_remove_sysfs_dev_files(dev);
+ if (!pci_dev_test_and_clear_added(dev))
+ return;
- pci_dev_assign_added(dev, false);
- }
+ device_release_driver(&dev->dev);
+ pci_proc_detach_device(dev);
+ pci_remove_sysfs_dev_files(dev);
+ of_pci_remove_node(dev);
}
static void pci_destroy_dev(struct pci_dev *dev)
{
- if (!dev->dev.kobj.parent)
+ if (pci_dev_test_and_set_removed(dev))
return;
+ pci_doe_sysfs_teardown(dev);
+ pci_npem_remove(dev);
+
+ /*
+ * While device is in D0 drop the device from TSM link operations
+ * including unbind and disconnect (IDE + SPDM teardown).
+ */
+ pci_tsm_destroy(dev);
+
device_del(&dev->dev);
down_write(&pci_bus_sem);
@@ -39,8 +70,10 @@ static void pci_destroy_dev(struct pci_dev *dev)
up_write(&pci_bus_sem);
pci_doe_destroy(dev);
+ pci_ide_destroy(dev);
pcie_aspm_exit_link_state(dev);
pci_bridge_d3_update(dev);
+ pci_pwrctrl_unregister(&dev->dev);
pci_free_resources(dev);
put_device(&dev->dev);
}
@@ -114,6 +147,7 @@ static void pci_remove_bus_device(struct pci_dev *dev)
*/
void pci_stop_and_remove_bus_device(struct pci_dev *dev)
{
+ lockdep_assert_held(&pci_rescan_remove_lock);
pci_stop_bus_device(dev);
pci_remove_bus_device(dev);
}
@@ -140,6 +174,8 @@ void pci_stop_root_bus(struct pci_bus *bus)
&bus->devices, bus_list)
pci_stop_bus_device(child);
+ of_pci_remove_host_bridge_node(host_bridge);
+
/* stop the host bridge */
device_release_driver(&host_bridge->dev);
}
@@ -161,7 +197,7 @@ void pci_remove_root_bus(struct pci_bus *bus)
#ifdef CONFIG_PCI_DOMAINS_GENERIC
/* Release domain_nr if it was dynamically allocated */
if (host_bridge->domain_nr == PCI_DOMAIN_NR_NOT_SET)
- pci_bus_release_domain_nr(bus, host_bridge->dev.parent);
+ pci_bus_release_domain_nr(host_bridge->dev.parent, bus->domain_nr);
#endif
pci_remove_bus(bus);
diff --git a/drivers/pci/search.c b/drivers/pci/search.c
index b4c138a6ec02..e6e84dc62e82 100644
--- a/drivers/pci/search.c
+++ b/drivers/pci/search.c
@@ -282,6 +282,45 @@ static struct pci_dev *pci_get_dev_by_id(const struct pci_device_id *id,
return pdev;
}
+static struct pci_dev *pci_get_dev_by_id_reverse(const struct pci_device_id *id,
+ struct pci_dev *from)
+{
+ struct device *dev;
+ struct device *dev_start = NULL;
+ struct pci_dev *pdev = NULL;
+
+ if (from)
+ dev_start = &from->dev;
+ dev = bus_find_device_reverse(&pci_bus_type, dev_start, (void *)id,
+ match_pci_dev_by_id);
+ if (dev)
+ pdev = to_pci_dev(dev);
+ pci_dev_put(from);
+ return pdev;
+}
+
+enum pci_search_direction {
+ PCI_SEARCH_FORWARD,
+ PCI_SEARCH_REVERSE,
+};
+
+static struct pci_dev *__pci_get_subsys(unsigned int vendor, unsigned int device,
+ unsigned int ss_vendor, unsigned int ss_device,
+ struct pci_dev *from, enum pci_search_direction dir)
+{
+ struct pci_device_id id = {
+ .vendor = vendor,
+ .device = device,
+ .subvendor = ss_vendor,
+ .subdevice = ss_device,
+ };
+
+ if (dir == PCI_SEARCH_FORWARD)
+ return pci_get_dev_by_id(&id, from);
+ else
+ return pci_get_dev_by_id_reverse(&id, from);
+}
+
/**
* pci_get_subsys - begin or continue searching for a PCI device by vendor/subvendor/device/subdevice id
* @vendor: PCI vendor id to match, or %PCI_ANY_ID to match all vendor ids
@@ -302,14 +341,8 @@ struct pci_dev *pci_get_subsys(unsigned int vendor, unsigned int device,
unsigned int ss_vendor, unsigned int ss_device,
struct pci_dev *from)
{
- struct pci_device_id id = {
- .vendor = vendor,
- .device = device,
- .subvendor = ss_vendor,
- .subdevice = ss_device,
- };
-
- return pci_get_dev_by_id(&id, from);
+ return __pci_get_subsys(vendor, device, ss_vendor, ss_device, from,
+ PCI_SEARCH_FORWARD);
}
EXPORT_SYMBOL(pci_get_subsys);
@@ -334,6 +367,19 @@ struct pci_dev *pci_get_device(unsigned int vendor, unsigned int device,
}
EXPORT_SYMBOL(pci_get_device);
+/*
+ * Same semantics as pci_get_device(), except walks the PCI device list
+ * in reverse discovery order.
+ */
+struct pci_dev *pci_get_device_reverse(unsigned int vendor,
+ unsigned int device,
+ struct pci_dev *from)
+{
+ return __pci_get_subsys(vendor, device, PCI_ANY_ID, PCI_ANY_ID, from,
+ PCI_SEARCH_REVERSE);
+}
+EXPORT_SYMBOL(pci_get_device_reverse);
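
/*
 * Illustrative sketch of the reverse iterator added above.  It follows the
 * same reference-counting rules as pci_get_device(): the reference on the
 * previous device is dropped and one is taken on the device returned, so a
 * plain loop needs no explicit pci_dev_put() unless it breaks out early and
 * keeps the device.  The Intel vendor ID is only an example.
 */
#include <linux/pci.h>

static void demo_walk_devices_in_reverse(void)
{
	struct pci_dev *pdev = NULL;

	while ((pdev = pci_get_device_reverse(PCI_VENDOR_ID_INTEL,
					      PCI_ANY_ID, pdev)))
		pci_info(pdev, "visited in reverse discovery order\n");
}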
+
/**
* pci_get_class - begin or continue searching for a PCI device by class
* @class: search for a PCI device with this class designation
@@ -364,6 +410,37 @@ struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from)
EXPORT_SYMBOL(pci_get_class);
/**
+ * pci_get_base_class - searching for a PCI device by matching against the base class code only
+ * @class: search for a PCI device with this base class code
+ * @from: Previous PCI device found in search, or %NULL for new search.
+ *
+ * Iterates through the list of known PCI devices. If a PCI device is found
+ * with a matching base class code, the reference count to the device is
+ * incremented. See pci_match_one_device() to figure out how does this works.
+ * A new search is initiated by passing %NULL as the @from argument.
+ * Otherwise if @from is not %NULL, searches continue from next device on the
+ * global list. The reference count for @from is always decremented if it is
+ * not %NULL.
+ *
+ * Returns:
+ * A pointer to a matched PCI device, %NULL otherwise.
+ */
+struct pci_dev *pci_get_base_class(unsigned int class, struct pci_dev *from)
+{
+ struct pci_device_id id = {
+ .vendor = PCI_ANY_ID,
+ .device = PCI_ANY_ID,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .class_mask = 0xFF0000,
+ .class = class << 16,
+ };
+
+ return pci_get_dev_by_id(&id, from);
+}
+EXPORT_SYMBOL(pci_get_base_class);
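
/*
 * Illustrative sketch: walking every device that matches a base class code,
 * irrespective of sub-class and programming interface.
 * PCI_BASE_CLASS_DISPLAY is only an example.
 */
#include <linux/pci.h>

static void demo_walk_display_class_devices(void)
{
	struct pci_dev *pdev = NULL;

	while ((pdev = pci_get_base_class(PCI_BASE_CLASS_DISPLAY, pdev)))
		pci_info(pdev, "display-class device\n");
}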
+
+/**
* pci_dev_present - Returns 1 if device matching the device list is present, 0 if not.
* @ids: A pointer to a null terminated list of struct pci_device_id structures
* that describe the type of PCI device the caller is trying to find.
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index fdeb121e9175..6e90f46f52af 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -14,6 +14,8 @@
* tighter packing. Prefetchable range support.
*/
+#include <linux/bitops.h>
+#include <linux/bug.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -21,10 +23,16 @@
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/cache.h>
+#include <linux/limits.h>
+#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include "pci.h"
+#define PCI_RES_TYPE_MASK \
+ (IORESOURCE_IO | IORESOURCE_MEM | IORESOURCE_PREFETCH |\
+ IORESOURCE_MEM_64)
+
unsigned int pci_flags;
EXPORT_SYMBOL_GPL(pci_flags);
@@ -124,27 +132,216 @@ static resource_size_t get_res_add_align(struct list_head *head,
return dev_res ? dev_res->min_align : 0;
}
+static void restore_dev_resource(struct pci_dev_resource *dev_res)
+{
+ struct resource *res = dev_res->res;
+
+ if (WARN_ON_ONCE(res->parent))
+ return;
+
+ res->start = dev_res->start;
+ res->end = dev_res->end;
+ res->flags = dev_res->flags;
+}
+
+/*
+ * Helper function for sizing routines. Assigned resources have non-NULL
+ * parent resource.
+ *
+ * Return first unassigned resource of the correct type. If there is none,
+ * return first assigned resource of the correct type. If none of the
+ * above, return NULL.
+ *
+ * Returning an assigned resource of the correct type allows the caller to
+ * distinguish between already assigned and no resource of the correct type.
+ */
+static struct resource *find_bus_resource_of_type(struct pci_bus *bus,
+ unsigned long type_mask,
+ unsigned long type)
+{
+ struct resource *r, *r_assigned = NULL;
+
+ pci_bus_for_each_resource(bus, r) {
+ if (!r || r == &ioport_resource || r == &iomem_resource)
+ continue;
+
+ if ((r->flags & type_mask) != type)
+ continue;
+
+ if (!r->parent)
+ return r;
+ if (!r_assigned)
+ r_assigned = r;
+ }
+ return r_assigned;
+}
+
+/**
+ * pbus_select_window_for_type - Select bridge window for a resource type
+ * @bus: PCI bus
+ * @type: Resource type (resource flags can be passed as is)
+ *
+ * Select the bridge window based on a resource @type.
+ *
+ * For memory resources, the selection is done as follows:
+ *
+ * Any non-prefetchable resource is put into the non-prefetchable window.
+ *
+ * If there is no prefetchable MMIO window, put all memory resources into the
+ * non-prefetchable window.
+ *
+ * If there's a 64-bit prefetchable MMIO window, put all 64-bit prefetchable
+ * resources into it and place 32-bit prefetchable memory into the
+ * non-prefetchable window.
+ *
+ * Otherwise, put all prefetchable resources into the prefetchable window.
+ *
+ * Return: the bridge window resource or NULL if no bridge window is found.
+ */
+static struct resource *pbus_select_window_for_type(struct pci_bus *bus,
+ unsigned long type)
+{
+ int iores_type = type & IORESOURCE_TYPE_BITS; /* w/o 64bit & pref */
+ struct resource *mmio, *mmio_pref, *win;
+
+ type &= PCI_RES_TYPE_MASK; /* with 64bit & pref */
+
+ if ((iores_type != IORESOURCE_IO) && (iores_type != IORESOURCE_MEM))
+ return NULL;
+
+ if (pci_is_root_bus(bus)) {
+ win = find_bus_resource_of_type(bus, type, type);
+ if (win)
+ return win;
+
+ type &= ~IORESOURCE_MEM_64;
+ win = find_bus_resource_of_type(bus, type, type);
+ if (win)
+ return win;
+
+ type &= ~IORESOURCE_PREFETCH;
+ return find_bus_resource_of_type(bus, type, type);
+ }
+
+ switch (iores_type) {
+ case IORESOURCE_IO:
+ return pci_bus_resource_n(bus, PCI_BUS_BRIDGE_IO_WINDOW);
+
+ case IORESOURCE_MEM:
+ mmio = pci_bus_resource_n(bus, PCI_BUS_BRIDGE_MEM_WINDOW);
+ mmio_pref = pci_bus_resource_n(bus, PCI_BUS_BRIDGE_PREF_MEM_WINDOW);
+
+ if (!(type & IORESOURCE_PREFETCH) ||
+ !(mmio_pref->flags & IORESOURCE_MEM))
+ return mmio;
+
+ if ((type & IORESOURCE_MEM_64) ||
+ !(mmio_pref->flags & IORESOURCE_MEM_64))
+ return mmio_pref;
+
+ return mmio;
+ default:
+ return NULL;
+ }
+}
+
+/**
+ * pbus_select_window - Select bridge window for a resource
+ * @bus: PCI bus
+ * @res: Resource
+ *
+ * Select the bridge window for @res. If the resource is already assigned,
+ * return the current bridge window.
+ *
+ * For memory resources, the selection is done as follows:
+ *
+ * Any non-prefetchable resource is put into the non-prefetchable window.
+ *
+ * If there is no prefetchable MMIO window, put all memory resources into the
+ * non-prefetchable window.
+ *
+ * If there's a 64-bit prefetchable MMIO window, put all 64-bit prefetchable
+ * resources into it and place 32-bit prefetchable memory into the
+ * non-prefetchable window.
+ *
+ * Otherwise, put all prefetchable resources into the prefetchable window.
+ *
+ * Return: the bridge window resource or NULL if no bridge window is found.
+ */
+struct resource *pbus_select_window(struct pci_bus *bus,
+ const struct resource *res)
+{
+ if (res->parent)
+ return res->parent;
+
+ return pbus_select_window_for_type(bus, res->flags);
+}
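
/*
 * Hedged illustration of the selection rules documented above, for a
 * hypothetical bridge whose prefetchable window is 64-bit:
 *
 *   64-bit prefetchable BAR -> prefetchable window
 *   32-bit prefetchable BAR -> non-prefetchable window (a 64-bit window may
 *                              sit above 4GB, which a 32-bit BAR cannot reach)
 *   non-prefetchable BAR    -> non-prefetchable window
 *   I/O BAR                 -> I/O window
 *
 * If the bridge exposes no prefetchable window at all, every memory BAR
 * falls back to the non-prefetchable window.
 */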
+
+static bool pdev_resources_assignable(struct pci_dev *dev)
+{
+ u16 class = dev->class >> 8, command;
+
+ /* Don't touch classless devices or host bridges or IOAPICs */
+ if (class == PCI_CLASS_NOT_DEFINED || class == PCI_CLASS_BRIDGE_HOST)
+ return false;
+
+ /* Don't touch IOAPIC devices already enabled by firmware */
+ if (class == PCI_CLASS_SYSTEM_PIC) {
+ pci_read_config_word(dev, PCI_COMMAND, &command);
+ if (command & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY))
+ return false;
+ }
+
+ return true;
+}
+
+static bool pdev_resource_assignable(struct pci_dev *dev, struct resource *res)
+{
+ int idx = pci_resource_num(dev, res);
+
+ if (!res->flags)
+ return false;
+
+ if (idx >= PCI_BRIDGE_RESOURCES && idx <= PCI_BRIDGE_RESOURCE_END &&
+ res->flags & IORESOURCE_DISABLED)
+ return false;
+
+ return true;
+}
+
+static bool pdev_resource_should_fit(struct pci_dev *dev, struct resource *res)
+{
+ if (res->parent)
+ return false;
+
+ if (res->flags & IORESOURCE_PCI_FIXED)
+ return false;
+
+ return pdev_resource_assignable(dev, res);
+}
+
/* Sort resources by alignment */
static void pdev_sort_resources(struct pci_dev *dev, struct list_head *head)
{
struct resource *r;
int i;
+ if (!pdev_resources_assignable(dev))
+ return;
+
pci_dev_for_each_resource(dev, r, i) {
+ const char *r_name = pci_resource_name(dev, i);
struct pci_dev_resource *dev_res, *tmp;
resource_size_t r_align;
struct list_head *n;
- if (r->flags & IORESOURCE_PCI_FIXED)
- continue;
-
- if (!(r->flags) || r->parent)
+ if (!pdev_resource_should_fit(dev, r))
continue;
r_align = pci_resource_alignment(dev, r);
if (!r_align) {
- pci_warn(dev, "BAR %d: %pR has bogus alignment\n",
- i, r);
+ pci_warn(dev, "%s %pR: alignment must not be zero\n",
+ r_name, r);
continue;
}
@@ -153,6 +350,9 @@ static void pdev_sort_resources(struct pci_dev *dev, struct list_head *head)
panic("%s: kzalloc() failed!\n", __func__);
tmp->res = r;
tmp->dev = dev;
+ tmp->start = r->start;
+ tmp->end = r->end;
+ tmp->flags = r->flags;
/* Fallback is smallest one or list is empty */
n = head;
@@ -172,27 +372,27 @@ static void pdev_sort_resources(struct pci_dev *dev, struct list_head *head)
}
}
-static void __dev_sort_resources(struct pci_dev *dev, struct list_head *head)
+bool pci_resource_is_optional(const struct pci_dev *dev, int resno)
{
- u16 class = dev->class >> 8;
+ const struct resource *res = pci_resource_n(dev, resno);
- /* Don't touch classless devices or host bridges or IOAPICs */
- if (class == PCI_CLASS_NOT_DEFINED || class == PCI_CLASS_BRIDGE_HOST)
- return;
+ if (pci_resource_is_iov(resno))
+ return true;
+ if (resno == PCI_ROM_RESOURCE && !(res->flags & IORESOURCE_ROM_ENABLE))
+ return true;
- /* Don't touch IOAPIC devices already enabled by firmware */
- if (class == PCI_CLASS_SYSTEM_PIC) {
- u16 command;
- pci_read_config_word(dev, PCI_COMMAND, &command);
- if (command & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY))
- return;
- }
-
- pdev_sort_resources(dev, head);
+ return false;
}
-static inline void reset_resource(struct resource *res)
+static inline void reset_resource(struct pci_dev *dev, struct resource *res)
{
+ int idx = pci_resource_num(dev, res);
+
+ if (idx >= PCI_BRIDGE_RESOURCES && idx <= PCI_BRIDGE_RESOURCE_END) {
+ res->flags |= IORESOURCE_UNSET;
+ return;
+ }
+
res->start = 0;
res->end = 0;
res->flags = 0;
@@ -212,9 +412,11 @@ static inline void reset_resource(struct resource *res)
static void reassign_resources_sorted(struct list_head *realloc_head,
struct list_head *head)
{
- struct resource *res;
struct pci_dev_resource *add_res, *tmp;
struct pci_dev_resource *dev_res;
+ struct pci_dev *dev;
+ struct resource *res;
+ const char *res_name;
resource_size_t add_size, align;
int idx;
@@ -222,8 +424,15 @@ static void reassign_resources_sorted(struct list_head *realloc_head,
bool found_match = false;
res = add_res->res;
- /* Skip resource that has been reset */
- if (!res->flags)
+ dev = add_res->dev;
+ idx = pci_resource_num(dev, res);
+
+ /*
+ * Skip resource that failed the earlier assignment and is
+ * not optional as it would just fail again.
+ */
+ if (!res->parent && resource_size(res) &&
+ !pci_resource_is_optional(dev, idx))
goto out;
/* Skip this resource if not found in head list */
@@ -236,22 +445,24 @@ static void reassign_resources_sorted(struct list_head *realloc_head,
if (!found_match) /* Just skip */
continue;
- idx = res - &add_res->dev->resource[0];
+ res_name = pci_resource_name(dev, idx);
add_size = add_res->add_size;
align = add_res->min_align;
- if (!resource_size(res)) {
- res->start = align;
- res->end = res->start + add_size - 1;
- if (pci_assign_resource(add_res->dev, idx))
- reset_resource(res);
- } else {
+ if (!res->parent) {
+ resource_set_range(res, align,
+ resource_size(res) + add_size);
+ if (pci_assign_resource(dev, idx)) {
+ pci_dbg(dev,
+ "%s %pR: ignoring failure in optional allocation\n",
+ res_name, res);
+ }
+ } else if (add_size > 0) {
res->flags |= add_res->flags &
(IORESOURCE_STARTALIGN|IORESOURCE_SIZEALIGN);
- if (pci_reassign_resource(add_res->dev, idx,
- add_size, align))
- pci_info(add_res->dev, "failed to add %llx res[%d]=%pR\n",
- (unsigned long long) add_size, idx,
- res);
+ if (pci_reassign_resource(dev, idx, add_size, align))
+ pci_info(dev, "%s %pR: failed to add optional %llx\n",
+ res_name, res,
+ (unsigned long long) add_size);
}
out:
list_del(&add_res->list);
@@ -265,36 +476,39 @@ out:
* @head: Head of the list tracking requests for resources
* @fail_head: Head of the list tracking requests that could not be
* allocated
+ * @optional: Assign also optional resources
*
* Satisfy resource requests of each element in the list. Add requests that
* could not be satisfied to the failed_list.
*/
static void assign_requested_resources_sorted(struct list_head *head,
- struct list_head *fail_head)
+ struct list_head *fail_head,
+ bool optional)
{
- struct resource *res;
struct pci_dev_resource *dev_res;
+ struct resource *res;
+ struct pci_dev *dev;
+ bool optional_res;
int idx;
list_for_each_entry(dev_res, head, list) {
res = dev_res->res;
- idx = res - &dev_res->dev->resource[0];
- if (resource_size(res) &&
- pci_assign_resource(dev_res->dev, idx)) {
+ dev = dev_res->dev;
+ idx = pci_resource_num(dev, res);
+ optional_res = pci_resource_is_optional(dev, idx);
+
+ if (!resource_size(res))
+ continue;
+
+ if (!optional && optional_res)
+ continue;
+
+ if (pci_assign_resource(dev, idx)) {
if (fail_head) {
- /*
- * If the failed resource is a ROM BAR and
- * it will be enabled later, don't add it
- * to the list.
- */
- if (!((idx == PCI_ROM_RESOURCE) &&
- (!(res->flags & IORESOURCE_ROM_ENABLE))))
- add_to_list(fail_head,
- dev_res->dev, res,
- 0 /* don't care */,
- 0 /* don't care */);
+ add_to_list(fail_head, dev, res,
+ 0 /* don't care */,
+ 0 /* don't care */);
}
- reset_resource(res);
}
}
}
@@ -339,6 +553,26 @@ static bool pci_need_to_release(unsigned long mask, struct resource *res)
return false; /* Should not get here */
}
+/* Return: @true if assignment of a required resource failed. */
+static bool pci_required_resource_failed(struct list_head *fail_head,
+ unsigned long type)
+{
+ struct pci_dev_resource *fail_res;
+
+ type &= PCI_RES_TYPE_MASK;
+
+ list_for_each_entry(fail_res, fail_head, list) {
+ int idx = pci_resource_num(fail_res->dev, fail_res->res);
+
+ if (type && (fail_res->flags & PCI_RES_TYPE_MASK) != type)
+ continue;
+
+ if (!pci_resource_is_optional(fail_res->dev, idx))
+ return true;
+ }
+ return false;
+}
+
static void __assign_resources_sorted(struct list_head *head,
struct list_head *realloc_head,
struct list_head *fail_head)
@@ -348,9 +582,11 @@ static void __assign_resources_sorted(struct list_head *head,
* adjacent, so later reassign can not reallocate them one by one in
* parent resource window.
*
- * Try to assign requested + add_size at beginning. If could do that,
- * could get out early. If could not do that, we still try to assign
- * requested at first, then try to reassign add_size for some resources.
+ * Try to assign required and any optional resources at beginning
+ * (add_size included). If all required resources were successfully
+ * assigned, get out early. If could not do that, we still try to
+ * assign required at first, then try to reassign some optional
+ * resources.
*
* Separate three resource type checking if we need to release
* assigned resource after requested + add_size try.
@@ -366,27 +602,34 @@ static void __assign_resources_sorted(struct list_head *head,
*/
LIST_HEAD(save_head);
LIST_HEAD(local_fail_head);
+ LIST_HEAD(dummy_head);
struct pci_dev_resource *save_res;
struct pci_dev_resource *dev_res, *tmp_res, *dev_res2;
+ struct resource *res;
+ struct pci_dev *dev;
unsigned long fail_type;
resource_size_t add_align, align;
+ if (!realloc_head)
+ realloc_head = &dummy_head;
+
/* Check if optional add_size is there */
- if (!realloc_head || list_empty(realloc_head))
- goto requested_and_reassign;
+ if (list_empty(realloc_head))
+ goto assign;
/* Save original start, end, flags etc at first */
list_for_each_entry(dev_res, head, list) {
if (add_to_list(&save_head, dev_res->dev, dev_res->res, 0, 0)) {
free_list(&save_head);
- goto requested_and_reassign;
+ goto assign;
}
}
/* Update res in head list with add_size in realloc_head list */
list_for_each_entry_safe(dev_res, tmp_res, head, list) {
- dev_res->res->end += get_res_add_size(realloc_head,
- dev_res->res);
+ res = dev_res->res;
+
+ res->end += get_res_add_size(realloc_head, res);
/*
* There are two kinds of additional resources in the list:
@@ -394,10 +637,10 @@ static void __assign_resources_sorted(struct list_head *head,
* 2. SR-IOV resource -- IORESOURCE_SIZEALIGN
* Here just fix the additional alignment for bridge
*/
- if (!(dev_res->res->flags & IORESOURCE_STARTALIGN))
+ if (!(res->flags & IORESOURCE_STARTALIGN))
continue;
- add_align = get_res_add_align(realloc_head, dev_res->res);
+ add_align = get_res_add_align(realloc_head, res);
/*
* The "head" list is sorted by alignment so resources with
@@ -406,11 +649,8 @@ static void __assign_resources_sorted(struct list_head *head,
* need to reorder the list by alignment to make it
* consistent.
*/
- if (add_align > dev_res->res->start) {
- resource_size_t r_size = resource_size(dev_res->res);
-
- dev_res->res->start = add_align;
- dev_res->res->end = add_align + r_size - 1;
+ if (add_align > res->start) {
+ resource_set_range(res, add_align, resource_size(res));
list_for_each_entry(dev_res2, head, list) {
align = pci_resource_alignment(dev_res2->dev,
@@ -425,54 +665,88 @@ static void __assign_resources_sorted(struct list_head *head,
}
- /* Try updated head list with add_size added */
- assign_requested_resources_sorted(head, &local_fail_head);
+assign:
+ assign_requested_resources_sorted(head, &local_fail_head, true);
- /* All assigned with add_size? */
+ /* All non-optional resources assigned? */
if (list_empty(&local_fail_head)) {
/* Remove head list from realloc_head list */
list_for_each_entry(dev_res, head, list)
remove_from_list(realloc_head, dev_res->res);
free_list(&save_head);
- free_list(head);
- return;
+ goto out;
+ }
+
+ /* Without realloc_head and only optional fails, nothing more to do. */
+ if (!pci_required_resource_failed(&local_fail_head, 0) &&
+ list_empty(realloc_head)) {
+ list_for_each_entry(save_res, &save_head, list) {
+ struct resource *res = save_res->res;
+
+ if (res->parent)
+ continue;
+
+ restore_dev_resource(save_res);
+ }
+ free_list(&local_fail_head);
+ free_list(&save_head);
+ goto out;
}
/* Check failed type */
fail_type = pci_fail_res_type_mask(&local_fail_head);
/* Remove not need to be released assigned res from head list etc */
- list_for_each_entry_safe(dev_res, tmp_res, head, list)
- if (dev_res->res->parent &&
- !pci_need_to_release(fail_type, dev_res->res)) {
+ list_for_each_entry_safe(dev_res, tmp_res, head, list) {
+ res = dev_res->res;
+
+ if (res->parent && !pci_need_to_release(fail_type, res)) {
/* Remove it from realloc_head list */
- remove_from_list(realloc_head, dev_res->res);
- remove_from_list(&save_head, dev_res->res);
+ remove_from_list(realloc_head, res);
+ remove_from_list(&save_head, res);
list_del(&dev_res->list);
kfree(dev_res);
}
+ }
free_list(&local_fail_head);
/* Release assigned resource */
- list_for_each_entry(dev_res, head, list)
- if (dev_res->res->parent)
- release_resource(dev_res->res);
- /* Restore start/end/flags from saved list */
- list_for_each_entry(save_res, &save_head, list) {
- struct resource *res = save_res->res;
+ list_for_each_entry(dev_res, head, list) {
+ res = dev_res->res;
+ dev = dev_res->dev;
- res->start = save_res->start;
- res->end = save_res->end;
- res->flags = save_res->flags;
+ pci_release_resource(dev, pci_resource_num(dev, res));
+ restore_dev_resource(dev_res);
}
+ /* Restore start/end/flags from saved list */
+ list_for_each_entry(save_res, &save_head, list)
+ restore_dev_resource(save_res);
free_list(&save_head);
-requested_and_reassign:
/* Satisfy the must-have resource requests */
- assign_requested_resources_sorted(head, fail_head);
+ assign_requested_resources_sorted(head, NULL, false);
/* Try to satisfy any additional optional resource requests */
- if (realloc_head)
+ if (!list_empty(realloc_head))
reassign_resources_sorted(realloc_head, head);
+
+out:
+ /* Reset any failed resource, cannot use fail_head as it can be NULL. */
+ list_for_each_entry(dev_res, head, list) {
+ res = dev_res->res;
+ dev = dev_res->dev;
+
+ if (res->parent)
+ continue;
+
+ if (fail_head) {
+ add_to_list(fail_head, dev, res,
+ 0 /* don't care */,
+ 0 /* don't care */);
+ }
+
+ reset_resource(dev, res);
+ }
+
free_list(head);
}
@@ -482,7 +756,7 @@ static void pdev_assign_resources_sorted(struct pci_dev *dev,
{
LIST_HEAD(head);
- __dev_sort_resources(dev, &head);
+ pdev_sort_resources(dev, &head);
__assign_resources_sorted(&head, add_head, fail_head);
}
@@ -495,7 +769,7 @@ static void pbus_assign_resources_sorted(const struct pci_bus *bus,
LIST_HEAD(head);
list_for_each_entry(dev, &bus->devices, bus_list)
- __dev_sort_resources(dev, &head);
+ pdev_sort_resources(dev, &head);
__assign_resources_sorted(&head, realloc_head, fail_head);
}
@@ -511,7 +785,7 @@ void pci_setup_cardbus(struct pci_bus *bus)
res = bus->resource[0];
pcibios_resource_to_bus(bridge->bus, &region, res);
- if (res->flags & IORESOURCE_IO) {
+ if (res->parent && res->flags & IORESOURCE_IO) {
/*
* The IO resource is allocated a range twice as large as it
* would normally need. This allows us to set both IO regs.
@@ -525,7 +799,7 @@ void pci_setup_cardbus(struct pci_bus *bus)
res = bus->resource[1];
pcibios_resource_to_bus(bridge->bus, &region, res);
- if (res->flags & IORESOURCE_IO) {
+ if (res->parent && res->flags & IORESOURCE_IO) {
pci_info(bridge, " bridge window %pR\n", res);
pci_write_config_dword(bridge, PCI_CB_IO_BASE_1,
region.start);
@@ -535,7 +809,7 @@ void pci_setup_cardbus(struct pci_bus *bus)
res = bus->resource[2];
pcibios_resource_to_bus(bridge->bus, &region, res);
- if (res->flags & IORESOURCE_MEM) {
+ if (res->parent && res->flags & IORESOURCE_MEM) {
pci_info(bridge, " bridge window %pR\n", res);
pci_write_config_dword(bridge, PCI_CB_MEMORY_BASE_0,
region.start);
@@ -545,7 +819,7 @@ void pci_setup_cardbus(struct pci_bus *bus)
res = bus->resource[3];
pcibios_resource_to_bus(bridge->bus, &region, res);
- if (res->flags & IORESOURCE_MEM) {
+ if (res->parent && res->flags & IORESOURCE_MEM) {
pci_info(bridge, " bridge window %pR\n", res);
pci_write_config_dword(bridge, PCI_CB_MEMORY_BASE_1,
region.start);
@@ -571,6 +845,7 @@ EXPORT_SYMBOL(pci_setup_cardbus);
static void pci_setup_bridge_io(struct pci_dev *bridge)
{
struct resource *res;
+ const char *res_name;
struct pci_bus_region region;
unsigned long io_mask;
u8 io_base_lo, io_limit_lo;
@@ -583,15 +858,16 @@ static void pci_setup_bridge_io(struct pci_dev *bridge)
/* Set up the top and bottom of the PCI I/O segment for this bus */
res = &bridge->resource[PCI_BRIDGE_IO_WINDOW];
+ res_name = pci_resource_name(bridge, PCI_BRIDGE_IO_WINDOW);
pcibios_resource_to_bus(bridge->bus, &region, res);
- if (res->flags & IORESOURCE_IO) {
+ if (res->parent && res->flags & IORESOURCE_IO) {
pci_read_config_word(bridge, PCI_IO_BASE, &l);
io_base_lo = (region.start >> 8) & io_mask;
io_limit_lo = (region.end >> 8) & io_mask;
l = ((u16) io_limit_lo << 8) | io_base_lo;
/* Set up upper 16 bits of I/O base/limit */
io_upper16 = (region.end & 0xffff0000) | (region.start >> 16);
- pci_info(bridge, " bridge window %pR\n", res);
+ pci_info(bridge, " %s %pR\n", res_name, res);
} else {
/* Clear upper 16 bits of I/O base/limit */
io_upper16 = 0;
@@ -608,16 +884,18 @@ static void pci_setup_bridge_io(struct pci_dev *bridge)
static void pci_setup_bridge_mmio(struct pci_dev *bridge)
{
struct resource *res;
+ const char *res_name;
struct pci_bus_region region;
u32 l;
/* Set up the top and bottom of the PCI Memory segment for this bus */
res = &bridge->resource[PCI_BRIDGE_MEM_WINDOW];
+ res_name = pci_resource_name(bridge, PCI_BRIDGE_MEM_WINDOW);
pcibios_resource_to_bus(bridge->bus, &region, res);
- if (res->flags & IORESOURCE_MEM) {
+ if (res->parent && res->flags & IORESOURCE_MEM) {
l = (region.start >> 16) & 0xfff0;
l |= region.end & 0xfff00000;
- pci_info(bridge, " bridge window %pR\n", res);
+ pci_info(bridge, " %s %pR\n", res_name, res);
} else {
l = 0x0000fff0;
}
@@ -627,6 +905,7 @@ static void pci_setup_bridge_mmio(struct pci_dev *bridge)
static void pci_setup_bridge_mmio_pref(struct pci_dev *bridge)
{
struct resource *res;
+ const char *res_name;
struct pci_bus_region region;
u32 l, bu, lu;
@@ -640,15 +919,16 @@ static void pci_setup_bridge_mmio_pref(struct pci_dev *bridge)
/* Set up PREF base/limit */
bu = lu = 0;
res = &bridge->resource[PCI_BRIDGE_PREF_MEM_WINDOW];
+ res_name = pci_resource_name(bridge, PCI_BRIDGE_PREF_MEM_WINDOW);
pcibios_resource_to_bus(bridge->bus, &region, res);
- if (res->flags & IORESOURCE_PREFETCH) {
+ if (res->parent && res->flags & IORESOURCE_PREFETCH) {
l = (region.start >> 16) & 0xfff0;
l |= region.end & 0xfff00000;
if (res->flags & IORESOURCE_MEM_64) {
bu = upper_32_bits(region.start);
lu = upper_32_bits(region.end);
}
- pci_info(bridge, " bridge window %pR\n", res);
+ pci_info(bridge, " %s %pR\n", res_name, res);
} else {
l = 0x0000fff0;
}
@@ -663,8 +943,7 @@ static void __pci_setup_bridge(struct pci_bus *bus, unsigned long type)
{
struct pci_dev *bridge = bus->self;
- pci_info(bridge, "PCI bridge to %pR\n",
- &bus->busn_res);
+ pci_info(bridge, "PCI bridge to %pR\n", &bus->busn_res);
if (type & IORESOURCE_IO)
pci_setup_bridge_io(bridge);
@@ -678,11 +957,28 @@ static void __pci_setup_bridge(struct pci_bus *bus, unsigned long type)
pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, bus->bridge_ctl);
}
+static void pci_setup_one_bridge_window(struct pci_dev *bridge, int resno)
+{
+ switch (resno) {
+ case PCI_BRIDGE_IO_WINDOW:
+ pci_setup_bridge_io(bridge);
+ break;
+ case PCI_BRIDGE_MEM_WINDOW:
+ pci_setup_bridge_mmio(bridge);
+ break;
+ case PCI_BRIDGE_PREF_MEM_WINDOW:
+ pci_setup_bridge_mmio_pref(bridge);
+ break;
+ default:
+ return;
+ }
+}
+
void __weak pcibios_setup_bridge(struct pci_bus *bus, unsigned long type)
{
}
-void pci_setup_bridge(struct pci_bus *bus)
+static void pci_setup_bridge(struct pci_bus *bus)
{
unsigned long type = IORESOURCE_IO | IORESOURCE_MEM |
IORESOURCE_PREFETCH;
@@ -694,6 +990,8 @@ void pci_setup_bridge(struct pci_bus *bus)
int pci_claim_bridge_resource(struct pci_dev *bridge, int i)
{
+ int ret = -EINVAL;
+
if (i < PCI_BRIDGE_RESOURCES || i > PCI_BRIDGE_RESOURCE_END)
return 0;
@@ -703,27 +1001,16 @@ int pci_claim_bridge_resource(struct pci_dev *bridge, int i)
if ((bridge->class >> 8) != PCI_CLASS_BRIDGE_PCI)
return 0;
- if (!pci_bus_clip_resource(bridge, i))
- return -EINVAL; /* Clipping didn't change anything */
-
- switch (i) {
- case PCI_BRIDGE_IO_WINDOW:
- pci_setup_bridge_io(bridge);
- break;
- case PCI_BRIDGE_MEM_WINDOW:
- pci_setup_bridge_mmio(bridge);
- break;
- case PCI_BRIDGE_PREF_MEM_WINDOW:
- pci_setup_bridge_mmio_pref(bridge);
- break;
- default:
+ if (i > PCI_BRIDGE_PREF_MEM_WINDOW)
return -EINVAL;
- }
- if (pci_claim_resource(bridge, i) == 0)
- return 0; /* Claimed a smaller window */
+ /* Try to clip the resource and claim the smaller window */
+ if (pci_bus_clip_resource(bridge, i))
+ ret = pci_claim_resource(bridge, i);
+
+ pci_setup_one_bridge_window(bridge, i);
- return -EINVAL;
+ return ret;
}
/*
@@ -754,34 +1041,6 @@ static void pci_bridge_check_ranges(struct pci_bus *bus)
}
}
-/*
- * Helper function for sizing routines. Assigned resources have non-NULL
- * parent resource.
- *
- * Return first unassigned resource of the correct type. If there is none,
- * return first assigned resource of the correct type. If none of the
- * above, return NULL.
- *
- * Returning an assigned resource of the correct type allows the caller to
- * distinguish between already assigned and no resource of the correct type.
- */
-static struct resource *find_bus_resource_of_type(struct pci_bus *bus,
- unsigned long type_mask,
- unsigned long type)
-{
- struct resource *r, *r_assigned = NULL;
-
- pci_bus_for_each_resource(bus, r) {
- if (r == &ioport_resource || r == &iomem_resource)
- continue;
- if (r && (r->flags & type_mask) == type && !r->parent)
- return r;
- if (r && (r->flags & type_mask) == type && !r_assigned)
- r_assigned = r;
- }
- return r_assigned;
-}
-
static resource_size_t calculate_iosize(resource_size_t size,
resource_size_t min_size,
resource_size_t size1,
@@ -802,11 +1061,9 @@ static resource_size_t calculate_iosize(resource_size_t size,
size = (size & 0xff) + ((size & ~0xffUL) << 2);
#endif
size = size + size1;
- if (size < old_size)
- size = old_size;
- size = ALIGN(max(size, add_size) + children_add_size, align);
- return size;
+ size = max(size, add_size) + children_add_size;
+ return ALIGN(max(size, old_size), align);
}
static resource_size_t calculate_memsize(resource_size_t size,
@@ -820,11 +1077,9 @@ static resource_size_t calculate_memsize(resource_size_t size,
size = min_size;
if (old_size == 1)
old_size = 0;
- if (size < old_size)
- size = old_size;
- size = ALIGN(max(size, add_size) + children_add_size, align);
- return size;
+ size = max(size, add_size) + children_add_size;
+ return ALIGN(max(size, old_size), align);
}
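A quick worked example of the reordered sizing arithmetic, with illustrative values rather than anything taken from the patch:

	/* size = 256K, add_size = 0, children_add_size = 512K,
	 * old_size = 1M, align = 1M
	 *
	 * old: ALIGN(max(max(size, old_size), add_size) + children_add_size, align)
	 *    = ALIGN(1M + 512K, 1M) = 2M
	 * new: ALIGN(max(max(size, add_size) + children_add_size, old_size), align)
	 *    = ALIGN(max(768K, 1M), 1M) = 1M
	 */

The optional space is now folded in before the comparison against the old window size, so it is no longer stacked on top of a window that is already large enough.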
resource_size_t __weak pcibios_window_alignment(struct pci_bus *bus,
@@ -833,9 +1088,9 @@ resource_size_t __weak pcibios_window_alignment(struct pci_bus *bus,
return 1;
}
-#define PCI_P2P_DEFAULT_MEM_ALIGN 0x100000 /* 1MiB */
-#define PCI_P2P_DEFAULT_IO_ALIGN 0x1000 /* 4KiB */
-#define PCI_P2P_DEFAULT_IO_ALIGN_1K 0x400 /* 1KiB */
+#define PCI_P2P_DEFAULT_MEM_ALIGN SZ_1M
+#define PCI_P2P_DEFAULT_IO_ALIGN SZ_4K
+#define PCI_P2P_DEFAULT_IO_ALIGN_1K SZ_1K
static resource_size_t window_alignment(struct pci_bus *bus, unsigned long type)
{
@@ -876,8 +1131,7 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
struct list_head *realloc_head)
{
struct pci_dev *dev;
- struct resource *b_res = find_bus_resource_of_type(bus, IORESOURCE_IO,
- IORESOURCE_IO);
+ struct resource *b_res = pbus_select_window_for_type(bus, IORESOURCE_IO);
resource_size_t size = 0, size0 = 0, size1 = 0;
resource_size_t children_add_size = 0;
resource_size_t min_align, align;
@@ -898,9 +1152,12 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
if (r->parent || !(r->flags & IORESOURCE_IO))
continue;
- r_size = resource_size(r);
- if (r_size < 0x400)
+ if (!pdev_resource_assignable(dev, r))
+ continue;
+
+ r_size = resource_size(r);
+ if (r_size < SZ_1K)
/* Might be re-aligned for ISA */
size += r_size;
else
@@ -917,21 +1174,29 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
size0 = calculate_iosize(size, min_size, size1, 0, 0,
resource_size(b_res), min_align);
- size1 = (!realloc_head || (realloc_head && !add_size && !children_add_size)) ? size0 :
- calculate_iosize(size, min_size, size1, add_size, children_add_size,
- resource_size(b_res), min_align);
+
+ if (size0)
+ b_res->flags &= ~IORESOURCE_DISABLED;
+
+ size1 = size0;
+ if (realloc_head && (add_size > 0 || children_add_size > 0)) {
+ size1 = calculate_iosize(size, min_size, size1, add_size,
+ children_add_size, resource_size(b_res),
+ min_align);
+ }
+
if (!size0 && !size1) {
if (bus->self && (b_res->start || b_res->end))
pci_info(bus->self, "disabling bridge window %pR to %pR (unused)\n",
b_res, &bus->busn_res);
- b_res->flags = 0;
+ b_res->flags |= IORESOURCE_DISABLED;
return;
}
- b_res->start = min_align;
- b_res->end = b_res->start + size0 - 1;
+ resource_set_range(b_res, min_align, size0);
b_res->flags |= IORESOURCE_STARTALIGN;
if (bus->self && size1 > size0 && realloc_head) {
+ b_res->flags &= ~IORESOURCE_DISABLED;
add_to_list(realloc_head, bus->self, b_res, size1-size0,
min_align);
pci_info(bus->self, "bridge window %pR to %pR add_size %llx\n",
@@ -950,7 +1215,7 @@ static inline resource_size_t calculate_mem_align(resource_size_t *aligns,
for (order = 0; order <= max_order; order++) {
resource_size_t align1 = 1;
- align1 <<= (order + 20);
+ align1 <<= order + __ffs(SZ_1M);
if (!align)
min_align = align1;
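SZ_1M is 1 << 20, so __ffs(SZ_1M) evaluates to 20 and the shift is arithmetically identical to the previous open-coded form:

	align1 <<= order + __ffs(SZ_1M);	/* same as: align1 <<= order + 20 */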
@@ -963,46 +1228,107 @@ static inline resource_size_t calculate_mem_align(resource_size_t *aligns,
}
/**
+ * pbus_upstream_space_available - Check that no upstream resource limits the allocation
+ * @bus: The bus
+ * @res: The resource to help select the correct bridge window
+ * @size: The size required from the bridge window
+ * @align: Required alignment for the resource
+ *
+ * Check that @size can fit inside the upstream bridge resources that are
+ * already assigned. Select the upstream bridge window based on the type of
+ * @res.
+ *
+ * Return: %true if enough space is available on all assigned upstream
+ * resources.
+ */
+static bool pbus_upstream_space_available(struct pci_bus *bus,
+ struct resource *res,
+ resource_size_t size,
+ resource_size_t align)
+{
+ struct resource_constraint constraint = {
+ .max = RESOURCE_SIZE_MAX,
+ .align = align,
+ };
+ struct pci_bus *downstream = bus;
+
+ while ((bus = bus->parent)) {
+ if (pci_is_root_bus(bus))
+ break;
+
+ res = pbus_select_window(bus, res);
+ if (!res)
+ return false;
+ if (!res->parent)
+ continue;
+
+ if (resource_size(res) >= size) {
+ struct resource gap = {};
+
+ if (find_resource_space(res, &gap, size, &constraint) == 0) {
+ gap.flags = res->flags;
+ pci_dbg(bus->self,
+ "Assigned bridge window %pR to %pR free space at %pR\n",
+ res, &bus->busn_res, &gap);
+ return true;
+ }
+ }
+
+ if (bus->self) {
+ pci_info(bus->self,
+ "Assigned bridge window %pR to %pR cannot fit 0x%llx required for %s bridging to %pR\n",
+ res, &bus->busn_res,
+ (unsigned long long)size,
+ pci_name(downstream->self),
+ &downstream->busn_res);
+ }
+
+ return false;
+ }
+
+ return true;
+}
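A stripped-down, standalone sketch of the upstream walk this helper performs; the struct and field names below are invented for illustration, and the kernel helper additionally looks for a suitably aligned gap via find_resource_space() rather than comparing only the window size:

	#include <stdbool.h>
	#include <stdint.h>

	struct toy_bus {
		struct toy_bus *parent;
		bool is_root;
		bool win_assigned;
		uint64_t win_size;	/* size of the assigned upstream window */
	};

	/* Walk towards the root; every assigned window must be able to hold @size. */
	static bool toy_upstream_space_available(struct toy_bus *bus, uint64_t size)
	{
		while ((bus = bus->parent)) {
			if (bus->is_root)
				break;
			if (!bus->win_assigned)
				continue;
			if (bus->win_size < size)
				return false;
		}
		return true;
	}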
+
+/**
* pbus_size_mem() - Size the memory window of a given bus
*
* @bus: The bus
- * @mask: Mask the resource flag, then compare it with type
- * @type: The type of free resource from bridge
- * @type2: Second match type
- * @type3: Third match type
+ * @type: The type of bridge resource
* @min_size: The minimum memory window that must be allocated
* @add_size: Additional optional memory window
* @realloc_head: Track the additional memory window on this list
*
- * Calculate the size of the bus and minimal alignment which guarantees
- * that all child resources fit in this size.
+ * Calculate the size of the bus resource for @type and minimal alignment
+ * which guarantees that all child resources fit in this size.
+ *
+ * Set the bus resource start/end to indicate the required size if there is
+ * an available unassigned bus resource of the desired @type.
*
- * Return -ENOSPC if there's no available bus resource of the desired
- * type. Otherwise, set the bus resource start/end to indicate the
- * required size, add things to realloc_head (if supplied), and return 0.
+ * Add optional resource requests to the @realloc_head list if it is
+ * supplied.
*/
-static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
- unsigned long type, unsigned long type2,
- unsigned long type3, resource_size_t min_size,
+static void pbus_size_mem(struct pci_bus *bus, unsigned long type,
+ resource_size_t min_size,
resource_size_t add_size,
struct list_head *realloc_head)
{
struct pci_dev *dev;
- resource_size_t min_align, align, size, size0, size1;
- resource_size_t aligns[24]; /* Alignments from 1MB to 8TB */
+ resource_size_t min_align, win_align, align, size, size0, size1 = 0;
+ resource_size_t aligns[28]; /* Alignments from 1MB to 128TB */
int order, max_order;
- struct resource *b_res = find_bus_resource_of_type(bus,
- mask | IORESOURCE_PREFETCH, type);
+ struct resource *b_res = pbus_select_window_for_type(bus, type);
resource_size_t children_add_size = 0;
resource_size_t children_add_align = 0;
resource_size_t add_align = 0;
+ resource_size_t relaxed_align;
+ resource_size_t old_size;
if (!b_res)
- return -ENOSPC;
+ return;
/* If resource is already assigned, nothing more to do */
if (b_res->parent)
- return 0;
+ return;
memset(aligns, 0, sizeof(aligns));
max_order = 0;
@@ -1013,25 +1339,25 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
int i;
pci_dev_for_each_resource(dev, r, i) {
+ const char *r_name = pci_resource_name(dev, i);
resource_size_t r_size;
- if (r->parent || (r->flags & IORESOURCE_PCI_FIXED) ||
- ((r->flags & mask) != type &&
- (r->flags & mask) != type2 &&
- (r->flags & mask) != type3))
+ if (!pdev_resources_assignable(dev) ||
+ !pdev_resource_should_fit(dev, r))
+ continue;
+ if (b_res != pbus_select_window(bus, r))
continue;
+
r_size = resource_size(r);
-#ifdef CONFIG_PCI_IOV
+
/* Put SRIOV requested res to the optional list */
- if (realloc_head && i >= PCI_IOV_RESOURCES &&
- i <= PCI_IOV_RESOURCE_END) {
+ if (realloc_head && pci_resource_is_optional(dev, i)) {
add_align = max(pci_resource_alignment(dev, r), add_align);
- r->end = r->start - 1;
- add_to_list(realloc_head, dev, r, r_size, 0 /* Don't care */);
+ add_to_list(realloc_head, dev, r, 0, 0 /* Don't care */);
children_add_size += r_size;
continue;
}
-#endif
+
/*
* aligns[0] is for 1MB (since bridge memory
* windows are always at least 1MB aligned), so
@@ -1039,12 +1365,12 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
* resources.
*/
align = pci_resource_alignment(dev, r);
- order = __ffs(align) - 20;
+ order = __ffs(align) - __ffs(SZ_1M);
if (order < 0)
order = 0;
if (order >= ARRAY_SIZE(aligns)) {
- pci_warn(dev, "disabling BAR %d: %pR (bad alignment %#llx)\n",
- i, r, (unsigned long long) align);
+ pci_warn(dev, "%s %pR: disabling; bad alignment %#llx\n",
+ r_name, r, (unsigned long long) align);
r->flags = 0;
continue;
}
@@ -1066,31 +1392,64 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
}
}
+ old_size = resource_size(b_res);
+ win_align = window_alignment(bus, b_res->flags);
min_align = calculate_mem_align(aligns, max_order);
- min_align = max(min_align, window_alignment(bus, b_res->flags));
- size0 = calculate_memsize(size, min_size, 0, 0, resource_size(b_res), min_align);
- add_align = max(min_align, add_align);
- size1 = (!realloc_head || (realloc_head && !add_size && !children_add_size)) ? size0 :
- calculate_memsize(size, min_size, add_size, children_add_size,
- resource_size(b_res), add_align);
+ min_align = max(min_align, win_align);
+ size0 = calculate_memsize(size, min_size, 0, 0, old_size, min_align);
+
+ if (size0) {
+ resource_set_range(b_res, min_align, size0);
+ b_res->flags &= ~IORESOURCE_DISABLED;
+ }
+
+ if (bus->self && size0 &&
+ !pbus_upstream_space_available(bus, b_res, size0, min_align)) {
+ relaxed_align = 1ULL << (max_order + __ffs(SZ_1M));
+ relaxed_align = max(relaxed_align, win_align);
+ min_align = min(min_align, relaxed_align);
+ size0 = calculate_memsize(size, min_size, 0, 0, old_size, win_align);
+ resource_set_range(b_res, min_align, size0);
+ pci_info(bus->self, "bridge window %pR to %pR requires relaxed alignment rules\n",
+ b_res, &bus->busn_res);
+ }
+
+ if (realloc_head && (add_size > 0 || children_add_size > 0)) {
+ add_align = max(min_align, add_align);
+ size1 = calculate_memsize(size, min_size, add_size, children_add_size,
+ old_size, add_align);
+
+ if (bus->self && size1 &&
+ !pbus_upstream_space_available(bus, b_res, size1, add_align)) {
+ relaxed_align = 1ULL << (max_order + __ffs(SZ_1M));
+ relaxed_align = max(relaxed_align, win_align);
+ min_align = min(min_align, relaxed_align);
+ size1 = calculate_memsize(size, min_size, add_size, children_add_size,
+ old_size, win_align);
+ pci_info(bus->self,
+ "bridge window %pR to %pR requires relaxed alignment rules\n",
+ b_res, &bus->busn_res);
+ }
+ }
+
if (!size0 && !size1) {
if (bus->self && (b_res->start || b_res->end))
pci_info(bus->self, "disabling bridge window %pR to %pR (unused)\n",
b_res, &bus->busn_res);
- b_res->flags = 0;
- return 0;
+ b_res->flags |= IORESOURCE_DISABLED;
+ return;
}
- b_res->start = min_align;
- b_res->end = size0 + min_align - 1;
+
+ resource_set_range(b_res, min_align, size0);
b_res->flags |= IORESOURCE_STARTALIGN;
if (bus->self && size1 > size0 && realloc_head) {
+ b_res->flags &= ~IORESOURCE_DISABLED;
add_to_list(realloc_head, bus->self, b_res, size1-size0, add_align);
pci_info(bus->self, "bridge window %pR to %pR add_size %llx add_align %llx\n",
b_res, &bus->busn_res,
(unsigned long long) (size1 - size0),
(unsigned long long) add_align);
}
- return 0;
}
unsigned long pci_cardbus_resource_alignment(struct resource *res)
@@ -1117,8 +1476,7 @@ static void pci_bus_size_cardbus(struct pci_bus *bus,
* Reserve some resources for CardBus. We reserve a fixed amount
* of bus space for CardBus bridges.
*/
- b_res->start = pci_cardbus_io_size;
- b_res->end = b_res->start + pci_cardbus_io_size - 1;
+ resource_set_range(b_res, pci_cardbus_io_size, pci_cardbus_io_size);
b_res->flags |= IORESOURCE_IO | IORESOURCE_STARTALIGN;
if (realloc_head) {
b_res->end -= pci_cardbus_io_size;
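The resource_set_range() conversions here and above are purely mechanical; the helper from <linux/ioport.h> expands to the open-coded pair it replaces:

	resource_set_range(res, start, size);
	/* is equivalent to */
	res->start = start;
	res->end = start + size - 1;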
@@ -1130,8 +1488,7 @@ handle_b_res_1:
b_res = &bridge->resource[PCI_CB_BRIDGE_IO_1_WINDOW];
if (b_res->parent)
goto handle_b_res_2;
- b_res->start = pci_cardbus_io_size;
- b_res->end = b_res->start + pci_cardbus_io_size - 1;
+ resource_set_range(b_res, pci_cardbus_io_size, pci_cardbus_io_size);
b_res->flags |= IORESOURCE_IO | IORESOURCE_STARTALIGN;
if (realloc_head) {
b_res->end -= pci_cardbus_io_size;
@@ -1164,8 +1521,8 @@ handle_b_res_2:
* Otherwise, allocate one region of twice the size.
*/
if (ctrl & PCI_CB_BRIDGE_CTL_PREFETCH_MEM0) {
- b_res->start = pci_cardbus_mem_size;
- b_res->end = b_res->start + pci_cardbus_mem_size - 1;
+ resource_set_range(b_res, pci_cardbus_mem_size,
+ pci_cardbus_mem_size);
b_res->flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH |
IORESOURCE_STARTALIGN;
if (realloc_head) {
@@ -1182,8 +1539,7 @@ handle_b_res_3:
b_res = &bridge->resource[PCI_CB_BRIDGE_MEM_1_WINDOW];
if (b_res->parent)
goto handle_done;
- b_res->start = pci_cardbus_mem_size;
- b_res->end = b_res->start + b_res_3_size - 1;
+ resource_set_range(b_res, pci_cardbus_mem_size, b_res_3_size);
b_res->flags |= IORESOURCE_MEM | IORESOURCE_STARTALIGN;
if (realloc_head) {
b_res->end -= b_res_3_size;
@@ -1198,12 +1554,11 @@ handle_done:
void __pci_bus_size_bridges(struct pci_bus *bus, struct list_head *realloc_head)
{
struct pci_dev *dev;
- unsigned long mask, prefmask, type2 = 0, type3 = 0;
resource_size_t additional_io_size = 0, additional_mmio_size = 0,
additional_mmio_pref_size = 0;
struct resource *pref;
struct pci_host_bridge *host;
- int hdr_type, ret;
+ int hdr_type;
list_for_each_entry(dev, &bus->devices, bus_list) {
struct pci_bus *b = dev->subordinate;
@@ -1253,71 +1608,15 @@ void __pci_bus_size_bridges(struct pci_bus *bus, struct list_head *realloc_head)
pbus_size_io(bus, realloc_head ? 0 : additional_io_size,
additional_io_size, realloc_head);
- /*
- * If there's a 64-bit prefetchable MMIO window, compute
- * the size required to put all 64-bit prefetchable
- * resources in it.
- */
- mask = IORESOURCE_MEM;
- prefmask = IORESOURCE_MEM | IORESOURCE_PREFETCH;
- if (pref && (pref->flags & IORESOURCE_MEM_64)) {
- prefmask |= IORESOURCE_MEM_64;
- ret = pbus_size_mem(bus, prefmask, prefmask,
- prefmask, prefmask,
- realloc_head ? 0 : additional_mmio_pref_size,
- additional_mmio_pref_size, realloc_head);
-
- /*
- * If successful, all non-prefetchable resources
- * and any 32-bit prefetchable resources will go in
- * the non-prefetchable window.
- */
- if (ret == 0) {
- mask = prefmask;
- type2 = prefmask & ~IORESOURCE_MEM_64;
- type3 = prefmask & ~IORESOURCE_PREFETCH;
- }
- }
-
- /*
- * If there is no 64-bit prefetchable window, compute the
- * size required to put all prefetchable resources in the
- * 32-bit prefetchable window (if there is one).
- */
- if (!type2) {
- prefmask &= ~IORESOURCE_MEM_64;
- ret = pbus_size_mem(bus, prefmask, prefmask,
- prefmask, prefmask,
- realloc_head ? 0 : additional_mmio_pref_size,
- additional_mmio_pref_size, realloc_head);
-
- /*
- * If successful, only non-prefetchable resources
- * will go in the non-prefetchable window.
- */
- if (ret == 0)
- mask = prefmask;
- else
- additional_mmio_size += additional_mmio_pref_size;
-
- type2 = type3 = IORESOURCE_MEM;
+ if (pref && (pref->flags & IORESOURCE_PREFETCH)) {
+ pbus_size_mem(bus,
+ IORESOURCE_MEM | IORESOURCE_PREFETCH |
+ (pref->flags & IORESOURCE_MEM_64),
+ realloc_head ? 0 : additional_mmio_pref_size,
+ additional_mmio_pref_size, realloc_head);
}
- /*
- * Compute the size required to put everything else in the
- * non-prefetchable window. This includes:
- *
- * - all non-prefetchable resources
- * - 32-bit prefetchable resources if there's a 64-bit
- * prefetchable window or no prefetchable window at all
- * - 64-bit prefetchable resources if there's no prefetchable
- * window at all
- *
- * Note that the strategy in __pci_assign_resource() must match
- * that used here. Specifically, we cannot put a 32-bit
- * prefetchable resource in a 64-bit prefetchable window.
- */
- pbus_size_mem(bus, mask, IORESOURCE_MEM, type2, type3,
+ pbus_size_mem(bus, IORESOURCE_MEM,
realloc_head ? 0 : additional_mmio_size,
additional_mmio_size, realloc_head);
break;
@@ -1509,67 +1808,25 @@ static void __pci_bridge_assign_resources(const struct pci_dev *bridge,
}
}
-#define PCI_RES_TYPE_MASK \
- (IORESOURCE_IO | IORESOURCE_MEM | IORESOURCE_PREFETCH |\
- IORESOURCE_MEM_64)
-
static void pci_bridge_release_resources(struct pci_bus *bus,
- unsigned long type)
+ struct resource *b_win)
{
struct pci_dev *dev = bus->self;
- struct resource *r;
- unsigned int old_flags;
- struct resource *b_res;
- int idx = 1;
+ int idx, ret;
- b_res = &dev->resource[PCI_BRIDGE_RESOURCES];
-
- /*
- * 1. If IO port assignment fails, release bridge IO port.
- * 2. If non pref MMIO assignment fails, release bridge nonpref MMIO.
- * 3. If 64bit pref MMIO assignment fails, and bridge pref is 64bit,
- * release bridge pref MMIO.
- * 4. If pref MMIO assignment fails, and bridge pref is 32bit,
- * release bridge pref MMIO.
- * 5. If pref MMIO assignment fails, and bridge pref is not
- * assigned, release bridge nonpref MMIO.
- */
- if (type & IORESOURCE_IO)
- idx = 0;
- else if (!(type & IORESOURCE_PREFETCH))
- idx = 1;
- else if ((type & IORESOURCE_MEM_64) &&
- (b_res[2].flags & IORESOURCE_MEM_64))
- idx = 2;
- else if (!(b_res[2].flags & IORESOURCE_MEM_64) &&
- (b_res[2].flags & IORESOURCE_PREFETCH))
- idx = 2;
- else
- idx = 1;
-
- r = &b_res[idx];
-
- if (!r->parent)
+ if (!b_win->parent)
return;
+ idx = pci_resource_num(dev, b_win);
+
/* If there are children, release them all */
- release_child_resources(r);
- if (!release_resource(r)) {
- type = old_flags = r->flags & PCI_RES_TYPE_MASK;
- pci_info(dev, "resource %d %pR released\n",
- PCI_BRIDGE_RESOURCES + idx, r);
- /* Keep the old size */
- r->end = resource_size(r) - 1;
- r->start = 0;
- r->flags = 0;
+ release_child_resources(b_win);
- /* Avoiding touch the one without PREF */
- if (type & IORESOURCE_PREFETCH)
- type = IORESOURCE_PREFETCH;
- __pci_setup_bridge(bus, type);
- /* For next child res under same bridge */
- r->flags = old_flags;
- }
+ ret = pci_release_resource(dev, idx);
+ if (ret)
+ return;
+
+ pci_setup_one_bridge_window(dev, idx);
}
enum release_type {
@@ -1582,7 +1839,7 @@ enum release_type {
* a larger window later.
*/
static void pci_bus_release_bridge_resources(struct pci_bus *bus,
- unsigned long type,
+ struct resource *b_win,
enum release_type rel_type)
{
struct pci_dev *dev;
@@ -1590,6 +1847,8 @@ static void pci_bus_release_bridge_resources(struct pci_bus *bus,
list_for_each_entry(dev, &bus->devices, bus_list) {
struct pci_bus *b = dev->subordinate;
+ struct resource *res;
+
if (!b)
continue;
@@ -1598,9 +1857,15 @@ static void pci_bus_release_bridge_resources(struct pci_bus *bus,
if ((dev->class >> 8) != PCI_CLASS_BRIDGE_PCI)
continue;
- if (rel_type == whole_subtree)
- pci_bus_release_bridge_resources(b, type,
- whole_subtree);
+ if (rel_type != whole_subtree)
+ continue;
+
+ pci_bus_for_each_resource(b, res) {
+ if (res->parent != b_win)
+ continue;
+
+ pci_bus_release_bridge_resources(b, res, rel_type);
+ }
}
if (pci_is_root_bus(bus))
@@ -1610,7 +1875,7 @@ static void pci_bus_release_bridge_resources(struct pci_bus *bus,
return;
if ((rel_type == whole_subtree) || is_leaf_bridge)
- pci_bridge_release_resources(bus, type);
+ pci_bridge_release_resources(bus, b_win);
}
static void pci_bus_dump_res(struct pci_bus *bus)
@@ -1694,7 +1959,8 @@ static int iov_resources_unassigned(struct pci_dev *dev, void *data)
bool *unassigned = data;
for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
- struct resource *r = &dev->resource[i + PCI_IOV_RESOURCES];
+ int idx = pci_resource_num_from_vf_bar(i);
+ struct resource *r = &dev->resource[idx];
struct pci_bus_region region;
/* Not assigned or rejected by kernel? */
@@ -1762,7 +2028,7 @@ static void adjust_bridge_window(struct pci_dev *bridge, struct resource *res,
return;
}
- res->end = res->start + new_size - 1;
+ resource_set_size(res, new_size);
/* If the resource is part of the add_list, remove it now */
if (add_list)
@@ -1784,36 +2050,27 @@ static void remove_dev_resource(struct resource *avail, struct pci_dev *dev,
avail->start = min(avail->start + tmp, avail->end + 1);
}
-static void remove_dev_resources(struct pci_dev *dev, struct resource *io,
- struct resource *mmio,
- struct resource *mmio_pref)
+static void remove_dev_resources(struct pci_dev *dev,
+ struct resource available[PCI_P2P_BRIDGE_RESOURCE_NUM])
{
- struct resource *res;
+ struct resource *res, *b_win;
+ int idx;
pci_dev_for_each_resource(dev, res) {
- if (resource_type(res) == IORESOURCE_IO) {
- remove_dev_resource(io, dev, res);
- } else if (resource_type(res) == IORESOURCE_MEM) {
+ b_win = pbus_select_window(dev->bus, res);
+ if (!b_win)
+ continue;
- /*
- * Make sure prefetchable memory is reduced from
- * the correct resource. Specifically we put 32-bit
- * prefetchable memory in non-prefetchable window
- * if there is an 64-bit pretchable window.
- *
- * See comments in __pci_bus_size_bridges() for
- * more information.
- */
- if ((res->flags & IORESOURCE_PREFETCH) &&
- ((res->flags & IORESOURCE_MEM_64) ==
- (mmio_pref->flags & IORESOURCE_MEM_64)))
- remove_dev_resource(mmio_pref, dev, res);
- else
- remove_dev_resource(mmio, dev, res);
- }
+ idx = pci_resource_num(dev->bus->self, b_win);
+ idx -= PCI_BRIDGE_RESOURCES;
+
+ remove_dev_resource(&available[idx], dev, res);
}
}
+#define ALIGN_DOWN_IF_NONZERO(addr, align) \
+ ((align) ? ALIGN_DOWN((addr), (align)) : (addr))
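+
+Two illustrative evaluations of the new macro; the zero-alignment case is the reason for the wrapper, since a zero alignment must leave the address untouched:
+
+	ALIGN_DOWN_IF_NONZERO(0x4800, 0x1000)	/* == 0x4000 */
+	ALIGN_DOWN_IF_NONZERO(0x4800, 0)	/* == 0x4800 */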
+
/*
* io, mmio and mmio_pref contain the total amount of bridge window space
* available. This includes the minimal space needed to cover all the
@@ -1821,45 +2078,40 @@ static void remove_dev_resources(struct pci_dev *dev, struct resource *io,
* shared with the bridges.
*/
static void pci_bus_distribute_available_resources(struct pci_bus *bus,
- struct list_head *add_list,
- struct resource io,
- struct resource mmio,
- struct resource mmio_pref)
+ struct list_head *add_list,
+ struct resource available_in[PCI_P2P_BRIDGE_RESOURCE_NUM])
{
+ struct resource available[PCI_P2P_BRIDGE_RESOURCE_NUM];
unsigned int normal_bridges = 0, hotplug_bridges = 0;
- struct resource *io_res, *mmio_res, *mmio_pref_res;
struct pci_dev *dev, *bridge = bus->self;
- resource_size_t io_per_b, mmio_per_b, mmio_pref_per_b, align;
-
- io_res = &bridge->resource[PCI_BRIDGE_IO_WINDOW];
- mmio_res = &bridge->resource[PCI_BRIDGE_MEM_WINDOW];
- mmio_pref_res = &bridge->resource[PCI_BRIDGE_PREF_MEM_WINDOW];
+ resource_size_t per_bridge[PCI_P2P_BRIDGE_RESOURCE_NUM];
+ resource_size_t align;
+ int i;
- /*
- * The alignment of this bridge is yet to be considered, hence it must
- * be done now before extending its bridge window.
- */
- align = pci_resource_alignment(bridge, io_res);
- if (!io_res->parent && align)
- io.start = min(ALIGN(io.start, align), io.end + 1);
+ for (i = 0; i < PCI_P2P_BRIDGE_RESOURCE_NUM; i++) {
+ struct resource *res =
+ pci_resource_n(bridge, PCI_BRIDGE_RESOURCES + i);
- align = pci_resource_alignment(bridge, mmio_res);
- if (!mmio_res->parent && align)
- mmio.start = min(ALIGN(mmio.start, align), mmio.end + 1);
+ available[i] = available_in[i];
- align = pci_resource_alignment(bridge, mmio_pref_res);
- if (!mmio_pref_res->parent && align)
- mmio_pref.start = min(ALIGN(mmio_pref.start, align),
- mmio_pref.end + 1);
+ /*
+ * The alignment of this bridge is yet to be considered,
+ * hence it must be done now before extending its bridge
+ * window.
+ */
+ align = pci_resource_alignment(bridge, res);
+ if (!res->parent && align)
+ available[i].start = min(ALIGN(available[i].start, align),
+ available[i].end + 1);
- /*
- * Now that we have adjusted for alignment, update the bridge window
- * resources to fill as much remaining resource space as possible.
- */
- adjust_bridge_window(bridge, io_res, add_list, resource_size(&io));
- adjust_bridge_window(bridge, mmio_res, add_list, resource_size(&mmio));
- adjust_bridge_window(bridge, mmio_pref_res, add_list,
- resource_size(&mmio_pref));
+ /*
+ * Now that we have adjusted for alignment, update the
+ * bridge window resources to fill as much remaining
+ * resource space as possible.
+ */
+ adjust_bridge_window(bridge, res, add_list,
+ resource_size(&available[i]));
+ }
/*
* Calculate how many hotplug bridges and normal bridges there
@@ -1883,7 +2135,7 @@ static void pci_bus_distribute_available_resources(struct pci_bus *bus,
*/
list_for_each_entry(dev, &bus->devices, bus_list) {
if (!dev->is_virtfn)
- remove_dev_resources(dev, &io, &mmio, &mmio_pref);
+ remove_dev_resources(dev, available);
}
/*
@@ -1895,16 +2147,9 @@ static void pci_bus_distribute_available_resources(struct pci_bus *bus,
* split between non-hotplug bridges. This is to allow possible
* hotplug bridges below them to get the extra space as well.
*/
- if (hotplug_bridges) {
- io_per_b = div64_ul(resource_size(&io), hotplug_bridges);
- mmio_per_b = div64_ul(resource_size(&mmio), hotplug_bridges);
- mmio_pref_per_b = div64_ul(resource_size(&mmio_pref),
- hotplug_bridges);
- } else {
- io_per_b = div64_ul(resource_size(&io), normal_bridges);
- mmio_per_b = div64_ul(resource_size(&mmio), normal_bridges);
- mmio_pref_per_b = div64_ul(resource_size(&mmio_pref),
- normal_bridges);
+ for (i = 0; i < PCI_P2P_BRIDGE_RESOURCE_NUM; i++) {
+ per_bridge[i] = div64_ul(resource_size(&available[i]),
+ hotplug_bridges ?: normal_bridges);
}
for_each_pci_bridge(dev, bus) {
@@ -1917,51 +2162,41 @@ static void pci_bus_distribute_available_resources(struct pci_bus *bus,
if (hotplug_bridges && !dev->is_hotplug_bridge)
continue;
- res = &dev->resource[PCI_BRIDGE_IO_WINDOW];
+ for (i = 0; i < PCI_P2P_BRIDGE_RESOURCE_NUM; i++) {
+ res = pci_resource_n(dev, PCI_BRIDGE_RESOURCES + i);
- /*
- * Make sure the split resource space is properly aligned
- * for bridge windows (align it down to avoid going above
- * what is available).
- */
- align = pci_resource_alignment(dev, res);
- io.end = align ? io.start + ALIGN_DOWN(io_per_b, align) - 1
- : io.start + io_per_b - 1;
-
- /*
- * The x_per_b holds the extra resource space that can be
- * added for each bridge but there is the minimal already
- * reserved as well so adjust x.start down accordingly to
- * cover the whole space.
- */
- io.start -= resource_size(res);
-
- res = &dev->resource[PCI_BRIDGE_MEM_WINDOW];
- align = pci_resource_alignment(dev, res);
- mmio.end = align ? mmio.start + ALIGN_DOWN(mmio_per_b, align) - 1
- : mmio.start + mmio_per_b - 1;
- mmio.start -= resource_size(res);
+ /*
+ * Make sure the split resource space is properly
+ * aligned for bridge windows (align it down to
+ * avoid going above what is available).
+ */
+ align = pci_resource_alignment(dev, res);
+ resource_set_size(&available[i],
+ ALIGN_DOWN_IF_NONZERO(per_bridge[i],
+ align));
- res = &dev->resource[PCI_BRIDGE_PREF_MEM_WINDOW];
- align = pci_resource_alignment(dev, res);
- mmio_pref.end = align ? mmio_pref.start +
- ALIGN_DOWN(mmio_pref_per_b, align) - 1
- : mmio_pref.start + mmio_pref_per_b - 1;
- mmio_pref.start -= resource_size(res);
+ /*
+ * per_bridge[] holds the extra resource space that can be
+ * added for each bridge, but the minimum is already reserved
+ * as well, so adjust available[i].start down accordingly to
+ * cover the whole space.
+ */
+ available[i].start -= resource_size(res);
+ }
- pci_bus_distribute_available_resources(b, add_list, io, mmio,
- mmio_pref);
+ pci_bus_distribute_available_resources(b, add_list, available);
- io.start += io.end + 1;
- mmio.start += mmio.end + 1;
- mmio_pref.start += mmio_pref.end + 1;
+ for (i = 0; i < PCI_P2P_BRIDGE_RESOURCE_NUM; i++)
+ available[i].start += available[i].end + 1;
}
}
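As a concrete illustration with made-up numbers: if 96 MB of spare memory window is available and the secondary bus has three hotplug bridges, per_bridge[] for that window is 96 MB / 3 = 32 MB. A bridge whose window needs 4 MB alignment keeps the full 32 MB (already a multiple), whereas a 33 MB share would be rounded down to 32 MB; in both cases the bridge's already-reserved minimum is added back by lowering available[i].start.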
static void pci_bridge_distribute_available_resources(struct pci_dev *bridge,
struct list_head *add_list)
{
- struct resource available_io, available_mmio, available_mmio_pref;
+ struct resource *res, available[PCI_P2P_BRIDGE_RESOURCE_NUM];
+ unsigned int i;
if (!bridge->is_hotplug_bridge)
return;
@@ -1969,14 +2204,13 @@ static void pci_bridge_distribute_available_resources(struct pci_dev *bridge,
pci_dbg(bridge, "distributing available resources\n");
/* Take the initial extra resources from the hotplug port */
- available_io = bridge->resource[PCI_BRIDGE_IO_WINDOW];
- available_mmio = bridge->resource[PCI_BRIDGE_MEM_WINDOW];
- available_mmio_pref = bridge->resource[PCI_BRIDGE_PREF_MEM_WINDOW];
+ for (i = 0; i < PCI_P2P_BRIDGE_RESOURCE_NUM; i++) {
+ res = pci_resource_n(bridge, PCI_BRIDGE_RESOURCES + i);
+ available[i] = *res;
+ }
pci_bus_distribute_available_resources(bridge->subordinate,
- add_list, available_io,
- available_mmio,
- available_mmio_pref);
+ add_list, available);
}
static bool pci_bridge_resources_not_assigned(struct pci_dev *dev)
@@ -2020,13 +2254,41 @@ pci_root_bus_distribute_available_resources(struct pci_bus *bus,
* in case of root bus.
*/
if (bridge && pci_bridge_resources_not_assigned(dev))
- pci_bridge_distribute_available_resources(bridge,
- add_list);
+ pci_bridge_distribute_available_resources(dev, add_list);
else
pci_root_bus_distribute_available_resources(b, add_list);
}
}
+static void pci_prepare_next_assign_round(struct list_head *fail_head,
+ int tried_times,
+ enum release_type rel_type)
+{
+ struct pci_dev_resource *fail_res;
+
+ pr_info("PCI: No. %d try to assign unassigned res\n", tried_times + 1);
+
+ /*
+ * Try to release leaf bridge's resources that aren't big
+ * enough to contain child device resources.
+ */
+ list_for_each_entry(fail_res, fail_head, list) {
+ struct pci_bus *bus = fail_res->dev->bus;
+ struct resource *b_win;
+
+ b_win = pbus_select_window_for_type(bus, fail_res->flags);
+ if (!b_win)
+ continue;
+ pci_bus_release_bridge_resources(bus, b_win, rel_type);
+ }
+
+ /* Restore size and flags */
+ list_for_each_entry(fail_res, fail_head, list)
+ restore_dev_resource(fail_res);
+
+ free_list(fail_head);
+}
+
/*
* First try will not touch PCI bridge res.
* Second and later try will clear small leaf bridge res.
@@ -2040,7 +2302,6 @@ void pci_assign_unassigned_root_bus_resources(struct pci_bus *bus)
int tried_times = 0;
enum release_type rel_type = leaf_only;
LIST_HEAD(fail_head);
- struct pci_dev_resource *fail_res;
int pci_try_num = 1;
enum enable_type enable_local;
@@ -2054,82 +2315,54 @@ void pci_assign_unassigned_root_bus_resources(struct pci_bus *bus)
max_depth, pci_try_num);
}
-again:
- /*
- * Last try will use add_list, otherwise will try good to have as must
- * have, so can realloc parent bridge resource
- */
- if (tried_times + 1 == pci_try_num)
- add_list = &realloc_head;
- /*
- * Depth first, calculate sizes and alignments of all subordinate buses.
- */
- __pci_bus_size_bridges(bus, add_list);
-
- pci_root_bus_distribute_available_resources(bus, add_list);
-
- /* Depth last, allocate resources and update the hardware. */
- __pci_bus_assign_resources(bus, add_list, &fail_head);
- if (add_list)
- BUG_ON(!list_empty(add_list));
- tried_times++;
-
- /* Any device complain? */
- if (list_empty(&fail_head))
- goto dump;
+ while (1) {
+ /*
+ * Only the last try uses add_list; earlier tries treat the
+ * good-to-have requests as must-have so the parent bridge
+ * resource can be reallocated
+ */
+ if (tried_times + 1 == pci_try_num)
+ add_list = &realloc_head;
+ /*
+ * Depth first, calculate sizes and alignments of all
+ * subordinate buses.
+ */
+ __pci_bus_size_bridges(bus, add_list);
- if (tried_times >= pci_try_num) {
- if (enable_local == undefined)
- dev_info(&bus->dev, "Some PCI device resources are unassigned, try booting with pci=realloc\n");
- else if (enable_local == auto_enabled)
- dev_info(&bus->dev, "Automatically enabled pci realloc, if you have problem, try booting with pci=realloc=off\n");
+ pci_root_bus_distribute_available_resources(bus, add_list);
- free_list(&fail_head);
- goto dump;
- }
+ /* Depth last, allocate resources and update the hardware. */
+ __pci_bus_assign_resources(bus, add_list, &fail_head);
+ if (WARN_ON_ONCE(add_list && !list_empty(add_list)))
+ free_list(add_list);
+ tried_times++;
- dev_info(&bus->dev, "No. %d try to assign unassigned res\n",
- tried_times + 1);
+ /* Any device complain? */
+ if (list_empty(&fail_head))
+ break;
- /* Third times and later will not check if it is leaf */
- if ((tried_times + 1) > 2)
- rel_type = whole_subtree;
+ if (tried_times >= pci_try_num) {
+ if (enable_local == undefined) {
+ dev_info(&bus->dev,
+ "Some PCI device resources are unassigned, try booting with pci=realloc\n");
+ } else if (enable_local == auto_enabled) {
+ dev_info(&bus->dev,
+ "Automatically enabled pci realloc, if you have problem, try booting with pci=realloc=off\n");
+ }
+ free_list(&fail_head);
+ break;
+ }
- /*
- * Try to release leaf bridge's resources that doesn't fit resource of
- * child device under that bridge.
- */
- list_for_each_entry(fail_res, &fail_head, list)
- pci_bus_release_bridge_resources(fail_res->dev->bus,
- fail_res->flags & PCI_RES_TYPE_MASK,
- rel_type);
+ /* The third and later tries no longer restrict the release to leaf bridges */
+ if (tried_times + 1 > 2)
+ rel_type = whole_subtree;
- /* Restore size and flags */
- list_for_each_entry(fail_res, &fail_head, list) {
- struct resource *res = fail_res->res;
- int idx;
-
- res->start = fail_res->start;
- res->end = fail_res->end;
- res->flags = fail_res->flags;
-
- if (pci_is_bridge(fail_res->dev)) {
- idx = res - &fail_res->dev->resource[0];
- if (idx >= PCI_BRIDGE_RESOURCES &&
- idx <= PCI_BRIDGE_RESOURCE_END)
- res->flags = 0;
- }
+ pci_prepare_next_assign_round(&fail_head, tried_times, rel_type);
}
- free_list(&fail_head);
-
- goto again;
-dump:
- /* Dump the resource on buses */
pci_bus_dump_resources(bus);
}
-void __init pci_assign_unassigned_resources(void)
+void pci_assign_unassigned_resources(void)
{
struct pci_bus *root_bus;
@@ -2147,177 +2380,197 @@ void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge)
struct pci_bus *parent = bridge->subordinate;
/* List of resources that want additional resources */
LIST_HEAD(add_list);
-
int tried_times = 0;
LIST_HEAD(fail_head);
- struct pci_dev_resource *fail_res;
- int retval;
-
-again:
- __pci_bus_size_bridges(parent, &add_list);
-
- /*
- * Distribute remaining resources (if any) equally between hotplug
- * bridges below. This makes it possible to extend the hierarchy
- * later without running out of resources.
- */
- pci_bridge_distribute_available_resources(bridge, &add_list);
-
- __pci_bridge_assign_resources(bridge, &add_list, &fail_head);
- BUG_ON(!list_empty(&add_list));
- tried_times++;
+ int ret;
- if (list_empty(&fail_head))
- goto enable_all;
+ while (1) {
+ __pci_bus_size_bridges(parent, &add_list);
- if (tried_times >= 2) {
- /* Still fail, don't need to try more */
- free_list(&fail_head);
- goto enable_all;
- }
+ /*
+ * Distribute remaining resources (if any) equally between
+ * hotplug bridges below. This makes it possible to extend
+ * the hierarchy later without running out of resources.
+ */
+ pci_bridge_distribute_available_resources(bridge, &add_list);
- printk(KERN_DEBUG "PCI: No. %d try to assign unassigned res\n",
- tried_times + 1);
+ __pci_bridge_assign_resources(bridge, &add_list, &fail_head);
+ if (WARN_ON_ONCE(!list_empty(&add_list)))
+ free_list(&add_list);
+ tried_times++;
- /*
- * Try to release leaf bridge's resources that aren't big enough
- * to contain child device resources.
- */
- list_for_each_entry(fail_res, &fail_head, list)
- pci_bus_release_bridge_resources(fail_res->dev->bus,
- fail_res->flags & PCI_RES_TYPE_MASK,
- whole_subtree);
+ if (list_empty(&fail_head))
+ break;
- /* Restore size and flags */
- list_for_each_entry(fail_res, &fail_head, list) {
- struct resource *res = fail_res->res;
- int idx;
-
- res->start = fail_res->start;
- res->end = fail_res->end;
- res->flags = fail_res->flags;
-
- if (pci_is_bridge(fail_res->dev)) {
- idx = res - &fail_res->dev->resource[0];
- if (idx >= PCI_BRIDGE_RESOURCES &&
- idx <= PCI_BRIDGE_RESOURCE_END)
- res->flags = 0;
+ if (tried_times >= 2) {
+ /* Still fail, don't need to try more */
+ free_list(&fail_head);
+ break;
}
- }
- free_list(&fail_head);
- goto again;
+ pci_prepare_next_assign_round(&fail_head, tried_times,
+ whole_subtree);
+ }
-enable_all:
- retval = pci_reenable_device(bridge);
- if (retval)
- pci_err(bridge, "Error reenabling bridge (%d)\n", retval);
+ ret = pci_reenable_device(bridge);
+ if (ret)
+ pci_err(bridge, "Error reenabling bridge (%d)\n", ret);
pci_set_master(bridge);
}
EXPORT_SYMBOL_GPL(pci_assign_unassigned_bridge_resources);
-int pci_reassign_bridge_resources(struct pci_dev *bridge, unsigned long type)
+/*
+ * Walk to the root bus, find the bridge window relevant for @res and
+ * release it when possible. If the bridge window contains assigned
+ * resources, it cannot be released.
+ */
+static int pbus_reassign_bridge_resources(struct pci_bus *bus, struct resource *res,
+ struct list_head *saved)
{
+ unsigned long type = res->flags;
struct pci_dev_resource *dev_res;
- struct pci_dev *next;
- LIST_HEAD(saved);
+ struct pci_dev *bridge = NULL;
LIST_HEAD(added);
LIST_HEAD(failed);
unsigned int i;
- int ret;
+ int ret = 0;
- down_read(&pci_bus_sem);
-
- /* Walk to the root hub, releasing bridge BARs when possible */
- next = bridge;
- do {
- bridge = next;
- for (i = PCI_BRIDGE_RESOURCES; i < PCI_BRIDGE_RESOURCE_END;
- i++) {
- struct resource *res = &bridge->resource[i];
+ while (!pci_is_root_bus(bus)) {
+ bridge = bus->self;
+ res = pbus_select_window(bus, res);
+ if (!res)
+ break;
- if ((res->flags ^ type) & PCI_RES_TYPE_MASK)
- continue;
+ i = pci_resource_num(bridge, res);
- /* Ignore BARs which are still in use */
- if (res->child)
- continue;
-
- ret = add_to_list(&saved, bridge, res, 0, 0);
+ /* Ignore BARs which are still in use */
+ if (!res->child) {
+ ret = add_to_list(saved, bridge, res, 0, 0);
if (ret)
- goto cleanup;
+ return ret;
- pci_info(bridge, "BAR %d: releasing %pR\n",
- i, res);
+ pci_release_resource(bridge, i);
+ } else {
+ const char *res_name = pci_resource_name(bridge, i);
- if (res->parent)
- release_resource(res);
- res->start = 0;
- res->end = 0;
- break;
+ pci_warn(bridge,
+ "%s %pR: was not released (still contains assigned resources)\n",
+ res_name, res);
}
- if (i == PCI_BRIDGE_RESOURCE_END)
- break;
- next = bridge->bus ? bridge->bus->self : NULL;
- } while (next);
+ bus = bus->parent;
+ }
- if (list_empty(&saved)) {
- up_read(&pci_bus_sem);
+ if (!bridge)
return -ENOENT;
- }
__pci_bus_size_bridges(bridge->subordinate, &added);
__pci_bridge_assign_resources(bridge, &added, &failed);
- BUG_ON(!list_empty(&added));
+ if (WARN_ON_ONCE(!list_empty(&added)))
+ free_list(&added);
if (!list_empty(&failed)) {
- ret = -ENOSPC;
- goto cleanup;
+ if (pci_required_resource_failed(&failed, type))
+ ret = -ENOSPC;
+ free_list(&failed);
+ if (ret)
+ return ret;
+
+ /* Only resources with unrelated types failed (again) */
}
- list_for_each_entry(dev_res, &saved, list) {
+ list_for_each_entry(dev_res, saved, list) {
+ struct pci_dev *dev = dev_res->dev;
+
/* Skip the bridge we just assigned resources for */
- if (bridge == dev_res->dev)
+ if (bridge == dev)
continue;
- bridge = dev_res->dev;
- pci_setup_bridge(bridge->subordinate);
+ if (!dev->subordinate)
+ continue;
+
+ pci_setup_bridge(dev->subordinate);
}
- free_list(&saved);
- up_read(&pci_bus_sem);
return 0;
+}
-cleanup:
- /* Restore size and flags */
- list_for_each_entry(dev_res, &failed, list) {
- struct resource *res = dev_res->res;
+int pci_do_resource_release_and_resize(struct pci_dev *pdev, int resno, int size,
+ int exclude_bars)
+{
+ struct resource *res = pci_resource_n(pdev, resno);
+ struct pci_dev_resource *dev_res;
+ struct pci_bus *bus = pdev->bus;
+ struct resource *b_win, *r;
+ LIST_HEAD(saved);
+ unsigned int i;
+ int ret = 0;
+
+ b_win = pbus_select_window(bus, res);
+ if (!b_win)
+ return -EINVAL;
+
+ pci_dev_for_each_resource(pdev, r, i) {
+ if (i >= PCI_BRIDGE_RESOURCES)
+ break;
- res->start = dev_res->start;
- res->end = dev_res->end;
- res->flags = dev_res->flags;
+ if (exclude_bars & BIT(i))
+ continue;
+
+ if (b_win != pbus_select_window(bus, r))
+ continue;
+
+ ret = add_to_list(&saved, pdev, r, 0, 0);
+ if (ret)
+ goto restore;
+ pci_release_resource(pdev, i);
}
- free_list(&failed);
+ pci_resize_resource_set_size(pdev, resno, size);
+
+ if (!bus->self)
+ goto out;
+
+ down_read(&pci_bus_sem);
+ ret = pbus_reassign_bridge_resources(bus, res, &saved);
+ if (ret)
+ goto restore;
+
+out:
+ up_read(&pci_bus_sem);
+ free_list(&saved);
+ return ret;
+
+restore:
/* Revert to the old configuration */
list_for_each_entry(dev_res, &saved, list) {
struct resource *res = dev_res->res;
+ struct pci_dev *dev = dev_res->dev;
- bridge = dev_res->dev;
- i = res - bridge->resource;
+ i = pci_resource_num(dev, res);
- res->start = dev_res->start;
- res->end = dev_res->end;
- res->flags = dev_res->flags;
+ if (res->parent) {
+ release_child_resources(res);
+ pci_release_resource(dev, i);
+ }
- pci_claim_resource(bridge, i);
- pci_setup_bridge(bridge->subordinate);
- }
- free_list(&saved);
- up_read(&pci_bus_sem);
+ restore_dev_resource(dev_res);
- return ret;
+ ret = pci_claim_resource(dev, i);
+ if (ret)
+ continue;
+
+ if (i < PCI_BRIDGE_RESOURCES) {
+ const char *res_name = pci_resource_name(dev, i);
+
+ pci_update_resource(dev, i);
+ pci_info(dev, "%s %pR: old value restored\n",
+ res_name, res);
+ }
+ if (dev->subordinate)
+ pci_setup_bridge(dev->subordinate);
+ }
+ goto out;
}
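For context, a minimal sketch of how a driver-initiated BAR resize would end up in the release-and-resize path above. pci_resize_resource(), pci_rebar_get_possible_sizes() and pci_rebar_bytes_to_size() are the long-standing driver-facing entry points; none of them appear in this hunk, so treat the exact call chain into pci_do_resource_release_and_resize() as an assumption:

	/* Hypothetical driver snippet; assumes <linux/pci.h> and <linux/sizes.h>. */
	static int example_grow_bar0(struct pci_dev *pdev)
	{
		int size = pci_rebar_bytes_to_size(SZ_256M);
		u32 sizes = pci_rebar_get_possible_sizes(pdev, 0);

		if (!sizes || !(sizes & BIT(size)))
			return -ENOTSUPP;

		return pci_resize_resource(pdev, 0, size);
	}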
void pci_assign_unassigned_bus_resources(struct pci_bus *bus)
@@ -2332,6 +2585,7 @@ void pci_assign_unassigned_bus_resources(struct pci_bus *bus)
__pci_bus_size_bridges(dev->subordinate, &add_list);
up_read(&pci_bus_sem);
__pci_bus_assign_resources(bus, &add_list, NULL);
- BUG_ON(!list_empty(&add_list));
+ if (WARN_ON_ONCE(!list_empty(&add_list)))
+ free_list(&add_list);
}
EXPORT_SYMBOL_GPL(pci_assign_unassigned_bus_resources);
diff --git a/drivers/pci/setup-irq.c b/drivers/pci/setup-irq.c
deleted file mode 100644
index cc7d26b015f3..000000000000
--- a/drivers/pci/setup-irq.c
+++ /dev/null
@@ -1,64 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Support routines for initializing a PCI subsystem
- *
- * Extruded from code written by
- * Dave Rusling (david.rusling@reo.mts.dec.com)
- * David Mosberger (davidm@cs.arizona.edu)
- * David Miller (davem@redhat.com)
- */
-
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/cache.h>
-#include "pci.h"
-
-void pci_assign_irq(struct pci_dev *dev)
-{
- u8 pin;
- u8 slot = -1;
- int irq = 0;
- struct pci_host_bridge *hbrg = pci_find_host_bridge(dev->bus);
-
- if (!(hbrg->map_irq)) {
- pci_dbg(dev, "runtime IRQ mapping not provided by arch\n");
- return;
- }
-
- /*
- * If this device is not on the primary bus, we need to figure out
- * which interrupt pin it will come in on. We know which slot it
- * will come in on because that slot is where the bridge is. Each
- * time the interrupt line passes through a PCI-PCI bridge we must
- * apply the swizzle function.
- */
- pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
- /* Cope with illegal. */
- if (pin > 4)
- pin = 1;
-
- if (pin) {
- /* Follow the chain of bridges, swizzling as we go. */
- if (hbrg->swizzle_irq)
- slot = (*(hbrg->swizzle_irq))(dev, &pin);
-
- /*
- * If a swizzling function is not used, map_irq() must
- * ignore slot.
- */
- irq = (*(hbrg->map_irq))(dev, slot, pin);
- if (irq == -1)
- irq = 0;
- }
- dev->irq = irq;
-
- pci_dbg(dev, "assign IRQ: got %d\n", dev->irq);
-
- /*
- * Always tell the device, so the driver knows what is the real IRQ
- * to use; the device does not use it.
- */
- pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
-}
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index 967f9a758923..e5fcadfc58b0 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -29,7 +29,8 @@ static void pci_std_update_resource(struct pci_dev *dev, int resno)
u16 cmd;
u32 new, check, mask;
int reg;
- struct resource *res = dev->resource + resno;
+ struct resource *res = pci_resource_n(dev, resno);
+ const char *res_name = pci_resource_name(dev, resno);
/* Per SR-IOV spec 3.4.1.11, VF BARs are RO zero */
if (dev->is_virtfn)
@@ -104,8 +105,8 @@ static void pci_std_update_resource(struct pci_dev *dev, int resno)
pci_read_config_dword(dev, reg, &check);
if ((new ^ check) & mask) {
- pci_err(dev, "BAR %d: error updating (%#08x != %#08x)\n",
- resno, new, check);
+ pci_err(dev, "%s: error updating (%#010x != %#010x)\n",
+ res_name, new, check);
}
if (res->flags & IORESOURCE_MEM_64) {
@@ -113,8 +114,8 @@ static void pci_std_update_resource(struct pci_dev *dev, int resno)
pci_write_config_dword(dev, reg + 4, new);
pci_read_config_dword(dev, reg + 4, &check);
if (check != new) {
- pci_err(dev, "BAR %d: error updating (high %#08x != %#08x)\n",
- resno, new, check);
+ pci_err(dev, "%s: error updating (high %#010x != %#010x)\n",
+ res_name, new, check);
}
}
@@ -126,20 +127,19 @@ void pci_update_resource(struct pci_dev *dev, int resno)
{
if (resno <= PCI_ROM_RESOURCE)
pci_std_update_resource(dev, resno);
-#ifdef CONFIG_PCI_IOV
- else if (resno >= PCI_IOV_RESOURCES && resno <= PCI_IOV_RESOURCE_END)
+ else if (pci_resource_is_iov(resno))
pci_iov_update_resource(dev, resno);
-#endif
}
int pci_claim_resource(struct pci_dev *dev, int resource)
{
struct resource *res = &dev->resource[resource];
+ const char *res_name = pci_resource_name(dev, resource);
struct resource *root, *conflict;
if (res->flags & IORESOURCE_UNSET) {
- pci_info(dev, "can't claim BAR %d %pR: no address assigned\n",
- resource, res);
+ pci_info(dev, "%s %pR: can't claim; no address assigned\n",
+ res_name, res);
return -EINVAL;
}
@@ -153,16 +153,16 @@ int pci_claim_resource(struct pci_dev *dev, int resource)
root = pci_find_parent_resource(dev, res);
if (!root) {
- pci_info(dev, "can't claim BAR %d %pR: no compatible bridge window\n",
- resource, res);
+ pci_info(dev, "%s %pR: can't claim; no compatible bridge window\n",
+ res_name, res);
res->flags |= IORESOURCE_UNSET;
return -EINVAL;
}
conflict = request_resource_conflict(root, res);
if (conflict) {
- pci_info(dev, "can't claim BAR %d %pR: address conflict with %s %pR\n",
- resource, res, conflict->name, conflict);
+ pci_info(dev, "%s %pR: can't claim; address conflict with %s %pR\n",
+ res_name, res, conflict->name, conflict);
res->flags |= IORESOURCE_UNSET;
return -EBUSY;
}
@@ -201,6 +201,7 @@ static int pci_revert_fw_address(struct resource *res, struct pci_dev *dev,
{
struct resource *root, *conflict;
resource_size_t fw_addr, start, end;
+ const char *res_name = pci_resource_name(dev, resno);
fw_addr = pcibios_retrieve_fw_addr(dev, resno);
if (!fw_addr)
@@ -208,8 +209,7 @@ static int pci_revert_fw_address(struct resource *res, struct pci_dev *dev,
start = res->start;
end = res->end;
- res->start = fw_addr;
- res->end = res->start + size - 1;
+ resource_set_range(res, fw_addr, size);
res->flags &= ~IORESOURCE_UNSET;
root = pci_find_parent_resource(dev, res);
@@ -231,12 +231,11 @@ static int pci_revert_fw_address(struct resource *res, struct pci_dev *dev,
root = &iomem_resource;
}
- pci_info(dev, "BAR %d: trying firmware assignment %pR\n",
- resno, res);
+ pci_info(dev, "%s: trying firmware assignment %pR\n", res_name, res);
conflict = request_resource_conflict(root, res);
if (conflict) {
- pci_info(dev, "BAR %d: %pR conflicts with %s %pR\n",
- resno, res, conflict->name, conflict);
+ pci_info(dev, "%s %pR: conflicts with %s %pR\n", res_name, res,
+ conflict->name, conflict);
res->start = start;
res->end = end;
res->flags |= IORESOURCE_UNSET;
@@ -261,7 +260,7 @@ resource_size_t __weak pcibios_align_resource(void *data,
static int __pci_assign_resource(struct pci_bus *bus, struct pci_dev *dev,
int resno, resource_size_t size, resource_size_t align)
{
- struct resource *res = dev->resource + resno;
+ struct resource *res = pci_resource_n(dev, resno);
resource_size_t min;
int ret;
@@ -324,7 +323,8 @@ static int _pci_assign_resource(struct pci_dev *dev, int resno,
int pci_assign_resource(struct pci_dev *dev, int resno)
{
- struct resource *res = dev->resource + resno;
+ struct resource *res = pci_resource_n(dev, resno);
+ const char *res_name = pci_resource_name(dev, resno);
resource_size_t align, size;
int ret;
@@ -334,8 +334,8 @@ int pci_assign_resource(struct pci_dev *dev, int resno)
res->flags |= IORESOURCE_UNSET;
align = pci_resource_alignment(dev, res);
if (!align) {
- pci_info(dev, "BAR %d: can't assign %pR (bogus alignment)\n",
- resno, res);
+ pci_info(dev, "%s %pR: can't assign; bogus alignment\n",
+ res_name, res);
return -EINVAL;
}
@@ -348,18 +348,21 @@ int pci_assign_resource(struct pci_dev *dev, int resno)
* working, which is better than just leaving it disabled.
*/
if (ret < 0) {
- pci_info(dev, "BAR %d: no space for %pR\n", resno, res);
+ pci_info(dev, "%s %pR: can't assign; no space\n", res_name, res);
ret = pci_revert_fw_address(res, dev, resno, size);
}
if (ret < 0) {
- pci_info(dev, "BAR %d: failed to assign %pR\n", resno, res);
+ pci_info(dev, "%s %pR: failed to assign\n", res_name, res);
return ret;
}
res->flags &= ~IORESOURCE_UNSET;
res->flags &= ~IORESOURCE_STARTALIGN;
- pci_info(dev, "BAR %d: assigned %pR\n", resno, res);
+ if (resno >= PCI_BRIDGE_RESOURCES && resno <= PCI_BRIDGE_RESOURCE_END)
+ res->flags &= ~IORESOURCE_DISABLED;
+
+ pci_info(dev, "%s %pR: assigned\n", res_name, res);
if (resno < PCI_BRIDGE_RESOURCES)
pci_update_resource(dev, resno);
@@ -367,10 +370,11 @@ int pci_assign_resource(struct pci_dev *dev, int resno)
}
EXPORT_SYMBOL(pci_assign_resource);
-int pci_reassign_resource(struct pci_dev *dev, int resno, resource_size_t addsize,
- resource_size_t min_align)
+int pci_reassign_resource(struct pci_dev *dev, int resno,
+ resource_size_t addsize, resource_size_t min_align)
{
- struct resource *res = dev->resource + resno;
+ struct resource *res = pci_resource_n(dev, resno);
+ const char *res_name = pci_resource_name(dev, resno);
unsigned long flags;
resource_size_t new_size;
int ret;
@@ -381,105 +385,58 @@ int pci_reassign_resource(struct pci_dev *dev, int resno, resource_size_t addsiz
flags = res->flags;
res->flags |= IORESOURCE_UNSET;
if (!res->parent) {
- pci_info(dev, "BAR %d: can't reassign an unassigned resource %pR\n",
- resno, res);
+ pci_info(dev, "%s %pR: can't reassign; unassigned resource\n",
+ res_name, res);
return -EINVAL;
}
- /* already aligned with min_align */
new_size = resource_size(res) + addsize;
ret = _pci_assign_resource(dev, resno, new_size, min_align);
if (ret) {
res->flags = flags;
- pci_info(dev, "BAR %d: %pR (failed to expand by %#llx)\n",
- resno, res, (unsigned long long) addsize);
+ pci_info(dev, "%s %pR: failed to expand by %#llx\n",
+ res_name, res, (unsigned long long) addsize);
return ret;
}
res->flags &= ~IORESOURCE_UNSET;
res->flags &= ~IORESOURCE_STARTALIGN;
- pci_info(dev, "BAR %d: reassigned %pR (expanded by %#llx)\n",
- resno, res, (unsigned long long) addsize);
+ pci_info(dev, "%s %pR: reassigned; expanded by %#llx\n",
+ res_name, res, (unsigned long long) addsize);
if (resno < PCI_BRIDGE_RESOURCES)
pci_update_resource(dev, resno);
return 0;
}
-void pci_release_resource(struct pci_dev *dev, int resno)
+int pci_release_resource(struct pci_dev *dev, int resno)
{
- struct resource *res = dev->resource + resno;
-
- pci_info(dev, "BAR %d: releasing %pR\n", resno, res);
+ struct resource *res = pci_resource_n(dev, resno);
+ const char *res_name = pci_resource_name(dev, resno);
+ int ret;
if (!res->parent)
- return;
-
- release_resource(res);
- res->end = resource_size(res) - 1;
- res->start = 0;
- res->flags |= IORESOURCE_UNSET;
-}
-EXPORT_SYMBOL(pci_release_resource);
-
-int pci_resize_resource(struct pci_dev *dev, int resno, int size)
-{
- struct resource *res = dev->resource + resno;
- struct pci_host_bridge *host;
- int old, ret;
- u32 sizes;
- u16 cmd;
-
- /* Check if we must preserve the firmware's resource assignment */
- host = pci_find_host_bridge(dev->bus);
- if (host->preserve_config)
- return -ENOTSUPP;
-
- /* Make sure the resource isn't assigned before resizing it. */
- if (!(res->flags & IORESOURCE_UNSET))
- return -EBUSY;
-
- pci_read_config_word(dev, PCI_COMMAND, &cmd);
- if (cmd & PCI_COMMAND_MEMORY)
- return -EBUSY;
-
- sizes = pci_rebar_get_possible_sizes(dev, resno);
- if (!sizes)
- return -ENOTSUPP;
-
- if (!(sizes & BIT(size)))
- return -EINVAL;
+ return 0;
- old = pci_rebar_get_current_size(dev, resno);
- if (old < 0)
- return old;
+ pci_info(dev, "%s %pR: releasing\n", res_name, res);
- ret = pci_rebar_set_size(dev, resno, size);
+ ret = release_resource(res);
if (ret)
return ret;
+ res->end = resource_size(res) - 1;
+ res->start = 0;
+ res->flags |= IORESOURCE_UNSET;
- res->end = res->start + pci_rebar_size_to_bytes(size) - 1;
-
- /* Check if the new config works by trying to assign everything. */
- if (dev->bus->self) {
- ret = pci_reassign_bridge_resources(dev->bus->self, res->flags);
- if (ret)
- goto error_resize;
- }
return 0;
-
-error_resize:
- pci_rebar_set_size(dev, resno, old);
- res->end = res->start + pci_rebar_size_to_bytes(old) - 1;
- return ret;
}
-EXPORT_SYMBOL(pci_resize_resource);
+EXPORT_SYMBOL(pci_release_resource);
int pci_enable_resources(struct pci_dev *dev, int mask)
{
u16 cmd, old_cmd;
int i;
struct resource *r;
+ const char *r_name;
pci_read_config_word(dev, PCI_COMMAND, &cmd);
old_cmd = cmd;
@@ -488,28 +445,33 @@ int pci_enable_resources(struct pci_dev *dev, int mask)
if (!(mask & (1 << i)))
continue;
+ r_name = pci_resource_name(dev, i);
+
if (!(r->flags & (IORESOURCE_IO | IORESOURCE_MEM)))
continue;
- if ((i == PCI_ROM_RESOURCE) &&
- (!(r->flags & IORESOURCE_ROM_ENABLE)))
+ if (pci_resource_is_optional(dev, i))
continue;
- if (r->flags & IORESOURCE_UNSET) {
- pci_err(dev, "can't enable device: BAR %d %pR not assigned\n",
- i, r);
- return -EINVAL;
+ if (i < PCI_BRIDGE_RESOURCES) {
+ if (r->flags & IORESOURCE_UNSET) {
+ pci_err(dev, "%s %pR: not assigned; can't enable device\n",
+ r_name, r);
+ return -EINVAL;
+ }
+
+ if (!r->parent) {
+ pci_err(dev, "%s %pR: not claimed; can't enable device\n",
+ r_name, r);
+ return -EINVAL;
+ }
}
- if (!r->parent) {
- pci_err(dev, "can't enable device: BAR %d %pR not claimed\n",
- i, r);
- return -EINVAL;
+ if (r->parent) {
+ if (r->flags & IORESOURCE_IO)
+ cmd |= PCI_COMMAND_IO;
+ if (r->flags & IORESOURCE_MEM)
+ cmd |= PCI_COMMAND_MEMORY;
}
-
- if (r->flags & IORESOURCE_IO)
- cmd |= PCI_COMMAND_IO;
- if (r->flags & IORESOURCE_MEM)
- cmd |= PCI_COMMAND_MEMORY;
}
if (cmd != old_cmd) {
diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
index 0f87cade10f7..50fb3eb595fe 100644
--- a/drivers/pci/slot.c
+++ b/drivers/pci/slot.c
@@ -7,7 +7,6 @@
#include <linux/kobject.h>
#include <linux/slab.h>
-#include <linux/module.h>
#include <linux/pci.h>
#include <linux/err.h>
#include "pci.h"
@@ -79,6 +78,7 @@ static void pci_slot_release(struct kobject *kobj)
up_read(&pci_bus_sem);
list_del(&slot->list);
+ pci_bus_put(slot->bus);
kfree(slot);
}
@@ -244,12 +244,13 @@ struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr,
slot = get_slot(parent, slot_nr);
if (slot) {
if (hotplug) {
- if ((err = slot->hotplug ? -EBUSY : 0)
- || (err = rename_slot(slot, name))) {
- kobject_put(&slot->kobj);
- slot = NULL;
- goto err;
+ if (slot->hotplug) {
+ err = -EBUSY;
+ goto put_slot;
}
+ err = rename_slot(slot, name);
+ if (err)
+ goto put_slot;
}
goto out;
}
@@ -261,7 +262,7 @@ placeholder:
goto err;
}
- slot->bus = parent;
+ slot->bus = pci_bus_get(parent);
slot->number = slot_nr;
slot->kobj.kset = pci_slots_kset;
@@ -269,6 +270,7 @@ placeholder:
slot_name = make_slot_name(name);
if (!slot_name) {
err = -ENOMEM;
+ pci_bus_put(slot->bus);
kfree(slot);
goto err;
}
@@ -278,10 +280,8 @@ placeholder:
err = kobject_init_and_add(&slot->kobj, &pci_slot_ktype, NULL,
"%s", slot_name);
- if (err) {
- kobject_put(&slot->kobj);
- goto err;
- }
+ if (err)
+ goto put_slot;
down_read(&pci_bus_sem);
list_for_each_entry(dev, &parent->devices, bus_list)
@@ -296,6 +296,9 @@ out:
kfree(slot_name);
mutex_unlock(&pci_slot_mutex);
return slot;
+
+put_slot:
+ kobject_put(&slot->kobj);
err:
slot = ERR_PTR(err);
goto out;
@@ -321,49 +324,6 @@ void pci_destroy_slot(struct pci_slot *slot)
}
EXPORT_SYMBOL_GPL(pci_destroy_slot);
-#if defined(CONFIG_HOTPLUG_PCI) || defined(CONFIG_HOTPLUG_PCI_MODULE)
-#include <linux/pci_hotplug.h>
-/**
- * pci_hp_create_module_link - create symbolic link to hotplug driver module
- * @pci_slot: struct pci_slot
- *
- * Helper function for pci_hotplug_core.c to create symbolic link to
- * the hotplug driver module.
- */
-void pci_hp_create_module_link(struct pci_slot *pci_slot)
-{
- struct hotplug_slot *slot = pci_slot->hotplug;
- struct kobject *kobj = NULL;
- int ret;
-
- if (!slot || !slot->ops)
- return;
- kobj = kset_find_obj(module_kset, slot->mod_name);
- if (!kobj)
- return;
- ret = sysfs_create_link(&pci_slot->kobj, kobj, "module");
- if (ret)
- dev_err(&pci_slot->bus->dev, "Error creating sysfs link (%d)\n",
- ret);
- kobject_put(kobj);
-}
-EXPORT_SYMBOL_GPL(pci_hp_create_module_link);
-
-/**
- * pci_hp_remove_module_link - remove symbolic link to the hotplug driver
- * module.
- * @pci_slot: struct pci_slot
- *
- * Helper function for pci_hotplug_core.c to remove symbolic link to
- * the hotplug driver module.
- */
-void pci_hp_remove_module_link(struct pci_slot *pci_slot)
-{
- sysfs_remove_link(&pci_slot->kobj, "module");
-}
-EXPORT_SYMBOL_GPL(pci_hp_remove_module_link);
-#endif
-
static int pci_slot_init(void)
{
struct kset *pci_bus_kset;
diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c
index d837da055921..5ff84fb8fb0f 100644
--- a/drivers/pci/switch/switchtec.c
+++ b/drivers/pci/switch/switchtec.c
@@ -37,7 +37,9 @@ MODULE_PARM_DESC(nirqs, "number of interrupts to allocate (more may be useful fo
static dev_t switchtec_devt;
static DEFINE_IDA(switchtec_minor_ida);
-struct class *switchtec_class;
+const struct class switchtec_class = {
+ .name = "switchtec",
+};
EXPORT_SYMBOL_GPL(switchtec_class);
enum mrpc_state {
@@ -267,10 +269,9 @@ static void mrpc_event_work(struct work_struct *work)
dev_dbg(&stdev->dev, "%s\n", __func__);
- mutex_lock(&stdev->mrpc_mutex);
+ guard(mutex)(&stdev->mrpc_mutex);
cancel_delayed_work(&stdev->mrpc_timeout);
mrpc_complete_cmd(stdev);
- mutex_unlock(&stdev->mrpc_mutex);
}
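/*
 * Note: guard(mutex)(&stdev->mrpc_mutex), from <linux/cleanup.h>, takes the
 * mutex and releases it automatically when the enclosing scope ends, which is
 * why the explicit mutex_unlock() disappears here.  stdev_kill() further down
 * uses scoped_guard(), the block-scoped form of the same helper.
 */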
static void mrpc_error_complete_cmd(struct switchtec_dev *stdev)
@@ -372,7 +373,7 @@ static ssize_t field ## _show(struct device *dev, \
if (stdev->gen == SWITCHTEC_GEN3) \
return io_string_show(buf, &si->gen3.field, \
sizeof(si->gen3.field)); \
- else if (stdev->gen == SWITCHTEC_GEN4) \
+ else if (stdev->gen >= SWITCHTEC_GEN4) \
return io_string_show(buf, &si->gen4.field, \
sizeof(si->gen4.field)); \
else \
@@ -663,7 +664,7 @@ static int ioctl_flash_info(struct switchtec_dev *stdev,
if (stdev->gen == SWITCHTEC_GEN3) {
info.flash_length = ioread32(&fi->gen3.flash_length);
info.num_partitions = SWITCHTEC_NUM_PARTITIONS_GEN3;
- } else if (stdev->gen == SWITCHTEC_GEN4) {
+ } else if (stdev->gen >= SWITCHTEC_GEN4) {
info.flash_length = ioread32(&fi->gen4.flash_length);
info.num_partitions = SWITCHTEC_NUM_PARTITIONS_GEN4;
} else {
@@ -870,7 +871,7 @@ static int ioctl_flash_part_info(struct switchtec_dev *stdev,
ret = flash_part_info_gen3(stdev, &info);
if (ret)
return ret;
- } else if (stdev->gen == SWITCHTEC_GEN4) {
+ } else if (stdev->gen >= SWITCHTEC_GEN4) {
ret = flash_part_info_gen4(stdev, &info);
if (ret)
return ret;
@@ -1308,13 +1309,6 @@ static void stdev_release(struct device *dev)
{
struct switchtec_dev *stdev = to_stdev(dev);
- if (stdev->dma_mrpc) {
- iowrite32(0, &stdev->mmio_mrpc->dma_en);
- flush_wc_buf(stdev);
- writeq(0, &stdev->mmio_mrpc->dma_addr);
- dma_free_coherent(&stdev->pdev->dev, sizeof(*stdev->dma_mrpc),
- stdev->dma_mrpc, stdev->dma_mrpc_dma_addr);
- }
kfree(stdev);
}
@@ -1327,18 +1321,18 @@ static void stdev_kill(struct switchtec_dev *stdev)
cancel_delayed_work_sync(&stdev->mrpc_timeout);
/* Mark the hardware as unavailable and complete all completions */
- mutex_lock(&stdev->mrpc_mutex);
- stdev->alive = false;
-
- /* Wake up and kill any users waiting on an MRPC request */
- list_for_each_entry_safe(stuser, tmpuser, &stdev->mrpc_queue, list) {
- stuser->cmd_done = true;
- wake_up_interruptible(&stuser->cmd_comp);
- list_del_init(&stuser->list);
- stuser_put(stuser);
- }
+ scoped_guard (mutex, &stdev->mrpc_mutex) {
+ stdev->alive = false;
+
+ /* Wake up and kill any users waiting on an MRPC request */
+ list_for_each_entry_safe(stuser, tmpuser, &stdev->mrpc_queue, list) {
+ stuser->cmd_done = true;
+ wake_up_interruptible(&stuser->cmd_comp);
+ list_del_init(&stuser->list);
+ stuser_put(stuser);
+ }
- mutex_unlock(&stdev->mrpc_mutex);
+ }
/* Wake up any users waiting on event_wq */
wake_up_interruptible(&stdev->event_wq);
@@ -1358,7 +1352,7 @@ static struct switchtec_dev *stdev_create(struct pci_dev *pdev)
return ERR_PTR(-ENOMEM);
stdev->alive = true;
- stdev->pdev = pdev;
+ stdev->pdev = pci_dev_get(pdev);
INIT_LIST_HEAD(&stdev->mrpc_queue);
mutex_init(&stdev->mrpc_mutex);
stdev->mrpc_busy = 0;
@@ -1370,7 +1364,7 @@ static struct switchtec_dev *stdev_create(struct pci_dev *pdev)
dev = &stdev->dev;
device_initialize(dev);
- dev->class = switchtec_class;
+ dev->class = &switchtec_class;
dev->parent = &pdev->dev;
dev->groups = switchtec_device_groups;
dev->release = stdev_release;
@@ -1391,6 +1385,7 @@ static struct switchtec_dev *stdev_create(struct pci_dev *pdev)
return stdev;
err_put:
+ pci_dev_put(stdev->pdev);
put_device(&stdev->dev);
return ERR_PTR(rc);
}
@@ -1610,7 +1605,7 @@ static int switchtec_init_pci(struct switchtec_dev *stdev,
if (stdev->gen == SWITCHTEC_GEN3)
part_id = &stdev->mmio_sys_info->gen3.partition_id;
- else if (stdev->gen == SWITCHTEC_GEN4)
+ else if (stdev->gen >= SWITCHTEC_GEN4)
part_id = &stdev->mmio_sys_info->gen4.partition_id;
else
return -EOPNOTSUPP;
@@ -1644,6 +1639,18 @@ static int switchtec_init_pci(struct switchtec_dev *stdev,
return 0;
}
+static void switchtec_exit_pci(struct switchtec_dev *stdev)
+{
+ if (stdev->dma_mrpc) {
+ iowrite32(0, &stdev->mmio_mrpc->dma_en);
+ flush_wc_buf(stdev);
+ writeq(0, &stdev->mmio_mrpc->dma_addr);
+ dma_free_coherent(&stdev->pdev->dev, sizeof(*stdev->dma_mrpc),
+ stdev->dma_mrpc, stdev->dma_mrpc_dma_addr);
+ stdev->dma_mrpc = NULL;
+ }
+}
+
static int switchtec_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
@@ -1666,7 +1673,7 @@ static int switchtec_pci_probe(struct pci_dev *pdev,
rc = switchtec_init_isr(stdev);
if (rc) {
dev_err(&stdev->dev, "failed to init isr.\n");
- goto err_put;
+ goto err_exit_pci;
}
iowrite32(SWITCHTEC_EVENT_CLEAR |
@@ -1687,6 +1694,8 @@ static int switchtec_pci_probe(struct pci_dev *pdev,
err_devadd:
stdev_kill(stdev);
+err_exit_pci:
+ switchtec_exit_pci(stdev);
err_put:
ida_free(&switchtec_minor_ida, MINOR(stdev->dev.devt));
put_device(&stdev->dev);
@@ -1703,6 +1712,9 @@ static void switchtec_pci_remove(struct pci_dev *pdev)
ida_free(&switchtec_minor_ida, MINOR(stdev->dev.devt));
dev_info(&stdev->dev, "unregistered.\n");
stdev_kill(stdev);
+ switchtec_exit_pci(stdev);
+ pci_dev_put(stdev->pdev);
+ stdev->pdev = NULL;
put_device(&stdev->dev);
}
@@ -1726,64 +1738,126 @@ static void switchtec_pci_remove(struct pci_dev *pdev)
.driver_data = gen, \
}
+#define SWITCHTEC_PCI100X_DEVICE(device_id, gen) \
+ { \
+ .vendor = PCI_VENDOR_ID_EFAR, \
+ .device = device_id, \
+ .subvendor = PCI_ANY_ID, \
+ .subdevice = PCI_ANY_ID, \
+ .class = (PCI_CLASS_MEMORY_OTHER << 8), \
+ .class_mask = 0xFFFFFFFF, \
+ .driver_data = gen, \
+ }, \
+ { \
+ .vendor = PCI_VENDOR_ID_EFAR, \
+ .device = device_id, \
+ .subvendor = PCI_ANY_ID, \
+ .subdevice = PCI_ANY_ID, \
+ .class = (PCI_CLASS_BRIDGE_OTHER << 8), \
+ .class_mask = 0xFFFFFFFF, \
+ .driver_data = gen, \
+ }
+
static const struct pci_device_id switchtec_pci_tbl[] = {
- SWITCHTEC_PCI_DEVICE(0x8531, SWITCHTEC_GEN3), //PFX 24xG3
- SWITCHTEC_PCI_DEVICE(0x8532, SWITCHTEC_GEN3), //PFX 32xG3
- SWITCHTEC_PCI_DEVICE(0x8533, SWITCHTEC_GEN3), //PFX 48xG3
- SWITCHTEC_PCI_DEVICE(0x8534, SWITCHTEC_GEN3), //PFX 64xG3
- SWITCHTEC_PCI_DEVICE(0x8535, SWITCHTEC_GEN3), //PFX 80xG3
- SWITCHTEC_PCI_DEVICE(0x8536, SWITCHTEC_GEN3), //PFX 96xG3
- SWITCHTEC_PCI_DEVICE(0x8541, SWITCHTEC_GEN3), //PSX 24xG3
- SWITCHTEC_PCI_DEVICE(0x8542, SWITCHTEC_GEN3), //PSX 32xG3
- SWITCHTEC_PCI_DEVICE(0x8543, SWITCHTEC_GEN3), //PSX 48xG3
- SWITCHTEC_PCI_DEVICE(0x8544, SWITCHTEC_GEN3), //PSX 64xG3
- SWITCHTEC_PCI_DEVICE(0x8545, SWITCHTEC_GEN3), //PSX 80xG3
- SWITCHTEC_PCI_DEVICE(0x8546, SWITCHTEC_GEN3), //PSX 96xG3
- SWITCHTEC_PCI_DEVICE(0x8551, SWITCHTEC_GEN3), //PAX 24XG3
- SWITCHTEC_PCI_DEVICE(0x8552, SWITCHTEC_GEN3), //PAX 32XG3
- SWITCHTEC_PCI_DEVICE(0x8553, SWITCHTEC_GEN3), //PAX 48XG3
- SWITCHTEC_PCI_DEVICE(0x8554, SWITCHTEC_GEN3), //PAX 64XG3
- SWITCHTEC_PCI_DEVICE(0x8555, SWITCHTEC_GEN3), //PAX 80XG3
- SWITCHTEC_PCI_DEVICE(0x8556, SWITCHTEC_GEN3), //PAX 96XG3
- SWITCHTEC_PCI_DEVICE(0x8561, SWITCHTEC_GEN3), //PFXL 24XG3
- SWITCHTEC_PCI_DEVICE(0x8562, SWITCHTEC_GEN3), //PFXL 32XG3
- SWITCHTEC_PCI_DEVICE(0x8563, SWITCHTEC_GEN3), //PFXL 48XG3
- SWITCHTEC_PCI_DEVICE(0x8564, SWITCHTEC_GEN3), //PFXL 64XG3
- SWITCHTEC_PCI_DEVICE(0x8565, SWITCHTEC_GEN3), //PFXL 80XG3
- SWITCHTEC_PCI_DEVICE(0x8566, SWITCHTEC_GEN3), //PFXL 96XG3
- SWITCHTEC_PCI_DEVICE(0x8571, SWITCHTEC_GEN3), //PFXI 24XG3
- SWITCHTEC_PCI_DEVICE(0x8572, SWITCHTEC_GEN3), //PFXI 32XG3
- SWITCHTEC_PCI_DEVICE(0x8573, SWITCHTEC_GEN3), //PFXI 48XG3
- SWITCHTEC_PCI_DEVICE(0x8574, SWITCHTEC_GEN3), //PFXI 64XG3
- SWITCHTEC_PCI_DEVICE(0x8575, SWITCHTEC_GEN3), //PFXI 80XG3
- SWITCHTEC_PCI_DEVICE(0x8576, SWITCHTEC_GEN3), //PFXI 96XG3
- SWITCHTEC_PCI_DEVICE(0x4000, SWITCHTEC_GEN4), //PFX 100XG4
- SWITCHTEC_PCI_DEVICE(0x4084, SWITCHTEC_GEN4), //PFX 84XG4
- SWITCHTEC_PCI_DEVICE(0x4068, SWITCHTEC_GEN4), //PFX 68XG4
- SWITCHTEC_PCI_DEVICE(0x4052, SWITCHTEC_GEN4), //PFX 52XG4
- SWITCHTEC_PCI_DEVICE(0x4036, SWITCHTEC_GEN4), //PFX 36XG4
- SWITCHTEC_PCI_DEVICE(0x4028, SWITCHTEC_GEN4), //PFX 28XG4
- SWITCHTEC_PCI_DEVICE(0x4100, SWITCHTEC_GEN4), //PSX 100XG4
- SWITCHTEC_PCI_DEVICE(0x4184, SWITCHTEC_GEN4), //PSX 84XG4
- SWITCHTEC_PCI_DEVICE(0x4168, SWITCHTEC_GEN4), //PSX 68XG4
- SWITCHTEC_PCI_DEVICE(0x4152, SWITCHTEC_GEN4), //PSX 52XG4
- SWITCHTEC_PCI_DEVICE(0x4136, SWITCHTEC_GEN4), //PSX 36XG4
- SWITCHTEC_PCI_DEVICE(0x4128, SWITCHTEC_GEN4), //PSX 28XG4
- SWITCHTEC_PCI_DEVICE(0x4200, SWITCHTEC_GEN4), //PAX 100XG4
- SWITCHTEC_PCI_DEVICE(0x4284, SWITCHTEC_GEN4), //PAX 84XG4
- SWITCHTEC_PCI_DEVICE(0x4268, SWITCHTEC_GEN4), //PAX 68XG4
- SWITCHTEC_PCI_DEVICE(0x4252, SWITCHTEC_GEN4), //PAX 52XG4
- SWITCHTEC_PCI_DEVICE(0x4236, SWITCHTEC_GEN4), //PAX 36XG4
- SWITCHTEC_PCI_DEVICE(0x4228, SWITCHTEC_GEN4), //PAX 28XG4
- SWITCHTEC_PCI_DEVICE(0x4352, SWITCHTEC_GEN4), //PFXA 52XG4
- SWITCHTEC_PCI_DEVICE(0x4336, SWITCHTEC_GEN4), //PFXA 36XG4
- SWITCHTEC_PCI_DEVICE(0x4328, SWITCHTEC_GEN4), //PFXA 28XG4
- SWITCHTEC_PCI_DEVICE(0x4452, SWITCHTEC_GEN4), //PSXA 52XG4
- SWITCHTEC_PCI_DEVICE(0x4436, SWITCHTEC_GEN4), //PSXA 36XG4
- SWITCHTEC_PCI_DEVICE(0x4428, SWITCHTEC_GEN4), //PSXA 28XG4
- SWITCHTEC_PCI_DEVICE(0x4552, SWITCHTEC_GEN4), //PAXA 52XG4
- SWITCHTEC_PCI_DEVICE(0x4536, SWITCHTEC_GEN4), //PAXA 36XG4
- SWITCHTEC_PCI_DEVICE(0x4528, SWITCHTEC_GEN4), //PAXA 28XG4
+ SWITCHTEC_PCI_DEVICE(0x8531, SWITCHTEC_GEN3), /* PFX 24xG3 */
+ SWITCHTEC_PCI_DEVICE(0x8532, SWITCHTEC_GEN3), /* PFX 32xG3 */
+ SWITCHTEC_PCI_DEVICE(0x8533, SWITCHTEC_GEN3), /* PFX 48xG3 */
+ SWITCHTEC_PCI_DEVICE(0x8534, SWITCHTEC_GEN3), /* PFX 64xG3 */
+ SWITCHTEC_PCI_DEVICE(0x8535, SWITCHTEC_GEN3), /* PFX 80xG3 */
+ SWITCHTEC_PCI_DEVICE(0x8536, SWITCHTEC_GEN3), /* PFX 96xG3 */
+ SWITCHTEC_PCI_DEVICE(0x8541, SWITCHTEC_GEN3), /* PSX 24xG3 */
+ SWITCHTEC_PCI_DEVICE(0x8542, SWITCHTEC_GEN3), /* PSX 32xG3 */
+ SWITCHTEC_PCI_DEVICE(0x8543, SWITCHTEC_GEN3), /* PSX 48xG3 */
+ SWITCHTEC_PCI_DEVICE(0x8544, SWITCHTEC_GEN3), /* PSX 64xG3 */
+ SWITCHTEC_PCI_DEVICE(0x8545, SWITCHTEC_GEN3), /* PSX 80xG3 */
+ SWITCHTEC_PCI_DEVICE(0x8546, SWITCHTEC_GEN3), /* PSX 96xG3 */
+ SWITCHTEC_PCI_DEVICE(0x8551, SWITCHTEC_GEN3), /* PAX 24XG3 */
+ SWITCHTEC_PCI_DEVICE(0x8552, SWITCHTEC_GEN3), /* PAX 32XG3 */
+ SWITCHTEC_PCI_DEVICE(0x8553, SWITCHTEC_GEN3), /* PAX 48XG3 */
+ SWITCHTEC_PCI_DEVICE(0x8554, SWITCHTEC_GEN3), /* PAX 64XG3 */
+ SWITCHTEC_PCI_DEVICE(0x8555, SWITCHTEC_GEN3), /* PAX 80XG3 */
+ SWITCHTEC_PCI_DEVICE(0x8556, SWITCHTEC_GEN3), /* PAX 96XG3 */
+ SWITCHTEC_PCI_DEVICE(0x8561, SWITCHTEC_GEN3), /* PFXL 24XG3 */
+ SWITCHTEC_PCI_DEVICE(0x8562, SWITCHTEC_GEN3), /* PFXL 32XG3 */
+ SWITCHTEC_PCI_DEVICE(0x8563, SWITCHTEC_GEN3), /* PFXL 48XG3 */
+ SWITCHTEC_PCI_DEVICE(0x8564, SWITCHTEC_GEN3), /* PFXL 64XG3 */
+ SWITCHTEC_PCI_DEVICE(0x8565, SWITCHTEC_GEN3), /* PFXL 80XG3 */
+ SWITCHTEC_PCI_DEVICE(0x8566, SWITCHTEC_GEN3), /* PFXL 96XG3 */
+ SWITCHTEC_PCI_DEVICE(0x8571, SWITCHTEC_GEN3), /* PFXI 24XG3 */
+ SWITCHTEC_PCI_DEVICE(0x8572, SWITCHTEC_GEN3), /* PFXI 32XG3 */
+ SWITCHTEC_PCI_DEVICE(0x8573, SWITCHTEC_GEN3), /* PFXI 48XG3 */
+ SWITCHTEC_PCI_DEVICE(0x8574, SWITCHTEC_GEN3), /* PFXI 64XG3 */
+ SWITCHTEC_PCI_DEVICE(0x8575, SWITCHTEC_GEN3), /* PFXI 80XG3 */
+ SWITCHTEC_PCI_DEVICE(0x8576, SWITCHTEC_GEN3), /* PFXI 96XG3 */
+ SWITCHTEC_PCI_DEVICE(0x4000, SWITCHTEC_GEN4), /* PFX 100XG4 */
+ SWITCHTEC_PCI_DEVICE(0x4084, SWITCHTEC_GEN4), /* PFX 84XG4 */
+ SWITCHTEC_PCI_DEVICE(0x4068, SWITCHTEC_GEN4), /* PFX 68XG4 */
+ SWITCHTEC_PCI_DEVICE(0x4052, SWITCHTEC_GEN4), /* PFX 52XG4 */
+ SWITCHTEC_PCI_DEVICE(0x4036, SWITCHTEC_GEN4), /* PFX 36XG4 */
+ SWITCHTEC_PCI_DEVICE(0x4028, SWITCHTEC_GEN4), /* PFX 28XG4 */
+ SWITCHTEC_PCI_DEVICE(0x4100, SWITCHTEC_GEN4), /* PSX 100XG4 */
+ SWITCHTEC_PCI_DEVICE(0x4184, SWITCHTEC_GEN4), /* PSX 84XG4 */
+ SWITCHTEC_PCI_DEVICE(0x4168, SWITCHTEC_GEN4), /* PSX 68XG4 */
+ SWITCHTEC_PCI_DEVICE(0x4152, SWITCHTEC_GEN4), /* PSX 52XG4 */
+ SWITCHTEC_PCI_DEVICE(0x4136, SWITCHTEC_GEN4), /* PSX 36XG4 */
+ SWITCHTEC_PCI_DEVICE(0x4128, SWITCHTEC_GEN4), /* PSX 28XG4 */
+ SWITCHTEC_PCI_DEVICE(0x4200, SWITCHTEC_GEN4), /* PAX 100XG4 */
+ SWITCHTEC_PCI_DEVICE(0x4284, SWITCHTEC_GEN4), /* PAX 84XG4 */
+ SWITCHTEC_PCI_DEVICE(0x4268, SWITCHTEC_GEN4), /* PAX 68XG4 */
+ SWITCHTEC_PCI_DEVICE(0x4252, SWITCHTEC_GEN4), /* PAX 52XG4 */
+ SWITCHTEC_PCI_DEVICE(0x4236, SWITCHTEC_GEN4), /* PAX 36XG4 */
+ SWITCHTEC_PCI_DEVICE(0x4228, SWITCHTEC_GEN4), /* PAX 28XG4 */
+ SWITCHTEC_PCI_DEVICE(0x4352, SWITCHTEC_GEN4), /* PFXA 52XG4 */
+ SWITCHTEC_PCI_DEVICE(0x4336, SWITCHTEC_GEN4), /* PFXA 36XG4 */
+ SWITCHTEC_PCI_DEVICE(0x4328, SWITCHTEC_GEN4), /* PFXA 28XG4 */
+ SWITCHTEC_PCI_DEVICE(0x4452, SWITCHTEC_GEN4), /* PSXA 52XG4 */
+ SWITCHTEC_PCI_DEVICE(0x4436, SWITCHTEC_GEN4), /* PSXA 36XG4 */
+ SWITCHTEC_PCI_DEVICE(0x4428, SWITCHTEC_GEN4), /* PSXA 28XG4 */
+ SWITCHTEC_PCI_DEVICE(0x4552, SWITCHTEC_GEN4), /* PAXA 52XG4 */
+ SWITCHTEC_PCI_DEVICE(0x4536, SWITCHTEC_GEN4), /* PAXA 36XG4 */
+ SWITCHTEC_PCI_DEVICE(0x4528, SWITCHTEC_GEN4), /* PAXA 28XG4 */
+ SWITCHTEC_PCI_DEVICE(0x5000, SWITCHTEC_GEN5), /* PFX 100XG5 */
+ SWITCHTEC_PCI_DEVICE(0x5084, SWITCHTEC_GEN5), /* PFX 84XG5 */
+ SWITCHTEC_PCI_DEVICE(0x5068, SWITCHTEC_GEN5), /* PFX 68XG5 */
+ SWITCHTEC_PCI_DEVICE(0x5052, SWITCHTEC_GEN5), /* PFX 52XG5 */
+ SWITCHTEC_PCI_DEVICE(0x5036, SWITCHTEC_GEN5), /* PFX 36XG5 */
+ SWITCHTEC_PCI_DEVICE(0x5028, SWITCHTEC_GEN5), /* PFX 28XG5 */
+ SWITCHTEC_PCI_DEVICE(0x5100, SWITCHTEC_GEN5), /* PSX 100XG5 */
+ SWITCHTEC_PCI_DEVICE(0x5184, SWITCHTEC_GEN5), /* PSX 84XG5 */
+ SWITCHTEC_PCI_DEVICE(0x5168, SWITCHTEC_GEN5), /* PSX 68XG5 */
+ SWITCHTEC_PCI_DEVICE(0x5152, SWITCHTEC_GEN5), /* PSX 52XG5 */
+ SWITCHTEC_PCI_DEVICE(0x5136, SWITCHTEC_GEN5), /* PSX 36XG5 */
+ SWITCHTEC_PCI_DEVICE(0x5128, SWITCHTEC_GEN5), /* PSX 28XG5 */
+ SWITCHTEC_PCI_DEVICE(0x5200, SWITCHTEC_GEN5), /* PAX 100XG5 */
+ SWITCHTEC_PCI_DEVICE(0x5284, SWITCHTEC_GEN5), /* PAX 84XG5 */
+ SWITCHTEC_PCI_DEVICE(0x5268, SWITCHTEC_GEN5), /* PAX 68XG5 */
+ SWITCHTEC_PCI_DEVICE(0x5252, SWITCHTEC_GEN5), /* PAX 52XG5 */
+ SWITCHTEC_PCI_DEVICE(0x5236, SWITCHTEC_GEN5), /* PAX 36XG5 */
+ SWITCHTEC_PCI_DEVICE(0x5228, SWITCHTEC_GEN5), /* PAX 28XG5 */
+ SWITCHTEC_PCI_DEVICE(0x5300, SWITCHTEC_GEN5), /* PFXA 100XG5 */
+ SWITCHTEC_PCI_DEVICE(0x5384, SWITCHTEC_GEN5), /* PFXA 84XG5 */
+ SWITCHTEC_PCI_DEVICE(0x5368, SWITCHTEC_GEN5), /* PFXA 68XG5 */
+ SWITCHTEC_PCI_DEVICE(0x5352, SWITCHTEC_GEN5), /* PFXA 52XG5 */
+ SWITCHTEC_PCI_DEVICE(0x5336, SWITCHTEC_GEN5), /* PFXA 36XG5 */
+ SWITCHTEC_PCI_DEVICE(0x5328, SWITCHTEC_GEN5), /* PFXA 28XG5 */
+ SWITCHTEC_PCI_DEVICE(0x5400, SWITCHTEC_GEN5), /* PSXA 100XG5 */
+ SWITCHTEC_PCI_DEVICE(0x5484, SWITCHTEC_GEN5), /* PSXA 84XG5 */
+ SWITCHTEC_PCI_DEVICE(0x5468, SWITCHTEC_GEN5), /* PSXA 68XG5 */
+ SWITCHTEC_PCI_DEVICE(0x5452, SWITCHTEC_GEN5), /* PSXA 52XG5 */
+ SWITCHTEC_PCI_DEVICE(0x5436, SWITCHTEC_GEN5), /* PSXA 36XG5 */
+ SWITCHTEC_PCI_DEVICE(0x5428, SWITCHTEC_GEN5), /* PSXA 28XG5 */
+ SWITCHTEC_PCI_DEVICE(0x5500, SWITCHTEC_GEN5), /* PAXA 100XG5 */
+ SWITCHTEC_PCI_DEVICE(0x5584, SWITCHTEC_GEN5), /* PAXA 84XG5 */
+ SWITCHTEC_PCI_DEVICE(0x5568, SWITCHTEC_GEN5), /* PAXA 68XG5 */
+ SWITCHTEC_PCI_DEVICE(0x5552, SWITCHTEC_GEN5), /* PAXA 52XG5 */
+ SWITCHTEC_PCI_DEVICE(0x5536, SWITCHTEC_GEN5), /* PAXA 36XG5 */
+ SWITCHTEC_PCI_DEVICE(0x5528, SWITCHTEC_GEN5), /* PAXA 28XG5 */
+ SWITCHTEC_PCI100X_DEVICE(0x1001, SWITCHTEC_GEN4), /* PCI1001 16XG4 */
+ SWITCHTEC_PCI100X_DEVICE(0x1002, SWITCHTEC_GEN4), /* PCI1002 12XG4 */
+ SWITCHTEC_PCI100X_DEVICE(0x1003, SWITCHTEC_GEN4), /* PCI1003 16XG4 */
+ SWITCHTEC_PCI100X_DEVICE(0x1004, SWITCHTEC_GEN4), /* PCI1004 16XG4 */
+ SWITCHTEC_PCI100X_DEVICE(0x1005, SWITCHTEC_GEN4), /* PCI1005 16XG4 */
+ SWITCHTEC_PCI100X_DEVICE(0x1006, SWITCHTEC_GEN4), /* PCI1006 16XG4 */
{0}
};
MODULE_DEVICE_TABLE(pci, switchtec_pci_tbl);
@@ -1804,11 +1878,9 @@ static int __init switchtec_init(void)
if (rc)
return rc;
- switchtec_class = class_create("switchtec");
- if (IS_ERR(switchtec_class)) {
- rc = PTR_ERR(switchtec_class);
+ rc = class_register(&switchtec_class);
+ if (rc)
goto err_create_class;
- }
rc = pci_register_driver(&switchtec_pci_driver);
if (rc)
@@ -1819,7 +1891,7 @@ static int __init switchtec_init(void)
return 0;
err_pci_register:
- class_destroy(switchtec_class);
+ class_unregister(&switchtec_class);
err_create_class:
unregister_chrdev_region(switchtec_devt, max_devices);
@@ -1831,7 +1903,7 @@ module_init(switchtec_init);
static void __exit switchtec_exit(void)
{
pci_unregister_driver(&switchtec_pci_driver);
- class_destroy(switchtec_class);
+ class_unregister(&switchtec_class);
unregister_chrdev_region(switchtec_devt, max_devices);
ida_destroy(&switchtec_minor_ida);
diff --git a/drivers/pci/syscall.c b/drivers/pci/syscall.c
index 61a6fe3cde21..803acbf33eb2 100644
--- a/drivers/pci/syscall.c
+++ b/drivers/pci/syscall.c
@@ -52,13 +52,13 @@ SYSCALL_DEFINE5(pciconfig_read, unsigned long, bus, unsigned long, dfn,
switch (len) {
case 1:
- err = put_user(byte, (unsigned char __user *)buf);
+ err = put_user(byte, (u8 __user *)buf);
break;
case 2:
- err = put_user(word, (unsigned short __user *)buf);
+ err = put_user(word, (u16 __user *)buf);
break;
case 4:
- err = put_user(dword, (unsigned int __user *)buf);
+ err = put_user(dword, (u32 __user *)buf);
break;
}
pci_dev_put(dev);
@@ -70,13 +70,13 @@ error:
they get instead of a machine check on x86. */
switch (len) {
case 1:
- put_user(-1, (unsigned char __user *)buf);
+ put_user(-1, (u8 __user *)buf);
break;
case 2:
- put_user(-1, (unsigned short __user *)buf);
+ put_user(-1, (u16 __user *)buf);
break;
case 4:
- put_user(-1, (unsigned int __user *)buf);
+ put_user(-1, (u32 __user *)buf);
break;
}
pci_dev_put(dev);
diff --git a/drivers/pci/tph.c b/drivers/pci/tph.c
new file mode 100644
index 000000000000..ca4f97be7538
--- /dev/null
+++ b/drivers/pci/tph.c
@@ -0,0 +1,516 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * TPH (TLP Processing Hints) support
+ *
+ * Copyright (C) 2024 Advanced Micro Devices, Inc.
+ * Eric Van Tassell <Eric.VanTassell@amd.com>
+ * Wei Huang <wei.huang2@amd.com>
+ */
+#include <linux/pci.h>
+#include <linux/pci-acpi.h>
+#include <linux/msi.h>
+#include <linux/bitfield.h>
+#include <linux/pci-tph.h>
+
+#include "pci.h"
+
+/* System-wide TPH disabled */
+static bool pci_tph_disabled;
+
+#ifdef CONFIG_ACPI
+/*
+ * The st_info union defines the Steering Tag (ST) info returned by the
+ * firmware PCI ACPI _DSM method (rev=0x7, func=0xF, "_DSM to Query Cache
+ * Locality TPH Features"), as specified in the approved ECN for PCI Firmware
+ * Spec and available at https://members.pcisig.com/wg/PCI-SIG/document/15470.
+ *
+ * @vm_st_valid: 8-bit ST for volatile memory is valid
+ * @vm_xst_valid: 16-bit extended ST for volatile memory is valid
+ * @vm_ph_ignore: 1 => PH was and will be ignored, 0 => PH should be supplied
+ * @vm_st: 8-bit ST for volatile mem
+ * @vm_xst: 16-bit extended ST for volatile mem
+ * @pm_st_valid: 8-bit ST for persistent memory is valid
+ * @pm_xst_valid: 16-bit extended ST for persistent memory is valid
+ * @pm_ph_ignore: 1 => PH was and will be ignored, 0 => PH should be supplied
+ * @pm_st: 8-bit ST for persistent mem
+ * @pm_xst: 16-bit extended ST for persistent mem
+ */
+union st_info {
+ struct {
+ u64 vm_st_valid : 1;
+ u64 vm_xst_valid : 1;
+ u64 vm_ph_ignore : 1;
+ u64 rsvd1 : 5;
+ u64 vm_st : 8;
+ u64 vm_xst : 16;
+ u64 pm_st_valid : 1;
+ u64 pm_xst_valid : 1;
+ u64 pm_ph_ignore : 1;
+ u64 rsvd2 : 5;
+ u64 pm_st : 8;
+ u64 pm_xst : 16;
+ };
+ u64 value;
+};
+
+static u16 tph_extract_tag(enum tph_mem_type mem_type, u8 req_type,
+ union st_info *info)
+{
+ switch (req_type) {
+ case PCI_TPH_REQ_TPH_ONLY: /* 8-bit tag */
+ switch (mem_type) {
+ case TPH_MEM_TYPE_VM:
+ if (info->vm_st_valid)
+ return info->vm_st;
+ break;
+ case TPH_MEM_TYPE_PM:
+ if (info->pm_st_valid)
+ return info->pm_st;
+ break;
+ }
+ break;
+ case PCI_TPH_REQ_EXT_TPH: /* 16-bit tag */
+ switch (mem_type) {
+ case TPH_MEM_TYPE_VM:
+ if (info->vm_xst_valid)
+ return info->vm_xst;
+ break;
+ case TPH_MEM_TYPE_PM:
+ if (info->pm_xst_valid)
+ return info->pm_xst;
+ break;
+ }
+ break;
+ default:
+ return 0;
+ }
+
+ return 0;
+}
+
+#define TPH_ST_DSM_FUNC_INDEX 0xF
+static acpi_status tph_invoke_dsm(acpi_handle handle, u32 cpu_uid,
+ union st_info *st_out)
+{
+ union acpi_object arg3[3], in_obj, *out_obj;
+
+ if (!acpi_check_dsm(handle, &pci_acpi_dsm_guid, 7,
+ BIT(TPH_ST_DSM_FUNC_INDEX)))
+ return AE_ERROR;
+
+ /* DWORD: feature ID (0 for processor cache ST query) */
+ arg3[0].integer.type = ACPI_TYPE_INTEGER;
+ arg3[0].integer.value = 0;
+
+ /* DWORD: target UID */
+ arg3[1].integer.type = ACPI_TYPE_INTEGER;
+ arg3[1].integer.value = cpu_uid;
+
+ /* QWORD: properties, all 0's */
+ arg3[2].integer.type = ACPI_TYPE_INTEGER;
+ arg3[2].integer.value = 0;
+
+ in_obj.type = ACPI_TYPE_PACKAGE;
+ in_obj.package.count = ARRAY_SIZE(arg3);
+ in_obj.package.elements = arg3;
+
+ out_obj = acpi_evaluate_dsm(handle, &pci_acpi_dsm_guid, 7,
+ TPH_ST_DSM_FUNC_INDEX, &in_obj);
+ if (!out_obj)
+ return AE_ERROR;
+
+ if (out_obj->type != ACPI_TYPE_BUFFER) {
+ ACPI_FREE(out_obj);
+ return AE_ERROR;
+ }
+
+ st_out->value = *((u64 *)(out_obj->buffer.pointer));
+
+ ACPI_FREE(out_obj);
+
+ return AE_OK;
+}
+#endif
+
+/* Update the TPH Requester Enable field of TPH Control Register */
+static void set_ctrl_reg_req_en(struct pci_dev *pdev, u8 req_type)
+{
+ u32 reg;
+
+ pci_read_config_dword(pdev, pdev->tph_cap + PCI_TPH_CTRL, &reg);
+
+ reg &= ~PCI_TPH_CTRL_REQ_EN_MASK;
+ reg |= FIELD_PREP(PCI_TPH_CTRL_REQ_EN_MASK, req_type);
+
+ pci_write_config_dword(pdev, pdev->tph_cap + PCI_TPH_CTRL, reg);
+}
+
+static u8 get_st_modes(struct pci_dev *pdev)
+{
+ u32 reg;
+
+ pci_read_config_dword(pdev, pdev->tph_cap + PCI_TPH_CAP, &reg);
+ reg &= PCI_TPH_CAP_ST_NS | PCI_TPH_CAP_ST_IV | PCI_TPH_CAP_ST_DS;
+
+ return reg;
+}
+
+/**
+ * pcie_tph_get_st_table_loc - Return the device's ST table location
+ * @pdev: PCI device to query
+ *
+ * Return:
+ * PCI_TPH_LOC_NONE - Not present
+ * PCI_TPH_LOC_CAP - Located in the TPH Requester Extended Capability
+ * PCI_TPH_LOC_MSIX - Located in the MSI-X Table
+ */
+u32 pcie_tph_get_st_table_loc(struct pci_dev *pdev)
+{
+ u32 reg;
+
+ pci_read_config_dword(pdev, pdev->tph_cap + PCI_TPH_CAP, &reg);
+
+ return FIELD_GET(PCI_TPH_CAP_LOC_MASK, reg);
+}
+EXPORT_SYMBOL(pcie_tph_get_st_table_loc);
+
+/*
+ * Return the size of the ST table. If the ST table is not in the TPH Requester
+ * Extended Capability space, return 0. Otherwise return the ST Table Size + 1.
+ */
+u16 pcie_tph_get_st_table_size(struct pci_dev *pdev)
+{
+ u32 reg;
+ u32 loc;
+
+ /* Check ST table location first */
+ loc = pcie_tph_get_st_table_loc(pdev);
+
+ /* Convert loc to match with PCI_TPH_LOC_* defined in pci_regs.h */
+ loc = FIELD_PREP(PCI_TPH_CAP_LOC_MASK, loc);
+ if (loc != PCI_TPH_LOC_CAP)
+ return 0;
+
+ pci_read_config_dword(pdev, pdev->tph_cap + PCI_TPH_CAP, &reg);
+
+ return FIELD_GET(PCI_TPH_CAP_ST_MASK, reg) + 1;
+}
+EXPORT_SYMBOL(pcie_tph_get_st_table_size);
+
+/* Return device's Root Port completer capability */
+static u8 get_rp_completer_type(struct pci_dev *pdev)
+{
+ struct pci_dev *rp;
+ u32 reg;
+ int ret;
+
+ rp = pcie_find_root_port(pdev);
+ if (!rp)
+ return 0;
+
+ ret = pcie_capability_read_dword(rp, PCI_EXP_DEVCAP2, &reg);
+ if (ret)
+ return 0;
+
+ return FIELD_GET(PCI_EXP_DEVCAP2_TPH_COMP_MASK, reg);
+}
+
+/* Write tag to ST table - Return 0 if OK, otherwise -errno */
+static int write_tag_to_st_table(struct pci_dev *pdev, int index, u16 tag)
+{
+ int st_table_size;
+ int offset;
+
+ /* Check if index is out of bound */
+ st_table_size = pcie_tph_get_st_table_size(pdev);
+ if (index >= st_table_size)
+ return -ENXIO;
+
+ offset = pdev->tph_cap + PCI_TPH_BASE_SIZEOF + index * sizeof(u16);
+
+ return pci_write_config_word(pdev, offset, tag);
+}
+
+/**
+ * pcie_tph_get_cpu_st() - Retrieve Steering Tag for a target memory associated
+ * with a specific CPU
+ * @pdev: PCI device
+ * @mem_type: target memory type (volatile or persistent RAM)
+ * @cpu_uid: associated CPU id
+ * @tag: Steering Tag to be returned
+ *
+ * Return the Steering Tag for a target memory that is associated with a
+ * specific CPU as indicated by cpu_uid.
+ *
+ * Return: 0 if success, otherwise negative value (-errno)
+ */
+int pcie_tph_get_cpu_st(struct pci_dev *pdev, enum tph_mem_type mem_type,
+ unsigned int cpu_uid, u16 *tag)
+{
+#ifdef CONFIG_ACPI
+ struct pci_dev *rp;
+ acpi_handle rp_acpi_handle;
+ union st_info info;
+
+ rp = pcie_find_root_port(pdev);
+ if (!rp || !rp->bus || !rp->bus->bridge)
+ return -ENODEV;
+
+ rp_acpi_handle = ACPI_HANDLE(rp->bus->bridge);
+
+ if (tph_invoke_dsm(rp_acpi_handle, cpu_uid, &info) != AE_OK) {
+ *tag = 0;
+ return -EINVAL;
+ }
+
+ *tag = tph_extract_tag(mem_type, pdev->tph_req_type, &info);
+
+ pci_dbg(pdev, "get steering tag: mem_type=%s, cpu_uid=%d, tag=%#04x\n",
+ (mem_type == TPH_MEM_TYPE_VM) ? "volatile" : "persistent",
+ cpu_uid, *tag);
+
+ return 0;
+#else
+ return -ENODEV;
+#endif
+}
+EXPORT_SYMBOL(pcie_tph_get_cpu_st);
+
+/**
+ * pcie_tph_set_st_entry() - Set Steering Tag in the ST table entry
+ * @pdev: PCI device
+ * @index: ST table entry index
+ * @tag: Steering Tag to be written
+ *
+ * Figure out the proper location of ST table, either in the MSI-X table or
+ * in the TPH Extended Capability space, and write the Steering Tag into
+ * the ST entry pointed by index.
+ *
+ * Return: 0 if success, otherwise negative value (-errno)
+ */
+int pcie_tph_set_st_entry(struct pci_dev *pdev, unsigned int index, u16 tag)
+{
+ u32 loc;
+ int err = 0;
+
+ if (!pdev->tph_cap)
+ return -EINVAL;
+
+ if (!pdev->tph_enabled)
+ return -EINVAL;
+
+ /* No need to write tag if device is in "No ST Mode" */
+ if (pdev->tph_mode == PCI_TPH_ST_NS_MODE)
+ return 0;
+
+ /*
+ * Disable TPH before updating ST to avoid potential instability as
+ * cautioned in PCIe r6.2, sec 6.17.3, "ST Modes of Operation"
+ */
+ set_ctrl_reg_req_en(pdev, PCI_TPH_REQ_DISABLE);
+
+ loc = pcie_tph_get_st_table_loc(pdev);
+ /* Convert loc to match with PCI_TPH_LOC_* */
+ loc = FIELD_PREP(PCI_TPH_CAP_LOC_MASK, loc);
+
+ switch (loc) {
+ case PCI_TPH_LOC_MSIX:
+ err = pci_msix_write_tph_tag(pdev, index, tag);
+ break;
+ case PCI_TPH_LOC_CAP:
+ err = write_tag_to_st_table(pdev, index, tag);
+ break;
+ default:
+ err = -EINVAL;
+ }
+
+ if (err) {
+ pcie_disable_tph(pdev);
+ return err;
+ }
+
+ set_ctrl_reg_req_en(pdev, pdev->tph_req_type);
+
+ pci_dbg(pdev, "set steering tag: %s table, index=%d, tag=%#04x\n",
+ (loc == PCI_TPH_LOC_MSIX) ? "MSI-X" : "ST", index, tag);
+
+ return 0;
+}
+EXPORT_SYMBOL(pcie_tph_set_st_entry);
+
+/**
+ * pcie_disable_tph - Turn off TPH support for device
+ * @pdev: PCI device
+ *
+ * Return: none
+ */
+void pcie_disable_tph(struct pci_dev *pdev)
+{
+ if (!pdev->tph_cap)
+ return;
+
+ if (!pdev->tph_enabled)
+ return;
+
+ pci_write_config_dword(pdev, pdev->tph_cap + PCI_TPH_CTRL, 0);
+
+ pdev->tph_mode = 0;
+ pdev->tph_req_type = 0;
+ pdev->tph_enabled = 0;
+}
+EXPORT_SYMBOL(pcie_disable_tph);
+
+/**
+ * pcie_enable_tph - Enable TPH support for device using a specific ST mode
+ * @pdev: PCI device
+ * @mode: ST mode to enable. Current supported modes include:
+ *
+ * - PCI_TPH_ST_NS_MODE: NO ST Mode
+ * - PCI_TPH_ST_IV_MODE: Interrupt Vector Mode
+ * - PCI_TPH_ST_DS_MODE: Device Specific Mode
+ *
+ * Check whether the mode is actually supported by the device before enabling
+ * and return an error if not. Additionally determine what types of requests,
+ * TPH or extended TPH, can be issued by the device based on its TPH requester
+ * capability and the Root Port's completer capability.
+ *
+ * Return: 0 on success, otherwise negative value (-errno)
+ */
+int pcie_enable_tph(struct pci_dev *pdev, int mode)
+{
+ u32 reg;
+ u8 dev_modes;
+ u8 rp_req_type;
+
+ /* Honor "notph" kernel parameter */
+ if (pci_tph_disabled)
+ return -EINVAL;
+
+ if (!pdev->tph_cap)
+ return -EINVAL;
+
+ if (pdev->tph_enabled)
+ return -EBUSY;
+
+ /* Sanitize and check ST mode compatibility */
+ mode &= PCI_TPH_CTRL_MODE_SEL_MASK;
+ dev_modes = get_st_modes(pdev);
+ if (!((1 << mode) & dev_modes))
+ return -EINVAL;
+
+ pdev->tph_mode = mode;
+
+ /* Get req_type supported by device and its Root Port */
+ pci_read_config_dword(pdev, pdev->tph_cap + PCI_TPH_CAP, &reg);
+ if (FIELD_GET(PCI_TPH_CAP_EXT_TPH, reg))
+ pdev->tph_req_type = PCI_TPH_REQ_EXT_TPH;
+ else
+ pdev->tph_req_type = PCI_TPH_REQ_TPH_ONLY;
+
+ rp_req_type = get_rp_completer_type(pdev);
+
+ /* Final req_type is the smaller of the two values */
+ pdev->tph_req_type = min(pdev->tph_req_type, rp_req_type);
+
+ if (pdev->tph_req_type == PCI_TPH_REQ_DISABLE)
+ return -EINVAL;
+
+ /* Write them into TPH control register */
+ pci_read_config_dword(pdev, pdev->tph_cap + PCI_TPH_CTRL, &reg);
+
+ reg &= ~PCI_TPH_CTRL_MODE_SEL_MASK;
+ reg |= FIELD_PREP(PCI_TPH_CTRL_MODE_SEL_MASK, pdev->tph_mode);
+
+ reg &= ~PCI_TPH_CTRL_REQ_EN_MASK;
+ reg |= FIELD_PREP(PCI_TPH_CTRL_REQ_EN_MASK, pdev->tph_req_type);
+
+ pci_write_config_dword(pdev, pdev->tph_cap + PCI_TPH_CTRL, reg);
+
+ pdev->tph_enabled = 1;
+
+ return 0;
+}
+EXPORT_SYMBOL(pcie_enable_tph);
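/*
 * Illustrative sketch, not part of the patch above: one way an endpoint driver
 * could combine the pcie_enable_tph(), pcie_tph_get_cpu_st() and
 * pcie_tph_set_st_entry() interfaces added by this file.  The function name,
 * the msix_idx/cpu_uid parameters and the error-handling policy are
 * assumptions for illustration; cpu_uid is the ACPI processor UID of the CPU
 * whose cache the device's DMA writes should target.
 */
#include <linux/pci.h>
#include <linux/pci-tph.h>

static int example_setup_tph(struct pci_dev *pdev, unsigned int msix_idx,
			     unsigned int cpu_uid)
{
	u16 tag;
	int rc;

	/* Select Interrupt Vector Mode; fails if the device lacks it */
	rc = pcie_enable_tph(pdev, PCI_TPH_ST_IV_MODE);
	if (rc)
		return rc;

	/* Ask firmware for the Steering Tag of volatile memory near cpu_uid */
	rc = pcie_tph_get_cpu_st(pdev, TPH_MEM_TYPE_VM, cpu_uid, &tag);
	if (rc)
		goto disable;

	/* Program the tag into the ST entry backing this MSI-X vector */
	rc = pcie_tph_set_st_entry(pdev, msix_idx, tag);
	if (rc)
		goto disable;

	return 0;

disable:
	pcie_disable_tph(pdev);
	return rc;
}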
+
+void pci_restore_tph_state(struct pci_dev *pdev)
+{
+ struct pci_cap_saved_state *save_state;
+ int num_entries, i, offset;
+ u16 *st_entry;
+ u32 *cap;
+
+ if (!pdev->tph_cap)
+ return;
+
+ if (!pdev->tph_enabled)
+ return;
+
+ save_state = pci_find_saved_ext_cap(pdev, PCI_EXT_CAP_ID_TPH);
+ if (!save_state)
+ return;
+
+ /* Restore control register and all ST entries */
+ cap = &save_state->cap.data[0];
+ pci_write_config_dword(pdev, pdev->tph_cap + PCI_TPH_CTRL, *cap++);
+ st_entry = (u16 *)cap;
+ offset = PCI_TPH_BASE_SIZEOF;
+ num_entries = pcie_tph_get_st_table_size(pdev);
+ for (i = 0; i < num_entries; i++) {
+ pci_write_config_word(pdev, pdev->tph_cap + offset,
+ *st_entry++);
+ offset += sizeof(u16);
+ }
+}
+
+void pci_save_tph_state(struct pci_dev *pdev)
+{
+ struct pci_cap_saved_state *save_state;
+ int num_entries, i, offset;
+ u16 *st_entry;
+ u32 *cap;
+
+ if (!pdev->tph_cap)
+ return;
+
+ if (!pdev->tph_enabled)
+ return;
+
+ save_state = pci_find_saved_ext_cap(pdev, PCI_EXT_CAP_ID_TPH);
+ if (!save_state)
+ return;
+
+ /* Save control register */
+ cap = &save_state->cap.data[0];
+ pci_read_config_dword(pdev, pdev->tph_cap + PCI_TPH_CTRL, cap++);
+
+ /* Save all ST entries in extended capability structure */
+ st_entry = (u16 *)cap;
+ offset = PCI_TPH_BASE_SIZEOF;
+ num_entries = pcie_tph_get_st_table_size(pdev);
+ for (i = 0; i < num_entries; i++) {
+ pci_read_config_word(pdev, pdev->tph_cap + offset,
+ st_entry++);
+ offset += sizeof(u16);
+ }
+}
+
+void pci_no_tph(void)
+{
+ pci_tph_disabled = true;
+
+ pr_info("PCIe TPH is disabled\n");
+}
+
+void pci_tph_init(struct pci_dev *pdev)
+{
+ int num_entries;
+ u32 save_size;
+
+ pdev->tph_cap = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_TPH);
+ if (!pdev->tph_cap)
+ return;
+
+ num_entries = pcie_tph_get_st_table_size(pdev);
+ save_size = sizeof(u32) + num_entries * sizeof(u16);
+ pci_add_ext_cap_save_buffer(pdev, PCI_EXT_CAP_ID_TPH, save_size);
+}
diff --git a/drivers/pci/tsm.c b/drivers/pci/tsm.c
new file mode 100644
index 000000000000..5fdcd7f2e820
--- /dev/null
+++ b/drivers/pci/tsm.c
@@ -0,0 +1,900 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Interface with platform TEE Security Manager (TSM) objects as defined by
+ * PCIe r7.0 section 11 TEE Device Interface Security Protocol (TDISP)
+ *
+ * Copyright(c) 2024-2025 Intel Corporation. All rights reserved.
+ */
+
+#define dev_fmt(fmt) "PCI/TSM: " fmt
+
+#include <linux/bitfield.h>
+#include <linux/pci.h>
+#include <linux/pci-doe.h>
+#include <linux/pci-tsm.h>
+#include <linux/sysfs.h>
+#include <linux/tsm.h>
+#include <linux/xarray.h>
+#include "pci.h"
+
+/*
+ * Provide a read/write lock against the init / exit of pdev tsm
+ * capabilities and arrival/departure of a TSM instance
+ */
+static DECLARE_RWSEM(pci_tsm_rwsem);
+
+/*
+ * Count of TSMs registered that support physical link operations vs device
+ * security state management.
+ */
+static int pci_tsm_link_count;
+static int pci_tsm_devsec_count;
+
+static const struct pci_tsm_ops *to_pci_tsm_ops(struct pci_tsm *tsm)
+{
+ return tsm->tsm_dev->pci_ops;
+}
+
+static inline bool is_dsm(struct pci_dev *pdev)
+{
+ return pdev->tsm && pdev->tsm->dsm_dev == pdev;
+}
+
+static inline bool has_tee(struct pci_dev *pdev)
+{
+ return pdev->devcap & PCI_EXP_DEVCAP_TEE;
+}
+
+/* 'struct pci_tsm_pf0' wraps 'struct pci_tsm' when ->dsm_dev == ->pdev (self) */
+static struct pci_tsm_pf0 *to_pci_tsm_pf0(struct pci_tsm *tsm)
+{
+ /*
+ * All "link" TSM contexts reference the device that hosts the DSM
+ * interface for a set of devices. Walk to the DSM device and cast its
+ * ->tsm context to a 'struct pci_tsm_pf0 *'.
+ */
+ struct pci_dev *pf0 = tsm->dsm_dev;
+
+ if (!is_pci_tsm_pf0(pf0) || !is_dsm(pf0)) {
+ pci_WARN_ONCE(tsm->pdev, 1, "invalid context object\n");
+ return NULL;
+ }
+
+ return container_of(pf0->tsm, struct pci_tsm_pf0, base_tsm);
+}
+
+static void tsm_remove(struct pci_tsm *tsm)
+{
+ struct pci_dev *pdev;
+
+ if (!tsm)
+ return;
+
+ pdev = tsm->pdev;
+ to_pci_tsm_ops(tsm)->remove(tsm);
+ pdev->tsm = NULL;
+}
+DEFINE_FREE(tsm_remove, struct pci_tsm *, if (_T) tsm_remove(_T))
+
+static void pci_tsm_walk_fns(struct pci_dev *pdev,
+ int (*cb)(struct pci_dev *pdev, void *data),
+ void *data)
+{
+ /* Walk subordinate physical functions */
+ for (int i = 0; i < 8; i++) {
+ struct pci_dev *pf __free(pci_dev_put) = pci_get_slot(
+ pdev->bus, PCI_DEVFN(PCI_SLOT(pdev->devfn), i));
+
+ if (!pf)
+ continue;
+
+ /* on entry function 0 has already run @cb */
+ if (i > 0)
+ cb(pf, data);
+
+ /* walk virtual functions of each pf */
+ for (int j = 0; j < pci_num_vf(pf); j++) {
+ struct pci_dev *vf __free(pci_dev_put) =
+ pci_get_domain_bus_and_slot(
+ pci_domain_nr(pf->bus),
+ pci_iov_virtfn_bus(pf, j),
+ pci_iov_virtfn_devfn(pf, j));
+
+ if (!vf)
+ continue;
+
+ cb(vf, data);
+ }
+ }
+
+ /*
+ * Walk downstream devices, assumes that an upstream DSM is
+ * limited to downstream physical functions
+ */
+ if (pci_pcie_type(pdev) == PCI_EXP_TYPE_UPSTREAM && is_dsm(pdev))
+ pci_walk_bus(pdev->subordinate, cb, data);
+}
+
+static void pci_tsm_walk_fns_reverse(struct pci_dev *pdev,
+ int (*cb)(struct pci_dev *pdev,
+ void *data),
+ void *data)
+{
+ /* Reverse walk downstream devices */
+ if (pci_pcie_type(pdev) == PCI_EXP_TYPE_UPSTREAM && is_dsm(pdev))
+ pci_walk_bus_reverse(pdev->subordinate, cb, data);
+
+ /* Reverse walk subordinate physical functions */
+ for (int i = 7; i >= 0; i--) {
+ struct pci_dev *pf __free(pci_dev_put) = pci_get_slot(
+ pdev->bus, PCI_DEVFN(PCI_SLOT(pdev->devfn), i));
+
+ if (!pf)
+ continue;
+
+ /* reverse walk virtual functions */
+ for (int j = pci_num_vf(pf) - 1; j >= 0; j--) {
+ struct pci_dev *vf __free(pci_dev_put) =
+ pci_get_domain_bus_and_slot(
+ pci_domain_nr(pf->bus),
+ pci_iov_virtfn_bus(pf, j),
+ pci_iov_virtfn_devfn(pf, j));
+
+ if (!vf)
+ continue;
+ cb(vf, data);
+ }
+
+ /* on exit, caller will run @cb on function 0 */
+ if (i > 0)
+ cb(pf, data);
+ }
+}
+
+static void link_sysfs_disable(struct pci_dev *pdev)
+{
+ sysfs_update_group(&pdev->dev.kobj, &pci_tsm_auth_attr_group);
+ sysfs_update_group(&pdev->dev.kobj, &pci_tsm_attr_group);
+}
+
+static void link_sysfs_enable(struct pci_dev *pdev)
+{
+ bool tee = has_tee(pdev);
+
+ pci_dbg(pdev, "%s Security Manager detected (%s%s%s)\n",
+ pdev->tsm ? "Device" : "Platform TEE",
+ pdev->ide_cap ? "IDE" : "", pdev->ide_cap && tee ? " " : "",
+ tee ? "TEE" : "");
+
+ sysfs_update_group(&pdev->dev.kobj, &pci_tsm_auth_attr_group);
+ sysfs_update_group(&pdev->dev.kobj, &pci_tsm_attr_group);
+}
+
+static int probe_fn(struct pci_dev *pdev, void *dsm)
+{
+ struct pci_dev *dsm_dev = dsm;
+ const struct pci_tsm_ops *ops = to_pci_tsm_ops(dsm_dev->tsm);
+
+ pdev->tsm = ops->probe(dsm_dev->tsm->tsm_dev, pdev);
+ pci_dbg(pdev, "setup TSM context: DSM: %s status: %s\n",
+ pci_name(dsm_dev), pdev->tsm ? "success" : "failed");
+ if (pdev->tsm)
+ link_sysfs_enable(pdev);
+ return 0;
+}
+
+static int pci_tsm_connect(struct pci_dev *pdev, struct tsm_dev *tsm_dev)
+{
+ int rc;
+ struct pci_tsm_pf0 *tsm_pf0;
+ const struct pci_tsm_ops *ops = tsm_dev->pci_ops;
+ struct pci_tsm *pci_tsm __free(tsm_remove) = ops->probe(tsm_dev, pdev);
+
+ /* connect() mutually exclusive with subfunction pci_tsm_init() */
+ lockdep_assert_held_write(&pci_tsm_rwsem);
+
+ if (!pci_tsm)
+ return -ENXIO;
+
+ pdev->tsm = pci_tsm;
+ tsm_pf0 = to_pci_tsm_pf0(pdev->tsm);
+
+ /* mutex_intr assumes connect() is always sysfs/user driven */
+ ACQUIRE(mutex_intr, lock)(&tsm_pf0->lock);
+ if ((rc = ACQUIRE_ERR(mutex_intr, &lock)))
+ return rc;
+
+ rc = ops->connect(pdev);
+ if (rc)
+ return rc;
+
+ pdev->tsm = no_free_ptr(pci_tsm);
+
+ /*
+ * Now that the DSM is established, probe() all the potential
+ * dependent functions. Failure to probe a function is not fatal
+ * to connect(), it just disables subsequent security operations
+ * for that function.
+ *
+ * Note this is done unconditionally, without regard to finding
+ * PCI_EXP_DEVCAP_TEE on the dependent function, for robustness. The DSM
+ * is the ultimate arbiter of security state relative to a given
+ * interface id, and if it says it can manage TDISP state of a function,
+ * let it.
+ */
+ if (has_tee(pdev))
+ pci_tsm_walk_fns(pdev, probe_fn, pdev);
+ return 0;
+}
+
+static ssize_t connect_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct tsm_dev *tsm_dev;
+ int rc;
+
+ ACQUIRE(rwsem_read_intr, lock)(&pci_tsm_rwsem);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &lock)))
+ return rc;
+
+ if (!pdev->tsm)
+ return sysfs_emit(buf, "\n");
+
+ tsm_dev = pdev->tsm->tsm_dev;
+ return sysfs_emit(buf, "%s\n", dev_name(&tsm_dev->dev));
+}
+
+/* Is @tsm_dev managing physical link / session properties... */
+static bool is_link_tsm(struct tsm_dev *tsm_dev)
+{
+ return tsm_dev && tsm_dev->pci_ops && tsm_dev->pci_ops->link_ops.probe;
+}
+
+/* ...or is @tsm_dev managing device security state? */
+static bool is_devsec_tsm(struct tsm_dev *tsm_dev)
+{
+ return tsm_dev && tsm_dev->pci_ops && tsm_dev->pci_ops->devsec_ops.lock;
+}
+
+static ssize_t connect_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ int rc, id;
+
+ rc = sscanf(buf, "tsm%d\n", &id);
+ if (rc != 1)
+ return -EINVAL;
+
+ ACQUIRE(rwsem_write_kill, lock)(&pci_tsm_rwsem);
+ if ((rc = ACQUIRE_ERR(rwsem_write_kill, &lock)))
+ return rc;
+
+ if (pdev->tsm)
+ return -EBUSY;
+
+ struct tsm_dev *tsm_dev __free(put_tsm_dev) = find_tsm_dev(id);
+ if (!is_link_tsm(tsm_dev))
+ return -ENXIO;
+
+ rc = pci_tsm_connect(pdev, tsm_dev);
+ if (rc)
+ return rc;
+ return len;
+}
+static DEVICE_ATTR_RW(connect);
+
+static int remove_fn(struct pci_dev *pdev, void *data)
+{
+ tsm_remove(pdev->tsm);
+ link_sysfs_disable(pdev);
+ return 0;
+}
+
+/*
+ * Note, this helper returns an error code and takes a @data argument only for
+ * compatibility with the pci_walk_bus() callback prototype. pci_tsm_unbind()
+ * always succeeds.
+ */
+static int __pci_tsm_unbind(struct pci_dev *pdev, void *data)
+{
+ struct pci_tdi *tdi;
+ struct pci_tsm_pf0 *tsm_pf0;
+
+ lockdep_assert_held(&pci_tsm_rwsem);
+
+ if (!pdev->tsm)
+ return 0;
+
+ tsm_pf0 = to_pci_tsm_pf0(pdev->tsm);
+ guard(mutex)(&tsm_pf0->lock);
+
+ tdi = pdev->tsm->tdi;
+ if (!tdi)
+ return 0;
+
+ to_pci_tsm_ops(pdev->tsm)->unbind(tdi);
+ pdev->tsm->tdi = NULL;
+
+ return 0;
+}
+
+void pci_tsm_unbind(struct pci_dev *pdev)
+{
+ guard(rwsem_read)(&pci_tsm_rwsem);
+ __pci_tsm_unbind(pdev, NULL);
+}
+EXPORT_SYMBOL_GPL(pci_tsm_unbind);
+
+/**
+ * pci_tsm_bind() - Bind @pdev as a TDI for @kvm
+ * @pdev: PCI device function to bind
+ * @kvm: Private memory attach context
+ * @tdi_id: Identifier (virtual BDF) for the TDI as referenced by the TSM and DSM
+ *
+ * Returns 0 on success, or a negative error code on failure.
+ *
+ * Context: Caller is responsible for constraining the bind lifetime to the
+ * registered state of the device. For example, pci_tsm_bind() /
+ * pci_tsm_unbind() limited to the VFIO driver bound state of the device.
+ */
+int pci_tsm_bind(struct pci_dev *pdev, struct kvm *kvm, u32 tdi_id)
+{
+ struct pci_tsm_pf0 *tsm_pf0;
+ struct pci_tdi *tdi;
+
+ if (!kvm)
+ return -EINVAL;
+
+ guard(rwsem_read)(&pci_tsm_rwsem);
+
+ if (!pdev->tsm)
+ return -EINVAL;
+
+ if (!is_link_tsm(pdev->tsm->tsm_dev))
+ return -ENXIO;
+
+ tsm_pf0 = to_pci_tsm_pf0(pdev->tsm);
+ guard(mutex)(&tsm_pf0->lock);
+
+ /* Resolve races to bind a TDI */
+ if (pdev->tsm->tdi) {
+ if (pdev->tsm->tdi->kvm != kvm)
+ return -EBUSY;
+ return 0;
+ }
+
+ tdi = to_pci_tsm_ops(pdev->tsm)->bind(pdev, kvm, tdi_id);
+ if (IS_ERR(tdi))
+ return PTR_ERR(tdi);
+
+ pdev->tsm->tdi = tdi;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pci_tsm_bind);
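/*
 * Illustrative sketch, not part of the patch above: a device-assignment driver
 * (VFIO-like) binding a connected function as a TDI while the guest's
 * private-memory context is attached, and unbinding it again on detach.  The
 * wrapper names and the origin of @kvm and @guest_bdf are assumptions; per the
 * kernel-doc above, bind/unbind must be confined to the driver-bound lifetime
 * of the device.
 */
static int example_attach_tdi(struct pci_dev *pdev, struct kvm *kvm,
			      u32 guest_bdf)
{
	/* Fails with -EINVAL if the DSM was never connected to a TSM */
	return pci_tsm_bind(pdev, kvm, guest_bdf);
}

static void example_detach_tdi(struct pci_dev *pdev)
{
	/* Harmless if no TDI is currently bound */
	pci_tsm_unbind(pdev);
}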
+
+/**
+ * pci_tsm_guest_req() - helper to marshal guest requests to the TSM driver
+ * @pdev: @pdev representing a bound tdi
+ * @scope: caller asserts this passthrough request is limited to TDISP operations
+ * @req_in: Input payload forwarded from the guest
+ * @in_len: Length of @req_in
+ * @req_out: Output payload buffer response to the guest
+ * @out_len: Length of @req_out on input, bytes filled in @req_out on output
+ * @tsm_code: Optional TSM arch specific result code for the guest TSM
+ *
+ * This is a common entry point for requests triggered by userspace KVM-exit
+ * service handlers responding to TDI information or state change requests. The
+ * scope parameter limits requests to TDISP state management, or limited debug.
+ * This path is only suitable for commands and results that are the host kernel
+ * has no use, the host is only facilitating guest to TSM communication.
+ *
+ * Returns 0 on success and -error on failure and positive "residue" on success
+ * but @req_out is filled with less then @out_len, or @req_out is NULL and a
+ * residue number of bytes were not consumed from @req_in. On success or
+ * failure @tsm_code may be populated with a TSM implementation specific result
+ * code for the guest to consume.
+ *
+ * Context: Caller is responsible for calling this within the pci_tsm_bind()
+ * state of the TDI.
+ */
+ssize_t pci_tsm_guest_req(struct pci_dev *pdev, enum pci_tsm_req_scope scope,
+ sockptr_t req_in, size_t in_len, sockptr_t req_out,
+ size_t out_len, u64 *tsm_code)
+{
+ struct pci_tsm_pf0 *tsm_pf0;
+ struct pci_tdi *tdi;
+ int rc;
+
+ /* Forbid requests that are not directly related to TDISP operations */
+ if (scope > PCI_TSM_REQ_STATE_CHANGE)
+ return -EINVAL;
+
+ ACQUIRE(rwsem_read_intr, lock)(&pci_tsm_rwsem);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &lock)))
+ return rc;
+
+ if (!pdev->tsm)
+ return -ENXIO;
+
+ if (!is_link_tsm(pdev->tsm->tsm_dev))
+ return -ENXIO;
+
+ tsm_pf0 = to_pci_tsm_pf0(pdev->tsm);
+ ACQUIRE(mutex_intr, ops_lock)(&tsm_pf0->lock);
+ if ((rc = ACQUIRE_ERR(mutex_intr, &ops_lock)))
+ return rc;
+
+ tdi = pdev->tsm->tdi;
+ if (!tdi)
+ return -ENXIO;
+ return to_pci_tsm_ops(pdev->tsm)->guest_req(tdi, scope, req_in, in_len,
+ req_out, out_len, tsm_code);
+}
+EXPORT_SYMBOL_GPL(pci_tsm_guest_req);
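/*
 * Illustrative sketch, not part of the patch above: forwarding a guest TDISP
 * state-change request whose payload still sits in userspace buffers handed to
 * a KVM-exit handler.  The wrapper itself is an assumption; the sockptr_t
 * arguments simply let pci_tsm_guest_req() accept kernel or user buffers alike.
 */
#include <linux/sockptr.h>

static ssize_t example_forward_guest_req(struct pci_dev *pdev,
					 void __user *in, size_t in_len,
					 void __user *out, size_t out_len,
					 u64 *tsm_code)
{
	return pci_tsm_guest_req(pdev, PCI_TSM_REQ_STATE_CHANGE,
				 USER_SOCKPTR(in), in_len,
				 USER_SOCKPTR(out), out_len, tsm_code);
}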
+
+static void pci_tsm_unbind_all(struct pci_dev *pdev)
+{
+ pci_tsm_walk_fns_reverse(pdev, __pci_tsm_unbind, NULL);
+ __pci_tsm_unbind(pdev, NULL);
+}
+
+static void __pci_tsm_disconnect(struct pci_dev *pdev)
+{
+ struct pci_tsm_pf0 *tsm_pf0 = to_pci_tsm_pf0(pdev->tsm);
+ const struct pci_tsm_ops *ops = to_pci_tsm_ops(pdev->tsm);
+
+ /* disconnect() mutually exclusive with subfunction pci_tsm_init() */
+ lockdep_assert_held_write(&pci_tsm_rwsem);
+
+ pci_tsm_unbind_all(pdev);
+
+ /*
+ * disconnect() is uninterruptible as it may be called for device
+ * teardown
+ */
+ guard(mutex)(&tsm_pf0->lock);
+ pci_tsm_walk_fns_reverse(pdev, remove_fn, NULL);
+ ops->disconnect(pdev);
+}
+
+static void pci_tsm_disconnect(struct pci_dev *pdev)
+{
+ __pci_tsm_disconnect(pdev);
+ tsm_remove(pdev->tsm);
+}
+
+static ssize_t disconnect_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t len)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct tsm_dev *tsm_dev;
+ int rc;
+
+ ACQUIRE(rwsem_write_kill, lock)(&pci_tsm_rwsem);
+ if ((rc = ACQUIRE_ERR(rwsem_write_kill, &lock)))
+ return rc;
+
+ if (!pdev->tsm)
+ return -ENXIO;
+
+ tsm_dev = pdev->tsm->tsm_dev;
+ if (!sysfs_streq(buf, dev_name(&tsm_dev->dev)))
+ return -EINVAL;
+
+ pci_tsm_disconnect(pdev);
+ return len;
+}
+static DEVICE_ATTR_WO(disconnect);
+
+static ssize_t bound_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct pci_tsm_pf0 *tsm_pf0;
+ struct pci_tsm *tsm;
+ int rc;
+
+ ACQUIRE(rwsem_read_intr, lock)(&pci_tsm_rwsem);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &lock)))
+ return rc;
+
+ tsm = pdev->tsm;
+ if (!tsm)
+ return sysfs_emit(buf, "\n");
+ tsm_pf0 = to_pci_tsm_pf0(tsm);
+
+ ACQUIRE(mutex_intr, ops_lock)(&tsm_pf0->lock);
+ if ((rc = ACQUIRE_ERR(mutex_intr, &ops_lock)))
+ return rc;
+
+ if (!tsm->tdi)
+ return sysfs_emit(buf, "\n");
+ return sysfs_emit(buf, "%s\n", dev_name(&tsm->tsm_dev->dev));
+}
+static DEVICE_ATTR_RO(bound);
+
+static ssize_t dsm_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct pci_tsm *tsm;
+ int rc;
+
+ ACQUIRE(rwsem_read_intr, lock)(&pci_tsm_rwsem);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &lock)))
+ return rc;
+
+ tsm = pdev->tsm;
+ if (!tsm)
+ return sysfs_emit(buf, "\n");
+
+ return sysfs_emit(buf, "%s\n", pci_name(tsm->dsm_dev));
+}
+static DEVICE_ATTR_RO(dsm);
+
+/* The 'authenticated' attribute is exclusive to the presence of a 'link' TSM */
+static bool pci_tsm_link_group_visible(struct kobject *kobj)
+{
+ struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
+
+ if (!pci_tsm_link_count)
+ return false;
+
+ if (!pci_is_pcie(pdev))
+ return false;
+
+ if (is_pci_tsm_pf0(pdev))
+ return true;
+
+ /*
+ * Show 'authenticated' and other attributes for the managed
+ * sub-functions of a DSM.
+ */
+ if (pdev->tsm)
+ return true;
+
+ return false;
+}
+DEFINE_SIMPLE_SYSFS_GROUP_VISIBLE(pci_tsm_link);
+
+/*
+ * 'link' and 'devsec' TSMs share the same 'tsm/' sysfs group, so the TSM type
+ * specific attributes need individual visibility checks.
+ */
+static umode_t pci_tsm_attr_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ if (pci_tsm_link_group_visible(kobj)) {
+ struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
+
+ if (attr == &dev_attr_bound.attr) {
+ if (is_pci_tsm_pf0(pdev) && has_tee(pdev))
+ return attr->mode;
+ if (pdev->tsm && has_tee(pdev->tsm->dsm_dev))
+ return attr->mode;
+ }
+
+ if (attr == &dev_attr_dsm.attr) {
+ if (is_pci_tsm_pf0(pdev))
+ return attr->mode;
+ if (pdev->tsm && has_tee(pdev->tsm->dsm_dev))
+ return attr->mode;
+ }
+
+ if (attr == &dev_attr_connect.attr ||
+ attr == &dev_attr_disconnect.attr) {
+ if (is_pci_tsm_pf0(pdev))
+ return attr->mode;
+ }
+ }
+
+ return 0;
+}
+
+static bool pci_tsm_group_visible(struct kobject *kobj)
+{
+ return pci_tsm_link_group_visible(kobj);
+}
+DEFINE_SYSFS_GROUP_VISIBLE(pci_tsm);
+
+static struct attribute *pci_tsm_attrs[] = {
+ &dev_attr_connect.attr,
+ &dev_attr_disconnect.attr,
+ &dev_attr_bound.attr,
+ &dev_attr_dsm.attr,
+ NULL
+};
+
+const struct attribute_group pci_tsm_attr_group = {
+ .name = "tsm",
+ .attrs = pci_tsm_attrs,
+ .is_visible = SYSFS_GROUP_VISIBLE(pci_tsm),
+};
+
+static ssize_t authenticated_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ /*
+ * When the SPDM session is established via the TSM, the 'authenticated'
+ * state of the device is identical to the connect state.
+ */
+ return connect_show(dev, attr, buf);
+}
+static DEVICE_ATTR_RO(authenticated);
+
+static struct attribute *pci_tsm_auth_attrs[] = {
+ &dev_attr_authenticated.attr,
+ NULL
+};
+
+const struct attribute_group pci_tsm_auth_attr_group = {
+ .attrs = pci_tsm_auth_attrs,
+ .is_visible = SYSFS_GROUP_VISIBLE(pci_tsm_link),
+};
+
+/*
+ * Retrieve the physical function 0 device, whether or not it has TEE capability
+ */
+static struct pci_dev *pf0_dev_get(struct pci_dev *pdev)
+{
+ struct pci_dev *pf_dev = pci_physfn(pdev);
+
+ if (PCI_FUNC(pf_dev->devfn) == 0)
+ return pci_dev_get(pf_dev);
+
+ return pci_get_slot(pf_dev->bus,
+ pf_dev->devfn - PCI_FUNC(pf_dev->devfn));
+}
+
+/*
+ * Find the PCI Device instance that serves as the Device Security Manager (DSM)
+ * for @pdev. Note that no additional reference is held for the resulting device
+ * because that resulting object always has a registered lifetime
+ * greater-than-or-equal to that of the @pdev argument. This is by virtue of
+ * @pdev being a descendant of, or identical to, the returned DSM device.
+ */
+static struct pci_dev *find_dsm_dev(struct pci_dev *pdev)
+{
+ struct device *grandparent;
+ struct pci_dev *uport;
+
+ if (is_pci_tsm_pf0(pdev))
+ return pdev;
+
+ struct pci_dev *pf0 __free(pci_dev_put) = pf0_dev_get(pdev);
+ if (!pf0)
+ return NULL;
+
+ if (is_dsm(pf0))
+ return pf0;
+
+ /*
+ * For cases where a switch may be hosting TDISP services on behalf of
+ * downstream devices, check the first upstream port relative to this
+ * endpoint.
+ */
+ if (!pdev->dev.parent)
+ return NULL;
+ grandparent = pdev->dev.parent->parent;
+ if (!grandparent)
+ return NULL;
+ if (!dev_is_pci(grandparent))
+ return NULL;
+ uport = to_pci_dev(grandparent);
+ if (!pci_is_pcie(uport) ||
+ pci_pcie_type(uport) != PCI_EXP_TYPE_UPSTREAM)
+ return NULL;
+
+ if (is_dsm(uport))
+ return uport;
+ return NULL;
+}
+
+/**
+ * pci_tsm_tdi_constructor() - base 'struct pci_tdi' initialization for link TSMs
+ * @pdev: PCI device function representing the TDI
+ * @tdi: context to initialize
+ * @kvm: Private memory attach context
+ * @tdi_id: Identifier (virtual BDF) for the TDI as referenced by the TSM and DSM
+ */
+void pci_tsm_tdi_constructor(struct pci_dev *pdev, struct pci_tdi *tdi,
+ struct kvm *kvm, u32 tdi_id)
+{
+ tdi->pdev = pdev;
+ tdi->kvm = kvm;
+ tdi->tdi_id = tdi_id;
+}
+EXPORT_SYMBOL_GPL(pci_tsm_tdi_constructor);
+
+/**
+ * pci_tsm_link_constructor() - base 'struct pci_tsm' initialization for link TSMs
+ * @pdev: The PCI device
+ * @tsm: context to initialize
+ * @tsm_dev: Platform TEE Security Manager, initiator of security operations
+ */
+int pci_tsm_link_constructor(struct pci_dev *pdev, struct pci_tsm *tsm,
+ struct tsm_dev *tsm_dev)
+{
+ if (!is_link_tsm(tsm_dev))
+ return -EINVAL;
+
+ tsm->dsm_dev = find_dsm_dev(pdev);
+ if (!tsm->dsm_dev) {
+ pci_warn(pdev, "failed to find Device Security Manager\n");
+ return -ENXIO;
+ }
+ tsm->pdev = pdev;
+ tsm->tsm_dev = tsm_dev;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pci_tsm_link_constructor);
+
+/**
+ * pci_tsm_pf0_constructor() - common 'struct pci_tsm_pf0' (DSM) initialization
+ * @pdev: Physical Function 0 PCI device (as indicated by is_pci_tsm_pf0())
+ * @tsm: context to initialize
+ * @tsm_dev: Platform TEE Security Manager, initiator of security operations
+ */
+int pci_tsm_pf0_constructor(struct pci_dev *pdev, struct pci_tsm_pf0 *tsm,
+ struct tsm_dev *tsm_dev)
+{
+ mutex_init(&tsm->lock);
+ tsm->doe_mb = pci_find_doe_mailbox(pdev, PCI_VENDOR_ID_PCI_SIG,
+ PCI_DOE_FEATURE_CMA);
+ if (!tsm->doe_mb) {
+ pci_warn(pdev, "TSM init failure, no CMA mailbox\n");
+ return -ENODEV;
+ }
+
+ return pci_tsm_link_constructor(pdev, &tsm->base_tsm, tsm_dev);
+}
+EXPORT_SYMBOL_GPL(pci_tsm_pf0_constructor);
+
+void pci_tsm_pf0_destructor(struct pci_tsm_pf0 *pf0_tsm)
+{
+ mutex_destroy(&pf0_tsm->lock);
+}
+EXPORT_SYMBOL_GPL(pci_tsm_pf0_destructor);
+
+int pci_tsm_register(struct tsm_dev *tsm_dev)
+{
+ struct pci_dev *pdev = NULL;
+
+ if (!tsm_dev)
+ return -EINVAL;
+
+ /* The TSM device must implement exactly one of link_ops or devsec_ops */
+ if (!is_link_tsm(tsm_dev) && !is_devsec_tsm(tsm_dev))
+ return -EINVAL;
+
+ if (is_link_tsm(tsm_dev) && is_devsec_tsm(tsm_dev))
+ return -EINVAL;
+
+ guard(rwsem_write)(&pci_tsm_rwsem);
+
+ /* On first enable, update sysfs groups */
+ if (is_link_tsm(tsm_dev) && pci_tsm_link_count++ == 0) {
+ for_each_pci_dev(pdev)
+ if (is_pci_tsm_pf0(pdev))
+ link_sysfs_enable(pdev);
+ } else if (is_devsec_tsm(tsm_dev)) {
+ pci_tsm_devsec_count++;
+ }
+
+ return 0;
+}
+
+static void pci_tsm_fn_exit(struct pci_dev *pdev)
+{
+ __pci_tsm_unbind(pdev, NULL);
+ tsm_remove(pdev->tsm);
+}
+
+/**
+ * __pci_tsm_destroy() - destroy the TSM context for @pdev
+ * @pdev: device to cleanup
+ * @tsm_dev: the TSM device being removed, or NULL if @pdev is being removed.
+ *
+ * At device removal or TSM unregistration all established context
+ * with the TSM is torn down. Additionally, if there are no more TSMs
+ * registered, the PCI tsm/ sysfs attributes are hidden.
+ */
+static void __pci_tsm_destroy(struct pci_dev *pdev, struct tsm_dev *tsm_dev)
+{
+ struct pci_tsm *tsm = pdev->tsm;
+
+ lockdep_assert_held_write(&pci_tsm_rwsem);
+
+ /*
+ * First, handle the TSM removal case to shut down @pdev sysfs. This is
+ * skipped if the device itself is being removed since sysfs goes away
+ * naturally at that point.
+ */
+ if (is_link_tsm(tsm_dev) && is_pci_tsm_pf0(pdev) && !pci_tsm_link_count)
+ link_sysfs_disable(pdev);
+
+ /* Nothing else to do if this device never attached to the departing TSM */
+ if (!tsm)
+ return;
+
+ /* Now lookup the tsm_dev to destroy TSM context */
+ if (!tsm_dev)
+ tsm_dev = tsm->tsm_dev;
+ else if (tsm_dev != tsm->tsm_dev)
+ return;
+
+ if (is_link_tsm(tsm_dev) && is_pci_tsm_pf0(pdev))
+ pci_tsm_disconnect(pdev);
+ else
+ pci_tsm_fn_exit(pdev);
+}
+
+void pci_tsm_destroy(struct pci_dev *pdev)
+{
+ guard(rwsem_write)(&pci_tsm_rwsem);
+ __pci_tsm_destroy(pdev, NULL);
+}
+
+void pci_tsm_init(struct pci_dev *pdev)
+{
+ guard(rwsem_read)(&pci_tsm_rwsem);
+
+ /*
+ * Sub-functions are probed either synchronously with connect(), or later
+ * when the SR-IOV configuration is changed, or, in the unlikely case,
+ * when connect() raced initial bus scanning.
+ */
+ if (pdev->tsm)
+ return;
+
+ if (pci_tsm_link_count) {
+ struct pci_dev *dsm = find_dsm_dev(pdev);
+
+ if (!dsm)
+ return;
+
+ /*
+ * The only path to init a Device Security Manager capable
+ * device is via connect().
+ */
+ if (!dsm->tsm)
+ return;
+
+ probe_fn(pdev, dsm);
+ }
+}
+
+void pci_tsm_unregister(struct tsm_dev *tsm_dev)
+{
+ struct pci_dev *pdev = NULL;
+
+ guard(rwsem_write)(&pci_tsm_rwsem);
+ if (is_link_tsm(tsm_dev))
+ pci_tsm_link_count--;
+ if (is_devsec_tsm(tsm_dev))
+ pci_tsm_devsec_count--;
+ for_each_pci_dev_reverse(pdev)
+ __pci_tsm_destroy(pdev, tsm_dev);
+}
+
+int pci_tsm_doe_transfer(struct pci_dev *pdev, u8 type, const void *req,
+ size_t req_sz, void *resp, size_t resp_sz)
+{
+ struct pci_tsm_pf0 *tsm;
+
+ if (!pdev->tsm || !is_pci_tsm_pf0(pdev))
+ return -ENXIO;
+
+ tsm = to_pci_tsm_pf0(pdev->tsm);
+ if (!tsm->doe_mb)
+ return -ENXIO;
+
+ return pci_doe(tsm->doe_mb, PCI_VENDOR_ID_PCI_SIG, type, req, req_sz,
+ resp, resp_sz);
+}
+EXPORT_SYMBOL_GPL(pci_tsm_doe_transfer);
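For orientation, here is a minimal sketch of how a low-level 'link' TSM driver might use the constructor helpers above when probing a DSM-capable function. Only pci_tsm_pf0_constructor(), pci_tsm_pf0_destructor() and the embedded struct pci_tsm_pf0 / struct pci_tsm come from this patch; the header path, the probe/remove callback shapes, and all mydrv_*() names are assumptions for illustration.

	#include <linux/pci.h>
	#include <linux/slab.h>
	#include <linux/container_of.h>
	/* Assumed header for the pci_tsm_*() declarations added by this patch */
	#include <linux/pci-tsm.h>

	struct mydrv_dsm {
		struct pci_tsm_pf0 pf0;	/* base DSM context from this patch */
		/* ... driver-private state ... */
	};

	/* Assumed callback shape; the real link_ops signature lives elsewhere */
	static struct pci_tsm *mydrv_tsm_probe(struct tsm_dev *tsm_dev,
					       struct pci_dev *pdev)
	{
		struct mydrv_dsm *dsm = kzalloc(sizeof(*dsm), GFP_KERNEL);

		if (!dsm)
			return NULL;

		/* Locates the CMA DOE mailbox and the DSM device, or fails */
		if (pci_tsm_pf0_constructor(pdev, &dsm->pf0, tsm_dev)) {
			kfree(dsm);
			return NULL;
		}

		return &dsm->pf0.base_tsm;
	}

	static void mydrv_tsm_remove(struct pci_tsm *tsm)
	{
		struct mydrv_dsm *dsm =
			container_of(tsm, struct mydrv_dsm, pf0.base_tsm);

		pci_tsm_pf0_destructor(&dsm->pf0);
		kfree(dsm);
	}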
diff --git a/drivers/pci/vc.c b/drivers/pci/vc.c
index 5fc59ac31145..a4ff7f5f66dd 100644
--- a/drivers/pci/vc.c
+++ b/drivers/pci/vc.c
@@ -6,6 +6,7 @@
* Author: Alex Williamson <alex.williamson@redhat.com>
*/
+#include <linux/bitfield.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -201,9 +202,9 @@ static int pci_vc_do_save_buffer(struct pci_dev *dev, int pos,
/* Extended VC Count (not counting VC0) */
evcc = cap1 & PCI_VC_CAP1_EVCC;
/* Low Priority Extended VC Count (not counting VC0) */
- lpevcc = (cap1 & PCI_VC_CAP1_LPEVCC) >> 4;
+ lpevcc = FIELD_GET(PCI_VC_CAP1_LPEVCC, cap1);
/* Port Arbitration Table Entry Size (bits) */
- parb_size = 1 << ((cap1 & PCI_VC_CAP1_ARB_SIZE) >> 10);
+ parb_size = 1 << FIELD_GET(PCI_VC_CAP1_ARB_SIZE, cap1);
/*
* Port VC Control Register contains VC Arbitration Select, which
@@ -231,7 +232,7 @@ static int pci_vc_do_save_buffer(struct pci_dev *dev, int pos,
int vcarb_offset;
pci_read_config_dword(dev, pos + PCI_VC_PORT_CAP2, &cap2);
- vcarb_offset = ((cap2 & PCI_VC_CAP2_ARB_OFF) >> 24) * 16;
+ vcarb_offset = FIELD_GET(PCI_VC_CAP2_ARB_OFF, cap2) * 16;
if (vcarb_offset) {
int size, vcarb_phases = 0;
@@ -277,7 +278,7 @@ static int pci_vc_do_save_buffer(struct pci_dev *dev, int pos,
pci_read_config_dword(dev, pos + PCI_VC_RES_CAP +
(i * PCI_CAP_VC_PER_VC_SIZEOF), &cap);
- parb_offset = ((cap & PCI_VC_RES_CAP_ARB_OFF) >> 24) * 16;
+ parb_offset = FIELD_GET(PCI_VC_RES_CAP_ARB_OFF, cap) * 16;
if (parb_offset) {
int size, parb_phases = 0;
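The FIELD_GET() conversions in this hunk are behavior-preserving: the macro derives the shift amount from the mask at compile time, so the hand-maintained shift constants can go away. A small illustrative helper, assuming the existing PCI_VC_CAP1_LPEVCC mask definition (bits 6:4, matching the '>> 4' being removed); the vc_lpevcc_example() name is hypothetical:

	#include <linux/bitfield.h>
	#include <linux/bug.h>
	#include <linux/pci.h>	/* for the PCI_VC_CAP1_* register masks */

	static u32 vc_lpevcc_example(u32 cap1)
	{
		/* Old open-coded form: mask, then shift by the field's low bit */
		u32 open_coded = (cap1 & PCI_VC_CAP1_LPEVCC) >> 4;

		/* New form: FIELD_GET() computes the shift from the mask itself */
		u32 with_field_get = FIELD_GET(PCI_VC_CAP1_LPEVCC, cap1);

		WARN_ON_ONCE(open_coded != with_field_get);	/* always equal */
		return with_field_get;
	}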
diff --git a/drivers/pci/vgaarb.c b/drivers/pci/vgaarb.c
index 5a696078b382..436fa7f4c387 100644
--- a/drivers/pci/vgaarb.c
+++ b/drivers/pci/vgaarb.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: MIT
/*
- * vgaarb.c: Implements the VGA arbitration. For details refer to
+ * vgaarb.c: Implements VGA arbitration. For details refer to
* Documentation/gpu/vgaarbiter.rst
*
* (C) Copyright 2005 Benjamin Herrenschmidt <benh@kernel.crashing.org>
@@ -30,22 +30,21 @@
#include <linux/vt.h>
#include <linux/console.h>
#include <linux/acpi.h>
-
#include <linux/uaccess.h>
-
#include <linux/vgaarb.h>
static void vga_arbiter_notify_clients(void);
+
/*
- * We keep a list of all vga devices in the system to speed
+ * We keep a list of all VGA devices in the system to speed
* up the various operations of the arbiter
*/
struct vga_device {
struct list_head list;
struct pci_dev *pdev;
- unsigned int decodes; /* what does it decodes */
- unsigned int owns; /* what does it owns */
- unsigned int locks; /* what does it locks */
+ unsigned int decodes; /* what it decodes */
+ unsigned int owns; /* what it owns */
+ unsigned int locks; /* what it locks */
unsigned int io_lock_cnt; /* legacy IO lock count */
unsigned int mem_lock_cnt; /* legacy MEM lock count */
unsigned int io_norm_cnt; /* normal IO count */
@@ -61,7 +60,6 @@ static bool vga_arbiter_used;
static DEFINE_SPINLOCK(vga_lock);
static DECLARE_WAIT_QUEUE_HEAD(vga_wait_queue);
-
static const char *vga_iostate_to_str(unsigned int iostate)
{
/* Ignore VGA_RSRC_IO and VGA_RSRC_MEM */
@@ -77,16 +75,18 @@ static const char *vga_iostate_to_str(unsigned int iostate)
return "none";
}
-static int vga_str_to_iostate(char *buf, int str_size, int *io_state)
+static int vga_str_to_iostate(char *buf, int str_size, unsigned int *io_state)
{
- /* we could in theory hand out locks on IO and mem
- * separately to userspace but it can cause deadlocks */
+ /*
+ * In theory, we could hand out locks on IO and MEM separately to
+ * userspace, but this can cause deadlocks.
+ */
if (strncmp(buf, "none", 4) == 0) {
*io_state = VGA_RSRC_NONE;
return 1;
}
- /* XXX We're not chekcing the str_size! */
+ /* XXX We're not checking the str_size! */
if (strncmp(buf, "io+mem", 6) == 0)
goto both;
else if (strncmp(buf, "io", 2) == 0)
@@ -99,7 +99,7 @@ both:
return 1;
}
-/* this is only used a cookie - it should not be dereferenced */
+/* This is only used as a cookie; it should not be dereferenced */
static struct pci_dev *vga_default;
/* Find somebody in our list */
@@ -116,20 +116,18 @@ static struct vga_device *vgadev_find(struct pci_dev *pdev)
/**
* vga_default_device - return the default VGA device, for vgacon
*
- * This can be defined by the platform. The default implementation
- * is rather dumb and will probably only work properly on single
- * vga card setups and/or x86 platforms.
+ * This can be defined by the platform. The default implementation is
+ * rather dumb and will probably only work properly on single VGA card
+ * setups and/or x86 platforms.
*
- * If your VGA default device is not PCI, you'll have to return
- * NULL here. In this case, I assume it will not conflict with
- * any PCI card. If this is not true, I'll have to define two archs
- * hooks for enabling/disabling the VGA default device if that is
- * possible. This may be a problem with real _ISA_ VGA cards, in
- * addition to a PCI one. I don't know at this point how to deal
- * with that card. Can theirs IOs be disabled at all ? If not, then
- * I suppose it's a matter of having the proper arch hook telling
- * us about it, so we basically never allow anybody to succeed a
- * vga_get()...
+ * If your VGA default device is not PCI, you'll have to return NULL here.
+ * In this case, I assume it will not conflict with any PCI card. If this
+ * is not true, I'll have to define two arch hooks for enabling/disabling
+ * the VGA default device if that is possible. This may be a problem with
+ * real _ISA_ VGA cards, in addition to a PCI one. I don't know at this
+ * point how to deal with that card. Can their IOs be disabled at all? If
+ * not, then I suppose it's a matter of having the proper arch hook telling
+ * us about it, so we basically never allow anybody to succeed a vga_get().
*/
struct pci_dev *vga_default_device(void)
{
@@ -147,14 +145,13 @@ void vga_set_default_device(struct pci_dev *pdev)
}
/**
- * vga_remove_vgacon - deactivete vga console
+ * vga_remove_vgacon - deactivate VGA console
*
- * Unbind and unregister vgacon in case pdev is the default vga
- * device. Can be called by gpu drivers on initialization to make
- * sure vga register access done by vgacon will not disturb the
- * device.
+ * Unbind and unregister vgacon in case pdev is the default VGA device.
+ * Can be called by GPU drivers on initialization to make sure VGA register
+ * access done by vgacon will not disturb the device.
*
- * @pdev: pci device.
+ * @pdev: PCI device.
*/
#if !defined(CONFIG_VGA_CONSOLE)
int vga_remove_vgacon(struct pci_dev *pdev)
@@ -193,14 +190,17 @@ int vga_remove_vgacon(struct pci_dev *pdev)
#endif
EXPORT_SYMBOL(vga_remove_vgacon);
-/* If we don't ever use VGA arb we should avoid
- turning off anything anywhere due to old X servers getting
- confused about the boot device not being VGA */
+/*
+ * If we don't ever use VGA arbitration, we should avoid turning off
+ * anything anywhere due to old X servers getting confused about the boot
+ * device not being VGA.
+ */
static void vga_check_first_use(void)
{
- /* we should inform all GPUs in the system that
- * VGA arb has occurred and to try and disable resources
- * if they can */
+ /*
+ * Inform all GPUs in the system that VGA arbitration has occurred
+ * so they can disable resources if possible.
+ */
if (!vga_arbiter_used) {
vga_arbiter_used = true;
vga_arbiter_notify_clients();
@@ -216,7 +216,8 @@ static struct vga_device *__vga_tryget(struct vga_device *vgadev,
unsigned int pci_bits;
u32 flags = 0;
- /* Account for "normal" resources to lock. If we decode the legacy,
+ /*
+ * Account for "normal" resources to lock. If we decode the legacy,
* counterpart, we need to request it as well
*/
if ((rsrc & VGA_RSRC_NORMAL_IO) &&
@@ -236,14 +237,15 @@ static struct vga_device *__vga_tryget(struct vga_device *vgadev,
if (wants == 0)
goto lock_them;
- /* We don't need to request a legacy resource, we just enable
- * appropriate decoding and go
+ /*
+ * We don't need to request a legacy resource, we just enable
+ * appropriate decoding and go.
*/
legacy_wants = wants & VGA_RSRC_LEGACY_MASK;
if (legacy_wants == 0)
goto enable_them;
- /* Ok, we don't, let's find out how we need to kick off */
+ /* Ok, we don't, let's find out who we need to kick off */
list_for_each_entry(conflict, &vga_list, list) {
unsigned int lwants = legacy_wants;
unsigned int change_bridge = 0;
@@ -252,39 +254,44 @@ static struct vga_device *__vga_tryget(struct vga_device *vgadev,
if (vgadev == conflict)
continue;
- /* We have a possible conflict. before we go further, we must
+ /*
+ * We have a possible conflict. Before we go further, we must
* check if we sit on the same bus as the conflicting device.
- * if we don't, then we must tie both IO and MEM resources
+ * If we don't, then we must tie both IO and MEM resources
* together since there is only a single bit controlling
- * VGA forwarding on P2P bridges
+ * VGA forwarding on P2P bridges.
*/
if (vgadev->pdev->bus != conflict->pdev->bus) {
change_bridge = 1;
lwants = VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM;
}
- /* Check if the guy has a lock on the resource. If he does,
- * return the conflicting entry
+ /*
+ * Check if the guy has a lock on the resource. If he does,
+ * return the conflicting entry.
*/
if (conflict->locks & lwants)
return conflict;
- /* Ok, now check if it owns the resource we want. We can
- * lock resources that are not decoded, therefore a device
+ /*
+ * Ok, now check if it owns the resource we want. We can
+ * lock resources that are not decoded; therefore a device
* can own resources it doesn't decode.
*/
match = lwants & conflict->owns;
if (!match)
continue;
- /* looks like he doesn't have a lock, we can steal
- * them from him
+ /*
+ * Looks like he doesn't have a lock, we can steal them
+ * from him.
*/
flags = 0;
pci_bits = 0;
- /* If we can't control legacy resources via the bridge, we
+ /*
+ * If we can't control legacy resources via the bridge, we
* also need to disable normal decoding.
*/
if (!conflict->bridge_has_one_vga) {
@@ -311,7 +318,8 @@ static struct vga_device *__vga_tryget(struct vga_device *vgadev,
}
enable_them:
- /* ok dude, we got it, everybody conflicting has been disabled, let's
+ /*
+ * Ok, we got it, everybody conflicting has been disabled, let's
* enable us. Mark any bits in "owns" regardless of whether we
* decoded them. We can lock resources we don't decode, therefore
* we must track them via "owns".
@@ -353,8 +361,9 @@ static void __vga_put(struct vga_device *vgadev, unsigned int rsrc)
vgaarb_dbg(dev, "%s\n", __func__);
- /* Update our counters, and account for equivalent legacy resources
- * if we decode them
+ /*
+ * Update our counters and account for equivalent legacy resources
+ * if we decode them.
*/
if ((rsrc & VGA_RSRC_NORMAL_IO) && vgadev->io_norm_cnt > 0) {
vgadev->io_norm_cnt--;
@@ -371,32 +380,34 @@ static void __vga_put(struct vga_device *vgadev, unsigned int rsrc)
if ((rsrc & VGA_RSRC_LEGACY_MEM) && vgadev->mem_lock_cnt > 0)
vgadev->mem_lock_cnt--;
- /* Just clear lock bits, we do lazy operations so we don't really
- * have to bother about anything else at this point
+ /*
+ * Just clear lock bits, we do lazy operations so we don't really
+ * have to bother about anything else at this point.
*/
if (vgadev->io_lock_cnt == 0)
vgadev->locks &= ~VGA_RSRC_LEGACY_IO;
if (vgadev->mem_lock_cnt == 0)
vgadev->locks &= ~VGA_RSRC_LEGACY_MEM;
- /* Kick the wait queue in case somebody was waiting if we actually
- * released something
+ /*
+ * Kick the wait queue in case somebody was waiting if we actually
+ * released something.
*/
if (old_locks != vgadev->locks)
wake_up_all(&vga_wait_queue);
}
/**
- * vga_get - acquire & locks VGA resources
- * @pdev: pci device of the VGA card or NULL for the system default
+ * vga_get - acquire & lock VGA resources
+ * @pdev: PCI device of the VGA card or NULL for the system default
* @rsrc: bit mask of resources to acquire and lock
* @interruptible: blocking should be interruptible by signals ?
*
- * This function acquires VGA resources for the given card and mark those
- * resources locked. If the resource requested are "normal" (and not legacy)
+ * Acquire VGA resources for the given card and mark those resources
+ * locked. If the resources requested are "normal" (and not legacy)
* resources, the arbiter will first check whether the card is doing legacy
- * decoding for that type of resource. If yes, the lock is "converted" into a
- * legacy resource lock.
+ * decoding for that type of resource. If yes, the lock is "converted" into
+ * a legacy resource lock.
*
* The arbiter will first look for all VGA cards that might conflict and disable
* their IOs and/or Memory access, including VGA forwarding on P2P bridges if
@@ -428,7 +439,7 @@ int vga_get(struct pci_dev *pdev, unsigned int rsrc, int interruptible)
int rc = 0;
vga_check_first_use();
- /* The one who calls us should check for this, but lets be sure... */
+ /* The caller should check for this, but let's be sure */
if (pdev == NULL)
pdev = vga_default_device();
if (pdev == NULL)
@@ -447,12 +458,12 @@ int vga_get(struct pci_dev *pdev, unsigned int rsrc, int interruptible)
if (conflict == NULL)
break;
-
- /* We have a conflict, we wait until somebody kicks the
+ /*
+ * We have a conflict; we wait until somebody kicks the
* work queue. Currently we have one work queue that we
* kick each time some resources are released, but it would
- * be fairly easy to have a per device one so that we only
- * need to attach to the conflicting device
+ * be fairly easy to have a per-device one so that we only
+ * need to attach to the conflicting device.
*/
init_waitqueue_entry(&wait, current);
add_wait_queue(&vga_wait_queue, &wait);
@@ -474,12 +485,12 @@ EXPORT_SYMBOL(vga_get);
/**
* vga_tryget - try to acquire & lock legacy VGA resources
- * @pdev: pci devivce of VGA card or NULL for system default
+ * @pdev: PCI device of VGA card or NULL for system default
* @rsrc: bit mask of resources to acquire and lock
*
- * This function performs the same operation as vga_get(), but will return an
- * error (-EBUSY) instead of blocking if the resources are already locked by
- * another card. It can be called in any context
+ * Perform the same operation as vga_get(), but return an error (-EBUSY)
+ * instead of blocking if the resources are already locked by another card.
+ * Can be called in any context.
*
* On success, release the VGA resource again with vga_put().
*
@@ -495,7 +506,7 @@ static int vga_tryget(struct pci_dev *pdev, unsigned int rsrc)
vga_check_first_use();
- /* The one who calls us should check for this, but lets be sure... */
+ /* The caller should check for this, but let's be sure */
if (pdev == NULL)
pdev = vga_default_device();
if (pdev == NULL)
@@ -515,20 +526,20 @@ bail:
/**
* vga_put - release lock on legacy VGA resources
- * @pdev: pci device of VGA card or NULL for system default
- * @rsrc: but mask of resource to release
+ * @pdev: PCI device of VGA card or NULL for system default
+ * @rsrc: bit mask of resource to release
*
- * This fuction releases resources previously locked by vga_get() or
- * vga_tryget(). The resources aren't disabled right away, so that a subsequence
- * vga_get() on the same card will succeed immediately. Resources have a
- * counter, so locks are only released if the counter reaches 0.
+ * Release resources previously locked by vga_get() or vga_tryget(). The
+ * resources aren't disabled right away, so that a subsequent vga_get() on
+ * the same card will succeed immediately. Resources have a counter, so
+ * locks are only released if the counter reaches 0.
*/
void vga_put(struct pci_dev *pdev, unsigned int rsrc)
{
struct vga_device *vgadev;
unsigned long flags;
- /* The one who calls us should check for this, but lets be sure... */
+ /* The caller should check for this, but let's be sure */
if (pdev == NULL)
pdev = vga_default_device();
if (pdev == NULL)
@@ -545,34 +556,11 @@ EXPORT_SYMBOL(vga_put);
static bool vga_is_firmware_default(struct pci_dev *pdev)
{
-#if defined(CONFIG_X86) || defined(CONFIG_IA64)
- u64 base = screen_info.lfb_base;
- u64 size = screen_info.lfb_size;
- struct resource *r;
- u64 limit;
-
- /* Select the device owning the boot framebuffer if there is one */
-
- if (screen_info.capabilities & VIDEO_CAPABILITY_64BIT_BASE)
- base |= (u64)screen_info.ext_lfb_base << 32;
-
- limit = base + size;
-
- /* Does firmware framebuffer belong to us? */
- pci_dev_for_each_resource(pdev, r) {
- if (resource_type(r) != IORESOURCE_MEM)
- continue;
-
- if (!r->start || !r->end)
- continue;
-
- if (base < r->start || limit >= r->end)
- continue;
-
- return true;
- }
-#endif
+#if defined(CONFIG_X86)
+ return pdev == screen_info_pci_dev(&screen_info);
+#else
return false;
+#endif
}
static bool vga_arb_integrated_gpu(struct device *dev)
@@ -665,7 +653,7 @@ static bool vga_is_boot_device(struct vga_device *vgadev)
}
/*
- * vgadev has neither IO nor MEM enabled. If we haven't found any
+ * vgadev has neither IO nor MEM enabled. If we haven't found any
* other VGA devices, it is the best candidate so far.
*/
if (!boot_vga)
@@ -696,20 +684,20 @@ static void vga_arbiter_check_bridge_sharing(struct vga_device *vgadev)
return;
}
- /* okay iterate the new devices bridge hierarachy */
+ /* Iterate the new device's bridge hierarchy */
new_bus = vgadev->pdev->bus;
while (new_bus) {
new_bridge = new_bus->self;
- /* go through list of devices already registered */
+ /* Go through list of devices already registered */
list_for_each_entry(same_bridge_vgadev, &vga_list, list) {
bus = same_bridge_vgadev->pdev->bus;
bridge = bus->self;
- /* see if the share a bridge with this device */
+ /* See if it shares a bridge with this device */
if (new_bridge == bridge) {
/*
- * If their direct parent bridge is the same
+ * If its direct parent bridge is the same
* as any bridge of this device then it can't
* be used for that device.
*/
@@ -717,10 +705,10 @@ static void vga_arbiter_check_bridge_sharing(struct vga_device *vgadev)
}
/*
- * Now iterate the previous devices bridge hierarchy.
- * If the new devices parent bridge is in the other
- * devices hierarchy then we can't use it to control
- * this device
+ * Now iterate the previous device's bridge hierarchy.
+ * If the new device's parent bridge is in the other
+ * device's hierarchy, we can't use it to control this
+ * device.
*/
while (bus) {
bridge = bus->self;
@@ -741,10 +729,9 @@ static void vga_arbiter_check_bridge_sharing(struct vga_device *vgadev)
}
/*
- * Currently, we assume that the "initial" setup of the system is
- * not sane, that is we come up with conflicting devices and let
- * the arbiter's client decides if devices decodes or not legacy
- * things.
+ * Currently, we assume that the "initial" setup of the system is not sane,
+ * that is, we come up with conflicting devices and let the arbiter's
+ * client decide whether devices decode legacy things or not.
*/
static bool vga_arbiter_add_pci_device(struct pci_dev *pdev)
{
@@ -754,16 +741,12 @@ static bool vga_arbiter_add_pci_device(struct pci_dev *pdev)
struct pci_dev *bridge;
u16 cmd;
- /* Only deal with VGA class devices */
- if ((pdev->class >> 8) != PCI_CLASS_DISPLAY_VGA)
- return false;
-
/* Allocate structure */
vgadev = kzalloc(sizeof(struct vga_device), GFP_KERNEL);
if (vgadev == NULL) {
vgaarb_err(&pdev->dev, "failed to allocate VGA arbiter data\n");
/*
- * What to do on allocation failure ? For now, let's just do
+ * What to do on allocation failure? For now, let's just do
* nothing, I'm not sure there is anything saner to be done.
*/
return false;
@@ -781,10 +764,12 @@ static bool vga_arbiter_add_pci_device(struct pci_dev *pdev)
vgadev->decodes = VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
- /* by default mark it as decoding */
+ /* By default, mark it as decoding */
vga_decode_count++;
- /* Mark that we "own" resources based on our enables, we will
- * clear that below if the bridge isn't forwarding
+
+ /*
+ * Mark that we "own" resources based on our enables, we will
+ * clear that below if the bridge isn't forwarding.
*/
pci_read_config_word(pdev, PCI_COMMAND, &cmd);
if (cmd & PCI_COMMAND_IO)
@@ -864,24 +849,23 @@ bail:
return ret;
}
-/* this is called with the lock */
-static inline void vga_update_device_decodes(struct vga_device *vgadev,
- int new_decodes)
+/* Called with the lock */
+static void vga_update_device_decodes(struct vga_device *vgadev,
+ unsigned int new_decodes)
{
struct device *dev = &vgadev->pdev->dev;
- int old_decodes, decodes_removed, decodes_unlocked;
+ unsigned int old_decodes = vgadev->decodes;
+ unsigned int decodes_removed = ~new_decodes & old_decodes;
+ unsigned int decodes_unlocked = vgadev->locks & decodes_removed;
- old_decodes = vgadev->decodes;
- decodes_removed = ~new_decodes & old_decodes;
- decodes_unlocked = vgadev->locks & decodes_removed;
vgadev->decodes = new_decodes;
- vgaarb_info(dev, "changed VGA decodes: olddecodes=%s,decodes=%s:owns=%s\n",
- vga_iostate_to_str(old_decodes),
- vga_iostate_to_str(vgadev->decodes),
- vga_iostate_to_str(vgadev->owns));
+ vgaarb_info(dev, "VGA decodes changed: olddecodes=%s,decodes=%s:owns=%s\n",
+ vga_iostate_to_str(old_decodes),
+ vga_iostate_to_str(vgadev->decodes),
+ vga_iostate_to_str(vgadev->owns));
- /* if we removed locked decodes, lock count goes to zero, and release */
+ /* If we removed locked decodes, lock count goes to zero, and release */
if (decodes_unlocked) {
if (decodes_unlocked & VGA_RSRC_LEGACY_IO)
vgadev->io_lock_cnt = 0;
@@ -890,7 +874,7 @@ static inline void vga_update_device_decodes(struct vga_device *vgadev,
__vga_put(vgadev, decodes_unlocked);
}
- /* change decodes counter */
+ /* Change decodes counter */
if (old_decodes & VGA_RSRC_LEGACY_MASK &&
!(new_decodes & VGA_RSRC_LEGACY_MASK))
vga_decode_count--;
@@ -914,16 +898,17 @@ static void __vga_set_legacy_decoding(struct pci_dev *pdev,
if (vgadev == NULL)
goto bail;
- /* don't let userspace futz with kernel driver decodes */
+ /* Don't let userspace futz with kernel driver decodes */
if (userspace && vgadev->set_decode)
goto bail;
- /* update the device decodes + counter */
+ /* Update the device decodes + counter */
vga_update_device_decodes(vgadev, decodes);
- /* XXX if somebody is going from "doesn't decode" to "decodes" state
- * here, additional care must be taken as we may have pending owner
- * ship of non-legacy region ...
+ /*
+ * XXX If somebody is going from "doesn't decode" to "decodes"
+ * state here, additional care must be taken as we may have pending
+ * ownership of a non-legacy region.
*/
bail:
spin_unlock_irqrestore(&vga_lock, flags);
@@ -931,10 +916,10 @@ bail:
/**
* vga_set_legacy_decoding
- * @pdev: pci device of the VGA card
+ * @pdev: PCI device of the VGA card
* @decodes: bit mask of what legacy regions the card decodes
*
- * Indicates to the arbiter if the card decodes legacy VGA IOs, legacy VGA
+ * Indicate to the arbiter if the card decodes legacy VGA IOs, legacy VGA
* Memory, both, or none. All cards default to both, the card driver (fbdev for
* example) should tell the arbiter if it has disabled legacy decoding, so the
* card can be left out of the arbitration process (and can be safe to take
@@ -948,47 +933,42 @@ EXPORT_SYMBOL(vga_set_legacy_decoding);
/**
* vga_client_register - register or unregister a VGA arbitration client
- * @pdev: pci device of the VGA client
- * @set_decode: vga decode change callback
+ * @pdev: PCI device of the VGA client
+ * @set_decode: VGA decode change callback
*
* Clients have two callback mechanisms they can use.
*
* @set_decode callback: If a client can disable its GPU VGA resource, it
* will get a callback from this to set the encode/decode state.
*
- * Rationale: we cannot disable VGA decode resources unconditionally some single
- * GPU laptops seem to require ACPI or BIOS access to the VGA registers to
- * control things like backlights etc. Hopefully newer multi-GPU laptops do
- * something saner, and desktops won't have any special ACPI for this. The
- * driver will get a callback when VGA arbitration is first used by userspace
- * since some older X servers have issues.
+ * Rationale: we cannot disable VGA decode resources unconditionally
+ * because some single GPU laptops seem to require ACPI or BIOS access to
+ * the VGA registers to control things like backlights etc. Hopefully newer
+ * multi-GPU laptops do something saner, and desktops won't have any
+ * special ACPI for this. The driver will get a callback when VGA
+ * arbitration is first used by userspace since some older X servers have
+ * issues.
*
- * This function does not check whether a client for @pdev has been registered
- * already.
+ * Does not check whether a client for @pdev has been registered already.
*
- * To unregister just call vga_client_unregister().
+ * To unregister, call vga_client_unregister().
*
- * Returns: 0 on success, -1 on failure
+ * Returns: 0 on success, -ENODEV on failure
*/
int vga_client_register(struct pci_dev *pdev,
unsigned int (*set_decode)(struct pci_dev *pdev, bool decode))
{
- int ret = -ENODEV;
- struct vga_device *vgadev;
unsigned long flags;
+ struct vga_device *vgadev;
spin_lock_irqsave(&vga_lock, flags);
vgadev = vgadev_find(pdev);
- if (!vgadev)
- goto bail;
-
- vgadev->set_decode = set_decode;
- ret = 0;
-
-bail:
+ if (vgadev)
+ vgadev->set_decode = set_decode;
spin_unlock_irqrestore(&vga_lock, flags);
- return ret;
-
+ if (!vgadev)
+ return -ENODEV;
+ return 0;
}
EXPORT_SYMBOL(vga_client_register);
@@ -997,13 +977,13 @@ EXPORT_SYMBOL(vga_client_register);
*
* Semantics is:
*
- * open : open user instance of the arbitrer. by default, it's
+ * open : Open user instance of the arbiter. By default, it's
* attached to the default VGA device of the system.
*
- * close : close user instance, release locks
+ * close : Close user instance, release locks
*
- * read : return a string indicating the status of the target.
- * an IO state string is of the form {io,mem,io+mem,none},
+ * read : Return a string indicating the status of the target.
+ * An IO state string is of the form {io,mem,io+mem,none},
* mc and ic are respectively mem and io lock counts (for
* debugging/diagnostic only). "decodes" indicate what the
* card currently decodes, "owns" indicates what is currently
@@ -1017,7 +997,7 @@ EXPORT_SYMBOL(vga_client_register);
* write : write a command to the arbiter. List of commands is:
*
* target <card_ID> : switch target to card <card_ID> (see below)
- * lock <io_state> : acquires locks on target ("none" is invalid io_state)
+ * lock <io_state> : acquire locks on target ("none" is invalid io_state)
* trylock <io_state> : non-blocking acquire locks on target
* unlock <io_state> : release locks on target
* unlock all : release all locks on target held by this user
@@ -1034,23 +1014,21 @@ EXPORT_SYMBOL(vga_client_register);
* Note about locks:
*
* The driver keeps track of which user has what locks on which card. It
- * supports stacking, like the kernel one. This complexifies the implementation
+ * supports stacking, like the kernel one. This complicates the implementation
* a bit, but makes the arbiter more tolerant to userspace problems and able
* to properly cleanup in all cases when a process dies.
* Currently, a max of 16 cards simultaneously can have locks issued from
* userspace for a given user (file descriptor instance) of the arbiter.
*
* If the device is hot-unplugged, there is a hook inside the module to notify
- * they being added/removed in the system and automatically added/removed in
+ * of it being added/removed so it is automatically added to/removed from
* the arbiter.
*/
#define MAX_USER_CARDS CONFIG_VGA_ARB_MAX_GPUS
#define PCI_INVALID_CARD ((struct pci_dev *)-1UL)
-/*
- * Each user has an array of these, tracking which cards have locks
- */
+/* Each user has an array of these, tracking which cards have locks */
struct vga_arb_user_card {
struct pci_dev *pdev;
unsigned int mem_cnt;
@@ -1069,9 +1047,8 @@ static DEFINE_SPINLOCK(vga_user_lock);
/*
- * This function gets a string in the format: "PCI:domain:bus:dev.fn" and
- * returns the respective values. If the string is not in this format,
- * it returns 0.
+ * Take a string in the format: "PCI:domain:bus:dev.fn" and return the
+ * respective values. If the string is not in this format, return 0.
*/
static int vga_pci_str_to_vars(char *buf, int count, unsigned int *domain,
unsigned int *bus, unsigned int *devfn)
@@ -1079,7 +1056,6 @@ static int vga_pci_str_to_vars(char *buf, int count, unsigned int *domain,
int n;
unsigned int slot, func;
-
n = sscanf(buf, "PCI:%x:%x:%x.%x", domain, bus, &slot, &func);
if (n != 4)
return 0;
@@ -1104,7 +1080,7 @@ static ssize_t vga_arb_read(struct file *file, char __user *buf,
if (lbuf == NULL)
return -ENOMEM;
- /* Protects vga_list */
+ /* Protect vga_list */
spin_lock_irqsave(&vga_lock, flags);
/* If we are targeting the default, use it */
@@ -1118,15 +1094,16 @@ static ssize_t vga_arb_read(struct file *file, char __user *buf,
/* Find card vgadev structure */
vgadev = vgadev_find(pdev);
if (vgadev == NULL) {
- /* Wow, it's not in the list, that shouldn't happen,
- * let's fix us up and return invalid card
+ /*
+ * Wow, it's not in the list, that shouldn't happen, let's
+ * fix us up and return invalid card.
*/
spin_unlock_irqrestore(&vga_lock, flags);
len = sprintf(lbuf, "invalid");
goto done;
}
- /* Fill the buffer with infos */
+ /* Fill the buffer with info */
len = snprintf(lbuf, 1024,
"count:%d,PCI:%s,decodes=%s,owns=%s,locks=%s(%u:%u)\n",
vga_decode_count, pci_name(pdev),
@@ -1172,7 +1149,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user *buf,
if (copy_from_user(kbuf, buf, count))
return -EFAULT;
curr_pos = kbuf;
- kbuf[count] = '\0'; /* Just to make sure... */
+ kbuf[count] = '\0';
if (strncmp(curr_pos, "lock ", 5) == 0) {
curr_pos += 5;
@@ -1197,7 +1174,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user *buf,
vga_get_uninterruptible(pdev, io_state);
- /* Update the client's locks lists... */
+ /* Update the client's locks lists */
for (i = 0; i < MAX_USER_CARDS; i++) {
if (priv->cards[i].pdev == pdev) {
if (io_state & VGA_RSRC_LEGACY_IO)
@@ -1314,7 +1291,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user *buf,
curr_pos += 7;
remaining -= 7;
pr_debug("client 0x%p called 'target'\n", priv);
- /* if target is default */
+ /* If target is default */
if (!strncmp(curr_pos, "default", 7))
pdev = pci_dev_get(vga_default_device());
else {
@@ -1364,7 +1341,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user *buf,
vgaarb_dbg(&pdev->dev, "maximum user cards (%d) number reached, ignoring this one!\n",
MAX_USER_CARDS);
pci_dev_put(pdev);
- /* XXX: which value to return? */
+ /* XXX: Which value to return? */
ret_val = -ENOMEM;
goto done;
}
@@ -1425,13 +1402,12 @@ static int vga_arb_open(struct inode *inode, struct file *file)
list_add(&priv->list, &vga_user_list);
spin_unlock_irqrestore(&vga_user_lock, flags);
- /* Set the client' lists of locks */
+ /* Set the client's lists of locks */
priv->target = vga_default_device(); /* Maybe this is still null! */
priv->cards[0].pdev = priv->target;
priv->cards[0].io_cnt = 0;
priv->cards[0].mem_cnt = 0;
-
return 0;
}
@@ -1465,25 +1441,23 @@ static int vga_arb_release(struct inode *inode, struct file *file)
}
/*
- * callback any registered clients to let them know we have a
- * change in VGA cards
+ * Call back any registered clients to let them know we have a change in VGA
+ * cards.
*/
static void vga_arbiter_notify_clients(void)
{
struct vga_device *vgadev;
unsigned long flags;
- uint32_t new_decodes;
+ unsigned int new_decodes;
bool new_state;
if (!vga_arbiter_used)
return;
+ new_state = (vga_count > 1) ? false : true;
+
spin_lock_irqsave(&vga_lock, flags);
list_for_each_entry(vgadev, &vga_list, list) {
- if (vga_count > 1)
- new_state = false;
- else
- new_state = true;
if (vgadev->set_decode) {
new_decodes = vgadev->set_decode(vgadev->pdev,
new_state);
@@ -1502,9 +1476,15 @@ static int pci_notify(struct notifier_block *nb, unsigned long action,
vgaarb_dbg(dev, "%s\n", __func__);
- /* For now we're only intereted in devices added and removed. I didn't
- * test this thing here, so someone needs to double check for the
- * cases of hotplugable vga cards. */
+ /* Only deal with VGA class devices */
+ if (!pci_is_vga(pdev))
+ return 0;
+
+ /*
+ * For now, we're only interested in devices added and removed.
+ * I didn't test this thing here, so someone needs to double check
+ * for the cases of hot-pluggable VGA cards.
+ */
if (action == BUS_NOTIFY_ADD_DEVICE)
notify = vga_arbiter_add_pci_device(pdev);
else if (action == BUS_NOTIFY_DEL_DEVICE)
@@ -1543,13 +1523,14 @@ static int __init vga_arb_device_init(void)
bus_register_notifier(&pci_bus_type, &pci_notifier);
- /* We add all PCI devices satisfying VGA class in the arbiter by
- * default */
+ /* Add all VGA class PCI devices by default */
pdev = NULL;
while ((pdev =
pci_get_subsys(PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
- PCI_ANY_ID, pdev)) != NULL)
- vga_arbiter_add_pci_device(pdev);
+ PCI_ANY_ID, pdev)) != NULL) {
+ if (pci_is_vga(pdev))
+ vga_arbiter_add_pci_device(pdev);
+ }
pr_info("loaded\n");
return rc;
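Following the vga_get()/vga_put() kernel-doc updated above, a minimal sketch of a typical arbiter client; vga_get(), vga_put() and the VGA_RSRC_* flags are the existing API from this file, while mydrv_touch_legacy_vga() is a hypothetical caller:

	#include <linux/pci.h>
	#include <linux/vgaarb.h>

	static int mydrv_touch_legacy_vga(struct pci_dev *pdev)
	{
		int rc;

		/* Block (interruptibly) until legacy IO+MEM can be locked */
		rc = vga_get(pdev, VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM, 1);
		if (rc)
			return rc;

		/* ... touch legacy VGA registers/memory here ... */

		/* Release the lock; a later vga_get() will succeed immediately */
		vga_put(pdev, VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM);
		return 0;
	}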
diff --git a/drivers/pci/vpd.c b/drivers/pci/vpd.c
index a4fc4d0690fe..153394a652d3 100644
--- a/drivers/pci/vpd.c
+++ b/drivers/pci/vpd.c
@@ -9,7 +9,7 @@
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/sched/signal.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include "pci.h"
#define PCI_VPD_LRDT_TAG_SIZE 3
@@ -271,31 +271,61 @@ void pci_vpd_init(struct pci_dev *dev)
}
static ssize_t vpd_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf, loff_t off,
- size_t count)
+ const struct bin_attribute *bin_attr, char *buf,
+ loff_t off, size_t count)
{
struct pci_dev *dev = to_pci_dev(kobj_to_dev(kobj));
+ struct pci_dev *vpd_dev = dev;
+ ssize_t ret;
+
+ if (dev->dev_flags & PCI_DEV_FLAGS_VPD_REF_F0) {
+ vpd_dev = pci_get_func0_dev(dev);
+ if (!vpd_dev)
+ return -ENODEV;
+ }
+
+ pci_config_pm_runtime_get(vpd_dev);
+ ret = pci_read_vpd(vpd_dev, off, count, buf);
+ pci_config_pm_runtime_put(vpd_dev);
+
+ if (dev->dev_flags & PCI_DEV_FLAGS_VPD_REF_F0)
+ pci_dev_put(vpd_dev);
- return pci_read_vpd(dev, off, count, buf);
+ return ret;
}
static ssize_t vpd_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf, loff_t off,
- size_t count)
+ const struct bin_attribute *bin_attr, char *buf,
+ loff_t off, size_t count)
{
struct pci_dev *dev = to_pci_dev(kobj_to_dev(kobj));
+ struct pci_dev *vpd_dev = dev;
+ ssize_t ret;
+
+ if (dev->dev_flags & PCI_DEV_FLAGS_VPD_REF_F0) {
+ vpd_dev = pci_get_func0_dev(dev);
+ if (!vpd_dev)
+ return -ENODEV;
+ }
+
+ pci_config_pm_runtime_get(vpd_dev);
+ ret = pci_write_vpd(vpd_dev, off, count, buf);
+ pci_config_pm_runtime_put(vpd_dev);
+
+ if (dev->dev_flags & PCI_DEV_FLAGS_VPD_REF_F0)
+ pci_dev_put(vpd_dev);
- return pci_write_vpd(dev, off, count, buf);
+ return ret;
}
-static BIN_ATTR(vpd, 0600, vpd_read, vpd_write, 0);
+static const BIN_ATTR(vpd, 0600, vpd_read, vpd_write, 0);
-static struct bin_attribute *vpd_attrs[] = {
+static const struct bin_attribute *const vpd_attrs[] = {
&bin_attr_vpd,
NULL,
};
static umode_t vpd_attr_is_visible(struct kobject *kobj,
- struct bin_attribute *a, int n)
+ const struct bin_attribute *a, int n)
{
struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
index 83c0ab50676d..11636634ae51 100644
--- a/drivers/pci/xen-pcifront.c
+++ b/drivers/pci/xen-pcifront.c
@@ -22,7 +22,6 @@
#include <linux/bitops.h>
#include <linux/time.h>
#include <linux/ktime.h>
-#include <linux/swiotlb.h>
#include <xen/platform_pci.h>
#include <asm/xen/swiotlb-xen.h>
@@ -669,11 +668,6 @@ static int pcifront_connect_and_init_dma(struct pcifront_device *pdev)
spin_unlock(&pcifront_dev_lock);
- if (!err && !is_swiotlb_active(&pdev->xdev->dev)) {
- err = pci_xen_swiotlb_init_late();
- if (err)
- dev_err(&pdev->xdev->dev, "Could not setup SWIOTLB!\n");
- }
return err;
}