Diffstat (limited to 'drivers/pci/controller/dwc')
-rw-r--r--  drivers/pci/controller/dwc/Kconfig | 583
-rw-r--r--  drivers/pci/controller/dwc/Makefile | 31
-rw-r--r--  drivers/pci/controller/dwc/pci-dra7xx.c | 411
-rw-r--r--  drivers/pci/controller/dwc/pci-exynos.c | 460
-rw-r--r--  drivers/pci/controller/dwc/pci-imx6.c | 2166
-rw-r--r--  drivers/pci/controller/dwc/pci-keystone.c | 1248
-rw-r--r--  drivers/pci/controller/dwc/pci-layerscape-ep.c | 301
-rw-r--r--  drivers/pci/controller/dwc/pci-layerscape.c | 392
-rw-r--r--  drivers/pci/controller/dwc/pci-meson.c | 363
-rw-r--r--  drivers/pci/controller/dwc/pcie-al.c | 391
-rw-r--r--  drivers/pci/controller/dwc/pcie-amd-mdb.c | 526
-rw-r--r--  drivers/pci/controller/dwc/pcie-armada8k.c | 148
-rw-r--r--  drivers/pci/controller/dwc/pcie-artpec6.c | 197
-rw-r--r--  drivers/pci/controller/dwc/pcie-bt1.c | 645
-rw-r--r--  drivers/pci/controller/dwc/pcie-designware-debugfs.c | 927
-rw-r--r--  drivers/pci/controller/dwc/pcie-designware-ep.c | 1022
-rw-r--r--  drivers/pci/controller/dwc/pcie-designware-host.c | 1351
-rw-r--r--  drivers/pci/controller/dwc/pcie-designware-plat.c | 147
-rw-r--r--  drivers/pci/controller/dwc/pcie-designware.c | 1206
-rw-r--r--  drivers/pci/controller/dwc/pcie-designware.h | 861
-rw-r--r--  drivers/pci/controller/dwc/pcie-dw-rockchip.c | 777
-rw-r--r--  drivers/pci/controller/dwc/pcie-fu740.c | 357
-rw-r--r--  drivers/pci/controller/dwc/pcie-hisi.c | 273
-rw-r--r--  drivers/pci/controller/dwc/pcie-histb.c | 168
-rw-r--r--  drivers/pci/controller/dwc/pcie-intel-gw.c | 447
-rw-r--r--  drivers/pci/controller/dwc/pcie-keembay.c | 483
-rw-r--r--  drivers/pci/controller/dwc/pcie-kirin.c | 696
-rw-r--r--  drivers/pci/controller/dwc/pcie-nxp-s32g.c | 406
-rw-r--r--  drivers/pci/controller/dwc/pcie-qcom-common.c | 88
-rw-r--r--  drivers/pci/controller/dwc/pcie-qcom-common.h | 14
-rw-r--r--  drivers/pci/controller/dwc/pcie-qcom-ep.c | 955
-rw-r--r--  drivers/pci/controller/dwc/pcie-qcom.c | 2343
-rw-r--r--  drivers/pci/controller/dwc/pcie-rcar-gen4.c | 804
-rw-r--r--  drivers/pci/controller/dwc/pcie-sophgo.c | 257
-rw-r--r--  drivers/pci/controller/dwc/pcie-spacemit-k1.c | 357
-rw-r--r--  drivers/pci/controller/dwc/pcie-spear13xx.c | 122
-rw-r--r--  drivers/pci/controller/dwc/pcie-stm32-ep.c | 343
-rw-r--r--  drivers/pci/controller/dwc/pcie-stm32.c | 370
-rw-r--r--  drivers/pci/controller/dwc/pcie-stm32.h | 19
-rw-r--r--  drivers/pci/controller/dwc/pcie-tegra194-acpi.c | 109
-rw-r--r--  drivers/pci/controller/dwc/pcie-tegra194.c | 2503
-rw-r--r--  drivers/pci/controller/dwc/pcie-uniphier-ep.c | 474
-rw-r--r--  drivers/pci/controller/dwc/pcie-uniphier.c | 290
-rw-r--r--  drivers/pci/controller/dwc/pcie-visconti.c | 329
44 files changed, 21452 insertions, 4908 deletions
diff --git a/drivers/pci/controller/dwc/Kconfig b/drivers/pci/controller/dwc/Kconfig
index 548c58223868..519b59422b47 100644
--- a/drivers/pci/controller/dwc/Kconfig
+++ b/drivers/pci/controller/dwc/Kconfig
@@ -1,61 +1,277 @@
# SPDX-License-Identifier: GPL-2.0
-menu "DesignWare PCI Core Support"
+menu "DesignWare-based PCIe controllers"
depends on PCI
config PCIE_DW
bool
+config PCIE_DW_DEBUGFS
+ bool "DesignWare PCIe debugfs entries"
+ depends on DEBUG_FS
+ depends on PCIE_DW_HOST || PCIE_DW_EP
+ help
+ Say Y here to enable debugfs entries for the PCIe controller. These
+ entries provide various debug features related to the controller and
+ expose the RAS DES capabilities such as Silicon Debug, Error Injection
+ and Statistical Counters.
+
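[Editor's note: as a minimal sketch of the dependency chain above, assuming a host-mode controller option that selects PCIE_DW_HOST (PCIE_QCOM is used here only as an example), a .config fragment enabling the debugfs entries could look like:

	CONFIG_DEBUG_FS=y
	CONFIG_PCIE_QCOM=y
	CONFIG_PCIE_DW_DEBUGFS=y

PCIE_DW_HOST itself has no prompt; it is selected by the controller option.]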
config PCIE_DW_HOST
- bool
- depends on PCI_MSI_IRQ_DOMAIN
- select PCIE_DW
+ bool
+ select PCIE_DW
+ select IRQ_MSI_LIB
+ select PCI_HOST_COMMON
config PCIE_DW_EP
bool
- depends on PCI_ENDPOINT
select PCIE_DW
-config PCI_DRA7XX
+config PCIE_AL
+ bool "Amazon Annapurna Labs PCIe controller"
+ depends on OF && (ARM64 || COMPILE_TEST)
+ depends on PCI_MSI
+ select PCIE_DW_HOST
+ select PCI_ECAM
+ help
+	  Say Y here to enable support for Amazon's Annapurna Labs PCIe
+ controller IP on Amazon SoCs. The PCIe controller uses the DesignWare
+ core plus Annapurna Labs proprietary hardware wrappers. This is
+ required only for DT-based platforms. ACPI platforms with the
+ Annapurna Labs PCIe controller don't need to enable this.
+
+config PCIE_AMD_MDB
+ bool "AMD MDB Versal2 PCIe controller"
+ depends on OF && (ARM64 || COMPILE_TEST)
+ depends on PCI_MSI
+ select PCIE_DW_HOST
+ help
+ Say Y here if you want to enable PCIe controller support on AMD
+ Versal2 SoCs. The AMD MDB Versal2 PCIe controller is based on
+ DesignWare IP and therefore the driver re-uses the DesignWare
+ core functions to implement the driver.
+
+config PCI_MESON
+ tristate "Amlogic Meson PCIe controller"
+ default m if ARCH_MESON
+ depends on PCI_MSI
+ select PCIE_DW_HOST
+ help
+ Say Y here if you want to enable PCI controller support on Amlogic
+ SoCs. The PCI controller on Amlogic is based on DesignWare hardware
+ and therefore the driver re-uses the DesignWare core functions to
+ implement the driver.
+
+config PCIE_ARTPEC6
bool
-config PCI_DRA7XX_HOST
- bool "TI DRA7xx PCIe controller Host Mode"
- depends on SOC_DRA7XX || COMPILE_TEST
- depends on PCI_MSI_IRQ_DOMAIN
- depends on OF && HAS_IOMEM && TI_PIPE3
+config PCIE_ARTPEC6_HOST
+ bool "Axis ARTPEC-6 PCIe controller (host mode)"
+ depends on MACH_ARTPEC6 || COMPILE_TEST
+ depends on PCI_MSI
select PCIE_DW_HOST
- select PCI_DRA7XX
- default y
+ select PCIE_ARTPEC6
help
- Enables support for the PCIe controller in the DRA7xx SoC to work in
- host mode. There are two instances of PCIe controller in DRA7xx.
- This controller can work either as EP or RC. In order to enable
- host-specific features PCI_DRA7XX_HOST must be selected and in order
- to enable device-specific features PCI_DRA7XX_EP must be selected.
- This uses the DesignWare core.
+ Enables support for the PCIe controller in the ARTPEC-6 SoC to work in
+ host mode. This uses the DesignWare core.
-config PCI_DRA7XX_EP
- bool "TI DRA7xx PCIe controller Endpoint Mode"
- depends on SOC_DRA7XX || COMPILE_TEST
+config PCIE_ARTPEC6_EP
+ bool "Axis ARTPEC-6 PCIe controller (endpoint mode)"
+ depends on MACH_ARTPEC6 || COMPILE_TEST
depends on PCI_ENDPOINT
- depends on OF && HAS_IOMEM && TI_PIPE3
select PCIE_DW_EP
- select PCI_DRA7XX
+ select PCIE_ARTPEC6
help
- Enables support for the PCIe controller in the DRA7xx SoC to work in
- endpoint mode. There are two instances of PCIe controller in DRA7xx.
- This controller can work either as EP or RC. In order to enable
- host-specific features PCI_DRA7XX_HOST must be selected and in order
- to enable device-specific features PCI_DRA7XX_EP must be selected.
- This uses the DesignWare core.
+ Enables support for the PCIe controller in the ARTPEC-6 SoC to work in
+ endpoint mode. This uses the DesignWare core.
+
+config PCIE_BT1
+ tristate "Baikal-T1 PCIe controller"
+ depends on MIPS_BAIKAL_T1 || COMPILE_TEST
+ depends on PCI_MSI
+ select PCIE_DW_HOST
+ help
+ Enables support for the PCIe controller in the Baikal-T1 SoC to work
+ in host mode. It's based on the Synopsys DWC PCIe v4.60a IP-core.
+
+config PCI_IMX6
+ bool
+
+config PCI_IMX6_HOST
+ bool "Freescale i.MX6/7/8 PCIe controller (host mode)"
+ depends on ARCH_MXC || COMPILE_TEST
+ depends on PCI_MSI
+ select PCIE_DW_HOST
+ select PCI_IMX6
+ help
+ Enables support for the PCIe controller in the i.MX SoCs to
+ work in Root Complex mode. The PCI controller on i.MX is based
+ on DesignWare hardware and therefore the driver re-uses the
+ DesignWare core functions to implement the driver.
+
+config PCI_IMX6_EP
+ bool "Freescale i.MX6/7/8 PCIe controller (endpoint mode)"
+ depends on ARCH_MXC || COMPILE_TEST
+ depends on PCI_ENDPOINT
+ select PCIE_DW_EP
+ select PCI_IMX6
+ help
+ Enables support for the PCIe controller in the i.MX SoCs to
+ work in endpoint mode. The PCI controller on i.MX is based
+ on DesignWare hardware and therefore the driver re-uses the
+ DesignWare core functions to implement the driver.
+
+config PCI_LAYERSCAPE
+ bool "Freescale Layerscape PCIe controller (host mode)"
+ depends on OF && (ARM || ARCH_LAYERSCAPE || COMPILE_TEST)
+ depends on PCI_MSI
+ select PCIE_DW_HOST
+ select MFD_SYSCON
+ help
+ Say Y here if you want to enable PCIe controller support on Layerscape
+ SoCs to work in Host mode.
+ This controller can work either as EP or RC. The RCW[HOST_AGT_PEX]
+ determines which PCIe controller works in EP mode and which PCIe
+ controller works in RC mode.
+
+config PCI_LAYERSCAPE_EP
+ bool "Freescale Layerscape PCIe controller (endpoint mode)"
+ depends on OF && (ARM || ARCH_LAYERSCAPE || COMPILE_TEST)
+ depends on PCI_ENDPOINT
+ select PCIE_DW_EP
+ help
+ Say Y here if you want to enable PCIe controller support on Layerscape
+ SoCs to work in Endpoint mode.
+ This controller can work either as EP or RC. The RCW[HOST_AGT_PEX]
+ determines which PCIe controller works in EP mode and which PCIe
+ controller works in RC mode.
+
+config PCI_HISI
+ depends on OF && (ARM64 || COMPILE_TEST)
+ bool "HiSilicon Hip05 and Hip06 SoCs PCIe controller"
+ depends on PCI_MSI
+ select PCIE_DW_HOST
+ select PCI_HOST_COMMON
+ help
+ Say Y here if you want PCIe controller support on HiSilicon
+ Hip05 and Hip06 SoCs
+
+config PCIE_KIRIN
+ depends on OF && (ARM64 || COMPILE_TEST)
+ tristate "HiSilicon Kirin PCIe controller"
+ depends on PCI_MSI
+ select PCIE_DW_HOST
+ select REGMAP_MMIO
+ help
+ Say Y here if you want PCIe controller support
+ on HiSilicon Kirin series SoCs.
+
+config PCIE_HISI_STB
+ bool "HiSilicon STB PCIe controller"
+ depends on ARCH_HISI || COMPILE_TEST
+ depends on PCI_MSI
+ select PCIE_DW_HOST
+ help
+ Say Y here if you want PCIe controller support on HiSilicon STB SoCs
+
+config PCIE_INTEL_GW
+	bool "Intel Gateway PCIe controller"
+ depends on OF && (X86 || COMPILE_TEST)
+ depends on PCI_MSI
+ select PCIE_DW_HOST
+ help
+ Say 'Y' here to enable PCIe Host controller support on Intel
+ Gateway SoCs.
+ The PCIe controller uses the DesignWare core plus Intel-specific
+ hardware wrappers.
+
+config PCIE_KEEMBAY
+ bool
+
+config PCIE_KEEMBAY_HOST
+ bool "Intel Keem Bay PCIe controller (host mode)"
+ depends on ARCH_KEEMBAY || COMPILE_TEST
+ depends on PCI_MSI
+ select PCIE_DW_HOST
+ select PCIE_KEEMBAY
+ help
+ Say 'Y' here to enable support for the PCIe controller in Keem Bay
+ to work in host mode.
+ The PCIe controller is based on DesignWare Hardware and uses
+ DesignWare core functions.
+
+config PCIE_KEEMBAY_EP
+ bool "Intel Keem Bay PCIe controller (endpoint mode)"
+ depends on ARCH_KEEMBAY || COMPILE_TEST
+ depends on PCI_MSI
+ depends on PCI_ENDPOINT
+ select PCIE_DW_EP
+ select PCIE_KEEMBAY
+ help
+ Say 'Y' here to enable support for the PCIe controller in Keem Bay
+ to work in endpoint mode.
+ The PCIe controller is based on DesignWare Hardware and uses
+ DesignWare core functions.
+
+config PCIE_ARMADA_8K
+ bool "Marvell Armada-8K PCIe controller"
+ depends on ARCH_MVEBU || COMPILE_TEST
+ depends on PCI_MSI
+ select PCIE_DW_HOST
+ help
+ Say Y here if you want to enable PCIe controller support on
+ Armada-8K SoCs. The PCIe controller on Armada-8K is based on
+ DesignWare hardware and therefore the driver re-uses the
+ DesignWare core functions to implement the driver.
+
+config PCIE_TEGRA194
+ tristate
+
+config PCIE_TEGRA194_HOST
+ tristate "NVIDIA Tegra194 (and later) PCIe controller (host mode)"
+ depends on ARCH_TEGRA_194_SOC || COMPILE_TEST
+ depends on PCI_MSI
+ select PCIE_DW_HOST
+ select PHY_TEGRA194_P2U
+ select PCIE_TEGRA194
+ help
+ Enables support for the PCIe controller in the NVIDIA Tegra194 SoC to
+ work in host mode. There are two instances of PCIe controllers in
+ Tegra194. This controller can work either as EP or RC. In order to
+ enable host-specific features PCIE_TEGRA194_HOST must be selected and
+ in order to enable device-specific features PCIE_TEGRA194_EP must be
+ selected. This uses the DesignWare core.
+
+config PCIE_TEGRA194_EP
+ tristate "NVIDIA Tegra194 (and later) PCIe controller (endpoint mode)"
+ depends on ARCH_TEGRA_194_SOC || COMPILE_TEST
+ depends on PCI_ENDPOINT
+ select PCIE_DW_EP
+ select PHY_TEGRA194_P2U
+ select PCIE_TEGRA194
+ help
+ Enables support for the PCIe controller in the NVIDIA Tegra194 SoC to
+ work in endpoint mode. There are two instances of PCIe controllers in
+ Tegra194. This controller can work either as EP or RC. In order to
+ enable host-specific features PCIE_TEGRA194_HOST must be selected and
+ in order to enable device-specific features PCIE_TEGRA194_EP must be
+ selected. This uses the DesignWare core.
+
+config PCIE_NXP_S32G
+ bool "NXP S32G PCIe controller (host mode)"
+ depends on ARCH_S32 || COMPILE_TEST
+ select PCIE_DW_HOST
+ help
+ Enable support for the PCIe controller in NXP S32G based boards to
+ work in Host mode. The controller is based on DesignWare IP and
+ can work either as RC or EP. In order to enable host-specific
+ features PCIE_NXP_S32G must be selected.
config PCIE_DW_PLAT
bool
config PCIE_DW_PLAT_HOST
- bool "Platform bus based DesignWare PCIe Controller - Host mode"
- depends on PCI && PCI_MSI_IRQ_DOMAIN
+ bool "Platform bus based DesignWare PCIe controller (host mode)"
+ depends on PCI_MSI
select PCIE_DW_HOST
select PCIE_DW_PLAT
help
@@ -68,8 +284,8 @@ config PCIE_DW_PLAT_HOST
selected.
config PCIE_DW_PLAT_EP
- bool "Platform bus based DesignWare PCIe Controller - Endpoint mode"
- depends on PCI && PCI_MSI_IRQ_DOMAIN
+ bool "Platform bus based DesignWare PCIe controller (endpoint mode)"
+ depends on PCI && PCI_MSI
depends on PCI_ENDPOINT
select PCIE_DW_EP
select PCIE_DW_PLAT
@@ -82,135 +298,254 @@ config PCIE_DW_PLAT_EP
order to enable device-specific features PCIE_DW_PLAT_EP must be
selected.
-config PCI_EXYNOS
- bool "Samsung Exynos PCIe controller"
- depends on SOC_EXYNOS5440 || COMPILE_TEST
- depends on PCI_MSI_IRQ_DOMAIN
+config PCIE_QCOM_COMMON
+ bool
+
+config PCIE_QCOM
+ bool "Qualcomm PCIe controller (host mode)"
+ depends on OF && (ARCH_QCOM || COMPILE_TEST)
+ depends on PCI_MSI
select PCIE_DW_HOST
+ select CRC8
+ select PCIE_QCOM_COMMON
+ select PCI_HOST_COMMON
+ select PCI_PWRCTRL_SLOT
+ help
+ Say Y here to enable PCIe controller support on Qualcomm SoCs. The
+ PCIe controller uses the DesignWare core plus Qualcomm-specific
+ hardware wrappers.
-config PCI_IMX6
- bool "Freescale i.MX6/7 PCIe controller"
- depends on SOC_IMX6Q || SOC_IMX7D || (ARM && COMPILE_TEST)
- depends on PCI_MSI_IRQ_DOMAIN
+config PCIE_QCOM_EP
+ tristate "Qualcomm PCIe controller (endpoint mode)"
+ depends on OF && (ARCH_QCOM || COMPILE_TEST)
+ depends on PCI_ENDPOINT
+ select PCIE_DW_EP
+ select PCIE_QCOM_COMMON
+ help
+ Say Y here to enable support for the PCIe controllers on Qualcomm SoCs
+ to work in endpoint mode. The PCIe controller uses the DesignWare core
+ plus Qualcomm-specific hardware wrappers.
+
+config PCIE_RCAR_GEN4
+ tristate
+
+config PCIE_RCAR_GEN4_HOST
+ tristate "Renesas R-Car Gen4 PCIe controller (host mode)"
+ depends on ARCH_RENESAS || COMPILE_TEST
+ depends on PCI_MSI
select PCIE_DW_HOST
+ select PCIE_RCAR_GEN4
+ help
+ Say Y here if you want PCIe controller (host mode) on R-Car Gen4 SoCs.
+ To compile this driver as a module, choose M here: the module will be
+ called pcie-rcar-gen4.ko. This uses the DesignWare core.
-config PCIE_SPEAR13XX
- bool "STMicroelectronics SPEAr PCIe controller"
- depends on ARCH_SPEAR13XX || COMPILE_TEST
- depends on PCI_MSI_IRQ_DOMAIN
+config PCIE_RCAR_GEN4_EP
+ tristate "Renesas R-Car Gen4 PCIe controller (endpoint mode)"
+ depends on ARCH_RENESAS || COMPILE_TEST
+ depends on PCI_ENDPOINT
+ select PCIE_DW_EP
+ select PCIE_RCAR_GEN4
+ help
+ Say Y here if you want PCIe controller (endpoint mode) on R-Car Gen4
+ SoCs. To compile this driver as a module, choose M here: the module
+ will be called pcie-rcar-gen4.ko. This uses the DesignWare core.
+
+config PCIE_ROCKCHIP_DW
+ bool
+
+config PCIE_ROCKCHIP_DW_HOST
+ bool "Rockchip DesignWare PCIe controller (host mode)"
+ depends on PCI_MSI
+ depends on ARCH_ROCKCHIP || COMPILE_TEST
+ depends on OF
select PCIE_DW_HOST
+ select PCIE_ROCKCHIP_DW
help
- Say Y here if you want PCIe support on SPEAr13XX SoCs.
+ Enables support for the DesignWare PCIe controller in the
+ Rockchip SoC (except RK3399) to work in host mode.
-config PCI_KEYSTONE
- bool "TI Keystone PCIe controller"
- depends on ARCH_KEYSTONE || (ARM && COMPILE_TEST)
- depends on PCI_MSI_IRQ_DOMAIN
+config PCIE_ROCKCHIP_DW_EP
+ bool "Rockchip DesignWare PCIe controller (endpoint mode)"
+ depends on ARCH_ROCKCHIP || COMPILE_TEST
+ depends on OF
+ depends on PCI_ENDPOINT
+ select PCIE_DW_EP
+ select PCIE_ROCKCHIP_DW
+ help
+ Enables support for the DesignWare PCIe controller in the
+ Rockchip SoC (except RK3399) to work in endpoint mode.
+
+config PCI_EXYNOS
+ tristate "Samsung Exynos PCIe controller"
+ depends on ARCH_EXYNOS || COMPILE_TEST
+ depends on PCI_MSI
select PCIE_DW_HOST
help
- Say Y here if you want to enable PCI controller support on Keystone
- SoCs. The PCI controller on Keystone is based on DesignWare hardware
- and therefore the driver re-uses the DesignWare core functions to
- implement the driver.
+ Enables support for the PCIe controller in the Samsung Exynos SoCs
+ to work in host mode. The PCI controller is based on the DesignWare
+ hardware and therefore the driver re-uses the DesignWare core
+ functions to implement the driver.
-config PCI_LAYERSCAPE
- bool "Freescale Layerscape PCIe controller"
- depends on OF && (ARM || ARCH_LAYERSCAPE || COMPILE_TEST)
- depends on PCI_MSI_IRQ_DOMAIN
- select MFD_SYSCON
+config PCIE_FU740
+ bool "SiFive FU740 PCIe controller"
+ depends on PCI_MSI
+ depends on ARCH_SIFIVE || COMPILE_TEST
select PCIE_DW_HOST
help
- Say Y here if you want PCIe controller support on Layerscape SoCs.
+ Say Y here if you want PCIe controller support for the SiFive
+ FU740.
-config PCI_HISI
- depends on OF && (ARM64 || COMPILE_TEST)
- bool "HiSilicon Hip05 and Hip06 SoCs PCIe controllers"
- depends on PCI_MSI_IRQ_DOMAIN
+config PCIE_UNIPHIER
+ bool "Socionext UniPhier PCIe controller (host mode)"
+ depends on ARCH_UNIPHIER || COMPILE_TEST
+ depends on OF && HAS_IOMEM
+ depends on PCI_MSI
select PCIE_DW_HOST
- select PCI_HOST_COMMON
help
- Say Y here if you want PCIe controller support on HiSilicon
- Hip05 and Hip06 SoCs
+ Say Y here if you want PCIe host controller support on UniPhier SoCs.
+ This driver supports LD20 and PXs3 SoCs.
-config PCIE_QCOM
- bool "Qualcomm PCIe controller"
- depends on OF && (ARCH_QCOM || COMPILE_TEST)
- depends on PCI_MSI_IRQ_DOMAIN
+config PCIE_UNIPHIER_EP
+ bool "Socionext UniPhier PCIe controller (endpoint mode)"
+ depends on ARCH_UNIPHIER || COMPILE_TEST
+ depends on OF && HAS_IOMEM
+ depends on PCI_ENDPOINT
+ select PCIE_DW_EP
+ help
+ Say Y here if you want PCIe endpoint controller support on
+ UniPhier SoCs. This driver supports Pro5 SoC.
+
+config PCIE_SOPHGO_DW
+ bool "Sophgo DesignWare PCIe controller (host mode)"
+ depends on ARCH_SOPHGO || COMPILE_TEST
+ depends on PCI_MSI
+ depends on OF
select PCIE_DW_HOST
help
- Say Y here to enable PCIe controller support on Qualcomm SoCs. The
- PCIe controller uses the DesignWare core plus Qualcomm-specific
- hardware wrappers.
+ Say Y here if you want PCIe host controller support on
+ Sophgo SoCs.
-config PCIE_ARMADA_8K
- bool "Marvell Armada-8K PCIe controller"
- depends on ARCH_MVEBU || COMPILE_TEST
- depends on PCI_MSI_IRQ_DOMAIN
+config PCIE_SPACEMIT_K1
+ tristate "SpacemiT K1 PCIe controller (host mode)"
+ depends on ARCH_SPACEMIT || COMPILE_TEST
+ depends on HAS_IOMEM
select PCIE_DW_HOST
+ select PCI_PWRCTRL_SLOT
+ default ARCH_SPACEMIT
help
- Say Y here if you want to enable PCIe controller support on
- Armada-8K SoCs. The PCIe controller on Armada-8K is based on
- DesignWare hardware and therefore the driver re-uses the
- DesignWare core functions to implement the driver.
+	  Enables support for the DesignWare-based PCIe controller in
+ the SpacemiT K1 SoC operating in host mode. Three controllers
+ are available on the K1 SoC; the first of these shares a PHY
+ with a USB 3.0 host controller (one or the other can be used).
-config PCIE_ARTPEC6
- bool
+config PCIE_SPEAR13XX
+ bool "STMicroelectronics SPEAr PCIe controller"
+ depends on ARCH_SPEAR13XX || COMPILE_TEST
+ depends on PCI_MSI
+ select PCIE_DW_HOST
+ help
+ Say Y here if you want PCIe support on SPEAr13XX SoCs.
-config PCIE_ARTPEC6_HOST
- bool "Axis ARTPEC-6 PCIe controller Host Mode"
- depends on MACH_ARTPEC6 || COMPILE_TEST
- depends on PCI_MSI_IRQ_DOMAIN
+config PCIE_STM32_HOST
+ tristate "STMicroelectronics STM32MP25 PCIe Controller (host mode)"
+ depends on ARCH_STM32 || COMPILE_TEST
+ depends on PCI_MSI
select PCIE_DW_HOST
- select PCIE_ARTPEC6
help
- Enables support for the PCIe controller in the ARTPEC-6 SoC to work in
- host mode. This uses the DesignWare core.
+ Enables Root Complex (RC) support for the DesignWare core based PCIe
+ controller found in STM32MP25 SoC.
-config PCIE_ARTPEC6_EP
- bool "Axis ARTPEC-6 PCIe controller Endpoint Mode"
- depends on MACH_ARTPEC6 || COMPILE_TEST
+ This driver can also be built as a module. If so, the module
+ will be called pcie-stm32.
+
+config PCIE_STM32_EP
+ tristate "STMicroelectronics STM32MP25 PCIe Controller (endpoint mode)"
+ depends on ARCH_STM32 || COMPILE_TEST
depends on PCI_ENDPOINT
select PCIE_DW_EP
- select PCIE_ARTPEC6
help
- Enables support for the PCIe controller in the ARTPEC-6 SoC to work in
- endpoint mode. This uses the DesignWare core.
+ Enables Endpoint (EP) support for the DesignWare core based PCIe
+ controller found in STM32MP25 SoC.
-config PCIE_KIRIN
- depends on OF && (ARM64 || COMPILE_TEST)
- bool "HiSilicon Kirin series SoCs PCIe controllers"
- depends on PCI_MSI_IRQ_DOMAIN
+ This driver can also be built as a module. If so, the module
+ will be called pcie-stm32-ep.
+
+config PCI_DRA7XX
+ tristate
+
+config PCI_DRA7XX_HOST
+ tristate "TI DRA7xx PCIe controller (host mode)"
+ depends on SOC_DRA7XX || COMPILE_TEST
+ depends on OF && HAS_IOMEM && TI_PIPE3
+ depends on PCI_MSI
select PCIE_DW_HOST
+ select PCI_DRA7XX
+ default y if SOC_DRA7XX
help
- Say Y here if you want PCIe controller support
- on HiSilicon Kirin series SoCs.
+ Enables support for the PCIe controller in the DRA7xx SoC to work in
+ host mode. There are two instances of PCIe controller in DRA7xx.
+ This controller can work either as EP or RC. In order to enable
+ host-specific features PCI_DRA7XX_HOST must be selected and in order
+ to enable device-specific features PCI_DRA7XX_EP must be selected.
+ This uses the DesignWare core.
-config PCIE_HISI_STB
- bool "HiSilicon STB SoCs PCIe controllers"
- depends on ARCH_HISI || COMPILE_TEST
- depends on PCI_MSI_IRQ_DOMAIN
- select PCIE_DW_HOST
+config PCI_DRA7XX_EP
+ tristate "TI DRA7xx PCIe controller (endpoint mode)"
+ depends on SOC_DRA7XX || COMPILE_TEST
+ depends on OF && HAS_IOMEM && TI_PIPE3
+ depends on PCI_ENDPOINT
+ select PCIE_DW_EP
+ select PCI_DRA7XX
help
- Say Y here if you want PCIe controller support on HiSilicon STB SoCs
+ Enables support for the PCIe controller in the DRA7xx SoC to work in
+ endpoint mode. There are two instances of PCIe controller in DRA7xx.
+ This controller can work either as EP or RC. In order to enable
+ host-specific features PCI_DRA7XX_HOST must be selected and in order
+ to enable device-specific features PCI_DRA7XX_EP must be selected.
+ This uses the DesignWare core.
-config PCI_MESON
- bool "MESON PCIe controller"
- depends on PCI_MSI_IRQ_DOMAIN
+# ARM32 platforms use hook_fault_code() and cannot support a loadable module.
+config PCI_KEYSTONE
+ bool
+
+# On non-ARM32 platforms, a loadable module can be supported.
+config PCI_KEYSTONE_TRISTATE
+ tristate
+
+config PCI_KEYSTONE_HOST
+ tristate "TI Keystone PCIe controller (host mode)"
+ depends on ARCH_KEYSTONE || ARCH_K3 || COMPILE_TEST
+ depends on PCI_MSI
select PCIE_DW_HOST
+ select PCI_KEYSTONE if ARM
+ select PCI_KEYSTONE_TRISTATE if !ARM
help
- Say Y here if you want to enable PCI controller support on Amlogic
- SoCs. The PCI controller on Amlogic is based on DesignWare hardware
- and therefore the driver re-uses the DesignWare core functions to
- implement the driver.
+ Enables support for the PCIe controller in the Keystone SoC to
+ work in host mode. The PCI controller on Keystone is based on
+ DesignWare hardware and therefore the driver re-uses the
+ DesignWare core functions to implement the driver.
-config PCIE_UNIPHIER
- bool "Socionext UniPhier PCIe controllers"
- depends on ARCH_UNIPHIER || COMPILE_TEST
- depends on OF && HAS_IOMEM
- depends on PCI_MSI_IRQ_DOMAIN
+config PCI_KEYSTONE_EP
+ tristate "TI Keystone PCIe controller (endpoint mode)"
+ depends on ARCH_KEYSTONE || ARCH_K3 || COMPILE_TEST
+ depends on PCI_ENDPOINT
+ select PCIE_DW_EP
+ select PCI_KEYSTONE if ARM
+ select PCI_KEYSTONE_TRISTATE if !ARM
+ help
+ Enables support for the PCIe controller in the Keystone SoC to
+ work in endpoint mode. The PCI controller on Keystone is based
+ on DesignWare hardware and therefore the driver re-uses the
+ DesignWare core functions to implement the driver.
+
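[Editor's note: the PCI_KEYSTONE/PCI_KEYSTONE_TRISTATE pair above is a Kconfig pattern worth spelling out: one hidden bool symbol and one hidden tristate symbol both point the Makefile at the same object, so ARM32 (which needs hook_fault_code() and therefore a built-in driver) forces obj-y while other architectures may build a module. A sketch of the pattern with hypothetical FOO names; the real wiring appears in the Makefile hunk further below:

	config FOO
		bool            # built-in only
	config FOO_TRISTATE
		tristate        # may be modular
	config FOO_HOST
		tristate "Foo controller"
		select FOO if ARM
		select FOO_TRISTATE if !ARM

	# Makefile side: both symbols build the same object
	obj-$(CONFIG_FOO)          += foo.o
	obj-$(CONFIG_FOO_TRISTATE) += foo.o]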
+config PCIE_VISCONTI_HOST
+ bool "Toshiba Visconti PCIe controller"
+ depends on ARCH_VISCONTI || COMPILE_TEST
+ depends on PCI_MSI
select PCIE_DW_HOST
help
- Say Y here if you want PCIe controller support on UniPhier SoCs.
- This driver supports LD20 and PXs3 SoCs.
+ Say Y here if you want PCIe controller support on Toshiba Visconti SoC.
+ This driver supports TMPV7708 SoC.
endmenu
diff --git a/drivers/pci/controller/dwc/Makefile b/drivers/pci/controller/dwc/Makefile
index 7bcdcdf5024e..67ba59c02038 100644
--- a/drivers/pci/controller/dwc/Makefile
+++ b/drivers/pci/controller/dwc/Makefile
@@ -1,21 +1,43 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_PCIE_DW) += pcie-designware.o
+obj-$(CONFIG_PCIE_DW_DEBUGFS) += pcie-designware-debugfs.o
obj-$(CONFIG_PCIE_DW_HOST) += pcie-designware-host.o
obj-$(CONFIG_PCIE_DW_EP) += pcie-designware-ep.o
obj-$(CONFIG_PCIE_DW_PLAT) += pcie-designware-plat.o
+obj-$(CONFIG_PCIE_AMD_MDB) += pcie-amd-mdb.o
+obj-$(CONFIG_PCIE_BT1) += pcie-bt1.o
obj-$(CONFIG_PCI_DRA7XX) += pci-dra7xx.o
obj-$(CONFIG_PCI_EXYNOS) += pci-exynos.o
+obj-$(CONFIG_PCIE_FU740) += pcie-fu740.o
obj-$(CONFIG_PCI_IMX6) += pci-imx6.o
+obj-$(CONFIG_PCIE_NXP_S32G) += pcie-nxp-s32g.o
obj-$(CONFIG_PCIE_SPEAR13XX) += pcie-spear13xx.o
+# ARM32 platforms use hook_fault_code() and cannot support a loadable module.
obj-$(CONFIG_PCI_KEYSTONE) += pci-keystone.o
+# On non-ARM32 platforms, a loadable module can be supported.
+obj-$(CONFIG_PCI_KEYSTONE_TRISTATE) += pci-keystone.o
obj-$(CONFIG_PCI_LAYERSCAPE) += pci-layerscape.o
+obj-$(CONFIG_PCI_LAYERSCAPE_EP) += pci-layerscape-ep.o
+obj-$(CONFIG_PCIE_QCOM_COMMON) += pcie-qcom-common.o
obj-$(CONFIG_PCIE_QCOM) += pcie-qcom.o
+obj-$(CONFIG_PCIE_QCOM_EP) += pcie-qcom-ep.o
obj-$(CONFIG_PCIE_ARMADA_8K) += pcie-armada8k.o
obj-$(CONFIG_PCIE_ARTPEC6) += pcie-artpec6.o
+obj-$(CONFIG_PCIE_ROCKCHIP_DW) += pcie-dw-rockchip.o
+obj-$(CONFIG_PCIE_SOPHGO_DW) += pcie-sophgo.o
+obj-$(CONFIG_PCIE_INTEL_GW) += pcie-intel-gw.o
+obj-$(CONFIG_PCIE_KEEMBAY) += pcie-keembay.o
obj-$(CONFIG_PCIE_KIRIN) += pcie-kirin.o
obj-$(CONFIG_PCIE_HISI_STB) += pcie-histb.o
obj-$(CONFIG_PCI_MESON) += pci-meson.o
+obj-$(CONFIG_PCIE_TEGRA194) += pcie-tegra194.o
obj-$(CONFIG_PCIE_UNIPHIER) += pcie-uniphier.o
+obj-$(CONFIG_PCIE_UNIPHIER_EP) += pcie-uniphier-ep.o
+obj-$(CONFIG_PCIE_VISCONTI_HOST) += pcie-visconti.o
+obj-$(CONFIG_PCIE_RCAR_GEN4) += pcie-rcar-gen4.o
+obj-$(CONFIG_PCIE_SPACEMIT_K1) += pcie-spacemit-k1.o
+obj-$(CONFIG_PCIE_STM32_HOST) += pcie-stm32.o
+obj-$(CONFIG_PCIE_STM32_EP) += pcie-stm32-ep.o
# The following drivers are for devices that use the generic ACPI
# pci_root.c driver but don't support standard ECAM config access.
@@ -27,6 +49,13 @@ obj-$(CONFIG_PCIE_UNIPHIER) += pcie-uniphier.o
# ARM64 and use internal ifdefs to only build the pieces we need
# depending on whether ACPI, the DT driver, or both are enabled.
-ifdef CONFIG_PCI
+obj-$(CONFIG_PCIE_AL) += pcie-al.o
+obj-$(CONFIG_PCI_HISI) += pcie-hisi.o
+
+ifdef CONFIG_ACPI
+ifdef CONFIG_PCI_QUIRKS
+obj-$(CONFIG_ARM64) += pcie-al.o
obj-$(CONFIG_ARM64) += pcie-hisi.o
+obj-$(CONFIG_ARM64) += pcie-tegra194-acpi.o
+endif
endif
diff --git a/drivers/pci/controller/dwc/pci-dra7xx.c b/drivers/pci/controller/dwc/pci-dra7xx.c
index a32d6dde7a57..01cfd9aeb0b8 100644
--- a/drivers/pci/controller/dwc/pci-dra7xx.c
+++ b/drivers/pci/controller/dwc/pci-dra7xx.c
@@ -2,21 +2,22 @@
/*
* pcie-dra7xx - PCIe controller driver for TI DRA7xx SoCs
*
- * Copyright (C) 2013-2014 Texas Instruments Incorporated - http://www.ti.com
+ * Copyright (C) 2013-2014 Texas Instruments Incorporated - https://www.ti.com
*
* Authors: Kishon Vijay Abraham I <kishon@ti.com>
*/
+#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
+#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/of_device.h>
-#include <linux/of_gpio.h>
+#include <linux/module.h>
+#include <linux/of.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
@@ -26,6 +27,7 @@
#include <linux/types.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
+#include <linux/gpio/consumer.h>
#include "../../pci.h"
#include "pcie-designware.h"
@@ -72,8 +74,6 @@
#define LINK_UP BIT(16)
#define DRA7XX_CPU_TO_BUS_ADDR 0x0FFFFFFF
-#define EXP_CAP_ID_OFFSET 0x70
-
#define PCIECTRL_TI_CONF_INTX_ASSERT 0x0124
#define PCIECTRL_TI_CONF_INTX_DEASSERT 0x0128
@@ -81,18 +81,23 @@
#define MSI_REQ_GRANT BIT(0)
#define MSI_VECTOR_SHIFT 7
+#define PCIE_1LANE_2LANE_SELECTION BIT(13)
+#define PCIE_B1C0_MODE_SEL BIT(2)
+#define PCIE_B0_B1_TSYNCEN BIT(0)
+
struct dra7xx_pcie {
struct dw_pcie *pci;
void __iomem *base; /* DT ti_conf */
int phy_count; /* DT phy-names count */
struct phy **phy;
- int link_gen;
struct irq_domain *irq_domain;
+ struct clk *clk;
enum dw_pcie_device_mode mode;
};
struct dra7xx_pcie_of_data {
enum dw_pcie_device_mode mode;
+ u32 b1co_mode_sel_mask;
};
#define to_dra7xx_pcie(x) dev_get_drvdata((x)->dev)
@@ -108,17 +113,17 @@ static inline void dra7xx_pcie_writel(struct dra7xx_pcie *pcie, u32 offset,
writel(value, pcie->base + offset);
}
-static u64 dra7xx_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 pci_addr)
+static u64 dra7xx_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 cpu_addr)
{
- return pci_addr & DRA7XX_CPU_TO_BUS_ADDR;
+ return cpu_addr & DRA7XX_CPU_TO_BUS_ADDR;
}
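[Editor's note: the fixup masks the CPU address down to its low 28 bits before the address is programmed into the outbound translation unit. A standalone worked example of the masking, illustrative only and compiled outside the kernel:

	#include <assert.h>

	#define DRA7XX_CPU_TO_BUS_ADDR 0x0FFFFFFFu

	int main(void)
	{
		/* A CPU address in the PCIe window keeps only its low
		 * 28 bits: 0x20001000 becomes 0x00001000. */
		assert((0x20001000u & DRA7XX_CPU_TO_BUS_ADDR) == 0x00001000u);
		return 0;
	}]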
-static int dra7xx_pcie_link_up(struct dw_pcie *pci)
+static bool dra7xx_pcie_link_up(struct dw_pcie *pci)
{
struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
u32 reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_PHY_CS);
- return !!(reg & LINK_UP);
+ return reg & LINK_UP;
}
static void dra7xx_pcie_stop_link(struct dw_pcie *pci)
@@ -136,33 +141,12 @@ static int dra7xx_pcie_establish_link(struct dw_pcie *pci)
struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
struct device *dev = pci->dev;
u32 reg;
- u32 exp_cap_off = EXP_CAP_ID_OFFSET;
if (dw_pcie_link_up(pci)) {
dev_err(dev, "link is already up\n");
return 0;
}
- if (dra7xx->link_gen == 1) {
- dw_pcie_read(pci->dbi_base + exp_cap_off + PCI_EXP_LNKCAP,
- 4, &reg);
- if ((reg & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) {
- reg &= ~((u32)PCI_EXP_LNKCAP_SLS);
- reg |= PCI_EXP_LNKCAP_SLS_2_5GB;
- dw_pcie_write(pci->dbi_base + exp_cap_off +
- PCI_EXP_LNKCAP, 4, reg);
- }
-
- dw_pcie_read(pci->dbi_base + exp_cap_off + PCI_EXP_LNKCTL2,
- 2, &reg);
- if ((reg & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) {
- reg &= ~((u32)PCI_EXP_LNKCAP_SLS);
- reg |= PCI_EXP_LNKCAP_SLS_2_5GB;
- dw_pcie_write(pci->dbi_base + exp_cap_off +
- PCI_EXP_LNKCTL2, 2, reg);
- }
- }
-
reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD);
reg |= LTSSM_EN;
dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);
@@ -194,25 +178,16 @@ static void dra7xx_pcie_enable_interrupts(struct dra7xx_pcie *dra7xx)
dra7xx_pcie_enable_msi_interrupts(dra7xx);
}
-static int dra7xx_pcie_host_init(struct pcie_port *pp)
+static int dra7xx_pcie_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
- dw_pcie_setup_rc(pp);
-
- dra7xx_pcie_establish_link(pci);
- dw_pcie_wait_for_link(pci);
- dw_pcie_msi_init(pp);
dra7xx_pcie_enable_interrupts(dra7xx);
return 0;
}
-static const struct dw_pcie_host_ops dra7xx_pcie_host_ops = {
- .host_init = dra7xx_pcie_host_init,
-};
-
static int dra7xx_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
irq_hw_number_t hwirq)
{
@@ -227,58 +202,87 @@ static const struct irq_domain_ops intx_domain_ops = {
.xlate = pci_irqd_intx_xlate,
};
-static int dra7xx_pcie_init_irq_domain(struct pcie_port *pp)
+static int dra7xx_pcie_handle_msi(struct dw_pcie_rp *pp, int index)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct device *dev = pci->dev;
- struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
- struct device_node *node = dev->of_node;
- struct device_node *pcie_intc_node = of_get_next_child(node, NULL);
+ unsigned long val;
+ int pos;
- if (!pcie_intc_node) {
- dev_err(dev, "No PCIe Intc node found\n");
- return -ENODEV;
- }
+ val = dw_pcie_readl_dbi(pci, PCIE_MSI_INTR0_STATUS +
+ (index * MSI_REG_CTRL_BLOCK_SIZE));
+ if (!val)
+ return 0;
- dra7xx->irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
- &intx_domain_ops, pp);
- if (!dra7xx->irq_domain) {
- dev_err(dev, "Failed to get a INTx IRQ domain\n");
- return -ENODEV;
+ pos = find_first_bit(&val, MAX_MSI_IRQS_PER_CTRL);
+ while (pos != MAX_MSI_IRQS_PER_CTRL) {
+ generic_handle_domain_irq(pp->irq_domain,
+ (index * MAX_MSI_IRQS_PER_CTRL) + pos);
+ pos++;
+ pos = find_next_bit(&val, MAX_MSI_IRQS_PER_CTRL, pos);
}
- return 0;
+ return 1;
}
-static irqreturn_t dra7xx_pcie_msi_irq_handler(int irq, void *arg)
+static void dra7xx_pcie_handle_msi_irq(struct dw_pcie_rp *pp)
{
- struct dra7xx_pcie *dra7xx = arg;
- struct dw_pcie *pci = dra7xx->pci;
- struct pcie_port *pp = &pci->pp;
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ int ret, i, count, num_ctrls;
+
+ num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
+
+	/*
+	 * Need to make sure all MSI status bits read 0 before exiting.
+	 * Else, new MSI IRQs are not registered by the wrapper. Have an
+	 * upper bound for the loop and exit the handler in case of an IRQ
+	 * flood to avoid locking up the system in interrupt context.
+	 */
+ count = 0;
+ do {
+ ret = 0;
+
+ for (i = 0; i < num_ctrls; i++)
+ ret |= dra7xx_pcie_handle_msi(pp, i);
+ count++;
+ } while (ret && count <= 1000);
+
+ if (count > 1000)
+ dev_warn_ratelimited(pci->dev,
+ "Too many MSI IRQs to handle\n");
+}
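[Editor's note: the function above is a bounded drain: keep re-reading the MSI status registers until they all read zero, but cap the iterations so a flooding device cannot wedge the CPU in interrupt context. A self-contained sketch of the same pattern, where read_status() is a hypothetical stand-in for the driver's MMIO read, simulated here:

	#include <stdio.h>

	/* Hypothetical stand-in for the MMIO status read; pretend three
	 * reads return pending bits before the source goes quiet. */
	static unsigned int fake_pending = 3;

	static unsigned int read_status(void)
	{
		return fake_pending ? fake_pending-- : 0;
	}

	int main(void)
	{
		int count = 0;
		unsigned int pending;

		/* Drain until the status reads zero, with an upper bound
		 * so an event flood cannot lock up the handler. */
		do {
			pending = read_status();
			count++;
		} while (pending && count <= 1000);

		if (count > 1000)
			printf("too many pending events\n");
		return 0;
	}]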
+
+static void dra7xx_pcie_msi_irq_handler(struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct dra7xx_pcie *dra7xx;
+ struct dw_pcie_rp *pp;
+ struct dw_pcie *pci;
unsigned long reg;
- u32 virq, bit;
+ u32 bit;
+
+ chained_irq_enter(chip, desc);
+
+ pp = irq_desc_get_handler_data(desc);
+ pci = to_dw_pcie_from_pp(pp);
+ dra7xx = to_dra7xx_pcie(pci);
reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI);
+ dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI, reg);
switch (reg) {
case MSI:
- dw_handle_msi_irq(pp);
+ dra7xx_pcie_handle_msi_irq(pp);
break;
case INTA:
case INTB:
case INTC:
case INTD:
- for_each_set_bit(bit, &reg, PCI_NUM_INTX) {
- virq = irq_find_mapping(dra7xx->irq_domain, bit);
- if (virq)
- generic_handle_irq(virq);
- }
+ for_each_set_bit(bit, &reg, PCI_NUM_INTX)
+ generic_handle_domain_irq(dra7xx->irq_domain, bit);
break;
}
- dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI, reg);
-
- return IRQ_HANDLED;
+ chained_irq_exit(chip, desc);
}
static irqreturn_t dra7xx_pcie_irq_handler(int irq, void *arg)
@@ -340,19 +344,49 @@ static irqreturn_t dra7xx_pcie_irq_handler(int irq, void *arg)
return IRQ_HANDLED;
}
+static int dra7xx_pcie_init_irq_domain(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct device *dev = pci->dev;
+ struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
+ struct device_node *node = dev->of_node;
+ struct device_node *pcie_intc_node = of_get_next_child(node, NULL);
+
+ if (!pcie_intc_node) {
+ dev_err(dev, "No PCIe Intc node found\n");
+ return -ENODEV;
+ }
+
+ irq_set_chained_handler_and_data(pp->irq, dra7xx_pcie_msi_irq_handler,
+ pp);
+ dra7xx->irq_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node),
+ PCI_NUM_INTX, &intx_domain_ops, pp);
+ of_node_put(pcie_intc_node);
+ if (!dra7xx->irq_domain) {
+		dev_err(dev, "Failed to get an INTx IRQ domain\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static const struct dw_pcie_host_ops dra7xx_pcie_host_ops = {
+ .init = dra7xx_pcie_host_init,
+};
+
static void dra7xx_pcie_ep_init(struct dw_pcie_ep *ep)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
enum pci_barno bar;
- for (bar = BAR_0; bar <= BAR_5; bar++)
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
dw_pcie_ep_reset_bar(pci, bar);
dra7xx_pcie_enable_wrapper_interrupts(dra7xx);
}
-static void dra7xx_pcie_raise_legacy_irq(struct dra7xx_pcie *dra7xx)
+static void dra7xx_pcie_raise_intx_irq(struct dra7xx_pcie *dra7xx)
{
dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_INTX_ASSERT, 0x1);
mdelay(1);
@@ -370,16 +404,16 @@ static void dra7xx_pcie_raise_msi_irq(struct dra7xx_pcie *dra7xx,
}
static int dra7xx_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
- enum pci_epc_irq_type type, u16 interrupt_num)
+ unsigned int type, u16 interrupt_num)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
switch (type) {
- case PCI_EPC_IRQ_LEGACY:
- dra7xx_pcie_raise_legacy_irq(dra7xx);
+ case PCI_IRQ_INTX:
+ dra7xx_pcie_raise_intx_irq(dra7xx);
break;
- case PCI_EPC_IRQ_MSI:
+ case PCI_IRQ_MSI:
dra7xx_pcie_raise_msi_irq(dra7xx, interrupt_num);
break;
default:
@@ -389,78 +423,81 @@ static int dra7xx_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
return 0;
}
-static struct dw_pcie_ep_ops pcie_ep_ops = {
- .ep_init = dra7xx_pcie_ep_init,
+static const struct pci_epc_features dra7xx_pcie_epc_features = {
+ .linkup_notifier = true,
+ .msi_capable = true,
+};
+
+static const struct pci_epc_features*
+dra7xx_pcie_get_features(struct dw_pcie_ep *ep)
+{
+ return &dra7xx_pcie_epc_features;
+}
+
+static const struct dw_pcie_ep_ops pcie_ep_ops = {
+ .init = dra7xx_pcie_ep_init,
.raise_irq = dra7xx_pcie_raise_irq,
+ .get_features = dra7xx_pcie_get_features,
};
-static int __init dra7xx_add_pcie_ep(struct dra7xx_pcie *dra7xx,
- struct platform_device *pdev)
+static int dra7xx_add_pcie_ep(struct dra7xx_pcie *dra7xx,
+ struct platform_device *pdev)
{
int ret;
struct dw_pcie_ep *ep;
- struct resource *res;
struct device *dev = &pdev->dev;
struct dw_pcie *pci = dra7xx->pci;
ep = &pci->ep;
ep->ops = &pcie_ep_ops;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ep_dbics");
- pci->dbi_base = devm_ioremap_resource(dev, res);
+ pci->dbi_base = devm_platform_ioremap_resource_byname(pdev, "ep_dbics");
if (IS_ERR(pci->dbi_base))
return PTR_ERR(pci->dbi_base);
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ep_dbics2");
- pci->dbi_base2 = devm_ioremap_resource(dev, res);
+ pci->dbi_base2 =
+ devm_platform_ioremap_resource_byname(pdev, "ep_dbics2");
if (IS_ERR(pci->dbi_base2))
return PTR_ERR(pci->dbi_base2);
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
- if (!res)
- return -EINVAL;
-
- ep->phys_base = res->start;
- ep->addr_size = resource_size(res);
-
ret = dw_pcie_ep_init(ep);
if (ret) {
dev_err(dev, "failed to initialize endpoint\n");
return ret;
}
+ ret = dw_pcie_ep_init_registers(ep);
+ if (ret) {
+ dev_err(dev, "Failed to initialize DWC endpoint registers\n");
+ dw_pcie_ep_deinit(ep);
+ return ret;
+ }
+
+ pci_epc_init_notify(ep->epc);
+
return 0;
}
-static int __init dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx,
- struct platform_device *pdev)
+static int dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx,
+ struct platform_device *pdev)
{
int ret;
struct dw_pcie *pci = dra7xx->pci;
- struct pcie_port *pp = &pci->pp;
+ struct dw_pcie_rp *pp = &pci->pp;
struct device *dev = pci->dev;
- struct resource *res;
pp->irq = platform_get_irq(pdev, 1);
- if (pp->irq < 0) {
- dev_err(dev, "missing IRQ resource\n");
+ if (pp->irq < 0)
return pp->irq;
- }
- ret = devm_request_irq(dev, pp->irq, dra7xx_pcie_msi_irq_handler,
- IRQF_SHARED | IRQF_NO_THREAD,
- "dra7-pcie-msi", dra7xx);
- if (ret) {
- dev_err(dev, "failed to request irq\n");
- return ret;
- }
+ /* MSI IRQ is muxed */
+ pp->msi_irq[0] = -ENODEV;
ret = dra7xx_pcie_init_irq_domain(pp);
if (ret < 0)
return ret;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rc_dbics");
- pci->dbi_base = devm_ioremap_resource(dev, res);
+ pci->dbi_base = devm_platform_ioremap_resource_byname(pdev, "rc_dbics");
if (IS_ERR(pci->dbi_base))
return PTR_ERR(pci->dbi_base);
@@ -499,6 +536,10 @@ static int dra7xx_pcie_enable_phy(struct dra7xx_pcie *dra7xx)
int i;
for (i = 0; i < phy_count; i++) {
+ ret = phy_set_mode(dra7xx->phy[i], PHY_MODE_PCIE);
+ if (ret < 0)
+ goto err_phy;
+
ret = phy_init(dra7xx->phy[i]);
if (ret < 0)
goto err_phy;
@@ -529,6 +570,26 @@ static const struct dra7xx_pcie_of_data dra7xx_pcie_ep_of_data = {
.mode = DW_PCIE_EP_TYPE,
};
+static const struct dra7xx_pcie_of_data dra746_pcie_rc_of_data = {
+ .b1co_mode_sel_mask = BIT(2),
+ .mode = DW_PCIE_RC_TYPE,
+};
+
+static const struct dra7xx_pcie_of_data dra726_pcie_rc_of_data = {
+ .b1co_mode_sel_mask = GENMASK(3, 2),
+ .mode = DW_PCIE_RC_TYPE,
+};
+
+static const struct dra7xx_pcie_of_data dra746_pcie_ep_of_data = {
+ .b1co_mode_sel_mask = BIT(2),
+ .mode = DW_PCIE_EP_TYPE,
+};
+
+static const struct dra7xx_pcie_of_data dra726_pcie_ep_of_data = {
+ .b1co_mode_sel_mask = GENMASK(3, 2),
+ .mode = DW_PCIE_EP_TYPE,
+};
+
static const struct of_device_id of_dra7xx_pcie_match[] = {
{
.compatible = "ti,dra7-pcie",
@@ -538,8 +599,25 @@ static const struct of_device_id of_dra7xx_pcie_match[] = {
.compatible = "ti,dra7-pcie-ep",
.data = &dra7xx_pcie_ep_of_data,
},
+ {
+ .compatible = "ti,dra746-pcie-rc",
+ .data = &dra746_pcie_rc_of_data,
+ },
+ {
+ .compatible = "ti,dra726-pcie-rc",
+ .data = &dra726_pcie_rc_of_data,
+ },
+ {
+ .compatible = "ti,dra746-pcie-ep",
+ .data = &dra746_pcie_ep_of_data,
+ },
+ {
+ .compatible = "ti,dra726-pcie-ep",
+ .data = &dra726_pcie_ep_of_data,
+ },
{},
};
+MODULE_DEVICE_TABLE(of, of_dra7xx_pcie_match);
/*
* dra7xx_pcie_unaligned_memaccess: workaround for AM572x/AM571x Errata i870
@@ -556,34 +634,47 @@ static int dra7xx_pcie_unaligned_memaccess(struct device *dev)
{
int ret;
struct device_node *np = dev->of_node;
- struct of_phandle_args args;
+ unsigned int args[2];
struct regmap *regmap;
- regmap = syscon_regmap_lookup_by_phandle(np,
- "ti,syscon-unaligned-access");
+ regmap = syscon_regmap_lookup_by_phandle_args(np, "ti,syscon-unaligned-access",
+ 2, args);
if (IS_ERR(regmap)) {
dev_dbg(dev, "can't get ti,syscon-unaligned-access\n");
return -EINVAL;
}
- ret = of_parse_phandle_with_fixed_args(np, "ti,syscon-unaligned-access",
- 2, 0, &args);
- if (ret) {
- dev_err(dev, "failed to parse ti,syscon-unaligned-access\n");
- return ret;
- }
-
- ret = regmap_update_bits(regmap, args.args[0], args.args[1],
- args.args[1]);
+ ret = regmap_update_bits(regmap, args[0], args[1], args[1]);
if (ret)
dev_err(dev, "failed to enable unaligned access\n");
- of_node_put(args.np);
-
return ret;
}
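[Editor's note: syscon_regmap_lookup_by_phandle_args() resolves the phandle and returns the trailing argument cells in a single call, which is why the separate of_parse_phandle_with_fixed_args() step could be dropped above. The two cells are the register offset and the bit mask; a hypothetical device-tree fragment, with the label, offset, and mask values invented purely for illustration:

	&pcie1 {
		/* <syscon phandle, register offset, bit mask> */
		ti,syscon-unaligned-access = <&scm_conf 0x1000 0x1>;
	};]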
-static int __init dra7xx_pcie_probe(struct platform_device *pdev)
+static int dra7xx_pcie_configure_two_lane(struct device *dev,
+ u32 b1co_mode_sel_mask)
+{
+ struct device_node *np = dev->of_node;
+ struct regmap *pcie_syscon;
+ unsigned int pcie_reg;
+ u32 mask;
+ u32 val;
+
+ pcie_syscon = syscon_regmap_lookup_by_phandle_args(np, "ti,syscon-lane-sel",
+ 1, &pcie_reg);
+ if (IS_ERR(pcie_syscon)) {
+ dev_err(dev, "unable to get ti,syscon-lane-sel\n");
+ return -EINVAL;
+ }
+
+ mask = b1co_mode_sel_mask | PCIE_B0_B1_TSYNCEN;
+ val = PCIE_B1C0_MODE_SEL | PCIE_B0_B1_TSYNCEN;
+ regmap_update_bits(pcie_syscon, pcie_reg, mask, val);
+
+ return 0;
+}
+
+static int dra7xx_pcie_probe(struct platform_device *pdev)
{
u32 reg;
int ret;
@@ -593,23 +684,22 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
struct phy **phy;
struct device_link **link;
void __iomem *base;
- struct resource *res;
struct dw_pcie *pci;
struct dra7xx_pcie *dra7xx;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
char name[10];
struct gpio_desc *reset;
- const struct of_device_id *match;
const struct dra7xx_pcie_of_data *data;
enum dw_pcie_device_mode mode;
+ u32 b1co_mode_sel_mask;
- match = of_match_device(of_match_ptr(of_dra7xx_pcie_match), dev);
- if (!match)
+ data = of_device_get_match_data(dev);
+ if (!data)
return -EINVAL;
- data = (struct dra7xx_pcie_of_data *)match->data;
mode = (enum dw_pcie_device_mode)data->mode;
+ b1co_mode_sel_mask = data->b1co_mode_sel_mask;
dra7xx = devm_kzalloc(dev, sizeof(*dra7xx), GFP_KERNEL);
if (!dra7xx)
@@ -623,15 +713,12 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
pci->ops = &dw_pcie_ops;
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(dev, "missing IRQ resource: %d\n", irq);
+ if (irq < 0)
return irq;
- }
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ti_conf");
- base = devm_ioremap_nocache(dev, res->start, resource_size(res));
- if (!base)
- return -ENOMEM;
+ base = devm_platform_ioremap_resource_byname(pdev, "ti_conf");
+ if (IS_ERR(base))
+ return PTR_ERR(base);
phy_count = of_property_count_strings(np, "phy-names");
if (phy_count < 0) {
@@ -647,6 +734,15 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
if (!link)
return -ENOMEM;
+ dra7xx->clk = devm_clk_get_optional(dev, NULL);
+ if (IS_ERR(dra7xx->clk))
+ return dev_err_probe(dev, PTR_ERR(dra7xx->clk),
+ "clock request failed");
+
+ ret = clk_prepare_enable(dra7xx->clk);
+ if (ret)
+ return ret;
+
for (i = 0; i < phy_count; i++) {
snprintf(name, sizeof(name), "pcie-phy%d", i);
phy[i] = devm_phy_get(dev, name);
@@ -665,6 +761,12 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
dra7xx->pci = pci;
dra7xx->phy_count = phy_count;
+ if (phy_count == 2) {
+ ret = dra7xx_pcie_configure_two_lane(dev, b1co_mode_sel_mask);
+ if (ret < 0)
+ dra7xx->phy_count = 1; /* Fallback to x1 lane mode */
+ }
+
ret = dra7xx_pcie_enable_phy(dra7xx);
if (ret) {
dev_err(dev, "failed to enable phy\n");
@@ -691,10 +793,6 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
reg &= ~LTSSM_EN;
dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);
- dra7xx->link_gen = of_pci_get_max_link_speed(np);
- if (dra7xx->link_gen < 0 || dra7xx->link_gen > 2)
- dra7xx->link_gen = 2;
-
switch (mode) {
case DW_PCIE_RC_TYPE:
if (!IS_ENABLED(CONFIG_PCI_DRA7XX_HOST)) {
@@ -735,19 +833,25 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
}
dra7xx->mode = mode;
- ret = devm_request_irq(dev, irq, dra7xx_pcie_irq_handler,
- IRQF_SHARED, "dra7xx-pcie-main", dra7xx);
+ ret = devm_request_threaded_irq(dev, irq, NULL, dra7xx_pcie_irq_handler,
+ IRQF_SHARED | IRQF_ONESHOT,
+ "dra7xx-pcie-main", dra7xx);
if (ret) {
dev_err(dev, "failed to request irq\n");
- goto err_gpio;
+ goto err_deinit;
}
return 0;
-err_gpio:
- pm_runtime_put(dev);
+err_deinit:
+ if (dra7xx->mode == DW_PCIE_RC_TYPE)
+ dw_pcie_host_deinit(&dra7xx->pci->pp);
+ else
+ dw_pcie_ep_deinit(&dra7xx->pci->ep);
+err_gpio:
err_get_sync:
+ pm_runtime_put(dev);
pm_runtime_disable(dev);
dra7xx_pcie_disable_phy(dra7xx);
@@ -758,7 +862,6 @@ err_link:
return ret;
}
-#ifdef CONFIG_PM_SLEEP
static int dra7xx_pcie_suspend(struct device *dev)
{
struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
@@ -815,7 +918,6 @@ static int dra7xx_pcie_resume_noirq(struct device *dev)
return 0;
}
-#endif
static void dra7xx_pcie_shutdown(struct platform_device *pdev)
{
@@ -831,15 +933,18 @@ static void dra7xx_pcie_shutdown(struct platform_device *pdev)
pm_runtime_disable(dev);
dra7xx_pcie_disable_phy(dra7xx);
+
+ clk_disable_unprepare(dra7xx->clk);
}
static const struct dev_pm_ops dra7xx_pcie_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend, dra7xx_pcie_resume)
- SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend_noirq,
- dra7xx_pcie_resume_noirq)
+ SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend, dra7xx_pcie_resume)
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend_noirq,
+ dra7xx_pcie_resume_noirq)
};
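[Editor's note: switching from the SET_* macros to SYSTEM_SLEEP_PM_OPS()/NOIRQ_SYSTEM_SLEEP_PM_OPS() is what lets the #ifdef CONFIG_PM_SLEEP guards around the callbacks go away: the new macros reference the callbacks through pm_sleep_ptr(), so the compiler can discard them when sleep support is disabled. A sketch of the companion driver wiring with hypothetical foo names; this driver's actual .pm assignment is outside the quoted context:

	static struct platform_driver foo_driver = {
		.driver = {
			.name = "foo",
			/* NULL when CONFIG_PM_SLEEP is off, letting the
			 * unreferenced pm ops and callbacks be dropped. */
			.pm = pm_sleep_ptr(&foo_pm_ops),
		},
	};]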
static struct platform_driver dra7xx_pcie_driver = {
+ .probe = dra7xx_pcie_probe,
.driver = {
.name = "dra7-pcie",
.of_match_table = of_dra7xx_pcie_match,
@@ -848,4 +953,8 @@ static struct platform_driver dra7xx_pcie_driver = {
},
.shutdown = dra7xx_pcie_shutdown,
};
-builtin_platform_driver_probe(dra7xx_pcie_driver, dra7xx_pcie_probe);
+module_platform_driver(dra7xx_pcie_driver);
+
+MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
+MODULE_DESCRIPTION("PCIe controller driver for TI DRA7xx SoCs");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/dwc/pci-exynos.c b/drivers/pci/controller/dwc/pci-exynos.c
index cee5f2f590e2..0bb7d4f5d784 100644
--- a/drivers/pci/controller/dwc/pci-exynos.c
+++ b/drivers/pci/controller/dwc/pci-exynos.c
@@ -1,27 +1,25 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * PCIe host controller driver for Samsung EXYNOS SoCs
+ * PCIe host controller driver for Samsung Exynos SoCs
*
- * Copyright (C) 2013 Samsung Electronics Co., Ltd.
- * http://www.samsung.com
+ * Copyright (C) 2013-2020 Samsung Electronics Co., Ltd.
+ * https://www.samsung.com
*
* Author: Jingoo Han <jg1.han@samsung.com>
+ * Jaehoon Chung <jh80.chung@samsung.com>
*/
#include <linux/clk.h>
#include <linux/delay.h>
-#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/of_device.h>
-#include <linux/of_gpio.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
-#include <linux/resource.h>
-#include <linux/signal.h>
-#include <linux/types.h>
+#include <linux/regulator/consumer.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
#include "pcie-designware.h"
@@ -37,128 +35,27 @@
#define PCIE_IRQ_SPECIAL 0x008
#define PCIE_IRQ_EN_PULSE 0x00c
#define PCIE_IRQ_EN_LEVEL 0x010
-#define IRQ_MSI_ENABLE BIT(2)
#define PCIE_IRQ_EN_SPECIAL 0x014
-#define PCIE_PWR_RESET 0x018
+#define PCIE_SW_WAKE 0x018
+#define PCIE_BUS_EN BIT(1)
#define PCIE_CORE_RESET 0x01c
#define PCIE_CORE_RESET_ENABLE BIT(0)
#define PCIE_STICKY_RESET 0x020
#define PCIE_NONSTICKY_RESET 0x024
#define PCIE_APP_INIT_RESET 0x028
#define PCIE_APP_LTSSM_ENABLE 0x02c
-#define PCIE_ELBI_RDLH_LINKUP 0x064
+#define PCIE_ELBI_RDLH_LINKUP 0x074
+#define PCIE_ELBI_XMLH_LINKUP BIT(4)
#define PCIE_ELBI_LTSSM_ENABLE 0x1
#define PCIE_ELBI_SLV_AWMISC 0x11c
#define PCIE_ELBI_SLV_ARMISC 0x120
#define PCIE_ELBI_SLV_DBI_ENABLE BIT(21)
-struct exynos_pcie_mem_res {
- void __iomem *elbi_base; /* DT 0th resource: PCIe CTRL */
-};
-
-struct exynos_pcie_clk_res {
- struct clk *clk;
- struct clk *bus_clk;
-};
-
struct exynos_pcie {
- struct dw_pcie *pci;
- struct exynos_pcie_mem_res *mem_res;
- struct exynos_pcie_clk_res *clk_res;
- const struct exynos_pcie_ops *ops;
- int reset_gpio;
-
+ struct dw_pcie pci;
+ struct clk_bulk_data *clks;
struct phy *phy;
-};
-
-struct exynos_pcie_ops {
- int (*get_mem_resources)(struct platform_device *pdev,
- struct exynos_pcie *ep);
- int (*get_clk_resources)(struct exynos_pcie *ep);
- int (*init_clk_resources)(struct exynos_pcie *ep);
- void (*deinit_clk_resources)(struct exynos_pcie *ep);
-};
-
-static int exynos5440_pcie_get_mem_resources(struct platform_device *pdev,
- struct exynos_pcie *ep)
-{
- struct dw_pcie *pci = ep->pci;
- struct device *dev = pci->dev;
- struct resource *res;
-
- ep->mem_res = devm_kzalloc(dev, sizeof(*ep->mem_res), GFP_KERNEL);
- if (!ep->mem_res)
- return -ENOMEM;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ep->mem_res->elbi_base = devm_ioremap_resource(dev, res);
- if (IS_ERR(ep->mem_res->elbi_base))
- return PTR_ERR(ep->mem_res->elbi_base);
-
- return 0;
-}
-
-static int exynos5440_pcie_get_clk_resources(struct exynos_pcie *ep)
-{
- struct dw_pcie *pci = ep->pci;
- struct device *dev = pci->dev;
-
- ep->clk_res = devm_kzalloc(dev, sizeof(*ep->clk_res), GFP_KERNEL);
- if (!ep->clk_res)
- return -ENOMEM;
-
- ep->clk_res->clk = devm_clk_get(dev, "pcie");
- if (IS_ERR(ep->clk_res->clk)) {
- dev_err(dev, "Failed to get pcie rc clock\n");
- return PTR_ERR(ep->clk_res->clk);
- }
-
- ep->clk_res->bus_clk = devm_clk_get(dev, "pcie_bus");
- if (IS_ERR(ep->clk_res->bus_clk)) {
- dev_err(dev, "Failed to get pcie bus clock\n");
- return PTR_ERR(ep->clk_res->bus_clk);
- }
-
- return 0;
-}
-
-static int exynos5440_pcie_init_clk_resources(struct exynos_pcie *ep)
-{
- struct dw_pcie *pci = ep->pci;
- struct device *dev = pci->dev;
- int ret;
-
- ret = clk_prepare_enable(ep->clk_res->clk);
- if (ret) {
- dev_err(dev, "cannot enable pcie rc clock");
- return ret;
- }
-
- ret = clk_prepare_enable(ep->clk_res->bus_clk);
- if (ret) {
- dev_err(dev, "cannot enable pcie bus clock");
- goto err_bus_clk;
- }
-
- return 0;
-
-err_bus_clk:
- clk_disable_unprepare(ep->clk_res->clk);
-
- return ret;
-}
-
-static void exynos5440_pcie_deinit_clk_resources(struct exynos_pcie *ep)
-{
- clk_disable_unprepare(ep->clk_res->bus_clk);
- clk_disable_unprepare(ep->clk_res->clk);
-}
-
-static const struct exynos_pcie_ops exynos5440_pcie_ops = {
- .get_mem_resources = exynos5440_pcie_get_mem_resources,
- .get_clk_resources = exynos5440_pcie_get_clk_resources,
- .init_clk_resources = exynos5440_pcie_init_clk_resources,
- .deinit_clk_resources = exynos5440_pcie_deinit_clk_resources,
+ struct regulator_bulk_data supplies[2];
};
static void exynos_pcie_writel(void __iomem *base, u32 val, u32 reg)
@@ -173,117 +70,78 @@ static u32 exynos_pcie_readl(void __iomem *base, u32 reg)
static void exynos_pcie_sideband_dbi_w_mode(struct exynos_pcie *ep, bool on)
{
+ struct dw_pcie *pci = &ep->pci;
u32 val;
- val = exynos_pcie_readl(ep->mem_res->elbi_base, PCIE_ELBI_SLV_AWMISC);
+ val = exynos_pcie_readl(pci->elbi_base, PCIE_ELBI_SLV_AWMISC);
if (on)
val |= PCIE_ELBI_SLV_DBI_ENABLE;
else
val &= ~PCIE_ELBI_SLV_DBI_ENABLE;
- exynos_pcie_writel(ep->mem_res->elbi_base, val, PCIE_ELBI_SLV_AWMISC);
+ exynos_pcie_writel(pci->elbi_base, val, PCIE_ELBI_SLV_AWMISC);
}
static void exynos_pcie_sideband_dbi_r_mode(struct exynos_pcie *ep, bool on)
{
+ struct dw_pcie *pci = &ep->pci;
u32 val;
- val = exynos_pcie_readl(ep->mem_res->elbi_base, PCIE_ELBI_SLV_ARMISC);
+ val = exynos_pcie_readl(pci->elbi_base, PCIE_ELBI_SLV_ARMISC);
if (on)
val |= PCIE_ELBI_SLV_DBI_ENABLE;
else
val &= ~PCIE_ELBI_SLV_DBI_ENABLE;
- exynos_pcie_writel(ep->mem_res->elbi_base, val, PCIE_ELBI_SLV_ARMISC);
+ exynos_pcie_writel(pci->elbi_base, val, PCIE_ELBI_SLV_ARMISC);
}
static void exynos_pcie_assert_core_reset(struct exynos_pcie *ep)
{
+ struct dw_pcie *pci = &ep->pci;
u32 val;
- val = exynos_pcie_readl(ep->mem_res->elbi_base, PCIE_CORE_RESET);
+ val = exynos_pcie_readl(pci->elbi_base, PCIE_CORE_RESET);
val &= ~PCIE_CORE_RESET_ENABLE;
- exynos_pcie_writel(ep->mem_res->elbi_base, val, PCIE_CORE_RESET);
- exynos_pcie_writel(ep->mem_res->elbi_base, 0, PCIE_PWR_RESET);
- exynos_pcie_writel(ep->mem_res->elbi_base, 0, PCIE_STICKY_RESET);
- exynos_pcie_writel(ep->mem_res->elbi_base, 0, PCIE_NONSTICKY_RESET);
+ exynos_pcie_writel(pci->elbi_base, val, PCIE_CORE_RESET);
+ exynos_pcie_writel(pci->elbi_base, 0, PCIE_STICKY_RESET);
+ exynos_pcie_writel(pci->elbi_base, 0, PCIE_NONSTICKY_RESET);
}
static void exynos_pcie_deassert_core_reset(struct exynos_pcie *ep)
{
+ struct dw_pcie *pci = &ep->pci;
u32 val;
- val = exynos_pcie_readl(ep->mem_res->elbi_base, PCIE_CORE_RESET);
+ val = exynos_pcie_readl(pci->elbi_base, PCIE_CORE_RESET);
val |= PCIE_CORE_RESET_ENABLE;
- exynos_pcie_writel(ep->mem_res->elbi_base, val, PCIE_CORE_RESET);
- exynos_pcie_writel(ep->mem_res->elbi_base, 1, PCIE_STICKY_RESET);
- exynos_pcie_writel(ep->mem_res->elbi_base, 1, PCIE_NONSTICKY_RESET);
- exynos_pcie_writel(ep->mem_res->elbi_base, 1, PCIE_APP_INIT_RESET);
- exynos_pcie_writel(ep->mem_res->elbi_base, 0, PCIE_APP_INIT_RESET);
+ exynos_pcie_writel(pci->elbi_base, val, PCIE_CORE_RESET);
+ exynos_pcie_writel(pci->elbi_base, 1, PCIE_STICKY_RESET);
+ exynos_pcie_writel(pci->elbi_base, 1, PCIE_NONSTICKY_RESET);
+ exynos_pcie_writel(pci->elbi_base, 1, PCIE_APP_INIT_RESET);
+ exynos_pcie_writel(pci->elbi_base, 0, PCIE_APP_INIT_RESET);
}
-static void exynos_pcie_assert_reset(struct exynos_pcie *ep)
+static int exynos_pcie_start_link(struct dw_pcie *pci)
{
- struct dw_pcie *pci = ep->pci;
- struct device *dev = pci->dev;
-
- if (ep->reset_gpio >= 0)
- devm_gpio_request_one(dev, ep->reset_gpio,
- GPIOF_OUT_INIT_HIGH, "RESET");
-}
-
-static int exynos_pcie_establish_link(struct exynos_pcie *ep)
-{
- struct dw_pcie *pci = ep->pci;
- struct pcie_port *pp = &pci->pp;
- struct device *dev = pci->dev;
-
- if (dw_pcie_link_up(pci)) {
- dev_err(dev, "Link already up\n");
- return 0;
- }
-
- exynos_pcie_assert_core_reset(ep);
-
- phy_reset(ep->phy);
+ u32 val;
- exynos_pcie_writel(ep->mem_res->elbi_base, 1,
- PCIE_PWR_RESET);
-
- phy_power_on(ep->phy);
- phy_init(ep->phy);
-
- exynos_pcie_deassert_core_reset(ep);
- dw_pcie_setup_rc(pp);
- exynos_pcie_assert_reset(ep);
+ val = exynos_pcie_readl(pci->elbi_base, PCIE_SW_WAKE);
+ val &= ~PCIE_BUS_EN;
+ exynos_pcie_writel(pci->elbi_base, val, PCIE_SW_WAKE);
/* assert LTSSM enable */
- exynos_pcie_writel(ep->mem_res->elbi_base, PCIE_ELBI_LTSSM_ENABLE,
+ exynos_pcie_writel(pci->elbi_base, PCIE_ELBI_LTSSM_ENABLE,
PCIE_APP_LTSSM_ENABLE);
-
- /* check if the link is up or not */
- if (!dw_pcie_wait_for_link(pci))
- return 0;
-
- phy_power_off(ep->phy);
- return -ETIMEDOUT;
+ return 0;
}
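
Note that start_link() now only sets the LTSSM-enable bit and returns; with the reworked DWC core, waiting for the link to train is left to dw_pcie_host_init(), and the resume path later in this file calls dw_pcie_wait_for_link() itself.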
static void exynos_pcie_clear_irq_pulse(struct exynos_pcie *ep)
{
- u32 val;
+ struct dw_pcie *pci = &ep->pci;
- val = exynos_pcie_readl(ep->mem_res->elbi_base, PCIE_IRQ_PULSE);
- exynos_pcie_writel(ep->mem_res->elbi_base, val, PCIE_IRQ_PULSE);
-}
+ u32 val = exynos_pcie_readl(pci->elbi_base, PCIE_IRQ_PULSE);
-static void exynos_pcie_enable_irq_pulse(struct exynos_pcie *ep)
-{
- u32 val;
-
- /* enable INTX interrupt */
- val = IRQ_INTA_ASSERT | IRQ_INTB_ASSERT |
- IRQ_INTC_ASSERT | IRQ_INTD_ASSERT;
- exynos_pcie_writel(ep->mem_res->elbi_base, val, PCIE_IRQ_EN_PULSE);
+ exynos_pcie_writel(pci->elbi_base, val, PCIE_IRQ_PULSE);
}
static irqreturn_t exynos_pcie_irq_handler(int irq, void *arg)
@@ -294,26 +152,16 @@ static irqreturn_t exynos_pcie_irq_handler(int irq, void *arg)
return IRQ_HANDLED;
}
-static void exynos_pcie_msi_init(struct exynos_pcie *ep)
+static void exynos_pcie_enable_irq_pulse(struct exynos_pcie *ep)
{
- struct dw_pcie *pci = ep->pci;
- struct pcie_port *pp = &pci->pp;
- u32 val;
-
- dw_pcie_msi_init(pp);
+ struct dw_pcie *pci = &ep->pci;
- /* enable MSI interrupt */
- val = exynos_pcie_readl(ep->mem_res->elbi_base, PCIE_IRQ_EN_LEVEL);
- val |= IRQ_MSI_ENABLE;
- exynos_pcie_writel(ep->mem_res->elbi_base, val, PCIE_IRQ_EN_LEVEL);
-}
-
-static void exynos_pcie_enable_interrupts(struct exynos_pcie *ep)
-{
- exynos_pcie_enable_irq_pulse(ep);
+ u32 val = IRQ_INTA_ASSERT | IRQ_INTB_ASSERT |
+ IRQ_INTC_ASSERT | IRQ_INTD_ASSERT;
- if (IS_ENABLED(CONFIG_PCI_MSI))
- exynos_pcie_msi_init(ep);
+ exynos_pcie_writel(pci->elbi_base, val, PCIE_IRQ_EN_PULSE);
+ exynos_pcie_writel(pci->elbi_base, 0, PCIE_IRQ_EN_LEVEL);
+ exynos_pcie_writel(pci->elbi_base, 0, PCIE_IRQ_EN_SPECIAL);
}
static u32 exynos_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base,
@@ -338,90 +186,85 @@ static void exynos_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base,
exynos_pcie_sideband_dbi_w_mode(ep, false);
}
-static int exynos_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
- u32 *val)
+static int exynos_pcie_rd_own_conf(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val)
{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct exynos_pcie *ep = to_exynos_pcie(pci);
- int ret;
+ struct dw_pcie *pci = to_dw_pcie_from_pp(bus->sysdata);
- exynos_pcie_sideband_dbi_r_mode(ep, true);
- ret = dw_pcie_read(pci->dbi_base + where, size, val);
- exynos_pcie_sideband_dbi_r_mode(ep, false);
- return ret;
+ if (PCI_SLOT(devfn))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ *val = dw_pcie_read_dbi(pci, where, size);
+ return PCIBIOS_SUCCESSFUL;
}
-static int exynos_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
- u32 val)
+static int exynos_pcie_wr_own_conf(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val)
{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct exynos_pcie *ep = to_exynos_pcie(pci);
- int ret;
+ struct dw_pcie *pci = to_dw_pcie_from_pp(bus->sysdata);
- exynos_pcie_sideband_dbi_w_mode(ep, true);
- ret = dw_pcie_write(pci->dbi_base + where, size, val);
- exynos_pcie_sideband_dbi_w_mode(ep, false);
- return ret;
+ if (PCI_SLOT(devfn))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ dw_pcie_write_dbi(pci, where, size, val);
+ return PCIBIOS_SUCCESSFUL;
}
-static int exynos_pcie_link_up(struct dw_pcie *pci)
-{
- struct exynos_pcie *ep = to_exynos_pcie(pci);
- u32 val;
+static struct pci_ops exynos_pci_ops = {
+ .read = exynos_pcie_rd_own_conf,
+ .write = exynos_pcie_wr_own_conf,
+};
- val = exynos_pcie_readl(ep->mem_res->elbi_base, PCIE_ELBI_RDLH_LINKUP);
- if (val == PCIE_ELBI_LTSSM_ENABLE)
- return 1;
+static bool exynos_pcie_link_up(struct dw_pcie *pci)
+{
+ u32 val = exynos_pcie_readl(pci->elbi_base, PCIE_ELBI_RDLH_LINKUP);
- return 0;
+ return val & PCIE_ELBI_XMLH_LINKUP;
}
-static int exynos_pcie_host_init(struct pcie_port *pp)
+static int exynos_pcie_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct exynos_pcie *ep = to_exynos_pcie(pci);
- exynos_pcie_establish_link(ep);
- exynos_pcie_enable_interrupts(ep);
+ pp->bridge->ops = &exynos_pci_ops;
+
+ exynos_pcie_assert_core_reset(ep);
+
+ phy_init(ep->phy);
+ phy_power_on(ep->phy);
+
+ exynos_pcie_deassert_core_reset(ep);
+ exynos_pcie_enable_irq_pulse(ep);
return 0;
}
static const struct dw_pcie_host_ops exynos_pcie_host_ops = {
- .rd_own_conf = exynos_pcie_rd_own_conf,
- .wr_own_conf = exynos_pcie_wr_own_conf,
- .host_init = exynos_pcie_host_init,
+ .init = exynos_pcie_host_init,
};
-static int __init exynos_add_pcie_port(struct exynos_pcie *ep,
+static int exynos_add_pcie_port(struct exynos_pcie *ep,
struct platform_device *pdev)
{
- struct dw_pcie *pci = ep->pci;
- struct pcie_port *pp = &pci->pp;
+ struct dw_pcie *pci = &ep->pci;
+ struct dw_pcie_rp *pp = &pci->pp;
struct device *dev = &pdev->dev;
int ret;
- pp->irq = platform_get_irq(pdev, 1);
- if (pp->irq < 0) {
- dev_err(dev, "failed to get irq\n");
+ pp->irq = platform_get_irq(pdev, 0);
+ if (pp->irq < 0)
return pp->irq;
- }
+
ret = devm_request_irq(dev, pp->irq, exynos_pcie_irq_handler,
- IRQF_SHARED, "exynos-pcie", ep);
+ IRQF_SHARED, "exynos-pcie", ep);
if (ret) {
dev_err(dev, "failed to request irq\n");
return ret;
}
- if (IS_ENABLED(CONFIG_PCI_MSI)) {
- pp->msi_irq = platform_get_irq(pdev, 0);
- if (pp->msi_irq < 0) {
- dev_err(dev, "failed to get msi irq\n");
- return pp->msi_irq;
- }
- }
-
pp->ops = &exynos_pcie_host_ops;
+ pp->msi_irq[0] = -ENODEV;
ret = dw_pcie_host_init(pp);
if (ret) {
@@ -436,12 +279,12 @@ static const struct dw_pcie_ops dw_pcie_ops = {
.read_dbi = exynos_pcie_read_dbi,
.write_dbi = exynos_pcie_write_dbi,
.link_up = exynos_pcie_link_up,
+ .start_link = exynos_pcie_start_link,
};
-static int __init exynos_pcie_probe(struct platform_device *pdev)
+static int exynos_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct dw_pcie *pci;
struct exynos_pcie *ep;
struct device_node *np = dev->of_node;
int ret;
@@ -450,42 +293,27 @@ static int __init exynos_pcie_probe(struct platform_device *pdev)
if (!ep)
return -ENOMEM;
- pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
- if (!pci)
- return -ENOMEM;
-
- pci->dev = dev;
- pci->ops = &dw_pcie_ops;
-
- ep->pci = pci;
- ep->ops = (const struct exynos_pcie_ops *)
- of_device_get_match_data(dev);
-
- ep->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0);
+ ep->pci.dev = dev;
+ ep->pci.ops = &dw_pcie_ops;
ep->phy = devm_of_phy_get(dev, np, NULL);
- if (IS_ERR(ep->phy)) {
- if (PTR_ERR(ep->phy) == -EPROBE_DEFER)
- return PTR_ERR(ep->phy);
+ if (IS_ERR(ep->phy))
+ return PTR_ERR(ep->phy);
- ep->phy = NULL;
- }
+ ret = devm_clk_bulk_get_all_enabled(dev, &ep->clks);
+ if (ret < 0)
+ return ret;
- if (ep->ops && ep->ops->get_mem_resources) {
- ret = ep->ops->get_mem_resources(pdev, ep);
- if (ret)
- return ret;
- }
+ ep->supplies[0].supply = "vdd18";
+ ep->supplies[1].supply = "vdd10";
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ep->supplies),
+ ep->supplies);
+ if (ret)
+ return ret;
- if (ep->ops && ep->ops->get_clk_resources &&
- ep->ops->init_clk_resources) {
- ret = ep->ops->get_clk_resources(ep);
- if (ret)
- return ret;
- ret = ep->ops->init_clk_resources(ep);
- if (ret)
- return ret;
- }
+ ret = regulator_bulk_enable(ARRAY_SIZE(ep->supplies), ep->supplies);
+ if (ret)
+ return ret;
platform_set_drvdata(pdev, ep);
@@ -497,42 +325,72 @@ static int __init exynos_pcie_probe(struct platform_device *pdev)
fail_probe:
phy_exit(ep->phy);
+ regulator_bulk_disable(ARRAY_SIZE(ep->supplies), ep->supplies);
- if (ep->ops && ep->ops->deinit_clk_resources)
- ep->ops->deinit_clk_resources(ep);
return ret;
}
-static int __exit exynos_pcie_remove(struct platform_device *pdev)
+static void exynos_pcie_remove(struct platform_device *pdev)
{
struct exynos_pcie *ep = platform_get_drvdata(pdev);
- if (ep->ops && ep->ops->deinit_clk_resources)
- ep->ops->deinit_clk_resources(ep);
+ dw_pcie_host_deinit(&ep->pci.pp);
+ exynos_pcie_assert_core_reset(ep);
+ phy_power_off(ep->phy);
+ phy_exit(ep->phy);
+ regulator_bulk_disable(ARRAY_SIZE(ep->supplies), ep->supplies);
+}
+
+static int exynos_pcie_suspend_noirq(struct device *dev)
+{
+ struct exynos_pcie *ep = dev_get_drvdata(dev);
+
+ exynos_pcie_assert_core_reset(ep);
+ phy_power_off(ep->phy);
+ phy_exit(ep->phy);
+ regulator_bulk_disable(ARRAY_SIZE(ep->supplies), ep->supplies);
return 0;
}
+static int exynos_pcie_resume_noirq(struct device *dev)
+{
+ struct exynos_pcie *ep = dev_get_drvdata(dev);
+ struct dw_pcie *pci = &ep->pci;
+ struct dw_pcie_rp *pp = &pci->pp;
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(ep->supplies), ep->supplies);
+ if (ret)
+ return ret;
+
+ /* exynos_pcie_host_init controls ep->phy */
+ exynos_pcie_host_init(pp);
+ dw_pcie_setup_rc(pp);
+ exynos_pcie_start_link(pci);
+ return dw_pcie_wait_for_link(pci);
+}
+
+static const struct dev_pm_ops exynos_pcie_pm_ops = {
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(exynos_pcie_suspend_noirq,
+ exynos_pcie_resume_noirq)
+};
+
static const struct of_device_id exynos_pcie_of_match[] = {
- {
- .compatible = "samsung,exynos5440-pcie",
- .data = &exynos5440_pcie_ops
- },
- {},
+ { .compatible = "samsung,exynos5433-pcie", },
+ { },
};
static struct platform_driver exynos_pcie_driver = {
- .remove = __exit_p(exynos_pcie_remove),
+ .probe = exynos_pcie_probe,
+ .remove = exynos_pcie_remove,
.driver = {
.name = "exynos-pcie",
.of_match_table = exynos_pcie_of_match,
+ .pm = &exynos_pcie_pm_ops,
},
};
-
-/* Exynos PCIe driver does not allow module unload */
-
-static int __init exynos_pcie_init(void)
-{
- return platform_driver_probe(&exynos_pcie_driver, exynos_pcie_probe);
-}
-subsys_initcall(exynos_pcie_init);
+module_platform_driver(exynos_pcie_driver);
+MODULE_DESCRIPTION("Samsung Exynos PCIe host controller driver");
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(of, exynos_pcie_of_match);
diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
index 52e47dac028f..4668fc9648bf 100644
--- a/drivers/pci/controller/dwc/pci-imx6.c
+++ b/drivers/pci/controller/dwc/pci-imx6.c
@@ -3,21 +3,22 @@
* PCIe host controller driver for Freescale i.MX6 SoCs
*
* Copyright (C) 2013 Kosagi
- * http://www.kosagi.com
+ * https://www.kosagi.com
*
* Author: Sean Cross <xobs@kosagi.com>
*/
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
#include <linux/mfd/syscon/imx7-iomuxc-gpr.h>
#include <linux/module.h>
-#include <linux/of_gpio.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
@@ -27,110 +28,284 @@
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/reset.h>
+#include <linux/phy/pcie.h>
+#include <linux/phy/phy.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
+#include "../../pci.h"
#include "pcie-designware.h"
-#define to_imx6_pcie(x) dev_get_drvdata((x)->dev)
+#define IMX8MQ_GPR_PCIE_REF_USE_PAD BIT(9)
+#define IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN BIT(10)
+#define IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE BIT(11)
+#define IMX8MQ_GPR_PCIE_VREG_BYPASS BIT(12)
+#define IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE GENMASK(11, 8)
-enum imx6_pcie_variants {
+#define IMX95_PCIE_PHY_GEN_CTRL 0x0
+#define IMX95_PCIE_REF_USE_PAD BIT(17)
+
+#define IMX95_PCIE_PHY_MPLLA_CTRL 0x10
+#define IMX95_PCIE_PHY_MPLL_STATE BIT(30)
+
+#define IMX95_PCIE_SS_RW_REG_0 0xf0
+#define IMX95_PCIE_REF_CLKEN BIT(23)
+#define IMX95_PCIE_PHY_CR_PARA_SEL BIT(9)
+#define IMX95_PCIE_SS_RW_REG_1 0xf4
+#define IMX95_PCIE_SYS_AUX_PWR_DET BIT(31)
+
+#define IMX95_PE0_GEN_CTRL_1 0x1050
+#define IMX95_PCIE_DEVICE_TYPE GENMASK(3, 0)
+
+#define IMX95_PE0_GEN_CTRL_3 0x1058
+#define IMX95_PCIE_LTSSM_EN BIT(0)
+
+#define IMX95_PE0_LUT_ACSCTRL 0x1008
+#define IMX95_PEO_LUT_RWA BIT(16)
+#define IMX95_PE0_LUT_ENLOC GENMASK(4, 0)
+
+#define IMX95_PE0_LUT_DATA1 0x100c
+#define IMX95_PE0_LUT_VLD BIT(31)
+#define IMX95_PE0_LUT_DAC_ID GENMASK(10, 8)
+#define IMX95_PE0_LUT_STREAM_ID GENMASK(5, 0)
+
+#define IMX95_PE0_LUT_DATA2 0x1010
+#define IMX95_PE0_LUT_REQID GENMASK(31, 16)
+#define IMX95_PE0_LUT_MASK GENMASK(15, 0)
+
+#define IMX95_SID_MASK GENMASK(5, 0)
+#define IMX95_MAX_LUT 32
+
+#define IMX95_PCIE_RST_CTRL 0x3010
+#define IMX95_PCIE_COLD_RST BIT(0)
+
+#define to_imx_pcie(x) dev_get_drvdata((x)->dev)
+
+enum imx_pcie_variants {
IMX6Q,
IMX6SX,
IMX6QP,
IMX7D,
+ IMX8MQ,
+ IMX8MM,
+ IMX8MP,
+ IMX8Q,
+ IMX95,
+ IMX8MQ_EP,
+ IMX8MM_EP,
+ IMX8MP_EP,
+ IMX8Q_EP,
+ IMX95_EP,
+};
+
+#define IMX_PCIE_FLAG_IMX_PHY BIT(0)
+#define IMX_PCIE_FLAG_SPEED_CHANGE_WORKAROUND BIT(1)
+#define IMX_PCIE_FLAG_SUPPORTS_SUSPEND BIT(2)
+#define IMX_PCIE_FLAG_HAS_PHYDRV BIT(3)
+#define IMX_PCIE_FLAG_HAS_APP_RESET BIT(4)
+#define IMX_PCIE_FLAG_HAS_PHY_RESET BIT(5)
+#define IMX_PCIE_FLAG_HAS_SERDES BIT(6)
+#define IMX_PCIE_FLAG_SUPPORT_64BIT BIT(7)
+#define IMX_PCIE_FLAG_CPU_ADDR_FIXUP BIT(8)
+/*
+ * Because of ERR005723 (PCIe does not support L2 power down), we need to
+ * work around suspend/resume on the devices affected by this erratum.
+ */
+#define IMX_PCIE_FLAG_BROKEN_SUSPEND BIT(9)
+#define IMX_PCIE_FLAG_HAS_LUT BIT(10)
+#define IMX_PCIE_FLAG_8GT_ECN_ERR051586 BIT(11)
+
+#define imx_check_flag(pci, val) (pci->drvdata->flags & val)
+
+#define IMX_PCIE_MAX_INSTANCES 2
+
+struct imx_pcie;
+
+struct imx_pcie_drvdata {
+ enum imx_pcie_variants variant;
+ enum dw_pcie_device_mode mode;
+ u32 flags;
+ int dbi_length;
+ const char *gpr;
+ const u32 ltssm_off;
+ const u32 ltssm_mask;
+ const u32 mode_off[IMX_PCIE_MAX_INSTANCES];
+ const u32 mode_mask[IMX_PCIE_MAX_INSTANCES];
+ const struct pci_epc_features *epc_features;
+ int (*init_phy)(struct imx_pcie *pcie);
+ int (*enable_ref_clk)(struct imx_pcie *pcie, bool enable);
+ int (*core_reset)(struct imx_pcie *pcie, bool assert);
+ int (*wait_pll_lock)(struct imx_pcie *pcie);
+ const struct dw_pcie_host_ops *ops;
+};
+
+struct imx_lut_data {
+ u32 data1;
+ u32 data2;
};
-struct imx6_pcie {
+struct imx_pcie {
struct dw_pcie *pci;
- int reset_gpio;
- bool gpio_active_high;
- struct clk *pcie_bus;
- struct clk *pcie_phy;
- struct clk *pcie_inbound_axi;
- struct clk *pcie;
+ struct gpio_desc *reset_gpiod;
+ struct clk_bulk_data *clks;
+ int num_clks;
struct regmap *iomuxc_gpr;
+ u16 msi_ctrl;
+ u32 controller_id;
struct reset_control *pciephy_reset;
struct reset_control *apps_reset;
- struct reset_control *turnoff_reset;
- enum imx6_pcie_variants variant;
u32 tx_deemph_gen1;
u32 tx_deemph_gen2_3p5db;
u32 tx_deemph_gen2_6db;
u32 tx_swing_full;
u32 tx_swing_low;
- int link_gen;
struct regulator *vpcie;
+ struct regulator *vph;
+ void __iomem *phy_base;
+ /* LUT data for pcie */
+ struct imx_lut_data luts[IMX95_MAX_LUT];
/* power domain for pcie */
struct device *pd_pcie;
/* power domain for pcie phy */
struct device *pd_pcie_phy;
+ struct phy *phy;
+ const struct imx_pcie_drvdata *drvdata;
+
+ /* Ensure that only one device's LUT is configured at any given time */
+ struct mutex lock;
};
/* Parameters for the waiting for PCIe PHY PLL to lock on i.MX7 */
-#define PHY_PLL_LOCK_WAIT_MAX_RETRIES 2000
-#define PHY_PLL_LOCK_WAIT_USLEEP_MIN 50
#define PHY_PLL_LOCK_WAIT_USLEEP_MAX 200
-
-/* PCIe Root Complex registers (memory-mapped) */
-#define PCIE_RC_IMX6_MSI_CAP 0x50
-#define PCIE_RC_LCR 0x7c
-#define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1 0x1
-#define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2 0x2
-#define PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK 0xf
-
-#define PCIE_RC_LCSR 0x80
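+/* 2000 polls of up to 200 us each, i.e. a 400 ms worst-case wait for lock */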
+#define PHY_PLL_LOCK_WAIT_TIMEOUT (2000 * PHY_PLL_LOCK_WAIT_USLEEP_MAX)
/* PCIe Port Logic registers (memory-mapped) */
#define PL_OFFSET 0x700
-#define PCIE_PL_PFLR (PL_OFFSET + 0x08)
-#define PCIE_PL_PFLR_LINK_STATE_MASK (0x3f << 16)
-#define PCIE_PL_PFLR_FORCE_LINK (1 << 15)
-#define PCIE_PHY_DEBUG_R0 (PL_OFFSET + 0x28)
-#define PCIE_PHY_DEBUG_R1 (PL_OFFSET + 0x2c)
#define PCIE_PHY_CTRL (PL_OFFSET + 0x114)
-#define PCIE_PHY_CTRL_DATA_LOC 0
-#define PCIE_PHY_CTRL_CAP_ADR_LOC 16
-#define PCIE_PHY_CTRL_CAP_DAT_LOC 17
-#define PCIE_PHY_CTRL_WR_LOC 18
-#define PCIE_PHY_CTRL_RD_LOC 19
+#define PCIE_PHY_CTRL_DATA(x) FIELD_PREP(GENMASK(15, 0), (x))
+#define PCIE_PHY_CTRL_CAP_ADR BIT(16)
+#define PCIE_PHY_CTRL_CAP_DAT BIT(17)
+#define PCIE_PHY_CTRL_WR BIT(18)
+#define PCIE_PHY_CTRL_RD BIT(19)
#define PCIE_PHY_STAT (PL_OFFSET + 0x110)
-#define PCIE_PHY_STAT_ACK_LOC 16
-
-#define PCIE_LINK_WIDTH_SPEED_CONTROL 0x80C
-#define PORT_LOGIC_SPEED_CHANGE (0x1 << 17)
+#define PCIE_PHY_STAT_ACK BIT(16)
/* PHY registers (not memory-mapped) */
#define PCIE_PHY_ATEOVRD 0x10
-#define PCIE_PHY_ATEOVRD_EN (0x1 << 2)
+#define PCIE_PHY_ATEOVRD_EN BIT(2)
#define PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT 0
#define PCIE_PHY_ATEOVRD_REF_CLKDIV_MASK 0x1
#define PCIE_PHY_MPLL_OVRD_IN_LO 0x11
#define PCIE_PHY_MPLL_MULTIPLIER_SHIFT 2
#define PCIE_PHY_MPLL_MULTIPLIER_MASK 0x7f
-#define PCIE_PHY_MPLL_MULTIPLIER_OVRD (0x1 << 9)
+#define PCIE_PHY_MPLL_MULTIPLIER_OVRD BIT(9)
#define PCIE_PHY_RX_ASIC_OUT 0x100D
#define PCIE_PHY_RX_ASIC_OUT_VALID (1 << 0)
+/* iMX7 PCIe PHY registers */
+#define PCIE_PHY_CMN_REG4 0x14
+/* These are probably the bits that *aren't* DCC_FB_EN */
+#define PCIE_PHY_CMN_REG4_DCC_FB_EN 0x29
+
+#define PCIE_PHY_CMN_REG15 0x54
+#define PCIE_PHY_CMN_REG15_DLY_4 BIT(2)
+#define PCIE_PHY_CMN_REG15_PLL_PD BIT(5)
+#define PCIE_PHY_CMN_REG15_OVRD_PLL_PD BIT(7)
+
+#define PCIE_PHY_CMN_REG24 0x90
+#define PCIE_PHY_CMN_REG24_RX_EQ BIT(6)
+#define PCIE_PHY_CMN_REG24_RX_EQ_SEL BIT(3)
+
+#define PCIE_PHY_CMN_REG26 0x98
+#define PCIE_PHY_CMN_REG26_ATT_MODE 0xBC
+
#define PHY_RX_OVRD_IN_LO 0x1005
-#define PHY_RX_OVRD_IN_LO_RX_DATA_EN (1 << 5)
-#define PHY_RX_OVRD_IN_LO_RX_PLL_EN (1 << 3)
+#define PHY_RX_OVRD_IN_LO_RX_DATA_EN BIT(5)
+#define PHY_RX_OVRD_IN_LO_RX_PLL_EN BIT(3)
-static int pcie_phy_poll_ack(struct imx6_pcie *imx6_pcie, int exp_val)
+static unsigned int imx_pcie_grp_offset(const struct imx_pcie *imx_pcie)
{
- struct dw_pcie *pci = imx6_pcie->pci;
- u32 val;
+ WARN_ON(imx_pcie->drvdata->variant != IMX8MQ &&
+ imx_pcie->drvdata->variant != IMX8MQ_EP &&
+ imx_pcie->drvdata->variant != IMX8MM &&
+ imx_pcie->drvdata->variant != IMX8MM_EP &&
+ imx_pcie->drvdata->variant != IMX8MP &&
+ imx_pcie->drvdata->variant != IMX8MP_EP);
+ return imx_pcie->controller_id == 1 ? IOMUXC_GPR16 : IOMUXC_GPR14;
+}
+
+static int imx95_pcie_init_phy(struct imx_pcie *imx_pcie)
+{
+ /*
+ * ERR051624: The Controller Without Vaux Cannot Exit L23 Ready
+ * Through Beacon or PERST# De-assertion
+ *
+ * When the auxiliary power is not available, the controller
+ * cannot exit from L23 Ready with beacon or PERST# de-assertion
+ * when main power is not removed.
+ *
+ * Workaround: Set SS_RW_REG_1[SYS_AUX_PWR_DET] to 1.
+ */
+ regmap_set_bits(imx_pcie->iomuxc_gpr, IMX95_PCIE_SS_RW_REG_1,
+ IMX95_PCIE_SYS_AUX_PWR_DET);
+
+ regmap_update_bits(imx_pcie->iomuxc_gpr,
+ IMX95_PCIE_SS_RW_REG_0,
+ IMX95_PCIE_PHY_CR_PARA_SEL,
+ IMX95_PCIE_PHY_CR_PARA_SEL);
+
+ regmap_update_bits(imx_pcie->iomuxc_gpr,
+ IMX95_PCIE_PHY_GEN_CTRL,
+ IMX95_PCIE_REF_USE_PAD, 0);
+ regmap_update_bits(imx_pcie->iomuxc_gpr,
+ IMX95_PCIE_SS_RW_REG_0,
+ IMX95_PCIE_REF_CLKEN,
+ IMX95_PCIE_REF_CLKEN);
+
+ return 0;
+}
+
+static void imx_pcie_configure_type(struct imx_pcie *imx_pcie)
+{
+ const struct imx_pcie_drvdata *drvdata = imx_pcie->drvdata;
+ unsigned int mask, val, mode, id;
+
+ if (drvdata->mode == DW_PCIE_EP_TYPE)
+ mode = PCI_EXP_TYPE_ENDPOINT;
+ else
+ mode = PCI_EXP_TYPE_ROOT_PORT;
+
+ id = imx_pcie->controller_id;
+
+ /* If mode_mask is 0, generic PHY driver is used to set the mode */
+ if (!drvdata->mode_mask[0])
+ return;
+
+ /* If mode_mask[id] is 0, each controller has its individual GPR */
+ if (!drvdata->mode_mask[id])
+ id = 0;
+
+ mask = drvdata->mode_mask[id];
+ val = mode << (ffs(mask) - 1);
+
+ regmap_update_bits(imx_pcie->iomuxc_gpr, drvdata->mode_off[id], mask, val);
+}
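
A worked example of the ffs()-based field placement above, using the IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE mask defined earlier (the userspace GENMASK()/ffs() stand-ins are assumptions for illustration only):

#include <stdio.h>
#include <strings.h>	/* ffs() */

#define GENMASK(h, l)	(((~0u) >> (31 - (h))) & ((~0u) << (l)))

int main(void)
{
	unsigned int mask = GENMASK(11, 8);	/* PCIE2_CTRL_DEVICE_TYPE */
	unsigned int mode = 0x4;		/* PCI_EXP_TYPE_ROOT_PORT */

	/* Mirrors val = mode << (ffs(mask) - 1) in the driver */
	printf("GPR value: 0x%x\n", mode << (ffs(mask) - 1));	/* 0x400 */

	return 0;
}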
+
+static int pcie_phy_poll_ack(struct imx_pcie *imx_pcie, bool exp_val)
+{
+ struct dw_pcie *pci = imx_pcie->pci;
+ bool val;
u32 max_iterations = 10;
u32 wait_counter = 0;
do {
- val = dw_pcie_readl_dbi(pci, PCIE_PHY_STAT);
- val = (val >> PCIE_PHY_STAT_ACK_LOC) & 0x1;
+ val = dw_pcie_readl_dbi(pci, PCIE_PHY_STAT) &
+ PCIE_PHY_STAT_ACK;
wait_counter++;
if (val == exp_val)
@@ -142,103 +317,102 @@ static int pcie_phy_poll_ack(struct imx6_pcie *imx6_pcie, int exp_val)
return -ETIMEDOUT;
}
-static int pcie_phy_wait_ack(struct imx6_pcie *imx6_pcie, int addr)
+static int pcie_phy_wait_ack(struct imx_pcie *imx_pcie, int addr)
{
- struct dw_pcie *pci = imx6_pcie->pci;
+ struct dw_pcie *pci = imx_pcie->pci;
u32 val;
int ret;
- val = addr << PCIE_PHY_CTRL_DATA_LOC;
+ val = PCIE_PHY_CTRL_DATA(addr);
dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);
- val |= (0x1 << PCIE_PHY_CTRL_CAP_ADR_LOC);
+ val |= PCIE_PHY_CTRL_CAP_ADR;
dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);
- ret = pcie_phy_poll_ack(imx6_pcie, 1);
+ ret = pcie_phy_poll_ack(imx_pcie, true);
if (ret)
return ret;
- val = addr << PCIE_PHY_CTRL_DATA_LOC;
+ val = PCIE_PHY_CTRL_DATA(addr);
dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);
- return pcie_phy_poll_ack(imx6_pcie, 0);
+ return pcie_phy_poll_ack(imx_pcie, false);
}
/* Read from the 16-bit PCIe PHY control registers (not memory-mapped) */
-static int pcie_phy_read(struct imx6_pcie *imx6_pcie, int addr, int *data)
+static int pcie_phy_read(struct imx_pcie *imx_pcie, int addr, u16 *data)
{
- struct dw_pcie *pci = imx6_pcie->pci;
- u32 val, phy_ctl;
+ struct dw_pcie *pci = imx_pcie->pci;
+ u32 phy_ctl;
int ret;
- ret = pcie_phy_wait_ack(imx6_pcie, addr);
+ ret = pcie_phy_wait_ack(imx_pcie, addr);
if (ret)
return ret;
/* assert Read signal */
- phy_ctl = 0x1 << PCIE_PHY_CTRL_RD_LOC;
+ phy_ctl = PCIE_PHY_CTRL_RD;
dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, phy_ctl);
- ret = pcie_phy_poll_ack(imx6_pcie, 1);
+ ret = pcie_phy_poll_ack(imx_pcie, true);
if (ret)
return ret;
- val = dw_pcie_readl_dbi(pci, PCIE_PHY_STAT);
- *data = val & 0xffff;
+ *data = dw_pcie_readl_dbi(pci, PCIE_PHY_STAT);
/* deassert Read signal */
dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, 0x00);
- return pcie_phy_poll_ack(imx6_pcie, 0);
+ return pcie_phy_poll_ack(imx_pcie, false);
}
-static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, int data)
+static int pcie_phy_write(struct imx_pcie *imx_pcie, int addr, u16 data)
{
- struct dw_pcie *pci = imx6_pcie->pci;
+ struct dw_pcie *pci = imx_pcie->pci;
u32 var;
int ret;
/* write addr */
/* cap addr */
- ret = pcie_phy_wait_ack(imx6_pcie, addr);
+ ret = pcie_phy_wait_ack(imx_pcie, addr);
if (ret)
return ret;
- var = data << PCIE_PHY_CTRL_DATA_LOC;
+ var = PCIE_PHY_CTRL_DATA(data);
dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);
/* capture data */
- var |= (0x1 << PCIE_PHY_CTRL_CAP_DAT_LOC);
+ var |= PCIE_PHY_CTRL_CAP_DAT;
dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);
- ret = pcie_phy_poll_ack(imx6_pcie, 1);
+ ret = pcie_phy_poll_ack(imx_pcie, true);
if (ret)
return ret;
/* deassert cap data */
- var = data << PCIE_PHY_CTRL_DATA_LOC;
+ var = PCIE_PHY_CTRL_DATA(data);
dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);
/* wait for ack de-assertion */
- ret = pcie_phy_poll_ack(imx6_pcie, 0);
+ ret = pcie_phy_poll_ack(imx_pcie, false);
if (ret)
return ret;
/* assert wr signal */
- var = 0x1 << PCIE_PHY_CTRL_WR_LOC;
+ var = PCIE_PHY_CTRL_WR;
dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);
/* wait for ack */
- ret = pcie_phy_poll_ack(imx6_pcie, 1);
+ ret = pcie_phy_poll_ack(imx_pcie, true);
if (ret)
return ret;
/* deassert wr signal */
- var = data << PCIE_PHY_CTRL_DATA_LOC;
+ var = PCIE_PHY_CTRL_DATA(data);
dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);
/* wait for ack de-assertion */
- ret = pcie_phy_poll_ack(imx6_pcie, 0);
+ ret = pcie_phy_poll_ack(imx_pcie, false);
if (ret)
return ret;
@@ -247,23 +421,166 @@ static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, int data)
return 0;
}
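
A hypothetical helper (not part of the patch) showing how the two accessors above compose into a read-modify-write, the same pattern imx_pcie_reset_phy() open-codes further down:

static int pcie_phy_update(struct imx_pcie *imx_pcie, int addr,
			   u16 clear, u16 set)
{
	u16 val;
	int ret;

	ret = pcie_phy_read(imx_pcie, addr, &val);
	if (ret)
		return ret;

	val &= ~clear;
	val |= set;

	return pcie_phy_write(imx_pcie, addr, val);
}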
-static void imx6_pcie_reset_phy(struct imx6_pcie *imx6_pcie)
+static int imx8mq_pcie_init_phy(struct imx_pcie *imx_pcie)
{
- u32 tmp;
+ /* TODO: This code assumes external oscillator is being used */
+ regmap_update_bits(imx_pcie->iomuxc_gpr,
+ imx_pcie_grp_offset(imx_pcie),
+ IMX8MQ_GPR_PCIE_REF_USE_PAD,
+ IMX8MQ_GPR_PCIE_REF_USE_PAD);
+ /*
+ * Per the datasheet, the PCIE_VPH is suggested to be 1.8V. If the
+ * PCIE_VPH is supplied by 3.3V, the VREG_BYPASS should be cleared
+ * to zero.
+ */
+ if (imx_pcie->vph && regulator_get_voltage(imx_pcie->vph) > 3000000)
+ regmap_update_bits(imx_pcie->iomuxc_gpr,
+ imx_pcie_grp_offset(imx_pcie),
+ IMX8MQ_GPR_PCIE_VREG_BYPASS,
+ 0);
+
+ return 0;
+}
+
+static int imx_pcie_init_phy(struct imx_pcie *imx_pcie)
+{
+ regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);
+
+ /* configure constant input signal to the pcie ctrl and phy */
+ regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6Q_GPR12_LOS_LEVEL, 9 << 4);
+
+ regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ IMX6Q_GPR8_TX_DEEMPH_GEN1,
+ imx_pcie->tx_deemph_gen1 << 0);
+ regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB,
+ imx_pcie->tx_deemph_gen2_3p5db << 6);
+ regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB,
+ imx_pcie->tx_deemph_gen2_6db << 12);
+ regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ IMX6Q_GPR8_TX_SWING_FULL,
+ imx_pcie->tx_swing_full << 18);
+ regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ IMX6Q_GPR8_TX_SWING_LOW,
+ imx_pcie->tx_swing_low << 25);
+ return 0;
+}
+
+static int imx6sx_pcie_init_phy(struct imx_pcie *imx_pcie)
+{
+ regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6SX_GPR12_PCIE_RX_EQ_MASK, IMX6SX_GPR12_PCIE_RX_EQ_2);
+
+ return imx_pcie_init_phy(imx_pcie);
+}
+
+static void imx7d_pcie_wait_for_phy_pll_lock(struct imx_pcie *imx_pcie)
+{
+ u32 val;
+ struct device *dev = imx_pcie->pci->dev;
+
+ if (regmap_read_poll_timeout(imx_pcie->iomuxc_gpr,
+ IOMUXC_GPR22, val,
+ val & IMX7D_GPR22_PCIE_PHY_PLL_LOCKED,
+ PHY_PLL_LOCK_WAIT_USLEEP_MAX,
+ PHY_PLL_LOCK_WAIT_TIMEOUT))
+ dev_err(dev, "PCIe PLL lock timeout\n");
+}
+
+static int imx95_pcie_wait_for_phy_pll_lock(struct imx_pcie *imx_pcie)
+{
+ u32 val;
+ struct device *dev = imx_pcie->pci->dev;
+
+ if (regmap_read_poll_timeout(imx_pcie->iomuxc_gpr,
+ IMX95_PCIE_PHY_MPLLA_CTRL, val,
+ val & IMX95_PCIE_PHY_MPLL_STATE,
+ PHY_PLL_LOCK_WAIT_USLEEP_MAX,
+ PHY_PLL_LOCK_WAIT_TIMEOUT)) {
+ dev_err(dev, "PCIe PLL lock timeout\n");
+ return -ETIMEDOUT;
+ }
- pcie_phy_read(imx6_pcie, PHY_RX_OVRD_IN_LO, &tmp);
+ return 0;
+}
+
+static int imx_setup_phy_mpll(struct imx_pcie *imx_pcie)
+{
+ unsigned long phy_rate = 0;
+ int mult, div;
+ u16 val;
+ int i;
+ struct clk_bulk_data *clks = imx_pcie->clks;
+
+ if (!(imx_pcie->drvdata->flags & IMX_PCIE_FLAG_IMX_PHY))
+ return 0;
+
+ for (i = 0; i < imx_pcie->num_clks; i++)
+ if (strncmp(clks[i].id, "pcie_phy", 8) == 0)
+ phy_rate = clk_get_rate(clks[i].clk);
+
+ switch (phy_rate) {
+ case 125000000:
+ /*
+ * The default settings of the MPLL are for a 125MHz input
+ * clock, so no need to reconfigure anything in that case.
+ */
+ return 0;
+ case 100000000:
+ mult = 25;
+ div = 0;
+ break;
+ case 200000000:
+ mult = 25;
+ div = 1;
+ break;
+ default:
+ dev_err(imx_pcie->pci->dev,
+ "Unsupported PHY reference clock rate %lu\n", phy_rate);
+ return -EINVAL;
+ }
+
+ pcie_phy_read(imx_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, &val);
+ val &= ~(PCIE_PHY_MPLL_MULTIPLIER_MASK <<
+ PCIE_PHY_MPLL_MULTIPLIER_SHIFT);
+ val |= mult << PCIE_PHY_MPLL_MULTIPLIER_SHIFT;
+ val |= PCIE_PHY_MPLL_MULTIPLIER_OVRD;
+ pcie_phy_write(imx_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, val);
+
+ pcie_phy_read(imx_pcie, PCIE_PHY_ATEOVRD, &val);
+ val &= ~(PCIE_PHY_ATEOVRD_REF_CLKDIV_MASK <<
+ PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT);
+ val |= div << PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT;
+ val |= PCIE_PHY_ATEOVRD_EN;
+ pcie_phy_write(imx_pcie, PCIE_PHY_ATEOVRD, val);
+
+ return 0;
+}
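
Assuming the conventional ref_clk * mult / 2^div relation for this PHY, both override cases above land on the same internal rate:

	100 MHz * 25 / 2^0 = 2.5 GHz
	200 MHz * 25 / 2^1 = 2.5 GHz

which matches the Gen1 2.5 GT/s line rate; the 125 MHz default presumably encodes an equivalent multiplier in the PHY's power-on values.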
+
+static void imx_pcie_reset_phy(struct imx_pcie *imx_pcie)
+{
+ u16 tmp;
+
+ if (!(imx_pcie->drvdata->flags & IMX_PCIE_FLAG_IMX_PHY))
+ return;
+
+ pcie_phy_read(imx_pcie, PHY_RX_OVRD_IN_LO, &tmp);
tmp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN |
PHY_RX_OVRD_IN_LO_RX_PLL_EN);
- pcie_phy_write(imx6_pcie, PHY_RX_OVRD_IN_LO, tmp);
+ pcie_phy_write(imx_pcie, PHY_RX_OVRD_IN_LO, tmp);
usleep_range(2000, 3000);
- pcie_phy_read(imx6_pcie, PHY_RX_OVRD_IN_LO, &tmp);
+ pcie_phy_read(imx_pcie, PHY_RX_OVRD_IN_LO, &tmp);
tmp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN |
PHY_RX_OVRD_IN_LO_RX_PLL_EN);
- pcie_phy_write(imx6_pcie, PHY_RX_OVRD_IN_LO, tmp);
+ pcie_phy_write(imx_pcie, PHY_RX_OVRD_IN_LO, tmp);
}
+#ifdef CONFIG_ARM
/* Added for PCI abort handling */
static int imx6q_pcie_abort_handler(unsigned long addr,
unsigned int fsr, struct pt_regs *regs)
@@ -297,333 +614,281 @@ static int imx6q_pcie_abort_handler(unsigned long addr,
return 1;
}
+#endif
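
The new CONFIG_ARM guard reflects that the fault hook this handler is registered through (hook_fault_code()) only exists on 32-bit Arm, so the handler would otherwise be dead code on other architectures.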
-static int imx6_pcie_attach_pd(struct device *dev)
+static int imx_pcie_attach_pd(struct device *dev)
{
- struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
+ struct imx_pcie *imx_pcie = dev_get_drvdata(dev);
struct device_link *link;
/* Do nothing when in a single power domain */
if (dev->pm_domain)
return 0;
- imx6_pcie->pd_pcie = dev_pm_domain_attach_by_name(dev, "pcie");
- if (IS_ERR(imx6_pcie->pd_pcie))
- return PTR_ERR(imx6_pcie->pd_pcie);
- link = device_link_add(dev, imx6_pcie->pd_pcie,
+ imx_pcie->pd_pcie = dev_pm_domain_attach_by_name(dev, "pcie");
+ if (IS_ERR(imx_pcie->pd_pcie))
+ return PTR_ERR(imx_pcie->pd_pcie);
+	/* Do nothing when the power domain is missing */
+ if (!imx_pcie->pd_pcie)
+ return 0;
+ link = device_link_add(dev, imx_pcie->pd_pcie,
DL_FLAG_STATELESS |
DL_FLAG_PM_RUNTIME |
DL_FLAG_RPM_ACTIVE);
if (!link) {
- dev_err(dev, "Failed to add device_link to pcie pd.\n");
+ dev_err(dev, "Failed to add device_link to pcie pd\n");
return -EINVAL;
}
- imx6_pcie->pd_pcie_phy = dev_pm_domain_attach_by_name(dev, "pcie_phy");
- if (IS_ERR(imx6_pcie->pd_pcie_phy))
- return PTR_ERR(imx6_pcie->pd_pcie_phy);
+ imx_pcie->pd_pcie_phy = dev_pm_domain_attach_by_name(dev, "pcie_phy");
+ if (IS_ERR(imx_pcie->pd_pcie_phy))
+ return PTR_ERR(imx_pcie->pd_pcie_phy);
- device_link_add(dev, imx6_pcie->pd_pcie_phy,
+ link = device_link_add(dev, imx_pcie->pd_pcie_phy,
DL_FLAG_STATELESS |
DL_FLAG_PM_RUNTIME |
DL_FLAG_RPM_ACTIVE);
- if (IS_ERR(link)) {
- dev_err(dev, "Failed to add device_link to pcie_phy pd: %ld\n", PTR_ERR(link));
- return PTR_ERR(link);
+ if (!link) {
+ dev_err(dev, "Failed to add device_link to pcie_phy pd\n");
+ return -EINVAL;
}
return 0;
}
-static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie)
+static int imx6sx_pcie_enable_ref_clk(struct imx_pcie *imx_pcie, bool enable)
{
- struct device *dev = imx6_pcie->pci->dev;
-
- switch (imx6_pcie->variant) {
- case IMX7D:
- reset_control_assert(imx6_pcie->pciephy_reset);
- reset_control_assert(imx6_pcie->apps_reset);
- break;
- case IMX6SX:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX6SX_GPR12_PCIE_TEST_POWERDOWN,
- IMX6SX_GPR12_PCIE_TEST_POWERDOWN);
- /* Force PCIe PHY reset */
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5,
- IMX6SX_GPR5_PCIE_BTNRST_RESET,
- IMX6SX_GPR5_PCIE_BTNRST_RESET);
- break;
- case IMX6QP:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
- IMX6Q_GPR1_PCIE_SW_RST,
- IMX6Q_GPR1_PCIE_SW_RST);
- break;
- case IMX6Q:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
- IMX6Q_GPR1_PCIE_TEST_PD, 1 << 18);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
- IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16);
- break;
- }
-
- if (imx6_pcie->vpcie && regulator_is_enabled(imx6_pcie->vpcie) > 0) {
- int ret = regulator_disable(imx6_pcie->vpcie);
-
- if (ret)
- dev_err(dev, "failed to disable vpcie regulator: %d\n",
- ret);
- }
+ regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6SX_GPR12_PCIE_TEST_POWERDOWN,
+ enable ? 0 : IMX6SX_GPR12_PCIE_TEST_POWERDOWN);
+ return 0;
}
-static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
+static int imx6q_pcie_enable_ref_clk(struct imx_pcie *imx_pcie, bool enable)
{
- struct dw_pcie *pci = imx6_pcie->pci;
- struct device *dev = pci->dev;
- int ret = 0;
-
- switch (imx6_pcie->variant) {
- case IMX6SX:
- ret = clk_prepare_enable(imx6_pcie->pcie_inbound_axi);
- if (ret) {
- dev_err(dev, "unable to enable pcie_axi clock\n");
- break;
- }
-
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX6SX_GPR12_PCIE_TEST_POWERDOWN, 0);
- break;
- case IMX6QP: /* FALLTHROUGH */
- case IMX6Q:
+ if (enable) {
/* power up core phy and enable ref clock */
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
- IMX6Q_GPR1_PCIE_TEST_PD, 0 << 18);
+ regmap_clear_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_TEST_PD);
/*
- * the async reset input need ref clock to sync internally,
+		 * The async reset input needs the ref clock to sync internally;
* when the ref clock comes after reset, internal synced
* reset time is too short, cannot meet the requirement.
- * add one ~10us delay here.
+ * Add a ~10us delay here.
*/
- udelay(10);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
- IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16);
- break;
- case IMX7D:
- break;
+ usleep_range(10, 100);
+ regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_REF_CLK_EN);
+ } else {
+ regmap_clear_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_REF_CLK_EN);
+ regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_TEST_PD);
}
- return ret;
+ return 0;
}
-static void imx7d_pcie_wait_for_phy_pll_lock(struct imx6_pcie *imx6_pcie)
+static int imx8mm_pcie_enable_ref_clk(struct imx_pcie *imx_pcie, bool enable)
{
- u32 val;
- unsigned int retries;
- struct device *dev = imx6_pcie->pci->dev;
-
- for (retries = 0; retries < PHY_PLL_LOCK_WAIT_MAX_RETRIES; retries++) {
- regmap_read(imx6_pcie->iomuxc_gpr, IOMUXC_GPR22, &val);
-
- if (val & IMX7D_GPR22_PCIE_PHY_PLL_LOCKED)
- return;
-
- usleep_range(PHY_PLL_LOCK_WAIT_USLEEP_MIN,
- PHY_PLL_LOCK_WAIT_USLEEP_MAX);
- }
+ int offset = imx_pcie_grp_offset(imx_pcie);
+
+ regmap_update_bits(imx_pcie->iomuxc_gpr, offset,
+ IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE,
+ enable ? 0 : IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE);
+ regmap_update_bits(imx_pcie->iomuxc_gpr, offset,
+ IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN,
+ enable ? IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN : 0);
+ return 0;
+}
- dev_err(dev, "PCIe PLL lock timeout\n");
+static int imx7d_pcie_enable_ref_clk(struct imx_pcie *imx_pcie, bool enable)
+{
+ regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX7D_GPR12_PCIE_PHY_REFCLK_SEL,
+ enable ? 0 : IMX7D_GPR12_PCIE_PHY_REFCLK_SEL);
+ return 0;
}
-static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
+static int imx_pcie_clk_enable(struct imx_pcie *imx_pcie)
{
- struct dw_pcie *pci = imx6_pcie->pci;
+ struct dw_pcie *pci = imx_pcie->pci;
struct device *dev = pci->dev;
int ret;
- if (imx6_pcie->vpcie && !regulator_is_enabled(imx6_pcie->vpcie)) {
- ret = regulator_enable(imx6_pcie->vpcie);
+ ret = clk_bulk_prepare_enable(imx_pcie->num_clks, imx_pcie->clks);
+ if (ret)
+ return ret;
+
+ if (imx_pcie->drvdata->enable_ref_clk) {
+ ret = imx_pcie->drvdata->enable_ref_clk(imx_pcie, true);
if (ret) {
- dev_err(dev, "failed to enable vpcie regulator: %d\n",
- ret);
- return;
+ dev_err(dev, "Failed to enable PCIe REFCLK\n");
+ goto err_ref_clk;
}
}
- ret = clk_prepare_enable(imx6_pcie->pcie_phy);
- if (ret) {
- dev_err(dev, "unable to enable pcie_phy clock\n");
- goto err_pcie_phy;
- }
-
- ret = clk_prepare_enable(imx6_pcie->pcie_bus);
- if (ret) {
- dev_err(dev, "unable to enable pcie_bus clock\n");
- goto err_pcie_bus;
- }
+ /* allow the clocks to stabilize */
+ usleep_range(200, 500);
+ return 0;
- ret = clk_prepare_enable(imx6_pcie->pcie);
- if (ret) {
- dev_err(dev, "unable to enable pcie clock\n");
- goto err_pcie;
- }
+err_ref_clk:
+ clk_bulk_disable_unprepare(imx_pcie->num_clks, imx_pcie->clks);
- ret = imx6_pcie_enable_ref_clk(imx6_pcie);
- if (ret) {
- dev_err(dev, "unable to enable pcie ref clock\n");
- goto err_ref_clk;
- }
+ return ret;
+}
- /* allow the clocks to stabilize */
- usleep_range(200, 500);
+static void imx_pcie_clk_disable(struct imx_pcie *imx_pcie)
+{
+ if (imx_pcie->drvdata->enable_ref_clk)
+ imx_pcie->drvdata->enable_ref_clk(imx_pcie, false);
+ clk_bulk_disable_unprepare(imx_pcie->num_clks, imx_pcie->clks);
+}
- /* Some boards don't have PCIe reset GPIO. */
- if (gpio_is_valid(imx6_pcie->reset_gpio)) {
- gpio_set_value_cansleep(imx6_pcie->reset_gpio,
- imx6_pcie->gpio_active_high);
- msleep(100);
- gpio_set_value_cansleep(imx6_pcie->reset_gpio,
- !imx6_pcie->gpio_active_high);
- }
+static int imx6sx_pcie_core_reset(struct imx_pcie *imx_pcie, bool assert)
+{
+ if (assert)
+ regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6SX_GPR12_PCIE_TEST_POWERDOWN);
- switch (imx6_pcie->variant) {
- case IMX7D:
- reset_control_deassert(imx6_pcie->pciephy_reset);
- imx7d_pcie_wait_for_phy_pll_lock(imx6_pcie);
- break;
- case IMX6SX:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5,
- IMX6SX_GPR5_PCIE_BTNRST_RESET, 0);
- break;
- case IMX6QP:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
- IMX6Q_GPR1_PCIE_SW_RST, 0);
+ /* Force PCIe PHY reset */
+ regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR5, IMX6SX_GPR5_PCIE_BTNRST_RESET,
+ assert ? IMX6SX_GPR5_PCIE_BTNRST_RESET : 0);
+ return 0;
+}
+static int imx6qp_pcie_core_reset(struct imx_pcie *imx_pcie, bool assert)
+{
+ regmap_update_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_SW_RST,
+ assert ? IMX6Q_GPR1_PCIE_SW_RST : 0);
+ if (!assert)
usleep_range(200, 500);
- break;
- case IMX6Q: /* Nothing to do */
- break;
- }
- return;
+ return 0;
+}
-err_ref_clk:
- clk_disable_unprepare(imx6_pcie->pcie);
-err_pcie:
- clk_disable_unprepare(imx6_pcie->pcie_bus);
-err_pcie_bus:
- clk_disable_unprepare(imx6_pcie->pcie_phy);
-err_pcie_phy:
- if (imx6_pcie->vpcie && regulator_is_enabled(imx6_pcie->vpcie) > 0) {
- ret = regulator_disable(imx6_pcie->vpcie);
- if (ret)
- dev_err(dev, "failed to disable vpcie regulator: %d\n",
- ret);
- }
+static int imx6q_pcie_core_reset(struct imx_pcie *imx_pcie, bool assert)
+{
+ if (!assert)
+ return 0;
+
+ regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_TEST_PD);
+ regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_REF_CLK_EN);
+
+ return 0;
}
-static void imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie)
+static int imx7d_pcie_core_reset(struct imx_pcie *imx_pcie, bool assert)
{
- switch (imx6_pcie->variant) {
- case IMX7D:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX7D_GPR12_PCIE_PHY_REFCLK_SEL, 0);
- break;
- case IMX6SX:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX6SX_GPR12_PCIE_RX_EQ_MASK,
- IMX6SX_GPR12_PCIE_RX_EQ_2);
- /* FALLTHROUGH */
- default:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);
+ struct dw_pcie *pci = imx_pcie->pci;
+ struct device *dev = pci->dev;
- /* configure constant input signal to the pcie ctrl and phy */
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX6Q_GPR12_LOS_LEVEL, 9 << 4);
-
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
- IMX6Q_GPR8_TX_DEEMPH_GEN1,
- imx6_pcie->tx_deemph_gen1 << 0);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
- IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB,
- imx6_pcie->tx_deemph_gen2_3p5db << 6);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
- IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB,
- imx6_pcie->tx_deemph_gen2_6db << 12);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
- IMX6Q_GPR8_TX_SWING_FULL,
- imx6_pcie->tx_swing_full << 18);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
- IMX6Q_GPR8_TX_SWING_LOW,
- imx6_pcie->tx_swing_low << 25);
- break;
- }
+ if (assert)
+ return 0;
+
+ /*
+ * Workaround for ERR010728 (IMX7DS_2N09P, Rev. 1.1, 4/2023):
+ *
+ * PCIe: PLL may fail to lock under corner conditions.
+ *
+ * Initial VCO oscillation may fail under corner conditions such as
+ * cold temperature which will cause the PCIe PLL fail to lock in the
+ * initialization phase.
+ *
+ * The Duty-cycle Corrector calibration must be disabled.
+ *
+ * 1. De-assert the G_RST signal by clearing
+ * SRC_PCIEPHY_RCR[PCIEPHY_G_RST].
+ * 2. De-assert DCC_FB_EN by writing data “0x29” to the register
+ * address 0x306d0014 (PCIE_PHY_CMN_REG4).
+ * 3. Assert RX_EQS, RX_EQ_SEL by writing data “0x48” to the register
+ * address 0x306d0090 (PCIE_PHY_CMN_REG24).
+ * 4. Assert ATT_MODE by writing data “0xbc” to the register
+ * address 0x306d0098 (PCIE_PHY_CMN_REG26).
+ * 5. De-assert the CMN_RST signal by clearing register bit
+ * SRC_PCIEPHY_RCR[PCIEPHY_BTN]
+ */
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX6Q_GPR12_DEVICE_TYPE, PCI_EXP_TYPE_ROOT_PORT << 12);
+ if (likely(imx_pcie->phy_base)) {
+ /* De-assert DCC_FB_EN */
+ writel(PCIE_PHY_CMN_REG4_DCC_FB_EN, imx_pcie->phy_base + PCIE_PHY_CMN_REG4);
+ /* Assert RX_EQS and RX_EQS_SEL */
+ writel(PCIE_PHY_CMN_REG24_RX_EQ_SEL | PCIE_PHY_CMN_REG24_RX_EQ,
+ imx_pcie->phy_base + PCIE_PHY_CMN_REG24);
+ /* Assert ATT_MODE */
+ writel(PCIE_PHY_CMN_REG26_ATT_MODE, imx_pcie->phy_base + PCIE_PHY_CMN_REG26);
+ } else {
+		dev_warn(dev, "Unable to apply ERR010728 workaround. DT missing fsl,imx7d-pcie-phy phandle?\n");
+ }
+ imx7d_pcie_wait_for_phy_pll_lock(imx_pcie);
+ return 0;
}
-static int imx6_setup_phy_mpll(struct imx6_pcie *imx6_pcie)
+static int imx95_pcie_core_reset(struct imx_pcie *imx_pcie, bool assert)
{
- unsigned long phy_rate = clk_get_rate(imx6_pcie->pcie_phy);
- int mult, div;
u32 val;
- switch (phy_rate) {
- case 125000000:
+ if (assert) {
/*
- * The default settings of the MPLL are for a 125MHz input
- * clock, so no need to reconfigure anything in that case.
+ * From i.MX95 PCIe PHY perspective, the COLD reset toggle
+ * should be complete after power-up by the following sequence.
+ * > 10us(at power-up)
+ * > 10ns(warm reset)
+ * |<------------>|
+ * ______________
+ * phy_reset ____/ \________________
+ * ____________
+ * ref_clk_en_______________________/
+ * Toggle COLD reset aligned with this sequence for i.MX95 PCIe.
*/
- return 0;
- case 100000000:
- mult = 25;
- div = 0;
- break;
- case 200000000:
- mult = 25;
- div = 1;
- break;
- default:
- dev_err(imx6_pcie->pci->dev,
- "Unsupported PHY reference clock rate %lu\n", phy_rate);
- return -EINVAL;
+ regmap_set_bits(imx_pcie->iomuxc_gpr, IMX95_PCIE_RST_CTRL,
+ IMX95_PCIE_COLD_RST);
+ /*
+ * Make sure the write to IMX95_PCIE_RST_CTRL is flushed to the
+ * hardware by doing a read. Otherwise, there is no guarantee
+ * that the write has reached the hardware before udelay().
+ */
+ regmap_read_bypassed(imx_pcie->iomuxc_gpr, IMX95_PCIE_RST_CTRL,
+ &val);
+ udelay(15);
+ regmap_clear_bits(imx_pcie->iomuxc_gpr, IMX95_PCIE_RST_CTRL,
+ IMX95_PCIE_COLD_RST);
+ regmap_read_bypassed(imx_pcie->iomuxc_gpr, IMX95_PCIE_RST_CTRL,
+ &val);
+ udelay(10);
}
- pcie_phy_read(imx6_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, &val);
- val &= ~(PCIE_PHY_MPLL_MULTIPLIER_MASK <<
- PCIE_PHY_MPLL_MULTIPLIER_SHIFT);
- val |= mult << PCIE_PHY_MPLL_MULTIPLIER_SHIFT;
- val |= PCIE_PHY_MPLL_MULTIPLIER_OVRD;
- pcie_phy_write(imx6_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, val);
+ return 0;
+}
- pcie_phy_read(imx6_pcie, PCIE_PHY_ATEOVRD, &val);
- val &= ~(PCIE_PHY_ATEOVRD_REF_CLKDIV_MASK <<
- PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT);
- val |= div << PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT;
- val |= PCIE_PHY_ATEOVRD_EN;
- pcie_phy_write(imx6_pcie, PCIE_PHY_ATEOVRD, val);
+static void imx_pcie_assert_core_reset(struct imx_pcie *imx_pcie)
+{
+ reset_control_assert(imx_pcie->pciephy_reset);
- return 0;
+ if (imx_pcie->drvdata->core_reset)
+ imx_pcie->drvdata->core_reset(imx_pcie, true);
+
+ /* Some boards don't have PCIe reset GPIO. */
+ gpiod_set_value_cansleep(imx_pcie->reset_gpiod, 1);
}
-static int imx6_pcie_wait_for_link(struct imx6_pcie *imx6_pcie)
+static int imx_pcie_deassert_core_reset(struct imx_pcie *imx_pcie)
{
- struct dw_pcie *pci = imx6_pcie->pci;
- struct device *dev = pci->dev;
+ reset_control_deassert(imx_pcie->pciephy_reset);
- /* check if the link is up or not */
- if (!dw_pcie_wait_for_link(pci))
- return 0;
+ if (imx_pcie->drvdata->core_reset)
+ imx_pcie->drvdata->core_reset(imx_pcie, false);
- dev_dbg(dev, "DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n",
- dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R0),
- dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R1));
- return -ETIMEDOUT;
+ /* Some boards don't have PCIe reset GPIO. */
+ if (imx_pcie->reset_gpiod) {
+ msleep(100);
+ gpiod_set_value_cansleep(imx_pcie->reset_gpiod, 0);
+ /* Wait for 100ms after PERST# deassertion (PCIe r5.0, 6.6.1) */
+ msleep(100);
+ }
+
+ return 0;
}
-static int imx6_pcie_wait_for_speed_change(struct imx6_pcie *imx6_pcie)
+static int imx_pcie_wait_for_speed_change(struct imx_pcie *imx_pcie)
{
- struct dw_pcie *pci = imx6_pcie->pci;
+ struct dw_pcie *pci = imx_pcie->pci;
struct device *dev = pci->dev;
u32 tmp;
unsigned int retries;
@@ -637,57 +902,78 @@ static int imx6_pcie_wait_for_speed_change(struct imx6_pcie *imx6_pcie)
}
dev_err(dev, "Speed change timeout\n");
- return -EINVAL;
+ return -ETIMEDOUT;
}
-static void imx6_pcie_ltssm_enable(struct device *dev)
+static void imx_pcie_ltssm_enable(struct device *dev)
{
- struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
-
- switch (imx6_pcie->variant) {
- case IMX6Q:
- case IMX6SX:
- case IMX6QP:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX6Q_GPR12_PCIE_CTL_2,
- IMX6Q_GPR12_PCIE_CTL_2);
- break;
- case IMX7D:
- reset_control_deassert(imx6_pcie->apps_reset);
- break;
- }
+ struct imx_pcie *imx_pcie = dev_get_drvdata(dev);
+ const struct imx_pcie_drvdata *drvdata = imx_pcie->drvdata;
+ u8 offset = dw_pcie_find_capability(imx_pcie->pci, PCI_CAP_ID_EXP);
+ u32 tmp;
+
+ tmp = dw_pcie_readl_dbi(imx_pcie->pci, offset + PCI_EXP_LNKCAP);
+ phy_set_speed(imx_pcie->phy, FIELD_GET(PCI_EXP_LNKCAP_SLS, tmp));
+ if (drvdata->ltssm_mask)
+ regmap_update_bits(imx_pcie->iomuxc_gpr, drvdata->ltssm_off, drvdata->ltssm_mask,
+ drvdata->ltssm_mask);
+
+ reset_control_deassert(imx_pcie->apps_reset);
+}
+
+static void imx_pcie_ltssm_disable(struct device *dev)
+{
+ struct imx_pcie *imx_pcie = dev_get_drvdata(dev);
+ const struct imx_pcie_drvdata *drvdata = imx_pcie->drvdata;
+
+ phy_set_speed(imx_pcie->phy, 0);
+ if (drvdata->ltssm_mask)
+ regmap_update_bits(imx_pcie->iomuxc_gpr, drvdata->ltssm_off,
+ drvdata->ltssm_mask, 0);
+
+ reset_control_assert(imx_pcie->apps_reset);
}
-static int imx6_pcie_establish_link(struct imx6_pcie *imx6_pcie)
+static int imx_pcie_start_link(struct dw_pcie *pci)
{
- struct dw_pcie *pci = imx6_pcie->pci;
+ struct imx_pcie *imx_pcie = to_imx_pcie(pci);
struct device *dev = pci->dev;
+ u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
u32 tmp;
int ret;
+ if (!(imx_pcie->drvdata->flags &
+ IMX_PCIE_FLAG_SPEED_CHANGE_WORKAROUND)) {
+ imx_pcie_ltssm_enable(dev);
+ return 0;
+ }
+
/*
* Force Gen1 operation when starting the link. In case the link is
* started in Gen2 mode, there is a possibility the devices on the
* bus will not be detected at all. This happens with PCIe switches.
*/
- tmp = dw_pcie_readl_dbi(pci, PCIE_RC_LCR);
- tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK;
- tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1;
- dw_pcie_writel_dbi(pci, PCIE_RC_LCR, tmp);
+ dw_pcie_dbi_ro_wr_en(pci);
+ tmp = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
+ tmp &= ~PCI_EXP_LNKCAP_SLS;
+ tmp |= PCI_EXP_LNKCAP_SLS_2_5GB;
+ dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, tmp);
+ dw_pcie_dbi_ro_wr_dis(pci);
/* Start LTSSM. */
- imx6_pcie_ltssm_enable(dev);
+ imx_pcie_ltssm_enable(dev);
- ret = imx6_pcie_wait_for_link(imx6_pcie);
- if (ret)
- goto err_reset_phy;
+ if (pci->max_link_speed > 1) {
+ ret = dw_pcie_wait_for_link(pci);
+ if (ret)
+ goto err_reset_phy;
- if (imx6_pcie->link_gen == 2) {
- /* Allow Gen2 mode after the link is up. */
- tmp = dw_pcie_readl_dbi(pci, PCIE_RC_LCR);
- tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK;
- tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2;
- dw_pcie_writel_dbi(pci, PCIE_RC_LCR, tmp);
+ /* Allow faster modes after the link is up */
+ dw_pcie_dbi_ro_wr_en(pci);
+ tmp = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
+ tmp &= ~PCI_EXP_LNKCAP_SLS;
+ tmp |= pci->max_link_speed;
+ dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, tmp);
/*
* Start Directed Speed Change so the best possible
@@ -696,233 +982,631 @@ static int imx6_pcie_establish_link(struct imx6_pcie *imx6_pcie)
tmp = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
tmp |= PORT_LOGIC_SPEED_CHANGE;
dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, tmp);
+ dw_pcie_dbi_ro_wr_dis(pci);
- if (imx6_pcie->variant != IMX7D) {
- /*
- * On i.MX7, DIRECT_SPEED_CHANGE behaves differently
- * from i.MX6 family when no link speed transition
- * occurs and we go Gen1 -> yep, Gen1. The difference
- * is that, in such case, it will not be cleared by HW
- * which will cause the following code to report false
- * failure.
- */
-
- ret = imx6_pcie_wait_for_speed_change(imx6_pcie);
- if (ret) {
- dev_err(dev, "Failed to bring link up!\n");
- goto err_reset_phy;
- }
- }
-
- /* Make sure link training is finished as well! */
- ret = imx6_pcie_wait_for_link(imx6_pcie);
+ ret = imx_pcie_wait_for_speed_change(imx_pcie);
if (ret) {
dev_err(dev, "Failed to bring link up!\n");
goto err_reset_phy;
}
} else {
- dev_info(dev, "Link: Gen2 disabled\n");
+ dev_info(dev, "Link: Only Gen1 is enabled\n");
}
- tmp = dw_pcie_readl_dbi(pci, PCIE_RC_LCSR);
- dev_info(dev, "Link up, Gen%i\n", (tmp >> 16) & 0xf);
return 0;
err_reset_phy:
dev_dbg(dev, "PHY DEBUG_R0=0x%08x DEBUG_R1=0x%08x\n",
- dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R0),
- dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R1));
- imx6_pcie_reset_phy(imx6_pcie);
- return ret;
+ dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG0),
+ dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG1));
+ imx_pcie_reset_phy(imx_pcie);
+ return 0;
}
-static int imx6_pcie_host_init(struct pcie_port *pp)
+static void imx_pcie_stop_link(struct dw_pcie *pci)
{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);
+ struct device *dev = pci->dev;
- imx6_pcie_assert_core_reset(imx6_pcie);
- imx6_pcie_init_phy(imx6_pcie);
- imx6_pcie_deassert_core_reset(imx6_pcie);
- imx6_setup_phy_mpll(imx6_pcie);
- dw_pcie_setup_rc(pp);
- imx6_pcie_establish_link(imx6_pcie);
+ /* Turn off PCIe LTSSM */
+ imx_pcie_ltssm_disable(dev);
+}
- if (IS_ENABLED(CONFIG_PCI_MSI))
- dw_pcie_msi_init(pp);
+static int imx_pcie_add_lut(struct imx_pcie *imx_pcie, u16 rid, u8 sid)
+{
+ struct dw_pcie *pci = imx_pcie->pci;
+ struct device *dev = pci->dev;
+ u32 data1, data2;
+ int free = -1;
+ int i;
+
+ if (sid >= 64) {
+		dev_err(dev, "Invalid SID %d\n", sid);
+ return -EINVAL;
+ }
+
+ guard(mutex)(&imx_pcie->lock);
+
+ /*
+ * Iterate through all LUT entries to check for duplicate RID and
+ * identify the first available entry. Configure this available entry
+ * immediately after verification to avoid rescanning it.
+ */
+ for (i = 0; i < IMX95_MAX_LUT; i++) {
+ regmap_write(imx_pcie->iomuxc_gpr,
+ IMX95_PE0_LUT_ACSCTRL, IMX95_PEO_LUT_RWA | i);
+ regmap_read(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA1, &data1);
+
+ if (!(data1 & IMX95_PE0_LUT_VLD)) {
+ if (free < 0)
+ free = i;
+ continue;
+ }
+
+ regmap_read(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA2, &data2);
+
+ /* Do not add duplicate RID */
+ if (rid == FIELD_GET(IMX95_PE0_LUT_REQID, data2)) {
+			dev_warn(dev, "Existing LUT entry available for RID (%d)\n", rid);
+ return 0;
+ }
+ }
+
+ if (free < 0) {
+ dev_err(dev, "LUT entry is not available\n");
+ return -ENOSPC;
+ }
+
+ data1 = FIELD_PREP(IMX95_PE0_LUT_DAC_ID, 0);
+ data1 |= FIELD_PREP(IMX95_PE0_LUT_STREAM_ID, sid);
+ data1 |= IMX95_PE0_LUT_VLD;
+ regmap_write(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA1, data1);
+
+ if (imx_pcie->drvdata->mode == DW_PCIE_EP_TYPE)
+ data2 = 0x7; /* In the EP mode, only 'Device ID' is required */
+ else
+ data2 = IMX95_PE0_LUT_MASK; /* Match all bits of RID */
+ data2 |= FIELD_PREP(IMX95_PE0_LUT_REQID, rid);
+ regmap_write(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA2, data2);
+
+ regmap_write(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_ACSCTRL, free);
return 0;
}
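
Note the access protocol implied above: look-ups write IMX95_PEO_LUT_RWA | index to ACSCTRL before reading DATA1/DATA2, while the final write of the bare index (without the RWA bit) appears to commit the staged DATA1/DATA2 values into the selected entry.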
-static const struct dw_pcie_host_ops imx6_pcie_host_ops = {
- .host_init = imx6_pcie_host_init,
-};
+static void imx_pcie_remove_lut(struct imx_pcie *imx_pcie, u16 rid)
+{
+ u32 data2;
+ int i;
+
+ guard(mutex)(&imx_pcie->lock);
+
+ for (i = 0; i < IMX95_MAX_LUT; i++) {
+ regmap_write(imx_pcie->iomuxc_gpr,
+ IMX95_PE0_LUT_ACSCTRL, IMX95_PEO_LUT_RWA | i);
+ regmap_read(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA2, &data2);
+ if (FIELD_GET(IMX95_PE0_LUT_REQID, data2) == rid) {
+ regmap_write(imx_pcie->iomuxc_gpr,
+ IMX95_PE0_LUT_DATA1, 0);
+ regmap_write(imx_pcie->iomuxc_gpr,
+ IMX95_PE0_LUT_DATA2, 0);
+ regmap_write(imx_pcie->iomuxc_gpr,
+ IMX95_PE0_LUT_ACSCTRL, i);
+
+ break;
+ }
+ }
+}
-static int imx6_add_pcie_port(struct imx6_pcie *imx6_pcie,
- struct platform_device *pdev)
+static int imx_pcie_add_lut_by_rid(struct imx_pcie *imx_pcie, u32 rid)
{
- struct dw_pcie *pci = imx6_pcie->pci;
- struct pcie_port *pp = &pci->pp;
- struct device *dev = &pdev->dev;
+ struct device *dev = imx_pcie->pci->dev;
+ struct device_node *target;
+ u32 sid_i, sid_m;
+ int err_i, err_m;
+ u32 sid = 0;
+
+ target = NULL;
+ err_i = of_map_id(dev->of_node, rid, "iommu-map", "iommu-map-mask",
+ &target, &sid_i);
+ if (target) {
+ of_node_put(target);
+ } else {
+ /*
+ * "target == NULL && err_i == 0" means RID out of map range.
+ * Use 1:1 map RID to streamID. Hardware can't support this
+ * because the streamID is only 6 bits
+ */
+ err_i = -EINVAL;
+ }
+
+ target = NULL;
+ err_m = of_map_id(dev->of_node, rid, "msi-map", "msi-map-mask",
+ &target, &sid_m);
+
+ /*
+ * err_m target
+ * 0 NULL RID out of range. Use 1:1 map RID to
+ * streamID, Current hardware can't
+ * support it, so return -EINVAL.
+ * != 0 NULL msi-map does not exist, use built-in MSI
+ * 0 != NULL Get correct streamID from RID
+ * != 0 != NULL Invalid combination
+ */
+ if (!err_m && !target)
+ return -EINVAL;
+ else if (target)
+ of_node_put(target); /* Find streamID map entry for RID in msi-map */
+
+ /*
+ * msi-map iommu-map
+ * N N DWC MSI Ctrl
+ * Y Y ITS + SMMU, require the same SID
+ * Y N ITS
+ * N Y DWC MSI Ctrl + SMMU
+ */
+ if (err_i && err_m)
+ return 0;
+
+ if (!err_i && !err_m) {
+ /*
+ * Glue Layer
+ * <==========>
+ * ┌─────┐ ┌──────────┐
+ * │ LUT │ 6-bit streamID │ │
+ * │ │─────────────────►│ MSI │
+ * └─────┘ 2-bit ctrl ID │ │
+ * ┌───────────►│ │
+ * (i.MX95) │ │ │
+ * 00 PCIe0 │ │ │
+ * 01 ENETC │ │ │
+ * 10 PCIe1 │ │ │
+ * │ └──────────┘
+ * The MSI glue layer auto adds 2 bits controller ID ahead of
+ * streamID, so mask these 2 bits to get streamID. The
+ * IOMMU glue layer doesn't do that.
+ */
+ if (sid_i != (sid_m & IMX95_SID_MASK)) {
+ dev_err(dev, "iommu-map and msi-map entries mismatch!\n");
+ return -EINVAL;
+ }
+ }
+
+ if (!err_i)
+ sid = sid_i;
+ else if (!err_m)
+ sid = sid_m & IMX95_SID_MASK;
+
+ return imx_pcie_add_lut(imx_pcie, rid, sid);
+}
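
A hypothetical example of the masking described above: if a msi-map entry for the second i.MX95 controller (glue-layer ID 0b10) yields sid_m = 0x8a, masking with IMX95_SID_MASK (GENMASK(5, 0)) strips the two controller-ID bits and leaves streamID 0x0a, which must then equal sid_i whenever an iommu-map entry exists as well.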
+
+static int imx_pcie_enable_device(struct pci_host_bridge *bridge, struct pci_dev *pdev)
+{
+ struct imx_pcie *imx_pcie = to_imx_pcie(to_dw_pcie_from_pp(bridge->sysdata));
+
+ return imx_pcie_add_lut_by_rid(imx_pcie, pci_dev_id(pdev));
+}
+
+static void imx_pcie_disable_device(struct pci_host_bridge *bridge,
+ struct pci_dev *pdev)
+{
+ struct imx_pcie *imx_pcie;
+
+ imx_pcie = to_imx_pcie(to_dw_pcie_from_pp(bridge->sysdata));
+ imx_pcie_remove_lut(imx_pcie, pci_dev_id(pdev));
+}
+
+static int imx_pcie_host_init(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct device *dev = pci->dev;
+ struct imx_pcie *imx_pcie = to_imx_pcie(pci);
int ret;
- if (IS_ENABLED(CONFIG_PCI_MSI)) {
- pp->msi_irq = platform_get_irq_byname(pdev, "msi");
- if (pp->msi_irq <= 0) {
- dev_err(dev, "failed to get MSI irq\n");
- return -ENODEV;
+ if (imx_pcie->vpcie) {
+ ret = regulator_enable(imx_pcie->vpcie);
+ if (ret) {
+ dev_err(dev, "failed to enable vpcie regulator: %d\n",
+ ret);
+ return ret;
}
}
- pp->ops = &imx6_pcie_host_ops;
+ if (pp->bridge && imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_LUT)) {
+ pp->bridge->enable_device = imx_pcie_enable_device;
+ pp->bridge->disable_device = imx_pcie_disable_device;
+ }
+
+ imx_pcie_assert_core_reset(imx_pcie);
- ret = dw_pcie_host_init(pp);
+ if (imx_pcie->drvdata->init_phy)
+ imx_pcie->drvdata->init_phy(imx_pcie);
+
+ imx_pcie_configure_type(imx_pcie);
+
+ ret = imx_pcie_clk_enable(imx_pcie);
if (ret) {
- dev_err(dev, "failed to initialize host\n");
- return ret;
+ dev_err(dev, "unable to enable pcie clocks: %d\n", ret);
+ goto err_reg_disable;
}
+ if (imx_pcie->phy) {
+ ret = phy_init(imx_pcie->phy);
+ if (ret) {
+ dev_err(dev, "pcie PHY power up failed\n");
+ goto err_clk_disable;
+ }
+
+ ret = phy_set_mode_ext(imx_pcie->phy, PHY_MODE_PCIE,
+ imx_pcie->drvdata->mode == DW_PCIE_EP_TYPE ?
+ PHY_MODE_PCIE_EP : PHY_MODE_PCIE_RC);
+ if (ret) {
+ dev_err(dev, "unable to set PCIe PHY mode\n");
+ goto err_phy_exit;
+ }
+
+ ret = phy_power_on(imx_pcie->phy);
+ if (ret) {
+ dev_err(dev, "waiting for PHY ready timeout!\n");
+ goto err_phy_exit;
+ }
+ }
+
+ /* Make sure that PCIe LTSSM is cleared */
+ imx_pcie_ltssm_disable(dev);
+
+ ret = imx_pcie_deassert_core_reset(imx_pcie);
+ if (ret < 0) {
+ dev_err(dev, "pcie deassert core reset failed: %d\n", ret);
+ goto err_phy_off;
+ }
+
+ if (imx_pcie->drvdata->wait_pll_lock) {
+ ret = imx_pcie->drvdata->wait_pll_lock(imx_pcie);
+ if (ret < 0)
+ goto err_phy_off;
+ }
+
+ imx_setup_phy_mpll(imx_pcie);
+
return 0;
+
+err_phy_off:
+ phy_power_off(imx_pcie->phy);
+err_phy_exit:
+ phy_exit(imx_pcie->phy);
+err_clk_disable:
+ imx_pcie_clk_disable(imx_pcie);
+err_reg_disable:
+ if (imx_pcie->vpcie)
+ regulator_disable(imx_pcie->vpcie);
+ return ret;
+}
+
+static void imx_pcie_host_exit(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct imx_pcie *imx_pcie = to_imx_pcie(pci);
+
+ if (imx_pcie->phy) {
+ if (phy_power_off(imx_pcie->phy))
+ dev_err(pci->dev, "unable to power off PHY\n");
+ phy_exit(imx_pcie->phy);
+ }
+ imx_pcie_clk_disable(imx_pcie);
+
+ if (imx_pcie->vpcie)
+ regulator_disable(imx_pcie->vpcie);
+}
+
+static void imx_pcie_host_post_init(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct imx_pcie *imx_pcie = to_imx_pcie(pci);
+ u32 val;
+
+ if (imx_pcie->drvdata->flags & IMX_PCIE_FLAG_8GT_ECN_ERR051586) {
+ /*
+ * ERR051586: Compliance with 8GT/s Receiver Impedance ECN
+ *
+ * The default value of GEN3_RELATED_OFF[GEN3_ZRXDC_NONCOMPL]
+ * is 1 which makes receiver non-compliant with the ZRX-DC
+ * parameter for 2.5 GT/s when operating at 8 GT/s or higher.
+ * It causes unnecessary timeout in L1.
+ *
+ * Workaround: Program GEN3_RELATED_OFF[GEN3_ZRXDC_NONCOMPL]
+ * to 0.
+ */
+ dw_pcie_dbi_ro_wr_en(pci);
+ val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
+ val &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL;
+ dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
+ dw_pcie_dbi_ro_wr_dis(pci);
+ }
+}
+
+/*
+ * In old DWC implementations, PCIE_ATU_INHIBIT_PAYLOAD in iATU Ctrl2
+ * register is reserved, so the generic DWC implementation of sending the
+ * PME_Turn_Off message using a dummy MMIO write cannot be used.
+ */
+static void imx_pcie_pme_turn_off(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct imx_pcie *imx_pcie = to_imx_pcie(pci);
+
+ regmap_set_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12, IMX6SX_GPR12_PCIE_PM_TURN_OFF);
+ regmap_clear_bits(imx_pcie->iomuxc_gpr, IOMUXC_GPR12, IMX6SX_GPR12_PCIE_PM_TURN_OFF);
+
+ usleep_range(PCIE_PME_TO_L2_TIMEOUT_US / 10, PCIE_PME_TO_L2_TIMEOUT_US);
}
+static const struct dw_pcie_host_ops imx_pcie_host_ops = {
+ .init = imx_pcie_host_init,
+ .deinit = imx_pcie_host_exit,
+ .pme_turn_off = imx_pcie_pme_turn_off,
+};
+
+static const struct dw_pcie_host_ops imx_pcie_host_dw_pme_ops = {
+ .init = imx_pcie_host_init,
+ .deinit = imx_pcie_host_exit,
+ .post_init = imx_pcie_host_post_init,
+};
+
static const struct dw_pcie_ops dw_pcie_ops = {
- /* No special ops needed, but pcie-designware still expects this struct */
+ .start_link = imx_pcie_start_link,
+ .stop_link = imx_pcie_stop_link,
};
-#ifdef CONFIG_PM_SLEEP
-static void imx6_pcie_ltssm_disable(struct device *dev)
+static void imx_pcie_ep_init(struct dw_pcie_ep *ep)
{
- struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
+ enum pci_barno bar;
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- switch (imx6_pcie->variant) {
- case IMX6SX:
- case IMX6QP:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX6Q_GPR12_PCIE_CTL_2, 0);
- break;
- case IMX7D:
- reset_control_assert(imx6_pcie->apps_reset);
- break;
+ for (bar = BAR_0; bar <= BAR_5; bar++)
+ dw_pcie_ep_reset_bar(pci, bar);
+}
+
+static int imx_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
+ unsigned int type, u16 interrupt_num)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+ switch (type) {
+ case PCI_IRQ_INTX:
+ return dw_pcie_ep_raise_intx_irq(ep, func_no);
+ case PCI_IRQ_MSI:
+ return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
+ case PCI_IRQ_MSIX:
+ return dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num);
default:
- dev_err(dev, "ltssm_disable not supported\n");
+ dev_err(pci->dev, "UNKNOWN IRQ type\n");
+ return -EINVAL;
}
+
+ return 0;
+}
+
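For context, a hedged sketch of how an endpoint function driver reaches the raise_irq callback above through the EPC core; demo_epf_signal_host() is illustrative, not part of this patch:

#include <linux/pci-epc.h>
#include <linux/pci-epf.h>

/* Ask the EPC (and ultimately imx_pcie_ep_raise_irq()) to fire MSI #1. */
static int demo_epf_signal_host(struct pci_epf *epf)
{
	return pci_epc_raise_irq(epf->epc, epf->func_no, epf->vfunc_no,
				 PCI_IRQ_MSI, 1);
}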
+static const struct pci_epc_features imx8m_pcie_epc_features = {
+ .msi_capable = true,
+ .bar[BAR_1] = { .type = BAR_RESERVED, },
+ .bar[BAR_3] = { .type = BAR_RESERVED, },
+ .bar[BAR_4] = { .type = BAR_FIXED, .fixed_size = SZ_256, },
+ .bar[BAR_5] = { .type = BAR_RESERVED, },
+ .align = SZ_64K,
+};
+
+static const struct pci_epc_features imx8q_pcie_epc_features = {
+ .msi_capable = true,
+ .bar[BAR_1] = { .type = BAR_RESERVED, },
+ .bar[BAR_3] = { .type = BAR_RESERVED, },
+ .bar[BAR_5] = { .type = BAR_RESERVED, },
+ .align = SZ_64K,
+};
+
+/*
+ * | Default | Default | Default | BAR Sizing
+ * BAR# | Enable? | Type | Size | Scheme
+ * =======================================================
+ * BAR0 | Enable | 64-bit | 1 MB | Programmable Size
+ * BAR1 | Disable | 32-bit | 64 KB | Fixed Size
+ * (BAR1 should be disabled if BAR0 is 64-bit)
+ * BAR2 | Enable | 32-bit | 1 MB | Programmable Size
+ * BAR3 | Enable | 32-bit | 64 KB | Programmable Size
+ * BAR4 | Enable | 32-bit | 1 MB | Programmable Size
+ * BAR5 | Enable | 32-bit | 64 KB | Programmable Size
+ */
+static const struct pci_epc_features imx95_pcie_epc_features = {
+ .msi_capable = true,
+ .bar[BAR_1] = { .type = BAR_FIXED, .fixed_size = SZ_64K, },
+ .align = SZ_4K,
+};
+
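A hedged sketch of how such per-BAR descriptors are typically consumed when an EPF sizes a BAR; demo_bar_size() is illustrative and assumes a non-zero align, while the BAR_RESERVED/BAR_FIXED semantics follow <linux/pci-epc.h>:

#include <linux/align.h>
#include <linux/pci-epc.h>
#include <linux/pci-epf.h>

static size_t demo_bar_size(const struct pci_epc_features *features,
			    enum pci_barno bar, size_t wanted)
{
	if (features->bar[bar].type == BAR_RESERVED)
		return 0;				/* BAR unusable on this EPC */

	if (features->bar[bar].type == BAR_FIXED)
		return features->bar[bar].fixed_size;	/* size is not negotiable */

	return ALIGN(wanted, features->align);		/* programmable BAR */
}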
+static const struct pci_epc_features*
+imx_pcie_ep_get_features(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct imx_pcie *imx_pcie = to_imx_pcie(pci);
+
+ return imx_pcie->drvdata->epc_features;
}
-static void imx6_pcie_pm_turnoff(struct imx6_pcie *imx6_pcie)
+static const struct dw_pcie_ep_ops pcie_ep_ops = {
+ .init = imx_pcie_ep_init,
+ .raise_irq = imx_pcie_ep_raise_irq,
+ .get_features = imx_pcie_ep_get_features,
+};
+
+static int imx_add_pcie_ep(struct imx_pcie *imx_pcie,
+ struct platform_device *pdev)
{
- struct device *dev = imx6_pcie->pci->dev;
+ int ret;
+ struct dw_pcie_ep *ep;
+ struct dw_pcie *pci = imx_pcie->pci;
+ struct dw_pcie_rp *pp = &pci->pp;
+ struct device *dev = pci->dev;
+
+ imx_pcie_host_init(pp);
+ ep = &pci->ep;
+ ep->ops = &pcie_ep_ops;
+
+ if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_SUPPORT_64BIT))
+ dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
- /* Some variants have a turnoff reset in DT */
- if (imx6_pcie->turnoff_reset) {
- reset_control_assert(imx6_pcie->turnoff_reset);
- reset_control_deassert(imx6_pcie->turnoff_reset);
- goto pm_turnoff_sleep;
+ ep->page_size = imx_pcie->drvdata->epc_features->align;
+
+ ret = dw_pcie_ep_init(ep);
+ if (ret) {
+ dev_err(dev, "failed to initialize endpoint\n");
+ return ret;
}
+ imx_pcie_host_post_init(pp);
- /* Others poke directly at IOMUXC registers */
- switch (imx6_pcie->variant) {
- case IMX6SX:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX6SX_GPR12_PCIE_PM_TURN_OFF,
- IMX6SX_GPR12_PCIE_PM_TURN_OFF);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX6SX_GPR12_PCIE_PM_TURN_OFF, 0);
- break;
- default:
- dev_err(dev, "PME_Turn_Off not implemented\n");
- return;
+ ret = dw_pcie_ep_init_registers(ep);
+ if (ret) {
+ dev_err(dev, "Failed to initialize DWC endpoint registers\n");
+ dw_pcie_ep_deinit(ep);
+ return ret;
}
- /*
- * Components with an upstream port must respond to
- * PME_Turn_Off with PME_TO_Ack but we can't check.
- *
- * The standard recommends a 1-10ms timeout after which to
- * proceed anyway as if acks were received.
- */
-pm_turnoff_sleep:
- usleep_range(1000, 10000);
+ pci_epc_init_notify(ep->epc);
+
+ return 0;
}
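The function above follows the general DWC endpoint bring-up ordering; a hedged distillation with error handling trimmed, assuming the driver's pcie-designware.h context (the demo_* name is illustrative):

static int demo_dwc_ep_bringup(struct dw_pcie_ep *ep)
{
	int ret;

	ret = dw_pcie_ep_init(ep);		/* parse resources, allocate windows */
	if (ret)
		return ret;

	ret = dw_pcie_ep_init_registers(ep);	/* DBI access needs the core up */
	if (ret) {
		dw_pcie_ep_deinit(ep);
		return ret;
	}

	pci_epc_init_notify(ep->epc);		/* EPF drivers may now configure */

	return 0;
}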
-static void imx6_pcie_clk_disable(struct imx6_pcie *imx6_pcie)
+static void imx_pcie_msi_save_restore(struct imx_pcie *imx_pcie, bool save)
{
- clk_disable_unprepare(imx6_pcie->pcie);
- clk_disable_unprepare(imx6_pcie->pcie_phy);
- clk_disable_unprepare(imx6_pcie->pcie_bus);
+ u8 offset;
+ u16 val;
+ struct dw_pcie *pci = imx_pcie->pci;
- switch (imx6_pcie->variant) {
- case IMX6SX:
- clk_disable_unprepare(imx6_pcie->pcie_inbound_axi);
- break;
- case IMX7D:
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX7D_GPR12_PCIE_PHY_REFCLK_SEL,
- IMX7D_GPR12_PCIE_PHY_REFCLK_SEL);
- break;
- default:
- break;
+ if (pci_msi_enabled()) {
+ offset = dw_pcie_find_capability(pci, PCI_CAP_ID_MSI);
+ if (save) {
+ val = dw_pcie_readw_dbi(pci, offset + PCI_MSI_FLAGS);
+ imx_pcie->msi_ctrl = val;
+ } else {
+ dw_pcie_dbi_ro_wr_en(pci);
+ val = imx_pcie->msi_ctrl;
+ dw_pcie_writew_dbi(pci, offset + PCI_MSI_FLAGS, val);
+ dw_pcie_dbi_ro_wr_dis(pci);
+ }
}
}
-static inline bool imx6_pcie_supports_suspend(struct imx6_pcie *imx6_pcie)
+static void imx_pcie_lut_save(struct imx_pcie *imx_pcie)
{
- return (imx6_pcie->variant == IMX7D ||
- imx6_pcie->variant == IMX6SX);
+ u32 data1, data2;
+ int i;
+
+ for (i = 0; i < IMX95_MAX_LUT; i++) {
+ regmap_write(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_ACSCTRL,
+ IMX95_PEO_LUT_RWA | i);
+ regmap_read(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA1, &data1);
+ regmap_read(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA2, &data2);
+ if (data1 & IMX95_PE0_LUT_VLD) {
+ imx_pcie->luts[i].data1 = data1;
+ imx_pcie->luts[i].data2 = data2;
+ } else {
+ imx_pcie->luts[i].data1 = 0;
+ imx_pcie->luts[i].data2 = 0;
+ }
+ }
+}
+
+static void imx_pcie_lut_restore(struct imx_pcie *imx_pcie)
+{
+ int i;
+
+ for (i = 0; i < IMX95_MAX_LUT; i++) {
+ if ((imx_pcie->luts[i].data1 & IMX95_PE0_LUT_VLD) == 0)
+ continue;
+
+ regmap_write(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA1,
+ imx_pcie->luts[i].data1);
+ regmap_write(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA2,
+ imx_pcie->luts[i].data2);
+ regmap_write(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_ACSCTRL, i);
+ }
}
-static int imx6_pcie_suspend_noirq(struct device *dev)
+static int imx_pcie_suspend_noirq(struct device *dev)
{
- struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
+ struct imx_pcie *imx_pcie = dev_get_drvdata(dev);
- if (!imx6_pcie_supports_suspend(imx6_pcie))
+ if (!(imx_pcie->drvdata->flags & IMX_PCIE_FLAG_SUPPORTS_SUSPEND))
return 0;
- imx6_pcie_pm_turnoff(imx6_pcie);
- imx6_pcie_clk_disable(imx6_pcie);
- imx6_pcie_ltssm_disable(dev);
+ imx_pcie_msi_save_restore(imx_pcie, true);
+ if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_LUT))
+ imx_pcie_lut_save(imx_pcie);
+ if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_BROKEN_SUSPEND)) {
+ /*
+ * The minimum workaround would be to assert PERST# and to set
+ * the PCIE_TEST_PD flag. However, we can also disable the clock,
+ * which saves some power.
+ */
+ imx_pcie_assert_core_reset(imx_pcie);
+ imx_pcie->drvdata->enable_ref_clk(imx_pcie, false);
+ } else {
+ return dw_pcie_suspend_noirq(imx_pcie->pci);
+ }
return 0;
}
-static int imx6_pcie_resume_noirq(struct device *dev)
+static int imx_pcie_resume_noirq(struct device *dev)
{
int ret;
- struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
- struct pcie_port *pp = &imx6_pcie->pci->pp;
+ struct imx_pcie *imx_pcie = dev_get_drvdata(dev);
- if (!imx6_pcie_supports_suspend(imx6_pcie))
+ if (!(imx_pcie->drvdata->flags & IMX_PCIE_FLAG_SUPPORTS_SUSPEND))
return 0;
- imx6_pcie_assert_core_reset(imx6_pcie);
- imx6_pcie_init_phy(imx6_pcie);
- imx6_pcie_deassert_core_reset(imx6_pcie);
- dw_pcie_setup_rc(pp);
+ if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_BROKEN_SUSPEND)) {
+ ret = imx_pcie->drvdata->enable_ref_clk(imx_pcie, true);
+ if (ret)
+ return ret;
+ ret = imx_pcie_deassert_core_reset(imx_pcie);
+ if (ret)
+ return ret;
- ret = imx6_pcie_establish_link(imx6_pcie);
- if (ret < 0)
- dev_info(dev, "pcie link is down after resume.\n");
+ /*
+ * Using PCIE_TEST_PD seems to disable MSI and to power down the
+ * root complex. This is why we have to set up the RC again and
+ * restore the MSI register afterwards.
+ */
+ ret = dw_pcie_setup_rc(&imx_pcie->pci->pp);
+ if (ret)
+ return ret;
+ } else {
+ ret = dw_pcie_resume_noirq(imx_pcie->pci);
+ if (ret)
+ return ret;
+ }
+ if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_LUT))
+ imx_pcie_lut_restore(imx_pcie);
+ imx_pcie_msi_save_restore(imx_pcie, false);
return 0;
}
-#endif
-static const struct dev_pm_ops imx6_pcie_pm_ops = {
- SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(imx6_pcie_suspend_noirq,
- imx6_pcie_resume_noirq)
+static const struct dev_pm_ops imx_pcie_pm_ops = {
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(imx_pcie_suspend_noirq,
+ imx_pcie_resume_noirq)
};
-static int imx6_pcie_probe(struct platform_device *pdev)
+static int imx_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct dw_pcie *pci;
- struct imx6_pcie *imx6_pcie;
- struct resource *dbi_base;
+ struct imx_pcie *imx_pcie;
+ struct device_node *np;
struct device_node *node = dev->of_node;
- int ret;
+ int ret, domain;
u16 val;
- imx6_pcie = devm_kzalloc(dev, sizeof(*imx6_pcie), GFP_KERNEL);
- if (!imx6_pcie)
+ imx_pcie = devm_kzalloc(dev, sizeof(*imx_pcie), GFP_KERNEL);
+ if (!imx_pcie)
return -ENOMEM;
pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
@@ -932,179 +1616,434 @@ static int imx6_pcie_probe(struct platform_device *pdev)
pci->dev = dev;
pci->ops = &dw_pcie_ops;
- imx6_pcie->pci = pci;
- imx6_pcie->variant =
- (enum imx6_pcie_variants)of_device_get_match_data(dev);
+ imx_pcie->pci = pci;
+ imx_pcie->drvdata = of_device_get_match_data(dev);
- dbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pci->dbi_base = devm_ioremap_resource(dev, dbi_base);
- if (IS_ERR(pci->dbi_base))
- return PTR_ERR(pci->dbi_base);
+ mutex_init(&imx_pcie->lock);
- /* Fetch GPIOs */
- imx6_pcie->reset_gpio = of_get_named_gpio(node, "reset-gpio", 0);
- imx6_pcie->gpio_active_high = of_property_read_bool(node,
- "reset-gpio-active-high");
- if (gpio_is_valid(imx6_pcie->reset_gpio)) {
- ret = devm_gpio_request_one(dev, imx6_pcie->reset_gpio,
- imx6_pcie->gpio_active_high ?
- GPIOF_OUT_INIT_HIGH :
- GPIOF_OUT_INIT_LOW,
- "PCIe reset");
+ if (imx_pcie->drvdata->ops)
+ pci->pp.ops = imx_pcie->drvdata->ops;
+ else
+ pci->pp.ops = &imx_pcie_host_dw_pme_ops;
+
+ /* Find the PHY if one is defined; only i.MX7D uses it */
+ np = of_parse_phandle(node, "fsl,imx7d-pcie-phy", 0);
+ if (np) {
+ struct resource res;
+
+ ret = of_address_to_resource(np, 0, &res);
if (ret) {
- dev_err(dev, "unable to get reset gpio\n");
+ dev_err(dev, "Unable to map PCIe PHY\n");
return ret;
}
- } else if (imx6_pcie->reset_gpio == -EPROBE_DEFER) {
- return imx6_pcie->reset_gpio;
+ imx_pcie->phy_base = devm_ioremap_resource(dev, &res);
+ if (IS_ERR(imx_pcie->phy_base))
+ return PTR_ERR(imx_pcie->phy_base);
}
+ /* Fetch GPIOs */
+ imx_pcie->reset_gpiod = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(imx_pcie->reset_gpiod))
+ return dev_err_probe(dev, PTR_ERR(imx_pcie->reset_gpiod),
+ "unable to get reset gpio\n");
+ gpiod_set_consumer_name(imx_pcie->reset_gpiod, "PCIe reset");
+
/* Fetch clocks */
- imx6_pcie->pcie_phy = devm_clk_get(dev, "pcie_phy");
- if (IS_ERR(imx6_pcie->pcie_phy)) {
- dev_err(dev, "pcie_phy clock source missing or invalid\n");
- return PTR_ERR(imx6_pcie->pcie_phy);
+ imx_pcie->num_clks = devm_clk_bulk_get_all(dev, &imx_pcie->clks);
+ if (imx_pcie->num_clks < 0)
+ return dev_err_probe(dev, imx_pcie->num_clks,
+ "failed to get clocks\n");
+
+ if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_PHYDRV)) {
+ imx_pcie->phy = devm_phy_get(dev, "pcie-phy");
+ if (IS_ERR(imx_pcie->phy))
+ return dev_err_probe(dev, PTR_ERR(imx_pcie->phy),
+ "failed to get pcie phy\n");
}
- imx6_pcie->pcie_bus = devm_clk_get(dev, "pcie_bus");
- if (IS_ERR(imx6_pcie->pcie_bus)) {
- dev_err(dev, "pcie_bus clock source missing or invalid\n");
- return PTR_ERR(imx6_pcie->pcie_bus);
+ if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_APP_RESET)) {
+ imx_pcie->apps_reset = devm_reset_control_get_exclusive(dev, "apps");
+ if (IS_ERR(imx_pcie->apps_reset))
+ return dev_err_probe(dev, PTR_ERR(imx_pcie->apps_reset),
+ "failed to get pcie apps reset control\n");
}
- imx6_pcie->pcie = devm_clk_get(dev, "pcie");
- if (IS_ERR(imx6_pcie->pcie)) {
- dev_err(dev, "pcie clock source missing or invalid\n");
- return PTR_ERR(imx6_pcie->pcie);
+ if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_PHY_RESET)) {
+ imx_pcie->pciephy_reset = devm_reset_control_get_exclusive(dev, "pciephy");
+ if (IS_ERR(imx_pcie->pciephy_reset))
+ return dev_err_probe(dev, PTR_ERR(imx_pcie->pciephy_reset),
+ "Failed to get PCIEPHY reset control\n");
}
- switch (imx6_pcie->variant) {
- case IMX6SX:
- imx6_pcie->pcie_inbound_axi = devm_clk_get(dev,
- "pcie_inbound_axi");
- if (IS_ERR(imx6_pcie->pcie_inbound_axi)) {
- dev_err(dev, "pcie_inbound_axi clock missing or invalid\n");
- return PTR_ERR(imx6_pcie->pcie_inbound_axi);
- }
- break;
- case IMX7D:
- imx6_pcie->pciephy_reset = devm_reset_control_get_exclusive(dev,
- "pciephy");
- if (IS_ERR(imx6_pcie->pciephy_reset)) {
- dev_err(dev, "Failed to get PCIEPHY reset control\n");
- return PTR_ERR(imx6_pcie->pciephy_reset);
- }
+ switch (imx_pcie->drvdata->variant) {
+ case IMX8MQ:
+ case IMX8MQ_EP:
+ domain = of_get_pci_domain_nr(node);
+ if (domain < 0 || domain > 1)
+ return dev_err_probe(dev, -ENODEV, "no \"linux,pci-domain\" property in devicetree\n");
- imx6_pcie->apps_reset = devm_reset_control_get_exclusive(dev,
- "apps");
- if (IS_ERR(imx6_pcie->apps_reset)) {
- dev_err(dev, "Failed to get PCIE APPS reset control\n");
- return PTR_ERR(imx6_pcie->apps_reset);
- }
+ imx_pcie->controller_id = domain;
break;
default:
break;
}
- /* Grab turnoff reset */
- imx6_pcie->turnoff_reset = devm_reset_control_get_optional_exclusive(dev, "turnoff");
- if (IS_ERR(imx6_pcie->turnoff_reset)) {
- dev_err(dev, "Failed to get TURNOFF reset control\n");
- return PTR_ERR(imx6_pcie->turnoff_reset);
+ if (imx_pcie->drvdata->gpr) {
+ /* Grab GPR config register range */
+ imx_pcie->iomuxc_gpr =
+ syscon_regmap_lookup_by_compatible(imx_pcie->drvdata->gpr);
+ if (IS_ERR(imx_pcie->iomuxc_gpr))
+ return dev_err_probe(dev, PTR_ERR(imx_pcie->iomuxc_gpr),
+ "unable to find iomuxc registers\n");
}
- /* Grab GPR config register range */
- imx6_pcie->iomuxc_gpr =
- syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
- if (IS_ERR(imx6_pcie->iomuxc_gpr)) {
- dev_err(dev, "unable to find iomuxc registers\n");
- return PTR_ERR(imx6_pcie->iomuxc_gpr);
+ if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_SERDES)) {
+ void __iomem *off = devm_platform_ioremap_resource_byname(pdev, "app");
+
+ if (IS_ERR(off))
+ return dev_err_probe(dev, PTR_ERR(off),
+ "unable to find serdes registers\n");
+
+ static const struct regmap_config regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ };
+
+ imx_pcie->iomuxc_gpr = devm_regmap_init_mmio(dev, off, &regmap_config);
+ if (IS_ERR(imx_pcie->iomuxc_gpr))
+ return dev_err_probe(dev, PTR_ERR(imx_pcie->iomuxc_gpr),
+ "unable to find iomuxc registers\n");
}
/* Grab PCIe PHY Tx Settings */
if (of_property_read_u32(node, "fsl,tx-deemph-gen1",
- &imx6_pcie->tx_deemph_gen1))
- imx6_pcie->tx_deemph_gen1 = 0;
+ &imx_pcie->tx_deemph_gen1))
+ imx_pcie->tx_deemph_gen1 = 0;
if (of_property_read_u32(node, "fsl,tx-deemph-gen2-3p5db",
- &imx6_pcie->tx_deemph_gen2_3p5db))
- imx6_pcie->tx_deemph_gen2_3p5db = 0;
+ &imx_pcie->tx_deemph_gen2_3p5db))
+ imx_pcie->tx_deemph_gen2_3p5db = 0;
if (of_property_read_u32(node, "fsl,tx-deemph-gen2-6db",
- &imx6_pcie->tx_deemph_gen2_6db))
- imx6_pcie->tx_deemph_gen2_6db = 20;
+ &imx_pcie->tx_deemph_gen2_6db))
+ imx_pcie->tx_deemph_gen2_6db = 20;
if (of_property_read_u32(node, "fsl,tx-swing-full",
- &imx6_pcie->tx_swing_full))
- imx6_pcie->tx_swing_full = 127;
+ &imx_pcie->tx_swing_full))
+ imx_pcie->tx_swing_full = 127;
if (of_property_read_u32(node, "fsl,tx-swing-low",
- &imx6_pcie->tx_swing_low))
- imx6_pcie->tx_swing_low = 127;
+ &imx_pcie->tx_swing_low))
+ imx_pcie->tx_swing_low = 127;
/* Limit link speed */
- ret = of_property_read_u32(node, "fsl,max-link-speed",
- &imx6_pcie->link_gen);
- if (ret)
- imx6_pcie->link_gen = 1;
+ pci->max_link_speed = 1;
+ of_property_read_u32(node, "fsl,max-link-speed", &pci->max_link_speed);
+
+ ret = devm_regulator_get_enable_optional(&pdev->dev, "vpcie3v3aux");
+ if (ret < 0 && ret != -ENODEV)
+ return dev_err_probe(dev, ret, "failed to enable Vaux supply\n");
+
+ imx_pcie->vpcie = devm_regulator_get_optional(&pdev->dev, "vpcie");
+ if (IS_ERR(imx_pcie->vpcie)) {
+ if (PTR_ERR(imx_pcie->vpcie) != -ENODEV)
+ return PTR_ERR(imx_pcie->vpcie);
+ imx_pcie->vpcie = NULL;
+ }
- imx6_pcie->vpcie = devm_regulator_get_optional(&pdev->dev, "vpcie");
- if (IS_ERR(imx6_pcie->vpcie)) {
- if (PTR_ERR(imx6_pcie->vpcie) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
- imx6_pcie->vpcie = NULL;
+ imx_pcie->vph = devm_regulator_get_optional(&pdev->dev, "vph");
+ if (IS_ERR(imx_pcie->vph)) {
+ if (PTR_ERR(imx_pcie->vph) != -ENODEV)
+ return PTR_ERR(imx_pcie->vph);
+ imx_pcie->vph = NULL;
}
- platform_set_drvdata(pdev, imx6_pcie);
+ platform_set_drvdata(pdev, imx_pcie);
- ret = imx6_pcie_attach_pd(dev);
+ ret = imx_pcie_attach_pd(dev);
if (ret)
return ret;
- ret = imx6_add_pcie_port(imx6_pcie, pdev);
- if (ret < 0)
- return ret;
+ pci->use_parent_dt_ranges = true;
+ if (imx_pcie->drvdata->mode == DW_PCIE_EP_TYPE) {
+ ret = imx_add_pcie_ep(imx_pcie, pdev);
+ if (ret < 0)
+ return ret;
- if (pci_msi_enabled()) {
- val = dw_pcie_readw_dbi(pci, PCIE_RC_IMX6_MSI_CAP +
- PCI_MSI_FLAGS);
- val |= PCI_MSI_FLAGS_ENABLE;
- dw_pcie_writew_dbi(pci, PCIE_RC_IMX6_MSI_CAP + PCI_MSI_FLAGS,
- val);
+ /*
+ * FIXME: Only a single device (EPF) is supported due to an
+ * Endpoint framework limitation.
+ */
+ imx_pcie_add_lut_by_rid(imx_pcie, 0);
+ } else {
+ pci->pp.use_atu_msg = true;
+ ret = dw_pcie_host_init(&pci->pp);
+ if (ret < 0)
+ return ret;
+
+ if (pci_msi_enabled()) {
+ u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_MSI);
+
+ val = dw_pcie_readw_dbi(pci, offset + PCI_MSI_FLAGS);
+ val |= PCI_MSI_FLAGS_ENABLE;
+ dw_pcie_writew_dbi(pci, offset + PCI_MSI_FLAGS, val);
+ }
}
return 0;
}
-static void imx6_pcie_shutdown(struct platform_device *pdev)
+static void imx_pcie_shutdown(struct platform_device *pdev)
{
- struct imx6_pcie *imx6_pcie = platform_get_drvdata(pdev);
+ struct imx_pcie *imx_pcie = platform_get_drvdata(pdev);
/* bring down link, so bootloader gets clean state in case of reboot */
- imx6_pcie_assert_core_reset(imx6_pcie);
+ imx_pcie_assert_core_reset(imx_pcie);
}
-static const struct of_device_id imx6_pcie_of_match[] = {
- { .compatible = "fsl,imx6q-pcie", .data = (void *)IMX6Q, },
- { .compatible = "fsl,imx6sx-pcie", .data = (void *)IMX6SX, },
- { .compatible = "fsl,imx6qp-pcie", .data = (void *)IMX6QP, },
- { .compatible = "fsl,imx7d-pcie", .data = (void *)IMX7D, },
+static const struct imx_pcie_drvdata drvdata[] = {
+ [IMX6Q] = {
+ .variant = IMX6Q,
+ .flags = IMX_PCIE_FLAG_IMX_PHY |
+ IMX_PCIE_FLAG_SPEED_CHANGE_WORKAROUND |
+ IMX_PCIE_FLAG_BROKEN_SUSPEND |
+ IMX_PCIE_FLAG_SUPPORTS_SUSPEND,
+ .dbi_length = 0x200,
+ .gpr = "fsl,imx6q-iomuxc-gpr",
+ .ltssm_off = IOMUXC_GPR12,
+ .ltssm_mask = IMX6Q_GPR12_PCIE_CTL_2,
+ .mode_off[0] = IOMUXC_GPR12,
+ .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
+ .init_phy = imx_pcie_init_phy,
+ .enable_ref_clk = imx6q_pcie_enable_ref_clk,
+ .core_reset = imx6q_pcie_core_reset,
+ },
+ [IMX6SX] = {
+ .variant = IMX6SX,
+ .flags = IMX_PCIE_FLAG_IMX_PHY |
+ IMX_PCIE_FLAG_SPEED_CHANGE_WORKAROUND |
+ IMX_PCIE_FLAG_SUPPORTS_SUSPEND,
+ .gpr = "fsl,imx6q-iomuxc-gpr",
+ .ltssm_off = IOMUXC_GPR12,
+ .ltssm_mask = IMX6Q_GPR12_PCIE_CTL_2,
+ .mode_off[0] = IOMUXC_GPR12,
+ .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
+ .init_phy = imx6sx_pcie_init_phy,
+ .enable_ref_clk = imx6sx_pcie_enable_ref_clk,
+ .core_reset = imx6sx_pcie_core_reset,
+ .ops = &imx_pcie_host_ops,
+ },
+ [IMX6QP] = {
+ .variant = IMX6QP,
+ .flags = IMX_PCIE_FLAG_IMX_PHY |
+ IMX_PCIE_FLAG_SPEED_CHANGE_WORKAROUND |
+ IMX_PCIE_FLAG_SUPPORTS_SUSPEND,
+ .dbi_length = 0x200,
+ .gpr = "fsl,imx6q-iomuxc-gpr",
+ .ltssm_off = IOMUXC_GPR12,
+ .ltssm_mask = IMX6Q_GPR12_PCIE_CTL_2,
+ .mode_off[0] = IOMUXC_GPR12,
+ .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
+ .init_phy = imx_pcie_init_phy,
+ .enable_ref_clk = imx6q_pcie_enable_ref_clk,
+ .core_reset = imx6qp_pcie_core_reset,
+ .ops = &imx_pcie_host_ops,
+ },
+ [IMX7D] = {
+ .variant = IMX7D,
+ .flags = IMX_PCIE_FLAG_SUPPORTS_SUSPEND |
+ IMX_PCIE_FLAG_HAS_APP_RESET |
+ IMX_PCIE_FLAG_HAS_PHY_RESET,
+ .gpr = "fsl,imx7d-iomuxc-gpr",
+ .mode_off[0] = IOMUXC_GPR12,
+ .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
+ .enable_ref_clk = imx7d_pcie_enable_ref_clk,
+ .core_reset = imx7d_pcie_core_reset,
+ },
+ [IMX8MQ] = {
+ .variant = IMX8MQ,
+ .flags = IMX_PCIE_FLAG_HAS_APP_RESET |
+ IMX_PCIE_FLAG_HAS_PHY_RESET |
+ IMX_PCIE_FLAG_SUPPORTS_SUSPEND,
+ .gpr = "fsl,imx8mq-iomuxc-gpr",
+ .mode_off[0] = IOMUXC_GPR12,
+ .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
+ .mode_off[1] = IOMUXC_GPR12,
+ .mode_mask[1] = IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE,
+ .init_phy = imx8mq_pcie_init_phy,
+ .enable_ref_clk = imx8mm_pcie_enable_ref_clk,
+ },
+ [IMX8MM] = {
+ .variant = IMX8MM,
+ .flags = IMX_PCIE_FLAG_SUPPORTS_SUSPEND |
+ IMX_PCIE_FLAG_HAS_PHYDRV |
+ IMX_PCIE_FLAG_HAS_APP_RESET,
+ .gpr = "fsl,imx8mm-iomuxc-gpr",
+ .mode_off[0] = IOMUXC_GPR12,
+ .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
+ .enable_ref_clk = imx8mm_pcie_enable_ref_clk,
+ },
+ [IMX8MP] = {
+ .variant = IMX8MP,
+ .flags = IMX_PCIE_FLAG_SUPPORTS_SUSPEND |
+ IMX_PCIE_FLAG_HAS_PHYDRV |
+ IMX_PCIE_FLAG_HAS_APP_RESET,
+ .gpr = "fsl,imx8mp-iomuxc-gpr",
+ .mode_off[0] = IOMUXC_GPR12,
+ .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
+ .enable_ref_clk = imx8mm_pcie_enable_ref_clk,
+ },
+ [IMX8Q] = {
+ .variant = IMX8Q,
+ .flags = IMX_PCIE_FLAG_HAS_PHYDRV |
+ IMX_PCIE_FLAG_CPU_ADDR_FIXUP |
+ IMX_PCIE_FLAG_SUPPORTS_SUSPEND,
+ },
+ [IMX95] = {
+ .variant = IMX95,
+ .flags = IMX_PCIE_FLAG_HAS_SERDES |
+ IMX_PCIE_FLAG_HAS_LUT |
+ IMX_PCIE_FLAG_8GT_ECN_ERR051586 |
+ IMX_PCIE_FLAG_SUPPORTS_SUSPEND,
+ .ltssm_off = IMX95_PE0_GEN_CTRL_3,
+ .ltssm_mask = IMX95_PCIE_LTSSM_EN,
+ .mode_off[0] = IMX95_PE0_GEN_CTRL_1,
+ .mode_mask[0] = IMX95_PCIE_DEVICE_TYPE,
+ .core_reset = imx95_pcie_core_reset,
+ .init_phy = imx95_pcie_init_phy,
+ .wait_pll_lock = imx95_pcie_wait_for_phy_pll_lock,
+ },
+ [IMX8MQ_EP] = {
+ .variant = IMX8MQ_EP,
+ .flags = IMX_PCIE_FLAG_HAS_APP_RESET |
+ IMX_PCIE_FLAG_HAS_PHY_RESET,
+ .mode = DW_PCIE_EP_TYPE,
+ .gpr = "fsl,imx8mq-iomuxc-gpr",
+ .mode_off[0] = IOMUXC_GPR12,
+ .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
+ .mode_off[1] = IOMUXC_GPR12,
+ .mode_mask[1] = IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE,
+ .epc_features = &imx8q_pcie_epc_features,
+ .init_phy = imx8mq_pcie_init_phy,
+ .enable_ref_clk = imx8mm_pcie_enable_ref_clk,
+ },
+ [IMX8MM_EP] = {
+ .variant = IMX8MM_EP,
+ .flags = IMX_PCIE_FLAG_HAS_APP_RESET |
+ IMX_PCIE_FLAG_HAS_PHYDRV,
+ .mode = DW_PCIE_EP_TYPE,
+ .gpr = "fsl,imx8mm-iomuxc-gpr",
+ .mode_off[0] = IOMUXC_GPR12,
+ .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
+ .epc_features = &imx8m_pcie_epc_features,
+ .enable_ref_clk = imx8mm_pcie_enable_ref_clk,
+ },
+ [IMX8MP_EP] = {
+ .variant = IMX8MP_EP,
+ .flags = IMX_PCIE_FLAG_HAS_APP_RESET |
+ IMX_PCIE_FLAG_HAS_PHYDRV,
+ .mode = DW_PCIE_EP_TYPE,
+ .gpr = "fsl,imx8mp-iomuxc-gpr",
+ .mode_off[0] = IOMUXC_GPR12,
+ .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE,
+ .epc_features = &imx8m_pcie_epc_features,
+ .enable_ref_clk = imx8mm_pcie_enable_ref_clk,
+ },
+ [IMX8Q_EP] = {
+ .variant = IMX8Q_EP,
+ .flags = IMX_PCIE_FLAG_HAS_PHYDRV,
+ .mode = DW_PCIE_EP_TYPE,
+ .epc_features = &imx8q_pcie_epc_features,
+ },
+ [IMX95_EP] = {
+ .variant = IMX95_EP,
+ .flags = IMX_PCIE_FLAG_HAS_SERDES |
+ IMX_PCIE_FLAG_8GT_ECN_ERR051586 |
+ IMX_PCIE_FLAG_SUPPORT_64BIT,
+ .ltssm_off = IMX95_PE0_GEN_CTRL_3,
+ .ltssm_mask = IMX95_PCIE_LTSSM_EN,
+ .mode_off[0] = IMX95_PE0_GEN_CTRL_1,
+ .mode_mask[0] = IMX95_PCIE_DEVICE_TYPE,
+ .init_phy = imx95_pcie_init_phy,
+ .core_reset = imx95_pcie_core_reset,
+ .wait_pll_lock = imx95_pcie_wait_for_phy_pll_lock,
+ .epc_features = &imx95_pcie_epc_features,
+ .mode = DW_PCIE_EP_TYPE,
+ },
+};
+
+static const struct of_device_id imx_pcie_of_match[] = {
+ { .compatible = "fsl,imx6q-pcie", .data = &drvdata[IMX6Q], },
+ { .compatible = "fsl,imx6sx-pcie", .data = &drvdata[IMX6SX], },
+ { .compatible = "fsl,imx6qp-pcie", .data = &drvdata[IMX6QP], },
+ { .compatible = "fsl,imx7d-pcie", .data = &drvdata[IMX7D], },
+ { .compatible = "fsl,imx8mq-pcie", .data = &drvdata[IMX8MQ], },
+ { .compatible = "fsl,imx8mm-pcie", .data = &drvdata[IMX8MM], },
+ { .compatible = "fsl,imx8mp-pcie", .data = &drvdata[IMX8MP], },
+ { .compatible = "fsl,imx8q-pcie", .data = &drvdata[IMX8Q], },
+ { .compatible = "fsl,imx95-pcie", .data = &drvdata[IMX95], },
+ { .compatible = "fsl,imx8mq-pcie-ep", .data = &drvdata[IMX8MQ_EP], },
+ { .compatible = "fsl,imx8mm-pcie-ep", .data = &drvdata[IMX8MM_EP], },
+ { .compatible = "fsl,imx8mp-pcie-ep", .data = &drvdata[IMX8MP_EP], },
+ { .compatible = "fsl,imx8q-pcie-ep", .data = &drvdata[IMX8Q_EP], },
+ { .compatible = "fsl,imx95-pcie-ep", .data = &drvdata[IMX95_EP], },
{},
};
-static struct platform_driver imx6_pcie_driver = {
+static struct platform_driver imx_pcie_driver = {
.driver = {
.name = "imx6q-pcie",
- .of_match_table = imx6_pcie_of_match,
+ .of_match_table = imx_pcie_of_match,
.suppress_bind_attrs = true,
- .pm = &imx6_pcie_pm_ops,
+ .pm = &imx_pcie_pm_ops,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
- .probe = imx6_pcie_probe,
- .shutdown = imx6_pcie_shutdown,
+ .probe = imx_pcie_probe,
+ .shutdown = imx_pcie_shutdown,
};
-static int __init imx6_pcie_init(void)
+static void imx_pcie_quirk(struct pci_dev *dev)
{
+ struct pci_bus *bus = dev->bus;
+ struct dw_pcie_rp *pp = bus->sysdata;
+
+ /* The bus's parent is the PCI bridge; its parent is this platform driver */
+ if (!bus->dev.parent || !bus->dev.parent->parent)
+ return;
+
+ /* Make sure we only quirk devices associated with this driver */
+ if (bus->dev.parent->parent->driver != &imx_pcie_driver.driver)
+ return;
+
+ if (pci_is_root_bus(bus)) {
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct imx_pcie *imx_pcie = to_imx_pcie(pci);
+
+ /*
+ * Limit config length to avoid the kernel reading beyond
+ * the register set and causing an abort on i.MX 6Quad
+ */
+ if (imx_pcie->drvdata->dbi_length) {
+ dev->cfg_size = imx_pcie->drvdata->dbi_length;
+ dev_info(&dev->dev, "Limiting cfg_size to %d\n",
+ dev->cfg_size);
+ }
+ }
+}
+DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_SYNOPSYS, 0xabcd,
+ PCI_CLASS_BRIDGE_PCI, 8, imx_pcie_quirk);
+
+static int __init imx_pcie_init(void)
+{
+#ifdef CONFIG_ARM
+ struct device_node *np;
+
+ np = of_find_matching_node(NULL, imx_pcie_of_match);
+ if (!np)
+ return -ENODEV;
+ of_node_put(np);
+
/*
* Since probe() can be deferred we need to make sure that
* hook_fault_code is not called after __init memory is freed
@@ -1114,7 +2053,8 @@ static int __init imx6_pcie_init(void)
*/
hook_fault_code(8, imx6q_pcie_abort_handler, SIGBUS, 0,
"external abort on non-linefetch");
+#endif
- return platform_driver_register(&imx6_pcie_driver);
+ return platform_driver_register(&imx_pcie_driver);
}
-device_initcall(imx6_pcie_init);
+device_initcall(imx_pcie_init);
diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c
index 14f2b0b4ed5e..f86d9111f863 100644
--- a/drivers/pci/controller/dwc/pci-keystone.c
+++ b/drivers/pci/controller/dwc/pci-keystone.c
@@ -3,7 +3,7 @@
* PCIe host controller driver for Texas Instruments Keystone SoCs
*
* Copyright (C) 2013-2014 Texas Instruments., Ltd.
- * http://www.ti.com
+ * https://www.ti.com
*
* Author: Murali Karicheri <m-karicheri2@ti.com>
* Implementation based on pci-exynos.c and pcie-designware.c
@@ -11,11 +11,13 @@
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/mfd/syscon.h>
+#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/of_irq.h>
@@ -26,12 +28,18 @@
#include <linux/resource.h>
#include <linux/signal.h>
+#include "../../pci.h"
#include "pcie-designware.h"
#define PCIE_VENDORID_MASK 0xffff
#define PCIE_DEVICEID_SHIFT 16
/* Application registers */
+#define PID 0x000
+#define RTL GENMASK(15, 11)
+#define RTL_SHIFT 11
+#define AM6_PCI_PG1_RTL_VER 0x15
+
#define CMD_STATUS 0x004
#define LTSSM_EN_VAL BIT(0)
#define OB_XLAT_EN_VAL BIT(1)
@@ -44,28 +52,34 @@
#define CFG_TYPE1 BIT(24)
#define OB_SIZE 0x030
-#define SPACE0_REMOTE_CFG_OFFSET 0x1000
#define OB_OFFSET_INDEX(n) (0x200 + (8 * (n)))
#define OB_OFFSET_HI(n) (0x204 + (8 * (n)))
#define OB_ENABLEN BIT(0)
#define OB_WIN_SIZE 8 /* 8MB */
+#define PCIE_LEGACY_IRQ_ENABLE_SET(n) (0x188 + (0x10 * ((n) - 1)))
+#define PCIE_LEGACY_IRQ_ENABLE_CLR(n) (0x18c + (0x10 * ((n) - 1)))
+#define PCIE_EP_IRQ_SET 0x64
+#define PCIE_EP_IRQ_CLR 0x68
+#define INT_ENABLE BIT(0)
+
/* IRQ register defines */
#define IRQ_EOI 0x050
-#define IRQ_STATUS 0x184
-#define IRQ_ENABLE_SET 0x188
-#define IRQ_ENABLE_CLR 0x18c
#define MSI_IRQ 0x054
-#define MSI0_IRQ_STATUS 0x104
-#define MSI0_IRQ_ENABLE_SET 0x108
-#define MSI0_IRQ_ENABLE_CLR 0x10c
-#define IRQ_STATUS 0x184
+#define MSI_IRQ_STATUS(n) (0x104 + ((n) << 4))
+#define MSI_IRQ_ENABLE_SET(n) (0x108 + ((n) << 4))
+#define MSI_IRQ_ENABLE_CLR(n) (0x10c + ((n) << 4))
#define MSI_IRQ_OFFSET 4
+#define IRQ_STATUS(n) (0x184 + ((n) << 4))
+#define IRQ_ENABLE_SET(n) (0x188 + ((n) << 4))
+#define INTx_EN BIT(0)
+
#define ERR_IRQ_STATUS 0x1c4
#define ERR_IRQ_ENABLE_SET 0x1c8
#define ERR_AER BIT(5) /* ECRC error */
+#define AM6_ERR_AER BIT(4) /* AM6 ECRC error */
#define ERR_AXI BIT(4) /* AXI tag lookup fatal error */
#define ERR_CORR BIT(3) /* Correctable error */
#define ERR_NONFATAL BIT(2) /* Non-fatal error */
@@ -74,55 +88,58 @@
#define ERR_IRQ_ALL (ERR_AER | ERR_AXI | ERR_CORR | \
ERR_NONFATAL | ERR_FATAL | ERR_SYS)
-#define MAX_MSI_HOST_IRQS 8
/* PCIE controller device IDs */
#define PCIE_RC_K2HK 0xb008
#define PCIE_RC_K2E 0xb009
#define PCIE_RC_K2L 0xb00a
#define PCIE_RC_K2G 0xb00b
+#define KS_PCIE_DEV_TYPE_MASK (0x3 << 1)
+#define KS_PCIE_DEV_TYPE(mode) ((mode) << 1)
+
+#define EP 0x0
+#define LEG_EP 0x1
+#define RC 0x2
+
+#define KS_PCIE_SYSCLOCKOUTEN BIT(0)
+
+#define AM654_PCIE_DEV_TYPE_MASK 0x3
+#define AM654_WIN_SIZE SZ_64K
+
+#define APP_ADDR_SPACE_0 (16 * SZ_1K)
+
#define to_keystone_pcie(x) dev_get_drvdata((x)->dev)
+#define PCI_DEVICE_ID_TI_AM654X 0xb00c
+
+struct ks_pcie_of_data {
+ enum dw_pcie_device_mode mode;
+ const struct dw_pcie_host_ops *host_ops;
+ const struct dw_pcie_ep_ops *ep_ops;
+ u32 version;
+};
+
struct keystone_pcie {
struct dw_pcie *pci;
/* PCI Device ID */
u32 device_id;
- int num_legacy_host_irqs;
- int legacy_host_irqs[PCI_NUM_INTX];
- struct device_node *legacy_intc_np;
+ int intx_host_irqs[PCI_NUM_INTX];
- int num_msi_host_irqs;
- int msi_host_irqs[MAX_MSI_HOST_IRQS];
+ int msi_host_irq;
int num_lanes;
u32 num_viewport;
struct phy **phy;
struct device_link **link;
struct device_node *msi_intc_np;
- struct irq_domain *legacy_irq_domain;
+ struct irq_domain *intx_irq_domain;
struct device_node *np;
- int error_irq;
-
/* Application register space */
void __iomem *va_app_base; /* DT 1st resource */
struct resource app;
+ bool is_am6;
};
-static inline void update_reg_offset_bit_pos(u32 offset, u32 *reg_offset,
- u32 *bit_pos)
-{
- *reg_offset = offset % 8;
- *bit_pos = offset >> 3;
-}
-
-static phys_addr_t ks_pcie_get_msi_addr(struct pcie_port *pp)
-{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
-
- return ks_pcie->app.start + MSI_IRQ;
-}
-
static u32 ks_pcie_app_readl(struct keystone_pcie *ks_pcie, u32 offset)
{
return readl(ks_pcie->va_app_base + offset);
@@ -134,95 +151,179 @@ static void ks_pcie_app_writel(struct keystone_pcie *ks_pcie, u32 offset,
writel(val, ks_pcie->va_app_base + offset);
}
-static void ks_pcie_handle_msi_irq(struct keystone_pcie *ks_pcie, int offset)
+static void ks_pcie_msi_irq_ack(struct irq_data *data)
{
- struct dw_pcie *pci = ks_pcie->pci;
- struct pcie_port *pp = &pci->pp;
- struct device *dev = pci->dev;
- u32 pending, vector;
- int src, virq;
+ struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(data);
+ struct keystone_pcie *ks_pcie;
+ u32 irq = data->hwirq;
+ struct dw_pcie *pci;
+ u32 reg_offset;
+ u32 bit_pos;
- pending = ks_pcie_app_readl(ks_pcie, MSI0_IRQ_STATUS + (offset << 4));
+ pci = to_dw_pcie_from_pp(pp);
+ ks_pcie = to_keystone_pcie(pci);
- /*
- * MSI0 status bit 0-3 shows vectors 0, 8, 16, 24, MSI1 status bit
- * shows 1, 9, 17, 25 and so forth
- */
- for (src = 0; src < 4; src++) {
- if (BIT(src) & pending) {
- vector = offset + (src << 3);
- virq = irq_linear_revmap(pp->irq_domain, vector);
- dev_dbg(dev, "irq: bit %d, vector %d, virq %d\n",
- src, vector, virq);
- generic_handle_irq(virq);
- }
- }
+ reg_offset = irq % 8;
+ bit_pos = irq >> 3;
+
+ ks_pcie_app_writel(ks_pcie, MSI_IRQ_STATUS(reg_offset),
+ BIT(bit_pos));
+ ks_pcie_app_writel(ks_pcie, IRQ_EOI, reg_offset + MSI_IRQ_OFFSET);
}
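The reg_offset/bit_pos split above encodes how Keystone spreads 32 MSI vectors across eight status registers; a small illustrative helper for the mapping:

/* Vector N lives in MSI_IRQ_STATUS(N % 8), bit (N / 8); for example,
 * vector 17 maps to status register 1, bit 2.
 */
static void demo_msi_decode(u32 vector, u32 *reg_offset, u32 *bit_pos)
{
	*reg_offset = vector % 8;	/* which MSI_IRQ_STATUS(n) register */
	*bit_pos = vector >> 3;		/* which bit inside that register */
}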
-static void ks_pcie_msi_irq_ack(int irq, struct pcie_port *pp)
+static void ks_pcie_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
- u32 reg_offset, bit_pos;
+ struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(data);
struct keystone_pcie *ks_pcie;
struct dw_pcie *pci;
+ u64 msi_target;
pci = to_dw_pcie_from_pp(pp);
ks_pcie = to_keystone_pcie(pci);
- update_reg_offset_bit_pos(irq, &reg_offset, &bit_pos);
- ks_pcie_app_writel(ks_pcie, MSI0_IRQ_STATUS + (reg_offset << 4),
- BIT(bit_pos));
- ks_pcie_app_writel(ks_pcie, IRQ_EOI, reg_offset + MSI_IRQ_OFFSET);
+ msi_target = ks_pcie->app.start + MSI_IRQ;
+ msg->address_lo = lower_32_bits(msi_target);
+ msg->address_hi = upper_32_bits(msi_target);
+ msg->data = data->hwirq;
+
+ dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n",
+ (int)data->hwirq, msg->address_hi, msg->address_lo);
}
-static void ks_pcie_msi_set_irq(struct pcie_port *pp, int irq)
+static void ks_pcie_msi_mask(struct irq_data *data)
{
- u32 reg_offset, bit_pos;
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
+ struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(data);
+ struct keystone_pcie *ks_pcie;
+ u32 irq = data->hwirq;
+ struct dw_pcie *pci;
+ unsigned long flags;
+ u32 reg_offset;
+ u32 bit_pos;
+
+ raw_spin_lock_irqsave(&pp->lock, flags);
- update_reg_offset_bit_pos(irq, &reg_offset, &bit_pos);
- ks_pcie_app_writel(ks_pcie, MSI0_IRQ_ENABLE_SET + (reg_offset << 4),
+ pci = to_dw_pcie_from_pp(pp);
+ ks_pcie = to_keystone_pcie(pci);
+
+ reg_offset = irq % 8;
+ bit_pos = irq >> 3;
+
+ ks_pcie_app_writel(ks_pcie, MSI_IRQ_ENABLE_CLR(reg_offset),
BIT(bit_pos));
+
+ raw_spin_unlock_irqrestore(&pp->lock, flags);
}
-static void ks_pcie_msi_clear_irq(struct pcie_port *pp, int irq)
+static void ks_pcie_msi_unmask(struct irq_data *data)
{
- u32 reg_offset, bit_pos;
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
+ struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(data);
+ struct keystone_pcie *ks_pcie;
+ u32 irq = data->hwirq;
+ struct dw_pcie *pci;
+ unsigned long flags;
+ u32 reg_offset;
+ u32 bit_pos;
+
+ raw_spin_lock_irqsave(&pp->lock, flags);
- update_reg_offset_bit_pos(irq, &reg_offset, &bit_pos);
- ks_pcie_app_writel(ks_pcie, MSI0_IRQ_ENABLE_CLR + (reg_offset << 4),
+ pci = to_dw_pcie_from_pp(pp);
+ ks_pcie = to_keystone_pcie(pci);
+
+ reg_offset = irq % 8;
+ bit_pos = irq >> 3;
+
+ ks_pcie_app_writel(ks_pcie, MSI_IRQ_ENABLE_SET(reg_offset),
BIT(bit_pos));
+
+ raw_spin_unlock_irqrestore(&pp->lock, flags);
}
-static int ks_pcie_msi_host_init(struct pcie_port *pp)
+static struct irq_chip ks_pcie_msi_irq_chip = {
+ .name = "KEYSTONE-PCI-MSI",
+ .irq_ack = ks_pcie_msi_irq_ack,
+ .irq_compose_msi_msg = ks_pcie_compose_msi_msg,
+ .irq_mask = ks_pcie_msi_mask,
+ .irq_unmask = ks_pcie_msi_unmask,
+};
+
+/**
+ * ks_pcie_set_dbi_mode() - Set DBI mode to access overlaid BAR mask registers
+ * @ks_pcie: A pointer to the keystone_pcie structure which holds the KeyStone
+ * PCIe host controller driver information.
+ *
+ * Since modification of dbi_cs2 involves different clock domain, read the
+ * status back to ensure the transition is complete.
+ */
+static void ks_pcie_set_dbi_mode(struct keystone_pcie *ks_pcie)
{
- return dw_pcie_allocate_domains(pp);
+ u32 val;
+
+ val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+ val |= DBI_CS2;
+ ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
+
+ do {
+ val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+ } while (!(val & DBI_CS2));
}
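The do/while above polls without bound; a hedged alternative sketch using read_poll_timeout() from <linux/iopoll.h>, where the one-second timeout is an assumption rather than a value from the driver:

#include <linux/iopoll.h>

static int demo_set_dbi_mode_bounded(struct keystone_pcie *ks_pcie)
{
	u32 val;

	val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
	ks_pcie_app_writel(ks_pcie, CMD_STATUS, val | DBI_CS2);

	/* Poll for the cross-clock-domain handshake, but give up after 1s. */
	return read_poll_timeout(ks_pcie_app_readl, val, val & DBI_CS2,
				 0, USEC_PER_SEC, false, ks_pcie, CMD_STATUS);
}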
-static void ks_pcie_enable_legacy_irqs(struct keystone_pcie *ks_pcie)
+/**
+ * ks_pcie_clear_dbi_mode() - Disable DBI mode
+ * @ks_pcie: A pointer to the keystone_pcie structure which holds the KeyStone
+ * PCIe host controller driver information.
+ *
+ * Since modification of dbi_cs2 involves different clock domain, read the
+ * status back to ensure the transition is complete.
+ */
+static void ks_pcie_clear_dbi_mode(struct keystone_pcie *ks_pcie)
{
- int i;
+ u32 val;
- for (i = 0; i < PCI_NUM_INTX; i++)
- ks_pcie_app_writel(ks_pcie, IRQ_ENABLE_SET + (i << 4), 0x1);
+ val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+ val &= ~DBI_CS2;
+ ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
+
+ do {
+ val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+ } while (val & DBI_CS2);
}
-static void ks_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie,
- int offset)
+static int ks_pcie_msi_host_init(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
+
+ /* Configure and set up BAR0 */
+ ks_pcie_set_dbi_mode(ks_pcie);
+
+ /* Enable BAR0 */
+ dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 1);
+ dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, SZ_4K - 1);
+
+ ks_pcie_clear_dbi_mode(ks_pcie);
+
+ /*
+ * For BAR0, just setting bus address for inbound writes (MSI) should
+ * be sufficient. Use physical address to avoid any conflicts.
+ */
+ dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, ks_pcie->app.start);
+
+ pp->msi_irq_chip = &ks_pcie_msi_irq_chip;
+ return dw_pcie_allocate_domains(pp);
+}
+
+static void ks_pcie_handle_intx_irq(struct keystone_pcie *ks_pcie,
+ int offset)
{
struct dw_pcie *pci = ks_pcie->pci;
struct device *dev = pci->dev;
u32 pending;
- int virq;
- pending = ks_pcie_app_readl(ks_pcie, IRQ_STATUS + (offset << 4));
+ pending = ks_pcie_app_readl(ks_pcie, IRQ_STATUS(offset));
if (BIT(0) & pending) {
- virq = irq_linear_revmap(ks_pcie->legacy_irq_domain, offset);
- dev_dbg(dev, ": irq: irq_offset %d, virq %d\n", offset, virq);
- generic_handle_irq(virq);
+ dev_dbg(dev, ": irq: irq_offset %d", offset);
+ generic_handle_domain_irq(ks_pcie->intx_irq_domain, offset);
}
/* EOI the INTx interrupt */
@@ -255,10 +356,10 @@ static irqreturn_t ks_pcie_handle_error_irq(struct keystone_pcie *ks_pcie)
if (reg & ERR_CORR)
dev_dbg(dev, "Correctable Error\n");
- if (reg & ERR_AXI)
+ if (!ks_pcie->is_am6 && (reg & ERR_AXI))
dev_err(dev, "AXI tag lookup fatal Error\n");
- if (reg & ERR_AER)
+ if (reg & ERR_AER || (ks_pcie->is_am6 && (reg & AM6_ERR_AER)))
dev_err(dev, "ECRC Error\n");
ks_pcie_app_writel(ks_pcie, ERR_IRQ_STATUS, reg);
@@ -266,96 +367,68 @@ static irqreturn_t ks_pcie_handle_error_irq(struct keystone_pcie *ks_pcie)
return IRQ_HANDLED;
}
-static void ks_pcie_ack_legacy_irq(struct irq_data *d)
+static void ks_pcie_ack_intx_irq(struct irq_data *d)
{
}
-static void ks_pcie_mask_legacy_irq(struct irq_data *d)
+static void ks_pcie_mask_intx_irq(struct irq_data *d)
{
}
-static void ks_pcie_unmask_legacy_irq(struct irq_data *d)
+static void ks_pcie_unmask_intx_irq(struct irq_data *d)
{
}
-static struct irq_chip ks_pcie_legacy_irq_chip = {
- .name = "Keystone-PCI-Legacy-IRQ",
- .irq_ack = ks_pcie_ack_legacy_irq,
- .irq_mask = ks_pcie_mask_legacy_irq,
- .irq_unmask = ks_pcie_unmask_legacy_irq,
+static struct irq_chip ks_pcie_intx_irq_chip = {
+ .name = "Keystone-PCI-INTX-IRQ",
+ .irq_ack = ks_pcie_ack_intx_irq,
+ .irq_mask = ks_pcie_mask_intx_irq,
+ .irq_unmask = ks_pcie_unmask_intx_irq,
};
-static int ks_pcie_init_legacy_irq_map(struct irq_domain *d,
- unsigned int irq,
- irq_hw_number_t hw_irq)
+static int ks_pcie_init_intx_irq_map(struct irq_domain *d,
+ unsigned int irq, irq_hw_number_t hw_irq)
{
- irq_set_chip_and_handler(irq, &ks_pcie_legacy_irq_chip,
+ irq_set_chip_and_handler(irq, &ks_pcie_intx_irq_chip,
handle_level_irq);
irq_set_chip_data(irq, d->host_data);
return 0;
}
-static const struct irq_domain_ops ks_pcie_legacy_irq_domain_ops = {
- .map = ks_pcie_init_legacy_irq_map,
+static const struct irq_domain_ops ks_pcie_intx_irq_domain_ops = {
+ .map = ks_pcie_init_intx_irq_map,
.xlate = irq_domain_xlate_onetwocell,
};
-/**
- * ks_pcie_set_dbi_mode() - Set DBI mode to access overlaid BAR mask
- * registers
- *
- * Since modification of dbi_cs2 involves different clock domain, read the
- * status back to ensure the transition is complete.
- */
-static void ks_pcie_set_dbi_mode(struct keystone_pcie *ks_pcie)
-{
- u32 val;
-
- val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
- val |= DBI_CS2;
- ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
-
- do {
- val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
- } while (!(val & DBI_CS2));
-}
-
-/**
- * ks_pcie_clear_dbi_mode() - Disable DBI mode
- *
- * Since modification of dbi_cs2 involves different clock domain, read the
- * status back to ensure the transition is complete.
- */
-static void ks_pcie_clear_dbi_mode(struct keystone_pcie *ks_pcie)
-{
- u32 val;
-
- val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
- val &= ~DBI_CS2;
- ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
-
- do {
- val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
- } while (val & DBI_CS2);
-}
-
-static void ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
+static int ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
{
u32 val;
u32 num_viewport = ks_pcie->num_viewport;
struct dw_pcie *pci = ks_pcie->pci;
- struct pcie_port *pp = &pci->pp;
- u64 start = pp->mem->start;
- u64 end = pp->mem->end;
+ struct dw_pcie_rp *pp = &pci->pp;
+ struct resource_entry *entry;
+ struct resource *mem;
+ u64 start, end;
int i;
+ entry = resource_list_first_type(&pp->bridge->windows, IORESOURCE_MEM);
+ if (!entry)
+ return -ENODEV;
+
+ mem = entry->res;
+ start = mem->start;
+ end = mem->end;
+
/* Disable BARs for inbound access */
ks_pcie_set_dbi_mode(ks_pcie);
dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);
dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0);
ks_pcie_clear_dbi_mode(ks_pcie);
+ if (ks_pcie->is_am6)
+ return 0;
+
val = ilog2(OB_WIN_SIZE);
ks_pcie_app_writel(ks_pcie, OB_SIZE, val);
@@ -365,162 +438,113 @@ static void ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
lower_32_bits(start) | OB_ENABLEN);
ks_pcie_app_writel(ks_pcie, OB_OFFSET_HI(i),
upper_32_bits(start));
- start += OB_WIN_SIZE;
+ start += OB_WIN_SIZE * SZ_1M;
}
val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
val |= OB_XLAT_EN_VAL;
ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
-}
-static int ks_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
- unsigned int devfn, int where, int size,
- u32 *val)
-{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
- u32 reg;
-
- reg = CFG_BUS(bus->number) | CFG_DEVICE(PCI_SLOT(devfn)) |
- CFG_FUNC(PCI_FUNC(devfn));
- if (bus->parent->number != pp->root_bus_nr)
- reg |= CFG_TYPE1;
- ks_pcie_app_writel(ks_pcie, CFG_SETUP, reg);
-
- return dw_pcie_read(pp->va_cfg0_base + where, size, val);
+ return 0;
}
-static int ks_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
- unsigned int devfn, int where, int size,
- u32 val)
+static void __iomem *ks_pcie_other_map_bus(struct pci_bus *bus,
+ unsigned int devfn, int where)
{
+ struct dw_pcie_rp *pp = bus->sysdata;
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
u32 reg;
+ /*
+ * Checking whether the link is up here is a last line of defense
+ * against platforms that forward errors on the system bus as
+ * SError upon PCI configuration transactions issued when the link
+ * is down. This check is racy by definition and does not stop
+ * the system from triggering an SError if the link goes down
+ * after this check is performed.
+ */
+ if (!dw_pcie_link_up(pci))
+ return NULL;
+
reg = CFG_BUS(bus->number) | CFG_DEVICE(PCI_SLOT(devfn)) |
CFG_FUNC(PCI_FUNC(devfn));
- if (bus->parent->number != pp->root_bus_nr)
+ if (!pci_is_root_bus(bus->parent))
reg |= CFG_TYPE1;
ks_pcie_app_writel(ks_pcie, CFG_SETUP, reg);
- return dw_pcie_write(pp->va_cfg0_base + where, size, val);
+ return pp->va_cfg0_base + where;
}
-/**
- * ks_pcie_v3_65_scan_bus() - keystone scan_bus post initialization
- *
- * This sets BAR0 to enable inbound access for MSI_IRQ register
- */
-static void ks_pcie_v3_65_scan_bus(struct pcie_port *pp)
-{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
-
- /* Configure and set up BAR0 */
- ks_pcie_set_dbi_mode(ks_pcie);
-
- /* Enable BAR0 */
- dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 1);
- dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, SZ_4K - 1);
-
- ks_pcie_clear_dbi_mode(ks_pcie);
+static struct pci_ops ks_child_pcie_ops = {
+ .map_bus = ks_pcie_other_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+};
- /*
- * For BAR0, just setting bus address for inbound writes (MSI) should
- * be sufficient. Use physical address to avoid any conflicts.
- */
- dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, ks_pcie->app.start);
-}
+static struct pci_ops ks_pcie_ops = {
+ .map_bus = dw_pcie_own_conf_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+};
/**
* ks_pcie_link_up() - Check if link up
+ * @pci: A pointer to the dw_pcie structure which holds the DesignWare PCIe host
+ * controller driver information.
*/
-static int ks_pcie_link_up(struct dw_pcie *pci)
+static bool ks_pcie_link_up(struct dw_pcie *pci)
{
u32 val;
val = dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG0);
- val &= PORT_LOGIC_LTSSM_STATE_MASK;
- return (val == PORT_LOGIC_LTSSM_STATE_L0);
+ return (val & PORT_LOGIC_LTSSM_STATE_MASK) == PORT_LOGIC_LTSSM_STATE_L0;
}
-static void ks_pcie_initiate_link_train(struct keystone_pcie *ks_pcie)
+static void ks_pcie_stop_link(struct dw_pcie *pci)
{
+ struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
u32 val;
/* Disable Link training */
val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
val &= ~LTSSM_EN_VAL;
- ks_pcie_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val);
-
- /* Initiate Link Training */
- val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
- ks_pcie_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val);
+ ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
}
-/**
- * ks_pcie_dw_host_init() - initialize host for v3_65 dw hardware
- *
- * Ioremap the register resources, initialize legacy irq domain
- * and call dw_pcie_v3_65_host_init() API to initialize the Keystone
- * PCI host controller.
- */
-static int __init ks_pcie_dw_host_init(struct keystone_pcie *ks_pcie)
+static int ks_pcie_start_link(struct dw_pcie *pci)
{
- struct dw_pcie *pci = ks_pcie->pci;
- struct pcie_port *pp = &pci->pp;
- struct device *dev = pci->dev;
- struct platform_device *pdev = to_platform_device(dev);
- struct resource *res;
-
- /* Index 0 is the config reg. space address */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pci->dbi_base = devm_pci_remap_cfg_resource(dev, res);
- if (IS_ERR(pci->dbi_base))
- return PTR_ERR(pci->dbi_base);
-
- /*
- * We set these same and is used in pcie rd/wr_other_conf
- * functions
- */
- pp->va_cfg0_base = pci->dbi_base + SPACE0_REMOTE_CFG_OFFSET;
- pp->va_cfg1_base = pp->va_cfg0_base;
-
- /* Index 1 is the application reg. space address */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- ks_pcie->va_app_base = devm_ioremap_resource(dev, res);
- if (IS_ERR(ks_pcie->va_app_base))
- return PTR_ERR(ks_pcie->va_app_base);
-
- ks_pcie->app = *res;
+ struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
+ u32 val;
- /* Create legacy IRQ domain */
- ks_pcie->legacy_irq_domain =
- irq_domain_add_linear(ks_pcie->legacy_intc_np,
- PCI_NUM_INTX,
- &ks_pcie_legacy_irq_domain_ops,
- NULL);
- if (!ks_pcie->legacy_irq_domain) {
- dev_err(dev, "Failed to add irq domain for legacy irqs\n");
- return -EINVAL;
- }
+ /* Initiate Link Training */
+ val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+ ks_pcie_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val);
- return dw_pcie_host_init(pp);
+ return 0;
}
static void ks_pcie_quirk(struct pci_dev *dev)
{
struct pci_bus *bus = dev->bus;
+ struct keystone_pcie *ks_pcie;
+ struct device *bridge_dev;
struct pci_dev *bridge;
+ u32 val;
+
static const struct pci_device_id rc_pci_devids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2HK),
- .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
+ .class = PCI_CLASS_BRIDGE_PCI_NORMAL, .class_mask = ~0, },
{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2E),
- .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
+ .class = PCI_CLASS_BRIDGE_PCI_NORMAL, .class_mask = ~0, },
{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2L),
- .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
+ .class = PCI_CLASS_BRIDGE_PCI_NORMAL, .class_mask = ~0, },
{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2G),
+ .class = PCI_CLASS_BRIDGE_PCI_NORMAL, .class_mask = ~0, },
+ { 0, },
+ };
+ static const struct pci_device_id am6_pci_devids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM654X),
.class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
{ 0, },
};
@@ -545,41 +569,49 @@ static void ks_pcie_quirk(struct pci_dev *dev)
*/
if (pci_match_id(rc_pci_devids, bridge)) {
if (pcie_get_readrq(dev) > 256) {
- dev_info(&dev->dev, "limiting MRRS to 256\n");
+ dev_info(&dev->dev, "limiting MRRS to 256 bytes\n");
pcie_set_readrq(dev, 256);
}
}
-}
-DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, ks_pcie_quirk);
-static int ks_pcie_establish_link(struct keystone_pcie *ks_pcie)
-{
- struct dw_pcie *pci = ks_pcie->pci;
- struct device *dev = pci->dev;
-
- if (dw_pcie_link_up(pci)) {
- dev_info(dev, "Link already up\n");
- return 0;
+ /*
+ * Memory transactions fail with PCI controller in AM654 PG1.0
+ * when MRRS is set to more than 128 bytes. Force the MRRS to
+ * 128 bytes in all downstream devices.
+ */
+ if (pci_match_id(am6_pci_devids, bridge)) {
+ bridge_dev = pci_get_host_bridge_device(dev);
+ if (!bridge_dev || !bridge_dev->parent)
+ return;
+
+ ks_pcie = dev_get_drvdata(bridge_dev->parent);
+ if (!ks_pcie)
+ return;
+
+ val = ks_pcie_app_readl(ks_pcie, PID);
+ val &= RTL;
+ val >>= RTL_SHIFT;
+ if (val != AM6_PCI_PG1_RTL_VER)
+ return;
+
+ if (pcie_get_readrq(dev) > 128) {
+ dev_info(&dev->dev, "limiting MRRS to 128 bytes\n");
+ pcie_set_readrq(dev, 128);
+ }
}
-
- ks_pcie_initiate_link_train(ks_pcie);
-
- /* check if the link is up or not */
- if (!dw_pcie_wait_for_link(pci))
- return 0;
-
- dev_err(dev, "phy link never came up\n");
- return -ETIMEDOUT;
}
+DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, ks_pcie_quirk);
static void ks_pcie_msi_irq_handler(struct irq_desc *desc)
{
- unsigned int irq = irq_desc_get_irq(desc);
+ unsigned int irq = desc->irq_data.hwirq;
struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
- u32 offset = irq - ks_pcie->msi_host_irqs[0];
+ u32 offset = irq - ks_pcie->msi_host_irq;
struct dw_pcie *pci = ks_pcie->pci;
+ struct dw_pcie_rp *pp = &pci->pp;
struct device *dev = pci->dev;
struct irq_chip *chip = irq_desc_get_chip(desc);
+ u32 vector, reg, pos;
dev_dbg(dev, "%s, irq %d\n", __func__, irq);
@@ -589,28 +621,41 @@ static void ks_pcie_msi_irq_handler(struct irq_desc *desc)
* ack operation.
*/
chained_irq_enter(chip, desc);
- ks_pcie_handle_msi_irq(ks_pcie, offset);
+
+ reg = ks_pcie_app_readl(ks_pcie, MSI_IRQ_STATUS(offset));
+ /*
+	 * MSI0 status bits 0-3 correspond to vectors 0, 8, 16 and 24;
+	 * MSI1 status bits correspond to vectors 1, 9, 17 and 25; and
+	 * so forth.
+ */
+ for (pos = 0; pos < 4; pos++) {
+ if (!(reg & BIT(pos)))
+ continue;
+
+ vector = offset + (pos << 3);
+ dev_dbg(dev, "irq: bit %d, vector %d\n", pos, vector);
+ generic_handle_domain_irq(pp->irq_domain, vector);
+ }
+
chained_irq_exit(chip, desc);
}
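/*
 * Illustrative sketch (editorial, not part of the driver): the loop above
 * maps status bit 'pos' of MSI bank 'offset' to vector 'offset + pos * 8'.
 */
static inline u32 ks_pcie_msi_vector_sketch(u32 offset, u32 pos)
{
	/* e.g. bank 1, bit 2 -> vector 1 + (2 << 3) = 17 */
	return offset + (pos << 3);
}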
/**
- * ks_pcie_legacy_irq_handler() - Handle legacy interrupt
- * @irq: IRQ line for legacy interrupts
+ * ks_pcie_intx_irq_handler() - Handle INTX interrupt
* @desc: Pointer to irq descriptor
*
- * Traverse through pending legacy interrupts and invoke handler for each. Also
+ * Traverse through pending INTX interrupts and invoke handler for each. Also
* takes care of interrupt controller level mask/ack operation.
*/
-static void ks_pcie_legacy_irq_handler(struct irq_desc *desc)
+static void ks_pcie_intx_irq_handler(struct irq_desc *desc)
{
unsigned int irq = irq_desc_get_irq(desc);
struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
struct dw_pcie *pci = ks_pcie->pci;
struct device *dev = pci->dev;
- u32 irq_offset = irq - ks_pcie->legacy_host_irqs[0];
+ u32 irq_offset = irq - ks_pcie->intx_host_irqs[0];
struct irq_chip *chip = irq_desc_get_chip(desc);
- dev_dbg(dev, ": Handling legacy irq %d\n", irq);
+	dev_dbg(dev, "Handling INTX irq %d\n", irq);
/*
* The chained irq handler installation would have replaced normal
@@ -618,114 +663,122 @@ static void ks_pcie_legacy_irq_handler(struct irq_desc *desc)
* ack operation.
*/
chained_irq_enter(chip, desc);
- ks_pcie_handle_legacy_irq(ks_pcie, irq_offset);
+ ks_pcie_handle_intx_irq(ks_pcie, irq_offset);
chained_irq_exit(chip, desc);
}
-static int ks_pcie_get_irq_controller_info(struct keystone_pcie *ks_pcie,
- char *controller, int *num_irqs)
+static int ks_pcie_config_msi_irq(struct keystone_pcie *ks_pcie)
{
- int temp, max_host_irqs, legacy = 1, *host_irqs;
struct device *dev = ks_pcie->pci->dev;
- struct device_node *np_pcie = dev->of_node, **np_temp;
-
- if (!strcmp(controller, "msi-interrupt-controller"))
- legacy = 0;
-
- if (legacy) {
- np_temp = &ks_pcie->legacy_intc_np;
- max_host_irqs = PCI_NUM_INTX;
- host_irqs = &ks_pcie->legacy_host_irqs[0];
- } else {
- np_temp = &ks_pcie->msi_intc_np;
- max_host_irqs = MAX_MSI_HOST_IRQS;
- host_irqs = &ks_pcie->msi_host_irqs[0];
- }
+ struct device_node *np = ks_pcie->np;
+ struct device_node *intc_np;
+ struct irq_data *irq_data;
+ int irq_count, irq, ret, i;
- /* interrupt controller is in a child node */
- *np_temp = of_get_child_by_name(np_pcie, controller);
- if (!(*np_temp)) {
- dev_err(dev, "Node for %s is absent\n", controller);
- return -EINVAL;
- }
+ if (!IS_ENABLED(CONFIG_PCI_MSI))
+ return 0;
- temp = of_irq_count(*np_temp);
- if (!temp) {
- dev_err(dev, "No IRQ entries in %s\n", controller);
- of_node_put(*np_temp);
+ intc_np = of_get_child_by_name(np, "msi-interrupt-controller");
+ if (!intc_np) {
+ if (ks_pcie->is_am6)
+ return 0;
+ dev_warn(dev, "msi-interrupt-controller node is absent\n");
return -EINVAL;
}
- if (temp > max_host_irqs)
- dev_warn(dev, "Too many %s interrupts defined %u\n",
- (legacy ? "legacy" : "MSI"), temp);
-
- /*
- * support upto max_host_irqs. In dt from index 0 to 3 (legacy) or 0 to
- * 7 (MSI)
- */
- for (temp = 0; temp < max_host_irqs; temp++) {
- host_irqs[temp] = irq_of_parse_and_map(*np_temp, temp);
- if (!host_irqs[temp])
- break;
+ irq_count = of_irq_count(intc_np);
+ if (!irq_count) {
+ dev_err(dev, "No IRQ entries in msi-interrupt-controller\n");
+ ret = -EINVAL;
+ goto err;
}
- of_node_put(*np_temp);
+ for (i = 0; i < irq_count; i++) {
+ irq = irq_of_parse_and_map(intc_np, i);
+ if (!irq) {
+ ret = -EINVAL;
+ goto err;
+ }
- if (temp) {
- *num_irqs = temp;
- return 0;
+ if (!ks_pcie->msi_host_irq) {
+ irq_data = irq_get_irq_data(irq);
+ if (!irq_data) {
+ ret = -EINVAL;
+ goto err;
+ }
+ ks_pcie->msi_host_irq = irq_data->hwirq;
+ }
+
+ irq_set_chained_handler_and_data(irq, ks_pcie_msi_irq_handler,
+ ks_pcie);
}
- return -EINVAL;
+ of_node_put(intc_np);
+ return 0;
+
+err:
+ of_node_put(intc_np);
+ return ret;
}
-static void ks_pcie_setup_interrupts(struct keystone_pcie *ks_pcie)
+static int ks_pcie_config_intx_irq(struct keystone_pcie *ks_pcie)
{
- int i;
-
- /* Legacy IRQ */
- for (i = 0; i < ks_pcie->num_legacy_host_irqs; i++) {
- irq_set_chained_handler_and_data(ks_pcie->legacy_host_irqs[i],
- ks_pcie_legacy_irq_handler,
- ks_pcie);
- }
- ks_pcie_enable_legacy_irqs(ks_pcie);
-
- /* MSI IRQ */
- if (IS_ENABLED(CONFIG_PCI_MSI)) {
- for (i = 0; i < ks_pcie->num_msi_host_irqs; i++) {
- irq_set_chained_handler_and_data(ks_pcie->msi_host_irqs[i],
- ks_pcie_msi_irq_handler,
- ks_pcie);
- }
+ struct device *dev = ks_pcie->pci->dev;
+ struct irq_domain *intx_irq_domain;
+ struct device_node *np = ks_pcie->np;
+ struct device_node *intc_np;
+ int irq_count, irq, ret = 0, i;
+
+ intc_np = of_get_child_by_name(np, "legacy-interrupt-controller");
+ if (!intc_np) {
+ /*
+		 * Since INTX interrupts are modeled as edge interrupts on
+		 * AM6, keep them disabled for now.
+ */
+ if (ks_pcie->is_am6)
+ return 0;
+ dev_warn(dev, "legacy-interrupt-controller node is absent\n");
+ return -EINVAL;
}
- if (ks_pcie->error_irq > 0)
- ks_pcie_enable_error_irq(ks_pcie);
-}
+ irq_count = of_irq_count(intc_np);
+ if (!irq_count) {
+ dev_err(dev, "No IRQ entries in legacy-interrupt-controller\n");
+ ret = -EINVAL;
+ goto err;
+ }
-/*
- * When a PCI device does not exist during config cycles, keystone host gets a
- * bus error instead of returning 0xffffffff. This handler always returns 0
- * for this kind of faults.
- */
-static int ks_pcie_fault(unsigned long addr, unsigned int fsr,
- struct pt_regs *regs)
-{
- unsigned long instr = *(unsigned long *) instruction_pointer(regs);
+ for (i = 0; i < irq_count; i++) {
+ irq = irq_of_parse_and_map(intc_np, i);
+ if (!irq) {
+ ret = -EINVAL;
+ goto err;
+ }
+ ks_pcie->intx_host_irqs[i] = irq;
- if ((instr & 0x0e100090) == 0x00100090) {
- int reg = (instr >> 12) & 15;
+ irq_set_chained_handler_and_data(irq,
+ ks_pcie_intx_irq_handler,
+ ks_pcie);
+ }
- regs->uregs[reg] = -1;
- regs->ARM_pc += 4;
+ intx_irq_domain = irq_domain_create_linear(of_fwnode_handle(intc_np), PCI_NUM_INTX,
+ &ks_pcie_intx_irq_domain_ops, NULL);
+ if (!intx_irq_domain) {
+ dev_err(dev, "Failed to add irq domain for INTX irqs\n");
+ ret = -EINVAL;
+ goto err;
}
+ ks_pcie->intx_irq_domain = intx_irq_domain;
- return 0;
+ for (i = 0; i < PCI_NUM_INTX; i++)
+ ks_pcie_app_writel(ks_pcie, IRQ_ENABLE_SET(i), INTx_EN);
+
+err:
+ of_node_put(intc_np);
+ return ret;
}
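/*
 * Editorial note: the success path above intentionally falls through to
 * the 'err' label; of_node_put(intc_np) must run on both success and
 * failure, and 'ret' is still 0 when no error occurred.
 */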
-static int __init ks_pcie_init_id(struct keystone_pcie *ks_pcie)
+static int ks_pcie_init_id(struct keystone_pcie *ks_pcie)
{
int ret;
unsigned int id;
@@ -733,32 +786,53 @@ static int __init ks_pcie_init_id(struct keystone_pcie *ks_pcie)
struct dw_pcie *pci = ks_pcie->pci;
struct device *dev = pci->dev;
struct device_node *np = dev->of_node;
+ struct of_phandle_args args;
+ unsigned int offset = 0;
devctrl_regs = syscon_regmap_lookup_by_phandle(np, "ti,syscon-pcie-id");
if (IS_ERR(devctrl_regs))
return PTR_ERR(devctrl_regs);
- ret = regmap_read(devctrl_regs, 0, &id);
+ /* Do not error out to maintain old DT compatibility */
+ ret = of_parse_phandle_with_fixed_args(np, "ti,syscon-pcie-id", 1, 0, &args);
+ if (!ret)
+ offset = args.args[0];
+
+ ret = regmap_read(devctrl_regs, offset, &id);
if (ret)
return ret;
+ dw_pcie_dbi_ro_wr_en(pci);
dw_pcie_writew_dbi(pci, PCI_VENDOR_ID, id & PCIE_VENDORID_MASK);
dw_pcie_writew_dbi(pci, PCI_DEVICE_ID, id >> PCIE_DEVICEID_SHIFT);
+ dw_pcie_dbi_ro_wr_dis(pci);
return 0;
}
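/*
 * Worked example (editorial; relies on the driver's PCIE_VENDORID_MASK of
 * 0xffff and PCIE_DEVICEID_SHIFT of 16): a hypothetical syscon ID word of
 * 0xb00c104c would program vendor ID 0x104c (TI) and device ID 0xb00c.
 */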
-static int __init ks_pcie_host_init(struct pcie_port *pp)
+static int ks_pcie_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
int ret;
- dw_pcie_setup_rc(pp);
+ pp->bridge->ops = &ks_pcie_ops;
+ if (!ks_pcie->is_am6)
+ pp->bridge->child_ops = &ks_child_pcie_ops;
+
+ ret = ks_pcie_config_intx_irq(ks_pcie);
+ if (ret)
+ return ret;
+
+ ret = ks_pcie_config_msi_irq(ks_pcie);
+ if (ret)
+ return ret;
+
+ ks_pcie_stop_link(pci);
+ ret = ks_pcie_setup_rc_app_regs(ks_pcie);
+ if (ret)
+ return ret;
- ks_pcie_establish_link(ks_pcie);
- ks_pcie_setup_rc_app_regs(ks_pcie);
- ks_pcie_setup_interrupts(ks_pcie);
writew(PCI_IO_RANGE_TYPE_32 | (PCI_IO_RANGE_TYPE_32 << 8),
pci->dbi_base + PCI_IO_BASE);
@@ -766,26 +840,16 @@ static int __init ks_pcie_host_init(struct pcie_port *pp)
if (ret < 0)
return ret;
- /*
- * PCIe access errors that result into OCP errors are caught by ARM as
- * "External aborts"
- */
- hook_fault_code(17, ks_pcie_fault, SIGBUS, 0,
- "Asynchronous external abort");
-
return 0;
}
static const struct dw_pcie_host_ops ks_pcie_host_ops = {
- .rd_other_conf = ks_pcie_rd_other_conf,
- .wr_other_conf = ks_pcie_wr_other_conf,
- .host_init = ks_pcie_host_init,
- .msi_set_irq = ks_pcie_msi_set_irq,
- .msi_clear_irq = ks_pcie_msi_clear_irq,
- .get_msi_addr = ks_pcie_get_msi_addr,
- .msi_host_init = ks_pcie_msi_host_init,
- .msi_irq_ack = ks_pcie_msi_irq_ack,
- .scan_bus = ks_pcie_v3_65_scan_bus,
+ .init = ks_pcie_host_init,
+ .msi_init = ks_pcie_msi_host_init,
+};
+
+static const struct dw_pcie_host_ops ks_pcie_am654_host_ops = {
+ .init = ks_pcie_host_init,
};
static irqreturn_t ks_pcie_err_irq_handler(int irq, void *priv)
@@ -795,65 +859,98 @@ static irqreturn_t ks_pcie_err_irq_handler(int irq, void *priv)
return ks_pcie_handle_error_irq(ks_pcie);
}
-static int __init ks_pcie_add_pcie_port(struct keystone_pcie *ks_pcie,
- struct platform_device *pdev)
+static void ks_pcie_am654_write_dbi2(struct dw_pcie *pci, void __iomem *base,
+ u32 reg, size_t size, u32 val)
+{
+ struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
+
+ ks_pcie_set_dbi_mode(ks_pcie);
+ dw_pcie_write(base + reg, size, val);
+ ks_pcie_clear_dbi_mode(ks_pcie);
+}
+
+static const struct dw_pcie_ops ks_pcie_dw_pcie_ops = {
+ .start_link = ks_pcie_start_link,
+ .stop_link = ks_pcie_stop_link,
+ .link_up = ks_pcie_link_up,
+ .write_dbi2 = ks_pcie_am654_write_dbi2,
+};
+
+static void ks_pcie_am654_ep_init(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ int flags;
+
+ ep->page_size = AM654_WIN_SIZE;
+ flags = PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_32;
+ dw_pcie_writel_dbi2(pci, PCI_BASE_ADDRESS_0, APP_ADDR_SPACE_0 - 1);
+ dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, flags);
+}
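+/*
+ * Editorial note: on DWC controllers, BAR writes through the dbi2 (shadow)
+ * space program the BAR mask register, so the dw_pcie_writel_dbi2() above
+ * sizes BAR0 to APP_ADDR_SPACE_0 while the plain dbi write sets its flags.
+ */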
+
+static void ks_pcie_am654_raise_intx_irq(struct keystone_pcie *ks_pcie)
{
struct dw_pcie *pci = ks_pcie->pci;
- struct pcie_port *pp = &pci->pp;
- struct device *dev = &pdev->dev;
- int ret;
+ u8 int_pin;
- ret = ks_pcie_get_irq_controller_info(ks_pcie,
- "legacy-interrupt-controller",
- &ks_pcie->num_legacy_host_irqs);
- if (ret)
- return ret;
+ int_pin = dw_pcie_readb_dbi(pci, PCI_INTERRUPT_PIN);
+ if (int_pin == 0 || int_pin > 4)
+ return;
- if (IS_ENABLED(CONFIG_PCI_MSI)) {
- ret = ks_pcie_get_irq_controller_info(ks_pcie,
- "msi-interrupt-controller",
- &ks_pcie->num_msi_host_irqs);
- if (ret)
- return ret;
- }
+ ks_pcie_app_writel(ks_pcie, PCIE_LEGACY_IRQ_ENABLE_SET(int_pin),
+ INT_ENABLE);
+ ks_pcie_app_writel(ks_pcie, PCIE_EP_IRQ_SET, INT_ENABLE);
+ mdelay(1);
+ ks_pcie_app_writel(ks_pcie, PCIE_EP_IRQ_CLR, INT_ENABLE);
+ ks_pcie_app_writel(ks_pcie, PCIE_LEGACY_IRQ_ENABLE_CLR(int_pin),
+ INT_ENABLE);
+}
- /*
- * Index 0 is the platform interrupt for error interrupt
- * from RC. This is optional.
- */
- ks_pcie->error_irq = irq_of_parse_and_map(ks_pcie->np, 0);
- if (ks_pcie->error_irq <= 0)
- dev_info(dev, "no error IRQ defined\n");
- else {
- ret = request_irq(ks_pcie->error_irq, ks_pcie_err_irq_handler,
- IRQF_SHARED, "pcie-error-irq", ks_pcie);
- if (ret < 0) {
- dev_err(dev, "failed to request error IRQ %d\n",
- ks_pcie->error_irq);
- return ret;
- }
- }
+static int ks_pcie_am654_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
+ unsigned int type, u16 interrupt_num)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
- pp->ops = &ks_pcie_host_ops;
- ret = ks_pcie_dw_host_init(ks_pcie);
- if (ret) {
- dev_err(dev, "failed to initialize host\n");
- return ret;
+ switch (type) {
+ case PCI_IRQ_INTX:
+ ks_pcie_am654_raise_intx_irq(ks_pcie);
+ break;
+ case PCI_IRQ_MSI:
+ dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
+ break;
+ case PCI_IRQ_MSIX:
+ dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num);
+ break;
+ default:
+ dev_err(pci->dev, "UNKNOWN IRQ type\n");
+ return -EINVAL;
}
return 0;
}
-static const struct of_device_id ks_pcie_of_match[] = {
- {
- .type = "pci",
- .compatible = "ti,keystone-pcie",
- },
- { },
+static const struct pci_epc_features ks_pcie_am654_epc_features = {
+ .msi_capable = true,
+ .msix_capable = true,
+ .bar[BAR_0] = { .type = BAR_RESERVED, },
+ .bar[BAR_1] = { .type = BAR_RESERVED, },
+ .bar[BAR_2] = { .type = BAR_RESIZABLE, },
+ .bar[BAR_3] = { .type = BAR_FIXED, .fixed_size = SZ_64K, },
+ .bar[BAR_4] = { .type = BAR_FIXED, .fixed_size = 256, },
+ .bar[BAR_5] = { .type = BAR_RESIZABLE, },
+ .align = SZ_64K,
};
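/*
 * Editorial note on the table above: BAR_RESERVED marks BARs an endpoint
 * function must not use, BAR_FIXED pins a BAR to the given size, and
 * BAR_RESIZABLE lets the BAR size be renegotiated; the 64K alignment matches
 * the AM654 outbound window granularity (AM654_WIN_SIZE) used for
 * ep->page_size.
 */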
-static const struct dw_pcie_ops ks_pcie_dw_pcie_ops = {
- .link_up = ks_pcie_link_up,
+static const struct pci_epc_features*
+ks_pcie_am654_get_features(struct dw_pcie_ep *ep)
+{
+ return &ks_pcie_am654_epc_features;
+}
+
+static const struct dw_pcie_ep_ops ks_pcie_am654_ep_ops = {
+ .init = ks_pcie_am654_ep_init,
+ .raise_irq = ks_pcie_am654_raise_irq,
+ .get_features = &ks_pcie_am654_get_features,
};
static void ks_pcie_disable_phy(struct keystone_pcie *ks_pcie)
@@ -873,6 +970,10 @@ static int ks_pcie_enable_phy(struct keystone_pcie *ks_pcie)
int num_lanes = ks_pcie->num_lanes;
for (i = 0; i < num_lanes; i++) {
+ ret = phy_reset(ks_pcie->phy[i]);
+ if (ret < 0)
+ goto err_phy;
+
ret = phy_init(ks_pcie->phy[i]);
if (ret < 0)
goto err_phy;
@@ -895,20 +996,148 @@ err_phy:
return ret;
}
-static int __init ks_pcie_probe(struct platform_device *pdev)
+static int ks_pcie_set_mode(struct device *dev)
+{
+ struct device_node *np = dev->of_node;
+ struct of_phandle_args args;
+ unsigned int offset = 0;
+ struct regmap *syscon;
+ u32 val;
+ u32 mask;
+ int ret = 0;
+
+ syscon = syscon_regmap_lookup_by_phandle(np, "ti,syscon-pcie-mode");
+ if (IS_ERR(syscon))
+ return 0;
+
+ /* Do not error out to maintain old DT compatibility */
+ ret = of_parse_phandle_with_fixed_args(np, "ti,syscon-pcie-mode", 1, 0, &args);
+ if (!ret)
+ offset = args.args[0];
+
+ mask = KS_PCIE_DEV_TYPE_MASK | KS_PCIE_SYSCLOCKOUTEN;
+ val = KS_PCIE_DEV_TYPE(RC) | KS_PCIE_SYSCLOCKOUTEN;
+
+ ret = regmap_update_bits(syscon, offset, mask, val);
+ if (ret) {
+ dev_err(dev, "failed to set pcie mode\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ks_pcie_am654_set_mode(struct device *dev,
+ enum dw_pcie_device_mode mode)
{
+ struct device_node *np = dev->of_node;
+ struct of_phandle_args args;
+ unsigned int offset = 0;
+ struct regmap *syscon;
+ u32 val;
+ u32 mask;
+ int ret = 0;
+
+ syscon = syscon_regmap_lookup_by_phandle(np, "ti,syscon-pcie-mode");
+ if (IS_ERR(syscon))
+ return 0;
+
+ /* Do not error out to maintain old DT compatibility */
+ ret = of_parse_phandle_with_fixed_args(np, "ti,syscon-pcie-mode", 1, 0, &args);
+ if (!ret)
+ offset = args.args[0];
+
+ mask = AM654_PCIE_DEV_TYPE_MASK;
+
+ switch (mode) {
+ case DW_PCIE_RC_TYPE:
+ val = RC;
+ break;
+ case DW_PCIE_EP_TYPE:
+ val = EP;
+ break;
+ default:
+ dev_err(dev, "INVALID device type %d\n", mode);
+ return -EINVAL;
+ }
+
+ ret = regmap_update_bits(syscon, offset, mask, val);
+ if (ret) {
+ dev_err(dev, "failed to set pcie mode\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct ks_pcie_of_data ks_pcie_rc_of_data = {
+ .host_ops = &ks_pcie_host_ops,
+ .mode = DW_PCIE_RC_TYPE,
+ .version = DW_PCIE_VER_365A,
+};
+
+static const struct ks_pcie_of_data ks_pcie_am654_rc_of_data = {
+ .host_ops = &ks_pcie_am654_host_ops,
+ .mode = DW_PCIE_RC_TYPE,
+ .version = DW_PCIE_VER_490A,
+};
+
+static const struct ks_pcie_of_data ks_pcie_am654_ep_of_data = {
+ .ep_ops = &ks_pcie_am654_ep_ops,
+ .mode = DW_PCIE_EP_TYPE,
+ .version = DW_PCIE_VER_490A,
+};
+
+static const struct of_device_id ks_pcie_of_match[] = {
+ {
+ .type = "pci",
+ .data = &ks_pcie_rc_of_data,
+ .compatible = "ti,keystone-pcie",
+ },
+ {
+ .data = &ks_pcie_am654_rc_of_data,
+ .compatible = "ti,am654-pcie-rc",
+ },
+ {
+ .data = &ks_pcie_am654_ep_of_data,
+ .compatible = "ti,am654-pcie-ep",
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, ks_pcie_of_match);
+
+static int ks_pcie_probe(struct platform_device *pdev)
+{
+ const struct dw_pcie_host_ops *host_ops;
+ const struct dw_pcie_ep_ops *ep_ops;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
+ const struct ks_pcie_of_data *data;
+ enum dw_pcie_device_mode mode;
struct dw_pcie *pci;
struct keystone_pcie *ks_pcie;
struct device_link **link;
+ struct gpio_desc *gpiod;
+ struct resource *res;
+ void __iomem *base;
u32 num_viewport;
struct phy **phy;
u32 num_lanes;
char name[10];
+ u32 version;
int ret;
+ int irq;
int i;
+ data = of_device_get_match_data(dev);
+ if (!data)
+ return -EINVAL;
+
+ version = data->version;
+ host_ops = data->host_ops;
+ ep_ops = data->ep_ops;
+ mode = data->mode;
+
ks_pcie = devm_kzalloc(dev, sizeof(*ks_pcie), GFP_KERNEL);
if (!ks_pcie)
return -ENOMEM;
@@ -917,12 +1146,36 @@ static int __init ks_pcie_probe(struct platform_device *pdev)
if (!pci)
return -ENOMEM;
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "app");
+ ks_pcie->va_app_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(ks_pcie->va_app_base))
+ return PTR_ERR(ks_pcie->va_app_base);
+
+ ks_pcie->app = *res;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbics");
+ base = devm_pci_remap_cfg_resource(dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ if (of_device_is_compatible(np, "ti,am654-pcie-rc"))
+ ks_pcie->is_am6 = true;
+
+ pci->dbi_base = base;
+ pci->dbi_base2 = base;
pci->dev = dev;
pci->ops = &ks_pcie_dw_pcie_ops;
+ pci->version = version;
- ret = of_property_read_u32(np, "num-viewport", &num_viewport);
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_irq(dev, irq, ks_pcie_err_irq_handler, IRQF_SHARED,
+ "ks-pcie-error-irq", ks_pcie);
if (ret < 0) {
- dev_err(dev, "unable to read *num-viewport* property\n");
+		dev_err(dev, "failed to request error IRQ %d\n", irq);
return ret;
}
@@ -930,11 +1183,11 @@ static int __init ks_pcie_probe(struct platform_device *pdev)
if (ret)
num_lanes = 1;
- phy = devm_kzalloc(dev, sizeof(*phy) * num_lanes, GFP_KERNEL);
+ phy = devm_kcalloc(dev, num_lanes, sizeof(*phy), GFP_KERNEL);
if (!phy)
return -ENOMEM;
- link = devm_kzalloc(dev, sizeof(*link) * num_lanes, GFP_KERNEL);
+ link = devm_kcalloc(dev, num_lanes, sizeof(*link), GFP_KERNEL);
if (!link)
return -ENOMEM;
@@ -960,10 +1213,27 @@ static int __init ks_pcie_probe(struct platform_device *pdev)
ks_pcie->pci = pci;
ks_pcie->link = link;
ks_pcie->num_lanes = num_lanes;
- ks_pcie->num_viewport = num_viewport;
ks_pcie->phy = phy;
+	gpiod = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(gpiod)) {
+ ret = PTR_ERR(gpiod);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get reset GPIO\n");
+ goto err_link;
+ }
+
+ /* Obtain references to the PHYs */
+ for (i = 0; i < num_lanes; i++)
+ phy_pm_runtime_get_sync(ks_pcie->phy[i]);
+
ret = ks_pcie_enable_phy(ks_pcie);
+
+ /* Release references to the PHYs */
+ for (i = 0; i < num_lanes; i++)
+ phy_pm_runtime_put_sync(ks_pcie->phy[i]);
+
if (ret) {
dev_err(dev, "failed to enable phy\n");
goto err_link;
@@ -977,12 +1247,77 @@ static int __init ks_pcie_probe(struct platform_device *pdev)
goto err_get_sync;
}
- ret = ks_pcie_add_pcie_port(ks_pcie, pdev);
+ if (dw_pcie_ver_is_ge(pci, 480A))
+ ret = ks_pcie_am654_set_mode(dev, mode);
+ else
+ ret = ks_pcie_set_mode(dev);
if (ret < 0)
goto err_get_sync;
+ switch (mode) {
+ case DW_PCIE_RC_TYPE:
+ if (!IS_ENABLED(CONFIG_PCI_KEYSTONE_HOST)) {
+ ret = -ENODEV;
+ goto err_get_sync;
+ }
+
+ ret = of_property_read_u32(np, "num-viewport", &num_viewport);
+ if (ret < 0) {
+ dev_err(dev, "unable to read *num-viewport* property\n");
+ goto err_get_sync;
+ }
+
+ /*
+ * "Power Sequencing and Reset Signal Timings" table in
+ * PCI EXPRESS CARD ELECTROMECHANICAL SPECIFICATION, REV. 2.0
+	 * indicates PERST# should be deasserted a minimum of 100us after
+	 * REFCLK is stable. The REFCLK to the connector in RC mode is
+	 * selected while enabling the PHY, so deassert PERST# after 100us.
+ */
+ if (gpiod) {
+ usleep_range(100, 200);
+ gpiod_set_value_cansleep(gpiod, 1);
+ }
+
+ ks_pcie->num_viewport = num_viewport;
+ pci->pp.ops = host_ops;
+ ret = dw_pcie_host_init(&pci->pp);
+ if (ret < 0)
+ goto err_get_sync;
+ break;
+ case DW_PCIE_EP_TYPE:
+ if (!IS_ENABLED(CONFIG_PCI_KEYSTONE_EP)) {
+ ret = -ENODEV;
+ goto err_get_sync;
+ }
+
+ pci->ep.ops = ep_ops;
+ ret = dw_pcie_ep_init(&pci->ep);
+ if (ret < 0)
+ goto err_get_sync;
+
+ ret = dw_pcie_ep_init_registers(&pci->ep);
+ if (ret) {
+ dev_err(dev, "Failed to initialize DWC endpoint registers\n");
+ goto err_ep_init;
+ }
+
+ pci_epc_init_notify(pci->ep.epc);
+
+ break;
+ default:
+ dev_err(dev, "INVALID device type %d\n", mode);
+ ret = -EINVAL;
+ goto err_get_sync;
+ }
+
+ ks_pcie_enable_error_irq(ks_pcie);
+
return 0;
+err_ep_init:
+ dw_pcie_ep_deinit(&pci->ep);
err_get_sync:
pm_runtime_put(dev);
pm_runtime_disable(dev);
@@ -995,7 +1330,7 @@ err_link:
return ret;
}
-static int __exit ks_pcie_remove(struct platform_device *pdev)
+static void ks_pcie_remove(struct platform_device *pdev)
{
struct keystone_pcie *ks_pcie = platform_get_drvdata(pdev);
struct device_link **link = ks_pcie->link;
@@ -1007,16 +1342,55 @@ static int __exit ks_pcie_remove(struct platform_device *pdev)
ks_pcie_disable_phy(ks_pcie);
while (num_lanes--)
device_link_del(link[num_lanes]);
-
- return 0;
}
-static struct platform_driver ks_pcie_driver __refdata = {
+static struct platform_driver ks_pcie_driver = {
.probe = ks_pcie_probe,
- .remove = __exit_p(ks_pcie_remove),
+ .remove = ks_pcie_remove,
.driver = {
.name = "keystone-pcie",
- .of_match_table = of_match_ptr(ks_pcie_of_match),
+ .of_match_table = ks_pcie_of_match,
},
};
+
+#ifdef CONFIG_ARM
+/*
+ * When a PCI device does not exist during config cycles, keystone host
+ * gets a bus error instead of returning 0xffffffff (PCI_ERROR_RESPONSE).
+ * This handler always returns 0 for this kind of fault.
+ */
+static int ks_pcie_fault(unsigned long addr, unsigned int fsr,
+ struct pt_regs *regs)
+{
+ unsigned long instr = *(unsigned long *)instruction_pointer(regs);
+
+ if ((instr & 0x0e100090) == 0x00100090) {
+ int reg = (instr >> 12) & 15;
+
+ regs->uregs[reg] = -1;
+ regs->ARM_pc += 4;
+ }
+
+ return 0;
+}
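+/*
+ * Editorial note (best-effort decode): the mask/value pair above selects
+ * ARM "extra load" encodings (LDRH and friends) with the L bit set; bits
+ * [15:12] hold the destination register, which is forced to all-ones so a
+ * config read to a missing device reads back 0xffffffff rather than
+ * faulting.
+ */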
+
+static int __init ks_pcie_init(void)
+{
+ /*
+ * PCIe access errors that result into OCP errors are caught by ARM as
+ * "External aborts"
+ */
+ if (of_find_matching_node(NULL, ks_pcie_of_match))
+ hook_fault_code(17, ks_pcie_fault, SIGBUS, 0,
+ "Asynchronous external abort");
+
+ return platform_driver_register(&ks_pcie_driver);
+}
+device_initcall(ks_pcie_init);
+#else
builtin_platform_driver(ks_pcie_driver);
+#endif
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("PCIe controller driver for Texas Instruments Keystone SoCs");
+MODULE_AUTHOR("Murali Karicheri <m-karicheri2@ti.com>");
diff --git a/drivers/pci/controller/dwc/pci-layerscape-ep.c b/drivers/pci/controller/dwc/pci-layerscape-ep.c
new file mode 100644
index 000000000000..a4a800699f89
--- /dev/null
+++ b/drivers/pci/controller/dwc/pci-layerscape-ep.c
@@ -0,0 +1,301 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe controller EP driver for Freescale Layerscape SoCs
+ *
+ * Copyright (C) 2018 NXP Semiconductor.
+ *
+ * Author: Xiaowei Bao <xiaowei.bao@nxp.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/of_pci.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/resource.h>
+
+#include "pcie-designware.h"
+
+#define PEX_PF0_CONFIG 0xC0014
+#define PEX_PF0_CFG_READY BIT(0)
+
+/* PEX PFa PCIE PME and message interrupt registers */
+#define PEX_PF0_PME_MES_DR 0xC0020
+#define PEX_PF0_PME_MES_DR_LUD BIT(7)
+#define PEX_PF0_PME_MES_DR_LDD BIT(9)
+#define PEX_PF0_PME_MES_DR_HRD BIT(10)
+
+#define PEX_PF0_PME_MES_IER 0xC0028
+#define PEX_PF0_PME_MES_IER_LUDIE BIT(7)
+#define PEX_PF0_PME_MES_IER_LDDIE BIT(9)
+#define PEX_PF0_PME_MES_IER_HRDIE BIT(10)
+
+#define to_ls_pcie_ep(x) dev_get_drvdata((x)->dev)
+
+struct ls_pcie_ep_drvdata {
+ u32 func_offset;
+ const struct dw_pcie_ep_ops *ops;
+ const struct dw_pcie_ops *dw_pcie_ops;
+};
+
+struct ls_pcie_ep {
+ struct dw_pcie *pci;
+ struct pci_epc_features *ls_epc;
+ const struct ls_pcie_ep_drvdata *drvdata;
+ int irq;
+ u32 lnkcap;
+ bool big_endian;
+};
+
+static u32 ls_pcie_pf_lut_readl(struct ls_pcie_ep *pcie, u32 offset)
+{
+ struct dw_pcie *pci = pcie->pci;
+
+ if (pcie->big_endian)
+ return ioread32be(pci->dbi_base + offset);
+ else
+ return ioread32(pci->dbi_base + offset);
+}
+
+static void ls_pcie_pf_lut_writel(struct ls_pcie_ep *pcie, u32 offset, u32 value)
+{
+ struct dw_pcie *pci = pcie->pci;
+
+ if (pcie->big_endian)
+ iowrite32be(value, pci->dbi_base + offset);
+ else
+ iowrite32(value, pci->dbi_base + offset);
+}
+
+static irqreturn_t ls_pcie_ep_event_handler(int irq, void *dev_id)
+{
+ struct ls_pcie_ep *pcie = dev_id;
+ struct dw_pcie *pci = pcie->pci;
+ u32 val, cfg;
+ u8 offset;
+
+ val = ls_pcie_pf_lut_readl(pcie, PEX_PF0_PME_MES_DR);
+ ls_pcie_pf_lut_writel(pcie, PEX_PF0_PME_MES_DR, val);
+
+ if (!val)
+ return IRQ_NONE;
+
+ if (val & PEX_PF0_PME_MES_DR_LUD) {
+ offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+
+ /*
+ * The values of the Maximum Link Width and Supported Link
+ * Speed from the Link Capabilities Register will be lost
+	 * during link down or hot reset. Restore the initial value
+	 * configured by the Reset Configuration Word (RCW).
+ */
+ dw_pcie_dbi_ro_wr_en(pci);
+ dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, pcie->lnkcap);
+ dw_pcie_dbi_ro_wr_dis(pci);
+
+ cfg = ls_pcie_pf_lut_readl(pcie, PEX_PF0_CONFIG);
+ cfg |= PEX_PF0_CFG_READY;
+ ls_pcie_pf_lut_writel(pcie, PEX_PF0_CONFIG, cfg);
+ dw_pcie_ep_linkup(&pci->ep);
+
+ dev_dbg(pci->dev, "Link up\n");
+ } else if (val & PEX_PF0_PME_MES_DR_LDD) {
+ dev_dbg(pci->dev, "Link down\n");
+ dw_pcie_ep_linkdown(&pci->ep);
+ } else if (val & PEX_PF0_PME_MES_DR_HRD) {
+ dev_dbg(pci->dev, "Hot reset\n");
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int ls_pcie_ep_interrupt_init(struct ls_pcie_ep *pcie,
+ struct platform_device *pdev)
+{
+ u32 val;
+ int ret;
+
+ pcie->irq = platform_get_irq_byname(pdev, "pme");
+ if (pcie->irq < 0)
+ return pcie->irq;
+
+ ret = devm_request_irq(&pdev->dev, pcie->irq, ls_pcie_ep_event_handler,
+ IRQF_SHARED, pdev->name, pcie);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't register PCIe IRQ\n");
+ return ret;
+ }
+
+ /* Enable interrupts */
+ val = ls_pcie_pf_lut_readl(pcie, PEX_PF0_PME_MES_IER);
+ val |= PEX_PF0_PME_MES_IER_LDDIE | PEX_PF0_PME_MES_IER_HRDIE |
+ PEX_PF0_PME_MES_IER_LUDIE;
+ ls_pcie_pf_lut_writel(pcie, PEX_PF0_PME_MES_IER, val);
+
+ return 0;
+}
+
+static const struct pci_epc_features*
+ls_pcie_ep_get_features(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct ls_pcie_ep *pcie = to_ls_pcie_ep(pci);
+
+ return pcie->ls_epc;
+}
+
+static void ls_pcie_ep_init(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct ls_pcie_ep *pcie = to_ls_pcie_ep(pci);
+ struct dw_pcie_ep_func *ep_func;
+ enum pci_barno bar;
+
+ ep_func = dw_pcie_ep_get_func_from_ep(ep, 0);
+ if (!ep_func)
+ return;
+
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
+ dw_pcie_ep_reset_bar(pci, bar);
+
+ pcie->ls_epc->msi_capable = ep_func->msi_cap ? true : false;
+ pcie->ls_epc->msix_capable = ep_func->msix_cap ? true : false;
+}
+
+static int ls_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
+ unsigned int type, u16 interrupt_num)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+ switch (type) {
+ case PCI_IRQ_INTX:
+ return dw_pcie_ep_raise_intx_irq(ep, func_no);
+ case PCI_IRQ_MSI:
+ return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
+ case PCI_IRQ_MSIX:
+ return dw_pcie_ep_raise_msix_irq_doorbell(ep, func_no,
+ interrupt_num);
+ default:
+ dev_err(pci->dev, "UNKNOWN IRQ type\n");
+ return -EINVAL;
+ }
+}
+
+static unsigned int ls_pcie_ep_get_dbi_offset(struct dw_pcie_ep *ep, u8 func_no)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct ls_pcie_ep *pcie = to_ls_pcie_ep(pci);
+
+ WARN_ON(func_no && !pcie->drvdata->func_offset);
+ return pcie->drvdata->func_offset * func_no;
+}
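+/*
+ * Illustrative example (editorial): with the ls2 drvdata's func_offset of
+ * 0x20000, physical function 1 accesses its DBI registers at dbi_base +
+ * 0x20000, function 2 at dbi_base + 0x40000, and so on; function 0 always
+ * lives at offset 0.
+ */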
+
+static const struct dw_pcie_ep_ops ls_pcie_ep_ops = {
+ .init = ls_pcie_ep_init,
+ .raise_irq = ls_pcie_ep_raise_irq,
+ .get_features = ls_pcie_ep_get_features,
+ .get_dbi_offset = ls_pcie_ep_get_dbi_offset,
+};
+
+static const struct ls_pcie_ep_drvdata ls1_ep_drvdata = {
+ .ops = &ls_pcie_ep_ops,
+};
+
+static const struct ls_pcie_ep_drvdata ls2_ep_drvdata = {
+ .func_offset = 0x20000,
+ .ops = &ls_pcie_ep_ops,
+};
+
+static const struct ls_pcie_ep_drvdata lx2_ep_drvdata = {
+ .func_offset = 0x8000,
+ .ops = &ls_pcie_ep_ops,
+};
+
+static const struct of_device_id ls_pcie_ep_of_match[] = {
+ { .compatible = "fsl,ls1028a-pcie-ep", .data = &ls1_ep_drvdata },
+ { .compatible = "fsl,ls1046a-pcie-ep", .data = &ls1_ep_drvdata },
+ { .compatible = "fsl,ls1088a-pcie-ep", .data = &ls2_ep_drvdata },
+ { .compatible = "fsl,ls2088a-pcie-ep", .data = &ls2_ep_drvdata },
+ { .compatible = "fsl,lx2160ar2-pcie-ep", .data = &lx2_ep_drvdata },
+ { },
+};
+
+static int __init ls_pcie_ep_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct dw_pcie *pci;
+ struct ls_pcie_ep *pcie;
+ struct pci_epc_features *ls_epc;
+ struct resource *dbi_base;
+ u8 offset;
+ int ret;
+
+ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+ if (!pcie)
+ return -ENOMEM;
+
+ pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
+ if (!pci)
+ return -ENOMEM;
+
+ ls_epc = devm_kzalloc(dev, sizeof(*ls_epc), GFP_KERNEL);
+ if (!ls_epc)
+ return -ENOMEM;
+
+ pcie->drvdata = of_device_get_match_data(dev);
+
+ pci->dev = dev;
+ pci->ops = pcie->drvdata->dw_pcie_ops;
+
+ ls_epc->bar[BAR_2].only_64bit = true;
+ ls_epc->bar[BAR_3].type = BAR_RESERVED;
+ ls_epc->bar[BAR_4].only_64bit = true;
+ ls_epc->bar[BAR_5].type = BAR_RESERVED;
+ ls_epc->linkup_notifier = true;
+
+ pcie->pci = pci;
+ pcie->ls_epc = ls_epc;
+
+ dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
+ pci->dbi_base = devm_pci_remap_cfg_resource(dev, dbi_base);
+ if (IS_ERR(pci->dbi_base))
+ return PTR_ERR(pci->dbi_base);
+
+ pci->ep.ops = &ls_pcie_ep_ops;
+
+ pcie->big_endian = of_property_read_bool(dev->of_node, "big-endian");
+
+ dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
+
+ platform_set_drvdata(pdev, pcie);
+
+ offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ pcie->lnkcap = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
+
+ ret = dw_pcie_ep_init(&pci->ep);
+ if (ret)
+ return ret;
+
+ ret = dw_pcie_ep_init_registers(&pci->ep);
+ if (ret) {
+ dev_err(dev, "Failed to initialize DWC endpoint registers\n");
+ dw_pcie_ep_deinit(&pci->ep);
+ return ret;
+ }
+
+ pci_epc_init_notify(pci->ep.epc);
+
+ return ls_pcie_ep_interrupt_init(pcie, pdev);
+}
+
+static struct platform_driver ls_pcie_ep_driver = {
+ .driver = {
+ .name = "layerscape-pcie-ep",
+ .of_match_table = ls_pcie_ep_of_match,
+ .suppress_bind_attrs = true,
+ },
+};
+builtin_platform_driver_probe(ls_pcie_ep_driver, ls_pcie_ep_probe);
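+/*
+ * Editorial note: builtin_platform_driver_probe() registers the driver with
+ * a one-shot probe routine, which is why ls_pcie_ep_probe() can be marked
+ * __init and why suppress_bind_attrs is set to rule out later rebinding.
+ */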
diff --git a/drivers/pci/controller/dwc/pci-layerscape.c b/drivers/pci/controller/dwc/pci-layerscape.c
index ce45bde29bf8..a44b5c256d6e 100644
--- a/drivers/pci/controller/dwc/pci-layerscape.c
+++ b/drivers/pci/controller/dwc/pci-layerscape.c
@@ -3,16 +3,18 @@
* PCIe host controller driver for Freescale Layerscape SoCs
*
* Copyright (C) 2014 Freescale Semiconductor.
+ * Copyright 2021 NXP
*
* Author: Minghuan Lian <Minghuan.Lian@freescale.com>
*/
+#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/init.h>
+#include <linux/iopoll.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
-#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
@@ -20,37 +22,54 @@
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
+#include "../../pci.h"
#include "pcie-designware.h"
-/* PEX1/2 Misc Ports Status Register */
-#define SCFG_PEXMSCPORTSR(pex_idx) (0x94 + (pex_idx) * 4)
-#define LTSSM_STATE_SHIFT 20
-#define LTSSM_STATE_MASK 0x3f
-#define LTSSM_PCIE_L0 0x11 /* L0 state */
-
/* PEX Internal Configuration Registers */
#define PCIE_STRFMR1 0x71c /* Symbol Timer & Filter Mask Register1 */
#define PCIE_ABSERR 0x8d0 /* Bridge Slave Error Response Register */
#define PCIE_ABSERR_SETTING 0x9401 /* Forward error of non-posted request */
+/* PF Message Command Register */
+#define LS_PCIE_PF_MCR 0x2c
+#define PF_MCR_PTOMR BIT(0)
+#define PF_MCR_EXL2S BIT(1)
+
+/* LS1021A PEXn PM Write Control Register */
+#define SCFG_PEXPMWRCR(idx) (0x5c + (idx) * 0x64)
+#define PMXMTTURNOFF BIT(31)
+#define SCFG_PEXSFTRSTCR 0x190
+#define PEXSR(idx) BIT(idx)
+
+/* LS1043A PEX PME control register */
+#define SCFG_PEXPMECR 0x144
+#define PEXPME(idx) BIT(31 - (idx) * 4)
+
+/* LS1043A PEX LUT debug register */
+#define LS_PCIE_LDBG 0x7fc
+#define LDBG_SR BIT(30)
+#define LDBG_WE BIT(31)
+
#define PCIE_IATU_NUM 6
struct ls_pcie_drvdata {
- u32 lut_offset;
- u32 ltssm_shift;
- u32 lut_dbg;
+ const u32 pf_lut_off;
const struct dw_pcie_host_ops *ops;
- const struct dw_pcie_ops *dw_pcie_ops;
+ int (*exit_from_l2)(struct dw_pcie_rp *pp);
+ bool scfg_support;
+ bool pm_support;
};
struct ls_pcie {
struct dw_pcie *pci;
- void __iomem *lut;
- struct regmap *scfg;
const struct ls_pcie_drvdata *drvdata;
+ void __iomem *pf_lut_base;
+ struct regmap *scfg;
int index;
+ bool big_endian;
};
+#define ls_pcie_pf_lut_readl_addr(addr) ls_pcie_pf_lut_readl(pcie, addr)
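+/*
+ * Editorial note: readx_poll_timeout() expects a single-argument read op;
+ * this wrapper macro binds the in-scope 'pcie' pointer so a PF LUT register
+ * offset can be polled directly (see ls_pcie_send_turnoff_msg() below).
+ */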
#define to_ls_pcie(x) dev_get_drvdata((x)->dev)
static bool ls_pcie_is_bridge(struct ls_pcie *pcie)
@@ -59,7 +78,7 @@ static bool ls_pcie_is_bridge(struct ls_pcie *pcie)
u32 header_type;
header_type = ioread8(pci->dbi_base + PCI_HEADER_TYPE);
- header_type &= 0x7f;
+ header_type &= PCI_HEADER_TYPE_MASK;
return header_type == PCI_HEADER_TYPE_BRIDGE;
}
@@ -83,65 +102,83 @@ static void ls_pcie_drop_msg_tlp(struct ls_pcie *pcie)
iowrite32(val, pci->dbi_base + PCIE_STRFMR1);
}
-static void ls_pcie_disable_outbound_atus(struct ls_pcie *pcie)
+/* Forward error response of outbound non-posted requests */
+static void ls_pcie_fix_error_response(struct ls_pcie *pcie)
{
- int i;
+ struct dw_pcie *pci = pcie->pci;
- for (i = 0; i < PCIE_IATU_NUM; i++)
- dw_pcie_disable_atu(pcie->pci, i, DW_PCIE_REGION_OUTBOUND);
+ iowrite32(PCIE_ABSERR_SETTING, pci->dbi_base + PCIE_ABSERR);
}
-static int ls1021_pcie_link_up(struct dw_pcie *pci)
+static u32 ls_pcie_pf_lut_readl(struct ls_pcie *pcie, u32 off)
{
- u32 state;
- struct ls_pcie *pcie = to_ls_pcie(pci);
-
- if (!pcie->scfg)
- return 0;
-
- regmap_read(pcie->scfg, SCFG_PEXMSCPORTSR(pcie->index), &state);
- state = (state >> LTSSM_STATE_SHIFT) & LTSSM_STATE_MASK;
+ if (pcie->big_endian)
+ return ioread32be(pcie->pf_lut_base + off);
- if (state < LTSSM_PCIE_L0)
- return 0;
+ return ioread32(pcie->pf_lut_base + off);
+}
- return 1;
+static void ls_pcie_pf_lut_writel(struct ls_pcie *pcie, u32 off, u32 val)
+{
+ if (pcie->big_endian)
+ iowrite32be(val, pcie->pf_lut_base + off);
+ else
+ iowrite32(val, pcie->pf_lut_base + off);
}
-static int ls_pcie_link_up(struct dw_pcie *pci)
+static void ls_pcie_send_turnoff_msg(struct dw_pcie_rp *pp)
{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct ls_pcie *pcie = to_ls_pcie(pci);
- u32 state;
-
- state = (ioread32(pcie->lut + pcie->drvdata->lut_dbg) >>
- pcie->drvdata->ltssm_shift) &
- LTSSM_STATE_MASK;
+ u32 val;
+ int ret;
- if (state < LTSSM_PCIE_L0)
- return 0;
+ val = ls_pcie_pf_lut_readl(pcie, LS_PCIE_PF_MCR);
+ val |= PF_MCR_PTOMR;
+ ls_pcie_pf_lut_writel(pcie, LS_PCIE_PF_MCR, val);
- return 1;
+ ret = readx_poll_timeout(ls_pcie_pf_lut_readl_addr, LS_PCIE_PF_MCR,
+ val, !(val & PF_MCR_PTOMR),
+				 PCIE_PME_TO_L2_TIMEOUT_US / 10,
+ PCIE_PME_TO_L2_TIMEOUT_US);
+ if (ret)
+ dev_err(pcie->pci->dev, "PME_Turn_off timeout\n");
}
-/* Forward error response of outbound non-posted requests */
-static void ls_pcie_fix_error_response(struct ls_pcie *pcie)
+static int ls_pcie_exit_from_l2(struct dw_pcie_rp *pp)
{
- struct dw_pcie *pci = pcie->pci;
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct ls_pcie *pcie = to_ls_pcie(pci);
+ u32 val;
+ int ret;
- iowrite32(PCIE_ABSERR_SETTING, pci->dbi_base + PCIE_ABSERR);
+ /*
+ * Set PF_MCR_EXL2S bit in LS_PCIE_PF_MCR register for the link
+ * to exit L2 state.
+ */
+ val = ls_pcie_pf_lut_readl(pcie, LS_PCIE_PF_MCR);
+ val |= PF_MCR_EXL2S;
+ ls_pcie_pf_lut_writel(pcie, LS_PCIE_PF_MCR, val);
+
+ /*
+ * L2 exit timeout of 10ms is not defined in the specifications,
+ * it was chosen based on empirical observations.
+ */
+ ret = readx_poll_timeout(ls_pcie_pf_lut_readl_addr, LS_PCIE_PF_MCR,
+				 val, !(val & PF_MCR_EXL2S), 1000, 10000);
+ if (ret)
+ dev_err(pcie->pci->dev, "L2 exit timeout\n");
+
+ return ret;
}
-static int ls_pcie_host_init(struct pcie_port *pp)
+static int ls_pcie_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct ls_pcie *pcie = to_ls_pcie(pci);
- /*
- * Disable outbound windows configured by the bootloader to avoid
- * one transaction hitting multiple outbound windows.
- * dw_pcie_setup_rc() will reconfigure the outbound windows.
- */
- ls_pcie_disable_outbound_atus(pcie);
ls_pcie_fix_error_response(pcie);
dw_pcie_dbi_ro_wr_en(pci);
@@ -150,152 +187,148 @@ static int ls_pcie_host_init(struct pcie_port *pp)
ls_pcie_drop_msg_tlp(pcie);
- dw_pcie_setup_rc(pp);
+ return 0;
+}
+
+static void scfg_pcie_send_turnoff_msg(struct regmap *scfg, u32 reg, u32 mask)
+{
+ /* Send PME_Turn_Off message */
+ regmap_write_bits(scfg, reg, mask, mask);
+
+ /*
+	 * There is no specific register to check for PME_To_Ack from the
+	 * endpoint, so to be on the safe side, wait for
+	 * PCIE_PME_TO_L2_TIMEOUT_US.
+ */
+	mdelay(PCIE_PME_TO_L2_TIMEOUT_US / 1000);
+
+ /*
+	 * The Layerscape hardware reference manual recommends clearing the
+	 * PMXMTTURNOFF bit to complete the PME_Turn_Off handshake.
+ */
+ regmap_write_bits(scfg, reg, mask, 0);
+}
+
+static void ls1021a_pcie_send_turnoff_msg(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct ls_pcie *pcie = to_ls_pcie(pci);
+
+ scfg_pcie_send_turnoff_msg(pcie->scfg, SCFG_PEXPMWRCR(pcie->index), PMXMTTURNOFF);
+}
+
+static int scfg_pcie_exit_from_l2(struct regmap *scfg, u32 reg, u32 mask)
+{
+ /* Reset the PEX wrapper to bring the link out of L2 */
+ regmap_write_bits(scfg, reg, mask, mask);
+ regmap_write_bits(scfg, reg, mask, 0);
return 0;
}
-static int ls1021_pcie_host_init(struct pcie_port *pp)
+static int ls1021a_pcie_exit_from_l2(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct ls_pcie *pcie = to_ls_pcie(pci);
- struct device *dev = pci->dev;
- u32 index[2];
- int ret;
- pcie->scfg = syscon_regmap_lookup_by_phandle(dev->of_node,
- "fsl,pcie-scfg");
- if (IS_ERR(pcie->scfg)) {
- ret = PTR_ERR(pcie->scfg);
- dev_err(dev, "No syscfg phandle specified\n");
- pcie->scfg = NULL;
- return ret;
- }
+ return scfg_pcie_exit_from_l2(pcie->scfg, SCFG_PEXSFTRSTCR, PEXSR(pcie->index));
+}
- if (of_property_read_u32_array(dev->of_node,
- "fsl,pcie-scfg", index, 2)) {
- pcie->scfg = NULL;
- return -EINVAL;
- }
- pcie->index = index[1];
+static void ls1043a_pcie_send_turnoff_msg(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct ls_pcie *pcie = to_ls_pcie(pci);
- return ls_pcie_host_init(pp);
+ scfg_pcie_send_turnoff_msg(pcie->scfg, SCFG_PEXPMECR, PEXPME(pcie->index));
}
-static int ls_pcie_msi_host_init(struct pcie_port *pp)
+static int ls1043a_pcie_exit_from_l2(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct device *dev = pci->dev;
- struct device_node *np = dev->of_node;
- struct device_node *msi_node;
+ struct ls_pcie *pcie = to_ls_pcie(pci);
+ u32 val;
/*
- * The MSI domain is set by the generic of_msi_configure(). This
- * .msi_host_init() function keeps us from doing the default MSI
- * domain setup in dw_pcie_host_init() and also enforces the
- * requirement that "msi-parent" exists.
+ * Reset the PEX wrapper to bring the link out of L2.
+	 * LDBG_WE: allows write access to PEXDBG[SR] for both setting and
+	 * clearing the soft reset on the PEX module.
+	 * LDBG_SR: when set to 1, the PEX module enters soft reset.
*/
- msi_node = of_parse_phandle(np, "msi-parent", 0);
- if (!msi_node) {
- dev_err(dev, "failed to find msi-parent\n");
- return -EINVAL;
- }
+ val = ls_pcie_pf_lut_readl(pcie, LS_PCIE_LDBG);
+ val |= LDBG_WE;
+ ls_pcie_pf_lut_writel(pcie, LS_PCIE_LDBG, val);
- return 0;
-}
+ val = ls_pcie_pf_lut_readl(pcie, LS_PCIE_LDBG);
+ val |= LDBG_SR;
+ ls_pcie_pf_lut_writel(pcie, LS_PCIE_LDBG, val);
-static const struct dw_pcie_host_ops ls1021_pcie_host_ops = {
- .host_init = ls1021_pcie_host_init,
- .msi_host_init = ls_pcie_msi_host_init,
-};
+ val = ls_pcie_pf_lut_readl(pcie, LS_PCIE_LDBG);
+ val &= ~LDBG_SR;
+ ls_pcie_pf_lut_writel(pcie, LS_PCIE_LDBG, val);
-static const struct dw_pcie_host_ops ls_pcie_host_ops = {
- .host_init = ls_pcie_host_init,
- .msi_host_init = ls_pcie_msi_host_init,
-};
+ val = ls_pcie_pf_lut_readl(pcie, LS_PCIE_LDBG);
+ val &= ~LDBG_WE;
+ ls_pcie_pf_lut_writel(pcie, LS_PCIE_LDBG, val);
-static const struct dw_pcie_ops dw_ls1021_pcie_ops = {
- .link_up = ls1021_pcie_link_up,
-};
+ return 0;
+}
-static const struct dw_pcie_ops dw_ls_pcie_ops = {
- .link_up = ls_pcie_link_up,
+static const struct dw_pcie_host_ops ls_pcie_host_ops = {
+ .init = ls_pcie_host_init,
+ .pme_turn_off = ls_pcie_send_turnoff_msg,
};
-static const struct ls_pcie_drvdata ls1021_drvdata = {
- .ops = &ls1021_pcie_host_ops,
- .dw_pcie_ops = &dw_ls1021_pcie_ops,
+static const struct dw_pcie_host_ops ls1021a_pcie_host_ops = {
+ .init = ls_pcie_host_init,
+ .pme_turn_off = ls1021a_pcie_send_turnoff_msg,
};
-static const struct ls_pcie_drvdata ls1043_drvdata = {
- .lut_offset = 0x10000,
- .ltssm_shift = 24,
- .lut_dbg = 0x7fc,
- .ops = &ls_pcie_host_ops,
- .dw_pcie_ops = &dw_ls_pcie_ops,
+static const struct ls_pcie_drvdata ls1021a_drvdata = {
+ .pm_support = true,
+ .scfg_support = true,
+ .ops = &ls1021a_pcie_host_ops,
+ .exit_from_l2 = ls1021a_pcie_exit_from_l2,
};
-static const struct ls_pcie_drvdata ls1046_drvdata = {
- .lut_offset = 0x80000,
- .ltssm_shift = 24,
- .lut_dbg = 0x407fc,
- .ops = &ls_pcie_host_ops,
- .dw_pcie_ops = &dw_ls_pcie_ops,
+static const struct dw_pcie_host_ops ls1043a_pcie_host_ops = {
+ .init = ls_pcie_host_init,
+ .pme_turn_off = ls1043a_pcie_send_turnoff_msg,
};
-static const struct ls_pcie_drvdata ls2080_drvdata = {
- .lut_offset = 0x80000,
- .ltssm_shift = 0,
- .lut_dbg = 0x7fc,
- .ops = &ls_pcie_host_ops,
- .dw_pcie_ops = &dw_ls_pcie_ops,
+static const struct ls_pcie_drvdata ls1043a_drvdata = {
+ .pf_lut_off = 0x10000,
+ .pm_support = true,
+ .scfg_support = true,
+ .ops = &ls1043a_pcie_host_ops,
+ .exit_from_l2 = ls1043a_pcie_exit_from_l2,
};
-static const struct ls_pcie_drvdata ls2088_drvdata = {
- .lut_offset = 0x80000,
- .ltssm_shift = 0,
- .lut_dbg = 0x407fc,
+static const struct ls_pcie_drvdata layerscape_drvdata = {
+ .pf_lut_off = 0xc0000,
+ .pm_support = true,
.ops = &ls_pcie_host_ops,
- .dw_pcie_ops = &dw_ls_pcie_ops,
+ .exit_from_l2 = ls_pcie_exit_from_l2,
};
static const struct of_device_id ls_pcie_of_match[] = {
- { .compatible = "fsl,ls1012a-pcie", .data = &ls1046_drvdata },
- { .compatible = "fsl,ls1021a-pcie", .data = &ls1021_drvdata },
- { .compatible = "fsl,ls1043a-pcie", .data = &ls1043_drvdata },
- { .compatible = "fsl,ls1046a-pcie", .data = &ls1046_drvdata },
- { .compatible = "fsl,ls2080a-pcie", .data = &ls2080_drvdata },
- { .compatible = "fsl,ls2085a-pcie", .data = &ls2080_drvdata },
- { .compatible = "fsl,ls2088a-pcie", .data = &ls2088_drvdata },
- { .compatible = "fsl,ls1088a-pcie", .data = &ls2088_drvdata },
+ { .compatible = "fsl,ls1012a-pcie", .data = &layerscape_drvdata },
+ { .compatible = "fsl,ls1021a-pcie", .data = &ls1021a_drvdata },
+ { .compatible = "fsl,ls1028a-pcie", .data = &layerscape_drvdata },
+ { .compatible = "fsl,ls1043a-pcie", .data = &ls1043a_drvdata },
+ { .compatible = "fsl,ls1046a-pcie", .data = &layerscape_drvdata },
+ { .compatible = "fsl,ls2080a-pcie", .data = &layerscape_drvdata },
+ { .compatible = "fsl,ls2085a-pcie", .data = &layerscape_drvdata },
+ { .compatible = "fsl,ls2088a-pcie", .data = &layerscape_drvdata },
+ { .compatible = "fsl,ls1088a-pcie", .data = &layerscape_drvdata },
{ },
};
-static int __init ls_add_pcie_port(struct ls_pcie *pcie)
-{
- struct dw_pcie *pci = pcie->pci;
- struct pcie_port *pp = &pci->pp;
- struct device *dev = pci->dev;
- int ret;
-
- pp->ops = pcie->drvdata->ops;
-
- ret = dw_pcie_host_init(pp);
- if (ret) {
- dev_err(dev, "failed to initialize host\n");
- return ret;
- }
-
- return 0;
-}
-
-static int __init ls_pcie_probe(struct platform_device *pdev)
+static int ls_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct dw_pcie *pci;
struct ls_pcie *pcie;
struct resource *dbi_base;
- int ret;
+ u32 index[2];
pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
if (!pcie)
@@ -308,34 +341,75 @@ static int __init ls_pcie_probe(struct platform_device *pdev)
pcie->drvdata = of_device_get_match_data(dev);
pci->dev = dev;
- pci->ops = pcie->drvdata->dw_pcie_ops;
-
pcie->pci = pci;
+ pci->pp.ops = pcie->drvdata->ops;
dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
pci->dbi_base = devm_pci_remap_cfg_resource(dev, dbi_base);
if (IS_ERR(pci->dbi_base))
return PTR_ERR(pci->dbi_base);
- pcie->lut = pci->dbi_base + pcie->drvdata->lut_offset;
+ pcie->big_endian = of_property_read_bool(dev->of_node, "big-endian");
+
+ pcie->pf_lut_base = pci->dbi_base + pcie->drvdata->pf_lut_off;
+
+ if (pcie->drvdata->scfg_support) {
+ pcie->scfg =
+ syscon_regmap_lookup_by_phandle_args(dev->of_node,
+ "fsl,pcie-scfg", 1,
+ index);
+ if (IS_ERR(pcie->scfg)) {
+ dev_err(dev, "No syscfg phandle specified\n");
+ return PTR_ERR(pcie->scfg);
+ }
+
+ pcie->index = index[1];
+ }
if (!ls_pcie_is_bridge(pcie))
return -ENODEV;
platform_set_drvdata(pdev, pcie);
- ret = ls_add_pcie_port(pcie);
- if (ret < 0)
+ return dw_pcie_host_init(&pci->pp);
+}
+
+static int ls_pcie_suspend_noirq(struct device *dev)
+{
+ struct ls_pcie *pcie = dev_get_drvdata(dev);
+
+ if (!pcie->drvdata->pm_support)
+ return 0;
+
+ return dw_pcie_suspend_noirq(pcie->pci);
+}
+
+static int ls_pcie_resume_noirq(struct device *dev)
+{
+ struct ls_pcie *pcie = dev_get_drvdata(dev);
+ int ret;
+
+ if (!pcie->drvdata->pm_support)
+ return 0;
+
+ ret = pcie->drvdata->exit_from_l2(&pcie->pci->pp);
+ if (ret)
return ret;
- return 0;
+ return dw_pcie_resume_noirq(pcie->pci);
}
+static const struct dev_pm_ops ls_pcie_pm_ops = {
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(ls_pcie_suspend_noirq, ls_pcie_resume_noirq)
+};
+
static struct platform_driver ls_pcie_driver = {
+ .probe = ls_pcie_probe,
.driver = {
.name = "layerscape-pcie",
.of_match_table = ls_pcie_of_match,
.suppress_bind_attrs = true,
+ .pm = &ls_pcie_pm_ops,
},
};
-builtin_platform_driver_probe(ls_pcie_driver, ls_pcie_probe);
+builtin_platform_driver(ls_pcie_driver);
diff --git a/drivers/pci/controller/dwc/pci-meson.c b/drivers/pci/controller/dwc/pci-meson.c
index e35e9eaa50ee..54b6a4196f17 100644
--- a/drivers/pci/controller/dwc/pci-meson.c
+++ b/drivers/pci/controller/dwc/pci-meson.c
@@ -9,44 +9,20 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
-#include <linux/of_device.h>
-#include <linux/of_gpio.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/resource.h>
#include <linux/types.h>
+#include <linux/phy/phy.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
#include "pcie-designware.h"
#define to_meson_pcie(x) dev_get_drvdata((x)->dev)
-/* External local bus interface registers */
-#define PLR_OFFSET 0x700
-#define PCIE_PORT_LINK_CTRL_OFF (PLR_OFFSET + 0x10)
-#define FAST_LINK_MODE BIT(7)
-#define LINK_CAPABLE_MASK GENMASK(21, 16)
-#define LINK_CAPABLE_X1 BIT(16)
-
-#define PCIE_GEN2_CTRL_OFF (PLR_OFFSET + 0x10c)
-#define NUM_OF_LANES_MASK GENMASK(12, 8)
-#define NUM_OF_LANES_X1 BIT(8)
-#define DIRECT_SPEED_CHANGE BIT(17)
-
-#define TYPE1_HDR_OFFSET 0x0
-#define PCIE_STATUS_COMMAND (TYPE1_HDR_OFFSET + 0x04)
-#define PCI_IO_EN BIT(0)
-#define PCI_MEM_SPACE_EN BIT(1)
-#define PCI_BUS_MASTER_EN BIT(2)
-
-#define PCIE_BASE_ADDR0 (TYPE1_HDR_OFFSET + 0x10)
-#define PCIE_BASE_ADDR1 (TYPE1_HDR_OFFSET + 0x14)
-
-#define PCIE_CAP_OFFSET 0x70
-#define PCIE_DEV_CTRL_DEV_STUS (PCIE_CAP_OFFSET + 0x08)
-#define PCIE_CAP_MAX_PAYLOAD_MASK GENMASK(7, 5)
#define PCIE_CAP_MAX_PAYLOAD_SIZE(x) ((x) << 5)
-#define PCIE_CAP_MAX_READ_REQ_MASK GENMASK(14, 12)
#define PCIE_CAP_MAX_READ_REQ_SIZE(x) ((x) << 12)
/* PCIe specific config registers */
@@ -65,7 +41,6 @@
#define PORT_CLK_RATE 100000000UL
#define MAX_PAYLOAD_SIZE 256
#define MAX_READ_REQ_SIZE 256
-#define MESON_PCIE_PHY_POWERUP 0x1c
#define PCIE_RESET_DELAY 500
#define PCIE_SHARED_RESET 1
#define PCIE_NORMAL_RESET 0
@@ -77,31 +52,24 @@ enum pcie_data_rate {
PCIE_GEN4
};
-struct meson_pcie_mem_res {
- void __iomem *elbi_base;
- void __iomem *cfg_base;
- void __iomem *phy_base;
-};
-
struct meson_pcie_clk_res {
struct clk *clk;
- struct clk *mipi_gate;
struct clk *port_clk;
struct clk *general_clk;
};
struct meson_pcie_rc_reset {
- struct reset_control *phy;
struct reset_control *port;
struct reset_control *apb;
};
struct meson_pcie {
struct dw_pcie pci;
- struct meson_pcie_mem_res mem_res;
+ void __iomem *cfg_base;
struct meson_pcie_clk_res clk_res;
struct meson_pcie_rc_reset mrst;
struct gpio_desc *reset_gpio;
+ struct phy *phy;
};
static struct reset_control *meson_pcie_get_reset(struct meson_pcie *mp,
@@ -123,11 +91,6 @@ static int meson_pcie_get_resets(struct meson_pcie *mp)
{
struct meson_pcie_rc_reset *mrst = &mp->mrst;
- mrst->phy = meson_pcie_get_reset(mp, "phy", PCIE_SHARED_RESET);
- if (IS_ERR(mrst->phy))
- return PTR_ERR(mrst->phy);
- reset_control_deassert(mrst->phy);
-
mrst->port = meson_pcie_get_reset(mp, "port", PCIE_NORMAL_RESET);
if (IS_ERR(mrst->port))
return PTR_ERR(mrst->port);
@@ -141,66 +104,65 @@ static int meson_pcie_get_resets(struct meson_pcie *mp)
return 0;
}
-static void __iomem *meson_pcie_get_mem(struct platform_device *pdev,
- struct meson_pcie *mp,
- const char *id)
-{
- struct device *dev = mp->pci.dev;
- struct resource *res;
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, id);
-
- return devm_ioremap_resource(dev, res);
-}
-
-static void __iomem *meson_pcie_get_mem_shared(struct platform_device *pdev,
- struct meson_pcie *mp,
- const char *id)
+static int meson_pcie_get_mems(struct platform_device *pdev,
+ struct meson_pcie *mp)
{
- struct device *dev = mp->pci.dev;
+ struct dw_pcie *pci = &mp->pci;
struct resource *res;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, id);
- if (!res) {
- dev_err(dev, "No REG resource %s\n", id);
- return ERR_PTR(-ENXIO);
+ /*
+ * For the broken DTs that supply 'dbi' as 'elbi', parse the 'elbi'
+	 * region and assign it to both 'pci->elbi_base' and 'pci->dbi_base' so
+ * that the DWC core can skip parsing both regions.
+ */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "elbi");
+ if (res) {
+ pci->elbi_base = devm_pci_remap_cfg_resource(pci->dev, res);
+ if (IS_ERR(pci->elbi_base))
+ return PTR_ERR(pci->elbi_base);
+
+ pci->dbi_base = pci->elbi_base;
+ pci->dbi_phys_addr = res->start;
}
- return devm_ioremap(dev, res->start, resource_size(res));
+ mp->cfg_base = devm_platform_ioremap_resource_byname(pdev, "cfg");
+ if (IS_ERR(mp->cfg_base))
+ return PTR_ERR(mp->cfg_base);
+
+ return 0;
}
-static int meson_pcie_get_mems(struct platform_device *pdev,
- struct meson_pcie *mp)
+static int meson_pcie_power_on(struct meson_pcie *mp)
{
- mp->mem_res.elbi_base = meson_pcie_get_mem(pdev, mp, "elbi");
- if (IS_ERR(mp->mem_res.elbi_base))
- return PTR_ERR(mp->mem_res.elbi_base);
+ int ret = 0;
- mp->mem_res.cfg_base = meson_pcie_get_mem(pdev, mp, "cfg");
- if (IS_ERR(mp->mem_res.cfg_base))
- return PTR_ERR(mp->mem_res.cfg_base);
+ ret = phy_init(mp->phy);
+ if (ret)
+ return ret;
- /* Meson SoC has two PCI controllers use same phy register*/
- mp->mem_res.phy_base = meson_pcie_get_mem_shared(pdev, mp, "phy");
- if (IS_ERR(mp->mem_res.phy_base))
- return PTR_ERR(mp->mem_res.phy_base);
+ ret = phy_power_on(mp->phy);
+ if (ret) {
+ phy_exit(mp->phy);
+ return ret;
+ }
return 0;
}
-static void meson_pcie_power_on(struct meson_pcie *mp)
+static void meson_pcie_power_off(struct meson_pcie *mp)
{
- writel(MESON_PCIE_PHY_POWERUP, mp->mem_res.phy_base);
+ phy_power_off(mp->phy);
+ phy_exit(mp->phy);
}
-static void meson_pcie_reset(struct meson_pcie *mp)
+static int meson_pcie_reset(struct meson_pcie *mp)
{
struct meson_pcie_rc_reset *mrst = &mp->mrst;
+ int ret = 0;
- reset_control_assert(mrst->phy);
- udelay(PCIE_RESET_DELAY);
- reset_control_deassert(mrst->phy);
- udelay(PCIE_RESET_DELAY);
+ ret = phy_reset(mp->phy);
+ if (ret)
+ return ret;
reset_control_assert(mrst->port);
reset_control_assert(mrst->apb);
@@ -208,6 +170,15 @@ static void meson_pcie_reset(struct meson_pcie *mp)
reset_control_deassert(mrst->port);
reset_control_deassert(mrst->apb);
udelay(PCIE_RESET_DELAY);
+
+ return 0;
+}
+
+static inline void meson_pcie_disable_clock(void *data)
+{
+ struct clk *clk = data;
+
+ clk_disable_unprepare(clk);
}
static inline struct clk *meson_pcie_probe_clock(struct device *dev,
@@ -234,9 +205,7 @@ static inline struct clk *meson_pcie_probe_clock(struct device *dev,
return ERR_PTR(ret);
}
- devm_add_action_or_reset(dev,
- (void (*) (void *))clk_disable_unprepare,
- clk);
+ devm_add_action_or_reset(dev, meson_pcie_disable_clock, clk);
return clk;
}
@@ -250,74 +219,41 @@ static int meson_pcie_probe_clocks(struct meson_pcie *mp)
if (IS_ERR(res->port_clk))
return PTR_ERR(res->port_clk);
- res->mipi_gate = meson_pcie_probe_clock(dev, "pcie_mipi_en", 0);
- if (IS_ERR(res->mipi_gate))
- return PTR_ERR(res->mipi_gate);
-
- res->general_clk = meson_pcie_probe_clock(dev, "pcie_general", 0);
+ res->general_clk = meson_pcie_probe_clock(dev, "general", 0);
if (IS_ERR(res->general_clk))
return PTR_ERR(res->general_clk);
- res->clk = meson_pcie_probe_clock(dev, "pcie", 0);
+ res->clk = meson_pcie_probe_clock(dev, "pclk", 0);
if (IS_ERR(res->clk))
return PTR_ERR(res->clk);
return 0;
}
-static inline void meson_elb_writel(struct meson_pcie *mp, u32 val, u32 reg)
-{
- writel(val, mp->mem_res.elbi_base + reg);
-}
-
-static inline u32 meson_elb_readl(struct meson_pcie *mp, u32 reg)
-{
- return readl(mp->mem_res.elbi_base + reg);
-}
-
static inline u32 meson_cfg_readl(struct meson_pcie *mp, u32 reg)
{
- return readl(mp->mem_res.cfg_base + reg);
+ return readl(mp->cfg_base + reg);
}
static inline void meson_cfg_writel(struct meson_pcie *mp, u32 val, u32 reg)
{
- writel(val, mp->mem_res.cfg_base + reg);
+ writel(val, mp->cfg_base + reg);
}
static void meson_pcie_assert_reset(struct meson_pcie *mp)
{
- gpiod_set_value_cansleep(mp->reset_gpio, 0);
- udelay(500);
gpiod_set_value_cansleep(mp->reset_gpio, 1);
+ udelay(500);
+ gpiod_set_value_cansleep(mp->reset_gpio, 0);
}
-static void meson_pcie_init_dw(struct meson_pcie *mp)
+static void meson_pcie_ltssm_enable(struct meson_pcie *mp)
{
u32 val;
val = meson_cfg_readl(mp, PCIE_CFG0);
val |= APP_LTSSM_ENABLE;
meson_cfg_writel(mp, val, PCIE_CFG0);
-
- val = meson_elb_readl(mp, PCIE_PORT_LINK_CTRL_OFF);
- val &= ~LINK_CAPABLE_MASK;
- meson_elb_writel(mp, val, PCIE_PORT_LINK_CTRL_OFF);
-
- val = meson_elb_readl(mp, PCIE_PORT_LINK_CTRL_OFF);
- val |= LINK_CAPABLE_X1 | FAST_LINK_MODE;
- meson_elb_writel(mp, val, PCIE_PORT_LINK_CTRL_OFF);
-
- val = meson_elb_readl(mp, PCIE_GEN2_CTRL_OFF);
- val &= ~NUM_OF_LANES_MASK;
- meson_elb_writel(mp, val, PCIE_GEN2_CTRL_OFF);
-
- val = meson_elb_readl(mp, PCIE_GEN2_CTRL_OFF);
- val |= NUM_OF_LANES_X1 | DIRECT_SPEED_CHANGE;
- meson_elb_writel(mp, val, PCIE_GEN2_CTRL_OFF);
-
- meson_elb_writel(mp, 0x0, PCIE_BASE_ADDR0);
- meson_elb_writel(mp, 0x0, PCIE_BASE_ADDR1);
}
static int meson_size_to_payload(struct meson_pcie *mp, int size)
@@ -339,69 +275,52 @@ static int meson_size_to_payload(struct meson_pcie *mp, int size)
static void meson_set_max_payload(struct meson_pcie *mp, int size)
{
+ struct dw_pcie *pci = &mp->pci;
u32 val;
+ u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
int max_payload_size = meson_size_to_payload(mp, size);
- val = meson_elb_readl(mp, PCIE_DEV_CTRL_DEV_STUS);
- val &= ~PCIE_CAP_MAX_PAYLOAD_MASK;
- meson_elb_writel(mp, val, PCIE_DEV_CTRL_DEV_STUS);
+ val = dw_pcie_readl_dbi(pci, offset + PCI_EXP_DEVCTL);
+ val &= ~PCI_EXP_DEVCTL_PAYLOAD;
+ dw_pcie_writel_dbi(pci, offset + PCI_EXP_DEVCTL, val);
- val = meson_elb_readl(mp, PCIE_DEV_CTRL_DEV_STUS);
+ val = dw_pcie_readl_dbi(pci, offset + PCI_EXP_DEVCTL);
val |= PCIE_CAP_MAX_PAYLOAD_SIZE(max_payload_size);
- meson_elb_writel(mp, val, PCIE_DEV_CTRL_DEV_STUS);
+ dw_pcie_writel_dbi(pci, offset + PCI_EXP_DEVCTL, val);
}
static void meson_set_max_rd_req_size(struct meson_pcie *mp, int size)
{
+ struct dw_pcie *pci = &mp->pci;
u32 val;
+ u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
int max_rd_req_size = meson_size_to_payload(mp, size);
- val = meson_elb_readl(mp, PCIE_DEV_CTRL_DEV_STUS);
- val &= ~PCIE_CAP_MAX_READ_REQ_MASK;
- meson_elb_writel(mp, val, PCIE_DEV_CTRL_DEV_STUS);
+ val = dw_pcie_readl_dbi(pci, offset + PCI_EXP_DEVCTL);
+ val &= ~PCI_EXP_DEVCTL_READRQ;
+ dw_pcie_writel_dbi(pci, offset + PCI_EXP_DEVCTL, val);
- val = meson_elb_readl(mp, PCIE_DEV_CTRL_DEV_STUS);
+ val = dw_pcie_readl_dbi(pci, offset + PCI_EXP_DEVCTL);
val |= PCIE_CAP_MAX_READ_REQ_SIZE(max_rd_req_size);
- meson_elb_writel(mp, val, PCIE_DEV_CTRL_DEV_STUS);
+ dw_pcie_writel_dbi(pci, offset + PCI_EXP_DEVCTL, val);
}
-static inline void meson_enable_memory_space(struct meson_pcie *mp)
+static int meson_pcie_start_link(struct dw_pcie *pci)
{
- /* Set the RC Bus Master, Memory Space and I/O Space enables */
- meson_elb_writel(mp, PCI_IO_EN | PCI_MEM_SPACE_EN | PCI_BUS_MASTER_EN,
- PCIE_STATUS_COMMAND);
-}
-
-static int meson_pcie_establish_link(struct meson_pcie *mp)
-{
- struct dw_pcie *pci = &mp->pci;
- struct pcie_port *pp = &pci->pp;
-
- meson_pcie_init_dw(mp);
- meson_set_max_payload(mp, MAX_PAYLOAD_SIZE);
- meson_set_max_rd_req_size(mp, MAX_READ_REQ_SIZE);
-
- dw_pcie_setup_rc(pp);
- meson_enable_memory_space(mp);
+ struct meson_pcie *mp = to_meson_pcie(pci);
+ meson_pcie_ltssm_enable(mp);
meson_pcie_assert_reset(mp);
- return dw_pcie_wait_for_link(pci);
-}
-
-static void meson_pcie_enable_interrupts(struct meson_pcie *mp)
-{
- if (IS_ENABLED(CONFIG_PCI_MSI))
- dw_pcie_msi_init(&mp->pci.pp);
+ return 0;
}
-static int meson_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
- u32 *val)
+static int meson_pcie_rd_own_conf(struct pci_bus *bus, u32 devfn,
+ int where, int size, u32 *val)
{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
int ret;
- ret = dw_pcie_read(pci->dbi_base + where, size, val);
+ ret = pci_generic_config_read(bus, devfn, where, size, val);
if (ret != PCIBIOS_SUCCESSFUL)
return ret;
@@ -410,27 +329,25 @@ static int meson_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
* cannot program the PCI_CLASS_DEVICE register, so we must fabricate
* the return value in the config accessors.
*/
- if (where == PCI_CLASS_REVISION && size == 4)
- *val = (PCI_CLASS_BRIDGE_PCI << 16) | (*val & 0xffff);
- else if (where == PCI_CLASS_DEVICE && size == 2)
- *val = PCI_CLASS_BRIDGE_PCI;
- else if (where == PCI_CLASS_DEVICE && size == 1)
- *val = PCI_CLASS_BRIDGE_PCI & 0xff;
- else if (where == PCI_CLASS_DEVICE + 1 && size == 1)
- *val = (PCI_CLASS_BRIDGE_PCI >> 8) & 0xff;
+ if ((where & ~3) == PCI_CLASS_REVISION) {
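+ /*
+ * A sketch of what the code below does, derived from the masks and
+ * shifts themselves: expand a 1- or 2-byte read to its byte lane
+ * within the dword, substitute the bridge class code into bits 31:8
+ * (the revision ID in bits 7:0 is preserved), then shrink the result
+ * back to the requested access size. E.g. a 1-byte read of the class
+ * code byte at offset 0x0b returns PCI_CLASS_BRIDGE_PCI_NORMAL >> 16.
+ */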
+ if (size <= 2)
+ *val = (*val & ((1 << (size * 8)) - 1)) << (8 * (where & 3));
+ *val &= ~0xffffff00;
+ *val |= PCI_CLASS_BRIDGE_PCI_NORMAL << 8;
+ if (size <= 2)
+ *val = (*val >> (8 * (where & 3))) & ((1 << (size * 8)) - 1);
+ }
return PCIBIOS_SUCCESSFUL;
}
-static int meson_pcie_wr_own_conf(struct pcie_port *pp, int where,
- int size, u32 val)
-{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
-
- return dw_pcie_write(pci->dbi_base + where, size, val);
-}
+static struct pci_ops meson_pci_ops = {
+ .map_bus = dw_pcie_own_conf_map_bus,
+ .read = meson_pcie_rd_own_conf,
+ .write = pci_generic_config_write,
+};
-static int meson_pcie_link_up(struct dw_pcie *pci)
+static bool meson_pcie_link_up(struct dw_pcie *pci)
{
struct meson_pcie *mp = to_meson_pcie(pci);
struct device *dev = pci->dev;
@@ -458,7 +375,7 @@ static int meson_pcie_link_up(struct dw_pcie *pci)
dev_dbg(dev, "speed_okay\n");
if (smlh_up && rdlh_up && ltssm_up && speed_okay)
- return 1;
+ return true;
cnt++;
@@ -466,60 +383,29 @@ static int meson_pcie_link_up(struct dw_pcie *pci)
} while (cnt < WAIT_LINKUP_TIMEOUT);
dev_err(dev, "error: wait linkup timeout\n");
- return 0;
+ return false;
}
-static int meson_pcie_host_init(struct pcie_port *pp)
+static int meson_pcie_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct meson_pcie *mp = to_meson_pcie(pci);
- int ret;
- ret = meson_pcie_establish_link(mp);
- if (ret)
- return ret;
+ pp->bridge->ops = &meson_pci_ops;
- meson_pcie_enable_interrupts(mp);
+ meson_set_max_payload(mp, MAX_PAYLOAD_SIZE);
+ meson_set_max_rd_req_size(mp, MAX_READ_REQ_SIZE);
return 0;
}
static const struct dw_pcie_host_ops meson_pcie_host_ops = {
- .rd_own_conf = meson_pcie_rd_own_conf,
- .wr_own_conf = meson_pcie_wr_own_conf,
- .host_init = meson_pcie_host_init,
+ .init = meson_pcie_host_init,
};
-static int meson_add_pcie_port(struct meson_pcie *mp,
- struct platform_device *pdev)
-{
- struct dw_pcie *pci = &mp->pci;
- struct pcie_port *pp = &pci->pp;
- struct device *dev = &pdev->dev;
- int ret;
-
- if (IS_ENABLED(CONFIG_PCI_MSI)) {
- pp->msi_irq = platform_get_irq(pdev, 0);
- if (pp->msi_irq < 0) {
- dev_err(dev, "failed to get MSI IRQ\n");
- return pp->msi_irq;
- }
- }
-
- pp->ops = &meson_pcie_host_ops;
- pci->dbi_base = mp->mem_res.elbi_base;
-
- ret = dw_pcie_host_init(pp);
- if (ret) {
- dev_err(dev, "failed to initialize host\n");
- return ret;
- }
-
- return 0;
-}
-
static const struct dw_pcie_ops dw_pcie_ops = {
.link_up = meson_pcie_link_up,
+ .start_link = meson_pcie_start_link,
};
static int meson_pcie_probe(struct platform_device *pdev)
@@ -536,6 +422,14 @@ static int meson_pcie_probe(struct platform_device *pdev)
pci = &mp->pci;
pci->dev = dev;
pci->ops = &dw_pcie_ops;
+ pci->pp.ops = &meson_pcie_host_ops;
+ pci->num_lanes = 1;
+
+ mp->phy = devm_phy_get(dev, "pcie");
+ if (IS_ERR(mp->phy)) {
+ dev_err(dev, "get phy failed, %ld\n", PTR_ERR(mp->phy));
+ return PTR_ERR(mp->phy);
+ }
mp->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(mp->reset_gpio)) {
@@ -555,32 +449,49 @@ static int meson_pcie_probe(struct platform_device *pdev)
return ret;
}
- meson_pcie_power_on(mp);
- meson_pcie_reset(mp);
+ ret = meson_pcie_power_on(mp);
+ if (ret) {
+ dev_err(dev, "phy power on failed, %d\n", ret);
+ return ret;
+ }
+
+ ret = meson_pcie_reset(mp);
+ if (ret) {
+ dev_err(dev, "reset failed, %d\n", ret);
+ goto err_phy;
+ }
ret = meson_pcie_probe_clocks(mp);
if (ret) {
dev_err(dev, "init clock resources failed, %d\n", ret);
- return ret;
+ goto err_phy;
}
platform_set_drvdata(pdev, mp);
- ret = meson_add_pcie_port(mp, pdev);
+ ret = dw_pcie_host_init(&pci->pp);
if (ret < 0) {
dev_err(dev, "Add PCIe port failed, %d\n", ret);
- return ret;
+ goto err_phy;
}
return 0;
+
+err_phy:
+ meson_pcie_power_off(mp);
+ return ret;
}
static const struct of_device_id meson_pcie_of_match[] = {
{
.compatible = "amlogic,axg-pcie",
},
+ {
+ .compatible = "amlogic,g12a-pcie",
+ },
{},
};
+MODULE_DEVICE_TABLE(of, meson_pcie_of_match);
static struct platform_driver meson_pcie_driver = {
.probe = meson_pcie_probe,
@@ -590,4 +501,8 @@ static struct platform_driver meson_pcie_driver = {
},
};
-builtin_platform_driver(meson_pcie_driver);
+module_platform_driver(meson_pcie_driver);
+
+MODULE_AUTHOR("Yue Wang <yue.wang@amlogic.com>");
+MODULE_DESCRIPTION("Amlogic PCIe Controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/dwc/pcie-al.c b/drivers/pci/controller/dwc/pcie-al.c
new file mode 100644
index 000000000000..345c281c74fe
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-al.c
@@ -0,0 +1,391 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host controller driver for Amazon's Annapurna Labs IP (used in chips
+ * such as Graviton and Alpine)
+ *
+ * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Author: Jonathan Chocron <jonnyc@amazon.com>
+ */
+
+#include <linux/pci.h>
+#include <linux/pci-ecam.h>
+#include <linux/pci-acpi.h>
+#include "../../pci.h"
+
+#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)
+
+struct al_pcie_acpi {
+ void __iomem *dbi_base;
+};
+
+static void __iomem *al_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
+ int where)
+{
+ struct pci_config_window *cfg = bus->sysdata;
+ struct al_pcie_acpi *pcie = cfg->priv;
+ void __iomem *dbi_base = pcie->dbi_base;
+
+ if (bus->number == cfg->busr.start) {
+ /*
+ * The DW PCIe core doesn't filter out transactions to other
+ * devices/functions on the root bus num, so we do this here.
+ */
+ if (PCI_SLOT(devfn) > 0)
+ return NULL;
+ else
+ return dbi_base + where;
+ }
+
+ return pci_ecam_map_bus(bus, devfn, where);
+}
+
+static int al_pcie_init(struct pci_config_window *cfg)
+{
+ struct device *dev = cfg->parent;
+ struct acpi_device *adev = to_acpi_device(dev);
+ struct acpi_pci_root *root = acpi_driver_data(adev);
+ struct al_pcie_acpi *al_pcie;
+ struct resource *res;
+ int ret;
+
+ al_pcie = devm_kzalloc(dev, sizeof(*al_pcie), GFP_KERNEL);
+ if (!al_pcie)
+ return -ENOMEM;
+
+ res = devm_kzalloc(dev, sizeof(*res), GFP_KERNEL);
+ if (!res)
+ return -ENOMEM;
+
+ ret = acpi_get_rc_resources(dev, "AMZN0001", root->segment, res);
+ if (ret) {
+ dev_err(dev, "can't get rc dbi base address for SEG %d\n",
+ root->segment);
+ return ret;
+ }
+
+ dev_dbg(dev, "Root port dbi res: %pR\n", res);
+
+ al_pcie->dbi_base = devm_pci_remap_cfg_resource(dev, res);
+ if (IS_ERR(al_pcie->dbi_base))
+ return PTR_ERR(al_pcie->dbi_base);
+
+ cfg->priv = al_pcie;
+
+ return 0;
+}
+
+const struct pci_ecam_ops al_pcie_ops = {
+ .init = al_pcie_init,
+ .pci_ops = {
+ .map_bus = al_pcie_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+ }
+};
+
+#endif /* defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS) */
+
+#ifdef CONFIG_PCIE_AL
+
+#include <linux/of_pci.h>
+#include "pcie-designware.h"
+
+#define AL_PCIE_REV_ID_2 2
+#define AL_PCIE_REV_ID_3 3
+#define AL_PCIE_REV_ID_4 4
+
+#define AXI_BASE_OFFSET 0x0
+
+#define DEVICE_ID_OFFSET 0x16c
+
+#define DEVICE_REV_ID 0x0
+#define DEVICE_REV_ID_DEV_ID_MASK GENMASK(31, 16)
+
+#define DEVICE_REV_ID_DEV_ID_X4 0
+#define DEVICE_REV_ID_DEV_ID_X8 2
+#define DEVICE_REV_ID_DEV_ID_X16 4
+
+#define OB_CTRL_REV1_2_OFFSET 0x0040
+#define OB_CTRL_REV3_5_OFFSET 0x0030
+
+#define CFG_TARGET_BUS 0x0
+#define CFG_TARGET_BUS_MASK_MASK GENMASK(7, 0)
+#define CFG_TARGET_BUS_BUSNUM_MASK GENMASK(15, 8)
+
+#define CFG_CONTROL 0x4
+#define CFG_CONTROL_SUBBUS_MASK GENMASK(15, 8)
+#define CFG_CONTROL_SEC_BUS_MASK GENMASK(23, 16)
+
+struct al_pcie_reg_offsets {
+ unsigned int ob_ctrl;
+};
+
+struct al_pcie_target_bus_cfg {
+ u8 reg_val;
+ u8 reg_mask;
+ u8 ecam_mask;
+};
+
+struct al_pcie {
+ struct dw_pcie *pci;
+ void __iomem *controller_base; /* base of PCIe unit (not DW core) */
+ struct device *dev;
+ resource_size_t ecam_size;
+ unsigned int controller_rev_id;
+ struct al_pcie_reg_offsets reg_offsets;
+ struct al_pcie_target_bus_cfg target_bus_cfg;
+};
+
+#define to_al_pcie(x) dev_get_drvdata((x)->dev)
+
+static inline u32 al_pcie_controller_readl(struct al_pcie *pcie, u32 offset)
+{
+ return readl_relaxed(pcie->controller_base + offset);
+}
+
+static inline void al_pcie_controller_writel(struct al_pcie *pcie, u32 offset,
+ u32 val)
+{
+ writel_relaxed(val, pcie->controller_base + offset);
+}
+
+static int al_pcie_rev_id_get(struct al_pcie *pcie, unsigned int *rev_id)
+{
+ u32 dev_rev_id_val;
+ u32 dev_id_val;
+
+ dev_rev_id_val = al_pcie_controller_readl(pcie, AXI_BASE_OFFSET +
+ DEVICE_ID_OFFSET +
+ DEVICE_REV_ID);
+ dev_id_val = FIELD_GET(DEVICE_REV_ID_DEV_ID_MASK, dev_rev_id_val);
+
+ switch (dev_id_val) {
+ case DEVICE_REV_ID_DEV_ID_X4:
+ *rev_id = AL_PCIE_REV_ID_2;
+ break;
+ case DEVICE_REV_ID_DEV_ID_X8:
+ *rev_id = AL_PCIE_REV_ID_3;
+ break;
+ case DEVICE_REV_ID_DEV_ID_X16:
+ *rev_id = AL_PCIE_REV_ID_4;
+ break;
+ default:
+ dev_err(pcie->dev, "Unsupported dev_id_val (0x%x)\n",
+ dev_id_val);
+ return -EINVAL;
+ }
+
+ dev_dbg(pcie->dev, "dev_id_val: 0x%x\n", dev_id_val);
+
+ return 0;
+}
+
+static int al_pcie_reg_offsets_set(struct al_pcie *pcie)
+{
+ switch (pcie->controller_rev_id) {
+ case AL_PCIE_REV_ID_2:
+ pcie->reg_offsets.ob_ctrl = OB_CTRL_REV1_2_OFFSET;
+ break;
+ case AL_PCIE_REV_ID_3:
+ case AL_PCIE_REV_ID_4:
+ pcie->reg_offsets.ob_ctrl = OB_CTRL_REV3_5_OFFSET;
+ break;
+ default:
+ dev_err(pcie->dev, "Unsupported controller rev_id: 0x%x\n",
+ pcie->controller_rev_id);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static inline void al_pcie_target_bus_set(struct al_pcie *pcie,
+ u8 target_bus,
+ u8 mask_target_bus)
+{
+ u32 reg;
+
+ reg = FIELD_PREP(CFG_TARGET_BUS_MASK_MASK, mask_target_bus) |
+ FIELD_PREP(CFG_TARGET_BUS_BUSNUM_MASK, target_bus);
+
+ al_pcie_controller_writel(pcie, AXI_BASE_OFFSET +
+ pcie->reg_offsets.ob_ctrl + CFG_TARGET_BUS,
+ reg);
+}
+
+static void __iomem *al_pcie_conf_addr_map_bus(struct pci_bus *bus,
+ unsigned int devfn, int where)
+{
+ struct dw_pcie_rp *pp = bus->sysdata;
+ struct al_pcie *pcie = to_al_pcie(to_dw_pcie_from_pp(pp));
+ unsigned int busnr = bus->number;
+ struct al_pcie_target_bus_cfg *target_bus_cfg = &pcie->target_bus_cfg;
+ unsigned int busnr_ecam = busnr & target_bus_cfg->ecam_mask;
+ unsigned int busnr_reg = busnr & target_bus_cfg->reg_mask;
+
+ if (busnr_reg != target_bus_cfg->reg_val) {
+ dev_dbg(pcie->pci->dev, "Changing target bus busnum val from 0x%x to 0x%x\n",
+ target_bus_cfg->reg_val, busnr_reg);
+ target_bus_cfg->reg_val = busnr_reg;
+ al_pcie_target_bus_set(pcie,
+ target_bus_cfg->reg_val,
+ target_bus_cfg->reg_mask);
+ }
+
+ return pp->va_cfg0_base + PCIE_ECAM_OFFSET(busnr_ecam, devfn, where);
+}
+
+static struct pci_ops al_child_pci_ops = {
+ .map_bus = al_pcie_conf_addr_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+};
+
+static int al_pcie_config_prepare(struct al_pcie *pcie)
+{
+ struct al_pcie_target_bus_cfg *target_bus_cfg;
+ struct dw_pcie_rp *pp = &pcie->pci->pp;
+ unsigned int ecam_bus_mask;
+ struct resource_entry *ft;
+ u32 cfg_control_offset;
+ struct resource *bus;
+ u8 subordinate_bus;
+ u8 secondary_bus;
+ u32 cfg_control;
+ u32 reg;
+
+ ft = resource_list_first_type(&pp->bridge->windows, IORESOURCE_BUS);
+ if (!ft)
+ return -ENODEV;
+
+ bus = ft->res;
+ target_bus_cfg = &pcie->target_bus_cfg;
+
+ ecam_bus_mask = (pcie->ecam_size >> PCIE_ECAM_BUS_SHIFT) - 1;
+ if (ecam_bus_mask > 255) {
+ dev_warn(pcie->dev, "ECAM window size is larger than 256MB. Cutting off at 256\n");
+ ecam_bus_mask = 255;
+ }
+
+ /* This portion is taken from the transaction address */
+ target_bus_cfg->ecam_mask = ecam_bus_mask;
+ /* This portion is taken from the cfg_target_bus reg */
+ target_bus_cfg->reg_mask = ~target_bus_cfg->ecam_mask;
+ target_bus_cfg->reg_val = bus->start & target_bus_cfg->reg_mask;
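+
+ /*
+ * E.g. a 1MB ECAM window covers a single bus: ecam_mask is then 0,
+ * so the whole bus number comes from cfg_target_bus, which
+ * al_pcie_conf_addr_map_bus() rewrites whenever the accessed bus
+ * changes.
+ */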
+
+ al_pcie_target_bus_set(pcie, target_bus_cfg->reg_val,
+ target_bus_cfg->reg_mask);
+
+ secondary_bus = bus->start + 1;
+ subordinate_bus = bus->end;
+
+ /* Set the valid values of secondary and subordinate buses */
+ cfg_control_offset = AXI_BASE_OFFSET + pcie->reg_offsets.ob_ctrl +
+ CFG_CONTROL;
+
+ cfg_control = al_pcie_controller_readl(pcie, cfg_control_offset);
+
+ reg = cfg_control &
+ ~(CFG_CONTROL_SEC_BUS_MASK | CFG_CONTROL_SUBBUS_MASK);
+
+ reg |= FIELD_PREP(CFG_CONTROL_SUBBUS_MASK, subordinate_bus) |
+ FIELD_PREP(CFG_CONTROL_SEC_BUS_MASK, secondary_bus);
+
+ al_pcie_controller_writel(pcie, cfg_control_offset, reg);
+
+ return 0;
+}
+
+static int al_pcie_host_init(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct al_pcie *pcie = to_al_pcie(pci);
+ int rc;
+
+ pp->bridge->child_ops = &al_child_pci_ops;
+
+ rc = al_pcie_rev_id_get(pcie, &pcie->controller_rev_id);
+ if (rc)
+ return rc;
+
+ rc = al_pcie_reg_offsets_set(pcie);
+ if (rc)
+ return rc;
+
+ rc = al_pcie_config_prepare(pcie);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+static const struct dw_pcie_host_ops al_pcie_host_ops = {
+ .init = al_pcie_host_init,
+};
+
+static int al_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *controller_res;
+ struct resource *ecam_res;
+ struct al_pcie *al_pcie;
+ struct dw_pcie *pci;
+
+ al_pcie = devm_kzalloc(dev, sizeof(*al_pcie), GFP_KERNEL);
+ if (!al_pcie)
+ return -ENOMEM;
+
+ pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
+ if (!pci)
+ return -ENOMEM;
+
+ pci->dev = dev;
+ pci->pp.ops = &al_pcie_host_ops;
+
+ al_pcie->pci = pci;
+ al_pcie->dev = dev;
+
+ ecam_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
+ if (!ecam_res) {
+ dev_err(dev, "couldn't find 'config' reg in DT\n");
+ return -ENOENT;
+ }
+ al_pcie->ecam_size = resource_size(ecam_res);
+ pci->pp.native_ecam = true;
+
+ controller_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "controller");
+ al_pcie->controller_base = devm_ioremap_resource(dev, controller_res);
+ if (IS_ERR(al_pcie->controller_base)) {
+ dev_err(dev, "couldn't remap controller base %pR\n",
+ controller_res);
+ return PTR_ERR(al_pcie->controller_base);
+ }
+
+ dev_dbg(dev, "From DT: controller_base: %pR\n", controller_res);
+
+ platform_set_drvdata(pdev, al_pcie);
+
+ return dw_pcie_host_init(&pci->pp);
+}
+
+static const struct of_device_id al_pcie_of_match[] = {
+ { .compatible = "amazon,al-alpine-v2-pcie",
+ },
+ { .compatible = "amazon,al-alpine-v3-pcie",
+ },
+ {},
+};
+
+static struct platform_driver al_pcie_driver = {
+ .driver = {
+ .name = "al-pcie",
+ .of_match_table = al_pcie_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = al_pcie_probe,
+};
+builtin_platform_driver(al_pcie_driver);
+
+#endif /* CONFIG_PCIE_AL */
diff --git a/drivers/pci/controller/dwc/pcie-amd-mdb.c b/drivers/pci/controller/dwc/pcie-amd-mdb.c
new file mode 100644
index 000000000000..3c6e837465bb
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-amd-mdb.c
@@ -0,0 +1,526 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host controller driver for AMD MDB PCIe Bridge
+ *
+ * Copyright (C) 2024-2025, Advanced Micro Devices, Inc.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/of_device.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/resource.h>
+#include <linux/types.h>
+
+#include "../../pci.h"
+#include "pcie-designware.h"
+
+#define AMD_MDB_TLP_IR_STATUS_MISC 0x4C0
+#define AMD_MDB_TLP_IR_MASK_MISC 0x4C4
+#define AMD_MDB_TLP_IR_ENABLE_MISC 0x4C8
+#define AMD_MDB_TLP_IR_DISABLE_MISC 0x4CC
+
+#define AMD_MDB_TLP_PCIE_INTX_MASK GENMASK(23, 16)
+
+#define AMD_MDB_PCIE_INTR_INTX_ASSERT(x) BIT((x) * 2)
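+/* The INTA..INTD assert bits occupy the even bit positions of the INTx field */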
+
+/* Interrupt register definitions. */
+#define AMD_MDB_PCIE_INTR_CMPL_TIMEOUT 15
+#define AMD_MDB_PCIE_INTR_INTX 16
+#define AMD_MDB_PCIE_INTR_PM_PME_RCVD 24
+#define AMD_MDB_PCIE_INTR_PME_TO_ACK_RCVD 25
+#define AMD_MDB_PCIE_INTR_MISC_CORRECTABLE 26
+#define AMD_MDB_PCIE_INTR_NONFATAL 27
+#define AMD_MDB_PCIE_INTR_FATAL 28
+
+#define IMR(x) BIT(AMD_MDB_PCIE_INTR_ ##x)
+#define AMD_MDB_PCIE_IMR_ALL_MASK \
+ ( \
+ IMR(CMPL_TIMEOUT) | \
+ IMR(PM_PME_RCVD) | \
+ IMR(PME_TO_ACK_RCVD) | \
+ IMR(MISC_CORRECTABLE) | \
+ IMR(NONFATAL) | \
+ IMR(FATAL) | \
+ AMD_MDB_TLP_PCIE_INTX_MASK \
+ )
+
+/**
+ * struct amd_mdb_pcie - PCIe port information
+ * @pci: DesignWare PCIe controller structure
+ * @slcr: MDB System Level Control and Status Register (SLCR) base
+ * @intx_domain: INTx IRQ domain pointer
+ * @mdb_domain: MDB IRQ domain pointer
+ * @perst_gpio: GPIO descriptor for PERST# signal handling
+ * @intx_irq: INTx IRQ interrupt number
+ */
+struct amd_mdb_pcie {
+ struct dw_pcie pci;
+ void __iomem *slcr;
+ struct irq_domain *intx_domain;
+ struct irq_domain *mdb_domain;
+ struct gpio_desc *perst_gpio;
+ int intx_irq;
+};
+
+static const struct dw_pcie_host_ops amd_mdb_pcie_host_ops = {
+};
+
+static void amd_mdb_intx_irq_mask(struct irq_data *data)
+{
+ struct amd_mdb_pcie *pcie = irq_data_get_irq_chip_data(data);
+ struct dw_pcie *pci = &pcie->pci;
+ struct dw_pcie_rp *port = &pci->pp;
+ unsigned long flags;
+ u32 val;
+
+ raw_spin_lock_irqsave(&port->lock, flags);
+ val = FIELD_PREP(AMD_MDB_TLP_PCIE_INTX_MASK,
+ AMD_MDB_PCIE_INTR_INTX_ASSERT(data->hwirq));
+
+ /*
+ * Writing '1' to a bit in AMD_MDB_TLP_IR_DISABLE_MISC disables that
+ * interrupt, writing '0' has no effect.
+ */
+ writel_relaxed(val, pcie->slcr + AMD_MDB_TLP_IR_DISABLE_MISC);
+ raw_spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static void amd_mdb_intx_irq_unmask(struct irq_data *data)
+{
+ struct amd_mdb_pcie *pcie = irq_data_get_irq_chip_data(data);
+ struct dw_pcie *pci = &pcie->pci;
+ struct dw_pcie_rp *port = &pci->pp;
+ unsigned long flags;
+ u32 val;
+
+ raw_spin_lock_irqsave(&port->lock, flags);
+ val = FIELD_PREP(AMD_MDB_TLP_PCIE_INTX_MASK,
+ AMD_MDB_PCIE_INTR_INTX_ASSERT(data->hwirq));
+
+ /*
+ * Writing '1' to a bit in AMD_MDB_TLP_IR_ENABLE_MISC enables that
+ * interrupt, writing '0' has no effect.
+ */
+ writel_relaxed(val, pcie->slcr + AMD_MDB_TLP_IR_ENABLE_MISC);
+ raw_spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static struct irq_chip amd_mdb_intx_irq_chip = {
+ .name = "AMD MDB INTx",
+ .irq_mask = amd_mdb_intx_irq_mask,
+ .irq_unmask = amd_mdb_intx_irq_unmask,
+};
+
+/**
+ * amd_mdb_pcie_intx_map - Set the handler for the INTx and mark IRQ as valid
+ * @domain: IRQ domain
+ * @irq: Virtual IRQ number
+ * @hwirq: Hardware interrupt number
+ *
+ * Return: Always returns '0'.
+ */
+static int amd_mdb_pcie_intx_map(struct irq_domain *domain,
+ unsigned int irq, irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &amd_mdb_intx_irq_chip,
+ handle_level_irq);
+ irq_set_chip_data(irq, domain->host_data);
+ irq_set_status_flags(irq, IRQ_LEVEL);
+
+ return 0;
+}
+
+/* INTx IRQ domain operations. */
+static const struct irq_domain_ops amd_intx_domain_ops = {
+ .map = amd_mdb_pcie_intx_map,
+};
+
+static irqreturn_t dw_pcie_rp_intx(int irq, void *args)
+{
+ struct amd_mdb_pcie *pcie = args;
+ unsigned long val;
+ int i, int_status;
+
+ val = readl_relaxed(pcie->slcr + AMD_MDB_TLP_IR_STATUS_MISC);
+ int_status = FIELD_GET(AMD_MDB_TLP_PCIE_INTX_MASK, val);
+
+ for (i = 0; i < PCI_NUM_INTX; i++) {
+ if (int_status & AMD_MDB_PCIE_INTR_INTX_ASSERT(i))
+ generic_handle_domain_irq(pcie->intx_domain, i);
+ }
+
+ return IRQ_HANDLED;
+}
+
+#define _IC(x, s) [AMD_MDB_PCIE_INTR_ ## x] = { __stringify(x), s }
+
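+/*
+ * Each _IC() entry expands to a designated initializer, e.g.
+ * _IC(FATAL, "Fatal error message") becomes
+ * [AMD_MDB_PCIE_INTR_FATAL] = { "FATAL", "Fatal error message" }.
+ */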
+static const struct {
+ const char *sym;
+ const char *str;
+} intr_cause[32] = {
+ _IC(CMPL_TIMEOUT, "Completion timeout"),
+ _IC(PM_PME_RCVD, "PM_PME message received"),
+ _IC(PME_TO_ACK_RCVD, "PME_TO_ACK message received"),
+ _IC(MISC_CORRECTABLE, "Correctable error message"),
+ _IC(NONFATAL, "Non fatal error message"),
+ _IC(FATAL, "Fatal error message"),
+};
+
+static void amd_mdb_event_irq_mask(struct irq_data *d)
+{
+ struct amd_mdb_pcie *pcie = irq_data_get_irq_chip_data(d);
+ struct dw_pcie *pci = &pcie->pci;
+ struct dw_pcie_rp *port = &pci->pp;
+ unsigned long flags;
+ u32 val;
+
+ raw_spin_lock_irqsave(&port->lock, flags);
+ val = BIT(d->hwirq);
+ writel_relaxed(val, pcie->slcr + AMD_MDB_TLP_IR_DISABLE_MISC);
+ raw_spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static void amd_mdb_event_irq_unmask(struct irq_data *d)
+{
+ struct amd_mdb_pcie *pcie = irq_data_get_irq_chip_data(d);
+ struct dw_pcie *pci = &pcie->pci;
+ struct dw_pcie_rp *port = &pci->pp;
+ unsigned long flags;
+ u32 val;
+
+ raw_spin_lock_irqsave(&port->lock, flags);
+ val = BIT(d->hwirq);
+ writel_relaxed(val, pcie->slcr + AMD_MDB_TLP_IR_ENABLE_MISC);
+ raw_spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static struct irq_chip amd_mdb_event_irq_chip = {
+ .name = "AMD MDB RC-Event",
+ .irq_mask = amd_mdb_event_irq_mask,
+ .irq_unmask = amd_mdb_event_irq_unmask,
+};
+
+static int amd_mdb_pcie_event_map(struct irq_domain *domain,
+ unsigned int irq, irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &amd_mdb_event_irq_chip,
+ handle_level_irq);
+ irq_set_chip_data(irq, domain->host_data);
+ irq_set_status_flags(irq, IRQ_LEVEL);
+
+ return 0;
+}
+
+static const struct irq_domain_ops event_domain_ops = {
+ .map = amd_mdb_pcie_event_map,
+};
+
+static irqreturn_t amd_mdb_pcie_event(int irq, void *args)
+{
+ struct amd_mdb_pcie *pcie = args;
+ unsigned long val;
+ int i;
+
+ val = readl_relaxed(pcie->slcr + AMD_MDB_TLP_IR_STATUS_MISC);
+ val &= ~readl_relaxed(pcie->slcr + AMD_MDB_TLP_IR_MASK_MISC);
+ for_each_set_bit(i, &val, 32)
+ generic_handle_domain_irq(pcie->mdb_domain, i);
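+ /* Ack the handled events (the status register is write-one-to-clear) */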
+ writel_relaxed(val, pcie->slcr + AMD_MDB_TLP_IR_STATUS_MISC);
+
+ return IRQ_HANDLED;
+}
+
+static void amd_mdb_pcie_free_irq_domains(struct amd_mdb_pcie *pcie)
+{
+ if (pcie->intx_domain) {
+ irq_domain_remove(pcie->intx_domain);
+ pcie->intx_domain = NULL;
+ }
+
+ if (pcie->mdb_domain) {
+ irq_domain_remove(pcie->mdb_domain);
+ pcie->mdb_domain = NULL;
+ }
+}
+
+static int amd_mdb_pcie_init_port(struct amd_mdb_pcie *pcie)
+{
+ unsigned long val;
+
+ /* Disable all TLP interrupts. */
+ writel_relaxed(AMD_MDB_PCIE_IMR_ALL_MASK,
+ pcie->slcr + AMD_MDB_TLP_IR_DISABLE_MISC);
+
+ /* Clear pending TLP interrupts. */
+ val = readl_relaxed(pcie->slcr + AMD_MDB_TLP_IR_STATUS_MISC);
+ val &= AMD_MDB_PCIE_IMR_ALL_MASK;
+ writel_relaxed(val, pcie->slcr + AMD_MDB_TLP_IR_STATUS_MISC);
+
+ /* Enable all TLP interrupts. */
+ writel_relaxed(AMD_MDB_PCIE_IMR_ALL_MASK,
+ pcie->slcr + AMD_MDB_TLP_IR_ENABLE_MISC);
+
+ return 0;
+}
+
+/**
+ * amd_mdb_pcie_init_irq_domains - Initialize IRQ domain
+ * @pcie: PCIe port information
+ * @pdev: Platform device
+ *
+ * Return: Returns '0' on success and error value on failure.
+ */
+static int amd_mdb_pcie_init_irq_domains(struct amd_mdb_pcie *pcie,
+ struct platform_device *pdev)
+{
+ struct dw_pcie *pci = &pcie->pci;
+ struct dw_pcie_rp *pp = &pci->pp;
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct device_node *pcie_intc_node;
+ int err;
+
+ pcie_intc_node = of_get_child_by_name(node, "interrupt-controller");
+ if (!pcie_intc_node) {
+ dev_err(dev, "No PCIe Intc node found\n");
+ return -ENODEV;
+ }
+
+ pcie->mdb_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node), 32,
+ &event_domain_ops, pcie);
+ if (!pcie->mdb_domain) {
+ err = -ENOMEM;
+ dev_err(dev, "Failed to add MDB domain\n");
+ goto out;
+ }
+
+ irq_domain_update_bus_token(pcie->mdb_domain, DOMAIN_BUS_NEXUS);
+
+ pcie->intx_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node),
+ PCI_NUM_INTX, &amd_intx_domain_ops, pcie);
+ if (!pcie->intx_domain) {
+ err = -ENOMEM;
+ dev_err(dev, "Failed to add INTx domain\n");
+ goto mdb_out;
+ }
+
+ of_node_put(pcie_intc_node);
+ irq_domain_update_bus_token(pcie->intx_domain, DOMAIN_BUS_WIRED);
+
+ raw_spin_lock_init(&pp->lock);
+
+ return 0;
+mdb_out:
+ amd_mdb_pcie_free_irq_domains(pcie);
+out:
+ of_node_put(pcie_intc_node);
+ return err;
+}
+
+static irqreturn_t amd_mdb_pcie_intr_handler(int irq, void *args)
+{
+ struct amd_mdb_pcie *pcie = args;
+ struct device *dev;
+ struct irq_data *d;
+
+ dev = pcie->pci.dev;
+
+ /*
+ * In the future, error reporting will be hooked to the AER subsystem.
+ * Currently, the driver prints a warning message to the user.
+ */
+ d = irq_domain_get_irq_data(pcie->mdb_domain, irq);
+ if (intr_cause[d->hwirq].str)
+ dev_warn(dev, "%s\n", intr_cause[d->hwirq].str);
+ else
+ dev_warn_once(dev, "Unknown IRQ %lu\n", d->hwirq);
+
+ return IRQ_HANDLED;
+}
+
+static int amd_mdb_setup_irq(struct amd_mdb_pcie *pcie,
+ struct platform_device *pdev)
+{
+ struct dw_pcie *pci = &pcie->pci;
+ struct dw_pcie_rp *pp = &pci->pp;
+ struct device *dev = &pdev->dev;
+ int i, irq, err;
+
+ amd_mdb_pcie_init_port(pcie);
+
+ pp->irq = platform_get_irq(pdev, 0);
+ if (pp->irq < 0)
+ return pp->irq;
+
+ for (i = 0; i < ARRAY_SIZE(intr_cause); i++) {
+ if (!intr_cause[i].str)
+ continue;
+
+ irq = irq_create_mapping(pcie->mdb_domain, i);
+ if (!irq) {
+ dev_err(dev, "Failed to map MDB domain interrupt\n");
+ return -ENOMEM;
+ }
+
+ err = devm_request_irq(dev, irq, amd_mdb_pcie_intr_handler,
+ IRQF_NO_THREAD, intr_cause[i].sym, pcie);
+ if (err) {
+ dev_err(dev, "Failed to request IRQ %d, err=%d\n",
+ irq, err);
+ return err;
+ }
+ }
+
+ pcie->intx_irq = irq_create_mapping(pcie->mdb_domain,
+ AMD_MDB_PCIE_INTR_INTX);
+ if (!pcie->intx_irq) {
+ dev_err(dev, "Failed to map INTx interrupt\n");
+ return -ENXIO;
+ }
+
+ err = devm_request_irq(dev, pcie->intx_irq, dw_pcie_rp_intx,
+ IRQF_NO_THREAD, NULL, pcie);
+ if (err) {
+ dev_err(dev, "Failed to request INTx IRQ %d, err=%d\n",
+ pcie->intx_irq, err);
+ return err;
+ }
+
+ /* Plug the main event handler. */
+ err = devm_request_irq(dev, pp->irq, amd_mdb_pcie_event, IRQF_NO_THREAD,
+ "amd_mdb pcie_irq", pcie);
+ if (err) {
+ dev_err(dev, "Failed to request event IRQ %d, err=%d\n",
+ pp->irq, err);
+ return err;
+ }
+
+ return 0;
+}
+
+static int amd_mdb_parse_pcie_port(struct amd_mdb_pcie *pcie)
+{
+ struct device *dev = pcie->pci.dev;
+ struct device_node *pcie_port_node __maybe_unused;
+
+ /*
+ * This platform currently supports only one Root Port, so the loop
+ * will execute only once.
+ * TODO: Enhance the driver to handle multiple Root Ports in the future.
+ */
+ for_each_child_of_node_with_prefix(dev->of_node, pcie_port_node, "pcie") {
+ pcie->perst_gpio = devm_fwnode_gpiod_get(dev, of_fwnode_handle(pcie_port_node),
+ "reset", GPIOD_OUT_HIGH, NULL);
+ if (IS_ERR(pcie->perst_gpio))
+ return dev_err_probe(dev, PTR_ERR(pcie->perst_gpio),
+ "Failed to request reset GPIO\n");
+ return 0;
+ }
+
+ return -ENODEV;
+}
+
+static int amd_mdb_add_pcie_port(struct amd_mdb_pcie *pcie,
+ struct platform_device *pdev)
+{
+ struct dw_pcie *pci = &pcie->pci;
+ struct dw_pcie_rp *pp = &pci->pp;
+ struct device *dev = &pdev->dev;
+ int err;
+
+ pcie->slcr = devm_platform_ioremap_resource_byname(pdev, "slcr");
+ if (IS_ERR(pcie->slcr))
+ return PTR_ERR(pcie->slcr);
+
+ err = amd_mdb_pcie_init_irq_domains(pcie, pdev);
+ if (err)
+ return err;
+
+ err = amd_mdb_setup_irq(pcie, pdev);
+ if (err) {
+ dev_err(dev, "Failed to set up interrupts, err=%d\n", err);
+ goto out;
+ }
+
+ pp->ops = &amd_mdb_pcie_host_ops;
+
+ if (pcie->perst_gpio) {
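+ /*
+ * Keep PERST# asserted for at least T_PVPERL, then observe the
+ * mandatory delay before the first config access once it is
+ * deasserted.
+ */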
+ mdelay(PCIE_T_PVPERL_MS);
+ gpiod_set_value_cansleep(pcie->perst_gpio, 0);
+ mdelay(PCIE_RESET_CONFIG_WAIT_MS);
+ }
+
+ err = dw_pcie_host_init(pp);
+ if (err) {
+ dev_err(dev, "Failed to initialize host, err=%d\n", err);
+ goto out;
+ }
+
+ return 0;
+
+out:
+ amd_mdb_pcie_free_irq_domains(pcie);
+ return err;
+}
+
+static int amd_mdb_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct amd_mdb_pcie *pcie;
+ struct dw_pcie *pci;
+ int ret;
+
+ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+ if (!pcie)
+ return -ENOMEM;
+
+ pci = &pcie->pci;
+ pci->dev = dev;
+
+ platform_set_drvdata(pdev, pcie);
+
+ ret = amd_mdb_parse_pcie_port(pcie);
+ /*
+ * If amd_mdb_parse_pcie_port returns -ENODEV, it indicates that the
+ * PCIe Bridge node was not found in the device tree. This is not
+ * considered a fatal error and will trigger a fallback where the
+ * reset GPIO is acquired directly from the PCIe Host Bridge node.
+ */
+ if (ret) {
+ if (ret != -ENODEV)
+ return ret;
+
+ pcie->perst_gpio = devm_gpiod_get_optional(dev, "reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(pcie->perst_gpio))
+ return dev_err_probe(dev, PTR_ERR(pcie->perst_gpio),
+ "Failed to request reset GPIO\n");
+ }
+
+ return amd_mdb_add_pcie_port(pcie, pdev);
+}
+
+static const struct of_device_id amd_mdb_pcie_of_match[] = {
+ {
+ .compatible = "amd,versal2-mdb-host",
+ },
+ {},
+};
+
+static struct platform_driver amd_mdb_pcie_driver = {
+ .driver = {
+ .name = "amd-mdb-pcie",
+ .of_match_table = amd_mdb_pcie_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = amd_mdb_pcie_probe,
+};
+
+builtin_platform_driver(amd_mdb_pcie_driver);
diff --git a/drivers/pci/controller/dwc/pcie-armada8k.c b/drivers/pci/controller/dwc/pcie-armada8k.c
index b171b6bc15c8..c2650fd0d458 100644
--- a/drivers/pci/controller/dwc/pcie-armada8k.c
+++ b/drivers/pci/controller/dwc/pcie-armada8k.c
@@ -21,16 +21,17 @@
#include <linux/platform_device.h>
#include <linux/resource.h>
#include <linux/of_pci.h>
-#include <linux/of_irq.h>
-#include <linux/gpio/consumer.h>
#include "pcie-designware.h"
+#define ARMADA8K_PCIE_MAX_LANES PCIE_LNK_X4
+
struct armada8k_pcie {
struct dw_pcie *pci;
struct clk *clk;
struct clk *clk_reg;
- struct gpio_desc *reset_gpio;
+ struct phy *phy[ARMADA8K_PCIE_MAX_LANES];
+ unsigned int phy_count;
};
#define PCIE_VENDOR_REGS_OFFSET 0x8000
@@ -57,7 +58,7 @@ struct armada8k_pcie {
#define PCIE_ARUSER_REG (PCIE_VENDOR_REGS_OFFSET + 0x5C)
#define PCIE_AWUSER_REG (PCIE_VENDOR_REGS_OFFSET + 0x60)
/*
- * AR/AW Cache defauls: Normal memory, Write-Back, Read / Write
+ * AR/AW Cache defaults: Normal memory, Write-Back, Read / Write
* allocate
*/
#define ARCACHE_DEFAULT_VALUE 0x3511
@@ -69,7 +70,76 @@ struct armada8k_pcie {
#define to_armada8k_pcie(x) dev_get_drvdata((x)->dev)
-static int armada8k_pcie_link_up(struct dw_pcie *pci)
+static void armada8k_pcie_disable_phys(struct armada8k_pcie *pcie)
+{
+ int i;
+
+ for (i = 0; i < ARMADA8K_PCIE_MAX_LANES; i++) {
+ phy_power_off(pcie->phy[i]);
+ phy_exit(pcie->phy[i]);
+ }
+}
+
+static int armada8k_pcie_enable_phys(struct armada8k_pcie *pcie)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < ARMADA8K_PCIE_MAX_LANES; i++) {
+ ret = phy_init(pcie->phy[i]);
+ if (ret)
+ return ret;
+
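+ /*
+ * The number of lanes (PHYs found) is conveyed to the PHY
+ * driver via the PCIe submode argument.
+ */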
+ ret = phy_set_mode_ext(pcie->phy[i], PHY_MODE_PCIE,
+ pcie->phy_count);
+ if (ret) {
+ phy_exit(pcie->phy[i]);
+ return ret;
+ }
+
+ ret = phy_power_on(pcie->phy[i]);
+ if (ret) {
+ phy_exit(pcie->phy[i]);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int armada8k_pcie_setup_phys(struct armada8k_pcie *pcie)
+{
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
+ struct device_node *node = dev->of_node;
+ int ret;
+ int i;
+
+ for (i = 0; i < ARMADA8K_PCIE_MAX_LANES; i++) {
+ pcie->phy[i] = devm_of_phy_get_by_index(dev, node, i);
+ if (IS_ERR(pcie->phy[i])) {
+ if (PTR_ERR(pcie->phy[i]) != -ENODEV)
+ return PTR_ERR(pcie->phy[i]);
+
+ pcie->phy[i] = NULL;
+ continue;
+ }
+
+ pcie->phy_count++;
+ }
+
+ /* Old bindings miss the PHY handle, so just warn if there is no PHY */
+ if (!pcie->phy_count)
+ dev_warn(dev, "No available PHY\n");
+
+ ret = armada8k_pcie_enable_phys(pcie);
+ if (ret)
+ dev_err(dev, "Failed to initialize PHY(s) (%d)\n", ret);
+
+ return ret;
+}
+
+static bool armada8k_pcie_link_up(struct dw_pcie *pci)
{
u32 reg;
u32 mask = PCIE_GLB_STS_RDLH_LINK_UP | PCIE_GLB_STS_PHY_LINK_UP;
@@ -77,16 +147,28 @@ static int armada8k_pcie_link_up(struct dw_pcie *pci)
reg = dw_pcie_readl_dbi(pci, PCIE_GLOBAL_STATUS_REG);
if ((reg & mask) == mask)
- return 1;
+ return true;
dev_dbg(pci->dev, "No link detected (Global-Status: 0x%08x).\n", reg);
+ return false;
+}
+
+static int armada8k_pcie_start_link(struct dw_pcie *pci)
+{
+ u32 reg;
+
+ /* Start LTSSM */
+ reg = dw_pcie_readl_dbi(pci, PCIE_GLOBAL_CONTROL_REG);
+ reg |= PCIE_APP_LTSSM_EN;
+ dw_pcie_writel_dbi(pci, PCIE_GLOBAL_CONTROL_REG, reg);
+
return 0;
}
-static void armada8k_pcie_establish_link(struct armada8k_pcie *pcie)
+static int armada8k_pcie_host_init(struct dw_pcie_rp *pp)
{
- struct dw_pcie *pci = pcie->pci;
u32 reg;
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
if (!dw_pcie_link_up(pci)) {
/* Disable LTSSM state machine to enable configuration */
@@ -122,32 +204,6 @@ static void armada8k_pcie_establish_link(struct armada8k_pcie *pcie)
PCIE_INT_C_ASSERT_MASK | PCIE_INT_D_ASSERT_MASK;
dw_pcie_writel_dbi(pci, PCIE_GLOBAL_INT_MASK1_REG, reg);
- if (!dw_pcie_link_up(pci)) {
- /* Configuration done. Start LTSSM */
- reg = dw_pcie_readl_dbi(pci, PCIE_GLOBAL_CONTROL_REG);
- reg |= PCIE_APP_LTSSM_EN;
- dw_pcie_writel_dbi(pci, PCIE_GLOBAL_CONTROL_REG, reg);
- }
-
- /* Wait until the link becomes active again */
- if (dw_pcie_wait_for_link(pci))
- dev_err(pci->dev, "Link not up after reconfiguration\n");
-}
-
-static int armada8k_pcie_host_init(struct pcie_port *pp)
-{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct armada8k_pcie *pcie = to_armada8k_pcie(pci);
-
- if (pcie->reset_gpio) {
- /* assert and then deassert the reset signal */
- gpiod_set_value_cansleep(pcie->reset_gpio, 1);
- msleep(100);
- gpiod_set_value_cansleep(pcie->reset_gpio, 0);
- }
- dw_pcie_setup_rc(pp);
- armada8k_pcie_establish_link(pcie);
-
return 0;
}
@@ -169,24 +225,22 @@ static irqreturn_t armada8k_pcie_irq_handler(int irq, void *arg)
}
static const struct dw_pcie_host_ops armada8k_pcie_host_ops = {
- .host_init = armada8k_pcie_host_init,
+ .init = armada8k_pcie_host_init,
};
static int armada8k_add_pcie_port(struct armada8k_pcie *pcie,
struct platform_device *pdev)
{
struct dw_pcie *pci = pcie->pci;
- struct pcie_port *pp = &pci->pp;
+ struct dw_pcie_rp *pp = &pci->pp;
struct device *dev = &pdev->dev;
int ret;
pp->ops = &armada8k_pcie_host_ops;
pp->irq = platform_get_irq(pdev, 0);
- if (pp->irq < 0) {
- dev_err(dev, "failed to get irq for port\n");
+ if (pp->irq < 0)
return pp->irq;
- }
ret = devm_request_irq(dev, pp->irq, armada8k_pcie_irq_handler,
IRQF_SHARED, "armada8k-pcie", pcie);
@@ -206,6 +260,7 @@ static int armada8k_add_pcie_port(struct armada8k_pcie *pcie,
static const struct dw_pcie_ops dw_pcie_ops = {
.link_up = armada8k_pcie_link_up,
+ .start_link = armada8k_pcie_start_link,
};
static int armada8k_pcie_probe(struct platform_device *pdev)
@@ -252,27 +307,24 @@ static int armada8k_pcie_probe(struct platform_device *pdev)
base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ctrl");
pci->dbi_base = devm_pci_remap_cfg_resource(dev, base);
if (IS_ERR(pci->dbi_base)) {
- dev_err(dev, "couldn't remap regs base %p\n", base);
ret = PTR_ERR(pci->dbi_base);
goto fail_clkreg;
}
- /* Get reset gpio signal and hold asserted (logically high) */
- pcie->reset_gpio = devm_gpiod_get_optional(dev, "reset",
- GPIOD_OUT_HIGH);
- if (IS_ERR(pcie->reset_gpio)) {
- ret = PTR_ERR(pcie->reset_gpio);
+ ret = armada8k_pcie_setup_phys(pcie);
+ if (ret)
goto fail_clkreg;
- }
platform_set_drvdata(pdev, pcie);
ret = armada8k_add_pcie_port(pcie, pdev);
if (ret)
- goto fail_clkreg;
+ goto disable_phy;
return 0;
+disable_phy:
+ armada8k_pcie_disable_phys(pcie);
fail_clkreg:
clk_disable_unprepare(pcie->clk_reg);
fail:
@@ -290,7 +342,7 @@ static struct platform_driver armada8k_pcie_driver = {
.probe = armada8k_pcie_probe,
.driver = {
.name = "armada8k-pcie",
- .of_match_table = of_match_ptr(armada8k_pcie_of_match),
+ .of_match_table = armada8k_pcie_of_match,
.suppress_bind_attrs = true,
},
};
diff --git a/drivers/pci/controller/dwc/pcie-artpec6.c b/drivers/pci/controller/dwc/pcie-artpec6.c
index dba83abfe764..f4a136ee2daf 100644
--- a/drivers/pci/controller/dwc/pcie-artpec6.c
+++ b/drivers/pci/controller/dwc/pcie-artpec6.c
@@ -10,7 +10,7 @@
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/resource.h>
@@ -44,16 +44,6 @@ struct artpec_pcie_of_data {
static const struct of_device_id artpec6_pcie_of_match[];
-/* PCIe Port Logic registers (memory-mapped) */
-#define PL_OFFSET 0x700
-
-#define ACK_F_ASPM_CTRL_OFF (PL_OFFSET + 0xc)
-#define ACK_N_FTS_MASK GENMASK(15, 8)
-#define ACK_N_FTS(x) (((x) << 8) & ACK_N_FTS_MASK)
-
-#define FAST_TRAINING_SEQ_MASK GENMASK(7, 0)
-#define FAST_TRAINING_SEQ(x) (((x) << 0) & FAST_TRAINING_SEQ_MASK)
-
/* ARTPEC-6 specific registers */
#define PCIECFG 0x18
#define PCIECFG_DBG_OEN BIT(24)
@@ -104,21 +94,21 @@ static void artpec6_pcie_writel(struct artpec6_pcie *artpec6_pcie, u32 offset, u
regmap_write(artpec6_pcie->regmap, offset, val);
}
-static u64 artpec6_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 pci_addr)
+static u64 artpec6_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 cpu_addr)
{
struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pci);
- struct pcie_port *pp = &pci->pp;
+ struct dw_pcie_rp *pp = &pci->pp;
struct dw_pcie_ep *ep = &pci->ep;
switch (artpec6_pcie->mode) {
case DW_PCIE_RC_TYPE:
- return pci_addr - pp->cfg0_base;
+ return cpu_addr - pp->cfg0_base;
case DW_PCIE_EP_TYPE:
- return pci_addr - ep->phys_base;
+ return cpu_addr - ep->phys_base;
default:
dev_err(pci->dev, "UNKNOWN device type\n");
}
- return pci_addr;
+ return cpu_addr;
}
static int artpec6_pcie_establish_link(struct dw_pcie *pci)
@@ -292,33 +282,6 @@ static void artpec6_pcie_init_phy(struct artpec6_pcie *artpec6_pcie)
}
}
-static void artpec6_pcie_set_nfts(struct artpec6_pcie *artpec6_pcie)
-{
- struct dw_pcie *pci = artpec6_pcie->pci;
- u32 val;
-
- if (artpec6_pcie->variant != ARTPEC7)
- return;
-
- /*
- * Increase the N_FTS (Number of Fast Training Sequences)
- * to be transmitted when transitioning from L0s to L0.
- */
- val = dw_pcie_readl_dbi(pci, ACK_F_ASPM_CTRL_OFF);
- val &= ~ACK_N_FTS_MASK;
- val |= ACK_N_FTS(180);
- dw_pcie_writel_dbi(pci, ACK_F_ASPM_CTRL_OFF, val);
-
- /*
- * Set the Number of Fast Training Sequences that the core
- * advertises as its N_FTS during Gen2 or Gen3 link training.
- */
- val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
- val &= ~FAST_TRAINING_SEQ_MASK;
- val |= FAST_TRAINING_SEQ(180);
- dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
-}
-
static void artpec6_pcie_assert_core_reset(struct artpec6_pcie *artpec6_pcie)
{
u32 val;
@@ -352,64 +315,27 @@ static void artpec6_pcie_deassert_core_reset(struct artpec6_pcie *artpec6_pcie)
usleep_range(100, 200);
}
-static void artpec6_pcie_enable_interrupts(struct artpec6_pcie *artpec6_pcie)
-{
- struct dw_pcie *pci = artpec6_pcie->pci;
- struct pcie_port *pp = &pci->pp;
-
- if (IS_ENABLED(CONFIG_PCI_MSI))
- dw_pcie_msi_init(pp);
-}
-
-static int artpec6_pcie_host_init(struct pcie_port *pp)
+static int artpec6_pcie_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pci);
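+
+ /*
+ * On ARTPEC-7, advertise 180 Fast Training Sequences; the DWC core
+ * programs pci->n_fts[] into the port logic during setup.
+ */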
+ if (artpec6_pcie->variant == ARTPEC7) {
+ pci->n_fts[0] = 180;
+ pci->n_fts[1] = 180;
+ }
artpec6_pcie_assert_core_reset(artpec6_pcie);
artpec6_pcie_init_phy(artpec6_pcie);
artpec6_pcie_deassert_core_reset(artpec6_pcie);
artpec6_pcie_wait_for_phy(artpec6_pcie);
- artpec6_pcie_set_nfts(artpec6_pcie);
- dw_pcie_setup_rc(pp);
- artpec6_pcie_establish_link(pci);
- dw_pcie_wait_for_link(pci);
- artpec6_pcie_enable_interrupts(artpec6_pcie);
return 0;
}
static const struct dw_pcie_host_ops artpec6_pcie_host_ops = {
- .host_init = artpec6_pcie_host_init,
+ .init = artpec6_pcie_host_init,
};
-static int artpec6_add_pcie_port(struct artpec6_pcie *artpec6_pcie,
- struct platform_device *pdev)
-{
- struct dw_pcie *pci = artpec6_pcie->pci;
- struct pcie_port *pp = &pci->pp;
- struct device *dev = pci->dev;
- int ret;
-
- if (IS_ENABLED(CONFIG_PCI_MSI)) {
- pp->msi_irq = platform_get_irq_byname(pdev, "msi");
- if (pp->msi_irq < 0) {
- dev_err(dev, "failed to get MSI irq\n");
- return pp->msi_irq;
- }
- }
-
- pp->ops = &artpec6_pcie_host_ops;
-
- ret = dw_pcie_host_init(pp);
- if (ret) {
- dev_err(dev, "failed to initialize host\n");
- return ret;
- }
-
- return 0;
-}
-
static void artpec6_pcie_ep_init(struct dw_pcie_ep *ep)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
@@ -420,22 +346,21 @@ static void artpec6_pcie_ep_init(struct dw_pcie_ep *ep)
artpec6_pcie_init_phy(artpec6_pcie);
artpec6_pcie_deassert_core_reset(artpec6_pcie);
artpec6_pcie_wait_for_phy(artpec6_pcie);
- artpec6_pcie_set_nfts(artpec6_pcie);
- for (bar = BAR_0; bar <= BAR_5; bar++)
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
dw_pcie_ep_reset_bar(pci, bar);
}
static int artpec6_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
- enum pci_epc_irq_type type, u16 interrupt_num)
+ unsigned int type, u16 interrupt_num)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
switch (type) {
- case PCI_EPC_IRQ_LEGACY:
- dev_err(pci->dev, "EP cannot trigger legacy IRQs\n");
+ case PCI_IRQ_INTX:
+ dev_err(pci->dev, "EP cannot trigger INTx IRQs\n");
return -EINVAL;
- case PCI_EPC_IRQ_MSI:
+ case PCI_IRQ_MSI:
return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
default:
dev_err(pci->dev, "UNKNOWN IRQ type\n");
@@ -444,62 +369,37 @@ static int artpec6_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
return 0;
}
-static struct dw_pcie_ep_ops pcie_ep_ops = {
- .ep_init = artpec6_pcie_ep_init,
- .raise_irq = artpec6_pcie_raise_irq,
+static const struct pci_epc_features artpec6_pcie_epc_features = {
+ .msi_capable = true,
};
-static int artpec6_add_pcie_ep(struct artpec6_pcie *artpec6_pcie,
- struct platform_device *pdev)
+static const struct pci_epc_features *
+artpec6_pcie_get_features(struct dw_pcie_ep *ep)
{
- int ret;
- struct dw_pcie_ep *ep;
- struct resource *res;
- struct device *dev = &pdev->dev;
- struct dw_pcie *pci = artpec6_pcie->pci;
-
- ep = &pci->ep;
- ep->ops = &pcie_ep_ops;
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi2");
- pci->dbi_base2 = devm_ioremap_resource(dev, res);
- if (IS_ERR(pci->dbi_base2))
- return PTR_ERR(pci->dbi_base2);
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
- if (!res)
- return -EINVAL;
-
- ep->phys_base = res->start;
- ep->addr_size = resource_size(res);
-
- ret = dw_pcie_ep_init(ep);
- if (ret) {
- dev_err(dev, "failed to initialize endpoint\n");
- return ret;
- }
-
- return 0;
+ return &artpec6_pcie_epc_features;
}
+static const struct dw_pcie_ep_ops pcie_ep_ops = {
+ .init = artpec6_pcie_ep_init,
+ .raise_irq = artpec6_pcie_raise_irq,
+ .get_features = artpec6_pcie_get_features,
+};
+
static int artpec6_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct dw_pcie *pci;
struct artpec6_pcie *artpec6_pcie;
- struct resource *dbi_base;
- struct resource *phy_base;
int ret;
- const struct of_device_id *match;
const struct artpec_pcie_of_data *data;
enum artpec_pcie_variants variant;
enum dw_pcie_device_mode mode;
+ u32 val;
- match = of_match_device(artpec6_pcie_of_match, dev);
- if (!match)
+ data = of_device_get_match_data(dev);
+ if (!data)
return -EINVAL;
- data = (struct artpec_pcie_of_data *)match->data;
variant = (enum artpec_pcie_variants)data->variant;
mode = (enum dw_pcie_device_mode)data->mode;
@@ -518,13 +418,8 @@ static int artpec6_pcie_probe(struct platform_device *pdev)
artpec6_pcie->variant = variant;
artpec6_pcie->mode = mode;
- dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
- pci->dbi_base = devm_ioremap_resource(dev, dbi_base);
- if (IS_ERR(pci->dbi_base))
- return PTR_ERR(pci->dbi_base);
-
- phy_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy");
- artpec6_pcie->phy_base = devm_ioremap_resource(dev, phy_base);
+ artpec6_pcie->phy_base =
+ devm_platform_ioremap_resource_byname(pdev, "phy");
if (IS_ERR(artpec6_pcie->phy_base))
return PTR_ERR(artpec6_pcie->phy_base);
@@ -541,24 +436,36 @@ static int artpec6_pcie_probe(struct platform_device *pdev)
if (!IS_ENABLED(CONFIG_PCIE_ARTPEC6_HOST))
return -ENODEV;
- ret = artpec6_add_pcie_port(artpec6_pcie, pdev);
+ pci->pp.ops = &artpec6_pcie_host_ops;
+
+ ret = dw_pcie_host_init(&pci->pp);
if (ret < 0)
return ret;
break;
- case DW_PCIE_EP_TYPE: {
- u32 val;
-
+ case DW_PCIE_EP_TYPE:
if (!IS_ENABLED(CONFIG_PCIE_ARTPEC6_EP))
return -ENODEV;
val = artpec6_pcie_readl(artpec6_pcie, PCIECFG);
val &= ~PCIECFG_DEVICE_TYPE_MASK;
artpec6_pcie_writel(artpec6_pcie, PCIECFG, val);
- ret = artpec6_add_pcie_ep(artpec6_pcie, pdev);
- if (ret < 0)
+
+ pci->ep.ops = &pcie_ep_ops;
+
+ ret = dw_pcie_ep_init(&pci->ep);
+ if (ret)
+ return ret;
+
+ ret = dw_pcie_ep_init_registers(&pci->ep);
+ if (ret) {
+ dev_err(dev, "Failed to initialize DWC endpoint registers\n");
+ dw_pcie_ep_deinit(&pci->ep);
return ret;
+ }
+
+ pci_epc_init_notify(pci->ep.epc);
+
break;
- }
default:
dev_err(dev, "INVALID device type %d\n", artpec6_pcie->mode);
}
diff --git a/drivers/pci/controller/dwc/pcie-bt1.c b/drivers/pci/controller/dwc/pcie-bt1.c
new file mode 100644
index 000000000000..1340edc18d12
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-bt1.c
@@ -0,0 +1,645 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2021 BAIKAL ELECTRONICS, JSC
+ *
+ * Authors:
+ * Vadim Vlasov <Vadim.Vlasov@baikalelectronics.ru>
+ * Serge Semin <Sergey.Semin@baikalelectronics.ru>
+ *
+ * Baikal-T1 PCIe controller driver
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/types.h>
+
+#include "pcie-designware.h"
+
+/* Baikal-T1 System CCU control registers */
+#define BT1_CCU_PCIE_CLKC 0x140
+#define BT1_CCU_PCIE_REQ_PCS_CLK BIT(16)
+#define BT1_CCU_PCIE_REQ_MAC_CLK BIT(17)
+#define BT1_CCU_PCIE_REQ_PIPE_CLK BIT(18)
+
+#define BT1_CCU_PCIE_RSTC 0x144
+#define BT1_CCU_PCIE_REQ_LINK_RST BIT(13)
+#define BT1_CCU_PCIE_REQ_SMLH_RST BIT(14)
+#define BT1_CCU_PCIE_REQ_PHY_RST BIT(16)
+#define BT1_CCU_PCIE_REQ_CORE_RST BIT(24)
+#define BT1_CCU_PCIE_REQ_STICKY_RST BIT(26)
+#define BT1_CCU_PCIE_REQ_NSTICKY_RST BIT(27)
+
+#define BT1_CCU_PCIE_PMSC 0x148
+#define BT1_CCU_PCIE_LTSSM_STATE_MASK GENMASK(5, 0)
+#define BT1_CCU_PCIE_LTSSM_DET_QUIET 0x00
+#define BT1_CCU_PCIE_LTSSM_DET_ACT 0x01
+#define BT1_CCU_PCIE_LTSSM_POLL_ACT 0x02
+#define BT1_CCU_PCIE_LTSSM_POLL_COMP 0x03
+#define BT1_CCU_PCIE_LTSSM_POLL_CONF 0x04
+#define BT1_CCU_PCIE_LTSSM_PRE_DET_QUIET 0x05
+#define BT1_CCU_PCIE_LTSSM_DET_WAIT 0x06
+#define BT1_CCU_PCIE_LTSSM_CFG_LNKWD_START 0x07
+#define BT1_CCU_PCIE_LTSSM_CFG_LNKWD_ACEPT 0x08
+#define BT1_CCU_PCIE_LTSSM_CFG_LNNUM_WAIT 0x09
+#define BT1_CCU_PCIE_LTSSM_CFG_LNNUM_ACEPT 0x0a
+#define BT1_CCU_PCIE_LTSSM_CFG_COMPLETE 0x0b
+#define BT1_CCU_PCIE_LTSSM_CFG_IDLE 0x0c
+#define BT1_CCU_PCIE_LTSSM_RCVR_LOCK 0x0d
+#define BT1_CCU_PCIE_LTSSM_RCVR_SPEED 0x0e
+#define BT1_CCU_PCIE_LTSSM_RCVR_RCVRCFG 0x0f
+#define BT1_CCU_PCIE_LTSSM_RCVR_IDLE 0x10
+#define BT1_CCU_PCIE_LTSSM_L0 0x11
+#define BT1_CCU_PCIE_LTSSM_L0S 0x12
+#define BT1_CCU_PCIE_LTSSM_L123_SEND_IDLE 0x13
+#define BT1_CCU_PCIE_LTSSM_L1_IDLE 0x14
+#define BT1_CCU_PCIE_LTSSM_L2_IDLE 0x15
+#define BT1_CCU_PCIE_LTSSM_L2_WAKE 0x16
+#define BT1_CCU_PCIE_LTSSM_DIS_ENTRY 0x17
+#define BT1_CCU_PCIE_LTSSM_DIS_IDLE 0x18
+#define BT1_CCU_PCIE_LTSSM_DISABLE 0x19
+#define BT1_CCU_PCIE_LTSSM_LPBK_ENTRY 0x1a
+#define BT1_CCU_PCIE_LTSSM_LPBK_ACTIVE 0x1b
+#define BT1_CCU_PCIE_LTSSM_LPBK_EXIT 0x1c
+#define BT1_CCU_PCIE_LTSSM_LPBK_EXIT_TOUT 0x1d
+#define BT1_CCU_PCIE_LTSSM_HOT_RST_ENTRY 0x1e
+#define BT1_CCU_PCIE_LTSSM_HOT_RST 0x1f
+#define BT1_CCU_PCIE_LTSSM_RCVR_EQ0 0x20
+#define BT1_CCU_PCIE_LTSSM_RCVR_EQ1 0x21
+#define BT1_CCU_PCIE_LTSSM_RCVR_EQ2 0x22
+#define BT1_CCU_PCIE_LTSSM_RCVR_EQ3 0x23
+#define BT1_CCU_PCIE_SMLH_LINKUP BIT(6)
+#define BT1_CCU_PCIE_RDLH_LINKUP BIT(7)
+#define BT1_CCU_PCIE_PM_LINKSTATE_L0S BIT(8)
+#define BT1_CCU_PCIE_PM_LINKSTATE_L1 BIT(9)
+#define BT1_CCU_PCIE_PM_LINKSTATE_L2 BIT(10)
+#define BT1_CCU_PCIE_L1_PENDING BIT(12)
+#define BT1_CCU_PCIE_REQ_EXIT_L1 BIT(14)
+#define BT1_CCU_PCIE_LTSSM_RCVR_EQ BIT(15)
+#define BT1_CCU_PCIE_PM_DSTAT_MASK GENMASK(18, 16)
+#define BT1_CCU_PCIE_PM_PME_EN BIT(20)
+#define BT1_CCU_PCIE_PM_PME_STATUS BIT(21)
+#define BT1_CCU_PCIE_AUX_PM_EN BIT(22)
+#define BT1_CCU_PCIE_AUX_PWR_DET BIT(23)
+#define BT1_CCU_PCIE_WAKE_DET BIT(24)
+#define BT1_CCU_PCIE_TURNOFF_REQ BIT(30)
+#define BT1_CCU_PCIE_TURNOFF_ACK BIT(31)
+
+#define BT1_CCU_PCIE_GENC 0x14c
+#define BT1_CCU_PCIE_LTSSM_EN BIT(1)
+#define BT1_CCU_PCIE_DBI2_MODE BIT(2)
+#define BT1_CCU_PCIE_MGMT_EN BIT(3)
+#define BT1_CCU_PCIE_RXLANE_FLIP_EN BIT(16)
+#define BT1_CCU_PCIE_TXLANE_FLIP_EN BIT(17)
+#define BT1_CCU_PCIE_SLV_XFER_PEND BIT(24)
+#define BT1_CCU_PCIE_RCV_XFER_PEND BIT(25)
+#define BT1_CCU_PCIE_DBI_XFER_PEND BIT(26)
+#define BT1_CCU_PCIE_DMA_XFER_PEND BIT(27)
+
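+/* True while the LTSSM is in any of the L0 ... L2_WAKE states */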
+#define BT1_CCU_PCIE_LTSSM_LINKUP(_pmsc) \
+({ \
+ int __state = FIELD_GET(BT1_CCU_PCIE_LTSSM_STATE_MASK, _pmsc); \
+ __state >= BT1_CCU_PCIE_LTSSM_L0 && __state <= BT1_CCU_PCIE_LTSSM_L2_WAKE; \
+})
+
+/* Baikal-T1 PCIe specific control registers */
+#define BT1_PCIE_AXI2MGM_LANENUM 0xd04
+#define BT1_PCIE_AXI2MGM_LANESEL_MASK GENMASK(3, 0)
+
+#define BT1_PCIE_AXI2MGM_ADDRCTL 0xd08
+#define BT1_PCIE_AXI2MGM_PHYREG_ADDR_MASK GENMASK(20, 0)
+#define BT1_PCIE_AXI2MGM_READ_FLAG BIT(29)
+#define BT1_PCIE_AXI2MGM_DONE BIT(30)
+#define BT1_PCIE_AXI2MGM_BUSY BIT(31)
+
+#define BT1_PCIE_AXI2MGM_WRITEDATA 0xd0c
+#define BT1_PCIE_AXI2MGM_WDATA GENMASK(15, 0)
+
+#define BT1_PCIE_AXI2MGM_READDATA 0xd10
+#define BT1_PCIE_AXI2MGM_RDATA GENMASK(15, 0)
+
+/* Generic Baikal-T1 PCIe interface resources */
+#define BT1_PCIE_NUM_APP_CLKS ARRAY_SIZE(bt1_pcie_app_clks)
+#define BT1_PCIE_NUM_CORE_CLKS ARRAY_SIZE(bt1_pcie_core_clks)
+#define BT1_PCIE_NUM_APP_RSTS ARRAY_SIZE(bt1_pcie_app_rsts)
+#define BT1_PCIE_NUM_CORE_RSTS ARRAY_SIZE(bt1_pcie_core_rsts)
+
+/* PCIe bus setup delays and timeouts */
+#define BT1_PCIE_RST_DELAY_MS 100
+#define BT1_PCIE_RUN_DELAY_US 100
+#define BT1_PCIE_REQ_DELAY_US 1
+#define BT1_PCIE_REQ_TIMEOUT_US 1000
+#define BT1_PCIE_LNK_DELAY_US 1000
+#define BT1_PCIE_LNK_TIMEOUT_US 1000000
+
+static const enum dw_pcie_app_clk bt1_pcie_app_clks[] = {
+ DW_PCIE_DBI_CLK, DW_PCIE_MSTR_CLK, DW_PCIE_SLV_CLK,
+};
+
+static const enum dw_pcie_core_clk bt1_pcie_core_clks[] = {
+ DW_PCIE_REF_CLK,
+};
+
+static const enum dw_pcie_app_rst bt1_pcie_app_rsts[] = {
+ DW_PCIE_MSTR_RST, DW_PCIE_SLV_RST,
+};
+
+static const enum dw_pcie_core_rst bt1_pcie_core_rsts[] = {
+ DW_PCIE_NON_STICKY_RST, DW_PCIE_STICKY_RST, DW_PCIE_CORE_RST,
+ DW_PCIE_PIPE_RST, DW_PCIE_PHY_RST, DW_PCIE_HOT_RST, DW_PCIE_PWR_RST,
+};
+
+struct bt1_pcie {
+ struct dw_pcie dw;
+ struct platform_device *pdev;
+ struct regmap *sys_regs;
+};
+#define to_bt1_pcie(_dw) container_of(_dw, struct bt1_pcie, dw)
+
+/*
+ * Baikal-T1 MMIO space must be accessed with dword-aligned instructions.
+ * Note the methods below are optimized so that dword operations, being
+ * the most frequently used ones, are performed with minimum overhead.
+ */
+static int bt1_pcie_read_mmio(void __iomem *addr, int size, u32 *val)
+{
+ unsigned int ofs = (uintptr_t)addr & 0x3;
+
+ if (!IS_ALIGNED((uintptr_t)addr, size))
+ return -EINVAL;
+
+ *val = readl(addr - ofs) >> ofs * BITS_PER_BYTE;
+ if (size == 4) {
+ return 0;
+ } else if (size == 2) {
+ *val &= 0xffff;
+ return 0;
+ } else if (size == 1) {
+ *val &= 0xff;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int bt1_pcie_write_mmio(void __iomem *addr, int size, u32 val)
+{
+ unsigned int ofs = (uintptr_t)addr & 0x3;
+ u32 tmp, mask;
+
+ if (!IS_ALIGNED((uintptr_t)addr, size))
+ return -EINVAL;
+
+ if (size == 4) {
+ writel(val, addr);
+ return 0;
+ } else if (size == 2 || size == 1) {
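+ /*
+ * Read-modify-write the containing dword: e.g. a 2-byte write at
+ * offset 0x2 preserves bits 15:0 and replaces bits 31:16.
+ */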
+ mask = GENMASK(size * BITS_PER_BYTE - 1, 0);
+ tmp = readl(addr - ofs) & ~(mask << ofs * BITS_PER_BYTE);
+ tmp |= (val & mask) << ofs * BITS_PER_BYTE;
+ writel(tmp, addr - ofs);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static u32 bt1_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg,
+ size_t size)
+{
+ int ret;
+ u32 val;
+
+ ret = bt1_pcie_read_mmio(base + reg, size, &val);
+ if (ret) {
+ dev_err(pci->dev, "Read DBI address failed\n");
+ return ~0U;
+ }
+
+ return val;
+}
+
+static void bt1_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg,
+ size_t size, u32 val)
+{
+ int ret;
+
+ ret = bt1_pcie_write_mmio(base + reg, size, val);
+ if (ret)
+ dev_err(pci->dev, "Write DBI address failed\n");
+}
+
+static void bt1_pcie_write_dbi2(struct dw_pcie *pci, void __iomem *base, u32 reg,
+ size_t size, u32 val)
+{
+ struct bt1_pcie *btpci = to_bt1_pcie(pci);
+ int ret;
+
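+ /*
+ * The shadow (DBI2) registers share the window with the regular DBI
+ * space; access to them is gated by the CCU DBI2_MODE flag.
+ */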
+ regmap_update_bits(btpci->sys_regs, BT1_CCU_PCIE_GENC,
+ BT1_CCU_PCIE_DBI2_MODE, BT1_CCU_PCIE_DBI2_MODE);
+
+ ret = bt1_pcie_write_mmio(base + reg, size, val);
+ if (ret)
+ dev_err(pci->dev, "Write DBI2 address failed\n");
+
+ regmap_update_bits(btpci->sys_regs, BT1_CCU_PCIE_GENC,
+ BT1_CCU_PCIE_DBI2_MODE, 0);
+}
+
+static int bt1_pcie_start_link(struct dw_pcie *pci)
+{
+ struct bt1_pcie *btpci = to_bt1_pcie(pci);
+ u32 val;
+ int ret;
+
+	/*
+	 * Enable the LTSSM and make sure it manages to establish both the PHY
+	 * and data links. This procedure is enough to bring the link up at
+	 * the initial 2.5 GT/s speed.
+	 */
+ regmap_update_bits(btpci->sys_regs, BT1_CCU_PCIE_GENC,
+ BT1_CCU_PCIE_LTSSM_EN, BT1_CCU_PCIE_LTSSM_EN);
+
+ ret = regmap_read_poll_timeout(btpci->sys_regs, BT1_CCU_PCIE_PMSC, val,
+ (val & BT1_CCU_PCIE_SMLH_LINKUP),
+ BT1_PCIE_LNK_DELAY_US, BT1_PCIE_LNK_TIMEOUT_US);
+ if (ret) {
+ dev_err(pci->dev, "LTSSM failed to set PHY link up\n");
+ return ret;
+ }
+
+ ret = regmap_read_poll_timeout(btpci->sys_regs, BT1_CCU_PCIE_PMSC, val,
+ (val & BT1_CCU_PCIE_RDLH_LINKUP),
+ BT1_PCIE_LNK_DELAY_US, BT1_PCIE_LNK_TIMEOUT_US);
+ if (ret) {
+ dev_err(pci->dev, "LTSSM failed to set data link up\n");
+ return ret;
+ }
+
+	/*
+	 * Trigger a direct speed change once the link is established in order
+	 * to reach a higher bus speed (up to Gen.3 - 8.0 GT/s). Without it
+	 * the link would stay at the initial 2.5 GT/s speed.
+	 */
+ val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
+ val |= PORT_LOGIC_SPEED_CHANGE;
+ dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
+
+ ret = regmap_read_poll_timeout(btpci->sys_regs, BT1_CCU_PCIE_PMSC, val,
+ BT1_CCU_PCIE_LTSSM_LINKUP(val),
+ BT1_PCIE_LNK_DELAY_US, BT1_PCIE_LNK_TIMEOUT_US);
+ if (ret)
+ dev_err(pci->dev, "LTSSM failed to get into L0 state\n");
+
+ return ret;
+}
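
Each wait above is a regmap_read_poll_timeout() call. Hand-expanded, it amounts to a bounded poll loop of the following shape (a sketch only; read_status() and sleep_us() are placeholders, not kernel APIs):

	#include <errno.h>
	#include <stdint.h>

	extern uint32_t read_status(void);	/* placeholder for regmap_read() */
	extern void sleep_us(unsigned int us);	/* placeholder for usleep_range() */

	static int poll_linkup(uint32_t mask, unsigned int delay_us,
			       unsigned int timeout_us)
	{
		unsigned int waited = 0;

		for (;;) {
			if (read_status() & mask)
				return 0;		/* condition met */
			if (waited >= timeout_us)
				return -ETIMEDOUT;	/* give up, as the driver does */
			sleep_us(delay_us);
			waited += delay_us;
		}
	}
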
+
+static void bt1_pcie_stop_link(struct dw_pcie *pci)
+{
+ struct bt1_pcie *btpci = to_bt1_pcie(pci);
+
+ regmap_update_bits(btpci->sys_regs, BT1_CCU_PCIE_GENC,
+ BT1_CCU_PCIE_LTSSM_EN, 0);
+}
+
+static const struct dw_pcie_ops bt1_pcie_ops = {
+ .read_dbi = bt1_pcie_read_dbi,
+ .write_dbi = bt1_pcie_write_dbi,
+ .write_dbi2 = bt1_pcie_write_dbi2,
+ .start_link = bt1_pcie_start_link,
+ .stop_link = bt1_pcie_stop_link,
+};
+
+static struct pci_ops bt1_pci_ops = {
+ .map_bus = dw_pcie_own_conf_map_bus,
+ .read = pci_generic_config_read32,
+ .write = pci_generic_config_write32,
+};
+
+static int bt1_pcie_get_resources(struct bt1_pcie *btpci)
+{
+ struct device *dev = btpci->dw.dev;
+ int i;
+
+	/* DBI access must be performed with dword-aligned IOs */
+ btpci->dw.pp.bridge->ops = &bt1_pci_ops;
+
+	/*
+	 * These CSRs are MMIO-backed, so the regmap read/write methods can't
+	 * fail and their status isn't checked below.
+	 */
+ btpci->sys_regs =
+ syscon_regmap_lookup_by_phandle(dev->of_node, "baikal,bt1-syscon");
+ if (IS_ERR(btpci->sys_regs))
+ return dev_err_probe(dev, PTR_ERR(btpci->sys_regs),
+ "Failed to get syscon\n");
+
+ /* Make sure all the required resources have been specified */
+ for (i = 0; i < BT1_PCIE_NUM_APP_CLKS; i++) {
+ if (!btpci->dw.app_clks[bt1_pcie_app_clks[i]].clk) {
+ dev_err(dev, "App clocks set is incomplete\n");
+ return -ENOENT;
+ }
+ }
+
+ for (i = 0; i < BT1_PCIE_NUM_CORE_CLKS; i++) {
+ if (!btpci->dw.core_clks[bt1_pcie_core_clks[i]].clk) {
+ dev_err(dev, "Core clocks set is incomplete\n");
+ return -ENOENT;
+ }
+ }
+
+ for (i = 0; i < BT1_PCIE_NUM_APP_RSTS; i++) {
+ if (!btpci->dw.app_rsts[bt1_pcie_app_rsts[i]].rstc) {
+ dev_err(dev, "App resets set is incomplete\n");
+ return -ENOENT;
+ }
+ }
+
+ for (i = 0; i < BT1_PCIE_NUM_CORE_RSTS; i++) {
+ if (!btpci->dw.core_rsts[bt1_pcie_core_rsts[i]].rstc) {
+ dev_err(dev, "Core resets set is incomplete\n");
+ return -ENOENT;
+ }
+ }
+
+ return 0;
+}
+
+static void bt1_pcie_full_stop_bus(struct bt1_pcie *btpci, bool init)
+{
+ struct device *dev = btpci->dw.dev;
+ struct dw_pcie *pci = &btpci->dw;
+ int ret;
+
+	/* Make sure the LTSSM is disabled */
+ regmap_update_bits(btpci->sys_regs, BT1_CCU_PCIE_GENC,
+ BT1_CCU_PCIE_LTSSM_EN, 0);
+
+ /*
+ * Application reset controls are trigger-based so assert the core
+ * resets only.
+ */
+ ret = reset_control_bulk_assert(DW_PCIE_NUM_CORE_RSTS, pci->core_rsts);
+ if (ret)
+ dev_err(dev, "Failed to assert core resets\n");
+
+	/*
+	 * On the init stage the clocks haven't been enabled yet (at least
+	 * judging by the clk enable counters), so they only need to be
+	 * disabled afterwards.
+	 */
+ if (!init) {
+ clk_bulk_disable_unprepare(DW_PCIE_NUM_CORE_CLKS, pci->core_clks);
+
+ clk_bulk_disable_unprepare(DW_PCIE_NUM_APP_CLKS, pci->app_clks);
+ }
+
+ /* The peripheral devices are unavailable anyway so reset them too */
+ gpiod_set_value_cansleep(pci->pe_rst, 1);
+
+ /* Make sure all the resets are settled */
+ msleep(BT1_PCIE_RST_DELAY_MS);
+}
+
+/*
+ * Implements the cold reset procedure in accordance with the reference manual
+ * and available PM signals.
+ */
+static int bt1_pcie_cold_start_bus(struct bt1_pcie *btpci)
+{
+ struct device *dev = btpci->dw.dev;
+ struct dw_pcie *pci = &btpci->dw;
+ u32 val;
+ int ret;
+
+ /* First get out of the Power/Hot reset state */
+ ret = reset_control_deassert(pci->core_rsts[DW_PCIE_PWR_RST].rstc);
+ if (ret) {
+		dev_err(dev, "Failed to deassert power reset\n");
+ return ret;
+ }
+
+ ret = reset_control_deassert(pci->core_rsts[DW_PCIE_HOT_RST].rstc);
+ if (ret) {
+ dev_err(dev, "Failed to deassert hot reset\n");
+ goto err_assert_pwr_rst;
+ }
+
+ /* Wait for the PM-core to stop requesting the PHY reset */
+ ret = regmap_read_poll_timeout(btpci->sys_regs, BT1_CCU_PCIE_RSTC, val,
+ !(val & BT1_CCU_PCIE_REQ_PHY_RST),
+ BT1_PCIE_REQ_DELAY_US, BT1_PCIE_REQ_TIMEOUT_US);
+ if (ret) {
+ dev_err(dev, "Timed out waiting for PM to stop PHY resetting\n");
+ goto err_assert_hot_rst;
+ }
+
+ ret = reset_control_deassert(pci->core_rsts[DW_PCIE_PHY_RST].rstc);
+ if (ret) {
+ dev_err(dev, "Failed to deassert PHY reset\n");
+ goto err_assert_hot_rst;
+ }
+
+	/* Clocks can now be enabled, but the ref one is crucial at this stage */
+ ret = clk_bulk_prepare_enable(DW_PCIE_NUM_APP_CLKS, pci->app_clks);
+ if (ret) {
+ dev_err(dev, "Failed to enable app clocks\n");
+ goto err_assert_phy_rst;
+ }
+
+ ret = clk_bulk_prepare_enable(DW_PCIE_NUM_CORE_CLKS, pci->core_clks);
+ if (ret) {
+		dev_err(dev, "Failed to enable core clocks\n");
+ goto err_disable_app_clk;
+ }
+
+ /* Wait for the PM to stop requesting the controller core reset */
+ ret = regmap_read_poll_timeout(btpci->sys_regs, BT1_CCU_PCIE_RSTC, val,
+ !(val & BT1_CCU_PCIE_REQ_CORE_RST),
+ BT1_PCIE_REQ_DELAY_US, BT1_PCIE_REQ_TIMEOUT_US);
+ if (ret) {
+ dev_err(dev, "Timed out waiting for PM to stop core resetting\n");
+ goto err_disable_core_clk;
+ }
+
+	/* The PCS-PIPE interface and controller core can now be activated */
+ ret = reset_control_deassert(pci->core_rsts[DW_PCIE_PIPE_RST].rstc);
+ if (ret) {
+ dev_err(dev, "Failed to deassert PIPE reset\n");
+ goto err_disable_core_clk;
+ }
+
+ ret = reset_control_deassert(pci->core_rsts[DW_PCIE_CORE_RST].rstc);
+ if (ret) {
+ dev_err(dev, "Failed to deassert core reset\n");
+ goto err_assert_pipe_rst;
+ }
+
+ /* It's recommended to reset the core and application logic together */
+ ret = reset_control_bulk_reset(DW_PCIE_NUM_APP_RSTS, pci->app_rsts);
+ if (ret) {
+ dev_err(dev, "Failed to reset app domain\n");
+ goto err_assert_core_rst;
+ }
+
+	/* The sticky/non-sticky CSR resets can now be deasserted too */
+ ret = reset_control_deassert(pci->core_rsts[DW_PCIE_STICKY_RST].rstc);
+ if (ret) {
+ dev_err(dev, "Failed to deassert sticky reset\n");
+ goto err_assert_core_rst;
+ }
+
+ ret = reset_control_deassert(pci->core_rsts[DW_PCIE_NON_STICKY_RST].rstc);
+ if (ret) {
+ dev_err(dev, "Failed to deassert non-sticky reset\n");
+ goto err_assert_sticky_rst;
+ }
+
+ /* Activate the PCIe bus peripheral devices */
+ gpiod_set_value_cansleep(pci->pe_rst, 0);
+
+ /* Make sure the state is settled (LTSSM is still disabled though) */
+ usleep_range(BT1_PCIE_RUN_DELAY_US, BT1_PCIE_RUN_DELAY_US + 100);
+
+ return 0;
+
+err_assert_sticky_rst:
+ reset_control_assert(pci->core_rsts[DW_PCIE_STICKY_RST].rstc);
+
+err_assert_core_rst:
+ reset_control_assert(pci->core_rsts[DW_PCIE_CORE_RST].rstc);
+
+err_assert_pipe_rst:
+ reset_control_assert(pci->core_rsts[DW_PCIE_PIPE_RST].rstc);
+
+err_disable_core_clk:
+ clk_bulk_disable_unprepare(DW_PCIE_NUM_CORE_CLKS, pci->core_clks);
+
+err_disable_app_clk:
+ clk_bulk_disable_unprepare(DW_PCIE_NUM_APP_CLKS, pci->app_clks);
+
+err_assert_phy_rst:
+ reset_control_assert(pci->core_rsts[DW_PCIE_PHY_RST].rstc);
+
+err_assert_hot_rst:
+ reset_control_assert(pci->core_rsts[DW_PCIE_HOT_RST].rstc);
+
+err_assert_pwr_rst:
+ reset_control_assert(pci->core_rsts[DW_PCIE_PWR_RST].rstc);
+
+ return ret;
+}
+
+static int bt1_pcie_host_init(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct bt1_pcie *btpci = to_bt1_pcie(pci);
+ int ret;
+
+ ret = bt1_pcie_get_resources(btpci);
+ if (ret)
+ return ret;
+
+ bt1_pcie_full_stop_bus(btpci, true);
+
+ return bt1_pcie_cold_start_bus(btpci);
+}
+
+static void bt1_pcie_host_deinit(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct bt1_pcie *btpci = to_bt1_pcie(pci);
+
+ bt1_pcie_full_stop_bus(btpci, false);
+}
+
+static const struct dw_pcie_host_ops bt1_pcie_host_ops = {
+ .init = bt1_pcie_host_init,
+ .deinit = bt1_pcie_host_deinit,
+};
+
+static struct bt1_pcie *bt1_pcie_create_data(struct platform_device *pdev)
+{
+ struct bt1_pcie *btpci;
+
+ btpci = devm_kzalloc(&pdev->dev, sizeof(*btpci), GFP_KERNEL);
+ if (!btpci)
+ return ERR_PTR(-ENOMEM);
+
+ btpci->pdev = pdev;
+
+ platform_set_drvdata(pdev, btpci);
+
+ return btpci;
+}
+
+static int bt1_pcie_add_port(struct bt1_pcie *btpci)
+{
+ struct device *dev = &btpci->pdev->dev;
+ int ret;
+
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
+ if (ret)
+ return ret;
+
+ btpci->dw.version = DW_PCIE_VER_460A;
+ btpci->dw.dev = dev;
+ btpci->dw.ops = &bt1_pcie_ops;
+
+ btpci->dw.pp.num_vectors = MAX_MSI_IRQS;
+ btpci->dw.pp.ops = &bt1_pcie_host_ops;
+
+ dw_pcie_cap_set(&btpci->dw, REQ_RES);
+
+	ret = dw_pcie_host_init(&btpci->dw.pp);
+	if (ret)
+		dev_err_probe(dev, ret, "Failed to initialize DWC PCIe host\n");
+
+	return ret;
+}
+
+static void bt1_pcie_del_port(struct bt1_pcie *btpci)
+{
+ dw_pcie_host_deinit(&btpci->dw.pp);
+}
+
+static int bt1_pcie_probe(struct platform_device *pdev)
+{
+ struct bt1_pcie *btpci;
+
+ btpci = bt1_pcie_create_data(pdev);
+ if (IS_ERR(btpci))
+ return PTR_ERR(btpci);
+
+ return bt1_pcie_add_port(btpci);
+}
+
+static void bt1_pcie_remove(struct platform_device *pdev)
+{
+ struct bt1_pcie *btpci = platform_get_drvdata(pdev);
+
+ bt1_pcie_del_port(btpci);
+}
+
+static const struct of_device_id bt1_pcie_of_match[] = {
+ { .compatible = "baikal,bt1-pcie" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, bt1_pcie_of_match);
+
+static struct platform_driver bt1_pcie_driver = {
+ .probe = bt1_pcie_probe,
+ .remove = bt1_pcie_remove,
+ .driver = {
+ .name = "bt1-pcie",
+ .of_match_table = bt1_pcie_of_match,
+ },
+};
+module_platform_driver(bt1_pcie_driver);
+
+MODULE_AUTHOR("Serge Semin <Sergey.Semin@baikalelectronics.ru>");
+MODULE_DESCRIPTION("Baikal-T1 PCIe driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pci/controller/dwc/pcie-designware-debugfs.c b/drivers/pci/controller/dwc/pcie-designware-debugfs.c
new file mode 100644
index 000000000000..0fbf86c0b97e
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-designware-debugfs.c
@@ -0,0 +1,927 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Synopsys DesignWare PCIe controller debugfs driver
+ *
+ * Copyright (C) 2025 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Author: Shradha Todi <shradha.t@samsung.com>
+ */
+
+#include <linux/debugfs.h>
+
+#include "pcie-designware.h"
+
+#define SD_STATUS_L1LANE_REG 0xb0
+#define PIPE_RXVALID BIT(18)
+#define PIPE_DETECT_LANE BIT(17)
+#define LANE_SELECT GENMASK(3, 0)
+
+#define ERR_INJ0_OFF 0x34
+#define EINJ_VAL_DIFF GENMASK(28, 16)
+#define EINJ_VC_NUM GENMASK(14, 12)
+#define EINJ_TYPE_SHIFT 8
+#define EINJ0_TYPE GENMASK(11, 8)
+#define EINJ1_TYPE BIT(8)
+#define EINJ2_TYPE GENMASK(9, 8)
+#define EINJ3_TYPE GENMASK(10, 8)
+#define EINJ4_TYPE GENMASK(10, 8)
+#define EINJ5_TYPE BIT(8)
+#define EINJ_COUNT GENMASK(7, 0)
+
+#define ERR_INJ_ENABLE_REG 0x30
+
+#define RAS_DES_EVENT_COUNTER_DATA_REG 0xc
+
+#define RAS_DES_EVENT_COUNTER_CTRL_REG 0x8
+#define EVENT_COUNTER_GROUP_SELECT GENMASK(27, 24)
+#define EVENT_COUNTER_EVENT_SELECT GENMASK(23, 16)
+#define EVENT_COUNTER_LANE_SELECT GENMASK(11, 8)
+#define EVENT_COUNTER_STATUS BIT(7)
+#define EVENT_COUNTER_ENABLE GENMASK(4, 2)
+#define PER_EVENT_ON 0x3
+#define PER_EVENT_OFF 0x1
+
+#define DWC_DEBUGFS_BUF_MAX 128
+
+/**
+ * struct dwc_pcie_rasdes_info - Stores controller common information
+ * @ras_cap_offset: RAS DES vendor specific extended capability offset
+ * @reg_event_lock: Mutex used for RAS DES shadow event registers
+ *
+ * Any parameter constant to all files of the debugfs hierarchy for a single
+ * controller will be stored in this struct. It is allocated and assigned to
+ * controller specific struct dw_pcie during initialization.
+ */
+struct dwc_pcie_rasdes_info {
+ u32 ras_cap_offset;
+ struct mutex reg_event_lock;
+};
+
+/**
+ * struct dwc_pcie_rasdes_priv - Stores file-specific private data
+ * @pci: Reference to the dw_pcie structure
+ * @idx: Index of the file's entry in the corresponding err_inj_list or
+ *       event_list array
+ *
+ * All debugfs files will have this struct as their private data.
+ */
+struct dwc_pcie_rasdes_priv {
+ struct dw_pcie *pci;
+ int idx;
+};
+
+/**
+ * struct dwc_pcie_err_inj - Store details about each error injection
+ * supported by DWC RAS DES
+ * @name: Name of the error that can be injected
+ * @err_inj_group: Group number to which the error belongs. The value
+ * can range from 0 to 5
+ * @err_inj_type: Each group can have multiple types of error
+ */
+struct dwc_pcie_err_inj {
+ const char *name;
+ u32 err_inj_group;
+ u32 err_inj_type;
+};
+
+static const struct dwc_pcie_err_inj err_inj_list[] = {
+ {"tx_lcrc", 0x0, 0x0},
+ {"b16_crc_dllp", 0x0, 0x1},
+ {"b16_crc_upd_fc", 0x0, 0x2},
+ {"tx_ecrc", 0x0, 0x3},
+ {"fcrc_tlp", 0x0, 0x4},
+ {"parity_tsos", 0x0, 0x5},
+ {"parity_skpos", 0x0, 0x6},
+ {"rx_lcrc", 0x0, 0x8},
+ {"rx_ecrc", 0x0, 0xb},
+ {"tlp_err_seq", 0x1, 0x0},
+ {"ack_nak_dllp_seq", 0x1, 0x1},
+ {"ack_nak_dllp", 0x2, 0x0},
+ {"upd_fc_dllp", 0x2, 0x1},
+ {"nak_dllp", 0x2, 0x2},
+ {"inv_sync_hdr_sym", 0x3, 0x0},
+ {"com_pad_ts1", 0x3, 0x1},
+ {"com_pad_ts2", 0x3, 0x2},
+ {"com_fts", 0x3, 0x3},
+ {"com_idl", 0x3, 0x4},
+ {"end_edb", 0x3, 0x5},
+ {"stp_sdp", 0x3, 0x6},
+ {"com_skp", 0x3, 0x7},
+ {"posted_tlp_hdr", 0x4, 0x0},
+ {"non_post_tlp_hdr", 0x4, 0x1},
+ {"cmpl_tlp_hdr", 0x4, 0x2},
+ {"posted_tlp_data", 0x4, 0x4},
+ {"non_post_tlp_data", 0x4, 0x5},
+ {"cmpl_tlp_data", 0x4, 0x6},
+ {"duplicate_tlp", 0x5, 0x0},
+ {"nullified_tlp", 0x5, 0x1},
+};
+
+static const u32 err_inj_type_mask[] = {
+ EINJ0_TYPE,
+ EINJ1_TYPE,
+ EINJ2_TYPE,
+ EINJ3_TYPE,
+ EINJ4_TYPE,
+ EINJ5_TYPE,
+};
+
+/**
+ * struct dwc_pcie_event_counter - Store details about each event counter
+ * supported in DWC RAS DES
+ * @name: Name of the error counter
+ * @group_no: Group number that the event belongs to. The value can range
+ * from 0 to 4
+ * @event_no: Event number of the particular event. The value ranges are:
+ * Group 0: 0 - 10
+ * Group 1: 5 - 13
+ * Group 2: 0 - 7
+ * Group 3: 0 - 5
+ * Group 4: 0 - 1
+ */
+struct dwc_pcie_event_counter {
+ const char *name;
+ u32 group_no;
+ u32 event_no;
+};
+
+static const struct dwc_pcie_event_counter event_list[] = {
+ {"ebuf_overflow", 0x0, 0x0},
+ {"ebuf_underrun", 0x0, 0x1},
+ {"decode_err", 0x0, 0x2},
+ {"running_disparity_err", 0x0, 0x3},
+ {"skp_os_parity_err", 0x0, 0x4},
+ {"sync_header_err", 0x0, 0x5},
+ {"rx_valid_deassertion", 0x0, 0x6},
+ {"ctl_skp_os_parity_err", 0x0, 0x7},
+ {"retimer_parity_err_1st", 0x0, 0x8},
+ {"retimer_parity_err_2nd", 0x0, 0x9},
+ {"margin_crc_parity_err", 0x0, 0xA},
+ {"detect_ei_infer", 0x1, 0x5},
+ {"receiver_err", 0x1, 0x6},
+ {"rx_recovery_req", 0x1, 0x7},
+ {"n_fts_timeout", 0x1, 0x8},
+ {"framing_err", 0x1, 0x9},
+ {"deskew_err", 0x1, 0xa},
+ {"framing_err_in_l0", 0x1, 0xc},
+ {"deskew_uncompleted_err", 0x1, 0xd},
+ {"bad_tlp", 0x2, 0x0},
+ {"lcrc_err", 0x2, 0x1},
+ {"bad_dllp", 0x2, 0x2},
+ {"replay_num_rollover", 0x2, 0x3},
+ {"replay_timeout", 0x2, 0x4},
+ {"rx_nak_dllp", 0x2, 0x5},
+ {"tx_nak_dllp", 0x2, 0x6},
+ {"retry_tlp", 0x2, 0x7},
+ {"fc_timeout", 0x3, 0x0},
+ {"poisoned_tlp", 0x3, 0x1},
+ {"ecrc_error", 0x3, 0x2},
+ {"unsupported_request", 0x3, 0x3},
+ {"completer_abort", 0x3, 0x4},
+ {"completion_timeout", 0x3, 0x5},
+ {"ebuf_skp_add", 0x4, 0x0},
+ {"ebuf_skp_del", 0x4, 0x1},
+};
+
+static ssize_t lane_detect_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct dw_pcie *pci = file->private_data;
+ struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
+ char debugfs_buf[DWC_DEBUGFS_BUF_MAX];
+ ssize_t pos;
+ u32 val;
+
+ val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + SD_STATUS_L1LANE_REG);
+ val = FIELD_GET(PIPE_DETECT_LANE, val);
+ if (val)
+ pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "Lane Detected\n");
+ else
+ pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "Lane Undetected\n");
+
+ return simple_read_from_buffer(buf, count, ppos, debugfs_buf, pos);
+}
+
+static ssize_t lane_detect_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct dw_pcie *pci = file->private_data;
+ struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
+	u32 lane, val;
+	int ret;
+
+	ret = kstrtou32_from_user(buf, count, 0, &lane);
+	if (ret)
+		return ret;
+
+ val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + SD_STATUS_L1LANE_REG);
+ val &= ~(LANE_SELECT);
+ val |= FIELD_PREP(LANE_SELECT, lane);
+ dw_pcie_writel_dbi(pci, rinfo->ras_cap_offset + SD_STATUS_L1LANE_REG, val);
+
+ return count;
+}
+
+static ssize_t rx_valid_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct dw_pcie *pci = file->private_data;
+ struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
+ char debugfs_buf[DWC_DEBUGFS_BUF_MAX];
+ ssize_t pos;
+ u32 val;
+
+ val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + SD_STATUS_L1LANE_REG);
+ val = FIELD_GET(PIPE_RXVALID, val);
+ if (val)
+ pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "RX Valid\n");
+ else
+ pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "RX Invalid\n");
+
+ return simple_read_from_buffer(buf, count, ppos, debugfs_buf, pos);
+}
+
+static ssize_t rx_valid_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return lane_detect_write(file, buf, count, ppos);
+}
+
+static ssize_t err_inj_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct dwc_pcie_rasdes_priv *pdata = file->private_data;
+ struct dw_pcie *pci = pdata->pci;
+ struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
+	u32 val, counter, vc_num, err_group, type_mask;
+	int val_diff = 0, ret;
+	char *kern_buf;
+
+ err_group = err_inj_list[pdata->idx].err_inj_group;
+ type_mask = err_inj_type_mask[err_group];
+
+ kern_buf = memdup_user_nul(buf, count);
+ if (IS_ERR(kern_buf))
+ return PTR_ERR(kern_buf);
+
+	if (err_group == 4) {
+		ret = sscanf(kern_buf, "%u %d %u", &counter, &val_diff, &vc_num);
+		if (ret != 3 || val_diff < -4095 || val_diff > 4095) {
+			kfree(kern_buf);
+			return -EINVAL;
+		}
+	} else if (err_group == 1) {
+		ret = sscanf(kern_buf, "%u %d", &counter, &val_diff);
+		if (ret != 2 || val_diff < -4095 || val_diff > 4095) {
+			kfree(kern_buf);
+			return -EINVAL;
+		}
+	} else {
+		ret = kstrtou32(kern_buf, 0, &counter);
+		if (ret) {
+			kfree(kern_buf);
+			return ret;
+		}
+	}
+
+ val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + ERR_INJ0_OFF + (0x4 * err_group));
+ val &= ~(type_mask | EINJ_COUNT);
+ val |= ((err_inj_list[pdata->idx].err_inj_type << EINJ_TYPE_SHIFT) & type_mask);
+ val |= FIELD_PREP(EINJ_COUNT, counter);
+
+ if (err_group == 1 || err_group == 4) {
+ val &= ~(EINJ_VAL_DIFF);
+ val |= FIELD_PREP(EINJ_VAL_DIFF, val_diff);
+ }
+ if (err_group == 4) {
+ val &= ~(EINJ_VC_NUM);
+ val |= FIELD_PREP(EINJ_VC_NUM, vc_num);
+ }
+
+ dw_pcie_writel_dbi(pci, rinfo->ras_cap_offset + ERR_INJ0_OFF + (0x4 * err_group), val);
+ dw_pcie_writel_dbi(pci, rinfo->ras_cap_offset + ERR_INJ_ENABLE_REG, (0x1 << err_group));
+
+ kfree(kern_buf);
+ return count;
+}
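
The accepted input format is thus "<count>" for most errors, "<count> <val_diff>" for group 1 and "<count> <val_diff> <vc_num>" for group 4. A user-space sketch of driving these files (the debugfs directory name is an assumption; it is derived from dev_name() of the controller):

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		/* Group 0 error: a single injection count is enough. */
		int fd = open("/sys/kernel/debug/dwc_pcie_XXXX/rasdes_err_inj/tx_lcrc",
			      O_WRONLY);
		if (fd < 0)
			return 1;
		if (write(fd, "1", 1) != 1)	/* inject one bad-LCRC TLP */
			perror("write");
		close(fd);

		/* Group 1 error: "<count> <val_diff>", val_diff in [-4095, 4095]. */
		fd = open("/sys/kernel/debug/dwc_pcie_XXXX/rasdes_err_inj/tlp_err_seq",
			  O_WRONLY);
		if (fd < 0)
			return 1;
		if (write(fd, "1 -1", 4) != 4)	/* one TLP with its seq number off by one */
			perror("write");
		close(fd);

		return 0;
	}
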
+
+static void set_event_number(struct dwc_pcie_rasdes_priv *pdata,
+ struct dw_pcie *pci, struct dwc_pcie_rasdes_info *rinfo)
+{
+ u32 val;
+
+ val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG);
+ val &= ~EVENT_COUNTER_ENABLE;
+ val &= ~(EVENT_COUNTER_GROUP_SELECT | EVENT_COUNTER_EVENT_SELECT);
+ val |= FIELD_PREP(EVENT_COUNTER_GROUP_SELECT, event_list[pdata->idx].group_no);
+ val |= FIELD_PREP(EVENT_COUNTER_EVENT_SELECT, event_list[pdata->idx].event_no);
+ dw_pcie_writel_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG, val);
+}
+
+static ssize_t counter_enable_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct dwc_pcie_rasdes_priv *pdata = file->private_data;
+ struct dw_pcie *pci = pdata->pci;
+ struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
+ char debugfs_buf[DWC_DEBUGFS_BUF_MAX];
+ ssize_t pos;
+ u32 val;
+
+ mutex_lock(&rinfo->reg_event_lock);
+ set_event_number(pdata, pci, rinfo);
+ val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG);
+ mutex_unlock(&rinfo->reg_event_lock);
+ val = FIELD_GET(EVENT_COUNTER_STATUS, val);
+ if (val)
+ pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "Counter Enabled\n");
+ else
+ pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "Counter Disabled\n");
+
+ return simple_read_from_buffer(buf, count, ppos, debugfs_buf, pos);
+}
+
+static ssize_t counter_enable_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct dwc_pcie_rasdes_priv *pdata = file->private_data;
+ struct dw_pcie *pci = pdata->pci;
+ struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
+	u32 val, enable;
+	int ret;
+
+	ret = kstrtou32_from_user(buf, count, 0, &enable);
+	if (ret)
+		return ret;
+
+ mutex_lock(&rinfo->reg_event_lock);
+ set_event_number(pdata, pci, rinfo);
+ val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG);
+ if (enable)
+ val |= FIELD_PREP(EVENT_COUNTER_ENABLE, PER_EVENT_ON);
+ else
+ val |= FIELD_PREP(EVENT_COUNTER_ENABLE, PER_EVENT_OFF);
+
+ dw_pcie_writel_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG, val);
+
+ /*
+ * While enabling the counter, always read the status back to check if
+ * it is enabled or not. Return error if it is not enabled to let the
+ * users know that the counter is not supported on the platform.
+ */
+ if (enable) {
+ val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset +
+ RAS_DES_EVENT_COUNTER_CTRL_REG);
+ if (!FIELD_GET(EVENT_COUNTER_STATUS, val)) {
+ mutex_unlock(&rinfo->reg_event_lock);
+ return -EOPNOTSUPP;
+ }
+ }
+
+ mutex_unlock(&rinfo->reg_event_lock);
+
+ return count;
+}
+
+static ssize_t counter_lane_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct dwc_pcie_rasdes_priv *pdata = file->private_data;
+ struct dw_pcie *pci = pdata->pci;
+ struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
+ char debugfs_buf[DWC_DEBUGFS_BUF_MAX];
+ ssize_t pos;
+ u32 val;
+
+ mutex_lock(&rinfo->reg_event_lock);
+ set_event_number(pdata, pci, rinfo);
+ val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG);
+ mutex_unlock(&rinfo->reg_event_lock);
+ val = FIELD_GET(EVENT_COUNTER_LANE_SELECT, val);
+ pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "Lane: %d\n", val);
+
+ return simple_read_from_buffer(buf, count, ppos, debugfs_buf, pos);
+}
+
+static ssize_t counter_lane_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct dwc_pcie_rasdes_priv *pdata = file->private_data;
+ struct dw_pcie *pci = pdata->pci;
+ struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
+	u32 val, lane;
+	int ret;
+
+	ret = kstrtou32_from_user(buf, count, 0, &lane);
+	if (ret)
+		return ret;
+
+ mutex_lock(&rinfo->reg_event_lock);
+ set_event_number(pdata, pci, rinfo);
+ val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG);
+ val &= ~(EVENT_COUNTER_LANE_SELECT);
+ val |= FIELD_PREP(EVENT_COUNTER_LANE_SELECT, lane);
+ dw_pcie_writel_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_CTRL_REG, val);
+ mutex_unlock(&rinfo->reg_event_lock);
+
+ return count;
+}
+
+static ssize_t counter_value_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct dwc_pcie_rasdes_priv *pdata = file->private_data;
+ struct dw_pcie *pci = pdata->pci;
+ struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
+ char debugfs_buf[DWC_DEBUGFS_BUF_MAX];
+ ssize_t pos;
+ u32 val;
+
+ mutex_lock(&rinfo->reg_event_lock);
+ set_event_number(pdata, pci, rinfo);
+ val = dw_pcie_readl_dbi(pci, rinfo->ras_cap_offset + RAS_DES_EVENT_COUNTER_DATA_REG);
+ mutex_unlock(&rinfo->reg_event_lock);
+	pos = scnprintf(debugfs_buf, DWC_DEBUGFS_BUF_MAX, "Counter value: %u\n", val);
+
+ return simple_read_from_buffer(buf, count, ppos, debugfs_buf, pos);
+}
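
A matching user-space round trip for one statistical counter: enable it, let traffic run, then read the accumulated value back (the debugfs path is hypothetical; each event gets its own directory under rasdes_event_counter/):

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	#define EVT "/sys/kernel/debug/dwc_pcie_XXXX/rasdes_event_counter/bad_tlp"

	int main(void)
	{
		char buf[64];
		ssize_t n;
		int fd;

		fd = open(EVT "/counter_enable", O_WRONLY);
		if (fd < 0 || write(fd, "1", 1) != 1)	/* EOPNOTSUPP if unsupported */
			return 1;
		close(fd);

		sleep(1);				/* let some traffic flow */

		fd = open(EVT "/counter_value", O_RDONLY);
		if (fd < 0 || (n = read(fd, buf, sizeof(buf) - 1)) < 0)
			return 1;
		buf[n] = '\0';
		printf("%s", buf);			/* "Counter value: <n>" */
		close(fd);
		return 0;
	}
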
+
+static const char *ltssm_status_string(enum dw_pcie_ltssm ltssm)
+{
+ const char *str;
+
+ switch (ltssm) {
+#define DW_PCIE_LTSSM_NAME(n) case n: str = #n; break
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_DETECT_QUIET);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_DETECT_ACT);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_POLL_ACTIVE);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_POLL_COMPLIANCE);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_POLL_CONFIG);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_PRE_DETECT_QUIET);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_DETECT_WAIT);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_CFG_LINKWD_START);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_CFG_LINKWD_ACEPT);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_CFG_LANENUM_WAI);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_CFG_LANENUM_ACEPT);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_CFG_COMPLETE);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_CFG_IDLE);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_RCVRY_LOCK);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_RCVRY_SPEED);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_RCVRY_RCVRCFG);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_RCVRY_IDLE);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_L0);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_L0S);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_L123_SEND_EIDLE);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_L1_IDLE);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_L2_IDLE);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_L2_WAKE);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_DISABLED_ENTRY);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_DISABLED_IDLE);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_DISABLED);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_LPBK_ENTRY);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_LPBK_ACTIVE);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_LPBK_EXIT);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_LPBK_EXIT_TIMEOUT);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_HOT_RESET_ENTRY);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_HOT_RESET);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_RCVRY_EQ0);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_RCVRY_EQ1);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_RCVRY_EQ2);
+ DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_RCVRY_EQ3);
+ default:
+ str = "DW_PCIE_LTSSM_UNKNOWN";
+ break;
+ }
+
+ return str + strlen("DW_PCIE_LTSSM_");
+}
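
The return statement strips the common "DW_PCIE_LTSSM_" prefix with plain pointer arithmetic, which also works for the "DW_PCIE_LTSSM_UNKNOWN" default. A stand-alone illustration of the idiom:

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		const char *str = "DW_PCIE_LTSSM_L0";

		puts(str + strlen("DW_PCIE_LTSSM_"));	/* prints "L0" */
		return 0;
	}
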
+
+static int ltssm_status_show(struct seq_file *s, void *v)
+{
+ struct dw_pcie *pci = s->private;
+ enum dw_pcie_ltssm val;
+
+ val = dw_pcie_get_ltssm(pci);
+ seq_printf(s, "%s (0x%02x)\n", ltssm_status_string(val), val);
+
+ return 0;
+}
+
+static int ltssm_status_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ltssm_status_show, inode->i_private);
+}
+
+#define dwc_debugfs_create(name) \
+debugfs_create_file(#name, 0644, rasdes_debug, pci, \
+ &dbg_ ## name ## _fops)
+
+#define DWC_DEBUGFS_FOPS(name) \
+static const struct file_operations dbg_ ## name ## _fops = { \
+ .open = simple_open, \
+ .read = name ## _read, \
+ .write = name ## _write \
+}
+
+DWC_DEBUGFS_FOPS(lane_detect);
+DWC_DEBUGFS_FOPS(rx_valid);
+
+static const struct file_operations dwc_pcie_err_inj_ops = {
+ .open = simple_open,
+ .write = err_inj_write,
+};
+
+static const struct file_operations dwc_pcie_counter_enable_ops = {
+ .open = simple_open,
+ .read = counter_enable_read,
+ .write = counter_enable_write,
+};
+
+static const struct file_operations dwc_pcie_counter_lane_ops = {
+ .open = simple_open,
+ .read = counter_lane_read,
+ .write = counter_lane_write,
+};
+
+static const struct file_operations dwc_pcie_counter_value_ops = {
+ .open = simple_open,
+ .read = counter_value_read,
+};
+
+static const struct file_operations dwc_pcie_ltssm_status_ops = {
+	.open = ltssm_status_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static void dwc_pcie_rasdes_debugfs_deinit(struct dw_pcie *pci)
+{
+	struct dwc_pcie_rasdes_info *rinfo = pci->debugfs->rasdes_info;
+
+	/* rasdes_info stays NULL when the RAS DES capability is absent */
+	if (!rinfo)
+		return;
+
+	mutex_destroy(&rinfo->reg_event_lock);
+}
+
+static int dwc_pcie_rasdes_debugfs_init(struct dw_pcie *pci, struct dentry *dir)
+{
+ struct dentry *rasdes_debug, *rasdes_err_inj;
+ struct dentry *rasdes_event_counter, *rasdes_events;
+ struct dwc_pcie_rasdes_info *rasdes_info;
+ struct dwc_pcie_rasdes_priv *priv_tmp;
+ struct device *dev = pci->dev;
+ int ras_cap, i, ret;
+
+ /*
+ * If a given SoC has no RAS DES capability, the following call is
+ * bound to return an error, breaking some existing platforms. So,
+ * return 0 here, as this is not necessarily an error.
+ */
+ ras_cap = dw_pcie_find_rasdes_capability(pci);
+ if (!ras_cap) {
+ dev_dbg(dev, "no RAS DES capability available\n");
+ return 0;
+ }
+
+ rasdes_info = devm_kzalloc(dev, sizeof(*rasdes_info), GFP_KERNEL);
+ if (!rasdes_info)
+ return -ENOMEM;
+
+ /* Create subdirectories for Debug, Error Injection, Statistics. */
+ rasdes_debug = debugfs_create_dir("rasdes_debug", dir);
+ rasdes_err_inj = debugfs_create_dir("rasdes_err_inj", dir);
+ rasdes_event_counter = debugfs_create_dir("rasdes_event_counter", dir);
+
+ mutex_init(&rasdes_info->reg_event_lock);
+ rasdes_info->ras_cap_offset = ras_cap;
+ pci->debugfs->rasdes_info = rasdes_info;
+
+ /* Create debugfs files for Debug subdirectory. */
+ dwc_debugfs_create(lane_detect);
+ dwc_debugfs_create(rx_valid);
+
+ /* Create debugfs files for Error Injection subdirectory. */
+ for (i = 0; i < ARRAY_SIZE(err_inj_list); i++) {
+ priv_tmp = devm_kzalloc(dev, sizeof(*priv_tmp), GFP_KERNEL);
+ if (!priv_tmp) {
+ ret = -ENOMEM;
+ goto err_deinit;
+ }
+
+ priv_tmp->idx = i;
+ priv_tmp->pci = pci;
+ debugfs_create_file(err_inj_list[i].name, 0200, rasdes_err_inj, priv_tmp,
+ &dwc_pcie_err_inj_ops);
+ }
+
+ /* Create debugfs files for Statistical Counter subdirectory. */
+ for (i = 0; i < ARRAY_SIZE(event_list); i++) {
+ priv_tmp = devm_kzalloc(dev, sizeof(*priv_tmp), GFP_KERNEL);
+ if (!priv_tmp) {
+ ret = -ENOMEM;
+ goto err_deinit;
+ }
+
+ priv_tmp->idx = i;
+ priv_tmp->pci = pci;
+ rasdes_events = debugfs_create_dir(event_list[i].name, rasdes_event_counter);
+ if (event_list[i].group_no == 0 || event_list[i].group_no == 4) {
+ debugfs_create_file("lane_select", 0644, rasdes_events,
+ priv_tmp, &dwc_pcie_counter_lane_ops);
+ }
+ debugfs_create_file("counter_value", 0444, rasdes_events, priv_tmp,
+ &dwc_pcie_counter_value_ops);
+ debugfs_create_file("counter_enable", 0644, rasdes_events, priv_tmp,
+ &dwc_pcie_counter_enable_ops);
+ }
+
+ return 0;
+
+err_deinit:
+ dwc_pcie_rasdes_debugfs_deinit(pci);
+ return ret;
+}
+
+static void dwc_pcie_ltssm_debugfs_init(struct dw_pcie *pci, struct dentry *dir)
+{
+ debugfs_create_file("ltssm_status", 0444, dir, pci,
+ &dwc_pcie_ltssm_status_ops);
+}
+
+static int dw_pcie_ptm_check_capability(void *drvdata)
+{
+ struct dw_pcie *pci = drvdata;
+
+ pci->ptm_vsec_offset = dw_pcie_find_ptm_capability(pci);
+
+ return pci->ptm_vsec_offset;
+}
+
+static int dw_pcie_ptm_context_update_write(void *drvdata, u8 mode)
+{
+ struct dw_pcie *pci = drvdata;
+ u32 val;
+
+ if (mode == PCIE_PTM_CONTEXT_UPDATE_AUTO) {
+ val = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_RES_REQ_CTRL);
+ val |= PTM_REQ_AUTO_UPDATE_ENABLED;
+ dw_pcie_writel_dbi(pci, pci->ptm_vsec_offset + PTM_RES_REQ_CTRL, val);
+ } else if (mode == PCIE_PTM_CONTEXT_UPDATE_MANUAL) {
+ val = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_RES_REQ_CTRL);
+ val &= ~PTM_REQ_AUTO_UPDATE_ENABLED;
+ val |= PTM_REQ_START_UPDATE;
+ dw_pcie_writel_dbi(pci, pci->ptm_vsec_offset + PTM_RES_REQ_CTRL, val);
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int dw_pcie_ptm_context_update_read(void *drvdata, u8 *mode)
+{
+ struct dw_pcie *pci = drvdata;
+ u32 val;
+
+ val = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_RES_REQ_CTRL);
+ if (FIELD_GET(PTM_REQ_AUTO_UPDATE_ENABLED, val))
+ *mode = PCIE_PTM_CONTEXT_UPDATE_AUTO;
+ else
+		/*
+		 * PTM_REQ_START_UPDATE is a self-clearing register bit, so if
+		 * PTM_REQ_AUTO_UPDATE_ENABLED is not set, manual update mode
+		 * is in use.
+		 */
+ *mode = PCIE_PTM_CONTEXT_UPDATE_MANUAL;
+
+ return 0;
+}
+
+static int dw_pcie_ptm_context_valid_write(void *drvdata, bool valid)
+{
+ struct dw_pcie *pci = drvdata;
+ u32 val;
+
+ if (valid) {
+ val = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_RES_REQ_CTRL);
+ val |= PTM_RES_CCONTEXT_VALID;
+ dw_pcie_writel_dbi(pci, pci->ptm_vsec_offset + PTM_RES_REQ_CTRL, val);
+ } else {
+ val = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_RES_REQ_CTRL);
+ val &= ~PTM_RES_CCONTEXT_VALID;
+ dw_pcie_writel_dbi(pci, pci->ptm_vsec_offset + PTM_RES_REQ_CTRL, val);
+ }
+
+ return 0;
+}
+
+static int dw_pcie_ptm_context_valid_read(void *drvdata, bool *valid)
+{
+ struct dw_pcie *pci = drvdata;
+ u32 val;
+
+ val = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_RES_REQ_CTRL);
+ *valid = !!FIELD_GET(PTM_RES_CCONTEXT_VALID, val);
+
+ return 0;
+}
+
+static int dw_pcie_ptm_local_clock_read(void *drvdata, u64 *clock)
+{
+ struct dw_pcie *pci = drvdata;
+ u32 msb, lsb;
+
+ do {
+ msb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_LOCAL_MSB);
+ lsb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_LOCAL_LSB);
+ } while (msb != dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_LOCAL_MSB));
+
+ *clock = ((u64) msb) << 32 | lsb;
+
+ return 0;
+}
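
dw_pcie_ptm_local_clock_read() and the master/t1-t4 readers that follow all use the same torn-read avoidance idiom: re-read the MSB half until it is stable across the LSB read, so a carry between the two 32-bit halves cannot yield a value that never existed. Its generic form, with placeholder accessors:

	#include <stdint.h>

	extern uint32_t read_hi(void);	/* placeholder register accessors */
	extern uint32_t read_lo(void);

	static uint64_t read_split_counter(void)
	{
		uint32_t msb, lsb;

		do {
			msb = read_hi();
			lsb = read_lo();
		} while (msb != read_hi());	/* retry if the MSB rolled over */

		return ((uint64_t)msb << 32) | lsb;
	}
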
+
+static int dw_pcie_ptm_master_clock_read(void *drvdata, u64 *clock)
+{
+ struct dw_pcie *pci = drvdata;
+ u32 msb, lsb;
+
+ do {
+ msb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_MASTER_MSB);
+ lsb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_MASTER_LSB);
+ } while (msb != dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_MASTER_MSB));
+
+ *clock = ((u64) msb) << 32 | lsb;
+
+ return 0;
+}
+
+static int dw_pcie_ptm_t1_read(void *drvdata, u64 *clock)
+{
+ struct dw_pcie *pci = drvdata;
+ u32 msb, lsb;
+
+ do {
+ msb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T1_T2_MSB);
+ lsb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T1_T2_LSB);
+ } while (msb != dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T1_T2_MSB));
+
+ *clock = ((u64) msb) << 32 | lsb;
+
+ return 0;
+}
+
+static int dw_pcie_ptm_t2_read(void *drvdata, u64 *clock)
+{
+ struct dw_pcie *pci = drvdata;
+ u32 msb, lsb;
+
+ do {
+ msb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T1_T2_MSB);
+ lsb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T1_T2_LSB);
+ } while (msb != dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T1_T2_MSB));
+
+ *clock = ((u64) msb) << 32 | lsb;
+
+ return 0;
+}
+
+static int dw_pcie_ptm_t3_read(void *drvdata, u64 *clock)
+{
+ struct dw_pcie *pci = drvdata;
+ u32 msb, lsb;
+
+ do {
+ msb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T3_T4_MSB);
+ lsb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T3_T4_LSB);
+ } while (msb != dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T3_T4_MSB));
+
+ *clock = ((u64) msb) << 32 | lsb;
+
+ return 0;
+}
+
+static int dw_pcie_ptm_t4_read(void *drvdata, u64 *clock)
+{
+ struct dw_pcie *pci = drvdata;
+ u32 msb, lsb;
+
+ do {
+ msb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T3_T4_MSB);
+ lsb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T3_T4_LSB);
+ } while (msb != dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T3_T4_MSB));
+
+ *clock = ((u64) msb) << 32 | lsb;
+
+ return 0;
+}
+
+static bool dw_pcie_ptm_context_update_visible(void *drvdata)
+{
+ struct dw_pcie *pci = drvdata;
+
+ return pci->mode == DW_PCIE_EP_TYPE;
+}
+
+static bool dw_pcie_ptm_context_valid_visible(void *drvdata)
+{
+ struct dw_pcie *pci = drvdata;
+
+ return pci->mode == DW_PCIE_RC_TYPE;
+}
+
+static bool dw_pcie_ptm_local_clock_visible(void *drvdata)
+{
+ /* PTM local clock is always visible */
+ return true;
+}
+
+static bool dw_pcie_ptm_master_clock_visible(void *drvdata)
+{
+ struct dw_pcie *pci = drvdata;
+
+ return pci->mode == DW_PCIE_EP_TYPE;
+}
+
+static bool dw_pcie_ptm_t1_visible(void *drvdata)
+{
+ struct dw_pcie *pci = drvdata;
+
+ return pci->mode == DW_PCIE_EP_TYPE;
+}
+
+static bool dw_pcie_ptm_t2_visible(void *drvdata)
+{
+ struct dw_pcie *pci = drvdata;
+
+ return pci->mode == DW_PCIE_RC_TYPE;
+}
+
+static bool dw_pcie_ptm_t3_visible(void *drvdata)
+{
+ struct dw_pcie *pci = drvdata;
+
+ return pci->mode == DW_PCIE_RC_TYPE;
+}
+
+static bool dw_pcie_ptm_t4_visible(void *drvdata)
+{
+ struct dw_pcie *pci = drvdata;
+
+ return pci->mode == DW_PCIE_EP_TYPE;
+}
+
+static const struct pcie_ptm_ops dw_pcie_ptm_ops = {
+ .check_capability = dw_pcie_ptm_check_capability,
+ .context_update_write = dw_pcie_ptm_context_update_write,
+ .context_update_read = dw_pcie_ptm_context_update_read,
+ .context_valid_write = dw_pcie_ptm_context_valid_write,
+ .context_valid_read = dw_pcie_ptm_context_valid_read,
+ .local_clock_read = dw_pcie_ptm_local_clock_read,
+ .master_clock_read = dw_pcie_ptm_master_clock_read,
+ .t1_read = dw_pcie_ptm_t1_read,
+ .t2_read = dw_pcie_ptm_t2_read,
+ .t3_read = dw_pcie_ptm_t3_read,
+ .t4_read = dw_pcie_ptm_t4_read,
+ .context_update_visible = dw_pcie_ptm_context_update_visible,
+ .context_valid_visible = dw_pcie_ptm_context_valid_visible,
+ .local_clock_visible = dw_pcie_ptm_local_clock_visible,
+ .master_clock_visible = dw_pcie_ptm_master_clock_visible,
+ .t1_visible = dw_pcie_ptm_t1_visible,
+ .t2_visible = dw_pcie_ptm_t2_visible,
+ .t3_visible = dw_pcie_ptm_t3_visible,
+ .t4_visible = dw_pcie_ptm_t4_visible,
+};
+
+void dwc_pcie_debugfs_deinit(struct dw_pcie *pci)
+{
+ if (!pci->debugfs)
+ return;
+
+ pcie_ptm_destroy_debugfs(pci->ptm_debugfs);
+ dwc_pcie_rasdes_debugfs_deinit(pci);
+ debugfs_remove_recursive(pci->debugfs->debug_dir);
+}
+
+void dwc_pcie_debugfs_init(struct dw_pcie *pci, enum dw_pcie_device_mode mode)
+{
+ char dirname[DWC_DEBUGFS_BUF_MAX];
+ struct device *dev = pci->dev;
+ struct debugfs_info *debugfs;
+ struct dentry *dir;
+ int err;
+
+ /* Create main directory for each platform driver. */
+ snprintf(dirname, DWC_DEBUGFS_BUF_MAX, "dwc_pcie_%s", dev_name(dev));
+	debugfs = devm_kzalloc(dev, sizeof(*debugfs), GFP_KERNEL);
+	if (!debugfs)
+		return;
+
+	dir = debugfs_create_dir(dirname, NULL);
+
+ debugfs->debug_dir = dir;
+ pci->debugfs = debugfs;
+ err = dwc_pcie_rasdes_debugfs_init(pci, dir);
+ if (err)
+ dev_err(dev, "failed to initialize RAS DES debugfs, err=%d\n",
+ err);
+
+ dwc_pcie_ltssm_debugfs_init(pci, dir);
+
+ pci->mode = mode;
+ pci->ptm_debugfs = pcie_ptm_create_debugfs(pci->dev, pci,
+ &dw_pcie_ptm_ops);
+}
diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
index a543c45c7224..19571ac2b961 100644
--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
+++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
@@ -1,207 +1,430 @@
// SPDX-License-Identifier: GPL-2.0
-/**
+/*
* Synopsys DesignWare PCIe Endpoint controller driver
*
* Copyright (C) 2017 Texas Instruments
* Author: Kishon Vijay Abraham I <kishon@ti.com>
*/
+#include <linux/align.h>
+#include <linux/bitfield.h>
#include <linux/of.h>
+#include <linux/platform_device.h>
#include "pcie-designware.h"
#include <linux/pci-epc.h>
#include <linux/pci-epf.h>
-void dw_pcie_ep_linkup(struct dw_pcie_ep *ep)
+/**
+ * dw_pcie_ep_get_func_from_ep - Get the struct dw_pcie_ep_func corresponding to
+ * the endpoint function
+ * @ep: DWC EP device
+ * @func_no: Function number of the endpoint device
+ *
+ * Return: struct dw_pcie_ep_func if success, NULL otherwise.
+ */
+struct dw_pcie_ep_func *
+dw_pcie_ep_get_func_from_ep(struct dw_pcie_ep *ep, u8 func_no)
{
- struct pci_epc *epc = ep->epc;
+ struct dw_pcie_ep_func *ep_func;
- pci_epc_linkup(epc);
+ list_for_each_entry(ep_func, &ep->func_list, list) {
+ if (ep_func->func_no == func_no)
+ return ep_func;
+ }
+
+ return NULL;
}
-static void __dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar,
- int flags)
+static void __dw_pcie_ep_reset_bar(struct dw_pcie *pci, u8 func_no,
+ enum pci_barno bar, int flags)
{
+ struct dw_pcie_ep *ep = &pci->ep;
u32 reg;
reg = PCI_BASE_ADDRESS_0 + (4 * bar);
dw_pcie_dbi_ro_wr_en(pci);
- dw_pcie_writel_dbi2(pci, reg, 0x0);
- dw_pcie_writel_dbi(pci, reg, 0x0);
+ dw_pcie_ep_writel_dbi2(ep, func_no, reg, 0x0);
+ dw_pcie_ep_writel_dbi(ep, func_no, reg, 0x0);
if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) {
- dw_pcie_writel_dbi2(pci, reg + 4, 0x0);
- dw_pcie_writel_dbi(pci, reg + 4, 0x0);
+ dw_pcie_ep_writel_dbi2(ep, func_no, reg + 4, 0x0);
+ dw_pcie_ep_writel_dbi(ep, func_no, reg + 4, 0x0);
}
dw_pcie_dbi_ro_wr_dis(pci);
}
+/**
+ * dw_pcie_ep_reset_bar - Reset endpoint BAR
+ * @pci: DWC PCI device
+ * @bar: BAR number of the endpoint
+ */
void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)
{
- __dw_pcie_ep_reset_bar(pci, bar, 0);
+ u8 func_no, funcs;
+
+ funcs = pci->ep.epc->max_functions;
+
+ for (func_no = 0; func_no < funcs; func_no++)
+ __dw_pcie_ep_reset_bar(pci, func_no, bar, 0);
}
+EXPORT_SYMBOL_GPL(dw_pcie_ep_reset_bar);
-static u8 __dw_pcie_ep_find_next_cap(struct dw_pcie *pci, u8 cap_ptr,
- u8 cap)
+static u8 dw_pcie_ep_find_capability(struct dw_pcie_ep *ep, u8 func_no, u8 cap)
{
- u8 cap_id, next_cap_ptr;
- u16 reg;
+ return PCI_FIND_NEXT_CAP(dw_pcie_ep_read_cfg, PCI_CAPABILITY_LIST,
+ cap, ep, func_no);
+}
- reg = dw_pcie_readw_dbi(pci, cap_ptr);
- next_cap_ptr = (reg & 0xff00) >> 8;
- cap_id = (reg & 0x00ff);
+/**
+ * dw_pcie_ep_hide_ext_capability - Hide a capability from the linked list
+ * @pci: DWC PCI device
+ * @prev_cap: Capability preceding the capability that should be hidden
+ * @cap: Capability that should be hidden
+ *
+ * Return: 0 if success, errno otherwise.
+ */
+int dw_pcie_ep_hide_ext_capability(struct dw_pcie *pci, u8 prev_cap, u8 cap)
+{
+ u16 prev_cap_offset, cap_offset;
+ u32 prev_cap_header, cap_header;
- if (!next_cap_ptr || cap_id > PCI_CAP_ID_MAX)
- return 0;
+ prev_cap_offset = dw_pcie_find_ext_capability(pci, prev_cap);
+ if (!prev_cap_offset)
+ return -EINVAL;
- if (cap_id == cap)
- return cap_ptr;
+ prev_cap_header = dw_pcie_readl_dbi(pci, prev_cap_offset);
+ cap_offset = PCI_EXT_CAP_NEXT(prev_cap_header);
+ cap_header = dw_pcie_readl_dbi(pci, cap_offset);
- return __dw_pcie_ep_find_next_cap(pci, next_cap_ptr, cap);
-}
+ /* cap must immediately follow prev_cap. */
+ if (PCI_EXT_CAP_ID(cap_header) != cap)
+ return -EINVAL;
-static u8 dw_pcie_ep_find_capability(struct dw_pcie *pci, u8 cap)
-{
- u8 next_cap_ptr;
- u16 reg;
+ /* Clear next ptr. */
+ prev_cap_header &= ~GENMASK(31, 20);
- reg = dw_pcie_readw_dbi(pci, PCI_CAPABILITY_LIST);
- next_cap_ptr = (reg & 0x00ff);
+ /* Set next ptr to next ptr of cap. */
+ prev_cap_header |= cap_header & GENMASK(31, 20);
- if (!next_cap_ptr)
- return 0;
+ dw_pcie_dbi_ro_wr_en(pci);
+ dw_pcie_writel_dbi(pci, prev_cap_offset, prev_cap_header);
+ dw_pcie_dbi_ro_wr_dis(pci);
- return __dw_pcie_ep_find_next_cap(pci, next_cap_ptr, cap);
+ return 0;
}
+EXPORT_SYMBOL_GPL(dw_pcie_ep_hide_ext_capability);
-static int dw_pcie_ep_write_header(struct pci_epc *epc, u8 func_no,
+static int dw_pcie_ep_write_header(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct pci_epf_header *hdr)
{
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
dw_pcie_dbi_ro_wr_en(pci);
- dw_pcie_writew_dbi(pci, PCI_VENDOR_ID, hdr->vendorid);
- dw_pcie_writew_dbi(pci, PCI_DEVICE_ID, hdr->deviceid);
- dw_pcie_writeb_dbi(pci, PCI_REVISION_ID, hdr->revid);
- dw_pcie_writeb_dbi(pci, PCI_CLASS_PROG, hdr->progif_code);
- dw_pcie_writew_dbi(pci, PCI_CLASS_DEVICE,
- hdr->subclass_code | hdr->baseclass_code << 8);
- dw_pcie_writeb_dbi(pci, PCI_CACHE_LINE_SIZE,
- hdr->cache_line_size);
- dw_pcie_writew_dbi(pci, PCI_SUBSYSTEM_VENDOR_ID,
- hdr->subsys_vendor_id);
- dw_pcie_writew_dbi(pci, PCI_SUBSYSTEM_ID, hdr->subsys_id);
- dw_pcie_writeb_dbi(pci, PCI_INTERRUPT_PIN,
- hdr->interrupt_pin);
+ dw_pcie_ep_writew_dbi(ep, func_no, PCI_VENDOR_ID, hdr->vendorid);
+ dw_pcie_ep_writew_dbi(ep, func_no, PCI_DEVICE_ID, hdr->deviceid);
+ dw_pcie_ep_writeb_dbi(ep, func_no, PCI_REVISION_ID, hdr->revid);
+ dw_pcie_ep_writeb_dbi(ep, func_no, PCI_CLASS_PROG, hdr->progif_code);
+ dw_pcie_ep_writew_dbi(ep, func_no, PCI_CLASS_DEVICE,
+ hdr->subclass_code | hdr->baseclass_code << 8);
+ dw_pcie_ep_writeb_dbi(ep, func_no, PCI_CACHE_LINE_SIZE,
+ hdr->cache_line_size);
+ dw_pcie_ep_writew_dbi(ep, func_no, PCI_SUBSYSTEM_VENDOR_ID,
+ hdr->subsys_vendor_id);
+ dw_pcie_ep_writew_dbi(ep, func_no, PCI_SUBSYSTEM_ID, hdr->subsys_id);
+ dw_pcie_ep_writeb_dbi(ep, func_no, PCI_INTERRUPT_PIN,
+ hdr->interrupt_pin);
dw_pcie_dbi_ro_wr_dis(pci);
return 0;
}
-static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, enum pci_barno bar,
- dma_addr_t cpu_addr,
- enum dw_pcie_as_type as_type)
+static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, u8 func_no, int type,
+ dma_addr_t parent_bus_addr, enum pci_barno bar,
+ size_t size)
{
int ret;
u32 free_win;
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- free_win = find_first_zero_bit(ep->ib_window_map, ep->num_ib_windows);
- if (free_win >= ep->num_ib_windows) {
+ if (!ep->bar_to_atu[bar])
+ free_win = find_first_zero_bit(ep->ib_window_map, pci->num_ib_windows);
+ else
+ free_win = ep->bar_to_atu[bar] - 1;
+
+ if (free_win >= pci->num_ib_windows) {
dev_err(pci->dev, "No free inbound window\n");
return -EINVAL;
}
- ret = dw_pcie_prog_inbound_atu(pci, free_win, bar, cpu_addr,
- as_type);
+ ret = dw_pcie_prog_ep_inbound_atu(pci, func_no, free_win, type,
+ parent_bus_addr, bar, size);
if (ret < 0) {
dev_err(pci->dev, "Failed to program IB window\n");
return ret;
}
- ep->bar_to_atu[bar] = free_win;
+	/*
+	 * Always store the window index incremented by one, since the value 0
+	 * is used to identify an unallocated mapping.
+	 */
+ ep->bar_to_atu[bar] = free_win + 1;
set_bit(free_win, ep->ib_window_map);
return 0;
}
-static int dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep, phys_addr_t phys_addr,
- u64 pci_addr, size_t size)
+static int dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep,
+ struct dw_pcie_ob_atu_cfg *atu)
{
- u32 free_win;
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ u32 free_win;
+ int ret;
- free_win = find_first_zero_bit(ep->ob_window_map, ep->num_ob_windows);
- if (free_win >= ep->num_ob_windows) {
+ free_win = find_first_zero_bit(ep->ob_window_map, pci->num_ob_windows);
+ if (free_win >= pci->num_ob_windows) {
dev_err(pci->dev, "No free outbound window\n");
return -EINVAL;
}
- dw_pcie_prog_outbound_atu(pci, free_win, PCIE_ATU_TYPE_MEM,
- phys_addr, pci_addr, size);
+ atu->index = free_win;
+ ret = dw_pcie_prog_outbound_atu(pci, atu);
+ if (ret)
+ return ret;
set_bit(free_win, ep->ob_window_map);
- ep->outbound_addr[free_win] = phys_addr;
+ ep->outbound_addr[free_win] = atu->parent_bus_addr;
return 0;
}
-static void dw_pcie_ep_clear_bar(struct pci_epc *epc, u8 func_no,
+static void dw_pcie_ep_clear_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
struct pci_epf_bar *epf_bar)
{
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
enum pci_barno bar = epf_bar->barno;
- u32 atu_index = ep->bar_to_atu[bar];
+ u32 atu_index = ep->bar_to_atu[bar] - 1;
+
+ if (!ep->bar_to_atu[bar])
+ return;
- __dw_pcie_ep_reset_bar(pci, bar, epf_bar->flags);
+ __dw_pcie_ep_reset_bar(pci, func_no, bar, epf_bar->flags);
- dw_pcie_disable_atu(pci, atu_index, DW_PCIE_REGION_INBOUND);
+ dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_IB, atu_index);
clear_bit(atu_index, ep->ib_window_map);
+ ep->epf_bar[bar] = NULL;
+ ep->bar_to_atu[bar] = 0;
}
-static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no,
- struct pci_epf_bar *epf_bar)
+static unsigned int dw_pcie_ep_get_rebar_offset(struct dw_pcie *pci,
+ enum pci_barno bar)
+{
+ u32 reg, bar_index;
+ unsigned int offset, nbars;
+ int i;
+
+ offset = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_REBAR);
+ if (!offset)
+ return offset;
+
+ reg = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL);
+ nbars = FIELD_GET(PCI_REBAR_CTRL_NBAR_MASK, reg);
+
+ for (i = 0; i < nbars; i++, offset += PCI_REBAR_CTRL) {
+ reg = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL);
+ bar_index = FIELD_GET(PCI_REBAR_CTRL_BAR_IDX, reg);
+ if (bar_index == bar)
+ return offset;
+ }
+
+ return 0;
+}
+
+static int dw_pcie_ep_set_bar_resizable(struct dw_pcie_ep *ep, u8 func_no,
+ struct pci_epf_bar *epf_bar)
{
- int ret;
- struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
enum pci_barno bar = epf_bar->barno;
size_t size = epf_bar->size;
int flags = epf_bar->flags;
- enum dw_pcie_as_type as_type;
u32 reg = PCI_BASE_ADDRESS_0 + (4 * bar);
+ unsigned int rebar_offset;
+ u32 rebar_cap, rebar_ctrl;
+ int ret;
- if (!(flags & PCI_BASE_ADDRESS_SPACE))
- as_type = DW_PCIE_AS_MEM;
- else
- as_type = DW_PCIE_AS_IO;
+ rebar_offset = dw_pcie_ep_get_rebar_offset(pci, bar);
+ if (!rebar_offset)
+ return -EINVAL;
- ret = dw_pcie_ep_inbound_atu(ep, bar, epf_bar->phys_addr, as_type);
+ ret = pci_epc_bar_size_to_rebar_cap(size, &rebar_cap);
if (ret)
return ret;
dw_pcie_dbi_ro_wr_en(pci);
- dw_pcie_writel_dbi2(pci, reg, lower_32_bits(size - 1));
- dw_pcie_writel_dbi(pci, reg, flags);
+ /*
+ * A BAR mask should not be written for a resizable BAR. The BAR mask
+ * is automatically derived by the controller every time the "selected
+ * size" bits are updated, see "Figure 3-26 Resizable BAR Example for
+ * 32-bit Memory BAR0" in DWC EP databook 5.96a. We simply need to write
+ * BIT(0) to set the BAR enable bit.
+ */
+ dw_pcie_ep_writel_dbi2(ep, func_no, reg, BIT(0));
+ dw_pcie_ep_writel_dbi(ep, func_no, reg, flags);
if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) {
- dw_pcie_writel_dbi2(pci, reg + 4, upper_32_bits(size - 1));
- dw_pcie_writel_dbi(pci, reg + 4, 0);
+ dw_pcie_ep_writel_dbi2(ep, func_no, reg + 4, 0);
+ dw_pcie_ep_writel_dbi(ep, func_no, reg + 4, 0);
}
+ /*
+ * Bits 31:0 in PCI_REBAR_CAP define "supported sizes" bits for sizes
+ * 1 MB to 128 TB. Bits 31:16 in PCI_REBAR_CTRL define "supported sizes"
+ * bits for sizes 256 TB to 8 EB. Disallow sizes 256 TB to 8 EB.
+ */
+ rebar_ctrl = dw_pcie_readl_dbi(pci, rebar_offset + PCI_REBAR_CTRL);
+ rebar_ctrl &= ~GENMASK(31, 16);
+ dw_pcie_writel_dbi(pci, rebar_offset + PCI_REBAR_CTRL, rebar_ctrl);
+
+ /*
+ * The "selected size" (bits 13:8) in PCI_REBAR_CTRL are automatically
+ * updated when writing PCI_REBAR_CAP, see "Figure 3-26 Resizable BAR
+ * Example for 32-bit Memory BAR0" in DWC EP databook 5.96a.
+ */
+ dw_pcie_writel_dbi(pci, rebar_offset + PCI_REBAR_CAP, rebar_cap);
+
dw_pcie_dbi_ro_wr_dis(pci);
return 0;
}
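
PCI_REBAR_CAP encodes each supported size as one bit, with bit 4 standing for 1 MB and every higher bit doubling it, up to 128 TB at bit 31. A sketch of the mapping that pci_epc_bar_size_to_rebar_cap() is relied on to perform (the exact rounding behavior is an assumption here):

	#include <stdint.h>

	static int bar_size_to_rebar_bit(uint64_t size)
	{
		uint64_t sz = 1ULL << 20;	/* bit 4 == 1 MB */
		int bit = 4;

		/* Round up to the next supported power-of-two size. */
		while (sz < size && bit < 31) {
			sz <<= 1;
			bit++;
		}
		return bit;
	}
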
+static int dw_pcie_ep_set_bar_programmable(struct dw_pcie_ep *ep, u8 func_no,
+ struct pci_epf_bar *epf_bar)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ enum pci_barno bar = epf_bar->barno;
+ size_t size = epf_bar->size;
+ int flags = epf_bar->flags;
+ u32 reg = PCI_BASE_ADDRESS_0 + (4 * bar);
+
+ dw_pcie_dbi_ro_wr_en(pci);
+
+ dw_pcie_ep_writel_dbi2(ep, func_no, reg, lower_32_bits(size - 1));
+ dw_pcie_ep_writel_dbi(ep, func_no, reg, flags);
+
+ if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) {
+ dw_pcie_ep_writel_dbi2(ep, func_no, reg + 4, upper_32_bits(size - 1));
+ dw_pcie_ep_writel_dbi(ep, func_no, reg + 4, 0);
+ }
+
+ dw_pcie_dbi_ro_wr_dis(pci);
+
+ return 0;
+}
+
+static enum pci_epc_bar_type dw_pcie_ep_get_bar_type(struct dw_pcie_ep *ep,
+ enum pci_barno bar)
+{
+ const struct pci_epc_features *epc_features;
+
+ if (!ep->ops->get_features)
+ return BAR_PROGRAMMABLE;
+
+ epc_features = ep->ops->get_features(ep);
+
+ return epc_features->bar[bar].type;
+}
+
+static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ struct pci_epf_bar *epf_bar)
+{
+ struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ enum pci_barno bar = epf_bar->barno;
+ size_t size = epf_bar->size;
+ enum pci_epc_bar_type bar_type;
+ int flags = epf_bar->flags;
+ int ret, type;
+
+ /*
+ * DWC does not allow BAR pairs to overlap, e.g. you cannot combine BARs
+ * 1 and 2 to form a 64-bit BAR.
+ */
+ if ((flags & PCI_BASE_ADDRESS_MEM_TYPE_64) && (bar & 1))
+ return -EINVAL;
+
+ /*
+ * Certain EPF drivers dynamically change the physical address of a BAR
+ * (i.e. they call set_bar() twice, without ever calling clear_bar(), as
+ * calling clear_bar() would clear the BAR's PCI address assigned by the
+ * host).
+ */
+ if (ep->epf_bar[bar]) {
+ /*
+ * We can only dynamically change a BAR if the new BAR size and
+ * BAR flags do not differ from the existing configuration.
+ */
+ if (ep->epf_bar[bar]->barno != bar ||
+ ep->epf_bar[bar]->size != size ||
+ ep->epf_bar[bar]->flags != flags)
+ return -EINVAL;
+
+ /*
+ * When dynamically changing a BAR, skip writing the BAR reg, as
+ * that would clear the BAR's PCI address assigned by the host.
+ */
+ goto config_atu;
+ }
+
+ bar_type = dw_pcie_ep_get_bar_type(ep, bar);
+ switch (bar_type) {
+ case BAR_FIXED:
+ /*
+ * There is no need to write a BAR mask for a fixed BAR (except
+ * to write 1 to the LSB of the BAR mask register, to enable the
+ * BAR). Write the BAR mask regardless. (The fixed bits in the
+ * BAR mask register will be read-only anyway.)
+ */
+ fallthrough;
+ case BAR_PROGRAMMABLE:
+ ret = dw_pcie_ep_set_bar_programmable(ep, func_no, epf_bar);
+ break;
+ case BAR_RESIZABLE:
+ ret = dw_pcie_ep_set_bar_resizable(ep, func_no, epf_bar);
+ break;
+ default:
+ ret = -EINVAL;
+ dev_err(pci->dev, "Invalid BAR type\n");
+ break;
+ }
+
+ if (ret)
+ return ret;
+
+config_atu:
+ if (!(flags & PCI_BASE_ADDRESS_SPACE))
+ type = PCIE_ATU_TYPE_MEM;
+ else
+ type = PCIE_ATU_TYPE_IO;
+
+ ret = dw_pcie_ep_inbound_atu(ep, func_no, type, epf_bar->phys_addr, bar,
+ size);
+ if (ret)
+ return ret;
+
+ ep->epf_bar[bar] = epf_bar;
+
+ return 0;
+}
+
static int dw_pcie_find_index(struct dw_pcie_ep *ep, phys_addr_t addr,
u32 *atu_index)
{
u32 index;
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- for (index = 0; index < ep->num_ob_windows; index++) {
+ for_each_set_bit(index, ep->ob_window_map, pci->num_ob_windows) {
if (ep->outbound_addr[index] != addr)
continue;
*atu_index = index;
@@ -211,7 +434,21 @@ static int dw_pcie_find_index(struct dw_pcie_ep *ep, phys_addr_t addr,
return -EINVAL;
}
-static void dw_pcie_ep_unmap_addr(struct pci_epc *epc, u8 func_no,
+static u64 dw_pcie_ep_align_addr(struct pci_epc *epc, u64 pci_addr,
+ size_t *pci_size, size_t *offset)
+{
+ struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ u64 mask = pci->region_align - 1;
+ size_t ofst = pci_addr & mask;
+
+ *pci_size = ALIGN(ofst + *pci_size, epc->mem->window.page_size);
+ *offset = ofst;
+
+ return pci_addr & ~mask;
+}
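
A worked example of the mask arithmetic above, assuming a 64 KB region_align and 4 KB window pages: PCI address 0x12345678 splits into an aligned base and an in-window offset, and the usable size is grown to cover offset + size.

	#include <stdint.h>
	#include <stdio.h>

	#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((uint64_t)(a) - 1))

	int main(void)
	{
		uint64_t region_align = 0x10000;	/* 64 KB iATU granule */
		uint64_t page_size = 0x1000;		/* 4 KB window pages */
		uint64_t pci_addr = 0x12345678, size = 0x100;
		uint64_t mask = region_align - 1;

		uint64_t offset = pci_addr & mask;
		uint64_t base = pci_addr & ~mask;
		uint64_t mapped = ALIGN_UP(offset + size, page_size);

		printf("base=%#llx offset=%#llx mapped size=%#llx\n",
		       (unsigned long long)base, (unsigned long long)offset,
		       (unsigned long long)mapped);
		/* base=0x12340000 offset=0x5678 mapped size=0x6000 */
		return 0;
	}
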
+
+static void dw_pcie_ep_unmap_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
phys_addr_t addr)
{
int ret;
@@ -219,23 +456,30 @@ static void dw_pcie_ep_unmap_addr(struct pci_epc *epc, u8 func_no,
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- ret = dw_pcie_find_index(ep, addr, &atu_index);
+ ret = dw_pcie_find_index(ep, addr - pci->parent_bus_offset,
+ &atu_index);
if (ret < 0)
return;
- dw_pcie_disable_atu(pci, atu_index, DW_PCIE_REGION_OUTBOUND);
+ ep->outbound_addr[atu_index] = 0;
+ dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_OB, atu_index);
clear_bit(atu_index, ep->ob_window_map);
}
-static int dw_pcie_ep_map_addr(struct pci_epc *epc, u8 func_no,
- phys_addr_t addr,
- u64 pci_addr, size_t size)
+static int dw_pcie_ep_map_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ phys_addr_t addr, u64 pci_addr, size_t size)
{
int ret;
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
-
- ret = dw_pcie_ep_outbound_atu(ep, addr, pci_addr, size);
+ struct dw_pcie_ob_atu_cfg atu = { 0 };
+
+ atu.func_no = func_no;
+ atu.type = PCIE_ATU_TYPE_MEM;
+ atu.parent_bus_addr = addr - pci->parent_bus_offset;
+ atu.pci_addr = pci_addr;
+ atu.size = size;
+ ret = dw_pcie_ep_outbound_atu(ep, &atu);
if (ret) {
dev_err(pci->dev, "Failed to enable address\n");
return ret;
@@ -244,86 +488,105 @@ static int dw_pcie_ep_map_addr(struct pci_epc *epc, u8 func_no,
return 0;
}
-static int dw_pcie_ep_get_msi(struct pci_epc *epc, u8 func_no)
+static int dw_pcie_ep_get_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
- struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct dw_pcie_ep_func *ep_func;
u32 val, reg;
- if (!ep->msi_cap)
+ ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
+ if (!ep_func || !ep_func->msi_cap)
return -EINVAL;
- reg = ep->msi_cap + PCI_MSI_FLAGS;
- val = dw_pcie_readw_dbi(pci, reg);
+ reg = ep_func->msi_cap + PCI_MSI_FLAGS;
+ val = dw_pcie_ep_readw_dbi(ep, func_no, reg);
if (!(val & PCI_MSI_FLAGS_ENABLE))
return -EINVAL;
- val = (val & PCI_MSI_FLAGS_QSIZE) >> 4;
+ val = FIELD_GET(PCI_MSI_FLAGS_QSIZE, val);
- return val;
+ return 1 << val;
}
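The QSIZE (Multiple Message Enable) field read above is a log2 encoding, which is why the helper now returns a vector count rather than the raw field; the decode, per the MSI capability layout:

	/*
	 * PCI_MSI_FLAGS_QSIZE (MME) decode as returned by dw_pcie_ep_get_msi():
	 *   MME = 0  ->  1 vector
	 *   MME = 1  ->  2 vectors
	 *   MME = 3  ->  8 vectors
	 *   MME = 5  -> 32 vectors (MSI maximum)
	 * i.e. nr_irqs = 1 << MME.
	 */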
-static int dw_pcie_ep_set_msi(struct pci_epc *epc, u8 func_no, u8 interrupts)
+static int dw_pcie_ep_set_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ u8 nr_irqs)
{
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct dw_pcie_ep_func *ep_func;
+ u8 mmc = order_base_2(nr_irqs);
u32 val, reg;
- if (!ep->msi_cap)
+ ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
+ if (!ep_func || !ep_func->msi_cap)
return -EINVAL;
- reg = ep->msi_cap + PCI_MSI_FLAGS;
- val = dw_pcie_readw_dbi(pci, reg);
+ reg = ep_func->msi_cap + PCI_MSI_FLAGS;
+ val = dw_pcie_ep_readw_dbi(ep, func_no, reg);
val &= ~PCI_MSI_FLAGS_QMASK;
- val |= (interrupts << 1) & PCI_MSI_FLAGS_QMASK;
+ val |= FIELD_PREP(PCI_MSI_FLAGS_QMASK, mmc);
dw_pcie_dbi_ro_wr_en(pci);
- dw_pcie_writew_dbi(pci, reg, val);
+ dw_pcie_ep_writew_dbi(ep, func_no, reg, val);
dw_pcie_dbi_ro_wr_dis(pci);
return 0;
}
-static int dw_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no)
+static int dw_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
- struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct dw_pcie_ep_func *ep_func;
u32 val, reg;
- if (!ep->msix_cap)
+ ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
+ if (!ep_func || !ep_func->msix_cap)
return -EINVAL;
- reg = ep->msix_cap + PCI_MSIX_FLAGS;
- val = dw_pcie_readw_dbi(pci, reg);
+ reg = ep_func->msix_cap + PCI_MSIX_FLAGS;
+ val = dw_pcie_ep_readw_dbi(ep, func_no, reg);
if (!(val & PCI_MSIX_FLAGS_ENABLE))
return -EINVAL;
val &= PCI_MSIX_FLAGS_QSIZE;
- return val;
+ return val + 1;
}
-static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts)
+static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ u16 nr_irqs, enum pci_barno bir, u32 offset)
{
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct dw_pcie_ep_func *ep_func;
u32 val, reg;
- if (!ep->msix_cap)
+ ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
+ if (!ep_func || !ep_func->msix_cap)
return -EINVAL;
- reg = ep->msix_cap + PCI_MSIX_FLAGS;
- val = dw_pcie_readw_dbi(pci, reg);
- val &= ~PCI_MSIX_FLAGS_QSIZE;
- val |= interrupts;
dw_pcie_dbi_ro_wr_en(pci);
+
+ reg = ep_func->msix_cap + PCI_MSIX_FLAGS;
+ val = dw_pcie_ep_readw_dbi(ep, func_no, reg);
+ val &= ~PCI_MSIX_FLAGS_QSIZE;
+ val |= nr_irqs - 1; /* encoded as N-1 */
dw_pcie_writew_dbi(pci, reg, val);
+
+ reg = ep_func->msix_cap + PCI_MSIX_TABLE;
+ val = offset | bir;
+ dw_pcie_ep_writel_dbi(ep, func_no, reg, val);
+
+ reg = ep_func->msix_cap + PCI_MSIX_PBA;
+ val = (offset + (nr_irqs * PCI_MSIX_ENTRY_SIZE)) | bir;
+ dw_pcie_ep_writel_dbi(ep, func_no, reg, val);
+
dw_pcie_dbi_ro_wr_dis(pci);
return 0;
}
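The table/PBA programming above implies a contiguous layout in the chosen BAR; a sketch with hypothetical numbers:

	/*
	 * Hypothetical layout implied by dw_pcie_ep_set_msix() with
	 * nr_irqs = 8, bir = BAR_2, offset = 0:
	 *
	 *   0x000 - 0x07f : MSI-X table (8 entries * PCI_MSIX_ENTRY_SIZE (16))
	 *   0x080 - ...   : PBA (placed at offset + nr_irqs * entry size)
	 *
	 * The EPF driver must size BAR_2 to cover both regions.
	 */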
-static int dw_pcie_ep_raise_irq(struct pci_epc *epc, u8 func_no,
- enum pci_epc_irq_type type, u16 interrupt_num)
+static int dw_pcie_ep_raise_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ unsigned int type, u16 interrupt_num)
{
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
@@ -338,10 +601,7 @@ static void dw_pcie_ep_stop(struct pci_epc *epc)
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- if (!pci->ops->stop_link)
- return;
-
- pci->ops->stop_link(pci);
+ dw_pcie_stop_link(pci);
}
static int dw_pcie_ep_start(struct pci_epc *epc)
@@ -349,16 +609,25 @@ static int dw_pcie_ep_start(struct pci_epc *epc)
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- if (!pci->ops->start_link)
- return -EINVAL;
+ return dw_pcie_start_link(pci);
+}
- return pci->ops->start_link(pci);
+static const struct pci_epc_features*
+dw_pcie_ep_get_features(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
+{
+ struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+
+ if (!ep->ops->get_features)
+ return NULL;
+
+ return ep->ops->get_features(ep);
}
static const struct pci_epc_ops epc_ops = {
.write_header = dw_pcie_ep_write_header,
.set_bar = dw_pcie_ep_set_bar,
.clear_bar = dw_pcie_ep_clear_bar,
+ .align_addr = dw_pcie_ep_align_addr,
.map_addr = dw_pcie_ep_map_addr,
.unmap_addr = dw_pcie_ep_unmap_addr,
.set_msi = dw_pcie_ep_set_msi,
@@ -368,185 +637,456 @@ static const struct pci_epc_ops epc_ops = {
.raise_irq = dw_pcie_ep_raise_irq,
.start = dw_pcie_ep_start,
.stop = dw_pcie_ep_stop,
+ .get_features = dw_pcie_ep_get_features,
};
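EPF drivers never call this ops table directly; they reach it through the pci_epc_* wrappers. A minimal sketch of the map/unmap path under those assumptions (the foo_epf_* name is illustrative only; newer kernels can also use pci_epc_mem_map(), which applies ->align_addr for the caller):

	static int foo_epf_copy_from_host(struct pci_epf *epf, u64 pci_addr,
					  void *dst, size_t size)
	{
		struct pci_epc *epc = epf->epc;
		phys_addr_t phys;
		void __iomem *va;
		int ret;

		/* Reserve a slice of the outbound window space */
		va = pci_epc_mem_alloc_addr(epc, &phys, size);
		if (!va)
			return -ENOMEM;

		/* Ends up in dw_pcie_ep_map_addr(), programming an OB iATU */
		ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, phys,
				       pci_addr, size);
		if (ret)
			goto free_mem;

		memcpy_fromio(dst, va, size);

		pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, phys);
	free_mem:
		pci_epc_mem_free_addr(epc, phys, va, size);
		return ret;
	}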
-int dw_pcie_ep_raise_legacy_irq(struct dw_pcie_ep *ep, u8 func_no)
+/**
+ * dw_pcie_ep_raise_intx_irq - Raise INTx IRQ to the host
+ * @ep: DWC EP device
+ * @func_no: Function number of the endpoint
+ *
+ * Return: 0 if success, errno otherwise.
+ */
+int dw_pcie_ep_raise_intx_irq(struct dw_pcie_ep *ep, u8 func_no)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
struct device *dev = pci->dev;
- dev_err(dev, "EP cannot trigger legacy IRQs\n");
+ dev_err(dev, "EP cannot raise INTX IRQs\n");
return -EINVAL;
}
+EXPORT_SYMBOL_GPL(dw_pcie_ep_raise_intx_irq);
+/**
+ * dw_pcie_ep_raise_msi_irq - Raise MSI IRQ to the host
+ * @ep: DWC EP device
+ * @func_no: Function number of the endpoint
+ * @interrupt_num: Interrupt number to be raised
+ *
+ * Return: 0 if success, errno otherwise.
+ */
int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
u8 interrupt_num)
{
- struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ u32 msg_addr_lower, msg_addr_upper, reg;
+ struct dw_pcie_ep_func *ep_func;
struct pci_epc *epc = ep->epc;
+ size_t map_size = sizeof(u32);
+ size_t offset;
u16 msg_ctrl, msg_data;
- u32 msg_addr_lower, msg_addr_upper, reg;
- u64 msg_addr;
bool has_upper;
+ u64 msg_addr;
int ret;
- if (!ep->msi_cap)
+ ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
+ if (!ep_func || !ep_func->msi_cap)
return -EINVAL;
/* Raise MSI per the PCI Local Bus Specification Revision 3.0, 6.8.1. */
- reg = ep->msi_cap + PCI_MSI_FLAGS;
- msg_ctrl = dw_pcie_readw_dbi(pci, reg);
+ reg = ep_func->msi_cap + PCI_MSI_FLAGS;
+ msg_ctrl = dw_pcie_ep_readw_dbi(ep, func_no, reg);
has_upper = !!(msg_ctrl & PCI_MSI_FLAGS_64BIT);
- reg = ep->msi_cap + PCI_MSI_ADDRESS_LO;
- msg_addr_lower = dw_pcie_readl_dbi(pci, reg);
+ reg = ep_func->msi_cap + PCI_MSI_ADDRESS_LO;
+ msg_addr_lower = dw_pcie_ep_readl_dbi(ep, func_no, reg);
if (has_upper) {
- reg = ep->msi_cap + PCI_MSI_ADDRESS_HI;
- msg_addr_upper = dw_pcie_readl_dbi(pci, reg);
- reg = ep->msi_cap + PCI_MSI_DATA_64;
- msg_data = dw_pcie_readw_dbi(pci, reg);
+ reg = ep_func->msi_cap + PCI_MSI_ADDRESS_HI;
+ msg_addr_upper = dw_pcie_ep_readl_dbi(ep, func_no, reg);
+ reg = ep_func->msi_cap + PCI_MSI_DATA_64;
+ msg_data = dw_pcie_ep_readw_dbi(ep, func_no, reg);
} else {
msg_addr_upper = 0;
- reg = ep->msi_cap + PCI_MSI_DATA_32;
- msg_data = dw_pcie_readw_dbi(pci, reg);
+ reg = ep_func->msi_cap + PCI_MSI_DATA_32;
+ msg_data = dw_pcie_ep_readw_dbi(ep, func_no, reg);
}
- msg_addr = ((u64) msg_addr_upper) << 32 | msg_addr_lower;
- ret = dw_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys, msg_addr,
- epc->mem->page_size);
+ msg_addr = ((u64)msg_addr_upper) << 32 | msg_addr_lower;
+
+ msg_addr = dw_pcie_ep_align_addr(epc, msg_addr, &map_size, &offset);
+ ret = dw_pcie_ep_map_addr(epc, func_no, 0, ep->msi_mem_phys, msg_addr,
+ map_size);
if (ret)
return ret;
- writel(msg_data | (interrupt_num - 1), ep->msi_mem);
+ writel(msg_data | (interrupt_num - 1), ep->msi_mem + offset);
+
+ dw_pcie_ep_unmap_addr(epc, func_no, 0, ep->msi_mem_phys);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dw_pcie_ep_raise_msi_irq);
+
+/**
+ * dw_pcie_ep_raise_msix_irq_doorbell - Raise MSI-X to the host using Doorbell
+ * method
+ * @ep: DWC EP device
+ * @func_no: Function number of the endpoint device
+ * @interrupt_num: Interrupt number to be raised
+ *
+ * Return: 0 if success, errno otherwise.
+ */
+int dw_pcie_ep_raise_msix_irq_doorbell(struct dw_pcie_ep *ep, u8 func_no,
+ u16 interrupt_num)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct dw_pcie_ep_func *ep_func;
+ u32 msg_data;
+
+ ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
+ if (!ep_func || !ep_func->msix_cap)
+ return -EINVAL;
+
+ msg_data = (func_no << PCIE_MSIX_DOORBELL_PF_SHIFT) |
+ (interrupt_num - 1);
- dw_pcie_ep_unmap_addr(epc, func_no, ep->msi_mem_phys);
+ dw_pcie_writel_dbi(pci, PCIE_MSIX_DOORBELL, msg_data);
return 0;
}
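The doorbell variant avoids mapping the MSI-X table entirely: one DBI write encodes both the function and the vector. A worked example, assuming PCIE_MSIX_DOORBELL_PF_SHIFT is 24 as in pcie-designware.h:

	/*
	 * Example: func_no = 1, interrupt_num = 5:
	 *
	 *   msg_data = (1 << 24) | (5 - 1) = 0x01000004
	 *
	 * Writing this to PCIE_MSIX_DOORBELL makes the controller raise
	 * MSI-X vector 5 on behalf of physical function 1.
	 */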
+/**
+ * dw_pcie_ep_raise_msix_irq - Raise MSI-X to the host
+ * @ep: DWC EP device
+ * @func_no: Function number of the endpoint device
+ * @interrupt_num: Interrupt number to be raised
+ *
+ * Return: 0 if success, errno otherwise.
+ */
int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
- u16 interrupt_num)
+ u16 interrupt_num)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct pci_epf_msix_tbl *msix_tbl;
+ struct dw_pcie_ep_func *ep_func;
struct pci_epc *epc = ep->epc;
- u16 tbl_offset, bir;
- u32 bar_addr_upper, bar_addr_lower;
- u32 msg_addr_upper, msg_addr_lower;
+ size_t map_size = sizeof(u32);
+ size_t offset;
u32 reg, msg_data, vec_ctrl;
- u64 tbl_addr, msg_addr, reg_u64;
- void __iomem *msix_tbl;
+ u32 tbl_offset;
+ u64 msg_addr;
int ret;
+ u8 bir;
- reg = ep->msix_cap + PCI_MSIX_TABLE;
- tbl_offset = dw_pcie_readl_dbi(pci, reg);
- bir = (tbl_offset & PCI_MSIX_TABLE_BIR);
- tbl_offset &= PCI_MSIX_TABLE_OFFSET;
-
- reg = PCI_BASE_ADDRESS_0 + (4 * bir);
- bar_addr_upper = 0;
- bar_addr_lower = dw_pcie_readl_dbi(pci, reg);
- reg_u64 = (bar_addr_lower & PCI_BASE_ADDRESS_MEM_TYPE_MASK);
- if (reg_u64 == PCI_BASE_ADDRESS_MEM_TYPE_64)
- bar_addr_upper = dw_pcie_readl_dbi(pci, reg + 4);
-
- tbl_addr = ((u64) bar_addr_upper) << 32 | bar_addr_lower;
- tbl_addr += (tbl_offset + ((interrupt_num - 1) * PCI_MSIX_ENTRY_SIZE));
- tbl_addr &= PCI_BASE_ADDRESS_MEM_MASK;
-
- msix_tbl = ioremap_nocache(ep->phys_base + tbl_addr,
- PCI_MSIX_ENTRY_SIZE);
- if (!msix_tbl)
+ ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
+ if (!ep_func || !ep_func->msix_cap)
return -EINVAL;
- msg_addr_lower = readl(msix_tbl + PCI_MSIX_ENTRY_LOWER_ADDR);
- msg_addr_upper = readl(msix_tbl + PCI_MSIX_ENTRY_UPPER_ADDR);
- msg_addr = ((u64) msg_addr_upper) << 32 | msg_addr_lower;
- msg_data = readl(msix_tbl + PCI_MSIX_ENTRY_DATA);
- vec_ctrl = readl(msix_tbl + PCI_MSIX_ENTRY_VECTOR_CTRL);
+ reg = ep_func->msix_cap + PCI_MSIX_TABLE;
+ tbl_offset = dw_pcie_ep_readl_dbi(ep, func_no, reg);
+ bir = FIELD_GET(PCI_MSIX_TABLE_BIR, tbl_offset);
+ tbl_offset &= PCI_MSIX_TABLE_OFFSET;
- iounmap(msix_tbl);
+ msix_tbl = ep->epf_bar[bir]->addr + tbl_offset;
+ msg_addr = msix_tbl[(interrupt_num - 1)].msg_addr;
+ msg_data = msix_tbl[(interrupt_num - 1)].msg_data;
+ vec_ctrl = msix_tbl[(interrupt_num - 1)].vector_ctrl;
- if (vec_ctrl & PCI_MSIX_ENTRY_CTRL_MASKBIT)
+ if (vec_ctrl & PCI_MSIX_ENTRY_CTRL_MASKBIT) {
+ dev_dbg(pci->dev, "MSI-X entry ctrl set\n");
return -EPERM;
+ }
- ret = dw_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys, msg_addr,
- epc->mem->page_size);
+ msg_addr = dw_pcie_ep_align_addr(epc, msg_addr, &map_size, &offset);
+ ret = dw_pcie_ep_map_addr(epc, func_no, 0, ep->msi_mem_phys, msg_addr,
+ map_size);
if (ret)
return ret;
- writel(msg_data, ep->msi_mem);
+ writel(msg_data, ep->msi_mem + offset);
- dw_pcie_ep_unmap_addr(epc, func_no, ep->msi_mem_phys);
+ dw_pcie_ep_unmap_addr(epc, func_no, 0, ep->msi_mem_phys);
return 0;
}
+EXPORT_SYMBOL_GPL(dw_pcie_ep_raise_msix_irq);
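For context, the three raise helpers above are what a glue driver's ->raise_irq() callback dispatches to; a minimal sketch (the foo_ prefix is hypothetical, but the pattern follows existing DWC glue drivers):

	static int foo_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
					 unsigned int type, u16 interrupt_num)
	{
		switch (type) {
		case PCI_IRQ_INTX:
			return dw_pcie_ep_raise_intx_irq(ep, func_no);
		case PCI_IRQ_MSI:
			return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
		case PCI_IRQ_MSIX:
			return dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num);
		default:
			return -EINVAL;
		}
	}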
+
+/**
+ * dw_pcie_ep_cleanup - Cleanup DWC EP resources after fundamental reset
+ * @ep: DWC EP device
+ *
+ * Cleans up the DWC EP specific resources like eDMA etc. after a fundamental
+ * reset such as PERST#. Note that this API is only applicable for drivers
+ * supporting PERST# or other methods of fundamental reset.
+ */
+void dw_pcie_ep_cleanup(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+ dwc_pcie_debugfs_deinit(pci);
+ dw_pcie_edma_remove(pci);
+}
+EXPORT_SYMBOL_GPL(dw_pcie_ep_cleanup);
-void dw_pcie_ep_exit(struct dw_pcie_ep *ep)
+/**
+ * dw_pcie_ep_deinit - Deinitialize the endpoint device
+ * @ep: DWC EP device
+ *
+ * Deinitialize the endpoint device. The EPC device is not destroyed, since
+ * that is taken care of by devres.
+ */
+void dw_pcie_ep_deinit(struct dw_pcie_ep *ep)
{
struct pci_epc *epc = ep->epc;
+ dw_pcie_ep_cleanup(ep);
+
pci_epc_mem_free_addr(epc, ep->msi_mem_phys, ep->msi_mem,
- epc->mem->page_size);
+ epc->mem->window.page_size);
pci_epc_mem_exit(epc);
}
+EXPORT_SYMBOL_GPL(dw_pcie_ep_deinit);
-int dw_pcie_ep_init(struct dw_pcie_ep *ep)
+static void dw_pcie_ep_init_non_sticky_registers(struct dw_pcie *pci)
+{
+ struct dw_pcie_ep *ep = &pci->ep;
+ unsigned int offset;
+ unsigned int nbars;
+ enum pci_barno bar;
+ u32 reg, i, val;
+
+ offset = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_REBAR);
+
+ dw_pcie_dbi_ro_wr_en(pci);
+
+ if (offset) {
+ reg = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL);
+ nbars = FIELD_GET(PCI_REBAR_CTRL_NBAR_MASK, reg);
+
+ /*
+ * PCIe r6.0, sec 7.8.6.2 requires us to support at least one
+ * size in the range from 1 MB to 512 GB. Advertise support
+ * for 1 MB BAR size only.
+ *
+ * For a BAR that has been configured via dw_pcie_ep_set_bar(),
+ * advertise support for only that size instead.
+ */
+ for (i = 0; i < nbars; i++, offset += PCI_REBAR_CTRL) {
+ /*
+ * While the RESBAR_CAP_REG_* fields are sticky, the
+ * RESBAR_CTRL_REG_BAR_SIZE field is non-sticky (it is
+ * sticky in certain versions of DWC PCIe, but not all).
+ *
+ * RESBAR_CTRL_REG_BAR_SIZE is updated automatically by
+ * the controller when RESBAR_CAP_REG is written, which
+ * is why RESBAR_CAP_REG is written here.
+ */
+ val = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL);
+ bar = FIELD_GET(PCI_REBAR_CTRL_BAR_IDX, val);
+ if (ep->epf_bar[bar])
+ pci_epc_bar_size_to_rebar_cap(ep->epf_bar[bar]->size, &val);
+ else
+ val = BIT(4);
+
+ dw_pcie_writel_dbi(pci, offset + PCI_REBAR_CAP, val);
+ }
+ }
+
+ dw_pcie_setup(pci);
+ dw_pcie_dbi_ro_wr_dis(pci);
+}
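The REBAR capability register encodes each supported size as one bit; a sketch of the encoding the loop above relies on:

	/*
	 * PCI_REBAR_CAP size bits: bit n set => 2^(n + 16) bytes supported.
	 *   BIT(4)  -> 1 MB  (the default advertised above)
	 *   BIT(8)  -> 16 MB
	 *   BIT(23) -> 512 GB
	 * pci_epc_bar_size_to_rebar_cap() sets the single bit matching the
	 * size configured via dw_pcie_ep_set_bar().
	 */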
+
+/**
+ * dw_pcie_ep_init_registers - Initialize DWC EP specific registers
+ * @ep: DWC EP device
+ *
+ * Initialize the registers (CSRs) specific to DWC EP. This API should be called
+ * only when the endpoint receives an active refclk (either from host or
+ * generated locally).
+ */
+int dw_pcie_ep_init_registers(struct dw_pcie_ep *ep)
{
- int ret;
- void *addr;
- struct pci_epc *epc;
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct dw_pcie_ep_func *ep_func;
struct device *dev = pci->dev;
- struct device_node *np = dev->of_node;
+ struct pci_epc *epc = ep->epc;
+ u32 ptm_cap_base, reg;
+ u8 hdr_type;
+ u8 func_no;
+ void *addr;
+ int ret;
- if (!pci->dbi_base || !pci->dbi_base2) {
- dev_err(dev, "dbi_base/dbi_base2 is not populated\n");
- return -EINVAL;
- }
- if (pci->iatu_unroll_enabled && !pci->atu_base) {
- dev_err(dev, "atu_base is not populated\n");
- return -EINVAL;
+ hdr_type = dw_pcie_readb_dbi(pci, PCI_HEADER_TYPE) &
+ PCI_HEADER_TYPE_MASK;
+ if (hdr_type != PCI_HEADER_TYPE_NORMAL) {
+ dev_err(pci->dev,
+ "PCIe controller is not set to EP mode (hdr_type:0x%x)!\n",
+ hdr_type);
+ return -EIO;
}
- ret = of_property_read_u32(np, "num-ib-windows", &ep->num_ib_windows);
- if (ret < 0) {
- dev_err(dev, "Unable to read *num-ib-windows* property\n");
+ dw_pcie_version_detect(pci);
+
+ dw_pcie_iatu_detect(pci);
+
+ ret = dw_pcie_edma_detect(pci);
+ if (ret)
return ret;
+
+ ret = -ENOMEM;
+ if (!ep->ib_window_map) {
+ ep->ib_window_map = devm_bitmap_zalloc(dev, pci->num_ib_windows,
+ GFP_KERNEL);
+ if (!ep->ib_window_map)
+ goto err_remove_edma;
}
- if (ep->num_ib_windows > MAX_IATU_IN) {
- dev_err(dev, "Invalid *num-ib-windows*\n");
- return -EINVAL;
+
+ if (!ep->ob_window_map) {
+ ep->ob_window_map = devm_bitmap_zalloc(dev, pci->num_ob_windows,
+ GFP_KERNEL);
+ if (!ep->ob_window_map)
+ goto err_remove_edma;
}
- ret = of_property_read_u32(np, "num-ob-windows", &ep->num_ob_windows);
- if (ret < 0) {
- dev_err(dev, "Unable to read *num-ob-windows* property\n");
- return ret;
+ if (!ep->outbound_addr) {
+ addr = devm_kcalloc(dev, pci->num_ob_windows, sizeof(phys_addr_t),
+ GFP_KERNEL);
+ if (!addr)
+ goto err_remove_edma;
+ ep->outbound_addr = addr;
}
- if (ep->num_ob_windows > MAX_IATU_OUT) {
- dev_err(dev, "Invalid *num-ob-windows*\n");
- return -EINVAL;
+
+ for (func_no = 0; func_no < epc->max_functions; func_no++) {
+
+ ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
+ if (ep_func)
+ continue;
+
+ ep_func = devm_kzalloc(dev, sizeof(*ep_func), GFP_KERNEL);
+ if (!ep_func)
+ goto err_remove_edma;
+
+ ep_func->func_no = func_no;
+ ep_func->msi_cap = dw_pcie_ep_find_capability(ep, func_no,
+ PCI_CAP_ID_MSI);
+ ep_func->msix_cap = dw_pcie_ep_find_capability(ep, func_no,
+ PCI_CAP_ID_MSIX);
+
+ list_add_tail(&ep_func->list, &ep->func_list);
}
- ep->ib_window_map = devm_kcalloc(dev,
- BITS_TO_LONGS(ep->num_ib_windows),
- sizeof(long),
- GFP_KERNEL);
- if (!ep->ib_window_map)
- return -ENOMEM;
-
- ep->ob_window_map = devm_kcalloc(dev,
- BITS_TO_LONGS(ep->num_ob_windows),
- sizeof(long),
- GFP_KERNEL);
- if (!ep->ob_window_map)
- return -ENOMEM;
-
- addr = devm_kcalloc(dev, ep->num_ob_windows, sizeof(phys_addr_t),
- GFP_KERNEL);
- if (!addr)
- return -ENOMEM;
- ep->outbound_addr = addr;
+ if (ep->ops->init)
+ ep->ops->init(ep);
+
+ ptm_cap_base = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_PTM);
+
+ /*
+ * PTM responder capability can be disabled only after disabling
+ * PTM root capability.
+ */
+ if (ptm_cap_base) {
+ dw_pcie_dbi_ro_wr_en(pci);
+ reg = dw_pcie_readl_dbi(pci, ptm_cap_base + PCI_PTM_CAP);
+ reg &= ~PCI_PTM_CAP_ROOT;
+ dw_pcie_writel_dbi(pci, ptm_cap_base + PCI_PTM_CAP, reg);
+
+ reg = dw_pcie_readl_dbi(pci, ptm_cap_base + PCI_PTM_CAP);
+ reg &= ~(PCI_PTM_CAP_RES | PCI_PTM_GRANULARITY_MASK);
+ dw_pcie_writel_dbi(pci, ptm_cap_base + PCI_PTM_CAP, reg);
+ dw_pcie_dbi_ro_wr_dis(pci);
+ }
+
+ dw_pcie_ep_init_non_sticky_registers(pci);
+
+ dwc_pcie_debugfs_init(pci, DW_PCIE_EP_TYPE);
+
+ return 0;
+
+err_remove_edma:
+ dw_pcie_edma_remove(pci);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dw_pcie_ep_init_registers);
+
+/**
+ * dw_pcie_ep_linkup - Notify EPF drivers about Link Up event
+ * @ep: DWC EP device
+ */
+void dw_pcie_ep_linkup(struct dw_pcie_ep *ep)
+{
+ struct pci_epc *epc = ep->epc;
+
+ pci_epc_linkup(epc);
+}
+EXPORT_SYMBOL_GPL(dw_pcie_ep_linkup);
+
+/**
+ * dw_pcie_ep_linkdown - Notify EPF drivers about Link Down event
+ * @ep: DWC EP device
+ *
+ * Non-sticky registers are also initialized before sending the notification to
+ * the EPF drivers. This is needed since the registers must be reinitialized
+ * before the link comes up again.
+ */
+void dw_pcie_ep_linkdown(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct pci_epc *epc = ep->epc;
+
+ /*
+ * Initialize the non-sticky DWC registers as they would've reset post
+ * Link Down. This is specifically needed for drivers not supporting
+ * PERST# as they have no way to reinitialize the registers before the
+ * link comes back again.
+ */
+ dw_pcie_ep_init_non_sticky_registers(pci);
+
+ pci_epc_linkdown(epc);
+}
+EXPORT_SYMBOL_GPL(dw_pcie_ep_linkdown);
+
+static int dw_pcie_ep_get_resources(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct device *dev = pci->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct device_node *np = dev->of_node;
+ struct pci_epc *epc = ep->epc;
+ struct resource *res;
+ int ret;
+
+ ret = dw_pcie_get_resources(pci);
+ if (ret)
+ return ret;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
+ if (!res)
+ return -EINVAL;
+
+ ep->phys_base = res->start;
+ ep->addr_size = resource_size(res);
+
+ /*
+ * artpec6_pcie_cpu_addr_fixup() uses ep->phys_base, so call
+ * dw_pcie_parent_bus_offset() after setting ep->phys_base.
+ */
+ pci->parent_bus_offset = dw_pcie_parent_bus_offset(pci, "addr_space",
+ ep->phys_base);
+
+ ret = of_property_read_u8(np, "max-functions", &epc->max_functions);
+ if (ret < 0)
+ epc->max_functions = 1;
+
+ return 0;
+}
+
+/**
+ * dw_pcie_ep_init - Initialize the endpoint device
+ * @ep: DWC EP device
+ *
+ * Initialize the endpoint device. Allocate resources and create the EPC
+ * device with the endpoint framework.
+ *
+ * Return: 0 if success, errno otherwise.
+ */
+int dw_pcie_ep_init(struct dw_pcie_ep *ep)
+{
+ int ret;
+ struct pci_epc *epc;
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct device *dev = pci->dev;
+
+ INIT_LIST_HEAD(&ep->func_list);
epc = devm_pci_epc_create(dev, &epc_ops);
if (IS_ERR(epc)) {
@@ -557,31 +1097,33 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
ep->epc = epc;
epc_set_drvdata(epc, ep);
- if (ep->ops->ep_init)
- ep->ops->ep_init(ep);
+ ret = dw_pcie_ep_get_resources(ep);
+ if (ret)
+ return ret;
- ret = of_property_read_u8(np, "max-functions", &epc->max_functions);
- if (ret < 0)
- epc->max_functions = 1;
+ if (ep->ops->pre_init)
+ ep->ops->pre_init(ep);
- ret = __pci_epc_mem_init(epc, ep->phys_base, ep->addr_size,
- ep->page_size);
+ ret = pci_epc_mem_init(epc, ep->phys_base, ep->addr_size,
+ ep->page_size);
if (ret < 0) {
dev_err(dev, "Failed to initialize address space\n");
return ret;
}
ep->msi_mem = pci_epc_mem_alloc_addr(epc, &ep->msi_mem_phys,
- epc->mem->page_size);
+ epc->mem->window.page_size);
if (!ep->msi_mem) {
+ ret = -ENOMEM;
dev_err(dev, "Failed to reserve memory for MSI/MSI-X\n");
- return -ENOMEM;
+ goto err_exit_epc_mem;
}
- ep->msi_cap = dw_pcie_ep_find_capability(pci, PCI_CAP_ID_MSI);
- ep->msix_cap = dw_pcie_ep_find_capability(pci, PCI_CAP_ID_MSIX);
+ return 0;
- dw_pcie_setup(pci);
+err_exit_epc_mem:
+ pci_epc_mem_exit(epc);
- return 0;
+ return ret;
}
+EXPORT_SYMBOL_GPL(dw_pcie_ep_init);
diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
index 721d60a5d9e4..372207c33a85 100644
--- a/drivers/pci/controller/dwc/pcie-designware-host.c
+++ b/drivers/pci/controller/dwc/pcie-designware-host.c
@@ -3,13 +3,17 @@
* Synopsys DesignWare PCIe host controller driver
*
* Copyright (C) 2013 Samsung Electronics Co., Ltd.
- * http://www.samsung.com
+ * https://www.samsung.com
*
* Author: Jingoo Han <jg1.han@samsung.com>
*/
+#include <linux/align.h>
+#include <linux/iopoll.h>
#include <linux/irqchip/chained_irq.h>
+#include <linux/irqchip/irq-msi-lib.h>
#include <linux/irqdomain.h>
+#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/pci_regs.h>
@@ -19,86 +23,53 @@
#include "pcie-designware.h"
static struct pci_ops dw_pcie_ops;
-
-static int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
- u32 *val)
-{
- struct dw_pcie *pci;
-
- if (pp->ops->rd_own_conf)
- return pp->ops->rd_own_conf(pp, where, size, val);
-
- pci = to_dw_pcie_from_pp(pp);
- return dw_pcie_read(pci->dbi_base + where, size, val);
-}
-
-static int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
- u32 val)
-{
- struct dw_pcie *pci;
-
- if (pp->ops->wr_own_conf)
- return pp->ops->wr_own_conf(pp, where, size, val);
-
- pci = to_dw_pcie_from_pp(pp);
- return dw_pcie_write(pci->dbi_base + where, size, val);
-}
-
-static void dw_msi_ack_irq(struct irq_data *d)
-{
- irq_chip_ack_parent(d);
-}
-
-static void dw_msi_mask_irq(struct irq_data *d)
-{
- pci_msi_mask_irq(d);
- irq_chip_mask_parent(d);
-}
-
-static void dw_msi_unmask_irq(struct irq_data *d)
-{
- pci_msi_unmask_irq(d);
- irq_chip_unmask_parent(d);
-}
-
-static struct irq_chip dw_pcie_msi_irq_chip = {
- .name = "PCI-MSI",
- .irq_ack = dw_msi_ack_irq,
- .irq_mask = dw_msi_mask_irq,
- .irq_unmask = dw_msi_unmask_irq,
-};
-
-static struct msi_domain_info dw_pcie_msi_domain_info = {
- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI),
- .chip = &dw_pcie_msi_irq_chip,
+static struct pci_ops dw_pcie_ecam_ops;
+static struct pci_ops dw_child_pcie_ops;
+
+#define DW_PCIE_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
+ MSI_FLAG_USE_DEF_CHIP_OPS | \
+ MSI_FLAG_NO_AFFINITY | \
+ MSI_FLAG_PCI_MSI_MASK_PARENT)
+#define DW_PCIE_MSI_FLAGS_SUPPORTED (MSI_FLAG_MULTI_PCI_MSI | \
+ MSI_FLAG_PCI_MSIX | \
+ MSI_GENERIC_FLAGS_MASK)
+
+#define IS_256MB_ALIGNED(x) IS_ALIGNED(x, SZ_256M)
+
+static const struct msi_parent_ops dw_pcie_msi_parent_ops = {
+ .required_flags = DW_PCIE_MSI_FLAGS_REQUIRED,
+ .supported_flags = DW_PCIE_MSI_FLAGS_SUPPORTED,
+ .bus_select_token = DOMAIN_BUS_PCI_MSI,
+ .chip_flags = MSI_CHIP_FLAG_SET_ACK,
+ .prefix = "DW-",
+ .init_dev_msi_info = msi_lib_init_dev_msi_info,
};
/* MSI int handler */
-irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
+irqreturn_t dw_handle_msi_irq(struct dw_pcie_rp *pp)
{
- int i, pos, irq;
- u32 val, num_ctrls;
+ int i, pos;
+ unsigned long val;
+ u32 status, num_ctrls;
irqreturn_t ret = IRQ_NONE;
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
for (i = 0; i < num_ctrls; i++) {
- dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_STATUS +
- (i * MSI_REG_CTRL_BLOCK_SIZE),
- 4, &val);
- if (!val)
+ status = dw_pcie_readl_dbi(pci, PCIE_MSI_INTR0_STATUS +
+ (i * MSI_REG_CTRL_BLOCK_SIZE));
+ if (!status)
continue;
ret = IRQ_HANDLED;
+ val = status;
pos = 0;
- while ((pos = find_next_bit((unsigned long *) &val,
- MAX_MSI_IRQS_PER_CTRL,
+ while ((pos = find_next_bit(&val, MAX_MSI_IRQS_PER_CTRL,
pos)) != MAX_MSI_IRQS_PER_CTRL) {
- irq = irq_find_mapping(pp->irq_domain,
- (i * MAX_MSI_IRQS_PER_CTRL) +
- pos);
- generic_handle_irq(irq);
+ generic_handle_domain_irq(pp->irq_domain,
+ (i * MAX_MSI_IRQS_PER_CTRL) +
+ pos);
pos++;
}
}
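Each MSI controller bank demultiplexes 32 vectors, so the bit position found above composes directly into a hwirq:

	/*
	 * hwirq composition in the loop above (MAX_MSI_IRQS_PER_CTRL = 32):
	 *
	 *   hwirq = i * 32 + pos
	 *
	 * e.g. status bit 3 in bank i = 2 demuxes to hwirq 67, which
	 * generic_handle_domain_irq() resolves through pp->irq_domain.
	 */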
@@ -110,7 +81,7 @@ irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
static void dw_chained_msi_isr(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
- struct pcie_port *pp;
+ struct dw_pcie_rp *pp;
chained_irq_enter(chip, desc);
@@ -120,106 +91,78 @@ static void dw_chained_msi_isr(struct irq_desc *desc)
chained_irq_exit(chip, desc);
}
-static void dw_pci_setup_msi_msg(struct irq_data *data, struct msi_msg *msg)
+static void dw_pci_setup_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
- struct pcie_port *pp = irq_data_get_irq_chip_data(data);
+ struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
u64 msi_target;
- if (pp->ops->get_msi_addr)
- msi_target = pp->ops->get_msi_addr(pp);
- else
- msi_target = (u64)pp->msi_data;
+ msi_target = (u64)pp->msi_data;
msg->address_lo = lower_32_bits(msi_target);
msg->address_hi = upper_32_bits(msi_target);
- if (pp->ops->get_msi_data)
- msg->data = pp->ops->get_msi_data(pp, data->hwirq);
- else
- msg->data = data->hwirq;
+ msg->data = d->hwirq;
dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n",
- (int)data->hwirq, msg->address_hi, msg->address_lo);
-}
-
-static int dw_pci_msi_set_affinity(struct irq_data *irq_data,
- const struct cpumask *mask, bool force)
-{
- return -EINVAL;
+ (int)d->hwirq, msg->address_hi, msg->address_lo);
}
-static void dw_pci_bottom_mask(struct irq_data *data)
+static void dw_pci_bottom_mask(struct irq_data *d)
{
- struct pcie_port *pp = irq_data_get_irq_chip_data(data);
+ struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
unsigned int res, bit, ctrl;
unsigned long flags;
raw_spin_lock_irqsave(&pp->lock, flags);
- if (pp->ops->msi_clear_irq) {
- pp->ops->msi_clear_irq(pp, data->hwirq);
- } else {
- ctrl = data->hwirq / MAX_MSI_IRQS_PER_CTRL;
- res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
- bit = data->hwirq % MAX_MSI_IRQS_PER_CTRL;
+ ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
+ res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
+ bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;
- pp->irq_status[ctrl] &= ~(1 << bit);
- dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + res, 4,
- ~pp->irq_status[ctrl]);
- }
+ pp->irq_mask[ctrl] |= BIT(bit);
+ dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res, pp->irq_mask[ctrl]);
raw_spin_unlock_irqrestore(&pp->lock, flags);
}
-static void dw_pci_bottom_unmask(struct irq_data *data)
+static void dw_pci_bottom_unmask(struct irq_data *d)
{
- struct pcie_port *pp = irq_data_get_irq_chip_data(data);
+ struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
unsigned int res, bit, ctrl;
unsigned long flags;
raw_spin_lock_irqsave(&pp->lock, flags);
- if (pp->ops->msi_set_irq) {
- pp->ops->msi_set_irq(pp, data->hwirq);
- } else {
- ctrl = data->hwirq / MAX_MSI_IRQS_PER_CTRL;
- res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
- bit = data->hwirq % MAX_MSI_IRQS_PER_CTRL;
+ ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
+ res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
+ bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;
- pp->irq_status[ctrl] |= 1 << bit;
- dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + res, 4,
- ~pp->irq_status[ctrl]);
- }
+ pp->irq_mask[ctrl] &= ~BIT(bit);
+ dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res, pp->irq_mask[ctrl]);
raw_spin_unlock_irqrestore(&pp->lock, flags);
}
static void dw_pci_bottom_ack(struct irq_data *d)
{
- struct pcie_port *pp = irq_data_get_irq_chip_data(d);
+ struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
unsigned int res, bit, ctrl;
- unsigned long flags;
ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;
- raw_spin_lock_irqsave(&pp->lock, flags);
-
- dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS + res, 4, 1 << bit);
-
- if (pp->ops->msi_irq_ack)
- pp->ops->msi_irq_ack(d->hwirq, pp);
-
- raw_spin_unlock_irqrestore(&pp->lock, flags);
+ dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_STATUS + res, BIT(bit));
}
static struct irq_chip dw_pci_msi_bottom_irq_chip = {
.name = "DWPCI-MSI",
.irq_ack = dw_pci_bottom_ack,
.irq_compose_msi_msg = dw_pci_setup_msi_msg,
- .irq_set_affinity = dw_pci_msi_set_affinity,
.irq_mask = dw_pci_bottom_mask,
.irq_unmask = dw_pci_bottom_unmask,
};
@@ -228,7 +171,7 @@ static int dw_pcie_irq_domain_alloc(struct irq_domain *domain,
unsigned int virq, unsigned int nr_irqs,
void *args)
{
- struct pcie_port *pp = domain->host_data;
+ struct dw_pcie_rp *pp = domain->host_data;
unsigned long flags;
u32 i;
int bit;
@@ -245,7 +188,7 @@ static int dw_pcie_irq_domain_alloc(struct irq_domain *domain,
for (i = 0; i < nr_irqs; i++)
irq_domain_set_info(domain, virq + i, bit + i,
- &dw_pci_msi_bottom_irq_chip,
+ pp->msi_irq_chip,
pp, handle_edge_irq,
NULL, NULL);
@@ -255,13 +198,13 @@ static int dw_pcie_irq_domain_alloc(struct irq_domain *domain,
static void dw_pcie_irq_domain_free(struct irq_domain *domain,
unsigned int virq, unsigned int nr_irqs)
{
- struct irq_data *data = irq_domain_get_irq_data(domain, virq);
- struct pcie_port *pp = irq_data_get_irq_chip_data(data);
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+ struct dw_pcie_rp *pp = domain->host_data;
unsigned long flags;
raw_spin_lock_irqsave(&pp->lock, flags);
- bitmap_release_region(pp->msi_irq_in_use, data->hwirq,
+ bitmap_release_region(pp->msi_irq_in_use, d->hwirq,
order_base_2(nr_irqs));
raw_spin_unlock_irqrestore(&pp->lock, flags);
@@ -272,419 +215,867 @@ static const struct irq_domain_ops dw_pcie_msi_domain_ops = {
.free = dw_pcie_irq_domain_free,
};
-int dw_pcie_allocate_domains(struct pcie_port *pp)
+int dw_pcie_allocate_domains(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct fwnode_handle *fwnode = of_node_to_fwnode(pci->dev->of_node);
-
- pp->irq_domain = irq_domain_create_linear(fwnode, pp->num_vectors,
- &dw_pcie_msi_domain_ops, pp);
+ struct irq_domain_info info = {
+ .fwnode = dev_fwnode(pci->dev),
+ .ops = &dw_pcie_msi_domain_ops,
+ .size = pp->num_vectors,
+ .host_data = pp,
+ };
+
+ pp->irq_domain = msi_create_parent_irq_domain(&info, &dw_pcie_msi_parent_ops);
if (!pp->irq_domain) {
dev_err(pci->dev, "Failed to create IRQ domain\n");
return -ENOMEM;
}
- pp->msi_domain = pci_msi_create_irq_domain(fwnode,
- &dw_pcie_msi_domain_info,
- pp->irq_domain);
- if (!pp->msi_domain) {
- dev_err(pci->dev, "Failed to create MSI domain\n");
- irq_domain_remove(pp->irq_domain);
- return -ENOMEM;
- }
-
return 0;
}
+EXPORT_SYMBOL_GPL(dw_pcie_allocate_domains);
-void dw_pcie_free_msi(struct pcie_port *pp)
+void dw_pcie_free_msi(struct dw_pcie_rp *pp)
{
- irq_set_chained_handler(pp->msi_irq, NULL);
- irq_set_handler_data(pp->msi_irq, NULL);
+ u32 ctrl;
+
+ for (ctrl = 0; ctrl < MAX_MSI_CTRLS; ctrl++) {
+ if (pp->msi_irq[ctrl] > 0)
+ irq_set_chained_handler_and_data(pp->msi_irq[ctrl],
+ NULL, NULL);
+ }
- irq_domain_remove(pp->msi_domain);
irq_domain_remove(pp->irq_domain);
}
+EXPORT_SYMBOL_GPL(dw_pcie_free_msi);
-void dw_pcie_msi_init(struct pcie_port *pp)
+void dw_pcie_msi_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct device *dev = pci->dev;
- struct page *page;
- u64 msi_target;
+ u64 msi_target = (u64)pp->msi_data;
+ u32 ctrl, num_ctrls;
- page = alloc_page(GFP_KERNEL);
- pp->msi_data = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
- if (dma_mapping_error(dev, pp->msi_data)) {
- dev_err(dev, "Failed to map MSI data\n");
- __free_page(page);
+ if (!pci_msi_enabled() || !pp->has_msi_ctrl)
return;
+
+ num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
+
+ /* Initialize IRQ Status array */
+ for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
+ dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK +
+ (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
+ pp->irq_mask[ctrl]);
+ dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_ENABLE +
+ (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
+ ~0);
}
- msi_target = (u64)pp->msi_data;
/* Program the msi_data */
- dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_LO, 4,
- lower_32_bits(msi_target));
- dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_HI, 4,
- upper_32_bits(msi_target));
+ dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_LO, lower_32_bits(msi_target));
+ dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_HI, upper_32_bits(msi_target));
}
+EXPORT_SYMBOL_GPL(dw_pcie_msi_init);
-int dw_pcie_host_init(struct pcie_port *pp)
+static int dw_pcie_parse_split_msi_irq(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct device *dev = pci->dev;
- struct device_node *np = dev->of_node;
struct platform_device *pdev = to_platform_device(dev);
- struct resource_entry *win, *tmp;
- struct pci_bus *bus, *child;
- struct pci_host_bridge *bridge;
- struct resource *cfg_res;
+ u32 ctrl, max_vectors;
+ int irq;
+
+ /* Parse any "msiX" IRQs described in the devicetree */
+ for (ctrl = 0; ctrl < MAX_MSI_CTRLS; ctrl++) {
+ char msi_name[] = "msiX";
+
+ msi_name[3] = '0' + ctrl;
+ irq = platform_get_irq_byname_optional(pdev, msi_name);
+ if (irq == -ENXIO)
+ break;
+ if (irq < 0)
+ return dev_err_probe(dev, irq,
+ "Failed to parse MSI IRQ '%s'\n",
+ msi_name);
+
+ pp->msi_irq[ctrl] = irq;
+ }
+
+ /* If no "msiX" IRQs, caller should fallback to "msi" IRQ */
+ if (ctrl == 0)
+ return -ENXIO;
+
+ max_vectors = ctrl * MAX_MSI_IRQS_PER_CTRL;
+ if (pp->num_vectors > max_vectors) {
+ dev_warn(dev, "Exceeding number of MSI vectors, limiting to %u\n",
+ max_vectors);
+ pp->num_vectors = max_vectors;
+ }
+ if (!pp->num_vectors)
+ pp->num_vectors = max_vectors;
+
+ return 0;
+}
+
+int dw_pcie_msi_host_init(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct device *dev = pci->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ u64 *msi_vaddr = NULL;
int ret;
+ u32 ctrl, num_ctrls;
- raw_spin_lock_init(&pci->pp.lock);
+ for (ctrl = 0; ctrl < MAX_MSI_CTRLS; ctrl++)
+ pp->irq_mask[ctrl] = ~0;
- cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
- if (cfg_res) {
- pp->cfg0_size = resource_size(cfg_res) >> 1;
- pp->cfg1_size = resource_size(cfg_res) >> 1;
- pp->cfg0_base = cfg_res->start;
- pp->cfg1_base = cfg_res->start + pp->cfg0_size;
- } else if (!pp->va_cfg0_base) {
- dev_err(dev, "Missing *config* reg space\n");
+ if (!pp->msi_irq[0]) {
+ ret = dw_pcie_parse_split_msi_irq(pp);
+ if (ret < 0 && ret != -ENXIO)
+ return ret;
}
- bridge = pci_alloc_host_bridge(0);
- if (!bridge)
- return -ENOMEM;
+ if (!pp->num_vectors)
+ pp->num_vectors = MSI_DEF_NUM_VECTORS;
+ num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
+
+ if (!pp->msi_irq[0]) {
+ pp->msi_irq[0] = platform_get_irq_byname_optional(pdev, "msi");
+ if (pp->msi_irq[0] < 0) {
+ pp->msi_irq[0] = platform_get_irq(pdev, 0);
+ if (pp->msi_irq[0] < 0)
+ return pp->msi_irq[0];
+ }
+ }
+
+ dev_dbg(dev, "Using %d MSI vectors\n", pp->num_vectors);
- ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
- &bridge->windows, &pp->io_base);
+ pp->msi_irq_chip = &dw_pci_msi_bottom_irq_chip;
+
+ ret = dw_pcie_allocate_domains(pp);
if (ret)
return ret;
- ret = devm_request_pci_bus_resources(dev, &bridge->windows);
- if (ret)
- goto error;
-
- /* Get the I/O and memory ranges from DT */
- resource_list_for_each_entry_safe(win, tmp, &bridge->windows) {
- switch (resource_type(win->res)) {
- case IORESOURCE_IO:
- ret = devm_pci_remap_iospace(dev, win->res,
- pp->io_base);
- if (ret) {
- dev_warn(dev, "Error %d: failed to map resource %pR\n",
- ret, win->res);
- resource_list_destroy_entry(win);
- } else {
- pp->io = win->res;
- pp->io->name = "I/O";
- pp->io_size = resource_size(pp->io);
- pp->io_bus_addr = pp->io->start - win->offset;
- }
- break;
- case IORESOURCE_MEM:
- pp->mem = win->res;
- pp->mem->name = "MEM";
- pp->mem_size = resource_size(pp->mem);
- pp->mem_bus_addr = pp->mem->start - win->offset;
- break;
- case 0:
- pp->cfg = win->res;
- pp->cfg0_size = resource_size(pp->cfg) >> 1;
- pp->cfg1_size = resource_size(pp->cfg) >> 1;
- pp->cfg0_base = pp->cfg->start;
- pp->cfg1_base = pp->cfg->start + pp->cfg0_size;
- break;
- case IORESOURCE_BUS:
- pp->busn = win->res;
- break;
- }
+ for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
+ if (pp->msi_irq[ctrl] > 0)
+ irq_set_chained_handler_and_data(pp->msi_irq[ctrl],
+ dw_chained_msi_isr, pp);
}
- if (!pci->dbi_base) {
- pci->dbi_base = devm_pci_remap_cfgspace(dev,
- pp->cfg->start,
- resource_size(pp->cfg));
- if (!pci->dbi_base) {
- dev_err(dev, "Error with ioremap\n");
- ret = -ENOMEM;
- goto error;
+ /*
+ * Even though the iMSI-RX module supports 64-bit addresses, some
+ * peripheral PCIe devices may lack 64-bit message support. In
+ * order not to miss MSI TLPs from those devices, the MSI target
+ * address has to be within the lowest 4GB.
+ *
+ * Note: until a better alternative is found, the reservation is
+ * done by allocating from the artificially limited DMA-coherent
+ * memory.
+ */
+ ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
+ if (!ret)
+ msi_vaddr = dmam_alloc_coherent(dev, sizeof(u64), &pp->msi_data,
+ GFP_KERNEL);
+
+ if (!msi_vaddr) {
+ dev_warn(dev, "Failed to allocate 32-bit MSI address\n");
+ dma_set_coherent_mask(dev, DMA_BIT_MASK(64));
+ msi_vaddr = dmam_alloc_coherent(dev, sizeof(u64), &pp->msi_data,
+ GFP_KERNEL);
+ if (!msi_vaddr) {
+ dev_err(dev, "Failed to allocate MSI address\n");
+ dw_pcie_free_msi(pp);
+ return -ENOMEM;
}
}
- pp->mem_base = pp->mem->start;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dw_pcie_msi_host_init);
- if (!pp->va_cfg0_base) {
- pp->va_cfg0_base = devm_pci_remap_cfgspace(dev,
- pp->cfg0_base, pp->cfg0_size);
- if (!pp->va_cfg0_base) {
- dev_err(dev, "Error with ioremap in function\n");
- ret = -ENOMEM;
- goto error;
- }
+static void dw_pcie_host_request_msg_tlp_res(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct resource_entry *win;
+ struct resource *res;
+
+ win = resource_list_first_type(&pp->bridge->windows, IORESOURCE_MEM);
+ if (win) {
+ res = devm_kzalloc(pci->dev, sizeof(*res), GFP_KERNEL);
+ if (!res)
+ return;
+
+ /*
+ * Allocate MSG TLP region of size 'region_align' at the end of
+ * the host bridge window.
+ */
+ res->start = win->res->end - pci->region_align + 1;
+ res->end = win->res->end;
+ res->name = "msg";
+ res->flags = win->res->flags | IORESOURCE_BUSY;
+
+ if (!devm_request_resource(pci->dev, win->res, res))
+ pp->msg_res = res;
}
+}
- if (!pp->va_cfg1_base) {
- pp->va_cfg1_base = devm_pci_remap_cfgspace(dev,
- pp->cfg1_base,
- pp->cfg1_size);
- if (!pp->va_cfg1_base) {
- dev_err(dev, "Error with ioremap\n");
- ret = -ENOMEM;
- goto error;
- }
+static int dw_pcie_config_ecam_iatu(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct dw_pcie_ob_atu_cfg atu = {0};
+ resource_size_t bus_range_max;
+ struct resource_entry *bus;
+ int ret;
+
+ bus = resource_list_first_type(&pp->bridge->windows, IORESOURCE_BUS);
+
+ /*
+ * The root bus under the host bridge doesn't require any iATU
+ * configuration, as the DBI region is used to access the root bus config
+ * space. The bus immediately below the root bus needs a type 0 iATU
+ * configuration, and the remaining buses need type 1.
+ */
+ atu.index = 0;
+ atu.type = PCIE_ATU_TYPE_CFG0;
+ atu.parent_bus_addr = pp->cfg0_base + SZ_1M;
+ /* 1 MiB covers one bus: 32 (devices) * 8 (functions) * 4 KiB (config space) */
+ atu.size = SZ_1M;
+ atu.ctrl2 = PCIE_ATU_CFG_SHIFT_MODE_ENABLE;
+ ret = dw_pcie_prog_outbound_atu(pci, &atu);
+ if (ret)
+ return ret;
+
+ bus_range_max = resource_size(bus->res);
+
+ if (bus_range_max < 2)
+ return 0;
+
+ /* Configure remaining buses in type 1 iATU configuration */
+ atu.index = 1;
+ atu.type = PCIE_ATU_TYPE_CFG1;
+ atu.parent_bus_addr = pp->cfg0_base + SZ_2M;
+ atu.size = (SZ_1M * bus_range_max) - SZ_2M;
+ atu.ctrl2 = PCIE_ATU_CFG_SHIFT_MODE_ENABLE;
+
+ return dw_pcie_prog_outbound_atu(pci, &atu);
+}
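The resulting iATU carve-up of the ECAM window can be pictured as follows (1 MiB of config space per bus, with CFG shift mode deriving the BDF from address bits):

	/*
	 * ECAM window layout programmed above, for a bus range 0..N:
	 *
	 *   cfg0_base + 0 MiB : root bus    - via DBI, no iATU needed
	 *   cfg0_base + 1 MiB : bus 1       - iATU 0, Type 0 CFG, 1 MiB
	 *   cfg0_base + 2 MiB : buses 2..N  - iATU 1, Type 1 CFG,
	 *                                     (N + 1) MiB - 2 MiB wide
	 */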
+
+static int dw_pcie_create_ecam_window(struct dw_pcie_rp *pp, struct resource *res)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct device *dev = pci->dev;
+ struct resource_entry *bus;
+
+ bus = resource_list_first_type(&pp->bridge->windows, IORESOURCE_BUS);
+ if (!bus)
+ return -ENODEV;
+
+ pp->cfg = pci_ecam_create(dev, res, bus->res, &pci_generic_ecam_ops);
+ if (IS_ERR(pp->cfg))
+ return PTR_ERR(pp->cfg);
+
+ return 0;
+}
+
+static bool dw_pcie_ecam_enabled(struct dw_pcie_rp *pp, struct resource *config_res)
+{
+ struct resource *bus_range;
+ u64 nr_buses;
+
+ /* Vendor glue drivers may implement their own ECAM mechanism */
+ if (pp->native_ecam)
+ return false;
+
+ /*
+ * PCIe spec r6.0, sec 7.2.2 mandates the base address used for ECAM to
+ * be aligned on a 2^(n+20) byte boundary, where n is the number of bits
+ * used for representing 'bus' in BDF. Since the DWC cores always use 8
+ * bits for representing 'bus', the base address has to be aligned to
+ * 2^28 byte boundary, which is 256 MiB.
+ */
+ if (!IS_256MB_ALIGNED(config_res->start))
+ return false;
+
+ bus_range = resource_list_first_type(&pp->bridge->windows, IORESOURCE_BUS)->res;
+ if (!bus_range)
+ return false;
+
+ nr_buses = resource_size(config_res) >> PCIE_ECAM_BUS_SHIFT;
+
+ return nr_buses >= resource_size(bus_range);
+}
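A worked example of this check, with hypothetical resource values:

	/*
	 * Hypothetical: "config" reg space at 0x40000000, 256 MiB wide,
	 * bus-range <0x00 0xff>:
	 *
	 *   IS_256MB_ALIGNED(0x40000000)              -> true
	 *   nr_buses = SZ_256M >> PCIE_ECAM_BUS_SHIFT -> 256   (shift = 20)
	 *   resource_size(bus_range)                  -> 256
	 *
	 * 256 >= 256, so native ECAM mapping is enabled.
	 */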
+
+static int dw_pcie_host_get_resources(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct device *dev = pci->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct resource_entry *win;
+ struct resource *res;
+ int ret;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
+ if (!res) {
+ dev_err(dev, "Missing \"config\" reg space\n");
+ return -ENODEV;
+ }
+
+ pp->cfg0_size = resource_size(res);
+ pp->cfg0_base = res->start;
+
+ pp->ecam_enabled = dw_pcie_ecam_enabled(pp, res);
+ if (pp->ecam_enabled) {
+ ret = dw_pcie_create_ecam_window(pp, res);
+ if (ret)
+ return ret;
+
+ pp->bridge->ops = &dw_pcie_ecam_ops;
+ pp->bridge->sysdata = pp->cfg;
+ pp->cfg->priv = pp;
+ } else {
+ pp->va_cfg0_base = devm_pci_remap_cfg_resource(dev, res);
+ if (IS_ERR(pp->va_cfg0_base))
+ return PTR_ERR(pp->va_cfg0_base);
+
+ /* Set default bus ops */
+ pp->bridge->ops = &dw_pcie_ops;
+ pp->bridge->child_ops = &dw_child_pcie_ops;
+ pp->bridge->sysdata = pp;
+ }
+
+ ret = dw_pcie_get_resources(pci);
+ if (ret) {
+ if (pp->cfg)
+ pci_ecam_free(pp->cfg);
+ return ret;
}
- ret = of_property_read_u32(np, "num-viewport", &pci->num_viewport);
+ /* Get the I/O range from DT */
+ win = resource_list_first_type(&pp->bridge->windows, IORESOURCE_IO);
+ if (win) {
+ pp->io_size = resource_size(win->res);
+ pp->io_bus_addr = win->res->start - win->offset;
+ pp->io_base = pci_pio_to_address(win->res->start);
+ }
+
+ /*
+ * visconti_pcie_cpu_addr_fixup() uses pp->io_base, so we have to
+ * call dw_pcie_parent_bus_offset() after setting pp->io_base.
+ */
+ pci->parent_bus_offset = dw_pcie_parent_bus_offset(pci, "config",
+ pp->cfg0_base);
+ return 0;
+}
+
+int dw_pcie_host_init(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct device *dev = pci->dev;
+ struct device_node *np = dev->of_node;
+ struct pci_host_bridge *bridge;
+ int ret;
+
+ raw_spin_lock_init(&pp->lock);
+
+ bridge = devm_pci_alloc_host_bridge(dev, 0);
+ if (!bridge)
+ return -ENOMEM;
+
+ pp->bridge = bridge;
+
+ ret = dw_pcie_host_get_resources(pp);
if (ret)
- pci->num_viewport = 2;
+ return ret;
+
+ if (pp->ops->init) {
+ ret = pp->ops->init(pp);
+ if (ret)
+ goto err_free_ecam;
+ }
+
+ if (pci_msi_enabled()) {
+ pp->has_msi_ctrl = !(pp->ops->msi_init ||
+ of_property_present(np, "msi-parent") ||
+ of_property_present(np, "msi-map"));
- if (IS_ENABLED(CONFIG_PCI_MSI)) {
/*
- * If a specific SoC driver needs to change the
- * default number of vectors, it needs to implement
- * the set_num_vectors callback.
+ * For the has_msi_ctrl case, the default assignment is handled
+ * in dw_pcie_msi_host_init().
*/
- if (!pp->ops->set_num_vectors) {
+ if (!pp->has_msi_ctrl && !pp->num_vectors) {
pp->num_vectors = MSI_DEF_NUM_VECTORS;
- } else {
- pp->ops->set_num_vectors(pp);
-
- if (pp->num_vectors > MAX_MSI_IRQS ||
- pp->num_vectors == 0) {
- dev_err(dev,
- "Invalid number of vectors\n");
- goto error;
- }
+ } else if (pp->num_vectors > MAX_MSI_IRQS) {
+ dev_err(dev, "Invalid number of vectors\n");
+ ret = -EINVAL;
+ goto err_deinit_host;
}
- if (!pp->ops->msi_host_init) {
- ret = dw_pcie_allocate_domains(pp);
- if (ret)
- goto error;
-
- if (pp->msi_irq)
- irq_set_chained_handler_and_data(pp->msi_irq,
- dw_chained_msi_isr,
- pp);
- } else {
- ret = pp->ops->msi_host_init(pp);
+ if (pp->ops->msi_init) {
+ ret = pp->ops->msi_init(pp);
+ if (ret < 0)
+ goto err_deinit_host;
+ } else if (pp->has_msi_ctrl) {
+ ret = dw_pcie_msi_host_init(pp);
if (ret < 0)
- goto error;
+ goto err_deinit_host;
}
}
- if (pp->ops->host_init) {
- ret = pp->ops->host_init(pp);
- if (ret)
- goto error;
+ dw_pcie_version_detect(pci);
+
+ dw_pcie_iatu_detect(pci);
+
+ if (pci->num_lanes < 1)
+ pci->num_lanes = dw_pcie_link_get_max_link_width(pci);
+
+ ret = of_pci_get_equalization_presets(dev, &pp->presets, pci->num_lanes);
+ if (ret)
+ goto err_free_msi;
+
+ if (pp->ecam_enabled) {
+ ret = dw_pcie_config_ecam_iatu(pp);
+ if (ret) {
+ dev_err(dev, "Failed to configure iATU in ECAM mode\n");
+ goto err_free_msi;
+ }
}
- pp->root_bus_nr = pp->busn->start;
+ /*
+ * Allocate the resource for MSG TLP before programming the iATU
+ * outbound window in dw_pcie_setup_rc(). Since the allocation depends
+ * on the value of 'region_align', this has to be done after
+ * dw_pcie_iatu_detect().
+ *
+ * Glue drivers need to set 'use_atu_msg' before dw_pcie_host_init() to
+ * make use of the generic MSG TLP implementation.
+ */
+ if (pp->use_atu_msg)
+ dw_pcie_host_request_msg_tlp_res(pp);
- bridge->dev.parent = dev;
- bridge->sysdata = pp;
- bridge->busnr = pp->root_bus_nr;
- bridge->ops = &dw_pcie_ops;
- bridge->map_irq = of_irq_parse_and_map_pci;
- bridge->swizzle_irq = pci_common_swizzle;
+ ret = dw_pcie_edma_detect(pci);
+ if (ret)
+ goto err_free_msi;
- ret = pci_scan_root_bus_bridge(bridge);
+ ret = dw_pcie_setup_rc(pp);
if (ret)
- goto error;
+ goto err_remove_edma;
- bus = bridge->bus;
+ if (!dw_pcie_link_up(pci)) {
+ ret = dw_pcie_start_link(pci);
+ if (ret)
+ goto err_remove_edma;
+ }
- if (pp->ops->scan_bus)
- pp->ops->scan_bus(pp);
+ /*
+ * Note: Skip the link up delay only when a Link Up IRQ is present.
+ * If there is no Link Up IRQ, we should not bypass the delay
+ * because that would require users to manually rescan for devices.
+ */
+ if (!pp->use_linkup_irq)
+ /* Ignore errors, the link may come up later */
+ dw_pcie_wait_for_link(pci);
+
+ ret = pci_host_probe(bridge);
+ if (ret)
+ goto err_stop_link;
- pci_bus_size_bridges(bus);
- pci_bus_assign_resources(bus);
+ if (pp->ops->post_init)
+ pp->ops->post_init(pp);
- list_for_each_entry(child, &bus->children, node)
- pcie_bus_configure_settings(child);
+ dwc_pcie_debugfs_init(pci, DW_PCIE_RC_TYPE);
- pci_bus_add_devices(bus);
return 0;
-error:
- pci_free_host_bridge(bridge);
+err_stop_link:
+ dw_pcie_stop_link(pci);
+
+err_remove_edma:
+ dw_pcie_edma_remove(pci);
+
+err_free_msi:
+ if (pp->has_msi_ctrl)
+ dw_pcie_free_msi(pp);
+
+err_deinit_host:
+ if (pp->ops->deinit)
+ pp->ops->deinit(pp);
+
+err_free_ecam:
+ if (pp->cfg)
+ pci_ecam_free(pp->cfg);
+
return ret;
}
+EXPORT_SYMBOL_GPL(dw_pcie_host_init);
-static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
- u32 devfn, int where, int size, u32 *val)
+void dw_pcie_host_deinit(struct dw_pcie_rp *pp)
{
- int ret, type;
- u32 busdev, cfg_size;
- u64 cpu_addr;
- void __iomem *va_cfg_base;
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- if (pp->ops->rd_other_conf)
- return pp->ops->rd_other_conf(pp, bus, devfn, where, size, val);
+ dwc_pcie_debugfs_deinit(pci);
- busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
- PCIE_ATU_FUNC(PCI_FUNC(devfn));
+ pci_stop_root_bus(pp->bridge->bus);
+ pci_remove_root_bus(pp->bridge->bus);
- if (bus->parent->number == pp->root_bus_nr) {
- type = PCIE_ATU_TYPE_CFG0;
- cpu_addr = pp->cfg0_base;
- cfg_size = pp->cfg0_size;
- va_cfg_base = pp->va_cfg0_base;
- } else {
- type = PCIE_ATU_TYPE_CFG1;
- cpu_addr = pp->cfg1_base;
- cfg_size = pp->cfg1_size;
- va_cfg_base = pp->va_cfg1_base;
- }
+ dw_pcie_stop_link(pci);
- dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
- type, cpu_addr,
- busdev, cfg_size);
- ret = dw_pcie_read(va_cfg_base + where, size, val);
- if (pci->num_viewport <= 2)
- dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
- PCIE_ATU_TYPE_IO, pp->io_base,
- pp->io_bus_addr, pp->io_size);
+ dw_pcie_edma_remove(pci);
- return ret;
+ if (pp->has_msi_ctrl)
+ dw_pcie_free_msi(pp);
+
+ if (pp->ops->deinit)
+ pp->ops->deinit(pp);
+
+ if (pp->cfg)
+ pci_ecam_free(pp->cfg);
}
+EXPORT_SYMBOL_GPL(dw_pcie_host_deinit);
-static int dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
- u32 devfn, int where, int size, u32 val)
+static void __iomem *dw_pcie_other_conf_map_bus(struct pci_bus *bus,
+ unsigned int devfn, int where)
{
- int ret, type;
- u32 busdev, cfg_size;
- u64 cpu_addr;
- void __iomem *va_cfg_base;
+ struct dw_pcie_rp *pp = bus->sysdata;
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct dw_pcie_ob_atu_cfg atu = { 0 };
+ int type, ret;
+ u32 busdev;
- if (pp->ops->wr_other_conf)
- return pp->ops->wr_other_conf(pp, bus, devfn, where, size, val);
+ /*
+ * Checking whether the link is up here is a last line of defense
+ * against platforms that forward errors on the system bus as
+ * SError upon PCI configuration transactions issued when the link
+ * is down. This check is racy by definition and does not stop
+ * the system from triggering an SError if the link goes down
+ * after this check is performed.
+ */
+ if (!dw_pcie_link_up(pci))
+ return NULL;
busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
PCIE_ATU_FUNC(PCI_FUNC(devfn));
- if (bus->parent->number == pp->root_bus_nr) {
+ if (pci_is_root_bus(bus->parent))
type = PCIE_ATU_TYPE_CFG0;
- cpu_addr = pp->cfg0_base;
- cfg_size = pp->cfg0_size;
- va_cfg_base = pp->va_cfg0_base;
- } else {
+ else
type = PCIE_ATU_TYPE_CFG1;
- cpu_addr = pp->cfg1_base;
- cfg_size = pp->cfg1_size;
- va_cfg_base = pp->va_cfg1_base;
- }
- dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
- type, cpu_addr,
- busdev, cfg_size);
- ret = dw_pcie_write(va_cfg_base + where, size, val);
- if (pci->num_viewport <= 2)
- dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
- PCIE_ATU_TYPE_IO, pp->io_base,
- pp->io_bus_addr, pp->io_size);
+ atu.type = type;
+ atu.parent_bus_addr = pp->cfg0_base - pci->parent_bus_offset;
+ atu.pci_addr = busdev;
+ atu.size = pp->cfg0_size;
- return ret;
+ ret = dw_pcie_prog_outbound_atu(pci, &atu);
+ if (ret)
+ return NULL;
+
+ return pp->va_cfg0_base + where;
}
-static int dw_pcie_valid_device(struct pcie_port *pp, struct pci_bus *bus,
- int dev)
+static int dw_pcie_rd_other_conf(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val)
{
+ struct dw_pcie_rp *pp = bus->sysdata;
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct dw_pcie_ob_atu_cfg atu = { 0 };
+ int ret;
- /* If there is no link, then there is no device */
- if (bus->number != pp->root_bus_nr) {
- if (!dw_pcie_link_up(pci))
- return 0;
- }
+ ret = pci_generic_config_read(bus, devfn, where, size, val);
+ if (ret != PCIBIOS_SUCCESSFUL)
+ return ret;
- /* Access only one slot on each root port */
- if (bus->number == pp->root_bus_nr && dev > 0)
- return 0;
+ if (pp->cfg0_io_shared) {
+ atu.type = PCIE_ATU_TYPE_IO;
+ atu.parent_bus_addr = pp->io_base - pci->parent_bus_offset;
+ atu.pci_addr = pp->io_bus_addr;
+ atu.size = pp->io_size;
- return 1;
+ ret = dw_pcie_prog_outbound_atu(pci, &atu);
+ if (ret)
+ return PCIBIOS_SET_FAILED;
+ }
+
+ return PCIBIOS_SUCCESSFUL;
}
-static int dw_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
- int size, u32 *val)
+static int dw_pcie_wr_other_conf(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val)
{
- struct pcie_port *pp = bus->sysdata;
+ struct dw_pcie_rp *pp = bus->sysdata;
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct dw_pcie_ob_atu_cfg atu = { 0 };
+ int ret;
+
+ ret = pci_generic_config_write(bus, devfn, where, size, val);
+ if (ret != PCIBIOS_SUCCESSFUL)
+ return ret;
+
+ if (pp->cfg0_io_shared) {
+ atu.type = PCIE_ATU_TYPE_IO;
+ atu.parent_bus_addr = pp->io_base - pci->parent_bus_offset;
+ atu.pci_addr = pp->io_bus_addr;
+ atu.size = pp->io_size;
- if (!dw_pcie_valid_device(pp, bus, PCI_SLOT(devfn))) {
- *val = 0xffffffff;
- return PCIBIOS_DEVICE_NOT_FOUND;
+ ret = dw_pcie_prog_outbound_atu(pci, &atu);
+ if (ret)
+ return PCIBIOS_SET_FAILED;
}
- if (bus->number == pp->root_bus_nr)
- return dw_pcie_rd_own_conf(pp, where, size, val);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static struct pci_ops dw_child_pcie_ops = {
+ .map_bus = dw_pcie_other_conf_map_bus,
+ .read = dw_pcie_rd_other_conf,
+ .write = dw_pcie_wr_other_conf,
+};
- return dw_pcie_rd_other_conf(pp, bus, devfn, where, size, val);
+void __iomem *dw_pcie_own_conf_map_bus(struct pci_bus *bus, unsigned int devfn, int where)
+{
+ struct dw_pcie_rp *pp = bus->sysdata;
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+
+ if (PCI_SLOT(devfn) > 0)
+ return NULL;
+
+ return pci->dbi_base + where;
}
+EXPORT_SYMBOL_GPL(dw_pcie_own_conf_map_bus);
-static int dw_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
- int where, int size, u32 val)
+static void __iomem *dw_pcie_ecam_conf_map_bus(struct pci_bus *bus, unsigned int devfn, int where)
{
- struct pcie_port *pp = bus->sysdata;
+ struct pci_config_window *cfg = bus->sysdata;
+ struct dw_pcie_rp *pp = cfg->priv;
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ unsigned int busn = bus->number;
- if (!dw_pcie_valid_device(pp, bus, PCI_SLOT(devfn)))
- return PCIBIOS_DEVICE_NOT_FOUND;
+ if (busn > 0)
+ return pci_ecam_map_bus(bus, devfn, where);
- if (bus->number == pp->root_bus_nr)
- return dw_pcie_wr_own_conf(pp, where, size, val);
+ if (PCI_SLOT(devfn) > 0)
+ return NULL;
- return dw_pcie_wr_other_conf(pp, bus, devfn, where, size, val);
+ return pci->dbi_base + where;
+}
+
+static int dw_pcie_op_assert_perst(struct pci_bus *bus, bool assert)
+{
+ struct dw_pcie_rp *pp = bus->sysdata;
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+
+ return dw_pcie_assert_perst(pci, assert);
}
static struct pci_ops dw_pcie_ops = {
- .read = dw_pcie_rd_conf,
- .write = dw_pcie_wr_conf,
+ .map_bus = dw_pcie_own_conf_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+ .assert_perst = dw_pcie_op_assert_perst,
+};
+
+static struct pci_ops dw_pcie_ecam_ops = {
+ .map_bus = dw_pcie_ecam_conf_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
};
-static u8 dw_pcie_iatu_unroll_enabled(struct dw_pcie *pci)
+static int dw_pcie_iatu_setup(struct dw_pcie_rp *pp)
{
- u32 val;
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct dw_pcie_ob_atu_cfg atu = { 0 };
+ struct resource_entry *entry;
+ int i, ret;
+
+ /* Note the very first outbound ATU is used for CFG accesses */
+ if (!pci->num_ob_windows) {
+ dev_err(pci->dev, "No outbound iATU found\n");
+ return -EINVAL;
+ }
- val = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT);
- if (val == 0xffffffff)
- return 1;
+ /*
+ * Ensure all out/inbound windows are disabled before proceeding with
+ * the MEM/IO (dma-)ranges setups.
+ */
+ for (i = 0; i < pci->num_ob_windows; i++)
+ dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_OB, i);
+
+ for (i = 0; i < pci->num_ib_windows; i++)
+ dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_IB, i);
+
+ i = 0;
+ resource_list_for_each_entry(entry, &pp->bridge->windows) {
+ if (resource_type(entry->res) != IORESOURCE_MEM)
+ continue;
+
+ if (pci->num_ob_windows <= ++i)
+ break;
+
+ atu.index = i;
+ atu.type = PCIE_ATU_TYPE_MEM;
+ atu.parent_bus_addr = entry->res->start - pci->parent_bus_offset;
+ atu.pci_addr = entry->res->start - entry->offset;
+
+ /* Adjust iATU size if MSG TLP region was allocated before */
+ if (pp->msg_res && pp->msg_res->parent == entry->res)
+ atu.size = resource_size(entry->res) -
+ resource_size(pp->msg_res);
+ else
+ atu.size = resource_size(entry->res);
+
+ ret = dw_pcie_prog_outbound_atu(pci, &atu);
+ if (ret) {
+ dev_err(pci->dev, "Failed to set MEM range %pr\n",
+ entry->res);
+ return ret;
+ }
+ }
+
+ if (pp->io_size) {
+ if (pci->num_ob_windows > ++i) {
+ atu.index = i;
+ atu.type = PCIE_ATU_TYPE_IO;
+ atu.parent_bus_addr = pp->io_base - pci->parent_bus_offset;
+ atu.pci_addr = pp->io_bus_addr;
+ atu.size = pp->io_size;
+
+ ret = dw_pcie_prog_outbound_atu(pci, &atu);
+ if (ret) {
+				dev_err(pci->dev, "Failed to set IO range [%#llx..%#llx]\n",
+					(unsigned long long)pp->io_base,
+					(unsigned long long)(pp->io_base + pp->io_size - 1));
+ return ret;
+ }
+ } else {
+ pp->cfg0_io_shared = true;
+ }
+ }
+
+ if (pci->num_ob_windows <= i)
+ dev_warn(pci->dev, "Ranges exceed outbound iATU size (%d)\n",
+ pci->num_ob_windows);
+
+ pp->msg_atu_index = i;
+
+ i = 0;
+ resource_list_for_each_entry(entry, &pp->bridge->dma_ranges) {
+ if (resource_type(entry->res) != IORESOURCE_MEM)
+ continue;
+
+ if (pci->num_ib_windows <= i)
+ break;
+
+ ret = dw_pcie_prog_inbound_atu(pci, i++, PCIE_ATU_TYPE_MEM,
+ entry->res->start,
+ entry->res->start - entry->offset,
+ resource_size(entry->res));
+ if (ret) {
+ dev_err(pci->dev, "Failed to set DMA range %pr\n",
+ entry->res);
+ return ret;
+ }
+ }
+
+ if (pci->num_ib_windows <= i)
+ dev_warn(pci->dev, "Dma-ranges exceed inbound iATU size (%u)\n",
+ pci->num_ib_windows);
return 0;
}
-void dw_pcie_setup_rc(struct pcie_port *pp)
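+/*
+ * Program the lane equalization presets for one data rate. The presets are
+ * written byte-by-byte into the Lane Equalization Control registers of the
+ * matching extended capability (Secondary PCIe for 8 GT/s, Physical Layer
+ * 16/32/64 GT/s otherwise).
+ */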
+static void dw_pcie_program_presets(struct dw_pcie_rp *pp, enum pci_bus_speed speed)
{
- u32 val, ctrl, num_ctrls;
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ u8 lane_eq_offset, lane_reg_size, cap_id;
+ u8 *presets;
+ u32 cap;
+ int i;
+
+ if (speed == PCIE_SPEED_8_0GT) {
+ presets = (u8 *)pp->presets.eq_presets_8gts;
+ lane_eq_offset = PCI_SECPCI_LE_CTRL;
+ cap_id = PCI_EXT_CAP_ID_SECPCI;
+		/* At 8.0 GT/s, each lane equalization control field is 16 bits wide */
+ lane_reg_size = 0x2;
+ } else if (speed == PCIE_SPEED_16_0GT) {
+ presets = pp->presets.eq_presets_Ngts[EQ_PRESET_TYPE_16GTS - 1];
+ lane_eq_offset = PCI_PL_16GT_LE_CTRL;
+ cap_id = PCI_EXT_CAP_ID_PL_16GT;
+ lane_reg_size = 0x1;
+ } else if (speed == PCIE_SPEED_32_0GT) {
+ presets = pp->presets.eq_presets_Ngts[EQ_PRESET_TYPE_32GTS - 1];
+ lane_eq_offset = PCI_PL_32GT_LE_CTRL;
+ cap_id = PCI_EXT_CAP_ID_PL_32GT;
+ lane_reg_size = 0x1;
+ } else if (speed == PCIE_SPEED_64_0GT) {
+ presets = pp->presets.eq_presets_Ngts[EQ_PRESET_TYPE_64GTS - 1];
+ lane_eq_offset = PCI_PL_64GT_LE_CTRL;
+ cap_id = PCI_EXT_CAP_ID_PL_64GT;
+ lane_reg_size = 0x1;
+ } else {
+ return;
+ }
- dw_pcie_setup(pci);
+ if (presets[0] == PCI_EQ_RESV)
+ return;
- num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
+ cap = dw_pcie_find_ext_capability(pci, cap_id);
+ if (!cap)
+ return;
- /* Initialize IRQ Status array */
- for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
- dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK +
- (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
- 4, ~0);
- dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE +
- (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
- 4, ~0);
- pp->irq_status[ctrl] = 0;
- }
+ /*
+ * Write preset values to the registers byte-by-byte for the given
+ * number of lanes and register size.
+ */
+ for (i = 0; i < pci->num_lanes * lane_reg_size; i++)
+ dw_pcie_writeb_dbi(pci, cap + lane_eq_offset + i, presets[i]);
+}
+
+static void dw_pcie_config_presets(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ enum pci_bus_speed speed = pcie_link_speed[pci->max_link_speed];
+
+ /*
+ * Lane equalization settings need to be applied for all data rates the
+ * controller supports and for all supported lanes.
+ */
+
+ if (speed >= PCIE_SPEED_8_0GT)
+ dw_pcie_program_presets(pp, PCIE_SPEED_8_0GT);
+
+ if (speed >= PCIE_SPEED_16_0GT)
+ dw_pcie_program_presets(pp, PCIE_SPEED_16_0GT);
+
+ if (speed >= PCIE_SPEED_32_0GT)
+ dw_pcie_program_presets(pp, PCIE_SPEED_32_0GT);
+
+ if (speed >= PCIE_SPEED_64_0GT)
+ dw_pcie_program_presets(pp, PCIE_SPEED_64_0GT);
+}
+
+int dw_pcie_setup_rc(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ u32 val;
+ int ret;
+
+ /*
+ * Enable DBI read-only registers for writing/updating configuration.
+ * Write permission gets disabled towards the end of this function.
+ */
+ dw_pcie_dbi_ro_wr_en(pci);
+
+ dw_pcie_setup(pci);
+
+ dw_pcie_msi_init(pp);
/* Setup RC BARs */
dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0x00000004);
dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0x00000000);
/* Setup interrupt pins */
- dw_pcie_dbi_ro_wr_en(pci);
val = dw_pcie_readl_dbi(pci, PCI_INTERRUPT_LINE);
val &= 0xffff00ff;
val |= 0x00000100;
dw_pcie_writel_dbi(pci, PCI_INTERRUPT_LINE, val);
- dw_pcie_dbi_ro_wr_dis(pci);
/* Setup bus numbers */
val = dw_pcie_readl_dbi(pci, PCI_PRIMARY_BUS);
@@ -699,39 +1090,147 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
dw_pcie_writel_dbi(pci, PCI_COMMAND, val);
+ dw_pcie_hide_unsupported_l1ss(pci);
+
+ dw_pcie_config_presets(pp);
/*
- * If the platform provides ->rd_other_conf, it means the platform
- * uses its own address translation component rather than ATU, so
- * we should not program the ATU here.
+ * If the platform provides its own child bus config accesses, it means
+ * the platform uses its own address translation component rather than
+ * ATU, so we should not program the ATU here.
*/
- if (!pp->ops->rd_other_conf) {
- /* Get iATU unroll support */
- pci->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pci);
- dev_dbg(pci->dev, "iATU unroll: %s\n",
- pci->iatu_unroll_enabled ? "enabled" : "disabled");
-
- if (pci->iatu_unroll_enabled && !pci->atu_base)
- pci->atu_base = pci->dbi_base + DEFAULT_DBI_ATU_OFFSET;
-
- dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX0,
- PCIE_ATU_TYPE_MEM, pp->mem_base,
- pp->mem_bus_addr, pp->mem_size);
- if (pci->num_viewport > 2)
- dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX2,
- PCIE_ATU_TYPE_IO, pp->io_base,
- pp->io_bus_addr, pp->io_size);
+ if (pp->bridge->child_ops == &dw_child_pcie_ops) {
+ ret = dw_pcie_iatu_setup(pp);
+ if (ret)
+ return ret;
}
- dw_pcie_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0);
+ dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);
- /* Enable write permission for the DBI read-only register */
- dw_pcie_dbi_ro_wr_en(pci);
/* Program correct class for RC */
- dw_pcie_wr_own_conf(pp, PCI_CLASS_DEVICE, 2, PCI_CLASS_BRIDGE_PCI);
- /* Better disable write permission right after the update */
- dw_pcie_dbi_ro_wr_dis(pci);
+ dw_pcie_writew_dbi(pci, PCI_CLASS_DEVICE, PCI_CLASS_BRIDGE_PCI);
- dw_pcie_rd_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, &val);
+ val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
val |= PORT_LOGIC_SPEED_CHANGE;
- dw_pcie_wr_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, val);
+ dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
+
+ dw_pcie_dbi_ro_wr_dis(pci);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dw_pcie_setup_rc);
+
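+/*
+ * Broadcast PME_Turn_Off by programming the spare outbound window as a
+ * data-less MSG region: a dummy write into that window goes out on the
+ * link as the corresponding Msg TLP.
+ */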
+static int dw_pcie_pme_turn_off(struct dw_pcie *pci)
+{
+ struct dw_pcie_ob_atu_cfg atu = { 0 };
+ void __iomem *mem;
+ int ret;
+
+ if (pci->num_ob_windows <= pci->pp.msg_atu_index)
+ return -ENOSPC;
+
+ if (!pci->pp.msg_res)
+ return -ENOSPC;
+
+ atu.code = PCIE_MSG_CODE_PME_TURN_OFF;
+ atu.routing = PCIE_MSG_TYPE_R_BC;
+ atu.type = PCIE_ATU_TYPE_MSG;
+ atu.size = resource_size(pci->pp.msg_res);
+ atu.index = pci->pp.msg_atu_index;
+
+ atu.parent_bus_addr = pci->pp.msg_res->start - pci->parent_bus_offset;
+
+ ret = dw_pcie_prog_outbound_atu(pci, &atu);
+ if (ret)
+ return ret;
+
+ mem = ioremap(pci->pp.msg_res->start, pci->region_align);
+ if (!mem)
+ return -ENOMEM;
+
+ /* A dummy write is converted to a Msg TLP */
+ writel(0, mem);
+
+ iounmap(mem);
+
+ return 0;
+}
+
+int dw_pcie_suspend_noirq(struct dw_pcie *pci)
+{
+ u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ u32 val;
+ int ret;
+
+ /*
+ * If L1SS is supported, then do not put the link into L2 as some
+ * devices such as NVMe expect low resume latency.
+ */
+ if (dw_pcie_readw_dbi(pci, offset + PCI_EXP_LNKCTL) & PCI_EXP_LNKCTL_ASPM_L1)
+ return 0;
+
+ if (pci->pp.ops->pme_turn_off) {
+ pci->pp.ops->pme_turn_off(&pci->pp);
+ } else {
+ ret = dw_pcie_pme_turn_off(pci);
+ if (ret)
+ return ret;
+ }
+
+ ret = read_poll_timeout(dw_pcie_get_ltssm, val,
+ val == DW_PCIE_LTSSM_L2_IDLE ||
+ val <= DW_PCIE_LTSSM_DETECT_WAIT,
+				PCIE_PME_TO_L2_TIMEOUT_US / 10,
+ PCIE_PME_TO_L2_TIMEOUT_US, false, pci);
+ if (ret) {
+		/* The poll succeeds in DETECT/POLL, so only log when the LTSSM is truly stuck */
+ dev_err(pci->dev, "Timeout waiting for L2 entry! LTSSM: 0x%x\n", val);
+ return ret;
+ }
+
+ /*
+ * Per PCIe r6.0, sec 5.3.3.2.1, software should wait at least
+ * 100ns after L2/L3 Ready before turning off refclock and
+ * main power. This is harmless when no endpoint is connected.
+ */
+ udelay(1);
+
+ dw_pcie_stop_link(pci);
+ if (pci->pp.ops->deinit)
+ pci->pp.ops->deinit(&pci->pp);
+
+ pci->suspended = true;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dw_pcie_suspend_noirq);
+
+int dw_pcie_resume_noirq(struct dw_pcie *pci)
+{
+ int ret;
+
+ if (!pci->suspended)
+ return 0;
+
+ pci->suspended = false;
+
+ if (pci->pp.ops->init) {
+ ret = pci->pp.ops->init(&pci->pp);
+ if (ret) {
+ dev_err(pci->dev, "Host init failed: %d\n", ret);
+ return ret;
+ }
+ }
+
+	ret = dw_pcie_setup_rc(&pci->pp);
+	if (ret)
+		return ret;
+
+ ret = dw_pcie_start_link(pci);
+ if (ret)
+ return ret;
+
+ ret = dw_pcie_wait_for_link(pci);
+ if (ret)
+ return ret;
+
+	return 0;
}
+EXPORT_SYMBOL_GPL(dw_pcie_resume_noirq);
diff --git a/drivers/pci/controller/dwc/pcie-designware-plat.c b/drivers/pci/controller/dwc/pcie-designware-plat.c
index c12bf794d69c..12f41886c65d 100644
--- a/drivers/pci/controller/dwc/pcie-designware-plat.c
+++ b/drivers/pci/controller/dwc/pcie-designware-plat.c
@@ -12,20 +12,16 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/of_device.h>
-#include <linux/of_gpio.h>
+#include <linux/of.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/resource.h>
-#include <linux/signal.h>
#include <linux/types.h>
-#include <linux/regmap.h>
#include "pcie-designware.h"
struct dw_plat_pcie {
struct dw_pcie *pci;
- struct regmap *regmap;
enum dw_pcie_device_mode mode;
};
@@ -33,65 +29,29 @@ struct dw_plat_pcie_of_data {
enum dw_pcie_device_mode mode;
};
-static const struct of_device_id dw_plat_pcie_of_match[];
-
-static int dw_plat_pcie_host_init(struct pcie_port *pp)
-{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
-
- dw_pcie_setup_rc(pp);
- dw_pcie_wait_for_link(pci);
-
- if (IS_ENABLED(CONFIG_PCI_MSI))
- dw_pcie_msi_init(pp);
-
- return 0;
-}
-
-static void dw_plat_set_num_vectors(struct pcie_port *pp)
-{
- pp->num_vectors = MAX_MSI_IRQS;
-}
-
static const struct dw_pcie_host_ops dw_plat_pcie_host_ops = {
- .host_init = dw_plat_pcie_host_init,
- .set_num_vectors = dw_plat_set_num_vectors,
-};
-
-static int dw_plat_pcie_establish_link(struct dw_pcie *pci)
-{
- return 0;
-}
-
-static const struct dw_pcie_ops dw_pcie_ops = {
- .start_link = dw_plat_pcie_establish_link,
};
static void dw_plat_pcie_ep_init(struct dw_pcie_ep *ep)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- struct pci_epc *epc = ep->epc;
enum pci_barno bar;
- for (bar = BAR_0; bar <= BAR_5; bar++)
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
dw_pcie_ep_reset_bar(pci, bar);
-
- epc->features |= EPC_FEATURE_NO_LINKUP_NOTIFIER;
- epc->features |= EPC_FEATURE_MSIX_AVAILABLE;
}
static int dw_plat_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
- enum pci_epc_irq_type type,
- u16 interrupt_num)
+ unsigned int type, u16 interrupt_num)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
switch (type) {
- case PCI_EPC_IRQ_LEGACY:
- return dw_pcie_ep_raise_legacy_irq(ep, func_no);
- case PCI_EPC_IRQ_MSI:
+ case PCI_IRQ_INTX:
+ return dw_pcie_ep_raise_intx_irq(ep, func_no);
+ case PCI_IRQ_MSI:
return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
- case PCI_EPC_IRQ_MSIX:
+ case PCI_IRQ_MSIX:
return dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num);
default:
dev_err(pci->dev, "UNKNOWN IRQ type\n");
@@ -100,16 +60,28 @@ static int dw_plat_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
return 0;
}
-static struct dw_pcie_ep_ops pcie_ep_ops = {
- .ep_init = dw_plat_pcie_ep_init,
+static const struct pci_epc_features dw_plat_pcie_epc_features = {
+ .msi_capable = true,
+ .msix_capable = true,
+};
+
+static const struct pci_epc_features*
+dw_plat_pcie_get_features(struct dw_pcie_ep *ep)
+{
+ return &dw_plat_pcie_epc_features;
+}
+
+static const struct dw_pcie_ep_ops pcie_ep_ops = {
+ .init = dw_plat_pcie_ep_init,
.raise_irq = dw_plat_pcie_ep_raise_irq,
+ .get_features = dw_plat_pcie_get_features,
};
static int dw_plat_add_pcie_port(struct dw_plat_pcie *dw_plat_pcie,
struct platform_device *pdev)
{
struct dw_pcie *pci = dw_plat_pcie->pci;
- struct pcie_port *pp = &pci->pp;
+ struct dw_pcie_rp *pp = &pci->pp;
struct device *dev = &pdev->dev;
int ret;
@@ -117,12 +89,7 @@ static int dw_plat_add_pcie_port(struct dw_plat_pcie *dw_plat_pcie,
if (pp->irq < 0)
return pp->irq;
- if (IS_ENABLED(CONFIG_PCI_MSI)) {
- pp->msi_irq = platform_get_irq(pdev, 0);
- if (pp->msi_irq < 0)
- return pp->msi_irq;
- }
-
+ pp->num_vectors = MAX_MSI_IRQS;
pp->ops = &dw_plat_pcie_host_ops;
ret = dw_pcie_host_init(pp);
@@ -134,54 +101,19 @@ static int dw_plat_add_pcie_port(struct dw_plat_pcie *dw_plat_pcie,
return 0;
}
-static int dw_plat_add_pcie_ep(struct dw_plat_pcie *dw_plat_pcie,
- struct platform_device *pdev)
-{
- int ret;
- struct dw_pcie_ep *ep;
- struct resource *res;
- struct device *dev = &pdev->dev;
- struct dw_pcie *pci = dw_plat_pcie->pci;
-
- ep = &pci->ep;
- ep->ops = &pcie_ep_ops;
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi2");
- pci->dbi_base2 = devm_ioremap_resource(dev, res);
- if (IS_ERR(pci->dbi_base2))
- return PTR_ERR(pci->dbi_base2);
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
- if (!res)
- return -EINVAL;
-
- ep->phys_base = res->start;
- ep->addr_size = resource_size(res);
-
- ret = dw_pcie_ep_init(ep);
- if (ret) {
- dev_err(dev, "Failed to initialize endpoint\n");
- return ret;
- }
- return 0;
-}
-
static int dw_plat_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct dw_plat_pcie *dw_plat_pcie;
struct dw_pcie *pci;
- struct resource *res; /* Resource from DT */
int ret;
- const struct of_device_id *match;
const struct dw_plat_pcie_of_data *data;
enum dw_pcie_device_mode mode;
- match = of_match_device(dw_plat_pcie_of_match, dev);
- if (!match)
+ data = of_device_get_match_data(dev);
+ if (!data)
return -EINVAL;
- data = (struct dw_plat_pcie_of_data *)match->data;
mode = (enum dw_pcie_device_mode)data->mode;
dw_plat_pcie = devm_kzalloc(dev, sizeof(*dw_plat_pcie), GFP_KERNEL);
@@ -193,19 +125,10 @@ static int dw_plat_pcie_probe(struct platform_device *pdev)
return -ENOMEM;
pci->dev = dev;
- pci->ops = &dw_pcie_ops;
dw_plat_pcie->pci = pci;
dw_plat_pcie->mode = mode;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
- if (!res)
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- pci->dbi_base = devm_ioremap_resource(dev, res);
- if (IS_ERR(pci->dbi_base))
- return PTR_ERR(pci->dbi_base);
-
platform_set_drvdata(pdev, dw_plat_pcie);
switch (dw_plat_pcie->mode) {
@@ -214,22 +137,32 @@ static int dw_plat_pcie_probe(struct platform_device *pdev)
return -ENODEV;
ret = dw_plat_add_pcie_port(dw_plat_pcie, pdev);
- if (ret < 0)
- return ret;
break;
case DW_PCIE_EP_TYPE:
if (!IS_ENABLED(CONFIG_PCIE_DW_PLAT_EP))
return -ENODEV;
- ret = dw_plat_add_pcie_ep(dw_plat_pcie, pdev);
- if (ret < 0)
+ pci->ep.ops = &pcie_ep_ops;
+ ret = dw_pcie_ep_init(&pci->ep);
+ if (ret)
return ret;
+
+ ret = dw_pcie_ep_init_registers(&pci->ep);
+ if (ret) {
+ dev_err(dev, "Failed to initialize DWC endpoint registers\n");
+			dw_pcie_ep_deinit(&pci->ep);
+			return ret;
+		}
+
+ pci_epc_init_notify(pci->ep.epc);
+
break;
default:
dev_err(dev, "INVALID device type %d\n", dw_plat_pcie->mode);
+ ret = -EINVAL;
+ break;
}
- return 0;
+ return ret;
}
static const struct dw_plat_pcie_of_data dw_plat_pcie_rc_of_data = {
diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c
index 93ef8c31fb39..75fc8b767fcc 100644
--- a/drivers/pci/controller/dwc/pcie-designware.c
+++ b/drivers/pci/controller/dwc/pcie-designware.c
@@ -3,26 +3,293 @@
* Synopsys DesignWare PCIe host controller driver
*
* Copyright (C) 2013 Samsung Electronics Co., Ltd.
- * http://www.samsung.com
+ * https://www.samsung.com
*
* Author: Jingoo Han <jg1.han@samsung.com>
*/
+#include <linux/align.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/dma/edma.h>
+#include <linux/gpio/consumer.h>
+#include <linux/ioport.h>
#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/pcie-dwc.h>
+#include <linux/platform_device.h>
+#include <linux/sizes.h>
#include <linux/types.h>
+#include "../../pci.h"
#include "pcie-designware.h"
-/* PCIe Port Logic registers */
-#define PLR_OFFSET 0x700
-#define PCIE_PHY_DEBUG_R1 (PLR_OFFSET + 0x2c)
-#define PCIE_PHY_DEBUG_R1_LINK_UP (0x1 << 4)
-#define PCIE_PHY_DEBUG_R1_LINK_IN_TRAINING (0x1 << 29)
+static const char * const dw_pcie_app_clks[DW_PCIE_NUM_APP_CLKS] = {
+ [DW_PCIE_DBI_CLK] = "dbi",
+ [DW_PCIE_MSTR_CLK] = "mstr",
+ [DW_PCIE_SLV_CLK] = "slv",
+};
+
+static const char * const dw_pcie_core_clks[DW_PCIE_NUM_CORE_CLKS] = {
+ [DW_PCIE_PIPE_CLK] = "pipe",
+ [DW_PCIE_CORE_CLK] = "core",
+ [DW_PCIE_AUX_CLK] = "aux",
+ [DW_PCIE_REF_CLK] = "ref",
+};
+
+static const char * const dw_pcie_app_rsts[DW_PCIE_NUM_APP_RSTS] = {
+ [DW_PCIE_DBI_RST] = "dbi",
+ [DW_PCIE_MSTR_RST] = "mstr",
+ [DW_PCIE_SLV_RST] = "slv",
+};
+
+static const char * const dw_pcie_core_rsts[DW_PCIE_NUM_CORE_RSTS] = {
+ [DW_PCIE_NON_STICKY_RST] = "non-sticky",
+ [DW_PCIE_STICKY_RST] = "sticky",
+ [DW_PCIE_CORE_RST] = "core",
+ [DW_PCIE_PIPE_RST] = "pipe",
+ [DW_PCIE_PHY_RST] = "phy",
+ [DW_PCIE_HOT_RST] = "hot",
+ [DW_PCIE_PWR_RST] = "pwr",
+};
+
+static const struct dwc_pcie_vsec_id dwc_pcie_ptm_vsec_ids[] = {
+ { .vendor_id = PCI_VENDOR_ID_QCOM, /* EP */
+ .vsec_id = 0x03, .vsec_rev = 0x1 },
+ { .vendor_id = PCI_VENDOR_ID_QCOM, /* RC */
+ .vsec_id = 0x04, .vsec_rev = 0x1 },
+ { }
+};
+
+static int dw_pcie_get_clocks(struct dw_pcie *pci)
+{
+ int i, ret;
+
+ for (i = 0; i < DW_PCIE_NUM_APP_CLKS; i++)
+ pci->app_clks[i].id = dw_pcie_app_clks[i];
+
+ for (i = 0; i < DW_PCIE_NUM_CORE_CLKS; i++)
+ pci->core_clks[i].id = dw_pcie_core_clks[i];
+
+ ret = devm_clk_bulk_get_optional(pci->dev, DW_PCIE_NUM_APP_CLKS,
+ pci->app_clks);
+ if (ret)
+ return ret;
+
+ return devm_clk_bulk_get_optional(pci->dev, DW_PCIE_NUM_CORE_CLKS,
+ pci->core_clks);
+}
+
+static int dw_pcie_get_resets(struct dw_pcie *pci)
+{
+ int i, ret;
+
+ for (i = 0; i < DW_PCIE_NUM_APP_RSTS; i++)
+ pci->app_rsts[i].id = dw_pcie_app_rsts[i];
+
+ for (i = 0; i < DW_PCIE_NUM_CORE_RSTS; i++)
+ pci->core_rsts[i].id = dw_pcie_core_rsts[i];
+
+ ret = devm_reset_control_bulk_get_optional_shared(pci->dev,
+ DW_PCIE_NUM_APP_RSTS,
+ pci->app_rsts);
+ if (ret)
+ return ret;
+
+ ret = devm_reset_control_bulk_get_optional_exclusive(pci->dev,
+ DW_PCIE_NUM_CORE_RSTS,
+ pci->core_rsts);
+ if (ret)
+ return ret;
+
+ pci->pe_rst = devm_gpiod_get_optional(pci->dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(pci->pe_rst))
+ return PTR_ERR(pci->pe_rst);
+
+ return 0;
+}
+
+int dw_pcie_get_resources(struct dw_pcie *pci)
+{
+ struct platform_device *pdev = to_platform_device(pci->dev);
+ struct device_node *np = dev_of_node(pci->dev);
+ struct resource *res;
+ int ret;
+
+ if (!pci->dbi_base) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
+ pci->dbi_base = devm_pci_remap_cfg_resource(pci->dev, res);
+ if (IS_ERR(pci->dbi_base))
+ return PTR_ERR(pci->dbi_base);
+ pci->dbi_phys_addr = res->start;
+ }
+
+ /* DBI2 is mainly useful for the endpoint controller */
+ if (!pci->dbi_base2) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi2");
+ if (res) {
+ pci->dbi_base2 = devm_pci_remap_cfg_resource(pci->dev, res);
+ if (IS_ERR(pci->dbi_base2))
+ return PTR_ERR(pci->dbi_base2);
+ } else {
+ pci->dbi_base2 = pci->dbi_base + SZ_4K;
+ }
+ }
+
+ /* For non-unrolled iATU/eDMA platforms this range will be ignored */
+ if (!pci->atu_base) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "atu");
+ if (res) {
+ pci->atu_size = resource_size(res);
+ pci->atu_base = devm_ioremap_resource(pci->dev, res);
+ if (IS_ERR(pci->atu_base))
+ return PTR_ERR(pci->atu_base);
+ pci->atu_phys_addr = res->start;
+ } else {
+ pci->atu_base = pci->dbi_base + DEFAULT_DBI_ATU_OFFSET;
+ }
+ }
+
+ /* Set a default value suitable for at most 8 in and 8 out windows */
+ if (!pci->atu_size)
+ pci->atu_size = SZ_4K;
+
+ /* eDMA region can be mapped to a custom base address */
+ if (!pci->edma.reg_base) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dma");
+ if (res) {
+ pci->edma.reg_base = devm_ioremap_resource(pci->dev, res);
+ if (IS_ERR(pci->edma.reg_base))
+ return PTR_ERR(pci->edma.reg_base);
+ } else if (pci->atu_size >= 2 * DEFAULT_DBI_DMA_OFFSET) {
+ pci->edma.reg_base = pci->atu_base + DEFAULT_DBI_DMA_OFFSET;
+ }
+ }
+
+ /* ELBI is an optional resource */
+ if (!pci->elbi_base) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "elbi");
+ if (res) {
+ pci->elbi_base = devm_ioremap_resource(pci->dev, res);
+ if (IS_ERR(pci->elbi_base))
+ return PTR_ERR(pci->elbi_base);
+ }
+ }
+
+	/* The LLDD is expected to manage the clock and reset state manually */
+ if (dw_pcie_cap_is(pci, REQ_RES)) {
+ ret = dw_pcie_get_clocks(pci);
+ if (ret)
+ return ret;
+
+ ret = dw_pcie_get_resets(pci);
+ if (ret)
+ return ret;
+ }
+
+ if (pci->max_link_speed < 1)
+ pci->max_link_speed = of_pci_get_max_link_speed(np);
+
+ of_property_read_u32(np, "num-lanes", &pci->num_lanes);
+
+ if (of_property_read_bool(np, "snps,enable-cdm-check"))
+ dw_pcie_cap_set(pci, CDM_CHECK);
+
+ return 0;
+}
+
+void dw_pcie_version_detect(struct dw_pcie *pci)
+{
+ u32 ver;
+
+ /* The content of the CSR is zero on DWC PCIe older than v4.70a */
+ ver = dw_pcie_readl_dbi(pci, PCIE_VERSION_NUMBER);
+ if (!ver)
+ return;
+
+ if (pci->version && pci->version != ver)
+ dev_warn(pci->dev, "Versions don't match (%08x != %08x)\n",
+ pci->version, ver);
+ else
+ pci->version = ver;
+
+ ver = dw_pcie_readl_dbi(pci, PCIE_VERSION_TYPE);
+
+ if (pci->type && pci->type != ver)
+ dev_warn(pci->dev, "Types don't match (%08x != %08x)\n",
+ pci->type, ver);
+ else
+ pci->type = ver;
+}
+
+u8 dw_pcie_find_capability(struct dw_pcie *pci, u8 cap)
+{
+ return PCI_FIND_NEXT_CAP(dw_pcie_read_cfg, PCI_CAPABILITY_LIST, cap,
+ pci);
+}
+EXPORT_SYMBOL_GPL(dw_pcie_find_capability);
+
+u16 dw_pcie_find_ext_capability(struct dw_pcie *pci, u8 cap)
+{
+ return PCI_FIND_NEXT_EXT_CAP(dw_pcie_read_cfg, 0, cap, pci);
+}
+EXPORT_SYMBOL_GPL(dw_pcie_find_ext_capability);
+
+static u16 __dw_pcie_find_vsec_capability(struct dw_pcie *pci, u16 vendor_id,
+ u16 vsec_id)
+{
+ u16 vsec = 0;
+ u32 header;
+
+ if (vendor_id != dw_pcie_readw_dbi(pci, PCI_VENDOR_ID))
+ return 0;
+
+ while ((vsec = PCI_FIND_NEXT_EXT_CAP(dw_pcie_read_cfg, vsec,
+ PCI_EXT_CAP_ID_VNDR, pci))) {
+ header = dw_pcie_readl_dbi(pci, vsec + PCI_VNDR_HEADER);
+ if (PCI_VNDR_HEADER_ID(header) == vsec_id)
+ return vsec;
+ }
+
+ return 0;
+}
+
+static u16 dw_pcie_find_vsec_capability(struct dw_pcie *pci,
+ const struct dwc_pcie_vsec_id *vsec_ids)
+{
+ const struct dwc_pcie_vsec_id *vid;
+ u16 vsec;
+ u32 header;
+
+ for (vid = vsec_ids; vid->vendor_id; vid++) {
+ vsec = __dw_pcie_find_vsec_capability(pci, vid->vendor_id,
+ vid->vsec_id);
+ if (vsec) {
+ header = dw_pcie_readl_dbi(pci, vsec + PCI_VNDR_HEADER);
+ if (PCI_VNDR_HEADER_REV(header) == vid->vsec_rev)
+ return vsec;
+ }
+ }
+
+ return 0;
+}
+
+u16 dw_pcie_find_rasdes_capability(struct dw_pcie *pci)
+{
+ return dw_pcie_find_vsec_capability(pci, dwc_pcie_rasdes_vsec_ids);
+}
+EXPORT_SYMBOL_GPL(dw_pcie_find_rasdes_capability);
+
+u16 dw_pcie_find_ptm_capability(struct dw_pcie *pci)
+{
+ return dw_pcie_find_vsec_capability(pci, dwc_pcie_ptm_vsec_ids);
+}
+EXPORT_SYMBOL_GPL(dw_pcie_find_ptm_capability);
int dw_pcie_read(void __iomem *addr, int size, u32 *val)
{
- if ((uintptr_t)addr & (size - 1)) {
+ if (!IS_ALIGNED((uintptr_t)addr, size)) {
*val = 0;
return PCIBIOS_BAD_REGISTER_NUMBER;
}
@@ -40,10 +307,11 @@ int dw_pcie_read(void __iomem *addr, int size, u32 *val)
return PCIBIOS_SUCCESSFUL;
}
+EXPORT_SYMBOL_GPL(dw_pcie_read);
int dw_pcie_write(void __iomem *addr, int size, u32 val)
{
- if ((uintptr_t)addr & (size - 1))
+ if (!IS_ALIGNED((uintptr_t)addr, size))
return PCIBIOS_BAD_REGISTER_NUMBER;
if (size == 4)
@@ -57,338 +325,882 @@ int dw_pcie_write(void __iomem *addr, int size, u32 val)
return PCIBIOS_SUCCESSFUL;
}
+EXPORT_SYMBOL_GPL(dw_pcie_write);
-u32 __dw_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg,
- size_t size)
+u32 dw_pcie_read_dbi(struct dw_pcie *pci, u32 reg, size_t size)
{
int ret;
u32 val;
- if (pci->ops->read_dbi)
- return pci->ops->read_dbi(pci, base, reg, size);
+ if (pci->ops && pci->ops->read_dbi)
+ return pci->ops->read_dbi(pci, pci->dbi_base, reg, size);
- ret = dw_pcie_read(base + reg, size, &val);
+ ret = dw_pcie_read(pci->dbi_base + reg, size, &val);
if (ret)
dev_err(pci->dev, "Read DBI address failed\n");
return val;
}
+EXPORT_SYMBOL_GPL(dw_pcie_read_dbi);
-void __dw_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg,
- size_t size, u32 val)
+void dw_pcie_write_dbi(struct dw_pcie *pci, u32 reg, size_t size, u32 val)
{
int ret;
- if (pci->ops->write_dbi) {
- pci->ops->write_dbi(pci, base, reg, size, val);
+ if (pci->ops && pci->ops->write_dbi) {
+ pci->ops->write_dbi(pci, pci->dbi_base, reg, size, val);
return;
}
- ret = dw_pcie_write(base + reg, size, val);
+ ret = dw_pcie_write(pci->dbi_base + reg, size, val);
if (ret)
dev_err(pci->dev, "Write DBI address failed\n");
}
+EXPORT_SYMBOL_GPL(dw_pcie_write_dbi);
-static u32 dw_pcie_readl_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg)
+void dw_pcie_write_dbi2(struct dw_pcie *pci, u32 reg, size_t size, u32 val)
{
- u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
+ int ret;
+
+ if (pci->ops && pci->ops->write_dbi2) {
+ pci->ops->write_dbi2(pci, pci->dbi_base2, reg, size, val);
+ return;
+ }
- return dw_pcie_readl_atu(pci, offset + reg);
+ ret = dw_pcie_write(pci->dbi_base2 + reg, size, val);
+ if (ret)
+ dev_err(pci->dev, "write DBI address failed\n");
}
+EXPORT_SYMBOL_GPL(dw_pcie_write_dbi2);
-static void dw_pcie_writel_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg,
- u32 val)
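+/*
+ * With the unrolled iATU each region has its own register block at a
+ * fixed offset from atu_base; otherwise all regions share one register
+ * window and the viewport register selects which region it exposes.
+ */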
+static inline void __iomem *dw_pcie_select_atu(struct dw_pcie *pci, u32 dir,
+ u32 index)
{
- u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);
+ if (dw_pcie_cap_is(pci, IATU_UNROLL))
+ return pci->atu_base + PCIE_ATU_UNROLL_BASE(dir, index);
- dw_pcie_writel_atu(pci, offset + reg, val);
+ dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, dir | index);
+ return pci->atu_base;
}
-static void dw_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, int index,
- int type, u64 cpu_addr,
- u64 pci_addr, u32 size)
+static u32 dw_pcie_readl_atu(struct dw_pcie *pci, u32 dir, u32 index, u32 reg)
{
- u32 retries, val;
+ void __iomem *base;
+ int ret;
+ u32 val;
+
+ base = dw_pcie_select_atu(pci, dir, index);
+
+ if (pci->ops && pci->ops->read_dbi)
+ return pci->ops->read_dbi(pci, base, reg, 4);
- dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_BASE,
- lower_32_bits(cpu_addr));
- dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_BASE,
- upper_32_bits(cpu_addr));
- dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LIMIT,
- lower_32_bits(cpu_addr + size - 1));
- dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET,
- lower_32_bits(pci_addr));
- dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET,
- upper_32_bits(pci_addr));
- dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1,
- type);
- dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,
- PCIE_ATU_ENABLE);
+ ret = dw_pcie_read(base + reg, 4, &val);
+ if (ret)
+ dev_err(pci->dev, "Read ATU address failed\n");
+
+ return val;
+}
+
+static void dw_pcie_writel_atu(struct dw_pcie *pci, u32 dir, u32 index,
+ u32 reg, u32 val)
+{
+ void __iomem *base;
+ int ret;
+
+ base = dw_pcie_select_atu(pci, dir, index);
+
+ if (pci->ops && pci->ops->write_dbi) {
+ pci->ops->write_dbi(pci, base, reg, 4, val);
+ return;
+ }
+
+ ret = dw_pcie_write(base + reg, 4, val);
+ if (ret)
+ dev_err(pci->dev, "Write ATU address failed\n");
+}
+
+static inline u32 dw_pcie_readl_atu_ob(struct dw_pcie *pci, u32 index, u32 reg)
+{
+ return dw_pcie_readl_atu(pci, PCIE_ATU_REGION_DIR_OB, index, reg);
+}
+
+static inline void dw_pcie_writel_atu_ob(struct dw_pcie *pci, u32 index, u32 reg,
+ u32 val)
+{
+ dw_pcie_writel_atu(pci, PCIE_ATU_REGION_DIR_OB, index, reg, val);
+}
+static inline u32 dw_pcie_enable_ecrc(u32 val)
+{
/*
- * Make sure ATU enable takes effect before any subsequent config
- * and I/O accesses.
+ * DesignWare core version 4.90A has a design issue where the 'TD'
+ * bit in the Control register-1 of the ATU outbound region acts
+ * like an override for the ECRC setting, i.e., the presence of TLP
+ * Digest (ECRC) in the outgoing TLPs is solely determined by this
+ * bit. This is contrary to the PCIe spec which says that the
+ * enablement of the ECRC is solely determined by the AER
+ * registers.
+ *
+ * Because of this, even when the ECRC is enabled through AER
+ * registers, the transactions going through ATU won't have TLP
+ * Digest as there is no way the PCI core AER code could program
+ * the TD bit which is specific to the DesignWare core.
+ *
+ * The best way to handle this scenario is to program the TD bit
+ * always. It affects only the traffic from root port to downstream
+ * devices.
+ *
+ * At this point,
+ * When ECRC is enabled in AER registers, everything works normally
+ * When ECRC is NOT enabled in AER registers, then,
+	 * on Root Port:  TLP Digest (DWord size) gets appended to each packet
+	 *		  even though it is not required. Since downstream
+	 *		  TLPs are mostly for configuration accesses and BAR
+	 *		  accesses, they are not in the critical path and won't
+	 *		  have much negative effect on performance.
+	 * on End Point:  TLP Digest is received for some/all the packets coming
+	 *		  from the root port. The TLP Digest is ignored because,
+	 *		  as per the PCIe Spec r5.0 v1.0 section 2.2.3
+	 *		  "TLP Digest Rules", when an endpoint receives a TLP
+	 *		  Digest while its ECRC check functionality is disabled
+	 *		  in the AER registers, the received TLP Digest is just
+	 *		  ignored.
+	 * Since no issue or error is reported on either side, the best way to
+	 * handle the scenario is to program the TD bit by default.
*/
- for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
- val = dw_pcie_readl_ob_unroll(pci, index,
- PCIE_ATU_UNR_REGION_CTRL2);
- if (val & PCIE_ATU_ENABLE)
- return;
- mdelay(LINK_WAIT_IATU);
- }
- dev_err(pci->dev, "Outbound iATU is not being enabled\n");
+ return val | PCIE_ATU_TD;
}
-void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
- u64 cpu_addr, u64 pci_addr, u32 size)
+int dw_pcie_prog_outbound_atu(struct dw_pcie *pci,
+ const struct dw_pcie_ob_atu_cfg *atu)
{
+ u64 parent_bus_addr = atu->parent_bus_addr;
u32 retries, val;
+ u64 limit_addr;
- if (pci->ops->cpu_addr_fixup)
- cpu_addr = pci->ops->cpu_addr_fixup(pci, cpu_addr);
+ limit_addr = parent_bus_addr + atu->size - 1;
- if (pci->iatu_unroll_enabled) {
- dw_pcie_prog_outbound_atu_unroll(pci, index, type, cpu_addr,
- pci_addr, size);
- return;
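+	/* Reject windows that are misaligned or cross the region size limit */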
+ if ((limit_addr & ~pci->region_limit) != (parent_bus_addr & ~pci->region_limit) ||
+ !IS_ALIGNED(parent_bus_addr, pci->region_align) ||
+ !IS_ALIGNED(atu->pci_addr, pci->region_align) || !atu->size) {
+ return -EINVAL;
}
- dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT,
- PCIE_ATU_REGION_OUTBOUND | index);
- dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_BASE,
- lower_32_bits(cpu_addr));
- dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_BASE,
- upper_32_bits(cpu_addr));
- dw_pcie_writel_dbi(pci, PCIE_ATU_LIMIT,
- lower_32_bits(cpu_addr + size - 1));
- dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET,
- lower_32_bits(pci_addr));
- dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET,
- upper_32_bits(pci_addr));
- dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type);
- dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE);
+ dw_pcie_writel_atu_ob(pci, atu->index, PCIE_ATU_LOWER_BASE,
+ lower_32_bits(parent_bus_addr));
+ dw_pcie_writel_atu_ob(pci, atu->index, PCIE_ATU_UPPER_BASE,
+ upper_32_bits(parent_bus_addr));
+
+ dw_pcie_writel_atu_ob(pci, atu->index, PCIE_ATU_LIMIT,
+ lower_32_bits(limit_addr));
+ if (dw_pcie_ver_is_ge(pci, 460A))
+ dw_pcie_writel_atu_ob(pci, atu->index, PCIE_ATU_UPPER_LIMIT,
+ upper_32_bits(limit_addr));
+
+ dw_pcie_writel_atu_ob(pci, atu->index, PCIE_ATU_LOWER_TARGET,
+ lower_32_bits(atu->pci_addr));
+ dw_pcie_writel_atu_ob(pci, atu->index, PCIE_ATU_UPPER_TARGET,
+ upper_32_bits(atu->pci_addr));
+
+ val = atu->type | atu->routing | PCIE_ATU_FUNC_NUM(atu->func_no);
+ if (upper_32_bits(limit_addr) > upper_32_bits(parent_bus_addr) &&
+ dw_pcie_ver_is_ge(pci, 460A))
+ val |= PCIE_ATU_INCREASE_REGION_SIZE;
+ if (dw_pcie_ver_is(pci, 490A))
+ val = dw_pcie_enable_ecrc(val);
+ dw_pcie_writel_atu_ob(pci, atu->index, PCIE_ATU_REGION_CTRL1, val);
+
+ val = PCIE_ATU_ENABLE | atu->ctrl2;
+ if (atu->type == PCIE_ATU_TYPE_MSG) {
+		/* Only data-less messages are supported for now */
+ val |= PCIE_ATU_INHIBIT_PAYLOAD | atu->code;
+ }
+ dw_pcie_writel_atu_ob(pci, atu->index, PCIE_ATU_REGION_CTRL2, val);
/*
* Make sure ATU enable takes effect before any subsequent config
* and I/O accesses.
*/
for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
- val = dw_pcie_readl_dbi(pci, PCIE_ATU_CR2);
+ val = dw_pcie_readl_atu_ob(pci, atu->index, PCIE_ATU_REGION_CTRL2);
if (val & PCIE_ATU_ENABLE)
- return;
+ return 0;
mdelay(LINK_WAIT_IATU);
}
+
dev_err(pci->dev, "Outbound iATU is not being enabled\n");
+
+ return -ETIMEDOUT;
}
-static u32 dw_pcie_readl_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg)
+static inline u32 dw_pcie_readl_atu_ib(struct dw_pcie *pci, u32 index, u32 reg)
{
- u32 offset = PCIE_GET_ATU_INB_UNR_REG_OFFSET(index);
-
- return dw_pcie_readl_atu(pci, offset + reg);
+ return dw_pcie_readl_atu(pci, PCIE_ATU_REGION_DIR_IB, index, reg);
}
-static void dw_pcie_writel_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg,
- u32 val)
+static inline void dw_pcie_writel_atu_ib(struct dw_pcie *pci, u32 index, u32 reg,
+ u32 val)
{
- u32 offset = PCIE_GET_ATU_INB_UNR_REG_OFFSET(index);
-
- dw_pcie_writel_atu(pci, offset + reg, val);
+ dw_pcie_writel_atu(pci, PCIE_ATU_REGION_DIR_IB, index, reg, val);
}
-static int dw_pcie_prog_inbound_atu_unroll(struct dw_pcie *pci, int index,
- int bar, u64 cpu_addr,
- enum dw_pcie_as_type as_type)
+int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int type,
+ u64 parent_bus_addr, u64 pci_addr, u64 size)
{
- int type;
+ u64 limit_addr = pci_addr + size - 1;
u32 retries, val;
- dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET,
- lower_32_bits(cpu_addr));
- dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET,
- upper_32_bits(cpu_addr));
-
- switch (as_type) {
- case DW_PCIE_AS_MEM:
- type = PCIE_ATU_TYPE_MEM;
- break;
- case DW_PCIE_AS_IO:
- type = PCIE_ATU_TYPE_IO;
- break;
- default:
+ if ((limit_addr & ~pci->region_limit) != (pci_addr & ~pci->region_limit) ||
+ !IS_ALIGNED(parent_bus_addr, pci->region_align) ||
+ !IS_ALIGNED(pci_addr, pci->region_align) || !size) {
return -EINVAL;
}
- dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1, type);
- dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,
- PCIE_ATU_ENABLE |
- PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));
+ dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_LOWER_BASE,
+ lower_32_bits(pci_addr));
+ dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_UPPER_BASE,
+ upper_32_bits(pci_addr));
+
+ dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_LIMIT,
+ lower_32_bits(limit_addr));
+ if (dw_pcie_ver_is_ge(pci, 460A))
+ dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_UPPER_LIMIT,
+ upper_32_bits(limit_addr));
+
+ dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_LOWER_TARGET,
+ lower_32_bits(parent_bus_addr));
+ dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_UPPER_TARGET,
+ upper_32_bits(parent_bus_addr));
+
+ val = type;
+ if (upper_32_bits(limit_addr) > upper_32_bits(pci_addr) &&
+ dw_pcie_ver_is_ge(pci, 460A))
+ val |= PCIE_ATU_INCREASE_REGION_SIZE;
+ dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_REGION_CTRL1, val);
+ dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_REGION_CTRL2, PCIE_ATU_ENABLE);
/*
* Make sure ATU enable takes effect before any subsequent config
* and I/O accesses.
*/
for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
- val = dw_pcie_readl_ib_unroll(pci, index,
- PCIE_ATU_UNR_REGION_CTRL2);
+ val = dw_pcie_readl_atu_ib(pci, index, PCIE_ATU_REGION_CTRL2);
if (val & PCIE_ATU_ENABLE)
return 0;
mdelay(LINK_WAIT_IATU);
}
+
dev_err(pci->dev, "Inbound iATU is not being enabled\n");
- return -EBUSY;
+ return -ETIMEDOUT;
}
-int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int bar,
- u64 cpu_addr, enum dw_pcie_as_type as_type)
+int dw_pcie_prog_ep_inbound_atu(struct dw_pcie *pci, u8 func_no, int index,
+ int type, u64 parent_bus_addr, u8 bar, size_t size)
{
- int type;
u32 retries, val;
- if (pci->iatu_unroll_enabled)
- return dw_pcie_prog_inbound_atu_unroll(pci, index, bar,
- cpu_addr, as_type);
-
- dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, PCIE_ATU_REGION_INBOUND |
- index);
- dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET, lower_32_bits(cpu_addr));
- dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET, upper_32_bits(cpu_addr));
-
- switch (as_type) {
- case DW_PCIE_AS_MEM:
- type = PCIE_ATU_TYPE_MEM;
- break;
- case DW_PCIE_AS_IO:
- type = PCIE_ATU_TYPE_IO;
- break;
- default:
+ if (!IS_ALIGNED(parent_bus_addr, pci->region_align) ||
+ !IS_ALIGNED(parent_bus_addr, size))
return -EINVAL;
- }
- dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type);
- dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE
- | PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));
+ dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_LOWER_TARGET,
+ lower_32_bits(parent_bus_addr));
+ dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_UPPER_TARGET,
+ upper_32_bits(parent_bus_addr));
+
+ dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_REGION_CTRL1, type |
+ PCIE_ATU_FUNC_NUM(func_no));
+ dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_REGION_CTRL2,
+ PCIE_ATU_ENABLE | PCIE_ATU_FUNC_NUM_MATCH_EN |
+ PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));
/*
* Make sure ATU enable takes effect before any subsequent config
* and I/O accesses.
*/
for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
- val = dw_pcie_readl_dbi(pci, PCIE_ATU_CR2);
+ val = dw_pcie_readl_atu_ib(pci, index, PCIE_ATU_REGION_CTRL2);
if (val & PCIE_ATU_ENABLE)
return 0;
mdelay(LINK_WAIT_IATU);
}
+
dev_err(pci->dev, "Inbound iATU is not being enabled\n");
- return -EBUSY;
+ return -ETIMEDOUT;
}
-void dw_pcie_disable_atu(struct dw_pcie *pci, int index,
- enum dw_pcie_region_type type)
+void dw_pcie_disable_atu(struct dw_pcie *pci, u32 dir, int index)
{
- int region;
-
- switch (type) {
- case DW_PCIE_REGION_INBOUND:
- region = PCIE_ATU_REGION_INBOUND;
- break;
- case DW_PCIE_REGION_OUTBOUND:
- region = PCIE_ATU_REGION_OUTBOUND;
- break;
- default:
- return;
- }
-
- dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, region | index);
- dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, ~PCIE_ATU_ENABLE);
+ dw_pcie_writel_atu(pci, dir, index, PCIE_ATU_REGION_CTRL2, 0);
}
int dw_pcie_wait_for_link(struct dw_pcie *pci)
{
+ u32 offset, val;
int retries;
/* Check if the link is up or not */
- for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
- if (dw_pcie_link_up(pci)) {
- dev_info(pci->dev, "Link up\n");
- return 0;
- }
- usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
+ for (retries = 0; retries < PCIE_LINK_WAIT_MAX_RETRIES; retries++) {
+ if (dw_pcie_link_up(pci))
+ break;
+
+ msleep(PCIE_LINK_WAIT_SLEEP_MS);
}
- dev_err(pci->dev, "Phy link never came up\n");
+ if (retries >= PCIE_LINK_WAIT_MAX_RETRIES) {
+ dev_info(pci->dev, "Phy link never came up\n");
+ return -ETIMEDOUT;
+ }
- return -ETIMEDOUT;
+ /*
+	 * As per PCIe r6.0, sec 6.6.1, for a Downstream Port that supports Link
+	 * speeds greater than 5.0 GT/s, software must wait a minimum of 100 ms
+ * after Link training completes before sending a Configuration Request.
+ */
+ if (pci->max_link_speed > 2)
+ msleep(PCIE_RESET_CONFIG_WAIT_MS);
+
+ offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ val = dw_pcie_readw_dbi(pci, offset + PCI_EXP_LNKSTA);
+
+ dev_info(pci->dev, "PCIe Gen.%u x%u link up\n",
+ FIELD_GET(PCI_EXP_LNKSTA_CLS, val),
+ FIELD_GET(PCI_EXP_LNKSTA_NLW, val));
+
+ return 0;
}
+EXPORT_SYMBOL_GPL(dw_pcie_wait_for_link);
-int dw_pcie_link_up(struct dw_pcie *pci)
+bool dw_pcie_link_up(struct dw_pcie *pci)
{
u32 val;
- if (pci->ops->link_up)
+ if (pci->ops && pci->ops->link_up)
return pci->ops->link_up(pci);
- val = readl(pci->dbi_base + PCIE_PHY_DEBUG_R1);
- return ((val & PCIE_PHY_DEBUG_R1_LINK_UP) &&
- (!(val & PCIE_PHY_DEBUG_R1_LINK_IN_TRAINING)));
+ val = dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG1);
+ return ((val & PCIE_PORT_DEBUG1_LINK_UP) &&
+ (!(val & PCIE_PORT_DEBUG1_LINK_IN_TRAINING)));
}
+EXPORT_SYMBOL_GPL(dw_pcie_link_up);
-void dw_pcie_setup(struct dw_pcie *pci)
+void dw_pcie_upconfig_setup(struct dw_pcie *pci)
{
- int ret;
u32 val;
- u32 lanes;
- struct device *dev = pci->dev;
- struct device_node *np = dev->of_node;
- ret = of_property_read_u32(np, "num-lanes", &lanes);
- if (ret)
- lanes = 0;
+ val = dw_pcie_readl_dbi(pci, PCIE_PORT_MULTI_LANE_CTRL);
+ val |= PORT_MLTI_UPCFG_SUPPORT;
+ dw_pcie_writel_dbi(pci, PCIE_PORT_MULTI_LANE_CTRL, val);
+}
+EXPORT_SYMBOL_GPL(dw_pcie_upconfig_setup);
- /* Set the number of lanes */
- val = dw_pcie_readl_dbi(pci, PCIE_PORT_LINK_CONTROL);
- val &= ~PORT_LINK_MODE_MASK;
- switch (lanes) {
- case 1:
- val |= PORT_LINK_MODE_1_LANES;
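+/*
+ * Cap both the LNKCTL2 target link speed and the advertised LNKCAP maximum
+ * to the platform-requested rate, or just cache the hardware default when
+ * no limit was requested.
+ */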
+static void dw_pcie_link_set_max_speed(struct dw_pcie *pci)
+{
+ u32 cap, ctrl2, link_speed;
+ u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+
+ cap = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
+
+ /*
+ * Even if the platform doesn't want to limit the maximum link speed,
+ * just cache the hardware default value so that the vendor drivers can
+ * use it to do any link specific configuration.
+ */
+ if (pci->max_link_speed < 1) {
+ pci->max_link_speed = FIELD_GET(PCI_EXP_LNKCAP_SLS, cap);
+ return;
+ }
+
+ ctrl2 = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCTL2);
+ ctrl2 &= ~PCI_EXP_LNKCTL2_TLS;
+
+ switch (pcie_link_speed[pci->max_link_speed]) {
+ case PCIE_SPEED_2_5GT:
+ link_speed = PCI_EXP_LNKCTL2_TLS_2_5GT;
break;
- case 2:
- val |= PORT_LINK_MODE_2_LANES;
+ case PCIE_SPEED_5_0GT:
+ link_speed = PCI_EXP_LNKCTL2_TLS_5_0GT;
break;
- case 4:
- val |= PORT_LINK_MODE_4_LANES;
+ case PCIE_SPEED_8_0GT:
+ link_speed = PCI_EXP_LNKCTL2_TLS_8_0GT;
break;
- case 8:
- val |= PORT_LINK_MODE_8_LANES;
+ case PCIE_SPEED_16_0GT:
+ link_speed = PCI_EXP_LNKCTL2_TLS_16_0GT;
break;
default:
- dev_err(pci->dev, "num-lanes %u: invalid value\n", lanes);
- return;
+ /* Use hardware capability */
+ link_speed = FIELD_GET(PCI_EXP_LNKCAP_SLS, cap);
+ ctrl2 &= ~PCI_EXP_LNKCTL2_HASD;
+ break;
}
- dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val);
+
+ dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCTL2, ctrl2 | link_speed);
+
+ cap &= ~((u32)PCI_EXP_LNKCAP_SLS);
+ dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, cap | link_speed);
+}
+
+int dw_pcie_link_get_max_link_width(struct dw_pcie *pci)
+{
+ u8 cap = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ u32 lnkcap = dw_pcie_readl_dbi(pci, cap + PCI_EXP_LNKCAP);
+
+ return FIELD_GET(PCI_EXP_LNKCAP_MLW, lnkcap);
+}
+
+static void dw_pcie_link_set_max_link_width(struct dw_pcie *pci, u32 num_lanes)
+{
+ u32 lnkcap, lwsc, plc;
+ u8 cap;
+
+ if (!num_lanes)
+ return;
+
+ /* Set the number of lanes */
+ plc = dw_pcie_readl_dbi(pci, PCIE_PORT_LINK_CONTROL);
+ plc &= ~PORT_LINK_FAST_LINK_MODE;
+ plc &= ~PORT_LINK_MODE_MASK;
/* Set link width speed control register */
- val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
- val &= ~PORT_LOGIC_LINK_WIDTH_MASK;
- switch (lanes) {
+ lwsc = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
+ lwsc &= ~PORT_LOGIC_LINK_WIDTH_MASK;
+ lwsc |= PORT_LOGIC_LINK_WIDTH_1_LANES;
+ switch (num_lanes) {
case 1:
- val |= PORT_LOGIC_LINK_WIDTH_1_LANES;
+ plc |= PORT_LINK_MODE_1_LANES;
break;
case 2:
- val |= PORT_LOGIC_LINK_WIDTH_2_LANES;
+ plc |= PORT_LINK_MODE_2_LANES;
break;
case 4:
- val |= PORT_LOGIC_LINK_WIDTH_4_LANES;
+ plc |= PORT_LINK_MODE_4_LANES;
break;
case 8:
- val |= PORT_LOGIC_LINK_WIDTH_8_LANES;
+ plc |= PORT_LINK_MODE_8_LANES;
+ break;
+ case 16:
+ plc |= PORT_LINK_MODE_16_LANES;
break;
+ default:
+ dev_err(pci->dev, "num-lanes %u: invalid value\n", num_lanes);
+ return;
+ }
+ dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, plc);
+ dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, lwsc);
+
+ cap = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ lnkcap = dw_pcie_readl_dbi(pci, cap + PCI_EXP_LNKCAP);
+ lnkcap &= ~PCI_EXP_LNKCAP_MLW;
+ lnkcap |= FIELD_PREP(PCI_EXP_LNKCAP_MLW, num_lanes);
+ dw_pcie_writel_dbi(pci, cap + PCI_EXP_LNKCAP, lnkcap);
+}
+
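+/*
+ * Probe the iATU geometry: detect the unrolled vs. viewport register
+ * layout, then count the usable windows by writing a test pattern to
+ * each region's target register until it no longer sticks.
+ */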
+void dw_pcie_iatu_detect(struct dw_pcie *pci)
+{
+ int max_region, ob, ib;
+ u32 val, min, dir;
+ u64 max;
+
+ val = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT);
+ if (val == 0xFFFFFFFF) {
+ dw_pcie_cap_set(pci, IATU_UNROLL);
+
+ max_region = min((int)pci->atu_size / 512, 256);
+ } else {
+ pci->atu_base = pci->dbi_base + PCIE_ATU_VIEWPORT_BASE;
+ pci->atu_size = PCIE_ATU_VIEWPORT_SIZE;
+
+ dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, 0xFF);
+ max_region = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT) + 1;
+ }
+
+ for (ob = 0; ob < max_region; ob++) {
+ dw_pcie_writel_atu_ob(pci, ob, PCIE_ATU_LOWER_TARGET, 0x11110000);
+ val = dw_pcie_readl_atu_ob(pci, ob, PCIE_ATU_LOWER_TARGET);
+ if (val != 0x11110000)
+ break;
+ }
+
+ for (ib = 0; ib < max_region; ib++) {
+ dw_pcie_writel_atu_ib(pci, ib, PCIE_ATU_LOWER_TARGET, 0x11110000);
+ val = dw_pcie_readl_atu_ib(pci, ib, PCIE_ATU_LOWER_TARGET);
+ if (val != 0x11110000)
+ break;
+ }
+
+ if (ob) {
+ dir = PCIE_ATU_REGION_DIR_OB;
+ } else if (ib) {
+ dir = PCIE_ATU_REGION_DIR_IB;
+ } else {
+ dev_err(pci->dev, "No iATU regions found\n");
+ return;
+ }
+
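+	/*
+	 * The hardwired low bits of the LIMIT register give the minimum
+	 * region granularity; the optional UPPER_LIMIT register gives the
+	 * maximum supported region size.
+	 */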
+ dw_pcie_writel_atu(pci, dir, 0, PCIE_ATU_LIMIT, 0x0);
+ min = dw_pcie_readl_atu(pci, dir, 0, PCIE_ATU_LIMIT);
+
+ if (dw_pcie_ver_is_ge(pci, 460A)) {
+ dw_pcie_writel_atu(pci, dir, 0, PCIE_ATU_UPPER_LIMIT, 0xFFFFFFFF);
+ max = dw_pcie_readl_atu(pci, dir, 0, PCIE_ATU_UPPER_LIMIT);
+ } else {
+ max = 0;
}
- dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
+
+ pci->num_ob_windows = ob;
+ pci->num_ib_windows = ib;
+ pci->region_align = 1 << fls(min);
+ pci->region_limit = (max << 32) | (SZ_4G - 1);
+
+ dev_info(pci->dev, "iATU: unroll %s, %u ob, %u ib, align %uK, limit %lluG\n",
+ dw_pcie_cap_is(pci, IATU_UNROLL) ? "T" : "F",
+ pci->num_ob_windows, pci->num_ib_windows,
+ pci->region_align / SZ_1K, (pci->region_limit + 1) / SZ_1G);
+}
+
+static u32 dw_pcie_readl_dma(struct dw_pcie *pci, u32 reg)
+{
+ u32 val = 0;
+ int ret;
+
+ if (pci->ops && pci->ops->read_dbi)
+ return pci->ops->read_dbi(pci, pci->edma.reg_base, reg, 4);
+
+ ret = dw_pcie_read(pci->edma.reg_base + reg, 4, &val);
+ if (ret)
+ dev_err(pci->dev, "Read DMA address failed\n");
+
+ return val;
+}
+
+static int dw_pcie_edma_irq_vector(struct device *dev, unsigned int nr)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ char name[6];
+ int ret;
+
+ if (nr >= EDMA_MAX_WR_CH + EDMA_MAX_RD_CH)
+ return -EINVAL;
+
+ ret = platform_get_irq_byname_optional(pdev, "dma");
+ if (ret > 0)
+ return ret;
+
+ snprintf(name, sizeof(name), "dma%u", nr);
+
+ return platform_get_irq_byname_optional(pdev, name);
+}
+
+static struct dw_edma_plat_ops dw_pcie_edma_ops = {
+ .irq_vector = dw_pcie_edma_irq_vector,
+};
+
+static void dw_pcie_edma_init_data(struct dw_pcie *pci)
+{
+ pci->edma.dev = pci->dev;
+
+ if (!pci->edma.ops)
+ pci->edma.ops = &dw_pcie_edma_ops;
+
+ pci->edma.flags |= DW_EDMA_CHIP_LOCAL;
+}
+
+static int dw_pcie_edma_find_mf(struct dw_pcie *pci)
+{
+ u32 val;
+
+ /*
+ * Bail out finding the mapping format if it is already set by the glue
+ * driver. Also ensure that the edma.reg_base is pointing to a valid
+ * memory region.
+ */
+ if (pci->edma.mf != EDMA_MF_EDMA_LEGACY)
+ return pci->edma.reg_base ? 0 : -ENODEV;
+
+ /*
+ * Indirect eDMA CSRs access has been completely removed since v5.40a
+ * thus no space is now reserved for the eDMA channels viewport and
+ * former DMA CTRL register is no longer fixed to FFs.
+ */
+ if (dw_pcie_ver_is_ge(pci, 540A))
+ val = 0xFFFFFFFF;
+ else
+ val = dw_pcie_readl_dbi(pci, PCIE_DMA_VIEWPORT_BASE + PCIE_DMA_CTRL);
+
+ if (val == 0xFFFFFFFF && pci->edma.reg_base) {
+ pci->edma.mf = EDMA_MF_EDMA_UNROLL;
+ } else if (val != 0xFFFFFFFF) {
+ pci->edma.mf = EDMA_MF_EDMA_LEGACY;
+
+ pci->edma.reg_base = pci->dbi_base + PCIE_DMA_VIEWPORT_BASE;
+ } else {
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int dw_pcie_edma_find_channels(struct dw_pcie *pci)
+{
+ u32 val;
+
+ /*
+ * Autodetect the read/write channels count only for non-HDMA platforms.
+	 * HDMA platforms with native CSR mapping don't support autodetect,
+ * so the glue drivers should've passed the valid count already. If not,
+ * the below sanity check will catch it.
+ */
+ if (pci->edma.mf != EDMA_MF_HDMA_NATIVE) {
+ val = dw_pcie_readl_dma(pci, PCIE_DMA_CTRL);
+
+ pci->edma.ll_wr_cnt = FIELD_GET(PCIE_DMA_NUM_WR_CHAN, val);
+ pci->edma.ll_rd_cnt = FIELD_GET(PCIE_DMA_NUM_RD_CHAN, val);
+ }
+
+ /* Sanity check the channels count if the mapping was incorrect */
+ if (!pci->edma.ll_wr_cnt || pci->edma.ll_wr_cnt > EDMA_MAX_WR_CH ||
+ !pci->edma.ll_rd_cnt || pci->edma.ll_rd_cnt > EDMA_MAX_RD_CH)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int dw_pcie_edma_find_chip(struct dw_pcie *pci)
+{
+ int ret;
+
+ dw_pcie_edma_init_data(pci);
+
+ ret = dw_pcie_edma_find_mf(pci);
+ if (ret)
+ return ret;
+
+ return dw_pcie_edma_find_channels(pci);
+}
+
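+/*
+ * Accept either a single shared "dma" IRQ for all channels or one
+ * "dma%d" IRQ per channel; anything in between is rejected.
+ */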
+static int dw_pcie_edma_irq_verify(struct dw_pcie *pci)
+{
+ struct platform_device *pdev = to_platform_device(pci->dev);
+ u16 ch_cnt = pci->edma.ll_wr_cnt + pci->edma.ll_rd_cnt;
+ char name[15];
+ int ret;
+
+ if (pci->edma.nr_irqs > 1)
+ return pci->edma.nr_irqs != ch_cnt ? -EINVAL : 0;
+
+ ret = platform_get_irq_byname_optional(pdev, "dma");
+ if (ret > 0) {
+ pci->edma.nr_irqs = 1;
+ return 0;
+ }
+
+ for (; pci->edma.nr_irqs < ch_cnt; pci->edma.nr_irqs++) {
+ snprintf(name, sizeof(name), "dma%d", pci->edma.nr_irqs);
+
+ ret = platform_get_irq_byname_optional(pdev, name);
+ if (ret <= 0)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
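+/* Allocate a coherent linked-list buffer for each read and write channel */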
+static int dw_pcie_edma_ll_alloc(struct dw_pcie *pci)
+{
+ struct dw_edma_region *ll;
+ dma_addr_t paddr;
+ int i;
+
+ for (i = 0; i < pci->edma.ll_wr_cnt; i++) {
+ ll = &pci->edma.ll_region_wr[i];
+ ll->sz = DMA_LLP_MEM_SIZE;
+ ll->vaddr.mem = dmam_alloc_coherent(pci->dev, ll->sz,
+ &paddr, GFP_KERNEL);
+ if (!ll->vaddr.mem)
+ return -ENOMEM;
+
+ ll->paddr = paddr;
+ }
+
+ for (i = 0; i < pci->edma.ll_rd_cnt; i++) {
+ ll = &pci->edma.ll_region_rd[i];
+ ll->sz = DMA_LLP_MEM_SIZE;
+ ll->vaddr.mem = dmam_alloc_coherent(pci->dev, ll->sz,
+ &paddr, GFP_KERNEL);
+ if (!ll->vaddr.mem)
+ return -ENOMEM;
+
+ ll->paddr = paddr;
+ }
+
+ return 0;
+}
+
+int dw_pcie_edma_detect(struct dw_pcie *pci)
+{
+ int ret;
+
+	/* Don't fail if no eDMA was found (for backward compatibility) */
+ ret = dw_pcie_edma_find_chip(pci);
+ if (ret)
+ return 0;
+
+	/* Don't fail on IRQ verification (for backward compatibility) */
+ ret = dw_pcie_edma_irq_verify(pci);
+ if (ret) {
+ dev_err(pci->dev, "Invalid eDMA IRQs found\n");
+ return 0;
+ }
+
+ ret = dw_pcie_edma_ll_alloc(pci);
+ if (ret) {
+ dev_err(pci->dev, "Couldn't allocate LLP memory\n");
+ return ret;
+ }
+
+ /* Don't fail if the DW eDMA driver can't find the device */
+ ret = dw_edma_probe(&pci->edma);
+ if (ret && ret != -ENODEV) {
+ dev_err(pci->dev, "Couldn't register eDMA device\n");
+ return ret;
+ }
+
+ dev_info(pci->dev, "eDMA: unroll %s, %hu wr, %hu rd\n",
+ pci->edma.mf == EDMA_MF_EDMA_UNROLL ? "T" : "F",
+ pci->edma.ll_wr_cnt, pci->edma.ll_rd_cnt);
+
+ return 0;
+}
+
+void dw_pcie_edma_remove(struct dw_pcie *pci)
+{
+ dw_edma_remove(&pci->edma);
+}
+
+void dw_pcie_hide_unsupported_l1ss(struct dw_pcie *pci)
+{
+ u16 l1ss;
+ u32 l1ss_cap;
+
+ if (pci->l1ss_support)
+ return;
+
+ l1ss = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_L1SS);
+ if (!l1ss)
+ return;
+
+ /*
+ * Unless the driver claims "l1ss_support", don't advertise L1 PM
+ * Substates because they require CLKREQ# and possibly other
+ * device-specific configuration.
+ */
+ l1ss_cap = dw_pcie_readl_dbi(pci, l1ss + PCI_L1SS_CAP);
+ l1ss_cap &= ~(PCI_L1SS_CAP_PCIPM_L1_1 | PCI_L1SS_CAP_ASPM_L1_1 |
+ PCI_L1SS_CAP_PCIPM_L1_2 | PCI_L1SS_CAP_ASPM_L1_2 |
+ PCI_L1SS_CAP_L1_PM_SS);
+ dw_pcie_writel_dbi(pci, l1ss + PCI_L1SS_CAP, l1ss_cap);
+}
+
+void dw_pcie_setup(struct dw_pcie *pci)
+{
+ u32 val;
+
+ dw_pcie_link_set_max_speed(pci);
+
+ /* Configure Gen1 N_FTS */
+ if (pci->n_fts[0]) {
+ val = dw_pcie_readl_dbi(pci, PCIE_PORT_AFR);
+ val &= ~(PORT_AFR_N_FTS_MASK | PORT_AFR_CC_N_FTS_MASK);
+ val |= PORT_AFR_N_FTS(pci->n_fts[0]);
+ val |= PORT_AFR_CC_N_FTS(pci->n_fts[0]);
+ dw_pcie_writel_dbi(pci, PCIE_PORT_AFR, val);
+ }
+
+ /* Configure Gen2+ N_FTS */
+ if (pci->n_fts[1]) {
+ val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
+ val &= ~PORT_LOGIC_N_FTS_MASK;
+ val |= pci->n_fts[1];
+ dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
+ }
+
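+	/* Start the continuous CDM register check if the platform enabled it */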
+ if (dw_pcie_cap_is(pci, CDM_CHECK)) {
+ val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS);
+ val |= PCIE_PL_CHK_REG_CHK_REG_CONTINUOUS |
+ PCIE_PL_CHK_REG_CHK_REG_START;
+ dw_pcie_writel_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS, val);
+ }
+
+ val = dw_pcie_readl_dbi(pci, PCIE_PORT_LINK_CONTROL);
+ val &= ~PORT_LINK_FAST_LINK_MODE;
+ val |= PORT_LINK_DLL_LINK_EN;
+ dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val);
+
+ dw_pcie_link_set_max_link_width(pci, pci->num_lanes);
+}
+
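+/*
+ * Compute the offset between a CPU physical address and the matching
+ * address on the PCIe controller's parent bus, using the named "reg"
+ * entry from the devicetree as the reference point.
+ */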
+resource_size_t dw_pcie_parent_bus_offset(struct dw_pcie *pci,
+ const char *reg_name,
+ resource_size_t cpu_phys_addr)
+{
+ struct device *dev = pci->dev;
+ struct device_node *np = dev->of_node;
+ int index;
+ u64 reg_addr, fixup_addr;
+ u64 (*fixup)(struct dw_pcie *pcie, u64 cpu_addr);
+
+ /* Look up reg_name address on parent bus */
+ index = of_property_match_string(np, "reg-names", reg_name);
+
+ if (index < 0) {
+ dev_err(dev, "No %s in devicetree \"reg\" property\n", reg_name);
+ return 0;
+ }
+
+ of_property_read_reg(np, index, &reg_addr, NULL);
+
+ fixup = pci->ops ? pci->ops->cpu_addr_fixup : NULL;
+ if (fixup) {
+ fixup_addr = fixup(pci, cpu_phys_addr);
+ if (reg_addr == fixup_addr) {
+ dev_info(dev, "%s reg[%d] %#010llx == %#010llx == fixup(cpu %#010llx); %ps is redundant with this devicetree\n",
+ reg_name, index, reg_addr, fixup_addr,
+ (unsigned long long) cpu_phys_addr, fixup);
+ } else {
+ dev_warn(dev, "%s reg[%d] %#010llx != %#010llx == fixup(cpu %#010llx); devicetree is broken\n",
+ reg_name, index, reg_addr, fixup_addr,
+ (unsigned long long) cpu_phys_addr);
+ reg_addr = fixup_addr;
+ }
+
+ return cpu_phys_addr - reg_addr;
+ }
+
+ if (pci->use_parent_dt_ranges) {
+ /*
+ * This platform once had a fixup, presumably because it
+ * translates between CPU and PCI controller addresses.
+ * Log a note if devicetree didn't describe a translation.
+ */
+ if (reg_addr == cpu_phys_addr)
+			dev_info(dev, "%s reg[%d] %#010llx == cpu %#010llx; no fixup was ever needed for this devicetree\n",
+ reg_name, index, reg_addr,
+ (unsigned long long) cpu_phys_addr);
+ } else {
+ if (reg_addr != cpu_phys_addr) {
+ dev_warn(dev, "%s reg[%d] %#010llx != cpu %#010llx; no fixup and devicetree \"ranges\" is broken, assuming no translation\n",
+ reg_name, index, reg_addr,
+ (unsigned long long) cpu_phys_addr);
+ return 0;
+ }
+ }
+
+ return cpu_phys_addr - reg_addr;
}
diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h
index 9943d8c68335..31685951a080 100644
--- a/drivers/pci/controller/dwc/pcie-designware.h
+++ b/drivers/pci/controller/dwc/pcie-designware.h
@@ -3,7 +3,7 @@
* Synopsys DesignWare PCIe host controller driver
*
* Copyright (C) 2013 Samsung Electronics Co., Ltd.
- * http://www.samsung.com
+ * https://www.samsung.com
*
* Author: Jingoo Han <jg1.han@samsung.com>
*/
@@ -11,42 +11,107 @@
#ifndef _PCIE_DESIGNWARE_H
#define _PCIE_DESIGNWARE_H
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
#include <linux/dma-mapping.h>
+#include <linux/dma/edma.h>
+#include <linux/gpio/consumer.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/pci.h>
+#include <linux/pci-ecam.h>
+#include <linux/reset.h>
#include <linux/pci-epc.h>
#include <linux/pci-epf.h>
-/* Parameters for the waiting for link up routine */
-#define LINK_WAIT_MAX_RETRIES 10
-#define LINK_WAIT_USLEEP_MIN 90000
-#define LINK_WAIT_USLEEP_MAX 100000
+#include "../../pci.h"
+
+/* DWC PCIe IP-core versions (native support since v4.70a) */
+#define DW_PCIE_VER_365A 0x3336352a
+#define DW_PCIE_VER_460A 0x3436302a
+#define DW_PCIE_VER_470A 0x3437302a
+#define DW_PCIE_VER_480A 0x3438302a
+#define DW_PCIE_VER_490A 0x3439302a
+#define DW_PCIE_VER_520A 0x3532302a
+#define DW_PCIE_VER_540A 0x3534302a
+
+#define __dw_pcie_ver_cmp(_pci, _ver, _op) \
+ ((_pci)->version _op DW_PCIE_VER_ ## _ver)
+
+#define dw_pcie_ver_is(_pci, _ver) __dw_pcie_ver_cmp(_pci, _ver, ==)
+
+#define dw_pcie_ver_is_ge(_pci, _ver) __dw_pcie_ver_cmp(_pci, _ver, >=)
+
+#define dw_pcie_ver_type_is(_pci, _ver, _type) \
+ (__dw_pcie_ver_cmp(_pci, _ver, ==) && \
+ __dw_pcie_ver_cmp(_pci, TYPE_ ## _type, ==))
+
+#define dw_pcie_ver_type_is_ge(_pci, _ver, _type) \
+ (__dw_pcie_ver_cmp(_pci, _ver, ==) && \
+ __dw_pcie_ver_cmp(_pci, TYPE_ ## _type, >=))
+
+/* DWC PCIe controller capabilities */
+#define DW_PCIE_CAP_REQ_RES 0
+#define DW_PCIE_CAP_IATU_UNROLL 1
+#define DW_PCIE_CAP_CDM_CHECK 2
+
+#define dw_pcie_cap_is(_pci, _cap) \
+ test_bit(DW_PCIE_CAP_ ## _cap, &(_pci)->caps)
+
+#define dw_pcie_cap_set(_pci, _cap) \
+ set_bit(DW_PCIE_CAP_ ## _cap, &(_pci)->caps)
/* Parameters for the waiting for iATU enabled routine */
#define LINK_WAIT_MAX_IATU_RETRIES 5
#define LINK_WAIT_IATU 9
/* Synopsys-specific PCIe configuration registers */
+#define PCIE_PORT_FORCE 0x708
+#define PORT_FORCE_DO_DESKEW_FOR_SRIS BIT(23)
+
+#define PCIE_PORT_AFR 0x70C
+#define PORT_AFR_N_FTS_MASK GENMASK(15, 8)
+#define PORT_AFR_N_FTS(n) FIELD_PREP(PORT_AFR_N_FTS_MASK, n)
+#define PORT_AFR_CC_N_FTS_MASK GENMASK(23, 16)
+#define PORT_AFR_CC_N_FTS(n) FIELD_PREP(PORT_AFR_CC_N_FTS_MASK, n)
+#define PORT_AFR_ENTER_ASPM BIT(30)
+#define PORT_AFR_L0S_ENTRANCE_LAT_SHIFT 24
+#define PORT_AFR_L0S_ENTRANCE_LAT_MASK GENMASK(26, 24)
+#define PORT_AFR_L1_ENTRANCE_LAT_SHIFT 27
+#define PORT_AFR_L1_ENTRANCE_LAT_MASK GENMASK(29, 27)
+
#define PCIE_PORT_LINK_CONTROL 0x710
-#define PORT_LINK_MODE_MASK (0x3f << 16)
-#define PORT_LINK_MODE_1_LANES (0x1 << 16)
-#define PORT_LINK_MODE_2_LANES (0x3 << 16)
-#define PORT_LINK_MODE_4_LANES (0x7 << 16)
-#define PORT_LINK_MODE_8_LANES (0xf << 16)
+#define PORT_LINK_DLL_LINK_EN BIT(5)
+#define PORT_LINK_FAST_LINK_MODE BIT(7)
+#define PORT_LINK_MODE_MASK GENMASK(21, 16)
+#define PORT_LINK_MODE(n) FIELD_PREP(PORT_LINK_MODE_MASK, n)
+#define PORT_LINK_MODE_1_LANES PORT_LINK_MODE(0x1)
+#define PORT_LINK_MODE_2_LANES PORT_LINK_MODE(0x3)
+#define PORT_LINK_MODE_4_LANES PORT_LINK_MODE(0x7)
+#define PORT_LINK_MODE_8_LANES PORT_LINK_MODE(0xf)
+#define PORT_LINK_MODE_16_LANES PORT_LINK_MODE(0x1f)
+
+#define PCIE_PORT_LANE_SKEW 0x714
+#define PORT_LANE_SKEW_INSERT_MASK GENMASK(23, 0)
#define PCIE_PORT_DEBUG0 0x728
-#define PORT_LOGIC_LTSSM_STATE_MASK 0x1f
+#define PORT_LOGIC_LTSSM_STATE_MASK 0x3f
#define PORT_LOGIC_LTSSM_STATE_L0 0x11
+#define PCIE_PORT_DEBUG1 0x72C
+#define PCIE_PORT_DEBUG1_LINK_UP BIT(4)
+#define PCIE_PORT_DEBUG1_LINK_IN_TRAINING BIT(29)
#define PCIE_LINK_WIDTH_SPEED_CONTROL 0x80C
-#define PORT_LOGIC_SPEED_CHANGE (0x1 << 17)
-#define PORT_LOGIC_LINK_WIDTH_MASK (0x1f << 8)
-#define PORT_LOGIC_LINK_WIDTH_1_LANES (0x1 << 8)
-#define PORT_LOGIC_LINK_WIDTH_2_LANES (0x2 << 8)
-#define PORT_LOGIC_LINK_WIDTH_4_LANES (0x4 << 8)
-#define PORT_LOGIC_LINK_WIDTH_8_LANES (0x8 << 8)
+#define PORT_LOGIC_N_FTS_MASK GENMASK(7, 0)
+#define PORT_LOGIC_SPEED_CHANGE BIT(17)
+#define PORT_LOGIC_LINK_WIDTH_MASK GENMASK(12, 8)
+#define PORT_LOGIC_LINK_WIDTH(n) FIELD_PREP(PORT_LOGIC_LINK_WIDTH_MASK, n)
+#define PORT_LOGIC_LINK_WIDTH_1_LANES PORT_LOGIC_LINK_WIDTH(0x1)
+#define PORT_LOGIC_LINK_WIDTH_2_LANES PORT_LOGIC_LINK_WIDTH(0x2)
+#define PORT_LOGIC_LINK_WIDTH_4_LANES PORT_LOGIC_LINK_WIDTH(0x4)
+#define PORT_LOGIC_LINK_WIDTH_8_LANES PORT_LOGIC_LINK_WIDTH(0x8)
#define PCIE_MSI_ADDR_LO 0x820
#define PCIE_MSI_ADDR_HI 0x824
@@ -54,33 +119,124 @@
#define PCIE_MSI_INTR0_MASK 0x82C
#define PCIE_MSI_INTR0_STATUS 0x830
+#define GEN3_RELATED_OFF 0x890
+#define GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL BIT(0)
+#define GEN3_RELATED_OFF_EQ_PHASE_2_3 BIT(9)
+#define GEN3_RELATED_OFF_RXEQ_RGRDLESS_RXTS BIT(13)
+#define GEN3_RELATED_OFF_GEN3_EQ_DISABLE BIT(16)
+#define GEN3_RELATED_OFF_RATE_SHADOW_SEL_SHIFT 24
+#define GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK GENMASK(25, 24)
+
+#define GEN3_EQ_CONTROL_OFF 0x8A8
+#define GEN3_EQ_CONTROL_OFF_FB_MODE GENMASK(3, 0)
+#define GEN3_EQ_CONTROL_OFF_PHASE23_EXIT_MODE BIT(4)
+#define GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC GENMASK(23, 8)
+#define GEN3_EQ_CONTROL_OFF_FOM_INC_INITIAL_EVAL BIT(24)
+
+#define GEN3_EQ_FB_MODE_DIR_CHANGE_OFF 0x8AC
+#define GEN3_EQ_FMDC_T_MIN_PHASE23 GENMASK(4, 0)
+#define GEN3_EQ_FMDC_N_EVALS GENMASK(9, 5)
+#define GEN3_EQ_FMDC_MAX_PRE_CURSOR_DELTA GENMASK(13, 10)
+#define GEN3_EQ_FMDC_MAX_POST_CURSOR_DELTA GENMASK(17, 14)
+
+#define COHERENCY_CONTROL_1_OFF 0x8E0
+#define CFG_MEMTYPE_BOUNDARY_LOW_ADDR_MASK GENMASK(31, 2)
+#define CFG_MEMTYPE_VALUE BIT(0)
+
+#define COHERENCY_CONTROL_2_OFF 0x8E4
+#define COHERENCY_CONTROL_3_OFF 0x8E8
+
+#define PCIE_PORT_MULTI_LANE_CTRL 0x8C0
+#define PORT_MLTI_UPCFG_SUPPORT BIT(7)
+
+#define PCIE_VERSION_NUMBER 0x8F8
+#define PCIE_VERSION_TYPE 0x8FC
+
+/*
+ * iATU inbound and outbound window CSRs. Prior to IP-core v4.80a, each
+ * iATU region's CSRs were only indirectly accessible, by means of a
+ * dedicated viewport selector. In DWC PCIe v4.80a the iATU/eDMA CSR space
+ * was redesigned so that the viewport is unrolled into a directly
+ * accessible CSR space.
+ */
#define PCIE_ATU_VIEWPORT 0x900
-#define PCIE_ATU_REGION_INBOUND (0x1 << 31)
-#define PCIE_ATU_REGION_OUTBOUND (0x0 << 31)
-#define PCIE_ATU_REGION_INDEX2 (0x2 << 0)
-#define PCIE_ATU_REGION_INDEX1 (0x1 << 0)
-#define PCIE_ATU_REGION_INDEX0 (0x0 << 0)
-#define PCIE_ATU_CR1 0x904
-#define PCIE_ATU_TYPE_MEM (0x0 << 0)
-#define PCIE_ATU_TYPE_IO (0x2 << 0)
-#define PCIE_ATU_TYPE_CFG0 (0x4 << 0)
-#define PCIE_ATU_TYPE_CFG1 (0x5 << 0)
-#define PCIE_ATU_CR2 0x908
-#define PCIE_ATU_ENABLE (0x1 << 31)
-#define PCIE_ATU_BAR_MODE_ENABLE (0x1 << 30)
-#define PCIE_ATU_LOWER_BASE 0x90C
-#define PCIE_ATU_UPPER_BASE 0x910
-#define PCIE_ATU_LIMIT 0x914
-#define PCIE_ATU_LOWER_TARGET 0x918
-#define PCIE_ATU_BUS(x) (((x) & 0xff) << 24)
-#define PCIE_ATU_DEV(x) (((x) & 0x1f) << 19)
-#define PCIE_ATU_FUNC(x) (((x) & 0x7) << 16)
-#define PCIE_ATU_UPPER_TARGET 0x91C
+#define PCIE_ATU_REGION_DIR_IB BIT(31)
+#define PCIE_ATU_REGION_DIR_OB 0
+#define PCIE_ATU_VIEWPORT_BASE 0x904
+#define PCIE_ATU_UNROLL_BASE(dir, index) \
+ (((index) << 9) | ((dir == PCIE_ATU_REGION_DIR_IB) ? BIT(8) : 0))
+#define PCIE_ATU_VIEWPORT_SIZE 0x2C
+#define PCIE_ATU_REGION_CTRL1 0x000
+#define PCIE_ATU_INCREASE_REGION_SIZE BIT(13)
+#define PCIE_ATU_TYPE_MEM 0x0
+#define PCIE_ATU_TYPE_IO 0x2
+#define PCIE_ATU_TYPE_CFG0 0x4
+#define PCIE_ATU_TYPE_CFG1 0x5
+#define PCIE_ATU_TYPE_MSG 0x10
+#define PCIE_ATU_TD BIT(8)
+#define PCIE_ATU_FUNC_NUM(pf) ((pf) << 20)
+#define PCIE_ATU_REGION_CTRL2 0x004
+#define PCIE_ATU_ENABLE BIT(31)
+#define PCIE_ATU_BAR_MODE_ENABLE BIT(30)
+#define PCIE_ATU_CFG_SHIFT_MODE_ENABLE BIT(28)
+#define PCIE_ATU_INHIBIT_PAYLOAD BIT(22)
+#define PCIE_ATU_FUNC_NUM_MATCH_EN BIT(19)
+#define PCIE_ATU_LOWER_BASE 0x008
+#define PCIE_ATU_UPPER_BASE 0x00C
+#define PCIE_ATU_LIMIT 0x010
+#define PCIE_ATU_LOWER_TARGET 0x014
+#define PCIE_ATU_BUS(x) FIELD_PREP(GENMASK(31, 24), x)
+#define PCIE_ATU_DEV(x) FIELD_PREP(GENMASK(23, 19), x)
+#define PCIE_ATU_FUNC(x) FIELD_PREP(GENMASK(18, 16), x)
+#define PCIE_ATU_UPPER_TARGET 0x018
+#define PCIE_ATU_UPPER_LIMIT 0x020
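+
+/*
+ * For example, in the unrolled space outbound region 2 starts at
+ * PCIE_ATU_UNROLL_BASE(PCIE_ATU_REGION_DIR_OB, 2) = (2 << 9) = 0x400,
+ * while inbound region 2 starts at (2 << 9) | BIT(8) = 0x500; each
+ * region's CSR set spans PCIE_ATU_VIEWPORT_SIZE bytes.
+ */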
#define PCIE_MISC_CONTROL_1_OFF 0x8BC
-#define PCIE_DBI_RO_WR_EN (0x1 << 0)
+#define PCIE_DBI_RO_WR_EN BIT(0)
+
+#define PCIE_MSIX_DOORBELL 0x948
+#define PCIE_MSIX_DOORBELL_PF_SHIFT 24
/*
+ * eDMA CSRs. In DW PCIe IP-core v4.70a and older, the eDMA registers were
+ * accessible via the Port Logic register space. The unrolled mapping was
+ * introduced afterwards so that the eDMA and iATU can be accessed via a
+ * dedicated register space.
+ */
+#define PCIE_DMA_VIEWPORT_BASE 0x970
+#define PCIE_DMA_UNROLL_BASE 0x80000
+#define PCIE_DMA_CTRL 0x008
+#define PCIE_DMA_NUM_WR_CHAN GENMASK(3, 0)
+#define PCIE_DMA_NUM_RD_CHAN GENMASK(19, 16)
+
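+/*
+ * The available channel counts are decoded from PCIE_DMA_CTRL in the eDMA
+ * register space, along these lines (sketch; "dma_base" stands for
+ * wherever the platform maps the eDMA CSRs):
+ *
+ *   val = readl(dma_base + PCIE_DMA_CTRL);
+ *   wr_chans = FIELD_GET(PCIE_DMA_NUM_WR_CHAN, val);
+ *   rd_chans = FIELD_GET(PCIE_DMA_NUM_RD_CHAN, val);
+ */
+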
+#define PCIE_PL_CHK_REG_CONTROL_STATUS 0xB20
+#define PCIE_PL_CHK_REG_CHK_REG_START BIT(0)
+#define PCIE_PL_CHK_REG_CHK_REG_CONTINUOUS BIT(1)
+#define PCIE_PL_CHK_REG_CHK_REG_COMPARISON_ERROR BIT(16)
+#define PCIE_PL_CHK_REG_CHK_REG_LOGIC_ERROR BIT(17)
+#define PCIE_PL_CHK_REG_CHK_REG_COMPLETE BIT(18)
+
+#define PCIE_PL_CHK_REG_ERR_ADDR 0xB28
+
+/*
+ * 16.0 GT/s (Gen 4) lane margining register definitions
+ */
+#define GEN4_LANE_MARGINING_1_OFF 0xB80
+#define MARGINING_MAX_VOLTAGE_OFFSET GENMASK(29, 24)
+#define MARGINING_NUM_VOLTAGE_STEPS GENMASK(22, 16)
+#define MARGINING_MAX_TIMING_OFFSET GENMASK(13, 8)
+#define MARGINING_NUM_TIMING_STEPS GENMASK(5, 0)
+
+#define GEN4_LANE_MARGINING_2_OFF 0xB84
+#define MARGINING_IND_ERROR_SAMPLER BIT(28)
+#define MARGINING_SAMPLE_REPORTING_METHOD BIT(27)
+#define MARGINING_IND_LEFT_RIGHT_TIMING BIT(26)
+#define MARGINING_IND_UP_DOWN_VOLTAGE BIT(25)
+#define MARGINING_VOLTAGE_SUPPORTED BIT(24)
+#define MARGINING_MAXLANES GENMASK(20, 16)
+#define MARGINING_SAMPLE_RATE_TIMING GENMASK(13, 8)
+#define MARGINING_SAMPLE_RATE_VOLTAGE GENMASK(5, 0)
+/*
* iATU Unroll-specific register definitions
 * From core version 4.80 onwards, address translation is done via the unrolled CSR space
*/
@@ -88,9 +244,44 @@
#define PCIE_ATU_UNR_REGION_CTRL2 0x04
#define PCIE_ATU_UNR_LOWER_BASE 0x08
#define PCIE_ATU_UNR_UPPER_BASE 0x0C
-#define PCIE_ATU_UNR_LIMIT 0x10
+#define PCIE_ATU_UNR_LOWER_LIMIT 0x10
#define PCIE_ATU_UNR_LOWER_TARGET 0x14
#define PCIE_ATU_UNR_UPPER_TARGET 0x18
+#define PCIE_ATU_UNR_UPPER_LIMIT 0x20
+
+/*
+ * RAS-DES register definitions
+ */
+#define PCIE_RAS_DES_EVENT_COUNTER_CONTROL 0x8
+#define EVENT_COUNTER_ALL_CLEAR 0x3
+#define EVENT_COUNTER_ENABLE_ALL 0x7
+#define EVENT_COUNTER_ENABLE_SHIFT 2
+#define EVENT_COUNTER_EVENT_SEL_MASK GENMASK(7, 0)
+#define EVENT_COUNTER_EVENT_SEL_SHIFT 16
+#define EVENT_COUNTER_EVENT_Tx_L0S 0x2
+#define EVENT_COUNTER_EVENT_Rx_L0S 0x3
+#define EVENT_COUNTER_EVENT_L1 0x5
+#define EVENT_COUNTER_EVENT_L1_1 0x7
+#define EVENT_COUNTER_EVENT_L1_2 0x8
+#define EVENT_COUNTER_GROUP_SEL_SHIFT 24
+#define EVENT_COUNTER_GROUP_5 0x5
+
+#define PCIE_RAS_DES_EVENT_COUNTER_DATA 0xc
+
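+/*
+ * Composing a counter-control write (sketch; "rasdes" is the offset
+ * returned by dw_pcie_find_rasdes_capability()), e.g. selecting group 5,
+ * event L1_2, and enabling all counters:
+ *
+ *   val = EVENT_COUNTER_GROUP_5 << EVENT_COUNTER_GROUP_SEL_SHIFT |
+ *         EVENT_COUNTER_EVENT_L1_2 << EVENT_COUNTER_EVENT_SEL_SHIFT |
+ *         EVENT_COUNTER_ENABLE_ALL << EVENT_COUNTER_ENABLE_SHIFT;
+ *   dw_pcie_writel_dbi(pci, rasdes + PCIE_RAS_DES_EVENT_COUNTER_CONTROL, val);
+ */
+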
+/* PTM register definitions */
+#define PTM_RES_REQ_CTRL 0x8
+#define PTM_RES_CCONTEXT_VALID BIT(0)
+#define PTM_REQ_AUTO_UPDATE_ENABLED BIT(0)
+#define PTM_REQ_START_UPDATE BIT(1)
+
+#define PTM_LOCAL_LSB 0x10
+#define PTM_LOCAL_MSB 0x14
+#define PTM_T1_T2_LSB 0x18
+#define PTM_T1_T2_MSB 0x1c
+#define PTM_T3_T4_LSB 0x28
+#define PTM_T3_T4_MSB 0x2c
+#define PTM_MASTER_LSB 0x38
+#define PTM_MASTER_MSB 0x3c
/*
* The default address offset between dbi_base and atu_base. Root controller
@@ -99,13 +290,7 @@
* this offset, if atu_base not set.
*/
#define DEFAULT_DBI_ATU_OFFSET (0x3 << 20)
-
-/* Register address builder */
-#define PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(region) \
- ((region) << 9)
-
-#define PCIE_GET_ATU_INB_UNR_REG_OFFSET(region) \
- (((region) << 9) | (0x1 << 8))
+#define DEFAULT_DBI_DMA_OFFSET PCIE_DMA_UNROLL_BASE
#define MAX_MSI_IRQS 256
#define MAX_MSI_IRQS_PER_CTRL 32
@@ -117,16 +302,13 @@
#define MAX_IATU_IN 256
#define MAX_IATU_OUT 256
-struct pcie_port;
+/* Default eDMA LLP memory size */
+#define DMA_LLP_MEM_SIZE PAGE_SIZE
+
struct dw_pcie;
+struct dw_pcie_rp;
struct dw_pcie_ep;
-enum dw_pcie_region_type {
- DW_PCIE_REGION_UNKNOWN,
- DW_PCIE_REGION_INBOUND,
- DW_PCIE_REGION_OUTBOUND,
-};
-
enum dw_pcie_device_mode {
DW_PCIE_UNKNOWN_TYPE,
DW_PCIE_EP_TYPE,
@@ -134,82 +316,169 @@ enum dw_pcie_device_mode {
DW_PCIE_RC_TYPE,
};
+enum dw_pcie_app_clk {
+ DW_PCIE_DBI_CLK,
+ DW_PCIE_MSTR_CLK,
+ DW_PCIE_SLV_CLK,
+ DW_PCIE_NUM_APP_CLKS
+};
+
+enum dw_pcie_core_clk {
+ DW_PCIE_PIPE_CLK,
+ DW_PCIE_CORE_CLK,
+ DW_PCIE_AUX_CLK,
+ DW_PCIE_REF_CLK,
+ DW_PCIE_NUM_CORE_CLKS
+};
+
+enum dw_pcie_app_rst {
+ DW_PCIE_DBI_RST,
+ DW_PCIE_MSTR_RST,
+ DW_PCIE_SLV_RST,
+ DW_PCIE_NUM_APP_RSTS
+};
+
+enum dw_pcie_core_rst {
+ DW_PCIE_NON_STICKY_RST,
+ DW_PCIE_STICKY_RST,
+ DW_PCIE_CORE_RST,
+ DW_PCIE_PIPE_RST,
+ DW_PCIE_PHY_RST,
+ DW_PCIE_HOT_RST,
+ DW_PCIE_PWR_RST,
+ DW_PCIE_NUM_CORE_RSTS
+};
+
+enum dw_pcie_ltssm {
+ /* Need to align with PCIE_PORT_DEBUG0 bits 0:5 */
+ DW_PCIE_LTSSM_DETECT_QUIET = 0x0,
+ DW_PCIE_LTSSM_DETECT_ACT = 0x1,
+ DW_PCIE_LTSSM_POLL_ACTIVE = 0x2,
+ DW_PCIE_LTSSM_POLL_COMPLIANCE = 0x3,
+ DW_PCIE_LTSSM_POLL_CONFIG = 0x4,
+ DW_PCIE_LTSSM_PRE_DETECT_QUIET = 0x5,
+ DW_PCIE_LTSSM_DETECT_WAIT = 0x6,
+ DW_PCIE_LTSSM_CFG_LINKWD_START = 0x7,
+ DW_PCIE_LTSSM_CFG_LINKWD_ACEPT = 0x8,
+ DW_PCIE_LTSSM_CFG_LANENUM_WAI = 0x9,
+ DW_PCIE_LTSSM_CFG_LANENUM_ACEPT = 0xa,
+ DW_PCIE_LTSSM_CFG_COMPLETE = 0xb,
+ DW_PCIE_LTSSM_CFG_IDLE = 0xc,
+ DW_PCIE_LTSSM_RCVRY_LOCK = 0xd,
+ DW_PCIE_LTSSM_RCVRY_SPEED = 0xe,
+ DW_PCIE_LTSSM_RCVRY_RCVRCFG = 0xf,
+ DW_PCIE_LTSSM_RCVRY_IDLE = 0x10,
+ DW_PCIE_LTSSM_L0 = 0x11,
+ DW_PCIE_LTSSM_L0S = 0x12,
+ DW_PCIE_LTSSM_L123_SEND_EIDLE = 0x13,
+ DW_PCIE_LTSSM_L1_IDLE = 0x14,
+ DW_PCIE_LTSSM_L2_IDLE = 0x15,
+ DW_PCIE_LTSSM_L2_WAKE = 0x16,
+ DW_PCIE_LTSSM_DISABLED_ENTRY = 0x17,
+ DW_PCIE_LTSSM_DISABLED_IDLE = 0x18,
+ DW_PCIE_LTSSM_DISABLED = 0x19,
+ DW_PCIE_LTSSM_LPBK_ENTRY = 0x1a,
+ DW_PCIE_LTSSM_LPBK_ACTIVE = 0x1b,
+ DW_PCIE_LTSSM_LPBK_EXIT = 0x1c,
+ DW_PCIE_LTSSM_LPBK_EXIT_TIMEOUT = 0x1d,
+ DW_PCIE_LTSSM_HOT_RESET_ENTRY = 0x1e,
+ DW_PCIE_LTSSM_HOT_RESET = 0x1f,
+ DW_PCIE_LTSSM_RCVRY_EQ0 = 0x20,
+ DW_PCIE_LTSSM_RCVRY_EQ1 = 0x21,
+ DW_PCIE_LTSSM_RCVRY_EQ2 = 0x22,
+ DW_PCIE_LTSSM_RCVRY_EQ3 = 0x23,
+
+ DW_PCIE_LTSSM_UNKNOWN = 0xFFFFFFFF,
+};
+
+struct dw_pcie_ob_atu_cfg {
+ int index;
+ int type;
+ u8 func_no;
+ u8 code;
+ u8 routing;
+ u32 ctrl2;
+ u64 parent_bus_addr;
+ u64 pci_addr;
+ u64 size;
+};
+
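+/*
+ * Callers fill this on the stack and hand it to
+ * dw_pcie_prog_outbound_atu(). Sketch for a type-0 config window ("busdev"
+ * would be composed via PCIE_ATU_BUS()/PCIE_ATU_DEV()/PCIE_ATU_FUNC()):
+ *
+ *   struct dw_pcie_ob_atu_cfg atu = { 0 };
+ *
+ *   atu.index = 0;
+ *   atu.type = PCIE_ATU_TYPE_CFG0;
+ *   atu.parent_bus_addr = pp->cfg0_base - pci->parent_bus_offset;
+ *   atu.pci_addr = busdev;
+ *   atu.size = pp->cfg0_size;
+ *   ret = dw_pcie_prog_outbound_atu(pci, &atu);
+ */
+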
struct dw_pcie_host_ops {
- int (*rd_own_conf)(struct pcie_port *pp, int where, int size, u32 *val);
- int (*wr_own_conf)(struct pcie_port *pp, int where, int size, u32 val);
- int (*rd_other_conf)(struct pcie_port *pp, struct pci_bus *bus,
- unsigned int devfn, int where, int size, u32 *val);
- int (*wr_other_conf)(struct pcie_port *pp, struct pci_bus *bus,
- unsigned int devfn, int where, int size, u32 val);
- int (*host_init)(struct pcie_port *pp);
- void (*msi_set_irq)(struct pcie_port *pp, int irq);
- void (*msi_clear_irq)(struct pcie_port *pp, int irq);
- phys_addr_t (*get_msi_addr)(struct pcie_port *pp);
- u32 (*get_msi_data)(struct pcie_port *pp, int pos);
- void (*scan_bus)(struct pcie_port *pp);
- void (*set_num_vectors)(struct pcie_port *pp);
- int (*msi_host_init)(struct pcie_port *pp);
- void (*msi_irq_ack)(int irq, struct pcie_port *pp);
+ int (*init)(struct dw_pcie_rp *pp);
+ void (*deinit)(struct dw_pcie_rp *pp);
+ void (*post_init)(struct dw_pcie_rp *pp);
+ int (*msi_init)(struct dw_pcie_rp *pp);
+ void (*pme_turn_off)(struct dw_pcie_rp *pp);
};
-struct pcie_port {
- u8 root_bus_nr;
+struct dw_pcie_rp {
+ bool has_msi_ctrl:1;
+ bool cfg0_io_shared:1;
u64 cfg0_base;
void __iomem *va_cfg0_base;
u32 cfg0_size;
- u64 cfg1_base;
- void __iomem *va_cfg1_base;
- u32 cfg1_size;
resource_size_t io_base;
phys_addr_t io_bus_addr;
u32 io_size;
- u64 mem_base;
- phys_addr_t mem_bus_addr;
- u32 mem_size;
- struct resource *cfg;
- struct resource *io;
- struct resource *mem;
- struct resource *busn;
int irq;
const struct dw_pcie_host_ops *ops;
- int msi_irq;
+ int msi_irq[MAX_MSI_CTRLS];
struct irq_domain *irq_domain;
- struct irq_domain *msi_domain;
dma_addr_t msi_data;
+ struct irq_chip *msi_irq_chip;
u32 num_vectors;
- u32 irq_status[MAX_MSI_CTRLS];
+ u32 irq_mask[MAX_MSI_CTRLS];
+ struct pci_host_bridge *bridge;
raw_spinlock_t lock;
DECLARE_BITMAP(msi_irq_in_use, MAX_MSI_IRQS);
-};
-
-enum dw_pcie_as_type {
- DW_PCIE_AS_UNKNOWN,
- DW_PCIE_AS_MEM,
- DW_PCIE_AS_IO,
+ bool use_atu_msg;
+ int msg_atu_index;
+ struct resource *msg_res;
+ bool use_linkup_irq;
+ struct pci_eq_presets presets;
+ struct pci_config_window *cfg;
+ bool ecam_enabled;
+ bool native_ecam;
};
struct dw_pcie_ep_ops {
- void (*ep_init)(struct dw_pcie_ep *ep);
+ void (*pre_init)(struct dw_pcie_ep *ep);
+ void (*init)(struct dw_pcie_ep *ep);
int (*raise_irq)(struct dw_pcie_ep *ep, u8 func_no,
- enum pci_epc_irq_type type, u16 interrupt_num);
+ unsigned int type, u16 interrupt_num);
+ const struct pci_epc_features* (*get_features)(struct dw_pcie_ep *ep);
+ /*
+ * Provide a way to access a given function's config space on platforms
+ * where each function sits at a different offset within the DBI space:
+ * return that function's offset. If function selection is instead done
+ * by writing a register, return 0 here and implement the selection in
+ * the platform driver's callback.
+ */
+ unsigned int (*get_dbi_offset)(struct dw_pcie_ep *ep, u8 func_no);
+ unsigned int (*get_dbi2_offset)(struct dw_pcie_ep *ep, u8 func_no);
+};
+
+struct dw_pcie_ep_func {
+ struct list_head list;
+ u8 func_no;
+ u8 msi_cap; /* MSI capability offset */
+ u8 msix_cap; /* MSI-X capability offset */
};
struct dw_pcie_ep {
struct pci_epc *epc;
- struct dw_pcie_ep_ops *ops;
+ struct list_head func_list;
+ const struct dw_pcie_ep_ops *ops;
phys_addr_t phys_base;
size_t addr_size;
size_t page_size;
- u8 bar_to_atu[6];
+ u8 bar_to_atu[PCI_STD_NUM_BARS];
phys_addr_t *outbound_addr;
unsigned long *ib_window_map;
unsigned long *ob_window_map;
- u32 num_ib_windows;
- u32 num_ob_windows;
void __iomem *msi_mem;
phys_addr_t msi_mem_phys;
- u8 msi_cap; /* MSI capability offset */
- u8 msix_cap; /* MSI-X capability offset */
+ struct pci_epf_bar *epf_bar[PCI_STD_NUM_BARS];
};
struct dw_pcie_ops {
@@ -218,22 +487,68 @@ struct dw_pcie_ops {
size_t size);
void (*write_dbi)(struct dw_pcie *pcie, void __iomem *base, u32 reg,
size_t size, u32 val);
- int (*link_up)(struct dw_pcie *pcie);
+ void (*write_dbi2)(struct dw_pcie *pcie, void __iomem *base, u32 reg,
+ size_t size, u32 val);
+ bool (*link_up)(struct dw_pcie *pcie);
+ enum dw_pcie_ltssm (*get_ltssm)(struct dw_pcie *pcie);
int (*start_link)(struct dw_pcie *pcie);
void (*stop_link)(struct dw_pcie *pcie);
+ int (*assert_perst)(struct dw_pcie *pcie, bool assert);
+};
+
+struct debugfs_info {
+ struct dentry *debug_dir;
+ void *rasdes_info;
};
struct dw_pcie {
struct device *dev;
void __iomem *dbi_base;
+ resource_size_t dbi_phys_addr;
void __iomem *dbi_base2;
- /* Used when iatu_unroll_enabled is true */
void __iomem *atu_base;
- u32 num_viewport;
- u8 iatu_unroll_enabled;
- struct pcie_port pp;
+ void __iomem *elbi_base;
+ resource_size_t atu_phys_addr;
+ size_t atu_size;
+ resource_size_t parent_bus_offset;
+ u32 num_ib_windows;
+ u32 num_ob_windows;
+ u32 region_align;
+ u64 region_limit;
+ struct dw_pcie_rp pp;
struct dw_pcie_ep ep;
const struct dw_pcie_ops *ops;
+ u32 version;
+ u32 type;
+ unsigned long caps;
+ int num_lanes;
+ int max_link_speed;
+ u8 n_fts[2];
+ struct dw_edma_chip edma;
+ bool l1ss_support; /* L1 PM Substates support */
+ struct clk_bulk_data app_clks[DW_PCIE_NUM_APP_CLKS];
+ struct clk_bulk_data core_clks[DW_PCIE_NUM_CORE_CLKS];
+ struct reset_control_bulk_data app_rsts[DW_PCIE_NUM_APP_RSTS];
+ struct reset_control_bulk_data core_rsts[DW_PCIE_NUM_CORE_RSTS];
+ struct gpio_desc *pe_rst;
+ bool suspended;
+ struct debugfs_info *debugfs;
+ enum dw_pcie_device_mode mode;
+ u16 ptm_vsec_offset;
+ struct pci_ptm_debugfs *ptm_debugfs;
+
+ /*
+ * If iATU input addresses are offset from CPU physical addresses,
+ * we previously required .cpu_addr_fixup() to convert them. We
+ * now rely on the devicetree instead. If .cpu_addr_fixup()
+ * exists, we compare its results with devicetree.
+ *
+ * If .cpu_addr_fixup() does not exist, we assume the offset is
+ * zero and warn if devicetree claims otherwise. If we know all
+ * devicetrees correctly describe the offset, set
+ * use_parent_dt_ranges to true to avoid this warning.
+ */
+ bool use_parent_dt_ranges;
};
#define to_dw_pcie_from_pp(port) container_of((port), struct dw_pcie, pp)
@@ -241,72 +556,210 @@ struct dw_pcie {
#define to_dw_pcie_from_ep(endpoint) \
container_of((endpoint), struct dw_pcie, ep)
+int dw_pcie_get_resources(struct dw_pcie *pci);
+
+void dw_pcie_version_detect(struct dw_pcie *pci);
+
+u8 dw_pcie_find_capability(struct dw_pcie *pci, u8 cap);
+u16 dw_pcie_find_ext_capability(struct dw_pcie *pci, u8 cap);
+u16 dw_pcie_find_rasdes_capability(struct dw_pcie *pci);
+u16 dw_pcie_find_ptm_capability(struct dw_pcie *pci);
+
int dw_pcie_read(void __iomem *addr, int size, u32 *val);
int dw_pcie_write(void __iomem *addr, int size, u32 val);
-u32 __dw_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg,
- size_t size);
-void __dw_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg,
- size_t size, u32 val);
-int dw_pcie_link_up(struct dw_pcie *pci);
+u32 dw_pcie_read_dbi(struct dw_pcie *pci, u32 reg, size_t size);
+void dw_pcie_write_dbi(struct dw_pcie *pci, u32 reg, size_t size, u32 val);
+void dw_pcie_write_dbi2(struct dw_pcie *pci, u32 reg, size_t size, u32 val);
+bool dw_pcie_link_up(struct dw_pcie *pci);
+void dw_pcie_upconfig_setup(struct dw_pcie *pci);
int dw_pcie_wait_for_link(struct dw_pcie *pci);
-void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index,
- int type, u64 cpu_addr, u64 pci_addr,
- u32 size);
-int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int bar,
- u64 cpu_addr, enum dw_pcie_as_type as_type);
-void dw_pcie_disable_atu(struct dw_pcie *pci, int index,
- enum dw_pcie_region_type type);
+int dw_pcie_link_get_max_link_width(struct dw_pcie *pci);
+int dw_pcie_prog_outbound_atu(struct dw_pcie *pci,
+ const struct dw_pcie_ob_atu_cfg *atu);
+int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int type,
+ u64 parent_bus_addr, u64 pci_addr, u64 size);
+int dw_pcie_prog_ep_inbound_atu(struct dw_pcie *pci, u8 func_no, int index,
+ int type, u64 parent_bus_addr,
+ u8 bar, size_t size);
+void dw_pcie_disable_atu(struct dw_pcie *pci, u32 dir, int index);
+void dw_pcie_hide_unsupported_l1ss(struct dw_pcie *pci);
void dw_pcie_setup(struct dw_pcie *pci);
+void dw_pcie_iatu_detect(struct dw_pcie *pci);
+int dw_pcie_edma_detect(struct dw_pcie *pci);
+void dw_pcie_edma_remove(struct dw_pcie *pci);
+resource_size_t dw_pcie_parent_bus_offset(struct dw_pcie *pci,
+ const char *reg_name,
+ resource_size_t cpu_phys_addr);
static inline void dw_pcie_writel_dbi(struct dw_pcie *pci, u32 reg, u32 val)
{
- __dw_pcie_write_dbi(pci, pci->dbi_base, reg, 0x4, val);
+ dw_pcie_write_dbi(pci, reg, 0x4, val);
}
static inline u32 dw_pcie_readl_dbi(struct dw_pcie *pci, u32 reg)
{
- return __dw_pcie_read_dbi(pci, pci->dbi_base, reg, 0x4);
+ return dw_pcie_read_dbi(pci, reg, 0x4);
}
static inline void dw_pcie_writew_dbi(struct dw_pcie *pci, u32 reg, u16 val)
{
- __dw_pcie_write_dbi(pci, pci->dbi_base, reg, 0x2, val);
+ dw_pcie_write_dbi(pci, reg, 0x2, val);
}
static inline u16 dw_pcie_readw_dbi(struct dw_pcie *pci, u32 reg)
{
- return __dw_pcie_read_dbi(pci, pci->dbi_base, reg, 0x2);
+ return dw_pcie_read_dbi(pci, reg, 0x2);
}
static inline void dw_pcie_writeb_dbi(struct dw_pcie *pci, u32 reg, u8 val)
{
- __dw_pcie_write_dbi(pci, pci->dbi_base, reg, 0x1, val);
+ dw_pcie_write_dbi(pci, reg, 0x1, val);
}
static inline u8 dw_pcie_readb_dbi(struct dw_pcie *pci, u32 reg)
{
- return __dw_pcie_read_dbi(pci, pci->dbi_base, reg, 0x1);
+ return dw_pcie_read_dbi(pci, reg, 0x1);
}
static inline void dw_pcie_writel_dbi2(struct dw_pcie *pci, u32 reg, u32 val)
{
- __dw_pcie_write_dbi(pci, pci->dbi_base2, reg, 0x4, val);
+ dw_pcie_write_dbi2(pci, reg, 0x4, val);
+}
+
+static inline int dw_pcie_read_cfg_byte(struct dw_pcie *pci, int where,
+ u8 *val)
+{
+ *val = dw_pcie_readb_dbi(pci, where);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static inline int dw_pcie_read_cfg_word(struct dw_pcie *pci, int where,
+ u16 *val)
+{
+ *val = dw_pcie_readw_dbi(pci, where);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static inline int dw_pcie_read_cfg_dword(struct dw_pcie *pci, int where,
+ u32 *val)
+{
+ *val = dw_pcie_readl_dbi(pci, where);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static inline unsigned int dw_pcie_ep_get_dbi_offset(struct dw_pcie_ep *ep,
+ u8 func_no)
+{
+ unsigned int dbi_offset = 0;
+
+ if (ep->ops->get_dbi_offset)
+ dbi_offset = ep->ops->get_dbi_offset(ep, func_no);
+
+ return dbi_offset;
+}
+
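+/*
+ * For example (the spacing is illustrative), a platform that lays its
+ * functions out 0x8000 apart in DBI space would implement:
+ *
+ *   static unsigned int foo_get_dbi_offset(struct dw_pcie_ep *ep, u8 fn)
+ *   {
+ *           return fn * 0x8000;
+ *   }
+ */
+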
+static inline u32 dw_pcie_ep_read_dbi(struct dw_pcie_ep *ep, u8 func_no,
+ u32 reg, size_t size)
+{
+ unsigned int offset = dw_pcie_ep_get_dbi_offset(ep, func_no);
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+ return dw_pcie_read_dbi(pci, offset + reg, size);
+}
+
+static inline void dw_pcie_ep_write_dbi(struct dw_pcie_ep *ep, u8 func_no,
+ u32 reg, size_t size, u32 val)
+{
+ unsigned int offset = dw_pcie_ep_get_dbi_offset(ep, func_no);
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+ dw_pcie_write_dbi(pci, offset + reg, size, val);
+}
+
+static inline void dw_pcie_ep_writel_dbi(struct dw_pcie_ep *ep, u8 func_no,
+ u32 reg, u32 val)
+{
+ dw_pcie_ep_write_dbi(ep, func_no, reg, 0x4, val);
+}
+
+static inline u32 dw_pcie_ep_readl_dbi(struct dw_pcie_ep *ep, u8 func_no,
+ u32 reg)
+{
+ return dw_pcie_ep_read_dbi(ep, func_no, reg, 0x4);
+}
+
+static inline void dw_pcie_ep_writew_dbi(struct dw_pcie_ep *ep, u8 func_no,
+ u32 reg, u16 val)
+{
+ dw_pcie_ep_write_dbi(ep, func_no, reg, 0x2, val);
+}
+
+static inline u16 dw_pcie_ep_readw_dbi(struct dw_pcie_ep *ep, u8 func_no,
+ u32 reg)
+{
+ return dw_pcie_ep_read_dbi(ep, func_no, reg, 0x2);
+}
+
+static inline void dw_pcie_ep_writeb_dbi(struct dw_pcie_ep *ep, u8 func_no,
+ u32 reg, u8 val)
+{
+ dw_pcie_ep_write_dbi(ep, func_no, reg, 0x1, val);
}
-static inline u32 dw_pcie_readl_dbi2(struct dw_pcie *pci, u32 reg)
+static inline u8 dw_pcie_ep_readb_dbi(struct dw_pcie_ep *ep, u8 func_no,
+ u32 reg)
{
- return __dw_pcie_read_dbi(pci, pci->dbi_base2, reg, 0x4);
+ return dw_pcie_ep_read_dbi(ep, func_no, reg, 0x1);
}
-static inline void dw_pcie_writel_atu(struct dw_pcie *pci, u32 reg, u32 val)
+static inline int dw_pcie_ep_read_cfg_byte(struct dw_pcie_ep *ep, u8 func_no,
+ int where, u8 *val)
{
- __dw_pcie_write_dbi(pci, pci->atu_base, reg, 0x4, val);
+ *val = dw_pcie_ep_readb_dbi(ep, func_no, where);
+ return PCIBIOS_SUCCESSFUL;
}
-static inline u32 dw_pcie_readl_atu(struct dw_pcie *pci, u32 reg)
+static inline int dw_pcie_ep_read_cfg_word(struct dw_pcie_ep *ep, u8 func_no,
+ int where, u16 *val)
{
- return __dw_pcie_read_dbi(pci, pci->atu_base, reg, 0x4);
+ *val = dw_pcie_ep_readw_dbi(ep, func_no, where);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static inline int dw_pcie_ep_read_cfg_dword(struct dw_pcie_ep *ep, u8 func_no,
+ int where, u32 *val)
+{
+ *val = dw_pcie_ep_readl_dbi(ep, func_no, where);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static inline unsigned int dw_pcie_ep_get_dbi2_offset(struct dw_pcie_ep *ep,
+ u8 func_no)
+{
+ unsigned int dbi2_offset = 0;
+
+ if (ep->ops->get_dbi2_offset)
+ dbi2_offset = ep->ops->get_dbi2_offset(ep, func_no);
+ else if (ep->ops->get_dbi_offset) /* for backward compatibility */
+ dbi2_offset = ep->ops->get_dbi_offset(ep, func_no);
+
+ return dbi2_offset;
+}
+
+static inline void dw_pcie_ep_write_dbi2(struct dw_pcie_ep *ep, u8 func_no,
+ u32 reg, size_t size, u32 val)
+{
+ unsigned int offset = dw_pcie_ep_get_dbi2_offset(ep, func_no);
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+ dw_pcie_write_dbi2(pci, offset + reg, size, val);
+}
+
+static inline void dw_pcie_ep_writel_dbi2(struct dw_pcie_ep *ep, u8 func_no,
+ u32 reg, u32 val)
+{
+ dw_pcie_ep_write_dbi2(ep, func_no, reg, 0x4, val);
}
static inline void dw_pcie_dbi_ro_wr_en(struct dw_pcie *pci)
@@ -331,67 +784,152 @@ static inline void dw_pcie_dbi_ro_wr_dis(struct dw_pcie *pci)
dw_pcie_writel_dbi(pci, reg, val);
}
+static inline int dw_pcie_start_link(struct dw_pcie *pci)
+{
+ if (pci->ops && pci->ops->start_link)
+ return pci->ops->start_link(pci);
+
+ return 0;
+}
+
+static inline void dw_pcie_stop_link(struct dw_pcie *pci)
+{
+ if (pci->ops && pci->ops->stop_link)
+ pci->ops->stop_link(pci);
+}
+
+static inline int dw_pcie_assert_perst(struct dw_pcie *pci, bool assert)
+{
+ if (pci->ops && pci->ops->assert_perst)
+ return pci->ops->assert_perst(pci, assert);
+
+ return 0;
+}
+
+static inline enum dw_pcie_ltssm dw_pcie_get_ltssm(struct dw_pcie *pci)
+{
+ u32 val;
+
+ if (pci->ops && pci->ops->get_ltssm)
+ return pci->ops->get_ltssm(pci);
+
+ val = dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG0);
+
+ return (enum dw_pcie_ltssm)FIELD_GET(PORT_LOGIC_LTSSM_STATE_MASK, val);
+}
+
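+/*
+ * A caller waiting for training to settle can poll this, e.g. (sketch;
+ * the poll interval and timeout are illustrative):
+ *
+ *   enum dw_pcie_ltssm ltssm;
+ *   int ret;
+ *
+ *   ret = read_poll_timeout(dw_pcie_get_ltssm, ltssm,
+ *                           ltssm == DW_PCIE_LTSSM_L0,
+ *                           10000, 100000, false, pci);
+ */
+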
#ifdef CONFIG_PCIE_DW_HOST
-irqreturn_t dw_handle_msi_irq(struct pcie_port *pp);
-void dw_pcie_msi_init(struct pcie_port *pp);
-void dw_pcie_free_msi(struct pcie_port *pp);
-void dw_pcie_setup_rc(struct pcie_port *pp);
-int dw_pcie_host_init(struct pcie_port *pp);
-int dw_pcie_allocate_domains(struct pcie_port *pp);
+int dw_pcie_suspend_noirq(struct dw_pcie *pci);
+int dw_pcie_resume_noirq(struct dw_pcie *pci);
+irqreturn_t dw_handle_msi_irq(struct dw_pcie_rp *pp);
+void dw_pcie_msi_init(struct dw_pcie_rp *pp);
+int dw_pcie_msi_host_init(struct dw_pcie_rp *pp);
+void dw_pcie_free_msi(struct dw_pcie_rp *pp);
+int dw_pcie_setup_rc(struct dw_pcie_rp *pp);
+int dw_pcie_host_init(struct dw_pcie_rp *pp);
+void dw_pcie_host_deinit(struct dw_pcie_rp *pp);
+int dw_pcie_allocate_domains(struct dw_pcie_rp *pp);
+void __iomem *dw_pcie_own_conf_map_bus(struct pci_bus *bus, unsigned int devfn,
+ int where);
#else
-static inline irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
+static inline int dw_pcie_suspend_noirq(struct dw_pcie *pci)
{
- return IRQ_NONE;
+ return 0;
}
-static inline void dw_pcie_msi_init(struct pcie_port *pp)
+static inline int dw_pcie_resume_noirq(struct dw_pcie *pci)
{
+ return 0;
}
-static inline void dw_pcie_free_msi(struct pcie_port *pp)
+static inline irqreturn_t dw_handle_msi_irq(struct dw_pcie_rp *pp)
{
+ return IRQ_NONE;
}
-static inline void dw_pcie_setup_rc(struct pcie_port *pp)
+static inline void dw_pcie_msi_init(struct dw_pcie_rp *pp)
+{ }
+
+static inline int dw_pcie_msi_host_init(struct dw_pcie_rp *pp)
{
+ return -ENODEV;
}
-static inline int dw_pcie_host_init(struct pcie_port *pp)
+static inline void dw_pcie_free_msi(struct dw_pcie_rp *pp)
+{ }
+
+static inline int dw_pcie_setup_rc(struct dw_pcie_rp *pp)
{
return 0;
}
-static inline int dw_pcie_allocate_domains(struct pcie_port *pp)
+static inline int dw_pcie_host_init(struct dw_pcie_rp *pp)
{
return 0;
}
+
+static inline void dw_pcie_host_deinit(struct dw_pcie_rp *pp)
+{
+}
+
+static inline int dw_pcie_allocate_domains(struct dw_pcie_rp *pp)
+{
+ return 0;
+}
+static inline void __iomem *dw_pcie_own_conf_map_bus(struct pci_bus *bus,
+ unsigned int devfn,
+ int where)
+{
+ return NULL;
+}
#endif
#ifdef CONFIG_PCIE_DW_EP
void dw_pcie_ep_linkup(struct dw_pcie_ep *ep);
+void dw_pcie_ep_linkdown(struct dw_pcie_ep *ep);
int dw_pcie_ep_init(struct dw_pcie_ep *ep);
-void dw_pcie_ep_exit(struct dw_pcie_ep *ep);
-int dw_pcie_ep_raise_legacy_irq(struct dw_pcie_ep *ep, u8 func_no);
+int dw_pcie_ep_init_registers(struct dw_pcie_ep *ep);
+void dw_pcie_ep_deinit(struct dw_pcie_ep *ep);
+void dw_pcie_ep_cleanup(struct dw_pcie_ep *ep);
+int dw_pcie_ep_raise_intx_irq(struct dw_pcie_ep *ep, u8 func_no);
int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
u8 interrupt_num);
int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
u16 interrupt_num);
+int dw_pcie_ep_raise_msix_irq_doorbell(struct dw_pcie_ep *ep, u8 func_no,
+ u16 interrupt_num);
void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar);
+int dw_pcie_ep_hide_ext_capability(struct dw_pcie *pci, u8 prev_cap, u8 cap);
+struct dw_pcie_ep_func *
+dw_pcie_ep_get_func_from_ep(struct dw_pcie_ep *ep, u8 func_no);
#else
static inline void dw_pcie_ep_linkup(struct dw_pcie_ep *ep)
{
}
+static inline void dw_pcie_ep_linkdown(struct dw_pcie_ep *ep)
+{
+}
+
static inline int dw_pcie_ep_init(struct dw_pcie_ep *ep)
{
return 0;
}
-static inline void dw_pcie_ep_exit(struct dw_pcie_ep *ep)
+static inline int dw_pcie_ep_init_registers(struct dw_pcie_ep *ep)
+{
+ return 0;
+}
+
+static inline void dw_pcie_ep_deinit(struct dw_pcie_ep *ep)
{
}
-static inline int dw_pcie_ep_raise_legacy_irq(struct dw_pcie_ep *ep, u8 func_no)
+static inline void dw_pcie_ep_cleanup(struct dw_pcie_ep *ep)
+{
+}
+
+static inline int dw_pcie_ep_raise_intx_irq(struct dw_pcie_ep *ep, u8 func_no)
{
return 0;
}
@@ -408,8 +946,41 @@ static inline int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
return 0;
}
+static inline int dw_pcie_ep_raise_msix_irq_doorbell(struct dw_pcie_ep *ep,
+ u8 func_no,
+ u16 interrupt_num)
+{
+ return 0;
+}
+
static inline void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)
{
}
+
+static inline int dw_pcie_ep_hide_ext_capability(struct dw_pcie *pci,
+ u8 prev_cap, u8 cap)
+{
+ return 0;
+}
+
+static inline struct dw_pcie_ep_func *
+dw_pcie_ep_get_func_from_ep(struct dw_pcie_ep *ep, u8 func_no)
+{
+ return NULL;
+}
#endif
+
+#ifdef CONFIG_PCIE_DW_DEBUGFS
+void dwc_pcie_debugfs_init(struct dw_pcie *pci, enum dw_pcie_device_mode mode);
+void dwc_pcie_debugfs_deinit(struct dw_pcie *pci);
+#else
+static inline void dwc_pcie_debugfs_init(struct dw_pcie *pci,
+ enum dw_pcie_device_mode mode)
+{
+}
+static inline void dwc_pcie_debugfs_deinit(struct dw_pcie *pci)
+{
+}
+#endif
+
#endif /* _PCIE_DESIGNWARE_H */
diff --git a/drivers/pci/controller/dwc/pcie-dw-rockchip.c b/drivers/pci/controller/dwc/pcie-dw-rockchip.c
new file mode 100644
index 000000000000..f8605fe61a41
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-dw-rockchip.c
@@ -0,0 +1,777 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host controller driver for Rockchip SoCs.
+ *
+ * Copyright (C) 2021 Rockchip Electronics Co., Ltd.
+ * http://www.rock-chips.com
+ *
+ * Author: Simon Xue <xxm@rock-chips.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/gpio/consumer.h>
+#include <linux/hw_bitfield.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+
+#include "../../pci.h"
+#include "pcie-designware.h"
+
+/*
+ * The upper 16 bits of the PCIE_CLIENT_* control registers are a write
+ * mask for the lower 16 bits: only bits whose mask bit is set get updated.
+ */
+
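+/*
+ * For example, PCIE_CLIENT_ENABLE_LTSSM below expands to
+ * FIELD_PREP_WM16(BIT(2), 1) == 0x00040004: BIT(2) set in the data half
+ * and BIT(2 + 16) set in the write mask, so the write updates only that
+ * single bit.
+ */
+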
+#define to_rockchip_pcie(x) dev_get_drvdata((x)->dev)
+
+/* General Control Register */
+#define PCIE_CLIENT_GENERAL_CON 0x0
+#define PCIE_CLIENT_MODE_MASK GENMASK(7, 4)
+#define PCIE_CLIENT_MODE_EP 0x0UL
+#define PCIE_CLIENT_MODE_RC 0x4UL
+#define PCIE_CLIENT_SET_MODE(x) FIELD_PREP_WM16(PCIE_CLIENT_MODE_MASK, (x))
+#define PCIE_CLIENT_LD_RQ_RST_GRT FIELD_PREP_WM16(BIT(3), 1)
+#define PCIE_CLIENT_ENABLE_LTSSM FIELD_PREP_WM16(BIT(2), 1)
+#define PCIE_CLIENT_DISABLE_LTSSM FIELD_PREP_WM16(BIT(2), 0)
+
+/* Interrupt Status Register Related to Legacy Interrupt */
+#define PCIE_CLIENT_INTR_STATUS_LEGACY 0x8
+
+/* Interrupt Status Register Related to Miscellaneous Operation */
+#define PCIE_CLIENT_INTR_STATUS_MISC 0x10
+#define PCIE_RDLH_LINK_UP_CHGED BIT(1)
+#define PCIE_LINK_REQ_RST_NOT_INT BIT(2)
+
+/* Interrupt Mask Register Related to Legacy Interrupt */
+#define PCIE_CLIENT_INTR_MASK_LEGACY 0x1c
+#define PCIE_INTR_MASK GENMASK(7, 0)
+#define PCIE_INTR_CLAMP(_x) ((BIT((_x)) & PCIE_INTR_MASK))
+#define PCIE_INTR_LEGACY_MASK(x) (PCIE_INTR_CLAMP((x)) | \
+ (PCIE_INTR_CLAMP((x)) << 16))
+#define PCIE_INTR_LEGACY_UNMASK(x) (PCIE_INTR_CLAMP((x)) << 16)
+
+/* Interrupt Mask Register Related to Miscellaneous Operation */
+#define PCIE_CLIENT_INTR_MASK_MISC 0x24
+
+/* Power Management Control Register */
+#define PCIE_CLIENT_POWER_CON 0x2c
+#define PCIE_CLKREQ_READY FIELD_PREP_WM16(BIT(0), 1)
+#define PCIE_CLKREQ_NOT_READY FIELD_PREP_WM16(BIT(0), 0)
+#define PCIE_CLKREQ_PULL_DOWN FIELD_PREP_WM16(GENMASK(13, 12), 1)
+
+/* Hot Reset Control Register */
+#define PCIE_CLIENT_HOT_RESET_CTRL 0x180
+#define PCIE_LTSSM_APP_DLY2_EN BIT(1)
+#define PCIE_LTSSM_APP_DLY2_DONE BIT(3)
+#define PCIE_LTSSM_ENABLE_ENHANCE BIT(4)
+
+/* LTSSM Status Register */
+#define PCIE_CLIENT_LTSSM_STATUS 0x300
+#define PCIE_LINKUP 0x3
+#define PCIE_LINKUP_MASK GENMASK(17, 16)
+#define PCIE_LTSSM_STATUS_MASK GENMASK(5, 0)
+
+struct rockchip_pcie {
+ struct dw_pcie pci;
+ void __iomem *apb_base;
+ struct phy *phy;
+ struct clk_bulk_data *clks;
+ unsigned int clk_cnt;
+ struct reset_control *rst;
+ struct gpio_desc *rst_gpio;
+ struct irq_domain *irq_domain;
+ const struct rockchip_pcie_of_data *data;
+ bool supports_clkreq;
+};
+
+struct rockchip_pcie_of_data {
+ enum dw_pcie_device_mode mode;
+ const struct pci_epc_features *epc_features;
+};
+
+static int rockchip_pcie_readl_apb(struct rockchip_pcie *rockchip, u32 reg)
+{
+ return readl_relaxed(rockchip->apb_base + reg);
+}
+
+static void rockchip_pcie_writel_apb(struct rockchip_pcie *rockchip, u32 val,
+ u32 reg)
+{
+ writel_relaxed(val, rockchip->apb_base + reg);
+}
+
+static void rockchip_pcie_intx_handler(struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct rockchip_pcie *rockchip = irq_desc_get_handler_data(desc);
+ unsigned long reg, hwirq;
+
+ chained_irq_enter(chip, desc);
+
+ reg = rockchip_pcie_readl_apb(rockchip, PCIE_CLIENT_INTR_STATUS_LEGACY);
+
+ for_each_set_bit(hwirq, &reg, 4)
+ generic_handle_domain_irq(rockchip->irq_domain, hwirq);
+
+ chained_irq_exit(chip, desc);
+}
+
+static void rockchip_intx_mask(struct irq_data *data)
+{
+ rockchip_pcie_writel_apb(irq_data_get_irq_chip_data(data),
+ PCIE_INTR_LEGACY_MASK(data->hwirq),
+ PCIE_CLIENT_INTR_MASK_LEGACY);
+};
+
+static void rockchip_intx_unmask(struct irq_data *data)
+{
+ rockchip_pcie_writel_apb(irq_data_get_irq_chip_data(data),
+ PCIE_INTR_LEGACY_UNMASK(data->hwirq),
+ PCIE_CLIENT_INTR_MASK_LEGACY);
+};
+
+static struct irq_chip rockchip_intx_irq_chip = {
+ .name = "INTx",
+ .irq_mask = rockchip_intx_mask,
+ .irq_unmask = rockchip_intx_unmask,
+ .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND,
+};
+
+static int rockchip_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &rockchip_intx_irq_chip, handle_level_irq);
+ irq_set_chip_data(irq, domain->host_data);
+
+ return 0;
+}
+
+static const struct irq_domain_ops intx_domain_ops = {
+ .map = rockchip_pcie_intx_map,
+};
+
+static int rockchip_pcie_init_irq_domain(struct rockchip_pcie *rockchip)
+{
+ struct device *dev = rockchip->pci.dev;
+ struct device_node *intc;
+
+ intc = of_get_child_by_name(dev->of_node, "legacy-interrupt-controller");
+ if (!intc) {
+ dev_err(dev, "missing child interrupt-controller node\n");
+ return -EINVAL;
+ }
+
+ rockchip->irq_domain = irq_domain_create_linear(of_fwnode_handle(intc), PCI_NUM_INTX,
+ &intx_domain_ops, rockchip);
+ of_node_put(intc);
+ if (!rockchip->irq_domain) {
+ dev_err(dev, "failed to get a INTx IRQ domain\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static u32 rockchip_pcie_get_ltssm(struct rockchip_pcie *rockchip)
+{
+ return rockchip_pcie_readl_apb(rockchip, PCIE_CLIENT_LTSSM_STATUS);
+}
+
+static void rockchip_pcie_enable_ltssm(struct rockchip_pcie *rockchip)
+{
+ rockchip_pcie_writel_apb(rockchip, PCIE_CLIENT_ENABLE_LTSSM,
+ PCIE_CLIENT_GENERAL_CON);
+}
+
+static void rockchip_pcie_disable_ltssm(struct rockchip_pcie *rockchip)
+{
+ rockchip_pcie_writel_apb(rockchip, PCIE_CLIENT_DISABLE_LTSSM,
+ PCIE_CLIENT_GENERAL_CON);
+}
+
+static bool rockchip_pcie_link_up(struct dw_pcie *pci)
+{
+ struct rockchip_pcie *rockchip = to_rockchip_pcie(pci);
+ u32 val = rockchip_pcie_get_ltssm(rockchip);
+
+ return FIELD_GET(PCIE_LINKUP_MASK, val) == PCIE_LINKUP;
+}
+
+/*
+ * See e.g. section '11.6.6.4 L1 Substate' in the RK3588 TRM V1.0 for the
+ * steps needed to support L1 substates. Currently, L1 substates are only
+ * enabled for RC mode, and only if CLKREQ# is properly connected and
+ * supports-clkreq is present in the DT. For EP mode, more work is needed
+ * to actually save power in the L1 substates, so keep them disabled until
+ * there is proper support.
+ */
+static void rockchip_pcie_configure_l1ss(struct dw_pcie *pci)
+{
+ struct rockchip_pcie *rockchip = to_rockchip_pcie(pci);
+
+ /* Enable L1 substates if CLKREQ# is properly connected */
+ if (rockchip->supports_clkreq) {
+ rockchip_pcie_writel_apb(rockchip, PCIE_CLKREQ_READY,
+ PCIE_CLIENT_POWER_CON);
+ pci->l1ss_support = true;
+ return;
+ }
+
+ /*
+ * Otherwise, assert CLKREQ# unconditionally. Since
+ * pci->l1ss_support is not set, the DWC core will prevent L1
+ * Substates support from being advertised.
+ */
+ rockchip_pcie_writel_apb(rockchip,
+ PCIE_CLKREQ_PULL_DOWN | PCIE_CLKREQ_NOT_READY,
+ PCIE_CLIENT_POWER_CON);
+}
+
+static void rockchip_pcie_enable_l0s(struct dw_pcie *pci)
+{
+ u32 cap, lnkcap;
+
+ /* Advertise the ASPM L0s capability for all SoCs */
+ cap = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ if (cap) {
+ lnkcap = dw_pcie_readl_dbi(pci, cap + PCI_EXP_LNKCAP);
+ lnkcap |= PCI_EXP_LNKCAP_ASPM_L0S;
+ dw_pcie_dbi_ro_wr_en(pci);
+ dw_pcie_writel_dbi(pci, cap + PCI_EXP_LNKCAP, lnkcap);
+ dw_pcie_dbi_ro_wr_dis(pci);
+ }
+}
+
+static int rockchip_pcie_start_link(struct dw_pcie *pci)
+{
+ struct rockchip_pcie *rockchip = to_rockchip_pcie(pci);
+
+ /* Reset device */
+ gpiod_set_value_cansleep(rockchip->rst_gpio, 0);
+
+ rockchip_pcie_enable_ltssm(rockchip);
+
+ /*
+ * PCIe requires the refclk to be stable for 100us prior to releasing
+ * PERST#. See table 2-4 in section 2.6.2, AC Specifications, of the PCI
+ * Express Card Electromechanical Specification, Rev. 1.1. However, we
+ * don't know whether the refclk comes from the RC's PHY or from an
+ * external oscillator. If it comes from the RC, enabling the LTSSM is
+ * the right point at which to release PERST#. Allow extra time beyond
+ * the bare 100us, since we don't know how long the device actually needs
+ * to come out of reset.
+ */
+ msleep(PCIE_T_PVPERL_MS);
+ gpiod_set_value_cansleep(rockchip->rst_gpio, 1);
+
+ return 0;
+}
+
+static void rockchip_pcie_stop_link(struct dw_pcie *pci)
+{
+ struct rockchip_pcie *rockchip = to_rockchip_pcie(pci);
+
+ rockchip_pcie_disable_ltssm(rockchip);
+}
+
+static int rockchip_pcie_host_init(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct rockchip_pcie *rockchip = to_rockchip_pcie(pci);
+ struct device *dev = rockchip->pci.dev;
+ int irq, ret;
+
+ irq = of_irq_get_byname(dev->of_node, "legacy");
+ if (irq < 0)
+ return irq;
+
+ ret = rockchip_pcie_init_irq_domain(rockchip);
+ if (ret < 0) {
+ dev_err(dev, "failed to init irq domain\n");
+ return ret;
+ }
+
+ irq_set_chained_handler_and_data(irq, rockchip_pcie_intx_handler,
+ rockchip);
+
+ rockchip_pcie_configure_l1ss(pci);
+ rockchip_pcie_enable_l0s(pci);
+
+ return 0;
+}
+
+static const struct dw_pcie_host_ops rockchip_pcie_host_ops = {
+ .init = rockchip_pcie_host_init,
+};
+
+/*
+ * ATS does not work on RK3588 when running in EP mode.
+ *
+ * After the host has enabled ATS on the EP side, it will send an IOTLB
+ * invalidation request to the EP side. However, the RK3588 will never send
+ * a completion back and eventually the host will print an IOTLB_INV_TIMEOUT
+ * error, and the EP will not be operational. If we hide the ATS capability,
+ * things work as expected.
+ */
+static void rockchip_pcie_ep_hide_broken_ats_cap_rk3588(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct device *dev = pci->dev;
+
+ /* Only hide the ATS capability for RK3588 running in EP mode. */
+ if (!of_device_is_compatible(dev->of_node, "rockchip,rk3588-pcie-ep"))
+ return;
+
+ if (dw_pcie_ep_hide_ext_capability(pci, PCI_EXT_CAP_ID_SECPCI,
+ PCI_EXT_CAP_ID_ATS))
+ dev_err(dev, "failed to hide ATS capability\n");
+}
+
+static void rockchip_pcie_ep_init(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ enum pci_barno bar;
+
+ rockchip_pcie_enable_l0s(pci);
+ rockchip_pcie_ep_hide_broken_ats_cap_rk3588(ep);
+
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
+ dw_pcie_ep_reset_bar(pci, bar);
+};
+
+static int rockchip_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
+ unsigned int type, u16 interrupt_num)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+ switch (type) {
+ case PCI_IRQ_INTX:
+ return dw_pcie_ep_raise_intx_irq(ep, func_no);
+ case PCI_IRQ_MSI:
+ return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
+ case PCI_IRQ_MSIX:
+ return dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num);
+ default:
+ dev_err(pci->dev, "UNKNOWN IRQ type\n");
+ }
+
+ return 0;
+}
+
+static const struct pci_epc_features rockchip_pcie_epc_features_rk3568 = {
+ .linkup_notifier = true,
+ .msi_capable = true,
+ .msix_capable = true,
+ .align = SZ_64K,
+ .bar[BAR_0] = { .type = BAR_RESIZABLE, },
+ .bar[BAR_1] = { .type = BAR_RESIZABLE, },
+ .bar[BAR_2] = { .type = BAR_RESIZABLE, },
+ .bar[BAR_3] = { .type = BAR_RESIZABLE, },
+ .bar[BAR_4] = { .type = BAR_RESIZABLE, },
+ .bar[BAR_5] = { .type = BAR_RESIZABLE, },
+};
+
+/*
+ * BAR4 on rk3588 exposes the ATU Port Logic Structure to the host regardless of
+ * iATU settings for BAR4. This means that BAR4 cannot be used by an EPF driver,
+ * so mark it as RESERVED. (rockchip_pcie_ep_init() will disable all BARs by
+ * default.) If the host could write to BAR4, the iATU settings (for all other
+ * BARs) would be overwritten, resulting in (all other BARs) no longer working.
+ */
+static const struct pci_epc_features rockchip_pcie_epc_features_rk3588 = {
+ .linkup_notifier = true,
+ .msi_capable = true,
+ .msix_capable = true,
+ .align = SZ_64K,
+ .bar[BAR_0] = { .type = BAR_RESIZABLE, },
+ .bar[BAR_1] = { .type = BAR_RESIZABLE, },
+ .bar[BAR_2] = { .type = BAR_RESIZABLE, },
+ .bar[BAR_3] = { .type = BAR_RESIZABLE, },
+ .bar[BAR_4] = { .type = BAR_RESERVED, },
+ .bar[BAR_5] = { .type = BAR_RESIZABLE, },
+};
+
+static const struct pci_epc_features *
+rockchip_pcie_get_features(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct rockchip_pcie *rockchip = to_rockchip_pcie(pci);
+
+ return rockchip->data->epc_features;
+}
+
+static const struct dw_pcie_ep_ops rockchip_pcie_ep_ops = {
+ .init = rockchip_pcie_ep_init,
+ .raise_irq = rockchip_pcie_raise_irq,
+ .get_features = rockchip_pcie_get_features,
+};
+
+static int rockchip_pcie_clk_init(struct rockchip_pcie *rockchip)
+{
+ struct device *dev = rockchip->pci.dev;
+ int ret;
+
+ ret = devm_clk_bulk_get_all(dev, &rockchip->clks);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "failed to get clocks\n");
+
+ rockchip->clk_cnt = ret;
+
+ ret = clk_bulk_prepare_enable(rockchip->clk_cnt, rockchip->clks);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to enable clocks\n");
+
+ return 0;
+}
+
+static int rockchip_pcie_resource_get(struct platform_device *pdev,
+ struct rockchip_pcie *rockchip)
+{
+ rockchip->apb_base = devm_platform_ioremap_resource_byname(pdev, "apb");
+ if (IS_ERR(rockchip->apb_base))
+ return dev_err_probe(&pdev->dev, PTR_ERR(rockchip->apb_base),
+ "failed to map apb registers\n");
+
+ rockchip->rst_gpio = devm_gpiod_get_optional(&pdev->dev, "reset",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(rockchip->rst_gpio))
+ return dev_err_probe(&pdev->dev, PTR_ERR(rockchip->rst_gpio),
+ "failed to get reset gpio\n");
+
+ rockchip->rst = devm_reset_control_array_get_exclusive(&pdev->dev);
+ if (IS_ERR(rockchip->rst))
+ return dev_err_probe(&pdev->dev, PTR_ERR(rockchip->rst),
+ "failed to get reset lines\n");
+
+ rockchip->supports_clkreq = of_property_read_bool(pdev->dev.of_node,
+ "supports-clkreq");
+
+ return 0;
+}
+
+static int rockchip_pcie_phy_init(struct rockchip_pcie *rockchip)
+{
+ struct device *dev = rockchip->pci.dev;
+ int ret;
+
+ rockchip->phy = devm_phy_get(dev, "pcie-phy");
+ if (IS_ERR(rockchip->phy))
+ return dev_err_probe(dev, PTR_ERR(rockchip->phy),
+ "missing PHY\n");
+
+ ret = phy_init(rockchip->phy);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_power_on(rockchip->phy);
+ if (ret)
+ phy_exit(rockchip->phy);
+
+ return ret;
+}
+
+static void rockchip_pcie_phy_deinit(struct rockchip_pcie *rockchip)
+{
+ phy_power_off(rockchip->phy);
+ phy_exit(rockchip->phy);
+}
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+ .link_up = rockchip_pcie_link_up,
+ .start_link = rockchip_pcie_start_link,
+ .stop_link = rockchip_pcie_stop_link,
+};
+
+static irqreturn_t rockchip_pcie_rc_sys_irq_thread(int irq, void *arg)
+{
+ struct rockchip_pcie *rockchip = arg;
+ struct dw_pcie *pci = &rockchip->pci;
+ struct dw_pcie_rp *pp = &pci->pp;
+ struct device *dev = pci->dev;
+ u32 reg;
+
+ reg = rockchip_pcie_readl_apb(rockchip, PCIE_CLIENT_INTR_STATUS_MISC);
+ rockchip_pcie_writel_apb(rockchip, reg, PCIE_CLIENT_INTR_STATUS_MISC);
+
+ dev_dbg(dev, "PCIE_CLIENT_INTR_STATUS_MISC: %#x\n", reg);
+ dev_dbg(dev, "LTSSM_STATUS: %#x\n", rockchip_pcie_get_ltssm(rockchip));
+
+ if (reg & PCIE_RDLH_LINK_UP_CHGED) {
+ if (rockchip_pcie_link_up(pci)) {
+ msleep(PCIE_RESET_CONFIG_WAIT_MS);
+ dev_dbg(dev, "Received Link up event. Starting enumeration!\n");
+ /* Rescan the bus to enumerate endpoint devices */
+ pci_lock_rescan_remove();
+ pci_rescan_bus(pp->bridge->bus);
+ pci_unlock_rescan_remove();
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t rockchip_pcie_ep_sys_irq_thread(int irq, void *arg)
+{
+ struct rockchip_pcie *rockchip = arg;
+ struct dw_pcie *pci = &rockchip->pci;
+ struct device *dev = pci->dev;
+ u32 reg, val;
+
+ reg = rockchip_pcie_readl_apb(rockchip, PCIE_CLIENT_INTR_STATUS_MISC);
+ rockchip_pcie_writel_apb(rockchip, reg, PCIE_CLIENT_INTR_STATUS_MISC);
+
+ dev_dbg(dev, "PCIE_CLIENT_INTR_STATUS_MISC: %#x\n", reg);
+ dev_dbg(dev, "LTSSM_STATUS: %#x\n", rockchip_pcie_get_ltssm(rockchip));
+
+ if (reg & PCIE_LINK_REQ_RST_NOT_INT) {
+ dev_dbg(dev, "hot reset or link-down reset\n");
+ dw_pcie_ep_linkdown(&pci->ep);
+ /* Stop delaying link training. */
+ val = FIELD_PREP_WM16(PCIE_LTSSM_APP_DLY2_DONE, 1);
+ rockchip_pcie_writel_apb(rockchip, val,
+ PCIE_CLIENT_HOT_RESET_CTRL);
+ }
+
+ if (reg & PCIE_RDLH_LINK_UP_CHGED) {
+ if (rockchip_pcie_link_up(pci)) {
+ dev_dbg(dev, "link up\n");
+ dw_pcie_ep_linkup(&pci->ep);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int rockchip_pcie_configure_rc(struct platform_device *pdev,
+ struct rockchip_pcie *rockchip)
+{
+ struct device *dev = &pdev->dev;
+ struct dw_pcie_rp *pp;
+ int irq, ret;
+ u32 val;
+
+ if (!IS_ENABLED(CONFIG_PCIE_ROCKCHIP_DW_HOST))
+ return -ENODEV;
+
+ irq = platform_get_irq_byname(pdev, "sys");
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_threaded_irq(dev, irq, NULL,
+ rockchip_pcie_rc_sys_irq_thread,
+ IRQF_ONESHOT, "pcie-sys-rc", rockchip);
+ if (ret) {
+ dev_err(dev, "failed to request PCIe sys IRQ\n");
+ return ret;
+ }
+
+ /* LTSSM enable control mode */
+ val = FIELD_PREP_WM16(PCIE_LTSSM_ENABLE_ENHANCE, 1);
+ rockchip_pcie_writel_apb(rockchip, val, PCIE_CLIENT_HOT_RESET_CTRL);
+
+ rockchip_pcie_writel_apb(rockchip,
+ PCIE_CLIENT_SET_MODE(PCIE_CLIENT_MODE_RC),
+ PCIE_CLIENT_GENERAL_CON);
+
+ pp = &rockchip->pci.pp;
+ pp->ops = &rockchip_pcie_host_ops;
+ pp->use_linkup_irq = true;
+
+ ret = dw_pcie_host_init(pp);
+ if (ret) {
+ dev_err(dev, "failed to initialize host\n");
+ return ret;
+ }
+
+ /* unmask DLL up/down indicator */
+ val = FIELD_PREP_WM16(PCIE_RDLH_LINK_UP_CHGED, 0);
+ rockchip_pcie_writel_apb(rockchip, val, PCIE_CLIENT_INTR_MASK_MISC);
+
+ return ret;
+}
+
+static int rockchip_pcie_configure_ep(struct platform_device *pdev,
+ struct rockchip_pcie *rockchip)
+{
+ struct device *dev = &pdev->dev;
+ int irq, ret;
+ u32 val;
+
+ if (!IS_ENABLED(CONFIG_PCIE_ROCKCHIP_DW_EP))
+ return -ENODEV;
+
+ irq = platform_get_irq_byname(pdev, "sys");
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_threaded_irq(dev, irq, NULL,
+ rockchip_pcie_ep_sys_irq_thread,
+ IRQF_ONESHOT, "pcie-sys-ep", rockchip);
+ if (ret) {
+ dev_err(dev, "failed to request PCIe sys IRQ\n");
+ return ret;
+ }
+
+ /*
+ * LTSSM enable control mode, and automatically delay link training on
+ * hot reset/link-down reset.
+ */
+ val = FIELD_PREP_WM16(PCIE_LTSSM_ENABLE_ENHANCE, 1) |
+ FIELD_PREP_WM16(PCIE_LTSSM_APP_DLY2_EN, 1);
+ rockchip_pcie_writel_apb(rockchip, val, PCIE_CLIENT_HOT_RESET_CTRL);
+
+ rockchip_pcie_writel_apb(rockchip,
+ PCIE_CLIENT_SET_MODE(PCIE_CLIENT_MODE_EP),
+ PCIE_CLIENT_GENERAL_CON);
+
+ rockchip->pci.ep.ops = &rockchip_pcie_ep_ops;
+ rockchip->pci.ep.page_size = SZ_64K;
+
+ dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
+
+ ret = dw_pcie_ep_init(&rockchip->pci.ep);
+ if (ret) {
+ dev_err(dev, "failed to initialize endpoint\n");
+ return ret;
+ }
+
+ ret = dw_pcie_ep_init_registers(&rockchip->pci.ep);
+ if (ret) {
+ dev_err(dev, "failed to initialize DWC endpoint registers\n");
+ dw_pcie_ep_deinit(&rockchip->pci.ep);
+ return ret;
+ }
+
+ pci_epc_init_notify(rockchip->pci.ep.epc);
+
+ /* unmask DLL up/down indicator and hot reset/link-down reset */
+ val = FIELD_PREP_WM16(PCIE_RDLH_LINK_UP_CHGED, 0) |
+ FIELD_PREP_WM16(PCIE_LINK_REQ_RST_NOT_INT, 0);
+ rockchip_pcie_writel_apb(rockchip, val, PCIE_CLIENT_INTR_MASK_MISC);
+
+ return ret;
+}
+
+static int rockchip_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rockchip_pcie *rockchip;
+ const struct rockchip_pcie_of_data *data;
+ int ret;
+
+ data = of_device_get_match_data(dev);
+ if (!data)
+ return -EINVAL;
+
+ rockchip = devm_kzalloc(dev, sizeof(*rockchip), GFP_KERNEL);
+ if (!rockchip)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, rockchip);
+
+ rockchip->pci.dev = dev;
+ rockchip->pci.ops = &dw_pcie_ops;
+ rockchip->data = data;
+
+ /* Default N_FTS value (210) is broken, override it to 255 */
+ rockchip->pci.n_fts[0] = 255; /* Gen1 */
+ rockchip->pci.n_fts[1] = 255; /* Gen2+ */
+
+ ret = rockchip_pcie_resource_get(pdev, rockchip);
+ if (ret)
+ return ret;
+
+ ret = reset_control_assert(rockchip->rst);
+ if (ret)
+ return ret;
+
+ /* DON'T MOVE ME: must be enabled before PHY init */
+ ret = devm_regulator_get_enable_optional(dev, "vpcie3v3");
+ if (ret < 0 && ret != -ENODEV)
+ return dev_err_probe(dev, ret,
+ "failed to enable vpcie3v3 regulator\n");
+
+ ret = rockchip_pcie_phy_init(rockchip);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to initialize the phy\n");
+
+ ret = reset_control_deassert(rockchip->rst);
+ if (ret)
+ goto deinit_phy;
+
+ ret = rockchip_pcie_clk_init(rockchip);
+ if (ret)
+ goto deinit_phy;
+
+ switch (data->mode) {
+ case DW_PCIE_RC_TYPE:
+ ret = rockchip_pcie_configure_rc(pdev, rockchip);
+ if (ret)
+ goto deinit_clk;
+ break;
+ case DW_PCIE_EP_TYPE:
+ ret = rockchip_pcie_configure_ep(pdev, rockchip);
+ if (ret)
+ goto deinit_clk;
+ break;
+ default:
+ dev_err(dev, "INVALID device type %d\n", data->mode);
+ ret = -EINVAL;
+ goto deinit_clk;
+ }
+
+ return 0;
+
+deinit_clk:
+ clk_bulk_disable_unprepare(rockchip->clk_cnt, rockchip->clks);
+deinit_phy:
+ rockchip_pcie_phy_deinit(rockchip);
+
+ return ret;
+}
+
+static const struct rockchip_pcie_of_data rockchip_pcie_rc_of_data_rk3568 = {
+ .mode = DW_PCIE_RC_TYPE,
+};
+
+static const struct rockchip_pcie_of_data rockchip_pcie_ep_of_data_rk3568 = {
+ .mode = DW_PCIE_EP_TYPE,
+ .epc_features = &rockchip_pcie_epc_features_rk3568,
+};
+
+static const struct rockchip_pcie_of_data rockchip_pcie_ep_of_data_rk3588 = {
+ .mode = DW_PCIE_EP_TYPE,
+ .epc_features = &rockchip_pcie_epc_features_rk3588,
+};
+
+static const struct of_device_id rockchip_pcie_of_match[] = {
+ {
+ .compatible = "rockchip,rk3568-pcie",
+ .data = &rockchip_pcie_rc_of_data_rk3568,
+ },
+ {
+ .compatible = "rockchip,rk3568-pcie-ep",
+ .data = &rockchip_pcie_ep_of_data_rk3568,
+ },
+ {
+ .compatible = "rockchip,rk3588-pcie-ep",
+ .data = &rockchip_pcie_ep_of_data_rk3588,
+ },
+ {},
+};
+
+static struct platform_driver rockchip_pcie_driver = {
+ .driver = {
+ .name = "rockchip-dw-pcie",
+ .of_match_table = rockchip_pcie_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = rockchip_pcie_probe,
+};
+builtin_platform_driver(rockchip_pcie_driver);
diff --git a/drivers/pci/controller/dwc/pcie-fu740.c b/drivers/pci/controller/dwc/pcie-fu740.c
new file mode 100644
index 000000000000..66367252032b
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-fu740.c
@@ -0,0 +1,357 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * FU740 DesignWare PCIe Controller integration
+ * Copyright (C) 2019-2021 SiFive, Inc.
+ * Paul Walmsley
+ * Greentime Hu
+ *
+ * Based in part on the i.MX6 PCIe host controller shim which is:
+ *
+ * Copyright (C) 2013 Kosagi
+ * https://www.kosagi.com
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/resource.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/reset.h>
+
+#include "pcie-designware.h"
+
+#define to_fu740_pcie(x) dev_get_drvdata((x)->dev)
+
+struct fu740_pcie {
+ struct dw_pcie pci;
+ void __iomem *mgmt_base;
+ struct gpio_desc *reset;
+ struct gpio_desc *pwren;
+ struct clk *pcie_aux;
+ struct reset_control *rst;
+};
+
+#define SIFIVE_DEVICESRESETREG 0x28
+
+#define PCIEX8MGMT_PERST_N 0x0
+#define PCIEX8MGMT_APP_LTSSM_ENABLE 0x10
+#define PCIEX8MGMT_APP_HOLD_PHY_RST 0x18
+#define PCIEX8MGMT_DEVICE_TYPE 0x708
+#define PCIEX8MGMT_PHY0_CR_PARA_ADDR 0x860
+#define PCIEX8MGMT_PHY0_CR_PARA_RD_EN 0x870
+#define PCIEX8MGMT_PHY0_CR_PARA_RD_DATA 0x878
+#define PCIEX8MGMT_PHY0_CR_PARA_SEL 0x880
+#define PCIEX8MGMT_PHY0_CR_PARA_WR_DATA 0x888
+#define PCIEX8MGMT_PHY0_CR_PARA_WR_EN 0x890
+#define PCIEX8MGMT_PHY0_CR_PARA_ACK 0x898
+#define PCIEX8MGMT_PHY1_CR_PARA_ADDR 0x8a0
+#define PCIEX8MGMT_PHY1_CR_PARA_RD_EN 0x8b0
+#define PCIEX8MGMT_PHY1_CR_PARA_RD_DATA 0x8b8
+#define PCIEX8MGMT_PHY1_CR_PARA_SEL 0x8c0
+#define PCIEX8MGMT_PHY1_CR_PARA_WR_DATA 0x8c8
+#define PCIEX8MGMT_PHY1_CR_PARA_WR_EN 0x8d0
+#define PCIEX8MGMT_PHY1_CR_PARA_ACK 0x8d8
+
+#define PCIEX8MGMT_PHY_CDR_TRACK_EN BIT(0)
+#define PCIEX8MGMT_PHY_LOS_THRSHLD BIT(5)
+#define PCIEX8MGMT_PHY_TERM_EN BIT(9)
+#define PCIEX8MGMT_PHY_TERM_ACDC BIT(10)
+#define PCIEX8MGMT_PHY_EN BIT(11)
+#define PCIEX8MGMT_PHY_INIT_VAL (PCIEX8MGMT_PHY_CDR_TRACK_EN|\
+ PCIEX8MGMT_PHY_LOS_THRSHLD|\
+ PCIEX8MGMT_PHY_TERM_EN|\
+ PCIEX8MGMT_PHY_TERM_ACDC|\
+ PCIEX8MGMT_PHY_EN)
+
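+/*
+ * PCIEX8MGMT_PHY_INIT_VAL is written to each lane's RX override register
+ * below to enable CDR tracking, raise the LOS threshold and select
+ * AC-coupled receiver termination. (Interpretation inferred from the field
+ * names; the PHY databook is the authoritative reference.)
+ */
+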
+#define PCIEX8MGMT_PHY_LANEN_DIG_ASIC_RX_OVRD_IN_3 0x1008
+#define PCIEX8MGMT_PHY_LANE_OFF 0x100
+#define PCIEX8MGMT_PHY_LANE0_BASE (PCIEX8MGMT_PHY_LANEN_DIG_ASIC_RX_OVRD_IN_3 + 0x100 * 0)
+#define PCIEX8MGMT_PHY_LANE1_BASE (PCIEX8MGMT_PHY_LANEN_DIG_ASIC_RX_OVRD_IN_3 + 0x100 * 1)
+#define PCIEX8MGMT_PHY_LANE2_BASE (PCIEX8MGMT_PHY_LANEN_DIG_ASIC_RX_OVRD_IN_3 + 0x100 * 2)
+#define PCIEX8MGMT_PHY_LANE3_BASE (PCIEX8MGMT_PHY_LANEN_DIG_ASIC_RX_OVRD_IN_3 + 0x100 * 3)
+
+static void fu740_pcie_assert_reset(struct fu740_pcie *afp)
+{
+ /* Assert PERST_N GPIO */
+ gpiod_set_value_cansleep(afp->reset, 0);
+ /* Assert controller PERST_N */
+ writel_relaxed(0x0, afp->mgmt_base + PCIEX8MGMT_PERST_N);
+}
+
+static void fu740_pcie_deassert_reset(struct fu740_pcie *afp)
+{
+ /* Deassert controller PERST_N */
+ writel_relaxed(0x1, afp->mgmt_base + PCIEX8MGMT_PERST_N);
+ /* Deassert PERST_N GPIO */
+ gpiod_set_value_cansleep(afp->reset, 1);
+}
+
+static void fu740_pcie_power_on(struct fu740_pcie *afp)
+{
+ gpiod_set_value_cansleep(afp->pwren, 1);
+ /*
+ * Ensure that PERST has been asserted for at least 100 ms.
+ * Section 2.2 of PCI Express Card Electromechanical Specification
+ * Revision 3.0
+ */
+ msleep(100);
+}
+
+static void fu740_pcie_drive_reset(struct fu740_pcie *afp)
+{
+ fu740_pcie_assert_reset(afp);
+ fu740_pcie_power_on(afp);
+ fu740_pcie_deassert_reset(afp);
+}
+
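+/*
+ * Write one PHY register over the cr_para interface. The handshake is:
+ * latch the register address and write data, raise wr_en, poll for the PHY
+ * to assert ack, then drop wr_en and poll for ack to deassert before the
+ * next access.
+ */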
+static void fu740_phyregwrite(const uint8_t phy, const uint16_t addr,
+ const uint16_t wrdata, struct fu740_pcie *afp)
+{
+ struct device *dev = afp->pci.dev;
+ void __iomem *phy_cr_para_addr;
+ void __iomem *phy_cr_para_wr_data;
+ void __iomem *phy_cr_para_wr_en;
+ void __iomem *phy_cr_para_ack;
+ int ret, val;
+
+ /* Setup */
+ if (phy) {
+ phy_cr_para_addr = afp->mgmt_base + PCIEX8MGMT_PHY1_CR_PARA_ADDR;
+ phy_cr_para_wr_data = afp->mgmt_base + PCIEX8MGMT_PHY1_CR_PARA_WR_DATA;
+ phy_cr_para_wr_en = afp->mgmt_base + PCIEX8MGMT_PHY1_CR_PARA_WR_EN;
+ phy_cr_para_ack = afp->mgmt_base + PCIEX8MGMT_PHY1_CR_PARA_ACK;
+ } else {
+ phy_cr_para_addr = afp->mgmt_base + PCIEX8MGMT_PHY0_CR_PARA_ADDR;
+ phy_cr_para_wr_data = afp->mgmt_base + PCIEX8MGMT_PHY0_CR_PARA_WR_DATA;
+ phy_cr_para_wr_en = afp->mgmt_base + PCIEX8MGMT_PHY0_CR_PARA_WR_EN;
+ phy_cr_para_ack = afp->mgmt_base + PCIEX8MGMT_PHY0_CR_PARA_ACK;
+ }
+
+ writel_relaxed(addr, phy_cr_para_addr);
+ writel_relaxed(wrdata, phy_cr_para_wr_data);
+ writel_relaxed(1, phy_cr_para_wr_en);
+
+ /* Wait for wait_idle */
+ ret = readl_poll_timeout(phy_cr_para_ack, val, val, 10, 5000);
+ if (ret)
+ dev_warn(dev, "Wait for wait_idle state failed!\n");
+
+ /* Clear */
+ writel_relaxed(0, phy_cr_para_wr_en);
+
+ /* Wait for ~wait_idle */
+ ret = readl_poll_timeout(phy_cr_para_ack, val, !val, 10, 5000);
+ if (ret)
+ dev_warn(dev, "Wait for !wait_idle state failed!\n");
+}
+
+static void fu740_pcie_init_phy(struct fu740_pcie *afp)
+{
+ /* Enable phy cr_para_sel interfaces */
+ writel_relaxed(0x1, afp->mgmt_base + PCIEX8MGMT_PHY0_CR_PARA_SEL);
+ writel_relaxed(0x1, afp->mgmt_base + PCIEX8MGMT_PHY1_CR_PARA_SEL);
+
+ /*
+ * Wait 10 cr_para cycles to guarantee that the registers are ready
+ * to be edited.
+ */
+ ndelay(10);
+
+ /* Set PHY AC termination mode */
+ fu740_phyregwrite(0, PCIEX8MGMT_PHY_LANE0_BASE, PCIEX8MGMT_PHY_INIT_VAL, afp);
+ fu740_phyregwrite(0, PCIEX8MGMT_PHY_LANE1_BASE, PCIEX8MGMT_PHY_INIT_VAL, afp);
+ fu740_phyregwrite(0, PCIEX8MGMT_PHY_LANE2_BASE, PCIEX8MGMT_PHY_INIT_VAL, afp);
+ fu740_phyregwrite(0, PCIEX8MGMT_PHY_LANE3_BASE, PCIEX8MGMT_PHY_INIT_VAL, afp);
+ fu740_phyregwrite(1, PCIEX8MGMT_PHY_LANE0_BASE, PCIEX8MGMT_PHY_INIT_VAL, afp);
+ fu740_phyregwrite(1, PCIEX8MGMT_PHY_LANE1_BASE, PCIEX8MGMT_PHY_INIT_VAL, afp);
+ fu740_phyregwrite(1, PCIEX8MGMT_PHY_LANE2_BASE, PCIEX8MGMT_PHY_INIT_VAL, afp);
+ fu740_phyregwrite(1, PCIEX8MGMT_PHY_LANE3_BASE, PCIEX8MGMT_PHY_INIT_VAL, afp);
+}
+
+static int fu740_pcie_start_link(struct dw_pcie *pci)
+{
+ struct device *dev = pci->dev;
+ struct fu740_pcie *afp = dev_get_drvdata(dev);
+ u8 cap_exp = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ int ret;
+ u32 orig, tmp;
+
+ /*
+	 * Force 2.5 GT/s when starting the link, as some devices do not
+	 * probe at higher speeds. This happens with the PCIe switch on the
+	 * Unmatched board when U-Boot has not initialised the PCIe
+	 * controller. The fix in U-Boot is to force 2.5 GT/s, which is then
+	 * cleared by the soft reset done by this driver.
+ */
+ dev_dbg(dev, "cap_exp at %x\n", cap_exp);
+ dw_pcie_dbi_ro_wr_en(pci);
+
+ tmp = dw_pcie_readl_dbi(pci, cap_exp + PCI_EXP_LNKCAP);
+ orig = tmp & PCI_EXP_LNKCAP_SLS;
+ tmp &= ~PCI_EXP_LNKCAP_SLS;
+ tmp |= PCI_EXP_LNKCAP_SLS_2_5GB;
+ dw_pcie_writel_dbi(pci, cap_exp + PCI_EXP_LNKCAP, tmp);
+
+ /* Enable LTSSM */
+ writel_relaxed(0x1, afp->mgmt_base + PCIEX8MGMT_APP_LTSSM_ENABLE);
+
+ ret = dw_pcie_wait_for_link(pci);
+ if (ret) {
+ dev_err(dev, "error: link did not start\n");
+ goto err;
+ }
+
+ tmp = dw_pcie_readl_dbi(pci, cap_exp + PCI_EXP_LNKCAP);
+ if ((tmp & PCI_EXP_LNKCAP_SLS) != orig) {
+ dev_dbg(dev, "changing speed back to original\n");
+
+ tmp &= ~PCI_EXP_LNKCAP_SLS;
+ tmp |= orig;
+ dw_pcie_writel_dbi(pci, cap_exp + PCI_EXP_LNKCAP, tmp);
+
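+		/*
+		 * Setting PORT_LOGIC_SPEED_CHANGE triggers the DWC directed
+		 * speed change, so the LTSSM retrains the link at the newly
+		 * restored target speed without a full reset.
+		 */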
+ tmp = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
+ tmp |= PORT_LOGIC_SPEED_CHANGE;
+ dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, tmp);
+
+ ret = dw_pcie_wait_for_link(pci);
+ if (ret) {
+ dev_err(dev, "error: link did not start at new speed\n");
+ goto err;
+ }
+ }
+
+ ret = 0;
+err:
+ WARN_ON(ret); /* we assume that errors will be very rare */
+ dw_pcie_dbi_ro_wr_dis(pci);
+ return ret;
+}
+
+static int fu740_pcie_host_init(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct fu740_pcie *afp = to_fu740_pcie(pci);
+ struct device *dev = pci->dev;
+ int ret;
+
+ /* Power on reset */
+ fu740_pcie_drive_reset(afp);
+
+ /* Enable pcieauxclk */
+ ret = clk_prepare_enable(afp->pcie_aux);
+ if (ret) {
+ dev_err(dev, "unable to enable pcie_aux clock\n");
+ return ret;
+ }
+
+ /*
+ * Assert hold_phy_rst (hold the controller LTSSM in reset after
+ * power_up_rst_n for register programming with cr_para)
+ */
+ writel_relaxed(0x1, afp->mgmt_base + PCIEX8MGMT_APP_HOLD_PHY_RST);
+
+ /* Deassert power_up_rst_n */
+ ret = reset_control_deassert(afp->rst);
+ if (ret) {
+ dev_err(dev, "unable to deassert pcie_power_up_rst_n\n");
+ return ret;
+ }
+
+ fu740_pcie_init_phy(afp);
+
+ /* Disable pcieauxclk */
+ clk_disable_unprepare(afp->pcie_aux);
+ /* Clear hold_phy_rst */
+ writel_relaxed(0x0, afp->mgmt_base + PCIEX8MGMT_APP_HOLD_PHY_RST);
+ /* Enable pcieauxclk */
+ clk_prepare_enable(afp->pcie_aux);
+ /* Set RC mode */
+ writel_relaxed(0x4, afp->mgmt_base + PCIEX8MGMT_DEVICE_TYPE);
+
+ return 0;
+}
+
+static const struct dw_pcie_host_ops fu740_pcie_host_ops = {
+ .init = fu740_pcie_host_init,
+};
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+ .start_link = fu740_pcie_start_link,
+};
+
+static int fu740_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct dw_pcie *pci;
+ struct fu740_pcie *afp;
+
+ afp = devm_kzalloc(dev, sizeof(*afp), GFP_KERNEL);
+ if (!afp)
+ return -ENOMEM;
+ pci = &afp->pci;
+ pci->dev = dev;
+ pci->ops = &dw_pcie_ops;
+ pci->pp.ops = &fu740_pcie_host_ops;
+ pci->pp.num_vectors = MAX_MSI_IRQS;
+
+ /* SiFive specific region: mgmt */
+ afp->mgmt_base = devm_platform_ioremap_resource_byname(pdev, "mgmt");
+ if (IS_ERR(afp->mgmt_base))
+ return PTR_ERR(afp->mgmt_base);
+
+ /* Fetch GPIOs */
+ afp->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(afp->reset))
+ return dev_err_probe(dev, PTR_ERR(afp->reset), "unable to get reset-gpios\n");
+
+ afp->pwren = devm_gpiod_get_optional(dev, "pwren", GPIOD_OUT_LOW);
+ if (IS_ERR(afp->pwren))
+ return dev_err_probe(dev, PTR_ERR(afp->pwren), "unable to get pwren-gpios\n");
+
+ /* Fetch clocks */
+ afp->pcie_aux = devm_clk_get(dev, "pcie_aux");
+ if (IS_ERR(afp->pcie_aux))
+ return dev_err_probe(dev, PTR_ERR(afp->pcie_aux),
+ "pcie_aux clock source missing or invalid\n");
+
+ /* Fetch reset */
+ afp->rst = devm_reset_control_get_exclusive(dev, NULL);
+ if (IS_ERR(afp->rst))
+ return dev_err_probe(dev, PTR_ERR(afp->rst), "unable to get reset\n");
+
+ platform_set_drvdata(pdev, afp);
+
+ return dw_pcie_host_init(&pci->pp);
+}
+
+static void fu740_pcie_shutdown(struct platform_device *pdev)
+{
+ struct fu740_pcie *afp = platform_get_drvdata(pdev);
+
+	/* Bring down the link so the bootloader gets a clean state on reboot */
+ fu740_pcie_assert_reset(afp);
+}
+
+static const struct of_device_id fu740_pcie_of_match[] = {
+ { .compatible = "sifive,fu740-pcie", },
+ {},
+};
+
+static struct platform_driver fu740_pcie_driver = {
+ .driver = {
+ .name = "fu740-pcie",
+ .of_match_table = fu740_pcie_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = fu740_pcie_probe,
+ .shutdown = fu740_pcie_shutdown,
+};
+
+builtin_platform_driver(fu740_pcie_driver);
diff --git a/drivers/pci/controller/dwc/pcie-hisi.c b/drivers/pci/controller/dwc/pcie-hisi.c
index 6d9e1b2b8f7b..3c17897e56fc 100644
--- a/drivers/pci/controller/dwc/pcie-hisi.c
+++ b/drivers/pci/controller/dwc/pcie-hisi.c
@@ -10,19 +10,19 @@
*/
#include <linux/interrupt.h>
#include <linux/init.h>
-#include <linux/mfd/syscon.h>
-#include <linux/of_address.h>
-#include <linux/of_pci.h>
#include <linux/platform_device.h>
-#include <linux/of_device.h>
#include <linux/pci.h>
#include <linux/pci-acpi.h>
#include <linux/pci-ecam.h>
-#include <linux/regmap.h>
#include "../../pci.h"
+#include "../pci-host-common.h"
#if defined(CONFIG_PCI_HISI) || (defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS))
+struct hisi_pcie {
+ void __iomem *reg_base;
+};
+
static int hisi_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
int size, u32 *val)
{
@@ -63,10 +63,10 @@ static void __iomem *hisi_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
int where)
{
struct pci_config_window *cfg = bus->sysdata;
- void __iomem *reg_base = cfg->priv;
+ struct hisi_pcie *pcie = cfg->priv;
if (bus->number == cfg->busr.start)
- return reg_base + where;
+ return pcie->reg_base + where;
else
return pci_ecam_map_bus(bus, devfn, where);
}
@@ -76,12 +76,16 @@ static void __iomem *hisi_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
static int hisi_pcie_init(struct pci_config_window *cfg)
{
struct device *dev = cfg->parent;
+ struct hisi_pcie *pcie;
struct acpi_device *adev = to_acpi_device(dev);
struct acpi_pci_root *root = acpi_driver_data(adev);
struct resource *res;
- void __iomem *reg_base;
int ret;
+ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+ if (!pcie)
+ return -ENOMEM;
+
/*
* Retrieve RC base and size from a HISI0081 device with _UID
* matching our segment.
@@ -96,16 +100,15 @@ static int hisi_pcie_init(struct pci_config_window *cfg)
return -ENOMEM;
}
- reg_base = devm_pci_remap_cfgspace(dev, res->start, resource_size(res));
- if (!reg_base)
+ pcie->reg_base = devm_pci_remap_cfgspace(dev, res->start, resource_size(res));
+ if (!pcie->reg_base)
return -ENOMEM;
- cfg->priv = reg_base;
+ cfg->priv = pcie;
return 0;
}
-struct pci_ecam_ops hisi_pcie_ops = {
- .bus_shift = 20,
+const struct pci_ecam_ops hisi_pcie_ops = {
.init = hisi_pcie_init,
.pci_ops = {
.map_bus = hisi_pcie_map_bus,
@@ -118,235 +121,16 @@ struct pci_ecam_ops hisi_pcie_ops = {
#ifdef CONFIG_PCI_HISI
-#include "pcie-designware.h"
-
-#define PCIE_SUBCTRL_SYS_STATE4_REG 0x6818
-#define PCIE_HIP06_CTRL_OFF 0x1000
-#define PCIE_SYS_STATE4 (PCIE_HIP06_CTRL_OFF + 0x31c)
-#define PCIE_LTSSM_LINKUP_STATE 0x11
-#define PCIE_LTSSM_STATE_MASK 0x3F
-
-#define to_hisi_pcie(x) dev_get_drvdata((x)->dev)
-
-struct hisi_pcie;
-
-struct pcie_soc_ops {
- int (*hisi_pcie_link_up)(struct hisi_pcie *hisi_pcie);
-};
-
-struct hisi_pcie {
- struct dw_pcie *pci;
- struct regmap *subctrl;
- u32 port_id;
- const struct pcie_soc_ops *soc_ops;
-};
-
-/* HipXX PCIe host only supports 32-bit config access */
-static int hisi_pcie_cfg_read(struct pcie_port *pp, int where, int size,
- u32 *val)
-{
- u32 reg;
- u32 reg_val;
- void *walker = &reg_val;
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
-
- walker += (where & 0x3);
- reg = where & ~0x3;
- reg_val = dw_pcie_readl_dbi(pci, reg);
-
- if (size == 1)
- *val = *(u8 __force *) walker;
- else if (size == 2)
- *val = *(u16 __force *) walker;
- else if (size == 4)
- *val = reg_val;
- else
- return PCIBIOS_BAD_REGISTER_NUMBER;
-
- return PCIBIOS_SUCCESSFUL;
-}
-
-/* HipXX PCIe host only supports 32-bit config access */
-static int hisi_pcie_cfg_write(struct pcie_port *pp, int where, int size,
- u32 val)
-{
- u32 reg_val;
- u32 reg;
- void *walker = &reg_val;
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
-
- walker += (where & 0x3);
- reg = where & ~0x3;
- if (size == 4)
- dw_pcie_writel_dbi(pci, reg, val);
- else if (size == 2) {
- reg_val = dw_pcie_readl_dbi(pci, reg);
- *(u16 __force *) walker = val;
- dw_pcie_writel_dbi(pci, reg, reg_val);
- } else if (size == 1) {
- reg_val = dw_pcie_readl_dbi(pci, reg);
- *(u8 __force *) walker = val;
- dw_pcie_writel_dbi(pci, reg, reg_val);
- } else
- return PCIBIOS_BAD_REGISTER_NUMBER;
-
- return PCIBIOS_SUCCESSFUL;
-}
-
-static int hisi_pcie_link_up_hip05(struct hisi_pcie *hisi_pcie)
-{
- u32 val;
-
- regmap_read(hisi_pcie->subctrl, PCIE_SUBCTRL_SYS_STATE4_REG +
- 0x100 * hisi_pcie->port_id, &val);
-
- return ((val & PCIE_LTSSM_STATE_MASK) == PCIE_LTSSM_LINKUP_STATE);
-}
-
-static int hisi_pcie_link_up_hip06(struct hisi_pcie *hisi_pcie)
-{
- struct dw_pcie *pci = hisi_pcie->pci;
- u32 val;
-
- val = dw_pcie_readl_dbi(pci, PCIE_SYS_STATE4);
-
- return ((val & PCIE_LTSSM_STATE_MASK) == PCIE_LTSSM_LINKUP_STATE);
-}
-
-static int hisi_pcie_link_up(struct dw_pcie *pci)
-{
- struct hisi_pcie *hisi_pcie = to_hisi_pcie(pci);
-
- return hisi_pcie->soc_ops->hisi_pcie_link_up(hisi_pcie);
-}
-
-static const struct dw_pcie_host_ops hisi_pcie_host_ops = {
- .rd_own_conf = hisi_pcie_cfg_read,
- .wr_own_conf = hisi_pcie_cfg_write,
-};
-
-static int hisi_add_pcie_port(struct hisi_pcie *hisi_pcie,
- struct platform_device *pdev)
-{
- struct dw_pcie *pci = hisi_pcie->pci;
- struct pcie_port *pp = &pci->pp;
- struct device *dev = &pdev->dev;
- int ret;
- u32 port_id;
-
- if (of_property_read_u32(dev->of_node, "port-id", &port_id)) {
- dev_err(dev, "failed to read port-id\n");
- return -EINVAL;
- }
- if (port_id > 3) {
- dev_err(dev, "Invalid port-id: %d\n", port_id);
- return -EINVAL;
- }
- hisi_pcie->port_id = port_id;
-
- pp->ops = &hisi_pcie_host_ops;
-
- ret = dw_pcie_host_init(pp);
- if (ret) {
- dev_err(dev, "failed to initialize host\n");
- return ret;
- }
-
- return 0;
-}
-
-static const struct dw_pcie_ops dw_pcie_ops = {
- .link_up = hisi_pcie_link_up,
-};
-
-static int hisi_pcie_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct dw_pcie *pci;
- struct hisi_pcie *hisi_pcie;
- struct resource *reg;
- int ret;
-
- hisi_pcie = devm_kzalloc(dev, sizeof(*hisi_pcie), GFP_KERNEL);
- if (!hisi_pcie)
- return -ENOMEM;
-
- pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
- if (!pci)
- return -ENOMEM;
-
- pci->dev = dev;
- pci->ops = &dw_pcie_ops;
-
- hisi_pcie->pci = pci;
-
- hisi_pcie->soc_ops = of_device_get_match_data(dev);
-
- hisi_pcie->subctrl =
- syscon_regmap_lookup_by_compatible("hisilicon,pcie-sas-subctrl");
- if (IS_ERR(hisi_pcie->subctrl)) {
- dev_err(dev, "cannot get subctrl base\n");
- return PTR_ERR(hisi_pcie->subctrl);
- }
-
- reg = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rc_dbi");
- pci->dbi_base = devm_pci_remap_cfg_resource(dev, reg);
- if (IS_ERR(pci->dbi_base))
- return PTR_ERR(pci->dbi_base);
- platform_set_drvdata(pdev, hisi_pcie);
-
- ret = hisi_add_pcie_port(hisi_pcie, pdev);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static struct pcie_soc_ops hip05_ops = {
- &hisi_pcie_link_up_hip05
-};
-
-static struct pcie_soc_ops hip06_ops = {
- &hisi_pcie_link_up_hip06
-};
-
-static const struct of_device_id hisi_pcie_of_match[] = {
- {
- .compatible = "hisilicon,hip05-pcie",
- .data = (void *) &hip05_ops,
- },
- {
- .compatible = "hisilicon,hip06-pcie",
- .data = (void *) &hip06_ops,
- },
- {},
-};
-
-static struct platform_driver hisi_pcie_driver = {
- .probe = hisi_pcie_probe,
- .driver = {
- .name = "hisi-pcie",
- .of_match_table = hisi_pcie_of_match,
- .suppress_bind_attrs = true,
- },
-};
-builtin_platform_driver(hisi_pcie_driver);
-
-static int hisi_pcie_almost_ecam_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct pci_ecam_ops *ops;
-
- ops = (struct pci_ecam_ops *)of_device_get_match_data(dev);
- return pci_host_common_probe(pdev, ops);
-}
-
static int hisi_pcie_platform_init(struct pci_config_window *cfg)
{
struct device *dev = cfg->parent;
+ struct hisi_pcie *pcie;
struct platform_device *pdev = to_platform_device(dev);
struct resource *res;
- void __iomem *reg_base;
+
+ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+ if (!pcie)
+ return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (!res) {
@@ -354,16 +138,15 @@ static int hisi_pcie_platform_init(struct pci_config_window *cfg)
return -EINVAL;
}
- reg_base = devm_pci_remap_cfgspace(dev, res->start, resource_size(res));
- if (!reg_base)
+ pcie->reg_base = devm_pci_remap_cfgspace(dev, res->start, resource_size(res));
+ if (!pcie->reg_base)
return -ENOMEM;
- cfg->priv = reg_base;
+ cfg->priv = pcie;
return 0;
}
-struct pci_ecam_ops hisi_pcie_platform_ops = {
- .bus_shift = 20,
+static const struct pci_ecam_ops hisi_pcie_platform_ops = {
.init = hisi_pcie_platform_init,
.pci_ops = {
.map_bus = hisi_pcie_map_bus,
@@ -375,17 +158,17 @@ struct pci_ecam_ops hisi_pcie_platform_ops = {
static const struct of_device_id hisi_pcie_almost_ecam_of_match[] = {
{
.compatible = "hisilicon,hip06-pcie-ecam",
- .data = (void *) &hisi_pcie_platform_ops,
+ .data = &hisi_pcie_platform_ops,
},
{
.compatible = "hisilicon,hip07-pcie-ecam",
- .data = (void *) &hisi_pcie_platform_ops,
+ .data = &hisi_pcie_platform_ops,
},
{},
};
static struct platform_driver hisi_pcie_almost_ecam_driver = {
- .probe = hisi_pcie_almost_ecam_probe,
+ .probe = pci_host_common_probe,
.driver = {
.name = "hisi-pcie-almost-ecam",
.of_match_table = hisi_pcie_almost_ecam_of_match,
diff --git a/drivers/pci/controller/dwc/pcie-histb.c b/drivers/pci/controller/dwc/pcie-histb.c
index 954bc2b74bbc..a52071589377 100644
--- a/drivers/pci/controller/dwc/pcie-histb.c
+++ b/drivers/pci/controller/dwc/pcie-histb.c
@@ -10,11 +10,11 @@
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
@@ -60,7 +60,7 @@ struct histb_pcie {
struct reset_control *sys_reset;
struct reset_control *bus_reset;
void __iomem *ctrl;
- int reset_gpio;
+ struct gpio_desc *reset_gpio;
struct regulator *vpcie;
};
@@ -74,7 +74,7 @@ static void histb_pcie_writel(struct histb_pcie *histb_pcie, u32 reg, u32 val)
writel(val, histb_pcie->ctrl + reg);
}
-static void histb_pcie_dbi_w_mode(struct pcie_port *pp, bool enable)
+static void histb_pcie_dbi_w_mode(struct dw_pcie_rp *pp, bool enable)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct histb_pcie *hipcie = to_histb_pcie(pci);
@@ -88,7 +88,7 @@ static void histb_pcie_dbi_w_mode(struct pcie_port *pp, bool enable)
histb_pcie_writel(hipcie, PCIE_SYS_CTRL0, val);
}
-static void histb_pcie_dbi_r_mode(struct pcie_port *pp, bool enable)
+static void histb_pcie_dbi_r_mode(struct dw_pcie_rp *pp, bool enable)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct histb_pcie *hipcie = to_histb_pcie(pci);
@@ -122,33 +122,36 @@ static void histb_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base,
histb_pcie_dbi_w_mode(&pci->pp, false);
}
-static int histb_pcie_rd_own_conf(struct pcie_port *pp, int where,
- int size, u32 *val)
+static int histb_pcie_rd_own_conf(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val)
{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- int ret;
+ struct dw_pcie *pci = to_dw_pcie_from_pp(bus->sysdata);
- histb_pcie_dbi_r_mode(pp, true);
- ret = dw_pcie_read(pci->dbi_base + where, size, val);
- histb_pcie_dbi_r_mode(pp, false);
+ if (PCI_SLOT(devfn))
+ return PCIBIOS_DEVICE_NOT_FOUND;
- return ret;
+ *val = dw_pcie_read_dbi(pci, where, size);
+ return PCIBIOS_SUCCESSFUL;
}
-static int histb_pcie_wr_own_conf(struct pcie_port *pp, int where,
- int size, u32 val)
+static int histb_pcie_wr_own_conf(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val)
{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- int ret;
+ struct dw_pcie *pci = to_dw_pcie_from_pp(bus->sysdata);
- histb_pcie_dbi_w_mode(pp, true);
- ret = dw_pcie_write(pci->dbi_base + where, size, val);
- histb_pcie_dbi_w_mode(pp, false);
+ if (PCI_SLOT(devfn))
+ return PCIBIOS_DEVICE_NOT_FOUND;
- return ret;
+ dw_pcie_write_dbi(pci, where, size, val);
+ return PCIBIOS_SUCCESSFUL;
}
-static int histb_pcie_link_up(struct dw_pcie *pci)
+static struct pci_ops histb_pci_ops = {
+ .read = histb_pcie_rd_own_conf,
+ .write = histb_pcie_wr_own_conf,
+};
+
+static bool histb_pcie_link_up(struct dw_pcie *pci)
{
struct histb_pcie *hipcie = to_histb_pcie(pci);
u32 regval;
@@ -157,55 +160,42 @@ static int histb_pcie_link_up(struct dw_pcie *pci)
regval = histb_pcie_readl(hipcie, PCIE_SYS_STAT0);
status = histb_pcie_readl(hipcie, PCIE_SYS_STAT4);
status &= PCIE_LTSSM_STATE_MASK;
- if ((regval & PCIE_XMLH_LINK_UP) && (regval & PCIE_RDLH_LINK_UP) &&
- (status == PCIE_LTSSM_STATE_ACTIVE))
- return 1;
-
- return 0;
+ return ((regval & PCIE_XMLH_LINK_UP) && (regval & PCIE_RDLH_LINK_UP) &&
+ (status == PCIE_LTSSM_STATE_ACTIVE));
}
-static int histb_pcie_establish_link(struct pcie_port *pp)
+static int histb_pcie_start_link(struct dw_pcie *pci)
{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct histb_pcie *hipcie = to_histb_pcie(pci);
u32 regval;
- if (dw_pcie_link_up(pci)) {
- dev_info(pci->dev, "Link already up\n");
- return 0;
- }
-
- /* PCIe RC work mode */
- regval = histb_pcie_readl(hipcie, PCIE_SYS_CTRL0);
- regval &= ~PCIE_DEVICE_TYPE_MASK;
- regval |= PCIE_WM_RC;
- histb_pcie_writel(hipcie, PCIE_SYS_CTRL0, regval);
-
- /* setup root complex */
- dw_pcie_setup_rc(pp);
-
/* assert LTSSM enable */
regval = histb_pcie_readl(hipcie, PCIE_SYS_CTRL7);
regval |= PCIE_APP_LTSSM_ENABLE;
histb_pcie_writel(hipcie, PCIE_SYS_CTRL7, regval);
- return dw_pcie_wait_for_link(pci);
+ return 0;
}
-static int histb_pcie_host_init(struct pcie_port *pp)
+static int histb_pcie_host_init(struct dw_pcie_rp *pp)
{
- histb_pcie_establish_link(pp);
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct histb_pcie *hipcie = to_histb_pcie(pci);
+ u32 regval;
- if (IS_ENABLED(CONFIG_PCI_MSI))
- dw_pcie_msi_init(pp);
+ pp->bridge->ops = &histb_pci_ops;
+
+ /* PCIe RC work mode */
+ regval = histb_pcie_readl(hipcie, PCIE_SYS_CTRL0);
+ regval &= ~PCIE_DEVICE_TYPE_MASK;
+ regval |= PCIE_WM_RC;
+ histb_pcie_writel(hipcie, PCIE_SYS_CTRL0, regval);
return 0;
}
static const struct dw_pcie_host_ops histb_pcie_host_ops = {
- .rd_own_conf = histb_pcie_rd_own_conf,
- .wr_own_conf = histb_pcie_wr_own_conf,
- .host_init = histb_pcie_host_init,
+ .init = histb_pcie_host_init,
};
static void histb_pcie_host_disable(struct histb_pcie *hipcie)
@@ -219,14 +209,14 @@ static void histb_pcie_host_disable(struct histb_pcie *hipcie)
clk_disable_unprepare(hipcie->sys_clk);
clk_disable_unprepare(hipcie->bus_clk);
- if (gpio_is_valid(hipcie->reset_gpio))
- gpio_set_value_cansleep(hipcie->reset_gpio, 0);
+ if (hipcie->reset_gpio)
+ gpiod_set_value_cansleep(hipcie->reset_gpio, 1);
if (hipcie->vpcie)
regulator_disable(hipcie->vpcie);
}
-static int histb_pcie_host_enable(struct pcie_port *pp)
+static int histb_pcie_host_enable(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct histb_pcie *hipcie = to_histb_pcie(pci);
@@ -242,8 +232,8 @@ static int histb_pcie_host_enable(struct pcie_port *pp)
}
}
- if (gpio_is_valid(hipcie->reset_gpio))
- gpio_set_value_cansleep(hipcie->reset_gpio, 1);
+ if (hipcie->reset_gpio)
+ gpiod_set_value_cansleep(hipcie->reset_gpio, 0);
ret = clk_prepare_enable(hipcie->bus_clk);
if (ret) {
@@ -297,18 +287,15 @@ static const struct dw_pcie_ops dw_pcie_ops = {
.read_dbi = histb_pcie_read_dbi,
.write_dbi = histb_pcie_write_dbi,
.link_up = histb_pcie_link_up,
+ .start_link = histb_pcie_start_link,
};
static int histb_pcie_probe(struct platform_device *pdev)
{
struct histb_pcie *hipcie;
struct dw_pcie *pci;
- struct pcie_port *pp;
- struct resource *res;
- struct device_node *np = pdev->dev.of_node;
+ struct dw_pcie_rp *pp;
struct device *dev = &pdev->dev;
- enum of_gpio_flags of_flags;
- unsigned long flag = GPIOF_DIR_OUT;
int ret;
hipcie = devm_kzalloc(dev, sizeof(*hipcie), GFP_KERNEL);
@@ -324,15 +311,13 @@ static int histb_pcie_probe(struct platform_device *pdev)
pci->dev = dev;
pci->ops = &dw_pcie_ops;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "control");
- hipcie->ctrl = devm_ioremap_resource(dev, res);
+ hipcie->ctrl = devm_platform_ioremap_resource_byname(pdev, "control");
if (IS_ERR(hipcie->ctrl)) {
dev_err(dev, "cannot get control reg base\n");
return PTR_ERR(hipcie->ctrl);
}
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rc-dbi");
- pci->dbi_base = devm_ioremap_resource(dev, res);
+ pci->dbi_base = devm_platform_ioremap_resource_byname(pdev, "rc-dbi");
if (IS_ERR(pci->dbi_base)) {
dev_err(dev, "cannot get rc-dbi base\n");
return PTR_ERR(pci->dbi_base);
@@ -340,22 +325,24 @@ static int histb_pcie_probe(struct platform_device *pdev)
hipcie->vpcie = devm_regulator_get_optional(dev, "vpcie");
if (IS_ERR(hipcie->vpcie)) {
- if (PTR_ERR(hipcie->vpcie) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
+ if (PTR_ERR(hipcie->vpcie) != -ENODEV)
+ return PTR_ERR(hipcie->vpcie);
hipcie->vpcie = NULL;
}
- hipcie->reset_gpio = of_get_named_gpio_flags(np,
- "reset-gpios", 0, &of_flags);
- if (of_flags & OF_GPIO_ACTIVE_LOW)
- flag |= GPIOF_ACTIVE_LOW;
- if (gpio_is_valid(hipcie->reset_gpio)) {
- ret = devm_gpio_request_one(dev, hipcie->reset_gpio,
- flag, "PCIe device power control");
- if (ret) {
- dev_err(dev, "unable to request gpio\n");
- return ret;
- }
+ hipcie->reset_gpio = devm_gpiod_get_optional(dev, "reset",
+ GPIOD_OUT_HIGH);
+ ret = PTR_ERR_OR_ZERO(hipcie->reset_gpio);
+ if (ret) {
+ dev_err(dev, "unable to request reset gpio: %d\n", ret);
+ return ret;
+ }
+
+ ret = gpiod_set_consumer_name(hipcie->reset_gpio,
+ "PCIe device power control");
+ if (ret) {
+ dev_err(dev, "unable to set reset gpio name: %d\n", ret);
+ return ret;
}
hipcie->aux_clk = devm_clk_get(dev, "aux");
@@ -400,14 +387,6 @@ static int histb_pcie_probe(struct platform_device *pdev)
return PTR_ERR(hipcie->bus_reset);
}
- if (IS_ENABLED(CONFIG_PCI_MSI)) {
- pp->msi_irq = platform_get_irq_byname(pdev, "msi");
- if (pp->msi_irq < 0) {
- dev_err(dev, "Failed to get MSI IRQ\n");
- return pp->msi_irq;
- }
- }
-
hipcie->phy = devm_phy_get(dev, "phy");
if (IS_ERR(hipcie->phy)) {
dev_info(dev, "no pcie-phy found\n");
@@ -427,28 +406,30 @@ static int histb_pcie_probe(struct platform_device *pdev)
ret = histb_pcie_host_enable(pp);
if (ret) {
dev_err(dev, "failed to enable host\n");
- return ret;
+ goto err_exit_phy;
}
ret = dw_pcie_host_init(pp);
if (ret) {
dev_err(dev, "failed to initialize host\n");
- return ret;
+ goto err_exit_phy;
}
return 0;
+
+err_exit_phy:
+ phy_exit(hipcie->phy);
+
+ return ret;
}
-static int histb_pcie_remove(struct platform_device *pdev)
+static void histb_pcie_remove(struct platform_device *pdev)
{
struct histb_pcie *hipcie = platform_get_drvdata(pdev);
histb_pcie_host_disable(hipcie);
- if (hipcie->phy)
- phy_exit(hipcie->phy);
-
- return 0;
+ phy_exit(hipcie->phy);
}
static const struct of_device_id histb_pcie_of_match[] = {
@@ -459,7 +440,7 @@ MODULE_DEVICE_TABLE(of, histb_pcie_of_match);
static struct platform_driver histb_pcie_platform_driver = {
.probe = histb_pcie_probe,
- .remove = histb_pcie_remove,
+ .remove = histb_pcie_remove,
.driver = {
.name = "histb-pcie",
.of_match_table = histb_pcie_of_match,
@@ -468,4 +449,3 @@ static struct platform_driver histb_pcie_platform_driver = {
module_platform_driver(histb_pcie_platform_driver);
MODULE_DESCRIPTION("HiSilicon STB PCIe host controller driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/dwc/pcie-intel-gw.c b/drivers/pci/controller/dwc/pcie-intel-gw.c
new file mode 100644
index 000000000000..c21906eced61
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-intel-gw.c
@@ -0,0 +1,447 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host controller driver for Intel Gateway SoCs
+ *
+ * Copyright (c) 2019 Intel Corporation.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/gpio/consumer.h>
+#include <linux/iopoll.h>
+#include <linux/mod_devicetable.h>
+#include <linux/pci_regs.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/reset.h>
+
+#include "../../pci.h"
+#include "pcie-designware.h"
+
+#define PORT_AFR_N_FTS_GEN12_DFT (SZ_128 - 1)
+#define PORT_AFR_N_FTS_GEN3 180
+#define PORT_AFR_N_FTS_GEN4 196
+
+/* PCIe Application logic Registers */
+#define PCIE_APP_CCR 0x10
+#define PCIE_APP_CCR_LTSSM_ENABLE BIT(0)
+
+#define PCIE_APP_MSG_CR 0x30
+#define PCIE_APP_MSG_XMT_PM_TURNOFF BIT(0)
+
+#define PCIE_APP_PMC 0x44
+#define PCIE_APP_PMC_IN_L2 BIT(20)
+
+#define PCIE_APP_IRNEN 0xF4
+#define PCIE_APP_IRNCR 0xF8
+#define PCIE_APP_IRN_AER_REPORT BIT(0)
+#define PCIE_APP_IRN_PME BIT(2)
+#define PCIE_APP_IRN_RX_VDM_MSG BIT(4)
+#define PCIE_APP_IRN_PM_TO_ACK BIT(9)
+#define PCIE_APP_IRN_LINK_AUTO_BW_STAT BIT(11)
+#define PCIE_APP_IRN_BW_MGT BIT(12)
+#define PCIE_APP_IRN_INTA BIT(13)
+#define PCIE_APP_IRN_INTB BIT(14)
+#define PCIE_APP_IRN_INTC BIT(15)
+#define PCIE_APP_IRN_INTD BIT(16)
+#define PCIE_APP_IRN_MSG_LTR BIT(18)
+#define PCIE_APP_IRN_SYS_ERR_RC BIT(29)
+#define PCIE_APP_INTX_OFST 12
+
+#define PCIE_APP_IRN_INT \
+ (PCIE_APP_IRN_AER_REPORT | PCIE_APP_IRN_PME | \
+ PCIE_APP_IRN_RX_VDM_MSG | PCIE_APP_IRN_SYS_ERR_RC | \
+ PCIE_APP_IRN_PM_TO_ACK | PCIE_APP_IRN_MSG_LTR | \
+ PCIE_APP_IRN_BW_MGT | PCIE_APP_IRN_LINK_AUTO_BW_STAT | \
+ PCIE_APP_IRN_INTA | PCIE_APP_IRN_INTB | \
+ PCIE_APP_IRN_INTC | PCIE_APP_IRN_INTD)
+
+#define RESET_INTERVAL_MS 100
+
+struct intel_pcie {
+ struct dw_pcie pci;
+ void __iomem *app_base;
+ struct gpio_desc *reset_gpio;
+ u32 rst_intrvl;
+ struct clk *core_clk;
+ struct reset_control *core_rst;
+ struct phy *phy;
+};
+
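+/*
+ * Read-modify-write helper used for both the APB and DBI register
+ * accessors below; it skips the write entirely when the masked update
+ * would leave the register value unchanged.
+ */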
+static void pcie_update_bits(void __iomem *base, u32 ofs, u32 mask, u32 val)
+{
+ u32 old;
+
+ old = readl(base + ofs);
+ val = (old & ~mask) | (val & mask);
+
+ if (val != old)
+ writel(val, base + ofs);
+}
+
+static inline void pcie_app_wr(struct intel_pcie *pcie, u32 ofs, u32 val)
+{
+ writel(val, pcie->app_base + ofs);
+}
+
+static void pcie_app_wr_mask(struct intel_pcie *pcie, u32 ofs,
+ u32 mask, u32 val)
+{
+ pcie_update_bits(pcie->app_base, ofs, mask, val);
+}
+
+static inline u32 pcie_rc_cfg_rd(struct intel_pcie *pcie, u32 ofs)
+{
+ return dw_pcie_readl_dbi(&pcie->pci, ofs);
+}
+
+static inline void pcie_rc_cfg_wr(struct intel_pcie *pcie, u32 ofs, u32 val)
+{
+ dw_pcie_writel_dbi(&pcie->pci, ofs, val);
+}
+
+static void pcie_rc_cfg_wr_mask(struct intel_pcie *pcie, u32 ofs,
+ u32 mask, u32 val)
+{
+ pcie_update_bits(pcie->pci.dbi_base, ofs, mask, val);
+}
+
+static void intel_pcie_ltssm_enable(struct intel_pcie *pcie)
+{
+ pcie_app_wr_mask(pcie, PCIE_APP_CCR, PCIE_APP_CCR_LTSSM_ENABLE,
+ PCIE_APP_CCR_LTSSM_ENABLE);
+}
+
+static void intel_pcie_ltssm_disable(struct intel_pcie *pcie)
+{
+ pcie_app_wr_mask(pcie, PCIE_APP_CCR, PCIE_APP_CCR_LTSSM_ENABLE, 0);
+}
+
+static void intel_pcie_link_setup(struct intel_pcie *pcie)
+{
+ u32 val;
+ u8 offset = dw_pcie_find_capability(&pcie->pci, PCI_CAP_ID_EXP);
+
+ val = pcie_rc_cfg_rd(pcie, offset + PCI_EXP_LNKCTL);
+
+ val &= ~(PCI_EXP_LNKCTL_LD | PCI_EXP_LNKCTL_ASPMC);
+ pcie_rc_cfg_wr(pcie, offset + PCI_EXP_LNKCTL, val);
+}
+
+static void intel_pcie_init_n_fts(struct dw_pcie *pci)
+{
+ switch (pci->max_link_speed) {
+ case 3:
+ pci->n_fts[1] = PORT_AFR_N_FTS_GEN3;
+ break;
+ case 4:
+ pci->n_fts[1] = PORT_AFR_N_FTS_GEN4;
+ break;
+ default:
+ pci->n_fts[1] = PORT_AFR_N_FTS_GEN12_DFT;
+ break;
+ }
+ pci->n_fts[0] = PORT_AFR_N_FTS_GEN12_DFT;
+}
+
+static int intel_pcie_ep_rst_init(struct intel_pcie *pcie)
+{
+ struct device *dev = pcie->pci.dev;
+ int ret;
+
+ pcie->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(pcie->reset_gpio)) {
+ ret = PTR_ERR(pcie->reset_gpio);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to request PCIe GPIO: %d\n", ret);
+ return ret;
+ }
+
+ /* Make initial reset last for 100us */
+ usleep_range(100, 200);
+
+ return 0;
+}
+
+static void intel_pcie_core_rst_assert(struct intel_pcie *pcie)
+{
+ reset_control_assert(pcie->core_rst);
+}
+
+static void intel_pcie_core_rst_deassert(struct intel_pcie *pcie)
+{
+ /*
+	 * A one-microsecond delay to make sure the reset pulse is
+	 * wide enough so that the core reset is clean.
+ */
+ udelay(1);
+ reset_control_deassert(pcie->core_rst);
+
+ /*
+	 * On some SoCs the core reset also resets the PHY, so more delay is
+	 * needed to make sure the reset process is done.
+ */
+ usleep_range(1000, 2000);
+}
+
+static void intel_pcie_device_rst_assert(struct intel_pcie *pcie)
+{
+ gpiod_set_value_cansleep(pcie->reset_gpio, 1);
+}
+
+static void intel_pcie_device_rst_deassert(struct intel_pcie *pcie)
+{
+ msleep(pcie->rst_intrvl);
+ gpiod_set_value_cansleep(pcie->reset_gpio, 0);
+}
+
+static void intel_pcie_core_irq_disable(struct intel_pcie *pcie)
+{
+ pcie_app_wr(pcie, PCIE_APP_IRNEN, 0);
+ pcie_app_wr(pcie, PCIE_APP_IRNCR, PCIE_APP_IRN_INT);
+}
+
+static int intel_pcie_get_resources(struct platform_device *pdev)
+{
+ struct intel_pcie *pcie = platform_get_drvdata(pdev);
+ struct dw_pcie *pci = &pcie->pci;
+ struct device *dev = pci->dev;
+ int ret;
+
+ pcie->core_clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(pcie->core_clk)) {
+ ret = PTR_ERR(pcie->core_clk);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get clks: %d\n", ret);
+ return ret;
+ }
+
+ pcie->core_rst = devm_reset_control_get(dev, NULL);
+ if (IS_ERR(pcie->core_rst)) {
+ ret = PTR_ERR(pcie->core_rst);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get resets: %d\n", ret);
+ return ret;
+ }
+
+ ret = device_property_read_u32(dev, "reset-assert-ms",
+ &pcie->rst_intrvl);
+ if (ret)
+ pcie->rst_intrvl = RESET_INTERVAL_MS;
+
+ pcie->app_base = devm_platform_ioremap_resource_byname(pdev, "app");
+ if (IS_ERR(pcie->app_base))
+ return PTR_ERR(pcie->app_base);
+
+ pcie->phy = devm_phy_get(dev, "pcie");
+ if (IS_ERR(pcie->phy)) {
+ ret = PTR_ERR(pcie->phy);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Couldn't get pcie-phy: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int intel_pcie_wait_l2(struct intel_pcie *pcie)
+{
+ u32 value;
+ int ret;
+ struct dw_pcie *pci = &pcie->pci;
+
+ if (pci->max_link_speed < 3)
+ return 0;
+
+ /* Send PME_TURN_OFF message */
+ pcie_app_wr_mask(pcie, PCIE_APP_MSG_CR, PCIE_APP_MSG_XMT_PM_TURNOFF,
+ PCIE_APP_MSG_XMT_PM_TURNOFF);
+
+ /* Read PMC status and wait for falling into L2 link state */
+ ret = readl_poll_timeout(pcie->app_base + PCIE_APP_PMC, value,
+ value & PCIE_APP_PMC_IN_L2, 20,
+ jiffies_to_usecs(5 * HZ));
+ if (ret)
+ dev_err(pcie->pci.dev, "PCIe link enter L2 timeout!\n");
+
+ return ret;
+}
+
+static void intel_pcie_turn_off(struct intel_pcie *pcie)
+{
+ if (dw_pcie_link_up(&pcie->pci))
+ intel_pcie_wait_l2(pcie);
+
+ /* Put endpoint device in reset state */
+ intel_pcie_device_rst_assert(pcie);
+ pcie_rc_cfg_wr_mask(pcie, PCI_COMMAND, PCI_COMMAND_MEMORY, 0);
+}
+
+static int intel_pcie_host_setup(struct intel_pcie *pcie)
+{
+ int ret;
+ struct dw_pcie *pci = &pcie->pci;
+
+ intel_pcie_core_rst_assert(pcie);
+ intel_pcie_device_rst_assert(pcie);
+
+ ret = phy_init(pcie->phy);
+ if (ret)
+ return ret;
+
+ intel_pcie_core_rst_deassert(pcie);
+
+ ret = clk_prepare_enable(pcie->core_clk);
+ if (ret) {
+ dev_err(pcie->pci.dev, "Core clock enable failed: %d\n", ret);
+ goto clk_err;
+ }
+
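+	/*
+	 * On this SoC the iATU register block sits at a fixed 0xC0000 offset
+	 * from the DBI base rather than being described as a separate
+	 * resource, so set atu_base by hand.
+	 */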
+ pci->atu_base = pci->dbi_base + 0xC0000;
+
+ intel_pcie_ltssm_disable(pcie);
+ intel_pcie_link_setup(pcie);
+ intel_pcie_init_n_fts(pci);
+
+ ret = dw_pcie_setup_rc(&pci->pp);
+ if (ret)
+ goto app_init_err;
+
+ dw_pcie_upconfig_setup(pci);
+
+ intel_pcie_device_rst_deassert(pcie);
+ intel_pcie_ltssm_enable(pcie);
+
+ ret = dw_pcie_wait_for_link(pci);
+ if (ret)
+ goto app_init_err;
+
+ /* Enable integrated interrupts */
+ pcie_app_wr_mask(pcie, PCIE_APP_IRNEN, PCIE_APP_IRN_INT,
+ PCIE_APP_IRN_INT);
+
+ return 0;
+
+app_init_err:
+ clk_disable_unprepare(pcie->core_clk);
+clk_err:
+ intel_pcie_core_rst_assert(pcie);
+ phy_exit(pcie->phy);
+
+ return ret;
+}
+
+static void __intel_pcie_remove(struct intel_pcie *pcie)
+{
+ intel_pcie_core_irq_disable(pcie);
+ intel_pcie_turn_off(pcie);
+ clk_disable_unprepare(pcie->core_clk);
+ intel_pcie_core_rst_assert(pcie);
+ phy_exit(pcie->phy);
+}
+
+static void intel_pcie_remove(struct platform_device *pdev)
+{
+ struct intel_pcie *pcie = platform_get_drvdata(pdev);
+ struct dw_pcie_rp *pp = &pcie->pci.pp;
+
+ dw_pcie_host_deinit(pp);
+ __intel_pcie_remove(pcie);
+}
+
+static int intel_pcie_suspend_noirq(struct device *dev)
+{
+ struct intel_pcie *pcie = dev_get_drvdata(dev);
+ int ret;
+
+ intel_pcie_core_irq_disable(pcie);
+ ret = intel_pcie_wait_l2(pcie);
+ if (ret)
+ return ret;
+
+ phy_exit(pcie->phy);
+ clk_disable_unprepare(pcie->core_clk);
+ return ret;
+}
+
+static int intel_pcie_resume_noirq(struct device *dev)
+{
+ struct intel_pcie *pcie = dev_get_drvdata(dev);
+
+ return intel_pcie_host_setup(pcie);
+}
+
+static int intel_pcie_rc_init(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct intel_pcie *pcie = dev_get_drvdata(pci->dev);
+
+ return intel_pcie_host_setup(pcie);
+}
+
+static const struct dw_pcie_ops intel_pcie_ops = {
+};
+
+static const struct dw_pcie_host_ops intel_pcie_dw_ops = {
+ .init = intel_pcie_rc_init,
+};
+
+static int intel_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct intel_pcie *pcie;
+ struct dw_pcie_rp *pp;
+ struct dw_pcie *pci;
+ int ret;
+
+ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+ if (!pcie)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, pcie);
+ pci = &pcie->pci;
+ pci->dev = dev;
+ pci->use_parent_dt_ranges = true;
+ pp = &pci->pp;
+
+ ret = intel_pcie_get_resources(pdev);
+ if (ret)
+ return ret;
+
+ ret = intel_pcie_ep_rst_init(pcie);
+ if (ret)
+ return ret;
+
+ pci->ops = &intel_pcie_ops;
+ pp->ops = &intel_pcie_dw_ops;
+
+ ret = dw_pcie_host_init(pp);
+ if (ret) {
+ dev_err(dev, "Cannot initialize host\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops intel_pcie_pm_ops = {
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(intel_pcie_suspend_noirq,
+ intel_pcie_resume_noirq)
+};
+
+static const struct of_device_id of_intel_pcie_match[] = {
+ { .compatible = "intel,lgm-pcie" },
+ {}
+};
+
+static struct platform_driver intel_pcie_driver = {
+ .probe = intel_pcie_probe,
+ .remove = intel_pcie_remove,
+ .driver = {
+ .name = "intel-gw-pcie",
+ .of_match_table = of_intel_pcie_match,
+ .pm = &intel_pcie_pm_ops,
+ },
+};
+builtin_platform_driver(intel_pcie_driver);
diff --git a/drivers/pci/controller/dwc/pcie-keembay.c b/drivers/pci/controller/dwc/pcie-keembay.c
new file mode 100644
index 000000000000..60e74ac782af
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-keembay.c
@@ -0,0 +1,483 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * PCIe controller driver for Intel Keem Bay
+ * Copyright (C) 2020 Intel Corporation
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/init.h>
+#include <linux/iopoll.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+
+#include "pcie-designware.h"
+
+/* PCIE_REGS_APB_SLV Registers */
+#define PCIE_REGS_PCIE_CFG 0x0004
+#define PCIE_DEVICE_TYPE BIT(8)
+#define PCIE_RSTN BIT(0)
+#define PCIE_REGS_PCIE_APP_CNTRL 0x0008
+#define APP_LTSSM_ENABLE BIT(0)
+#define PCIE_REGS_INTERRUPT_ENABLE 0x0028
+#define MSI_CTRL_INT_EN BIT(8)
+#define EDMA_INT_EN GENMASK(7, 0)
+#define PCIE_REGS_INTERRUPT_STATUS 0x002c
+#define MSI_CTRL_INT BIT(8)
+#define PCIE_REGS_PCIE_SII_PM_STATE 0x00b0
+#define SMLH_LINK_UP BIT(19)
+#define RDLH_LINK_UP BIT(8)
+#define PCIE_REGS_PCIE_SII_LINK_UP (SMLH_LINK_UP | RDLH_LINK_UP)
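+/*
+ * SMLH/RDLH mirror the standard DWC status bits: smlh_link_up reports the
+ * physical layer and rdlh_link_up the data link layer, so the port is only
+ * reported as linked up once both layers have trained.
+ */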
+#define PCIE_REGS_PCIE_PHY_CNTL 0x0164
+#define PHY0_SRAM_BYPASS BIT(8)
+#define PCIE_REGS_PCIE_PHY_STAT 0x0168
+#define PHY0_MPLLA_STATE BIT(1)
+#define PCIE_REGS_LJPLL_STA 0x016c
+#define LJPLL_LOCK BIT(0)
+#define PCIE_REGS_LJPLL_CNTRL_0 0x0170
+#define LJPLL_EN BIT(29)
+#define LJPLL_FOUT_EN GENMASK(24, 21)
+#define PCIE_REGS_LJPLL_CNTRL_2 0x0178
+#define LJPLL_REF_DIV GENMASK(17, 12)
+#define LJPLL_FB_DIV GENMASK(11, 0)
+#define PCIE_REGS_LJPLL_CNTRL_3 0x017c
+#define LJPLL_POST_DIV3A GENMASK(24, 22)
+#define LJPLL_POST_DIV2A GENMASK(18, 16)
+
+#define PERST_DELAY_US 1000
+#define AUX_CLK_RATE_HZ 24000000
+
+struct keembay_pcie {
+ struct dw_pcie pci;
+ void __iomem *apb_base;
+ enum dw_pcie_device_mode mode;
+
+ struct clk *clk_master;
+ struct clk *clk_aux;
+ struct gpio_desc *reset;
+};
+
+struct keembay_pcie_of_data {
+ enum dw_pcie_device_mode mode;
+};
+
+static void keembay_ep_reset_assert(struct keembay_pcie *pcie)
+{
+ gpiod_set_value_cansleep(pcie->reset, 1);
+ usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
+}
+
+static void keembay_ep_reset_deassert(struct keembay_pcie *pcie)
+{
+ /*
+ * Ensure that PERST# is asserted for a minimum of 100ms.
+ *
+ * For more details, refer to PCI Express Card Electromechanical
+ * Specification Revision 1.1, Table-2.4.
+ */
+ msleep(100);
+
+ gpiod_set_value_cansleep(pcie->reset, 0);
+ usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
+}
+
+static void keembay_pcie_ltssm_set(struct keembay_pcie *pcie, bool enable)
+{
+ u32 val;
+
+ val = readl(pcie->apb_base + PCIE_REGS_PCIE_APP_CNTRL);
+ if (enable)
+ val |= APP_LTSSM_ENABLE;
+ else
+ val &= ~APP_LTSSM_ENABLE;
+ writel(val, pcie->apb_base + PCIE_REGS_PCIE_APP_CNTRL);
+}
+
+static bool keembay_pcie_link_up(struct dw_pcie *pci)
+{
+ struct keembay_pcie *pcie = dev_get_drvdata(pci->dev);
+ u32 val;
+
+ val = readl(pcie->apb_base + PCIE_REGS_PCIE_SII_PM_STATE);
+
+ return (val & PCIE_REGS_PCIE_SII_LINK_UP) == PCIE_REGS_PCIE_SII_LINK_UP;
+}
+
+static int keembay_pcie_start_link(struct dw_pcie *pci)
+{
+ struct keembay_pcie *pcie = dev_get_drvdata(pci->dev);
+ u32 val;
+ int ret;
+
+ if (pcie->mode == DW_PCIE_EP_TYPE)
+ return 0;
+
+ keembay_pcie_ltssm_set(pcie, false);
+
+ ret = readl_poll_timeout(pcie->apb_base + PCIE_REGS_PCIE_PHY_STAT,
+ val, val & PHY0_MPLLA_STATE, 20,
+ 500 * USEC_PER_MSEC);
+ if (ret) {
+ dev_err(pci->dev, "MPLLA is not locked\n");
+ return ret;
+ }
+
+ keembay_pcie_ltssm_set(pcie, true);
+
+ return 0;
+}
+
+static void keembay_pcie_stop_link(struct dw_pcie *pci)
+{
+ struct keembay_pcie *pcie = dev_get_drvdata(pci->dev);
+
+ keembay_pcie_ltssm_set(pcie, false);
+}
+
+static const struct dw_pcie_ops keembay_pcie_ops = {
+ .link_up = keembay_pcie_link_up,
+ .start_link = keembay_pcie_start_link,
+ .stop_link = keembay_pcie_stop_link,
+};
+
+static inline void keembay_pcie_disable_clock(void *data)
+{
+ struct clk *clk = data;
+
+ clk_disable_unprepare(clk);
+}
+
+static inline struct clk *keembay_pcie_probe_clock(struct device *dev,
+ const char *id, u64 rate)
+{
+ struct clk *clk;
+ int ret;
+
+ clk = devm_clk_get(dev, id);
+ if (IS_ERR(clk))
+ return clk;
+
+ if (rate) {
+ ret = clk_set_rate(clk, rate);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = devm_add_action_or_reset(dev, keembay_pcie_disable_clock, clk);
+ if (ret)
+ return ERR_PTR(ret);
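+
+	/*
+	 * The disable callback registered above runs automatically when the
+	 * device is released, so callers never need to unprepare these
+	 * clocks by hand.
+	 */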
+
+ return clk;
+}
+
+static int keembay_pcie_probe_clocks(struct keembay_pcie *pcie)
+{
+ struct dw_pcie *pci = &pcie->pci;
+ struct device *dev = pci->dev;
+
+ pcie->clk_master = keembay_pcie_probe_clock(dev, "master", 0);
+ if (IS_ERR(pcie->clk_master))
+ return dev_err_probe(dev, PTR_ERR(pcie->clk_master),
+ "Failed to enable master clock");
+
+ pcie->clk_aux = keembay_pcie_probe_clock(dev, "aux", AUX_CLK_RATE_HZ);
+ if (IS_ERR(pcie->clk_aux))
+ return dev_err_probe(dev, PTR_ERR(pcie->clk_aux),
+ "Failed to enable auxiliary clock");
+
+ return 0;
+}
+
+/*
+ * Initialize the internal PCIe PLL in Host mode.
+ * See the following sections in Keem Bay data book,
+ * (1) 6.4.6.1 PCIe Subsystem Example Initialization,
+ * (2) 6.8 PCIe Low Jitter PLL for Ref Clk Generation.
+ */
+static int keembay_pcie_pll_init(struct keembay_pcie *pcie)
+{
+ struct dw_pcie *pci = &pcie->pci;
+ u32 val;
+ int ret;
+
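+	/*
+	 * The divider values below (reference divider 0, feedback divider
+	 * 0x32, post-dividers 0x2) come from the databook sections cited
+	 * above; presumably they derive the 100 MHz PCIe reference clock
+	 * from the 24 MHz input, but the exact formula is not repeated here.
+	 */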
+ val = FIELD_PREP(LJPLL_REF_DIV, 0) | FIELD_PREP(LJPLL_FB_DIV, 0x32);
+ writel(val, pcie->apb_base + PCIE_REGS_LJPLL_CNTRL_2);
+
+ val = FIELD_PREP(LJPLL_POST_DIV3A, 0x2) |
+ FIELD_PREP(LJPLL_POST_DIV2A, 0x2);
+ writel(val, pcie->apb_base + PCIE_REGS_LJPLL_CNTRL_3);
+
+ val = FIELD_PREP(LJPLL_EN, 0x1) | FIELD_PREP(LJPLL_FOUT_EN, 0xc);
+ writel(val, pcie->apb_base + PCIE_REGS_LJPLL_CNTRL_0);
+
+ ret = readl_poll_timeout(pcie->apb_base + PCIE_REGS_LJPLL_STA,
+ val, val & LJPLL_LOCK, 20,
+ 500 * USEC_PER_MSEC);
+ if (ret)
+ dev_err(pci->dev, "Low jitter PLL is not locked\n");
+
+ return ret;
+}
+
+static void keembay_pcie_msi_irq_handler(struct irq_desc *desc)
+{
+ struct keembay_pcie *pcie = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ u32 val, mask, status;
+ struct dw_pcie_rp *pp;
+
+ /*
+	 * The Keem Bay PCIe controller provides additional IP logic on top
+	 * of the standard DWC IP to clear an MSI IRQ by writing '1' to the
+	 * respective bit of the status register.
+	 *
+	 * A chained IRQ handler is therefore defined to handle this
+	 * additional IP logic.
+ */
+
+ chained_irq_enter(chip, desc);
+
+ pp = &pcie->pci.pp;
+ val = readl(pcie->apb_base + PCIE_REGS_INTERRUPT_STATUS);
+ mask = readl(pcie->apb_base + PCIE_REGS_INTERRUPT_ENABLE);
+
+ status = val & mask;
+
+ if (status & MSI_CTRL_INT) {
+ dw_handle_msi_irq(pp);
+ writel(status, pcie->apb_base + PCIE_REGS_INTERRUPT_STATUS);
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static int keembay_pcie_setup_msi_irq(struct keembay_pcie *pcie)
+{
+ struct dw_pcie *pci = &pcie->pci;
+ struct device *dev = pci->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ int irq;
+
+ irq = platform_get_irq_byname(pdev, "pcie");
+ if (irq < 0)
+ return irq;
+
+ irq_set_chained_handler_and_data(irq, keembay_pcie_msi_irq_handler,
+ pcie);
+
+ return 0;
+}
+
+static void keembay_pcie_ep_init(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct keembay_pcie *pcie = dev_get_drvdata(pci->dev);
+
+ writel(EDMA_INT_EN, pcie->apb_base + PCIE_REGS_INTERRUPT_ENABLE);
+}
+
+static int keembay_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
+ unsigned int type, u16 interrupt_num)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+ switch (type) {
+ case PCI_IRQ_INTX:
+ /* INTx interrupts are not supported in Keem Bay */
+ dev_err(pci->dev, "INTx IRQ is not supported\n");
+ return -EINVAL;
+ case PCI_IRQ_MSI:
+ return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
+ case PCI_IRQ_MSIX:
+ return dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num);
+ default:
+ dev_err(pci->dev, "Unknown IRQ type %d\n", type);
+ return -EINVAL;
+ }
+}
+
+static const struct pci_epc_features keembay_pcie_epc_features = {
+ .msi_capable = true,
+ .msix_capable = true,
+ .bar[BAR_0] = { .only_64bit = true, },
+ .bar[BAR_1] = { .type = BAR_RESERVED, },
+ .bar[BAR_2] = { .only_64bit = true, },
+ .bar[BAR_3] = { .type = BAR_RESERVED, },
+ .bar[BAR_4] = { .only_64bit = true, },
+ .bar[BAR_5] = { .type = BAR_RESERVED, },
+ .align = SZ_16K,
+};
+
+static const struct pci_epc_features *
+keembay_pcie_get_features(struct dw_pcie_ep *ep)
+{
+ return &keembay_pcie_epc_features;
+}
+
+static const struct dw_pcie_ep_ops keembay_pcie_ep_ops = {
+ .init = keembay_pcie_ep_init,
+ .raise_irq = keembay_pcie_ep_raise_irq,
+ .get_features = keembay_pcie_get_features,
+};
+
+static const struct dw_pcie_host_ops keembay_pcie_host_ops = {
+};
+
+static int keembay_pcie_add_pcie_port(struct keembay_pcie *pcie,
+ struct platform_device *pdev)
+{
+ struct dw_pcie *pci = &pcie->pci;
+ struct dw_pcie_rp *pp = &pci->pp;
+ struct device *dev = &pdev->dev;
+ u32 val;
+ int ret;
+
+ pp->ops = &keembay_pcie_host_ops;
+ pp->msi_irq[0] = -ENODEV;
+
+ ret = keembay_pcie_setup_msi_irq(pcie);
+ if (ret)
+ return ret;
+
+ pcie->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(pcie->reset))
+ return PTR_ERR(pcie->reset);
+
+ ret = keembay_pcie_probe_clocks(pcie);
+ if (ret)
+ return ret;
+
+ val = readl(pcie->apb_base + PCIE_REGS_PCIE_PHY_CNTL);
+ val |= PHY0_SRAM_BYPASS;
+ writel(val, pcie->apb_base + PCIE_REGS_PCIE_PHY_CNTL);
+
+ writel(PCIE_DEVICE_TYPE, pcie->apb_base + PCIE_REGS_PCIE_CFG);
+
+ ret = keembay_pcie_pll_init(pcie);
+ if (ret)
+ return ret;
+
+ val = readl(pcie->apb_base + PCIE_REGS_PCIE_CFG);
+ writel(val | PCIE_RSTN, pcie->apb_base + PCIE_REGS_PCIE_CFG);
+ keembay_ep_reset_deassert(pcie);
+
+ ret = dw_pcie_host_init(pp);
+ if (ret) {
+ keembay_ep_reset_assert(pcie);
+ dev_err(dev, "Failed to initialize host: %d\n", ret);
+ return ret;
+ }
+
+ val = readl(pcie->apb_base + PCIE_REGS_INTERRUPT_ENABLE);
+ if (IS_ENABLED(CONFIG_PCI_MSI))
+ val |= MSI_CTRL_INT_EN;
+ writel(val, pcie->apb_base + PCIE_REGS_INTERRUPT_ENABLE);
+
+ return 0;
+}
+
+static int keembay_pcie_probe(struct platform_device *pdev)
+{
+ const struct keembay_pcie_of_data *data;
+ struct device *dev = &pdev->dev;
+ struct keembay_pcie *pcie;
+ struct dw_pcie *pci;
+ enum dw_pcie_device_mode mode;
+ int ret;
+
+ data = device_get_match_data(dev);
+ if (!data)
+ return -ENODEV;
+
+ mode = (enum dw_pcie_device_mode)data->mode;
+
+ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+ if (!pcie)
+ return -ENOMEM;
+
+ pci = &pcie->pci;
+ pci->dev = dev;
+ pci->ops = &keembay_pcie_ops;
+
+ pcie->mode = mode;
+
+ pcie->apb_base = devm_platform_ioremap_resource_byname(pdev, "apb");
+ if (IS_ERR(pcie->apb_base))
+ return PTR_ERR(pcie->apb_base);
+
+ platform_set_drvdata(pdev, pcie);
+
+ switch (pcie->mode) {
+ case DW_PCIE_RC_TYPE:
+ if (!IS_ENABLED(CONFIG_PCIE_KEEMBAY_HOST))
+ return -ENODEV;
+
+ return keembay_pcie_add_pcie_port(pcie, pdev);
+ case DW_PCIE_EP_TYPE:
+ if (!IS_ENABLED(CONFIG_PCIE_KEEMBAY_EP))
+ return -ENODEV;
+
+ pci->ep.ops = &keembay_pcie_ep_ops;
+ ret = dw_pcie_ep_init(&pci->ep);
+ if (ret)
+ return ret;
+
+ ret = dw_pcie_ep_init_registers(&pci->ep);
+ if (ret) {
+ dev_err(dev, "Failed to initialize DWC endpoint registers\n");
+ dw_pcie_ep_deinit(&pci->ep);
+ return ret;
+ }
+
+ pci_epc_init_notify(pci->ep.epc);
+
+ break;
+ default:
+ dev_err(dev, "Invalid device type %d\n", pcie->mode);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static const struct keembay_pcie_of_data keembay_pcie_rc_of_data = {
+ .mode = DW_PCIE_RC_TYPE,
+};
+
+static const struct keembay_pcie_of_data keembay_pcie_ep_of_data = {
+ .mode = DW_PCIE_EP_TYPE,
+};
+
+static const struct of_device_id keembay_pcie_of_match[] = {
+ {
+ .compatible = "intel,keembay-pcie",
+ .data = &keembay_pcie_rc_of_data,
+ },
+ {
+ .compatible = "intel,keembay-pcie-ep",
+ .data = &keembay_pcie_ep_of_data,
+ },
+ {}
+};
+
+static struct platform_driver keembay_pcie_driver = {
+ .driver = {
+ .name = "keembay-pcie",
+ .of_match_table = keembay_pcie_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = keembay_pcie_probe,
+};
+builtin_platform_driver(keembay_pcie_driver);
diff --git a/drivers/pci/controller/dwc/pcie-kirin.c b/drivers/pci/controller/dwc/pcie-kirin.c
index 9b599296205d..91559c8b1866 100644
--- a/drivers/pci/controller/dwc/pcie-kirin.c
+++ b/drivers/pci/controller/dwc/pcie-kirin.c
@@ -2,22 +2,22 @@
/*
* PCIe host controller driver for Kirin Phone SoCs
*
- * Copyright (C) 2017 Hilisicon Electronics Co., Ltd.
- * http://www.huawei.com
+ * Copyright (C) 2017 HiSilicon Electronics Co., Ltd.
+ * https://www.huawei.com
*
* Author: Xiaowei Song <songxiaowei@huawei.com>
*/
-#include <linux/compiler.h>
#include <linux/clk.h>
+#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/err.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/mfd/syscon.h>
-#include <linux/of_address.h>
-#include <linux/of_gpio.h>
+#include <linux/of.h>
#include <linux/of_pci.h>
+#include <linux/phy/phy.h>
#include <linux/pci.h>
#include <linux/pci_regs.h>
#include <linux/platform_device.h>
@@ -28,26 +28,16 @@
#define to_kirin_pcie(x) dev_get_drvdata((x)->dev)
-#define REF_CLK_FREQ 100000000
-
/* PCIe ELBI registers */
#define SOC_PCIECTRL_CTRL0_ADDR 0x000
#define SOC_PCIECTRL_CTRL1_ADDR 0x004
-#define SOC_PCIEPHY_CTRL2_ADDR 0x008
-#define SOC_PCIEPHY_CTRL3_ADDR 0x00c
#define PCIE_ELBI_SLV_DBI_ENABLE (0x1 << 21)
/* info located in APB */
#define PCIE_APP_LTSSM_ENABLE 0x01c
-#define PCIE_APB_PHY_CTRL0 0x0
-#define PCIE_APB_PHY_CTRL1 0x4
#define PCIE_APB_PHY_STATUS0 0x400
#define PCIE_LINKUP_ENABLE (0x8020)
#define PCIE_LTSSM_ENABLE_BIT (0x1 << 11)
-#define PIPE_CLK_STABLE (0x1 << 19)
-#define PHY_REF_PAD_BIT (0x1 << 8)
-#define PHY_PWR_DOWN_BIT (0x1 << 22)
-#define PHY_RST_ACK_BIT (0x1 << 16)
/* info located in sysctrl */
#define SCTRL_PCIE_CMOS_OFFSET 0x60
@@ -60,17 +50,70 @@
#define PCIE_DEBOUNCE_PARAM 0xF0F400
#define PCIE_OE_BYPASS (0x3 << 28)
+/*
+ * Max number of connected PCI slots at an external PCI bridge
+ *
+ * This is used on HiKey 970, which has a PEX 8606 bridge with 4 connected
+ * lanes (lane 0 upstream; of the other three lanes, one is connected to
+ * an on-board Ethernet adapter and the other two to M.2 and mini-PCIe
+ * slots).
+ *
+ * Each slot has a different clock source and uses a separate PERST# pin.
+ */
+#define MAX_PCI_SLOTS 3
+
+enum pcie_kirin_phy_type {
+ PCIE_KIRIN_INTERNAL_PHY,
+ PCIE_KIRIN_EXTERNAL_PHY
+};
+
+struct kirin_pcie {
+ enum pcie_kirin_phy_type type;
+
+ struct dw_pcie *pci;
+ struct regmap *apb;
+ struct phy *phy;
+ void *phy_priv; /* only for PCIE_KIRIN_INTERNAL_PHY */
+
+ /* DWC PERST# */
+ struct gpio_desc *id_dwc_perst_gpio;
+
+ /* Per-slot PERST# */
+ int num_slots;
+ struct gpio_desc *id_reset_gpio[MAX_PCI_SLOTS];
+ const char *reset_names[MAX_PCI_SLOTS];
+
+ /* Per-slot clkreq */
+ int n_gpio_clkreq;
+ struct gpio_desc *id_clkreq_gpio[MAX_PCI_SLOTS];
+ const char *clkreq_names[MAX_PCI_SLOTS];
+};
+
+/*
+ * Kirin 960 PHY. Can't be split into a PHY driver without changing the
+ * DT schema.
+ */
+
+#define REF_CLK_FREQ 100000000
+
+/* PHY info located in APB */
+#define PCIE_APB_PHY_CTRL0 0x0
+#define PCIE_APB_PHY_CTRL1 0x4
+#define PCIE_APB_PHY_STATUS0 0x400
+#define PIPE_CLK_STABLE BIT(19)
+#define PHY_REF_PAD_BIT BIT(8)
+#define PHY_PWR_DOWN_BIT BIT(22)
+#define PHY_RST_ACK_BIT BIT(16)
+
/* peri_crg ctrl */
#define CRGCTRL_PCIE_ASSERT_OFFSET 0x88
#define CRGCTRL_PCIE_ASSERT_BIT 0x8c000000
/* Time for delay */
-#define REF_2_PERST_MIN 20000
+#define REF_2_PERST_MIN 21000
#define REF_2_PERST_MAX 25000
#define PERST_2_ACCESS_MIN 10000
#define PERST_2_ACCESS_MAX 12000
-#define LINK_WAIT_MIN 900
-#define LINK_WAIT_MAX 1000
#define PIPE_CLK_WAIT_MIN 550
#define PIPE_CLK_WAIT_MAX 600
#define TIME_CMOS_MIN 100
@@ -78,233 +121,366 @@
#define TIME_PHY_PD_MIN 10
#define TIME_PHY_PD_MAX 11
-struct kirin_pcie {
- struct dw_pcie *pci;
- void __iomem *apb_base;
- void __iomem *phy_base;
+struct hi3660_pcie_phy {
+ struct device *dev;
+ void __iomem *base;
struct regmap *crgctrl;
struct regmap *sysctrl;
struct clk *apb_sys_clk;
struct clk *apb_phy_clk;
struct clk *phy_ref_clk;
- struct clk *pcie_aclk;
- struct clk *pcie_aux_clk;
- int gpio_id_reset;
+ struct clk *aclk;
+ struct clk *aux_clk;
};
-/* Registers in PCIeCTRL */
-static inline void kirin_apb_ctrl_writel(struct kirin_pcie *kirin_pcie,
- u32 val, u32 reg)
-{
- writel(val, kirin_pcie->apb_base + reg);
-}
-
-static inline u32 kirin_apb_ctrl_readl(struct kirin_pcie *kirin_pcie, u32 reg)
-{
- return readl(kirin_pcie->apb_base + reg);
-}
-
/* Registers in PCIePHY */
-static inline void kirin_apb_phy_writel(struct kirin_pcie *kirin_pcie,
+static inline void kirin_apb_phy_writel(struct hi3660_pcie_phy *hi3660_pcie_phy,
u32 val, u32 reg)
{
- writel(val, kirin_pcie->phy_base + reg);
+ writel(val, hi3660_pcie_phy->base + reg);
}
-static inline u32 kirin_apb_phy_readl(struct kirin_pcie *kirin_pcie, u32 reg)
+static inline u32 kirin_apb_phy_readl(struct hi3660_pcie_phy *hi3660_pcie_phy,
+ u32 reg)
{
- return readl(kirin_pcie->phy_base + reg);
+ return readl(hi3660_pcie_phy->base + reg);
}
-static long kirin_pcie_get_clk(struct kirin_pcie *kirin_pcie,
- struct platform_device *pdev)
+static int hi3660_pcie_phy_get_clk(struct hi3660_pcie_phy *phy)
{
- struct device *dev = &pdev->dev;
+ struct device *dev = phy->dev;
- kirin_pcie->phy_ref_clk = devm_clk_get(dev, "pcie_phy_ref");
- if (IS_ERR(kirin_pcie->phy_ref_clk))
- return PTR_ERR(kirin_pcie->phy_ref_clk);
+ phy->phy_ref_clk = devm_clk_get(dev, "pcie_phy_ref");
+ if (IS_ERR(phy->phy_ref_clk))
+ return PTR_ERR(phy->phy_ref_clk);
- kirin_pcie->pcie_aux_clk = devm_clk_get(dev, "pcie_aux");
- if (IS_ERR(kirin_pcie->pcie_aux_clk))
- return PTR_ERR(kirin_pcie->pcie_aux_clk);
+ phy->aux_clk = devm_clk_get(dev, "pcie_aux");
+ if (IS_ERR(phy->aux_clk))
+ return PTR_ERR(phy->aux_clk);
- kirin_pcie->apb_phy_clk = devm_clk_get(dev, "pcie_apb_phy");
- if (IS_ERR(kirin_pcie->apb_phy_clk))
- return PTR_ERR(kirin_pcie->apb_phy_clk);
+ phy->apb_phy_clk = devm_clk_get(dev, "pcie_apb_phy");
+ if (IS_ERR(phy->apb_phy_clk))
+ return PTR_ERR(phy->apb_phy_clk);
- kirin_pcie->apb_sys_clk = devm_clk_get(dev, "pcie_apb_sys");
- if (IS_ERR(kirin_pcie->apb_sys_clk))
- return PTR_ERR(kirin_pcie->apb_sys_clk);
+ phy->apb_sys_clk = devm_clk_get(dev, "pcie_apb_sys");
+ if (IS_ERR(phy->apb_sys_clk))
+ return PTR_ERR(phy->apb_sys_clk);
- kirin_pcie->pcie_aclk = devm_clk_get(dev, "pcie_aclk");
- if (IS_ERR(kirin_pcie->pcie_aclk))
- return PTR_ERR(kirin_pcie->pcie_aclk);
+ phy->aclk = devm_clk_get(dev, "pcie_aclk");
+ if (IS_ERR(phy->aclk))
+ return PTR_ERR(phy->aclk);
return 0;
}
-static long kirin_pcie_get_resource(struct kirin_pcie *kirin_pcie,
- struct platform_device *pdev)
+static int hi3660_pcie_phy_get_resource(struct hi3660_pcie_phy *phy)
{
- struct device *dev = &pdev->dev;
- struct resource *apb;
- struct resource *phy;
- struct resource *dbi;
-
- apb = platform_get_resource_byname(pdev, IORESOURCE_MEM, "apb");
- kirin_pcie->apb_base = devm_ioremap_resource(dev, apb);
- if (IS_ERR(kirin_pcie->apb_base))
- return PTR_ERR(kirin_pcie->apb_base);
-
- phy = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy");
- kirin_pcie->phy_base = devm_ioremap_resource(dev, phy);
- if (IS_ERR(kirin_pcie->phy_base))
- return PTR_ERR(kirin_pcie->phy_base);
-
- dbi = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
- kirin_pcie->pci->dbi_base = devm_ioremap_resource(dev, dbi);
- if (IS_ERR(kirin_pcie->pci->dbi_base))
- return PTR_ERR(kirin_pcie->pci->dbi_base);
-
- kirin_pcie->crgctrl =
- syscon_regmap_lookup_by_compatible("hisilicon,hi3660-crgctrl");
- if (IS_ERR(kirin_pcie->crgctrl))
- return PTR_ERR(kirin_pcie->crgctrl);
-
- kirin_pcie->sysctrl =
- syscon_regmap_lookup_by_compatible("hisilicon,hi3660-sctrl");
- if (IS_ERR(kirin_pcie->sysctrl))
- return PTR_ERR(kirin_pcie->sysctrl);
+ struct device *dev = phy->dev;
+ struct platform_device *pdev;
+
+ /* registers */
+ pdev = container_of(dev, struct platform_device, dev);
+
+ phy->base = devm_platform_ioremap_resource_byname(pdev, "phy");
+ if (IS_ERR(phy->base))
+ return PTR_ERR(phy->base);
+
+ phy->crgctrl = syscon_regmap_lookup_by_compatible("hisilicon,hi3660-crgctrl");
+ if (IS_ERR(phy->crgctrl))
+ return PTR_ERR(phy->crgctrl);
+
+ phy->sysctrl = syscon_regmap_lookup_by_compatible("hisilicon,hi3660-sctrl");
+ if (IS_ERR(phy->sysctrl))
+ return PTR_ERR(phy->sysctrl);
return 0;
}
-static int kirin_pcie_phy_init(struct kirin_pcie *kirin_pcie)
+static int hi3660_pcie_phy_start(struct hi3660_pcie_phy *phy)
{
- struct device *dev = kirin_pcie->pci->dev;
+ struct device *dev = phy->dev;
u32 reg_val;
- reg_val = kirin_apb_phy_readl(kirin_pcie, PCIE_APB_PHY_CTRL1);
+ reg_val = kirin_apb_phy_readl(phy, PCIE_APB_PHY_CTRL1);
reg_val &= ~PHY_REF_PAD_BIT;
- kirin_apb_phy_writel(kirin_pcie, reg_val, PCIE_APB_PHY_CTRL1);
+ kirin_apb_phy_writel(phy, reg_val, PCIE_APB_PHY_CTRL1);
- reg_val = kirin_apb_phy_readl(kirin_pcie, PCIE_APB_PHY_CTRL0);
+ reg_val = kirin_apb_phy_readl(phy, PCIE_APB_PHY_CTRL0);
reg_val &= ~PHY_PWR_DOWN_BIT;
- kirin_apb_phy_writel(kirin_pcie, reg_val, PCIE_APB_PHY_CTRL0);
+ kirin_apb_phy_writel(phy, reg_val, PCIE_APB_PHY_CTRL0);
usleep_range(TIME_PHY_PD_MIN, TIME_PHY_PD_MAX);
- reg_val = kirin_apb_phy_readl(kirin_pcie, PCIE_APB_PHY_CTRL1);
+ reg_val = kirin_apb_phy_readl(phy, PCIE_APB_PHY_CTRL1);
reg_val &= ~PHY_RST_ACK_BIT;
- kirin_apb_phy_writel(kirin_pcie, reg_val, PCIE_APB_PHY_CTRL1);
+ kirin_apb_phy_writel(phy, reg_val, PCIE_APB_PHY_CTRL1);
usleep_range(PIPE_CLK_WAIT_MIN, PIPE_CLK_WAIT_MAX);
- reg_val = kirin_apb_phy_readl(kirin_pcie, PCIE_APB_PHY_STATUS0);
- if (reg_val & PIPE_CLK_STABLE) {
- dev_err(dev, "PIPE clk is not stable\n");
- return -EINVAL;
- }
+ reg_val = kirin_apb_phy_readl(phy, PCIE_APB_PHY_STATUS0);
+ if (reg_val & PIPE_CLK_STABLE)
+ return dev_err_probe(dev, -ETIMEDOUT,
+ "PIPE clk is not stable\n");
return 0;
}
-static void kirin_pcie_oe_enable(struct kirin_pcie *kirin_pcie)
+static void hi3660_pcie_phy_oe_enable(struct hi3660_pcie_phy *phy)
{
u32 val;
- regmap_read(kirin_pcie->sysctrl, SCTRL_PCIE_OE_OFFSET, &val);
+ regmap_read(phy->sysctrl, SCTRL_PCIE_OE_OFFSET, &val);
val |= PCIE_DEBOUNCE_PARAM;
val &= ~PCIE_OE_BYPASS;
- regmap_write(kirin_pcie->sysctrl, SCTRL_PCIE_OE_OFFSET, val);
+ regmap_write(phy->sysctrl, SCTRL_PCIE_OE_OFFSET, val);
}
-static int kirin_pcie_clk_ctrl(struct kirin_pcie *kirin_pcie, bool enable)
+static int hi3660_pcie_phy_clk_ctrl(struct hi3660_pcie_phy *phy, bool enable)
{
int ret = 0;
if (!enable)
goto close_clk;
- ret = clk_set_rate(kirin_pcie->phy_ref_clk, REF_CLK_FREQ);
+ ret = clk_set_rate(phy->phy_ref_clk, REF_CLK_FREQ);
if (ret)
return ret;
- ret = clk_prepare_enable(kirin_pcie->phy_ref_clk);
+ ret = clk_prepare_enable(phy->phy_ref_clk);
if (ret)
return ret;
- ret = clk_prepare_enable(kirin_pcie->apb_sys_clk);
+ ret = clk_prepare_enable(phy->apb_sys_clk);
if (ret)
goto apb_sys_fail;
- ret = clk_prepare_enable(kirin_pcie->apb_phy_clk);
+ ret = clk_prepare_enable(phy->apb_phy_clk);
if (ret)
goto apb_phy_fail;
- ret = clk_prepare_enable(kirin_pcie->pcie_aclk);
+ ret = clk_prepare_enable(phy->aclk);
if (ret)
goto aclk_fail;
- ret = clk_prepare_enable(kirin_pcie->pcie_aux_clk);
+ ret = clk_prepare_enable(phy->aux_clk);
if (ret)
goto aux_clk_fail;
return 0;
close_clk:
- clk_disable_unprepare(kirin_pcie->pcie_aux_clk);
+ clk_disable_unprepare(phy->aux_clk);
aux_clk_fail:
- clk_disable_unprepare(kirin_pcie->pcie_aclk);
+ clk_disable_unprepare(phy->aclk);
aclk_fail:
- clk_disable_unprepare(kirin_pcie->apb_phy_clk);
+ clk_disable_unprepare(phy->apb_phy_clk);
apb_phy_fail:
- clk_disable_unprepare(kirin_pcie->apb_sys_clk);
+ clk_disable_unprepare(phy->apb_sys_clk);
apb_sys_fail:
- clk_disable_unprepare(kirin_pcie->phy_ref_clk);
+ clk_disable_unprepare(phy->phy_ref_clk);
return ret;
}
-static int kirin_pcie_power_on(struct kirin_pcie *kirin_pcie)
+static int hi3660_pcie_phy_power_on(struct kirin_pcie *pcie)
{
+ struct hi3660_pcie_phy *phy = pcie->phy_priv;
int ret;
/* Power supply for Host */
- regmap_write(kirin_pcie->sysctrl,
+ regmap_write(phy->sysctrl,
SCTRL_PCIE_CMOS_OFFSET, SCTRL_PCIE_CMOS_BIT);
usleep_range(TIME_CMOS_MIN, TIME_CMOS_MAX);
- kirin_pcie_oe_enable(kirin_pcie);
- ret = kirin_pcie_clk_ctrl(kirin_pcie, true);
+ hi3660_pcie_phy_oe_enable(phy);
+
+ ret = hi3660_pcie_phy_clk_ctrl(phy, true);
if (ret)
return ret;
/* ISO disable, PCIeCtrl, PHY assert and clk gate clear */
- regmap_write(kirin_pcie->sysctrl,
+ regmap_write(phy->sysctrl,
SCTRL_PCIE_ISO_OFFSET, SCTRL_PCIE_ISO_BIT);
- regmap_write(kirin_pcie->crgctrl,
+ regmap_write(phy->crgctrl,
CRGCTRL_PCIE_ASSERT_OFFSET, CRGCTRL_PCIE_ASSERT_BIT);
- regmap_write(kirin_pcie->sysctrl,
+ regmap_write(phy->sysctrl,
SCTRL_PCIE_HPCLK_OFFSET, SCTRL_PCIE_HPCLK_BIT);
- ret = kirin_pcie_phy_init(kirin_pcie);
+ ret = hi3660_pcie_phy_start(phy);
if (ret)
- goto close_clk;
+ goto disable_clks;
- /* perst assert Endpoint */
- if (!gpio_request(kirin_pcie->gpio_id_reset, "pcie_perst")) {
- usleep_range(REF_2_PERST_MIN, REF_2_PERST_MAX);
- ret = gpio_direction_output(kirin_pcie->gpio_id_reset, 1);
- if (ret)
- goto close_clk;
- usleep_range(PERST_2_ACCESS_MIN, PERST_2_ACCESS_MAX);
+ return 0;
+disable_clks:
+ hi3660_pcie_phy_clk_ctrl(phy, false);
+ return ret;
+}
+
+static int hi3660_pcie_phy_init(struct platform_device *pdev,
+ struct kirin_pcie *pcie)
+{
+ struct device *dev = &pdev->dev;
+ struct hi3660_pcie_phy *phy;
+ int ret;
+
+ phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
+ if (!phy)
+ return -ENOMEM;
+
+ pcie->phy_priv = phy;
+ phy->dev = dev;
+
+ ret = hi3660_pcie_phy_get_clk(phy);
+ if (ret)
+ return ret;
+
+ return hi3660_pcie_phy_get_resource(phy);
+}
+
+static int hi3660_pcie_phy_power_off(struct kirin_pcie *pcie)
+{
+ struct hi3660_pcie_phy *phy = pcie->phy_priv;
+
+ /* Drop power supply for Host */
+ regmap_write(phy->sysctrl, SCTRL_PCIE_CMOS_OFFSET, 0x00);
+
+ hi3660_pcie_phy_clk_ctrl(phy, false);
+
+ return 0;
+}
+
+/*
+ * The non-PHY part starts here
+ */
+
+static const struct regmap_config pcie_kirin_regmap_conf = {
+ .name = "kirin_pcie_apb",
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+};
+
+static int kirin_pcie_get_gpio_enable(struct kirin_pcie *pcie,
+ struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ int ret, i;
+
+ /* This is an optional property */
+ ret = gpiod_count(dev, "hisilicon,clken");
+ if (ret < 0)
return 0;
+
+ if (ret > MAX_PCI_SLOTS)
+ return dev_err_probe(dev, -EINVAL,
+ "Too many GPIO clock requests!\n");
+
+ pcie->n_gpio_clkreq = ret;
+
+ for (i = 0; i < pcie->n_gpio_clkreq; i++) {
+ pcie->id_clkreq_gpio[i] = devm_gpiod_get_index(dev,
+ "hisilicon,clken", i,
+ GPIOD_OUT_LOW);
+ if (IS_ERR(pcie->id_clkreq_gpio[i]))
+ return dev_err_probe(dev, PTR_ERR(pcie->id_clkreq_gpio[i]),
+ "unable to get a valid clken gpio\n");
+
+ pcie->clkreq_names[i] = devm_kasprintf(dev, GFP_KERNEL,
+ "pcie_clkreq_%d", i);
+ if (!pcie->clkreq_names[i])
+ return -ENOMEM;
+
+ gpiod_set_consumer_name(pcie->id_clkreq_gpio[i],
+ pcie->clkreq_names[i]);
}
-close_clk:
- kirin_pcie_clk_ctrl(kirin_pcie, false);
- return ret;
+ return 0;
+}
+
+static int kirin_pcie_parse_port(struct kirin_pcie *pcie,
+ struct platform_device *pdev,
+ struct device_node *node)
+{
+ struct device *dev = &pdev->dev;
+ int ret, slot, i;
+
+ for_each_available_child_of_node_scoped(node, parent) {
+ for_each_available_child_of_node_scoped(parent, child) {
+ i = pcie->num_slots;
+
+ pcie->id_reset_gpio[i] = devm_fwnode_gpiod_get_index(dev,
+ of_fwnode_handle(child),
+ "reset", 0, GPIOD_OUT_LOW,
+ NULL);
+ if (IS_ERR(pcie->id_reset_gpio[i])) {
+ if (PTR_ERR(pcie->id_reset_gpio[i]) == -ENOENT)
+ continue;
+ return dev_err_probe(dev, PTR_ERR(pcie->id_reset_gpio[i]),
+ "unable to get a valid reset gpio\n");
+ }
+
+ if (pcie->num_slots + 1 >= MAX_PCI_SLOTS)
+ return dev_err_probe(dev, -EINVAL,
+ "Too many PCI slots!\n");
+
+ pcie->num_slots++;
+
+ ret = of_pci_get_devfn(child);
+ if (ret < 0)
+ return dev_err_probe(dev, ret,
+ "failed to parse devfn\n");
+
+ slot = PCI_SLOT(ret);
+
+ pcie->reset_names[i] = devm_kasprintf(dev, GFP_KERNEL,
+ "pcie_perst_%d",
+ slot);
+ if (!pcie->reset_names[i])
+ return -ENOMEM;
+
+ gpiod_set_consumer_name(pcie->id_reset_gpio[i],
+ pcie->reset_names[i]);
+ }
+ }
+
+ return 0;
+}
+
+static long kirin_pcie_get_resource(struct kirin_pcie *kirin_pcie,
+ struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ void __iomem *apb_base;
+ int ret;
+
+ apb_base = devm_platform_ioremap_resource_byname(pdev, "apb");
+ if (IS_ERR(apb_base))
+ return PTR_ERR(apb_base);
+
+ kirin_pcie->apb = devm_regmap_init_mmio(dev, apb_base,
+ &pcie_kirin_regmap_conf);
+ if (IS_ERR(kirin_pcie->apb))
+ return PTR_ERR(kirin_pcie->apb);
+
+ /* pcie internal PERST# gpio */
+ kirin_pcie->id_dwc_perst_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(kirin_pcie->id_dwc_perst_gpio))
+ return dev_err_probe(dev, PTR_ERR(kirin_pcie->id_dwc_perst_gpio),
+ "unable to get a valid gpio pin\n");
+ gpiod_set_consumer_name(kirin_pcie->id_dwc_perst_gpio, "pcie_perst_bridge");
+
+ ret = kirin_pcie_get_gpio_enable(kirin_pcie, pdev);
+ if (ret)
+ return ret;
+
+ /* Parse OF children */
+ for_each_available_child_of_node_scoped(node, child) {
+ ret = kirin_pcie_parse_port(kirin_pcie, pdev, child);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
}
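This hunk replaces the driver's private apb_base readl()/writel() wrappers with an MMIO-backed regmap built by devm_regmap_init_mmio(). Besides dropping the hand-rolled accessors, a regmap lets read-modify-write sequences go through regmap_update_bits(), which locks the map for the whole update. A hedged sketch with a hypothetical register:

	#include <linux/bits.h>
	#include <linux/regmap.h>

	#define DEMO_CTRL0	0x000		/* hypothetical register */
	#define DEMO_DBI_EN	BIT(21)		/* hypothetical bit */

	static int demo_set_dbi_mode(struct regmap *apb, bool on)
	{
		/* One locked read/modify/write instead of regmap_read() + regmap_write() */
		return regmap_update_bits(apb, DEMO_CTRL0, DEMO_DBI_EN,
					  on ? DEMO_DBI_EN : 0);
	}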
static void kirin_pcie_sideband_dbi_w_mode(struct kirin_pcie *kirin_pcie,
@@ -312,13 +488,13 @@ static void kirin_pcie_sideband_dbi_w_mode(struct kirin_pcie *kirin_pcie,
{
u32 val;
- val = kirin_apb_ctrl_readl(kirin_pcie, SOC_PCIECTRL_CTRL0_ADDR);
+ regmap_read(kirin_pcie->apb, SOC_PCIECTRL_CTRL0_ADDR, &val);
if (on)
val = val | PCIE_ELBI_SLV_DBI_ENABLE;
else
val = val & ~PCIE_ELBI_SLV_DBI_ENABLE;
- kirin_apb_ctrl_writel(kirin_pcie, val, SOC_PCIECTRL_CTRL0_ADDR);
+ regmap_write(kirin_pcie->apb, SOC_PCIECTRL_CTRL0_ADDR, val);
}
static void kirin_pcie_sideband_dbi_r_mode(struct kirin_pcie *kirin_pcie,
@@ -326,43 +502,67 @@ static void kirin_pcie_sideband_dbi_r_mode(struct kirin_pcie *kirin_pcie,
{
u32 val;
- val = kirin_apb_ctrl_readl(kirin_pcie, SOC_PCIECTRL_CTRL1_ADDR);
+ regmap_read(kirin_pcie->apb, SOC_PCIECTRL_CTRL1_ADDR, &val);
if (on)
val = val | PCIE_ELBI_SLV_DBI_ENABLE;
else
val = val & ~PCIE_ELBI_SLV_DBI_ENABLE;
- kirin_apb_ctrl_writel(kirin_pcie, val, SOC_PCIECTRL_CTRL1_ADDR);
+ regmap_write(kirin_pcie->apb, SOC_PCIECTRL_CTRL1_ADDR, val);
}
-static int kirin_pcie_rd_own_conf(struct pcie_port *pp,
+static int kirin_pcie_rd_own_conf(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 *val)
{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci);
- int ret;
+ struct dw_pcie *pci = to_dw_pcie_from_pp(bus->sysdata);
- kirin_pcie_sideband_dbi_r_mode(kirin_pcie, true);
- ret = dw_pcie_read(pci->dbi_base + where, size, val);
- kirin_pcie_sideband_dbi_r_mode(kirin_pcie, false);
+ if (PCI_SLOT(devfn))
+ return PCIBIOS_DEVICE_NOT_FOUND;
- return ret;
+ *val = dw_pcie_read_dbi(pci, where, size);
+ return PCIBIOS_SUCCESSFUL;
}
-static int kirin_pcie_wr_own_conf(struct pcie_port *pp,
+static int kirin_pcie_wr_own_conf(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 val)
{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct dw_pcie *pci = to_dw_pcie_from_pp(bus->sysdata);
+
+ if (PCI_SLOT(devfn))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ dw_pcie_write_dbi(pci, where, size, val);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int kirin_pcie_add_bus(struct pci_bus *bus)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(bus->sysdata);
struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci);
- int ret;
+ int i, ret;
- kirin_pcie_sideband_dbi_w_mode(kirin_pcie, true);
- ret = dw_pcie_write(pci->dbi_base + where, size, val);
- kirin_pcie_sideband_dbi_w_mode(kirin_pcie, false);
+ if (!kirin_pcie->num_slots)
+ return 0;
- return ret;
+ /* Send PERST# to each slot */
+ for (i = 0; i < kirin_pcie->num_slots; i++) {
+ ret = gpiod_direction_output_raw(kirin_pcie->id_reset_gpio[i], 1);
+ if (ret) {
+ dev_err(pci->dev, "PERST# %s error: %d\n",
+ kirin_pcie->reset_names[i], ret);
+ }
+ }
+ usleep_range(PERST_2_ACCESS_MIN, PERST_2_ACCESS_MAX);
+
+ return 0;
}
+static struct pci_ops kirin_pci_ops = {
+ .read = kirin_pcie_rd_own_conf,
+ .write = kirin_pcie_wr_own_conf,
+ .add_bus = kirin_pcie_add_bus,
+};
+
static u32 kirin_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base,
u32 reg, size_t size)
{
@@ -386,112 +586,142 @@ static void kirin_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base,
kirin_pcie_sideband_dbi_w_mode(kirin_pcie, false);
}
-static int kirin_pcie_link_up(struct dw_pcie *pci)
+static bool kirin_pcie_link_up(struct dw_pcie *pci)
{
struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci);
- u32 val = kirin_apb_ctrl_readl(kirin_pcie, PCIE_APB_PHY_STATUS0);
-
- if ((val & PCIE_LINKUP_ENABLE) == PCIE_LINKUP_ENABLE)
- return 1;
+ u32 val;
- return 0;
+ regmap_read(kirin_pcie->apb, PCIE_APB_PHY_STATUS0, &val);
+ return (val & PCIE_LINKUP_ENABLE) == PCIE_LINKUP_ENABLE;
}
-static int kirin_pcie_establish_link(struct pcie_port *pp)
+static int kirin_pcie_start_link(struct dw_pcie *pci)
{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci);
- struct device *dev = kirin_pcie->pci->dev;
- int count = 0;
-
- if (kirin_pcie_link_up(pci))
- return 0;
-
- dw_pcie_setup_rc(pp);
/* assert LTSSM enable */
- kirin_apb_ctrl_writel(kirin_pcie, PCIE_LTSSM_ENABLE_BIT,
- PCIE_APP_LTSSM_ENABLE);
-
- /* check if the link is up or not */
- while (!kirin_pcie_link_up(pci)) {
- usleep_range(LINK_WAIT_MIN, LINK_WAIT_MAX);
- count++;
- if (count == 1000) {
- dev_err(dev, "Link Fail\n");
- return -EINVAL;
- }
- }
+ regmap_write(kirin_pcie->apb, PCIE_APP_LTSSM_ENABLE,
+ PCIE_LTSSM_ENABLE_BIT);
return 0;
}
-static int kirin_pcie_host_init(struct pcie_port *pp)
+static int kirin_pcie_host_init(struct dw_pcie_rp *pp)
{
- kirin_pcie_establish_link(pp);
-
- if (IS_ENABLED(CONFIG_PCI_MSI))
- dw_pcie_msi_init(pp);
+ pp->bridge->ops = &kirin_pci_ops;
return 0;
}
-static struct dw_pcie_ops kirin_dw_pcie_ops = {
+static const struct dw_pcie_ops kirin_dw_pcie_ops = {
.read_dbi = kirin_pcie_read_dbi,
.write_dbi = kirin_pcie_write_dbi,
.link_up = kirin_pcie_link_up,
+ .start_link = kirin_pcie_start_link,
};
static const struct dw_pcie_host_ops kirin_pcie_host_ops = {
- .rd_own_conf = kirin_pcie_rd_own_conf,
- .wr_own_conf = kirin_pcie_wr_own_conf,
- .host_init = kirin_pcie_host_init,
+ .init = kirin_pcie_host_init,
};
-static int kirin_pcie_add_msi(struct dw_pcie *pci,
- struct platform_device *pdev)
+static int kirin_pcie_power_off(struct kirin_pcie *kirin_pcie)
{
- int irq;
-
- if (IS_ENABLED(CONFIG_PCI_MSI)) {
- irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev,
- "failed to get MSI IRQ (%d)\n", irq);
- return irq;
- }
+ int i;
- pci->pp.msi_irq = irq;
- }
+ if (kirin_pcie->type == PCIE_KIRIN_INTERNAL_PHY)
+ return hi3660_pcie_phy_power_off(kirin_pcie);
+
+ for (i = 0; i < kirin_pcie->n_gpio_clkreq; i++)
+ gpiod_direction_output_raw(kirin_pcie->id_clkreq_gpio[i], 1);
+
+ phy_power_off(kirin_pcie->phy);
+ phy_exit(kirin_pcie->phy);
return 0;
}
-static int kirin_add_pcie_port(struct dw_pcie *pci,
- struct platform_device *pdev)
+static int kirin_pcie_power_on(struct platform_device *pdev,
+ struct kirin_pcie *kirin_pcie)
{
+ struct device *dev = &pdev->dev;
int ret;
- ret = kirin_pcie_add_msi(pci, pdev);
+ if (kirin_pcie->type == PCIE_KIRIN_INTERNAL_PHY) {
+ ret = hi3660_pcie_phy_init(pdev, kirin_pcie);
+ if (ret)
+ return ret;
+
+ ret = hi3660_pcie_phy_power_on(kirin_pcie);
+ if (ret)
+ return ret;
+ } else {
+ kirin_pcie->phy = devm_of_phy_get(dev, dev->of_node, NULL);
+ if (IS_ERR(kirin_pcie->phy))
+ return PTR_ERR(kirin_pcie->phy);
+
+ ret = phy_init(kirin_pcie->phy);
+ if (ret)
+ goto err;
+
+ ret = phy_power_on(kirin_pcie->phy);
+ if (ret)
+ goto err;
+ }
+
+	/* Deassert PERST# on the Endpoint after the refclk-to-PERST# delay */
+ usleep_range(REF_2_PERST_MIN, REF_2_PERST_MAX);
+
+ ret = gpiod_direction_output_raw(kirin_pcie->id_dwc_perst_gpio, 1);
if (ret)
- return ret;
+ goto err;
- pci->pp.ops = &kirin_pcie_host_ops;
+ usleep_range(PERST_2_ACCESS_MIN, PERST_2_ACCESS_MAX);
- return dw_pcie_host_init(&pci->pp);
+ return 0;
+err:
+ kirin_pcie_power_off(kirin_pcie);
+
+ return ret;
+}
+
+static void kirin_pcie_remove(struct platform_device *pdev)
+{
+ struct kirin_pcie *kirin_pcie = platform_get_drvdata(pdev);
+
+ dw_pcie_host_deinit(&kirin_pcie->pci->pp);
+
+ kirin_pcie_power_off(kirin_pcie);
}
+struct kirin_pcie_data {
+ enum pcie_kirin_phy_type phy_type;
+};
+
+static const struct kirin_pcie_data kirin_960_data = {
+ .phy_type = PCIE_KIRIN_INTERNAL_PHY,
+};
+
+static const struct kirin_pcie_data kirin_970_data = {
+ .phy_type = PCIE_KIRIN_EXTERNAL_PHY,
+};
+
+static const struct of_device_id kirin_pcie_match[] = {
+ { .compatible = "hisilicon,kirin960-pcie", .data = &kirin_960_data },
+ { .compatible = "hisilicon,kirin970-pcie", .data = &kirin_970_data },
+ {},
+};
+
static int kirin_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ const struct kirin_pcie_data *data;
struct kirin_pcie *kirin_pcie;
struct dw_pcie *pci;
int ret;
- if (!dev->of_node) {
- dev_err(dev, "NULL node\n");
- return -EINVAL;
- }
+ data = of_device_get_match_data(dev);
+ if (!data)
+ return dev_err_probe(dev, -EINVAL, "OF data missing\n");
kirin_pcie = devm_kzalloc(dev, sizeof(struct kirin_pcie), GFP_KERNEL);
if (!kirin_pcie)
@@ -503,41 +733,35 @@ static int kirin_pcie_probe(struct platform_device *pdev)
pci->dev = dev;
pci->ops = &kirin_dw_pcie_ops;
+ pci->pp.ops = &kirin_pcie_host_ops;
kirin_pcie->pci = pci;
-
- ret = kirin_pcie_get_clk(kirin_pcie, pdev);
- if (ret)
- return ret;
+ kirin_pcie->type = data->phy_type;
ret = kirin_pcie_get_resource(kirin_pcie, pdev);
if (ret)
return ret;
- kirin_pcie->gpio_id_reset = of_get_named_gpio(dev->of_node,
- "reset-gpios", 0);
- if (kirin_pcie->gpio_id_reset < 0)
- return -ENODEV;
+ platform_set_drvdata(pdev, kirin_pcie);
- ret = kirin_pcie_power_on(kirin_pcie);
+ ret = kirin_pcie_power_on(pdev, kirin_pcie);
if (ret)
return ret;
- platform_set_drvdata(pdev, kirin_pcie);
-
- return kirin_add_pcie_port(pci, pdev);
+ return dw_pcie_host_init(&pci->pp);
}
-static const struct of_device_id kirin_pcie_match[] = {
- { .compatible = "hisilicon,kirin960-pcie" },
- {},
-};
-
static struct platform_driver kirin_pcie_driver = {
.probe = kirin_pcie_probe,
+ .remove = kirin_pcie_remove,
.driver = {
.name = "kirin-pcie",
- .of_match_table = kirin_pcie_match,
- .suppress_bind_attrs = true,
+ .of_match_table = kirin_pcie_match,
+ .suppress_bind_attrs = true,
},
};
-builtin_platform_driver(kirin_pcie_driver);
+module_platform_driver(kirin_pcie_driver);
+
+MODULE_DEVICE_TABLE(of, kirin_pcie_match);
+MODULE_DESCRIPTION("PCIe host controller driver for Kirin Phone SoCs");
+MODULE_AUTHOR("Xiaowei Song <songxiaowei@huawei.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/dwc/pcie-nxp-s32g.c b/drivers/pci/controller/dwc/pcie-nxp-s32g.c
new file mode 100644
index 000000000000..47745749f75c
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-nxp-s32g.c
@@ -0,0 +1,406 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe host controller driver for NXP S32G SoCs
+ *
+ * Copyright 2019-2025 NXP
+ */
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/pci.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/sizes.h>
+#include <linux/types.h>
+
+#include "pcie-designware.h"
+
+/* PCIe controller Sub-System */
+
+/* PCIe controller 0 General Control 1 */
+#define PCIE_S32G_PE0_GEN_CTRL_1 0x50
+#define DEVICE_TYPE_MASK GENMASK(3, 0)
+#define SRIS_MODE BIT(8)
+
+/* PCIe controller 0 General Control 3 */
+#define PCIE_S32G_PE0_GEN_CTRL_3 0x58
+#define LTSSM_EN BIT(0)
+
+/* PCIe Controller 0 Interrupt Status */
+#define PCIE_S32G_PE0_INT_STS 0xE8
+#define HP_INT_STS BIT(6)
+
+/* Boundary between peripheral space and physical memory space */
+#define S32G_MEMORY_BOUNDARY_ADDR 0x80000000
+
+struct s32g_pcie_port {
+ struct list_head list;
+ struct phy *phy;
+};
+
+struct s32g_pcie {
+ struct dw_pcie pci;
+ void __iomem *ctrl_base;
+ struct list_head ports;
+};
+
+#define to_s32g_from_dw_pcie(x) \
+ container_of(x, struct s32g_pcie, pci)
+
+static void s32g_pcie_writel_ctrl(struct s32g_pcie *s32g_pp, u32 reg, u32 val)
+{
+ writel(val, s32g_pp->ctrl_base + reg);
+}
+
+static u32 s32g_pcie_readl_ctrl(struct s32g_pcie *s32g_pp, u32 reg)
+{
+ return readl(s32g_pp->ctrl_base + reg);
+}
+
+static void s32g_pcie_enable_ltssm(struct s32g_pcie *s32g_pp)
+{
+ u32 reg;
+
+ reg = s32g_pcie_readl_ctrl(s32g_pp, PCIE_S32G_PE0_GEN_CTRL_3);
+ reg |= LTSSM_EN;
+ s32g_pcie_writel_ctrl(s32g_pp, PCIE_S32G_PE0_GEN_CTRL_3, reg);
+}
+
+static void s32g_pcie_disable_ltssm(struct s32g_pcie *s32g_pp)
+{
+ u32 reg;
+
+ reg = s32g_pcie_readl_ctrl(s32g_pp, PCIE_S32G_PE0_GEN_CTRL_3);
+ reg &= ~LTSSM_EN;
+ s32g_pcie_writel_ctrl(s32g_pp, PCIE_S32G_PE0_GEN_CTRL_3, reg);
+}
+
+static int s32g_pcie_start_link(struct dw_pcie *pci)
+{
+ struct s32g_pcie *s32g_pp = to_s32g_from_dw_pcie(pci);
+
+ s32g_pcie_enable_ltssm(s32g_pp);
+
+ return 0;
+}
+
+static void s32g_pcie_stop_link(struct dw_pcie *pci)
+{
+ struct s32g_pcie *s32g_pp = to_s32g_from_dw_pcie(pci);
+
+ s32g_pcie_disable_ltssm(s32g_pp);
+}
+
+static struct dw_pcie_ops s32g_pcie_ops = {
+ .start_link = s32g_pcie_start_link,
+ .stop_link = s32g_pcie_stop_link,
+};
+
+/* Configure the AMBA AXI Coherency Extensions (ACE) interface */
+static void s32g_pcie_reset_mstr_ace(struct dw_pcie *pci)
+{
+ u32 ddr_base_low = lower_32_bits(S32G_MEMORY_BOUNDARY_ADDR);
+ u32 ddr_base_high = upper_32_bits(S32G_MEMORY_BOUNDARY_ADDR);
+
+ dw_pcie_dbi_ro_wr_en(pci);
+ dw_pcie_writel_dbi(pci, COHERENCY_CONTROL_3_OFF, 0x0);
+
+ /*
+ * Ncore is a cache-coherent interconnect module that enables the
+ * integration of heterogeneous coherent and non-coherent agents in
+	 * the chip. Ncore transactions to peripherals must be non-coherent,
+	 * or Ncore might drop them.
+ *
+ * One example where this is needed are PCIe MSIs, which use NoSnoop=0
+ * and might end up routed to Ncore. PCIe coherent traffic (e.g. MSIs)
+ * that targets peripheral space will be dropped by Ncore because
+ * peripherals on S32G are not coherent as slaves. We add a hard
+ * boundary in the PCIe controller coherency control registers to
+ * separate physical memory space from peripheral space.
+ *
+ * Define the start of DDR as seen by Linux as this boundary between
+ * "memory" and "peripherals", with peripherals being below.
+ */
+ dw_pcie_writel_dbi(pci, COHERENCY_CONTROL_1_OFF,
+ (ddr_base_low & CFG_MEMTYPE_BOUNDARY_LOW_ADDR_MASK));
+ dw_pcie_writel_dbi(pci, COHERENCY_CONTROL_2_OFF, ddr_base_high);
+ dw_pcie_dbi_ro_wr_dis(pci);
+}
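To make the boundary programming above concrete: with S32G_MEMORY_BOUNDARY_ADDR = 0x80000000, lower_32_bits() gives 0x80000000 and upper_32_bits() gives 0x0, so COHERENCY_CONTROL_1_OFF holds the masked low word and COHERENCY_CONTROL_2_OFF the high word of the 64-bit boundary. The same split for an arbitrary boundary, as a sketch (helper name hypothetical; register macros as used above):

	/* Hypothetical helper mirroring s32g_pcie_reset_mstr_ace() */
	static void demo_write_boundary(struct dw_pcie *pci, u64 boundary)
	{
		dw_pcie_dbi_ro_wr_en(pci);
		dw_pcie_writel_dbi(pci, COHERENCY_CONTROL_1_OFF,
				   lower_32_bits(boundary) &
				   CFG_MEMTYPE_BOUNDARY_LOW_ADDR_MASK);
		dw_pcie_writel_dbi(pci, COHERENCY_CONTROL_2_OFF,
				   upper_32_bits(boundary));
		dw_pcie_dbi_ro_wr_dis(pci);
	}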
+
+static int s32g_init_pcie_controller(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct s32g_pcie *s32g_pp = to_s32g_from_dw_pcie(pci);
+ u32 val;
+
+ /* Set RP mode */
+ val = s32g_pcie_readl_ctrl(s32g_pp, PCIE_S32G_PE0_GEN_CTRL_1);
+ val &= ~DEVICE_TYPE_MASK;
+ val |= FIELD_PREP(DEVICE_TYPE_MASK, PCI_EXP_TYPE_ROOT_PORT);
+
+ /* Use default CRNS */
+ val &= ~SRIS_MODE;
+
+ s32g_pcie_writel_ctrl(s32g_pp, PCIE_S32G_PE0_GEN_CTRL_1, val);
+
+ /*
+ * Make sure we use the coherency defaults (just in case the settings
+ * have been changed from their reset values)
+ */
+ s32g_pcie_reset_mstr_ace(pci);
+
+ dw_pcie_dbi_ro_wr_en(pci);
+
+ val = dw_pcie_readl_dbi(pci, PCIE_PORT_FORCE);
+ val |= PORT_FORCE_DO_DESKEW_FOR_SRIS;
+ dw_pcie_writel_dbi(pci, PCIE_PORT_FORCE, val);
+
+ val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
+ val |= GEN3_RELATED_OFF_EQ_PHASE_2_3;
+ dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
+
+ dw_pcie_dbi_ro_wr_dis(pci);
+
+ return 0;
+}
+
+static const struct dw_pcie_host_ops s32g_pcie_host_ops = {
+ .init = s32g_init_pcie_controller,
+};
+
+static int s32g_init_pcie_phy(struct s32g_pcie *s32g_pp)
+{
+ struct dw_pcie *pci = &s32g_pp->pci;
+ struct device *dev = pci->dev;
+ struct s32g_pcie_port *port, *tmp;
+ int ret;
+
+ list_for_each_entry(port, &s32g_pp->ports, list) {
+ ret = phy_init(port->phy);
+ if (ret) {
+ dev_err(dev, "Failed to init serdes PHY\n");
+ goto err_phy_revert;
+ }
+
+ ret = phy_set_mode_ext(port->phy, PHY_MODE_PCIE, 0);
+ if (ret) {
+ dev_err(dev, "Failed to set mode on serdes PHY\n");
+ goto err_phy_exit;
+ }
+
+ ret = phy_power_on(port->phy);
+ if (ret) {
+ dev_err(dev, "Failed to power on serdes PHY\n");
+ goto err_phy_exit;
+ }
+ }
+
+ return 0;
+
+err_phy_exit:
+ phy_exit(port->phy);
+
+err_phy_revert:
+ list_for_each_entry_continue_reverse(port, &s32g_pp->ports, list) {
+ phy_power_off(port->phy);
+ phy_exit(port->phy);
+ }
+
+ list_for_each_entry_safe(port, tmp, &s32g_pp->ports, list)
+ list_del(&port->list);
+
+ return ret;
+}
+
+static void s32g_deinit_pcie_phy(struct s32g_pcie *s32g_pp)
+{
+ struct s32g_pcie_port *port, *tmp;
+
+ list_for_each_entry_safe(port, tmp, &s32g_pp->ports, list) {
+ phy_power_off(port->phy);
+ phy_exit(port->phy);
+ list_del(&port->list);
+ }
+}
+
+static int s32g_pcie_init(struct device *dev, struct s32g_pcie *s32g_pp)
+{
+ s32g_pcie_disable_ltssm(s32g_pp);
+
+ return s32g_init_pcie_phy(s32g_pp);
+}
+
+static void s32g_pcie_deinit(struct s32g_pcie *s32g_pp)
+{
+ s32g_pcie_disable_ltssm(s32g_pp);
+
+ s32g_deinit_pcie_phy(s32g_pp);
+}
+
+static int s32g_pcie_parse_port(struct s32g_pcie *s32g_pp, struct device_node *node)
+{
+ struct device *dev = s32g_pp->pci.dev;
+ struct s32g_pcie_port *port;
+ int num_lanes;
+
+ port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
+ if (!port)
+ return -ENOMEM;
+
+ port->phy = devm_of_phy_get(dev, node, NULL);
+ if (IS_ERR(port->phy))
+ return dev_err_probe(dev, PTR_ERR(port->phy),
+ "Failed to get serdes PHY\n");
+
+ INIT_LIST_HEAD(&port->list);
+ list_add_tail(&port->list, &s32g_pp->ports);
+
+ /*
+ * The DWC core initialization code cannot yet parse the num-lanes
+ * attribute in the Root Port node. The S32G only supports one Root
+ * Port for now so its driver can parse the node and set the num_lanes
+	 * field of struct dw_pcie before calling dw_pcie_host_init().
+ */
+ if (!of_property_read_u32(node, "num-lanes", &num_lanes))
+ s32g_pp->pci.num_lanes = num_lanes;
+
+ return 0;
+}
+
+static int s32g_pcie_parse_ports(struct device *dev, struct s32g_pcie *s32g_pp)
+{
+ struct s32g_pcie_port *port, *tmp;
+ int ret = -ENOENT;
+
+ for_each_available_child_of_node_scoped(dev->of_node, of_port) {
+ if (!of_node_is_type(of_port, "pci"))
+ continue;
+
+ ret = s32g_pcie_parse_port(s32g_pp, of_port);
+ if (ret)
+ goto err_port;
+	}
+
+	return ret;
+
+err_port:
+ list_for_each_entry_safe(port, tmp, &s32g_pp->ports, list)
+ list_del(&port->list);
+
+ return ret;
+}
+
+static int s32g_pcie_get_resources(struct platform_device *pdev,
+ struct s32g_pcie *s32g_pp)
+{
+ struct dw_pcie *pci = &s32g_pp->pci;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ pci->dev = dev;
+ pci->ops = &s32g_pcie_ops;
+
+ s32g_pp->ctrl_base = devm_platform_ioremap_resource_byname(pdev, "ctrl");
+ if (IS_ERR(s32g_pp->ctrl_base))
+ return PTR_ERR(s32g_pp->ctrl_base);
+
+ INIT_LIST_HEAD(&s32g_pp->ports);
+
+ ret = s32g_pcie_parse_ports(dev, s32g_pp);
+ if (ret)
+		return dev_err_probe(dev, ret,
+				     "Failed to parse Root Port\n");
+
+ platform_set_drvdata(pdev, s32g_pp);
+
+ return 0;
+}
+
+static int s32g_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct s32g_pcie *s32g_pp;
+ struct dw_pcie_rp *pp;
+ int ret;
+
+ s32g_pp = devm_kzalloc(dev, sizeof(*s32g_pp), GFP_KERNEL);
+ if (!s32g_pp)
+ return -ENOMEM;
+
+ ret = s32g_pcie_get_resources(pdev, s32g_pp);
+ if (ret)
+ return ret;
+
+ pm_runtime_no_callbacks(dev);
+ devm_pm_runtime_enable(dev);
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0)
+ goto err_pm_runtime_put;
+
+ ret = s32g_pcie_init(dev, s32g_pp);
+ if (ret)
+ goto err_pm_runtime_put;
+
+ pp = &s32g_pp->pci.pp;
+ pp->ops = &s32g_pcie_host_ops;
+ pp->use_atu_msg = true;
+
+ ret = dw_pcie_host_init(pp);
+ if (ret)
+ goto err_pcie_deinit;
+
+ return 0;
+
+err_pcie_deinit:
+ s32g_pcie_deinit(s32g_pp);
+err_pm_runtime_put:
+ pm_runtime_put(dev);
+
+ return ret;
+}
+
+static int s32g_pcie_suspend_noirq(struct device *dev)
+{
+ struct s32g_pcie *s32g_pp = dev_get_drvdata(dev);
+ struct dw_pcie *pci = &s32g_pp->pci;
+
+ return dw_pcie_suspend_noirq(pci);
+}
+
+static int s32g_pcie_resume_noirq(struct device *dev)
+{
+ struct s32g_pcie *s32g_pp = dev_get_drvdata(dev);
+ struct dw_pcie *pci = &s32g_pp->pci;
+
+ return dw_pcie_resume_noirq(pci);
+}
+
+static const struct dev_pm_ops s32g_pcie_pm_ops = {
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(s32g_pcie_suspend_noirq,
+ s32g_pcie_resume_noirq)
+};
+
+static const struct of_device_id s32g_pcie_of_match[] = {
+ { .compatible = "nxp,s32g2-pcie" },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, s32g_pcie_of_match);
+
+static struct platform_driver s32g_pcie_driver = {
+ .driver = {
+ .name = "s32g-pcie",
+ .of_match_table = s32g_pcie_of_match,
+ .suppress_bind_attrs = true,
+ .pm = pm_sleep_ptr(&s32g_pcie_pm_ops),
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
+ .probe = s32g_pcie_probe,
+};
+
+builtin_platform_driver(s32g_pcie_driver);
+
+MODULE_AUTHOR("Ionut Vicovan <Ionut.Vicovan@nxp.com>");
+MODULE_DESCRIPTION("NXP S32G PCIe Host controller driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pci/controller/dwc/pcie-qcom-common.c b/drivers/pci/controller/dwc/pcie-qcom-common.c
new file mode 100644
index 000000000000..01c5387e53bf
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-qcom-common.c
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/pci.h>
+
+#include "pcie-designware.h"
+#include "pcie-qcom-common.h"
+
+void qcom_pcie_common_set_equalization(struct dw_pcie *pci)
+{
+ struct device *dev = pci->dev;
+ u32 reg;
+ u16 speed;
+
+ /*
+ * GEN3_RELATED_OFF register is repurposed to apply equalization
+ * settings at various data transmission rates through registers namely
+ * GEN3_EQ_*. The RATE_SHADOW_SEL bit field of GEN3_RELATED_OFF
+ * determines the data rate for which these equalization settings are
+ * applied.
+ */
+
+ for (speed = PCIE_SPEED_8_0GT; speed <= pcie_link_speed[pci->max_link_speed]; speed++) {
+ if (speed > PCIE_SPEED_32_0GT) {
+ dev_warn(dev, "Skipped equalization settings for unsupported data rate\n");
+ break;
+ }
+
+ reg = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
+ reg &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL;
+ reg &= ~GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK;
+ reg |= FIELD_PREP(GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK,
+ speed - PCIE_SPEED_8_0GT);
+ dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, reg);
+
+ reg = dw_pcie_readl_dbi(pci, GEN3_EQ_FB_MODE_DIR_CHANGE_OFF);
+ reg &= ~(GEN3_EQ_FMDC_T_MIN_PHASE23 |
+ GEN3_EQ_FMDC_N_EVALS |
+ GEN3_EQ_FMDC_MAX_PRE_CURSOR_DELTA |
+ GEN3_EQ_FMDC_MAX_POST_CURSOR_DELTA);
+ reg |= FIELD_PREP(GEN3_EQ_FMDC_T_MIN_PHASE23, 0x1) |
+ FIELD_PREP(GEN3_EQ_FMDC_N_EVALS, 0xd) |
+ FIELD_PREP(GEN3_EQ_FMDC_MAX_PRE_CURSOR_DELTA, 0x5) |
+ FIELD_PREP(GEN3_EQ_FMDC_MAX_POST_CURSOR_DELTA, 0x5);
+ dw_pcie_writel_dbi(pci, GEN3_EQ_FB_MODE_DIR_CHANGE_OFF, reg);
+
+ reg = dw_pcie_readl_dbi(pci, GEN3_EQ_CONTROL_OFF);
+ reg &= ~(GEN3_EQ_CONTROL_OFF_FB_MODE |
+ GEN3_EQ_CONTROL_OFF_PHASE23_EXIT_MODE |
+ GEN3_EQ_CONTROL_OFF_FOM_INC_INITIAL_EVAL |
+ GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC);
+ dw_pcie_writel_dbi(pci, GEN3_EQ_CONTROL_OFF, reg);
+ }
+}
+EXPORT_SYMBOL_GPL(qcom_pcie_common_set_equalization);
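As the comment at the top of the loop says, each iteration programs the equalization registers for one data rate; the RATE_SHADOW_SEL index written by the FIELD_PREP() above is simply the offset of that rate from Gen 3:

	/*
	 * speed               speed - PCIE_SPEED_8_0GT (RATE_SHADOW_SEL)
	 * PCIE_SPEED_8_0GT    0   Gen 3 ( 8.0 GT/s)
	 * PCIE_SPEED_16_0GT   1   Gen 4 (16.0 GT/s)
	 * PCIE_SPEED_32_0GT   2   Gen 5 (32.0 GT/s)
	 */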
+
+void qcom_pcie_common_set_16gt_lane_margining(struct dw_pcie *pci)
+{
+ u32 reg;
+
+ reg = dw_pcie_readl_dbi(pci, GEN4_LANE_MARGINING_1_OFF);
+ reg &= ~(MARGINING_MAX_VOLTAGE_OFFSET |
+ MARGINING_NUM_VOLTAGE_STEPS |
+ MARGINING_MAX_TIMING_OFFSET |
+ MARGINING_NUM_TIMING_STEPS);
+ reg |= FIELD_PREP(MARGINING_MAX_VOLTAGE_OFFSET, 0x24) |
+ FIELD_PREP(MARGINING_NUM_VOLTAGE_STEPS, 0x78) |
+ FIELD_PREP(MARGINING_MAX_TIMING_OFFSET, 0x32) |
+ FIELD_PREP(MARGINING_NUM_TIMING_STEPS, 0x10);
+ dw_pcie_writel_dbi(pci, GEN4_LANE_MARGINING_1_OFF, reg);
+
+ reg = dw_pcie_readl_dbi(pci, GEN4_LANE_MARGINING_2_OFF);
+ reg |= MARGINING_IND_ERROR_SAMPLER |
+ MARGINING_SAMPLE_REPORTING_METHOD |
+ MARGINING_IND_LEFT_RIGHT_TIMING |
+ MARGINING_VOLTAGE_SUPPORTED;
+ reg &= ~(MARGINING_IND_UP_DOWN_VOLTAGE |
+ MARGINING_MAXLANES |
+ MARGINING_SAMPLE_RATE_TIMING |
+ MARGINING_SAMPLE_RATE_VOLTAGE);
+ reg |= FIELD_PREP(MARGINING_MAXLANES, pci->num_lanes) |
+ FIELD_PREP(MARGINING_SAMPLE_RATE_TIMING, 0x3f) |
+ FIELD_PREP(MARGINING_SAMPLE_RATE_VOLTAGE, 0x3f);
+ dw_pcie_writel_dbi(pci, GEN4_LANE_MARGINING_2_OFF, reg);
+}
+EXPORT_SYMBOL_GPL(qcom_pcie_common_set_16gt_lane_margining);
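For reference, the intended call sequence for these two helpers can be seen in the Qualcomm EP driver later in this series: equalization is applied unconditionally once the DBI is writable, lane margining only when the link supports 16.0 GT/s. Condensed:

	qcom_pcie_common_set_equalization(pci);

	if (pcie_link_speed[pci->max_link_speed] == PCIE_SPEED_16_0GT)
		qcom_pcie_common_set_16gt_lane_margining(pci);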
diff --git a/drivers/pci/controller/dwc/pcie-qcom-common.h b/drivers/pci/controller/dwc/pcie-qcom-common.h
new file mode 100644
index 000000000000..7f5ca2fd9a72
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-qcom-common.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _PCIE_QCOM_COMMON_H
+#define _PCIE_QCOM_COMMON_H
+
+struct dw_pcie;
+
+void qcom_pcie_common_set_equalization(struct dw_pcie *pci);
+void qcom_pcie_common_set_16gt_lane_margining(struct dw_pcie *pci);
+
+#endif
diff --git a/drivers/pci/controller/dwc/pcie-qcom-ep.c b/drivers/pci/controller/dwc/pcie-qcom-ep.c
new file mode 100644
index 000000000000..f1bc0ac81a92
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-qcom-ep.c
@@ -0,0 +1,955 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Qualcomm PCIe Endpoint controller driver
+ *
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ * Author: Siddartha Mohanadoss <smohanad@codeaurora.org>
+ *
+ * Copyright (c) 2021, Linaro Ltd.
+ * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+ */
+
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/interconnect.h>
+#include <linux/mfd/syscon.h>
+#include <linux/phy/pcie.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/module.h>
+
+#include "../../pci.h"
+#include "pcie-designware.h"
+#include "pcie-qcom-common.h"
+
+/* PARF registers */
+#define PARF_SYS_CTRL 0x00
+#define PARF_DB_CTRL 0x10
+#define PARF_PM_CTRL 0x20
+#define PARF_MHI_CLOCK_RESET_CTRL 0x174
+#define PARF_MHI_BASE_ADDR_LOWER 0x178
+#define PARF_MHI_BASE_ADDR_UPPER 0x17c
+#define PARF_DEBUG_INT_EN 0x190
+#define PARF_AXI_MSTR_RD_HALT_NO_WRITES 0x1a4
+#define PARF_AXI_MSTR_WR_ADDR_HALT 0x1a8
+#define PARF_Q2A_FLUSH 0x1ac
+#define PARF_LTSSM 0x1b0
+#define PARF_CFG_BITS 0x210
+#define PARF_INT_ALL_STATUS 0x224
+#define PARF_INT_ALL_CLEAR 0x228
+#define PARF_INT_ALL_MASK 0x22c
+#define PARF_SLV_ADDR_MSB_CTRL 0x2c0
+#define PARF_DBI_BASE_ADDR 0x350
+#define PARF_DBI_BASE_ADDR_HI 0x354
+#define PARF_SLV_ADDR_SPACE_SIZE 0x358
+#define PARF_SLV_ADDR_SPACE_SIZE_HI 0x35c
+#define PARF_NO_SNOOP_OVERRIDE 0x3d4
+#define PARF_ATU_BASE_ADDR 0x634
+#define PARF_ATU_BASE_ADDR_HI 0x638
+#define PARF_SRIS_MODE 0x644
+#define PARF_DEBUG_CNT_PM_LINKST_IN_L2 0xc04
+#define PARF_DEBUG_CNT_PM_LINKST_IN_L1 0xc0c
+#define PARF_DEBUG_CNT_PM_LINKST_IN_L0S 0xc10
+#define PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L1 0xc84
+#define PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L2 0xc88
+#define PARF_DEVICE_TYPE 0x1000
+#define PARF_BDF_TO_SID_CFG 0x2c00
+#define PARF_INT_ALL_5_MASK 0x2dcc
+#define PARF_INT_ALL_3_MASK 0x2e18
+
+/* PARF_INT_ALL_{STATUS/CLEAR/MASK} register fields */
+#define PARF_INT_ALL_LINK_DOWN BIT(1)
+#define PARF_INT_ALL_BME BIT(2)
+#define PARF_INT_ALL_PM_TURNOFF BIT(3)
+#define PARF_INT_ALL_DEBUG BIT(4)
+#define PARF_INT_ALL_LTR BIT(5)
+#define PARF_INT_ALL_MHI_Q6 BIT(6)
+#define PARF_INT_ALL_MHI_A7 BIT(7)
+#define PARF_INT_ALL_DSTATE_CHANGE BIT(8)
+#define PARF_INT_ALL_L1SUB_TIMEOUT BIT(9)
+#define PARF_INT_ALL_MMIO_WRITE BIT(10)
+#define PARF_INT_ALL_CFG_WRITE BIT(11)
+#define PARF_INT_ALL_BRIDGE_FLUSH_N BIT(12)
+#define PARF_INT_ALL_LINK_UP BIT(13)
+#define PARF_INT_ALL_AER_LEGACY BIT(14)
+#define PARF_INT_ALL_PLS_ERR BIT(15)
+#define PARF_INT_ALL_PME_LEGACY BIT(16)
+#define PARF_INT_ALL_PLS_PME BIT(17)
+#define PARF_INT_ALL_EDMA BIT(22)
+
+/* PARF_BDF_TO_SID_CFG register fields */
+#define PARF_BDF_TO_SID_BYPASS BIT(0)
+
+/* PARF_DEBUG_INT_EN register fields */
+#define PARF_DEBUG_INT_PM_DSTATE_CHANGE BIT(1)
+#define PARF_DEBUG_INT_CFG_BUS_MASTER_EN BIT(2)
+#define PARF_DEBUG_INT_RADM_PM_TURNOFF BIT(3)
+
+/* PARF_NO_SNOOP_OVERRIDE register fields */
+#define WR_NO_SNOOP_OVERRIDE_EN BIT(1)
+#define RD_NO_SNOOP_OVERRIDE_EN BIT(3)
+
+/* PARF_DEVICE_TYPE register fields */
+#define PARF_DEVICE_TYPE_EP 0x0
+
+/* PARF_PM_CTRL register fields */
+#define PARF_PM_CTRL_REQ_EXIT_L1 BIT(1)
+#define PARF_PM_CTRL_READY_ENTR_L23 BIT(2)
+#define PARF_PM_CTRL_REQ_NOT_ENTR_L1 BIT(5)
+
+/* PARF_MHI_CLOCK_RESET_CTRL fields */
+#define PARF_MSTR_AXI_CLK_EN BIT(1)
+
+/* PARF_AXI_MSTR_RD_HALT_NO_WRITES register fields */
+#define PARF_AXI_MSTR_RD_HALT_NO_WRITE_EN BIT(0)
+
+/* PARF_AXI_MSTR_WR_ADDR_HALT register fields */
+#define PARF_AXI_MSTR_WR_ADDR_HALT_EN BIT(31)
+
+/* PARF_Q2A_FLUSH register fields */
+#define PARF_Q2A_FLUSH_EN BIT(16)
+
+/* PARF_SYS_CTRL register fields */
+#define PARF_SYS_CTRL_AUX_PWR_DET BIT(4)
+#define PARF_SYS_CTRL_CORE_CLK_CGC_DIS BIT(6)
+#define PARF_SYS_CTRL_MSTR_ACLK_CGC_DIS BIT(10)
+#define PARF_SYS_CTRL_SLV_DBI_WAKE_DISABLE BIT(11)
+
+/* PARF_DB_CTRL register fields */
+#define PARF_DB_CTRL_INSR_DBNCR_BLOCK BIT(0)
+#define PARF_DB_CTRL_RMVL_DBNCR_BLOCK BIT(1)
+#define PARF_DB_CTRL_DBI_WKP_BLOCK BIT(4)
+#define PARF_DB_CTRL_SLV_WKP_BLOCK BIT(5)
+#define PARF_DB_CTRL_MST_WKP_BLOCK BIT(6)
+
+/* PARF_CFG_BITS register fields */
+#define PARF_CFG_BITS_REQ_EXIT_L1SS_MSI_LTR_EN BIT(1)
+
+/* PARF_INT_ALL_5_MASK fields */
+#define PARF_INT_ALL_5_MHI_RAM_DATA_PARITY_ERR BIT(0)
+
+/* PARF_INT_ALL_3_MASK fields */
+#define PARF_INT_ALL_3_PTM_UPDATING BIT(4)
+
+/* ELBI registers */
+#define ELBI_SYS_STTS 0x08
+#define ELBI_CS2_ENABLE 0xa4
+
+/* DBI registers */
+#define DBI_CON_STATUS 0x44
+
+/* DBI register fields */
+#define DBI_CON_STATUS_POWER_STATE_MASK GENMASK(1, 0)
+
+#define XMLH_LINK_UP 0x400
+#define CORE_RESET_TIME_US_MIN 1000
+#define CORE_RESET_TIME_US_MAX 1005
+#define WAKE_DELAY_US 2000 /* 2 ms */
+
+#define QCOM_PCIE_LINK_SPEED_TO_BW(speed) \
+ Mbps_to_icc(PCIE_SPEED2MBS_ENC(pcie_link_speed[speed]))
+
+#define to_pcie_ep(x) dev_get_drvdata((x)->dev)
+
+enum qcom_pcie_ep_link_status {
+ QCOM_PCIE_EP_LINK_DISABLED,
+ QCOM_PCIE_EP_LINK_ENABLED,
+ QCOM_PCIE_EP_LINK_UP,
+ QCOM_PCIE_EP_LINK_DOWN,
+};
+
+/**
+ * struct qcom_pcie_ep_cfg - Per SoC config struct
+ * @hdma_support: HDMA support on this SoC
+ * @override_no_snoop: Override NO_SNOOP attribute in TLP to enable cache snooping
+ * @disable_mhi_ram_parity_check: Disable MHI RAM data parity error check
+ */
+struct qcom_pcie_ep_cfg {
+ bool hdma_support;
+ bool override_no_snoop;
+ bool disable_mhi_ram_parity_check;
+};
+
+/**
+ * struct qcom_pcie_ep - Qualcomm PCIe Endpoint Controller
+ * @pci: Designware PCIe controller struct
+ * @parf: Qualcomm PCIe specific PARF register base
+ * @mmio: MMIO register base
+ * @perst_map: PERST regmap
+ * @mmio_res: MMIO region resource
+ * @core_reset: PCIe Endpoint core reset
+ * @reset: PERST# GPIO
+ * @wake: WAKE# GPIO
+ * @phy: PHY controller block
+ * @debugfs: PCIe Endpoint Debugfs directory
+ * @icc_mem: Handle to an interconnect path between PCIe and MEM
+ * @clks: PCIe clocks
+ * @num_clks: PCIe clocks count
+ * @perst_en: Flag for PERST enable
+ * @perst_sep_en: Flag for PERST separation enable
+ * @cfg: PCIe EP config struct
+ * @link_status: PCIe Link status
+ * @global_irq: Qualcomm PCIe specific Global IRQ
+ * @perst_irq: PERST# IRQ
+ */
+struct qcom_pcie_ep {
+ struct dw_pcie pci;
+
+ void __iomem *parf;
+ void __iomem *mmio;
+ struct regmap *perst_map;
+ struct resource *mmio_res;
+
+ struct reset_control *core_reset;
+ struct gpio_desc *reset;
+ struct gpio_desc *wake;
+ struct phy *phy;
+ struct dentry *debugfs;
+
+ struct icc_path *icc_mem;
+
+ struct clk_bulk_data *clks;
+ int num_clks;
+
+ u32 perst_en;
+ u32 perst_sep_en;
+
+ const struct qcom_pcie_ep_cfg *cfg;
+ enum qcom_pcie_ep_link_status link_status;
+ int global_irq;
+ int perst_irq;
+};
+
+static int qcom_pcie_ep_core_reset(struct qcom_pcie_ep *pcie_ep)
+{
+ struct dw_pcie *pci = &pcie_ep->pci;
+ struct device *dev = pci->dev;
+ int ret;
+
+ ret = reset_control_assert(pcie_ep->core_reset);
+ if (ret) {
+ dev_err(dev, "Cannot assert core reset\n");
+ return ret;
+ }
+
+ usleep_range(CORE_RESET_TIME_US_MIN, CORE_RESET_TIME_US_MAX);
+
+ ret = reset_control_deassert(pcie_ep->core_reset);
+ if (ret) {
+ dev_err(dev, "Cannot de-assert core reset\n");
+ return ret;
+ }
+
+ usleep_range(CORE_RESET_TIME_US_MIN, CORE_RESET_TIME_US_MAX);
+
+ return 0;
+}
+
+/*
+ * Delatch PERST_EN and PERST_SEPARATION_ENABLE with TCSR to avoid
+ * device reset during host reboot and hibernation. The driver is
+ * expected to handle this situation.
+ */
+static void qcom_pcie_ep_configure_tcsr(struct qcom_pcie_ep *pcie_ep)
+{
+ if (pcie_ep->perst_map) {
+ regmap_write(pcie_ep->perst_map, pcie_ep->perst_en, 0);
+ regmap_write(pcie_ep->perst_map, pcie_ep->perst_sep_en, 0);
+ }
+}
+
+static bool qcom_pcie_dw_link_up(struct dw_pcie *pci)
+{
+ u32 reg;
+
+ reg = readl_relaxed(pci->elbi_base + ELBI_SYS_STTS);
+
+ return reg & XMLH_LINK_UP;
+}
+
+static int qcom_pcie_dw_start_link(struct dw_pcie *pci)
+{
+ struct qcom_pcie_ep *pcie_ep = to_pcie_ep(pci);
+
+ enable_irq(pcie_ep->perst_irq);
+
+ return 0;
+}
+
+static void qcom_pcie_dw_stop_link(struct dw_pcie *pci)
+{
+ struct qcom_pcie_ep *pcie_ep = to_pcie_ep(pci);
+
+ disable_irq(pcie_ep->perst_irq);
+}
+
+static void qcom_pcie_dw_write_dbi2(struct dw_pcie *pci, void __iomem *base,
+ u32 reg, size_t size, u32 val)
+{
+ int ret;
+
+ writel(1, pci->elbi_base + ELBI_CS2_ENABLE);
+
+ ret = dw_pcie_write(pci->dbi_base2 + reg, size, val);
+ if (ret)
+ dev_err(pci->dev, "Failed to write DBI2 register (0x%x): %d\n", reg, ret);
+
+ writel(0, pci->elbi_base + ELBI_CS2_ENABLE);
+}
+
+static void qcom_pcie_ep_icc_update(struct qcom_pcie_ep *pcie_ep)
+{
+ struct dw_pcie *pci = &pcie_ep->pci;
+ u32 offset, status;
+ int speed, width;
+ int ret;
+
+ if (!pcie_ep->icc_mem)
+ return;
+
+ offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ status = readw(pci->dbi_base + offset + PCI_EXP_LNKSTA);
+
+ speed = FIELD_GET(PCI_EXP_LNKSTA_CLS, status);
+ width = FIELD_GET(PCI_EXP_LNKSTA_NLW, status);
+
+ ret = icc_set_bw(pcie_ep->icc_mem, 0, width * QCOM_PCIE_LINK_SPEED_TO_BW(speed));
+ if (ret)
+ dev_err(pci->dev, "failed to set interconnect bandwidth: %d\n",
+ ret);
+}
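To put numbers on the bandwidth vote: assuming PCIE_SPEED2MBS_ENC() from <linux/pci.h> (encoding-adjusted Mb/s) and Mbps_to_icc() from <linux/interconnect.h> (Mb/s to kBps), a Gen 1 x1 link works out as below. This is an illustrative calculation, not driver code.

	/*
	 * QCOM_PCIE_LINK_SPEED_TO_BW(1)
	 *   = Mbps_to_icc(PCIE_SPEED2MBS_ENC(PCIE_SPEED_2_5GT))
	 *   = Mbps_to_icc(2000)        (2.5 GT/s less 8b/10b encoding overhead)
	 *   = 2000 * 1000 / 8
	 *   = 250000 kBps (~250 MB/s per lane)
	 *
	 * qcom_pcie_ep_icc_update() then scales the vote by the negotiated width.
	 */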
+
+static int qcom_pcie_enable_resources(struct qcom_pcie_ep *pcie_ep)
+{
+ struct dw_pcie *pci = &pcie_ep->pci;
+ int ret;
+
+ ret = clk_bulk_prepare_enable(pcie_ep->num_clks, pcie_ep->clks);
+ if (ret)
+ return ret;
+
+ ret = qcom_pcie_ep_core_reset(pcie_ep);
+ if (ret)
+ goto err_disable_clk;
+
+ ret = phy_init(pcie_ep->phy);
+ if (ret)
+ goto err_disable_clk;
+
+ ret = phy_set_mode_ext(pcie_ep->phy, PHY_MODE_PCIE, PHY_MODE_PCIE_EP);
+ if (ret)
+ goto err_phy_exit;
+
+ ret = phy_power_on(pcie_ep->phy);
+ if (ret)
+ goto err_phy_exit;
+
+ /*
+ * Some Qualcomm platforms require interconnect bandwidth constraints
+ * to be set before enabling interconnect clocks.
+ *
+ * Set an initial peak bandwidth corresponding to single-lane Gen 1
+ * for the pcie-mem path.
+ */
+ ret = icc_set_bw(pcie_ep->icc_mem, 0, QCOM_PCIE_LINK_SPEED_TO_BW(1));
+ if (ret) {
+ dev_err(pci->dev, "failed to set interconnect bandwidth: %d\n",
+ ret);
+ goto err_phy_off;
+ }
+
+ return 0;
+
+err_phy_off:
+ phy_power_off(pcie_ep->phy);
+err_phy_exit:
+ phy_exit(pcie_ep->phy);
+err_disable_clk:
+ clk_bulk_disable_unprepare(pcie_ep->num_clks, pcie_ep->clks);
+
+ return ret;
+}
+
+static void qcom_pcie_disable_resources(struct qcom_pcie_ep *pcie_ep)
+{
+ icc_set_bw(pcie_ep->icc_mem, 0, 0);
+ phy_power_off(pcie_ep->phy);
+ phy_exit(pcie_ep->phy);
+ clk_bulk_disable_unprepare(pcie_ep->num_clks, pcie_ep->clks);
+}
+
+static int qcom_pcie_perst_deassert(struct dw_pcie *pci)
+{
+ struct qcom_pcie_ep *pcie_ep = to_pcie_ep(pci);
+ struct device *dev = pci->dev;
+ u32 val, offset;
+ int ret;
+
+ ret = qcom_pcie_enable_resources(pcie_ep);
+ if (ret) {
+ dev_err(dev, "Failed to enable resources: %d\n", ret);
+ return ret;
+ }
+
+ /* Perform cleanup that requires refclk */
+ pci_epc_deinit_notify(pci->ep.epc);
+ dw_pcie_ep_cleanup(&pci->ep);
+
+ /* Assert WAKE# to RC to indicate device is ready */
+ gpiod_set_value_cansleep(pcie_ep->wake, 1);
+ usleep_range(WAKE_DELAY_US, WAKE_DELAY_US + 500);
+ gpiod_set_value_cansleep(pcie_ep->wake, 0);
+
+ qcom_pcie_ep_configure_tcsr(pcie_ep);
+
+ /* Disable BDF to SID mapping */
+ val = readl_relaxed(pcie_ep->parf + PARF_BDF_TO_SID_CFG);
+ val |= PARF_BDF_TO_SID_BYPASS;
+ writel_relaxed(val, pcie_ep->parf + PARF_BDF_TO_SID_CFG);
+
+ /* Enable debug IRQ */
+ val = readl_relaxed(pcie_ep->parf + PARF_DEBUG_INT_EN);
+ val |= PARF_DEBUG_INT_RADM_PM_TURNOFF |
+ PARF_DEBUG_INT_CFG_BUS_MASTER_EN |
+ PARF_DEBUG_INT_PM_DSTATE_CHANGE;
+ writel_relaxed(val, pcie_ep->parf + PARF_DEBUG_INT_EN);
+
+ /* Configure PCIe to endpoint mode */
+ writel_relaxed(PARF_DEVICE_TYPE_EP, pcie_ep->parf + PARF_DEVICE_TYPE);
+
+ /* Allow entering L1 state */
+ val = readl_relaxed(pcie_ep->parf + PARF_PM_CTRL);
+ val &= ~PARF_PM_CTRL_REQ_NOT_ENTR_L1;
+ writel_relaxed(val, pcie_ep->parf + PARF_PM_CTRL);
+
+ /* Read halts write */
+ val = readl_relaxed(pcie_ep->parf + PARF_AXI_MSTR_RD_HALT_NO_WRITES);
+ val &= ~PARF_AXI_MSTR_RD_HALT_NO_WRITE_EN;
+ writel_relaxed(val, pcie_ep->parf + PARF_AXI_MSTR_RD_HALT_NO_WRITES);
+
+ /* Write after write halt */
+ val = readl_relaxed(pcie_ep->parf + PARF_AXI_MSTR_WR_ADDR_HALT);
+ val |= PARF_AXI_MSTR_WR_ADDR_HALT_EN;
+ writel_relaxed(val, pcie_ep->parf + PARF_AXI_MSTR_WR_ADDR_HALT);
+
+ /* Q2A flush disable */
+ val = readl_relaxed(pcie_ep->parf + PARF_Q2A_FLUSH);
+ val &= ~PARF_Q2A_FLUSH_EN;
+ writel_relaxed(val, pcie_ep->parf + PARF_Q2A_FLUSH);
+
+ /*
+ * Disable Master AXI clock during idle. Do not allow DBI access
+ * to take the core out of L1. Disable core clock gating that
+ * gates PIPE clock from propagating to core clock. Report to the
+ * host that Vaux is present.
+ */
+ val = readl_relaxed(pcie_ep->parf + PARF_SYS_CTRL);
+ val &= ~PARF_SYS_CTRL_MSTR_ACLK_CGC_DIS;
+ val |= PARF_SYS_CTRL_SLV_DBI_WAKE_DISABLE |
+ PARF_SYS_CTRL_CORE_CLK_CGC_DIS |
+ PARF_SYS_CTRL_AUX_PWR_DET;
+ writel_relaxed(val, pcie_ep->parf + PARF_SYS_CTRL);
+
+ /* Disable the debouncers */
+ val = readl_relaxed(pcie_ep->parf + PARF_DB_CTRL);
+ val |= PARF_DB_CTRL_INSR_DBNCR_BLOCK | PARF_DB_CTRL_RMVL_DBNCR_BLOCK |
+ PARF_DB_CTRL_DBI_WKP_BLOCK | PARF_DB_CTRL_SLV_WKP_BLOCK |
+ PARF_DB_CTRL_MST_WKP_BLOCK;
+ writel_relaxed(val, pcie_ep->parf + PARF_DB_CTRL);
+
+ /* Request to exit from L1SS for MSI and LTR MSG */
+ val = readl_relaxed(pcie_ep->parf + PARF_CFG_BITS);
+ val |= PARF_CFG_BITS_REQ_EXIT_L1SS_MSI_LTR_EN;
+ writel_relaxed(val, pcie_ep->parf + PARF_CFG_BITS);
+
+ dw_pcie_dbi_ro_wr_en(pci);
+
+ /* Set the L0s Exit Latency to 2us-4us = 0x6 */
+ offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ val = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
+ val &= ~PCI_EXP_LNKCAP_L0SEL;
+ val |= FIELD_PREP(PCI_EXP_LNKCAP_L0SEL, 0x6);
+ dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, val);
+
+	/* Set the L1 Exit Latency to be 32us-64us = 0x6 */
+ offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ val = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
+ val &= ~PCI_EXP_LNKCAP_L1EL;
+ val |= FIELD_PREP(PCI_EXP_LNKCAP_L1EL, 0x6);
+ dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, val);
+
+ dw_pcie_dbi_ro_wr_dis(pci);
+
+ writel_relaxed(0, pcie_ep->parf + PARF_INT_ALL_MASK);
+ val = PARF_INT_ALL_LINK_DOWN | PARF_INT_ALL_BME |
+ PARF_INT_ALL_PM_TURNOFF | PARF_INT_ALL_DSTATE_CHANGE |
+ PARF_INT_ALL_LINK_UP | PARF_INT_ALL_EDMA;
+ writel_relaxed(val, pcie_ep->parf + PARF_INT_ALL_MASK);
+
+ if (pcie_ep->cfg && pcie_ep->cfg->disable_mhi_ram_parity_check) {
+ val = readl_relaxed(pcie_ep->parf + PARF_INT_ALL_5_MASK);
+ val &= ~PARF_INT_ALL_5_MHI_RAM_DATA_PARITY_ERR;
+ writel_relaxed(val, pcie_ep->parf + PARF_INT_ALL_5_MASK);
+ }
+
+ val = readl_relaxed(pcie_ep->parf + PARF_INT_ALL_3_MASK);
+ val &= ~PARF_INT_ALL_3_PTM_UPDATING;
+ writel_relaxed(val, pcie_ep->parf + PARF_INT_ALL_3_MASK);
+
+ ret = dw_pcie_ep_init_registers(&pcie_ep->pci.ep);
+ if (ret) {
+ dev_err(dev, "Failed to complete initialization: %d\n", ret);
+ goto err_disable_resources;
+ }
+
+ qcom_pcie_common_set_equalization(pci);
+
+ if (pcie_link_speed[pci->max_link_speed] == PCIE_SPEED_16_0GT)
+ qcom_pcie_common_set_16gt_lane_margining(pci);
+
+ /*
+ * The physical address of the MMIO region which is exposed as the BAR
+ * should be written to MHI BASE registers.
+ */
+ writel_relaxed(pcie_ep->mmio_res->start,
+ pcie_ep->parf + PARF_MHI_BASE_ADDR_LOWER);
+ writel_relaxed(0, pcie_ep->parf + PARF_MHI_BASE_ADDR_UPPER);
+
+ /* Gate Master AXI clock to MHI bus during L1SS */
+ val = readl_relaxed(pcie_ep->parf + PARF_MHI_CLOCK_RESET_CTRL);
+ val &= ~PARF_MSTR_AXI_CLK_EN;
+ writel_relaxed(val, pcie_ep->parf + PARF_MHI_CLOCK_RESET_CTRL);
+
+ pci_epc_init_notify(pcie_ep->pci.ep.epc);
+
+ /* Enable LTSSM */
+ val = readl_relaxed(pcie_ep->parf + PARF_LTSSM);
+ val |= BIT(8);
+ writel_relaxed(val, pcie_ep->parf + PARF_LTSSM);
+
+ if (pcie_ep->cfg && pcie_ep->cfg->override_no_snoop)
+ writel_relaxed(WR_NO_SNOOP_OVERRIDE_EN | RD_NO_SNOOP_OVERRIDE_EN,
+ pcie_ep->parf + PARF_NO_SNOOP_OVERRIDE);
+
+ return 0;
+
+err_disable_resources:
+ qcom_pcie_disable_resources(pcie_ep);
+
+ return ret;
+}
+
+static void qcom_pcie_perst_assert(struct dw_pcie *pci)
+{
+ struct qcom_pcie_ep *pcie_ep = to_pcie_ep(pci);
+
+ qcom_pcie_disable_resources(pcie_ep);
+ pcie_ep->link_status = QCOM_PCIE_EP_LINK_DISABLED;
+}
+
+/* Common DWC controller ops */
+static const struct dw_pcie_ops pci_ops = {
+ .link_up = qcom_pcie_dw_link_up,
+ .start_link = qcom_pcie_dw_start_link,
+ .stop_link = qcom_pcie_dw_stop_link,
+ .write_dbi2 = qcom_pcie_dw_write_dbi2,
+};
+
+static int qcom_pcie_ep_get_io_resources(struct platform_device *pdev,
+ struct qcom_pcie_ep *pcie_ep)
+{
+ struct device *dev = &pdev->dev;
+ struct dw_pcie *pci = &pcie_ep->pci;
+ struct device_node *syscon;
+ struct resource *res;
+ int ret;
+
+ pcie_ep->parf = devm_platform_ioremap_resource_byname(pdev, "parf");
+ if (IS_ERR(pcie_ep->parf))
+ return PTR_ERR(pcie_ep->parf);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
+ pci->dbi_base = devm_pci_remap_cfg_resource(dev, res);
+ if (IS_ERR(pci->dbi_base))
+ return PTR_ERR(pci->dbi_base);
+ pci->dbi_base2 = pci->dbi_base;
+
+ pcie_ep->mmio_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "mmio");
+ if (!pcie_ep->mmio_res) {
+ dev_err(dev, "Failed to get mmio resource\n");
+ return -EINVAL;
+ }
+
+ pcie_ep->mmio = devm_pci_remap_cfg_resource(dev, pcie_ep->mmio_res);
+ if (IS_ERR(pcie_ep->mmio))
+ return PTR_ERR(pcie_ep->mmio);
+
+ syscon = of_parse_phandle(dev->of_node, "qcom,perst-regs", 0);
+ if (!syscon) {
+ dev_dbg(dev, "PERST separation not available\n");
+ return 0;
+ }
+
+ pcie_ep->perst_map = syscon_node_to_regmap(syscon);
+ of_node_put(syscon);
+ if (IS_ERR(pcie_ep->perst_map))
+ return PTR_ERR(pcie_ep->perst_map);
+
+ ret = of_property_read_u32_index(dev->of_node, "qcom,perst-regs",
+ 1, &pcie_ep->perst_en);
+ if (ret < 0) {
+ dev_err(dev, "No Perst Enable offset in syscon\n");
+ return ret;
+ }
+
+ ret = of_property_read_u32_index(dev->of_node, "qcom,perst-regs",
+ 2, &pcie_ep->perst_sep_en);
+ if (ret < 0) {
+ dev_err(dev, "No Perst Separation Enable offset in syscon\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int qcom_pcie_ep_get_resources(struct platform_device *pdev,
+ struct qcom_pcie_ep *pcie_ep)
+{
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ ret = qcom_pcie_ep_get_io_resources(pdev, pcie_ep);
+ if (ret) {
+ dev_err(dev, "Failed to get io resources %d\n", ret);
+ return ret;
+ }
+
+ pcie_ep->num_clks = devm_clk_bulk_get_all(dev, &pcie_ep->clks);
+ if (pcie_ep->num_clks < 0) {
+ dev_err(dev, "Failed to get clocks\n");
+ return pcie_ep->num_clks;
+ }
+
+ pcie_ep->core_reset = devm_reset_control_get_exclusive(dev, "core");
+ if (IS_ERR(pcie_ep->core_reset))
+ return PTR_ERR(pcie_ep->core_reset);
+
+ pcie_ep->reset = devm_gpiod_get(dev, "reset", GPIOD_IN);
+ if (IS_ERR(pcie_ep->reset))
+ return PTR_ERR(pcie_ep->reset);
+
+ pcie_ep->wake = devm_gpiod_get_optional(dev, "wake", GPIOD_OUT_LOW);
+ if (IS_ERR(pcie_ep->wake))
+ return PTR_ERR(pcie_ep->wake);
+
+ pcie_ep->phy = devm_phy_optional_get(dev, "pciephy");
+ if (IS_ERR(pcie_ep->phy))
+ ret = PTR_ERR(pcie_ep->phy);
+
+ pcie_ep->icc_mem = devm_of_icc_get(dev, "pcie-mem");
+ if (IS_ERR(pcie_ep->icc_mem))
+ ret = PTR_ERR(pcie_ep->icc_mem);
+
+ return ret;
+}
+
+/* TODO: Notify clients about PCIe state change */
+static irqreturn_t qcom_pcie_ep_global_irq_thread(int irq, void *data)
+{
+ struct qcom_pcie_ep *pcie_ep = data;
+ struct dw_pcie *pci = &pcie_ep->pci;
+ struct device *dev = pci->dev;
+ u32 status = readl_relaxed(pcie_ep->parf + PARF_INT_ALL_STATUS);
+ u32 dstate, val;
+
+ writel_relaxed(status, pcie_ep->parf + PARF_INT_ALL_CLEAR);
+
+ if (FIELD_GET(PARF_INT_ALL_LINK_DOWN, status)) {
+ dev_dbg(dev, "Received Linkdown event\n");
+ pcie_ep->link_status = QCOM_PCIE_EP_LINK_DOWN;
+ dw_pcie_ep_linkdown(&pci->ep);
+ } else if (FIELD_GET(PARF_INT_ALL_BME, status)) {
+ dev_dbg(dev, "Received Bus Master Enable event\n");
+ pcie_ep->link_status = QCOM_PCIE_EP_LINK_ENABLED;
+ qcom_pcie_ep_icc_update(pcie_ep);
+ pci_epc_bus_master_enable_notify(pci->ep.epc);
+ } else if (FIELD_GET(PARF_INT_ALL_PM_TURNOFF, status)) {
+ dev_dbg(dev, "Received PM Turn-off event! Entering L23\n");
+ val = readl_relaxed(pcie_ep->parf + PARF_PM_CTRL);
+ val |= PARF_PM_CTRL_READY_ENTR_L23;
+ writel_relaxed(val, pcie_ep->parf + PARF_PM_CTRL);
+ } else if (FIELD_GET(PARF_INT_ALL_DSTATE_CHANGE, status)) {
+ dstate = dw_pcie_readl_dbi(pci, DBI_CON_STATUS) &
+ DBI_CON_STATUS_POWER_STATE_MASK;
+ dev_dbg(dev, "Received D%d state event\n", dstate);
+ if (dstate == 3) {
+ val = readl_relaxed(pcie_ep->parf + PARF_PM_CTRL);
+ val |= PARF_PM_CTRL_REQ_EXIT_L1;
+ writel_relaxed(val, pcie_ep->parf + PARF_PM_CTRL);
+ }
+ } else if (FIELD_GET(PARF_INT_ALL_LINK_UP, status)) {
+ dev_dbg(dev, "Received Linkup event. Enumeration complete!\n");
+ dw_pcie_ep_linkup(&pci->ep);
+ pcie_ep->link_status = QCOM_PCIE_EP_LINK_UP;
+ } else {
+ dev_WARN_ONCE(dev, 1, "Received unknown event. INT_STATUS: 0x%08x\n",
+ status);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t qcom_pcie_ep_perst_irq_thread(int irq, void *data)
+{
+ struct qcom_pcie_ep *pcie_ep = data;
+ struct dw_pcie *pci = &pcie_ep->pci;
+ struct device *dev = pci->dev;
+ u32 perst;
+
+ perst = gpiod_get_value(pcie_ep->reset);
+ if (perst) {
+ dev_dbg(dev, "PERST asserted by host. Shutting down the PCIe link!\n");
+ qcom_pcie_perst_assert(pci);
+ } else {
+ dev_dbg(dev, "PERST de-asserted by host. Starting link training!\n");
+ qcom_pcie_perst_deassert(pci);
+ }
+
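+ /* Reconfigure the IRQ trigger level so the next PERST transition is caught */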
+ irq_set_irq_type(gpiod_to_irq(pcie_ep->reset),
+ (perst ? IRQF_TRIGGER_HIGH : IRQF_TRIGGER_LOW));
+
+ return IRQ_HANDLED;
+}
+
+static int qcom_pcie_ep_enable_irq_resources(struct platform_device *pdev,
+ struct qcom_pcie_ep *pcie_ep)
+{
+ struct device *dev = pcie_ep->pci.dev;
+ char *name;
+ int ret;
+
+ name = devm_kasprintf(dev, GFP_KERNEL, "qcom_pcie_ep_global_irq%d",
+ pcie_ep->pci.ep.epc->domain_nr);
+ if (!name)
+ return -ENOMEM;
+
+ pcie_ep->global_irq = platform_get_irq_byname(pdev, "global");
+ if (pcie_ep->global_irq < 0)
+ return pcie_ep->global_irq;
+
+ ret = devm_request_threaded_irq(&pdev->dev, pcie_ep->global_irq, NULL,
+ qcom_pcie_ep_global_irq_thread,
+ IRQF_ONESHOT,
+ name, pcie_ep);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to request Global IRQ\n");
+ return ret;
+ }
+
+ name = devm_kasprintf(dev, GFP_KERNEL, "qcom_pcie_ep_perst_irq%d",
+ pcie_ep->pci.ep.epc->domain_nr);
+ if (!name)
+ return -ENOMEM;
+
+ pcie_ep->perst_irq = gpiod_to_irq(pcie_ep->reset);
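+ /* Keep the PERST IRQ disabled (IRQ_NOAUTOEN) until start_link() enables it */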
+ irq_set_status_flags(pcie_ep->perst_irq, IRQ_NOAUTOEN);
+ ret = devm_request_threaded_irq(&pdev->dev, pcie_ep->perst_irq, NULL,
+ qcom_pcie_ep_perst_irq_thread,
+ IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+ name, pcie_ep);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to request PERST IRQ\n");
+ disable_irq(pcie_ep->global_irq);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int qcom_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
+ unsigned int type, u16 interrupt_num)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+ switch (type) {
+ case PCI_IRQ_INTX:
+ return dw_pcie_ep_raise_intx_irq(ep, func_no);
+ case PCI_IRQ_MSI:
+ return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
+ default:
+ dev_err(pci->dev, "Unknown IRQ type\n");
+ return -EINVAL;
+ }
+}
+
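+/* Expose the PARF PM link-state transition counters through debugfs */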
+static int qcom_pcie_ep_link_transition_count(struct seq_file *s, void *data)
+{
+ struct qcom_pcie_ep *pcie_ep = (struct qcom_pcie_ep *)
+ dev_get_drvdata(s->private);
+
+ seq_printf(s, "L0s transition count: %u\n",
+ readl_relaxed(pcie_ep->mmio + PARF_DEBUG_CNT_PM_LINKST_IN_L0S));
+
+ seq_printf(s, "L1 transition count: %u\n",
+ readl_relaxed(pcie_ep->mmio + PARF_DEBUG_CNT_PM_LINKST_IN_L1));
+
+ seq_printf(s, "L1.1 transition count: %u\n",
+ readl_relaxed(pcie_ep->mmio + PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L1));
+
+ seq_printf(s, "L1.2 transition count: %u\n",
+ readl_relaxed(pcie_ep->mmio + PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L2));
+
+ seq_printf(s, "L2 transition count: %u\n",
+ readl_relaxed(pcie_ep->mmio + PARF_DEBUG_CNT_PM_LINKST_IN_L2));
+
+ return 0;
+}
+
+static void qcom_pcie_ep_init_debugfs(struct qcom_pcie_ep *pcie_ep)
+{
+ struct dw_pcie *pci = &pcie_ep->pci;
+
+ debugfs_create_devm_seqfile(pci->dev, "link_transition_count", pcie_ep->debugfs,
+ qcom_pcie_ep_link_transition_count);
+}
+
+static const struct pci_epc_features qcom_pcie_epc_features = {
+ .linkup_notifier = true,
+ .msi_capable = true,
+ .align = SZ_4K,
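+ /* A 64-bit BAR consumes the following BAR register, so BAR 1 and BAR 3 are reserved */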
+ .bar[BAR_0] = { .only_64bit = true, },
+ .bar[BAR_1] = { .type = BAR_RESERVED, },
+ .bar[BAR_2] = { .only_64bit = true, },
+ .bar[BAR_3] = { .type = BAR_RESERVED, },
+};
+
+static const struct pci_epc_features *
+qcom_pcie_epc_get_features(struct dw_pcie_ep *pci_ep)
+{
+ return &qcom_pcie_epc_features;
+}
+
+static void qcom_pcie_ep_init(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ enum pci_barno bar;
+
+ for (bar = BAR_0; bar <= BAR_5; bar++)
+ dw_pcie_ep_reset_bar(pci, bar);
+}
+
+static const struct dw_pcie_ep_ops pci_ep_ops = {
+ .init = qcom_pcie_ep_init,
+ .raise_irq = qcom_pcie_ep_raise_irq,
+ .get_features = qcom_pcie_epc_get_features,
+};
+
+static int qcom_pcie_ep_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct qcom_pcie_ep *pcie_ep;
+ char *name;
+ int ret;
+
+ pcie_ep = devm_kzalloc(dev, sizeof(*pcie_ep), GFP_KERNEL);
+ if (!pcie_ep)
+ return -ENOMEM;
+
+ pcie_ep->pci.dev = dev;
+ pcie_ep->pci.ops = &pci_ops;
+ pcie_ep->pci.ep.ops = &pci_ep_ops;
+
+ pcie_ep->cfg = of_device_get_match_data(dev);
+ if (pcie_ep->cfg && pcie_ep->cfg->hdma_support) {
+ pcie_ep->pci.edma.ll_wr_cnt = 8;
+ pcie_ep->pci.edma.ll_rd_cnt = 8;
+ pcie_ep->pci.edma.mf = EDMA_MF_HDMA_NATIVE;
+ }
+
+ platform_set_drvdata(pdev, pcie_ep);
+
+ ret = qcom_pcie_ep_get_resources(pdev, pcie_ep);
+ if (ret)
+ return ret;
+
+ ret = dw_pcie_ep_init(&pcie_ep->pci.ep);
+ if (ret) {
+ dev_err(dev, "Failed to initialize endpoint: %d\n", ret);
+ return ret;
+ }
+
+ ret = qcom_pcie_ep_enable_irq_resources(pdev, pcie_ep);
+ if (ret)
+ goto err_ep_deinit;
+
+ name = devm_kasprintf(dev, GFP_KERNEL, "%pOFP", dev->of_node);
+ if (!name) {
+ ret = -ENOMEM;
+ goto err_disable_irqs;
+ }
+
+ pcie_ep->debugfs = debugfs_create_dir(name, NULL);
+ qcom_pcie_ep_init_debugfs(pcie_ep);
+
+ return 0;
+
+err_disable_irqs:
+ disable_irq(pcie_ep->global_irq);
+ disable_irq(pcie_ep->perst_irq);
+
+err_ep_deinit:
+ dw_pcie_ep_deinit(&pcie_ep->pci.ep);
+
+ return ret;
+}
+
+static void qcom_pcie_ep_remove(struct platform_device *pdev)
+{
+ struct qcom_pcie_ep *pcie_ep = platform_get_drvdata(pdev);
+
+ disable_irq(pcie_ep->global_irq);
+ disable_irq(pcie_ep->perst_irq);
+
+ debugfs_remove_recursive(pcie_ep->debugfs);
+
+ if (pcie_ep->link_status == QCOM_PCIE_EP_LINK_DISABLED)
+ return;
+
+ qcom_pcie_disable_resources(pcie_ep);
+}
+
+static const struct qcom_pcie_ep_cfg cfg_1_34_0 = {
+ .hdma_support = true,
+ .override_no_snoop = true,
+ .disable_mhi_ram_parity_check = true,
+};
+
+static const struct of_device_id qcom_pcie_ep_match[] = {
+ { .compatible = "qcom,sa8775p-pcie-ep", .data = &cfg_1_34_0},
+ { .compatible = "qcom,sdx55-pcie-ep", },
+ { .compatible = "qcom,sm8450-pcie-ep", },
+ { .compatible = "qcom,sar2130p-pcie-ep", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, qcom_pcie_ep_match);
+
+static struct platform_driver qcom_pcie_ep_driver = {
+ .probe = qcom_pcie_ep_probe,
+ .remove = qcom_pcie_ep_remove,
+ .driver = {
+ .name = "qcom-pcie-ep",
+ .of_match_table = qcom_pcie_ep_match,
+ },
+};
+builtin_platform_driver(qcom_pcie_ep_driver);
+
+MODULE_AUTHOR("Siddartha Mohanadoss <smohanad@codeaurora.org>");
+MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>");
+MODULE_DESCRIPTION("Qualcomm PCIe Endpoint controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
index d185ea5fe996..7b92e7a1c0d9 100644
--- a/drivers/pci/controller/dwc/pcie-qcom.c
+++ b/drivers/pci/controller/dwc/pcie-qcom.c
@@ -9,27 +9,84 @@
*/
#include <linux/clk.h>
+#include <linux/crc8.h>
+#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
+#include <linux/interconnect.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
+#include <linux/limits.h>
#include <linux/init.h>
-#include <linux/of_device.h>
-#include <linux/of_gpio.h>
+#include <linux/of.h>
+#include <linux/of_pci.h>
#include <linux/pci.h>
+#include <linux/pci-ecam.h>
+#include <linux/pm_opp.h>
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
+#include <linux/phy/pcie.h>
#include <linux/phy/phy.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/types.h>
+#include <linux/units.h>
+#include "../../pci.h"
+#include "../pci-host-common.h"
#include "pcie-designware.h"
-
-#define PCIE20_PARF_SYS_CTRL 0x00
+#include "pcie-qcom-common.h"
+
+/* PARF registers */
+#define PARF_SYS_CTRL 0x00
+#define PARF_PM_CTRL 0x20
+#define PARF_PCS_DEEMPH 0x34
+#define PARF_PCS_SWING 0x38
+#define PARF_PHY_CTRL 0x40
+#define PARF_PHY_REFCLK 0x4c
+#define PARF_CONFIG_BITS 0x50
+#define PARF_DBI_BASE_ADDR 0x168
+#define PARF_SLV_ADDR_SPACE_SIZE 0x16c
+#define PARF_MHI_CLOCK_RESET_CTRL 0x174
+#define PARF_AXI_MSTR_WR_ADDR_HALT 0x178
+#define PARF_AXI_MSTR_WR_ADDR_HALT_V2 0x1a8
+#define PARF_Q2A_FLUSH 0x1ac
+#define PARF_LTSSM 0x1b0
+#define PARF_INT_ALL_STATUS 0x224
+#define PARF_INT_ALL_CLEAR 0x228
+#define PARF_INT_ALL_MASK 0x22c
+#define PARF_SID_OFFSET 0x234
+#define PARF_BDF_TRANSLATE_CFG 0x24c
+#define PARF_DBI_BASE_ADDR_V2 0x350
+#define PARF_DBI_BASE_ADDR_V2_HI 0x354
+#define PARF_SLV_ADDR_SPACE_SIZE_V2 0x358
+#define PARF_SLV_ADDR_SPACE_SIZE_V2_HI 0x35c
+#define PARF_NO_SNOOP_OVERRIDE 0x3d4
+#define PARF_ATU_BASE_ADDR 0x634
+#define PARF_ATU_BASE_ADDR_HI 0x638
+#define PARF_DEVICE_TYPE 0x1000
+#define PARF_BDF_TO_SID_TABLE_N 0x2000
+#define PARF_BDF_TO_SID_CFG 0x2c00
+
+/* ELBI registers */
+#define ELBI_SYS_CTRL 0x04
+
+/* DBI registers */
+#define AXI_MSTR_RESP_COMP_CTRL0 0x818
+#define AXI_MSTR_RESP_COMP_CTRL1 0x81c
+
+/* MHI registers */
+#define PARF_DEBUG_CNT_PM_LINKST_IN_L2 0xc04
+#define PARF_DEBUG_CNT_PM_LINKST_IN_L1 0xc0c
+#define PARF_DEBUG_CNT_PM_LINKST_IN_L0S 0xc10
+#define PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L1 0xc84
+#define PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L2 0xc88
+
+/* PARF_SYS_CTRL register fields */
+#define MAC_PHY_POWERDOWN_IN_P2_D_MUX_EN BIT(29)
#define MST_WAKEUP_EN BIT(13)
#define SLV_WAKEUP_EN BIT(12)
#define MSTR_ACLK_CGC_DIS BIT(10)
@@ -39,104 +96,139 @@
#define L23_CLK_RMV_DIS BIT(2)
#define L1_CLK_RMV_DIS BIT(1)
-#define PCIE20_COMMAND_STATUS 0x04
-#define CMD_BME_VAL 0x4
-#define PCIE20_DEVICE_CONTROL2_STATUS2 0x98
-#define PCIE_CAP_CPL_TIMEOUT_DISABLE 0x10
-
-#define PCIE20_PARF_PHY_CTRL 0x40
-#define PCIE20_PARF_PHY_REFCLK 0x4C
-#define PCIE20_PARF_DBI_BASE_ADDR 0x168
-#define PCIE20_PARF_SLV_ADDR_SPACE_SIZE 0x16C
-#define PCIE20_PARF_MHI_CLOCK_RESET_CTRL 0x174
-#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT 0x178
-#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2 0x1A8
-#define PCIE20_PARF_LTSSM 0x1B0
-#define PCIE20_PARF_SID_OFFSET 0x234
-#define PCIE20_PARF_BDF_TRANSLATE_CFG 0x24C
-
-#define PCIE20_ELBI_SYS_CTRL 0x04
-#define PCIE20_ELBI_SYS_CTRL_LT_ENABLE BIT(0)
-
-#define PCIE20_AXI_MSTR_RESP_COMP_CTRL0 0x818
+/* PARF_PM_CTRL register fields */
+#define REQ_NOT_ENTR_L1 BIT(5)
+
+/* PARF_PCS_DEEMPH register fields */
+#define PCS_DEEMPH_TX_DEEMPH_GEN1(x) FIELD_PREP(GENMASK(21, 16), x)
+#define PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(x) FIELD_PREP(GENMASK(13, 8), x)
+#define PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(x) FIELD_PREP(GENMASK(5, 0), x)
+
+/* PARF_PCS_SWING register fields */
+#define PCS_SWING_TX_SWING_FULL(x) FIELD_PREP(GENMASK(14, 8), x)
+#define PCS_SWING_TX_SWING_LOW(x) FIELD_PREP(GENMASK(6, 0), x)
+
+/* PARF_PHY_CTRL register fields */
+#define PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK GENMASK(20, 16)
+#define PHY_CTRL_PHY_TX0_TERM_OFFSET(x) FIELD_PREP(PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK, x)
+#define PHY_TEST_PWR_DOWN BIT(0)
+
+/* PARF_PHY_REFCLK register fields */
+#define PHY_REFCLK_SSP_EN BIT(16)
+#define PHY_REFCLK_USE_PAD BIT(12)
+
+/* PARF_CONFIG_BITS register fields */
+#define PHY_RX0_EQ(x) FIELD_PREP(GENMASK(26, 24), x)
+
+/* PARF_SLV_ADDR_SPACE_SIZE register value */
+#define SLV_ADDR_SPACE_SZ 0x80000000
+
+/* PARF_MHI_CLOCK_RESET_CTRL register fields */
+#define AHB_CLK_EN BIT(0)
+#define MSTR_AXI_CLK_EN BIT(1)
+#define BYPASS BIT(4)
+
+/* PARF_AXI_MSTR_WR_ADDR_HALT register fields */
+#define EN BIT(31)
+
+/* PARF_LTSSM register fields */
+#define LTSSM_EN BIT(8)
+
+/* PARF_INT_ALL_{STATUS/CLEAR/MASK} register fields */
+#define PARF_INT_ALL_LINK_UP BIT(13)
+#define PARF_INT_MSI_DEV_0_7 GENMASK(30, 23)
+
+/* PARF_NO_SNOOP_OVERRIDE register fields */
+#define WR_NO_SNOOP_OVERRIDE_EN BIT(1)
+#define RD_NO_SNOOP_OVERRIDE_EN BIT(3)
+
+/* PARF_DEVICE_TYPE register fields */
+#define DEVICE_TYPE_RC 0x4
+
+/* PARF_BDF_TO_SID_CFG fields */
+#define BDF_TO_SID_BYPASS BIT(0)
+
+/* ELBI_SYS_CTRL register fields */
+#define ELBI_SYS_CTRL_LT_ENABLE BIT(0)
+
+/* AXI_MSTR_RESP_COMP_CTRL0 register fields */
#define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K 0x4
#define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_4K 0x5
-#define PCIE20_AXI_MSTR_RESP_COMP_CTRL1 0x81c
-#define CFG_BRIDGE_SB_INIT BIT(0)
-
-#define PCIE20_CAP 0x70
-#define PCIE20_CAP_LINK_CAPABILITIES (PCIE20_CAP + 0xC)
-#define PCIE20_CAP_ACTIVE_STATE_LINK_PM_SUPPORT (BIT(10) | BIT(11))
-#define PCIE20_CAP_LINK_1 (PCIE20_CAP + 0x14)
-#define PCIE_CAP_LINK1_VAL 0x2FD7F
-#define PCIE20_PARF_Q2A_FLUSH 0x1AC
+/* AXI_MSTR_RESP_COMP_CTRL1 register fields */
+#define CFG_BRIDGE_SB_INIT BIT(0)
-#define PCIE20_MISC_CONTROL_1_REG 0x8BC
-#define DBI_RO_WR_EN 1
+/* PCI_EXP_SLTCAP register fields */
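+/* A value of 250 with scale 1 (0.1x) advertises a 25 W slot power limit */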
+#define PCIE_CAP_SLOT_POWER_LIMIT_VAL FIELD_PREP(PCI_EXP_SLTCAP_SPLV, 250)
+#define PCIE_CAP_SLOT_POWER_LIMIT_SCALE FIELD_PREP(PCI_EXP_SLTCAP_SPLS, 1)
+#define PCIE_CAP_SLOT_VAL (PCI_EXP_SLTCAP_ABP | \
+ PCI_EXP_SLTCAP_PCP | \
+ PCI_EXP_SLTCAP_MRLSP | \
+ PCI_EXP_SLTCAP_AIP | \
+ PCI_EXP_SLTCAP_PIP | \
+ PCI_EXP_SLTCAP_HPS | \
+ PCI_EXP_SLTCAP_EIP | \
+ PCIE_CAP_SLOT_POWER_LIMIT_VAL | \
+ PCIE_CAP_SLOT_POWER_LIMIT_SCALE)
#define PERST_DELAY_US 1000
-#define PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE 0x358
-#define SLV_ADDR_SPACE_SZ 0x10000000
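+/* CRC-8 polynomial 0x07 (x^8 + x^2 + x + 1), used to hash a BDF into the SID table */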
+#define QCOM_PCIE_CRC8_POLYNOMIAL (BIT(2) | BIT(1) | BIT(0))
-#define QCOM_PCIE_2_1_0_MAX_SUPPLY 3
-struct qcom_pcie_resources_2_1_0 {
- struct clk *iface_clk;
- struct clk *core_clk;
- struct clk *phy_clk;
- struct reset_control *pci_reset;
- struct reset_control *axi_reset;
- struct reset_control *ahb_reset;
- struct reset_control *por_reset;
- struct reset_control *phy_reset;
- struct regulator_bulk_data supplies[QCOM_PCIE_2_1_0_MAX_SUPPLY];
-};
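+/* Convert a PCIe link speed to the corresponding interconnect bandwidth request */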
+#define QCOM_PCIE_LINK_SPEED_TO_BW(speed) \
+ Mbps_to_icc(PCIE_SPEED2MBS_ENC(pcie_link_speed[speed]))
struct qcom_pcie_resources_1_0_0 {
- struct clk *iface;
- struct clk *aux;
- struct clk *master_bus;
- struct clk *slave_bus;
+ struct clk_bulk_data *clks;
+ int num_clks;
struct reset_control *core;
struct regulator *vdda;
};
-#define QCOM_PCIE_2_3_2_MAX_SUPPLY 2
+#define QCOM_PCIE_2_1_0_MAX_RESETS 6
+#define QCOM_PCIE_2_1_0_MAX_SUPPLY 3
+struct qcom_pcie_resources_2_1_0 {
+ struct clk_bulk_data *clks;
+ int num_clks;
+ struct reset_control_bulk_data resets[QCOM_PCIE_2_1_0_MAX_RESETS];
+ int num_resets;
+ struct regulator_bulk_data supplies[QCOM_PCIE_2_1_0_MAX_SUPPLY];
+};
+
+#define QCOM_PCIE_2_3_2_MAX_SUPPLY 2
struct qcom_pcie_resources_2_3_2 {
- struct clk *aux_clk;
- struct clk *master_clk;
- struct clk *slave_clk;
- struct clk *cfg_clk;
- struct clk *pipe_clk;
+ struct clk_bulk_data *clks;
+ int num_clks;
struct regulator_bulk_data supplies[QCOM_PCIE_2_3_2_MAX_SUPPLY];
};
+#define QCOM_PCIE_2_3_3_MAX_RESETS 7
+struct qcom_pcie_resources_2_3_3 {
+ struct clk_bulk_data *clks;
+ int num_clks;
+ struct reset_control_bulk_data rst[QCOM_PCIE_2_3_3_MAX_RESETS];
+};
+
+#define QCOM_PCIE_2_4_0_MAX_RESETS 12
struct qcom_pcie_resources_2_4_0 {
- struct clk *aux_clk;
- struct clk *master_clk;
- struct clk *slave_clk;
- struct reset_control *axi_m_reset;
- struct reset_control *axi_s_reset;
- struct reset_control *pipe_reset;
- struct reset_control *axi_m_vmid_reset;
- struct reset_control *axi_s_xpu_reset;
- struct reset_control *parf_reset;
- struct reset_control *phy_reset;
- struct reset_control *axi_m_sticky_reset;
- struct reset_control *pipe_sticky_reset;
- struct reset_control *pwr_reset;
- struct reset_control *ahb_reset;
- struct reset_control *phy_ahb_reset;
+ struct clk_bulk_data *clks;
+ int num_clks;
+ struct reset_control_bulk_data resets[QCOM_PCIE_2_4_0_MAX_RESETS];
+ int num_resets;
};
-struct qcom_pcie_resources_2_3_3 {
- struct clk *iface;
- struct clk *axi_m_clk;
- struct clk *axi_s_clk;
- struct clk *ahb_clk;
- struct clk *aux_clk;
- struct reset_control *rst[7];
+#define QCOM_PCIE_2_7_0_MAX_SUPPLIES 2
+struct qcom_pcie_resources_2_7_0 {
+ struct clk_bulk_data *clks;
+ int num_clks;
+ struct regulator_bulk_data supplies[QCOM_PCIE_2_7_0_MAX_SUPPLIES];
+ struct reset_control *rst;
+};
+
+struct qcom_pcie_resources_2_9_0 {
+ struct clk_bulk_data *clks;
+ int num_clks;
+ struct reset_control *rst;
};
union qcom_pcie_resources {
@@ -145,6 +237,8 @@ union qcom_pcie_resources {
struct qcom_pcie_resources_2_3_2 v2_3_2;
struct qcom_pcie_resources_2_3_3 v2_3_3;
struct qcom_pcie_resources_2_4_0 v2_4_0;
+ struct qcom_pcie_resources_2_7_0 v2_7_0;
+ struct qcom_pcie_resources_2_9_0 v2_9_0;
};
struct qcom_pcie;
@@ -153,57 +247,178 @@ struct qcom_pcie_ops {
int (*get_resources)(struct qcom_pcie *pcie);
int (*init)(struct qcom_pcie *pcie);
int (*post_init)(struct qcom_pcie *pcie);
+ void (*host_post_init)(struct qcom_pcie *pcie);
void (*deinit)(struct qcom_pcie *pcie);
- void (*post_deinit)(struct qcom_pcie *pcie);
void (*ltssm_enable)(struct qcom_pcie *pcie);
+ int (*config_sid)(struct qcom_pcie *pcie);
+};
+
+/**
+ * struct qcom_pcie_cfg - Per SoC config struct
+ * @ops: qcom PCIe ops structure
+ * @override_no_snoop: Override NO_SNOOP attribute in TLP to enable cache
+ * snooping
+ * @firmware_managed: Set if the Root Complex is firmware managed
+ * @no_l0s: Set if the ASPM L0s state is not supported and must be disabled
+ */
+struct qcom_pcie_cfg {
+ const struct qcom_pcie_ops *ops;
+ bool override_no_snoop;
+ bool firmware_managed;
+ bool no_l0s;
+};
+
+struct qcom_pcie_port {
+ struct list_head list;
+ struct gpio_desc *reset;
+ struct phy *phy;
};
struct qcom_pcie {
struct dw_pcie *pci;
void __iomem *parf; /* DT parf */
- void __iomem *elbi; /* DT elbi */
+ void __iomem *mhi;
union qcom_pcie_resources res;
- struct phy *phy;
- struct gpio_desc *reset;
- const struct qcom_pcie_ops *ops;
+ struct icc_path *icc_mem;
+ struct icc_path *icc_cpu;
+ const struct qcom_pcie_cfg *cfg;
+ struct dentry *debugfs;
+ struct list_head ports;
+ bool suspended;
+ bool use_pm_opp;
};
#define to_qcom_pcie(x) dev_get_drvdata((x)->dev)
-static void qcom_ep_reset_assert(struct qcom_pcie *pcie)
+static void qcom_perst_assert(struct qcom_pcie *pcie, bool assert)
{
- gpiod_set_value_cansleep(pcie->reset, 1);
+ struct qcom_pcie_port *port;
+ int val = assert ? 1 : 0;
+
+ list_for_each_entry(port, &pcie->ports, list)
+ gpiod_set_value_cansleep(port->reset, val);
+
usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
}
+static void qcom_ep_reset_assert(struct qcom_pcie *pcie)
+{
+ qcom_perst_assert(pcie, true);
+}
+
static void qcom_ep_reset_deassert(struct qcom_pcie *pcie)
{
- gpiod_set_value_cansleep(pcie->reset, 0);
- usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
+ /* Ensure that PERST has been asserted for at least 100 ms */
+ msleep(PCIE_T_PVPERL_MS);
+ qcom_perst_assert(pcie, false);
}
-static int qcom_pcie_establish_link(struct qcom_pcie *pcie)
+static int qcom_pcie_start_link(struct dw_pcie *pci)
{
- struct dw_pcie *pci = pcie->pci;
+ struct qcom_pcie *pcie = to_qcom_pcie(pci);
- if (dw_pcie_link_up(pci))
- return 0;
+ qcom_pcie_common_set_equalization(pci);
+
+ if (pcie_link_speed[pci->max_link_speed] == PCIE_SPEED_16_0GT)
+ qcom_pcie_common_set_16gt_lane_margining(pci);
/* Enable Link Training state machine */
- if (pcie->ops->ltssm_enable)
- pcie->ops->ltssm_enable(pcie);
+ if (pcie->cfg->ops->ltssm_enable)
+ pcie->cfg->ops->ltssm_enable(pcie);
+
+ return 0;
+}
+
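+/* Stop advertising the ASPM L0s state on platforms where it is not supported */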
+static void qcom_pcie_clear_aspm_l0s(struct dw_pcie *pci)
+{
+ struct qcom_pcie *pcie = to_qcom_pcie(pci);
+ u16 offset;
+ u32 val;
+
+ if (!pcie->cfg->no_l0s)
+ return;
+
+ offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+
+ dw_pcie_dbi_ro_wr_en(pci);
+
+ val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP);
+ val &= ~PCI_EXP_LNKCAP_ASPM_L0S;
+ writel(val, pci->dbi_base + offset + PCI_EXP_LNKCAP);
+
+ dw_pcie_dbi_ro_wr_dis(pci);
+}
+
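+/* The Root Port advertises Hot-Plug Capable by default, clear it as hotplug is not supported */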
+static void qcom_pcie_clear_hpc(struct dw_pcie *pci)
+{
+ u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ u32 val;
+
+ dw_pcie_dbi_ro_wr_en(pci);
+
+ val = readl(pci->dbi_base + offset + PCI_EXP_SLTCAP);
+ val &= ~PCI_EXP_SLTCAP_HPC;
+ writel(val, pci->dbi_base + offset + PCI_EXP_SLTCAP);
- return dw_pcie_wait_for_link(pci);
+ dw_pcie_dbi_ro_wr_dis(pci);
+}
+
+static void qcom_pcie_configure_dbi_base(struct qcom_pcie *pcie)
+{
+ struct dw_pcie *pci = pcie->pci;
+
+ if (pci->dbi_phys_addr) {
+ /*
+ * The PARF_DBI_BASE_ADDR register is in the CPU domain and must be
+ * programmed with the CPU physical address.
+ */
+ writel(lower_32_bits(pci->dbi_phys_addr), pcie->parf +
+ PARF_DBI_BASE_ADDR);
+ writel(SLV_ADDR_SPACE_SZ, pcie->parf +
+ PARF_SLV_ADDR_SPACE_SIZE);
+ }
+}
+
+static void qcom_pcie_configure_dbi_atu_base(struct qcom_pcie *pcie)
+{
+ struct dw_pcie *pci = pcie->pci;
+
+ if (pci->dbi_phys_addr) {
+ /*
+ * The PARF_DBI_BASE_ADDR_V2 and PARF_ATU_BASE_ADDR registers are
+ * in the CPU domain and must be programmed with CPU physical
+ * addresses.
+ */
+ writel(lower_32_bits(pci->dbi_phys_addr), pcie->parf +
+ PARF_DBI_BASE_ADDR_V2);
+ writel(upper_32_bits(pci->dbi_phys_addr), pcie->parf +
+ PARF_DBI_BASE_ADDR_V2_HI);
+
+ if (pci->atu_phys_addr) {
+ writel(lower_32_bits(pci->atu_phys_addr), pcie->parf +
+ PARF_ATU_BASE_ADDR);
+ writel(upper_32_bits(pci->atu_phys_addr), pcie->parf +
+ PARF_ATU_BASE_ADDR_HI);
+ }
+
+ writel(0x0, pcie->parf + PARF_SLV_ADDR_SPACE_SIZE_V2);
+ writel(SLV_ADDR_SPACE_SZ, pcie->parf +
+ PARF_SLV_ADDR_SPACE_SIZE_V2_HI);
+ }
}
static void qcom_pcie_2_1_0_ltssm_enable(struct qcom_pcie *pcie)
{
+ struct dw_pcie *pci = pcie->pci;
u32 val;
+ if (!pci->elbi_base) {
+ dev_err(pci->dev, "ELBI is not present\n");
+ return;
+ }
/* enable link training */
- val = readl(pcie->elbi + PCIE20_ELBI_SYS_CTRL);
- val |= PCIE20_ELBI_SYS_CTRL_LT_ENABLE;
- writel(val, pcie->elbi + PCIE20_ELBI_SYS_CTRL);
+ val = readl(pci->elbi_base + ELBI_SYS_CTRL);
+ val |= ELBI_SYS_CTRL_LT_ENABLE;
+ writel(val, pci->elbi_base + ELBI_SYS_CTRL);
}
static int qcom_pcie_get_resources_2_1_0(struct qcom_pcie *pcie)
@@ -211,6 +426,7 @@ static int qcom_pcie_get_resources_2_1_0(struct qcom_pcie *pcie)
struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
struct dw_pcie *pci = pcie->pci;
struct device *dev = pci->dev;
+ bool is_apq = of_device_is_compatible(dev->of_node, "qcom,pcie-apq8064");
int ret;
res->supplies[0].supply = "vdda";
@@ -221,50 +437,37 @@ static int qcom_pcie_get_resources_2_1_0(struct qcom_pcie *pcie)
if (ret)
return ret;
- res->iface_clk = devm_clk_get(dev, "iface");
- if (IS_ERR(res->iface_clk))
- return PTR_ERR(res->iface_clk);
-
- res->core_clk = devm_clk_get(dev, "core");
- if (IS_ERR(res->core_clk))
- return PTR_ERR(res->core_clk);
-
- res->phy_clk = devm_clk_get(dev, "phy");
- if (IS_ERR(res->phy_clk))
- return PTR_ERR(res->phy_clk);
-
- res->pci_reset = devm_reset_control_get_exclusive(dev, "pci");
- if (IS_ERR(res->pci_reset))
- return PTR_ERR(res->pci_reset);
-
- res->axi_reset = devm_reset_control_get_exclusive(dev, "axi");
- if (IS_ERR(res->axi_reset))
- return PTR_ERR(res->axi_reset);
-
- res->ahb_reset = devm_reset_control_get_exclusive(dev, "ahb");
- if (IS_ERR(res->ahb_reset))
- return PTR_ERR(res->ahb_reset);
+ res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
+ if (res->num_clks < 0) {
+ dev_err(dev, "Failed to get clocks\n");
+ return res->num_clks;
+ }
- res->por_reset = devm_reset_control_get_exclusive(dev, "por");
- if (IS_ERR(res->por_reset))
- return PTR_ERR(res->por_reset);
+ res->resets[0].id = "pci";
+ res->resets[1].id = "axi";
+ res->resets[2].id = "ahb";
+ res->resets[3].id = "por";
+ res->resets[4].id = "phy";
+ res->resets[5].id = "ext";
+
+ /* The "ext" reset is not present on APQ8064 */
+ res->num_resets = is_apq ? 5 : 6;
+ ret = devm_reset_control_bulk_get_exclusive(dev, res->num_resets, res->resets);
+ if (ret < 0)
+ return ret;
- res->phy_reset = devm_reset_control_get_exclusive(dev, "phy");
- return PTR_ERR_OR_ZERO(res->phy_reset);
+ return 0;
}
static void qcom_pcie_deinit_2_1_0(struct qcom_pcie *pcie)
{
struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
- reset_control_assert(res->pci_reset);
- reset_control_assert(res->axi_reset);
- reset_control_assert(res->ahb_reset);
- reset_control_assert(res->por_reset);
- reset_control_assert(res->pci_reset);
- clk_disable_unprepare(res->iface_clk);
- clk_disable_unprepare(res->core_clk);
- clk_disable_unprepare(res->phy_clk);
+ clk_bulk_disable_unprepare(res->num_clks, res->clks);
+ reset_control_bulk_assert(res->num_resets, res->resets);
+
+ writel(1, pcie->parf + PARF_PHY_CTRL);
+
regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
}
@@ -273,101 +476,89 @@ static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)
struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
struct dw_pcie *pci = pcie->pci;
struct device *dev = pci->dev;
- u32 val;
int ret;
- ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
+ /* Reset the PCIe interface, as U-Boot can leave it in an undefined state */
+ ret = reset_control_bulk_assert(res->num_resets, res->resets);
if (ret < 0) {
- dev_err(dev, "cannot enable regulators\n");
+ dev_err(dev, "cannot assert resets\n");
return ret;
}
- ret = reset_control_assert(res->ahb_reset);
- if (ret) {
- dev_err(dev, "cannot assert ahb reset\n");
- goto err_assert_ahb;
- }
-
- ret = clk_prepare_enable(res->iface_clk);
- if (ret) {
- dev_err(dev, "cannot prepare/enable iface clock\n");
- goto err_assert_ahb;
+ ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
+ if (ret < 0) {
+ dev_err(dev, "cannot enable regulators\n");
+ return ret;
}
- ret = clk_prepare_enable(res->phy_clk);
- if (ret) {
- dev_err(dev, "cannot prepare/enable phy clock\n");
- goto err_clk_phy;
+ ret = reset_control_bulk_deassert(res->num_resets, res->resets);
+ if (ret < 0) {
+ dev_err(dev, "cannot deassert resets\n");
+ regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
+ return ret;
}
- ret = clk_prepare_enable(res->core_clk);
- if (ret) {
- dev_err(dev, "cannot prepare/enable core clock\n");
- goto err_clk_core;
- }
+ return 0;
+}
- ret = reset_control_deassert(res->ahb_reset);
- if (ret) {
- dev_err(dev, "cannot deassert ahb reset\n");
- goto err_deassert_ahb;
- }
+static int qcom_pcie_post_init_2_1_0(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
+ struct device_node *node = dev->of_node;
+ u32 val;
+ int ret;
/* enable PCIe clocks and resets */
- val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
- val &= ~BIT(0);
- writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
-
- /* enable external reference clock */
- val = readl(pcie->parf + PCIE20_PARF_PHY_REFCLK);
- val |= BIT(16);
- writel(val, pcie->parf + PCIE20_PARF_PHY_REFCLK);
+ val = readl(pcie->parf + PARF_PHY_CTRL);
+ val &= ~PHY_TEST_PWR_DOWN;
+ writel(val, pcie->parf + PARF_PHY_CTRL);
- ret = reset_control_deassert(res->phy_reset);
- if (ret) {
- dev_err(dev, "cannot deassert phy reset\n");
+ ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
+ if (ret)
return ret;
- }
- ret = reset_control_deassert(res->pci_reset);
- if (ret) {
- dev_err(dev, "cannot deassert pci reset\n");
- return ret;
+ if (of_device_is_compatible(node, "qcom,pcie-ipq8064") ||
+ of_device_is_compatible(node, "qcom,pcie-ipq8064-v2")) {
+ writel(PCS_DEEMPH_TX_DEEMPH_GEN1(24) |
+ PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(24) |
+ PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(34),
+ pcie->parf + PARF_PCS_DEEMPH);
+ writel(PCS_SWING_TX_SWING_FULL(120) |
+ PCS_SWING_TX_SWING_LOW(120),
+ pcie->parf + PARF_PCS_SWING);
+ writel(PHY_RX0_EQ(4), pcie->parf + PARF_CONFIG_BITS);
}
- ret = reset_control_deassert(res->por_reset);
- if (ret) {
- dev_err(dev, "cannot deassert por reset\n");
- return ret;
+ if (of_device_is_compatible(node, "qcom,pcie-ipq8064")) {
+ /* set TX termination offset */
+ val = readl(pcie->parf + PARF_PHY_CTRL);
+ val &= ~PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK;
+ val |= PHY_CTRL_PHY_TX0_TERM_OFFSET(7);
+ writel(val, pcie->parf + PARF_PHY_CTRL);
}
- ret = reset_control_deassert(res->axi_reset);
- if (ret) {
- dev_err(dev, "cannot deassert axi reset\n");
- return ret;
- }
+ /* enable external reference clock */
+ val = readl(pcie->parf + PARF_PHY_REFCLK);
+ /* USE_PAD is required only for ipq806x */
+ if (!of_device_is_compatible(node, "qcom,pcie-apq8064"))
+ val &= ~PHY_REFCLK_USE_PAD;
+ val |= PHY_REFCLK_SSP_EN;
+ writel(val, pcie->parf + PARF_PHY_REFCLK);
/* wait for clock acquisition */
usleep_range(1000, 1500);
-
/* Set the Max TLP size to 2K, instead of using default of 4K */
writel(CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K,
- pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL0);
+ pci->dbi_base + AXI_MSTR_RESP_COMP_CTRL0);
writel(CFG_BRIDGE_SB_INIT,
- pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL1);
+ pci->dbi_base + AXI_MSTR_RESP_COMP_CTRL1);
- return 0;
-
-err_deassert_ahb:
- clk_disable_unprepare(res->core_clk);
-err_clk_core:
- clk_disable_unprepare(res->phy_clk);
-err_clk_phy:
- clk_disable_unprepare(res->iface_clk);
-err_assert_ahb:
- regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
+ qcom_pcie_clear_hpc(pcie->pci);
- return ret;
+ return 0;
}
static int qcom_pcie_get_resources_1_0_0(struct qcom_pcie *pcie)
@@ -380,21 +571,11 @@ static int qcom_pcie_get_resources_1_0_0(struct qcom_pcie *pcie)
if (IS_ERR(res->vdda))
return PTR_ERR(res->vdda);
- res->iface = devm_clk_get(dev, "iface");
- if (IS_ERR(res->iface))
- return PTR_ERR(res->iface);
-
- res->aux = devm_clk_get(dev, "aux");
- if (IS_ERR(res->aux))
- return PTR_ERR(res->aux);
-
- res->master_bus = devm_clk_get(dev, "master_bus");
- if (IS_ERR(res->master_bus))
- return PTR_ERR(res->master_bus);
-
- res->slave_bus = devm_clk_get(dev, "slave_bus");
- if (IS_ERR(res->slave_bus))
- return PTR_ERR(res->slave_bus);
+ res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
+ if (res->num_clks < 0) {
+ dev_err(dev, "Failed to get clocks\n");
+ return res->num_clks;
+ }
res->core = devm_reset_control_get_exclusive(dev, "core");
return PTR_ERR_OR_ZERO(res->core);
@@ -405,10 +586,7 @@ static void qcom_pcie_deinit_1_0_0(struct qcom_pcie *pcie)
struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
reset_control_assert(res->core);
- clk_disable_unprepare(res->slave_bus);
- clk_disable_unprepare(res->master_bus);
- clk_disable_unprepare(res->iface);
- clk_disable_unprepare(res->aux);
+ clk_bulk_disable_unprepare(res->num_clks, res->clks);
regulator_disable(res->vdda);
}
@@ -425,59 +603,54 @@ static int qcom_pcie_init_1_0_0(struct qcom_pcie *pcie)
return ret;
}
- ret = clk_prepare_enable(res->aux);
+ ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
if (ret) {
- dev_err(dev, "cannot prepare/enable aux clock\n");
- goto err_res;
+ dev_err(dev, "cannot prepare/enable clocks\n");
+ goto err_assert_reset;
}
- ret = clk_prepare_enable(res->iface);
+ ret = regulator_enable(res->vdda);
if (ret) {
- dev_err(dev, "cannot prepare/enable iface clock\n");
- goto err_aux;
+ dev_err(dev, "cannot enable vdda regulator\n");
+ goto err_disable_clks;
}
- ret = clk_prepare_enable(res->master_bus);
- if (ret) {
- dev_err(dev, "cannot prepare/enable master_bus clock\n");
- goto err_iface;
- }
+ return 0;
- ret = clk_prepare_enable(res->slave_bus);
- if (ret) {
- dev_err(dev, "cannot prepare/enable slave_bus clock\n");
- goto err_master;
- }
+err_disable_clks:
+ clk_bulk_disable_unprepare(res->num_clks, res->clks);
+err_assert_reset:
+ reset_control_assert(res->core);
- ret = regulator_enable(res->vdda);
- if (ret) {
- dev_err(dev, "cannot enable vdda regulator\n");
- goto err_slave;
- }
+ return ret;
+}
- /* change DBI base address */
- writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);
+static int qcom_pcie_post_init_1_0_0(struct qcom_pcie *pcie)
+{
+ qcom_pcie_configure_dbi_base(pcie);
if (IS_ENABLED(CONFIG_PCI_MSI)) {
- u32 val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
+ u32 val = readl(pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT);
- val |= BIT(31);
- writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
+ val |= EN;
+ writel(val, pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT);
}
+ qcom_pcie_clear_hpc(pcie->pci);
+
return 0;
-err_slave:
- clk_disable_unprepare(res->slave_bus);
-err_master:
- clk_disable_unprepare(res->master_bus);
-err_iface:
- clk_disable_unprepare(res->iface);
-err_aux:
- clk_disable_unprepare(res->aux);
-err_res:
- reset_control_assert(res->core);
+}
- return ret;
+static int qcom_pcie_assert_perst(struct dw_pcie *pci, bool assert)
+{
+ struct qcom_pcie *pcie = to_qcom_pcie(pci);
+
+ if (assert)
+ qcom_ep_reset_assert(pcie);
+ else
+ qcom_ep_reset_deassert(pcie);
+
+ return 0;
}
static void qcom_pcie_2_3_2_ltssm_enable(struct qcom_pcie *pcie)
@@ -485,9 +658,9 @@ static void qcom_pcie_2_3_2_ltssm_enable(struct qcom_pcie *pcie)
u32 val;
/* enable link training */
- val = readl(pcie->parf + PCIE20_PARF_LTSSM);
- val |= BIT(8);
- writel(val, pcie->parf + PCIE20_PARF_LTSSM);
+ val = readl(pcie->parf + PARF_LTSSM);
+ val |= LTSSM_EN;
+ writel(val, pcie->parf + PARF_LTSSM);
}
static int qcom_pcie_get_resources_2_3_2(struct qcom_pcie *pcie)
@@ -504,51 +677,28 @@ static int qcom_pcie_get_resources_2_3_2(struct qcom_pcie *pcie)
if (ret)
return ret;
- res->aux_clk = devm_clk_get(dev, "aux");
- if (IS_ERR(res->aux_clk))
- return PTR_ERR(res->aux_clk);
-
- res->cfg_clk = devm_clk_get(dev, "cfg");
- if (IS_ERR(res->cfg_clk))
- return PTR_ERR(res->cfg_clk);
-
- res->master_clk = devm_clk_get(dev, "bus_master");
- if (IS_ERR(res->master_clk))
- return PTR_ERR(res->master_clk);
-
- res->slave_clk = devm_clk_get(dev, "bus_slave");
- if (IS_ERR(res->slave_clk))
- return PTR_ERR(res->slave_clk);
+ res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
+ if (res->num_clks < 0) {
+ dev_err(dev, "Failed to get clocks\n");
+ return res->num_clks;
+ }
- res->pipe_clk = devm_clk_get(dev, "pipe");
- return PTR_ERR_OR_ZERO(res->pipe_clk);
+ return 0;
}
static void qcom_pcie_deinit_2_3_2(struct qcom_pcie *pcie)
{
struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
- clk_disable_unprepare(res->slave_clk);
- clk_disable_unprepare(res->master_clk);
- clk_disable_unprepare(res->cfg_clk);
- clk_disable_unprepare(res->aux_clk);
-
+ clk_bulk_disable_unprepare(res->num_clks, res->clks);
regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
}
-static void qcom_pcie_post_deinit_2_3_2(struct qcom_pcie *pcie)
-{
- struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
-
- clk_disable_unprepare(res->pipe_clk);
-}
-
static int qcom_pcie_init_2_3_2(struct qcom_pcie *pcie)
{
struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
struct dw_pcie *pci = pcie->pci;
struct device *dev = pci->dev;
- u32 val;
int ret;
ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
@@ -557,533 +707,594 @@ static int qcom_pcie_init_2_3_2(struct qcom_pcie *pcie)
return ret;
}
- ret = clk_prepare_enable(res->aux_clk);
- if (ret) {
- dev_err(dev, "cannot prepare/enable aux clock\n");
- goto err_aux_clk;
- }
-
- ret = clk_prepare_enable(res->cfg_clk);
+ ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
if (ret) {
- dev_err(dev, "cannot prepare/enable cfg clock\n");
- goto err_cfg_clk;
+ dev_err(dev, "cannot prepare/enable clocks\n");
+ regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
+ return ret;
}
- ret = clk_prepare_enable(res->master_clk);
- if (ret) {
- dev_err(dev, "cannot prepare/enable master clock\n");
- goto err_master_clk;
- }
+ return 0;
+}
- ret = clk_prepare_enable(res->slave_clk);
- if (ret) {
- dev_err(dev, "cannot prepare/enable slave clock\n");
- goto err_slave_clk;
- }
+static int qcom_pcie_post_init_2_3_2(struct qcom_pcie *pcie)
+{
+ u32 val;
/* enable PCIe clocks and resets */
- val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
- val &= ~BIT(0);
- writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
+ val = readl(pcie->parf + PARF_PHY_CTRL);
+ val &= ~PHY_TEST_PWR_DOWN;
+ writel(val, pcie->parf + PARF_PHY_CTRL);
- /* change DBI base address */
- writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);
+ qcom_pcie_configure_dbi_base(pcie);
/* MAC PHY_POWERDOWN MUX DISABLE */
- val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
- val &= ~BIT(29);
- writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);
-
- val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
- val |= BIT(4);
- writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
-
- val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
- val |= BIT(31);
- writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
+ val = readl(pcie->parf + PARF_SYS_CTRL);
+ val &= ~MAC_PHY_POWERDOWN_IN_P2_D_MUX_EN;
+ writel(val, pcie->parf + PARF_SYS_CTRL);
- return 0;
+ val = readl(pcie->parf + PARF_MHI_CLOCK_RESET_CTRL);
+ val |= BYPASS;
+ writel(val, pcie->parf + PARF_MHI_CLOCK_RESET_CTRL);
-err_slave_clk:
- clk_disable_unprepare(res->master_clk);
-err_master_clk:
- clk_disable_unprepare(res->cfg_clk);
-err_cfg_clk:
- clk_disable_unprepare(res->aux_clk);
+ val = readl(pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2);
+ val |= EN;
+ writel(val, pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2);
-err_aux_clk:
- regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
+ qcom_pcie_clear_hpc(pcie->pci);
- return ret;
+ return 0;
}
-static int qcom_pcie_post_init_2_3_2(struct qcom_pcie *pcie)
+static int qcom_pcie_get_resources_2_4_0(struct qcom_pcie *pcie)
{
- struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
+ struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
struct dw_pcie *pci = pcie->pci;
struct device *dev = pci->dev;
+ bool is_ipq = of_device_is_compatible(dev->of_node, "qcom,pcie-ipq4019");
int ret;
- ret = clk_prepare_enable(res->pipe_clk);
- if (ret) {
- dev_err(dev, "cannot prepare/enable pipe clock\n");
- return ret;
+ res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
+ if (res->num_clks < 0) {
+ dev_err(dev, "Failed to get clocks\n");
+ return res->num_clks;
}
+ res->resets[0].id = "axi_m";
+ res->resets[1].id = "axi_s";
+ res->resets[2].id = "axi_m_sticky";
+ res->resets[3].id = "pipe_sticky";
+ res->resets[4].id = "pwr";
+ res->resets[5].id = "ahb";
+ res->resets[6].id = "pipe";
+ res->resets[7].id = "axi_m_vmid";
+ res->resets[8].id = "axi_s_xpu";
+ res->resets[9].id = "parf";
+ res->resets[10].id = "phy";
+ res->resets[11].id = "phy_ahb";
+
+ res->num_resets = is_ipq ? 12 : 6;
+
+ ret = devm_reset_control_bulk_get_exclusive(dev, res->num_resets, res->resets);
+ if (ret < 0)
+ return ret;
+
return 0;
}
-static int qcom_pcie_get_resources_2_4_0(struct qcom_pcie *pcie)
+static void qcom_pcie_deinit_2_4_0(struct qcom_pcie *pcie)
{
struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
- struct dw_pcie *pci = pcie->pci;
- struct device *dev = pci->dev;
-
- res->aux_clk = devm_clk_get(dev, "aux");
- if (IS_ERR(res->aux_clk))
- return PTR_ERR(res->aux_clk);
-
- res->master_clk = devm_clk_get(dev, "master_bus");
- if (IS_ERR(res->master_clk))
- return PTR_ERR(res->master_clk);
- res->slave_clk = devm_clk_get(dev, "slave_bus");
- if (IS_ERR(res->slave_clk))
- return PTR_ERR(res->slave_clk);
-
- res->axi_m_reset = devm_reset_control_get_exclusive(dev, "axi_m");
- if (IS_ERR(res->axi_m_reset))
- return PTR_ERR(res->axi_m_reset);
+ reset_control_bulk_assert(res->num_resets, res->resets);
+ clk_bulk_disable_unprepare(res->num_clks, res->clks);
+}
- res->axi_s_reset = devm_reset_control_get_exclusive(dev, "axi_s");
- if (IS_ERR(res->axi_s_reset))
- return PTR_ERR(res->axi_s_reset);
+static int qcom_pcie_init_2_4_0(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
+ int ret;
- res->pipe_reset = devm_reset_control_get_exclusive(dev, "pipe");
- if (IS_ERR(res->pipe_reset))
- return PTR_ERR(res->pipe_reset);
+ ret = reset_control_bulk_assert(res->num_resets, res->resets);
+ if (ret < 0) {
+ dev_err(dev, "cannot assert resets\n");
+ return ret;
+ }
- res->axi_m_vmid_reset = devm_reset_control_get_exclusive(dev,
- "axi_m_vmid");
- if (IS_ERR(res->axi_m_vmid_reset))
- return PTR_ERR(res->axi_m_vmid_reset);
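+ /* Give the asserted resets time to propagate before deasserting */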
+ usleep_range(10000, 12000);
- res->axi_s_xpu_reset = devm_reset_control_get_exclusive(dev,
- "axi_s_xpu");
- if (IS_ERR(res->axi_s_xpu_reset))
- return PTR_ERR(res->axi_s_xpu_reset);
+ ret = reset_control_bulk_deassert(res->num_resets, res->resets);
+ if (ret < 0) {
+ dev_err(dev, "cannot deassert resets\n");
+ return ret;
+ }
- res->parf_reset = devm_reset_control_get_exclusive(dev, "parf");
- if (IS_ERR(res->parf_reset))
- return PTR_ERR(res->parf_reset);
+ usleep_range(10000, 12000);
- res->phy_reset = devm_reset_control_get_exclusive(dev, "phy");
- if (IS_ERR(res->phy_reset))
- return PTR_ERR(res->phy_reset);
+ ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
+ if (ret) {
+ reset_control_bulk_assert(res->num_resets, res->resets);
+ return ret;
+ }
- res->axi_m_sticky_reset = devm_reset_control_get_exclusive(dev,
- "axi_m_sticky");
- if (IS_ERR(res->axi_m_sticky_reset))
- return PTR_ERR(res->axi_m_sticky_reset);
+ return 0;
+}
- res->pipe_sticky_reset = devm_reset_control_get_exclusive(dev,
- "pipe_sticky");
- if (IS_ERR(res->pipe_sticky_reset))
- return PTR_ERR(res->pipe_sticky_reset);
+static int qcom_pcie_get_resources_2_3_3(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
+ int ret;
- res->pwr_reset = devm_reset_control_get_exclusive(dev, "pwr");
- if (IS_ERR(res->pwr_reset))
- return PTR_ERR(res->pwr_reset);
+ res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
+ if (res->num_clks < 0) {
+ dev_err(dev, "Failed to get clocks\n");
+ return res->num_clks;
+ }
- res->ahb_reset = devm_reset_control_get_exclusive(dev, "ahb");
- if (IS_ERR(res->ahb_reset))
- return PTR_ERR(res->ahb_reset);
+ res->rst[0].id = "axi_m";
+ res->rst[1].id = "axi_s";
+ res->rst[2].id = "pipe";
+ res->rst[3].id = "axi_m_sticky";
+ res->rst[4].id = "sticky";
+ res->rst[5].id = "ahb";
+ res->rst[6].id = "sleep";
- res->phy_ahb_reset = devm_reset_control_get_exclusive(dev, "phy_ahb");
- if (IS_ERR(res->phy_ahb_reset))
- return PTR_ERR(res->phy_ahb_reset);
+ ret = devm_reset_control_bulk_get_exclusive(dev, ARRAY_SIZE(res->rst), res->rst);
+ if (ret < 0)
+ return ret;
return 0;
}
-static void qcom_pcie_deinit_2_4_0(struct qcom_pcie *pcie)
+static void qcom_pcie_deinit_2_3_3(struct qcom_pcie *pcie)
{
- struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
+ struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
- reset_control_assert(res->axi_m_reset);
- reset_control_assert(res->axi_s_reset);
- reset_control_assert(res->pipe_reset);
- reset_control_assert(res->pipe_sticky_reset);
- reset_control_assert(res->phy_reset);
- reset_control_assert(res->phy_ahb_reset);
- reset_control_assert(res->axi_m_sticky_reset);
- reset_control_assert(res->pwr_reset);
- reset_control_assert(res->ahb_reset);
- clk_disable_unprepare(res->aux_clk);
- clk_disable_unprepare(res->master_clk);
- clk_disable_unprepare(res->slave_clk);
+ clk_bulk_disable_unprepare(res->num_clks, res->clks);
}
-static int qcom_pcie_init_2_4_0(struct qcom_pcie *pcie)
+static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie)
{
- struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
+ struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
struct dw_pcie *pci = pcie->pci;
struct device *dev = pci->dev;
- u32 val;
int ret;
- ret = reset_control_assert(res->axi_m_reset);
- if (ret) {
- dev_err(dev, "cannot assert axi master reset\n");
+ ret = reset_control_bulk_assert(ARRAY_SIZE(res->rst), res->rst);
+ if (ret < 0) {
+ dev_err(dev, "cannot assert resets\n");
return ret;
}
- ret = reset_control_assert(res->axi_s_reset);
- if (ret) {
- dev_err(dev, "cannot assert axi slave reset\n");
+ usleep_range(2000, 2500);
+
+ ret = reset_control_bulk_deassert(ARRAY_SIZE(res->rst), res->rst);
+ if (ret < 0) {
+ dev_err(dev, "cannot deassert resets\n");
return ret;
}
- usleep_range(10000, 12000);
+ /*
+ * There is no way to check whether the reset has completed, so
+ * wait for some time.
+ */
+ usleep_range(2000, 2500);
- ret = reset_control_assert(res->pipe_reset);
+ ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
if (ret) {
- dev_err(dev, "cannot assert pipe reset\n");
- return ret;
+ dev_err(dev, "cannot prepare/enable clocks\n");
+ goto err_assert_resets;
}
- ret = reset_control_assert(res->pipe_sticky_reset);
- if (ret) {
- dev_err(dev, "cannot assert pipe sticky reset\n");
- return ret;
- }
+ return 0;
- ret = reset_control_assert(res->phy_reset);
- if (ret) {
- dev_err(dev, "cannot assert phy reset\n");
- return ret;
- }
+err_assert_resets:
+ /*
+ * The return value is not checked here, as the original failure
+ * is returned in 'ret' anyway.
+ */
+ reset_control_bulk_assert(ARRAY_SIZE(res->rst), res->rst);
- ret = reset_control_assert(res->phy_ahb_reset);
- if (ret) {
- dev_err(dev, "cannot assert phy ahb reset\n");
- return ret;
- }
+ return ret;
+}
- usleep_range(10000, 12000);
+static int qcom_pcie_post_init_2_3_3(struct qcom_pcie *pcie)
+{
+ struct dw_pcie *pci = pcie->pci;
+ u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ u32 val;
- ret = reset_control_assert(res->axi_m_sticky_reset);
- if (ret) {
- dev_err(dev, "cannot assert axi master sticky reset\n");
- return ret;
- }
+ val = readl(pcie->parf + PARF_PHY_CTRL);
+ val &= ~PHY_TEST_PWR_DOWN;
+ writel(val, pcie->parf + PARF_PHY_CTRL);
- ret = reset_control_assert(res->pwr_reset);
- if (ret) {
- dev_err(dev, "cannot assert power reset\n");
- return ret;
- }
+ qcom_pcie_configure_dbi_atu_base(pcie);
- ret = reset_control_assert(res->ahb_reset);
- if (ret) {
- dev_err(dev, "cannot assert ahb reset\n");
- return ret;
- }
+ writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS
+ | SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS |
+ AUX_PWR_DET | L23_CLK_RMV_DIS | L1_CLK_RMV_DIS,
+ pcie->parf + PARF_SYS_CTRL);
+ writel(0, pcie->parf + PARF_Q2A_FLUSH);
- usleep_range(10000, 12000);
+ writel(PCI_COMMAND_MASTER, pci->dbi_base + PCI_COMMAND);
- ret = reset_control_deassert(res->phy_ahb_reset);
- if (ret) {
- dev_err(dev, "cannot deassert phy ahb reset\n");
- return ret;
- }
+ dw_pcie_dbi_ro_wr_en(pci);
- ret = reset_control_deassert(res->phy_reset);
- if (ret) {
- dev_err(dev, "cannot deassert phy reset\n");
- goto err_rst_phy;
- }
+ writel(PCIE_CAP_SLOT_VAL, pci->dbi_base + offset + PCI_EXP_SLTCAP);
- ret = reset_control_deassert(res->pipe_reset);
- if (ret) {
- dev_err(dev, "cannot deassert pipe reset\n");
- goto err_rst_pipe;
- }
+ val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP);
+ val &= ~PCI_EXP_LNKCAP_ASPMS;
+ writel(val, pci->dbi_base + offset + PCI_EXP_LNKCAP);
- ret = reset_control_deassert(res->pipe_sticky_reset);
- if (ret) {
- dev_err(dev, "cannot deassert pipe sticky reset\n");
- goto err_rst_pipe_sticky;
- }
+ writel(PCI_EXP_DEVCTL2_COMP_TMOUT_DIS, pci->dbi_base + offset +
+ PCI_EXP_DEVCTL2);
- usleep_range(10000, 12000);
+ dw_pcie_dbi_ro_wr_dis(pci);
- ret = reset_control_deassert(res->axi_m_reset);
- if (ret) {
- dev_err(dev, "cannot deassert axi master reset\n");
- goto err_rst_axi_m;
- }
+ return 0;
+}
- ret = reset_control_deassert(res->axi_m_sticky_reset);
- if (ret) {
- dev_err(dev, "cannot deassert axi master sticky reset\n");
- goto err_rst_axi_m_sticky;
- }
+static int qcom_pcie_get_resources_2_7_0(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
+ int ret;
- ret = reset_control_deassert(res->axi_s_reset);
- if (ret) {
- dev_err(dev, "cannot deassert axi slave reset\n");
- goto err_rst_axi_s;
- }
+ res->rst = devm_reset_control_array_get_exclusive(dev);
+ if (IS_ERR(res->rst))
+ return PTR_ERR(res->rst);
- ret = reset_control_deassert(res->pwr_reset);
- if (ret) {
- dev_err(dev, "cannot deassert power reset\n");
- goto err_rst_pwr;
- }
+ res->supplies[0].supply = "vdda";
+ res->supplies[1].supply = "vddpe-3v3";
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
+ res->supplies);
+ if (ret)
+ return ret;
- ret = reset_control_deassert(res->ahb_reset);
- if (ret) {
- dev_err(dev, "cannot deassert ahb reset\n");
- goto err_rst_ahb;
+ res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
+ if (res->num_clks < 0) {
+ dev_err(dev, "Failed to get clocks\n");
+ return res->num_clks;
}
- usleep_range(10000, 12000);
+ return 0;
+}
- ret = clk_prepare_enable(res->aux_clk);
- if (ret) {
- dev_err(dev, "cannot prepare/enable iface clock\n");
- goto err_clk_aux;
+static int qcom_pcie_init_2_7_0(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
+ u32 val;
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
+ if (ret < 0) {
+ dev_err(dev, "cannot enable regulators\n");
+ return ret;
}
- ret = clk_prepare_enable(res->master_clk);
+ ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
+ if (ret < 0)
+ goto err_disable_regulators;
+
+ ret = reset_control_assert(res->rst);
if (ret) {
- dev_err(dev, "cannot prepare/enable core clock\n");
- goto err_clk_axi_m;
+ dev_err(dev, "reset assert failed (%d)\n", ret);
+ goto err_disable_clocks;
}
- ret = clk_prepare_enable(res->slave_clk);
+ usleep_range(1000, 1500);
+
+ ret = reset_control_deassert(res->rst);
if (ret) {
- dev_err(dev, "cannot prepare/enable phy clock\n");
- goto err_clk_axi_s;
+ dev_err(dev, "reset deassert failed (%d)\n", ret);
+ goto err_disable_clocks;
}
+ /* Wait for reset to complete, required on SM8450 */
+ usleep_range(1000, 1500);
+
+ /* configure PCIe to RC mode */
+ writel(DEVICE_TYPE_RC, pcie->parf + PARF_DEVICE_TYPE);
+
/* enable PCIe clocks and resets */
- val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
- val &= ~BIT(0);
- writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
+ val = readl(pcie->parf + PARF_PHY_CTRL);
+ val &= ~PHY_TEST_PWR_DOWN;
+ writel(val, pcie->parf + PARF_PHY_CTRL);
- /* change DBI base address */
- writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);
+ qcom_pcie_configure_dbi_atu_base(pcie);
/* MAC PHY_POWERDOWN MUX DISABLE */
- val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
- val &= ~BIT(29);
- writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);
+ val = readl(pcie->parf + PARF_SYS_CTRL);
+ val &= ~MAC_PHY_POWERDOWN_IN_P2_D_MUX_EN;
+ writel(val, pcie->parf + PARF_SYS_CTRL);
- val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
- val |= BIT(4);
- writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
+ val = readl(pcie->parf + PARF_MHI_CLOCK_RESET_CTRL);
+ val |= BYPASS;
+ writel(val, pcie->parf + PARF_MHI_CLOCK_RESET_CTRL);
- val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
- val |= BIT(31);
- writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
+ /* Enable L1 and L1SS */
+ val = readl(pcie->parf + PARF_PM_CTRL);
+ val &= ~REQ_NOT_ENTR_L1;
+ writel(val, pcie->parf + PARF_PM_CTRL);
+
+ pci->l1ss_support = true;
+
+ val = readl(pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2);
+ val |= EN;
+ writel(val, pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2);
return 0;
+err_disable_clocks:
+ clk_bulk_disable_unprepare(res->num_clks, res->clks);
+err_disable_regulators:
+ regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
-err_clk_axi_s:
- clk_disable_unprepare(res->master_clk);
-err_clk_axi_m:
- clk_disable_unprepare(res->aux_clk);
-err_clk_aux:
- reset_control_assert(res->ahb_reset);
-err_rst_ahb:
- reset_control_assert(res->pwr_reset);
-err_rst_pwr:
- reset_control_assert(res->axi_s_reset);
-err_rst_axi_s:
- reset_control_assert(res->axi_m_sticky_reset);
-err_rst_axi_m_sticky:
- reset_control_assert(res->axi_m_reset);
-err_rst_axi_m:
- reset_control_assert(res->pipe_sticky_reset);
-err_rst_pipe_sticky:
- reset_control_assert(res->pipe_reset);
-err_rst_pipe:
- reset_control_assert(res->phy_reset);
-err_rst_phy:
- reset_control_assert(res->phy_ahb_reset);
return ret;
}
-static int qcom_pcie_get_resources_2_3_3(struct qcom_pcie *pcie)
+static int qcom_pcie_post_init_2_7_0(struct qcom_pcie *pcie)
{
- struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
- struct dw_pcie *pci = pcie->pci;
- struct device *dev = pci->dev;
- int i;
- const char *rst_names[] = { "axi_m", "axi_s", "pipe",
- "axi_m_sticky", "sticky",
- "ahb", "sleep", };
-
- res->iface = devm_clk_get(dev, "iface");
- if (IS_ERR(res->iface))
- return PTR_ERR(res->iface);
-
- res->axi_m_clk = devm_clk_get(dev, "axi_m");
- if (IS_ERR(res->axi_m_clk))
- return PTR_ERR(res->axi_m_clk);
-
- res->axi_s_clk = devm_clk_get(dev, "axi_s");
- if (IS_ERR(res->axi_s_clk))
- return PTR_ERR(res->axi_s_clk);
-
- res->ahb_clk = devm_clk_get(dev, "ahb");
- if (IS_ERR(res->ahb_clk))
- return PTR_ERR(res->ahb_clk);
-
- res->aux_clk = devm_clk_get(dev, "aux");
- if (IS_ERR(res->aux_clk))
- return PTR_ERR(res->aux_clk);
-
- for (i = 0; i < ARRAY_SIZE(rst_names); i++) {
- res->rst[i] = devm_reset_control_get(dev, rst_names[i]);
- if (IS_ERR(res->rst[i]))
- return PTR_ERR(res->rst[i]);
- }
+ const struct qcom_pcie_cfg *pcie_cfg = pcie->cfg;
+
+ if (pcie_cfg->override_no_snoop)
+ writel(WR_NO_SNOOP_OVERRIDE_EN | RD_NO_SNOOP_OVERRIDE_EN,
+ pcie->parf + PARF_NO_SNOOP_OVERRIDE);
+
+ qcom_pcie_clear_aspm_l0s(pcie->pci);
+ qcom_pcie_clear_hpc(pcie->pci);
return 0;
}
-static void qcom_pcie_deinit_2_3_3(struct qcom_pcie *pcie)
+static int qcom_pcie_enable_aspm(struct pci_dev *pdev, void *userdata)
{
- struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
+ /*
+ * Downstream devices need to be in D0 state before enabling PCI PM
+ * substates.
+ */
+ pci_set_power_state_locked(pdev, PCI_D0);
+ pci_enable_link_state_locked(pdev, PCIE_LINK_STATE_ALL);
- clk_disable_unprepare(res->iface);
- clk_disable_unprepare(res->axi_m_clk);
- clk_disable_unprepare(res->axi_s_clk);
- clk_disable_unprepare(res->ahb_clk);
- clk_disable_unprepare(res->aux_clk);
+ return 0;
}
-static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie)
+static void qcom_pcie_host_post_init_2_7_0(struct qcom_pcie *pcie)
{
- struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
- struct dw_pcie *pci = pcie->pci;
- struct device *dev = pci->dev;
- int i, ret;
+ struct dw_pcie_rp *pp = &pcie->pci->pp;
+
+ pci_walk_bus(pp->bridge->bus, qcom_pcie_enable_aspm, NULL);
+}
+
+static void qcom_pcie_deinit_2_7_0(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
+
+ clk_bulk_disable_unprepare(res->num_clks, res->clks);
+
+ regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
+}
+
+static int qcom_pcie_config_sid_1_9_0(struct qcom_pcie *pcie)
+{
+ /* iommu map structure */
+ struct {
+ u32 bdf;
+ u32 phandle;
+ u32 smmu_sid;
+ u32 smmu_sid_len;
+ } *map;
+ void __iomem *bdf_to_sid_base = pcie->parf + PARF_BDF_TO_SID_TABLE_N;
+ struct device *dev = pcie->pci->dev;
+ u8 qcom_pcie_crc8_table[CRC8_TABLE_SIZE];
+ int i, nr_map, size = 0;
+ u32 smmu_sid_base;
u32 val;
- for (i = 0; i < ARRAY_SIZE(res->rst); i++) {
- ret = reset_control_assert(res->rst[i]);
- if (ret) {
- dev_err(dev, "reset #%d assert failed (%d)\n", i, ret);
- return ret;
- }
- }
+ of_get_property(dev->of_node, "iommu-map", &size);
+ if (!size)
+ return 0;
- usleep_range(2000, 2500);
+ /* Enable BDF to SID translation by disabling bypass mode (default) */
+ val = readl(pcie->parf + PARF_BDF_TO_SID_CFG);
+ val &= ~BDF_TO_SID_BYPASS;
+ writel(val, pcie->parf + PARF_BDF_TO_SID_CFG);
- for (i = 0; i < ARRAY_SIZE(res->rst); i++) {
- ret = reset_control_deassert(res->rst[i]);
- if (ret) {
- dev_err(dev, "reset #%d deassert failed (%d)\n", i,
- ret);
- return ret;
+ map = kzalloc(size, GFP_KERNEL);
+ if (!map)
+ return -ENOMEM;
+
+ of_property_read_u32_array(dev->of_node, "iommu-map", (u32 *)map,
+ size / sizeof(u32));
+
+ nr_map = size / (sizeof(*map));
+
+ crc8_populate_msb(qcom_pcie_crc8_table, QCOM_PCIE_CRC8_POLYNOMIAL);
+
+ /* Registers need to be zeroed out first */
+ memset_io(bdf_to_sid_base, 0, CRC8_TABLE_SIZE * sizeof(u32));
+
+ /* Extract the SMMU SID base from the first entry of iommu-map */
+ smmu_sid_base = map[0].smmu_sid;
+
+ /* Look for an available entry to hold the mapping */
+ for (i = 0; i < nr_map; i++) {
+ __be16 bdf_be = cpu_to_be16(map[i].bdf);
+ u32 val;
+ u8 hash;
+
+ hash = crc8(qcom_pcie_crc8_table, (u8 *)&bdf_be, sizeof(bdf_be), 0);
+
+ val = readl(bdf_to_sid_base + hash * sizeof(u32));
+
+ /* If the register is already populated, look for the next available entry */
+ while (val) {
+ u8 current_hash = hash++;
+ u8 next_mask = 0xff;
+
+ /* If the NEXT field is NULL, update it with the next hash */
+ if (!(val & next_mask)) {
+ val |= (u32)hash;
+ writel(val, bdf_to_sid_base + current_hash * sizeof(u32));
+ }
+
+ val = readl(bdf_to_sid_base + hash * sizeof(u32));
}
+
+ /* BDF [31:16] | SID [15:8] | NEXT [7:0] */
+ val = map[i].bdf << 16 | (map[i].smmu_sid - smmu_sid_base) << 8 | 0;
+ writel(val, bdf_to_sid_base + hash * sizeof(u32));
}
- /*
- * Don't have a way to see if the reset has completed.
- * Wait for some time.
- */
- usleep_range(2000, 2500);
+ kfree(map);
- ret = clk_prepare_enable(res->iface);
- if (ret) {
- dev_err(dev, "cannot prepare/enable core clock\n");
- goto err_clk_iface;
- }
+ return 0;
+}
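The table above packs each mapping into a single 32-bit register per hash slot: the BDF in bits [31:16], the SID offset from the SMMU SID base in bits [15:8], and a NEXT hash in bits [7:0] for collision chaining. A minimal sketch of that encoding (the helper name is illustrative, not part of the driver):

/* Sketch only: mirrors the BDF/SID/NEXT packing used by the loop above. */
static inline u32 qcom_bdf_to_sid_entry(u16 bdf, u8 sid_offset, u8 next_hash)
{
	return (u32)bdf << 16 | (u32)sid_offset << 8 | next_hash;
}

/*
 * Example: BDF 0x0100 (bus 1, devfn 0) with SID offset 0x02 and no chained
 * entry encodes as 0x01000200, stored at the slot selected by crc8() over
 * the big-endian BDF.
 */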
- ret = clk_prepare_enable(res->axi_m_clk);
- if (ret) {
- dev_err(dev, "cannot prepare/enable core clock\n");
- goto err_clk_axi_m;
- }
+static int qcom_pcie_get_resources_2_9_0(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0;
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
- ret = clk_prepare_enable(res->axi_s_clk);
- if (ret) {
- dev_err(dev, "cannot prepare/enable axi slave clock\n");
- goto err_clk_axi_s;
+ res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
+ if (res->num_clks < 0) {
+ dev_err(dev, "Failed to get clocks\n");
+ return res->num_clks;
}
- ret = clk_prepare_enable(res->ahb_clk);
+ res->rst = devm_reset_control_array_get_exclusive(dev);
+ if (IS_ERR(res->rst))
+ return PTR_ERR(res->rst);
+
+ return 0;
+}
+
+static void qcom_pcie_deinit_2_9_0(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0;
+
+ clk_bulk_disable_unprepare(res->num_clks, res->clks);
+}
+
+static int qcom_pcie_init_2_9_0(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0;
+ struct device *dev = pcie->pci->dev;
+ int ret;
+
+ ret = reset_control_assert(res->rst);
if (ret) {
- dev_err(dev, "cannot prepare/enable ahb clock\n");
- goto err_clk_ahb;
+ dev_err(dev, "reset assert failed (%d)\n", ret);
+ return ret;
}
- ret = clk_prepare_enable(res->aux_clk);
+ /*
+ * The delay periods before and after reset deassert are working values
+ * taken from the downstream Codeaurora kernel.
+ */
+ usleep_range(2000, 2500);
+
+ ret = reset_control_deassert(res->rst);
if (ret) {
- dev_err(dev, "cannot prepare/enable aux clock\n");
- goto err_clk_aux;
+ dev_err(dev, "reset deassert failed (%d)\n", ret);
+ return ret;
}
- writel(SLV_ADDR_SPACE_SZ,
- pcie->parf + PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE);
+ usleep_range(2000, 2500);
+
+ return clk_bulk_prepare_enable(res->num_clks, res->clks);
+}
- val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
- val &= ~BIT(0);
- writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
+static int qcom_pcie_post_init_2_9_0(struct qcom_pcie *pcie)
+{
+ struct dw_pcie *pci = pcie->pci;
+ u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ u32 val;
+ int i;
- writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);
+ val = readl(pcie->parf + PARF_PHY_CTRL);
+ val &= ~PHY_TEST_PWR_DOWN;
+ writel(val, pcie->parf + PARF_PHY_CTRL);
- writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS
- | SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS |
+ qcom_pcie_configure_dbi_atu_base(pcie);
+
+ writel(DEVICE_TYPE_RC, pcie->parf + PARF_DEVICE_TYPE);
+ writel(BYPASS | MSTR_AXI_CLK_EN | AHB_CLK_EN,
+ pcie->parf + PARF_MHI_CLOCK_RESET_CTRL);
+ writel(GEN3_RELATED_OFF_RXEQ_RGRDLESS_RXTS |
+ GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL,
+ pci->dbi_base + GEN3_RELATED_OFF);
+
+ writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS |
+ SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS |
AUX_PWR_DET | L23_CLK_RMV_DIS | L1_CLK_RMV_DIS,
- pcie->parf + PCIE20_PARF_SYS_CTRL);
- writel(0, pcie->parf + PCIE20_PARF_Q2A_FLUSH);
+ pcie->parf + PARF_SYS_CTRL);
+
+ writel(0, pcie->parf + PARF_Q2A_FLUSH);
+
+ dw_pcie_dbi_ro_wr_en(pci);
+
+ writel(PCIE_CAP_SLOT_VAL, pci->dbi_base + offset + PCI_EXP_SLTCAP);
+
+ val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP);
+ val &= ~PCI_EXP_LNKCAP_ASPMS;
+ writel(val, pci->dbi_base + offset + PCI_EXP_LNKCAP);
- writel(CMD_BME_VAL, pci->dbi_base + PCIE20_COMMAND_STATUS);
- writel(DBI_RO_WR_EN, pci->dbi_base + PCIE20_MISC_CONTROL_1_REG);
- writel(PCIE_CAP_LINK1_VAL, pci->dbi_base + PCIE20_CAP_LINK_1);
+ writel(PCI_EXP_DEVCTL2_COMP_TMOUT_DIS, pci->dbi_base + offset +
+ PCI_EXP_DEVCTL2);
- val = readl(pci->dbi_base + PCIE20_CAP_LINK_CAPABILITIES);
- val &= ~PCIE20_CAP_ACTIVE_STATE_LINK_PM_SUPPORT;
- writel(val, pci->dbi_base + PCIE20_CAP_LINK_CAPABILITIES);
+ dw_pcie_dbi_ro_wr_dis(pci);
- writel(PCIE_CAP_CPL_TIMEOUT_DISABLE, pci->dbi_base +
- PCIE20_DEVICE_CONTROL2_STATUS2);
+ for (i = 0; i < 256; i++)
+ writel(0, pcie->parf + PARF_BDF_TO_SID_TABLE_N + (4 * i));
return 0;
+}
-err_clk_aux:
- clk_disable_unprepare(res->ahb_clk);
-err_clk_ahb:
- clk_disable_unprepare(res->axi_s_clk);
-err_clk_axi_s:
- clk_disable_unprepare(res->axi_m_clk);
-err_clk_axi_m:
- clk_disable_unprepare(res->iface);
-err_clk_iface:
- /*
- * Not checking for failure, will anyway return
- * the original failure in 'ret'.
- */
- for (i = 0; i < ARRAY_SIZE(res->rst); i++)
- reset_control_assert(res->rst[i]);
+static bool qcom_pcie_link_up(struct dw_pcie *pci)
+{
+ u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ u16 val = readw(pci->dbi_base + offset + PCI_EXP_LNKSTA);
- return ret;
+ return val & PCI_EXP_LNKSTA_DLLLA;
+}
+
+static void qcom_pcie_phy_power_off(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_port *port;
+
+ list_for_each_entry(port, &pcie->ports, list)
+ phy_power_off(port->phy);
}
-static int qcom_pcie_link_up(struct dw_pcie *pci)
+static int qcom_pcie_phy_power_on(struct qcom_pcie *pcie)
{
- u16 val = readw(pci->dbi_base + PCIE20_CAP + PCI_EXP_LNKSTA);
+ struct qcom_pcie_port *port;
+ int ret;
+
+ list_for_each_entry(port, &pcie->ports, list) {
+ ret = phy_set_mode_ext(port->phy, PHY_MODE_PCIE, PHY_MODE_PCIE_RC);
+ if (ret)
+ return ret;
+
+ ret = phy_power_on(port->phy);
+ if (ret) {
+ qcom_pcie_phy_power_off(pcie);
+ return ret;
+ }
+ }
- return !!(val & PCI_EXP_LNKSTA_DLLLA);
+ return 0;
}
-static int qcom_pcie_host_init(struct pcie_port *pp)
+static int qcom_pcie_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct qcom_pcie *pcie = to_qcom_pcie(pci);
@@ -1091,69 +1302,70 @@ static int qcom_pcie_host_init(struct pcie_port *pp)
qcom_ep_reset_assert(pcie);
- ret = pcie->ops->init(pcie);
+ ret = pcie->cfg->ops->init(pcie);
if (ret)
return ret;
- ret = phy_power_on(pcie->phy);
+ ret = qcom_pcie_phy_power_on(pcie);
if (ret)
goto err_deinit;
- if (pcie->ops->post_init) {
- ret = pcie->ops->post_init(pcie);
+ if (pcie->cfg->ops->post_init) {
+ ret = pcie->cfg->ops->post_init(pcie);
if (ret)
goto err_disable_phy;
}
- dw_pcie_setup_rc(pp);
-
- if (IS_ENABLED(CONFIG_PCI_MSI))
- dw_pcie_msi_init(pp);
-
qcom_ep_reset_deassert(pcie);
- ret = qcom_pcie_establish_link(pcie);
- if (ret)
- goto err;
+ if (pcie->cfg->ops->config_sid) {
+ ret = pcie->cfg->ops->config_sid(pcie);
+ if (ret)
+ goto err_assert_reset;
+ }
return 0;
-err:
+
+err_assert_reset:
qcom_ep_reset_assert(pcie);
- if (pcie->ops->post_deinit)
- pcie->ops->post_deinit(pcie);
err_disable_phy:
- phy_power_off(pcie->phy);
+ qcom_pcie_phy_power_off(pcie);
err_deinit:
- pcie->ops->deinit(pcie);
+ pcie->cfg->ops->deinit(pcie);
return ret;
}
-static int qcom_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
- u32 *val)
+static void qcom_pcie_host_deinit(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct qcom_pcie *pcie = to_qcom_pcie(pci);
- /* the device class is not reported correctly from the register */
- if (where == PCI_CLASS_REVISION && size == 4) {
- *val = readl(pci->dbi_base + PCI_CLASS_REVISION);
- *val &= 0xff; /* keep revision id */
- *val |= PCI_CLASS_BRIDGE_PCI << 16;
- return PCIBIOS_SUCCESSFUL;
- }
+ qcom_ep_reset_assert(pcie);
+ qcom_pcie_phy_power_off(pcie);
+ pcie->cfg->ops->deinit(pcie);
+}
+
+static void qcom_pcie_host_post_init(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct qcom_pcie *pcie = to_qcom_pcie(pci);
- return dw_pcie_read(pci->dbi_base + where, size, val);
+ if (pcie->cfg->ops->host_post_init)
+ pcie->cfg->ops->host_post_init(pcie);
}
static const struct dw_pcie_host_ops qcom_pcie_dw_ops = {
- .host_init = qcom_pcie_host_init,
- .rd_own_conf = qcom_pcie_rd_own_conf,
+ .init = qcom_pcie_host_init,
+ .deinit = qcom_pcie_host_deinit,
+ .post_init = qcom_pcie_host_post_init,
};
/* Qcom IP rev.: 2.1.0 Synopsys IP rev.: 4.01a */
static const struct qcom_pcie_ops ops_2_1_0 = {
.get_resources = qcom_pcie_get_resources_2_1_0,
.init = qcom_pcie_init_2_1_0,
+ .post_init = qcom_pcie_post_init_2_1_0,
.deinit = qcom_pcie_deinit_2_1_0,
.ltssm_enable = qcom_pcie_2_1_0_ltssm_enable,
};
@@ -1162,6 +1374,7 @@ static const struct qcom_pcie_ops ops_2_1_0 = {
static const struct qcom_pcie_ops ops_1_0_0 = {
.get_resources = qcom_pcie_get_resources_1_0_0,
.init = qcom_pcie_init_1_0_0,
+ .post_init = qcom_pcie_post_init_1_0_0,
.deinit = qcom_pcie_deinit_1_0_0,
.ltssm_enable = qcom_pcie_2_1_0_ltssm_enable,
};
@@ -1172,7 +1385,6 @@ static const struct qcom_pcie_ops ops_2_3_2 = {
.init = qcom_pcie_init_2_3_2,
.post_init = qcom_pcie_post_init_2_3_2,
.deinit = qcom_pcie_deinit_2_3_2,
- .post_deinit = qcom_pcie_post_deinit_2_3_2,
.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};
@@ -1180,6 +1392,7 @@ static const struct qcom_pcie_ops ops_2_3_2 = {
static const struct qcom_pcie_ops ops_2_4_0 = {
.get_resources = qcom_pcie_get_resources_2_4_0,
.init = qcom_pcie_init_2_4_0,
+ .post_init = qcom_pcie_post_init_2_3_2,
.deinit = qcom_pcie_deinit_2_4_0,
.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};
@@ -1188,110 +1401,605 @@ static const struct qcom_pcie_ops ops_2_4_0 = {
static const struct qcom_pcie_ops ops_2_3_3 = {
.get_resources = qcom_pcie_get_resources_2_3_3,
.init = qcom_pcie_init_2_3_3,
+ .post_init = qcom_pcie_post_init_2_3_3,
.deinit = qcom_pcie_deinit_2_3_3,
.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};
+/* Qcom IP rev.: 2.7.0 Synopsys IP rev.: 4.30a */
+static const struct qcom_pcie_ops ops_2_7_0 = {
+ .get_resources = qcom_pcie_get_resources_2_7_0,
+ .init = qcom_pcie_init_2_7_0,
+ .post_init = qcom_pcie_post_init_2_7_0,
+ .deinit = qcom_pcie_deinit_2_7_0,
+ .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
+};
+
+/* Qcom IP rev.: 1.9.0 */
+static const struct qcom_pcie_ops ops_1_9_0 = {
+ .get_resources = qcom_pcie_get_resources_2_7_0,
+ .init = qcom_pcie_init_2_7_0,
+ .post_init = qcom_pcie_post_init_2_7_0,
+ .host_post_init = qcom_pcie_host_post_init_2_7_0,
+ .deinit = qcom_pcie_deinit_2_7_0,
+ .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
+ .config_sid = qcom_pcie_config_sid_1_9_0,
+};
+
+/* Qcom IP rev.: 1.21.0 Synopsys IP rev.: 5.60a */
+static const struct qcom_pcie_ops ops_1_21_0 = {
+ .get_resources = qcom_pcie_get_resources_2_7_0,
+ .init = qcom_pcie_init_2_7_0,
+ .post_init = qcom_pcie_post_init_2_7_0,
+ .host_post_init = qcom_pcie_host_post_init_2_7_0,
+ .deinit = qcom_pcie_deinit_2_7_0,
+ .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
+};
+
+/* Qcom IP rev.: 2.9.0 Synopsys IP rev.: 5.00a */
+static const struct qcom_pcie_ops ops_2_9_0 = {
+ .get_resources = qcom_pcie_get_resources_2_9_0,
+ .init = qcom_pcie_init_2_9_0,
+ .post_init = qcom_pcie_post_init_2_9_0,
+ .deinit = qcom_pcie_deinit_2_9_0,
+ .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
+};
+
+static const struct qcom_pcie_cfg cfg_1_0_0 = {
+ .ops = &ops_1_0_0,
+};
+
+static const struct qcom_pcie_cfg cfg_1_9_0 = {
+ .ops = &ops_1_9_0,
+};
+
+static const struct qcom_pcie_cfg cfg_1_34_0 = {
+ .ops = &ops_1_9_0,
+ .override_no_snoop = true,
+};
+
+static const struct qcom_pcie_cfg cfg_2_1_0 = {
+ .ops = &ops_2_1_0,
+};
+
+static const struct qcom_pcie_cfg cfg_2_3_2 = {
+ .ops = &ops_2_3_2,
+};
+
+static const struct qcom_pcie_cfg cfg_2_3_3 = {
+ .ops = &ops_2_3_3,
+};
+
+static const struct qcom_pcie_cfg cfg_2_4_0 = {
+ .ops = &ops_2_4_0,
+};
+
+static const struct qcom_pcie_cfg cfg_2_7_0 = {
+ .ops = &ops_2_7_0,
+};
+
+static const struct qcom_pcie_cfg cfg_2_9_0 = {
+ .ops = &ops_2_9_0,
+};
+
+static const struct qcom_pcie_cfg cfg_sc8280xp = {
+ .ops = &ops_1_21_0,
+ .no_l0s = true,
+};
+
+static const struct qcom_pcie_cfg cfg_fw_managed = {
+ .firmware_managed = true,
+};
+
static const struct dw_pcie_ops dw_pcie_ops = {
.link_up = qcom_pcie_link_up,
+ .start_link = qcom_pcie_start_link,
+ .assert_perst = qcom_pcie_assert_perst,
};
-static int qcom_pcie_probe(struct platform_device *pdev)
+static int qcom_pcie_icc_init(struct qcom_pcie *pcie)
{
- struct device *dev = &pdev->dev;
- struct resource *res;
- struct pcie_port *pp;
- struct dw_pcie *pci;
- struct qcom_pcie *pcie;
+ struct dw_pcie *pci = pcie->pci;
int ret;
- pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
- if (!pcie)
- return -ENOMEM;
+ pcie->icc_mem = devm_of_icc_get(pci->dev, "pcie-mem");
+ if (IS_ERR(pcie->icc_mem))
+ return PTR_ERR(pcie->icc_mem);
+
+ pcie->icc_cpu = devm_of_icc_get(pci->dev, "cpu-pcie");
+ if (IS_ERR(pcie->icc_cpu))
+ return PTR_ERR(pcie->icc_cpu);
+ /*
+ * Some Qualcomm platforms require interconnect bandwidth constraints
+ * to be set before enabling interconnect clocks.
+ *
+ * Set an initial peak bandwidth corresponding to single-lane Gen 1
+ * for the pcie-mem path.
+ */
+ ret = icc_set_bw(pcie->icc_mem, 0, QCOM_PCIE_LINK_SPEED_TO_BW(1));
+ if (ret) {
+ dev_err(pci->dev, "Failed to set bandwidth for PCIe-MEM interconnect path: %d\n",
+ ret);
+ return ret;
+ }
+
+ /*
+ * Since the CPU-PCIe path is only used for activities like register
+ * access of the host controller and endpoint Config/BAR space access,
+ * the HW team recommends using a minimal bandwidth of 1 KBps just to
+ * keep the path active.
+ */
+ ret = icc_set_bw(pcie->icc_cpu, 0, kBps_to_icc(1));
+ if (ret) {
+ dev_err(pci->dev, "Failed to set bandwidth for CPU-PCIe interconnect path: %d\n",
+ ret);
+ icc_set_bw(pcie->icc_mem, 0, 0);
+ return ret;
+ }
+
+ return 0;
+}
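QCOM_PCIE_LINK_SPEED_TO_BW(), used above, converts a PCIe link-speed code into an ICC peak-bandwidth vote. As a rough model (a sketch of the arithmetic, not the driver's actual macro), per-lane throughput starts at 250 MB/s for Gen 1 (2.5 GT/s with 8b/10b encoding) and approximately doubles with each generation:

/* Sketch only: approximate per-lane PCIe throughput in MB/s. */
static unsigned int pcie_gen_to_mbps(unsigned int gen)
{
	unsigned int mbps = 250;	/* Gen 1: 2.5 GT/s, 8b/10b */

	while (--gen)
		mbps *= 2;	/* Gen 3+ (128b/130b) is marginally below 2x */

	return mbps;
}

qcom_pcie_icc_opp_update() below then scales this per-lane figure by the negotiated link width when re-voting after link-up.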
+
+static void qcom_pcie_icc_opp_update(struct qcom_pcie *pcie)
+{
+ u32 offset, status, width, speed;
+ struct dw_pcie *pci = pcie->pci;
+ struct dev_pm_opp_key key = {};
+ unsigned long freq_kbps;
+ struct dev_pm_opp *opp;
+ int ret, freq_mbps;
+
+ offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ status = readw(pci->dbi_base + offset + PCI_EXP_LNKSTA);
+
+ /* Only update constraints if link is up. */
+ if (!(status & PCI_EXP_LNKSTA_DLLLA))
+ return;
+
+ speed = FIELD_GET(PCI_EXP_LNKSTA_CLS, status);
+ width = FIELD_GET(PCI_EXP_LNKSTA_NLW, status);
+
+ if (pcie->icc_mem) {
+ ret = icc_set_bw(pcie->icc_mem, 0,
+ width * QCOM_PCIE_LINK_SPEED_TO_BW(speed));
+ if (ret) {
+ dev_err(pci->dev, "Failed to set bandwidth for PCIe-MEM interconnect path: %d\n",
+ ret);
+ }
+ } else if (pcie->use_pm_opp) {
+ freq_mbps = pcie_dev_speed_mbps(pcie_link_speed[speed]);
+ if (freq_mbps < 0)
+ return;
+
+ freq_kbps = freq_mbps * KILO;
+ opp = dev_pm_opp_find_level_exact(pci->dev, speed);
+ if (IS_ERR(opp)) {
+ /* opp-level is not defined, use only the frequency */
+ opp = dev_pm_opp_find_freq_exact(pci->dev, freq_kbps * width,
+ true);
+ } else {
+ /* Drop the reference to the opp-level OPP */
+ dev_pm_opp_put(opp);
+
+ key.freq = freq_kbps * width;
+ key.level = speed;
+ key.bw = 0;
+ opp = dev_pm_opp_find_key_exact(pci->dev, &key, true);
+ }
+ if (!IS_ERR(opp)) {
+ ret = dev_pm_opp_set_opp(pci->dev, opp);
+ if (ret)
+ dev_err(pci->dev, "Failed to set OPP for freq (%lu): %d\n",
+ freq_kbps * width, ret);
+ dev_pm_opp_put(opp);
+ }
+ }
+}
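For reference, PCI_EXP_LNKSTA_CLS occupies bits [3:0] of the Link Status register (current link speed code) and PCI_EXP_LNKSTA_NLW bits [9:4] (negotiated link width), which is what the two FIELD_GET() calls above extract. A worked example with a hypothetical register value:

/*
 * Hypothetical example: LNKSTA = 0x2042
 *   PCI_EXP_LNKSTA_DLLLA (bit 13) is set      -> link is up
 *   FIELD_GET(PCI_EXP_LNKSTA_CLS, 0x2042) = 2 -> 5.0 GT/s (Gen 2)
 *   FIELD_GET(PCI_EXP_LNKSTA_NLW, 0x2042) = 4 -> x4 link
 * The resulting ICC vote is 4 * QCOM_PCIE_LINK_SPEED_TO_BW(2).
 */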
+
+static int qcom_pcie_link_transition_count(struct seq_file *s, void *data)
+{
+ struct qcom_pcie *pcie = (struct qcom_pcie *)dev_get_drvdata(s->private);
+
+ seq_printf(s, "L0s transition count: %u\n",
+ readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_PM_LINKST_IN_L0S));
+
+ seq_printf(s, "L1 transition count: %u\n",
+ readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_PM_LINKST_IN_L1));
+
+ seq_printf(s, "L1.1 transition count: %u\n",
+ readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L1));
+
+ seq_printf(s, "L1.2 transition count: %u\n",
+ readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L2));
+
+ seq_printf(s, "L2 transition count: %u\n",
+ readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_PM_LINKST_IN_L2));
+
+ return 0;
+}
+
+static void qcom_pcie_init_debugfs(struct qcom_pcie *pcie)
+{
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
+ char *name;
+
+ name = devm_kasprintf(dev, GFP_KERNEL, "%pOFP", dev->of_node);
+ if (!name)
+ return;
+
+ pcie->debugfs = debugfs_create_dir(name, NULL);
+ debugfs_create_devm_seqfile(dev, "link_transition_count", pcie->debugfs,
+ qcom_pcie_link_transition_count);
+}
+
+static irqreturn_t qcom_pcie_global_irq_thread(int irq, void *data)
+{
+ struct qcom_pcie *pcie = data;
+ struct dw_pcie_rp *pp = &pcie->pci->pp;
+ struct device *dev = pcie->pci->dev;
+ u32 status = readl_relaxed(pcie->parf + PARF_INT_ALL_STATUS);
+
+ writel_relaxed(status, pcie->parf + PARF_INT_ALL_CLEAR);
+
+ if (FIELD_GET(PARF_INT_ALL_LINK_UP, status)) {
+ msleep(PCIE_RESET_CONFIG_WAIT_MS);
+ dev_dbg(dev, "Received Link up event. Starting enumeration!\n");
+ /* Rescan the bus to enumerate endpoint devices */
+ pci_lock_rescan_remove();
+ pci_rescan_bus(pp->bridge->bus);
+ pci_unlock_rescan_remove();
+
+ qcom_pcie_icc_opp_update(pcie);
+ } else {
+ dev_WARN_ONCE(dev, 1, "Received unknown event. INT_STATUS: 0x%08x\n",
+ status);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void qcom_pci_free_msi(void *ptr)
+{
+ struct dw_pcie_rp *pp = (struct dw_pcie_rp *)ptr;
+
+ if (pp && pp->has_msi_ctrl)
+ dw_pcie_free_msi(pp);
+}
+
+static int qcom_pcie_ecam_host_init(struct pci_config_window *cfg)
+{
+ struct device *dev = cfg->parent;
+ struct dw_pcie_rp *pp;
+ struct dw_pcie *pci;
+ int ret;
pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
if (!pci)
return -ENOMEM;
+ pci->dev = dev;
+ pp = &pci->pp;
+ pci->dbi_base = cfg->win;
+ pp->num_vectors = MSI_DEF_NUM_VECTORS;
+
+ ret = dw_pcie_msi_host_init(pp);
+ if (ret)
+ return ret;
+
+ pp->has_msi_ctrl = true;
+ dw_pcie_msi_init(pp);
+
+ return devm_add_action_or_reset(dev, qcom_pci_free_msi, pp);
+}
+
+static const struct pci_ecam_ops pci_qcom_ecam_ops = {
+ .init = qcom_pcie_ecam_host_init,
+ .pci_ops = {
+ .map_bus = pci_ecam_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+ }
+};
+
+static int qcom_pcie_parse_port(struct qcom_pcie *pcie, struct device_node *node)
+{
+ struct device *dev = pcie->pci->dev;
+ struct qcom_pcie_port *port;
+ struct gpio_desc *reset;
+ struct phy *phy;
+ int ret;
+
+ reset = devm_fwnode_gpiod_get(dev, of_fwnode_handle(node),
+ "reset", GPIOD_OUT_HIGH, "PERST#");
+ if (IS_ERR(reset))
+ return PTR_ERR(reset);
+
+ phy = devm_of_phy_get(dev, node, NULL);
+ if (IS_ERR(phy))
+ return PTR_ERR(phy);
+
+ port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
+ if (!port)
+ return -ENOMEM;
+
+ ret = phy_init(phy);
+ if (ret)
+ return ret;
+
+ port->reset = reset;
+ port->phy = phy;
+ INIT_LIST_HEAD(&port->list);
+ list_add_tail(&port->list, &pcie->ports);
+
+ return 0;
+}
+
+static int qcom_pcie_parse_ports(struct qcom_pcie *pcie)
+{
+ struct device *dev = pcie->pci->dev;
+ struct qcom_pcie_port *port, *tmp;
+ int ret = -ENOENT;
+
+ for_each_available_child_of_node_scoped(dev->of_node, of_port) {
+ if (!of_node_is_type(of_port, "pci"))
+ continue;
+ ret = qcom_pcie_parse_port(pcie, of_port);
+ if (ret)
+ goto err_port_del;
+ }
+
+ return ret;
+
+err_port_del:
+ list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
+ phy_exit(port->phy);
+ list_del(&port->list);
+ }
+
+ return ret;
+}
+
+static int qcom_pcie_parse_legacy_binding(struct qcom_pcie *pcie)
+{
+ struct device *dev = pcie->pci->dev;
+ struct qcom_pcie_port *port;
+ struct gpio_desc *reset;
+ struct phy *phy;
+ int ret;
+
+ phy = devm_phy_optional_get(dev, "pciephy");
+ if (IS_ERR(phy))
+ return PTR_ERR(phy);
+
+ reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_HIGH);
+ if (IS_ERR(reset))
+ return PTR_ERR(reset);
+
+ ret = phy_init(phy);
+ if (ret)
+ return ret;
+
+ port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
+ if (!port)
+ return -ENOMEM;
+
+ port->reset = reset;
+ port->phy = phy;
+ INIT_LIST_HEAD(&port->list);
+ list_add_tail(&port->list, &pcie->ports);
+
+ return 0;
+}
+
+static int qcom_pcie_probe(struct platform_device *pdev)
+{
+ const struct qcom_pcie_cfg *pcie_cfg;
+ unsigned long max_freq = ULONG_MAX;
+ struct qcom_pcie_port *port, *tmp;
+ struct device *dev = &pdev->dev;
+ struct dev_pm_opp *opp;
+ struct qcom_pcie *pcie;
+ struct dw_pcie_rp *pp;
+ struct resource *res;
+ struct dw_pcie *pci;
+ int ret, irq;
+ char *name;
+
+ pcie_cfg = of_device_get_match_data(dev);
+ if (!pcie_cfg) {
+ dev_err(dev, "No platform data\n");
+ return -ENODATA;
+ }
+
+ if (!pcie_cfg->firmware_managed && !pcie_cfg->ops) {
+ dev_err(dev, "No platform ops\n");
+ return -ENODATA;
+ }
+
pm_runtime_enable(dev);
ret = pm_runtime_get_sync(dev);
- if (ret < 0) {
- pm_runtime_disable(dev);
- return ret;
+ if (ret < 0)
+ goto err_pm_runtime_put;
+
+ if (pcie_cfg->firmware_managed) {
+ struct pci_host_bridge *bridge;
+ struct pci_config_window *cfg;
+
+ bridge = devm_pci_alloc_host_bridge(dev, 0);
+ if (!bridge) {
+ ret = -ENOMEM;
+ goto err_pm_runtime_put;
+ }
+
+ /* Parse and map our ECAM configuration space area */
+ cfg = pci_host_common_ecam_create(dev, bridge,
+ &pci_qcom_ecam_ops);
+ if (IS_ERR(cfg)) {
+ ret = PTR_ERR(cfg);
+ goto err_pm_runtime_put;
+ }
+
+ bridge->sysdata = cfg;
+ bridge->ops = (struct pci_ops *)&pci_qcom_ecam_ops.pci_ops;
+ bridge->msi_domain = true;
+
+ ret = pci_host_probe(bridge);
+ if (ret)
+ goto err_pm_runtime_put;
+
+ return 0;
}
+ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+ if (!pcie) {
+ ret = -ENOMEM;
+ goto err_pm_runtime_put;
+ }
+
+ pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
+ if (!pci) {
+ ret = -ENOMEM;
+ goto err_pm_runtime_put;
+ }
+
+ INIT_LIST_HEAD(&pcie->ports);
+
pci->dev = dev;
pci->ops = &dw_pcie_ops;
pp = &pci->pp;
pcie->pci = pci;
- pcie->ops = of_device_get_match_data(dev);
-
- pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_LOW);
- if (IS_ERR(pcie->reset)) {
- ret = PTR_ERR(pcie->reset);
- goto err_pm_runtime_put;
- }
+ pcie->cfg = pcie_cfg;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "parf");
- pcie->parf = devm_ioremap_resource(dev, res);
+ pcie->parf = devm_platform_ioremap_resource_byname(pdev, "parf");
if (IS_ERR(pcie->parf)) {
ret = PTR_ERR(pcie->parf);
goto err_pm_runtime_put;
}
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
- pci->dbi_base = devm_pci_remap_cfg_resource(dev, res);
- if (IS_ERR(pci->dbi_base)) {
- ret = PTR_ERR(pci->dbi_base);
- goto err_pm_runtime_put;
+ /* MHI region is optional */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mhi");
+ if (res) {
+ pcie->mhi = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pcie->mhi)) {
+ ret = PTR_ERR(pcie->mhi);
+ goto err_pm_runtime_put;
+ }
}
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "elbi");
- pcie->elbi = devm_ioremap_resource(dev, res);
- if (IS_ERR(pcie->elbi)) {
- ret = PTR_ERR(pcie->elbi);
+ /* OPP table is optional */
+ ret = devm_pm_opp_of_add_table(dev);
+ if (ret && ret != -ENODEV) {
+ dev_err_probe(dev, ret, "Failed to add OPP table\n");
goto err_pm_runtime_put;
}
- pcie->phy = devm_phy_optional_get(dev, "pciephy");
- if (IS_ERR(pcie->phy)) {
- ret = PTR_ERR(pcie->phy);
- goto err_pm_runtime_put;
+ /*
+ * Before the PCIe link is initialized, vote for the highest OPP in the
+ * OPP table, so that the maximum voltage corner is selected and the
+ * link can come up at the maximum supported speed. At the end of
+ * probe(), the OPP will be updated using qcom_pcie_icc_opp_update().
+ */
+ if (!ret) {
+ opp = dev_pm_opp_find_freq_floor(dev, &max_freq);
+ if (IS_ERR(opp)) {
+ ret = PTR_ERR(opp);
+ dev_err_probe(pci->dev, ret,
+ "Unable to find max freq OPP\n");
+ goto err_pm_runtime_put;
+ } else {
+ ret = dev_pm_opp_set_opp(dev, opp);
+ }
+
+ dev_pm_opp_put(opp);
+ if (ret) {
+ dev_err_probe(pci->dev, ret,
+ "Failed to set OPP for freq %lu\n",
+ max_freq);
+ goto err_pm_runtime_put;
+ }
+
+ pcie->use_pm_opp = true;
+ } else {
+ /* Skip ICC init if OPP is supported, as bandwidth voting is then handled by OPP */
+ ret = qcom_pcie_icc_init(pcie);
+ if (ret)
+ goto err_pm_runtime_put;
}
- ret = pcie->ops->get_resources(pcie);
+ ret = pcie->cfg->ops->get_resources(pcie);
if (ret)
goto err_pm_runtime_put;
pp->ops = &qcom_pcie_dw_ops;
- if (IS_ENABLED(CONFIG_PCI_MSI)) {
- pp->msi_irq = platform_get_irq_byname(pdev, "msi");
- if (pp->msi_irq < 0) {
- ret = pp->msi_irq;
+ ret = qcom_pcie_parse_ports(pcie);
+ if (ret) {
+ if (ret != -ENOENT) {
+ dev_err_probe(pci->dev, ret,
+ "Failed to parse Root Port: %d\n", ret);
goto err_pm_runtime_put;
}
- }
- ret = phy_init(pcie->phy);
- if (ret) {
- pm_runtime_disable(&pdev->dev);
- goto err_pm_runtime_put;
+ /*
+ * If the properties are not populated in the Root Port node, fall
+ * back to the legacy method of parsing the Host Bridge node. This
+ * maintains DT backwards compatibility.
+ */
+ ret = qcom_pcie_parse_legacy_binding(pcie);
+ if (ret)
+ goto err_pm_runtime_put;
}
platform_set_drvdata(pdev, pcie);
+ irq = platform_get_irq_byname_optional(pdev, "global");
+ if (irq > 0)
+ pp->use_linkup_irq = true;
+
ret = dw_pcie_host_init(pp);
if (ret) {
dev_err(dev, "cannot initialize host\n");
- pm_runtime_disable(&pdev->dev);
- goto err_pm_runtime_put;
+ goto err_phy_exit;
+ }
+
+ name = devm_kasprintf(dev, GFP_KERNEL, "qcom_pcie_global_irq%d",
+ pci_domain_nr(pp->bridge->bus));
+ if (!name) {
+ ret = -ENOMEM;
+ goto err_host_deinit;
}
+ if (irq > 0) {
+ ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+ qcom_pcie_global_irq_thread,
+ IRQF_ONESHOT, name, pcie);
+ if (ret) {
+ dev_err_probe(&pdev->dev, ret,
+ "Failed to request Global IRQ\n");
+ goto err_host_deinit;
+ }
+
+ writel_relaxed(PARF_INT_ALL_LINK_UP | PARF_INT_MSI_DEV_0_7,
+ pcie->parf + PARF_INT_ALL_MASK);
+ }
+
+ qcom_pcie_icc_opp_update(pcie);
+
+ if (pcie->mhi)
+ qcom_pcie_init_debugfs(pcie);
+
return 0;
+err_host_deinit:
+ dw_pcie_host_deinit(pp);
+err_phy_exit:
+ list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
+ phy_exit(port->phy);
+ list_del(&port->list);
+ }
err_pm_runtime_put:
pm_runtime_put(dev);
pm_runtime_disable(dev);
@@ -1299,22 +2007,151 @@ err_pm_runtime_put:
return ret;
}
+static int qcom_pcie_suspend_noirq(struct device *dev)
+{
+ struct qcom_pcie *pcie;
+ int ret = 0;
+
+ pcie = dev_get_drvdata(dev);
+ if (!pcie)
+ return 0;
+
+ /*
+ * Set minimum bandwidth required to keep data path functional during
+ * suspend.
+ */
+ if (pcie->icc_mem) {
+ ret = icc_set_bw(pcie->icc_mem, 0, kBps_to_icc(1));
+ if (ret) {
+ dev_err(dev,
+ "Failed to set bandwidth for PCIe-MEM interconnect path: %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ /*
+ * Turn OFF the resources only for controllers without active PCIe
+ * devices. For controllers with active devices, the resources are kept
+ * ON and the link is expected to be in L0/L1 (sub)states.
+ *
+ * Turning OFF the resources for controllers with active PCIe devices
+ * would trigger an access violation at the end of the suspend cycle,
+ * as the kernel tries to access the PCIe devices' config space to mask
+ * MSIs.
+ *
+ * Also, it is not desirable to put the link into the L2/L3 state, as
+ * that implies the VDD supply will be removed and the devices may go
+ * into a powerdown state. This would affect the lifetime of storage
+ * devices like NVMe.
+ */
+ if (!dw_pcie_link_up(pcie->pci)) {
+ qcom_pcie_host_deinit(&pcie->pci->pp);
+ pcie->suspended = true;
+ }
+
+ /*
+ * Only disable the CPU-PCIe interconnect path if the suspend is not
+ * S2RAM, because on some platforms DBI access can happen very late
+ * during S2RAM and an inactive CPU-PCIe interconnect path may lead to
+ * a NoC error.
+ */
+ if (pm_suspend_target_state != PM_SUSPEND_MEM) {
+ ret = icc_disable(pcie->icc_cpu);
+ if (ret)
+ dev_err(dev, "Failed to disable CPU-PCIe interconnect path: %d\n", ret);
+
+ if (pcie->use_pm_opp)
+ dev_pm_opp_set_opp(pcie->pci->dev, NULL);
+ }
+ return ret;
+}
+
+static int qcom_pcie_resume_noirq(struct device *dev)
+{
+ struct qcom_pcie *pcie;
+ int ret;
+
+ pcie = dev_get_drvdata(dev);
+ if (!pcie)
+ return 0;
+
+ if (pm_suspend_target_state != PM_SUSPEND_MEM) {
+ ret = icc_enable(pcie->icc_cpu);
+ if (ret) {
+ dev_err(dev, "Failed to enable CPU-PCIe interconnect path: %d\n", ret);
+ return ret;
+ }
+ }
+
+ if (pcie->suspended) {
+ ret = qcom_pcie_host_init(&pcie->pci->pp);
+ if (ret)
+ return ret;
+
+ pcie->suspended = false;
+ }
+
+ qcom_pcie_icc_opp_update(pcie);
+
+ return 0;
+}
+
static const struct of_device_id qcom_pcie_match[] = {
- { .compatible = "qcom,pcie-apq8084", .data = &ops_1_0_0 },
- { .compatible = "qcom,pcie-ipq8064", .data = &ops_2_1_0 },
- { .compatible = "qcom,pcie-apq8064", .data = &ops_2_1_0 },
- { .compatible = "qcom,pcie-msm8996", .data = &ops_2_3_2 },
- { .compatible = "qcom,pcie-ipq8074", .data = &ops_2_3_3 },
- { .compatible = "qcom,pcie-ipq4019", .data = &ops_2_4_0 },
+ { .compatible = "qcom,pcie-apq8064", .data = &cfg_2_1_0 },
+ { .compatible = "qcom,pcie-apq8084", .data = &cfg_1_0_0 },
+ { .compatible = "qcom,pcie-ipq4019", .data = &cfg_2_4_0 },
+ { .compatible = "qcom,pcie-ipq5018", .data = &cfg_2_9_0 },
+ { .compatible = "qcom,pcie-ipq6018", .data = &cfg_2_9_0 },
+ { .compatible = "qcom,pcie-ipq8064", .data = &cfg_2_1_0 },
+ { .compatible = "qcom,pcie-ipq8064-v2", .data = &cfg_2_1_0 },
+ { .compatible = "qcom,pcie-ipq8074", .data = &cfg_2_3_3 },
+ { .compatible = "qcom,pcie-ipq8074-gen3", .data = &cfg_2_9_0 },
+ { .compatible = "qcom,pcie-ipq9574", .data = &cfg_2_9_0 },
+ { .compatible = "qcom,pcie-msm8996", .data = &cfg_2_3_2 },
+ { .compatible = "qcom,pcie-qcs404", .data = &cfg_2_4_0 },
+ { .compatible = "qcom,pcie-sa8255p", .data = &cfg_fw_managed },
+ { .compatible = "qcom,pcie-sa8540p", .data = &cfg_sc8280xp },
+ { .compatible = "qcom,pcie-sa8775p", .data = &cfg_1_34_0},
+ { .compatible = "qcom,pcie-sc7280", .data = &cfg_1_9_0 },
+ { .compatible = "qcom,pcie-sc8180x", .data = &cfg_1_9_0 },
+ { .compatible = "qcom,pcie-sc8280xp", .data = &cfg_sc8280xp },
+ { .compatible = "qcom,pcie-sdm845", .data = &cfg_2_7_0 },
+ { .compatible = "qcom,pcie-sdx55", .data = &cfg_1_9_0 },
+ { .compatible = "qcom,pcie-sm8150", .data = &cfg_1_9_0 },
+ { .compatible = "qcom,pcie-sm8250", .data = &cfg_1_9_0 },
+ { .compatible = "qcom,pcie-sm8350", .data = &cfg_1_9_0 },
+ { .compatible = "qcom,pcie-sm8450-pcie0", .data = &cfg_1_9_0 },
+ { .compatible = "qcom,pcie-sm8450-pcie1", .data = &cfg_1_9_0 },
+ { .compatible = "qcom,pcie-sm8550", .data = &cfg_1_9_0 },
+ { .compatible = "qcom,pcie-x1e80100", .data = &cfg_sc8280xp },
{ }
};
+static void qcom_fixup_class(struct pci_dev *dev)
+{
+ dev->class = PCI_CLASS_BRIDGE_PCI_NORMAL;
+}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0101, qcom_fixup_class);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0104, qcom_fixup_class);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0106, qcom_fixup_class);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0107, qcom_fixup_class);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0302, qcom_fixup_class);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1000, qcom_fixup_class);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1001, qcom_fixup_class);
+
+static const struct dev_pm_ops qcom_pcie_pm_ops = {
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(qcom_pcie_suspend_noirq, qcom_pcie_resume_noirq)
+};
+
static struct platform_driver qcom_pcie_driver = {
.probe = qcom_pcie_probe,
.driver = {
.name = "qcom-pcie",
.suppress_bind_attrs = true,
.of_match_table = qcom_pcie_match,
+ .pm = &qcom_pcie_pm_ops,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
};
builtin_platform_driver(qcom_pcie_driver);
diff --git a/drivers/pci/controller/dwc/pcie-rcar-gen4.c b/drivers/pci/controller/dwc/pcie-rcar-gen4.c
new file mode 100644
index 000000000000..80778917d2dd
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-rcar-gen4.c
@@ -0,0 +1,804 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * PCIe controller driver for Renesas R-Car Gen4 Series SoCs
+ * Copyright (C) 2022-2023 Renesas Electronics Corporation
+ *
+ * The r8a779g0 (R-Car V4H) controller requires specific firmware to be
+ * provided in order to initialize the PHY; otherwise, the PCIe controller
+ * will not work.
+ */
+
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+
+#include "../../pci.h"
+#include "pcie-designware.h"
+
+/* Renesas-specific */
+/* PCIe Mode Setting Register 0 */
+#define PCIEMSR0 0x0000
+#define APP_SRIS_MODE BIT(6)
+#define DEVICE_TYPE_EP 0
+#define DEVICE_TYPE_RC BIT(4)
+#define BIFUR_MOD_SET_ON BIT(0)
+
+/* PCIe Interrupt Status 0 */
+#define PCIEINTSTS0 0x0084
+
+/* PCIe Interrupt Status 0 Enable */
+#define PCIEINTSTS0EN 0x0310
+#define MSI_CTRL_INT BIT(26)
+#define SMLH_LINK_UP BIT(7)
+#define RDLH_LINK_UP BIT(6)
+
+/* PCIe DMA Interrupt Status Enable */
+#define PCIEDMAINTSTSEN 0x0314
+#define PCIEDMAINTSTSEN_INIT GENMASK(15, 0)
+
+/* Port Logic Registers 89 */
+#define PRTLGC89 0x0b70
+
+/* Port Logic Registers 90 */
+#define PRTLGC90 0x0b74
+
+/* PCIe Reset Control Register 1 */
+#define PCIERSTCTRL1 0x0014
+#define APP_HOLD_PHY_RST BIT(16)
+#define APP_LTSSM_ENABLE BIT(0)
+
+/* PCIe Power Management Control */
+#define PCIEPWRMNGCTRL 0x0070
+#define APP_CLK_REQ_N BIT(11)
+#define APP_CLK_PM_EN BIT(10)
+
+#define RCAR_NUM_SPEED_CHANGE_RETRIES 10
+#define RCAR_MAX_LINK_SPEED 4
+
+#define RCAR_GEN4_PCIE_EP_FUNC_DBI_OFFSET 0x1000
+#define RCAR_GEN4_PCIE_EP_FUNC_DBI2_OFFSET 0x800
+
+#define RCAR_GEN4_PCIE_FIRMWARE_NAME "rcar_gen4_pcie.bin"
+#define RCAR_GEN4_PCIE_FIRMWARE_BASE_ADDR 0xc000
+MODULE_FIRMWARE(RCAR_GEN4_PCIE_FIRMWARE_NAME);
+
+struct rcar_gen4_pcie;
+struct rcar_gen4_pcie_drvdata {
+ void (*additional_common_init)(struct rcar_gen4_pcie *rcar);
+ int (*ltssm_control)(struct rcar_gen4_pcie *rcar, bool enable);
+ enum dw_pcie_device_mode mode;
+};
+
+struct rcar_gen4_pcie {
+ struct dw_pcie dw;
+ void __iomem *base;
+ void __iomem *phy_base;
+ struct platform_device *pdev;
+ const struct rcar_gen4_pcie_drvdata *drvdata;
+};
+#define to_rcar_gen4_pcie(_dw) container_of(_dw, struct rcar_gen4_pcie, dw)
+
+/* Common */
+static bool rcar_gen4_pcie_link_up(struct dw_pcie *dw)
+{
+ struct rcar_gen4_pcie *rcar = to_rcar_gen4_pcie(dw);
+ u32 val, mask;
+
+ val = readl(rcar->base + PCIEINTSTS0);
+ mask = RDLH_LINK_UP | SMLH_LINK_UP;
+
+ return (val & mask) == mask;
+}
+
+/*
+ * Manually initiate the speed change. Return 0 if the change succeeded;
+ * otherwise -ETIMEDOUT.
+ */
+static int rcar_gen4_pcie_speed_change(struct dw_pcie *dw)
+{
+ u32 val;
+ int i;
+
+ val = dw_pcie_readl_dbi(dw, PCIE_LINK_WIDTH_SPEED_CONTROL);
+ val &= ~PORT_LOGIC_SPEED_CHANGE;
+ dw_pcie_writel_dbi(dw, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
+
+ val = dw_pcie_readl_dbi(dw, PCIE_LINK_WIDTH_SPEED_CONTROL);
+ val |= PORT_LOGIC_SPEED_CHANGE;
+ dw_pcie_writel_dbi(dw, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
+
+ for (i = 0; i < RCAR_NUM_SPEED_CHANGE_RETRIES; i++) {
+ val = dw_pcie_readl_dbi(dw, PCIE_LINK_WIDTH_SPEED_CONTROL);
+ if (!(val & PORT_LOGIC_SPEED_CHANGE))
+ return 0;
+ usleep_range(10000, 11000);
+ }
+
+ return -ETIMEDOUT;
+}
+
+/*
+ * Enable the LTSSM of this controller and manually initiate the speed
+ * change. Always returns 0.
+ */
+static int rcar_gen4_pcie_start_link(struct dw_pcie *dw)
+{
+ struct rcar_gen4_pcie *rcar = to_rcar_gen4_pcie(dw);
+ int i, changes, ret;
+
+ if (rcar->drvdata->ltssm_control) {
+ ret = rcar->drvdata->ltssm_control(rcar, true);
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * A direct speed change, with retries, is required here if
+ * max_link_speed is PCIe Gen2 or higher.
+ */
+ changes = min_not_zero(dw->max_link_speed, RCAR_MAX_LINK_SPEED) - 1;
+
+ /*
+ * Since dw_pcie_setup_rc() already requests one speed change, the link
+ * will train to PCIe Gen2 on its own. So only the remaining changes, up
+ * to PCIe Gen4, are needed in RC mode.
+ */
+ if (changes && rcar->drvdata->mode == DW_PCIE_RC_TYPE)
+ changes--;
+
+ for (i = 0; i < changes; i++) {
+ /* The link partner may not be connected yet in EP mode, so break the loop */
+ if (rcar_gen4_pcie_speed_change(dw))
+ break;
+ }
+
+ return 0;
+}
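A worked example of the retry arithmetic above, assuming a Gen 4 capable controller:

/*
 * Example: dw->max_link_speed = 4 (or zero, so RCAR_MAX_LINK_SPEED wins)
 *   changes = min_not_zero(4, 4) - 1 = 3
 *   RC mode: dw_pcie_setup_rc() already requested one speed change, so
 *            two manual attempts remain (Gen2 -> Gen3 -> Gen4)
 *   EP mode: all three manual attempts remain
 */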
+
+static void rcar_gen4_pcie_stop_link(struct dw_pcie *dw)
+{
+ struct rcar_gen4_pcie *rcar = to_rcar_gen4_pcie(dw);
+
+ if (rcar->drvdata->ltssm_control)
+ rcar->drvdata->ltssm_control(rcar, false);
+}
+
+static int rcar_gen4_pcie_common_init(struct rcar_gen4_pcie *rcar)
+{
+ struct dw_pcie *dw = &rcar->dw;
+ u32 val;
+ int ret;
+
+ ret = clk_bulk_prepare_enable(DW_PCIE_NUM_CORE_CLKS, dw->core_clks);
+ if (ret) {
+ dev_err(dw->dev, "Enabling core clocks failed\n");
+ return ret;
+ }
+
+ if (!reset_control_status(dw->core_rsts[DW_PCIE_PWR_RST].rstc)) {
+ reset_control_assert(dw->core_rsts[DW_PCIE_PWR_RST].rstc);
+ /*
+ * R-Car V4H Reference Manual R19UH0186EJ0130 Rev.1.30 Apr.
+ * 21, 2025 page 585 Figure 9.3.2 Software Reset flow (B)
+ * indicates that for peripherals in HSC domain, after
+ * reset has been asserted by writing a matching reset bit
+ * into register SRCR, it is mandatory to wait 1ms.
+ */
+ fsleep(1000);
+ }
+
+ val = readl(rcar->base + PCIEMSR0);
+ if (rcar->drvdata->mode == DW_PCIE_RC_TYPE) {
+ val |= DEVICE_TYPE_RC;
+ } else if (rcar->drvdata->mode == DW_PCIE_EP_TYPE) {
+ val |= DEVICE_TYPE_EP;
+ } else {
+ ret = -EINVAL;
+ goto err_unprepare;
+ }
+
+ if (dw->num_lanes < 4)
+ val |= BIFUR_MOD_SET_ON;
+
+ writel(val, rcar->base + PCIEMSR0);
+
+ ret = reset_control_deassert(dw->core_rsts[DW_PCIE_PWR_RST].rstc);
+ if (ret)
+ goto err_unprepare;
+
+ /*
+ * Ensure the reset is latched and the core is ready for DBI access.
+ * On R-Car V4H, the PCIe reset is asynchronous and does not take
+ * effect immediately, but needs a short time to complete. In case
+ * DBI access happens in that short time, that access generates an
+ * SError. To make sure that condition can never happen, read back the
+ * state of the reset, which should turn the asynchronous reset into
+ * synchronous one, and wait a little over 1ms to add additional
+ * safety margin.
+ */
+ reset_control_status(dw->core_rsts[DW_PCIE_PWR_RST].rstc);
+ fsleep(1000);
+
+ if (rcar->drvdata->additional_common_init)
+ rcar->drvdata->additional_common_init(rcar);
+
+ return 0;
+
+err_unprepare:
+ clk_bulk_disable_unprepare(DW_PCIE_NUM_CORE_CLKS, dw->core_clks);
+
+ return ret;
+}
+
+static void rcar_gen4_pcie_common_deinit(struct rcar_gen4_pcie *rcar)
+{
+ struct dw_pcie *dw = &rcar->dw;
+
+ reset_control_assert(dw->core_rsts[DW_PCIE_PWR_RST].rstc);
+ clk_bulk_disable_unprepare(DW_PCIE_NUM_CORE_CLKS, dw->core_clks);
+}
+
+static int rcar_gen4_pcie_prepare(struct rcar_gen4_pcie *rcar)
+{
+ struct device *dev = rcar->dw.dev;
+ int err;
+
+ pm_runtime_enable(dev);
+ err = pm_runtime_resume_and_get(dev);
+ if (err < 0) {
+ dev_err(dev, "Runtime resume failed\n");
+ pm_runtime_disable(dev);
+ }
+
+ return err;
+}
+
+static void rcar_gen4_pcie_unprepare(struct rcar_gen4_pcie *rcar)
+{
+ struct device *dev = rcar->dw.dev;
+
+ pm_runtime_put(dev);
+ pm_runtime_disable(dev);
+}
+
+static int rcar_gen4_pcie_get_resources(struct rcar_gen4_pcie *rcar)
+{
+ rcar->phy_base = devm_platform_ioremap_resource_byname(rcar->pdev, "phy");
+ if (IS_ERR(rcar->phy_base))
+ return PTR_ERR(rcar->phy_base);
+
+ /* Renesas-specific registers */
+ rcar->base = devm_platform_ioremap_resource_byname(rcar->pdev, "app");
+
+ return PTR_ERR_OR_ZERO(rcar->base);
+}
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+ .start_link = rcar_gen4_pcie_start_link,
+ .stop_link = rcar_gen4_pcie_stop_link,
+ .link_up = rcar_gen4_pcie_link_up,
+};
+
+static struct rcar_gen4_pcie *rcar_gen4_pcie_alloc(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rcar_gen4_pcie *rcar;
+
+ rcar = devm_kzalloc(dev, sizeof(*rcar), GFP_KERNEL);
+ if (!rcar)
+ return ERR_PTR(-ENOMEM);
+
+ rcar->dw.ops = &dw_pcie_ops;
+ rcar->dw.dev = dev;
+ rcar->pdev = pdev;
+ rcar->dw.edma.mf = EDMA_MF_EDMA_UNROLL;
+ dw_pcie_cap_set(&rcar->dw, REQ_RES);
+ platform_set_drvdata(pdev, rcar);
+
+ return rcar;
+}
+
+/* Host mode */
+static int rcar_gen4_pcie_host_init(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *dw = to_dw_pcie_from_pp(pp);
+ struct rcar_gen4_pcie *rcar = to_rcar_gen4_pcie(dw);
+ int ret;
+ u32 val;
+
+ gpiod_set_value_cansleep(dw->pe_rst, 1);
+
+ ret = rcar_gen4_pcie_common_init(rcar);
+ if (ret)
+ return ret;
+
+ /*
+ * According to section 3.5.7.2 "RC Mode" in DWC PCIe Dual Mode
+ * Rev.5.20a and 3.5.6.1 "RC mode" in DWC PCIe RC databook v5.20a, we
+ * should disable two BARs to avoid unnecessary memory assignment
+ * during device enumeration.
+ */
+ dw_pcie_writel_dbi2(dw, PCI_BASE_ADDRESS_0, 0x0);
+ dw_pcie_writel_dbi2(dw, PCI_BASE_ADDRESS_1, 0x0);
+
+ /* Enable MSI interrupt signal */
+ val = readl(rcar->base + PCIEINTSTS0EN);
+ val |= MSI_CTRL_INT;
+ writel(val, rcar->base + PCIEINTSTS0EN);
+
+ msleep(PCIE_T_PVPERL_MS); /* pe_rst requires a 100 ms delay */
+
+ gpiod_set_value_cansleep(dw->pe_rst, 0);
+
+ return 0;
+}
+
+static void rcar_gen4_pcie_host_deinit(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *dw = to_dw_pcie_from_pp(pp);
+ struct rcar_gen4_pcie *rcar = to_rcar_gen4_pcie(dw);
+
+ gpiod_set_value_cansleep(dw->pe_rst, 1);
+ rcar_gen4_pcie_common_deinit(rcar);
+}
+
+static const struct dw_pcie_host_ops rcar_gen4_pcie_host_ops = {
+ .init = rcar_gen4_pcie_host_init,
+ .deinit = rcar_gen4_pcie_host_deinit,
+};
+
+static int rcar_gen4_add_dw_pcie_rp(struct rcar_gen4_pcie *rcar)
+{
+ struct dw_pcie_rp *pp = &rcar->dw.pp;
+
+ if (!IS_ENABLED(CONFIG_PCIE_RCAR_GEN4_HOST))
+ return -ENODEV;
+
+ pp->num_vectors = MAX_MSI_IRQS;
+ pp->ops = &rcar_gen4_pcie_host_ops;
+
+ return dw_pcie_host_init(pp);
+}
+
+static void rcar_gen4_remove_dw_pcie_rp(struct rcar_gen4_pcie *rcar)
+{
+ dw_pcie_host_deinit(&rcar->dw.pp);
+}
+
+/* Endpoint mode */
+static void rcar_gen4_pcie_ep_pre_init(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *dw = to_dw_pcie_from_ep(ep);
+ struct rcar_gen4_pcie *rcar = to_rcar_gen4_pcie(dw);
+ int ret;
+
+ ret = rcar_gen4_pcie_common_init(rcar);
+ if (ret)
+ return;
+
+ writel(PCIEDMAINTSTSEN_INIT, rcar->base + PCIEDMAINTSTSEN);
+}
+
+static void rcar_gen4_pcie_ep_init(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ enum pci_barno bar;
+
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
+ dw_pcie_ep_reset_bar(pci, bar);
+}
+
+static void rcar_gen4_pcie_ep_deinit(struct rcar_gen4_pcie *rcar)
+{
+ writel(0, rcar->base + PCIEDMAINTSTSEN);
+ rcar_gen4_pcie_common_deinit(rcar);
+}
+
+static int rcar_gen4_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
+ unsigned int type, u16 interrupt_num)
+{
+ struct dw_pcie *dw = to_dw_pcie_from_ep(ep);
+
+ switch (type) {
+ case PCI_IRQ_INTX:
+ return dw_pcie_ep_raise_intx_irq(ep, func_no);
+ case PCI_IRQ_MSI:
+ return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
+ default:
+ dev_err(dw->dev, "Unknown IRQ type\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct pci_epc_features rcar_gen4_pcie_epc_features = {
+ .msi_capable = true,
+ .bar[BAR_1] = { .type = BAR_RESERVED, },
+ .bar[BAR_3] = { .type = BAR_RESERVED, },
+ .bar[BAR_4] = { .type = BAR_FIXED, .fixed_size = 256 },
+ .bar[BAR_5] = { .type = BAR_RESERVED, },
+ .align = SZ_1M,
+};
+
+static const struct pci_epc_features*
+rcar_gen4_pcie_ep_get_features(struct dw_pcie_ep *ep)
+{
+ return &rcar_gen4_pcie_epc_features;
+}
+
+static unsigned int rcar_gen4_pcie_ep_get_dbi_offset(struct dw_pcie_ep *ep,
+ u8 func_no)
+{
+ return func_no * RCAR_GEN4_PCIE_EP_FUNC_DBI_OFFSET;
+}
+
+static unsigned int rcar_gen4_pcie_ep_get_dbi2_offset(struct dw_pcie_ep *ep,
+ u8 func_no)
+{
+ return func_no * RCAR_GEN4_PCIE_EP_FUNC_DBI2_OFFSET;
+}
+
+static const struct dw_pcie_ep_ops pcie_ep_ops = {
+ .pre_init = rcar_gen4_pcie_ep_pre_init,
+ .init = rcar_gen4_pcie_ep_init,
+ .raise_irq = rcar_gen4_pcie_ep_raise_irq,
+ .get_features = rcar_gen4_pcie_ep_get_features,
+ .get_dbi_offset = rcar_gen4_pcie_ep_get_dbi_offset,
+ .get_dbi2_offset = rcar_gen4_pcie_ep_get_dbi2_offset,
+};
+
+static int rcar_gen4_add_dw_pcie_ep(struct rcar_gen4_pcie *rcar)
+{
+ struct dw_pcie_ep *ep = &rcar->dw.ep;
+ struct device *dev = rcar->dw.dev;
+ int ret;
+
+ if (!IS_ENABLED(CONFIG_PCIE_RCAR_GEN4_EP))
+ return -ENODEV;
+
+ ep->ops = &pcie_ep_ops;
+
+ ret = dw_pcie_ep_init(ep);
+ if (ret) {
+ rcar_gen4_pcie_ep_deinit(rcar);
+ return ret;
+ }
+
+ ret = dw_pcie_ep_init_registers(ep);
+ if (ret) {
+ dev_err(dev, "Failed to initialize DWC endpoint registers\n");
+ dw_pcie_ep_deinit(ep);
+ rcar_gen4_pcie_ep_deinit(rcar);
+ }
+
+ pci_epc_init_notify(ep->epc);
+
+ return ret;
+}
+
+static void rcar_gen4_remove_dw_pcie_ep(struct rcar_gen4_pcie *rcar)
+{
+ dw_pcie_ep_deinit(&rcar->dw.ep);
+ rcar_gen4_pcie_ep_deinit(rcar);
+}
+
+/* Common */
+static int rcar_gen4_add_dw_pcie(struct rcar_gen4_pcie *rcar)
+{
+ rcar->drvdata = of_device_get_match_data(&rcar->pdev->dev);
+ if (!rcar->drvdata)
+ return -EINVAL;
+
+ switch (rcar->drvdata->mode) {
+ case DW_PCIE_RC_TYPE:
+ return rcar_gen4_add_dw_pcie_rp(rcar);
+ case DW_PCIE_EP_TYPE:
+ return rcar_gen4_add_dw_pcie_ep(rcar);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int rcar_gen4_pcie_probe(struct platform_device *pdev)
+{
+ struct rcar_gen4_pcie *rcar;
+ int err;
+
+ rcar = rcar_gen4_pcie_alloc(pdev);
+ if (IS_ERR(rcar))
+ return PTR_ERR(rcar);
+
+ err = rcar_gen4_pcie_get_resources(rcar);
+ if (err)
+ return err;
+
+ err = rcar_gen4_pcie_prepare(rcar);
+ if (err)
+ return err;
+
+ err = rcar_gen4_add_dw_pcie(rcar);
+ if (err)
+ goto err_unprepare;
+
+ return 0;
+
+err_unprepare:
+ rcar_gen4_pcie_unprepare(rcar);
+
+ return err;
+}
+
+static void rcar_gen4_remove_dw_pcie(struct rcar_gen4_pcie *rcar)
+{
+ switch (rcar->drvdata->mode) {
+ case DW_PCIE_RC_TYPE:
+ rcar_gen4_remove_dw_pcie_rp(rcar);
+ break;
+ case DW_PCIE_EP_TYPE:
+ rcar_gen4_remove_dw_pcie_ep(rcar);
+ break;
+ default:
+ break;
+ }
+}
+
+static void rcar_gen4_pcie_remove(struct platform_device *pdev)
+{
+ struct rcar_gen4_pcie *rcar = platform_get_drvdata(pdev);
+
+ rcar_gen4_remove_dw_pcie(rcar);
+ rcar_gen4_pcie_unprepare(rcar);
+}
+
+static int r8a779f0_pcie_ltssm_control(struct rcar_gen4_pcie *rcar, bool enable)
+{
+ u32 val;
+
+ val = readl(rcar->base + PCIERSTCTRL1);
+ if (enable) {
+ val |= APP_LTSSM_ENABLE;
+ val &= ~APP_HOLD_PHY_RST;
+ } else {
+ /*
+ * Since the R-Car datasheet doesn't mention how to assert
+ * APP_HOLD_PHY_RST, don't assert it again; doing so caused a
+ * hang-up in dw_edma_core_off() when the controller didn't
+ * detect a PCI device.
+ */
+ val &= ~APP_LTSSM_ENABLE;
+ }
+ writel(val, rcar->base + PCIERSTCTRL1);
+
+ return 0;
+}
+
+static void rcar_gen4_pcie_additional_common_init(struct rcar_gen4_pcie *rcar)
+{
+ struct dw_pcie *dw = &rcar->dw;
+ u32 val;
+
+ val = dw_pcie_readl_dbi(dw, PCIE_PORT_LANE_SKEW);
+ val &= ~PORT_LANE_SKEW_INSERT_MASK;
+ if (dw->num_lanes < 4)
+ val |= BIT(6);
+ dw_pcie_writel_dbi(dw, PCIE_PORT_LANE_SKEW, val);
+
+ val = readl(rcar->base + PCIEPWRMNGCTRL);
+ val |= APP_CLK_REQ_N | APP_CLK_PM_EN;
+ writel(val, rcar->base + PCIEPWRMNGCTRL);
+}
+
+static void rcar_gen4_pcie_phy_reg_update_bits(struct rcar_gen4_pcie *rcar,
+ u32 offset, u32 mask, u32 val)
+{
+ u32 tmp;
+
+ tmp = readl(rcar->phy_base + offset);
+ tmp &= ~mask;
+ tmp |= val;
+ writel(tmp, rcar->phy_base + offset);
+}
+
+/*
+ * The SoC datasheet suggests checking the port logic register bits during
+ * firmware write. If the read returns a non-zero value, this function
+ * returns -EAGAIN, indicating that the write needs to be retried. If the
+ * read returns zero, 0 is returned to indicate success.
+ */
+static int rcar_gen4_pcie_reg_test_bit(struct rcar_gen4_pcie *rcar,
+ u32 offset, u32 mask)
+{
+ struct dw_pcie *dw = &rcar->dw;
+
+ if (dw_pcie_readl_dbi(dw, offset) & mask)
+ return -EAGAIN;
+
+ return 0;
+}
+
+static int rcar_gen4_pcie_download_phy_firmware(struct rcar_gen4_pcie *rcar)
+{
+ /* The check_addr values are magic numbers from the datasheet */
+ static const u32 check_addr[] = {
+ 0x00101018,
+ 0x00101118,
+ 0x00101021,
+ 0x00101121,
+ };
+ struct dw_pcie *dw = &rcar->dw;
+ const struct firmware *fw;
+ unsigned int i, timeout;
+ u32 data;
+ int ret;
+
+ ret = request_firmware(&fw, RCAR_GEN4_PCIE_FIRMWARE_NAME, dw->dev);
+ if (ret) {
+ dev_err(dw->dev, "Failed to load firmware (%s): %d\n",
+ RCAR_GEN4_PCIE_FIRMWARE_NAME, ret);
+ return ret;
+ }
+
+ for (i = 0; i < (fw->size / 2); i++) {
+ data = fw->data[(i * 2) + 1] << 8 | fw->data[i * 2];
+ timeout = 100;
+ do {
+ dw_pcie_writel_dbi(dw, PRTLGC89, RCAR_GEN4_PCIE_FIRMWARE_BASE_ADDR + i);
+ dw_pcie_writel_dbi(dw, PRTLGC90, data);
+ if (!rcar_gen4_pcie_reg_test_bit(rcar, PRTLGC89, BIT(30)))
+ break;
+ if (!(--timeout)) {
+ ret = -ETIMEDOUT;
+ goto exit;
+ }
+ usleep_range(100, 200);
+ } while (1);
+ }
+
+ rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x0f8, BIT(17), BIT(17));
+
+ for (i = 0; i < ARRAY_SIZE(check_addr); i++) {
+ timeout = 100;
+ do {
+ dw_pcie_writel_dbi(dw, PRTLGC89, check_addr[i]);
+ ret = rcar_gen4_pcie_reg_test_bit(rcar, PRTLGC89, BIT(30));
+ ret |= rcar_gen4_pcie_reg_test_bit(rcar, PRTLGC90, BIT(0));
+ if (!ret)
+ break;
+ if (!(--timeout)) {
+ ret = -ETIMEDOUT;
+ goto exit;
+ }
+ usleep_range(100, 200);
+ } while (1);
+ }
+
+exit:
+ release_firmware(fw);
+
+ return ret;
+}
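The download loop above streams the firmware as 16-bit little-endian words through the PRTLGC89/PRTLGC90 pair: PRTLGC89 takes the destination address, PRTLGC90 the data word. A short illustration of the byte packing (byte values hypothetical):

/*
 * fw->data[] = { 0x34, 0x12, ... }
 *   i = 0: data = fw->data[1] << 8 | fw->data[0] = 0x1234
 * Each byte pair forms one little-endian 16-bit word, written to
 * RCAR_GEN4_PCIE_FIRMWARE_BASE_ADDR + i.
 */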
+
+static int rcar_gen4_pcie_ltssm_control(struct rcar_gen4_pcie *rcar, bool enable)
+{
+ struct dw_pcie *dw = &rcar->dw;
+ u32 val;
+ int ret;
+
+ if (!enable) {
+ val = readl(rcar->base + PCIERSTCTRL1);
+ val &= ~APP_LTSSM_ENABLE;
+ writel(val, rcar->base + PCIERSTCTRL1);
+
+ return 0;
+ }
+
+ val = dw_pcie_readl_dbi(dw, PCIE_PORT_FORCE);
+ val |= PORT_FORCE_DO_DESKEW_FOR_SRIS;
+ dw_pcie_writel_dbi(dw, PCIE_PORT_FORCE, val);
+
+ val = readl(rcar->base + PCIEMSR0);
+ val |= APP_SRIS_MODE;
+ writel(val, rcar->base + PCIEMSR0);
+
+ /*
+ * The R-Car Gen4 datasheet doesn't name the PHY registers, but the
+ * initialization procedure does list these offsets. Hence the magic
+ * offset numbers used by this driver.
+ */
+ rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x700, BIT(28), 0);
+ rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x700, BIT(20), 0);
+ rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x700, BIT(12), 0);
+ rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x700, BIT(4), 0);
+
+ rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x148, GENMASK(23, 22), BIT(22));
+ rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x148, GENMASK(18, 16), GENMASK(17, 16));
+ rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x148, GENMASK(7, 6), BIT(6));
+ rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x148, GENMASK(2, 0), GENMASK(1, 0));
+ rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x1d4, GENMASK(16, 15), GENMASK(16, 15));
+ rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x514, BIT(26), BIT(26));
+ rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x0f8, BIT(16), 0);
+ rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x0f8, BIT(19), BIT(19));
+
+ val = readl(rcar->base + PCIERSTCTRL1);
+ val &= ~APP_HOLD_PHY_RST;
+ writel(val, rcar->base + PCIERSTCTRL1);
+
+ ret = readl_poll_timeout(rcar->phy_base + 0x0f8, val, val & BIT(18), 100, 10000);
+ if (ret < 0)
+ return ret;
+
+ ret = rcar_gen4_pcie_download_phy_firmware(rcar);
+ if (ret)
+ return ret;
+
+ val = readl(rcar->base + PCIERSTCTRL1);
+ val |= APP_LTSSM_ENABLE;
+ writel(val, rcar->base + PCIERSTCTRL1);
+
+ return 0;
+}
+
+static struct rcar_gen4_pcie_drvdata drvdata_r8a779f0_pcie = {
+ .ltssm_control = r8a779f0_pcie_ltssm_control,
+ .mode = DW_PCIE_RC_TYPE,
+};
+
+static struct rcar_gen4_pcie_drvdata drvdata_r8a779f0_pcie_ep = {
+ .ltssm_control = r8a779f0_pcie_ltssm_control,
+ .mode = DW_PCIE_EP_TYPE,
+};
+
+static struct rcar_gen4_pcie_drvdata drvdata_rcar_gen4_pcie = {
+ .additional_common_init = rcar_gen4_pcie_additional_common_init,
+ .ltssm_control = rcar_gen4_pcie_ltssm_control,
+ .mode = DW_PCIE_RC_TYPE,
+};
+
+static struct rcar_gen4_pcie_drvdata drvdata_rcar_gen4_pcie_ep = {
+ .additional_common_init = rcar_gen4_pcie_additional_common_init,
+ .ltssm_control = rcar_gen4_pcie_ltssm_control,
+ .mode = DW_PCIE_EP_TYPE,
+};
+
+static const struct of_device_id rcar_gen4_pcie_of_match[] = {
+ {
+ .compatible = "renesas,r8a779f0-pcie",
+ .data = &drvdata_r8a779f0_pcie,
+ },
+ {
+ .compatible = "renesas,r8a779f0-pcie-ep",
+ .data = &drvdata_r8a779f0_pcie_ep,
+ },
+ {
+ .compatible = "renesas,rcar-gen4-pcie",
+ .data = &drvdata_rcar_gen4_pcie,
+ },
+ {
+ .compatible = "renesas,rcar-gen4-pcie-ep",
+ .data = &drvdata_rcar_gen4_pcie_ep,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, rcar_gen4_pcie_of_match);
+
+static struct platform_driver rcar_gen4_pcie_driver = {
+ .driver = {
+ .name = "pcie-rcar-gen4",
+ .of_match_table = rcar_gen4_pcie_of_match,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
+ .probe = rcar_gen4_pcie_probe,
+ .remove = rcar_gen4_pcie_remove,
+};
+module_platform_driver(rcar_gen4_pcie_driver);
+
+MODULE_DESCRIPTION("Renesas R-Car Gen4 PCIe controller driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pci/controller/dwc/pcie-sophgo.c b/drivers/pci/controller/dwc/pcie-sophgo.c
new file mode 100644
index 000000000000..ad4baaa34ffa
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-sophgo.c
@@ -0,0 +1,257 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Sophgo DesignWare based PCIe host controller driver
+ */
+
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/module.h>
+#include <linux/property.h>
+#include <linux/platform_device.h>
+
+#include "pcie-designware.h"
+
+#define to_sophgo_pcie(x) dev_get_drvdata((x)->dev)
+
+#define PCIE_INT_SIGNAL 0xc48
+#define PCIE_INT_EN 0xca0
+
+#define PCIE_INT_SIGNAL_INTX GENMASK(8, 5)
+
+#define PCIE_INT_EN_INTX GENMASK(4, 1)
+#define PCIE_INT_EN_INT_MSI BIT(5)
+
+struct sophgo_pcie {
+ struct dw_pcie pci;
+ void __iomem *app_base;
+ struct clk_bulk_data *clks;
+ unsigned int clk_cnt;
+ struct irq_domain *irq_domain;
+};
+
+static u32 sophgo_pcie_readl_app(struct sophgo_pcie *sophgo, u32 reg)
+{
+ return readl_relaxed(sophgo->app_base + reg);
+}
+
+static void sophgo_pcie_writel_app(struct sophgo_pcie *sophgo, u32 val, u32 reg)
+{
+ writel_relaxed(val, sophgo->app_base + reg);
+}
+
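+/*
+ * Chained handler for INTA-INTD: read the latched status from the app
+ * register block and demultiplex each asserted line into the INTx
+ * irq_domain.
+ */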
+static void sophgo_pcie_intx_handler(struct irq_desc *desc)
+{
+ struct dw_pcie_rp *pp = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct sophgo_pcie *sophgo = to_sophgo_pcie(pci);
+ unsigned long hwirq, reg;
+
+ chained_irq_enter(chip, desc);
+
+ reg = sophgo_pcie_readl_app(sophgo, PCIE_INT_SIGNAL);
+ reg = FIELD_GET(PCIE_INT_SIGNAL_INTX, reg);
+
+ for_each_set_bit(hwirq, &reg, PCI_NUM_INTX)
+ generic_handle_domain_irq(sophgo->irq_domain, hwirq);
+
+ chained_irq_exit(chip, desc);
+}
+
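+/*
+ * INTx mask/unmask are read-modify-write updates of PCIE_INT_EN, done
+ * under pp->lock because the register is shared by all four INTx lines
+ * and the MSI enable bit.
+ */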
+static void sophgo_intx_irq_mask(struct irq_data *d)
+{
+ struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct sophgo_pcie *sophgo = to_sophgo_pcie(pci);
+ unsigned long flags;
+ u32 val;
+
+ raw_spin_lock_irqsave(&pp->lock, flags);
+
+ val = sophgo_pcie_readl_app(sophgo, PCIE_INT_EN);
+ val &= ~FIELD_PREP(PCIE_INT_EN_INTX, BIT(d->hwirq));
+ sophgo_pcie_writel_app(sophgo, val, PCIE_INT_EN);
+
+ raw_spin_unlock_irqrestore(&pp->lock, flags);
+}
+
+static void sophgo_intx_irq_unmask(struct irq_data *d)
+{
+ struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct sophgo_pcie *sophgo = to_sophgo_pcie(pci);
+ unsigned long flags;
+ u32 val;
+
+ raw_spin_lock_irqsave(&pp->lock, flags);
+
+ val = sophgo_pcie_readl_app(sophgo, PCIE_INT_EN);
+ val |= FIELD_PREP(PCIE_INT_EN_INTX, BIT(d->hwirq));
+ sophgo_pcie_writel_app(sophgo, val, PCIE_INT_EN);
+
+ raw_spin_unlock_irqrestore(&pp->lock, flags);
+}
+
+static struct irq_chip sophgo_intx_irq_chip = {
+ .name = "INTx",
+ .irq_mask = sophgo_intx_irq_mask,
+ .irq_unmask = sophgo_intx_irq_unmask,
+};
+
+static int sophgo_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &sophgo_intx_irq_chip, handle_level_irq);
+ irq_set_chip_data(irq, domain->host_data);
+
+ return 0;
+}
+
+static const struct irq_domain_ops intx_domain_ops = {
+ .map = sophgo_pcie_intx_map,
+};
+
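+/*
+ * Parse the "interrupt-controller" child node and create a linear domain
+ * for the INTx lines. On success this returns the parent interrupt
+ * number, which the caller installs as the chained handler.
+ */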
+static int sophgo_pcie_init_irq_domain(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct sophgo_pcie *sophgo = to_sophgo_pcie(pci);
+ struct device *dev = sophgo->pci.dev;
+ struct fwnode_handle *intc;
+ int irq;
+
+ intc = device_get_named_child_node(dev, "interrupt-controller");
+ if (!intc) {
+ dev_err(dev, "missing child interrupt-controller node\n");
+ return -ENODEV;
+ }
+
+ irq = fwnode_irq_get(intc, 0);
+ if (irq < 0) {
+ dev_err(dev, "failed to get INTx irq number\n");
+ fwnode_handle_put(intc);
+ return irq;
+ }
+
+ sophgo->irq_domain = irq_domain_create_linear(intc, PCI_NUM_INTX,
+ &intx_domain_ops, pp);
+ fwnode_handle_put(intc);
+ if (!sophgo->irq_domain) {
+		dev_err(dev, "failed to create an INTx IRQ domain\n");
+ return -EINVAL;
+ }
+
+ return irq;
+}
+
+static void sophgo_pcie_msi_enable(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct sophgo_pcie *sophgo = to_sophgo_pcie(pci);
+ unsigned long flags;
+ u32 val;
+
+ raw_spin_lock_irqsave(&pp->lock, flags);
+
+ val = sophgo_pcie_readl_app(sophgo, PCIE_INT_EN);
+ val |= PCIE_INT_EN_INT_MSI;
+ sophgo_pcie_writel_app(sophgo, val, PCIE_INT_EN);
+
+ raw_spin_unlock_irqrestore(&pp->lock, flags);
+}
+
+static int sophgo_pcie_host_init(struct dw_pcie_rp *pp)
+{
+ int irq;
+
+ irq = sophgo_pcie_init_irq_domain(pp);
+ if (irq < 0)
+ return irq;
+
+ irq_set_chained_handler_and_data(irq, sophgo_pcie_intx_handler, pp);
+
+ sophgo_pcie_msi_enable(pp);
+
+ return 0;
+}
+
+static const struct dw_pcie_host_ops sophgo_pcie_host_ops = {
+ .init = sophgo_pcie_host_init,
+};
+
+static int sophgo_pcie_clk_init(struct sophgo_pcie *sophgo)
+{
+ struct device *dev = sophgo->pci.dev;
+ int ret;
+
+ ret = devm_clk_bulk_get_all_enabled(dev, &sophgo->clks);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "failed to get clocks\n");
+
+ sophgo->clk_cnt = ret;
+
+ return 0;
+}
+
+static int sophgo_pcie_resource_get(struct platform_device *pdev,
+ struct sophgo_pcie *sophgo)
+{
+ sophgo->app_base = devm_platform_ioremap_resource_byname(pdev, "app");
+ if (IS_ERR(sophgo->app_base))
+ return dev_err_probe(&pdev->dev, PTR_ERR(sophgo->app_base),
+ "failed to map app registers\n");
+
+ return 0;
+}
+
+static int sophgo_pcie_configure_rc(struct sophgo_pcie *sophgo)
+{
+ struct dw_pcie_rp *pp;
+
+ pp = &sophgo->pci.pp;
+ pp->ops = &sophgo_pcie_host_ops;
+
+ return dw_pcie_host_init(pp);
+}
+
+static int sophgo_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct sophgo_pcie *sophgo;
+ int ret;
+
+ sophgo = devm_kzalloc(dev, sizeof(*sophgo), GFP_KERNEL);
+ if (!sophgo)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, sophgo);
+
+ sophgo->pci.dev = dev;
+
+ ret = sophgo_pcie_resource_get(pdev, sophgo);
+ if (ret)
+ return ret;
+
+ ret = sophgo_pcie_clk_init(sophgo);
+ if (ret)
+ return ret;
+
+ return sophgo_pcie_configure_rc(sophgo);
+}
+
+static const struct of_device_id sophgo_pcie_of_match[] = {
+ { .compatible = "sophgo,sg2044-pcie" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sophgo_pcie_of_match);
+
+static struct platform_driver sophgo_pcie_driver = {
+ .driver = {
+ .name = "sophgo-pcie",
+ .of_match_table = sophgo_pcie_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = sophgo_pcie_probe,
+};
+builtin_platform_driver(sophgo_pcie_driver);
diff --git a/drivers/pci/controller/dwc/pcie-spacemit-k1.c b/drivers/pci/controller/dwc/pcie-spacemit-k1.c
new file mode 100644
index 000000000000..be20a520255b
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-spacemit-k1.c
@@ -0,0 +1,357 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SpacemiT K1 PCIe host driver
+ *
+ * Copyright (C) 2025 by RISCstar Solutions Corporation. All rights reserved.
+ * Copyright (c) 2023, spacemit Corporation.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/gfp.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mod_devicetable.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/types.h>
+
+#include "pcie-designware.h"
+
+#define PCI_VENDOR_ID_SPACEMIT 0x201f
+#define PCI_DEVICE_ID_SPACEMIT_K1 0x0001
+
+/* Offsets and field definitions for link management registers */
+#define K1_PHY_AHB_IRQ_EN 0x0000
+#define PCIE_INTERRUPT_EN BIT(0)
+
+#define K1_PHY_AHB_LINK_STS 0x0004
+#define SMLH_LINK_UP BIT(1)
+#define RDLH_LINK_UP BIT(12)
+
+#define INTR_ENABLE 0x0014
+#define MSI_CTRL_INT BIT(11)
+
+/* Some controls require APMU regmap access */
+#define SYSCON_APMU "spacemit,apmu"
+
+/* Offsets and field definitions for APMU registers */
+#define PCIE_CLK_RESET_CONTROL 0x0000
+#define LTSSM_EN BIT(6)
+#define PCIE_AUX_PWR_DET BIT(9)
+#define PCIE_RC_PERST BIT(12) /* 1: assert PERST# */
+#define APP_HOLD_PHY_RST BIT(30)
+#define DEVICE_TYPE_RC BIT(31) /* 0: endpoint; 1: RC */
+
+#define PCIE_CONTROL_LOGIC 0x0004
+#define PCIE_SOFT_RESET BIT(0)
+
+struct k1_pcie {
+ struct dw_pcie pci;
+ struct phy *phy;
+ void __iomem *link;
+	struct regmap *pmu;		/* MMIO-backed regmap, so errors are ignored */
+ u32 pmu_off;
+};
+
+#define to_k1_pcie(dw_pcie) \
+ platform_get_drvdata(to_platform_device((dw_pcie)->dev))
+
+static void k1_pcie_toggle_soft_reset(struct k1_pcie *k1)
+{
+ u32 offset;
+ u32 val;
+
+ /*
+ * Write, then read back to guarantee it has reached the device
+ * before we start the delay.
+ */
+ offset = k1->pmu_off + PCIE_CONTROL_LOGIC;
+ regmap_set_bits(k1->pmu, offset, PCIE_SOFT_RESET);
+ regmap_read(k1->pmu, offset, &val);
+
+ mdelay(2);
+
+ regmap_clear_bits(k1->pmu, offset, PCIE_SOFT_RESET);
+}
+
+/* Enable app clocks, deassert resets */
+static int k1_pcie_enable_resources(struct k1_pcie *k1)
+{
+ struct dw_pcie *pci = &k1->pci;
+ int ret;
+
+ ret = clk_bulk_prepare_enable(ARRAY_SIZE(pci->app_clks), pci->app_clks);
+ if (ret)
+ return ret;
+
+ ret = reset_control_bulk_deassert(ARRAY_SIZE(pci->app_rsts),
+ pci->app_rsts);
+ if (ret)
+ goto err_disable_clks;
+
+ return 0;
+
+err_disable_clks:
+ clk_bulk_disable_unprepare(ARRAY_SIZE(pci->app_clks), pci->app_clks);
+
+ return ret;
+}
+
+/* Assert resets, disable app clocks */
+static void k1_pcie_disable_resources(struct k1_pcie *k1)
+{
+ struct dw_pcie *pci = &k1->pci;
+
+ reset_control_bulk_assert(ARRAY_SIZE(pci->app_rsts), pci->app_rsts);
+ clk_bulk_disable_unprepare(ARRAY_SIZE(pci->app_clks), pci->app_clks);
+}
+
+/* FIXME: Disable ASPM L1 to avoid errors reported on some NVMe drives */
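+/*
+ * PCI_EXP_LNKCAP is normally read-only, so the update below is done inside
+ * the DesignWare DBI read-only write-enable window (dw_pcie_dbi_ro_wr_en()
+ * / dw_pcie_dbi_ro_wr_dis()).
+ */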
+static void k1_pcie_disable_aspm_l1(struct k1_pcie *k1)
+{
+ struct dw_pcie *pci = &k1->pci;
+ u8 offset;
+ u32 val;
+
+ offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ offset += PCI_EXP_LNKCAP;
+
+ dw_pcie_dbi_ro_wr_en(pci);
+ val = dw_pcie_readl_dbi(pci, offset);
+ val &= ~PCI_EXP_LNKCAP_ASPM_L1;
+ dw_pcie_writel_dbi(pci, offset, val);
+ dw_pcie_dbi_ro_wr_dis(pci);
+}
+
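+/*
+ * One-time controller bring-up: soft-reset the block, enable clocks and
+ * deassert resets, program the vendor/device IDs, hold PERST# asserted
+ * through the PCIe CEM power-stable delay (PCIE_T_PVPERL_MS), select RC
+ * mode with Vaux present, initialize the PHY, then deassert PERST#.
+ */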
+static int k1_pcie_init(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct k1_pcie *k1 = to_k1_pcie(pci);
+ u32 reset_ctrl;
+ u32 val;
+ int ret;
+
+ k1_pcie_toggle_soft_reset(k1);
+
+ ret = k1_pcie_enable_resources(k1);
+ if (ret)
+ return ret;
+
+ /* Set the PCI vendor and device ID */
+ dw_pcie_dbi_ro_wr_en(pci);
+ dw_pcie_writew_dbi(pci, PCI_VENDOR_ID, PCI_VENDOR_ID_SPACEMIT);
+ dw_pcie_writew_dbi(pci, PCI_DEVICE_ID, PCI_DEVICE_ID_SPACEMIT_K1);
+ dw_pcie_dbi_ro_wr_dis(pci);
+
+ /*
+ * Start by asserting fundamental reset (drive PERST# low). The
+ * PCI CEM spec says that PERST# should be deasserted at least
+ * 100ms after the power becomes stable, so we'll insert that
+ * delay first. Write, then read it back to guarantee the write
+ * reaches the device before we start the delay.
+ */
+ reset_ctrl = k1->pmu_off + PCIE_CLK_RESET_CONTROL;
+ regmap_set_bits(k1->pmu, reset_ctrl, PCIE_RC_PERST);
+ regmap_read(k1->pmu, reset_ctrl, &val);
+ mdelay(PCIE_T_PVPERL_MS);
+
+ /*
+ * Put the controller in root complex mode, and indicate that
+ * Vaux (3.3v) is present.
+ */
+ regmap_set_bits(k1->pmu, reset_ctrl, DEVICE_TYPE_RC | PCIE_AUX_PWR_DET);
+
+ ret = phy_init(k1->phy);
+ if (ret) {
+ k1_pcie_disable_resources(k1);
+
+ return ret;
+ }
+
+ /* Deassert fundamental reset (drive PERST# high) */
+ regmap_clear_bits(k1->pmu, reset_ctrl, PCIE_RC_PERST);
+
+ /* Finally, as a workaround, disable ASPM L1 */
+ k1_pcie_disable_aspm_l1(k1);
+
+ return 0;
+}
+
+static void k1_pcie_deinit(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct k1_pcie *k1 = to_k1_pcie(pci);
+
+ /* Assert fundamental reset (drive PERST# low) */
+ regmap_set_bits(k1->pmu, k1->pmu_off + PCIE_CLK_RESET_CONTROL,
+ PCIE_RC_PERST);
+
+ phy_exit(k1->phy);
+
+ k1_pcie_disable_resources(k1);
+}
+
+static const struct dw_pcie_host_ops k1_pcie_host_ops = {
+ .init = k1_pcie_init,
+ .deinit = k1_pcie_deinit,
+};
+
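+/*
+ * The link is only usable once both the physical layer (SMLH) and the
+ * data link layer (RDLH) report up.
+ */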
+static bool k1_pcie_link_up(struct dw_pcie *pci)
+{
+ struct k1_pcie *k1 = to_k1_pcie(pci);
+ u32 val;
+
+ val = readl_relaxed(k1->link + K1_PHY_AHB_LINK_STS);
+
+ return (val & RDLH_LINK_UP) && (val & SMLH_LINK_UP);
+}
+
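+/*
+ * Link training can't begin until the PHY reset hold (set at probe time)
+ * is released along with LTSSM_EN; the MSI and top-level interrupt
+ * enables only matter once training can proceed.
+ */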
+static int k1_pcie_start_link(struct dw_pcie *pci)
+{
+ struct k1_pcie *k1 = to_k1_pcie(pci);
+ u32 val;
+
+ /* Stop holding the PHY in reset, and enable link training */
+ regmap_update_bits(k1->pmu, k1->pmu_off + PCIE_CLK_RESET_CONTROL,
+ APP_HOLD_PHY_RST | LTSSM_EN, LTSSM_EN);
+
+ /* Enable the MSI interrupt */
+ writel_relaxed(MSI_CTRL_INT, k1->link + INTR_ENABLE);
+
+ /* Top-level interrupt enable */
+ val = readl_relaxed(k1->link + K1_PHY_AHB_IRQ_EN);
+ val |= PCIE_INTERRUPT_EN;
+ writel_relaxed(val, k1->link + K1_PHY_AHB_IRQ_EN);
+
+ return 0;
+}
+
+static void k1_pcie_stop_link(struct dw_pcie *pci)
+{
+ struct k1_pcie *k1 = to_k1_pcie(pci);
+ u32 val;
+
+ /* Disable interrupts */
+ val = readl_relaxed(k1->link + K1_PHY_AHB_IRQ_EN);
+ val &= ~PCIE_INTERRUPT_EN;
+ writel_relaxed(val, k1->link + K1_PHY_AHB_IRQ_EN);
+
+ writel_relaxed(0, k1->link + INTR_ENABLE);
+
+ /* Disable the link and hold the PHY in reset */
+ regmap_update_bits(k1->pmu, k1->pmu_off + PCIE_CLK_RESET_CONTROL,
+ APP_HOLD_PHY_RST | LTSSM_EN, APP_HOLD_PHY_RST);
+}
+
+static const struct dw_pcie_ops k1_pcie_ops = {
+ .link_up = k1_pcie_link_up,
+ .start_link = k1_pcie_start_link,
+ .stop_link = k1_pcie_stop_link,
+};
+
+static int k1_pcie_parse_port(struct k1_pcie *k1)
+{
+ struct device *dev = k1->pci.dev;
+ struct device_node *root_port;
+ struct phy *phy;
+
+ /* We assume only one root port */
+ root_port = of_get_next_available_child(dev_of_node(dev), NULL);
+ if (!root_port)
+ return -EINVAL;
+
+ phy = devm_of_phy_get(dev, root_port, NULL);
+
+ of_node_put(root_port);
+
+ if (IS_ERR(phy))
+ return PTR_ERR(phy);
+
+ k1->phy = phy;
+
+ return 0;
+}
+
+static int k1_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct k1_pcie *k1;
+ int ret;
+
+ k1 = devm_kzalloc(dev, sizeof(*k1), GFP_KERNEL);
+ if (!k1)
+ return -ENOMEM;
+
+ k1->pmu = syscon_regmap_lookup_by_phandle_args(dev_of_node(dev),
+ SYSCON_APMU, 1,
+ &k1->pmu_off);
+ if (IS_ERR(k1->pmu))
+ return dev_err_probe(dev, PTR_ERR(k1->pmu),
+ "failed to lookup PMU registers\n");
+
+ k1->link = devm_platform_ioremap_resource_byname(pdev, "link");
+ if (IS_ERR(k1->link))
+ return dev_err_probe(dev, PTR_ERR(k1->link),
+ "failed to map \"link\" registers\n");
+
+ k1->pci.dev = dev;
+ k1->pci.ops = &k1_pcie_ops;
+ k1->pci.pp.num_vectors = MAX_MSI_IRQS;
+ dw_pcie_cap_set(&k1->pci, REQ_RES);
+
+ k1->pci.pp.ops = &k1_pcie_host_ops;
+
+ /* Hold the PHY in reset until we start the link */
+ regmap_set_bits(k1->pmu, k1->pmu_off + PCIE_CLK_RESET_CONTROL,
+ APP_HOLD_PHY_RST);
+
+ ret = devm_regulator_get_enable(dev, "vpcie3v3");
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to get \"vpcie3v3\" supply\n");
+
+ pm_runtime_set_active(dev);
+ pm_runtime_no_callbacks(dev);
+ devm_pm_runtime_enable(dev);
+
+ platform_set_drvdata(pdev, k1);
+
+ ret = k1_pcie_parse_port(k1);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to parse root port\n");
+
+ ret = dw_pcie_host_init(&k1->pci.pp);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to initialize host\n");
+
+ return 0;
+}
+
+static void k1_pcie_remove(struct platform_device *pdev)
+{
+ struct k1_pcie *k1 = platform_get_drvdata(pdev);
+
+ dw_pcie_host_deinit(&k1->pci.pp);
+}
+
+static const struct of_device_id k1_pcie_of_match_table[] = {
+ { .compatible = "spacemit,k1-pcie", },
+ { }
+};
+
+static struct platform_driver k1_pcie_driver = {
+ .probe = k1_pcie_probe,
+ .remove = k1_pcie_remove,
+ .driver = {
+ .name = "spacemit-k1-pcie",
+ .of_match_table = k1_pcie_of_match_table,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
+};
+module_platform_driver(k1_pcie_driver);
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("SpacemiT K1 PCIe host driver");
diff --git a/drivers/pci/controller/dwc/pcie-spear13xx.c b/drivers/pci/controller/dwc/pcie-spear13xx.c
index 7d0cdfd8138b..01794a9d3ad2 100644
--- a/drivers/pci/controller/dwc/pcie-spear13xx.c
+++ b/drivers/pci/controller/dwc/pcie-spear13xx.c
@@ -26,7 +26,6 @@ struct spear13xx_pcie {
void __iomem *app_base;
struct phy *phy;
struct clk *clk;
- bool is_gen1;
};
struct pcie_app_reg {
@@ -65,60 +64,12 @@ struct pcie_app_reg {
/* CR6 */
#define MSI_CTRL_INT (1 << 26)
-#define EXP_CAP_ID_OFFSET 0x70
-
#define to_spear13xx_pcie(x) dev_get_drvdata((x)->dev)
-static int spear13xx_pcie_establish_link(struct spear13xx_pcie *spear13xx_pcie)
+static int spear13xx_pcie_start_link(struct dw_pcie *pci)
{
- struct dw_pcie *pci = spear13xx_pcie->pci;
- struct pcie_port *pp = &pci->pp;
- struct pcie_app_reg *app_reg = spear13xx_pcie->app_base;
- u32 val;
- u32 exp_cap_off = EXP_CAP_ID_OFFSET;
-
- if (dw_pcie_link_up(pci)) {
- dev_err(pci->dev, "link already up\n");
- return 0;
- }
-
- dw_pcie_setup_rc(pp);
-
- /*
- * this controller support only 128 bytes read size, however its
- * default value in capability register is 512 bytes. So force
- * it to 128 here.
- */
- dw_pcie_read(pci->dbi_base + exp_cap_off + PCI_EXP_DEVCTL, 2, &val);
- val &= ~PCI_EXP_DEVCTL_READRQ;
- dw_pcie_write(pci->dbi_base + exp_cap_off + PCI_EXP_DEVCTL, 2, val);
-
- dw_pcie_write(pci->dbi_base + PCI_VENDOR_ID, 2, 0x104A);
- dw_pcie_write(pci->dbi_base + PCI_DEVICE_ID, 2, 0xCD80);
-
- /*
- * if is_gen1 is set then handle it, so that some buggy card
- * also works
- */
- if (spear13xx_pcie->is_gen1) {
- dw_pcie_read(pci->dbi_base + exp_cap_off + PCI_EXP_LNKCAP,
- 4, &val);
- if ((val & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) {
- val &= ~((u32)PCI_EXP_LNKCAP_SLS);
- val |= PCI_EXP_LNKCAP_SLS_2_5GB;
- dw_pcie_write(pci->dbi_base + exp_cap_off +
- PCI_EXP_LNKCAP, 4, val);
- }
-
- dw_pcie_read(pci->dbi_base + exp_cap_off + PCI_EXP_LNKCTL2,
- 2, &val);
- if ((val & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) {
- val &= ~((u32)PCI_EXP_LNKCAP_SLS);
- val |= PCI_EXP_LNKCAP_SLS_2_5GB;
- dw_pcie_write(pci->dbi_base + exp_cap_off +
- PCI_EXP_LNKCTL2, 2, val);
- }
- }
+ struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pci);
+ struct pcie_app_reg __iomem *app_reg = spear13xx_pcie->app_base;
/* enable ltssm */
writel(DEVICE_TYPE_RC | (1 << MISCTRL_EN_ID)
@@ -126,15 +77,15 @@ static int spear13xx_pcie_establish_link(struct spear13xx_pcie *spear13xx_pcie)
| ((u32)1 << REG_TRANSLATION_ENABLE),
&app_reg->app_ctrl_0);
- return dw_pcie_wait_for_link(pci);
+ return 0;
}
static irqreturn_t spear13xx_pcie_irq_handler(int irq, void *arg)
{
struct spear13xx_pcie *spear13xx_pcie = arg;
- struct pcie_app_reg *app_reg = spear13xx_pcie->app_base;
+ struct pcie_app_reg __iomem *app_reg = spear13xx_pcie->app_base;
struct dw_pcie *pci = spear13xx_pcie->pci;
- struct pcie_port *pp = &pci->pp;
+ struct dw_pcie_rp *pp = &pci->pp;
unsigned int status;
status = readl(&app_reg->int_sts);
@@ -151,57 +102,64 @@ static irqreturn_t spear13xx_pcie_irq_handler(int irq, void *arg)
static void spear13xx_pcie_enable_interrupts(struct spear13xx_pcie *spear13xx_pcie)
{
- struct dw_pcie *pci = spear13xx_pcie->pci;
- struct pcie_port *pp = &pci->pp;
- struct pcie_app_reg *app_reg = spear13xx_pcie->app_base;
+ struct pcie_app_reg __iomem *app_reg = spear13xx_pcie->app_base;
/* Enable MSI interrupt */
- if (IS_ENABLED(CONFIG_PCI_MSI)) {
- dw_pcie_msi_init(pp);
+ if (IS_ENABLED(CONFIG_PCI_MSI))
writel(readl(&app_reg->int_mask) |
MSI_CTRL_INT, &app_reg->int_mask);
- }
}
-static int spear13xx_pcie_link_up(struct dw_pcie *pci)
+static bool spear13xx_pcie_link_up(struct dw_pcie *pci)
{
struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pci);
- struct pcie_app_reg *app_reg = spear13xx_pcie->app_base;
-
- if (readl(&app_reg->app_status_1) & XMLH_LINK_UP)
- return 1;
+ struct pcie_app_reg __iomem *app_reg = spear13xx_pcie->app_base;
- return 0;
+ return readl(&app_reg->app_status_1) & XMLH_LINK_UP;
}
-static int spear13xx_pcie_host_init(struct pcie_port *pp)
+static int spear13xx_pcie_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pci);
+ u32 exp_cap_off = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ u32 val;
+
+ spear13xx_pcie->app_base = pci->dbi_base + 0x2000;
+
+ /*
+	 * This controller supports only a 128-byte maximum read request
+	 * size, but its default value in the capability register is 512
+	 * bytes, so force it to 128 here.
+ */
+ val = dw_pcie_readw_dbi(pci, exp_cap_off + PCI_EXP_DEVCTL);
+ val &= ~PCI_EXP_DEVCTL_READRQ;
+ dw_pcie_writew_dbi(pci, exp_cap_off + PCI_EXP_DEVCTL, val);
+
+ dw_pcie_writew_dbi(pci, PCI_VENDOR_ID, 0x104A);
+ dw_pcie_writew_dbi(pci, PCI_DEVICE_ID, 0xCD80);
- spear13xx_pcie_establish_link(spear13xx_pcie);
spear13xx_pcie_enable_interrupts(spear13xx_pcie);
return 0;
}
static const struct dw_pcie_host_ops spear13xx_pcie_host_ops = {
- .host_init = spear13xx_pcie_host_init,
+ .init = spear13xx_pcie_host_init,
};
static int spear13xx_add_pcie_port(struct spear13xx_pcie *spear13xx_pcie,
struct platform_device *pdev)
{
struct dw_pcie *pci = spear13xx_pcie->pci;
- struct pcie_port *pp = &pci->pp;
+ struct dw_pcie_rp *pp = &pci->pp;
struct device *dev = &pdev->dev;
int ret;
pp->irq = platform_get_irq(pdev, 0);
- if (pp->irq < 0) {
- dev_err(dev, "failed to get irq\n");
+ if (pp->irq < 0)
return pp->irq;
- }
+
ret = devm_request_irq(dev, pp->irq, spear13xx_pcie_irq_handler,
IRQF_SHARED | IRQF_NO_THREAD,
"spear1340-pcie", spear13xx_pcie);
@@ -211,6 +169,7 @@ static int spear13xx_add_pcie_port(struct spear13xx_pcie *spear13xx_pcie,
}
pp->ops = &spear13xx_pcie_host_ops;
+ pp->msi_irq[0] = -ENODEV;
ret = dw_pcie_host_init(pp);
if (ret) {
@@ -223,6 +182,7 @@ static int spear13xx_add_pcie_port(struct spear13xx_pcie *spear13xx_pcie,
static const struct dw_pcie_ops dw_pcie_ops = {
.link_up = spear13xx_pcie_link_up,
+ .start_link = spear13xx_pcie_start_link,
};
static int spear13xx_pcie_probe(struct platform_device *pdev)
@@ -231,7 +191,6 @@ static int spear13xx_pcie_probe(struct platform_device *pdev)
struct dw_pcie *pci;
struct spear13xx_pcie *spear13xx_pcie;
struct device_node *np = dev->of_node;
- struct resource *dbi_base;
int ret;
spear13xx_pcie = devm_kzalloc(dev, sizeof(*spear13xx_pcie), GFP_KERNEL);
@@ -270,17 +229,8 @@ static int spear13xx_pcie_probe(struct platform_device *pdev)
return ret;
}
- dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
- pci->dbi_base = devm_pci_remap_cfg_resource(dev, dbi_base);
- if (IS_ERR(pci->dbi_base)) {
- dev_err(dev, "couldn't remap dbi base %p\n", dbi_base);
- ret = PTR_ERR(pci->dbi_base);
- goto fail_clk;
- }
- spear13xx_pcie->app_base = pci->dbi_base + 0x2000;
-
if (of_property_read_bool(np, "st,pcie-is-gen1"))
- spear13xx_pcie->is_gen1 = true;
+ pci->max_link_speed = 1;
platform_set_drvdata(pdev, spear13xx_pcie);
@@ -305,7 +255,7 @@ static struct platform_driver spear13xx_pcie_driver = {
.probe = spear13xx_pcie_probe,
.driver = {
.name = "spear-pcie",
- .of_match_table = of_match_ptr(spear13xx_pcie_of_match),
+ .of_match_table = spear13xx_pcie_of_match,
.suppress_bind_attrs = true,
},
};
diff --git a/drivers/pci/controller/dwc/pcie-stm32-ep.c b/drivers/pci/controller/dwc/pcie-stm32-ep.c
new file mode 100644
index 000000000000..2cecf32d2b0f
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-stm32-ep.c
@@ -0,0 +1,343 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * STMicroelectronics STM32MP25 PCIe endpoint driver.
+ *
+ * Copyright (C) 2025 STMicroelectronics
+ * Author: Christian Bruel <christian.bruel@foss.st.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/gpio/consumer.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of_platform.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include "pcie-designware.h"
+#include "pcie-stm32.h"
+
+struct stm32_pcie {
+ struct dw_pcie pci;
+ struct regmap *regmap;
+ struct reset_control *rst;
+ struct phy *phy;
+ struct clk *clk;
+ struct gpio_desc *perst_gpio;
+ unsigned int perst_irq;
+};
+
+static void stm32_pcie_ep_init(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ enum pci_barno bar;
+
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
+ dw_pcie_ep_reset_bar(pci, bar);
+}
+
+static int stm32_pcie_start_link(struct dw_pcie *pci)
+{
+ struct stm32_pcie *stm32_pcie = to_stm32_pcie(pci);
+
+ enable_irq(stm32_pcie->perst_irq);
+
+ return 0;
+}
+
+static void stm32_pcie_stop_link(struct dw_pcie *pci)
+{
+ struct stm32_pcie *stm32_pcie = to_stm32_pcie(pci);
+
+ disable_irq(stm32_pcie->perst_irq);
+}
+
+static int stm32_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
+ unsigned int type, u16 interrupt_num)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+ switch (type) {
+ case PCI_IRQ_INTX:
+ return dw_pcie_ep_raise_intx_irq(ep, func_no);
+ case PCI_IRQ_MSI:
+ return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
+ default:
+ dev_err(pci->dev, "UNKNOWN IRQ type\n");
+ return -EINVAL;
+ }
+}
+
+static const struct pci_epc_features stm32_pcie_epc_features = {
+ .msi_capable = true,
+ .align = SZ_64K,
+};
+
+static const struct pci_epc_features*
+stm32_pcie_get_features(struct dw_pcie_ep *ep)
+{
+ return &stm32_pcie_epc_features;
+}
+
+static const struct dw_pcie_ep_ops stm32_pcie_ep_ops = {
+ .init = stm32_pcie_ep_init,
+ .raise_irq = stm32_pcie_raise_irq,
+ .get_features = stm32_pcie_get_features,
+};
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+ .start_link = stm32_pcie_start_link,
+ .stop_link = stm32_pcie_stop_link,
+};
+
+static int stm32_pcie_enable_resources(struct stm32_pcie *stm32_pcie)
+{
+ int ret;
+
+ ret = phy_init(stm32_pcie->phy);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(stm32_pcie->clk);
+ if (ret)
+ phy_exit(stm32_pcie->phy);
+
+ return ret;
+}
+
+static void stm32_pcie_disable_resources(struct stm32_pcie *stm32_pcie)
+{
+ clk_disable_unprepare(stm32_pcie->clk);
+
+ phy_exit(stm32_pcie->phy);
+}
+
+static void stm32_pcie_perst_assert(struct dw_pcie *pci)
+{
+ struct stm32_pcie *stm32_pcie = to_stm32_pcie(pci);
+ struct dw_pcie_ep *ep = &stm32_pcie->pci.ep;
+ struct device *dev = pci->dev;
+
+ dev_dbg(dev, "PERST asserted by host\n");
+
+ regmap_update_bits(stm32_pcie->regmap, SYSCFG_PCIECR,
+ STM32MP25_PCIECR_LTSSM_EN, 0);
+
+ pci_epc_deinit_notify(ep->epc);
+
+ stm32_pcie_disable_resources(stm32_pcie);
+
+ pm_runtime_put_sync(dev);
+}
+
+static void stm32_pcie_perst_deassert(struct dw_pcie *pci)
+{
+ struct stm32_pcie *stm32_pcie = to_stm32_pcie(pci);
+ struct device *dev = pci->dev;
+ struct dw_pcie_ep *ep = &pci->ep;
+ int ret;
+
+ dev_dbg(dev, "PERST de-asserted by host\n");
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0) {
+ dev_err(dev, "Failed to resume runtime PM: %d\n", ret);
+ return;
+ }
+
+ ret = stm32_pcie_enable_resources(stm32_pcie);
+ if (ret) {
+ dev_err(dev, "Failed to enable resources: %d\n", ret);
+ goto err_pm_put_sync;
+ }
+
+ /*
+ * Reprogram the configuration space registers here because the DBI
+ * registers were reset by the PHY RCC during phy_init().
+ */
+ ret = dw_pcie_ep_init_registers(ep);
+ if (ret) {
+ dev_err(dev, "Failed to complete initialization: %d\n", ret);
+ goto err_disable_resources;
+ }
+
+ pci_epc_init_notify(ep->epc);
+
+ /* Enable link training */
+ regmap_update_bits(stm32_pcie->regmap, SYSCFG_PCIECR,
+ STM32MP25_PCIECR_LTSSM_EN,
+ STM32MP25_PCIECR_LTSSM_EN);
+
+ return;
+
+err_disable_resources:
+ stm32_pcie_disable_resources(stm32_pcie);
+
+err_pm_put_sync:
+ pm_runtime_put_sync(dev);
+}
+
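+/*
+ * PERST# is watched with a GPIO interrupt whose trigger polarity is
+ * flipped after each event, so each transition of the line is delivered
+ * to this thread.
+ */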
+static irqreturn_t stm32_pcie_ep_perst_irq_thread(int irq, void *data)
+{
+ struct stm32_pcie *stm32_pcie = data;
+ struct dw_pcie *pci = &stm32_pcie->pci;
+ u32 perst;
+
+ perst = gpiod_get_value(stm32_pcie->perst_gpio);
+ if (perst)
+ stm32_pcie_perst_assert(pci);
+ else
+ stm32_pcie_perst_deassert(pci);
+
+ irq_set_irq_type(gpiod_to_irq(stm32_pcie->perst_gpio),
+ (perst ? IRQF_TRIGGER_HIGH : IRQF_TRIGGER_LOW));
+
+ return IRQ_HANDLED;
+}
+
+static int stm32_add_pcie_ep(struct stm32_pcie *stm32_pcie,
+ struct platform_device *pdev)
+{
+ struct dw_pcie_ep *ep = &stm32_pcie->pci.ep;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ ret = regmap_update_bits(stm32_pcie->regmap, SYSCFG_PCIECR,
+ STM32MP25_PCIECR_TYPE_MASK,
+ STM32MP25_PCIECR_EP);
+ if (ret)
+ return ret;
+
+ reset_control_assert(stm32_pcie->rst);
+ reset_control_deassert(stm32_pcie->rst);
+
+ ep->ops = &stm32_pcie_ep_ops;
+
+ ep->page_size = stm32_pcie_epc_features.align;
+
+ ret = dw_pcie_ep_init(ep);
+ if (ret) {
+ dev_err(dev, "Failed to initialize ep: %d\n", ret);
+ return ret;
+ }
+
+ ret = stm32_pcie_enable_resources(stm32_pcie);
+ if (ret) {
+ dev_err(dev, "Failed to enable resources: %d\n", ret);
+ dw_pcie_ep_deinit(ep);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int stm32_pcie_probe(struct platform_device *pdev)
+{
+ struct stm32_pcie *stm32_pcie;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ stm32_pcie = devm_kzalloc(dev, sizeof(*stm32_pcie), GFP_KERNEL);
+ if (!stm32_pcie)
+ return -ENOMEM;
+
+ stm32_pcie->pci.dev = dev;
+ stm32_pcie->pci.ops = &dw_pcie_ops;
+
+ stm32_pcie->regmap = syscon_regmap_lookup_by_compatible("st,stm32mp25-syscfg");
+ if (IS_ERR(stm32_pcie->regmap))
+ return dev_err_probe(dev, PTR_ERR(stm32_pcie->regmap),
+ "No syscfg specified\n");
+
+ stm32_pcie->phy = devm_phy_get(dev, NULL);
+ if (IS_ERR(stm32_pcie->phy))
+ return dev_err_probe(dev, PTR_ERR(stm32_pcie->phy),
+ "failed to get pcie-phy\n");
+
+ stm32_pcie->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(stm32_pcie->clk))
+ return dev_err_probe(dev, PTR_ERR(stm32_pcie->clk),
+ "Failed to get PCIe clock source\n");
+
+ stm32_pcie->rst = devm_reset_control_get_exclusive(dev, NULL);
+ if (IS_ERR(stm32_pcie->rst))
+ return dev_err_probe(dev, PTR_ERR(stm32_pcie->rst),
+ "Failed to get PCIe reset\n");
+
+ stm32_pcie->perst_gpio = devm_gpiod_get(dev, "reset", GPIOD_IN);
+ if (IS_ERR(stm32_pcie->perst_gpio))
+ return dev_err_probe(dev, PTR_ERR(stm32_pcie->perst_gpio),
+ "Failed to get reset GPIO\n");
+
+ ret = phy_set_mode(stm32_pcie->phy, PHY_MODE_PCIE);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, stm32_pcie);
+
+ pm_runtime_get_noresume(dev);
+
+ ret = devm_pm_runtime_enable(dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(&pdev->dev);
+ return dev_err_probe(dev, ret, "Failed to enable runtime PM\n");
+ }
+
+ stm32_pcie->perst_irq = gpiod_to_irq(stm32_pcie->perst_gpio);
+
+ /* Will be enabled in start_link when device is initialized. */
+ irq_set_status_flags(stm32_pcie->perst_irq, IRQ_NOAUTOEN);
+
+ ret = devm_request_threaded_irq(dev, stm32_pcie->perst_irq, NULL,
+ stm32_pcie_ep_perst_irq_thread,
+ IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+ "perst_irq", stm32_pcie);
+ if (ret) {
+ pm_runtime_put_noidle(&pdev->dev);
+ return dev_err_probe(dev, ret, "Failed to request PERST IRQ\n");
+ }
+
+ ret = stm32_add_pcie_ep(stm32_pcie, pdev);
+ if (ret)
+ pm_runtime_put_noidle(&pdev->dev);
+
+ return ret;
+}
+
+static void stm32_pcie_remove(struct platform_device *pdev)
+{
+ struct stm32_pcie *stm32_pcie = platform_get_drvdata(pdev);
+ struct dw_pcie *pci = &stm32_pcie->pci;
+ struct dw_pcie_ep *ep = &pci->ep;
+
+ dw_pcie_stop_link(pci);
+
+ pci_epc_deinit_notify(ep->epc);
+ dw_pcie_ep_deinit(ep);
+
+ stm32_pcie_disable_resources(stm32_pcie);
+
+ pm_runtime_put_sync(&pdev->dev);
+}
+
+static const struct of_device_id stm32_pcie_ep_of_match[] = {
+ { .compatible = "st,stm32mp25-pcie-ep" },
+ {},
+};
+
+static struct platform_driver stm32_pcie_ep_driver = {
+ .probe = stm32_pcie_probe,
+ .remove = stm32_pcie_remove,
+ .driver = {
+ .name = "stm32-ep-pcie",
+ .of_match_table = stm32_pcie_ep_of_match,
+ },
+};
+
+module_platform_driver(stm32_pcie_ep_driver);
+
+MODULE_AUTHOR("Christian Bruel <christian.bruel@foss.st.com>");
+MODULE_DESCRIPTION("STM32MP25 PCIe Endpoint Controller driver");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(of, stm32_pcie_ep_of_match);
diff --git a/drivers/pci/controller/dwc/pcie-stm32.c b/drivers/pci/controller/dwc/pcie-stm32.c
new file mode 100644
index 000000000000..a9e77478443b
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-stm32.c
@@ -0,0 +1,370 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * STMicroelectronics STM32MP25 PCIe root complex driver.
+ *
+ * Copyright (C) 2025 STMicroelectronics
+ * Author: Christian Bruel <christian.bruel@foss.st.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/irq.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/phy/phy.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/pm_wakeirq.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/stddef.h>
+
+#include "../../pci.h"
+
+#include "pcie-designware.h"
+#include "pcie-stm32.h"
+
+struct stm32_pcie {
+ struct dw_pcie pci;
+ struct regmap *regmap;
+ struct reset_control *rst;
+ struct phy *phy;
+ struct clk *clk;
+ struct gpio_desc *perst_gpio;
+ struct gpio_desc *wake_gpio;
+};
+
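+/*
+ * PERST# timing follows the PCIe CEM spec: PCIE_T_PVPERL_MS (100 ms) of
+ * stable power before deasserting PERST#, then PCIE_RESET_CONFIG_WAIT_MS
+ * before the first config access.
+ */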
+static void stm32_pcie_deassert_perst(struct stm32_pcie *stm32_pcie)
+{
+ if (stm32_pcie->perst_gpio) {
+ msleep(PCIE_T_PVPERL_MS);
+ gpiod_set_value(stm32_pcie->perst_gpio, 0);
+ }
+
+ msleep(PCIE_RESET_CONFIG_WAIT_MS);
+}
+
+static void stm32_pcie_assert_perst(struct stm32_pcie *stm32_pcie)
+{
+ gpiod_set_value(stm32_pcie->perst_gpio, 1);
+}
+
+static int stm32_pcie_start_link(struct dw_pcie *pci)
+{
+ struct stm32_pcie *stm32_pcie = to_stm32_pcie(pci);
+
+ return regmap_update_bits(stm32_pcie->regmap, SYSCFG_PCIECR,
+ STM32MP25_PCIECR_LTSSM_EN,
+ STM32MP25_PCIECR_LTSSM_EN);
+}
+
+static void stm32_pcie_stop_link(struct dw_pcie *pci)
+{
+ struct stm32_pcie *stm32_pcie = to_stm32_pcie(pci);
+
+ regmap_update_bits(stm32_pcie->regmap, SYSCFG_PCIECR,
+ STM32MP25_PCIECR_LTSSM_EN, 0);
+}
+
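+/*
+ * When a wakeup path is armed the PHY is left initialized across suspend
+ * so WAKE# signalling keeps working; resume mirrors this and only
+ * re-initializes the PHY when it was actually torn down.
+ */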
+static int stm32_pcie_suspend_noirq(struct device *dev)
+{
+ struct stm32_pcie *stm32_pcie = dev_get_drvdata(dev);
+ int ret;
+
+ ret = dw_pcie_suspend_noirq(&stm32_pcie->pci);
+ if (ret)
+ return ret;
+
+ stm32_pcie_assert_perst(stm32_pcie);
+
+ clk_disable_unprepare(stm32_pcie->clk);
+
+ if (!device_wakeup_path(dev))
+ phy_exit(stm32_pcie->phy);
+
+ return pinctrl_pm_select_sleep_state(dev);
+}
+
+static int stm32_pcie_resume_noirq(struct device *dev)
+{
+ struct stm32_pcie *stm32_pcie = dev_get_drvdata(dev);
+ int ret;
+
+ /*
+	 * The core clock is gated by CLKREQ# from the COMBOPHY REFCLK, so
+	 * if no device is present we must deassert it with a GPIO from the
+	 * pinctrl pinmux before accessing the DBI registers.
+ */
+ ret = pinctrl_pm_select_init_state(dev);
+ if (ret) {
+ dev_err(dev, "Failed to activate pinctrl pm state: %d\n", ret);
+ return ret;
+ }
+
+ if (!device_wakeup_path(dev)) {
+ ret = phy_init(stm32_pcie->phy);
+ if (ret) {
+ pinctrl_pm_select_default_state(dev);
+ return ret;
+ }
+ }
+
+ ret = clk_prepare_enable(stm32_pcie->clk);
+ if (ret)
+ goto err_phy_exit;
+
+ stm32_pcie_deassert_perst(stm32_pcie);
+
+ ret = dw_pcie_resume_noirq(&stm32_pcie->pci);
+ if (ret)
+ goto err_disable_clk;
+
+ pinctrl_pm_select_default_state(dev);
+
+ return 0;
+
+err_disable_clk:
+ stm32_pcie_assert_perst(stm32_pcie);
+ clk_disable_unprepare(stm32_pcie->clk);
+
+err_phy_exit:
+ phy_exit(stm32_pcie->phy);
+ pinctrl_pm_select_default_state(dev);
+
+ return ret;
+}
+
+static const struct dev_pm_ops stm32_pcie_pm_ops = {
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(stm32_pcie_suspend_noirq,
+ stm32_pcie_resume_noirq)
+};
+
+static const struct dw_pcie_host_ops stm32_pcie_host_ops = {
+};
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+ .start_link = stm32_pcie_start_link,
+ .stop_link = stm32_pcie_stop_link
+};
+
+static int stm32_add_pcie_port(struct stm32_pcie *stm32_pcie)
+{
+ struct device *dev = stm32_pcie->pci.dev;
+ unsigned int wake_irq;
+ int ret;
+
+ ret = phy_set_mode(stm32_pcie->phy, PHY_MODE_PCIE);
+ if (ret)
+ return ret;
+
+ ret = phy_init(stm32_pcie->phy);
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(stm32_pcie->regmap, SYSCFG_PCIECR,
+ STM32MP25_PCIECR_TYPE_MASK,
+ STM32MP25_PCIECR_RC);
+ if (ret)
+ goto err_phy_exit;
+
+ stm32_pcie_deassert_perst(stm32_pcie);
+
+ if (stm32_pcie->wake_gpio) {
+ wake_irq = gpiod_to_irq(stm32_pcie->wake_gpio);
+ ret = dev_pm_set_dedicated_wake_irq(dev, wake_irq);
+ if (ret) {
+			dev_err(dev, "Failed to set dedicated wake IRQ: %d\n", ret);
+ goto err_assert_perst;
+ }
+ irq_set_irq_type(wake_irq, IRQ_TYPE_EDGE_FALLING);
+ }
+
+ return 0;
+
+err_assert_perst:
+ stm32_pcie_assert_perst(stm32_pcie);
+
+err_phy_exit:
+ phy_exit(stm32_pcie->phy);
+
+ return ret;
+}
+
+static void stm32_remove_pcie_port(struct stm32_pcie *stm32_pcie)
+{
+ dev_pm_clear_wake_irq(stm32_pcie->pci.dev);
+
+ stm32_pcie_assert_perst(stm32_pcie);
+
+ phy_exit(stm32_pcie->phy);
+}
+
+static int stm32_pcie_parse_port(struct stm32_pcie *stm32_pcie)
+{
+ struct device *dev = stm32_pcie->pci.dev;
+ struct device_node *root_port;
+
+ root_port = of_get_next_available_child(dev->of_node, NULL);
+
+ stm32_pcie->phy = devm_of_phy_get(dev, root_port, NULL);
+ if (IS_ERR(stm32_pcie->phy)) {
+ of_node_put(root_port);
+ return dev_err_probe(dev, PTR_ERR(stm32_pcie->phy),
+ "Failed to get pcie-phy\n");
+ }
+
+ stm32_pcie->perst_gpio = devm_fwnode_gpiod_get(dev, of_fwnode_handle(root_port),
+ "reset", GPIOD_OUT_HIGH, NULL);
+ if (IS_ERR(stm32_pcie->perst_gpio)) {
+ if (PTR_ERR(stm32_pcie->perst_gpio) != -ENOENT) {
+ of_node_put(root_port);
+ return dev_err_probe(dev, PTR_ERR(stm32_pcie->perst_gpio),
+ "Failed to get reset GPIO\n");
+ }
+ stm32_pcie->perst_gpio = NULL;
+ }
+
+ stm32_pcie->wake_gpio = devm_fwnode_gpiod_get(dev, of_fwnode_handle(root_port),
+ "wake", GPIOD_IN, NULL);
+
+ if (IS_ERR(stm32_pcie->wake_gpio)) {
+ if (PTR_ERR(stm32_pcie->wake_gpio) != -ENOENT) {
+ of_node_put(root_port);
+ return dev_err_probe(dev, PTR_ERR(stm32_pcie->wake_gpio),
+ "Failed to get wake GPIO\n");
+ }
+ stm32_pcie->wake_gpio = NULL;
+ }
+
+ of_node_put(root_port);
+
+ return 0;
+}
+
+static int stm32_pcie_probe(struct platform_device *pdev)
+{
+ struct stm32_pcie *stm32_pcie;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ stm32_pcie = devm_kzalloc(dev, sizeof(*stm32_pcie), GFP_KERNEL);
+ if (!stm32_pcie)
+ return -ENOMEM;
+
+ stm32_pcie->pci.dev = dev;
+ stm32_pcie->pci.ops = &dw_pcie_ops;
+ stm32_pcie->pci.pp.ops = &stm32_pcie_host_ops;
+
+ stm32_pcie->regmap = syscon_regmap_lookup_by_compatible("st,stm32mp25-syscfg");
+ if (IS_ERR(stm32_pcie->regmap))
+ return dev_err_probe(dev, PTR_ERR(stm32_pcie->regmap),
+ "No syscfg specified\n");
+
+ stm32_pcie->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(stm32_pcie->clk))
+ return dev_err_probe(dev, PTR_ERR(stm32_pcie->clk),
+ "Failed to get PCIe clock source\n");
+
+ stm32_pcie->rst = devm_reset_control_get_exclusive(dev, NULL);
+ if (IS_ERR(stm32_pcie->rst))
+ return dev_err_probe(dev, PTR_ERR(stm32_pcie->rst),
+ "Failed to get PCIe reset\n");
+
+ ret = stm32_pcie_parse_port(stm32_pcie);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, stm32_pcie);
+
+ ret = stm32_add_pcie_port(stm32_pcie);
+ if (ret)
+ return ret;
+
+ reset_control_assert(stm32_pcie->rst);
+ reset_control_deassert(stm32_pcie->rst);
+
+ ret = clk_prepare_enable(stm32_pcie->clk);
+ if (ret) {
+		dev_err(dev, "Failed to enable core clock: %d\n", ret);
+ goto err_remove_port;
+ }
+
+ ret = pm_runtime_set_active(dev);
+ if (ret < 0) {
+ dev_err_probe(dev, ret, "Failed to activate runtime PM\n");
+ goto err_disable_clk;
+ }
+
+ pm_runtime_no_callbacks(dev);
+
+ ret = devm_pm_runtime_enable(dev);
+ if (ret < 0) {
+ dev_err_probe(dev, ret, "Failed to enable runtime PM\n");
+ goto err_disable_clk;
+ }
+
+ ret = dw_pcie_host_init(&stm32_pcie->pci.pp);
+ if (ret)
+ goto err_disable_clk;
+
+ if (stm32_pcie->wake_gpio)
+ device_init_wakeup(dev, true);
+
+ return 0;
+
+err_disable_clk:
+ clk_disable_unprepare(stm32_pcie->clk);
+
+err_remove_port:
+ stm32_remove_pcie_port(stm32_pcie);
+
+ return ret;
+}
+
+static void stm32_pcie_remove(struct platform_device *pdev)
+{
+ struct stm32_pcie *stm32_pcie = platform_get_drvdata(pdev);
+ struct dw_pcie_rp *pp = &stm32_pcie->pci.pp;
+
+ if (stm32_pcie->wake_gpio)
+ device_init_wakeup(&pdev->dev, false);
+
+ dw_pcie_host_deinit(pp);
+
+ clk_disable_unprepare(stm32_pcie->clk);
+
+ stm32_remove_pcie_port(stm32_pcie);
+
+ pm_runtime_put_noidle(&pdev->dev);
+}
+
+static const struct of_device_id stm32_pcie_of_match[] = {
+ { .compatible = "st,stm32mp25-pcie-rc" },
+ {},
+};
+
+static struct platform_driver stm32_pcie_driver = {
+ .probe = stm32_pcie_probe,
+ .remove = stm32_pcie_remove,
+ .driver = {
+ .name = "stm32-pcie",
+ .of_match_table = stm32_pcie_of_match,
+ .pm = &stm32_pcie_pm_ops,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
+};
+
+module_platform_driver(stm32_pcie_driver);
+
+MODULE_AUTHOR("Christian Bruel <christian.bruel@foss.st.com>");
+MODULE_DESCRIPTION("STM32MP25 PCIe Controller driver");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(of, stm32_pcie_of_match);
diff --git a/drivers/pci/controller/dwc/pcie-stm32.h b/drivers/pci/controller/dwc/pcie-stm32.h
new file mode 100644
index 000000000000..419cf1ff669d
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-stm32.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * ST PCIe driver definitions for STM32-MP25 SoC
+ *
+ * Copyright (C) 2025 STMicroelectronics - All Rights Reserved
+ * Author: Christian Bruel <christian.bruel@foss.st.com>
+ */
+
+#include <linux/bits.h>
+#include <linux/device.h>
+
+#define to_stm32_pcie(x) dev_get_drvdata((x)->dev)
+
+#define STM32MP25_PCIECR_TYPE_MASK GENMASK(11, 8)
+#define STM32MP25_PCIECR_EP 0
+#define STM32MP25_PCIECR_LTSSM_EN BIT(2)
+#define STM32MP25_PCIECR_RC BIT(10)
+
+#define SYSCFG_PCIECR 0x6000
diff --git a/drivers/pci/controller/dwc/pcie-tegra194-acpi.c b/drivers/pci/controller/dwc/pcie-tegra194-acpi.c
new file mode 100644
index 000000000000..55f61914a986
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-tegra194-acpi.c
@@ -0,0 +1,109 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * ACPI quirks for Tegra194 PCIe host controller
+ *
+ * Copyright (C) 2021 NVIDIA Corporation.
+ *
+ * Author: Vidya Sagar <vidyas@nvidia.com>
+ */
+
+#include <linux/pci.h>
+#include <linux/pci-acpi.h>
+#include <linux/pci-ecam.h>
+
+#include "pcie-designware.h"
+
+struct tegra194_pcie_ecam {
+ void __iomem *config_base;
+ void __iomem *iatu_base;
+ void __iomem *dbi_base;
+};
+
+static int tegra194_acpi_init(struct pci_config_window *cfg)
+{
+ struct device *dev = cfg->parent;
+ struct tegra194_pcie_ecam *pcie_ecam;
+
+ pcie_ecam = devm_kzalloc(dev, sizeof(*pcie_ecam), GFP_KERNEL);
+ if (!pcie_ecam)
+ return -ENOMEM;
+
+ pcie_ecam->config_base = cfg->win;
+ pcie_ecam->iatu_base = cfg->win + SZ_256K;
+ pcie_ecam->dbi_base = cfg->win + SZ_512K;
+ cfg->priv = pcie_ecam;
+
+ return 0;
+}
+
+static void atu_reg_write(struct tegra194_pcie_ecam *pcie_ecam, int index,
+ u32 val, u32 reg)
+{
+ u32 offset = PCIE_ATU_UNROLL_BASE(PCIE_ATU_REGION_DIR_OB, index) +
+ PCIE_ATU_VIEWPORT_BASE;
+
+ writel(val, pcie_ecam->iatu_base + offset + reg);
+}
+
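+/*
+ * Program one outbound iATU window using the "unrolled" register layout.
+ * The enable bit in REGION_CTRL2 is written last, so the window only goes
+ * live once the base/limit/target registers are fully set up.
+ */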
+static void program_outbound_atu(struct tegra194_pcie_ecam *pcie_ecam,
+ int index, int type, u64 cpu_addr,
+ u64 pci_addr, u64 size)
+{
+ atu_reg_write(pcie_ecam, index, lower_32_bits(cpu_addr),
+ PCIE_ATU_LOWER_BASE);
+ atu_reg_write(pcie_ecam, index, upper_32_bits(cpu_addr),
+ PCIE_ATU_UPPER_BASE);
+ atu_reg_write(pcie_ecam, index, lower_32_bits(pci_addr),
+ PCIE_ATU_LOWER_TARGET);
+ atu_reg_write(pcie_ecam, index, lower_32_bits(cpu_addr + size - 1),
+ PCIE_ATU_LIMIT);
+ atu_reg_write(pcie_ecam, index, upper_32_bits(pci_addr),
+ PCIE_ATU_UPPER_TARGET);
+ atu_reg_write(pcie_ecam, index, type, PCIE_ATU_REGION_CTRL1);
+ atu_reg_write(pcie_ecam, index, PCIE_ATU_ENABLE, PCIE_ATU_REGION_CTRL2);
+}
+
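+/*
+ * ECAM quirk: accesses to the root bus go straight to the DBI space, while
+ * child buses are reached by retargeting a single outbound ATU window
+ * (CFG0 for the root port's immediate child, CFG1 beyond it) on every
+ * config access.
+ */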
+static void __iomem *tegra194_map_bus(struct pci_bus *bus,
+ unsigned int devfn, int where)
+{
+ struct pci_config_window *cfg = bus->sysdata;
+ struct tegra194_pcie_ecam *pcie_ecam = cfg->priv;
+ u32 busdev;
+ int type;
+
+ if (bus->number < cfg->busr.start || bus->number > cfg->busr.end)
+ return NULL;
+
+ if (bus->number == cfg->busr.start) {
+ if (PCI_SLOT(devfn) == 0)
+ return pcie_ecam->dbi_base + where;
+ else
+ return NULL;
+ }
+
+ busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
+ PCIE_ATU_FUNC(PCI_FUNC(devfn));
+
+ if (bus->parent->number == cfg->busr.start) {
+ if (PCI_SLOT(devfn) == 0)
+ type = PCIE_ATU_TYPE_CFG0;
+ else
+ return NULL;
+ } else {
+ type = PCIE_ATU_TYPE_CFG1;
+ }
+
+ program_outbound_atu(pcie_ecam, 0, type, cfg->res.start, busdev,
+ SZ_256K);
+
+ return pcie_ecam->config_base + where;
+}
+
+const struct pci_ecam_ops tegra194_pcie_ops = {
+ .init = tegra194_acpi_init,
+ .pci_ops = {
+ .map_bus = tegra194_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+ }
+};
diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c
new file mode 100644
index 000000000000..0ddeef70726d
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-tegra194.c
@@ -0,0 +1,2503 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * PCIe host controller driver for the following SoCs
+ * Tegra194
+ * Tegra234
+ *
+ * Copyright (C) 2019-2022 NVIDIA Corporation.
+ *
+ * Author: Vidya Sagar <vidyas@nvidia.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/interconnect.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_pci.h>
+#include <linux/pci.h>
+#include <linux/phy/phy.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/random.h>
+#include <linux/reset.h>
+#include <linux/resource.h>
+#include <linux/types.h>
+#include "pcie-designware.h"
+#include <soc/tegra/bpmp.h>
+#include <soc/tegra/bpmp-abi.h>
+#include "../../pci.h"
+
+#define TEGRA194_DWC_IP_VER 0x490A
+#define TEGRA234_DWC_IP_VER 0x562A
+
+#define APPL_PINMUX 0x0
+#define APPL_PINMUX_PEX_RST BIT(0)
+#define APPL_PINMUX_CLKREQ_OVERRIDE_EN BIT(2)
+#define APPL_PINMUX_CLKREQ_OVERRIDE BIT(3)
+#define APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN BIT(4)
+#define APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE BIT(5)
+
+#define APPL_CTRL 0x4
+#define APPL_CTRL_SYS_PRE_DET_STATE BIT(6)
+#define APPL_CTRL_LTSSM_EN BIT(7)
+#define APPL_CTRL_HW_HOT_RST_EN BIT(20)
+#define APPL_CTRL_HW_HOT_RST_MODE_MASK GENMASK(1, 0)
+#define APPL_CTRL_HW_HOT_RST_MODE_SHIFT 22
+#define APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST 0x1
+#define APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST_LTSSM_EN 0x2
+
+#define APPL_INTR_EN_L0_0 0x8
+#define APPL_INTR_EN_L0_0_LINK_STATE_INT_EN BIT(0)
+#define APPL_INTR_EN_L0_0_MSI_RCV_INT_EN BIT(4)
+#define APPL_INTR_EN_L0_0_INT_INT_EN BIT(8)
+#define APPL_INTR_EN_L0_0_PCI_CMD_EN_INT_EN BIT(15)
+#define APPL_INTR_EN_L0_0_CDM_REG_CHK_INT_EN BIT(19)
+#define APPL_INTR_EN_L0_0_SYS_INTR_EN BIT(30)
+#define APPL_INTR_EN_L0_0_SYS_MSI_INTR_EN BIT(31)
+
+#define APPL_INTR_STATUS_L0 0xC
+#define APPL_INTR_STATUS_L0_LINK_STATE_INT BIT(0)
+#define APPL_INTR_STATUS_L0_INT_INT BIT(8)
+#define APPL_INTR_STATUS_L0_PCI_CMD_EN_INT BIT(15)
+#define APPL_INTR_STATUS_L0_PEX_RST_INT BIT(16)
+#define APPL_INTR_STATUS_L0_CDM_REG_CHK_INT BIT(18)
+
+#define APPL_INTR_EN_L1_0_0 0x1C
+#define APPL_INTR_EN_L1_0_0_LINK_REQ_RST_NOT_INT_EN BIT(1)
+#define APPL_INTR_EN_L1_0_0_RDLH_LINK_UP_INT_EN BIT(3)
+#define APPL_INTR_EN_L1_0_0_HOT_RESET_DONE_INT_EN BIT(30)
+
+#define APPL_INTR_STATUS_L1_0_0 0x20
+#define APPL_INTR_STATUS_L1_0_0_LINK_REQ_RST_NOT_CHGED BIT(1)
+#define APPL_INTR_STATUS_L1_0_0_RDLH_LINK_UP_CHGED BIT(3)
+#define APPL_INTR_STATUS_L1_0_0_HOT_RESET_DONE BIT(30)
+
+#define APPL_INTR_STATUS_L1_1 0x2C
+#define APPL_INTR_STATUS_L1_2 0x30
+#define APPL_INTR_STATUS_L1_3 0x34
+#define APPL_INTR_STATUS_L1_6 0x3C
+#define APPL_INTR_STATUS_L1_7 0x40
+#define APPL_INTR_STATUS_L1_15_CFG_BME_CHGED BIT(1)
+
+#define APPL_INTR_EN_L1_8_0 0x44
+#define APPL_INTR_EN_L1_8_BW_MGT_INT_EN BIT(2)
+#define APPL_INTR_EN_L1_8_AUTO_BW_INT_EN BIT(3)
+#define APPL_INTR_EN_L1_8_INTX_EN BIT(11)
+#define APPL_INTR_EN_L1_8_AER_INT_EN BIT(15)
+
+#define APPL_INTR_STATUS_L1_8_0 0x4C
+#define APPL_INTR_STATUS_L1_8_0_EDMA_INT_MASK GENMASK(11, 6)
+#define APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS BIT(2)
+#define APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS BIT(3)
+
+#define APPL_INTR_STATUS_L1_9 0x54
+#define APPL_INTR_STATUS_L1_10 0x58
+#define APPL_INTR_STATUS_L1_11 0x64
+#define APPL_INTR_STATUS_L1_13 0x74
+#define APPL_INTR_STATUS_L1_14 0x78
+#define APPL_INTR_STATUS_L1_15 0x7C
+#define APPL_INTR_STATUS_L1_17 0x88
+
+#define APPL_INTR_EN_L1_18 0x90
+#define APPL_INTR_EN_L1_18_CDM_REG_CHK_CMPLT BIT(2)
+#define APPL_INTR_EN_L1_18_CDM_REG_CHK_CMP_ERR BIT(1)
+#define APPL_INTR_EN_L1_18_CDM_REG_CHK_LOGIC_ERR BIT(0)
+
+#define APPL_INTR_STATUS_L1_18 0x94
+#define APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMPLT BIT(2)
+#define APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMP_ERR BIT(1)
+#define APPL_INTR_STATUS_L1_18_CDM_REG_CHK_LOGIC_ERR BIT(0)
+
+#define APPL_MSI_CTRL_1 0xAC
+
+#define APPL_MSI_CTRL_2 0xB0
+
+#define APPL_LEGACY_INTX 0xB8
+
+#define APPL_LTR_MSG_1 0xC4
+#define LTR_MSG_REQ BIT(15)
+#define LTR_NOSNOOP_MSG_REQ BIT(31)
+
+#define APPL_LTR_MSG_2 0xC8
+#define APPL_LTR_MSG_2_LTR_MSG_REQ_STATE BIT(3)
+
+#define APPL_LINK_STATUS 0xCC
+#define APPL_LINK_STATUS_RDLH_LINK_UP BIT(0)
+
+#define APPL_DEBUG 0xD0
+#define APPL_DEBUG_PM_LINKST_IN_L2_LAT BIT(21)
+#define APPL_DEBUG_PM_LINKST_IN_L0 0x11
+#define APPL_DEBUG_LTSSM_STATE_MASK GENMASK(8, 3)
+#define APPL_DEBUG_LTSSM_STATE_SHIFT 3
+#define LTSSM_STATE_PRE_DETECT 5
+
+#define APPL_RADM_STATUS 0xE4
+#define APPL_PM_XMT_TURNOFF_STATE BIT(0)
+
+#define APPL_DM_TYPE 0x100
+#define APPL_DM_TYPE_MASK GENMASK(3, 0)
+#define APPL_DM_TYPE_RP 0x4
+#define APPL_DM_TYPE_EP 0x0
+
+#define APPL_CFG_BASE_ADDR 0x104
+#define APPL_CFG_BASE_ADDR_MASK GENMASK(31, 12)
+
+#define APPL_CFG_IATU_DMA_BASE_ADDR 0x108
+#define APPL_CFG_IATU_DMA_BASE_ADDR_MASK GENMASK(31, 18)
+
+#define APPL_CFG_MISC 0x110
+#define APPL_CFG_MISC_SLV_EP_MODE BIT(14)
+#define APPL_CFG_MISC_ARCACHE_MASK GENMASK(13, 10)
+#define APPL_CFG_MISC_ARCACHE_SHIFT 10
+#define APPL_CFG_MISC_ARCACHE_VAL 3
+
+#define APPL_CFG_SLCG_OVERRIDE 0x114
+#define APPL_CFG_SLCG_OVERRIDE_SLCG_EN_MASTER BIT(0)
+
+#define APPL_CAR_RESET_OVRD 0x12C
+#define APPL_CAR_RESET_OVRD_CYA_OVERRIDE_CORE_RST_N BIT(0)
+
+#define IO_BASE_IO_DECODE BIT(0)
+#define IO_BASE_IO_DECODE_BIT8 BIT(8)
+
+#define CFG_PREF_MEM_LIMIT_BASE_MEM_DECODE BIT(0)
+#define CFG_PREF_MEM_LIMIT_BASE_MEM_LIMIT_DECODE BIT(16)
+
+#define CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF 0x718
+#define CFG_TIMER_CTRL_ACK_NAK_SHIFT (19)
+
+#define N_FTS_VAL 52
+#define FTS_VAL 52
+
+#define PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT 0x8D0
+#define AMBA_ERROR_RESPONSE_RRS_SHIFT 3
+#define AMBA_ERROR_RESPONSE_RRS_MASK GENMASK(1, 0)
+#define AMBA_ERROR_RESPONSE_RRS_OKAY 0
+#define AMBA_ERROR_RESPONSE_RRS_OKAY_FFFFFFFF 1
+#define AMBA_ERROR_RESPONSE_RRS_OKAY_FFFF0001 2
+
+#define MSIX_ADDR_MATCH_LOW_OFF 0x940
+#define MSIX_ADDR_MATCH_LOW_OFF_EN BIT(0)
+#define MSIX_ADDR_MATCH_LOW_OFF_MASK GENMASK(31, 2)
+
+#define MSIX_ADDR_MATCH_HIGH_OFF 0x944
+#define MSIX_ADDR_MATCH_HIGH_OFF_MASK GENMASK(31, 0)
+
+#define PORT_LOGIC_MSIX_DOORBELL 0x948
+
+#define CAP_SPCIE_CAP_OFF 0x154
+#define CAP_SPCIE_CAP_OFF_DSP_TX_PRESET0_MASK GENMASK(3, 0)
+#define CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_MASK GENMASK(11, 8)
+#define CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_SHIFT 8
+
+#define PME_ACK_TIMEOUT 10000
+
+#define LTSSM_TIMEOUT 50000 /* 50ms */
+
+#define GEN3_GEN4_EQ_PRESET_INIT 5
+
+#define GEN1_CORE_CLK_FREQ 62500000
+#define GEN2_CORE_CLK_FREQ 125000000
+#define GEN3_CORE_CLK_FREQ 250000000
+#define GEN4_CORE_CLK_FREQ 500000000
+
+#define LTR_MSG_TIMEOUT (100 * 1000)
+
+#define PERST_DEBOUNCE_TIME (5 * 1000)
+
+#define EP_STATE_DISABLED 0
+#define EP_STATE_ENABLED 1
+
+static const unsigned int pcie_gen_freq[] = {
+ GEN1_CORE_CLK_FREQ, /* PCI_EXP_LNKSTA_CLS == 0; undefined */
+ GEN1_CORE_CLK_FREQ,
+ GEN2_CORE_CLK_FREQ,
+ GEN3_CORE_CLK_FREQ,
+ GEN4_CORE_CLK_FREQ
+};
+
+struct tegra_pcie_dw_of_data {
+ u32 version;
+ enum dw_pcie_device_mode mode;
+ bool has_msix_doorbell_access_fix;
+ bool has_sbr_reset_fix;
+ bool has_l1ss_exit_fix;
+ bool has_ltr_req_fix;
+ u32 cdm_chk_int_en_bit;
+ u32 gen4_preset_vec;
+ u8 n_fts[2];
+};
+
+struct tegra_pcie_dw {
+ struct device *dev;
+ struct resource *appl_res;
+ struct resource *dbi_res;
+ struct resource *atu_dma_res;
+ void __iomem *appl_base;
+ struct clk *core_clk;
+ struct reset_control *core_apb_rst;
+ struct reset_control *core_rst;
+ struct dw_pcie pci;
+ struct tegra_bpmp *bpmp;
+
+ struct tegra_pcie_dw_of_data *of_data;
+
+ bool supports_clkreq;
+ bool enable_cdm_check;
+ bool enable_srns;
+ bool link_state;
+ bool update_fc_fixup;
+ bool enable_ext_refclk;
+ u8 init_link_width;
+ u32 msi_ctrl_int;
+ u32 num_lanes;
+ u32 cid;
+ u32 ras_des_cap;
+ u32 pcie_cap_base;
+ u32 aspm_cmrt;
+ u32 aspm_pwr_on_t;
+ u32 aspm_l0s_enter_lat;
+
+ struct regulator *pex_ctl_supply;
+ struct regulator *slot_ctl_3v3;
+ struct regulator *slot_ctl_12v;
+
+ unsigned int phy_count;
+ struct phy **phys;
+
+ struct dentry *debugfs;
+
+ /* Endpoint mode specific */
+ struct gpio_desc *pex_rst_gpiod;
+ struct gpio_desc *pex_refclk_sel_gpiod;
+ unsigned int pex_rst_irq;
+ int ep_state;
+ long link_status;
+ struct icc_path *icc_path;
+};
+
+static inline struct tegra_pcie_dw *to_tegra_pcie(struct dw_pcie *pci)
+{
+ return container_of(pci, struct tegra_pcie_dw, pci);
+}
+
+static inline void appl_writel(struct tegra_pcie_dw *pcie, const u32 value,
+ const u32 reg)
+{
+ writel_relaxed(value, pcie->appl_base + reg);
+}
+
+static inline u32 appl_readl(struct tegra_pcie_dw *pcie, const u32 reg)
+{
+ return readl_relaxed(pcie->appl_base + reg);
+}
+
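+/*
+ * Scale the interconnect bandwidth request and the core clock to the
+ * negotiated link speed and width; out-of-range speed values fall back to
+ * the Gen1 clock rate.
+ */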
+static void tegra_pcie_icc_set(struct tegra_pcie_dw *pcie)
+{
+ struct dw_pcie *pci = &pcie->pci;
+ u32 val, speed, width;
+
+ val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA);
+
+ speed = FIELD_GET(PCI_EXP_LNKSTA_CLS, val);
+ width = FIELD_GET(PCI_EXP_LNKSTA_NLW, val);
+
+ val = width * PCIE_SPEED2MBS_ENC(pcie_link_speed[speed]);
+
+ if (icc_set_bw(pcie->icc_path, Mbps_to_icc(val), 0))
+ dev_err(pcie->dev, "can't set bw[%u]\n", val);
+
+ if (speed >= ARRAY_SIZE(pcie_gen_freq))
+ speed = 0;
+
+ clk_set_rate(pcie->core_clk, pcie_gen_freq[speed]);
+}
+
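+/*
+ * WAR for links that train at a degraded width: if link bandwidth
+ * management was flagged and the width shrank from its initial value,
+ * force the target speed down to 2.5 GT/s and retrain.
+ */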
+static void apply_bad_link_workaround(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
+ u32 current_link_width;
+ u16 val;
+
+ /*
+	 * NOTE: since this scenario is uncommon and the link is not stable
+	 * anyway, don't wait to confirm that the link really transitions to
+	 * Gen-2 speed.
+ */
+ val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA);
+ if (val & PCI_EXP_LNKSTA_LBMS) {
+ current_link_width = FIELD_GET(PCI_EXP_LNKSTA_NLW, val);
+ if (pcie->init_link_width > current_link_width) {
+ dev_warn(pci->dev, "PCIe link is bad, width reduced\n");
+ val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
+ PCI_EXP_LNKCTL2);
+ val &= ~PCI_EXP_LNKCTL2_TLS;
+ val |= PCI_EXP_LNKCTL2_TLS_2_5GT;
+ dw_pcie_writew_dbi(pci, pcie->pcie_cap_base +
+ PCI_EXP_LNKCTL2, val);
+
+ val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
+ PCI_EXP_LNKCTL);
+ val |= PCI_EXP_LNKCTL_RL;
+ dw_pcie_writew_dbi(pci, pcie->pcie_cap_base +
+ PCI_EXP_LNKCTL, val);
+ }
+ }
+}
+
+static irqreturn_t tegra_pcie_rp_irq_handler(int irq, void *arg)
+{
+ struct tegra_pcie_dw *pcie = arg;
+ struct dw_pcie *pci = &pcie->pci;
+ struct dw_pcie_rp *pp = &pci->pp;
+ u32 val, status_l0, status_l1;
+ u16 val_w;
+
+ status_l0 = appl_readl(pcie, APPL_INTR_STATUS_L0);
+ if (status_l0 & APPL_INTR_STATUS_L0_LINK_STATE_INT) {
+ status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_0_0);
+ appl_writel(pcie, status_l1, APPL_INTR_STATUS_L1_0_0);
+ if (!pcie->of_data->has_sbr_reset_fix &&
+ status_l1 & APPL_INTR_STATUS_L1_0_0_LINK_REQ_RST_NOT_CHGED) {
+ /* SBR & Surprise Link Down WAR */
+ val = appl_readl(pcie, APPL_CAR_RESET_OVRD);
+ val &= ~APPL_CAR_RESET_OVRD_CYA_OVERRIDE_CORE_RST_N;
+ appl_writel(pcie, val, APPL_CAR_RESET_OVRD);
+ udelay(1);
+ val = appl_readl(pcie, APPL_CAR_RESET_OVRD);
+ val |= APPL_CAR_RESET_OVRD_CYA_OVERRIDE_CORE_RST_N;
+ appl_writel(pcie, val, APPL_CAR_RESET_OVRD);
+
+ val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
+ val |= PORT_LOGIC_SPEED_CHANGE;
+ dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
+ }
+ }
+
+ if (status_l0 & APPL_INTR_STATUS_L0_INT_INT) {
+ status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_8_0);
+ if (status_l1 & APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS) {
+ appl_writel(pcie,
+ APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS,
+ APPL_INTR_STATUS_L1_8_0);
+ apply_bad_link_workaround(pp);
+ }
+ if (status_l1 & APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS) {
+ val_w = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
+ PCI_EXP_LNKSTA);
+ val_w |= PCI_EXP_LNKSTA_LBMS;
+ dw_pcie_writew_dbi(pci, pcie->pcie_cap_base +
+ PCI_EXP_LNKSTA, val_w);
+
+ appl_writel(pcie,
+ APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS,
+ APPL_INTR_STATUS_L1_8_0);
+
+ val_w = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
+ PCI_EXP_LNKSTA);
+			dev_dbg(pci->dev, "Link Speed: Gen-%u\n",
+				val_w & PCI_EXP_LNKSTA_CLS);
+ }
+ }
+
+ if (status_l0 & APPL_INTR_STATUS_L0_CDM_REG_CHK_INT) {
+ status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_18);
+ val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS);
+ if (status_l1 & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMPLT) {
+ dev_info(pci->dev, "CDM check complete\n");
+ val |= PCIE_PL_CHK_REG_CHK_REG_COMPLETE;
+ }
+ if (status_l1 & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMP_ERR) {
+ dev_err(pci->dev, "CDM comparison mismatch\n");
+ val |= PCIE_PL_CHK_REG_CHK_REG_COMPARISON_ERROR;
+ }
+ if (status_l1 & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_LOGIC_ERR) {
+ dev_err(pci->dev, "CDM Logic error\n");
+ val |= PCIE_PL_CHK_REG_CHK_REG_LOGIC_ERROR;
+ }
+ dw_pcie_writel_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS, val);
+ val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_ERR_ADDR);
+ dev_err(pci->dev, "CDM Error Address Offset = 0x%08X\n", val);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void pex_ep_event_hot_rst_done(struct tegra_pcie_dw *pcie)
+{
+ u32 val;
+
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L0);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_0_0);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_1);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_2);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_3);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_6);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_7);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_8_0);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_9);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_10);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_11);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_13);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_14);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_15);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_17);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_MSI_CTRL_2);
+
+ val = appl_readl(pcie, APPL_CTRL);
+ val |= APPL_CTRL_LTSSM_EN;
+ appl_writel(pcie, val, APPL_CTRL);
+}
+
+static irqreturn_t tegra_pcie_ep_irq_thread(int irq, void *arg)
+{
+ struct tegra_pcie_dw *pcie = arg;
+ struct dw_pcie_ep *ep = &pcie->pci.ep;
+ struct dw_pcie *pci = &pcie->pci;
+ u32 val;
+
+ if (test_and_clear_bit(0, &pcie->link_status))
+ dw_pcie_ep_linkup(ep);
+
+ tegra_pcie_icc_set(pcie);
+
+ if (pcie->of_data->has_ltr_req_fix)
+ return IRQ_HANDLED;
+
+ /* If EP doesn't advertise L1SS, just return */
+ if (!pci->l1ss_support)
+ return IRQ_HANDLED;
+
+	/* Check whether BME (Bus Master Enable) is set */
+ val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
+ if (val & PCI_COMMAND_MASTER) {
+ ktime_t timeout;
+
+ /* 110us for both snoop and no-snoop */
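+		/*
+		 * The LTR scale field encodes a multiplier of 2^(5 * scale)
+		 * ns, so scale 2 selects 1024 ns and a value of 110 encodes
+		 * 110 * 1024 ns ~= 112.6 us, the closest encodable latency
+		 * at or above 110 us.
+		 */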
+ val = FIELD_PREP(PCI_LTR_VALUE_MASK, 110) |
+ FIELD_PREP(PCI_LTR_SCALE_MASK, 2) |
+ LTR_MSG_REQ |
+ FIELD_PREP(PCI_LTR_NOSNOOP_VALUE, 110) |
+ FIELD_PREP(PCI_LTR_NOSNOOP_SCALE, 2) |
+ LTR_NOSNOOP_MSG_REQ;
+ appl_writel(pcie, val, APPL_LTR_MSG_1);
+
+ /* Send LTR upstream */
+ val = appl_readl(pcie, APPL_LTR_MSG_2);
+ val |= APPL_LTR_MSG_2_LTR_MSG_REQ_STATE;
+ appl_writel(pcie, val, APPL_LTR_MSG_2);
+
+ timeout = ktime_add_us(ktime_get(), LTR_MSG_TIMEOUT);
+ for (;;) {
+ val = appl_readl(pcie, APPL_LTR_MSG_2);
+ if (!(val & APPL_LTR_MSG_2_LTR_MSG_REQ_STATE))
+ break;
+ if (ktime_after(ktime_get(), timeout))
+ break;
+ usleep_range(1000, 1100);
+ }
+ if (val & APPL_LTR_MSG_2_LTR_MSG_REQ_STATE)
+ dev_err(pcie->dev, "Failed to send LTR message\n");
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t tegra_pcie_ep_hard_irq(int irq, void *arg)
+{
+ struct tegra_pcie_dw *pcie = arg;
+ int spurious = 1;
+ u32 status_l0, status_l1, link_status;
+
+ status_l0 = appl_readl(pcie, APPL_INTR_STATUS_L0);
+ if (status_l0 & APPL_INTR_STATUS_L0_LINK_STATE_INT) {
+ status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_0_0);
+ appl_writel(pcie, status_l1, APPL_INTR_STATUS_L1_0_0);
+
+ if (status_l1 & APPL_INTR_STATUS_L1_0_0_HOT_RESET_DONE)
+ pex_ep_event_hot_rst_done(pcie);
+
+ if (status_l1 & APPL_INTR_STATUS_L1_0_0_RDLH_LINK_UP_CHGED) {
+ link_status = appl_readl(pcie, APPL_LINK_STATUS);
+ if (link_status & APPL_LINK_STATUS_RDLH_LINK_UP) {
+ dev_dbg(pcie->dev, "Link is up with Host\n");
+ set_bit(0, &pcie->link_status);
+ return IRQ_WAKE_THREAD;
+ }
+ }
+
+ spurious = 0;
+ }
+
+ if (status_l0 & APPL_INTR_STATUS_L0_PCI_CMD_EN_INT) {
+ status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_15);
+ appl_writel(pcie, status_l1, APPL_INTR_STATUS_L1_15);
+
+ if (status_l1 & APPL_INTR_STATUS_L1_15_CFG_BME_CHGED)
+ return IRQ_WAKE_THREAD;
+
+ spurious = 0;
+ }
+
+ if (spurious) {
+		dev_warn(pcie->dev, "Spurious interrupt (STATUS = 0x%08X)\n",
+			 status_l0);
+ appl_writel(pcie, status_l0, APPL_INTR_STATUS_L0);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int tegra_pcie_dw_rd_own_conf(struct pci_bus *bus, u32 devfn, int where,
+ int size, u32 *val)
+{
+ struct dw_pcie_rp *pp = bus->sysdata;
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
+
+	/*
+	 * This endpoint-mode-specific register happens to appear even when
+	 * the controller is operating in Root Port mode, and the system
+	 * hangs when it is accessed with the link in the ASPM-L1 state.
+	 * So skip accessing it altogether.
+	 */
+ if (!pcie->of_data->has_msix_doorbell_access_fix &&
+ !PCI_SLOT(devfn) && where == PORT_LOGIC_MSIX_DOORBELL) {
+ *val = 0x00000000;
+ return PCIBIOS_SUCCESSFUL;
+ }
+
+ return pci_generic_config_read(bus, devfn, where, size, val);
+}
+
+static int tegra_pcie_dw_wr_own_conf(struct pci_bus *bus, u32 devfn, int where,
+ int size, u32 val)
+{
+ struct dw_pcie_rp *pp = bus->sysdata;
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
+
+	/*
+	 * This endpoint-mode-specific register happens to appear even when
+	 * the controller is operating in Root Port mode, and the system
+	 * hangs when it is accessed with the link in the ASPM-L1 state.
+	 * So skip accessing it altogether.
+	 */
+ if (!pcie->of_data->has_msix_doorbell_access_fix &&
+ !PCI_SLOT(devfn) && where == PORT_LOGIC_MSIX_DOORBELL)
+ return PCIBIOS_SUCCESSFUL;
+
+ return pci_generic_config_write(bus, devfn, where, size, val);
+}
+
+static struct pci_ops tegra_pci_ops = {
+ .map_bus = dw_pcie_own_conf_map_bus,
+ .read = tegra_pcie_dw_rd_own_conf,
+ .write = tegra_pcie_dw_wr_own_conf,
+};
+
+#if defined(CONFIG_PCIEASPM)
+static inline u32 event_counter_prog(struct tegra_pcie_dw *pcie, u32 event)
+{
+ u32 val;
+
+ val = dw_pcie_readl_dbi(&pcie->pci, pcie->ras_des_cap +
+ PCIE_RAS_DES_EVENT_COUNTER_CONTROL);
+ val &= ~(EVENT_COUNTER_EVENT_SEL_MASK << EVENT_COUNTER_EVENT_SEL_SHIFT);
+ val |= EVENT_COUNTER_GROUP_5 << EVENT_COUNTER_GROUP_SEL_SHIFT;
+ val |= event << EVENT_COUNTER_EVENT_SEL_SHIFT;
+ val |= EVENT_COUNTER_ENABLE_ALL << EVENT_COUNTER_ENABLE_SHIFT;
+ dw_pcie_writel_dbi(&pcie->pci, pcie->ras_des_cap +
+ PCIE_RAS_DES_EVENT_COUNTER_CONTROL, val);
+ val = dw_pcie_readl_dbi(&pcie->pci, pcie->ras_des_cap +
+ PCIE_RAS_DES_EVENT_COUNTER_DATA);
+
+ return val;
+}
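+
+/*
+ * Note: event_counter_prog() both selects the requested event in Group 5
+ * of the RAS DES capability (the group holding the ASPM state-transition
+ * events read below) and returns the current value of that counter.
+ */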
+
+static int aspm_state_cnt(struct seq_file *s, void *data)
+{
+	struct tegra_pcie_dw *pcie = dev_get_drvdata(s->private);
+ u32 val;
+
+ seq_printf(s, "Tx L0s entry count : %u\n",
+ event_counter_prog(pcie, EVENT_COUNTER_EVENT_Tx_L0S));
+
+ seq_printf(s, "Rx L0s entry count : %u\n",
+ event_counter_prog(pcie, EVENT_COUNTER_EVENT_Rx_L0S));
+
+ seq_printf(s, "Link L1 entry count : %u\n",
+ event_counter_prog(pcie, EVENT_COUNTER_EVENT_L1));
+
+ seq_printf(s, "Link L1.1 entry count : %u\n",
+ event_counter_prog(pcie, EVENT_COUNTER_EVENT_L1_1));
+
+ seq_printf(s, "Link L1.2 entry count : %u\n",
+ event_counter_prog(pcie, EVENT_COUNTER_EVENT_L1_2));
+
+ /* Clear all counters */
+ dw_pcie_writel_dbi(&pcie->pci, pcie->ras_des_cap +
+ PCIE_RAS_DES_EVENT_COUNTER_CONTROL,
+ EVENT_COUNTER_ALL_CLEAR);
+
+ /* Re-enable counting */
+ val = EVENT_COUNTER_ENABLE_ALL << EVENT_COUNTER_ENABLE_SHIFT;
+ val |= EVENT_COUNTER_GROUP_5 << EVENT_COUNTER_GROUP_SEL_SHIFT;
+ dw_pcie_writel_dbi(&pcie->pci, pcie->ras_des_cap +
+ PCIE_RAS_DES_EVENT_COUNTER_CONTROL, val);
+
+ return 0;
+}
+
+static void init_host_aspm(struct tegra_pcie_dw *pcie)
+{
+ struct dw_pcie *pci = &pcie->pci;
+ u32 l1ss, val;
+
+ l1ss = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_L1SS);
+
+ pcie->ras_des_cap = dw_pcie_find_ext_capability(&pcie->pci,
+ PCI_EXT_CAP_ID_VNDR);
+
+ /* Enable ASPM counters */
+ val = EVENT_COUNTER_ENABLE_ALL << EVENT_COUNTER_ENABLE_SHIFT;
+ val |= EVENT_COUNTER_GROUP_5 << EVENT_COUNTER_GROUP_SEL_SHIFT;
+ dw_pcie_writel_dbi(pci, pcie->ras_des_cap +
+ PCIE_RAS_DES_EVENT_COUNTER_CONTROL, val);
+
+ /* Program T_cmrt and T_pwr_on values */
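+	/*
+	 * The shifts below are the field offsets within PCI_L1SS_CAP:
+	 * Common_Mode_Restore_Time occupies bits 15:8 and the T_POWER_ON
+	 * value bits 23:19.
+	 */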
+ val = dw_pcie_readl_dbi(pci, l1ss + PCI_L1SS_CAP);
+ val &= ~(PCI_L1SS_CAP_CM_RESTORE_TIME | PCI_L1SS_CAP_P_PWR_ON_VALUE);
+ val |= (pcie->aspm_cmrt << 8);
+ val |= (pcie->aspm_pwr_on_t << 19);
+ dw_pcie_writel_dbi(pci, l1ss + PCI_L1SS_CAP, val);
+
+ if (pcie->supports_clkreq)
+ pci->l1ss_support = true;
+
+ /* Program L0s and L1 entrance latencies */
+ val = dw_pcie_readl_dbi(pci, PCIE_PORT_AFR);
+ val &= ~PORT_AFR_L0S_ENTRANCE_LAT_MASK;
+ val |= (pcie->aspm_l0s_enter_lat << PORT_AFR_L0S_ENTRANCE_LAT_SHIFT);
+ val |= PORT_AFR_ENTER_ASPM;
+ dw_pcie_writel_dbi(pci, PCIE_PORT_AFR, val);
+}
+
+static void init_debugfs(struct tegra_pcie_dw *pcie)
+{
+ struct device *dev = pcie->dev;
+ char *name;
+
+ name = devm_kasprintf(dev, GFP_KERNEL, "%pOFP", dev->of_node);
+ if (!name)
+ return;
+
+ pcie->debugfs = debugfs_create_dir(name, NULL);
+
+ debugfs_create_devm_seqfile(dev, "aspm_state_cnt", pcie->debugfs,
+ aspm_state_cnt);
+}
+#else
+static inline void init_host_aspm(struct tegra_pcie_dw *pcie) { return; }
+static inline void init_debugfs(struct tegra_pcie_dw *pcie) { return; }
+#endif
+
+static void tegra_pcie_enable_system_interrupts(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
+ u32 val;
+ u16 val_w;
+
+ val = appl_readl(pcie, APPL_INTR_EN_L0_0);
+ val |= APPL_INTR_EN_L0_0_LINK_STATE_INT_EN;
+ appl_writel(pcie, val, APPL_INTR_EN_L0_0);
+
+ if (!pcie->of_data->has_sbr_reset_fix) {
+ val = appl_readl(pcie, APPL_INTR_EN_L1_0_0);
+ val |= APPL_INTR_EN_L1_0_0_LINK_REQ_RST_NOT_INT_EN;
+ appl_writel(pcie, val, APPL_INTR_EN_L1_0_0);
+ }
+
+ if (pcie->enable_cdm_check) {
+ val = appl_readl(pcie, APPL_INTR_EN_L0_0);
+ val |= pcie->of_data->cdm_chk_int_en_bit;
+ appl_writel(pcie, val, APPL_INTR_EN_L0_0);
+
+ val = appl_readl(pcie, APPL_INTR_EN_L1_18);
+ val |= APPL_INTR_EN_L1_18_CDM_REG_CHK_CMP_ERR;
+ val |= APPL_INTR_EN_L1_18_CDM_REG_CHK_LOGIC_ERR;
+ appl_writel(pcie, val, APPL_INTR_EN_L1_18);
+ }
+
+ val_w = dw_pcie_readw_dbi(&pcie->pci, pcie->pcie_cap_base +
+ PCI_EXP_LNKSTA);
+ pcie->init_link_width = FIELD_GET(PCI_EXP_LNKSTA_NLW, val_w);
+
+ val_w = dw_pcie_readw_dbi(&pcie->pci, pcie->pcie_cap_base +
+ PCI_EXP_LNKCTL);
+ val_w |= PCI_EXP_LNKCTL_LBMIE;
+ dw_pcie_writew_dbi(&pcie->pci, pcie->pcie_cap_base + PCI_EXP_LNKCTL,
+ val_w);
+}
+
+static void tegra_pcie_enable_intx_interrupts(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
+ u32 val;
+
+ /* Enable INTX interrupt generation */
+ val = appl_readl(pcie, APPL_INTR_EN_L0_0);
+ val |= APPL_INTR_EN_L0_0_SYS_INTR_EN;
+ val |= APPL_INTR_EN_L0_0_INT_INT_EN;
+ appl_writel(pcie, val, APPL_INTR_EN_L0_0);
+
+ val = appl_readl(pcie, APPL_INTR_EN_L1_8_0);
+ val |= APPL_INTR_EN_L1_8_INTX_EN;
+ val |= APPL_INTR_EN_L1_8_AUTO_BW_INT_EN;
+ val |= APPL_INTR_EN_L1_8_BW_MGT_INT_EN;
+ if (IS_ENABLED(CONFIG_PCIEAER))
+ val |= APPL_INTR_EN_L1_8_AER_INT_EN;
+ appl_writel(pcie, val, APPL_INTR_EN_L1_8_0);
+}
+
+static void tegra_pcie_enable_msi_interrupts(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
+ u32 val;
+
+ /* Enable MSI interrupt generation */
+ val = appl_readl(pcie, APPL_INTR_EN_L0_0);
+ val |= APPL_INTR_EN_L0_0_SYS_MSI_INTR_EN;
+ val |= APPL_INTR_EN_L0_0_MSI_RCV_INT_EN;
+ appl_writel(pcie, val, APPL_INTR_EN_L0_0);
+}
+
+static void tegra_pcie_enable_interrupts(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
+
+ /* Clear interrupt statuses before enabling interrupts */
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L0);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_0_0);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_1);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_2);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_3);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_6);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_7);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_8_0);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_9);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_10);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_11);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_13);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_14);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_15);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_17);
+
+ tegra_pcie_enable_system_interrupts(pp);
+ tegra_pcie_enable_intx_interrupts(pp);
+ if (IS_ENABLED(CONFIG_PCI_MSI))
+ tegra_pcie_enable_msi_interrupts(pp);
+}
+
+static void config_gen3_gen4_eq_presets(struct tegra_pcie_dw *pcie)
+{
+ struct dw_pcie *pci = &pcie->pci;
+ u32 val, offset, i;
+
+ /* Program init preset */
+ for (i = 0; i < pcie->num_lanes; i++) {
+ val = dw_pcie_readw_dbi(pci, CAP_SPCIE_CAP_OFF + (i * 2));
+ val &= ~CAP_SPCIE_CAP_OFF_DSP_TX_PRESET0_MASK;
+ val |= GEN3_GEN4_EQ_PRESET_INIT;
+ val &= ~CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_MASK;
+ val |= (GEN3_GEN4_EQ_PRESET_INIT <<
+ CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_SHIFT);
+ dw_pcie_writew_dbi(pci, CAP_SPCIE_CAP_OFF + (i * 2), val);
+
+ offset = dw_pcie_find_ext_capability(pci,
+ PCI_EXT_CAP_ID_PL_16GT) +
+ PCI_PL_16GT_LE_CTRL;
+ val = dw_pcie_readb_dbi(pci, offset + i);
+ val &= ~PCI_PL_16GT_LE_CTRL_DSP_TX_PRESET_MASK;
+ val |= GEN3_GEN4_EQ_PRESET_INIT;
+ val &= ~PCI_PL_16GT_LE_CTRL_USP_TX_PRESET_MASK;
+ val |= (GEN3_GEN4_EQ_PRESET_INIT <<
+ PCI_PL_16GT_LE_CTRL_USP_TX_PRESET_SHIFT);
+ dw_pcie_writeb_dbi(pci, offset + i, val);
+ }
+
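+	/*
+	 * GEN3_EQ_CONTROL_OFF is rate-shadowed: with RATE_SHADOW_SEL = 0 it
+	 * programs the 8 GT/s equalization settings and with
+	 * RATE_SHADOW_SEL = 1 the 16 GT/s settings, which is why it is
+	 * written twice below. 0x3ff requests presets 0-9 for 8 GT/s, while
+	 * gen4_preset_vec selects the 16 GT/s presets (e.g. 0x360 enables
+	 * presets 5, 6, 8 and 9).
+	 */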
+ val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
+ val &= ~GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK;
+ dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
+
+ val = dw_pcie_readl_dbi(pci, GEN3_EQ_CONTROL_OFF);
+ val &= ~GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC;
+ val |= FIELD_PREP(GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC, 0x3ff);
+ val &= ~GEN3_EQ_CONTROL_OFF_FB_MODE;
+ dw_pcie_writel_dbi(pci, GEN3_EQ_CONTROL_OFF, val);
+
+ val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
+ val &= ~GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK;
+ val |= (0x1 << GEN3_RELATED_OFF_RATE_SHADOW_SEL_SHIFT);
+ dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
+
+ val = dw_pcie_readl_dbi(pci, GEN3_EQ_CONTROL_OFF);
+ val &= ~GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC;
+ val |= FIELD_PREP(GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC,
+ pcie->of_data->gen4_preset_vec);
+ val &= ~GEN3_EQ_CONTROL_OFF_FB_MODE;
+ dw_pcie_writel_dbi(pci, GEN3_EQ_CONTROL_OFF, val);
+
+ val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
+ val &= ~GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK;
+ dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
+}
+
+static int tegra_pcie_dw_host_init(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
+ u32 val;
+ u16 val_16;
+
+ pp->bridge->ops = &tegra_pci_ops;
+
+ if (!pcie->pcie_cap_base)
+ pcie->pcie_cap_base = dw_pcie_find_capability(&pcie->pci,
+ PCI_CAP_ID_EXP);
+
+ val = dw_pcie_readl_dbi(pci, PCI_IO_BASE);
+ val &= ~(IO_BASE_IO_DECODE | IO_BASE_IO_DECODE_BIT8);
+ dw_pcie_writel_dbi(pci, PCI_IO_BASE, val);
+
+ val = dw_pcie_readl_dbi(pci, PCI_PREF_MEMORY_BASE);
+ val |= CFG_PREF_MEM_LIMIT_BASE_MEM_DECODE;
+ val |= CFG_PREF_MEM_LIMIT_BASE_MEM_LIMIT_DECODE;
+ dw_pcie_writel_dbi(pci, PCI_PREF_MEMORY_BASE, val);
+
+ dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);
+
+	/* Return the 0xFFFF0001 response for RRS (Request Retry Status) */
+ val = dw_pcie_readl_dbi(pci, PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT);
+ val &= ~(AMBA_ERROR_RESPONSE_RRS_MASK << AMBA_ERROR_RESPONSE_RRS_SHIFT);
+ val |= (AMBA_ERROR_RESPONSE_RRS_OKAY_FFFF0001 <<
+ AMBA_ERROR_RESPONSE_RRS_SHIFT);
+ dw_pcie_writel_dbi(pci, PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT, val);
+
+	/* Clear the Slot Clock Configuration bit in SRNS configurations */
+ if (pcie->enable_srns) {
+ val_16 = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
+ PCI_EXP_LNKSTA);
+ val_16 &= ~PCI_EXP_LNKSTA_SLC;
+ dw_pcie_writew_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA,
+ val_16);
+ }
+
+ config_gen3_gen4_eq_presets(pcie);
+
+ init_host_aspm(pcie);
+
+ if (!pcie->of_data->has_l1ss_exit_fix) {
+ val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
+ val &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL;
+ dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
+ }
+
+ if (pcie->update_fc_fixup) {
+ val = dw_pcie_readl_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF);
+ val |= 0x1 << CFG_TIMER_CTRL_ACK_NAK_SHIFT;
+ dw_pcie_writel_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF, val);
+ }
+
+ clk_set_rate(pcie->core_clk, GEN4_CORE_CLK_FREQ);
+
+ return 0;
+}
+
+static int tegra_pcie_dw_start_link(struct dw_pcie *pci)
+{
+ struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
+ struct dw_pcie_rp *pp = &pci->pp;
+ u32 val, offset, tmp;
+ bool retry = true;
+
+ if (pcie->of_data->mode == DW_PCIE_EP_TYPE) {
+ enable_irq(pcie->pex_rst_irq);
+ return 0;
+ }
+
+retry_link:
+ /* Assert RST */
+ val = appl_readl(pcie, APPL_PINMUX);
+ val &= ~APPL_PINMUX_PEX_RST;
+ appl_writel(pcie, val, APPL_PINMUX);
+
+ usleep_range(100, 200);
+
+ /* Enable LTSSM */
+ val = appl_readl(pcie, APPL_CTRL);
+ val |= APPL_CTRL_LTSSM_EN;
+ appl_writel(pcie, val, APPL_CTRL);
+
+ /* De-assert RST */
+ val = appl_readl(pcie, APPL_PINMUX);
+ val |= APPL_PINMUX_PEX_RST;
+ appl_writel(pcie, val, APPL_PINMUX);
+
+ msleep(100);
+
+ if (dw_pcie_wait_for_link(pci)) {
+ if (!retry)
+ return 0;
+		/*
+		 * Some endpoints cannot bring the link up if the Root Port
+		 * has the Data Link Feature (DLF) enabled. Refer to PCIe Base
+		 * Spec rev 4.0 ver 1.0, sec 3.4.2 & 7.7.4, for more
+		 * information on Scaled Flow Control and DLF. So confirm that
+		 * this is indeed the case here and attempt link-up once again
+		 * with DLF disabled.
+		 */
+ val = appl_readl(pcie, APPL_DEBUG);
+ val &= APPL_DEBUG_LTSSM_STATE_MASK;
+ val >>= APPL_DEBUG_LTSSM_STATE_SHIFT;
+ tmp = appl_readl(pcie, APPL_LINK_STATUS);
+ tmp &= APPL_LINK_STATUS_RDLH_LINK_UP;
+ if (!(val == 0x11 && !tmp)) {
+			/* Link is down for a legitimate reason */
+ return 0;
+ }
+
+		dev_info(pci->dev, "Link is down in DLL\n");
+ dev_info(pci->dev, "Trying again with DLFE disabled\n");
+ /* Disable LTSSM */
+ val = appl_readl(pcie, APPL_CTRL);
+ val &= ~APPL_CTRL_LTSSM_EN;
+ appl_writel(pcie, val, APPL_CTRL);
+
+ reset_control_assert(pcie->core_rst);
+ reset_control_deassert(pcie->core_rst);
+
+ offset = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_DLF);
+ val = dw_pcie_readl_dbi(pci, offset + PCI_DLF_CAP);
+ val &= ~PCI_DLF_EXCHANGE_ENABLE;
+ dw_pcie_writel_dbi(pci, offset + PCI_DLF_CAP, val);
+
+ tegra_pcie_dw_host_init(pp);
+ dw_pcie_setup_rc(pp);
+
+ retry = false;
+ goto retry_link;
+ }
+
+ tegra_pcie_icc_set(pcie);
+
+ tegra_pcie_enable_interrupts(pp);
+
+ return 0;
+}
+
+static bool tegra_pcie_dw_link_up(struct dw_pcie *pci)
+{
+ struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
+ u32 val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA);
+
+ return val & PCI_EXP_LNKSTA_DLLLA;
+}
+
+static void tegra_pcie_dw_stop_link(struct dw_pcie *pci)
+{
+ struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
+
+ disable_irq(pcie->pex_rst_irq);
+}
+
+static const struct dw_pcie_ops tegra_dw_pcie_ops = {
+ .link_up = tegra_pcie_dw_link_up,
+ .start_link = tegra_pcie_dw_start_link,
+ .stop_link = tegra_pcie_dw_stop_link,
+};
+
+static const struct dw_pcie_host_ops tegra_pcie_dw_host_ops = {
+ .init = tegra_pcie_dw_host_init,
+};
+
+static void tegra_pcie_disable_phy(struct tegra_pcie_dw *pcie)
+{
+ unsigned int phy_count = pcie->phy_count;
+
+ while (phy_count--) {
+ phy_power_off(pcie->phys[phy_count]);
+ phy_exit(pcie->phys[phy_count]);
+ }
+}
+
+static int tegra_pcie_enable_phy(struct tegra_pcie_dw *pcie)
+{
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < pcie->phy_count; i++) {
+ ret = phy_init(pcie->phys[i]);
+ if (ret < 0)
+ goto phy_power_off;
+
+ ret = phy_power_on(pcie->phys[i]);
+ if (ret < 0)
+ goto phy_exit;
+ }
+
+ return 0;
+
+phy_power_off:
+ while (i--) {
+ phy_power_off(pcie->phys[i]);
+phy_exit:
+ phy_exit(pcie->phys[i]);
+ }
+
+ return ret;
+}
+
+static int tegra_pcie_dw_parse_dt(struct tegra_pcie_dw *pcie)
+{
+ struct platform_device *pdev = to_platform_device(pcie->dev);
+ struct device_node *np = pcie->dev->of_node;
+ int ret;
+
+ pcie->dbi_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
+ if (!pcie->dbi_res) {
+ dev_err(pcie->dev, "Failed to find \"dbi\" region\n");
+ return -ENODEV;
+ }
+
+ ret = of_property_read_u32(np, "nvidia,aspm-cmrt-us", &pcie->aspm_cmrt);
+ if (ret < 0) {
+ dev_info(pcie->dev, "Failed to read ASPM T_cmrt: %d\n", ret);
+ return ret;
+ }
+
+ ret = of_property_read_u32(np, "nvidia,aspm-pwr-on-t-us",
+ &pcie->aspm_pwr_on_t);
+ if (ret < 0)
+ dev_info(pcie->dev, "Failed to read ASPM Power On time: %d\n",
+ ret);
+
+ ret = of_property_read_u32(np, "nvidia,aspm-l0s-entrance-latency-us",
+ &pcie->aspm_l0s_enter_lat);
+ if (ret < 0)
+ dev_info(pcie->dev,
+ "Failed to read ASPM L0s Entrance latency: %d\n", ret);
+
+ ret = of_property_read_u32(np, "num-lanes", &pcie->num_lanes);
+ if (ret < 0) {
+ dev_err(pcie->dev, "Failed to read num-lanes: %d\n", ret);
+ return ret;
+ }
+
+ ret = of_property_read_u32_index(np, "nvidia,bpmp", 1, &pcie->cid);
+ if (ret) {
+ dev_err(pcie->dev, "Failed to read Controller-ID: %d\n", ret);
+ return ret;
+ }
+
+ ret = of_property_count_strings(np, "phy-names");
+ if (ret < 0) {
+ dev_err(pcie->dev, "Failed to find PHY entries: %d\n",
+ ret);
+ return ret;
+ }
+ pcie->phy_count = ret;
+
+ if (of_property_read_bool(np, "nvidia,update-fc-fixup"))
+ pcie->update_fc_fixup = true;
+
+ /* RP using an external REFCLK is supported only in Tegra234 */
+ if (pcie->of_data->version == TEGRA194_DWC_IP_VER) {
+ if (pcie->of_data->mode == DW_PCIE_EP_TYPE)
+ pcie->enable_ext_refclk = true;
+ } else {
+ pcie->enable_ext_refclk =
+ of_property_read_bool(pcie->dev->of_node,
+ "nvidia,enable-ext-refclk");
+ }
+
+ pcie->supports_clkreq =
+ of_property_read_bool(pcie->dev->of_node, "supports-clkreq");
+
+ pcie->enable_cdm_check =
+ of_property_read_bool(np, "snps,enable-cdm-check");
+
+ if (pcie->of_data->version == TEGRA234_DWC_IP_VER)
+ pcie->enable_srns =
+ of_property_read_bool(np, "nvidia,enable-srns");
+
+ if (pcie->of_data->mode == DW_PCIE_RC_TYPE)
+ return 0;
+
+ /* Endpoint mode specific DT entries */
+ pcie->pex_rst_gpiod = devm_gpiod_get(pcie->dev, "reset", GPIOD_IN);
+ if (IS_ERR(pcie->pex_rst_gpiod)) {
+ int err = PTR_ERR(pcie->pex_rst_gpiod);
+ const char *level = KERN_ERR;
+
+ if (err == -EPROBE_DEFER)
+ level = KERN_DEBUG;
+
+ dev_printk(level, pcie->dev,
+ dev_fmt("Failed to get PERST GPIO: %d\n"),
+ err);
+ return err;
+ }
+
+ pcie->pex_refclk_sel_gpiod = devm_gpiod_get(pcie->dev,
+ "nvidia,refclk-select",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(pcie->pex_refclk_sel_gpiod)) {
+ int err = PTR_ERR(pcie->pex_refclk_sel_gpiod);
+ const char *level = KERN_ERR;
+
+ if (err == -EPROBE_DEFER)
+ level = KERN_DEBUG;
+
+ dev_printk(level, pcie->dev,
+ dev_fmt("Failed to get REFCLK select GPIOs: %d\n"),
+ err);
+ pcie->pex_refclk_sel_gpiod = NULL;
+ }
+
+ return 0;
+}
+
+static int tegra_pcie_bpmp_set_ctrl_state(struct tegra_pcie_dw *pcie,
+ bool enable)
+{
+ struct mrq_uphy_response resp;
+ struct tegra_bpmp_message msg;
+ struct mrq_uphy_request req;
+ int err;
+
+ /*
+ * Controller-5 doesn't need to have its state set by BPMP-FW in
+ * Tegra194
+ */
+ if (pcie->of_data->version == TEGRA194_DWC_IP_VER && pcie->cid == 5)
+ return 0;
+
+ memset(&req, 0, sizeof(req));
+ memset(&resp, 0, sizeof(resp));
+
+ req.cmd = CMD_UPHY_PCIE_CONTROLLER_STATE;
+ req.controller_state.pcie_controller = pcie->cid;
+ req.controller_state.enable = enable;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.mrq = MRQ_UPHY;
+ msg.tx.data = &req;
+ msg.tx.size = sizeof(req);
+ msg.rx.data = &resp;
+ msg.rx.size = sizeof(resp);
+
+ err = tegra_bpmp_transfer(pcie->bpmp, &msg);
+ if (err)
+ return err;
+ if (msg.rx.ret)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int tegra_pcie_bpmp_set_pll_state(struct tegra_pcie_dw *pcie,
+ bool enable)
+{
+ struct mrq_uphy_response resp;
+ struct tegra_bpmp_message msg;
+ struct mrq_uphy_request req;
+ int err;
+
+ memset(&req, 0, sizeof(req));
+ memset(&resp, 0, sizeof(resp));
+
+ if (enable) {
+ req.cmd = CMD_UPHY_PCIE_EP_CONTROLLER_PLL_INIT;
+ req.ep_ctrlr_pll_init.ep_controller = pcie->cid;
+ } else {
+ req.cmd = CMD_UPHY_PCIE_EP_CONTROLLER_PLL_OFF;
+ req.ep_ctrlr_pll_off.ep_controller = pcie->cid;
+ }
+
+ memset(&msg, 0, sizeof(msg));
+ msg.mrq = MRQ_UPHY;
+ msg.tx.data = &req;
+ msg.tx.size = sizeof(req);
+ msg.rx.data = &resp;
+ msg.rx.size = sizeof(resp);
+
+ err = tegra_bpmp_transfer(pcie->bpmp, &msg);
+ if (err)
+ return err;
+ if (msg.rx.ret)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void tegra_pcie_downstream_dev_to_D0(struct tegra_pcie_dw *pcie)
+{
+ struct dw_pcie_rp *pp = &pcie->pci.pp;
+ struct pci_bus *child, *root_port_bus = NULL;
+ struct pci_dev *pdev;
+
+	/*
+	 * The link doesn't go into the L2 state with some endpoints if they
+	 * are not in the D0 state. So make sure that the immediate
+	 * downstream devices are in D0 before sending PME_Turn_Off to put
+	 * the link into L2.
+	 * This is per PCI Express Base r4.0 v1.0, September 27 2017,
+	 * sec 5.2 "Link State Power Management" (page 428).
+	 */
+
+ list_for_each_entry(child, &pp->bridge->bus->children, node) {
+ if (child->parent == pp->bridge->bus) {
+ root_port_bus = child;
+ break;
+ }
+ }
+
+ if (!root_port_bus) {
+ dev_err(pcie->dev, "Failed to find downstream bus of Root Port\n");
+ return;
+ }
+
+ /* Bring downstream devices to D0 if they are not already in */
+ list_for_each_entry(pdev, &root_port_bus->devices, bus_list) {
+ if (PCI_SLOT(pdev->devfn) == 0) {
+ if (pci_set_power_state(pdev, PCI_D0))
+ dev_err(pcie->dev,
+ "Failed to transition %s to D0 state\n",
+ dev_name(&pdev->dev));
+ }
+ }
+}
+
+static int tegra_pcie_get_slot_regulators(struct tegra_pcie_dw *pcie)
+{
+ pcie->slot_ctl_3v3 = devm_regulator_get_optional(pcie->dev, "vpcie3v3");
+ if (IS_ERR(pcie->slot_ctl_3v3)) {
+ if (PTR_ERR(pcie->slot_ctl_3v3) != -ENODEV)
+ return PTR_ERR(pcie->slot_ctl_3v3);
+
+ pcie->slot_ctl_3v3 = NULL;
+ }
+
+ pcie->slot_ctl_12v = devm_regulator_get_optional(pcie->dev, "vpcie12v");
+ if (IS_ERR(pcie->slot_ctl_12v)) {
+ if (PTR_ERR(pcie->slot_ctl_12v) != -ENODEV)
+ return PTR_ERR(pcie->slot_ctl_12v);
+
+ pcie->slot_ctl_12v = NULL;
+ }
+
+ return 0;
+}
+
+static int tegra_pcie_enable_slot_regulators(struct tegra_pcie_dw *pcie)
+{
+ int ret;
+
+ if (pcie->slot_ctl_3v3) {
+ ret = regulator_enable(pcie->slot_ctl_3v3);
+ if (ret < 0) {
+ dev_err(pcie->dev,
+ "Failed to enable 3.3V slot supply: %d\n", ret);
+ return ret;
+ }
+ }
+
+ if (pcie->slot_ctl_12v) {
+ ret = regulator_enable(pcie->slot_ctl_12v);
+ if (ret < 0) {
+ dev_err(pcie->dev,
+ "Failed to enable 12V slot supply: %d\n", ret);
+ goto fail_12v_enable;
+ }
+ }
+
+ /*
+ * According to PCI Express Card Electromechanical Specification
+ * Revision 1.1, Table-2.4, T_PVPERL (Power stable to PERST# inactive)
+ * should be a minimum of 100ms.
+ */
+ if (pcie->slot_ctl_3v3 || pcie->slot_ctl_12v)
+ msleep(100);
+
+ return 0;
+
+fail_12v_enable:
+ if (pcie->slot_ctl_3v3)
+ regulator_disable(pcie->slot_ctl_3v3);
+ return ret;
+}
+
+static void tegra_pcie_disable_slot_regulators(struct tegra_pcie_dw *pcie)
+{
+ if (pcie->slot_ctl_12v)
+ regulator_disable(pcie->slot_ctl_12v);
+ if (pcie->slot_ctl_3v3)
+ regulator_disable(pcie->slot_ctl_3v3);
+}
+
+static int tegra_pcie_config_controller(struct tegra_pcie_dw *pcie,
+ bool en_hw_hot_rst)
+{
+ int ret;
+ u32 val;
+
+ ret = tegra_pcie_bpmp_set_ctrl_state(pcie, true);
+ if (ret) {
+ dev_err(pcie->dev,
+ "Failed to enable controller %u: %d\n", pcie->cid, ret);
+ return ret;
+ }
+
+ if (pcie->enable_ext_refclk) {
+ ret = tegra_pcie_bpmp_set_pll_state(pcie, true);
+ if (ret) {
+ dev_err(pcie->dev, "Failed to init UPHY: %d\n", ret);
+ goto fail_pll_init;
+ }
+ }
+
+ ret = tegra_pcie_enable_slot_regulators(pcie);
+ if (ret < 0)
+ goto fail_slot_reg_en;
+
+ ret = regulator_enable(pcie->pex_ctl_supply);
+ if (ret < 0) {
+ dev_err(pcie->dev, "Failed to enable regulator: %d\n", ret);
+ goto fail_reg_en;
+ }
+
+ ret = clk_prepare_enable(pcie->core_clk);
+ if (ret) {
+ dev_err(pcie->dev, "Failed to enable core clock: %d\n", ret);
+ goto fail_core_clk;
+ }
+
+ ret = reset_control_deassert(pcie->core_apb_rst);
+ if (ret) {
+ dev_err(pcie->dev, "Failed to deassert core APB reset: %d\n",
+ ret);
+ goto fail_core_apb_rst;
+ }
+
+ if (en_hw_hot_rst || pcie->of_data->has_sbr_reset_fix) {
+ /* Enable HW_HOT_RST mode */
+ val = appl_readl(pcie, APPL_CTRL);
+ val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK <<
+ APPL_CTRL_HW_HOT_RST_MODE_SHIFT);
+ val |= (APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST_LTSSM_EN <<
+ APPL_CTRL_HW_HOT_RST_MODE_SHIFT);
+ val |= APPL_CTRL_HW_HOT_RST_EN;
+ appl_writel(pcie, val, APPL_CTRL);
+ }
+
+ ret = tegra_pcie_enable_phy(pcie);
+ if (ret) {
+ dev_err(pcie->dev, "Failed to enable PHY: %d\n", ret);
+ goto fail_phy;
+ }
+
+ /* Update CFG base address */
+ appl_writel(pcie, pcie->dbi_res->start & APPL_CFG_BASE_ADDR_MASK,
+ APPL_CFG_BASE_ADDR);
+
+ /* Configure this core for RP mode operation */
+ appl_writel(pcie, APPL_DM_TYPE_RP, APPL_DM_TYPE);
+
+ appl_writel(pcie, 0x0, APPL_CFG_SLCG_OVERRIDE);
+
+ val = appl_readl(pcie, APPL_CTRL);
+ appl_writel(pcie, val | APPL_CTRL_SYS_PRE_DET_STATE, APPL_CTRL);
+
+ val = appl_readl(pcie, APPL_CFG_MISC);
+ val |= (APPL_CFG_MISC_ARCACHE_VAL << APPL_CFG_MISC_ARCACHE_SHIFT);
+ appl_writel(pcie, val, APPL_CFG_MISC);
+
+ if (pcie->enable_srns || pcie->enable_ext_refclk) {
+		/*
+		 * When the Tegra PCIe Root Port uses an external clock, it
+		 * cannot supply that same clock to its downstream hierarchy.
+		 * Hence, gate the Root Port's REFCLK output pads when the RP
+		 * and EP use separate clocks or when the RP uses an external
+		 * REFCLK.
+		 */
+ val = appl_readl(pcie, APPL_PINMUX);
+ val |= APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN;
+ val &= ~APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE;
+ appl_writel(pcie, val, APPL_PINMUX);
+ }
+
+ if (!pcie->supports_clkreq) {
+ val = appl_readl(pcie, APPL_PINMUX);
+ val |= APPL_PINMUX_CLKREQ_OVERRIDE_EN;
+ val &= ~APPL_PINMUX_CLKREQ_OVERRIDE;
+ appl_writel(pcie, val, APPL_PINMUX);
+ }
+
+ /* Update iATU_DMA base address */
+ appl_writel(pcie,
+ pcie->atu_dma_res->start & APPL_CFG_IATU_DMA_BASE_ADDR_MASK,
+ APPL_CFG_IATU_DMA_BASE_ADDR);
+
+ reset_control_deassert(pcie->core_rst);
+
+ return ret;
+
+fail_phy:
+ reset_control_assert(pcie->core_apb_rst);
+fail_core_apb_rst:
+ clk_disable_unprepare(pcie->core_clk);
+fail_core_clk:
+ regulator_disable(pcie->pex_ctl_supply);
+fail_reg_en:
+ tegra_pcie_disable_slot_regulators(pcie);
+fail_slot_reg_en:
+ if (pcie->enable_ext_refclk)
+ tegra_pcie_bpmp_set_pll_state(pcie, false);
+fail_pll_init:
+ tegra_pcie_bpmp_set_ctrl_state(pcie, false);
+
+ return ret;
+}
+
+static void tegra_pcie_unconfig_controller(struct tegra_pcie_dw *pcie)
+{
+ int ret;
+
+ ret = reset_control_assert(pcie->core_rst);
+ if (ret)
+ dev_err(pcie->dev, "Failed to assert \"core\" reset: %d\n", ret);
+
+ tegra_pcie_disable_phy(pcie);
+
+ ret = reset_control_assert(pcie->core_apb_rst);
+ if (ret)
+ dev_err(pcie->dev, "Failed to assert APB reset: %d\n", ret);
+
+ clk_disable_unprepare(pcie->core_clk);
+
+ ret = regulator_disable(pcie->pex_ctl_supply);
+ if (ret)
+ dev_err(pcie->dev, "Failed to disable regulator: %d\n", ret);
+
+ tegra_pcie_disable_slot_regulators(pcie);
+
+ if (pcie->enable_ext_refclk) {
+ ret = tegra_pcie_bpmp_set_pll_state(pcie, false);
+ if (ret)
+ dev_err(pcie->dev, "Failed to deinit UPHY: %d\n", ret);
+ }
+
+ ret = tegra_pcie_bpmp_set_ctrl_state(pcie, false);
+ if (ret)
+		dev_err(pcie->dev, "Failed to disable controller %u: %d\n",
+			pcie->cid, ret);
+}
+
+static int tegra_pcie_init_controller(struct tegra_pcie_dw *pcie)
+{
+ struct dw_pcie *pci = &pcie->pci;
+ struct dw_pcie_rp *pp = &pci->pp;
+ int ret;
+
+ ret = tegra_pcie_config_controller(pcie, false);
+ if (ret < 0)
+ return ret;
+
+ pp->ops = &tegra_pcie_dw_host_ops;
+
+ ret = dw_pcie_host_init(pp);
+ if (ret < 0) {
+ dev_err(pcie->dev, "Failed to add PCIe port: %d\n", ret);
+ goto fail_host_init;
+ }
+
+ return 0;
+
+fail_host_init:
+ tegra_pcie_unconfig_controller(pcie);
+ return ret;
+}
+
+static int tegra_pcie_try_link_l2(struct tegra_pcie_dw *pcie)
+{
+ u32 val;
+
+ if (!tegra_pcie_dw_link_up(&pcie->pci))
+ return 0;
+
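+	/*
+	 * Setting APPL_PM_XMT_TURNOFF_STATE broadcasts PME_Turn_Off
+	 * downstream; the endpoint acknowledges it and enters L23 Ready,
+	 * after which APPL_DEBUG (polled below) reports the link in L2.
+	 */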
+ val = appl_readl(pcie, APPL_RADM_STATUS);
+ val |= APPL_PM_XMT_TURNOFF_STATE;
+ appl_writel(pcie, val, APPL_RADM_STATUS);
+
+ return readl_poll_timeout_atomic(pcie->appl_base + APPL_DEBUG, val,
+ val & APPL_DEBUG_PM_LINKST_IN_L2_LAT,
+ 1, PME_ACK_TIMEOUT);
+}
+
+static void tegra_pcie_dw_pme_turnoff(struct tegra_pcie_dw *pcie)
+{
+ u32 data;
+ int err;
+
+ if (!tegra_pcie_dw_link_up(&pcie->pci)) {
+		dev_dbg(pcie->dev, "PCIe link is not up\n");
+ return;
+ }
+
+	/*
+	 * The PCIe controller exits from L2 only if a reset is applied, so
+	 * the controller doesn't handle interrupts afterwards. But in cases
+	 * where L2 entry fails, PERST# is asserted, which can trigger a
+	 * surprise-link-down AER. However, this function is called from
+	 * suspend_noirq(), so the AER interrupt would never be processed.
+	 * Disable all interrupts to avoid such a scenario.
+	 */
+ appl_writel(pcie, 0x0, APPL_INTR_EN_L0_0);
+
+ if (tegra_pcie_try_link_l2(pcie)) {
+ dev_info(pcie->dev, "Link didn't transition to L2 state\n");
+		/*
+		 * The TX lane clock frequency resets to Gen1 only if the
+		 * link is in the L2 or Detect state. So assert PERST# to the
+		 * endpoint to force the Root Port into the Detect state.
+		 */
+ data = appl_readl(pcie, APPL_PINMUX);
+ data &= ~APPL_PINMUX_PEX_RST;
+ appl_writel(pcie, data, APPL_PINMUX);
+
+		/*
+		 * Some cards do not go to the Detect state even after PERST#
+		 * is asserted. So disable the LTSSM to bring the link to the
+		 * Detect state.
+		 */
+ data = readl(pcie->appl_base + APPL_CTRL);
+ data &= ~APPL_CTRL_LTSSM_EN;
+ writel(data, pcie->appl_base + APPL_CTRL);
+
+ err = readl_poll_timeout_atomic(pcie->appl_base + APPL_DEBUG,
+ data,
+ ((data &
+ APPL_DEBUG_LTSSM_STATE_MASK) >>
+ APPL_DEBUG_LTSSM_STATE_SHIFT) ==
+ LTSSM_STATE_PRE_DETECT,
+ 1, LTSSM_TIMEOUT);
+ if (err)
+ dev_info(pcie->dev, "Link didn't go to detect state\n");
+ }
+	/*
+	 * DBI registers may not be accessible after this point, as PLL-E may
+	 * be powered down depending on how CLKREQ is pulled by the endpoint.
+	 */
+ data = appl_readl(pcie, APPL_PINMUX);
+ data |= (APPL_PINMUX_CLKREQ_OVERRIDE_EN | APPL_PINMUX_CLKREQ_OVERRIDE);
+ /* Cut REFCLK to slot */
+ data |= APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN;
+ data &= ~APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE;
+ appl_writel(pcie, data, APPL_PINMUX);
+}
+
+static void tegra_pcie_deinit_controller(struct tegra_pcie_dw *pcie)
+{
+ tegra_pcie_downstream_dev_to_D0(pcie);
+ dw_pcie_host_deinit(&pcie->pci.pp);
+ tegra_pcie_dw_pme_turnoff(pcie);
+ tegra_pcie_unconfig_controller(pcie);
+}
+
+static int tegra_pcie_config_rp(struct tegra_pcie_dw *pcie)
+{
+ struct device *dev = pcie->dev;
+ int ret;
+
+ pm_runtime_enable(dev);
+
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ dev_err(dev, "Failed to get runtime sync for PCIe dev: %d\n",
+ ret);
+ goto fail_pm_get_sync;
+ }
+
+ ret = pinctrl_pm_select_default_state(dev);
+ if (ret < 0) {
+ dev_err(dev, "Failed to configure sideband pins: %d\n", ret);
+ goto fail_pm_get_sync;
+ }
+
+ ret = tegra_pcie_init_controller(pcie);
+ if (ret < 0) {
+ dev_err(dev, "Failed to initialize controller: %d\n", ret);
+ goto fail_pm_get_sync;
+ }
+
+ pcie->link_state = tegra_pcie_dw_link_up(&pcie->pci);
+ if (!pcie->link_state) {
+ ret = -ENOMEDIUM;
+ goto fail_host_init;
+ }
+
+ init_debugfs(pcie);
+
+ return ret;
+
+fail_host_init:
+ tegra_pcie_deinit_controller(pcie);
+fail_pm_get_sync:
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
+ return ret;
+}
+
+static void pex_ep_event_pex_rst_assert(struct tegra_pcie_dw *pcie)
+{
+ u32 val;
+ int ret;
+
+ if (pcie->ep_state == EP_STATE_DISABLED)
+ return;
+
+ /* Disable LTSSM */
+ val = appl_readl(pcie, APPL_CTRL);
+ val &= ~APPL_CTRL_LTSSM_EN;
+ appl_writel(pcie, val, APPL_CTRL);
+
+ ret = readl_poll_timeout(pcie->appl_base + APPL_DEBUG, val,
+ ((val & APPL_DEBUG_LTSSM_STATE_MASK) >>
+ APPL_DEBUG_LTSSM_STATE_SHIFT) ==
+ LTSSM_STATE_PRE_DETECT,
+ 1, LTSSM_TIMEOUT);
+ if (ret)
+ dev_err(pcie->dev, "Failed to go Detect state: %d\n", ret);
+
+ reset_control_assert(pcie->core_rst);
+
+ tegra_pcie_disable_phy(pcie);
+
+ reset_control_assert(pcie->core_apb_rst);
+
+ clk_disable_unprepare(pcie->core_clk);
+
+ pm_runtime_put_sync(pcie->dev);
+
+ if (pcie->enable_ext_refclk) {
+ ret = tegra_pcie_bpmp_set_pll_state(pcie, false);
+ if (ret)
+ dev_err(pcie->dev, "Failed to turn off UPHY: %d\n",
+ ret);
+ }
+
+ ret = tegra_pcie_bpmp_set_ctrl_state(pcie, false);
+ if (ret)
+ dev_err(pcie->dev, "Failed to disable controller: %d\n", ret);
+
+ pcie->ep_state = EP_STATE_DISABLED;
+	dev_dbg(pcie->dev, "Deinitialization of endpoint completed\n");
+}
+
+static void pex_ep_event_pex_rst_deassert(struct tegra_pcie_dw *pcie)
+{
+ struct dw_pcie *pci = &pcie->pci;
+ struct dw_pcie_ep *ep = &pci->ep;
+ struct device *dev = pcie->dev;
+ u32 val;
+ int ret;
+ u16 val_16;
+
+ if (pcie->ep_state == EP_STATE_ENABLED)
+ return;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0) {
+ dev_err(dev, "Failed to get runtime sync for PCIe dev: %d\n",
+ ret);
+ return;
+ }
+
+ ret = tegra_pcie_bpmp_set_ctrl_state(pcie, true);
+ if (ret) {
+ dev_err(pcie->dev, "Failed to enable controller %u: %d\n",
+ pcie->cid, ret);
+ goto fail_set_ctrl_state;
+ }
+
+ if (pcie->enable_ext_refclk) {
+ ret = tegra_pcie_bpmp_set_pll_state(pcie, true);
+ if (ret) {
+ dev_err(dev, "Failed to init UPHY for PCIe EP: %d\n",
+ ret);
+ goto fail_pll_init;
+ }
+ }
+
+ ret = clk_prepare_enable(pcie->core_clk);
+ if (ret) {
+ dev_err(dev, "Failed to enable core clock: %d\n", ret);
+ goto fail_core_clk_enable;
+ }
+
+ ret = reset_control_deassert(pcie->core_apb_rst);
+ if (ret) {
+ dev_err(dev, "Failed to deassert core APB reset: %d\n", ret);
+ goto fail_core_apb_rst;
+ }
+
+ ret = tegra_pcie_enable_phy(pcie);
+ if (ret) {
+ dev_err(dev, "Failed to enable PHY: %d\n", ret);
+ goto fail_phy;
+ }
+
+ /* Perform cleanup that requires refclk */
+ pci_epc_deinit_notify(pcie->pci.ep.epc);
+ dw_pcie_ep_cleanup(&pcie->pci.ep);
+
+ /* Clear any stale interrupt statuses */
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L0);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_0_0);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_1);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_2);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_3);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_6);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_7);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_8_0);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_9);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_10);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_11);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_13);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_14);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_15);
+ appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_17);
+
+	/* Configure this core for EP mode operation */
+ val = appl_readl(pcie, APPL_DM_TYPE);
+ val &= ~APPL_DM_TYPE_MASK;
+ val |= APPL_DM_TYPE_EP;
+ appl_writel(pcie, val, APPL_DM_TYPE);
+
+ appl_writel(pcie, 0x0, APPL_CFG_SLCG_OVERRIDE);
+
+ val = appl_readl(pcie, APPL_CTRL);
+ val |= APPL_CTRL_SYS_PRE_DET_STATE;
+ val |= APPL_CTRL_HW_HOT_RST_EN;
+ appl_writel(pcie, val, APPL_CTRL);
+
+ val = appl_readl(pcie, APPL_CFG_MISC);
+ val |= APPL_CFG_MISC_SLV_EP_MODE;
+ val |= (APPL_CFG_MISC_ARCACHE_VAL << APPL_CFG_MISC_ARCACHE_SHIFT);
+ appl_writel(pcie, val, APPL_CFG_MISC);
+
+ val = appl_readl(pcie, APPL_PINMUX);
+ val |= APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN;
+ val |= APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE;
+ appl_writel(pcie, val, APPL_PINMUX);
+
+ appl_writel(pcie, pcie->dbi_res->start & APPL_CFG_BASE_ADDR_MASK,
+ APPL_CFG_BASE_ADDR);
+
+ appl_writel(pcie, pcie->atu_dma_res->start &
+ APPL_CFG_IATU_DMA_BASE_ADDR_MASK,
+ APPL_CFG_IATU_DMA_BASE_ADDR);
+
+ val = appl_readl(pcie, APPL_INTR_EN_L0_0);
+ val |= APPL_INTR_EN_L0_0_SYS_INTR_EN;
+ val |= APPL_INTR_EN_L0_0_LINK_STATE_INT_EN;
+ val |= APPL_INTR_EN_L0_0_PCI_CMD_EN_INT_EN;
+ appl_writel(pcie, val, APPL_INTR_EN_L0_0);
+
+ val = appl_readl(pcie, APPL_INTR_EN_L1_0_0);
+ val |= APPL_INTR_EN_L1_0_0_HOT_RESET_DONE_INT_EN;
+ val |= APPL_INTR_EN_L1_0_0_RDLH_LINK_UP_INT_EN;
+ appl_writel(pcie, val, APPL_INTR_EN_L1_0_0);
+
+ reset_control_deassert(pcie->core_rst);
+
+ if (pcie->update_fc_fixup) {
+ val = dw_pcie_readl_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF);
+ val |= 0x1 << CFG_TIMER_CTRL_ACK_NAK_SHIFT;
+ dw_pcie_writel_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF, val);
+ }
+
+ config_gen3_gen4_eq_presets(pcie);
+
+ init_host_aspm(pcie);
+
+ if (!pcie->of_data->has_l1ss_exit_fix) {
+ val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
+ val &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL;
+ dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
+ }
+
+ pcie->pcie_cap_base = dw_pcie_find_capability(&pcie->pci,
+ PCI_CAP_ID_EXP);
+
+	/* Clear the Slot Clock Configuration bit in SRNS configurations */
+ if (pcie->enable_srns) {
+ val_16 = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
+ PCI_EXP_LNKSTA);
+ val_16 &= ~PCI_EXP_LNKSTA_SLC;
+ dw_pcie_writew_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA,
+ val_16);
+ }
+
+ clk_set_rate(pcie->core_clk, GEN4_CORE_CLK_FREQ);
+
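+	/*
+	 * Point the MSI-X address-match window at ep->msi_mem so that the
+	 * writel() in tegra_pcie_ep_raise_msix_irq() is trapped by the
+	 * controller and sent upstream as an MSI-X message.
+	 */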
+ val = (ep->msi_mem_phys & MSIX_ADDR_MATCH_LOW_OFF_MASK);
+ val |= MSIX_ADDR_MATCH_LOW_OFF_EN;
+ dw_pcie_writel_dbi(pci, MSIX_ADDR_MATCH_LOW_OFF, val);
+ val = (upper_32_bits(ep->msi_mem_phys) & MSIX_ADDR_MATCH_HIGH_OFF_MASK);
+ dw_pcie_writel_dbi(pci, MSIX_ADDR_MATCH_HIGH_OFF, val);
+
+ ret = dw_pcie_ep_init_registers(ep);
+ if (ret) {
+ dev_err(dev, "Failed to complete initialization: %d\n", ret);
+ goto fail_init_complete;
+ }
+
+ pci_epc_init_notify(ep->epc);
+
+ /* Program the private control to allow sending LTR upstream */
+ if (pcie->of_data->has_ltr_req_fix) {
+ val = appl_readl(pcie, APPL_LTR_MSG_2);
+ val |= APPL_LTR_MSG_2_LTR_MSG_REQ_STATE;
+ appl_writel(pcie, val, APPL_LTR_MSG_2);
+ }
+
+ /* Enable LTSSM */
+ val = appl_readl(pcie, APPL_CTRL);
+ val |= APPL_CTRL_LTSSM_EN;
+ appl_writel(pcie, val, APPL_CTRL);
+
+ pcie->ep_state = EP_STATE_ENABLED;
+	dev_dbg(dev, "Initialization of endpoint completed\n");
+
+ return;
+
+fail_init_complete:
+ reset_control_assert(pcie->core_rst);
+ tegra_pcie_disable_phy(pcie);
+fail_phy:
+ reset_control_assert(pcie->core_apb_rst);
+fail_core_apb_rst:
+ clk_disable_unprepare(pcie->core_clk);
+fail_core_clk_enable:
+ tegra_pcie_bpmp_set_pll_state(pcie, false);
+fail_pll_init:
+ tegra_pcie_bpmp_set_ctrl_state(pcie, false);
+fail_set_ctrl_state:
+ pm_runtime_put_sync(dev);
+}
+
+static irqreturn_t tegra_pcie_ep_pex_rst_irq(int irq, void *arg)
+{
+ struct tegra_pcie_dw *pcie = arg;
+
+ if (gpiod_get_value(pcie->pex_rst_gpiod))
+ pex_ep_event_pex_rst_assert(pcie);
+ else
+ pex_ep_event_pex_rst_deassert(pcie);
+
+ return IRQ_HANDLED;
+}
+
+static void tegra_pcie_ep_init(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ enum pci_barno bar;
+
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
+ dw_pcie_ep_reset_bar(pci, bar);
+}
+
+static int tegra_pcie_ep_raise_intx_irq(struct tegra_pcie_dw *pcie, u16 irq)
+{
+ /* Tegra194 supports only INTA */
+ if (irq > 1)
+ return -EINVAL;
+
+ appl_writel(pcie, 1, APPL_LEGACY_INTX);
+ usleep_range(1000, 2000);
+ appl_writel(pcie, 0, APPL_LEGACY_INTX);
+ return 0;
+}
+
+static int tegra_pcie_ep_raise_msi_irq(struct tegra_pcie_dw *pcie, u16 irq)
+{
+ if (unlikely(irq > 32))
+ return -EINVAL;
+
+ appl_writel(pcie, BIT(irq - 1), APPL_MSI_CTRL_1);
+
+ return 0;
+}
+
+static int tegra_pcie_ep_raise_msix_irq(struct tegra_pcie_dw *pcie, u16 irq)
+{
+ struct dw_pcie_ep *ep = &pcie->pci.ep;
+
+ writel(irq, ep->msi_mem);
+
+ return 0;
+}
+
+static int tegra_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
+ unsigned int type, u16 interrupt_num)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
+
+ switch (type) {
+ case PCI_IRQ_INTX:
+ return tegra_pcie_ep_raise_intx_irq(pcie, interrupt_num);
+
+ case PCI_IRQ_MSI:
+ return tegra_pcie_ep_raise_msi_irq(pcie, interrupt_num);
+
+ case PCI_IRQ_MSIX:
+ return tegra_pcie_ep_raise_msix_irq(pcie, interrupt_num);
+
+ default:
+ dev_err(pci->dev, "Unknown IRQ type\n");
+ return -EPERM;
+ }
+
+ return 0;
+}
+
+static const struct pci_epc_features tegra_pcie_epc_features = {
+ .linkup_notifier = true,
+ .msi_capable = true,
+ .bar[BAR_0] = { .type = BAR_FIXED, .fixed_size = SZ_1M,
+ .only_64bit = true, },
+ .bar[BAR_1] = { .type = BAR_RESERVED, },
+ .bar[BAR_2] = { .type = BAR_RESERVED, },
+ .bar[BAR_3] = { .type = BAR_RESERVED, },
+ .bar[BAR_4] = { .type = BAR_RESERVED, },
+ .bar[BAR_5] = { .type = BAR_RESERVED, },
+ .align = SZ_64K,
+};
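+
+/*
+ * Note: the 64 KiB BAR alignment matches ep->page_size programmed in
+ * tegra_pcie_config_ep().
+ */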
+
+static const struct pci_epc_features*
+tegra_pcie_ep_get_features(struct dw_pcie_ep *ep)
+{
+ return &tegra_pcie_epc_features;
+}
+
+static const struct dw_pcie_ep_ops pcie_ep_ops = {
+ .init = tegra_pcie_ep_init,
+ .raise_irq = tegra_pcie_ep_raise_irq,
+ .get_features = tegra_pcie_ep_get_features,
+};
+
+static int tegra_pcie_config_ep(struct tegra_pcie_dw *pcie,
+ struct platform_device *pdev)
+{
+ struct dw_pcie *pci = &pcie->pci;
+ struct device *dev = pcie->dev;
+ struct dw_pcie_ep *ep;
+ char *name;
+ int ret;
+
+ ep = &pci->ep;
+ ep->ops = &pcie_ep_ops;
+
+ ep->page_size = SZ_64K;
+
+ ret = gpiod_set_debounce(pcie->pex_rst_gpiod, PERST_DEBOUNCE_TIME);
+ if (ret < 0) {
+ dev_err(dev, "Failed to set PERST GPIO debounce time: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = gpiod_to_irq(pcie->pex_rst_gpiod);
+ if (ret < 0) {
+ dev_err(dev, "Failed to get IRQ for PERST GPIO: %d\n", ret);
+ return ret;
+ }
+ pcie->pex_rst_irq = (unsigned int)ret;
+
+ name = devm_kasprintf(dev, GFP_KERNEL, "tegra_pcie_%u_pex_rst_irq",
+ pcie->cid);
+ if (!name) {
+ dev_err(dev, "Failed to create PERST IRQ string\n");
+ return -ENOMEM;
+ }
+
+ irq_set_status_flags(pcie->pex_rst_irq, IRQ_NOAUTOEN);
+
+ pcie->ep_state = EP_STATE_DISABLED;
+
+ ret = devm_request_threaded_irq(dev, pcie->pex_rst_irq, NULL,
+ tegra_pcie_ep_pex_rst_irq,
+ IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ name, (void *)pcie);
+ if (ret < 0) {
+ dev_err(dev, "Failed to request IRQ for PERST: %d\n", ret);
+ return ret;
+ }
+
+ pm_runtime_enable(dev);
+
+ ret = dw_pcie_ep_init(ep);
+ if (ret) {
+ dev_err(dev, "Failed to initialize DWC Endpoint subsystem: %d\n",
+ ret);
+ pm_runtime_disable(dev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int tegra_pcie_dw_probe(struct platform_device *pdev)
+{
+ const struct tegra_pcie_dw_of_data *data;
+ struct device *dev = &pdev->dev;
+ struct resource *atu_dma_res;
+ struct tegra_pcie_dw *pcie;
+ struct dw_pcie_rp *pp;
+ struct dw_pcie *pci;
+ struct phy **phys;
+ char *name;
+ int ret;
+ u32 i;
+
+ data = of_device_get_match_data(dev);
+
+ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+ if (!pcie)
+ return -ENOMEM;
+
+ pci = &pcie->pci;
+ pci->dev = &pdev->dev;
+ pci->ops = &tegra_dw_pcie_ops;
+ pcie->dev = &pdev->dev;
+ pcie->of_data = (struct tegra_pcie_dw_of_data *)data;
+ pci->n_fts[0] = pcie->of_data->n_fts[0];
+ pci->n_fts[1] = pcie->of_data->n_fts[1];
+ pp = &pci->pp;
+ pp->num_vectors = MAX_MSI_IRQS;
+
+ ret = tegra_pcie_dw_parse_dt(pcie);
+ if (ret < 0) {
+ const char *level = KERN_ERR;
+
+ if (ret == -EPROBE_DEFER)
+ level = KERN_DEBUG;
+
+ dev_printk(level, dev,
+ dev_fmt("Failed to parse device tree: %d\n"),
+ ret);
+ return ret;
+ }
+
+ ret = tegra_pcie_get_slot_regulators(pcie);
+ if (ret < 0) {
+ const char *level = KERN_ERR;
+
+ if (ret == -EPROBE_DEFER)
+ level = KERN_DEBUG;
+
+ dev_printk(level, dev,
+ dev_fmt("Failed to get slot regulators: %d\n"),
+ ret);
+ return ret;
+ }
+
+ if (pcie->pex_refclk_sel_gpiod)
+ gpiod_set_value(pcie->pex_refclk_sel_gpiod, 1);
+
+ pcie->pex_ctl_supply = devm_regulator_get(dev, "vddio-pex-ctl");
+ if (IS_ERR(pcie->pex_ctl_supply)) {
+ ret = PTR_ERR(pcie->pex_ctl_supply);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get regulator: %ld\n",
+ PTR_ERR(pcie->pex_ctl_supply));
+ return ret;
+ }
+
+ pcie->core_clk = devm_clk_get(dev, "core");
+ if (IS_ERR(pcie->core_clk)) {
+ dev_err(dev, "Failed to get core clock: %ld\n",
+ PTR_ERR(pcie->core_clk));
+ return PTR_ERR(pcie->core_clk);
+ }
+
+ pcie->appl_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "appl");
+ if (!pcie->appl_res) {
+ dev_err(dev, "Failed to find \"appl\" region\n");
+ return -ENODEV;
+ }
+
+ pcie->appl_base = devm_ioremap_resource(dev, pcie->appl_res);
+ if (IS_ERR(pcie->appl_base))
+ return PTR_ERR(pcie->appl_base);
+
+ pcie->core_apb_rst = devm_reset_control_get(dev, "apb");
+ if (IS_ERR(pcie->core_apb_rst)) {
+ dev_err(dev, "Failed to get APB reset: %ld\n",
+ PTR_ERR(pcie->core_apb_rst));
+ return PTR_ERR(pcie->core_apb_rst);
+ }
+
+ phys = devm_kcalloc(dev, pcie->phy_count, sizeof(*phys), GFP_KERNEL);
+ if (!phys)
+ return -ENOMEM;
+
+ for (i = 0; i < pcie->phy_count; i++) {
+ name = kasprintf(GFP_KERNEL, "p2u-%u", i);
+ if (!name) {
+ dev_err(dev, "Failed to create P2U string\n");
+ return -ENOMEM;
+ }
+ phys[i] = devm_phy_get(dev, name);
+ kfree(name);
+ if (IS_ERR(phys[i])) {
+ ret = PTR_ERR(phys[i]);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get PHY: %d\n", ret);
+ return ret;
+ }
+ }
+
+ pcie->phys = phys;
+
+ atu_dma_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "atu_dma");
+ if (!atu_dma_res) {
+ dev_err(dev, "Failed to find \"atu_dma\" region\n");
+ return -ENODEV;
+ }
+ pcie->atu_dma_res = atu_dma_res;
+
+ pci->atu_size = resource_size(atu_dma_res);
+ pci->atu_base = devm_ioremap_resource(dev, atu_dma_res);
+ if (IS_ERR(pci->atu_base))
+ return PTR_ERR(pci->atu_base);
+
+ pcie->core_rst = devm_reset_control_get(dev, "core");
+ if (IS_ERR(pcie->core_rst)) {
+ dev_err(dev, "Failed to get core reset: %ld\n",
+ PTR_ERR(pcie->core_rst));
+ return PTR_ERR(pcie->core_rst);
+ }
+
+ pp->irq = platform_get_irq_byname(pdev, "intr");
+ if (pp->irq < 0)
+ return pp->irq;
+
+ pcie->bpmp = tegra_bpmp_get(dev);
+ if (IS_ERR(pcie->bpmp))
+ return PTR_ERR(pcie->bpmp);
+
+ platform_set_drvdata(pdev, pcie);
+
+ pcie->icc_path = devm_of_icc_get(&pdev->dev, "write");
+ ret = PTR_ERR_OR_ZERO(pcie->icc_path);
+ if (ret) {
+ tegra_bpmp_put(pcie->bpmp);
+ dev_err_probe(&pdev->dev, ret, "failed to get write interconnect\n");
+ return ret;
+ }
+
+ switch (pcie->of_data->mode) {
+ case DW_PCIE_RC_TYPE:
+ ret = devm_request_irq(dev, pp->irq, tegra_pcie_rp_irq_handler,
+ IRQF_SHARED, "tegra-pcie-intr", pcie);
+ if (ret) {
+ dev_err(dev, "Failed to request IRQ %d: %d\n", pp->irq,
+ ret);
+ goto fail;
+ }
+
+ ret = tegra_pcie_config_rp(pcie);
+ if (ret && ret != -ENOMEDIUM)
+ goto fail;
+ else
+ return 0;
+ break;
+
+ case DW_PCIE_EP_TYPE:
+ ret = devm_request_threaded_irq(dev, pp->irq,
+ tegra_pcie_ep_hard_irq,
+ tegra_pcie_ep_irq_thread,
+ IRQF_SHARED | IRQF_ONESHOT,
+ "tegra-pcie-ep-intr", pcie);
+ if (ret) {
+ dev_err(dev, "Failed to request IRQ %d: %d\n", pp->irq,
+ ret);
+ goto fail;
+ }
+
+ ret = tegra_pcie_config_ep(pcie, pdev);
+ if (ret < 0)
+ goto fail;
+ else
+ return 0;
+ break;
+
+ default:
+ dev_err(dev, "Invalid PCIe device type %d\n",
+ pcie->of_data->mode);
+ ret = -EINVAL;
+ }
+
+fail:
+ tegra_bpmp_put(pcie->bpmp);
+ return ret;
+}
+
+static void tegra_pcie_dw_remove(struct platform_device *pdev)
+{
+ struct tegra_pcie_dw *pcie = platform_get_drvdata(pdev);
+
+ if (pcie->of_data->mode == DW_PCIE_RC_TYPE) {
+ if (!pcie->link_state)
+ return;
+
+ debugfs_remove_recursive(pcie->debugfs);
+ tegra_pcie_deinit_controller(pcie);
+ pm_runtime_put_sync(pcie->dev);
+ } else {
+ disable_irq(pcie->pex_rst_irq);
+ pex_ep_event_pex_rst_assert(pcie);
+ }
+
+ pm_runtime_disable(pcie->dev);
+ tegra_bpmp_put(pcie->bpmp);
+ if (pcie->pex_refclk_sel_gpiod)
+ gpiod_set_value(pcie->pex_refclk_sel_gpiod, 0);
+}
+
+static int tegra_pcie_dw_suspend_late(struct device *dev)
+{
+ struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);
+ u32 val;
+
+ if (pcie->of_data->mode == DW_PCIE_EP_TYPE) {
+		dev_err(dev, "Failed to suspend: Tegra PCIe is in EP mode\n");
+ return -EPERM;
+ }
+
+ if (!pcie->link_state)
+ return 0;
+
+ /* Enable HW_HOT_RST mode */
+ if (!pcie->of_data->has_sbr_reset_fix) {
+ val = appl_readl(pcie, APPL_CTRL);
+ val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK <<
+ APPL_CTRL_HW_HOT_RST_MODE_SHIFT);
+ val |= APPL_CTRL_HW_HOT_RST_EN;
+ appl_writel(pcie, val, APPL_CTRL);
+ }
+
+ return 0;
+}
+
+static int tegra_pcie_dw_suspend_noirq(struct device *dev)
+{
+ struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);
+
+ if (!pcie->link_state)
+ return 0;
+
+ tegra_pcie_downstream_dev_to_D0(pcie);
+ tegra_pcie_dw_pme_turnoff(pcie);
+ tegra_pcie_unconfig_controller(pcie);
+
+ return 0;
+}
+
+static int tegra_pcie_dw_resume_noirq(struct device *dev)
+{
+ struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);
+ int ret;
+
+ if (!pcie->link_state)
+ return 0;
+
+ ret = tegra_pcie_config_controller(pcie, true);
+ if (ret < 0)
+ return ret;
+
+ ret = tegra_pcie_dw_host_init(&pcie->pci.pp);
+ if (ret < 0) {
+ dev_err(dev, "Failed to init host: %d\n", ret);
+ goto fail_host_init;
+ }
+
+ dw_pcie_setup_rc(&pcie->pci.pp);
+
+ ret = tegra_pcie_dw_start_link(&pcie->pci);
+ if (ret < 0)
+ goto fail_host_init;
+
+ return 0;
+
+fail_host_init:
+ tegra_pcie_unconfig_controller(pcie);
+ return ret;
+}
+
+static int tegra_pcie_dw_resume_early(struct device *dev)
+{
+ struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);
+ u32 val;
+
+ if (pcie->of_data->mode == DW_PCIE_EP_TYPE) {
+		dev_err(dev, "Suspend is not supported in EP mode\n");
+ return -ENOTSUPP;
+ }
+
+ if (!pcie->link_state)
+ return 0;
+
+ /* Disable HW_HOT_RST mode */
+ if (!pcie->of_data->has_sbr_reset_fix) {
+ val = appl_readl(pcie, APPL_CTRL);
+ val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK <<
+ APPL_CTRL_HW_HOT_RST_MODE_SHIFT);
+ val |= APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST <<
+ APPL_CTRL_HW_HOT_RST_MODE_SHIFT;
+ val &= ~APPL_CTRL_HW_HOT_RST_EN;
+ appl_writel(pcie, val, APPL_CTRL);
+ }
+
+ return 0;
+}
+
+static void tegra_pcie_dw_shutdown(struct platform_device *pdev)
+{
+ struct tegra_pcie_dw *pcie = platform_get_drvdata(pdev);
+
+ if (pcie->of_data->mode == DW_PCIE_RC_TYPE) {
+ if (!pcie->link_state)
+ return;
+
+ debugfs_remove_recursive(pcie->debugfs);
+ tegra_pcie_downstream_dev_to_D0(pcie);
+
+ disable_irq(pcie->pci.pp.irq);
+ if (IS_ENABLED(CONFIG_PCI_MSI))
+ disable_irq(pcie->pci.pp.msi_irq[0]);
+
+ tegra_pcie_dw_pme_turnoff(pcie);
+ tegra_pcie_unconfig_controller(pcie);
+ pm_runtime_put_sync(pcie->dev);
+ } else {
+ disable_irq(pcie->pex_rst_irq);
+ pex_ep_event_pex_rst_assert(pcie);
+ }
+}
+
+static const struct tegra_pcie_dw_of_data tegra194_pcie_dw_rc_of_data = {
+ .version = TEGRA194_DWC_IP_VER,
+ .mode = DW_PCIE_RC_TYPE,
+ .cdm_chk_int_en_bit = BIT(19),
+ /* Gen4 - 5, 6, 8 and 9 presets enabled */
+ .gen4_preset_vec = 0x360,
+ .n_fts = { 52, 52 },
+};
+
+static const struct tegra_pcie_dw_of_data tegra194_pcie_dw_ep_of_data = {
+ .version = TEGRA194_DWC_IP_VER,
+ .mode = DW_PCIE_EP_TYPE,
+ .cdm_chk_int_en_bit = BIT(19),
+ /* Gen4 - 5, 6, 8 and 9 presets enabled */
+ .gen4_preset_vec = 0x360,
+ .n_fts = { 52, 52 },
+};
+
+static const struct tegra_pcie_dw_of_data tegra234_pcie_dw_rc_of_data = {
+ .version = TEGRA234_DWC_IP_VER,
+ .mode = DW_PCIE_RC_TYPE,
+ .has_msix_doorbell_access_fix = true,
+ .has_sbr_reset_fix = true,
+ .has_l1ss_exit_fix = true,
+ .cdm_chk_int_en_bit = BIT(18),
+ /* Gen4 - 6, 8 and 9 presets enabled */
+ .gen4_preset_vec = 0x340,
+ .n_fts = { 52, 80 },
+};
+
+static const struct tegra_pcie_dw_of_data tegra234_pcie_dw_ep_of_data = {
+ .version = TEGRA234_DWC_IP_VER,
+ .mode = DW_PCIE_EP_TYPE,
+ .has_l1ss_exit_fix = true,
+ .has_ltr_req_fix = true,
+ .cdm_chk_int_en_bit = BIT(18),
+ /* Gen4 - 6, 8 and 9 presets enabled */
+ .gen4_preset_vec = 0x340,
+ .n_fts = { 52, 80 },
+};
+
+static const struct of_device_id tegra_pcie_dw_of_match[] = {
+ {
+ .compatible = "nvidia,tegra194-pcie",
+ .data = &tegra194_pcie_dw_rc_of_data,
+ },
+ {
+ .compatible = "nvidia,tegra194-pcie-ep",
+ .data = &tegra194_pcie_dw_ep_of_data,
+ },
+ {
+ .compatible = "nvidia,tegra234-pcie",
+ .data = &tegra234_pcie_dw_rc_of_data,
+ },
+ {
+ .compatible = "nvidia,tegra234-pcie-ep",
+ .data = &tegra234_pcie_dw_ep_of_data,
+ },
+ {}
+};
+
+static const struct dev_pm_ops tegra_pcie_dw_pm_ops = {
+ .suspend_late = tegra_pcie_dw_suspend_late,
+ .suspend_noirq = tegra_pcie_dw_suspend_noirq,
+ .resume_noirq = tegra_pcie_dw_resume_noirq,
+ .resume_early = tegra_pcie_dw_resume_early,
+};
+
+static struct platform_driver tegra_pcie_dw_driver = {
+ .probe = tegra_pcie_dw_probe,
+ .remove = tegra_pcie_dw_remove,
+ .shutdown = tegra_pcie_dw_shutdown,
+ .driver = {
+ .name = "tegra194-pcie",
+ .pm = &tegra_pcie_dw_pm_ops,
+ .of_match_table = tegra_pcie_dw_of_match,
+ },
+};
+module_platform_driver(tegra_pcie_dw_driver);
+
+MODULE_DEVICE_TABLE(of, tegra_pcie_dw_of_match);
+
+MODULE_AUTHOR("Vidya Sagar <vidyas@nvidia.com>");
+MODULE_DESCRIPTION("NVIDIA PCIe host controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/dwc/pcie-uniphier-ep.c b/drivers/pci/controller/dwc/pcie-uniphier-ep.c
new file mode 100644
index 000000000000..d6e73811216e
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-uniphier-ep.c
@@ -0,0 +1,474 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCIe endpoint controller driver for UniPhier SoCs
+ * Copyright 2018 Socionext Inc.
+ * Author: Kunihiko Hayashi <hayashi.kunihiko@socionext.com>
+ */
+
+#include <linux/bitops.h>
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/iopoll.h>
+#include <linux/of.h>
+#include <linux/pci.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+#include "pcie-designware.h"
+
+/* Link Glue registers */
+#define PCL_RSTCTRL0 0x0010
+#define PCL_RSTCTRL_AXI_REG BIT(3)
+#define PCL_RSTCTRL_AXI_SLAVE BIT(2)
+#define PCL_RSTCTRL_AXI_MASTER BIT(1)
+#define PCL_RSTCTRL_PIPE3 BIT(0)
+
+#define PCL_RSTCTRL1 0x0020
+#define PCL_RSTCTRL_PERST BIT(0)
+
+#define PCL_RSTCTRL2 0x0024
+#define PCL_RSTCTRL_PHY_RESET BIT(0)
+
+#define PCL_PINCTRL0 0x002c
+#define PCL_PERST_PLDN_REGEN BIT(12)
+#define PCL_PERST_NOE_REGEN BIT(11)
+#define PCL_PERST_OUT_REGEN BIT(8)
+#define PCL_PERST_PLDN_REGVAL BIT(4)
+#define PCL_PERST_NOE_REGVAL BIT(3)
+#define PCL_PERST_OUT_REGVAL BIT(0)
+
+#define PCL_PIPEMON 0x0044
+#define PCL_PCLK_ALIVE BIT(15)
+
+#define PCL_MODE 0x8000
+#define PCL_MODE_REGEN BIT(8)
+#define PCL_MODE_REGVAL BIT(0)
+
+#define PCL_APP_CLK_CTRL 0x8004
+#define PCL_APP_CLK_REQ BIT(0)
+
+#define PCL_APP_READY_CTRL 0x8008
+#define PCL_APP_LTSSM_ENABLE BIT(0)
+
+#define PCL_APP_MSI0 0x8040
+#define PCL_APP_VEN_MSI_TC_MASK GENMASK(10, 8)
+#define PCL_APP_VEN_MSI_VECTOR_MASK GENMASK(4, 0)
+
+#define PCL_APP_MSI1 0x8044
+#define PCL_APP_MSI_REQ BIT(0)
+
+#define PCL_APP_INTX 0x8074
+#define PCL_APP_INTX_SYS_INT BIT(0)
+
+#define PCL_APP_PM0 0x8078
+#define PCL_SYS_AUX_PWR_DET BIT(8)
+
+/* assertion time of INTx in usec */
+#define PCL_INTX_WIDTH_USEC 30
+
+struct uniphier_pcie_ep_priv {
+ void __iomem *base;
+ struct dw_pcie pci;
+ struct clk *clk, *clk_gio;
+ struct reset_control *rst, *rst_gio;
+ struct phy *phy;
+ const struct uniphier_pcie_ep_soc_data *data;
+};
+
+struct uniphier_pcie_ep_soc_data {
+ bool has_gio;
+ void (*init)(struct uniphier_pcie_ep_priv *priv);
+ int (*wait)(struct uniphier_pcie_ep_priv *priv);
+ const struct pci_epc_features features;
+};
+
+#define to_uniphier_pcie(x) dev_get_drvdata((x)->dev)
+
+static void uniphier_pcie_ltssm_enable(struct uniphier_pcie_ep_priv *priv,
+ bool enable)
+{
+ u32 val;
+
+ val = readl(priv->base + PCL_APP_READY_CTRL);
+ if (enable)
+ val |= PCL_APP_LTSSM_ENABLE;
+ else
+ val &= ~PCL_APP_LTSSM_ENABLE;
+ writel(val, priv->base + PCL_APP_READY_CTRL);
+}
+
+static void uniphier_pcie_phy_reset(struct uniphier_pcie_ep_priv *priv,
+ bool assert)
+{
+ u32 val;
+
+ val = readl(priv->base + PCL_RSTCTRL2);
+ if (assert)
+ val |= PCL_RSTCTRL_PHY_RESET;
+ else
+ val &= ~PCL_RSTCTRL_PHY_RESET;
+ writel(val, priv->base + PCL_RSTCTRL2);
+}
+
+static void uniphier_pcie_pro5_init_ep(struct uniphier_pcie_ep_priv *priv)
+{
+ u32 val;
+
+ /* set EP mode */
+ val = readl(priv->base + PCL_MODE);
+ val |= PCL_MODE_REGEN | PCL_MODE_REGVAL;
+ writel(val, priv->base + PCL_MODE);
+
+ /* clock request */
+ val = readl(priv->base + PCL_APP_CLK_CTRL);
+ val &= ~PCL_APP_CLK_REQ;
+ writel(val, priv->base + PCL_APP_CLK_CTRL);
+
+ /* deassert PIPE3 and AXI reset */
+ val = readl(priv->base + PCL_RSTCTRL0);
+ val |= PCL_RSTCTRL_AXI_REG | PCL_RSTCTRL_AXI_SLAVE
+ | PCL_RSTCTRL_AXI_MASTER | PCL_RSTCTRL_PIPE3;
+ writel(val, priv->base + PCL_RSTCTRL0);
+
+ uniphier_pcie_ltssm_enable(priv, false);
+
+ msleep(100);
+}
+
+static void uniphier_pcie_nx1_init_ep(struct uniphier_pcie_ep_priv *priv)
+{
+ u32 val;
+
+ /* set EP mode */
+ val = readl(priv->base + PCL_MODE);
+ val |= PCL_MODE_REGEN | PCL_MODE_REGVAL;
+ writel(val, priv->base + PCL_MODE);
+
+ /* use auxiliary power detection */
+ val = readl(priv->base + PCL_APP_PM0);
+ val |= PCL_SYS_AUX_PWR_DET;
+ writel(val, priv->base + PCL_APP_PM0);
+
+ /* assert PERST# */
+ val = readl(priv->base + PCL_PINCTRL0);
+ val &= ~(PCL_PERST_NOE_REGVAL | PCL_PERST_OUT_REGVAL
+ | PCL_PERST_PLDN_REGVAL);
+ val |= PCL_PERST_NOE_REGEN | PCL_PERST_OUT_REGEN
+ | PCL_PERST_PLDN_REGEN;
+ writel(val, priv->base + PCL_PINCTRL0);
+
+ uniphier_pcie_ltssm_enable(priv, false);
+
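+	/*
+	 * Hold PERST# asserted before releasing it; the 100 ms lower
+	 * bound presumably matches the minimum reset time required by
+	 * the PCIe CEM specification.
+	 */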
+ usleep_range(100000, 200000);
+
+ /* deassert PERST# */
+ val = readl(priv->base + PCL_PINCTRL0);
+ val |= PCL_PERST_OUT_REGVAL | PCL_PERST_OUT_REGEN;
+ writel(val, priv->base + PCL_PINCTRL0);
+}
+
+static int uniphier_pcie_nx1_wait_ep(struct uniphier_pcie_ep_priv *priv)
+{
+ u32 status;
+ int ret;
+
+	/* wait for PIPE clock */
+ ret = readl_poll_timeout(priv->base + PCL_PIPEMON, status,
+ status & PCL_PCLK_ALIVE, 100000, 1000000);
+ if (ret) {
+ dev_err(priv->pci.dev,
+ "Failed to initialize controller in EP mode\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int uniphier_pcie_start_link(struct dw_pcie *pci)
+{
+ struct uniphier_pcie_ep_priv *priv = to_uniphier_pcie(pci);
+
+ uniphier_pcie_ltssm_enable(priv, true);
+
+ return 0;
+}
+
+static void uniphier_pcie_stop_link(struct dw_pcie *pci)
+{
+ struct uniphier_pcie_ep_priv *priv = to_uniphier_pcie(pci);
+
+ uniphier_pcie_ltssm_enable(priv, false);
+}
+
+static void uniphier_pcie_ep_init(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ enum pci_barno bar;
+
+ for (bar = BAR_0; bar <= BAR_5; bar++)
+ dw_pcie_ep_reset_bar(pci, bar);
+}
+
+static int uniphier_pcie_ep_raise_intx_irq(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct uniphier_pcie_ep_priv *priv = to_uniphier_pcie(pci);
+ u32 val;
+
+ /*
+	 * This generates a pulse signal to send INTx to the RC, so the
+	 * asserted state should be cleared as soon as possible. This
+	 * sequence is serialized by the mutex in pci_epc_raise_irq().
+ */
+ /* assert INTx */
+ val = readl(priv->base + PCL_APP_INTX);
+ val |= PCL_APP_INTX_SYS_INT;
+ writel(val, priv->base + PCL_APP_INTX);
+
+ udelay(PCL_INTX_WIDTH_USEC);
+
+ /* deassert INTx */
+ val &= ~PCL_APP_INTX_SYS_INT;
+ writel(val, priv->base + PCL_APP_INTX);
+
+ return 0;
+}
+
+static int uniphier_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep,
+ u8 func_no, u16 interrupt_num)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct uniphier_pcie_ep_priv *priv = to_uniphier_pcie(pci);
+ u32 val;
+
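+	/*
+	 * Encode the function number in the TC field and the zero-based
+	 * vector number, then set the request bit so the controller
+	 * sends the MSI.
+	 */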
+ val = FIELD_PREP(PCL_APP_VEN_MSI_TC_MASK, func_no)
+ | FIELD_PREP(PCL_APP_VEN_MSI_VECTOR_MASK, interrupt_num - 1);
+ writel(val, priv->base + PCL_APP_MSI0);
+
+ val = readl(priv->base + PCL_APP_MSI1);
+ val |= PCL_APP_MSI_REQ;
+ writel(val, priv->base + PCL_APP_MSI1);
+
+ return 0;
+}
+
+static int uniphier_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
+ unsigned int type, u16 interrupt_num)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+ switch (type) {
+ case PCI_IRQ_INTX:
+ return uniphier_pcie_ep_raise_intx_irq(ep);
+ case PCI_IRQ_MSI:
+ return uniphier_pcie_ep_raise_msi_irq(ep, func_no,
+ interrupt_num);
+ default:
+		dev_err(pci->dev, "Unknown IRQ type (%d)\n", type);
+ }
+
+ return 0;
+}
+
+static const struct pci_epc_features*
+uniphier_pcie_get_features(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct uniphier_pcie_ep_priv *priv = to_uniphier_pcie(pci);
+
+ return &priv->data->features;
+}
+
+static const struct dw_pcie_ep_ops uniphier_pcie_ep_ops = {
+ .init = uniphier_pcie_ep_init,
+ .raise_irq = uniphier_pcie_ep_raise_irq,
+ .get_features = uniphier_pcie_get_features,
+};
+
+static int uniphier_pcie_ep_enable(struct uniphier_pcie_ep_priv *priv)
+{
+ int ret;
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(priv->clk_gio);
+ if (ret)
+ goto out_clk_disable;
+
+ ret = reset_control_deassert(priv->rst);
+ if (ret)
+ goto out_clk_gio_disable;
+
+ ret = reset_control_deassert(priv->rst_gio);
+ if (ret)
+ goto out_rst_assert;
+
+ if (priv->data->init)
+ priv->data->init(priv);
+
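+	/* hold the PHY in reset while phy_init() runs, then release it */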
+ uniphier_pcie_phy_reset(priv, true);
+
+ ret = phy_init(priv->phy);
+ if (ret)
+ goto out_rst_gio_assert;
+
+ uniphier_pcie_phy_reset(priv, false);
+
+ if (priv->data->wait) {
+ ret = priv->data->wait(priv);
+ if (ret)
+ goto out_phy_exit;
+ }
+
+ return 0;
+
+out_phy_exit:
+ phy_exit(priv->phy);
+out_rst_gio_assert:
+ reset_control_assert(priv->rst_gio);
+out_rst_assert:
+ reset_control_assert(priv->rst);
+out_clk_gio_disable:
+ clk_disable_unprepare(priv->clk_gio);
+out_clk_disable:
+ clk_disable_unprepare(priv->clk);
+
+ return ret;
+}
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+ .start_link = uniphier_pcie_start_link,
+ .stop_link = uniphier_pcie_stop_link,
+};
+
+static int uniphier_pcie_ep_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct uniphier_pcie_ep_priv *priv;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->data = of_device_get_match_data(dev);
+ if (WARN_ON(!priv->data))
+ return -EINVAL;
+
+ priv->pci.dev = dev;
+ priv->pci.ops = &dw_pcie_ops;
+
+ priv->base = devm_platform_ioremap_resource_byname(pdev, "link");
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ if (priv->data->has_gio) {
+ priv->clk_gio = devm_clk_get(dev, "gio");
+ if (IS_ERR(priv->clk_gio))
+ return PTR_ERR(priv->clk_gio);
+
+ priv->rst_gio = devm_reset_control_get_shared(dev, "gio");
+ if (IS_ERR(priv->rst_gio))
+ return PTR_ERR(priv->rst_gio);
+ }
+
+ priv->clk = devm_clk_get(dev, "link");
+ if (IS_ERR(priv->clk))
+ return PTR_ERR(priv->clk);
+
+ priv->rst = devm_reset_control_get_shared(dev, "link");
+ if (IS_ERR(priv->rst))
+ return PTR_ERR(priv->rst);
+
+ priv->phy = devm_phy_optional_get(dev, "pcie-phy");
+ if (IS_ERR(priv->phy)) {
+ ret = PTR_ERR(priv->phy);
+ dev_err(dev, "Failed to get phy (%d)\n", ret);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, priv);
+
+ ret = uniphier_pcie_ep_enable(priv);
+ if (ret)
+ return ret;
+
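+	/*
+	 * Register the DWC endpoint core, program its registers, and
+	 * notify the EPC framework that initialization is complete.
+	 */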
+ priv->pci.ep.ops = &uniphier_pcie_ep_ops;
+ ret = dw_pcie_ep_init(&priv->pci.ep);
+ if (ret)
+ return ret;
+
+ ret = dw_pcie_ep_init_registers(&priv->pci.ep);
+ if (ret) {
+ dev_err(dev, "Failed to initialize DWC endpoint registers\n");
+ dw_pcie_ep_deinit(&priv->pci.ep);
+ return ret;
+ }
+
+ pci_epc_init_notify(priv->pci.ep.epc);
+
+ return 0;
+}
+
+static const struct uniphier_pcie_ep_soc_data uniphier_pro5_data = {
+ .has_gio = true,
+ .init = uniphier_pcie_pro5_init_ep,
+ .wait = NULL,
+ .features = {
+ .linkup_notifier = false,
+ .msi_capable = true,
+ .msix_capable = false,
+ .align = 1 << 16,
+ .bar[BAR_0] = { .only_64bit = true, },
+ .bar[BAR_1] = { .type = BAR_RESERVED, },
+ .bar[BAR_2] = { .only_64bit = true, },
+ .bar[BAR_3] = { .type = BAR_RESERVED, },
+ .bar[BAR_4] = { .type = BAR_RESERVED, },
+ .bar[BAR_5] = { .type = BAR_RESERVED, },
+ },
+};
+
+static const struct uniphier_pcie_ep_soc_data uniphier_nx1_data = {
+ .has_gio = false,
+ .init = uniphier_pcie_nx1_init_ep,
+ .wait = uniphier_pcie_nx1_wait_ep,
+ .features = {
+ .linkup_notifier = false,
+ .msi_capable = true,
+ .msix_capable = false,
+ .align = 1 << 12,
+ .bar[BAR_0] = { .only_64bit = true, },
+ .bar[BAR_1] = { .type = BAR_RESERVED, },
+ .bar[BAR_2] = { .only_64bit = true, },
+ .bar[BAR_3] = { .type = BAR_RESERVED, },
+ .bar[BAR_4] = { .only_64bit = true, },
+ .bar[BAR_5] = { .type = BAR_RESERVED, },
+ },
+};
+
+static const struct of_device_id uniphier_pcie_ep_match[] = {
+ {
+ .compatible = "socionext,uniphier-pro5-pcie-ep",
+ .data = &uniphier_pro5_data,
+ },
+ {
+ .compatible = "socionext,uniphier-nx1-pcie-ep",
+ .data = &uniphier_nx1_data,
+ },
+ { /* sentinel */ },
+};
+
+static struct platform_driver uniphier_pcie_ep_driver = {
+ .probe = uniphier_pcie_ep_probe,
+ .driver = {
+ .name = "uniphier-pcie-ep",
+ .of_match_table = uniphier_pcie_ep_match,
+ .suppress_bind_attrs = true,
+ },
+};
+builtin_platform_driver(uniphier_pcie_ep_driver);
diff --git a/drivers/pci/controller/dwc/pcie-uniphier.c b/drivers/pci/controller/dwc/pcie-uniphier.c
index d5dc40289cce..297e7a3d9b36 100644
--- a/drivers/pci/controller/dwc/pcie-uniphier.c
+++ b/drivers/pci/controller/dwc/pcie-uniphier.c
@@ -9,11 +9,11 @@
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
-#include <linux/module.h>
#include <linux/of_irq.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
@@ -33,6 +33,10 @@
#define PCL_PIPEMON 0x0044
#define PCL_PCLK_ALIVE BIT(15)
+#define PCL_MODE 0x8000
+#define PCL_MODE_REGEN BIT(8)
+#define PCL_MODE_REGVAL BIT(0)
+
#define PCL_APP_READY_CTRL 0x8008
#define PCL_APP_LTSSM_ENABLE BIT(0)
@@ -57,67 +61,73 @@
#define PCL_RDLH_LINK_UP BIT(1)
#define PCL_XMLH_LINK_UP BIT(0)
-struct uniphier_pcie_priv {
- void __iomem *base;
+struct uniphier_pcie {
struct dw_pcie pci;
+ void __iomem *base;
struct clk *clk;
struct reset_control *rst;
struct phy *phy;
- struct irq_domain *legacy_irq_domain;
+ struct irq_domain *intx_irq_domain;
};
#define to_uniphier_pcie(x) dev_get_drvdata((x)->dev)
-static void uniphier_pcie_ltssm_enable(struct uniphier_pcie_priv *priv,
+static void uniphier_pcie_ltssm_enable(struct uniphier_pcie *pcie,
bool enable)
{
u32 val;
- val = readl(priv->base + PCL_APP_READY_CTRL);
+ val = readl(pcie->base + PCL_APP_READY_CTRL);
if (enable)
val |= PCL_APP_LTSSM_ENABLE;
else
val &= ~PCL_APP_LTSSM_ENABLE;
- writel(val, priv->base + PCL_APP_READY_CTRL);
+ writel(val, pcie->base + PCL_APP_READY_CTRL);
}
-static void uniphier_pcie_init_rc(struct uniphier_pcie_priv *priv)
+static void uniphier_pcie_init_rc(struct uniphier_pcie *pcie)
{
u32 val;
+ /* set RC MODE */
+ val = readl(pcie->base + PCL_MODE);
+ val |= PCL_MODE_REGEN;
+ val &= ~PCL_MODE_REGVAL;
+ writel(val, pcie->base + PCL_MODE);
+
/* use auxiliary power detection */
- val = readl(priv->base + PCL_APP_PM0);
+ val = readl(pcie->base + PCL_APP_PM0);
val |= PCL_SYS_AUX_PWR_DET;
- writel(val, priv->base + PCL_APP_PM0);
+ writel(val, pcie->base + PCL_APP_PM0);
/* assert PERST# */
- val = readl(priv->base + PCL_PINCTRL0);
+ val = readl(pcie->base + PCL_PINCTRL0);
val &= ~(PCL_PERST_NOE_REGVAL | PCL_PERST_OUT_REGVAL
| PCL_PERST_PLDN_REGVAL);
val |= PCL_PERST_NOE_REGEN | PCL_PERST_OUT_REGEN
| PCL_PERST_PLDN_REGEN;
- writel(val, priv->base + PCL_PINCTRL0);
+ writel(val, pcie->base + PCL_PINCTRL0);
- uniphier_pcie_ltssm_enable(priv, false);
+ uniphier_pcie_ltssm_enable(pcie, false);
usleep_range(100000, 200000);
/* deassert PERST# */
- val = readl(priv->base + PCL_PINCTRL0);
+ val = readl(pcie->base + PCL_PINCTRL0);
val |= PCL_PERST_OUT_REGVAL | PCL_PERST_OUT_REGEN;
- writel(val, priv->base + PCL_PINCTRL0);
+ writel(val, pcie->base + PCL_PINCTRL0);
}
-static int uniphier_pcie_wait_rc(struct uniphier_pcie_priv *priv)
+static int uniphier_pcie_wait_rc(struct uniphier_pcie *pcie)
{
u32 status;
int ret;
/* wait PIPE clock */
- ret = readl_poll_timeout(priv->base + PCL_PIPEMON, status,
+ ret = readl_poll_timeout(pcie->base + PCL_PIPEMON, status,
status & PCL_PCLK_ALIVE, 100000, 1000000);
if (ret) {
- dev_err(priv->pci.dev,
+ dev_err(pcie->pci.dev,
"Failed to initialize controller in RC mode\n");
return ret;
}
@@ -125,90 +135,76 @@ static int uniphier_pcie_wait_rc(struct uniphier_pcie_priv *priv)
return 0;
}
-static int uniphier_pcie_link_up(struct dw_pcie *pci)
+static bool uniphier_pcie_link_up(struct dw_pcie *pci)
{
- struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci);
+ struct uniphier_pcie *pcie = to_uniphier_pcie(pci);
u32 val, mask;
- val = readl(priv->base + PCL_STATUS_LINK);
+ val = readl(pcie->base + PCL_STATUS_LINK);
mask = PCL_RDLH_LINK_UP | PCL_XMLH_LINK_UP;
return (val & mask) == mask;
}
-static int uniphier_pcie_establish_link(struct dw_pcie *pci)
+static int uniphier_pcie_start_link(struct dw_pcie *pci)
{
- struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci);
+ struct uniphier_pcie *pcie = to_uniphier_pcie(pci);
- if (dw_pcie_link_up(pci))
- return 0;
+ uniphier_pcie_ltssm_enable(pcie, true);
- uniphier_pcie_ltssm_enable(priv, true);
-
- return dw_pcie_wait_for_link(pci);
+ return 0;
}
static void uniphier_pcie_stop_link(struct dw_pcie *pci)
{
- struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci);
+ struct uniphier_pcie *pcie = to_uniphier_pcie(pci);
- uniphier_pcie_ltssm_enable(priv, false);
+ uniphier_pcie_ltssm_enable(pcie, false);
}
-static void uniphier_pcie_irq_enable(struct uniphier_pcie_priv *priv)
+static void uniphier_pcie_irq_enable(struct uniphier_pcie *pcie)
{
- writel(PCL_RCV_INT_ALL_ENABLE, priv->base + PCL_RCV_INT);
- writel(PCL_RCV_INTX_ALL_ENABLE, priv->base + PCL_RCV_INTX);
+ writel(PCL_RCV_INT_ALL_ENABLE, pcie->base + PCL_RCV_INT);
+ writel(PCL_RCV_INTX_ALL_ENABLE, pcie->base + PCL_RCV_INTX);
}
-static void uniphier_pcie_irq_disable(struct uniphier_pcie_priv *priv)
-{
- writel(0, priv->base + PCL_RCV_INT);
- writel(0, priv->base + PCL_RCV_INTX);
-}
-
-static void uniphier_pcie_irq_ack(struct irq_data *d)
-{
- struct pcie_port *pp = irq_data_get_irq_chip_data(d);
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci);
- u32 val;
-
- val = readl(priv->base + PCL_RCV_INTX);
- val &= ~PCL_RCV_INTX_ALL_STATUS;
- val |= BIT(irqd_to_hwirq(d) + PCL_RCV_INTX_STATUS_SHIFT);
- writel(val, priv->base + PCL_RCV_INTX);
-}
static void uniphier_pcie_irq_mask(struct irq_data *d)
{
- struct pcie_port *pp = irq_data_get_irq_chip_data(d);
+ struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci);
+ struct uniphier_pcie *pcie = to_uniphier_pcie(pci);
+ unsigned long flags;
u32 val;
- val = readl(priv->base + PCL_RCV_INTX);
- val &= ~PCL_RCV_INTX_ALL_MASK;
+ raw_spin_lock_irqsave(&pp->lock, flags);
+
+ val = readl(pcie->base + PCL_RCV_INTX);
val |= BIT(irqd_to_hwirq(d) + PCL_RCV_INTX_MASK_SHIFT);
- writel(val, priv->base + PCL_RCV_INTX);
+ writel(val, pcie->base + PCL_RCV_INTX);
+
+ raw_spin_unlock_irqrestore(&pp->lock, flags);
}
static void uniphier_pcie_irq_unmask(struct irq_data *d)
{
- struct pcie_port *pp = irq_data_get_irq_chip_data(d);
+ struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci);
+ struct uniphier_pcie *pcie = to_uniphier_pcie(pci);
+ unsigned long flags;
u32 val;
- val = readl(priv->base + PCL_RCV_INTX);
- val &= ~PCL_RCV_INTX_ALL_MASK;
+ raw_spin_lock_irqsave(&pp->lock, flags);
+
+ val = readl(pcie->base + PCL_RCV_INTX);
val &= ~BIT(irqd_to_hwirq(d) + PCL_RCV_INTX_MASK_SHIFT);
- writel(val, priv->base + PCL_RCV_INTX);
+ writel(val, pcie->base + PCL_RCV_INTX);
+
+ raw_spin_unlock_irqrestore(&pp->lock, flags);
}
static struct irq_chip uniphier_pcie_irq_chip = {
.name = "PCI",
- .irq_ack = uniphier_pcie_irq_ack,
.irq_mask = uniphier_pcie_irq_mask,
.irq_unmask = uniphier_pcie_irq_unmask,
};
@@ -229,15 +225,15 @@ static const struct irq_domain_ops uniphier_intx_domain_ops = {
static void uniphier_pcie_irq_handler(struct irq_desc *desc)
{
- struct pcie_port *pp = irq_desc_get_handler_data(desc);
+ struct dw_pcie_rp *pp = irq_desc_get_handler_data(desc);
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci);
+ struct uniphier_pcie *pcie = to_uniphier_pcie(pci);
struct irq_chip *chip = irq_desc_get_chip(desc);
unsigned long reg;
- u32 val, bit, virq;
+ u32 val, bit;
/* INT for debug */
- val = readl(priv->base + PCL_RCV_INT);
+ val = readl(pcie->base + PCL_RCV_INT);
if (val & PCL_CFG_BW_MGT_STATUS)
dev_dbg(pci->dev, "Link Bandwidth Management Event\n");
@@ -248,28 +244,27 @@ static void uniphier_pcie_irq_handler(struct irq_desc *desc)
if (val & PCL_CFG_PME_MSI_STATUS)
dev_dbg(pci->dev, "PME Interrupt\n");
- writel(val, priv->base + PCL_RCV_INT);
+ writel(val, pcie->base + PCL_RCV_INT);
/* INTx */
chained_irq_enter(chip, desc);
- val = readl(priv->base + PCL_RCV_INTX);
+ val = readl(pcie->base + PCL_RCV_INTX);
reg = FIELD_GET(PCL_RCV_INTX_ALL_STATUS, val);
- for_each_set_bit(bit, &reg, PCI_NUM_INTX) {
- virq = irq_linear_revmap(priv->legacy_irq_domain, bit);
- generic_handle_irq(virq);
- }
+ for_each_set_bit(bit, &reg, PCI_NUM_INTX)
+ generic_handle_domain_irq(pcie->intx_irq_domain, bit);
chained_irq_exit(chip, desc);
}
-static int uniphier_pcie_config_legacy_irq(struct pcie_port *pp)
+static int uniphier_pcie_config_intx_irq(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci);
+ struct uniphier_pcie *pcie = to_uniphier_pcie(pci);
struct device_node *np = pci->dev->of_node;
struct device_node *np_intc;
+ int ret = 0;
np_intc = of_get_child_by_name(np, "legacy-interrupt-controller");
if (!np_intc) {
@@ -280,118 +275,81 @@ static int uniphier_pcie_config_legacy_irq(struct pcie_port *pp)
pp->irq = irq_of_parse_and_map(np_intc, 0);
if (!pp->irq) {
dev_err(pci->dev, "Failed to get an IRQ entry in legacy-interrupt-controller\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto out_put_node;
}
- priv->legacy_irq_domain = irq_domain_add_linear(np_intc, PCI_NUM_INTX,
+ pcie->intx_irq_domain = irq_domain_create_linear(of_fwnode_handle(np_intc), PCI_NUM_INTX,
&uniphier_intx_domain_ops, pp);
- if (!priv->legacy_irq_domain) {
+ if (!pcie->intx_irq_domain) {
dev_err(pci->dev, "Failed to get INTx domain\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto out_put_node;
}
irq_set_chained_handler_and_data(pp->irq, uniphier_pcie_irq_handler,
pp);
- return 0;
+out_put_node:
+ of_node_put(np_intc);
+ return ret;
}
-static int uniphier_pcie_host_init(struct pcie_port *pp)
+static int uniphier_pcie_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci);
+ struct uniphier_pcie *pcie = to_uniphier_pcie(pci);
int ret;
- ret = uniphier_pcie_config_legacy_irq(pp);
- if (ret)
- return ret;
-
- uniphier_pcie_irq_enable(priv);
-
- dw_pcie_setup_rc(pp);
- ret = uniphier_pcie_establish_link(pci);
+ ret = uniphier_pcie_config_intx_irq(pp);
if (ret)
return ret;
- if (IS_ENABLED(CONFIG_PCI_MSI))
- dw_pcie_msi_init(pp);
+ uniphier_pcie_irq_enable(pcie);
return 0;
}
static const struct dw_pcie_host_ops uniphier_pcie_host_ops = {
- .host_init = uniphier_pcie_host_init,
+ .init = uniphier_pcie_host_init,
};
-static int uniphier_add_pcie_port(struct uniphier_pcie_priv *priv,
- struct platform_device *pdev)
+static int uniphier_pcie_host_enable(struct uniphier_pcie *pcie)
{
- struct dw_pcie *pci = &priv->pci;
- struct pcie_port *pp = &pci->pp;
- struct device *dev = &pdev->dev;
int ret;
- pp->ops = &uniphier_pcie_host_ops;
-
- if (IS_ENABLED(CONFIG_PCI_MSI)) {
- pp->msi_irq = platform_get_irq_byname(pdev, "msi");
- if (pp->msi_irq < 0)
- return pp->msi_irq;
- }
-
- ret = dw_pcie_host_init(pp);
- if (ret) {
- dev_err(dev, "Failed to initialize host (%d)\n", ret);
- return ret;
- }
-
- return 0;
-}
-
-static int uniphier_pcie_host_enable(struct uniphier_pcie_priv *priv)
-{
- int ret;
-
- ret = clk_prepare_enable(priv->clk);
+ ret = clk_prepare_enable(pcie->clk);
if (ret)
return ret;
- ret = reset_control_deassert(priv->rst);
+ ret = reset_control_deassert(pcie->rst);
if (ret)
goto out_clk_disable;
- uniphier_pcie_init_rc(priv);
+ uniphier_pcie_init_rc(pcie);
- ret = phy_init(priv->phy);
+ ret = phy_init(pcie->phy);
if (ret)
goto out_rst_assert;
- ret = uniphier_pcie_wait_rc(priv);
+ ret = uniphier_pcie_wait_rc(pcie);
if (ret)
goto out_phy_exit;
return 0;
out_phy_exit:
- phy_exit(priv->phy);
+ phy_exit(pcie->phy);
out_rst_assert:
- reset_control_assert(priv->rst);
+ reset_control_assert(pcie->rst);
out_clk_disable:
- clk_disable_unprepare(priv->clk);
+ clk_disable_unprepare(pcie->clk);
return ret;
}
-static void uniphier_pcie_host_disable(struct uniphier_pcie_priv *priv)
-{
- uniphier_pcie_irq_disable(priv);
- phy_exit(priv->phy);
- reset_control_assert(priv->rst);
- clk_disable_unprepare(priv->clk);
-}
-
static const struct dw_pcie_ops dw_pcie_ops = {
- .start_link = uniphier_pcie_establish_link,
+ .start_link = uniphier_pcie_start_link,
.stop_link = uniphier_pcie_stop_link,
.link_up = uniphier_pcie_link_up,
};
@@ -399,73 +357,53 @@ static const struct dw_pcie_ops dw_pcie_ops = {
static int uniphier_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct uniphier_pcie_priv *priv;
- struct resource *res;
+ struct uniphier_pcie *pcie;
int ret;
- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
+ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+ if (!pcie)
return -ENOMEM;
- priv->pci.dev = dev;
- priv->pci.ops = &dw_pcie_ops;
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
- priv->pci.dbi_base = devm_pci_remap_cfg_resource(dev, res);
- if (IS_ERR(priv->pci.dbi_base))
- return PTR_ERR(priv->pci.dbi_base);
+ pcie->pci.dev = dev;
+ pcie->pci.ops = &dw_pcie_ops;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "link");
- priv->base = devm_ioremap_resource(dev, res);
- if (IS_ERR(priv->base))
- return PTR_ERR(priv->base);
+ pcie->base = devm_platform_ioremap_resource_byname(pdev, "link");
+ if (IS_ERR(pcie->base))
+ return PTR_ERR(pcie->base);
- priv->clk = devm_clk_get(dev, NULL);
- if (IS_ERR(priv->clk))
- return PTR_ERR(priv->clk);
+ pcie->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(pcie->clk))
+ return PTR_ERR(pcie->clk);
- priv->rst = devm_reset_control_get_shared(dev, NULL);
- if (IS_ERR(priv->rst))
- return PTR_ERR(priv->rst);
+ pcie->rst = devm_reset_control_get_shared(dev, NULL);
+ if (IS_ERR(pcie->rst))
+ return PTR_ERR(pcie->rst);
- priv->phy = devm_phy_optional_get(dev, "pcie-phy");
- if (IS_ERR(priv->phy))
- return PTR_ERR(priv->phy);
+ pcie->phy = devm_phy_optional_get(dev, "pcie-phy");
+ if (IS_ERR(pcie->phy))
+ return PTR_ERR(pcie->phy);
- platform_set_drvdata(pdev, priv);
+ platform_set_drvdata(pdev, pcie);
- ret = uniphier_pcie_host_enable(priv);
+ ret = uniphier_pcie_host_enable(pcie);
if (ret)
return ret;
- return uniphier_add_pcie_port(priv, pdev);
-}
-
-static int uniphier_pcie_remove(struct platform_device *pdev)
-{
- struct uniphier_pcie_priv *priv = platform_get_drvdata(pdev);
+ pcie->pci.pp.ops = &uniphier_pcie_host_ops;
- uniphier_pcie_host_disable(priv);
-
- return 0;
+ return dw_pcie_host_init(&pcie->pci.pp);
}
static const struct of_device_id uniphier_pcie_match[] = {
{ .compatible = "socionext,uniphier-pcie", },
{ /* sentinel */ },
};
-MODULE_DEVICE_TABLE(of, uniphier_pcie_match);
static struct platform_driver uniphier_pcie_driver = {
.probe = uniphier_pcie_probe,
- .remove = uniphier_pcie_remove,
.driver = {
.name = "uniphier-pcie",
.of_match_table = uniphier_pcie_match,
},
};
builtin_platform_driver(uniphier_pcie_driver);
-
-MODULE_AUTHOR("Kunihiko Hayashi <hayashi.kunihiko@socionext.com>");
-MODULE_DESCRIPTION("UniPhier PCIe host controller driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/dwc/pcie-visconti.c b/drivers/pci/controller/dwc/pcie-visconti.c
new file mode 100644
index 000000000000..cdeac6177143
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-visconti.c
@@ -0,0 +1,329 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * DWC PCIe RC driver for Toshiba Visconti ARM SoC
+ *
+ * Copyright (C) 2021 Toshiba Electronic Device & Storage Corporation
+ * Copyright (C) 2021 TOSHIBA CORPORATION
+ *
+ * Nobuhiro Iwamatsu <nobuhiro1.iwamatsu@toshiba.co.jp>
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/of_platform.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/resource.h>
+#include <linux/types.h>
+
+#include "pcie-designware.h"
+#include "../../pci.h"
+
+struct visconti_pcie {
+ struct dw_pcie pci;
+ void __iomem *ulreg_base;
+ void __iomem *smu_base;
+ void __iomem *mpu_base;
+ struct clk *refclk;
+ struct clk *coreclk;
+ struct clk *auxclk;
+};
+
+#define PCIE_UL_REG_S_PCIE_MODE 0x00F4
+#define PCIE_UL_REG_S_PCIE_MODE_EP 0x00
+#define PCIE_UL_REG_S_PCIE_MODE_RC 0x04
+
+#define PCIE_UL_REG_S_PERSTN_CTRL 0x00F8
+#define PCIE_UL_IOM_PCIE_PERSTN_I_EN BIT(3)
+#define PCIE_UL_DIRECT_PERSTN_EN BIT(2)
+#define PCIE_UL_PERSTN_OUT BIT(1)
+#define PCIE_UL_DIRECT_PERSTN BIT(0)
+#define PCIE_UL_REG_S_PERSTN_CTRL_INIT (PCIE_UL_IOM_PCIE_PERSTN_I_EN | \
+ PCIE_UL_DIRECT_PERSTN_EN | \
+ PCIE_UL_DIRECT_PERSTN)
+
+#define PCIE_UL_REG_S_PHY_INIT_02 0x0104
+#define PCIE_UL_PHY0_SRAM_EXT_LD_DONE BIT(0)
+
+#define PCIE_UL_REG_S_PHY_INIT_03 0x0108
+#define PCIE_UL_PHY0_SRAM_INIT_DONE BIT(0)
+
+#define PCIE_UL_REG_S_INT_EVENT_MASK1 0x0138
+#define PCIE_UL_CFG_PME_INT BIT(0)
+#define PCIE_UL_CFG_LINK_EQ_REQ_INT BIT(1)
+#define PCIE_UL_EDMA_INT0 BIT(2)
+#define PCIE_UL_EDMA_INT1 BIT(3)
+#define PCIE_UL_EDMA_INT2 BIT(4)
+#define PCIE_UL_EDMA_INT3 BIT(5)
+#define PCIE_UL_S_INT_EVENT_MASK1_ALL (PCIE_UL_CFG_PME_INT | \
+ PCIE_UL_CFG_LINK_EQ_REQ_INT | \
+ PCIE_UL_EDMA_INT0 | \
+ PCIE_UL_EDMA_INT1 | \
+ PCIE_UL_EDMA_INT2 | \
+ PCIE_UL_EDMA_INT3)
+
+#define PCIE_UL_REG_S_SB_MON 0x0198
+#define PCIE_UL_REG_S_SIG_MON 0x019C
+#define PCIE_UL_CORE_RST_N_MON BIT(0)
+
+#define PCIE_UL_REG_V_SII_DBG_00 0x0844
+#define PCIE_UL_REG_V_SII_GEN_CTRL_01 0x0860
+#define PCIE_UL_APP_LTSSM_ENABLE BIT(0)
+
+#define PCIE_UL_REG_V_PHY_ST_00 0x0864
+#define PCIE_UL_SMLH_LINK_UP BIT(0)
+
+#define PCIE_UL_REG_V_PHY_ST_02 0x0868
+#define PCIE_UL_S_DETECT_ACT 0x01
+#define PCIE_UL_S_L0 0x11
+
+#define PISMU_CKON_PCIE 0x0038
+#define PISMU_CKON_PCIE_AUX_CLK BIT(1)
+#define PISMU_CKON_PCIE_MSTR_ACLK BIT(0)
+
+#define PISMU_RSOFF_PCIE 0x0538
+#define PISMU_RSOFF_PCIE_ULREG_RST_N BIT(1)
+#define PISMU_RSOFF_PCIE_PWR_UP_RST_N BIT(0)
+
+#define PCIE_MPU_REG_MP_EN 0x0
+#define MPU_MP_EN_DISABLE BIT(0)
+
+/* Access registers in PCIe ulreg */
+static void visconti_ulreg_writel(struct visconti_pcie *pcie, u32 val, u32 reg)
+{
+ writel_relaxed(val, pcie->ulreg_base + reg);
+}
+
+static u32 visconti_ulreg_readl(struct visconti_pcie *pcie, u32 reg)
+{
+ return readl_relaxed(pcie->ulreg_base + reg);
+}
+
+/* Access registers in PCIe smu */
+static void visconti_smu_writel(struct visconti_pcie *pcie, u32 val, u32 reg)
+{
+ writel_relaxed(val, pcie->smu_base + reg);
+}
+
+/* Access registers in PCIe mpu */
+static void visconti_mpu_writel(struct visconti_pcie *pcie, u32 val, u32 reg)
+{
+ writel_relaxed(val, pcie->mpu_base + reg);
+}
+
+static u32 visconti_mpu_readl(struct visconti_pcie *pcie, u32 reg)
+{
+ return readl_relaxed(pcie->mpu_base + reg);
+}
+
+static bool visconti_pcie_link_up(struct dw_pcie *pci)
+{
+ struct visconti_pcie *pcie = dev_get_drvdata(pci->dev);
+ void __iomem *addr = pcie->ulreg_base;
+ u32 val = readl_relaxed(addr + PCIE_UL_REG_V_PHY_ST_02);
+
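+	/* PHY_ST_02 exposes the LTSSM state; PCIE_UL_S_L0 (0x11) is L0 */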
+ return val & PCIE_UL_S_L0;
+}
+
+static int visconti_pcie_start_link(struct dw_pcie *pci)
+{
+ struct visconti_pcie *pcie = dev_get_drvdata(pci->dev);
+ void __iomem *addr = pcie->ulreg_base;
+ u32 val;
+ int ret;
+
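+	/*
+	 * Enable the LTSSM, wait for the link to train up to L0, unmask
+	 * the interrupt events, and clear the MPU disable bit once the
+	 * link is reported up.
+	 */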
+ visconti_ulreg_writel(pcie, PCIE_UL_APP_LTSSM_ENABLE,
+ PCIE_UL_REG_V_SII_GEN_CTRL_01);
+
+ ret = readl_relaxed_poll_timeout(addr + PCIE_UL_REG_V_PHY_ST_02,
+ val, (val & PCIE_UL_S_L0),
+ 90000, 100000);
+ if (ret)
+ return ret;
+
+ visconti_ulreg_writel(pcie, PCIE_UL_S_INT_EVENT_MASK1_ALL,
+ PCIE_UL_REG_S_INT_EVENT_MASK1);
+
+ if (dw_pcie_link_up(pci)) {
+ val = visconti_mpu_readl(pcie, PCIE_MPU_REG_MP_EN);
+ visconti_mpu_writel(pcie, val & ~MPU_MP_EN_DISABLE,
+ PCIE_MPU_REG_MP_EN);
+ }
+
+ return 0;
+}
+
+static void visconti_pcie_stop_link(struct dw_pcie *pci)
+{
+ struct visconti_pcie *pcie = dev_get_drvdata(pci->dev);
+ u32 val;
+
+ val = visconti_ulreg_readl(pcie, PCIE_UL_REG_V_SII_GEN_CTRL_01);
+ val &= ~PCIE_UL_APP_LTSSM_ENABLE;
+ visconti_ulreg_writel(pcie, val, PCIE_UL_REG_V_SII_GEN_CTRL_01);
+
+ val = visconti_mpu_readl(pcie, PCIE_MPU_REG_MP_EN);
+ visconti_mpu_writel(pcie, val | MPU_MP_EN_DISABLE, PCIE_MPU_REG_MP_EN);
+}
+
+/*
+ * On this SoC, the CPU bus presents addresses to the PCIe bus as
+ * offsets from 0x40000000, so 0x40000000 is subtracted from the CPU
+ * bus address. The same 0x40000000 is reflected in io_base from DT.
+ */
+static u64 visconti_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 cpu_addr)
+{
+ struct dw_pcie_rp *pp = &pci->pp;
+
+ return cpu_addr & ~pp->io_base;
+}
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+ .cpu_addr_fixup = visconti_pcie_cpu_addr_fixup,
+ .link_up = visconti_pcie_link_up,
+ .start_link = visconti_pcie_start_link,
+ .stop_link = visconti_pcie_stop_link,
+};
+
+static int visconti_pcie_host_init(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct visconti_pcie *pcie = dev_get_drvdata(pci->dev);
+ void __iomem *addr;
+ int err;
+ u32 val;
+
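+	/*
+	 * Bring-up sequence: enable the PCIe clocks, release the ulreg
+	 * reset, select RC mode, toggle PERST#, release the power-up
+	 * reset, then wait for the PHY SRAM to initialize and for the
+	 * core reset to be released.
+	 */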
+ visconti_smu_writel(pcie,
+ PISMU_CKON_PCIE_AUX_CLK | PISMU_CKON_PCIE_MSTR_ACLK,
+ PISMU_CKON_PCIE);
+ ndelay(250);
+
+ visconti_smu_writel(pcie, PISMU_RSOFF_PCIE_ULREG_RST_N,
+ PISMU_RSOFF_PCIE);
+ visconti_ulreg_writel(pcie, PCIE_UL_REG_S_PCIE_MODE_RC,
+ PCIE_UL_REG_S_PCIE_MODE);
+
+ val = PCIE_UL_REG_S_PERSTN_CTRL_INIT;
+ visconti_ulreg_writel(pcie, val, PCIE_UL_REG_S_PERSTN_CTRL);
+ udelay(100);
+
+ val |= PCIE_UL_PERSTN_OUT;
+ visconti_ulreg_writel(pcie, val, PCIE_UL_REG_S_PERSTN_CTRL);
+ udelay(100);
+
+ visconti_smu_writel(pcie, PISMU_RSOFF_PCIE_PWR_UP_RST_N,
+ PISMU_RSOFF_PCIE);
+
+ addr = pcie->ulreg_base + PCIE_UL_REG_S_PHY_INIT_03;
+ err = readl_relaxed_poll_timeout(addr, val,
+ (val & PCIE_UL_PHY0_SRAM_INIT_DONE),
+ 100, 1000);
+ if (err)
+ return err;
+
+ visconti_ulreg_writel(pcie, PCIE_UL_PHY0_SRAM_EXT_LD_DONE,
+ PCIE_UL_REG_S_PHY_INIT_02);
+
+ addr = pcie->ulreg_base + PCIE_UL_REG_S_SIG_MON;
+ return readl_relaxed_poll_timeout(addr, val,
+ (val & PCIE_UL_CORE_RST_N_MON), 100,
+ 1000);
+}
+
+static const struct dw_pcie_host_ops visconti_pcie_host_ops = {
+ .init = visconti_pcie_host_init,
+};
+
+static int visconti_get_resources(struct platform_device *pdev,
+ struct visconti_pcie *pcie)
+{
+ struct device *dev = &pdev->dev;
+
+ pcie->ulreg_base = devm_platform_ioremap_resource_byname(pdev, "ulreg");
+ if (IS_ERR(pcie->ulreg_base))
+ return PTR_ERR(pcie->ulreg_base);
+
+ pcie->smu_base = devm_platform_ioremap_resource_byname(pdev, "smu");
+ if (IS_ERR(pcie->smu_base))
+ return PTR_ERR(pcie->smu_base);
+
+ pcie->mpu_base = devm_platform_ioremap_resource_byname(pdev, "mpu");
+ if (IS_ERR(pcie->mpu_base))
+ return PTR_ERR(pcie->mpu_base);
+
+ pcie->refclk = devm_clk_get(dev, "ref");
+ if (IS_ERR(pcie->refclk))
+ return dev_err_probe(dev, PTR_ERR(pcie->refclk),
+ "Failed to get ref clock\n");
+
+ pcie->coreclk = devm_clk_get(dev, "core");
+ if (IS_ERR(pcie->coreclk))
+ return dev_err_probe(dev, PTR_ERR(pcie->coreclk),
+ "Failed to get core clock\n");
+
+ pcie->auxclk = devm_clk_get(dev, "aux");
+ if (IS_ERR(pcie->auxclk))
+ return dev_err_probe(dev, PTR_ERR(pcie->auxclk),
+ "Failed to get aux clock\n");
+
+ return 0;
+}
+
+static int visconti_add_pcie_port(struct visconti_pcie *pcie,
+ struct platform_device *pdev)
+{
+ struct dw_pcie *pci = &pcie->pci;
+ struct dw_pcie_rp *pp = &pci->pp;
+
+ pp->irq = platform_get_irq_byname(pdev, "intr");
+ if (pp->irq < 0)
+ return pp->irq;
+
+ pp->ops = &visconti_pcie_host_ops;
+
+ return dw_pcie_host_init(pp);
+}
+
+static int visconti_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct visconti_pcie *pcie;
+ struct dw_pcie *pci;
+ int ret;
+
+ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+ if (!pcie)
+ return -ENOMEM;
+
+ pci = &pcie->pci;
+ pci->dev = dev;
+ pci->ops = &dw_pcie_ops;
+
+ ret = visconti_get_resources(pdev, pcie);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, pcie);
+
+ return visconti_add_pcie_port(pcie, pdev);
+}
+
+static const struct of_device_id visconti_pcie_match[] = {
+ { .compatible = "toshiba,visconti-pcie" },
+ {},
+};
+
+static struct platform_driver visconti_pcie_driver = {
+ .probe = visconti_pcie_probe,
+ .driver = {
+ .name = "visconti-pcie",
+ .of_match_table = visconti_pcie_match,
+ .suppress_bind_attrs = true,
+ },
+};
+builtin_platform_driver(visconti_pcie_driver);