Diffstat (limited to 'drivers/pci')
126 files changed, 5014 insertions, 2474 deletions
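Several hunks below convert the Cadence and DesignWare endpoint drivers' set_msi/get_msi and set_msix/get_msix callbacks from raw capability-register encodings to plain IRQ counts (nr_irqs). For orientation, here is a minimal sketch of the two encodings involved; the helper names are illustrative only and not part of the patches. Per the PCI specification, the MSI Multiple Message Capable/Enable fields hold log2 of the vector count (1 to 32 vectors), while the MSI-X Table Size field is encoded as N-1 (up to 2048 vectors).

#include <linux/log2.h>
#include <linux/types.h>

/* MSI: the MMC/MME fields hold log2(nr_irqs); nr_irqs is a power of two, at most 32. */
static inline u8 msi_nr_irqs_to_mmc(unsigned int nr_irqs)
{
	return order_base_2(nr_irqs);	/* e.g. 8 vectors -> 3 */
}

static inline unsigned int msi_mmc_to_nr_irqs(u8 mmc)
{
	return 1U << mmc;		/* e.g. 3 -> 8 vectors */
}

/* MSI-X: the Table Size field is encoded as N-1, at most 2048 vectors. */
static inline u16 msix_nr_irqs_to_qsize(unsigned int nr_irqs)
{
	return nr_irqs - 1;
}

static inline unsigned int msix_qsize_to_nr_irqs(u16 qsize)
{
	return qsize + 1;
}

This is why, for example, cdns_pcie_ep_get_msi() now returns 1 << mme and dw_pcie_ep_set_msix() writes nr_irqs - 1 into the QSIZE field.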
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig index da28295b4aac..9a249c65aedc 100644 --- a/drivers/pci/Kconfig +++ b/drivers/pci/Kconfig @@ -21,6 +21,7 @@ config GENERIC_PCI_IOMAP menuconfig PCI bool "PCI support" depends on HAVE_PCI + depends on MMU help This option enables support for the PCI local bus, including support for PCI-X and the foundations for PCI Express support. @@ -222,6 +223,7 @@ config PCI_HYPERV tristate "Hyper-V PCI Frontend" depends on ((X86 && X86_64) || ARM64) && HYPERV && PCI_MSI && SYSFS select PCI_HYPERV_INTERFACE + select IRQ_MSI_LIB help The PCI device frontend driver allows the kernel to import arbitrary PCI devices from a PCI backend to support PCI driver domains. diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c index b6851101ac36..b77fd30bbfd9 100644 --- a/drivers/pci/bus.c +++ b/drivers/pci/bus.c @@ -341,7 +341,6 @@ void pci_bus_add_device(struct pci_dev *dev) { struct device_node *dn = dev->dev.of_node; struct platform_device *pdev; - int retval; /* * Can not put in pci_device_add yet because resources @@ -369,10 +368,10 @@ void pci_bus_add_device(struct pci_dev *dev) pdev->name); } - dev->match_driver = !dn || of_device_is_available(dn); - retval = device_attach(&dev->dev); - if (retval < 0 && retval != -EPROBE_DEFER) - pci_warn(dev, "device attach failed (%d)\n", retval); + if (!dn || of_device_is_available(dn)) + pci_dev_allow_binding(dev); + + device_initial_probe(&dev->dev); pci_dev_assign_added(dev); } diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig index 9800b7681054..41748d083b93 100644 --- a/drivers/pci/controller/Kconfig +++ b/drivers/pci/controller/Kconfig @@ -3,12 +3,17 @@ menu "PCI controller drivers" depends on PCI +config PCI_HOST_COMMON + tristate + select PCI_ECAM + config PCI_AARDVARK tristate "Aardvark PCIe controller" depends on (ARCH_MVEBU && ARM64) || COMPILE_TEST depends on OF depends on PCI_MSI select PCI_BRIDGE_EMUL + select IRQ_MSI_LIB help Add support for Aardvark 64bit PCIe Host Controller. This controller is part of the South Bridge of the Marvel Armada @@ -25,6 +30,7 @@ config PCIE_ALTERA_MSI tristate "Altera PCIe MSI feature" depends on PCIE_ALTERA depends on PCI_MSI + select IRQ_MSI_LIB help Say Y here if you want PCIe MSI support for the Altera FPGA. This MSI driver supports Altera MSI to GIC controller IP. @@ -40,6 +46,7 @@ config PCIE_APPLE depends on OF depends on PCI_MSI select PCI_HOST_COMMON + select IRQ_MSI_LIB help Say Y here if you want to enable PCIe controller support on Apple system-on-chips, like the Apple M1. This is required for the USB @@ -57,6 +64,7 @@ config PCIE_BRCMSTB BMIPS_GENERIC || COMPILE_TEST depends on OF depends on PCI_MSI + select IRQ_MSI_LIB default ARCH_BRCMSTB || BMIPS_GENERIC help Say Y here to enable PCIe host controller support for @@ -93,6 +101,7 @@ config PCIE_IPROC_MSI bool "Broadcom iProc PCIe MSI support" depends on PCIE_IPROC_PLATFORM || PCIE_IPROC_BCMA depends on PCI_MSI + select IRQ_MSI_LIB default ARCH_BCM_IPROC help Say Y here if you want to enable MSI support for Broadcom's iProc @@ -119,10 +128,6 @@ config PCI_FTPCI100 depends on OF default ARCH_GEMINI -config PCI_HOST_COMMON - tristate - select PCI_ECAM - config PCI_HOST_GENERIC tristate "Generic PCI host controller" depends on OF @@ -151,6 +156,7 @@ config PCI_IXP4XX config VMD depends on PCI_MSI && X86_64 && !UML tristate "Intel Volume Management Device Driver" + select IRQ_MSI_LIB help Adds support for the Intel Volume Management Device (VMD). 
VMD is a secondary PCI host bridge that allows PCI Express root ports, @@ -190,6 +196,7 @@ config PCIE_MEDIATEK depends on ARCH_AIROHA || ARCH_MEDIATEK || COMPILE_TEST depends on OF depends on PCI_MSI + select IRQ_MSI_LIB help Say Y here if you want to enable PCIe controller support on MediaTek SoCs. @@ -198,6 +205,7 @@ config PCIE_MEDIATEK_GEN3 tristate "MediaTek Gen3 PCIe controller" depends on ARCH_AIROHA || ARCH_MEDIATEK || COMPILE_TEST depends on PCI_MSI + select IRQ_MSI_LIB help Adds support for PCIe Gen3 MAC controller for MediaTek SoCs. This PCIe controller is compatible with Gen3, Gen2 and Gen1 speed, @@ -227,6 +235,7 @@ config PCI_TEGRA bool "NVIDIA Tegra PCIe controller" depends on ARCH_TEGRA || COMPILE_TEST depends on PCI_MSI + select IRQ_MSI_LIB help Say Y here if you want support for the PCIe host controller found on NVIDIA Tegra SoCs. @@ -235,6 +244,7 @@ config PCIE_RCAR_HOST bool "Renesas R-Car PCIe controller (host mode)" depends on ARCH_RENESAS || COMPILE_TEST depends on PCI_MSI + select IRQ_MSI_LIB help Say Y here if you want PCIe controller support on R-Car SoCs in host mode. @@ -303,6 +313,7 @@ config PCI_XGENE_MSI bool "X-Gene v1 PCIe MSI feature" depends on PCI_XGENE depends on PCI_MSI + select IRQ_MSI_LIB default y help Say Y here if you want PCIe MSI support for the APM X-Gene v1 SoC. @@ -312,6 +323,7 @@ config PCIE_XILINX bool "Xilinx AXI PCIe controller" depends on OF depends on PCI_MSI + select IRQ_MSI_LIB help Say 'Y' here if you want kernel to support the Xilinx AXI PCIe Host Bridge driver. @@ -321,6 +333,7 @@ config PCIE_XILINX_DMA_PL depends on ARCH_ZYNQMP || COMPILE_TEST depends on PCI_MSI select PCI_HOST_COMMON + select IRQ_MSI_LIB help Say 'Y' here if you want kernel support for the Xilinx PL DMA PCIe host bridge. The controller is a Soft IP which can act as @@ -331,6 +344,7 @@ config PCIE_XILINX_NWL bool "Xilinx NWL PCIe controller" depends on ARCH_ZYNQMP || COMPILE_TEST depends on PCI_MSI + select IRQ_MSI_LIB help Say 'Y' here if you want kernel support for Xilinx NWL PCIe controller. The controller can act as Root Port diff --git a/drivers/pci/controller/cadence/Kconfig b/drivers/pci/controller/cadence/Kconfig index 8a0044bb3989..666e16b6367f 100644 --- a/drivers/pci/controller/cadence/Kconfig +++ b/drivers/pci/controller/cadence/Kconfig @@ -4,16 +4,16 @@ menu "Cadence-based PCIe controllers" depends on PCI config PCIE_CADENCE - bool + tristate config PCIE_CADENCE_HOST - bool + tristate depends on OF select IRQ_DOMAIN select PCIE_CADENCE config PCIE_CADENCE_EP - bool + tristate depends on OF depends on PCI_ENDPOINT select PCIE_CADENCE @@ -43,13 +43,14 @@ config PCIE_CADENCE_PLAT_EP different vendors SoCs. config PCI_J721E - bool + tristate + select PCIE_CADENCE_HOST if PCI_J721E_HOST != n + select PCIE_CADENCE_EP if PCI_J721E_EP != n config PCI_J721E_HOST - bool "TI J721E PCIe controller (host mode)" + tristate "TI J721E PCIe controller (host mode)" depends on ARCH_K3 || COMPILE_TEST depends on OF - select PCIE_CADENCE_HOST select PCI_J721E help Say Y here if you want to support the TI J721E PCIe platform @@ -57,11 +58,10 @@ config PCI_J721E_HOST core. 
config PCI_J721E_EP - bool "TI J721E PCIe controller (endpoint mode)" + tristate "TI J721E PCIe controller (endpoint mode)" depends on ARCH_K3 || COMPILE_TEST depends on OF depends on PCI_ENDPOINT - select PCIE_CADENCE_EP select PCI_J721E help Say Y here if you want to support the TI J721E PCIe platform diff --git a/drivers/pci/controller/cadence/pci-j721e.c b/drivers/pci/controller/cadence/pci-j721e.c index ef1cfdae33bb..6c93f39d0288 100644 --- a/drivers/pci/controller/cadence/pci-j721e.c +++ b/drivers/pci/controller/cadence/pci-j721e.c @@ -15,6 +15,7 @@ #include <linux/irqchip/chained_irq.h> #include <linux/irqdomain.h> #include <linux/mfd/syscon.h> +#include <linux/module.h> #include <linux/of.h> #include <linux/pci.h> #include <linux/platform_device.h> @@ -27,6 +28,7 @@ #define cdns_pcie_to_rc(p) container_of(p, struct cdns_pcie_rc, pcie) #define ENABLE_REG_SYS_2 0x108 +#define ENABLE_CLR_REG_SYS_2 0x308 #define STATUS_REG_SYS_2 0x508 #define STATUS_CLR_REG_SYS_2 0x708 #define LINK_DOWN BIT(1) @@ -116,6 +118,15 @@ static irqreturn_t j721e_pcie_link_irq_handler(int irq, void *priv) return IRQ_HANDLED; } +static void j721e_pcie_disable_link_irq(struct j721e_pcie *pcie) +{ + u32 reg; + + reg = j721e_pcie_intd_readl(pcie, ENABLE_CLR_REG_SYS_2); + reg |= pcie->linkdown_irq_regfield; + j721e_pcie_intd_writel(pcie, ENABLE_CLR_REG_SYS_2, reg); +} + static void j721e_pcie_config_link_irq(struct j721e_pcie *pcie) { u32 reg; @@ -153,11 +164,7 @@ static bool j721e_pcie_link_up(struct cdns_pcie *cdns_pcie) u32 reg; reg = j721e_pcie_user_readl(pcie, J721E_PCIE_USER_LINKSTATUS); - reg &= LINK_STATUS; - if (reg == LINK_UP_DL_COMPLETED) - return true; - - return false; + return (reg & LINK_STATUS) == LINK_UP_DL_COMPLETED; } static const struct cdns_pcie_ops j721e_pcie_ops = { @@ -464,7 +471,7 @@ static int j721e_pcie_probe(struct platform_device *pdev) switch (mode) { case PCI_MODE_RC: - if (!IS_ENABLED(CONFIG_PCIE_CADENCE_HOST)) + if (!IS_ENABLED(CONFIG_PCI_J721E_HOST)) return -ENODEV; bridge = devm_pci_alloc_host_bridge(dev, sizeof(*rc)); @@ -483,7 +490,7 @@ static int j721e_pcie_probe(struct platform_device *pdev) pcie->cdns_pcie = cdns_pcie; break; case PCI_MODE_EP: - if (!IS_ENABLED(CONFIG_PCIE_CADENCE_EP)) + if (!IS_ENABLED(CONFIG_PCI_J721E_EP)) return -ENODEV; ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL); @@ -633,9 +640,22 @@ static void j721e_pcie_remove(struct platform_device *pdev) struct j721e_pcie *pcie = platform_get_drvdata(pdev); struct cdns_pcie *cdns_pcie = pcie->cdns_pcie; struct device *dev = &pdev->dev; + struct cdns_pcie_ep *ep; + struct cdns_pcie_rc *rc; + + if (pcie->mode == PCI_MODE_RC) { + rc = container_of(cdns_pcie, struct cdns_pcie_rc, pcie); + cdns_pcie_host_disable(rc); + } else { + ep = container_of(cdns_pcie, struct cdns_pcie_ep, pcie); + cdns_pcie_ep_disable(ep); + } + + gpiod_set_value_cansleep(pcie->reset_gpio, 0); clk_disable_unprepare(pcie->refclk); cdns_pcie_disable_phy(cdns_pcie); + j721e_pcie_disable_link_irq(pcie); pm_runtime_put(dev); pm_runtime_disable(dev); } @@ -730,4 +750,8 @@ static struct platform_driver j721e_pcie_driver = { .pm = pm_sleep_ptr(&j721e_pcie_pm_ops), }, }; -builtin_platform_driver(j721e_pcie_driver); +module_platform_driver(j721e_pcie_driver); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("PCIe controller driver for TI's J721E and related SoCs"); +MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>"); diff --git a/drivers/pci/controller/cadence/pcie-cadence-ep.c b/drivers/pci/controller/cadence/pcie-cadence-ep.c index 
599ec4b1223e..77c5a19b2ab1 100644 --- a/drivers/pci/controller/cadence/pcie-cadence-ep.c +++ b/drivers/pci/controller/cadence/pcie-cadence-ep.c @@ -6,12 +6,14 @@ #include <linux/bitfield.h> #include <linux/delay.h> #include <linux/kernel.h> +#include <linux/module.h> #include <linux/of.h> #include <linux/pci-epc.h> #include <linux/platform_device.h> #include <linux/sizes.h> #include "pcie-cadence.h" +#include "../../pci.h" #define CDNS_PCIE_EP_MIN_APERTURE 128 /* 128 bytes */ #define CDNS_PCIE_EP_IRQ_PCI_ADDR_NONE 0x1 @@ -220,10 +222,11 @@ static void cdns_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn, u8 vfn, clear_bit(r, &ep->ob_region_map); } -static int cdns_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 vfn, u8 mmc) +static int cdns_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 vfn, u8 nr_irqs) { struct cdns_pcie_ep *ep = epc_get_drvdata(epc); struct cdns_pcie *pcie = &ep->pcie; + u8 mmc = order_base_2(nr_irqs); u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET; u16 flags; @@ -262,7 +265,7 @@ static int cdns_pcie_ep_get_msi(struct pci_epc *epc, u8 fn, u8 vfn) */ mme = FIELD_GET(PCI_MSI_FLAGS_QSIZE, flags); - return mme; + return 1 << mme; } static int cdns_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no) @@ -281,12 +284,11 @@ static int cdns_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no) val &= PCI_MSIX_FLAGS_QSIZE; - return val; + return val + 1; } static int cdns_pcie_ep_set_msix(struct pci_epc *epc, u8 fn, u8 vfn, - u16 interrupts, enum pci_barno bir, - u32 offset) + u16 nr_irqs, enum pci_barno bir, u32 offset) { struct cdns_pcie_ep *ep = epc_get_drvdata(epc); struct cdns_pcie *pcie = &ep->pcie; @@ -298,7 +300,7 @@ static int cdns_pcie_ep_set_msix(struct pci_epc *epc, u8 fn, u8 vfn, reg = cap + PCI_MSIX_FLAGS; val = cdns_pcie_ep_fn_readw(pcie, fn, reg); val &= ~PCI_MSIX_FLAGS_QSIZE; - val |= interrupts; + val |= nr_irqs - 1; /* encoded as N-1 */ cdns_pcie_ep_fn_writew(pcie, fn, reg, val); /* Set MSI-X BAR and offset */ @@ -308,7 +310,7 @@ static int cdns_pcie_ep_set_msix(struct pci_epc *epc, u8 fn, u8 vfn, /* Set PBA BAR and offset. 
BAR must match MSI-X BAR */ reg = cap + PCI_MSIX_PBA; - val = (offset + (interrupts * PCI_MSIX_ENTRY_SIZE)) | bir; + val = (offset + (nr_irqs * PCI_MSIX_ENTRY_SIZE)) | bir; cdns_pcie_ep_fn_writel(pcie, fn, reg, val); return 0; @@ -337,10 +339,10 @@ static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn, u8 intx, if (is_asserted) { ep->irq_pending |= BIT(intx); - msg_code = MSG_CODE_ASSERT_INTA + intx; + msg_code = PCIE_MSG_CODE_ASSERT_INTA + intx; } else { ep->irq_pending &= ~BIT(intx); - msg_code = MSG_CODE_DEASSERT_INTA + intx; + msg_code = PCIE_MSG_CODE_DEASSERT_INTA + intx; } spin_lock_irqsave(&ep->lock, flags); @@ -351,7 +353,7 @@ static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn, u8 intx, } spin_unlock_irqrestore(&ep->lock, flags); - offset = CDNS_PCIE_NORMAL_MSG_ROUTING(MSG_ROUTING_LOCAL) | + offset = CDNS_PCIE_NORMAL_MSG_ROUTING(PCIE_MSG_TYPE_R_LOCAL) | CDNS_PCIE_NORMAL_MSG_CODE(msg_code); writel(0, ep->irq_cpu_addr + offset); } @@ -644,6 +646,17 @@ static const struct pci_epc_ops cdns_pcie_epc_ops = { .get_features = cdns_pcie_ep_get_features, }; +void cdns_pcie_ep_disable(struct cdns_pcie_ep *ep) +{ + struct device *dev = ep->pcie.dev; + struct pci_epc *epc = to_pci_epc(dev); + + pci_epc_deinit_notify(epc); + pci_epc_mem_free_addr(epc, ep->irq_phys_addr, ep->irq_cpu_addr, + SZ_128K); + pci_epc_mem_exit(epc); +} +EXPORT_SYMBOL_GPL(cdns_pcie_ep_disable); int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep) { @@ -751,3 +764,8 @@ int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep) return ret; } +EXPORT_SYMBOL_GPL(cdns_pcie_ep_setup); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Cadence PCIe endpoint controller driver"); +MODULE_AUTHOR("Cyrille Pitchen <cyrille.pitchen@free-electrons.com>"); diff --git a/drivers/pci/controller/cadence/pcie-cadence-host.c b/drivers/pci/controller/cadence/pcie-cadence-host.c index 8af95e9da7ce..59a4631de79f 100644 --- a/drivers/pci/controller/cadence/pcie-cadence-host.c +++ b/drivers/pci/controller/cadence/pcie-cadence-host.c @@ -5,6 +5,7 @@ #include <linux/delay.h> #include <linux/kernel.h> +#include <linux/module.h> #include <linux/list_sort.h> #include <linux/of_address.h> #include <linux/of_pci.h> @@ -72,6 +73,7 @@ void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int devfn, return rc->cfg_base + (where & 0xfff); } +EXPORT_SYMBOL_GPL(cdns_pci_map_bus); static struct pci_ops cdns_pcie_host_ops = { .map_bus = cdns_pci_map_bus, @@ -150,6 +152,14 @@ static int cdns_pcie_retrain(struct cdns_pcie *pcie) return ret; } +static void cdns_pcie_host_disable_ptm_response(struct cdns_pcie *pcie) +{ + u32 val; + + val = cdns_pcie_readl(pcie, CDNS_PCIE_LM_PTM_CTRL); + cdns_pcie_writel(pcie, CDNS_PCIE_LM_PTM_CTRL, val & ~CDNS_PCIE_LM_TPM_CTRL_PTMRSEN); +} + static void cdns_pcie_host_enable_ptm_response(struct cdns_pcie *pcie) { u32 val; @@ -175,6 +185,26 @@ static int cdns_pcie_host_start_link(struct cdns_pcie_rc *rc) return ret; } +static void cdns_pcie_host_deinit_root_port(struct cdns_pcie_rc *rc) +{ + struct cdns_pcie *pcie = &rc->pcie; + u32 value, ctrl; + + cdns_pcie_rp_writew(pcie, PCI_CLASS_DEVICE, 0xffff); + cdns_pcie_rp_writeb(pcie, PCI_CLASS_PROG, 0xff); + cdns_pcie_rp_writeb(pcie, PCI_CLASS_REVISION, 0xff); + cdns_pcie_writel(pcie, CDNS_PCIE_LM_ID, 0xffffffff); + cdns_pcie_rp_writew(pcie, PCI_DEVICE_ID, 0xffff); + ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED; + value = ~(CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL(ctrl) | + CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL(ctrl) | + CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_ENABLE | + 
CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_64BITS | + CDNS_PCIE_LM_RC_BAR_CFG_IO_ENABLE | + CDNS_PCIE_LM_RC_BAR_CFG_IO_32BITS); + cdns_pcie_writel(pcie, CDNS_PCIE_LM_RC_BAR_CFG, value); +} + static int cdns_pcie_host_init_root_port(struct cdns_pcie_rc *rc) { struct cdns_pcie *pcie = &rc->pcie; @@ -391,6 +421,32 @@ static int cdns_pcie_host_dma_ranges_cmp(void *priv, const struct list_head *a, return resource_size(entry2->res) - resource_size(entry1->res); } +static void cdns_pcie_host_unmap_dma_ranges(struct cdns_pcie_rc *rc) +{ + struct cdns_pcie *pcie = &rc->pcie; + enum cdns_pcie_rp_bar bar; + u32 value; + + /* Reset inbound configuration for all BARs which were being used */ + for (bar = RP_BAR0; bar <= RP_NO_BAR; bar++) { + if (rc->avail_ib_bar[bar]) + continue; + + cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_RP_BAR_ADDR0(bar), 0); + cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_RP_BAR_ADDR1(bar), 0); + + if (bar == RP_NO_BAR) + continue; + + value = ~(LM_RC_BAR_CFG_CTRL_MEM_64BITS(bar) | + LM_RC_BAR_CFG_CTRL_PREF_MEM_64BITS(bar) | + LM_RC_BAR_CFG_CTRL_MEM_32BITS(bar) | + LM_RC_BAR_CFG_CTRL_PREF_MEM_32BITS(bar) | + LM_RC_BAR_CFG_APERTURE(bar, bar_aperture_mask[bar] + 2)); + cdns_pcie_writel(pcie, CDNS_PCIE_LM_RC_BAR_CFG, value); + } +} + static int cdns_pcie_host_map_dma_ranges(struct cdns_pcie_rc *rc) { struct cdns_pcie *pcie = &rc->pcie; @@ -428,6 +484,29 @@ static int cdns_pcie_host_map_dma_ranges(struct cdns_pcie_rc *rc) return 0; } +static void cdns_pcie_host_deinit_address_translation(struct cdns_pcie_rc *rc) +{ + struct cdns_pcie *pcie = &rc->pcie; + struct pci_host_bridge *bridge = pci_host_bridge_from_priv(rc); + struct resource_entry *entry; + int r; + + cdns_pcie_host_unmap_dma_ranges(rc); + + /* + * Reset outbound region 0 which was reserved for configuration space + * accesses. 
+ */ + cdns_pcie_reset_outbound_region(pcie, 0); + + /* Reset rest of the outbound regions */ + r = 1; + resource_list_for_each_entry(entry, &bridge->windows) { + cdns_pcie_reset_outbound_region(pcie, r); + r++; + } +} + static int cdns_pcie_host_init_address_translation(struct cdns_pcie_rc *rc) { struct cdns_pcie *pcie = &rc->pcie; @@ -485,6 +564,12 @@ static int cdns_pcie_host_init_address_translation(struct cdns_pcie_rc *rc) return cdns_pcie_host_map_dma_ranges(rc); } +static void cdns_pcie_host_deinit(struct cdns_pcie_rc *rc) +{ + cdns_pcie_host_deinit_address_translation(rc); + cdns_pcie_host_deinit_root_port(rc); +} + int cdns_pcie_host_init(struct cdns_pcie_rc *rc) { int err; @@ -495,6 +580,15 @@ int cdns_pcie_host_init(struct cdns_pcie_rc *rc) return cdns_pcie_host_init_address_translation(rc); } +EXPORT_SYMBOL_GPL(cdns_pcie_host_init); + +static void cdns_pcie_host_link_disable(struct cdns_pcie_rc *rc) +{ + struct cdns_pcie *pcie = &rc->pcie; + + cdns_pcie_stop_link(pcie); + cdns_pcie_host_disable_ptm_response(pcie); +} int cdns_pcie_host_link_setup(struct cdns_pcie_rc *rc) { @@ -519,6 +613,20 @@ int cdns_pcie_host_link_setup(struct cdns_pcie_rc *rc) return 0; } +EXPORT_SYMBOL_GPL(cdns_pcie_host_link_setup); + +void cdns_pcie_host_disable(struct cdns_pcie_rc *rc) +{ + struct pci_host_bridge *bridge; + + bridge = pci_host_bridge_from_priv(rc); + pci_stop_root_bus(bridge->bus); + pci_remove_root_bus(bridge->bus); + + cdns_pcie_host_deinit(rc); + cdns_pcie_host_link_disable(rc); +} +EXPORT_SYMBOL_GPL(cdns_pcie_host_disable); int cdns_pcie_host_setup(struct cdns_pcie_rc *rc) { @@ -570,14 +678,10 @@ int cdns_pcie_host_setup(struct cdns_pcie_rc *rc) if (!bridge->ops) bridge->ops = &cdns_pcie_host_ops; - ret = pci_host_probe(bridge); - if (ret < 0) - goto err_init; - - return 0; - - err_init: - pm_runtime_put_sync(dev); - - return ret; + return pci_host_probe(bridge); } +EXPORT_SYMBOL_GPL(cdns_pcie_host_setup); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Cadence PCIe host controller driver"); +MODULE_AUTHOR("Cyrille Pitchen <cyrille.pitchen@free-electrons.com>"); diff --git a/drivers/pci/controller/cadence/pcie-cadence.c b/drivers/pci/controller/cadence/pcie-cadence.c index 204e045aed8c..70a19573440e 100644 --- a/drivers/pci/controller/cadence/pcie-cadence.c +++ b/drivers/pci/controller/cadence/pcie-cadence.c @@ -4,6 +4,7 @@ // Author: Cyrille Pitchen <cyrille.pitchen@free-electrons.com> #include <linux/kernel.h> +#include <linux/module.h> #include <linux/of.h> #include "pcie-cadence.h" @@ -23,6 +24,7 @@ void cdns_pcie_detect_quiet_min_delay_set(struct cdns_pcie *pcie) cdns_pcie_writel(pcie, CDNS_PCIE_LTSSM_CONTROL_CAP, ltssm_control_cap); } +EXPORT_SYMBOL_GPL(cdns_pcie_detect_quiet_min_delay_set); void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 busnr, u8 fn, u32 r, bool is_io, @@ -100,6 +102,7 @@ void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 busnr, u8 fn, cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r), addr0); cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r), addr1); } +EXPORT_SYMBOL_GPL(cdns_pcie_set_outbound_region); void cdns_pcie_set_outbound_region_for_normal_msg(struct cdns_pcie *pcie, u8 busnr, u8 fn, @@ -134,6 +137,7 @@ void cdns_pcie_set_outbound_region_for_normal_msg(struct cdns_pcie *pcie, cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r), addr0); cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r), addr1); } +EXPORT_SYMBOL_GPL(cdns_pcie_set_outbound_region_for_normal_msg); void 
cdns_pcie_reset_outbound_region(struct cdns_pcie *pcie, u32 r) { @@ -146,6 +150,7 @@ void cdns_pcie_reset_outbound_region(struct cdns_pcie *pcie, u32 r) cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r), 0); cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r), 0); } +EXPORT_SYMBOL_GPL(cdns_pcie_reset_outbound_region); void cdns_pcie_disable_phy(struct cdns_pcie *pcie) { @@ -156,6 +161,7 @@ void cdns_pcie_disable_phy(struct cdns_pcie *pcie) phy_exit(pcie->phy[i]); } } +EXPORT_SYMBOL_GPL(cdns_pcie_disable_phy); int cdns_pcie_enable_phy(struct cdns_pcie *pcie) { @@ -184,6 +190,7 @@ err_phy: return ret; } +EXPORT_SYMBOL_GPL(cdns_pcie_enable_phy); int cdns_pcie_init_phy(struct device *dev, struct cdns_pcie *pcie) { @@ -243,6 +250,7 @@ err_phy: return ret; } +EXPORT_SYMBOL_GPL(cdns_pcie_init_phy); static int cdns_pcie_suspend_noirq(struct device *dev) { @@ -271,3 +279,7 @@ const struct dev_pm_ops cdns_pcie_pm_ops = { NOIRQ_SYSTEM_SLEEP_PM_OPS(cdns_pcie_suspend_noirq, cdns_pcie_resume_noirq) }; + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Cadence PCIe controller driver"); +MODULE_AUTHOR("Cyrille Pitchen <cyrille.pitchen@free-electrons.com>"); diff --git a/drivers/pci/controller/cadence/pcie-cadence.h b/drivers/pci/controller/cadence/pcie-cadence.h index 39ee9945c903..1d81c4bf6c6d 100644 --- a/drivers/pci/controller/cadence/pcie-cadence.h +++ b/drivers/pci/controller/cadence/pcie-cadence.h @@ -250,37 +250,6 @@ struct cdns_pcie_rp_ib_bar { struct cdns_pcie; -enum cdns_pcie_msg_code { - MSG_CODE_ASSERT_INTA = 0x20, - MSG_CODE_ASSERT_INTB = 0x21, - MSG_CODE_ASSERT_INTC = 0x22, - MSG_CODE_ASSERT_INTD = 0x23, - MSG_CODE_DEASSERT_INTA = 0x24, - MSG_CODE_DEASSERT_INTB = 0x25, - MSG_CODE_DEASSERT_INTC = 0x26, - MSG_CODE_DEASSERT_INTD = 0x27, -}; - -enum cdns_pcie_msg_routing { - /* Route to Root Complex */ - MSG_ROUTING_TO_RC, - - /* Use Address Routing */ - MSG_ROUTING_BY_ADDR, - - /* Use ID Routing */ - MSG_ROUTING_BY_ID, - - /* Route as Broadcast Message from Root Complex */ - MSG_ROUTING_BCAST, - - /* Local message; terminate at receiver (INTx messages) */ - MSG_ROUTING_LOCAL, - - /* Gather & route to Root Complex (PME_TO_Ack message) */ - MSG_ROUTING_GATHER, -}; - struct cdns_pcie_ops { int (*start_link)(struct cdns_pcie *pcie); void (*stop_link)(struct cdns_pcie *pcie); @@ -519,10 +488,11 @@ static inline bool cdns_pcie_link_up(struct cdns_pcie *pcie) return true; } -#ifdef CONFIG_PCIE_CADENCE_HOST +#if IS_ENABLED(CONFIG_PCIE_CADENCE_HOST) int cdns_pcie_host_link_setup(struct cdns_pcie_rc *rc); int cdns_pcie_host_init(struct cdns_pcie_rc *rc); int cdns_pcie_host_setup(struct cdns_pcie_rc *rc); +void cdns_pcie_host_disable(struct cdns_pcie_rc *rc); void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int devfn, int where); #else @@ -541,6 +511,10 @@ static inline int cdns_pcie_host_setup(struct cdns_pcie_rc *rc) return 0; } +static inline void cdns_pcie_host_disable(struct cdns_pcie_rc *rc) +{ +} + static inline void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int devfn, int where) { @@ -548,13 +522,18 @@ static inline void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int d } #endif -#ifdef CONFIG_PCIE_CADENCE_EP +#if IS_ENABLED(CONFIG_PCIE_CADENCE_EP) int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep); +void cdns_pcie_ep_disable(struct cdns_pcie_ep *ep); #else static inline int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep) { return 0; } + +static inline void cdns_pcie_ep_disable(struct cdns_pcie_ep *ep) +{ +} #endif void 
cdns_pcie_detect_quiet_min_delay_set(struct cdns_pcie *pcie); diff --git a/drivers/pci/controller/dwc/Kconfig b/drivers/pci/controller/dwc/Kconfig index d9f0386396ed..ff6b6d9e18ec 100644 --- a/drivers/pci/controller/dwc/Kconfig +++ b/drivers/pci/controller/dwc/Kconfig @@ -19,6 +19,7 @@ config PCIE_DW_DEBUGFS config PCIE_DW_HOST bool select PCIE_DW + select IRQ_MSI_LIB config PCIE_DW_EP bool @@ -296,6 +297,7 @@ config PCIE_QCOM select PCIE_DW_HOST select CRC8 select PCIE_QCOM_COMMON + select PCI_HOST_COMMON help Say Y here to enable PCIe controller support on Qualcomm SoCs. The PCIe controller uses the DesignWare core plus Qualcomm-specific @@ -402,6 +404,16 @@ config PCIE_UNIPHIER_EP Say Y here if you want PCIe endpoint controller support on UniPhier SoCs. This driver supports Pro5 SoC. +config PCIE_SOPHGO_DW + bool "Sophgo DesignWare PCIe controller (host mode)" + depends on ARCH_SOPHGO || COMPILE_TEST + depends on PCI_MSI + depends on OF + select PCIE_DW_HOST + help + Say Y here if you want PCIe host controller support on + Sophgo SoCs. + config PCIE_SPEAR13XX bool "STMicroelectronics SPEAr PCIe controller" depends on ARCH_SPEAR13XX || COMPILE_TEST diff --git a/drivers/pci/controller/dwc/Makefile b/drivers/pci/controller/dwc/Makefile index 908cb7f345db..6919d27798d1 100644 --- a/drivers/pci/controller/dwc/Makefile +++ b/drivers/pci/controller/dwc/Makefile @@ -20,6 +20,7 @@ obj-$(CONFIG_PCIE_QCOM_EP) += pcie-qcom-ep.o obj-$(CONFIG_PCIE_ARMADA_8K) += pcie-armada8k.o obj-$(CONFIG_PCIE_ARTPEC6) += pcie-artpec6.o obj-$(CONFIG_PCIE_ROCKCHIP_DW) += pcie-dw-rockchip.o +obj-$(CONFIG_PCIE_SOPHGO_DW) += pcie-sophgo.o obj-$(CONFIG_PCIE_INTEL_GW) += pcie-intel-gw.o obj-$(CONFIG_PCIE_KEEMBAY) += pcie-keembay.o obj-$(CONFIG_PCIE_KIRIN) += pcie-kirin.o diff --git a/drivers/pci/controller/dwc/pci-dra7xx.c b/drivers/pci/controller/dwc/pci-dra7xx.c index 33d6bf460ffe..f97f5266d196 100644 --- a/drivers/pci/controller/dwc/pci-dra7xx.c +++ b/drivers/pci/controller/dwc/pci-dra7xx.c @@ -118,12 +118,12 @@ static u64 dra7xx_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 cpu_addr) return cpu_addr & DRA7XX_CPU_TO_BUS_ADDR; } -static int dra7xx_pcie_link_up(struct dw_pcie *pci) +static bool dra7xx_pcie_link_up(struct dw_pcie *pci) { struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci); u32 reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_PHY_CS); - return !!(reg & LINK_UP); + return reg & LINK_UP; } static void dra7xx_pcie_stop_link(struct dw_pcie *pci) @@ -359,8 +359,8 @@ static int dra7xx_pcie_init_irq_domain(struct dw_pcie_rp *pp) irq_set_chained_handler_and_data(pp->irq, dra7xx_pcie_msi_irq_handler, pp); - dra7xx->irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX, - &intx_domain_ops, pp); + dra7xx->irq_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node), + PCI_NUM_INTX, &intx_domain_ops, pp); of_node_put(pcie_intc_node); if (!dra7xx->irq_domain) { dev_err(dev, "Failed to get a INTx IRQ domain\n"); diff --git a/drivers/pci/controller/dwc/pci-exynos.c b/drivers/pci/controller/dwc/pci-exynos.c index ace736b025b1..1f0e98d07109 100644 --- a/drivers/pci/controller/dwc/pci-exynos.c +++ b/drivers/pci/controller/dwc/pci-exynos.c @@ -209,12 +209,12 @@ static struct pci_ops exynos_pci_ops = { .write = exynos_pcie_wr_own_conf, }; -static int exynos_pcie_link_up(struct dw_pcie *pci) +static bool exynos_pcie_link_up(struct dw_pcie *pci) { struct exynos_pcie *ep = to_exynos_pcie(pci); u32 val = exynos_pcie_readl(ep->elbi_base, PCIE_ELBI_RDLH_LINKUP); - return (val & PCIE_ELBI_XMLH_LINKUP); + return 
val & PCIE_ELBI_XMLH_LINKUP; } static int exynos_pcie_host_init(struct dw_pcie_rp *pp) diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c index 5f267dd261b5..80e48746bbaf 100644 --- a/drivers/pci/controller/dwc/pci-imx6.c +++ b/drivers/pci/controller/dwc/pci-imx6.c @@ -45,9 +45,14 @@ #define IMX95_PCIE_PHY_GEN_CTRL 0x0 #define IMX95_PCIE_REF_USE_PAD BIT(17) +#define IMX95_PCIE_PHY_MPLLA_CTRL 0x10 +#define IMX95_PCIE_PHY_MPLL_STATE BIT(30) + #define IMX95_PCIE_SS_RW_REG_0 0xf0 #define IMX95_PCIE_REF_CLKEN BIT(23) #define IMX95_PCIE_PHY_CR_PARA_SEL BIT(9) +#define IMX95_PCIE_SS_RW_REG_1 0xf4 +#define IMX95_PCIE_SYS_AUX_PWR_DET BIT(31) #define IMX95_PE0_GEN_CTRL_1 0x1050 #define IMX95_PCIE_DEVICE_TYPE GENMASK(3, 0) @@ -71,6 +76,9 @@ #define IMX95_SID_MASK GENMASK(5, 0) #define IMX95_MAX_LUT 32 +#define IMX95_PCIE_RST_CTRL 0x3010 +#define IMX95_PCIE_COLD_RST BIT(0) + #define to_imx_pcie(x) dev_get_drvdata((x)->dev) enum imx_pcie_variants { @@ -91,7 +99,7 @@ enum imx_pcie_variants { }; #define IMX_PCIE_FLAG_IMX_PHY BIT(0) -#define IMX_PCIE_FLAG_IMX_SPEED_CHANGE BIT(1) +#define IMX_PCIE_FLAG_SPEED_CHANGE_WORKAROUND BIT(1) #define IMX_PCIE_FLAG_SUPPORTS_SUSPEND BIT(2) #define IMX_PCIE_FLAG_HAS_PHYDRV BIT(3) #define IMX_PCIE_FLAG_HAS_APP_RESET BIT(4) @@ -105,6 +113,7 @@ enum imx_pcie_variants { */ #define IMX_PCIE_FLAG_BROKEN_SUSPEND BIT(9) #define IMX_PCIE_FLAG_HAS_LUT BIT(10) +#define IMX_PCIE_FLAG_8GT_ECN_ERR051586 BIT(11) #define imx_check_flag(pci, val) (pci->drvdata->flags & val) @@ -126,9 +135,15 @@ struct imx_pcie_drvdata { int (*init_phy)(struct imx_pcie *pcie); int (*enable_ref_clk)(struct imx_pcie *pcie, bool enable); int (*core_reset)(struct imx_pcie *pcie, bool assert); + int (*wait_pll_lock)(struct imx_pcie *pcie); const struct dw_pcie_host_ops *ops; }; +struct imx_lut_data { + u32 data1; + u32 data2; +}; + struct imx_pcie { struct dw_pcie *pci; struct gpio_desc *reset_gpiod; @@ -148,6 +163,8 @@ struct imx_pcie { struct regulator *vph; void __iomem *phy_base; + /* LUT data for pcie */ + struct imx_lut_data luts[IMX95_MAX_LUT]; /* power domain for pcie */ struct device *pd_pcie; /* power domain for pcie phy */ @@ -224,6 +241,19 @@ static unsigned int imx_pcie_grp_offset(const struct imx_pcie *imx_pcie) static int imx95_pcie_init_phy(struct imx_pcie *imx_pcie) { + /* + * ERR051624: The Controller Without Vaux Cannot Exit L23 Ready + * Through Beacon or PERST# De-assertion + * + * When the auxiliary power is not available, the controller + * cannot exit from L23 Ready with beacon or PERST# de-assertion + * when main power is not removed. + * + * Workaround: Set SS_RW_REG_1[SYS_AUX_PWR_DET] to 1. 
+ */ + regmap_set_bits(imx_pcie->iomuxc_gpr, IMX95_PCIE_SS_RW_REG_1, + IMX95_PCIE_SYS_AUX_PWR_DET); + regmap_update_bits(imx_pcie->iomuxc_gpr, IMX95_PCIE_SS_RW_REG_0, IMX95_PCIE_PHY_CR_PARA_SEL, @@ -460,6 +490,23 @@ static void imx7d_pcie_wait_for_phy_pll_lock(struct imx_pcie *imx_pcie) dev_err(dev, "PCIe PLL lock timeout\n"); } +static int imx95_pcie_wait_for_phy_pll_lock(struct imx_pcie *imx_pcie) +{ + u32 val; + struct device *dev = imx_pcie->pci->dev; + + if (regmap_read_poll_timeout(imx_pcie->iomuxc_gpr, + IMX95_PCIE_PHY_MPLLA_CTRL, val, + val & IMX95_PCIE_PHY_MPLL_STATE, + PHY_PLL_LOCK_WAIT_USLEEP_MAX, + PHY_PLL_LOCK_WAIT_TIMEOUT)) { + dev_err(dev, "PCIe PLL lock timeout\n"); + return -ETIMEDOUT; + } + + return 0; +} + static int imx_setup_phy_mpll(struct imx_pcie *imx_pcie) { unsigned long phy_rate = 0; @@ -773,10 +820,46 @@ static int imx7d_pcie_core_reset(struct imx_pcie *imx_pcie, bool assert) return 0; } +static int imx95_pcie_core_reset(struct imx_pcie *imx_pcie, bool assert) +{ + u32 val; + + if (assert) { + /* + * From i.MX95 PCIe PHY perspective, the COLD reset toggle + * should be complete after power-up by the following sequence. + * > 10us(at power-up) + * > 10ns(warm reset) + * |<------------>| + * ______________ + * phy_reset ____/ \________________ + * ____________ + * ref_clk_en_______________________/ + * Toggle COLD reset aligned with this sequence for i.MX95 PCIe. + */ + regmap_set_bits(imx_pcie->iomuxc_gpr, IMX95_PCIE_RST_CTRL, + IMX95_PCIE_COLD_RST); + /* + * Make sure the write to IMX95_PCIE_RST_CTRL is flushed to the + * hardware by doing a read. Otherwise, there is no guarantee + * that the write has reached the hardware before udelay(). + */ + regmap_read_bypassed(imx_pcie->iomuxc_gpr, IMX95_PCIE_RST_CTRL, + &val); + udelay(15); + regmap_clear_bits(imx_pcie->iomuxc_gpr, IMX95_PCIE_RST_CTRL, + IMX95_PCIE_COLD_RST); + regmap_read_bypassed(imx_pcie->iomuxc_gpr, IMX95_PCIE_RST_CTRL, + &val); + udelay(10); + } + + return 0; +} + static void imx_pcie_assert_core_reset(struct imx_pcie *imx_pcie) { reset_control_assert(imx_pcie->pciephy_reset); - reset_control_assert(imx_pcie->apps_reset); if (imx_pcie->drvdata->core_reset) imx_pcie->drvdata->core_reset(imx_pcie, true); @@ -788,7 +871,6 @@ static void imx_pcie_assert_core_reset(struct imx_pcie *imx_pcie) static int imx_pcie_deassert_core_reset(struct imx_pcie *imx_pcie) { reset_control_deassert(imx_pcie->pciephy_reset); - reset_control_deassert(imx_pcie->apps_reset); if (imx_pcie->drvdata->core_reset) imx_pcie->drvdata->core_reset(imx_pcie, false); @@ -860,6 +942,12 @@ static int imx_pcie_start_link(struct dw_pcie *pci) u32 tmp; int ret; + if (!(imx_pcie->drvdata->flags & + IMX_PCIE_FLAG_SPEED_CHANGE_WORKAROUND)) { + imx_pcie_ltssm_enable(dev); + return 0; + } + /* * Force Gen1 operation when starting the link. In case the link is * started in Gen2 mode, there is a possibility the devices on the @@ -875,11 +963,11 @@ static int imx_pcie_start_link(struct dw_pcie *pci) /* Start LTSSM. 
*/ imx_pcie_ltssm_enable(dev); - ret = dw_pcie_wait_for_link(pci); - if (ret) - goto err_reset_phy; - if (pci->max_link_speed > 1) { + ret = dw_pcie_wait_for_link(pci); + if (ret) + goto err_reset_phy; + /* Allow faster modes after the link is up */ dw_pcie_dbi_ro_wr_en(pci); tmp = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP); @@ -896,34 +984,15 @@ static int imx_pcie_start_link(struct dw_pcie *pci) dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, tmp); dw_pcie_dbi_ro_wr_dis(pci); - if (imx_pcie->drvdata->flags & - IMX_PCIE_FLAG_IMX_SPEED_CHANGE) { - - /* - * On i.MX7, DIRECT_SPEED_CHANGE behaves differently - * from i.MX6 family when no link speed transition - * occurs and we go Gen1 -> yep, Gen1. The difference - * is that, in such case, it will not be cleared by HW - * which will cause the following code to report false - * failure. - */ - ret = imx_pcie_wait_for_speed_change(imx_pcie); - if (ret) { - dev_err(dev, "Failed to bring link up!\n"); - goto err_reset_phy; - } - } - - /* Make sure link training is finished as well! */ - ret = dw_pcie_wait_for_link(pci); - if (ret) + ret = imx_pcie_wait_for_speed_change(imx_pcie); + if (ret) { + dev_err(dev, "Failed to bring link up!\n"); goto err_reset_phy; + } } else { dev_info(dev, "Link: Only Gen1 is enabled\n"); } - tmp = dw_pcie_readw_dbi(pci, offset + PCI_EXP_LNKSTA); - dev_info(dev, "Link up, Gen%i\n", tmp & PCI_EXP_LNKSTA_CLS); return 0; err_reset_phy: @@ -992,7 +1061,10 @@ static int imx_pcie_add_lut(struct imx_pcie *imx_pcie, u16 rid, u8 sid) data1 |= IMX95_PE0_LUT_VLD; regmap_write(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA1, data1); - data2 = IMX95_PE0_LUT_MASK; /* Match all bits of RID */ + if (imx_pcie->drvdata->mode == DW_PCIE_EP_TYPE) + data2 = 0x7; /* In the EP mode, only 'Device ID' is required */ + else + data2 = IMX95_PE0_LUT_MASK; /* Match all bits of RID */ data2 |= FIELD_PREP(IMX95_PE0_LUT_REQID, rid); regmap_write(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA2, data2); @@ -1025,18 +1097,14 @@ static void imx_pcie_remove_lut(struct imx_pcie *imx_pcie, u16 rid) } } -static int imx_pcie_enable_device(struct pci_host_bridge *bridge, - struct pci_dev *pdev) +static int imx_pcie_add_lut_by_rid(struct imx_pcie *imx_pcie, u32 rid) { - struct imx_pcie *imx_pcie = to_imx_pcie(to_dw_pcie_from_pp(bridge->sysdata)); - u32 sid_i, sid_m, rid = pci_dev_id(pdev); + struct device *dev = imx_pcie->pci->dev; struct device_node *target; - struct device *dev; + u32 sid_i, sid_m; int err_i, err_m; u32 sid = 0; - dev = imx_pcie->pci->dev; - target = NULL; err_i = of_map_id(dev->of_node, rid, "iommu-map", "iommu-map-mask", &target, &sid_i); @@ -1111,6 +1179,13 @@ static int imx_pcie_enable_device(struct pci_host_bridge *bridge, return imx_pcie_add_lut(imx_pcie, rid, sid); } +static int imx_pcie_enable_device(struct pci_host_bridge *bridge, struct pci_dev *pdev) +{ + struct imx_pcie *imx_pcie = to_imx_pcie(to_dw_pcie_from_pp(bridge->sysdata)); + + return imx_pcie_add_lut_by_rid(imx_pcie, pci_dev_id(pdev)); +} + static void imx_pcie_disable_device(struct pci_host_bridge *bridge, struct pci_dev *pdev) { @@ -1176,12 +1251,21 @@ static int imx_pcie_host_init(struct dw_pcie_rp *pp) } } + /* Make sure that PCIe LTSSM is cleared */ + imx_pcie_ltssm_disable(dev); + ret = imx_pcie_deassert_core_reset(imx_pcie); if (ret < 0) { dev_err(dev, "pcie deassert core reset failed: %d\n", ret); goto err_phy_off; } + if (imx_pcie->drvdata->wait_pll_lock) { + ret = imx_pcie->drvdata->wait_pll_lock(imx_pcie); + if (ret < 0) + goto err_phy_off; + } + 
imx_setup_phy_mpll(imx_pcie); return 0; @@ -1214,6 +1298,32 @@ static void imx_pcie_host_exit(struct dw_pcie_rp *pp) regulator_disable(imx_pcie->vpcie); } +static void imx_pcie_host_post_init(struct dw_pcie_rp *pp) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct imx_pcie *imx_pcie = to_imx_pcie(pci); + u32 val; + + if (imx_pcie->drvdata->flags & IMX_PCIE_FLAG_8GT_ECN_ERR051586) { + /* + * ERR051586: Compliance with 8GT/s Receiver Impedance ECN + * + * The default value of GEN3_RELATED_OFF[GEN3_ZRXDC_NONCOMPL] + * is 1 which makes receiver non-compliant with the ZRX-DC + * parameter for 2.5 GT/s when operating at 8 GT/s or higher. + * It causes unnecessary timeout in L1. + * + * Workaround: Program GEN3_RELATED_OFF[GEN3_ZRXDC_NONCOMPL] + * to 0. + */ + dw_pcie_dbi_ro_wr_en(pci); + val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF); + val &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL; + dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val); + dw_pcie_dbi_ro_wr_dis(pci); + } +} + /* * In old DWC implementations, PCIE_ATU_INHIBIT_PAYLOAD in iATU Ctrl2 * register is reserved, so the generic DWC implementation of sending the @@ -1239,6 +1349,7 @@ static const struct dw_pcie_host_ops imx_pcie_host_ops = { static const struct dw_pcie_host_ops imx_pcie_host_dw_pme_ops = { .init = imx_pcie_host_init, .deinit = imx_pcie_host_exit, + .post_init = imx_pcie_host_post_init, }; static const struct dw_pcie_ops dw_pcie_ops = { @@ -1281,6 +1392,8 @@ static const struct pci_epc_features imx8m_pcie_epc_features = { .msix_capable = false, .bar[BAR_1] = { .type = BAR_RESERVED, }, .bar[BAR_3] = { .type = BAR_RESERVED, }, + .bar[BAR_4] = { .type = BAR_FIXED, .fixed_size = SZ_256, }, + .bar[BAR_5] = { .type = BAR_RESERVED, }, .align = SZ_64K, }; @@ -1350,6 +1463,7 @@ static int imx_add_pcie_ep(struct imx_pcie *imx_pcie, dev_err(dev, "failed to initialize endpoint\n"); return ret; } + imx_pcie_host_post_init(pp); ret = dw_pcie_ep_init_registers(ep); if (ret) { @@ -1360,9 +1474,6 @@ static int imx_add_pcie_ep(struct imx_pcie *imx_pcie, pci_epc_init_notify(ep->epc); - /* Start LTSSM. 
*/ - imx_pcie_ltssm_enable(dev); - return 0; } @@ -1386,6 +1497,42 @@ static void imx_pcie_msi_save_restore(struct imx_pcie *imx_pcie, bool save) } } +static void imx_pcie_lut_save(struct imx_pcie *imx_pcie) +{ + u32 data1, data2; + int i; + + for (i = 0; i < IMX95_MAX_LUT; i++) { + regmap_write(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_ACSCTRL, + IMX95_PEO_LUT_RWA | i); + regmap_read(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA1, &data1); + regmap_read(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA2, &data2); + if (data1 & IMX95_PE0_LUT_VLD) { + imx_pcie->luts[i].data1 = data1; + imx_pcie->luts[i].data2 = data2; + } else { + imx_pcie->luts[i].data1 = 0; + imx_pcie->luts[i].data2 = 0; + } + } +} + +static void imx_pcie_lut_restore(struct imx_pcie *imx_pcie) +{ + int i; + + for (i = 0; i < IMX95_MAX_LUT; i++) { + if ((imx_pcie->luts[i].data1 & IMX95_PE0_LUT_VLD) == 0) + continue; + + regmap_write(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA1, + imx_pcie->luts[i].data1); + regmap_write(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_DATA2, + imx_pcie->luts[i].data2); + regmap_write(imx_pcie->iomuxc_gpr, IMX95_PE0_LUT_ACSCTRL, i); + } +} + static int imx_pcie_suspend_noirq(struct device *dev) { struct imx_pcie *imx_pcie = dev_get_drvdata(dev); @@ -1394,6 +1541,8 @@ static int imx_pcie_suspend_noirq(struct device *dev) return 0; imx_pcie_msi_save_restore(imx_pcie, true); + if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_LUT)) + imx_pcie_lut_save(imx_pcie); if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_BROKEN_SUSPEND)) { /* * The minimum for a workaround would be to set PERST# and to @@ -1438,6 +1587,8 @@ static int imx_pcie_resume_noirq(struct device *dev) if (ret) return ret; } + if (imx_check_flag(imx_pcie, IMX_PCIE_FLAG_HAS_LUT)) + imx_pcie_lut_restore(imx_pcie); imx_pcie_msi_save_restore(imx_pcie, false); return 0; @@ -1619,6 +1770,12 @@ static int imx_pcie_probe(struct platform_device *pdev) ret = imx_add_pcie_ep(imx_pcie, pdev); if (ret < 0) return ret; + + /* + * FIXME: Only single Device (EPF) is supported due to the + * Endpoint framework limitation. 
+ */ + imx_pcie_add_lut_by_rid(imx_pcie, 0); } else { pci->pp.use_atu_msg = true; ret = dw_pcie_host_init(&pci->pp); @@ -1649,7 +1806,7 @@ static const struct imx_pcie_drvdata drvdata[] = { [IMX6Q] = { .variant = IMX6Q, .flags = IMX_PCIE_FLAG_IMX_PHY | - IMX_PCIE_FLAG_IMX_SPEED_CHANGE | + IMX_PCIE_FLAG_SPEED_CHANGE_WORKAROUND | IMX_PCIE_FLAG_BROKEN_SUSPEND | IMX_PCIE_FLAG_SUPPORTS_SUSPEND, .dbi_length = 0x200, @@ -1665,7 +1822,7 @@ static const struct imx_pcie_drvdata drvdata[] = { [IMX6SX] = { .variant = IMX6SX, .flags = IMX_PCIE_FLAG_IMX_PHY | - IMX_PCIE_FLAG_IMX_SPEED_CHANGE | + IMX_PCIE_FLAG_SPEED_CHANGE_WORKAROUND | IMX_PCIE_FLAG_SUPPORTS_SUSPEND, .gpr = "fsl,imx6q-iomuxc-gpr", .ltssm_off = IOMUXC_GPR12, @@ -1680,7 +1837,7 @@ static const struct imx_pcie_drvdata drvdata[] = { [IMX6QP] = { .variant = IMX6QP, .flags = IMX_PCIE_FLAG_IMX_PHY | - IMX_PCIE_FLAG_IMX_SPEED_CHANGE | + IMX_PCIE_FLAG_SPEED_CHANGE_WORKAROUND | IMX_PCIE_FLAG_SUPPORTS_SUSPEND, .dbi_length = 0x200, .gpr = "fsl,imx6q-iomuxc-gpr", @@ -1747,12 +1904,15 @@ static const struct imx_pcie_drvdata drvdata[] = { .variant = IMX95, .flags = IMX_PCIE_FLAG_HAS_SERDES | IMX_PCIE_FLAG_HAS_LUT | + IMX_PCIE_FLAG_8GT_ECN_ERR051586 | IMX_PCIE_FLAG_SUPPORTS_SUSPEND, .ltssm_off = IMX95_PE0_GEN_CTRL_3, .ltssm_mask = IMX95_PCIE_LTSSM_EN, .mode_off[0] = IMX95_PE0_GEN_CTRL_1, .mode_mask[0] = IMX95_PCIE_DEVICE_TYPE, + .core_reset = imx95_pcie_core_reset, .init_phy = imx95_pcie_init_phy, + .wait_pll_lock = imx95_pcie_wait_for_phy_pll_lock, }, [IMX8MQ_EP] = { .variant = IMX8MQ_EP, @@ -1764,7 +1924,7 @@ static const struct imx_pcie_drvdata drvdata[] = { .mode_mask[0] = IMX6Q_GPR12_DEVICE_TYPE, .mode_off[1] = IOMUXC_GPR12, .mode_mask[1] = IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE, - .epc_features = &imx8m_pcie_epc_features, + .epc_features = &imx8q_pcie_epc_features, .init_phy = imx8mq_pcie_init_phy, .enable_ref_clk = imx8mm_pcie_enable_ref_clk, }, @@ -1799,12 +1959,15 @@ static const struct imx_pcie_drvdata drvdata[] = { [IMX95_EP] = { .variant = IMX95_EP, .flags = IMX_PCIE_FLAG_HAS_SERDES | + IMX_PCIE_FLAG_8GT_ECN_ERR051586 | IMX_PCIE_FLAG_SUPPORT_64BIT, .ltssm_off = IMX95_PE0_GEN_CTRL_3, .ltssm_mask = IMX95_PCIE_LTSSM_EN, .mode_off[0] = IMX95_PE0_GEN_CTRL_1, .mode_mask[0] = IMX95_PCIE_DEVICE_TYPE, .init_phy = imx95_pcie_init_phy, + .core_reset = imx95_pcie_core_reset, + .wait_pll_lock = imx95_pcie_wait_for_phy_pll_lock, .epc_features = &imx95_pcie_epc_features, .mode = DW_PCIE_EP_TYPE, }, diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c index 76a37368ae4f..2b2632e513b5 100644 --- a/drivers/pci/controller/dwc/pci-keystone.c +++ b/drivers/pci/controller/dwc/pci-keystone.c @@ -492,13 +492,12 @@ static struct pci_ops ks_pcie_ops = { * @pci: A pointer to the dw_pcie structure which holds the DesignWare PCIe host * controller driver information. 
*/ -static int ks_pcie_link_up(struct dw_pcie *pci) +static bool ks_pcie_link_up(struct dw_pcie *pci) { u32 val; val = dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG0); - val &= PORT_LOGIC_LTSSM_STATE_MASK; - return (val == PORT_LOGIC_LTSSM_STATE_L0); + return (val & PORT_LOGIC_LTSSM_STATE_MASK) == PORT_LOGIC_LTSSM_STATE_L0; } static void ks_pcie_stop_link(struct dw_pcie *pci) @@ -761,7 +760,7 @@ static int ks_pcie_config_intx_irq(struct keystone_pcie *ks_pcie) ks_pcie); } - intx_irq_domain = irq_domain_add_linear(intc_np, PCI_NUM_INTX, + intx_irq_domain = irq_domain_create_linear(of_fwnode_handle(intc_np), PCI_NUM_INTX, &ks_pcie_intx_irq_domain_ops, NULL); if (!intx_irq_domain) { dev_err(dev, "Failed to add irq domain for INTX irqs\n"); diff --git a/drivers/pci/controller/dwc/pci-meson.c b/drivers/pci/controller/dwc/pci-meson.c index db9482a113e9..787469d1b396 100644 --- a/drivers/pci/controller/dwc/pci-meson.c +++ b/drivers/pci/controller/dwc/pci-meson.c @@ -335,7 +335,7 @@ static struct pci_ops meson_pci_ops = { .write = pci_generic_config_write, }; -static int meson_pcie_link_up(struct dw_pcie *pci) +static bool meson_pcie_link_up(struct dw_pcie *pci) { struct meson_pcie *mp = to_meson_pcie(pci); struct device *dev = pci->dev; @@ -363,7 +363,7 @@ static int meson_pcie_link_up(struct dw_pcie *pci) dev_dbg(dev, "speed_okay\n"); if (smlh_up && rdlh_up && ltssm_up && speed_okay) - return 1; + return true; cnt++; @@ -371,7 +371,7 @@ static int meson_pcie_link_up(struct dw_pcie *pci) } while (cnt < WAIT_LINKUP_TIMEOUT); dev_err(dev, "error: wait linkup timeout\n"); - return 0; + return false; } static int meson_pcie_host_init(struct dw_pcie_rp *pp) diff --git a/drivers/pci/controller/dwc/pcie-amd-mdb.c b/drivers/pci/controller/dwc/pcie-amd-mdb.c index 4eb2a4e8189d..9f7251a16d32 100644 --- a/drivers/pci/controller/dwc/pcie-amd-mdb.c +++ b/drivers/pci/controller/dwc/pcie-amd-mdb.c @@ -290,8 +290,8 @@ static int amd_mdb_pcie_init_irq_domains(struct amd_mdb_pcie *pcie, return -ENODEV; } - pcie->mdb_domain = irq_domain_add_linear(pcie_intc_node, 32, - &event_domain_ops, pcie); + pcie->mdb_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node), 32, + &event_domain_ops, pcie); if (!pcie->mdb_domain) { err = -ENOMEM; dev_err(dev, "Failed to add MDB domain\n"); @@ -300,8 +300,8 @@ static int amd_mdb_pcie_init_irq_domains(struct amd_mdb_pcie *pcie, irq_domain_update_bus_token(pcie->mdb_domain, DOMAIN_BUS_NEXUS); - pcie->intx_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX, - &amd_intx_domain_ops, pcie); + pcie->intx_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node), + PCI_NUM_INTX, &amd_intx_domain_ops, pcie); if (!pcie->intx_domain) { err = -ENOMEM; dev_err(dev, "Failed to add INTx domain\n"); diff --git a/drivers/pci/controller/dwc/pcie-armada8k.c b/drivers/pci/controller/dwc/pcie-armada8k.c index b5c599ccaacf..c2650fd0d458 100644 --- a/drivers/pci/controller/dwc/pcie-armada8k.c +++ b/drivers/pci/controller/dwc/pcie-armada8k.c @@ -139,7 +139,7 @@ static int armada8k_pcie_setup_phys(struct armada8k_pcie *pcie) return ret; } -static int armada8k_pcie_link_up(struct dw_pcie *pci) +static bool armada8k_pcie_link_up(struct dw_pcie *pci) { u32 reg; u32 mask = PCIE_GLB_STS_RDLH_LINK_UP | PCIE_GLB_STS_PHY_LINK_UP; @@ -147,10 +147,10 @@ static int armada8k_pcie_link_up(struct dw_pcie *pci) reg = dw_pcie_readl_dbi(pci, PCIE_GLOBAL_STATUS_REG); if ((reg & mask) == mask) - return 1; + return true; dev_dbg(pci->dev, "No link detected (Global-Status: 0x%08x).\n", reg); - return 0; 
+ return false; } static int armada8k_pcie_start_link(struct dw_pcie *pci) diff --git a/drivers/pci/controller/dwc/pcie-designware-debugfs.c b/drivers/pci/controller/dwc/pcie-designware-debugfs.c index 9e6f4d00f262..0fbf86c0b97e 100644 --- a/drivers/pci/controller/dwc/pcie-designware-debugfs.c +++ b/drivers/pci/controller/dwc/pcie-designware-debugfs.c @@ -642,16 +642,262 @@ static void dwc_pcie_ltssm_debugfs_init(struct dw_pcie *pci, struct dentry *dir) &dwc_pcie_ltssm_status_ops); } +static int dw_pcie_ptm_check_capability(void *drvdata) +{ + struct dw_pcie *pci = drvdata; + + pci->ptm_vsec_offset = dw_pcie_find_ptm_capability(pci); + + return pci->ptm_vsec_offset; +} + +static int dw_pcie_ptm_context_update_write(void *drvdata, u8 mode) +{ + struct dw_pcie *pci = drvdata; + u32 val; + + if (mode == PCIE_PTM_CONTEXT_UPDATE_AUTO) { + val = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_RES_REQ_CTRL); + val |= PTM_REQ_AUTO_UPDATE_ENABLED; + dw_pcie_writel_dbi(pci, pci->ptm_vsec_offset + PTM_RES_REQ_CTRL, val); + } else if (mode == PCIE_PTM_CONTEXT_UPDATE_MANUAL) { + val = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_RES_REQ_CTRL); + val &= ~PTM_REQ_AUTO_UPDATE_ENABLED; + val |= PTM_REQ_START_UPDATE; + dw_pcie_writel_dbi(pci, pci->ptm_vsec_offset + PTM_RES_REQ_CTRL, val); + } else { + return -EINVAL; + } + + return 0; +} + +static int dw_pcie_ptm_context_update_read(void *drvdata, u8 *mode) +{ + struct dw_pcie *pci = drvdata; + u32 val; + + val = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_RES_REQ_CTRL); + if (FIELD_GET(PTM_REQ_AUTO_UPDATE_ENABLED, val)) + *mode = PCIE_PTM_CONTEXT_UPDATE_AUTO; + else + /* + * PTM_REQ_START_UPDATE is a self clearing register bit. So if + * PTM_REQ_AUTO_UPDATE_ENABLED is not set, then it implies that + * manual update is used. 
+ */ + *mode = PCIE_PTM_CONTEXT_UPDATE_MANUAL; + + return 0; +} + +static int dw_pcie_ptm_context_valid_write(void *drvdata, bool valid) +{ + struct dw_pcie *pci = drvdata; + u32 val; + + if (valid) { + val = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_RES_REQ_CTRL); + val |= PTM_RES_CCONTEXT_VALID; + dw_pcie_writel_dbi(pci, pci->ptm_vsec_offset + PTM_RES_REQ_CTRL, val); + } else { + val = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_RES_REQ_CTRL); + val &= ~PTM_RES_CCONTEXT_VALID; + dw_pcie_writel_dbi(pci, pci->ptm_vsec_offset + PTM_RES_REQ_CTRL, val); + } + + return 0; +} + +static int dw_pcie_ptm_context_valid_read(void *drvdata, bool *valid) +{ + struct dw_pcie *pci = drvdata; + u32 val; + + val = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_RES_REQ_CTRL); + *valid = !!FIELD_GET(PTM_RES_CCONTEXT_VALID, val); + + return 0; +} + +static int dw_pcie_ptm_local_clock_read(void *drvdata, u64 *clock) +{ + struct dw_pcie *pci = drvdata; + u32 msb, lsb; + + do { + msb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_LOCAL_MSB); + lsb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_LOCAL_LSB); + } while (msb != dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_LOCAL_MSB)); + + *clock = ((u64) msb) << 32 | lsb; + + return 0; +} + +static int dw_pcie_ptm_master_clock_read(void *drvdata, u64 *clock) +{ + struct dw_pcie *pci = drvdata; + u32 msb, lsb; + + do { + msb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_MASTER_MSB); + lsb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_MASTER_LSB); + } while (msb != dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_MASTER_MSB)); + + *clock = ((u64) msb) << 32 | lsb; + + return 0; +} + +static int dw_pcie_ptm_t1_read(void *drvdata, u64 *clock) +{ + struct dw_pcie *pci = drvdata; + u32 msb, lsb; + + do { + msb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T1_T2_MSB); + lsb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T1_T2_LSB); + } while (msb != dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T1_T2_MSB)); + + *clock = ((u64) msb) << 32 | lsb; + + return 0; +} + +static int dw_pcie_ptm_t2_read(void *drvdata, u64 *clock) +{ + struct dw_pcie *pci = drvdata; + u32 msb, lsb; + + do { + msb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T1_T2_MSB); + lsb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T1_T2_LSB); + } while (msb != dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T1_T2_MSB)); + + *clock = ((u64) msb) << 32 | lsb; + + return 0; +} + +static int dw_pcie_ptm_t3_read(void *drvdata, u64 *clock) +{ + struct dw_pcie *pci = drvdata; + u32 msb, lsb; + + do { + msb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T3_T4_MSB); + lsb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T3_T4_LSB); + } while (msb != dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T3_T4_MSB)); + + *clock = ((u64) msb) << 32 | lsb; + + return 0; +} + +static int dw_pcie_ptm_t4_read(void *drvdata, u64 *clock) +{ + struct dw_pcie *pci = drvdata; + u32 msb, lsb; + + do { + msb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T3_T4_MSB); + lsb = dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T3_T4_LSB); + } while (msb != dw_pcie_readl_dbi(pci, pci->ptm_vsec_offset + PTM_T3_T4_MSB)); + + *clock = ((u64) msb) << 32 | lsb; + + return 0; +} + +static bool dw_pcie_ptm_context_update_visible(void *drvdata) +{ + struct dw_pcie *pci = drvdata; + + return pci->mode == DW_PCIE_EP_TYPE; +} + +static bool dw_pcie_ptm_context_valid_visible(void *drvdata) +{ + struct dw_pcie *pci = drvdata; + + return pci->mode == DW_PCIE_RC_TYPE; 
+} + +static bool dw_pcie_ptm_local_clock_visible(void *drvdata) +{ + /* PTM local clock is always visible */ + return true; +} + +static bool dw_pcie_ptm_master_clock_visible(void *drvdata) +{ + struct dw_pcie *pci = drvdata; + + return pci->mode == DW_PCIE_EP_TYPE; +} + +static bool dw_pcie_ptm_t1_visible(void *drvdata) +{ + struct dw_pcie *pci = drvdata; + + return pci->mode == DW_PCIE_EP_TYPE; +} + +static bool dw_pcie_ptm_t2_visible(void *drvdata) +{ + struct dw_pcie *pci = drvdata; + + return pci->mode == DW_PCIE_RC_TYPE; +} + +static bool dw_pcie_ptm_t3_visible(void *drvdata) +{ + struct dw_pcie *pci = drvdata; + + return pci->mode == DW_PCIE_RC_TYPE; +} + +static bool dw_pcie_ptm_t4_visible(void *drvdata) +{ + struct dw_pcie *pci = drvdata; + + return pci->mode == DW_PCIE_EP_TYPE; +} + +static const struct pcie_ptm_ops dw_pcie_ptm_ops = { + .check_capability = dw_pcie_ptm_check_capability, + .context_update_write = dw_pcie_ptm_context_update_write, + .context_update_read = dw_pcie_ptm_context_update_read, + .context_valid_write = dw_pcie_ptm_context_valid_write, + .context_valid_read = dw_pcie_ptm_context_valid_read, + .local_clock_read = dw_pcie_ptm_local_clock_read, + .master_clock_read = dw_pcie_ptm_master_clock_read, + .t1_read = dw_pcie_ptm_t1_read, + .t2_read = dw_pcie_ptm_t2_read, + .t3_read = dw_pcie_ptm_t3_read, + .t4_read = dw_pcie_ptm_t4_read, + .context_update_visible = dw_pcie_ptm_context_update_visible, + .context_valid_visible = dw_pcie_ptm_context_valid_visible, + .local_clock_visible = dw_pcie_ptm_local_clock_visible, + .master_clock_visible = dw_pcie_ptm_master_clock_visible, + .t1_visible = dw_pcie_ptm_t1_visible, + .t2_visible = dw_pcie_ptm_t2_visible, + .t3_visible = dw_pcie_ptm_t3_visible, + .t4_visible = dw_pcie_ptm_t4_visible, +}; + void dwc_pcie_debugfs_deinit(struct dw_pcie *pci) { if (!pci->debugfs) return; + pcie_ptm_destroy_debugfs(pci->ptm_debugfs); dwc_pcie_rasdes_debugfs_deinit(pci); debugfs_remove_recursive(pci->debugfs->debug_dir); } -void dwc_pcie_debugfs_init(struct dw_pcie *pci) +void dwc_pcie_debugfs_init(struct dw_pcie *pci, enum dw_pcie_device_mode mode) { char dirname[DWC_DEBUGFS_BUF_MAX]; struct device *dev = pci->dev; @@ -674,4 +920,8 @@ void dwc_pcie_debugfs_init(struct dw_pcie *pci) err); dwc_pcie_ltssm_debugfs_init(pci, dir); + + pci->mode = mode; + pci->ptm_debugfs = pcie_ptm_create_debugfs(pci->dev, pci, + &dw_pcie_ptm_ops); } diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c index 1a0bf9341542..0ae54a94809b 100644 --- a/drivers/pci/controller/dwc/pcie-designware-ep.c +++ b/drivers/pci/controller/dwc/pcie-designware-ep.c @@ -256,11 +256,11 @@ static unsigned int dw_pcie_ep_get_rebar_offset(struct dw_pcie *pci, return offset; reg = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL); - nbars = (reg & PCI_REBAR_CTRL_NBAR_MASK) >> PCI_REBAR_CTRL_NBAR_SHIFT; + nbars = FIELD_GET(PCI_REBAR_CTRL_NBAR_MASK, reg); for (i = 0; i < nbars; i++, offset += PCI_REBAR_CTRL) { reg = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL); - bar_index = reg & PCI_REBAR_CTRL_BAR_IDX; + bar_index = FIELD_GET(PCI_REBAR_CTRL_BAR_IDX, reg); if (bar_index == bar) return offset; } @@ -532,15 +532,16 @@ static int dw_pcie_ep_get_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no) val = FIELD_GET(PCI_MSI_FLAGS_QSIZE, val); - return val; + return 1 << val; } static int dw_pcie_ep_set_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no, - u8 interrupts) + u8 nr_irqs) { struct dw_pcie_ep *ep = epc_get_drvdata(epc); 
struct dw_pcie *pci = to_dw_pcie_from_ep(ep); struct dw_pcie_ep_func *ep_func; + u8 mmc = order_base_2(nr_irqs); u32 val, reg; ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no); @@ -550,7 +551,7 @@ static int dw_pcie_ep_set_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no, reg = ep_func->msi_cap + PCI_MSI_FLAGS; val = dw_pcie_ep_readw_dbi(ep, func_no, reg); val &= ~PCI_MSI_FLAGS_QMASK; - val |= FIELD_PREP(PCI_MSI_FLAGS_QMASK, interrupts); + val |= FIELD_PREP(PCI_MSI_FLAGS_QMASK, mmc); dw_pcie_dbi_ro_wr_en(pci); dw_pcie_ep_writew_dbi(ep, func_no, reg, val); dw_pcie_dbi_ro_wr_dis(pci); @@ -575,11 +576,11 @@ static int dw_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no) val &= PCI_MSIX_FLAGS_QSIZE; - return val; + return val + 1; } static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no, - u16 interrupts, enum pci_barno bir, u32 offset) + u16 nr_irqs, enum pci_barno bir, u32 offset) { struct dw_pcie_ep *ep = epc_get_drvdata(epc); struct dw_pcie *pci = to_dw_pcie_from_ep(ep); @@ -595,7 +596,7 @@ static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no, reg = ep_func->msix_cap + PCI_MSIX_FLAGS; val = dw_pcie_ep_readw_dbi(ep, func_no, reg); val &= ~PCI_MSIX_FLAGS_QSIZE; - val |= interrupts; + val |= nr_irqs - 1; /* encoded as N-1 */ dw_pcie_writew_dbi(pci, reg, val); reg = ep_func->msix_cap + PCI_MSIX_TABLE; @@ -603,7 +604,7 @@ static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no, dw_pcie_ep_writel_dbi(ep, func_no, reg, val); reg = ep_func->msix_cap + PCI_MSIX_PBA; - val = (offset + (interrupts * PCI_MSIX_ENTRY_SIZE)) | bir; + val = (offset + (nr_irqs * PCI_MSIX_ENTRY_SIZE)) | bir; dw_pcie_ep_writel_dbi(ep, func_no, reg, val); dw_pcie_dbi_ro_wr_dis(pci); @@ -671,7 +672,7 @@ static const struct pci_epc_ops epc_ops = { * @ep: DWC EP device * @func_no: Function number of the endpoint * - * Return: 0 if success, errono otherwise. + * Return: 0 if success, errno otherwise. */ int dw_pcie_ep_raise_intx_irq(struct dw_pcie_ep *ep, u8 func_no) { @@ -690,7 +691,7 @@ EXPORT_SYMBOL_GPL(dw_pcie_ep_raise_intx_irq); * @func_no: Function number of the endpoint * @interrupt_num: Interrupt number to be raised * - * Return: 0 if success, errono otherwise. + * Return: 0 if success, errno otherwise. */ int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no, u8 interrupt_num) @@ -875,8 +876,7 @@ static void dw_pcie_ep_init_non_sticky_registers(struct dw_pcie *pci) if (offset) { reg = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL); - nbars = (reg & PCI_REBAR_CTRL_NBAR_MASK) >> - PCI_REBAR_CTRL_NBAR_SHIFT; + nbars = FIELD_GET(PCI_REBAR_CTRL_NBAR_MASK, reg); /* * PCIe r6.0, sec 7.8.6.2 require us to support at least one @@ -897,7 +897,7 @@ static void dw_pcie_ep_init_non_sticky_registers(struct dw_pcie *pci) * is why RESBAR_CAP_REG is written here. 
*/ val = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL); - bar = val & PCI_REBAR_CTRL_BAR_IDX; + bar = FIELD_GET(PCI_REBAR_CTRL_BAR_IDX, val); if (ep->epf_bar[bar]) pci_epc_bar_size_to_rebar_cap(ep->epf_bar[bar]->size, &val); else @@ -1013,7 +1013,7 @@ int dw_pcie_ep_init_registers(struct dw_pcie_ep *ep) dw_pcie_ep_init_non_sticky_registers(pci); - dwc_pcie_debugfs_init(pci); + dwc_pcie_debugfs_init(pci, DW_PCIE_EP_TYPE); return 0; diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c index ecc33f6789e3..952f8594b501 100644 --- a/drivers/pci/controller/dwc/pcie-designware-host.c +++ b/drivers/pci/controller/dwc/pcie-designware-host.c @@ -10,6 +10,7 @@ #include <linux/iopoll.h> #include <linux/irqchip/chained_irq.h> +#include <linux/irqchip/irq-msi-lib.h> #include <linux/irqdomain.h> #include <linux/msi.h> #include <linux/of_address.h> @@ -23,35 +24,21 @@ static struct pci_ops dw_pcie_ops; static struct pci_ops dw_child_pcie_ops; -static void dw_msi_ack_irq(struct irq_data *d) -{ - irq_chip_ack_parent(d); -} - -static void dw_msi_mask_irq(struct irq_data *d) -{ - pci_msi_mask_irq(d); - irq_chip_mask_parent(d); -} - -static void dw_msi_unmask_irq(struct irq_data *d) -{ - pci_msi_unmask_irq(d); - irq_chip_unmask_parent(d); -} - -static struct irq_chip dw_pcie_msi_irq_chip = { - .name = "PCI-MSI", - .irq_ack = dw_msi_ack_irq, - .irq_mask = dw_msi_mask_irq, - .irq_unmask = dw_msi_unmask_irq, -}; - -static struct msi_domain_info dw_pcie_msi_domain_info = { - .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | - MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX | - MSI_FLAG_MULTI_PCI_MSI, - .chip = &dw_pcie_msi_irq_chip, +#define DW_PCIE_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \ + MSI_FLAG_USE_DEF_CHIP_OPS | \ + MSI_FLAG_NO_AFFINITY | \ + MSI_FLAG_PCI_MSI_MASK_PARENT) +#define DW_PCIE_MSI_FLAGS_SUPPORTED (MSI_FLAG_MULTI_PCI_MSI | \ + MSI_FLAG_PCI_MSIX | \ + MSI_GENERIC_FLAGS_MASK) + +static const struct msi_parent_ops dw_pcie_msi_parent_ops = { + .required_flags = DW_PCIE_MSI_FLAGS_REQUIRED, + .supported_flags = DW_PCIE_MSI_FLAGS_SUPPORTED, + .bus_select_token = DOMAIN_BUS_PCI_MSI, + .chip_flags = MSI_CHIP_FLAG_SET_ACK, + .prefix = "DW-", + .init_dev_msi_info = msi_lib_init_dev_msi_info, }; /* MSI int handler */ @@ -227,30 +214,23 @@ static const struct irq_domain_ops dw_pcie_msi_domain_ops = { int dw_pcie_allocate_domains(struct dw_pcie_rp *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); - struct fwnode_handle *fwnode = of_node_to_fwnode(pci->dev->of_node); - - pp->irq_domain = irq_domain_create_linear(fwnode, pp->num_vectors, - &dw_pcie_msi_domain_ops, pp); + struct irq_domain_info info = { + .fwnode = dev_fwnode(pci->dev), + .ops = &dw_pcie_msi_domain_ops, + .size = pp->num_vectors, + .host_data = pp, + }; + + pp->irq_domain = msi_create_parent_irq_domain(&info, &dw_pcie_msi_parent_ops); if (!pp->irq_domain) { dev_err(pci->dev, "Failed to create IRQ domain\n"); return -ENOMEM; } - irq_domain_update_bus_token(pp->irq_domain, DOMAIN_BUS_NEXUS); - - pp->msi_domain = pci_msi_create_irq_domain(fwnode, - &dw_pcie_msi_domain_info, - pp->irq_domain); - if (!pp->msi_domain) { - dev_err(pci->dev, "Failed to create MSI domain\n"); - irq_domain_remove(pp->irq_domain); - return -ENOMEM; - } - return 0; } -static void dw_pcie_free_msi(struct dw_pcie_rp *pp) +void dw_pcie_free_msi(struct dw_pcie_rp *pp) { u32 ctrl; @@ -260,22 +240,36 @@ static void dw_pcie_free_msi(struct dw_pcie_rp *pp) NULL, NULL); } - 
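For reference, the conversion above collapses the old per-driver irq_chip plus pci_msi_create_irq_domain() pair into a single MSI parent domain. A condensed sketch of the new shape, with the flags copied from the code above and an invented "EXAMPLE-" prefix:

#include <linux/irqchip/irq-msi-lib.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>

static const struct msi_parent_ops example_msi_parent_ops = {
	.required_flags		= MSI_FLAG_USE_DEF_DOM_OPS |
				  MSI_FLAG_USE_DEF_CHIP_OPS |
				  MSI_FLAG_NO_AFFINITY |
				  MSI_FLAG_PCI_MSI_MASK_PARENT,
	.supported_flags	= MSI_FLAG_MULTI_PCI_MSI |
				  MSI_FLAG_PCI_MSIX |
				  MSI_GENERIC_FLAGS_MASK,
	.bus_select_token	= DOMAIN_BUS_PCI_MSI,
	.chip_flags		= MSI_CHIP_FLAG_SET_ACK, /* ack in the parent chip */
	.prefix			= "EXAMPLE-",
	.init_dev_msi_info	= msi_lib_init_dev_msi_info,
};

/* One domain now serves both roles; no separate msi_domain to track. */
static struct irq_domain *example_create_domain(struct device *dev,
						const struct irq_domain_ops *ops,
						unsigned int nr_vectors,
						void *host_data)
{
	struct irq_domain_info info = {
		.fwnode		= dev_fwnode(dev),
		.ops		= ops,
		.size		= nr_vectors,
		.host_data	= host_data,
	};

	return msi_create_parent_irq_domain(&info, &example_msi_parent_ops);
}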
irq_domain_remove(pp->msi_domain); irq_domain_remove(pp->irq_domain); } +EXPORT_SYMBOL_GPL(dw_pcie_free_msi); -static void dw_pcie_msi_init(struct dw_pcie_rp *pp) +void dw_pcie_msi_init(struct dw_pcie_rp *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); u64 msi_target = (u64)pp->msi_data; + u32 ctrl, num_ctrls; if (!pci_msi_enabled() || !pp->has_msi_ctrl) return; + num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL; + + /* Initialize IRQ Status array */ + for (ctrl = 0; ctrl < num_ctrls; ctrl++) { + dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + + (ctrl * MSI_REG_CTRL_BLOCK_SIZE), + pp->irq_mask[ctrl]); + dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_ENABLE + + (ctrl * MSI_REG_CTRL_BLOCK_SIZE), + ~0); + } + /* Program the msi_data */ dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_LO, lower_32_bits(msi_target)); dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_HI, upper_32_bits(msi_target)); } +EXPORT_SYMBOL_GPL(dw_pcie_msi_init); static int dw_pcie_parse_split_msi_irq(struct dw_pcie_rp *pp) { @@ -317,7 +311,7 @@ static int dw_pcie_parse_split_msi_irq(struct dw_pcie_rp *pp) return 0; } -static int dw_pcie_msi_host_init(struct dw_pcie_rp *pp) +int dw_pcie_msi_host_init(struct dw_pcie_rp *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); struct device *dev = pci->dev; @@ -391,6 +385,7 @@ static int dw_pcie_msi_host_init(struct dw_pcie_rp *pp) return 0; } +EXPORT_SYMBOL_GPL(dw_pcie_msi_host_init); static void dw_pcie_host_request_msg_tlp_res(struct dw_pcie_rp *pp) { @@ -523,6 +518,13 @@ int dw_pcie_host_init(struct dw_pcie_rp *pp) dw_pcie_iatu_detect(pci); + if (pci->num_lanes < 1) + pci->num_lanes = dw_pcie_link_get_max_link_width(pci); + + ret = of_pci_get_equalization_presets(dev, &pp->presets, pci->num_lanes); + if (ret) + goto err_free_msi; + /* * Allocate the resource for MSG TLP before programming the iATU * outbound window in dw_pcie_setup_rc(). Since the allocation depends @@ -567,7 +569,7 @@ int dw_pcie_host_init(struct dw_pcie_rp *pp) if (pp->ops->post_init) pp->ops->post_init(pp); - dwc_pcie_debugfs_init(pci); + dwc_pcie_debugfs_init(pci, DW_PCIE_RC_TYPE); return 0; @@ -828,10 +830,81 @@ static int dw_pcie_iatu_setup(struct dw_pcie_rp *pp) return 0; } +static void dw_pcie_program_presets(struct dw_pcie_rp *pp, enum pci_bus_speed speed) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + u8 lane_eq_offset, lane_reg_size, cap_id; + u8 *presets; + u32 cap; + int i; + + if (speed == PCIE_SPEED_8_0GT) { + presets = (u8 *)pp->presets.eq_presets_8gts; + lane_eq_offset = PCI_SECPCI_LE_CTRL; + cap_id = PCI_EXT_CAP_ID_SECPCI; + /* For data rate of 8 GT/S each lane equalization control is 16bits wide*/ + lane_reg_size = 0x2; + } else if (speed == PCIE_SPEED_16_0GT) { + presets = pp->presets.eq_presets_Ngts[EQ_PRESET_TYPE_16GTS - 1]; + lane_eq_offset = PCI_PL_16GT_LE_CTRL; + cap_id = PCI_EXT_CAP_ID_PL_16GT; + lane_reg_size = 0x1; + } else if (speed == PCIE_SPEED_32_0GT) { + presets = pp->presets.eq_presets_Ngts[EQ_PRESET_TYPE_32GTS - 1]; + lane_eq_offset = PCI_PL_32GT_LE_CTRL; + cap_id = PCI_EXT_CAP_ID_PL_32GT; + lane_reg_size = 0x1; + } else if (speed == PCIE_SPEED_64_0GT) { + presets = pp->presets.eq_presets_Ngts[EQ_PRESET_TYPE_64GTS - 1]; + lane_eq_offset = PCI_PL_64GT_LE_CTRL; + cap_id = PCI_EXT_CAP_ID_PL_64GT; + lane_reg_size = 0x1; + } else { + return; + } + + if (presets[0] == PCI_EQ_RESV) + return; + + cap = dw_pcie_find_ext_capability(pci, cap_id); + if (!cap) + return; + + /* + * Write preset values to the registers byte-by-byte for the given + * number of lanes and register size. 
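+	 * For example, at 8.0 GT/s each lane control register is two bytes
+	 * (lane_reg_size is 2), so a x4 link takes eight single-byte
+	 * writes, while at 16.0 GT/s and above it is one byte per lane.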
+ */ + for (i = 0; i < pci->num_lanes * lane_reg_size; i++) + dw_pcie_writeb_dbi(pci, cap + lane_eq_offset + i, presets[i]); +} + +static void dw_pcie_config_presets(struct dw_pcie_rp *pp) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + enum pci_bus_speed speed = pcie_link_speed[pci->max_link_speed]; + + /* + * Lane equalization settings need to be applied for all data rates the + * controller supports and for all supported lanes. + */ + + if (speed >= PCIE_SPEED_8_0GT) + dw_pcie_program_presets(pp, PCIE_SPEED_8_0GT); + + if (speed >= PCIE_SPEED_16_0GT) + dw_pcie_program_presets(pp, PCIE_SPEED_16_0GT); + + if (speed >= PCIE_SPEED_32_0GT) + dw_pcie_program_presets(pp, PCIE_SPEED_32_0GT); + + if (speed >= PCIE_SPEED_64_0GT) + dw_pcie_program_presets(pp, PCIE_SPEED_64_0GT); +} + int dw_pcie_setup_rc(struct dw_pcie_rp *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); - u32 val, ctrl, num_ctrls; + u32 val; int ret; /* @@ -842,20 +915,6 @@ int dw_pcie_setup_rc(struct dw_pcie_rp *pp) dw_pcie_setup(pci); - if (pp->has_msi_ctrl) { - num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL; - - /* Initialize IRQ Status array */ - for (ctrl = 0; ctrl < num_ctrls; ctrl++) { - dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + - (ctrl * MSI_REG_CTRL_BLOCK_SIZE), - pp->irq_mask[ctrl]); - dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_ENABLE + - (ctrl * MSI_REG_CTRL_BLOCK_SIZE), - ~0); - } - } - dw_pcie_msi_init(pp); /* Setup RC BARs */ @@ -881,6 +940,7 @@ int dw_pcie_setup_rc(struct dw_pcie_rp *pp) PCI_COMMAND_MASTER | PCI_COMMAND_SERR; dw_pcie_writel_dbi(pci, PCI_COMMAND, val); + dw_pcie_config_presets(pp); /* * If the platform provides its own child bus config accesses, it means * the platform uses its own address translation component rather than diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c index 97d76d3dc066..89aad5a08928 100644 --- a/drivers/pci/controller/dwc/pcie-designware.c +++ b/drivers/pci/controller/dwc/pcie-designware.c @@ -54,6 +54,14 @@ static const char * const dw_pcie_core_rsts[DW_PCIE_NUM_CORE_RSTS] = { [DW_PCIE_PWR_RST] = "pwr", }; +static const struct dwc_pcie_vsec_id dwc_pcie_ptm_vsec_ids[] = { + { .vendor_id = PCI_VENDOR_ID_QCOM, /* EP */ + .vsec_id = 0x03, .vsec_rev = 0x1 }, + { .vendor_id = PCI_VENDOR_ID_QCOM, /* RC */ + .vsec_id = 0x04, .vsec_rev = 0x1 }, + { } +}; + static int dw_pcie_get_clocks(struct dw_pcie *pci) { int i, ret; @@ -330,6 +338,12 @@ u16 dw_pcie_find_rasdes_capability(struct dw_pcie *pci) } EXPORT_SYMBOL_GPL(dw_pcie_find_rasdes_capability); +u16 dw_pcie_find_ptm_capability(struct dw_pcie *pci) +{ + return dw_pcie_find_vsec_capability(pci, dwc_pcie_ptm_vsec_ids); +} +EXPORT_SYMBOL_GPL(dw_pcie_find_ptm_capability); + int dw_pcie_read(void __iomem *addr, int size, u32 *val) { if (!IS_ALIGNED((uintptr_t)addr, size)) { @@ -688,18 +702,26 @@ int dw_pcie_wait_for_link(struct dw_pcie *pci) int retries; /* Check if the link is up or not */ - for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) { + for (retries = 0; retries < PCIE_LINK_WAIT_MAX_RETRIES; retries++) { if (dw_pcie_link_up(pci)) break; - msleep(LINK_WAIT_SLEEP_MS); + msleep(PCIE_LINK_WAIT_SLEEP_MS); } - if (retries >= LINK_WAIT_MAX_RETRIES) { + if (retries >= PCIE_LINK_WAIT_MAX_RETRIES) { dev_info(pci->dev, "Phy link never came up\n"); return -ETIMEDOUT; } + /* + * As per PCIe r6.0, sec 6.6.1, a Downstream Port that supports Link + * speeds greater than 5.0 GT/s, software must wait a minimum of 100 ms + * after Link training completes before sending 
a Configuration Request. + */ + if (pci->max_link_speed > 2) + msleep(PCIE_RESET_CONFIG_WAIT_MS); + offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP); val = dw_pcie_readw_dbi(pci, offset + PCI_EXP_LNKSTA); @@ -711,7 +733,7 @@ int dw_pcie_wait_for_link(struct dw_pcie *pci) } EXPORT_SYMBOL_GPL(dw_pcie_wait_for_link); -int dw_pcie_link_up(struct dw_pcie *pci) +bool dw_pcie_link_up(struct dw_pcie *pci) { u32 val; @@ -781,6 +803,14 @@ static void dw_pcie_link_set_max_speed(struct dw_pcie *pci) } +int dw_pcie_link_get_max_link_width(struct dw_pcie *pci) +{ + u8 cap = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP); + u32 lnkcap = dw_pcie_readl_dbi(pci, cap + PCI_EXP_LNKCAP); + + return FIELD_GET(PCI_EXP_LNKCAP_MLW, lnkcap); +} + static void dw_pcie_link_set_max_link_width(struct dw_pcie *pci, u32 num_lanes) { u32 lnkcap, lwsc, plc; @@ -797,22 +827,19 @@ static void dw_pcie_link_set_max_link_width(struct dw_pcie *pci, u32 num_lanes) /* Set link width speed control register */ lwsc = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL); lwsc &= ~PORT_LOGIC_LINK_WIDTH_MASK; + lwsc |= PORT_LOGIC_LINK_WIDTH_1_LANES; switch (num_lanes) { case 1: plc |= PORT_LINK_MODE_1_LANES; - lwsc |= PORT_LOGIC_LINK_WIDTH_1_LANES; break; case 2: plc |= PORT_LINK_MODE_2_LANES; - lwsc |= PORT_LOGIC_LINK_WIDTH_2_LANES; break; case 4: plc |= PORT_LINK_MODE_4_LANES; - lwsc |= PORT_LOGIC_LINK_WIDTH_4_LANES; break; case 8: plc |= PORT_LINK_MODE_8_LANES; - lwsc |= PORT_LOGIC_LINK_WIDTH_8_LANES; break; default: dev_err(pci->dev, "num-lanes %u: invalid value\n", num_lanes); diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h index 56aafdbcdaca..00f52d472dcd 100644 --- a/drivers/pci/controller/dwc/pcie-designware.h +++ b/drivers/pci/controller/dwc/pcie-designware.h @@ -25,6 +25,8 @@ #include <linux/pci-epc.h> #include <linux/pci-epf.h> +#include "../../pci.h" + /* DWC PCIe IP-core versions (native support since v4.70a) */ #define DW_PCIE_VER_365A 0x3336352a #define DW_PCIE_VER_460A 0x3436302a @@ -60,10 +62,6 @@ #define dw_pcie_cap_set(_pci, _cap) \ set_bit(DW_PCIE_CAP_ ## _cap, &(_pci)->caps) -/* Parameters for the waiting for link up routine */ -#define LINK_WAIT_MAX_RETRIES 10 -#define LINK_WAIT_SLEEP_MS 90 - /* Parameters for the waiting for iATU enabled routine */ #define LINK_WAIT_MAX_IATU_RETRIES 5 #define LINK_WAIT_IATU 9 @@ -260,6 +258,21 @@ #define PCIE_RAS_DES_EVENT_COUNTER_DATA 0xc +/* PTM register definitions */ +#define PTM_RES_REQ_CTRL 0x8 +#define PTM_RES_CCONTEXT_VALID BIT(0) +#define PTM_REQ_AUTO_UPDATE_ENABLED BIT(0) +#define PTM_REQ_START_UPDATE BIT(1) + +#define PTM_LOCAL_LSB 0x10 +#define PTM_LOCAL_MSB 0x14 +#define PTM_T1_T2_LSB 0x18 +#define PTM_T1_T2_MSB 0x1c +#define PTM_T3_T4_LSB 0x28 +#define PTM_T3_T4_MSB 0x2c +#define PTM_MASTER_LSB 0x38 +#define PTM_MASTER_MSB 0x3c + /* * The default address offset between dbi_base and atu_base. 
Root controller * drivers are not required to initialize atu_base if the offset matches this @@ -400,7 +413,6 @@ struct dw_pcie_rp { const struct dw_pcie_host_ops *ops; int msi_irq[MAX_MSI_CTRLS]; struct irq_domain *irq_domain; - struct irq_domain *msi_domain; dma_addr_t msi_data; struct irq_chip *msi_irq_chip; u32 num_vectors; @@ -412,6 +424,7 @@ struct dw_pcie_rp { int msg_atu_index; struct resource *msg_res; bool use_linkup_irq; + struct pci_eq_presets presets; }; struct dw_pcie_ep_ops { @@ -462,7 +475,7 @@ struct dw_pcie_ops { size_t size, u32 val); void (*write_dbi2)(struct dw_pcie *pcie, void __iomem *base, u32 reg, size_t size, u32 val); - int (*link_up)(struct dw_pcie *pcie); + bool (*link_up)(struct dw_pcie *pcie); enum dw_pcie_ltssm (*get_ltssm)(struct dw_pcie *pcie); int (*start_link)(struct dw_pcie *pcie); void (*stop_link)(struct dw_pcie *pcie); @@ -503,6 +516,9 @@ struct dw_pcie { struct gpio_desc *pe_rst; bool suspended; struct debugfs_info *debugfs; + enum dw_pcie_device_mode mode; + u16 ptm_vsec_offset; + struct pci_ptm_debugfs *ptm_debugfs; /* * If iATU input addresses are offset from CPU physical addresses, @@ -530,6 +546,7 @@ void dw_pcie_version_detect(struct dw_pcie *pci); u8 dw_pcie_find_capability(struct dw_pcie *pci, u8 cap); u16 dw_pcie_find_ext_capability(struct dw_pcie *pci, u8 cap); u16 dw_pcie_find_rasdes_capability(struct dw_pcie *pci); +u16 dw_pcie_find_ptm_capability(struct dw_pcie *pci); int dw_pcie_read(void __iomem *addr, int size, u32 *val); int dw_pcie_write(void __iomem *addr, int size, u32 val); @@ -537,9 +554,10 @@ int dw_pcie_write(void __iomem *addr, int size, u32 val); u32 dw_pcie_read_dbi(struct dw_pcie *pci, u32 reg, size_t size); void dw_pcie_write_dbi(struct dw_pcie *pci, u32 reg, size_t size, u32 val); void dw_pcie_write_dbi2(struct dw_pcie *pci, u32 reg, size_t size, u32 val); -int dw_pcie_link_up(struct dw_pcie *pci); +bool dw_pcie_link_up(struct dw_pcie *pci); void dw_pcie_upconfig_setup(struct dw_pcie *pci); int dw_pcie_wait_for_link(struct dw_pcie *pci); +int dw_pcie_link_get_max_link_width(struct dw_pcie *pci); int dw_pcie_prog_outbound_atu(struct dw_pcie *pci, const struct dw_pcie_ob_atu_cfg *atu); int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int type, @@ -736,6 +754,9 @@ static inline enum dw_pcie_ltssm dw_pcie_get_ltssm(struct dw_pcie *pci) int dw_pcie_suspend_noirq(struct dw_pcie *pci); int dw_pcie_resume_noirq(struct dw_pcie *pci); irqreturn_t dw_handle_msi_irq(struct dw_pcie_rp *pp); +void dw_pcie_msi_init(struct dw_pcie_rp *pp); +int dw_pcie_msi_host_init(struct dw_pcie_rp *pp); +void dw_pcie_free_msi(struct dw_pcie_rp *pp); int dw_pcie_setup_rc(struct dw_pcie_rp *pp); int dw_pcie_host_init(struct dw_pcie_rp *pp); void dw_pcie_host_deinit(struct dw_pcie_rp *pp); @@ -758,6 +779,17 @@ static inline irqreturn_t dw_handle_msi_irq(struct dw_pcie_rp *pp) return IRQ_NONE; } +static inline void dw_pcie_msi_init(struct dw_pcie_rp *pp) +{ } + +static inline int dw_pcie_msi_host_init(struct dw_pcie_rp *pp) +{ + return -ENODEV; +} + +static inline void dw_pcie_free_msi(struct dw_pcie_rp *pp) +{ } + static inline int dw_pcie_setup_rc(struct dw_pcie_rp *pp) { return 0; @@ -871,10 +903,11 @@ dw_pcie_ep_get_func_from_ep(struct dw_pcie_ep *ep, u8 func_no) #endif #ifdef CONFIG_PCIE_DW_DEBUGFS -void dwc_pcie_debugfs_init(struct dw_pcie *pci); +void dwc_pcie_debugfs_init(struct dw_pcie *pci, enum dw_pcie_device_mode mode); void dwc_pcie_debugfs_deinit(struct dw_pcie *pci); #else -static inline void dwc_pcie_debugfs_init(struct 
dw_pcie *pci) +static inline void dwc_pcie_debugfs_init(struct dw_pcie *pci, + enum dw_pcie_device_mode mode) { } static inline void dwc_pcie_debugfs_deinit(struct dw_pcie *pci) diff --git a/drivers/pci/controller/dwc/pcie-dw-rockchip.c b/drivers/pci/controller/dwc/pcie-dw-rockchip.c index c624b7ebd118..b5f5eee5a50e 100644 --- a/drivers/pci/controller/dwc/pcie-dw-rockchip.c +++ b/drivers/pci/controller/dwc/pcie-dw-rockchip.c @@ -8,6 +8,7 @@ * Author: Simon Xue <xxm@rock-chips.com> */ +#include <linux/bitfield.h> #include <linux/clk.h> #include <linux/gpio/consumer.h> #include <linux/irqchip/chained_irq.h> @@ -21,6 +22,7 @@ #include <linux/regmap.h> #include <linux/reset.h> +#include "../../pci.h" #include "pcie-designware.h" /* @@ -33,26 +35,38 @@ #define to_rockchip_pcie(x) dev_get_drvdata((x)->dev) -#define PCIE_CLIENT_RC_MODE HIWORD_UPDATE_BIT(0x40) -#define PCIE_CLIENT_EP_MODE HIWORD_UPDATE(0xf0, 0x0) -#define PCIE_CLIENT_ENABLE_LTSSM HIWORD_UPDATE_BIT(0xc) -#define PCIE_CLIENT_DISABLE_LTSSM HIWORD_UPDATE(0x0c, 0x8) -#define PCIE_CLIENT_INTR_STATUS_MISC 0x10 -#define PCIE_CLIENT_INTR_MASK_MISC 0x24 -#define PCIE_SMLH_LINKUP BIT(16) -#define PCIE_RDLH_LINKUP BIT(17) -#define PCIE_LINKUP (PCIE_SMLH_LINKUP | PCIE_RDLH_LINKUP) -#define PCIE_RDLH_LINK_UP_CHGED BIT(1) -#define PCIE_LINK_REQ_RST_NOT_INT BIT(2) -#define PCIE_L0S_ENTRY 0x11 -#define PCIE_CLIENT_GENERAL_CONTROL 0x0 +/* General Control Register */ +#define PCIE_CLIENT_GENERAL_CON 0x0 +#define PCIE_CLIENT_RC_MODE HIWORD_UPDATE_BIT(0x40) +#define PCIE_CLIENT_EP_MODE HIWORD_UPDATE(0xf0, 0x0) +#define PCIE_CLIENT_ENABLE_LTSSM HIWORD_UPDATE_BIT(0xc) +#define PCIE_CLIENT_DISABLE_LTSSM HIWORD_UPDATE(0x0c, 0x8) + +/* Interrupt Status Register Related to Legacy Interrupt */ #define PCIE_CLIENT_INTR_STATUS_LEGACY 0x8 + +/* Interrupt Status Register Related to Miscellaneous Operation */ +#define PCIE_CLIENT_INTR_STATUS_MISC 0x10 +#define PCIE_RDLH_LINK_UP_CHGED BIT(1) +#define PCIE_LINK_REQ_RST_NOT_INT BIT(2) + +/* Interrupt Mask Register Related to Legacy Interrupt */ #define PCIE_CLIENT_INTR_MASK_LEGACY 0x1c -#define PCIE_CLIENT_GENERAL_DEBUG 0x104 + +/* Interrupt Mask Register Related to Miscellaneous Operation */ +#define PCIE_CLIENT_INTR_MASK_MISC 0x24 + +/* Hot Reset Control Register */ #define PCIE_CLIENT_HOT_RESET_CTRL 0x180 +#define PCIE_LTSSM_APP_DLY2_EN BIT(1) +#define PCIE_LTSSM_APP_DLY2_DONE BIT(3) +#define PCIE_LTSSM_ENABLE_ENHANCE BIT(4) + +/* LTSSM Status Register */ #define PCIE_CLIENT_LTSSM_STATUS 0x300 -#define PCIE_LTSSM_ENABLE_ENHANCE BIT(4) -#define PCIE_LTSSM_STATUS_MASK GENMASK(5, 0) +#define PCIE_LINKUP 0x3 +#define PCIE_LINKUP_MASK GENMASK(17, 16) +#define PCIE_LTSSM_STATUS_MASK GENMASK(5, 0) struct rockchip_pcie { struct dw_pcie pci; @@ -144,8 +158,8 @@ static int rockchip_pcie_init_irq_domain(struct rockchip_pcie *rockchip) return -EINVAL; } - rockchip->irq_domain = irq_domain_add_linear(intc, PCI_NUM_INTX, - &intx_domain_ops, rockchip); + rockchip->irq_domain = irq_domain_create_linear(of_fwnode_handle(intc), PCI_NUM_INTX, + &intx_domain_ops, rockchip); of_node_put(intc); if (!rockchip->irq_domain) { dev_err(dev, "failed to get a INTx IRQ domain\n"); @@ -163,25 +177,36 @@ static u32 rockchip_pcie_get_ltssm(struct rockchip_pcie *rockchip) static void rockchip_pcie_enable_ltssm(struct rockchip_pcie *rockchip) { rockchip_pcie_writel_apb(rockchip, PCIE_CLIENT_ENABLE_LTSSM, - PCIE_CLIENT_GENERAL_CONTROL); + PCIE_CLIENT_GENERAL_CON); } static void rockchip_pcie_disable_ltssm(struct rockchip_pcie *rockchip) { 
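The PCIE_CLIENT_* values above rely on Rockchip's hiword-mask register layout: the upper 16 bits of a write select which of the lower 16 bits may change, so no read-modify-write is needed. A sketch of the idiom, assuming the conventional macro shape rather than this driver's exact definition:

#include <stdint.h>

/* Hiword-mask update: bits 31:16 enable writes to bits 15:0. */
#define HIWORD_UPDATE(mask, val)	(((uint32_t)(mask) << 16) | (val))
#define HIWORD_UPDATE_BIT(bit)		HIWORD_UPDATE(bit, bit)

/* Hypothetical MMIO write helper standing in for writel(). */
static inline void reg_write(volatile uint32_t *reg, uint32_t val)
{
	*reg = val;
}

/* Set bit 2 and clear bit 3 without touching any other bit. */
static void example(volatile uint32_t *reg)
{
	reg_write(reg, HIWORD_UPDATE(0x0c, 0x04));
}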
rockchip_pcie_writel_apb(rockchip, PCIE_CLIENT_DISABLE_LTSSM, - PCIE_CLIENT_GENERAL_CONTROL); + PCIE_CLIENT_GENERAL_CON); } -static int rockchip_pcie_link_up(struct dw_pcie *pci) +static bool rockchip_pcie_link_up(struct dw_pcie *pci) { struct rockchip_pcie *rockchip = to_rockchip_pcie(pci); u32 val = rockchip_pcie_get_ltssm(rockchip); - if ((val & PCIE_LINKUP) == PCIE_LINKUP && - (val & PCIE_LTSSM_STATUS_MASK) == PCIE_L0S_ENTRY) - return 1; + return FIELD_GET(PCIE_LINKUP_MASK, val) == PCIE_LINKUP; +} - return 0; +static void rockchip_pcie_enable_l0s(struct dw_pcie *pci) +{ + u32 cap, lnkcap; + + /* Enable L0S capability for all SoCs */ + cap = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP); + if (cap) { + lnkcap = dw_pcie_readl_dbi(pci, cap + PCI_EXP_LNKCAP); + lnkcap |= PCI_EXP_LNKCAP_ASPM_L0S; + dw_pcie_dbi_ro_wr_en(pci); + dw_pcie_writel_dbi(pci, cap + PCI_EXP_LNKCAP, lnkcap); + dw_pcie_dbi_ro_wr_dis(pci); + } } static int rockchip_pcie_start_link(struct dw_pcie *pci) @@ -202,7 +227,7 @@ static int rockchip_pcie_start_link(struct dw_pcie *pci) * We need more extra time as before, rather than setting just * 100us as we don't know how long should the device need to reset. */ - msleep(100); + msleep(PCIE_T_PVPERL_MS); gpiod_set_value_cansleep(rockchip->rst_gpio, 1); return 0; @@ -233,6 +258,8 @@ static int rockchip_pcie_host_init(struct dw_pcie_rp *pp) irq_set_chained_handler_and_data(irq, rockchip_pcie_intx_handler, rockchip); + rockchip_pcie_enable_l0s(pci); + return 0; } @@ -263,16 +290,14 @@ static void rockchip_pcie_ep_hide_broken_ats_cap_rk3588(struct dw_pcie_ep *ep) dev_err(dev, "failed to hide ATS capability\n"); } -static void rockchip_pcie_ep_pre_init(struct dw_pcie_ep *ep) -{ - rockchip_pcie_ep_hide_broken_ats_cap_rk3588(ep); -} - static void rockchip_pcie_ep_init(struct dw_pcie_ep *ep) { struct dw_pcie *pci = to_dw_pcie_from_ep(ep); enum pci_barno bar; + rockchip_pcie_enable_l0s(pci); + rockchip_pcie_ep_hide_broken_ats_cap_rk3588(ep); + for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) dw_pcie_ep_reset_bar(pci, bar); }; @@ -342,7 +367,6 @@ rockchip_pcie_get_features(struct dw_pcie_ep *ep) static const struct dw_pcie_ep_ops rockchip_pcie_ep_ops = { .init = rockchip_pcie_ep_init, - .pre_init = rockchip_pcie_ep_pre_init, .raise_irq = rockchip_pcie_raise_irq, .get_features = rockchip_pcie_get_features, }; @@ -410,8 +434,8 @@ static int rockchip_pcie_phy_init(struct rockchip_pcie *rockchip) static void rockchip_pcie_phy_deinit(struct rockchip_pcie *rockchip) { - phy_exit(rockchip->phy); phy_power_off(rockchip->phy); + phy_exit(rockchip->phy); } static const struct dw_pcie_ops dw_pcie_ops = { @@ -426,7 +450,7 @@ static irqreturn_t rockchip_pcie_rc_sys_irq_thread(int irq, void *arg) struct dw_pcie *pci = &rockchip->pci; struct dw_pcie_rp *pp = &pci->pp; struct device *dev = pci->dev; - u32 reg, val; + u32 reg; reg = rockchip_pcie_readl_apb(rockchip, PCIE_CLIENT_INTR_STATUS_MISC); rockchip_pcie_writel_apb(rockchip, reg, PCIE_CLIENT_INTR_STATUS_MISC); @@ -435,8 +459,8 @@ static irqreturn_t rockchip_pcie_rc_sys_irq_thread(int irq, void *arg) dev_dbg(dev, "LTSSM_STATUS: %#x\n", rockchip_pcie_get_ltssm(rockchip)); if (reg & PCIE_RDLH_LINK_UP_CHGED) { - val = rockchip_pcie_get_ltssm(rockchip); - if ((val & PCIE_LINKUP) == PCIE_LINKUP) { + if (rockchip_pcie_link_up(pci)) { + msleep(PCIE_RESET_CONFIG_WAIT_MS); dev_dbg(dev, "Received Link up event. 
Starting enumeration!\n"); /* Rescan the bus to enumerate endpoint devices */ pci_lock_rescan_remove(); @@ -464,11 +488,14 @@ static irqreturn_t rockchip_pcie_ep_sys_irq_thread(int irq, void *arg) if (reg & PCIE_LINK_REQ_RST_NOT_INT) { dev_dbg(dev, "hot reset or link-down reset\n"); dw_pcie_ep_linkdown(&pci->ep); + /* Stop delaying link training. */ + val = HIWORD_UPDATE_BIT(PCIE_LTSSM_APP_DLY2_DONE); + rockchip_pcie_writel_apb(rockchip, val, + PCIE_CLIENT_HOT_RESET_CTRL); } if (reg & PCIE_RDLH_LINK_UP_CHGED) { - val = rockchip_pcie_get_ltssm(rockchip); - if ((val & PCIE_LINKUP) == PCIE_LINKUP) { + if (rockchip_pcie_link_up(pci)) { dev_dbg(dev, "link up\n"); dw_pcie_ep_linkup(&pci->ep); } @@ -505,7 +532,7 @@ static int rockchip_pcie_configure_rc(struct platform_device *pdev, rockchip_pcie_writel_apb(rockchip, val, PCIE_CLIENT_HOT_RESET_CTRL); rockchip_pcie_writel_apb(rockchip, PCIE_CLIENT_RC_MODE, - PCIE_CLIENT_GENERAL_CONTROL); + PCIE_CLIENT_GENERAL_CON); pp = &rockchip->pci.pp; pp->ops = &rockchip_pcie_host_ops; @@ -546,12 +573,15 @@ static int rockchip_pcie_configure_ep(struct platform_device *pdev, return ret; } - /* LTSSM enable control mode */ - val = HIWORD_UPDATE_BIT(PCIE_LTSSM_ENABLE_ENHANCE); + /* + * LTSSM enable control mode, and automatically delay link training on + * hot reset/link-down reset. + */ + val = HIWORD_UPDATE_BIT(PCIE_LTSSM_ENABLE_ENHANCE | PCIE_LTSSM_APP_DLY2_EN); rockchip_pcie_writel_apb(rockchip, val, PCIE_CLIENT_HOT_RESET_CTRL); rockchip_pcie_writel_apb(rockchip, PCIE_CLIENT_EP_MODE, - PCIE_CLIENT_GENERAL_CONTROL); + PCIE_CLIENT_GENERAL_CON); rockchip->pci.ep.ops = &rockchip_pcie_ep_ops; rockchip->pci.ep.page_size = SZ_64K; @@ -601,6 +631,10 @@ static int rockchip_pcie_probe(struct platform_device *pdev) rockchip->pci.ops = &dw_pcie_ops; rockchip->data = data; + /* Default N_FTS value (210) is broken, override it to 255 */ + rockchip->pci.n_fts[0] = 255; /* Gen1 */ + rockchip->pci.n_fts[1] = 255; /* Gen2+ */ + ret = rockchip_pcie_resource_get(pdev, rockchip); if (ret) return ret; diff --git a/drivers/pci/controller/dwc/pcie-hisi.c b/drivers/pci/controller/dwc/pcie-hisi.c index 8904b5b85ee5..3c17897e56fc 100644 --- a/drivers/pci/controller/dwc/pcie-hisi.c +++ b/drivers/pci/controller/dwc/pcie-hisi.c @@ -15,6 +15,7 @@ #include <linux/pci-acpi.h> #include <linux/pci-ecam.h> #include "../../pci.h" +#include "../pci-host-common.h" #if defined(CONFIG_PCI_HISI) || (defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)) diff --git a/drivers/pci/controller/dwc/pcie-histb.c b/drivers/pci/controller/dwc/pcie-histb.c index 1f2f4c28a949..a52071589377 100644 --- a/drivers/pci/controller/dwc/pcie-histb.c +++ b/drivers/pci/controller/dwc/pcie-histb.c @@ -151,7 +151,7 @@ static struct pci_ops histb_pci_ops = { .write = histb_pcie_wr_own_conf, }; -static int histb_pcie_link_up(struct dw_pcie *pci) +static bool histb_pcie_link_up(struct dw_pcie *pci) { struct histb_pcie *hipcie = to_histb_pcie(pci); u32 regval; @@ -160,11 +160,8 @@ static int histb_pcie_link_up(struct dw_pcie *pci) regval = histb_pcie_readl(hipcie, PCIE_SYS_STAT0); status = histb_pcie_readl(hipcie, PCIE_SYS_STAT4); status &= PCIE_LTSSM_STATE_MASK; - if ((regval & PCIE_XMLH_LINK_UP) && (regval & PCIE_RDLH_LINK_UP) && - (status == PCIE_LTSSM_STATE_ACTIVE)) - return 1; - - return 0; + return ((regval & PCIE_XMLH_LINK_UP) && (regval & PCIE_RDLH_LINK_UP) && + (status == PCIE_LTSSM_STATE_ACTIVE)); } static int histb_pcie_start_link(struct dw_pcie *pci) diff --git a/drivers/pci/controller/dwc/pcie-keembay.c 
b/drivers/pci/controller/dwc/pcie-keembay.c index 278205db60a2..67dd3337b447 100644 --- a/drivers/pci/controller/dwc/pcie-keembay.c +++ b/drivers/pci/controller/dwc/pcie-keembay.c @@ -101,7 +101,7 @@ static void keembay_pcie_ltssm_set(struct keembay_pcie *pcie, bool enable) writel(val, pcie->apb_base + PCIE_REGS_PCIE_APP_CNTRL); } -static int keembay_pcie_link_up(struct dw_pcie *pci) +static bool keembay_pcie_link_up(struct dw_pcie *pci) { struct keembay_pcie *pcie = dev_get_drvdata(pci->dev); u32 val; diff --git a/drivers/pci/controller/dwc/pcie-kirin.c b/drivers/pci/controller/dwc/pcie-kirin.c index d0e6a3811b00..91559c8b1866 100644 --- a/drivers/pci/controller/dwc/pcie-kirin.c +++ b/drivers/pci/controller/dwc/pcie-kirin.c @@ -586,16 +586,13 @@ static void kirin_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base, kirin_pcie_sideband_dbi_w_mode(kirin_pcie, false); } -static int kirin_pcie_link_up(struct dw_pcie *pci) +static bool kirin_pcie_link_up(struct dw_pcie *pci) { struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci); u32 val; regmap_read(kirin_pcie->apb, PCIE_APB_PHY_STATUS0, &val); - if ((val & PCIE_LINKUP_ENABLE) == PCIE_LINKUP_ENABLE) - return 1; - - return 0; + return (val & PCIE_LINKUP_ENABLE) == PCIE_LINKUP_ENABLE; } static int kirin_pcie_start_link(struct dw_pcie *pci) diff --git a/drivers/pci/controller/dwc/pcie-qcom-ep.c b/drivers/pci/controller/dwc/pcie-qcom-ep.c index 46b1c6d19974..bf7c6ac0f3e3 100644 --- a/drivers/pci/controller/dwc/pcie-qcom-ep.c +++ b/drivers/pci/controller/dwc/pcie-qcom-ep.c @@ -60,6 +60,7 @@ #define PARF_DEVICE_TYPE 0x1000 #define PARF_BDF_TO_SID_CFG 0x2c00 #define PARF_INT_ALL_5_MASK 0x2dcc +#define PARF_INT_ALL_3_MASK 0x2e18 /* PARF_INT_ALL_{STATUS/CLEAR/MASK} register fields */ #define PARF_INT_ALL_LINK_DOWN BIT(1) @@ -132,6 +133,9 @@ /* PARF_INT_ALL_5_MASK fields */ #define PARF_INT_ALL_5_MHI_RAM_DATA_PARITY_ERR BIT(0) +/* PARF_INT_ALL_3_MASK fields */ +#define PARF_INT_ALL_3_PTM_UPDATING BIT(4) + /* ELBI registers */ #define ELBI_SYS_STTS 0x08 #define ELBI_CS2_ENABLE 0xa4 @@ -261,7 +265,7 @@ static void qcom_pcie_ep_configure_tcsr(struct qcom_pcie_ep *pcie_ep) } } -static int qcom_pcie_dw_link_up(struct dw_pcie *pci) +static bool qcom_pcie_dw_link_up(struct dw_pcie *pci) { struct qcom_pcie_ep *pcie_ep = to_pcie_ep(pci); u32 reg; @@ -497,6 +501,10 @@ static int qcom_pcie_perst_deassert(struct dw_pcie *pci) writel_relaxed(val, pcie_ep->parf + PARF_INT_ALL_5_MASK); } + val = readl_relaxed(pcie_ep->parf + PARF_INT_ALL_3_MASK); + val &= ~PARF_INT_ALL_3_PTM_UPDATING; + writel_relaxed(val, pcie_ep->parf + PARF_INT_ALL_3_MASK); + ret = dw_pcie_ep_init_registers(&pcie_ep->pci.ep); if (ret) { dev_err(dev, "Failed to complete initialization: %d\n", ret); diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c index dc98ae63362d..294babe1816e 100644 --- a/drivers/pci/controller/dwc/pcie-qcom.c +++ b/drivers/pci/controller/dwc/pcie-qcom.c @@ -21,7 +21,9 @@ #include <linux/limits.h> #include <linux/init.h> #include <linux/of.h> +#include <linux/of_pci.h> #include <linux/pci.h> +#include <linux/pci-ecam.h> #include <linux/pm_opp.h> #include <linux/pm_runtime.h> #include <linux/platform_device.h> @@ -34,6 +36,7 @@ #include <linux/units.h> #include "../../pci.h" +#include "../pci-host-common.h" #include "pcie-designware.h" #include "pcie-qcom-common.h" @@ -255,13 +258,21 @@ struct qcom_pcie_ops { * @ops: qcom PCIe ops structure * @override_no_snoop: Override NO_SNOOP attribute in TLP to enable cache * snooping + * 
@firmware_managed: Set if the Root Complex is firmware managed */ struct qcom_pcie_cfg { const struct qcom_pcie_ops *ops; bool override_no_snoop; + bool firmware_managed; bool no_l0s; }; +struct qcom_pcie_port { + struct list_head list; + struct gpio_desc *reset; + struct phy *phy; +}; + struct qcom_pcie { struct dw_pcie *pci; void __iomem *parf; /* DT parf */ @@ -274,24 +285,37 @@ struct qcom_pcie { struct icc_path *icc_cpu; const struct qcom_pcie_cfg *cfg; struct dentry *debugfs; + struct list_head ports; bool suspended; bool use_pm_opp; }; #define to_qcom_pcie(x) dev_get_drvdata((x)->dev) -static void qcom_ep_reset_assert(struct qcom_pcie *pcie) +static void qcom_perst_assert(struct qcom_pcie *pcie, bool assert) { - gpiod_set_value_cansleep(pcie->reset, 1); + struct qcom_pcie_port *port; + int val = assert ? 1 : 0; + + if (list_empty(&pcie->ports)) + gpiod_set_value_cansleep(pcie->reset, val); + else + list_for_each_entry(port, &pcie->ports, list) + gpiod_set_value_cansleep(port->reset, val); + usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500); } +static void qcom_ep_reset_assert(struct qcom_pcie *pcie) +{ + qcom_perst_assert(pcie, true); +} + static void qcom_ep_reset_deassert(struct qcom_pcie *pcie) { /* Ensure that PERST has been asserted for at least 100 ms */ - msleep(100); - gpiod_set_value_cansleep(pcie->reset, 0); - usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500); + msleep(PCIE_T_PVPERL_MS); + qcom_perst_assert(pcie, false); } static int qcom_pcie_start_link(struct dw_pcie *pci) @@ -1221,12 +1245,65 @@ static int qcom_pcie_post_init_2_9_0(struct qcom_pcie *pcie) return 0; } -static int qcom_pcie_link_up(struct dw_pcie *pci) +static bool qcom_pcie_link_up(struct dw_pcie *pci) { u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP); u16 val = readw(pci->dbi_base + offset + PCI_EXP_LNKSTA); - return !!(val & PCI_EXP_LNKSTA_DLLLA); + return val & PCI_EXP_LNKSTA_DLLLA; +} + +static void qcom_pcie_phy_exit(struct qcom_pcie *pcie) +{ + struct qcom_pcie_port *port; + + if (list_empty(&pcie->ports)) + phy_exit(pcie->phy); + else + list_for_each_entry(port, &pcie->ports, list) + phy_exit(port->phy); +} + +static void qcom_pcie_phy_power_off(struct qcom_pcie *pcie) +{ + struct qcom_pcie_port *port; + + if (list_empty(&pcie->ports)) { + phy_power_off(pcie->phy); + } else { + list_for_each_entry(port, &pcie->ports, list) + phy_power_off(port->phy); + } +} + +static int qcom_pcie_phy_power_on(struct qcom_pcie *pcie) +{ + struct qcom_pcie_port *port; + int ret = 0; + + if (list_empty(&pcie->ports)) { + ret = phy_set_mode_ext(pcie->phy, PHY_MODE_PCIE, PHY_MODE_PCIE_RC); + if (ret) + return ret; + + ret = phy_power_on(pcie->phy); + if (ret) + return ret; + } else { + list_for_each_entry(port, &pcie->ports, list) { + ret = phy_set_mode_ext(port->phy, PHY_MODE_PCIE, PHY_MODE_PCIE_RC); + if (ret) + return ret; + + ret = phy_power_on(port->phy); + if (ret) { + qcom_pcie_phy_power_off(pcie); + return ret; + } + } + } + + return ret; } static int qcom_pcie_host_init(struct dw_pcie_rp *pp) @@ -1241,11 +1318,7 @@ static int qcom_pcie_host_init(struct dw_pcie_rp *pp) if (ret) return ret; - ret = phy_set_mode_ext(pcie->phy, PHY_MODE_PCIE, PHY_MODE_PCIE_RC); - if (ret) - goto err_deinit; - - ret = phy_power_on(pcie->phy); + ret = qcom_pcie_phy_power_on(pcie); if (ret) goto err_deinit; @@ -1268,7 +1341,7 @@ static int qcom_pcie_host_init(struct dw_pcie_rp *pp) err_assert_reset: qcom_ep_reset_assert(pcie); err_disable_phy: - phy_power_off(pcie->phy); + qcom_pcie_phy_power_off(pcie); err_deinit: 
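The PHY helpers above all follow one pattern: an empty port list means the legacy single-PHY, single-PERST# binding, otherwise every child Root Port is handled, and a partial power-on failure rolls back whatever was already enabled. A stripped-down sketch of that shape with hypothetical type names; unlike the driver's helper, it powers off only the PHYs that actually came up:

#include <linux/list.h>
#include <linux/phy/phy.h>

struct example_port {
	struct list_head list;
	struct phy *phy;
};

struct example_pcie {
	struct list_head ports;	/* empty means the legacy binding */
	struct phy *phy;	/* legacy single PHY */
};

static int example_phys_power_on(struct example_pcie *pcie)
{
	struct example_port *port;
	int ret;

	if (list_empty(&pcie->ports))
		return phy_power_on(pcie->phy);

	list_for_each_entry(port, &pcie->ports, list) {
		ret = phy_power_on(port->phy);
		if (ret)
			goto rollback;
	}

	return 0;

rollback:
	/* Power off only the PHYs enabled before the failing one. */
	list_for_each_entry_continue_reverse(port, &pcie->ports, list)
		phy_power_off(port->phy);

	return ret;
}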
pcie->cfg->ops->deinit(pcie); @@ -1281,7 +1354,7 @@ static void qcom_pcie_host_deinit(struct dw_pcie_rp *pp) struct qcom_pcie *pcie = to_qcom_pcie(pci); qcom_ep_reset_assert(pcie); - phy_power_off(pcie->phy); + qcom_pcie_phy_power_off(pcie); pcie->cfg->ops->deinit(pcie); } @@ -1426,6 +1499,10 @@ static const struct qcom_pcie_cfg cfg_sc8280xp = { .no_l0s = true, }; +static const struct qcom_pcie_cfg cfg_fw_managed = { + .firmware_managed = true, +}; + static const struct dw_pcie_ops dw_pcie_ops = { .link_up = qcom_pcie_link_up, .start_link = qcom_pcie_start_link, @@ -1564,6 +1641,7 @@ static irqreturn_t qcom_pcie_global_irq_thread(int irq, void *data) writel_relaxed(status, pcie->parf + PARF_INT_ALL_CLEAR); if (FIELD_GET(PARF_INT_ALL_LINK_UP, status)) { + msleep(PCIE_RESET_CONFIG_WAIT_MS); dev_dbg(dev, "Received Link up event. Starting enumeration!\n"); /* Rescan the bus to enumerate endpoint devices */ pci_lock_rescan_remove(); @@ -1579,10 +1657,128 @@ static irqreturn_t qcom_pcie_global_irq_thread(int irq, void *data) return IRQ_HANDLED; } +static void qcom_pci_free_msi(void *ptr) +{ + struct dw_pcie_rp *pp = (struct dw_pcie_rp *)ptr; + + if (pp && pp->has_msi_ctrl) + dw_pcie_free_msi(pp); +} + +static int qcom_pcie_ecam_host_init(struct pci_config_window *cfg) +{ + struct device *dev = cfg->parent; + struct dw_pcie_rp *pp; + struct dw_pcie *pci; + int ret; + + pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); + if (!pci) + return -ENOMEM; + + pci->dev = dev; + pp = &pci->pp; + pci->dbi_base = cfg->win; + pp->num_vectors = MSI_DEF_NUM_VECTORS; + + ret = dw_pcie_msi_host_init(pp); + if (ret) + return ret; + + pp->has_msi_ctrl = true; + dw_pcie_msi_init(pp); + + return devm_add_action_or_reset(dev, qcom_pci_free_msi, pp); +} + +static const struct pci_ecam_ops pci_qcom_ecam_ops = { + .init = qcom_pcie_ecam_host_init, + .pci_ops = { + .map_bus = pci_ecam_map_bus, + .read = pci_generic_config_read, + .write = pci_generic_config_write, + } +}; + +static int qcom_pcie_parse_port(struct qcom_pcie *pcie, struct device_node *node) +{ + struct device *dev = pcie->pci->dev; + struct qcom_pcie_port *port; + struct gpio_desc *reset; + struct phy *phy; + int ret; + + reset = devm_fwnode_gpiod_get(dev, of_fwnode_handle(node), + "reset", GPIOD_OUT_HIGH, "PERST#"); + if (IS_ERR(reset)) + return PTR_ERR(reset); + + phy = devm_of_phy_get(dev, node, NULL); + if (IS_ERR(phy)) + return PTR_ERR(phy); + + port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL); + if (!port) + return -ENOMEM; + + ret = phy_init(phy); + if (ret) + return ret; + + port->reset = reset; + port->phy = phy; + INIT_LIST_HEAD(&port->list); + list_add_tail(&port->list, &pcie->ports); + + return 0; +} + +static int qcom_pcie_parse_ports(struct qcom_pcie *pcie) +{ + struct device *dev = pcie->pci->dev; + struct qcom_pcie_port *port, *tmp; + int ret = -ENOENT; + + for_each_available_child_of_node_scoped(dev->of_node, of_port) { + ret = qcom_pcie_parse_port(pcie, of_port); + if (ret) + goto err_port_del; + } + + return ret; + +err_port_del: + list_for_each_entry_safe(port, tmp, &pcie->ports, list) + list_del(&port->list); + + return ret; +} + +static int qcom_pcie_parse_legacy_binding(struct qcom_pcie *pcie) +{ + struct device *dev = pcie->pci->dev; + int ret; + + pcie->phy = devm_phy_optional_get(dev, "pciephy"); + if (IS_ERR(pcie->phy)) + return PTR_ERR(pcie->phy); + + pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_HIGH); + if (IS_ERR(pcie->reset)) + return PTR_ERR(pcie->reset); + + ret = phy_init(pcie->phy); + if (ret) + return 
ret; + + return 0; +} + static int qcom_pcie_probe(struct platform_device *pdev) { const struct qcom_pcie_cfg *pcie_cfg; unsigned long max_freq = ULONG_MAX; + struct qcom_pcie_port *port, *tmp; struct device *dev = &pdev->dev; struct dev_pm_opp *opp; struct qcom_pcie *pcie; @@ -1593,24 +1789,64 @@ static int qcom_pcie_probe(struct platform_device *pdev) char *name; pcie_cfg = of_device_get_match_data(dev); - if (!pcie_cfg || !pcie_cfg->ops) { - dev_err(dev, "Invalid platform data\n"); - return -EINVAL; + if (!pcie_cfg) { + dev_err(dev, "No platform data\n"); + return -ENODATA; } - pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); - if (!pcie) - return -ENOMEM; - - pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); - if (!pci) - return -ENOMEM; + if (!pcie_cfg->firmware_managed && !pcie_cfg->ops) { + dev_err(dev, "No platform ops\n"); + return -ENODATA; + } pm_runtime_enable(dev); ret = pm_runtime_get_sync(dev); if (ret < 0) goto err_pm_runtime_put; + if (pcie_cfg->firmware_managed) { + struct pci_host_bridge *bridge; + struct pci_config_window *cfg; + + bridge = devm_pci_alloc_host_bridge(dev, 0); + if (!bridge) { + ret = -ENOMEM; + goto err_pm_runtime_put; + } + + /* Parse and map our ECAM configuration space area */ + cfg = pci_host_common_ecam_create(dev, bridge, + &pci_qcom_ecam_ops); + if (IS_ERR(cfg)) { + ret = PTR_ERR(cfg); + goto err_pm_runtime_put; + } + + bridge->sysdata = cfg; + bridge->ops = (struct pci_ops *)&pci_qcom_ecam_ops.pci_ops; + bridge->msi_domain = true; + + ret = pci_host_probe(bridge); + if (ret) + goto err_pm_runtime_put; + + return 0; + } + + pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); + if (!pcie) { + ret = -ENOMEM; + goto err_pm_runtime_put; + } + + pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); + if (!pci) { + ret = -ENOMEM; + goto err_pm_runtime_put; + } + + INIT_LIST_HEAD(&pcie->ports); + pci->dev = dev; pci->ops = &dw_pcie_ops; pp = &pci->pp; @@ -1619,12 +1855,6 @@ static int qcom_pcie_probe(struct platform_device *pdev) pcie->cfg = pcie_cfg; - pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_HIGH); - if (IS_ERR(pcie->reset)) { - ret = PTR_ERR(pcie->reset); - goto err_pm_runtime_put; - } - pcie->parf = devm_platform_ioremap_resource_byname(pdev, "parf"); if (IS_ERR(pcie->parf)) { ret = PTR_ERR(pcie->parf); @@ -1647,12 +1877,6 @@ static int qcom_pcie_probe(struct platform_device *pdev) } } - pcie->phy = devm_phy_optional_get(dev, "pciephy"); - if (IS_ERR(pcie->phy)) { - ret = PTR_ERR(pcie->phy); - goto err_pm_runtime_put; - } - /* OPP table is optional */ ret = devm_pm_opp_of_add_table(dev); if (ret && ret != -ENODEV) { @@ -1699,9 +1923,23 @@ static int qcom_pcie_probe(struct platform_device *pdev) pp->ops = &qcom_pcie_dw_ops; - ret = phy_init(pcie->phy); - if (ret) - goto err_pm_runtime_put; + ret = qcom_pcie_parse_ports(pcie); + if (ret) { + if (ret != -ENOENT) { + dev_err_probe(pci->dev, ret, + "Failed to parse Root Port: %d\n", ret); + goto err_pm_runtime_put; + } + + /* + * In the case of properties not populated in Root Port node, + * fallback to the legacy method of parsing the Host Bridge + * node. This is to maintain DT backwards compatibility. 
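+		 * New bindings instead describe each Root Port as a child
+		 * node carrying its own "reset" GPIO and PHY, which
+		 * qcom_pcie_parse_ports() picks up above.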
+ */ + ret = qcom_pcie_parse_legacy_binding(pcie); + if (ret) + goto err_pm_runtime_put; + } platform_set_drvdata(pdev, pcie); @@ -1746,7 +1984,9 @@ static int qcom_pcie_probe(struct platform_device *pdev) err_host_deinit: dw_pcie_host_deinit(pp); err_phy_exit: - phy_exit(pcie->phy); + qcom_pcie_phy_exit(pcie); + list_for_each_entry_safe(port, tmp, &pcie->ports, list) + list_del(&port->list); err_pm_runtime_put: pm_runtime_put(dev); pm_runtime_disable(dev); @@ -1756,9 +1996,13 @@ err_pm_runtime_put: static int qcom_pcie_suspend_noirq(struct device *dev) { - struct qcom_pcie *pcie = dev_get_drvdata(dev); + struct qcom_pcie *pcie; int ret = 0; + pcie = dev_get_drvdata(dev); + if (!pcie) + return 0; + /* * Set minimum bandwidth required to keep data path functional during * suspend. @@ -1812,9 +2056,13 @@ static int qcom_pcie_suspend_noirq(struct device *dev) static int qcom_pcie_resume_noirq(struct device *dev) { - struct qcom_pcie *pcie = dev_get_drvdata(dev); + struct qcom_pcie *pcie; int ret; + pcie = dev_get_drvdata(dev); + if (!pcie) + return 0; + if (pm_suspend_target_state != PM_SUSPEND_MEM) { ret = icc_enable(pcie->icc_cpu); if (ret) { @@ -1840,6 +2088,7 @@ static const struct of_device_id qcom_pcie_match[] = { { .compatible = "qcom,pcie-apq8064", .data = &cfg_2_1_0 }, { .compatible = "qcom,pcie-apq8084", .data = &cfg_1_0_0 }, { .compatible = "qcom,pcie-ipq4019", .data = &cfg_2_4_0 }, + { .compatible = "qcom,pcie-ipq5018", .data = &cfg_2_9_0 }, { .compatible = "qcom,pcie-ipq6018", .data = &cfg_2_9_0 }, { .compatible = "qcom,pcie-ipq8064", .data = &cfg_2_1_0 }, { .compatible = "qcom,pcie-ipq8064-v2", .data = &cfg_2_1_0 }, @@ -1848,6 +2097,7 @@ static const struct of_device_id qcom_pcie_match[] = { { .compatible = "qcom,pcie-ipq9574", .data = &cfg_2_9_0 }, { .compatible = "qcom,pcie-msm8996", .data = &cfg_2_3_2 }, { .compatible = "qcom,pcie-qcs404", .data = &cfg_2_4_0 }, + { .compatible = "qcom,pcie-sa8255p", .data = &cfg_fw_managed }, { .compatible = "qcom,pcie-sa8540p", .data = &cfg_sc8280xp }, { .compatible = "qcom,pcie-sa8775p", .data = &cfg_1_34_0}, { .compatible = "qcom,pcie-sc7280", .data = &cfg_1_9_0 }, diff --git a/drivers/pci/controller/dwc/pcie-rcar-gen4.c b/drivers/pci/controller/dwc/pcie-rcar-gen4.c index fc872dd35029..18055807a4f5 100644 --- a/drivers/pci/controller/dwc/pcie-rcar-gen4.c +++ b/drivers/pci/controller/dwc/pcie-rcar-gen4.c @@ -87,7 +87,7 @@ struct rcar_gen4_pcie { #define to_rcar_gen4_pcie(_dw) container_of(_dw, struct rcar_gen4_pcie, dw) /* Common */ -static int rcar_gen4_pcie_link_up(struct dw_pcie *dw) +static bool rcar_gen4_pcie_link_up(struct dw_pcie *dw) { struct rcar_gen4_pcie *rcar = to_rcar_gen4_pcie(dw); u32 val, mask; @@ -403,6 +403,7 @@ static const struct pci_epc_features rcar_gen4_pcie_epc_features = { .msix_capable = false, .bar[BAR_1] = { .type = BAR_RESERVED, }, .bar[BAR_3] = { .type = BAR_RESERVED, }, + .bar[BAR_4] = { .type = BAR_FIXED, .fixed_size = 256 }, .bar[BAR_5] = { .type = BAR_RESERVED, }, .align = SZ_1M, }; diff --git a/drivers/pci/controller/dwc/pcie-sophgo.c b/drivers/pci/controller/dwc/pcie-sophgo.c new file mode 100644 index 000000000000..ad4baaa34ffa --- /dev/null +++ b/drivers/pci/controller/dwc/pcie-sophgo.c @@ -0,0 +1,257 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Sophgo DesignWare based PCIe host controller driver + */ + +#include <linux/bits.h> +#include <linux/clk.h> +#include <linux/irqchip/chained_irq.h> +#include <linux/irqdomain.h> +#include <linux/module.h> +#include <linux/property.h> +#include 
<linux/platform_device.h> + +#include "pcie-designware.h" + +#define to_sophgo_pcie(x) dev_get_drvdata((x)->dev) + +#define PCIE_INT_SIGNAL 0xc48 +#define PCIE_INT_EN 0xca0 + +#define PCIE_INT_SIGNAL_INTX GENMASK(8, 5) + +#define PCIE_INT_EN_INTX GENMASK(4, 1) +#define PCIE_INT_EN_INT_MSI BIT(5) + +struct sophgo_pcie { + struct dw_pcie pci; + void __iomem *app_base; + struct clk_bulk_data *clks; + unsigned int clk_cnt; + struct irq_domain *irq_domain; +}; + +static int sophgo_pcie_readl_app(struct sophgo_pcie *sophgo, u32 reg) +{ + return readl_relaxed(sophgo->app_base + reg); +} + +static void sophgo_pcie_writel_app(struct sophgo_pcie *sophgo, u32 val, u32 reg) +{ + writel_relaxed(val, sophgo->app_base + reg); +} + +static void sophgo_pcie_intx_handler(struct irq_desc *desc) +{ + struct dw_pcie_rp *pp = irq_desc_get_handler_data(desc); + struct irq_chip *chip = irq_desc_get_chip(desc); + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct sophgo_pcie *sophgo = to_sophgo_pcie(pci); + unsigned long hwirq, reg; + + chained_irq_enter(chip, desc); + + reg = sophgo_pcie_readl_app(sophgo, PCIE_INT_SIGNAL); + reg = FIELD_GET(PCIE_INT_SIGNAL_INTX, reg); + + for_each_set_bit(hwirq, ®, PCI_NUM_INTX) + generic_handle_domain_irq(sophgo->irq_domain, hwirq); + + chained_irq_exit(chip, desc); +} + +static void sophgo_intx_irq_mask(struct irq_data *d) +{ + struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d); + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct sophgo_pcie *sophgo = to_sophgo_pcie(pci); + unsigned long flags; + u32 val; + + raw_spin_lock_irqsave(&pp->lock, flags); + + val = sophgo_pcie_readl_app(sophgo, PCIE_INT_EN); + val &= ~FIELD_PREP(PCIE_INT_EN_INTX, BIT(d->hwirq)); + sophgo_pcie_writel_app(sophgo, val, PCIE_INT_EN); + + raw_spin_unlock_irqrestore(&pp->lock, flags); +}; + +static void sophgo_intx_irq_unmask(struct irq_data *d) +{ + struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d); + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct sophgo_pcie *sophgo = to_sophgo_pcie(pci); + unsigned long flags; + u32 val; + + raw_spin_lock_irqsave(&pp->lock, flags); + + val = sophgo_pcie_readl_app(sophgo, PCIE_INT_EN); + val |= FIELD_PREP(PCIE_INT_EN_INTX, BIT(d->hwirq)); + sophgo_pcie_writel_app(sophgo, val, PCIE_INT_EN); + + raw_spin_unlock_irqrestore(&pp->lock, flags); +}; + +static struct irq_chip sophgo_intx_irq_chip = { + .name = "INTx", + .irq_mask = sophgo_intx_irq_mask, + .irq_unmask = sophgo_intx_irq_unmask, +}; + +static int sophgo_pcie_intx_map(struct irq_domain *domain, unsigned int irq, + irq_hw_number_t hwirq) +{ + irq_set_chip_and_handler(irq, &sophgo_intx_irq_chip, handle_level_irq); + irq_set_chip_data(irq, domain->host_data); + + return 0; +} + +static const struct irq_domain_ops intx_domain_ops = { + .map = sophgo_pcie_intx_map, +}; + +static int sophgo_pcie_init_irq_domain(struct dw_pcie_rp *pp) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct sophgo_pcie *sophgo = to_sophgo_pcie(pci); + struct device *dev = sophgo->pci.dev; + struct fwnode_handle *intc; + int irq; + + intc = device_get_named_child_node(dev, "interrupt-controller"); + if (!intc) { + dev_err(dev, "missing child interrupt-controller node\n"); + return -ENODEV; + } + + irq = fwnode_irq_get(intc, 0); + if (irq < 0) { + dev_err(dev, "failed to get INTx irq number\n"); + fwnode_handle_put(intc); + return irq; + } + + sophgo->irq_domain = irq_domain_create_linear(intc, PCI_NUM_INTX, + &intx_domain_ops, pp); + fwnode_handle_put(intc); + if (!sophgo->irq_domain) { + dev_err(dev, "failed to 
get a INTx irq domain\n"); + return -EINVAL; + } + + return irq; +} + +static void sophgo_pcie_msi_enable(struct dw_pcie_rp *pp) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct sophgo_pcie *sophgo = to_sophgo_pcie(pci); + unsigned long flags; + u32 val; + + raw_spin_lock_irqsave(&pp->lock, flags); + + val = sophgo_pcie_readl_app(sophgo, PCIE_INT_EN); + val |= PCIE_INT_EN_INT_MSI; + sophgo_pcie_writel_app(sophgo, val, PCIE_INT_EN); + + raw_spin_unlock_irqrestore(&pp->lock, flags); +} + +static int sophgo_pcie_host_init(struct dw_pcie_rp *pp) +{ + int irq; + + irq = sophgo_pcie_init_irq_domain(pp); + if (irq < 0) + return irq; + + irq_set_chained_handler_and_data(irq, sophgo_pcie_intx_handler, pp); + + sophgo_pcie_msi_enable(pp); + + return 0; +} + +static const struct dw_pcie_host_ops sophgo_pcie_host_ops = { + .init = sophgo_pcie_host_init, +}; + +static int sophgo_pcie_clk_init(struct sophgo_pcie *sophgo) +{ + struct device *dev = sophgo->pci.dev; + int ret; + + ret = devm_clk_bulk_get_all_enabled(dev, &sophgo->clks); + if (ret < 0) + return dev_err_probe(dev, ret, "failed to get clocks\n"); + + sophgo->clk_cnt = ret; + + return 0; +} + +static int sophgo_pcie_resource_get(struct platform_device *pdev, + struct sophgo_pcie *sophgo) +{ + sophgo->app_base = devm_platform_ioremap_resource_byname(pdev, "app"); + if (IS_ERR(sophgo->app_base)) + return dev_err_probe(&pdev->dev, PTR_ERR(sophgo->app_base), + "failed to map app registers\n"); + + return 0; +} + +static int sophgo_pcie_configure_rc(struct sophgo_pcie *sophgo) +{ + struct dw_pcie_rp *pp; + + pp = &sophgo->pci.pp; + pp->ops = &sophgo_pcie_host_ops; + + return dw_pcie_host_init(pp); +} + +static int sophgo_pcie_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct sophgo_pcie *sophgo; + int ret; + + sophgo = devm_kzalloc(dev, sizeof(*sophgo), GFP_KERNEL); + if (!sophgo) + return -ENOMEM; + + platform_set_drvdata(pdev, sophgo); + + sophgo->pci.dev = dev; + + ret = sophgo_pcie_resource_get(pdev, sophgo); + if (ret) + return ret; + + ret = sophgo_pcie_clk_init(sophgo); + if (ret) + return ret; + + return sophgo_pcie_configure_rc(sophgo); +} + +static const struct of_device_id sophgo_pcie_of_match[] = { + { .compatible = "sophgo,sg2044-pcie" }, + { } +}; +MODULE_DEVICE_TABLE(of, sophgo_pcie_of_match); + +static struct platform_driver sophgo_pcie_driver = { + .driver = { + .name = "sophgo-pcie", + .of_match_table = sophgo_pcie_of_match, + .suppress_bind_attrs = true, + }, + .probe = sophgo_pcie_probe, +}; +builtin_platform_driver(sophgo_pcie_driver); diff --git a/drivers/pci/controller/dwc/pcie-spear13xx.c b/drivers/pci/controller/dwc/pcie-spear13xx.c index ff986ced56b2..01794a9d3ad2 100644 --- a/drivers/pci/controller/dwc/pcie-spear13xx.c +++ b/drivers/pci/controller/dwc/pcie-spear13xx.c @@ -110,15 +110,12 @@ static void spear13xx_pcie_enable_interrupts(struct spear13xx_pcie *spear13xx_pc MSI_CTRL_INT, &app_reg->int_mask); } -static int spear13xx_pcie_link_up(struct dw_pcie *pci) +static bool spear13xx_pcie_link_up(struct dw_pcie *pci) { struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pci); struct pcie_app_reg __iomem *app_reg = spear13xx_pcie->app_base; - if (readl(&app_reg->app_status_1) & XMLH_LINK_UP) - return 1; - - return 0; + return readl(&app_reg->app_status_1) & XMLH_LINK_UP; } static int spear13xx_pcie_host_init(struct dw_pcie_rp *pp) diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c index 5103995cd6c7..4f26086f25da 100644 --- 
a/drivers/pci/controller/dwc/pcie-tegra194.c +++ b/drivers/pci/controller/dwc/pcie-tegra194.c @@ -713,7 +713,16 @@ static void init_host_aspm(struct tegra_pcie_dw *pcie) static void init_debugfs(struct tegra_pcie_dw *pcie) { - debugfs_create_devm_seqfile(pcie->dev, "aspm_state_cnt", pcie->debugfs, + struct device *dev = pcie->dev; + char *name; + + name = devm_kasprintf(dev, GFP_KERNEL, "%pOFP", dev->of_node); + if (!name) + return; + + pcie->debugfs = debugfs_create_dir(name, NULL); + + debugfs_create_devm_seqfile(dev, "aspm_state_cnt", pcie->debugfs, aspm_state_cnt); } #else @@ -1027,12 +1036,12 @@ retry_link: return 0; } -static int tegra_pcie_dw_link_up(struct dw_pcie *pci) +static bool tegra_pcie_dw_link_up(struct dw_pcie *pci) { struct tegra_pcie_dw *pcie = to_tegra_pcie(pci); u32 val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA); - return !!(val & PCI_EXP_LNKSTA_DLLLA); + return val & PCI_EXP_LNKSTA_DLLLA; } static void tegra_pcie_dw_stop_link(struct dw_pcie *pci) @@ -1634,7 +1643,6 @@ static void tegra_pcie_deinit_controller(struct tegra_pcie_dw *pcie) static int tegra_pcie_config_rp(struct tegra_pcie_dw *pcie) { struct device *dev = pcie->dev; - char *name; int ret; pm_runtime_enable(dev); @@ -1664,13 +1672,6 @@ static int tegra_pcie_config_rp(struct tegra_pcie_dw *pcie) goto fail_host_init; } - name = devm_kasprintf(dev, GFP_KERNEL, "%pOFP", dev->of_node); - if (!name) { - ret = -ENOMEM; - goto fail_host_init; - } - - pcie->debugfs = debugfs_create_dir(name, NULL); init_debugfs(pcie); return ret; diff --git a/drivers/pci/controller/dwc/pcie-uniphier.c b/drivers/pci/controller/dwc/pcie-uniphier.c index 5757ca3803c9..297e7a3d9b36 100644 --- a/drivers/pci/controller/dwc/pcie-uniphier.c +++ b/drivers/pci/controller/dwc/pcie-uniphier.c @@ -135,7 +135,7 @@ static int uniphier_pcie_wait_rc(struct uniphier_pcie *pcie) return 0; } -static int uniphier_pcie_link_up(struct dw_pcie *pci) +static bool uniphier_pcie_link_up(struct dw_pcie *pci) { struct uniphier_pcie *pcie = to_uniphier_pcie(pci); u32 val, mask; @@ -279,7 +279,7 @@ static int uniphier_pcie_config_intx_irq(struct dw_pcie_rp *pp) goto out_put_node; } - pcie->intx_irq_domain = irq_domain_add_linear(np_intc, PCI_NUM_INTX, + pcie->intx_irq_domain = irq_domain_create_linear(of_fwnode_handle(np_intc), PCI_NUM_INTX, &uniphier_intx_domain_ops, pp); if (!pcie->intx_irq_domain) { dev_err(pci->dev, "Failed to get INTx domain\n"); diff --git a/drivers/pci/controller/dwc/pcie-visconti.c b/drivers/pci/controller/dwc/pcie-visconti.c index 318c278e65c8..cdeac6177143 100644 --- a/drivers/pci/controller/dwc/pcie-visconti.c +++ b/drivers/pci/controller/dwc/pcie-visconti.c @@ -121,13 +121,13 @@ static u32 visconti_mpu_readl(struct visconti_pcie *pcie, u32 reg) return readl_relaxed(pcie->mpu_base + reg); } -static int visconti_pcie_link_up(struct dw_pcie *pci) +static bool visconti_pcie_link_up(struct dw_pcie *pci) { struct visconti_pcie *pcie = dev_get_drvdata(pci->dev); void __iomem *addr = pcie->ulreg_base; u32 val = readl_relaxed(addr + PCIE_UL_REG_V_PHY_ST_02); - return !!(val & PCIE_UL_S_L0); + return val & PCIE_UL_S_L0; } static int visconti_pcie_start_link(struct dw_pcie *pci) diff --git a/drivers/pci/controller/mobiveil/Kconfig b/drivers/pci/controller/mobiveil/Kconfig index 58ce034f701a..c50c4625937f 100644 --- a/drivers/pci/controller/mobiveil/Kconfig +++ b/drivers/pci/controller/mobiveil/Kconfig @@ -9,6 +9,7 @@ config PCIE_MOBIVEIL config PCIE_MOBIVEIL_HOST bool depends on PCI_MSI + select IRQ_MSI_LIB select 
PCIE_MOBIVEIL config PCIE_LAYERSCAPE_GEN4 diff --git a/drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c b/drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c index 5af22bee913b..4919b27eaf44 100644 --- a/drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c +++ b/drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c @@ -53,18 +53,13 @@ static inline void ls_g4_pcie_pf_writel(struct ls_g4_pcie *pcie, iowrite32(val, pcie->pci.csr_axi_slave_base + PCIE_PF_OFF + off); } -static int ls_g4_pcie_link_up(struct mobiveil_pcie *pci) +static bool ls_g4_pcie_link_up(struct mobiveil_pcie *pci) { struct ls_g4_pcie *pcie = to_ls_g4_pcie(pci); u32 state; state = ls_g4_pcie_pf_readl(pcie, PCIE_PF_DBG); - state = state & PF_DBG_LTSSM_MASK; - - if (state == PF_DBG_LTSSM_L0) - return 1; - - return 0; + return (state & PF_DBG_LTSSM_MASK) == PF_DBG_LTSSM_L0; } static void ls_g4_pcie_disable_interrupt(struct ls_g4_pcie *pcie) @@ -174,8 +169,7 @@ static int ls_g4_pcie_interrupt_init(struct mobiveil_pcie *mv_pci) static void ls_g4_pcie_reset(struct work_struct *work) { - struct delayed_work *dwork = container_of(work, struct delayed_work, - work); + struct delayed_work *dwork = to_delayed_work(work); struct ls_g4_pcie *pcie = container_of(dwork, struct ls_g4_pcie, dwork); struct mobiveil_pcie *mv_pci = &pcie->pci; u16 ctrl; diff --git a/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c b/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c index 0e088e74155d..dbc72c73fd0a 100644 --- a/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c +++ b/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c @@ -12,6 +12,7 @@ #include <linux/init.h> #include <linux/interrupt.h> #include <linux/irq.h> +#include <linux/irqchip/irq-msi-lib.h> #include <linux/irqchip/chained_irq.h> #include <linux/irqdomain.h> #include <linux/kernel.h> @@ -353,16 +354,19 @@ static const struct irq_domain_ops intx_domain_ops = { .map = mobiveil_pcie_intx_map, }; -static struct irq_chip mobiveil_msi_irq_chip = { - .name = "Mobiveil PCIe MSI", - .irq_mask = pci_msi_mask_irq, - .irq_unmask = pci_msi_unmask_irq, -}; +#define MOBIVEIL_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \ + MSI_FLAG_USE_DEF_CHIP_OPS | \ + MSI_FLAG_NO_AFFINITY) + +#define MOBIVEIL_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \ + MSI_FLAG_PCI_MSIX) -static struct msi_domain_info mobiveil_msi_domain_info = { - .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | - MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX, - .chip = &mobiveil_msi_irq_chip, +static const struct msi_parent_ops mobiveil_msi_parent_ops = { + .required_flags = MOBIVEIL_MSI_FLAGS_REQUIRED, + .supported_flags = MOBIVEIL_MSI_FLAGS_SUPPORTED, + .bus_select_token = DOMAIN_BUS_PCI_MSI, + .prefix = "Mobiveil-", + .init_dev_msi_info = msi_lib_init_dev_msi_info, }; static void mobiveil_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) @@ -435,23 +439,20 @@ static const struct irq_domain_ops msi_domain_ops = { static int mobiveil_allocate_msi_domains(struct mobiveil_pcie *pcie) { struct device *dev = &pcie->pdev->dev; - struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node); struct mobiveil_msi *msi = &pcie->rp.msi; mutex_init(&msi->lock); - msi->dev_domain = irq_domain_add_linear(NULL, msi->num_of_vectors, - &msi_domain_ops, pcie); - if (!msi->dev_domain) { - dev_err(dev, "failed to create IRQ domain\n"); - return -ENOMEM; - } - msi->msi_domain = pci_msi_create_irq_domain(fwnode, - &mobiveil_msi_domain_info, - msi->dev_domain); - if (!msi->msi_domain) { + struct irq_domain_info info = 
{ + .fwnode = dev_fwnode(dev), + .ops = &msi_domain_ops, + .host_data = pcie, + .size = msi->num_of_vectors, + }; + + msi->dev_domain = msi_create_parent_irq_domain(&info, &mobiveil_msi_parent_ops); + if (!msi->dev_domain) { dev_err(dev, "failed to create MSI domain\n"); - irq_domain_remove(msi->dev_domain); return -ENOMEM; } @@ -461,13 +462,11 @@ static int mobiveil_allocate_msi_domains(struct mobiveil_pcie *pcie) static int mobiveil_pcie_init_irq_domain(struct mobiveil_pcie *pcie) { struct device *dev = &pcie->pdev->dev; - struct device_node *node = dev->of_node; struct mobiveil_root_port *rp = &pcie->rp; /* setup INTx */ - rp->intx_domain = irq_domain_add_linear(node, PCI_NUM_INTX, - &intx_domain_ops, pcie); - + rp->intx_domain = irq_domain_create_linear(dev_fwnode(dev), PCI_NUM_INTX, &intx_domain_ops, + pcie); if (!rp->intx_domain) { dev_err(dev, "Failed to get a INTx IRQ domain\n"); return -ENOMEM; diff --git a/drivers/pci/controller/mobiveil/pcie-mobiveil.h b/drivers/pci/controller/mobiveil/pcie-mobiveil.h index e63abb887ee3..7246de6a7176 100644 --- a/drivers/pci/controller/mobiveil/pcie-mobiveil.h +++ b/drivers/pci/controller/mobiveil/pcie-mobiveil.h @@ -135,7 +135,6 @@ struct mobiveil_msi { /* MSI information */ struct mutex lock; /* protect bitmap variable */ - struct irq_domain *msi_domain; struct irq_domain *dev_domain; phys_addr_t msi_pages_phys; int num_of_vectors; @@ -160,7 +159,7 @@ struct mobiveil_root_port { }; struct mobiveil_pab_ops { - int (*link_up)(struct mobiveil_pcie *pcie); + bool (*link_up)(struct mobiveil_pcie *pcie); }; struct mobiveil_pcie { diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c index a29796cce420..e34bea1ff0ac 100644 --- a/drivers/pci/controller/pci-aardvark.c +++ b/drivers/pci/controller/pci-aardvark.c @@ -13,6 +13,7 @@ #include <linux/gpio/consumer.h> #include <linux/interrupt.h> #include <linux/irq.h> +#include <linux/irqchip/irq-msi-lib.h> #include <linux/irqdomain.h> #include <linux/kernel.h> #include <linux/module.h> @@ -278,7 +279,6 @@ struct advk_pcie { struct irq_domain *irq_domain; struct irq_chip irq_chip; raw_spinlock_t irq_lock; - struct irq_domain *msi_domain; struct irq_domain *msi_inner_domain; raw_spinlock_t msi_irq_lock; DECLARE_BITMAP(msi_used, MSI_IRQ_NUM); @@ -1332,18 +1332,6 @@ static void advk_msi_irq_unmask(struct irq_data *d) raw_spin_unlock_irqrestore(&pcie->msi_irq_lock, flags); } -static void advk_msi_top_irq_mask(struct irq_data *d) -{ - pci_msi_mask_irq(d); - irq_chip_mask_parent(d); -} - -static void advk_msi_top_irq_unmask(struct irq_data *d) -{ - pci_msi_unmask_irq(d); - irq_chip_unmask_parent(d); -} - static struct irq_chip advk_msi_bottom_irq_chip = { .name = "MSI", .irq_compose_msi_msg = advk_msi_irq_compose_msi_msg, @@ -1436,17 +1424,20 @@ static const struct irq_domain_ops advk_pcie_irq_domain_ops = { .xlate = irq_domain_xlate_onecell, }; -static struct irq_chip advk_msi_irq_chip = { - .name = "advk-MSI", - .irq_mask = advk_msi_top_irq_mask, - .irq_unmask = advk_msi_top_irq_unmask, -}; - -static struct msi_domain_info advk_msi_domain_info = { - .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | - MSI_FLAG_NO_AFFINITY | MSI_FLAG_MULTI_PCI_MSI | - MSI_FLAG_PCI_MSIX, - .chip = &advk_msi_irq_chip, +#define ADVK_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \ + MSI_FLAG_USE_DEF_CHIP_OPS | \ + MSI_FLAG_PCI_MSI_MASK_PARENT | \ + MSI_FLAG_NO_AFFINITY) +#define ADVK_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \ + MSI_FLAG_PCI_MSIX | \ + MSI_FLAG_MULTI_PCI_MSI) + 
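The conversion pattern around this hunk recurs throughout the series (mobiveil above; aardvark, tegra and altera below): the per-driver struct msi_domain_info and its top-level irq_chip go away, the required/supported MSI flags move into a const struct msi_parent_ops, and one msi_create_parent_irq_domain() call replaces the old inner-domain plus pci_msi_create_irq_domain() pair. A minimal sketch of the resulting init path, with hypothetical foo_* names standing in for any one driver:

	static const struct msi_parent_ops foo_msi_parent_ops = {
		.required_flags		= MSI_FLAG_USE_DEF_DOM_OPS |
					  MSI_FLAG_USE_DEF_CHIP_OPS,
		.supported_flags	= MSI_GENERIC_FLAGS_MASK | MSI_FLAG_PCI_MSIX,
		.bus_select_token	= DOMAIN_BUS_PCI_MSI,
		.prefix			= "foo-",
		.init_dev_msi_info	= msi_lib_init_dev_msi_info,
	};

	static int foo_init_msi_domain(struct foo_pcie *foo)
	{
		struct irq_domain_info info = {
			.fwnode		= dev_fwnode(foo->dev),
			.ops		= &foo_msi_domain_ops,	/* existing .alloc/.free ops */
			.host_data	= foo,
			.size		= FOO_NUM_MSI_VECTORS,
		};

		foo->msi_domain = msi_create_parent_irq_domain(&info, &foo_msi_parent_ops);

		return foo->msi_domain ? 0 : -ENOMEM;
	}

Teardown shrinks to a single irq_domain_remove() on that parent domain, since the per-device PCI/MSI domains are created and torn down by the MSI core.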
+static const struct msi_parent_ops advk_msi_parent_ops = { + .required_flags = ADVK_MSI_FLAGS_REQUIRED, + .supported_flags = ADVK_MSI_FLAGS_SUPPORTED, + .bus_select_token = DOMAIN_BUS_PCI_MSI, + .prefix = "advk-", + .init_dev_msi_info = msi_lib_init_dev_msi_info, }; static int advk_pcie_init_msi_irq_domain(struct advk_pcie *pcie) @@ -1456,27 +1447,22 @@ static int advk_pcie_init_msi_irq_domain(struct advk_pcie *pcie) raw_spin_lock_init(&pcie->msi_irq_lock); mutex_init(&pcie->msi_used_lock); - pcie->msi_inner_domain = - irq_domain_add_linear(NULL, MSI_IRQ_NUM, - &advk_msi_domain_ops, pcie); - if (!pcie->msi_inner_domain) - return -ENOMEM; + struct irq_domain_info info = { + .fwnode = dev_fwnode(dev), + .ops = &advk_msi_domain_ops, + .host_data = pcie, + .size = MSI_IRQ_NUM, + }; - pcie->msi_domain = - pci_msi_create_irq_domain(dev_fwnode(dev), - &advk_msi_domain_info, - pcie->msi_inner_domain); - if (!pcie->msi_domain) { - irq_domain_remove(pcie->msi_inner_domain); + pcie->msi_inner_domain = msi_create_parent_irq_domain(&info, &advk_msi_parent_ops); + if (!pcie->msi_inner_domain) return -ENOMEM; - } return 0; } static void advk_pcie_remove_msi_irq_domain(struct advk_pcie *pcie) { - irq_domain_remove(pcie->msi_domain); irq_domain_remove(pcie->msi_inner_domain); } @@ -1508,9 +1494,8 @@ static int advk_pcie_init_irq_domain(struct advk_pcie *pcie) irq_chip->irq_mask = advk_pcie_irq_mask; irq_chip->irq_unmask = advk_pcie_irq_unmask; - pcie->irq_domain = - irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX, - &advk_pcie_irq_domain_ops, pcie); + pcie->irq_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node), PCI_NUM_INTX, + &advk_pcie_irq_domain_ops, pcie); if (!pcie->irq_domain) { dev_err(dev, "Failed to get a INTx IRQ domain\n"); ret = -ENOMEM; @@ -1549,9 +1534,7 @@ static const struct irq_domain_ops advk_pcie_rp_irq_domain_ops = { static int advk_pcie_init_rp_irq_domain(struct advk_pcie *pcie) { - pcie->rp_irq_domain = irq_domain_add_linear(NULL, 1, - &advk_pcie_rp_irq_domain_ops, - pcie); + pcie->rp_irq_domain = irq_domain_create_linear(NULL, 1, &advk_pcie_rp_irq_domain_ops, pcie); if (!pcie->rp_irq_domain) { dev_err(&pcie->pdev->dev, "Failed to add Root Port IRQ domain\n"); return -ENOMEM; diff --git a/drivers/pci/controller/pci-ftpci100.c b/drivers/pci/controller/pci-ftpci100.c index ffdeed25e961..28e43831c0f1 100644 --- a/drivers/pci/controller/pci-ftpci100.c +++ b/drivers/pci/controller/pci-ftpci100.c @@ -345,8 +345,8 @@ static int faraday_pci_setup_cascaded_irq(struct faraday_pci *p) return irq ?: -EINVAL; } - p->irqdomain = irq_domain_add_linear(intc, PCI_NUM_INTX, - &faraday_pci_irqdomain_ops, p); + p->irqdomain = irq_domain_create_linear(of_fwnode_handle(intc), PCI_NUM_INTX, + &faraday_pci_irqdomain_ops, p); of_node_put(intc); if (!p->irqdomain) { dev_err(p->dev, "failed to create Gemini PCI IRQ domain\n"); diff --git a/drivers/pci/controller/pci-host-common.c b/drivers/pci/controller/pci-host-common.c index f441bfd6f96a..810d1c8de24e 100644 --- a/drivers/pci/controller/pci-host-common.c +++ b/drivers/pci/controller/pci-host-common.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Generic PCI host driver common code + * Common library for PCI host controller drivers * * Copyright (C) 2014 ARM Limited * @@ -15,12 +15,14 @@ #include <linux/pci-ecam.h> #include <linux/platform_device.h> +#include "pci-host-common.h" + static void gen_pci_unmap_cfg(void *ptr) { pci_ecam_free((struct pci_config_window *)ptr); } -static struct pci_config_window *gen_pci_init(struct 
device *dev, +struct pci_config_window *pci_host_common_ecam_create(struct device *dev, struct pci_host_bridge *bridge, const struct pci_ecam_ops *ops) { int err; @@ -48,28 +50,25 @@ static struct pci_config_window *gen_pci_init(struct device *dev, return cfg; } +EXPORT_SYMBOL_GPL(pci_host_common_ecam_create); -int pci_host_common_probe(struct platform_device *pdev) +int pci_host_common_init(struct platform_device *pdev, + const struct pci_ecam_ops *ops) { struct device *dev = &pdev->dev; struct pci_host_bridge *bridge; struct pci_config_window *cfg; - const struct pci_ecam_ops *ops; - - ops = of_device_get_match_data(&pdev->dev); - if (!ops) - return -ENODEV; bridge = devm_pci_alloc_host_bridge(dev, 0); if (!bridge) return -ENOMEM; - platform_set_drvdata(pdev, bridge); - of_pci_check_probe_only(); + platform_set_drvdata(pdev, bridge); + /* Parse and map our Configuration Space windows */ - cfg = gen_pci_init(dev, bridge, ops); + cfg = pci_host_common_ecam_create(dev, bridge, ops); if (IS_ERR(cfg)) return PTR_ERR(cfg); @@ -81,6 +80,18 @@ int pci_host_common_probe(struct platform_device *pdev) return pci_host_probe(bridge); } +EXPORT_SYMBOL_GPL(pci_host_common_init); + +int pci_host_common_probe(struct platform_device *pdev) +{ + const struct pci_ecam_ops *ops; + + ops = of_device_get_match_data(&pdev->dev); + if (!ops) + return -ENODEV; + + return pci_host_common_init(pdev, ops); +} EXPORT_SYMBOL_GPL(pci_host_common_probe); void pci_host_common_remove(struct platform_device *pdev) @@ -94,5 +105,5 @@ void pci_host_common_remove(struct platform_device *pdev) } EXPORT_SYMBOL_GPL(pci_host_common_remove); -MODULE_DESCRIPTION("Generic PCI host common driver"); +MODULE_DESCRIPTION("Common library for PCI host controller drivers"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/pci/controller/pci-host-common.h b/drivers/pci/controller/pci-host-common.h new file mode 100644 index 000000000000..51c35ec0cf37 --- /dev/null +++ b/drivers/pci/controller/pci-host-common.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Common library for PCI host controller drivers + * + * Copyright (C) 2014 ARM Limited + * + * Author: Will Deacon <will.deacon@arm.com> + */ + +#ifndef _PCI_HOST_COMMON_H +#define _PCI_HOST_COMMON_H + +struct pci_ecam_ops; + +int pci_host_common_probe(struct platform_device *pdev); +int pci_host_common_init(struct platform_device *pdev, + const struct pci_ecam_ops *ops); +void pci_host_common_remove(struct platform_device *pdev); + +struct pci_config_window *pci_host_common_ecam_create(struct device *dev, + struct pci_host_bridge *bridge, const struct pci_ecam_ops *ops); +#endif diff --git a/drivers/pci/controller/pci-host-generic.c b/drivers/pci/controller/pci-host-generic.c index 4051b9b61dac..c1bc0d34348f 100644 --- a/drivers/pci/controller/pci-host-generic.c +++ b/drivers/pci/controller/pci-host-generic.c @@ -14,6 +14,8 @@ #include <linux/pci-ecam.h> #include <linux/platform_device.h> +#include "pci-host-common.h" + static const struct pci_ecam_ops gen_pci_cfg_cam_bus_ops = { .bus_shift = 16, .pci_ops = { diff --git a/drivers/pci/controller/pci-hyperv-intf.c b/drivers/pci/controller/pci-hyperv-intf.c index cc96be450360..28b3e93d31c0 100644 --- a/drivers/pci/controller/pci-hyperv-intf.c +++ b/drivers/pci/controller/pci-hyperv-intf.c @@ -14,6 +14,7 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/hyperv.h> +#include <linux/export.h> struct hyperv_pci_block_ops hvpci_block_ops; EXPORT_SYMBOL_GPL(hvpci_block_ops); diff --git 
a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c index ac27bda5ba26..d2b7e8ea710b 100644 --- a/drivers/pci/controller/pci-hyperv.c +++ b/drivers/pci/controller/pci-hyperv.c @@ -44,12 +44,14 @@ #include <linux/delay.h> #include <linux/semaphore.h> #include <linux/irq.h> +#include <linux/irqchip/irq-msi-lib.h> #include <linux/msi.h> #include <linux/hyperv.h> #include <linux/refcount.h> #include <linux/irqdomain.h> #include <linux/acpi.h> #include <linux/sizes.h> +#include <linux/of_irq.h> #include <asm/mshyperv.h> /* @@ -309,8 +311,6 @@ struct pci_packet { void (*completion_func)(void *context, struct pci_response *resp, int resp_packet_size); void *compl_ctxt; - - struct pci_message message[]; }; /* @@ -509,7 +509,6 @@ struct hv_pcibus_device { struct list_head children; struct list_head dr_list; - struct msi_domain_info msi_info; struct irq_domain *irq_domain; struct workqueue_struct *wq; @@ -577,9 +576,8 @@ struct hv_pci_compl { static void hv_pci_onchannelcallback(void *context); #ifdef CONFIG_X86 -#define DELIVERY_MODE APIC_DELIVERY_MODE_FIXED -#define FLOW_HANDLER handle_edge_irq -#define FLOW_NAME "edge" +#define DELIVERY_MODE APIC_DELIVERY_MODE_FIXED +#define HV_MSI_CHIP_FLAGS MSI_CHIP_FLAG_SET_ACK static int hv_pci_irqchip_init(void) { @@ -601,7 +599,7 @@ static unsigned int hv_msi_get_int_vector(struct irq_data *data) #define hv_msi_prepare pci_msi_prepare /** - * hv_arch_irq_unmask() - "Unmask" the IRQ by setting its current + * hv_irq_retarget_interrupt() - "Unmask" the IRQ by setting its current * affinity. * @data: Describes the IRQ * @@ -610,7 +608,7 @@ static unsigned int hv_msi_get_int_vector(struct irq_data *data) * is built out of this PCI bus's instance GUID and the function * number of the device. */ -static void hv_arch_irq_unmask(struct irq_data *data) +static void hv_irq_retarget_interrupt(struct irq_data *data) { struct msi_desc *msi_desc = irq_data_get_msi_desc(data); struct hv_retarget_device_interrupt *params; @@ -715,6 +713,20 @@ out: dev_err(&hbus->hdev->device, "%s() failed: %#llx", __func__, res); } + +static void hv_arch_irq_unmask(struct irq_data *data) +{ + if (hv_root_partition()) + /* + * In case of the nested root partition, the nested hypervisor + * is taking care of interrupt remapping and thus the + * MAP_DEVICE_INTERRUPT hypercall is required instead of + * RETARGET_INTERRUPT. 
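Several hunks further down replace &pkt->message with pkt.buf, ctxt.buffer or (pkt + 1); all of them follow from the removal of the flexible-array tail from struct pci_packet above. A standalone sketch of the shape of that change, with invented names:

	struct example_header {			/* stands in for struct pci_packet */
		void (*completion_func)(void *ctxt);
		void *compl_ctxt;
		/* u32 message[];  <-- the removed flexible-array tail */
	};

	struct example_request {
		struct example_header pkt;
		u8 buffer[16];			/* payload storage, now explicit */
	};

	static void example_fill(struct example_request *req, u32 type)
	{
		u32 *msg = (u32 *)req->buffer;	/* was: (u32 *)&req->pkt.message */

		*msg = type;
	}

For heap allocations sized as header plus payload, (pkt + 1) points at the first byte past the header, the same address the flexible array used to name.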
+ */ + (void)hv_map_msi_interrupt(data, NULL); + else + hv_irq_retarget_interrupt(data); +} #elif defined(CONFIG_ARM64) /* * SPI vectors to use for vPCI; arch SPIs range is [32, 1019], but leaving a bit @@ -724,8 +736,7 @@ out: #define HV_PCI_MSI_SPI_START 64 #define HV_PCI_MSI_SPI_NR (1020 - HV_PCI_MSI_SPI_START) #define DELIVERY_MODE 0 -#define FLOW_HANDLER NULL -#define FLOW_NAME NULL +#define HV_MSI_CHIP_FLAGS MSI_CHIP_FLAG_SET_EOI #define hv_msi_prepare NULL struct hv_pci_chip_data { @@ -817,9 +828,17 @@ static int hv_pci_vec_irq_gic_domain_alloc(struct irq_domain *domain, int ret; fwspec.fwnode = domain->parent->fwnode; - fwspec.param_count = 2; - fwspec.param[0] = hwirq; - fwspec.param[1] = IRQ_TYPE_EDGE_RISING; + if (is_of_node(fwspec.fwnode)) { + /* SPI lines for OF translations start at offset 32 */ + fwspec.param_count = 3; + fwspec.param[0] = 0; + fwspec.param[1] = hwirq - 32; + fwspec.param[2] = IRQ_TYPE_EDGE_RISING; + } else { + fwspec.param_count = 2; + fwspec.param[0] = hwirq; + fwspec.param[1] = IRQ_TYPE_EDGE_RISING; + } ret = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec); if (ret) @@ -887,10 +906,44 @@ static const struct irq_domain_ops hv_pci_domain_ops = { .activate = hv_pci_vec_irq_domain_activate, }; +#ifdef CONFIG_OF + +static struct irq_domain *hv_pci_of_irq_domain_parent(void) +{ + struct device_node *parent; + struct irq_domain *domain; + + parent = of_irq_find_parent(hv_get_vmbus_root_device()->of_node); + if (!parent) + return NULL; + domain = irq_find_host(parent); + of_node_put(parent); + + return domain; +} + +#endif + +#ifdef CONFIG_ACPI + +static struct irq_domain *hv_pci_acpi_irq_domain_parent(void) +{ + acpi_gsi_domain_disp_fn gsi_domain_disp_fn; + + gsi_domain_disp_fn = acpi_get_gsi_dispatcher(); + if (!gsi_domain_disp_fn) + return NULL; + return irq_find_matching_fwnode(gsi_domain_disp_fn(0), + DOMAIN_BUS_ANY); +} + +#endif + static int hv_pci_irqchip_init(void) { static struct hv_pci_chip_data *chip_data; struct fwnode_handle *fn = NULL; + struct irq_domain *irq_domain_parent = NULL; int ret = -ENOMEM; chip_data = kzalloc(sizeof(*chip_data), GFP_KERNEL); @@ -907,9 +960,24 @@ static int hv_pci_irqchip_init(void) * way to ensure that all the corresponding devices are also gone and * no interrupts will be generated. 
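A related discovery idiom, noted here as an editorial aside because it reappears at the consumer end later in this diff (the X-Gene host defers probing until its MSI parent exists): once a controller registers a plain MSI parent domain, a dependent driver can test for it by bus token instead of racing with initcall ordering. An illustrative sketch, not part of the Hyper-V change:

	static bool example_msi_parent_ready(struct device_node *msi_np)
	{
		struct irq_domain *d;

		d = irq_find_matching_host(msi_np, DOMAIN_BUS_PCI_MSI);

		return d && irq_domain_is_msi_parent(d);
	}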
*/ - hv_msi_gic_irq_domain = acpi_irq_create_hierarchy(0, HV_PCI_MSI_SPI_NR, - fn, &hv_pci_domain_ops, - chip_data); +#ifdef CONFIG_ACPI + if (!acpi_disabled) + irq_domain_parent = hv_pci_acpi_irq_domain_parent(); +#endif +#ifdef CONFIG_OF + if (!irq_domain_parent) + irq_domain_parent = hv_pci_of_irq_domain_parent(); +#endif + if (!irq_domain_parent) { + WARN_ONCE(1, "Invalid firmware configuration for VMBus interrupts\n"); + ret = -EINVAL; + goto free_chip; + } + + hv_msi_gic_irq_domain = irq_domain_create_hierarchy(irq_domain_parent, 0, + HV_PCI_MSI_SPI_NR, + fn, &hv_pci_domain_ops, + chip_data); if (!hv_msi_gic_irq_domain) { pr_err("Failed to create Hyper-V arm64 vPCI MSI IRQ domain\n"); @@ -1438,7 +1506,7 @@ static int hv_read_config_block(struct pci_dev *pdev, void *buf, memset(&pkt, 0, sizeof(pkt)); pkt.pkt.completion_func = hv_pci_read_config_compl; pkt.pkt.compl_ctxt = &comp_pkt; - read_blk = (struct pci_read_block *)&pkt.pkt.message; + read_blk = (struct pci_read_block *)pkt.buf; read_blk->message_type.type = PCI_READ_BLOCK; read_blk->wslot.slot = devfn_to_wslot(pdev->devfn); read_blk->block_id = block_id; @@ -1518,7 +1586,7 @@ static int hv_write_config_block(struct pci_dev *pdev, void *buf, memset(&pkt, 0, sizeof(pkt)); pkt.pkt.completion_func = hv_pci_write_config_compl; pkt.pkt.compl_ctxt = &comp_pkt; - write_blk = (struct pci_write_block *)&pkt.pkt.message; + write_blk = (struct pci_write_block *)pkt.buf; write_blk->message_type.type = PCI_WRITE_BLOCK; write_blk->wslot.slot = devfn_to_wslot(pdev->devfn); write_blk->block_id = block_id; @@ -1599,7 +1667,7 @@ static void hv_int_desc_free(struct hv_pci_dev *hpdev, return; } memset(&ctxt, 0, sizeof(ctxt)); - int_pkt = (struct pci_delete_interrupt *)&ctxt.pkt.message; + int_pkt = (struct pci_delete_interrupt *)ctxt.buffer; int_pkt->message_type.type = PCI_DELETE_INTERRUPT_MESSAGE; int_pkt->wslot.slot = hpdev->desc.win_slot.slot; @@ -1631,7 +1699,7 @@ static void hv_msi_free(struct irq_domain *domain, struct msi_domain_info *info, struct msi_desc *msi = irq_data_get_msi_desc(irq_data); pdev = msi_desc_to_pci_dev(msi); - hbus = info->data; + hbus = domain->host_data; int_desc = irq_data_get_irq_chip_data(irq_data); if (!int_desc) return; @@ -1649,7 +1717,6 @@ static void hv_msi_free(struct irq_domain *domain, struct msi_domain_info *info, static void hv_irq_mask(struct irq_data *data) { - pci_msi_mask_irq(data); if (data->parent_data->chip->irq_mask) irq_chip_mask_parent(data); } @@ -1660,7 +1727,6 @@ static void hv_irq_unmask(struct irq_data *data) if (data->parent_data->chip->irq_unmask) irq_chip_unmask_parent(data); - pci_msi_unmask_irq(data); } struct compose_comp_ctxt { @@ -2045,24 +2111,87 @@ return_null_message: msg->data = 0; } +static bool hv_pcie_init_dev_msi_info(struct device *dev, struct irq_domain *domain, + struct irq_domain *real_parent, struct msi_domain_info *info) +{ + struct irq_chip *chip = info->chip; + + if (!msi_lib_init_dev_msi_info(dev, domain, real_parent, info)) + return false; + + info->ops->msi_prepare = hv_msi_prepare; + + chip->irq_set_affinity = irq_chip_set_affinity_parent; + + if (IS_ENABLED(CONFIG_X86)) + chip->flags |= IRQCHIP_MOVE_DEFERRED; + + return true; +} + +#define HV_PCIE_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \ + MSI_FLAG_USE_DEF_CHIP_OPS | \ + MSI_FLAG_PCI_MSI_MASK_PARENT) +#define HV_PCIE_MSI_FLAGS_SUPPORTED (MSI_FLAG_MULTI_PCI_MSI | \ + MSI_FLAG_PCI_MSIX | \ + MSI_FLAG_PCI_MSIX_ALLOC_DYN | \ + MSI_GENERIC_FLAGS_MASK) + +static const struct msi_parent_ops hv_pcie_msi_parent_ops 
= { + .required_flags = HV_PCIE_MSI_FLAGS_REQUIRED, + .supported_flags = HV_PCIE_MSI_FLAGS_SUPPORTED, + .bus_select_token = DOMAIN_BUS_PCI_MSI, + .chip_flags = HV_MSI_CHIP_FLAGS, + .prefix = "HV-", + .init_dev_msi_info = hv_pcie_init_dev_msi_info, +}; + /* HW Interrupt Chip Descriptor */ static struct irq_chip hv_msi_irq_chip = { .name = "Hyper-V PCIe MSI", .irq_compose_msi_msg = hv_compose_msi_msg, .irq_set_affinity = irq_chip_set_affinity_parent, -#ifdef CONFIG_X86 .irq_ack = irq_chip_ack_parent, - .flags = IRQCHIP_MOVE_DEFERRED, -#elif defined(CONFIG_ARM64) .irq_eoi = irq_chip_eoi_parent, -#endif .irq_mask = hv_irq_mask, .irq_unmask = hv_irq_unmask, }; -static struct msi_domain_ops hv_msi_ops = { - .msi_prepare = hv_msi_prepare, - .msi_free = hv_msi_free, +static int hv_pcie_domain_alloc(struct irq_domain *d, unsigned int virq, unsigned int nr_irqs, + void *arg) +{ + /* + * TODO: Allocating and populating struct tran_int_desc in hv_compose_msi_msg() + * should be moved here. + */ + int ret; + + ret = irq_domain_alloc_irqs_parent(d, virq, nr_irqs, arg); + if (ret < 0) + return ret; + + for (int i = 0; i < nr_irqs; i++) { + irq_domain_set_hwirq_and_chip(d, virq + i, 0, &hv_msi_irq_chip, NULL); + if (IS_ENABLED(CONFIG_X86)) + __irq_set_handler(virq + i, handle_edge_irq, 0, "edge"); + } + + return 0; +} + +static void hv_pcie_domain_free(struct irq_domain *d, unsigned int virq, unsigned int nr_irqs) +{ + struct msi_domain_info *info = d->host_data; + + for (int i = 0; i < nr_irqs; i++) + hv_msi_free(d, info, virq + i); + + irq_domain_free_irqs_top(d, virq, nr_irqs); +} + +static const struct irq_domain_ops hv_pcie_domain_ops = { + .alloc = hv_pcie_domain_alloc, + .free = hv_pcie_domain_free, }; /** @@ -2080,17 +2209,14 @@ static struct msi_domain_ops hv_msi_ops = { */ static int hv_pcie_init_irq_domain(struct hv_pcibus_device *hbus) { - hbus->msi_info.chip = &hv_msi_irq_chip; - hbus->msi_info.ops = &hv_msi_ops; - hbus->msi_info.flags = (MSI_FLAG_USE_DEF_DOM_OPS | - MSI_FLAG_USE_DEF_CHIP_OPS | MSI_FLAG_MULTI_PCI_MSI | - MSI_FLAG_PCI_MSIX); - hbus->msi_info.handler = FLOW_HANDLER; - hbus->msi_info.handler_name = FLOW_NAME; - hbus->msi_info.data = hbus; - hbus->irq_domain = pci_msi_create_irq_domain(hbus->fwnode, - &hbus->msi_info, - hv_pci_get_root_domain()); + struct irq_domain_info info = { + .fwnode = hbus->fwnode, + .ops = &hv_pcie_domain_ops, + .host_data = hbus, + .parent = hv_pci_get_root_domain(), + }; + + hbus->irq_domain = msi_create_parent_irq_domain(&info, &hv_pcie_msi_parent_ops); if (!hbus->irq_domain) { dev_err(&hbus->hdev->device, "Failed to build an MSI IRQ domain\n"); @@ -2482,7 +2608,7 @@ static struct hv_pci_dev *new_pcichild_device(struct hv_pcibus_device *hbus, comp_pkt.hpdev = hpdev; pkt.init_packet.compl_ctxt = &comp_pkt; pkt.init_packet.completion_func = q_resource_requirements; - res_req = (struct pci_child_message *)&pkt.init_packet.message; + res_req = (struct pci_child_message *)pkt.buffer; res_req->message_type.type = PCI_QUERY_RESOURCE_REQUIREMENTS; res_req->wslot.slot = desc->win_slot.slot; @@ -2860,7 +2986,7 @@ static void hv_eject_device_work(struct work_struct *work) pci_destroy_slot(hpdev->pci_slot); memset(&ctxt, 0, sizeof(ctxt)); - ejct_pkt = (struct pci_eject_response *)&ctxt.pkt.message; + ejct_pkt = (struct pci_eject_response *)ctxt.buffer; ejct_pkt->message_type.type = PCI_EJECTION_COMPLETE; ejct_pkt->wslot.slot = hpdev->desc.win_slot.slot; vmbus_sendpacket(hbus->hdev->channel, ejct_pkt, @@ -3118,7 +3244,7 @@ static int 
hv_pci_protocol_negotiation(struct hv_device *hdev, init_completion(&comp_pkt.host_event); pkt->completion_func = hv_pci_generic_compl; pkt->compl_ctxt = &comp_pkt; - version_req = (struct pci_version_request *)&pkt->message; + version_req = (struct pci_version_request *)(pkt + 1); version_req->message_type.type = PCI_QUERY_PROTOCOL_VERSION; for (i = 0; i < num_version; i++) { @@ -3340,7 +3466,7 @@ enter_d0_retry: init_completion(&comp_pkt.host_event); pkt->completion_func = hv_pci_generic_compl; pkt->compl_ctxt = &comp_pkt; - d0_entry = (struct pci_bus_d0_entry *)&pkt->message; + d0_entry = (struct pci_bus_d0_entry *)(pkt + 1); d0_entry->message_type.type = PCI_BUS_D0ENTRY; d0_entry->mmio_base = hbus->mem_config->start; @@ -3498,20 +3624,20 @@ static int hv_send_resources_allocated(struct hv_device *hdev) if (hbus->protocol_version < PCI_PROTOCOL_VERSION_1_2) { res_assigned = - (struct pci_resources_assigned *)&pkt->message; + (struct pci_resources_assigned *)(pkt + 1); res_assigned->message_type.type = PCI_RESOURCES_ASSIGNED; res_assigned->wslot.slot = hpdev->desc.win_slot.slot; } else { res_assigned2 = - (struct pci_resources_assigned2 *)&pkt->message; + (struct pci_resources_assigned2 *)(pkt + 1); res_assigned2->message_type.type = PCI_RESOURCES_ASSIGNED2; res_assigned2->wslot.slot = hpdev->desc.win_slot.slot; } put_pcichild(hpdev); - ret = vmbus_sendpacket(hdev->channel, &pkt->message, + ret = vmbus_sendpacket(hdev->channel, pkt + 1, size_res, (unsigned long)pkt, VM_PKT_DATA_INBAND, VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); @@ -3809,6 +3935,7 @@ static int hv_pci_bus_exit(struct hv_device *hdev, bool keep_devs) struct pci_packet teardown_packet; u8 buffer[sizeof(struct pci_message)]; } pkt; + struct pci_message *msg; struct hv_pci_compl comp_pkt; struct hv_pci_dev *hpdev, *tmp; unsigned long flags; @@ -3854,10 +3981,10 @@ static int hv_pci_bus_exit(struct hv_device *hdev, bool keep_devs) init_completion(&comp_pkt.host_event); pkt.teardown_packet.completion_func = hv_pci_generic_compl; pkt.teardown_packet.compl_ctxt = &comp_pkt; - pkt.teardown_packet.message[0].type = PCI_BUS_D0EXIT; + msg = (struct pci_message *)pkt.buffer; + msg->type = PCI_BUS_D0EXIT; - ret = vmbus_sendpacket_getid(chan, &pkt.teardown_packet.message, - sizeof(struct pci_message), + ret = vmbus_sendpacket_getid(chan, msg, sizeof(*msg), (unsigned long)&pkt.teardown_packet, &trans_id, VM_PKT_DATA_INBAND, VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); @@ -3975,24 +4102,18 @@ static int hv_pci_restore_msi_msg(struct pci_dev *pdev, void *arg) { struct irq_data *irq_data; struct msi_desc *entry; - int ret = 0; if (!pdev->msi_enabled && !pdev->msix_enabled) return 0; - msi_lock_descs(&pdev->dev); + guard(msi_descs_lock)(&pdev->dev); msi_for_each_desc(entry, &pdev->dev, MSI_DESC_ASSOCIATED) { irq_data = irq_get_irq_data(entry->irq); - if (WARN_ON_ONCE(!irq_data)) { - ret = -EINVAL; - break; - } - + if (WARN_ON_ONCE(!irq_data)) + return -EINVAL; hv_compose_msi_msg(irq_data, &entry->msg); } - msi_unlock_descs(&pdev->dev); - - return ret; + return 0; } /* @@ -4093,6 +4214,9 @@ static int __init init_hv_pci_drv(void) if (!hv_is_hyperv_initialized()) return -ENODEV; + if (hv_root_partition() && !hv_nested) + return -ENODEV; + ret = hv_pci_irqchip_init(); if (ret) return ret; diff --git a/drivers/pci/controller/pci-mvebu.c b/drivers/pci/controller/pci-mvebu.c index b0e3bce10aa4..755651f33811 100644 --- a/drivers/pci/controller/pci-mvebu.c +++ b/drivers/pci/controller/pci-mvebu.c @@ -1078,9 +1078,9 @@ static int 
mvebu_pcie_init_irq_domain(struct mvebu_pcie_port *port) return -ENODEV; } - port->intx_irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX, - &mvebu_pcie_intx_irq_domain_ops, - port); + port->intx_irq_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node), + PCI_NUM_INTX, + &mvebu_pcie_intx_irq_domain_ops, port); of_node_put(pcie_intc_node); if (!port->intx_irq_domain) { dev_err(dev, "Failed to get INTx IRQ domain for %s\n", port->name); @@ -1179,37 +1179,29 @@ static int mvebu_get_tgt_attr(struct device_node *np, int devfn, unsigned int *tgt, unsigned int *attr) { - const int na = 3, ns = 2; - const __be32 *range; - int rlen, nranges, rangesz, pna, i; + struct of_range range; + struct of_range_parser parser; *tgt = -1; *attr = -1; - range = of_get_property(np, "ranges", &rlen); - if (!range) + if (of_pci_range_parser_init(&parser, np)) return -EINVAL; - pna = of_n_addr_cells(np); - rangesz = pna + na + ns; - nranges = rlen / sizeof(__be32) / rangesz; - - for (i = 0; i < nranges; i++, range += rangesz) { - u32 flags = of_read_number(range, 1); - u32 slot = of_read_number(range + 1, 1); - u64 cpuaddr = of_read_number(range + na, pna); + for_each_of_range(&parser, &range) { unsigned long rtype; + u32 slot = upper_32_bits(range.bus_addr); - if (DT_FLAGS_TO_TYPE(flags) == DT_TYPE_IO) + if (DT_FLAGS_TO_TYPE(range.flags) == DT_TYPE_IO) rtype = IORESOURCE_IO; - else if (DT_FLAGS_TO_TYPE(flags) == DT_TYPE_MEM32) + else if (DT_FLAGS_TO_TYPE(range.flags) == DT_TYPE_MEM32) rtype = IORESOURCE_MEM; else continue; if (slot == PCI_SLOT(devfn) && type == rtype) { - *tgt = DT_CPUADDR_TO_TARGET(cpuaddr); - *attr = DT_CPUADDR_TO_ATTR(cpuaddr); + *tgt = DT_CPUADDR_TO_TARGET(range.cpu_addr); + *attr = DT_CPUADDR_TO_ATTR(range.cpu_addr); return 0; } } @@ -1361,11 +1353,9 @@ static int mvebu_pcie_parse_port(struct mvebu_pcie *pcie, goto skip; } - ret = devm_add_action(dev, mvebu_pcie_port_clk_put, port); - if (ret < 0) { - clk_put(port->clk); + ret = devm_add_action_or_reset(dev, mvebu_pcie_port_clk_put, port); + if (ret < 0) goto err; - } return 1; diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c index d2f88997283a..467ddc701adc 100644 --- a/drivers/pci/controller/pci-tegra.c +++ b/drivers/pci/controller/pci-tegra.c @@ -22,6 +22,7 @@ #include <linux/iopoll.h> #include <linux/irq.h> #include <linux/irqchip/chained_irq.h> +#include <linux/irqchip/irq-msi-lib.h> #include <linux/irqdomain.h> #include <linux/kernel.h> #include <linux/init.h> @@ -1547,7 +1548,7 @@ static void tegra_pcie_msi_irq(struct irq_desc *desc) unsigned int index = i * 32 + offset; int ret; - ret = generic_handle_domain_irq(msi->domain->parent, index); + ret = generic_handle_domain_irq(msi->domain, index); if (ret) { /* * that's weird who triggered this? 
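The Tegra hunks just below delete the same hand-rolled top-level MSI chip that aardvark dropped earlier in this diff. Every such driver used to chain the PCI-side mask to the parent domain by hand, roughly as sketched here with illustrative foo_* names (compare the removed advk_msi_top_irq_* and tegra_msi_top_irq_* helpers):

	static void foo_msi_top_irq_mask(struct irq_data *d)
	{
		pci_msi_mask_irq(d);		/* mask at the PCI MSI/MSI-X capability */
		irq_chip_mask_parent(d);	/* then mask in the parent domain */
	}

	static void foo_msi_top_irq_unmask(struct irq_data *d)
	{
		pci_msi_unmask_irq(d);
		irq_chip_unmask_parent(d);
	}

With MSI_FLAG_PCI_MSI_MASK_PARENT in the parent ops, the MSI core performs that pairing itself, which is why the conversions can drop these helpers wholesale.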
@@ -1565,30 +1566,6 @@ static void tegra_pcie_msi_irq(struct irq_desc *desc) chained_irq_exit(chip, desc); } -static void tegra_msi_top_irq_ack(struct irq_data *d) -{ - irq_chip_ack_parent(d); -} - -static void tegra_msi_top_irq_mask(struct irq_data *d) -{ - pci_msi_mask_irq(d); - irq_chip_mask_parent(d); -} - -static void tegra_msi_top_irq_unmask(struct irq_data *d) -{ - pci_msi_unmask_irq(d); - irq_chip_unmask_parent(d); -} - -static struct irq_chip tegra_msi_top_chip = { - .name = "Tegra PCIe MSI", - .irq_ack = tegra_msi_top_irq_ack, - .irq_mask = tegra_msi_top_irq_mask, - .irq_unmask = tegra_msi_top_irq_unmask, -}; - static void tegra_msi_irq_ack(struct irq_data *d) { struct tegra_msi *msi = irq_data_get_irq_chip_data(d); @@ -1690,42 +1667,40 @@ static const struct irq_domain_ops tegra_msi_domain_ops = { .free = tegra_msi_domain_free, }; -static struct msi_domain_info tegra_msi_info = { - .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | - MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX, - .chip = &tegra_msi_top_chip, +static const struct msi_parent_ops tegra_msi_parent_ops = { + .supported_flags = (MSI_GENERIC_FLAGS_MASK | + MSI_FLAG_PCI_MSIX), + .required_flags = (MSI_FLAG_USE_DEF_DOM_OPS | + MSI_FLAG_USE_DEF_CHIP_OPS | + MSI_FLAG_PCI_MSI_MASK_PARENT | + MSI_FLAG_NO_AFFINITY), + .chip_flags = MSI_CHIP_FLAG_SET_ACK, + .bus_select_token = DOMAIN_BUS_PCI_MSI, + .init_dev_msi_info = msi_lib_init_dev_msi_info, }; static int tegra_allocate_domains(struct tegra_msi *msi) { struct tegra_pcie *pcie = msi_to_pcie(msi); struct fwnode_handle *fwnode = dev_fwnode(pcie->dev); - struct irq_domain *parent; - - parent = irq_domain_create_linear(fwnode, INT_PCI_MSI_NR, - &tegra_msi_domain_ops, msi); - if (!parent) { - dev_err(pcie->dev, "failed to create IRQ domain\n"); - return -ENOMEM; - } - irq_domain_update_bus_token(parent, DOMAIN_BUS_NEXUS); + struct irq_domain_info info = { + .fwnode = fwnode, + .ops = &tegra_msi_domain_ops, + .size = INT_PCI_MSI_NR, + .host_data = msi, + }; - msi->domain = pci_msi_create_irq_domain(fwnode, &tegra_msi_info, parent); + msi->domain = msi_create_parent_irq_domain(&info, &tegra_msi_parent_ops); if (!msi->domain) { dev_err(pcie->dev, "failed to create MSI domain\n"); - irq_domain_remove(parent); return -ENOMEM; } - return 0; } static void tegra_free_domains(struct tegra_msi *msi) { - struct irq_domain *parent = msi->domain->parent; - irq_domain_remove(msi->domain); - irq_domain_remove(parent); } static int tegra_pcie_msi_setup(struct tegra_pcie *pcie) diff --git a/drivers/pci/controller/pci-thunder-ecam.c b/drivers/pci/controller/pci-thunder-ecam.c index 08161065a89c..b5b4a958e6a2 100644 --- a/drivers/pci/controller/pci-thunder-ecam.c +++ b/drivers/pci/controller/pci-thunder-ecam.c @@ -11,6 +11,8 @@ #include <linux/pci-ecam.h> #include <linux/platform_device.h> +#include "pci-host-common.h" + #if defined(CONFIG_PCI_HOST_THUNDER_ECAM) || (defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)) static void set_val(u32 v, int where, int size, u32 *val) diff --git a/drivers/pci/controller/pci-thunder-pem.c b/drivers/pci/controller/pci-thunder-pem.c index f1bd5de67997..5fa037fb61dc 100644 --- a/drivers/pci/controller/pci-thunder-pem.c +++ b/drivers/pci/controller/pci-thunder-pem.c @@ -14,6 +14,7 @@ #include <linux/platform_device.h> #include <linux/io-64-nonatomic-lo-hi.h> #include "../pci.h" +#include "pci-host-common.h" #if defined(CONFIG_PCI_HOST_THUNDER_PEM) || (defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)) diff --git a/drivers/pci/controller/pci-xgene-msi.c 
b/drivers/pci/controller/pci-xgene-msi.c index 7bce327897c9..0a37a3f1809c 100644 --- a/drivers/pci/controller/pci-xgene-msi.c +++ b/drivers/pci/controller/pci-xgene-msi.c @@ -6,12 +6,14 @@ * Author: Tanmay Inamdar <tinamdar@apm.com> * Duc Dang <dhdang@apm.com> */ +#include <linux/bitfield.h> #include <linux/cpu.h> #include <linux/interrupt.h> #include <linux/irqdomain.h> #include <linux/module.h> #include <linux/msi.h> #include <linux/irqchip/chained_irq.h> +#include <linux/irqchip/irq-msi-lib.h> #include <linux/pci.h> #include <linux/platform_device.h> #include <linux/of_pci.h> @@ -21,46 +23,49 @@ #define IDX_PER_GROUP 8 #define IRQS_PER_IDX 16 #define NR_HW_IRQS 16 -#define NR_MSI_VEC (IDX_PER_GROUP * IRQS_PER_IDX * NR_HW_IRQS) +#define NR_MSI_BITS (IDX_PER_GROUP * IRQS_PER_IDX * NR_HW_IRQS) +#define NR_MSI_VEC (NR_MSI_BITS / num_possible_cpus()) -struct xgene_msi_group { - struct xgene_msi *msi; - int gic_irq; - u32 msi_grp; -}; +#define MSI_GROUP_MASK GENMASK(22, 19) +#define MSI_INDEX_MASK GENMASK(18, 16) +#define MSI_INTR_MASK GENMASK(19, 16) + +#define MSInRx_HWIRQ_MASK GENMASK(6, 4) +#define DATA_HWIRQ_MASK GENMASK(3, 0) struct xgene_msi { - struct device_node *node; struct irq_domain *inner_domain; - struct irq_domain *msi_domain; u64 msi_addr; void __iomem *msi_regs; unsigned long *bitmap; struct mutex bitmap_lock; - struct xgene_msi_group *msi_groups; - int num_cpus; + unsigned int gic_irq[NR_HW_IRQS]; }; /* Global data */ -static struct xgene_msi xgene_msi_ctrl; - -static struct irq_chip xgene_msi_top_irq_chip = { - .name = "X-Gene1 MSI", - .irq_enable = pci_msi_unmask_irq, - .irq_disable = pci_msi_mask_irq, - .irq_mask = pci_msi_mask_irq, - .irq_unmask = pci_msi_unmask_irq, -}; - -static struct msi_domain_info xgene_msi_domain_info = { - .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | - MSI_FLAG_PCI_MSIX), - .chip = &xgene_msi_top_irq_chip, -}; +static struct xgene_msi *xgene_msi_ctrl; /* - * X-Gene v1 has 16 groups of MSI termination registers MSInIRx, where - * n is group number (0..F), x is index of registers in each group (0..7) + * X-Gene v1 has 16 frames of MSI termination registers MSInIRx, where n is + * frame number (0..15), x is index of registers in each frame (0..7). Each + * 32b register is at the beginning of a 64kB region, each frame occupying + * 512kB (and the whole thing 8MB of PA space). + * + * Each register supports 16 MSI vectors (0..15) to generate interrupts. A + * write to the MSInIRx from the PCI side generates an interrupt. A read + * from the MSInRx on the CPU side returns a bitmap of the pending MSIs in + * the lower 16 bits. A side effect of this read is that all pending + * interrupts are acknowledged and cleared. + * + * Additionally, each MSI termination frame has 1 MSIINTn register (n is + * 0..15) to indicate the MSI pending status caused by any of its 8 + * termination registers, reported as a bitmap in the lower 8 bits. Each 32b + * register is at the beginning of a 64kB region (and overall occupying an + * extra 1MB). + * + * There is one GIC IRQ assigned for each MSI termination frame, 16 in + * total. + * * The register layout is as follows: * MSI0IR0 base_addr * MSI0IR1 base_addr + 0x10000 @@ -81,107 +86,74 @@ static struct msi_domain_info xgene_msi_domain_info = { * MSIINT1 base_addr + 0x810000 * ... ... * MSIINTF base_addr + 0x8F0000 - * - * Each index register supports 16 MSI vectors (0..15) to generate interrupt. - * There are total 16 GIC IRQs assigned for these 16 groups of MSI termination - * registers.
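Putting numbers on the layout above (an editorial worked example, not driver code): frame n and index x select the 32-bit register at base + (n << 19) + (x << 16), so MSI3IR5, for instance, sits at base + 0x1d0000. The GENMASK values are the ones the new MSI_GROUP_MASK and MSI_INDEX_MASK #defines encode:

	/* MSInIRx address for frame n (0..15) and index x (0..7) */
	static phys_addr_t example_msinirx_addr(phys_addr_t base, u32 n, u32 x)
	{
		return base + FIELD_PREP(GENMASK(22, 19), n)	/* n * 0x80000 */
			    + FIELD_PREP(GENMASK(18, 16), x);	/* x * 0x10000 */
	}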
- * - * Each MSI termination group has 1 MSIINTn register (n is 0..15) to indicate - * the MSI pending status caused by 1 of its 8 index registers. */ /* MSInIRx read helper */ -static u32 xgene_msi_ir_read(struct xgene_msi *msi, - u32 msi_grp, u32 msir_idx) +static u32 xgene_msi_ir_read(struct xgene_msi *msi, u32 msi_grp, u32 msir_idx) { return readl_relaxed(msi->msi_regs + MSI_IR0 + - (msi_grp << 19) + (msir_idx << 16)); + (FIELD_PREP(MSI_GROUP_MASK, msi_grp) | + FIELD_PREP(MSI_INDEX_MASK, msir_idx))); } /* MSIINTn read helper */ static u32 xgene_msi_int_read(struct xgene_msi *msi, u32 msi_grp) { - return readl_relaxed(msi->msi_regs + MSI_INT0 + (msi_grp << 16)); + return readl_relaxed(msi->msi_regs + MSI_INT0 + + FIELD_PREP(MSI_INTR_MASK, msi_grp)); } /* - * With 2048 MSI vectors supported, the MSI message can be constructed using - * following scheme: - * - Divide into 8 256-vector groups - * Group 0: 0-255 - * Group 1: 256-511 - * Group 2: 512-767 - * ... - * Group 7: 1792-2047 - * - Each 256-vector group is divided into 16 16-vector groups - * As an example: 16 16-vector groups for 256-vector group 0-255 is - * Group 0: 0-15 - * Group 1: 16-32 - * ... - * Group 15: 240-255 - * - The termination address of MSI vector in 256-vector group n and 16-vector - * group x is the address of MSIxIRn - * - The data for MSI vector in 16-vector group x is x + * In order to allow an MSI to be moved from one CPU to another without + * having to repaint both the address and the data (which cannot be done + * atomically), we statically partition the MSI frames between CPUs. Given + * that XGene-1 has 8 CPUs, each CPU gets two frames assigned to it. + * + * We adopt the convention that when an MSI is moved, it is configured to + * target the same register number in the congruent frame assigned to the + * new target CPU. This reserves a given MSI across all CPUs, and reduces + * the MSI capacity from 2048 to 256. + * + * Effectively, this amounts to: + * - hwirq[7]::cpu[2:0] is the target frame number (n in MSInIRx) + * - hwirq[6:4] is the register index in any given frame (x in MSInIRx) + * - hwirq[3:0] is the MSI data */ -static u32 hwirq_to_reg_set(unsigned long hwirq) -{ - return (hwirq / (NR_HW_IRQS * IRQS_PER_IDX)); -} - -static u32 hwirq_to_group(unsigned long hwirq) -{ - return (hwirq % NR_HW_IRQS); -} - -static u32 hwirq_to_msi_data(unsigned long hwirq) +static irq_hw_number_t compute_hwirq(u8 frame, u8 index, u8 data) { - return ((hwirq / NR_HW_IRQS) % IRQS_PER_IDX); + return (FIELD_PREP(BIT(7), FIELD_GET(BIT(3), frame)) | + FIELD_PREP(MSInRx_HWIRQ_MASK, index) | + FIELD_PREP(DATA_HWIRQ_MASK, data)); } static void xgene_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) { struct xgene_msi *msi = irq_data_get_irq_chip_data(data); - u32 reg_set = hwirq_to_reg_set(data->hwirq); - u32 group = hwirq_to_group(data->hwirq); - u64 target_addr = msi->msi_addr + (((8 * group) + reg_set) << 16); + u64 target_addr; + u32 frame, msir; + int cpu; - msg->address_hi = upper_32_bits(target_addr); - msg->address_lo = lower_32_bits(target_addr); - msg->data = hwirq_to_msi_data(data->hwirq); -} + cpu = cpumask_first(irq_data_get_effective_affinity_mask(data)); + msir = FIELD_GET(MSInRx_HWIRQ_MASK, data->hwirq); + frame = FIELD_PREP(BIT(3), FIELD_GET(BIT(7), data->hwirq)) | cpu; -/* - * X-Gene v1 only has 16 MSI GIC IRQs for 2048 MSI vectors.
To maintain - * the expected behaviour of .set_affinity for each MSI interrupt, the 16 - * MSI GIC IRQs are statically allocated to 8 X-Gene v1 cores (2 GIC IRQs - * for each core). The MSI vector is moved from 1 MSI GIC IRQ to another - * MSI GIC IRQ to steer its MSI interrupt to correct X-Gene v1 core. As a - * consequence, the total MSI vectors that X-Gene v1 supports will be - * reduced to 256 (2048/8) vectors. - */ -static int hwirq_to_cpu(unsigned long hwirq) -{ - return (hwirq % xgene_msi_ctrl.num_cpus); -} + target_addr = msi->msi_addr; + target_addr += (FIELD_PREP(MSI_GROUP_MASK, frame) | + FIELD_PREP(MSI_INTR_MASK, msir)); -static unsigned long hwirq_to_canonical_hwirq(unsigned long hwirq) -{ - return (hwirq - hwirq_to_cpu(hwirq)); + msg->address_hi = upper_32_bits(target_addr); + msg->address_lo = lower_32_bits(target_addr); + msg->data = FIELD_GET(DATA_HWIRQ_MASK, data->hwirq); } static int xgene_msi_set_affinity(struct irq_data *irqdata, const struct cpumask *mask, bool force) { int target_cpu = cpumask_first(mask); - int curr_cpu; - - curr_cpu = hwirq_to_cpu(irqdata->hwirq); - if (curr_cpu == target_cpu) - return IRQ_SET_MASK_OK_DONE; - /* Update MSI number to target the new CPU */ - irqdata->hwirq = hwirq_to_canonical_hwirq(irqdata->hwirq) + target_cpu; + irq_data_update_effective_affinity(irqdata, cpumask_of(target_cpu)); + /* Force the core code to regenerate the message */ return IRQ_SET_MASK_OK; } @@ -195,25 +167,23 @@ static int xgene_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs, void *args) { struct xgene_msi *msi = domain->host_data; - int msi_irq; + irq_hw_number_t hwirq; mutex_lock(&msi->bitmap_lock); - msi_irq = bitmap_find_next_zero_area(msi->bitmap, NR_MSI_VEC, 0, - msi->num_cpus, 0); - if (msi_irq < NR_MSI_VEC) - bitmap_set(msi->bitmap, msi_irq, msi->num_cpus); - else - msi_irq = -ENOSPC; + hwirq = find_first_zero_bit(msi->bitmap, NR_MSI_VEC); + if (hwirq < NR_MSI_VEC) + set_bit(hwirq, msi->bitmap); mutex_unlock(&msi->bitmap_lock); - if (msi_irq < 0) - return msi_irq; + if (hwirq >= NR_MSI_VEC) + return -ENOSPC; - irq_domain_set_info(domain, virq, msi_irq, + irq_domain_set_info(domain, virq, hwirq, &xgene_msi_bottom_irq_chip, domain->host_data, handle_simple_irq, NULL, NULL); + irqd_set_resend_when_in_progress(irq_get_irq_data(virq)); return 0; } @@ -223,204 +193,149 @@ static void xgene_irq_domain_free(struct irq_domain *domain, { struct irq_data *d = irq_domain_get_irq_data(domain, virq); struct xgene_msi *msi = irq_data_get_irq_chip_data(d); - u32 hwirq; mutex_lock(&msi->bitmap_lock); - hwirq = hwirq_to_canonical_hwirq(d->hwirq); - bitmap_clear(msi->bitmap, hwirq, msi->num_cpus); + clear_bit(d->hwirq, msi->bitmap); mutex_unlock(&msi->bitmap_lock); irq_domain_free_irqs_parent(domain, virq, nr_irqs); } -static const struct irq_domain_ops msi_domain_ops = { +static const struct irq_domain_ops xgene_msi_domain_ops = { .alloc = xgene_irq_domain_alloc, .free = xgene_irq_domain_free, }; -static int xgene_allocate_domains(struct xgene_msi *msi) -{ - msi->inner_domain = irq_domain_add_linear(NULL, NR_MSI_VEC, - &msi_domain_ops, msi); - if (!msi->inner_domain) - return -ENOMEM; - - msi->msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(msi->node), - &xgene_msi_domain_info, - msi->inner_domain); - - if (!msi->msi_domain) { - irq_domain_remove(msi->inner_domain); - return -ENOMEM; - } - - return 0; -} +static const struct msi_parent_ops xgene_msi_parent_ops = { + .supported_flags = (MSI_GENERIC_FLAGS_MASK | + 
MSI_FLAG_PCI_MSIX), + .required_flags = (MSI_FLAG_USE_DEF_DOM_OPS | + MSI_FLAG_USE_DEF_CHIP_OPS), + .bus_select_token = DOMAIN_BUS_PCI_MSI, + .init_dev_msi_info = msi_lib_init_dev_msi_info, +}; -static void xgene_free_domains(struct xgene_msi *msi) +static int xgene_allocate_domains(struct device_node *node, + struct xgene_msi *msi) { - if (msi->msi_domain) - irq_domain_remove(msi->msi_domain); - if (msi->inner_domain) - irq_domain_remove(msi->inner_domain); + struct irq_domain_info info = { + .fwnode = of_fwnode_handle(node), + .ops = &xgene_msi_domain_ops, + .size = NR_MSI_VEC, + .host_data = msi, + }; + + msi->inner_domain = msi_create_parent_irq_domain(&info, &xgene_msi_parent_ops); + return msi->inner_domain ? 0 : -ENOMEM; } -static int xgene_msi_init_allocator(struct xgene_msi *xgene_msi) +static int xgene_msi_init_allocator(struct device *dev) { - xgene_msi->bitmap = bitmap_zalloc(NR_MSI_VEC, GFP_KERNEL); - if (!xgene_msi->bitmap) + xgene_msi_ctrl->bitmap = devm_bitmap_zalloc(dev, NR_MSI_VEC, GFP_KERNEL); + if (!xgene_msi_ctrl->bitmap) return -ENOMEM; - mutex_init(&xgene_msi->bitmap_lock); - - xgene_msi->msi_groups = kcalloc(NR_HW_IRQS, - sizeof(struct xgene_msi_group), - GFP_KERNEL); - if (!xgene_msi->msi_groups) - return -ENOMEM; + mutex_init(&xgene_msi_ctrl->bitmap_lock); return 0; } static void xgene_msi_isr(struct irq_desc *desc) { + unsigned int *irqp = irq_desc_get_handler_data(desc); struct irq_chip *chip = irq_desc_get_chip(desc); - struct xgene_msi_group *msi_groups; - struct xgene_msi *xgene_msi; - int msir_index, msir_val, hw_irq, ret; - u32 intr_index, grp_select, msi_grp; + struct xgene_msi *xgene_msi = xgene_msi_ctrl; + unsigned long grp_pending; + int msir_idx; + u32 msi_grp; chained_irq_enter(chip, desc); - msi_groups = irq_desc_get_handler_data(desc); - xgene_msi = msi_groups->msi; - msi_grp = msi_groups->msi_grp; - - /* - * MSIINTn (n is 0..F) indicates if there is a pending MSI interrupt - * If bit x of this register is set (x is 0..7), one or more interrupts - * corresponding to MSInIRx is set. 
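A worked instance of the frame/index/data encoding introduced above (figures invented for illustration): hwirq 0x95 is binary 1001 0101, i.e. frame-select bit 1, register index 1, data 5. Targeted at CPU 3, the frame becomes (1 << 3) | 3 = 11, so the doorbell address is MSI11IR1 = base + 0x590000 and the payload is 5, matching what xgene_compose_msi_msg() programs:

	static void example_decode_hwirq(u64 base, irq_hw_number_t hwirq, u32 cpu)
	{
		u32 frame = FIELD_PREP(BIT(3), FIELD_GET(BIT(7), hwirq)) | cpu;
		u32 index = FIELD_GET(GENMASK(6, 4), hwirq);	/* MSInRx_HWIRQ_MASK */
		u64 addr = base + FIELD_PREP(GENMASK(22, 19), frame)
				+ FIELD_PREP(GENMASK(19, 16), index);
		u32 data = FIELD_GET(GENMASK(3, 0), hwirq);	/* DATA_HWIRQ_MASK */

		pr_info("hwirq %lu -> addr %#llx, data %u\n", hwirq, addr, data);
	}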
- */ - grp_select = xgene_msi_int_read(xgene_msi, msi_grp); - while (grp_select) { - msir_index = ffs(grp_select) - 1; - /* - * Calculate MSInIRx address to read to check for interrupts - * (refer to termination address and data assignment - * described in xgene_compose_msi_msg() ) - */ - msir_val = xgene_msi_ir_read(xgene_msi, msi_grp, msir_index); - while (msir_val) { - intr_index = ffs(msir_val) - 1; - /* - * Calculate MSI vector number (refer to the termination - * address and data assignment described in - * xgene_compose_msi_msg function) - */ - hw_irq = (((msir_index * IRQS_PER_IDX) + intr_index) * - NR_HW_IRQS) + msi_grp; - /* - * As we have multiple hw_irq that maps to single MSI, - * always look up the virq using the hw_irq as seen from - * CPU0 - */ - hw_irq = hwirq_to_canonical_hwirq(hw_irq); - ret = generic_handle_domain_irq(xgene_msi->inner_domain, hw_irq); + msi_grp = irqp - xgene_msi->gic_irq; + + grp_pending = xgene_msi_int_read(xgene_msi, msi_grp); + + for_each_set_bit(msir_idx, &grp_pending, IDX_PER_GROUP) { + unsigned long msir; + int intr_idx; + + msir = xgene_msi_ir_read(xgene_msi, msi_grp, msir_idx); + + for_each_set_bit(intr_idx, &msir, IRQS_PER_IDX) { + irq_hw_number_t hwirq; + int ret; + + hwirq = compute_hwirq(msi_grp, msir_idx, intr_idx); + ret = generic_handle_domain_irq(xgene_msi->inner_domain, + hwirq); WARN_ON_ONCE(ret); - msir_val &= ~(1 << intr_index); - } - grp_select &= ~(1 << msir_index); - - if (!grp_select) { - /* - * We handled all interrupts happened in this group, - * resample this group MSI_INTx register in case - * something else has been made pending in the meantime - */ - grp_select = xgene_msi_int_read(xgene_msi, msi_grp); } } chained_irq_exit(chip, desc); } -static enum cpuhp_state pci_xgene_online; - static void xgene_msi_remove(struct platform_device *pdev) { - struct xgene_msi *msi = platform_get_drvdata(pdev); - - if (pci_xgene_online) - cpuhp_remove_state(pci_xgene_online); - cpuhp_remove_state(CPUHP_PCI_XGENE_DEAD); - - kfree(msi->msi_groups); - - bitmap_free(msi->bitmap); - msi->bitmap = NULL; + for (int i = 0; i < NR_HW_IRQS; i++) { + unsigned int irq = xgene_msi_ctrl->gic_irq[i]; + if (!irq) + continue; + irq_set_chained_handler_and_data(irq, NULL, NULL); + } - xgene_free_domains(msi); + if (xgene_msi_ctrl->inner_domain) + irq_domain_remove(xgene_msi_ctrl->inner_domain); } -static int xgene_msi_hwirq_alloc(unsigned int cpu) +static int xgene_msi_handler_setup(struct platform_device *pdev) { - struct xgene_msi *msi = &xgene_msi_ctrl; - struct xgene_msi_group *msi_group; - cpumask_var_t mask; + struct xgene_msi *xgene_msi = xgene_msi_ctrl; int i; - int err; - for (i = cpu; i < NR_HW_IRQS; i += msi->num_cpus) { - msi_group = &msi->msi_groups[i]; - if (!msi_group->gic_irq) - continue; + for (i = 0; i < NR_HW_IRQS; i++) { + u32 msi_val; + int irq, err; + + /* + * MSInIRx registers are read-to-clear; before registering + * interrupt handlers, read all of them to clear spurious + * interrupts that may occur before the driver is probed. 
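One subtlety in the ISR rewrite above: the old handler re-read MSIINTn after draining each group to catch MSIs raised mid-walk, and that resample loop is gone. The rework relies instead on a flag set at allocation time, which makes the core replay an interrupt that fires while its handler is still running; this is the relevant fragment from xgene_irq_domain_alloc() earlier in the rework:

	irq_domain_set_info(domain, virq, hwirq, &xgene_msi_bottom_irq_chip,
			    domain->host_data, handle_simple_irq, NULL, NULL);
	irqd_set_resend_when_in_progress(irq_get_irq_data(virq));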
+ */ + for (int msi_idx = 0; msi_idx < IDX_PER_GROUP; msi_idx++) + xgene_msi_ir_read(xgene_msi, i, msi_idx); + + /* Read MSIINTn to confirm */ + msi_val = xgene_msi_int_read(xgene_msi, i); + if (msi_val) { + dev_err(&pdev->dev, "Failed to clear spurious IRQ\n"); + return -EINVAL; + } - irq_set_chained_handler_and_data(msi_group->gic_irq, - xgene_msi_isr, msi_group); + irq = platform_get_irq(pdev, i); + if (irq < 0) + return irq; + + xgene_msi->gic_irq[i] = irq; /* * Statically allocate MSI GIC IRQs to each CPU core. * With 8-core X-Gene v1, 2 MSI GIC IRQs are allocated * to each core. */ - if (alloc_cpumask_var(&mask, GFP_KERNEL)) { - cpumask_clear(mask); - cpumask_set_cpu(cpu, mask); - err = irq_set_affinity(msi_group->gic_irq, mask); - if (err) - pr_err("failed to set affinity for GIC IRQ"); - free_cpumask_var(mask); - } else { - pr_err("failed to alloc CPU mask for affinity\n"); - err = -EINVAL; - } - + irq_set_status_flags(irq, IRQ_NO_BALANCING); + err = irq_set_affinity(irq, cpumask_of(i % num_possible_cpus())); if (err) { - irq_set_chained_handler_and_data(msi_group->gic_irq, - NULL, NULL); + pr_err("failed to set affinity for GIC IRQ"); return err; } - } - return 0; -} - -static int xgene_msi_hwirq_free(unsigned int cpu) -{ - struct xgene_msi *msi = &xgene_msi_ctrl; - struct xgene_msi_group *msi_group; - int i; - - for (i = cpu; i < NR_HW_IRQS; i += msi->num_cpus) { - msi_group = &msi->msi_groups[i]; - if (!msi_group->gic_irq) - continue; - - irq_set_chained_handler_and_data(msi_group->gic_irq, NULL, - NULL); + irq_set_chained_handler_and_data(irq, xgene_msi_isr, + &xgene_msi_ctrl->gic_irq[i]); } + return 0; } @@ -432,14 +347,15 @@ static const struct of_device_id xgene_msi_match_table[] = { static int xgene_msi_probe(struct platform_device *pdev) { struct resource *res; - int rc, irq_index; struct xgene_msi *xgene_msi; - int virt_msir; - u32 msi_val, msi_idx; + int rc; - xgene_msi = &xgene_msi_ctrl; + xgene_msi_ctrl = devm_kzalloc(&pdev->dev, sizeof(*xgene_msi_ctrl), + GFP_KERNEL); + if (!xgene_msi_ctrl) + return -ENOMEM; - platform_set_drvdata(pdev, xgene_msi); + xgene_msi = xgene_msi_ctrl; xgene_msi->msi_regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res); if (IS_ERR(xgene_msi->msi_regs)) { @@ -447,66 +363,26 @@ static int xgene_msi_probe(struct platform_device *pdev) goto error; } xgene_msi->msi_addr = res->start; - xgene_msi->node = pdev->dev.of_node; - xgene_msi->num_cpus = num_possible_cpus(); - rc = xgene_msi_init_allocator(xgene_msi); + rc = xgene_msi_init_allocator(&pdev->dev); if (rc) { dev_err(&pdev->dev, "Error allocating MSI bitmap\n"); goto error; } - rc = xgene_allocate_domains(xgene_msi); + rc = xgene_allocate_domains(dev_of_node(&pdev->dev), xgene_msi); if (rc) { dev_err(&pdev->dev, "Failed to allocate MSI domain\n"); goto error; } - for (irq_index = 0; irq_index < NR_HW_IRQS; irq_index++) { - virt_msir = platform_get_irq(pdev, irq_index); - if (virt_msir < 0) { - rc = virt_msir; - goto error; - } - xgene_msi->msi_groups[irq_index].gic_irq = virt_msir; - xgene_msi->msi_groups[irq_index].msi_grp = irq_index; - xgene_msi->msi_groups[irq_index].msi = xgene_msi; - } - - /* - * MSInIRx registers are read-to-clear; before registering - * interrupt handlers, read all of them to clear spurious - * interrupts that may occur before the driver is probed.
- */ - for (irq_index = 0; irq_index < NR_HW_IRQS; irq_index++) { - for (msi_idx = 0; msi_idx < IDX_PER_GROUP; msi_idx++) - xgene_msi_ir_read(xgene_msi, irq_index, msi_idx); - - /* Read MSIINTn to confirm */ - msi_val = xgene_msi_int_read(xgene_msi, irq_index); - if (msi_val) { - dev_err(&pdev->dev, "Failed to clear spurious IRQ\n"); - rc = -EINVAL; - goto error; - } - } - - rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "pci/xgene:online", - xgene_msi_hwirq_alloc, NULL); - if (rc < 0) - goto err_cpuhp; - pci_xgene_online = rc; - rc = cpuhp_setup_state(CPUHP_PCI_XGENE_DEAD, "pci/xgene:dead", NULL, - xgene_msi_hwirq_free); + rc = xgene_msi_handler_setup(pdev); if (rc) - goto err_cpuhp; + goto error; dev_info(&pdev->dev, "APM X-Gene PCIe MSI driver loaded\n"); return 0; - -err_cpuhp: - dev_err(&pdev->dev, "failed to add CPU MSI notifier\n"); error: xgene_msi_remove(pdev); return rc; @@ -520,9 +396,4 @@ static struct platform_driver xgene_msi_driver = { .probe = xgene_msi_probe, .remove = xgene_msi_remove, }; - -static int __init xgene_pcie_msi_init(void) -{ - return platform_driver_register(&xgene_msi_driver); -} -subsys_initcall(xgene_pcie_msi_init); +builtin_platform_driver(xgene_msi_driver); diff --git a/drivers/pci/controller/pci-xgene.c b/drivers/pci/controller/pci-xgene.c index 1e2ebbfa36d1..b95afa35201d 100644 --- a/drivers/pci/controller/pci-xgene.c +++ b/drivers/pci/controller/pci-xgene.c @@ -12,6 +12,7 @@ #include <linux/jiffies.h> #include <linux/memblock.h> #include <linux/init.h> +#include <linux/irqdomain.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_pci.h> @@ -53,11 +54,9 @@ #define XGENE_V1_PCI_EXP_CAP 0x40 /* PCIe IP version */ -#define XGENE_PCIE_IP_VER_UNKN 0 #define XGENE_PCIE_IP_VER_1 1 #define XGENE_PCIE_IP_VER_2 2 -#if defined(CONFIG_PCI_XGENE) || (defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)) struct xgene_pcie { struct device_node *node; struct device *dev; @@ -188,7 +187,6 @@ static int xgene_pcie_config_read32(struct pci_bus *bus, unsigned int devfn, return PCIBIOS_SUCCESSFUL; } -#endif #if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS) static int xgene_get_csr_resource(struct acpi_device *adev, @@ -279,7 +277,6 @@ const struct pci_ecam_ops xgene_v2_pcie_ecam_ops = { }; #endif -#if defined(CONFIG_PCI_XGENE) static u64 xgene_pcie_set_ib_mask(struct xgene_pcie *port, u32 addr, u32 flags, u64 size) { @@ -594,6 +591,24 @@ static struct pci_ops xgene_pcie_ops = { .write = pci_generic_config_write32, }; +static bool xgene_check_pcie_msi_ready(void) +{ + struct device_node *np; + struct irq_domain *d; + + if (!IS_ENABLED(CONFIG_PCI_XGENE_MSI)) + return true; + + np = of_find_compatible_node(NULL, NULL, "apm,xgene1-msi"); + if (!np) + return true; + + d = irq_find_matching_host(np, DOMAIN_BUS_PCI_MSI); + of_node_put(np); + + return d && irq_domain_is_msi_parent(d); +} + static int xgene_pcie_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -602,6 +617,10 @@ static int xgene_pcie_probe(struct platform_device *pdev) struct pci_host_bridge *bridge; int ret; + if (!xgene_check_pcie_msi_ready()) + return dev_err_probe(&pdev->dev, -EPROBE_DEFER, + "MSI driver not ready\n"); + bridge = devm_pci_alloc_host_bridge(dev, sizeof(*port)); if (!bridge) return -ENOMEM; @@ -610,10 +629,7 @@ static int xgene_pcie_probe(struct platform_device *pdev) port->node = of_node_get(dn); port->dev = dev; - - port->version = XGENE_PCIE_IP_VER_UNKN; - if (of_device_is_compatible(port->node, "apm,xgene-pcie")) - port->version = 
XGENE_PCIE_IP_VER_1; + port->version = XGENE_PCIE_IP_VER_1; ret = xgene_pcie_map_reg(port, pdev); if (ret) @@ -647,4 +663,3 @@ static struct platform_driver xgene_pcie_driver = { .probe = xgene_pcie_probe, }; builtin_platform_driver(xgene_pcie_driver); -#endif diff --git a/drivers/pci/controller/pcie-altera-msi.c b/drivers/pci/controller/pcie-altera-msi.c index e1cee3c0575f..ea2ca2e70f20 100644 --- a/drivers/pci/controller/pcie-altera-msi.c +++ b/drivers/pci/controller/pcie-altera-msi.c @@ -9,6 +9,7 @@ #include <linux/interrupt.h> #include <linux/irqchip/chained_irq.h> +#include <linux/irqchip/irq-msi-lib.h> #include <linux/irqdomain.h> #include <linux/init.h> #include <linux/module.h> @@ -29,7 +30,6 @@ struct altera_msi { DECLARE_BITMAP(used, MAX_MSI_VECTORS); struct mutex lock; /* protect "used" bitmap */ struct platform_device *pdev; - struct irq_domain *msi_domain; struct irq_domain *inner_domain; void __iomem *csr_base; void __iomem *vector_base; @@ -74,18 +74,20 @@ static void altera_msi_isr(struct irq_desc *desc) chained_irq_exit(chip, desc); } -static struct irq_chip altera_msi_irq_chip = { - .name = "Altera PCIe MSI", - .irq_mask = pci_msi_mask_irq, - .irq_unmask = pci_msi_unmask_irq, -}; +#define ALTERA_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \ + MSI_FLAG_USE_DEF_CHIP_OPS | \ + MSI_FLAG_NO_AFFINITY) -static struct msi_domain_info altera_msi_domain_info = { - .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | - MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX, - .chip = &altera_msi_irq_chip, -}; +#define ALTERA_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \ + MSI_FLAG_PCI_MSIX) +static const struct msi_parent_ops altera_msi_parent_ops = { + .required_flags = ALTERA_MSI_FLAGS_REQUIRED, + .supported_flags = ALTERA_MSI_FLAGS_SUPPORTED, + .bus_select_token = DOMAIN_BUS_PCI_MSI, + .prefix = "Altera-", + .init_dev_msi_info = msi_lib_init_dev_msi_info, +}; static void altera_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) { struct altera_msi *msi = irq_data_get_irq_chip_data(data); @@ -164,20 +166,16 @@ static const struct irq_domain_ops msi_domain_ops = { static int altera_allocate_domains(struct altera_msi *msi) { - struct fwnode_handle *fwnode = of_node_to_fwnode(msi->pdev->dev.of_node); - - msi->inner_domain = irq_domain_add_linear(NULL, msi->num_of_vectors, - &msi_domain_ops, msi); + struct irq_domain_info info = { + .fwnode = dev_fwnode(&msi->pdev->dev), + .ops = &msi_domain_ops, + .host_data = msi, + .size = msi->num_of_vectors, + }; + + msi->inner_domain = msi_create_parent_irq_domain(&info, &altera_msi_parent_ops); if (!msi->inner_domain) { - dev_err(&msi->pdev->dev, "failed to create IRQ domain\n"); - return -ENOMEM; - } - - msi->msi_domain = pci_msi_create_irq_domain(fwnode, - &altera_msi_domain_info, msi->inner_domain); - if (!msi->msi_domain) { dev_err(&msi->pdev->dev, "failed to create MSI domain\n"); - irq_domain_remove(msi->inner_domain); return -ENOMEM; } @@ -186,7 +184,6 @@ static int altera_allocate_domains(struct altera_msi *msi) static void altera_free_domains(struct altera_msi *msi) { - irq_domain_remove(msi->msi_domain); irq_domain_remove(msi->inner_domain); } diff --git a/drivers/pci/controller/pcie-altera.c b/drivers/pci/controller/pcie-altera.c index 70409e71a18f..3dbb7adc421c 100644 --- a/drivers/pci/controller/pcie-altera.c +++ b/drivers/pci/controller/pcie-altera.c @@ -852,10 +852,9 @@ static void aglx_isr(struct irq_desc *desc) static int altera_pcie_init_irq_domain(struct altera_pcie *pcie) { struct device *dev = &pcie->pdev->dev; - 
struct device_node *node = dev->of_node; /* Setup INTx */ - pcie->irq_domain = irq_domain_add_linear(node, PCI_NUM_INTX, + pcie->irq_domain = irq_domain_create_linear(dev_fwnode(dev), PCI_NUM_INTX, &intx_domain_ops, pcie); if (!pcie->irq_domain) { dev_err(dev, "Failed to get a INTx IRQ domain\n"); diff --git a/drivers/pci/controller/pcie-apple.c b/drivers/pci/controller/pcie-apple.c index 18e11b9a7f46..0380d300adca 100644 --- a/drivers/pci/controller/pcie-apple.c +++ b/drivers/pci/controller/pcie-apple.c @@ -18,10 +18,12 @@ * Author: Marc Zyngier <maz@kernel.org> */ +#include <linux/bitfield.h> #include <linux/gpio/consumer.h> #include <linux/kernel.h> #include <linux/iopoll.h> #include <linux/irqchip/chained_irq.h> +#include <linux/irqchip/irq-msi-lib.h> #include <linux/irqdomain.h> #include <linux/list.h> #include <linux/module.h> @@ -29,6 +31,9 @@ #include <linux/of_irq.h> #include <linux/pci-ecam.h> +#include "pci-host-common.h" + +/* T8103 (original M1) and related SoCs */ #define CORE_RC_PHYIF_CTL 0x00024 #define CORE_RC_PHYIF_CTL_RUN BIT(0) #define CORE_RC_PHYIF_STAT 0x00028 @@ -39,14 +44,18 @@ #define CORE_RC_STAT_READY BIT(0) #define CORE_FABRIC_STAT 0x04000 #define CORE_FABRIC_STAT_MASK 0x001F001F -#define CORE_LANE_CFG(port) (0x84000 + 0x4000 * (port)) -#define CORE_LANE_CFG_REFCLK0REQ BIT(0) -#define CORE_LANE_CFG_REFCLK1REQ BIT(1) -#define CORE_LANE_CFG_REFCLK0ACK BIT(2) -#define CORE_LANE_CFG_REFCLK1ACK BIT(3) -#define CORE_LANE_CFG_REFCLKEN (BIT(9) | BIT(10)) -#define CORE_LANE_CTL(port) (0x84004 + 0x4000 * (port)) -#define CORE_LANE_CTL_CFGACC BIT(15) + +#define CORE_PHY_DEFAULT_BASE(port) (0x84000 + 0x4000 * (port)) + +#define PHY_LANE_CFG 0x00000 +#define PHY_LANE_CFG_REFCLK0REQ BIT(0) +#define PHY_LANE_CFG_REFCLK1REQ BIT(1) +#define PHY_LANE_CFG_REFCLK0ACK BIT(2) +#define PHY_LANE_CFG_REFCLK1ACK BIT(3) +#define PHY_LANE_CFG_REFCLKEN (BIT(9) | BIT(10)) +#define PHY_LANE_CFG_REFCLKCGEN (BIT(30) | BIT(31)) +#define PHY_LANE_CTL 0x00004 +#define PHY_LANE_CTL_CFGACC BIT(15) #define PORT_LTSSMCTL 0x00080 #define PORT_LTSSMCTL_START BIT(0) @@ -100,7 +109,7 @@ #define PORT_REFCLK_CGDIS BIT(8) #define PORT_PERST 0x00814 #define PORT_PERST_OFF BIT(0) -#define PORT_RID2SID(i16) (0x00828 + 4 * (i16)) +#define PORT_RID2SID 0x00828 #define PORT_RID2SID_VALID BIT(31) #define PORT_RID2SID_SID_SHIFT 16 #define PORT_RID2SID_BUS_SHIFT 8 @@ -118,7 +127,15 @@ #define PORT_TUNSTAT_PERST_ACK_PEND BIT(1) #define PORT_PREFMEM_ENABLE 0x00994 -#define MAX_RID2SID 64 +/* T602x (M2-pro and co) */ +#define PORT_T602X_MSIADDR 0x016c +#define PORT_T602X_MSIADDR_HI 0x0170 +#define PORT_T602X_PERST 0x082c +#define PORT_T602X_RID2SID 0x3000 +#define PORT_T602X_MSIMAP 0x3800 + +#define PORT_MSIMAP_ENABLE BIT(31) +#define PORT_MSIMAP_TARGET GENMASK(7, 0) /* * The doorbell address is set to 0xfffff000, which by convention @@ -129,29 +146,69 @@ */ #define DOORBELL_ADDR CONFIG_PCIE_APPLE_MSI_DOORBELL_ADDR +struct hw_info { + u32 phy_lane_ctl; + u32 port_msiaddr; + u32 port_msiaddr_hi; + u32 port_refclk; + u32 port_perst; + u32 port_rid2sid; + u32 port_msimap; + u32 max_rid2sid; +}; + +static const struct hw_info t8103_hw = { + .phy_lane_ctl = PHY_LANE_CTL, + .port_msiaddr = PORT_MSIADDR, + .port_msiaddr_hi = 0, + .port_refclk = PORT_REFCLK, + .port_perst = PORT_PERST, + .port_rid2sid = PORT_RID2SID, + .port_msimap = 0, + .max_rid2sid = 64, +}; + +static const struct hw_info t602x_hw = { + .phy_lane_ctl = 0, + .port_msiaddr = PORT_T602X_MSIADDR, + .port_msiaddr_hi = PORT_T602X_MSIADDR_HI, + .port_refclk = 0, + 
.port_perst = PORT_T602X_PERST, + .port_rid2sid = PORT_T602X_RID2SID, + .port_msimap = PORT_T602X_MSIMAP, + /* 16 on t602x, guess for autodetect on future HW */ + .max_rid2sid = 512, +}; + struct apple_pcie { struct mutex lock; struct device *dev; void __iomem *base; - struct irq_domain *domain; + const struct hw_info *hw; unsigned long *bitmap; struct list_head ports; + struct list_head entry; struct completion event; struct irq_fwspec fwspec; u32 nvecs; }; struct apple_pcie_port { + raw_spinlock_t lock; struct apple_pcie *pcie; struct device_node *np; void __iomem *base; + void __iomem *phy; struct irq_domain *domain; struct list_head entry; - DECLARE_BITMAP(sid_map, MAX_RID2SID); + unsigned long *sid_map; int sid_map_sz; int idx; }; +static LIST_HEAD(pcie_list); +static DEFINE_MUTEX(pcie_list_lock); + static void rmw_set(u32 set, void __iomem *addr) { writel_relaxed(readl_relaxed(addr) | set, addr); @@ -162,27 +219,6 @@ static void rmw_clear(u32 clr, void __iomem *addr) writel_relaxed(readl_relaxed(addr) & ~clr, addr); } -static void apple_msi_top_irq_mask(struct irq_data *d) -{ - pci_msi_mask_irq(d); - irq_chip_mask_parent(d); -} - -static void apple_msi_top_irq_unmask(struct irq_data *d) -{ - pci_msi_unmask_irq(d); - irq_chip_unmask_parent(d); -} - -static struct irq_chip apple_msi_top_chip = { - .name = "PCIe MSI", - .irq_mask = apple_msi_top_irq_mask, - .irq_unmask = apple_msi_top_irq_unmask, - .irq_eoi = irq_chip_eoi_parent, - .irq_set_affinity = irq_chip_set_affinity_parent, - .irq_set_type = irq_chip_set_type_parent, -}; - static void apple_msi_compose_msg(struct irq_data *data, struct msi_msg *msg) { msg->address_hi = upper_32_bits(DOORBELL_ADDR); @@ -226,8 +262,7 @@ static int apple_msi_domain_alloc(struct irq_domain *domain, unsigned int virq, for (i = 0; i < nr_irqs; i++) { irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i, - &apple_msi_bottom_chip, - domain->host_data); + &apple_msi_bottom_chip, pcie); } return 0; @@ -251,24 +286,20 @@ static const struct irq_domain_ops apple_msi_domain_ops = { .free = apple_msi_domain_free, }; -static struct msi_domain_info apple_msi_info = { - .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | - MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX), - .chip = &apple_msi_top_chip, -}; - static void apple_port_irq_mask(struct irq_data *data) { struct apple_pcie_port *port = irq_data_get_irq_chip_data(data); - writel_relaxed(BIT(data->hwirq), port->base + PORT_INTMSKSET); + guard(raw_spinlock_irqsave)(&port->lock); + rmw_set(BIT(data->hwirq), port->base + PORT_INTMSK); } static void apple_port_irq_unmask(struct irq_data *data) { struct apple_pcie_port *port = irq_data_get_irq_chip_data(data); - writel_relaxed(BIT(data->hwirq), port->base + PORT_INTMSKCLR); + guard(raw_spinlock_irqsave)(&port->lock); + rmw_clear(BIT(data->hwirq), port->base + PORT_INTMSK); } static bool hwirq_is_intx(unsigned int hwirq) @@ -372,7 +403,9 @@ static void apple_port_irq_handler(struct irq_desc *desc) static int apple_pcie_port_setup_irq(struct apple_pcie_port *port) { struct fwnode_handle *fwnode = &port->np->fwnode; + struct apple_pcie *pcie = port->pcie; unsigned int irq; + u32 val = 0; /* FIXME: consider moving each interrupt under each port */ irq = irq_of_parse_and_map(to_of_node(dev_fwnode(port->pcie->dev)), @@ -387,20 +420,31 @@ static int apple_pcie_port_setup_irq(struct apple_pcie_port *port) return -ENOMEM; /* Disable all interrupts */ - writel_relaxed(~0, port->base + PORT_INTMSKSET); + writel_relaxed(~0, port->base + PORT_INTMSK); 
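/*
 * Editor's note (inference, not part of the patch): PORT_INTMSK behaves as
 * a plain mask register, unlike the write-one-to-set/clear
 * PORT_INTMSKSET/PORT_INTMSKCLR pair it replaces, which is why the mask
 * and unmask callbacks above switched to read-modify-write updates
 * serialized by port->lock.
 */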
writel_relaxed(~0, port->base + PORT_INTSTAT); + writel_relaxed(~0, port->base + PORT_LINKCMDSTS); irq_set_chained_handler_and_data(irq, apple_port_irq_handler, port); /* Configure MSI base address */ BUILD_BUG_ON(upper_32_bits(DOORBELL_ADDR)); - writel_relaxed(lower_32_bits(DOORBELL_ADDR), port->base + PORT_MSIADDR); + writel_relaxed(lower_32_bits(DOORBELL_ADDR), + port->base + pcie->hw->port_msiaddr); + if (pcie->hw->port_msiaddr_hi) + writel_relaxed(0, port->base + pcie->hw->port_msiaddr_hi); /* Enable MSIs, shared between all ports */ - writel_relaxed(0, port->base + PORT_MSIBASE); - writel_relaxed((ilog2(port->pcie->nvecs) << PORT_MSICFG_L2MSINUM_SHIFT) | - PORT_MSICFG_EN, port->base + PORT_MSICFG); + if (pcie->hw->port_msimap) { + for (int i = 0; i < pcie->nvecs; i++) + writel_relaxed(FIELD_PREP(PORT_MSIMAP_TARGET, i) | + PORT_MSIMAP_ENABLE, + port->base + pcie->hw->port_msimap + 4 * i); + } else { + writel_relaxed(0, port->base + PORT_MSIBASE); + val = ilog2(pcie->nvecs) << PORT_MSICFG_L2MSINUM_SHIFT; + } + writel_relaxed(val | PORT_MSICFG_EN, port->base + PORT_MSICFG); return 0; } @@ -467,43 +511,47 @@ static int apple_pcie_setup_refclk(struct apple_pcie *pcie, u32 stat; int res; - res = readl_relaxed_poll_timeout(pcie->base + CORE_RC_PHYIF_STAT, stat, - stat & CORE_RC_PHYIF_STAT_REFCLK, - 100, 50000); - if (res < 0) - return res; + if (pcie->hw->phy_lane_ctl) + rmw_set(PHY_LANE_CTL_CFGACC, port->phy + pcie->hw->phy_lane_ctl); - rmw_set(CORE_LANE_CTL_CFGACC, pcie->base + CORE_LANE_CTL(port->idx)); - rmw_set(CORE_LANE_CFG_REFCLK0REQ, pcie->base + CORE_LANE_CFG(port->idx)); + rmw_set(PHY_LANE_CFG_REFCLK0REQ, port->phy + PHY_LANE_CFG); - res = readl_relaxed_poll_timeout(pcie->base + CORE_LANE_CFG(port->idx), - stat, stat & CORE_LANE_CFG_REFCLK0ACK, + res = readl_relaxed_poll_timeout(port->phy + PHY_LANE_CFG, + stat, stat & PHY_LANE_CFG_REFCLK0ACK, 100, 50000); if (res < 0) return res; - rmw_set(CORE_LANE_CFG_REFCLK1REQ, pcie->base + CORE_LANE_CFG(port->idx)); - res = readl_relaxed_poll_timeout(pcie->base + CORE_LANE_CFG(port->idx), - stat, stat & CORE_LANE_CFG_REFCLK1ACK, + rmw_set(PHY_LANE_CFG_REFCLK1REQ, port->phy + PHY_LANE_CFG); + res = readl_relaxed_poll_timeout(port->phy + PHY_LANE_CFG, + stat, stat & PHY_LANE_CFG_REFCLK1ACK, 100, 50000); if (res < 0) return res; - rmw_clear(CORE_LANE_CTL_CFGACC, pcie->base + CORE_LANE_CTL(port->idx)); + if (pcie->hw->phy_lane_ctl) + rmw_clear(PHY_LANE_CTL_CFGACC, port->phy + pcie->hw->phy_lane_ctl); - rmw_set(CORE_LANE_CFG_REFCLKEN, pcie->base + CORE_LANE_CFG(port->idx)); - rmw_set(PORT_REFCLK_EN, port->base + PORT_REFCLK); + rmw_set(PHY_LANE_CFG_REFCLKEN, port->phy + PHY_LANE_CFG); + + if (pcie->hw->port_refclk) + rmw_set(PORT_REFCLK_EN, port->base + pcie->hw->port_refclk); return 0; } +static void __iomem *port_rid2sid_addr(struct apple_pcie_port *port, int idx) +{ + return port->base + port->pcie->hw->port_rid2sid + 4 * idx; +} + static u32 apple_pcie_rid2sid_write(struct apple_pcie_port *port, int idx, u32 val) { - writel_relaxed(val, port->base + PORT_RID2SID(idx)); + writel_relaxed(val, port_rid2sid_addr(port, idx)); /* Read back to ensure completion of the write */ - return readl_relaxed(port->base + PORT_RID2SID(idx)); + return readl_relaxed(port_rid2sid_addr(port, idx)); } static int apple_pcie_setup_port(struct apple_pcie *pcie, @@ -512,6 +560,8 @@ static int apple_pcie_setup_port(struct apple_pcie *pcie, struct platform_device *platform = to_platform_device(pcie->dev); struct apple_pcie_port *port; struct gpio_desc *reset; + struct 
resource *res; + char name[16]; u32 stat, idx; int ret, i; @@ -524,6 +574,10 @@ static int apple_pcie_setup_port(struct apple_pcie *pcie, if (!port) return -ENOMEM; + port->sid_map = devm_bitmap_zalloc(pcie->dev, pcie->hw->max_rid2sid, GFP_KERNEL); + if (!port->sid_map) + return -ENOMEM; + ret = of_property_read_u32_index(np, "reg", 0, &idx); if (ret) return ret; @@ -533,14 +587,28 @@ static int apple_pcie_setup_port(struct apple_pcie *pcie, port->pcie = pcie; port->np = np; - port->base = devm_platform_ioremap_resource(platform, port->idx + 2); + raw_spin_lock_init(&port->lock); + + snprintf(name, sizeof(name), "port%d", port->idx); + res = platform_get_resource_byname(platform, IORESOURCE_MEM, name); + if (!res) + res = platform_get_resource(platform, IORESOURCE_MEM, port->idx + 2); + + port->base = devm_ioremap_resource(&platform->dev, res); if (IS_ERR(port->base)) return PTR_ERR(port->base); + snprintf(name, sizeof(name), "phy%d", port->idx); + res = platform_get_resource_byname(platform, IORESOURCE_MEM, name); + if (res) + port->phy = devm_ioremap_resource(&platform->dev, res); + else + port->phy = pcie->base + CORE_PHY_DEFAULT_BASE(port->idx); + rmw_set(PORT_APPCLK_EN, port->base + PORT_APPCLK); /* Assert PERST# before setting up the clock */ - gpiod_set_value(reset, 1); + gpiod_set_value_cansleep(reset, 1); ret = apple_pcie_setup_refclk(pcie, port); if (ret < 0) @@ -550,8 +618,8 @@ static int apple_pcie_setup_port(struct apple_pcie *pcie, usleep_range(100, 200); /* Deassert PERST# */ - rmw_set(PORT_PERST_OFF, port->base + PORT_PERST); - gpiod_set_value(reset, 0); + rmw_set(PORT_PERST_OFF, port->base + pcie->hw->port_perst); + gpiod_set_value_cansleep(reset, 0); /* Wait for 100ms after PERST# deassertion (PCIe r5.0, 6.6.1) */ msleep(100); @@ -563,7 +631,11 @@ static int apple_pcie_setup_port(struct apple_pcie *pcie, return ret; } - rmw_clear(PORT_REFCLK_CGDIS, port->base + PORT_REFCLK); + if (pcie->hw->port_refclk) + rmw_clear(PORT_REFCLK_CGDIS, port->base + pcie->hw->port_refclk); + else + rmw_set(PHY_LANE_CFG_REFCLKCGEN, port->phy + PHY_LANE_CFG); + rmw_clear(PORT_APPCLK_CGDIS, port->base + PORT_APPCLK); ret = apple_pcie_port_setup_irq(port); @@ -571,7 +643,7 @@ static int apple_pcie_setup_port(struct apple_pcie *pcie, return ret; /* Reset all RID/SID mappings, and check for RAZ/WI registers */ - for (i = 0; i < MAX_RID2SID; i++) { + for (i = 0; i < pcie->hw->max_rid2sid; i++) { if (apple_pcie_rid2sid_write(port, i, 0xbad1d) != 0xbad1d) break; apple_pcie_rid2sid_write(port, i, 0); @@ -584,6 +656,9 @@ static int apple_pcie_setup_port(struct apple_pcie *pcie, list_add_tail(&port->entry, &pcie->ports); init_completion(&pcie->event); + /* In the success path, we keep a reference to np around */ + of_node_get(np); + ret = apple_pcie_port_register_irqs(port); WARN_ON(ret); @@ -595,11 +670,28 @@ static int apple_pcie_setup_port(struct apple_pcie *pcie, return 0; } +static const struct msi_parent_ops apple_msi_parent_ops = { + .supported_flags = (MSI_GENERIC_FLAGS_MASK | + MSI_FLAG_PCI_MSIX | + MSI_FLAG_MULTI_PCI_MSI), + .required_flags = (MSI_FLAG_USE_DEF_DOM_OPS | + MSI_FLAG_USE_DEF_CHIP_OPS | + MSI_FLAG_PCI_MSI_MASK_PARENT), + .chip_flags = MSI_CHIP_FLAG_SET_EOI, + .bus_select_token = DOMAIN_BUS_PCI_MSI, + .init_dev_msi_info = msi_lib_init_dev_msi_info, +}; + static int apple_msi_init(struct apple_pcie *pcie) { struct fwnode_handle *fwnode = dev_fwnode(pcie->dev); + struct irq_domain_info info = { + .fwnode = fwnode, + .ops = &apple_msi_domain_ops, + .size = pcie->nvecs, + .host_data = 
pcie, + }; struct of_phandle_args args = {}; - struct irq_domain *parent; int ret; ret = of_parse_phandle_with_args(to_of_node(fwnode), "msi-ranges", @@ -619,38 +711,58 @@ static int apple_msi_init(struct apple_pcie *pcie) if (!pcie->bitmap) return -ENOMEM; - parent = irq_find_matching_fwspec(&pcie->fwspec, DOMAIN_BUS_WIRED); - if (!parent) { + info.parent = irq_find_matching_fwspec(&pcie->fwspec, DOMAIN_BUS_WIRED); + if (!info.parent) { dev_err(pcie->dev, "failed to find parent domain\n"); return -ENXIO; } - parent = irq_domain_create_hierarchy(parent, 0, pcie->nvecs, fwnode, - &apple_msi_domain_ops, pcie); - if (!parent) { + if (!msi_create_parent_irq_domain(&info, &apple_msi_parent_ops)) { dev_err(pcie->dev, "failed to create IRQ domain\n"); return -ENOMEM; } - irq_domain_update_bus_token(parent, DOMAIN_BUS_NEXUS); + return 0; +} - pcie->domain = pci_msi_create_irq_domain(fwnode, &apple_msi_info, - parent); - if (!pcie->domain) { - dev_err(pcie->dev, "failed to create MSI domain\n"); - irq_domain_remove(parent); - return -ENOMEM; +static void apple_pcie_register(struct apple_pcie *pcie) +{ + guard(mutex)(&pcie_list_lock); + + list_add_tail(&pcie->entry, &pcie_list); +} + +static void apple_pcie_unregister(struct apple_pcie *pcie) +{ + guard(mutex)(&pcie_list_lock); + + list_del(&pcie->entry); +} + +static struct apple_pcie *apple_pcie_lookup(struct device *dev) +{ + struct apple_pcie *pcie; + + guard(mutex)(&pcie_list_lock); + + list_for_each_entry(pcie, &pcie_list, entry) { + if (pcie->dev == dev) + return pcie; } - return 0; + return NULL; } static struct apple_pcie_port *apple_pcie_get_port(struct pci_dev *pdev) { struct pci_config_window *cfg = pdev->sysdata; - struct apple_pcie *pcie = cfg->priv; + struct apple_pcie *pcie; struct pci_dev *port_pdev; struct apple_pcie_port *port; + pcie = apple_pcie_lookup(cfg->parent); + if (WARN_ON(!pcie)) + return NULL; + /* Find the root port this device is on */ port_pdev = pcie_find_root_port(pdev); @@ -716,7 +828,7 @@ static void apple_pcie_disable_device(struct pci_host_bridge *bridge, struct pci for_each_set_bit(idx, port->sid_map, port->sid_map_sz) { u32 val; - val = readl_relaxed(port->base + PORT_RID2SID(idx)); + val = readl_relaxed(port_rid2sid_addr(port, idx)); if ((val & 0xffff) == rid) { apple_pcie_rid2sid_write(port, idx, 0); bitmap_release_region(port->sid_map, idx, 0); @@ -731,33 +843,17 @@ static void apple_pcie_disable_device(struct pci_host_bridge *bridge, struct pci static int apple_pcie_init(struct pci_config_window *cfg) { struct device *dev = cfg->parent; - struct platform_device *platform = to_platform_device(dev); struct apple_pcie *pcie; int ret; - pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); - if (!pcie) - return -ENOMEM; - - pcie->dev = dev; - - mutex_init(&pcie->lock); - - pcie->base = devm_platform_ioremap_resource(platform, 1); - if (IS_ERR(pcie->base)) - return PTR_ERR(pcie->base); - - cfg->priv = pcie; - INIT_LIST_HEAD(&pcie->ports); - - ret = apple_msi_init(pcie); - if (ret) - return ret; + pcie = apple_pcie_lookup(dev); + if (WARN_ON(!pcie)) + return -ENOENT; - for_each_child_of_node_scoped(dev->of_node, of_port) { + for_each_available_child_of_node_scoped(dev->of_node, of_port) { ret = apple_pcie_setup_port(pcie, of_port); if (ret) { - dev_err(pcie->dev, "Port %pOF setup fail: %d\n", of_port, ret); + dev_err(dev, "Port %pOF setup fail: %d\n", of_port, ret); return ret; } } @@ -776,14 +872,49 @@ static const struct pci_ecam_ops apple_pcie_cfg_ecam_ops = { } }; +static int apple_pcie_probe(struct 
platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct apple_pcie *pcie; + int ret; + + pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); + if (!pcie) + return -ENOMEM; + + pcie->dev = dev; + pcie->hw = of_device_get_match_data(dev); + if (!pcie->hw) + return -ENODEV; + pcie->base = devm_platform_ioremap_resource(pdev, 1); + if (IS_ERR(pcie->base)) + return PTR_ERR(pcie->base); + + mutex_init(&pcie->lock); + INIT_LIST_HEAD(&pcie->ports); + + ret = apple_msi_init(pcie); + if (ret) + return ret; + + apple_pcie_register(pcie); + + ret = pci_host_common_init(pdev, &apple_pcie_cfg_ecam_ops); + if (ret) + apple_pcie_unregister(pcie); + + return ret; +} + static const struct of_device_id apple_pcie_of_match[] = { - { .compatible = "apple,pcie", .data = &apple_pcie_cfg_ecam_ops }, + { .compatible = "apple,t6020-pcie", .data = &t602x_hw }, + { .compatible = "apple,pcie", .data = &t8103_hw }, { } }; MODULE_DEVICE_TABLE(of, apple_pcie_of_match); static struct platform_driver apple_pcie_driver = { - .probe = pci_host_common_probe, + .probe = apple_pcie_probe, .driver = { .name = "pcie-apple", .of_match_table = apple_pcie_of_match, diff --git a/drivers/pci/controller/pcie-brcmstb.c b/drivers/pci/controller/pcie-brcmstb.c index e19628e13898..9afbd02ded35 100644 --- a/drivers/pci/controller/pcie-brcmstb.c +++ b/drivers/pci/controller/pcie-brcmstb.c @@ -12,6 +12,7 @@ #include <linux/iopoll.h> #include <linux/ioport.h> #include <linux/irqchip/chained_irq.h> +#include <linux/irqchip/irq-msi-lib.h> #include <linux/irqdomain.h> #include <linux/kernel.h> #include <linux/list.h> @@ -46,6 +47,7 @@ #define PCIE_RC_CFG_PRIV1_ID_VAL3_CLASS_CODE_MASK 0xffffff #define PCIE_RC_CFG_PRIV1_LINK_CAPABILITY 0x04dc +#define PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_MAX_LINK_WIDTH_MASK 0x1f0 #define PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_ASPM_SUPPORT_MASK 0xc00 #define PCIE_RC_CFG_PRIV1_ROOT_CAP 0x4f8 @@ -55,6 +57,9 @@ #define PCIE_RC_DL_MDIO_WR_DATA 0x1104 #define PCIE_RC_DL_MDIO_RD_DATA 0x1108 +#define PCIE_RC_PL_REG_PHY_CTL_1 0x1804 +#define PCIE_RC_PL_REG_PHY_CTL_1_REG_P2_POWERDOWN_ENA_NOSYNC_MASK 0x8 + #define PCIE_RC_PL_PHY_CTL_15 0x184c #define PCIE_RC_PL_PHY_CTL_15_DIS_PLL_PD_MASK 0x400000 #define PCIE_RC_PL_PHY_CTL_15_PM_CLK_PERIOD_MASK 0xff @@ -265,7 +270,6 @@ struct brcm_msi { struct device *dev; void __iomem *base; struct device_node *np; - struct irq_domain *msi_domain; struct irq_domain *inner_domain; struct mutex lock; /* guards the alloc/free operations */ u64 target_addr; @@ -465,17 +469,20 @@ static void brcm_pcie_set_outbound_win(struct brcm_pcie *pcie, writel(tmp, pcie->base + PCIE_MEM_WIN0_LIMIT_HI(win)); } -static struct irq_chip brcm_msi_irq_chip = { - .name = "BRCM STB PCIe MSI", - .irq_ack = irq_chip_ack_parent, - .irq_mask = pci_msi_mask_irq, - .irq_unmask = pci_msi_unmask_irq, -}; +#define BRCM_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \ + MSI_FLAG_USE_DEF_CHIP_OPS | \ + MSI_FLAG_NO_AFFINITY) -static struct msi_domain_info brcm_msi_domain_info = { - .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | - MSI_FLAG_NO_AFFINITY | MSI_FLAG_MULTI_PCI_MSI, - .chip = &brcm_msi_irq_chip, +#define BRCM_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \ + MSI_FLAG_MULTI_PCI_MSI) + +static const struct msi_parent_ops brcm_msi_parent_ops = { + .required_flags = BRCM_MSI_FLAGS_REQUIRED, + .supported_flags = BRCM_MSI_FLAGS_SUPPORTED, + .bus_select_token = DOMAIN_BUS_PCI_MSI, + .chip_flags = MSI_CHIP_FLAG_SET_ACK, + .prefix = "BRCM-", + .init_dev_msi_info = msi_lib_init_dev_msi_info, }; static 
void brcm_pcie_msi_isr(struct irq_desc *desc) @@ -581,21 +588,18 @@ static const struct irq_domain_ops msi_domain_ops = { static int brcm_allocate_domains(struct brcm_msi *msi) { - struct fwnode_handle *fwnode = of_node_to_fwnode(msi->np); struct device *dev = msi->dev; - msi->inner_domain = irq_domain_add_linear(NULL, msi->nr, &msi_domain_ops, msi); - if (!msi->inner_domain) { - dev_err(dev, "failed to create IRQ domain\n"); - return -ENOMEM; - } + struct irq_domain_info info = { + .fwnode = of_fwnode_handle(msi->np), + .ops = &msi_domain_ops, + .host_data = msi, + .size = msi->nr, + }; - msi->msi_domain = pci_msi_create_irq_domain(fwnode, - &brcm_msi_domain_info, - msi->inner_domain); - if (!msi->msi_domain) { + msi->inner_domain = msi_create_parent_irq_domain(&info, &brcm_msi_parent_ops); + if (!msi->inner_domain) { dev_err(dev, "failed to create MSI domain\n"); - irq_domain_remove(msi->inner_domain); return -ENOMEM; } @@ -604,7 +608,6 @@ static int brcm_allocate_domains(struct brcm_msi *msi) static void brcm_free_domains(struct brcm_msi *msi) { - irq_domain_remove(msi->msi_domain); irq_domain_remove(msi->inner_domain); } @@ -970,7 +973,7 @@ static int brcm_pcie_get_inbound_wins(struct brcm_pcie *pcie, * * The PCIe host controller by design must set the inbound viewport to * be a contiguous arrangement of all of the system's memory. In - * addition, its size mut be a power of two. To further complicate + * addition, its size must be a power of two. To further complicate * matters, the viewport must start on a pcie-address that is aligned * on a multiple of its size. If a portion of the viewport does not * represent system memory -- e.g. 3GB of memory requires a 4GB @@ -1072,7 +1075,7 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie) void __iomem *base = pcie->base; struct pci_host_bridge *bridge; struct resource_entry *entry; - u32 tmp, burst, aspm_support; + u32 tmp, burst, aspm_support, num_lanes, num_lanes_cap; u8 num_out_wins = 0; int num_inbound_wins = 0; int memc, ret; @@ -1180,6 +1183,27 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie) PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_ASPM_SUPPORT_MASK); writel(tmp, base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY); + /* 'tmp' still holds the contents of PRIV1_LINK_CAPABILITY */ + num_lanes_cap = u32_get_bits(tmp, PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_MAX_LINK_WIDTH_MASK); + num_lanes = 0; + + /* + * Use hardware negotiated Max Link Width value by default. If the + * "num-lanes" DT property is present, assume that the chip's default + * link width capability information is incorrect/undesired and use the + * specified value instead. + */ + if (!of_property_read_u32(pcie->np, "num-lanes", &num_lanes) && + num_lanes && num_lanes <= 4 && num_lanes_cap != num_lanes) { + u32p_replace_bits(&tmp, num_lanes, + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_MAX_LINK_WIDTH_MASK); + writel(tmp, base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY); + tmp = readl(base + PCIE_RC_PL_REG_PHY_CTL_1); + u32p_replace_bits(&tmp, 1, + PCIE_RC_PL_REG_PHY_CTL_1_REG_P2_POWERDOWN_ENA_NOSYNC_MASK); + writel(tmp, base + PCIE_RC_PL_REG_PHY_CTL_1); + } + /* * For config space accesses on the RC, show the right class for * a PCIe-PCIe bridge (the default setting is to be EP mode). @@ -1333,11 +1357,7 @@ static int brcm_pcie_start_link(struct brcm_pcie *pcie) if (ret) return ret; - /* - * Wait for 100ms after PERST# deassertion; see PCIe CEM specification - * sections 2.2, PCIe r5.0, 6.6.1. 
- */ - msleep(100); + msleep(PCIE_RESET_CONFIG_WAIT_MS); /* * Give the RC/EP even more time to wake up, before trying to diff --git a/drivers/pci/controller/pcie-iproc-msi.c b/drivers/pci/controller/pcie-iproc-msi.c index 649fcb449f34..9ba242ab9596 100644 --- a/drivers/pci/controller/pcie-iproc-msi.c +++ b/drivers/pci/controller/pcie-iproc-msi.c @@ -5,6 +5,7 @@ #include <linux/interrupt.h> #include <linux/irqchip/chained_irq.h> +#include <linux/irqchip/irq-msi-lib.h> #include <linux/irqdomain.h> #include <linux/msi.h> #include <linux/of_irq.h> @@ -81,7 +82,6 @@ struct iproc_msi_grp { * @bitmap_lock: lock to protect access to the MSI bitmap * @nr_msi_vecs: total number of MSI vectors * @inner_domain: inner IRQ domain - * @msi_domain: MSI IRQ domain * @nr_eq_region: required number of 4K aligned memory region for MSI event * queues * @nr_msi_region: required number of 4K aligned address region for MSI posted @@ -101,7 +101,6 @@ struct iproc_msi { struct mutex bitmap_lock; unsigned int nr_msi_vecs; struct irq_domain *inner_domain; - struct irq_domain *msi_domain; unsigned int nr_eq_region; unsigned int nr_msi_region; void *eq_cpu; @@ -165,16 +164,18 @@ static inline unsigned int iproc_msi_eq_offset(struct iproc_msi *msi, u32 eq) return eq * EQ_LEN * sizeof(u32); } -static struct irq_chip iproc_msi_irq_chip = { - .name = "iProc-MSI", +#define IPROC_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \ + MSI_FLAG_USE_DEF_CHIP_OPS) +#define IPROC_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \ + MSI_FLAG_PCI_MSIX) + +static struct msi_parent_ops iproc_msi_parent_ops = { + .required_flags = IPROC_MSI_FLAGS_REQUIRED, + .supported_flags = IPROC_MSI_FLAGS_SUPPORTED, + .bus_select_token = DOMAIN_BUS_PCI_MSI, + .prefix = "iProc-", + .init_dev_msi_info = msi_lib_init_dev_msi_info, }; - -static struct msi_domain_info iproc_msi_domain_info = { - .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | - MSI_FLAG_PCI_MSIX, - .chip = &iproc_msi_irq_chip, -}; - /* * In iProc PCIe core, each MSI group is serviced by a GIC interrupt and a * dedicated event queue. Each MSI group can support up to 64 MSI vectors. 
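Given the layout described above (one GIC interrupt and one dedicated event queue per MSI group, 64 vectors per group), the total vector budget is simple arithmetic. A minimal illustrative sketch with assumed foo_* names, not code from the patch:

	#define FOO_EQ_LEN	64	/* MSI vectors (event queue slots) per group */

	/* Total MSI vectors available given the number of wired GIC IRQs. */
	static inline unsigned int foo_total_msi_vecs(unsigned int nr_gic_irqs)
	{
		return nr_gic_irqs * FOO_EQ_LEN;
	}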
@@ -446,27 +447,22 @@ static void iproc_msi_disable(struct iproc_msi *msi) static int iproc_msi_alloc_domains(struct device_node *node, struct iproc_msi *msi) { - msi->inner_domain = irq_domain_add_linear(NULL, msi->nr_msi_vecs, - &msi_domain_ops, msi); + struct irq_domain_info info = { + .fwnode = of_fwnode_handle(node), + .ops = &msi_domain_ops, + .host_data = msi, + .size = msi->nr_msi_vecs, + }; + + msi->inner_domain = msi_create_parent_irq_domain(&info, &iproc_msi_parent_ops); if (!msi->inner_domain) return -ENOMEM; - msi->msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(node), - &iproc_msi_domain_info, - msi->inner_domain); - if (!msi->msi_domain) { - irq_domain_remove(msi->inner_domain); - return -ENOMEM; - } - return 0; } static void iproc_msi_free_domains(struct iproc_msi *msi) { - if (msi->msi_domain) - irq_domain_remove(msi->msi_domain); - if (msi->inner_domain) irq_domain_remove(msi->inner_domain); } @@ -542,7 +538,7 @@ int iproc_msi_init(struct iproc_pcie *pcie, struct device_node *node) msi->nr_cpus = num_possible_cpus(); if (msi->nr_cpus == 1) - iproc_msi_domain_info.flags |= MSI_FLAG_MULTI_PCI_MSI; + iproc_msi_parent_ops.supported_flags |= MSI_FLAG_MULTI_PCI_MSI; msi->nr_irqs = of_irq_count(node); if (!msi->nr_irqs) { diff --git a/drivers/pci/controller/pcie-mediatek-gen3.c b/drivers/pci/controller/pcie-mediatek-gen3.c index 9d52504acae4..97147f43e41c 100644 --- a/drivers/pci/controller/pcie-mediatek-gen3.c +++ b/drivers/pci/controller/pcie-mediatek-gen3.c @@ -12,6 +12,7 @@ #include <linux/delay.h> #include <linux/iopoll.h> #include <linux/irq.h> +#include <linux/irqchip/irq-msi-lib.h> #include <linux/irqchip/chained_irq.h> #include <linux/irqdomain.h> #include <linux/kernel.h> @@ -187,7 +188,6 @@ struct mtk_msi_set { * @saved_irq_state: IRQ enable state saved at suspend time * @irq_lock: lock protecting IRQ register access * @intx_domain: legacy INTx IRQ domain - * @msi_domain: MSI IRQ domain * @msi_bottom_domain: MSI IRQ bottom domain * @msi_sets: MSI sets information * @lock: lock protecting IRQ bit map @@ -210,7 +210,6 @@ struct mtk_gen3_pcie { u32 saved_irq_state; raw_spinlock_t irq_lock; struct irq_domain *intx_domain; - struct irq_domain *msi_domain; struct irq_domain *msi_bottom_domain; struct mtk_msi_set msi_sets[PCIE_MSI_SET_NUM]; struct mutex lock; @@ -526,30 +525,22 @@ static int mtk_pcie_startup_port(struct mtk_gen3_pcie *pcie) return 0; } -static void mtk_pcie_msi_irq_mask(struct irq_data *data) -{ - pci_msi_mask_irq(data); - irq_chip_mask_parent(data); -} - -static void mtk_pcie_msi_irq_unmask(struct irq_data *data) -{ - pci_msi_unmask_irq(data); - irq_chip_unmask_parent(data); -} - -static struct irq_chip mtk_msi_irq_chip = { - .irq_ack = irq_chip_ack_parent, - .irq_mask = mtk_pcie_msi_irq_mask, - .irq_unmask = mtk_pcie_msi_irq_unmask, - .name = "MSI", -}; - -static struct msi_domain_info mtk_msi_domain_info = { - .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | - MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX | - MSI_FLAG_MULTI_PCI_MSI, - .chip = &mtk_msi_irq_chip, +#define MTK_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \ + MSI_FLAG_USE_DEF_CHIP_OPS | \ + MSI_FLAG_NO_AFFINITY | \ + MSI_FLAG_PCI_MSI_MASK_PARENT) + +#define MTK_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \ + MSI_FLAG_PCI_MSIX | \ + MSI_FLAG_MULTI_PCI_MSI) + +static const struct msi_parent_ops mtk_msi_parent_ops = { + .required_flags = MTK_MSI_FLAGS_REQUIRED, + .supported_flags = MTK_MSI_FLAGS_SUPPORTED, + .bus_select_token = DOMAIN_BUS_PCI_MSI, + .chip_flags = 
MSI_CHIP_FLAG_SET_ACK, + .prefix = "MTK3-", + .init_dev_msi_info = msi_lib_init_dev_msi_info, }; static void mtk_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) @@ -745,8 +736,8 @@ static int mtk_pcie_init_irq_domains(struct mtk_gen3_pcie *pcie) return -ENODEV; } - pcie->intx_domain = irq_domain_add_linear(intc_node, PCI_NUM_INTX, - &intx_domain_ops, pcie); + pcie->intx_domain = irq_domain_create_linear(of_fwnode_handle(intc_node), PCI_NUM_INTX, + &intx_domain_ops, pcie); if (!pcie->intx_domain) { dev_err(dev, "failed to create INTx IRQ domain\n"); ret = -ENODEV; @@ -756,28 +747,23 @@ static int mtk_pcie_init_irq_domains(struct mtk_gen3_pcie *pcie) /* Setup MSI */ mutex_init(&pcie->lock); - pcie->msi_bottom_domain = irq_domain_add_linear(node, PCIE_MSI_IRQS_NUM, - &mtk_msi_bottom_domain_ops, pcie); + struct irq_domain_info info = { + .fwnode = dev_fwnode(dev), + .ops = &mtk_msi_bottom_domain_ops, + .host_data = pcie, + .size = PCIE_MSI_IRQS_NUM, + }; + + pcie->msi_bottom_domain = msi_create_parent_irq_domain(&info, &mtk_msi_parent_ops); if (!pcie->msi_bottom_domain) { dev_err(dev, "failed to create MSI bottom domain\n"); ret = -ENODEV; goto err_msi_bottom_domain; } - pcie->msi_domain = pci_msi_create_irq_domain(dev->fwnode, - &mtk_msi_domain_info, - pcie->msi_bottom_domain); - if (!pcie->msi_domain) { - dev_err(dev, "failed to create MSI domain\n"); - ret = -ENODEV; - goto err_msi_domain; - } - of_node_put(intc_node); return 0; -err_msi_domain: - irq_domain_remove(pcie->msi_bottom_domain); err_msi_bottom_domain: irq_domain_remove(pcie->intx_domain); out_put_node: @@ -792,9 +778,6 @@ static void mtk_pcie_irq_teardown(struct mtk_gen3_pcie *pcie) if (pcie->intx_domain) irq_domain_remove(pcie->intx_domain); - if (pcie->msi_domain) - irq_domain_remove(pcie->msi_domain); - if (pcie->msi_bottom_domain) irq_domain_remove(pcie->msi_bottom_domain); diff --git a/drivers/pci/controller/pcie-mediatek.c b/drivers/pci/controller/pcie-mediatek.c index 811a8b4acd50..24cc30a2ab6c 100644 --- a/drivers/pci/controller/pcie-mediatek.c +++ b/drivers/pci/controller/pcie-mediatek.c @@ -12,6 +12,7 @@ #include <linux/iopoll.h> #include <linux/irq.h> #include <linux/irqchip/chained_irq.h> +#include <linux/irqchip/irq-msi-lib.h> #include <linux/irqdomain.h> #include <linux/kernel.h> #include <linux/mfd/syscon.h> @@ -180,7 +181,6 @@ struct mtk_pcie_soc { * @irq: GIC irq * @irq_domain: legacy INTx IRQ domain * @inner_domain: inner IRQ domain - * @msi_domain: MSI IRQ domain * @lock: protect the msi_irq_in_use bitmap * @msi_irq_in_use: bit map for assigned MSI IRQ */ @@ -200,7 +200,6 @@ struct mtk_pcie_port { int irq; struct irq_domain *irq_domain; struct irq_domain *inner_domain; - struct irq_domain *msi_domain; struct mutex lock; DECLARE_BITMAP(msi_irq_in_use, MTK_MSI_IRQS_NUM); }; @@ -470,40 +469,39 @@ static const struct irq_domain_ops msi_domain_ops = { .free = mtk_pcie_irq_domain_free, }; -static struct irq_chip mtk_msi_irq_chip = { - .name = "MTK PCIe MSI", - .irq_ack = irq_chip_ack_parent, - .irq_mask = pci_msi_mask_irq, - .irq_unmask = pci_msi_unmask_irq, -}; +#define MTK_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \ + MSI_FLAG_USE_DEF_CHIP_OPS | \ + MSI_FLAG_NO_AFFINITY) + +#define MTK_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \ + MSI_FLAG_PCI_MSIX) -static struct msi_domain_info mtk_msi_domain_info = { - .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | - MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX, - .chip = &mtk_msi_irq_chip, +static const struct msi_parent_ops mtk_msi_parent_ops 
= { + .required_flags = MTK_MSI_FLAGS_REQUIRED, + .supported_flags = MTK_MSI_FLAGS_SUPPORTED, + .bus_select_token = DOMAIN_BUS_PCI_MSI, + .chip_flags = MSI_CHIP_FLAG_SET_ACK, + .prefix = "MTK-", + .init_dev_msi_info = msi_lib_init_dev_msi_info, }; static int mtk_pcie_allocate_msi_domains(struct mtk_pcie_port *port) { - struct fwnode_handle *fwnode = of_node_to_fwnode(port->pcie->dev->of_node); - mutex_init(&port->lock); - port->inner_domain = irq_domain_create_linear(fwnode, MTK_MSI_IRQS_NUM, - &msi_domain_ops, port); + struct irq_domain_info info = { + .fwnode = dev_fwnode(port->pcie->dev), + .ops = &msi_domain_ops, + .host_data = port, + .size = MTK_MSI_IRQS_NUM, + }; + + port->inner_domain = msi_create_parent_irq_domain(&info, &mtk_msi_parent_ops); if (!port->inner_domain) { dev_err(port->pcie->dev, "failed to create IRQ domain\n"); return -ENOMEM; } - port->msi_domain = pci_msi_create_irq_domain(fwnode, &mtk_msi_domain_info, - port->inner_domain); - if (!port->msi_domain) { - dev_err(port->pcie->dev, "failed to create MSI domain\n"); - irq_domain_remove(port->inner_domain); - return -ENOMEM; - } - return 0; } @@ -532,8 +530,6 @@ static void mtk_pcie_irq_teardown(struct mtk_pcie *pcie) irq_domain_remove(port->irq_domain); if (IS_ENABLED(CONFIG_PCI_MSI)) { - if (port->msi_domain) - irq_domain_remove(port->msi_domain); if (port->inner_domain) irq_domain_remove(port->inner_domain); } @@ -569,8 +565,8 @@ static int mtk_pcie_init_irq_domain(struct mtk_pcie_port *port, return -ENODEV; } - port->irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX, - &intx_domain_ops, port); + port->irq_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node), PCI_NUM_INTX, + &intx_domain_ops, port); of_node_put(pcie_intc_node); if (!port->irq_domain) { dev_err(dev, "failed to get INTx IRQ domain\n"); diff --git a/drivers/pci/controller/pcie-rcar-ep.c b/drivers/pci/controller/pcie-rcar-ep.c index c5e0d025bc43..a8a966844cf3 100644 --- a/drivers/pci/controller/pcie-rcar-ep.c +++ b/drivers/pci/controller/pcie-rcar-ep.c @@ -256,15 +256,15 @@ static void rcar_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn, u8 vfn, clear_bit(atu_index + 1, ep->ib_window_map); } -static int rcar_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 vfn, - u8 interrupts) +static int rcar_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 vfn, u8 nr_irqs) { struct rcar_pcie_endpoint *ep = epc_get_drvdata(epc); struct rcar_pcie *pcie = &ep->pcie; + u8 mmc = order_base_2(nr_irqs); u32 flags; flags = rcar_pci_read_reg(pcie, MSICAP(fn)); - flags |= interrupts << MSICAP0_MMESCAP_OFFSET; + flags |= mmc << MSICAP0_MMESCAP_OFFSET; rcar_pci_write_reg(pcie, flags, MSICAP(fn)); return 0; @@ -280,7 +280,7 @@ static int rcar_pcie_ep_get_msi(struct pci_epc *epc, u8 fn, u8 vfn) if (!(flags & MSICAP0_MSIE)) return -EINVAL; - return ((flags & MSICAP0_MMESE_MASK) >> MSICAP0_MMESE_OFFSET); + return 1 << ((flags & MSICAP0_MMESE_MASK) >> MSICAP0_MMESE_OFFSET); } static int rcar_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, u8 vfn, diff --git a/drivers/pci/controller/pcie-rcar-host.c b/drivers/pci/controller/pcie-rcar-host.c index c32b803a47c7..fe288fd770c4 100644 --- a/drivers/pci/controller/pcie-rcar-host.c +++ b/drivers/pci/controller/pcie-rcar-host.c @@ -17,6 +17,7 @@ #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/irq.h> +#include <linux/irqchip/irq-msi-lib.h> #include <linux/irqdomain.h> #include <linux/kernel.h> #include <linux/init.h> @@ -597,30 +598,6 @@ static irqreturn_t rcar_pcie_msi_irq(int irq, void *data) return 
IRQ_HANDLED; } -static void rcar_msi_top_irq_ack(struct irq_data *d) -{ - irq_chip_ack_parent(d); -} - -static void rcar_msi_top_irq_mask(struct irq_data *d) -{ - pci_msi_mask_irq(d); - irq_chip_mask_parent(d); -} - -static void rcar_msi_top_irq_unmask(struct irq_data *d) -{ - pci_msi_unmask_irq(d); - irq_chip_unmask_parent(d); -} - -static struct irq_chip rcar_msi_top_chip = { - .name = "PCIe MSI", - .irq_ack = rcar_msi_top_irq_ack, - .irq_mask = rcar_msi_top_irq_mask, - .irq_unmask = rcar_msi_top_irq_unmask, -}; - static void rcar_msi_irq_ack(struct irq_data *d) { struct rcar_msi *msi = irq_data_get_irq_chip_data(d); @@ -718,30 +695,36 @@ static const struct irq_domain_ops rcar_msi_domain_ops = { .free = rcar_msi_domain_free, }; -static struct msi_domain_info rcar_msi_info = { - .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | - MSI_FLAG_NO_AFFINITY | MSI_FLAG_MULTI_PCI_MSI, - .chip = &rcar_msi_top_chip, +#define RCAR_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \ + MSI_FLAG_USE_DEF_CHIP_OPS | \ + MSI_FLAG_PCI_MSI_MASK_PARENT | \ + MSI_FLAG_NO_AFFINITY) + +#define RCAR_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \ + MSI_FLAG_MULTI_PCI_MSI) + +static const struct msi_parent_ops rcar_msi_parent_ops = { + .required_flags = RCAR_MSI_FLAGS_REQUIRED, + .supported_flags = RCAR_MSI_FLAGS_SUPPORTED, + .bus_select_token = DOMAIN_BUS_PCI_MSI, + .chip_flags = MSI_CHIP_FLAG_SET_ACK, + .prefix = "RCAR-", + .init_dev_msi_info = msi_lib_init_dev_msi_info, }; static int rcar_allocate_domains(struct rcar_msi *msi) { struct rcar_pcie *pcie = &msi_to_host(msi)->pcie; - struct fwnode_handle *fwnode = dev_fwnode(pcie->dev); - struct irq_domain *parent; - - parent = irq_domain_create_linear(fwnode, INT_PCI_MSI_NR, - &rcar_msi_domain_ops, msi); - if (!parent) { - dev_err(pcie->dev, "failed to create IRQ domain\n"); - return -ENOMEM; - } - irq_domain_update_bus_token(parent, DOMAIN_BUS_NEXUS); - - msi->domain = pci_msi_create_irq_domain(fwnode, &rcar_msi_info, parent); + struct irq_domain_info info = { + .fwnode = dev_fwnode(pcie->dev), + .ops = &rcar_msi_domain_ops, + .host_data = msi, + .size = INT_PCI_MSI_NR, + }; + + msi->domain = msi_create_parent_irq_domain(&info, &rcar_msi_parent_ops); if (!msi->domain) { - dev_err(pcie->dev, "failed to create MSI domain\n"); - irq_domain_remove(parent); + dev_err(pcie->dev, "failed to create IRQ domain\n"); return -ENOMEM; } @@ -750,10 +733,7 @@ static int rcar_allocate_domains(struct rcar_msi *msi) static void rcar_free_domains(struct rcar_msi *msi) { - struct irq_domain *parent = msi->domain->parent; - irq_domain_remove(msi->domain); - irq_domain_remove(parent); } static int rcar_pcie_enable_msi(struct rcar_pcie_host *host) diff --git a/drivers/pci/controller/pcie-rockchip-ep.c b/drivers/pci/controller/pcie-rockchip-ep.c index 85ea36df2f59..300cd85fa035 100644 --- a/drivers/pci/controller/pcie-rockchip-ep.c +++ b/drivers/pci/controller/pcie-rockchip-ep.c @@ -308,10 +308,11 @@ static void rockchip_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn, u8 vfn, } static int rockchip_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 vfn, - u8 multi_msg_cap) + u8 nr_irqs) { struct rockchip_pcie_ep *ep = epc_get_drvdata(epc); struct rockchip_pcie *rockchip = &ep->rockchip; + u8 mmc = order_base_2(nr_irqs); u32 flags; flags = rockchip_pcie_read(rockchip, @@ -319,7 +320,7 @@ static int rockchip_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 vfn, ROCKCHIP_PCIE_EP_MSI_CTRL_REG); flags &= ~ROCKCHIP_PCIE_EP_MSI_CTRL_MMC_MASK; flags |= - (multi_msg_cap << 
ROCKCHIP_PCIE_EP_MSI_CTRL_MMC_OFFSET) | + (mmc << ROCKCHIP_PCIE_EP_MSI_CTRL_MMC_OFFSET) | (PCI_MSI_FLAGS_64BIT << ROCKCHIP_PCIE_EP_MSI_FLAGS_OFFSET); flags &= ~ROCKCHIP_PCIE_EP_MSI_CTRL_MASK_MSI_CAP; rockchip_pcie_write(rockchip, flags, @@ -340,8 +341,8 @@ static int rockchip_pcie_ep_get_msi(struct pci_epc *epc, u8 fn, u8 vfn) if (!(flags & ROCKCHIP_PCIE_EP_MSI_CTRL_ME)) return -EINVAL; - return ((flags & ROCKCHIP_PCIE_EP_MSI_CTRL_MME_MASK) >> - ROCKCHIP_PCIE_EP_MSI_CTRL_MME_OFFSET); + return 1 << ((flags & ROCKCHIP_PCIE_EP_MSI_CTRL_MME_MASK) >> + ROCKCHIP_PCIE_EP_MSI_CTRL_MME_OFFSET); } static void rockchip_pcie_ep_assert_intx(struct rockchip_pcie_ep *ep, u8 fn, @@ -517,9 +518,9 @@ static void rockchip_pcie_ep_retrain_link(struct rockchip_pcie *rockchip) { u32 status; - status = rockchip_pcie_read(rockchip, PCIE_EP_CONFIG_LCS); + status = rockchip_pcie_read(rockchip, PCIE_EP_CONFIG_BASE + PCI_EXP_LNKCTL); status |= PCI_EXP_LNKCTL_RL; - rockchip_pcie_write(rockchip, status, PCIE_EP_CONFIG_LCS); + rockchip_pcie_write(rockchip, status, PCIE_EP_CONFIG_BASE + PCI_EXP_LNKCTL); } static bool rockchip_pcie_ep_link_up(struct rockchip_pcie *rockchip) @@ -694,6 +695,7 @@ static const struct pci_epc_features rockchip_pcie_epc_features = { .linkup_notifier = true, .msi_capable = true, .msix_capable = false, + .intx_capable = true, .align = ROCKCHIP_PCIE_AT_SIZE_ALIGN, }; diff --git a/drivers/pci/controller/pcie-rockchip-host.c b/drivers/pci/controller/pcie-rockchip-host.c index 6a46be17aa91..ee1822ca01db 100644 --- a/drivers/pci/controller/pcie-rockchip-host.c +++ b/drivers/pci/controller/pcie-rockchip-host.c @@ -11,27 +11,19 @@ * ARM PCI Host generic driver. */ +#include <linux/bitfield.h> #include <linux/bitrev.h> -#include <linux/clk.h> -#include <linux/delay.h> #include <linux/gpio/consumer.h> -#include <linux/init.h> #include <linux/interrupt.h> #include <linux/iopoll.h> #include <linux/irq.h> #include <linux/irqchip/chained_irq.h> #include <linux/irqdomain.h> -#include <linux/kernel.h> -#include <linux/mfd/syscon.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_pci.h> -#include <linux/pci.h> -#include <linux/pci_ids.h> #include <linux/phy/phy.h> #include <linux/platform_device.h> -#include <linux/reset.h> -#include <linux/regmap.h> #include "../pci.h" #include "pcie-rockchip.h" @@ -40,18 +32,18 @@ static void rockchip_pcie_enable_bw_int(struct rockchip_pcie *rockchip) { u32 status; - status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS); + status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCTL); status |= (PCI_EXP_LNKCTL_LBMIE | PCI_EXP_LNKCTL_LABIE); - rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS); + rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCTL); } static void rockchip_pcie_clr_bw_int(struct rockchip_pcie *rockchip) { u32 status; - status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS); + status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCTL); status |= (PCI_EXP_LNKSTA_LBMS | PCI_EXP_LNKSTA_LABS) << 16; - rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS); + rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCTL); } static void rockchip_pcie_update_txcredit_mui(struct rockchip_pcie *rockchip) @@ -269,7 +261,7 @@ static void rockchip_pcie_set_power_limit(struct rockchip_pcie *rockchip) scale = 3; /* 0.001x */ curr = curr / 1000; /* convert to mA */ power = (curr * 3300) / 1000; /* milliwatt */ - while (power > PCIE_RC_CONFIG_DCR_CSPL_LIMIT) { + while (power > 
FIELD_MAX(PCI_EXP_DEVCAP_PWR_VAL)) { if (!scale) { dev_warn(rockchip->dev, "invalid power supply\n"); return; @@ -278,10 +270,10 @@ static void rockchip_pcie_set_power_limit(struct rockchip_pcie *rockchip) power = power / 10; } - status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_DCR); - status |= (power << PCIE_RC_CONFIG_DCR_CSPL_SHIFT) | - (scale << PCIE_RC_CONFIG_DCR_CPLS_SHIFT); - rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_DCR); + status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_CR + PCI_EXP_DEVCAP); + status |= FIELD_PREP(PCI_EXP_DEVCAP_PWR_VAL, power); + status |= FIELD_PREP(PCI_EXP_DEVCAP_PWR_SCL, scale); + rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_CR + PCI_EXP_DEVCAP); } /** @@ -309,14 +301,14 @@ static int rockchip_pcie_host_init_port(struct rockchip_pcie *rockchip) rockchip_pcie_set_power_limit(rockchip); /* Set RC's clock architecture as common clock */ - status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS); + status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCTL); status |= PCI_EXP_LNKSTA_SLC << 16; - rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS); + rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCTL); /* Set RC's RCB to 128 */ - status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS); + status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCTL); status |= PCI_EXP_LNKCTL_RCB; - rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS); + rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCTL); /* Enable Gen1 training */ rockchip_pcie_write(rockchip, PCIE_CLIENT_LINK_TRAIN_ENABLE, @@ -325,7 +317,7 @@ static int rockchip_pcie_host_init_port(struct rockchip_pcie *rockchip) msleep(PCIE_T_PVPERL_MS); gpiod_set_value_cansleep(rockchip->perst_gpio, 1); - msleep(PCIE_T_RRS_READY_MS); + msleep(PCIE_RESET_CONFIG_WAIT_MS); /* 500ms timeout value should be enough for Gen1/2 training */ err = readl_poll_timeout(rockchip->apb_base + PCIE_CLIENT_BASIC_STATUS1, @@ -341,9 +333,13 @@ static int rockchip_pcie_host_init_port(struct rockchip_pcie *rockchip) * Enable retrain for gen2. This should be configured only after * gen1 finished. 
*/ - status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS); + status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCTL2); + status &= ~PCI_EXP_LNKCTL2_TLS; + status |= PCI_EXP_LNKCTL2_TLS_5_0GT; + rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCTL2); + status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCTL); status |= PCI_EXP_LNKCTL_RL; - rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS); + rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCTL); err = readl_poll_timeout(rockchip->apb_base + PCIE_CORE_CTRL, status, PCIE_LINK_IS_GEN2(status), 20, @@ -380,15 +376,15 @@ static int rockchip_pcie_host_init_port(struct rockchip_pcie *rockchip) /* Clear L0s from RC's link cap */ if (of_property_read_bool(dev->of_node, "aspm-no-l0s")) { - status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LINK_CAP); - status &= ~PCIE_RC_CONFIG_LINK_CAP_L0S; - rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LINK_CAP); + status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCAP); + status &= ~PCI_EXP_LNKCAP_ASPM_L0S; + rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_CR + PCI_EXP_LNKCAP); } - status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_DCSR); - status &= ~PCIE_RC_CONFIG_DCSR_MPS_MASK; - status |= PCIE_RC_CONFIG_DCSR_MPS_256; - rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_DCSR); + status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_CR + PCI_EXP_DEVCTL); + status &= ~PCI_EXP_DEVCTL_PAYLOAD; + status |= PCI_EXP_DEVCTL_PAYLOAD_256B; + rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_CR + PCI_EXP_DEVCTL); return 0; err_power_off_phy: @@ -439,7 +435,7 @@ static irqreturn_t rockchip_pcie_subsys_irq_handler(int irq, void *arg) dev_dbg(dev, "malformed TLP received from the link\n"); if (sub_reg & PCIE_CORE_INT_UCR) - dev_dbg(dev, "malformed TLP received from the link\n"); + dev_dbg(dev, "Unexpected Completion received from the link\n"); if (sub_reg & PCIE_CORE_INT_FCE) dev_dbg(dev, "an error was observed in the flow control advertisements from the other side\n"); @@ -489,7 +485,7 @@ static irqreturn_t rockchip_pcie_client_irq_handler(int irq, void *arg) dev_dbg(dev, "fatal error interrupt received\n"); if (reg & PCIE_CLIENT_INT_NFATAL_ERR) - dev_dbg(dev, "no fatal error interrupt received\n"); + dev_dbg(dev, "non fatal error interrupt received\n"); if (reg & PCIE_CLIENT_INT_CORR_ERR) dev_dbg(dev, "correctable error interrupt received\n"); @@ -693,8 +689,8 @@ static int rockchip_pcie_init_irq_domain(struct rockchip_pcie *rockchip) return -EINVAL; } - rockchip->irq_domain = irq_domain_add_linear(intc, PCI_NUM_INTX, - &intx_domain_ops, rockchip); + rockchip->irq_domain = irq_domain_create_linear(of_fwnode_handle(intc), PCI_NUM_INTX, + &intx_domain_ops, rockchip); of_node_put(intc); if (!rockchip->irq_domain) { dev_err(dev, "failed to get a INTx IRQ domain\n"); diff --git a/drivers/pci/controller/pcie-rockchip.h b/drivers/pci/controller/pcie-rockchip.h index 14954f43e5e9..72a2c045f6fe 100644 --- a/drivers/pci/controller/pcie-rockchip.h +++ b/drivers/pci/controller/pcie-rockchip.h @@ -155,17 +155,7 @@ #define PCIE_EP_CONFIG_DID_VID (PCIE_EP_CONFIG_BASE + 0x00) #define PCIE_EP_CONFIG_LCS (PCIE_EP_CONFIG_BASE + 0xd0) #define PCIE_RC_CONFIG_RID_CCR (PCIE_RC_CONFIG_BASE + 0x08) -#define PCIE_RC_CONFIG_DCR (PCIE_RC_CONFIG_BASE + 0xc4) -#define PCIE_RC_CONFIG_DCR_CSPL_SHIFT 18 -#define PCIE_RC_CONFIG_DCR_CSPL_LIMIT 0xff -#define PCIE_RC_CONFIG_DCR_CPLS_SHIFT 26 -#define PCIE_RC_CONFIG_DCSR 
(PCIE_RC_CONFIG_BASE + 0xc8) -#define PCIE_RC_CONFIG_DCSR_MPS_MASK GENMASK(7, 5) -#define PCIE_RC_CONFIG_DCSR_MPS_256 (0x1 << 5) -#define PCIE_RC_CONFIG_LINK_CAP (PCIE_RC_CONFIG_BASE + 0xcc) -#define PCIE_RC_CONFIG_LINK_CAP_L0S BIT(10) -#define PCIE_RC_CONFIG_LCS (PCIE_RC_CONFIG_BASE + 0xd0) -#define PCIE_EP_CONFIG_LCS (PCIE_EP_CONFIG_BASE + 0xd0) +#define PCIE_RC_CONFIG_CR (PCIE_RC_CONFIG_BASE + 0xc0) #define PCIE_RC_CONFIG_L1_SUBSTATE_CTRL2 (PCIE_RC_CONFIG_BASE + 0x90c) #define PCIE_RC_CONFIG_THP_CAP (PCIE_RC_CONFIG_BASE + 0x274) #define PCIE_RC_CONFIG_THP_CAP_NEXT_MASK GENMASK(31, 20) @@ -215,20 +205,6 @@ #define RC_REGION_0_TYPE_MASK GENMASK(3, 0) #define MAX_AXI_WRAPPER_REGION_NUM 33 -#define ROCKCHIP_PCIE_MSG_ROUTING_TO_RC 0x0 -#define ROCKCHIP_PCIE_MSG_ROUTING_VIA_ADDR 0x1 -#define ROCKCHIP_PCIE_MSG_ROUTING_VIA_ID 0x2 -#define ROCKCHIP_PCIE_MSG_ROUTING_BROADCAST 0x3 -#define ROCKCHIP_PCIE_MSG_ROUTING_LOCAL_INTX 0x4 -#define ROCKCHIP_PCIE_MSG_ROUTING_PME_ACK 0x5 -#define ROCKCHIP_PCIE_MSG_CODE_ASSERT_INTA 0x20 -#define ROCKCHIP_PCIE_MSG_CODE_ASSERT_INTB 0x21 -#define ROCKCHIP_PCIE_MSG_CODE_ASSERT_INTC 0x22 -#define ROCKCHIP_PCIE_MSG_CODE_ASSERT_INTD 0x23 -#define ROCKCHIP_PCIE_MSG_CODE_DEASSERT_INTA 0x24 -#define ROCKCHIP_PCIE_MSG_CODE_DEASSERT_INTB 0x25 -#define ROCKCHIP_PCIE_MSG_CODE_DEASSERT_INTC 0x26 -#define ROCKCHIP_PCIE_MSG_CODE_DEASSERT_INTD 0x27 #define ROCKCHIP_PCIE_MSG_ROUTING_MASK GENMASK(7, 5) #define ROCKCHIP_PCIE_MSG_ROUTING(route) \ (((route) << 5) & ROCKCHIP_PCIE_MSG_ROUTING_MASK) @@ -319,11 +295,12 @@ static const char * const rockchip_pci_pm_rsts[] = { "aclk", }; +/* NOTE: Do not reorder the deassert sequence of the following reset pins */ static const char * const rockchip_pci_core_rsts[] = { - "mgmt-sticky", - "core", - "mgmt", "pipe", + "mgmt", + "core", + "mgmt-sticky", }; struct rockchip_pcie { diff --git a/drivers/pci/controller/pcie-xilinx-cpm.c b/drivers/pci/controller/pcie-xilinx-cpm.c index 13ca493d22bd..d38f27e20761 100644 --- a/drivers/pci/controller/pcie-xilinx-cpm.c +++ b/drivers/pci/controller/pcie-xilinx-cpm.c @@ -395,17 +395,15 @@ static int xilinx_cpm_pcie_init_irq_domain(struct xilinx_cpm_pcie *port) return -EINVAL; } - port->cpm_domain = irq_domain_add_linear(pcie_intc_node, 32, - &event_domain_ops, - port); + port->cpm_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node), 32, + &event_domain_ops, port); if (!port->cpm_domain) goto out; irq_domain_update_bus_token(port->cpm_domain, DOMAIN_BUS_NEXUS); - port->intx_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX, - &intx_domain_ops, - port); + port->intx_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node), PCI_NUM_INTX, + &intx_domain_ops, port); if (!port->intx_domain) goto out; diff --git a/drivers/pci/controller/pcie-xilinx-dma-pl.c b/drivers/pci/controller/pcie-xilinx-dma-pl.c index dd117f07fc95..b037c8f315e4 100644 --- a/drivers/pci/controller/pcie-xilinx-dma-pl.c +++ b/drivers/pci/controller/pcie-xilinx-dma-pl.c @@ -7,6 +7,7 @@ #include <linux/bitfield.h> #include <linux/interrupt.h> #include <linux/irq.h> +#include <linux/irqchip/irq-msi-lib.h> #include <linux/irqdomain.h> #include <linux/kernel.h> #include <linux/module.h> @@ -90,7 +91,6 @@ struct xilinx_pl_dma_variant { }; struct xilinx_msi { - struct irq_domain *msi_domain; unsigned long *bitmap; struct irq_domain *dev_domain; struct mutex lock; /* Protect bitmap variable */ @@ -373,20 +373,20 @@ static irqreturn_t xilinx_pl_dma_pcie_intr_handler(int irq, void *dev_id) return IRQ_HANDLED; } 
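The hunks below convert the Xilinx pl_dma driver to the same parent-domain scheme applied throughout this series. For reference, the recurring shape of the conversion, sketched with placeholder foo_* names (illustrative only, built on the msi_create_parent_irq_domain()/msi_lib_init_dev_msi_info API the other hunks in this diff already use):

	#define FOO_MSI_FLAGS_REQUIRED	(MSI_FLAG_USE_DEF_DOM_OPS |	\
					 MSI_FLAG_USE_DEF_CHIP_OPS)
	#define FOO_MSI_FLAGS_SUPPORTED	(MSI_GENERIC_FLAGS_MASK |	\
					 MSI_FLAG_PCI_MSIX)

	static const struct msi_parent_ops foo_msi_parent_ops = {
		.required_flags		= FOO_MSI_FLAGS_REQUIRED,
		.supported_flags	= FOO_MSI_FLAGS_SUPPORTED,
		.bus_select_token	= DOMAIN_BUS_PCI_MSI,
		.prefix			= "foo-",
		.init_dev_msi_info	= msi_lib_init_dev_msi_info,
	};

	static int foo_allocate_domain(struct foo_msi *msi)
	{
		struct irq_domain_info info = {
			.fwnode		= dev_fwnode(msi->dev),
			.ops		= &foo_msi_domain_ops,
			.host_data	= msi,
			.size		= FOO_NR_VECTORS,
		};

		/* One call replaces the old inner-domain + pci_msi_create_irq_domain() pair. */
		msi->domain = msi_create_parent_irq_domain(&info, &foo_msi_parent_ops);
		return msi->domain ? 0 : -ENOMEM;
	}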
-static struct irq_chip xilinx_msi_irq_chip = { - .name = "pl_dma:PCIe MSI", - .irq_enable = pci_msi_unmask_irq, - .irq_disable = pci_msi_mask_irq, - .irq_mask = pci_msi_mask_irq, - .irq_unmask = pci_msi_unmask_irq, -}; +#define XILINX_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \ + MSI_FLAG_USE_DEF_CHIP_OPS | \ + MSI_FLAG_NO_AFFINITY) -static struct msi_domain_info xilinx_msi_domain_info = { - .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | - MSI_FLAG_NO_AFFINITY | MSI_FLAG_MULTI_PCI_MSI, - .chip = &xilinx_msi_irq_chip, -}; +#define XILINX_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \ + MSI_FLAG_MULTI_PCI_MSI) +static const struct msi_parent_ops xilinx_msi_parent_ops = { + .required_flags = XILINX_MSI_FLAGS_REQUIRED, + .supported_flags = XILINX_MSI_FLAGS_SUPPORTED, + .bus_select_token = DOMAIN_BUS_PCI_MSI, + .prefix = "pl_dma-", + .init_dev_msi_info = msi_lib_init_dev_msi_info, +}; static void xilinx_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) { struct pl_dma_pcie *pcie = irq_data_get_irq_chip_data(data); @@ -458,11 +458,6 @@ static void xilinx_pl_dma_pcie_free_irq_domains(struct pl_dma_pcie *port) irq_domain_remove(msi->dev_domain); msi->dev_domain = NULL; } - - if (msi->msi_domain) { - irq_domain_remove(msi->msi_domain); - msi->msi_domain = NULL; - } } static int xilinx_pl_dma_pcie_init_msi_irq_domain(struct pl_dma_pcie *port) @@ -470,19 +465,17 @@ static int xilinx_pl_dma_pcie_init_msi_irq_domain(struct pl_dma_pcie *port) struct device *dev = port->dev; struct xilinx_msi *msi = &port->msi; int size = BITS_TO_LONGS(XILINX_NUM_MSI_IRQS) * sizeof(long); - struct fwnode_handle *fwnode = of_node_to_fwnode(port->dev->of_node); - - msi->dev_domain = irq_domain_add_linear(NULL, XILINX_NUM_MSI_IRQS, - &dev_msi_domain_ops, port); + struct irq_domain_info info = { + .fwnode = dev_fwnode(port->dev), + .ops = &dev_msi_domain_ops, + .host_data = port, + .size = XILINX_NUM_MSI_IRQS, + }; + + msi->dev_domain = msi_create_parent_irq_domain(&info, &xilinx_msi_parent_ops); if (!msi->dev_domain) goto out; - msi->msi_domain = pci_msi_create_irq_domain(fwnode, - &xilinx_msi_domain_info, - msi->dev_domain); - if (!msi->msi_domain) - goto out; - mutex_init(&msi->lock); msi->bitmap = kzalloc(size, GFP_KERNEL); if (!msi->bitmap) @@ -585,15 +578,15 @@ static int xilinx_pl_dma_pcie_init_irq_domain(struct pl_dma_pcie *port) return -EINVAL; } - port->pldma_domain = irq_domain_add_linear(pcie_intc_node, 32, - &event_domain_ops, port); + port->pldma_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node), 32, + &event_domain_ops, port); if (!port->pldma_domain) return -ENOMEM; irq_domain_update_bus_token(port->pldma_domain, DOMAIN_BUS_NEXUS); - port->intx_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX, - &intx_domain_ops, port); + port->intx_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node), PCI_NUM_INTX, + &intx_domain_ops, port); if (!port->intx_domain) { dev_err(dev, "Failed to get a INTx IRQ domain\n"); return -ENOMEM; diff --git a/drivers/pci/controller/pcie-xilinx-nwl.c b/drivers/pci/controller/pcie-xilinx-nwl.c index 8d6e2a89b067..05b8c205493c 100644 --- a/drivers/pci/controller/pcie-xilinx-nwl.c +++ b/drivers/pci/controller/pcie-xilinx-nwl.c @@ -10,6 +10,7 @@ #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/irq.h> +#include <linux/irqchip/irq-msi-lib.h> #include <linux/irqdomain.h> #include <linux/kernel.h> #include <linux/init.h> @@ -145,7 +146,6 @@ #define LINK_WAIT_USLEEP_MAX 100000 struct nwl_msi { /* MSI 
information */ - struct irq_domain *msi_domain; DECLARE_BITMAP(bitmap, INT_PCI_MSI_NR); struct irq_domain *dev_domain; struct mutex lock; /* protect bitmap variable */ @@ -418,19 +418,22 @@ static const struct irq_domain_ops intx_domain_ops = { }; #ifdef CONFIG_PCI_MSI -static struct irq_chip nwl_msi_irq_chip = { - .name = "nwl_pcie:msi", - .irq_enable = pci_msi_unmask_irq, - .irq_disable = pci_msi_mask_irq, - .irq_mask = pci_msi_mask_irq, - .irq_unmask = pci_msi_unmask_irq, -}; -static struct msi_domain_info nwl_msi_domain_info = { - .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | - MSI_FLAG_NO_AFFINITY | MSI_FLAG_MULTI_PCI_MSI, - .chip = &nwl_msi_irq_chip, +#define NWL_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \ + MSI_FLAG_USE_DEF_CHIP_OPS | \ + MSI_FLAG_NO_AFFINITY) + +#define NWL_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \ + MSI_FLAG_MULTI_PCI_MSI) + +static const struct msi_parent_ops nwl_msi_parent_ops = { + .required_flags = NWL_MSI_FLAGS_REQUIRED, + .supported_flags = NWL_MSI_FLAGS_SUPPORTED, + .bus_select_token = DOMAIN_BUS_PCI_MSI, + .prefix = "nwl-", + .init_dev_msi_info = msi_lib_init_dev_msi_info, }; + #endif static void nwl_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) @@ -495,23 +498,19 @@ static int nwl_pcie_init_msi_irq_domain(struct nwl_pcie *pcie) { #ifdef CONFIG_PCI_MSI struct device *dev = pcie->dev; - struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node); struct nwl_msi *msi = &pcie->msi; - - msi->dev_domain = irq_domain_add_linear(NULL, INT_PCI_MSI_NR, - &dev_msi_domain_ops, pcie); + struct irq_domain_info info = { + .fwnode = dev_fwnode(dev), + .ops = &dev_msi_domain_ops, + .host_data = pcie, + .size = INT_PCI_MSI_NR, + }; + + msi->dev_domain = msi_create_parent_irq_domain(&info, &nwl_msi_parent_ops); if (!msi->dev_domain) { dev_err(dev, "failed to create dev IRQ domain\n"); return -ENOMEM; } - msi->msi_domain = pci_msi_create_irq_domain(fwnode, - &nwl_msi_domain_info, - msi->dev_domain); - if (!msi->msi_domain) { - dev_err(dev, "failed to create msi IRQ domain\n"); - irq_domain_remove(msi->dev_domain); - return -ENOMEM; - } #endif return 0; } @@ -582,10 +581,8 @@ static int nwl_pcie_init_irq_domain(struct nwl_pcie *pcie) return -EINVAL; } - pcie->intx_irq_domain = irq_domain_add_linear(intc_node, - PCI_NUM_INTX, - &intx_domain_ops, - pcie); + pcie->intx_irq_domain = irq_domain_create_linear(of_fwnode_handle(intc_node), PCI_NUM_INTX, + &intx_domain_ops, pcie); of_node_put(intc_node); if (!pcie->intx_irq_domain) { dev_err(dev, "failed to create IRQ domain\n"); diff --git a/drivers/pci/controller/pcie-xilinx.c b/drivers/pci/controller/pcie-xilinx.c index 0b534f73a942..f121836c3cf4 100644 --- a/drivers/pci/controller/pcie-xilinx.c +++ b/drivers/pci/controller/pcie-xilinx.c @@ -12,6 +12,7 @@ #include <linux/interrupt.h> #include <linux/irq.h> +#include <linux/irqchip/irq-msi-lib.h> #include <linux/irqdomain.h> #include <linux/kernel.h> #include <linux/init.h> @@ -203,11 +204,6 @@ static void xilinx_msi_top_irq_ack(struct irq_data *d) */ } -static struct irq_chip xilinx_msi_top_chip = { - .name = "PCIe MSI", - .irq_ack = xilinx_msi_top_irq_ack, -}; - static void xilinx_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) { struct xilinx_pcie *pcie = irq_data_get_irq_chip_data(data); @@ -264,29 +260,42 @@ static const struct irq_domain_ops xilinx_msi_domain_ops = { .free = xilinx_msi_domain_free, }; -static struct msi_domain_info xilinx_msi_info = { - .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | - 
MSI_FLAG_NO_AFFINITY, - .chip = &xilinx_msi_top_chip, +static bool xilinx_init_dev_msi_info(struct device *dev, struct irq_domain *domain, + struct irq_domain *real_parent, struct msi_domain_info *info) +{ + struct irq_chip *chip = info->chip; + + if (!msi_lib_init_dev_msi_info(dev, domain, real_parent, info)) + return false; + + chip->irq_ack = xilinx_msi_top_irq_ack; + return true; +} + +#define XILINX_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \ + MSI_FLAG_USE_DEF_CHIP_OPS | \ + MSI_FLAG_NO_AFFINITY) + +static const struct msi_parent_ops xilinx_msi_parent_ops = { + .required_flags = XILINX_MSI_FLAGS_REQUIRED, + .supported_flags = MSI_GENERIC_FLAGS_MASK, + .bus_select_token = DOMAIN_BUS_PCI_MSI, + .prefix = "xilinx-", + .init_dev_msi_info = xilinx_init_dev_msi_info, }; static int xilinx_allocate_msi_domains(struct xilinx_pcie *pcie) { - struct fwnode_handle *fwnode = dev_fwnode(pcie->dev); - struct irq_domain *parent; - - parent = irq_domain_create_linear(fwnode, XILINX_NUM_MSI_IRQS, - &xilinx_msi_domain_ops, pcie); - if (!parent) { - dev_err(pcie->dev, "failed to create IRQ domain\n"); - return -ENOMEM; - } - irq_domain_update_bus_token(parent, DOMAIN_BUS_NEXUS); - - pcie->msi_domain = pci_msi_create_irq_domain(fwnode, &xilinx_msi_info, parent); + struct irq_domain_info info = { + .fwnode = dev_fwnode(pcie->dev), + .ops = &xilinx_msi_domain_ops, + .host_data = pcie, + .size = XILINX_NUM_MSI_IRQS, + }; + + pcie->msi_domain = msi_create_parent_irq_domain(&info, &xilinx_msi_parent_ops); if (!pcie->msi_domain) { dev_err(pcie->dev, "failed to create MSI domain\n"); - irq_domain_remove(parent); return -ENOMEM; } @@ -295,10 +304,7 @@ static int xilinx_allocate_msi_domains(struct xilinx_pcie *pcie) static void xilinx_free_msi_domains(struct xilinx_pcie *pcie) { - struct irq_domain *parent = pcie->msi_domain->parent; - irq_domain_remove(pcie->msi_domain); - irq_domain_remove(parent); } /* INTx Functions */ @@ -461,9 +467,8 @@ static int xilinx_pcie_init_irq_domain(struct xilinx_pcie *pcie) return -ENODEV; } - pcie->leg_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX, - &intx_domain_ops, - pcie); + pcie->leg_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node), PCI_NUM_INTX, + &intx_domain_ops, pcie); of_node_put(pcie_intc_node); if (!pcie->leg_domain) { dev_err(dev, "Failed to get a INTx IRQ domain\n"); diff --git a/drivers/pci/controller/plda/Kconfig b/drivers/pci/controller/plda/Kconfig index c0e14146d7e4..62120101139c 100644 --- a/drivers/pci/controller/plda/Kconfig +++ b/drivers/pci/controller/plda/Kconfig @@ -5,6 +5,7 @@ menu "PLDA-based PCIe controllers" config PCIE_PLDA_HOST bool + select IRQ_MSI_LIB config PCIE_MICROCHIP_HOST tristate "Microchip AXI PCIe controller" diff --git a/drivers/pci/controller/plda/pcie-microchip-host.c b/drivers/pci/controller/plda/pcie-microchip-host.c index 3fdfffdf0270..24bbf93b8051 100644 --- a/drivers/pci/controller/plda/pcie-microchip-host.c +++ b/drivers/pci/controller/plda/pcie-microchip-host.c @@ -23,6 +23,7 @@ #include <linux/wordpart.h> #include "../../pci.h" +#include "../pci-host-common.h" #include "pcie-plda.h" #define MC_MAX_NUM_INBOUND_WINDOWS 8 diff --git a/drivers/pci/controller/plda/pcie-plda-host.c b/drivers/pci/controller/plda/pcie-plda-host.c index 4153214ca410..8e2db2e5b64b 100644 --- a/drivers/pci/controller/plda/pcie-plda-host.c +++ b/drivers/pci/controller/plda/pcie-plda-host.c @@ -11,6 +11,7 @@ #include <linux/align.h> #include <linux/bitfield.h> #include <linux/irqchip/chained_irq.h> +#include 
<linux/irqchip/irq-msi-lib.h> #include <linux/irqdomain.h> #include <linux/msi.h> #include <linux/pci_regs.h> @@ -134,43 +135,41 @@ static const struct irq_domain_ops msi_domain_ops = { .free = plda_irq_msi_domain_free, }; -static struct irq_chip plda_msi_irq_chip = { - .name = "PLDA PCIe MSI", - .irq_ack = irq_chip_ack_parent, - .irq_mask = pci_msi_mask_irq, - .irq_unmask = pci_msi_unmask_irq, -}; - -static struct msi_domain_info plda_msi_domain_info = { - .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | - MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX, - .chip = &plda_msi_irq_chip, +#define PLDA_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \ + MSI_FLAG_USE_DEF_CHIP_OPS | \ + MSI_FLAG_NO_AFFINITY) +#define PLDA_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \ + MSI_FLAG_PCI_MSIX) + +static const struct msi_parent_ops plda_msi_parent_ops = { + .required_flags = PLDA_MSI_FLAGS_REQUIRED, + .supported_flags = PLDA_MSI_FLAGS_SUPPORTED, + .chip_flags = MSI_CHIP_FLAG_SET_ACK, + .bus_select_token = DOMAIN_BUS_PCI_MSI, + .prefix = "PLDA-", + .init_dev_msi_info = msi_lib_init_dev_msi_info, }; static int plda_allocate_msi_domains(struct plda_pcie_rp *port) { struct device *dev = port->dev; - struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node); struct plda_msi *msi = &port->msi; mutex_init(&port->msi.lock); - msi->dev_domain = irq_domain_add_linear(NULL, msi->num_vectors, - &msi_domain_ops, port); + struct irq_domain_info info = { + .fwnode = dev_fwnode(dev), + .ops = &msi_domain_ops, + .host_data = port, + .size = msi->num_vectors, + }; + + msi->dev_domain = msi_create_parent_irq_domain(&info, &plda_msi_parent_ops); if (!msi->dev_domain) { dev_err(dev, "failed to create IRQ domain\n"); return -ENOMEM; } - msi->msi_domain = pci_msi_create_irq_domain(fwnode, - &plda_msi_domain_info, - msi->dev_domain); - if (!msi->msi_domain) { - dev_err(dev, "failed to create MSI domain\n"); - irq_domain_remove(msi->dev_domain); - return -ENOMEM; - } - return 0; } @@ -393,10 +392,9 @@ static int plda_pcie_init_irq_domains(struct plda_pcie_rp *port) return -EINVAL; } - port->event_domain = irq_domain_add_linear(pcie_intc_node, - port->num_events, - &plda_event_domain_ops, - port); + port->event_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node), + port->num_events, &plda_event_domain_ops, + port); if (!port->event_domain) { dev_err(dev, "failed to get event domain\n"); of_node_put(pcie_intc_node); @@ -405,8 +403,8 @@ static int plda_pcie_init_irq_domains(struct plda_pcie_rp *port) irq_domain_update_bus_token(port->event_domain, DOMAIN_BUS_NEXUS); - port->intx_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX, - &intx_domain_ops, port); + port->intx_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node), PCI_NUM_INTX, + &intx_domain_ops, port); if (!port->intx_domain) { dev_err(dev, "failed to get an INTx IRQ domain\n"); of_node_put(pcie_intc_node); @@ -565,7 +563,6 @@ static void plda_pcie_irq_domain_deinit(struct plda_pcie_rp *pcie) irq_set_chained_handler_and_data(pcie->msi_irq, NULL, NULL); irq_set_chained_handler_and_data(pcie->intx_irq, NULL, NULL); - irq_domain_remove(pcie->msi.msi_domain); irq_domain_remove(pcie->msi.dev_domain); irq_domain_remove(pcie->intx_domain); diff --git a/drivers/pci/controller/plda/pcie-plda.h b/drivers/pci/controller/plda/pcie-plda.h index 61ece26065ea..6b8665df7bf0 100644 --- a/drivers/pci/controller/plda/pcie-plda.h +++ b/drivers/pci/controller/plda/pcie-plda.h @@ -164,7 +164,6 @@ struct plda_pcie_host_ops { struct plda_msi 
{ struct mutex lock; /* Protect used bitmap */ - struct irq_domain *msi_domain; struct irq_domain *dev_domain; u32 num_vectors; u64 vector_phy; diff --git a/drivers/pci/controller/plda/pcie-starfive.c b/drivers/pci/controller/plda/pcie-starfive.c index e73c1b7bc8ef..3caf53c6c082 100644 --- a/drivers/pci/controller/plda/pcie-starfive.c +++ b/drivers/pci/controller/plda/pcie-starfive.c @@ -368,7 +368,7 @@ static int starfive_pcie_host_init(struct plda_pcie_rp *plda) * of 100ms following exit from a conventional reset before * sending a configuration request to the device. */ - msleep(PCIE_RESET_CONFIG_DEVICE_WAIT_MS); + msleep(PCIE_RESET_CONFIG_WAIT_MS); if (starfive_pcie_host_wait_for_link(pcie)) dev_info(dev, "port link down\n"); diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c index 8df064b62a2f..9bbb0ff4cc15 100644 --- a/drivers/pci/controller/vmd.c +++ b/drivers/pci/controller/vmd.c @@ -7,6 +7,7 @@ #include <linux/device.h> #include <linux/interrupt.h> #include <linux/irq.h> +#include <linux/irqchip/irq-msi-lib.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/msi.h> @@ -174,58 +175,52 @@ static void vmd_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) msg->arch_addr_lo.destid_0_7 = index_from_irqs(vmd, irq); } -/* - * We rely on MSI_FLAG_USE_DEF_CHIP_OPS to set the IRQ mask/unmask ops. - */ static void vmd_irq_enable(struct irq_data *data) { struct vmd_irq *vmdirq = data->chip_data; - unsigned long flags; - raw_spin_lock_irqsave(&list_lock, flags); - WARN_ON(vmdirq->enabled); - list_add_tail_rcu(&vmdirq->node, &vmdirq->irq->irq_list); - vmdirq->enabled = true; - raw_spin_unlock_irqrestore(&list_lock, flags); + scoped_guard(raw_spinlock_irqsave, &list_lock) { + WARN_ON(vmdirq->enabled); + list_add_tail_rcu(&vmdirq->node, &vmdirq->irq->irq_list); + vmdirq->enabled = true; + } +} +static void vmd_pci_msi_enable(struct irq_data *data) +{ + vmd_irq_enable(data->parent_data); data->chip->irq_unmask(data); } static void vmd_irq_disable(struct irq_data *data) { struct vmd_irq *vmdirq = data->chip_data; - unsigned long flags; - - data->chip->irq_mask(data); - raw_spin_lock_irqsave(&list_lock, flags); - if (vmdirq->enabled) { - list_del_rcu(&vmdirq->node); - vmdirq->enabled = false; + scoped_guard(raw_spinlock_irqsave, &list_lock) { + if (vmdirq->enabled) { + list_del_rcu(&vmdirq->node); + vmdirq->enabled = false; + } } - raw_spin_unlock_irqrestore(&list_lock, flags); +} + +static void vmd_pci_msi_disable(struct irq_data *data) +{ + data->chip->irq_mask(data); + vmd_irq_disable(data->parent_data); } static struct irq_chip vmd_msi_controller = { .name = "VMD-MSI", - .irq_enable = vmd_irq_enable, - .irq_disable = vmd_irq_disable, .irq_compose_msi_msg = vmd_compose_msi_msg, }; -static irq_hw_number_t vmd_get_hwirq(struct msi_domain_info *info, - msi_alloc_info_t *arg) -{ - return 0; -} - /* * XXX: We can be even smarter selecting the best IRQ once we solve the * affinity problem. 
*/ static struct vmd_irq_list *vmd_next_irq(struct vmd_dev *vmd, struct msi_desc *desc) { - unsigned long flags; int i, best; if (vmd->msix_count == 1 + vmd->first_vec) @@ -242,113 +237,129 @@ static struct vmd_irq_list *vmd_next_irq(struct vmd_dev *vmd, struct msi_desc *d return &vmd->irqs[vmd->first_vec]; } - raw_spin_lock_irqsave(&list_lock, flags); - best = vmd->first_vec + 1; - for (i = best; i < vmd->msix_count; i++) - if (vmd->irqs[i].count < vmd->irqs[best].count) - best = i; - vmd->irqs[best].count++; - raw_spin_unlock_irqrestore(&list_lock, flags); + scoped_guard(raw_spinlock_irq, &list_lock) { + best = vmd->first_vec + 1; + for (i = best; i < vmd->msix_count; i++) + if (vmd->irqs[i].count < vmd->irqs[best].count) + best = i; + vmd->irqs[best].count++; + } return &vmd->irqs[best]; } -static int vmd_msi_init(struct irq_domain *domain, struct msi_domain_info *info, - unsigned int virq, irq_hw_number_t hwirq, - msi_alloc_info_t *arg) +static void vmd_msi_free(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs); + +static int vmd_msi_alloc(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs, void *arg) { - struct msi_desc *desc = arg->desc; - struct vmd_dev *vmd = vmd_from_bus(msi_desc_to_pci_dev(desc)->bus); - struct vmd_irq *vmdirq = kzalloc(sizeof(*vmdirq), GFP_KERNEL); + struct msi_desc *desc = ((msi_alloc_info_t *)arg)->desc; + struct vmd_dev *vmd = domain->host_data; + struct vmd_irq *vmdirq; - if (!vmdirq) - return -ENOMEM; + for (int i = 0; i < nr_irqs; ++i) { + vmdirq = kzalloc(sizeof(*vmdirq), GFP_KERNEL); + if (!vmdirq) { + vmd_msi_free(domain, virq, i); + return -ENOMEM; + } - INIT_LIST_HEAD(&vmdirq->node); - vmdirq->irq = vmd_next_irq(vmd, desc); - vmdirq->virq = virq; + INIT_LIST_HEAD(&vmdirq->node); + vmdirq->irq = vmd_next_irq(vmd, desc); + vmdirq->virq = virq + i; + + irq_domain_set_info(domain, virq + i, vmdirq->irq->virq, + &vmd_msi_controller, vmdirq, + handle_untracked_irq, vmd, NULL); + } - irq_domain_set_info(domain, virq, vmdirq->irq->virq, info->chip, vmdirq, - handle_untracked_irq, vmd, NULL); return 0; } -static void vmd_msi_free(struct irq_domain *domain, - struct msi_domain_info *info, unsigned int virq) +static void vmd_msi_free(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs) { - struct vmd_irq *vmdirq = irq_get_chip_data(virq); - unsigned long flags; + struct vmd_irq *vmdirq; + + for (int i = 0; i < nr_irqs; ++i) { + vmdirq = irq_get_chip_data(virq + i); - synchronize_srcu(&vmdirq->irq->srcu); + synchronize_srcu(&vmdirq->irq->srcu); - /* XXX: Potential optimization to rebalance */ - raw_spin_lock_irqsave(&list_lock, flags); - vmdirq->irq->count--; - raw_spin_unlock_irqrestore(&list_lock, flags); + /* XXX: Potential optimization to rebalance */ + scoped_guard(raw_spinlock_irq, &list_lock) + vmdirq->irq->count--; - kfree(vmdirq); + kfree(vmdirq); + } } -static int vmd_msi_prepare(struct irq_domain *domain, struct device *dev, - int nvec, msi_alloc_info_t *arg) -{ - struct pci_dev *pdev = to_pci_dev(dev); - struct vmd_dev *vmd = vmd_from_bus(pdev->bus); +static const struct irq_domain_ops vmd_msi_domain_ops = { + .alloc = vmd_msi_alloc, + .free = vmd_msi_free, +}; - if (nvec > vmd->msix_count) - return vmd->msix_count; +static bool vmd_init_dev_msi_info(struct device *dev, struct irq_domain *domain, + struct irq_domain *real_parent, + struct msi_domain_info *info) +{ + if (WARN_ON_ONCE(info->bus_token != DOMAIN_BUS_PCI_DEVICE_MSIX)) + return false; - memset(arg, 0, sizeof(*arg)); - return 0; -} + if 
(!msi_lib_init_dev_msi_info(dev, domain, real_parent, info)) + return false; -static void vmd_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc) -{ - arg->desc = desc; + info->chip->irq_enable = vmd_pci_msi_enable; + info->chip->irq_disable = vmd_pci_msi_disable; + return true; } -static struct msi_domain_ops vmd_msi_domain_ops = { - .get_hwirq = vmd_get_hwirq, - .msi_init = vmd_msi_init, - .msi_free = vmd_msi_free, - .msi_prepare = vmd_msi_prepare, - .set_desc = vmd_set_desc, -}; +#define VMD_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | MSI_FLAG_PCI_MSIX) +#define VMD_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_NO_AFFINITY) -static struct msi_domain_info vmd_msi_domain_info = { - .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | - MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX, - .ops = &vmd_msi_domain_ops, - .chip = &vmd_msi_controller, +static const struct msi_parent_ops vmd_msi_parent_ops = { + .supported_flags = VMD_MSI_FLAGS_SUPPORTED, + .required_flags = VMD_MSI_FLAGS_REQUIRED, + .bus_select_token = DOMAIN_BUS_VMD_MSI, + .bus_select_mask = MATCH_PCI_MSI, + .prefix = "VMD-", + .init_dev_msi_info = vmd_init_dev_msi_info, }; -static void vmd_set_msi_remapping(struct vmd_dev *vmd, bool enable) -{ - u16 reg; - - pci_read_config_word(vmd->dev, PCI_REG_VMCONFIG, &reg); - reg = enable ? (reg & ~VMCONFIG_MSI_REMAP) : - (reg | VMCONFIG_MSI_REMAP); - pci_write_config_word(vmd->dev, PCI_REG_VMCONFIG, reg); -} - static int vmd_create_irq_domain(struct vmd_dev *vmd) { - struct fwnode_handle *fn; + struct irq_domain_info info = { + .size = vmd->msix_count, + .ops = &vmd_msi_domain_ops, + .host_data = vmd, + }; - fn = irq_domain_alloc_named_id_fwnode("VMD-MSI", vmd->sysdata.domain); - if (!fn) + info.fwnode = irq_domain_alloc_named_id_fwnode("VMD-MSI", + vmd->sysdata.domain); + if (!info.fwnode) return -ENODEV; - vmd->irq_domain = pci_msi_create_irq_domain(fn, &vmd_msi_domain_info, NULL); + vmd->irq_domain = msi_create_parent_irq_domain(&info, + &vmd_msi_parent_ops); if (!vmd->irq_domain) { - irq_domain_free_fwnode(fn); + irq_domain_free_fwnode(info.fwnode); return -ENODEV; } return 0; } +static void vmd_set_msi_remapping(struct vmd_dev *vmd, bool enable) +{ + u16 reg; + + pci_read_config_word(vmd->dev, PCI_REG_VMCONFIG, &reg); + reg = enable ?
(reg & ~VMCONFIG_MSI_REMAP) : + (reg | VMCONFIG_MSI_REMAP); + pci_write_config_word(vmd->dev, PCI_REG_VMCONFIG, reg); +} + static void vmd_remove_irq_domain(struct vmd_dev *vmd) { /* @@ -387,29 +398,24 @@ static int vmd_pci_read(struct pci_bus *bus, unsigned int devfn, int reg, { struct vmd_dev *vmd = vmd_from_bus(bus); void __iomem *addr = vmd_cfg_addr(vmd, bus, devfn, reg, len); - unsigned long flags; - int ret = 0; if (!addr) return -EFAULT; - raw_spin_lock_irqsave(&vmd->cfg_lock, flags); + guard(raw_spinlock_irqsave)(&vmd->cfg_lock); switch (len) { case 1: *value = readb(addr); - break; + return 0; case 2: *value = readw(addr); - break; + return 0; case 4: *value = readl(addr); - break; + return 0; default: - ret = -EINVAL; - break; + return -EINVAL; } - raw_spin_unlock_irqrestore(&vmd->cfg_lock, flags); - return ret; } /* @@ -422,32 +428,27 @@ static int vmd_pci_write(struct pci_bus *bus, unsigned int devfn, int reg, { struct vmd_dev *vmd = vmd_from_bus(bus); void __iomem *addr = vmd_cfg_addr(vmd, bus, devfn, reg, len); - unsigned long flags; - int ret = 0; if (!addr) return -EFAULT; - raw_spin_lock_irqsave(&vmd->cfg_lock, flags); + guard(raw_spinlock_irqsave)(&vmd->cfg_lock); switch (len) { case 1: writeb(value, addr); readb(addr); - break; + return 0; case 2: writew(value, addr); readw(addr); - break; + return 0; case 4: writel(value, addr); readl(addr); - break; + return 0; default: - ret = -EINVAL; - break; + return -EINVAL; } - raw_spin_unlock_irqrestore(&vmd->cfg_lock, flags); - return ret; } static struct pci_ops vmd_ops = { @@ -889,12 +890,6 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features) ret = vmd_create_irq_domain(vmd); if (ret) return ret; - - /* - * Override the IRQ domain bus token so the domain can be - * distinguished from a regular PCI/MSI domain. - */ - irq_domain_update_bus_token(vmd->irq_domain, DOMAIN_BUS_VMD_MSI); } else { vmd_set_msi_remapping(vmd, false); } @@ -1129,6 +1124,8 @@ static const struct pci_device_id vmd_ids[] = { .driver_data = VMD_FEATS_CLIENT,}, {PCI_VDEVICE(INTEL, 0xb06f), .driver_data = VMD_FEATS_CLIENT,}, + {PCI_VDEVICE(INTEL, 0xb07f), + .driver_data = VMD_FEATS_CLIENT,}, {0,} }; MODULE_DEVICE_TABLE(pci, vmd_ids); diff --git a/drivers/pci/devres.c b/drivers/pci/devres.c index 73047316889e..9f4190501395 100644 --- a/drivers/pci/devres.c +++ b/drivers/pci/devres.c @@ -6,30 +6,13 @@ /* * On the state of PCI's devres implementation: * - * The older devres API for PCI has two significant problems: + * The older PCI devres API has one significant problem: * - * 1. It is very strongly tied to the statically allocated mapping table in - * struct pcim_iomap_devres below. This is mostly solved in the sense of the - * pcim_ functions in this file providing things like ranged mapping by - * bypassing this table, whereas the functions that were present in the old - * API still enter the mapping addresses into the table for users of the old - * API. - * - * 2. The region-request-functions in pci.c do become managed IF the device has - * been enabled with pcim_enable_device() instead of pci_enable_device(). - * This resulted in the API becoming inconsistent: Some functions have an - * obviously managed counter-part (e.g., pci_iomap() <-> pcim_iomap()), - * whereas some don't and are never managed, while others don't and are - * _sometimes_ managed (e.g. pci_request_region()). 
- * - * Consequently, in the new API, region requests performed by the pcim_ - * functions are automatically cleaned up through the devres callback - * pcim_addr_resource_release(). - * - * Users of pcim_enable_device() + pci_*region*() are redirected in - * pci.c to the managed functions here in this file. This isn't exactly - * perfect, but the only alternative way would be to port ALL drivers - * using said combination to pcim_ functions. + * It is very strongly tied to the statically allocated mapping table in struct + * pcim_iomap_devres below. This is mostly solved in the sense of the pcim_ + * functions in this file providing things like ranged mapping by bypassing + * this table, whereas the functions that were present in the old API still + * enter the mapping addresses into the table for users of the old API. * * TODO: * Remove the legacy table entirely once all calls to pcim_iomap_table() in @@ -87,104 +70,6 @@ static inline void pcim_addr_devres_clear(struct pcim_addr_devres *res) res->bar = -1; } -/* - * The following functions, __pcim_*_region*, exist as counterparts to the - * versions from pci.c - which, unfortunately, can be in "hybrid mode", i.e., - * sometimes managed, sometimes not. - * - * To separate the APIs cleanly, we define our own, simplified versions here. - */ - -/** - * __pcim_request_region_range - Request a ranged region - * @pdev: PCI device the region belongs to - * @bar: BAR the range is within - * @offset: offset from the BAR's start address - * @maxlen: length in bytes, beginning at @offset - * @name: name of the driver requesting the resource - * @req_flags: flags for the request, e.g., for kernel-exclusive requests - * - * Returns: 0 on success, a negative error code on failure. - * - * Request a range within a device's PCI BAR. Sanity check the input. - */ -static int __pcim_request_region_range(struct pci_dev *pdev, int bar, - unsigned long offset, - unsigned long maxlen, - const char *name, int req_flags) -{ - resource_size_t start = pci_resource_start(pdev, bar); - resource_size_t len = pci_resource_len(pdev, bar); - unsigned long dev_flags = pci_resource_flags(pdev, bar); - - if (start == 0 || len == 0) /* Unused BAR. */ - return 0; - if (len <= offset) - return -EINVAL; - - start += offset; - len -= offset; - - if (len > maxlen && maxlen != 0) - len = maxlen; - - if (dev_flags & IORESOURCE_IO) { - if (!request_region(start, len, name)) - return -EBUSY; - } else if (dev_flags & IORESOURCE_MEM) { - if (!__request_mem_region(start, len, name, req_flags)) - return -EBUSY; - } else { - /* That's not a device we can request anything on. */ - return -ENODEV; - } - - return 0; -} - -static void __pcim_release_region_range(struct pci_dev *pdev, int bar, - unsigned long offset, - unsigned long maxlen) -{ - resource_size_t start = pci_resource_start(pdev, bar); - resource_size_t len = pci_resource_len(pdev, bar); - unsigned long flags = pci_resource_flags(pdev, bar); - - if (len <= offset || start == 0) - return; - - if (len == 0 || maxlen == 0) /* This an unused BAR. Do nothing. 
*/ - return; - - start += offset; - len -= offset; - - if (len > maxlen) - len = maxlen; - - if (flags & IORESOURCE_IO) - release_region(start, len); - else if (flags & IORESOURCE_MEM) - release_mem_region(start, len); -} - -static int __pcim_request_region(struct pci_dev *pdev, int bar, - const char *name, int flags) -{ - unsigned long offset = 0; - unsigned long len = pci_resource_len(pdev, bar); - - return __pcim_request_region_range(pdev, bar, offset, len, name, flags); -} - -static void __pcim_release_region(struct pci_dev *pdev, int bar) -{ - unsigned long offset = 0; - unsigned long len = pci_resource_len(pdev, bar); - - __pcim_release_region_range(pdev, bar, offset, len); -} - static void pcim_addr_resource_release(struct device *dev, void *resource_raw) { struct pci_dev *pdev = to_pci_dev(dev); @@ -192,11 +77,11 @@ static void pcim_addr_resource_release(struct device *dev, void *resource_raw) switch (res->type) { case PCIM_ADDR_DEVRES_TYPE_REGION: - __pcim_release_region(pdev, res->bar); + pci_release_region(pdev, res->bar); break; case PCIM_ADDR_DEVRES_TYPE_REGION_MAPPING: pci_iounmap(pdev, res->baseaddr); - __pcim_release_region(pdev, res->bar); + pci_release_region(pdev, res->bar); break; case PCIM_ADDR_DEVRES_TYPE_MAPPING: pci_iounmap(pdev, res->baseaddr); @@ -735,7 +620,7 @@ void __iomem *pcim_iomap_region(struct pci_dev *pdev, int bar, res->type = PCIM_ADDR_DEVRES_TYPE_REGION_MAPPING; res->bar = bar; - ret = __pcim_request_region(pdev, bar, name, 0); + ret = pci_request_region(pdev, bar, name); if (ret != 0) goto err_region; @@ -749,7 +634,7 @@ void __iomem *pcim_iomap_region(struct pci_dev *pdev, int bar, return res->baseaddr; err_iomap: - __pcim_release_region(pdev, bar); + pci_release_region(pdev, bar); err_region: pcim_addr_devres_free(res); @@ -823,8 +708,20 @@ err: } EXPORT_SYMBOL(pcim_iomap_regions); -static int _pcim_request_region(struct pci_dev *pdev, int bar, const char *name, - int request_flags) +/** + * pcim_request_region - Request a PCI BAR + * @pdev: PCI device to request region for + * @bar: Index of BAR to request + * @name: Name of the driver requesting the resource + * + * Returns: 0 on success, a negative error code on failure. + * + * Request region specified by @bar. + * + * The region will automatically be released on driver detach. If desired, + * release manually only with pcim_release_region(). + */ +int pcim_request_region(struct pci_dev *pdev, int bar, const char *name) { int ret; struct pcim_addr_devres *res; @@ -838,7 +735,7 @@ static int _pcim_request_region(struct pci_dev *pdev, int bar, const char *name, res->type = PCIM_ADDR_DEVRES_TYPE_REGION; res->bar = bar; - ret = __pcim_request_region(pdev, bar, name, request_flags); + ret = pci_request_region(pdev, bar, name); if (ret != 0) { pcim_addr_devres_free(res); return ret; @@ -847,45 +744,9 @@ static int _pcim_request_region(struct pci_dev *pdev, int bar, const char *name, devres_add(&pdev->dev, res); return 0; } - -/** - * pcim_request_region - Request a PCI BAR - * @pdev: PCI device to request region for - * @bar: Index of BAR to request - * @name: Name of the driver requesting the resource - * - * Returns: 0 on success, a negative error code on failure. - * - * Request region specified by @bar. - * - * The region will automatically be released on driver detach. If desired, - * release manually only with pcim_release_region(). 
- */ -int pcim_request_region(struct pci_dev *pdev, int bar, const char *name) -{ - return _pcim_request_region(pdev, bar, name, 0); -} EXPORT_SYMBOL(pcim_request_region); /** - * pcim_request_region_exclusive - Request a PCI BAR exclusively - * @pdev: PCI device to request region for - * @bar: Index of BAR to request - * @name: Name of the driver requesting the resource - * - * Returns: 0 on success, a negative error code on failure. - * - * Request region specified by @bar exclusively. - * - * The region will automatically be released on driver detach. If desired, - * release manually only with pcim_release_region(). - */ -int pcim_request_region_exclusive(struct pci_dev *pdev, int bar, const char *name) -{ - return _pcim_request_region(pdev, bar, name, IORESOURCE_EXCLUSIVE); -} - -/** * pcim_release_region - Release a PCI BAR * @pdev: PCI device to operate on * @bar: Index of BAR to release @@ -893,7 +754,7 @@ int pcim_request_region_exclusive(struct pci_dev *pdev, int bar, const char *nam * Release a region manually that was previously requested by * pcim_request_region(). */ -void pcim_release_region(struct pci_dev *pdev, int bar) +static void pcim_release_region(struct pci_dev *pdev, int bar) { struct pcim_addr_devres res_searched; @@ -956,30 +817,6 @@ err: EXPORT_SYMBOL(pcim_request_all_regions); /** - * pcim_iounmap_regions - Unmap and release PCI BARs (DEPRECATED) - * @pdev: PCI device to map IO resources for - * @mask: Mask of BARs to unmap and release - * - * Unmap and release regions specified by @mask. - * - * This function is DEPRECATED. Do not use it in new code. - * Use pcim_iounmap_region() instead. - */ -void pcim_iounmap_regions(struct pci_dev *pdev, int mask) -{ - int i; - - for (i = 0; i < PCI_STD_NUM_BARS; i++) { - if (!mask_contains_bar(mask, i)) - continue; - - pcim_iounmap_region(pdev, i); - pcim_remove_bar_from_legacy_table(pdev, i); - } -} -EXPORT_SYMBOL(pcim_iounmap_regions); - -/** * pcim_iomap_range - Create a ranged __iomap mapping within a PCI BAR * @pdev: PCI device to map IO resources for * @bar: Index of the BAR diff --git a/drivers/pci/endpoint/Kconfig b/drivers/pci/endpoint/Kconfig index 1c5d82eb57d4..8dad291be8b8 100644 --- a/drivers/pci/endpoint/Kconfig +++ b/drivers/pci/endpoint/Kconfig @@ -28,6 +28,14 @@ config PCI_ENDPOINT_CONFIGFS configure the endpoint function and used to bind the function with an endpoint controller. +config PCI_ENDPOINT_MSI_DOORBELL + bool "PCI Endpoint MSI Doorbell Support" + depends on PCI_ENDPOINT && GENERIC_MSI_IRQ + help + This enables the EP's MSI interrupt controller to function as a + doorbell. The RC can trigger doorbell in EP by writing data to a + dedicated BAR, which the EP maps to the controller's message address. 
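A hypothetical host-side view of the doorbell mechanism this help text describes: the RC performs an ordinary MMIO write of the advertised MSI data value at the advertised offset into the doorbell BAR, and the EP's MSI controller receives it as an interrupt. The bar/offset/data parameters mirror the doorbell_bar, doorbell_offset and doorbell_data registers that pci-epf-test exports in the hunks below; the function itself is illustrative, not part of the patch:

	#include <linux/io.h>
	#include <linux/pci.h>

	static void demo_ring_doorbell(struct pci_dev *pdev, int db_bar,
				       u32 db_offset, u32 db_data)
	{
		void __iomem *base = pci_iomap(pdev, db_bar, 0);

		if (!base)
			return;

		/*
		 * The EP has mapped this BAR onto its MSI controller's
		 * message address, so this plain MMIO write arrives at
		 * the EP as an MSI.
		 */
		writel(db_data, base + db_offset);

		pci_iounmap(pdev, base);
	}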
+ source "drivers/pci/endpoint/functions/Kconfig" endmenu diff --git a/drivers/pci/endpoint/Makefile b/drivers/pci/endpoint/Makefile index 95b2fe47e3b0..b4869d52053a 100644 --- a/drivers/pci/endpoint/Makefile +++ b/drivers/pci/endpoint/Makefile @@ -6,3 +6,4 @@ obj-$(CONFIG_PCI_ENDPOINT_CONFIGFS) += pci-ep-cfs.o obj-$(CONFIG_PCI_ENDPOINT) += pci-epc-core.o pci-epf-core.o\ pci-epc-mem.o functions/ +obj-$(CONFIG_PCI_ENDPOINT_MSI_DOORBELL) += pci-ep-msi.o diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c index 50eb4106369f..e091193bd8a8 100644 --- a/drivers/pci/endpoint/functions/pci-epf-test.c +++ b/drivers/pci/endpoint/functions/pci-epf-test.c @@ -11,12 +11,14 @@ #include <linux/dmaengine.h> #include <linux/io.h> #include <linux/module.h> +#include <linux/msi.h> #include <linux/slab.h> #include <linux/pci_ids.h> #include <linux/random.h> #include <linux/pci-epc.h> #include <linux/pci-epf.h> +#include <linux/pci-ep-msi.h> #include <linux/pci_regs.h> #define IRQ_TYPE_INTX 0 @@ -29,6 +31,8 @@ #define COMMAND_READ BIT(3) #define COMMAND_WRITE BIT(4) #define COMMAND_COPY BIT(5) +#define COMMAND_ENABLE_DOORBELL BIT(6) +#define COMMAND_DISABLE_DOORBELL BIT(7) #define STATUS_READ_SUCCESS BIT(0) #define STATUS_READ_FAIL BIT(1) @@ -39,6 +43,11 @@ #define STATUS_IRQ_RAISED BIT(6) #define STATUS_SRC_ADDR_INVALID BIT(7) #define STATUS_DST_ADDR_INVALID BIT(8) +#define STATUS_DOORBELL_SUCCESS BIT(9) +#define STATUS_DOORBELL_ENABLE_SUCCESS BIT(10) +#define STATUS_DOORBELL_ENABLE_FAIL BIT(11) +#define STATUS_DOORBELL_DISABLE_SUCCESS BIT(12) +#define STATUS_DOORBELL_DISABLE_FAIL BIT(13) #define FLAG_USE_DMA BIT(0) @@ -66,6 +75,7 @@ struct pci_epf_test { bool dma_supported; bool dma_private; const struct pci_epc_features *epc_features; + struct pci_epf_bar db_bar; }; struct pci_epf_test_reg { @@ -80,6 +90,9 @@ struct pci_epf_test_reg { __le32 irq_number; __le32 flags; __le32 caps; + __le32 doorbell_bar; + __le32 doorbell_offset; + __le32 doorbell_data; } __packed; static struct pci_epf_header test_header = { @@ -667,6 +680,115 @@ static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test, } } +static irqreturn_t pci_epf_test_doorbell_handler(int irq, void *data) +{ + struct pci_epf_test *epf_test = data; + enum pci_barno test_reg_bar = epf_test->test_reg_bar; + struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar]; + u32 status = le32_to_cpu(reg->status); + + status |= STATUS_DOORBELL_SUCCESS; + reg->status = cpu_to_le32(status); + pci_epf_test_raise_irq(epf_test, reg); + + return IRQ_HANDLED; +} + +static void pci_epf_test_doorbell_cleanup(struct pci_epf_test *epf_test) +{ + struct pci_epf_test_reg *reg = epf_test->reg[epf_test->test_reg_bar]; + struct pci_epf *epf = epf_test->epf; + + free_irq(epf->db_msg[0].virq, epf_test); + reg->doorbell_bar = cpu_to_le32(NO_BAR); + + pci_epf_free_doorbell(epf); +} + +static void pci_epf_test_enable_doorbell(struct pci_epf_test *epf_test, + struct pci_epf_test_reg *reg) +{ + u32 status = le32_to_cpu(reg->status); + struct pci_epf *epf = epf_test->epf; + struct pci_epc *epc = epf->epc; + struct msi_msg *msg; + enum pci_barno bar; + size_t offset; + int ret; + + ret = pci_epf_alloc_doorbell(epf, 1); + if (ret) + goto set_status_err; + + msg = &epf->db_msg[0].msg; + bar = pci_epc_get_next_free_bar(epf_test->epc_features, epf_test->test_reg_bar + 1); + if (bar < BAR_0) + goto err_doorbell_cleanup; + + ret = request_irq(epf->db_msg[0].virq, pci_epf_test_doorbell_handler, 0, + "pci-ep-test-doorbell", epf_test); 
+ if (ret) { + dev_err(&epf->dev, + "Failed to request doorbell IRQ: %d\n", + epf->db_msg[0].virq); + goto err_doorbell_cleanup; + } + + reg->doorbell_data = cpu_to_le32(msg->data); + reg->doorbell_bar = cpu_to_le32(bar); + + msg = &epf->db_msg[0].msg; + ret = pci_epf_align_inbound_addr(epf, bar, ((u64)msg->address_hi << 32) | msg->address_lo, + &epf_test->db_bar.phys_addr, &offset); + + if (ret) + goto err_doorbell_cleanup; + + reg->doorbell_offset = cpu_to_le32(offset); + + epf_test->db_bar.barno = bar; + epf_test->db_bar.size = epf->bar[bar].size; + epf_test->db_bar.flags = epf->bar[bar].flags; + + ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no, &epf_test->db_bar); + if (ret) + goto err_doorbell_cleanup; + + status |= STATUS_DOORBELL_ENABLE_SUCCESS; + reg->status = cpu_to_le32(status); + return; + +err_doorbell_cleanup: + pci_epf_test_doorbell_cleanup(epf_test); +set_status_err: + status |= STATUS_DOORBELL_ENABLE_FAIL; + reg->status = cpu_to_le32(status); +} + +static void pci_epf_test_disable_doorbell(struct pci_epf_test *epf_test, + struct pci_epf_test_reg *reg) +{ + enum pci_barno bar = le32_to_cpu(reg->doorbell_bar); + u32 status = le32_to_cpu(reg->status); + struct pci_epf *epf = epf_test->epf; + struct pci_epc *epc = epf->epc; + + if (bar < BAR_0) + goto set_status_err; + + pci_epf_test_doorbell_cleanup(epf_test); + pci_epc_clear_bar(epc, epf->func_no, epf->vfunc_no, &epf_test->db_bar); + + status |= STATUS_DOORBELL_DISABLE_SUCCESS; + reg->status = cpu_to_le32(status); + + return; + +set_status_err: + status |= STATUS_DOORBELL_DISABLE_FAIL; + reg->status = cpu_to_le32(status); +} + static void pci_epf_test_cmd_handler(struct work_struct *work) { u32 command; @@ -714,6 +836,14 @@ static void pci_epf_test_cmd_handler(struct work_struct *work) pci_epf_test_copy(epf_test, reg); pci_epf_test_raise_irq(epf_test, reg); break; + case COMMAND_ENABLE_DOORBELL: + pci_epf_test_enable_doorbell(epf_test, reg); + pci_epf_test_raise_irq(epf_test, reg); + break; + case COMMAND_DISABLE_DOORBELL: + pci_epf_test_disable_doorbell(epf_test, reg); + pci_epf_test_raise_irq(epf_test, reg); + break; default: dev_err(dev, "Invalid command 0x%x\n", command); break; diff --git a/drivers/pci/endpoint/functions/pci-epf-vntb.c b/drivers/pci/endpoint/functions/pci-epf-vntb.c index 874cb097b093..83e9ab10f9c4 100644 --- a/drivers/pci/endpoint/functions/pci-epf-vntb.c +++ b/drivers/pci/endpoint/functions/pci-epf-vntb.c @@ -70,9 +70,11 @@ static struct workqueue_struct *kpcintb_workqueue; enum epf_ntb_bar { BAR_CONFIG, BAR_DB, - BAR_MW0, BAR_MW1, BAR_MW2, + BAR_MW3, + BAR_MW4, + VNTB_BAR_NUM, }; /* @@ -132,7 +134,7 @@ struct epf_ntb { bool linkup; u32 spad_size; - enum pci_barno epf_ntb_bar[6]; + enum pci_barno epf_ntb_bar[VNTB_BAR_NUM]; struct epf_ntb_ctrl *reg; @@ -408,11 +410,9 @@ static void epf_ntb_config_spad_bar_free(struct epf_ntb *ntb) */ static int epf_ntb_config_spad_bar_alloc(struct epf_ntb *ntb) { - size_t align; enum pci_barno barno; struct epf_ntb_ctrl *ctrl; u32 spad_size, ctrl_size; - u64 size; struct pci_epf *epf = ntb->epf; struct device *dev = &epf->dev; u32 spad_count; @@ -422,31 +422,13 @@ static int epf_ntb_config_spad_bar_alloc(struct epf_ntb *ntb) epf->func_no, epf->vfunc_no); barno = ntb->epf_ntb_bar[BAR_CONFIG]; - size = epc_features->bar[barno].fixed_size; - align = epc_features->align; - - if ((!IS_ALIGNED(size, align))) - return -EINVAL; - spad_count = ntb->spad_count; - ctrl_size = sizeof(struct epf_ntb_ctrl); + ctrl_size = ALIGN(sizeof(struct epf_ntb_ctrl), sizeof(u32)); 
spad_size = 2 * spad_count * sizeof(u32); - if (!align) { - ctrl_size = roundup_pow_of_two(ctrl_size); - spad_size = roundup_pow_of_two(spad_size); - } else { - ctrl_size = ALIGN(ctrl_size, align); - spad_size = ALIGN(spad_size, align); - } - - if (!size) - size = ctrl_size + spad_size; - else if (size < ctrl_size + spad_size) - return -EINVAL; - - base = pci_epf_alloc_space(epf, size, barno, epc_features, 0); + base = pci_epf_alloc_space(epf, ctrl_size + spad_size, + barno, epc_features, 0); if (!base) { dev_err(dev, "Config/Status/SPAD alloc region fail\n"); return -ENOMEM; @@ -530,7 +512,7 @@ static int epf_ntb_db_bar_init(struct epf_ntb *ntb) struct device *dev = &ntb->epf->dev; int ret; struct pci_epf_bar *epf_bar; - void __iomem *mw_addr; + void *mw_addr; enum pci_barno barno; size_t size = sizeof(u32) * ntb->db_count; @@ -596,7 +578,7 @@ static int epf_ntb_mw_bar_init(struct epf_ntb *ntb) for (i = 0; i < ntb->num_mws; i++) { size = ntb->mws_size[i]; - barno = ntb->epf_ntb_bar[BAR_MW0 + i]; + barno = ntb->epf_ntb_bar[BAR_MW1 + i]; ntb->epf->bar[barno].barno = barno; ntb->epf->bar[barno].size = size; @@ -649,7 +631,7 @@ static void epf_ntb_mw_bar_clear(struct epf_ntb *ntb, int num_mws) int i; for (i = 0; i < num_mws; i++) { - barno = ntb->epf_ntb_bar[BAR_MW0 + i]; + barno = ntb->epf_ntb_bar[BAR_MW1 + i]; pci_epc_clear_bar(ntb->epf->epc, ntb->epf->func_no, ntb->epf->vfunc_no, @@ -674,6 +656,63 @@ static void epf_ntb_epc_destroy(struct epf_ntb *ntb) pci_epc_put(ntb->epf->epc); } + +/** + * epf_ntb_is_bar_used() - Check if a bar is used in the ntb configuration + * @ntb: NTB device that facilitates communication between HOST and VHOST + * @barno: Checked bar number + * + * Returns: true if used, false if free. + */ +static bool epf_ntb_is_bar_used(struct epf_ntb *ntb, + enum pci_barno barno) +{ + int i; + + for (i = 0; i < VNTB_BAR_NUM; i++) { + if (ntb->epf_ntb_bar[i] == barno) + return true; + } + + return false; +} + +/** + * epf_ntb_find_bar() - Assign BAR number when no configuration is provided + * @ntb: NTB device that facilitates communication between HOST and VHOST + * @epc_features: The features provided by the EPC specific to this EPF + * @bar: NTB BAR index + * @barno: Bar start index + * + * When the BAR configuration was not provided through the userspace + * configuration, automatically assign BAR as it has been historically + * done by this endpoint function. + * + * Returns: the BAR number found, if any. 
-1 otherwise + */ +static int epf_ntb_find_bar(struct epf_ntb *ntb, + const struct pci_epc_features *epc_features, + enum epf_ntb_bar bar, + enum pci_barno barno) +{ + while (ntb->epf_ntb_bar[bar] < 0) { + barno = pci_epc_get_next_free_bar(epc_features, barno); + if (barno < 0) + break; /* No more BAR available */ + + /* + * Verify if the BAR found is not already assigned + * through the provided configuration + */ + if (!epf_ntb_is_bar_used(ntb, barno)) + ntb->epf_ntb_bar[bar] = barno; + + barno += 1; + } + + return barno; +} + /** * epf_ntb_init_epc_bar() - Identify BARs to be used for each of the NTB * constructs (scratchpad region, doorbell, memorywindow) @@ -696,23 +735,21 @@ static int epf_ntb_init_epc_bar(struct epf_ntb *ntb) epc_features = pci_epc_get_features(ntb->epf->epc, ntb->epf->func_no, ntb->epf->vfunc_no); /* These are required BARs which are mandatory for NTB functionality */ - for (bar = BAR_CONFIG; bar <= BAR_MW0; bar++, barno++) { - barno = pci_epc_get_next_free_bar(epc_features, barno); + for (bar = BAR_CONFIG; bar <= BAR_MW1; bar++) { + barno = epf_ntb_find_bar(ntb, epc_features, bar, barno); if (barno < 0) { dev_err(dev, "Fail to get NTB function BAR\n"); - return barno; + return -ENOENT; } - ntb->epf_ntb_bar[bar] = barno; } /* These are optional BARs which don't impact NTB functionality */ - for (bar = BAR_MW1, i = 1; i < num_mws; bar++, barno++, i++) { - barno = pci_epc_get_next_free_bar(epc_features, barno); + for (bar = BAR_MW1, i = 1; i < num_mws; bar++, i++) { + barno = epf_ntb_find_bar(ntb, epc_features, bar, barno); if (barno < 0) { ntb->num_mws = i; dev_dbg(dev, "BAR not available for > MW%d\n", i + 1); } - ntb->epf_ntb_bar[bar] = barno; } return 0; @@ -880,6 +917,37 @@ static ssize_t epf_ntb_##_name##_store(struct config_item *item, \ return len; \ } +#define EPF_NTB_BAR_R(_name, _id) \ + static ssize_t epf_ntb_##_name##_show(struct config_item *item, \ + char *page) \ + { \ + struct config_group *group = to_config_group(item); \ + struct epf_ntb *ntb = to_epf_ntb(group); \ + \ + return sprintf(page, "%d\n", ntb->epf_ntb_bar[_id]); \ + } + +#define EPF_NTB_BAR_W(_name, _id) \ + static ssize_t epf_ntb_##_name##_store(struct config_item *item, \ + const char *page, size_t len) \ + { \ + struct config_group *group = to_config_group(item); \ + struct epf_ntb *ntb = to_epf_ntb(group); \ + int val; \ + int ret; \ + \ + ret = kstrtoint(page, 0, &val); \ + if (ret) \ + return ret; \ + \ + if (val < NO_BAR || val > BAR_5) \ + return -EINVAL; \ + \ + ntb->epf_ntb_bar[_id] = val; \ + \ + return len; \ + } + static ssize_t epf_ntb_num_mws_store(struct config_item *item, const char *page, size_t len) { @@ -919,6 +987,18 @@ EPF_NTB_MW_R(mw3) EPF_NTB_MW_W(mw3) EPF_NTB_MW_R(mw4) EPF_NTB_MW_W(mw4) +EPF_NTB_BAR_R(ctrl_bar, BAR_CONFIG) +EPF_NTB_BAR_W(ctrl_bar, BAR_CONFIG) +EPF_NTB_BAR_R(db_bar, BAR_DB) +EPF_NTB_BAR_W(db_bar, BAR_DB) +EPF_NTB_BAR_R(mw1_bar, BAR_MW1) +EPF_NTB_BAR_W(mw1_bar, BAR_MW1) +EPF_NTB_BAR_R(mw2_bar, BAR_MW2) +EPF_NTB_BAR_W(mw2_bar, BAR_MW2) +EPF_NTB_BAR_R(mw3_bar, BAR_MW3) +EPF_NTB_BAR_W(mw3_bar, BAR_MW3) +EPF_NTB_BAR_R(mw4_bar, BAR_MW4) +EPF_NTB_BAR_W(mw4_bar, BAR_MW4) CONFIGFS_ATTR(epf_ntb_, spad_count); CONFIGFS_ATTR(epf_ntb_, db_count); @@ -930,6 +1010,12 @@ CONFIGFS_ATTR(epf_ntb_, mw4); CONFIGFS_ATTR(epf_ntb_, vbus_number); CONFIGFS_ATTR(epf_ntb_, vntb_pid); CONFIGFS_ATTR(epf_ntb_, vntb_vid); +CONFIGFS_ATTR(epf_ntb_, ctrl_bar); +CONFIGFS_ATTR(epf_ntb_, db_bar); +CONFIGFS_ATTR(epf_ntb_, mw1_bar); +CONFIGFS_ATTR(epf_ntb_, mw2_bar); 
+CONFIGFS_ATTR(epf_ntb_, mw3_bar); +CONFIGFS_ATTR(epf_ntb_, mw4_bar); static struct configfs_attribute *epf_ntb_attrs[] = { &epf_ntb_attr_spad_count, @@ -942,6 +1028,12 @@ static struct configfs_attribute *epf_ntb_attrs[] = { &epf_ntb_attr_vbus_number, &epf_ntb_attr_vntb_pid, &epf_ntb_attr_vntb_vid, + &epf_ntb_attr_ctrl_bar, + &epf_ntb_attr_db_bar, + &epf_ntb_attr_mw1_bar, + &epf_ntb_attr_mw2_bar, + &epf_ntb_attr_mw3_bar, + &epf_ntb_attr_mw4_bar, NULL, }; @@ -1068,7 +1160,7 @@ static int vntb_epf_mw_set_trans(struct ntb_dev *ndev, int pidx, int idx, struct device *dev; dev = &ntb->ntb.dev; - barno = ntb->epf_ntb_bar[BAR_MW0 + idx]; + barno = ntb->epf_ntb_bar[BAR_MW1 + idx]; epf_bar = &ntb->epf->bar[barno]; epf_bar->phys_addr = addr; epf_bar->barno = barno; @@ -1399,6 +1491,7 @@ static int epf_ntb_probe(struct pci_epf *epf, { struct epf_ntb *ntb; struct device *dev; + int i; dev = &epf->dev; @@ -1409,6 +1502,11 @@ static int epf_ntb_probe(struct pci_epf *epf, epf->header = &epf_ntb_header; ntb->epf = epf; ntb->vbus_number = 0xff; + + /* Initially, no bar is assigned */ + for (i = 0; i < VNTB_BAR_NUM; i++) + ntb->epf_ntb_bar[i] = NO_BAR; + epf_set_drvdata(epf, ntb); dev_info(dev, "pci-ep epf driver loaded\n"); diff --git a/drivers/pci/endpoint/pci-ep-cfs.c b/drivers/pci/endpoint/pci-ep-cfs.c index d712c7a866d2..ef50c82e647f 100644 --- a/drivers/pci/endpoint/pci-ep-cfs.c +++ b/drivers/pci/endpoint/pci-ep-cfs.c @@ -691,6 +691,7 @@ void pci_ep_cfs_remove_epf_group(struct config_group *group) if (IS_ERR_OR_NULL(group)) return; + list_del(&group->group_entry); configfs_unregister_default_group(group); } EXPORT_SYMBOL(pci_ep_cfs_remove_epf_group); diff --git a/drivers/pci/endpoint/pci-ep-msi.c b/drivers/pci/endpoint/pci-ep-msi.c new file mode 100644 index 000000000000..9ca89cbfec15 --- /dev/null +++ b/drivers/pci/endpoint/pci-ep-msi.c @@ -0,0 +1,100 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * PCI Endpoint *Controller* (EPC) MSI library + * + * Copyright (C) 2025 NXP + * Author: Frank Li <Frank.Li@nxp.com> + */ + +#include <linux/device.h> +#include <linux/export.h> +#include <linux/irqdomain.h> +#include <linux/module.h> +#include <linux/msi.h> +#include <linux/of_irq.h> +#include <linux/pci-epc.h> +#include <linux/pci-epf.h> +#include <linux/pci-ep-cfs.h> +#include <linux/pci-ep-msi.h> +#include <linux/slab.h> + +static void pci_epf_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg) +{ + struct pci_epc *epc; + struct pci_epf *epf; + + epc = pci_epc_get(dev_name(msi_desc_to_dev(desc))); + if (!epc) + return; + + epf = list_first_entry_or_null(&epc->pci_epf, struct pci_epf, list); + + if (epf && epf->db_msg && desc->msi_index < epf->num_db) + memcpy(&epf->db_msg[desc->msi_index].msg, msg, sizeof(*msg)); + + pci_epc_put(epc); +} + +int pci_epf_alloc_doorbell(struct pci_epf *epf, u16 num_db) +{ + struct pci_epc *epc = epf->epc; + struct device *dev = &epf->dev; + struct irq_domain *domain; + void *msg; + int ret; + int i; + + /* TODO: Multi-EPF support */ + if (list_first_entry_or_null(&epc->pci_epf, struct pci_epf, list) != epf) { + dev_err(dev, "MSI doorbell doesn't support multiple EPF\n"); + return -EINVAL; + } + + domain = of_msi_map_get_device_domain(epc->dev.parent, 0, + DOMAIN_BUS_PLATFORM_MSI); + if (!domain) { + dev_err(dev, "Can't find MSI domain for EPC\n"); + return -ENODEV; + } + + if (!irq_domain_is_msi_parent(domain)) + return -ENODEV; + + if (!irq_domain_is_msi_immutable(domain)) { + dev_err(dev, "Mutable MSI controller not supported\n"); + return -ENODEV; + } + + 
dev_set_msi_domain(epc->dev.parent, domain); + + msg = kcalloc(num_db, sizeof(struct pci_epf_doorbell_msg), GFP_KERNEL); + if (!msg) + return -ENOMEM; + + epf->num_db = num_db; + epf->db_msg = msg; + + ret = platform_device_msi_init_and_alloc_irqs(epc->dev.parent, num_db, + pci_epf_write_msi_msg); + if (ret) { + dev_err(dev, "Failed to allocate MSI\n"); + kfree(msg); + return ret; + } + + for (i = 0; i < num_db; i++) + epf->db_msg[i].virq = msi_get_virq(epc->dev.parent, i); + + return ret; +} +EXPORT_SYMBOL_GPL(pci_epf_alloc_doorbell); + +void pci_epf_free_doorbell(struct pci_epf *epf) +{ + platform_device_msi_free_irqs_all(epf->epc->dev.parent); + + kfree(epf->db_msg); + epf->db_msg = NULL; + epf->num_db = 0; +} +EXPORT_SYMBOL_GPL(pci_epf_free_doorbell); diff --git a/drivers/pci/endpoint/pci-epc-core.c b/drivers/pci/endpoint/pci-epc-core.c index beabea00af91..ca7f19cc973a 100644 --- a/drivers/pci/endpoint/pci-epc-core.c +++ b/drivers/pci/endpoint/pci-epc-core.c @@ -293,8 +293,6 @@ int pci_epc_get_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no) if (interrupt < 0) return 0; - interrupt = 1 << interrupt; - return interrupt; } EXPORT_SYMBOL_GPL(pci_epc_get_msi); @@ -304,28 +302,25 @@ EXPORT_SYMBOL_GPL(pci_epc_get_msi); * @epc: the EPC device on which MSI has to be configured * @func_no: the physical endpoint function number in the EPC device * @vfunc_no: the virtual endpoint function number in the physical function - * @interrupts: number of MSI interrupts required by the EPF + * @nr_irqs: number of MSI interrupts required by the EPF * * Invoke to set the required number of MSI interrupts. */ -int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no, u8 interrupts) +int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no, u8 nr_irqs) { int ret; - u8 encode_int; if (!pci_epc_function_is_valid(epc, func_no, vfunc_no)) return -EINVAL; - if (interrupts < 1 || interrupts > 32) + if (nr_irqs < 1 || nr_irqs > 32) return -EINVAL; if (!epc->ops->set_msi) return 0; - encode_int = order_base_2(interrupts); - mutex_lock(&epc->lock); - ret = epc->ops->set_msi(epc, func_no, vfunc_no, encode_int); + ret = epc->ops->set_msi(epc, func_no, vfunc_no, nr_irqs); mutex_unlock(&epc->lock); return ret; @@ -357,7 +352,7 @@ int pci_epc_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no) if (interrupt < 0) return 0; - return interrupt + 1; + return interrupt; } EXPORT_SYMBOL_GPL(pci_epc_get_msix); @@ -366,29 +361,28 @@ EXPORT_SYMBOL_GPL(pci_epc_get_msix); * @epc: the EPC device on which MSI-X has to be configured * @func_no: the physical endpoint function number in the EPC device * @vfunc_no: the virtual endpoint function number in the physical function - * @interrupts: number of MSI-X interrupts required by the EPF + * @nr_irqs: number of MSI-X interrupts required by the EPF * @bir: BAR where the MSI-X table resides * @offset: Offset pointing to the start of MSI-X table * * Invoke to set the required number of MSI-X interrupts. 
*/ -int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no, - u16 interrupts, enum pci_barno bir, u32 offset) +int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no, u16 nr_irqs, + enum pci_barno bir, u32 offset) { int ret; if (!pci_epc_function_is_valid(epc, func_no, vfunc_no)) return -EINVAL; - if (interrupts < 1 || interrupts > 2048) + if (nr_irqs < 1 || nr_irqs > 2048) return -EINVAL; if (!epc->ops->set_msix) return 0; mutex_lock(&epc->lock); - ret = epc->ops->set_msix(epc, func_no, vfunc_no, interrupts - 1, bir, - offset); + ret = epc->ops->set_msix(epc, func_no, vfunc_no, nr_irqs, bir, offset); mutex_unlock(&epc->lock); return ret; diff --git a/drivers/pci/endpoint/pci-epf-core.c b/drivers/pci/endpoint/pci-epf-core.c index 394395c7f8de..d54e18872aef 100644 --- a/drivers/pci/endpoint/pci-epf-core.c +++ b/drivers/pci/endpoint/pci-epf-core.c @@ -236,12 +236,13 @@ void pci_epf_free_space(struct pci_epf *epf, void *addr, enum pci_barno bar, } dev = epc->dev.parent; - dma_free_coherent(dev, epf_bar[bar].size, addr, + dma_free_coherent(dev, epf_bar[bar].aligned_size, addr, epf_bar[bar].phys_addr); epf_bar[bar].phys_addr = 0; epf_bar[bar].addr = NULL; epf_bar[bar].size = 0; + epf_bar[bar].aligned_size = 0; epf_bar[bar].barno = 0; epf_bar[bar].flags = 0; } @@ -264,7 +265,7 @@ void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar, enum pci_epc_interface_type type) { u64 bar_fixed_size = epc_features->bar[bar].fixed_size; - size_t align = epc_features->align; + size_t aligned_size, align = epc_features->align; struct pci_epf_bar *epf_bar; dma_addr_t phys_addr; struct pci_epc *epc; @@ -285,12 +286,18 @@ void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar, return NULL; } size = bar_fixed_size; + } else { + /* BAR size must be power of two */ + size = roundup_pow_of_two(size); } - if (align) - size = ALIGN(size, align); - else - size = roundup_pow_of_two(size); + /* + * Allocate enough memory to accommodate the iATU alignment + * requirement. In most cases, this will be the same as .size but + * it might be different if, for example, the fixed size of a BAR + * is smaller than align. + */ + aligned_size = align ? 
ALIGN(size, align) : size; if (type == PRIMARY_INTERFACE) { epc = epf->epc; @@ -301,7 +308,7 @@ void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar, } dev = epc->dev.parent; - space = dma_alloc_coherent(dev, size, &phys_addr, GFP_KERNEL); + space = dma_alloc_coherent(dev, aligned_size, &phys_addr, GFP_KERNEL); if (!space) { dev_err(dev, "failed to allocate mem space\n"); return NULL; @@ -310,6 +317,7 @@ void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar, epf_bar[bar].phys_addr = phys_addr; epf_bar[bar].addr = space; epf_bar[bar].size = size; + epf_bar[bar].aligned_size = aligned_size; epf_bar[bar].barno = bar; if (upper_32_bits(size) || epc_features->bar[bar].only_64bit) epf_bar[bar].flags |= PCI_BASE_ADDRESS_MEM_TYPE_64; @@ -330,7 +338,7 @@ static void pci_epf_remove_cfs(struct pci_epf_driver *driver) mutex_lock(&pci_epf_mutex); list_for_each_entry_safe(group, tmp, &driver->epf_group, group_entry) pci_ep_cfs_remove_epf_group(group); - list_del(&driver->epf_group); + WARN_ON(!list_empty(&driver->epf_group)); mutex_unlock(&pci_epf_mutex); } @@ -469,6 +477,44 @@ struct pci_epf *pci_epf_create(const char *name) } EXPORT_SYMBOL_GPL(pci_epf_create); +/** + * pci_epf_align_inbound_addr() - Align the given address based on the BAR + * alignment requirement + * @epf: the EPF device + * @addr: inbound address to be aligned + * @bar: the BAR number corresponding to the given addr + * @base: base address matching the @bar alignment requirement + * @off: offset to be added to the @base address + * + * Helper function to align input @addr based on BAR's alignment requirement. + * The aligned base address and offset are returned via @base and @off. + * + * NOTE: The pci_epf_alloc_space() function already accounts for alignment. + * This API is primarily intended for use with other memory regions not + * allocated by pci_epf_alloc_space(), such as peripheral register spaces or + * the message address of a platform MSI controller. + * + * Return: 0 on success, errno otherwise. + */ +int pci_epf_align_inbound_addr(struct pci_epf *epf, enum pci_barno bar, + u64 addr, dma_addr_t *base, size_t *off) +{ + /* + * Most EP controllers require the BAR start address to be aligned to + * the BAR size, because they mask off the lower bits. + * + * Alignment to BAR size also works for controllers that support + * unaligned addresses. + */ + u64 align = epf->bar[bar].size; + + *base = round_down(addr, align); + *off = addr & (align - 1); + + return 0; +} +EXPORT_SYMBOL_GPL(pci_epf_align_inbound_addr); + static void pci_epf_dev_release(struct device *dev) { struct pci_epf *epf = to_pci_epf(dev); diff --git a/drivers/pci/hotplug/TODO b/drivers/pci/hotplug/TODO index 92e6e20e8595..7397374af171 100644 --- a/drivers/pci/hotplug/TODO +++ b/drivers/pci/hotplug/TODO @@ -2,10 +2,6 @@ Contributions are solicited in particular to remedy the following issues: cpcihp: -* There are no implementations of the ->hardware_test, ->get_power and - ->set_power callbacks in struct cpci_hp_controller_ops. Why were they - introduced? Can they be removed from the struct? - * Returned code from pci_hp_add_bridge() is not checked. 
cpqphp: diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c index b3aa34e3a4a2..18e01cd55a8e 100644 --- a/drivers/pci/hotplug/acpiphp_ibm.c +++ b/drivers/pci/hotplug/acpiphp_ibm.c @@ -98,7 +98,7 @@ static struct bin_attribute ibm_apci_table_attr __ro_after_init = { .name = "apci_table", .mode = S_IRUGO, }, - .read_new = ibm_read_apci_table, + .read = ibm_read_apci_table, .write = NULL, }; static struct acpiphp_attention_info ibm_attention_info = diff --git a/drivers/pci/hotplug/cpqphp_ctrl.c b/drivers/pci/hotplug/cpqphp_ctrl.c index c01968ef0bd7..760a5dec0431 100644 --- a/drivers/pci/hotplug/cpqphp_ctrl.c +++ b/drivers/pci/hotplug/cpqphp_ctrl.c @@ -1794,7 +1794,7 @@ static void interrupt_event_handler(struct controller *ctrl) } else if (ctrl->event_queue[loop].event_type == INT_BUTTON_CANCEL) { dbg("button cancel\n"); - del_timer(&p_slot->task_event); + timer_delete(&p_slot->task_event); mutex_lock(&ctrl->crit_sect); @@ -1883,7 +1883,7 @@ void cpqhp_pushbutton_thread(struct timer_list *t) { u8 hp_slot; struct pci_func *func; - struct slot *p_slot = from_timer(p_slot, t, task_event); + struct slot *p_slot = timer_container_of(p_slot, t, task_event); struct controller *ctrl = (struct controller *) p_slot->ctrl; pushbutton_pending = NULL; diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c index d30f1316c98e..fadcf98a8a66 100644 --- a/drivers/pci/hotplug/pci_hotplug_core.c +++ b/drivers/pci/hotplug/pci_hotplug_core.c @@ -20,13 +20,9 @@ #include <linux/types.h> #include <linux/kobject.h> #include <linux/sysfs.h> -#include <linux/pagemap.h> #include <linux/init.h> -#include <linux/mount.h> -#include <linux/namei.h> #include <linux/pci.h> #include <linux/pci_hotplug.h> -#include <linux/uaccess.h> #include "../pci.h" #include "cpci_hotplug.h" @@ -492,6 +488,75 @@ void pci_hp_destroy(struct hotplug_slot *slot) } EXPORT_SYMBOL_GPL(pci_hp_destroy); +static DECLARE_WAIT_QUEUE_HEAD(pci_hp_link_change_wq); + +/** + * pci_hp_ignore_link_change - begin code section causing spurious link changes + * @pdev: PCI hotplug bridge + * + * Mark the beginning of a code section causing spurious link changes on the + * Secondary Bus of @pdev, e.g. as a side effect of a Secondary Bus Reset, + * D3cold transition, firmware update or FPGA reconfiguration. + * + * Hotplug drivers can thus check whether such a code section is executing + * concurrently, await it with pci_hp_spurious_link_change() and ignore the + * resulting link change events. + * + * Must be paired with pci_hp_unignore_link_change(). May be called both + * from the PCI core and from Endpoint drivers. May be called for bridges + * which are not hotplug-capable, in which case it has no effect because + * no hotplug driver is bound to the bridge. + */ +void pci_hp_ignore_link_change(struct pci_dev *pdev) +{ + set_bit(PCI_LINK_CHANGING, &pdev->priv_flags); + smp_mb__after_atomic(); /* pairs with implied barrier of wait_event() */ +} + +/** + * pci_hp_unignore_link_change - end code section causing spurious link changes + * @pdev: PCI hotplug bridge + * + * Mark the end of a code section causing spurious link changes on the + * Secondary Bus of @pdev. Must be paired with pci_hp_ignore_link_change(). 
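+ *
+ * A minimal usage sketch, bracketing the operation that disturbs the
+ * link (this is the pattern pciehp_reset_slot() adopts further below):
+ *
+ *	pci_hp_ignore_link_change(pdev);
+ *	pci_bridge_secondary_bus_reset(pdev);
+ *	pci_hp_unignore_link_change(pdev);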
+ */ +void pci_hp_unignore_link_change(struct pci_dev *pdev) +{ + set_bit(PCI_LINK_CHANGED, &pdev->priv_flags); + mb(); /* ensure pci_hp_spurious_link_change() sees either bit set */ + clear_bit(PCI_LINK_CHANGING, &pdev->priv_flags); + wake_up_all(&pci_hp_link_change_wq); +} + +/** + * pci_hp_spurious_link_change - check for spurious link changes + * @pdev: PCI hotplug bridge + * + * Check whether a code section is executing concurrently which is causing + * spurious link changes on the Secondary Bus of @pdev. Await the end of the + * code section if so. + * + * May be called by hotplug drivers to check whether a link change is spurious + * and can be ignored. + * + * Because a genuine link change may have occurred in-between a spurious link + * change and the invocation of this function, hotplug drivers should perform + * sanity checks such as retrieving the current link state and bringing down + * the slot if the link is down. + * + * Return: %true if such a code section has been executing concurrently, + * otherwise %false. Also return %true if such a code section has not been + * executing concurrently, but at least once since the last invocation of this + * function. + */ +bool pci_hp_spurious_link_change(struct pci_dev *pdev) +{ + wait_event(pci_hp_link_change_wq, + !test_bit(PCI_LINK_CHANGING, &pdev->priv_flags)); + + return test_and_clear_bit(PCI_LINK_CHANGED, &pdev->priv_flags); +} + static int __init pci_hotplug_init(void) { int result; diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h index 273dd8c66f4e..debc79b0adfb 100644 --- a/drivers/pci/hotplug/pciehp.h +++ b/drivers/pci/hotplug/pciehp.h @@ -187,6 +187,7 @@ int pciehp_card_present(struct controller *ctrl); int pciehp_card_present_or_link_active(struct controller *ctrl); int pciehp_check_link_status(struct controller *ctrl); int pciehp_check_link_active(struct controller *ctrl); +bool pciehp_device_replaced(struct controller *ctrl); void pciehp_release_ctrl(struct controller *ctrl); int pciehp_sysfs_enable_slot(struct hotplug_slot *hotplug_slot); diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c index 997841c69893..f59baa912970 100644 --- a/drivers/pci/hotplug/pciehp_core.c +++ b/drivers/pci/hotplug/pciehp_core.c @@ -284,35 +284,6 @@ static int pciehp_suspend(struct pcie_device *dev) return 0; } -static bool pciehp_device_replaced(struct controller *ctrl) -{ - struct pci_dev *pdev __free(pci_dev_put) = NULL; - u32 reg; - - if (pci_dev_is_disconnected(ctrl->pcie->port)) - return false; - - pdev = pci_get_slot(ctrl->pcie->port->subordinate, PCI_DEVFN(0, 0)); - if (!pdev) - return true; - - if (pci_read_config_dword(pdev, PCI_VENDOR_ID, ®) || - reg != (pdev->vendor | (pdev->device << 16)) || - pci_read_config_dword(pdev, PCI_CLASS_REVISION, ®) || - reg != (pdev->revision | (pdev->class << 8))) - return true; - - if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL && - (pci_read_config_dword(pdev, PCI_SUBSYSTEM_VENDOR_ID, ®) || - reg != (pdev->subsystem_vendor | (pdev->subsystem_device << 16)))) - return true; - - if (pci_get_dsn(pdev) != ctrl->dsn) - return true; - - return false; -} - static int pciehp_resume_noirq(struct pcie_device *dev) { struct controller *ctrl = get_service_data(dev); diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c index d603a7aa7483..bcc938d4420f 100644 --- a/drivers/pci/hotplug/pciehp_ctrl.c +++ b/drivers/pci/hotplug/pciehp_ctrl.c @@ -131,7 +131,7 @@ static void remove_board(struct controller *ctrl, bool safe_removal) 
INDICATOR_NOOP); /* Don't carry LBMS indications across */ - pcie_reset_lbms_count(ctrl->pcie->port); + pcie_reset_lbms(ctrl->pcie->port); } static int pciehp_enable_slot(struct controller *ctrl); diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c index 8a09fb6083e2..bcc51b26d03d 100644 --- a/drivers/pci/hotplug/pciehp_hpc.c +++ b/drivers/pci/hotplug/pciehp_hpc.c @@ -563,20 +563,50 @@ void pciehp_power_off_slot(struct controller *ctrl) PCI_EXP_SLTCTL_PWR_OFF); } -static void pciehp_ignore_dpc_link_change(struct controller *ctrl, - struct pci_dev *pdev, int irq) +bool pciehp_device_replaced(struct controller *ctrl) +{ + struct pci_dev *pdev __free(pci_dev_put) = NULL; + u32 reg; + + if (pci_dev_is_disconnected(ctrl->pcie->port)) + return false; + + pdev = pci_get_slot(ctrl->pcie->port->subordinate, PCI_DEVFN(0, 0)); + if (!pdev) + return true; + + if (pci_read_config_dword(pdev, PCI_VENDOR_ID, ®) || + reg != (pdev->vendor | (pdev->device << 16)) || + pci_read_config_dword(pdev, PCI_CLASS_REVISION, ®) || + reg != (pdev->revision | (pdev->class << 8))) + return true; + + if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL && + (pci_read_config_dword(pdev, PCI_SUBSYSTEM_VENDOR_ID, ®) || + reg != (pdev->subsystem_vendor | (pdev->subsystem_device << 16)))) + return true; + + if (pci_get_dsn(pdev) != ctrl->dsn) + return true; + + return false; +} + +static void pciehp_ignore_link_change(struct controller *ctrl, + struct pci_dev *pdev, int irq, + u16 ignored_events) { /* * Ignore link changes which occurred while waiting for DPC recovery. * Could be several if DPC triggered multiple times consecutively. + * Also ignore link changes caused by Secondary Bus Reset, etc. */ synchronize_hardirq(irq); - atomic_and(~PCI_EXP_SLTSTA_DLLSC, &ctrl->pending_events); + atomic_and(~ignored_events, &ctrl->pending_events); if (pciehp_poll_mode) pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, - PCI_EXP_SLTSTA_DLLSC); - ctrl_info(ctrl, "Slot(%s): Link Down/Up ignored (recovered by DPC)\n", - slot_name(ctrl)); + ignored_events); + ctrl_info(ctrl, "Slot(%s): Link Down/Up ignored\n", slot_name(ctrl)); /* * If the link is unexpectedly down after successful recovery, @@ -584,8 +614,8 @@ static void pciehp_ignore_dpc_link_change(struct controller *ctrl, * Synthesize it to ensure that it is acted on. */ down_read_nested(&ctrl->reset_lock, ctrl->depth); - if (!pciehp_check_link_active(ctrl)) - pciehp_request(ctrl, PCI_EXP_SLTSTA_DLLSC); + if (!pciehp_check_link_active(ctrl) || pciehp_device_replaced(ctrl)) + pciehp_request(ctrl, ignored_events); up_read(&ctrl->reset_lock); } @@ -732,12 +762,19 @@ static irqreturn_t pciehp_ist(int irq, void *dev_id) /* * Ignore Link Down/Up events caused by Downstream Port Containment - * if recovery from the error succeeded. + * if recovery succeeded, or caused by Secondary Bus Reset, + * suspend to D3cold, firmware update, FPGA reconfiguration, etc. 
*/ - if ((events & PCI_EXP_SLTSTA_DLLSC) && pci_dpc_recovered(pdev) && + if ((events & (PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_DLLSC)) && + (pci_dpc_recovered(pdev) || pci_hp_spurious_link_change(pdev)) && ctrl->state == ON_STATE) { - events &= ~PCI_EXP_SLTSTA_DLLSC; - pciehp_ignore_dpc_link_change(ctrl, pdev, irq); + u16 ignored_events = PCI_EXP_SLTSTA_DLLSC; + + if (!ctrl->inband_presence_disabled) + ignored_events |= PCI_EXP_SLTSTA_PDC; + + events &= ~ignored_events; + pciehp_ignore_link_change(ctrl, pdev, irq, ignored_events); } /* @@ -902,7 +939,6 @@ int pciehp_reset_slot(struct hotplug_slot *hotplug_slot, bool probe) { struct controller *ctrl = to_ctrl(hotplug_slot); struct pci_dev *pdev = ctrl_dev(ctrl); - u16 stat_mask = 0, ctrl_mask = 0; int rc; if (probe) @@ -910,23 +946,11 @@ int pciehp_reset_slot(struct hotplug_slot *hotplug_slot, bool probe) down_write_nested(&ctrl->reset_lock, ctrl->depth); - if (!ATTN_BUTTN(ctrl)) { - ctrl_mask |= PCI_EXP_SLTCTL_PDCE; - stat_mask |= PCI_EXP_SLTSTA_PDC; - } - ctrl_mask |= PCI_EXP_SLTCTL_DLLSCE; - stat_mask |= PCI_EXP_SLTSTA_DLLSC; - - pcie_write_cmd(ctrl, 0, ctrl_mask); - ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__, - pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, 0); + pci_hp_ignore_link_change(pdev); rc = pci_bridge_secondary_bus_reset(ctrl->pcie->port); - pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, stat_mask); - pcie_write_cmd_nowait(ctrl, ctrl_mask, ctrl_mask); - ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__, - pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, ctrl_mask); + pci_hp_unignore_link_change(pdev); up_write(&ctrl->reset_lock); return rc; @@ -971,7 +995,7 @@ static inline int pcie_hotplug_depth(struct pci_dev *dev) while (bus->parent) { bus = bus->parent; - if (bus->self && bus->self->is_hotplug_bridge) + if (bus->self && bus->self->is_pciehp) depth++; } diff --git a/drivers/pci/hotplug/pnv_php.c b/drivers/pci/hotplug/pnv_php.c index 573a41869c15..c5345bff9a55 100644 --- a/drivers/pci/hotplug/pnv_php.c +++ b/drivers/pci/hotplug/pnv_php.c @@ -3,12 +3,15 @@ * PCI Hotplug Driver for PowerPC PowerNV platform. * * Copyright Gavin Shan, IBM Corporation 2016. 
+ * Copyright (C) 2025 Raptor Engineering, LLC + * Copyright (C) 2025 Raptor Computing Systems, LLC */ #include <linux/bitfield.h> #include <linux/libfdt.h> #include <linux/module.h> #include <linux/pci.h> +#include <linux/delay.h> #include <linux/pci_hotplug.h> #include <linux/of_fdt.h> @@ -36,8 +39,10 @@ static void pnv_php_register(struct device_node *dn); static void pnv_php_unregister_one(struct device_node *dn); static void pnv_php_unregister(struct device_node *dn); +static void pnv_php_enable_irq(struct pnv_php_slot *php_slot); + static void pnv_php_disable_irq(struct pnv_php_slot *php_slot, - bool disable_device) + bool disable_device, bool disable_msi) { struct pci_dev *pdev = php_slot->pdev; u16 ctrl; @@ -53,19 +58,15 @@ static void pnv_php_disable_irq(struct pnv_php_slot *php_slot, php_slot->irq = 0; } - if (php_slot->wq) { - destroy_workqueue(php_slot->wq); - php_slot->wq = NULL; - } - - if (disable_device) { + if (disable_device || disable_msi) { if (pdev->msix_enabled) pci_disable_msix(pdev); else if (pdev->msi_enabled) pci_disable_msi(pdev); + } + if (disable_device) pci_disable_device(pdev); - } } static void pnv_php_free_slot(struct kref *kref) @@ -74,7 +75,8 @@ static void pnv_php_free_slot(struct kref *kref) struct pnv_php_slot, kref); WARN_ON(!list_empty(&php_slot->children)); - pnv_php_disable_irq(php_slot, false); + pnv_php_disable_irq(php_slot, false, false); + destroy_workqueue(php_slot->wq); kfree(php_slot->name); kfree(php_slot); } @@ -391,6 +393,20 @@ static int pnv_php_get_power_state(struct hotplug_slot *slot, u8 *state) return 0; } +static int pcie_check_link_active(struct pci_dev *pdev) +{ + u16 lnk_status; + int ret; + + ret = pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status); + if (ret == PCIBIOS_DEVICE_NOT_FOUND || PCI_POSSIBLE_ERROR(lnk_status)) + return -ENODEV; + + ret = !!(lnk_status & PCI_EXP_LNKSTA_DLLLA); + + return ret; +} + static int pnv_php_get_adapter_state(struct hotplug_slot *slot, u8 *state) { struct pnv_php_slot *php_slot = to_pnv_php_slot(slot); @@ -403,6 +419,19 @@ static int pnv_php_get_adapter_state(struct hotplug_slot *slot, u8 *state) */ ret = pnv_pci_get_presence_state(php_slot->id, &presence); if (ret >= 0) { + if (pci_pcie_type(php_slot->pdev) == PCI_EXP_TYPE_DOWNSTREAM && + presence == OPAL_PCI_SLOT_EMPTY) { + /* + * Similar to pciehp_hpc, check whether the Link Active + * bit is set to account for broken downstream bridges + * that don't properly assert Presence Detect State, as + * was observed on the Microsemi Switchtec PM8533 PFX + * [11f8:8533]. 
+ */ + if (pcie_check_link_active(php_slot->pdev) > 0) + presence = OPAL_PCI_SLOT_PRESENT; + } + *state = presence; ret = 0; } else { @@ -412,10 +441,23 @@ static int pnv_php_get_adapter_state(struct hotplug_slot *slot, u8 *state) return ret; } +static int pnv_php_get_raw_indicator_status(struct hotplug_slot *slot, u8 *state) +{ + struct pnv_php_slot *php_slot = to_pnv_php_slot(slot); + struct pci_dev *bridge = php_slot->pdev; + u16 status; + + pcie_capability_read_word(bridge, PCI_EXP_SLTCTL, &status); + *state = (status & (PCI_EXP_SLTCTL_AIC | PCI_EXP_SLTCTL_PIC)) >> 6; + return 0; +} + + static int pnv_php_get_attention_state(struct hotplug_slot *slot, u8 *state) { struct pnv_php_slot *php_slot = to_pnv_php_slot(slot); + pnv_php_get_raw_indicator_status(slot, &php_slot->attention_state); *state = php_slot->attention_state; return 0; } @@ -433,7 +475,7 @@ static int pnv_php_set_attention_state(struct hotplug_slot *slot, u8 state) mask = PCI_EXP_SLTCTL_AIC; if (state) - new = PCI_EXP_SLTCTL_ATTN_IND_ON; + new = FIELD_PREP(PCI_EXP_SLTCTL_AIC, state); else new = PCI_EXP_SLTCTL_ATTN_IND_OFF; @@ -442,6 +484,61 @@ static int pnv_php_set_attention_state(struct hotplug_slot *slot, u8 state) return 0; } +static int pnv_php_activate_slot(struct pnv_php_slot *php_slot, + struct hotplug_slot *slot) +{ + int ret, i; + + /* + * Issue initial slot activation command to firmware + * + * Firmware will power slot on, attempt to train the link, and + * discover any downstream devices. If this process fails, firmware + * will return an error code and an invalid device tree. Failure + * can be caused for multiple reasons, including a faulty + * downstream device, poor connection to the downstream device, or + * a previously latched PHB fence. On failure, issue fundamental + * reset up to three times before aborting. + */ + ret = pnv_php_set_slot_power_state(slot, OPAL_PCI_SLOT_POWER_ON); + if (ret) { + SLOT_WARN( + php_slot, + "PCI slot activation failed with error code %d, possible frozen PHB", + ret); + SLOT_WARN( + php_slot, + "Attempting complete PHB reset before retrying slot activation\n"); + for (i = 0; i < 3; i++) { + /* + * Slot activation failed, PHB may be fenced from a + * prior device failure. + * + * Use the OPAL fundamental reset call to both try a + * device reset and clear any potentially active PHB + * fence / freeze. 
+ */ + SLOT_WARN(php_slot, "Try %d...\n", i + 1); + pci_set_pcie_reset_state(php_slot->pdev, + pcie_warm_reset); + msleep(250); + pci_set_pcie_reset_state(php_slot->pdev, + pcie_deassert_reset); + + ret = pnv_php_set_slot_power_state( + slot, OPAL_PCI_SLOT_POWER_ON); + if (!ret) + break; + } + + if (i >= 3) + SLOT_WARN(php_slot, + "Failed to bring slot online, aborting!\n"); + } + + return ret; +} + static int pnv_php_enable(struct pnv_php_slot *php_slot, bool rescan) { struct hotplug_slot *slot = &php_slot->slot; @@ -504,7 +601,7 @@ static int pnv_php_enable(struct pnv_php_slot *php_slot, bool rescan) goto scan; /* Power is off, turn it on and then scan the slot */ - ret = pnv_php_set_slot_power_state(slot, OPAL_PCI_SLOT_POWER_ON); + ret = pnv_php_activate_slot(php_slot, slot); if (ret) return ret; @@ -561,8 +658,58 @@ static int pnv_php_reset_slot(struct hotplug_slot *slot, bool probe) static int pnv_php_enable_slot(struct hotplug_slot *slot) { struct pnv_php_slot *php_slot = to_pnv_php_slot(slot); + u32 prop32; + int ret; + + ret = pnv_php_enable(php_slot, true); + if (ret) + return ret; + + /* (Re-)enable interrupt if the slot supports surprise hotplug */ + ret = of_property_read_u32(php_slot->dn, "ibm,slot-surprise-pluggable", + &prop32); + if (!ret && prop32) + pnv_php_enable_irq(php_slot); + + return 0; +} + +/* + * Disable any hotplug interrupts for all slots on the provided bus, as well as + * all downstream slots in preparation for a hot unplug. + */ +static int pnv_php_disable_all_irqs(struct pci_bus *bus) +{ + struct pci_bus *child_bus; + struct pci_slot *slot; + + /* First go down child buses */ + list_for_each_entry(child_bus, &bus->children, node) + pnv_php_disable_all_irqs(child_bus); + + /* Disable IRQs for all pnv_php slots on this bus */ + list_for_each_entry(slot, &bus->slots, list) { + struct pnv_php_slot *php_slot = to_pnv_php_slot(slot->hotplug); - return pnv_php_enable(php_slot, true); + pnv_php_disable_irq(php_slot, false, true); + } + + return 0; +} + +/* + * Disable any hotplug interrupts for all downstream slots on the provided + * bus in preparation for a hot unplug. + */ +static int pnv_php_disable_all_downstream_irqs(struct pci_bus *bus) +{ + struct pci_bus *child_bus; + + /* Go down child buses, recursively deactivating their IRQs */ + list_for_each_entry(child_bus, &bus->children, node) + pnv_php_disable_all_irqs(child_bus); + + return 0; } static int pnv_php_disable_slot(struct hotplug_slot *slot) @@ -579,6 +726,13 @@ static int pnv_php_disable_slot(struct hotplug_slot *slot) php_slot->state != PNV_PHP_STATE_REGISTERED) return 0; + /* + * Free all IRQ resources from all child slots before remove. + * Note that we do not disable the root slot IRQ here as that + * would also deactivate the slot hot (re)plug interrupt! 
+ */ + pnv_php_disable_all_downstream_irqs(php_slot->bus); + /* Remove all devices behind the slot */ pci_lock_rescan_remove(); pci_hp_remove_devices(php_slot->bus); @@ -647,6 +801,15 @@ static struct pnv_php_slot *pnv_php_alloc_slot(struct device_node *dn) return NULL; } + /* Allocate workqueue for this slot's interrupt handling */ + php_slot->wq = alloc_workqueue("pciehp-%s", 0, 0, php_slot->name); + if (!php_slot->wq) { + SLOT_WARN(php_slot, "Cannot alloc workqueue\n"); + kfree(php_slot->name); + kfree(php_slot); + return NULL; + } + if (dn->child && PCI_DN(dn->child)) php_slot->slot_no = PCI_SLOT(PCI_DN(dn->child)->devfn); else @@ -745,16 +908,63 @@ static int pnv_php_enable_msix(struct pnv_php_slot *php_slot) return entry.vector; } +static void +pnv_php_detect_clear_suprise_removal_freeze(struct pnv_php_slot *php_slot) +{ + struct pci_dev *pdev = php_slot->pdev; + struct eeh_dev *edev; + struct eeh_pe *pe; + int i, rc; + + /* + * When a device is surprise removed from a downstream bridge slot, + * the upstream bridge port can still end up frozen due to related EEH + * events, which will in turn block the MSI interrupts for slot hotplug + * detection. + * + * Detect and thaw any frozen upstream PE after slot deactivation. + */ + edev = pci_dev_to_eeh_dev(pdev); + pe = edev ? edev->pe : NULL; + rc = eeh_pe_get_state(pe); + if ((rc == -ENODEV) || (rc == -ENOENT)) { + SLOT_WARN( + php_slot, + "Upstream bridge PE state unknown, hotplug detect may fail\n"); + } else { + if (pe->state & EEH_PE_ISOLATED) { + SLOT_WARN( + php_slot, + "Upstream bridge PE %02x frozen, thawing...\n", + pe->addr); + for (i = 0; i < 3; i++) + if (!eeh_unfreeze_pe(pe)) + break; + if (i >= 3) + SLOT_WARN( + php_slot, + "Unable to thaw PE %02x, hotplug detect will fail!\n", + pe->addr); + else + SLOT_WARN(php_slot, + "PE %02x thawed successfully\n", + pe->addr); + } + } +} + static void pnv_php_event_handler(struct work_struct *work) { struct pnv_php_event *event = container_of(work, struct pnv_php_event, work); struct pnv_php_slot *php_slot = event->php_slot; - if (event->added) + if (event->added) { pnv_php_enable_slot(&php_slot->slot); - else + } else { pnv_php_disable_slot(&php_slot->slot); + pnv_php_detect_clear_suprise_removal_freeze(php_slot); + } kfree(event); } @@ -843,14 +1053,6 @@ static void pnv_php_init_irq(struct pnv_php_slot *php_slot, int irq) u16 sts, ctrl; int ret; - /* Allocate workqueue */ - php_slot->wq = alloc_workqueue("pciehp-%s", 0, 0, php_slot->name); - if (!php_slot->wq) { - SLOT_WARN(php_slot, "Cannot alloc workqueue\n"); - pnv_php_disable_irq(php_slot, true); - return; - } - /* Check PDC (Presence Detection Change) is broken or not */ ret = of_property_read_u32(php_slot->dn, "ibm,slot-broken-pdc", &broken_pdc); @@ -869,7 +1071,7 @@ static void pnv_php_init_irq(struct pnv_php_slot *php_slot, int irq) ret = request_irq(irq, pnv_php_interrupt, IRQF_SHARED, php_slot->name, php_slot); if (ret) { - pnv_php_disable_irq(php_slot, true); + pnv_php_disable_irq(php_slot, true, true); SLOT_WARN(php_slot, "Error %d enabling IRQ %d\n", ret, irq); return; } diff --git a/drivers/pci/hotplug/s390_pci_hpc.c b/drivers/pci/hotplug/s390_pci_hpc.c index 055518ee354d..d9996516f49e 100644 --- a/drivers/pci/hotplug/s390_pci_hpc.c +++ b/drivers/pci/hotplug/s390_pci_hpc.c @@ -59,16 +59,15 @@ static int disable_slot(struct hotplug_slot *hotplug_slot) pdev = pci_get_slot(zdev->zbus->bus, zdev->devfn); if (pdev && pci_num_vf(pdev)) { - pci_dev_put(pdev); rc = -EBUSY; goto out; } rc = zpci_deconfigure_device(zdev); out: 
- mutex_unlock(&zdev->state_lock); if (pdev) pci_dev_put(pdev); + mutex_unlock(&zdev->state_lock); return rc; } diff --git a/drivers/pci/hotplug/shpchp_hpc.c b/drivers/pci/hotplug/shpchp_hpc.c index bfbec7c1a6b1..183bf43510a1 100644 --- a/drivers/pci/hotplug/shpchp_hpc.c +++ b/drivers/pci/hotplug/shpchp_hpc.c @@ -211,7 +211,7 @@ static inline int shpc_indirect_read(struct controller *ctrl, int index, */ static void int_poll_timeout(struct timer_list *t) { - struct controller *ctrl = from_timer(ctrl, t, poll_timer); + struct controller *ctrl = timer_container_of(ctrl, t, poll_timer); /* Poll for interrupt events. regs == NULL => polling */ shpc_isr(0, ctrl); @@ -564,7 +564,7 @@ void shpchp_release_ctlr(struct controller *ctrl) shpc_writel(ctrl, SERR_INTR_ENABLE, serr_int); if (shpchp_poll_mode) - del_timer(&ctrl->poll_timer); + timer_delete(&ctrl->poll_timer); else { free_irq(ctrl->pci_dev->irq, ctrl); pci_disable_msi(ctrl->pci_dev); diff --git a/drivers/pci/iomap.c b/drivers/pci/iomap.c index fe706ed946df..ea86c282a386 100644 --- a/drivers/pci/iomap.c +++ b/drivers/pci/iomap.c @@ -25,10 +25,6 @@ * * @maxlen specifies the maximum length to map. If you want to get access to * the complete BAR from offset to the end, pass %0 here. - * - * NOTE: - * This function is never managed, even if you initialized with - * pcim_enable_device(). * */ void __iomem *pci_iomap_range(struct pci_dev *dev, int bar, @@ -76,10 +72,6 @@ EXPORT_SYMBOL(pci_iomap_range); * * @maxlen specifies the maximum length to map. If you want to get access to * the complete BAR from offset to the end, pass %0 here. - * - * NOTE: - * This function is never managed, even if you initialized with - * pcim_enable_device(). * */ void __iomem *pci_iomap_wc_range(struct pci_dev *dev, int bar, @@ -127,10 +119,6 @@ EXPORT_SYMBOL_GPL(pci_iomap_wc_range); * * @maxlen specifies the maximum length to map. If you want to get access to * the complete BAR without checking for its length first, pass %0 here. - * - * NOTE: - * This function is never managed, even if you initialized with - * pcim_enable_device(). If you need automatic cleanup, use pcim_iomap(). * */ void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen) { @@ -152,10 +140,6 @@ EXPORT_SYMBOL(pci_iomap); * * @maxlen specifies the maximum length to map. If you want to get access to * the complete BAR without checking for its length first, pass %0 here. - * - * NOTE: - * This function is never managed, even if you initialized with - * pcim_enable_device(). 
* */ void __iomem *pci_iomap_wc(struct pci_dev *dev, int bar, unsigned long maxlen) { diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c index 10693b5d7eb6..ac4375954c94 100644 --- a/drivers/pci/iov.c +++ b/drivers/pci/iov.c @@ -7,11 +7,16 @@ * Copyright (C) 2009 Intel Corporation, Yu Zhao <yu.zhao@intel.com> */ +#include <linux/bitfield.h> +#include <linux/bits.h> +#include <linux/log2.h> #include <linux/pci.h> +#include <linux/sizes.h> #include <linux/slab.h> #include <linux/export.h> #include <linux/string.h> #include <linux/delay.h> +#include <asm/div64.h> #include "pci.h" #define VIRTFN_ID_LEN 17 /* "virtfn%u\0" for 2^32 - 1 */ @@ -150,7 +155,28 @@ resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno) if (!dev->is_physfn) return 0; - return dev->sriov->barsz[resno - PCI_IOV_RESOURCES]; + return dev->sriov->barsz[pci_resource_num_to_vf_bar(resno)]; +} + +void pci_iov_resource_set_size(struct pci_dev *dev, int resno, + resource_size_t size) +{ + if (!pci_resource_is_iov(resno)) { + pci_warn(dev, "%s is not an IOV resource\n", + pci_resource_name(dev, resno)); + return; + } + + dev->sriov->barsz[pci_resource_num_to_vf_bar(resno)] = size; +} + +bool pci_iov_is_memory_decoding_enabled(struct pci_dev *dev) +{ + u16 cmd; + + pci_read_config_word(dev, dev->sriov->pos + PCI_SRIOV_CTRL, &cmd); + + return cmd & PCI_SRIOV_CTRL_MSE; } static void pci_read_vf_config_common(struct pci_dev *virtfn) @@ -341,12 +367,14 @@ int pci_iov_add_virtfn(struct pci_dev *dev, int id) virtfn->multifunction = 0; for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) { - res = &dev->resource[i + PCI_IOV_RESOURCES]; + int idx = pci_resource_num_from_vf_bar(i); + + res = &dev->resource[idx]; if (!res->parent) continue; virtfn->resource[i].name = pci_name(virtfn); virtfn->resource[i].flags = res->flags; - size = pci_iov_resource_size(dev, i + PCI_IOV_RESOURCES); + size = pci_iov_resource_size(dev, idx); resource_set_range(&virtfn->resource[i], res->start + size * id, size); rc = request_resource(res, &virtfn->resource[i]); @@ -643,8 +671,13 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn) nres = 0; for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) { - bars |= (1 << (i + PCI_IOV_RESOURCES)); - res = &dev->resource[i + PCI_IOV_RESOURCES]; + int idx = pci_resource_num_from_vf_bar(i); + resource_size_t vf_bar_sz = pci_iov_resource_size(dev, idx); + + bars |= (1 << idx); + res = &dev->resource[idx]; + if (vf_bar_sz * nr_virtfn > resource_size(res)) + continue; if (res->parent) nres++; } @@ -810,8 +843,10 @@ found: nres = 0; for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) { - res = &dev->resource[i + PCI_IOV_RESOURCES]; - res_name = pci_resource_name(dev, i + PCI_IOV_RESOURCES); + int idx = pci_resource_num_from_vf_bar(i); + + res = &dev->resource[idx]; + res_name = pci_resource_name(dev, idx); /* * If it is already FIXED, don't change it, something @@ -850,6 +885,7 @@ found: pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link); if (pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END) iov->link = PCI_DEVFN(PCI_SLOT(dev->devfn), iov->link); + iov->vf_rebar_cap = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_VF_REBAR); if (pdev) iov->dev = pci_dev_get(pdev); @@ -869,7 +905,7 @@ fail_max_buses: dev->is_physfn = 0; failed: for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) { - res = &dev->resource[i + PCI_IOV_RESOURCES]; + res = &dev->resource[pci_resource_num_from_vf_bar(i)]; res->flags = 0; } @@ -888,6 +924,30 @@ static void sriov_release(struct pci_dev *dev) dev->sriov = NULL; } +static void sriov_restore_vf_rebar_state(struct pci_dev 
*dev) +{ + unsigned int pos, nbars, i; + u32 ctrl; + + pos = pci_iov_vf_rebar_cap(dev); + if (!pos) + return; + + pci_read_config_dword(dev, pos + PCI_VF_REBAR_CTRL, &ctrl); + nbars = FIELD_GET(PCI_VF_REBAR_CTRL_NBAR_MASK, ctrl); + + for (i = 0; i < nbars; i++, pos += 8) { + int bar_idx, size; + + pci_read_config_dword(dev, pos + PCI_VF_REBAR_CTRL, &ctrl); + bar_idx = FIELD_GET(PCI_VF_REBAR_CTRL_BAR_IDX, ctrl); + size = pci_rebar_bytes_to_size(dev->sriov->barsz[bar_idx]); + ctrl &= ~PCI_VF_REBAR_CTRL_BAR_SIZE; + ctrl |= FIELD_PREP(PCI_VF_REBAR_CTRL_BAR_SIZE, size); + pci_write_config_dword(dev, pos + PCI_VF_REBAR_CTRL, ctrl); + } +} + static void sriov_restore_state(struct pci_dev *dev) { int i; @@ -907,7 +967,7 @@ static void sriov_restore_state(struct pci_dev *dev) pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, ctrl); for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) - pci_update_resource(dev, i + PCI_IOV_RESOURCES); + pci_update_resource(dev, pci_resource_num_from_vf_bar(i)); pci_write_config_dword(dev, iov->pos + PCI_SRIOV_SYS_PGSIZE, iov->pgsz); pci_iov_set_numvfs(dev, iov->num_VFs); @@ -973,7 +1033,7 @@ void pci_iov_update_resource(struct pci_dev *dev, int resno) { struct pci_sriov *iov = dev->is_physfn ? dev->sriov : NULL; struct resource *res = pci_resource_n(dev, resno); - int vf_bar = resno - PCI_IOV_RESOURCES; + int vf_bar = pci_resource_num_to_vf_bar(resno); struct pci_bus_region region; u16 cmd; u32 new; @@ -1047,8 +1107,10 @@ resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, int resno) */ void pci_restore_iov_state(struct pci_dev *dev) { - if (dev->is_physfn) + if (dev->is_physfn) { + sriov_restore_vf_rebar_state(dev); sriov_restore_state(dev); + } } /** @@ -1255,3 +1317,72 @@ int pci_sriov_configure_simple(struct pci_dev *dev, int nr_virtfn) return nr_virtfn; } EXPORT_SYMBOL_GPL(pci_sriov_configure_simple); + +/** + * pci_iov_vf_bar_set_size - set a new size for a VF BAR + * @dev: the PCI device + * @resno: the resource number + * @size: new size as defined in the spec (0=1MB, 31=128TB) + * + * Set the new size of a VF BAR that supports VF resizable BAR capability. + * Unlike pci_resize_resource(), this does not cause the resource that + * reserves the MMIO space (originally up to total_VFs) to be resized, which + * means that following calls to pci_enable_sriov() can fail if the resources + * no longer fit. + * + * Return: 0 on success, or negative on failure. + */ +int pci_iov_vf_bar_set_size(struct pci_dev *dev, int resno, int size) +{ + u32 sizes; + int ret; + + if (!pci_resource_is_iov(resno)) + return -EINVAL; + + if (pci_iov_is_memory_decoding_enabled(dev)) + return -EBUSY; + + sizes = pci_rebar_get_possible_sizes(dev, resno); + if (!sizes) + return -ENOTSUPP; + + if (!(sizes & BIT(size))) + return -EINVAL; + + ret = pci_rebar_set_size(dev, resno, size); + if (ret) + return ret; + + pci_iov_resource_set_size(dev, resno, pci_rebar_size_to_bytes(size)); + + return 0; +} +EXPORT_SYMBOL_GPL(pci_iov_vf_bar_set_size); + +/** + * pci_iov_vf_bar_get_sizes - get VF BAR sizes allowing to create up to num_vfs + * @dev: the PCI device + * @resno: the resource number + * @num_vfs: number of VFs + * + * Get the sizes of a VF resizable BAR that can accommodate @num_vfs within + * the currently assigned size of the resource @resno. + * + * Return: A bitmask of sizes in format defined in the spec (bit 0=1MB, + * bit 31=128TB). 
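+ *
+ * A PF driver would typically pair this with pci_iov_vf_bar_set_size(),
+ * e.g. picking the largest size that still accommodates @num_vfs. An
+ * illustrative sketch (the BAR choice and error handling are
+ * assumptions, not a fixed recipe):
+ *
+ *	sizes = pci_iov_vf_bar_get_sizes(dev, PCI_IOV_RESOURCES, num_vfs);
+ *	if (!sizes)
+ *		return -ENOSPC;
+ *	ret = pci_iov_vf_bar_set_size(dev, PCI_IOV_RESOURCES,
+ *				      fls(sizes) - 1);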
+ */ +u32 pci_iov_vf_bar_get_sizes(struct pci_dev *dev, int resno, int num_vfs) +{ + u64 vf_len = pci_resource_len(dev, resno); + u32 sizes; + + if (!num_vfs) + return 0; + + do_div(vf_len, num_vfs); + sizes = (roundup_pow_of_two(vf_len + 1) - 1) >> ilog2(SZ_1M); + + return sizes & pci_rebar_get_possible_sizes(dev, resno); +} +EXPORT_SYMBOL_GPL(pci_iov_vf_bar_get_sizes); diff --git a/drivers/pci/msi/api.c b/drivers/pci/msi/api.c index 17ec6332cb1d..818d55fbad0d 100644 --- a/drivers/pci/msi/api.c +++ b/drivers/pci/msi/api.c @@ -53,10 +53,9 @@ void pci_disable_msi(struct pci_dev *dev) if (!pci_msi_enabled() || !dev || !dev->msi_enabled) return; - msi_lock_descs(&dev->dev); + guard(msi_descs_lock)(&dev->dev); pci_msi_shutdown(dev); pci_free_msi_irqs(dev); - msi_unlock_descs(&dev->dev); } EXPORT_SYMBOL(pci_disable_msi); @@ -196,10 +195,9 @@ void pci_disable_msix(struct pci_dev *dev) if (!pci_msi_enabled() || !dev || !dev->msix_enabled) return; - msi_lock_descs(&dev->dev); + guard(msi_descs_lock)(&dev->dev); pci_msix_shutdown(dev); pci_free_msi_irqs(dev); - msi_unlock_descs(&dev->dev); } EXPORT_SYMBOL(pci_disable_msix); @@ -401,7 +399,7 @@ EXPORT_SYMBOL_GPL(pci_restore_msi_state); * Return: true if MSI has not been globally disabled through ACPI FADT, * PCI bridge quirks, or the "pci=nomsi" kernel command-line option. */ -int pci_msi_enabled(void) +bool pci_msi_enabled(void) { return pci_msi_enable; } diff --git a/drivers/pci/msi/irqdomain.c b/drivers/pci/msi/irqdomain.c index d7ba8795d60f..0938ef7ebabf 100644 --- a/drivers/pci/msi/irqdomain.c +++ b/drivers/pci/msi/irqdomain.c @@ -222,13 +222,14 @@ static void pci_irq_unmask_msix(struct irq_data *data) pci_msix_unmask(irq_data_get_msi_desc(data)); } -static void pci_msix_prepare_desc(struct irq_domain *domain, msi_alloc_info_t *arg, - struct msi_desc *desc) +void pci_msix_prepare_desc(struct irq_domain *domain, msi_alloc_info_t *arg, + struct msi_desc *desc) { /* Don't fiddle with preallocated MSI descriptors */ if (!desc->pci.mask_base) msix_prepare_msi_desc(to_pci_dev(desc->dev), desc); } +EXPORT_SYMBOL_GPL(pci_msix_prepare_desc); static const struct msi_domain_template pci_msix_template = { .chip = { @@ -271,6 +272,7 @@ static bool pci_create_device_domain(struct pci_dev *pdev, const struct msi_doma /** * pci_setup_msi_device_domain - Setup a device MSI interrupt domain * @pdev: The PCI device to create the domain on + * @hwsize: The maximum number of MSI vectors * * Return: * True when: @@ -287,7 +289,7 @@ static bool pci_create_device_domain(struct pci_dev *pdev, const struct msi_doma * - The device is removed * - MSI is disabled and a MSI-X domain is created */ -bool pci_setup_msi_device_domain(struct pci_dev *pdev) +bool pci_setup_msi_device_domain(struct pci_dev *pdev, unsigned int hwsize) { if (WARN_ON_ONCE(pdev->msix_enabled)) return false; @@ -297,7 +299,7 @@ bool pci_setup_msi_device_domain(struct pci_dev *pdev) if (pci_match_device_domain(pdev, DOMAIN_BUS_PCI_DEVICE_MSIX)) msi_remove_device_irq_domain(&pdev->dev, MSI_DEFAULT_DOMAIN); - return pci_create_device_domain(pdev, &pci_msi_template, 1); + return pci_create_device_domain(pdev, &pci_msi_template, hwsize); } /** @@ -427,6 +429,26 @@ u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev) } /** + * pci_msi_map_rid_ctlr_node - Get the MSI controller node and MSI requester id (RID) + * @pdev: The PCI device + * @node: Pointer to store the MSI controller device node + * + * Use the firmware data to find the MSI controller node for @pdev. 
+ * If found map the RID and initialize @node with it. @node value must + * be set to NULL on entry. + * + * Returns: The RID. + */ +u32 pci_msi_map_rid_ctlr_node(struct pci_dev *pdev, struct device_node **node) +{ + u32 rid = pci_dev_id(pdev); + + pci_for_each_dma_alias(pdev, get_msi_id_cb, &rid); + + return of_msi_xlate(&pdev->dev, node, rid); +} + +/** * pci_msi_get_device_domain - Get the MSI domain for a given PCI device * @pdev: The PCI device * diff --git a/drivers/pci/msi/msi.c b/drivers/pci/msi/msi.c index 6569ba3577fe..34d664139f48 100644 --- a/drivers/pci/msi/msi.c +++ b/drivers/pci/msi/msi.c @@ -15,7 +15,7 @@ #include "../pci.h" #include "msi.h" -int pci_msi_enable = 1; +bool pci_msi_enable = true; /** * pci_msi_supported - check whether MSI may be enabled on a device @@ -113,7 +113,8 @@ static int pci_setup_msi_context(struct pci_dev *dev) void pci_msi_update_mask(struct msi_desc *desc, u32 clear, u32 set) { - raw_spinlock_t *lock = &to_pci_dev(desc->dev)->msi_lock; + struct pci_dev *dev = msi_desc_to_pci_dev(desc); + raw_spinlock_t *lock = &dev->msi_lock; unsigned long flags; if (!desc->pci.msi_attrib.can_mask) @@ -122,8 +123,7 @@ void pci_msi_update_mask(struct msi_desc *desc, u32 clear, u32 set) raw_spin_lock_irqsave(lock, flags); desc->pci.msi_mask &= ~clear; desc->pci.msi_mask |= set; - pci_write_config_dword(msi_desc_to_pci_dev(desc), desc->pci.mask_pos, - desc->pci.msi_mask); + pci_write_config_dword(dev, desc->pci.mask_pos, desc->pci.msi_mask); raw_spin_unlock_irqrestore(lock, flags); } @@ -335,43 +335,13 @@ static int msi_verify_entries(struct pci_dev *dev) return !entry ? 0 : -EIO; } -/** - * msi_capability_init - configure device's MSI capability structure - * @dev: pointer to the pci_dev data structure of MSI device function - * @nvec: number of interrupts to allocate - * @affd: description of automatic IRQ affinity assignments (may be %NULL) - * - * Setup the MSI capability structure of the device with the requested - * number of interrupts. A return value of zero indicates the successful - * setup of an entry with the new MSI IRQ. A negative return value indicates - * an error, and a positive return value indicates the number of interrupts - * which could have been allocated. - */ -static int msi_capability_init(struct pci_dev *dev, int nvec, - struct irq_affinity *affd) +static int __msi_capability_init(struct pci_dev *dev, int nvec, struct irq_affinity_desc *masks) { - struct irq_affinity_desc *masks = NULL; + int ret = msi_setup_msi_desc(dev, nvec, masks); struct msi_desc *entry, desc; - int ret; - - /* Reject multi-MSI early on irq domain enabled architectures */ - if (nvec > 1 && !pci_msi_domain_supports(dev, MSI_FLAG_MULTI_PCI_MSI, ALLOW_LEGACY)) - return 1; - /* - * Disable MSI during setup in the hardware, but mark it enabled - * so that setup code can evaluate it. 
- */ - pci_msi_set_enable(dev, 0); - dev->msi_enabled = 1; - - if (affd) - masks = irq_create_affinity_masks(nvec, affd); - - msi_lock_descs(&dev->dev); - ret = msi_setup_msi_desc(dev, nvec, masks); if (ret) - goto fail; + return ret; /* All MSIs are unmasked by default; mask them all */ entry = msi_first_desc(&dev->dev, MSI_DESC_ALL); @@ -393,24 +363,51 @@ static int msi_capability_init(struct pci_dev *dev, int nvec, goto err; /* Set MSI enabled bits */ + dev->msi_enabled = 1; pci_intx_for_msi(dev, 0); pci_msi_set_enable(dev, 1); pcibios_free_irq(dev); dev->irq = entry->irq; - goto unlock; - + return 0; err: pci_msi_unmask(&desc, msi_multi_mask(&desc)); pci_free_msi_irqs(dev); -fail: - dev->msi_enabled = 0; -unlock: - msi_unlock_descs(&dev->dev); - kfree(masks); return ret; } +/** + * msi_capability_init - configure device's MSI capability structure + * @dev: pointer to the pci_dev data structure of MSI device function + * @nvec: number of interrupts to allocate + * @affd: description of automatic IRQ affinity assignments (may be %NULL) + * + * Setup the MSI capability structure of the device with the requested + * number of interrupts. A return value of zero indicates the successful + * setup of an entry with the new MSI IRQ. A negative return value indicates + * an error, and a positive return value indicates the number of interrupts + * which could have been allocated. + */ +static int msi_capability_init(struct pci_dev *dev, int nvec, + struct irq_affinity *affd) +{ + /* Reject multi-MSI early on irq domain enabled architectures */ + if (nvec > 1 && !pci_msi_domain_supports(dev, MSI_FLAG_MULTI_PCI_MSI, ALLOW_LEGACY)) + return 1; + + /* + * Disable MSI during setup in the hardware, but mark it enabled + * so that setup code can evaluate it. + */ + pci_msi_set_enable(dev, 0); + + struct irq_affinity_desc *masks __free(kfree) = + affd ? 
irq_create_affinity_masks(nvec, affd) : NULL; + + guard(msi_descs_lock)(&dev->dev); + return __msi_capability_init(dev, nvec, masks); +} + int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec, struct irq_affinity *affd) { @@ -442,16 +439,16 @@ int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec, if (nvec < minvec) return -ENOSPC; - if (nvec > maxvec) - nvec = maxvec; - rc = pci_setup_msi_context(dev); if (rc) return rc; - if (!pci_setup_msi_device_domain(dev)) + if (!pci_setup_msi_device_domain(dev, nvec)) return -ENODEV; + if (nvec > maxvec) + nvec = maxvec; + for (;;) { if (affd) { nvec = irq_calc_affinity_vectors(minvec, nvec, affd); @@ -615,6 +612,9 @@ void msix_prepare_msi_desc(struct pci_dev *dev, struct msi_desc *desc) void __iomem *addr = pci_msix_desc_addr(desc); desc->pci.msi_attrib.can_mask = 1; + /* Workaround for SUN NIU insanity, which requires write before read */ + if (dev->dev_flags & PCI_DEV_FLAGS_MSIX_TOUCH_ENTRY_DATA_FIRST) + writel(0, addr + PCI_MSIX_ENTRY_DATA); desc->pci.msix_ctrl = readl(addr + PCI_MSIX_ENTRY_VECTOR_CTRL); } } @@ -663,38 +663,39 @@ static void msix_mask_all(void __iomem *base, int tsize) writel(ctrl, base + PCI_MSIX_ENTRY_VECTOR_CTRL); } -static int msix_setup_interrupts(struct pci_dev *dev, struct msix_entry *entries, - int nvec, struct irq_affinity *affd) -{ - struct irq_affinity_desc *masks = NULL; - int ret; +DEFINE_FREE(free_msi_irqs, struct pci_dev *, if (_T) pci_free_msi_irqs(_T)); - if (affd) - masks = irq_create_affinity_masks(nvec, affd); +static int __msix_setup_interrupts(struct pci_dev *__dev, struct msix_entry *entries, + int nvec, struct irq_affinity_desc *masks) +{ + struct pci_dev *dev __free(free_msi_irqs) = __dev; - msi_lock_descs(&dev->dev); - ret = msix_setup_msi_descs(dev, entries, nvec, masks); + int ret = msix_setup_msi_descs(dev, entries, nvec, masks); if (ret) - goto out_free; + return ret; ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX); if (ret) - goto out_free; + return ret; /* Check if all MSI entries honor device restrictions */ ret = msi_verify_entries(dev); if (ret) - goto out_free; + return ret; msix_update_entries(dev, entries); - goto out_unlock; + retain_and_null_ptr(dev); + return 0; +} -out_free: - pci_free_msi_irqs(dev); -out_unlock: - msi_unlock_descs(&dev->dev); - kfree(masks); - return ret; +static int msix_setup_interrupts(struct pci_dev *dev, struct msix_entry *entries, + int nvec, struct irq_affinity *affd) +{ + struct irq_affinity_desc *masks __free(kfree) = + affd ? 
irq_create_affinity_masks(nvec, affd) : NULL; + + guard(msi_descs_lock)(&dev->dev); + return __msix_setup_interrupts(dev, entries, nvec, masks); } /** @@ -870,13 +871,13 @@ void __pci_restore_msix_state(struct pci_dev *dev) write_msg = arch_restore_msi_irqs(dev); - msi_lock_descs(&dev->dev); - msi_for_each_desc(entry, &dev->dev, MSI_DESC_ALL) { - if (write_msg) - __pci_write_msi_msg(entry, &entry->msg); - pci_msix_write_vector_ctrl(entry, entry->pci.msix_ctrl); + scoped_guard (msi_descs_lock, &dev->dev) { + msi_for_each_desc(entry, &dev->dev, MSI_DESC_ALL) { + if (write_msg) + __pci_write_msi_msg(entry, &entry->msg); + pci_msix_write_vector_ctrl(entry, entry->pci.msix_ctrl); + } } - msi_unlock_descs(&dev->dev); pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0); } @@ -915,6 +916,55 @@ void pci_free_msi_irqs(struct pci_dev *dev) } } +#ifdef CONFIG_PCIE_TPH +/** + * pci_msix_write_tph_tag - Update the TPH tag for a given MSI-X vector + * @pdev: The PCIe device to update + * @index: The MSI-X index to update + * @tag: The tag to write + * + * Returns: 0 on success, error code on failure + */ +int pci_msix_write_tph_tag(struct pci_dev *pdev, unsigned int index, u16 tag) +{ + struct msi_desc *msi_desc; + struct irq_desc *irq_desc; + unsigned int virq; + + if (!pdev->msix_enabled) + return -ENXIO; + + virq = msi_get_virq(&pdev->dev, index); + if (!virq) + return -ENXIO; + + guard(msi_descs_lock)(&pdev->dev); + + /* + * This is a horrible hack, but short of implementing a PCI + * specific interrupt chip callback and a huge pile of + * infrastructure, this is the minor nuisance. It provides the + * protection against concurrent operations on this entry and keeps + * the control word cache in sync. + */ + irq_desc = irq_to_desc(virq); + if (!irq_desc) + return -ENXIO; + + guard(raw_spinlock_irq)(&irq_desc->lock); + msi_desc = irq_data_get_msi_desc(&irq_desc->irq_data); + if (!msi_desc || msi_desc->pci.msi_attrib.is_virtual) + return -ENXIO; + + msi_desc->pci.msix_ctrl &= ~PCI_MSIX_ENTRY_CTRL_ST; + msi_desc->pci.msix_ctrl |= FIELD_PREP(PCI_MSIX_ENTRY_CTRL_ST, tag); + pci_msix_write_vector_ctrl(msi_desc, msi_desc->pci.msix_ctrl); + /* Flush the write */ + readl(pci_msix_desc_addr(msi_desc)); + return 0; +} +#endif + /* Misc. 
infrastructure */ struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc) @@ -925,5 +975,5 @@ EXPORT_SYMBOL(msi_desc_to_pci_dev); void pci_no_msi(void) { - pci_msi_enable = 0; + pci_msi_enable = false; } diff --git a/drivers/pci/msi/msi.h b/drivers/pci/msi/msi.h index ee53cf079f4e..0b420b319f50 100644 --- a/drivers/pci/msi/msi.h +++ b/drivers/pci/msi/msi.h @@ -87,7 +87,7 @@ static inline __attribute_const__ u32 msi_multi_mask(struct msi_desc *desc) void msix_prepare_msi_desc(struct pci_dev *dev, struct msi_desc *desc); /* Subsystem variables */ -extern int pci_msi_enable; +extern bool pci_msi_enable; /* MSI internal functions invoked from the public APIs */ void pci_msi_shutdown(struct pci_dev *dev); @@ -107,7 +107,7 @@ enum support_mode { }; bool pci_msi_domain_supports(struct pci_dev *dev, unsigned int feature_mask, enum support_mode mode); -bool pci_setup_msi_device_domain(struct pci_dev *pdev); +bool pci_setup_msi_device_domain(struct pci_dev *pdev, unsigned int hwsize); bool pci_setup_msix_device_domain(struct pci_dev *pdev, unsigned int hwsize); /* Legacy (!IRQDOMAIN) fallbacks */ diff --git a/drivers/pci/of.c b/drivers/pci/of.c index ab7a8252bf41..3579265f1198 100644 --- a/drivers/pci/of.c +++ b/drivers/pci/of.c @@ -966,3 +966,47 @@ u32 of_pci_get_slot_power_limit(struct device_node *node, return slot_power_limit_mw; } EXPORT_SYMBOL_GPL(of_pci_get_slot_power_limit); + +/** + * of_pci_get_equalization_presets - Parses the "eq-presets-Ngts" property. + * + * @dev: Device containing the properties. + * @presets: Pointer to store the parsed data. + * @num_lanes: Maximum number of lanes supported. + * + * If the property is present, read and store the data in the @presets structure. + * Else, assign a default value of PCI_EQ_RESV. + * + * Return: 0 if the property is not available or successfully parsed else + * errno otherwise. + */ +int of_pci_get_equalization_presets(struct device *dev, + struct pci_eq_presets *presets, + int num_lanes) +{ + char name[20]; + int ret; + + presets->eq_presets_8gts[0] = PCI_EQ_RESV; + ret = of_property_read_u16_array(dev->of_node, "eq-presets-8gts", + presets->eq_presets_8gts, num_lanes); + if (ret && ret != -EINVAL) { + dev_err(dev, "Error reading eq-presets-8gts: %d\n", ret); + return ret; + } + + for (int i = 0; i < EQ_PRESET_TYPE_MAX - 1; i++) { + presets->eq_presets_Ngts[i][0] = PCI_EQ_RESV; + snprintf(name, sizeof(name), "eq-presets-%dgts", 8 << (i + 1)); + ret = of_property_read_u8_array(dev->of_node, name, + presets->eq_presets_Ngts[i], + num_lanes); + if (ret && ret != -EINVAL) { + dev_err(dev, "Error reading %s: %d\n", name, ret); + return ret; + } + } + + return 0; +} +EXPORT_SYMBOL_GPL(of_pci_get_equalization_presets); diff --git a/drivers/pci/p2pdma.c b/drivers/pci/p2pdma.c index 19214ec81fbb..da5657a02007 100644 --- a/drivers/pci/p2pdma.c +++ b/drivers/pci/p2pdma.c @@ -196,7 +196,7 @@ static const struct bin_attribute *const p2pmem_bin_attrs[] = { static const struct attribute_group p2pmem_group = { .attrs = p2pmem_attrs, - .bin_attrs_new = p2pmem_bin_attrs, + .bin_attrs = p2pmem_bin_attrs, .name = "p2pmem", }; @@ -1004,40 +1004,12 @@ static enum pci_p2pdma_map_type pci_p2pdma_map_type(struct dev_pagemap *pgmap, return type; } -/** - * pci_p2pdma_map_segment - map an sg segment determining the mapping type - * @state: State structure that should be declared outside of the for_each_sg() - * loop and initialized to zero. 
- * @dev: DMA device that's doing the mapping operation - * @sg: scatterlist segment to map - * - * This is a helper to be used by non-IOMMU dma_map_sg() implementations where - * the sg segment is the same for the page_link and the dma_address. - * - * Attempt to map a single segment in an SGL with the PCI bus address. - * The segment must point to a PCI P2PDMA page and thus must be - * wrapped in a is_pci_p2pdma_page(sg_page(sg)) check. - * - * Returns the type of mapping used and maps the page if the type is - * PCI_P2PDMA_MAP_BUS_ADDR. - */ -enum pci_p2pdma_map_type -pci_p2pdma_map_segment(struct pci_p2pdma_map_state *state, struct device *dev, - struct scatterlist *sg) +void __pci_p2pdma_update_state(struct pci_p2pdma_map_state *state, + struct device *dev, struct page *page) { - if (state->pgmap != page_pgmap(sg_page(sg))) { - state->pgmap = page_pgmap(sg_page(sg)); - state->map = pci_p2pdma_map_type(state->pgmap, dev); - state->bus_off = to_p2p_pgmap(state->pgmap)->bus_offset; - } - - if (state->map == PCI_P2PDMA_MAP_BUS_ADDR) { - sg->dma_address = sg_phys(sg) + state->bus_off; - sg_dma_len(sg) = sg->length; - sg_dma_mark_bus_address(sg); - } - - return state->map; + state->pgmap = page_pgmap(page); + state->map = pci_p2pdma_map_type(state->pgmap, dev); + state->bus_off = to_p2p_pgmap(state->pgmap)->bus_offset; } /** diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c index af370628e583..ddb25960ea47 100644 --- a/drivers/pci/pci-acpi.c +++ b/drivers/pci/pci-acpi.c @@ -816,15 +816,10 @@ int pci_acpi_program_hp_params(struct pci_dev *dev) bool pciehp_is_native(struct pci_dev *bridge) { const struct pci_host_bridge *host; - u32 slot_cap; if (!IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE)) return false; - pcie_capability_read_dword(bridge, PCI_EXP_SLTCAP, &slot_cap); - if (!(slot_cap & PCI_EXP_SLTCAP_HPC)) - return false; - if (pcie_ports_native) return true; @@ -1002,7 +997,7 @@ bool acpi_pci_bridge_d3(struct pci_dev *dev) struct acpi_device *adev, *rpadev; const union acpi_object *obj; - if (acpi_pci_disabled || !dev->is_hotplug_bridge) + if (acpi_pci_disabled || !dev->is_pciehp) return false; adev = ACPI_COMPANION(&dev->dev); diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index c8bd71a739f7..63665240ae87 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c @@ -555,12 +555,6 @@ static void pci_pm_default_resume(struct pci_dev *pci_dev) pci_enable_wake(pci_dev, PCI_D0, false); } -static void pci_pm_power_up_and_verify_state(struct pci_dev *pci_dev) -{ - pci_power_up(pci_dev); - pci_update_current_state(pci_dev, PCI_D0); -} - static void pci_pm_default_resume_early(struct pci_dev *pci_dev) { pci_pm_power_up_and_verify_state(pci_dev); @@ -714,6 +708,8 @@ static int pci_pm_prepare(struct device *dev) struct pci_dev *pci_dev = to_pci_dev(dev); const struct dev_pm_ops *pm = dev->driver ? 
dev->driver->pm : NULL; + dev_pm_set_strict_midlayer(dev, true); + if (pm && pm->prepare) { int error = pm->prepare(dev); if (error < 0) @@ -755,6 +751,8 @@ static void pci_pm_complete(struct device *dev) if (pci_dev->current_state < pre_sleep_state) pm_request_resume(dev); } + + dev_pm_set_strict_midlayer(dev, false); } #else /* !CONFIG_PM_SLEEP */ @@ -1507,7 +1505,7 @@ static int pci_bus_match(struct device *dev, const struct device_driver *drv) struct pci_driver *pci_drv; const struct pci_device_id *found_id; - if (!pci_dev->match_driver) + if (pci_dev_binding_disallowed(pci_dev)) return 0; pci_drv = (struct pci_driver *)to_pci_driver(drv); @@ -1634,7 +1632,7 @@ static int pci_bus_num_vf(struct device *dev) */ static int pci_dma_configure(struct device *dev) { - struct pci_driver *driver = to_pci_driver(dev->driver); + const struct device_driver *drv = READ_ONCE(dev->driver); struct device *bridge; int ret = 0; @@ -1651,8 +1649,8 @@ static int pci_dma_configure(struct device *dev) pci_put_host_bridge_device(bridge); - /* @driver may not be valid when we're called from the IOMMU layer */ - if (!ret && dev->driver && !driver->driver_managed_dma) { + /* @drv may not be valid when we're called from the IOMMU layer */ + if (!ret && drv && !to_pci_driver(drv)->driver_managed_dma) { ret = iommu_device_use_default_domain(dev); if (ret) arch_teardown_dma_ops(dev); diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index c6cda56ca52c..5eea14c1f7f5 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c @@ -857,7 +857,7 @@ static size_t pci_dev_config_attr_bin_size(struct kobject *kobj, } static const struct attribute_group pci_dev_config_attr_group = { - .bin_attrs_new = pci_dev_config_attrs, + .bin_attrs = pci_dev_config_attrs, .bin_size = pci_dev_config_attr_bin_size, }; @@ -1004,8 +1004,8 @@ void pci_create_legacy_files(struct pci_bus *b) b->legacy_io->attr.name = "legacy_io"; b->legacy_io->size = 0xffff; b->legacy_io->attr.mode = 0600; - b->legacy_io->read_new = pci_read_legacy_io; - b->legacy_io->write_new = pci_write_legacy_io; + b->legacy_io->read = pci_read_legacy_io; + b->legacy_io->write = pci_write_legacy_io; /* See pci_create_attr() for motivation */ b->legacy_io->llseek = pci_llseek_resource; b->legacy_io->mmap = pci_mmap_legacy_io; @@ -1211,8 +1211,8 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine) } else { sprintf(res_attr_name, "resource%d", num); if (pci_resource_flags(pdev, num) & IORESOURCE_IO) { - res_attr->read_new = pci_read_resource_io; - res_attr->write_new = pci_write_resource_io; + res_attr->read = pci_read_resource_io; + res_attr->write = pci_write_resource_io; if (arch_can_pci_mmap_io()) res_attr->mmap = pci_mmap_resource_uc; } else { @@ -1377,7 +1377,7 @@ static size_t pci_dev_rom_attr_bin_size(struct kobject *kobj, } static const struct attribute_group pci_dev_rom_attr_group = { - .bin_attrs_new = pci_dev_rom_attrs, + .bin_attrs = pci_dev_rom_attrs, .is_bin_visible = pci_dev_rom_attr_is_visible, .bin_size = pci_dev_rom_attr_bin_size, }; @@ -1475,6 +1475,9 @@ static ssize_t reset_method_store(struct device *dev, return count; } + pm_runtime_get_sync(dev); + struct device *pmdev __free(pm_runtime_put) = dev; + if (sysfs_streq(buf, "default")) { pci_init_reset_methods(pdev); return count; @@ -1805,6 +1808,7 @@ const struct attribute_group *pci_dev_attr_groups[] = { &pcie_dev_attr_group, #ifdef CONFIG_PCIEAER &aer_stats_attr_group, + &aer_attr_group, #endif #ifdef CONFIG_PCIEASPM &aspm_ctrl_attr_group, diff --git 
a/drivers/pci/pci.c b/drivers/pci/pci.c index 4d7c9f64ea24..b0f4d98036cd 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -3030,8 +3030,12 @@ static const struct dmi_system_id bridge_d3_blacklist[] = { * pci_bridge_d3_possible - Is it possible to put the bridge into D3 * @bridge: Bridge to check * - * This function checks if it is possible to move the bridge to D3. * Currently we only allow D3 for some PCIe ports and for Thunderbolt. + * + * Return: Whether it is possible to move the bridge to D3. + * + * The return value is guaranteed to be constant across the entire lifetime + * of the bridge, including its hot-removal. */ bool pci_bridge_d3_possible(struct pci_dev *bridge) { @@ -3046,10 +3050,14 @@ bool pci_bridge_d3_possible(struct pci_dev *bridge) return false; /* - * Hotplug ports handled by firmware in System Management Mode - * may not be put into D3 by the OS (Thunderbolt on non-Macs). + * Hotplug ports handled by platform firmware may not be put + * into D3 by the OS, e.g. ACPI slots ... */ - if (bridge->is_hotplug_bridge && !pciehp_is_native(bridge)) + if (bridge->is_hotplug_bridge && !bridge->is_pciehp) + return false; + + /* ... or PCIe hotplug ports not handled natively by the OS. */ + if (bridge->is_pciehp && !pciehp_is_native(bridge)) return false; if (pci_bridge_d3_force) @@ -3068,7 +3076,7 @@ bool pci_bridge_d3_possible(struct pci_dev *bridge) * by vendors for runtime D3 at least until 2018 because there * was no OS support. */ - if (bridge->is_hotplug_bridge) + if (bridge->is_pciehp) return false; if (dmi_check_system(bridge_d3_blacklist)) @@ -3192,6 +3200,12 @@ void pci_d3cold_disable(struct pci_dev *dev) } EXPORT_SYMBOL_GPL(pci_d3cold_disable); +void pci_pm_power_up_and_verify_state(struct pci_dev *pci_dev) +{ + pci_power_up(pci_dev); + pci_update_current_state(pci_dev, PCI_D0); +} + /** * pci_pm_init - Initialize PM functions of given PCI device * @dev: PCI device to handle. 
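[Editor's note] As a usage aside on the strengthened kernel-doc above: because the result of pci_bridge_d3_possible() is now documented as invariant for the bridge's entire lifetime, a caller may evaluate it once and cache it instead of re-checking on every power transition. A minimal sketch, not part of this patch; it assumes the existing bridge_d3 bitfield of struct pci_dev as the cache, and the helper name is hypothetical:

	/* Hypothetical helper: cache the lifetime-invariant result once. */
	static void pci_cache_bridge_d3(struct pci_dev *bridge)
	{
		/* Safe to evaluate once; the value cannot change until removal. */
		bridge->bridge_d3 = pci_bridge_d3_possible(bridge);
	}
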
@@ -3199,12 +3213,8 @@ EXPORT_SYMBOL_GPL(pci_d3cold_disable); void pci_pm_init(struct pci_dev *dev) { int pm; - u16 status; u16 pmc; - pm_runtime_forbid(&dev->dev); - pm_runtime_set_active(&dev->dev); - pm_runtime_enable(&dev->dev); device_enable_async_suspend(&dev->dev); dev->wakeup_prepared = false; @@ -3214,14 +3224,14 @@ void pci_pm_init(struct pci_dev *dev) /* find PCI PM capability in list */ pm = pci_find_capability(dev, PCI_CAP_ID_PM); if (!pm) - return; + goto poweron; /* Check device's ability to generate PME# */ pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc); if ((pmc & PCI_PM_CAP_VER_MASK) > 3) { pci_err(dev, "unsupported PM cap regs version (%u)\n", pmc & PCI_PM_CAP_VER_MASK); - return; + goto poweron; } dev->pm_cap = pm; @@ -3263,9 +3273,11 @@ void pci_pm_init(struct pci_dev *dev) pci_pme_active(dev, false); } - pci_read_config_word(dev, PCI_STATUS, &status); - if (status & PCI_STATUS_IMM_READY) - dev->imm_ready = 1; +poweron: + pci_pm_power_up_and_verify_state(dev); + pm_runtime_forbid(&dev->dev); + pm_runtime_set_active(&dev->dev); + pm_runtime_enable(&dev->dev); } static unsigned long pci_ea_flags(struct pci_dev *dev, u8 prop) @@ -3745,7 +3757,13 @@ static int pci_rebar_find_pos(struct pci_dev *pdev, int bar) unsigned int pos, nbars, i; u32 ctrl; - pos = pdev->rebar_cap; + if (pci_resource_is_iov(bar)) { + pos = pci_iov_vf_rebar_cap(pdev); + bar = pci_resource_num_to_vf_bar(bar); + } else { + pos = pdev->rebar_cap; + } + if (!pos) return -ENOTSUPP; @@ -3937,16 +3955,6 @@ void pci_release_region(struct pci_dev *pdev, int bar) if (!pci_bar_index_is_valid(bar)) return; - /* - * This is done for backwards compatibility, because the old PCI devres - * API had a mode in which the function became managed if it had been - * enabled with pcim_enable_device() instead of pci_enable_device(). - */ - if (pci_is_managed(pdev)) { - pcim_release_region(pdev, bar); - return; - } - if (pci_resource_len(pdev, bar) == 0) return; if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) @@ -3984,13 +3992,6 @@ static int __pci_request_region(struct pci_dev *pdev, int bar, if (!pci_bar_index_is_valid(bar)) return -EINVAL; - if (pci_is_managed(pdev)) { - if (exclusive == IORESOURCE_EXCLUSIVE) - return pcim_request_region_exclusive(pdev, bar, name); - - return pcim_request_region(pdev, bar, name); - } - if (pci_resource_len(pdev, bar) == 0) return 0; @@ -4027,11 +4028,6 @@ err_out: * * Returns 0 on success, or %EBUSY on error. A warning * message is also printed on failure. - * - * NOTE: - * This is a "hybrid" function: It's normally unmanaged, but becomes managed - * when pcim_enable_device() has been called in advance. This hybrid feature is - * DEPRECATED! If you want managed cleanup, use the pcim_* functions instead. */ int pci_request_region(struct pci_dev *pdev, int bar, const char *name) { @@ -4084,11 +4080,6 @@ err_out: * @name: Name of the driver requesting the resources * * Returns: 0 on success, negative error code on failure. - * - * NOTE: - * This is a "hybrid" function: It's normally unmanaged, but becomes managed - * when pcim_enable_device() has been called in advance. This hybrid feature is - * DEPRECATED! If you want managed cleanup, use the pcim_* functions instead. */ int pci_request_selected_regions(struct pci_dev *pdev, int bars, const char *name) @@ -4104,11 +4095,6 @@ EXPORT_SYMBOL(pci_request_selected_regions); * @name: name of the driver requesting the resources * * Returns: 0 on success, negative error code on failure. 
- * - * NOTE: - * This is a "hybrid" function: It's normally unmanaged, but becomes managed - * when pcim_enable_device() has been called in advance. This hybrid feature is - * DEPRECATED! If you want managed cleanup, use the pcim_* functions instead. */ int pci_request_selected_regions_exclusive(struct pci_dev *pdev, int bars, const char *name) @@ -4144,11 +4130,6 @@ EXPORT_SYMBOL(pci_release_regions); * * Returns 0 on success, or %EBUSY on error. A warning * message is also printed on failure. - * - * NOTE: - * This is a "hybrid" function: It's normally unmanaged, but becomes managed - * when pcim_enable_device() has been called in advance. This hybrid feature is - * DEPRECATED! If you want managed cleanup, use the pcim_* functions instead. */ int pci_request_regions(struct pci_dev *pdev, const char *name) { @@ -4173,11 +4154,6 @@ EXPORT_SYMBOL(pci_request_regions); * * Returns 0 on success, or %EBUSY on error. A warning message is also * printed on failure. - * - * NOTE: - * This is a "hybrid" function: It's normally unmanaged, but becomes managed - * when pcim_enable_device() has been called in advance. This hybrid feature is - * DEPRECATED! If you want managed cleanup, use the pcim_* functions instead. */ int pci_request_regions_exclusive(struct pci_dev *pdev, const char *name) { @@ -4257,7 +4233,7 @@ unsigned long __weak pci_address_to_pio(phys_addr_t address) #ifndef pci_remap_iospace int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr) { -#if defined(PCI_IOBASE) && defined(CONFIG_MMU) +#if defined(PCI_IOBASE) unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start; if (!(res->flags & IORESOURCE_IO)) @@ -4290,7 +4266,7 @@ EXPORT_SYMBOL(pci_remap_iospace); */ void pci_unmap_iospace(struct resource *res) { -#if defined(PCI_IOBASE) && defined(CONFIG_MMU) +#if defined(PCI_IOBASE) unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start; vunmap_range(vaddr, vaddr + resource_size(res)); @@ -4718,6 +4694,11 @@ static int pcie_wait_for_link_status(struct pci_dev *pdev, * @pdev: Device whose link to retrain. * @use_lt: Use the LT bit if TRUE, or the DLLLA bit if FALSE, for status. * + * Trigger retraining of the PCIe Link and wait for the completion of the + * retraining. As link retraining is known to assert LBMS and may change + * the Link Speed, LBMS is cleared after the retraining and the Link Speed + * of the subordinate bus is updated. + * * Retrain completion status is retrieved from the Link Status Register * according to @use_lt. It is not verified whether the use of the DLLLA * bit is valid. @@ -4757,7 +4738,19 @@ int pcie_retrain_link(struct pci_dev *pdev, bool use_lt) * to track link speed or width changes made by hardware itself * in attempt to correct unreliable link operation. */ - pcie_reset_lbms_count(pdev); + pcie_reset_lbms(pdev); + + /* + * Ensure the Link Speed updates after retraining in case the Link + * Speed was changed because of the retraining. While the bwctrl's + * IRQ handler normally picks up the new Link Speed, clearing LBMS + * races with the IRQ handler reading the Link Status register and + * can result in the handler returning early without updating the + * Link Speed.
+ */ + if (pdev->subordinate) + pcie_update_link_speed(pdev->subordinate); + return rc; } @@ -4954,7 +4947,7 @@ int pci_bridge_wait_for_secondary_bus(struct pci_dev *dev, char *reset_type) delay); if (!pcie_wait_for_link_delay(dev, true, delay)) { /* Did not train, no need to wait any further */ - pci_info(dev, "Data Link Layer Link Active not set in 1000 msec\n"); + pci_info(dev, "Data Link Layer Link Active not set in %d msec\n", delay); return -ENOTTY; } @@ -5429,8 +5422,6 @@ static bool pci_bus_resettable(struct pci_bus *bus) return false; list_for_each_entry(dev, &bus->devices, bus_list) { - if (!pci_reset_supported(dev)) - return false; if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET || (dev->subordinate && !pci_bus_resettable(dev->subordinate))) return false; @@ -5507,8 +5498,6 @@ static bool pci_slot_resettable(struct pci_slot *slot) list_for_each_entry(dev, &slot->bus->devices, bus_list) { if (!dev->slot || dev->slot != slot) continue; - if (!pci_reset_supported(dev)) - return false; if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET || (dev->subordinate && !pci_bus_resettable(dev->subordinate))) return false; @@ -5542,7 +5531,8 @@ static void pci_slot_unlock(struct pci_slot *slot) continue; if (dev->subordinate) pci_bus_unlock(dev->subordinate); - pci_dev_unlock(dev); + else + pci_dev_unlock(dev); } } @@ -6806,11 +6796,6 @@ int __weak pci_ext_cfg_avail(void) return 1; } -void __weak pci_fixup_cardbus(struct pci_bus *bus) -{ -} -EXPORT_SYMBOL(pci_fixup_cardbus); - static int __init pci_setup(char *str) { while (str) { diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index b81e99cd4b62..34f65d69662e 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h @@ -9,6 +9,8 @@ struct pcie_tlp_log; /* Number of possible devfns: 0.0 to 1f.7 inclusive */ #define MAX_NR_DEVFNS 256 +#define MAX_NR_LANES 16 + #define PCI_FIND_CAP_TTL 48 #define PCI_VSEC_ID_INTEL_TBT 0x1234 /* Thunderbolt */ @@ -34,13 +36,6 @@ struct pcie_tlp_log; #define PCIE_T_PERST_CLK_US 100 /* - * End of conventional reset (PERST# de-asserted) to first configuration - * request (device able to respond with a "Request Retry Status" completion), - * from PCIe r6.0, sec 6.6.1. - */ -#define PCIE_T_RRS_READY_MS 100 - -/* * PCIe r6.0, sec 5.3.3.2.1 <PME Synchronization> * Recommends 1ms to 10ms timeout to check L2 ready. */ @@ -59,7 +54,11 @@ struct pcie_tlp_log; * completes before sending a Configuration Request to the device * immediately below that Port." 
*/ -#define PCIE_RESET_CONFIG_DEVICE_WAIT_MS 100 +#define PCIE_RESET_CONFIG_WAIT_MS 100 + +/* Parameters for the waiting for link up routine */ +#define PCIE_LINK_WAIT_MAX_RETRIES 10 +#define PCIE_LINK_WAIT_SLEEP_MS 90 /* Message Routing (r[2:0]); PCIe r6.0, sec 2.2.8 */ #define PCIE_MSG_TYPE_R_RC 0 @@ -148,6 +147,7 @@ void pci_dev_adjust_pme(struct pci_dev *dev); void pci_dev_complete_resume(struct pci_dev *pci_dev); void pci_config_pm_runtime_get(struct pci_dev *dev); void pci_config_pm_runtime_put(struct pci_dev *dev); +void pci_pm_power_up_and_verify_state(struct pci_dev *pci_dev); void pci_pm_init(struct pci_dev *dev); void pci_ea_init(struct pci_dev *dev); void pci_msi_init(struct pci_dev *dev); @@ -227,6 +227,7 @@ static inline int pci_proc_detach_bus(struct pci_bus *bus) { return 0; } /* Functions for PCI Hotplug drivers to use */ int pci_hp_add_bridge(struct pci_dev *dev); +bool pci_hp_spurious_link_change(struct pci_dev *pdev); #if defined(CONFIG_SYSFS) && defined(HAVE_PCI_LEGACY) void pci_create_legacy_files(struct pci_bus *bus); @@ -387,12 +388,14 @@ void pci_bus_put(struct pci_bus *bus); #define PCIE_LNKCAP_SLS2SPEED(lnkcap) \ ({ \ - ((lnkcap) == PCI_EXP_LNKCAP_SLS_64_0GB ? PCIE_SPEED_64_0GT : \ - (lnkcap) == PCI_EXP_LNKCAP_SLS_32_0GB ? PCIE_SPEED_32_0GT : \ - (lnkcap) == PCI_EXP_LNKCAP_SLS_16_0GB ? PCIE_SPEED_16_0GT : \ - (lnkcap) == PCI_EXP_LNKCAP_SLS_8_0GB ? PCIE_SPEED_8_0GT : \ - (lnkcap) == PCI_EXP_LNKCAP_SLS_5_0GB ? PCIE_SPEED_5_0GT : \ - (lnkcap) == PCI_EXP_LNKCAP_SLS_2_5GB ? PCIE_SPEED_2_5GT : \ + u32 lnkcap_sls = (lnkcap) & PCI_EXP_LNKCAP_SLS; \ + \ + (lnkcap_sls == PCI_EXP_LNKCAP_SLS_64_0GB ? PCIE_SPEED_64_0GT : \ + lnkcap_sls == PCI_EXP_LNKCAP_SLS_32_0GB ? PCIE_SPEED_32_0GT : \ + lnkcap_sls == PCI_EXP_LNKCAP_SLS_16_0GB ? PCIE_SPEED_16_0GT : \ + lnkcap_sls == PCI_EXP_LNKCAP_SLS_8_0GB ? PCIE_SPEED_8_0GT : \ + lnkcap_sls == PCI_EXP_LNKCAP_SLS_5_0GB ? PCIE_SPEED_5_0GT : \ + lnkcap_sls == PCI_EXP_LNKCAP_SLS_2_5GB ? PCIE_SPEED_2_5GT : \ PCI_SPEED_UNKNOWN); \ }) @@ -407,13 +410,17 @@ void pci_bus_put(struct pci_bus *bus); PCI_SPEED_UNKNOWN) #define PCIE_LNKCTL2_TLS2SPEED(lnkctl2) \ - ((lnkctl2) == PCI_EXP_LNKCTL2_TLS_64_0GT ? PCIE_SPEED_64_0GT : \ - (lnkctl2) == PCI_EXP_LNKCTL2_TLS_32_0GT ? PCIE_SPEED_32_0GT : \ - (lnkctl2) == PCI_EXP_LNKCTL2_TLS_16_0GT ? PCIE_SPEED_16_0GT : \ - (lnkctl2) == PCI_EXP_LNKCTL2_TLS_8_0GT ? PCIE_SPEED_8_0GT : \ - (lnkctl2) == PCI_EXP_LNKCTL2_TLS_5_0GT ? PCIE_SPEED_5_0GT : \ - (lnkctl2) == PCI_EXP_LNKCTL2_TLS_2_5GT ? PCIE_SPEED_2_5GT : \ - PCI_SPEED_UNKNOWN) +({ \ + u16 lnkctl2_tls = (lnkctl2) & PCI_EXP_LNKCTL2_TLS; \ + \ + (lnkctl2_tls == PCI_EXP_LNKCTL2_TLS_64_0GT ? PCIE_SPEED_64_0GT : \ + lnkctl2_tls == PCI_EXP_LNKCTL2_TLS_32_0GT ? PCIE_SPEED_32_0GT : \ + lnkctl2_tls == PCI_EXP_LNKCTL2_TLS_16_0GT ? PCIE_SPEED_16_0GT : \ + lnkctl2_tls == PCI_EXP_LNKCTL2_TLS_8_0GT ? PCIE_SPEED_8_0GT : \ + lnkctl2_tls == PCI_EXP_LNKCTL2_TLS_5_0GT ? PCIE_SPEED_5_0GT : \ + lnkctl2_tls == PCI_EXP_LNKCTL2_TLS_2_5GT ? 
PCIE_SPEED_2_5GT : \ + PCI_SPEED_UNKNOWN); \ +}) /* PCIe speed to Mb/s reduced by encoding overhead */ #define PCIE_SPEED2MBS_ENC(speed) \ @@ -482,6 +489,7 @@ struct pci_sriov { u16 subsystem_vendor; /* VF subsystem vendor */ u16 subsystem_device; /* VF subsystem device */ resource_size_t barsz[PCI_SRIOV_NUM_BARS]; /* VF BAR size */ + u16 vf_rebar_cap; /* VF Resizable BAR capability offset */ bool drivers_autoprobe; /* Auto probing of VFs by driver */ }; @@ -557,6 +565,10 @@ static inline int pci_dev_set_disconnected(struct pci_dev *dev, void *unused) #define PCI_DPC_RECOVERED 1 #define PCI_DPC_RECOVERING 2 #define PCI_DEV_REMOVED 3 +#define PCI_LINK_CHANGED 4 +#define PCI_LINK_CHANGING 5 +#define PCI_LINK_LBMS_SEEN 6 +#define PCI_DEV_ALLOW_BINDING 7 static inline void pci_dev_assign_added(struct pci_dev *dev) { @@ -580,6 +592,16 @@ static inline bool pci_dev_test_and_set_removed(struct pci_dev *dev) return test_and_set_bit(PCI_DEV_REMOVED, &dev->priv_flags); } +static inline void pci_dev_allow_binding(struct pci_dev *dev) +{ + set_bit(PCI_DEV_ALLOW_BINDING, &dev->priv_flags); +} + +static inline bool pci_dev_binding_disallowed(struct pci_dev *dev) +{ + return !test_bit(PCI_DEV_ALLOW_BINDING, &dev->priv_flags); +} + #ifdef CONFIG_PCIEAER #include <linux/aer.h> @@ -587,12 +609,15 @@ static inline bool pci_dev_test_and_set_removed(struct pci_dev *dev) struct aer_err_info { struct pci_dev *dev[AER_MAX_MULTI_ERR_DEVICES]; + int ratelimit_print[AER_MAX_MULTI_ERR_DEVICES]; int error_dev_num; + const char *level; /* printk level */ unsigned int id:16; unsigned int severity:2; /* 0:NONFATAL | 1:FATAL | 2:COR */ - unsigned int __pad1:5; + unsigned int root_ratelimit_print:1; /* 0=skip, 1=print */ + unsigned int __pad1:4; unsigned int multi_error_valid:1; unsigned int first_error:5; @@ -604,15 +629,16 @@ struct aer_err_info { struct pcie_tlp_log tlp; /* TLP Header */ }; -int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info); -void aer_print_error(struct pci_dev *dev, struct aer_err_info *info); +int aer_get_device_error_info(struct aer_err_info *info, int i); +void aer_print_error(struct aer_err_info *info, int i); int pcie_read_tlp_log(struct pci_dev *dev, int where, int where2, unsigned int tlp_len, bool flit, struct pcie_tlp_log *log); unsigned int aer_tlp_log_len(struct pci_dev *dev, u32 aercc); void pcie_print_tlp_log(const struct pci_dev *dev, - const struct pcie_tlp_log *log, const char *pfx); + const struct pcie_tlp_log *log, const char *level, + const char *pfx); #endif /* CONFIG_PCIEAER */ #ifdef CONFIG_PCIEPORTBUS @@ -688,10 +714,28 @@ void pci_iov_update_resource(struct pci_dev *dev, int resno); resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, int resno); void pci_restore_iov_state(struct pci_dev *dev); int pci_iov_bus_range(struct pci_bus *bus); +void pci_iov_resource_set_size(struct pci_dev *dev, int resno, + resource_size_t size); +bool pci_iov_is_memory_decoding_enabled(struct pci_dev *dev); +static inline u16 pci_iov_vf_rebar_cap(struct pci_dev *dev) +{ + if (!dev->is_physfn) + return 0; + + return dev->sriov->vf_rebar_cap; +} static inline bool pci_resource_is_iov(int resno) { return resno >= PCI_IOV_RESOURCES && resno <= PCI_IOV_RESOURCE_END; } +static inline int pci_resource_num_from_vf_bar(int resno) +{ + return resno + PCI_IOV_RESOURCES; +} +static inline int pci_resource_num_to_vf_bar(int resno) +{ + return resno - PCI_IOV_RESOURCES; +} extern const struct attribute_group sriov_pf_dev_attr_group; extern const struct attribute_group 
sriov_vf_dev_attr_group; #else @@ -712,10 +756,30 @@ static inline int pci_iov_bus_range(struct pci_bus *bus) { return 0; } +static inline void pci_iov_resource_set_size(struct pci_dev *dev, int resno, + resource_size_t size) { } +static inline bool pci_iov_is_memory_decoding_enabled(struct pci_dev *dev) +{ + return false; +} +static inline u16 pci_iov_vf_rebar_cap(struct pci_dev *dev) +{ + return 0; +} static inline bool pci_resource_is_iov(int resno) { return false; } +static inline int pci_resource_num_from_vf_bar(int resno) +{ + WARN_ON_ONCE(1); + return -ENODEV; +} +static inline int pci_resource_num_to_vf_bar(int resno) +{ + WARN_ON_ONCE(1); + return -ENODEV; +} #endif /* CONFIG_PCI_IOV */ #ifdef CONFIG_PCIE_TPH @@ -824,14 +888,9 @@ static inline void pcie_ecrc_get_policy(char *str) { } #endif #ifdef CONFIG_PCIEPORTBUS -void pcie_reset_lbms_count(struct pci_dev *port); -int pcie_lbms_count(struct pci_dev *port, unsigned long *val); +void pcie_reset_lbms(struct pci_dev *port); #else -static inline void pcie_reset_lbms_count(struct pci_dev *port) {} -static inline int pcie_lbms_count(struct pci_dev *port, unsigned long *val) -{ - return -EOPNOTSUPP; -} +static inline void pcie_reset_lbms(struct pci_dev *port) {} #endif struct pci_dev_reset_methods { @@ -876,6 +935,21 @@ static inline u64 pci_rebar_size_to_bytes(int size) struct device_node; +#define PCI_EQ_RESV 0xff + +enum equalization_preset_type { + EQ_PRESET_TYPE_8GTS, + EQ_PRESET_TYPE_16GTS, + EQ_PRESET_TYPE_32GTS, + EQ_PRESET_TYPE_64GTS, + EQ_PRESET_TYPE_MAX +}; + +struct pci_eq_presets { + u16 eq_presets_8gts[MAX_NR_LANES]; + u8 eq_presets_Ngts[EQ_PRESET_TYPE_MAX - 1][MAX_NR_LANES]; +}; + #ifdef CONFIG_OF int of_get_pci_domain_nr(struct device_node *node); int of_pci_get_max_link_speed(struct device_node *node); @@ -890,7 +964,9 @@ void pci_release_bus_of_node(struct pci_bus *bus); int devm_of_pci_bridge_init(struct device *dev, struct pci_host_bridge *bridge); bool of_pci_supply_present(struct device_node *np); - +int of_pci_get_equalization_presets(struct device *dev, + struct pci_eq_presets *presets, + int num_lanes); #else static inline int of_get_pci_domain_nr(struct device_node *node) @@ -935,6 +1011,17 @@ static inline bool of_pci_supply_present(struct device_node *np) { return false; } + +static inline int of_pci_get_equalization_presets(struct device *dev, + struct pci_eq_presets *presets, + int num_lanes) +{ + presets->eq_presets_8gts[0] = PCI_EQ_RESV; + for (int i = 0; i < EQ_PRESET_TYPE_MAX - 1; i++) + presets->eq_presets_Ngts[i][0] = PCI_EQ_RESV; + + return 0; +} #endif /* CONFIG_OF */ struct of_changeset; @@ -961,6 +1048,7 @@ void pci_no_aer(void); void pci_aer_init(struct pci_dev *dev); void pci_aer_exit(struct pci_dev *dev); extern const struct attribute_group aer_stats_attr_group; +extern const struct attribute_group aer_attr_group; void pci_aer_clear_fatal_status(struct pci_dev *dev); int pci_aer_clear_status(struct pci_dev *dev); int pci_aer_raw_clear_status(struct pci_dev *dev); @@ -1059,10 +1147,14 @@ static inline pci_power_t mid_pci_get_power_state(struct pci_dev *pdev) } #endif -int pcim_intx(struct pci_dev *dev, int enable); -int pcim_request_region_exclusive(struct pci_dev *pdev, int bar, - const char *name); -void pcim_release_region(struct pci_dev *pdev, int bar); +#ifdef CONFIG_PCI_MSI +int pci_msix_write_tph_tag(struct pci_dev *pdev, unsigned int index, u16 tag); +#else +static inline int pci_msix_write_tph_tag(struct pci_dev *pdev, unsigned int index, u16 tag) +{ + return -ENODEV; +} +#endif /* * 
Config Address for PCI Configuration Mechanism #1 diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c index a1cf8c7ef628..e286c197d716 100644 --- a/drivers/pci/pcie/aer.c +++ b/drivers/pci/pcie/aer.c @@ -28,6 +28,7 @@ #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/kfifo.h> +#include <linux/ratelimit.h> #include <linux/slab.h> #include <acpi/apei.h> #include <acpi/ghes.h> @@ -54,8 +55,8 @@ struct aer_rpc { DECLARE_KFIFO(aer_fifo, struct aer_err_source, AER_ERROR_SOURCES_MAX); }; -/* AER stats for the device */ -struct aer_stats { +/* AER info for the device */ +struct aer_info { /* * Fields for all AER capable devices. They indicate the errors @@ -88,6 +89,10 @@ struct aer_stats { u64 rootport_total_cor_errs; u64 rootport_total_fatal_errs; u64 rootport_total_nonfatal_errs; + + /* Ratelimits for errors */ + struct ratelimit_state correctable_ratelimit; + struct ratelimit_state nonfatal_ratelimit; }; #define AER_LOG_TLP_MASKS (PCI_ERR_UNC_POISON_TLP| \ @@ -111,12 +116,12 @@ struct aer_stats { PCI_ERR_ROOT_MULTI_COR_RCV | \ PCI_ERR_ROOT_MULTI_UNCOR_RCV) -static int pcie_aer_disable; +static bool pcie_aer_disable; static pci_ers_result_t aer_root_reset(struct pci_dev *dev); void pci_no_aer(void) { - pcie_aer_disable = 1; + pcie_aer_disable = true; } bool pci_aer_available(void) @@ -377,7 +382,12 @@ void pci_aer_init(struct pci_dev *dev) if (!dev->aer_cap) return; - dev->aer_stats = kzalloc(sizeof(struct aer_stats), GFP_KERNEL); + dev->aer_info = kzalloc(sizeof(*dev->aer_info), GFP_KERNEL); + + ratelimit_state_init(&dev->aer_info->correctable_ratelimit, + DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST); + ratelimit_state_init(&dev->aer_info->nonfatal_ratelimit, + DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST); /* * We save/restore PCI_ERR_UNCOR_MASK, PCI_ERR_UNCOR_SEVER, @@ -398,8 +408,8 @@ void pci_aer_init(struct pci_dev *dev) void pci_aer_exit(struct pci_dev *dev) { - kfree(dev->aer_stats); - dev->aer_stats = NULL; + kfree(dev->aer_info); + dev->aer_info = NULL; } #define AER_AGENT_RECEIVER 0 @@ -537,10 +547,10 @@ static const char *aer_agent_string[] = { { \ unsigned int i; \ struct pci_dev *pdev = to_pci_dev(dev); \ - u64 *stats = pdev->aer_stats->stats_array; \ + u64 *stats = pdev->aer_info->stats_array; \ size_t len = 0; \ \ - for (i = 0; i < ARRAY_SIZE(pdev->aer_stats->stats_array); i++) {\ + for (i = 0; i < ARRAY_SIZE(pdev->aer_info->stats_array); i++) { \ if (strings_array[i]) \ len += sysfs_emit_at(buf, len, "%s %llu\n", \ strings_array[i], \ @@ -551,7 +561,7 @@ static const char *aer_agent_string[] = { i, stats[i]); \ } \ len += sysfs_emit_at(buf, len, "TOTAL_%s %llu\n", total_string, \ - pdev->aer_stats->total_field); \ + pdev->aer_info->total_field); \ return len; \ } \ static DEVICE_ATTR_RO(name) @@ -572,7 +582,7 @@ aer_stats_dev_attr(aer_dev_nonfatal, dev_nonfatal_errs, char *buf) \ { \ struct pci_dev *pdev = to_pci_dev(dev); \ - return sysfs_emit(buf, "%llu\n", pdev->aer_stats->field); \ + return sysfs_emit(buf, "%llu\n", pdev->aer_info->field); \ } \ static DEVICE_ATTR_RO(name) @@ -599,7 +609,7 @@ static umode_t aer_stats_attrs_are_visible(struct kobject *kobj, struct device *dev = kobj_to_dev(kobj); struct pci_dev *pdev = to_pci_dev(dev); - if (!pdev->aer_stats) + if (!pdev->aer_info) return 0; if ((a == &dev_attr_aer_rootport_total_err_cor.attr || @@ -617,31 +627,136 @@ const struct attribute_group aer_stats_attr_group = { .is_visible = aer_stats_attrs_are_visible, }; +/* + * Ratelimit interval + * <=0: disabled with 
ratelimit.interval = 0 + * >0: enabled with ratelimit.interval in ms + */ +#define aer_ratelimit_interval_attr(name, ratelimit) \ + static ssize_t \ + name##_show(struct device *dev, struct device_attribute *attr, \ + char *buf) \ + { \ + struct pci_dev *pdev = to_pci_dev(dev); \ + \ + return sysfs_emit(buf, "%d\n", \ + pdev->aer_info->ratelimit.interval); \ + } \ + \ + static ssize_t \ + name##_store(struct device *dev, struct device_attribute *attr, \ + const char *buf, size_t count) \ + { \ + struct pci_dev *pdev = to_pci_dev(dev); \ + int interval; \ + \ + if (!capable(CAP_SYS_ADMIN)) \ + return -EPERM; \ + \ + if (kstrtoint(buf, 0, &interval) < 0) \ + return -EINVAL; \ + \ + if (interval <= 0) \ + interval = 0; \ + else \ + interval = msecs_to_jiffies(interval); \ + \ + pdev->aer_info->ratelimit.interval = interval; \ + \ + return count; \ + } \ + static DEVICE_ATTR_RW(name); + +#define aer_ratelimit_burst_attr(name, ratelimit) \ + static ssize_t \ + name##_show(struct device *dev, struct device_attribute *attr, \ + char *buf) \ + { \ + struct pci_dev *pdev = to_pci_dev(dev); \ + \ + return sysfs_emit(buf, "%d\n", \ + pdev->aer_info->ratelimit.burst); \ + } \ + \ + static ssize_t \ + name##_store(struct device *dev, struct device_attribute *attr, \ + const char *buf, size_t count) \ + { \ + struct pci_dev *pdev = to_pci_dev(dev); \ + int burst; \ + \ + if (!capable(CAP_SYS_ADMIN)) \ + return -EPERM; \ + \ + if (kstrtoint(buf, 0, &burst) < 0) \ + return -EINVAL; \ + \ + pdev->aer_info->ratelimit.burst = burst; \ + \ + return count; \ + } \ + static DEVICE_ATTR_RW(name); + +#define aer_ratelimit_attrs(name) \ + aer_ratelimit_interval_attr(name##_ratelimit_interval_ms, \ + name##_ratelimit) \ + aer_ratelimit_burst_attr(name##_ratelimit_burst, \ + name##_ratelimit) + +aer_ratelimit_attrs(correctable) +aer_ratelimit_attrs(nonfatal) + +static struct attribute *aer_attrs[] = { + &dev_attr_correctable_ratelimit_interval_ms.attr, + &dev_attr_correctable_ratelimit_burst.attr, + &dev_attr_nonfatal_ratelimit_interval_ms.attr, + &dev_attr_nonfatal_ratelimit_burst.attr, + NULL +}; + +static umode_t aer_attrs_are_visible(struct kobject *kobj, + struct attribute *a, int n) +{ + struct device *dev = kobj_to_dev(kobj); + struct pci_dev *pdev = to_pci_dev(dev); + + if (!pdev->aer_info) + return 0; + + return a->mode; +} + +const struct attribute_group aer_attr_group = { + .name = "aer", + .attrs = aer_attrs, + .is_visible = aer_attrs_are_visible, +}; + static void pci_dev_aer_stats_incr(struct pci_dev *pdev, struct aer_err_info *info) { unsigned long status = info->status & ~info->mask; int i, max = -1; u64 *counter = NULL; - struct aer_stats *aer_stats = pdev->aer_stats; + struct aer_info *aer_info = pdev->aer_info; - if (!aer_stats) + if (!aer_info) return; switch (info->severity) { case AER_CORRECTABLE: - aer_stats->dev_total_cor_errs++; - counter = &aer_stats->dev_cor_errs[0]; + aer_info->dev_total_cor_errs++; + counter = &aer_info->dev_cor_errs[0]; max = AER_MAX_TYPEOF_COR_ERRS; break; case AER_NONFATAL: - aer_stats->dev_total_nonfatal_errs++; - counter = &aer_stats->dev_nonfatal_errs[0]; + aer_info->dev_total_nonfatal_errs++; + counter = &aer_info->dev_nonfatal_errs[0]; max = AER_MAX_TYPEOF_UNCOR_ERRS; break; case AER_FATAL: - aer_stats->dev_total_fatal_errs++; - counter = &aer_stats->dev_fatal_errs[0]; + aer_info->dev_total_fatal_errs++; + counter = &aer_info->dev_fatal_errs[0]; max = AER_MAX_TYPEOF_UNCOR_ERRS; break; } @@ -653,37 +768,46 @@ static void pci_dev_aer_stats_incr(struct pci_dev 
*pdev, static void pci_rootport_aer_stats_incr(struct pci_dev *pdev, struct aer_err_source *e_src) { - struct aer_stats *aer_stats = pdev->aer_stats; + struct aer_info *aer_info = pdev->aer_info; - if (!aer_stats) + if (!aer_info) return; if (e_src->status & PCI_ERR_ROOT_COR_RCV) - aer_stats->rootport_total_cor_errs++; + aer_info->rootport_total_cor_errs++; if (e_src->status & PCI_ERR_ROOT_UNCOR_RCV) { if (e_src->status & PCI_ERR_ROOT_FATAL_RCV) - aer_stats->rootport_total_fatal_errs++; + aer_info->rootport_total_fatal_errs++; else - aer_stats->rootport_total_nonfatal_errs++; + aer_info->rootport_total_nonfatal_errs++; + } +} + +static int aer_ratelimit(struct pci_dev *dev, unsigned int severity) +{ + switch (severity) { + case AER_NONFATAL: + return __ratelimit(&dev->aer_info->nonfatal_ratelimit); + case AER_CORRECTABLE: + return __ratelimit(&dev->aer_info->correctable_ratelimit); + default: + return 1; /* Don't ratelimit fatal errors */ } } -static void __aer_print_error(struct pci_dev *dev, - struct aer_err_info *info) +static void __aer_print_error(struct pci_dev *dev, struct aer_err_info *info) { const char **strings; unsigned long status = info->status & ~info->mask; - const char *level, *errmsg; + const char *level = info->level; + const char *errmsg; int i; - if (info->severity == AER_CORRECTABLE) { + if (info->severity == AER_CORRECTABLE) strings = aer_correctable_error_string; - level = KERN_WARNING; - } else { + else strings = aer_uncorrectable_error_string; - level = KERN_ERR; - } for_each_set_bit(i, &status, 32) { errmsg = strings[i]; @@ -693,14 +817,39 @@ static void __aer_print_error(struct pci_dev *dev, aer_printk(level, dev, " [%2d] %-22s%s\n", i, errmsg, info->first_error == i ? " (First)" : ""); } - pci_dev_aer_stats_incr(dev, info); } -void aer_print_error(struct pci_dev *dev, struct aer_err_info *info) +static void aer_print_source(struct pci_dev *dev, struct aer_err_info *info, + bool found) +{ + u16 source = info->id; + + pci_info(dev, "%s%s error message received from %04x:%02x:%02x.%d%s\n", + info->multi_error_valid ? "Multiple " : "", + aer_error_severity_string[info->severity], + pci_domain_nr(dev->bus), PCI_BUS_NUM(source), + PCI_SLOT(source), PCI_FUNC(source), + found ? "" : " (no details found)"); +} + +void aer_print_error(struct aer_err_info *info, int i) { - int layer, agent; - int id = pci_dev_id(dev); - const char *level; + struct pci_dev *dev; + int layer, agent, id; + const char *level = info->level; + + if (WARN_ON_ONCE(i >= AER_MAX_MULTI_ERR_DEVICES)) + return; + + dev = info->dev[i]; + id = pci_dev_id(dev); + + pci_dev_aer_stats_incr(dev, info); + trace_aer_event(pci_name(dev), (info->status & ~info->mask), + info->severity, info->tlp_header_valid, &info->tlp); + + if (!info->ratelimit_print[i]) + return; if (!info->status) { pci_err(dev, "PCIe Bus Error: severity=%s, type=Inaccessible, (Unregistered Agent ID)\n", @@ -711,8 +860,6 @@ void aer_print_error(struct pci_dev *dev, struct aer_err_info *info) layer = AER_GET_LAYER_ERROR(info->severity, info->status); agent = AER_GET_AGENT(info->severity, info->status); - level = (info->severity == AER_CORRECTABLE) ?
KERN_WARNING : KERN_ERR; - aer_printk(level, dev, "PCIe Bus Error: severity=%s, type=%s, (%s)\n", aer_error_severity_string[info->severity], aer_error_layer[layer], aer_agent_string[agent]); @@ -723,26 +870,11 @@ void aer_print_error(struct pci_dev *dev, struct aer_err_info *info) __aer_print_error(dev, info); if (info->tlp_header_valid) - pcie_print_tlp_log(dev, &info->tlp, dev_fmt(" ")); + pcie_print_tlp_log(dev, &info->tlp, level, dev_fmt(" ")); out: if (info->id && info->error_dev_num > 1 && info->id == id) pci_err(dev, " Error of this Agent is reported first\n"); - - trace_aer_event(dev_name(&dev->dev), (info->status & ~info->mask), - info->severity, info->tlp_header_valid, &info->tlp); -} - -static void aer_print_port_info(struct pci_dev *dev, struct aer_err_info *info) -{ - u8 bus = info->id >> 8; - u8 devfn = info->id & 0xff; - - pci_info(dev, "%s%s error message received from %04x:%02x:%02x.%d\n", - info->multi_error_valid ? "Multiple " : "", - aer_error_severity_string[info->severity], - pci_domain_nr(dev->bus), bus, PCI_SLOT(devfn), - PCI_FUNC(devfn)); } #ifdef CONFIG_ACPI_APEI_PCIEAER @@ -765,40 +897,48 @@ void pci_print_aer(struct pci_dev *dev, int aer_severity, { int layer, agent, tlp_header_valid = 0; u32 status, mask; - struct aer_err_info info; + struct aer_err_info info = { + .severity = aer_severity, + .first_error = PCI_ERR_CAP_FEP(aer->cap_control), + }; if (aer_severity == AER_CORRECTABLE) { status = aer->cor_status; mask = aer->cor_mask; + info.level = KERN_WARNING; } else { status = aer->uncor_status; mask = aer->uncor_mask; + info.level = KERN_ERR; tlp_header_valid = status & AER_LOG_TLP_MASKS; } - layer = AER_GET_LAYER_ERROR(aer_severity, status); - agent = AER_GET_AGENT(aer_severity, status); - - memset(&info, 0, sizeof(info)); - info.severity = aer_severity; info.status = status; info.mask = mask; - info.first_error = PCI_ERR_CAP_FEP(aer->cap_control); - pci_err(dev, "aer_status: 0x%08x, aer_mask: 0x%08x\n", status, mask); + pci_dev_aer_stats_incr(dev, &info); + trace_aer_event(pci_name(dev), (status & ~mask), + aer_severity, tlp_header_valid, &aer->header_log); + + if (!aer_ratelimit(dev, info.severity)) + return; + + layer = AER_GET_LAYER_ERROR(aer_severity, status); + agent = AER_GET_AGENT(aer_severity, status); + + aer_printk(info.level, dev, "aer_status: 0x%08x, aer_mask: 0x%08x\n", + status, mask); __aer_print_error(dev, &info); - pci_err(dev, "aer_layer=%s, aer_agent=%s\n", - aer_error_layer[layer], aer_agent_string[agent]); + aer_printk(info.level, dev, "aer_layer=%s, aer_agent=%s\n", + aer_error_layer[layer], aer_agent_string[agent]); if (aer_severity != AER_CORRECTABLE) - pci_err(dev, "aer_uncor_severity: 0x%08x\n", - aer->uncor_severity); + aer_printk(info.level, dev, "aer_uncor_severity: 0x%08x\n", + aer->uncor_severity); if (tlp_header_valid) - pcie_print_tlp_log(dev, &aer->header_log, dev_fmt(" ")); - - trace_aer_event(dev_name(&dev->dev), (status & ~mask), - aer_severity, tlp_header_valid, &aer->header_log); + pcie_print_tlp_log(dev, &aer->header_log, info.level, + dev_fmt(" ")); } EXPORT_SYMBOL_NS_GPL(pci_print_aer, "CXL"); @@ -809,12 +949,27 @@ EXPORT_SYMBOL_NS_GPL(pci_print_aer, "CXL"); */ static int add_error_device(struct aer_err_info *e_info, struct pci_dev *dev) { - if (e_info->error_dev_num < AER_MAX_MULTI_ERR_DEVICES) { - e_info->dev[e_info->error_dev_num] = pci_dev_get(dev); - e_info->error_dev_num++; - return 0; + int i = e_info->error_dev_num; + + if (i >= AER_MAX_MULTI_ERR_DEVICES) + return -ENOSPC; + + e_info->dev[i] = 
pci_dev_get(dev); + e_info->error_dev_num++; + + /* + * Ratelimit AER log messages. "dev" is either the source + * identified by the root's Error Source ID or it has an unmasked + * error logged in its own AER Capability. Messages are emitted + * when "ratelimit_print[i]" is non-zero. If we are going to print + * details for a downstream device, make sure we print the Error + * Source ID from the root as well. + */ + if (aer_ratelimit(dev, e_info->severity)) { + e_info->ratelimit_print[i] = 1; + e_info->root_ratelimit_print = 1; } - return -ENOSPC; + return 0; } /** @@ -884,7 +1039,8 @@ static int find_device_iter(struct pci_dev *dev, void *data) /* List this device */ if (add_error_device(e_info, dev)) { /* We cannot handle more... Stop iteration */ - /* TODO: Should print error message here? */ + pci_err(dev, "Exceeded max supported (%d) devices with errors logged\n", + AER_MAX_MULTI_ERR_DEVICES); return 1; } @@ -908,7 +1064,7 @@ static int find_device_iter(struct pci_dev *dev, void *data) * e_info->error_dev_num and e_info->dev[], based on the given information. */ static bool find_source_device(struct pci_dev *parent, - struct aer_err_info *e_info) + struct aer_err_info *e_info) { struct pci_dev *dev = parent; int result; @@ -926,15 +1082,8 @@ static bool find_source_device(struct pci_dev *parent, else pci_walk_bus(parent->subordinate, find_device_iter, e_info); - if (!e_info->error_dev_num) { - u8 bus = e_info->id >> 8; - u8 devfn = e_info->id & 0xff; - - pci_info(parent, "found no error details for %04x:%02x:%02x.%d\n", - pci_domain_nr(parent->bus), bus, PCI_SLOT(devfn), - PCI_FUNC(devfn)); + if (!e_info->error_dev_num) return false; - } return true; } @@ -1141,9 +1290,10 @@ static void aer_recover_work_func(struct work_struct *work) pdev = pci_get_domain_bus_and_slot(entry.domain, entry.bus, entry.devfn); if (!pdev) { - pr_err("no pci_dev for %04x:%02x:%02x.%x\n", - entry.domain, entry.bus, - PCI_SLOT(entry.devfn), PCI_FUNC(entry.devfn)); + pr_err_ratelimited("%04x:%02x:%02x.%x: no pci_dev found\n", - entry.domain, entry.bus, + entry.domain, entry.bus, + PCI_SLOT(entry.devfn), + PCI_FUNC(entry.devfn)); continue; } pci_print_aer(pdev, entry.severity, entry.regs); @@ -1199,19 +1349,26 @@ EXPORT_SYMBOL_GPL(aer_recover_queue); /** * aer_get_device_error_info - read error status from dev and store it to info - * @dev: pointer to the device expected to have an error record * @info: pointer to structure to store the error record + * @i: index into info->dev[] * * Return: 1 on success, 0 on error. * * Note that @info is reused among all error devices. Clear fields properly. */ -int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info) +int aer_get_device_error_info(struct aer_err_info *info, int i) { - int type = pci_pcie_type(dev); - int aer = dev->aer_cap; + struct pci_dev *dev; + int type, aer; u32 aercc; + if (i >= AER_MAX_MULTI_ERR_DEVICES) + return 0; + + dev = info->dev[i]; + aer = dev->aer_cap; + type = pci_pcie_type(dev); + /* Must reset in this function */ info->status = 0; info->tlp_header_valid = 0; @@ -1263,63 +1420,87 @@ static inline void aer_process_err_devices(struct aer_err_info *e_info) /* Report all before handling them, to not lose records by reset etc.
*/ for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) { - if (aer_get_device_error_info(e_info->dev[i], e_info)) - aer_print_error(e_info->dev[i], e_info); + if (aer_get_device_error_info(e_info, i)) + aer_print_error(e_info, i); } for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) { - if (aer_get_device_error_info(e_info->dev[i], e_info)) + if (aer_get_device_error_info(e_info, i)) handle_error_source(e_info->dev[i], e_info); } } /** - * aer_isr_one_error - consume an error detected by Root Port - * @rpc: pointer to the Root Port which holds an error - * @e_src: pointer to an error source + * aer_isr_one_error_type - consume a Correctable or Uncorrectable Error + * detected by Root Port or RCEC + * @root: pointer to Root Port or RCEC that signaled AER interrupt + * @info: pointer to AER error info */ -static void aer_isr_one_error(struct aer_rpc *rpc, - struct aer_err_source *e_src) +static void aer_isr_one_error_type(struct pci_dev *root, + struct aer_err_info *info) { - struct pci_dev *pdev = rpc->rpd; - struct aer_err_info e_info; + bool found; - pci_rootport_aer_stats_incr(pdev, e_src); + found = find_source_device(root, info); /* - * There is a possibility that both correctable error and - * uncorrectable error being logged. Report correctable error first. + * If we're going to log error messages, we've already set + * "info->root_ratelimit_print" and "info->ratelimit_print[i]" to + * non-zero (which enables printing) because this is either an + * ERR_FATAL or we found a device with an error logged in its AER + * Capability. + * + * If we didn't find the Error Source device, at least log the + * Requester ID from the ERR_* Message received by the Root Port or + * RCEC, ratelimited by the RP or RCEC. */ - if (e_src->status & PCI_ERR_ROOT_COR_RCV) { - e_info.id = ERR_COR_ID(e_src->id); - e_info.severity = AER_CORRECTABLE; - - if (e_src->status & PCI_ERR_ROOT_MULTI_COR_RCV) - e_info.multi_error_valid = 1; - else - e_info.multi_error_valid = 0; - aer_print_port_info(pdev, &e_info); + if (info->root_ratelimit_print || + (!found && aer_ratelimit(root, info->severity))) + aer_print_source(root, info, found); - if (find_source_device(pdev, &e_info)) - aer_process_err_devices(&e_info); - } - - if (e_src->status & PCI_ERR_ROOT_UNCOR_RCV) { - e_info.id = ERR_UNCOR_ID(e_src->id); + if (found) + aer_process_err_devices(info); +} - if (e_src->status & PCI_ERR_ROOT_FATAL_RCV) - e_info.severity = AER_FATAL; - else - e_info.severity = AER_NONFATAL; +/** + * aer_isr_one_error - consume error(s) signaled by an AER interrupt from + * Root Port or RCEC + * @root: pointer to Root Port or RCEC that signaled AER interrupt + * @e_src: pointer to an error source + */ +static void aer_isr_one_error(struct pci_dev *root, + struct aer_err_source *e_src) +{ + u32 status = e_src->status; - if (e_src->status & PCI_ERR_ROOT_MULTI_UNCOR_RCV) - e_info.multi_error_valid = 1; - else - e_info.multi_error_valid = 0; + pci_rootport_aer_stats_incr(root, e_src); - aer_print_port_info(pdev, &e_info); + /* + * There is a possibility that both correctable error and + * uncorrectable error being logged. Report correctable error first. + */ + if (status & PCI_ERR_ROOT_COR_RCV) { + int multi = status & PCI_ERR_ROOT_MULTI_COR_RCV; + struct aer_err_info e_info = { + .id = ERR_COR_ID(e_src->id), + .severity = AER_CORRECTABLE, + .level = KERN_WARNING, + .multi_error_valid = multi ? 
1 : 0, + }; + + aer_isr_one_error_type(root, &e_info); + } - if (find_source_device(pdev, &e_info)) - aer_process_err_devices(&e_info); + if (status & PCI_ERR_ROOT_UNCOR_RCV) { + int fatal = status & PCI_ERR_ROOT_FATAL_RCV; + int multi = status & PCI_ERR_ROOT_MULTI_UNCOR_RCV; + struct aer_err_info e_info = { + .id = ERR_UNCOR_ID(e_src->id), + .severity = fatal ? AER_FATAL : AER_NONFATAL, + .level = KERN_ERR, + .multi_error_valid = multi ? 1 : 0, + }; + + aer_isr_one_error_type(root, &e_info); } } @@ -1340,7 +1521,7 @@ static irqreturn_t aer_isr(int irq, void *context) return IRQ_NONE; while (kfifo_get(&rpc->aer_fifo, &e_src)) - aer_isr_one_error(rpc, &e_src); + aer_isr_one_error(rpc->rpd, &e_src); return IRQ_HANDLED; } diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c index 29fcb0689a91..919a05b97647 100644 --- a/drivers/pci/pcie/aspm.c +++ b/drivers/pci/pcie/aspm.c @@ -245,7 +245,7 @@ struct pcie_link_state { u32 clkpm_disable:1; /* Clock PM disabled */ }; -static int aspm_disabled, aspm_force; +static bool aspm_disabled, aspm_force; static bool aspm_support_enabled = true; static DEFINE_MUTEX(aspm_lock); static LIST_HEAD(link_list); @@ -884,10 +884,9 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist) /* Configure the ASPM L1 substates. Caller must disable L1 first. */ static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state) { - u32 val; + u32 val = 0; struct pci_dev *child = link->downstream, *parent = link->pdev; - val = 0; if (state & PCIE_LINK_STATE_L1_1) val |= PCI_L1SS_CTL1_ASPM_L1_1; if (state & PCIE_LINK_STATE_L1_2) @@ -1712,11 +1711,11 @@ static int __init pcie_aspm_disable(char *str) { if (!strcmp(str, "off")) { aspm_policy = POLICY_DEFAULT; - aspm_disabled = 1; + aspm_disabled = true; aspm_support_enabled = false; pr_info("PCIe ASPM is disabled\n"); } else if (!strcmp(str, "force")) { - aspm_force = 1; + aspm_force = true; pr_info("PCIe ASPM is forcibly enabled\n"); } return 1; @@ -1734,7 +1733,7 @@ void pcie_no_aspm(void) */ if (!aspm_force) { aspm_policy = POLICY_DEFAULT; - aspm_disabled = 1; + aspm_disabled = true; } } diff --git a/drivers/pci/pcie/bwctrl.c b/drivers/pci/pcie/bwctrl.c index d8d2aa85a229..36f939f23d34 100644 --- a/drivers/pci/pcie/bwctrl.c +++ b/drivers/pci/pcie/bwctrl.c @@ -38,24 +38,14 @@ /** * struct pcie_bwctrl_data - PCIe bandwidth controller * @set_speed_mutex: Serializes link speed changes - * @lbms_count: Count for LBMS (since last reset) * @cdev: Thermal cooling device associated with the port */ struct pcie_bwctrl_data { struct mutex set_speed_mutex; - atomic_t lbms_count; struct thermal_cooling_device *cdev; }; -/* - * Prevent port removal during LBMS count accessors and Link Speed changes. - * - * These have to be differentiated because pcie_bwctrl_change_speed() calls - * pcie_retrain_link() which uses LBMS count reset accessor on success - * (using just one rwsem triggers "possible recursive locking detected" - * warning). - */ -static DECLARE_RWSEM(pcie_bwctrl_lbms_rwsem); +/* Prevent port removal during Link Speed changes. */ static DECLARE_RWSEM(pcie_bwctrl_setspeed_rwsem); static bool pcie_valid_speed(enum pci_bus_speed speed) @@ -127,18 +117,7 @@ static int pcie_bwctrl_change_speed(struct pci_dev *port, u16 target_speed, bool if (ret != PCIBIOS_SUCCESSFUL) return pcibios_err_to_errno(ret); - ret = pcie_retrain_link(port, use_lt); - if (ret < 0) - return ret; - - /* - * Ensure link speed updates also with platforms that have problems - * with notifications. 
- */ - if (port->subordinate) - pcie_update_link_speed(port->subordinate); - - return 0; + return pcie_retrain_link(port, use_lt); } /** @@ -202,15 +181,14 @@ int pcie_set_target_speed(struct pci_dev *port, enum pci_bus_speed speed_req, static void pcie_bwnotif_enable(struct pcie_device *srv) { - struct pcie_bwctrl_data *data = srv->port->link_bwctrl; struct pci_dev *port = srv->port; u16 link_status; int ret; - /* Count LBMS seen so far as one */ + /* Note if LBMS has been seen so far */ ret = pcie_capability_read_word(port, PCI_EXP_LNKSTA, &link_status); if (ret == PCIBIOS_SUCCESSFUL && link_status & PCI_EXP_LNKSTA_LBMS) - atomic_inc(&data->lbms_count); + set_bit(PCI_LINK_LBMS_SEEN, &port->priv_flags); pcie_capability_set_word(port, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_LBMIE | PCI_EXP_LNKCTL_LABIE); @@ -233,7 +211,6 @@ static void pcie_bwnotif_disable(struct pci_dev *port) static irqreturn_t pcie_bwnotif_irq(int irq, void *context) { struct pcie_device *srv = context; - struct pcie_bwctrl_data *data = srv->port->link_bwctrl; struct pci_dev *port = srv->port; u16 link_status, events; int ret; @@ -247,7 +224,7 @@ static irqreturn_t pcie_bwnotif_irq(int irq, void *context) return IRQ_NONE; if (events & PCI_EXP_LNKSTA_LBMS) - atomic_inc(&data->lbms_count); + set_bit(PCI_LINK_LBMS_SEEN, &port->priv_flags); pcie_capability_write_word(port, PCI_EXP_LNKSTA, events); @@ -262,31 +239,10 @@ static irqreturn_t pcie_bwnotif_irq(int irq, void *context) return IRQ_HANDLED; } -void pcie_reset_lbms_count(struct pci_dev *port) +void pcie_reset_lbms(struct pci_dev *port) { - struct pcie_bwctrl_data *data; - - guard(rwsem_read)(&pcie_bwctrl_lbms_rwsem); - data = port->link_bwctrl; - if (data) - atomic_set(&data->lbms_count, 0); - else - pcie_capability_write_word(port, PCI_EXP_LNKSTA, - PCI_EXP_LNKSTA_LBMS); -} - -int pcie_lbms_count(struct pci_dev *port, unsigned long *val) -{ - struct pcie_bwctrl_data *data; - - guard(rwsem_read)(&pcie_bwctrl_lbms_rwsem); - data = port->link_bwctrl; - if (!data) - return -ENOTTY; - - *val = atomic_read(&data->lbms_count); - - return 0; + clear_bit(PCI_LINK_LBMS_SEEN, &port->priv_flags); + pcie_capability_write_word(port, PCI_EXP_LNKSTA, PCI_EXP_LNKSTA_LBMS); } static int pcie_bwnotif_probe(struct pcie_device *srv) @@ -308,18 +264,16 @@ static int pcie_bwnotif_probe(struct pcie_device *srv) return ret; scoped_guard(rwsem_write, &pcie_bwctrl_setspeed_rwsem) { - scoped_guard(rwsem_write, &pcie_bwctrl_lbms_rwsem) { - port->link_bwctrl = data; - - ret = request_irq(srv->irq, pcie_bwnotif_irq, - IRQF_SHARED, "PCIe bwctrl", srv); - if (ret) { - port->link_bwctrl = NULL; - return ret; - } + port->link_bwctrl = data; - pcie_bwnotif_enable(srv); + ret = request_irq(srv->irq, pcie_bwnotif_irq, + IRQF_SHARED, "PCIe bwctrl", srv); + if (ret) { + port->link_bwctrl = NULL; + return ret; } + + pcie_bwnotif_enable(srv); } pci_dbg(port, "enabled with IRQ %d\n", srv->irq); @@ -339,13 +293,11 @@ static void pcie_bwnotif_remove(struct pcie_device *srv) pcie_cooling_device_unregister(data->cdev); scoped_guard(rwsem_write, &pcie_bwctrl_setspeed_rwsem) { - scoped_guard(rwsem_write, &pcie_bwctrl_lbms_rwsem) { - pcie_bwnotif_disable(srv->port); + pcie_bwnotif_disable(srv->port); - free_irq(srv->irq, srv); + free_irq(srv->irq, srv); - srv->port->link_bwctrl = NULL; - } + srv->port->link_bwctrl = NULL; } } diff --git a/drivers/pci/pcie/dpc.c b/drivers/pci/pcie/dpc.c index df42f15c9829..fc18349614d7 100644 --- a/drivers/pci/pcie/dpc.c +++ b/drivers/pci/pcie/dpc.c @@ -222,7 +222,7 @@ static void 
dpc_process_rp_pio_error(struct pci_dev *pdev) dpc_tlp_log_len(pdev), pdev->subordinate->flit_mode, &tlp_log); - pcie_print_tlp_log(pdev, &tlp_log, dev_fmt("")); + pcie_print_tlp_log(pdev, &tlp_log, KERN_ERR, dev_fmt("")); if (pdev->dpc_rp_log_size < PCIE_STD_NUM_TLP_HEADERLOG + 1) goto clear_status; @@ -252,46 +252,59 @@ static int dpc_get_aer_uncorrect_severity(struct pci_dev *dev, else info->severity = AER_NONFATAL; + info->level = KERN_ERR; + + info->dev[0] = dev; + info->error_dev_num = 1; + return 1; } void dpc_process_error(struct pci_dev *pdev) { u16 cap = pdev->dpc_cap, status, source, reason, ext_reason; - struct aer_err_info info; + struct aer_err_info info = {}; pci_read_config_word(pdev, cap + PCI_EXP_DPC_STATUS, &status); - pci_read_config_word(pdev, cap + PCI_EXP_DPC_SOURCE_ID, &source); - - pci_info(pdev, "containment event, status:%#06x source:%#06x\n", - status, source); reason = status & PCI_EXP_DPC_STATUS_TRIGGER_RSN; - ext_reason = status & PCI_EXP_DPC_STATUS_TRIGGER_RSN_EXT; - pci_warn(pdev, "%s detected\n", - (reason == PCI_EXP_DPC_STATUS_TRIGGER_RSN_UNCOR) ? - "unmasked uncorrectable error" : - (reason == PCI_EXP_DPC_STATUS_TRIGGER_RSN_NFE) ? - "ERR_NONFATAL" : - (reason == PCI_EXP_DPC_STATUS_TRIGGER_RSN_FE) ? - "ERR_FATAL" : - (ext_reason == PCI_EXP_DPC_STATUS_TRIGGER_RSN_RP_PIO) ? - "RP PIO error" : - (ext_reason == PCI_EXP_DPC_STATUS_TRIGGER_RSN_SW_TRIGGER) ? - "software trigger" : - "reserved error"); - - /* show RP PIO error detail information */ - if (pdev->dpc_rp_extensions && - reason == PCI_EXP_DPC_STATUS_TRIGGER_RSN_IN_EXT && - ext_reason == PCI_EXP_DPC_STATUS_TRIGGER_RSN_RP_PIO) - dpc_process_rp_pio_error(pdev); - else if (reason == PCI_EXP_DPC_STATUS_TRIGGER_RSN_UNCOR && - dpc_get_aer_uncorrect_severity(pdev, &info) && - aer_get_device_error_info(pdev, &info)) { - aer_print_error(pdev, &info); - pci_aer_clear_nonfatal_status(pdev); - pci_aer_clear_fatal_status(pdev); + + switch (reason) { + case PCI_EXP_DPC_STATUS_TRIGGER_RSN_UNCOR: + pci_warn(pdev, "containment event, status:%#06x: unmasked uncorrectable error detected\n", + status); + if (dpc_get_aer_uncorrect_severity(pdev, &info) && + aer_get_device_error_info(&info, 0)) { + aer_print_error(&info, 0); + pci_aer_clear_nonfatal_status(pdev); + pci_aer_clear_fatal_status(pdev); + } + break; + case PCI_EXP_DPC_STATUS_TRIGGER_RSN_NFE: + case PCI_EXP_DPC_STATUS_TRIGGER_RSN_FE: + pci_read_config_word(pdev, cap + PCI_EXP_DPC_SOURCE_ID, + &source); + pci_warn(pdev, "containment event, status:%#06x, %s received from %04x:%02x:%02x.%d\n", + status, + (reason == PCI_EXP_DPC_STATUS_TRIGGER_RSN_FE) ? + "ERR_FATAL" : "ERR_NONFATAL", + pci_domain_nr(pdev->bus), PCI_BUS_NUM(source), + PCI_SLOT(source), PCI_FUNC(source)); + break; + case PCI_EXP_DPC_STATUS_TRIGGER_RSN_IN_EXT: + ext_reason = status & PCI_EXP_DPC_STATUS_TRIGGER_RSN_EXT; + pci_warn(pdev, "containment event, status:%#06x: %s detected\n", + status, + (ext_reason == PCI_EXP_DPC_STATUS_TRIGGER_RSN_RP_PIO) ? + "RP PIO error" : + (ext_reason == PCI_EXP_DPC_STATUS_TRIGGER_RSN_SW_TRIGGER) ? 
+ "software trigger" : + "reserved error"); + /* show RP PIO error detail information */ + if (ext_reason == PCI_EXP_DPC_STATUS_TRIGGER_RSN_RP_PIO && + pdev->dpc_rp_extensions) + dpc_process_rp_pio_error(pdev); + break; } } diff --git a/drivers/pci/pcie/err.c b/drivers/pci/pcie/err.c index 31090770fffc..de6381c690f5 100644 --- a/drivers/pci/pcie/err.c +++ b/drivers/pci/pcie/err.c @@ -271,7 +271,6 @@ failed: pci_uevent_ers(bridge, PCI_ERS_RESULT_DISCONNECT); - /* TODO: Should kernel panic here? */ pci_info(bridge, "device recovery failed\n"); return status; diff --git a/drivers/pci/pcie/portdrv.c b/drivers/pci/pcie/portdrv.c index e8318fd5f6ed..d1b68c18444f 100644 --- a/drivers/pci/pcie/portdrv.c +++ b/drivers/pci/pcie/portdrv.c @@ -220,7 +220,7 @@ static int get_port_device_capability(struct pci_dev *dev) struct pci_host_bridge *host = pci_find_host_bridge(dev->bus); int services = 0; - if (dev->is_hotplug_bridge && + if (dev->is_pciehp && (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT || pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) && (pcie_ports_native || host->native_pcie_hotplug)) { diff --git a/drivers/pci/pcie/ptm.c b/drivers/pci/pcie/ptm.c index 7cfb6c0d5dcb..65e4b008be00 100644 --- a/drivers/pci/pcie/ptm.c +++ b/drivers/pci/pcie/ptm.c @@ -5,6 +5,7 @@ */ #include <linux/bitfield.h> +#include <linux/debugfs.h> #include <linux/module.h> #include <linux/init.h> #include <linux/pci.h> @@ -252,3 +253,304 @@ bool pcie_ptm_enabled(struct pci_dev *dev) return dev->ptm_enabled; } EXPORT_SYMBOL(pcie_ptm_enabled); + +#if IS_ENABLED(CONFIG_DEBUG_FS) +static ssize_t context_update_write(struct file *file, const char __user *ubuf, + size_t count, loff_t *ppos) +{ + struct pci_ptm_debugfs *ptm_debugfs = file->private_data; + char buf[7]; + int ret; + u8 mode; + + if (!ptm_debugfs->ops->context_update_write) + return -EOPNOTSUPP; + + if (count < 1 || count >= sizeof(buf)) + return -EINVAL; + + ret = copy_from_user(buf, ubuf, count); + if (ret) + return -EFAULT; + + buf[count] = '\0'; + + if (sysfs_streq(buf, "auto")) + mode = PCIE_PTM_CONTEXT_UPDATE_AUTO; + else if (sysfs_streq(buf, "manual")) + mode = PCIE_PTM_CONTEXT_UPDATE_MANUAL; + else + return -EINVAL; + + mutex_lock(&ptm_debugfs->lock); + ret = ptm_debugfs->ops->context_update_write(ptm_debugfs->pdata, mode); + mutex_unlock(&ptm_debugfs->lock); + if (ret) + return ret; + + return count; +} + +static ssize_t context_update_read(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ + struct pci_ptm_debugfs *ptm_debugfs = file->private_data; + char buf[8]; /* Extra space for NULL termination at the end */ + ssize_t pos; + u8 mode; + + if (!ptm_debugfs->ops->context_update_read) + return -EOPNOTSUPP; + + mutex_lock(&ptm_debugfs->lock); + ptm_debugfs->ops->context_update_read(ptm_debugfs->pdata, &mode); + mutex_unlock(&ptm_debugfs->lock); + + if (mode == PCIE_PTM_CONTEXT_UPDATE_AUTO) + pos = scnprintf(buf, sizeof(buf), "auto\n"); + else + pos = scnprintf(buf, sizeof(buf), "manual\n"); + + return simple_read_from_buffer(ubuf, count, ppos, buf, pos); +} + +static const struct file_operations context_update_fops = { + .open = simple_open, + .read = context_update_read, + .write = context_update_write, +}; + +static int context_valid_get(void *data, u64 *val) +{ + struct pci_ptm_debugfs *ptm_debugfs = data; + bool valid; + int ret; + + if (!ptm_debugfs->ops->context_valid_read) + return -EOPNOTSUPP; + + mutex_lock(&ptm_debugfs->lock); + ret = ptm_debugfs->ops->context_valid_read(ptm_debugfs->pdata, &valid); + 
mutex_unlock(&ptm_debugfs->lock); + if (ret) + return ret; + + *val = valid; + + return 0; +} + +static int context_valid_set(void *data, u64 val) +{ + struct pci_ptm_debugfs *ptm_debugfs = data; + int ret; + + if (!ptm_debugfs->ops->context_valid_write) + return -EOPNOTSUPP; + + mutex_lock(&ptm_debugfs->lock); + ret = ptm_debugfs->ops->context_valid_write(ptm_debugfs->pdata, !!val); + mutex_unlock(&ptm_debugfs->lock); + + return ret; +} + +DEFINE_DEBUGFS_ATTRIBUTE(context_valid_fops, context_valid_get, + context_valid_set, "%llu\n"); + +static int local_clock_get(void *data, u64 *val) +{ + struct pci_ptm_debugfs *ptm_debugfs = data; + u64 clock; + int ret; + + if (!ptm_debugfs->ops->local_clock_read) + return -EOPNOTSUPP; + + ret = ptm_debugfs->ops->local_clock_read(ptm_debugfs->pdata, &clock); + if (ret) + return ret; + + *val = clock; + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(local_clock_fops, local_clock_get, NULL, "%llu\n"); + +static int master_clock_get(void *data, u64 *val) +{ + struct pci_ptm_debugfs *ptm_debugfs = data; + u64 clock; + int ret; + + if (!ptm_debugfs->ops->master_clock_read) + return -EOPNOTSUPP; + + ret = ptm_debugfs->ops->master_clock_read(ptm_debugfs->pdata, &clock); + if (ret) + return ret; + + *val = clock; + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(master_clock_fops, master_clock_get, NULL, "%llu\n"); + +static int t1_get(void *data, u64 *val) +{ + struct pci_ptm_debugfs *ptm_debugfs = data; + u64 clock; + int ret; + + if (!ptm_debugfs->ops->t1_read) + return -EOPNOTSUPP; + + ret = ptm_debugfs->ops->t1_read(ptm_debugfs->pdata, &clock); + if (ret) + return ret; + + *val = clock; + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(t1_fops, t1_get, NULL, "%llu\n"); + +static int t2_get(void *data, u64 *val) +{ + struct pci_ptm_debugfs *ptm_debugfs = data; + u64 clock; + int ret; + + if (!ptm_debugfs->ops->t2_read) + return -EOPNOTSUPP; + + ret = ptm_debugfs->ops->t2_read(ptm_debugfs->pdata, &clock); + if (ret) + return ret; + + *val = clock; + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(t2_fops, t2_get, NULL, "%llu\n"); + +static int t3_get(void *data, u64 *val) +{ + struct pci_ptm_debugfs *ptm_debugfs = data; + u64 clock; + int ret; + + if (!ptm_debugfs->ops->t3_read) + return -EOPNOTSUPP; + + ret = ptm_debugfs->ops->t3_read(ptm_debugfs->pdata, &clock); + if (ret) + return ret; + + *val = clock; + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(t3_fops, t3_get, NULL, "%llu\n"); + +static int t4_get(void *data, u64 *val) +{ + struct pci_ptm_debugfs *ptm_debugfs = data; + u64 clock; + int ret; + + if (!ptm_debugfs->ops->t4_read) + return -EOPNOTSUPP; + + ret = ptm_debugfs->ops->t4_read(ptm_debugfs->pdata, &clock); + if (ret) + return ret; + + *val = clock; + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(t4_fops, t4_get, NULL, "%llu\n"); + +#define pcie_ptm_create_debugfs_file(pdata, mode, attr) \ + do { \ + if (ops->attr##_visible && ops->attr##_visible(pdata)) \ + debugfs_create_file(#attr, mode, ptm_debugfs->debugfs, \ + ptm_debugfs, &attr##_fops); \ + } while (0) + +/* + * pcie_ptm_create_debugfs() - Create debugfs entries for the PTM context + * @dev: PTM capable component device + * @pdata: Private data of the PTM capable component device + * @ops: PTM callback structure + * + * Create debugfs entries for exposing the PTM context of the PTM capable + * components such as Root Complex and Endpoint controllers. + * + * Return: Pointer to 'struct pci_ptm_debugfs' if success, NULL otherwise. 
diff --git a/drivers/pci/pcie/tlp.c b/drivers/pci/pcie/tlp.c
index 890d5391d7f5..71f8fc9ea2ed 100644
--- a/drivers/pci/pcie/tlp.c
+++ b/drivers/pci/pcie/tlp.c
@@ -98,12 +98,14 @@ int pcie_read_tlp_log(struct pci_dev *dev, int where, int where2,
  * pcie_print_tlp_log - Print TLP Header / Prefix Log contents
  * @dev: PCIe device
  * @log: TLP Log structure
+ * @level: Printk log level
  * @pfx: String prefix
  *
  * Prints TLP Header and Prefix Log information held by @log.
  */
 void pcie_print_tlp_log(const struct pci_dev *dev,
-			const struct pcie_tlp_log *log, const char *pfx)
+			const struct pcie_tlp_log *log, const char *level,
+			const char *pfx)
 {
 	/* EE_PREFIX_STR fits the extended DW space needed for the Flit mode */
 	char buf[11 * PCIE_STD_MAX_TLP_HEADERLOG + 1];
@@ -130,6 +132,6 @@ void pcie_print_tlp_log(const struct pci_dev *dev,
 		}
 	}
 
-	pci_err(dev, "%sTLP Header%s: %s\n", pfx,
+	dev_printk(level, &dev->dev, "%sTLP Header%s: %s\n", pfx,
 		   log->flit ? " (Flit)" : "", buf);
 }
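With the new @level parameter, callers in the PCI core (AER, DPC) choose the severity per event instead of being pinned to pci_err(). A hedged sketch of such a call site; foo_log_tlp and the fatal flag are illustrative, pcie_print_tlp_log() is the function changed above:

static void foo_log_tlp(struct pci_dev *dev, const struct pcie_tlp_log *log,
			bool fatal)
{
	/* Severity picked at the call site; "  " mimics AER-style indenting */
	pcie_print_tlp_log(dev, log, fatal ? KERN_ERR : KERN_WARNING, "  ");
}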
" (Flit)" : "", buf); } diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 364fa2a514f8..f41128f91ca7 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -1678,7 +1678,7 @@ void set_pcie_hotplug_bridge(struct pci_dev *pdev) pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, ®32); if (reg32 & PCI_EXP_SLTCAP_HPC) - pdev->is_hotplug_bridge = 1; + pdev->is_hotplug_bridge = pdev->is_pciehp = 1; } static void set_pcie_thunderbolt(struct pci_dev *dev) @@ -2058,7 +2058,7 @@ int pci_setup_device(struct pci_dev *dev) if (class == PCI_CLASS_BRIDGE_PCI) goto bad; pci_read_irq(dev); - pci_read_bases(dev, 6, PCI_ROM_ADDRESS); + pci_read_bases(dev, PCI_STD_NUM_BARS, PCI_ROM_ADDRESS); pci_subsystem_ids(dev, &dev->subsystem_vendor, &dev->subsystem_device); @@ -2508,6 +2508,7 @@ bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l, } EXPORT_SYMBOL(pci_bus_read_dev_vendor_id); +#if IS_ENABLED(CONFIG_PCI_PWRCTRL) static struct platform_device *pci_pwrctrl_create_device(struct pci_bus *bus, int devfn) { struct pci_host_bridge *host = pci_find_host_bridge(bus); @@ -2537,6 +2538,12 @@ static struct platform_device *pci_pwrctrl_create_device(struct pci_bus *bus, in return pdev; } +#else +static struct platform_device *pci_pwrctrl_create_device(struct pci_bus *bus, int devfn) +{ + return NULL; +} +#endif /* * Read the config data for a PCI device, sanity-check it, @@ -2595,6 +2602,15 @@ void pcie_report_downtraining(struct pci_dev *dev) __pcie_print_link_status(dev, false); } +static void pci_imm_ready_init(struct pci_dev *dev) +{ + u16 status; + + pci_read_config_word(dev, PCI_STATUS, &status); + if (status & PCI_STATUS_IMM_READY) + dev->imm_ready = 1; +} + static void pci_init_capabilities(struct pci_dev *dev) { pci_ea_init(dev); /* Enhanced Allocation */ @@ -2604,6 +2620,7 @@ static void pci_init_capabilities(struct pci_dev *dev) /* Buffers for saving PCIe and PCI-X capabilities */ pci_allocate_cap_save_buffers(dev); + pci_imm_ready_init(dev); /* Immediate Readiness */ pci_pm_init(dev); /* Power Management */ pci_vpd_init(dev); /* Vital Product Data */ pci_configure_ari(dev); /* Alternative Routing-ID Forwarding */ @@ -2711,7 +2728,6 @@ void pci_device_add(struct pci_dev *dev, struct pci_bus *bus) pci_set_msi_domain(dev); /* Notifier could use PCI capabilities */ - dev->match_driver = false; ret = device_add(&dev->dev); WARN_ON(ret < 0); diff --git a/drivers/pci/pwrctrl/Kconfig b/drivers/pci/pwrctrl/Kconfig index 990cab67d413..6956c1854811 100644 --- a/drivers/pci/pwrctrl/Kconfig +++ b/drivers/pci/pwrctrl/Kconfig @@ -1,19 +1,19 @@ # SPDX-License-Identifier: GPL-2.0-only -config HAVE_PWRCTL +config HAVE_PWRCTRL bool -config PCI_PWRCTL +config PCI_PWRCTRL tristate -config PCI_PWRCTL_PWRSEQ +config PCI_PWRCTRL_PWRSEQ tristate select POWER_SEQUENCING - select PCI_PWRCTL + select PCI_PWRCTRL -config PCI_PWRCTL_SLOT +config PCI_PWRCTRL_SLOT tristate "PCI Power Control driver for PCI slots" - select PCI_PWRCTL + select PCI_PWRCTRL help Say Y here to enable the PCI Power Control driver to control the power state of PCI slots. @@ -21,3 +21,13 @@ config PCI_PWRCTL_SLOT This is a generic driver that controls the power state of different PCI slots. The voltage regulators powering the rails of the PCI slots are expected to be defined in the devicetree node of the PCI bridge. 
diff --git a/drivers/pci/pwrctrl/Makefile b/drivers/pci/pwrctrl/Makefile
index ddfb12c5aadf..a4e5808d7850 100644
--- a/drivers/pci/pwrctrl/Makefile
+++ b/drivers/pci/pwrctrl/Makefile
@@ -1,9 +1,9 @@
 # SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_PCI_PWRCTL)		+= pci-pwrctrl-core.o
+obj-$(CONFIG_PCI_PWRCTRL)		+= pci-pwrctrl-core.o
 pci-pwrctrl-core-y			:= core.o
 
-obj-$(CONFIG_PCI_PWRCTL_PWRSEQ)		+= pci-pwrctrl-pwrseq.o
+obj-$(CONFIG_PCI_PWRCTRL_PWRSEQ)	+= pci-pwrctrl-pwrseq.o
 
-obj-$(CONFIG_PCI_PWRCTL_SLOT)		+= pci-pwrctl-slot.o
-pci-pwrctl-slot-y			:= slot.o
+obj-$(CONFIG_PCI_PWRCTRL_SLOT)		+= pci-pwrctrl-slot.o
+pci-pwrctrl-slot-y			:= slot.o
diff --git a/drivers/pci/pwrctrl/core.c b/drivers/pci/pwrctrl/core.c
index 9cc7e2b7f2b5..6bdbfed584d6 100644
--- a/drivers/pci/pwrctrl/core.c
+++ b/drivers/pci/pwrctrl/core.c
@@ -101,6 +101,8 @@ EXPORT_SYMBOL_GPL(pci_pwrctrl_device_set_ready);
  */
 void pci_pwrctrl_device_unset_ready(struct pci_pwrctrl *pwrctrl)
 {
+	cancel_work_sync(&pwrctrl->work);
+
 	/*
 	 * We don't have to delete the link here. Typically, this function
	 * is only called when the power control device is being detached. If
diff --git a/drivers/pci/pwrctrl/slot.c b/drivers/pci/pwrctrl/slot.c
index 18becc144913..6e138310b45b 100644
--- a/drivers/pci/pwrctrl/slot.c
+++ b/drivers/pci/pwrctrl/slot.c
@@ -4,6 +4,7 @@
  * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
  */
+#include <linux/clk.h>
 #include <linux/device.h>
 #include <linux/mod_devicetable.h>
 #include <linux/module.h>
@@ -30,6 +31,7 @@ static int pci_pwrctrl_slot_probe(struct platform_device *pdev)
 {
 	struct pci_pwrctrl_slot_data *slot;
 	struct device *dev = &pdev->dev;
+	struct clk *clk;
 	int ret;
 
 	slot = devm_kzalloc(dev, sizeof(*slot), GFP_KERNEL);
@@ -55,6 +57,12 @@ static int pci_pwrctrl_slot_probe(struct platform_device *pdev)
 	if (ret)
 		goto err_regulator_disable;
 
+	clk = devm_clk_get_optional_enabled(dev, NULL);
+	if (IS_ERR(clk)) {
+		return dev_err_probe(dev, PTR_ERR(clk),
+				     "Failed to enable slot clock\n");
+	}
+
 	pci_pwrctrl_init(&slot->ctx, dev);
 
 	ret = devm_pci_pwrctrl_device_set_ready(dev, &slot->ctx);
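slot.c now claims an optional slot clock through devm_clk_get_optional_enabled(), which returns NULL (not an error) when the firmware describes no clock and leaves disable/put to devres. The pattern in isolation, as a hedged sketch with an invented foo_probe:

#include <linux/clk.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	struct clk *clk;

	/*
	 * NULL when no clock is described; ERR_PTR on a real failure.
	 * devres disables and releases the clock on driver detach.
	 */
	clk = devm_clk_get_optional_enabled(&pdev->dev, NULL);
	if (IS_ERR(clk))
		return dev_err_probe(&pdev->dev, PTR_ERR(clk),
				     "failed to enable clock\n");

	return 0;
}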
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 8d610c17e0f2..d97335a40193 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -38,14 +38,10 @@
 static bool pcie_lbms_seen(struct pci_dev *dev, u16 lnksta)
 {
-	unsigned long count;
-	int ret;
-
-	ret = pcie_lbms_count(dev, &count);
-	if (ret < 0)
-		return lnksta & PCI_EXP_LNKSTA_LBMS;
+	if (test_bit(PCI_LINK_LBMS_SEEN, &dev->priv_flags))
+		return true;
 
-	return count > 0;
+	return lnksta & PCI_EXP_LNKSTA_LBMS;
 }
 
 /*
@@ -109,13 +105,13 @@ int pcie_failed_link_retrain(struct pci_dev *dev)
 	    !pcie_cap_has_lnkctl2(dev) || !dev->link_active_reporting)
 		return ret;
 
-	pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &lnkctl2);
 	pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
 	if (!(lnksta & PCI_EXP_LNKSTA_DLLLA) && pcie_lbms_seen(dev, lnksta)) {
-		u16 oldlnkctl2 = lnkctl2;
+		u16 oldlnkctl2;
 
 		pci_info(dev, "broken device, retraining non-functional downstream link at 2.5GT/s\n");
 
+		pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &oldlnkctl2);
 		ret = pcie_set_target_speed(dev, PCIE_SPEED_2_5GT, false);
 		if (ret) {
 			pci_info(dev, "retraining failed\n");
@@ -127,6 +123,8 @@
 		pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
 	}
 
+	pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &lnkctl2);
+
 	if ((lnksta & PCI_EXP_LNKSTA_DLLLA) &&
 	    (lnkctl2 & PCI_EXP_LNKCTL2_TLS) == PCI_EXP_LNKCTL2_TLS_2_5GT &&
 	    pci_match_id(ids, dev)) {
@@ -1990,12 +1988,12 @@ static void quirk_huawei_pcie_sva(struct pci_dev *pdev)
 	    device_create_managed_software_node(&pdev->dev, properties, NULL))
 		pci_warn(pdev, "could not add stall property");
 }
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_HUAWEI, 0xa250, quirk_huawei_pcie_sva);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_HUAWEI, 0xa251, quirk_huawei_pcie_sva);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_HUAWEI, 0xa255, quirk_huawei_pcie_sva);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_HUAWEI, 0xa256, quirk_huawei_pcie_sva);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_HUAWEI, 0xa258, quirk_huawei_pcie_sva);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_HUAWEI, 0xa259, quirk_huawei_pcie_sva);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HUAWEI, 0xa250, quirk_huawei_pcie_sva);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HUAWEI, 0xa251, quirk_huawei_pcie_sva);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HUAWEI, 0xa255, quirk_huawei_pcie_sva);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HUAWEI, 0xa256, quirk_huawei_pcie_sva);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HUAWEI, 0xa258, quirk_huawei_pcie_sva);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HUAWEI, 0xa259, quirk_huawei_pcie_sva);
 
 /*
  * It's possible for the MSI to get corrupted if SHPC and ACPI are used
@@ -4995,6 +4993,18 @@ static int pci_quirk_brcm_acs(struct pci_dev *dev, u16 acs_flags)
 			     PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
 }
 
+static int pci_quirk_loongson_acs(struct pci_dev *dev, u16 acs_flags)
+{
+	/*
+	 * Loongson PCIe Root Ports don't advertise an ACS capability, but
+	 * they do not allow peer-to-peer transactions between Root Ports.
+	 * Allow each Root Port to be in a separate IOMMU group by masking
+	 * SV/RR/CR/UF bits.
+	 */
+	return pci_acs_ctrl_enabled(acs_flags,
+				    PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
+}
+
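The Huawei SVA quirk moves from the FINAL to the HEADER fixup phase, so the software node exists before later enumeration stages look at the device. For reference, a hedged sketch of declaring a header-phase fixup; the device ID 0x1234 and the fixup body are invented, while DECLARE_PCI_FIXUP_HEADER() and PCI_DEV_FLAGS_NO_BUS_RESET are real kernel interfaces:

static void foo_early_fixup(struct pci_dev *pdev)
{
	/* Header fixups run from pci_setup_device(), before driver probe */
	pdev->dev_flags |= PCI_DEV_FLAGS_NO_BUS_RESET;	/* illustrative only */
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_REDHAT, 0x1234, foo_early_fixup);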
 /*
  * Wangxun 40G/25G/10G/1G NICs have no ACS capability, but on
  * multi-function devices, the hardware isolates the functions by
@@ -5128,6 +5138,17 @@ static const struct pci_dev_acs_enabled {
 	{ PCI_VENDOR_ID_BROADCOM, 0x1762, pci_quirk_mf_endpoint_acs },
 	{ PCI_VENDOR_ID_BROADCOM, 0x1763, pci_quirk_mf_endpoint_acs },
 	{ PCI_VENDOR_ID_BROADCOM, 0xD714, pci_quirk_brcm_acs },
+	/* Loongson PCIe Root Ports */
+	{ PCI_VENDOR_ID_LOONGSON, 0x3C09, pci_quirk_loongson_acs },
+	{ PCI_VENDOR_ID_LOONGSON, 0x3C19, pci_quirk_loongson_acs },
+	{ PCI_VENDOR_ID_LOONGSON, 0x3C29, pci_quirk_loongson_acs },
+	{ PCI_VENDOR_ID_LOONGSON, 0x7A09, pci_quirk_loongson_acs },
+	{ PCI_VENDOR_ID_LOONGSON, 0x7A19, pci_quirk_loongson_acs },
+	{ PCI_VENDOR_ID_LOONGSON, 0x7A29, pci_quirk_loongson_acs },
+	{ PCI_VENDOR_ID_LOONGSON, 0x7A39, pci_quirk_loongson_acs },
+	{ PCI_VENDOR_ID_LOONGSON, 0x7A49, pci_quirk_loongson_acs },
+	{ PCI_VENDOR_ID_LOONGSON, 0x7A59, pci_quirk_loongson_acs },
+	{ PCI_VENDOR_ID_LOONGSON, 0x7A69, pci_quirk_loongson_acs },
 	/* Amazon Annapurna Labs */
 	{ PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS, 0x0031, pci_quirk_al_acs },
 	/* Zhaoxin multi-function devices */
@@ -6284,6 +6305,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_XILINX, 0x5020, of_pci_make_dev_node);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_XILINX, 0x5021, of_pci_make_dev_node);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_REDHAT, 0x0005, of_pci_make_dev_node);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_EFAR, 0x9660, of_pci_make_dev_node);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_RPI, PCI_DEVICE_ID_RPI_RP1_C0, of_pci_make_dev_node);
 
 /*
  * Devices known to require a longer delay before first config space access
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 54d6f4fa3ce1..7853ac6999e2 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -187,6 +187,9 @@ static void pdev_sort_resources(struct pci_dev *dev, struct list_head *head)
 			panic("%s: kzalloc() failed!\n", __func__);
 		tmp->res = r;
 		tmp->dev = dev;
+		tmp->start = r->start;
+		tmp->end = r->end;
+		tmp->flags = r->flags;
 
 		/* Fallback is smallest one or list is empty */
 		n = head;
@@ -545,6 +548,7 @@ assign:
 		pci_dbg(dev, "%s %pR: releasing\n", res_name, res);
 		release_resource(res);
+		restore_dev_resource(dev_res);
 	}
 	/* Restore start/end/flags from saved list */
 	list_for_each_entry(save_res, &save_head, list)
@@ -772,8 +776,7 @@ static void __pci_setup_bridge(struct pci_bus *bus, unsigned long type)
 {
 	struct pci_dev *bridge = bus->self;
 
-	pci_info(bridge, "PCI bridge to %pR\n",
-		 &bus->busn_res);
+	pci_info(bridge, "PCI bridge to %pR\n", &bus->busn_res);
 
 	if (type & IORESOURCE_IO)
 		pci_setup_bridge_io(bridge);
@@ -1885,7 +1888,8 @@ static int iov_resources_unassigned(struct pci_dev *dev, void *data)
 	bool *unassigned = data;
 
 	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
-		struct resource *r = &dev->resource[i + PCI_IOV_RESOURCES];
+		int idx = pci_resource_num_from_vf_bar(i);
+		struct resource *r = &dev->resource[idx];
 		struct pci_bus_region region;
 
 		/* Not assigned or rejected by kernel? */
@@ -2298,8 +2302,8 @@ void pci_assign_unassigned_root_bus_resources(struct pci_bus *bus)
 	/* Depth last, allocate resources and update the hardware. */
 	__pci_bus_assign_resources(bus, add_list, &fail_head);
-	if (add_list)
-		BUG_ON(!list_empty(add_list));
+	if (WARN_ON_ONCE(add_list && !list_empty(add_list)))
+		free_list(add_list);
 	tried_times++;
 
 	/* Any device complain? */
@@ -2361,7 +2365,8 @@ void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge)
 	pci_bridge_distribute_available_resources(bridge, &add_list);
 	__pci_bridge_assign_resources(bridge, &add_list, &fail_head);
-	BUG_ON(!list_empty(&add_list));
+	if (WARN_ON_ONCE(!list_empty(&add_list)))
+		free_list(&add_list);
 	tried_times++;
 
 	if (list_empty(&fail_head))
@@ -2437,7 +2442,8 @@ int pci_reassign_bridge_resources(struct pci_dev *bridge, unsigned long type)
 		__pci_bus_size_bridges(bridge->subordinate, &added);
 		__pci_bridge_assign_resources(bridge, &added, &failed);
-		BUG_ON(!list_empty(&added));
+		if (WARN_ON_ONCE(!list_empty(&added)))
+			free_list(&added);
 
 		if (!list_empty(&failed)) {
 			ret = -ENOSPC;
@@ -2493,6 +2499,7 @@ void pci_assign_unassigned_bus_resources(struct pci_bus *bus)
 			__pci_bus_size_bridges(dev->subordinate, &add_list);
 	up_read(&pci_bus_sem);
 	__pci_bus_assign_resources(bus, &add_list, NULL);
-	BUG_ON(!list_empty(&add_list));
+	if (WARN_ON_ONCE(!list_empty(&add_list)))
+		free_list(&add_list);
 }
 EXPORT_SYMBOL_GPL(pci_assign_unassigned_bus_resources);
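setup-bus.c downgrades BUG_ON() on a non-empty add_list to WARN_ON_ONCE() plus cleanup, trading a guaranteed crash for a loud but recoverable warning. The same idiom in isolation, as a hedged sketch with invented foo_* names:

#include <linux/bug.h>
#include <linux/list.h>
#include <linux/slab.h>

struct foo_entry {
	struct list_head node;
};

static void foo_check_drained(struct list_head *head)
{
	struct foo_entry *e, *tmp;

	/* Warn (once per boot) and recover instead of halting the machine */
	if (!WARN_ON_ONCE(!list_empty(head)))
		return;

	list_for_each_entry_safe(e, tmp, head, node) {
		list_del(&e->node);
		kfree(e);
	}
}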
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index c6657cdd06f6..d2b3ed51e880 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -423,13 +423,39 @@ void pci_release_resource(struct pci_dev *dev, int resno)
 }
 EXPORT_SYMBOL(pci_release_resource);
 
+static bool pci_resize_is_memory_decoding_enabled(struct pci_dev *dev,
+						  int resno)
+{
+	u16 cmd;
+
+	if (pci_resource_is_iov(resno))
+		return pci_iov_is_memory_decoding_enabled(dev);
+
+	pci_read_config_word(dev, PCI_COMMAND, &cmd);
+
+	return cmd & PCI_COMMAND_MEMORY;
+}
+
+static void pci_resize_resource_set_size(struct pci_dev *dev, int resno,
+					 int size)
+{
+	resource_size_t res_size = pci_rebar_size_to_bytes(size);
+	struct resource *res = pci_resource_n(dev, resno);
+
+	if (!pci_resource_is_iov(resno)) {
+		resource_set_size(res, res_size);
+	} else {
+		resource_set_size(res, res_size * pci_sriov_get_totalvfs(dev));
+		pci_iov_resource_set_size(dev, resno, res_size);
+	}
+}
+
 int pci_resize_resource(struct pci_dev *dev, int resno, int size)
 {
 	struct resource *res = pci_resource_n(dev, resno);
 	struct pci_host_bridge *host;
 	int old, ret;
 	u32 sizes;
-	u16 cmd;
 
 	/* Check if we must preserve the firmware's resource assignment */
 	host = pci_find_host_bridge(dev->bus);
@@ -440,8 +466,7 @@ int pci_resize_resource(struct pci_dev *dev, int resno, int size)
 	if (!(res->flags & IORESOURCE_UNSET))
 		return -EBUSY;
 
-	pci_read_config_word(dev, PCI_COMMAND, &cmd);
-	if (cmd & PCI_COMMAND_MEMORY)
+	if (pci_resize_is_memory_decoding_enabled(dev, resno))
 		return -EBUSY;
 
 	sizes = pci_rebar_get_possible_sizes(dev, resno);
@@ -459,7 +484,7 @@ int pci_resize_resource(struct pci_dev *dev, int resno, int size)
 	if (ret)
 		return ret;
 
-	resource_set_size(res, pci_rebar_size_to_bytes(size));
+	pci_resize_resource_set_size(dev, resno, size);
 
 	/* Check if the new config works by trying to assign everything. */
 	if (dev->bus->self) {
@@ -471,7 +496,7 @@ int pci_resize_resource(struct pci_dev *dev, int resno, int size)
 
 error_resize:
 	pci_rebar_set_size(dev, resno, old);
-	resource_set_size(res, pci_rebar_size_to_bytes(old));
+	pci_resize_resource_set_size(dev, resno, old);
 	return ret;
 }
 EXPORT_SYMBOL(pci_resize_resource);
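pci_resize_resource() takes the BAR index and an encoded size where bytes = 1 MB << size, and it requires the BAR to be released (IORESOURCE_UNSET) with memory decoding off. A hedged sketch of a driver resizing BAR 0 to 256 MB; foo_resize_bar0 is invented and error handling is trimmed, but the release/resize/reassign choreography follows the usual ReBAR callers:

static int foo_resize_bar0(struct pci_dev *pdev)
{
	int ret;

	pci_release_resource(pdev, 0);		/* resize needs an unset BAR */
	ret = pci_resize_resource(pdev, 0, 8);	/* 1 MB << 8 = 256 MB */
	if (ret)
		return ret;

	/* Reassign whatever the release above may have disturbed */
	pci_assign_unassigned_bus_resources(pdev->bus);
	return 0;
}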
diff --git a/drivers/pci/tph.c b/drivers/pci/tph.c
index 07de59ca2ebf..cc64f93709a4 100644
--- a/drivers/pci/tph.c
+++ b/drivers/pci/tph.c
@@ -168,7 +168,7 @@ static u32 get_st_table_loc(struct pci_dev *pdev)
  * Return the size of ST table. If ST table is not in TPH Requester Extended
  * Capability space, return 0. Otherwise return the ST Table Size + 1.
  */
-static u16 get_st_table_size(struct pci_dev *pdev)
+u16 pcie_tph_get_st_table_size(struct pci_dev *pdev)
 {
 	u32 reg;
 	u32 loc;
@@ -185,6 +185,7 @@ static u16 get_st_table_size(struct pci_dev *pdev)
 
 	return FIELD_GET(PCI_TPH_CAP_ST_MASK, reg) + 1;
 }
+EXPORT_SYMBOL(pcie_tph_get_st_table_size);
 
 /* Return device's Root Port completer capability */
 static u8 get_rp_completer_type(struct pci_dev *pdev)
@@ -204,48 +205,6 @@ static u8 get_rp_completer_type(struct pci_dev *pdev)
 	return FIELD_GET(PCI_EXP_DEVCAP2_TPH_COMP_MASK, reg);
 }
 
-/* Write ST to MSI-X vector control reg - Return 0 if OK, otherwise -errno */
-static int write_tag_to_msix(struct pci_dev *pdev, int msix_idx, u16 tag)
-{
-#ifdef CONFIG_PCI_MSI
-	struct msi_desc *msi_desc = NULL;
-	void __iomem *vec_ctrl;
-	u32 val;
-	int err = 0;
-
-	msi_lock_descs(&pdev->dev);
-
-	/* Find the msi_desc entry with matching msix_idx */
-	msi_for_each_desc(msi_desc, &pdev->dev, MSI_DESC_ASSOCIATED) {
-		if (msi_desc->msi_index == msix_idx)
-			break;
-	}
-
-	if (!msi_desc) {
-		err = -ENXIO;
-		goto err_out;
-	}
-
-	/* Get the vector control register (offset 0xc) pointed by msix_idx */
-	vec_ctrl = pdev->msix_base + msix_idx * PCI_MSIX_ENTRY_SIZE;
-	vec_ctrl += PCI_MSIX_ENTRY_VECTOR_CTRL;
-
-	val = readl(vec_ctrl);
-	val &= ~PCI_MSIX_ENTRY_CTRL_ST;
-	val |= FIELD_PREP(PCI_MSIX_ENTRY_CTRL_ST, tag);
-	writel(val, vec_ctrl);
-
-	/* Read back to flush the update */
-	val = readl(vec_ctrl);
-
-err_out:
-	msi_unlock_descs(&pdev->dev);
-	return err;
-#else
-	return -ENODEV;
-#endif
-}
-
 /* Write tag to ST table - Return 0 if OK, otherwise -errno */
 static int write_tag_to_st_table(struct pci_dev *pdev, int index, u16 tag)
 {
@@ -253,7 +212,7 @@ static int write_tag_to_st_table(struct pci_dev *pdev, int index, u16 tag)
 	int offset;
 
 	/* Check if index is out of bound */
-	st_table_size = get_st_table_size(pdev);
+	st_table_size = pcie_tph_get_st_table_size(pdev);
 	if (index >= st_table_size)
 		return -ENXIO;
@@ -346,7 +305,7 @@ int pcie_tph_set_st_entry(struct pci_dev *pdev, unsigned int index, u16 tag)
 	switch (loc) {
 	case PCI_TPH_LOC_MSIX:
-		err = write_tag_to_msix(pdev, index, tag);
+		err = pci_msix_write_tph_tag(pdev, index, tag);
 		break;
 	case PCI_TPH_LOC_CAP:
 		err = write_tag_to_st_table(pdev, index, tag);
 		break;
@@ -485,7 +444,7 @@ void pci_restore_tph_state(struct pci_dev *pdev)
 	pci_write_config_dword(pdev, pdev->tph_cap + PCI_TPH_CTRL, *cap++);
 	st_entry = (u16 *)cap;
 	offset = PCI_TPH_BASE_SIZEOF;
-	num_entries = get_st_table_size(pdev);
+	num_entries = pcie_tph_get_st_table_size(pdev);
 	for (i = 0; i < num_entries; i++) {
 		pci_write_config_word(pdev, pdev->tph_cap + offset,
 				      *st_entry++);
@@ -517,7 +476,7 @@ void pci_save_tph_state(struct pci_dev *pdev)
 	/* Save all ST entries in extended capability structure */
 	st_entry = (u16 *)cap;
 	offset = PCI_TPH_BASE_SIZEOF;
-	num_entries = get_st_table_size(pdev);
+	num_entries = pcie_tph_get_st_table_size(pdev);
 	for (i = 0; i < num_entries; i++) {
 		pci_read_config_word(pdev, pdev->tph_cap + offset,
 				     st_entry++);
@@ -541,7 +500,7 @@ void pci_tph_init(struct pci_dev *pdev)
 	if (!pdev->tph_cap)
 		return;
 
-	num_entries = get_st_table_size(pdev);
+	num_entries = pcie_tph_get_st_table_size(pdev);
 	save_size = sizeof(u32) + num_entries * sizeof(u16);
 	pci_add_ext_cap_save_buffer(pdev, PCI_EXT_CAP_ID_TPH, save_size);
 }
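With pcie_tph_get_st_table_size() exported, a driver can bound-check its steering-tag programming before calling pcie_tph_set_st_entry(). A hedged sketch; foo_program_st is invented, while both pcie_tph_* calls are the exported TPH interfaces:

#include <linux/pci-tph.h>

static int foo_program_st(struct pci_dev *pdev, unsigned int index, u16 tag)
{
	/* Refuse indexes beyond the device's ST table */
	if (index >= pcie_tph_get_st_table_size(pdev))
		return -EINVAL;

	return pcie_tph_set_st_entry(pdev, index, tag);
}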
diff --git a/drivers/pci/vpd.c b/drivers/pci/vpd.c
index 3d29b2602d0f..153394a652d3 100644
--- a/drivers/pci/vpd.c
+++ b/drivers/pci/vpd.c
@@ -336,7 +336,7 @@ static umode_t vpd_attr_is_visible(struct kobject *kobj,
 }
 
 const struct attribute_group pci_dev_vpd_attr_group = {
-	.bin_attrs_new = vpd_attrs,
+	.bin_attrs = vpd_attrs,
 	.is_bin_visible = vpd_attr_is_visible,
 };
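The rename back to .bin_attrs completes sysfs's migration to const binary attributes; .bin_attrs_new was only the transitional field name. A hedged sketch of a group using the const-correct field, assuming the post-transition signatures where the read callback takes a const struct bin_attribute *; all foo_* names are invented:

#include <linux/sysfs.h>

static ssize_t foo_data_read(struct file *filp, struct kobject *kobj,
			     const struct bin_attribute *attr, char *buf,
			     loff_t off, size_t count)
{
	return 0;	/* sketch: no payload */
}

static const struct bin_attribute foo_data_attr = {
	.attr = { .name = "foo_data", .mode = 0444 },
	.read = foo_data_read,
};

static const struct bin_attribute *const foo_bin_attrs[] = {
	&foo_data_attr,
	NULL,
};

static const struct attribute_group foo_group = {
	.bin_attrs = foo_bin_attrs,
};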