Diffstat (limited to 'drivers/pci/controller/cadence/pcie-cadence-ep.c')
-rw-r--r--  drivers/pci/controller/cadence/pcie-cadence-ep.c | 134
1 file changed, 83 insertions(+), 51 deletions(-)
diff --git a/drivers/pci/controller/cadence/pcie-cadence-ep.c b/drivers/pci/controller/cadence/pcie-cadence-ep.c
index b8b655d4047e..c0e1194a936b 100644
--- a/drivers/pci/controller/cadence/pcie-cadence-ep.c
+++ b/drivers/pci/controller/cadence/pcie-cadence-ep.c
@@ -3,14 +3,17 @@
 // Cadence PCIe endpoint controller driver.
 // Author: Cyrille Pitchen <cyrille.pitchen@free-electrons.com>
 
+#include <linux/bitfield.h>
 #include <linux/delay.h>
 #include <linux/kernel.h>
+#include <linux/module.h>
 #include <linux/of.h>
 #include <linux/pci-epc.h>
 #include <linux/platform_device.h>
 #include <linux/sizes.h>
 
 #include "pcie-cadence.h"
+#include "../../pci.h"
 
 #define CDNS_PCIE_EP_MIN_APERTURE		128	/* 128 bytes */
 #define CDNS_PCIE_EP_IRQ_PCI_ADDR_NONE		0x1
@@ -18,12 +21,13 @@
 
 static u8 cdns_pcie_get_fn_from_vfn(struct cdns_pcie *pcie, u8 fn, u8 vfn)
 {
-	u32 cap = CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET;
 	u32 first_vf_offset, stride;
+	u16 cap;
 
 	if (vfn == 0)
 		return fn;
 
+	cap = cdns_pcie_find_ext_capability(pcie, PCI_EXT_CAP_ID_SRIOV);
 	first_vf_offset = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_SRIOV_VF_OFFSET);
 	stride = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_SRIOV_VF_STRIDE);
 	fn = fn + first_vf_offset + ((vfn - 1) * stride);
@@ -35,10 +39,11 @@ static int cdns_pcie_ep_write_header(struct pci_epc *epc, u8 fn, u8 vfn,
 				     struct pci_epf_header *hdr)
 {
 	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
-	u32 cap = CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET;
 	struct cdns_pcie *pcie = &ep->pcie;
 	u32 reg;
+	u16 cap;
 
+	cap = cdns_pcie_find_ext_capability(pcie, PCI_EXT_CAP_ID_SRIOV);
 	if (vfn > 1) {
 		dev_err(&epc->dev, "Only Virtual Function #1 has deviceID\n");
 		return -EINVAL;
@@ -98,14 +103,11 @@ static int cdns_pcie_ep_set_bar(struct pci_epc *epc, u8 fn, u8 vfn,
 		ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_IO_32BITS;
 	} else {
 		bool is_prefetch = !!(flags & PCI_BASE_ADDRESS_MEM_PREFETCH);
-		bool is_64bits = sz > SZ_2G;
+		bool is_64bits = !!(flags & PCI_BASE_ADDRESS_MEM_TYPE_64);
 
 		if (is_64bits && (bar & 1))
 			return -EINVAL;
 
-		if (is_64bits && !(flags & PCI_BASE_ADDRESS_MEM_TYPE_64))
-			epf_bar->flags |= PCI_BASE_ADDRESS_MEM_TYPE_64;
-
 		if (is_64bits && is_prefetch)
 			ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_64BITS;
 		else if (is_prefetch)
@@ -222,13 +224,15 @@ static void cdns_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn, u8 vfn,
 	clear_bit(r, &ep->ob_region_map);
 }
 
-static int cdns_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 vfn, u8 mmc)
+static int cdns_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 vfn, u8 nr_irqs)
 {
 	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
 	struct cdns_pcie *pcie = &ep->pcie;
-	u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
+	u8 mmc = order_base_2(nr_irqs);
 	u16 flags;
+	u8 cap;
 
+	cap = cdns_pcie_find_capability(pcie, PCI_CAP_ID_MSI);
 	fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);
 
 	/*
@@ -248,9 +252,10 @@ static int cdns_pcie_ep_get_msi(struct pci_epc *epc, u8 fn, u8 vfn)
 {
 	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
 	struct cdns_pcie *pcie = &ep->pcie;
-	u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
 	u16 flags, mme;
+	u8 cap;
 
+	cap = cdns_pcie_find_capability(pcie, PCI_CAP_ID_MSI);
 	fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);
 
 	/* Validate that the MSI feature is actually enabled. */
@@ -262,18 +267,19 @@ static int cdns_pcie_ep_get_msi(struct pci_epc *epc, u8 fn, u8 vfn)
 	 * Get the Multiple Message Enable bitfield from the Message Control
 	 * register.
 	 */
-	mme = (flags & PCI_MSI_FLAGS_QSIZE) >> 4;
+	mme = FIELD_GET(PCI_MSI_FLAGS_QSIZE, flags);
 
-	return mme;
+	return 1 << mme;
 }
 
 static int cdns_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
 {
 	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
 	struct cdns_pcie *pcie = &ep->pcie;
-	u32 cap = CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET;
 	u32 val, reg;
+	u8 cap;
 
+	cap = cdns_pcie_find_capability(pcie, PCI_CAP_ID_MSIX);
 	func_no = cdns_pcie_get_fn_from_vfn(pcie, func_no, vfunc_no);
 
 	reg = cap + PCI_MSIX_FLAGS;
@@ -283,34 +289,34 @@ static int cdns_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
 
 	val &= PCI_MSIX_FLAGS_QSIZE;
 
-	return val;
+	return val + 1;
 }
 
 static int cdns_pcie_ep_set_msix(struct pci_epc *epc, u8 fn, u8 vfn,
-				 u16 interrupts, enum pci_barno bir,
-				 u32 offset)
+				 u16 nr_irqs, enum pci_barno bir, u32 offset)
 {
 	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
 	struct cdns_pcie *pcie = &ep->pcie;
-	u32 cap = CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET;
 	u32 val, reg;
+	u8 cap;
 
+	cap = cdns_pcie_find_capability(pcie, PCI_CAP_ID_MSIX);
 	fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);
 
 	reg = cap + PCI_MSIX_FLAGS;
 	val = cdns_pcie_ep_fn_readw(pcie, fn, reg);
 	val &= ~PCI_MSIX_FLAGS_QSIZE;
-	val |= interrupts;
+	val |= nr_irqs - 1; /* encoded as N-1 */
 	cdns_pcie_ep_fn_writew(pcie, fn, reg, val);
 
-	/* Set MSIX BAR and offset */
+	/* Set MSI-X BAR and offset */
 	reg = cap + PCI_MSIX_TABLE;
 	val = offset | bir;
 	cdns_pcie_ep_fn_writel(pcie, fn, reg, val);
 
-	/* Set PBA BAR and offset. BAR must match MSIX BAR */
+	/* Set PBA BAR and offset. BAR must match MSI-X BAR */
 	reg = cap + PCI_MSIX_PBA;
-	val = (offset + (interrupts * PCI_MSIX_ENTRY_SIZE)) | bir;
+	val = (offset + (nr_irqs * PCI_MSIX_ENTRY_SIZE)) | bir;
 	cdns_pcie_ep_fn_writel(pcie, fn, reg, val);
 
 	return 0;
@@ -339,10 +345,10 @@ static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn, u8 intx,
 
 	if (is_asserted) {
 		ep->irq_pending |= BIT(intx);
-		msg_code = MSG_CODE_ASSERT_INTA + intx;
+		msg_code = PCIE_MSG_CODE_ASSERT_INTA + intx;
 	} else {
 		ep->irq_pending &= ~BIT(intx);
-		msg_code = MSG_CODE_DEASSERT_INTA + intx;
+		msg_code = PCIE_MSG_CODE_DEASSERT_INTA + intx;
 	}
 
 	spin_lock_irqsave(&ep->lock, flags);
@@ -353,14 +359,13 @@ static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn, u8 intx,
 	}
 	spin_unlock_irqrestore(&ep->lock, flags);
 
-	offset = CDNS_PCIE_NORMAL_MSG_ROUTING(MSG_ROUTING_LOCAL) |
-		 CDNS_PCIE_NORMAL_MSG_CODE(msg_code) |
-		 CDNS_PCIE_MSG_NO_DATA;
+	offset = CDNS_PCIE_NORMAL_MSG_ROUTING(PCIE_MSG_TYPE_R_LOCAL) |
+		 CDNS_PCIE_NORMAL_MSG_CODE(msg_code);
 	writel(0, ep->irq_cpu_addr + offset);
 }
 
-static int cdns_pcie_ep_send_legacy_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn,
-					u8 intx)
+static int cdns_pcie_ep_send_intx_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn,
+				      u8 intx)
 {
 	u16 cmd;
 
@@ -370,7 +375,7 @@ static int cdns_pcie_ep_send_legacy_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn,
 
 	cdns_pcie_ep_assert_intx(ep, fn, intx, true);
 	/*
-	 * The mdelay() value was taken from dra7xx_pcie_raise_legacy_irq()
+	 * The mdelay() value was taken from dra7xx_pcie_raise_intx_irq()
 	 */
 	mdelay(1);
 	cdns_pcie_ep_assert_intx(ep, fn, intx, false);
@@ -381,11 +386,11 @@ static int cdns_pcie_ep_send_msi_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn,
 				     u8 interrupt_num)
 {
 	struct cdns_pcie *pcie = &ep->pcie;
-	u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
 	u16 flags, mme, data, data_mask;
-	u8 msi_count;
 	u64 pci_addr, pci_addr_mask = 0xff;
+	u8 msi_count, cap;
 
+	cap = cdns_pcie_find_capability(pcie, PCI_CAP_ID_MSI);
 	fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);
 
 	/* Check whether the MSI feature has been enabled by the PCI host. */
@@ -394,7 +399,7 @@ static int cdns_pcie_ep_send_msi_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn,
 		return -EINVAL;
 
 	/* Get the number of enabled MSIs */
-	mme = (flags & PCI_MSI_FLAGS_QSIZE) >> 4;
+	mme = FIELD_GET(PCI_MSI_FLAGS_QSIZE, flags);
 	msi_count = 1 << mme;
 	if (!interrupt_num || interrupt_num > msi_count)
 		return -EINVAL;
@@ -433,14 +438,14 @@ static int cdns_pcie_ep_map_msi_irq(struct pci_epc *epc, u8 fn, u8 vfn,
 				    u32 *msi_addr_offset)
 {
 	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
-	u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
 	struct cdns_pcie *pcie = &ep->pcie;
 	u64 pci_addr, pci_addr_mask = 0xff;
 	u16 flags, mme, data, data_mask;
-	u8 msi_count;
+	u8 msi_count, cap;
 	int ret;
 	int i;
 
+	cap = cdns_pcie_find_capability(pcie, PCI_CAP_ID_MSI);
 	fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);
 
 	/* Check whether the MSI feature has been enabled by the PCI host. */
@@ -449,7 +454,7 @@ static int cdns_pcie_ep_map_msi_irq(struct pci_epc *epc, u8 fn, u8 vfn,
 		return -EINVAL;
 
 	/* Get the number of enabled MSIs */
-	mme = (flags & PCI_MSI_FLAGS_QSIZE) >> 4;
+	mme = FIELD_GET(PCI_MSI_FLAGS_QSIZE, flags);
 	msi_count = 1 << mme;
 	if (!interrupt_num || interrupt_num > msi_count)
 		return -EINVAL;
@@ -483,16 +488,16 @@ static int cdns_pcie_ep_map_msi_irq(struct pci_epc *epc, u8 fn, u8 vfn,
 static int cdns_pcie_ep_send_msix_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn,
 				      u16 interrupt_num)
 {
-	u32 cap = CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET;
 	u32 tbl_offset, msg_data, reg;
 	struct cdns_pcie *pcie = &ep->pcie;
 	struct pci_epf_msix_tbl *msix_tbl;
 	struct cdns_pcie_epf *epf;
 	u64 pci_addr_mask = 0xff;
 	u64 msg_addr;
+	u8 bir, cap;
 	u16 flags;
-	u8 bir;
 
+	cap = cdns_pcie_find_capability(pcie, PCI_CAP_ID_MSIX);
 	epf = &ep->epf[fn];
 	if (vfn > 0)
 		epf = &epf->epf[vfn - 1];
@@ -506,7 +511,7 @@ static int cdns_pcie_ep_send_msix_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn,
 
 	reg = cap + PCI_MSIX_TABLE;
 	tbl_offset = cdns_pcie_ep_fn_readl(pcie, fn, reg);
-	bir = tbl_offset & PCI_MSIX_TABLE_BIR;
+	bir = FIELD_GET(PCI_MSIX_TABLE_BIR, tbl_offset);
 	tbl_offset &= PCI_MSIX_TABLE_OFFSET;
 
 	msix_tbl = epf->epf_bar[bir]->addr + tbl_offset;
@@ -531,25 +536,24 @@ static int cdns_pcie_ep_send_msix_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn,
 }
 
 static int cdns_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn, u8 vfn,
-				  enum pci_epc_irq_type type,
-				  u16 interrupt_num)
+				  unsigned int type, u16 interrupt_num)
 {
 	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
 	struct cdns_pcie *pcie = &ep->pcie;
 	struct device *dev = pcie->dev;
 
 	switch (type) {
-	case PCI_EPC_IRQ_LEGACY:
+	case PCI_IRQ_INTX:
 		if (vfn > 0) {
-			dev_err(dev, "Cannot raise legacy interrupts for VF\n");
+			dev_err(dev, "Cannot raise INTX interrupts for VF\n");
 			return -EINVAL;
 		}
-		return cdns_pcie_ep_send_legacy_irq(ep, fn, vfn, 0);
+		return cdns_pcie_ep_send_intx_irq(ep, fn, vfn, 0);
 
-	case PCI_EPC_IRQ_MSI:
+	case PCI_IRQ_MSI:
 		return cdns_pcie_ep_send_msi_irq(ep, fn, vfn, interrupt_num);
 
-	case PCI_EPC_IRQ_MSIX:
+	case PCI_IRQ_MSIX:
 		return cdns_pcie_ep_send_msix_irq(ep, fn, vfn, interrupt_num);
 
 	default:
@@ -565,26 +569,38 @@ static int cdns_pcie_ep_start(struct pci_epc *epc)
 	struct cdns_pcie *pcie = &ep->pcie;
 	struct device *dev = pcie->dev;
 	int max_epfs = sizeof(epc->function_num_map) * 8;
-	int ret, value, epf;
+	int ret, epf, last_fn;
+	u32 reg, value;
+	u8 cap;
 
+	cap = cdns_pcie_find_capability(pcie, PCI_CAP_ID_EXP);
 	/*
 	 * BIT(0) is hardwired to 1, hence function 0 is always enabled
 	 * and can't be disabled anyway.
 	 */
 	cdns_pcie_writel(pcie, CDNS_PCIE_LM_EP_FUNC_CFG, epc->function_num_map);
 
+	/*
+	 * Next function field in ARI_CAP_AND_CTR register for last function
+	 * should be 0. Clear Next Function Number field for the last
+	 * function used.
+	 */
+	last_fn = find_last_bit(&epc->function_num_map, BITS_PER_LONG);
+	reg = CDNS_PCIE_CORE_PF_I_ARI_CAP_AND_CTRL(last_fn);
+	value = cdns_pcie_readl(pcie, reg);
+	value &= ~CDNS_PCIE_ARI_CAP_NFN_MASK;
+	cdns_pcie_writel(pcie, reg, value);
+
 	if (ep->quirk_disable_flr) {
 		for (epf = 0; epf < max_epfs; epf++) {
 			if (!(epc->function_num_map & BIT(epf)))
 				continue;
 
 			value = cdns_pcie_ep_fn_readl(pcie, epf,
-					CDNS_PCIE_EP_FUNC_DEV_CAP_OFFSET +
-					PCI_EXP_DEVCAP);
+					cap + PCI_EXP_DEVCAP);
 			value &= ~PCI_EXP_DEVCAP_FLR;
 			cdns_pcie_ep_fn_writel(pcie, epf,
-					CDNS_PCIE_EP_FUNC_DEV_CAP_OFFSET +
-					PCI_EXP_DEVCAP, value);
+					cap + PCI_EXP_DEVCAP, value);
 		}
 	}
 
@@ -598,14 +614,12 @@ static int cdns_pcie_ep_start(struct pci_epc *epc)
 }
 
 static const struct pci_epc_features cdns_pcie_epc_vf_features = {
-	.linkup_notifier = false,
 	.msi_capable = true,
 	.msix_capable = true,
 	.align = 65536,
 };
 
 static const struct pci_epc_features cdns_pcie_epc_features = {
-	.linkup_notifier = false,
 	.msi_capable = true,
 	.msix_capable = true,
 	.align = 256,
@@ -636,6 +650,17 @@ static const struct pci_epc_ops cdns_pcie_epc_ops = {
 	.get_features = cdns_pcie_ep_get_features,
 };
 
+void cdns_pcie_ep_disable(struct cdns_pcie_ep *ep)
+{
+	struct device *dev = ep->pcie.dev;
+	struct pci_epc *epc = to_pci_epc(dev);
+
+	pci_epc_deinit_notify(epc);
+	pci_epc_mem_free_addr(epc, ep->irq_phys_addr, ep->irq_cpu_addr,
+			      SZ_128K);
+	pci_epc_mem_exit(epc);
+}
+EXPORT_SYMBOL_GPL(cdns_pcie_ep_disable);
 int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
 {
@@ -734,6 +759,8 @@ int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
 
 	spin_lock_init(&ep->lock);
 
+	pci_epc_init_notify(epc);
+
 	return 0;
 
  free_epc_mem:
@@ -741,3 +768,8 @@ int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
 
 	return ret;
 }
+EXPORT_SYMBOL_GPL(cdns_pcie_ep_setup);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Cadence PCIe endpoint controller driver");
+MODULE_AUTHOR("Cyrille Pitchen <cyrille.pitchen@free-electrons.com>");
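Note on the MSI/MSI-X hunks above: the reworked set_msi()/get_msi() and set_msix()/get_msix() paths convert between vector counts and the register encodings the PCI specification defines for these fields. MSI's Multiple Message Enable field holds a log2 count (order_base_2(nr_irqs) on write, 1 << mme on read), while the MSI-X Table Size field is N-1 encoded (nr_irqs - 1 on write, val + 1 on read). The standalone C sketch below is not part of the patch; the two register masks are mirrored locally from include/uapi/linux/pci_regs.h so it builds in userspace without kernel headers, and it simply shows the round trip for both encodings.

/* Round-trip demo of the MSI (log2) and MSI-X (N-1) vector-count encodings. */
#include <stdio.h>

#define PCI_MSI_FLAGS_QSIZE	0x0070	/* Multiple Message Enable, bits 6:4 */
#define PCI_MSIX_FLAGS_QSIZE	0x07ff	/* MSI-X Table Size, bits 10:0 */

/* Smallest power-of-two exponent covering nr_irqs, like the kernel's order_base_2(). */
static unsigned int order_base_2(unsigned int nr_irqs)
{
	unsigned int order = 0;

	while ((1u << order) < nr_irqs)
		order++;
	return order;
}

int main(void)
{
	unsigned int nr_irqs = 8, msi_flags = 0, msix_flags = 0, mme;

	/* MSI: encode the requested count as a log2 value in bits 6:4 ... */
	msi_flags |= (order_base_2(nr_irqs) << 4) & PCI_MSI_FLAGS_QSIZE;
	/* ... and decode it back as 1 << mme, as the reworked get_msi() does. */
	mme = (msi_flags & PCI_MSI_FLAGS_QSIZE) >> 4;
	printf("MSI:   %u vectors -> MME %u -> %u vectors\n", nr_irqs, mme, 1u << mme);

	/* MSI-X: Table Size is written as N-1 and read back as val + 1. */
	msix_flags |= (nr_irqs - 1) & PCI_MSIX_FLAGS_QSIZE;
	printf("MSI-X: %u vectors -> field %u -> %u vectors\n", nr_irqs,
	       msix_flags & PCI_MSIX_FLAGS_QSIZE,
	       (msix_flags & PCI_MSIX_FLAGS_QSIZE) + 1);

	return 0;
}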
