Diffstat (limited to 'drivers/pci/controller/cadence/pcie-cadence-ep.c')
-rw-r--r-- drivers/pci/controller/cadence/pcie-cadence-ep.c | 484
1 file changed, 390 insertions(+), 94 deletions(-)
diff --git a/drivers/pci/controller/cadence/pcie-cadence-ep.c b/drivers/pci/controller/cadence/pcie-cadence-ep.c
index 1c15c8352125..c0e1194a936b 100644
--- a/drivers/pci/controller/cadence/pcie-cadence-ep.c
+++ b/drivers/pci/controller/cadence/pcie-cadence-ep.c
@@ -3,25 +3,55 @@
// Cadence PCIe endpoint controller driver.
// Author: Cyrille Pitchen <cyrille.pitchen@free-electrons.com>
+#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/of.h>
#include <linux/pci-epc.h>
#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
#include <linux/sizes.h>
#include "pcie-cadence.h"
+#include "../../pci.h"
#define CDNS_PCIE_EP_MIN_APERTURE 128 /* 128 bytes */
#define CDNS_PCIE_EP_IRQ_PCI_ADDR_NONE 0x1
#define CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY 0x3
-static int cdns_pcie_ep_write_header(struct pci_epc *epc, u8 fn,
+static u8 cdns_pcie_get_fn_from_vfn(struct cdns_pcie *pcie, u8 fn, u8 vfn)
+{
+ u32 first_vf_offset, stride;
+ u16 cap;
+
+ if (vfn == 0)
+ return fn;
+
+ cap = cdns_pcie_find_ext_capability(pcie, PCI_EXT_CAP_ID_SRIOV);
+ first_vf_offset = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_SRIOV_VF_OFFSET);
+ stride = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_SRIOV_VF_STRIDE);
+ fn = fn + first_vf_offset + ((vfn - 1) * stride);
+
+ return fn;
+}
+
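The helper above implements the SR-IOV routing rule: virtual function vfn of physical function fn lives at the absolute function number fn + First VF Offset + (vfn - 1) * VF Stride, with both fields read from the PF's SR-IOV capability. A minimal userspace sketch of that arithmetic, with the capability reads replaced by plain parameters (the offset/stride values below are made-up examples, not hardware values):

    #include <stdint.h>
    #include <stdio.h>

    /* Mirrors cdns_pcie_get_fn_from_vfn(): vfn == 0 addresses the PF
     * itself; VF n sits at fn + first_vf_offset + (n - 1) * stride. */
    static uint8_t fn_from_vfn(uint8_t fn, uint8_t vfn,
                               uint16_t first_vf_offset, uint16_t stride)
    {
        if (vfn == 0)
            return fn;
        return fn + first_vf_offset + (vfn - 1) * stride;
    }

    int main(void)
    {
        /* Example: PF0 whose SR-IOV capability reports offset 4, stride 1. */
        for (uint8_t vfn = 0; vfn <= 3; vfn++)
            printf("PF0/VF%u -> function %u\n", (unsigned)vfn,
                   (unsigned)fn_from_vfn(0, vfn, 4, 1));
        return 0;
    }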
+static int cdns_pcie_ep_write_header(struct pci_epc *epc, u8 fn, u8 vfn,
struct pci_epf_header *hdr)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
struct cdns_pcie *pcie = &ep->pcie;
+ u32 reg;
+ u16 cap;
+
+ cap = cdns_pcie_find_ext_capability(pcie, PCI_EXT_CAP_ID_SRIOV);
+ if (vfn > 1) {
+ dev_err(&epc->dev, "Only Virtual Function #1 has deviceID\n");
+ return -EINVAL;
+ } else if (vfn == 1) {
+ reg = cap + PCI_SRIOV_VF_DID;
+ cdns_pcie_ep_fn_writew(pcie, fn, reg, hdr->deviceid);
+ return 0;
+ }
cdns_pcie_ep_fn_writew(pcie, fn, PCI_DEVICE_ID, hdr->deviceid);
cdns_pcie_ep_fn_writeb(pcie, fn, PCI_REVISION_ID, hdr->revid);
@@ -48,10 +78,11 @@ static int cdns_pcie_ep_write_header(struct pci_epc *epc, u8 fn,
return 0;
}
-static int cdns_pcie_ep_set_bar(struct pci_epc *epc, u8 fn,
+static int cdns_pcie_ep_set_bar(struct pci_epc *epc, u8 fn, u8 vfn,
struct pci_epf_bar *epf_bar)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
+ struct cdns_pcie_epf *epf = &ep->epf[fn];
struct cdns_pcie *pcie = &ep->pcie;
dma_addr_t bar_phys = epf_bar->phys_addr;
enum pci_barno bar = epf_bar->barno;
@@ -72,14 +103,11 @@ static int cdns_pcie_ep_set_bar(struct pci_epc *epc, u8 fn,
ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_IO_32BITS;
} else {
bool is_prefetch = !!(flags & PCI_BASE_ADDRESS_MEM_PREFETCH);
- bool is_64bits = sz > SZ_2G;
+ bool is_64bits = !!(flags & PCI_BASE_ADDRESS_MEM_TYPE_64);
if (is_64bits && (bar & 1))
return -EINVAL;
- if (is_64bits && !(flags & PCI_BASE_ADDRESS_MEM_TYPE_64))
- epf_bar->flags |= PCI_BASE_ADDRESS_MEM_TYPE_64;
-
if (is_64bits && is_prefetch)
ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_64BITS;
else if (is_prefetch)
@@ -92,71 +120,83 @@ static int cdns_pcie_ep_set_bar(struct pci_epc *epc, u8 fn,
addr0 = lower_32_bits(bar_phys);
addr1 = upper_32_bits(bar_phys);
+
+ if (vfn == 1)
+ reg = CDNS_PCIE_LM_EP_VFUNC_BAR_CFG(bar, fn);
+ else
+ reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG(bar, fn);
+ b = (bar < BAR_4) ? bar : bar - BAR_4;
+
+ if (vfn == 0 || vfn == 1) {
+ cfg = cdns_pcie_readl(pcie, reg);
+ cfg &= ~(CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
+ CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
+ cfg |= (CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE(b, aperture) |
+ CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl));
+ cdns_pcie_writel(pcie, reg, cfg);
+ }
+
+ fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);
cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar),
addr0);
cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar),
addr1);
- if (bar < BAR_4) {
- reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn);
- b = bar;
- } else {
- reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn);
- b = bar - BAR_4;
- }
-
- cfg = cdns_pcie_readl(pcie, reg);
- cfg &= ~(CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
- CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
- cfg |= (CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE(b, aperture) |
- CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl));
- cdns_pcie_writel(pcie, reg, cfg);
+ if (vfn > 0)
+ epf = &epf->epf[vfn - 1];
+ epf->epf_bar[bar] = epf_bar;
return 0;
}
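One behavioral change in this hunk is worth calling out: the BAR is treated as 64-bit only when the caller sets PCI_BASE_ADDRESS_MEM_TYPE_64, instead of being silently promoted whenever the size exceeds 2 GiB, and a 64-bit BAR on an odd index is rejected because it would spill into the next BAR register. A standalone sketch of that decision table, using illustrative enum names rather than the driver's CDNS_PCIE_LM_BAR_CFG_CTRL_* constants:

    #include <stdbool.h>
    #include <stdio.h>

    /* BAR flag bits, as defined in include/uapi/linux/pci_regs.h. */
    #define PCI_BASE_ADDRESS_SPACE_IO      0x01
    #define PCI_BASE_ADDRESS_MEM_TYPE_64   0x04
    #define PCI_BASE_ADDRESS_MEM_PREFETCH  0x08

    enum bar_ctrl { IO_32, MEM_32, PF_MEM_32, MEM_64, PF_MEM_64, INVALID };

    static enum bar_ctrl pick_ctrl(unsigned int bar, unsigned int flags)
    {
        bool is_prefetch = flags & PCI_BASE_ADDRESS_MEM_PREFETCH;
        bool is_64bits = flags & PCI_BASE_ADDRESS_MEM_TYPE_64;

        if (flags & PCI_BASE_ADDRESS_SPACE_IO)
            return IO_32;
        if (is_64bits && (bar & 1))
            return INVALID;  /* a 64-bit BAR consumes two registers */
        if (is_64bits)
            return is_prefetch ? PF_MEM_64 : MEM_64;
        return is_prefetch ? PF_MEM_32 : MEM_32;
    }

    int main(void)
    {
        printf("%d\n", pick_ctrl(0, PCI_BASE_ADDRESS_MEM_TYPE_64));  /* MEM_64 */
        printf("%d\n", pick_ctrl(1, PCI_BASE_ADDRESS_MEM_TYPE_64));  /* INVALID */
        return 0;
    }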
-static void cdns_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn,
+static void cdns_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn, u8 vfn,
struct pci_epf_bar *epf_bar)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
+ struct cdns_pcie_epf *epf = &ep->epf[fn];
struct cdns_pcie *pcie = &ep->pcie;
enum pci_barno bar = epf_bar->barno;
u32 reg, cfg, b, ctrl;
- if (bar < BAR_4) {
- reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn);
- b = bar;
- } else {
- reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn);
- b = bar - BAR_4;
+ if (vfn == 1)
+ reg = CDNS_PCIE_LM_EP_VFUNC_BAR_CFG(bar, fn);
+ else
+ reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG(bar, fn);
+ b = (bar < BAR_4) ? bar : bar - BAR_4;
+
+ if (vfn == 0 || vfn == 1) {
+ ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED;
+ cfg = cdns_pcie_readl(pcie, reg);
+ cfg &= ~(CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
+ CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
+ cfg |= CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl);
+ cdns_pcie_writel(pcie, reg, cfg);
}
- ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED;
- cfg = cdns_pcie_readl(pcie, reg);
- cfg &= ~(CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
- CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
- cfg |= CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl);
- cdns_pcie_writel(pcie, reg, cfg);
-
+ fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);
cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar), 0);
cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar), 0);
+
+ if (vfn > 0)
+ epf = &epf->epf[vfn - 1];
+ epf->epf_bar[bar] = NULL;
}
-static int cdns_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, phys_addr_t addr,
- u64 pci_addr, size_t size)
+static int cdns_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, u8 vfn,
+ phys_addr_t addr, u64 pci_addr, size_t size)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
struct cdns_pcie *pcie = &ep->pcie;
u32 r;
- r = find_first_zero_bit(&ep->ob_region_map,
- sizeof(ep->ob_region_map) * BITS_PER_LONG);
+ r = find_first_zero_bit(&ep->ob_region_map, BITS_PER_LONG);
if (r >= ep->max_regions - 1) {
dev_err(&epc->dev, "no free outbound region\n");
return -EINVAL;
}
- cdns_pcie_set_outbound_region(pcie, fn, r, false, addr, pci_addr, size);
+ fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);
+ cdns_pcie_set_outbound_region(pcie, 0, fn, r, false, addr, pci_addr, size);
set_bit(r, &ep->ob_region_map);
ep->ob_addr[r] = addr;
@@ -164,7 +204,7 @@ static int cdns_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, phys_addr_t addr,
return 0;
}
-static void cdns_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn,
+static void cdns_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn, u8 vfn,
phys_addr_t addr)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
@@ -184,12 +224,16 @@ static void cdns_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn,
clear_bit(r, &ep->ob_region_map);
}
-static int cdns_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 mmc)
+static int cdns_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 vfn, u8 nr_irqs)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
struct cdns_pcie *pcie = &ep->pcie;
- u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
+ u8 mmc = order_base_2(nr_irqs);
u16 flags;
+ u8 cap;
+
+ cap = cdns_pcie_find_capability(pcie, PCI_CAP_ID_MSI);
+ fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);
/*
* Set the Multiple Message Capable bitfield into the Message Control
@@ -204,12 +248,15 @@ static int cdns_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 mmc)
return 0;
}
-static int cdns_pcie_ep_get_msi(struct pci_epc *epc, u8 fn)
+static int cdns_pcie_ep_get_msi(struct pci_epc *epc, u8 fn, u8 vfn)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
struct cdns_pcie *pcie = &ep->pcie;
- u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
u16 flags, mme;
+ u8 cap;
+
+ cap = cdns_pcie_find_capability(pcie, PCI_CAP_ID_MSI);
+ fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);
/* Validate that the MSI feature is actually enabled. */
flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
@@ -220,15 +267,66 @@ static int cdns_pcie_ep_get_msi(struct pci_epc *epc, u8 fn)
* Get the Multiple Message Enable bitfield from the Message Control
* register.
*/
- mme = (flags & PCI_MSI_FLAGS_QSIZE) >> 4;
+ mme = FIELD_GET(PCI_MSI_FLAGS_QSIZE, flags);
- return mme;
+ return 1 << mme;
}
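The return-value change above follows the MSI capability's log2 encoding: Multiple Message Enable (bits 6:4 of Message Control) stores the log2 of the enabled vector count, so the op now decodes it with 1 << mme, and set_msi correspondingly encodes nr_irqs with order_base_2(). A small sketch of the same decode, with PCI_MSI_FLAGS_QSIZE copied from pci_regs.h:

    #include <stdint.h>
    #include <stdio.h>

    #define PCI_MSI_FLAGS_QSIZE  0x0070  /* Multiple Message Enable, bits 6:4 */

    /* Equivalent of FIELD_GET(PCI_MSI_FLAGS_QSIZE, flags) followed by
     * the 1 << mme decode: MME is a log2-encoded vector count. */
    static unsigned int msi_nr_vectors(uint16_t flags)
    {
        unsigned int mme = (flags & PCI_MSI_FLAGS_QSIZE) >> 4;
        return 1u << mme;
    }

    int main(void)
    {
        printf("%u\n", msi_nr_vectors(0x0000));  /* MME=0 -> 1 vector   */
        printf("%u\n", msi_nr_vectors(0x0050));  /* MME=5 -> 32 vectors */
        return 0;
    }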
-static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn,
- u8 intx, bool is_asserted)
+static int cdns_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
+ struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
struct cdns_pcie *pcie = &ep->pcie;
+ u32 val, reg;
+ u8 cap;
+
+ cap = cdns_pcie_find_capability(pcie, PCI_CAP_ID_MSIX);
+ func_no = cdns_pcie_get_fn_from_vfn(pcie, func_no, vfunc_no);
+
+ reg = cap + PCI_MSIX_FLAGS;
+ val = cdns_pcie_ep_fn_readw(pcie, func_no, reg);
+ if (!(val & PCI_MSIX_FLAGS_ENABLE))
+ return -EINVAL;
+
+ val &= PCI_MSIX_FLAGS_QSIZE;
+
+ return val + 1;
+}
+
+static int cdns_pcie_ep_set_msix(struct pci_epc *epc, u8 fn, u8 vfn,
+ u16 nr_irqs, enum pci_barno bir, u32 offset)
+{
+ struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
+ struct cdns_pcie *pcie = &ep->pcie;
+ u32 val, reg;
+ u8 cap;
+
+ cap = cdns_pcie_find_capability(pcie, PCI_CAP_ID_MSIX);
+ fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);
+
+ reg = cap + PCI_MSIX_FLAGS;
+ val = cdns_pcie_ep_fn_readw(pcie, fn, reg);
+ val &= ~PCI_MSIX_FLAGS_QSIZE;
+ val |= nr_irqs - 1; /* encoded as N-1 */
+ cdns_pcie_ep_fn_writew(pcie, fn, reg, val);
+
+ /* Set MSI-X BAR and offset */
+ reg = cap + PCI_MSIX_TABLE;
+ val = offset | bir;
+ cdns_pcie_ep_fn_writel(pcie, fn, reg, val);
+
+ /* Set PBA BAR and offset. BAR must match MSI-X BAR */
+ reg = cap + PCI_MSIX_PBA;
+ val = (offset + (nr_irqs * PCI_MSIX_ENTRY_SIZE)) | bir;
+ cdns_pcie_ep_fn_writel(pcie, fn, reg, val);
+
+ return 0;
+}
+
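set_msix() above relies on two MSI-X encodings: Table Size lives in Message Control as N-1, and the PBA is placed immediately after the vector table in the same BAR (16 bytes per table entry). A sketch of that layout computation, assuming the masks from pci_regs.h and example offset/BIR values:

    #include <stdint.h>
    #include <stdio.h>

    #define PCI_MSIX_FLAGS_QSIZE  0x07ff  /* Table Size, encoded as N-1 */
    #define PCI_MSIX_ENTRY_SIZE   16

    int main(void)
    {
        uint16_t nr_irqs = 32;
        uint32_t offset = 0x0, bir = 2;  /* example: table in BAR2 at 0 */

        uint16_t qsize = (nr_irqs - 1) & PCI_MSIX_FLAGS_QSIZE;
        uint32_t table = offset | bir;
        uint32_t pba = (offset + nr_irqs * PCI_MSIX_ENTRY_SIZE) | bir;

        printf("QSIZE=%u table=0x%08x pba=0x%08x\n",
               (unsigned)qsize, (unsigned)table, (unsigned)pba);
        return 0;
    }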
+static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn, u8 intx,
+ bool is_asserted)
+{
+ struct cdns_pcie *pcie = &ep->pcie;
+ unsigned long flags;
u32 offset;
u16 status;
u8 msg_code;
@@ -239,7 +337,7 @@ static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn,
if (unlikely(ep->irq_pci_addr != CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY ||
ep->irq_pci_fn != fn)) {
/* First region was reserved for IRQ writes. */
- cdns_pcie_set_outbound_region_for_normal_msg(pcie, fn, 0,
+ cdns_pcie_set_outbound_region_for_normal_msg(pcie, 0, fn, 0,
ep->irq_phys_addr);
ep->irq_pci_addr = CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY;
ep->irq_pci_fn = fn;
@@ -247,25 +345,27 @@ static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn,
if (is_asserted) {
ep->irq_pending |= BIT(intx);
- msg_code = MSG_CODE_ASSERT_INTA + intx;
+ msg_code = PCIE_MSG_CODE_ASSERT_INTA + intx;
} else {
ep->irq_pending &= ~BIT(intx);
- msg_code = MSG_CODE_DEASSERT_INTA + intx;
+ msg_code = PCIE_MSG_CODE_DEASSERT_INTA + intx;
}
+ spin_lock_irqsave(&ep->lock, flags);
status = cdns_pcie_ep_fn_readw(pcie, fn, PCI_STATUS);
if (((status & PCI_STATUS_INTERRUPT) != 0) ^ (ep->irq_pending != 0)) {
status ^= PCI_STATUS_INTERRUPT;
cdns_pcie_ep_fn_writew(pcie, fn, PCI_STATUS, status);
}
+ spin_unlock_irqrestore(&ep->lock, flags);
- offset = CDNS_PCIE_NORMAL_MSG_ROUTING(MSG_ROUTING_LOCAL) |
- CDNS_PCIE_NORMAL_MSG_CODE(msg_code) |
- CDNS_PCIE_MSG_NO_DATA;
+ offset = CDNS_PCIE_NORMAL_MSG_ROUTING(PCIE_MSG_TYPE_R_LOCAL) |
+ CDNS_PCIE_NORMAL_MSG_CODE(msg_code);
writel(0, ep->irq_cpu_addr + offset);
}
-static int cdns_pcie_ep_send_legacy_irq(struct cdns_pcie_ep *ep, u8 fn, u8 intx)
+static int cdns_pcie_ep_send_intx_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn,
+ u8 intx)
{
u16 cmd;
@@ -275,22 +375,23 @@ static int cdns_pcie_ep_send_legacy_irq(struct cdns_pcie_ep *ep, u8 fn, u8 intx)
cdns_pcie_ep_assert_intx(ep, fn, intx, true);
/*
- * The mdelay() value was taken from dra7xx_pcie_raise_legacy_irq()
- * from drivers/pci/dwc/pci-dra7xx.c
+ * The mdelay() value was taken from dra7xx_pcie_raise_intx_irq()
*/
mdelay(1);
cdns_pcie_ep_assert_intx(ep, fn, intx, false);
return 0;
}
-static int cdns_pcie_ep_send_msi_irq(struct cdns_pcie_ep *ep, u8 fn,
+static int cdns_pcie_ep_send_msi_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn,
u8 interrupt_num)
{
struct cdns_pcie *pcie = &ep->pcie;
- u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
u16 flags, mme, data, data_mask;
- u8 msi_count;
u64 pci_addr, pci_addr_mask = 0xff;
+ u8 msi_count, cap;
+
+ cap = cdns_pcie_find_capability(pcie, PCI_CAP_ID_MSI);
+ fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);
/* Check whether the MSI feature has been enabled by the PCI host. */
flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
@@ -298,7 +399,7 @@ static int cdns_pcie_ep_send_msi_irq(struct cdns_pcie_ep *ep, u8 fn,
return -EINVAL;
/* Get the number of enabled MSIs */
- mme = (flags & PCI_MSI_FLAGS_QSIZE) >> 4;
+ mme = FIELD_GET(PCI_MSI_FLAGS_QSIZE, flags);
msi_count = 1 << mme;
if (!interrupt_num || interrupt_num > msi_count)
return -EINVAL;
@@ -318,7 +419,7 @@ static int cdns_pcie_ep_send_msi_irq(struct cdns_pcie_ep *ep, u8 fn,
if (unlikely(ep->irq_pci_addr != (pci_addr & ~pci_addr_mask) ||
ep->irq_pci_fn != fn)) {
/* First region was reserved for IRQ writes. */
- cdns_pcie_set_outbound_region(pcie, fn, 0,
+ cdns_pcie_set_outbound_region(pcie, 0, fn, 0,
false,
ep->irq_phys_addr,
pci_addr & ~pci_addr_mask,
@@ -331,18 +432,129 @@ static int cdns_pcie_ep_send_msi_irq(struct cdns_pcie_ep *ep, u8 fn,
return 0;
}
-static int cdns_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn,
- enum pci_epc_irq_type type,
- u16 interrupt_num)
+static int cdns_pcie_ep_map_msi_irq(struct pci_epc *epc, u8 fn, u8 vfn,
+ phys_addr_t addr, u8 interrupt_num,
+ u32 entry_size, u32 *msi_data,
+ u32 *msi_addr_offset)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
+ struct cdns_pcie *pcie = &ep->pcie;
+ u64 pci_addr, pci_addr_mask = 0xff;
+ u16 flags, mme, data, data_mask;
+ u8 msi_count, cap;
+ int ret;
+ int i;
+
+ cap = cdns_pcie_find_capability(pcie, PCI_CAP_ID_MSI);
+ fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);
+
+ /* Check whether the MSI feature has been enabled by the PCI host. */
+ flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
+ if (!(flags & PCI_MSI_FLAGS_ENABLE))
+ return -EINVAL;
+
+ /* Get the number of enabled MSIs */
+ mme = FIELD_GET(PCI_MSI_FLAGS_QSIZE, flags);
+ msi_count = 1 << mme;
+ if (!interrupt_num || interrupt_num > msi_count)
+ return -EINVAL;
+
+ /* Compute the data value to be written. */
+ data_mask = msi_count - 1;
+ data = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_DATA_64);
+ data = data & ~data_mask;
+
+ /* Get the PCI address where to write the data into. */
+ pci_addr = cdns_pcie_ep_fn_readl(pcie, fn, cap + PCI_MSI_ADDRESS_HI);
+ pci_addr <<= 32;
+ pci_addr |= cdns_pcie_ep_fn_readl(pcie, fn, cap + PCI_MSI_ADDRESS_LO);
+ pci_addr &= GENMASK_ULL(63, 2);
+
+ for (i = 0; i < interrupt_num; i++) {
+ ret = cdns_pcie_ep_map_addr(epc, fn, vfn, addr,
+ pci_addr & ~pci_addr_mask,
+ entry_size);
+ if (ret)
+ return ret;
+ addr = addr + entry_size;
+ }
+
+ *msi_data = data;
+ *msi_addr_offset = pci_addr & pci_addr_mask;
+
+ return 0;
+}
+
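map_msi_irq() above rebuilds the 64-bit MSI target from the capability's ADDRESS_HI/ADDRESS_LO pair and then splits it into a window-aligned base and a byte offset, because the outbound translation unit maps whole regions while the host may have programmed an arbitrary address. A sketch of that split, using a made-up example address:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t hi = 0x00000001, lo = 0xfee00124;  /* example MSI address */
        uint64_t mask = 0xff;                       /* pci_addr_mask above */
        uint64_t addr = ((uint64_t)hi << 32) | lo;

        printf("window base = 0x%llx, msi_addr_offset = 0x%llx\n",
               (unsigned long long)(addr & ~mask),
               (unsigned long long)(addr & mask));
        return 0;
    }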
+static int cdns_pcie_ep_send_msix_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn,
+ u16 interrupt_num)
+{
+ u32 tbl_offset, msg_data, reg;
+ struct cdns_pcie *pcie = &ep->pcie;
+ struct pci_epf_msix_tbl *msix_tbl;
+ struct cdns_pcie_epf *epf;
+ u64 pci_addr_mask = 0xff;
+ u64 msg_addr;
+ u8 bir, cap;
+ u16 flags;
+
+ cap = cdns_pcie_find_capability(pcie, PCI_CAP_ID_MSIX);
+ epf = &ep->epf[fn];
+ if (vfn > 0)
+ epf = &epf->epf[vfn - 1];
+
+ fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);
+
+ /* Check whether the MSI-X feature has been enabled by the PCI host. */
+ flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSIX_FLAGS);
+ if (!(flags & PCI_MSIX_FLAGS_ENABLE))
+ return -EINVAL;
+
+ reg = cap + PCI_MSIX_TABLE;
+ tbl_offset = cdns_pcie_ep_fn_readl(pcie, fn, reg);
+ bir = FIELD_GET(PCI_MSIX_TABLE_BIR, tbl_offset);
+ tbl_offset &= PCI_MSIX_TABLE_OFFSET;
+
+ msix_tbl = epf->epf_bar[bir]->addr + tbl_offset;
+ msg_addr = msix_tbl[(interrupt_num - 1)].msg_addr;
+ msg_data = msix_tbl[(interrupt_num - 1)].msg_data;
+
+ /* Set the outbound region if needed. */
+ if (ep->irq_pci_addr != (msg_addr & ~pci_addr_mask) ||
+ ep->irq_pci_fn != fn) {
+ /* First region was reserved for IRQ writes. */
+ cdns_pcie_set_outbound_region(pcie, 0, fn, 0,
+ false,
+ ep->irq_phys_addr,
+ msg_addr & ~pci_addr_mask,
+ pci_addr_mask + 1);
+ ep->irq_pci_addr = (msg_addr & ~pci_addr_mask);
+ ep->irq_pci_fn = fn;
+ }
+ writel(msg_data, ep->irq_cpu_addr + (msg_addr & pci_addr_mask));
+
+ return 0;
+}
+
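To raise an MSI-X interrupt, send_msix_irq() above first decodes the capability's Table Offset register, whose low three bits carry the BAR Indicator (BIR) while the remaining bits give the byte offset of the vector table inside that BAR; the message address and data are then read from the locally mapped table. A sketch of the decode, with the two masks copied from pci_regs.h and an example register value:

    #include <stdint.h>
    #include <stdio.h>

    #define PCI_MSIX_TABLE_BIR     0x00000007
    #define PCI_MSIX_TABLE_OFFSET  0xfffffff8

    int main(void)
    {
        uint32_t tbl = 0x00002003;  /* example: offset 0x2000 in BAR3 */

        printf("bir=%u offset=0x%x\n",
               (unsigned)(tbl & PCI_MSIX_TABLE_BIR),
               (unsigned)(tbl & PCI_MSIX_TABLE_OFFSET));
        return 0;
    }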
+static int cdns_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn, u8 vfn,
+ unsigned int type, u16 interrupt_num)
+{
+ struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
+ struct cdns_pcie *pcie = &ep->pcie;
+ struct device *dev = pcie->dev;
switch (type) {
- case PCI_EPC_IRQ_LEGACY:
- return cdns_pcie_ep_send_legacy_irq(ep, fn, 0);
+ case PCI_IRQ_INTX:
+ if (vfn > 0) {
+ dev_err(dev, "Cannot raise INTX interrupts for VF\n");
+ return -EINVAL;
+ }
+ return cdns_pcie_ep_send_intx_irq(ep, fn, vfn, 0);
- case PCI_EPC_IRQ_MSI:
- return cdns_pcie_ep_send_msi_irq(ep, fn, interrupt_num);
+ case PCI_IRQ_MSI:
+ return cdns_pcie_ep_send_msi_irq(ep, fn, vfn, interrupt_num);
+
+ case PCI_IRQ_MSIX:
+ return cdns_pcie_ep_send_msix_irq(ep, fn, vfn, interrupt_num);
default:
break;
@@ -355,31 +567,71 @@ static int cdns_pcie_ep_start(struct pci_epc *epc)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
struct cdns_pcie *pcie = &ep->pcie;
- struct pci_epf *epf;
- u32 cfg;
+ struct device *dev = pcie->dev;
+ int max_epfs = sizeof(epc->function_num_map) * 8;
+ int ret, epf, last_fn;
+ u32 reg, value;
+ u8 cap;
+ cap = cdns_pcie_find_capability(pcie, PCI_CAP_ID_EXP);
/*
* BIT(0) is hardwired to 1, hence function 0 is always enabled
* and can't be disabled anyway.
*/
- cfg = BIT(0);
- list_for_each_entry(epf, &epc->pci_epf, list)
- cfg |= BIT(epf->func_no);
- cdns_pcie_writel(pcie, CDNS_PCIE_LM_EP_FUNC_CFG, cfg);
+ cdns_pcie_writel(pcie, CDNS_PCIE_LM_EP_FUNC_CFG, epc->function_num_map);
+
+ /*
+ * Next function field in ARI_CAP_AND_CTRL register for last function
+ * should be 0. Clear Next Function Number field for the last
+ * function used.
+ */
+ last_fn = find_last_bit(&epc->function_num_map, BITS_PER_LONG);
+ reg = CDNS_PCIE_CORE_PF_I_ARI_CAP_AND_CTRL(last_fn);
+ value = cdns_pcie_readl(pcie, reg);
+ value &= ~CDNS_PCIE_ARI_CAP_NFN_MASK;
+ cdns_pcie_writel(pcie, reg, value);
+
+ if (ep->quirk_disable_flr) {
+ for (epf = 0; epf < max_epfs; epf++) {
+ if (!(epc->function_num_map & BIT(epf)))
+ continue;
+
+ value = cdns_pcie_ep_fn_readl(pcie, epf,
+ cap + PCI_EXP_DEVCAP);
+ value &= ~PCI_EXP_DEVCAP_FLR;
+ cdns_pcie_ep_fn_writel(pcie, epf,
+ cap + PCI_EXP_DEVCAP, value);
+ }
+ }
+
+ ret = cdns_pcie_start_link(pcie);
+ if (ret) {
+ dev_err(dev, "Failed to start link\n");
+ return ret;
+ }
return 0;
}
+static const struct pci_epc_features cdns_pcie_epc_vf_features = {
+ .msi_capable = true,
+ .msix_capable = true,
+ .align = 65536,
+};
+
static const struct pci_epc_features cdns_pcie_epc_features = {
- .linkup_notifier = false,
.msi_capable = true,
- .msix_capable = false,
+ .msix_capable = true,
+ .align = 256,
};
static const struct pci_epc_features*
-cdns_pcie_ep_get_features(struct pci_epc *epc, u8 func_no)
+cdns_pcie_ep_get_features(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
- return &cdns_pcie_epc_features;
+ if (!vfunc_no)
+ return &cdns_pcie_epc_features;
+
+ return &cdns_pcie_epc_vf_features;
}
static const struct pci_epc_ops cdns_pcie_epc_ops = {
@@ -390,11 +642,25 @@ static const struct pci_epc_ops cdns_pcie_epc_ops = {
.unmap_addr = cdns_pcie_ep_unmap_addr,
.set_msi = cdns_pcie_ep_set_msi,
.get_msi = cdns_pcie_ep_get_msi,
+ .set_msix = cdns_pcie_ep_set_msix,
+ .get_msix = cdns_pcie_ep_get_msix,
.raise_irq = cdns_pcie_ep_raise_irq,
+ .map_msi_irq = cdns_pcie_ep_map_msi_irq,
.start = cdns_pcie_ep_start,
.get_features = cdns_pcie_ep_get_features,
};
+void cdns_pcie_ep_disable(struct cdns_pcie_ep *ep)
+{
+ struct device *dev = ep->pcie.dev;
+ struct pci_epc *epc = to_pci_epc(dev);
+
+ pci_epc_deinit_notify(epc);
+ pci_epc_mem_free_addr(epc, ep->irq_phys_addr, ep->irq_cpu_addr,
+ SZ_128K);
+ pci_epc_mem_exit(epc);
+}
+EXPORT_SYMBOL_GPL(cdns_pcie_ep_disable);
int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
{
@@ -402,14 +668,15 @@ int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
struct platform_device *pdev = to_platform_device(dev);
struct device_node *np = dev->of_node;
struct cdns_pcie *pcie = &ep->pcie;
+ struct cdns_pcie_epf *epf;
struct resource *res;
struct pci_epc *epc;
int ret;
+ int i;
pcie->is_rc = false;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "reg");
- pcie->reg_base = devm_ioremap_resource(dev, res);
+ pcie->reg_base = devm_platform_ioremap_resource_byname(pdev, "reg");
if (IS_ERR(pcie->reg_base)) {
dev_err(dev, "missing \"reg\"\n");
return PTR_ERR(pcie->reg_base);
@@ -422,12 +689,9 @@ int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
}
pcie->mem_res = res;
- ret = of_property_read_u32(np, "cdns,max-outbound-regions",
- &ep->max_regions);
- if (ret < 0) {
- dev_err(dev, "missing \"cdns,max-outbound-regions\"\n");
- return ret;
- }
+ ep->max_regions = CDNS_PCIE_MAX_OB;
+ of_property_read_u32(np, "cdns,max-outbound-regions", &ep->max_regions);
+
ep->ob_addr = devm_kcalloc(dev,
ep->max_regions, sizeof(*ep->ob_addr),
GFP_KERNEL);
@@ -440,8 +704,7 @@ int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
epc = devm_pci_epc_create(dev, &cdns_pcie_epc_ops);
if (IS_ERR(epc)) {
dev_err(dev, "failed to create epc device\n");
- ret = PTR_ERR(epc);
- goto err_init;
+ return PTR_ERR(epc);
}
epc_set_drvdata(epc, ep);
@@ -449,11 +712,35 @@ int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
if (of_property_read_u8(np, "max-functions", &epc->max_functions) < 0)
epc->max_functions = 1;
+ ep->epf = devm_kcalloc(dev, epc->max_functions, sizeof(*ep->epf),
+ GFP_KERNEL);
+ if (!ep->epf)
+ return -ENOMEM;
+
+ epc->max_vfs = devm_kcalloc(dev, epc->max_functions,
+ sizeof(*epc->max_vfs), GFP_KERNEL);
+ if (!epc->max_vfs)
+ return -ENOMEM;
+
+ ret = of_property_read_u8_array(np, "max-virtual-functions",
+ epc->max_vfs, epc->max_functions);
+ if (ret == 0) {
+ for (i = 0; i < epc->max_functions; i++) {
+ epf = &ep->epf[i];
+ if (epc->max_vfs[i] == 0)
+ continue;
+ epf->epf = devm_kcalloc(dev, epc->max_vfs[i],
+ sizeof(*ep->epf), GFP_KERNEL);
+ if (!epf->epf)
+ return -ENOMEM;
+ }
+ }
+
ret = pci_epc_mem_init(epc, pcie->mem_res->start,
resource_size(pcie->mem_res), PAGE_SIZE);
if (ret < 0) {
dev_err(dev, "failed to initialize the memory space\n");
- goto err_init;
+ return ret;
}
ep->irq_cpu_addr = pci_epc_mem_alloc_addr(epc, &ep->irq_phys_addr,
@@ -467,13 +754,22 @@ int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
/* Reserve region 0 for IRQs */
set_bit(0, &ep->ob_region_map);
+ if (ep->quirk_detect_quiet_flag)
+ cdns_pcie_detect_quiet_min_delay_set(&ep->pcie);
+
+ spin_lock_init(&ep->lock);
+
+ pci_epc_init_notify(epc);
+
return 0;
free_epc_mem:
pci_epc_mem_exit(epc);
- err_init:
- pm_runtime_put_sync(dev);
-
return ret;
}
+EXPORT_SYMBOL_GPL(cdns_pcie_ep_setup);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Cadence PCIe endpoint controller driver");
+MODULE_AUTHOR("Cyrille Pitchen <cyrille.pitchen@free-electrons.com>");