Diffstat (limited to 'drivers/pci/controller/cadence/pcie-cadence.h')
-rw-r--r--  drivers/pci/controller/cadence/pcie-cadence.h | 612
1 file changed, 369 insertions(+), 243 deletions(-)
diff --git a/drivers/pci/controller/cadence/pcie-cadence.h b/drivers/pci/controller/cadence/pcie-cadence.h
index df14ad002fe9..443033c607d7 100644
--- a/drivers/pci/controller/cadence/pcie-cadence.h
+++ b/drivers/pci/controller/cadence/pcie-cadence.h
@@ -7,263 +7,136 @@
#define _PCIE_CADENCE_H
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/pci-epf.h>
#include <linux/phy/phy.h>
-
-/*
- * Local Management Registers
- */
-#define CDNS_PCIE_LM_BASE 0x00100000
-
-/* Vendor ID Register */
-#define CDNS_PCIE_LM_ID (CDNS_PCIE_LM_BASE + 0x0044)
-#define CDNS_PCIE_LM_ID_VENDOR_MASK GENMASK(15, 0)
-#define CDNS_PCIE_LM_ID_VENDOR_SHIFT 0
-#define CDNS_PCIE_LM_ID_VENDOR(vid) \
- (((vid) << CDNS_PCIE_LM_ID_VENDOR_SHIFT) & CDNS_PCIE_LM_ID_VENDOR_MASK)
-#define CDNS_PCIE_LM_ID_SUBSYS_MASK GENMASK(31, 16)
-#define CDNS_PCIE_LM_ID_SUBSYS_SHIFT 16
-#define CDNS_PCIE_LM_ID_SUBSYS(sub) \
- (((sub) << CDNS_PCIE_LM_ID_SUBSYS_SHIFT) & CDNS_PCIE_LM_ID_SUBSYS_MASK)
-
-/* Root Port Requestor ID Register */
-#define CDNS_PCIE_LM_RP_RID (CDNS_PCIE_LM_BASE + 0x0228)
-#define CDNS_PCIE_LM_RP_RID_MASK GENMASK(15, 0)
-#define CDNS_PCIE_LM_RP_RID_SHIFT 0
-#define CDNS_PCIE_LM_RP_RID_(rid) \
- (((rid) << CDNS_PCIE_LM_RP_RID_SHIFT) & CDNS_PCIE_LM_RP_RID_MASK)
-
-/* Endpoint Bus and Device Number Register */
-#define CDNS_PCIE_LM_EP_ID (CDNS_PCIE_LM_BASE + 0x022c)
-#define CDNS_PCIE_LM_EP_ID_DEV_MASK GENMASK(4, 0)
-#define CDNS_PCIE_LM_EP_ID_DEV_SHIFT 0
-#define CDNS_PCIE_LM_EP_ID_BUS_MASK GENMASK(15, 8)
-#define CDNS_PCIE_LM_EP_ID_BUS_SHIFT 8
-
-/* Endpoint Function f BAR b Configuration Registers */
-#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn) \
- (CDNS_PCIE_LM_BASE + 0x0240 + (fn) * 0x0008)
-#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn) \
- (CDNS_PCIE_LM_BASE + 0x0244 + (fn) * 0x0008)
-#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) \
- (GENMASK(4, 0) << ((b) * 8))
-#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE(b, a) \
- (((a) << ((b) * 8)) & CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b))
-#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b) \
- (GENMASK(7, 5) << ((b) * 8))
-#define CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, c) \
- (((c) << ((b) * 8 + 5)) & CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b))
-
-/* Endpoint Function Configuration Register */
-#define CDNS_PCIE_LM_EP_FUNC_CFG (CDNS_PCIE_LM_BASE + 0x02c0)
-
-/* Root Complex BAR Configuration Register */
-#define CDNS_PCIE_LM_RC_BAR_CFG (CDNS_PCIE_LM_BASE + 0x0300)
-#define CDNS_PCIE_LM_RC_BAR_CFG_BAR0_APERTURE_MASK GENMASK(5, 0)
-#define CDNS_PCIE_LM_RC_BAR_CFG_BAR0_APERTURE(a) \
- (((a) << 0) & CDNS_PCIE_LM_RC_BAR_CFG_BAR0_APERTURE_MASK)
-#define CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL_MASK GENMASK(8, 6)
-#define CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL(c) \
- (((c) << 6) & CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL_MASK)
-#define CDNS_PCIE_LM_RC_BAR_CFG_BAR1_APERTURE_MASK GENMASK(13, 9)
-#define CDNS_PCIE_LM_RC_BAR_CFG_BAR1_APERTURE(a) \
- (((a) << 9) & CDNS_PCIE_LM_RC_BAR_CFG_BAR1_APERTURE_MASK)
-#define CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL_MASK GENMASK(16, 14)
-#define CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL(c) \
- (((c) << 14) & CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL_MASK)
-#define CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_ENABLE BIT(17)
-#define CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_32BITS 0
-#define CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_64BITS BIT(18)
-#define CDNS_PCIE_LM_RC_BAR_CFG_IO_ENABLE BIT(19)
-#define CDNS_PCIE_LM_RC_BAR_CFG_IO_16BITS 0
-#define CDNS_PCIE_LM_RC_BAR_CFG_IO_32BITS BIT(20)
-#define CDNS_PCIE_LM_RC_BAR_CFG_CHECK_ENABLE BIT(31)
-
-/* BAR control values applicable to both Endpoint Function and Root Complex */
-#define CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED 0x0
-#define CDNS_PCIE_LM_BAR_CFG_CTRL_IO_32BITS 0x1
-#define CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_32BITS 0x4
-#define CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_32BITS 0x5
-#define CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_64BITS 0x6
-#define CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_64BITS 0x7
-
-
-/*
- * Endpoint Function Registers (PCI configuration space for endpoint functions)
- */
-#define CDNS_PCIE_EP_FUNC_BASE(fn) (((fn) << 12) & GENMASK(19, 12))
-
-#define CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET 0x90
-
-/*
- * Root Port Registers (PCI configuration space for the root port function)
- */
-#define CDNS_PCIE_RP_BASE 0x00200000
-
-
-/*
- * Address Translation Registers
- */
-#define CDNS_PCIE_AT_BASE 0x00400000
-
-/* Region r Outbound AXI to PCIe Address Translation Register 0 */
-#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(r) \
- (CDNS_PCIE_AT_BASE + 0x0000 + ((r) & 0x1f) * 0x0020)
-#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS_MASK GENMASK(5, 0)
-#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS(nbits) \
- (((nbits) - 1) & CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS_MASK)
-#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN_MASK GENMASK(19, 12)
-#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN(devfn) \
- (((devfn) << 12) & CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN_MASK)
-#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS_MASK GENMASK(27, 20)
-#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS(bus) \
- (((bus) << 20) & CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS_MASK)
-
-/* Region r Outbound AXI to PCIe Address Translation Register 1 */
-#define CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(r) \
- (CDNS_PCIE_AT_BASE + 0x0004 + ((r) & 0x1f) * 0x0020)
-
-/* Region r Outbound PCIe Descriptor Register 0 */
-#define CDNS_PCIE_AT_OB_REGION_DESC0(r) \
- (CDNS_PCIE_AT_BASE + 0x0008 + ((r) & 0x1f) * 0x0020)
-#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_MASK GENMASK(3, 0)
-#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_MEM 0x2
-#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_IO 0x6
-#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE0 0xa
-#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE1 0xb
-#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_NORMAL_MSG 0xc
-#define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_VENDOR_MSG 0xd
-/* Bit 23 MUST be set in RC mode. */
-#define CDNS_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID BIT(23)
-#define CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN_MASK GENMASK(31, 24)
-#define CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(devfn) \
- (((devfn) << 24) & CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN_MASK)
-
-/* Region r Outbound PCIe Descriptor Register 1 */
-#define CDNS_PCIE_AT_OB_REGION_DESC1(r) \
- (CDNS_PCIE_AT_BASE + 0x000c + ((r) & 0x1f) * 0x0020)
-#define CDNS_PCIE_AT_OB_REGION_DESC1_BUS_MASK GENMASK(7, 0)
-#define CDNS_PCIE_AT_OB_REGION_DESC1_BUS(bus) \
- ((bus) & CDNS_PCIE_AT_OB_REGION_DESC1_BUS_MASK)
-
-/* Region r AXI Region Base Address Register 0 */
-#define CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r) \
- (CDNS_PCIE_AT_BASE + 0x0018 + ((r) & 0x1f) * 0x0020)
-#define CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS_MASK GENMASK(5, 0)
-#define CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(nbits) \
- (((nbits) - 1) & CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS_MASK)
-
-/* Region r AXI Region Base Address Register 1 */
-#define CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r) \
- (CDNS_PCIE_AT_BASE + 0x001c + ((r) & 0x1f) * 0x0020)
-
-/* Root Port BAR Inbound PCIe to AXI Address Translation Register */
-#define CDNS_PCIE_AT_IB_RP_BAR_ADDR0(bar) \
- (CDNS_PCIE_AT_BASE + 0x0800 + (bar) * 0x0008)
-#define CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS_MASK GENMASK(5, 0)
-#define CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS(nbits) \
- (((nbits) - 1) & CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS_MASK)
-#define CDNS_PCIE_AT_IB_RP_BAR_ADDR1(bar) \
- (CDNS_PCIE_AT_BASE + 0x0804 + (bar) * 0x0008)
-
-/* AXI link down register */
-#define CDNS_PCIE_AT_LINKDOWN (CDNS_PCIE_AT_BASE + 0x0824)
+#include "pcie-cadence-lga-regs.h"
+#include "pcie-cadence-hpa-regs.h"
enum cdns_pcie_rp_bar {
+ RP_BAR_UNDEFINED = -1,
RP_BAR0,
RP_BAR1,
RP_NO_BAR
};
-/* Endpoint Function BAR Inbound PCIe to AXI Address Translation Register */
-#define CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar) \
- (CDNS_PCIE_AT_BASE + 0x0840 + (fn) * 0x0040 + (bar) * 0x0008)
-#define CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar) \
- (CDNS_PCIE_AT_BASE + 0x0844 + (fn) * 0x0040 + (bar) * 0x0008)
-
-/* Normal/Vendor specific message access: offset inside some outbound region */
-#define CDNS_PCIE_NORMAL_MSG_ROUTING_MASK GENMASK(7, 5)
-#define CDNS_PCIE_NORMAL_MSG_ROUTING(route) \
- (((route) << 5) & CDNS_PCIE_NORMAL_MSG_ROUTING_MASK)
-#define CDNS_PCIE_NORMAL_MSG_CODE_MASK GENMASK(15, 8)
-#define CDNS_PCIE_NORMAL_MSG_CODE(code) \
- (((code) << 8) & CDNS_PCIE_NORMAL_MSG_CODE_MASK)
-#define CDNS_PCIE_MSG_NO_DATA BIT(16)
+struct cdns_pcie_rp_ib_bar {
+ u64 size;
+ bool free;
+};
struct cdns_pcie;
-
-enum cdns_pcie_msg_code {
- MSG_CODE_ASSERT_INTA = 0x20,
- MSG_CODE_ASSERT_INTB = 0x21,
- MSG_CODE_ASSERT_INTC = 0x22,
- MSG_CODE_ASSERT_INTD = 0x23,
- MSG_CODE_DEASSERT_INTA = 0x24,
- MSG_CODE_DEASSERT_INTB = 0x25,
- MSG_CODE_DEASSERT_INTC = 0x26,
- MSG_CODE_DEASSERT_INTD = 0x27,
+struct cdns_pcie_rc;
+
+enum cdns_pcie_reg_bank {
+ REG_BANK_RP,
+ REG_BANK_IP_REG,
+ REG_BANK_IP_CFG_CTRL_REG,
+ REG_BANK_AXI_MASTER_COMMON,
+ REG_BANK_AXI_MASTER,
+ REG_BANK_AXI_SLAVE,
+ REG_BANK_AXI_HLS,
+ REG_BANK_AXI_RAS,
+ REG_BANK_AXI_DTI,
+ REG_BANKS_MAX,
};
-enum cdns_pcie_msg_routing {
- /* Route to Root Complex */
- MSG_ROUTING_TO_RC,
-
- /* Use Address Routing */
- MSG_ROUTING_BY_ADDR,
-
- /* Use ID Routing */
- MSG_ROUTING_BY_ID,
-
- /* Route as Broadcast Message from Root Complex */
- MSG_ROUTING_BCAST,
-
- /* Local message; terminate at receiver (INTx messages) */
- MSG_ROUTING_LOCAL,
+struct cdns_pcie_ops {
+ int (*start_link)(struct cdns_pcie *pcie);
+ void (*stop_link)(struct cdns_pcie *pcie);
+ bool (*link_up)(struct cdns_pcie *pcie);
+ u64 (*cpu_addr_fixup)(struct cdns_pcie *pcie, u64 cpu_addr);
+};
- /* Gather & route to Root Complex (PME_TO_Ack message) */
- MSG_ROUTING_GATHER,
+/**
+ * struct cdns_plat_pcie_of_data - Register bank offsets for a platform
+ * @is_rc: controller is a RC
+ * @ip_reg_bank_offset: ip register bank start offset
+ * @ip_cfg_ctrl_reg_offset: ip config control register start offset
+ * @axi_mstr_common_offset: AXI master common register start offset
+ * @axi_slave_offset: AXI slave start offset
+ * @axi_master_offset: AXI master start offset
+ * @axi_hls_offset: AXI HLS start offset
+ * @axi_ras_offset: AXI RAS start offset
+ * @axi_dti_offset: AXI DTI start offset
+ */
+struct cdns_plat_pcie_of_data {
+ u32 is_rc:1;
+ u32 ip_reg_bank_offset;
+ u32 ip_cfg_ctrl_reg_offset;
+ u32 axi_mstr_common_offset;
+ u32 axi_slave_offset;
+ u32 axi_master_offset;
+ u32 axi_hls_offset;
+ u32 axi_ras_offset;
+ u32 axi_dti_offset;
};
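
For illustration, a platform match-data entry for this structure might look as follows; this is only a sketch, and every offset below is a hypothetical placeholder rather than a value taken from any real SoC or from this patch:

/* Sketch only: all offsets are hypothetical placeholders, not real SoC values. */
static const struct cdns_plat_pcie_of_data example_rc_of_data = {
	.is_rc			= 1,
	.ip_reg_bank_offset	= 0x01000000,
	.ip_cfg_ctrl_reg_offset	= 0x01003c00,
	.axi_mstr_common_offset	= 0x02020000,
	.axi_master_offset	= 0x02100000,
	.axi_slave_offset	= 0x02200000,
	.axi_hls_offset		= 0x02300000,
	.axi_ras_offset		= 0x02400000,
	.axi_dti_offset		= 0x02500000,
};
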
/**
* struct cdns_pcie - private data for Cadence PCIe controller drivers
* @reg_base: IO mapped register base
* @mem_res: start/end offsets in the physical system memory to map PCI accesses
+ * @msg_res: Region used to map and send PCIe messages
+ * @dev: PCIe controller device
* @is_rc: tell whether the PCIe controller mode is Root Complex or Endpoint.
- * @bus: In Root Complex mode, the bus number
+ * @phy_count: number of supported PHY devices
+ * @phy: list of pointers to specific PHY control blocks
+ * @link: list of pointers to corresponding device link representations
+ * @ops: Platform-specific ops to control various inputs from Cadence PCIe
+ * wrapper
+ * @cdns_pcie_reg_offsets: Register bank offsets for the SoC
*/
struct cdns_pcie {
- void __iomem *reg_base;
- struct resource *mem_res;
- struct device *dev;
- bool is_rc;
- u8 bus;
- int phy_count;
- struct phy **phy;
- struct device_link **link;
- const struct cdns_pcie_common_ops *ops;
+ void __iomem *reg_base;
+ struct resource *mem_res;
+ struct resource *msg_res;
+ struct device *dev;
+ bool is_rc;
+ int phy_count;
+ struct phy **phy;
+ struct device_link **link;
+ const struct cdns_pcie_ops *ops;
+ const struct cdns_plat_pcie_of_data *cdns_pcie_reg_offsets;
};
/**
* struct cdns_pcie_rc - private data for this PCIe Root Complex driver
* @pcie: Cadence PCIe controller
- * @dev: pointer to PCIe device
* @cfg_res: start/end offsets in the physical system memory to map PCI
* configuration space accesses
- * @bus_range: first/last buses behind the PCIe host controller
* @cfg_base: IO mapped window to access the PCI configuration space of a
* single function at a time
- * @no_bar_nbits: Number of bits to keep for inbound (PCIe -> CPU) address
- * translation (nbits sets into the "no BAR match" register)
* @vendor_id: PCI vendor ID
* @device_id: PCI device ID
+ * @avail_ib_bar: Whether RP_BAR0, RP_BAR1 and RP_NO_BAR are still free
+ *		  (available) for inbound mapping
+ * @quirk_retrain_flag: Retrain link as quirk for PCIe Gen2
+ * @quirk_detect_quiet_flag: LTSSM Detect Quiet min delay set as quirk
+ * @ecam_supported: Whether ECAM is supported
+ * @no_inbound_map: Set when inbound address mapping is not supported
*/
struct cdns_pcie_rc {
struct cdns_pcie pcie;
struct resource *cfg_res;
- struct resource *bus_range;
void __iomem *cfg_base;
- u32 no_bar_nbits;
u32 vendor_id;
u32 device_id;
+ bool avail_ib_bar[CDNS_PCIE_RP_MAX_IB];
+ unsigned int quirk_retrain_flag:1;
+ unsigned int quirk_detect_quiet_flag:1;
+ unsigned int ecam_supported:1;
+ unsigned int no_inbound_map:1;
+};
+
+/**
+ * struct cdns_pcie_epf - Structure to hold info about endpoint function
+ * @epf: Info about virtual functions attached to the physical function
+ * @epf_bar: reference to the pci_epf_bar for the six Base Address Registers
+ */
+struct cdns_pcie_epf {
+ struct cdns_pcie_epf *epf;
+ struct pci_epf_bar *epf_bar[PCI_STD_NUM_BARS];
};
/**
@@ -272,16 +145,22 @@ struct cdns_pcie_rc {
* @max_regions: maximum number of regions supported by hardware
* @ob_region_map: bitmask of mapped outbound regions
* @ob_addr: base addresses in the AXI bus where the outbound regions start
- * @irq_phys_addr: base address on the AXI bus where the MSI/legacy IRQ
+ * @irq_phys_addr: base address on the AXI bus where the MSI/INTX IRQ
* dedicated outbound region is mapped.
* @irq_cpu_addr: base address in the CPU space where a write access triggers
- * the sending of a memory write (MSI) / normal message (legacy
+ * the sending of a memory write (MSI) / normal message (INTX
* IRQ) TLP through the PCIe bus.
- * @irq_pci_addr: used to save the current mapping of the MSI/legacy IRQ
+ * @irq_pci_addr: used to save the current mapping of the MSI/INTX IRQ
* dedicated outbound region.
* @irq_pci_fn: the latest PCI function that has updated the mapping of
- * the MSI/legacy IRQ dedicated outbound region.
- * @irq_pending: bitmask of asserted legacy IRQs.
+ * the MSI/INTX IRQ dedicated outbound region.
+ * @irq_pending: bitmask of asserted INTX IRQs.
+ * @lock: spin lock to disable interrupts while modifying PCIe controller
+ * register fields (RMW) accessible by both remote RC and EP to
+ * minimize the time between read and write
+ * @epf: Structure to hold info about endpoint function
+ * @quirk_detect_quiet_flag: LTSSM Detect Quiet min delay set as quirk
+ * @quirk_disable_flr: Disable FLR (Function Level Reset) quirk flag
*/
struct cdns_pcie_ep {
struct cdns_pcie pcie;
@@ -293,54 +172,216 @@ struct cdns_pcie_ep {
u64 irq_pci_addr;
u8 irq_pci_fn;
u8 irq_pending;
+ /* protect writing to PCI_STATUS while raising INTX interrupts */
+ spinlock_t lock;
+ struct cdns_pcie_epf *epf;
+ unsigned int quirk_detect_quiet_flag:1;
+ unsigned int quirk_disable_flr:1;
};
+static inline u32 cdns_reg_bank_to_off(struct cdns_pcie *pcie, enum cdns_pcie_reg_bank bank)
+{
+ u32 offset = 0x0;
+
+ switch (bank) {
+ case REG_BANK_RP:
+ offset = 0;
+ break;
+ case REG_BANK_IP_REG:
+ offset = pcie->cdns_pcie_reg_offsets->ip_reg_bank_offset;
+ break;
+ case REG_BANK_IP_CFG_CTRL_REG:
+ offset = pcie->cdns_pcie_reg_offsets->ip_cfg_ctrl_reg_offset;
+ break;
+ case REG_BANK_AXI_MASTER_COMMON:
+ offset = pcie->cdns_pcie_reg_offsets->axi_mstr_common_offset;
+ break;
+ case REG_BANK_AXI_MASTER:
+ offset = pcie->cdns_pcie_reg_offsets->axi_master_offset;
+ break;
+ case REG_BANK_AXI_SLAVE:
+ offset = pcie->cdns_pcie_reg_offsets->axi_slave_offset;
+ break;
+ case REG_BANK_AXI_HLS:
+ offset = pcie->cdns_pcie_reg_offsets->axi_hls_offset;
+ break;
+ case REG_BANK_AXI_RAS:
+ offset = pcie->cdns_pcie_reg_offsets->axi_ras_offset;
+ break;
+ case REG_BANK_AXI_DTI:
+ offset = pcie->cdns_pcie_reg_offsets->axi_dti_offset;
+ break;
+ default:
+ break;
+ }
+ return offset;
+}
/* Register access */
-static inline void cdns_pcie_writeb(struct cdns_pcie *pcie, u32 reg, u8 value)
+static inline void cdns_pcie_writel(struct cdns_pcie *pcie, u32 reg, u32 value)
{
- writeb(value, pcie->reg_base + reg);
+ writel(value, pcie->reg_base + reg);
}
-static inline void cdns_pcie_writew(struct cdns_pcie *pcie, u32 reg, u16 value)
+static inline u32 cdns_pcie_readl(struct cdns_pcie *pcie, u32 reg)
{
- writew(value, pcie->reg_base + reg);
+ return readl(pcie->reg_base + reg);
}
-static inline void cdns_pcie_writel(struct cdns_pcie *pcie, u32 reg, u32 value)
+static inline void cdns_pcie_hpa_writel(struct cdns_pcie *pcie,
+ enum cdns_pcie_reg_bank bank,
+ u32 reg,
+ u32 value)
{
+ u32 offset = cdns_reg_bank_to_off(pcie, bank);
+
+ reg += offset;
writel(value, pcie->reg_base + reg);
}
-static inline u32 cdns_pcie_readl(struct cdns_pcie *pcie, u32 reg)
+static inline u32 cdns_pcie_hpa_readl(struct cdns_pcie *pcie,
+ enum cdns_pcie_reg_bank bank,
+ u32 reg)
{
+ u32 offset = cdns_reg_bank_to_off(pcie, bank);
+
+ reg += offset;
return readl(pcie->reg_base + reg);
}
+static inline u16 cdns_pcie_readw(struct cdns_pcie *pcie, u32 reg)
+{
+ return readw(pcie->reg_base + reg);
+}
+
+static inline u8 cdns_pcie_readb(struct cdns_pcie *pcie, u32 reg)
+{
+ return readb(pcie->reg_base + reg);
+}
+
+static inline int cdns_pcie_read_cfg_byte(struct cdns_pcie *pcie, int where,
+ u8 *val)
+{
+ *val = cdns_pcie_readb(pcie, where);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static inline int cdns_pcie_read_cfg_word(struct cdns_pcie *pcie, int where,
+ u16 *val)
+{
+ *val = cdns_pcie_readw(pcie, where);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static inline int cdns_pcie_read_cfg_dword(struct cdns_pcie *pcie, int where,
+ u32 *val)
+{
+ *val = cdns_pcie_readl(pcie, where);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static inline u32 cdns_pcie_read_sz(void __iomem *addr, int size)
+{
+ void __iomem *aligned_addr = PTR_ALIGN_DOWN(addr, 0x4);
+ unsigned int offset = (unsigned long)addr & 0x3;
+ u32 val = readl(aligned_addr);
+
+ if (!IS_ALIGNED((uintptr_t)addr, size)) {
+ pr_warn("Address %p and size %d are not aligned\n", addr, size);
+ return 0;
+ }
+
+ if (size > 2)
+ return val;
+
+ return (val >> (8 * offset)) & ((1 << (size * 8)) - 1);
+}
+
+static inline void cdns_pcie_write_sz(void __iomem *addr, int size, u32 value)
+{
+ void __iomem *aligned_addr = PTR_ALIGN_DOWN(addr, 0x4);
+ unsigned int offset = (unsigned long)addr & 0x3;
+ u32 mask;
+ u32 val;
+
+ if (!IS_ALIGNED((uintptr_t)addr, size)) {
+ pr_warn("Address %p and size %d are not aligned\n", addr, size);
+ return;
+ }
+
+ if (size > 2) {
+ writel(value, addr);
+ return;
+ }
+
+ mask = ~(((1 << (size * 8)) - 1) << (offset * 8));
+ val = readl(aligned_addr) & mask;
+ val |= value << (offset * 8);
+ writel(val, aligned_addr);
+}
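
As a worked example of the sub-word merge above (values are illustrative): a 16-bit write of 0xABCD at a byte offset of 2 inside the aligned 32-bit word keeps the low half-word and replaces the high one:

/*
 * cdns_pcie_write_sz(addr, 2, 0xABCD) with (addr & 0x3) == 2:
 *   mask = ~(0xFFFF << 16) = 0x0000FFFF          keeps bytes 0-1
 *   val  = (readl(aligned_addr) & mask) | (0xABCD << 16)
 *   writel(val, aligned_addr)                    bytes 2-3 now hold 0xABCD
 */
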
+
/* Root Port register access */
static inline void cdns_pcie_rp_writeb(struct cdns_pcie *pcie,
u32 reg, u8 value)
{
- writeb(value, pcie->reg_base + CDNS_PCIE_RP_BASE + reg);
+ void __iomem *addr = pcie->reg_base + CDNS_PCIE_RP_BASE + reg;
+
+ cdns_pcie_write_sz(addr, 0x1, value);
}
static inline void cdns_pcie_rp_writew(struct cdns_pcie *pcie,
u32 reg, u16 value)
{
- writew(value, pcie->reg_base + CDNS_PCIE_RP_BASE + reg);
+ void __iomem *addr = pcie->reg_base + CDNS_PCIE_RP_BASE + reg;
+
+ cdns_pcie_write_sz(addr, 0x2, value);
+}
+
+static inline u16 cdns_pcie_rp_readw(struct cdns_pcie *pcie, u32 reg)
+{
+ void __iomem *addr = pcie->reg_base + CDNS_PCIE_RP_BASE + reg;
+
+ return cdns_pcie_read_sz(addr, 0x2);
+}
+
+static inline void cdns_pcie_hpa_rp_writeb(struct cdns_pcie *pcie,
+ u32 reg, u8 value)
+{
+ void __iomem *addr = pcie->reg_base + CDNS_PCIE_HPA_RP_BASE + reg;
+
+ cdns_pcie_write_sz(addr, 0x1, value);
+}
+
+static inline void cdns_pcie_hpa_rp_writew(struct cdns_pcie *pcie,
+ u32 reg, u16 value)
+{
+ void __iomem *addr = pcie->reg_base + CDNS_PCIE_HPA_RP_BASE + reg;
+
+ cdns_pcie_write_sz(addr, 0x2, value);
+}
+
+static inline u16 cdns_pcie_hpa_rp_readw(struct cdns_pcie *pcie, u32 reg)
+{
+ void __iomem *addr = pcie->reg_base + CDNS_PCIE_HPA_RP_BASE + reg;
+
+ return cdns_pcie_read_sz(addr, 0x2);
}
/* Endpoint Function register access */
static inline void cdns_pcie_ep_fn_writeb(struct cdns_pcie *pcie, u8 fn,
u32 reg, u8 value)
{
- writeb(value, pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg);
+ void __iomem *addr = pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg;
+
+ cdns_pcie_write_sz(addr, 0x1, value);
}
static inline void cdns_pcie_ep_fn_writew(struct cdns_pcie *pcie, u8 fn,
u32 reg, u16 value)
{
- writew(value, pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg);
+ void __iomem *addr = pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg;
+
+ cdns_pcie_write_sz(addr, 0x2, value);
}
static inline void cdns_pcie_ep_fn_writel(struct cdns_pcie *pcie, u8 fn,
@@ -349,14 +390,11 @@ static inline void cdns_pcie_ep_fn_writel(struct cdns_pcie *pcie, u8 fn,
writel(value, pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg);
}
-static inline u8 cdns_pcie_ep_fn_readb(struct cdns_pcie *pcie, u8 fn, u32 reg)
-{
- return readb(pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg);
-}
-
static inline u16 cdns_pcie_ep_fn_readw(struct cdns_pcie *pcie, u8 fn, u32 reg)
{
- return readw(pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg);
+ void __iomem *addr = pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg;
+
+ return cdns_pcie_read_sz(addr, 0x2);
}
static inline u32 cdns_pcie_ep_fn_readl(struct cdns_pcie *pcie, u8 fn, u32 reg)
@@ -364,34 +402,122 @@ static inline u32 cdns_pcie_ep_fn_readl(struct cdns_pcie *pcie, u8 fn, u32 reg)
return readl(pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg);
}
-#ifdef CONFIG_PCIE_CADENCE_HOST
+static inline int cdns_pcie_start_link(struct cdns_pcie *pcie)
+{
+ if (pcie->ops && pcie->ops->start_link)
+ return pcie->ops->start_link(pcie);
+
+ return 0;
+}
+
+static inline void cdns_pcie_stop_link(struct cdns_pcie *pcie)
+{
+ if (pcie->ops && pcie->ops->stop_link)
+ pcie->ops->stop_link(pcie);
+}
+
+static inline bool cdns_pcie_link_up(struct cdns_pcie *pcie)
+{
+ if (pcie->ops && pcie->ops->link_up)
+ return pcie->ops->link_up(pcie);
+
+ return true;
+}
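
Platform glue drivers hook into these wrappers through struct cdns_pcie_ops; the following is only a sketch with hypothetical callbacks (the real implementations live in the SoC-specific drivers, not in this header):

/* Sketch only: the callback bodies are placeholders for SoC-specific logic. */
static int example_pcie_start_link(struct cdns_pcie *pcie)
{
	/* e.g. set a link-training-enable bit in the SoC wrapper registers */
	return 0;
}

static bool example_pcie_link_up(struct cdns_pcie *pcie)
{
	/* e.g. poll a link-status bit in the SoC wrapper registers */
	return true;
}

static const struct cdns_pcie_ops example_pcie_ops = {
	.start_link	= example_pcie_start_link,
	.link_up	= example_pcie_link_up,
};
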
+
+#if IS_ENABLED(CONFIG_PCIE_CADENCE_HOST)
+int cdns_pcie_host_link_setup(struct cdns_pcie_rc *rc);
+int cdns_pcie_host_init(struct cdns_pcie_rc *rc);
int cdns_pcie_host_setup(struct cdns_pcie_rc *rc);
+void cdns_pcie_host_disable(struct cdns_pcie_rc *rc);
+void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int devfn,
+ int where);
+int cdns_pcie_hpa_host_setup(struct cdns_pcie_rc *rc);
#else
+static inline int cdns_pcie_host_link_setup(struct cdns_pcie_rc *rc)
+{
+ return 0;
+}
+
+static inline int cdns_pcie_host_init(struct cdns_pcie_rc *rc)
+{
+ return 0;
+}
+
static inline int cdns_pcie_host_setup(struct cdns_pcie_rc *rc)
{
return 0;
}
+
+static inline int cdns_pcie_hpa_host_setup(struct cdns_pcie_rc *rc)
+{
+ return 0;
+}
+
+static inline void cdns_pcie_host_disable(struct cdns_pcie_rc *rc)
+{
+}
+
+static inline void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int devfn,
+ int where)
+{
+ return NULL;
+}
#endif
-#ifdef CONFIG_PCIE_CADENCE_EP
+#if IS_ENABLED(CONFIG_PCIE_CADENCE_EP)
int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep);
+void cdns_pcie_ep_disable(struct cdns_pcie_ep *ep);
+int cdns_pcie_hpa_ep_setup(struct cdns_pcie_ep *ep);
#else
static inline int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
{
return 0;
}
+
+static inline void cdns_pcie_ep_disable(struct cdns_pcie_ep *ep)
+{
+}
+
+static inline int cdns_pcie_hpa_ep_setup(struct cdns_pcie_ep *ep)
+{
+ return 0;
+}
+
#endif
-void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 fn,
+
+u8 cdns_pcie_find_capability(struct cdns_pcie *pcie, u8 cap);
+u16 cdns_pcie_find_ext_capability(struct cdns_pcie *pcie, u8 cap);
+bool cdns_pcie_linkup(struct cdns_pcie *pcie);
+
+void cdns_pcie_detect_quiet_min_delay_set(struct cdns_pcie *pcie);
+
+void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 busnr, u8 fn,
u32 r, bool is_io,
u64 cpu_addr, u64 pci_addr, size_t size);
-void cdns_pcie_set_outbound_region_for_normal_msg(struct cdns_pcie *pcie, u8 fn,
+void cdns_pcie_set_outbound_region_for_normal_msg(struct cdns_pcie *pcie,
+ u8 busnr, u8 fn,
u32 r, u64 cpu_addr);
void cdns_pcie_reset_outbound_region(struct cdns_pcie *pcie, u32 r);
void cdns_pcie_disable_phy(struct cdns_pcie *pcie);
-int cdns_pcie_enable_phy(struct cdns_pcie *pcie);
-int cdns_pcie_init_phy(struct device *dev, struct cdns_pcie *pcie);
+int cdns_pcie_enable_phy(struct cdns_pcie *pcie);
+int cdns_pcie_init_phy(struct device *dev, struct cdns_pcie *pcie);
+void cdns_pcie_hpa_detect_quiet_min_delay_set(struct cdns_pcie *pcie);
+void cdns_pcie_hpa_set_outbound_region(struct cdns_pcie *pcie, u8 busnr, u8 fn,
+ u32 r, bool is_io,
+ u64 cpu_addr, u64 pci_addr, size_t size);
+void cdns_pcie_hpa_set_outbound_region_for_normal_msg(struct cdns_pcie *pcie,
+ u8 busnr, u8 fn,
+ u32 r, u64 cpu_addr);
+int cdns_pcie_hpa_host_link_setup(struct cdns_pcie_rc *rc);
+void __iomem *cdns_pci_hpa_map_bus(struct pci_bus *bus, unsigned int devfn,
+ int where);
+int cdns_pcie_hpa_host_start_link(struct cdns_pcie_rc *rc);
+int cdns_pcie_hpa_start_link(struct cdns_pcie *pcie);
+void cdns_pcie_hpa_stop_link(struct cdns_pcie *pcie);
+bool cdns_pcie_hpa_link_up(struct cdns_pcie *pcie);
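
For context, a host driver maps an outbound memory window through the busnr-aware variant roughly as sketched below; the region index, addresses and size are illustrative placeholders:

/* Sketch only: region index, addresses and size are made-up values. */
static void example_map_mem_window(struct cdns_pcie_rc *rc)
{
	struct cdns_pcie *pcie = &rc->pcie;

	cdns_pcie_set_outbound_region(pcie, 0 /* busnr */, 0 /* fn */,
				      1 /* region */, false /* is_io */,
				      0x10000000 /* cpu_addr */,
				      0x10000000 /* pci_addr */,
				      0x01000000 /* 16 MiB */);
}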
+
extern const struct dev_pm_ops cdns_pcie_pm_ops;
#endif /* _PCIE_CADENCE_H */