author		Bjorn Helgaas <bhelgaas@google.com>	2025-07-31 16:12:16 -0500
committer	Bjorn Helgaas <bhelgaas@google.com>	2025-07-31 16:12:16 -0500
commit		81b3be6cc58a43c9272ca94c9c0f1ce38b3107cb (patch)
tree		89dcc3e242a359be745f34887e80646b42efbd3a /drivers/pci/controller/dwc
parent		d5b0b60ab054ef8006a6339ba8f6eeda3ba8a120 (diff)
parent		a2fbecdbbb9d7706fd3ec25f0dead83a2d542943 (diff)
Merge branch 'pci/controller/qcom'
- Export DWC MSI controller related APIs for use by upcoming DWC-based ECAM
  implementation (Mayank Rana)

- Rename gen_pci_init() to pci_host_common_ecam_create() and export for use
  by controller drivers (Mayank Rana)

- Add DT binding and driver support for SA8255p, which supports ECAM for
  Configuration Space access (Mayank Rana)

- Update DT binding and driver to describe PHYs and per-Root Port resets in a
  Root Port stanza and deprecate describing them in the host bridge; this
  makes it possible to support multiple Root Ports in the future (Krishna
  Chaitanya Chundru)

* pci/controller/qcom:
  PCI: qcom: Add support for parsing the new Root Port binding
  dt-bindings: PCI: qcom: Move PHY & reset GPIO to Root Port node
  PCI: qcom: Add support for Qualcomm SA8255p based PCIe Root Complex
  dt-bindings: PCI: qcom,pcie-sa8255p: Document ECAM compliant PCIe root complex
  PCI: host-generic: Rename and export gen_pci_init() for PCIe controller drivers
  PCI: dwc: Export DWC MSI controller related APIs
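For illustration, a minimal hypothetical DTS sketch of the new Root Port
stanza; the property names are inferred from the qcom_pcie_parse_port()
lookups in the diff below, while the SoC compatible, GPIO controller and pin
number are placeholders, and the authoritative binding lives in the
dt-bindings patches that are outside this diffstat:

	pcie@1c00000 {
		compatible = "qcom,pcie-<soc>";		/* placeholder */
		...

		/* Root Port stanza: per-port PHY and PERST# GPIO live here */
		pcie@0 {
			reg = <0x0 0x0 0x0 0x0 0x0>;
			phys = <&pcie0_phy>;			  /* devm_of_phy_get() */
			reset-gpios = <&tlmm 94 GPIO_ACTIVE_LOW>; /* "reset" con_id, PERST# */
		};
	};

Older device trees that keep "pciephy" and "perst-gpios" on the host bridge
node continue to work: when no Root Port stanza is found,
qcom_pcie_parse_ports() returns -ENOENT and the driver falls back to
qcom_pcie_parse_legacy_binding().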
Diffstat (limited to 'drivers/pci/controller/dwc')
-rw-r--r--	drivers/pci/controller/dwc/Kconfig			|   1
-rw-r--r--	drivers/pci/controller/dwc/pcie-designware-host.c	|  38
-rw-r--r--	drivers/pci/controller/dwc/pcie-designware.h		|  14
-rw-r--r--	drivers/pci/controller/dwc/pcie-qcom.c			| 326
4 files changed, 322 insertions, 57 deletions
diff --git a/drivers/pci/controller/dwc/Kconfig b/drivers/pci/controller/dwc/Kconfig
index 5dfa5592bf07..d8c36bfd64f5 100644
--- a/drivers/pci/controller/dwc/Kconfig
+++ b/drivers/pci/controller/dwc/Kconfig
@@ -297,6 +297,7 @@ config PCIE_QCOM
select PCIE_DW_HOST
select CRC8
select PCIE_QCOM_COMMON
+ select PCI_HOST_COMMON
help
Say Y here to enable PCIe controller support on Qualcomm SoCs. The
PCIe controller uses the DesignWare core plus Qualcomm-specific
diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
index 101a64187f2a..952f8594b501 100644
--- a/drivers/pci/controller/dwc/pcie-designware-host.c
+++ b/drivers/pci/controller/dwc/pcie-designware-host.c
@@ -230,7 +230,7 @@ int dw_pcie_allocate_domains(struct dw_pcie_rp *pp)
return 0;
}
-static void dw_pcie_free_msi(struct dw_pcie_rp *pp)
+void dw_pcie_free_msi(struct dw_pcie_rp *pp)
{
u32 ctrl;
@@ -242,19 +242,34 @@ static void dw_pcie_free_msi(struct dw_pcie_rp *pp)
irq_domain_remove(pp->irq_domain);
}
+EXPORT_SYMBOL_GPL(dw_pcie_free_msi);
-static void dw_pcie_msi_init(struct dw_pcie_rp *pp)
+void dw_pcie_msi_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
u64 msi_target = (u64)pp->msi_data;
+ u32 ctrl, num_ctrls;
if (!pci_msi_enabled() || !pp->has_msi_ctrl)
return;
+ num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
+
+ /* Initialize IRQ Status array */
+ for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
+ dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK +
+ (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
+ pp->irq_mask[ctrl]);
+ dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_ENABLE +
+ (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
+ ~0);
+ }
+
/* Program the msi_data */
dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_LO, lower_32_bits(msi_target));
dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_HI, upper_32_bits(msi_target));
}
+EXPORT_SYMBOL_GPL(dw_pcie_msi_init);
static int dw_pcie_parse_split_msi_irq(struct dw_pcie_rp *pp)
{
@@ -296,7 +311,7 @@ static int dw_pcie_parse_split_msi_irq(struct dw_pcie_rp *pp)
return 0;
}
-static int dw_pcie_msi_host_init(struct dw_pcie_rp *pp)
+int dw_pcie_msi_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct device *dev = pci->dev;
@@ -370,6 +385,7 @@ static int dw_pcie_msi_host_init(struct dw_pcie_rp *pp)
return 0;
}
+EXPORT_SYMBOL_GPL(dw_pcie_msi_host_init);
static void dw_pcie_host_request_msg_tlp_res(struct dw_pcie_rp *pp)
{
@@ -888,7 +904,7 @@ static void dw_pcie_config_presets(struct dw_pcie_rp *pp)
int dw_pcie_setup_rc(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- u32 val, ctrl, num_ctrls;
+ u32 val;
int ret;
/*
@@ -899,20 +915,6 @@ int dw_pcie_setup_rc(struct dw_pcie_rp *pp)
dw_pcie_setup(pci);
- if (pp->has_msi_ctrl) {
- num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
-
- /* Initialize IRQ Status array */
- for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
- dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK +
- (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
- pp->irq_mask[ctrl]);
- dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_ENABLE +
- (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
- ~0);
- }
- }
-
dw_pcie_msi_init(pp);
/* Setup RC BARs */
diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h
index ea4284ee06d4..00f52d472dcd 100644
--- a/drivers/pci/controller/dwc/pcie-designware.h
+++ b/drivers/pci/controller/dwc/pcie-designware.h
@@ -754,6 +754,9 @@ static inline enum dw_pcie_ltssm dw_pcie_get_ltssm(struct dw_pcie *pci)
int dw_pcie_suspend_noirq(struct dw_pcie *pci);
int dw_pcie_resume_noirq(struct dw_pcie *pci);
irqreturn_t dw_handle_msi_irq(struct dw_pcie_rp *pp);
+void dw_pcie_msi_init(struct dw_pcie_rp *pp);
+int dw_pcie_msi_host_init(struct dw_pcie_rp *pp);
+void dw_pcie_free_msi(struct dw_pcie_rp *pp);
int dw_pcie_setup_rc(struct dw_pcie_rp *pp);
int dw_pcie_host_init(struct dw_pcie_rp *pp);
void dw_pcie_host_deinit(struct dw_pcie_rp *pp);
@@ -776,6 +779,17 @@ static inline irqreturn_t dw_handle_msi_irq(struct dw_pcie_rp *pp)
return IRQ_NONE;
}
+static inline void dw_pcie_msi_init(struct dw_pcie_rp *pp)
+{ }
+
+static inline int dw_pcie_msi_host_init(struct dw_pcie_rp *pp)
+{
+ return -ENODEV;
+}
+
+static inline void dw_pcie_free_msi(struct dw_pcie_rp *pp)
+{ }
+
static inline int dw_pcie_setup_rc(struct dw_pcie_rp *pp)
{
return 0;
diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
index 9b12f2f02042..294babe1816e 100644
--- a/drivers/pci/controller/dwc/pcie-qcom.c
+++ b/drivers/pci/controller/dwc/pcie-qcom.c
@@ -21,7 +21,9 @@
#include <linux/limits.h>
#include <linux/init.h>
#include <linux/of.h>
+#include <linux/of_pci.h>
#include <linux/pci.h>
+#include <linux/pci-ecam.h>
#include <linux/pm_opp.h>
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
@@ -34,6 +36,7 @@
#include <linux/units.h>
#include "../../pci.h"
+#include "../pci-host-common.h"
#include "pcie-designware.h"
#include "pcie-qcom-common.h"
@@ -255,13 +258,21 @@ struct qcom_pcie_ops {
* @ops: qcom PCIe ops structure
* @override_no_snoop: Override NO_SNOOP attribute in TLP to enable cache
* snooping
+ * @firmware_managed: Set if the Root Complex is firmware managed
*/
struct qcom_pcie_cfg {
const struct qcom_pcie_ops *ops;
bool override_no_snoop;
+ bool firmware_managed;
bool no_l0s;
};
+struct qcom_pcie_port {
+ struct list_head list;
+ struct gpio_desc *reset;
+ struct phy *phy;
+};
+
struct qcom_pcie {
struct dw_pcie *pci;
void __iomem *parf; /* DT parf */
@@ -274,24 +285,37 @@ struct qcom_pcie {
struct icc_path *icc_cpu;
const struct qcom_pcie_cfg *cfg;
struct dentry *debugfs;
+ struct list_head ports;
bool suspended;
bool use_pm_opp;
};
#define to_qcom_pcie(x) dev_get_drvdata((x)->dev)
-static void qcom_ep_reset_assert(struct qcom_pcie *pcie)
+static void qcom_perst_assert(struct qcom_pcie *pcie, bool assert)
{
- gpiod_set_value_cansleep(pcie->reset, 1);
+ struct qcom_pcie_port *port;
+ int val = assert ? 1 : 0;
+
+ if (list_empty(&pcie->ports))
+ gpiod_set_value_cansleep(pcie->reset, val);
+ else
+ list_for_each_entry(port, &pcie->ports, list)
+ gpiod_set_value_cansleep(port->reset, val);
+
usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
}
+static void qcom_ep_reset_assert(struct qcom_pcie *pcie)
+{
+ qcom_perst_assert(pcie, true);
+}
+
static void qcom_ep_reset_deassert(struct qcom_pcie *pcie)
{
/* Ensure that PERST has been asserted for at least 100 ms */
msleep(PCIE_T_PVPERL_MS);
- gpiod_set_value_cansleep(pcie->reset, 0);
- usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
+ qcom_perst_assert(pcie, false);
}
static int qcom_pcie_start_link(struct dw_pcie *pci)
@@ -1229,6 +1253,59 @@ static bool qcom_pcie_link_up(struct dw_pcie *pci)
return val & PCI_EXP_LNKSTA_DLLLA;
}
+static void qcom_pcie_phy_exit(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_port *port;
+
+ if (list_empty(&pcie->ports))
+ phy_exit(pcie->phy);
+ else
+ list_for_each_entry(port, &pcie->ports, list)
+ phy_exit(port->phy);
+}
+
+static void qcom_pcie_phy_power_off(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_port *port;
+
+ if (list_empty(&pcie->ports)) {
+ phy_power_off(pcie->phy);
+ } else {
+ list_for_each_entry(port, &pcie->ports, list)
+ phy_power_off(port->phy);
+ }
+}
+
+static int qcom_pcie_phy_power_on(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_port *port;
+ int ret = 0;
+
+ if (list_empty(&pcie->ports)) {
+ ret = phy_set_mode_ext(pcie->phy, PHY_MODE_PCIE, PHY_MODE_PCIE_RC);
+ if (ret)
+ return ret;
+
+ ret = phy_power_on(pcie->phy);
+ if (ret)
+ return ret;
+ } else {
+ list_for_each_entry(port, &pcie->ports, list) {
+ ret = phy_set_mode_ext(port->phy, PHY_MODE_PCIE, PHY_MODE_PCIE_RC);
+ if (ret)
+ return ret;
+
+ ret = phy_power_on(port->phy);
+ if (ret) {
+ qcom_pcie_phy_power_off(pcie);
+ return ret;
+ }
+ }
+ }
+
+ return ret;
+}
+
static int qcom_pcie_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
@@ -1241,11 +1318,7 @@ static int qcom_pcie_host_init(struct dw_pcie_rp *pp)
if (ret)
return ret;
- ret = phy_set_mode_ext(pcie->phy, PHY_MODE_PCIE, PHY_MODE_PCIE_RC);
- if (ret)
- goto err_deinit;
-
- ret = phy_power_on(pcie->phy);
+ ret = qcom_pcie_phy_power_on(pcie);
if (ret)
goto err_deinit;
@@ -1268,7 +1341,7 @@ static int qcom_pcie_host_init(struct dw_pcie_rp *pp)
err_assert_reset:
qcom_ep_reset_assert(pcie);
err_disable_phy:
- phy_power_off(pcie->phy);
+ qcom_pcie_phy_power_off(pcie);
err_deinit:
pcie->cfg->ops->deinit(pcie);
@@ -1281,7 +1354,7 @@ static void qcom_pcie_host_deinit(struct dw_pcie_rp *pp)
struct qcom_pcie *pcie = to_qcom_pcie(pci);
qcom_ep_reset_assert(pcie);
- phy_power_off(pcie->phy);
+ qcom_pcie_phy_power_off(pcie);
pcie->cfg->ops->deinit(pcie);
}
@@ -1426,6 +1499,10 @@ static const struct qcom_pcie_cfg cfg_sc8280xp = {
.no_l0s = true,
};
+static const struct qcom_pcie_cfg cfg_fw_managed = {
+ .firmware_managed = true,
+};
+
static const struct dw_pcie_ops dw_pcie_ops = {
.link_up = qcom_pcie_link_up,
.start_link = qcom_pcie_start_link,
@@ -1580,10 +1657,128 @@ static irqreturn_t qcom_pcie_global_irq_thread(int irq, void *data)
return IRQ_HANDLED;
}
+static void qcom_pci_free_msi(void *ptr)
+{
+ struct dw_pcie_rp *pp = (struct dw_pcie_rp *)ptr;
+
+ if (pp && pp->has_msi_ctrl)
+ dw_pcie_free_msi(pp);
+}
+
+static int qcom_pcie_ecam_host_init(struct pci_config_window *cfg)
+{
+ struct device *dev = cfg->parent;
+ struct dw_pcie_rp *pp;
+ struct dw_pcie *pci;
+ int ret;
+
+ pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
+ if (!pci)
+ return -ENOMEM;
+
+ pci->dev = dev;
+ pp = &pci->pp;
+ pci->dbi_base = cfg->win;
+ pp->num_vectors = MSI_DEF_NUM_VECTORS;
+
+ ret = dw_pcie_msi_host_init(pp);
+ if (ret)
+ return ret;
+
+ pp->has_msi_ctrl = true;
+ dw_pcie_msi_init(pp);
+
+ return devm_add_action_or_reset(dev, qcom_pci_free_msi, pp);
+}
+
+static const struct pci_ecam_ops pci_qcom_ecam_ops = {
+ .init = qcom_pcie_ecam_host_init,
+ .pci_ops = {
+ .map_bus = pci_ecam_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+ }
+};
+
+static int qcom_pcie_parse_port(struct qcom_pcie *pcie, struct device_node *node)
+{
+ struct device *dev = pcie->pci->dev;
+ struct qcom_pcie_port *port;
+ struct gpio_desc *reset;
+ struct phy *phy;
+ int ret;
+
+ reset = devm_fwnode_gpiod_get(dev, of_fwnode_handle(node),
+ "reset", GPIOD_OUT_HIGH, "PERST#");
+ if (IS_ERR(reset))
+ return PTR_ERR(reset);
+
+ phy = devm_of_phy_get(dev, node, NULL);
+ if (IS_ERR(phy))
+ return PTR_ERR(phy);
+
+ port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
+ if (!port)
+ return -ENOMEM;
+
+ ret = phy_init(phy);
+ if (ret)
+ return ret;
+
+ port->reset = reset;
+ port->phy = phy;
+ INIT_LIST_HEAD(&port->list);
+ list_add_tail(&port->list, &pcie->ports);
+
+ return 0;
+}
+
+static int qcom_pcie_parse_ports(struct qcom_pcie *pcie)
+{
+ struct device *dev = pcie->pci->dev;
+ struct qcom_pcie_port *port, *tmp;
+ int ret = -ENOENT;
+
+ for_each_available_child_of_node_scoped(dev->of_node, of_port) {
+ ret = qcom_pcie_parse_port(pcie, of_port);
+ if (ret)
+ goto err_port_del;
+ }
+
+ return ret;
+
+err_port_del:
+ list_for_each_entry_safe(port, tmp, &pcie->ports, list)
+ list_del(&port->list);
+
+ return ret;
+}
+
+static int qcom_pcie_parse_legacy_binding(struct qcom_pcie *pcie)
+{
+ struct device *dev = pcie->pci->dev;
+ int ret;
+
+ pcie->phy = devm_phy_optional_get(dev, "pciephy");
+ if (IS_ERR(pcie->phy))
+ return PTR_ERR(pcie->phy);
+
+ pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_HIGH);
+ if (IS_ERR(pcie->reset))
+ return PTR_ERR(pcie->reset);
+
+ ret = phy_init(pcie->phy);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
static int qcom_pcie_probe(struct platform_device *pdev)
{
const struct qcom_pcie_cfg *pcie_cfg;
unsigned long max_freq = ULONG_MAX;
+ struct qcom_pcie_port *port, *tmp;
struct device *dev = &pdev->dev;
struct dev_pm_opp *opp;
struct qcom_pcie *pcie;
@@ -1594,24 +1789,64 @@ static int qcom_pcie_probe(struct platform_device *pdev)
char *name;
pcie_cfg = of_device_get_match_data(dev);
- if (!pcie_cfg || !pcie_cfg->ops) {
- dev_err(dev, "Invalid platform data\n");
- return -EINVAL;
+ if (!pcie_cfg) {
+ dev_err(dev, "No platform data\n");
+ return -ENODATA;
}
- pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
- if (!pcie)
- return -ENOMEM;
-
- pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
- if (!pci)
- return -ENOMEM;
+ if (!pcie_cfg->firmware_managed && !pcie_cfg->ops) {
+ dev_err(dev, "No platform ops\n");
+ return -ENODATA;
+ }
pm_runtime_enable(dev);
ret = pm_runtime_get_sync(dev);
if (ret < 0)
goto err_pm_runtime_put;
+ if (pcie_cfg->firmware_managed) {
+ struct pci_host_bridge *bridge;
+ struct pci_config_window *cfg;
+
+ bridge = devm_pci_alloc_host_bridge(dev, 0);
+ if (!bridge) {
+ ret = -ENOMEM;
+ goto err_pm_runtime_put;
+ }
+
+ /* Parse and map our ECAM configuration space area */
+ cfg = pci_host_common_ecam_create(dev, bridge,
+ &pci_qcom_ecam_ops);
+ if (IS_ERR(cfg)) {
+ ret = PTR_ERR(cfg);
+ goto err_pm_runtime_put;
+ }
+
+ bridge->sysdata = cfg;
+ bridge->ops = (struct pci_ops *)&pci_qcom_ecam_ops.pci_ops;
+ bridge->msi_domain = true;
+
+ ret = pci_host_probe(bridge);
+ if (ret)
+ goto err_pm_runtime_put;
+
+ return 0;
+ }
+
+ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+ if (!pcie) {
+ ret = -ENOMEM;
+ goto err_pm_runtime_put;
+ }
+
+ pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
+ if (!pci) {
+ ret = -ENOMEM;
+ goto err_pm_runtime_put;
+ }
+
+ INIT_LIST_HEAD(&pcie->ports);
+
pci->dev = dev;
pci->ops = &dw_pcie_ops;
pp = &pci->pp;
@@ -1620,12 +1855,6 @@ static int qcom_pcie_probe(struct platform_device *pdev)
pcie->cfg = pcie_cfg;
- pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_HIGH);
- if (IS_ERR(pcie->reset)) {
- ret = PTR_ERR(pcie->reset);
- goto err_pm_runtime_put;
- }
-
pcie->parf = devm_platform_ioremap_resource_byname(pdev, "parf");
if (IS_ERR(pcie->parf)) {
ret = PTR_ERR(pcie->parf);
@@ -1648,12 +1877,6 @@ static int qcom_pcie_probe(struct platform_device *pdev)
}
}
- pcie->phy = devm_phy_optional_get(dev, "pciephy");
- if (IS_ERR(pcie->phy)) {
- ret = PTR_ERR(pcie->phy);
- goto err_pm_runtime_put;
- }
-
/* OPP table is optional */
ret = devm_pm_opp_of_add_table(dev);
if (ret && ret != -ENODEV) {
@@ -1700,9 +1923,23 @@ static int qcom_pcie_probe(struct platform_device *pdev)
pp->ops = &qcom_pcie_dw_ops;
- ret = phy_init(pcie->phy);
- if (ret)
- goto err_pm_runtime_put;
+ ret = qcom_pcie_parse_ports(pcie);
+ if (ret) {
+ if (ret != -ENOENT) {
+ dev_err_probe(pci->dev, ret,
+ "Failed to parse Root Port: %d\n", ret);
+ goto err_pm_runtime_put;
+ }
+
+ /*
+ * In the case of properties not populated in Root Port node,
+ * fallback to the legacy method of parsing the Host Bridge
+ * node. This is to maintain DT backwards compatibility.
+ */
+ ret = qcom_pcie_parse_legacy_binding(pcie);
+ if (ret)
+ goto err_pm_runtime_put;
+ }
platform_set_drvdata(pdev, pcie);
@@ -1747,7 +1984,9 @@ static int qcom_pcie_probe(struct platform_device *pdev)
err_host_deinit:
dw_pcie_host_deinit(pp);
err_phy_exit:
- phy_exit(pcie->phy);
+ qcom_pcie_phy_exit(pcie);
+ list_for_each_entry_safe(port, tmp, &pcie->ports, list)
+ list_del(&port->list);
err_pm_runtime_put:
pm_runtime_put(dev);
pm_runtime_disable(dev);
@@ -1757,9 +1996,13 @@ err_pm_runtime_put:
static int qcom_pcie_suspend_noirq(struct device *dev)
{
- struct qcom_pcie *pcie = dev_get_drvdata(dev);
+ struct qcom_pcie *pcie;
int ret = 0;
+ pcie = dev_get_drvdata(dev);
+ if (!pcie)
+ return 0;
+
/*
* Set minimum bandwidth required to keep data path functional during
* suspend.
@@ -1813,9 +2056,13 @@ static int qcom_pcie_suspend_noirq(struct device *dev)
static int qcom_pcie_resume_noirq(struct device *dev)
{
- struct qcom_pcie *pcie = dev_get_drvdata(dev);
+ struct qcom_pcie *pcie;
int ret;
+ pcie = dev_get_drvdata(dev);
+ if (!pcie)
+ return 0;
+
if (pm_suspend_target_state != PM_SUSPEND_MEM) {
ret = icc_enable(pcie->icc_cpu);
if (ret) {
@@ -1850,6 +2097,7 @@ static const struct of_device_id qcom_pcie_match[] = {
{ .compatible = "qcom,pcie-ipq9574", .data = &cfg_2_9_0 },
{ .compatible = "qcom,pcie-msm8996", .data = &cfg_2_3_2 },
{ .compatible = "qcom,pcie-qcs404", .data = &cfg_2_4_0 },
+ { .compatible = "qcom,pcie-sa8255p", .data = &cfg_fw_managed },
{ .compatible = "qcom,pcie-sa8540p", .data = &cfg_sc8280xp },
{ .compatible = "qcom,pcie-sa8775p", .data = &cfg_1_34_0},
{ .compatible = "qcom,pcie-sc7280", .data = &cfg_1_9_0 },