Diffstat (limited to 'drivers/pci/probe.c')
-rw-r--r-- | drivers/pci/probe.c | 349
1 file changed, 255 insertions, 94 deletions
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 1325fbae2f28..e6a34db77826 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -9,6 +9,8 @@
 #include <linux/pci.h>
 #include <linux/msi.h>
 #include <linux/of_pci.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
 #include <linux/pci_hotplug.h>
 #include <linux/slab.h>
 #include <linux/module.h>
@@ -95,7 +97,7 @@ static void release_pcibus_dev(struct device *dev)
         kfree(pci_bus);
 }
 
-static struct class pcibus_class = {
+static const struct class pcibus_class = {
         .name = "pci_bus",
         .dev_release = &release_pcibus_dev,
         .dev_groups = pcibus_groups,
@@ -165,40 +167,66 @@ static inline unsigned long decode_bar(struct pci_dev *dev, u32 bar)
 #define PCI_COMMAND_DECODE_ENABLE (PCI_COMMAND_MEMORY | PCI_COMMAND_IO)
 
 /**
+ * __pci_size_bars - Read the raw BAR mask for a range of PCI BARs
+ * @dev: the PCI device
+ * @count: number of BARs to size
+ * @pos: starting config space position
+ * @sizes: array to store mask values
+ * @rom: indicate whether to use ROM mask, which avoids enabling ROM BARs
+ *
+ * Provided @sizes array must be sufficiently sized to store results for
+ * @count u32 BARs. Caller is responsible for disabling decode to specified
+ * BAR range around calling this function. This function is intended to avoid
+ * disabling decode around sizing each BAR individually, which can result in
+ * non-trivial overhead in virtualized environments with very large PCI BARs.
+ */
+static void __pci_size_bars(struct pci_dev *dev, int count,
+                            unsigned int pos, u32 *sizes, bool rom)
+{
+        u32 orig, mask = rom ? PCI_ROM_ADDRESS_MASK : ~0;
+        int i;
+
+        for (i = 0; i < count; i++, pos += 4, sizes++) {
+                pci_read_config_dword(dev, pos, &orig);
+                pci_write_config_dword(dev, pos, mask);
+                pci_read_config_dword(dev, pos, sizes);
+                pci_write_config_dword(dev, pos, orig);
+        }
+}
+
+void __pci_size_stdbars(struct pci_dev *dev, int count,
+                        unsigned int pos, u32 *sizes)
+{
+        __pci_size_bars(dev, count, pos, sizes, false);
+}
+
+static void __pci_size_rom(struct pci_dev *dev, unsigned int pos, u32 *sizes)
+{
+        __pci_size_bars(dev, 1, pos, sizes, true);
+}
+
+/**
  * __pci_read_base - Read a PCI BAR
  * @dev: the PCI device
  * @type: type of the BAR
  * @res: resource buffer to be filled in
  * @pos: BAR position in the config space
+ * @sizes: array of one or more pre-read BAR masks
  *
  * Returns 1 if the BAR is 64-bit, or 0 if 32-bit.
  */
 int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
-                    struct resource *res, unsigned int pos)
+                    struct resource *res, unsigned int pos, u32 *sizes)
 {
-        u32 l = 0, sz = 0, mask;
+        u32 l = 0, sz;
         u64 l64, sz64, mask64;
-        u16 orig_cmd;
         struct pci_bus_region region, inverted_region;
         const char *res_name = pci_resource_name(dev, res - dev->resource);
 
-        mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
-
-        /* No printks while decoding is disabled! */
-        if (!dev->mmio_always_on) {
-                pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
-                if (orig_cmd & PCI_COMMAND_DECODE_ENABLE) {
-                        pci_write_config_word(dev, PCI_COMMAND,
-                                orig_cmd & ~PCI_COMMAND_DECODE_ENABLE);
-                }
-        }
-
         res->name = pci_name(dev);
 
         pci_read_config_dword(dev, pos, &l);
-        pci_write_config_dword(dev, pos, l | mask);
-        pci_read_config_dword(dev, pos, &sz);
-        pci_write_config_dword(dev, pos, l);
+        sz = sizes[0];
 
         /*
          * All bits set in sz means the device isn't working properly.
@@ -238,18 +266,13 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
 
         if (res->flags & IORESOURCE_MEM_64) {
                 pci_read_config_dword(dev, pos + 4, &l);
-                pci_write_config_dword(dev, pos + 4, ~0);
-                pci_read_config_dword(dev, pos + 4, &sz);
-                pci_write_config_dword(dev, pos + 4, l);
+                sz = sizes[1];
 
                 l64 |= ((u64)l << 32);
                 sz64 |= ((u64)sz << 32);
                 mask64 |= ((u64)~0 << 32);
         }
 
-        if (!dev->mmio_always_on && (orig_cmd & PCI_COMMAND_DECODE_ENABLE))
-                pci_write_config_word(dev, PCI_COMMAND, orig_cmd);
-
         if (!sz64)
                 goto fail;
 
@@ -318,9 +341,14 @@ out:
         return (res->flags & IORESOURCE_MEM_64) ? 1 : 0;
 }
 
-static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
+static __always_inline void pci_read_bases(struct pci_dev *dev,
+                                           unsigned int howmany, int rom)
 {
+        u32 rombar, stdbars[PCI_STD_NUM_BARS];
         unsigned int pos, reg;
+        u16 orig_cmd;
+
+        BUILD_BUG_ON(statically_true(howmany > PCI_STD_NUM_BARS));
 
         if (dev->non_compliant_bars)
                 return;
@@ -329,10 +357,28 @@ static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
         if (dev->is_virtfn)
                 return;
 
+        /* No printks while decoding is disabled! */
+        if (!dev->mmio_always_on) {
+                pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
+                if (orig_cmd & PCI_COMMAND_DECODE_ENABLE) {
+                        pci_write_config_word(dev, PCI_COMMAND,
+                                orig_cmd & ~PCI_COMMAND_DECODE_ENABLE);
+                }
+        }
+
+        __pci_size_stdbars(dev, howmany, PCI_BASE_ADDRESS_0, stdbars);
+        if (rom)
+                __pci_size_rom(dev, rom, &rombar);
+
+        if (!dev->mmio_always_on &&
+            (orig_cmd & PCI_COMMAND_DECODE_ENABLE))
+                pci_write_config_word(dev, PCI_COMMAND, orig_cmd);
+
         for (pos = 0; pos < howmany; pos++) {
                 struct resource *res = &dev->resource[pos];
                 reg = PCI_BASE_ADDRESS_0 + (pos << 2);
-                pos += __pci_read_base(dev, pci_bar_unknown, res, reg);
+                pos += __pci_read_base(dev, pci_bar_unknown,
+                                       res, reg, &stdbars[pos]);
         }
 
         if (rom) {
@@ -340,7 +386,7 @@
                 dev->rom_base_reg = rom;
                 res->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH |
                                 IORESOURCE_READONLY | IORESOURCE_SIZEALIGN;
-                __pci_read_base(dev, pci_bar_mem32, res, rom);
+                __pci_read_base(dev, pci_bar_mem32, res, rom, &rombar);
         }
 }
 
@@ -543,15 +589,15 @@ void pci_read_bridge_bases(struct pci_bus *child)
         pci_read_bridge_mmio(child->self, child->resource[1], false);
         pci_read_bridge_mmio_pref(child->self, child->resource[2], false);
 
-        if (dev->transparent) {
-                pci_bus_for_each_resource(child->parent, res) {
-                        if (res && res->flags) {
-                                pci_bus_add_resource(child, res,
-                                                     PCI_SUBTRACTIVE_DECODE);
-                                pci_info(dev, " bridge window %pR (subtractive decode)\n",
-                                         res);
-                        }
-                }
+        if (!dev->transparent)
+                return;
+
+        pci_bus_for_each_resource(child->parent, res) {
+                if (!res || !res->flags)
+                        continue;
+
+                pci_bus_add_resource(child, res);
+                pci_info(dev, " bridge window %pR (subtractive decode)\n", res);
         }
 }
 
@@ -742,9 +788,14 @@ const char *pci_speed_string(enum pci_bus_speed speed)
 }
 EXPORT_SYMBOL_GPL(pci_speed_string);
 
-void pcie_update_link_speed(struct pci_bus *bus, u16 linksta)
+void pcie_update_link_speed(struct pci_bus *bus)
 {
-        bus->cur_bus_speed = pcie_link_speed[linksta & PCI_EXP_LNKSTA_CLS];
+        struct pci_dev *bridge = bus->self;
+        u16 linksta, linksta2;
+
+        pcie_capability_read_word(bridge, PCI_EXP_LNKSTA, &linksta);
+        pcie_capability_read_word(bridge, PCI_EXP_LNKSTA2, &linksta2);
+        __pcie_update_link_speed(bus, linksta, linksta2);
 }
 EXPORT_SYMBOL_GPL(pcie_update_link_speed);
 
@@ -827,13 +878,11 @@ static void pci_set_bus_speed(struct pci_bus *bus)
 
         if (pci_is_pcie(bridge)) {
                 u32 linkcap;
-                u16 linksta;
 
                 pcie_capability_read_dword(bridge, PCI_EXP_LNKCAP, &linkcap);
                 bus->max_bus_speed = pcie_link_speed[linkcap & PCI_EXP_LNKCAP_SLS];
 
-                pcie_capability_read_word(bridge, PCI_EXP_LNKSTA, &linksta);
-                pcie_update_link_speed(bus, linksta);
+                pcie_update_link_speed(bus);
         }
 }
 
@@ -889,6 +938,17 @@ static void pci_set_bus_msi_domain(struct pci_bus *bus)
         dev_set_msi_domain(&bus->dev, d);
 }
 
+static bool pci_preserve_config(struct pci_host_bridge *host_bridge)
+{
+        if (pci_acpi_preserve_config(host_bridge))
+                return true;
+
+        if (host_bridge->dev.parent && host_bridge->dev.parent->of_node)
+                return of_pci_preserve_config(host_bridge->dev.parent->of_node);
+
+        return false;
+}
+
 static int pci_register_host_bridge(struct pci_host_bridge *bridge)
 {
         struct device *parent = bridge->dev.parent;
@@ -897,6 +957,7 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge)
         resource_size_t offset, next_offset;
         LIST_HEAD(resources);
         struct resource *res, *next_res;
+        bool bus_registered = false;
         char addr[64], *fmt;
         const char *name;
         int err;
@@ -939,10 +1000,9 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge)
         /* Temporarily move resources off the list */
         list_splice_init(&bridge->windows, &resources);
         err = device_add(&bridge->dev);
-        if (err) {
-                put_device(&bridge->dev);
+        if (err)
                 goto free;
-        }
+
         bus->bridge = get_device(&bridge->dev);
         device_enable_async_suspend(bus->bridge);
         pci_set_bus_of_node(bus);
@@ -961,6 +1021,7 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge)
         name = dev_name(&bus->dev);
 
         err = device_register(&bus->dev);
+        bus_registered = true;
         if (err)
                 goto unregister;
 
@@ -983,6 +1044,9 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge)
         if (nr_node_ids > 1 && pcibus_to_node(bus) == NUMA_NO_NODE)
                 dev_warn(&bus->dev, "Unknown NUMA node; performance will be reduced\n");
 
+        /* Check if the boot configuration by FW needs to be preserved */
+        bridge->preserve_config = pci_preserve_config(bridge);
+
         /* Coalesce contiguous windows */
         resource_list_for_each_entry_safe(window, n, &resources) {
                 if (list_is_last(&window->node, &resources))
@@ -1018,7 +1082,7 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge)
                 if (res->flags & IORESOURCE_BUS)
                         pci_bus_insert_busn_res(bus, bus->number, res->end);
                 else
-                        pci_bus_add_resource(bus, res, 0);
+                        pci_bus_add_resource(bus, res);
 
                 if (offset) {
                         if (resource_type(res) == IORESOURCE_IO)
@@ -1035,6 +1099,8 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge)
                 dev_info(&bus->dev, "root bus resource %pR%s\n", res, addr);
         }
 
+        of_pci_make_host_bridge_node(bridge);
+
         down_write(&pci_bus_sem);
         list_add_tail(&bus->node, &pci_root_buses);
         up_write(&pci_bus_sem);
@@ -1044,12 +1110,15 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge)
 unregister:
         put_device(&bridge->dev);
         device_del(&bridge->dev);
-
 free:
 #ifdef CONFIG_PCI_DOMAINS_GENERIC
-        pci_bus_release_domain_nr(bus, parent);
+        pci_bus_release_domain_nr(parent, bus->domain_nr);
 #endif
-        kfree(bus);
+        if (bus_registered)
+                put_device(&bus->dev);
+        else
+                kfree(bus);
+
         return err;
 }
 
@@ -1158,7 +1227,10 @@ static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
 add_dev:
         pci_set_bus_msi_domain(child);
         ret = device_register(&child->dev);
-        WARN_ON(ret < 0);
+        if (WARN_ON(ret < 0)) {
+                put_device(&child->dev);
+                return NULL;
+        }
 
         pcibios_add_bus(child);
 
@@ -1189,15 +1261,17 @@ struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev,
 }
 EXPORT_SYMBOL(pci_add_new_bus);
 
-static void pci_enable_crs(struct pci_dev *pdev)
+static void pci_enable_rrs_sv(struct pci_dev *pdev)
 {
         u16 root_cap = 0;
 
-        /* Enable CRS Software Visibility if supported */
+        /* Enable Configuration RRS Software Visibility if supported */
         pcie_capability_read_word(pdev, PCI_EXP_RTCAP, &root_cap);
-        if (root_cap & PCI_EXP_RTCAP_CRSVIS)
+        if (root_cap & PCI_EXP_RTCAP_RRS_SV) {
                 pcie_capability_set_word(pdev, PCI_EXP_RTCTL,
-                                         PCI_EXP_RTCTL_CRSSVE);
+                                         PCI_EXP_RTCTL_RRS_SVE);
+                pdev->config_rrs_sv = 1;
+        }
 }
 
 static unsigned int pci_scan_child_bus_extend(struct pci_bus *bus,
@@ -1312,8 +1386,6 @@ static int pci_scan_bridge_extend(struct pci_bus *bus, struct pci_dev *dev,
         pci_write_config_word(dev, PCI_BRIDGE_CONTROL,
                               bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT);
 
-        pci_enable_crs(dev);
-
         if ((secondary || subordinate) && !pcibios_assign_all_busses() &&
             !is_cardbus && !broken) {
                 unsigned int cmax, buses;
@@ -1482,6 +1554,9 @@ static int pci_scan_bridge_extend(struct pci_bus *bus, struct pci_dev *dev,
         }
 
 out:
+        /* Clear errors in the Secondary Status Register */
+        pci_write_config_word(dev, PCI_SEC_STATUS, 0xffff);
+
         pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bctl);
 
         pm_runtime_put(&dev->dev);
@@ -1551,6 +1626,11 @@ void set_pcie_port_type(struct pci_dev *pdev)
         pdev->pcie_cap = pos;
         pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
         pdev->pcie_flags_reg = reg16;
+
+        type = pci_pcie_type(pdev);
+        if (type == PCI_EXP_TYPE_ROOT_PORT)
+                pci_enable_rrs_sv(pdev);
+
         pci_read_config_dword(pdev, pos + PCI_EXP_DEVCAP, &pdev->devcap);
         pdev->pcie_mpss = FIELD_GET(PCI_EXP_DEVCAP_PAYLOAD, pdev->devcap);
 
@@ -1567,7 +1647,6 @@ void set_pcie_port_type(struct pci_dev *pdev)
          * correctly so detect impossible configurations here and correct
          * the port type accordingly.
          */
-        type = pci_pcie_type(pdev);
         if (type == PCI_EXP_TYPE_DOWNSTREAM) {
                 /*
                  * If pdev claims to be downstream port but the parent
@@ -1614,23 +1693,33 @@ static void set_pcie_thunderbolt(struct pci_dev *dev)
 
 static void set_pcie_untrusted(struct pci_dev *dev)
 {
-        struct pci_dev *parent;
+        struct pci_dev *parent = pci_upstream_bridge(dev);
 
+        if (!parent)
+                return;
         /*
-         * If the upstream bridge is untrusted we treat this device
+         * If the upstream bridge is untrusted we treat this device as
         * untrusted as well.
         */
-        parent = pci_upstream_bridge(dev);
-        if (parent && (parent->untrusted || parent->external_facing))
+        if (parent->untrusted) {
+                dev->untrusted = true;
+                return;
+        }
+
+        if (arch_pci_dev_is_removable(dev)) {
+                pci_dbg(dev, "marking as untrusted\n");
                 dev->untrusted = true;
+        }
 }
 
 static void pci_set_removable(struct pci_dev *dev)
 {
         struct pci_dev *parent = pci_upstream_bridge(dev);
 
+        if (!parent)
+                return;
         /*
-         * We (only) consider everything downstream from an external_facing
+         * We (only) consider everything tunneled below an external_facing
         * device to be removable by the user. We're mainly concerned with
         * consumer platforms with user accessible thunderbolt ports that are
         * vulnerable to DMA attacks, and we expect those ports to be marked by
@@ -1640,9 +1729,15 @@ static void pci_set_removable(struct pci_dev *dev)
         * accessible to user / may not be removed by end user, and thus not
         * exposed as "removable" to userspace.
         */
-        if (parent &&
-            (parent->external_facing || dev_is_removable(&parent->dev)))
+        if (dev_is_removable(&parent->dev)) {
                 dev_set_removable(&dev->dev, DEVICE_REMOVABLE);
+                return;
+        }
+
+        if (arch_pci_dev_is_removable(dev)) {
+                pci_dbg(dev, "marking as removable\n");
+                dev_set_removable(&dev->dev, DEVICE_REMOVABLE);
+        }
 }
 
 /**
@@ -1928,6 +2023,9 @@ int pci_setup_device(struct pci_dev *dev)
 
         set_pcie_untrusted(dev);
 
+        if (pci_is_pcie(dev))
+                dev->supported_speeds = pcie_get_supported_speeds(dev);
+
         /* "Unknown power state" */
         dev->current_state = PCI_UNKNOWN;
 
@@ -1960,7 +2058,7 @@ int pci_setup_device(struct pci_dev *dev)
                 if (class == PCI_CLASS_BRIDGE_PCI)
                         goto bad;
                 pci_read_irq(dev);
-                pci_read_bases(dev, 6, PCI_ROM_ADDRESS);
+                pci_read_bases(dev, PCI_STD_NUM_BARS, PCI_ROM_ADDRESS);
                 pci_subsystem_ids(dev, &dev->subsystem_vendor, &dev->subsystem_device);
 
@@ -2211,8 +2309,8 @@ static void pci_configure_relaxed_ordering(struct pci_dev *dev)
 
 static void pci_configure_eetlp_prefix(struct pci_dev *dev)
 {
-#ifdef CONFIG_PCI_PASID
         struct pci_dev *bridge;
+        unsigned int eetlp_max;
         int pcie_type;
         u32 cap;
 
@@ -2224,15 +2322,19 @@ static void pci_configure_eetlp_prefix(struct pci_dev *dev)
                 return;
 
         pcie_type = pci_pcie_type(dev);
+
+        eetlp_max = FIELD_GET(PCI_EXP_DEVCAP2_EE_PREFIX_MAX, cap);
+        /* 00b means 4 */
+        eetlp_max = eetlp_max ?: 4;
+
         if (pcie_type == PCI_EXP_TYPE_ROOT_PORT ||
             pcie_type == PCI_EXP_TYPE_RC_END)
-                dev->eetlp_prefix_path = 1;
+                dev->eetlp_prefix_max = eetlp_max;
         else {
                 bridge = pci_upstream_bridge(dev);
-                if (bridge && bridge->eetlp_prefix_path)
-                        dev->eetlp_prefix_path = 1;
+                if (bridge && bridge->eetlp_prefix_max)
+                        dev->eetlp_prefix_max = eetlp_max;
         }
-#endif
 }
 
 static void pci_configure_serr(struct pci_dev *dev)
@@ -2326,28 +2428,23 @@ struct pci_dev *pci_alloc_dev(struct pci_bus *bus)
 }
 EXPORT_SYMBOL(pci_alloc_dev);
 
-static bool pci_bus_crs_vendor_id(u32 l)
-{
-        return (l & 0xffff) == PCI_VENDOR_ID_PCI_SIG;
-}
-
-static bool pci_bus_wait_crs(struct pci_bus *bus, int devfn, u32 *l,
+static bool pci_bus_wait_rrs(struct pci_bus *bus, int devfn, u32 *l,
                              int timeout)
 {
         int delay = 1;
 
-        if (!pci_bus_crs_vendor_id(*l))
-                return true;    /* not a CRS completion */
+        if (!pci_bus_rrs_vendor_id(*l))
+                return true;    /* not a Configuration RRS completion */
 
         if (!timeout)
-                return false;   /* CRS, but caller doesn't want to wait */
+                return false;   /* RRS, but caller doesn't want to wait */
 
         /*
          * We got the reserved Vendor ID that indicates a completion with
-         * Configuration Request Retry Status (CRS). Retry until we get a
+         * Configuration Request Retry Status (RRS). Retry until we get a
         * valid Vendor ID or we time out.
         */
-        while (pci_bus_crs_vendor_id(*l)) {
+        while (pci_bus_rrs_vendor_id(*l)) {
                 if (delay > timeout) {
                         pr_warn("pci %04x:%02x:%02x.%d: not ready after %dms; giving up\n",
                                 pci_domain_nr(bus), bus->number,
@@ -2386,8 +2483,8 @@ bool pci_bus_generic_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l,
             *l == 0x0000ffff || *l == 0xffff0000)
                 return false;
 
-        if (pci_bus_crs_vendor_id(*l))
-                return pci_bus_wait_crs(bus, devfn, l, timeout);
+        if (pci_bus_rrs_vendor_id(*l))
+                return pci_bus_wait_rrs(bus, devfn, l, timeout);
 
         return true;
 }
@@ -2411,6 +2508,43 @@ bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l,
 }
 EXPORT_SYMBOL(pci_bus_read_dev_vendor_id);
 
+#if IS_ENABLED(CONFIG_PCI_PWRCTRL)
+static struct platform_device *pci_pwrctrl_create_device(struct pci_bus *bus, int devfn)
+{
+        struct pci_host_bridge *host = pci_find_host_bridge(bus);
+        struct platform_device *pdev;
+        struct device_node *np;
+
+        np = of_pci_find_child_device(dev_of_node(&bus->dev), devfn);
+        if (!np || of_find_device_by_node(np))
+                return NULL;
+
+        /*
+         * First check whether the pwrctrl device really needs to be created or
+         * not. This is decided based on at least one of the power supplies
+         * being defined in the devicetree node of the device.
+         */
+        if (!of_pci_supply_present(np)) {
+                pr_debug("PCI/pwrctrl: Skipping OF node: %s\n", np->name);
+                return NULL;
+        }
+
+        /* Now create the pwrctrl device */
+        pdev = of_platform_device_create(np, NULL, &host->dev);
+        if (!pdev) {
+                pr_err("PCI/pwrctrl: Failed to create pwrctrl device for node: %s\n", np->name);
+                return NULL;
+        }
+
+        return pdev;
+}
+#else
+static struct platform_device *pci_pwrctrl_create_device(struct pci_bus *bus, int devfn)
+{
+        return NULL;
+}
+#endif
+
 /*
  * Read the config data for a PCI device, sanity-check it,
  * and fill in the dev structure.
@@ -2420,6 +2554,15 @@ static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
         struct pci_dev *dev;
         u32 l;
 
+        /*
+         * Create pwrctrl device (if required) for the PCI device to handle the
+         * power state. If the pwrctrl device is created, then skip scanning
+         * further as the pwrctrl core will rescan the bus after powering on
+         * the device.
+         */
+        if (pci_pwrctrl_create_device(bus, devfn))
+                return NULL;
+
         if (!pci_bus_read_dev_vendor_id(bus, devfn, &l, 60*1000))
                 return NULL;
 
@@ -2481,6 +2624,8 @@ static void pci_init_capabilities(struct pci_dev *dev)
         pci_dpc_init(dev);              /* Downstream Port Containment */
         pci_rcec_init(dev);             /* Root Complex Event Collector */
         pci_doe_init(dev);              /* Data Object Exchange */
+        pci_tph_init(dev);              /* TLP Processing Hints */
+        pci_rebar_init(dev);            /* Resizable BAR */
 
         pcie_report_downtraining(dev);
         pci_init_reset_methods(dev);
@@ -2573,9 +2718,12 @@ void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
         pci_set_msi_domain(dev);
 
         /* Notifier could use PCI capabilities */
-        dev->match_driver = false;
         ret = device_add(&dev->dev);
         WARN_ON(ret < 0);
+
+        pci_npem_create(dev);
+
+        pci_doe_sysfs_init(dev);
 }
 
 struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn)
@@ -3066,7 +3214,9 @@ int pci_host_probe(struct pci_host_bridge *bridge)
         struct pci_bus *bus, *child;
         int ret;
 
+        pci_lock_rescan_remove();
         ret = pci_scan_root_bus_bridge(bridge);
+        pci_unlock_rescan_remove();
         if (ret < 0) {
                 dev_err(bridge->dev.parent, "Scanning root bridge failed");
                 return ret;
@@ -3074,22 +3224,33 @@ int pci_host_probe(struct pci_host_bridge *bridge)
 
         bus = bridge->bus;
 
+        /* If we must preserve the resource configuration, claim now */
+        if (bridge->preserve_config)
+                pci_bus_claim_resources(bus);
+
         /*
-         * We insert PCI resources into the iomem_resource and
-         * ioport_resource trees in either pci_bus_claim_resources()
-         * or pci_bus_assign_resources().
+         * Assign whatever was left unassigned. If we didn't claim above,
+         * this will reassign everything.
         */
-        if (pci_has_flag(PCI_PROBE_ONLY)) {
-                pci_bus_claim_resources(bus);
-        } else {
-                pci_bus_size_bridges(bus);
-                pci_bus_assign_resources(bus);
+        pci_assign_unassigned_root_bus_resources(bus);
 
-                list_for_each_entry(child, &bus->children, node)
-                        pcie_bus_configure_settings(child);
-        }
+        list_for_each_entry(child, &bus->children, node)
+                pcie_bus_configure_settings(child);
 
+        pci_lock_rescan_remove();
         pci_bus_add_devices(bus);
+        pci_unlock_rescan_remove();
+
+        /*
+         * Ensure pm_runtime_enable() is called for the controller drivers
+         * before calling pci_host_probe(). The PM framework expects that
+         * if the parent device supports runtime PM, it will be enabled
+         * before child runtime PM is enabled.
+         */
+        pm_runtime_set_active(&bridge->dev);
+        pm_runtime_no_callbacks(&bridge->dev);
+        devm_pm_runtime_enable(&bridge->dev);
+
         return 0;
 }
 EXPORT_SYMBOL_GPL(pci_host_probe);
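
For readers unfamiliar with the BAR-sizing protocol that __pci_size_bars() batches in this commit: writing all 1s to a BAR and reading it back yields a mask whose writable bits encode the region size, and the size is derived later from that cached mask. A minimal, self-contained sketch of that derivation (illustrative only; the flag-bit mask and the sample value are assumptions, not the kernel's own sizing code):

/*
 * Illustrative only: derive a BAR size from the mask value cached by a
 * batched sizing pass (the result of the all-1s write and read-back).
 * The low four bits of a memory BAR are flag bits and must be masked
 * off; the lowest remaining writable bit equals the size/alignment.
 */
#include <stdint.h>
#include <stdio.h>

#define BAR_MEM_FLAG_BITS 0xfULL    /* assumption: memory BAR flag bits */

static uint64_t bar_size_from_mask(uint64_t mask)
{
        uint64_t addr_bits = mask & ~BAR_MEM_FLAG_BITS;

        if (!addr_bits)
                return 0;       /* BAR not implemented */

        /* Lowest settable address bit gives the size (and alignment) */
        return addr_bits & ~(addr_bits - 1);
}

int main(void)
{
        /* Example: a device that returned 0xfffff000 -> 4 KiB BAR */
        printf("BAR size: %llu bytes\n",
               (unsigned long long)bar_size_from_mask(0xfffff000));
        return 0;
}

With the masks cached up front, decode only has to be toggled once per device rather than once per BAR, which is the overhead the new __pci_size_bars() kerneldoc calls out.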
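The runtime PM note added to pci_host_probe() assumes the controller driver brings up runtime PM on its own device before the host bridge is probed. A hedged sketch of that ordering in a hypothetical controller probe (example_pcie_probe and the omitted controller setup are assumptions, not a real driver):

#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

/* Hypothetical controller probe showing the expected runtime PM ordering */
static int example_pcie_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct pci_host_bridge *bridge;
        int ret;

        bridge = devm_pci_alloc_host_bridge(dev, 0);
        if (!bridge)
                return -ENOMEM;

        /* ... controller-specific setup of bridge->ops and windows ... */

        /* Enable runtime PM on the parent controller first */
        pm_runtime_set_active(dev);
        ret = devm_pm_runtime_enable(dev);
        if (ret)
                return ret;

        /* pci_host_probe() then activates runtime PM on the bridge device */
        return pci_host_probe(bridge);
}

A controller driver that enabled runtime PM only after pci_host_probe() would violate the parent-before-child expectation described in the comment above.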