Diffstat (limited to 'drivers/pci/pci.c')
 -rw-r--r--   drivers/pci/pci.c   | 4569
 1 file changed, 2915 insertions, 1654 deletions
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index af0cc3456dc1..13dbb405dc31 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -1,10 +1,11 @@ +// SPDX-License-Identifier: GPL-2.0 /* - * PCI Bus Services, see include/linux/pci.h for further explanation. + * PCI Bus Services, see include/linux/pci.h for further explanation. * - * Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter, - * David Mosberger-Tang + * Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter, + * David Mosberger-Tang * - * Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz> + * Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz> */ #include <linux/acpi.h> @@ -12,8 +13,8 @@ #include <linux/delay.h> #include <linux/dmi.h> #include <linux/init.h> +#include <linux/msi.h> #include <linux/of.h> -#include <linux/of_pci.h> #include <linux/pci.h> #include <linux/pm.h> #include <linux/slab.h> @@ -21,31 +22,32 @@ #include <linux/spinlock.h> #include <linux/string.h> #include <linux/log2.h> -#include <linux/pci-aspm.h> -#include <linux/pm_wakeup.h> -#include <linux/interrupt.h> +#include <linux/logic_pio.h> #include <linux/device.h> #include <linux/pm_runtime.h> #include <linux/pci_hotplug.h> #include <linux/vmalloc.h> -#include <linux/pci-ats.h> -#include <asm/setup.h> #include <asm/dma.h> #include <linux/aer.h> +#include <linux/bitfield.h> #include "pci.h" +DEFINE_MUTEX(pci_slot_mutex); + const char *pci_power_names[] = { "error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown", }; EXPORT_SYMBOL_GPL(pci_power_names); +#ifdef CONFIG_X86_32 int isa_dma_bridge_buggy; EXPORT_SYMBOL(isa_dma_bridge_buggy); +#endif int pci_pci_problems; EXPORT_SYMBOL(pci_pci_problems); -unsigned int pci_pm_d3_delay; +unsigned int pci_pm_d3hot_delay; static void pci_pme_list_scan(struct work_struct *work); @@ -60,15 +62,37 @@ struct pci_pme_device { #define PME_TIMEOUT 1000 /* How long between PME checks */ +/* + * Following exit from Conventional Reset, devices must be ready within 1 sec + * (PCIe r6.0 sec 6.6.1). A D3cold to D0 transition implies a Conventional + * Reset (PCIe r6.0 sec 5.8). + */ +#define PCI_RESET_WAIT 1000 /* msec */ + +/* + * Devices may extend the 1 sec period through Request Retry Status + * completions (PCIe r6.0 sec 2.3.1). The spec does not provide an upper + * limit, but 60 sec ought to be enough for any device to become + * responsive. 
+ */ +#define PCIE_RESET_READY_POLL_MS 60000 /* msec */ + static void pci_dev_d3_sleep(struct pci_dev *dev) { - unsigned int delay = dev->d3_delay; + unsigned int delay_ms = max(dev->d3hot_delay, pci_pm_d3hot_delay); + unsigned int upper; - if (delay < pci_pm_d3_delay) - delay = pci_pm_d3_delay; + if (delay_ms) { + /* Use a 20% upper bound, 1ms minimum */ + upper = max(DIV_ROUND_CLOSEST(delay_ms, 5), 1U); + usleep_range(delay_ms * USEC_PER_MSEC, + (delay_ms + upper) * USEC_PER_MSEC); + } +} - if (delay) - msleep(delay); +bool pci_reset_supported(struct pci_dev *dev) +{ + return dev->reset_methods[0] != 0; } #ifdef CONFIG_PCI_DOMAINS @@ -82,15 +106,34 @@ unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE; unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE; #define DEFAULT_HOTPLUG_IO_SIZE (256) -#define DEFAULT_HOTPLUG_MEM_SIZE (2*1024*1024) -/* pci=hpmemsize=nnM,hpiosize=nn can override this */ +#define DEFAULT_HOTPLUG_MMIO_SIZE (2*1024*1024) +#define DEFAULT_HOTPLUG_MMIO_PREF_SIZE (2*1024*1024) +/* hpiosize=nn can override this */ unsigned long pci_hotplug_io_size = DEFAULT_HOTPLUG_IO_SIZE; -unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE; +/* + * pci=hpmmiosize=nnM overrides non-prefetchable MMIO size, + * pci=hpmmioprefsize=nnM overrides prefetchable MMIO size; + * pci=hpmemsize=nnM overrides both + */ +unsigned long pci_hotplug_mmio_size = DEFAULT_HOTPLUG_MMIO_SIZE; +unsigned long pci_hotplug_mmio_pref_size = DEFAULT_HOTPLUG_MMIO_PREF_SIZE; #define DEFAULT_HOTPLUG_BUS_SIZE 1 unsigned long pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE; + +/* PCIe MPS/MRRS strategy; can be overridden by kernel command-line param */ +#ifdef CONFIG_PCIE_BUS_TUNE_OFF +enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_TUNE_OFF; +#elif defined CONFIG_PCIE_BUS_SAFE +enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_SAFE; +#elif defined CONFIG_PCIE_BUS_PERFORMANCE +enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_PERFORMANCE; +#elif defined CONFIG_PCIE_BUS_PEER2PEER +enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_PEER2PEER; +#else enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_DEFAULT; +#endif /* * The default CLS is used if arch didn't set CLS explicitly and not @@ -98,8 +141,8 @@ enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_DEFAULT; * the dfl or actual value as it sees fit. Don't forget this is * measured in 32-bit words, not bytes. */ -u8 pci_dfl_cache_line_size = L1_CACHE_BYTES >> 2; -u8 pci_cache_line_size; +u8 pci_dfl_cache_line_size __ro_after_init = L1_CACHE_BYTES >> 2; +u8 pci_cache_line_size __ro_after_init ; /* * If we set up a device for bus mastering, we need to check the latency @@ -110,6 +153,18 @@ unsigned int pcibios_max_latency = 255; /* If set, the PCIe ARI capability will not be used. */ static bool pcie_ari_disabled; +/* If set, the PCIe ATS capability will not be used. */ +static bool pcie_ats_disabled; + +/* If set, the PCI config space of each device is printed during boot. */ +bool pci_early_dump; + +bool pci_ats_disabled(void) +{ + return pcie_ats_disabled; +} +EXPORT_SYMBOL_GPL(pci_ats_disabled); + /* Disable bridge_d3 for all PCIe ports */ static bool pci_bridge_d3_disable; /* Force bridge_d3 for all PCIe ports */ @@ -147,78 +202,241 @@ unsigned char pci_bus_max_busnr(struct pci_bus *bus) } EXPORT_SYMBOL_GPL(pci_bus_max_busnr); +/** + * pci_status_get_and_clear_errors - return and clear error bits in PCI_STATUS + * @pdev: the PCI device + * + * Returns error bits set in PCI_STATUS and clears them. 
+ */ +int pci_status_get_and_clear_errors(struct pci_dev *pdev) +{ + u16 status; + int ret; + + ret = pci_read_config_word(pdev, PCI_STATUS, &status); + if (ret != PCIBIOS_SUCCESSFUL) + return -EIO; + + status &= PCI_STATUS_ERROR_BITS; + if (status) + pci_write_config_word(pdev, PCI_STATUS, status); + + return status; +} +EXPORT_SYMBOL_GPL(pci_status_get_and_clear_errors); + #ifdef CONFIG_HAS_IOMEM -void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar) +static void __iomem *__pci_ioremap_resource(struct pci_dev *pdev, int bar, + bool write_combine) { struct resource *res = &pdev->resource[bar]; + resource_size_t start = res->start; + resource_size_t size = resource_size(res); /* * Make sure the BAR is actually a memory resource, not an IO resource */ if (res->flags & IORESOURCE_UNSET || !(res->flags & IORESOURCE_MEM)) { - dev_warn(&pdev->dev, "can't ioremap BAR %d: %pR\n", bar, res); + pci_err(pdev, "can't ioremap BAR %d: %pR\n", bar, res); return NULL; } - return ioremap_nocache(res->start, resource_size(res)); + + if (write_combine) + return ioremap_wc(start, size); + + return ioremap(start, size); +} + +void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar) +{ + return __pci_ioremap_resource(pdev, bar, false); } EXPORT_SYMBOL_GPL(pci_ioremap_bar); void __iomem *pci_ioremap_wc_bar(struct pci_dev *pdev, int bar) { - /* - * Make sure the BAR is actually a memory resource, not an IO resource - */ - if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) { - WARN_ON(1); - return NULL; - } - return ioremap_wc(pci_resource_start(pdev, bar), - pci_resource_len(pdev, bar)); + return __pci_ioremap_resource(pdev, bar, true); } EXPORT_SYMBOL_GPL(pci_ioremap_wc_bar); #endif - -static int __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn, - u8 pos, int cap, int *ttl) +/** + * pci_dev_str_match_path - test if a path string matches a device + * @dev: the PCI device to test + * @path: string to match the device against + * @endptr: pointer to the string after the match + * + * Test if a string (typically from a kernel parameter) formatted as a + * path of device/function addresses matches a PCI device. The string must + * be of the form: + * + * [<domain>:]<bus>:<device>.<func>[/<device>.<func>]* + * + * A path for a device can be obtained using 'lspci -t'. Using a path + * is more robust against bus renumbering than using only a single bus, + * device and function address. + * + * Returns 1 if the string matches the device, 0 if it does not and + * a negative error code if it fails to parse the string. 
+ */ +static int pci_dev_str_match_path(struct pci_dev *dev, const char *path, + const char **endptr) { - u8 id; - u16 ent; + int ret; + unsigned int seg, bus, slot, func; + char *wpath, *p; + char end; - pci_bus_read_config_byte(bus, devfn, pos, &pos); + *endptr = strchrnul(path, ';'); - while ((*ttl)--) { - if (pos < 0x40) - break; - pos &= ~3; - pci_bus_read_config_word(bus, devfn, pos, &ent); + wpath = kmemdup_nul(path, *endptr - path, GFP_ATOMIC); + if (!wpath) + return -ENOMEM; - id = ent & 0xff; - if (id == 0xff) + while (1) { + p = strrchr(wpath, '/'); + if (!p) break; - if (id == cap) - return pos; - pos = (ent >> 8); + ret = sscanf(p, "/%x.%x%c", &slot, &func, &end); + if (ret != 2) { + ret = -EINVAL; + goto free_and_exit; + } + + if (dev->devfn != PCI_DEVFN(slot, func)) { + ret = 0; + goto free_and_exit; + } + + /* + * Note: we don't need to get a reference to the upstream + * bridge because we hold a reference to the top level + * device which should hold a reference to the bridge, + * and so on. + */ + dev = pci_upstream_bridge(dev); + if (!dev) { + ret = 0; + goto free_and_exit; + } + + *p = 0; } - return 0; + + ret = sscanf(wpath, "%x:%x:%x.%x%c", &seg, &bus, &slot, + &func, &end); + if (ret != 4) { + seg = 0; + ret = sscanf(wpath, "%x:%x.%x%c", &bus, &slot, &func, &end); + if (ret != 3) { + ret = -EINVAL; + goto free_and_exit; + } + } + + ret = (seg == pci_domain_nr(dev->bus) && + bus == dev->bus->number && + dev->devfn == PCI_DEVFN(slot, func)); + +free_and_exit: + kfree(wpath); + return ret; } -static int __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn, - u8 pos, int cap) +/** + * pci_dev_str_match - test if a string matches a device + * @dev: the PCI device to test + * @p: string to match the device against + * @endptr: pointer to the string after the match + * + * Test if a string (typically from a kernel parameter) matches a specified + * PCI device. The string may be of one of the following formats: + * + * [<domain>:]<bus>:<device>.<func>[/<device>.<func>]* + * pci:<vendor>:<device>[:<subvendor>:<subdevice>] + * + * The first format specifies a PCI bus/device/function address which + * may change if new hardware is inserted, if motherboard firmware changes, + * or due to changes caused in kernel parameters. If the domain is + * left unspecified, it is taken to be 0. In order to be robust against + * bus renumbering issues, a path of PCI device/function numbers may be used + * to address the specific device. The path for a device can be determined + * through the use of 'lspci -t'. + * + * The second format matches devices using IDs in the configuration + * space which may match multiple devices in the system. A value of 0 + * for any field will match all devices. (Note: this differs from + * in-kernel code that uses PCI_ANY_ID which is ~0; this is for + * legacy reasons and convenience so users don't have to specify + * FFFFFFFFs on the command line.) + * + * Returns 1 if the string matches the device, 0 if it does not and + * a negative error code if the string cannot be parsed. 
+ */ +static int pci_dev_str_match(struct pci_dev *dev, const char *p, + const char **endptr) { - int ttl = PCI_FIND_CAP_TTL; + int ret; + int count; + unsigned short vendor, device, subsystem_vendor, subsystem_device; - return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl); + if (strncmp(p, "pci:", 4) == 0) { + /* PCI vendor/device (subvendor/subdevice) IDs are specified */ + p += 4; + ret = sscanf(p, "%hx:%hx:%hx:%hx%n", &vendor, &device, + &subsystem_vendor, &subsystem_device, &count); + if (ret != 4) { + ret = sscanf(p, "%hx:%hx%n", &vendor, &device, &count); + if (ret != 2) + return -EINVAL; + + subsystem_vendor = 0; + subsystem_device = 0; + } + + p += count; + + if ((!vendor || vendor == dev->vendor) && + (!device || device == dev->device) && + (!subsystem_vendor || + subsystem_vendor == dev->subsystem_vendor) && + (!subsystem_device || + subsystem_device == dev->subsystem_device)) + goto found; + } else { + /* + * PCI Bus, Device, Function IDs are specified + * (optionally, may include a path of devfns following it) + */ + ret = pci_dev_str_match_path(dev, p, &p); + if (ret < 0) + return ret; + else if (ret) + goto found; + } + + *endptr = p; + return 0; + +found: + *endptr = p; + return 1; } -int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap) +static u8 __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn, + u8 pos, int cap) +{ + return PCI_FIND_NEXT_CAP(pci_bus_read_config, pos, cap, bus, devfn); +} + +u8 pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap) { return __pci_find_next_cap(dev->bus, dev->devfn, pos + PCI_CAP_LIST_NEXT, cap); } EXPORT_SYMBOL_GPL(pci_find_next_capability); -static int __pci_bus_find_cap_start(struct pci_bus *bus, +static u8 __pci_bus_find_cap_start(struct pci_bus *bus, unsigned int devfn, u8 hdr_type) { u16 status; @@ -246,7 +464,7 @@ static int __pci_bus_find_cap_start(struct pci_bus *bus, * Tell if a device supports a given PCI capability. * Returns the address of the requested capability structure within the * device's PCI configuration space or 0 in case the device does not - * support it. Possible values for @cap: + * support it. Possible values for @cap include: * * %PCI_CAP_ID_PM Power Management * %PCI_CAP_ID_AGP Accelerated Graphics Port @@ -257,9 +475,9 @@ static int __pci_bus_find_cap_start(struct pci_bus *bus, * %PCI_CAP_ID_PCIX PCI-X * %PCI_CAP_ID_EXP PCI Express */ -int pci_find_capability(struct pci_dev *dev, int cap) +u8 pci_find_capability(struct pci_dev *dev, int cap) { - int pos; + u8 pos; pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type); if (pos) @@ -271,25 +489,24 @@ EXPORT_SYMBOL(pci_find_capability); /** * pci_bus_find_capability - query for devices' capabilities - * @bus: the PCI bus to query + * @bus: the PCI bus to query * @devfn: PCI device to query - * @cap: capability code + * @cap: capability code * - * Like pci_find_capability() but works for pci devices that do not have a + * Like pci_find_capability() but works for PCI devices that do not have a * pci_dev structure set up yet. * * Returns the address of the requested capability structure within the * device's PCI configuration space or 0 in case the device does not * support it. 
*/ -int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap) +u8 pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap) { - int pos; - u8 hdr_type; + u8 hdr_type, pos; pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type); - pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & 0x7f); + pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & PCI_HEADER_TYPE_MASK); if (pos) pos = __pci_find_next_cap(bus, devfn, pos, cap); @@ -308,44 +525,13 @@ EXPORT_SYMBOL(pci_bus_find_capability); * not support it. Some capabilities can occur several times, e.g., the * vendor-specific capability, and this provides a way to find them all. */ -int pci_find_next_ext_capability(struct pci_dev *dev, int start, int cap) +u16 pci_find_next_ext_capability(struct pci_dev *dev, u16 start, int cap) { - u32 header; - int ttl; - int pos = PCI_CFG_SPACE_SIZE; - - /* minimum 8 bytes per capability */ - ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8; - if (dev->cfg_size <= PCI_CFG_SPACE_SIZE) return 0; - if (start) - pos = start; - - if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL) - return 0; - - /* - * If we have no capabilities, this is indicated by cap ID, - * cap version and next pointer all being 0. - */ - if (header == 0) - return 0; - - while (ttl-- > 0) { - if (PCI_EXT_CAP_ID(header) == cap && pos != start) - return pos; - - pos = PCI_EXT_CAP_NEXT(header); - if (pos < PCI_CFG_SPACE_SIZE) - break; - - if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL) - break; - } - - return 0; + return PCI_FIND_NEXT_EXT_CAP(pci_bus_read_config, start, cap, + dev->bus, dev->devfn); } EXPORT_SYMBOL_GPL(pci_find_next_ext_capability); @@ -356,22 +542,56 @@ EXPORT_SYMBOL_GPL(pci_find_next_ext_capability); * * Returns the address of the requested extended capability structure * within the device's PCI configuration space or 0 if the device does - * not support it. Possible values for @cap: + * not support it. Possible values for @cap include: * * %PCI_EXT_CAP_ID_ERR Advanced Error Reporting * %PCI_EXT_CAP_ID_VC Virtual Channel * %PCI_EXT_CAP_ID_DSN Device Serial Number * %PCI_EXT_CAP_ID_PWR Power Budgeting */ -int pci_find_ext_capability(struct pci_dev *dev, int cap) +u16 pci_find_ext_capability(struct pci_dev *dev, int cap) { return pci_find_next_ext_capability(dev, 0, cap); } EXPORT_SYMBOL_GPL(pci_find_ext_capability); -static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap) +/** + * pci_get_dsn - Read and return the 8-byte Device Serial Number + * @dev: PCI device to query + * + * Looks up the PCI_EXT_CAP_ID_DSN and reads the 8 bytes of the Device Serial + * Number. + * + * Returns the DSN, or zero if the capability does not exist. + */ +u64 pci_get_dsn(struct pci_dev *dev) +{ + u32 dword; + u64 dsn; + int pos; + + pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DSN); + if (!pos) + return 0; + + /* + * The Device Serial Number is two dwords offset 4 bytes from the + * capability position. The specification says that the first dword is + * the lower half, and the second dword is the upper half. 
+ */ + pos += 4; + pci_read_config_dword(dev, pos, &dword); + dsn = (u64)dword; + pci_read_config_dword(dev, pos + 4, &dword); + dsn |= ((u64)dword) << 32; + + return dsn; +} +EXPORT_SYMBOL_GPL(pci_get_dsn); + +static u8 __pci_find_next_ht_cap(struct pci_dev *dev, u8 pos, int ht_cap) { - int rc, ttl = PCI_FIND_CAP_TTL; + int rc; u8 cap, mask; if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST) @@ -379,8 +599,8 @@ static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap) else mask = HT_5BIT_CAP_MASK; - pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos, - PCI_CAP_ID_HT, &ttl); + pos = PCI_FIND_NEXT_CAP(pci_bus_read_config, pos, + PCI_CAP_ID_HT, dev->bus, dev->devfn); while (pos) { rc = pci_read_config_byte(dev, pos + 3, &cap); if (rc != PCIBIOS_SUCCESSFUL) @@ -389,18 +609,20 @@ static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap) if ((cap & mask) == ht_cap) return pos; - pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, - pos + PCI_CAP_LIST_NEXT, - PCI_CAP_ID_HT, &ttl); + pos = PCI_FIND_NEXT_CAP(pci_bus_read_config, + pos + PCI_CAP_LIST_NEXT, + PCI_CAP_ID_HT, dev->bus, + dev->devfn); } return 0; } + /** - * pci_find_next_ht_capability - query a device's Hypertransport capabilities + * pci_find_next_ht_capability - query a device's HyperTransport capabilities * @dev: PCI device to query * @pos: Position from which to continue searching - * @ht_cap: Hypertransport capability code + * @ht_cap: HyperTransport capability code * * To be used in conjunction with pci_find_ht_capability() to search for * all capabilities matching @ht_cap. @pos should always be a value returned @@ -409,26 +631,26 @@ static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap) * NB. To be 100% safe against broken PCI devices, the caller should take * steps to avoid an infinite loop. */ -int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap) +u8 pci_find_next_ht_capability(struct pci_dev *dev, u8 pos, int ht_cap) { return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap); } EXPORT_SYMBOL_GPL(pci_find_next_ht_capability); /** - * pci_find_ht_capability - query a device's Hypertransport capabilities + * pci_find_ht_capability - query a device's HyperTransport capabilities * @dev: PCI device to query - * @ht_cap: Hypertransport capability code + * @ht_cap: HyperTransport capability code * - * Tell if a device supports a given Hypertransport capability. + * Tell if a device supports a given HyperTransport capability. * Returns an address within the device's PCI configuration space * or 0 in case the device does not support the request capability. * The address points to the PCI capability, of type PCI_CAP_ID_HT, - * which has a Hypertransport capability matching @ht_cap. + * which has a HyperTransport capability matching @ht_cap. 
*/ -int pci_find_ht_capability(struct pci_dev *dev, int ht_cap) +u8 pci_find_ht_capability(struct pci_dev *dev, int ht_cap) { - int pos; + u8 pos; pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type); if (pos) @@ -439,21 +661,86 @@ int pci_find_ht_capability(struct pci_dev *dev, int ht_cap) EXPORT_SYMBOL_GPL(pci_find_ht_capability); /** - * pci_find_parent_resource - return resource region of parent bus of given region + * pci_find_vsec_capability - Find a vendor-specific extended capability + * @dev: PCI device to query + * @vendor: Vendor ID for which capability is defined + * @cap: Vendor-specific capability ID + * + * If @dev has Vendor ID @vendor, search for a VSEC capability with + * VSEC ID @cap. If found, return the capability offset in + * config space; otherwise return 0. + */ +u16 pci_find_vsec_capability(struct pci_dev *dev, u16 vendor, int cap) +{ + u16 vsec = 0; + u32 header; + int ret; + + if (vendor != dev->vendor) + return 0; + + while ((vsec = pci_find_next_ext_capability(dev, vsec, + PCI_EXT_CAP_ID_VNDR))) { + ret = pci_read_config_dword(dev, vsec + PCI_VNDR_HEADER, &header); + if (ret != PCIBIOS_SUCCESSFUL) + continue; + + if (PCI_VNDR_HEADER_ID(header) == cap) + return vsec; + } + + return 0; +} +EXPORT_SYMBOL_GPL(pci_find_vsec_capability); + +/** + * pci_find_dvsec_capability - Find DVSEC for vendor + * @dev: PCI device to query + * @vendor: Vendor ID to match for the DVSEC + * @dvsec: Designated Vendor-specific capability ID + * + * If DVSEC has Vendor ID @vendor and DVSEC ID @dvsec return the capability + * offset in config space; otherwise return 0. + */ +u16 pci_find_dvsec_capability(struct pci_dev *dev, u16 vendor, u16 dvsec) +{ + int pos; + + pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DVSEC); + if (!pos) + return 0; + + while (pos) { + u16 v, id; + + pci_read_config_word(dev, pos + PCI_DVSEC_HEADER1, &v); + pci_read_config_word(dev, pos + PCI_DVSEC_HEADER2, &id); + if (vendor == v && dvsec == id) + return pos; + + pos = pci_find_next_ext_capability(dev, pos, PCI_EXT_CAP_ID_DVSEC); + } + + return 0; +} +EXPORT_SYMBOL_GPL(pci_find_dvsec_capability); + +/** + * pci_find_parent_resource - return resource region of parent bus of given + * region * @dev: PCI device structure contains resources to be searched * @res: child resource record for which parent is sought * - * For given resource region of given device, return the resource - * region of parent bus the given region is contained in. + * For given resource region of given device, return the resource region of + * parent bus the given region is contained in. 
*/ struct resource *pci_find_parent_resource(const struct pci_dev *dev, struct resource *res) { const struct pci_bus *bus = dev->bus; struct resource *r; - int i; - pci_bus_for_each_resource(bus, r, i) { + pci_bus_for_each_resource(bus, r) { if (!r) continue; if (resource_contains(r, res)) { @@ -494,7 +781,7 @@ struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res) { int i; - for (i = 0; i < PCI_ROM_RESOURCE; i++) { + for (i = 0; i < PCI_STD_NUM_BARS; i++) { struct resource *r = &dev->resource[i]; if (r->start && resource_contains(r, res)) @@ -506,28 +793,64 @@ struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res) EXPORT_SYMBOL(pci_find_resource); /** - * pci_find_pcie_root_port - return PCIe Root Port + * pci_resource_name - Return the name of the PCI resource * @dev: PCI device to query + * @i: index of the resource * - * Traverse up the parent chain and return the PCIe Root Port PCI Device - * for a given PCI Device. + * Return the standard PCI resource (BAR) name according to their index. */ -struct pci_dev *pci_find_pcie_root_port(struct pci_dev *dev) +const char *pci_resource_name(struct pci_dev *dev, unsigned int i) { - struct pci_dev *bridge, *highest_pcie_bridge = NULL; + static const char * const bar_name[] = { + "BAR 0", + "BAR 1", + "BAR 2", + "BAR 3", + "BAR 4", + "BAR 5", + "ROM", +#ifdef CONFIG_PCI_IOV + "VF BAR 0", + "VF BAR 1", + "VF BAR 2", + "VF BAR 3", + "VF BAR 4", + "VF BAR 5", +#endif + "bridge window", /* "io" included in %pR */ + "bridge window", /* "mem" included in %pR */ + "bridge window", /* "mem pref" included in %pR */ + }; + static const char * const cardbus_name[] = { + "BAR 1", + "unknown", + "unknown", + "unknown", + "unknown", + "unknown", +#ifdef CONFIG_PCI_IOV + "unknown", + "unknown", + "unknown", + "unknown", + "unknown", + "unknown", +#endif + "CardBus bridge window 0", /* I/O */ + "CardBus bridge window 1", /* I/O */ + "CardBus bridge window 0", /* mem */ + "CardBus bridge window 1", /* mem */ + }; - bridge = pci_upstream_bridge(dev); - while (bridge && pci_is_pcie(bridge)) { - highest_pcie_bridge = bridge; - bridge = pci_upstream_bridge(bridge); - } + if (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS && + i < ARRAY_SIZE(cardbus_name)) + return cardbus_name[i]; - if (pci_pcie_type(highest_pcie_bridge) != PCI_EXP_TYPE_ROOT_PORT) - return NULL; + if (i < ARRAY_SIZE(bar_name)) + return bar_name[i]; - return highest_pcie_bridge; + return "unknown"; } -EXPORT_SYMBOL(pci_find_pcie_root_port); /** * pci_wait_for_pending - wait for @mask bit(s) to clear in status word @pos @@ -555,6 +878,178 @@ int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask) return 0; } +static int pci_acs_enable; + +/** + * pci_request_acs - ask for ACS to be enabled if supported + */ +void pci_request_acs(void) +{ + pci_acs_enable = 1; +} + +static const char *disable_acs_redir_param; +static const char *config_acs_param; + +struct pci_acs { + u16 cap; + u16 ctrl; + u16 fw_ctrl; +}; + +static void __pci_config_acs(struct pci_dev *dev, struct pci_acs *caps, + const char *p, const u16 acs_mask, const u16 acs_flags) +{ + u16 flags = acs_flags; + u16 mask = acs_mask; + char *delimit; + int ret = 0; + + if (!p) + return; + + while (*p) { + if (!acs_mask) { + /* Check for ACS flags */ + delimit = strstr(p, "@"); + if (delimit) { + int end; + u32 shift = 0; + + end = delimit - p - 1; + mask = 0; + flags = 0; + + while (end > -1) { + if (*(p + end) == '0') { + mask |= 1 << shift; + shift++; + end--; + } else if (*(p + end) == '1') { + 
mask |= 1 << shift; + flags |= 1 << shift; + shift++; + end--; + } else if ((*(p + end) == 'x') || (*(p + end) == 'X')) { + shift++; + end--; + } else { + pci_err(dev, "Invalid ACS flags... Ignoring\n"); + return; + } + } + p = delimit + 1; + } else { + pci_err(dev, "ACS Flags missing\n"); + return; + } + } + + if (mask & ~(PCI_ACS_SV | PCI_ACS_TB | PCI_ACS_RR | PCI_ACS_CR | + PCI_ACS_UF | PCI_ACS_EC | PCI_ACS_DT)) { + pci_err(dev, "Invalid ACS flags specified\n"); + return; + } + + ret = pci_dev_str_match(dev, p, &p); + if (ret < 0) { + pr_info_once("PCI: Can't parse ACS command line parameter\n"); + break; + } else if (ret == 1) { + /* Found a match */ + break; + } + + if (*p != ';' && *p != ',') { + /* End of param or invalid format */ + break; + } + p++; + } + + if (ret != 1) + return; + + if (!pci_dev_specific_disable_acs_redir(dev)) + return; + + pci_dbg(dev, "ACS mask = %#06x\n", mask); + pci_dbg(dev, "ACS flags = %#06x\n", flags); + pci_dbg(dev, "ACS control = %#06x\n", caps->ctrl); + pci_dbg(dev, "ACS fw_ctrl = %#06x\n", caps->fw_ctrl); + + /* + * For mask bits that are 0, copy them from the firmware setting + * and apply flags for all the mask bits that are 1. + */ + caps->ctrl = (caps->fw_ctrl & ~mask) | (flags & mask); + + pci_info(dev, "Configured ACS to %#06x\n", caps->ctrl); +} + +/** + * pci_std_enable_acs - enable ACS on devices using standard ACS capabilities + * @dev: the PCI device + * @caps: default ACS controls + */ +static void pci_std_enable_acs(struct pci_dev *dev, struct pci_acs *caps) +{ + /* Source Validation */ + caps->ctrl |= (caps->cap & PCI_ACS_SV); + + /* P2P Request Redirect */ + caps->ctrl |= (caps->cap & PCI_ACS_RR); + + /* P2P Completion Redirect */ + caps->ctrl |= (caps->cap & PCI_ACS_CR); + + /* Upstream Forwarding */ + caps->ctrl |= (caps->cap & PCI_ACS_UF); + + /* Enable Translation Blocking for external devices and noats */ + if (pci_ats_disabled() || dev->external_facing || dev->untrusted) + caps->ctrl |= (caps->cap & PCI_ACS_TB); +} + +/** + * pci_enable_acs - enable ACS if hardware support it + * @dev: the PCI device + */ +static void pci_enable_acs(struct pci_dev *dev) +{ + struct pci_acs caps; + bool enable_acs = false; + int pos; + + /* If an iommu is present we start with kernel default caps */ + if (pci_acs_enable) { + if (pci_dev_specific_enable_acs(dev)) + enable_acs = true; + } + + pos = dev->acs_cap; + if (!pos) + return; + + pci_read_config_word(dev, pos + PCI_ACS_CAP, &caps.cap); + pci_read_config_word(dev, pos + PCI_ACS_CTRL, &caps.ctrl); + caps.fw_ctrl = caps.ctrl; + + if (enable_acs) + pci_std_enable_acs(dev, &caps); + + /* + * Always apply caps from the command line, even if there is no iommu. + * Trust that the admin has a reason to change the ACS settings. + */ + __pci_config_acs(dev, &caps, disable_acs_redir_param, + PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC, + ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC)); + __pci_config_acs(dev, &caps, config_acs_param, 0, 0); + + pci_write_config_word(dev, pos + PCI_ACS_CTRL, caps.ctrl); +} + /** * pci_restore_bars - restore a device's BAR values (e.g. 
after wake-up) * @dev: PCI device to have its BARs restored @@ -570,155 +1065,67 @@ static void pci_restore_bars(struct pci_dev *dev) pci_update_resource(dev, i); } -static const struct pci_platform_pm_ops *pci_platform_pm; - -int pci_set_platform_pm(const struct pci_platform_pm_ops *ops) -{ - if (!ops->is_manageable || !ops->set_state || !ops->get_state || - !ops->choose_state || !ops->set_wakeup || !ops->need_resume) - return -EINVAL; - pci_platform_pm = ops; - return 0; -} - static inline bool platform_pci_power_manageable(struct pci_dev *dev) { - return pci_platform_pm ? pci_platform_pm->is_manageable(dev) : false; + if (pci_use_mid_pm()) + return true; + + return acpi_pci_power_manageable(dev); } static inline int platform_pci_set_power_state(struct pci_dev *dev, pci_power_t t) { - return pci_platform_pm ? pci_platform_pm->set_state(dev, t) : -ENOSYS; + if (pci_use_mid_pm()) + return mid_pci_set_power_state(dev, t); + + return acpi_pci_set_power_state(dev, t); } static inline pci_power_t platform_pci_get_power_state(struct pci_dev *dev) { - return pci_platform_pm ? pci_platform_pm->get_state(dev) : PCI_UNKNOWN; + if (pci_use_mid_pm()) + return mid_pci_get_power_state(dev); + + return acpi_pci_get_power_state(dev); +} + +static inline void platform_pci_refresh_power_state(struct pci_dev *dev) +{ + if (!pci_use_mid_pm()) + acpi_pci_refresh_power_state(dev); } static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev) { - return pci_platform_pm ? - pci_platform_pm->choose_state(dev) : PCI_POWER_ERROR; + if (pci_use_mid_pm()) + return PCI_POWER_ERROR; + + return acpi_pci_choose_state(dev); } static inline int platform_pci_set_wakeup(struct pci_dev *dev, bool enable) { - return pci_platform_pm ? - pci_platform_pm->set_wakeup(dev, enable) : -ENODEV; + if (pci_use_mid_pm()) + return PCI_POWER_ERROR; + + return acpi_pci_wakeup(dev, enable); } static inline bool platform_pci_need_resume(struct pci_dev *dev) { - return pci_platform_pm ? pci_platform_pm->need_resume(dev) : false; + if (pci_use_mid_pm()) + return false; + + return acpi_pci_need_resume(dev); } -/** - * pci_raw_set_power_state - Use PCI PM registers to set the power state of - * given PCI device - * @dev: PCI device to handle. - * @state: PCI power state (D0, D1, D2, D3hot) to put the device into. - * - * RETURN VALUE: - * -EINVAL if the requested state is invalid. - * -EIO if device does not support PCI PM or its PM capabilities register has a - * wrong version, or device doesn't support the requested state. - * 0 if device already is in the requested state. - * 0 if device's power state has been successfully changed. 
- */ -static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state) +static inline bool platform_pci_bridge_d3(struct pci_dev *dev) { - u16 pmcsr; - bool need_restore = false; - - /* Check if we're already there */ - if (dev->current_state == state) - return 0; - - if (!dev->pm_cap) - return -EIO; - - if (state < PCI_D0 || state > PCI_D3hot) - return -EINVAL; - - /* Validate current state: - * Can enter D0 from any state, but if we can only go deeper - * to sleep if we're already in a low power state - */ - if (state != PCI_D0 && dev->current_state <= PCI_D3cold - && dev->current_state > state) { - dev_err(&dev->dev, "invalid power transition (from state %d to %d)\n", - dev->current_state, state); - return -EINVAL; - } - - /* check if this device supports the desired state */ - if ((state == PCI_D1 && !dev->d1_support) - || (state == PCI_D2 && !dev->d2_support)) - return -EIO; - - pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr); - - /* If we're (effectively) in D3, force entire word to 0. - * This doesn't affect PME_Status, disables PME_En, and - * sets PowerState to 0. - */ - switch (dev->current_state) { - case PCI_D0: - case PCI_D1: - case PCI_D2: - pmcsr &= ~PCI_PM_CTRL_STATE_MASK; - pmcsr |= state; - break; - case PCI_D3hot: - case PCI_D3cold: - case PCI_UNKNOWN: /* Boot-up */ - if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot - && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET)) - need_restore = true; - /* Fall-through: force to D0 */ - default: - pmcsr = 0; - break; - } - - /* enter specified state */ - pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr); - - /* Mandatory power management transition delays */ - /* see PCI PM 1.1 5.6.1 table 18 */ - if (state == PCI_D3hot || dev->current_state == PCI_D3hot) - pci_dev_d3_sleep(dev); - else if (state == PCI_D2 || dev->current_state == PCI_D2) - udelay(PCI_PM_D2_DELAY); - - pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr); - dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK); - if (dev->current_state != state && printk_ratelimit()) - dev_info(&dev->dev, "Refused to change power state, currently in D%d\n", - dev->current_state); - - /* - * According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT - * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning - * from D3hot to D0 _may_ perform an internal reset, thereby - * going to "D0 Uninitialized" rather than "D0 Initialized". - * For example, at least some versions of the 3c905B and the - * 3c556B exhibit this behaviour. - * - * At least some laptop BIOSen (e.g. the Thinkpad T21) leave - * devices in a D3hot state at boot. Consequently, we need to - * restore at least the BARs so that the device will be - * accessible to its driver. 
- */ - if (need_restore) - pci_restore_bars(dev); - - if (dev->bus->self) - pcie_aspm_pm_state_change(dev->bus->self); + if (pci_use_mid_pm()) + return false; - return 0; + return acpi_pci_bridge_d3(dev); } /** @@ -735,30 +1142,33 @@ static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state) */ void pci_update_current_state(struct pci_dev *dev, pci_power_t state) { - if (platform_pci_get_power_state(dev) == PCI_D3cold || - !pci_device_is_present(dev)) { + if (platform_pci_get_power_state(dev) == PCI_D3cold) { dev->current_state = PCI_D3cold; } else if (dev->pm_cap) { u16 pmcsr; pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr); - dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK); + if (PCI_POSSIBLE_ERROR(pmcsr)) { + dev->current_state = PCI_D3cold; + return; + } + dev->current_state = pmcsr & PCI_PM_CTRL_STATE_MASK; } else { dev->current_state = state; } } /** - * pci_power_up - Put the given device into D0 forcibly - * @dev: PCI device to power up + * pci_refresh_power_state - Refresh the given device's power state data + * @dev: Target PCI device. + * + * Ask the platform to refresh the devices power state information and invoke + * pci_update_current_state() to update its current PCI power state. */ -void pci_power_up(struct pci_dev *dev) +void pci_refresh_power_state(struct pci_dev *dev) { - if (platform_pci_power_manageable(dev)) - platform_pci_set_power_state(dev, PCI_D0); - - pci_raw_set_power_state(dev, PCI_D0); - pci_update_current_state(dev, PCI_D0); + platform_pci_refresh_power_state(dev); + pci_update_current_state(dev, dev->current_state); } /** @@ -766,73 +1176,241 @@ void pci_power_up(struct pci_dev *dev) * @dev: PCI device to handle. * @state: State to put the device into. */ -static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state) +int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state) { int error; - if (platform_pci_power_manageable(dev)) { - error = platform_pci_set_power_state(dev, state); - if (!error) - pci_update_current_state(dev, state); - } else - error = -ENODEV; - - if (error && !dev->pm_cap) /* Fall back to PCI_D0 */ + error = platform_pci_set_power_state(dev, state); + if (!error) + pci_update_current_state(dev, state); + else if (!dev->pm_cap) /* Fall back to PCI_D0 */ dev->current_state = PCI_D0; return error; } +EXPORT_SYMBOL_GPL(pci_platform_power_transition); -/** - * pci_wakeup - Wake up a PCI device - * @pci_dev: Device to handle. - * @ign: ignored parameter - */ -static int pci_wakeup(struct pci_dev *pci_dev, void *ign) +static int pci_resume_one(struct pci_dev *pci_dev, void *ign) { - pci_wakeup_event(pci_dev); pm_request_resume(&pci_dev->dev); return 0; } /** - * pci_wakeup_bus - Walk given bus and wake up devices on it + * pci_resume_bus - Walk given bus and runtime resume devices on it * @bus: Top bus of the subtree to walk. 
*/ -static void pci_wakeup_bus(struct pci_bus *bus) +void pci_resume_bus(struct pci_bus *bus) { if (bus) - pci_walk_bus(bus, pci_wakeup, NULL); + pci_walk_bus(bus, pci_resume_one, NULL); +} + +static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout) +{ + int delay = 1; + bool retrain = false; + struct pci_dev *root, *bridge; + + root = pcie_find_root_port(dev); + + if (pci_is_pcie(dev)) { + bridge = pci_upstream_bridge(dev); + if (bridge) + retrain = true; + } + + /* + * The caller has already waited long enough after a reset that the + * device should respond to config requests, but it may respond + * with Request Retry Status (RRS) if it needs more time to + * initialize. + * + * If the device is below a Root Port with Configuration RRS + * Software Visibility enabled, reading the Vendor ID returns a + * special data value if the device responded with RRS. Read the + * Vendor ID until we get non-RRS status. + * + * If there's no Root Port or Configuration RRS Software Visibility + * is not enabled, the device may still respond with RRS, but + * hardware may retry the config request. If no retries receive + * Successful Completion, hardware generally synthesizes ~0 + * (PCI_ERROR_RESPONSE) data to complete the read. Reading Vendor + * ID for VFs and non-existent devices also returns ~0, so read the + * Command register until it returns something other than ~0. + */ + for (;;) { + u32 id; + + if (pci_dev_is_disconnected(dev)) { + pci_dbg(dev, "disconnected; not waiting\n"); + return -ENOTTY; + } + + if (root && root->config_rrs_sv) { + pci_read_config_dword(dev, PCI_VENDOR_ID, &id); + if (!pci_bus_rrs_vendor_id(id)) + break; + } else { + pci_read_config_dword(dev, PCI_COMMAND, &id); + if (!PCI_POSSIBLE_ERROR(id)) + break; + } + + if (delay > timeout) { + pci_warn(dev, "not ready %dms after %s; giving up\n", + delay - 1, reset_type); + return -ENOTTY; + } + + if (delay > PCI_RESET_WAIT) { + if (retrain) { + retrain = false; + if (pcie_failed_link_retrain(bridge) == 0) { + delay = 1; + continue; + } + } + pci_info(dev, "not ready %dms after %s; waiting\n", + delay - 1, reset_type); + } + + msleep(delay); + delay *= 2; + } + + if (delay > PCI_RESET_WAIT) + pci_info(dev, "ready %dms after %s\n", delay - 1, + reset_type); + else + pci_dbg(dev, "ready %dms after %s\n", delay - 1, + reset_type); + + return 0; } /** - * __pci_start_power_transition - Start power transition of a PCI device - * @dev: PCI device to handle. - * @state: State to put the device into. + * pci_power_up - Put the given device into D0 + * @dev: PCI device to power up + * + * On success, return 0 or 1, depending on whether or not it is necessary to + * restore the device's BARs subsequently (1 is returned in that case). + * + * On failure, return a negative error code. Always return failure if @dev + * lacks a Power Management Capability, even if the platform was able to + * put the device in D0 via non-PCI means. 
+ */ +int pci_power_up(struct pci_dev *dev) +{ + bool need_restore; + pci_power_t state; + u16 pmcsr; + + platform_pci_set_power_state(dev, PCI_D0); + + if (!dev->pm_cap) { + state = platform_pci_get_power_state(dev); + if (state == PCI_UNKNOWN) + dev->current_state = PCI_D0; + else + dev->current_state = state; + + return -EIO; + } + + if (pci_dev_is_disconnected(dev)) { + dev->current_state = PCI_D3cold; + return -EIO; + } + + pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr); + if (PCI_POSSIBLE_ERROR(pmcsr)) { + pci_err(dev, "Unable to change power state from %s to D0, device inaccessible\n", + pci_power_name(dev->current_state)); + dev->current_state = PCI_D3cold; + return -EIO; + } + + state = pmcsr & PCI_PM_CTRL_STATE_MASK; + + need_restore = (state == PCI_D3hot || dev->current_state >= PCI_D3hot) && + !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET); + + if (state == PCI_D0) + goto end; + + /* + * Force the entire word to 0. This doesn't affect PME_Status, disables + * PME_En, and sets PowerState to 0. + */ + pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, 0); + + /* Mandatory transition delays; see PCI PM 1.2. */ + if (state == PCI_D3hot) + pci_dev_d3_sleep(dev); + else if (state == PCI_D2) + udelay(PCI_PM_D2_DELAY); + +end: + dev->current_state = PCI_D0; + if (need_restore) + return 1; + + return 0; +} + +/** + * pci_set_full_power_state - Put a PCI device into D0 and update its state + * @dev: PCI device to power up + * @locked: whether pci_bus_sem is held + * + * Call pci_power_up() to put @dev into D0, read from its PCI_PM_CTRL register + * to confirm the state change, restore its BARs if they might be lost and + * reconfigure ASPM in accordance with the new power state. + * + * If pci_restore_state() is going to be called right after a power state change + * to D0, it is more efficient to use pci_power_up() directly instead of this + * function. */ -static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state) +static int pci_set_full_power_state(struct pci_dev *dev, bool locked) { - if (state == PCI_D0) { - pci_platform_power_transition(dev, PCI_D0); + u16 pmcsr; + int ret; + + ret = pci_power_up(dev); + if (ret < 0) { + if (dev->current_state == PCI_D0) + return 0; + + return ret; + } + + pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr); + dev->current_state = pmcsr & PCI_PM_CTRL_STATE_MASK; + if (dev->current_state != PCI_D0) { + pci_info_ratelimited(dev, "Refused to change power state from %s to D0\n", + pci_power_name(dev->current_state)); + } else if (ret > 0) { /* - * Mandatory power management transition delays, see - * PCI Express Base Specification Revision 2.0 Section - * 6.6.1: Conventional Reset. Do not delay for - * devices powered on/off by corresponding bridge, - * because have already delayed for the bridge. + * According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT + * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning + * from D3hot to D0 _may_ perform an internal reset, thereby + * going to "D0 Uninitialized" rather than "D0 Initialized". + * For example, at least some versions of the 3c905B and the + * 3c556B exhibit this behaviour. + * + * At least some laptop BIOSen (e.g. the Thinkpad T21) leave + * devices in a D3hot state at boot. Consequently, we need to + * restore at least the BARs so that the device will be + * accessible to its driver. 
*/ - if (dev->runtime_d3cold) { - if (dev->d3cold_delay) - msleep(dev->d3cold_delay); - /* - * When powering on a bridge from D3cold, the - * whole hierarchy may be powered on into - * D0uninitialized state, resume them to give - * them a chance to suspend again - */ - pci_wakeup_bus(dev->subordinate); - } + pci_restore_bars(dev); } + + if (dev->bus->self) + pcie_aspm_pm_state_change(dev->bus->self, locked); + + return 0; } /** @@ -849,44 +1427,34 @@ static int __pci_dev_set_current_state(struct pci_dev *dev, void *data) } /** - * __pci_bus_set_current_state - Walk given bus and set current state of devices + * pci_bus_set_current_state - Walk given bus and set current state of devices * @bus: Top bus of the subtree to walk. * @state: state to be set */ -static void __pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state) +void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state) { if (bus) pci_walk_bus(bus, __pci_dev_set_current_state, &state); } -/** - * __pci_complete_power_transition - Complete power transition of a PCI device - * @dev: PCI device to handle. - * @state: State to put the device into. - * - * This function should not be called directly by device drivers. - */ -int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state) +static void __pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state, bool locked) { - int ret; + if (!bus) + return; - if (state <= PCI_D0) - return -EINVAL; - ret = pci_platform_power_transition(dev, state); - /* Power off the bridge may power off the whole hierarchy */ - if (!ret && state == PCI_D3cold) - __pci_bus_set_current_state(dev->subordinate, PCI_D3cold); - return ret; + if (locked) + pci_walk_bus_locked(bus, __pci_dev_set_current_state, &state); + else + pci_walk_bus(bus, __pci_dev_set_current_state, &state); } -EXPORT_SYMBOL_GPL(__pci_complete_power_transition); /** - * pci_set_power_state - Set the power state of a PCI device + * pci_set_low_power_state - Put a PCI device into a low-power state. * @dev: PCI device to handle. - * @state: PCI power state (D0, D1, D2, D3hot) to put the device into. + * @state: PCI power state (D1, D2, D3hot) to put the device into. + * @locked: whether pci_bus_sem is held * - * Transition a device to a new power state, using the platform firmware and/or - * the device's PCI PM registers. + * Use the device's PCI_PM_CTRL register to put it into a low-power state. * * RETURN VALUE: * -EINVAL if the requested state is invalid. @@ -895,20 +1463,81 @@ EXPORT_SYMBOL_GPL(__pci_complete_power_transition); * 0 if device already is in the requested state. * 0 if device's power state has been successfully changed. */ -int pci_set_power_state(struct pci_dev *dev, pci_power_t state) +static int pci_set_low_power_state(struct pci_dev *dev, pci_power_t state, bool locked) +{ + u16 pmcsr; + + if (!dev->pm_cap) + return -EIO; + + /* + * Validate transition: We can enter D0 from any state, but if + * we're already in a low-power state, we can only go deeper. E.g., + * we can go from D1 to D3, but we can't go directly from D3 to D1; + * we'd have to go from D3 to D0, then to D1. 
+ */ + if (dev->current_state <= PCI_D3cold && dev->current_state > state) { + pci_dbg(dev, "Invalid power transition (from %s to %s)\n", + pci_power_name(dev->current_state), + pci_power_name(state)); + return -EINVAL; + } + + /* Check if this device supports the desired state */ + if ((state == PCI_D1 && !dev->d1_support) + || (state == PCI_D2 && !dev->d2_support)) + return -EIO; + + pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr); + if (PCI_POSSIBLE_ERROR(pmcsr)) { + pci_err(dev, "Unable to change power state from %s to %s, device inaccessible\n", + pci_power_name(dev->current_state), + pci_power_name(state)); + dev->current_state = PCI_D3cold; + return -EIO; + } + + pmcsr &= ~PCI_PM_CTRL_STATE_MASK; + pmcsr |= state; + + /* Enter specified state */ + pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr); + + /* Mandatory power management transition delays; see PCI PM 1.2. */ + if (state == PCI_D3hot) + pci_dev_d3_sleep(dev); + else if (state == PCI_D2) + udelay(PCI_PM_D2_DELAY); + + pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr); + dev->current_state = pmcsr & PCI_PM_CTRL_STATE_MASK; + if (dev->current_state != state) + pci_info_ratelimited(dev, "Refused to change power state from %s to %s\n", + pci_power_name(dev->current_state), + pci_power_name(state)); + + if (dev->bus->self) + pcie_aspm_pm_state_change(dev->bus->self, locked); + + return 0; +} + +static int __pci_set_power_state(struct pci_dev *dev, pci_power_t state, bool locked) { int error; - /* bound the state we're entering */ + /* Bound the state we're entering */ if (state > PCI_D3cold) state = PCI_D3cold; else if (state < PCI_D0) state = PCI_D0; else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev)) + /* - * If the device or the parent bridge do not support PCI PM, - * ignore the request if we're doing anything other than putting - * it into D0 (which would only happen on boot). + * If the device or the parent bridge do not support PCI + * PM, ignore the request if we're doing anything other + * than putting it into D0 (which would only happen on + * boot). */ return 0; @@ -916,65 +1545,69 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state) if (dev->current_state == state) return 0; - __pci_start_power_transition(dev, state); + if (state == PCI_D0) + return pci_set_full_power_state(dev, locked); - /* This device is quirked not to be put into D3, so - don't put it in D3 */ + /* + * This device is quirked not to be put into D3, so don't put it in + * D3 + */ if (state >= PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3)) return 0; - /* - * To put device in D3cold, we put device into D3hot in native - * way, then put device into D3cold with platform ops - */ - error = pci_raw_set_power_state(dev, state > PCI_D3hot ? - PCI_D3hot : state); + if (state == PCI_D3cold) { + /* + * To put the device in D3cold, put it into D3hot in the native + * way, then put it into D3cold using platform ops. 
+ */ + error = pci_set_low_power_state(dev, PCI_D3hot, locked); - if (!__pci_complete_power_transition(dev, state)) - error = 0; + if (pci_platform_power_transition(dev, PCI_D3cold)) + return error; - return error; + /* Powering off a bridge may power off the whole hierarchy */ + if (dev->current_state == PCI_D3cold) + __pci_bus_set_current_state(dev->subordinate, PCI_D3cold, locked); + } else { + error = pci_set_low_power_state(dev, state, locked); + + if (pci_platform_power_transition(dev, state)) + return error; + } + + return 0; } -EXPORT_SYMBOL(pci_set_power_state); /** - * pci_choose_state - Choose the power state of a PCI device - * @dev: PCI device to be suspended - * @state: target sleep state for the whole system. This is the value - * that is passed to suspend() function. + * pci_set_power_state - Set the power state of a PCI device + * @dev: PCI device to handle. + * @state: PCI power state (D0, D1, D2, D3hot) to put the device into. * - * Returns PCI power state suitable for given device and given system - * message. + * Transition a device to a new power state, using the platform firmware and/or + * the device's PCI PM registers. + * + * RETURN VALUE: + * -EINVAL if the requested state is invalid. + * -EIO if device does not support PCI PM or its PM capabilities register has a + * wrong version, or device doesn't support the requested state. + * 0 if the transition is to D1 or D2 but D1 and D2 are not supported. + * 0 if device already is in the requested state. + * 0 if the transition is to D3 but D3 is not supported. + * 0 if device's power state has been successfully changed. */ - -pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state) +int pci_set_power_state(struct pci_dev *dev, pci_power_t state) { - pci_power_t ret; - - if (!dev->pm_cap) - return PCI_D0; + return __pci_set_power_state(dev, state, false); +} +EXPORT_SYMBOL(pci_set_power_state); - ret = platform_pci_choose_state(dev); - if (ret != PCI_POWER_ERROR) - return ret; +int pci_set_power_state_locked(struct pci_dev *dev, pci_power_t state) +{ + lockdep_assert_held(&pci_bus_sem); - switch (state.event) { - case PM_EVENT_ON: - return PCI_D0; - case PM_EVENT_FREEZE: - case PM_EVENT_PRETHAW: - /* REVISIT both freeze and pre-thaw "should" use D0 */ - case PM_EVENT_SUSPEND: - case PM_EVENT_HIBERNATE: - return PCI_D3hot; - default: - dev_info(&dev->dev, "unrecognized suspend event %d\n", - state.event); - BUG(); - } - return PCI_D0; + return __pci_set_power_state(dev, state, true); } -EXPORT_SYMBOL(pci_choose_state); +EXPORT_SYMBOL(pci_set_power_state_locked); #define PCI_EXP_SAVE_REGS 7 @@ -1011,7 +1644,7 @@ static int pci_save_pcie_state(struct pci_dev *dev) save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP); if (!save_state) { - dev_err(&dev->dev, "buffer not found in %s\n", __func__); + pci_err(dev, "buffer not found in %s\n", __func__); return -ENOMEM; } @@ -1024,6 +1657,9 @@ static int pci_save_pcie_state(struct pci_dev *dev) pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &cap[i++]); pcie_capability_read_word(dev, PCI_EXP_SLTCTL2, &cap[i++]); + pci_save_aspm_l1ss_state(dev); + pci_save_ltr_state(dev); + return 0; } @@ -1033,10 +1669,24 @@ static void pci_restore_pcie_state(struct pci_dev *dev) struct pci_cap_saved_state *save_state; u16 *cap; + /* + * Restore max latencies (in the LTR capability) before enabling + * LTR itself in PCI_EXP_DEVCTL2. 
+ */ + pci_restore_ltr_state(dev); + pci_restore_aspm_l1ss_state(dev); + save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP); if (!save_state) return; + /* + * Downstream ports reset the LTR enable bit when link goes down. + * Check and re-configure the bit here before restoring device. + * PCIe r5.0, sec 7.5.3.16. + */ + pci_bridge_reconfigure_ltr(dev); + cap = (u16 *)&save_state->cap.data[0]; pcie_capability_write_word(dev, PCI_EXP_DEVCTL, cap[i++]); pcie_capability_write_word(dev, PCI_EXP_LNKCTL, cap[i++]); @@ -1047,7 +1697,6 @@ static void pci_restore_pcie_state(struct pci_dev *dev) pcie_capability_write_word(dev, PCI_EXP_SLTCTL2, cap[i++]); } - static int pci_save_pcix_state(struct pci_dev *dev) { int pos; @@ -1059,7 +1708,7 @@ static int pci_save_pcix_state(struct pci_dev *dev) save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX); if (!save_state) { - dev_err(&dev->dev, "buffer not found in %s\n", __func__); + pci_err(dev, "buffer not found in %s\n", __func__); return -ENOMEM; } @@ -1084,17 +1733,20 @@ static void pci_restore_pcix_state(struct pci_dev *dev) pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]); } - /** - * pci_save_state - save the PCI configuration space of a device before suspending - * @dev: - PCI device that we're dealing with + * pci_save_state - save the PCI configuration space of a device before + * suspending + * @dev: PCI device that we're dealing with */ int pci_save_state(struct pci_dev *dev) { int i; /* XXX: 100% dword access ok here? */ - for (i = 0; i < 16; i++) + for (i = 0; i < 16; i++) { pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]); + pci_dbg(dev, "save config %#04x: %#010x\n", + i * 4, dev->saved_config_space[i]); + } dev->state_saved = true; i = pci_save_pcie_state(dev); @@ -1105,21 +1757,25 @@ int pci_save_state(struct pci_dev *dev) if (i != 0) return i; + pci_save_dpc_state(dev); + pci_save_aer_state(dev); + pci_save_ptm_state(dev); + pci_save_tph_state(dev); return pci_save_vc_state(dev); } EXPORT_SYMBOL(pci_save_state); static void pci_restore_config_dword(struct pci_dev *pdev, int offset, - u32 saved_val, int retry) + u32 saved_val, int retry, bool force) { u32 val; pci_read_config_dword(pdev, offset, &val); - if (val == saved_val) + if (!force && val == saved_val) return; for (;;) { - dev_dbg(&pdev->dev, "restoring config space at offset %#x (was %#x, writing %#x)\n", + pci_dbg(pdev, "restore config %#04x: %#010x -> %#010x\n", offset, val, saved_val); pci_write_config_dword(pdev, offset, saved_val); if (retry-- <= 0) @@ -1134,45 +1790,57 @@ static void pci_restore_config_dword(struct pci_dev *pdev, int offset, } static void pci_restore_config_space_range(struct pci_dev *pdev, - int start, int end, int retry) + int start, int end, int retry, + bool force) { int index; for (index = end; index >= start; index--) pci_restore_config_dword(pdev, 4 * index, pdev->saved_config_space[index], - retry); + retry, force); } static void pci_restore_config_space(struct pci_dev *pdev) { if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) { - pci_restore_config_space_range(pdev, 10, 15, 0); + pci_restore_config_space_range(pdev, 10, 15, 0, false); /* Restore BARs before the command register. 
*/ - pci_restore_config_space_range(pdev, 4, 9, 10); - pci_restore_config_space_range(pdev, 0, 3, 0); + pci_restore_config_space_range(pdev, 4, 9, 10, false); + pci_restore_config_space_range(pdev, 0, 3, 0, false); + } else if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE) { + pci_restore_config_space_range(pdev, 12, 15, 0, false); + + /* + * Force rewriting of prefetch registers to avoid S3 resume + * issues on Intel PCI bridges that occur when these + * registers are not explicitly written. + */ + pci_restore_config_space_range(pdev, 9, 11, 0, true); + pci_restore_config_space_range(pdev, 0, 8, 0, false); } else { - pci_restore_config_space_range(pdev, 0, 15, 0); + pci_restore_config_space_range(pdev, 0, 15, 0, false); } } /** * pci_restore_state - Restore the saved state of a PCI device - * @dev: - PCI device that we're dealing with + * @dev: PCI device that we're dealing with */ void pci_restore_state(struct pci_dev *dev) { - if (!dev->state_saved) - return; - - /* PCI Express register must be restored first */ pci_restore_pcie_state(dev); pci_restore_pasid_state(dev); pci_restore_pri_state(dev); pci_restore_ats_state(dev); pci_restore_vc_state(dev); + pci_restore_rebar_state(dev); + pci_restore_dpc_state(dev); + pci_restore_ptm_state(dev); + pci_restore_tph_state(dev); - pci_cleanup_aer_error_status_regs(dev); + pci_aer_clear_status(dev); + pci_restore_aer_state(dev); pci_restore_config_space(dev); @@ -1189,7 +1857,7 @@ EXPORT_SYMBOL(pci_restore_state); struct pci_saved_state { u32 config_space[16]; - struct pci_cap_saved_data cap[0]; + struct pci_cap_saved_data cap[]; }; /** @@ -1290,6 +1958,28 @@ int __weak pcibios_enable_device(struct pci_dev *dev, int bars) return pci_enable_resources(dev, bars); } +static int pci_host_bridge_enable_device(struct pci_dev *dev) +{ + struct pci_host_bridge *host_bridge = pci_find_host_bridge(dev->bus); + int err; + + if (host_bridge && host_bridge->enable_device) { + err = host_bridge->enable_device(host_bridge, dev); + if (err) + return err; + } + + return 0; +} + +static void pci_host_bridge_disable_device(struct pci_dev *dev) +{ + struct pci_host_bridge *host_bridge = pci_find_host_bridge(dev->bus); + + if (host_bridge && host_bridge->disable_device) + host_bridge->disable_device(host_bridge, dev); +} + static int do_pci_enable_device(struct pci_dev *dev, int bars) { int err; @@ -1305,9 +1995,13 @@ static int do_pci_enable_device(struct pci_dev *dev, int bars) if (bridge) pcie_aspm_powersave_config_link(bridge); + err = pci_host_bridge_enable_device(dev); + if (err) + return err; + err = pcibios_enable_device(dev, bars); if (err < 0) - return err; + goto err_enable; pci_fixup_device(pci_fixup_enable, dev); if (dev->msi_enabled || dev->msix_enabled) @@ -1322,14 +2016,20 @@ static int do_pci_enable_device(struct pci_dev *dev, int bars) } return 0; + +err_enable: + pci_host_bridge_disable_device(dev); + + return err; + } /** * pci_reenable_device - Resume abandoned device * @dev: PCI device to be resumed * - * Note this function is a backend of pci_default_resume and is not supposed - * to be called by normal code, write proper resume handler and use it instead. + * NOTE: This function is a backend of pci_default_resume() and is not supposed + * to be called by normal code, write proper resume handler and use it instead. 
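+ *
+ * Editorial sketch, not part of this change: the "proper resume handler"
+ * referred to above is, for a legacy-style PCI driver, roughly
+ *
+ *	pci_restore_state(pdev);
+ *	err = pci_enable_device(pdev);
+ *	if (err)
+ *		return err;
+ *	pci_set_master(pdev);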
*/ int pci_reenable_device(struct pci_dev *dev) { @@ -1356,7 +2056,7 @@ static void pci_enable_bridge(struct pci_dev *dev) retval = pci_enable_device(dev); if (retval) - dev_err(&dev->dev, "Error enabling bridge (%d), continuing\n", + pci_err(dev, "Error enabling bridge (%d), continuing\n", retval); pci_set_master(dev); } @@ -1373,11 +2073,7 @@ static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags) * so that things like MSI message writing will behave as expected * (e.g. if the device really is in D0 at enable time). */ - if (dev->pm_cap) { - u16 pmcsr; - pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr); - dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK); - } + pci_update_current_state(dev, dev->current_state); if (atomic_inc_return(&dev->enable_cnt) > 1) return 0; /* already enabled */ @@ -1401,26 +2097,12 @@ static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags) } /** - * pci_enable_device_io - Initialize a device for use with IO space - * @dev: PCI device to be initialized - * - * Initialize device before it's used by a driver. Ask low-level code - * to enable I/O resources. Wake up the device if it was suspended. - * Beware, this function can fail. - */ -int pci_enable_device_io(struct pci_dev *dev) -{ - return pci_enable_device_flags(dev, IORESOURCE_IO); -} -EXPORT_SYMBOL(pci_enable_device_io); - -/** * pci_enable_device_mem - Initialize a device for use with Memory space * @dev: PCI device to be initialized * - * Initialize device before it's used by a driver. Ask low-level code - * to enable Memory resources. Wake up the device if it was suspended. - * Beware, this function can fail. + * Initialize device before it's used by a driver. Ask low-level code + * to enable Memory resources. Wake up the device if it was suspended. + * Beware, this function can fail. */ int pci_enable_device_mem(struct pci_dev *dev) { @@ -1432,12 +2114,12 @@ EXPORT_SYMBOL(pci_enable_device_mem); * pci_enable_device - Initialize device before it's used by a driver. * @dev: PCI device to be initialized * - * Initialize device before it's used by a driver. Ask low-level code - * to enable I/O and memory. Wake up the device if it was suspended. - * Beware, this function can fail. + * Initialize device before it's used by a driver. Ask low-level code + * to enable I/O and memory. Wake up the device if it was suspended. + * Beware, this function can fail. * - * Note we don't actually enable the device many times if we call - * this function repeatedly (we just increment the count). + * Note we don't actually enable the device many times if we call + * this function repeatedly (we just increment the count). */ int pci_enable_device(struct pci_dev *dev) { @@ -1446,122 +2128,21 @@ int pci_enable_device(struct pci_dev *dev) EXPORT_SYMBOL(pci_enable_device); /* - * Managed PCI resources. This manages device on/off, intx/msi/msix - * on/off and BAR regions. pci_dev itself records msi/msix status, so - * there's no need to track it separately. pci_devres is initialized - * when a device is enabled using managed PCI device enable interface. 
- */ -struct pci_devres { - unsigned int enabled:1; - unsigned int pinned:1; - unsigned int orig_intx:1; - unsigned int restore_intx:1; - u32 region_mask; -}; - -static void pcim_release(struct device *gendev, void *res) -{ - struct pci_dev *dev = to_pci_dev(gendev); - struct pci_devres *this = res; - int i; - - if (dev->msi_enabled) - pci_disable_msi(dev); - if (dev->msix_enabled) - pci_disable_msix(dev); - - for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) - if (this->region_mask & (1 << i)) - pci_release_region(dev, i); - - if (this->restore_intx) - pci_intx(dev, this->orig_intx); - - if (this->enabled && !this->pinned) - pci_disable_device(dev); -} - -static struct pci_devres *get_pci_dr(struct pci_dev *pdev) -{ - struct pci_devres *dr, *new_dr; - - dr = devres_find(&pdev->dev, pcim_release, NULL, NULL); - if (dr) - return dr; - - new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL); - if (!new_dr) - return NULL; - return devres_get(&pdev->dev, new_dr, NULL, NULL); -} - -static struct pci_devres *find_pci_dr(struct pci_dev *pdev) -{ - if (pci_is_managed(pdev)) - return devres_find(&pdev->dev, pcim_release, NULL, NULL); - return NULL; -} - -/** - * pcim_enable_device - Managed pci_enable_device() - * @pdev: PCI device to be initialized - * - * Managed pci_enable_device(). - */ -int pcim_enable_device(struct pci_dev *pdev) -{ - struct pci_devres *dr; - int rc; - - dr = get_pci_dr(pdev); - if (unlikely(!dr)) - return -ENOMEM; - if (dr->enabled) - return 0; - - rc = pci_enable_device(pdev); - if (!rc) { - pdev->is_managed = 1; - dr->enabled = 1; - } - return rc; -} -EXPORT_SYMBOL(pcim_enable_device); - -/** - * pcim_pin_device - Pin managed PCI device - * @pdev: PCI device to pin - * - * Pin managed PCI device @pdev. Pinned device won't be disabled on - * driver detach. @pdev must have been enabled with - * pcim_enable_device(). - */ -void pcim_pin_device(struct pci_dev *pdev) -{ - struct pci_devres *dr; - - dr = find_pci_dr(pdev); - WARN_ON(!dr || !dr->enabled); - if (dr) - dr->pinned = 1; -} -EXPORT_SYMBOL(pcim_pin_device); - -/* - * pcibios_add_device - provide arch specific hooks when adding device dev + * pcibios_device_add - provide arch specific hooks when adding device dev * @dev: the PCI device being added * * Permits the platform to provide architecture specific functionality when * devices are added. This is the default implementation. Architecture * implementations can override this. */ -int __weak pcibios_add_device(struct pci_dev *dev) +int __weak pcibios_device_add(struct pci_dev *dev) { return 0; } /** - * pcibios_release_device - provide arch specific hooks when releasing device dev + * pcibios_release_device - provide arch specific hooks when releasing + * device dev * @dev: the PCI device being released * * Permits the platform to provide architecture specific functionality when @@ -1580,17 +2161,6 @@ void __weak pcibios_release_device(struct pci_dev *dev) {} */ void __weak pcibios_disable_device(struct pci_dev *dev) {} -/** - * pcibios_penalize_isa_irq - penalize an ISA IRQ - * @irq: ISA IRQ to penalize - * @active: IRQ active or not - * - * Permits the platform to provide architecture-specific functionality when - * penalizing ISA IRQs. This is the default implementation. Architecture - * implementations can override this. 
- */ -void __weak pcibios_penalize_isa_irq(int irq, int active) {} - static void do_pci_disable_device(struct pci_dev *dev) { u16 pci_command; @@ -1629,18 +2199,14 @@ void pci_disable_enabled_device(struct pci_dev *dev) */ void pci_disable_device(struct pci_dev *dev) { - struct pci_devres *dr; - - dr = find_pci_dr(dev); - if (dr) - dr->enabled = 0; - dev_WARN_ONCE(&dev->dev, atomic_read(&dev->enable_cnt) <= 0, "disabling already-disabled device"); if (atomic_dec_return(&dev->enable_cnt) != 0) return; + pci_host_bridge_disable_device(dev); + do_pci_disable_device(dev); dev->is_busmaster = 0; @@ -1652,8 +2218,7 @@ EXPORT_SYMBOL(pci_disable_device); * @dev: the PCIe device reset * @state: Reset state to enter into * - * - * Sets the PCIe reset state for the device. This is the default + * Set the PCIe reset state for the device. This is the default * implementation. Architecture implementations can override this. */ int __weak pcibios_set_pcie_reset_state(struct pci_dev *dev, @@ -1667,7 +2232,6 @@ int __weak pcibios_set_pcie_reset_state(struct pci_dev *dev, * @dev: the PCIe device reset * @state: Reset state to enter into * - * * Sets the PCI reset state for the device. */ int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state) @@ -1676,6 +2240,25 @@ int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state) } EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state); +#ifdef CONFIG_PCIEAER +void pcie_clear_device_status(struct pci_dev *dev) +{ + u16 sta; + + pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &sta); + pcie_capability_write_word(dev, PCI_EXP_DEVSTA, sta); +} +#endif + +/** + * pcie_clear_root_pme_status - Clear root port PME interrupt status. + * @dev: PCIe root port or event collector. + */ +void pcie_clear_root_pme_status(struct pci_dev *dev) +{ + pcie_capability_set_dword(dev, PCI_EXP_RTSTA, PCI_EXP_RTSTA_PME); +} + /** * pci_check_pme_status - Check if given device has generated PME. * @dev: Device to check. @@ -1762,18 +2345,41 @@ static void pci_pme_list_scan(struct work_struct *work) mutex_lock(&pci_pme_list_mutex); list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) { - if (pme_dev->dev->pme_poll) { - struct pci_dev *bridge; + struct pci_dev *pdev = pme_dev->dev; + + if (pdev->pme_poll) { + struct pci_dev *bridge = pdev->bus->self; + struct device *dev = &pdev->dev; + struct device *bdev = bridge ? &bridge->dev : NULL; + int bref = 0; - bridge = pme_dev->dev->bus->self; /* - * If bridge is in low power state, the - * configuration space of subordinate devices - * may be not accessible + * If we have a bridge, it should be in an active/D0 + * state or the configuration space of subordinate + * devices may not be accessible or stable over the + * course of the call. */ - if (bridge && bridge->current_state != PCI_D0) - continue; - pci_pme_wakeup(pme_dev->dev, NULL); + if (bdev) { + bref = pm_runtime_get_if_active(bdev); + if (!bref) + continue; + + if (bridge->current_state != PCI_D0) + goto put_bridge; + } + + /* + * The device itself should be suspended but config + * space must be accessible, therefore it cannot be in + * D3cold. 
+ */ + if (pm_runtime_suspended(dev) && + pdev->current_state != PCI_D3cold) + pci_pme_wakeup(pdev, NULL); + +put_bridge: + if (bref > 0) + pm_runtime_put(bdev); } else { list_del(&pme_dev->list); kfree(pme_dev); @@ -1861,7 +2467,7 @@ void pci_pme_active(struct pci_dev *dev, bool enable) pme_dev = kmalloc(sizeof(struct pci_pme_device), GFP_KERNEL); if (!pme_dev) { - dev_warn(&dev->dev, "can't enable PME#\n"); + pci_warn(dev, "can't enable PME#\n"); return; } pme_dev->dev = dev; @@ -1885,12 +2491,12 @@ void pci_pme_active(struct pci_dev *dev, bool enable) } } - dev_dbg(&dev->dev, "PME# %s\n", enable ? "enabled" : "disabled"); + pci_dbg(dev, "PME# %s\n", enable ? "enabled" : "disabled"); } EXPORT_SYMBOL(pci_pme_active); /** - * pci_enable_wake - enable PCI device as wakeup event source + * __pci_enable_wake - enable PCI device as wakeup event source * @dev: PCI device affected * @state: PCI state from which device will issue wakeup events * @enable: True to enable event generation; false to disable @@ -1908,10 +2514,20 @@ EXPORT_SYMBOL(pci_pme_active); * Error code depending on the platform is returned if both the platform and * the native mechanism fail to enable the generation of wake-up events */ -int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable) +static int __pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable) { int ret = 0; + /* + * Bridges that are not power-manageable directly only signal + * wakeup on behalf of subordinate devices which is set up + * elsewhere, so skip them. However, bridges that are + * power-manageable may signal wakeup for themselves (for example, + * on a hotplug event) and they need to be covered here. + */ + if (!pci_power_manageable(dev)) + return 0; + /* Don't do the same thing twice in a row for one device. */ if (!!enable == !!dev->wakeup_prepared) return 0; @@ -1925,7 +2541,14 @@ int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable) if (enable) { int error; - if (pci_pme_capable(dev, state)) + /* + * Enable PME signaling if the device can signal PME from + * D3cold regardless of whether or not it can signal PME from + * the current target state, because that will allow it to + * signal PME when the hierarchy above it goes into D3cold and + * the device itself ends up in D3cold as a result of that. + */ + if (pci_pme_capable(dev, state) || pci_pme_capable(dev, PCI_D3cold)) pci_pme_active(dev, true); else ret = 1; @@ -1942,6 +2565,23 @@ int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable) return ret; } + +/** + * pci_enable_wake - change wakeup settings for a PCI device + * @pci_dev: Target device + * @state: PCI state from which device will issue wakeup events + * @enable: Whether or not to enable event generation + * + * If @enable is set, check device_may_wakeup() for the device before calling + * __pci_enable_wake() for it. + */ +int pci_enable_wake(struct pci_dev *pci_dev, pci_power_t state, bool enable) +{ + if (enable && !device_may_wakeup(&pci_dev->dev)) + return -EINVAL; + + return __pci_enable_wake(pci_dev, state, enable); +} EXPORT_SYMBOL(pci_enable_wake); /** @@ -1954,9 +2594,9 @@ EXPORT_SYMBOL(pci_enable_wake); * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI * ordering constraints. * - * This function only returns error code if the device is not capable of - * generating PME# from both D3_hot and D3_cold, and the platform is unable to - * enable wake-up power for it. 
+ * This function only returns error code if the device is not allowed to wake + * up the system from sleep or it is not capable of generating PME# from both + * D3_hot and D3_cold and the platform is unable to enable wake-up power for it. */ int pci_wake_from_d3(struct pci_dev *dev, bool enable) { @@ -1977,59 +2617,58 @@ EXPORT_SYMBOL(pci_wake_from_d3); */ static pci_power_t pci_target_state(struct pci_dev *dev, bool wakeup) { - pci_power_t target_state = PCI_D3hot; - if (platform_pci_power_manageable(dev)) { /* - * Call the platform to choose the target state of the device - * and enable wake-up from this state if supported. + * Call the platform to find the target state for the device. */ pci_power_t state = platform_pci_choose_state(dev); switch (state) { case PCI_POWER_ERROR: case PCI_UNKNOWN: - break; + return PCI_D3hot; + case PCI_D1: case PCI_D2: if (pci_no_d1d2(dev)) - break; - default: - target_state = state; + return PCI_D3hot; } - return target_state; + return state; } - if (!dev->pm_cap) - target_state = PCI_D0; - /* * If the device is in D3cold even though it's not power-manageable by * the platform, it may have been powered down by non-standard means. * Best to let it slumber. */ if (dev->current_state == PCI_D3cold) - target_state = PCI_D3cold; + return PCI_D3cold; + else if (!dev->pm_cap) + return PCI_D0; + + if (wakeup && dev->pme_support) { + pci_power_t state = PCI_D3hot; - if (wakeup) { /* * Find the deepest state from which the device can generate - * wake-up events, make it the target state and enable device - * to generate PME#. + * PME#. */ - if (dev->pme_support) { - while (target_state - && !(dev->pme_support & (1 << target_state))) - target_state--; - } + while (state && !(dev->pme_support & (1 << state))) + state--; + + if (state) + return state; + else if (dev->pme_support & 1) + return PCI_D0; } - return target_state; + return PCI_D3hot; } /** - * pci_prepare_to_sleep - prepare PCI device for system-wide transition into a sleep state + * pci_prepare_to_sleep - prepare PCI device for system-wide transition + * into a sleep state * @dev: Device to handle. * * Choose the power state appropriate for the device depending on whether @@ -2057,15 +2696,21 @@ int pci_prepare_to_sleep(struct pci_dev *dev) EXPORT_SYMBOL(pci_prepare_to_sleep); /** - * pci_back_from_sleep - turn PCI device on during system-wide transition into working state + * pci_back_from_sleep - turn PCI device on during system-wide transition + * into working state * @dev: Device to handle. * * Disable device's system wake-up capability and put it into D0. 
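 *
 * Editorial sketch, not part of this change: conceptually this is the
 * complement of pci_prepare_to_sleep(), e.g.
 *
 *	suspend path: pci_prepare_to_sleep(pdev);
 *	resume path:  pci_back_from_sleep(pdev);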
*/ int pci_back_from_sleep(struct pci_dev *dev) { + int ret = pci_set_power_state(dev, PCI_D0); + + if (ret) + return ret; + pci_enable_wake(dev, PCI_D0, false); - return pci_set_power_state(dev, PCI_D0); + return 0; } EXPORT_SYMBOL(pci_back_from_sleep); @@ -2085,16 +2730,12 @@ int pci_finish_runtime_suspend(struct pci_dev *dev) if (target_state == PCI_POWER_ERROR) return -EIO; - dev->runtime_d3cold = target_state == PCI_D3cold; - - pci_enable_wake(dev, target_state, pci_dev_run_wake(dev)); + __pci_enable_wake(dev, target_state, pci_dev_run_wake(dev)); error = pci_set_power_state(dev, target_state); - if (error) { + if (error) pci_enable_wake(dev, target_state, false); - dev->runtime_d3cold = false; - } return error; } @@ -2111,16 +2752,16 @@ bool pci_dev_run_wake(struct pci_dev *dev) { struct pci_bus *bus = dev->bus; - if (device_can_wakeup(&dev->dev)) - return true; - if (!dev->pme_support) return false; /* PME-capable in principle, but not from the target power state */ - if (!pci_pme_capable(dev, pci_target_state(dev, false))) + if (!pci_pme_capable(dev, pci_target_state(dev, true))) return false; + if (device_can_wakeup(&dev->dev)) + return true; + while (bus->parent) { struct pci_dev *bridge = bus->self; @@ -2139,46 +2780,56 @@ bool pci_dev_run_wake(struct pci_dev *dev) EXPORT_SYMBOL_GPL(pci_dev_run_wake); /** - * pci_dev_keep_suspended - Check if the device can stay in the suspended state. + * pci_dev_need_resume - Check if it is necessary to resume the device. * @pci_dev: Device to check. * - * Return 'true' if the device is runtime-suspended, it doesn't have to be + * Return 'true' if the device is not runtime-suspended or it has to be * reconfigured due to wakeup settings difference between system and runtime - * suspend and the current power state of it is suitable for the upcoming - * (system) transition. - * - * If the device is not configured for system wakeup, disable PME for it before - * returning 'true' to prevent it from waking up the system unnecessarily. + * suspend, or the current power state of it is not suitable for the upcoming + * (system-wide) transition. */ -bool pci_dev_keep_suspended(struct pci_dev *pci_dev) +bool pci_dev_need_resume(struct pci_dev *pci_dev) { struct device *dev = &pci_dev->dev; - bool wakeup = device_may_wakeup(dev); + pci_power_t target_state; - if (!pm_runtime_suspended(dev) - || pci_target_state(pci_dev, wakeup) != pci_dev->current_state - || platform_pci_need_resume(pci_dev) - || (pci_dev->dev_flags & PCI_DEV_FLAGS_NEEDS_RESUME)) - return false; + if (!pm_runtime_suspended(dev) || platform_pci_need_resume(pci_dev)) + return true; + + target_state = pci_target_state(pci_dev, device_may_wakeup(dev)); /* - * At this point the device is good to go unless it's been configured - * to generate PME at the runtime suspend time, but it is not supposed - * to wake up the system. In that case, simply disable PME for it - * (it will have to be re-enabled on exit from system resume). - * - * If the device's power state is D3cold and the platform check above - * hasn't triggered, the device's configuration is suitable and we don't - * need to manipulate it at all. + * If the earlier platform check has not triggered, D3cold is just power + * removal on top of D3hot, so no need to resume the device in that + * case. */ + return target_state != pci_dev->current_state && + target_state != PCI_D3cold && + pci_dev->current_state != PCI_D3hot; +} + +/** + * pci_dev_adjust_pme - Adjust PME setting for a suspended device. + * @pci_dev: Device to check. 
+ * + * If the device is suspended and it is not configured for system wakeup, + * disable PME for it to prevent it from waking up the system unnecessarily. + * + * Note that if the device's power state is D3cold and the platform check in + * pci_dev_need_resume() has not triggered, the device's configuration need not + * be changed. + */ +void pci_dev_adjust_pme(struct pci_dev *pci_dev) +{ + struct device *dev = &pci_dev->dev; + spin_lock_irq(&dev->power.lock); - if (pm_runtime_suspended(dev) && pci_dev->current_state < PCI_D3cold && - !wakeup) + if (pm_runtime_suspended(dev) && !device_may_wakeup(dev) && + pci_dev->current_state < PCI_D3cold) __pci_pme_active(pci_dev, false); spin_unlock_irq(&dev->power.lock); - return true; } /** @@ -2204,6 +2855,22 @@ void pci_dev_complete_resume(struct pci_dev *pci_dev) spin_unlock_irq(&dev->power.lock); } +/** + * pci_choose_state - Choose the power state of a PCI device. + * @dev: Target PCI device. + * @state: Target state for the whole system. + * + * Returns PCI power state suitable for @dev and @state. + */ +pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state) +{ + if (state.event == PM_EVENT_ON) + return PCI_D0; + + return pci_target_state(dev, false); +} +EXPORT_SYMBOL(pci_choose_state); + void pci_config_pm_runtime_get(struct pci_dev *pdev) { struct device *dev = &pdev->dev; @@ -2236,17 +2903,62 @@ void pci_config_pm_runtime_put(struct pci_dev *pdev) pm_runtime_put_sync(parent); } +static const struct dmi_system_id bridge_d3_blacklist[] = { +#ifdef CONFIG_X86 + { + /* + * Gigabyte X299 root port is not marked as hotplug capable + * which allows Linux to power manage it. However, this + * confuses the BIOS SMI handler so don't power manage root + * ports on that system. + */ + .ident = "X299 DESIGNARE EX-CF", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."), + DMI_MATCH(DMI_BOARD_NAME, "X299 DESIGNARE EX-CF"), + }, + }, + { + /* + * Downstream device is not accessible after putting a root port + * into D3cold and back into D0 on Elo Continental Z2 board + */ + .ident = "Elo Continental Z2", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Elo Touch Solutions"), + DMI_MATCH(DMI_BOARD_NAME, "Geminilake"), + DMI_MATCH(DMI_BOARD_VERSION, "Continental Z2"), + }, + }, + { + /* + * Changing power state of root port dGPU is connected fails + * https://gitlab.freedesktop.org/drm/amd/-/issues/3229 + */ + .ident = "Hewlett-Packard HP Pavilion 17 Notebook PC/1972", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"), + DMI_MATCH(DMI_BOARD_NAME, "1972"), + DMI_MATCH(DMI_BOARD_VERSION, "95.33"), + }, + }, +#endif + { } +}; + /** * pci_bridge_d3_possible - Is it possible to put the bridge into D3 * @bridge: Bridge to check * - * This function checks if it is possible to move the bridge to D3. - * Currently we only allow D3 for recent enough PCIe ports. + * Currently we only allow D3 for some PCIe ports and for Thunderbolt. + * + * Return: Whether it is possible to move the bridge to D3. + * + * The return value is guaranteed to be constant across the entire lifetime + * of the bridge, including its hot-removal. */ bool pci_bridge_d3_possible(struct pci_dev *bridge) { - unsigned int year; - if (!pci_is_pcie(bridge)) return false; @@ -2258,26 +2970,44 @@ bool pci_bridge_d3_possible(struct pci_dev *bridge) return false; /* - * Hotplug interrupts cannot be delivered if the link is down, - * so parents of a hotplug port must stay awake. 
In addition, - * hotplug ports handled by firmware in System Management Mode - * may not be put into D3 by the OS (Thunderbolt on non-Macs). - * For simplicity, disallow in general for now. + * Hotplug ports handled by platform firmware may not be put + * into D3 by the OS, e.g. ACPI slots ... */ - if (bridge->is_hotplug_bridge) + if (bridge->is_hotplug_bridge && !bridge->is_pciehp) + return false; + + /* ... or PCIe hotplug ports not handled natively by the OS. */ + if (bridge->is_pciehp && !pciehp_is_native(bridge)) return false; if (pci_bridge_d3_force) return true; + /* Even the oldest 2010 Thunderbolt controller supports D3. */ + if (bridge->is_thunderbolt) + return true; + + /* Platform might know better if the bridge supports D3 */ + if (platform_pci_bridge_d3(bridge)) + return true; + + /* + * Hotplug ports handled natively by the OS were not validated + * by vendors for runtime D3 at least until 2018 because there + * was no OS support. + */ + if (bridge->is_pciehp) + return false; + + if (dmi_check_system(bridge_d3_blacklist)) + return false; + /* - * It should be safe to put PCIe ports from 2015 or newer - * to D3. + * Out of caution, we only allow PCIe ports from 2015 or newer + * into D3 on x86. */ - if (dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL) && - year >= 2015) { + if (!IS_ENABLED(CONFIG_X86) || dmi_get_bios_year() >= 2015) return true; - } break; } @@ -2390,6 +3120,12 @@ void pci_d3cold_disable(struct pci_dev *dev) } EXPORT_SYMBOL_GPL(pci_d3cold_disable); +void pci_pm_power_up_and_verify_state(struct pci_dev *pci_dev) +{ + pci_power_up(pci_dev); + pci_update_current_state(pci_dev, PCI_D0); +} + /** * pci_pm_init - Initialize PM functions of given PCI device * @dev: PCI device to handle. @@ -2399,9 +3135,6 @@ void pci_pm_init(struct pci_dev *dev) int pm; u16 pmc; - pm_runtime_forbid(&dev->dev); - pm_runtime_set_active(&dev->dev); - pm_runtime_enable(&dev->dev); device_enable_async_suspend(&dev->dev); dev->wakeup_prepared = false; @@ -2411,18 +3144,18 @@ void pci_pm_init(struct pci_dev *dev) /* find PCI PM capability in list */ pm = pci_find_capability(dev, PCI_CAP_ID_PM); if (!pm) - return; + goto poweron; /* Check device's ability to generate PME# */ pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc); if ((pmc & PCI_PM_CAP_VER_MASK) > 3) { - dev_err(&dev->dev, "unsupported PM cap regs version (%u)\n", + pci_err(dev, "unsupported PM cap regs version (%u)\n", pmc & PCI_PM_CAP_VER_MASK); - return; + goto poweron; } dev->pm_cap = pm; - dev->d3_delay = PCI_PM_D3_WAIT; + dev->d3hot_delay = PCI_PM_D3HOT_WAIT; dev->d3cold_delay = PCI_PM_D3COLD_WAIT; dev->bridge_d3 = pci_bridge_d3_possible(dev); dev->d3cold_allowed = true; @@ -2436,21 +3169,20 @@ void pci_pm_init(struct pci_dev *dev) dev->d2_support = true; if (dev->d1_support || dev->d2_support) - dev_printk(KERN_DEBUG, &dev->dev, "supports%s%s\n", + pci_info(dev, "supports%s%s\n", dev->d1_support ? " D1" : "", dev->d2_support ? " D2" : ""); } pmc &= PCI_PM_CAP_PME_MASK; if (pmc) { - dev_printk(KERN_DEBUG, &dev->dev, - "PME# supported from%s%s%s%s%s\n", + pci_info(dev, "PME# supported from%s%s%s%s%s\n", (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "", (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "", (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "", - (pmc & PCI_PM_CAP_PME_D3) ? " D3hot" : "", + (pmc & PCI_PM_CAP_PME_D3hot) ? " D3hot" : "", (pmc & PCI_PM_CAP_PME_D3cold) ? 
" D3cold" : ""); - dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT; + dev->pme_support = FIELD_GET(PCI_PM_CAP_PME_MASK, pmc); dev->pme_poll = true; /* * Make device's PM flags reflect the wake-up capability, but @@ -2460,6 +3192,12 @@ void pci_pm_init(struct pci_dev *dev) /* Disable the PME# generation functionality */ pci_pme_active(dev, false); } + +poweron: + pci_pm_power_up_and_verify_state(dev); + pm_runtime_forbid(&dev->dev); + pm_runtime_set_active(&dev->dev); + pm_runtime_enable(&dev->dev); } static unsigned long pci_ea_flags(struct pci_dev *dev, u8 prop) @@ -2506,6 +3244,7 @@ static struct resource *pci_ea_get_resource(struct pci_dev *dev, u8 bei, static int pci_ea_read(struct pci_dev *dev, int offset) { struct resource *res; + const char *res_name; int ent_size, ent_offset = offset; resource_size_t start, end; unsigned long flags; @@ -2517,32 +3256,33 @@ static int pci_ea_read(struct pci_dev *dev, int offset) ent_offset += 4; /* Entry size field indicates DWORDs after 1st */ - ent_size = ((dw0 & PCI_EA_ES) + 1) << 2; + ent_size = (FIELD_GET(PCI_EA_ES, dw0) + 1) << 2; if (!(dw0 & PCI_EA_ENABLE)) /* Entry not enabled */ goto out; - bei = (dw0 & PCI_EA_BEI) >> 4; - prop = (dw0 & PCI_EA_PP) >> 8; + bei = FIELD_GET(PCI_EA_BEI, dw0); + prop = FIELD_GET(PCI_EA_PP, dw0); /* * If the Property is in the reserved range, try the Secondary * Property instead. */ if (prop > PCI_EA_P_BRIDGE_IO && prop < PCI_EA_P_MEM_RESERVED) - prop = (dw0 & PCI_EA_SP) >> 16; + prop = FIELD_GET(PCI_EA_SP, dw0); if (prop > PCI_EA_P_BRIDGE_IO) goto out; res = pci_ea_get_resource(dev, bei, prop); + res_name = pci_resource_name(dev, bei); if (!res) { - dev_err(&dev->dev, "Unsupported EA entry BEI: %u\n", bei); + pci_err(dev, "Unsupported EA entry BEI: %u\n", bei); goto out; } flags = pci_ea_flags(dev, prop); if (!flags) { - dev_err(&dev->dev, "Unsupported EA properties: %#x\n", prop); + pci_err(dev, "Unsupported EA properties: %#x\n", prop); goto out; } @@ -2592,13 +3332,12 @@ static int pci_ea_read(struct pci_dev *dev, int offset) } if (end < start) { - dev_err(&dev->dev, "EA Entry crosses address boundary\n"); + pci_err(dev, "EA Entry crosses address boundary\n"); goto out; } if (ent_size != ent_offset - offset) { - dev_err(&dev->dev, - "EA Entry Size (%d) does not match length read (%d)\n", + pci_err(dev, "EA Entry Size (%d) does not match length read (%d)\n", ent_size, ent_offset - offset); goto out; } @@ -2609,16 +3348,16 @@ static int pci_ea_read(struct pci_dev *dev, int offset) res->flags = flags; if (bei <= PCI_EA_BEI_BAR5) - dev_printk(KERN_DEBUG, &dev->dev, "BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n", - bei, res, prop); + pci_info(dev, "%s %pR: from Enhanced Allocation, properties %#02x\n", + res_name, res, prop); else if (bei == PCI_EA_BEI_ROM) - dev_printk(KERN_DEBUG, &dev->dev, "ROM: %pR (from Enhanced Allocation, properties %#02x)\n", - res, prop); + pci_info(dev, "%s %pR: from Enhanced Allocation, properties %#02x\n", + res_name, res, prop); else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5) - dev_printk(KERN_DEBUG, &dev->dev, "VF BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n", - bei - PCI_EA_BEI_VF_BAR0, res, prop); + pci_info(dev, "%s %pR: from Enhanced Allocation, properties %#02x\n", + res_name, res, prop); else - dev_printk(KERN_DEBUG, &dev->dev, "BEI %d res: %pR (from Enhanced Allocation, properties %#02x)\n", + pci_info(dev, "BEI %d %pR: from Enhanced Allocation, properties %#02x\n", bei, res, prop); out: @@ -2662,7 +3401,7 @@ static void 
pci_add_saved_cap(struct pci_dev *pci_dev, /** * _pci_add_cap_save_buffer - allocate buffer for saving given - * capability registers + * capability registers * @dev: the PCI device * @cap: the capability to allocate the buffer for * @extended: Standard or Extended capability ID @@ -2715,13 +3454,16 @@ void pci_allocate_cap_save_buffers(struct pci_dev *dev) error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP, PCI_EXP_SAVE_REGS * sizeof(u16)); if (error) - dev_err(&dev->dev, - "unable to preallocate PCI Express save buffer\n"); + pci_err(dev, "unable to preallocate PCI Express save buffer\n"); error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16)); if (error) - dev_err(&dev->dev, - "unable to preallocate PCI-X save buffer\n"); + pci_err(dev, "unable to preallocate PCI-X save buffer\n"); + + error = pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_LTR, + 2 * sizeof(u16)); + if (error) + pci_err(dev, "unable to allocate suspend buffer for LTR\n"); pci_allocate_vc_save_buffers(dev); } @@ -2769,69 +3511,12 @@ void pci_configure_ari(struct pci_dev *dev) } } -static int pci_acs_enable; - -/** - * pci_request_acs - ask for ACS to be enabled if supported - */ -void pci_request_acs(void) -{ - pci_acs_enable = 1; -} - -/** - * pci_std_enable_acs - enable ACS on devices using standard ACS capabilites - * @dev: the PCI device - */ -static void pci_std_enable_acs(struct pci_dev *dev) -{ - int pos; - u16 cap; - u16 ctrl; - - pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS); - if (!pos) - return; - - pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap); - pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl); - - /* Source Validation */ - ctrl |= (cap & PCI_ACS_SV); - - /* P2P Request Redirect */ - ctrl |= (cap & PCI_ACS_RR); - - /* P2P Completion Redirect */ - ctrl |= (cap & PCI_ACS_CR); - - /* Upstream Forwarding */ - ctrl |= (cap & PCI_ACS_UF); - - pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl); -} - -/** - * pci_enable_acs - enable ACS if hardware support it - * @dev: the PCI device - */ -void pci_enable_acs(struct pci_dev *dev) -{ - if (!pci_acs_enable) - return; - - if (!pci_dev_specific_enable_acs(dev)) - return; - - pci_std_enable_acs(dev); -} - static bool pci_acs_flags_enabled(struct pci_dev *pdev, u16 acs_flags) { int pos; u16 cap, ctrl; - pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ACS); + pos = pdev->acs_cap; if (!pos) return false; @@ -2928,7 +3613,7 @@ bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags) } /** - * pci_acs_path_enable - test ACS flags from start to end in a hierarchy + * pci_acs_path_enabled - test ACS flags from start to end in a hierarchy * @start: starting downstream device * @end: ending upstream device or NULL to search to the root bus * @acs_flags: required flags @@ -2957,77 +3642,119 @@ bool pci_acs_path_enabled(struct pci_dev *start, } /** - * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge + * pci_acs_init - Initialize ACS if hardware supports it * @dev: the PCI device - * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD) - * - * Perform INTx swizzling for a device behind one level of bridge. This is - * required by section 9.1 of the PCI-to-PCI bridge specification for devices - * behind bridges on add-in cards. 
For devices with ARI enabled, the slot - * number is always 0 (see the Implementation Note in section 2.2.8.1 of - * the PCI Express Base Specification, Revision 2.1) */ -u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin) +void pci_acs_init(struct pci_dev *dev) { - int slot; + dev->acs_cap = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS); - if (pci_ari_enabled(dev->bus)) - slot = 0; - else - slot = PCI_SLOT(dev->devfn); - - return (((pin - 1) + slot) % 4) + 1; -} - -int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge) -{ - u8 pin; - - pin = dev->pin; - if (!pin) - return -1; - - while (!pci_is_root_bus(dev->bus)) { - pin = pci_swizzle_interrupt_pin(dev, pin); - dev = dev->bus->self; - } - *bridge = dev; - return pin; + /* + * Attempt to enable ACS regardless of capability because some Root + * Ports (e.g. those quirked with *_intel_pch_acs_*) do not have + * the standard ACS capability but still support ACS via those + * quirks. + */ + pci_enable_acs(dev); } /** - * pci_common_swizzle - swizzle INTx all the way to root bridge + * pci_enable_atomic_ops_to_root - enable AtomicOp requests to root port * @dev: the PCI device - * @pinp: pointer to the INTx pin value (1=INTA, 2=INTB, 3=INTD, 4=INTD) + * @cap_mask: mask of desired AtomicOp sizes, including one or more of: + * PCI_EXP_DEVCAP2_ATOMIC_COMP32 + * PCI_EXP_DEVCAP2_ATOMIC_COMP64 + * PCI_EXP_DEVCAP2_ATOMIC_COMP128 * - * Perform INTx swizzling for a device. This traverses through all PCI-to-PCI - * bridges all the way up to a PCI root bus. + * Return 0 if all upstream bridges support AtomicOp routing, egress + * blocking is disabled on all upstream ports, and the root port supports + * the requested completion capabilities (32-bit, 64-bit and/or 128-bit + * AtomicOp completion), or negative otherwise. */ -u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp) +int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask) { - u8 pin = *pinp; + struct pci_bus *bus = dev->bus; + struct pci_dev *bridge; + u32 cap, ctl2; + + /* + * Per PCIe r5.0, sec 9.3.5.10, the AtomicOp Requester Enable bit + * in Device Control 2 is reserved in VFs and the PF value applies + * to all associated VFs. + */ + if (dev->is_virtfn) + return -EINVAL; + + if (!pci_is_pcie(dev)) + return -EINVAL; + + /* + * Per PCIe r4.0, sec 6.15, endpoints and root ports may be + * AtomicOp requesters. For now, we only support endpoints as + * requesters and root ports as completers. No endpoints as + * completers, and no peer-to-peer. 
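+	 *
+	 * Editorial sketch, not part of this change: a typical caller
+	 * requesting 64-bit AtomicOp completion at the Root Port would
+	 * do e.g.
+	 *
+	 *	rc = pci_enable_atomic_ops_to_root(pdev,
+	 *				PCI_EXP_DEVCAP2_ATOMIC_COMP64);
+	 *	if (rc)
+	 *		dev_warn(&pdev->dev, "no AtomicOp completer support\n");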
+ */ + + switch (pci_pcie_type(dev)) { + case PCI_EXP_TYPE_ENDPOINT: + case PCI_EXP_TYPE_LEG_END: + case PCI_EXP_TYPE_RC_END: + break; + default: + return -EINVAL; + } + + while (bus->parent) { + bridge = bus->self; + + pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap); - while (!pci_is_root_bus(dev->bus)) { - pin = pci_swizzle_interrupt_pin(dev, pin); - dev = dev->bus->self; + switch (pci_pcie_type(bridge)) { + /* Ensure switch ports support AtomicOp routing */ + case PCI_EXP_TYPE_UPSTREAM: + case PCI_EXP_TYPE_DOWNSTREAM: + if (!(cap & PCI_EXP_DEVCAP2_ATOMIC_ROUTE)) + return -EINVAL; + break; + + /* Ensure root port supports all the sizes we care about */ + case PCI_EXP_TYPE_ROOT_PORT: + if ((cap & cap_mask) != cap_mask) + return -EINVAL; + break; + } + + /* Ensure upstream ports don't block AtomicOps on egress */ + if (pci_pcie_type(bridge) == PCI_EXP_TYPE_UPSTREAM) { + pcie_capability_read_dword(bridge, PCI_EXP_DEVCTL2, + &ctl2); + if (ctl2 & PCI_EXP_DEVCTL2_ATOMIC_EGRESS_BLOCK) + return -EINVAL; + } + + bus = bus->parent; } - *pinp = pin; - return PCI_SLOT(dev->devfn); + + pcie_capability_set_word(dev, PCI_EXP_DEVCTL2, + PCI_EXP_DEVCTL2_ATOMIC_REQ); + return 0; } -EXPORT_SYMBOL_GPL(pci_common_swizzle); +EXPORT_SYMBOL(pci_enable_atomic_ops_to_root); /** - * pci_release_region - Release a PCI bar - * @pdev: PCI device whose resources were previously reserved by pci_request_region - * @bar: BAR to release + * pci_release_region - Release a PCI bar + * @pdev: PCI device whose resources were previously reserved by + * pci_request_region() + * @bar: BAR to release * - * Releases the PCI I/O and memory resources previously reserved by a - * successful call to pci_request_region. Call this function only - * after all use of the PCI regions has ceased. + * Releases the PCI I/O and memory resources previously reserved by a + * successful call to pci_request_region(). Call this function only + * after all use of the PCI regions has ceased. */ void pci_release_region(struct pci_dev *pdev, int bar) { - struct pci_devres *dr; + if (!pci_bar_index_is_valid(bar)) + return; if (pci_resource_len(pdev, bar) == 0) return; @@ -3037,107 +3764,77 @@ void pci_release_region(struct pci_dev *pdev, int bar) else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) release_mem_region(pci_resource_start(pdev, bar), pci_resource_len(pdev, bar)); - - dr = find_pci_dr(pdev); - if (dr) - dr->region_mask &= ~(1 << bar); } EXPORT_SYMBOL(pci_release_region); /** - * __pci_request_region - Reserved PCI I/O and memory resource - * @pdev: PCI device whose resources are to be reserved - * @bar: BAR to be reserved - * @res_name: Name to be associated with resource. - * @exclusive: whether the region access is exclusive or not + * __pci_request_region - Reserved PCI I/O and memory resource + * @pdev: PCI device whose resources are to be reserved + * @bar: BAR to be reserved + * @name: name of the driver requesting the resource + * @exclusive: whether the region access is exclusive or not * - * Mark the PCI region associated with PCI device @pdev BR @bar as - * being reserved by owner @res_name. Do not access any - * address inside the PCI regions unless this call returns - * successfully. + * Returns: 0 on success, negative error code on failure. * - * If @exclusive is set, then the region is marked so that userspace - * is explicitly not allowed to map the resource via /dev/mem or - * sysfs MMIO access. + * Mark the PCI region associated with PCI device @pdev BAR @bar as being + * reserved by owner @name. 
Do not access any address inside the PCI regions + * unless this call returns successfully. * - * Returns 0 on success, or %EBUSY on error. A warning - * message is also printed on failure. + * If @exclusive is set, then the region is marked so that userspace + * is explicitly not allowed to map the resource via /dev/mem or + * sysfs MMIO access. + * + * Returns 0 on success, or %EBUSY on error. A warning + * message is also printed on failure. */ static int __pci_request_region(struct pci_dev *pdev, int bar, - const char *res_name, int exclusive) + const char *name, int exclusive) { - struct pci_devres *dr; + if (!pci_bar_index_is_valid(bar)) + return -EINVAL; if (pci_resource_len(pdev, bar) == 0) return 0; if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) { if (!request_region(pci_resource_start(pdev, bar), - pci_resource_len(pdev, bar), res_name)) + pci_resource_len(pdev, bar), name)) goto err_out; } else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) { if (!__request_mem_region(pci_resource_start(pdev, bar), - pci_resource_len(pdev, bar), res_name, + pci_resource_len(pdev, bar), name, exclusive)) goto err_out; } - dr = find_pci_dr(pdev); - if (dr) - dr->region_mask |= 1 << bar; - return 0; err_out: - dev_warn(&pdev->dev, "BAR %d: can't reserve %pR\n", bar, + pci_warn(pdev, "BAR %d: can't reserve %pR\n", bar, &pdev->resource[bar]); return -EBUSY; } /** - * pci_request_region - Reserve PCI I/O and memory resource - * @pdev: PCI device whose resources are to be reserved - * @bar: BAR to be reserved - * @res_name: Name to be associated with resource - * - * Mark the PCI region associated with PCI device @pdev BAR @bar as - * being reserved by owner @res_name. Do not access any - * address inside the PCI regions unless this call returns - * successfully. - * - * Returns 0 on success, or %EBUSY on error. A warning - * message is also printed on failure. - */ -int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name) -{ - return __pci_request_region(pdev, bar, res_name, 0); -} -EXPORT_SYMBOL(pci_request_region); - -/** - * pci_request_region_exclusive - Reserved PCI I/O and memory resource - * @pdev: PCI device whose resources are to be reserved - * @bar: BAR to be reserved - * @res_name: Name to be associated with resource. + * pci_request_region - Reserve PCI I/O and memory resource + * @pdev: PCI device whose resources are to be reserved + * @bar: BAR to be reserved + * @name: name of the driver requesting the resource * - * Mark the PCI region associated with PCI device @pdev BR @bar as - * being reserved by owner @res_name. Do not access any - * address inside the PCI regions unless this call returns - * successfully. + * Returns: 0 on success, negative error code on failure. * - * Returns 0 on success, or %EBUSY on error. A warning - * message is also printed on failure. + * Mark the PCI region associated with PCI device @pdev BAR @bar as being + * reserved by owner @name. Do not access any address inside the PCI regions + * unless this call returns successfully. * - * The key difference that _exclusive makes it that userspace is - * explicitly not allowed to map the resource via /dev/mem or - * sysfs. + * Returns 0 on success, or %EBUSY on error. A warning + * message is also printed on failure. 
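+ *
+ * Editorial sketch, not part of this change: typical probe-path usage is
+ *
+ *	rc = pci_request_region(pdev, 0, "mydrv");
+ *	if (rc)
+ *		return rc;
+ *	regs = pci_iomap(pdev, 0, 0);
+ *
+ * with a matching pci_release_region(pdev, 0) on the error and remove paths.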
*/ -int pci_request_region_exclusive(struct pci_dev *pdev, int bar, - const char *res_name) +int pci_request_region(struct pci_dev *pdev, int bar, const char *name) { - return __pci_request_region(pdev, bar, res_name, IORESOURCE_EXCLUSIVE); + return __pci_request_region(pdev, bar, name, 0); } -EXPORT_SYMBOL(pci_request_region_exclusive); +EXPORT_SYMBOL(pci_request_region); /** * pci_release_selected_regions - Release selected PCI I/O and memory resources @@ -3151,20 +3848,20 @@ void pci_release_selected_regions(struct pci_dev *pdev, int bars) { int i; - for (i = 0; i < 6; i++) + for (i = 0; i < PCI_STD_NUM_BARS; i++) if (bars & (1 << i)) pci_release_region(pdev, i); } EXPORT_SYMBOL(pci_release_selected_regions); static int __pci_request_selected_regions(struct pci_dev *pdev, int bars, - const char *res_name, int excl) + const char *name, int excl) { int i; - for (i = 0; i < 6; i++) + for (i = 0; i < PCI_STD_NUM_BARS; i++) if (bars & (1 << i)) - if (__pci_request_region(pdev, i, res_name, excl)) + if (__pci_request_region(pdev, i, name, excl)) goto err_out; return 0; @@ -3181,187 +3878,141 @@ err_out: * pci_request_selected_regions - Reserve selected PCI I/O and memory resources * @pdev: PCI device whose resources are to be reserved * @bars: Bitmask of BARs to be requested - * @res_name: Name to be associated with resource + * @name: Name of the driver requesting the resources + * + * Returns: 0 on success, negative error code on failure. */ int pci_request_selected_regions(struct pci_dev *pdev, int bars, - const char *res_name) + const char *name) { - return __pci_request_selected_regions(pdev, bars, res_name, 0); + return __pci_request_selected_regions(pdev, bars, name, 0); } EXPORT_SYMBOL(pci_request_selected_regions); +/** + * pci_request_selected_regions_exclusive - Request regions exclusively + * @pdev: PCI device to request regions from + * @bars: bit mask of BARs to request + * @name: name of the driver requesting the resources + * + * Returns: 0 on success, negative error code on failure. + */ int pci_request_selected_regions_exclusive(struct pci_dev *pdev, int bars, - const char *res_name) + const char *name) { - return __pci_request_selected_regions(pdev, bars, res_name, + return __pci_request_selected_regions(pdev, bars, name, IORESOURCE_EXCLUSIVE); } EXPORT_SYMBOL(pci_request_selected_regions_exclusive); /** - * pci_release_regions - Release reserved PCI I/O and memory resources - * @pdev: PCI device whose resources were previously reserved by pci_request_regions + * pci_release_regions - Release reserved PCI I/O and memory resources + * @pdev: PCI device whose resources were previously reserved by + * pci_request_regions() * - * Releases all PCI I/O and memory resources previously reserved by a - * successful call to pci_request_regions. Call this function only - * after all use of the PCI regions has ceased. + * Releases all PCI I/O and memory resources previously reserved by a + * successful call to pci_request_regions(). Call this function only + * after all use of the PCI regions has ceased. */ - void pci_release_regions(struct pci_dev *pdev) { - pci_release_selected_regions(pdev, (1 << 6) - 1); + pci_release_selected_regions(pdev, (1 << PCI_STD_NUM_BARS) - 1); } EXPORT_SYMBOL(pci_release_regions); /** - * pci_request_regions - Reserved PCI I/O and memory resources - * @pdev: PCI device whose resources are to be reserved - * @res_name: Name to be associated with resource. 
+ * pci_request_regions - Reserve PCI I/O and memory resources + * @pdev: PCI device whose resources are to be reserved + * @name: name of the driver requesting the resources * - * Mark all PCI regions associated with PCI device @pdev as - * being reserved by owner @res_name. Do not access any - * address inside the PCI regions unless this call returns - * successfully. + * Mark all PCI regions associated with PCI device @pdev as being reserved by + * owner @name. Do not access any address inside the PCI regions unless this + * call returns successfully. * - * Returns 0 on success, or %EBUSY on error. A warning - * message is also printed on failure. + * Returns 0 on success, or %EBUSY on error. A warning + * message is also printed on failure. */ -int pci_request_regions(struct pci_dev *pdev, const char *res_name) +int pci_request_regions(struct pci_dev *pdev, const char *name) { - return pci_request_selected_regions(pdev, ((1 << 6) - 1), res_name); + return pci_request_selected_regions(pdev, + ((1 << PCI_STD_NUM_BARS) - 1), name); } EXPORT_SYMBOL(pci_request_regions); /** - * pci_request_regions_exclusive - Reserved PCI I/O and memory resources - * @pdev: PCI device whose resources are to be reserved - * @res_name: Name to be associated with resource. + * pci_request_regions_exclusive - Reserve PCI I/O and memory resources + * @pdev: PCI device whose resources are to be reserved + * @name: name of the driver requesting the resources + * + * Returns: 0 on success, negative error code on failure. * - * Mark all PCI regions associated with PCI device @pdev as - * being reserved by owner @res_name. Do not access any - * address inside the PCI regions unless this call returns - * successfully. + * Mark all PCI regions associated with PCI device @pdev as being reserved + * by owner @name. Do not access any address inside the PCI regions + * unless this call returns successfully. * - * pci_request_regions_exclusive() will mark the region so that - * /dev/mem and the sysfs MMIO access will not be allowed. + * pci_request_regions_exclusive() will mark the region so that /dev/mem + * and the sysfs MMIO access will not be allowed. * - * Returns 0 on success, or %EBUSY on error. A warning - * message is also printed on failure. + * Returns 0 on success, or %EBUSY on error. A warning message is also + * printed on failure. */ -int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name) +int pci_request_regions_exclusive(struct pci_dev *pdev, const char *name) { return pci_request_selected_regions_exclusive(pdev, - ((1 << 6) - 1), res_name); + ((1 << PCI_STD_NUM_BARS) - 1), name); } EXPORT_SYMBOL(pci_request_regions_exclusive); -#ifdef PCI_IOBASE -struct io_range { - struct list_head list; - phys_addr_t start; - resource_size_t size; -}; - -static LIST_HEAD(io_range_list); -static DEFINE_SPINLOCK(io_range_lock); -#endif - /* * Record the PCI IO range (expressed as CPU physical address + size). 
- * Return a negative value if an error has occured, zero otherwise + * Return a negative value if an error has occurred, zero otherwise */ -int __weak pci_register_io_range(phys_addr_t addr, resource_size_t size) +int pci_register_io_range(const struct fwnode_handle *fwnode, phys_addr_t addr, + resource_size_t size) { - int err = 0; - + int ret = 0; #ifdef PCI_IOBASE - struct io_range *range; - resource_size_t allocated_size = 0; - - /* check if the range hasn't been previously recorded */ - spin_lock(&io_range_lock); - list_for_each_entry(range, &io_range_list, list) { - if (addr >= range->start && addr + size <= range->start + size) { - /* range already registered, bail out */ - goto end_register; - } - allocated_size += range->size; - } - - /* range not registed yet, check for available space */ - if (allocated_size + size - 1 > IO_SPACE_LIMIT) { - /* if it's too big check if 64K space can be reserved */ - if (allocated_size + SZ_64K - 1 > IO_SPACE_LIMIT) { - err = -E2BIG; - goto end_register; - } + struct logic_pio_hwaddr *range; - size = SZ_64K; - pr_warn("Requested IO range too big, new size set to 64K\n"); - } + if (!size || addr + size < addr) + return -EINVAL; - /* add the range to the list */ range = kzalloc(sizeof(*range), GFP_ATOMIC); - if (!range) { - err = -ENOMEM; - goto end_register; - } + if (!range) + return -ENOMEM; - range->start = addr; + range->fwnode = fwnode; range->size = size; + range->hw_start = addr; + range->flags = LOGIC_PIO_CPU_MMIO; - list_add_tail(&range->list, &io_range_list); + ret = logic_pio_register_range(range); + if (ret) + kfree(range); -end_register: - spin_unlock(&io_range_lock); + /* Ignore duplicates due to deferred probing */ + if (ret == -EEXIST) + ret = 0; #endif - return err; + return ret; } phys_addr_t pci_pio_to_address(unsigned long pio) { - phys_addr_t address = (phys_addr_t)OF_BAD_ADDR; - #ifdef PCI_IOBASE - struct io_range *range; - resource_size_t allocated_size = 0; - - if (pio > IO_SPACE_LIMIT) - return address; - - spin_lock(&io_range_lock); - list_for_each_entry(range, &io_range_list, list) { - if (pio >= allocated_size && pio < allocated_size + range->size) { - address = range->start + pio - allocated_size; - break; - } - allocated_size += range->size; - } - spin_unlock(&io_range_lock); + if (pio < MMIO_UPPER_LIMIT) + return logic_pio_to_hwaddr(pio); #endif - return address; + return (phys_addr_t) OF_BAD_ADDR; } +EXPORT_SYMBOL_GPL(pci_pio_to_address); unsigned long __weak pci_address_to_pio(phys_addr_t address) { #ifdef PCI_IOBASE - struct io_range *res; - resource_size_t offset = 0; - unsigned long addr = -1; - - spin_lock(&io_range_lock); - list_for_each_entry(res, &io_range_list, list) { - if (address >= res->start && address < res->start + res->size) { - addr = address - res->start + offset; - break; - } - offset += res->size; - } - spin_unlock(&io_range_lock); - - return addr; + return logic_pio_trans_cpuaddr(address); #else if (address > IO_SPACE_LIMIT) return (unsigned long)-1; @@ -3371,18 +4022,19 @@ unsigned long __weak pci_address_to_pio(phys_addr_t address) } /** - * pci_remap_iospace - Remap the memory mapped I/O space - * @res: Resource describing the I/O space - * @phys_addr: physical address of range to be mapped + * pci_remap_iospace - Remap the memory mapped I/O space + * @res: Resource describing the I/O space + * @phys_addr: physical address of range to be mapped * - * Remap the memory mapped I/O space described by the @res - * and the CPU physical address @phys_addr into virtual address space. 
- * Only architectures that have memory mapped IO functions defined - * (and the PCI_IOBASE value defined) should call this function. + * Remap the memory mapped I/O space described by the @res and the CPU + * physical address @phys_addr into virtual address space. Only + * architectures that have memory mapped IO functions defined (and the + * PCI_IOBASE value defined) should call this function. */ +#ifndef pci_remap_iospace int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr) { -#if defined(PCI_IOBASE) && defined(CONFIG_MMU) +#if defined(PCI_IOBASE) unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start; if (!(res->flags & IORESOURCE_IO)) @@ -3391,117 +4043,38 @@ int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr) if (res->end > IO_SPACE_LIMIT) return -EINVAL; - return ioremap_page_range(vaddr, vaddr + resource_size(res), phys_addr, - pgprot_device(PAGE_KERNEL)); + return vmap_page_range(vaddr, vaddr + resource_size(res), phys_addr, + pgprot_device(PAGE_KERNEL)); #else - /* this architecture does not have memory mapped I/O space, - so this function should never be called */ + /* + * This architecture does not have memory mapped I/O space, + * so this function should never be called + */ WARN_ONCE(1, "This architecture does not support memory mapped I/O\n"); return -ENODEV; #endif } EXPORT_SYMBOL(pci_remap_iospace); +#endif /** - * pci_unmap_iospace - Unmap the memory mapped I/O space - * @res: resource to be unmapped + * pci_unmap_iospace - Unmap the memory mapped I/O space + * @res: resource to be unmapped * - * Unmap the CPU virtual address @res from virtual address space. - * Only architectures that have memory mapped IO functions defined - * (and the PCI_IOBASE value defined) should call this function. + * Unmap the CPU virtual address @res from virtual address space. Only + * architectures that have memory mapped IO functions defined (and the + * PCI_IOBASE value defined) should call this function. */ void pci_unmap_iospace(struct resource *res) { -#if defined(PCI_IOBASE) && defined(CONFIG_MMU) +#if defined(PCI_IOBASE) unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start; - unmap_kernel_range(vaddr, resource_size(res)); + vunmap_range(vaddr, vaddr + resource_size(res)); #endif } EXPORT_SYMBOL(pci_unmap_iospace); -/** - * devm_pci_remap_cfgspace - Managed pci_remap_cfgspace() - * @dev: Generic device to remap IO address for - * @offset: Resource address to map - * @size: Size of map - * - * Managed pci_remap_cfgspace(). Map is automatically unmapped on driver - * detach. - */ -void __iomem *devm_pci_remap_cfgspace(struct device *dev, - resource_size_t offset, - resource_size_t size) -{ - void __iomem **ptr, *addr; - - ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL); - if (!ptr) - return NULL; - - addr = pci_remap_cfgspace(offset, size); - if (addr) { - *ptr = addr; - devres_add(dev, ptr); - } else - devres_free(ptr); - - return addr; -} -EXPORT_SYMBOL(devm_pci_remap_cfgspace); - -/** - * devm_pci_remap_cfg_resource - check, request region and ioremap cfg resource - * @dev: generic device to handle the resource for - * @res: configuration space resource to be handled - * - * Checks that a resource is a valid memory region, requests the memory - * region and ioremaps with pci_remap_cfgspace() API that ensures the - * proper PCI configuration space memory attributes are guaranteed. - * - * All operations are managed and will be undone on driver detach. 
- * - * Returns a pointer to the remapped memory or an ERR_PTR() encoded error code - * on failure. Usage example: - * - * res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - * base = devm_pci_remap_cfg_resource(&pdev->dev, res); - * if (IS_ERR(base)) - * return PTR_ERR(base); - */ -void __iomem *devm_pci_remap_cfg_resource(struct device *dev, - struct resource *res) -{ - resource_size_t size; - const char *name; - void __iomem *dest_ptr; - - BUG_ON(!dev); - - if (!res || resource_type(res) != IORESOURCE_MEM) { - dev_err(dev, "invalid resource\n"); - return IOMEM_ERR_PTR(-EINVAL); - } - - size = resource_size(res); - name = res->name ?: dev_name(dev); - - if (!devm_request_mem_region(dev, res->start, size, name)) { - dev_err(dev, "can't request region for resource %pR\n", res); - return IOMEM_ERR_PTR(-EBUSY); - } - - dest_ptr = devm_pci_remap_cfgspace(dev, res->start, size); - if (!dest_ptr) { - dev_err(dev, "ioremap failed for resource %pR\n", res); - devm_release_mem_region(dev, res->start, size); - dest_ptr = IOMEM_ERR_PTR(-ENOMEM); - } - - return dest_ptr; -} -EXPORT_SYMBOL(devm_pci_remap_cfg_resource); - static void __pci_set_master(struct pci_dev *dev, bool enable) { u16 old_cmd, cmd; @@ -3512,7 +4085,7 @@ static void __pci_set_master(struct pci_dev *dev, bool enable) else cmd = old_cmd & ~PCI_COMMAND_MASTER; if (cmd != old_cmd) { - dev_dbg(&dev->dev, "%s bus mastering\n", + pci_dbg(dev, "%s bus mastering\n", enable ? "enabling" : "disabling"); pci_write_config_word(dev, PCI_COMMAND, cmd); } @@ -3613,7 +4186,7 @@ int pci_set_cacheline_size(struct pci_dev *dev) if (cacheline_size == pci_cache_line_size) return 0; - dev_printk(KERN_DEBUG, &dev->dev, "cache line size of %d is not supported\n", + pci_dbg(dev, "cache line size of %d is not supported\n", pci_cache_line_size << 2); return -EINVAL; @@ -3642,7 +4215,7 @@ int pci_set_mwi(struct pci_dev *dev) pci_read_config_word(dev, PCI_COMMAND, &cmd); if (!(cmd & PCI_COMMAND_INVALIDATE)) { - dev_dbg(&dev->dev, "enabling Mem-Wr-Inval\n"); + pci_dbg(dev, "enabling Mem-Wr-Inval\n"); cmd |= PCI_COMMAND_INVALIDATE; pci_write_config_word(dev, PCI_COMMAND, cmd); } @@ -3691,11 +4264,28 @@ void pci_clear_mwi(struct pci_dev *dev) EXPORT_SYMBOL(pci_clear_mwi); /** + * pci_disable_parity - disable parity checking for device + * @dev: the PCI device to operate on + * + * Disable parity checking for device @dev + */ +void pci_disable_parity(struct pci_dev *dev) +{ + u16 cmd; + + pci_read_config_word(dev, PCI_COMMAND, &cmd); + if (cmd & PCI_COMMAND_PARITY) { + cmd &= ~PCI_COMMAND_PARITY; + pci_write_config_word(dev, PCI_COMMAND, cmd); + } +} + +/** * pci_intx - enables/disables PCI INTx for device dev * @pdev: the PCI device to operate on * @enable: boolean: whether to enable or disable PCI INTx * - * Enables/disables PCI INTx for device dev + * Enables/disables PCI INTx for device @pdev */ void pci_intx(struct pci_dev *pdev, int enable) { @@ -3708,95 +4298,15 @@ void pci_intx(struct pci_dev *pdev, int enable) else new = pci_command | PCI_COMMAND_INTX_DISABLE; - if (new != pci_command) { - struct pci_devres *dr; - - pci_write_config_word(pdev, PCI_COMMAND, new); + if (new == pci_command) + return; - dr = find_pci_dr(pdev); - if (dr && !dr->restore_intx) { - dr->restore_intx = 1; - dr->orig_intx = !enable; - } - } + pci_write_config_word(pdev, PCI_COMMAND, new); } EXPORT_SYMBOL_GPL(pci_intx); -static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask) -{ - struct pci_bus *bus = dev->bus; - bool mask_updated = true; - u32 cmd_status_dword; - 
u16 origcmd, newcmd; - unsigned long flags; - bool irq_pending; - - /* - * We do a single dword read to retrieve both command and status. - * Document assumptions that make this possible. - */ - BUILD_BUG_ON(PCI_COMMAND % 4); - BUILD_BUG_ON(PCI_COMMAND + 2 != PCI_STATUS); - - raw_spin_lock_irqsave(&pci_lock, flags); - - bus->ops->read(bus, dev->devfn, PCI_COMMAND, 4, &cmd_status_dword); - - irq_pending = (cmd_status_dword >> 16) & PCI_STATUS_INTERRUPT; - - /* - * Check interrupt status register to see whether our device - * triggered the interrupt (when masking) or the next IRQ is - * already pending (when unmasking). - */ - if (mask != irq_pending) { - mask_updated = false; - goto done; - } - - origcmd = cmd_status_dword; - newcmd = origcmd & ~PCI_COMMAND_INTX_DISABLE; - if (mask) - newcmd |= PCI_COMMAND_INTX_DISABLE; - if (newcmd != origcmd) - bus->ops->write(bus, dev->devfn, PCI_COMMAND, 2, newcmd); - -done: - raw_spin_unlock_irqrestore(&pci_lock, flags); - - return mask_updated; -} - -/** - * pci_check_and_mask_intx - mask INTx on pending interrupt - * @dev: the PCI device to operate on - * - * Check if the device dev has its INTx line asserted, mask it and - * return true in that case. False is returned if no interrupt was - * pending. - */ -bool pci_check_and_mask_intx(struct pci_dev *dev) -{ - return pci_check_and_set_intx_mask(dev, true); -} -EXPORT_SYMBOL_GPL(pci_check_and_mask_intx); - /** - * pci_check_and_unmask_intx - unmask INTx if no interrupt is pending - * @dev: the PCI device to operate on - * - * Check if the device dev has its INTx line asserted, unmask it if not - * and return true. False is returned and the mask remains active if - * there was still an interrupt pending. - */ -bool pci_check_and_unmask_intx(struct pci_dev *dev) -{ - return pci_check_and_set_intx_mask(dev, false); -} -EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx); - -/** - * pci_wait_for_pending_transaction - waits for pending transaction + * pci_wait_for_pending_transaction - wait for pending transaction * @dev: the PCI device to operate on * * Return 0 if transaction is pending 1 otherwise. @@ -3811,66 +4321,57 @@ int pci_wait_for_pending_transaction(struct pci_dev *dev) } EXPORT_SYMBOL(pci_wait_for_pending_transaction); -/* - * We should only need to wait 100ms after FLR, but some devices take longer. - * Wait for up to 1000ms for config space to return something other than -1. - * Intel IGD requires this when an LCD panel is attached. We read the 2nd - * dword because VFs don't implement the 1st dword. +/** + * pcie_flr - initiate a PCIe function level reset + * @dev: device to reset + * + * Initiate a function level reset unconditionally on @dev without + * checking any flags and DEVCAP */ -static void pci_flr_wait(struct pci_dev *dev) +int pcie_flr(struct pci_dev *dev) { - int i = 0; - u32 id; + if (!pci_wait_for_pending_transaction(dev)) + pci_err(dev, "timed out waiting for pending transaction; performing function level reset anyway\n"); - do { - msleep(100); - pci_read_config_dword(dev, PCI_COMMAND, &id); - } while (i++ < 10 && id == ~0); + pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR); - if (id == ~0) - dev_warn(&dev->dev, "Failed to return from FLR\n"); - else if (i > 1) - dev_info(&dev->dev, "Required additional %dms to return from FLR\n", - (i - 1) * 100); + if (dev->imm_ready) + return 0; + + /* + * Per PCIe r4.0, sec 6.6.2, a device must complete an FLR within + * 100ms, but may silently discard requests while the FLR is in + * progress. 
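/*
 * Illustrative sketch, not part of this patch (in-kernel context
 * assumed): pcie_flr() now returns the result of waiting for the
 * function to become ready again, so a caller that wants an
 * unconditional FLR can propagate a timeout.  example_unconditional_flr()
 * is a made-up name.
 */
static int example_unconditional_flr(struct pci_dev *pdev)
{
	if (!(pdev->devcap & PCI_EXP_DEVCAP_FLR))
		return -ENOTTY;		/* no FLR capability advertised */

	return pcie_flr(pdev);		/* may poll for up to 60 s */
}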
Wait 100ms before trying to access the device. + */ + msleep(100); + + return pci_dev_wait(dev, "FLR", PCIE_RESET_READY_POLL_MS); } +EXPORT_SYMBOL_GPL(pcie_flr); /** - * pcie_has_flr - check if a device supports function level resets - * @dev: device to check + * pcie_reset_flr - initiate a PCIe function level reset + * @dev: device to reset + * @probe: if true, return 0 if device can be reset this way * - * Returns true if the device advertises support for PCIe function level - * resets. + * Initiate a function level reset on @dev. */ -static bool pcie_has_flr(struct pci_dev *dev) +int pcie_reset_flr(struct pci_dev *dev, bool probe) { - u32 cap; - if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET) - return false; + return -ENOTTY; - pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &cap); - return cap & PCI_EXP_DEVCAP_FLR; -} + if (!(dev->devcap & PCI_EXP_DEVCAP_FLR)) + return -ENOTTY; -/** - * pcie_flr - initiate a PCIe function level reset - * @dev: device to reset - * - * Initiate a function level reset on @dev. The caller should ensure the - * device supports FLR before calling this function, e.g. by using the - * pcie_has_flr() helper. - */ -void pcie_flr(struct pci_dev *dev) -{ - if (!pci_wait_for_pending_transaction(dev)) - dev_err(&dev->dev, "timed out waiting for pending transaction; performing function level reset anyway\n"); + if (probe) + return 0; - pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR); - pci_flr_wait(dev); + return pcie_flr(dev); } -EXPORT_SYMBOL_GPL(pcie_flr); +EXPORT_SYMBOL_GPL(pcie_reset_flr); -static int pci_af_flr(struct pci_dev *dev, int probe) +static int pci_af_flr(struct pci_dev *dev, bool probe) { int pos; u8 cap; @@ -3891,22 +4392,33 @@ static int pci_af_flr(struct pci_dev *dev, int probe) /* * Wait for Transaction Pending bit to clear. A word-aligned test - * is used, so we use the conrol offset rather than status and shift + * is used, so we use the control offset rather than status and shift * the test bit to match. */ if (!pci_wait_for_pending(dev, pos + PCI_AF_CTRL, PCI_AF_STATUS_TP << 8)) - dev_err(&dev->dev, "timed out waiting for pending transaction; performing AF function level reset anyway\n"); + pci_err(dev, "timed out waiting for pending transaction; performing AF function level reset anyway\n"); pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR); - pci_flr_wait(dev); - return 0; + + if (dev->imm_ready) + return 0; + + /* + * Per Advanced Capabilities for Conventional PCI ECN, 13 April 2006, + * updated 27 July 2006; a device must complete an FLR within + * 100ms, but may silently discard requests while the FLR is in + * progress. Wait 100ms before trying to access the device. + */ + msleep(100); + + return pci_dev_wait(dev, "AF_FLR", PCIE_RESET_READY_POLL_MS); } /** * pci_pm_reset - Put device into PCI_D3 and back into PCI_D0. * @dev: Device to reset. - * @probe: If set, only check if the device can be reset this way. + * @probe: if true, return 0 if the device can be reset this way. * * If @dev supports native PCI PM and its PCI_PM_CTRL_NO_SOFT_RESET flag is * unset, it will be reinitialized internally when going from PCI_D3hot to @@ -3915,10 +4427,10 @@ static int pci_af_flr(struct pci_dev *dev, int probe) * * NOTE: This causes the caller to sleep for twice the device power transition * cooldown period, which for the D0->D3hot and D3hot->D0 transitions is 10 ms - * by default (i.e. unless the @dev's d3_delay field has a different value). + * by default (i.e. 
unless the @dev's d3hot_delay field has a different value). * Moreover, only devices in D0 can be reset by this function. */ -static int pci_pm_reset(struct pci_dev *dev, int probe) +static int pci_pm_reset(struct pci_dev *dev, bool probe) { u16 csr; @@ -3945,7 +4457,303 @@ static int pci_pm_reset(struct pci_dev *dev, int probe) pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr); pci_dev_d3_sleep(dev); - return 0; + return pci_dev_wait(dev, "PM D3hot->D0", PCIE_RESET_READY_POLL_MS); +} + +/** + * pcie_wait_for_link_status - Wait for link status change + * @pdev: Device whose link to wait for. + * @use_lt: Use the LT bit if TRUE, or the DLLLA bit if FALSE. + * @active: Waiting for active or inactive? + * + * Return 0 if successful, or -ETIMEDOUT if status has not changed within + * PCIE_LINK_RETRAIN_TIMEOUT_MS milliseconds. + */ +static int pcie_wait_for_link_status(struct pci_dev *pdev, + bool use_lt, bool active) +{ + u16 lnksta_mask, lnksta_match; + unsigned long end_jiffies; + u16 lnksta; + + lnksta_mask = use_lt ? PCI_EXP_LNKSTA_LT : PCI_EXP_LNKSTA_DLLLA; + lnksta_match = active ? lnksta_mask : 0; + + end_jiffies = jiffies + msecs_to_jiffies(PCIE_LINK_RETRAIN_TIMEOUT_MS); + do { + pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnksta); + if ((lnksta & lnksta_mask) == lnksta_match) + return 0; + msleep(1); + } while (time_before(jiffies, end_jiffies)); + + return -ETIMEDOUT; +} + +/** + * pcie_retrain_link - Request a link retrain and wait for it to complete + * @pdev: Device whose link to retrain. + * @use_lt: Use the LT bit if TRUE, or the DLLLA bit if FALSE, for status. + * + * Trigger retraining of the PCIe Link and wait for the completion of the + * retraining. As link retraining is known to asserts LBMS and may change + * the Link Speed, LBMS is cleared after the retraining and the Link Speed + * of the subordinate bus is updated. + * + * Retrain completion status is retrieved from the Link Status Register + * according to @use_lt. It is not verified whether the use of the DLLLA + * bit is valid. + * + * Return 0 if successful, or -ETIMEDOUT if training has not completed + * within PCIE_LINK_RETRAIN_TIMEOUT_MS milliseconds. + */ +int pcie_retrain_link(struct pci_dev *pdev, bool use_lt) +{ + int rc; + + /* + * Ensure the updated LNKCTL parameters are used during link + * training by checking that there is no ongoing link training that + * may have started before link parameters were changed, so as to + * avoid LTSSM race as recommended in Implementation Note at the end + * of PCIe r6.1 sec 7.5.3.7. + */ + rc = pcie_wait_for_link_status(pdev, true, false); + if (rc) + return rc; + + pcie_capability_set_word(pdev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_RL); + if (pdev->clear_retrain_link) { + /* + * Due to an erratum in some devices the Retrain Link bit + * needs to be cleared again manually to allow the link + * training to succeed. + */ + pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_RL); + } + + rc = pcie_wait_for_link_status(pdev, use_lt, !use_lt); + + /* + * Clear LBMS after a manual retrain so that the bit can be used + * to track link speed or width changes made by hardware itself + * in attempt to correct unreliable link operation. + */ + pcie_reset_lbms(pdev); + + /* + * Ensure the Link Speed updates after retraining in case the Link + * Speed was changed because of the retraining. 
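/*
 * Illustrative sketch, not part of this patch (in-kernel context
 * assumed): roughly what link speed management code does after lowering
 * the Target Link Speed -- update LNKCTL2, then retrain while polling
 * the LT bit, since the link may stay up during retraining.
 * example_set_target_speed() is a made-up helper.
 */
static int example_set_target_speed(struct pci_dev *port, u16 target_speed)
{
	pcie_capability_clear_and_set_word(port, PCI_EXP_LNKCTL2,
					   PCI_EXP_LNKCTL2_TLS, target_speed);

	/* poll LT rather than DLLLA while the link retrains */
	return pcie_retrain_link(port, true);
}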
While the bwctrl's + * IRQ handler normally picks up the new Link Speed, clearing LBMS + * races with the IRQ handler reading the Link Status register and + * can result in the handler returning early without updating the + * Link Speed. + */ + if (pdev->subordinate) + pcie_update_link_speed(pdev->subordinate); + + return rc; +} + +/** + * pcie_wait_for_link_delay - Wait until link is active or inactive + * @pdev: Bridge device + * @active: waiting for active or inactive? + * @delay: Delay to wait after link has become active (in ms) + * + * Use this to wait till link becomes active or inactive. + */ +static bool pcie_wait_for_link_delay(struct pci_dev *pdev, bool active, + int delay) +{ + int rc; + + /* + * Some controllers might not implement link active reporting. In this + * case, we wait for 1000 ms + any delay requested by the caller. + */ + if (!pdev->link_active_reporting) { + msleep(PCIE_LINK_RETRAIN_TIMEOUT_MS + delay); + return true; + } + + /* + * PCIe r4.0 sec 6.6.1, a component must enter LTSSM Detect within 20ms, + * after which we should expect the link to be active if the reset was + * successful. If so, software must wait a minimum 100ms before sending + * configuration requests to devices downstream this port. + * + * If the link fails to activate, either the device was physically + * removed or the link is permanently failed. + */ + if (active) + msleep(20); + rc = pcie_wait_for_link_status(pdev, false, active); + if (active) { + if (rc) + rc = pcie_failed_link_retrain(pdev); + if (rc) + return false; + + msleep(delay); + return true; + } + + if (rc) + return false; + + return true; +} + +/** + * pcie_wait_for_link - Wait until link is active or inactive + * @pdev: Bridge device + * @active: waiting for active or inactive? + * + * Use this to wait till link becomes active or inactive. + */ +bool pcie_wait_for_link(struct pci_dev *pdev, bool active) +{ + return pcie_wait_for_link_delay(pdev, active, 100); +} + +/* + * Find maximum D3cold delay required by all the devices on the bus. The + * spec says 100 ms, but firmware can lower it and we allow drivers to + * increase it as well. + * + * Called with @pci_bus_sem locked for reading. + */ +static int pci_bus_max_d3cold_delay(const struct pci_bus *bus) +{ + const struct pci_dev *pdev; + int min_delay = 100; + int max_delay = 0; + + list_for_each_entry(pdev, &bus->devices, bus_list) { + if (pdev->d3cold_delay < min_delay) + min_delay = pdev->d3cold_delay; + if (pdev->d3cold_delay > max_delay) + max_delay = pdev->d3cold_delay; + } + + return max(min_delay, max_delay); +} + +/** + * pci_bridge_wait_for_secondary_bus - Wait for secondary bus to be accessible + * @dev: PCI bridge + * @reset_type: reset type in human-readable form + * + * Handle necessary delays before access to the devices on the secondary + * side of the bridge are permitted after D3cold to D0 transition + * or Conventional Reset. + * + * For PCIe this means the delays in PCIe 5.0 section 6.6.1. For + * conventional PCI it means Tpvrh + Trhfa specified in PCI 3.0 section + * 4.3.2. + * + * Return 0 on success or -ENOTTY if the first device on the secondary bus + * failed to become accessible. + */ +int pci_bridge_wait_for_secondary_bus(struct pci_dev *dev, char *reset_type) +{ + struct pci_dev *child __free(pci_dev_put) = NULL; + int delay; + + if (pci_dev_is_disconnected(dev)) + return 0; + + if (!pci_is_bridge(dev)) + return 0; + + down_read(&pci_bus_sem); + + /* + * We only deal with devices that are present currently on the bus. 
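/*
 * Illustrative sketch, not part of this patch (in-kernel context
 * assumed): the pattern hotplug code follows after enabling slot power,
 * before enumerating anything below the port.  example_board_added() is
 * a made-up name.
 */
static int example_board_added(struct pci_dev *port)
{
	if (!pcie_wait_for_link(port, true))
		return -ENODEV;		/* link never trained */

	/* ... safe to enumerate devices below @port now ... */
	return 0;
}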
+ * For any hot-added devices the access delay is handled in pciehp + * board_added(). In case of ACPI hotplug the firmware is expected + * to configure the devices before OS is notified. + */ + if (!dev->subordinate || list_empty(&dev->subordinate->devices)) { + up_read(&pci_bus_sem); + return 0; + } + + /* Take d3cold_delay requirements into account */ + delay = pci_bus_max_d3cold_delay(dev->subordinate); + if (!delay) { + up_read(&pci_bus_sem); + return 0; + } + + child = pci_dev_get(list_first_entry(&dev->subordinate->devices, + struct pci_dev, bus_list)); + up_read(&pci_bus_sem); + + /* + * Conventional PCI and PCI-X we need to wait Tpvrh + Trhfa before + * accessing the device after reset (that is 1000 ms + 100 ms). + */ + if (!pci_is_pcie(dev)) { + pci_dbg(dev, "waiting %d ms for secondary bus\n", 1000 + delay); + msleep(1000 + delay); + return 0; + } + + /* + * For PCIe downstream and root ports that do not support speeds + * greater than 5 GT/s need to wait minimum 100 ms. For higher + * speeds (gen3) we need to wait first for the data link layer to + * become active. + * + * However, 100 ms is the minimum and the PCIe spec says the + * software must allow at least 1s before it can determine that the + * device that did not respond is a broken device. Also device can + * take longer than that to respond if it indicates so through Request + * Retry Status completions. + * + * Therefore we wait for 100 ms and check for the device presence + * until the timeout expires. + */ + if (!pcie_downstream_port(dev)) + return 0; + + if (pcie_get_speed_cap(dev) <= PCIE_SPEED_5_0GT) { + u16 status; + + pci_dbg(dev, "waiting %d ms for downstream link\n", delay); + msleep(delay); + + if (!pci_dev_wait(child, reset_type, PCI_RESET_WAIT - delay)) + return 0; + + /* + * If the port supports active link reporting we now check + * whether the link is active and if not bail out early with + * the assumption that the device is not present anymore. + */ + if (!dev->link_active_reporting) + return -ENOTTY; + + pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &status); + if (!(status & PCI_EXP_LNKSTA_DLLLA)) + return -ENOTTY; + + return pci_dev_wait(child, reset_type, + PCIE_RESET_READY_POLL_MS - PCI_RESET_WAIT); + } + + pci_dbg(dev, "waiting %d ms for downstream link, after activation\n", + delay); + if (!pcie_wait_for_link_delay(dev, true, delay)) { + /* Did not train, no need to wait any further */ + pci_info(dev, "Data Link Layer Link Active not set in %d msec\n", delay); + return -ENOTTY; + } + + return pci_dev_wait(child, reset_type, + PCIE_RESET_READY_POLL_MS - delay); } void pci_reset_secondary_bus(struct pci_dev *dev) @@ -3955,6 +4763,7 @@ void pci_reset_secondary_bus(struct pci_dev *dev) pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &ctrl); ctrl |= PCI_BRIDGE_CTL_BUS_RESET; pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl); + /* * PCI spec v3.0 7.6.4.2 requires minimum Trst of 1ms. Double * this to 2ms to ensure that we meet the minimum requirement. @@ -3963,15 +4772,6 @@ void pci_reset_secondary_bus(struct pci_dev *dev) ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET; pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl); - - /* - * Trhfa for conventional PCI is 2^25 clock cycles. - * Assuming a minimum 33MHz clock this results in a 1s - * delay before we can consider subordinate devices to - * be re-initialized. PCIe has some ways to shorten this, - * but we don't make use of them yet. 
- */ - ssleep(1); } void __weak pcibios_reset_secondary_bus(struct pci_dev *dev) @@ -3980,19 +4780,24 @@ void __weak pcibios_reset_secondary_bus(struct pci_dev *dev) } /** - * pci_reset_bridge_secondary_bus - Reset the secondary bus on a PCI bridge. + * pci_bridge_secondary_bus_reset - Reset the secondary bus on a PCI bridge. * @dev: Bridge device * * Use the bridge control register to assert reset on the secondary bus. * Devices on the secondary bus are left in power-on state. */ -void pci_reset_bridge_secondary_bus(struct pci_dev *dev) +int pci_bridge_secondary_bus_reset(struct pci_dev *dev) { + if (!dev->block_cfg_access) + pci_warn_once(dev, "unlocked secondary bus reset via: %pS\n", + __builtin_return_address(0)); pcibios_reset_secondary_bus(dev); + + return pci_bridge_wait_for_secondary_bus(dev, "bus reset"); } -EXPORT_SYMBOL_GPL(pci_reset_bridge_secondary_bus); +EXPORT_SYMBOL_GPL(pci_bridge_secondary_bus_reset); -static int pci_parent_bus_reset(struct pci_dev *dev, int probe) +static int pci_parent_bus_reset(struct pci_dev *dev, bool probe) { struct pci_dev *pdev; @@ -4007,65 +4812,150 @@ static int pci_parent_bus_reset(struct pci_dev *dev, int probe) if (probe) return 0; - pci_reset_bridge_secondary_bus(dev->bus->self); - - return 0; + return pci_bridge_secondary_bus_reset(dev->bus->self); } -static int pci_reset_hotplug_slot(struct hotplug_slot *hotplug, int probe) +static int pci_reset_hotplug_slot(struct hotplug_slot *hotplug, bool probe) { int rc = -ENOTTY; - if (!hotplug || !try_module_get(hotplug->ops->owner)) + if (!hotplug || !try_module_get(hotplug->owner)) return rc; if (hotplug->ops->reset_slot) rc = hotplug->ops->reset_slot(hotplug, probe); - module_put(hotplug->ops->owner); + module_put(hotplug->owner); return rc; } -static int pci_dev_reset_slot_function(struct pci_dev *dev, int probe) +static int pci_dev_reset_slot_function(struct pci_dev *dev, bool probe) { - struct pci_dev *pdev; - - if (dev->subordinate || !dev->slot || + if (dev->multifunction || dev->subordinate || !dev->slot || dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET) return -ENOTTY; - list_for_each_entry(pdev, &dev->bus->devices, bus_list) - if (pdev != dev && pdev->slot == dev->slot) - return -ENOTTY; - return pci_reset_hotplug_slot(dev->slot->hotplug, probe); } -static void pci_dev_lock(struct pci_dev *dev) +static u16 cxl_port_dvsec(struct pci_dev *dev) +{ + return pci_find_dvsec_capability(dev, PCI_VENDOR_ID_CXL, + PCI_DVSEC_CXL_PORT); +} + +static bool cxl_sbr_masked(struct pci_dev *dev) +{ + u16 dvsec, reg; + int rc; + + dvsec = cxl_port_dvsec(dev); + if (!dvsec) + return false; + + rc = pci_read_config_word(dev, dvsec + PCI_DVSEC_CXL_PORT_CTL, ®); + if (rc || PCI_POSSIBLE_ERROR(reg)) + return false; + + /* + * Per CXL spec r3.1, sec 8.1.5.2, when "Unmask SBR" is 0, the SBR + * bit in Bridge Control has no effect. When 1, the Port generates + * hot reset when the SBR bit is set to 1. + */ + if (reg & PCI_DVSEC_CXL_PORT_CTL_UNMASK_SBR) + return false; + + return true; +} + +static int pci_reset_bus_function(struct pci_dev *dev, bool probe) +{ + struct pci_dev *bridge = pci_upstream_bridge(dev); + int rc; + + /* + * If "dev" is below a CXL port that has SBR control masked, SBR + * won't do anything, so return error. 
+ */ + if (bridge && cxl_sbr_masked(bridge)) { + if (probe) + return 0; + + return -ENOTTY; + } + + rc = pci_dev_reset_slot_function(dev, probe); + if (rc != -ENOTTY) + return rc; + return pci_parent_bus_reset(dev, probe); +} + +static int cxl_reset_bus_function(struct pci_dev *dev, bool probe) +{ + struct pci_dev *bridge; + u16 dvsec, reg, val; + int rc; + + bridge = pci_upstream_bridge(dev); + if (!bridge) + return -ENOTTY; + + dvsec = cxl_port_dvsec(bridge); + if (!dvsec) + return -ENOTTY; + + if (probe) + return 0; + + rc = pci_read_config_word(bridge, dvsec + PCI_DVSEC_CXL_PORT_CTL, ®); + if (rc) + return -ENOTTY; + + if (reg & PCI_DVSEC_CXL_PORT_CTL_UNMASK_SBR) { + val = reg; + } else { + val = reg | PCI_DVSEC_CXL_PORT_CTL_UNMASK_SBR; + pci_write_config_word(bridge, dvsec + PCI_DVSEC_CXL_PORT_CTL, + val); + } + + rc = pci_reset_bus_function(dev, probe); + + if (reg != val) + pci_write_config_word(bridge, dvsec + PCI_DVSEC_CXL_PORT_CTL, + reg); + + return rc; +} + +void pci_dev_lock(struct pci_dev *dev) { - pci_cfg_access_lock(dev); /* block PM suspend, driver probe, etc. */ device_lock(&dev->dev); + pci_cfg_access_lock(dev); } +EXPORT_SYMBOL_GPL(pci_dev_lock); /* Return 1 on successful lock, 0 on contention */ -static int pci_dev_trylock(struct pci_dev *dev) +int pci_dev_trylock(struct pci_dev *dev) { - if (pci_cfg_access_trylock(dev)) { - if (device_trylock(&dev->dev)) + if (device_trylock(&dev->dev)) { + if (pci_cfg_access_trylock(dev)) return 1; - pci_cfg_access_unlock(dev); + device_unlock(&dev->dev); } return 0; } +EXPORT_SYMBOL_GPL(pci_dev_trylock); -static void pci_dev_unlock(struct pci_dev *dev) +void pci_dev_unlock(struct pci_dev *dev) { - device_unlock(&dev->dev); pci_cfg_access_unlock(dev); + device_unlock(&dev->dev); } +EXPORT_SYMBOL_GPL(pci_dev_unlock); static void pci_dev_save_and_disable(struct pci_dev *dev) { @@ -4079,6 +4969,8 @@ static void pci_dev_save_and_disable(struct pci_dev *dev) */ if (err_handler && err_handler->reset_prepare) err_handler->reset_prepare(dev); + else if (dev->driver) + pci_warn(dev, "resetting"); /* * Wake-up device prior to save. PM registers default to D0 after @@ -4112,17 +5004,34 @@ static void pci_dev_restore(struct pci_dev *dev) */ if (err_handler && err_handler->reset_done) err_handler->reset_done(dev); -} + else if (dev->driver) + pci_warn(dev, "reset done"); +} + +/* dev->reset_methods[] is a 0-terminated list of indices into this array */ +const struct pci_reset_fn_method pci_reset_fn_methods[] = { + { }, + { pci_dev_specific_reset, .name = "device_specific" }, + { pci_dev_acpi_reset, .name = "acpi" }, + { pcie_reset_flr, .name = "flr" }, + { pci_af_flr, .name = "af_flr" }, + { pci_pm_reset, .name = "pm" }, + { pci_reset_bus_function, .name = "bus" }, + { cxl_reset_bus_function, .name = "cxl_bus" }, +}; /** - * __pci_reset_function - reset a PCI device function + * __pci_reset_function_locked - reset a PCI device function while holding + * the @dev mutex lock. * @dev: PCI device to reset * * Some devices allow an individual function to be reset without affecting * other functions in the same device. The PCI device must be responsive * to PCI config space in order to use this function. * - * The device function is presumed to be unused when this function is called. + * The device function is presumed to be unused and the caller is holding + * the device mutex lock when this function is called. 
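/*
 * Illustrative sketch, not part of this patch (in-kernel context
 * assumed): pci_dev_lock() now takes device_lock() before the
 * config-access lock, so callers should use the helpers rather than
 * open-coding the pair in the old order.  example_locked_config_read()
 * is a made-up helper.
 */
static void example_locked_config_read(struct pci_dev *dev, u16 *cmd)
{
	pci_dev_lock(dev);		/* device_lock + cfg access lock */
	pci_read_config_word(dev, PCI_COMMAND, cmd);
	pci_dev_unlock(dev);		/* released in the reverse order */
}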
+ * * Resetting the device will make the contents of PCI configuration space * random, so any caller of this must be prepared to reinitialise the * device including MSI, bus mastering, BARs, decoding IO and memory spaces, @@ -4131,100 +5040,122 @@ static void pci_dev_restore(struct pci_dev *dev) * Returns 0 if the device function was successfully reset or negative if the * device doesn't support resetting a single function. */ -int __pci_reset_function(struct pci_dev *dev) +int __pci_reset_function_locked(struct pci_dev *dev) { - int ret; + int i, m, rc; + const struct pci_reset_fn_method *method; - pci_dev_lock(dev); - ret = __pci_reset_function_locked(dev); - pci_dev_unlock(dev); + might_sleep(); - return ret; + /* + * A reset method returns -ENOTTY if it doesn't support this device and + * we should try the next method. + * + * If it returns 0 (success), we're finished. If it returns any other + * error, we're also finished: this indicates that further reset + * mechanisms might be broken on the device. + */ + for (i = 0; i < PCI_NUM_RESET_METHODS; i++) { + m = dev->reset_methods[i]; + if (!m) + return -ENOTTY; + + method = &pci_reset_fn_methods[m]; + pci_dbg(dev, "reset via %s\n", method->name); + rc = method->reset_fn(dev, PCI_RESET_DO_RESET); + if (!rc) + return 0; + + pci_dbg(dev, "%s failed with %d\n", method->name, rc); + if (rc != -ENOTTY) + return rc; + } + + return -ENOTTY; } -EXPORT_SYMBOL_GPL(__pci_reset_function); +EXPORT_SYMBOL_GPL(__pci_reset_function_locked); /** - * __pci_reset_function_locked - reset a PCI device function while holding - * the @dev mutex lock. - * @dev: PCI device to reset + * pci_init_reset_methods - check whether device can be safely reset + * and store supported reset mechanisms. + * @dev: PCI device to check for reset mechanisms * * Some devices allow an individual function to be reset without affecting - * other functions in the same device. The PCI device must be responsive - * to PCI config space in order to use this function. + * other functions in the same device. The PCI device must be in D0-D3hot + * state. * - * The device function is presumed to be unused and the caller is holding - * the device mutex lock when this function is called. - * Resetting the device will make the contents of PCI configuration space - * random, so any caller of this must be prepared to reinitialise the - * device including MSI, bus mastering, BARs, decoding IO and memory spaces, - * etc. - * - * Returns 0 if the device function was successfully reset or negative if the - * device doesn't support resetting a single function. + * Stores reset mechanisms supported by device in reset_methods byte array + * which is a member of struct pci_dev. 
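/*
 * Illustrative sketch, not part of this patch (drivers/pci in-kernel
 * context assumed): the calling convention every pci_reset_fn_methods[]
 * entry follows -- probe first (0 means "supported", -ENOTTY means
 * "try the next method"), then perform the reset.  example_try_method()
 * is a made-up helper.
 */
static int example_try_method(struct pci_dev *dev, u8 m)
{
	int rc = pci_reset_fn_methods[m].reset_fn(dev, PCI_RESET_PROBE);

	if (rc)
		return rc;		/* usually -ENOTTY: not supported */

	return pci_reset_fn_methods[m].reset_fn(dev, PCI_RESET_DO_RESET);
}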
*/ -int __pci_reset_function_locked(struct pci_dev *dev) +void pci_init_reset_methods(struct pci_dev *dev) { - int rc; + int m, i, rc; + + BUILD_BUG_ON(ARRAY_SIZE(pci_reset_fn_methods) != PCI_NUM_RESET_METHODS); might_sleep(); - rc = pci_dev_specific_reset(dev, 0); - if (rc != -ENOTTY) - return rc; - if (pcie_has_flr(dev)) { - pcie_flr(dev); - return 0; + i = 0; + for (m = 1; m < PCI_NUM_RESET_METHODS; m++) { + rc = pci_reset_fn_methods[m].reset_fn(dev, PCI_RESET_PROBE); + if (!rc) + dev->reset_methods[i++] = m; + else if (rc != -ENOTTY) + break; } - rc = pci_af_flr(dev, 0); - if (rc != -ENOTTY) - return rc; - rc = pci_pm_reset(dev, 0); - if (rc != -ENOTTY) - return rc; - rc = pci_dev_reset_slot_function(dev, 0); - if (rc != -ENOTTY) - return rc; - return pci_parent_bus_reset(dev, 0); + + dev->reset_methods[i] = 0; } -EXPORT_SYMBOL_GPL(__pci_reset_function_locked); /** - * pci_probe_reset_function - check whether the device can be safely reset + * pci_reset_function - quiesce and reset a PCI device function * @dev: PCI device to reset * * Some devices allow an individual function to be reset without affecting * other functions in the same device. The PCI device must be responsive * to PCI config space in order to use this function. * - * Returns 0 if the device function can be reset or negative if the + * This function does not just reset the PCI portion of a device, but + * clears all the state associated with the device. This function differs + * from __pci_reset_function_locked() in that it saves and restores device state + * over the reset and takes the PCI device lock. + * + * Returns 0 if the device function was successfully reset or negative if the * device doesn't support resetting a single function. */ -int pci_probe_reset_function(struct pci_dev *dev) +int pci_reset_function(struct pci_dev *dev) { + struct pci_dev *bridge; int rc; - might_sleep(); + if (!pci_reset_supported(dev)) + return -ENOTTY; - rc = pci_dev_specific_reset(dev, 1); - if (rc != -ENOTTY) - return rc; - if (pcie_has_flr(dev)) - return 0; - rc = pci_af_flr(dev, 1); - if (rc != -ENOTTY) - return rc; - rc = pci_pm_reset(dev, 1); - if (rc != -ENOTTY) - return rc; - rc = pci_dev_reset_slot_function(dev, 1); - if (rc != -ENOTTY) - return rc; + /* + * If there's no upstream bridge, no locking is needed since there is + * no upstream bridge configuration to hold consistent. + */ + bridge = pci_upstream_bridge(dev); + if (bridge) + pci_dev_lock(bridge); + + pci_dev_lock(dev); + pci_dev_save_and_disable(dev); + + rc = __pci_reset_function_locked(dev); - return pci_parent_bus_reset(dev, 1); + pci_dev_restore(dev); + pci_dev_unlock(dev); + + if (bridge) + pci_dev_unlock(bridge); + + return rc; } +EXPORT_SYMBOL_GPL(pci_reset_function); /** - * pci_reset_function - quiesce and reset a PCI device function + * pci_reset_function_locked - quiesce and reset a PCI device function * @dev: PCI device to reset * * Some devices allow an individual function to be reset without affecting @@ -4233,31 +5164,29 @@ int pci_probe_reset_function(struct pci_dev *dev) * * This function does not just reset the PCI portion of a device, but * clears all the state associated with the device. This function differs - * from __pci_reset_function in that it saves and restores device state - * over the reset. + * from __pci_reset_function_locked() in that it saves and restores device state + * over the reset. It also differs from pci_reset_function() in that it + * requires the PCI device lock to be held. 
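/*
 * Illustrative sketch, not part of this patch (in-kernel context
 * assumed): callers pick the reset variant matching the locks they
 * already hold.  example_reset() is a made-up helper.
 */
static int example_reset(struct pci_dev *pdev, bool dev_lock_held)
{
	if (!pci_reset_supported(pdev))
		return -ENOTTY;

	return dev_lock_held ? pci_reset_function_locked(pdev)
			     : pci_reset_function(pdev);
}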
* * Returns 0 if the device function was successfully reset or negative if the * device doesn't support resetting a single function. */ -int pci_reset_function(struct pci_dev *dev) +int pci_reset_function_locked(struct pci_dev *dev) { int rc; - rc = pci_probe_reset_function(dev); - if (rc) - return rc; + if (!pci_reset_supported(dev)) + return -ENOTTY; - pci_dev_lock(dev); pci_dev_save_and_disable(dev); rc = __pci_reset_function_locked(dev); pci_dev_restore(dev); - pci_dev_unlock(dev); return rc; } -EXPORT_SYMBOL_GPL(pci_reset_function); +EXPORT_SYMBOL_GPL(pci_reset_function_locked); /** * pci_try_reset_function - quiesce and reset a PCI device function @@ -4269,30 +5198,33 @@ int pci_try_reset_function(struct pci_dev *dev) { int rc; - rc = pci_probe_reset_function(dev); - if (rc) - return rc; + if (!pci_reset_supported(dev)) + return -ENOTTY; if (!pci_dev_trylock(dev)) return -EAGAIN; pci_dev_save_and_disable(dev); rc = __pci_reset_function_locked(dev); + pci_dev_restore(dev); pci_dev_unlock(dev); - pci_dev_restore(dev); return rc; } EXPORT_SYMBOL_GPL(pci_try_reset_function); /* Do any devices on or below this bus prevent a bus reset? */ -static bool pci_bus_resetable(struct pci_bus *bus) +static bool pci_bus_resettable(struct pci_bus *bus) { struct pci_dev *dev; + + if (bus->self && (bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)) + return false; + list_for_each_entry(dev, &bus->devices, bus_list) { if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET || - (dev->subordinate && !pci_bus_resetable(dev->subordinate))) + (dev->subordinate && !pci_bus_resettable(dev->subordinate))) return false; } @@ -4304,10 +5236,12 @@ static void pci_bus_lock(struct pci_bus *bus) { struct pci_dev *dev; + pci_dev_lock(bus->self); list_for_each_entry(dev, &bus->devices, bus_list) { - pci_dev_lock(dev); if (dev->subordinate) pci_bus_lock(dev->subordinate); + else + pci_dev_lock(dev); } } @@ -4319,8 +5253,10 @@ static void pci_bus_unlock(struct pci_bus *bus) list_for_each_entry(dev, &bus->devices, bus_list) { if (dev->subordinate) pci_bus_unlock(dev->subordinate); - pci_dev_unlock(dev); + else + pci_dev_unlock(dev); } + pci_dev_unlock(bus->self); } /* Return 1 on successful lock, 0 on contention */ @@ -4328,15 +5264,15 @@ static int pci_bus_trylock(struct pci_bus *bus) { struct pci_dev *dev; + if (!pci_dev_trylock(bus->self)) + return 0; + list_for_each_entry(dev, &bus->devices, bus_list) { - if (!pci_dev_trylock(dev)) - goto unlock; if (dev->subordinate) { - if (!pci_bus_trylock(dev->subordinate)) { - pci_dev_unlock(dev); + if (!pci_bus_trylock(dev->subordinate)) goto unlock; - } - } + } else if (!pci_dev_trylock(dev)) + goto unlock; } return 1; @@ -4344,21 +5280,27 @@ unlock: list_for_each_entry_continue_reverse(dev, &bus->devices, bus_list) { if (dev->subordinate) pci_bus_unlock(dev->subordinate); - pci_dev_unlock(dev); + else + pci_dev_unlock(dev); } + pci_dev_unlock(bus->self); return 0; } /* Do any devices on or below this slot prevent a bus reset? 
*/ -static bool pci_slot_resetable(struct pci_slot *slot) +static bool pci_slot_resettable(struct pci_slot *slot) { struct pci_dev *dev; + if (slot->bus->self && + (slot->bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)) + return false; + list_for_each_entry(dev, &slot->bus->devices, bus_list) { if (!dev->slot || dev->slot != slot) continue; if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET || - (dev->subordinate && !pci_bus_resetable(dev->subordinate))) + (dev->subordinate && !pci_bus_resettable(dev->subordinate))) return false; } @@ -4373,9 +5315,10 @@ static void pci_slot_lock(struct pci_slot *slot) list_for_each_entry(dev, &slot->bus->devices, bus_list) { if (!dev->slot || dev->slot != slot) continue; - pci_dev_lock(dev); if (dev->subordinate) pci_bus_lock(dev->subordinate); + else + pci_dev_lock(dev); } } @@ -4389,7 +5332,8 @@ static void pci_slot_unlock(struct pci_slot *slot) continue; if (dev->subordinate) pci_bus_unlock(dev->subordinate); - pci_dev_unlock(dev); + else + pci_dev_unlock(dev); } } @@ -4401,14 +5345,13 @@ static int pci_slot_trylock(struct pci_slot *slot) list_for_each_entry(dev, &slot->bus->devices, bus_list) { if (!dev->slot || dev->slot != slot) continue; - if (!pci_dev_trylock(dev)) - goto unlock; if (dev->subordinate) { if (!pci_bus_trylock(dev->subordinate)) { pci_dev_unlock(dev); goto unlock; } - } + } else if (!pci_dev_trylock(dev)) + goto unlock; } return 1; @@ -4419,44 +5362,50 @@ unlock: continue; if (dev->subordinate) pci_bus_unlock(dev->subordinate); - pci_dev_unlock(dev); + else + pci_dev_unlock(dev); } return 0; } -/* Save and disable devices from the top of the tree down */ -static void pci_bus_save_and_disable(struct pci_bus *bus) +/* + * Save and disable devices from the top of the tree down while holding + * the @dev mutex lock for the entire tree. + */ +static void pci_bus_save_and_disable_locked(struct pci_bus *bus) { struct pci_dev *dev; list_for_each_entry(dev, &bus->devices, bus_list) { - pci_dev_lock(dev); pci_dev_save_and_disable(dev); - pci_dev_unlock(dev); if (dev->subordinate) - pci_bus_save_and_disable(dev->subordinate); + pci_bus_save_and_disable_locked(dev->subordinate); } } /* - * Restore devices from top of the tree down - parent bridges need to be - * restored before we can get to subordinate devices. + * Restore devices from top of the tree down while holding @dev mutex lock + * for the entire tree. Parent bridges need to be restored before we can + * get to subordinate devices. */ -static void pci_bus_restore(struct pci_bus *bus) +static void pci_bus_restore_locked(struct pci_bus *bus) { struct pci_dev *dev; list_for_each_entry(dev, &bus->devices, bus_list) { - pci_dev_lock(dev); pci_dev_restore(dev); - pci_dev_unlock(dev); - if (dev->subordinate) - pci_bus_restore(dev->subordinate); + if (dev->subordinate) { + pci_bridge_wait_for_secondary_bus(dev, "bus reset"); + pci_bus_restore_locked(dev->subordinate); + } } } -/* Save and disable devices from the top of the tree down */ -static void pci_slot_save_and_disable(struct pci_slot *slot) +/* + * Save and disable devices from the top of the tree down while holding + * the @dev mutex lock for the entire tree. 
+ */ +static void pci_slot_save_and_disable_locked(struct pci_slot *slot) { struct pci_dev *dev; @@ -4465,15 +5414,16 @@ static void pci_slot_save_and_disable(struct pci_slot *slot) continue; pci_dev_save_and_disable(dev); if (dev->subordinate) - pci_bus_save_and_disable(dev->subordinate); + pci_bus_save_and_disable_locked(dev->subordinate); } } /* - * Restore devices from top of the tree down - parent bridges need to be - * restored before we can get to subordinate devices. + * Restore devices from top of the tree down while holding @dev mutex lock + * for the entire tree. Parent bridges need to be restored before we can + * get to subordinate devices. */ -static void pci_slot_restore(struct pci_slot *slot) +static void pci_slot_restore_locked(struct pci_slot *slot) { struct pci_dev *dev; @@ -4481,16 +5431,18 @@ static void pci_slot_restore(struct pci_slot *slot) if (!dev->slot || dev->slot != slot) continue; pci_dev_restore(dev); - if (dev->subordinate) - pci_bus_restore(dev->subordinate); + if (dev->subordinate) { + pci_bridge_wait_for_secondary_bus(dev, "slot reset"); + pci_bus_restore_locked(dev->subordinate); + } } } -static int pci_slot_reset(struct pci_slot *slot, int probe) +static int pci_slot_reset(struct pci_slot *slot, bool probe) { int rc; - if (!slot || !pci_slot_resetable(slot)) + if (!slot || !pci_slot_resettable(slot)) return -ENOTTY; if (!probe) @@ -4514,12 +5466,12 @@ static int pci_slot_reset(struct pci_slot *slot, int probe) */ int pci_probe_reset_slot(struct pci_slot *slot) { - return pci_slot_reset(slot, 1); + return pci_slot_reset(slot, PCI_RESET_PROBE); } EXPORT_SYMBOL_GPL(pci_probe_reset_slot); /** - * pci_reset_slot - reset a PCI slot + * __pci_reset_slot - Try to reset a PCI slot * @slot: PCI slot to reset * * A PCI bus may host multiple slots, each slot may support a reset mechanism @@ -4531,58 +5483,33 @@ EXPORT_SYMBOL_GPL(pci_probe_reset_slot); * through this function. PCI config space of all devices in the slot and * behind the slot is saved before and restored after reset. * - * Return 0 on success, non-zero on error. 
- */ -int pci_reset_slot(struct pci_slot *slot) -{ - int rc; - - rc = pci_slot_reset(slot, 1); - if (rc) - return rc; - - pci_slot_save_and_disable(slot); - - rc = pci_slot_reset(slot, 0); - - pci_slot_restore(slot); - - return rc; -} -EXPORT_SYMBOL_GPL(pci_reset_slot); - -/** - * pci_try_reset_slot - Try to reset a PCI slot - * @slot: PCI slot to reset - * * Same as above except return -EAGAIN if the slot cannot be locked */ -int pci_try_reset_slot(struct pci_slot *slot) +static int __pci_reset_slot(struct pci_slot *slot) { int rc; - rc = pci_slot_reset(slot, 1); + rc = pci_slot_reset(slot, PCI_RESET_PROBE); if (rc) return rc; - pci_slot_save_and_disable(slot); - if (pci_slot_trylock(slot)) { + pci_slot_save_and_disable_locked(slot); might_sleep(); - rc = pci_reset_hotplug_slot(slot->hotplug, 0); + rc = pci_reset_hotplug_slot(slot->hotplug, PCI_RESET_DO_RESET); + pci_slot_restore_locked(slot); pci_slot_unlock(slot); } else rc = -EAGAIN; - pci_slot_restore(slot); - return rc; } -EXPORT_SYMBOL_GPL(pci_try_reset_slot); -static int pci_bus_reset(struct pci_bus *bus, int probe) +static int pci_bus_reset(struct pci_bus *bus, bool probe) { - if (!bus->self || !pci_bus_resetable(bus)) + int ret; + + if (!bus->self || !pci_bus_resettable(bus)) return -ENOTTY; if (probe) @@ -4592,11 +5519,46 @@ static int pci_bus_reset(struct pci_bus *bus, int probe) might_sleep(); - pci_reset_bridge_secondary_bus(bus->self); + ret = pci_bridge_secondary_bus_reset(bus->self); pci_bus_unlock(bus); + return ret; +} + +/** + * pci_bus_error_reset - reset the bridge's subordinate bus + * @bridge: The parent device that connects to the bus to reset + * + * This function will first try to reset the slots on this bus if the method is + * available. If slot reset fails or is not available, this will fall back to a + * secondary bus reset. + */ +int pci_bus_error_reset(struct pci_dev *bridge) +{ + struct pci_bus *bus = bridge->subordinate; + struct pci_slot *slot; + + if (!bus) + return -ENOTTY; + + mutex_lock(&pci_slot_mutex); + if (list_empty(&bus->slots)) + goto bus_reset; + + list_for_each_entry(slot, &bus->slots, list) + if (pci_probe_reset_slot(slot)) + goto bus_reset; + + list_for_each_entry(slot, &bus->slots, list) + if (pci_slot_reset(slot, PCI_RESET_DO_RESET)) + goto bus_reset; + + mutex_unlock(&pci_slot_mutex); return 0; +bus_reset: + mutex_unlock(&pci_slot_mutex); + return pci_bus_reset(bridge->subordinate, PCI_RESET_DO_RESET); } /** @@ -4607,72 +5569,55 @@ static int pci_bus_reset(struct pci_bus *bus, int probe) */ int pci_probe_reset_bus(struct pci_bus *bus) { - return pci_bus_reset(bus, 1); + return pci_bus_reset(bus, PCI_RESET_PROBE); } EXPORT_SYMBOL_GPL(pci_probe_reset_bus); /** - * pci_reset_bus - reset a PCI bus + * __pci_reset_bus - Try to reset a PCI bus * @bus: top level PCI bus to reset * - * Do a bus reset on the given bus and any subordinate buses, saving - * and restoring state of all devices. - * - * Return 0 on success, non-zero on error. 
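/*
 * Illustrative sketch, not part of this patch (in-kernel context
 * assumed): AER-style recovery code can reset everything below a port
 * with pci_bus_error_reset() and map the result onto the
 * pci_ers_result_t it must return.  example_port_slot_reset() is a
 * made-up name.
 */
static pci_ers_result_t example_port_slot_reset(struct pci_dev *port)
{
	return pci_bus_error_reset(port) ? PCI_ERS_RESULT_DISCONNECT
					 : PCI_ERS_RESULT_RECOVERED;
}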
+ * Same as above except return -EAGAIN if the bus cannot be locked */ -int pci_reset_bus(struct pci_bus *bus) +int __pci_reset_bus(struct pci_bus *bus) { int rc; - rc = pci_bus_reset(bus, 1); + rc = pci_bus_reset(bus, PCI_RESET_PROBE); if (rc) return rc; - pci_bus_save_and_disable(bus); - - rc = pci_bus_reset(bus, 0); - - pci_bus_restore(bus); + if (pci_bus_trylock(bus)) { + pci_bus_save_and_disable_locked(bus); + might_sleep(); + rc = pci_bridge_secondary_bus_reset(bus->self); + pci_bus_restore_locked(bus); + pci_bus_unlock(bus); + } else + rc = -EAGAIN; return rc; } -EXPORT_SYMBOL_GPL(pci_reset_bus); /** - * pci_try_reset_bus - Try to reset a PCI bus - * @bus: top level PCI bus to reset + * pci_reset_bus - Try to reset a PCI bus + * @pdev: top level PCI device to reset via slot/bus * * Same as above except return -EAGAIN if the bus cannot be locked */ -int pci_try_reset_bus(struct pci_bus *bus) +int pci_reset_bus(struct pci_dev *pdev) { - int rc; - - rc = pci_bus_reset(bus, 1); - if (rc) - return rc; - - pci_bus_save_and_disable(bus); - - if (pci_bus_trylock(bus)) { - might_sleep(); - pci_reset_bridge_secondary_bus(bus->self); - pci_bus_unlock(bus); - } else - rc = -EAGAIN; - - pci_bus_restore(bus); - - return rc; + return (!pci_probe_reset_slot(pdev->slot)) ? + __pci_reset_slot(pdev->slot) : __pci_reset_bus(pdev->bus); } -EXPORT_SYMBOL_GPL(pci_try_reset_bus); +EXPORT_SYMBOL_GPL(pci_reset_bus); /** * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count * @dev: PCI device to query * - * Returns mmrbc: maximum designed memory read count in bytes - * or appropriate error value. + * Returns mmrbc: maximum designed memory read count in bytes or + * appropriate error value. */ int pcix_get_max_mmrbc(struct pci_dev *dev) { @@ -4686,7 +5631,7 @@ int pcix_get_max_mmrbc(struct pci_dev *dev) if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat)) return -EINVAL; - return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21); + return 512 << FIELD_GET(PCI_X_STATUS_MAX_READ, stat); } EXPORT_SYMBOL(pcix_get_max_mmrbc); @@ -4694,8 +5639,8 @@ EXPORT_SYMBOL(pcix_get_max_mmrbc); * pcix_get_mmrbc - get PCI-X maximum memory read byte count * @dev: PCI device to query * - * Returns mmrbc: maximum memory read count in bytes - * or appropriate error value. + * Returns mmrbc: maximum memory read count in bytes or appropriate error + * value. */ int pcix_get_mmrbc(struct pci_dev *dev) { @@ -4709,7 +5654,7 @@ int pcix_get_mmrbc(struct pci_dev *dev) if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd)) return -EINVAL; - return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2); + return 512 << FIELD_GET(PCI_X_CMD_MAX_READ, cmd); } EXPORT_SYMBOL(pcix_get_mmrbc); @@ -4719,7 +5664,7 @@ EXPORT_SYMBOL(pcix_get_mmrbc); * @mmrbc: maximum memory read count in bytes * valid values are 512, 1024, 2048, 4096 * - * If possible sets maximum memory read byte count, some bridges have erratas + * If possible sets maximum memory read byte count, some bridges have errata * that prevent this. 
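/*
 * Illustrative sketch, not part of this patch (assumes
 * <linux/bitfield.h>): the FIELD_GET()/FIELD_PREP() conversions used
 * from here on are exact replacements for the old open-coded shifts;
 * with PCI_X_CMD_MAX_READ == 0x000c the two forms below are identical.
 * example_max_read_field() is a made-up helper.
 */
static u16 example_max_read_field(u16 cmd)
{
	/* before: return (cmd & PCI_X_CMD_MAX_READ) >> 2; */
	return FIELD_GET(PCI_X_CMD_MAX_READ, cmd);
}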
*/ int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc) @@ -4740,19 +5685,19 @@ int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc) if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat)) return -EINVAL; - if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21) + if (v > FIELD_GET(PCI_X_STATUS_MAX_READ, stat)) return -E2BIG; if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd)) return -EINVAL; - o = (cmd & PCI_X_CMD_MAX_READ) >> 2; + o = FIELD_GET(PCI_X_CMD_MAX_READ, cmd); if (o != v) { if (v > o && (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC)) return -EIO; cmd &= ~PCI_X_CMD_MAX_READ; - cmd |= v << 2; + cmd |= FIELD_PREP(PCI_X_CMD_MAX_READ, v); if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd)) return -EIO; } @@ -4764,8 +5709,7 @@ EXPORT_SYMBOL(pcix_set_mmrbc); * pcie_get_readrq - get PCI Express read request size * @dev: PCI device to query * - * Returns maximum memory read request in bytes - * or appropriate error value. + * Returns maximum memory read request in bytes or appropriate error value. */ int pcie_get_readrq(struct pci_dev *dev) { @@ -4773,7 +5717,7 @@ int pcie_get_readrq(struct pci_dev *dev) pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl); - return 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12); + return 128 << FIELD_GET(PCI_EXP_DEVCTL_READRQ, ctl); } EXPORT_SYMBOL(pcie_get_readrq); @@ -4788,15 +5732,17 @@ EXPORT_SYMBOL(pcie_get_readrq); int pcie_set_readrq(struct pci_dev *dev, int rq) { u16 v; + int ret; + unsigned int firstbit; + struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus); if (rq < 128 || rq > 4096 || !is_power_of_2(rq)) return -EINVAL; /* - * If using the "performance" PCIe config, we clamp the - * read rq size to the max packet size to prevent the - * host bridge generating requests larger than we can - * cope with + * If using the "performance" PCIe config, we clamp the read rq + * size to the max packet size to keep the host bridge from + * generating requests larger than we can cope with. 
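/*
 * Illustrative sketch, not part of this patch (in-kernel context
 * assumed): both MPS and MRRS are encoded as powers of two starting at
 * 128 bytes, which is why the accessors return "128 << field".
 * example_show_tlp_sizes() is a made-up helper.
 */
static void example_show_tlp_sizes(struct pci_dev *pdev)
{
	pci_info(pdev, "MPS %d bytes, MRRS %d bytes\n",
		 pcie_get_mps(pdev), pcie_get_readrq(pdev));
}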
*/ if (pcie_bus_config == PCIE_BUS_PERFORMANCE) { int mps = pcie_get_mps(dev); @@ -4805,10 +5751,24 @@ int pcie_set_readrq(struct pci_dev *dev, int rq) rq = mps; } - v = (ffs(rq) - 8) << 12; + firstbit = ffs(rq); + if (firstbit < 8) + return -EINVAL; + v = FIELD_PREP(PCI_EXP_DEVCTL_READRQ, firstbit - 8); + + if (bridge->no_inc_mrrs) { + int max_mrrs = pcie_get_readrq(dev); - return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL, + if (rq > max_mrrs) { + pci_info(dev, "can't set Max_Read_Request_Size to %d; max is %d\n", rq, max_mrrs); + return -EINVAL; + } + } + + ret = pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_READRQ, v); + + return pcibios_err_to_errno(ret); } EXPORT_SYMBOL(pcie_set_readrq); @@ -4824,7 +5784,7 @@ int pcie_get_mps(struct pci_dev *dev) pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl); - return 128 << ((ctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5); + return 128 << FIELD_GET(PCI_EXP_DEVCTL_PAYLOAD, ctl); } EXPORT_SYMBOL(pcie_get_mps); @@ -4839,6 +5799,7 @@ EXPORT_SYMBOL(pcie_get_mps); int pcie_set_mps(struct pci_dev *dev, int mps) { u16 v; + int ret; if (mps < 128 || mps > 4096 || !is_power_of_2(mps)) return -EINVAL; @@ -4846,55 +5807,244 @@ int pcie_set_mps(struct pci_dev *dev, int mps) v = ffs(mps) - 8; if (v > dev->pcie_mpss) return -EINVAL; - v <<= 5; + v = FIELD_PREP(PCI_EXP_DEVCTL_PAYLOAD, v); - return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL, + ret = pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_PAYLOAD, v); + + return pcibios_err_to_errno(ret); } EXPORT_SYMBOL(pcie_set_mps); +static enum pci_bus_speed to_pcie_link_speed(u16 lnksta) +{ + return pcie_link_speed[FIELD_GET(PCI_EXP_LNKSTA_CLS, lnksta)]; +} + +int pcie_link_speed_mbps(struct pci_dev *pdev) +{ + u16 lnksta; + int err; + + err = pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnksta); + if (err) + return err; + + return pcie_dev_speed_mbps(to_pcie_link_speed(lnksta)); +} +EXPORT_SYMBOL(pcie_link_speed_mbps); + /** - * pcie_get_minimum_link - determine minimum link settings of a PCI device + * pcie_bandwidth_available - determine minimum link settings of a PCIe + * device and its bandwidth limitation * @dev: PCI device to query - * @speed: storage for minimum speed - * @width: storage for minimum width + * @limiting_dev: storage for device causing the bandwidth limitation + * @speed: storage for speed of limiting device + * @width: storage for width of limiting device * - * This function will walk up the PCI device chain and determine the minimum - * link width and speed of the device. + * Walk up the PCI device chain and find the point where the minimum + * bandwidth is available. Return the bandwidth available there and (if + * limiting_dev, speed, and width pointers are supplied) information about + * that point. The bandwidth returned is in Mb/s, i.e., megabits/second of + * raw bandwidth. 
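/*
 * Illustrative sketch, not part of this patch (in-kernel context
 * assumed): a driver asking which upstream link limits its throughput.
 * example_check_bw() is a made-up helper.
 */
static void example_check_bw(struct pci_dev *pdev)
{
	enum pci_bus_speed speed;
	enum pcie_link_width width;
	struct pci_dev *limit = NULL;
	u32 bw = pcie_bandwidth_available(pdev, &limit, &speed, &width);

	pci_info(pdev, "%u Mb/s available, limited at %s\n",
		 bw, limit ? pci_name(limit) : "<unknown>");
}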
*/ -int pcie_get_minimum_link(struct pci_dev *dev, enum pci_bus_speed *speed, - enum pcie_link_width *width) +u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev, + enum pci_bus_speed *speed, + enum pcie_link_width *width) { - int ret; + u16 lnksta; + enum pci_bus_speed next_speed; + enum pcie_link_width next_width; + u32 bw, next_bw; + + if (speed) + *speed = PCI_SPEED_UNKNOWN; + if (width) + *width = PCIE_LNK_WIDTH_UNKNOWN; - *speed = PCI_SPEED_UNKNOWN; - *width = PCIE_LNK_WIDTH_UNKNOWN; + bw = 0; while (dev) { - u16 lnksta; - enum pci_bus_speed next_speed; - enum pcie_link_width next_width; + pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta); - ret = pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta); - if (ret) - return ret; + next_speed = to_pcie_link_speed(lnksta); + next_width = FIELD_GET(PCI_EXP_LNKSTA_NLW, lnksta); - next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS]; - next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >> - PCI_EXP_LNKSTA_NLW_SHIFT; + next_bw = next_width * PCIE_SPEED2MBS_ENC(next_speed); - if (next_speed < *speed) - *speed = next_speed; + /* Check if current device limits the total bandwidth */ + if (!bw || next_bw <= bw) { + bw = next_bw; - if (next_width < *width) - *width = next_width; + if (limiting_dev) + *limiting_dev = dev; + if (speed) + *speed = next_speed; + if (width) + *width = next_width; + } - dev = dev->bus->self; + dev = pci_upstream_bridge(dev); } - return 0; + return bw; +} +EXPORT_SYMBOL(pcie_bandwidth_available); + +/** + * pcie_get_supported_speeds - query Supported Link Speed Vector + * @dev: PCI device to query + * + * Query @dev supported link speeds. + * + * Implementation Note in PCIe r6.0 sec 7.5.3.18 recommends determining + * supported link speeds using the Supported Link Speeds Vector in the Link + * Capabilities 2 Register (when available). + * + * Link Capabilities 2 was added in PCIe r3.0, sec 7.8.18. + * + * Without Link Capabilities 2, i.e., prior to PCIe r3.0, Supported Link + * Speeds field in Link Capabilities is used and only 2.5 GT/s and 5.0 GT/s + * speeds were defined. + * + * For @dev without Supported Link Speed Vector, the field is synthesized + * from the Max Link Speed field in the Link Capabilities Register. + * + * Return: Supported Link Speeds Vector (+ reserved 0 at LSB). + */ +u8 pcie_get_supported_speeds(struct pci_dev *dev) +{ + u32 lnkcap2, lnkcap; + u8 speeds; + + /* + * Speeds retain the reserved 0 at LSB before PCIe Supported Link + * Speeds Vector to allow using SLS Vector bit defines directly. + */ + pcie_capability_read_dword(dev, PCI_EXP_LNKCAP2, &lnkcap2); + speeds = lnkcap2 & PCI_EXP_LNKCAP2_SLS; + + /* Ignore speeds higher than Max Link Speed */ + pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap); + speeds &= GENMASK(lnkcap & PCI_EXP_LNKCAP_SLS, 0); + + /* PCIe r3.0-compliant */ + if (speeds) + return speeds; + + /* Synthesize from the Max Link Speed field */ + if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_5_0GB) + speeds = PCI_EXP_LNKCAP2_SLS_5_0GB | PCI_EXP_LNKCAP2_SLS_2_5GB; + else if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_2_5GB) + speeds = PCI_EXP_LNKCAP2_SLS_2_5GB; + + return speeds; +} + +/** + * pcie_get_speed_cap - query for the PCI device's link speed capability + * @dev: PCI device to query + * + * Query the PCI device speed capability. + * + * Return: the maximum link speed supported by the device. 
+ */ +enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev) +{ + return PCIE_LNKCAP2_SLS2SPEED(dev->supported_speeds); +} +EXPORT_SYMBOL(pcie_get_speed_cap); + +/** + * pcie_get_width_cap - query for the PCI device's link width capability + * @dev: PCI device to query + * + * Query the PCI device width capability. Return the maximum link width + * supported by the device. + */ +enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev) +{ + u32 lnkcap; + + pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap); + if (lnkcap) + return FIELD_GET(PCI_EXP_LNKCAP_MLW, lnkcap); + + return PCIE_LNK_WIDTH_UNKNOWN; +} +EXPORT_SYMBOL(pcie_get_width_cap); + +/** + * pcie_bandwidth_capable - calculate a PCI device's link bandwidth capability + * @dev: PCI device + * @speed: storage for link speed + * @width: storage for link width + * + * Calculate a PCI device's link bandwidth by querying for its link speed + * and width, multiplying them, and applying encoding overhead. The result + * is in Mb/s, i.e., megabits/second of raw bandwidth. + */ +static u32 pcie_bandwidth_capable(struct pci_dev *dev, + enum pci_bus_speed *speed, + enum pcie_link_width *width) +{ + *speed = pcie_get_speed_cap(dev); + *width = pcie_get_width_cap(dev); + + if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN) + return 0; + + return *width * PCIE_SPEED2MBS_ENC(*speed); +} + +/** + * __pcie_print_link_status - Report the PCI device's link speed and width + * @dev: PCI device to query + * @verbose: Print info even when enough bandwidth is available + * + * If the available bandwidth at the device is less than the device is + * capable of, report the device's maximum possible bandwidth and the + * upstream link that limits its performance. If @verbose, always print + * the available bandwidth, even if the device isn't constrained. + */ +void __pcie_print_link_status(struct pci_dev *dev, bool verbose) +{ + enum pcie_link_width width, width_cap; + enum pci_bus_speed speed, speed_cap; + struct pci_dev *limiting_dev = NULL; + u32 bw_avail, bw_cap; + char *flit_mode = ""; + + bw_cap = pcie_bandwidth_capable(dev, &speed_cap, &width_cap); + bw_avail = pcie_bandwidth_available(dev, &limiting_dev, &speed, &width); + + if (dev->bus && dev->bus->flit_mode) + flit_mode = ", in Flit mode"; + + if (bw_avail >= bw_cap && verbose) + pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth (%s x%d link)%s\n", + bw_cap / 1000, bw_cap % 1000, + pci_speed_string(speed_cap), width_cap, flit_mode); + else if (bw_avail < bw_cap) + pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth, limited by %s x%d link at %s (capable of %u.%03u Gb/s with %s x%d link)%s\n", + bw_avail / 1000, bw_avail % 1000, + pci_speed_string(speed), width, + limiting_dev ? pci_name(limiting_dev) : "<unknown>", + bw_cap / 1000, bw_cap % 1000, + pci_speed_string(speed_cap), width_cap, flit_mode); +} + +/** + * pcie_print_link_status - Report the PCI device's link speed and width + * @dev: PCI device to query + * + * Report the available bandwidth at the device. 
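/*
 * Illustrative sketch, not part of this patch (in-kernel context
 * assumed): the usual caller is a high-bandwidth driver's probe
 * routine, so users can see at boot whether the card sits in a
 * narrower or slower slot than it supports.  example_probe() and the
 * enable call shown are illustrative only.
 */
static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int rc = pci_enable_device_mem(pdev);

	if (rc)
		return rc;

	pcie_print_link_status(pdev);
	return 0;
}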
+ */
+void pcie_print_link_status(struct pci_dev *dev)
+{
+	__pcie_print_link_status(dev, true);
 }
-EXPORT_SYMBOL(pcie_get_minimum_link);
+EXPORT_SYMBOL(pcie_print_link_status);

 /**
  * pci_select_bars - Make BAR mask from the type of resource
@@ -4955,7 +6105,7 @@ int pci_set_vga_state(struct pci_dev *dev, bool decode,

 	if (flags & PCI_VGA_STATE_CHANGE_DECODES) {
 		pci_read_config_word(dev, PCI_COMMAND, &cmd);
-		if (decode == true)
+		if (decode)
 			cmd |= command_bits;
 		else
 			cmd &= ~command_bits;
@@ -4971,7 +6121,7 @@ int pci_set_vga_state(struct pci_dev *dev, bool decode,
 	if (bridge) {
 		pci_read_config_word(bridge, PCI_BRIDGE_CONTROL, &cmd);
-		if (decode == true)
+		if (decode)
 			cmd |= PCI_BRIDGE_CTL_VGA;
 		else
 			cmd &= ~PCI_BRIDGE_CTL_VGA;
@@ -4983,27 +6133,68 @@ int pci_set_vga_state(struct pci_dev *dev, bool decode,
 	return 0;
 }

+#ifdef CONFIG_ACPI
+bool pci_pr3_present(struct pci_dev *pdev)
+{
+	struct acpi_device *adev;
+
+	if (acpi_disabled)
+		return false;
+
+	adev = ACPI_COMPANION(&pdev->dev);
+	if (!adev)
+		return false;
+
+	return adev->power.flags.power_resources &&
+		acpi_has_method(adev->handle, "_PR3");
+}
+EXPORT_SYMBOL_GPL(pci_pr3_present);
+#endif
+
 /**
  * pci_add_dma_alias - Add a DMA devfn alias for a device
  * @dev: the PCI device for which alias is added
- * @devfn: alias slot and function
+ * @devfn_from: alias slot and function
+ * @nr_devfns: number of subsequent devfns to alias
  *
- * This helper encodes 8-bit devfn as bit number in dma_alias_mask.
- * It should be called early, preferably as PCI fixup header quirk.
+ * This helper encodes an 8-bit devfn as a bit number in dma_alias_mask
+ * which is used to program permissible bus-devfn source addresses for DMA
+ * requests in an IOMMU. These aliases factor into IOMMU group creation
+ * and are useful for devices generating DMA requests beyond or different
+ * from their logical bus-devfn. Examples include device quirks where the
+ * device simply uses the wrong devfn, as well as non-transparent bridges
+ * where the alias may be a proxy for devices in another domain.
+ *
+ * IOMMU group creation is performed during device discovery or addition,
+ * prior to any potential DMA mapping and therefore prior to driver probing
+ * (especially for userspace assigned devices where IOMMU group definition
+ * cannot be left as a userspace activity). DMA aliases should therefore
+ * be configured via quirks, such as the PCI fixup header quirk.
 */
-void pci_add_dma_alias(struct pci_dev *dev, u8 devfn)
+void pci_add_dma_alias(struct pci_dev *dev, u8 devfn_from,
+		       unsigned int nr_devfns)
 {
+	int devfn_to;
+
+	nr_devfns = min(nr_devfns, (unsigned int)MAX_NR_DEVFNS - devfn_from);
+	devfn_to = devfn_from + nr_devfns - 1;
+
 	if (!dev->dma_alias_mask)
-		dev->dma_alias_mask = kcalloc(BITS_TO_LONGS(U8_MAX),
-					      sizeof(long), GFP_KERNEL);
+		dev->dma_alias_mask = bitmap_zalloc(MAX_NR_DEVFNS, GFP_KERNEL);
 	if (!dev->dma_alias_mask) {
-		dev_warn(&dev->dev, "Unable to allocate DMA alias mask\n");
+		pci_warn(dev, "Unable to allocate DMA alias mask\n");
 		return;
 	}

-	set_bit(devfn, dev->dma_alias_mask);
-	dev_info(&dev->dev, "Enabling fixed DMA alias to %02x.%d\n",
-		 PCI_SLOT(devfn), PCI_FUNC(devfn));
+	bitmap_set(dev->dma_alias_mask, devfn_from, nr_devfns);
+
+	if (nr_devfns == 1)
+		pci_info(dev, "Enabling fixed DMA alias to %02x.%d\n",
+			 PCI_SLOT(devfn_from), PCI_FUNC(devfn_from));
+	else if (nr_devfns > 1)
+		pci_info(dev, "Enabling fixed DMA alias for devfn range from %02x.%d to %02x.%d\n",
+			 PCI_SLOT(devfn_from), PCI_FUNC(devfn_from),
+			 PCI_SLOT(devfn_to), PCI_FUNC(devfn_to));
 }

 bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2)
@@ -5011,13 +6202,17 @@ bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2)
 	return (dev1->dma_alias_mask &&
 		test_bit(dev2->devfn, dev1->dma_alias_mask)) ||
 	       (dev2->dma_alias_mask &&
-		test_bit(dev1->devfn, dev2->dma_alias_mask));
+		test_bit(dev1->devfn, dev2->dma_alias_mask)) ||
+	       pci_real_dma_dev(dev1) == dev2 ||
+	       pci_real_dma_dev(dev2) == dev1;
 }

 bool pci_device_is_present(struct pci_dev *pdev)
 {
 	u32 v;

+	/* Check PF if pdev is a VF, since VF Vendor/Device IDs are 0xffff */
+	pdev = pci_physfn(pdev);
 	if (pci_dev_is_disconnected(pdev))
 		return false;
 	return pci_bus_read_dev_vendor_id(pdev->bus, pdev->devfn, &v, 0);
@@ -5035,13 +6230,39 @@ void pci_ignore_hotplug(struct pci_dev *dev)
 }
 EXPORT_SYMBOL_GPL(pci_ignore_hotplug);

+/**
+ * pci_real_dma_dev - Get PCI DMA device for PCI device
+ * @dev: the PCI device that may have a PCI DMA alias
+ *
+ * Permits the platform to provide architecture-specific functionality to
+ * devices needing to alias DMA to another PCI device on another PCI bus. If
+ * the PCI device is on the same bus, it is recommended to use
+ * pci_add_dma_alias(). This is the default implementation. Architecture
+ * implementations can override this.
+ */
+struct pci_dev __weak *pci_real_dma_dev(struct pci_dev *dev)
+{
+	return dev;
+}
+
 resource_size_t __weak pcibios_default_alignment(void)
 {
 	return 0;
 }

-#define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE
-static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0};
+/*
+ * Arches that don't want to expose struct resource to userland as-is in
+ * sysfs and /proc can implement their own pci_resource_to_user().
+ */
+void __weak pci_resource_to_user(const struct pci_dev *dev, int bar,
+				 const struct resource *rsrc,
+				 resource_size_t *start, resource_size_t *end)
+{
+	*start = rsrc->start;
+	*end = rsrc->end;
+}
+
+static char *resource_alignment_param;
 static DEFINE_SPINLOCK(resource_alignment_lock);

 /**
@@ -5055,14 +6276,14 @@ static DEFINE_SPINLOCK(resource_alignment_lock);
 static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev,
 							 bool *resize)
 {
-	int seg, bus, slot, func, align_order, count;
-	unsigned short vendor, device, subsystem_vendor, subsystem_device;
+	int align_order, count;
 	resource_size_t align = pcibios_default_alignment();
-	char *p;
+	const char *p;
+	int ret;

 	spin_lock(&resource_alignment_lock);
 	p = resource_alignment_param;
-	if (!*p && !align)
+	if (!p || !*p)
 		goto out;
 	if (pci_has_flag(PCI_PROBE_ONLY)) {
 		align = 0;
@@ -5073,63 +6294,28 @@ static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev,
 	while (*p) {
 		count = 0;
 		if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
-			p[count] == '@') {
+		    p[count] == '@') {
 			p += count + 1;
-		} else {
-			align_order = -1;
-		}
-		if (strncmp(p, "pci:", 4) == 0) {
-			/* PCI vendor/device (subvendor/subdevice) ids are specified */
-			p += 4;
-			if (sscanf(p, "%hx:%hx:%hx:%hx%n",
-				&vendor, &device, &subsystem_vendor, &subsystem_device, &count) != 4) {
-				if (sscanf(p, "%hx:%hx%n", &vendor, &device, &count) != 2) {
-					printk(KERN_ERR "PCI: Can't parse resource_alignment parameter: pci:%s\n",
-						p);
-					break;
-				}
-				subsystem_vendor = subsystem_device = 0;
-			}
-			p += count;
-			if ((!vendor || (vendor == dev->vendor)) &&
-				(!device || (device == dev->device)) &&
-				(!subsystem_vendor || (subsystem_vendor == dev->subsystem_vendor)) &&
-				(!subsystem_device || (subsystem_device == dev->subsystem_device))) {
-				*resize = true;
-				if (align_order == -1)
-					align = PAGE_SIZE;
-				else
-					align = 1 << align_order;
-				/* Found */
-				break;
+			if (align_order > 63) {
+				pr_err("PCI: Invalid requested alignment (order %d)\n",
+				       align_order);
+				align_order = PAGE_SHIFT;
 			}
+		} else {
+			align_order = PAGE_SHIFT;
 		}
-		else {
-			if (sscanf(p, "%x:%x:%x.%x%n",
-				&seg, &bus, &slot, &func, &count) != 4) {
-				seg = 0;
-				if (sscanf(p, "%x:%x.%x%n",
-						&bus, &slot, &func, &count) != 3) {
-					/* Invalid format */
-					printk(KERN_ERR "PCI: Can't parse resource_alignment parameter: %s\n",
-						p);
-					break;
-				}
-			}
-			p += count;
-			if (seg == pci_domain_nr(dev->bus) &&
-				bus == dev->bus->number &&
-				slot == PCI_SLOT(dev->devfn) &&
-				func == PCI_FUNC(dev->devfn)) {
-				*resize = true;
-				if (align_order == -1)
-					align = PAGE_SIZE;
-				else
-					align = 1 << align_order;
-				/* Found */
-				break;
-			}
+
+		ret = pci_dev_str_match(dev, p, &p);
+		if (ret == 1) {
+			*resize = true;
+			align = 1ULL << align_order;
+			break;
+		} else if (ret < 0) {
+			pr_err("PCI: Can't parse resource_alignment parameter: %s\n",
+			       p);
+			break;
 		}
+
 		if (*p != ';' && *p != ',') {
 			/* End of param or invalid format */
 			break;
@@ -5145,14 +6331,15 @@ static void pci_request_resource_alignment(struct pci_dev *dev, int bar,
 					   resource_size_t align, bool resize)
 {
 	struct resource *r = &dev->resource[bar];
+	const char *r_name = pci_resource_name(dev, bar);
 	resource_size_t size;

 	if (!(r->flags & IORESOURCE_MEM))
 		return;

 	if (r->flags & IORESOURCE_PCI_FIXED) {
-		dev_info(&dev->dev, "BAR%d %pR: ignoring requested alignment %#llx\n",
-			 bar, r, (unsigned long long)align);
+		pci_info(dev, "%s %pR: ignoring requested alignment %#llx\n",
+			 r_name, r, (unsigned long long)align);
 		return;
 	}

@@ -5188,8 +6375,8 @@ static void pci_request_resource_alignment(struct pci_dev *dev, int bar,
 	 * devices and we use the second.
 	 */
-	dev_info(&dev->dev, "BAR%d %pR: requesting alignment to %#llx\n",
-		 bar, r, (unsigned long long)align);
+	pci_info(dev, "%s %pR: requesting alignment to %#llx\n",
+		 r_name, r, (unsigned long long)align);

 	if (resize) {
 		r->start = 0;
@@ -5197,8 +6384,7 @@ static void pci_request_resource_alignment(struct pci_dev *dev, int bar,
 	} else {
 		r->flags &= ~IORESOURCE_SIZEALIGN;
 		r->flags |= IORESOURCE_STARTALIGN;
-		r->start = align;
-		r->end = r->start + size - 1;
+		resource_set_range(r, align, size);
 	}
 	r->flags |= IORESOURCE_UNSET;
 }
@@ -5234,13 +6420,10 @@ void pci_reassigndev_resource_alignment(struct pci_dev *dev)

 	if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
 	    (dev->class >> 8) == PCI_CLASS_BRIDGE_HOST) {
-		dev_warn(&dev->dev,
-			 "Can't reassign resources to host bridge.\n");
+		pci_warn(dev, "Can't reassign resources to host bridge\n");
 		return;
 	}

-	dev_info(&dev->dev,
-		 "Disabling memory decoding and releasing memory resources.\n");
 	pci_read_config_word(dev, PCI_COMMAND, &command);
 	command &= ~PCI_COMMAND_MEMORY;
 	pci_write_config_word(dev, PCI_COMMAND, command);
@@ -5253,8 +6436,7 @@ void pci_reassigndev_resource_alignment(struct pci_dev *dev)
 	 * to enable the kernel to reassign new resource
 	 * window later on.
 	 */
-	if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE &&
-	    (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
+	if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
 		for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
 			r = &dev->resource[i];
 			if (!(r->flags & IORESOURCE_MEM))
@@ -5267,39 +6449,50 @@ void pci_reassigndev_resource_alignment(struct pci_dev *dev)
 	}
 }

-static ssize_t pci_set_resource_alignment_param(const char *buf, size_t count)
+static ssize_t resource_alignment_show(const struct bus_type *bus, char *buf)
 {
-	if (count > RESOURCE_ALIGNMENT_PARAM_SIZE - 1)
-		count = RESOURCE_ALIGNMENT_PARAM_SIZE - 1;
+	size_t count = 0;
+
 	spin_lock(&resource_alignment_lock);
-	strncpy(resource_alignment_param, buf, count);
-	resource_alignment_param[count] = '\0';
+	if (resource_alignment_param)
+		count = sysfs_emit(buf, "%s\n", resource_alignment_param);
 	spin_unlock(&resource_alignment_lock);
+
 	return count;
 }

-static ssize_t pci_get_resource_alignment_param(char *buf, size_t size)
+static ssize_t resource_alignment_store(const struct bus_type *bus,
+					const char *buf, size_t count)
 {
-	size_t count;
+	char *param, *old, *end;
+
+	if (count >= (PAGE_SIZE - 1))
+		return -EINVAL;
+
+	param = kstrndup(buf, count, GFP_KERNEL);
+	if (!param)
+		return -ENOMEM;
+
+	end = strchr(param, '\n');
+	if (end)
+		*end = '\0';
+
 	spin_lock(&resource_alignment_lock);
-	count = snprintf(buf, size, "%s", resource_alignment_param);
+	old = resource_alignment_param;
+	if (strlen(param)) {
+		resource_alignment_param = param;
+	} else {
+		kfree(param);
+		resource_alignment_param = NULL;
+	}
 	spin_unlock(&resource_alignment_lock);
-	return count;
-}

-static ssize_t pci_resource_alignment_show(struct bus_type *bus, char *buf)
-{
-	return pci_get_resource_alignment_param(buf, PAGE_SIZE);
-}
+	kfree(old);

-static ssize_t pci_resource_alignment_store(struct bus_type *bus,
-					const char *buf, size_t count)
-{
-	return pci_set_resource_alignment_param(buf, count);
+	return count;
 }

-static BUS_ATTR(resource_alignment, 0644, pci_resource_alignment_show,
-	pci_resource_alignment_store);
+static BUS_ATTR_RW(resource_alignment);

 static int __init pci_resource_alignment_sysfs_init(void)
 {
@@ -5316,59 +6509,92 @@ static void pci_no_domains(void)
 {
 }
 #ifdef CONFIG_PCI_DOMAINS
-static atomic_t __domain_nr = ATOMIC_INIT(-1);
+static DEFINE_IDA(pci_domain_nr_dynamic_ida);

-int pci_get_new_domain_nr(void)
+/**
+ * pci_bus_find_emul_domain_nr() - allocate a PCI domain number per constraints
+ * @hint: desired domain, 0 if any ID in the range of @min to @max is acceptable
+ * @min: minimum allowable domain
+ * @max: maximum allowable domain, no IDs higher than INT_MAX will be returned
+ */
+int pci_bus_find_emul_domain_nr(u32 hint, u32 min, u32 max)
 {
-	return atomic_inc_return(&__domain_nr);
+	return ida_alloc_range(&pci_domain_nr_dynamic_ida, max(hint, min), max,
+			       GFP_KERNEL);
 }
+EXPORT_SYMBOL_GPL(pci_bus_find_emul_domain_nr);
+
+void pci_bus_release_emul_domain_nr(int domain_nr)
+{
+	ida_free(&pci_domain_nr_dynamic_ida, domain_nr);
+}
+EXPORT_SYMBOL_GPL(pci_bus_release_emul_domain_nr);
+#endif

 #ifdef CONFIG_PCI_DOMAINS_GENERIC
+static DEFINE_IDA(pci_domain_nr_static_ida);
+
+static void of_pci_reserve_static_domain_nr(void)
+{
+	struct device_node *np;
+	int domain_nr;
+
+	for_each_node_by_type(np, "pci") {
+		domain_nr = of_get_pci_domain_nr(np);
+		if (domain_nr < 0)
+			continue;
+		/*
+		 * Permanently allocate domain_nr in dynamic_ida
+		 * to prevent it from dynamic allocation.
+		 */
+		ida_alloc_range(&pci_domain_nr_dynamic_ida,
+				domain_nr, domain_nr, GFP_KERNEL);
+	}
+}
+
 static int of_pci_bus_find_domain_nr(struct device *parent)
 {
-	static int use_dt_domains = -1;
-	int domain = -1;
+	static bool static_domains_reserved = false;
+	int domain_nr;
+
+	/* On the first call scan device tree for static allocations. */
+	if (!static_domains_reserved) {
+		of_pci_reserve_static_domain_nr();
+		static_domains_reserved = true;
+	}
+
+	if (parent) {
+		/*
+		 * If domain is in DT, allocate it in static IDA.  This
+		 * prevents duplicate static allocations in case of errors
+		 * in DT.
+		 */
+		domain_nr = of_get_pci_domain_nr(parent->of_node);
+		if (domain_nr >= 0)
+			return ida_alloc_range(&pci_domain_nr_static_ida,
+					       domain_nr, domain_nr,
					       GFP_KERNEL);
+	}

-	if (parent)
-		domain = of_get_pci_domain_nr(parent->of_node);

 	/*
-	 * Check DT domain and use_dt_domains values.
-	 *
-	 * If DT domain property is valid (domain >= 0) and
-	 * use_dt_domains != 0, the DT assignment is valid since this means
-	 * we have not previously allocated a domain number by using
-	 * pci_get_new_domain_nr(); we should also update use_dt_domains to
-	 * 1, to indicate that we have just assigned a domain number from
-	 * DT.
-	 *
-	 * If DT domain property value is not valid (ie domain < 0), and we
-	 * have not previously assigned a domain number from DT
-	 * (use_dt_domains != 1) we should assign a domain number by
-	 * using the:
-	 *
-	 *	pci_get_new_domain_nr()
-	 *
-	 * API and update the use_dt_domains value to keep track of method we
-	 * are using to assign domain numbers (use_dt_domains = 0).
-	 *
-	 * All other combinations imply we have a platform that is trying
-	 * to mix domain numbers obtained from DT and pci_get_new_domain_nr(),
-	 * which is a recipe for domain mishandling and it is prevented by
-	 * invalidating the domain value (domain = -1) and printing a
-	 * corresponding error.
+	 * If domain was not specified in DT, choose a free ID from dynamic
+	 * allocations. All domain numbers from DT are permanently in
+	 * dynamic allocations to prevent assigning them to other DT nodes
+	 * without static domain.
 	 */
-	if (domain >= 0 && use_dt_domains) {
-		use_dt_domains = 1;
-	} else if (domain < 0 && use_dt_domains != 1) {
-		use_dt_domains = 0;
-		domain = pci_get_new_domain_nr();
-	} else {
-		dev_err(parent, "Node %s has inconsistent \"linux,pci-domain\" property in DT\n",
-			parent->of_node->full_name);
-		domain = -1;
-	}
+	return ida_alloc(&pci_domain_nr_dynamic_ida, GFP_KERNEL);
+}
+
+static void of_pci_bus_release_domain_nr(struct device *parent, int domain_nr)
+{
+	if (domain_nr < 0)
+		return;

-	return domain;
+	/* Release domain from IDA where it was allocated. */
+	if (of_get_pci_domain_nr(parent->of_node) == domain_nr)
+		ida_free(&pci_domain_nr_static_ida, domain_nr);
+	else
+		ida_free(&pci_domain_nr_dynamic_ida, domain_nr);
 }

 int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent)
@@ -5376,7 +6602,13 @@ int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent)
 	return acpi_disabled ? of_pci_bus_find_domain_nr(parent) :
 			       acpi_pci_bus_find_domain_nr(bus);
 }
-#endif
+
+void pci_bus_release_domain_nr(struct device *parent, int domain_nr)
+{
+	if (!acpi_disabled)
+		return;
+	of_pci_bus_release_domain_nr(parent, domain_nr);
+}
 #endif

 /**
@@ -5391,11 +6623,6 @@ int __weak pci_ext_cfg_avail(void)
 	return 1;
 }

-void __weak pci_fixup_cardbus(struct pci_bus *bus)
-{
-}
-EXPORT_SYMBOL(pci_fixup_cardbus);
-
 static int __init pci_setup(char *str)
 {
 	while (str) {
@@ -5405,8 +6632,13 @@ static int __init pci_setup(char *str)
 		if (*str && (str = pcibios_setup(str)) && *str) {
 			if (!strcmp(str, "nomsi")) {
 				pci_no_msi();
+			} else if (!strncmp(str, "noats", 5)) {
+				pr_info("PCIe: ATS is disabled\n");
+				pcie_ats_disabled = true;
 			} else if (!strcmp(str, "noaer")) {
 				pci_no_aer();
+			} else if (!strcmp(str, "earlydump")) {
+				pci_early_dump = true;
 			} else if (!strncmp(str, "realloc=", 8)) {
 				pci_realloc_get_opt(str + 8);
 			} else if (!strncmp(str, "realloc", 7)) {
@@ -5415,19 +6647,25 @@ static int __init pci_setup(char *str)
 				pci_no_domains();
 			} else if (!strncmp(str, "noari", 5)) {
 				pcie_ari_disabled = true;
+			} else if (!strncmp(str, "notph", 5)) {
+				pci_no_tph();
 			} else if (!strncmp(str, "cbiosize=", 9)) {
 				pci_cardbus_io_size = memparse(str + 9, &str);
 			} else if (!strncmp(str, "cbmemsize=", 10)) {
 				pci_cardbus_mem_size = memparse(str + 10, &str);
 			} else if (!strncmp(str, "resource_alignment=", 19)) {
-				pci_set_resource_alignment_param(str + 19,
-					strlen(str + 19));
+				resource_alignment_param = str + 19;
 			} else if (!strncmp(str, "ecrc=", 5)) {
 				pcie_ecrc_get_policy(str + 5);
 			} else if (!strncmp(str, "hpiosize=", 9)) {
 				pci_hotplug_io_size = memparse(str + 9, &str);
+			} else if (!strncmp(str, "hpmmiosize=", 11)) {
+				pci_hotplug_mmio_size = memparse(str + 11, &str);
+			} else if (!strncmp(str, "hpmmioprefsize=", 15)) {
+				pci_hotplug_mmio_pref_size = memparse(str + 15, &str);
 			} else if (!strncmp(str, "hpmemsize=", 10)) {
-				pci_hotplug_mem_size = memparse(str + 10, &str);
+				pci_hotplug_mmio_size = memparse(str + 10, &str);
+				pci_hotplug_mmio_pref_size = pci_hotplug_mmio_size;
 			} else if (!strncmp(str, "hpbussize=", 10)) {
 				pci_hotplug_bus_size =
 					simple_strtoul(str + 10, &str, 0);
@@ -5443,9 +6681,12 @@ static int __init pci_setup(char *str)
 				pcie_bus_config = PCIE_BUS_PEER2PEER;
 			} else if (!strncmp(str, "pcie_scan_all", 13)) {
 				pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
+			} else if (!strncmp(str, "disable_acs_redir=", 18)) {
+				disable_acs_redir_param = str + 18;
+			} else if (!strncmp(str, "config_acs=", 11)) {
+				config_acs_param = str + 11;
 			} else {
-				printk(KERN_ERR "PCI: Unknown option `%s'\n",
-						str);
+				pr_err("PCI: Unknown option `%s'\n", str);
 			}
 		}
 		str = k;
 	}
@@ -5453,3 +6694,23 @@ static int __init pci_setup(char *str)
 	return 0;
 }
 early_param("pci", pci_setup);
+
+/*
+ * 'resource_alignment_param' and 'disable_acs_redir_param' are initialized
+ * in pci_setup(), above, to point to data in the __initdata section which
+ * will be freed after the init sequence is complete. We can't allocate memory
+ * in pci_setup() because some architectures do not have any memory allocation
+ * service available during an early_param() call. So we allocate memory and
+ * copy the variable here before the init section is freed.
+ *
+ */
+static int __init pci_realloc_setup_params(void)
+{
+	resource_alignment_param = kstrdup(resource_alignment_param,
+					   GFP_KERNEL);
+	disable_acs_redir_param = kstrdup(disable_acs_redir_param, GFP_KERNEL);
+	config_acs_param = kstrdup(config_acs_param, GFP_KERNEL);
+
+	return 0;
+}
+pure_initcall(pci_realloc_setup_params);
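Usage note (not part of the diff): the hunks above replace pcie_get_minimum_link() with pcie_bandwidth_available() and export pcie_print_link_status(), which logs the available versus capable bandwidth and names the upstream link that limits it. A minimal sketch of how a driver might call it from probe() follows; foo_probe() and the surrounding driver are hypothetical, only pcie_print_link_status() and pci_enable_device() are PCI core APIs.

/* Hypothetical probe(): warn if the device sits behind a slow or narrow link */
static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int rc;

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;

	/* Logs e.g. "... Gb/s available PCIe bandwidth, limited by ..." */
	pcie_print_link_status(pdev);
	return 0;
}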

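Usage note (not part of the diff): pci_add_dma_alias() now takes a starting devfn plus a count, and its kernel-doc above recommends calling it from a header fixup quirk so the alias is known before IOMMU groups are built. A minimal sketch follows; the vendor/device IDs and the aliased function are hypothetical, while DECLARE_PCI_FIXUP_HEADER and the pci_add_dma_alias() signature match the code in this diff.

/* Hypothetical quirk: function 0 also issues DMA tagged as function 1 */
static void quirk_foo_dma_alias(struct pci_dev *dev)
{
	/* Alias a single devfn: same slot, function 1 */
	pci_add_dma_alias(dev, PCI_DEVFN(PCI_SLOT(dev->devfn), 1), 1);
}
DECLARE_PCI_FIXUP_HEADER(0x1234, 0x5678, quirk_foo_dma_alias);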