Diffstat (limited to 'drivers/pci')
-rw-r--r--  drivers/pci/access.c                   5
-rw-r--r--  drivers/pci/hotplug/acpiphp_ibm.c      2
-rw-r--r--  drivers/pci/hotplug/rpadlpar_core.c    4
-rw-r--r--  drivers/pci/iov.c                      7
-rw-r--r--  drivers/pci/msi.c                    120
-rw-r--r--  drivers/pci/pci-driver.c               2
-rw-r--r--  drivers/pci/pci-sysfs.c               23
-rw-r--r--  drivers/pci/pci.h                      2
-rw-r--r--  drivers/pci/pcie/Kconfig               8
-rw-r--r--  drivers/pci/pcie/aspm.c              291
-rw-r--r--  drivers/pci/pcie/pcie-dpc.c           34
-rw-r--r--  drivers/pci/pcie/portdrv_core.c      161
-rw-r--r--  drivers/pci/probe.c                   33
-rw-r--r--  drivers/pci/quirks.c                  28
-rw-r--r--  drivers/pci/setup-bus.c               11
15 files changed, 464 insertions, 267 deletions
diff --git a/drivers/pci/access.c b/drivers/pci/access.c
index db239547fefd..b9dd37c8c9ce 100644
--- a/drivers/pci/access.c
+++ b/drivers/pci/access.c
@@ -367,7 +367,7 @@ static size_t pci_vpd_size(struct pci_dev *dev, size_t old_size)
static int pci_vpd_wait(struct pci_dev *dev)
{
struct pci_vpd *vpd = dev->vpd;
- unsigned long timeout = jiffies + msecs_to_jiffies(50);
+ unsigned long timeout = jiffies + msecs_to_jiffies(125);
unsigned long max_sleep = 16;
u16 status;
int ret;
@@ -684,8 +684,9 @@ void pci_cfg_access_unlock(struct pci_dev *dev)
WARN_ON(!dev->block_cfg_access);
dev->block_cfg_access = 0;
- wake_up_all(&pci_cfg_wait);
raw_spin_unlock_irqrestore(&pci_lock, flags);
+
+ wake_up_all(&pci_cfg_wait);
}
EXPORT_SYMBOL_GPL(pci_cfg_access_unlock);
diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
index 68d105aaf4e2..984c7e8cec5a 100644
--- a/drivers/pci/hotplug/acpiphp_ibm.c
+++ b/drivers/pci/hotplug/acpiphp_ibm.c
@@ -107,7 +107,7 @@ static void __exit ibm_acpiphp_exit(void);
static acpi_handle ibm_acpi_handle;
static struct notification ibm_note;
-static struct bin_attribute ibm_apci_table_attr = {
+static struct bin_attribute ibm_apci_table_attr __ro_after_init = {
.attr = {
.name = "apci_table",
.mode = S_IRUGO,
diff --git a/drivers/pci/hotplug/rpadlpar_core.c b/drivers/pci/hotplug/rpadlpar_core.c
index c614ff7c3bc3..3f93a4e79595 100644
--- a/drivers/pci/hotplug/rpadlpar_core.c
+++ b/drivers/pci/hotplug/rpadlpar_core.c
@@ -463,7 +463,6 @@ static inline int is_dlpar_capable(void)
int __init rpadlpar_io_init(void)
{
- int rc = 0;
if (!is_dlpar_capable()) {
printk(KERN_WARNING "%s: partition not DLPAR capable\n",
@@ -471,8 +470,7 @@ int __init rpadlpar_io_init(void)
return -EPERM;
}
- rc = dlpar_sysfs_init();
- return rc;
+ return dlpar_sysfs_init();
}
void rpadlpar_io_exit(void)
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index 47227820406d..2479ae876482 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -124,7 +124,6 @@ int pci_iov_add_virtfn(struct pci_dev *dev, int id, int reset)
struct pci_sriov *iov = dev->sriov;
struct pci_bus *bus;
- mutex_lock(&iov->dev->sriov->lock);
bus = virtfn_add_bus(dev->bus, pci_iov_virtfn_bus(dev, id));
if (!bus)
goto failed;
@@ -162,7 +161,6 @@ int pci_iov_add_virtfn(struct pci_dev *dev, int id, int reset)
__pci_reset_function(virtfn);
pci_device_add(virtfn, virtfn->bus);
- mutex_unlock(&iov->dev->sriov->lock);
pci_bus_add_device(virtfn);
sprintf(buf, "virtfn%u", id);
@@ -181,12 +179,10 @@ failed2:
sysfs_remove_link(&dev->dev.kobj, buf);
failed1:
pci_dev_put(dev);
- mutex_lock(&iov->dev->sriov->lock);
pci_stop_and_remove_bus_device(virtfn);
failed0:
virtfn_remove_bus(dev->bus, bus);
failed:
- mutex_unlock(&iov->dev->sriov->lock);
return rc;
}
@@ -195,7 +191,6 @@ void pci_iov_remove_virtfn(struct pci_dev *dev, int id, int reset)
{
char buf[VIRTFN_ID_LEN];
struct pci_dev *virtfn;
- struct pci_sriov *iov = dev->sriov;
virtfn = pci_get_domain_bus_and_slot(pci_domain_nr(dev->bus),
pci_iov_virtfn_bus(dev, id),
@@ -218,10 +213,8 @@ void pci_iov_remove_virtfn(struct pci_dev *dev, int id, int reset)
if (virtfn->dev.kobj.sd)
sysfs_remove_link(&virtfn->dev.kobj, "physfn");
- mutex_lock(&iov->dev->sriov->lock);
pci_stop_and_remove_bus_device(virtfn);
virtfn_remove_bus(dev->bus, virtfn->bus);
- mutex_unlock(&iov->dev->sriov->lock);
/* balance pci_get_domain_bus_and_slot() */
pci_dev_put(virtfn);
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 50c5003295ca..93cc268c6ff1 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -32,32 +32,13 @@ int pci_msi_ignore_mask;
#define msix_table_size(flags) ((flags & PCI_MSIX_FLAGS_QSIZE) + 1)
#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
-static struct irq_domain *pci_msi_default_domain;
-static DEFINE_MUTEX(pci_msi_domain_lock);
-
-struct irq_domain * __weak arch_get_pci_msi_domain(struct pci_dev *dev)
-{
- return pci_msi_default_domain;
-}
-
-static struct irq_domain *pci_msi_get_domain(struct pci_dev *dev)
-{
- struct irq_domain *domain;
-
- domain = dev_get_msi_domain(&dev->dev);
- if (domain)
- return domain;
-
- return arch_get_pci_msi_domain(dev);
-}
-
static int pci_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
struct irq_domain *domain;
- domain = pci_msi_get_domain(dev);
+ domain = dev_get_msi_domain(&dev->dev);
if (domain && irq_domain_is_hierarchy(domain))
- return pci_msi_domain_alloc_irqs(domain, dev, nvec, type);
+ return msi_domain_alloc_irqs(domain, &dev->dev, nvec);
return arch_setup_msi_irqs(dev, nvec, type);
}
@@ -66,9 +47,9 @@ static void pci_msi_teardown_msi_irqs(struct pci_dev *dev)
{
struct irq_domain *domain;
- domain = pci_msi_get_domain(dev);
+ domain = dev_get_msi_domain(&dev->dev);
if (domain && irq_domain_is_hierarchy(domain))
- pci_msi_domain_free_irqs(domain, dev);
+ msi_domain_free_irqs(domain, &dev->dev);
else
arch_teardown_msi_irqs(dev);
}
@@ -610,7 +591,7 @@ static int msi_verify_entries(struct pci_dev *dev)
* msi_capability_init - configure device's MSI capability structure
* @dev: pointer to the pci_dev data structure of MSI device function
* @nvec: number of interrupts to allocate
- * @affinity: flag to indicate cpu irq affinity mask should be set
+ * @affd: description of automatic irq affinity assignments (may be %NULL)
*
* Setup the MSI capability structure of the device with the requested
* number of interrupts. A return value of zero indicates the successful
@@ -731,7 +712,7 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
ret = 0;
out:
kfree(masks);
- return 0;
+ return ret;
}
static void msix_program_entries(struct pci_dev *dev,
@@ -1084,7 +1065,7 @@ static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
if (nvec < 0)
return nvec;
if (nvec < minvec)
- return -EINVAL;
+ return -ENOSPC;
if (nvec > maxvec)
nvec = maxvec;
@@ -1109,23 +1090,15 @@ static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
}
}
-/**
- * pci_enable_msi_range - configure device's MSI capability structure
- * @dev: device to configure
- * @minvec: minimal number of interrupts to configure
- * @maxvec: maximum number of interrupts to configure
- *
- * This function tries to allocate a maximum possible number of interrupts in a
- * range between @minvec and @maxvec. It returns a negative errno if an error
- * occurs. If it succeeds, it returns the actual number of interrupts allocated
- * and updates the @dev's irq member to the lowest new interrupt number;
- * the other interrupt numbers allocated to this device are consecutive.
- **/
-int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec)
+/* deprecated, don't use */
+int pci_enable_msi(struct pci_dev *dev)
{
- return __pci_enable_msi_range(dev, minvec, maxvec, NULL);
+ int rc = __pci_enable_msi_range(dev, 1, 1, NULL);
+ if (rc < 0)
+ return rc;
+ return 0;
}
-EXPORT_SYMBOL(pci_enable_msi_range);
+EXPORT_SYMBOL(pci_enable_msi);
static int __pci_enable_msix_range(struct pci_dev *dev,
struct msix_entry *entries, int minvec,
@@ -1225,9 +1198,11 @@ int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
}
/* use legacy irq if allowed */
- if ((flags & PCI_IRQ_LEGACY) && min_vecs == 1) {
- pci_intx(dev, 1);
- return 1;
+ if (flags & PCI_IRQ_LEGACY) {
+ if (min_vecs == 1 && dev->irq) {
+ pci_intx(dev, 1);
+ return 1;
+ }
}
return vecs;
@@ -1381,7 +1356,7 @@ int pci_msi_domain_check_cap(struct irq_domain *domain,
{
struct msi_desc *desc = first_pci_msi_entry(to_pci_dev(dev));
- /* Special handling to support pci_enable_msi_range() */
+ /* Special handling to support __pci_enable_msi_range() */
if (pci_msi_desc_is_multi_msi(desc) &&
!(info->flags & MSI_FLAG_MULTI_PCI_MSI))
return 1;
@@ -1394,7 +1369,7 @@ int pci_msi_domain_check_cap(struct irq_domain *domain,
static int pci_msi_domain_handle_error(struct irq_domain *domain,
struct msi_desc *desc, int error)
{
- /* Special handling to support pci_enable_msi_range() */
+ /* Special handling to support __pci_enable_msi_range() */
if (pci_msi_desc_is_multi_msi(desc) && error == -ENOSPC)
return 1;
@@ -1481,59 +1456,6 @@ struct irq_domain *pci_msi_create_irq_domain(struct fwnode_handle *fwnode,
}
EXPORT_SYMBOL_GPL(pci_msi_create_irq_domain);
-/**
- * pci_msi_domain_alloc_irqs - Allocate interrupts for @dev in @domain
- * @domain: The interrupt domain to allocate from
- * @dev: The device for which to allocate
- * @nvec: The number of interrupts to allocate
- * @type: Unused to allow simpler migration from the arch_XXX interfaces
- *
- * Returns:
- * A virtual interrupt number or an error code in case of failure
- */
-int pci_msi_domain_alloc_irqs(struct irq_domain *domain, struct pci_dev *dev,
- int nvec, int type)
-{
- return msi_domain_alloc_irqs(domain, &dev->dev, nvec);
-}
-
-/**
- * pci_msi_domain_free_irqs - Free interrupts for @dev in @domain
- * @domain: The interrupt domain
- * @dev: The device for which to free interrupts
- */
-void pci_msi_domain_free_irqs(struct irq_domain *domain, struct pci_dev *dev)
-{
- msi_domain_free_irqs(domain, &dev->dev);
-}
-
-/**
- * pci_msi_create_default_irq_domain - Create a default MSI interrupt domain
- * @fwnode: Optional fwnode of the interrupt controller
- * @info: MSI domain info
- * @parent: Parent irq domain
- *
- * Returns: A domain pointer or NULL in case of failure. If successful
- * the default PCI/MSI irqdomain pointer is updated.
- */
-struct irq_domain *pci_msi_create_default_irq_domain(struct fwnode_handle *fwnode,
- struct msi_domain_info *info, struct irq_domain *parent)
-{
- struct irq_domain *domain;
-
- mutex_lock(&pci_msi_domain_lock);
- if (pci_msi_default_domain) {
- pr_err("PCI: default irq domain for PCI MSI has already been created.\n");
- domain = NULL;
- } else {
- domain = pci_msi_create_irq_domain(fwnode, info, parent);
- pci_msi_default_domain = domain;
- }
- mutex_unlock(&pci_msi_domain_lock);
-
- return domain;
-}
-
static int get_msi_id_cb(struct pci_dev *pdev, u16 alias, void *data)
{
u32 *pa = data;
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 1ccce1cd6aca..3e0516ee9eab 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -381,8 +381,6 @@ static int __pci_device_probe(struct pci_driver *drv, struct pci_dev *pci_dev)
id = pci_match_device(drv, pci_dev);
if (id)
error = pci_call_probe(drv, pci_dev, id);
- if (error >= 0)
- error = 0;
}
return error;
}
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 066628776e1b..25d010d449a3 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -472,6 +472,7 @@ static ssize_t sriov_numvfs_store(struct device *dev,
const char *buf, size_t count)
{
struct pci_dev *pdev = to_pci_dev(dev);
+ struct pci_sriov *iov = pdev->sriov;
int ret;
u16 num_vfs;
@@ -482,38 +483,46 @@ static ssize_t sriov_numvfs_store(struct device *dev,
if (num_vfs > pci_sriov_get_totalvfs(pdev))
return -ERANGE;
+ mutex_lock(&iov->dev->sriov->lock);
+
if (num_vfs == pdev->sriov->num_VFs)
- return count; /* no change */
+ goto exit;
/* is PF driver loaded w/callback */
if (!pdev->driver || !pdev->driver->sriov_configure) {
dev_info(&pdev->dev, "Driver doesn't support SRIOV configuration via sysfs\n");
- return -ENOSYS;
+ ret = -ENOENT;
+ goto exit;
}
if (num_vfs == 0) {
/* disable VFs */
ret = pdev->driver->sriov_configure(pdev, 0);
- if (ret < 0)
- return ret;
- return count;
+ goto exit;
}
/* enable VFs */
if (pdev->sriov->num_VFs) {
dev_warn(&pdev->dev, "%d VFs already enabled. Disable before enabling %d VFs\n",
pdev->sriov->num_VFs, num_vfs);
- return -EBUSY;
+ ret = -EBUSY;
+ goto exit;
}
ret = pdev->driver->sriov_configure(pdev, num_vfs);
if (ret < 0)
- return ret;
+ goto exit;
if (ret != num_vfs)
dev_warn(&pdev->dev, "%d VFs requested; only %d enabled\n",
num_vfs, ret);
+exit:
+ mutex_unlock(&iov->dev->sriov->lock);
+
+ if (ret < 0)
+ return ret;
+
return count;
}
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index cb17db242f30..8dd38e69d6f2 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -270,7 +270,7 @@ struct pci_sriov {
u16 driver_max_VFs; /* max num VFs driver supports */
struct pci_dev *dev; /* lowest numbered PF */
struct pci_dev *self; /* this PF */
- struct mutex lock; /* lock for VF bus */
+ struct mutex lock; /* lock for setting sriov_numvfs in sysfs */
resource_size_t barsz[PCI_SRIOV_NUM_BARS]; /* VF BAR size */
};
diff --git a/drivers/pci/pcie/Kconfig b/drivers/pci/pcie/Kconfig
index 7ce77635e5ad..ac53edbc9613 100644
--- a/drivers/pci/pcie/Kconfig
+++ b/drivers/pci/pcie/Kconfig
@@ -71,6 +71,14 @@ config PCIEASPM_POWERSAVE
Enable PCI Express ASPM L0s and L1 where possible, even if the
BIOS did not.
+config PCIEASPM_POWER_SUPERSAVE
+ bool "Power Supersave"
+ depends on PCIEASPM
+ help
+ Same as PCIEASPM_POWERSAVE, except it also enables L1 substates where
+ possible. This would result in higher power savings while staying in L1
+ where the components support it.
+
config PCIEASPM_PERFORMANCE
bool "Performance"
depends on PCIEASPM
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 17ac1dce3286..a9bcd56e41ed 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -30,8 +30,29 @@
#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
#define ASPM_STATE_L1 (4) /* L1 state */
+#define ASPM_STATE_L1_1 (8) /* ASPM L1.1 state */
+#define ASPM_STATE_L1_2 (0x10) /* ASPM L1.2 state */
+#define ASPM_STATE_L1_1_PCIPM (0x20) /* PCI PM L1.1 state */
+#define ASPM_STATE_L1_2_PCIPM (0x40) /* PCI PM L1.2 state */
+#define ASPM_STATE_L1_SS_PCIPM (ASPM_STATE_L1_1_PCIPM | ASPM_STATE_L1_2_PCIPM)
+#define ASPM_STATE_L1_2_MASK (ASPM_STATE_L1_2 | ASPM_STATE_L1_2_PCIPM)
+#define ASPM_STATE_L1SS (ASPM_STATE_L1_1 | ASPM_STATE_L1_1_PCIPM |\
+ ASPM_STATE_L1_2_MASK)
#define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
-#define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
+#define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1 | \
+ ASPM_STATE_L1SS)
+
+/*
+ * When L1 substates are enabled, the LTR L1.2 threshold is a timing parameter
+ * that decides whether L1.1 or L1.2 is entered (refer to the PCIe spec for details).
+ * Not sure if there is a way to "calculate" this on the fly, but maybe we
+ * could turn it into a parameter in future. This value has been taken from
+ * the following files from Intel's coreboot (which is the only code I found
+ * to have used this):
+ * https://www.coreboot.org/pipermail/coreboot-gerrit/2015-March/021134.html
+ * https://review.coreboot.org/#/c/8832/
+ */
+#define LTR_L1_2_THRESHOLD_BITS ((1 << 21) | (1 << 23) | (1 << 30))
struct aspm_latency {
u32 l0s; /* L0s latency (nsec) */
@@ -40,6 +61,7 @@ struct aspm_latency {
struct pcie_link_state {
struct pci_dev *pdev; /* Upstream component of the Link */
+ struct pci_dev *downstream; /* Downstream component, function 0 */
struct pcie_link_state *root; /* pointer to the root port link */
struct pcie_link_state *parent; /* pointer to the parent Link state */
struct list_head sibling; /* node in link_list */
@@ -47,11 +69,11 @@ struct pcie_link_state {
struct list_head link; /* node in parent's children list */
/* ASPM state */
- u32 aspm_support:3; /* Supported ASPM state */
- u32 aspm_enabled:3; /* Enabled ASPM state */
- u32 aspm_capable:3; /* Capable ASPM state with latency */
- u32 aspm_default:3; /* Default ASPM state by BIOS */
- u32 aspm_disable:3; /* Disabled ASPM state */
+ u32 aspm_support:7; /* Supported ASPM state */
+ u32 aspm_enabled:7; /* Enabled ASPM state */
+ u32 aspm_capable:7; /* Capable ASPM state with latency */
+ u32 aspm_default:7; /* Default ASPM state by BIOS */
+ u32 aspm_disable:7; /* Disabled ASPM state */
/* Clock PM state */
u32 clkpm_capable:1; /* Clock PM capable? */
@@ -66,6 +88,14 @@ struct pcie_link_state {
* has one slot under it, so at most there are 8 functions.
*/
struct aspm_latency acceptable[8];
+
+ /* L1 PM Substate info */
+ struct {
+ u32 up_cap_ptr; /* L1SS cap ptr in upstream dev */
+ u32 dw_cap_ptr; /* L1SS cap ptr in downstream dev */
+ u32 ctl1; /* value to be programmed in ctl1 */
+ u32 ctl2; /* value to be programmed in ctl2 */
+ } l1ss;
};
static int aspm_disabled, aspm_force;
@@ -76,11 +106,14 @@ static LIST_HEAD(link_list);
#define POLICY_DEFAULT 0 /* BIOS default setting */
#define POLICY_PERFORMANCE 1 /* high performance */
#define POLICY_POWERSAVE 2 /* high power saving */
+#define POLICY_POWER_SUPERSAVE 3 /* possibly even more power saving */
#ifdef CONFIG_PCIEASPM_PERFORMANCE
static int aspm_policy = POLICY_PERFORMANCE;
#elif defined CONFIG_PCIEASPM_POWERSAVE
static int aspm_policy = POLICY_POWERSAVE;
+#elif defined CONFIG_PCIEASPM_POWER_SUPERSAVE
+static int aspm_policy = POLICY_POWER_SUPERSAVE;
#else
static int aspm_policy;
#endif
@@ -88,7 +121,8 @@ static int aspm_policy;
static const char *policy_str[] = {
[POLICY_DEFAULT] = "default",
[POLICY_PERFORMANCE] = "performance",
- [POLICY_POWERSAVE] = "powersave"
+ [POLICY_POWERSAVE] = "powersave",
+ [POLICY_POWER_SUPERSAVE] = "powersupersave"
};
#define LINK_RETRAIN_TIMEOUT HZ
@@ -101,6 +135,9 @@ static int policy_to_aspm_state(struct pcie_link_state *link)
return 0;
case POLICY_POWERSAVE:
/* Enable ASPM L0s/L1 */
+ return (ASPM_STATE_L0S | ASPM_STATE_L1);
+ case POLICY_POWER_SUPERSAVE:
+ /* Enable Everything */
return ASPM_STATE_ALL;
case POLICY_DEFAULT:
return link->aspm_default;
@@ -115,7 +152,8 @@ static int policy_to_clkpm_state(struct pcie_link_state *link)
/* Disable ASPM and Clock PM */
return 0;
case POLICY_POWERSAVE:
- /* Disable Clock PM */
+ case POLICY_POWER_SUPERSAVE:
+ /* Enable Clock PM */
return 1;
case POLICY_DEFAULT:
return link->clkpm_default;
@@ -278,11 +316,33 @@ static u32 calc_l1_acceptable(u32 encoding)
return (1000 << encoding);
}
+/* Convert L1SS T_pwr encoding to usec */
+static u32 calc_l1ss_pwron(struct pci_dev *pdev, u32 scale, u32 val)
+{
+ switch (scale) {
+ case 0:
+ return val * 2;
+ case 1:
+ return val * 10;
+ case 2:
+ return val * 100;
+ }
+ dev_err(&pdev->dev, "%s: Invalid T_PwrOn scale: %u\n",
+ __func__, scale);
+ return 0;
+}
+
struct aspm_register_info {
u32 support:2;
u32 enabled:2;
u32 latency_encoding_l0s;
u32 latency_encoding_l1;
+
+ /* L1 substates */
+ u32 l1ss_cap_ptr;
+ u32 l1ss_cap;
+ u32 l1ss_ctl1;
+ u32 l1ss_ctl2;
};
static void pcie_get_aspm_reg(struct pci_dev *pdev,
@@ -297,6 +357,22 @@ static void pcie_get_aspm_reg(struct pci_dev *pdev,
info->latency_encoding_l1 = (reg32 & PCI_EXP_LNKCAP_L1EL) >> 15;
pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &reg16);
info->enabled = reg16 & PCI_EXP_LNKCTL_ASPMC;
+
+ /* Read L1 PM substate capabilities */
+ info->l1ss_cap = info->l1ss_ctl1 = info->l1ss_ctl2 = 0;
+ info->l1ss_cap_ptr = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS);
+ if (!info->l1ss_cap_ptr)
+ return;
+ pci_read_config_dword(pdev, info->l1ss_cap_ptr + PCI_L1SS_CAP,
+ &info->l1ss_cap);
+ if (!(info->l1ss_cap & PCI_L1SS_CAP_L1_PM_SS)) {
+ info->l1ss_cap = 0;
+ return;
+ }
+ pci_read_config_dword(pdev, info->l1ss_cap_ptr + PCI_L1SS_CTL1,
+ &info->l1ss_ctl1);
+ pci_read_config_dword(pdev, info->l1ss_cap_ptr + PCI_L1SS_CTL2,
+ &info->l1ss_ctl2);
}
static void pcie_aspm_check_latency(struct pci_dev *endpoint)
@@ -327,6 +403,14 @@ static void pcie_aspm_check_latency(struct pci_dev *endpoint)
* Check L1 latency.
* Every switch on the path to root complex need 1
* more microsecond for L1. Spec doesn't mention L0s.
+ *
+ * The exit latencies for L1 substates are not advertised
+ * by a device. Since the spec also doesn't mention a way
+ * to determine max latencies introduced by enabling L1
+ * substates on the components, it is not clear how to do
+ * an L1 substate exit latency check. We assume that the
+ * L1 exit latencies advertised by a device include L1
+ * substate latencies (and hence do not do any check).
*/
latency = max_t(u32, link->latency_up.l1, link->latency_dw.l1);
if ((link->aspm_capable & ASPM_STATE_L1) &&
@@ -338,6 +422,60 @@ static void pcie_aspm_check_latency(struct pci_dev *endpoint)
}
}
+/*
+ * The L1 PM substate capability is only implemented in function 0 in a
+ * multi function device.
+ */
+static struct pci_dev *pci_function_0(struct pci_bus *linkbus)
+{
+ struct pci_dev *child;
+
+ list_for_each_entry(child, &linkbus->devices, bus_list)
+ if (PCI_FUNC(child->devfn) == 0)
+ return child;
+ return NULL;
+}
+
+/* Calculate L1.2 PM substate timing parameters */
+static void aspm_calc_l1ss_info(struct pcie_link_state *link,
+ struct aspm_register_info *upreg,
+ struct aspm_register_info *dwreg)
+{
+ u32 val1, val2, scale1, scale2;
+
+ link->l1ss.up_cap_ptr = upreg->l1ss_cap_ptr;
+ link->l1ss.dw_cap_ptr = dwreg->l1ss_cap_ptr;
+ link->l1ss.ctl1 = link->l1ss.ctl2 = 0;
+
+ if (!(link->aspm_support & ASPM_STATE_L1_2_MASK))
+ return;
+
+ /* Choose the greater of the two T_cmn_mode_rstr_time */
+ val1 = (upreg->l1ss_cap >> 8) & 0xFF;
+ val2 = (dwreg->l1ss_cap >> 8) & 0xFF;
+ if (val1 > val2)
+ link->l1ss.ctl1 |= val1 << 8;
+ else
+ link->l1ss.ctl1 |= val2 << 8;
+ /*
+ * We currently use a fixed LTR L1.2 threshold constant picked from
+ * Intel's coreboot.
+ */
+ link->l1ss.ctl1 |= LTR_L1_2_THRESHOLD_BITS;
+
+ /* Choose the greater of the two T_pwr_on */
+ val1 = (upreg->l1ss_cap >> 19) & 0x1F;
+ scale1 = (upreg->l1ss_cap >> 16) & 0x03;
+ val2 = (dwreg->l1ss_cap >> 19) & 0x1F;
+ scale2 = (dwreg->l1ss_cap >> 16) & 0x03;
+
+ if (calc_l1ss_pwron(link->pdev, scale1, val1) >
+ calc_l1ss_pwron(link->downstream, scale2, val2))
+ link->l1ss.ctl2 |= scale1 | (val1 << 3);
+ else
+ link->l1ss.ctl2 |= scale2 | (val2 << 3);
+}
+
static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
{
struct pci_dev *child, *parent = link->pdev;
@@ -353,8 +491,9 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
/* Get upstream/downstream components' register state */
pcie_get_aspm_reg(parent, &upreg);
- child = list_entry(linkbus->devices.next, struct pci_dev, bus_list);
+ child = pci_function_0(linkbus);
pcie_get_aspm_reg(child, &dwreg);
+ link->downstream = child;
/*
* If ASPM not supported, don't mess with the clocks and link,
@@ -397,6 +536,28 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
link->latency_up.l1 = calc_l1_latency(upreg.latency_encoding_l1);
link->latency_dw.l1 = calc_l1_latency(dwreg.latency_encoding_l1);
+ /* Setup L1 substate */
+ if (upreg.l1ss_cap & dwreg.l1ss_cap & PCI_L1SS_CAP_ASPM_L1_1)
+ link->aspm_support |= ASPM_STATE_L1_1;
+ if (upreg.l1ss_cap & dwreg.l1ss_cap & PCI_L1SS_CAP_ASPM_L1_2)
+ link->aspm_support |= ASPM_STATE_L1_2;
+ if (upreg.l1ss_cap & dwreg.l1ss_cap & PCI_L1SS_CAP_PCIPM_L1_1)
+ link->aspm_support |= ASPM_STATE_L1_1_PCIPM;
+ if (upreg.l1ss_cap & dwreg.l1ss_cap & PCI_L1SS_CAP_PCIPM_L1_2)
+ link->aspm_support |= ASPM_STATE_L1_2_PCIPM;
+
+ if (upreg.l1ss_ctl1 & dwreg.l1ss_ctl1 & PCI_L1SS_CTL1_ASPM_L1_1)
+ link->aspm_enabled |= ASPM_STATE_L1_1;
+ if (upreg.l1ss_ctl1 & dwreg.l1ss_ctl1 & PCI_L1SS_CTL1_ASPM_L1_2)
+ link->aspm_enabled |= ASPM_STATE_L1_2;
+ if (upreg.l1ss_ctl1 & dwreg.l1ss_ctl1 & PCI_L1SS_CTL1_PCIPM_L1_1)
+ link->aspm_enabled |= ASPM_STATE_L1_1_PCIPM;
+ if (upreg.l1ss_ctl1 & dwreg.l1ss_ctl1 & PCI_L1SS_CTL1_PCIPM_L1_2)
+ link->aspm_enabled |= ASPM_STATE_L1_2_PCIPM;
+
+ if (link->aspm_support & ASPM_STATE_L1SS)
+ aspm_calc_l1ss_info(link, &upreg, &dwreg);
+
/* Save default state */
link->aspm_default = link->aspm_enabled;
@@ -435,6 +596,92 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
}
}
+static void pci_clear_and_set_dword(struct pci_dev *pdev, int pos,
+ u32 clear, u32 set)
+{
+ u32 val;
+
+ pci_read_config_dword(pdev, pos, &val);
+ val &= ~clear;
+ val |= set;
+ pci_write_config_dword(pdev, pos, val);
+}
+
+/* Configure the ASPM L1 substates */
+static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state)
+{
+ u32 val, enable_req;
+ struct pci_dev *child = link->downstream, *parent = link->pdev;
+ u32 up_cap_ptr = link->l1ss.up_cap_ptr;
+ u32 dw_cap_ptr = link->l1ss.dw_cap_ptr;
+
+ enable_req = (link->aspm_enabled ^ state) & state;
+
+ /*
+ * Here are the rules specified in the PCIe spec for enabling L1SS:
+ * - When enabling L1.x, enable bit at parent first, then at child
+ * - When disabling L1.x, disable bit at child first, then at parent
+ * - When enabling ASPM L1.x, need to disable L1
+ * (at child followed by parent).
+ * - The ASPM/PCIPM L1.2 must be disabled while programming timing
+ * parameters
+ *
+ * To keep it simple, disable all L1SS bits first, and later enable
+ * what is needed.
+ */
+
+ /* Disable all L1 substates */
+ pci_clear_and_set_dword(child, dw_cap_ptr + PCI_L1SS_CTL1,
+ PCI_L1SS_CTL1_L1SS_MASK, 0);
+ pci_clear_and_set_dword(parent, up_cap_ptr + PCI_L1SS_CTL1,
+ PCI_L1SS_CTL1_L1SS_MASK, 0);
+ /*
+ * If needed, disable L1, and it gets enabled later
+ * in pcie_config_aspm_link().
+ */
+ if (enable_req & (ASPM_STATE_L1_1 | ASPM_STATE_L1_2)) {
+ pcie_capability_clear_and_set_word(child, PCI_EXP_LNKCTL,
+ PCI_EXP_LNKCTL_ASPM_L1, 0);
+ pcie_capability_clear_and_set_word(parent, PCI_EXP_LNKCTL,
+ PCI_EXP_LNKCTL_ASPM_L1, 0);
+ }
+
+ if (enable_req & ASPM_STATE_L1_2_MASK) {
+
+ /* Program T_pwr_on in both ports */
+ pci_write_config_dword(parent, up_cap_ptr + PCI_L1SS_CTL2,
+ link->l1ss.ctl2);
+ pci_write_config_dword(child, dw_cap_ptr + PCI_L1SS_CTL2,
+ link->l1ss.ctl2);
+
+ /* Program T_cmn_mode in parent */
+ pci_clear_and_set_dword(parent, up_cap_ptr + PCI_L1SS_CTL1,
+ 0xFF00, link->l1ss.ctl1);
+
+ /* Program LTR L1.2 threshold in both ports */
+ pci_clear_and_set_dword(parent, up_cap_ptr + PCI_L1SS_CTL1,
+ 0xE3FF0000, link->l1ss.ctl1);
+ pci_clear_and_set_dword(child, dw_cap_ptr + PCI_L1SS_CTL1,
+ 0xE3FF0000, link->l1ss.ctl1);
+ }
+
+ val = 0;
+ if (state & ASPM_STATE_L1_1)
+ val |= PCI_L1SS_CTL1_ASPM_L1_1;
+ if (state & ASPM_STATE_L1_2)
+ val |= PCI_L1SS_CTL1_ASPM_L1_2;
+ if (state & ASPM_STATE_L1_1_PCIPM)
+ val |= PCI_L1SS_CTL1_PCIPM_L1_1;
+ if (state & ASPM_STATE_L1_2_PCIPM)
+ val |= PCI_L1SS_CTL1_PCIPM_L1_2;
+
+ /* Enable what we need to enable */
+ pci_clear_and_set_dword(parent, up_cap_ptr + PCI_L1SS_CTL1,
+ PCI_L1SS_CAP_L1_PM_SS, val);
+ pci_clear_and_set_dword(child, dw_cap_ptr + PCI_L1SS_CTL1,
+ PCI_L1SS_CAP_L1_PM_SS, val);
+}
+
static void pcie_config_aspm_dev(struct pci_dev *pdev, u32 val)
{
pcie_capability_clear_and_set_word(pdev, PCI_EXP_LNKCTL,
@@ -444,11 +691,23 @@ static void pcie_config_aspm_dev(struct pci_dev *pdev, u32 val)
static void pcie_config_aspm_link(struct pcie_link_state *link, u32 state)
{
u32 upstream = 0, dwstream = 0;
- struct pci_dev *child, *parent = link->pdev;
+ struct pci_dev *child = link->downstream, *parent = link->pdev;
struct pci_bus *linkbus = parent->subordinate;
- /* Nothing to do if the link is already in the requested state */
+ /* Enable only the states that were not explicitly disabled */
state &= (link->aspm_capable & ~link->aspm_disable);
+
+ /* Can't enable any substates if L1 is not enabled */
+ if (!(state & ASPM_STATE_L1))
+ state &= ~ASPM_STATE_L1SS;
+
+ /* Spec says both ports must be in D0 before enabling PCI PM substates */
+ if (parent->current_state != PCI_D0 || child->current_state != PCI_D0) {
+ state &= ~ASPM_STATE_L1_SS_PCIPM;
+ state |= (link->aspm_enabled & ASPM_STATE_L1_SS_PCIPM);
+ }
+
+ /* Nothing to do if the link is already in the requested state */
if (link->aspm_enabled == state)
return;
/* Convert ASPM state to upstream/downstream ASPM register state */
@@ -460,6 +719,10 @@ static void pcie_config_aspm_link(struct pcie_link_state *link, u32 state)
upstream |= PCI_EXP_LNKCTL_ASPM_L1;
dwstream |= PCI_EXP_LNKCTL_ASPM_L1;
}
+
+ if (link->aspm_capable & ASPM_STATE_L1SS)
+ pcie_config_aspm_l1ss(link, state);
+
/*
* Spec 2.0 suggests all functions should be configured the
* same setting for ASPM. Enabling ASPM L1 should be done in
@@ -612,7 +875,8 @@ void pcie_aspm_init_link_state(struct pci_dev *pdev)
* the BIOS's expectation, we'll do so once pci_enable_device() is
* called.
*/
- if (aspm_policy != POLICY_POWERSAVE) {
+ if (aspm_policy != POLICY_POWERSAVE &&
+ aspm_policy != POLICY_POWER_SUPERSAVE) {
pcie_config_aspm_path(link);
pcie_set_clkpm(link, policy_to_clkpm_state(link));
}
@@ -712,7 +976,8 @@ void pcie_aspm_powersave_config_link(struct pci_dev *pdev)
if (aspm_disabled || !link)
return;
- if (aspm_policy != POLICY_POWERSAVE)
+ if (aspm_policy != POLICY_POWERSAVE &&
+ aspm_policy != POLICY_POWER_SUPERSAVE)
return;
down_read(&pci_bus_sem);
diff --git a/drivers/pci/pcie/pcie-dpc.c b/drivers/pci/pcie/pcie-dpc.c
index 9811b14d9ad8..d4d70ef4a2d7 100644
--- a/drivers/pci/pcie/pcie-dpc.c
+++ b/drivers/pci/pcie/pcie-dpc.c
@@ -19,8 +19,28 @@ struct dpc_dev {
struct pcie_device *dev;
struct work_struct work;
int cap_pos;
+ bool rp;
};
+static int dpc_wait_rp_inactive(struct dpc_dev *dpc)
+{
+ unsigned long timeout = jiffies + HZ;
+ struct pci_dev *pdev = dpc->dev->port;
+ u16 status;
+
+ pci_read_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_STATUS, &status);
+ while (status & PCI_EXP_DPC_RP_BUSY &&
+ !time_after(jiffies, timeout)) {
+ msleep(10);
+ pci_read_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_STATUS, &status);
+ }
+ if (status & PCI_EXP_DPC_RP_BUSY) {
+ dev_warn(&pdev->dev, "DPC root port still busy\n");
+ return -EBUSY;
+ }
+ return 0;
+}
+
static void dpc_wait_link_inactive(struct pci_dev *pdev)
{
unsigned long timeout = jiffies + HZ;
@@ -33,7 +53,7 @@ static void dpc_wait_link_inactive(struct pci_dev *pdev)
pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
}
if (lnk_status & PCI_EXP_LNKSTA_DLLLA)
- dev_warn(&pdev->dev, "Link state not disabled for DPC event");
+ dev_warn(&pdev->dev, "Link state not disabled for DPC event\n");
}
static void interrupt_event_handler(struct work_struct *work)
@@ -52,6 +72,8 @@ static void interrupt_event_handler(struct work_struct *work)
pci_unlock_rescan_remove();
dpc_wait_link_inactive(pdev);
+ if (dpc->rp && dpc_wait_rp_inactive(dpc))
+ return;
pci_write_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_STATUS,
PCI_EXP_DPC_STATUS_TRIGGER | PCI_EXP_DPC_STATUS_INTERRUPT);
}
@@ -73,11 +95,15 @@ static irqreturn_t dpc_irq(int irq, void *context)
if (status & PCI_EXP_DPC_STATUS_TRIGGER) {
u16 reason = (status >> 1) & 0x3;
+ u16 ext_reason = (status >> 5) & 0x3;
- dev_warn(&dpc->dev->device, "DPC %s triggered, remove downstream devices\n",
+ dev_warn(&dpc->dev->device, "DPC %s detected, remove downstream devices\n",
(reason == 0) ? "unmasked uncorrectable error" :
(reason == 1) ? "ERR_NONFATAL" :
- (reason == 2) ? "ERR_FATAL" : "extended error");
+ (reason == 2) ? "ERR_FATAL" :
+ (ext_reason == 0) ? "RP PIO error" :
+ (ext_reason == 1) ? "software trigger" :
+ "reserved error");
schedule_work(&dpc->work);
}
return IRQ_HANDLED;
@@ -111,6 +137,8 @@ static int dpc_probe(struct pcie_device *dev)
pci_read_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_CAP, &cap);
pci_read_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_CTL, &ctl);
+ dpc->rp = (cap & PCI_EXP_DPC_CAP_RP_EXT);
+
ctl |= PCI_EXP_DPC_CTL_EN_NONFATAL | PCI_EXP_DPC_CTL_INT_EN;
pci_write_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_CTL, ctl);
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index 9698289f105c..cea504f6f478 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -44,52 +44,16 @@ static void release_pcie_device(struct device *dev)
}
/**
- * pcie_port_msix_add_entry - add entry to given array of MSI-X entries
- * @entries: Array of MSI-X entries
- * @new_entry: Index of the entry to add to the array
- * @nr_entries: Number of entries already in the array
- *
- * Return value: Position of the added entry in the array
- */
-static int pcie_port_msix_add_entry(
- struct msix_entry *entries, int new_entry, int nr_entries)
-{
- int j;
-
- for (j = 0; j < nr_entries; j++)
- if (entries[j].entry == new_entry)
- return j;
-
- entries[j].entry = new_entry;
- return j;
-}
-
-/**
* pcie_port_enable_msix - try to set up MSI-X as interrupt mode for given port
* @dev: PCI Express port to handle
- * @vectors: Array of interrupt vectors to populate
+ * @irqs: Array of interrupt vectors to populate
* @mask: Bitmask of port capabilities returned by get_port_device_capability()
*
* Return value: 0 on success, error code on failure
*/
-static int pcie_port_enable_msix(struct pci_dev *dev, int *vectors, int mask)
+static int pcie_port_enable_msix(struct pci_dev *dev, int *irqs, int mask)
{
- struct msix_entry *msix_entries;
- int idx[PCIE_PORT_DEVICE_MAXSERVICES];
- int nr_entries, status, pos, i, nvec;
- u16 reg16;
- u32 reg32;
-
- nr_entries = pci_msix_vec_count(dev);
- if (nr_entries < 0)
- return nr_entries;
- BUG_ON(!nr_entries);
- if (nr_entries > PCIE_PORT_MAX_MSIX_ENTRIES)
- nr_entries = PCIE_PORT_MAX_MSIX_ENTRIES;
-
- msix_entries = kzalloc(sizeof(*msix_entries) * nr_entries, GFP_KERNEL);
- if (!msix_entries)
- return -ENOMEM;
+ int nr_entries, entry, nvec = 0;
/*
* Allocate as many entries as the port wants, so that we can check
@@ -97,20 +61,13 @@ static int pcie_port_enable_msix(struct pci_dev *dev, int *vectors, int mask)
* equal to the number of entries this port actually uses, we'll happily
* go through without any tricks.
*/
- for (i = 0; i < nr_entries; i++)
- msix_entries[i].entry = i;
-
- status = pci_enable_msix_exact(dev, msix_entries, nr_entries);
- if (status)
- goto Exit;
-
- for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++)
- idx[i] = -1;
- status = -EIO;
- nvec = 0;
+ nr_entries = pci_alloc_irq_vectors(dev, 1, PCIE_PORT_MAX_MSIX_ENTRIES,
+ PCI_IRQ_MSIX);
+ if (nr_entries < 0)
+ return nr_entries;
if (mask & (PCIE_PORT_SERVICE_PME | PCIE_PORT_SERVICE_HP)) {
- int entry;
+ u16 reg16;
/*
* The code below follows the PCI Express Base Specification 2.0
@@ -125,18 +82,16 @@ static int pcie_port_enable_msix(struct pci_dev *dev, int *vectors, int mask)
pcie_capability_read_word(dev, PCI_EXP_FLAGS, &reg16);
entry = (reg16 & PCI_EXP_FLAGS_IRQ) >> 9;
if (entry >= nr_entries)
- goto Error;
+ goto out_free_irqs;
- i = pcie_port_msix_add_entry(msix_entries, entry, nvec);
- if (i == nvec)
- nvec++;
+ irqs[PCIE_PORT_SERVICE_PME_SHIFT] = pci_irq_vector(dev, entry);
+ irqs[PCIE_PORT_SERVICE_HP_SHIFT] = pci_irq_vector(dev, entry);
- idx[PCIE_PORT_SERVICE_PME_SHIFT] = i;
- idx[PCIE_PORT_SERVICE_HP_SHIFT] = i;
+ nvec = max(nvec, entry + 1);
}
if (mask & PCIE_PORT_SERVICE_AER) {
- int entry;
+ u32 reg32, pos;
/*
* The code below follows Section 7.10.10 of the PCI Express
@@ -151,13 +106,11 @@ static int pcie_port_enable_msix(struct pci_dev *dev, int *vectors, int mask)
pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, &reg32);
entry = reg32 >> 27;
if (entry >= nr_entries)
- goto Error;
+ goto out_free_irqs;
- i = pcie_port_msix_add_entry(msix_entries, entry, nvec);
- if (i == nvec)
- nvec++;
+ irqs[PCIE_PORT_SERVICE_AER_SHIFT] = pci_irq_vector(dev, entry);
- idx[PCIE_PORT_SERVICE_AER_SHIFT] = i;
+ nvec = max(nvec, entry + 1);
}
/*
@@ -165,41 +118,39 @@ static int pcie_port_enable_msix(struct pci_dev *dev, int *vectors, int mask)
* what we have. Otherwise, the port has some extra entries not for the
* services we know and we need to work around that.
*/
- if (nvec == nr_entries) {
- status = 0;
- } else {
+ if (nvec != nr_entries) {
/* Drop the temporary MSI-X setup */
- pci_disable_msix(dev);
+ pci_free_irq_vectors(dev);
/* Now allocate the MSI-X vectors for real */
- status = pci_enable_msix_exact(dev, msix_entries, nvec);
- if (status)
- goto Exit;
+ nr_entries = pci_alloc_irq_vectors(dev, nvec, nvec,
+ PCI_IRQ_MSIX);
+ if (nr_entries < 0)
+ return nr_entries;
}
- for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++)
- vectors[i] = idx[i] >= 0 ? msix_entries[idx[i]].vector : -1;
-
- Exit:
- kfree(msix_entries);
- return status;
+ return 0;
- Error:
- pci_disable_msix(dev);
- goto Exit;
+out_free_irqs:
+ pci_free_irq_vectors(dev);
+ return -EIO;
}
/**
- * init_service_irqs - initialize irqs for PCI Express port services
+ * pcie_init_service_irqs - initialize irqs for PCI Express port services
* @dev: PCI Express port to handle
* @irqs: Array of irqs to populate
* @mask: Bitmask of port capabilities returned by get_port_device_capability()
*
* Return value: Interrupt mode associated with the port
*/
-static int init_service_irqs(struct pci_dev *dev, int *irqs, int mask)
+static int pcie_init_service_irqs(struct pci_dev *dev, int *irqs, int mask)
{
- int i, irq = -1;
+ unsigned flags = PCI_IRQ_LEGACY | PCI_IRQ_MSI;
+ int ret, i;
+
+ for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++)
+ irqs[i] = -1;
/*
* If MSI cannot be used for PCIe PME or hotplug, we have to use
@@ -207,41 +158,25 @@ static int init_service_irqs(struct pci_dev *dev, int *irqs, int mask)
*/
if (((mask & PCIE_PORT_SERVICE_PME) && pcie_pme_no_msi()) ||
((mask & PCIE_PORT_SERVICE_HP) && pciehp_no_msi())) {
- if (dev->irq)
- irq = dev->irq;
- goto no_msi;
+ flags &= ~PCI_IRQ_MSI;
+ } else {
+ /* Try to use MSI-X if supported */
+ if (!pcie_port_enable_msix(dev, irqs, mask))
+ return 0;
}
- /* Try to use MSI-X if supported */
- if (!pcie_port_enable_msix(dev, irqs, mask))
- return 0;
-
- /*
- * We're not going to use MSI-X, so try MSI and fall back to INTx.
- * If neither MSI/MSI-X nor INTx available, try other interrupt. On
- * some platforms, root port doesn't support MSI/MSI-X/INTx in RC mode.
- */
- if (!pci_enable_msi(dev) || dev->irq)
- irq = dev->irq;
+ ret = pci_alloc_irq_vectors(dev, 1, 1, flags);
+ if (ret < 0)
+ return -ENODEV;
- no_msi:
- for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++)
- irqs[i] = irq;
- irqs[PCIE_PORT_SERVICE_VC_SHIFT] = -1;
+ for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++) {
+ if (i != PCIE_PORT_SERVICE_VC_SHIFT)
+ irqs[i] = pci_irq_vector(dev, 0);
+ }
- if (irq < 0)
- return -ENODEV;
return 0;
}
-static void cleanup_service_irqs(struct pci_dev *dev)
-{
- if (dev->msix_enabled)
- pci_disable_msix(dev);
- else if (dev->msi_enabled)
- pci_disable_msi(dev);
-}
-
/**
* get_port_device_capability - discover capabilities of a PCI Express port
* @dev: PCI Express port to examine
@@ -378,7 +313,7 @@ int pcie_port_device_register(struct pci_dev *dev)
* that can be used in the absence of irqs. Allow them to determine
* if that is to be used.
*/
- status = init_service_irqs(dev, irqs, capabilities);
+ status = pcie_init_service_irqs(dev, irqs, capabilities);
if (status) {
capabilities &= PCIE_PORT_SERVICE_VC | PCIE_PORT_SERVICE_HP;
if (!capabilities)
@@ -401,7 +336,7 @@ int pcie_port_device_register(struct pci_dev *dev)
return 0;
error_cleanup_irqs:
- cleanup_service_irqs(dev);
+ pci_free_irq_vectors(dev);
error_disable:
pci_disable_device(dev);
return status;
@@ -469,7 +404,7 @@ static int remove_iter(struct device *dev, void *data)
void pcie_port_device_remove(struct pci_dev *dev)
{
device_for_each_child(&dev->dev, NULL, remove_iter);
- cleanup_service_irqs(dev);
+ pci_free_irq_vectors(dev);
pci_disable_device(dev);
}
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index e164b5c9f0f0..3abc94212197 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -1554,8 +1554,16 @@ static void program_hpp_type0(struct pci_dev *dev, struct hpp_type0 *hpp)
static void program_hpp_type1(struct pci_dev *dev, struct hpp_type1 *hpp)
{
- if (hpp)
- dev_warn(&dev->dev, "PCI-X settings not supported\n");
+ int pos;
+
+ if (!hpp)
+ return;
+
+ pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
+ if (!pos)
+ return;
+
+ dev_warn(&dev->dev, "PCI-X settings not supported\n");
}
static bool pcie_root_rcb_set(struct pci_dev *dev)
@@ -1581,6 +1589,9 @@ static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp)
if (!hpp)
return;
+ if (!pci_is_pcie(dev))
+ return;
+
if (hpp->revision > 1) {
dev_warn(&dev->dev, "PCIe settings rev %d not supported\n",
hpp->revision);
@@ -1650,12 +1661,30 @@ static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp)
*/
}
+static void pci_configure_extended_tags(struct pci_dev *dev)
+{
+ u32 dev_cap;
+ int ret;
+
+ if (!pci_is_pcie(dev))
+ return;
+
+ ret = pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &dev_cap);
+ if (ret)
+ return;
+
+ if (dev_cap & PCI_EXP_DEVCAP_EXT_TAG)
+ pcie_capability_set_word(dev, PCI_EXP_DEVCTL,
+ PCI_EXP_DEVCTL_EXT_TAG);
+}
+
static void pci_configure_device(struct pci_dev *dev)
{
struct hotplug_params hpp;
int ret;
pci_configure_mps(dev);
+ pci_configure_extended_tags(dev);
memset(&hpp, 0, sizeof(hpp));
ret = pci_get_hp_params(dev, &hpp);
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 1800befa8b8b..a0b3cd5726dc 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -4150,15 +4150,35 @@ static int pci_quirk_intel_pch_acs(struct pci_dev *dev, u16 acs_flags)
*
* N.B. This doesn't fix what lspci shows.
*
+ * The 100 series chipset specification update includes this as errata #23[3].
+ *
+ * The 200 series chipset (Union Point) has the same bug according to the
+ * specification update (Intel 200 Series Chipset Family Platform Controller
+ * Hub, Specification Update, January 2017, Revision 001, Document# 335194-001,
+ * Errata 22)[4]. Per the datasheet[5], root port PCI Device IDs for this
+ * chipset include:
+ *
+ * 0xa290-0xa29f PCI Express Root port #{0-16}
+ * 0xa2e7-0xa2ee PCI Express Root port #{17-24}
+ *
* [1] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-2.html
* [2] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-1.html
+ * [3] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-spec-update.html
+ * [4] http://www.intel.com/content/www/us/en/chipsets/200-series-chipset-pch-spec-update.html
+ * [5] http://www.intel.com/content/www/us/en/chipsets/200-series-chipset-pch-datasheet-vol-1.html
*/
static bool pci_quirk_intel_spt_pch_acs_match(struct pci_dev *dev)
{
- return pci_is_pcie(dev) &&
- pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT &&
- ((dev->device & ~0xf) == 0xa110 ||
- (dev->device >= 0xa167 && dev->device <= 0xa16a));
+ if (!pci_is_pcie(dev) || pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
+ return false;
+
+ switch (dev->device) {
+ case 0xa110 ... 0xa11f: case 0xa167 ... 0xa16a: /* Sunrise Point */
+ case 0xa290 ... 0xa29f: case 0xa2e7 ... 0xa2ee: /* Union Point */
+ return true;
+ }
+
+ return false;
}
#define INTEL_SPT_ACS_CTRL (PCI_ACS_CAP + 4)
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index f30ca75b5b6c..cb389277df41 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -105,17 +105,8 @@ static struct pci_dev_resource *res_to_dev_res(struct list_head *head,
struct pci_dev_resource *dev_res;
list_for_each_entry(dev_res, head, list) {
- if (dev_res->res == res) {
- int idx = res - &dev_res->dev->resource[0];
-
- dev_printk(KERN_DEBUG, &dev_res->dev->dev,
- "res[%d]=%pR res_to_dev_res add_size %llx min_align %llx\n",
- idx, dev_res->res,
- (unsigned long long)dev_res->add_size,
- (unsigned long long)dev_res->min_align);
-
+ if (dev_res->res == res)
return dev_res;
- }
}
return NULL;