Diffstat (limited to 'drivers/perf/dwc_pcie_pmu.c')
-rw-r--r-- drivers/perf/dwc_pcie_pmu.c | 114
1 file changed, 57 insertions(+), 57 deletions(-)
diff --git a/drivers/perf/dwc_pcie_pmu.c b/drivers/perf/dwc_pcie_pmu.c
index 957058ad0099..cccecae9823f 100644
--- a/drivers/perf/dwc_pcie_pmu.c
+++ b/drivers/perf/dwc_pcie_pmu.c
@@ -20,7 +20,6 @@
 #include <linux/sysfs.h>
 #include <linux/types.h>
 
-#define DWC_PCIE_VSEC_RAS_DES_ID	0x02
 #define DWC_PCIE_EVENT_CNT_CTL		0x8
 
 /*
@@ -82,7 +81,6 @@ struct dwc_pcie_pmu {
 	u16			ras_des_offset;
 	u32			nr_lanes;
 
-	struct list_head	pmu_node;
 	struct hlist_node	cpuhp_node;
 	struct perf_event	*event[DWC_PCIE_EVENT_TYPE_MAX];
 	int			on_cpu;
@@ -101,12 +99,23 @@ struct dwc_pcie_dev_info {
 	struct list_head dev_node;
 };
 
-struct dwc_pcie_vendor_id {
-	int vendor_id;
+struct dwc_pcie_pmu_vsec_id {
+	u16 vendor_id;
+	u16 vsec_id;
+	u8 vsec_rev;
 };
 
-static const struct dwc_pcie_vendor_id dwc_pcie_vendor_ids[] = {
-	{.vendor_id = PCI_VENDOR_ID_ALIBABA },
+/*
+ * VSEC IDs are allocated by the vendor, so a given ID may mean different
+ * things to different vendors.  See PCIe r6.0, sec 7.9.5.2.
+ */
+static const struct dwc_pcie_pmu_vsec_id dwc_pcie_pmu_vsec_ids[] = {
+	{ .vendor_id = PCI_VENDOR_ID_ALIBABA,
+	  .vsec_id = 0x02, .vsec_rev = 0x4 },
+	{ .vendor_id = PCI_VENDOR_ID_AMPERE,
+	  .vsec_id = 0x02, .vsec_rev = 0x4 },
+	{ .vendor_id = PCI_VENDOR_ID_QCOM,
+	  .vsec_id = 0x02, .vsec_rev = 0x4 },
 	{} /* terminator */
 };
 
@@ -198,14 +207,14 @@ static struct attribute *dwc_pcie_pmu_time_event_attrs[] = {
 	DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(L1_1, 0x05),
 	DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(L1_2, 0x06),
 	DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(CFG_RCVRY, 0x07),
-	DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(TX_RX_L0S, 0x08),
-	DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(L1_AUX, 0x09),
+	DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(L1_AUX, 0x08),
+	DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(TX_RX_L0S, 0x09),
 
 	/* Group #1 */
-	DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(Tx_PCIe_TLP_Data_Payload, 0x20),
-	DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(Rx_PCIe_TLP_Data_Payload, 0x21),
-	DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(Tx_CCIX_TLP_Data_Payload, 0x22),
-	DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(Rx_CCIX_TLP_Data_Payload, 0x23),
+	DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(tx_pcie_tlp_data_payload, 0x20),
+	DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(rx_pcie_tlp_data_payload, 0x21),
+	DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(tx_ccix_tlp_data_payload, 0x22),
+	DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(rx_ccix_tlp_data_payload, 0x23),
 
 	/*
 	 * Leave it to the user to specify the lane ID to avoid generating
@@ -215,9 +224,9 @@ static struct attribute *dwc_pcie_pmu_time_event_attrs[] = {
 	DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_update_fc_dllp, 0x601),
 	DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_ack_dllp, 0x602),
 	DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_update_fc_dllp, 0x603),
-	DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_nulified_tlp, 0x604),
-	DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_nulified_tlp, 0x605),
-	DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_duplicate_tl, 0x606),
+	DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_nullified_tlp, 0x604),
+	DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_nullified_tlp, 0x605),
+	DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_duplicate_tlp, 0x606),
 	DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_memory_write, 0x700),
 	DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_memory_read, 0x701),
 	DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_configuration_write, 0x702),
@@ -518,31 +527,28 @@ static void dwc_pcie_unregister_pmu(void *data)
 	perf_pmu_unregister(&pcie_pmu->pmu);
 }
 
-static bool dwc_pcie_match_des_cap(struct pci_dev *pdev)
+static u16 dwc_pcie_des_cap(struct pci_dev *pdev)
 {
-	const struct dwc_pcie_vendor_id *vid;
-	u16 vsec = 0;
+	const struct dwc_pcie_pmu_vsec_id *vid;
+	u16 vsec;
 	u32 val;
 
 	if (!pci_is_pcie(pdev) ||
 	    !(pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT))
-		return false;
+		return 0;
 
-	for (vid = dwc_pcie_vendor_ids; vid->vendor_id; vid++) {
+	for (vid = dwc_pcie_pmu_vsec_ids; vid->vendor_id; vid++) {
 		vsec = pci_find_vsec_capability(pdev, vid->vendor_id,
-						DWC_PCIE_VSEC_RAS_DES_ID);
-		if (vsec)
-			break;
+						vid->vsec_id);
+		if (vsec) {
+			pci_read_config_dword(pdev, vsec + PCI_VNDR_HEADER,
+					      &val);
+			if (PCI_VNDR_HEADER_REV(val) == vid->vsec_rev) {
+				pci_dbg(pdev, "Detected PCIe Vendor-Specific Extended Capability RAS DES\n");
+				return vsec;
+			}
+		}
 	}
-	if (!vsec)
-		return false;
-
-	pci_read_config_dword(pdev, vsec + PCI_VNDR_HEADER, &val);
-	if (PCI_VNDR_HEADER_REV(val) != 0x04)
-		return false;
-
-	pci_dbg(pdev,
-		"Detected PCIe Vendor-Specific Extended Capability RAS DES\n");
-	return true;
+	return 0;
 }
 
 static void dwc_pcie_unregister_dev(struct dwc_pcie_dev_info *dev_info)
@@ -556,10 +562,10 @@ static int dwc_pcie_register_dev(struct pci_dev *pdev)
 {
 	struct platform_device *plat_dev;
 	struct dwc_pcie_dev_info *dev_info;
-	u32 bdf;
+	u32 sbdf;
 
-	bdf = PCI_DEVID(pdev->bus->number, pdev->devfn);
-	plat_dev = platform_device_register_data(NULL, "dwc_pcie_pmu", bdf,
+	sbdf = (pci_domain_nr(pdev->bus) << 16) | PCI_DEVID(pdev->bus->number, pdev->devfn);
+	plat_dev = platform_device_register_data(NULL, "dwc_pcie_pmu", sbdf,
 						 pdev, sizeof(*pdev));
 
 	if (IS_ERR(plat_dev))
@@ -586,7 +592,7 @@ static int dwc_pcie_pmu_notifier(struct notifier_block *nb,
 
 	switch (action) {
 	case BUS_NOTIFY_ADD_DEVICE:
-		if (!dwc_pcie_match_des_cap(pdev))
+		if (!dwc_pcie_des_cap(pdev))
 			return NOTIFY_DONE;
 		if (dwc_pcie_register_dev(pdev))
 			return NOTIFY_BAD;
@@ -611,15 +617,16 @@ static int dwc_pcie_pmu_probe(struct platform_device *plat_dev)
 	struct pci_dev *pdev = plat_dev->dev.platform_data;
 	struct dwc_pcie_pmu *pcie_pmu;
 	char *name;
-	u32 bdf, val;
+	u32 sbdf;
 	u16 vsec;
 	int ret;
 
-	vsec = pci_find_vsec_capability(pdev, pdev->vendor,
-					DWC_PCIE_VSEC_RAS_DES_ID);
-	pci_read_config_dword(pdev, vsec + PCI_VNDR_HEADER, &val);
-	bdf = PCI_DEVID(pdev->bus->number, pdev->devfn);
-	name = devm_kasprintf(&plat_dev->dev, GFP_KERNEL, "dwc_rootport_%x", bdf);
+	vsec = dwc_pcie_des_cap(pdev);
+	if (!vsec)
+		return -ENODEV;
+
+	sbdf = plat_dev->id;
+	name = devm_kasprintf(&plat_dev->dev, GFP_KERNEL, "dwc_rootport_%x", sbdf);
 	if (!name)
 		return -ENOMEM;
 
@@ -650,7 +657,7 @@ static int dwc_pcie_pmu_probe(struct platform_device *plat_dev)
 	ret = cpuhp_state_add_instance(dwc_pcie_pmu_hp_state,
 				       &pcie_pmu->cpuhp_node);
 	if (ret) {
-		pci_err(pdev, "Error %d registering hotplug @%x\n", ret, bdf);
+		pci_err(pdev, "Error %d registering hotplug @%x\n", ret, sbdf);
 		return ret;
 	}
 
@@ -663,7 +670,7 @@ static int dwc_pcie_pmu_probe(struct platform_device *plat_dev)
 
 	ret = perf_pmu_register(&pcie_pmu->pmu, name, -1);
 	if (ret) {
-		pci_err(pdev, "Error %d registering PMU @%x\n", ret, bdf);
+		pci_err(pdev, "Error %d registering PMU @%x\n", ret, sbdf);
 		return ret;
 	}
 	ret = devm_add_action_or_reset(&plat_dev->dev, dwc_pcie_unregister_pmu,
@@ -690,9 +697,8 @@ static int dwc_pcie_pmu_offline_cpu(unsigned int cpu, struct hlist_node *cpuhp_n
 {
 	struct dwc_pcie_pmu *pcie_pmu;
 	struct pci_dev *pdev;
-	int node;
-	cpumask_t mask;
 	unsigned int target;
+	int node;
 
 	pcie_pmu = hlist_entry_safe(cpuhp_node, struct dwc_pcie_pmu, cpuhp_node);
 	/* Nothing to do if this CPU doesn't own the PMU */
@@ -702,10 +708,9 @@ static int dwc_pcie_pmu_offline_cpu(unsigned int cpu, struct hlist_node *cpuhp_n
 	pcie_pmu->on_cpu = -1;
 	pdev = pcie_pmu->pdev;
 	node = dev_to_node(&pdev->dev);
-	if (cpumask_and(&mask, cpumask_of_node(node), cpu_online_mask) &&
-	    cpumask_andnot(&mask, &mask, cpumask_of(cpu)))
-		target = cpumask_any(&mask);
-	else
+
+	target = cpumask_any_and_but(cpumask_of_node(node), cpu_online_mask, cpu);
+	if (target >= nr_cpu_ids)
 		target = cpumask_any_but(cpu_online_mask, cpu);
 
 	if (target >= nr_cpu_ids) {
@@ -728,11 +733,10 @@ static struct platform_driver dwc_pcie_pmu_driver = {
 static int __init dwc_pcie_pmu_init(void)
 {
 	struct pci_dev *pdev = NULL;
-	bool found = false;
 	int ret;
 
 	for_each_pci_dev(pdev) {
-		if (!dwc_pcie_match_des_cap(pdev))
+		if (!dwc_pcie_des_cap(pdev))
 			continue;
 
 		ret = dwc_pcie_register_dev(pdev);
@@ -740,11 +744,7 @@ static int __init dwc_pcie_pmu_init(void)
 			pci_dev_put(pdev);
 			return ret;
 		}
-
-		found = true;
 	}
-	if (!found)
-		return -ENODEV;
 
 	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
 				      "perf/dwc_pcie_pmu:online",
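
Note on the VSEC rework above: dwc_pcie_des_cap() now accepts the capability only when both the vendor-allocated VSEC ID and its revision match a table entry, since (per the new comment) the same VSEC ID can mean different things under different vendor IDs. The standalone sketch below shows how the vendor header dword it reads decodes; the VNDR_HEADER_* macros mirror the kernel's PCI_VNDR_HEADER_* helpers from pci_regs.h, and the raw value is a made-up example, not data from real hardware.

	/* Sketch: decoding the Vendor-Specific Extended Capability header
	 * dword, field layout per PCIe r6.0 sec 7.9.5.2.  Plain C, not
	 * driver code. */
	#include <stdint.h>
	#include <stdio.h>

	#define VNDR_HEADER_ID(x)	((x) & 0xffff)	      /* vendor-defined VSEC ID */
	#define VNDR_HEADER_REV(x)	(((x) >> 16) & 0xf)   /* VSEC revision */
	#define VNDR_HEADER_LEN(x)	(((x) >> 20) & 0xfff) /* capability length */

	int main(void)
	{
		uint32_t val = 0x10040002;	/* hypothetical RAS DES header */

		/* Matches the table: ID 0x02, revision 0x4. */
		printf("id=0x%02x rev=0x%x len=%u\n",
		       (unsigned)VNDR_HEADER_ID(val),
		       (unsigned)VNDR_HEADER_REV(val),
		       (unsigned)VNDR_HEADER_LEN(val));
		return 0;
	}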
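The bdf-to-sbdf change folds the PCI segment (domain) number into the platform-device ID, so root ports with the same bus/devfn in different domains no longer produce colliding "dwc_rootport_%x" PMU names. A minimal sketch of the resulting layout; sbdf_encode is a hypothetical helper, not part of the driver:

	/* sbdf layout: segment in bits 31:16, bus in 15:8, devfn in 7:0
	 * (PCI_DEVID(bus, devfn) supplies the low 16 bits in the driver). */
	#include <stdint.h>
	#include <stdio.h>

	static uint32_t sbdf_encode(uint16_t segment, uint8_t bus, uint8_t devfn)
	{
		return ((uint32_t)segment << 16) | ((uint32_t)bus << 8) | devfn;
	}

	int main(void)
	{
		/* segment 0, bus 0x30, device 1, function 0 -> devfn 0x08 */
		uint32_t sbdf = sbdf_encode(0x0000, 0x30, 0x08);

		printf("dwc_rootport_%x\n", sbdf);	/* dwc_rootport_3008 */
		return 0;
	}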
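The hotplug path now calls cpumask_any_and_but() instead of open-coding the and/andnot sequence, which drops the on-stack cpumask_t (a real saving with large NR_CPUS): pick any online CPU on the PMU's NUMA node other than the outgoing one, and fall back to any other online CPU if the node has none left. A userspace analogue of that selection, with uint64_t bitmasks standing in for struct cpumask and GCC/Clang's __builtin_ctzll assumed; this is a sketch of the semantics, not the kernel implementation:

	#include <stdint.h>
	#include <stdio.h>

	#define NR_CPUS 64

	/* Any set bit in (a & b) other than `cpu` (cpu < NR_CPUS assumed);
	 * NR_CPUS means "none", matching the >= nr_cpu_ids convention. */
	static unsigned int any_and_but(uint64_t a, uint64_t b, unsigned int cpu)
	{
		uint64_t m = (a & b) & ~(1ULL << cpu);

		return m ? (unsigned int)__builtin_ctzll(m) : NR_CPUS;
	}

	int main(void)
	{
		uint64_t node_mask = 0x0f;	/* CPUs 0-3 on this node */
		uint64_t online_mask = 0x0e;	/* CPU 0 offline */
		unsigned int target = any_and_but(node_mask, online_mask, 2);

		if (target >= NR_CPUS)
			printf("no local CPU, fall back to any other online CPU\n");
		else
			printf("migrate PMU context to CPU %u\n", target); /* CPU 1 */
		return 0;
	}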