Diffstat (limited to 'drivers/perf')
-rw-r--r--  drivers/perf/Kconfig | 79
-rw-r--r--  drivers/perf/Makefile | 9
-rw-r--r--  drivers/perf/alibaba_uncore_drw_pmu.c | 29
-rw-r--r--  drivers/perf/amlogic/meson_ddr_pmu_core.c | 3
-rw-r--r--  drivers/perf/amlogic/meson_g12_ddr_pmu.c | 5
-rw-r--r--  drivers/perf/apple_m1_cpu_pmu.c | 296
-rw-r--r--  drivers/perf/arm-cci.c | 19
-rw-r--r--  drivers/perf/arm-ccn.c | 24
-rw-r--r--  drivers/perf/arm-cmn.c | 648
-rw-r--r--  drivers/perf/arm-ni.c | 862
-rw-r--r--  drivers/perf/arm_brbe.c | 805
-rw-r--r--  drivers/perf/arm_brbe.h | 47
-rw-r--r--  drivers/perf/arm_cspmu/Kconfig | 19
-rw-r--r--  drivers/perf/arm_cspmu/Makefile | 8
-rw-r--r--  drivers/perf/arm_cspmu/ampere_cspmu.c | 261
-rw-r--r--  drivers/perf/arm_cspmu/arm_cspmu.c | 490
-rw-r--r--  drivers/perf/arm_cspmu/arm_cspmu.h | 128
-rw-r--r--  drivers/perf/arm_cspmu/nvidia_cspmu.c | 316
-rw-r--r--  drivers/perf/arm_cspmu/nvidia_cspmu.h | 17
-rw-r--r--  drivers/perf/arm_dmc620_pmu.c | 14
-rw-r--r--  drivers/perf/arm_dsu_pmu.c | 41
-rw-r--r--  drivers/perf/arm_pmu.c | 100
-rw-r--r--  drivers/perf/arm_pmu_acpi.c | 2
-rw-r--r--  drivers/perf/arm_pmu_platform.c | 23
-rw-r--r--  drivers/perf/arm_pmuv3.c | 611
-rw-r--r--  drivers/perf/arm_smmuv3_pmu.c | 28
-rw-r--r--  drivers/perf/arm_spe_pmu.c | 239
-rw-r--r--  drivers/perf/arm_v6_pmu.c | 431
-rw-r--r--  drivers/perf/arm_v7_pmu.c | 1924
-rw-r--r--  drivers/perf/arm_xscale_pmu.c | 747
-rw-r--r--  drivers/perf/cxl_pmu.c | 51
-rw-r--r--  drivers/perf/dwc_pcie_pmu.c | 889
-rw-r--r--  drivers/perf/fsl_imx8_ddr_perf.c | 143
-rw-r--r--  drivers/perf/fsl_imx9_ddr_perf.c | 392
-rw-r--r--  drivers/perf/fujitsu_uncore_pmu.c | 613
-rw-r--r--  drivers/perf/hisilicon/Makefile | 3
-rw-r--r--  drivers/perf/hisilicon/hisi_pcie_pmu.c | 182
-rw-r--r--  drivers/perf/hisilicon/hisi_uncore_cpa_pmu.c | 45
-rw-r--r--  drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c | 414
-rw-r--r--  drivers/perf/hisilicon/hisi_uncore_hha_pmu.c | 57
-rw-r--r--  drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c | 590
-rw-r--r--  drivers/perf/hisilicon/hisi_uncore_mn_pmu.c | 411
-rw-r--r--  drivers/perf/hisilicon/hisi_uncore_noc_pmu.c | 443
-rw-r--r--  drivers/perf/hisilicon/hisi_uncore_pa_pmu.c | 62
-rw-r--r--  drivers/perf/hisilicon/hisi_uncore_pmu.c | 196
-rw-r--r--  drivers/perf/hisilicon/hisi_uncore_pmu.h | 61
-rw-r--r--  drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c | 258
-rw-r--r--  drivers/perf/hisilicon/hisi_uncore_uc_pmu.c | 91
-rw-r--r--  drivers/perf/hisilicon/hns3_pmu.c | 37
-rw-r--r--  drivers/perf/marvell_cn10k_ddr_pmu.c | 538
-rw-r--r--  drivers/perf/marvell_cn10k_tad_pmu.c | 70
-rw-r--r--  drivers/perf/marvell_pem_pmu.c | 425
-rw-r--r--  drivers/perf/qcom_l2_pmu.c | 12
-rw-r--r--  drivers/perf/qcom_l3_pmu.c | 16
-rw-r--r--  drivers/perf/riscv_pmu.c | 139
-rw-r--r--  drivers/perf/riscv_pmu_legacy.c | 43
-rw-r--r--  drivers/perf/riscv_pmu_sbi.c | 790
-rw-r--r--  drivers/perf/starfive_starlink_pmu.c | 642
-rw-r--r--  drivers/perf/thunderx2_pmu.c | 38
-rw-r--r--  drivers/perf/xgene_pmu.c | 53
60 files changed, 13638 insertions, 2291 deletions
diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig
index 273d67ecf6d2..638321fc9800 100644
--- a/drivers/perf/Kconfig
+++ b/drivers/perf/Kconfig
@@ -48,6 +48,13 @@ config ARM_CMN
Support for PMU events monitoring on the Arm CMN-600 Coherent Mesh
Network interconnect.
+config ARM_NI
+ tristate "Arm NI-700 PMU support"
+ depends on ARM64 || COMPILE_TEST
+ help
+ Support for PMU events monitoring on the Arm NI-700 Network-on-Chip
+ interconnect and family.
+
config ARM_PMU
depends on ARM || ARM64
bool "ARM PMU framework"
@@ -56,6 +63,18 @@ config ARM_PMU
Say y if you want to use CPU performance monitors on ARM-based
systems.
+config ARM_V6_PMU
+ depends on ARM_PMU && (CPU_V6 || CPU_V6K)
+ def_bool y
+
+config ARM_V7_PMU
+ depends on ARM_PMU && CPU_V7
+ def_bool y
+
+config ARM_XSCALE_PMU
+ depends on ARM_PMU && CPU_XSCALE
+ def_bool y
+
config RISCV_PMU
depends on RISCV
bool "RISC-V PMU framework"
@@ -86,6 +105,30 @@ config RISCV_PMU_SBI
full perf feature support i.e. counter overflow, privilege mode
filtering, counter configuration.
+config STARFIVE_STARLINK_PMU
+ depends on ARCH_STARFIVE || COMPILE_TEST
+ depends on 64BIT
+ bool "StarFive StarLink PMU"
+ help
+ Provide support for the StarLink Performance Monitor Unit.
+ The StarLink PMU integrates one or more cores with an L3 memory
+ system. Its L3 cache events are added to the perf event
+ subsystem, allowing monitoring of various L3 cache events.
+
+config ANDES_CUSTOM_PMU
+ bool "Andes custom PMU support"
+ depends on ARCH_RENESAS && RISCV_ALTERNATIVE && RISCV_PMU_SBI
+ default y
+ help
+ The Andes cores implement the PMU overflow extension in a way very
+ similar to the standard Sscofpmf and Smcntrpmf extensions.
+
+ This will patch the overflow and pending CSRs and handle the
+ non-standard behaviour via the regular SBI PMU driver and
+ interface.
+
+ If you don't know what to do here, say "Y".
+
config ARM_PMU_ACPI
depends on ARM_PMU && ACPI
def_bool y
@@ -135,6 +178,15 @@ config FSL_IMX9_DDR_PMU
can give information about memory throughput and other related
events.
+config FUJITSU_UNCORE_PMU
+ tristate "Fujitsu Uncore PMU"
+ depends on (ARM64 && ACPI) || (COMPILE_TEST && 64BIT)
+ help
+ Provides support for the Uncore performance monitor unit (PMU)
+ in Fujitsu processors.
+ Adds the Uncore PMU into the perf events subsystem for
+ monitoring Uncore events.
+
config QCOM_L2_PMU
bool "Qualcomm Technologies L2-cache PMU"
depends on ARCH_QCOM && ARM64 && ACPI
@@ -159,7 +211,7 @@ config THUNDERX2_PMU
tristate "Cavium ThunderX2 SoC PMU UNCORE"
depends on ARCH_THUNDER2 || COMPILE_TEST
depends on NUMA && ACPI
- default m
+ default m if ARCH_THUNDER2
help
Provides support for ThunderX2 UNCORE events.
The SoC has PMU support in its L3 cache controller (L3C) and
@@ -180,6 +232,17 @@ config ARM_SPE_PMU
Extension, which provides periodic sampling of operations in
the CPU pipeline and reports this via the perf AUX interface.
+config ARM64_BRBE
+ bool "Enable support for branch stack sampling using FEAT_BRBE"
+ depends on ARM_PMUV3 && ARM64
+ default y
+ help
+ Enable perf support for Branch Record Buffer Extension (BRBE) which
+ records all branches taken in an execution path. This supports some
+ branch types and privilege based filtering. It captures additional
+ relevant information such as cycle count, misprediction and branch
+ type, branch privilege level etc.
+
config ARM_DMC620_PMU
tristate "Enable PMU support for the ARM DMC-620 memory controller"
depends on (ARM64 && ACPI) || COMPILE_TEST
@@ -217,6 +280,13 @@ config MARVELL_CN10K_DDR_PMU
Enable perf support for Marvell DDR Performance monitoring
event on CN10K platform.
+config DWC_PCIE_PMU
+ tristate "Synopsys DesignWare PCIe PMU"
+ depends on PCI
+ help
+ Enable perf support for the Synopsys DesignWare PCIe PMU performance
+ monitoring events on platforms including the Alibaba Yitian 710.
+
source "drivers/perf/arm_cspmu/Kconfig"
source "drivers/perf/amlogic/Kconfig"
@@ -234,4 +304,11 @@ config CXL_PMU
If unsure say 'm'.
+config MARVELL_PEM_PMU
+ tristate "MARVELL PEM PMU Support"
+ depends on ARCH_THUNDER || (COMPILE_TEST && 64BIT)
+ help
+ Enable support for PCIe interface performance monitoring
+ on Marvell platforms.
+
endmenu
diff --git a/drivers/perf/Makefile b/drivers/perf/Makefile
index 16b3ec4db916..ea52711a87e3 100644
--- a/drivers/perf/Makefile
+++ b/drivers/perf/Makefile
@@ -3,26 +3,35 @@ obj-$(CONFIG_ARM_CCI_PMU) += arm-cci.o
obj-$(CONFIG_ARM_CCN) += arm-ccn.o
obj-$(CONFIG_ARM_CMN) += arm-cmn.o
obj-$(CONFIG_ARM_DSU_PMU) += arm_dsu_pmu.o
+obj-$(CONFIG_ARM_NI) += arm-ni.o
obj-$(CONFIG_ARM_PMU) += arm_pmu.o arm_pmu_platform.o
obj-$(CONFIG_ARM_PMU_ACPI) += arm_pmu_acpi.o
obj-$(CONFIG_ARM_PMUV3) += arm_pmuv3.o
+obj-$(CONFIG_ARM_V6_PMU) += arm_v6_pmu.o
+obj-$(CONFIG_ARM_V7_PMU) += arm_v7_pmu.o
+obj-$(CONFIG_ARM_XSCALE_PMU) += arm_xscale_pmu.o
obj-$(CONFIG_ARM_SMMU_V3_PMU) += arm_smmuv3_pmu.o
obj-$(CONFIG_FSL_IMX8_DDR_PMU) += fsl_imx8_ddr_perf.o
obj-$(CONFIG_FSL_IMX9_DDR_PMU) += fsl_imx9_ddr_perf.o
+obj-$(CONFIG_FUJITSU_UNCORE_PMU) += fujitsu_uncore_pmu.o
obj-$(CONFIG_HISI_PMU) += hisilicon/
obj-$(CONFIG_QCOM_L2_PMU) += qcom_l2_pmu.o
obj-$(CONFIG_QCOM_L3_PMU) += qcom_l3_pmu.o
obj-$(CONFIG_RISCV_PMU) += riscv_pmu.o
obj-$(CONFIG_RISCV_PMU_LEGACY) += riscv_pmu_legacy.o
obj-$(CONFIG_RISCV_PMU_SBI) += riscv_pmu_sbi.o
+obj-$(CONFIG_STARFIVE_STARLINK_PMU) += starfive_starlink_pmu.o
obj-$(CONFIG_THUNDERX2_PMU) += thunderx2_pmu.o
obj-$(CONFIG_XGENE_PMU) += xgene_pmu.o
obj-$(CONFIG_ARM_SPE_PMU) += arm_spe_pmu.o
+obj-$(CONFIG_ARM64_BRBE) += arm_brbe.o
obj-$(CONFIG_ARM_DMC620_PMU) += arm_dmc620_pmu.o
obj-$(CONFIG_MARVELL_CN10K_TAD_PMU) += marvell_cn10k_tad_pmu.o
obj-$(CONFIG_MARVELL_CN10K_DDR_PMU) += marvell_cn10k_ddr_pmu.o
+obj-$(CONFIG_MARVELL_PEM_PMU) += marvell_pem_pmu.o
obj-$(CONFIG_APPLE_M1_CPU_PMU) += apple_m1_cpu_pmu.o
obj-$(CONFIG_ALIBABA_UNCORE_DRW_PMU) += alibaba_uncore_drw_pmu.o
+obj-$(CONFIG_DWC_PCIE_PMU) += dwc_pcie_pmu.o
obj-$(CONFIG_ARM_CORESIGHT_PMU_ARCH_SYSTEM_PMU) += arm_cspmu/
obj-$(CONFIG_MESON_DDR_PMU) += amlogic/
obj-$(CONFIG_CXL_PMU) += cxl_pmu.o
diff --git a/drivers/perf/alibaba_uncore_drw_pmu.c b/drivers/perf/alibaba_uncore_drw_pmu.c
index 19d459a36be5..99a0ef9817e0 100644
--- a/drivers/perf/alibaba_uncore_drw_pmu.c
+++ b/drivers/perf/alibaba_uncore_drw_pmu.c
@@ -236,24 +236,16 @@ static const struct attribute_group ali_drw_pmu_cpumask_attr_group = {
.attrs = ali_drw_pmu_cpumask_attrs,
};
-static ssize_t ali_drw_pmu_identifier_show(struct device *dev,
- struct device_attribute *attr,
- char *page)
-{
- return sysfs_emit(page, "%s\n", "ali_drw_pmu");
-}
-
static umode_t ali_drw_pmu_identifier_attr_visible(struct kobject *kobj,
struct attribute *attr, int n)
{
return attr->mode;
}
-static struct device_attribute ali_drw_pmu_identifier_attr =
- __ATTR(identifier, 0444, ali_drw_pmu_identifier_show, NULL);
+static DEVICE_STRING_ATTR_RO(ali_drw_pmu_identifier, 0444, "ali_drw_pmu");
static struct attribute *ali_drw_pmu_identifier_attrs[] = {
- &ali_drw_pmu_identifier_attr.attr,
+ &dev_attr_ali_drw_pmu_identifier.attr.attr,
NULL
};
@@ -408,7 +400,7 @@ static irqreturn_t ali_drw_pmu_isr(int irq_num, void *data)
}
/* clear common counter intr status */
- clr_status = FIELD_PREP(ALI_DRW_PMCOM_CNT_OV_INTR_MASK, 1);
+ clr_status = FIELD_PREP(ALI_DRW_PMCOM_CNT_OV_INTR_MASK, status);
writel(clr_status,
drw_pmu->cfg_base + ALI_DRW_PMU_OV_INTR_CLR);
}
@@ -709,6 +701,7 @@ static int ali_drw_pmu_probe(struct platform_device *pdev)
drw_pmu->pmu = (struct pmu) {
.module = THIS_MODULE,
+ .parent = &pdev->dev,
.task_ctx_nr = perf_invalid_context,
.event_init = ali_drw_pmu_event_init,
.add = ali_drw_pmu_add,
@@ -729,7 +722,7 @@ static int ali_drw_pmu_probe(struct platform_device *pdev)
return ret;
}
-static int ali_drw_pmu_remove(struct platform_device *pdev)
+static void ali_drw_pmu_remove(struct platform_device *pdev)
{
struct ali_drw_pmu *drw_pmu = platform_get_drvdata(pdev);
@@ -739,8 +732,6 @@ static int ali_drw_pmu_remove(struct platform_device *pdev)
ali_drw_pmu_uninit_irq(drw_pmu);
perf_pmu_unregister(&drw_pmu->pmu);
-
- return 0;
}
static int ali_drw_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
@@ -748,18 +739,14 @@ static int ali_drw_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
struct ali_drw_pmu_irq *irq;
struct ali_drw_pmu *drw_pmu;
unsigned int target;
- int ret;
- cpumask_t node_online_cpus;
irq = hlist_entry_safe(node, struct ali_drw_pmu_irq, node);
if (cpu != irq->cpu)
return 0;
- ret = cpumask_and(&node_online_cpus,
- cpumask_of_node(cpu_to_node(cpu)), cpu_online_mask);
- if (ret)
- target = cpumask_any_but(&node_online_cpus, cpu);
- else
+ target = cpumask_any_and_but(cpumask_of_node(cpu_to_node(cpu)),
+ cpu_online_mask, cpu);
+ if (target >= nr_cpu_ids)
target = cpumask_any_but(cpu_online_mask, cpu);
if (target >= nr_cpu_ids)
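
The hotplug rework above folds the temporary node mask into a single cpumask_any_and_but() call. As a minimal sketch (foo_pmu_pick_target() is a hypothetical helper, not part of the driver), the resulting migration pattern looks like this, relying on the helper returning a value >= nr_cpu_ids when the node's online CPUs, minus the departing one, are exhausted:

#include <linux/cpumask.h>
#include <linux/topology.h>

/* Hypothetical helper: pick a new home CPU when @cpu goes offline. */
static unsigned int foo_pmu_pick_target(unsigned int cpu)
{
	unsigned int target;

	/* Prefer another online CPU in the same NUMA node... */
	target = cpumask_any_and_but(cpumask_of_node(cpu_to_node(cpu)),
				     cpu_online_mask, cpu);
	/* ...and only then fall back to any other online CPU. */
	if (target >= nr_cpu_ids)
		target = cpumask_any_but(cpu_online_mask, cpu);

	return target;	/* still >= nr_cpu_ids if @cpu was the last CPU */
}
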
diff --git a/drivers/perf/amlogic/meson_ddr_pmu_core.c b/drivers/perf/amlogic/meson_ddr_pmu_core.c
index bbc7285fd934..c1e755c356a3 100644
--- a/drivers/perf/amlogic/meson_ddr_pmu_core.c
+++ b/drivers/perf/amlogic/meson_ddr_pmu_core.c
@@ -492,6 +492,7 @@ int meson_ddr_pmu_create(struct platform_device *pdev)
*pmu = (struct ddr_pmu) {
.pmu = {
.module = THIS_MODULE,
+ .parent = &pdev->dev,
.capabilities = PERF_PMU_CAP_NO_EXCLUDE,
.task_ctx_nr = perf_invalid_context,
.attr_groups = attr_groups,
@@ -510,7 +511,7 @@ int meson_ddr_pmu_create(struct platform_device *pdev)
fmt_attr_fill(pmu->info.hw_info->fmt_attr);
- pmu->cpu = smp_processor_id();
+ pmu->cpu = raw_smp_processor_id();
name = devm_kasprintf(&pdev->dev, GFP_KERNEL, DDR_PERF_DEV_NAME);
if (!name)
diff --git a/drivers/perf/amlogic/meson_g12_ddr_pmu.c b/drivers/perf/amlogic/meson_g12_ddr_pmu.c
index 8b643888d503..f33e9a456e85 100644
--- a/drivers/perf/amlogic/meson_g12_ddr_pmu.c
+++ b/drivers/perf/amlogic/meson_g12_ddr_pmu.c
@@ -355,11 +355,9 @@ static int g12_ddr_pmu_probe(struct platform_device *pdev)
return meson_ddr_pmu_create(pdev);
}
-static int g12_ddr_pmu_remove(struct platform_device *pdev)
+static void g12_ddr_pmu_remove(struct platform_device *pdev)
{
meson_ddr_pmu_remove(pdev);
-
- return 0;
}
static const struct of_device_id meson_ddr_pmu_dt_match[] = {
@@ -377,6 +375,7 @@ static const struct of_device_id meson_ddr_pmu_dt_match[] = {
},
{}
};
+MODULE_DEVICE_TABLE(of, meson_ddr_pmu_dt_match);
static struct platform_driver g12_ddr_pmu_driver = {
.probe = g12_ddr_pmu_probe,
diff --git a/drivers/perf/apple_m1_cpu_pmu.c b/drivers/perf/apple_m1_cpu_pmu.c
index cd2de44b61b9..81b6f1a62349 100644
--- a/drivers/perf/apple_m1_cpu_pmu.c
+++ b/drivers/perf/apple_m1_cpu_pmu.c
@@ -12,6 +12,7 @@
#include <linux/of.h>
#include <linux/perf/arm_pmu.h>
+#include <linux/perf/arm_pmuv3.h>
#include <linux/platform_device.h>
#include <asm/apple_m1_pmu.h>
@@ -47,46 +48,81 @@
* implementations, we'll have to introduce per cpu-type tables.
*/
enum m1_pmu_events {
- M1_PMU_PERFCTR_UNKNOWN_01 = 0x01,
- M1_PMU_PERFCTR_CPU_CYCLES = 0x02,
- M1_PMU_PERFCTR_INSTRUCTIONS = 0x8c,
- M1_PMU_PERFCTR_UNKNOWN_8d = 0x8d,
- M1_PMU_PERFCTR_UNKNOWN_8e = 0x8e,
- M1_PMU_PERFCTR_UNKNOWN_8f = 0x8f,
- M1_PMU_PERFCTR_UNKNOWN_90 = 0x90,
- M1_PMU_PERFCTR_UNKNOWN_93 = 0x93,
- M1_PMU_PERFCTR_UNKNOWN_94 = 0x94,
- M1_PMU_PERFCTR_UNKNOWN_95 = 0x95,
- M1_PMU_PERFCTR_UNKNOWN_96 = 0x96,
- M1_PMU_PERFCTR_UNKNOWN_97 = 0x97,
- M1_PMU_PERFCTR_UNKNOWN_98 = 0x98,
- M1_PMU_PERFCTR_UNKNOWN_99 = 0x99,
- M1_PMU_PERFCTR_UNKNOWN_9a = 0x9a,
- M1_PMU_PERFCTR_UNKNOWN_9b = 0x9b,
- M1_PMU_PERFCTR_UNKNOWN_9c = 0x9c,
- M1_PMU_PERFCTR_UNKNOWN_9f = 0x9f,
- M1_PMU_PERFCTR_UNKNOWN_bf = 0xbf,
- M1_PMU_PERFCTR_UNKNOWN_c0 = 0xc0,
- M1_PMU_PERFCTR_UNKNOWN_c1 = 0xc1,
- M1_PMU_PERFCTR_UNKNOWN_c4 = 0xc4,
- M1_PMU_PERFCTR_UNKNOWN_c5 = 0xc5,
- M1_PMU_PERFCTR_UNKNOWN_c6 = 0xc6,
- M1_PMU_PERFCTR_UNKNOWN_c8 = 0xc8,
- M1_PMU_PERFCTR_UNKNOWN_ca = 0xca,
- M1_PMU_PERFCTR_UNKNOWN_cb = 0xcb,
- M1_PMU_PERFCTR_UNKNOWN_f5 = 0xf5,
- M1_PMU_PERFCTR_UNKNOWN_f6 = 0xf6,
- M1_PMU_PERFCTR_UNKNOWN_f7 = 0xf7,
- M1_PMU_PERFCTR_UNKNOWN_f8 = 0xf8,
- M1_PMU_PERFCTR_UNKNOWN_fd = 0xfd,
- M1_PMU_PERFCTR_LAST = M1_PMU_CFG_EVENT,
+ M1_PMU_PERFCTR_RETIRE_UOP = 0x1,
+ M1_PMU_PERFCTR_CORE_ACTIVE_CYCLE = 0x2,
+ M1_PMU_PERFCTR_L1I_TLB_FILL = 0x4,
+ M1_PMU_PERFCTR_L1D_TLB_FILL = 0x5,
+ M1_PMU_PERFCTR_MMU_TABLE_WALK_INSTRUCTION = 0x7,
+ M1_PMU_PERFCTR_MMU_TABLE_WALK_DATA = 0x8,
+ M1_PMU_PERFCTR_L2_TLB_MISS_INSTRUCTION = 0xa,
+ M1_PMU_PERFCTR_L2_TLB_MISS_DATA = 0xb,
+ M1_PMU_PERFCTR_MMU_VIRTUAL_MEMORY_FAULT_NONSPEC = 0xd,
+ M1_PMU_PERFCTR_SCHEDULE_UOP = 0x52,
+ M1_PMU_PERFCTR_INTERRUPT_PENDING = 0x6c,
+ M1_PMU_PERFCTR_MAP_STALL_DISPATCH = 0x70,
+ M1_PMU_PERFCTR_MAP_REWIND = 0x75,
+ M1_PMU_PERFCTR_MAP_STALL = 0x76,
+ M1_PMU_PERFCTR_MAP_INT_UOP = 0x7c,
+ M1_PMU_PERFCTR_MAP_LDST_UOP = 0x7d,
+ M1_PMU_PERFCTR_MAP_SIMD_UOP = 0x7e,
+ M1_PMU_PERFCTR_FLUSH_RESTART_OTHER_NONSPEC = 0x84,
+ M1_PMU_PERFCTR_INST_ALL = 0x8c,
+ M1_PMU_PERFCTR_INST_BRANCH = 0x8d,
+ M1_PMU_PERFCTR_INST_BRANCH_CALL = 0x8e,
+ M1_PMU_PERFCTR_INST_BRANCH_RET = 0x8f,
+ M1_PMU_PERFCTR_INST_BRANCH_TAKEN = 0x90,
+ M1_PMU_PERFCTR_INST_BRANCH_INDIR = 0x93,
+ M1_PMU_PERFCTR_INST_BRANCH_COND = 0x94,
+ M1_PMU_PERFCTR_INST_INT_LD = 0x95,
+ M1_PMU_PERFCTR_INST_INT_ST = 0x96,
+ M1_PMU_PERFCTR_INST_INT_ALU = 0x97,
+ M1_PMU_PERFCTR_INST_SIMD_LD = 0x98,
+ M1_PMU_PERFCTR_INST_SIMD_ST = 0x99,
+ M1_PMU_PERFCTR_INST_SIMD_ALU = 0x9a,
+ M1_PMU_PERFCTR_INST_LDST = 0x9b,
+ M1_PMU_PERFCTR_INST_BARRIER = 0x9c,
+ M1_PMU_PERFCTR_UNKNOWN_9f = 0x9f,
+ M1_PMU_PERFCTR_L1D_TLB_ACCESS = 0xa0,
+ M1_PMU_PERFCTR_L1D_TLB_MISS = 0xa1,
+ M1_PMU_PERFCTR_L1D_CACHE_MISS_ST = 0xa2,
+ M1_PMU_PERFCTR_L1D_CACHE_MISS_LD = 0xa3,
+ M1_PMU_PERFCTR_LD_UNIT_UOP = 0xa6,
+ M1_PMU_PERFCTR_ST_UNIT_UOP = 0xa7,
+ M1_PMU_PERFCTR_L1D_CACHE_WRITEBACK = 0xa8,
+ M1_PMU_PERFCTR_LDST_X64_UOP = 0xb1,
+ M1_PMU_PERFCTR_LDST_XPG_UOP = 0xb2,
+ M1_PMU_PERFCTR_ATOMIC_OR_EXCLUSIVE_SUCC = 0xb3,
+ M1_PMU_PERFCTR_ATOMIC_OR_EXCLUSIVE_FAIL = 0xb4,
+ M1_PMU_PERFCTR_L1D_CACHE_MISS_LD_NONSPEC = 0xbf,
+ M1_PMU_PERFCTR_L1D_CACHE_MISS_ST_NONSPEC = 0xc0,
+ M1_PMU_PERFCTR_L1D_TLB_MISS_NONSPEC = 0xc1,
+ M1_PMU_PERFCTR_ST_MEMORY_ORDER_VIOLATION_NONSPEC = 0xc4,
+ M1_PMU_PERFCTR_BRANCH_COND_MISPRED_NONSPEC = 0xc5,
+ M1_PMU_PERFCTR_BRANCH_INDIR_MISPRED_NONSPEC = 0xc6,
+ M1_PMU_PERFCTR_BRANCH_RET_INDIR_MISPRED_NONSPEC = 0xc8,
+ M1_PMU_PERFCTR_BRANCH_CALL_INDIR_MISPRED_NONSPEC = 0xca,
+ M1_PMU_PERFCTR_BRANCH_MISPRED_NONSPEC = 0xcb,
+ M1_PMU_PERFCTR_L1I_TLB_MISS_DEMAND = 0xd4,
+ M1_PMU_PERFCTR_MAP_DISPATCH_BUBBLE = 0xd6,
+ M1_PMU_PERFCTR_L1I_CACHE_MISS_DEMAND = 0xdb,
+ M1_PMU_PERFCTR_FETCH_RESTART = 0xde,
+ M1_PMU_PERFCTR_ST_NT_UOP = 0xe5,
+ M1_PMU_PERFCTR_LD_NT_UOP = 0xe6,
+ M1_PMU_PERFCTR_UNKNOWN_f5 = 0xf5,
+ M1_PMU_PERFCTR_UNKNOWN_f6 = 0xf6,
+ M1_PMU_PERFCTR_UNKNOWN_f7 = 0xf7,
+ M1_PMU_PERFCTR_UNKNOWN_f8 = 0xf8,
+ M1_PMU_PERFCTR_UNKNOWN_fd = 0xfd,
+ M1_PMU_PERFCTR_LAST = M1_PMU_CFG_EVENT,
/*
* From this point onwards, these are not actual HW events,
* but attributes that get stored in hw->config_base.
*/
- M1_PMU_CFG_COUNT_USER = BIT(8),
- M1_PMU_CFG_COUNT_KERNEL = BIT(9),
+ M1_PMU_CFG_COUNT_USER = BIT(8),
+ M1_PMU_CFG_COUNT_KERNEL = BIT(9),
+ M1_PMU_CFG_COUNT_HOST = BIT(10),
+ M1_PMU_CFG_COUNT_GUEST = BIT(11),
};
/*
@@ -96,46 +132,58 @@ enum m1_pmu_events {
* counters had strange affinities.
*/
static const u16 m1_pmu_event_affinity[M1_PMU_PERFCTR_LAST + 1] = {
- [0 ... M1_PMU_PERFCTR_LAST] = ANY_BUT_0_1,
- [M1_PMU_PERFCTR_UNKNOWN_01] = BIT(7),
- [M1_PMU_PERFCTR_CPU_CYCLES] = ANY_BUT_0_1 | BIT(0),
- [M1_PMU_PERFCTR_INSTRUCTIONS] = BIT(7) | BIT(1),
- [M1_PMU_PERFCTR_UNKNOWN_8d] = ONLY_5_6_7,
- [M1_PMU_PERFCTR_UNKNOWN_8e] = ONLY_5_6_7,
- [M1_PMU_PERFCTR_UNKNOWN_8f] = ONLY_5_6_7,
- [M1_PMU_PERFCTR_UNKNOWN_90] = ONLY_5_6_7,
- [M1_PMU_PERFCTR_UNKNOWN_93] = ONLY_5_6_7,
- [M1_PMU_PERFCTR_UNKNOWN_94] = ONLY_5_6_7,
- [M1_PMU_PERFCTR_UNKNOWN_95] = ONLY_5_6_7,
- [M1_PMU_PERFCTR_UNKNOWN_96] = ONLY_5_6_7,
- [M1_PMU_PERFCTR_UNKNOWN_97] = BIT(7),
- [M1_PMU_PERFCTR_UNKNOWN_98] = ONLY_5_6_7,
- [M1_PMU_PERFCTR_UNKNOWN_99] = ONLY_5_6_7,
- [M1_PMU_PERFCTR_UNKNOWN_9a] = BIT(7),
- [M1_PMU_PERFCTR_UNKNOWN_9b] = ONLY_5_6_7,
- [M1_PMU_PERFCTR_UNKNOWN_9c] = ONLY_5_6_7,
- [M1_PMU_PERFCTR_UNKNOWN_9f] = BIT(7),
- [M1_PMU_PERFCTR_UNKNOWN_bf] = ONLY_5_6_7,
- [M1_PMU_PERFCTR_UNKNOWN_c0] = ONLY_5_6_7,
- [M1_PMU_PERFCTR_UNKNOWN_c1] = ONLY_5_6_7,
- [M1_PMU_PERFCTR_UNKNOWN_c4] = ONLY_5_6_7,
- [M1_PMU_PERFCTR_UNKNOWN_c5] = ONLY_5_6_7,
- [M1_PMU_PERFCTR_UNKNOWN_c6] = ONLY_5_6_7,
- [M1_PMU_PERFCTR_UNKNOWN_c8] = ONLY_5_6_7,
- [M1_PMU_PERFCTR_UNKNOWN_ca] = ONLY_5_6_7,
- [M1_PMU_PERFCTR_UNKNOWN_cb] = ONLY_5_6_7,
- [M1_PMU_PERFCTR_UNKNOWN_f5] = ONLY_2_4_6,
- [M1_PMU_PERFCTR_UNKNOWN_f6] = ONLY_2_4_6,
- [M1_PMU_PERFCTR_UNKNOWN_f7] = ONLY_2_4_6,
- [M1_PMU_PERFCTR_UNKNOWN_f8] = ONLY_2_TO_7,
- [M1_PMU_PERFCTR_UNKNOWN_fd] = ONLY_2_4_6,
+ [0 ... M1_PMU_PERFCTR_LAST] = ANY_BUT_0_1,
+ [M1_PMU_PERFCTR_RETIRE_UOP] = BIT(7),
+ [M1_PMU_PERFCTR_CORE_ACTIVE_CYCLE] = ANY_BUT_0_1 | BIT(0),
+ [M1_PMU_PERFCTR_INST_ALL] = BIT(7) | BIT(1),
+ [M1_PMU_PERFCTR_INST_BRANCH] = ONLY_5_6_7,
+ [M1_PMU_PERFCTR_INST_BRANCH_CALL] = ONLY_5_6_7,
+ [M1_PMU_PERFCTR_INST_BRANCH_RET] = ONLY_5_6_7,
+ [M1_PMU_PERFCTR_INST_BRANCH_TAKEN] = ONLY_5_6_7,
+ [M1_PMU_PERFCTR_INST_BRANCH_INDIR] = ONLY_5_6_7,
+ [M1_PMU_PERFCTR_INST_BRANCH_COND] = ONLY_5_6_7,
+ [M1_PMU_PERFCTR_INST_INT_LD] = ONLY_5_6_7,
+ [M1_PMU_PERFCTR_INST_INT_ST] = BIT(7),
+ [M1_PMU_PERFCTR_INST_INT_ALU] = BIT(7),
+ [M1_PMU_PERFCTR_INST_SIMD_LD] = ONLY_5_6_7,
+ [M1_PMU_PERFCTR_INST_SIMD_ST] = ONLY_5_6_7,
+ [M1_PMU_PERFCTR_INST_SIMD_ALU] = BIT(7),
+ [M1_PMU_PERFCTR_INST_LDST] = BIT(7),
+ [M1_PMU_PERFCTR_INST_BARRIER] = ONLY_5_6_7,
+ [M1_PMU_PERFCTR_UNKNOWN_9f] = BIT(7),
+ [M1_PMU_PERFCTR_L1D_CACHE_MISS_LD_NONSPEC] = ONLY_5_6_7,
+ [M1_PMU_PERFCTR_L1D_CACHE_MISS_ST_NONSPEC] = ONLY_5_6_7,
+ [M1_PMU_PERFCTR_L1D_TLB_MISS_NONSPEC] = ONLY_5_6_7,
+ [M1_PMU_PERFCTR_ST_MEMORY_ORDER_VIOLATION_NONSPEC] = ONLY_5_6_7,
+ [M1_PMU_PERFCTR_BRANCH_COND_MISPRED_NONSPEC] = ONLY_5_6_7,
+ [M1_PMU_PERFCTR_BRANCH_INDIR_MISPRED_NONSPEC] = ONLY_5_6_7,
+ [M1_PMU_PERFCTR_BRANCH_RET_INDIR_MISPRED_NONSPEC] = ONLY_5_6_7,
+ [M1_PMU_PERFCTR_BRANCH_CALL_INDIR_MISPRED_NONSPEC] = ONLY_5_6_7,
+ [M1_PMU_PERFCTR_BRANCH_MISPRED_NONSPEC] = ONLY_5_6_7,
+ [M1_PMU_PERFCTR_UNKNOWN_f5] = ONLY_2_4_6,
+ [M1_PMU_PERFCTR_UNKNOWN_f6] = ONLY_2_4_6,
+ [M1_PMU_PERFCTR_UNKNOWN_f7] = ONLY_2_4_6,
+ [M1_PMU_PERFCTR_UNKNOWN_f8] = ONLY_2_TO_7,
+ [M1_PMU_PERFCTR_UNKNOWN_fd] = ONLY_2_4_6,
};
static const unsigned m1_pmu_perf_map[PERF_COUNT_HW_MAX] = {
PERF_MAP_ALL_UNSUPPORTED,
- [PERF_COUNT_HW_CPU_CYCLES] = M1_PMU_PERFCTR_CPU_CYCLES,
- [PERF_COUNT_HW_INSTRUCTIONS] = M1_PMU_PERFCTR_INSTRUCTIONS,
- /* No idea about the rest yet */
+ [PERF_COUNT_HW_CPU_CYCLES] = M1_PMU_PERFCTR_CORE_ACTIVE_CYCLE,
+ [PERF_COUNT_HW_INSTRUCTIONS] = M1_PMU_PERFCTR_INST_ALL,
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = M1_PMU_PERFCTR_INST_BRANCH,
+ [PERF_COUNT_HW_BRANCH_MISSES] = M1_PMU_PERFCTR_BRANCH_MISPRED_NONSPEC,
+};
+
+#define M1_PMUV3_EVENT_MAP(pmuv3_event, m1_event) \
+ [ARMV8_PMUV3_PERFCTR_##pmuv3_event] = M1_PMU_PERFCTR_##m1_event
+
+static const u16 m1_pmu_pmceid_map[ARMV8_PMUV3_MAX_COMMON_EVENTS] = {
+ [0 ... ARMV8_PMUV3_MAX_COMMON_EVENTS - 1] = HW_OP_UNSUPPORTED,
+ M1_PMUV3_EVENT_MAP(INST_RETIRED, INST_ALL),
+ M1_PMUV3_EVENT_MAP(CPU_CYCLES, CORE_ACTIVE_CYCLE),
+ M1_PMUV3_EVENT_MAP(BR_RETIRED, INST_BRANCH),
+ M1_PMUV3_EVENT_MAP(BR_MIS_PRED_RETIRED, BRANCH_MISPRED_NONSPEC),
};
/* sysfs definitions */
@@ -154,8 +202,8 @@ static ssize_t m1_pmu_events_sysfs_show(struct device *dev,
PMU_EVENT_ATTR_ID(name, m1_pmu_events_sysfs_show, config)
static struct attribute *m1_pmu_event_attrs[] = {
- M1_PMU_EVENT_ATTR(cycles, M1_PMU_PERFCTR_CPU_CYCLES),
- M1_PMU_EVENT_ATTR(instructions, M1_PMU_PERFCTR_INSTRUCTIONS),
+ M1_PMU_EVENT_ATTR(cycles, M1_PMU_PERFCTR_CORE_ACTIVE_CYCLE),
+ M1_PMU_EVENT_ATTR(instructions, M1_PMU_PERFCTR_INST_ALL),
NULL,
};
@@ -293,11 +341,10 @@ static void m1_pmu_disable_counter_interrupt(unsigned int index)
__m1_pmu_enable_counter_interrupt(index, false);
}
-static void m1_pmu_configure_counter(unsigned int index, u8 event,
- bool user, bool kernel)
+static void __m1_pmu_configure_event_filter(unsigned int index, bool user,
+ bool kernel, bool host)
{
- u64 val, user_bit, kernel_bit;
- int shift;
+ u64 clear, set, user_bit, kernel_bit;
switch (index) {
case 0 ... 7:
@@ -312,19 +359,27 @@ static void m1_pmu_configure_counter(unsigned int index, u8 event,
BUG();
}
- val = read_sysreg_s(SYS_IMP_APL_PMCR1_EL1);
-
+ clear = set = 0;
if (user)
- val |= user_bit;
+ set |= user_bit;
else
- val &= ~user_bit;
+ clear |= user_bit;
if (kernel)
- val |= kernel_bit;
+ set |= kernel_bit;
else
- val &= ~kernel_bit;
+ clear |= kernel_bit;
- write_sysreg_s(val, SYS_IMP_APL_PMCR1_EL1);
+ if (host)
+ sysreg_clear_set_s(SYS_IMP_APL_PMCR1_EL1, clear, set);
+ else if (is_kernel_in_hyp_mode())
+ sysreg_clear_set_s(SYS_IMP_APL_PMCR1_EL12, clear, set);
+}
+
+static void __m1_pmu_configure_eventsel(unsigned int index, u8 event)
+{
+ u64 clear = 0, set = 0;
+ int shift;
/*
* Counters 0 and 1 have fixed events. For anything else,
@@ -337,21 +392,32 @@ static void m1_pmu_configure_counter(unsigned int index, u8 event,
break;
case 2 ... 5:
shift = (index - 2) * 8;
- val = read_sysreg_s(SYS_IMP_APL_PMESR0_EL1);
- val &= ~((u64)0xff << shift);
- val |= (u64)event << shift;
- write_sysreg_s(val, SYS_IMP_APL_PMESR0_EL1);
+ clear |= (u64)0xff << shift;
+ set |= (u64)event << shift;
+ sysreg_clear_set_s(SYS_IMP_APL_PMESR0_EL1, clear, set);
break;
case 6 ... 9:
shift = (index - 6) * 8;
- val = read_sysreg_s(SYS_IMP_APL_PMESR1_EL1);
- val &= ~((u64)0xff << shift);
- val |= (u64)event << shift;
- write_sysreg_s(val, SYS_IMP_APL_PMESR1_EL1);
+ clear |= (u64)0xff << shift;
+ set |= (u64)event << shift;
+ sysreg_clear_set_s(SYS_IMP_APL_PMESR1_EL1, clear, set);
break;
}
}
+static void m1_pmu_configure_counter(unsigned int index, unsigned long config_base)
+{
+ bool kernel = config_base & M1_PMU_CFG_COUNT_KERNEL;
+ bool guest = config_base & M1_PMU_CFG_COUNT_GUEST;
+ bool host = config_base & M1_PMU_CFG_COUNT_HOST;
+ bool user = config_base & M1_PMU_CFG_COUNT_USER;
+ u8 evt = config_base & M1_PMU_CFG_EVENT;
+
+ __m1_pmu_configure_event_filter(index, user && host, kernel && host, true);
+ __m1_pmu_configure_event_filter(index, user && guest, kernel && guest, false);
+ __m1_pmu_configure_eventsel(index, evt);
+}
+
/* arm_pmu backend */
static void m1_pmu_enable_event(struct perf_event *event)
{
@@ -362,11 +428,7 @@ static void m1_pmu_enable_event(struct perf_event *event)
user = event->hw.config_base & M1_PMU_CFG_COUNT_USER;
kernel = event->hw.config_base & M1_PMU_CFG_COUNT_KERNEL;
- m1_pmu_disable_counter_interrupt(event->hw.idx);
- m1_pmu_disable_counter(event->hw.idx);
- isb();
-
- m1_pmu_configure_counter(event->hw.idx, evt, user, kernel);
+ m1_pmu_configure_counter(event->hw.idx, event->hw.config_base);
m1_pmu_enable_counter(event->hw.idx);
m1_pmu_enable_counter_interrupt(event->hw.idx);
isb();
@@ -400,7 +462,7 @@ static irqreturn_t m1_pmu_handle_irq(struct arm_pmu *cpu_pmu)
regs = get_irq_regs();
- for (idx = 0; idx < cpu_pmu->num_events; idx++) {
+ for_each_set_bit(idx, cpu_pmu->cntr_mask, M1_PMU_NR_COUNTERS) {
struct perf_event *event = cpuc->events[idx];
struct perf_sample_data data;
@@ -412,8 +474,7 @@ static irqreturn_t m1_pmu_handle_irq(struct arm_pmu *cpu_pmu)
if (!armpmu_event_set_period(event))
continue;
- if (perf_event_overflow(event, &data, regs))
- m1_pmu_disable_event(event);
+ perf_event_overflow(event, &data, regs);
}
cpu_pmu->start(cpu_pmu);
@@ -504,6 +565,26 @@ static int m2_pmu_map_event(struct perf_event *event)
return armpmu_map_event(event, &m1_pmu_perf_map, NULL, M1_PMU_CFG_EVENT);
}
+static int m1_pmu_map_pmuv3_event(unsigned int eventsel)
+{
+ u16 m1_event = HW_OP_UNSUPPORTED;
+
+ if (eventsel < ARMV8_PMUV3_MAX_COMMON_EVENTS)
+ m1_event = m1_pmu_pmceid_map[eventsel];
+
+ return m1_event == HW_OP_UNSUPPORTED ? -EOPNOTSUPP : m1_event;
+}
+
+static void m1_pmu_init_pmceid(struct arm_pmu *pmu)
+{
+ unsigned int event;
+
+ for (event = 0; event < ARMV8_PMUV3_MAX_COMMON_EVENTS; event++) {
+ if (m1_pmu_map_pmuv3_event(event) >= 0)
+ set_bit(event, pmu->pmceid_bitmap);
+ }
+}
+
static void m1_pmu_reset(void *info)
{
int i;
@@ -524,12 +605,18 @@ static int m1_pmu_set_event_filter(struct hw_perf_event *event,
{
unsigned long config_base = 0;
- if (!attr->exclude_guest)
- return -EINVAL;
+ if (!attr->exclude_guest && !is_kernel_in_hyp_mode()) {
+ pr_debug("ARM performance counters do not support mode exclusion\n");
+ return -EOPNOTSUPP;
+ }
if (!attr->exclude_kernel)
config_base |= M1_PMU_CFG_COUNT_KERNEL;
if (!attr->exclude_user)
config_base |= M1_PMU_CFG_COUNT_USER;
+ if (!attr->exclude_host)
+ config_base |= M1_PMU_CFG_COUNT_HOST;
+ if (!attr->exclude_guest)
+ config_base |= M1_PMU_CFG_COUNT_GUEST;
event->config_base = config_base;
@@ -558,7 +645,10 @@ static int m1_pmu_init(struct arm_pmu *cpu_pmu, u32 flags)
cpu_pmu->reset = m1_pmu_reset;
cpu_pmu->set_event_filter = m1_pmu_set_event_filter;
- cpu_pmu->num_events = M1_PMU_NR_COUNTERS;
+ cpu_pmu->map_pmuv3_event = m1_pmu_map_pmuv3_event;
+ m1_pmu_init_pmceid(cpu_pmu);
+
+ bitmap_set(cpu_pmu->cntr_mask, 0, M1_PMU_NR_COUNTERS);
cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] = &m1_pmu_events_attr_group;
cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] = &m1_pmu_format_attr_group;
return 0;
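
The PMUv3 plumbing added above reduces to a sparse table keyed by the architectural event number, with a sentinel marking unsupported entries. Below is a self-contained sketch of that lookup pattern; the table size, sentinel value and event numbers are illustrative stand-ins for ARMV8_PMUV3_MAX_COMMON_EVENTS, HW_OP_UNSUPPORTED and the ARMV8_PMUV3_PERFCTR_* constants, and it uses the same GNU range-initialiser idiom as the driver (build with gcc or clang):

#include <stdint.h>
#include <stdio.h>

#define MAX_COMMON_EVENTS 0x40		/* illustrative bound */
#define EVT_UNSUPPORTED   0xffff	/* stand-in for HW_OP_UNSUPPORTED */

/* Architectural (PMUv3) event number -> native M1 event code. */
static const uint16_t pmceid_map[MAX_COMMON_EVENTS] = {
	[0 ... MAX_COMMON_EVENTS - 1] = EVT_UNSUPPORTED,
	[0x08] = 0x8c,	/* INST_RETIRED        -> INST_ALL */
	[0x11] = 0x02,	/* CPU_CYCLES          -> CORE_ACTIVE_CYCLE */
	[0x21] = 0x8d,	/* BR_RETIRED          -> INST_BRANCH */
	[0x22] = 0xcb,	/* BR_MIS_PRED_RETIRED -> BRANCH_MISPRED_NONSPEC */
};

static int map_pmuv3_event(unsigned int eventsel)
{
	if (eventsel >= MAX_COMMON_EVENTS ||
	    pmceid_map[eventsel] == EVT_UNSUPPORTED)
		return -1;	/* caller treats this as -EOPNOTSUPP */
	return pmceid_map[eventsel];
}

int main(void)
{
	printf("CPU_CYCLES maps to native event 0x%x\n", map_pmuv3_event(0x11));
	return 0;
}
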
diff --git a/drivers/perf/arm-cci.c b/drivers/perf/arm-cci.c
index 61de861eaf91..1cc3214d6b6d 100644
--- a/drivers/perf/arm-cci.c
+++ b/drivers/perf/arm-cci.c
@@ -127,8 +127,6 @@ enum cci_models {
static void pmu_write_counters(struct cci_pmu *cci_pmu,
unsigned long *mask);
-static ssize_t __maybe_unused cci_pmu_format_show(struct device *dev,
- struct device_attribute *attr, char *buf);
static ssize_t __maybe_unused cci_pmu_event_show(struct device *dev,
struct device_attribute *attr, char *buf);
@@ -138,7 +136,7 @@ static ssize_t __maybe_unused cci_pmu_event_show(struct device *dev,
})[0].attr.attr
#define CCI_FORMAT_EXT_ATTR_ENTRY(_name, _config) \
- CCI_EXT_ATTR_ENTRY(_name, cci_pmu_format_show, (char *)_config)
+ CCI_EXT_ATTR_ENTRY(_name, device_show_string, _config)
#define CCI_EVENT_EXT_ATTR_ENTRY(_name, _config) \
CCI_EXT_ATTR_ENTRY(_name, cci_pmu_event_show, (unsigned long)_config)
@@ -688,14 +686,6 @@ static void __cci_pmu_disable(struct cci_pmu *cci_pmu)
writel(val, cci_pmu->ctrl_base + CCI_PMCR);
}
-static ssize_t cci_pmu_format_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct dev_ext_attribute *eattr = container_of(attr,
- struct dev_ext_attribute, attr);
- return sysfs_emit(buf, "%s\n", (char *)eattr->var);
-}
-
static ssize_t cci_pmu_event_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -1409,6 +1399,7 @@ static int cci_pmu_init(struct cci_pmu *cci_pmu, struct platform_device *pdev)
cci_pmu->pmu = (struct pmu) {
.module = THIS_MODULE,
+ .parent = &pdev->dev,
.name = cci_pmu->model->name,
.task_ctx_nr = perf_invalid_context,
.pmu_enable = cci_pmu_enable,
@@ -1697,16 +1688,14 @@ error_pmu_init:
return ret;
}
-static int cci_pmu_remove(struct platform_device *pdev)
+static void cci_pmu_remove(struct platform_device *pdev)
{
if (!g_cci_pmu)
- return 0;
+ return;
cpuhp_remove_state(CPUHP_AP_PERF_ARM_CCI_ONLINE);
perf_pmu_unregister(&g_cci_pmu->pmu);
g_cci_pmu = NULL;
-
- return 0;
}
static struct platform_driver cci_pmu_driver = {
diff --git a/drivers/perf/arm-ccn.c b/drivers/perf/arm-ccn.c
index 728d13d8e98a..8af3563fdf60 100644
--- a/drivers/perf/arm-ccn.c
+++ b/drivers/perf/arm-ccn.c
@@ -215,18 +215,9 @@ static void arm_ccn_pmu_config_set(u64 *config, u32 node_xp, u32 type, u32 port)
*config |= (node_xp << 0) | (type << 8) | (port << 24);
}
-static ssize_t arm_ccn_pmu_format_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct dev_ext_attribute *ea = container_of(attr,
- struct dev_ext_attribute, attr);
-
- return sysfs_emit(buf, "%s\n", (char *)ea->var);
-}
-
#define CCN_FORMAT_ATTR(_name, _config) \
struct dev_ext_attribute arm_ccn_pmu_format_attr_##_name = \
- { __ATTR(_name, S_IRUGO, arm_ccn_pmu_format_show, \
+ { __ATTR(_name, S_IRUGO, device_show_string, \
NULL), _config }
static CCN_FORMAT_ATTR(node, "config:0-7");
@@ -574,7 +565,7 @@ module_param_named(pmu_poll_period_us, arm_ccn_pmu_poll_period_us, uint,
static ktime_t arm_ccn_pmu_timer_period(void)
{
- return ns_to_ktime((u64)arm_ccn_pmu_poll_period_us * 1000);
+ return us_to_ktime((u64)arm_ccn_pmu_poll_period_us);
}
@@ -1265,6 +1256,7 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn)
/* Perf driver registration */
ccn->dt.pmu = (struct pmu) {
.module = THIS_MODULE,
+ .parent = ccn->dev,
.attr_groups = arm_ccn_pmu_attr_groups,
.task_ctx_nr = perf_invalid_context,
.event_init = arm_ccn_pmu_event_init,
@@ -1281,9 +1273,8 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn)
/* No overflow interrupt? Have to use a timer instead. */
if (!ccn->irq) {
dev_info(ccn->dev, "No access to interrupts, using timer.\n");
- hrtimer_init(&ccn->dt.hrtimer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL);
- ccn->dt.hrtimer.function = arm_ccn_pmu_timer_handler;
+ hrtimer_setup(&ccn->dt.hrtimer, arm_ccn_pmu_timer_handler, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
}
/* Pick one CPU which we will use to collect data from CCN... */
@@ -1515,13 +1506,11 @@ static int arm_ccn_probe(struct platform_device *pdev)
return arm_ccn_pmu_init(ccn);
}
-static int arm_ccn_remove(struct platform_device *pdev)
+static void arm_ccn_remove(struct platform_device *pdev)
{
struct arm_ccn *ccn = platform_get_drvdata(pdev);
arm_ccn_pmu_cleanup(ccn);
-
- return 0;
}
static const struct of_device_id arm_ccn_match[] = {
@@ -1571,4 +1560,5 @@ module_init(arm_ccn_init);
module_exit(arm_ccn_exit);
MODULE_AUTHOR("Pawel Moll <pawel.moll@arm.com>");
+MODULE_DESCRIPTION("ARM CCN (Cache Coherent Network) Performance Monitor Driver");
MODULE_LICENSE("GPL v2");
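
The timer hunk above is part of the wider move from hrtimer_init() plus a separate function-pointer assignment to the combined hrtimer_setup() initialiser, and from hand-scaled ns_to_ktime() to us_to_ktime(). A rough sketch of that polling-timer shape, with foo_poll(), struct foo_pmu and poll_period_us as hypothetical names:

#include <linux/hrtimer.h>
#include <linux/ktime.h>

struct foo_pmu {
	struct hrtimer poll_timer;
};

static unsigned int poll_period_us = 100;

/* Hypothetical poll-mode fallback when no overflow interrupt is wired up. */
static enum hrtimer_restart foo_poll(struct hrtimer *t)
{
	/* ...read and fold in the hardware counters here... */
	hrtimer_forward_now(t, us_to_ktime(poll_period_us));
	return HRTIMER_RESTART;
}

static void foo_start_polling(struct foo_pmu *foo)
{
	hrtimer_setup(&foo->poll_timer, foo_poll, CLOCK_MONOTONIC,
		      HRTIMER_MODE_REL);
	hrtimer_start(&foo->poll_timer, us_to_ktime(poll_period_us),
		      HRTIMER_MODE_REL);
}
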
diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c
index 913dc04b3a40..23245352a3fc 100644
--- a/drivers/perf/arm-cmn.c
+++ b/drivers/perf/arm-cmn.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2016-2020 Arm Limited
-// CMN-600 Coherent Mesh Network PMU driver
+// ARM CMN/CI interconnect PMU driver
#include <linux/acpi.h>
#include <linux/bitfield.h>
@@ -24,14 +24,6 @@
#define CMN_NI_NODE_ID GENMASK_ULL(31, 16)
#define CMN_NI_LOGICAL_ID GENMASK_ULL(47, 32)
-#define CMN_NODEID_DEVID(reg) ((reg) & 3)
-#define CMN_NODEID_EXT_DEVID(reg) ((reg) & 1)
-#define CMN_NODEID_PID(reg) (((reg) >> 2) & 1)
-#define CMN_NODEID_EXT_PID(reg) (((reg) >> 1) & 3)
-#define CMN_NODEID_1x1_PID(reg) (((reg) >> 2) & 7)
-#define CMN_NODEID_X(reg, bits) ((reg) >> (3 + (bits)))
-#define CMN_NODEID_Y(reg, bits) (((reg) >> 3) & ((1U << (bits)) - 1))
-
#define CMN_CHILD_INFO 0x0080
#define CMN_CI_CHILD_COUNT GENMASK_ULL(15, 0)
#define CMN_CI_CHILD_PTR_OFFSET GENMASK_ULL(31, 16)
@@ -43,6 +35,9 @@
#define CMN_MAX_XPS (CMN_MAX_DIMENSION * CMN_MAX_DIMENSION)
#define CMN_MAX_DTMS (CMN_MAX_XPS + (CMN_MAX_DIMENSION - 1) * 4)
+/* Currently XPs are the node type we can have most of; others top out at 128 */
+#define CMN_MAX_NODES_PER_EVENT CMN_MAX_XPS
+
/* The CFG node has various info besides the discovery tree */
#define CMN_CFGM_PERIPH_ID_01 0x0008
#define CMN_CFGM_PID0_PART_0 GENMASK_ULL(7, 0)
@@ -50,24 +45,28 @@
#define CMN_CFGM_PERIPH_ID_23 0x0010
#define CMN_CFGM_PID2_REVISION GENMASK_ULL(7, 4)
-#define CMN_CFGM_INFO_GLOBAL 0x900
+#define CMN_CFGM_INFO_GLOBAL 0x0900
#define CMN_INFO_MULTIPLE_DTM_EN BIT_ULL(63)
#define CMN_INFO_RSP_VC_NUM GENMASK_ULL(53, 52)
#define CMN_INFO_DAT_VC_NUM GENMASK_ULL(51, 50)
+#define CMN_INFO_DEVICE_ISO_ENABLE BIT_ULL(44)
-#define CMN_CFGM_INFO_GLOBAL_1 0x908
+#define CMN_CFGM_INFO_GLOBAL_1 0x0908
#define CMN_INFO_SNP_VC_NUM GENMASK_ULL(3, 2)
#define CMN_INFO_REQ_VC_NUM GENMASK_ULL(1, 0)
/* XPs also have some local topology info which has uses too */
#define CMN_MXP__CONNECT_INFO(p) (0x0008 + 8 * (p))
-#define CMN__CONNECT_INFO_DEVICE_TYPE GENMASK_ULL(4, 0)
+#define CMN__CONNECT_INFO_DEVICE_TYPE GENMASK_ULL(5, 0)
#define CMN_MAX_PORTS 6
#define CI700_CONNECT_INFO_P2_5_OFFSET 0x10
/* PMU registers occupy the 3rd 4KB page of each node's region */
#define CMN_PMU_OFFSET 0x2000
+/* ...except when they don't :( */
+#define CMN_S3_R1_DTM_OFFSET 0xa000
+#define CMN_S3_PMU_OFFSET 0xd900
/* For most nodes, this is all there is */
#define CMN_PMU_EVENT_SEL 0x000
@@ -78,7 +77,8 @@
/* Technically this is 4 bits wide on DNs, but we only use 2 there anyway */
#define CMN__PMU_OCCUP1_ID GENMASK_ULL(34, 32)
-/* HN-Ps are weird... */
+/* Some types are designed to coexist with another device in the same node */
+#define CMN_CCLA_PMU_EVENT_SEL 0x008
#define CMN_HNP_PMU_EVENT_SEL 0x008
/* DTMs live in the PMU space of XP registers */
@@ -112,7 +112,9 @@
#define CMN_DTM_PMEVCNTSR 0x240
-#define CMN_DTM_UNIT_INFO 0x0910
+#define CMN650_DTM_UNIT_INFO 0x0910
+#define CMN_DTM_UNIT_INFO 0x0960
+#define CMN_DTM_UNIT_INFO_DTC_DOMAIN GENMASK_ULL(1, 0)
#define CMN_DTM_NUM_COUNTERS 4
/* Want more local counters? Why not replicate the whole DTM! Ugh... */
@@ -121,27 +123,28 @@
/* The DTC node is where the magic happens */
#define CMN_DT_DTC_CTL 0x0a00
#define CMN_DT_DTC_CTL_DT_EN BIT(0)
+#define CMN_DT_DTC_CTL_CG_DISABLE BIT(10)
/* DTC counters are paired in 64-bit registers on a 16-byte stride. Yuck */
#define _CMN_DT_CNT_REG(n) ((((n) / 2) * 4 + (n) % 2) * 4)
-#define CMN_DT_PMEVCNT(n) (CMN_PMU_OFFSET + _CMN_DT_CNT_REG(n))
-#define CMN_DT_PMCCNTR (CMN_PMU_OFFSET + 0x40)
+#define CMN_DT_PMEVCNT(dtc, n) ((dtc)->pmu_base + _CMN_DT_CNT_REG(n))
+#define CMN_DT_PMCCNTR(dtc) ((dtc)->pmu_base + 0x40)
-#define CMN_DT_PMEVCNTSR(n) (CMN_PMU_OFFSET + 0x50 + _CMN_DT_CNT_REG(n))
-#define CMN_DT_PMCCNTRSR (CMN_PMU_OFFSET + 0x90)
+#define CMN_DT_PMEVCNTSR(dtc, n) ((dtc)->pmu_base + 0x50 + _CMN_DT_CNT_REG(n))
+#define CMN_DT_PMCCNTRSR(dtc) ((dtc)->pmu_base + 0x90)
-#define CMN_DT_PMCR (CMN_PMU_OFFSET + 0x100)
+#define CMN_DT_PMCR(dtc) ((dtc)->pmu_base + 0x100)
#define CMN_DT_PMCR_PMU_EN BIT(0)
#define CMN_DT_PMCR_CNTR_RST BIT(5)
#define CMN_DT_PMCR_OVFL_INTR_EN BIT(6)
-#define CMN_DT_PMOVSR (CMN_PMU_OFFSET + 0x118)
-#define CMN_DT_PMOVSR_CLR (CMN_PMU_OFFSET + 0x120)
+#define CMN_DT_PMOVSR(dtc) ((dtc)->pmu_base + 0x118)
+#define CMN_DT_PMOVSR_CLR(dtc) ((dtc)->pmu_base + 0x120)
-#define CMN_DT_PMSSR (CMN_PMU_OFFSET + 0x128)
+#define CMN_DT_PMSSR(dtc) ((dtc)->pmu_base + 0x128)
#define CMN_DT_PMSSR_SS_STATUS(n) BIT(n)
-#define CMN_DT_PMSRR (CMN_PMU_OFFSET + 0x130)
+#define CMN_DT_PMSRR(dtc) ((dtc)->pmu_base + 0x130)
#define CMN_DT_PMSRR_SS_REQ BIT(0)
#define CMN_DT_NUM_COUNTERS 8
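
To make the "paired in 64-bit registers on a 16-byte stride" comment above concrete, here is a small standalone program that evaluates the same arithmetic as _CMN_DT_CNT_REG() for all eight DTC counters (offsets relative to the DTC's PMU page):

#include <stdio.h>

/* Same arithmetic as _CMN_DT_CNT_REG(n) above. */
#define CNT_REG(n)	(((((n) / 2) * 4) + ((n) % 2)) * 4)

int main(void)
{
	/* Counters 2n and 2n+1 share one 64-bit register; pairs sit 16 bytes apart. */
	for (int n = 0; n < 8; n++)
		printf("counter %d -> +0x%02x\n", n, CNT_REG(n));
	return 0;	/* prints 0x00 0x04 0x10 0x14 0x20 0x24 0x30 0x34 */
}
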
@@ -172,9 +175,8 @@
#define CMN_CONFIG_WP_COMBINE GENMASK_ULL(30, 27)
#define CMN_CONFIG_WP_DEV_SEL GENMASK_ULL(50, 48)
#define CMN_CONFIG_WP_CHN_SEL GENMASK_ULL(55, 51)
-/* Note that we don't yet support the tertiary match group on newer IPs */
-#define CMN_CONFIG_WP_GRP BIT_ULL(56)
-#define CMN_CONFIG_WP_EXCLUSIVE BIT_ULL(57)
+#define CMN_CONFIG_WP_GRP GENMASK_ULL(57, 56)
+#define CMN_CONFIG_WP_EXCLUSIVE BIT_ULL(58)
#define CMN_CONFIG1_WP_VAL GENMASK_ULL(63, 0)
#define CMN_CONFIG2_WP_MASK GENMASK_ULL(63, 0)
@@ -197,10 +199,11 @@ enum cmn_model {
CMN650 = 2,
CMN700 = 4,
CI700 = 8,
+ CMNS3 = 16,
/* ...and then we can use bitmap tricks for commonality */
CMN_ANY = -1,
NOT_CMN600 = -2,
- CMN_650ON = CMN650 | CMN700,
+ CMN_650ON = CMN650 | CMN700 | CMNS3,
};
/* Actual part numbers and revision IDs defined by the hardware */
@@ -209,6 +212,7 @@ enum cmn_part {
PART_CMN650 = 0x436,
PART_CMN700 = 0x43c,
PART_CI700 = 0x43a,
+ PART_CMN_S3 = 0x43e,
};
/* CMN-600 r0px shouldn't exist in silicon, thankfully */
@@ -229,6 +233,9 @@ enum cmn_revision {
REV_CMN700_R1P0,
REV_CMN700_R2P0,
REV_CMN700_R3P0,
+ REV_CMNS3_R0P0 = 0,
+ REV_CMNS3_R0P1,
+ REV_CMNS3_R1P0,
REV_CI700_R0P0 = 0,
REV_CI700_R1P0,
REV_CI700_R2P0,
@@ -260,6 +267,7 @@ enum cmn_node_type {
CMN_TYPE_HNS = 0x200,
CMN_TYPE_HNS_MPAM_S,
CMN_TYPE_HNS_MPAM_NS,
+ CMN_TYPE_APB = 0x1000,
/* Not a real node type */
CMN_TYPE_WP = 0x7770
};
@@ -279,16 +287,16 @@ struct arm_cmn_node {
u16 id, logid;
enum cmn_node_type type;
- int dtm;
- union {
- /* DN/HN-F/CXHA */
- struct {
- u8 val : 4;
- u8 count : 4;
- } occupid[SEL_MAX];
- /* XP */
- u8 dtc;
- };
+ /* XP properties really, but replicated to children for convenience */
+ u8 dtm;
+ s8 dtc;
+ u8 portid_bits:4;
+ u8 deviceid_bits:4;
+ /* DN/HN-F/CXHA */
+ struct {
+ u8 val : 4;
+ u8 count : 4;
+ } occupid[SEL_MAX];
union {
u8 event[4];
__le32 event_sel;
@@ -309,8 +317,9 @@ struct arm_cmn_dtm {
struct arm_cmn_dtc {
void __iomem *base;
+ void __iomem *pmu_base;
int irq;
- int irq_friend;
+ s8 irq_friend;
bool cc_active;
struct perf_event *counters[CMN_DT_NUM_COUNTERS];
@@ -359,49 +368,33 @@ struct arm_cmn {
static int arm_cmn_hp_state;
struct arm_cmn_nodeid {
- u8 x;
- u8 y;
u8 port;
u8 dev;
};
static int arm_cmn_xyidbits(const struct arm_cmn *cmn)
{
- return fls((cmn->mesh_x - 1) | (cmn->mesh_y - 1) | 2);
+ return fls((cmn->mesh_x - 1) | (cmn->mesh_y - 1));
}
-static struct arm_cmn_nodeid arm_cmn_nid(const struct arm_cmn *cmn, u16 id)
+static struct arm_cmn_nodeid arm_cmn_nid(const struct arm_cmn_node *dn)
{
struct arm_cmn_nodeid nid;
- if (cmn->num_xps == 1) {
- nid.x = 0;
- nid.y = 0;
- nid.port = CMN_NODEID_1x1_PID(id);
- nid.dev = CMN_NODEID_DEVID(id);
- } else {
- int bits = arm_cmn_xyidbits(cmn);
-
- nid.x = CMN_NODEID_X(id, bits);
- nid.y = CMN_NODEID_Y(id, bits);
- if (cmn->ports_used & 0xc) {
- nid.port = CMN_NODEID_EXT_PID(id);
- nid.dev = CMN_NODEID_EXT_DEVID(id);
- } else {
- nid.port = CMN_NODEID_PID(id);
- nid.dev = CMN_NODEID_DEVID(id);
- }
- }
+ nid.dev = dn->id & ((1U << dn->deviceid_bits) - 1);
+ nid.port = (dn->id >> dn->deviceid_bits) & ((1U << dn->portid_bits) - 1);
return nid;
}
static struct arm_cmn_node *arm_cmn_node_to_xp(const struct arm_cmn *cmn,
const struct arm_cmn_node *dn)
{
- struct arm_cmn_nodeid nid = arm_cmn_nid(cmn, dn->id);
- int xp_idx = cmn->mesh_x * nid.y + nid.x;
+ int id = dn->id >> (dn->portid_bits + dn->deviceid_bits);
+ int bits = arm_cmn_xyidbits(cmn);
+ int x = id >> bits;
+ int y = id & ((1U << bits) - 1);
- return cmn->xps + xp_idx;
+ return cmn->xps + cmn->mesh_x * y + x;
}
static struct arm_cmn_node *arm_cmn_node(const struct arm_cmn *cmn,
enum cmn_node_type type)
@@ -425,15 +418,27 @@ static enum cmn_model arm_cmn_model(const struct arm_cmn *cmn)
return CMN700;
case PART_CI700:
return CI700;
+ case PART_CMN_S3:
+ return CMNS3;
default:
return 0;
};
}
+static int arm_cmn_pmu_offset(const struct arm_cmn *cmn, const struct arm_cmn_node *dn)
+{
+ if (cmn->part == PART_CMN_S3) {
+ if (cmn->rev >= REV_CMNS3_R1P0 && dn->type == CMN_TYPE_XP)
+ return CMN_S3_R1_DTM_OFFSET;
+ return CMN_S3_PMU_OFFSET;
+ }
+ return CMN_PMU_OFFSET;
+}
+
static u32 arm_cmn_device_connect_info(const struct arm_cmn *cmn,
const struct arm_cmn_node *xp, int port)
{
- int offset = CMN_MXP__CONNECT_INFO(port);
+ int offset = CMN_MXP__CONNECT_INFO(port) - arm_cmn_pmu_offset(cmn, xp);
if (port >= 2) {
if (cmn->part == PART_CMN600 || cmn->part == PART_CMN650)
@@ -446,7 +451,7 @@ static u32 arm_cmn_device_connect_info(const struct arm_cmn *cmn,
offset += CI700_CONNECT_INFO_P2_5_OFFSET;
}
- return readl_relaxed(xp->pmu_base - CMN_PMU_OFFSET + offset);
+ return readl_relaxed(xp->pmu_base + offset);
}
static struct dentry *arm_cmn_debugfs;
@@ -480,20 +485,26 @@ static const char *arm_cmn_device_type(u8 type)
case 0x17: return "RN-F_C_E|";
case 0x18: return " RN-F_E |";
case 0x19: return "RN-F_E_E|";
+ case 0x1a: return " HN-S |";
+ case 0x1b: return " LCN |";
case 0x1c: return " MTSX |";
case 0x1d: return " HN-V |";
case 0x1e: return " CCG |";
+ case 0x20: return " RN-F_F |";
+ case 0x21: return "RN-F_F_E|";
+ case 0x22: return " SN-F_F |";
default: return " ???? |";
}
}
-static void arm_cmn_show_logid(struct seq_file *s, int x, int y, int p, int d)
+static void arm_cmn_show_logid(struct seq_file *s, const struct arm_cmn_node *xp, int p, int d)
{
struct arm_cmn *cmn = s->private;
struct arm_cmn_node *dn;
+ u16 id = xp->id | d | (p << xp->deviceid_bits);
for (dn = cmn->dns; dn->type; dn++) {
- struct arm_cmn_nodeid nid = arm_cmn_nid(cmn, dn->id);
+ int pad = dn->logid < 10;
if (dn->type == CMN_TYPE_XP)
continue;
@@ -501,10 +512,10 @@ static void arm_cmn_show_logid(struct seq_file *s, int x, int y, int p, int d)
if (dn->type < CMN_TYPE_HNI)
continue;
- if (nid.x != x || nid.y != y || nid.port != p || nid.dev != d)
+ if (dn->id != id)
continue;
- seq_printf(s, " #%-2d |", dn->logid);
+ seq_printf(s, " %*c#%-*d |", pad + 1, ' ', 3 - pad, dn->logid);
return;
}
seq_puts(s, " |");
@@ -517,33 +528,32 @@ static int arm_cmn_map_show(struct seq_file *s, void *data)
seq_puts(s, " X");
for (x = 0; x < cmn->mesh_x; x++)
- seq_printf(s, " %d ", x);
+ seq_printf(s, " %-2d ", x);
seq_puts(s, "\nY P D+");
y = cmn->mesh_y;
while (y--) {
int xp_base = cmn->mesh_x * y;
+ struct arm_cmn_node *xp = cmn->xps + xp_base;
u8 port[CMN_MAX_PORTS][CMN_MAX_DIMENSION];
for (x = 0; x < cmn->mesh_x; x++)
seq_puts(s, "--------+");
- seq_printf(s, "\n%d |", y);
+ seq_printf(s, "\n%-2d |", y);
for (x = 0; x < cmn->mesh_x; x++) {
- struct arm_cmn_node *xp = cmn->xps + xp_base + x;
-
for (p = 0; p < CMN_MAX_PORTS; p++)
- port[p][x] = arm_cmn_device_connect_info(cmn, xp, p);
- seq_printf(s, " XP #%-2d |", xp_base + x);
+ port[p][x] = arm_cmn_device_connect_info(cmn, xp + x, p);
+ seq_printf(s, " XP #%-3d|", xp_base + x);
}
seq_puts(s, "\n |");
for (x = 0; x < cmn->mesh_x; x++) {
- u8 dtc = cmn->xps[xp_base + x].dtc;
+ s8 dtc = xp[x].dtc;
- if (dtc & (dtc - 1))
+ if (dtc < 0)
seq_puts(s, " DTC ?? |");
else
- seq_printf(s, " DTC %ld |", __ffs(dtc));
+ seq_printf(s, " DTC %d |", dtc);
}
seq_puts(s, "\n |");
for (x = 0; x < cmn->mesh_x; x++)
@@ -555,10 +565,10 @@ static int arm_cmn_map_show(struct seq_file *s, void *data)
seq_puts(s, arm_cmn_device_type(port[p][x]));
seq_puts(s, "\n 0|");
for (x = 0; x < cmn->mesh_x; x++)
- arm_cmn_show_logid(s, x, y, p, 0);
+ arm_cmn_show_logid(s, xp + x, p, 0);
seq_puts(s, "\n 1|");
for (x = 0; x < cmn->mesh_x; x++)
- arm_cmn_show_logid(s, x, y, p, 1);
+ arm_cmn_show_logid(s, xp + x, p, 1);
}
seq_puts(s, "\n-----+");
}
@@ -586,21 +596,31 @@ static void arm_cmn_debugfs_init(struct arm_cmn *cmn, int id) {}
struct arm_cmn_hw_event {
struct arm_cmn_node *dn;
- u64 dtm_idx[4];
- unsigned int dtc_idx;
- u8 dtcs_used;
+ u64 dtm_idx[DIV_ROUND_UP(CMN_MAX_NODES_PER_EVENT * 2, 64)];
+ s8 dtc_idx[CMN_MAX_DTCS];
u8 num_dns;
u8 dtm_offset;
+
+ /*
+ * WP config registers are divided to UP and DOWN events. We need to
+ * keep to track only one of them.
+ */
+ DECLARE_BITMAP(wp_idx, CMN_MAX_XPS);
+
bool wide_sel;
enum cmn_filter_select filter_sel;
};
+static_assert(sizeof(struct arm_cmn_hw_event) <= offsetof(struct hw_perf_event, target));
#define for_each_hw_dn(hw, dn, i) \
for (i = 0, dn = hw->dn; i < hw->num_dns; i++, dn++)
+/* @i is the DTC number, @idx is the counter index on that DTC */
+#define for_each_hw_dtc_idx(hw, i, idx) \
+ for (int i = 0, idx; i < CMN_MAX_DTCS; i++) if ((idx = hw->dtc_idx[i]) >= 0)
+
static struct arm_cmn_hw_event *to_cmn_hw(struct perf_event *event)
{
- BUILD_BUG_ON(sizeof(struct arm_cmn_hw_event) > offsetof(struct hw_perf_event, target));
return (struct arm_cmn_hw_event *)&event->hw;
}
@@ -614,6 +634,17 @@ static unsigned int arm_cmn_get_index(u64 x[], unsigned int pos)
return (x[pos / 32] >> ((pos % 32) * 2)) & 3;
}
+static void arm_cmn_set_wp_idx(unsigned long *wp_idx, unsigned int pos, bool val)
+{
+ if (val)
+ set_bit(pos, wp_idx);
+}
+
+static unsigned int arm_cmn_get_wp_idx(unsigned long *wp_idx, unsigned int pos)
+{
+ return test_bit(pos, wp_idx);
+}
+
struct arm_cmn_event_attr {
struct device_attribute attr;
enum cmn_model model;
@@ -699,8 +730,8 @@ static umode_t arm_cmn_event_attr_is_visible(struct kobject *kobj,
if ((chan == 5 && cmn->rsp_vc_num < 2) ||
(chan == 6 && cmn->dat_vc_num < 2) ||
- (chan == 7 && cmn->snp_vc_num < 2) ||
- (chan == 8 && cmn->req_vc_num < 2))
+ (chan == 7 && cmn->req_vc_num < 2) ||
+ (chan == 8 && cmn->snp_vc_num < 2))
return 0;
}
@@ -770,12 +801,10 @@ static umode_t arm_cmn_event_attr_is_visible(struct kobject *kobj,
CMN_EVENT_ATTR(CMN_ANY, cxha_##_name, CMN_TYPE_CXHA, _event)
#define CMN_EVENT_CCRA(_name, _event) \
CMN_EVENT_ATTR(CMN_ANY, ccra_##_name, CMN_TYPE_CCRA, _event)
-#define CMN_EVENT_CCHA(_name, _event) \
- CMN_EVENT_ATTR(CMN_ANY, ccha_##_name, CMN_TYPE_CCHA, _event)
+#define CMN_EVENT_CCHA(_model, _name, _event) \
+ CMN_EVENT_ATTR(_model, ccha_##_name, CMN_TYPE_CCHA, _event)
#define CMN_EVENT_CCLA(_name, _event) \
CMN_EVENT_ATTR(CMN_ANY, ccla_##_name, CMN_TYPE_CCLA, _event)
-#define CMN_EVENT_CCLA_RNI(_name, _event) \
- CMN_EVENT_ATTR(CMN_ANY, ccla_rni_##_name, CMN_TYPE_CCLA_RNI, _event)
#define CMN_EVENT_HNS(_name, _event) \
CMN_EVENT_ATTR(CMN_ANY, hns_##_name, CMN_TYPE_HNS, _event)
@@ -809,7 +838,7 @@ static umode_t arm_cmn_event_attr_is_visible(struct kobject *kobj,
#define CMN_EVENT_HNF_OCC(_model, _name, _event) \
CMN_EVENT_HN_OCC(_model, hnf_##_name, CMN_TYPE_HNF, _event)
#define CMN_EVENT_HNF_CLS(_model, _name, _event) \
- CMN_EVENT_HN_CLS(_model, hnf_##_name, CMN_TYPE_HNS, _event)
+ CMN_EVENT_HN_CLS(_model, hnf_##_name, CMN_TYPE_HNF, _event)
#define CMN_EVENT_HNF_SNT(_model, _name, _event) \
CMN_EVENT_HN_SNT(_model, hnf_##_name, CMN_TYPE_HNF, _event)
@@ -856,8 +885,8 @@ static umode_t arm_cmn_event_attr_is_visible(struct kobject *kobj,
_CMN_EVENT_XP(pub_##_name, (_event) | (4 << 5)), \
_CMN_EVENT_XP(rsp2_##_name, (_event) | (5 << 5)), \
_CMN_EVENT_XP(dat2_##_name, (_event) | (6 << 5)), \
- _CMN_EVENT_XP(snp2_##_name, (_event) | (7 << 5)), \
- _CMN_EVENT_XP(req2_##_name, (_event) | (8 << 5))
+ _CMN_EVENT_XP(req2_##_name, (_event) | (7 << 5)), \
+ _CMN_EVENT_XP(snp2_##_name, (_event) | (8 << 5))
#define CMN_EVENT_XP_DAT(_name, _event) \
_CMN_EVENT_XP_PORT(dat_##_name, (_event) | (3 << 5)), \
@@ -1129,42 +1158,43 @@ static struct attribute *arm_cmn_event_attrs[] = {
CMN_EVENT_CCRA(wdb_alloc, 0x59),
CMN_EVENT_CCRA(ssb_alloc, 0x5a),
- CMN_EVENT_CCHA(rddatbyp, 0x61),
- CMN_EVENT_CCHA(chirsp_up_stall, 0x62),
- CMN_EVENT_CCHA(chidat_up_stall, 0x63),
- CMN_EVENT_CCHA(snppcrd_link0_stall, 0x64),
- CMN_EVENT_CCHA(snppcrd_link1_stall, 0x65),
- CMN_EVENT_CCHA(snppcrd_link2_stall, 0x66),
- CMN_EVENT_CCHA(reqtrk_occ, 0x67),
- CMN_EVENT_CCHA(rdb_occ, 0x68),
- CMN_EVENT_CCHA(rdbyp_occ, 0x69),
- CMN_EVENT_CCHA(wdb_occ, 0x6a),
- CMN_EVENT_CCHA(snptrk_occ, 0x6b),
- CMN_EVENT_CCHA(sdb_occ, 0x6c),
- CMN_EVENT_CCHA(snphaz_occ, 0x6d),
- CMN_EVENT_CCHA(reqtrk_alloc, 0x6e),
- CMN_EVENT_CCHA(rdb_alloc, 0x6f),
- CMN_EVENT_CCHA(rdbyp_alloc, 0x70),
- CMN_EVENT_CCHA(wdb_alloc, 0x71),
- CMN_EVENT_CCHA(snptrk_alloc, 0x72),
- CMN_EVENT_CCHA(sdb_alloc, 0x73),
- CMN_EVENT_CCHA(snphaz_alloc, 0x74),
- CMN_EVENT_CCHA(pb_rhu_req_occ, 0x75),
- CMN_EVENT_CCHA(pb_rhu_req_alloc, 0x76),
- CMN_EVENT_CCHA(pb_rhu_pcie_req_occ, 0x77),
- CMN_EVENT_CCHA(pb_rhu_pcie_req_alloc, 0x78),
- CMN_EVENT_CCHA(pb_pcie_wr_req_occ, 0x79),
- CMN_EVENT_CCHA(pb_pcie_wr_req_alloc, 0x7a),
- CMN_EVENT_CCHA(pb_pcie_reg_req_occ, 0x7b),
- CMN_EVENT_CCHA(pb_pcie_reg_req_alloc, 0x7c),
- CMN_EVENT_CCHA(pb_pcie_rsvd_req_occ, 0x7d),
- CMN_EVENT_CCHA(pb_pcie_rsvd_req_alloc, 0x7e),
- CMN_EVENT_CCHA(pb_rhu_dat_occ, 0x7f),
- CMN_EVENT_CCHA(pb_rhu_dat_alloc, 0x80),
- CMN_EVENT_CCHA(pb_rhu_pcie_dat_occ, 0x81),
- CMN_EVENT_CCHA(pb_rhu_pcie_dat_alloc, 0x82),
- CMN_EVENT_CCHA(pb_pcie_wr_dat_occ, 0x83),
- CMN_EVENT_CCHA(pb_pcie_wr_dat_alloc, 0x84),
+ CMN_EVENT_CCHA(CMN_ANY, rddatbyp, 0x61),
+ CMN_EVENT_CCHA(CMN_ANY, chirsp_up_stall, 0x62),
+ CMN_EVENT_CCHA(CMN_ANY, chidat_up_stall, 0x63),
+ CMN_EVENT_CCHA(CMN_ANY, snppcrd_link0_stall, 0x64),
+ CMN_EVENT_CCHA(CMN_ANY, snppcrd_link1_stall, 0x65),
+ CMN_EVENT_CCHA(CMN_ANY, snppcrd_link2_stall, 0x66),
+ CMN_EVENT_CCHA(CMN_ANY, reqtrk_occ, 0x67),
+ CMN_EVENT_CCHA(CMN_ANY, rdb_occ, 0x68),
+ CMN_EVENT_CCHA(CMN_ANY, rdbyp_occ, 0x69),
+ CMN_EVENT_CCHA(CMN_ANY, wdb_occ, 0x6a),
+ CMN_EVENT_CCHA(CMN_ANY, snptrk_occ, 0x6b),
+ CMN_EVENT_CCHA(CMN_ANY, sdb_occ, 0x6c),
+ CMN_EVENT_CCHA(CMN_ANY, snphaz_occ, 0x6d),
+ CMN_EVENT_CCHA(CMN_ANY, reqtrk_alloc, 0x6e),
+ CMN_EVENT_CCHA(CMN_ANY, rdb_alloc, 0x6f),
+ CMN_EVENT_CCHA(CMN_ANY, rdbyp_alloc, 0x70),
+ CMN_EVENT_CCHA(CMN_ANY, wdb_alloc, 0x71),
+ CMN_EVENT_CCHA(CMN_ANY, snptrk_alloc, 0x72),
+ CMN_EVENT_CCHA(CMN_ANY, sdb_alloc, 0x73),
+ CMN_EVENT_CCHA(CMN_ANY, snphaz_alloc, 0x74),
+ CMN_EVENT_CCHA(CMN_ANY, pb_rhu_req_occ, 0x75),
+ CMN_EVENT_CCHA(CMN_ANY, pb_rhu_req_alloc, 0x76),
+ CMN_EVENT_CCHA(CMN_ANY, pb_rhu_pcie_req_occ, 0x77),
+ CMN_EVENT_CCHA(CMN_ANY, pb_rhu_pcie_req_alloc, 0x78),
+ CMN_EVENT_CCHA(CMN_ANY, pb_pcie_wr_req_occ, 0x79),
+ CMN_EVENT_CCHA(CMN_ANY, pb_pcie_wr_req_alloc, 0x7a),
+ CMN_EVENT_CCHA(CMN_ANY, pb_pcie_reg_req_occ, 0x7b),
+ CMN_EVENT_CCHA(CMN_ANY, pb_pcie_reg_req_alloc, 0x7c),
+ CMN_EVENT_CCHA(CMN_ANY, pb_pcie_rsvd_req_occ, 0x7d),
+ CMN_EVENT_CCHA(CMN_ANY, pb_pcie_rsvd_req_alloc, 0x7e),
+ CMN_EVENT_CCHA(CMN_ANY, pb_rhu_dat_occ, 0x7f),
+ CMN_EVENT_CCHA(CMN_ANY, pb_rhu_dat_alloc, 0x80),
+ CMN_EVENT_CCHA(CMN_ANY, pb_rhu_pcie_dat_occ, 0x81),
+ CMN_EVENT_CCHA(CMN_ANY, pb_rhu_pcie_dat_alloc, 0x82),
+ CMN_EVENT_CCHA(CMN_ANY, pb_pcie_wr_dat_occ, 0x83),
+ CMN_EVENT_CCHA(CMN_ANY, pb_pcie_wr_dat_alloc, 0x84),
+ CMN_EVENT_CCHA(CMNS3, chirsp1_up_stall, 0x85),
CMN_EVENT_CCLA(rx_cxs, 0x21),
CMN_EVENT_CCLA(tx_cxs, 0x22),
@@ -1251,15 +1281,11 @@ static ssize_t arm_cmn_format_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct arm_cmn_format_attr *fmt = container_of(attr, typeof(*fmt), attr);
- int lo = __ffs(fmt->field), hi = __fls(fmt->field);
-
- if (lo == hi)
- return sysfs_emit(buf, "config:%d\n", lo);
if (!fmt->config)
- return sysfs_emit(buf, "config:%d-%d\n", lo, hi);
+ return sysfs_emit(buf, "config:%*pbl\n", 64, &fmt->field);
- return sysfs_emit(buf, "config%d:%d-%d\n", fmt->config, lo, hi);
+ return sysfs_emit(buf, "config%d:%*pbl\n", fmt->config, 64, &fmt->field);
}
#define _CMN_FORMAT_ATTR(_name, _cfg, _fld) \
@@ -1333,12 +1359,37 @@ static const struct attribute_group *arm_cmn_attr_groups[] = {
NULL
};
-static int arm_cmn_wp_idx(struct perf_event *event)
+static int arm_cmn_find_free_wp_idx(struct arm_cmn_dtm *dtm,
+ struct perf_event *event)
{
- return CMN_EVENT_EVENTID(event) + CMN_EVENT_WP_GRP(event);
+ int wp_idx = CMN_EVENT_EVENTID(event);
+
+ if (dtm->wp_event[wp_idx] >= 0)
+ if (dtm->wp_event[++wp_idx] >= 0)
+ return -ENOSPC;
+
+ return wp_idx;
+}
+
+static int arm_cmn_get_assigned_wp_idx(struct perf_event *event,
+ struct arm_cmn_hw_event *hw,
+ unsigned int pos)
+{
+ return CMN_EVENT_EVENTID(event) + arm_cmn_get_wp_idx(hw->wp_idx, pos);
+}
+
+static void arm_cmn_claim_wp_idx(struct arm_cmn_dtm *dtm,
+ struct perf_event *event,
+ unsigned int dtc, int wp_idx,
+ unsigned int pos)
+{
+ struct arm_cmn_hw_event *hw = to_cmn_hw(event);
+
+ dtm->wp_event[wp_idx] = hw->dtc_idx[dtc];
+ arm_cmn_set_wp_idx(hw->wp_idx, pos, wp_idx - CMN_EVENT_EVENTID(event));
}
-static u32 arm_cmn_wp_config(struct perf_event *event)
+static u32 arm_cmn_wp_config(struct perf_event *event, int wp_idx)
{
u32 config;
u32 dev = CMN_EVENT_WP_DEV_SEL(event);
@@ -1348,6 +1399,10 @@ static u32 arm_cmn_wp_config(struct perf_event *event)
u32 combine = CMN_EVENT_WP_COMBINE(event);
bool is_cmn600 = to_cmn(event->pmu)->part == PART_CMN600;
+ /* CMN-600 supports only primary and secondary matching groups */
+ if (is_cmn600)
+ grp &= 1;
+
config = FIELD_PREP(CMN_DTM_WPn_CONFIG_WP_DEV_SEL, dev) |
FIELD_PREP(CMN_DTM_WPn_CONFIG_WP_CHN_SEL, chn) |
FIELD_PREP(CMN_DTM_WPn_CONFIG_WP_GRP, grp) |
@@ -1355,7 +1410,9 @@ static u32 arm_cmn_wp_config(struct perf_event *event)
if (exc)
config |= is_cmn600 ? CMN600_WPn_CONFIG_WP_EXCLUSIVE :
CMN_DTM_WPn_CONFIG_WP_EXCLUSIVE;
- if (combine && !grp)
+
+ /* wp_combine is available only on WP0 and WP2 */
+ if (combine && !(wp_idx & 0x1))
config |= is_cmn600 ? CMN600_WPn_CONFIG_WP_COMBINE :
CMN_DTM_WPn_CONFIG_WP_COMBINE;
return config;
@@ -1364,7 +1421,7 @@ static u32 arm_cmn_wp_config(struct perf_event *event)
static void arm_cmn_set_state(struct arm_cmn *cmn, u32 state)
{
if (!cmn->state)
- writel_relaxed(0, cmn->dtc[0].base + CMN_DT_PMCR);
+ writel_relaxed(0, CMN_DT_PMCR(&cmn->dtc[0]));
cmn->state |= state;
}
@@ -1373,7 +1430,7 @@ static void arm_cmn_clear_state(struct arm_cmn *cmn, u32 state)
cmn->state &= ~state;
if (!cmn->state)
writel_relaxed(CMN_DT_PMCR_PMU_EN | CMN_DT_PMCR_OVFL_INTR_EN,
- cmn->dtc[0].base + CMN_DT_PMCR);
+ CMN_DT_PMCR(&cmn->dtc[0]));
}
static void arm_cmn_pmu_enable(struct pmu *pmu)
@@ -1408,18 +1465,19 @@ static u64 arm_cmn_read_dtm(struct arm_cmn *cmn, struct arm_cmn_hw_event *hw,
static u64 arm_cmn_read_cc(struct arm_cmn_dtc *dtc)
{
- u64 val = readq_relaxed(dtc->base + CMN_DT_PMCCNTR);
+ void __iomem *pmccntr = CMN_DT_PMCCNTR(dtc);
+ u64 val = readq_relaxed(pmccntr);
- writeq_relaxed(CMN_CC_INIT, dtc->base + CMN_DT_PMCCNTR);
+ writeq_relaxed(CMN_CC_INIT, pmccntr);
return (val - CMN_CC_INIT) & ((CMN_CC_INIT << 1) - 1);
}
static u32 arm_cmn_read_counter(struct arm_cmn_dtc *dtc, int idx)
{
- u32 val, pmevcnt = CMN_DT_PMEVCNT(idx);
+ void __iomem *pmevcnt = CMN_DT_PMEVCNT(dtc, idx);
+ u32 val = readl_relaxed(pmevcnt);
- val = readl_relaxed(dtc->base + pmevcnt);
- writel_relaxed(CMN_COUNTER_INIT, dtc->base + pmevcnt);
+ writel_relaxed(CMN_COUNTER_INIT, pmevcnt);
return val - CMN_COUNTER_INIT;
}
@@ -1427,12 +1485,11 @@ static void arm_cmn_init_counter(struct perf_event *event)
{
struct arm_cmn *cmn = to_cmn(event->pmu);
struct arm_cmn_hw_event *hw = to_cmn_hw(event);
- unsigned int i, pmevcnt = CMN_DT_PMEVCNT(hw->dtc_idx);
u64 count;
- for (i = 0; hw->dtcs_used & (1U << i); i++) {
- writel_relaxed(CMN_COUNTER_INIT, cmn->dtc[i].base + pmevcnt);
- cmn->dtc[i].counters[hw->dtc_idx] = event;
+ for_each_hw_dtc_idx(hw, i, idx) {
+ writel_relaxed(CMN_COUNTER_INIT, CMN_DT_PMEVCNT(&cmn->dtc[i], idx));
+ cmn->dtc[i].counters[idx] = event;
}
count = arm_cmn_read_dtm(cmn, hw, false);
@@ -1445,11 +1502,9 @@ static void arm_cmn_event_read(struct perf_event *event)
struct arm_cmn_hw_event *hw = to_cmn_hw(event);
u64 delta, new, prev;
unsigned long flags;
- unsigned int i;
- if (hw->dtc_idx == CMN_DT_NUM_COUNTERS) {
- i = __ffs(hw->dtcs_used);
- delta = arm_cmn_read_cc(cmn->dtc + i);
+ if (CMN_EVENT_TYPE(event) == CMN_TYPE_DTC) {
+ delta = arm_cmn_read_cc(cmn->dtc + hw->dtc_idx[0]);
local64_add(delta, &event->count);
return;
}
@@ -1459,8 +1514,8 @@ static void arm_cmn_event_read(struct perf_event *event)
delta = new - prev;
local_irq_save(flags);
- for (i = 0; hw->dtcs_used & (1U << i); i++) {
- new = arm_cmn_read_counter(cmn->dtc + i, hw->dtc_idx);
+ for_each_hw_dtc_idx(hw, i, idx) {
+ new = arm_cmn_read_counter(cmn->dtc + i, idx);
delta += new << 16;
}
local_irq_restore(flags);
@@ -1516,16 +1571,19 @@ static void arm_cmn_event_start(struct perf_event *event, int flags)
int i;
if (type == CMN_TYPE_DTC) {
- i = __ffs(hw->dtcs_used);
- writeq_relaxed(CMN_CC_INIT, cmn->dtc[i].base + CMN_DT_PMCCNTR);
- cmn->dtc[i].cc_active = true;
+ struct arm_cmn_dtc *dtc = cmn->dtc + hw->dtc_idx[0];
+
+ writel_relaxed(CMN_DT_DTC_CTL_DT_EN | CMN_DT_DTC_CTL_CG_DISABLE,
+ dtc->base + CMN_DT_DTC_CTL);
+ writeq_relaxed(CMN_CC_INIT, CMN_DT_PMCCNTR(dtc));
+ dtc->cc_active = true;
} else if (type == CMN_TYPE_WP) {
- int wp_idx = arm_cmn_wp_idx(event);
u64 val = CMN_EVENT_WP_VAL(event);
u64 mask = CMN_EVENT_WP_MASK(event);
for_each_hw_dn(hw, dn, i) {
void __iomem *base = dn->pmu_base + CMN_DTM_OFFSET(hw->dtm_offset);
+ int wp_idx = arm_cmn_get_assigned_wp_idx(event, hw, i);
writeq_relaxed(val, base + CMN_DTM_WPn_VAL(wp_idx));
writeq_relaxed(mask, base + CMN_DTM_WPn_MASK(wp_idx));
@@ -1547,13 +1605,14 @@ static void arm_cmn_event_stop(struct perf_event *event, int flags)
int i;
if (type == CMN_TYPE_DTC) {
- i = __ffs(hw->dtcs_used);
- cmn->dtc[i].cc_active = false;
- } else if (type == CMN_TYPE_WP) {
- int wp_idx = arm_cmn_wp_idx(event);
+ struct arm_cmn_dtc *dtc = cmn->dtc + hw->dtc_idx[0];
+ dtc->cc_active = false;
+ writel_relaxed(CMN_DT_DTC_CTL_DT_EN, dtc->base + CMN_DT_DTC_CTL);
+ } else if (type == CMN_TYPE_WP) {
for_each_hw_dn(hw, dn, i) {
void __iomem *base = dn->pmu_base + CMN_DTM_OFFSET(hw->dtm_offset);
+ int wp_idx = arm_cmn_get_assigned_wp_idx(event, hw, i);
writeq_relaxed(0, base + CMN_DTM_WPn_MASK(wp_idx));
writeq_relaxed(~0ULL, base + CMN_DTM_WPn_VAL(wp_idx));
@@ -1571,10 +1630,23 @@ struct arm_cmn_val {
u8 dtm_count[CMN_MAX_DTMS];
u8 occupid[CMN_MAX_DTMS][SEL_MAX];
u8 wp[CMN_MAX_DTMS][4];
- int dtc_count;
+ u8 wp_combine[CMN_MAX_DTMS][2];
+ int dtc_count[CMN_MAX_DTCS];
bool cycles;
};
+static int arm_cmn_val_find_free_wp_config(struct perf_event *event,
+ struct arm_cmn_val *val, int dtm)
+{
+ int wp_idx = CMN_EVENT_EVENTID(event);
+
+ if (val->wp[dtm][wp_idx])
+ if (val->wp[dtm][++wp_idx])
+ return -ENOSPC;
+
+ return wp_idx;
+}
+
static void arm_cmn_val_add_event(struct arm_cmn *cmn, struct arm_cmn_val *val,
struct perf_event *event)
{
@@ -1592,7 +1664,8 @@ static void arm_cmn_val_add_event(struct arm_cmn *cmn, struct arm_cmn_val *val,
return;
}
- val->dtc_count++;
+ for_each_hw_dtc_idx(hw, dtc, idx)
+ val->dtc_count[dtc]++;
for_each_hw_dn(hw, dn, i) {
int wp_idx, dtm = dn->dtm, sel = hw->filter_sel;
@@ -1605,8 +1678,9 @@ static void arm_cmn_val_add_event(struct arm_cmn *cmn, struct arm_cmn_val *val,
if (type != CMN_TYPE_WP)
continue;
- wp_idx = arm_cmn_wp_idx(event);
- val->wp[dtm][wp_idx] = CMN_EVENT_WP_COMBINE(event) + 1;
+ wp_idx = arm_cmn_val_find_free_wp_config(event, val, dtm);
+ val->wp[dtm][wp_idx] = 1;
+ val->wp_combine[dtm][wp_idx >> 1] += !!CMN_EVENT_WP_COMBINE(event);
}
}
@@ -1630,6 +1704,7 @@ static int arm_cmn_validate_group(struct arm_cmn *cmn, struct perf_event *event)
return -ENOMEM;
arm_cmn_val_add_event(cmn, val, leader);
+
for_each_sibling_event(sibling, leader)
arm_cmn_val_add_event(cmn, val, sibling);
@@ -1639,11 +1714,12 @@ static int arm_cmn_validate_group(struct arm_cmn *cmn, struct perf_event *event)
goto done;
}
- if (val->dtc_count == CMN_DT_NUM_COUNTERS)
- goto done;
+ for_each_hw_dtc_idx(hw, dtc, idx)
+ if (val->dtc_count[dtc] == CMN_DT_NUM_COUNTERS)
+ goto done;
for_each_hw_dn(hw, dn, i) {
- int wp_idx, wp_cmb, dtm = dn->dtm, sel = hw->filter_sel;
+ int wp_idx, dtm = dn->dtm, sel = hw->filter_sel;
if (val->dtm_count[dtm] == CMN_DTM_NUM_COUNTERS)
goto done;
@@ -1655,12 +1731,12 @@ static int arm_cmn_validate_group(struct arm_cmn *cmn, struct perf_event *event)
if (type != CMN_TYPE_WP)
continue;
- wp_idx = arm_cmn_wp_idx(event);
- if (val->wp[dtm][wp_idx])
+ wp_idx = arm_cmn_val_find_free_wp_config(event, val, dtm);
+ if (wp_idx < 0)
goto done;
- wp_cmb = val->wp[dtm][wp_idx ^ 1];
- if (wp_cmb && wp_cmb != CMN_EVENT_WP_COMBINE(event) + 1)
+ if (wp_idx & 1 &&
+ val->wp_combine[dtm][wp_idx >> 1] != !!CMN_EVENT_WP_COMBINE(event))
goto done;
}
@@ -1720,8 +1796,12 @@ static int arm_cmn_event_init(struct perf_event *event)
/* ...but the DTM may depend on which port we're watching */
if (cmn->multi_dtm)
hw->dtm_offset = CMN_EVENT_WP_DEV_SEL(event) / 2;
- } else if (type == CMN_TYPE_XP && cmn->part == PART_CMN700) {
+ } else if (type == CMN_TYPE_XP &&
+ (cmn->part == PART_CMN700 || cmn->part == PART_CMN_S3)) {
hw->wide_sel = true;
+ } else if (type == CMN_TYPE_RND) {
+ /* Secretly permit this as an alias for "rnid" events */
+ type = CMN_TYPE_RNI;
}
/* This is sufficiently annoying to recalculate, so cache it */
@@ -1733,29 +1813,27 @@ static int arm_cmn_event_init(struct perf_event *event)
hw->dn = arm_cmn_node(cmn, type);
if (!hw->dn)
return -EINVAL;
+
+ memset(hw->dtc_idx, -1, sizeof(hw->dtc_idx));
for (dn = hw->dn; dn->type == type; dn++) {
if (bynodeid && dn->id != nodeid) {
hw->dn++;
continue;
}
hw->num_dns++;
+ if (dn->dtc < 0)
+ memset(hw->dtc_idx, 0, cmn->num_dtcs);
+ else
+ hw->dtc_idx[dn->dtc] = 0;
+
if (bynodeid)
break;
}
if (!hw->num_dns) {
- struct arm_cmn_nodeid nid = arm_cmn_nid(cmn, nodeid);
-
- dev_dbg(cmn->dev, "invalid node 0x%x (%d,%d,%d,%d) type 0x%x\n",
- nodeid, nid.x, nid.y, nid.port, nid.dev, type);
+ dev_dbg(cmn->dev, "invalid node 0x%x type 0x%x\n", nodeid, type);
return -EINVAL;
}
- /*
- * Keep assuming non-cycles events count in all DTC domains; turns out
- * it's hard to make a worthwhile optimisation around this, short of
- * going all-in with domain-local counter allocation as well.
- */
- hw->dtcs_used = (1U << cmn->num_dtcs) - 1;
return arm_cmn_validate_group(cmn, event);
}
@@ -1770,8 +1848,11 @@ static void arm_cmn_event_clear(struct arm_cmn *cmn, struct perf_event *event,
struct arm_cmn_dtm *dtm = &cmn->dtms[hw->dn[i].dtm] + hw->dtm_offset;
unsigned int dtm_idx = arm_cmn_get_index(hw->dtm_idx, i);
- if (type == CMN_TYPE_WP)
- dtm->wp_event[arm_cmn_wp_idx(event)] = -1;
+ if (type == CMN_TYPE_WP) {
+ int wp_idx = arm_cmn_get_assigned_wp_idx(event, hw, i);
+
+ dtm->wp_event[wp_idx] = -1;
+ }
if (hw->filter_sel > SEL_NONE)
hw->dn[i].occupid[hw->filter_sel].count--;
@@ -1780,47 +1861,50 @@ static void arm_cmn_event_clear(struct arm_cmn *cmn, struct perf_event *event,
writel_relaxed(dtm->pmu_config_low, dtm->base + CMN_DTM_PMU_CONFIG);
}
memset(hw->dtm_idx, 0, sizeof(hw->dtm_idx));
+ memset(hw->wp_idx, 0, sizeof(hw->wp_idx));
- for (i = 0; hw->dtcs_used & (1U << i); i++)
- cmn->dtc[i].counters[hw->dtc_idx] = NULL;
+ for_each_hw_dtc_idx(hw, j, idx)
+ cmn->dtc[j].counters[idx] = NULL;
}
static int arm_cmn_event_add(struct perf_event *event, int flags)
{
struct arm_cmn *cmn = to_cmn(event->pmu);
struct arm_cmn_hw_event *hw = to_cmn_hw(event);
- struct arm_cmn_dtc *dtc = &cmn->dtc[0];
struct arm_cmn_node *dn;
enum cmn_node_type type = CMN_EVENT_TYPE(event);
- unsigned int i, dtc_idx, input_sel;
+ unsigned int input_sel, i = 0;
if (type == CMN_TYPE_DTC) {
- i = 0;
while (cmn->dtc[i].cycles)
if (++i == cmn->num_dtcs)
return -ENOSPC;
cmn->dtc[i].cycles = event;
- hw->dtc_idx = CMN_DT_NUM_COUNTERS;
- hw->dtcs_used = 1U << i;
+ hw->dtc_idx[0] = i;
if (flags & PERF_EF_START)
arm_cmn_event_start(event, 0);
return 0;
}
- /* Grab a free global counter first... */
- dtc_idx = 0;
- while (dtc->counters[dtc_idx])
- if (++dtc_idx == CMN_DT_NUM_COUNTERS)
- return -ENOSPC;
-
- hw->dtc_idx = dtc_idx;
+ /* Grab the global counters first... */
+ for_each_hw_dtc_idx(hw, j, idx) {
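+ /* CMN-600 needs the same counter index in every DTC domain */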
+ if (cmn->part == PART_CMN600 && j > 0) {
+ idx = hw->dtc_idx[0];
+ } else {
+ idx = 0;
+ while (cmn->dtc[j].counters[idx])
+ if (++idx == CMN_DT_NUM_COUNTERS)
+ return -ENOSPC;
+ }
+ hw->dtc_idx[j] = idx;
+ }
- /* ...then the local counters to feed it. */
+ /* ...then the local counters to feed them */
for_each_hw_dn(hw, dn, i) {
struct arm_cmn_dtm *dtm = &cmn->dtms[dn->dtm] + hw->dtm_offset;
- unsigned int dtm_idx, shift;
+ unsigned int dtm_idx, shift, d = max_t(int, dn->dtc, 0);
u64 reg;
dtm_idx = 0;
@@ -1831,22 +1915,26 @@ static int arm_cmn_event_add(struct perf_event *event, int flags)
if (type == CMN_TYPE_XP) {
input_sel = CMN__PMEVCNT0_INPUT_SEL_XP + dtm_idx;
} else if (type == CMN_TYPE_WP) {
- int tmp, wp_idx = arm_cmn_wp_idx(event);
- u32 cfg = arm_cmn_wp_config(event);
+ int tmp, wp_idx;
+ u32 cfg;
- if (dtm->wp_event[wp_idx] >= 0)
+ wp_idx = arm_cmn_find_free_wp_idx(dtm, event);
+ if (wp_idx < 0)
goto free_dtms;
+ cfg = arm_cmn_wp_config(event, wp_idx);
+
tmp = dtm->wp_event[wp_idx ^ 1];
if (tmp >= 0 && CMN_EVENT_WP_COMBINE(event) !=
- CMN_EVENT_WP_COMBINE(dtc->counters[tmp]))
+ CMN_EVENT_WP_COMBINE(cmn->dtc[d].counters[tmp]))
goto free_dtms;
input_sel = CMN__PMEVCNT0_INPUT_SEL_WP + wp_idx;
- dtm->wp_event[wp_idx] = dtc_idx;
+
+ arm_cmn_claim_wp_idx(dtm, event, d, wp_idx, i);
writel_relaxed(cfg, dtm->base + CMN_DTM_WPn_CONFIG(wp_idx));
} else {
- struct arm_cmn_nodeid nid = arm_cmn_nid(cmn, dn->id);
+ struct arm_cmn_nodeid nid = arm_cmn_nid(dn);
if (cmn->multi_dtm)
nid.port %= 2;
@@ -1863,7 +1951,7 @@ static int arm_cmn_event_add(struct perf_event *event, int flags)
dtm->input_sel[dtm_idx] = input_sel;
shift = CMN__PMEVCNTn_GLOBAL_NUM_SHIFT(dtm_idx);
dtm->pmu_config_low &= ~(CMN__PMEVCNT0_GLOBAL_NUM << shift);
- dtm->pmu_config_low |= FIELD_PREP(CMN__PMEVCNT0_GLOBAL_NUM, dtc_idx) << shift;
+ dtm->pmu_config_low |= FIELD_PREP(CMN__PMEVCNT0_GLOBAL_NUM, hw->dtc_idx[d]) << shift;
dtm->pmu_config_low |= CMN__PMEVCNT_PAIRED(dtm_idx);
reg = (u64)le32_to_cpu(dtm->pmu_config_high) << 32 | dtm->pmu_config_low;
writeq_relaxed(reg, dtm->base + CMN_DTM_PMU_CONFIG);
@@ -1891,7 +1979,7 @@ static void arm_cmn_event_del(struct perf_event *event, int flags)
arm_cmn_event_stop(event, PERF_EF_UPDATE);
if (type == CMN_TYPE_DTC)
- cmn->dtc[__ffs(hw->dtcs_used)].cycles = NULL;
+ cmn->dtc[hw->dtc_idx[0]].cycles = NULL;
else
arm_cmn_event_clear(cmn, event, hw->num_dns);
}
@@ -1935,7 +2023,7 @@ static int arm_cmn_pmu_online_cpu(unsigned int cpu, struct hlist_node *cpuhp_nod
cmn = hlist_entry_safe(cpuhp_node, struct arm_cmn, cpuhp_node);
node = dev_to_node(cmn->dev);
- if (node != NUMA_NO_NODE && cpu_to_node(cmn->cpu) != node && cpu_to_node(cpu) == node)
+ if (cpu_to_node(cmn->cpu) != node && cpu_to_node(cpu) == node)
arm_cmn_migrate(cmn, cpu);
return 0;
}
@@ -1945,20 +2033,20 @@ static int arm_cmn_pmu_offline_cpu(unsigned int cpu, struct hlist_node *cpuhp_no
struct arm_cmn *cmn;
unsigned int target;
int node;
- cpumask_t mask;
cmn = hlist_entry_safe(cpuhp_node, struct arm_cmn, cpuhp_node);
if (cpu != cmn->cpu)
return 0;
node = dev_to_node(cmn->dev);
- if (cpumask_and(&mask, cpumask_of_node(node), cpu_online_mask) &&
- cpumask_andnot(&mask, &mask, cpumask_of(cpu)))
- target = cpumask_any(&mask);
- else
+
+ target = cpumask_any_and_but(cpumask_of_node(node), cpu_online_mask, cpu);
+ if (target >= nr_cpu_ids)
target = cpumask_any_but(cpu_online_mask, cpu);
+
if (target < nr_cpu_ids)
arm_cmn_migrate(cmn, target);
+
return 0;
}
@@ -1968,11 +2056,11 @@ static irqreturn_t arm_cmn_handle_irq(int irq, void *dev_id)
irqreturn_t ret = IRQ_NONE;
for (;;) {
- u32 status = readl_relaxed(dtc->base + CMN_DT_PMOVSR);
+ u32 status = readl_relaxed(CMN_DT_PMOVSR(dtc));
u64 delta;
int i;
- for (i = 0; i < CMN_DTM_NUM_COUNTERS; i++) {
+ for (i = 0; i < CMN_DT_NUM_COUNTERS; i++) {
if (status & (1U << i)) {
ret = IRQ_HANDLED;
if (WARN_ON(!dtc->counters[i]))
@@ -1990,7 +2078,7 @@ static irqreturn_t arm_cmn_handle_irq(int irq, void *dev_id)
}
}
- writel_relaxed(status, dtc->base + CMN_DT_PMOVSR_CLR);
+ writel_relaxed(status, CMN_DT_PMOVSR_CLR(dtc));
if (!dtc->irq_friend)
return ret;
@@ -2044,15 +2132,16 @@ static int arm_cmn_init_dtc(struct arm_cmn *cmn, struct arm_cmn_node *dn, int id
{
struct arm_cmn_dtc *dtc = cmn->dtc + idx;
- dtc->base = dn->pmu_base - CMN_PMU_OFFSET;
+ dtc->pmu_base = dn->pmu_base;
+ dtc->base = dtc->pmu_base - arm_cmn_pmu_offset(cmn, dn);
dtc->irq = platform_get_irq(to_platform_device(cmn->dev), idx);
if (dtc->irq < 0)
return dtc->irq;
writel_relaxed(CMN_DT_DTC_CTL_DT_EN, dtc->base + CMN_DT_DTC_CTL);
- writel_relaxed(CMN_DT_PMCR_PMU_EN | CMN_DT_PMCR_OVFL_INTR_EN, dtc->base + CMN_DT_PMCR);
- writeq_relaxed(0, dtc->base + CMN_DT_PMCCNTR);
- writel_relaxed(0x1ff, dtc->base + CMN_DT_PMOVSR_CLR);
+ writel_relaxed(CMN_DT_PMCR_PMU_EN | CMN_DT_PMCR_OVFL_INTR_EN, CMN_DT_PMCR(dtc));
+ writeq_relaxed(0, CMN_DT_PMCCNTR(dtc));
+ writel_relaxed(0x1ff, CMN_DT_PMOVSR_CLR(dtc));
return 0;
}
@@ -2072,7 +2161,6 @@ static int arm_cmn_init_dtcs(struct arm_cmn *cmn)
{
struct arm_cmn_node *dn, *xp;
int dtc_idx = 0;
- u8 dtcs_present = (1 << cmn->num_dtcs) - 1;
cmn->dtc = devm_kcalloc(cmn->dev, cmn->num_dtcs, sizeof(cmn->dtc[0]), GFP_KERNEL);
if (!cmn->dtc)
@@ -2083,22 +2171,18 @@ static int arm_cmn_init_dtcs(struct arm_cmn *cmn)
cmn->xps = arm_cmn_node(cmn, CMN_TYPE_XP);
for (dn = cmn->dns; dn->type; dn++) {
- if (dn->type == CMN_TYPE_XP) {
- dn->dtc &= dtcs_present;
+ if (dn->type == CMN_TYPE_XP)
continue;
- }
xp = arm_cmn_node_to_xp(cmn, dn);
+ dn->dtc = xp->dtc;
dn->dtm = xp->dtm;
if (cmn->multi_dtm)
- dn->dtm += arm_cmn_nid(cmn, dn->id).port / 2;
+ dn->dtm += arm_cmn_nid(dn).port / 2;
if (dn->type == CMN_TYPE_DTC) {
- int err;
- /* We do at least know that a DTC's XP must be in that DTC's domain */
- if (xp->dtc == 0xf)
- xp->dtc = 1 << dtc_idx;
- err = arm_cmn_init_dtc(cmn, dn, dtc_idx++);
+ int err = arm_cmn_init_dtc(cmn, dn, dtc_idx++);
+
if (err)
return err;
}
@@ -2117,6 +2201,16 @@ static int arm_cmn_init_dtcs(struct arm_cmn *cmn)
return 0;
}
+static unsigned int arm_cmn_dtc_domain(struct arm_cmn *cmn, void __iomem *xp_region)
+{
+ int offset = CMN_DTM_UNIT_INFO;
+
+ if (cmn->part == PART_CMN650 || cmn->part == PART_CI700)
+ offset = CMN650_DTM_UNIT_INFO;
+
+ return FIELD_GET(CMN_DTM_UNIT_INFO_DTC_DOMAIN, readl_relaxed(xp_region + offset));
+}
+
static void arm_cmn_init_node_info(struct arm_cmn *cmn, u32 offset, struct arm_cmn_node *node)
{
int level;
@@ -2126,7 +2220,7 @@ static void arm_cmn_init_node_info(struct arm_cmn *cmn, u32 offset, struct arm_c
node->id = FIELD_GET(CMN_NI_NODE_ID, reg);
node->logid = FIELD_GET(CMN_NI_LOGICAL_ID, reg);
- node->pmu_base = cmn->base + offset + CMN_PMU_OFFSET;
+ node->pmu_base = cmn->base + offset + arm_cmn_pmu_offset(cmn, node);
if (node->type == CMN_TYPE_CFG)
level = 0;
@@ -2154,12 +2248,11 @@ static enum cmn_node_type arm_cmn_subtype(enum cmn_node_type type)
static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset)
{
- void __iomem *cfg_region;
+ void __iomem *cfg_region, __iomem *xp_region;
struct arm_cmn_node cfg, *dn;
struct arm_cmn_dtm *dtm;
enum cmn_part part;
u16 child_count, child_poff;
- u32 xp_offset[CMN_MAX_XPS];
u64 reg;
int i, j;
size_t sz;
@@ -2184,7 +2277,17 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset)
reg = readl_relaxed(cfg_region + CMN_CFGM_PERIPH_ID_23);
cmn->rev = FIELD_GET(CMN_CFGM_PID2_REVISION, reg);
+ /*
+ * With the device isolation feature, if firmware has neglected to enable
+ * an XP port then we risk locking up if we try to access anything behind
+ * it; however we also have no way to tell from Non-Secure whether any
+ * given port is disabled or not, so the only way to win is not to play...
+ */
reg = readq_relaxed(cfg_region + CMN_CFGM_INFO_GLOBAL);
+ if (reg & CMN_INFO_DEVICE_ISO_ENABLE) {
+ dev_err(cmn->dev, "Device isolation enabled, not continuing due to risk of lockup\n");
+ return -ENODEV;
+ }
cmn->multi_dtm = reg & CMN_INFO_MULTIPLE_DTM_EN;
cmn->rsp_vc_num = FIELD_GET(CMN_INFO_RSP_VC_NUM, reg);
cmn->dat_vc_num = FIELD_GET(CMN_INFO_DAT_VC_NUM, reg);
@@ -2201,11 +2304,12 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset)
cmn->num_dns = cmn->num_xps;
/* Pass 1: visit the XPs, enumerate their children */
+ cfg_region += child_poff;
for (i = 0; i < cmn->num_xps; i++) {
- reg = readq_relaxed(cfg_region + child_poff + i * 8);
- xp_offset[i] = reg & CMN_CHILD_NODE_ADDR;
+ reg = readq_relaxed(cfg_region + i * 8);
+ xp_region = cmn->base + (reg & CMN_CHILD_NODE_ADDR);
- reg = readq_relaxed(cmn->base + xp_offset[i] + CMN_CHILD_INFO);
+ reg = readq_relaxed(xp_region + CMN_CHILD_INFO);
cmn->num_dns += FIELD_GET(CMN_CI_CHILD_COUNT, reg);
}
@@ -2231,11 +2335,12 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset)
cmn->dns = dn;
cmn->dtms = dtm;
for (i = 0; i < cmn->num_xps; i++) {
- void __iomem *xp_region = cmn->base + xp_offset[i];
struct arm_cmn_node *xp = dn++;
unsigned int xp_ports = 0;
- arm_cmn_init_node_info(cmn, xp_offset[i], xp);
+ reg = readq_relaxed(cfg_region + i * 8);
+ xp_region = cmn->base + (reg & CMN_CHILD_NODE_ADDR);
+ arm_cmn_init_node_info(cmn, reg & CMN_CHILD_NODE_ADDR, xp);
/*
* Thanks to the order in which XP logical IDs seem to be
* assigned, we can handily infer the mesh X dimension by
@@ -2246,26 +2351,35 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset)
cmn->mesh_x = xp->logid;
if (cmn->part == PART_CMN600)
- xp->dtc = 0xf;
+ xp->dtc = -1;
else
- xp->dtc = 1 << readl_relaxed(xp_region + CMN_DTM_UNIT_INFO);
+ xp->dtc = arm_cmn_dtc_domain(cmn, xp_region);
xp->dtm = dtm - cmn->dtms;
arm_cmn_init_dtm(dtm++, xp, 0);
/*
* Keeping track of connected ports will let us filter out
- * unnecessary XP events easily. We can also reliably infer the
- * "extra device ports" configuration for the node ID format
- * from this, since in that case we will see at least one XP
- * with port 2 connected, for the HN-D.
+ * unnecessary XP events easily, and also infer the per-XP
+ * part of the node ID format.
*/
for (int p = 0; p < CMN_MAX_PORTS; p++)
if (arm_cmn_device_connect_info(cmn, xp, p))
xp_ports |= BIT(p);
- if (cmn->multi_dtm && (xp_ports & 0xc))
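+ /*
+ * Infer the per-XP node ID layout from the connected ports: a lone
+ * XP gets 3 port bits and 2 device bits, an XP with devices on
+ * ports 2+ gets 2 port bits and 1 device bit, and anything else
+ * 1 port bit and 2 device bits.
+ */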
+ if (cmn->num_xps == 1) {
+ xp->portid_bits = 3;
+ xp->deviceid_bits = 2;
+ } else if (xp_ports > 0x3) {
+ xp->portid_bits = 2;
+ xp->deviceid_bits = 1;
+ } else {
+ xp->portid_bits = 1;
+ xp->deviceid_bits = 2;
+ }
+
+ if (cmn->multi_dtm && (xp_ports > 0x3))
arm_cmn_init_dtm(dtm++, xp, 1);
- if (cmn->multi_dtm && (xp_ports & 0x30))
+ if (cmn->multi_dtm && (xp_ports > 0xf))
arm_cmn_init_dtm(dtm++, xp, 2);
cmn->ports_used |= xp_ports;
@@ -2289,8 +2403,21 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset)
dev_dbg(cmn->dev, "ignoring external node %llx\n", reg);
continue;
}
+ /*
+ * AmpereOneX erratum AC04_MESH_1 makes some XPs report a bogus
+ * child count larger than the number of valid child pointers.
+ * A child offset of 0 can only occur on CMN-600; otherwise it
+ * would imply the root node being its own grandchild, which
+ * we can safely dismiss in general.
+ */
+ if (reg == 0 && cmn->part != PART_CMN600) {
+ dev_dbg(cmn->dev, "bogus child pointer?\n");
+ continue;
+ }
arm_cmn_init_node_info(cmn, reg & CMN_CHILD_NODE_ADDR, dn);
+ dn->portid_bits = xp->portid_bits;
+ dn->deviceid_bits = xp->deviceid_bits;
switch (dn->type) {
case CMN_TYPE_DTC:
@@ -2309,10 +2436,13 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset)
case CMN_TYPE_CXHA:
case CMN_TYPE_CCRA:
case CMN_TYPE_CCHA:
- case CMN_TYPE_CCLA:
case CMN_TYPE_HNS:
dn++;
break;
+ case CMN_TYPE_CCLA:
+ dn->pmu_base += CMN_CCLA_PMU_EVENT_SEL;
+ dn++;
+ break;
/* Nothing to see here */
case CMN_TYPE_MPAM_S:
case CMN_TYPE_MPAM_NS:
@@ -2320,6 +2450,7 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset)
case CMN_TYPE_CXLA:
case CMN_TYPE_HNS_MPAM_S:
case CMN_TYPE_HNS_MPAM_NS:
+ case CMN_TYPE_APB:
break;
/*
* Split "optimised" combination nodes into separate
@@ -2330,7 +2461,7 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset)
case CMN_TYPE_HNP:
case CMN_TYPE_CCLA_RNI:
dn[1] = dn[0];
- dn[0].pmu_base += CMN_HNP_PMU_EVENT_SEL;
+ dn[0].pmu_base += CMN_CCLA_PMU_EVENT_SEL;
dn[1].type = arm_cmn_subtype(dn->type);
dn += 2;
break;
@@ -2424,6 +2555,7 @@ static int arm_cmn_probe(struct platform_device *pdev)
cmn->dev = &pdev->dev;
cmn->part = (unsigned long)device_get_match_data(cmn->dev);
+ cmn->cpu = cpumask_local_spread(0, dev_to_node(cmn->dev));
platform_set_drvdata(pdev, cmn);
if (cmn->part == PART_CMN600 && has_acpi_companion(cmn->dev)) {
@@ -2451,9 +2583,9 @@ static int arm_cmn_probe(struct platform_device *pdev)
if (err)
return err;
- cmn->cpu = cpumask_local_spread(0, dev_to_node(cmn->dev));
cmn->pmu = (struct pmu) {
.module = THIS_MODULE,
+ .parent = cmn->dev,
.attr_groups = arm_cmn_attr_groups,
.capabilities = PERF_PMU_CAP_NO_EXCLUDE,
.task_ctx_nr = perf_invalid_context,
@@ -2488,7 +2620,7 @@ static int arm_cmn_probe(struct platform_device *pdev)
return err;
}
-static int arm_cmn_remove(struct platform_device *pdev)
+static void arm_cmn_remove(struct platform_device *pdev)
{
struct arm_cmn *cmn = platform_get_drvdata(pdev);
@@ -2497,7 +2629,6 @@ static int arm_cmn_remove(struct platform_device *pdev)
perf_pmu_unregister(&cmn->pmu);
cpuhp_state_remove_instance_nocalls(arm_cmn_hp_state, &cmn->cpuhp_node);
debugfs_remove(cmn->debug);
- return 0;
}
#ifdef CONFIG_OF
@@ -2505,6 +2636,7 @@ static const struct of_device_id arm_cmn_of_match[] = {
{ .compatible = "arm,cmn-600", .data = (void *)PART_CMN600 },
{ .compatible = "arm,cmn-650" },
{ .compatible = "arm,cmn-700" },
+ { .compatible = "arm,cmn-s3" },
{ .compatible = "arm,ci-700" },
{}
};
@@ -2516,6 +2648,7 @@ static const struct acpi_device_id arm_cmn_acpi_match[] = {
{ "ARMHC600", PART_CMN600 },
{ "ARMHC650" },
{ "ARMHC700" },
+ { "ARMHC003" },
{}
};
MODULE_DEVICE_TABLE(acpi, arm_cmn_acpi_match);
@@ -2526,6 +2659,7 @@ static struct platform_driver arm_cmn_driver = {
.name = "arm-cmn",
.of_match_table = of_match_ptr(arm_cmn_of_match),
.acpi_match_table = ACPI_PTR(arm_cmn_acpi_match),
+ .suppress_bind_attrs = true,
},
.probe = arm_cmn_probe,
.remove = arm_cmn_remove,
@@ -2564,5 +2698,5 @@ module_init(arm_cmn_init);
module_exit(arm_cmn_exit);
MODULE_AUTHOR("Robin Murphy <robin.murphy@arm.com>");
-MODULE_DESCRIPTION("Arm CMN-600 PMU driver");
+MODULE_DESCRIPTION("Arm CMN/CI interconnect PMU driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/perf/arm-ni.c b/drivers/perf/arm-ni.c
new file mode 100644
index 000000000000..66858c65215d
--- /dev/null
+++ b/drivers/perf/arm-ni.c
@@ -0,0 +1,862 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2022-2024 Arm Limited
+// NI-700 Network-on-Chip PMU driver
+
+#include <linux/acpi.h>
+#include <linux/bitfield.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/perf_event.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+/* Common registers */
+#define NI_NODE_TYPE 0x000
+#define NI_NODE_TYPE_NODE_ID GENMASK(31, 16)
+#define NI_NODE_TYPE_NODE_TYPE GENMASK(15, 0)
+
+#define NI_CHILD_NODE_INFO 0x004
+#define NI_CHILD_PTR(n) (0x008 + (n) * 4)
+#define NI_NUM_SUB_FEATURES 0x100
+#define NI_SUB_FEATURE_TYPE(n) (0x108 + (n) * 8)
+#define NI_SUB_FEATURE_PTR(n) (0x10c + (n) * 8)
+
+#define NI_SUB_FEATURE_TYPE_FCU 0x2
+
+#define NI700_PMUSELA 0x00c
+
+/* Config node */
+#define NI_PERIPHERAL_ID0 0xfe0
+#define NI_PIDR0_PART_7_0 GENMASK(7, 0)
+#define NI_PERIPHERAL_ID1 0xfe4
+#define NI_PIDR1_PART_11_8 GENMASK(3, 0)
+#define NI_PERIPHERAL_ID2 0xfe8
+#define NI_PIDR2_VERSION GENMASK(7, 4)
+
+/* PMU node */
+#define NI700_PMEVCNTR(n) (0x008 + (n) * 8)
+#define NI700_PMCCNTR_L 0x0f8
+#define NI_PMEVCNTR(n) (0x200 + (n) * 8)
+#define NI_PMCCNTR_L 0x2f8
+#define NI_PMEVTYPER(n) (0x400 + (n) * 4)
+#define NI_PMEVTYPER_NODE_TYPE GENMASK(12, 9)
+#define NI_PMEVTYPER_NODE_ID GENMASK(8, 0)
+#define NI_PMCNTENSET 0xc00
+#define NI_PMCNTENCLR 0xc20
+#define NI_PMINTENSET 0xc40
+#define NI_PMINTENCLR 0xc60
+#define NI_PMOVSCLR 0xc80
+#define NI_PMOVSSET 0xcc0
+#define NI_PMCFGR 0xe00
+#define NI_PMCR 0xe04
+#define NI_PMCR_RESET_CCNT BIT(2)
+#define NI_PMCR_RESET_EVCNT BIT(1)
+#define NI_PMCR_ENABLE BIT(0)
+
+#define NI_NUM_COUNTERS 8
+#define NI_CCNT_IDX 31
+
+/* Event attributes */
+#define NI_CONFIG_TYPE GENMASK_ULL(15, 0)
+#define NI_CONFIG_NODEID GENMASK_ULL(31, 16)
+#define NI_CONFIG_EVENTID GENMASK_ULL(47, 32)
+
+#define NI_EVENT_TYPE(event) FIELD_GET(NI_CONFIG_TYPE, (event)->attr.config)
+#define NI_EVENT_NODEID(event) FIELD_GET(NI_CONFIG_NODEID, (event)->attr.config)
+#define NI_EVENT_EVENTID(event) FIELD_GET(NI_CONFIG_EVENTID, (event)->attr.config)
+
+enum ni_part {
+ PART_NI_700 = 0x43b,
+ PART_NI_710AE = 0x43d,
+ PART_NOC_S3 = 0x43f,
+ PART_SI_L1 = 0x455,
+};
+
+enum ni_node_type {
+ NI_GLOBAL,
+ NI_VOLTAGE,
+ NI_POWER,
+ NI_CLOCK,
+ NI_ASNI,
+ NI_AMNI,
+ NI_PMU,
+ NI_HSNI,
+ NI_HMNI,
+ NI_PMNI,
+ NI_TSNI,
+ NI_TMNI,
+ NI_CMNI = 0x0e,
+ NI_MCN = 0x63,
+};
+
+struct arm_ni_node {
+ void __iomem *base;
+ enum ni_node_type type;
+ u16 id;
+ u32 num_components;
+};
+
+struct arm_ni_unit {
+ void __iomem *pmusela;
+ enum ni_node_type type;
+ u16 id;
+ bool ns;
+ union {
+ __le64 pmusel;
+ u8 event[8];
+ };
+};
+
+struct arm_ni_cd {
+ void __iomem *pmu_base;
+ u16 id;
+ s8 irq_friend;
+ int num_units;
+ int irq;
+ struct pmu pmu;
+ struct arm_ni_unit *units;
+ struct perf_event *evcnt[NI_NUM_COUNTERS];
+ struct perf_event *ccnt;
+};
+
+struct arm_ni {
+ struct device *dev;
+ void __iomem *base;
+ enum ni_part part;
+ int id;
+ int cpu;
+ int num_cds;
+ struct hlist_node cpuhp_node;
+ struct arm_ni_cd cds[] __counted_by(num_cds);
+};
+
+#define cd_to_ni(cd) container_of((cd), struct arm_ni, cds[(cd)->id])
+#define pmu_to_cd(p) container_of((p), struct arm_ni_cd, pmu)
+
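+/* Iterate only over clock domains whose PMU was successfully discovered */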
+#define ni_for_each_cd(n, c) \
+ for (struct arm_ni_cd *c = n->cds; c < n->cds + n->num_cds; c++) if (c->pmu_base)
+
+#define cd_for_each_unit(cd, u) \
+ for (struct arm_ni_unit *u = cd->units; u < cd->units + cd->num_units; u++)
+
+static int arm_ni_hp_state;
+
+struct arm_ni_event_attr {
+ struct device_attribute attr;
+ enum ni_node_type type;
+};
+
+#define NI_EVENT_ATTR(_name, _type) \
+ (&((struct arm_ni_event_attr[]) {{ \
+ .attr = __ATTR(_name, 0444, arm_ni_event_show, NULL), \
+ .type = _type, \
+ }})[0].attr.attr)
+
+static ssize_t arm_ni_event_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct arm_ni_event_attr *eattr = container_of(attr, typeof(*eattr), attr);
+
+ if (eattr->type == NI_PMU)
+ return sysfs_emit(buf, "type=0x%x\n", eattr->type);
+
+ return sysfs_emit(buf, "type=0x%x,eventid=?,nodeid=?\n", eattr->type);
+}
+
+static umode_t arm_ni_event_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int unused)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct arm_ni_cd *cd = pmu_to_cd(dev_get_drvdata(dev));
+ struct arm_ni_event_attr *eattr;
+
+ eattr = container_of(attr, typeof(*eattr), attr.attr);
+
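+ /* Only advertise events for unit types that are present and accessible */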
+ cd_for_each_unit(cd, unit) {
+ if (unit->type == eattr->type && unit->ns)
+ return attr->mode;
+ }
+
+ return 0;
+}
+
+static struct attribute *arm_ni_event_attrs[] = {
+ NI_EVENT_ATTR(asni, NI_ASNI),
+ NI_EVENT_ATTR(amni, NI_AMNI),
+ NI_EVENT_ATTR(cycles, NI_PMU),
+ NI_EVENT_ATTR(hsni, NI_HSNI),
+ NI_EVENT_ATTR(hmni, NI_HMNI),
+ NI_EVENT_ATTR(pmni, NI_PMNI),
+ NI_EVENT_ATTR(tsni, NI_TSNI),
+ NI_EVENT_ATTR(tmni, NI_TMNI),
+ NI_EVENT_ATTR(cmni, NI_CMNI),
+ NULL
+};
+
+static const struct attribute_group arm_ni_event_attrs_group = {
+ .name = "events",
+ .attrs = arm_ni_event_attrs,
+ .is_visible = arm_ni_event_attr_is_visible,
+};
+
+struct arm_ni_format_attr {
+ struct device_attribute attr;
+ u64 field;
+};
+
+#define NI_FORMAT_ATTR(_name, _fld) \
+ (&((struct arm_ni_format_attr[]) {{ \
+ .attr = __ATTR(_name, 0444, arm_ni_format_show, NULL), \
+ .field = _fld, \
+ }})[0].attr.attr)
+
+static ssize_t arm_ni_format_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct arm_ni_format_attr *fmt = container_of(attr, typeof(*fmt), attr);
+
+ return sysfs_emit(buf, "config:%*pbl\n", 64, &fmt->field);
+}
+
+static struct attribute *arm_ni_format_attrs[] = {
+ NI_FORMAT_ATTR(type, NI_CONFIG_TYPE),
+ NI_FORMAT_ATTR(nodeid, NI_CONFIG_NODEID),
+ NI_FORMAT_ATTR(eventid, NI_CONFIG_EVENTID),
+ NULL
+};
+
+static const struct attribute_group arm_ni_format_attrs_group = {
+ .name = "format",
+ .attrs = arm_ni_format_attrs,
+};
+
+static ssize_t arm_ni_cpumask_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct arm_ni *ni = cd_to_ni(pmu_to_cd(dev_get_drvdata(dev)));
+
+ return cpumap_print_to_pagebuf(true, buf, cpumask_of(ni->cpu));
+}
+
+static struct device_attribute arm_ni_cpumask_attr =
+ __ATTR(cpumask, 0444, arm_ni_cpumask_show, NULL);
+
+static ssize_t arm_ni_identifier_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct arm_ni *ni = cd_to_ni(pmu_to_cd(dev_get_drvdata(dev)));
+ u32 reg = readl_relaxed(ni->base + NI_PERIPHERAL_ID2);
+ int version = FIELD_GET(NI_PIDR2_VERSION, reg);
+
+ return sysfs_emit(buf, "%03x%02x\n", ni->part, version);
+}
+
+static struct device_attribute arm_ni_identifier_attr =
+ __ATTR(identifier, 0444, arm_ni_identifier_show, NULL);
+
+static struct attribute *arm_ni_other_attrs[] = {
+ &arm_ni_cpumask_attr.attr,
+ &arm_ni_identifier_attr.attr,
+ NULL
+};
+
+static const struct attribute_group arm_ni_other_attr_group = {
+ .attrs = arm_ni_other_attrs,
+};
+
+static const struct attribute_group *arm_ni_attr_groups[] = {
+ &arm_ni_event_attrs_group,
+ &arm_ni_format_attrs_group,
+ &arm_ni_other_attr_group,
+ NULL
+};
+
+static void arm_ni_pmu_enable(struct pmu *pmu)
+{
+ writel_relaxed(NI_PMCR_ENABLE, pmu_to_cd(pmu)->pmu_base + NI_PMCR);
+}
+
+static void arm_ni_pmu_disable(struct pmu *pmu)
+{
+ writel_relaxed(0, pmu_to_cd(pmu)->pmu_base + NI_PMCR);
+}
+
+struct arm_ni_val {
+ unsigned int evcnt;
+ unsigned int ccnt;
+};
+
+static bool arm_ni_val_count_event(struct perf_event *evt, struct arm_ni_val *val)
+{
+ if (is_software_event(evt))
+ return true;
+
+ if (NI_EVENT_TYPE(evt) == NI_PMU) {
+ val->ccnt++;
+ return val->ccnt <= 1;
+ }
+
+ val->evcnt++;
+ return val->evcnt <= NI_NUM_COUNTERS;
+}
+
+static int arm_ni_validate_group(struct perf_event *event)
+{
+ struct perf_event *sibling, *leader = event->group_leader;
+ struct arm_ni_val val = { 0 };
+
+ if (leader == event)
+ return 0;
+
+ arm_ni_val_count_event(event, &val);
+ if (!arm_ni_val_count_event(leader, &val))
+ return -EINVAL;
+
+ for_each_sibling_event(sibling, leader) {
+ if (!arm_ni_val_count_event(sibling, &val))
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static bool arm_ni_is_7xx(const struct arm_ni *ni)
+{
+ return ni->part == PART_NI_700 || ni->part == PART_NI_710AE;
+}
+
+static int arm_ni_event_init(struct perf_event *event)
+{
+ struct arm_ni_cd *cd = pmu_to_cd(event->pmu);
+ struct arm_ni *ni;
+
+ if (event->attr.type != event->pmu->type)
+ return -ENOENT;
+
+ if (is_sampling_event(event))
+ return -EINVAL;
+
+ ni = cd_to_ni(cd);
+ event->cpu = ni->cpu;
+ event->hw.flags = arm_ni_is_7xx(ni);
+
+ if (NI_EVENT_TYPE(event) == NI_PMU)
+ return arm_ni_validate_group(event);
+
+ cd_for_each_unit(cd, unit) {
+ if (unit->type == NI_EVENT_TYPE(event) &&
+ unit->id == NI_EVENT_NODEID(event) && unit->ns) {
+ event->hw.config_base = (unsigned long)unit;
+ return arm_ni_validate_group(event);
+ }
+ }
+ return -EINVAL;
+}
+
+static u64 arm_ni_read_ccnt(void __iomem *pmccntr)
+{
+ u64 l, u_old, u_new;
+ int retries = 3; /* 1st time unlucky, 2nd improbable, 3rd just broken */
+
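+ /*
+ * The cycle counter is read as two 32-bit halves, so sample the
+ * upper half, then the lower, then the upper again; if the upper
+ * half changed, the lower half wrapped mid-read and we retry.
+ */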
+ u_new = readl_relaxed(pmccntr + 4);
+ do {
+ u_old = u_new;
+ l = readl_relaxed(pmccntr);
+ u_new = readl_relaxed(pmccntr + 4);
+ } while (u_new != u_old && --retries);
+ WARN_ON(!retries);
+
+ return (u_new << 32) | l;
+}
+
+static void arm_ni_event_read(struct perf_event *event)
+{
+ struct hw_perf_event *hw = &event->hw;
+ u64 count, prev;
+ bool ccnt = hw->idx == NI_CCNT_IDX;
+
+ do {
+ prev = local64_read(&hw->prev_count);
+ if (ccnt)
+ count = arm_ni_read_ccnt((void __iomem *)event->hw.event_base);
+ else
+ count = readl_relaxed((void __iomem *)event->hw.event_base);
+ } while (local64_cmpxchg(&hw->prev_count, prev, count) != prev);
+
+ count -= prev;
+ if (!ccnt)
+ count = (u32)count;
+ local64_add(count, &event->count);
+}
+
+static void arm_ni_event_start(struct perf_event *event, int flags)
+{
+ struct arm_ni_cd *cd = pmu_to_cd(event->pmu);
+
+ writel_relaxed(1U << event->hw.idx, cd->pmu_base + NI_PMCNTENSET);
+}
+
+static void arm_ni_event_stop(struct perf_event *event, int flags)
+{
+ struct arm_ni_cd *cd = pmu_to_cd(event->pmu);
+
+ writel_relaxed(1U << event->hw.idx, cd->pmu_base + NI_PMCNTENCLR);
+ if (flags & PERF_EF_UPDATE)
+ arm_ni_event_read(event);
+}
+
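+/*
+ * Counters count up and raise an overflow interrupt when they wrap, so
+ * (re)starting them at the most negative value puts the next overflow a
+ * full half-range away while keeping the delta arithmetic simple.
+ */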
+static void arm_ni_init_ccnt(struct hw_perf_event *hw)
+{
+ local64_set(&hw->prev_count, S64_MIN);
+ lo_hi_writeq_relaxed(S64_MIN, (void __iomem *)hw->event_base);
+}
+
+static void arm_ni_init_evcnt(struct hw_perf_event *hw)
+{
+ local64_set(&hw->prev_count, S32_MIN);
+ writel_relaxed(S32_MIN, (void __iomem *)hw->event_base);
+}
+
+static int arm_ni_event_add(struct perf_event *event, int flags)
+{
+ struct arm_ni_cd *cd = pmu_to_cd(event->pmu);
+ struct hw_perf_event *hw = &event->hw;
+ struct arm_ni_unit *unit;
+ enum ni_node_type type = NI_EVENT_TYPE(event);
+ u32 reg;
+
+ if (type == NI_PMU) {
+ if (cd->ccnt)
+ return -ENOSPC;
+ hw->idx = NI_CCNT_IDX;
+ hw->event_base = (unsigned long)cd->pmu_base +
+ (hw->flags ? NI700_PMCCNTR_L : NI_PMCCNTR_L);
+ cd->ccnt = event;
+ arm_ni_init_ccnt(hw);
+ } else {
+ hw->idx = 0;
+ while (cd->evcnt[hw->idx]) {
+ if (++hw->idx == NI_NUM_COUNTERS)
+ return -ENOSPC;
+ }
+ cd->evcnt[hw->idx] = event;
+ unit = (void *)hw->config_base;
+ unit->event[hw->idx] = NI_EVENT_EVENTID(event);
+ hw->event_base = (unsigned long)cd->pmu_base +
+ (hw->flags ? NI700_PMEVCNTR(hw->idx) : NI_PMEVCNTR(hw->idx));
+ arm_ni_init_evcnt(hw);
+ lo_hi_writeq_relaxed(le64_to_cpu(unit->pmusel), unit->pmusela);
+
+ reg = FIELD_PREP(NI_PMEVTYPER_NODE_TYPE, type) |
+ FIELD_PREP(NI_PMEVTYPER_NODE_ID, NI_EVENT_NODEID(event));
+ writel_relaxed(reg, cd->pmu_base + NI_PMEVTYPER(hw->idx));
+ }
+ if (flags & PERF_EF_START)
+ arm_ni_event_start(event, 0);
+ return 0;
+}
+
+static void arm_ni_event_del(struct perf_event *event, int flags)
+{
+ struct arm_ni_cd *cd = pmu_to_cd(event->pmu);
+ struct hw_perf_event *hw = &event->hw;
+
+ arm_ni_event_stop(event, PERF_EF_UPDATE);
+
+ if (hw->idx == NI_CCNT_IDX)
+ cd->ccnt = NULL;
+ else
+ cd->evcnt[hw->idx] = NULL;
+}
+
+static irqreturn_t arm_ni_handle_irq(int irq, void *dev_id)
+{
+ struct arm_ni_cd *cd = dev_id;
+ irqreturn_t ret = IRQ_NONE;
+
+ for (;;) {
+ u32 reg = readl_relaxed(cd->pmu_base + NI_PMOVSCLR);
+
+ if (reg & (1U << NI_CCNT_IDX)) {
+ ret = IRQ_HANDLED;
+ if (!(WARN_ON(!cd->ccnt))) {
+ arm_ni_event_read(cd->ccnt);
+ arm_ni_init_ccnt(&cd->ccnt->hw);
+ }
+ }
+ for (int i = 0; i < NI_NUM_COUNTERS; i++) {
+ if (!(reg & (1U << i)))
+ continue;
+ ret = IRQ_HANDLED;
+ if (!(WARN_ON(!cd->evcnt[i]))) {
+ arm_ni_event_read(cd->evcnt[i]);
+ arm_ni_init_evcnt(&cd->evcnt[i]->hw);
+ }
+ }
+ writel_relaxed(reg, cd->pmu_base + NI_PMOVSCLR);
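+ /*
+ * If another clock domain shares this IRQ (irq_friend holds the
+ * relative offset to it), go round again and service that one too.
+ */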
+ if (!cd->irq_friend)
+ return ret;
+ cd += cd->irq_friend;
+ }
+}
+
+static void __iomem *arm_ni_get_pmusel(struct arm_ni *ni, void __iomem *unit_base)
+{
+ u32 type, ptr, num;
+
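+ /*
+ * NI-700/710AE expose PMUSELA at a fixed offset; later parts
+ * describe it as an FCU sub-feature whose pointer we look up.
+ */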
+ if (arm_ni_is_7xx(ni))
+ return unit_base + NI700_PMUSELA;
+
+ num = readl_relaxed(unit_base + NI_NUM_SUB_FEATURES);
+ for (int i = 0; i < num; i++) {
+ type = readl_relaxed(unit_base + NI_SUB_FEATURE_TYPE(i));
+ if (type != NI_SUB_FEATURE_TYPE_FCU)
+ continue;
+ ptr = readl_relaxed(unit_base + NI_SUB_FEATURE_PTR(i));
+ return ni->base + ptr;
+ }
+ /* Should be impossible */
+ return NULL;
+}
+
+static int arm_ni_init_cd(struct arm_ni *ni, struct arm_ni_node *node, u64 res_start)
+{
+ struct arm_ni_cd *cd = ni->cds + node->id;
+ const char *name;
+
+ cd->id = node->id;
+ cd->num_units = node->num_components;
+ cd->units = devm_kcalloc(ni->dev, cd->num_units, sizeof(*(cd->units)), GFP_KERNEL);
+ if (!cd->units)
+ return -ENOMEM;
+
+ for (int i = 0; i < cd->num_units; i++) {
+ u32 reg = readl_relaxed(node->base + NI_CHILD_PTR(i));
+ void __iomem *unit_base = ni->base + reg;
+ struct arm_ni_unit *unit = cd->units + i;
+
+ reg = readl_relaxed(unit_base + NI_NODE_TYPE);
+ unit->type = FIELD_GET(NI_NODE_TYPE_NODE_TYPE, reg);
+ unit->id = FIELD_GET(NI_NODE_TYPE_NODE_ID, reg);
+
+ switch (unit->type) {
+ case NI_PMU:
+ reg = readl_relaxed(unit_base + NI_PMCFGR);
+ if (!reg) {
+ dev_info(ni->dev, "No access to PMU %d\n", cd->id);
+ devm_kfree(ni->dev, cd->units);
+ return 0;
+ }
+ unit->ns = true;
+ cd->pmu_base = unit_base;
+ break;
+ case NI_ASNI:
+ case NI_AMNI:
+ case NI_HSNI:
+ case NI_HMNI:
+ case NI_PMNI:
+ case NI_TSNI:
+ case NI_TMNI:
+ case NI_CMNI:
+ unit->pmusela = arm_ni_get_pmusel(ni, unit_base);
+ writel_relaxed(1, unit->pmusela);
+ if (readl_relaxed(unit->pmusela) != 1)
+ dev_info(ni->dev, "No access to node 0x%04x%04x\n", unit->id, unit->type);
+ else
+ unit->ns = true;
+ break;
+ case NI_MCN:
+ break;
+ default:
+ /*
+ * e.g. an FMU - thankfully bits 3:2 of FMU_ERR_FR0 are RES0, so
+ * it can't alias any of the leaf node types we're looking for.
+ */
+ dev_dbg(ni->dev, "Mystery node 0x%04x%04x\n", unit->id, unit->type);
+ break;
+ }
+ }
+
+ res_start += cd->pmu_base - ni->base;
+ if (!devm_request_mem_region(ni->dev, res_start, SZ_4K, dev_name(ni->dev))) {
+ dev_err(ni->dev, "Failed to request PMU region 0x%llx\n", res_start);
+ return -EBUSY;
+ }
+
+ writel_relaxed(NI_PMCR_RESET_CCNT | NI_PMCR_RESET_EVCNT,
+ cd->pmu_base + NI_PMCR);
+ writel_relaxed(U32_MAX, cd->pmu_base + NI_PMCNTENCLR);
+ writel_relaxed(U32_MAX, cd->pmu_base + NI_PMOVSCLR);
+
+ cd->irq = platform_get_irq(to_platform_device(ni->dev), cd->id);
+ if (cd->irq < 0)
+ return cd->irq;
+
+ cd->pmu = (struct pmu) {
+ .module = THIS_MODULE,
+ .parent = ni->dev,
+ .attr_groups = arm_ni_attr_groups,
+ .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
+ .task_ctx_nr = perf_invalid_context,
+ .pmu_enable = arm_ni_pmu_enable,
+ .pmu_disable = arm_ni_pmu_disable,
+ .event_init = arm_ni_event_init,
+ .add = arm_ni_event_add,
+ .del = arm_ni_event_del,
+ .start = arm_ni_event_start,
+ .stop = arm_ni_event_stop,
+ .read = arm_ni_event_read,
+ };
+
+ name = devm_kasprintf(ni->dev, GFP_KERNEL, "arm_ni_%d_cd_%d", ni->id, cd->id);
+ if (!name)
+ return -ENOMEM;
+
+ return perf_pmu_register(&cd->pmu, name, -1);
+}
+
+static void arm_ni_remove(struct platform_device *pdev)
+{
+ struct arm_ni *ni = platform_get_drvdata(pdev);
+
+ ni_for_each_cd(ni, cd) {
+ writel_relaxed(0, cd->pmu_base + NI_PMCR);
+ writel_relaxed(U32_MAX, cd->pmu_base + NI_PMINTENCLR);
+ perf_pmu_unregister(&cd->pmu);
+ }
+ cpuhp_state_remove_instance_nocalls(arm_ni_hp_state, &ni->cpuhp_node);
+}
+
+static void arm_ni_probe_domain(void __iomem *base, struct arm_ni_node *node)
+{
+ u32 reg = readl_relaxed(base + NI_NODE_TYPE);
+
+ node->base = base;
+ node->type = FIELD_GET(NI_NODE_TYPE_NODE_TYPE, reg);
+ node->id = FIELD_GET(NI_NODE_TYPE_NODE_ID, reg);
+ node->num_components = readl_relaxed(base + NI_CHILD_NODE_INFO);
+}
+
+static int arm_ni_init_irqs(struct arm_ni *ni)
+{
+ int err;
+
+ ni_for_each_cd(ni, cd) {
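+ /*
+ * If an earlier clock domain already requested this IRQ, just
+ * record the relative offset so the handler can chain to us,
+ * rather than requesting it again.
+ */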
+ for (struct arm_ni_cd *prev = cd; prev-- > ni->cds; ) {
+ if (prev->irq == cd->irq) {
+ prev->irq_friend = cd - prev;
+ goto set_inten;
+ }
+ }
+ err = devm_request_irq(ni->dev, cd->irq, arm_ni_handle_irq,
+ IRQF_NOBALANCING | IRQF_NO_THREAD | IRQF_NO_AUTOEN,
+ dev_name(ni->dev), cd);
+ if (err)
+ return err;
+
+ irq_set_affinity(cd->irq, cpumask_of(ni->cpu));
+set_inten:
+ writel_relaxed(U32_MAX, cd->pmu_base + NI_PMINTENSET);
+ }
+
+ ni_for_each_cd(ni, cd)
+ if (!cd->irq_friend)
+ enable_irq(cd->irq);
+ return 0;
+}
+
+static int arm_ni_probe(struct platform_device *pdev)
+{
+ struct arm_ni_node cfg, vd, pd, cd;
+ struct arm_ni *ni;
+ struct resource *res;
+ void __iomem *base;
+ static atomic_t id;
+ int ret, num_cds;
+ u32 reg, part;
+
+ /*
+ * We want to map the whole configuration space for ease of discovery,
+ * but the PMU pages are the only ones for which we can honestly claim
+ * exclusive ownership, so we'll request them explicitly once found.
+ */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+ if (!base)
+ return -ENOMEM;
+
+ arm_ni_probe_domain(base, &cfg);
+ if (cfg.type != NI_GLOBAL)
+ return -ENODEV;
+
+ reg = readl_relaxed(cfg.base + NI_PERIPHERAL_ID0);
+ part = FIELD_GET(NI_PIDR0_PART_7_0, reg);
+ reg = readl_relaxed(cfg.base + NI_PERIPHERAL_ID1);
+ part |= FIELD_GET(NI_PIDR1_PART_11_8, reg) << 8;
+
+ switch (part) {
+ case PART_NI_700:
+ case PART_NI_710AE:
+ case PART_NOC_S3:
+ case PART_SI_L1:
+ break;
+ default:
+ dev_WARN(&pdev->dev, "Unknown part number: 0x%03x, this may go badly\n", part);
+ break;
+ }
+
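+ /*
+ * First pass over the voltage and power domains just counts the
+ * clock domains so we know how many to allocate.
+ */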
+ num_cds = 0;
+ for (int v = 0; v < cfg.num_components; v++) {
+ reg = readl_relaxed(cfg.base + NI_CHILD_PTR(v));
+ arm_ni_probe_domain(base + reg, &vd);
+ for (int p = 0; p < vd.num_components; p++) {
+ reg = readl_relaxed(vd.base + NI_CHILD_PTR(p));
+ arm_ni_probe_domain(base + reg, &pd);
+ num_cds += pd.num_components;
+ }
+ }
+
+ ni = devm_kzalloc(&pdev->dev, struct_size(ni, cds, num_cds), GFP_KERNEL);
+ if (!ni)
+ return -ENOMEM;
+
+ ni->dev = &pdev->dev;
+ ni->base = base;
+ ni->num_cds = num_cds;
+ ni->part = part;
+ ni->id = atomic_fetch_inc(&id);
+ ni->cpu = cpumask_local_spread(0, dev_to_node(ni->dev));
+ platform_set_drvdata(pdev, ni);
+
+ ret = cpuhp_state_add_instance_nocalls(arm_ni_hp_state, &ni->cpuhp_node);
+ if (ret)
+ return ret;
+
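+ /* Second pass: probe each clock domain and register its PMU */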
+ for (int v = 0; v < cfg.num_components; v++) {
+ reg = readl_relaxed(cfg.base + NI_CHILD_PTR(v));
+ arm_ni_probe_domain(base + reg, &vd);
+ for (int p = 0; p < vd.num_components; p++) {
+ reg = readl_relaxed(vd.base + NI_CHILD_PTR(p));
+ arm_ni_probe_domain(base + reg, &pd);
+ for (int c = 0; c < pd.num_components; c++) {
+ reg = readl_relaxed(pd.base + NI_CHILD_PTR(c));
+ arm_ni_probe_domain(base + reg, &cd);
+ ret = arm_ni_init_cd(ni, &cd, res->start);
+ if (ret) {
+ ni->cds[cd.id].pmu_base = NULL;
+ arm_ni_remove(pdev);
+ return ret;
+ }
+ }
+ }
+ }
+
+ ret = arm_ni_init_irqs(ni);
+ if (ret)
+ arm_ni_remove(pdev);
+
+ return ret;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id arm_ni_of_match[] = {
+ { .compatible = "arm,ni-700" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, arm_ni_of_match);
+#endif
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id arm_ni_acpi_match[] = {
+ { "ARMHCB70" },
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, arm_ni_acpi_match);
+#endif
+
+static struct platform_driver arm_ni_driver = {
+ .driver = {
+ .name = "arm-ni",
+ .of_match_table = of_match_ptr(arm_ni_of_match),
+ .acpi_match_table = ACPI_PTR(arm_ni_acpi_match),
+ .suppress_bind_attrs = true,
+ },
+ .probe = arm_ni_probe,
+ .remove = arm_ni_remove,
+};
+
+static void arm_ni_pmu_migrate(struct arm_ni *ni, unsigned int cpu)
+{
+ ni_for_each_cd(ni, cd) {
+ perf_pmu_migrate_context(&cd->pmu, ni->cpu, cpu);
+ irq_set_affinity(cd->irq, cpumask_of(cpu));
+ }
+ ni->cpu = cpu;
+}
+
+static int arm_ni_pmu_online_cpu(unsigned int cpu, struct hlist_node *cpuhp_node)
+{
+ struct arm_ni *ni;
+ int node;
+
+ ni = hlist_entry_safe(cpuhp_node, struct arm_ni, cpuhp_node);
+ node = dev_to_node(ni->dev);
+ if (cpu_to_node(ni->cpu) != node && cpu_to_node(cpu) == node)
+ arm_ni_pmu_migrate(ni, cpu);
+ return 0;
+}
+
+static int arm_ni_pmu_offline_cpu(unsigned int cpu, struct hlist_node *cpuhp_node)
+{
+ struct arm_ni *ni;
+ unsigned int target;
+ int node;
+
+ ni = hlist_entry_safe(cpuhp_node, struct arm_ni, cpuhp_node);
+ if (cpu != ni->cpu)
+ return 0;
+
+ node = dev_to_node(ni->dev);
+ target = cpumask_any_and_but(cpumask_of_node(node), cpu_online_mask, cpu);
+ if (target >= nr_cpu_ids)
+ target = cpumask_any_but(cpu_online_mask, cpu);
+
+ if (target < nr_cpu_ids)
+ arm_ni_pmu_migrate(ni, target);
+ return 0;
+}
+
+static int __init arm_ni_init(void)
+{
+ int ret;
+
+ ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
+ "perf/arm/ni:online",
+ arm_ni_pmu_online_cpu,
+ arm_ni_pmu_offline_cpu);
+ if (ret < 0)
+ return ret;
+
+ arm_ni_hp_state = ret;
+
+ ret = platform_driver_register(&arm_ni_driver);
+ if (ret)
+ cpuhp_remove_multi_state(arm_ni_hp_state);
+ return ret;
+}
+
+static void __exit arm_ni_exit(void)
+{
+ platform_driver_unregister(&arm_ni_driver);
+ cpuhp_remove_multi_state(arm_ni_hp_state);
+}
+
+module_init(arm_ni_init);
+module_exit(arm_ni_exit);
+
+MODULE_AUTHOR("Robin Murphy <robin.murphy@arm.com>");
+MODULE_DESCRIPTION("Arm NI-700 PMU driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/perf/arm_brbe.c b/drivers/perf/arm_brbe.c
new file mode 100644
index 000000000000..ba554e0c846c
--- /dev/null
+++ b/drivers/perf/arm_brbe.c
@@ -0,0 +1,805 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Branch Record Buffer Extension Driver.
+ *
+ * Copyright (C) 2022-2025 ARM Limited
+ *
+ * Author: Anshuman Khandual <anshuman.khandual@arm.com>
+ */
+#include <linux/types.h>
+#include <linux/bitmap.h>
+#include <linux/perf/arm_pmu.h>
+#include "arm_brbe.h"
+
+#define BRBFCR_EL1_BRANCH_FILTERS (BRBFCR_EL1_DIRECT | \
+ BRBFCR_EL1_INDIRECT | \
+ BRBFCR_EL1_RTN | \
+ BRBFCR_EL1_INDCALL | \
+ BRBFCR_EL1_DIRCALL | \
+ BRBFCR_EL1_CONDDIR)
+
+/*
+ * BRBTS_EL1 is not currently used for branch stack implementation
+ * purposes, but BRBCR_ELx.TS still needs a valid value from the
+ * available options; BRBCR_ELx_TS_VIRTUAL is selected for this.
+ */
+#define BRBCR_ELx_DEFAULT_TS FIELD_PREP(BRBCR_ELx_TS_MASK, BRBCR_ELx_TS_VIRTUAL)
+
+/*
+ * BRBE Buffer Organization
+ *
+ * BRBE buffer is arranged as multiple banks of 32 branch record
+ * entries each. An individual branch record in a given bank can be
+ * accessed after selecting the bank in BRBFCR_EL1.BANK and then
+ * reading the register set [BRBSRC, BRBTGT, BRBINF] at indices
+ * [0..31].
+ *
+ * Bank 0
+ *
+ * --------------------------------- ------
+ * | 00 | BRBSRC | BRBTGT | BRBINF | | 00 |
+ * --------------------------------- ------
+ * | 01 | BRBSRC | BRBTGT | BRBINF | | 01 |
+ * --------------------------------- ------
+ * | .. | BRBSRC | BRBTGT | BRBINF | | .. |
+ * --------------------------------- ------
+ * | 31 | BRBSRC | BRBTGT | BRBINF | | 31 |
+ * --------------------------------- ------
+ *
+ * Bank 1
+ *
+ * --------------------------------- ------
+ * | 32 | BRBSRC | BRBTGT | BRBINF | | 00 |
+ * --------------------------------- ------
+ * | 33 | BRBSRC | BRBTGT | BRBINF | | 01 |
+ * --------------------------------- ------
+ * | .. | BRBSRC | BRBTGT | BRBINF | | .. |
+ * --------------------------------- ------
+ * | 63 | BRBSRC | BRBTGT | BRBINF | | 31 |
+ * --------------------------------- ------
+ */
+#define BRBE_BANK_MAX_ENTRIES 32
+
+struct brbe_regset {
+ u64 brbsrc;
+ u64 brbtgt;
+ u64 brbinf;
+};
+
+#define PERF_BR_ARM64_MAX (PERF_BR_MAX + PERF_BR_NEW_MAX)
+
+struct brbe_hw_attr {
+ int brbe_version;
+ int brbe_cc;
+ int brbe_nr;
+ int brbe_format;
+};
+
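+/*
+ * The BRBSRC/BRBTGT/BRBINF registers can only be accessed with a register
+ * number encoded in the sysreg instruction itself, so a runtime index has
+ * to be expanded into a switch over every possible register number.
+ */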
+#define BRBE_REGN_CASE(n, case_macro) \
+ case n: case_macro(n); break
+
+#define BRBE_REGN_SWITCH(x, case_macro) \
+ do { \
+ switch (x) { \
+ BRBE_REGN_CASE(0, case_macro); \
+ BRBE_REGN_CASE(1, case_macro); \
+ BRBE_REGN_CASE(2, case_macro); \
+ BRBE_REGN_CASE(3, case_macro); \
+ BRBE_REGN_CASE(4, case_macro); \
+ BRBE_REGN_CASE(5, case_macro); \
+ BRBE_REGN_CASE(6, case_macro); \
+ BRBE_REGN_CASE(7, case_macro); \
+ BRBE_REGN_CASE(8, case_macro); \
+ BRBE_REGN_CASE(9, case_macro); \
+ BRBE_REGN_CASE(10, case_macro); \
+ BRBE_REGN_CASE(11, case_macro); \
+ BRBE_REGN_CASE(12, case_macro); \
+ BRBE_REGN_CASE(13, case_macro); \
+ BRBE_REGN_CASE(14, case_macro); \
+ BRBE_REGN_CASE(15, case_macro); \
+ BRBE_REGN_CASE(16, case_macro); \
+ BRBE_REGN_CASE(17, case_macro); \
+ BRBE_REGN_CASE(18, case_macro); \
+ BRBE_REGN_CASE(19, case_macro); \
+ BRBE_REGN_CASE(20, case_macro); \
+ BRBE_REGN_CASE(21, case_macro); \
+ BRBE_REGN_CASE(22, case_macro); \
+ BRBE_REGN_CASE(23, case_macro); \
+ BRBE_REGN_CASE(24, case_macro); \
+ BRBE_REGN_CASE(25, case_macro); \
+ BRBE_REGN_CASE(26, case_macro); \
+ BRBE_REGN_CASE(27, case_macro); \
+ BRBE_REGN_CASE(28, case_macro); \
+ BRBE_REGN_CASE(29, case_macro); \
+ BRBE_REGN_CASE(30, case_macro); \
+ BRBE_REGN_CASE(31, case_macro); \
+ default: WARN(1, "Invalid BRB* index %d\n", x); \
+ } \
+ } while (0)
+
+#define RETURN_READ_BRBSRCN(n) \
+ return read_sysreg_s(SYS_BRBSRC_EL1(n))
+static inline u64 get_brbsrc_reg(int idx)
+{
+ BRBE_REGN_SWITCH(idx, RETURN_READ_BRBSRCN);
+ return 0;
+}
+
+#define RETURN_READ_BRBTGTN(n) \
+ return read_sysreg_s(SYS_BRBTGT_EL1(n))
+static u64 get_brbtgt_reg(int idx)
+{
+ BRBE_REGN_SWITCH(idx, RETURN_READ_BRBTGTN);
+ return 0;
+}
+
+#define RETURN_READ_BRBINFN(n) \
+ return read_sysreg_s(SYS_BRBINF_EL1(n))
+static u64 get_brbinf_reg(int idx)
+{
+ BRBE_REGN_SWITCH(idx, RETURN_READ_BRBINFN);
+ return 0;
+}
+
+static u64 brbe_record_valid(u64 brbinf)
+{
+ return FIELD_GET(BRBINFx_EL1_VALID_MASK, brbinf);
+}
+
+static bool brbe_invalid(u64 brbinf)
+{
+ return brbe_record_valid(brbinf) == BRBINFx_EL1_VALID_NONE;
+}
+
+static bool brbe_record_is_complete(u64 brbinf)
+{
+ return brbe_record_valid(brbinf) == BRBINFx_EL1_VALID_FULL;
+}
+
+static bool brbe_record_is_source_only(u64 brbinf)
+{
+ return brbe_record_valid(brbinf) == BRBINFx_EL1_VALID_SOURCE;
+}
+
+static bool brbe_record_is_target_only(u64 brbinf)
+{
+ return brbe_record_valid(brbinf) == BRBINFx_EL1_VALID_TARGET;
+}
+
+static int brbinf_get_in_tx(u64 brbinf)
+{
+ return FIELD_GET(BRBINFx_EL1_T_MASK, brbinf);
+}
+
+static int brbinf_get_mispredict(u64 brbinf)
+{
+ return FIELD_GET(BRBINFx_EL1_MPRED_MASK, brbinf);
+}
+
+static int brbinf_get_lastfailed(u64 brbinf)
+{
+ return FIELD_GET(BRBINFx_EL1_LASTFAILED_MASK, brbinf);
+}
+
+static u16 brbinf_get_cycles(u64 brbinf)
+{
+ u32 exp, mant, cycles;
+
+ /*
+ * If the captured cycle count is marked unknown (CCU set),
+ * it must not be passed on to userspace.
+ */
+ if (brbinf & BRBINFx_EL1_CCU)
+ return 0;
+
+ exp = FIELD_GET(BRBINFx_EL1_CC_EXP_MASK, brbinf);
+ mant = FIELD_GET(BRBINFx_EL1_CC_MANT_MASK, brbinf);
+
+ if (!exp)
+ return mant;
+
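+ /* A non-zero exponent implies an implicit leading bit above the mantissa */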
+ cycles = (mant | 0x100) << (exp - 1);
+
+ return min(cycles, U16_MAX);
+}
+
+static int brbinf_get_type(u64 brbinf)
+{
+ return FIELD_GET(BRBINFx_EL1_TYPE_MASK, brbinf);
+}
+
+static int brbinf_get_el(u64 brbinf)
+{
+ return FIELD_GET(BRBINFx_EL1_EL_MASK, brbinf);
+}
+
+void brbe_invalidate(void)
+{
+ /* Ensure all branches before this point are recorded */
+ isb();
+ asm volatile(BRB_IALL_INSN);
+ /* Ensure all branch records are invalidated after this point */
+ isb();
+}
+
+static bool valid_brbe_nr(int brbe_nr)
+{
+ return brbe_nr == BRBIDR0_EL1_NUMREC_8 ||
+ brbe_nr == BRBIDR0_EL1_NUMREC_16 ||
+ brbe_nr == BRBIDR0_EL1_NUMREC_32 ||
+ brbe_nr == BRBIDR0_EL1_NUMREC_64;
+}
+
+static bool valid_brbe_cc(int brbe_cc)
+{
+ return brbe_cc == BRBIDR0_EL1_CC_20_BIT;
+}
+
+static bool valid_brbe_format(int brbe_format)
+{
+ return brbe_format == BRBIDR0_EL1_FORMAT_FORMAT_0;
+}
+
+static bool valid_brbidr(u64 brbidr)
+{
+ int brbe_format, brbe_cc, brbe_nr;
+
+ brbe_format = FIELD_GET(BRBIDR0_EL1_FORMAT_MASK, brbidr);
+ brbe_cc = FIELD_GET(BRBIDR0_EL1_CC_MASK, brbidr);
+ brbe_nr = FIELD_GET(BRBIDR0_EL1_NUMREC_MASK, brbidr);
+
+ return valid_brbe_format(brbe_format) && valid_brbe_cc(brbe_cc) && valid_brbe_nr(brbe_nr);
+}
+
+static bool valid_brbe_version(int brbe_version)
+{
+ return brbe_version == ID_AA64DFR0_EL1_BRBE_IMP ||
+ brbe_version == ID_AA64DFR0_EL1_BRBE_BRBE_V1P1;
+}
+
+static void select_brbe_bank(int bank)
+{
+ u64 brbfcr;
+
+ brbfcr = read_sysreg_s(SYS_BRBFCR_EL1);
+ brbfcr &= ~BRBFCR_EL1_BANK_MASK;
+ brbfcr |= SYS_FIELD_PREP(BRBFCR_EL1, BANK, bank);
+ write_sysreg_s(brbfcr, SYS_BRBFCR_EL1);
+ /*
+ * Arm ARM (DDI 0487K.a) D.18.4 rule PPBZP requires explicit sync
+ * between setting BANK and accessing branch records.
+ */
+ isb();
+}
+
+static bool __read_brbe_regset(struct brbe_regset *entry, int idx)
+{
+ entry->brbinf = get_brbinf_reg(idx);
+
+ if (brbe_invalid(entry->brbinf))
+ return false;
+
+ entry->brbsrc = get_brbsrc_reg(idx);
+ entry->brbtgt = get_brbtgt_reg(idx);
+ return true;
+}
+
+/*
+ * Generic perf branch filters supported on BRBE
+ *
+ * New branch filters need to be evaluated for whether they can be supported on
+ * BRBE, so that unsupported filters are rejected rather than accepted only to
+ * fail silently. PERF_SAMPLE_BRANCH_HV is a special case that is selectively
+ * supported only on platforms where the kernel runs in hyp mode.
+ */
+#define BRBE_EXCLUDE_BRANCH_FILTERS (PERF_SAMPLE_BRANCH_ABORT_TX | \
+ PERF_SAMPLE_BRANCH_IN_TX | \
+ PERF_SAMPLE_BRANCH_NO_TX | \
+ PERF_SAMPLE_BRANCH_CALL_STACK | \
+ PERF_SAMPLE_BRANCH_COUNTERS)
+
+#define BRBE_ALLOWED_BRANCH_TYPES (PERF_SAMPLE_BRANCH_ANY | \
+ PERF_SAMPLE_BRANCH_ANY_CALL | \
+ PERF_SAMPLE_BRANCH_ANY_RETURN | \
+ PERF_SAMPLE_BRANCH_IND_CALL | \
+ PERF_SAMPLE_BRANCH_COND | \
+ PERF_SAMPLE_BRANCH_IND_JUMP | \
+ PERF_SAMPLE_BRANCH_CALL)
+
+#define BRBE_ALLOWED_BRANCH_FILTERS (PERF_SAMPLE_BRANCH_USER | \
+ PERF_SAMPLE_BRANCH_KERNEL | \
+ PERF_SAMPLE_BRANCH_HV | \
+ BRBE_ALLOWED_BRANCH_TYPES | \
+ PERF_SAMPLE_BRANCH_NO_FLAGS | \
+ PERF_SAMPLE_BRANCH_NO_CYCLES | \
+ PERF_SAMPLE_BRANCH_TYPE_SAVE | \
+ PERF_SAMPLE_BRANCH_HW_INDEX | \
+ PERF_SAMPLE_BRANCH_PRIV_SAVE)
+
+#define BRBE_PERF_BRANCH_FILTERS (BRBE_ALLOWED_BRANCH_FILTERS | \
+ BRBE_EXCLUDE_BRANCH_FILTERS)
+
+/*
+ * BRBE supports the following functional branch type filters while
+ * generating branch records. These branch filters can be enabled
+ * either individually or as a group, i.e. by ORing multiple filters
+ * together.
+ *
+ * BRBFCR_EL1_CONDDIR - Conditional direct branch
+ * BRBFCR_EL1_DIRCALL - Direct call
+ * BRBFCR_EL1_INDCALL - Indirect call
+ * BRBFCR_EL1_INDIRECT - Indirect branch
+ * BRBFCR_EL1_DIRECT - Direct branch
+ * BRBFCR_EL1_RTN - Subroutine return
+ */
+static u64 branch_type_to_brbfcr(int branch_type)
+{
+ u64 brbfcr = 0;
+
+ if (branch_type & PERF_SAMPLE_BRANCH_ANY) {
+ brbfcr |= BRBFCR_EL1_BRANCH_FILTERS;
+ return brbfcr;
+ }
+
+ if (branch_type & PERF_SAMPLE_BRANCH_ANY_CALL) {
+ brbfcr |= BRBFCR_EL1_INDCALL;
+ brbfcr |= BRBFCR_EL1_DIRCALL;
+ }
+
+ if (branch_type & PERF_SAMPLE_BRANCH_ANY_RETURN)
+ brbfcr |= BRBFCR_EL1_RTN;
+
+ if (branch_type & PERF_SAMPLE_BRANCH_IND_CALL)
+ brbfcr |= BRBFCR_EL1_INDCALL;
+
+ if (branch_type & PERF_SAMPLE_BRANCH_COND)
+ brbfcr |= BRBFCR_EL1_CONDDIR;
+
+ if (branch_type & PERF_SAMPLE_BRANCH_IND_JUMP)
+ brbfcr |= BRBFCR_EL1_INDIRECT;
+
+ if (branch_type & PERF_SAMPLE_BRANCH_CALL)
+ brbfcr |= BRBFCR_EL1_DIRCALL;
+
+ return brbfcr;
+}
+
+/*
+ * BRBE supports the following privilege mode filters while generating
+ * branch records.
+ *
+ * BRBCR_ELx_E0BRE - EL0 branch records
+ * BRBCR_ELx_ExBRE - EL1/EL2 branch records
+ *
+ * BRBE also supports the following additional functional branch type
+ * filters while generating branch records.
+ *
+ * BRBCR_ELx_EXCEPTION - Exception
+ * BRBCR_ELx_ERTN - Exception return
+ */
+static u64 branch_type_to_brbcr(int branch_type)
+{
+ u64 brbcr = BRBCR_ELx_FZP | BRBCR_ELx_DEFAULT_TS;
+
+ if (branch_type & PERF_SAMPLE_BRANCH_USER)
+ brbcr |= BRBCR_ELx_E0BRE;
+
+ /*
+ * When running in hyp mode, writing into BRBCR_EL1
+ * actually writes into BRBCR_EL2 instead. Field E2BRE
+ * is also at the same position as E1BRE.
+ */
+ if (branch_type & PERF_SAMPLE_BRANCH_KERNEL)
+ brbcr |= BRBCR_ELx_ExBRE;
+
+ if (branch_type & PERF_SAMPLE_BRANCH_HV) {
+ if (is_kernel_in_hyp_mode())
+ brbcr |= BRBCR_ELx_ExBRE;
+ }
+
+ if (!(branch_type & PERF_SAMPLE_BRANCH_NO_CYCLES))
+ brbcr |= BRBCR_ELx_CC;
+
+ if (!(branch_type & PERF_SAMPLE_BRANCH_NO_FLAGS))
+ brbcr |= BRBCR_ELx_MPRED;
+
+ /*
+ * Exception and exception return branches may be captured
+ * irrespective of the perf event's privilege. If the perf
+ * event does not have enough privilege for a given exception
+ * level, then addresses belonging to that exception level
+ * will be reported as zero in the captured branch record,
+ * creating source-only or target-only records.
+ */
+ if (branch_type & PERF_SAMPLE_BRANCH_KERNEL) {
+ if (branch_type & PERF_SAMPLE_BRANCH_ANY) {
+ brbcr |= BRBCR_ELx_EXCEPTION;
+ brbcr |= BRBCR_ELx_ERTN;
+ }
+
+ if (branch_type & PERF_SAMPLE_BRANCH_ANY_CALL)
+ brbcr |= BRBCR_ELx_EXCEPTION;
+
+ if (branch_type & PERF_SAMPLE_BRANCH_ANY_RETURN)
+ brbcr |= BRBCR_ELx_ERTN;
+ }
+ return brbcr;
+}
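+
+/*
+ * Illustrative example: for a branch_sample_type of PERF_SAMPLE_BRANCH_USER |
+ * PERF_SAMPLE_BRANCH_KERNEL | PERF_SAMPLE_BRANCH_ANY (with neither NO_CYCLES
+ * nor NO_FLAGS set), branch_type_to_brbcr() returns BRBCR_ELx_FZP |
+ * BRBCR_ELx_DEFAULT_TS | BRBCR_ELx_E0BRE | BRBCR_ELx_ExBRE | BRBCR_ELx_CC |
+ * BRBCR_ELx_MPRED | BRBCR_ELx_EXCEPTION | BRBCR_ELx_ERTN.
+ */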
+
+bool brbe_branch_attr_valid(struct perf_event *event)
+{
+ u64 branch_type = event->attr.branch_sample_type;
+
+ /*
+ * Ensure both the allowed and excluded perf branch filter masks
+ * stay in sync with the generic perf ABI.
+ */
+ BUILD_BUG_ON(BRBE_PERF_BRANCH_FILTERS != (PERF_SAMPLE_BRANCH_MAX - 1));
+
+ if (branch_type & BRBE_EXCLUDE_BRANCH_FILTERS) {
+ pr_debug("requested branch filter not supported 0x%llx\n", branch_type);
+ return false;
+ }
+
+ /* Ensure at least one branch type is enabled */
+ if (!(branch_type & BRBE_ALLOWED_BRANCH_TYPES)) {
+ pr_debug("no branch type enabled 0x%llx\n", branch_type);
+ return false;
+ }
+
+ /*
+ * No branches are recorded in guests or nVHE hypervisors, so
+ * excluding the host, or excluding both kernel and user, is invalid.
+ *
+ * Ideally we'd just require exclude_guest and exclude_hv, but setting
+ * event filters with perf for kernel or user doesn't set exclude_guest.
+ * So effectively, exclude_guest and exclude_hv are ignored.
+ */
+ if (event->attr.exclude_host || (event->attr.exclude_user && event->attr.exclude_kernel)) {
+ pr_debug("branch filter in hypervisor or guest only not supported 0x%llx\n", branch_type);
+ return false;
+ }
+
+ event->hw.branch_reg.config = branch_type_to_brbfcr(event->attr.branch_sample_type);
+ event->hw.extra_reg.config = branch_type_to_brbcr(event->attr.branch_sample_type);
+
+ return true;
+}
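+
+/*
+ * Illustrative userspace configuration (not part of this driver): an event
+ * opened with
+ *
+ *	struct perf_event_attr attr = {
+ *		.type = PERF_TYPE_HARDWARE,
+ *		.config = PERF_COUNT_HW_CPU_CYCLES,
+ *		.sample_type = PERF_SAMPLE_BRANCH_STACK,
+ *		.branch_sample_type = PERF_SAMPLE_BRANCH_ANY |
+ *				      PERF_SAMPLE_BRANCH_USER,
+ *	};
+ *
+ * passes brbe_branch_attr_valid(), whereas adding a filter outside
+ * BRBE_ALLOWED_BRANCH_FILTERS (e.g. PERF_SAMPLE_BRANCH_ABORT_TX) makes it
+ * fail with a pr_debug() message.
+ */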
+
+unsigned int brbe_num_branch_records(const struct arm_pmu *armpmu)
+{
+ return FIELD_GET(BRBIDR0_EL1_NUMREC_MASK, armpmu->reg_brbidr);
+}
+
+void brbe_probe(struct arm_pmu *armpmu)
+{
+ u64 brbidr, aa64dfr0 = read_sysreg_s(SYS_ID_AA64DFR0_EL1);
+ u32 brbe;
+
+ brbe = cpuid_feature_extract_unsigned_field(aa64dfr0, ID_AA64DFR0_EL1_BRBE_SHIFT);
+ if (!valid_brbe_version(brbe))
+ return;
+
+ brbidr = read_sysreg_s(SYS_BRBIDR0_EL1);
+ if (!valid_brbidr(brbidr))
+ return;
+
+ armpmu->reg_brbidr = brbidr;
+}
+
+/*
+ * BRBE is assumed to be disabled/paused on entry
+ */
+void brbe_enable(const struct arm_pmu *arm_pmu)
+{
+ struct pmu_hw_events *cpuc = this_cpu_ptr(arm_pmu->hw_events);
+ u64 brbfcr = 0, brbcr = 0;
+
+ /*
+ * Discard existing records to avoid a discontinuity, e.g. records
+ * missed while handling an overflow.
+ */
+ brbe_invalidate();
+
+ /*
+ * Merge the permitted branch filters of all events.
+ */
+ for (int i = 0; i < ARMPMU_MAX_HWEVENTS; i++) {
+ struct perf_event *event = cpuc->events[i];
+
+ if (event && has_branch_stack(event)) {
+ brbfcr |= event->hw.branch_reg.config;
+ brbcr |= event->hw.extra_reg.config;
+ }
+ }
+
+ /*
+ * In VHE mode with MDCR_EL2.HPMN equal to PMCR_EL0.N, BRBCR_EL1.FZP
+ * controls freezing the branch records on counter overflow rather than
+ * BRBCR_EL2.FZP (to which writes to BRBCR_EL1 are redirected).
+ * The exception levels are enabled/disabled in BRBCR_EL2, so keep EL1
+ * and EL0 recording disabled for guests.
+ *
+ * As the BRBCR_EL1 CC and MPRED bits also need to match, use the same
+ * value for both registers, just masking out the exception-level enables.
+ */
+ if (is_kernel_in_hyp_mode())
+ write_sysreg_s(brbcr & ~(BRBCR_ELx_ExBRE | BRBCR_ELx_E0BRE), SYS_BRBCR_EL12);
+ write_sysreg_s(brbcr, SYS_BRBCR_EL1);
+ /* Ensure BRBCR_ELx settings take effect before unpausing */
+ isb();
+
+ /* Finally write SYS_BRBFCR_EL1 to unpause BRBE */
+ write_sysreg_s(brbfcr, SYS_BRBFCR_EL1);
+ /* Synchronization in PMCR write ensures ordering WRT PMU enabling */
+}
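+
+/*
+ * Illustrative example for brbe_enable(): if one scheduled event requested
+ * user-only PERF_SAMPLE_BRANCH_ANY and another requested kernel-only
+ * PERF_SAMPLE_BRANCH_ANY_CALL, the merged programming is the OR of both, so
+ * BRBFCR_EL1 covers all branch types and BRBCR_EL1 enables both EL0 and
+ * EL1/EL2 recording; per-event narrowing happens later in
+ * brbe_read_filtered_entries().
+ */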
+
+void brbe_disable(void)
+{
+ /*
+ * No need for synchronization here: the PMCR write provides the
+ * required ordering, and in the interrupt handler this is a NOP
+ * since BRBE is already paused.
+ */
+ write_sysreg_s(BRBFCR_EL1_PAUSED, SYS_BRBFCR_EL1);
+ write_sysreg_s(0, SYS_BRBCR_EL1);
+}
+
+static const int brbe_type_to_perf_type_map[BRBINFx_EL1_TYPE_DEBUG_EXIT + 1][2] = {
+ [BRBINFx_EL1_TYPE_DIRECT_UNCOND] = { PERF_BR_UNCOND, 0 },
+ [BRBINFx_EL1_TYPE_INDIRECT] = { PERF_BR_IND, 0 },
+ [BRBINFx_EL1_TYPE_DIRECT_LINK] = { PERF_BR_CALL, 0 },
+ [BRBINFx_EL1_TYPE_INDIRECT_LINK] = { PERF_BR_IND_CALL, 0 },
+ [BRBINFx_EL1_TYPE_RET] = { PERF_BR_RET, 0 },
+ [BRBINFx_EL1_TYPE_DIRECT_COND] = { PERF_BR_COND, 0 },
+ [BRBINFx_EL1_TYPE_CALL] = { PERF_BR_SYSCALL, 0 },
+ [BRBINFx_EL1_TYPE_ERET] = { PERF_BR_ERET, 0 },
+ [BRBINFx_EL1_TYPE_IRQ] = { PERF_BR_IRQ, 0 },
+ [BRBINFx_EL1_TYPE_TRAP] = { PERF_BR_IRQ, 0 },
+ [BRBINFx_EL1_TYPE_SERROR] = { PERF_BR_SERROR, 0 },
+ [BRBINFx_EL1_TYPE_ALIGN_FAULT] = { PERF_BR_EXTEND_ABI, PERF_BR_NEW_FAULT_ALGN },
+ [BRBINFx_EL1_TYPE_INSN_FAULT] = { PERF_BR_EXTEND_ABI, PERF_BR_NEW_FAULT_INST },
+ [BRBINFx_EL1_TYPE_DATA_FAULT] = { PERF_BR_EXTEND_ABI, PERF_BR_NEW_FAULT_DATA },
+};
+
+static void brbe_set_perf_entry_type(struct perf_branch_entry *entry, u64 brbinf)
+{
+ int brbe_type = brbinf_get_type(brbinf);
+
+ if (brbe_type <= BRBINFx_EL1_TYPE_DEBUG_EXIT) {
+ const int *br_type = brbe_type_to_perf_type_map[brbe_type];
+
+ entry->type = br_type[0];
+ entry->new_type = br_type[1];
+ }
+}
+
+static int brbinf_get_perf_priv(u64 brbinf)
+{
+ int brbe_el = brbinf_get_el(brbinf);
+
+ switch (brbe_el) {
+ case BRBINFx_EL1_EL_EL0:
+ return PERF_BR_PRIV_USER;
+ case BRBINFx_EL1_EL_EL1:
+ return PERF_BR_PRIV_KERNEL;
+ case BRBINFx_EL1_EL_EL2:
+ if (is_kernel_in_hyp_mode())
+ return PERF_BR_PRIV_KERNEL;
+ return PERF_BR_PRIV_HV;
+ default:
+ pr_warn_once("%d - unknown branch privilege captured\n", brbe_el);
+ return PERF_BR_PRIV_UNKNOWN;
+ }
+}
+
+static bool perf_entry_from_brbe_regset(int index, struct perf_branch_entry *entry,
+ const struct perf_event *event)
+{
+ struct brbe_regset bregs;
+ u64 brbinf;
+
+ if (!__read_brbe_regset(&bregs, index))
+ return false;
+
+ brbinf = bregs.brbinf;
+ perf_clear_branch_entry_bitfields(entry);
+ if (brbe_record_is_complete(brbinf)) {
+ entry->from = bregs.brbsrc;
+ entry->to = bregs.brbtgt;
+ } else if (brbe_record_is_source_only(brbinf)) {
+ entry->from = bregs.brbsrc;
+ entry->to = 0;
+ } else if (brbe_record_is_target_only(brbinf)) {
+ entry->from = 0;
+ entry->to = bregs.brbtgt;
+ }
+
+ brbe_set_perf_entry_type(entry, brbinf);
+
+ if (!branch_sample_no_cycles(event))
+ entry->cycles = brbinf_get_cycles(brbinf);
+
+ if (!branch_sample_no_flags(event)) {
+ /* Mispredict info is available for source only and complete branch records. */
+ if (!brbe_record_is_target_only(brbinf)) {
+ entry->mispred = brbinf_get_mispredict(brbinf);
+ entry->predicted = !entry->mispred;
+ }
+
+ /*
+ * The TME feature is currently neither implemented in any hardware
+ * nor supported in the kernel. Warn once if TME-related information
+ * shows up unexpectedly.
+ */
+ if (brbinf_get_lastfailed(brbinf) || brbinf_get_in_tx(brbinf))
+ pr_warn_once("Unknown transaction states\n");
+ }
+
+ /*
+ * Branch privilege level is available for target only and complete
+ * branch records.
+ */
+ if (!brbe_record_is_source_only(brbinf))
+ entry->priv = brbinf_get_perf_priv(brbinf);
+
+ return true;
+}
+
+#define PERF_BR_ARM64_ALL ( \
+ BIT(PERF_BR_COND) | \
+ BIT(PERF_BR_UNCOND) | \
+ BIT(PERF_BR_IND) | \
+ BIT(PERF_BR_CALL) | \
+ BIT(PERF_BR_IND_CALL) | \
+ BIT(PERF_BR_RET))
+
+#define PERF_BR_ARM64_ALL_KERNEL ( \
+ BIT(PERF_BR_SYSCALL) | \
+ BIT(PERF_BR_IRQ) | \
+ BIT(PERF_BR_SERROR) | \
+ BIT(PERF_BR_MAX + PERF_BR_NEW_FAULT_ALGN) | \
+ BIT(PERF_BR_MAX + PERF_BR_NEW_FAULT_DATA) | \
+ BIT(PERF_BR_MAX + PERF_BR_NEW_FAULT_INST))
+
+static void prepare_event_branch_type_mask(u64 branch_sample,
+ unsigned long *event_type_mask)
+{
+ if (branch_sample & PERF_SAMPLE_BRANCH_ANY) {
+ if (branch_sample & PERF_SAMPLE_BRANCH_KERNEL)
+ bitmap_from_u64(event_type_mask,
+ BIT(PERF_BR_ERET) | PERF_BR_ARM64_ALL |
+ PERF_BR_ARM64_ALL_KERNEL);
+ else
+ bitmap_from_u64(event_type_mask, PERF_BR_ARM64_ALL);
+ return;
+ }
+
+ bitmap_zero(event_type_mask, PERF_BR_ARM64_MAX);
+
+ if (branch_sample & PERF_SAMPLE_BRANCH_ANY_CALL) {
+ if (branch_sample & PERF_SAMPLE_BRANCH_KERNEL)
+ bitmap_from_u64(event_type_mask, PERF_BR_ARM64_ALL_KERNEL);
+
+ set_bit(PERF_BR_CALL, event_type_mask);
+ set_bit(PERF_BR_IND_CALL, event_type_mask);
+ }
+
+ if (branch_sample & PERF_SAMPLE_BRANCH_IND_JUMP)
+ set_bit(PERF_BR_IND, event_type_mask);
+
+ if (branch_sample & PERF_SAMPLE_BRANCH_COND)
+ set_bit(PERF_BR_COND, event_type_mask);
+
+ if (branch_sample & PERF_SAMPLE_BRANCH_CALL)
+ set_bit(PERF_BR_CALL, event_type_mask);
+
+ if (branch_sample & PERF_SAMPLE_BRANCH_IND_CALL)
+ set_bit(PERF_BR_IND_CALL, event_type_mask);
+
+ if (branch_sample & PERF_SAMPLE_BRANCH_ANY_RETURN) {
+ set_bit(PERF_BR_RET, event_type_mask);
+
+ if (branch_sample & PERF_SAMPLE_BRANCH_KERNEL)
+ set_bit(PERF_BR_ERET, event_type_mask);
+ }
+}
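+
+/*
+ * Illustrative example: for PERF_SAMPLE_BRANCH_ANY_RETURN |
+ * PERF_SAMPLE_BRANCH_KERNEL, prepare_event_branch_type_mask() leaves only
+ * PERF_BR_RET and PERF_BR_ERET set, so every other record type is later
+ * rejected by filter_branch_type().
+ */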
+
+/*
+ * BRBE is configured with an OR of permissions from all events, so there may
+ * be records which have to be dropped, or records where just the source or
+ * target address has to be zeroed.
+ */
+static bool filter_branch_privilege(struct perf_branch_entry *entry, u64 branch_sample_type)
+{
+ bool from_user = access_ok((void __user *)(unsigned long)entry->from, 4);
+ bool to_user = access_ok((void __user *)(unsigned long)entry->to, 4);
+ bool exclude_kernel = !((branch_sample_type & PERF_SAMPLE_BRANCH_KERNEL) ||
+ (is_kernel_in_hyp_mode() && (branch_sample_type & PERF_SAMPLE_BRANCH_HV)));
+
+ /* We can only have a half record if permissions have not been expanded */
+ if (!entry->from || !entry->to)
+ return true;
+
+ /*
+ * If the record is within a single exception level, we just need to
+ * either keep or drop the entire record.
+ */
+ if (from_user == to_user)
+ return ((entry->priv == PERF_BR_PRIV_KERNEL) && !exclude_kernel) ||
+ ((entry->priv == PERF_BR_PRIV_USER) &&
+ (branch_sample_type & PERF_SAMPLE_BRANCH_USER));
+
+ /*
+ * Record is across exception levels, mask addresses for the exception
+ * level we're not capturing.
+ */
+ if (!(branch_sample_type & PERF_SAMPLE_BRANCH_USER)) {
+ if (from_user)
+ entry->from = 0;
+ if (to_user)
+ entry->to = 0;
+ }
+
+ if (exclude_kernel) {
+ if (!from_user)
+ entry->from = 0;
+ if (!to_user)
+ entry->to = 0;
+ }
+
+ return true;
+}
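+
+/*
+ * Illustrative example for filter_branch_privilege(): with a user-only event
+ * (PERF_SAMPLE_BRANCH_USER), a kernel-to-user exception return record keeps
+ * its user-space target address but has the kernel source address zeroed,
+ * while a purely kernel-to-kernel record is dropped entirely.
+ */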
+
+static bool filter_branch_type(struct perf_branch_entry *entry,
+ const unsigned long *event_type_mask)
+{
+ if (entry->type == PERF_BR_EXTEND_ABI)
+ return test_bit(PERF_BR_MAX + entry->new_type, event_type_mask);
+ else
+ return test_bit(entry->type, event_type_mask);
+}
+
+static bool filter_branch_record(struct perf_branch_entry *entry,
+ u64 branch_sample,
+ const unsigned long *event_type_mask)
+{
+ return filter_branch_type(entry, event_type_mask) &&
+ filter_branch_privilege(entry, branch_sample);
+}
+
+void brbe_read_filtered_entries(struct perf_branch_stack *branch_stack,
+ const struct perf_event *event)
+{
+ struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+ int nr_hw = brbe_num_branch_records(cpu_pmu);
+ int nr_banks = DIV_ROUND_UP(nr_hw, BRBE_BANK_MAX_ENTRIES);
+ int nr_filtered = 0;
+ u64 branch_sample_type = event->attr.branch_sample_type;
+ DECLARE_BITMAP(event_type_mask, PERF_BR_ARM64_MAX);
+
+ prepare_event_branch_type_mask(branch_sample_type, event_type_mask);
+
+ for (int bank = 0; bank < nr_banks; bank++) {
+ int nr_remaining = nr_hw - (bank * BRBE_BANK_MAX_ENTRIES);
+ int nr_this_bank = min(nr_remaining, BRBE_BANK_MAX_ENTRIES);
+
+ select_brbe_bank(bank);
+
+ for (int i = 0; i < nr_this_bank; i++) {
+ struct perf_branch_entry *pbe = &branch_stack->entries[nr_filtered];
+
+ if (!perf_entry_from_brbe_regset(i, pbe, event))
+ goto done;
+
+ if (!filter_branch_record(pbe, branch_sample_type, event_type_mask))
+ continue;
+
+ nr_filtered++;
+ }
+ }
+
+done:
+ branch_stack->nr = nr_filtered;
+}
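+
+/*
+ * Sizing note (assuming BRBE_BANK_MAX_ENTRIES is 32): an implementation with
+ * 64 records is read as two banks of 32 entries each, and reading stops at
+ * the first invalid record.
+ */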
diff --git a/drivers/perf/arm_brbe.h b/drivers/perf/arm_brbe.h
new file mode 100644
index 000000000000..b7c7d8796c86
--- /dev/null
+++ b/drivers/perf/arm_brbe.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Branch Record Buffer Extension Helpers.
+ *
+ * Copyright (C) 2022-2025 ARM Limited
+ *
+ * Author: Anshuman Khandual <anshuman.khandual@arm.com>
+ */
+
+struct arm_pmu;
+struct perf_branch_stack;
+struct perf_event;
+
+#ifdef CONFIG_ARM64_BRBE
+void brbe_probe(struct arm_pmu *arm_pmu);
+unsigned int brbe_num_branch_records(const struct arm_pmu *armpmu);
+void brbe_invalidate(void);
+
+void brbe_enable(const struct arm_pmu *arm_pmu);
+void brbe_disable(void);
+
+bool brbe_branch_attr_valid(struct perf_event *event);
+void brbe_read_filtered_entries(struct perf_branch_stack *branch_stack,
+ const struct perf_event *event);
+#else
+static inline void brbe_probe(struct arm_pmu *arm_pmu) { }
+static inline unsigned int brbe_num_branch_records(const struct arm_pmu *armpmu)
+{
+ return 0;
+}
+
+static inline void brbe_invalidate(void) { }
+
+static inline void brbe_enable(const struct arm_pmu *arm_pmu) { }
+static inline void brbe_disable(void) { }
+
+static inline bool brbe_branch_attr_valid(struct perf_event *event)
+{
+ WARN_ON_ONCE(!has_branch_stack(event));
+ return false;
+}
+
+static inline void brbe_read_filtered_entries(struct perf_branch_stack *branch_stack,
+ const struct perf_event *event)
+{
+}
+#endif
diff --git a/drivers/perf/arm_cspmu/Kconfig b/drivers/perf/arm_cspmu/Kconfig
index 25d25ded0983..6f4e28fc84a2 100644
--- a/drivers/perf/arm_cspmu/Kconfig
+++ b/drivers/perf/arm_cspmu/Kconfig
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
#
-# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
config ARM_CORESIGHT_PMU_ARCH_SYSTEM_PMU
tristate "ARM Coresight Architecture PMU"
@@ -10,3 +10,20 @@ config ARM_CORESIGHT_PMU_ARCH_SYSTEM_PMU
based on ARM CoreSight PMU architecture. Note that this PMU
architecture does not have relationship with the ARM CoreSight
Self-Hosted Tracing.
+
+config NVIDIA_CORESIGHT_PMU_ARCH_SYSTEM_PMU
+ tristate "NVIDIA Coresight Architecture PMU"
+ depends on ARM_CORESIGHT_PMU_ARCH_SYSTEM_PMU
+ help
+ Provides NVIDIA-specific attributes for performance monitoring unit
+ (PMU) devices based on the ARM CoreSight PMU architecture.
+
+config AMPERE_CORESIGHT_PMU_ARCH_SYSTEM_PMU
+ tristate "Ampere Coresight Architecture PMU"
+ depends on ARM_CORESIGHT_PMU_ARCH_SYSTEM_PMU
+ help
+ Provides Ampere-specific attributes for performance monitoring unit
+ (PMU) devices based on the ARM CoreSight PMU architecture.
+
+ In the first phase, the driver enables support for the MCU PMU used
+ in the AmpereOne SoC family.
diff --git a/drivers/perf/arm_cspmu/Makefile b/drivers/perf/arm_cspmu/Makefile
index fedb17df982d..220a734efd54 100644
--- a/drivers/perf/arm_cspmu/Makefile
+++ b/drivers/perf/arm_cspmu/Makefile
@@ -1,6 +1,10 @@
-# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_ARM_CORESIGHT_PMU_ARCH_SYSTEM_PMU) += arm_cspmu_module.o
-arm_cspmu_module-y := arm_cspmu.o nvidia_cspmu.o
+
+arm_cspmu_module-y := arm_cspmu.o
+
+obj-$(CONFIG_NVIDIA_CORESIGHT_PMU_ARCH_SYSTEM_PMU) += nvidia_cspmu.o
+obj-$(CONFIG_AMPERE_CORESIGHT_PMU_ARCH_SYSTEM_PMU) += ampere_cspmu.o
diff --git a/drivers/perf/arm_cspmu/ampere_cspmu.c b/drivers/perf/arm_cspmu/ampere_cspmu.c
new file mode 100644
index 000000000000..b8ca69fd9d1d
--- /dev/null
+++ b/drivers/perf/arm_cspmu/ampere_cspmu.c
@@ -0,0 +1,261 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Ampere SoC PMU (Performance Monitor Unit)
+ *
+ * Copyright (c) 2023, Ampere Computing LLC
+ */
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/topology.h>
+
+#include "arm_cspmu.h"
+
+#define PMAUXR0 PMIMPDEF
+#define PMAUXR1 (PMIMPDEF + 0x4)
+#define PMAUXR2 (PMIMPDEF + 0x8)
+#define PMAUXR3 (PMIMPDEF + 0xC)
+
+#define to_ampere_cspmu_ctx(cspmu) ((struct ampere_cspmu_ctx *)(cspmu->impl.ctx))
+
+struct ampere_cspmu_ctx {
+ const char *name;
+ struct attribute **event_attr;
+ struct attribute **format_attr;
+};
+
+static DEFINE_IDA(mcu_pmu_ida);
+
+#define SOC_PMU_EVENT_ATTR_EXTRACTOR(_name, _config, _start, _end) \
+ static inline u32 get_##_name(const struct perf_event *event) \
+ { \
+ return FIELD_GET(GENMASK_ULL(_end, _start), \
+ event->attr._config); \
+ } \
+
+SOC_PMU_EVENT_ATTR_EXTRACTOR(event, config, 0, 8);
+SOC_PMU_EVENT_ATTR_EXTRACTOR(threshold, config1, 0, 7);
+SOC_PMU_EVENT_ATTR_EXTRACTOR(rank, config1, 8, 23);
+SOC_PMU_EVENT_ATTR_EXTRACTOR(bank, config1, 24, 55);
+
+static struct attribute *ampereone_mcu_pmu_event_attrs[] = {
+ ARM_CSPMU_EVENT_ATTR(cycle_count, 0x00),
+ ARM_CSPMU_EVENT_ATTR(act_sent, 0x01),
+ ARM_CSPMU_EVENT_ATTR(pre_sent, 0x02),
+ ARM_CSPMU_EVENT_ATTR(rd_sent, 0x03),
+ ARM_CSPMU_EVENT_ATTR(rda_sent, 0x04),
+ ARM_CSPMU_EVENT_ATTR(wr_sent, 0x05),
+ ARM_CSPMU_EVENT_ATTR(wra_sent, 0x06),
+ ARM_CSPMU_EVENT_ATTR(pd_entry_vld, 0x07),
+ ARM_CSPMU_EVENT_ATTR(sref_entry_vld, 0x08),
+ ARM_CSPMU_EVENT_ATTR(prea_sent, 0x09),
+ ARM_CSPMU_EVENT_ATTR(pre_sb_sent, 0x0a),
+ ARM_CSPMU_EVENT_ATTR(ref_sent, 0x0b),
+ ARM_CSPMU_EVENT_ATTR(rfm_sent, 0x0c),
+ ARM_CSPMU_EVENT_ATTR(ref_sb_sent, 0x0d),
+ ARM_CSPMU_EVENT_ATTR(rfm_sb_sent, 0x0e),
+ ARM_CSPMU_EVENT_ATTR(rd_rda_sent, 0x0f),
+ ARM_CSPMU_EVENT_ATTR(wr_wra_sent, 0x10),
+ ARM_CSPMU_EVENT_ATTR(raw_hazard, 0x11),
+ ARM_CSPMU_EVENT_ATTR(war_hazard, 0x12),
+ ARM_CSPMU_EVENT_ATTR(waw_hazard, 0x13),
+ ARM_CSPMU_EVENT_ATTR(rar_hazard, 0x14),
+ ARM_CSPMU_EVENT_ATTR(raw_war_waw_hazard, 0x15),
+ ARM_CSPMU_EVENT_ATTR(hprd_lprd_wr_req_vld, 0x16),
+ ARM_CSPMU_EVENT_ATTR(lprd_req_vld, 0x17),
+ ARM_CSPMU_EVENT_ATTR(hprd_req_vld, 0x18),
+ ARM_CSPMU_EVENT_ATTR(hprd_lprd_req_vld, 0x19),
+ ARM_CSPMU_EVENT_ATTR(prefetch_tgt, 0x1a),
+ ARM_CSPMU_EVENT_ATTR(wr_req_vld, 0x1b),
+ ARM_CSPMU_EVENT_ATTR(partial_wr_req_vld, 0x1c),
+ ARM_CSPMU_EVENT_ATTR(rd_retry, 0x1d),
+ ARM_CSPMU_EVENT_ATTR(wr_retry, 0x1e),
+ ARM_CSPMU_EVENT_ATTR(retry_gnt, 0x1f),
+ ARM_CSPMU_EVENT_ATTR(rank_change, 0x20),
+ ARM_CSPMU_EVENT_ATTR(dir_change, 0x21),
+ ARM_CSPMU_EVENT_ATTR(rank_dir_change, 0x22),
+ ARM_CSPMU_EVENT_ATTR(rank_active, 0x23),
+ ARM_CSPMU_EVENT_ATTR(rank_idle, 0x24),
+ ARM_CSPMU_EVENT_ATTR(rank_pd, 0x25),
+ ARM_CSPMU_EVENT_ATTR(rank_sref, 0x26),
+ ARM_CSPMU_EVENT_ATTR(queue_fill_gt_thresh, 0x27),
+ ARM_CSPMU_EVENT_ATTR(queue_rds_gt_thresh, 0x28),
+ ARM_CSPMU_EVENT_ATTR(queue_wrs_gt_thresh, 0x29),
+ ARM_CSPMU_EVENT_ATTR(phy_updt_complt, 0x2a),
+ ARM_CSPMU_EVENT_ATTR(tz_fail, 0x2b),
+ ARM_CSPMU_EVENT_ATTR(dram_errc, 0x2c),
+ ARM_CSPMU_EVENT_ATTR(dram_errd, 0x2d),
+ ARM_CSPMU_EVENT_ATTR(read_data_return, 0x32),
+ ARM_CSPMU_EVENT_ATTR(chi_wr_data_delta, 0x33),
+ ARM_CSPMU_EVENT_ATTR(zq_start, 0x34),
+ ARM_CSPMU_EVENT_ATTR(zq_latch, 0x35),
+ ARM_CSPMU_EVENT_ATTR(wr_fifo_full, 0x36),
+ ARM_CSPMU_EVENT_ATTR(info_fifo_full, 0x37),
+ ARM_CSPMU_EVENT_ATTR(cmd_fifo_full, 0x38),
+ ARM_CSPMU_EVENT_ATTR(dfi_nop, 0x39),
+ ARM_CSPMU_EVENT_ATTR(dfi_cmd, 0x3a),
+ ARM_CSPMU_EVENT_ATTR(rd_run_len, 0x3b),
+ ARM_CSPMU_EVENT_ATTR(wr_run_len, 0x3c),
+
+ ARM_CSPMU_EVENT_ATTR(cycles, ARM_CSPMU_EVT_CYCLES_DEFAULT),
+ NULL,
+};
+
+static struct attribute *ampereone_mcu_format_attrs[] = {
+ ARM_CSPMU_FORMAT_EVENT_ATTR,
+ ARM_CSPMU_FORMAT_ATTR(threshold, "config1:0-7"),
+ ARM_CSPMU_FORMAT_ATTR(rank, "config1:8-23"),
+ ARM_CSPMU_FORMAT_ATTR(bank, "config1:24-55"),
+ NULL,
+};
+
+static struct attribute **
+ampere_cspmu_get_event_attrs(const struct arm_cspmu *cspmu)
+{
+ const struct ampere_cspmu_ctx *ctx = to_ampere_cspmu_ctx(cspmu);
+
+ return ctx->event_attr;
+}
+
+static struct attribute **
+ampere_cspmu_get_format_attrs(const struct arm_cspmu *cspmu)
+{
+ const struct ampere_cspmu_ctx *ctx = to_ampere_cspmu_ctx(cspmu);
+
+ return ctx->format_attr;
+}
+
+static const char *
+ampere_cspmu_get_name(const struct arm_cspmu *cspmu)
+{
+ const struct ampere_cspmu_ctx *ctx = to_ampere_cspmu_ctx(cspmu);
+
+ return ctx->name;
+}
+
+static void ampere_cspmu_set_cc_filter(struct arm_cspmu *cspmu,
+ const struct perf_event *event)
+{
+ /*
+ * PMCCFILTR is RES0, so this is just a dummy callback to override
+ * the default implementation and avoid writing to it.
+ */
+}
+
+static void ampere_cspmu_set_ev_filter(struct arm_cspmu *cspmu,
+ const struct perf_event *event)
+{
+ u32 threshold, rank, bank;
+
+ threshold = get_threshold(event);
+ rank = get_rank(event);
+ bank = get_bank(event);
+
+ writel(threshold, cspmu->base0 + PMAUXR0);
+ writel(rank, cspmu->base0 + PMAUXR1);
+ writel(bank, cspmu->base0 + PMAUXR2);
+}
+
+static int ampere_cspmu_validate_configs(struct perf_event *event,
+ struct perf_event *event2)
+{
+ if (get_threshold(event) != get_threshold(event2) ||
+ get_rank(event) != get_rank(event2) ||
+ get_bank(event) != get_bank(event2))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int ampere_cspmu_validate_event(struct arm_cspmu *cspmu,
+ struct perf_event *new)
+{
+ struct perf_event *curr, *leader = new->group_leader;
+ unsigned int idx;
+ int ret;
+
+ ret = ampere_cspmu_validate_configs(new, leader);
+ if (ret)
+ return ret;
+
+ /* We compare the global filter settings to the existing events */
+ idx = find_first_bit(cspmu->hw_events.used_ctrs,
+ cspmu->cycle_counter_logical_idx);
+
+ /* This is the first event, thus any configuration is fine */
+ if (idx == cspmu->cycle_counter_logical_idx)
+ return 0;
+
+ curr = cspmu->hw_events.events[idx];
+
+ return ampere_cspmu_validate_configs(curr, new);
+}
+
+static char *ampere_cspmu_format_name(const struct arm_cspmu *cspmu,
+ const char *name_pattern)
+{
+ struct device *dev = cspmu->dev;
+ int id;
+
+ id = ida_alloc(&mcu_pmu_ida, GFP_KERNEL);
+ if (id < 0)
+ return ERR_PTR(id);
+
+ return devm_kasprintf(dev, GFP_KERNEL, name_pattern, id);
+}
+
+static int ampere_cspmu_init_ops(struct arm_cspmu *cspmu)
+{
+ struct device *dev = cspmu->dev;
+ struct ampere_cspmu_ctx *ctx;
+ struct arm_cspmu_impl_ops *impl_ops = &cspmu->impl.ops;
+
+ ctx = devm_kzalloc(dev, sizeof(struct ampere_cspmu_ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->event_attr = ampereone_mcu_pmu_event_attrs;
+ ctx->format_attr = ampereone_mcu_format_attrs;
+ ctx->name = ampere_cspmu_format_name(cspmu, "ampere_mcu_pmu_%d");
+ if (IS_ERR_OR_NULL(ctx->name))
+ return ctx->name ? PTR_ERR(ctx->name) : -ENOMEM;
+
+ cspmu->impl.ctx = ctx;
+
+ impl_ops->set_cc_filter = ampere_cspmu_set_cc_filter;
+ impl_ops->set_ev_filter = ampere_cspmu_set_ev_filter;
+ impl_ops->validate_event = ampere_cspmu_validate_event;
+ impl_ops->get_name = ampere_cspmu_get_name;
+ impl_ops->get_event_attrs = ampere_cspmu_get_event_attrs;
+ impl_ops->get_format_attrs = ampere_cspmu_get_format_attrs;
+
+ return 0;
+}
+
+/* Match all Ampere Coresight PMU devices */
+static const struct arm_cspmu_impl_match ampere_cspmu_param = {
+ .pmiidr_val = ARM_CSPMU_IMPL_ID_AMPERE,
+ .module = THIS_MODULE,
+ .impl_init_ops = ampere_cspmu_init_ops
+};
+
+static int __init ampere_cspmu_init(void)
+{
+ int ret;
+
+ ret = arm_cspmu_impl_register(&ampere_cspmu_param);
+ if (ret)
+ pr_err("ampere_cspmu backend registration error: %d\n", ret);
+
+ return ret;
+}
+
+static void __exit ampere_cspmu_exit(void)
+{
+ arm_cspmu_impl_unregister(&ampere_cspmu_param);
+}
+
+module_init(ampere_cspmu_init);
+module_exit(ampere_cspmu_exit);
+
+MODULE_DESCRIPTION("Ampere SoC Performance Monitor Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/perf/arm_cspmu/arm_cspmu.c b/drivers/perf/arm_cspmu/arm_cspmu.c
index e2b7827c4563..34430b68f602 100644
--- a/drivers/perf/arm_cspmu/arm_cspmu.c
+++ b/drivers/perf/arm_cspmu/arm_cspmu.c
@@ -16,7 +16,7 @@
* The user should refer to the vendor technical documentation to get details
* about the supported events.
*
- * Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*
*/
@@ -26,11 +26,12 @@
#include <linux/interrupt.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include "arm_cspmu.h"
-#include "nvidia_cspmu.h"
#define PMUNAME "arm_cspmu"
#define DRVNAME "arm-cs-arch-pmu"
@@ -39,51 +40,6 @@
ARM_CSPMU_EXT_ATTR(_name, arm_cspmu_cpumask_show, \
(unsigned long)_config)
-/*
- * CoreSight PMU Arch register offsets.
- */
-#define PMEVCNTR_LO 0x0
-#define PMEVCNTR_HI 0x4
-#define PMEVTYPER 0x400
-#define PMCCFILTR 0x47C
-#define PMEVFILTR 0xA00
-#define PMCNTENSET 0xC00
-#define PMCNTENCLR 0xC20
-#define PMINTENSET 0xC40
-#define PMINTENCLR 0xC60
-#define PMOVSCLR 0xC80
-#define PMOVSSET 0xCC0
-#define PMCFGR 0xE00
-#define PMCR 0xE04
-#define PMIIDR 0xE08
-
-/* PMCFGR register field */
-#define PMCFGR_NCG GENMASK(31, 28)
-#define PMCFGR_HDBG BIT(24)
-#define PMCFGR_TRO BIT(23)
-#define PMCFGR_SS BIT(22)
-#define PMCFGR_FZO BIT(21)
-#define PMCFGR_MSI BIT(20)
-#define PMCFGR_UEN BIT(19)
-#define PMCFGR_NA BIT(17)
-#define PMCFGR_EX BIT(16)
-#define PMCFGR_CCD BIT(15)
-#define PMCFGR_CC BIT(14)
-#define PMCFGR_SIZE GENMASK(13, 8)
-#define PMCFGR_N GENMASK(7, 0)
-
-/* PMCR register field */
-#define PMCR_TRO BIT(11)
-#define PMCR_HDBG BIT(10)
-#define PMCR_FZO BIT(9)
-#define PMCR_NA BIT(8)
-#define PMCR_DP BIT(5)
-#define PMCR_X BIT(4)
-#define PMCR_D BIT(3)
-#define PMCR_C BIT(2)
-#define PMCR_P BIT(1)
-#define PMCR_E BIT(0)
-
/* Each SET/CLR register supports up to 32 counters. */
#define ARM_CSPMU_SET_CLR_COUNTER_SHIFT 5
#define ARM_CSPMU_SET_CLR_COUNTER_NUM \
@@ -100,26 +56,25 @@
#define ARM_CSPMU_ACTIVE_CPU_MASK 0x0
#define ARM_CSPMU_ASSOCIATED_CPU_MASK 0x1
-/* Check and use default if implementer doesn't provide attribute callback */
-#define CHECK_DEFAULT_IMPL_OPS(ops, callback) \
- do { \
- if (!ops->callback) \
- ops->callback = arm_cspmu_ ## callback; \
- } while (0)
-
/*
* Maximum poll count for reading counter value using high-low-high sequence.
*/
#define HILOHI_MAX_POLL 1000
-/* JEDEC-assigned JEP106 identification code */
-#define ARM_CSPMU_IMPL_ID_NVIDIA 0x36B
-
static unsigned long arm_cspmu_cpuhp_state;
+static DEFINE_MUTEX(arm_cspmu_lock);
+
+static void arm_cspmu_set_ev_filter(struct arm_cspmu *cspmu,
+ const struct perf_event *event);
+static void arm_cspmu_set_cc_filter(struct arm_cspmu *cspmu,
+ const struct perf_event *event);
+
static struct acpi_apmt_node *arm_cspmu_apmt_node(struct device *dev)
{
- return *(struct acpi_apmt_node **)dev_get_platdata(dev);
+ struct acpi_apmt_node **ptr = dev_get_platdata(dev);
+
+ return ptr ? *ptr : NULL;
}
/*
@@ -225,19 +180,10 @@ arm_cspmu_event_attr_is_visible(struct kobject *kobj,
return attr->mode;
}
-ssize_t arm_cspmu_sysfs_format_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct dev_ext_attribute *eattr =
- container_of(attr, struct dev_ext_attribute, attr);
- return sysfs_emit(buf, "%s\n", (char *)eattr->var);
-}
-EXPORT_SYMBOL_GPL(arm_cspmu_sysfs_format_show);
-
static struct attribute *arm_cspmu_format_attrs[] = {
ARM_CSPMU_FORMAT_EVENT_ATTR,
ARM_CSPMU_FORMAT_FILTER_ATTR,
+ ARM_CSPMU_FORMAT_FILTER2_ATTR,
NULL,
};
@@ -262,11 +208,6 @@ static bool arm_cspmu_is_cycle_counter_event(const struct perf_event *event)
return (event->attr.config == ARM_CSPMU_EVT_CYCLES_DEFAULT);
}
-static u32 arm_cspmu_event_filter(const struct perf_event *event)
-{
- return event->attr.config1 & ARM_CSPMU_FILTER_MASK;
-}
-
static ssize_t arm_cspmu_identifier_show(struct device *dev,
struct device_attribute *attr,
char *page)
@@ -315,6 +256,10 @@ static const char *arm_cspmu_get_name(const struct arm_cspmu *cspmu)
dev = cspmu->dev;
apmt_node = arm_cspmu_apmt_node(dev);
+ if (!apmt_node)
+ return devm_kasprintf(dev, GFP_KERNEL, PMUNAME "_%u",
+ atomic_fetch_inc(&pmu_idx[0]));
+
pmu_type = apmt_node->type;
if (pmu_type >= ACPI_APMT_NODE_TYPE_COUNT) {
@@ -373,61 +318,133 @@ static struct attribute_group arm_cspmu_cpumask_attr_group = {
.attrs = arm_cspmu_cpumask_attrs,
};
-struct impl_match {
- u32 pmiidr;
- u32 mask;
- int (*impl_init_ops)(struct arm_cspmu *cspmu);
-};
-
-static const struct impl_match impl_match[] = {
+static struct arm_cspmu_impl_match impl_match[] = {
{
- .pmiidr = ARM_CSPMU_IMPL_ID_NVIDIA,
- .mask = ARM_CSPMU_PMIIDR_IMPLEMENTER,
- .impl_init_ops = nv_cspmu_init_ops
+ .module_name = "nvidia_cspmu",
+ .pmiidr_val = ARM_CSPMU_IMPL_ID_NVIDIA,
+ .pmiidr_mask = PMIIDR_IMPLEMENTER,
+ .module = NULL,
+ .impl_init_ops = NULL,
},
- {}
+ {
+ .module_name = "ampere_cspmu",
+ .pmiidr_val = ARM_CSPMU_IMPL_ID_AMPERE,
+ .pmiidr_mask = PMIIDR_IMPLEMENTER,
+ .module = NULL,
+ .impl_init_ops = NULL,
+ },
+
+ {0}
};
+static struct arm_cspmu_impl_match *arm_cspmu_impl_match_get(u32 pmiidr)
+{
+ struct arm_cspmu_impl_match *match = impl_match;
+
+ for (; match->pmiidr_val; match++) {
+ u32 mask = match->pmiidr_mask;
+
+ if ((match->pmiidr_val & mask) == (pmiidr & mask))
+ return match;
+ }
+
+ return NULL;
+}
+
+static u32 arm_cspmu_get_pmiidr(struct arm_cspmu *cspmu)
+{
+ u32 pmiidr, pmpidr;
+
+ pmiidr = readl(cspmu->base0 + PMIIDR);
+
+ if (pmiidr != 0)
+ return pmiidr;
+
+ /* Construct PMIIDR value from PMPIDRs. */
+
+ pmpidr = readl(cspmu->base0 + PMPIDR0);
+ pmiidr |= FIELD_PREP(PMIIDR_PRODUCTID_PART_0,
+ FIELD_GET(PMPIDR0_PART_0, pmpidr));
+
+ pmpidr = readl(cspmu->base0 + PMPIDR1);
+ pmiidr |= FIELD_PREP(PMIIDR_PRODUCTID_PART_1,
+ FIELD_GET(PMPIDR1_PART_1, pmpidr));
+ pmiidr |= FIELD_PREP(PMIIDR_IMPLEMENTER_DES_0,
+ FIELD_GET(PMPIDR1_DES_0, pmpidr));
+
+ pmpidr = readl(cspmu->base0 + PMPIDR2);
+ pmiidr |= FIELD_PREP(PMIIDR_VARIANT,
+ FIELD_GET(PMPIDR2_REVISION, pmpidr));
+ pmiidr |= FIELD_PREP(PMIIDR_IMPLEMENTER_DES_1,
+ FIELD_GET(PMPIDR2_DES_1, pmpidr));
+
+ pmpidr = readl(cspmu->base0 + PMPIDR3);
+ pmiidr |= FIELD_PREP(PMIIDR_REVISION,
+ FIELD_GET(PMPIDR3_REVAND, pmpidr));
+
+ pmpidr = readl(cspmu->base0 + PMPIDR4);
+ pmiidr |= FIELD_PREP(PMIIDR_IMPLEMENTER_DES_2,
+ FIELD_GET(PMPIDR4_DES_2, pmpidr));
+
+ return pmiidr;
+}
+
+#define DEFAULT_IMPL_OP(name) .name = arm_cspmu_##name
+
static int arm_cspmu_init_impl_ops(struct arm_cspmu *cspmu)
{
- int ret;
- struct arm_cspmu_impl_ops *impl_ops = &cspmu->impl.ops;
+ int ret = 0;
struct acpi_apmt_node *apmt_node = arm_cspmu_apmt_node(cspmu->dev);
- const struct impl_match *match = impl_match;
+ struct arm_cspmu_impl_match *match;
+
+ /* Start with a default PMU implementation */
+ cspmu->impl.module = THIS_MODULE;
+ cspmu->impl.pmiidr = arm_cspmu_get_pmiidr(cspmu);
+ cspmu->impl.ops = (struct arm_cspmu_impl_ops) {
+ DEFAULT_IMPL_OP(get_event_attrs),
+ DEFAULT_IMPL_OP(get_format_attrs),
+ DEFAULT_IMPL_OP(get_identifier),
+ DEFAULT_IMPL_OP(get_name),
+ DEFAULT_IMPL_OP(is_cycle_counter_event),
+ DEFAULT_IMPL_OP(event_type),
+ DEFAULT_IMPL_OP(set_cc_filter),
+ DEFAULT_IMPL_OP(set_ev_filter),
+ DEFAULT_IMPL_OP(event_attr_is_visible),
+ };
- /*
- * Get PMU implementer and product id from APMT node.
- * If APMT node doesn't have implementer/product id, try get it
- * from PMIIDR.
- */
- cspmu->impl.pmiidr =
- (apmt_node->impl_id) ? apmt_node->impl_id :
- readl(cspmu->base0 + PMIIDR);
+ /* Firmware may override implementer/product ID from PMIIDR */
+ if (apmt_node && apmt_node->impl_id)
+ cspmu->impl.pmiidr = apmt_node->impl_id;
/* Find implementer specific attribute ops. */
- for (; match->pmiidr; match++) {
- const u32 mask = match->mask;
-
- if ((match->pmiidr & mask) == (cspmu->impl.pmiidr & mask)) {
- ret = match->impl_init_ops(cspmu);
- if (ret)
- return ret;
-
- break;
+ match = arm_cspmu_impl_match_get(cspmu->impl.pmiidr);
+
+ /* Load implementer module and initialize the callbacks. */
+ if (match) {
+ mutex_lock(&arm_cspmu_lock);
+
+ if (match->impl_init_ops) {
+ /* Prevent unload until PMU registration is done. */
+ if (try_module_get(match->module)) {
+ cspmu->impl.module = match->module;
+ cspmu->impl.match = match;
+ ret = match->impl_init_ops(cspmu);
+ if (ret)
+ module_put(match->module);
+ } else {
+ WARN(1, "arm_cspmu failed to get module: %s\n",
+ match->module_name);
+ ret = -EINVAL;
+ }
+ } else {
+ request_module_nowait(match->module_name);
+ ret = -EPROBE_DEFER;
}
- }
- /* Use default callbacks if implementer doesn't provide one. */
- CHECK_DEFAULT_IMPL_OPS(impl_ops, get_event_attrs);
- CHECK_DEFAULT_IMPL_OPS(impl_ops, get_format_attrs);
- CHECK_DEFAULT_IMPL_OPS(impl_ops, get_identifier);
- CHECK_DEFAULT_IMPL_OPS(impl_ops, get_name);
- CHECK_DEFAULT_IMPL_OPS(impl_ops, is_cycle_counter_event);
- CHECK_DEFAULT_IMPL_OPS(impl_ops, event_type);
- CHECK_DEFAULT_IMPL_OPS(impl_ops, event_filter);
- CHECK_DEFAULT_IMPL_OPS(impl_ops, event_attr_is_visible);
+ mutex_unlock(&arm_cspmu_lock);
+ }
- return 0;
+ return ret;
}
static struct attribute_group *
@@ -472,28 +489,16 @@ arm_cspmu_alloc_format_attr_group(struct arm_cspmu *cspmu)
return format_group;
}
-static struct attribute_group **
-arm_cspmu_alloc_attr_group(struct arm_cspmu *cspmu)
+static int arm_cspmu_alloc_attr_groups(struct arm_cspmu *cspmu)
{
- struct attribute_group **attr_groups = NULL;
- struct device *dev = cspmu->dev;
+ const struct attribute_group **attr_groups = cspmu->attr_groups;
const struct arm_cspmu_impl_ops *impl_ops = &cspmu->impl.ops;
- int ret;
-
- ret = arm_cspmu_init_impl_ops(cspmu);
- if (ret)
- return NULL;
cspmu->identifier = impl_ops->get_identifier(cspmu);
cspmu->name = impl_ops->get_name(cspmu);
if (!cspmu->identifier || !cspmu->name)
- return NULL;
-
- attr_groups = devm_kcalloc(dev, 5, sizeof(struct attribute_group *),
- GFP_KERNEL);
- if (!attr_groups)
- return NULL;
+ return -ENOMEM;
attr_groups[0] = arm_cspmu_alloc_event_attr_group(cspmu);
attr_groups[1] = arm_cspmu_alloc_format_attr_group(cspmu);
@@ -501,18 +506,14 @@ arm_cspmu_alloc_attr_group(struct arm_cspmu *cspmu)
attr_groups[3] = &arm_cspmu_cpumask_attr_group;
if (!attr_groups[0] || !attr_groups[1])
- return NULL;
+ return -ENOMEM;
- return attr_groups;
+ return 0;
}
static inline void arm_cspmu_reset_counters(struct arm_cspmu *cspmu)
{
- u32 pmcr = 0;
-
- pmcr |= PMCR_P;
- pmcr |= PMCR_C;
- writel(pmcr, cspmu->base0 + PMCR);
+ writel(PMCR_C | PMCR_P, cspmu->base0 + PMCR);
}
static inline void arm_cspmu_start_counters(struct arm_cspmu *cspmu)
@@ -549,7 +550,7 @@ static void arm_cspmu_disable(struct pmu *pmu)
static int arm_cspmu_get_event_idx(struct arm_cspmu_hw_events *hw_events,
struct perf_event *event)
{
- int idx;
+ int idx, ret;
struct arm_cspmu *cspmu = to_arm_cspmu(event->pmu);
if (supports_cycle_counter(cspmu)) {
@@ -583,6 +584,12 @@ static int arm_cspmu_get_event_idx(struct arm_cspmu_hw_events *hw_events,
if (idx >= cspmu->num_logical_ctrs)
return -EAGAIN;
+ if (cspmu->impl.ops.validate_event) {
+ ret = cspmu->impl.ops.validate_event(cspmu, event);
+ if (ret)
+ return ret;
+ }
+
set_bit(idx, hw_events->used_ctrs);
return idx;
@@ -635,6 +642,9 @@ static int arm_cspmu_event_init(struct perf_event *event)
cspmu = to_arm_cspmu(event->pmu);
+ if (event->attr.type != event->pmu->type)
+ return -ENOENT;
+
/*
* Following other "uncore" PMUs, we do not support sampling mode or
* attach to a task (per-process mode).
@@ -696,7 +706,10 @@ static void arm_cspmu_write_counter(struct perf_event *event, u64 val)
if (use_64b_counter_reg(cspmu)) {
offset = counter_offset(sizeof(u64), event->hw.idx);
- writeq(val, cspmu->base1 + offset);
+ if (cspmu->has_atomic_dword)
+ writeq(val, cspmu->base1 + offset);
+ else
+ lo_hi_writeq(val, cspmu->base1 + offset);
} else {
offset = counter_offset(sizeof(u32), event->hw.idx);
@@ -789,27 +802,29 @@ static inline void arm_cspmu_set_event(struct arm_cspmu *cspmu,
writel(hwc->config, cspmu->base0 + offset);
}
-static inline void arm_cspmu_set_ev_filter(struct arm_cspmu *cspmu,
- struct hw_perf_event *hwc,
- u32 filter)
+static void arm_cspmu_set_ev_filter(struct arm_cspmu *cspmu,
+ const struct perf_event *event)
{
- u32 offset = PMEVFILTR + (4 * hwc->idx);
+ u32 filter = event->attr.config1 & ARM_CSPMU_FILTER_MASK;
+ u32 filter2 = event->attr.config2 & ARM_CSPMU_FILTER_MASK;
+ u32 offset = 4 * event->hw.idx;
- writel(filter, cspmu->base0 + offset);
+ writel(filter, cspmu->base0 + PMEVFILTR + offset);
+ writel(filter2, cspmu->base0 + PMEVFILT2R + offset);
}
-static inline void arm_cspmu_set_cc_filter(struct arm_cspmu *cspmu, u32 filter)
+static void arm_cspmu_set_cc_filter(struct arm_cspmu *cspmu,
+ const struct perf_event *event)
{
- u32 offset = PMCCFILTR;
+ u32 filter = event->attr.config1 & ARM_CSPMU_FILTER_MASK;
- writel(filter, cspmu->base0 + offset);
+ writel(filter, cspmu->base0 + PMCCFILTR);
}
static void arm_cspmu_start(struct perf_event *event, int pmu_flags)
{
struct arm_cspmu *cspmu = to_arm_cspmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
- u32 filter;
/* We always reprogram the counter */
if (pmu_flags & PERF_EF_RELOAD)
@@ -817,13 +832,11 @@ static void arm_cspmu_start(struct perf_event *event, int pmu_flags)
arm_cspmu_set_event_period(event);
- filter = cspmu->impl.ops.event_filter(event);
-
if (event->hw.extra_reg.idx == cspmu->cycle_counter_logical_idx) {
- arm_cspmu_set_cc_filter(cspmu, filter);
+ cspmu->impl.ops.set_cc_filter(cspmu, event);
} else {
arm_cspmu_set_event(cspmu, hwc);
- arm_cspmu_set_ev_filter(cspmu, hwc, filter);
+ cspmu->impl.ops.set_ev_filter(cspmu, event);
}
hwc->state = 0;
@@ -840,6 +853,10 @@ static void arm_cspmu_stop(struct perf_event *event, int pmu_flags)
return;
arm_cspmu_disable_counter(cspmu, hwc->idx);
+
+ if (cspmu->impl.ops.reset_ev_filter)
+ cspmu->impl.ops.reset_ev_filter(cspmu, event);
+
arm_cspmu_event_update(event);
hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
@@ -915,7 +932,14 @@ static struct arm_cspmu *arm_cspmu_alloc(struct platform_device *pdev)
platform_set_drvdata(pdev, cspmu);
apmt_node = arm_cspmu_apmt_node(dev);
- cspmu->has_atomic_dword = apmt_node->flags & ACPI_APMT_FLAGS_ATOMIC;
+ if (apmt_node) {
+ cspmu->has_atomic_dword = apmt_node->flags & ACPI_APMT_FLAGS_ATOMIC;
+ } else {
+ u32 width = 0;
+
+ device_property_read_u32(dev, "reg-io-width", &width);
+ cspmu->has_atomic_dword = (width == 8);
+ }
return cspmu;
}
@@ -1061,7 +1085,6 @@ static int arm_cspmu_request_irq(struct arm_cspmu *cspmu)
static inline int arm_cspmu_find_cpu_container(int cpu, u32 container_uid)
{
- u32 acpi_uid;
struct device *cpu_dev;
struct acpi_device *acpi_dev;
@@ -1071,10 +1094,7 @@ static inline int arm_cspmu_find_cpu_container(int cpu, u32 container_uid)
acpi_dev = ACPI_COMPANION(cpu_dev);
while (acpi_dev) {
- if (!strcmp(acpi_device_hid(acpi_dev),
- ACPI_PROCESSOR_CONTAINER_HID) &&
- !kstrtouint(acpi_device_uid(acpi_dev), 0, &acpi_uid) &&
- acpi_uid == container_uid)
+ if (acpi_dev_hid_uid_match(acpi_dev, ACPI_PROCESSOR_CONTAINER_HID, container_uid))
return 0;
acpi_dev = acpi_dev_parent(acpi_dev);
@@ -1110,11 +1130,6 @@ static int arm_cspmu_acpi_get_cpus(struct arm_cspmu *cspmu)
}
}
- if (cpumask_empty(&cspmu->associated_cpus)) {
- dev_dbg(cspmu->dev, "No cpu associated with the PMU\n");
- return -ENODEV;
- }
-
return 0;
}
#else
@@ -1124,19 +1139,45 @@ static int arm_cspmu_acpi_get_cpus(struct arm_cspmu *cspmu)
}
#endif
+static int arm_cspmu_of_get_cpus(struct arm_cspmu *cspmu)
+{
+ struct of_phandle_iterator it;
+ int ret, cpu;
+
+ of_for_each_phandle(&it, ret, dev_of_node(cspmu->dev), "cpus", NULL, 0) {
+ cpu = of_cpu_node_to_id(it.node);
+ if (cpu < 0)
+ continue;
+ cpumask_set_cpu(cpu, &cspmu->associated_cpus);
+ }
+ return ret == -ENOENT ? 0 : ret;
+}
+
static int arm_cspmu_get_cpus(struct arm_cspmu *cspmu)
{
- return arm_cspmu_acpi_get_cpus(cspmu);
+ int ret = 0;
+
+ if (arm_cspmu_apmt_node(cspmu->dev))
+ ret = arm_cspmu_acpi_get_cpus(cspmu);
+ else if (device_property_present(cspmu->dev, "cpus"))
+ ret = arm_cspmu_of_get_cpus(cspmu);
+ else
+ cpumask_copy(&cspmu->associated_cpus, cpu_possible_mask);
+
+ if (!ret && cpumask_empty(&cspmu->associated_cpus)) {
+ dev_dbg(cspmu->dev, "No cpu associated with the PMU\n");
+ ret = -ENODEV;
+ }
+ return ret;
}
static int arm_cspmu_register_pmu(struct arm_cspmu *cspmu)
{
int ret, capabilities;
- struct attribute_group **attr_groups;
- attr_groups = arm_cspmu_alloc_attr_group(cspmu);
- if (!attr_groups)
- return -ENOMEM;
+ ret = arm_cspmu_alloc_attr_groups(cspmu);
+ if (ret)
+ return ret;
ret = cpuhp_state_add_instance(arm_cspmu_cpuhp_state,
&cspmu->cpuhp_node);
@@ -1149,7 +1190,8 @@ static int arm_cspmu_register_pmu(struct arm_cspmu *cspmu)
cspmu->pmu = (struct pmu){
.task_ctx_nr = perf_invalid_context,
- .module = THIS_MODULE,
+ .module = cspmu->impl.module,
+ .parent = cspmu->dev,
.pmu_enable = arm_cspmu_enable,
.pmu_disable = arm_cspmu_disable,
.event_init = arm_cspmu_event_init,
@@ -1158,12 +1200,11 @@ static int arm_cspmu_register_pmu(struct arm_cspmu *cspmu)
.start = arm_cspmu_start,
.stop = arm_cspmu_stop,
.read = arm_cspmu_read,
- .attr_groups = (const struct attribute_group **)attr_groups,
+ .attr_groups = cspmu->attr_groups,
.capabilities = capabilities,
};
/* Hardware counter init */
- arm_cspmu_stop_counters(cspmu);
arm_cspmu_reset_counters(cspmu);
ret = perf_pmu_register(&cspmu->pmu, cspmu->name, -1);
@@ -1196,21 +1237,25 @@ static int arm_cspmu_device_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = arm_cspmu_register_pmu(cspmu);
+ ret = arm_cspmu_init_impl_ops(cspmu);
if (ret)
return ret;
- return 0;
+ ret = arm_cspmu_register_pmu(cspmu);
+
+ /* Matches arm_cspmu_init_impl_ops() above. */
+ if (cspmu->impl.module != THIS_MODULE)
+ module_put(cspmu->impl.module);
+
+ return ret;
}
-static int arm_cspmu_device_remove(struct platform_device *pdev)
+static void arm_cspmu_device_remove(struct platform_device *pdev)
{
struct arm_cspmu *cspmu = platform_get_drvdata(pdev);
perf_pmu_unregister(&cspmu->pmu);
cpuhp_state_remove_instance(arm_cspmu_cpuhp_state, &cspmu->cpuhp_node);
-
- return 0;
}
static const struct platform_device_id arm_cspmu_id[] = {
@@ -1219,11 +1264,18 @@ static const struct platform_device_id arm_cspmu_id[] = {
};
MODULE_DEVICE_TABLE(platform, arm_cspmu_id);
+static const struct of_device_id arm_cspmu_of_match[] = {
+ { .compatible = "arm,coresight-pmu" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, arm_cspmu_of_match);
+
static struct platform_driver arm_cspmu_driver = {
.driver = {
- .name = DRVNAME,
- .suppress_bind_attrs = true,
- },
+ .name = DRVNAME,
+ .of_match_table = arm_cspmu_of_match,
+ .suppress_bind_attrs = true,
+ },
.probe = arm_cspmu_device_probe,
.remove = arm_cspmu_device_remove,
.id_table = arm_cspmu_id,
@@ -1256,8 +1308,7 @@ static int arm_cspmu_cpu_online(unsigned int cpu, struct hlist_node *node)
static int arm_cspmu_cpu_teardown(unsigned int cpu, struct hlist_node *node)
{
- int dst;
- struct cpumask online_supported;
+ unsigned int dst;
struct arm_cspmu *cspmu =
hlist_entry_safe(node, struct arm_cspmu, cpuhp_node);
@@ -1267,9 +1318,8 @@ static int arm_cspmu_cpu_teardown(unsigned int cpu, struct hlist_node *node)
return 0;
/* Choose a new CPU to migrate ownership of the PMU to */
- cpumask_and(&online_supported, &cspmu->associated_cpus,
- cpu_online_mask);
- dst = cpumask_any_but(&online_supported, cpu);
+ dst = cpumask_any_and_but(&cspmu->associated_cpus,
+ cpu_online_mask, cpu);
if (dst >= nr_cpu_ids)
return 0;
@@ -1300,7 +1350,79 @@ static void __exit arm_cspmu_exit(void)
cpuhp_remove_multi_state(arm_cspmu_cpuhp_state);
}
+int arm_cspmu_impl_register(const struct arm_cspmu_impl_match *impl_match)
+{
+ struct arm_cspmu_impl_match *match;
+ int ret = 0;
+
+ match = arm_cspmu_impl_match_get(impl_match->pmiidr_val);
+
+ if (match) {
+ mutex_lock(&arm_cspmu_lock);
+
+ if (!match->impl_init_ops) {
+ match->module = impl_match->module;
+ match->impl_init_ops = impl_match->impl_init_ops;
+ } else {
+ /* Broken match table may contain non-unique entries */
+ WARN(1, "arm_cspmu backend already registered for module: %s, pmiidr: 0x%x, mask: 0x%x\n",
+ match->module_name,
+ match->pmiidr_val,
+ match->pmiidr_mask);
+
+ ret = -EINVAL;
+ }
+
+ mutex_unlock(&arm_cspmu_lock);
+
+ if (!ret)
+ ret = driver_attach(&arm_cspmu_driver.driver);
+ } else {
+ pr_err("arm_cspmu reg failed, unable to find a match for pmiidr: 0x%x\n",
+ impl_match->pmiidr_val);
+
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(arm_cspmu_impl_register);
+
+static int arm_cspmu_match_device(struct device *dev, const void *match)
+{
+ struct arm_cspmu *cspmu = platform_get_drvdata(to_platform_device(dev));
+
+ return (cspmu && cspmu->impl.match == match) ? 1 : 0;
+}
+
+void arm_cspmu_impl_unregister(const struct arm_cspmu_impl_match *impl_match)
+{
+ struct device *dev;
+ struct arm_cspmu_impl_match *match;
+
+ match = arm_cspmu_impl_match_get(impl_match->pmiidr_val);
+
+ if (WARN_ON(!match))
+ return;
+
+ /* Unbind the driver from all matching backend devices. */
+ while ((dev = driver_find_device(&arm_cspmu_driver.driver, NULL,
+ match, arm_cspmu_match_device))) {
+ device_release_driver(dev);
+ put_device(dev);
+ }
+
+ mutex_lock(&arm_cspmu_lock);
+
+ match->module = NULL;
+ match->impl_init_ops = NULL;
+
+ mutex_unlock(&arm_cspmu_lock);
+}
+EXPORT_SYMBOL_GPL(arm_cspmu_impl_unregister);
+
module_init(arm_cspmu_init);
module_exit(arm_cspmu_exit);
+MODULE_DESCRIPTION("ARM CoreSight Architecture Performance Monitor Driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/perf/arm_cspmu/arm_cspmu.h b/drivers/perf/arm_cspmu/arm_cspmu.h
index 83df53d1c132..cd65a58dbd88 100644
--- a/drivers/perf/arm_cspmu/arm_cspmu.h
+++ b/drivers/perf/arm_cspmu/arm_cspmu.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0
*
* ARM CoreSight Architecture PMU driver.
- * Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*
*/
@@ -28,7 +28,7 @@
})[0].attr.attr)
#define ARM_CSPMU_FORMAT_ATTR(_name, _config) \
- ARM_CSPMU_EXT_ATTR(_name, arm_cspmu_sysfs_format_show, (char *)_config)
+ ARM_CSPMU_EXT_ATTR(_name, device_show_string, _config)
#define ARM_CSPMU_EVENT_ATTR(_name, _config) \
PMU_EVENT_ATTR_ID(_name, arm_cspmu_sysfs_event_show, _config)
@@ -47,6 +47,8 @@
/* Default filter format */
#define ARM_CSPMU_FORMAT_FILTER_ATTR \
ARM_CSPMU_FORMAT_ATTR(filter, "config1:0-31")
+#define ARM_CSPMU_FORMAT_FILTER2_ATTR \
+ ARM_CSPMU_FORMAT_ATTR(filter2, "config2:0-31")
/*
* This is the default event number for cycle count, if supported, since the
@@ -65,9 +67,91 @@
/* The cycle counter, if implemented, is located at counter[31]. */
#define ARM_CSPMU_CYCLE_CNTR_IDX 31
+/*
+ * CoreSight PMU Arch register offsets.
+ */
+#define PMEVCNTR_LO 0x0
+#define PMEVCNTR_HI 0x4
+#define PMEVTYPER 0x400
+#define PMCCFILTR 0x47C
+#define PMEVFILT2R 0x800
+#define PMEVFILTR 0xA00
+#define PMCNTENSET 0xC00
+#define PMCNTENCLR 0xC20
+#define PMINTENSET 0xC40
+#define PMINTENCLR 0xC60
+#define PMOVSCLR 0xC80
+#define PMOVSSET 0xCC0
+#define PMIMPDEF 0xD80
+#define PMCFGR 0xE00
+#define PMCR 0xE04
+#define PMIIDR 0xE08
+#define PMPIDR0 0xFE0
+#define PMPIDR1 0xFE4
+#define PMPIDR2 0xFE8
+#define PMPIDR3 0xFEC
+#define PMPIDR4 0xFD0
+
+/* PMCFGR register field */
+#define PMCFGR_NCG GENMASK(31, 28)
+#define PMCFGR_HDBG BIT(24)
+#define PMCFGR_TRO BIT(23)
+#define PMCFGR_SS BIT(22)
+#define PMCFGR_FZO BIT(21)
+#define PMCFGR_MSI BIT(20)
+#define PMCFGR_UEN BIT(19)
+#define PMCFGR_NA BIT(17)
+#define PMCFGR_EX BIT(16)
+#define PMCFGR_CCD BIT(15)
+#define PMCFGR_CC BIT(14)
+#define PMCFGR_SIZE GENMASK(13, 8)
+#define PMCFGR_N GENMASK(7, 0)
+
+/* PMCR register field */
+#define PMCR_TRO BIT(11)
+#define PMCR_HDBG BIT(10)
+#define PMCR_FZO BIT(9)
+#define PMCR_NA BIT(8)
+#define PMCR_DP BIT(5)
+#define PMCR_X BIT(4)
+#define PMCR_D BIT(3)
+#define PMCR_C BIT(2)
+#define PMCR_P BIT(1)
+#define PMCR_E BIT(0)
+
/* PMIIDR register field */
-#define ARM_CSPMU_PMIIDR_IMPLEMENTER GENMASK(11, 0)
-#define ARM_CSPMU_PMIIDR_PRODUCTID GENMASK(31, 20)
+#define PMIIDR_IMPLEMENTER GENMASK(11, 0)
+#define PMIIDR_IMPLEMENTER_DES_0 GENMASK(3, 0)
+#define PMIIDR_IMPLEMENTER_DES_1 GENMASK(6, 4)
+#define PMIIDR_IMPLEMENTER_DES_2 GENMASK(11, 8)
+#define PMIIDR_REVISION GENMASK(15, 12)
+#define PMIIDR_VARIANT GENMASK(19, 16)
+#define PMIIDR_PRODUCTID GENMASK(31, 20)
+#define PMIIDR_PRODUCTID_PART_0 GENMASK(27, 20)
+#define PMIIDR_PRODUCTID_PART_1 GENMASK(31, 28)
+
+/* PMPIDR0 register field */
+#define PMPIDR0_PART_0 GENMASK(7, 0)
+
+/* PMPIDR1 register field */
+#define PMPIDR1_DES_0 GENMASK(7, 4)
+#define PMPIDR1_PART_1 GENMASK(3, 0)
+
+/* PMPIDR2 register field */
+#define PMPIDR2_REVISION GENMASK(7, 4)
+#define PMPIDR2_DES_1 GENMASK(2, 0)
+
+/* PMPIDR3 register field */
+#define PMPIDR3_REVAND GENMASK(7, 4)
+#define PMPIDR3_CMOD GENMASK(3, 0)
+
+/* PMPIDR4 register field */
+#define PMPIDR4_SIZE GENMASK(7, 4)
+#define PMPIDR4_DES_2 GENMASK(3, 0)
+
+/* JEDEC-assigned JEP106 identification code */
+#define ARM_CSPMU_IMPL_ID_NVIDIA 0x36B
+#define ARM_CSPMU_IMPL_ID_AMPERE 0xA16
struct arm_cspmu;
@@ -99,16 +183,38 @@ struct arm_cspmu_impl_ops {
bool (*is_cycle_counter_event)(const struct perf_event *event);
/* Decode event type/id from configs */
u32 (*event_type)(const struct perf_event *event);
- /* Decode filter value from configs */
- u32 (*event_filter)(const struct perf_event *event);
+ /* Set/reset event filters */
+ void (*set_cc_filter)(struct arm_cspmu *cspmu,
+ const struct perf_event *event);
+ void (*set_ev_filter)(struct arm_cspmu *cspmu,
+ const struct perf_event *event);
+ void (*reset_ev_filter)(struct arm_cspmu *cspmu,
+ const struct perf_event *event);
+ /* Implementation specific event validation */
+ int (*validate_event)(struct arm_cspmu *cspmu,
+ struct perf_event *event);
/* Hide/show unsupported events */
umode_t (*event_attr_is_visible)(struct kobject *kobj,
struct attribute *attr, int unused);
};
+/* Vendor/implementer registration parameter. */
+struct arm_cspmu_impl_match {
+ /* Backend module. */
+ struct module *module;
+ const char *module_name;
+ /* PMIIDR value/mask. */
+ u32 pmiidr_val;
+ u32 pmiidr_mask;
+ /* Callback to vendor backend to init arm_cspmu_impl::ops. */
+ int (*impl_init_ops)(struct arm_cspmu *cspmu);
+};
+
/* Vendor/implementer descriptor. */
struct arm_cspmu_impl {
u32 pmiidr;
+ struct module *module;
+ struct arm_cspmu_impl_match *match;
struct arm_cspmu_impl_ops ops;
void *ctx;
};
@@ -133,6 +239,7 @@ struct arm_cspmu {
int cycle_counter_logical_idx;
struct arm_cspmu_hw_events hw_events;
+ const struct attribute_group *attr_groups[5];
struct arm_cspmu_impl impl;
};
@@ -142,9 +249,10 @@ ssize_t arm_cspmu_sysfs_event_show(struct device *dev,
struct device_attribute *attr,
char *buf);
-/* Default function to show format attribute in sysfs. */
-ssize_t arm_cspmu_sysfs_format_show(struct device *dev,
- struct device_attribute *attr,
- char *buf);
+/* Register vendor backend. */
+int arm_cspmu_impl_register(const struct arm_cspmu_impl_match *impl_match);
+
+/* Unregister vendor backend. */
+void arm_cspmu_impl_unregister(const struct arm_cspmu_impl_match *impl_match);
#endif /* __ARM_CSPMU_H__ */
diff --git a/drivers/perf/arm_cspmu/nvidia_cspmu.c b/drivers/perf/arm_cspmu/nvidia_cspmu.c
index 72ef80caa3c8..e06a06d3407b 100644
--- a/drivers/perf/arm_cspmu/nvidia_cspmu.c
+++ b/drivers/perf/arm_cspmu/nvidia_cspmu.c
@@ -1,14 +1,16 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*
*/
/* Support for NVIDIA specific attributes. */
+#include <linux/io.h>
+#include <linux/module.h>
#include <linux/topology.h>
-#include "nvidia_cspmu.h"
+#include "arm_cspmu.h"
#define NV_PCIE_PORT_COUNT 10ULL
#define NV_PCIE_FILTER_ID_MASK GENMASK_ULL(NV_PCIE_PORT_COUNT - 1, 0)
@@ -21,7 +23,7 @@
#define NV_GENERIC_FILTER_ID_MASK GENMASK_ULL(31, 0)
-#define NV_PRODID_MASK GENMASK(31, 0)
+#define NV_PRODID_MASK (PMIIDR_PRODUCTID | PMIIDR_VARIANT | PMIIDR_REVISION)
#define NV_FORMAT_NAME_GENERIC 0
@@ -38,10 +40,21 @@
struct nv_cspmu_ctx {
const char *name;
- u32 filter_mask;
- u32 filter_default_val;
+
struct attribute **event_attr;
struct attribute **format_attr;
+
+ u32 filter_mask;
+ u32 filter_default_val;
+ u32 filter2_mask;
+ u32 filter2_default_val;
+
+ u32 (*get_filter)(const struct perf_event *event);
+ u32 (*get_filter2)(const struct perf_event *event);
+
+ void *data;
+
+ int (*init_data)(struct arm_cspmu *cspmu);
};
static struct attribute *scf_pmu_event_attrs[] = {
@@ -53,65 +66,24 @@ static struct attribute *scf_pmu_event_attrs[] = {
ARM_CSPMU_EVENT_ATTR(scf_cache_wb, 0xF3),
NV_CSPMU_EVENT_ATTR_4(socket, rd_data, 0x101),
- NV_CSPMU_EVENT_ATTR_4(socket, dl_rsp, 0x105),
NV_CSPMU_EVENT_ATTR_4(socket, wb_data, 0x109),
- NV_CSPMU_EVENT_ATTR_4(socket, ev_rsp, 0x10d),
- NV_CSPMU_EVENT_ATTR_4(socket, prb_data, 0x111),
NV_CSPMU_EVENT_ATTR_4(socket, rd_outstanding, 0x115),
- NV_CSPMU_EVENT_ATTR_4(socket, dl_outstanding, 0x119),
- NV_CSPMU_EVENT_ATTR_4(socket, wb_outstanding, 0x11d),
- NV_CSPMU_EVENT_ATTR_4(socket, wr_outstanding, 0x121),
- NV_CSPMU_EVENT_ATTR_4(socket, ev_outstanding, 0x125),
- NV_CSPMU_EVENT_ATTR_4(socket, prb_outstanding, 0x129),
NV_CSPMU_EVENT_ATTR_4(socket, rd_access, 0x12d),
- NV_CSPMU_EVENT_ATTR_4(socket, dl_access, 0x131),
NV_CSPMU_EVENT_ATTR_4(socket, wb_access, 0x135),
NV_CSPMU_EVENT_ATTR_4(socket, wr_access, 0x139),
- NV_CSPMU_EVENT_ATTR_4(socket, ev_access, 0x13d),
- NV_CSPMU_EVENT_ATTR_4(socket, prb_access, 0x141),
-
- NV_CSPMU_EVENT_ATTR_4(ocu, gmem_rd_data, 0x145),
- NV_CSPMU_EVENT_ATTR_4(ocu, gmem_rd_access, 0x149),
- NV_CSPMU_EVENT_ATTR_4(ocu, gmem_wb_access, 0x14d),
- NV_CSPMU_EVENT_ATTR_4(ocu, gmem_rd_outstanding, 0x151),
- NV_CSPMU_EVENT_ATTR_4(ocu, gmem_wr_outstanding, 0x155),
-
- NV_CSPMU_EVENT_ATTR_4(ocu, rem_rd_data, 0x159),
- NV_CSPMU_EVENT_ATTR_4(ocu, rem_rd_access, 0x15d),
- NV_CSPMU_EVENT_ATTR_4(ocu, rem_wb_access, 0x161),
- NV_CSPMU_EVENT_ATTR_4(ocu, rem_rd_outstanding, 0x165),
- NV_CSPMU_EVENT_ATTR_4(ocu, rem_wr_outstanding, 0x169),
ARM_CSPMU_EVENT_ATTR(gmem_rd_data, 0x16d),
ARM_CSPMU_EVENT_ATTR(gmem_rd_access, 0x16e),
ARM_CSPMU_EVENT_ATTR(gmem_rd_outstanding, 0x16f),
- ARM_CSPMU_EVENT_ATTR(gmem_dl_rsp, 0x170),
- ARM_CSPMU_EVENT_ATTR(gmem_dl_access, 0x171),
- ARM_CSPMU_EVENT_ATTR(gmem_dl_outstanding, 0x172),
ARM_CSPMU_EVENT_ATTR(gmem_wb_data, 0x173),
ARM_CSPMU_EVENT_ATTR(gmem_wb_access, 0x174),
- ARM_CSPMU_EVENT_ATTR(gmem_wb_outstanding, 0x175),
- ARM_CSPMU_EVENT_ATTR(gmem_ev_rsp, 0x176),
- ARM_CSPMU_EVENT_ATTR(gmem_ev_access, 0x177),
- ARM_CSPMU_EVENT_ATTR(gmem_ev_outstanding, 0x178),
ARM_CSPMU_EVENT_ATTR(gmem_wr_data, 0x179),
- ARM_CSPMU_EVENT_ATTR(gmem_wr_outstanding, 0x17a),
ARM_CSPMU_EVENT_ATTR(gmem_wr_access, 0x17b),
NV_CSPMU_EVENT_ATTR_4(socket, wr_data, 0x17c),
- NV_CSPMU_EVENT_ATTR_4(ocu, gmem_wr_data, 0x180),
- NV_CSPMU_EVENT_ATTR_4(ocu, gmem_wb_data, 0x184),
- NV_CSPMU_EVENT_ATTR_4(ocu, gmem_wr_access, 0x188),
- NV_CSPMU_EVENT_ATTR_4(ocu, gmem_wb_outstanding, 0x18c),
-
- NV_CSPMU_EVENT_ATTR_4(ocu, rem_wr_data, 0x190),
- NV_CSPMU_EVENT_ATTR_4(ocu, rem_wb_data, 0x194),
- NV_CSPMU_EVENT_ATTR_4(ocu, rem_wr_access, 0x198),
- NV_CSPMU_EVENT_ATTR_4(ocu, rem_wb_outstanding, 0x19c),
-
ARM_CSPMU_EVENT_ATTR(gmem_wr_total_bytes, 0x1a0),
ARM_CSPMU_EVENT_ATTR(remote_socket_wr_total_bytes, 0x1a1),
ARM_CSPMU_EVENT_ATTR(remote_socket_rd_data, 0x1a2),
@@ -121,35 +93,12 @@ static struct attribute *scf_pmu_event_attrs[] = {
ARM_CSPMU_EVENT_ATTR(cmem_rd_data, 0x1a5),
ARM_CSPMU_EVENT_ATTR(cmem_rd_access, 0x1a6),
ARM_CSPMU_EVENT_ATTR(cmem_rd_outstanding, 0x1a7),
- ARM_CSPMU_EVENT_ATTR(cmem_dl_rsp, 0x1a8),
- ARM_CSPMU_EVENT_ATTR(cmem_dl_access, 0x1a9),
- ARM_CSPMU_EVENT_ATTR(cmem_dl_outstanding, 0x1aa),
ARM_CSPMU_EVENT_ATTR(cmem_wb_data, 0x1ab),
ARM_CSPMU_EVENT_ATTR(cmem_wb_access, 0x1ac),
- ARM_CSPMU_EVENT_ATTR(cmem_wb_outstanding, 0x1ad),
- ARM_CSPMU_EVENT_ATTR(cmem_ev_rsp, 0x1ae),
- ARM_CSPMU_EVENT_ATTR(cmem_ev_access, 0x1af),
- ARM_CSPMU_EVENT_ATTR(cmem_ev_outstanding, 0x1b0),
ARM_CSPMU_EVENT_ATTR(cmem_wr_data, 0x1b1),
- ARM_CSPMU_EVENT_ATTR(cmem_wr_outstanding, 0x1b2),
-
- NV_CSPMU_EVENT_ATTR_4(ocu, cmem_rd_data, 0x1b3),
- NV_CSPMU_EVENT_ATTR_4(ocu, cmem_rd_access, 0x1b7),
- NV_CSPMU_EVENT_ATTR_4(ocu, cmem_wb_access, 0x1bb),
- NV_CSPMU_EVENT_ATTR_4(ocu, cmem_rd_outstanding, 0x1bf),
- NV_CSPMU_EVENT_ATTR_4(ocu, cmem_wr_outstanding, 0x1c3),
-
- ARM_CSPMU_EVENT_ATTR(ocu_prb_access, 0x1c7),
- ARM_CSPMU_EVENT_ATTR(ocu_prb_data, 0x1c8),
- ARM_CSPMU_EVENT_ATTR(ocu_prb_outstanding, 0x1c9),
ARM_CSPMU_EVENT_ATTR(cmem_wr_access, 0x1ca),
- NV_CSPMU_EVENT_ATTR_4(ocu, cmem_wr_access, 0x1cb),
- NV_CSPMU_EVENT_ATTR_4(ocu, cmem_wb_data, 0x1cf),
- NV_CSPMU_EVENT_ATTR_4(ocu, cmem_wr_data, 0x1d3),
- NV_CSPMU_EVENT_ATTR_4(ocu, cmem_wb_outstanding, 0x1d7),
-
ARM_CSPMU_EVENT_ATTR(cmem_wr_total_bytes, 0x1db),
ARM_CSPMU_EVENT_ATTR(cycles, ARM_CSPMU_EVT_CYCLES_DEFAULT),
@@ -193,6 +142,7 @@ static struct attribute *pcie_pmu_format_attrs[] = {
static struct attribute *nvlink_c2c_pmu_format_attrs[] = {
ARM_CSPMU_FORMAT_EVENT_ATTR,
+ ARM_CSPMU_FORMAT_ATTR(port, "config1:0-1"),
NULL,
};
@@ -205,6 +155,7 @@ static struct attribute *cnvlink_pmu_format_attrs[] = {
static struct attribute *generic_pmu_format_attrs[] = {
ARM_CSPMU_FORMAT_EVENT_ATTR,
ARM_CSPMU_FORMAT_FILTER_ATTR,
+ ARM_CSPMU_FORMAT_FILTER2_ATTR,
NULL,
};
@@ -237,12 +188,55 @@ static u32 nv_cspmu_event_filter(const struct perf_event *event)
const struct nv_cspmu_ctx *ctx =
to_nv_cspmu_ctx(to_arm_cspmu(event->pmu));
- if (ctx->filter_mask == 0)
+ const u32 filter_val = event->attr.config1 & ctx->filter_mask;
+
+ if (filter_val == 0)
return ctx->filter_default_val;
- return event->attr.config1 & ctx->filter_mask;
+ return filter_val;
+}
+
+static u32 nv_cspmu_event_filter2(const struct perf_event *event)
+{
+ const struct nv_cspmu_ctx *ctx =
+ to_nv_cspmu_ctx(to_arm_cspmu(event->pmu));
+
+ const u32 filter_val = event->attr.config2 & ctx->filter2_mask;
+
+ if (filter_val == 0)
+ return ctx->filter2_default_val;
+
+ return filter_val;
+}
+
+static void nv_cspmu_set_ev_filter(struct arm_cspmu *cspmu,
+ const struct perf_event *event)
+{
+ u32 filter, offset;
+ const struct nv_cspmu_ctx *ctx =
+ to_nv_cspmu_ctx(to_arm_cspmu(event->pmu));
+ offset = 4 * event->hw.idx;
+
+ if (ctx->get_filter) {
+ filter = ctx->get_filter(event);
+ writel(filter, cspmu->base0 + PMEVFILTR + offset);
+ }
+
+ if (ctx->get_filter2) {
+ filter = ctx->get_filter2(event);
+ writel(filter, cspmu->base0 + PMEVFILT2R + offset);
+ }
+}
+
+static void nv_cspmu_set_cc_filter(struct arm_cspmu *cspmu,
+ const struct perf_event *event)
+{
+ u32 filter = nv_cspmu_event_filter(event);
+
+ writel(filter, cspmu->base0 + PMCCFILTR);
}
+
enum nv_cspmu_name_fmt {
NAME_FMT_GENERIC,
NAME_FMT_SOCKET
@@ -251,74 +245,120 @@ enum nv_cspmu_name_fmt {
struct nv_cspmu_match {
u32 prodid;
u32 prodid_mask;
- u64 filter_mask;
- u32 filter_default_val;
const char *name_pattern;
enum nv_cspmu_name_fmt name_fmt;
- struct attribute **event_attr;
- struct attribute **format_attr;
+ struct nv_cspmu_ctx template_ctx;
+ struct arm_cspmu_impl_ops ops;
};
static const struct nv_cspmu_match nv_cspmu_match[] = {
{
- .prodid = 0x103,
+ .prodid = 0x10300000,
.prodid_mask = NV_PRODID_MASK,
- .filter_mask = NV_PCIE_FILTER_ID_MASK,
- .filter_default_val = NV_PCIE_FILTER_ID_MASK,
.name_pattern = "nvidia_pcie_pmu_%u",
.name_fmt = NAME_FMT_SOCKET,
- .event_attr = mcf_pmu_event_attrs,
- .format_attr = pcie_pmu_format_attrs
+ .template_ctx = {
+ .event_attr = mcf_pmu_event_attrs,
+ .format_attr = pcie_pmu_format_attrs,
+ .filter_mask = NV_PCIE_FILTER_ID_MASK,
+ .filter_default_val = NV_PCIE_FILTER_ID_MASK,
+ .filter2_mask = 0x0,
+ .filter2_default_val = 0x0,
+ .get_filter = nv_cspmu_event_filter,
+ .get_filter2 = NULL,
+ .data = NULL,
+ .init_data = NULL
+ },
},
{
- .prodid = 0x104,
+ .prodid = 0x10400000,
.prodid_mask = NV_PRODID_MASK,
- .filter_mask = 0x0,
- .filter_default_val = NV_NVL_C2C_FILTER_ID_MASK,
.name_pattern = "nvidia_nvlink_c2c1_pmu_%u",
.name_fmt = NAME_FMT_SOCKET,
- .event_attr = mcf_pmu_event_attrs,
- .format_attr = nvlink_c2c_pmu_format_attrs
+ .template_ctx = {
+ .event_attr = mcf_pmu_event_attrs,
+ .format_attr = nvlink_c2c_pmu_format_attrs,
+ .filter_mask = NV_NVL_C2C_FILTER_ID_MASK,
+ .filter_default_val = NV_NVL_C2C_FILTER_ID_MASK,
+ .filter2_mask = 0x0,
+ .filter2_default_val = 0x0,
+ .get_filter = nv_cspmu_event_filter,
+ .get_filter2 = NULL,
+ .data = NULL,
+ .init_data = NULL
+ },
},
{
- .prodid = 0x105,
+ .prodid = 0x10500000,
.prodid_mask = NV_PRODID_MASK,
- .filter_mask = 0x0,
- .filter_default_val = NV_NVL_C2C_FILTER_ID_MASK,
.name_pattern = "nvidia_nvlink_c2c0_pmu_%u",
.name_fmt = NAME_FMT_SOCKET,
- .event_attr = mcf_pmu_event_attrs,
- .format_attr = nvlink_c2c_pmu_format_attrs
+ .template_ctx = {
+ .event_attr = mcf_pmu_event_attrs,
+ .format_attr = nvlink_c2c_pmu_format_attrs,
+ .filter_mask = NV_NVL_C2C_FILTER_ID_MASK,
+ .filter_default_val = NV_NVL_C2C_FILTER_ID_MASK,
+ .filter2_mask = 0x0,
+ .filter2_default_val = 0x0,
+ .get_filter = nv_cspmu_event_filter,
+ .get_filter2 = NULL,
+ .data = NULL,
+ .init_data = NULL
+ },
},
{
- .prodid = 0x106,
+ .prodid = 0x10600000,
.prodid_mask = NV_PRODID_MASK,
- .filter_mask = NV_CNVL_FILTER_ID_MASK,
- .filter_default_val = NV_CNVL_FILTER_ID_MASK,
.name_pattern = "nvidia_cnvlink_pmu_%u",
.name_fmt = NAME_FMT_SOCKET,
- .event_attr = mcf_pmu_event_attrs,
- .format_attr = cnvlink_pmu_format_attrs
+ .template_ctx = {
+ .event_attr = mcf_pmu_event_attrs,
+ .format_attr = cnvlink_pmu_format_attrs,
+ .filter_mask = NV_CNVL_FILTER_ID_MASK,
+ .filter_default_val = NV_CNVL_FILTER_ID_MASK,
+ .filter2_mask = 0x0,
+ .filter2_default_val = 0x0,
+ .get_filter = nv_cspmu_event_filter,
+ .get_filter2 = NULL,
+ .data = NULL,
+ .init_data = NULL
+ },
},
{
- .prodid = 0x2CF,
+ .prodid = 0x2CF00000,
.prodid_mask = NV_PRODID_MASK,
- .filter_mask = 0x0,
- .filter_default_val = 0x0,
.name_pattern = "nvidia_scf_pmu_%u",
.name_fmt = NAME_FMT_SOCKET,
- .event_attr = scf_pmu_event_attrs,
- .format_attr = scf_pmu_format_attrs
+ .template_ctx = {
+ .event_attr = scf_pmu_event_attrs,
+ .format_attr = scf_pmu_format_attrs,
+ .filter_mask = 0x0,
+ .filter_default_val = 0x0,
+ .filter2_mask = 0x0,
+ .filter2_default_val = 0x0,
+ .get_filter = nv_cspmu_event_filter,
+ .get_filter2 = NULL,
+ .data = NULL,
+ .init_data = NULL
+ },
},
{
.prodid = 0,
.prodid_mask = 0,
- .filter_mask = NV_GENERIC_FILTER_ID_MASK,
- .filter_default_val = NV_GENERIC_FILTER_ID_MASK,
.name_pattern = "nvidia_uncore_pmu_%u",
.name_fmt = NAME_FMT_GENERIC,
- .event_attr = generic_pmu_event_attrs,
- .format_attr = generic_pmu_format_attrs
+ .template_ctx = {
+ .event_attr = generic_pmu_event_attrs,
+ .format_attr = generic_pmu_format_attrs,
+ .filter_mask = NV_GENERIC_FILTER_ID_MASK,
+ .filter_default_val = NV_GENERIC_FILTER_ID_MASK,
+ .filter2_mask = NV_GENERIC_FILTER_ID_MASK,
+ .filter2_default_val = NV_GENERIC_FILTER_ID_MASK,
+ .get_filter = nv_cspmu_event_filter,
+ .get_filter2 = nv_cspmu_event_filter2,
+ .data = NULL,
+ .init_data = NULL
+ },
},
};
@@ -351,9 +391,16 @@ static char *nv_cspmu_format_name(const struct arm_cspmu *cspmu,
return name;
}
-int nv_cspmu_init_ops(struct arm_cspmu *cspmu)
+#define SET_OP(name, impl, match, default_op) \
+ do { \
+ if (match->ops.name) \
+ impl->name = match->ops.name; \
+ else if (default_op != NULL) \
+ impl->name = default_op; \
+ } while (false)
+
+static int nv_cspmu_init_ops(struct arm_cspmu *cspmu)
{
- u32 prodid;
struct nv_cspmu_ctx *ctx;
struct device *dev = cspmu->dev;
struct arm_cspmu_impl_ops *impl_ops = &cspmu->impl.ops;
@@ -363,38 +410,59 @@ int nv_cspmu_init_ops(struct arm_cspmu *cspmu)
if (!ctx)
return -ENOMEM;
- prodid = FIELD_GET(ARM_CSPMU_PMIIDR_PRODUCTID, cspmu->impl.pmiidr);
-
/* Find matching PMU. */
for (; match->prodid; match++) {
const u32 prodid_mask = match->prodid_mask;
- if ((match->prodid & prodid_mask) == (prodid & prodid_mask))
+ if ((match->prodid & prodid_mask) ==
+ (cspmu->impl.pmiidr & prodid_mask))
break;
}
- ctx->name = nv_cspmu_format_name(cspmu, match);
- ctx->filter_mask = match->filter_mask;
- ctx->filter_default_val = match->filter_default_val;
- ctx->event_attr = match->event_attr;
- ctx->format_attr = match->format_attr;
+ /* Initialize the context with the matched template. */
+ memcpy(ctx, &match->template_ctx, sizeof(struct nv_cspmu_ctx));
+ ctx->name = nv_cspmu_format_name(cspmu, match);
cspmu->impl.ctx = ctx;
/* NVIDIA specific callbacks. */
- impl_ops->event_filter = nv_cspmu_event_filter;
- impl_ops->get_event_attrs = nv_cspmu_get_event_attrs;
- impl_ops->get_format_attrs = nv_cspmu_get_format_attrs;
- impl_ops->get_name = nv_cspmu_get_name;
+ SET_OP(set_cc_filter, impl_ops, match, nv_cspmu_set_cc_filter);
+ SET_OP(set_ev_filter, impl_ops, match, nv_cspmu_set_ev_filter);
+ SET_OP(get_event_attrs, impl_ops, match, nv_cspmu_get_event_attrs);
+ SET_OP(get_format_attrs, impl_ops, match, nv_cspmu_get_format_attrs);
+ SET_OP(get_name, impl_ops, match, nv_cspmu_get_name);
- /* Set others to NULL to use default callback. */
- impl_ops->event_type = NULL;
- impl_ops->event_attr_is_visible = NULL;
- impl_ops->get_identifier = NULL;
- impl_ops->is_cycle_counter_event = NULL;
+ if (ctx->init_data)
+ return ctx->init_data(cspmu);
return 0;
}
-EXPORT_SYMBOL_GPL(nv_cspmu_init_ops);
+/* Match all NVIDIA Coresight PMU devices */
+static const struct arm_cspmu_impl_match nv_cspmu_param = {
+ .pmiidr_val = ARM_CSPMU_IMPL_ID_NVIDIA,
+ .module = THIS_MODULE,
+ .impl_init_ops = nv_cspmu_init_ops
+};
+
+static int __init nvidia_cspmu_init(void)
+{
+ int ret;
+
+ ret = arm_cspmu_impl_register(&nv_cspmu_param);
+ if (ret)
+ pr_err("nvidia_cspmu backend registration error: %d\n", ret);
+
+ return ret;
+}
+
+static void __exit nvidia_cspmu_exit(void)
+{
+ arm_cspmu_impl_unregister(&nv_cspmu_param);
+}
+
+module_init(nvidia_cspmu_init);
+module_exit(nvidia_cspmu_exit);
+
+MODULE_DESCRIPTION("NVIDIA Coresight Architecture Performance Monitor Driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/perf/arm_cspmu/nvidia_cspmu.h b/drivers/perf/arm_cspmu/nvidia_cspmu.h
deleted file mode 100644
index 71e18f0dc50b..000000000000
--- a/drivers/perf/arm_cspmu/nvidia_cspmu.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0
- *
- * Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- *
- */
-
-/* Support for NVIDIA specific attributes. */
-
-#ifndef __NVIDIA_CSPMU_H__
-#define __NVIDIA_CSPMU_H__
-
-#include "arm_cspmu.h"
-
-/* Allocate NVIDIA descriptor. */
-int nv_cspmu_init_ops(struct arm_cspmu *cspmu);
-
-#endif /* __NVIDIA_CSPMU_H__ */
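Aside (illustrative, not part of the diff): the reworked nv_cspmu_event_filter()/nv_cspmu_event_filter2() fall back to the default filter value only when the masked config1/config2 bits come out as zero, rather than whenever the match entry's filter_mask is zero. A minimal standalone C sketch of that selection rule; the mask and default values below are made up and are not the driver's constants:

#include <stdint.h>
#include <stdio.h>

struct filter_ctx {
	uint32_t mask;		/* config bits accepted as a filter (illustrative) */
	uint32_t default_val;	/* used when no filter bits are set (illustrative) */
};

/* Mask first, then fall back to the default only if nothing was selected. */
static uint32_t pick_filter(const struct filter_ctx *ctx, uint64_t config)
{
	uint32_t val = (uint32_t)config & ctx->mask;

	return val ? val : ctx->default_val;
}

int main(void)
{
	const struct filter_ctx ctx = { .mask = 0xff, .default_val = 0xff };

	/* config = 0 selects the default; config = 0x5 keeps only bits 0 and 2. */
	printf("0x%x 0x%x\n", pick_filter(&ctx, 0x0), pick_filter(&ctx, 0x5));
	return 0;
}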
diff --git a/drivers/perf/arm_dmc620_pmu.c b/drivers/perf/arm_dmc620_pmu.c
index 30cea6859574..619cf937602f 100644
--- a/drivers/perf/arm_dmc620_pmu.c
+++ b/drivers/perf/arm_dmc620_pmu.c
@@ -542,12 +542,16 @@ static int dmc620_pmu_event_init(struct perf_event *event)
if (event->cpu < 0)
return -EINVAL;
+ hwc->idx = -1;
+
+ if (event->group_leader == event)
+ return 0;
+
/*
* We can't atomically disable all HW counters so only one event allowed,
* although software events are acceptable.
*/
- if (event->group_leader != event &&
- !is_software_event(event->group_leader))
+ if (!is_software_event(event->group_leader))
return -EINVAL;
for_each_sibling_event(sibling, event->group_leader) {
@@ -556,7 +560,6 @@ static int dmc620_pmu_event_init(struct perf_event *event)
return -EINVAL;
}
- hwc->idx = -1;
return 0;
}
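Aside (illustrative, not part of the diff): the reordered dmc620_pmu_event_init() checks encode the same grouping rule as before, just with the group-leader early-out first: a group may hold at most one DMC-620 hardware event, and every sibling must be a software event. A hedged standalone restatement of that rule:

#include <stdbool.h>
#include <stddef.h>

enum ev_kind { EV_SOFTWARE, EV_HARDWARE };

/*
 * Can a new DMC-620 hardware event join this group?  If it is the group
 * leader there is nothing to conflict with yet; otherwise the leader and
 * all existing siblings must be software events.
 */
static bool dmc620_group_ok(bool event_is_leader, enum ev_kind leader,
			    const enum ev_kind *siblings, size_t n)
{
	if (event_is_leader)
		return true;

	if (leader != EV_SOFTWARE)
		return false;

	for (size_t i = 0; i < n; i++)
		if (siblings[i] != EV_SOFTWARE)
			return false;

	return true;
}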
@@ -673,6 +676,7 @@ static int dmc620_pmu_device_probe(struct platform_device *pdev)
dmc620_pmu->pmu = (struct pmu) {
.module = THIS_MODULE,
+ .parent = &pdev->dev,
.capabilities = PERF_PMU_CAP_NO_EXCLUDE,
.task_ctx_nr = perf_invalid_context,
.event_init = dmc620_pmu_event_init,
@@ -724,7 +728,7 @@ out_teardown_dev:
return ret;
}
-static int dmc620_pmu_device_remove(struct platform_device *pdev)
+static void dmc620_pmu_device_remove(struct platform_device *pdev)
{
struct dmc620_pmu *dmc620_pmu = platform_get_drvdata(pdev);
@@ -732,8 +736,6 @@ static int dmc620_pmu_device_remove(struct platform_device *pdev)
/* perf will synchronise RCU before devres can free dmc620_pmu */
perf_pmu_unregister(&dmc620_pmu->pmu);
-
- return 0;
}
static const struct acpi_device_id dmc620_acpi_match[] = {
diff --git a/drivers/perf/arm_dsu_pmu.c b/drivers/perf/arm_dsu_pmu.c
index 8223c49bd082..cb4fb59fe04b 100644
--- a/drivers/perf/arm_dsu_pmu.c
+++ b/drivers/perf/arm_dsu_pmu.c
@@ -85,7 +85,7 @@
DSU_EXT_ATTR(_name, dsu_pmu_sysfs_event_show, (unsigned long)_config)
#define DSU_FORMAT_ATTR(_name, _config) \
- DSU_EXT_ATTR(_name, dsu_pmu_sysfs_format_show, (char *)_config)
+ DSU_EXT_ATTR(_name, device_show_string, _config)
#define DSU_CPUMASK_ATTR(_name, _config) \
DSU_EXT_ATTR(_name, dsu_pmu_cpumask_show, (unsigned long)_config)
@@ -139,15 +139,6 @@ static ssize_t dsu_pmu_sysfs_event_show(struct device *dev,
return sysfs_emit(buf, "event=0x%lx\n", (unsigned long)eattr->var);
}
-static ssize_t dsu_pmu_sysfs_format_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct dev_ext_attribute *eattr = container_of(attr,
- struct dev_ext_attribute, attr);
- return sysfs_emit(buf, "%s\n", (char *)eattr->var);
-}
-
static ssize_t dsu_pmu_cpumask_show(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -230,15 +221,6 @@ static const struct attribute_group *dsu_pmu_attr_groups[] = {
NULL,
};
-static int dsu_pmu_get_online_cpu_any_but(struct dsu_pmu *dsu_pmu, int cpu)
-{
- struct cpumask online_supported;
-
- cpumask_and(&online_supported,
- &dsu_pmu->associated_cpus, cpu_online_mask);
- return cpumask_any_but(&online_supported, cpu);
-}
-
static inline bool dsu_pmu_counter_valid(struct dsu_pmu *dsu_pmu, u32 idx)
{
return (idx < dsu_pmu->num_counters) ||
@@ -371,7 +353,7 @@ static inline u32 dsu_pmu_get_reset_overflow(void)
return __dsu_pmu_get_reset_overflow();
}
-/**
+/*
* dsu_pmu_set_event_period: Set the period for the counter.
*
* All DSU PMU event counters, except the cycle counter are 32bit
@@ -602,7 +584,7 @@ static struct dsu_pmu *dsu_pmu_alloc(struct platform_device *pdev)
return dsu_pmu;
}
-/**
+/*
* dsu_pmu_dt_get_cpus: Get the list of CPUs in the cluster
* from device tree.
*/
@@ -632,7 +614,7 @@ static int dsu_pmu_dt_get_cpus(struct device *dev, cpumask_t *mask)
return 0;
}
-/**
+/*
* dsu_pmu_acpi_get_cpus: Get the list of CPUs in the cluster
* from ACPI.
*/
@@ -751,6 +733,7 @@ static int dsu_pmu_device_probe(struct platform_device *pdev)
dsu_pmu->pmu = (struct pmu) {
.task_ctx_nr = perf_invalid_context,
+ .parent = &pdev->dev,
.module = THIS_MODULE,
.pmu_enable = dsu_pmu_enable,
.pmu_disable = dsu_pmu_disable,
@@ -774,14 +757,12 @@ static int dsu_pmu_device_probe(struct platform_device *pdev)
return rc;
}
-static int dsu_pmu_device_remove(struct platform_device *pdev)
+static void dsu_pmu_device_remove(struct platform_device *pdev)
{
struct dsu_pmu *dsu_pmu = platform_get_drvdata(pdev);
perf_pmu_unregister(&dsu_pmu->pmu);
cpuhp_state_remove_instance(dsu_pmu_cpuhp_state, &dsu_pmu->cpuhp_node);
-
- return 0;
}
static const struct of_device_id dsu_pmu_of_match[] = {
@@ -829,14 +810,16 @@ static int dsu_pmu_cpu_online(unsigned int cpu, struct hlist_node *node)
static int dsu_pmu_cpu_teardown(unsigned int cpu, struct hlist_node *node)
{
- int dst;
- struct dsu_pmu *dsu_pmu = hlist_entry_safe(node, struct dsu_pmu,
- cpuhp_node);
+ struct dsu_pmu *dsu_pmu;
+ unsigned int dst;
+
+ dsu_pmu = hlist_entry_safe(node, struct dsu_pmu, cpuhp_node);
if (!cpumask_test_and_clear_cpu(cpu, &dsu_pmu->active_cpu))
return 0;
- dst = dsu_pmu_get_online_cpu_any_but(dsu_pmu, cpu);
+ dst = cpumask_any_and_but(&dsu_pmu->associated_cpus,
+ cpu_online_mask, cpu);
/* If there are no active CPUs in the DSU, leave IRQ disabled */
if (dst >= nr_cpu_ids)
return 0;
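Aside (illustrative, not part of the diff): cpumask_any_and_but() collapses the removed dsu_pmu_get_online_cpu_any_but() helper into one call: pick any CPU that is both in the DSU's associated set and online, excluding the CPU that is going down; if no such CPU exists, the IRQ stays disabled. The same selection over plain 64-bit masks, as a rough sketch:

#include <stdint.h>
#include <stdio.h>

#define NO_CPU 64	/* stand-in for "dst >= nr_cpu_ids" */

/* Return one CPU (here: the lowest) from (associated & online) other than 'leaving'. */
static unsigned int pick_new_owner(uint64_t associated, uint64_t online,
				   unsigned int leaving)
{
	uint64_t candidates = (associated & online) & ~(1ULL << leaving);

	for (unsigned int cpu = 0; cpu < 64; cpu++)
		if (candidates & (1ULL << cpu))
			return cpu;

	return NO_CPU;
}

int main(void)
{
	/* Cluster CPUs 0-3, CPU 3 already offline, CPU 0 going down -> CPU 1. */
	printf("%u\n", pick_new_owner(0xf, 0x7, 0));
	return 0;
}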
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index d712a19e47ac..973a027d9063 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -26,7 +26,8 @@
#include <asm/irq_regs.h>
-static int armpmu_count_irq_users(const int irq);
+static int armpmu_count_irq_users(const struct cpumask *affinity,
+ const int irq);
struct pmu_irq_ops {
void (*enable_pmuirq)(unsigned int irq);
@@ -64,7 +65,9 @@ static void armpmu_enable_percpu_pmuirq(unsigned int irq)
static void armpmu_free_percpu_pmuirq(unsigned int irq, int cpu,
void __percpu *devid)
{
- if (armpmu_count_irq_users(irq) == 1)
+ struct arm_pmu *armpmu = *per_cpu_ptr((void * __percpu *)devid, cpu);
+
+ if (armpmu_count_irq_users(&armpmu->supported_cpus, irq) == 1)
free_percpu_irq(irq, devid);
}
@@ -89,7 +92,9 @@ static void armpmu_disable_percpu_pmunmi(unsigned int irq)
static void armpmu_free_percpu_pmunmi(unsigned int irq, int cpu,
void __percpu *devid)
{
- if (armpmu_count_irq_users(irq) == 1)
+ struct arm_pmu *armpmu = *per_cpu_ptr((void * __percpu *)devid, cpu);
+
+ if (armpmu_count_irq_users(&armpmu->supported_cpus, irq) == 1)
free_percpu_nmi(irq, devid);
}
@@ -99,7 +104,6 @@ static const struct pmu_irq_ops percpu_pmunmi_ops = {
.free_pmuirq = armpmu_free_percpu_pmunmi
};
-static DEFINE_PER_CPU(struct arm_pmu *, cpu_armpmu);
static DEFINE_PER_CPU(int, cpu_irq);
static DEFINE_PER_CPU(const struct pmu_irq_ops *, cpu_irq_ops);
@@ -318,6 +322,12 @@ armpmu_del(struct perf_event *event, int flags)
int idx = hwc->idx;
armpmu_stop(event, PERF_EF_UPDATE);
+
+ if (has_branch_stack(event)) {
+ hw_events->branch_users--;
+ perf_sched_cb_dec(event->pmu);
+ }
+
hw_events->events[idx] = NULL;
armpmu->clear_event_idx(hw_events, event);
perf_event_update_userpage(event);
@@ -342,12 +352,15 @@ armpmu_add(struct perf_event *event, int flags)
if (idx < 0)
return idx;
- /*
- * If there is an event in the counter we are going to use then make
- * sure it is disabled.
- */
+ /* The newly-allocated counter should be empty */
+ WARN_ON_ONCE(hw_events->events[idx]);
+
+ if (has_branch_stack(event)) {
+ hw_events->branch_users++;
+ perf_sched_cb_inc(event->pmu);
+ }
+
event->hw.idx = idx;
- armpmu->disable(event);
hw_events->events[idx] = event;
hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
@@ -445,7 +458,7 @@ __hw_perf_event_init(struct perf_event *event)
{
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
- int mapping;
+ int mapping, ret;
hwc->flags = 0;
mapping = armpmu->map_event(event);
@@ -470,11 +483,10 @@ __hw_perf_event_init(struct perf_event *event)
/*
* Check whether we need to exclude the counter from certain modes.
*/
- if (armpmu->set_event_filter &&
- armpmu->set_event_filter(hwc, &event->attr)) {
- pr_debug("ARM performance counters do not support "
- "mode exclusion\n");
- return -EOPNOTSUPP;
+ if (armpmu->set_event_filter) {
+ ret = armpmu->set_event_filter(hwc, &event->attr);
+ if (ret)
+ return ret;
}
/*
@@ -512,8 +524,7 @@ static int armpmu_event_init(struct perf_event *event)
!cpumask_test_cpu(event->cpu, &armpmu->supported_cpus))
return -ENOENT;
- /* does not support taken branch sampling */
- if (has_branch_stack(event))
+ if (has_branch_stack(event) && !armpmu->reg_brbidr)
return -EOPNOTSUPP;
return __hw_perf_event_init(event);
@@ -523,7 +534,7 @@ static void armpmu_enable(struct pmu *pmu)
{
struct arm_pmu *armpmu = to_arm_pmu(pmu);
struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
- bool enabled = !bitmap_empty(hw_events->used_mask, armpmu->num_events);
+ bool enabled = !bitmap_empty(hw_events->used_mask, ARMPMU_MAX_HWEVENTS);
/* For task-bound events we may be called on other CPUs */
if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
@@ -573,11 +584,11 @@ static const struct attribute_group armpmu_common_attr_group = {
.attrs = armpmu_common_attrs,
};
-static int armpmu_count_irq_users(const int irq)
+static int armpmu_count_irq_users(const struct cpumask *affinity, const int irq)
{
int cpu, count = 0;
- for_each_possible_cpu(cpu) {
+ for_each_cpu(cpu, affinity) {
if (per_cpu(cpu_irq, cpu) == irq)
count++;
}
@@ -585,12 +596,13 @@ static int armpmu_count_irq_users(const int irq)
return count;
}
-static const struct pmu_irq_ops *armpmu_find_irq_ops(int irq)
+static const struct pmu_irq_ops *
+armpmu_find_irq_ops(const struct cpumask *affinity, int irq)
{
const struct pmu_irq_ops *ops = NULL;
int cpu;
- for_each_possible_cpu(cpu) {
+ for_each_cpu(cpu, affinity) {
if (per_cpu(cpu_irq, cpu) != irq)
continue;
@@ -602,22 +614,25 @@ static const struct pmu_irq_ops *armpmu_find_irq_ops(int irq)
return ops;
}
-void armpmu_free_irq(int irq, int cpu)
+void armpmu_free_irq(struct arm_pmu * __percpu *armpmu, int irq, int cpu)
{
if (per_cpu(cpu_irq, cpu) == 0)
return;
if (WARN_ON(irq != per_cpu(cpu_irq, cpu)))
return;
- per_cpu(cpu_irq_ops, cpu)->free_pmuirq(irq, cpu, &cpu_armpmu);
+ per_cpu(cpu_irq_ops, cpu)->free_pmuirq(irq, cpu, armpmu);
per_cpu(cpu_irq, cpu) = 0;
per_cpu(cpu_irq_ops, cpu) = NULL;
}
-int armpmu_request_irq(int irq, int cpu)
+int armpmu_request_irq(struct arm_pmu * __percpu *pcpu_armpmu, int irq, int cpu)
{
int err = 0;
+ struct arm_pmu **armpmu = per_cpu_ptr(pcpu_armpmu, cpu);
+ const struct cpumask *affinity = *armpmu ? &(*armpmu)->supported_cpus :
+ cpu_possible_mask; /* ACPI */
const irq_handler_t handler = armpmu_dispatch_irq;
const struct pmu_irq_ops *irq_ops;
@@ -639,25 +654,24 @@ int armpmu_request_irq(int irq, int cpu)
IRQF_NOBALANCING | IRQF_NO_AUTOEN |
IRQF_NO_THREAD;
- err = request_nmi(irq, handler, irq_flags, "arm-pmu",
- per_cpu_ptr(&cpu_armpmu, cpu));
+ err = request_nmi(irq, handler, irq_flags, "arm-pmu", armpmu);
/* If cannot get an NMI, get a normal interrupt */
if (err) {
err = request_irq(irq, handler, irq_flags, "arm-pmu",
- per_cpu_ptr(&cpu_armpmu, cpu));
+ armpmu);
irq_ops = &pmuirq_ops;
} else {
has_nmi = true;
irq_ops = &pmunmi_ops;
}
- } else if (armpmu_count_irq_users(irq) == 0) {
- err = request_percpu_nmi(irq, handler, "arm-pmu", &cpu_armpmu);
+ } else if (armpmu_count_irq_users(affinity, irq) == 0) {
+ err = request_percpu_nmi(irq, handler, "arm-pmu", affinity, pcpu_armpmu);
/* If cannot get an NMI, get a normal interrupt */
if (err) {
- err = request_percpu_irq(irq, handler, "arm-pmu",
- &cpu_armpmu);
+ err = request_percpu_irq_affinity(irq, handler, "arm-pmu",
+ affinity, pcpu_armpmu);
irq_ops = &percpu_pmuirq_ops;
} else {
has_nmi = true;
@@ -665,7 +679,7 @@ int armpmu_request_irq(int irq, int cpu)
}
} else {
/* Per cpudevid irq was already requested by another CPU */
- irq_ops = armpmu_find_irq_ops(irq);
+ irq_ops = armpmu_find_irq_ops(affinity, irq);
if (WARN_ON(!irq_ops))
err = -EINVAL;
@@ -710,8 +724,6 @@ static int arm_perf_starting_cpu(unsigned int cpu, struct hlist_node *node)
if (pmu->reset)
pmu->reset(pmu);
- per_cpu(cpu_armpmu, cpu) = pmu;
-
irq = armpmu_get_cpu_irq(pmu, cpu);
if (irq)
per_cpu(cpu_irq_ops, cpu)->enable_pmuirq(irq);
@@ -731,8 +743,6 @@ static int arm_perf_teardown_cpu(unsigned int cpu, struct hlist_node *node)
if (irq)
per_cpu(cpu_irq_ops, cpu)->disable_pmuirq(irq);
- per_cpu(cpu_armpmu, cpu) = NULL;
-
return 0;
}
@@ -743,7 +753,7 @@ static void cpu_pm_pmu_setup(struct arm_pmu *armpmu, unsigned long cmd)
struct perf_event *event;
int idx;
- for (idx = 0; idx < armpmu->num_events; idx++) {
+ for_each_set_bit(idx, armpmu->cntr_mask, ARMPMU_MAX_HWEVENTS) {
event = hw_events->events[idx];
if (!event)
continue;
@@ -773,7 +783,7 @@ static int cpu_pm_pmu_notify(struct notifier_block *b, unsigned long cmd,
{
struct arm_pmu *armpmu = container_of(b, struct arm_pmu, cpu_pm_nb);
struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
- bool enabled = !bitmap_empty(hw_events->used_mask, armpmu->num_events);
+ bool enabled = !bitmap_empty(hw_events->used_mask, ARMPMU_MAX_HWEVENTS);
if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
return NOTIFY_DONE;
@@ -893,7 +903,6 @@ struct arm_pmu *armpmu_alloc(void)
struct pmu_hw_events *events;
events = per_cpu_ptr(pmu->hw_events, cpu);
- raw_spin_lock_init(&events->pmu_lock);
events->percpu_pmu = pmu;
}
@@ -919,6 +928,12 @@ int armpmu_register(struct arm_pmu *pmu)
if (ret)
return ret;
+ /*
+	 * By this stage we know the supported CPUs on both DT and ACPI platforms,
+	 * so detect whether the CPU implementation is multi-threaded (SMT).
+ */
+ pmu->has_smt = topology_core_has_smt(cpumask_first(&pmu->supported_cpus));
+
if (!pmu->set_event_filter)
pmu->pmu.capabilities |= PERF_PMU_CAP_NO_EXCLUDE;
@@ -926,8 +941,9 @@ int armpmu_register(struct arm_pmu *pmu)
if (ret)
goto out_destroy;
- pr_info("enabled with %s PMU driver, %d counters available%s\n",
- pmu->name, pmu->num_events,
+ pr_info("enabled with %s PMU driver, %d (%*pb) counters available%s\n",
+ pmu->name, bitmap_weight(pmu->cntr_mask, ARMPMU_MAX_HWEVENTS),
+ ARMPMU_MAX_HWEVENTS, &pmu->cntr_mask,
has_nmi ? ", using NMIs" : "");
kvm_host_pmu_init(pmu);
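Aside (illustrative, not part of the diff): passing an affinity mask into armpmu_count_irq_users() means a per-CPU IRQ is freed only when the last CPU in that PMU's supported set releases it, and CPUs belonging to a different PMU no longer influence the count. A plain-C analogue of that accounting, with an array standing in for the per-CPU cpu_irq variable:

#include <stdio.h>

#define NR_CPUS 8

static int cpu_irq[NR_CPUS];	/* stand-in for per_cpu(cpu_irq, cpu) */

static int count_irq_users(unsigned long affinity, int irq)
{
	int count = 0;

	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		if ((affinity & (1UL << cpu)) && cpu_irq[cpu] == irq)
			count++;

	return count;
}

int main(void)
{
	cpu_irq[0] = cpu_irq[1] = 23;	/* one PMU's CPUs */
	cpu_irq[4] = cpu_irq[5] = 23;	/* another PMU that happens to share IRQ 23 */

	/* Restricting the walk to affinity 0x3 counts only the first PMU's users. */
	printf("%d\n", count_irq_users(0x3, 23));
	return 0;
}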
diff --git a/drivers/perf/arm_pmu_acpi.c b/drivers/perf/arm_pmu_acpi.c
index 05dda19c5359..e80f76d95e68 100644
--- a/drivers/perf/arm_pmu_acpi.c
+++ b/drivers/perf/arm_pmu_acpi.c
@@ -218,7 +218,7 @@ static int arm_pmu_acpi_parse_irqs(void)
* them with their PMUs.
*/
per_cpu(pmu_irqs, cpu) = irq;
- err = armpmu_request_irq(irq, cpu);
+ err = armpmu_request_irq(&probed_pmus, irq, cpu);
if (err)
goto out_err;
}
diff --git a/drivers/perf/arm_pmu_platform.c b/drivers/perf/arm_pmu_platform.c
index 3596db36cbff..1c9e50a13201 100644
--- a/drivers/perf/arm_pmu_platform.c
+++ b/drivers/perf/arm_pmu_platform.c
@@ -42,14 +42,13 @@ static int probe_current_pmu(struct arm_pmu *pmu,
return ret;
}
-static int pmu_parse_percpu_irq(struct arm_pmu *pmu, int irq)
+static int pmu_parse_percpu_irq(struct arm_pmu *pmu, int irq,
+ const struct cpumask *affinity)
{
- int cpu, ret;
struct pmu_hw_events __percpu *hw_events = pmu->hw_events;
+ int cpu;
- ret = irq_get_percpu_devid_partition(irq, &pmu->supported_cpus);
- if (ret)
- return ret;
+ cpumask_copy(&pmu->supported_cpus, affinity);
for_each_cpu(cpu, &pmu->supported_cpus)
per_cpu(hw_events->irq, cpu) = irq;
@@ -59,7 +58,7 @@ static int pmu_parse_percpu_irq(struct arm_pmu *pmu, int irq)
static bool pmu_has_irq_affinity(struct device_node *node)
{
- return !!of_find_property(node, "interrupt-affinity", NULL);
+ return of_property_present(node, "interrupt-affinity");
}
static int pmu_parse_irq_affinity(struct device *dev, int i)
@@ -115,9 +114,12 @@ static int pmu_parse_irqs(struct arm_pmu *pmu)
}
if (num_irqs == 1) {
- int irq = platform_get_irq(pdev, 0);
+ const struct cpumask *affinity;
+ int irq;
+
+ irq = platform_get_irq_affinity(pdev, 0, &affinity);
if ((irq > 0) && irq_is_percpu_devid(irq))
- return pmu_parse_percpu_irq(pmu, irq);
+ return pmu_parse_percpu_irq(pmu, irq, affinity);
}
if (nr_cpu_ids != 1 && !pmu_has_irq_affinity(dev->of_node))
@@ -163,7 +165,7 @@ static int armpmu_request_irqs(struct arm_pmu *armpmu)
if (!irq)
continue;
- err = armpmu_request_irq(irq, cpu);
+ err = armpmu_request_irq(&hw_events->percpu_pmu, irq, cpu);
if (err)
break;
}
@@ -179,7 +181,7 @@ static void armpmu_free_irqs(struct arm_pmu *armpmu)
for_each_cpu(cpu, &armpmu->supported_cpus) {
int irq = per_cpu(hw_events->irq, cpu);
- armpmu_free_irq(irq, cpu);
+ armpmu_free_irq(&hw_events->percpu_pmu, irq, cpu);
}
}
@@ -196,6 +198,7 @@ int arm_pmu_device_probe(struct platform_device *pdev,
if (!pmu)
return -ENOMEM;
+ pmu->pmu.parent = &pdev->dev;
pmu->plat_device = pdev;
ret = pmu_parse_irqs(pmu);
diff --git a/drivers/perf/arm_pmuv3.c b/drivers/perf/arm_pmuv3.c
index e5a2ac4155f6..8014ff766cff 100644
--- a/drivers/perf/arm_pmuv3.c
+++ b/drivers/perf/arm_pmuv3.c
@@ -15,6 +15,7 @@
#include <clocksource/arm_arch_timer.h>
#include <linux/acpi.h>
+#include <linux/bitfield.h>
#include <linux/clocksource.h>
#include <linux/of.h>
#include <linux/perf/arm_pmu.h>
@@ -24,7 +25,7 @@
#include <linux/smp.h>
#include <linux/nmi.h>
-#include <asm/arm_pmuv3.h>
+#include "arm_brbe.h"
/* ARMv8 Cortex-A53 specific event types. */
#define ARMV8_A53_PERFCTR_PREF_LINEFILL 0xC2
@@ -169,7 +170,11 @@ armv8pmu_events_sysfs_show(struct device *dev,
PMU_EVENT_ATTR_ID(name, armv8pmu_events_sysfs_show, config)
static struct attribute *armv8_pmuv3_event_attrs[] = {
- ARMV8_EVENT_ATTR(sw_incr, ARMV8_PMUV3_PERFCTR_SW_INCR),
+ /*
+ * Don't expose the sw_incr event in /sys. It's not usable as writes to
+ * PMSWINC_EL0 will trap as PMUSERENR.{SW,EN}=={0,0} and event rotation
+ * means we don't have a fixed event<->counter relationship regardless.
+ */
ARMV8_EVENT_ATTR(l1i_cache_refill, ARMV8_PMUV3_PERFCTR_L1I_CACHE_REFILL),
ARMV8_EVENT_ATTR(l1i_tlb_refill, ARMV8_PMUV3_PERFCTR_L1I_TLB_REFILL),
ARMV8_EVENT_ATTR(l1d_cache_refill, ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL),
@@ -294,26 +299,71 @@ static const struct attribute_group armv8_pmuv3_events_attr_group = {
.is_visible = armv8pmu_event_attr_is_visible,
};
-PMU_FORMAT_ATTR(event, "config:0-15");
-PMU_FORMAT_ATTR(long, "config1:0");
-PMU_FORMAT_ATTR(rdpmc, "config1:1");
+/* User ABI */
+#define ATTR_CFG_FLD_event_CFG config
+#define ATTR_CFG_FLD_event_LO 0
+#define ATTR_CFG_FLD_event_HI 15
+#define ATTR_CFG_FLD_long_CFG config1
+#define ATTR_CFG_FLD_long_LO 0
+#define ATTR_CFG_FLD_long_HI 0
+#define ATTR_CFG_FLD_rdpmc_CFG config1
+#define ATTR_CFG_FLD_rdpmc_LO 1
+#define ATTR_CFG_FLD_rdpmc_HI 1
+#define ATTR_CFG_FLD_threshold_count_CFG config1 /* PMEVTYPER.TC[0] */
+#define ATTR_CFG_FLD_threshold_count_LO 2
+#define ATTR_CFG_FLD_threshold_count_HI 2
+#define ATTR_CFG_FLD_threshold_compare_CFG config1 /* PMEVTYPER.TC[2:1] */
+#define ATTR_CFG_FLD_threshold_compare_LO 3
+#define ATTR_CFG_FLD_threshold_compare_HI 4
+#define ATTR_CFG_FLD_threshold_CFG config1 /* PMEVTYPER.TH */
+#define ATTR_CFG_FLD_threshold_LO 5
+#define ATTR_CFG_FLD_threshold_HI 16
+
+GEN_PMU_FORMAT_ATTR(event);
+GEN_PMU_FORMAT_ATTR(long);
+GEN_PMU_FORMAT_ATTR(rdpmc);
+GEN_PMU_FORMAT_ATTR(threshold_count);
+GEN_PMU_FORMAT_ATTR(threshold_compare);
+GEN_PMU_FORMAT_ATTR(threshold);
static int sysctl_perf_user_access __read_mostly;
-static inline bool armv8pmu_event_is_64bit(struct perf_event *event)
+static bool armv8pmu_event_is_64bit(struct perf_event *event)
+{
+ return ATTR_CFG_GET_FLD(&event->attr, long);
+}
+
+static bool armv8pmu_event_want_user_access(struct perf_event *event)
{
- return event->attr.config1 & 0x1;
+ return ATTR_CFG_GET_FLD(&event->attr, rdpmc);
}
-static inline bool armv8pmu_event_want_user_access(struct perf_event *event)
+static u32 armv8pmu_event_get_threshold(struct perf_event_attr *attr)
{
- return event->attr.config1 & 0x2;
+ return ATTR_CFG_GET_FLD(attr, threshold);
+}
+
+static u8 armv8pmu_event_threshold_control(struct perf_event_attr *attr)
+{
+ u8 th_compare = ATTR_CFG_GET_FLD(attr, threshold_compare);
+ u8 th_count = ATTR_CFG_GET_FLD(attr, threshold_count);
+
+ /*
+ * The count bit is always the bottom bit of the full control field, and
+ * the comparison is the upper two bits, but it's not explicitly
+ * labelled in the Arm ARM. For the Perf interface we split it into two
+ * fields, so reconstruct it here.
+ */
+ return (th_compare << 1) | th_count;
}
static struct attribute *armv8_pmuv3_format_attrs[] = {
&format_attr_event.attr,
&format_attr_long.attr,
&format_attr_rdpmc.attr,
+ &format_attr_threshold.attr,
+ &format_attr_threshold_compare.attr,
+ &format_attr_threshold_count.attr,
NULL,
};
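Aside (illustrative, not part of the diff): with the field definitions above, config1 carries long in bit 0, rdpmc in bit 1, threshold_count in bit 2, threshold_compare in bits 3-4 and threshold in bits 5-16, and armv8pmu_event_threshold_control() later reassembles PMEVTYPER.TC as (threshold_compare << 1) | threshold_count. A small worked encoding, purely arithmetic on that layout:

#include <stdint.h>
#include <stdio.h>

/* Pack the sysfs format fields into config1 following the bit ranges above. */
static uint64_t pack_config1(unsigned int long_counter, unsigned int rdpmc,
			     unsigned int th_count, unsigned int th_compare,
			     unsigned int threshold)
{
	return ((uint64_t)long_counter << 0) |
	       ((uint64_t)rdpmc        << 1) |
	       ((uint64_t)th_count     << 2) |
	       ((uint64_t)th_compare   << 3) |	/* occupies bits 3-4 */
	       ((uint64_t)threshold    << 5);	/* occupies bits 5-16 */
}

int main(void)
{
	/* threshold=4, threshold_compare=1, threshold_count=1 -> TC = (1 << 1) | 1 */
	uint64_t config1 = pack_config1(0, 0, 1, 1, 4);

	printf("config1 = 0x%llx\n", (unsigned long long)config1);
	return 0;
}

On the perf command line these values surface as the corresponding format terms (threshold=, threshold_compare=, threshold_count=) in the event specification; the PMU instance name used there varies by system.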
@@ -327,7 +377,7 @@ static ssize_t slots_show(struct device *dev, struct device_attribute *attr,
{
struct pmu *pmu = dev_get_drvdata(dev);
struct arm_pmu *cpu_pmu = container_of(pmu, struct arm_pmu, pmu);
- u32 slots = cpu_pmu->reg_pmmir & ARMV8_PMU_SLOTS_MASK;
+ u32 slots = FIELD_GET(ARMV8_PMU_SLOTS, cpu_pmu->reg_pmmir);
return sysfs_emit(page, "0x%08x\n", slots);
}
@@ -339,8 +389,7 @@ static ssize_t bus_slots_show(struct device *dev, struct device_attribute *attr,
{
struct pmu *pmu = dev_get_drvdata(dev);
struct arm_pmu *cpu_pmu = container_of(pmu, struct arm_pmu, pmu);
- u32 bus_slots = (cpu_pmu->reg_pmmir >> ARMV8_PMU_BUS_SLOTS_SHIFT)
- & ARMV8_PMU_BUS_SLOTS_MASK;
+ u32 bus_slots = FIELD_GET(ARMV8_PMU_BUS_SLOTS, cpu_pmu->reg_pmmir);
return sysfs_emit(page, "0x%08x\n", bus_slots);
}
@@ -352,8 +401,7 @@ static ssize_t bus_width_show(struct device *dev, struct device_attribute *attr,
{
struct pmu *pmu = dev_get_drvdata(dev);
struct arm_pmu *cpu_pmu = container_of(pmu, struct arm_pmu, pmu);
- u32 bus_width = (cpu_pmu->reg_pmmir >> ARMV8_PMU_BUS_WIDTH_SHIFT)
- & ARMV8_PMU_BUS_WIDTH_MASK;
+ u32 bus_width = FIELD_GET(ARMV8_PMU_BUS_WIDTH, cpu_pmu->reg_pmmir);
u32 val = 0;
/* Encoded as Log2(number of bytes), plus one */
@@ -365,26 +413,72 @@ static ssize_t bus_width_show(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR_RO(bus_width);
+static u32 threshold_max(struct arm_pmu *cpu_pmu)
+{
+ /*
+ * PMMIR.THWIDTH is readable and non-zero on aarch32, but it would be
+ * impossible to write the threshold in the upper 32 bits of PMEVTYPER.
+ */
+ if (IS_ENABLED(CONFIG_ARM))
+ return 0;
+
+ /*
+ * The largest value that can be written to PMEVTYPER<n>_EL0.TH is
+ * (2 ^ PMMIR.THWIDTH) - 1.
+ */
+ return (1 << FIELD_GET(ARMV8_PMU_THWIDTH, cpu_pmu->reg_pmmir)) - 1;
+}
+
+static ssize_t threshold_max_show(struct device *dev,
+ struct device_attribute *attr, char *page)
+{
+ struct pmu *pmu = dev_get_drvdata(dev);
+ struct arm_pmu *cpu_pmu = container_of(pmu, struct arm_pmu, pmu);
+
+ return sysfs_emit(page, "0x%08x\n", threshold_max(cpu_pmu));
+}
+
+static DEVICE_ATTR_RO(threshold_max);
+
+static ssize_t branches_show(struct device *dev,
+ struct device_attribute *attr, char *page)
+{
+ struct pmu *pmu = dev_get_drvdata(dev);
+ struct arm_pmu *cpu_pmu = container_of(pmu, struct arm_pmu, pmu);
+
+ return sysfs_emit(page, "%d\n", brbe_num_branch_records(cpu_pmu));
+}
+
+static DEVICE_ATTR_RO(branches);
+
static struct attribute *armv8_pmuv3_caps_attrs[] = {
+ &dev_attr_branches.attr,
&dev_attr_slots.attr,
&dev_attr_bus_slots.attr,
&dev_attr_bus_width.attr,
+ &dev_attr_threshold_max.attr,
NULL,
};
+static umode_t caps_is_visible(struct kobject *kobj, struct attribute *attr, int i)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct pmu *pmu = dev_get_drvdata(dev);
+ struct arm_pmu *cpu_pmu = container_of(pmu, struct arm_pmu, pmu);
+
+ if (i == 0)
+ return brbe_num_branch_records(cpu_pmu) ? attr->mode : 0;
+
+ return attr->mode;
+}
+
static const struct attribute_group armv8_pmuv3_caps_attr_group = {
.name = "caps",
.attrs = armv8_pmuv3_caps_attrs,
+ .is_visible = caps_is_visible,
};
/*
- * Perf Events' indices
- */
-#define ARMV8_IDX_CYCLE_COUNTER 0
-#define ARMV8_IDX_COUNTER0 1
-#define ARMV8_IDX_CYCLE_COUNTER_USER 32
-
-/*
* We unconditionally enable ARMv8.5-PMU long event counter support
* (64-bit events) where supported. Indicate if this arm_pmu has long
* event counter support.
@@ -397,7 +491,7 @@ static bool armv8pmu_has_long_event(struct arm_pmu *cpu_pmu)
return (IS_ENABLED(CONFIG_ARM64) && is_pmuv3p5(cpu_pmu->pmuver));
}
-static inline bool armv8pmu_event_has_user_read(struct perf_event *event)
+static bool armv8pmu_event_has_user_read(struct perf_event *event)
{
return event->hw.flags & PERF_EVENT_FLAG_USER_READ_CNT;
}
@@ -407,7 +501,7 @@ static inline bool armv8pmu_event_has_user_read(struct perf_event *event)
* except when we have allocated the 64bit cycle counter (for CPU
* cycles event) or when user space counter access is enabled.
*/
-static inline bool armv8pmu_event_is_chained(struct perf_event *event)
+static bool armv8pmu_event_is_chained(struct perf_event *event)
{
int idx = event->hw.idx;
struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
@@ -415,49 +509,40 @@ static inline bool armv8pmu_event_is_chained(struct perf_event *event)
return !armv8pmu_event_has_user_read(event) &&
armv8pmu_event_is_64bit(event) &&
!armv8pmu_has_long_event(cpu_pmu) &&
- (idx != ARMV8_IDX_CYCLE_COUNTER);
+ (idx < ARMV8_PMU_MAX_GENERAL_COUNTERS);
}
/*
* ARMv8 low level PMU access
*/
-
-/*
- * Perf Event to low level counters mapping
- */
-#define ARMV8_IDX_TO_COUNTER(x) \
- (((x) - ARMV8_IDX_COUNTER0) & ARMV8_PMU_COUNTER_MASK)
-
-static inline u32 armv8pmu_pmcr_read(void)
+static u64 armv8pmu_pmcr_read(void)
{
return read_pmcr();
}
-static inline void armv8pmu_pmcr_write(u32 val)
+static void armv8pmu_pmcr_write(u64 val)
{
val &= ARMV8_PMU_PMCR_MASK;
isb();
write_pmcr(val);
}
-static inline int armv8pmu_has_overflowed(u32 pmovsr)
+static int armv8pmu_has_overflowed(u64 pmovsr)
{
- return pmovsr & ARMV8_PMU_OVERFLOWED_MASK;
+ return !!(pmovsr & ARMV8_PMU_OVERFLOWED_MASK);
}
-static inline int armv8pmu_counter_has_overflowed(u32 pmnc, int idx)
+static int armv8pmu_counter_has_overflowed(u64 pmnc, int idx)
{
- return pmnc & BIT(ARMV8_IDX_TO_COUNTER(idx));
+ return !!(pmnc & BIT(idx));
}
-static inline u64 armv8pmu_read_evcntr(int idx)
+static u64 armv8pmu_read_evcntr(int idx)
{
- u32 counter = ARMV8_IDX_TO_COUNTER(idx);
-
- return read_pmevcntrn(counter);
+ return read_pmevcntrn(idx);
}
-static inline u64 armv8pmu_read_hw_counter(struct perf_event *event)
+static u64 armv8pmu_read_hw_counter(struct perf_event *event)
{
int idx = event->hw.idx;
u64 val = armv8pmu_read_evcntr(idx);
@@ -483,7 +568,7 @@ static bool armv8pmu_event_needs_bias(struct perf_event *event)
return false;
if (armv8pmu_has_long_event(cpu_pmu) ||
- idx == ARMV8_IDX_CYCLE_COUNTER)
+ idx >= ARMV8_PMU_MAX_GENERAL_COUNTERS)
return true;
return false;
@@ -511,22 +596,22 @@ static u64 armv8pmu_read_counter(struct perf_event *event)
int idx = hwc->idx;
u64 value;
- if (idx == ARMV8_IDX_CYCLE_COUNTER)
+ if (idx == ARMV8_PMU_CYCLE_IDX)
value = read_pmccntr();
+ else if (idx == ARMV8_PMU_INSTR_IDX)
+ value = read_pmicntr();
else
value = armv8pmu_read_hw_counter(event);
return armv8pmu_unbias_long_counter(event, value);
}
-static inline void armv8pmu_write_evcntr(int idx, u64 value)
+static void armv8pmu_write_evcntr(int idx, u64 value)
{
- u32 counter = ARMV8_IDX_TO_COUNTER(idx);
-
- write_pmevcntrn(counter, value);
+ write_pmevcntrn(idx, value);
}
-static inline void armv8pmu_write_hw_counter(struct perf_event *event,
+static void armv8pmu_write_hw_counter(struct perf_event *event,
u64 value)
{
int idx = event->hw.idx;
@@ -546,21 +631,29 @@ static void armv8pmu_write_counter(struct perf_event *event, u64 value)
value = armv8pmu_bias_long_counter(event, value);
- if (idx == ARMV8_IDX_CYCLE_COUNTER)
+ if (idx == ARMV8_PMU_CYCLE_IDX)
write_pmccntr(value);
+ else if (idx == ARMV8_PMU_INSTR_IDX)
+ write_pmicntr(value);
else
armv8pmu_write_hw_counter(event, value);
}
-static inline void armv8pmu_write_evtype(int idx, u32 val)
+static void armv8pmu_write_evtype(int idx, unsigned long val)
{
- u32 counter = ARMV8_IDX_TO_COUNTER(idx);
+ unsigned long mask = ARMV8_PMU_EVTYPE_EVENT |
+ ARMV8_PMU_INCLUDE_EL2 |
+ ARMV8_PMU_EXCLUDE_EL0 |
+ ARMV8_PMU_EXCLUDE_EL1;
+
+ if (IS_ENABLED(CONFIG_ARM64))
+ mask |= ARMV8_PMU_EVTYPE_TC | ARMV8_PMU_EVTYPE_TH;
- val &= ARMV8_PMU_EVTYPE_MASK;
- write_pmevtypern(counter, val);
+ val &= mask;
+ write_pmevtypern(idx, val);
}
-static inline void armv8pmu_write_event_type(struct perf_event *event)
+static void armv8pmu_write_event_type(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
@@ -577,24 +670,26 @@ static inline void armv8pmu_write_event_type(struct perf_event *event)
armv8pmu_write_evtype(idx - 1, hwc->config_base);
armv8pmu_write_evtype(idx, chain_evt);
} else {
- if (idx == ARMV8_IDX_CYCLE_COUNTER)
+ if (idx == ARMV8_PMU_CYCLE_IDX)
write_pmccfiltr(hwc->config_base);
+ else if (idx == ARMV8_PMU_INSTR_IDX)
+ write_pmicfiltr(hwc->config_base);
else
armv8pmu_write_evtype(idx, hwc->config_base);
}
}
-static u32 armv8pmu_event_cnten_mask(struct perf_event *event)
+static u64 armv8pmu_event_cnten_mask(struct perf_event *event)
{
- int counter = ARMV8_IDX_TO_COUNTER(event->hw.idx);
- u32 mask = BIT(counter);
+ int counter = event->hw.idx;
+ u64 mask = BIT(counter);
if (armv8pmu_event_is_chained(event))
mask |= BIT(counter - 1);
return mask;
}
-static inline void armv8pmu_enable_counter(u32 mask)
+static void armv8pmu_enable_counter(u64 mask)
{
/*
* Make sure event configuration register writes are visible before we
@@ -604,10 +699,10 @@ static inline void armv8pmu_enable_counter(u32 mask)
write_pmcntenset(mask);
}
-static inline void armv8pmu_enable_event_counter(struct perf_event *event)
+static void armv8pmu_enable_event_counter(struct perf_event *event)
{
struct perf_event_attr *attr = &event->attr;
- u32 mask = armv8pmu_event_cnten_mask(event);
+ u64 mask = armv8pmu_event_cnten_mask(event);
kvm_set_pmu_events(mask, attr);
@@ -616,7 +711,7 @@ static inline void armv8pmu_enable_event_counter(struct perf_event *event)
armv8pmu_enable_counter(mask);
}
-static inline void armv8pmu_disable_counter(u32 mask)
+static void armv8pmu_disable_counter(u64 mask)
{
write_pmcntenclr(mask);
/*
@@ -626,10 +721,10 @@ static inline void armv8pmu_disable_counter(u32 mask)
isb();
}
-static inline void armv8pmu_disable_event_counter(struct perf_event *event)
+static void armv8pmu_disable_event_counter(struct perf_event *event)
{
struct perf_event_attr *attr = &event->attr;
- u32 mask = armv8pmu_event_cnten_mask(event);
+ u64 mask = armv8pmu_event_cnten_mask(event);
kvm_clr_pmu_events(mask);
@@ -638,18 +733,17 @@ static inline void armv8pmu_disable_event_counter(struct perf_event *event)
armv8pmu_disable_counter(mask);
}
-static inline void armv8pmu_enable_intens(u32 mask)
+static void armv8pmu_enable_intens(u64 mask)
{
write_pmintenset(mask);
}
-static inline void armv8pmu_enable_event_irq(struct perf_event *event)
+static void armv8pmu_enable_event_irq(struct perf_event *event)
{
- u32 counter = ARMV8_IDX_TO_COUNTER(event->hw.idx);
- armv8pmu_enable_intens(BIT(counter));
+ armv8pmu_enable_intens(BIT(event->hw.idx));
}
-static inline void armv8pmu_disable_intens(u32 mask)
+static void armv8pmu_disable_intens(u64 mask)
{
write_pmintenclr(mask);
isb();
@@ -658,21 +752,20 @@ static inline void armv8pmu_disable_intens(u32 mask)
isb();
}
-static inline void armv8pmu_disable_event_irq(struct perf_event *event)
+static void armv8pmu_disable_event_irq(struct perf_event *event)
{
- u32 counter = ARMV8_IDX_TO_COUNTER(event->hw.idx);
- armv8pmu_disable_intens(BIT(counter));
+ armv8pmu_disable_intens(BIT(event->hw.idx));
}
-static inline u32 armv8pmu_getreset_flags(void)
+static u64 armv8pmu_getreset_flags(void)
{
- u32 value;
+ u64 value;
/* Read */
value = read_pmovsclr();
/* Write to clear flags */
- value &= ARMV8_PMU_OVSR_MASK;
+ value &= ARMV8_PMU_OVERFLOWED_MASK;
write_pmovsclr(value);
return value;
@@ -704,24 +797,31 @@ static void armv8pmu_enable_user_access(struct arm_pmu *cpu_pmu)
int i;
struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
- /* Clear any unused counters to avoid leaking their contents */
- for_each_clear_bit(i, cpuc->used_mask, cpu_pmu->num_events) {
- if (i == ARMV8_IDX_CYCLE_COUNTER)
- write_pmccntr(0);
- else
- armv8pmu_write_evcntr(i, 0);
+ if (is_pmuv3p9(cpu_pmu->pmuver)) {
+ u64 mask = 0;
+ for_each_set_bit(i, cpuc->used_mask, ARMPMU_MAX_HWEVENTS) {
+ if (armv8pmu_event_has_user_read(cpuc->events[i]))
+ mask |= BIT(i);
+ }
+ write_pmuacr(mask);
+ } else {
+ /* Clear any unused counters to avoid leaking their contents */
+ for_each_andnot_bit(i, cpu_pmu->cntr_mask, cpuc->used_mask,
+ ARMPMU_MAX_HWEVENTS) {
+ if (i == ARMV8_PMU_CYCLE_IDX)
+ write_pmccntr(0);
+ else if (i == ARMV8_PMU_INSTR_IDX)
+ write_pmicntr(0);
+ else
+ armv8pmu_write_evcntr(i, 0);
+ }
}
- update_pmuserenr(ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_CR);
+ update_pmuserenr(ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_CR | ARMV8_PMU_USERENR_UEN);
}
static void armv8pmu_enable_event(struct perf_event *event)
{
- /*
- * Enable counter and interrupt, and set the counter to count
- * the event that we're interested in.
- */
- armv8pmu_disable_event_counter(event);
armv8pmu_write_event_type(event);
armv8pmu_enable_event_irq(event);
armv8pmu_enable_event_counter(event);
@@ -736,6 +836,7 @@ static void armv8pmu_disable_event(struct perf_event *event)
static void armv8pmu_start(struct arm_pmu *cpu_pmu)
{
struct perf_event_context *ctx;
+ struct pmu_hw_events *hw_events = this_cpu_ptr(cpu_pmu->hw_events);
int nr_user = 0;
ctx = perf_cpu_task_ctx();
@@ -747,19 +848,39 @@ static void armv8pmu_start(struct arm_pmu *cpu_pmu)
else
armv8pmu_disable_user_access();
+ kvm_vcpu_pmu_resync_el0();
+
+ if (hw_events->branch_users)
+ brbe_enable(cpu_pmu);
+
/* Enable all counters */
armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMU_PMCR_E);
}
static void armv8pmu_stop(struct arm_pmu *cpu_pmu)
{
+ struct pmu_hw_events *hw_events = this_cpu_ptr(cpu_pmu->hw_events);
+
+ if (hw_events->branch_users)
+ brbe_disable();
+
/* Disable all counters */
armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMU_PMCR_E);
}
+static void read_branch_records(struct pmu_hw_events *cpuc,
+ struct perf_event *event,
+ struct perf_sample_data *data)
+{
+ struct perf_branch_stack *branch_stack = cpuc->branch_stack;
+
+ brbe_read_filtered_entries(branch_stack, event);
+ perf_sample_save_brstack(data, event, branch_stack, NULL);
+}
+
static irqreturn_t armv8pmu_handle_irq(struct arm_pmu *cpu_pmu)
{
- u32 pmovsr;
+ u64 pmovsr;
struct perf_sample_data data;
struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
struct pt_regs *regs;
@@ -786,7 +907,7 @@ static irqreturn_t armv8pmu_handle_irq(struct arm_pmu *cpu_pmu)
* to prevent skews in group events.
*/
armv8pmu_stop(cpu_pmu);
- for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
+ for_each_set_bit(idx, cpu_pmu->cntr_mask, ARMPMU_MAX_HWEVENTS) {
struct perf_event *event = cpuc->events[idx];
struct hw_perf_event *hwc;
@@ -807,13 +928,15 @@ static irqreturn_t armv8pmu_handle_irq(struct arm_pmu *cpu_pmu)
if (!armpmu_event_set_period(event))
continue;
+ if (has_branch_stack(event))
+ read_branch_records(cpuc, event, &data);
+
/*
* Perf event overflow will queue the processing of the event as
* an irq_work which will be taken care of in the handling of
* IPI_IRQ_WORK.
*/
- if (perf_event_overflow(event, &data, regs))
- cpu_pmu->disable(event);
+ perf_event_overflow(event, &data, regs);
}
armv8pmu_start(cpu_pmu);
@@ -825,7 +948,7 @@ static int armv8pmu_get_single_idx(struct pmu_hw_events *cpuc,
{
int idx;
- for (idx = ARMV8_IDX_COUNTER0; idx < cpu_pmu->num_events; idx++) {
+ for_each_set_bit(idx, cpu_pmu->cntr_mask, ARMV8_PMU_MAX_GENERAL_COUNTERS) {
if (!test_and_set_bit(idx, cpuc->used_mask))
return idx;
}
@@ -841,7 +964,9 @@ static int armv8pmu_get_chain_idx(struct pmu_hw_events *cpuc,
* Chaining requires two consecutive event counters, where
* the lower idx must be even.
*/
- for (idx = ARMV8_IDX_COUNTER0 + 1; idx < cpu_pmu->num_events; idx += 2) {
+ for_each_set_bit(idx, cpu_pmu->cntr_mask, ARMV8_PMU_MAX_GENERAL_COUNTERS) {
+ if (!(idx & 0x1))
+ continue;
if (!test_and_set_bit(idx, cpuc->used_mask)) {
/* Check if the preceding even counter is available */
if (!test_and_set_bit(idx - 1, cpuc->used_mask))
@@ -853,6 +978,42 @@ static int armv8pmu_get_chain_idx(struct pmu_hw_events *cpuc,
return -EAGAIN;
}
+static bool armv8pmu_can_use_pmccntr(struct pmu_hw_events *cpuc,
+ struct perf_event *event)
+{
+ struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ unsigned long evtype = hwc->config_base & ARMV8_PMU_EVTYPE_EVENT;
+
+ if (evtype != ARMV8_PMUV3_PERFCTR_CPU_CYCLES)
+ return false;
+
+ /*
+ * A CPU_CYCLES event with threshold counting cannot use PMCCNTR_EL0
+ * since it lacks threshold support.
+ */
+ if (armv8pmu_event_get_threshold(&event->attr))
+ return false;
+
+ /*
+ * PMCCNTR_EL0 is not affected by BRBE controls like BRBCR_ELx.FZP.
+ * So don't use it for branch events.
+ */
+ if (has_branch_stack(event))
+ return false;
+
+ /*
+ * The PMCCNTR_EL0 increments from the processor clock rather than
+ * the PE clock (ARM DDI0487 L.b D13.1.3) which means it'll continue
+ * counting on a WFI PE if one of its SMT sibling is not idle on a
+	 * counting on a WFI PE if one of its SMT siblings is not idle on a
+ */
+ if (cpu_pmu->has_smt)
+ return false;
+
+ return true;
+}
+
static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc,
struct perf_event *event)
{
@@ -861,9 +1022,9 @@ static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc,
unsigned long evtype = hwc->config_base & ARMV8_PMU_EVTYPE_EVENT;
/* Always prefer to place a cycle counter into the cycle counter. */
- if (evtype == ARMV8_PMUV3_PERFCTR_CPU_CYCLES) {
- if (!test_and_set_bit(ARMV8_IDX_CYCLE_COUNTER, cpuc->used_mask))
- return ARMV8_IDX_CYCLE_COUNTER;
+ if (armv8pmu_can_use_pmccntr(cpuc, event)) {
+ if (!test_and_set_bit(ARMV8_PMU_CYCLE_IDX, cpuc->used_mask))
+ return ARMV8_PMU_CYCLE_IDX;
else if (armv8pmu_event_is_64bit(event) &&
armv8pmu_event_want_user_access(event) &&
!armv8pmu_has_long_event(cpu_pmu))
@@ -871,6 +1032,19 @@ static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc,
}
/*
+	 * Always prefer to place an instruction counter into the instruction counter,
+ * but don't expose the instruction counter to userspace access as userspace
+ * may not know how to handle it.
+ */
+ if ((evtype == ARMV8_PMUV3_PERFCTR_INST_RETIRED) &&
+ !armv8pmu_event_get_threshold(&event->attr) &&
+ test_bit(ARMV8_PMU_INSTR_IDX, cpu_pmu->cntr_mask) &&
+ !armv8pmu_event_want_user_access(event)) {
+ if (!test_and_set_bit(ARMV8_PMU_INSTR_IDX, cpuc->used_mask))
+ return ARMV8_PMU_INSTR_IDX;
+ }
+
+ /*
	 * Otherwise use event counters
*/
if (armv8pmu_event_is_chained(event))
@@ -894,15 +1068,20 @@ static int armv8pmu_user_event_idx(struct perf_event *event)
if (!sysctl_perf_user_access || !armv8pmu_event_has_user_read(event))
return 0;
- /*
- * We remap the cycle counter index to 32 to
- * match the offset applied to the rest of
- * the counter indices.
- */
- if (event->hw.idx == ARMV8_IDX_CYCLE_COUNTER)
- return ARMV8_IDX_CYCLE_COUNTER_USER;
+ return event->hw.idx + 1;
+}
+
+static void armv8pmu_sched_task(struct perf_event_pmu_context *pmu_ctx,
+ struct task_struct *task, bool sched_in)
+{
+ struct arm_pmu *armpmu = to_arm_pmu(pmu_ctx->pmu);
+ struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
+
+ if (!hw_events->branch_users)
+ return;
-	return event->hw.idx;
+ if (sched_in)
+ brbe_invalidate();
}
/*
@@ -912,9 +1091,22 @@ static int armv8pmu_set_event_filter(struct hw_perf_event *event,
struct perf_event_attr *attr)
{
unsigned long config_base = 0;
+ struct perf_event *perf_event = container_of(attr, struct perf_event,
+ attr);
+ struct arm_pmu *cpu_pmu = to_arm_pmu(perf_event->pmu);
+ u32 th;
+
+ if (attr->exclude_idle) {
+ pr_debug("ARM performance counters do not support mode exclusion\n");
+ return -EOPNOTSUPP;
+ }
- if (attr->exclude_idle)
- return -EPERM;
+ if (has_branch_stack(perf_event)) {
+ if (!brbe_num_branch_records(cpu_pmu) || !brbe_branch_attr_valid(perf_event))
+ return -EOPNOTSUPP;
+
+ perf_event->attach_state |= PERF_ATTACH_SCHED_CB;
+ }
/*
* If we're running in hyp mode, then we *are* the hypervisor.
@@ -944,6 +1136,22 @@ static int armv8pmu_set_event_filter(struct hw_perf_event *event,
config_base |= ARMV8_PMU_EXCLUDE_EL0;
/*
+ * If FEAT_PMUv3_TH isn't implemented, then THWIDTH (threshold_max) will
+ * be 0 and will also trigger this check, preventing it from being used.
+ */
+ th = armv8pmu_event_get_threshold(attr);
+ if (th > threshold_max(cpu_pmu)) {
+ pr_debug("PMU event threshold exceeds max value\n");
+ return -EINVAL;
+ }
+
+ if (th) {
+ config_base |= FIELD_PREP(ARMV8_PMU_EVTYPE_TH, th);
+ config_base |= FIELD_PREP(ARMV8_PMU_EVTYPE_TC,
+ armv8pmu_event_threshold_control(attr));
+ }
+
+ /*
* Install the filter into config_base as this is used to
* construct the event type.
*/
@@ -955,14 +1163,21 @@ static int armv8pmu_set_event_filter(struct hw_perf_event *event,
static void armv8pmu_reset(void *info)
{
struct arm_pmu *cpu_pmu = (struct arm_pmu *)info;
- u32 pmcr;
+ u64 pmcr, mask;
+
+ bitmap_to_arr64(&mask, cpu_pmu->cntr_mask, ARMPMU_MAX_HWEVENTS);
/* The counter and interrupt enable registers are unknown at reset. */
- armv8pmu_disable_counter(U32_MAX);
- armv8pmu_disable_intens(U32_MAX);
+ armv8pmu_disable_counter(mask);
+ armv8pmu_disable_intens(mask);
/* Clear the counters we flip at guest entry/exit */
- kvm_clr_pmu_events(U32_MAX);
+ kvm_clr_pmu_events(mask);
+
+ if (brbe_num_branch_records(cpu_pmu)) {
+ brbe_disable();
+ brbe_invalidate();
+ }
/*
* Initialize & Reset PMNC. Request overflow interrupt for
@@ -983,14 +1198,14 @@ static int __armv8_pmuv3_map_event_id(struct arm_pmu *armpmu,
if (event->attr.type == PERF_TYPE_HARDWARE &&
event->attr.config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS) {
- if (test_bit(ARMV8_PMUV3_PERFCTR_PC_WRITE_RETIRED,
- armpmu->pmceid_bitmap))
- return ARMV8_PMUV3_PERFCTR_PC_WRITE_RETIRED;
-
if (test_bit(ARMV8_PMUV3_PERFCTR_BR_RETIRED,
armpmu->pmceid_bitmap))
return ARMV8_PMUV3_PERFCTR_BR_RETIRED;
+ if (test_bit(ARMV8_PMUV3_PERFCTR_PC_WRITE_RETIRED,
+ armpmu->pmceid_bitmap))
+ return ARMV8_PMUV3_PERFCTR_PC_WRITE_RETIRED;
+
return HW_OP_UNSUPPORTED;
}
@@ -1105,11 +1320,15 @@ static void __armv8pmu_probe_pmu(void *info)
probe->present = true;
/* Read the nb of CNTx counters supported from PMNC */
- cpu_pmu->num_events = (armv8pmu_pmcr_read() >> ARMV8_PMU_PMCR_N_SHIFT)
- & ARMV8_PMU_PMCR_N_MASK;
+ bitmap_set(cpu_pmu->cntr_mask,
+ 0, FIELD_GET(ARMV8_PMU_PMCR_N, armv8pmu_pmcr_read()));
/* Add the CPU cycles counter */
- cpu_pmu->num_events += 1;
+ set_bit(ARMV8_PMU_CYCLE_IDX, cpu_pmu->cntr_mask);
+
+ /* Add the CPU instructions counter */
+ if (pmuv3_has_icntr())
+ set_bit(ARMV8_PMU_INSTR_IDX, cpu_pmu->cntr_mask);
pmceid[0] = pmceid_raw[0] = read_pmceid0();
pmceid[1] = pmceid_raw[1] = read_pmceid1();
@@ -1124,10 +1343,29 @@ static void __armv8pmu_probe_pmu(void *info)
pmceid, ARMV8_PMUV3_MAX_COMMON_EVENTS);
/* store PMMIR register for sysfs */
- if (is_pmuv3p4(pmuver) && (pmceid_raw[1] & BIT(31)))
+ if (is_pmuv3p4(pmuver))
cpu_pmu->reg_pmmir = read_pmmir();
else
cpu_pmu->reg_pmmir = 0;
+
+ brbe_probe(cpu_pmu);
+}
+
+static int branch_records_alloc(struct arm_pmu *armpmu)
+{
+ size_t size = struct_size_t(struct perf_branch_stack, entries,
+ brbe_num_branch_records(armpmu));
+ int cpu;
+
+ for_each_cpu(cpu, &armpmu->supported_cpus) {
+ struct pmu_hw_events *events_cpu;
+
+ events_cpu = per_cpu_ptr(armpmu->hw_events, cpu);
+ events_cpu->branch_stack = kmalloc(size, GFP_KERNEL);
+ if (!events_cpu->branch_stack)
+ return -ENOMEM;
+ }
+ return 0;
}
static int armv8pmu_probe_pmu(struct arm_pmu *cpu_pmu)
@@ -1144,7 +1382,15 @@ static int armv8pmu_probe_pmu(struct arm_pmu *cpu_pmu)
if (ret)
return ret;
- return probe.present ? 0 : -ENODEV;
+ if (!probe.present)
+ return -ENODEV;
+
+ if (brbe_num_branch_records(cpu_pmu)) {
+ ret = branch_records_alloc(cpu_pmu);
+ if (ret)
+ return ret;
+ }
+ return 0;
}
static void armv8pmu_disable_user_access_ipi(void *unused)
@@ -1152,7 +1398,7 @@ static void armv8pmu_disable_user_access_ipi(void *unused)
armv8pmu_disable_user_access();
}
-static int armv8pmu_proc_user_access_handler(struct ctl_table *table, int write,
+static int armv8pmu_proc_user_access_handler(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
@@ -1163,7 +1409,7 @@ static int armv8pmu_proc_user_access_handler(struct ctl_table *table, int write,
return 0;
}
-static struct ctl_table armv8_pmu_sysctl_table[] = {
+static const struct ctl_table armv8_pmu_sysctl_table[] = {
{
.procname = "perf_user_access",
.data = &sysctl_perf_user_access,
@@ -1173,7 +1419,6 @@ static struct ctl_table armv8_pmu_sysctl_table[] = {
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_ONE,
},
- { }
};
static void armv8_pmu_register_sysctl_table(void)
@@ -1185,10 +1430,7 @@ static void armv8_pmu_register_sysctl_table(void)
}
static int armv8_pmu_init(struct arm_pmu *cpu_pmu, char *name,
- int (*map_event)(struct perf_event *event),
- const struct attribute_group *events,
- const struct attribute_group *format,
- const struct attribute_group *caps)
+ int (*map_event)(struct perf_event *event))
{
int ret = armv8pmu_probe_pmu(cpu_pmu);
if (ret)
@@ -1207,34 +1449,36 @@ static int armv8_pmu_init(struct arm_pmu *cpu_pmu, char *name,
cpu_pmu->set_event_filter = armv8pmu_set_event_filter;
cpu_pmu->pmu.event_idx = armv8pmu_user_event_idx;
+ if (brbe_num_branch_records(cpu_pmu))
+ cpu_pmu->pmu.sched_task = armv8pmu_sched_task;
cpu_pmu->name = name;
cpu_pmu->map_event = map_event;
- cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] = events ?
- events : &armv8_pmuv3_events_attr_group;
- cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] = format ?
- format : &armv8_pmuv3_format_attr_group;
- cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_CAPS] = caps ?
- caps : &armv8_pmuv3_caps_attr_group;
-
+ cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] = &armv8_pmuv3_events_attr_group;
+ cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] = &armv8_pmuv3_format_attr_group;
+ cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_CAPS] = &armv8_pmuv3_caps_attr_group;
armv8_pmu_register_sysctl_table();
return 0;
}
-static int armv8_pmu_init_nogroups(struct arm_pmu *cpu_pmu, char *name,
- int (*map_event)(struct perf_event *event))
-{
- return armv8_pmu_init(cpu_pmu, name, map_event, NULL, NULL, NULL);
+#define PMUV3_INIT_SIMPLE(name) \
+static int name##_pmu_init(struct arm_pmu *cpu_pmu) \
+{ \
+ return armv8_pmu_init(cpu_pmu, #name, armv8_pmuv3_map_event); \
}
-#define PMUV3_INIT_SIMPLE(name) \
+#define PMUV3_INIT_MAP_EVENT(name, map_event) \
static int name##_pmu_init(struct arm_pmu *cpu_pmu) \
{ \
- return armv8_pmu_init_nogroups(cpu_pmu, #name, armv8_pmuv3_map_event);\
+ return armv8_pmu_init(cpu_pmu, #name, map_event); \
}
PMUV3_INIT_SIMPLE(armv8_pmuv3)
+PMUV3_INIT_SIMPLE(armv8_c1_nano)
+PMUV3_INIT_SIMPLE(armv8_c1_premium)
+PMUV3_INIT_SIMPLE(armv8_c1_pro)
+PMUV3_INIT_SIMPLE(armv8_c1_ultra)
PMUV3_INIT_SIMPLE(armv8_cortex_a34)
PMUV3_INIT_SIMPLE(armv8_cortex_a55)
PMUV3_INIT_SIMPLE(armv8_cortex_a65)
@@ -1242,96 +1486,89 @@ PMUV3_INIT_SIMPLE(armv8_cortex_a75)
PMUV3_INIT_SIMPLE(armv8_cortex_a76)
PMUV3_INIT_SIMPLE(armv8_cortex_a77)
PMUV3_INIT_SIMPLE(armv8_cortex_a78)
+PMUV3_INIT_SIMPLE(armv9_cortex_a320)
PMUV3_INIT_SIMPLE(armv9_cortex_a510)
PMUV3_INIT_SIMPLE(armv9_cortex_a520)
+PMUV3_INIT_SIMPLE(armv9_cortex_a520ae)
PMUV3_INIT_SIMPLE(armv9_cortex_a710)
PMUV3_INIT_SIMPLE(armv9_cortex_a715)
PMUV3_INIT_SIMPLE(armv9_cortex_a720)
+PMUV3_INIT_SIMPLE(armv9_cortex_a720ae)
+PMUV3_INIT_SIMPLE(armv9_cortex_a725)
PMUV3_INIT_SIMPLE(armv8_cortex_x1)
PMUV3_INIT_SIMPLE(armv9_cortex_x2)
PMUV3_INIT_SIMPLE(armv9_cortex_x3)
PMUV3_INIT_SIMPLE(armv9_cortex_x4)
+PMUV3_INIT_SIMPLE(armv9_cortex_x925)
PMUV3_INIT_SIMPLE(armv8_neoverse_e1)
PMUV3_INIT_SIMPLE(armv8_neoverse_n1)
PMUV3_INIT_SIMPLE(armv9_neoverse_n2)
+PMUV3_INIT_SIMPLE(armv9_neoverse_n3)
PMUV3_INIT_SIMPLE(armv8_neoverse_v1)
+PMUV3_INIT_SIMPLE(armv8_neoverse_v2)
+PMUV3_INIT_SIMPLE(armv8_neoverse_v3)
+PMUV3_INIT_SIMPLE(armv8_neoverse_v3ae)
+PMUV3_INIT_SIMPLE(armv8_rainier)
PMUV3_INIT_SIMPLE(armv8_nvidia_carmel)
PMUV3_INIT_SIMPLE(armv8_nvidia_denver)
-static int armv8_a35_pmu_init(struct arm_pmu *cpu_pmu)
-{
- return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a35",
- armv8_a53_map_event);
-}
-
-static int armv8_a53_pmu_init(struct arm_pmu *cpu_pmu)
-{
- return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a53",
- armv8_a53_map_event);
-}
-
-static int armv8_a57_pmu_init(struct arm_pmu *cpu_pmu)
-{
- return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a57",
- armv8_a57_map_event);
-}
-
-static int armv8_a72_pmu_init(struct arm_pmu *cpu_pmu)
-{
- return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a72",
- armv8_a57_map_event);
-}
-
-static int armv8_a73_pmu_init(struct arm_pmu *cpu_pmu)
-{
- return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a73",
- armv8_a73_map_event);
-}
+PMUV3_INIT_SIMPLE(armv8_samsung_mongoose)
-static int armv8_thunder_pmu_init(struct arm_pmu *cpu_pmu)
-{
- return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cavium_thunder",
- armv8_thunder_map_event);
-}
-
-static int armv8_vulcan_pmu_init(struct arm_pmu *cpu_pmu)
-{
- return armv8_pmu_init_nogroups(cpu_pmu, "armv8_brcm_vulcan",
- armv8_vulcan_map_event);
-}
+PMUV3_INIT_MAP_EVENT(armv8_cortex_a35, armv8_a53_map_event)
+PMUV3_INIT_MAP_EVENT(armv8_cortex_a53, armv8_a53_map_event)
+PMUV3_INIT_MAP_EVENT(armv8_cortex_a57, armv8_a57_map_event)
+PMUV3_INIT_MAP_EVENT(armv8_cortex_a72, armv8_a57_map_event)
+PMUV3_INIT_MAP_EVENT(armv8_cortex_a73, armv8_a73_map_event)
+PMUV3_INIT_MAP_EVENT(armv8_cavium_thunder, armv8_thunder_map_event)
+PMUV3_INIT_MAP_EVENT(armv8_brcm_vulcan, armv8_vulcan_map_event)
static const struct of_device_id armv8_pmu_of_device_ids[] = {
{.compatible = "arm,armv8-pmuv3", .data = armv8_pmuv3_pmu_init},
+ {.compatible = "arm,c1-nano-pmu", .data = armv8_c1_nano_pmu_init},
+ {.compatible = "arm,c1-premium-pmu", .data = armv8_c1_premium_pmu_init},
+ {.compatible = "arm,c1-pro-pmu", .data = armv8_c1_pro_pmu_init},
+ {.compatible = "arm,c1-ultra-pmu", .data = armv8_c1_ultra_pmu_init},
{.compatible = "arm,cortex-a34-pmu", .data = armv8_cortex_a34_pmu_init},
- {.compatible = "arm,cortex-a35-pmu", .data = armv8_a35_pmu_init},
- {.compatible = "arm,cortex-a53-pmu", .data = armv8_a53_pmu_init},
+ {.compatible = "arm,cortex-a35-pmu", .data = armv8_cortex_a35_pmu_init},
+ {.compatible = "arm,cortex-a53-pmu", .data = armv8_cortex_a53_pmu_init},
{.compatible = "arm,cortex-a55-pmu", .data = armv8_cortex_a55_pmu_init},
- {.compatible = "arm,cortex-a57-pmu", .data = armv8_a57_pmu_init},
+ {.compatible = "arm,cortex-a57-pmu", .data = armv8_cortex_a57_pmu_init},
{.compatible = "arm,cortex-a65-pmu", .data = armv8_cortex_a65_pmu_init},
- {.compatible = "arm,cortex-a72-pmu", .data = armv8_a72_pmu_init},
- {.compatible = "arm,cortex-a73-pmu", .data = armv8_a73_pmu_init},
+ {.compatible = "arm,cortex-a72-pmu", .data = armv8_cortex_a72_pmu_init},
+ {.compatible = "arm,cortex-a73-pmu", .data = armv8_cortex_a73_pmu_init},
{.compatible = "arm,cortex-a75-pmu", .data = armv8_cortex_a75_pmu_init},
{.compatible = "arm,cortex-a76-pmu", .data = armv8_cortex_a76_pmu_init},
{.compatible = "arm,cortex-a77-pmu", .data = armv8_cortex_a77_pmu_init},
{.compatible = "arm,cortex-a78-pmu", .data = armv8_cortex_a78_pmu_init},
+ {.compatible = "arm,cortex-a320-pmu", .data = armv9_cortex_a320_pmu_init},
{.compatible = "arm,cortex-a510-pmu", .data = armv9_cortex_a510_pmu_init},
{.compatible = "arm,cortex-a520-pmu", .data = armv9_cortex_a520_pmu_init},
+ {.compatible = "arm,cortex-a520ae-pmu", .data = armv9_cortex_a520ae_pmu_init},
{.compatible = "arm,cortex-a710-pmu", .data = armv9_cortex_a710_pmu_init},
{.compatible = "arm,cortex-a715-pmu", .data = armv9_cortex_a715_pmu_init},
{.compatible = "arm,cortex-a720-pmu", .data = armv9_cortex_a720_pmu_init},
+ {.compatible = "arm,cortex-a720ae-pmu", .data = armv9_cortex_a720ae_pmu_init},
+ {.compatible = "arm,cortex-a725-pmu", .data = armv9_cortex_a725_pmu_init},
{.compatible = "arm,cortex-x1-pmu", .data = armv8_cortex_x1_pmu_init},
{.compatible = "arm,cortex-x2-pmu", .data = armv9_cortex_x2_pmu_init},
{.compatible = "arm,cortex-x3-pmu", .data = armv9_cortex_x3_pmu_init},
{.compatible = "arm,cortex-x4-pmu", .data = armv9_cortex_x4_pmu_init},
+ {.compatible = "arm,cortex-x925-pmu", .data = armv9_cortex_x925_pmu_init},
{.compatible = "arm,neoverse-e1-pmu", .data = armv8_neoverse_e1_pmu_init},
{.compatible = "arm,neoverse-n1-pmu", .data = armv8_neoverse_n1_pmu_init},
{.compatible = "arm,neoverse-n2-pmu", .data = armv9_neoverse_n2_pmu_init},
+ {.compatible = "arm,neoverse-n3-pmu", .data = armv9_neoverse_n3_pmu_init},
{.compatible = "arm,neoverse-v1-pmu", .data = armv8_neoverse_v1_pmu_init},
- {.compatible = "cavium,thunder-pmu", .data = armv8_thunder_pmu_init},
- {.compatible = "brcm,vulcan-pmu", .data = armv8_vulcan_pmu_init},
+ {.compatible = "arm,neoverse-v2-pmu", .data = armv8_neoverse_v2_pmu_init},
+ {.compatible = "arm,neoverse-v3-pmu", .data = armv8_neoverse_v3_pmu_init},
+ {.compatible = "arm,neoverse-v3ae-pmu", .data = armv8_neoverse_v3ae_pmu_init},
+ {.compatible = "arm,rainier-pmu", .data = armv8_rainier_pmu_init},
+ {.compatible = "cavium,thunder-pmu", .data = armv8_cavium_thunder_pmu_init},
+ {.compatible = "brcm,vulcan-pmu", .data = armv8_brcm_vulcan_pmu_init},
{.compatible = "nvidia,carmel-pmu", .data = armv8_nvidia_carmel_pmu_init},
{.compatible = "nvidia,denver-pmu", .data = armv8_nvidia_denver_pmu_init},
+ {.compatible = "samsung,mongoose-pmu", .data = armv8_samsung_mongoose_pmu_init},
{},
};
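The PMUV3_INIT_SIMPLE()/PMUV3_INIT_MAP_EVENT() helpers above replace the hand-written per-core wrappers with generated ones. A self-contained userspace sketch of the preprocessor pattern they rely on (the names here are purely illustrative): "name##_pmu_init" builds the init symbol and "#name" supplies the PMU name string.

#include <stdio.h>

#define INIT_MAP_EVENT(name, map_event)                         \
static int name##_pmu_init(void)                                \
{                                                               \
        /* In the driver this would call armv8_pmu_init(). */   \
        printf("init %s with %s\n", #name, #map_event);         \
        return 0;                                               \
}

INIT_MAP_EVENT(armv8_cortex_a53, armv8_a53_map_event)

int main(void)
{
        return armv8_cortex_a53_pmu_init();
}

Expanding the real macro the same way yields a per-core armv8_cortex_a53_pmu_init() that simply forwards "armv8_cortex_a53" and armv8_a53_map_event to armv8_pmu_init(), which is exactly what the removed open-coded wrappers did.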
diff --git a/drivers/perf/arm_smmuv3_pmu.c b/drivers/perf/arm_smmuv3_pmu.c
index 6303b82566f9..621f02a7f43b 100644
--- a/drivers/perf/arm_smmuv3_pmu.c
+++ b/drivers/perf/arm_smmuv3_pmu.c
@@ -431,6 +431,17 @@ static int smmu_pmu_event_init(struct perf_event *event)
return -EINVAL;
}
+ /*
+ * Ensure all events are on the same cpu so all events are in the
+ * same cpu context, to avoid races on pmu_enable etc.
+ */
+ event->cpu = smmu_pmu->on_cpu;
+
+ hwc->idx = -1;
+
+ if (event->group_leader == event)
+ return 0;
+
for_each_sibling_event(sibling, event->group_leader) {
if (is_software_event(sibling))
continue;
@@ -442,14 +453,6 @@ static int smmu_pmu_event_init(struct perf_event *event)
return -EINVAL;
}
- hwc->idx = -1;
-
- /*
- * Ensure all events are on the same cpu so all events are in the
- * same cpu context, to avoid races on pmu_enable etc.
- */
- event->cpu = smmu_pmu->on_cpu;
-
return 0;
}
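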
@@ -716,7 +719,7 @@ static void smmu_pmu_free_msis(void *data)
{
struct device *dev = data;
- platform_msi_domain_free_irqs(dev);
+ platform_device_msi_free_irqs_all(dev);
}
static void smmu_pmu_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
@@ -746,7 +749,7 @@ static void smmu_pmu_setup_msi(struct smmu_pmu *pmu)
if (!(readl(pmu->reg_base + SMMU_PMCG_CFGR) & SMMU_PMCG_CFGR_MSI))
return;
- ret = platform_msi_domain_alloc_irqs(dev, 1, smmu_pmu_write_msi_msg);
+ ret = platform_device_msi_init_and_alloc_irqs(dev, 1, smmu_pmu_write_msi_msg);
if (ret) {
dev_warn(dev, "failed to allocate MSIs\n");
return;
@@ -860,6 +863,7 @@ static int smmu_pmu_probe(struct platform_device *pdev)
smmu_pmu->pmu = (struct pmu) {
.module = THIS_MODULE,
+ .parent = &pdev->dev,
.task_ctx_nr = perf_invalid_context,
.pmu_enable = smmu_pmu_enable,
.pmu_disable = smmu_pmu_disable,
@@ -965,14 +969,12 @@ out_unregister:
return err;
}
-static int smmu_pmu_remove(struct platform_device *pdev)
+static void smmu_pmu_remove(struct platform_device *pdev)
{
struct smmu_pmu *smmu_pmu = platform_get_drvdata(pdev);
perf_pmu_unregister(&smmu_pmu->pmu);
cpuhp_state_remove_instance_nocalls(cpuhp_state_num, &smmu_pmu->node);
-
- return 0;
}
static void smmu_pmu_shutdown(struct platform_device *pdev)
diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c
index d2b0cbf0e0c4..4801115f2b54 100644
--- a/drivers/perf/arm_spe_pmu.c
+++ b/drivers/perf/arm_spe_pmu.c
@@ -41,7 +41,7 @@
/*
* Cache if the event is allowed to trace Context information.
- * This allows us to perform the check, i.e, perfmon_capable(),
+ * This allows us to perform the check, i.e., perf_allow_kernel(),
* in the context of the event owner, once, during the event_init().
*/
#define SPE_PMU_HW_FLAGS_CX 0x00001
@@ -50,7 +50,7 @@ static_assert((PERF_EVENT_FLAG_ARCH & SPE_PMU_HW_FLAGS_CX) == SPE_PMU_HW_FLAGS_C
static void set_spe_event_has_cx(struct perf_event *event)
{
- if (IS_ENABLED(CONFIG_PID_IN_CONTEXTIDR) && perfmon_capable())
+ if (IS_ENABLED(CONFIG_PID_IN_CONTEXTIDR) && !perf_allow_kernel())
event->hw.flags |= SPE_PMU_HW_FLAGS_CX;
}
@@ -85,9 +85,13 @@ struct arm_spe_pmu {
#define SPE_PMU_FEAT_LDS (1UL << 4)
#define SPE_PMU_FEAT_ERND (1UL << 5)
#define SPE_PMU_FEAT_INV_FILT_EVT (1UL << 6)
+#define SPE_PMU_FEAT_DISCARD (1UL << 7)
+#define SPE_PMU_FEAT_EFT (1UL << 8)
+#define SPE_PMU_FEAT_FDS (1UL << 9)
#define SPE_PMU_FEAT_DEV_PROBED (1UL << 63)
u64 features;
+ u64 pmsevfr_res0;
u16 max_record_sz;
u16 align;
struct perf_output_handle __percpu *handle;
@@ -96,7 +100,8 @@ struct arm_spe_pmu {
#define to_spe_pmu(p) (container_of(p, struct arm_spe_pmu, pmu))
/* Convert a free-running index from perf into an SPE buffer offset */
-#define PERF_IDX2OFF(idx, buf) ((idx) % ((buf)->nr_pages << PAGE_SHIFT))
+#define PERF_IDX2OFF(idx, buf) \
+ ((idx) % ((unsigned long)(buf)->nr_pages << PAGE_SHIFT))
/* Keep track of our dynamic hotplug state */
static enum cpuhp_state arm_spe_pmu_online;
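The widening cast added to PERF_IDX2OFF matters because the shift was previously evaluated in 32-bit arithmetic, which wraps for AUX buffers of 4 GiB or more and then makes the modulo in the macro divide by zero. A minimal userspace sketch of the difference, assuming the common 4 KiB page size (PAGE_SHIFT == 12) on a 64-bit host; an unsigned count is used here so the wrap stays well defined:

#include <stdio.h>

#define PAGE_SHIFT 12   /* assumed 4 KiB pages */

int main(void)
{
        unsigned int nr_pages = 1u << 20;       /* a 4 GiB AUX buffer */

        /* Shift done in 32-bit arithmetic, as before the patch: wraps to 0. */
        unsigned long narrow = nr_pages << PAGE_SHIFT;

        /* Widened first, as the patched macro does: 4 GiB as intended. */
        unsigned long wide = (unsigned long)nr_pages << PAGE_SHIFT;

        printf("narrow = %lu bytes, wide = %lu bytes\n", narrow, wide);
        return 0;
}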
@@ -114,6 +119,7 @@ enum arm_spe_pmu_capabilities {
SPE_PMU_CAP_FEAT_MAX,
SPE_PMU_CAP_CNT_SZ = SPE_PMU_CAP_FEAT_MAX,
SPE_PMU_CAP_MIN_IVAL,
+ SPE_PMU_CAP_EVENT_FILTER,
};
static int arm_spe_pmu_feat_caps[SPE_PMU_CAP_FEAT_MAX] = {
@@ -121,7 +127,7 @@ static int arm_spe_pmu_feat_caps[SPE_PMU_CAP_FEAT_MAX] = {
[SPE_PMU_CAP_ERND] = SPE_PMU_FEAT_ERND,
};
-static u32 arm_spe_pmu_cap_get(struct arm_spe_pmu *spe_pmu, int cap)
+static u64 arm_spe_pmu_cap_get(struct arm_spe_pmu *spe_pmu, int cap)
{
if (cap < SPE_PMU_CAP_FEAT_MAX)
return !!(spe_pmu->features & arm_spe_pmu_feat_caps[cap]);
@@ -131,6 +137,8 @@ static u32 arm_spe_pmu_cap_get(struct arm_spe_pmu *spe_pmu, int cap)
return spe_pmu->counter_sz;
case SPE_PMU_CAP_MIN_IVAL:
return spe_pmu->min_period;
+ case SPE_PMU_CAP_EVENT_FILTER:
+ return ~spe_pmu->pmsevfr_res0;
default:
WARN(1, "unknown cap %d\n", cap);
}
@@ -147,7 +155,19 @@ static ssize_t arm_spe_pmu_cap_show(struct device *dev,
container_of(attr, struct dev_ext_attribute, attr);
int cap = (long)ea->var;
- return sysfs_emit(buf, "%u\n", arm_spe_pmu_cap_get(spe_pmu, cap));
+ return sysfs_emit(buf, "%llu\n", arm_spe_pmu_cap_get(spe_pmu, cap));
+}
+
+static ssize_t arm_spe_pmu_cap_show_hex(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct arm_spe_pmu *spe_pmu = dev_get_drvdata(dev);
+ struct dev_ext_attribute *ea =
+ container_of(attr, struct dev_ext_attribute, attr);
+ int cap = (long)ea->var;
+
+ return sysfs_emit(buf, "0x%llx\n", arm_spe_pmu_cap_get(spe_pmu, cap));
}
#define SPE_EXT_ATTR_ENTRY(_name, _func, _var) \
@@ -157,12 +177,15 @@ static ssize_t arm_spe_pmu_cap_show(struct device *dev,
#define SPE_CAP_EXT_ATTR_ENTRY(_name, _var) \
SPE_EXT_ATTR_ENTRY(_name, arm_spe_pmu_cap_show, _var)
+#define SPE_CAP_EXT_ATTR_ENTRY_HEX(_name, _var) \
+ SPE_EXT_ATTR_ENTRY(_name, arm_spe_pmu_cap_show_hex, _var)
static struct attribute *arm_spe_pmu_cap_attr[] = {
SPE_CAP_EXT_ATTR_ENTRY(arch_inst, SPE_PMU_CAP_ARCH_INST),
SPE_CAP_EXT_ATTR_ENTRY(ernd, SPE_PMU_CAP_ERND),
SPE_CAP_EXT_ATTR_ENTRY(count_size, SPE_PMU_CAP_CNT_SZ),
SPE_CAP_EXT_ATTR_ENTRY(min_interval, SPE_PMU_CAP_MIN_IVAL),
+ SPE_CAP_EXT_ATTR_ENTRY_HEX(event_filter, SPE_PMU_CAP_EVENT_FILTER),
NULL,
};
@@ -193,6 +216,30 @@ static const struct attribute_group arm_spe_pmu_cap_group = {
#define ATTR_CFG_FLD_store_filter_CFG config /* PMSFCR_EL1.ST */
#define ATTR_CFG_FLD_store_filter_LO 34
#define ATTR_CFG_FLD_store_filter_HI 34
+#define ATTR_CFG_FLD_discard_CFG config /* PMBLIMITR_EL1.FM = DISCARD */
+#define ATTR_CFG_FLD_discard_LO 35
+#define ATTR_CFG_FLD_discard_HI 35
+#define ATTR_CFG_FLD_branch_filter_mask_CFG config /* PMSFCR_EL1.Bm */
+#define ATTR_CFG_FLD_branch_filter_mask_LO 36
+#define ATTR_CFG_FLD_branch_filter_mask_HI 36
+#define ATTR_CFG_FLD_load_filter_mask_CFG config /* PMSFCR_EL1.LDm */
+#define ATTR_CFG_FLD_load_filter_mask_LO 37
+#define ATTR_CFG_FLD_load_filter_mask_HI 37
+#define ATTR_CFG_FLD_store_filter_mask_CFG config /* PMSFCR_EL1.STm */
+#define ATTR_CFG_FLD_store_filter_mask_LO 38
+#define ATTR_CFG_FLD_store_filter_mask_HI 38
+#define ATTR_CFG_FLD_simd_filter_CFG config /* PMSFCR_EL1.SIMD */
+#define ATTR_CFG_FLD_simd_filter_LO 39
+#define ATTR_CFG_FLD_simd_filter_HI 39
+#define ATTR_CFG_FLD_simd_filter_mask_CFG config /* PMSFCR_EL1.SIMDm */
+#define ATTR_CFG_FLD_simd_filter_mask_LO 40
+#define ATTR_CFG_FLD_simd_filter_mask_HI 40
+#define ATTR_CFG_FLD_float_filter_CFG config /* PMSFCR_EL1.FP */
+#define ATTR_CFG_FLD_float_filter_LO 41
+#define ATTR_CFG_FLD_float_filter_HI 41
+#define ATTR_CFG_FLD_float_filter_mask_CFG config /* PMSFCR_EL1.FPm */
+#define ATTR_CFG_FLD_float_filter_mask_LO 42
+#define ATTR_CFG_FLD_float_filter_mask_HI 42
#define ATTR_CFG_FLD_event_filter_CFG config1 /* PMSEVFR_EL1 */
#define ATTR_CFG_FLD_event_filter_LO 0
@@ -206,38 +253,29 @@ static const struct attribute_group arm_spe_pmu_cap_group = {
#define ATTR_CFG_FLD_inv_event_filter_LO 0
#define ATTR_CFG_FLD_inv_event_filter_HI 63
-/* Why does everything I do descend into this? */
-#define __GEN_PMU_FORMAT_ATTR(cfg, lo, hi) \
- (lo) == (hi) ? #cfg ":" #lo "\n" : #cfg ":" #lo "-" #hi
-
-#define _GEN_PMU_FORMAT_ATTR(cfg, lo, hi) \
- __GEN_PMU_FORMAT_ATTR(cfg, lo, hi)
-
-#define GEN_PMU_FORMAT_ATTR(name) \
- PMU_FORMAT_ATTR(name, \
- _GEN_PMU_FORMAT_ATTR(ATTR_CFG_FLD_##name##_CFG, \
- ATTR_CFG_FLD_##name##_LO, \
- ATTR_CFG_FLD_##name##_HI))
-
-#define _ATTR_CFG_GET_FLD(attr, cfg, lo, hi) \
- ((((attr)->cfg) >> lo) & GENMASK(hi - lo, 0))
-
-#define ATTR_CFG_GET_FLD(attr, name) \
- _ATTR_CFG_GET_FLD(attr, \
- ATTR_CFG_FLD_##name##_CFG, \
- ATTR_CFG_FLD_##name##_LO, \
- ATTR_CFG_FLD_##name##_HI)
+#define ATTR_CFG_FLD_inv_data_src_filter_CFG config4 /* inverse of PMSDSFR_EL1 */
+#define ATTR_CFG_FLD_inv_data_src_filter_LO 0
+#define ATTR_CFG_FLD_inv_data_src_filter_HI 63
GEN_PMU_FORMAT_ATTR(ts_enable);
GEN_PMU_FORMAT_ATTR(pa_enable);
GEN_PMU_FORMAT_ATTR(pct_enable);
GEN_PMU_FORMAT_ATTR(jitter);
GEN_PMU_FORMAT_ATTR(branch_filter);
+GEN_PMU_FORMAT_ATTR(branch_filter_mask);
GEN_PMU_FORMAT_ATTR(load_filter);
+GEN_PMU_FORMAT_ATTR(load_filter_mask);
GEN_PMU_FORMAT_ATTR(store_filter);
+GEN_PMU_FORMAT_ATTR(store_filter_mask);
+GEN_PMU_FORMAT_ATTR(simd_filter);
+GEN_PMU_FORMAT_ATTR(simd_filter_mask);
+GEN_PMU_FORMAT_ATTR(float_filter);
+GEN_PMU_FORMAT_ATTR(float_filter_mask);
GEN_PMU_FORMAT_ATTR(event_filter);
GEN_PMU_FORMAT_ATTR(inv_event_filter);
+GEN_PMU_FORMAT_ATTR(inv_data_src_filter);
GEN_PMU_FORMAT_ATTR(min_latency);
+GEN_PMU_FORMAT_ATTR(discard);
static struct attribute *arm_spe_pmu_formats_attr[] = {
&format_attr_ts_enable.attr,
@@ -245,11 +283,20 @@ static struct attribute *arm_spe_pmu_formats_attr[] = {
&format_attr_pct_enable.attr,
&format_attr_jitter.attr,
&format_attr_branch_filter.attr,
+ &format_attr_branch_filter_mask.attr,
&format_attr_load_filter.attr,
+ &format_attr_load_filter_mask.attr,
&format_attr_store_filter.attr,
+ &format_attr_store_filter_mask.attr,
+ &format_attr_simd_filter.attr,
+ &format_attr_simd_filter_mask.attr,
+ &format_attr_float_filter.attr,
+ &format_attr_float_filter_mask.attr,
&format_attr_event_filter.attr,
&format_attr_inv_event_filter.attr,
+ &format_attr_inv_data_src_filter.attr,
&format_attr_min_latency.attr,
+ &format_attr_discard.attr,
NULL,
};
@@ -260,9 +307,26 @@ static umode_t arm_spe_pmu_format_attr_is_visible(struct kobject *kobj,
struct device *dev = kobj_to_dev(kobj);
struct arm_spe_pmu *spe_pmu = dev_get_drvdata(dev);
+ if (attr == &format_attr_discard.attr && !(spe_pmu->features & SPE_PMU_FEAT_DISCARD))
+ return 0;
+
if (attr == &format_attr_inv_event_filter.attr && !(spe_pmu->features & SPE_PMU_FEAT_INV_FILT_EVT))
return 0;
+ if (attr == &format_attr_inv_data_src_filter.attr &&
+ !(spe_pmu->features & SPE_PMU_FEAT_FDS))
+ return 0;
+
+ if ((attr == &format_attr_branch_filter_mask.attr ||
+ attr == &format_attr_load_filter_mask.attr ||
+ attr == &format_attr_store_filter_mask.attr ||
+ attr == &format_attr_simd_filter.attr ||
+ attr == &format_attr_simd_filter_mask.attr ||
+ attr == &format_attr_float_filter.attr ||
+ attr == &format_attr_float_filter_mask.attr) &&
+ !(spe_pmu->features & SPE_PMU_FEAT_EFT))
+ return 0;
+
return attr->mode;
}
@@ -321,17 +385,21 @@ static u64 arm_spe_event_to_pmscr(struct perf_event *event)
static void arm_spe_event_sanitise_period(struct perf_event *event)
{
- struct arm_spe_pmu *spe_pmu = to_spe_pmu(event->pmu);
u64 period = event->hw.sample_period;
u64 max_period = PMSIRR_EL1_INTERVAL_MASK;
- if (period < spe_pmu->min_period)
- period = spe_pmu->min_period;
- else if (period > max_period)
- period = max_period;
- else
- period &= max_period;
+ /*
+ * The PMSIDR_EL1.Interval field (stored in spe_pmu->min_period) is a
+ * recommendation for the minimum interval, not a hardware limitation.
+ *
+ * According to the Arm ARM (DDI 0487 L.a), section D24.7.12 PMSIRR_EL1,
+ * Sampling Interval Reload Register, the INTERVAL field (bits [31:8])
+ * states: "Software must set this to a nonzero value". Use 1 as the
+ * minimum value.
+ */
+ u64 min_period = FIELD_PREP(PMSIRR_EL1_INTERVAL_MASK, 1);
+ period = clamp_t(u64, period, min_period, max_period) & max_period;
event->hw.sample_period = period;
}
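Condensed userspace sketch (illustration only) of the new sanitisation above: the requested period is clamped into the PMSIRR_EL1.INTERVAL range, bits [31:8], and the low eight bits are then masked off, so the smallest programmable period is 256 and the result is always a multiple of 256.

#include <stdint.h>
#include <stdio.h>

#define INTERVAL_MASK   0xFFFFFF00ULL   /* PMSIRR_EL1.INTERVAL, bits [31:8] */
#define MIN_PERIOD      (1ULL << 8)     /* FIELD_PREP(INTERVAL_MASK, 1)     */

static uint64_t sanitise_period(uint64_t period)
{
        if (period < MIN_PERIOD)
                period = MIN_PERIOD;
        else if (period > INTERVAL_MASK)
                period = INTERVAL_MASK;
        return period & INTERVAL_MASK;  /* same effect as clamp_t() & mask */
}

int main(void)
{
        printf("%llu\n", (unsigned long long)sanitise_period(7));      /* -> 256        */
        printf("%llu\n", (unsigned long long)sanitise_period(1000));   /* -> 768        */
        printf("%llu\n", (unsigned long long)sanitise_period(~0ULL));  /* -> 4294967040 */
        return 0;
}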
@@ -354,8 +422,15 @@ static u64 arm_spe_event_to_pmsfcr(struct perf_event *event)
u64 reg = 0;
reg |= FIELD_PREP(PMSFCR_EL1_LD, ATTR_CFG_GET_FLD(attr, load_filter));
+ reg |= FIELD_PREP(PMSFCR_EL1_LDm, ATTR_CFG_GET_FLD(attr, load_filter_mask));
reg |= FIELD_PREP(PMSFCR_EL1_ST, ATTR_CFG_GET_FLD(attr, store_filter));
+ reg |= FIELD_PREP(PMSFCR_EL1_STm, ATTR_CFG_GET_FLD(attr, store_filter_mask));
reg |= FIELD_PREP(PMSFCR_EL1_B, ATTR_CFG_GET_FLD(attr, branch_filter));
+ reg |= FIELD_PREP(PMSFCR_EL1_Bm, ATTR_CFG_GET_FLD(attr, branch_filter_mask));
+ reg |= FIELD_PREP(PMSFCR_EL1_SIMD, ATTR_CFG_GET_FLD(attr, simd_filter));
+ reg |= FIELD_PREP(PMSFCR_EL1_SIMDm, ATTR_CFG_GET_FLD(attr, simd_filter_mask));
+ reg |= FIELD_PREP(PMSFCR_EL1_FP, ATTR_CFG_GET_FLD(attr, float_filter));
+ reg |= FIELD_PREP(PMSFCR_EL1_FPm, ATTR_CFG_GET_FLD(attr, float_filter_mask));
if (reg)
reg |= PMSFCR_EL1_FT;
@@ -366,6 +441,9 @@ static u64 arm_spe_event_to_pmsfcr(struct perf_event *event)
if (ATTR_CFG_GET_FLD(attr, inv_event_filter))
reg |= PMSFCR_EL1_FnE;
+ if (ATTR_CFG_GET_FLD(attr, inv_data_src_filter))
+ reg |= PMSFCR_EL1_FDS;
+
if (ATTR_CFG_GET_FLD(attr, min_latency))
reg |= PMSFCR_EL1_FL;
@@ -390,6 +468,17 @@ static u64 arm_spe_event_to_pmslatfr(struct perf_event *event)
return FIELD_PREP(PMSLATFR_EL1_MINLAT, ATTR_CFG_GET_FLD(attr, min_latency));
}
+static u64 arm_spe_event_to_pmsdsfr(struct perf_event *event)
+{
+ struct perf_event_attr *attr = &event->attr;
+
+ /*
+ * Data src filter is inverted so that the default value of 0 is
+ * equivalent to no filtering.
+ */
+ return ~ATTR_CFG_GET_FLD(attr, inv_data_src_filter);
+}
+
static void arm_spe_pmu_pad_buf(struct perf_output_handle *handle, int len)
{
struct arm_spe_pmu_buf *buf = perf_get_aux(handle);
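The arm_spe_event_to_pmsdsfr() helper added above inverts the user-supplied mask so that the default config4 of 0 programs PMSDSFR_EL1 to all ones, meaning every data source passes the filter, and each bit set in inv_data_src_filter removes one source. A quick userspace illustration:

#include <stdint.h>
#include <stdio.h>

static uint64_t to_pmsdsfr(uint64_t inv_data_src_filter)
{
        return ~inv_data_src_filter;    /* 0 -> allow everything */
}

int main(void)
{
        printf("default:       0x%016llx\n", (unsigned long long)to_pmsdsfr(0));
        printf("exclude src 3: 0x%016llx\n", (unsigned long long)to_pmsdsfr(1ULL << 3));
        return 0;
}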
@@ -524,6 +613,12 @@ static void arm_spe_perf_aux_output_begin(struct perf_output_handle *handle,
u64 base, limit;
struct arm_spe_pmu_buf *buf;
+ if (ATTR_CFG_GET_FLD(&event->attr, discard)) {
+ limit = FIELD_PREP(PMBLIMITR_EL1_FM, PMBLIMITR_EL1_FM_DISCARD);
+ limit |= PMBLIMITR_EL1_E;
+ goto out_write_limit;
+ }
+
/* Start a new aux session */
buf = perf_aux_output_begin(handle, event);
if (!buf) {
@@ -700,20 +795,6 @@ static irqreturn_t arm_spe_pmu_irq_handler(int irq, void *dev)
return IRQ_HANDLED;
}
-static u64 arm_spe_pmsevfr_res0(u16 pmsver)
-{
- switch (pmsver) {
- case ID_AA64DFR0_EL1_PMSVer_IMP:
- return PMSEVFR_EL1_RES0_IMP;
- case ID_AA64DFR0_EL1_PMSVer_V1P1:
- return PMSEVFR_EL1_RES0_V1P1;
- case ID_AA64DFR0_EL1_PMSVer_V1P2:
- /* Return the highest version we support in default */
- default:
- return PMSEVFR_EL1_RES0_V1P2;
- }
-}
-
/* Perf callbacks */
static int arm_spe_pmu_event_init(struct perf_event *event)
{
@@ -729,10 +810,14 @@ static int arm_spe_pmu_event_init(struct perf_event *event)
!cpumask_test_cpu(event->cpu, &spe_pmu->supported_cpus))
return -ENOENT;
- if (arm_spe_event_to_pmsevfr(event) & arm_spe_pmsevfr_res0(spe_pmu->pmsver))
+ if (arm_spe_event_to_pmsevfr(event) & spe_pmu->pmsevfr_res0)
+ return -EOPNOTSUPP;
+
+ if (arm_spe_event_to_pmsnevfr(event) & spe_pmu->pmsevfr_res0)
return -EOPNOTSUPP;
- if (arm_spe_event_to_pmsnevfr(event) & arm_spe_pmsevfr_res0(spe_pmu->pmsver))
+ if (arm_spe_event_to_pmsdsfr(event) != U64_MAX &&
+ !(spe_pmu->features & SPE_PMU_FEAT_FDS))
return -EOPNOTSUPP;
if (attr->exclude_idle)
@@ -765,11 +850,24 @@ static int arm_spe_pmu_event_init(struct perf_event *event)
!(spe_pmu->features & SPE_PMU_FEAT_FILT_LAT))
return -EOPNOTSUPP;
+ if ((FIELD_GET(PMSFCR_EL1_LDm, reg) ||
+ FIELD_GET(PMSFCR_EL1_STm, reg) ||
+ FIELD_GET(PMSFCR_EL1_Bm, reg) ||
+ FIELD_GET(PMSFCR_EL1_SIMD, reg) ||
+ FIELD_GET(PMSFCR_EL1_SIMDm, reg) ||
+ FIELD_GET(PMSFCR_EL1_FP, reg) ||
+ FIELD_GET(PMSFCR_EL1_FPm, reg)) &&
+ !(spe_pmu->features & SPE_PMU_FEAT_EFT))
+ return -EOPNOTSUPP;
+
+ if (ATTR_CFG_GET_FLD(&event->attr, discard) &&
+ !(spe_pmu->features & SPE_PMU_FEAT_DISCARD))
+ return -EOPNOTSUPP;
+
set_spe_event_has_cx(event);
reg = arm_spe_event_to_pmscr(event);
- if (!perfmon_capable() &&
- (reg & (PMSCR_EL1_PA | PMSCR_EL1_PCT)))
- return -EACCES;
+ if (reg & (PMSCR_EL1_PA | PMSCR_EL1_PCT))
+ return perf_allow_kernel();
return 0;
}
@@ -797,6 +895,11 @@ static void arm_spe_pmu_start(struct perf_event *event, int flags)
write_sysreg_s(reg, SYS_PMSNEVFR_EL1);
}
+ if (spe_pmu->features & SPE_PMU_FEAT_FDS) {
+ reg = arm_spe_event_to_pmsdsfr(event);
+ write_sysreg_s(reg, SYS_PMSDSFR_EL1);
+ }
+
reg = arm_spe_event_to_pmslatfr(event);
write_sysreg_s(reg, SYS_PMSLATFR_EL1);
@@ -954,6 +1057,7 @@ static int arm_spe_pmu_perf_init(struct arm_spe_pmu *spe_pmu)
spe_pmu->pmu = (struct pmu) {
.module = THIS_MODULE,
+ .parent = &spe_pmu->pdev->dev,
.capabilities = PERF_PMU_CAP_EXCLUSIVE | PERF_PMU_CAP_ITRACE,
.attr_groups = arm_spe_pmu_attr_groups,
/*
@@ -1049,6 +1153,15 @@ static void __arm_spe_pmu_dev_probe(void *info)
if (FIELD_GET(PMSIDR_EL1_ERND, reg))
spe_pmu->features |= SPE_PMU_FEAT_ERND;
+ if (spe_pmu->pmsver >= ID_AA64DFR0_EL1_PMSVer_V1P2)
+ spe_pmu->features |= SPE_PMU_FEAT_DISCARD;
+
+ if (FIELD_GET(PMSIDR_EL1_EFT, reg))
+ spe_pmu->features |= SPE_PMU_FEAT_EFT;
+
+ if (FIELD_GET(PMSIDR_EL1_FDS, reg))
+ spe_pmu->features |= SPE_PMU_FEAT_FDS;
+
/* This field has a spaced out encoding, so just use a look-up */
fld = FIELD_GET(PMSIDR_EL1_INTERVAL, reg);
switch (fld) {
@@ -1103,6 +1216,10 @@ static void __arm_spe_pmu_dev_probe(void *info)
spe_pmu->counter_sz = 16;
}
+ /* Write all 1s and then read back. Unsupported filter bits are RAZ/WI. */
+ write_sysreg_s(U64_MAX, SYS_PMSEVFR_EL1);
+ spe_pmu->pmsevfr_res0 = ~read_sysreg_s(SYS_PMSEVFR_EL1);
+
dev_info(dev,
"probed SPEv1.%d for CPUs %*pbl [max_record_sz %u, align %u, features 0x%llx]\n",
spe_pmu->pmsver - 1, cpumask_pr_args(&spe_pmu->supported_cpus),
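The probe hunk above derives pmsevfr_res0 by writing all ones to PMSEVFR_EL1 and reading the value back: unimplemented filter bits are RAZ/WI, so only the supported bits survive and their complement is the RES0 mask (which is what the new event_filter capability then reports). A self-contained sketch of the idea with the register simulated in software; the implemented-bit mask below is made up for the example:

#include <stdint.h>
#include <stdio.h>

#define IMPLEMENTED_BITS 0x0000ffff0000ffffULL  /* hypothetical filter layout */

static uint64_t regval;

static void reg_write(uint64_t v)
{
        regval = v & IMPLEMENTED_BITS;          /* unsupported bits are RAZ/WI */
}

static uint64_t reg_read(void)
{
        return regval;
}

int main(void)
{
        uint64_t res0;

        reg_write(~0ULL);
        res0 = ~reg_read();
        printf("pmsevfr_res0 = 0x%016llx\n", (unsigned long long)res0);
        return 0;
}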
@@ -1179,8 +1296,8 @@ static int arm_spe_pmu_dev_init(struct arm_spe_pmu *spe_pmu)
return -ENXIO;
/* Request our PPIs (note that the IRQ is still disabled) */
- ret = request_percpu_irq(spe_pmu->irq, arm_spe_pmu_irq_handler, DRVNAME,
- spe_pmu->handle);
+ ret = request_percpu_irq_affinity(spe_pmu->irq, arm_spe_pmu_irq_handler,
+ DRVNAME, mask, spe_pmu->handle);
if (ret)
return ret;
@@ -1207,8 +1324,10 @@ static void arm_spe_pmu_dev_teardown(struct arm_spe_pmu *spe_pmu)
static int arm_spe_pmu_irq_probe(struct arm_spe_pmu *spe_pmu)
{
struct platform_device *pdev = spe_pmu->pdev;
- int irq = platform_get_irq(pdev, 0);
+ const struct cpumask *affinity;
+ int irq;
+ irq = platform_get_irq_affinity(pdev, 0, &affinity);
if (irq < 0)
return -ENXIO;
@@ -1217,10 +1336,7 @@ static int arm_spe_pmu_irq_probe(struct arm_spe_pmu *spe_pmu)
return -EINVAL;
}
- if (irq_get_percpu_devid_partition(irq, &spe_pmu->supported_cpus)) {
- dev_err(&pdev->dev, "failed to get PPI partition (%d)\n", irq);
- return -EINVAL;
- }
+ cpumask_copy(&spe_pmu->supported_cpus, affinity);
spe_pmu->irq = irq;
return 0;
@@ -1285,14 +1401,13 @@ out_free_handle:
return ret;
}
-static int arm_spe_pmu_device_remove(struct platform_device *pdev)
+static void arm_spe_pmu_device_remove(struct platform_device *pdev)
{
struct arm_spe_pmu *spe_pmu = platform_get_drvdata(pdev);
arm_spe_pmu_perf_destroy(spe_pmu);
arm_spe_pmu_dev_teardown(spe_pmu);
free_percpu(spe_pmu->handle);
- return 0;
}
static struct platform_driver arm_spe_pmu_driver = {
@@ -1303,7 +1418,7 @@ static struct platform_driver arm_spe_pmu_driver = {
.suppress_bind_attrs = true,
},
.probe = arm_spe_pmu_device_probe,
- .remove = arm_spe_pmu_device_remove,
+ .remove = arm_spe_pmu_device_remove,
};
static int __init arm_spe_pmu_init(void)
diff --git a/drivers/perf/arm_v6_pmu.c b/drivers/perf/arm_v6_pmu.c
new file mode 100644
index 000000000000..7cb12c8e06c7
--- /dev/null
+++ b/drivers/perf/arm_v6_pmu.c
@@ -0,0 +1,431 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ARMv6 Performance counter handling code.
+ *
+ * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
+ *
+ * ARMv6 has 2 configurable performance counters and a single cycle counter.
+ * They all share a single reset bit but can be written to zero so we can use
+ * that for a reset.
+ *
+ * The counters can't be individually enabled or disabled so when we remove
+ * one event and replace it with another we could get spurious counts from the
+ * wrong event. However, we can take advantage of the fact that the
+ * performance counters can export events to the event bus, and the event bus
+ * itself can be monitored. This requires that we *don't* export the events to
+ * the event bus. The procedure for disabling a configurable counter is:
+ * - change the counter to count the ETMEXTOUT[0] signal (0x20). This
+ * effectively stops the counter from counting.
+ * - disable the counter's interrupt generation (each counter has its
+ * own interrupt enable bit).
+ * Once stopped, the counter value can be written as 0 to reset.
+ *
+ * To enable a counter:
+ * - enable the counter's interrupt generation.
+ * - set the new event type.
+ *
+ * Note: the dedicated cycle counter only counts cycles and can't be
+ * enabled/disabled independently of the others. When we want to disable the
+ * cycle counter, we have to just disable the interrupt reporting and start
+ * ignoring that counter. When re-enabling, we have to reset the value and
+ * enable the interrupt.
+ */
+
+#include <asm/cputype.h>
+#include <asm/irq_regs.h>
+
+#include <linux/of.h>
+#include <linux/perf/arm_pmu.h>
+#include <linux/platform_device.h>
+
+enum armv6_perf_types {
+ ARMV6_PERFCTR_ICACHE_MISS = 0x0,
+ ARMV6_PERFCTR_IBUF_STALL = 0x1,
+ ARMV6_PERFCTR_DDEP_STALL = 0x2,
+ ARMV6_PERFCTR_ITLB_MISS = 0x3,
+ ARMV6_PERFCTR_DTLB_MISS = 0x4,
+ ARMV6_PERFCTR_BR_EXEC = 0x5,
+ ARMV6_PERFCTR_BR_MISPREDICT = 0x6,
+ ARMV6_PERFCTR_INSTR_EXEC = 0x7,
+ ARMV6_PERFCTR_DCACHE_HIT = 0x9,
+ ARMV6_PERFCTR_DCACHE_ACCESS = 0xA,
+ ARMV6_PERFCTR_DCACHE_MISS = 0xB,
+ ARMV6_PERFCTR_DCACHE_WBACK = 0xC,
+ ARMV6_PERFCTR_SW_PC_CHANGE = 0xD,
+ ARMV6_PERFCTR_MAIN_TLB_MISS = 0xF,
+ ARMV6_PERFCTR_EXPL_D_ACCESS = 0x10,
+ ARMV6_PERFCTR_LSU_FULL_STALL = 0x11,
+ ARMV6_PERFCTR_WBUF_DRAINED = 0x12,
+ ARMV6_PERFCTR_CPU_CYCLES = 0xFF,
+ ARMV6_PERFCTR_NOP = 0x20,
+};
+
+enum armv6_counters {
+ ARMV6_CYCLE_COUNTER = 0,
+ ARMV6_COUNTER0,
+ ARMV6_COUNTER1,
+ ARMV6_NUM_COUNTERS
+};
+
+/*
+ * The hardware events that we support. We do support cache operations but
+ * we have Harvard caches and no way to combine instruction and data
+ * accesses/misses in hardware.
+ */
+static const unsigned armv6_perf_map[PERF_COUNT_HW_MAX] = {
+ PERF_MAP_ALL_UNSUPPORTED,
+ [PERF_COUNT_HW_CPU_CYCLES] = ARMV6_PERFCTR_CPU_CYCLES,
+ [PERF_COUNT_HW_INSTRUCTIONS] = ARMV6_PERFCTR_INSTR_EXEC,
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV6_PERFCTR_BR_EXEC,
+ [PERF_COUNT_HW_BRANCH_MISSES] = ARMV6_PERFCTR_BR_MISPREDICT,
+ [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV6_PERFCTR_IBUF_STALL,
+ [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = ARMV6_PERFCTR_LSU_FULL_STALL,
+};
+
+static const unsigned armv6_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+ PERF_CACHE_MAP_ALL_UNSUPPORTED,
+
+ /*
+ * The performance counters don't differentiate between read and write
+ * accesses/misses so this isn't strictly correct, but it's the best we
+ * can do. Writes and reads get combined.
+ */
+ [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV6_PERFCTR_DCACHE_ACCESS,
+ [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV6_PERFCTR_DCACHE_MISS,
+ [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV6_PERFCTR_DCACHE_ACCESS,
+ [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV6_PERFCTR_DCACHE_MISS,
+
+ [C(L1I)][C(OP_READ)][C(RESULT_MISS)] = ARMV6_PERFCTR_ICACHE_MISS,
+
+ /*
+ * The ARM performance counters can count micro DTLB misses, micro ITLB
+ * misses and main TLB misses. There isn't an event for TLB misses, so
+ * use the micro misses here and if users want the main TLB misses they
+ * can use a raw counter.
+ */
+ [C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV6_PERFCTR_DTLB_MISS,
+ [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV6_PERFCTR_DTLB_MISS,
+
+ [C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV6_PERFCTR_ITLB_MISS,
+ [C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV6_PERFCTR_ITLB_MISS,
+};
+
+static inline unsigned long
+armv6_pmcr_read(void)
+{
+ u32 val;
+ asm volatile("mrc p15, 0, %0, c15, c12, 0" : "=r"(val));
+ return val;
+}
+
+static inline void
+armv6_pmcr_write(unsigned long val)
+{
+ asm volatile("mcr p15, 0, %0, c15, c12, 0" : : "r"(val));
+}
+
+#define ARMV6_PMCR_ENABLE (1 << 0)
+#define ARMV6_PMCR_CTR01_RESET (1 << 1)
+#define ARMV6_PMCR_CCOUNT_RESET (1 << 2)
+#define ARMV6_PMCR_CCOUNT_DIV (1 << 3)
+#define ARMV6_PMCR_COUNT0_IEN (1 << 4)
+#define ARMV6_PMCR_COUNT1_IEN (1 << 5)
+#define ARMV6_PMCR_CCOUNT_IEN (1 << 6)
+#define ARMV6_PMCR_COUNT0_OVERFLOW (1 << 8)
+#define ARMV6_PMCR_COUNT1_OVERFLOW (1 << 9)
+#define ARMV6_PMCR_CCOUNT_OVERFLOW (1 << 10)
+#define ARMV6_PMCR_EVT_COUNT0_SHIFT 20
+#define ARMV6_PMCR_EVT_COUNT0_MASK (0xFF << ARMV6_PMCR_EVT_COUNT0_SHIFT)
+#define ARMV6_PMCR_EVT_COUNT1_SHIFT 12
+#define ARMV6_PMCR_EVT_COUNT1_MASK (0xFF << ARMV6_PMCR_EVT_COUNT1_SHIFT)
+
+#define ARMV6_PMCR_OVERFLOWED_MASK \
+ (ARMV6_PMCR_COUNT0_OVERFLOW | ARMV6_PMCR_COUNT1_OVERFLOW | \
+ ARMV6_PMCR_CCOUNT_OVERFLOW)
+
+static inline int
+armv6_pmcr_has_overflowed(unsigned long pmcr)
+{
+ return pmcr & ARMV6_PMCR_OVERFLOWED_MASK;
+}
+
+static inline int
+armv6_pmcr_counter_has_overflowed(unsigned long pmcr,
+ enum armv6_counters counter)
+{
+ int ret = 0;
+
+ if (ARMV6_CYCLE_COUNTER == counter)
+ ret = pmcr & ARMV6_PMCR_CCOUNT_OVERFLOW;
+ else if (ARMV6_COUNTER0 == counter)
+ ret = pmcr & ARMV6_PMCR_COUNT0_OVERFLOW;
+ else if (ARMV6_COUNTER1 == counter)
+ ret = pmcr & ARMV6_PMCR_COUNT1_OVERFLOW;
+ else
+ WARN_ONCE(1, "invalid counter number (%d)\n", counter);
+
+ return ret;
+}
+
+static inline u64 armv6pmu_read_counter(struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ int counter = hwc->idx;
+ unsigned long value = 0;
+
+ if (ARMV6_CYCLE_COUNTER == counter)
+ asm volatile("mrc p15, 0, %0, c15, c12, 1" : "=r"(value));
+ else if (ARMV6_COUNTER0 == counter)
+ asm volatile("mrc p15, 0, %0, c15, c12, 2" : "=r"(value));
+ else if (ARMV6_COUNTER1 == counter)
+ asm volatile("mrc p15, 0, %0, c15, c12, 3" : "=r"(value));
+ else
+ WARN_ONCE(1, "invalid counter number (%d)\n", counter);
+
+ return value;
+}
+
+static inline void armv6pmu_write_counter(struct perf_event *event, u64 value)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ int counter = hwc->idx;
+
+ if (ARMV6_CYCLE_COUNTER == counter)
+ asm volatile("mcr p15, 0, %0, c15, c12, 1" : : "r"(value));
+ else if (ARMV6_COUNTER0 == counter)
+ asm volatile("mcr p15, 0, %0, c15, c12, 2" : : "r"(value));
+ else if (ARMV6_COUNTER1 == counter)
+ asm volatile("mcr p15, 0, %0, c15, c12, 3" : : "r"(value));
+ else
+ WARN_ONCE(1, "invalid counter number (%d)\n", counter);
+}
+
+static void armv6pmu_enable_event(struct perf_event *event)
+{
+ unsigned long val, mask, evt;
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = hwc->idx;
+
+ if (ARMV6_CYCLE_COUNTER == idx) {
+ mask = 0;
+ evt = ARMV6_PMCR_CCOUNT_IEN;
+ } else if (ARMV6_COUNTER0 == idx) {
+ mask = ARMV6_PMCR_EVT_COUNT0_MASK;
+ evt = (hwc->config_base << ARMV6_PMCR_EVT_COUNT0_SHIFT) |
+ ARMV6_PMCR_COUNT0_IEN;
+ } else if (ARMV6_COUNTER1 == idx) {
+ mask = ARMV6_PMCR_EVT_COUNT1_MASK;
+ evt = (hwc->config_base << ARMV6_PMCR_EVT_COUNT1_SHIFT) |
+ ARMV6_PMCR_COUNT1_IEN;
+ } else {
+ WARN_ONCE(1, "invalid counter number (%d)\n", idx);
+ return;
+ }
+
+ /*
+ * Mask out the current event and set the counter to count the event
+ * that we're interested in.
+ */
+ val = armv6_pmcr_read();
+ val &= ~mask;
+ val |= evt;
+ armv6_pmcr_write(val);
+}
+
+static irqreturn_t
+armv6pmu_handle_irq(struct arm_pmu *cpu_pmu)
+{
+ unsigned long pmcr = armv6_pmcr_read();
+ struct perf_sample_data data;
+ struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
+ struct pt_regs *regs;
+ int idx;
+
+ if (!armv6_pmcr_has_overflowed(pmcr))
+ return IRQ_NONE;
+
+ regs = get_irq_regs();
+
+ /*
+ * The interrupts are cleared by writing the overflow flags back to
+ * the control register. All of the other bits don't have any effect
+ * if they are rewritten, so write the whole value back.
+ */
+ armv6_pmcr_write(pmcr);
+
+ for_each_set_bit(idx, cpu_pmu->cntr_mask, ARMV6_NUM_COUNTERS) {
+ struct perf_event *event = cpuc->events[idx];
+ struct hw_perf_event *hwc;
+
+ /* Ignore if we don't have an event. */
+ if (!event)
+ continue;
+
+ /*
+ * We have a single interrupt for all counters. Check that
+ * each counter has overflowed before we process it.
+ */
+ if (!armv6_pmcr_counter_has_overflowed(pmcr, idx))
+ continue;
+
+ hwc = &event->hw;
+ armpmu_event_update(event);
+ perf_sample_data_init(&data, 0, hwc->last_period);
+ if (!armpmu_event_set_period(event))
+ continue;
+
+ perf_event_overflow(event, &data, regs);
+ }
+
+ /*
+ * Handle the pending perf events.
+ *
+ * Note: this call *must* be run with interrupts disabled. For
+ * platforms that can have the PMU interrupts raised as an NMI, this
+ * will not work.
+ */
+ irq_work_run();
+
+ return IRQ_HANDLED;
+}
+
+static void armv6pmu_start(struct arm_pmu *cpu_pmu)
+{
+ unsigned long val;
+
+ val = armv6_pmcr_read();
+ val |= ARMV6_PMCR_ENABLE;
+ armv6_pmcr_write(val);
+}
+
+static void armv6pmu_stop(struct arm_pmu *cpu_pmu)
+{
+ unsigned long val;
+
+ val = armv6_pmcr_read();
+ val &= ~ARMV6_PMCR_ENABLE;
+ armv6_pmcr_write(val);
+}
+
+static int
+armv6pmu_get_event_idx(struct pmu_hw_events *cpuc,
+ struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ /* Always place a cycle counter into the cycle counter. */
+ if (ARMV6_PERFCTR_CPU_CYCLES == hwc->config_base) {
+ if (test_and_set_bit(ARMV6_CYCLE_COUNTER, cpuc->used_mask))
+ return -EAGAIN;
+
+ return ARMV6_CYCLE_COUNTER;
+ } else {
+ /*
+ * For anything other than a cycle counter, try and use
+ * counter0 and counter1.
+ */
+ if (!test_and_set_bit(ARMV6_COUNTER1, cpuc->used_mask))
+ return ARMV6_COUNTER1;
+
+ if (!test_and_set_bit(ARMV6_COUNTER0, cpuc->used_mask))
+ return ARMV6_COUNTER0;
+
+ /* The counters are all in use. */
+ return -EAGAIN;
+ }
+}
+
+static void armv6pmu_clear_event_idx(struct pmu_hw_events *cpuc,
+ struct perf_event *event)
+{
+ clear_bit(event->hw.idx, cpuc->used_mask);
+}
+
+static void armv6pmu_disable_event(struct perf_event *event)
+{
+ unsigned long val, mask, evt;
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = hwc->idx;
+
+ if (ARMV6_CYCLE_COUNTER == idx) {
+ mask = ARMV6_PMCR_CCOUNT_IEN;
+ evt = 0;
+ } else if (ARMV6_COUNTER0 == idx) {
+ mask = ARMV6_PMCR_COUNT0_IEN | ARMV6_PMCR_EVT_COUNT0_MASK;
+ evt = ARMV6_PERFCTR_NOP << ARMV6_PMCR_EVT_COUNT0_SHIFT;
+ } else if (ARMV6_COUNTER1 == idx) {
+ mask = ARMV6_PMCR_COUNT1_IEN | ARMV6_PMCR_EVT_COUNT1_MASK;
+ evt = ARMV6_PERFCTR_NOP << ARMV6_PMCR_EVT_COUNT1_SHIFT;
+ } else {
+ WARN_ONCE(1, "invalid counter number (%d)\n", idx);
+ return;
+ }
+
+ /*
+ * Mask out the current event and set the counter to count the number
+ * of ETM bus signal assertion cycles. The external reporting should
+ * be disabled and so this should never increment.
+ */
+ val = armv6_pmcr_read();
+ val &= ~mask;
+ val |= evt;
+ armv6_pmcr_write(val);
+}
+
+static int armv6_map_event(struct perf_event *event)
+{
+ return armpmu_map_event(event, &armv6_perf_map,
+ &armv6_perf_cache_map, 0xFF);
+}
+
+static void armv6pmu_init(struct arm_pmu *cpu_pmu)
+{
+ cpu_pmu->handle_irq = armv6pmu_handle_irq;
+ cpu_pmu->enable = armv6pmu_enable_event;
+ cpu_pmu->disable = armv6pmu_disable_event;
+ cpu_pmu->read_counter = armv6pmu_read_counter;
+ cpu_pmu->write_counter = armv6pmu_write_counter;
+ cpu_pmu->get_event_idx = armv6pmu_get_event_idx;
+ cpu_pmu->clear_event_idx = armv6pmu_clear_event_idx;
+ cpu_pmu->start = armv6pmu_start;
+ cpu_pmu->stop = armv6pmu_stop;
+ cpu_pmu->map_event = armv6_map_event;
+
+ bitmap_set(cpu_pmu->cntr_mask, 0, ARMV6_NUM_COUNTERS);
+}
+
+static int armv6_1136_pmu_init(struct arm_pmu *cpu_pmu)
+{
+ armv6pmu_init(cpu_pmu);
+ cpu_pmu->name = "armv6_1136";
+ return 0;
+}
+
+static int armv6_1176_pmu_init(struct arm_pmu *cpu_pmu)
+{
+ armv6pmu_init(cpu_pmu);
+ cpu_pmu->name = "armv6_1176";
+ return 0;
+}
+
+static const struct of_device_id armv6_pmu_of_device_ids[] = {
+ {.compatible = "arm,arm1176-pmu", .data = armv6_1176_pmu_init},
+ {.compatible = "arm,arm1136-pmu", .data = armv6_1136_pmu_init},
+ { /* sentinel value */ }
+};
+
+static int armv6_pmu_device_probe(struct platform_device *pdev)
+{
+ return arm_pmu_device_probe(pdev, armv6_pmu_of_device_ids, NULL);
+}
+
+static struct platform_driver armv6_pmu_driver = {
+ .driver = {
+ .name = "armv6-pmu",
+ .of_match_table = armv6_pmu_of_device_ids,
+ },
+ .probe = armv6_pmu_device_probe,
+};
+
+builtin_platform_driver(armv6_pmu_driver);
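As a standalone illustration of the read-modify-write that armv6pmu_enable_event() performs when it retargets counter 0, with the PMCR modelled as a plain variable (event 0x07 is ARMV6_PERFCTR_INSTR_EXEC, 0x20 is the ARMV6_PERFCTR_NOP encoding used to park an idle counter):

#include <stdio.h>

#define ARMV6_PMCR_COUNT0_IEN           (1 << 4)
#define ARMV6_PMCR_EVT_COUNT0_SHIFT     20
#define ARMV6_PMCR_EVT_COUNT0_MASK      (0xFF << ARMV6_PMCR_EVT_COUNT0_SHIFT)

int main(void)
{
        /* Counter 0 currently parked on the NOP event, interrupt disabled. */
        unsigned long pmcr = 0x20UL << ARMV6_PMCR_EVT_COUNT0_SHIFT;
        unsigned long evt = 0x07;               /* INSTR_EXEC */

        pmcr &= ~ARMV6_PMCR_EVT_COUNT0_MASK;    /* mask out the old event      */
        pmcr |= (evt << ARMV6_PMCR_EVT_COUNT0_SHIFT) |
                ARMV6_PMCR_COUNT0_IEN;          /* new event + IRQ enable      */

        printf("pmcr = 0x%08lx\n", pmcr);       /* 0x00700010 */
        return 0;
}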
diff --git a/drivers/perf/arm_v7_pmu.c b/drivers/perf/arm_v7_pmu.c
new file mode 100644
index 000000000000..a1e438101114
--- /dev/null
+++ b/drivers/perf/arm_v7_pmu.c
@@ -0,0 +1,1924 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ARMv7 Cortex-A8 and Cortex-A9 Performance Events handling code.
+ *
+ * ARMv7 support: Jean Pihet <jpihet@mvista.com>
+ * 2010 (c) MontaVista Software, LLC.
+ *
+ * Copied from ARMv6 code, with the low level code inspired
+ * by the ARMv7 Oprofile code.
+ *
+ * Cortex-A8 has up to 4 configurable performance counters and
+ * a single cycle counter.
+ * Cortex-A9 has up to 31 configurable performance counters and
+ * a single cycle counter.
+ *
+ * All counters can be enabled/disabled and IRQ masked separately. The cycle
+ * counter and all 4 performance counters together can be reset separately.
+ */
+
+#include <asm/cp15.h>
+#include <asm/cputype.h>
+#include <asm/irq_regs.h>
+#include <asm/vfp.h>
+#include "../vfp/vfpinstr.h"
+
+#include <linux/of.h>
+#include <linux/perf/arm_pmu.h>
+#include <linux/platform_device.h>
+
+/*
+ * Common ARMv7 event types
+ *
+ * Note: An implementation may not be able to count all of these events
+ * but the encodings are considered to be `reserved' in the case that
+ * they are not available.
+ */
+#define ARMV7_PERFCTR_PMNC_SW_INCR 0x00
+#define ARMV7_PERFCTR_L1_ICACHE_REFILL 0x01
+#define ARMV7_PERFCTR_ITLB_REFILL 0x02
+#define ARMV7_PERFCTR_L1_DCACHE_REFILL 0x03
+#define ARMV7_PERFCTR_L1_DCACHE_ACCESS 0x04
+#define ARMV7_PERFCTR_DTLB_REFILL 0x05
+#define ARMV7_PERFCTR_MEM_READ 0x06
+#define ARMV7_PERFCTR_MEM_WRITE 0x07
+#define ARMV7_PERFCTR_INSTR_EXECUTED 0x08
+#define ARMV7_PERFCTR_EXC_TAKEN 0x09
+#define ARMV7_PERFCTR_EXC_EXECUTED 0x0A
+#define ARMV7_PERFCTR_CID_WRITE 0x0B
+
+/*
+ * ARMV7_PERFCTR_PC_WRITE is equivalent to HW_BRANCH_INSTRUCTIONS.
+ * It counts:
+ * - all (taken) branch instructions,
+ * - instructions that explicitly write the PC,
+ * - exception generating instructions.
+ */
+#define ARMV7_PERFCTR_PC_WRITE 0x0C
+#define ARMV7_PERFCTR_PC_IMM_BRANCH 0x0D
+#define ARMV7_PERFCTR_PC_PROC_RETURN 0x0E
+#define ARMV7_PERFCTR_MEM_UNALIGNED_ACCESS 0x0F
+#define ARMV7_PERFCTR_PC_BRANCH_MIS_PRED 0x10
+#define ARMV7_PERFCTR_CLOCK_CYCLES 0x11
+#define ARMV7_PERFCTR_PC_BRANCH_PRED 0x12
+
+/* These events are defined by the PMUv2 supplement (ARM DDI 0457A). */
+#define ARMV7_PERFCTR_MEM_ACCESS 0x13
+#define ARMV7_PERFCTR_L1_ICACHE_ACCESS 0x14
+#define ARMV7_PERFCTR_L1_DCACHE_WB 0x15
+#define ARMV7_PERFCTR_L2_CACHE_ACCESS 0x16
+#define ARMV7_PERFCTR_L2_CACHE_REFILL 0x17
+#define ARMV7_PERFCTR_L2_CACHE_WB 0x18
+#define ARMV7_PERFCTR_BUS_ACCESS 0x19
+#define ARMV7_PERFCTR_MEM_ERROR 0x1A
+#define ARMV7_PERFCTR_INSTR_SPEC 0x1B
+#define ARMV7_PERFCTR_TTBR_WRITE 0x1C
+#define ARMV7_PERFCTR_BUS_CYCLES 0x1D
+
+#define ARMV7_PERFCTR_CPU_CYCLES 0xFF
+
+/* ARMv7 Cortex-A8 specific event types */
+#define ARMV7_A8_PERFCTR_L2_CACHE_ACCESS 0x43
+#define ARMV7_A8_PERFCTR_L2_CACHE_REFILL 0x44
+#define ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS 0x50
+#define ARMV7_A8_PERFCTR_STALL_ISIDE 0x56
+
+/* ARMv7 Cortex-A9 specific event types */
+#define ARMV7_A9_PERFCTR_INSTR_CORE_RENAME 0x68
+#define ARMV7_A9_PERFCTR_STALL_ICACHE 0x60
+#define ARMV7_A9_PERFCTR_STALL_DISPATCH 0x66
+
+/* ARMv7 Cortex-A5 specific event types */
+#define ARMV7_A5_PERFCTR_PREFETCH_LINEFILL 0xc2
+#define ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP 0xc3
+
+/* ARMv7 Cortex-A15 specific event types */
+#define ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ 0x40
+#define ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE 0x41
+#define ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ 0x42
+#define ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_WRITE 0x43
+
+#define ARMV7_A15_PERFCTR_DTLB_REFILL_L1_READ 0x4C
+#define ARMV7_A15_PERFCTR_DTLB_REFILL_L1_WRITE 0x4D
+
+#define ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_READ 0x50
+#define ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_WRITE 0x51
+#define ARMV7_A15_PERFCTR_L2_CACHE_REFILL_READ 0x52
+#define ARMV7_A15_PERFCTR_L2_CACHE_REFILL_WRITE 0x53
+
+#define ARMV7_A15_PERFCTR_PC_WRITE_SPEC 0x76
+
+/* ARMv7 Cortex-A12 specific event types */
+#define ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_READ 0x40
+#define ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_WRITE 0x41
+
+#define ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_READ 0x50
+#define ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_WRITE 0x51
+
+#define ARMV7_A12_PERFCTR_PC_WRITE_SPEC 0x76
+
+#define ARMV7_A12_PERFCTR_PF_TLB_REFILL 0xe7
+
+/* ARMv7 Krait specific event types */
+#define KRAIT_PMRESR0_GROUP0 0xcc
+#define KRAIT_PMRESR1_GROUP0 0xd0
+#define KRAIT_PMRESR2_GROUP0 0xd4
+#define KRAIT_VPMRESR0_GROUP0 0xd8
+
+#define KRAIT_PERFCTR_L1_ICACHE_ACCESS 0x10011
+#define KRAIT_PERFCTR_L1_ICACHE_MISS 0x10010
+
+#define KRAIT_PERFCTR_L1_ITLB_ACCESS 0x12222
+#define KRAIT_PERFCTR_L1_DTLB_ACCESS 0x12210
+
+/* ARMv7 Scorpion specific event types */
+#define SCORPION_LPM0_GROUP0 0x4c
+#define SCORPION_LPM1_GROUP0 0x50
+#define SCORPION_LPM2_GROUP0 0x54
+#define SCORPION_L2LPM_GROUP0 0x58
+#define SCORPION_VLPM_GROUP0 0x5c
+
+#define SCORPION_ICACHE_ACCESS 0x10053
+#define SCORPION_ICACHE_MISS 0x10052
+
+#define SCORPION_DTLB_ACCESS 0x12013
+#define SCORPION_DTLB_MISS 0x12012
+
+#define SCORPION_ITLB_MISS 0x12021
+
+/*
+ * Cortex-A8 HW events mapping
+ *
+ * The hardware events that we support. We do support cache operations but
+ * we have Harvard caches and no way to combine instruction and data
+ * accesses/misses in hardware.
+ */
+static const unsigned armv7_a8_perf_map[PERF_COUNT_HW_MAX] = {
+ PERF_MAP_ALL_UNSUPPORTED,
+ [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
+ [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
+ [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
+ [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
+ [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+ [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV7_A8_PERFCTR_STALL_ISIDE,
+};
+
+static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+ PERF_CACHE_MAP_ALL_UNSUPPORTED,
+
+ /*
+ * The performance counters don't differentiate between read and write
+ * accesses/misses so this isn't strictly correct, but it's the best we
+ * can do. Writes and reads get combined.
+ */
+ [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
+ [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
+ [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
+ [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
+
+ [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS,
+ [C(L1I)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
+
+ [C(LL)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_A8_PERFCTR_L2_CACHE_ACCESS,
+ [C(LL)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_A8_PERFCTR_L2_CACHE_REFILL,
+ [C(LL)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_A8_PERFCTR_L2_CACHE_ACCESS,
+ [C(LL)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_A8_PERFCTR_L2_CACHE_REFILL,
+
+ [C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
+ [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
+
+ [C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
+ [C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
+
+ [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
+ [C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+ [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
+ [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+};
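All of these cache maps follow the same shape: every (cache, op, result) slot starts out as "unsupported" via PERF_CACHE_MAP_ALL_UNSUPPORTED and only the combinations the core can really count are filled in, so a lookup can reject anything left at the sentinel. A simplified, self-contained sketch of that pattern (names, sentinel and event numbers are illustrative, not the kernel's); the range designators are the same GNU extension the kernel macro uses, so build it with gcc:

#include <stdio.h>

enum { C_L1D, C_L1I, C_MAX };
enum { OP_READ, OP_WRITE, OP_MAX };
enum { RES_ACCESS, RES_MISS, RES_MAX };

#define CACHE_OP_UNSUPPORTED    0xFFFF

#define CACHE_MAP_ALL_UNSUPPORTED                                       \
        [0 ... C_MAX - 1] = {                                           \
                [0 ... OP_MAX - 1] = {                                  \
                        [0 ... RES_MAX - 1] = CACHE_OP_UNSUPPORTED,     \
                },                                                      \
        }

static const unsigned cache_map[C_MAX][OP_MAX][RES_MAX] = {
        CACHE_MAP_ALL_UNSUPPORTED,
        [C_L1D][OP_READ][RES_ACCESS] = 0x04,    /* e.g. L1D cache access */
        [C_L1D][OP_READ][RES_MISS]   = 0x03,    /* e.g. L1D cache refill */
};

static int map_cache_event(int cache, int op, int result)
{
        unsigned ev = cache_map[cache][op][result];

        return ev == CACHE_OP_UNSUPPORTED ? -1 : (int)ev;
}

int main(void)
{
        printf("L1D read access -> %d\n", map_cache_event(C_L1D, OP_READ, RES_ACCESS));
        printf("L1I write miss  -> %d\n", map_cache_event(C_L1I, OP_WRITE, RES_MISS));
        return 0;
}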
+
+/*
+ * Cortex-A9 HW events mapping
+ */
+static const unsigned armv7_a9_perf_map[PERF_COUNT_HW_MAX] = {
+ PERF_MAP_ALL_UNSUPPORTED,
+ [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
+ [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_A9_PERFCTR_INSTR_CORE_RENAME,
+ [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
+ [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
+ [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+ [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV7_A9_PERFCTR_STALL_ICACHE,
+ [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = ARMV7_A9_PERFCTR_STALL_DISPATCH,
+};
+
+static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+ PERF_CACHE_MAP_ALL_UNSUPPORTED,
+
+ /*
+ * The performance counters don't differentiate between read and write
+ * accesses/misses so this isn't strictly correct, but it's the best we
+ * can do. Writes and reads get combined.
+ */
+ [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
+ [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
+ [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
+ [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
+
+ [C(L1I)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
+
+ [C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
+ [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
+
+ [C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
+ [C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
+
+ [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
+ [C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+ [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
+ [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+};
+
+/*
+ * Cortex-A5 HW events mapping
+ */
+static const unsigned armv7_a5_perf_map[PERF_COUNT_HW_MAX] = {
+ PERF_MAP_ALL_UNSUPPORTED,
+ [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
+ [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
+ [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
+ [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
+ [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+};
+
+static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+ PERF_CACHE_MAP_ALL_UNSUPPORTED,
+
+ [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
+ [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
+ [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
+ [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
+ [C(L1D)][C(OP_PREFETCH)][C(RESULT_ACCESS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL,
+ [C(L1D)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP,
+
+ [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
+ [C(L1I)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
+ /*
+ * The prefetch counters don't differentiate between the I side and the
+ * D side.
+ */
+ [C(L1I)][C(OP_PREFETCH)][C(RESULT_ACCESS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL,
+ [C(L1I)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP,
+
+ [C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
+ [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
+
+ [C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
+ [C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
+
+ [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
+ [C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+ [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
+ [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+};
+
+/*
+ * Cortex-A15 HW events mapping
+ */
+static const unsigned armv7_a15_perf_map[PERF_COUNT_HW_MAX] = {
+ PERF_MAP_ALL_UNSUPPORTED,
+ [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
+ [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
+ [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
+ [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_A15_PERFCTR_PC_WRITE_SPEC,
+ [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+ [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_BUS_CYCLES,
+};
+
+static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+ PERF_CACHE_MAP_ALL_UNSUPPORTED,
+
+ [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ,
+ [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ,
+ [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE,
+ [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_WRITE,
+
+ /*
+ * Not all performance counters differentiate between read and write
+ * accesses/misses so we're not always strictly correct, but it's the
+ * best we can do. Writes and reads get combined in these cases.
+ */
+ [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
+ [C(L1I)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
+
+ [C(LL)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_READ,
+ [C(LL)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_A15_PERFCTR_L2_CACHE_REFILL_READ,
+ [C(LL)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_WRITE,
+ [C(LL)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_A15_PERFCTR_L2_CACHE_REFILL_WRITE,
+
+ [C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_A15_PERFCTR_DTLB_REFILL_L1_READ,
+ [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_A15_PERFCTR_DTLB_REFILL_L1_WRITE,
+
+ [C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
+ [C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
+
+ [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
+ [C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+ [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
+ [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+};
+
+/*
+ * Cortex-A7 HW events mapping
+ */
+static const unsigned armv7_a7_perf_map[PERF_COUNT_HW_MAX] = {
+ PERF_MAP_ALL_UNSUPPORTED,
+ [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
+ [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
+ [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
+ [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
+ [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+ [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_BUS_CYCLES,
+};
+
+static const unsigned armv7_a7_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+ PERF_CACHE_MAP_ALL_UNSUPPORTED,
+
+ /*
+ * The performance counters don't differentiate between read and write
+ * accesses/misses so this isn't strictly correct, but it's the best we
+ * can do. Writes and reads get combined.
+ */
+ [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
+ [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
+ [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
+ [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
+
+ [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
+ [C(L1I)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
+
+ [C(LL)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L2_CACHE_ACCESS,
+ [C(LL)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACHE_REFILL,
+ [C(LL)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L2_CACHE_ACCESS,
+ [C(LL)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACHE_REFILL,
+
+ [C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
+ [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
+
+ [C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
+ [C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
+
+ [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
+ [C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+ [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
+ [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+};
+
+/*
+ * Cortex-A12 HW events mapping
+ */
+static const unsigned armv7_a12_perf_map[PERF_COUNT_HW_MAX] = {
+ PERF_MAP_ALL_UNSUPPORTED,
+ [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
+ [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
+ [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
+ [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_A12_PERFCTR_PC_WRITE_SPEC,
+ [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+ [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_BUS_CYCLES,
+};
+
+static const unsigned armv7_a12_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+ PERF_CACHE_MAP_ALL_UNSUPPORTED,
+
+ [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_READ,
+ [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
+ [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_WRITE,
+ [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
+
+ /*
+ * Not all performance counters differentiate between read and write
+ * accesses/misses so we're not always strictly correct, but it's the
+ * best we can do. Writes and reads get combined in these cases.
+ */
+ [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
+ [C(L1I)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
+
+ [C(LL)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_READ,
+ [C(LL)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACHE_REFILL,
+ [C(LL)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_WRITE,
+ [C(LL)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACHE_REFILL,
+
+ [C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
+ [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
+ [C(DTLB)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV7_A12_PERFCTR_PF_TLB_REFILL,
+
+ [C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
+ [C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
+
+ [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
+ [C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+ [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
+ [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+};
+
+/*
+ * Krait HW events mapping
+ */
+static const unsigned krait_perf_map[PERF_COUNT_HW_MAX] = {
+ PERF_MAP_ALL_UNSUPPORTED,
+ [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
+ [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
+ [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+ [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_CLOCK_CYCLES,
+};
+
+static const unsigned krait_perf_map_no_branch[PERF_COUNT_HW_MAX] = {
+ PERF_MAP_ALL_UNSUPPORTED,
+ [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
+ [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
+ [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+ [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_CLOCK_CYCLES,
+};
+
+static const unsigned krait_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+ PERF_CACHE_MAP_ALL_UNSUPPORTED,
+
+ /*
+ * The performance counters don't differentiate between read and write
+ * accesses/misses so this isn't strictly correct, but it's the best we
+ * can do. Writes and reads get combined.
+ */
+ [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
+ [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
+ [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
+ [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
+
+ [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = KRAIT_PERFCTR_L1_ICACHE_ACCESS,
+ [C(L1I)][C(OP_READ)][C(RESULT_MISS)] = KRAIT_PERFCTR_L1_ICACHE_MISS,
+
+ [C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)] = KRAIT_PERFCTR_L1_DTLB_ACCESS,
+ [C(DTLB)][C(OP_WRITE)][C(RESULT_ACCESS)] = KRAIT_PERFCTR_L1_DTLB_ACCESS,
+
+ [C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = KRAIT_PERFCTR_L1_ITLB_ACCESS,
+ [C(ITLB)][C(OP_WRITE)][C(RESULT_ACCESS)] = KRAIT_PERFCTR_L1_ITLB_ACCESS,
+
+ [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
+ [C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+ [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
+ [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+};
+
+/*
+ * Scorpion HW events mapping
+ */
+static const unsigned scorpion_perf_map[PERF_COUNT_HW_MAX] = {
+ PERF_MAP_ALL_UNSUPPORTED,
+ [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
+ [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
+ [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+ [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_CLOCK_CYCLES,
+};
+
+static const unsigned scorpion_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+ PERF_CACHE_MAP_ALL_UNSUPPORTED,
+ /*
+ * The performance counters don't differentiate between read and write
+ * accesses/misses so this isn't strictly correct, but it's the best we
+ * can do. Writes and reads get combined.
+ */
+ [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
+ [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
+ [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
+ [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
+ [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = SCORPION_ICACHE_ACCESS,
+ [C(L1I)][C(OP_READ)][C(RESULT_MISS)] = SCORPION_ICACHE_MISS,
+ /*
+ * Only ITLB misses and DTLB refills are supported. If users want
+ * DTLB refill misses, a raw counter must be used.
+ */
+ [C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)] = SCORPION_DTLB_ACCESS,
+ [C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = SCORPION_DTLB_MISS,
+ [C(DTLB)][C(OP_WRITE)][C(RESULT_ACCESS)] = SCORPION_DTLB_ACCESS,
+ [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = SCORPION_DTLB_MISS,
+ [C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = SCORPION_ITLB_MISS,
+ [C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)] = SCORPION_ITLB_MISS,
+ [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
+ [C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+ [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
+ [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+};
+
+PMU_FORMAT_ATTR(event, "config:0-7");
+
+static struct attribute *armv7_pmu_format_attrs[] = {
+ &format_attr_event.attr,
+ NULL,
+};
+
+static struct attribute_group armv7_pmu_format_attr_group = {
+ .name = "format",
+ .attrs = armv7_pmu_format_attrs,
+};
+
+#define ARMV7_EVENT_ATTR_RESOLVE(m) #m
+#define ARMV7_EVENT_ATTR(name, config) \
+ PMU_EVENT_ATTR_STRING(name, armv7_event_attr_##name, \
+ "event=" ARMV7_EVENT_ATTR_RESOLVE(config))
+
+ARMV7_EVENT_ATTR(sw_incr, ARMV7_PERFCTR_PMNC_SW_INCR);
+ARMV7_EVENT_ATTR(l1i_cache_refill, ARMV7_PERFCTR_L1_ICACHE_REFILL);
+ARMV7_EVENT_ATTR(l1i_tlb_refill, ARMV7_PERFCTR_ITLB_REFILL);
+ARMV7_EVENT_ATTR(l1d_cache_refill, ARMV7_PERFCTR_L1_DCACHE_REFILL);
+ARMV7_EVENT_ATTR(l1d_cache, ARMV7_PERFCTR_L1_DCACHE_ACCESS);
+ARMV7_EVENT_ATTR(l1d_tlb_refill, ARMV7_PERFCTR_DTLB_REFILL);
+ARMV7_EVENT_ATTR(ld_retired, ARMV7_PERFCTR_MEM_READ);
+ARMV7_EVENT_ATTR(st_retired, ARMV7_PERFCTR_MEM_WRITE);
+ARMV7_EVENT_ATTR(inst_retired, ARMV7_PERFCTR_INSTR_EXECUTED);
+ARMV7_EVENT_ATTR(exc_taken, ARMV7_PERFCTR_EXC_TAKEN);
+ARMV7_EVENT_ATTR(exc_return, ARMV7_PERFCTR_EXC_EXECUTED);
+ARMV7_EVENT_ATTR(cid_write_retired, ARMV7_PERFCTR_CID_WRITE);
+ARMV7_EVENT_ATTR(pc_write_retired, ARMV7_PERFCTR_PC_WRITE);
+ARMV7_EVENT_ATTR(br_immed_retired, ARMV7_PERFCTR_PC_IMM_BRANCH);
+ARMV7_EVENT_ATTR(br_return_retired, ARMV7_PERFCTR_PC_PROC_RETURN);
+ARMV7_EVENT_ATTR(unaligned_ldst_retired, ARMV7_PERFCTR_MEM_UNALIGNED_ACCESS);
+ARMV7_EVENT_ATTR(br_mis_pred, ARMV7_PERFCTR_PC_BRANCH_MIS_PRED);
+ARMV7_EVENT_ATTR(cpu_cycles, ARMV7_PERFCTR_CLOCK_CYCLES);
+ARMV7_EVENT_ATTR(br_pred, ARMV7_PERFCTR_PC_BRANCH_PRED);
+
+static struct attribute *armv7_pmuv1_event_attrs[] = {
+ &armv7_event_attr_sw_incr.attr.attr,
+ &armv7_event_attr_l1i_cache_refill.attr.attr,
+ &armv7_event_attr_l1i_tlb_refill.attr.attr,
+ &armv7_event_attr_l1d_cache_refill.attr.attr,
+ &armv7_event_attr_l1d_cache.attr.attr,
+ &armv7_event_attr_l1d_tlb_refill.attr.attr,
+ &armv7_event_attr_ld_retired.attr.attr,
+ &armv7_event_attr_st_retired.attr.attr,
+ &armv7_event_attr_inst_retired.attr.attr,
+ &armv7_event_attr_exc_taken.attr.attr,
+ &armv7_event_attr_exc_return.attr.attr,
+ &armv7_event_attr_cid_write_retired.attr.attr,
+ &armv7_event_attr_pc_write_retired.attr.attr,
+ &armv7_event_attr_br_immed_retired.attr.attr,
+ &armv7_event_attr_br_return_retired.attr.attr,
+ &armv7_event_attr_unaligned_ldst_retired.attr.attr,
+ &armv7_event_attr_br_mis_pred.attr.attr,
+ &armv7_event_attr_cpu_cycles.attr.attr,
+ &armv7_event_attr_br_pred.attr.attr,
+ NULL,
+};
+
+static struct attribute_group armv7_pmuv1_events_attr_group = {
+ .name = "events",
+ .attrs = armv7_pmuv1_event_attrs,
+};
+
+ARMV7_EVENT_ATTR(mem_access, ARMV7_PERFCTR_MEM_ACCESS);
+ARMV7_EVENT_ATTR(l1i_cache, ARMV7_PERFCTR_L1_ICACHE_ACCESS);
+ARMV7_EVENT_ATTR(l1d_cache_wb, ARMV7_PERFCTR_L1_DCACHE_WB);
+ARMV7_EVENT_ATTR(l2d_cache, ARMV7_PERFCTR_L2_CACHE_ACCESS);
+ARMV7_EVENT_ATTR(l2d_cache_refill, ARMV7_PERFCTR_L2_CACHE_REFILL);
+ARMV7_EVENT_ATTR(l2d_cache_wb, ARMV7_PERFCTR_L2_CACHE_WB);
+ARMV7_EVENT_ATTR(bus_access, ARMV7_PERFCTR_BUS_ACCESS);
+ARMV7_EVENT_ATTR(memory_error, ARMV7_PERFCTR_MEM_ERROR);
+ARMV7_EVENT_ATTR(inst_spec, ARMV7_PERFCTR_INSTR_SPEC);
+ARMV7_EVENT_ATTR(ttbr_write_retired, ARMV7_PERFCTR_TTBR_WRITE);
+ARMV7_EVENT_ATTR(bus_cycles, ARMV7_PERFCTR_BUS_CYCLES);
+
+static struct attribute *armv7_pmuv2_event_attrs[] = {
+ &armv7_event_attr_sw_incr.attr.attr,
+ &armv7_event_attr_l1i_cache_refill.attr.attr,
+ &armv7_event_attr_l1i_tlb_refill.attr.attr,
+ &armv7_event_attr_l1d_cache_refill.attr.attr,
+ &armv7_event_attr_l1d_cache.attr.attr,
+ &armv7_event_attr_l1d_tlb_refill.attr.attr,
+ &armv7_event_attr_ld_retired.attr.attr,
+ &armv7_event_attr_st_retired.attr.attr,
+ &armv7_event_attr_inst_retired.attr.attr,
+ &armv7_event_attr_exc_taken.attr.attr,
+ &armv7_event_attr_exc_return.attr.attr,
+ &armv7_event_attr_cid_write_retired.attr.attr,
+ &armv7_event_attr_pc_write_retired.attr.attr,
+ &armv7_event_attr_br_immed_retired.attr.attr,
+ &armv7_event_attr_br_return_retired.attr.attr,
+ &armv7_event_attr_unaligned_ldst_retired.attr.attr,
+ &armv7_event_attr_br_mis_pred.attr.attr,
+ &armv7_event_attr_cpu_cycles.attr.attr,
+ &armv7_event_attr_br_pred.attr.attr,
+ &armv7_event_attr_mem_access.attr.attr,
+ &armv7_event_attr_l1i_cache.attr.attr,
+ &armv7_event_attr_l1d_cache_wb.attr.attr,
+ &armv7_event_attr_l2d_cache.attr.attr,
+ &armv7_event_attr_l2d_cache_refill.attr.attr,
+ &armv7_event_attr_l2d_cache_wb.attr.attr,
+ &armv7_event_attr_bus_access.attr.attr,
+ &armv7_event_attr_memory_error.attr.attr,
+ &armv7_event_attr_inst_spec.attr.attr,
+ &armv7_event_attr_ttbr_write_retired.attr.attr,
+ &armv7_event_attr_bus_cycles.attr.attr,
+ NULL,
+};
+
+static struct attribute_group armv7_pmuv2_events_attr_group = {
+ .name = "events",
+ .attrs = armv7_pmuv2_event_attrs,
+};
+
+/*
+ * Perf Events' indices
+ */
+#define ARMV7_IDX_CYCLE_COUNTER 31
+#define ARMV7_IDX_COUNTER_MAX 31
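+/*
+ * The cycle counter is tracked at the fixed index 31 in cntr_mask and
+ * used_mask, above the 0..30 range available to the programmable event
+ * counters (the PMNC.N field below reports at most 31 of those).
+ */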
+/*
+ * ARMv7 low level PMNC access
+ */
+
+/*
+ * Per-CPU PMNC: config reg
+ */
+#define ARMV7_PMNC_E (1 << 0) /* Enable all counters */
+#define ARMV7_PMNC_P (1 << 1) /* Reset all counters */
+#define ARMV7_PMNC_C (1 << 2) /* Cycle counter reset */
+#define ARMV7_PMNC_D (1 << 3) /* CCNT counts every 64th cpu cycle */
+#define ARMV7_PMNC_X (1 << 4) /* Export to ETM */
+#define ARMV7_PMNC_DP (1 << 5) /* Disable CCNT if non-invasive debug */
+#define ARMV7_PMNC_N_SHIFT 11 /* Number of counters supported */
+#define ARMV7_PMNC_N_MASK 0x1f
+#define ARMV7_PMNC_MASK 0x3f /* Mask for writable bits */
+
+/*
+ * FLAG: counters overflow flag status reg
+ */
+#define ARMV7_FLAG_MASK 0xffffffff /* Mask for writable bits */
+#define ARMV7_OVERFLOWED_MASK ARMV7_FLAG_MASK
+
+/*
+ * PMXEVTYPER: Event selection reg
+ */
+#define ARMV7_EVTYPE_MASK 0xc80000ff /* Mask for writable bits */
+#define ARMV7_EVTYPE_EVENT 0xff /* Mask for EVENT bits */
+
+/*
+ * Event filters for PMUv2
+ */
+#define ARMV7_EXCLUDE_PL1 BIT(31)
+#define ARMV7_EXCLUDE_USER BIT(30)
+#define ARMV7_INCLUDE_HYP BIT(27)
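+/*
+ * These map onto the P (bit 31), U (bit 30) and NSH (bit 27) filter
+ * controls of PMXEVTYPER; note that ARMV7_EVTYPE_MASK above keeps
+ * exactly these bits alongside the 8-bit event number.
+ */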
+
+/*
+ * Secure debug enable reg
+ */
+#define ARMV7_SDER_SUNIDEN BIT(1) /* Permit non-invasive debug */
+
+static inline u32 armv7_pmnc_read(void)
+{
+ u32 val;
+ asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r"(val));
+ return val;
+}
+
+static inline void armv7_pmnc_write(u32 val)
+{
+ val &= ARMV7_PMNC_MASK;
+ isb();
+ asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r"(val));
+}
+
+static inline int armv7_pmnc_has_overflowed(u32 pmnc)
+{
+ return pmnc & ARMV7_OVERFLOWED_MASK;
+}
+
+static inline int armv7_pmnc_counter_valid(struct arm_pmu *cpu_pmu, int idx)
+{
+ return test_bit(idx, cpu_pmu->cntr_mask);
+}
+
+static inline int armv7_pmnc_counter_has_overflowed(u32 pmnc, int idx)
+{
+ return pmnc & BIT(idx);
+}
+
+static inline void armv7_pmnc_select_counter(int idx)
+{
+ asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (idx));
+ isb();
+}
+
+static inline u64 armv7pmu_read_counter(struct perf_event *event)
+{
+ struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = hwc->idx;
+ u32 value = 0;
+
+ if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
+ pr_err("CPU%u reading wrong counter %d\n",
+ smp_processor_id(), idx);
+ } else if (idx == ARMV7_IDX_CYCLE_COUNTER) {
+ asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value));
+ } else {
+ armv7_pmnc_select_counter(idx);
+ asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (value));
+ }
+
+ return value;
+}
+
+static inline void armv7pmu_write_counter(struct perf_event *event, u64 value)
+{
+ struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = hwc->idx;
+
+ if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
+ pr_err("CPU%u writing wrong counter %d\n",
+ smp_processor_id(), idx);
+ } else if (idx == ARMV7_IDX_CYCLE_COUNTER) {
+ asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" ((u32)value));
+ } else {
+ armv7_pmnc_select_counter(idx);
+ asm volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" ((u32)value));
+ }
+}
+
+static inline void armv7_pmnc_write_evtsel(int idx, u32 val)
+{
+ armv7_pmnc_select_counter(idx);
+ val &= ARMV7_EVTYPE_MASK;
+ asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val));
+}
+
+static inline void armv7_pmnc_enable_counter(int idx)
+{
+ asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (BIT(idx)));
+}
+
+static inline void armv7_pmnc_disable_counter(int idx)
+{
+ asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (BIT(idx)));
+}
+
+static inline void armv7_pmnc_enable_intens(int idx)
+{
+ asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (BIT(idx)));
+}
+
+static inline void armv7_pmnc_disable_intens(int idx)
+{
+ asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (BIT(idx)));
+ isb();
+ /* Clear the overflow flag in case an interrupt is pending. */
+ asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (BIT(idx)));
+ isb();
+}
+
+static inline u32 armv7_pmnc_getreset_flags(void)
+{
+ u32 val;
+
+ /* Read */
+ asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
+
+ /* Write to clear flags */
+ val &= ARMV7_FLAG_MASK;
+ asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (val));
+
+ return val;
+}
+
+#ifdef DEBUG
+static void armv7_pmnc_dump_regs(struct arm_pmu *cpu_pmu)
+{
+ u32 val;
+ unsigned int cnt;
+
+ pr_info("PMNC registers dump:\n");
+
+ asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val));
+ pr_info("PMNC =0x%08x\n", val);
+
+ asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r" (val));
+ pr_info("CNTENS=0x%08x\n", val);
+
+ asm volatile("mrc p15, 0, %0, c9, c14, 1" : "=r" (val));
+ pr_info("INTENS=0x%08x\n", val);
+
+ asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
+ pr_info("FLAGS =0x%08x\n", val);
+
+ asm volatile("mrc p15, 0, %0, c9, c12, 5" : "=r" (val));
+ pr_info("SELECT=0x%08x\n", val);
+
+ asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val));
+ pr_info("CCNT =0x%08x\n", val);
+
+ for_each_set_bit(cnt, cpu_pmu->cntr_mask, ARMV7_IDX_COUNTER_MAX) {
+ armv7_pmnc_select_counter(cnt);
+ asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val));
+ pr_info("CNT[%d] count =0x%08x\n", cnt, val);
+ asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val));
+ pr_info("CNT[%d] evtsel=0x%08x\n", cnt, val);
+ }
+}
+#endif
+
+static void armv7pmu_enable_event(struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+ int idx = hwc->idx;
+
+ if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
+ pr_err("CPU%u enabling wrong PMNC counter IRQ enable %d\n",
+ smp_processor_id(), idx);
+ return;
+ }
+
+ /*
+ * Set event (if destined for PMNx counters)
+ * We only need to set the event for the cycle counter if we
+ * have the ability to perform event filtering.
+ */
+ if (cpu_pmu->set_event_filter || idx != ARMV7_IDX_CYCLE_COUNTER)
+ armv7_pmnc_write_evtsel(idx, hwc->config_base);
+
+ armv7_pmnc_enable_intens(idx);
+ armv7_pmnc_enable_counter(idx);
+}
+
+static void armv7pmu_disable_event(struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+ int idx = hwc->idx;
+
+ if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
+ pr_err("CPU%u disabling wrong PMNC counter IRQ enable %d\n",
+ smp_processor_id(), idx);
+ return;
+ }
+
+ armv7_pmnc_disable_counter(idx);
+ armv7_pmnc_disable_intens(idx);
+}
+
+static irqreturn_t armv7pmu_handle_irq(struct arm_pmu *cpu_pmu)
+{
+ u32 pmnc;
+ struct perf_sample_data data;
+ struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
+ struct pt_regs *regs;
+ int idx;
+
+ /*
+ * Get and reset the IRQ flags
+ */
+ pmnc = armv7_pmnc_getreset_flags();
+
+ /*
+ * Did an overflow occur?
+ */
+ if (!armv7_pmnc_has_overflowed(pmnc))
+ return IRQ_NONE;
+
+ /*
+ * Handle the counter(s) overflow(s)
+ */
+ regs = get_irq_regs();
+
+ for_each_set_bit(idx, cpu_pmu->cntr_mask, ARMPMU_MAX_HWEVENTS) {
+ struct perf_event *event = cpuc->events[idx];
+ struct hw_perf_event *hwc;
+
+ /* Ignore if we don't have an event. */
+ if (!event)
+ continue;
+
+ /*
+ * We have a single interrupt for all counters. Check that
+ * each counter has overflowed before we process it.
+ */
+ if (!armv7_pmnc_counter_has_overflowed(pmnc, idx))
+ continue;
+
+ hwc = &event->hw;
+ armpmu_event_update(event);
+ perf_sample_data_init(&data, 0, hwc->last_period);
+ if (!armpmu_event_set_period(event))
+ continue;
+
+ perf_event_overflow(event, &data, regs);
+ }
+
+ /*
+ * Handle the pending perf events.
+ *
+ * Note: this call *must* be run with interrupts disabled. For
+ * platforms that can have the PMU interrupts raised as an NMI, this
+ * will not work.
+ */
+ irq_work_run();
+
+ return IRQ_HANDLED;
+}
+
+static void armv7pmu_start(struct arm_pmu *cpu_pmu)
+{
+ /* Enable all counters */
+ armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E);
+}
+
+static void armv7pmu_stop(struct arm_pmu *cpu_pmu)
+{
+ /* Disable all counters */
+ armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E);
+}
+
+static int armv7pmu_get_event_idx(struct pmu_hw_events *cpuc,
+ struct perf_event *event)
+{
+ int idx;
+ struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ unsigned long evtype = hwc->config_base & ARMV7_EVTYPE_EVENT;
+
+ /* Always place a cycle-count event on the dedicated cycle counter. */
+ if (evtype == ARMV7_PERFCTR_CPU_CYCLES) {
+ if (test_and_set_bit(ARMV7_IDX_CYCLE_COUNTER, cpuc->used_mask))
+ return -EAGAIN;
+
+ return ARMV7_IDX_CYCLE_COUNTER;
+ }
+
+ /*
+ * For anything other than a cycle-count event, try to use
+ * one of the programmable event counters.
+ */
+ for_each_set_bit(idx, cpu_pmu->cntr_mask, ARMV7_IDX_COUNTER_MAX) {
+ if (!test_and_set_bit(idx, cpuc->used_mask))
+ return idx;
+ }
+
+ /* The counters are all in use. */
+ return -EAGAIN;
+}
+
+static void armv7pmu_clear_event_idx(struct pmu_hw_events *cpuc,
+ struct perf_event *event)
+{
+ clear_bit(event->hw.idx, cpuc->used_mask);
+}
+
+/*
+ * Add an event filter to a given event. This will only work for PMUv2 PMUs.
+ */
+static int armv7pmu_set_event_filter(struct hw_perf_event *event,
+ struct perf_event_attr *attr)
+{
+ unsigned long config_base = 0;
+
+ if (attr->exclude_idle) {
+ pr_debug("ARM performance counters do not support mode exclusion\n");
+ return -EOPNOTSUPP;
+ }
+ if (attr->exclude_user)
+ config_base |= ARMV7_EXCLUDE_USER;
+ if (attr->exclude_kernel)
+ config_base |= ARMV7_EXCLUDE_PL1;
+ if (!attr->exclude_hv)
+ config_base |= ARMV7_INCLUDE_HYP;
+
+ /*
+ * Install the filter into config_base as this is used to
+ * construct the event type: the arm_pmu core ORs the mapped
+ * event number into config_base, so these bits end up in the
+ * value written to PMXEVTYPER.
+ */
+ event->config_base = config_base;
+
+ return 0;
+}
+
+static void armv7pmu_reset(void *info)
+{
+ struct arm_pmu *cpu_pmu = (struct arm_pmu *)info;
+ u32 idx, val;
+
+ if (cpu_pmu->secure_access) {
+ asm volatile("mrc p15, 0, %0, c1, c1, 1" : "=r" (val));
+ val |= ARMV7_SDER_SUNIDEN;
+ asm volatile("mcr p15, 0, %0, c1, c1, 1" : : "r" (val));
+ }
+
+ /* The counter and interrupt enable registers are unknown at reset. */
+ for_each_set_bit(idx, cpu_pmu->cntr_mask, ARMPMU_MAX_HWEVENTS) {
+ armv7_pmnc_disable_counter(idx);
+ armv7_pmnc_disable_intens(idx);
+ }
+
+ /* Initialize & Reset PMNC: C and P bits */
+ armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C);
+}
+
+static int armv7_a8_map_event(struct perf_event *event)
+{
+ return armpmu_map_event(event, &armv7_a8_perf_map,
+ &armv7_a8_perf_cache_map, 0xFF);
+}
+
+static int armv7_a9_map_event(struct perf_event *event)
+{
+ return armpmu_map_event(event, &armv7_a9_perf_map,
+ &armv7_a9_perf_cache_map, 0xFF);
+}
+
+static int armv7_a5_map_event(struct perf_event *event)
+{
+ return armpmu_map_event(event, &armv7_a5_perf_map,
+ &armv7_a5_perf_cache_map, 0xFF);
+}
+
+static int armv7_a15_map_event(struct perf_event *event)
+{
+ return armpmu_map_event(event, &armv7_a15_perf_map,
+ &armv7_a15_perf_cache_map, 0xFF);
+}
+
+static int armv7_a7_map_event(struct perf_event *event)
+{
+ return armpmu_map_event(event, &armv7_a7_perf_map,
+ &armv7_a7_perf_cache_map, 0xFF);
+}
+
+static int armv7_a12_map_event(struct perf_event *event)
+{
+ return armpmu_map_event(event, &armv7_a12_perf_map,
+ &armv7_a12_perf_cache_map, 0xFF);
+}
+
+static int krait_map_event(struct perf_event *event)
+{
+ return armpmu_map_event(event, &krait_perf_map,
+ &krait_perf_cache_map, 0xFFFFF);
+}
+
+static int krait_map_event_no_branch(struct perf_event *event)
+{
+ return armpmu_map_event(event, &krait_perf_map_no_branch,
+ &krait_perf_cache_map, 0xFFFFF);
+}
+
+static int scorpion_map_event(struct perf_event *event)
+{
+ return armpmu_map_event(event, &scorpion_perf_map,
+ &scorpion_perf_cache_map, 0xFFFFF);
+}
+
+static void armv7pmu_init(struct arm_pmu *cpu_pmu)
+{
+ cpu_pmu->handle_irq = armv7pmu_handle_irq;
+ cpu_pmu->enable = armv7pmu_enable_event;
+ cpu_pmu->disable = armv7pmu_disable_event;
+ cpu_pmu->read_counter = armv7pmu_read_counter;
+ cpu_pmu->write_counter = armv7pmu_write_counter;
+ cpu_pmu->get_event_idx = armv7pmu_get_event_idx;
+ cpu_pmu->clear_event_idx = armv7pmu_clear_event_idx;
+ cpu_pmu->start = armv7pmu_start;
+ cpu_pmu->stop = armv7pmu_stop;
+ cpu_pmu->reset = armv7pmu_reset;
+}
+
+static void armv7_read_num_pmnc_events(void *info)
+{
+ int nb_cnt;
+ struct arm_pmu *cpu_pmu = info;
+
+ /* Read the nb of CNTx counters supported from PMNC */
+ nb_cnt = (armv7_pmnc_read() >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK;
+ bitmap_set(cpu_pmu->cntr_mask, 0, nb_cnt);
+
+ /* Add the CPU cycles counter */
+ set_bit(ARMV7_IDX_CYCLE_COUNTER, cpu_pmu->cntr_mask);
+}
+
+static int armv7_probe_num_events(struct arm_pmu *arm_pmu)
+{
+ return smp_call_function_any(&arm_pmu->supported_cpus,
+ armv7_read_num_pmnc_events,
+ arm_pmu, 1);
+}
+
+static int armv7_a8_pmu_init(struct arm_pmu *cpu_pmu)
+{
+ armv7pmu_init(cpu_pmu);
+ cpu_pmu->name = "armv7_cortex_a8";
+ cpu_pmu->map_event = armv7_a8_map_event;
+ cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
+ &armv7_pmuv1_events_attr_group;
+ cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
+ &armv7_pmu_format_attr_group;
+ return armv7_probe_num_events(cpu_pmu);
+}
+
+static int armv7_a9_pmu_init(struct arm_pmu *cpu_pmu)
+{
+ armv7pmu_init(cpu_pmu);
+ cpu_pmu->name = "armv7_cortex_a9";
+ cpu_pmu->map_event = armv7_a9_map_event;
+ cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
+ &armv7_pmuv1_events_attr_group;
+ cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
+ &armv7_pmu_format_attr_group;
+ return armv7_probe_num_events(cpu_pmu);
+}
+
+static int armv7_a5_pmu_init(struct arm_pmu *cpu_pmu)
+{
+ armv7pmu_init(cpu_pmu);
+ cpu_pmu->name = "armv7_cortex_a5";
+ cpu_pmu->map_event = armv7_a5_map_event;
+ cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
+ &armv7_pmuv1_events_attr_group;
+ cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
+ &armv7_pmu_format_attr_group;
+ return armv7_probe_num_events(cpu_pmu);
+}
+
+static int armv7_a15_pmu_init(struct arm_pmu *cpu_pmu)
+{
+ armv7pmu_init(cpu_pmu);
+ cpu_pmu->name = "armv7_cortex_a15";
+ cpu_pmu->map_event = armv7_a15_map_event;
+ cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
+ cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
+ &armv7_pmuv2_events_attr_group;
+ cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
+ &armv7_pmu_format_attr_group;
+ return armv7_probe_num_events(cpu_pmu);
+}
+
+static int armv7_a7_pmu_init(struct arm_pmu *cpu_pmu)
+{
+ armv7pmu_init(cpu_pmu);
+ cpu_pmu->name = "armv7_cortex_a7";
+ cpu_pmu->map_event = armv7_a7_map_event;
+ cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
+ cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
+ &armv7_pmuv2_events_attr_group;
+ cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
+ &armv7_pmu_format_attr_group;
+ return armv7_probe_num_events(cpu_pmu);
+}
+
+static int armv7_a12_pmu_init(struct arm_pmu *cpu_pmu)
+{
+ armv7pmu_init(cpu_pmu);
+ cpu_pmu->name = "armv7_cortex_a12";
+ cpu_pmu->map_event = armv7_a12_map_event;
+ cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
+ cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
+ &armv7_pmuv2_events_attr_group;
+ cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
+ &armv7_pmu_format_attr_group;
+ return armv7_probe_num_events(cpu_pmu);
+}
+
+static int armv7_a17_pmu_init(struct arm_pmu *cpu_pmu)
+{
+ int ret = armv7_a12_pmu_init(cpu_pmu);
+ cpu_pmu->name = "armv7_cortex_a17";
+ cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
+ &armv7_pmuv2_events_attr_group;
+ cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
+ &armv7_pmu_format_attr_group;
+ return ret;
+}
+
+/*
+ * Krait Performance Monitor Region Event Selection Register (PMRESRn)
+ *
+ * 31 30 24 16 8 0
+ * +--------------------------------+
+ * PMRESR0 | EN | CC | CC | CC | CC | N = 1, R = 0
+ * +--------------------------------+
+ * PMRESR1 | EN | CC | CC | CC | CC | N = 1, R = 1
+ * +--------------------------------+
+ * PMRESR2 | EN | CC | CC | CC | CC | N = 1, R = 2
+ * +--------------------------------+
+ * VPMRESR0 | EN | CC | CC | CC | CC | N = 2, R = ?
+ * +--------------------------------+
+ * EN | G=3 | G=2 | G=1 | G=0
+ *
+ * Event Encoding:
+ *
+ * hwc->config_base = 0xNRCCG
+ *
+ * N = prefix, 1 for Krait CPU (PMRESRn), 2 for Venum VFP (VPMRESR)
+ * R = region register
+ * CC = class of events the group G is choosing from
+ * G = group or particular event
+ *
+ * Example: 0x12021 is a Krait CPU event in PMRESR2's group 1 with code 2
+ *
+ * A region (R) corresponds to a piece of the CPU (execution unit, instruction
+ * unit, etc.) while the event code (CC) corresponds to a particular class of
+ * events (interrupts for example). An event code is broken down into
+ * groups (G) that can be mapped into the PMU (irq, fiqs, and irq+fiqs for
+ * example).
+ */
+
+#define KRAIT_EVENT (1 << 16)
+#define VENUM_EVENT (2 << 16)
+#define KRAIT_EVENT_MASK (KRAIT_EVENT | VENUM_EVENT)
+#define PMRESRn_EN BIT(31)
+
+#define EVENT_REGION(event) (((event) >> 12) & 0xf) /* R */
+#define EVENT_GROUP(event) ((event) & 0xf) /* G */
+#define EVENT_CODE(event) (((event) >> 4) & 0xff) /* CC */
+#define EVENT_VENUM(event) (!!(event & VENUM_EVENT)) /* N=2 */
+#define EVENT_CPU(event) (!!(event & KRAIT_EVENT)) /* N=1 */
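+
+/*
+ * Worked example (illustrative): for the 0x12021 event described above,
+ * EVENT_REGION() gives 2, EVENT_CODE() gives 0x02, EVENT_GROUP() gives 1
+ * and EVENT_CPU() is true, i.e. group 1 of PMRESR2 programmed with event
+ * class 0x02.
+ */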
+
+static u32 krait_read_pmresrn(int n)
+{
+ u32 val;
+
+ switch (n) {
+ case 0:
+ asm volatile("mrc p15, 1, %0, c9, c15, 0" : "=r" (val));
+ break;
+ case 1:
+ asm volatile("mrc p15, 1, %0, c9, c15, 1" : "=r" (val));
+ break;
+ case 2:
+ asm volatile("mrc p15, 1, %0, c9, c15, 2" : "=r" (val));
+ break;
+ default:
+ BUG(); /* Should be validated in krait_pmu_get_event_idx() */
+ }
+
+ return val;
+}
+
+static void krait_write_pmresrn(int n, u32 val)
+{
+ switch (n) {
+ case 0:
+ asm volatile("mcr p15, 1, %0, c9, c15, 0" : : "r" (val));
+ break;
+ case 1:
+ asm volatile("mcr p15, 1, %0, c9, c15, 1" : : "r" (val));
+ break;
+ case 2:
+ asm volatile("mcr p15, 1, %0, c9, c15, 2" : : "r" (val));
+ break;
+ default:
+ BUG(); /* Should be validated in krait_pmu_get_event_idx() */
+ }
+}
+
+static u32 venum_read_pmresr(void)
+{
+ u32 val;
+ asm volatile("mrc p10, 7, %0, c11, c0, 0" : "=r" (val));
+ return val;
+}
+
+static void venum_write_pmresr(u32 val)
+{
+ asm volatile("mcr p10, 7, %0, c11, c0, 0" : : "r" (val));
+}
+
+static void venum_pre_pmresr(u32 *venum_orig_val, u32 *fp_orig_val)
+{
+ u32 venum_new_val;
+ u32 fp_new_val;
+
+ BUG_ON(preemptible());
+ /* CPACR Enable CP10 and CP11 access */
+ *venum_orig_val = get_copro_access();
+ venum_new_val = *venum_orig_val | CPACC_SVC(10) | CPACC_SVC(11);
+ set_copro_access(venum_new_val);
+
+ /* Enable FPEXC */
+ *fp_orig_val = fmrx(FPEXC);
+ fp_new_val = *fp_orig_val | FPEXC_EN;
+ fmxr(FPEXC, fp_new_val);
+}
+
+static void venum_post_pmresr(u32 venum_orig_val, u32 fp_orig_val)
+{
+ BUG_ON(preemptible());
+ /* Restore FPEXC */
+ fmxr(FPEXC, fp_orig_val);
+ isb();
+ /* Restore CPACR */
+ set_copro_access(venum_orig_val);
+}
+
+static u32 krait_get_pmresrn_event(unsigned int region)
+{
+ static const u32 pmresrn_table[] = { KRAIT_PMRESR0_GROUP0,
+ KRAIT_PMRESR1_GROUP0,
+ KRAIT_PMRESR2_GROUP0 };
+ return pmresrn_table[region];
+}
+
+static void krait_evt_setup(int idx, u32 config_base)
+{
+ u32 val;
+ u32 mask;
+ u32 vval, fval;
+ unsigned int region = EVENT_REGION(config_base);
+ unsigned int group = EVENT_GROUP(config_base);
+ unsigned int code = EVENT_CODE(config_base);
+ unsigned int group_shift;
+ bool venum_event = EVENT_VENUM(config_base);
+
+ group_shift = group * 8;
+ mask = 0xff << group_shift;
+
+ /* Configure evtsel for the region and group */
+ if (venum_event)
+ val = KRAIT_VPMRESR0_GROUP0;
+ else
+ val = krait_get_pmresrn_event(region);
+ val += group;
+ /* Mix in mode-exclusion bits */
+ val |= config_base & (ARMV7_EXCLUDE_USER | ARMV7_EXCLUDE_PL1);
+ armv7_pmnc_write_evtsel(idx, val);
+
+ if (venum_event) {
+ venum_pre_pmresr(&vval, &fval);
+ val = venum_read_pmresr();
+ val &= ~mask;
+ val |= code << group_shift;
+ val |= PMRESRn_EN;
+ venum_write_pmresr(val);
+ venum_post_pmresr(vval, fval);
+ } else {
+ val = krait_read_pmresrn(region);
+ val &= ~mask;
+ val |= code << group_shift;
+ val |= PMRESRn_EN;
+ krait_write_pmresrn(region, val);
+ }
+}
+
+static u32 clear_pmresrn_group(u32 val, int group)
+{
+ u32 mask;
+ int group_shift;
+
+ group_shift = group * 8;
+ mask = 0xff << group_shift;
+ val &= ~mask;
+
+ /* Keep the enable bit set while any other group in the region is in use */
+ if (val & ~PMRESRn_EN)
+ return val |= PMRESRn_EN;
+
+ return 0;
+}
+
+static void krait_clearpmu(u32 config_base)
+{
+ u32 val;
+ u32 vval, fval;
+ unsigned int region = EVENT_REGION(config_base);
+ unsigned int group = EVENT_GROUP(config_base);
+ bool venum_event = EVENT_VENUM(config_base);
+
+ if (venum_event) {
+ venum_pre_pmresr(&vval, &fval);
+ val = venum_read_pmresr();
+ val = clear_pmresrn_group(val, group);
+ venum_write_pmresr(val);
+ venum_post_pmresr(vval, fval);
+ } else {
+ val = krait_read_pmresrn(region);
+ val = clear_pmresrn_group(val, group);
+ krait_write_pmresrn(region, val);
+ }
+}
+
+static void krait_pmu_disable_event(struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = hwc->idx;
+
+ /* Disable counter and interrupt */
+
+ /* Disable counter */
+ armv7_pmnc_disable_counter(idx);
+
+ /*
+ * Clear pmresr code (if destined for PMNx counters)
+ */
+ if (hwc->config_base & KRAIT_EVENT_MASK)
+ krait_clearpmu(hwc->config_base);
+
+ /* Disable interrupt for this counter */
+ armv7_pmnc_disable_intens(idx);
+}
+
+static void krait_pmu_enable_event(struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = hwc->idx;
+
+ /*
+ * Set event (if destined for PMNx counters)
+ * We set the event for the cycle counter because we
+ * have the ability to perform event filtering.
+ */
+ if (hwc->config_base & KRAIT_EVENT_MASK)
+ krait_evt_setup(idx, hwc->config_base);
+ else
+ armv7_pmnc_write_evtsel(idx, hwc->config_base);
+
+ armv7_pmnc_enable_intens(idx);
+ armv7_pmnc_enable_counter(idx);
+}
+
+static void krait_pmu_reset(void *info)
+{
+ u32 vval, fval;
+ struct arm_pmu *cpu_pmu = info;
+ u32 idx;
+
+ armv7pmu_reset(info);
+
+ /* Clear all pmresrs */
+ krait_write_pmresrn(0, 0);
+ krait_write_pmresrn(1, 0);
+ krait_write_pmresrn(2, 0);
+
+ venum_pre_pmresr(&vval, &fval);
+ venum_write_pmresr(0);
+ venum_post_pmresr(vval, fval);
+
+ /* Reset PMxEVNCTCR to sane default */
+ for_each_set_bit(idx, cpu_pmu->cntr_mask, ARMV7_IDX_COUNTER_MAX) {
+ armv7_pmnc_select_counter(idx);
+ asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0));
+ }
+}
+
+static int krait_event_to_bit(struct perf_event *event, unsigned int region,
+ unsigned int group)
+{
+ int bit;
+ struct hw_perf_event *hwc = &event->hw;
+ struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+
+ if (hwc->config_base & VENUM_EVENT)
+ bit = KRAIT_VPMRESR0_GROUP0;
+ else
+ bit = krait_get_pmresrn_event(region);
+ bit -= krait_get_pmresrn_event(0);
+ bit += group;
+ /*
+ * Lower bits are reserved for use by the counters (see
+ * armv7pmu_get_event_idx() for more info)
+ */
+ bit += bitmap_weight(cpu_pmu->cntr_mask, ARMV7_IDX_COUNTER_MAX);
+
+ return bit;
+}
+
+/*
+ * We check for column exclusion constraints here.
+ * Two events can't use the same group within a pmresr register.
+ */
+static int krait_pmu_get_event_idx(struct pmu_hw_events *cpuc,
+ struct perf_event *event)
+{
+ int idx;
+ int bit = -1;
+ struct hw_perf_event *hwc = &event->hw;
+ unsigned int region = EVENT_REGION(hwc->config_base);
+ unsigned int code = EVENT_CODE(hwc->config_base);
+ unsigned int group = EVENT_GROUP(hwc->config_base);
+ bool venum_event = EVENT_VENUM(hwc->config_base);
+ bool krait_event = EVENT_CPU(hwc->config_base);
+
+ if (venum_event || krait_event) {
+ /* Ignore invalid events */
+ if (group > 3 || region > 2)
+ return -EINVAL;
+ if (venum_event && (code & 0xe0))
+ return -EINVAL;
+
+ bit = krait_event_to_bit(event, region, group);
+ if (test_and_set_bit(bit, cpuc->used_mask))
+ return -EAGAIN;
+ }
+
+ idx = armv7pmu_get_event_idx(cpuc, event);
+ if (idx < 0 && bit >= 0)
+ clear_bit(bit, cpuc->used_mask);
+
+ return idx;
+}
+
+static void krait_pmu_clear_event_idx(struct pmu_hw_events *cpuc,
+ struct perf_event *event)
+{
+ int bit;
+ struct hw_perf_event *hwc = &event->hw;
+ unsigned int region = EVENT_REGION(hwc->config_base);
+ unsigned int group = EVENT_GROUP(hwc->config_base);
+ bool venum_event = EVENT_VENUM(hwc->config_base);
+ bool krait_event = EVENT_CPU(hwc->config_base);
+
+ armv7pmu_clear_event_idx(cpuc, event);
+ if (venum_event || krait_event) {
+ bit = krait_event_to_bit(event, region, group);
+ clear_bit(bit, cpuc->used_mask);
+ }
+}
+
+static int krait_pmu_init(struct arm_pmu *cpu_pmu)
+{
+ armv7pmu_init(cpu_pmu);
+ cpu_pmu->name = "armv7_krait";
+ /* Some early versions of Krait don't support PC write events */
+ if (of_property_read_bool(cpu_pmu->plat_device->dev.of_node,
+ "qcom,no-pc-write"))
+ cpu_pmu->map_event = krait_map_event_no_branch;
+ else
+ cpu_pmu->map_event = krait_map_event;
+ cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
+ cpu_pmu->reset = krait_pmu_reset;
+ cpu_pmu->enable = krait_pmu_enable_event;
+ cpu_pmu->disable = krait_pmu_disable_event;
+ cpu_pmu->get_event_idx = krait_pmu_get_event_idx;
+ cpu_pmu->clear_event_idx = krait_pmu_clear_event_idx;
+ return armv7_probe_num_events(cpu_pmu);
+}
+
+/*
+ * Scorpion Local Performance Monitor Register (LPMn)
+ *
+ * 31 30 24 16 8 0
+ * +--------------------------------+
+ * LPM0 | EN | CC | CC | CC | CC | N = 1, R = 0
+ * +--------------------------------+
+ * LPM1 | EN | CC | CC | CC | CC | N = 1, R = 1
+ * +--------------------------------+
+ * LPM2 | EN | CC | CC | CC | CC | N = 1, R = 2
+ * +--------------------------------+
+ * L2LPM | EN | CC | CC | CC | CC | N = 1, R = 3
+ * +--------------------------------+
+ * VLPM | EN | CC | CC | CC | CC | N = 2, R = ?
+ * +--------------------------------+
+ * EN | G=3 | G=2 | G=1 | G=0
+ *
+ *
+ * Event Encoding:
+ *
+ * hwc->config_base = 0xNRCCG
+ *
+ * N = prefix, 1 for Scorpion CPU (LPMn/L2LPM), 2 for Venum VFP (VLPM)
+ * R = region register
+ * CC = class of events the group G is choosing from
+ * G = group or particular event
+ *
+ * Example: 0x12021 is a Scorpion CPU event in LPM2's group 1 with code 2
+ *
+ * A region (R) corresponds to a piece of the CPU (execution unit, instruction
+ * unit, etc.) while the event code (CC) corresponds to a particular class of
+ * events (interrupts for example). An event code is broken down into
+ * groups (G) that can be mapped into the PMU (irq, fiqs, and irq+fiqs for
+ * example).
+ */
+
+static u32 scorpion_read_pmresrn(int n)
+{
+ u32 val;
+
+ switch (n) {
+ case 0:
+ asm volatile("mrc p15, 0, %0, c15, c0, 0" : "=r" (val));
+ break;
+ case 1:
+ asm volatile("mrc p15, 1, %0, c15, c0, 0" : "=r" (val));
+ break;
+ case 2:
+ asm volatile("mrc p15, 2, %0, c15, c0, 0" : "=r" (val));
+ break;
+ case 3:
+ asm volatile("mrc p15, 3, %0, c15, c2, 0" : "=r" (val));
+ break;
+ default:
+ BUG(); /* Should be validated in scorpion_pmu_get_event_idx() */
+ }
+
+ return val;
+}
+
+static void scorpion_write_pmresrn(int n, u32 val)
+{
+ switch (n) {
+ case 0:
+ asm volatile("mcr p15, 0, %0, c15, c0, 0" : : "r" (val));
+ break;
+ case 1:
+ asm volatile("mcr p15, 1, %0, c15, c0, 0" : : "r" (val));
+ break;
+ case 2:
+ asm volatile("mcr p15, 2, %0, c15, c0, 0" : : "r" (val));
+ break;
+ case 3:
+ asm volatile("mcr p15, 3, %0, c15, c2, 0" : : "r" (val));
+ break;
+ default:
+ BUG(); /* Should be validated in scorpion_pmu_get_event_idx() */
+ }
+}
+
+static u32 scorpion_get_pmresrn_event(unsigned int region)
+{
+ static const u32 pmresrn_table[] = { SCORPION_LPM0_GROUP0,
+ SCORPION_LPM1_GROUP0,
+ SCORPION_LPM2_GROUP0,
+ SCORPION_L2LPM_GROUP0 };
+ return pmresrn_table[region];
+}
+
+static void scorpion_evt_setup(int idx, u32 config_base)
+{
+ u32 val;
+ u32 mask;
+ u32 vval, fval;
+ unsigned int region = EVENT_REGION(config_base);
+ unsigned int group = EVENT_GROUP(config_base);
+ unsigned int code = EVENT_CODE(config_base);
+ unsigned int group_shift;
+ bool venum_event = EVENT_VENUM(config_base);
+
+ group_shift = group * 8;
+ mask = 0xff << group_shift;
+
+ /* Configure evtsel for the region and group */
+ if (venum_event)
+ val = SCORPION_VLPM_GROUP0;
+ else
+ val = scorpion_get_pmresrn_event(region);
+ val += group;
+ /* Mix in mode-exclusion bits */
+ val |= config_base & (ARMV7_EXCLUDE_USER | ARMV7_EXCLUDE_PL1);
+ armv7_pmnc_write_evtsel(idx, val);
+
+ asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0));
+
+ if (venum_event) {
+ venum_pre_pmresr(&vval, &fval);
+ val = venum_read_pmresr();
+ val &= ~mask;
+ val |= code << group_shift;
+ val |= PMRESRn_EN;
+ venum_write_pmresr(val);
+ venum_post_pmresr(vval, fval);
+ } else {
+ val = scorpion_read_pmresrn(region);
+ val &= ~mask;
+ val |= code << group_shift;
+ val |= PMRESRn_EN;
+ scorpion_write_pmresrn(region, val);
+ }
+}
+
+static void scorpion_clearpmu(u32 config_base)
+{
+ u32 val;
+ u32 vval, fval;
+ unsigned int region = EVENT_REGION(config_base);
+ unsigned int group = EVENT_GROUP(config_base);
+ bool venum_event = EVENT_VENUM(config_base);
+
+ if (venum_event) {
+ venum_pre_pmresr(&vval, &fval);
+ val = venum_read_pmresr();
+ val = clear_pmresrn_group(val, group);
+ venum_write_pmresr(val);
+ venum_post_pmresr(vval, fval);
+ } else {
+ val = scorpion_read_pmresrn(region);
+ val = clear_pmresrn_group(val, group);
+ scorpion_write_pmresrn(region, val);
+ }
+}
+
+static void scorpion_pmu_disable_event(struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = hwc->idx;
+
+ /* Disable counter and interrupt */
+
+ /* Disable counter */
+ armv7_pmnc_disable_counter(idx);
+
+ /*
+ * Clear pmresr code (if destined for PMNx counters)
+ */
+ if (hwc->config_base & KRAIT_EVENT_MASK)
+ scorpion_clearpmu(hwc->config_base);
+
+ /* Disable interrupt for this counter */
+ armv7_pmnc_disable_intens(idx);
+}
+
+static void scorpion_pmu_enable_event(struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = hwc->idx;
+
+ /*
+ * Set event (if destined for PMNx counters)
+ * We don't set the event for the cycle counter because we
+ * don't have the ability to perform event filtering.
+ */
+ if (hwc->config_base & KRAIT_EVENT_MASK)
+ scorpion_evt_setup(idx, hwc->config_base);
+ else if (idx != ARMV7_IDX_CYCLE_COUNTER)
+ armv7_pmnc_write_evtsel(idx, hwc->config_base);
+
+ armv7_pmnc_enable_intens(idx);
+ armv7_pmnc_enable_counter(idx);
+}
+
+static void scorpion_pmu_reset(void *info)
+{
+ u32 vval, fval;
+ struct arm_pmu *cpu_pmu = info;
+ u32 idx;
+
+ armv7pmu_reset(info);
+
+ /* Clear all pmresrs */
+ scorpion_write_pmresrn(0, 0);
+ scorpion_write_pmresrn(1, 0);
+ scorpion_write_pmresrn(2, 0);
+ scorpion_write_pmresrn(3, 0);
+
+ venum_pre_pmresr(&vval, &fval);
+ venum_write_pmresr(0);
+ venum_post_pmresr(vval, fval);
+
+ /* Reset PMxEVNCTCR to sane default */
+ for_each_set_bit(idx, cpu_pmu->cntr_mask, ARMV7_IDX_COUNTER_MAX) {
+ armv7_pmnc_select_counter(idx);
+ asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0));
+ }
+}
+
+static int scorpion_event_to_bit(struct perf_event *event, unsigned int region,
+ unsigned int group)
+{
+ int bit;
+ struct hw_perf_event *hwc = &event->hw;
+ struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+
+ if (hwc->config_base & VENUM_EVENT)
+ bit = SCORPION_VLPM_GROUP0;
+ else
+ bit = scorpion_get_pmresrn_event(region);
+ bit -= scorpion_get_pmresrn_event(0);
+ bit += group;
+ /*
+ * Lower bits are reserved for use by the counters (see
+ * armv7pmu_get_event_idx() for more info)
+ */
+ bit += bitmap_weight(cpu_pmu->cntr_mask, ARMV7_IDX_COUNTER_MAX);
+
+ return bit;
+}
+
+/*
+ * We check for column exclusion constraints here.
+ * Two events can't use the same group within a pmresr register.
+ */
+static int scorpion_pmu_get_event_idx(struct pmu_hw_events *cpuc,
+ struct perf_event *event)
+{
+ int idx;
+ int bit = -1;
+ struct hw_perf_event *hwc = &event->hw;
+ unsigned int region = EVENT_REGION(hwc->config_base);
+ unsigned int group = EVENT_GROUP(hwc->config_base);
+ bool venum_event = EVENT_VENUM(hwc->config_base);
+ bool scorpion_event = EVENT_CPU(hwc->config_base);
+
+ if (venum_event || scorpion_event) {
+ /* Ignore invalid events */
+ if (group > 3 || region > 3)
+ return -EINVAL;
+
+ bit = scorpion_event_to_bit(event, region, group);
+ if (test_and_set_bit(bit, cpuc->used_mask))
+ return -EAGAIN;
+ }
+
+ idx = armv7pmu_get_event_idx(cpuc, event);
+ if (idx < 0 && bit >= 0)
+ clear_bit(bit, cpuc->used_mask);
+
+ return idx;
+}
+
+static void scorpion_pmu_clear_event_idx(struct pmu_hw_events *cpuc,
+ struct perf_event *event)
+{
+ int bit;
+ struct hw_perf_event *hwc = &event->hw;
+ unsigned int region = EVENT_REGION(hwc->config_base);
+ unsigned int group = EVENT_GROUP(hwc->config_base);
+ bool venum_event = EVENT_VENUM(hwc->config_base);
+ bool scorpion_event = EVENT_CPU(hwc->config_base);
+
+ armv7pmu_clear_event_idx(cpuc, event);
+ if (venum_event || scorpion_event) {
+ bit = scorpion_event_to_bit(event, region, group);
+ clear_bit(bit, cpuc->used_mask);
+ }
+}
+
+static int scorpion_pmu_init(struct arm_pmu *cpu_pmu)
+{
+ armv7pmu_init(cpu_pmu);
+ cpu_pmu->name = "armv7_scorpion";
+ cpu_pmu->map_event = scorpion_map_event;
+ cpu_pmu->reset = scorpion_pmu_reset;
+ cpu_pmu->enable = scorpion_pmu_enable_event;
+ cpu_pmu->disable = scorpion_pmu_disable_event;
+ cpu_pmu->get_event_idx = scorpion_pmu_get_event_idx;
+ cpu_pmu->clear_event_idx = scorpion_pmu_clear_event_idx;
+ return armv7_probe_num_events(cpu_pmu);
+}
+
+static int scorpion_mp_pmu_init(struct arm_pmu *cpu_pmu)
+{
+ armv7pmu_init(cpu_pmu);
+ cpu_pmu->name = "armv7_scorpion_mp";
+ cpu_pmu->map_event = scorpion_map_event;
+ cpu_pmu->reset = scorpion_pmu_reset;
+ cpu_pmu->enable = scorpion_pmu_enable_event;
+ cpu_pmu->disable = scorpion_pmu_disable_event;
+ cpu_pmu->get_event_idx = scorpion_pmu_get_event_idx;
+ cpu_pmu->clear_event_idx = scorpion_pmu_clear_event_idx;
+ return armv7_probe_num_events(cpu_pmu);
+}
+
+static const struct of_device_id armv7_pmu_of_device_ids[] = {
+ {.compatible = "arm,cortex-a17-pmu", .data = armv7_a17_pmu_init},
+ {.compatible = "arm,cortex-a15-pmu", .data = armv7_a15_pmu_init},
+ {.compatible = "arm,cortex-a12-pmu", .data = armv7_a12_pmu_init},
+ {.compatible = "arm,cortex-a9-pmu", .data = armv7_a9_pmu_init},
+ {.compatible = "arm,cortex-a8-pmu", .data = armv7_a8_pmu_init},
+ {.compatible = "arm,cortex-a7-pmu", .data = armv7_a7_pmu_init},
+ {.compatible = "arm,cortex-a5-pmu", .data = armv7_a5_pmu_init},
+ {.compatible = "qcom,krait-pmu", .data = krait_pmu_init},
+ {.compatible = "qcom,scorpion-pmu", .data = scorpion_pmu_init},
+ {.compatible = "qcom,scorpion-mp-pmu", .data = scorpion_mp_pmu_init},
+ {},
+};
+
+static int armv7_pmu_device_probe(struct platform_device *pdev)
+{
+ return arm_pmu_device_probe(pdev, armv7_pmu_of_device_ids, NULL);
+}
+
+static struct platform_driver armv7_pmu_driver = {
+ .driver = {
+ .name = "armv7-pmu",
+ .of_match_table = armv7_pmu_of_device_ids,
+ .suppress_bind_attrs = true,
+ },
+ .probe = armv7_pmu_device_probe,
+};
+
+builtin_platform_driver(armv7_pmu_driver);
diff --git a/drivers/perf/arm_xscale_pmu.c b/drivers/perf/arm_xscale_pmu.c
new file mode 100644
index 000000000000..c2ac41dd9e19
--- /dev/null
+++ b/drivers/perf/arm_xscale_pmu.c
@@ -0,0 +1,747 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ARMv5 [xscale] Performance counter handling code.
+ *
+ * Copyright (C) 2010, ARM Ltd., Will Deacon <will.deacon@arm.com>
+ *
+ * Based on the previous xscale OProfile code.
+ *
+ * There are two variants of the xscale PMU that we support:
+ * - xscale1pmu: 2 event counters and a cycle counter
+ * - xscale2pmu: 4 event counters and a cycle counter
+ * The two variants share event definitions, but have different
+ * PMU structures.
+ */
+
+#include <asm/cputype.h>
+#include <asm/irq_regs.h>
+
+#include <linux/of.h>
+#include <linux/perf/arm_pmu.h>
+#include <linux/platform_device.h>
+
+enum xscale_perf_types {
+ XSCALE_PERFCTR_ICACHE_MISS = 0x00,
+ XSCALE_PERFCTR_ICACHE_NO_DELIVER = 0x01,
+ XSCALE_PERFCTR_DATA_STALL = 0x02,
+ XSCALE_PERFCTR_ITLB_MISS = 0x03,
+ XSCALE_PERFCTR_DTLB_MISS = 0x04,
+ XSCALE_PERFCTR_BRANCH = 0x05,
+ XSCALE_PERFCTR_BRANCH_MISS = 0x06,
+ XSCALE_PERFCTR_INSTRUCTION = 0x07,
+ XSCALE_PERFCTR_DCACHE_FULL_STALL = 0x08,
+ XSCALE_PERFCTR_DCACHE_FULL_STALL_CONTIG = 0x09,
+ XSCALE_PERFCTR_DCACHE_ACCESS = 0x0A,
+ XSCALE_PERFCTR_DCACHE_MISS = 0x0B,
+ XSCALE_PERFCTR_DCACHE_WRITE_BACK = 0x0C,
+ XSCALE_PERFCTR_PC_CHANGED = 0x0D,
+ XSCALE_PERFCTR_BCU_REQUEST = 0x10,
+ XSCALE_PERFCTR_BCU_FULL = 0x11,
+ XSCALE_PERFCTR_BCU_DRAIN = 0x12,
+ XSCALE_PERFCTR_BCU_ECC_NO_ELOG = 0x14,
+ XSCALE_PERFCTR_BCU_1_BIT_ERR = 0x15,
+ XSCALE_PERFCTR_RMW = 0x16,
+ /* XSCALE_PERFCTR_CCNT is not hardware defined */
+ XSCALE_PERFCTR_CCNT = 0xFE,
+ XSCALE_PERFCTR_UNUSED = 0xFF,
+};
+
+enum xscale_counters {
+ XSCALE_CYCLE_COUNTER = 0,
+ XSCALE_COUNTER0,
+ XSCALE_COUNTER1,
+ XSCALE_COUNTER2,
+ XSCALE_COUNTER3,
+};
+#define XSCALE1_NUM_COUNTERS 3
+#define XSCALE2_NUM_COUNTERS 5
+
+static const unsigned xscale_perf_map[PERF_COUNT_HW_MAX] = {
+ PERF_MAP_ALL_UNSUPPORTED,
+ [PERF_COUNT_HW_CPU_CYCLES] = XSCALE_PERFCTR_CCNT,
+ [PERF_COUNT_HW_INSTRUCTIONS] = XSCALE_PERFCTR_INSTRUCTION,
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = XSCALE_PERFCTR_BRANCH,
+ [PERF_COUNT_HW_BRANCH_MISSES] = XSCALE_PERFCTR_BRANCH_MISS,
+ [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = XSCALE_PERFCTR_ICACHE_NO_DELIVER,
+};
+
+static const unsigned xscale_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+ PERF_CACHE_MAP_ALL_UNSUPPORTED,
+
+ [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = XSCALE_PERFCTR_DCACHE_ACCESS,
+ [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = XSCALE_PERFCTR_DCACHE_MISS,
+ [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = XSCALE_PERFCTR_DCACHE_ACCESS,
+ [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = XSCALE_PERFCTR_DCACHE_MISS,
+
+ [C(L1I)][C(OP_READ)][C(RESULT_MISS)] = XSCALE_PERFCTR_ICACHE_MISS,
+
+ [C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = XSCALE_PERFCTR_DTLB_MISS,
+ [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = XSCALE_PERFCTR_DTLB_MISS,
+
+ [C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = XSCALE_PERFCTR_ITLB_MISS,
+ [C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)] = XSCALE_PERFCTR_ITLB_MISS,
+};
+
+#define XSCALE_PMU_ENABLE 0x001
+#define XSCALE_PMN_RESET 0x002
+#define XSCALE_CCNT_RESET 0x004
+#define XSCALE_PMU_RESET (XSCALE_CCNT_RESET | XSCALE_PMN_RESET)
+#define XSCALE_PMU_CNT64 0x008
+
+#define XSCALE1_OVERFLOWED_MASK 0x700
+#define XSCALE1_CCOUNT_OVERFLOW 0x400
+#define XSCALE1_COUNT0_OVERFLOW 0x100
+#define XSCALE1_COUNT1_OVERFLOW 0x200
+#define XSCALE1_CCOUNT_INT_EN 0x040
+#define XSCALE1_COUNT0_INT_EN 0x010
+#define XSCALE1_COUNT1_INT_EN 0x020
+#define XSCALE1_COUNT0_EVT_SHFT 12
+#define XSCALE1_COUNT0_EVT_MASK (0xff << XSCALE1_COUNT0_EVT_SHFT)
+#define XSCALE1_COUNT1_EVT_SHFT 20
+#define XSCALE1_COUNT1_EVT_MASK (0xff << XSCALE1_COUNT1_EVT_SHFT)
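+
+/*
+ * On xscale1 everything lives in the single PMNC register described by
+ * the masks above: evtsel for counter 1 in bits [27:20] and counter 0 in
+ * bits [19:12], overflow flags in bits [10:8], interrupt enables in bits
+ * [6:4] and the enable/reset/CNT64 controls in bits [3:0].
+ */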
+
+static inline u32
+xscale1pmu_read_pmnc(void)
+{
+ u32 val;
+ asm volatile("mrc p14, 0, %0, c0, c0, 0" : "=r" (val));
+ return val;
+}
+
+static inline void
+xscale1pmu_write_pmnc(u32 val)
+{
+ /* the upper 4 bits and bits 7 and 11 are write-as-0 */
+ val &= 0xffff77f;
+ asm volatile("mcr p14, 0, %0, c0, c0, 0" : : "r" (val));
+}
+
+static inline int
+xscale1_pmnc_counter_has_overflowed(unsigned long pmnc,
+ enum xscale_counters counter)
+{
+ int ret = 0;
+
+ switch (counter) {
+ case XSCALE_CYCLE_COUNTER:
+ ret = pmnc & XSCALE1_CCOUNT_OVERFLOW;
+ break;
+ case XSCALE_COUNTER0:
+ ret = pmnc & XSCALE1_COUNT0_OVERFLOW;
+ break;
+ case XSCALE_COUNTER1:
+ ret = pmnc & XSCALE1_COUNT1_OVERFLOW;
+ break;
+ default:
+ WARN_ONCE(1, "invalid counter number (%d)\n", counter);
+ }
+
+ return ret;
+}
+
+static irqreturn_t
+xscale1pmu_handle_irq(struct arm_pmu *cpu_pmu)
+{
+ unsigned long pmnc;
+ struct perf_sample_data data;
+ struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
+ struct pt_regs *regs;
+ int idx;
+
+ /*
+ * NOTE: there's an A stepping erratum whereby, if an overflow
+ * bit is already set and another overflow occurs, the previously
+ * set bit is cleared. There's no workaround; this is fixed in
+ * the B stepping and later.
+ */
+ pmnc = xscale1pmu_read_pmnc();
+
+ /*
+ * Write the value back to clear the overflow flags. Overflow
+ * flags remain in pmnc for use below. We also disable the PMU
+ * while we process the interrupt.
+ */
+ xscale1pmu_write_pmnc(pmnc & ~XSCALE_PMU_ENABLE);
+
+ if (!(pmnc & XSCALE1_OVERFLOWED_MASK))
+ return IRQ_NONE;
+
+ regs = get_irq_regs();
+
+ for_each_set_bit(idx, cpu_pmu->cntr_mask, XSCALE1_NUM_COUNTERS) {
+ struct perf_event *event = cpuc->events[idx];
+ struct hw_perf_event *hwc;
+
+ if (!event)
+ continue;
+
+ if (!xscale1_pmnc_counter_has_overflowed(pmnc, idx))
+ continue;
+
+ hwc = &event->hw;
+ armpmu_event_update(event);
+ perf_sample_data_init(&data, 0, hwc->last_period);
+ if (!armpmu_event_set_period(event))
+ continue;
+
+ perf_event_overflow(event, &data, regs);
+ }
+
+ irq_work_run();
+
+ /*
+ * Re-enable the PMU.
+ */
+ pmnc = xscale1pmu_read_pmnc() | XSCALE_PMU_ENABLE;
+ xscale1pmu_write_pmnc(pmnc);
+
+ return IRQ_HANDLED;
+}
+
+static void xscale1pmu_enable_event(struct perf_event *event)
+{
+ unsigned long val, mask, evt;
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = hwc->idx;
+
+ switch (idx) {
+ case XSCALE_CYCLE_COUNTER:
+ mask = 0;
+ evt = XSCALE1_CCOUNT_INT_EN;
+ break;
+ case XSCALE_COUNTER0:
+ mask = XSCALE1_COUNT0_EVT_MASK;
+ evt = (hwc->config_base << XSCALE1_COUNT0_EVT_SHFT) |
+ XSCALE1_COUNT0_INT_EN;
+ break;
+ case XSCALE_COUNTER1:
+ mask = XSCALE1_COUNT1_EVT_MASK;
+ evt = (hwc->config_base << XSCALE1_COUNT1_EVT_SHFT) |
+ XSCALE1_COUNT1_INT_EN;
+ break;
+ default:
+ WARN_ONCE(1, "invalid counter number (%d)\n", idx);
+ return;
+ }
+
+ val = xscale1pmu_read_pmnc();
+ val &= ~mask;
+ val |= evt;
+ xscale1pmu_write_pmnc(val);
+}
+
+static void xscale1pmu_disable_event(struct perf_event *event)
+{
+ unsigned long val, mask, evt;
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = hwc->idx;
+
+ switch (idx) {
+ case XSCALE_CYCLE_COUNTER:
+ mask = XSCALE1_CCOUNT_INT_EN;
+ evt = 0;
+ break;
+ case XSCALE_COUNTER0:
+ mask = XSCALE1_COUNT0_INT_EN | XSCALE1_COUNT0_EVT_MASK;
+ evt = XSCALE_PERFCTR_UNUSED << XSCALE1_COUNT0_EVT_SHFT;
+ break;
+ case XSCALE_COUNTER1:
+ mask = XSCALE1_COUNT1_INT_EN | XSCALE1_COUNT1_EVT_MASK;
+ evt = XSCALE_PERFCTR_UNUSED << XSCALE1_COUNT1_EVT_SHFT;
+ break;
+ default:
+ WARN_ONCE(1, "invalid counter number (%d)\n", idx);
+ return;
+ }
+
+ val = xscale1pmu_read_pmnc();
+ val &= ~mask;
+ val |= evt;
+ xscale1pmu_write_pmnc(val);
+}
+
+static int
+xscale1pmu_get_event_idx(struct pmu_hw_events *cpuc,
+ struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ if (XSCALE_PERFCTR_CCNT == hwc->config_base) {
+ if (test_and_set_bit(XSCALE_CYCLE_COUNTER, cpuc->used_mask))
+ return -EAGAIN;
+
+ return XSCALE_CYCLE_COUNTER;
+ } else {
+ if (!test_and_set_bit(XSCALE_COUNTER1, cpuc->used_mask))
+ return XSCALE_COUNTER1;
+
+ if (!test_and_set_bit(XSCALE_COUNTER0, cpuc->used_mask))
+ return XSCALE_COUNTER0;
+
+ return -EAGAIN;
+ }
+}
+
+static void xscalepmu_clear_event_idx(struct pmu_hw_events *cpuc,
+ struct perf_event *event)
+{
+ clear_bit(event->hw.idx, cpuc->used_mask);
+}
+
+static void xscale1pmu_start(struct arm_pmu *cpu_pmu)
+{
+ unsigned long val;
+
+ val = xscale1pmu_read_pmnc();
+ val |= XSCALE_PMU_ENABLE;
+ xscale1pmu_write_pmnc(val);
+}
+
+static void xscale1pmu_stop(struct arm_pmu *cpu_pmu)
+{
+ unsigned long val;
+
+ val = xscale1pmu_read_pmnc();
+ val &= ~XSCALE_PMU_ENABLE;
+ xscale1pmu_write_pmnc(val);
+}
+
+static inline u64 xscale1pmu_read_counter(struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ int counter = hwc->idx;
+ u32 val = 0;
+
+ switch (counter) {
+ case XSCALE_CYCLE_COUNTER:
+ asm volatile("mrc p14, 0, %0, c1, c0, 0" : "=r" (val));
+ break;
+ case XSCALE_COUNTER0:
+ asm volatile("mrc p14, 0, %0, c2, c0, 0" : "=r" (val));
+ break;
+ case XSCALE_COUNTER1:
+ asm volatile("mrc p14, 0, %0, c3, c0, 0" : "=r" (val));
+ break;
+ }
+
+ return val;
+}
+
+static inline void xscale1pmu_write_counter(struct perf_event *event, u64 val)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ int counter = hwc->idx;
+
+ switch (counter) {
+ case XSCALE_CYCLE_COUNTER:
+ asm volatile("mcr p14, 0, %0, c1, c0, 0" : : "r" (val));
+ break;
+ case XSCALE_COUNTER0:
+ asm volatile("mcr p14, 0, %0, c2, c0, 0" : : "r" (val));
+ break;
+ case XSCALE_COUNTER1:
+ asm volatile("mcr p14, 0, %0, c3, c0, 0" : : "r" (val));
+ break;
+ }
+}
+
+static int xscale_map_event(struct perf_event *event)
+{
+ return armpmu_map_event(event, &xscale_perf_map,
+ &xscale_perf_cache_map, 0xFF);
+}
+
+static int xscale1pmu_init(struct arm_pmu *cpu_pmu)
+{
+ cpu_pmu->name = "armv5_xscale1";
+ cpu_pmu->handle_irq = xscale1pmu_handle_irq;
+ cpu_pmu->enable = xscale1pmu_enable_event;
+ cpu_pmu->disable = xscale1pmu_disable_event;
+ cpu_pmu->read_counter = xscale1pmu_read_counter;
+ cpu_pmu->write_counter = xscale1pmu_write_counter;
+ cpu_pmu->get_event_idx = xscale1pmu_get_event_idx;
+ cpu_pmu->clear_event_idx = xscalepmu_clear_event_idx;
+ cpu_pmu->start = xscale1pmu_start;
+ cpu_pmu->stop = xscale1pmu_stop;
+ cpu_pmu->map_event = xscale_map_event;
+
+ bitmap_set(cpu_pmu->cntr_mask, 0, XSCALE1_NUM_COUNTERS);
+
+ return 0;
+}
+
+#define XSCALE2_OVERFLOWED_MASK 0x01f
+#define XSCALE2_CCOUNT_OVERFLOW 0x001
+#define XSCALE2_COUNT0_OVERFLOW 0x002
+#define XSCALE2_COUNT1_OVERFLOW 0x004
+#define XSCALE2_COUNT2_OVERFLOW 0x008
+#define XSCALE2_COUNT3_OVERFLOW 0x010
+#define XSCALE2_CCOUNT_INT_EN 0x001
+#define XSCALE2_COUNT0_INT_EN 0x002
+#define XSCALE2_COUNT1_INT_EN 0x004
+#define XSCALE2_COUNT2_INT_EN 0x008
+#define XSCALE2_COUNT3_INT_EN 0x010
+#define XSCALE2_COUNT0_EVT_SHFT 0
+#define XSCALE2_COUNT0_EVT_MASK (0xff << XSCALE2_COUNT0_EVT_SHFT)
+#define XSCALE2_COUNT1_EVT_SHFT 8
+#define XSCALE2_COUNT1_EVT_MASK (0xff << XSCALE2_COUNT1_EVT_SHFT)
+#define XSCALE2_COUNT2_EVT_SHFT 16
+#define XSCALE2_COUNT2_EVT_MASK (0xff << XSCALE2_COUNT2_EVT_SHFT)
+#define XSCALE2_COUNT3_EVT_SHFT 24
+#define XSCALE2_COUNT3_EVT_MASK (0xff << XSCALE2_COUNT3_EVT_SHFT)
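+
+/*
+ * Unlike xscale1, xscale2 splits this state across separate registers:
+ * one event-select register with an 8-bit field per counter (the
+ * *_EVT_* masks above), plus dedicated interrupt-enable and
+ * overflow-flag registers that use one bit per counter.
+ */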
+
+static inline u32
+xscale2pmu_read_pmnc(void)
+{
+ u32 val;
+ asm volatile("mrc p14, 0, %0, c0, c1, 0" : "=r" (val));
+ /* bits 1-2 and 4-23 are read-unpredictable */
+ return val & 0xff000009;
+}
+
+static inline void
+xscale2pmu_write_pmnc(u32 val)
+{
+ /* bits 4-23 are write-as-0, 24-31 are write ignored */
+ val &= 0xf;
+ asm volatile("mcr p14, 0, %0, c0, c1, 0" : : "r" (val));
+}
+
+static inline u32
+xscale2pmu_read_overflow_flags(void)
+{
+ u32 val;
+ asm volatile("mrc p14, 0, %0, c5, c1, 0" : "=r" (val));
+ return val;
+}
+
+static inline void
+xscale2pmu_write_overflow_flags(u32 val)
+{
+ asm volatile("mcr p14, 0, %0, c5, c1, 0" : : "r" (val));
+}
+
+static inline u32
+xscale2pmu_read_event_select(void)
+{
+ u32 val;
+ asm volatile("mrc p14, 0, %0, c8, c1, 0" : "=r" (val));
+ return val;
+}
+
+static inline void
+xscale2pmu_write_event_select(u32 val)
+{
+ asm volatile("mcr p14, 0, %0, c8, c1, 0" : : "r"(val));
+}
+
+static inline u32
+xscale2pmu_read_int_enable(void)
+{
+ u32 val;
+ asm volatile("mrc p14, 0, %0, c4, c1, 0" : "=r" (val));
+ return val;
+}
+
+static void
+xscale2pmu_write_int_enable(u32 val)
+{
+ asm volatile("mcr p14, 0, %0, c4, c1, 0" : : "r" (val));
+}
+
+static inline int
+xscale2_pmnc_counter_has_overflowed(unsigned long of_flags,
+ enum xscale_counters counter)
+{
+ int ret = 0;
+
+ switch (counter) {
+ case XSCALE_CYCLE_COUNTER:
+ ret = of_flags & XSCALE2_CCOUNT_OVERFLOW;
+ break;
+ case XSCALE_COUNTER0:
+ ret = of_flags & XSCALE2_COUNT0_OVERFLOW;
+ break;
+ case XSCALE_COUNTER1:
+ ret = of_flags & XSCALE2_COUNT1_OVERFLOW;
+ break;
+ case XSCALE_COUNTER2:
+ ret = of_flags & XSCALE2_COUNT2_OVERFLOW;
+ break;
+ case XSCALE_COUNTER3:
+ ret = of_flags & XSCALE2_COUNT3_OVERFLOW;
+ break;
+ default:
+ WARN_ONCE(1, "invalid counter number (%d)\n", counter);
+ }
+
+ return ret;
+}
+
+static irqreturn_t
+xscale2pmu_handle_irq(struct arm_pmu *cpu_pmu)
+{
+ unsigned long pmnc, of_flags;
+ struct perf_sample_data data;
+ struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
+ struct pt_regs *regs;
+ int idx;
+
+ /* Disable the PMU. */
+ pmnc = xscale2pmu_read_pmnc();
+ xscale2pmu_write_pmnc(pmnc & ~XSCALE_PMU_ENABLE);
+
+ /* Check the overflow flag register. */
+ of_flags = xscale2pmu_read_overflow_flags();
+ if (!(of_flags & XSCALE2_OVERFLOWED_MASK))
+ return IRQ_NONE;
+
+ /* Clear the overflow bits. */
+ xscale2pmu_write_overflow_flags(of_flags);
+
+ regs = get_irq_regs();
+
+ for_each_set_bit(idx, cpu_pmu->cntr_mask, XSCALE2_NUM_COUNTERS) {
+ struct perf_event *event = cpuc->events[idx];
+ struct hw_perf_event *hwc;
+
+ if (!event)
+ continue;
+
+ if (!xscale2_pmnc_counter_has_overflowed(of_flags, idx))
+ continue;
+
+ hwc = &event->hw;
+ armpmu_event_update(event);
+ perf_sample_data_init(&data, 0, hwc->last_period);
+ if (!armpmu_event_set_period(event))
+ continue;
+
+ perf_event_overflow(event, &data, regs);
+ }
+
+ irq_work_run();
+
+ /*
+ * Re-enable the PMU.
+ */
+ pmnc = xscale2pmu_read_pmnc() | XSCALE_PMU_ENABLE;
+ xscale2pmu_write_pmnc(pmnc);
+
+ return IRQ_HANDLED;
+}
+
+static void xscale2pmu_enable_event(struct perf_event *event)
+{
+ unsigned long ien, evtsel;
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = hwc->idx;
+
+ ien = xscale2pmu_read_int_enable();
+ evtsel = xscale2pmu_read_event_select();
+
+ switch (idx) {
+ case XSCALE_CYCLE_COUNTER:
+ ien |= XSCALE2_CCOUNT_INT_EN;
+ break;
+ case XSCALE_COUNTER0:
+ ien |= XSCALE2_COUNT0_INT_EN;
+ evtsel &= ~XSCALE2_COUNT0_EVT_MASK;
+ evtsel |= hwc->config_base << XSCALE2_COUNT0_EVT_SHFT;
+ break;
+ case XSCALE_COUNTER1:
+ ien |= XSCALE2_COUNT1_INT_EN;
+ evtsel &= ~XSCALE2_COUNT1_EVT_MASK;
+ evtsel |= hwc->config_base << XSCALE2_COUNT1_EVT_SHFT;
+ break;
+ case XSCALE_COUNTER2:
+ ien |= XSCALE2_COUNT2_INT_EN;
+ evtsel &= ~XSCALE2_COUNT2_EVT_MASK;
+ evtsel |= hwc->config_base << XSCALE2_COUNT2_EVT_SHFT;
+ break;
+ case XSCALE_COUNTER3:
+ ien |= XSCALE2_COUNT3_INT_EN;
+ evtsel &= ~XSCALE2_COUNT3_EVT_MASK;
+ evtsel |= hwc->config_base << XSCALE2_COUNT3_EVT_SHFT;
+ break;
+ default:
+ WARN_ONCE(1, "invalid counter number (%d)\n", idx);
+ return;
+ }
+
+ xscale2pmu_write_event_select(evtsel);
+ xscale2pmu_write_int_enable(ien);
+}
+
+static void xscale2pmu_disable_event(struct perf_event *event)
+{
+ unsigned long ien, evtsel, of_flags;
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = hwc->idx;
+
+ ien = xscale2pmu_read_int_enable();
+ evtsel = xscale2pmu_read_event_select();
+
+ switch (idx) {
+ case XSCALE_CYCLE_COUNTER:
+ ien &= ~XSCALE2_CCOUNT_INT_EN;
+ of_flags = XSCALE2_CCOUNT_OVERFLOW;
+ break;
+ case XSCALE_COUNTER0:
+ ien &= ~XSCALE2_COUNT0_INT_EN;
+ evtsel &= ~XSCALE2_COUNT0_EVT_MASK;
+ evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT0_EVT_SHFT;
+ of_flags = XSCALE2_COUNT0_OVERFLOW;
+ break;
+ case XSCALE_COUNTER1:
+ ien &= ~XSCALE2_COUNT1_INT_EN;
+ evtsel &= ~XSCALE2_COUNT1_EVT_MASK;
+ evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT1_EVT_SHFT;
+ of_flags = XSCALE2_COUNT1_OVERFLOW;
+ break;
+ case XSCALE_COUNTER2:
+ ien &= ~XSCALE2_COUNT2_INT_EN;
+ evtsel &= ~XSCALE2_COUNT2_EVT_MASK;
+ evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT2_EVT_SHFT;
+ of_flags = XSCALE2_COUNT2_OVERFLOW;
+ break;
+ case XSCALE_COUNTER3:
+ ien &= ~XSCALE2_COUNT3_INT_EN;
+ evtsel &= ~XSCALE2_COUNT3_EVT_MASK;
+ evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT3_EVT_SHFT;
+ of_flags = XSCALE2_COUNT3_OVERFLOW;
+ break;
+ default:
+ WARN_ONCE(1, "invalid counter number (%d)\n", idx);
+ return;
+ }
+
+ xscale2pmu_write_event_select(evtsel);
+ xscale2pmu_write_int_enable(ien);
+ xscale2pmu_write_overflow_flags(of_flags);
+}
+
+static int
+xscale2pmu_get_event_idx(struct pmu_hw_events *cpuc,
+ struct perf_event *event)
+{
+ int idx = xscale1pmu_get_event_idx(cpuc, event);
+ if (idx >= 0)
+ goto out;
+
+ if (!test_and_set_bit(XSCALE_COUNTER3, cpuc->used_mask))
+ idx = XSCALE_COUNTER3;
+ else if (!test_and_set_bit(XSCALE_COUNTER2, cpuc->used_mask))
+ idx = XSCALE_COUNTER2;
+out:
+ return idx;
+}
+
+static void xscale2pmu_start(struct arm_pmu *cpu_pmu)
+{
+ unsigned long val;
+
+ val = xscale2pmu_read_pmnc() & ~XSCALE_PMU_CNT64;
+ val |= XSCALE_PMU_ENABLE;
+ xscale2pmu_write_pmnc(val);
+}
+
+static void xscale2pmu_stop(struct arm_pmu *cpu_pmu)
+{
+ unsigned long val;
+
+ val = xscale2pmu_read_pmnc();
+ val &= ~XSCALE_PMU_ENABLE;
+ xscale2pmu_write_pmnc(val);
+}
+
+static inline u64 xscale2pmu_read_counter(struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ int counter = hwc->idx;
+ u32 val = 0;
+
+ switch (counter) {
+ case XSCALE_CYCLE_COUNTER:
+ asm volatile("mrc p14, 0, %0, c1, c1, 0" : "=r" (val));
+ break;
+ case XSCALE_COUNTER0:
+ asm volatile("mrc p14, 0, %0, c0, c2, 0" : "=r" (val));
+ break;
+ case XSCALE_COUNTER1:
+ asm volatile("mrc p14, 0, %0, c1, c2, 0" : "=r" (val));
+ break;
+ case XSCALE_COUNTER2:
+ asm volatile("mrc p14, 0, %0, c2, c2, 0" : "=r" (val));
+ break;
+ case XSCALE_COUNTER3:
+ asm volatile("mrc p14, 0, %0, c3, c2, 0" : "=r" (val));
+ break;
+ }
+
+ return val;
+}
+
+static inline void xscale2pmu_write_counter(struct perf_event *event, u64 val)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ int counter = hwc->idx;
+
+ switch (counter) {
+ case XSCALE_CYCLE_COUNTER:
+ asm volatile("mcr p14, 0, %0, c1, c1, 0" : : "r" (val));
+ break;
+ case XSCALE_COUNTER0:
+ asm volatile("mcr p14, 0, %0, c0, c2, 0" : : "r" (val));
+ break;
+ case XSCALE_COUNTER1:
+ asm volatile("mcr p14, 0, %0, c1, c2, 0" : : "r" (val));
+ break;
+ case XSCALE_COUNTER2:
+ asm volatile("mcr p14, 0, %0, c2, c2, 0" : : "r" (val));
+ break;
+ case XSCALE_COUNTER3:
+ asm volatile("mcr p14, 0, %0, c3, c2, 0" : : "r" (val));
+ break;
+ }
+}
+
+static int xscale2pmu_init(struct arm_pmu *cpu_pmu)
+{
+ cpu_pmu->name = "armv5_xscale2";
+ cpu_pmu->handle_irq = xscale2pmu_handle_irq;
+ cpu_pmu->enable = xscale2pmu_enable_event;
+ cpu_pmu->disable = xscale2pmu_disable_event;
+ cpu_pmu->read_counter = xscale2pmu_read_counter;
+ cpu_pmu->write_counter = xscale2pmu_write_counter;
+ cpu_pmu->get_event_idx = xscale2pmu_get_event_idx;
+ cpu_pmu->clear_event_idx = xscalepmu_clear_event_idx;
+ cpu_pmu->start = xscale2pmu_start;
+ cpu_pmu->stop = xscale2pmu_stop;
+ cpu_pmu->map_event = xscale_map_event;
+
+ bitmap_set(cpu_pmu->cntr_mask, 0, XSCALE2_NUM_COUNTERS);
+
+ return 0;
+}
+
+static const struct pmu_probe_info xscale_pmu_probe_table[] = {
+ XSCALE_PMU_PROBE(ARM_CPU_XSCALE_ARCH_V1, xscale1pmu_init),
+ XSCALE_PMU_PROBE(ARM_CPU_XSCALE_ARCH_V2, xscale2pmu_init),
+ { /* sentinel value */ }
+};
+
+static int xscale_pmu_device_probe(struct platform_device *pdev)
+{
+ return arm_pmu_device_probe(pdev, NULL, xscale_pmu_probe_table);
+}
+
+static struct platform_driver xscale_pmu_driver = {
+ .driver = {
+ .name = "xscale-pmu",
+ },
+ .probe = xscale_pmu_device_probe,
+};
+
+builtin_platform_driver(xscale_pmu_driver);
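
Editor's note: the XScale2 EVTSEL register accessed above packs one 8-bit event field per event counter (XSCALE2_COUNT0..3_EVT_SHFT/MASK), which is what the per-counter switch statements in xscale2pmu_enable_event()/xscale2pmu_disable_event() update case by case. A minimal standalone sketch of that field update follows; it is illustrative only and not part of the driver. The helper name xscale2_evtsel_set() and the "slot" parameter (the counter's position 0..3 within the register; the cycle counter has no event field) are assumptions introduced here.

#include <stdint.h>

/*
 * Illustrative sketch only (not from the driver above): update the 8-bit
 * event field for one of the four XScale2 event counters in an EVTSEL
 * register image. "slot" is the counter's position in the register (0..3).
 */
static uint32_t xscale2_evtsel_set(uint32_t evtsel, unsigned int slot,
                                   uint8_t event)
{
        unsigned int shift = 8 * slot;          /* COUNT0..COUNT3 fields */

        evtsel &= ~((uint32_t)0xff << shift);   /* clear the old event */
        return evtsel | ((uint32_t)event << shift);
}
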
diff --git a/drivers/perf/cxl_pmu.c b/drivers/perf/cxl_pmu.c
index 0a8f597e695b..d094030220bf 100644
--- a/drivers/perf/cxl_pmu.c
+++ b/drivers/perf/cxl_pmu.c
@@ -25,7 +25,7 @@
#include "../cxl/pmu.h"
#define CXL_PMU_CAP_REG 0x0
-#define CXL_PMU_CAP_NUM_COUNTERS_MSK GENMASK_ULL(4, 0)
+#define CXL_PMU_CAP_NUM_COUNTERS_MSK GENMASK_ULL(5, 0)
#define CXL_PMU_CAP_COUNTER_WIDTH_MSK GENMASK_ULL(15, 8)
#define CXL_PMU_CAP_NUM_EVN_CAP_REG_SUP_MSK GENMASK_ULL(24, 20)
#define CXL_PMU_CAP_FILTERS_SUP_MSK GENMASK_ULL(39, 32)
@@ -59,7 +59,7 @@
#define CXL_PMU_COUNTER_CFG_EVENT_GRP_ID_IDX_MSK GENMASK_ULL(63, 59)
#define CXL_PMU_FILTER_CFG_REG(n, f) (0x400 + 4 * ((f) + (n) * 8))
-#define CXL_PMU_FILTER_CFG_VALUE_MSK GENMASK(15, 0)
+#define CXL_PMU_FILTER_CFG_VALUE_MSK GENMASK(31, 0)
#define CXL_PMU_COUNTER_REG(n) (0xc00 + 8 * (n))
@@ -113,7 +113,7 @@ struct cxl_pmu_info {
/*
* All CPMU counters are discoverable via the Event Capabilities Registers.
- * Each Event Capability register contains a a VID / GroupID.
+ * Each Event Capability register contains a VID / GroupID.
* A counter may then count any combination (by summing) of events in
* that group which are in the Supported Events Bitmask.
* However, there are some complexities to the scheme.
@@ -208,21 +208,10 @@ static int cxl_pmu_parse_caps(struct device *dev, struct cxl_pmu_info *info)
return 0;
}
-static ssize_t cxl_pmu_format_sysfs_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct dev_ext_attribute *eattr;
-
- eattr = container_of(attr, struct dev_ext_attribute, attr);
-
- return sysfs_emit(buf, "%s\n", (char *)eattr->var);
-}
-
#define CXL_PMU_FORMAT_ATTR(_name, _format)\
(&((struct dev_ext_attribute[]) { \
{ \
- .attr = __ATTR(_name, 0444, \
- cxl_pmu_format_sysfs_show, NULL), \
+ .attr = __ATTR(_name, 0444, device_show_string, NULL), \
.var = (void *)_format \
} \
})[0].attr.attr)
@@ -314,9 +303,9 @@ static bool cxl_pmu_config1_get_edge(struct perf_event *event)
}
/*
- * CPMU specification allows for 8 filters, each with a 16 bit value...
- * So we need to find 8x16bits to store it in.
- * As the value used for disable is 0xffff, a separate enable switch
+ * CPMU specification allows for 8 filters, each with a 32 bit value...
+ * So we need to find 8x32bits to store it in.
+ * As the value used for disable is 0xffff_ffff, a separate enable switch
* is needed.
*/
@@ -345,7 +334,7 @@ static ssize_t cxl_pmu_event_sysfs_show(struct device *dev,
/* For CXL spec defined events */
#define CXL_PMU_EVENT_CXL_ATTR(_name, _gid, _msk) \
- CXL_PMU_EVENT_ATTR(_name, PCI_DVSEC_VENDOR_ID_CXL, _gid, _msk)
+ CXL_PMU_EVENT_ATTR(_name, PCI_VENDOR_ID_CXL, _gid, _msk)
static struct attribute *cxl_pmu_event_attrs[] = {
CXL_PMU_EVENT_CXL_ATTR(clock_ticks, CXL_PMU_GID_CLOCK_TICKS, BIT(0)),
@@ -365,7 +354,7 @@ static struct attribute *cxl_pmu_event_attrs[] = {
CXL_PMU_EVENT_CXL_ATTR(d2h_req_wowrinvf, CXL_PMU_GID_D2H_REQ, BIT(13)),
CXL_PMU_EVENT_CXL_ATTR(d2h_req_wrinv, CXL_PMU_GID_D2H_REQ, BIT(14)),
CXL_PMU_EVENT_CXL_ATTR(d2h_req_cacheflushed, CXL_PMU_GID_D2H_REQ, BIT(16)),
- /* CXL rev 3.0 Table 3-20 - D2H Repsonse Encodings */
+ /* CXL rev 3.0 Table 3-20 - D2H Response Encodings */
CXL_PMU_EVENT_CXL_ATTR(d2h_rsp_rspihiti, CXL_PMU_GID_D2H_RSP, BIT(4)),
CXL_PMU_EVENT_CXL_ATTR(d2h_rsp_rspvhitv, CXL_PMU_GID_D2H_RSP, BIT(6)),
CXL_PMU_EVENT_CXL_ATTR(d2h_rsp_rspihitse, CXL_PMU_GID_D2H_RSP, BIT(5)),
@@ -388,12 +377,14 @@ static struct attribute *cxl_pmu_event_attrs[] = {
/* CXL rev 3.0 Table 13-5 directly lists these */
CXL_PMU_EVENT_CXL_ATTR(cachedata_d2h_data, CXL_PMU_GID_CACHE_DATA, BIT(0)),
CXL_PMU_EVENT_CXL_ATTR(cachedata_h2d_data, CXL_PMU_GID_CACHE_DATA, BIT(1)),
- /* CXL rev 3.0 Table 3-29 M2S Req Memory Opcodes */
+ /* CXL rev 3.1 Table 3-35 M2S Req Memory Opcodes */
CXL_PMU_EVENT_CXL_ATTR(m2s_req_meminv, CXL_PMU_GID_M2S_REQ, BIT(0)),
CXL_PMU_EVENT_CXL_ATTR(m2s_req_memrd, CXL_PMU_GID_M2S_REQ, BIT(1)),
CXL_PMU_EVENT_CXL_ATTR(m2s_req_memrddata, CXL_PMU_GID_M2S_REQ, BIT(2)),
CXL_PMU_EVENT_CXL_ATTR(m2s_req_memrdfwd, CXL_PMU_GID_M2S_REQ, BIT(3)),
CXL_PMU_EVENT_CXL_ATTR(m2s_req_memwrfwd, CXL_PMU_GID_M2S_REQ, BIT(4)),
+ CXL_PMU_EVENT_CXL_ATTR(m2s_req_memrdtee, CXL_PMU_GID_M2S_REQ, BIT(5)),
+ CXL_PMU_EVENT_CXL_ATTR(m2s_req_memrddatatee, CXL_PMU_GID_M2S_REQ, BIT(6)),
CXL_PMU_EVENT_CXL_ATTR(m2s_req_memspecrd, CXL_PMU_GID_M2S_REQ, BIT(8)),
CXL_PMU_EVENT_CXL_ATTR(m2s_req_meminvnt, CXL_PMU_GID_M2S_REQ, BIT(9)),
CXL_PMU_EVENT_CXL_ATTR(m2s_req_memcleanevict, CXL_PMU_GID_M2S_REQ, BIT(10)),
@@ -415,11 +406,12 @@ static struct attribute *cxl_pmu_event_attrs[] = {
CXL_PMU_EVENT_CXL_ATTR(s2m_bisnp_curblk, CXL_PMU_GID_S2M_BISNP, BIT(4)),
CXL_PMU_EVENT_CXL_ATTR(s2m_bisnp_datblk, CXL_PMU_GID_S2M_BISNP, BIT(5)),
CXL_PMU_EVENT_CXL_ATTR(s2m_bisnp_invblk, CXL_PMU_GID_S2M_BISNP, BIT(6)),
- /* CXL rev 3.0 Table 3-43 S2M NDR Opcopdes */
+ /* CXL rev 3.1 Table 3-50 S2M NDR Opcodes */
CXL_PMU_EVENT_CXL_ATTR(s2m_ndr_cmp, CXL_PMU_GID_S2M_NDR, BIT(0)),
CXL_PMU_EVENT_CXL_ATTR(s2m_ndr_cmps, CXL_PMU_GID_S2M_NDR, BIT(1)),
CXL_PMU_EVENT_CXL_ATTR(s2m_ndr_cmpe, CXL_PMU_GID_S2M_NDR, BIT(2)),
- CXL_PMU_EVENT_CXL_ATTR(s2m_ndr_biconflictack, CXL_PMU_GID_S2M_NDR, BIT(3)),
+ CXL_PMU_EVENT_CXL_ATTR(s2m_ndr_cmpm, CXL_PMU_GID_S2M_NDR, BIT(3)),
+ CXL_PMU_EVENT_CXL_ATTR(s2m_ndr_biconflictack, CXL_PMU_GID_S2M_NDR, BIT(4)),
/* CXL rev 3.0 Table 3-46 S2M DRS opcodes */
CXL_PMU_EVENT_CXL_ATTR(s2m_drs_memdata, CXL_PMU_GID_S2M_DRS, BIT(0)),
CXL_PMU_EVENT_CXL_ATTR(s2m_drs_memdatanxm, CXL_PMU_GID_S2M_DRS, BIT(1)),
@@ -635,14 +627,14 @@ static void cxl_pmu_event_start(struct perf_event *event, int flags)
hwc->state = 0;
/*
- * Currently only hdm filter control is implemnted, this code will
+ * Currently only hdm filter control is implemented, this code will
* want generalizing when more filters are added.
*/
if (info->filter_hdm) {
if (cxl_pmu_config1_hdm_filter_en(event))
cfg = cxl_pmu_config2_get_hdm_decoder(event);
else
- cfg = GENMASK(15, 0); /* No filtering if 0xFFFF_FFFF */
+ cfg = GENMASK(31, 0); /* No filtering if 0xFFFF_FFFF */
writeq(cfg, base + CXL_PMU_FILTER_CFG_REG(hwc->idx, 0));
}
@@ -842,8 +834,8 @@ static int cxl_pmu_probe(struct device *dev)
if (rc)
return rc;
- info->hw_events = devm_kcalloc(dev, sizeof(*info->hw_events),
- info->num_counters, GFP_KERNEL);
+ info->hw_events = devm_kcalloc(dev, info->num_counters,
+ sizeof(*info->hw_events), GFP_KERNEL);
if (!info->hw_events)
return -ENOMEM;
@@ -881,7 +873,7 @@ static int cxl_pmu_probe(struct device *dev)
return rc;
irq = rc;
- irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_overflow\n", dev_name);
+ irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_overflow", dev_name);
if (!irq_name)
return -ENOMEM;
@@ -983,8 +975,9 @@ static __exit void cxl_pmu_exit(void)
cpuhp_remove_multi_state(cxl_pmu_cpuhp_state_num);
}
+MODULE_DESCRIPTION("CXL Performance Monitor Driver");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS(CXL);
+MODULE_IMPORT_NS("CXL");
module_init(cxl_pmu_init);
module_exit(cxl_pmu_exit);
MODULE_ALIAS_CXL(CXL_DEVICE_PMU);
diff --git a/drivers/perf/dwc_pcie_pmu.c b/drivers/perf/dwc_pcie_pmu.c
new file mode 100644
index 000000000000..22f73ac894e9
--- /dev/null
+++ b/drivers/perf/dwc_pcie_pmu.c
@@ -0,0 +1,889 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Synopsys DesignWare PCIe PMU driver
+ *
+ * Copyright (C) 2021-2023 Alibaba Inc.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/cpuhotplug.h>
+#include <linux/cpumask.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/pcie-dwc.h>
+#include <linux/perf_event.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/smp.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>
+
+#define DWC_PCIE_EVENT_CNT_CTL 0x8
+
+/*
+ * Event Counter Data Select includes two parts:
+ * - 27-24: Group number (4-bit: 0..0x7)
+ * - 23-16: Event number (8-bit: 0..0x13) within the Group
+ *
+ * Put them together as in the TRM.
+ */
+#define DWC_PCIE_CNT_EVENT_SEL GENMASK(27, 16)
+#define DWC_PCIE_CNT_LANE_SEL GENMASK(11, 8)
+#define DWC_PCIE_CNT_STATUS BIT(7)
+#define DWC_PCIE_CNT_ENABLE GENMASK(4, 2)
+#define DWC_PCIE_PER_EVENT_OFF 0x1
+#define DWC_PCIE_PER_EVENT_ON 0x3
+#define DWC_PCIE_EVENT_CLEAR GENMASK(1, 0)
+#define DWC_PCIE_EVENT_PER_CLEAR 0x1
+
+/* Event Selection Field has two subfields */
+#define DWC_PCIE_CNT_EVENT_SEL_GROUP GENMASK(11, 8)
+#define DWC_PCIE_CNT_EVENT_SEL_EVID GENMASK(7, 0)
+
+#define DWC_PCIE_EVENT_CNT_DATA 0xC
+
+#define DWC_PCIE_TIME_BASED_ANAL_CTL 0x10
+#define DWC_PCIE_TIME_BASED_REPORT_SEL GENMASK(31, 24)
+#define DWC_PCIE_TIME_BASED_DURATION_SEL GENMASK(15, 8)
+#define DWC_PCIE_DURATION_MANUAL_CTL 0x0
+#define DWC_PCIE_DURATION_1MS 0x1
+#define DWC_PCIE_DURATION_10MS 0x2
+#define DWC_PCIE_DURATION_100MS 0x3
+#define DWC_PCIE_DURATION_1S 0x4
+#define DWC_PCIE_DURATION_2S 0x5
+#define DWC_PCIE_DURATION_4S 0x6
+#define DWC_PCIE_DURATION_4US 0xFF
+#define DWC_PCIE_TIME_BASED_TIMER_START BIT(0)
+#define DWC_PCIE_TIME_BASED_CNT_ENABLE 0x1
+
+#define DWC_PCIE_TIME_BASED_ANAL_DATA_REG_LOW 0x14
+#define DWC_PCIE_TIME_BASED_ANAL_DATA_REG_HIGH 0x18
+
+/* Event attributes */
+#define DWC_PCIE_CONFIG_EVENTID GENMASK(15, 0)
+#define DWC_PCIE_CONFIG_TYPE GENMASK(19, 16)
+#define DWC_PCIE_CONFIG_LANE GENMASK(27, 20)
+
+#define DWC_PCIE_EVENT_ID(event) FIELD_GET(DWC_PCIE_CONFIG_EVENTID, (event)->attr.config)
+#define DWC_PCIE_EVENT_TYPE(event) FIELD_GET(DWC_PCIE_CONFIG_TYPE, (event)->attr.config)
+#define DWC_PCIE_EVENT_LANE(event) FIELD_GET(DWC_PCIE_CONFIG_LANE, (event)->attr.config)
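
Editor's note: the three CONFIG fields above mirror the sysfs format strings exported later in this file (eventid=config:0-15, type=config:16-19, lane=config:20-27). A minimal sketch of composing such a config value from the userspace side is shown below; the helper name dwc_pcie_pmu_pack_config() is a hypothetical illustration, not driver or perf API.

#include <stdint.h>

/*
 * Illustrative sketch only: pack eventid/type/lane the same way the
 * format attributes describe them (config:0-15, 16-19, 20-27).
 */
static uint64_t dwc_pcie_pmu_pack_config(uint16_t eventid, uint8_t type,
                                         uint8_t lane)
{
        return ((uint64_t)eventid & 0xffff) |           /* config:0-15  */
               (((uint64_t)type & 0xf) << 16) |         /* config:16-19 */
               (((uint64_t)lane & 0xff) << 20);         /* config:20-27 */
}
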
+
+enum dwc_pcie_event_type {
+ DWC_PCIE_TIME_BASE_EVENT,
+ DWC_PCIE_LANE_EVENT,
+ DWC_PCIE_EVENT_TYPE_MAX,
+};
+
+#define DWC_PCIE_LANE_GROUP_6 6
+#define DWC_PCIE_LANE_GROUP_7 7
+#define DWC_PCIE_LANE_MAX_EVENTS_PER_GROUP 256
+
+#define DWC_PCIE_LANE_EVENT_MAX_PERIOD GENMASK_ULL(31, 0)
+#define DWC_PCIE_MAX_PERIOD GENMASK_ULL(63, 0)
+
+struct dwc_pcie_pmu {
+ struct pmu pmu;
+ struct pci_dev *pdev; /* Root Port device */
+ u16 ras_des_offset;
+ u32 nr_lanes;
+
+ /* Groups #6 and #7 */
+ DECLARE_BITMAP(lane_events, 2 * DWC_PCIE_LANE_MAX_EVENTS_PER_GROUP);
+ struct perf_event *time_based_event;
+
+ struct hlist_node cpuhp_node;
+ int on_cpu;
+};
+
+#define to_dwc_pcie_pmu(p) (container_of(p, struct dwc_pcie_pmu, pmu))
+
+static int dwc_pcie_pmu_hp_state;
+static struct list_head dwc_pcie_dev_info_head =
+ LIST_HEAD_INIT(dwc_pcie_dev_info_head);
+static bool notify;
+
+struct dwc_pcie_dev_info {
+ struct platform_device *plat_dev;
+ struct pci_dev *pdev;
+ struct list_head dev_node;
+};
+
+static ssize_t cpumask_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(dev_get_drvdata(dev));
+
+ return cpumap_print_to_pagebuf(true, buf, cpumask_of(pcie_pmu->on_cpu));
+}
+static DEVICE_ATTR_RO(cpumask);
+
+static struct attribute *dwc_pcie_pmu_cpumask_attrs[] = {
+ &dev_attr_cpumask.attr,
+ NULL
+};
+
+static struct attribute_group dwc_pcie_cpumask_attr_group = {
+ .attrs = dwc_pcie_pmu_cpumask_attrs,
+};
+
+struct dwc_pcie_format_attr {
+ struct device_attribute attr;
+ u64 field;
+ int config;
+};
+
+PMU_FORMAT_ATTR(eventid, "config:0-15");
+PMU_FORMAT_ATTR(type, "config:16-19");
+PMU_FORMAT_ATTR(lane, "config:20-27");
+
+static struct attribute *dwc_pcie_format_attrs[] = {
+ &format_attr_type.attr,
+ &format_attr_eventid.attr,
+ &format_attr_lane.attr,
+ NULL,
+};
+
+static struct attribute_group dwc_pcie_format_attrs_group = {
+ .name = "format",
+ .attrs = dwc_pcie_format_attrs,
+};
+
+struct dwc_pcie_event_attr {
+ struct device_attribute attr;
+ enum dwc_pcie_event_type type;
+ u16 eventid;
+ u8 lane;
+};
+
+static ssize_t dwc_pcie_event_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dwc_pcie_event_attr *eattr;
+
+ eattr = container_of(attr, typeof(*eattr), attr);
+
+ if (eattr->type == DWC_PCIE_LANE_EVENT)
+ return sysfs_emit(buf, "eventid=0x%x,type=0x%x,lane=?\n",
+ eattr->eventid, eattr->type);
+ else if (eattr->type == DWC_PCIE_TIME_BASE_EVENT)
+ return sysfs_emit(buf, "eventid=0x%x,type=0x%x\n",
+ eattr->eventid, eattr->type);
+
+ return 0;
+}
+
+#define DWC_PCIE_EVENT_ATTR(_name, _type, _eventid, _lane) \
+ (&((struct dwc_pcie_event_attr[]) {{ \
+ .attr = __ATTR(_name, 0444, dwc_pcie_event_show, NULL), \
+ .type = _type, \
+ .eventid = _eventid, \
+ .lane = _lane, \
+ }})[0].attr.attr)
+
+#define DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(_name, _eventid) \
+ DWC_PCIE_EVENT_ATTR(_name, DWC_PCIE_TIME_BASE_EVENT, _eventid, 0)
+#define DWC_PCIE_PMU_LANE_EVENT_ATTR(_name, _eventid) \
+ DWC_PCIE_EVENT_ATTR(_name, DWC_PCIE_LANE_EVENT, _eventid, 0)
+
+static struct attribute *dwc_pcie_pmu_time_event_attrs[] = {
+ /* Group #0 */
+ DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(one_cycle, 0x00),
+ DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(TX_L0S, 0x01),
+ DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(RX_L0S, 0x02),
+ DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(L0, 0x03),
+ DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(L1, 0x04),
+ DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(L1_1, 0x05),
+ DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(L1_2, 0x06),
+ DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(CFG_RCVRY, 0x07),
+ DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(L1_AUX, 0x08),
+ DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(TX_RX_L0S, 0x09),
+
+ /* Group #1 */
+ DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(tx_pcie_tlp_data_payload, 0x20),
+ DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(rx_pcie_tlp_data_payload, 0x21),
+ DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(tx_ccix_tlp_data_payload, 0x22),
+ DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(rx_ccix_tlp_data_payload, 0x23),
+
+ /*
+ * Leave it to the user to specify the lane ID to avoid generating
+ * a list of hundreds of events.
+ */
+ DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_ack_dllp, 0x600),
+ DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_update_fc_dllp, 0x601),
+ DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_ack_dllp, 0x602),
+ DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_update_fc_dllp, 0x603),
+ DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_nullified_tlp, 0x604),
+ DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_nullified_tlp, 0x605),
+ DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_duplicate_tlp, 0x606),
+ DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_memory_write, 0x700),
+ DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_memory_read, 0x701),
+ DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_configuration_write, 0x702),
+ DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_configuration_read, 0x703),
+ DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_io_write, 0x704),
+ DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_io_read, 0x705),
+ DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_completion_without_data, 0x706),
+ DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_completion_with_data, 0x707),
+ DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_message_tlp, 0x708),
+ DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_atomic, 0x709),
+ DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_tlp_with_prefix, 0x70A),
+ DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_memory_write, 0x70B),
+ DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_memory_read, 0x70C),
+ DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_io_write, 0x70F),
+ DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_io_read, 0x710),
+ DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_completion_without_data, 0x711),
+ DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_completion_with_data, 0x712),
+ DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_message_tlp, 0x713),
+ DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_atomic, 0x714),
+ DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_tlp_with_prefix, 0x715),
+ DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_ccix_tlp, 0x716),
+ DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_ccix_tlp, 0x717),
+ NULL
+};
+
+static const struct attribute_group dwc_pcie_event_attrs_group = {
+ .name = "events",
+ .attrs = dwc_pcie_pmu_time_event_attrs,
+};
+
+static const struct attribute_group *dwc_pcie_attr_groups[] = {
+ &dwc_pcie_event_attrs_group,
+ &dwc_pcie_format_attrs_group,
+ &dwc_pcie_cpumask_attr_group,
+ NULL
+};
+
+static void dwc_pcie_pmu_lane_event_enable(struct dwc_pcie_pmu *pcie_pmu,
+ struct perf_event *event,
+ bool enable)
+{
+ struct pci_dev *pdev = pcie_pmu->pdev;
+ u16 ras_des_offset = pcie_pmu->ras_des_offset;
+ int event_id = DWC_PCIE_EVENT_ID(event);
+ int lane = DWC_PCIE_EVENT_LANE(event);
+ u32 ctrl;
+
+ ctrl = FIELD_PREP(DWC_PCIE_CNT_EVENT_SEL, event_id) |
+ FIELD_PREP(DWC_PCIE_CNT_LANE_SEL, lane) |
+ FIELD_PREP(DWC_PCIE_EVENT_CLEAR, DWC_PCIE_EVENT_PER_CLEAR);
+
+ if (enable)
+ ctrl |= FIELD_PREP(DWC_PCIE_CNT_ENABLE, DWC_PCIE_PER_EVENT_ON);
+ else
+ ctrl |= FIELD_PREP(DWC_PCIE_CNT_ENABLE, DWC_PCIE_PER_EVENT_OFF);
+
+ pci_write_config_dword(pdev, ras_des_offset + DWC_PCIE_EVENT_CNT_CTL,
+ ctrl);
+}
+
+static void dwc_pcie_pmu_time_based_event_enable(struct dwc_pcie_pmu *pcie_pmu,
+ bool enable)
+{
+ struct pci_dev *pdev = pcie_pmu->pdev;
+ u16 ras_des_offset = pcie_pmu->ras_des_offset;
+
+ pci_clear_and_set_config_dword(pdev,
+ ras_des_offset + DWC_PCIE_TIME_BASED_ANAL_CTL,
+ DWC_PCIE_TIME_BASED_TIMER_START, enable);
+}
+
+static u64 dwc_pcie_pmu_read_lane_event_counter(struct perf_event *event)
+{
+ struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(event->pmu);
+ struct pci_dev *pdev = pcie_pmu->pdev;
+ int event_id = DWC_PCIE_EVENT_ID(event);
+ int lane = DWC_PCIE_EVENT_LANE(event);
+ u16 ras_des_offset = pcie_pmu->ras_des_offset;
+ u32 val, ctrl;
+
+ ctrl = FIELD_PREP(DWC_PCIE_CNT_EVENT_SEL, event_id) |
+ FIELD_PREP(DWC_PCIE_CNT_LANE_SEL, lane) |
+ FIELD_PREP(DWC_PCIE_CNT_ENABLE, DWC_PCIE_PER_EVENT_ON);
+ pci_write_config_dword(pdev, ras_des_offset + DWC_PCIE_EVENT_CNT_CTL,
+ ctrl);
+ pci_read_config_dword(pdev, ras_des_offset + DWC_PCIE_EVENT_CNT_DATA, &val);
+
+ ctrl |= FIELD_PREP(DWC_PCIE_EVENT_CLEAR, DWC_PCIE_EVENT_PER_CLEAR);
+ pci_write_config_dword(pdev, ras_des_offset + DWC_PCIE_EVENT_CNT_CTL,
+ ctrl);
+
+ return val;
+}
+
+static u64 dwc_pcie_pmu_read_time_based_counter(struct perf_event *event)
+{
+ struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(event->pmu);
+ struct pci_dev *pdev = pcie_pmu->pdev;
+ int event_id = DWC_PCIE_EVENT_ID(event);
+ u16 ras_des_offset = pcie_pmu->ras_des_offset;
+ u32 lo, hi, ss;
+ u64 val;
+
+ /*
+ * The 64-bit value of the data counter is spread across two
+ * registers that are not synchronized. In order to read them
+ * atomically, ensure that the high 32 bits match before and after
+ * reading the low 32 bits.
+ */
+ pci_read_config_dword(pdev,
+ ras_des_offset + DWC_PCIE_TIME_BASED_ANAL_DATA_REG_HIGH, &hi);
+ do {
+ /* snapshot the high 32 bits */
+ ss = hi;
+
+ pci_read_config_dword(
+ pdev, ras_des_offset + DWC_PCIE_TIME_BASED_ANAL_DATA_REG_LOW,
+ &lo);
+ pci_read_config_dword(
+ pdev, ras_des_offset + DWC_PCIE_TIME_BASED_ANAL_DATA_REG_HIGH,
+ &hi);
+ } while (hi != ss);
+
+ val = ((u64)hi << 32) | lo;
+ /*
+ * The Group#1 event measures the amount of data processed in 16-byte
+ * units. Simplify the end-user interface by multiplying the counter
+ * at the point of read.
+ */
+ if (event_id >= 0x20 && event_id <= 0x23)
+ val *= 16;
+
+ return val;
+}
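
Editor's note: the hi/lo/hi loop above is the standard technique for taking a consistent snapshot of a 64-bit counter exposed as two unsynchronized 32-bit registers. A standalone sketch of the same pattern follows; read_hi()/read_lo() are hypothetical accessors introduced for illustration and are not part of the driver.

#include <stdint.h>

/*
 * Sketch of the consistent-read pattern used above. If the high half
 * changes between reads, the low half may have wrapped, so retry.
 */
static uint64_t read_split_counter64(uint32_t (*read_hi)(void),
                                     uint32_t (*read_lo)(void))
{
        uint32_t hi, lo, snapshot;

        hi = read_hi();
        do {
                snapshot = hi;          /* remember the high half */
                lo = read_lo();
                hi = read_hi();         /* re-read: must match the snapshot */
        } while (hi != snapshot);

        return ((uint64_t)hi << 32) | lo;
}
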
+
+static void dwc_pcie_pmu_event_update(struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ enum dwc_pcie_event_type type = DWC_PCIE_EVENT_TYPE(event);
+ u64 delta, prev, now;
+
+ if (type == DWC_PCIE_LANE_EVENT) {
+ now = dwc_pcie_pmu_read_lane_event_counter(event) &
+ DWC_PCIE_LANE_EVENT_MAX_PERIOD;
+ local64_add(now, &event->count);
+ return;
+ }
+
+ do {
+ prev = local64_read(&hwc->prev_count);
+ now = dwc_pcie_pmu_read_time_based_counter(event);
+
+ } while (local64_cmpxchg(&hwc->prev_count, prev, now) != prev);
+
+ delta = (now - prev) & DWC_PCIE_MAX_PERIOD;
+ local64_add(delta, &event->count);
+}
+
+static int dwc_pcie_pmu_validate_add_lane_event(struct perf_event *event,
+ unsigned long val_lane_events[])
+{
+ int event_id, event_nr, group;
+
+ event_id = DWC_PCIE_EVENT_ID(event);
+ event_nr = FIELD_GET(DWC_PCIE_CNT_EVENT_SEL_EVID, event_id);
+ group = FIELD_GET(DWC_PCIE_CNT_EVENT_SEL_GROUP, event_id);
+
+ if (group != DWC_PCIE_LANE_GROUP_6 && group != DWC_PCIE_LANE_GROUP_7)
+ return -EINVAL;
+
+ group -= DWC_PCIE_LANE_GROUP_6;
+
+ if (test_and_set_bit(group * DWC_PCIE_LANE_MAX_EVENTS_PER_GROUP + event_nr,
+ val_lane_events))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int dwc_pcie_pmu_validate_group(struct perf_event *event)
+{
+ struct perf_event *sibling, *leader = event->group_leader;
+ DECLARE_BITMAP(val_lane_events, 2 * DWC_PCIE_LANE_MAX_EVENTS_PER_GROUP);
+ bool time_event = false;
+ int type;
+
+ type = DWC_PCIE_EVENT_TYPE(leader);
+ if (type == DWC_PCIE_TIME_BASE_EVENT)
+ time_event = true;
+ else
+ if (dwc_pcie_pmu_validate_add_lane_event(leader, val_lane_events))
+ return -ENOSPC;
+
+ for_each_sibling_event(sibling, leader) {
+ type = DWC_PCIE_EVENT_TYPE(sibling);
+ if (type == DWC_PCIE_TIME_BASE_EVENT) {
+ if (time_event)
+ return -ENOSPC;
+
+ time_event = true;
+ continue;
+ }
+
+ if (dwc_pcie_pmu_validate_add_lane_event(sibling, val_lane_events))
+ return -ENOSPC;
+ }
+
+ return 0;
+}
+
+static int dwc_pcie_pmu_event_init(struct perf_event *event)
+{
+ struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(event->pmu);
+ enum dwc_pcie_event_type type = DWC_PCIE_EVENT_TYPE(event);
+ struct perf_event *sibling;
+ u32 lane;
+
+ if (event->attr.type != event->pmu->type)
+ return -ENOENT;
+
+ /* We don't support sampling */
+ if (is_sampling_event(event))
+ return -EINVAL;
+
+ /* We cannot support task bound events */
+ if (event->cpu < 0 || event->attach_state & PERF_ATTACH_TASK)
+ return -EINVAL;
+
+ for_each_sibling_event(sibling, event->group_leader) {
+ if (sibling->pmu != event->pmu && !is_software_event(sibling))
+ return -EINVAL;
+ }
+
+ if (type < 0 || type >= DWC_PCIE_EVENT_TYPE_MAX)
+ return -EINVAL;
+
+ if (type == DWC_PCIE_LANE_EVENT) {
+ lane = DWC_PCIE_EVENT_LANE(event);
+ if (lane < 0 || lane >= pcie_pmu->nr_lanes)
+ return -EINVAL;
+ }
+
+ if (dwc_pcie_pmu_validate_group(event))
+ return -ENOSPC;
+
+ event->cpu = pcie_pmu->on_cpu;
+
+ return 0;
+}
+
+static void dwc_pcie_pmu_event_start(struct perf_event *event, int flags)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(event->pmu);
+ enum dwc_pcie_event_type type = DWC_PCIE_EVENT_TYPE(event);
+
+ hwc->state = 0;
+ local64_set(&hwc->prev_count, 0);
+
+ if (type == DWC_PCIE_LANE_EVENT)
+ dwc_pcie_pmu_lane_event_enable(pcie_pmu, event, true);
+ else if (type == DWC_PCIE_TIME_BASE_EVENT)
+ dwc_pcie_pmu_time_based_event_enable(pcie_pmu, true);
+}
+
+static void dwc_pcie_pmu_event_stop(struct perf_event *event, int flags)
+{
+ struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(event->pmu);
+ enum dwc_pcie_event_type type = DWC_PCIE_EVENT_TYPE(event);
+ struct hw_perf_event *hwc = &event->hw;
+
+ if (event->hw.state & PERF_HES_STOPPED)
+ return;
+
+ dwc_pcie_pmu_event_update(event);
+
+ if (type == DWC_PCIE_LANE_EVENT)
+ dwc_pcie_pmu_lane_event_enable(pcie_pmu, event, false);
+ else if (type == DWC_PCIE_TIME_BASE_EVENT)
+ dwc_pcie_pmu_time_based_event_enable(pcie_pmu, false);
+
+ hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
+}
+
+static int dwc_pcie_pmu_event_add(struct perf_event *event, int flags)
+{
+ struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(event->pmu);
+ struct pci_dev *pdev = pcie_pmu->pdev;
+ struct hw_perf_event *hwc = &event->hw;
+ enum dwc_pcie_event_type type = DWC_PCIE_EVENT_TYPE(event);
+ int event_id = DWC_PCIE_EVENT_ID(event);
+ int lane = DWC_PCIE_EVENT_LANE(event);
+ u16 ras_des_offset = pcie_pmu->ras_des_offset;
+ u32 ctrl;
+
+ hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
+
+ if (type == DWC_PCIE_LANE_EVENT) {
+ int event_nr = FIELD_GET(DWC_PCIE_CNT_EVENT_SEL_EVID, event_id);
+ int group = FIELD_GET(DWC_PCIE_CNT_EVENT_SEL_GROUP, event_id) -
+ DWC_PCIE_LANE_GROUP_6;
+
+ if (test_and_set_bit(group * DWC_PCIE_LANE_MAX_EVENTS_PER_GROUP + event_nr,
+ pcie_pmu->lane_events))
+ return -ENOSPC;
+
+ /* EVENT_COUNTER_DATA_REG needs to be cleared manually */
+ ctrl = FIELD_PREP(DWC_PCIE_CNT_EVENT_SEL, event_id) |
+ FIELD_PREP(DWC_PCIE_CNT_LANE_SEL, lane) |
+ FIELD_PREP(DWC_PCIE_CNT_ENABLE, DWC_PCIE_PER_EVENT_OFF) |
+ FIELD_PREP(DWC_PCIE_EVENT_CLEAR, DWC_PCIE_EVENT_PER_CLEAR);
+ pci_write_config_dword(pdev, ras_des_offset + DWC_PCIE_EVENT_CNT_CTL,
+ ctrl);
+ } else if (type == DWC_PCIE_TIME_BASE_EVENT) {
+ if (pcie_pmu->time_based_event)
+ return -ENOSPC;
+
+ pcie_pmu->time_based_event = event;
+
+ /*
+ * TIME_BASED_ANAL_DATA_REG is a 64-bit register, so it can safely
+ * be used with any manually controlled duration. It is cleared
+ * when the next measurement starts.
+ */
+ ctrl = FIELD_PREP(DWC_PCIE_TIME_BASED_REPORT_SEL, event_id) |
+ FIELD_PREP(DWC_PCIE_TIME_BASED_DURATION_SEL,
+ DWC_PCIE_DURATION_MANUAL_CTL) |
+ DWC_PCIE_TIME_BASED_CNT_ENABLE;
+ pci_write_config_dword(
+ pdev, ras_des_offset + DWC_PCIE_TIME_BASED_ANAL_CTL, ctrl);
+ }
+
+ if (flags & PERF_EF_START)
+ dwc_pcie_pmu_event_start(event, PERF_EF_RELOAD);
+
+ perf_event_update_userpage(event);
+
+ return 0;
+}
+
+static void dwc_pcie_pmu_event_del(struct perf_event *event, int flags)
+{
+ struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(event->pmu);
+ enum dwc_pcie_event_type type = DWC_PCIE_EVENT_TYPE(event);
+
+ dwc_pcie_pmu_event_stop(event, flags | PERF_EF_UPDATE);
+ perf_event_update_userpage(event);
+
+ if (type == DWC_PCIE_TIME_BASE_EVENT) {
+ pcie_pmu->time_based_event = NULL;
+ } else {
+ int event_id = DWC_PCIE_EVENT_ID(event);
+ int event_nr = FIELD_GET(DWC_PCIE_CNT_EVENT_SEL_EVID, event_id);
+ int group = FIELD_GET(DWC_PCIE_CNT_EVENT_SEL_GROUP, event_id) -
+ DWC_PCIE_LANE_GROUP_6;
+
+ clear_bit(group * DWC_PCIE_LANE_MAX_EVENTS_PER_GROUP + event_nr,
+ pcie_pmu->lane_events);
+ }
+}
+
+static void dwc_pcie_pmu_remove_cpuhp_instance(void *hotplug_node)
+{
+ cpuhp_state_remove_instance_nocalls(dwc_pcie_pmu_hp_state, hotplug_node);
+}
+
+/*
+ * Find the RAS DES capability device info bound to a PCI device.
+ * @pdev: The PCI device.
+ */
+static struct dwc_pcie_dev_info *dwc_pcie_find_dev_info(struct pci_dev *pdev)
+{
+ struct dwc_pcie_dev_info *dev_info;
+
+ list_for_each_entry(dev_info, &dwc_pcie_dev_info_head, dev_node)
+ if (dev_info->pdev == pdev)
+ return dev_info;
+
+ return NULL;
+}
+
+static void dwc_pcie_unregister_pmu(void *data)
+{
+ struct dwc_pcie_pmu *pcie_pmu = data;
+
+ perf_pmu_unregister(&pcie_pmu->pmu);
+}
+
+static u16 dwc_pcie_des_cap(struct pci_dev *pdev)
+{
+ const struct dwc_pcie_vsec_id *vid;
+ u16 vsec;
+ u32 val;
+
+ if (!pci_is_pcie(pdev) || !(pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT))
+ return 0;
+
+ for (vid = dwc_pcie_rasdes_vsec_ids; vid->vendor_id; vid++) {
+ vsec = pci_find_vsec_capability(pdev, vid->vendor_id,
+ vid->vsec_id);
+ if (vsec) {
+ pci_read_config_dword(pdev, vsec + PCI_VNDR_HEADER,
+ &val);
+ if (PCI_VNDR_HEADER_REV(val) == vid->vsec_rev) {
+ pci_dbg(pdev, "Detected PCIe Vendor-Specific Extended Capability RAS DES\n");
+ return vsec;
+ }
+ }
+ }
+ return 0;
+}
+
+static void dwc_pcie_unregister_dev(struct dwc_pcie_dev_info *dev_info)
+{
+ platform_device_unregister(dev_info->plat_dev);
+ list_del(&dev_info->dev_node);
+ kfree(dev_info);
+}
+
+static int dwc_pcie_register_dev(struct pci_dev *pdev)
+{
+ struct platform_device *plat_dev;
+ struct dwc_pcie_dev_info *dev_info;
+ u32 sbdf;
+
+ sbdf = (pci_domain_nr(pdev->bus) << 16) | PCI_DEVID(pdev->bus->number, pdev->devfn);
+ plat_dev = platform_device_register_simple("dwc_pcie_pmu", sbdf, NULL, 0);
+ if (IS_ERR(plat_dev))
+ return PTR_ERR(plat_dev);
+
+ dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
+ if (!dev_info) {
+ platform_device_unregister(plat_dev);
+ return -ENOMEM;
+ }
+
+ /* Cache the platform device to handle PCI device hotplug */
+ dev_info->plat_dev = plat_dev;
+ dev_info->pdev = pdev;
+ list_add(&dev_info->dev_node, &dwc_pcie_dev_info_head);
+
+ return 0;
+}
+
+static int dwc_pcie_pmu_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct device *dev = data;
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct dwc_pcie_dev_info *dev_info;
+
+ switch (action) {
+ case BUS_NOTIFY_ADD_DEVICE:
+ if (!dwc_pcie_des_cap(pdev))
+ return NOTIFY_DONE;
+ if (dwc_pcie_register_dev(pdev))
+ return NOTIFY_BAD;
+ break;
+ case BUS_NOTIFY_DEL_DEVICE:
+ dev_info = dwc_pcie_find_dev_info(pdev);
+ if (!dev_info)
+ return NOTIFY_DONE;
+ dwc_pcie_unregister_dev(dev_info);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block dwc_pcie_pmu_nb = {
+ .notifier_call = dwc_pcie_pmu_notifier,
+};
+
+static int dwc_pcie_pmu_probe(struct platform_device *plat_dev)
+{
+ struct pci_dev *pdev;
+ struct dwc_pcie_pmu *pcie_pmu;
+ char *name;
+ u32 sbdf;
+ u16 vsec;
+ int ret;
+
+ sbdf = plat_dev->id;
+ pdev = pci_get_domain_bus_and_slot(sbdf >> 16, PCI_BUS_NUM(sbdf & 0xffff),
+ sbdf & 0xff);
+ if (!pdev) {
+ pr_err("No pdev found for the sbdf 0x%x\n", sbdf);
+ return -ENODEV;
+ }
+
+ vsec = dwc_pcie_des_cap(pdev);
+ if (!vsec)
+ return -ENODEV;
+
+ pci_dev_put(pdev);
+ name = devm_kasprintf(&plat_dev->dev, GFP_KERNEL, "dwc_rootport_%x", sbdf);
+ if (!name)
+ return -ENOMEM;
+
+ pcie_pmu = devm_kzalloc(&plat_dev->dev, sizeof(*pcie_pmu), GFP_KERNEL);
+ if (!pcie_pmu)
+ return -ENOMEM;
+
+ pcie_pmu->pdev = pdev;
+ pcie_pmu->ras_des_offset = vsec;
+ pcie_pmu->nr_lanes = pcie_get_width_cap(pdev);
+ pcie_pmu->on_cpu = -1;
+ pcie_pmu->pmu = (struct pmu){
+ .name = name,
+ .parent = &plat_dev->dev,
+ .module = THIS_MODULE,
+ .attr_groups = dwc_pcie_attr_groups,
+ .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
+ .task_ctx_nr = perf_invalid_context,
+ .event_init = dwc_pcie_pmu_event_init,
+ .add = dwc_pcie_pmu_event_add,
+ .del = dwc_pcie_pmu_event_del,
+ .start = dwc_pcie_pmu_event_start,
+ .stop = dwc_pcie_pmu_event_stop,
+ .read = dwc_pcie_pmu_event_update,
+ };
+
+ /* Add this instance to the list used by the offline callback */
+ ret = cpuhp_state_add_instance(dwc_pcie_pmu_hp_state,
+ &pcie_pmu->cpuhp_node);
+ if (ret) {
+ pci_err(pdev, "Error %d registering hotplug @%x\n", ret, sbdf);
+ return ret;
+ }
+
+ /* Unwind when the platform driver is removed */
+ ret = devm_add_action_or_reset(&plat_dev->dev,
+ dwc_pcie_pmu_remove_cpuhp_instance,
+ &pcie_pmu->cpuhp_node);
+ if (ret)
+ return ret;
+
+ ret = perf_pmu_register(&pcie_pmu->pmu, name, -1);
+ if (ret) {
+ pci_err(pdev, "Error %d registering PMU @%x\n", ret, sbdf);
+ return ret;
+ }
+ ret = devm_add_action_or_reset(&plat_dev->dev, dwc_pcie_unregister_pmu,
+ pcie_pmu);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int dwc_pcie_pmu_online_cpu(unsigned int cpu, struct hlist_node *cpuhp_node)
+{
+ struct dwc_pcie_pmu *pcie_pmu;
+
+ pcie_pmu = hlist_entry_safe(cpuhp_node, struct dwc_pcie_pmu, cpuhp_node);
+ if (pcie_pmu->on_cpu == -1)
+ pcie_pmu->on_cpu = cpumask_local_spread(
+ 0, dev_to_node(&pcie_pmu->pdev->dev));
+
+ return 0;
+}
+
+static int dwc_pcie_pmu_offline_cpu(unsigned int cpu, struct hlist_node *cpuhp_node)
+{
+ struct dwc_pcie_pmu *pcie_pmu;
+ struct pci_dev *pdev;
+ unsigned int target;
+ int node;
+
+ pcie_pmu = hlist_entry_safe(cpuhp_node, struct dwc_pcie_pmu, cpuhp_node);
+ /* Nothing to do if this CPU doesn't own the PMU */
+ if (cpu != pcie_pmu->on_cpu)
+ return 0;
+
+ pcie_pmu->on_cpu = -1;
+ pdev = pcie_pmu->pdev;
+ node = dev_to_node(&pdev->dev);
+
+ target = cpumask_any_and_but(cpumask_of_node(node), cpu_online_mask, cpu);
+ if (target >= nr_cpu_ids)
+ target = cpumask_any_but(cpu_online_mask, cpu);
+
+ if (target >= nr_cpu_ids) {
+ pci_err(pdev, "There is no CPU to set\n");
+ return 0;
+ }
+
+ /* This PMU does NOT support interrupt, just migrate context. */
+ perf_pmu_migrate_context(&pcie_pmu->pmu, cpu, target);
+ pcie_pmu->on_cpu = target;
+
+ return 0;
+}
+
+static struct platform_driver dwc_pcie_pmu_driver = {
+ .probe = dwc_pcie_pmu_probe,
+ .driver = {.name = "dwc_pcie_pmu",},
+};
+
+static void dwc_pcie_cleanup_devices(void)
+{
+ struct dwc_pcie_dev_info *dev_info, *tmp;
+
+ list_for_each_entry_safe(dev_info, tmp, &dwc_pcie_dev_info_head, dev_node) {
+ dwc_pcie_unregister_dev(dev_info);
+ }
+}
+
+static int __init dwc_pcie_pmu_init(void)
+{
+ struct pci_dev *pdev = NULL;
+ int ret;
+
+ for_each_pci_dev(pdev) {
+ if (!dwc_pcie_des_cap(pdev))
+ continue;
+
+ ret = dwc_pcie_register_dev(pdev);
+ if (ret) {
+ pci_dev_put(pdev);
+ goto err_cleanup;
+ }
+ }
+
+ ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
+ "perf/dwc_pcie_pmu:online",
+ dwc_pcie_pmu_online_cpu,
+ dwc_pcie_pmu_offline_cpu);
+ if (ret < 0)
+ goto err_cleanup;
+
+ dwc_pcie_pmu_hp_state = ret;
+
+ ret = platform_driver_register(&dwc_pcie_pmu_driver);
+ if (ret)
+ goto err_remove_cpuhp;
+
+ ret = bus_register_notifier(&pci_bus_type, &dwc_pcie_pmu_nb);
+ if (ret)
+ goto err_unregister_driver;
+ notify = true;
+
+ return 0;
+
+err_unregister_driver:
+ platform_driver_unregister(&dwc_pcie_pmu_driver);
+err_remove_cpuhp:
+ cpuhp_remove_multi_state(dwc_pcie_pmu_hp_state);
+err_cleanup:
+ dwc_pcie_cleanup_devices();
+ return ret;
+}
+
+static void __exit dwc_pcie_pmu_exit(void)
+{
+ if (notify)
+ bus_unregister_notifier(&pci_bus_type, &dwc_pcie_pmu_nb);
+ dwc_pcie_cleanup_devices();
+ platform_driver_unregister(&dwc_pcie_pmu_driver);
+ cpuhp_remove_multi_state(dwc_pcie_pmu_hp_state);
+}
+
+module_init(dwc_pcie_pmu_init);
+module_exit(dwc_pcie_pmu_exit);
+
+MODULE_DESCRIPTION("PMU driver for DesignWare Cores PCI Express Controller");
+MODULE_AUTHOR("Shuai Xue <xueshuai@linux.alibaba.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/perf/fsl_imx8_ddr_perf.c b/drivers/perf/fsl_imx8_ddr_perf.c
index 92611c98120f..bcdf5575d71c 100644
--- a/drivers/perf/fsl_imx8_ddr_perf.c
+++ b/drivers/perf/fsl_imx8_ddr_perf.c
@@ -5,6 +5,7 @@
*/
#include <linux/bitfield.h>
+#include <linux/clk.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -19,6 +20,8 @@
#define COUNTER_READ 0x20
#define COUNTER_DPCR1 0x30
+#define COUNTER_MUX_CNTL 0x50
+#define COUNTER_MASK_COMP 0x54
#define CNTL_OVER 0x1
#define CNTL_CLEAR 0x2
@@ -32,6 +35,13 @@
#define CNTL_CSV_SHIFT 24
#define CNTL_CSV_MASK (0xFFU << CNTL_CSV_SHIFT)
+#define READ_PORT_SHIFT 0
+#define READ_PORT_MASK (0x7 << READ_PORT_SHIFT)
+#define READ_CHANNEL_REVERT 0x00000008 /* bit 3 for read channel select */
+#define WRITE_PORT_SHIFT 8
+#define WRITE_PORT_MASK (0x7 << WRITE_PORT_SHIFT)
+#define WRITE_CHANNEL_REVERT 0x00000800 /* bit 11 for write channel select */
+
#define EVENT_CYCLES_ID 0
#define EVENT_CYCLES_COUNTER 0
#define NUM_COUNTERS 4
@@ -43,17 +53,27 @@
#define to_ddr_pmu(p) container_of(p, struct ddr_pmu, pmu)
#define DDR_PERF_DEV_NAME "imx8_ddr"
+#define DB_PERF_DEV_NAME "imx8_db"
#define DDR_CPUHP_CB_NAME DDR_PERF_DEV_NAME "_perf_pmu"
static DEFINE_IDA(ddr_ida);
+static DEFINE_IDA(db_ida);
/* DDR Perf hardware feature */
#define DDR_CAP_AXI_ID_FILTER 0x1 /* support AXI ID filter */
#define DDR_CAP_AXI_ID_FILTER_ENHANCED 0x3 /* support enhanced AXI ID filter */
+#define DDR_CAP_AXI_ID_PORT_CHANNEL_FILTER 0x4 /* support AXI ID PORT CHANNEL filter */
+
+/* Perf type */
+enum fsl_ddr_type {
+ DDR_PERF_TYPE = 0, /* ddr Perf (default) */
+ DB_PERF_TYPE, /* db Perf */
+};
struct fsl_ddr_devtype_data {
unsigned int quirks; /* quirks needed for different DDR Perf core */
const char *identifier; /* system PMU identifier for userspace */
+ enum fsl_ddr_type type; /* types of Perf, ddr or db */
};
static const struct fsl_ddr_devtype_data imx8_devtype_data;
@@ -82,6 +102,17 @@ static const struct fsl_ddr_devtype_data imx8mp_devtype_data = {
.identifier = "i.MX8MP",
};
+static const struct fsl_ddr_devtype_data imx8dxl_devtype_data = {
+ .quirks = DDR_CAP_AXI_ID_PORT_CHANNEL_FILTER,
+ .identifier = "i.MX8DXL",
+};
+
+static const struct fsl_ddr_devtype_data imx8dxl_db_devtype_data = {
+ .quirks = DDR_CAP_AXI_ID_PORT_CHANNEL_FILTER,
+ .identifier = "i.MX8DXL",
+ .type = DB_PERF_TYPE,
+};
+
static const struct of_device_id imx_ddr_pmu_dt_ids[] = {
{ .compatible = "fsl,imx8-ddr-pmu", .data = &imx8_devtype_data},
{ .compatible = "fsl,imx8m-ddr-pmu", .data = &imx8m_devtype_data},
@@ -89,6 +120,8 @@ static const struct of_device_id imx_ddr_pmu_dt_ids[] = {
{ .compatible = "fsl,imx8mm-ddr-pmu", .data = &imx8mm_devtype_data},
{ .compatible = "fsl,imx8mn-ddr-pmu", .data = &imx8mn_devtype_data},
{ .compatible = "fsl,imx8mp-ddr-pmu", .data = &imx8mp_devtype_data},
+ { .compatible = "fsl,imx8dxl-ddr-pmu", .data = &imx8dxl_devtype_data},
+ { .compatible = "fsl,imx8dxl-db-pmu", .data = &imx8dxl_db_devtype_data},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, imx_ddr_pmu_dt_ids);
@@ -144,6 +177,7 @@ static const struct attribute_group ddr_perf_identifier_attr_group = {
enum ddr_perf_filter_capabilities {
PERF_CAP_AXI_ID_FILTER = 0,
PERF_CAP_AXI_ID_FILTER_ENHANCED,
+ PERF_CAP_AXI_ID_PORT_CHANNEL_FILTER,
PERF_CAP_AXI_ID_FEAT_MAX,
};
@@ -157,6 +191,8 @@ static u32 ddr_perf_filter_cap_get(struct ddr_pmu *pmu, int cap)
case PERF_CAP_AXI_ID_FILTER_ENHANCED:
quirks &= DDR_CAP_AXI_ID_FILTER_ENHANCED;
return quirks == DDR_CAP_AXI_ID_FILTER_ENHANCED;
+ case PERF_CAP_AXI_ID_PORT_CHANNEL_FILTER:
+ return !!(quirks & DDR_CAP_AXI_ID_PORT_CHANNEL_FILTER);
default:
WARN(1, "unknown filter cap %d\n", cap);
}
@@ -187,6 +223,7 @@ static ssize_t ddr_perf_filter_cap_show(struct device *dev,
static struct attribute *ddr_perf_filter_cap_attr[] = {
PERF_FILTER_EXT_ATTR_ENTRY(filter, PERF_CAP_AXI_ID_FILTER),
PERF_FILTER_EXT_ATTR_ENTRY(enhanced_filter, PERF_CAP_AXI_ID_FILTER_ENHANCED),
+ PERF_FILTER_EXT_ATTR_ENTRY(super_filter, PERF_CAP_AXI_ID_PORT_CHANNEL_FILTER),
NULL,
};
@@ -264,19 +301,51 @@ static struct attribute *ddr_perf_events_attrs[] = {
NULL,
};
+static const int ddr_perf_db_visible_event_list[] = {
+ EVENT_CYCLES_ID,
+ 0x41,
+ 0x42,
+};
+
+static umode_t ddr_perf_events_attrs_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct ddr_pmu *pmu = dev_get_drvdata(dev);
+ struct perf_pmu_events_attr *pmu_attr;
+ unsigned int i;
+
+ pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr.attr);
+
+ if (pmu->devtype_data->type == DDR_PERF_TYPE)
+ return attr->mode;
+
+ /* DB Type */
+ for (i = 0; i < ARRAY_SIZE(ddr_perf_db_visible_event_list); i++)
+ if (pmu_attr->id == ddr_perf_db_visible_event_list[i])
+ return attr->mode;
+
+ return 0;
+}
+
static const struct attribute_group ddr_perf_events_attr_group = {
.name = "events",
.attrs = ddr_perf_events_attrs,
+ .is_visible = ddr_perf_events_attrs_is_visible,
};
PMU_FORMAT_ATTR(event, "config:0-7");
PMU_FORMAT_ATTR(axi_id, "config1:0-15");
PMU_FORMAT_ATTR(axi_mask, "config1:16-31");
+PMU_FORMAT_ATTR(axi_port, "config2:0-2");
+PMU_FORMAT_ATTR(axi_channel, "config2:3-3");
static struct attribute *ddr_perf_format_attrs[] = {
&format_attr_event.attr,
&format_attr_axi_id.attr,
&format_attr_axi_mask.attr,
+ &format_attr_axi_port.attr,
+ &format_attr_axi_channel.attr,
NULL,
};
@@ -530,6 +599,7 @@ static int ddr_perf_event_add(struct perf_event *event, int flags)
int counter;
int cfg = event->attr.config;
int cfg1 = event->attr.config1;
+ int cfg2 = event->attr.config2;
if (pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER) {
int i;
@@ -553,6 +623,26 @@ static int ddr_perf_event_add(struct perf_event *event, int flags)
return -EOPNOTSUPP;
}
+ if (pmu->devtype_data->quirks & DDR_CAP_AXI_ID_PORT_CHANNEL_FILTER) {
+ if (ddr_perf_is_filtered(event)) {
+ /* revert axi id masking(axi_mask) value */
+ cfg1 ^= AXI_MASKING_REVERT;
+ writel(cfg1, pmu->base + COUNTER_MASK_COMP + ((counter - 1) << 4));
+
+ if (cfg == 0x41) {
+ /* revert axi read channel(axi_channel) value */
+ cfg2 ^= READ_CHANNEL_REVERT;
+ cfg2 |= FIELD_PREP(READ_PORT_MASK, cfg2);
+ } else {
+ /* revert axi write channel(axi_channel) value */
+ cfg2 ^= WRITE_CHANNEL_REVERT;
+ cfg2 |= FIELD_PREP(WRITE_PORT_MASK, cfg2);
+ }
+
+ writel(cfg2, pmu->base + COUNTER_MUX_CNTL + ((counter - 1) << 4));
+ }
+ }
+
pmu->events[counter] = event;
hwc->idx = counter;
@@ -600,12 +690,13 @@ static void ddr_perf_pmu_disable(struct pmu *pmu)
{
}
-static int ddr_perf_init(struct ddr_pmu *pmu, void __iomem *base,
- struct device *dev)
+static void ddr_perf_init(struct ddr_pmu *pmu, void __iomem *base,
+ struct device *dev)
{
*pmu = (struct ddr_pmu) {
.pmu = (struct pmu) {
.module = THIS_MODULE,
+ .parent = dev,
.capabilities = PERF_PMU_CAP_NO_EXCLUDE,
.task_ctx_nr = perf_invalid_context,
.attr_groups = attr_groups,
@@ -621,9 +712,6 @@ static int ddr_perf_init(struct ddr_pmu *pmu, void __iomem *base,
.base = base,
.dev = dev,
};
-
- pmu->id = ida_alloc(&ddr_ida, GFP_KERNEL);
- return pmu->id;
}
static irqreturn_t ddr_perf_irq_handler(int irq, void *p)
@@ -689,10 +777,13 @@ static int ddr_perf_offline_cpu(unsigned int cpu, struct hlist_node *node)
static int ddr_perf_probe(struct platform_device *pdev)
{
+ struct clk_bulk_data *clks;
struct ddr_pmu *pmu;
struct device_node *np;
void __iomem *base;
+ struct ida *ida;
char *name;
+ int nclks;
int num;
int ret;
int irq;
@@ -707,19 +798,33 @@ static int ddr_perf_probe(struct platform_device *pdev)
if (!pmu)
return -ENOMEM;
- num = ddr_perf_init(pmu, base, &pdev->dev);
+ ddr_perf_init(pmu, base, &pdev->dev);
platform_set_drvdata(pdev, pmu);
- name = devm_kasprintf(&pdev->dev, GFP_KERNEL, DDR_PERF_DEV_NAME "%d",
- num);
+ nclks = devm_clk_bulk_get_all_enabled(&pdev->dev, &clks);
+ if (nclks < 0)
+ return dev_err_probe(&pdev->dev, nclks, "Failure get clks\n");
+
+ pmu->devtype_data = of_device_get_match_data(&pdev->dev);
+
+ ida = pmu->devtype_data->type == DDR_PERF_TYPE ? &ddr_ida : &db_ida;
+ num = ida_alloc(ida, GFP_KERNEL);
+ if (num < 0)
+ return num;
+
+ pmu->id = num;
+
+ if (pmu->devtype_data->type == DDR_PERF_TYPE)
+ name = devm_kasprintf(&pdev->dev, GFP_KERNEL, DDR_PERF_DEV_NAME "%d", num);
+ else
+ name = devm_kasprintf(&pdev->dev, GFP_KERNEL, DB_PERF_DEV_NAME "%d", num);
+
if (!name) {
ret = -ENOMEM;
- goto cpuhp_state_err;
+ goto idr_free;
}
- pmu->devtype_data = of_device_get_match_data(&pdev->dev);
-
pmu->cpu = raw_smp_processor_id();
ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
DDR_CPUHP_CB_NAME,
@@ -728,7 +833,7 @@ static int ddr_perf_probe(struct platform_device *pdev)
if (ret < 0) {
dev_err(&pdev->dev, "cpuhp_setup_state_multi failed\n");
- goto cpuhp_state_err;
+ goto idr_free;
}
pmu->cpuhp_state = ret;
@@ -775,13 +880,13 @@ ddr_perf_err:
cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node);
cpuhp_instance_err:
cpuhp_remove_multi_state(pmu->cpuhp_state);
-cpuhp_state_err:
- ida_free(&ddr_ida, pmu->id);
+idr_free:
+ ida_free(ida, pmu->id);
dev_warn(&pdev->dev, "i.MX8 DDR Perf PMU failed (%d), disabled\n", ret);
return ret;
}
-static int ddr_perf_remove(struct platform_device *pdev)
+static void ddr_perf_remove(struct platform_device *pdev)
{
struct ddr_pmu *pmu = platform_get_drvdata(pdev);
@@ -790,8 +895,11 @@ static int ddr_perf_remove(struct platform_device *pdev)
perf_pmu_unregister(&pmu->pmu);
- ida_free(&ddr_ida, pmu->id);
- return 0;
+ if (pmu->devtype_data->type == DDR_PERF_TYPE)
+ ida_free(&ddr_ida, pmu->id);
+ else
+ ida_free(&db_ida, pmu->id);
+
}
static struct platform_driver imx_ddr_pmu_driver = {
@@ -805,4 +913,5 @@ static struct platform_driver imx_ddr_pmu_driver = {
};
module_platform_driver(imx_ddr_pmu_driver);
+MODULE_DESCRIPTION("Freescale i.MX8 DDR Performance Monitor Driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/perf/fsl_imx9_ddr_perf.c b/drivers/perf/fsl_imx9_ddr_perf.c
index 5cf770a1bc31..7050b48c0467 100644
--- a/drivers/perf/fsl_imx9_ddr_perf.c
+++ b/drivers/perf/fsl_imx9_ddr_perf.c
@@ -11,14 +11,24 @@
#include <linux/perf_event.h>
/* Performance monitor configuration */
-#define PMCFG1 0x00
-#define PMCFG1_RD_TRANS_FILT_EN BIT(31)
-#define PMCFG1_WR_TRANS_FILT_EN BIT(30)
-#define PMCFG1_RD_BT_FILT_EN BIT(29)
-#define PMCFG1_ID_MASK GENMASK(17, 0)
+#define PMCFG1 0x00
+#define MX93_PMCFG1_RD_TRANS_FILT_EN BIT(31)
+#define MX93_PMCFG1_WR_TRANS_FILT_EN BIT(30)
+#define MX93_PMCFG1_RD_BT_FILT_EN BIT(29)
+#define MX93_PMCFG1_ID_MASK GENMASK(17, 0)
-#define PMCFG2 0x04
-#define PMCFG2_ID GENMASK(17, 0)
+#define MX95_PMCFG1_WR_BEAT_FILT_EN BIT(31)
+#define MX95_PMCFG1_RD_BEAT_FILT_EN BIT(30)
+
+#define PMCFG2 0x04
+#define MX93_PMCFG2_ID GENMASK(17, 0)
+
+#define PMCFG3 0x08
+#define PMCFG4 0x0C
+#define PMCFG5 0x10
+#define PMCFG6 0x14
+#define MX95_PMCFG_ID_MASK GENMASK(9, 0)
+#define MX95_PMCFG_ID GENMASK(25, 16)
/* Global control register affects all counters and takes priority over local control registers */
#define PMGC0 0x40
@@ -41,6 +51,10 @@
#define NUM_COUNTERS 11
#define CYCLES_COUNTER 0
+#define CYCLES_EVENT_ID 0
+
+#define CONFIG_EVENT_MASK GENMASK(7, 0)
+#define CONFIG_COUNTER_MASK GENMASK(23, 16)
#define to_ddr_pmu(p) container_of(p, struct ddr_pmu, pmu)
@@ -49,8 +63,21 @@
static DEFINE_IDA(ddr_ida);
+/*
+ * V1 supports 1 read transaction, 1 write transaction and 1 read beats
+ * event, which correspond respectively to counters 2, 3 and 4.
+ */
+#define DDR_PERF_AXI_FILTER_V1 0x1
+
+/*
+ * V2 supports 1 read beats and 3 write beats events, which correspond
+ * respectively to counters 2-5.
+ */
+#define DDR_PERF_AXI_FILTER_V2 0x2
+
struct imx_ddr_devtype_data {
const char *identifier; /* system PMU identifier for userspace */
+ unsigned int filter_ver; /* AXI filter version */
};
struct ddr_pmu {
@@ -67,12 +94,41 @@ struct ddr_pmu {
int id;
};
+static const struct imx_ddr_devtype_data imx91_devtype_data = {
+ .identifier = "imx91",
+ .filter_ver = DDR_PERF_AXI_FILTER_V1
+};
+
static const struct imx_ddr_devtype_data imx93_devtype_data = {
.identifier = "imx93",
+ .filter_ver = DDR_PERF_AXI_FILTER_V1
};
+static const struct imx_ddr_devtype_data imx94_devtype_data = {
+ .identifier = "imx94",
+ .filter_ver = DDR_PERF_AXI_FILTER_V2
+};
+
+static const struct imx_ddr_devtype_data imx95_devtype_data = {
+ .identifier = "imx95",
+ .filter_ver = DDR_PERF_AXI_FILTER_V2
+};
+
+static inline bool axi_filter_v1(struct ddr_pmu *pmu)
+{
+ return pmu->devtype_data->filter_ver == DDR_PERF_AXI_FILTER_V1;
+}
+
+static inline bool axi_filter_v2(struct ddr_pmu *pmu)
+{
+ return pmu->devtype_data->filter_ver == DDR_PERF_AXI_FILTER_V2;
+}
+
static const struct of_device_id imx_ddr_pmu_dt_ids[] = {
- {.compatible = "fsl,imx93-ddr-pmu", .data = &imx93_devtype_data},
+ { .compatible = "fsl,imx91-ddr-pmu", .data = &imx91_devtype_data },
+ { .compatible = "fsl,imx93-ddr-pmu", .data = &imx93_devtype_data },
+ { .compatible = "fsl,imx94-ddr-pmu", .data = &imx94_devtype_data },
+ { .compatible = "fsl,imx95-ddr-pmu", .data = &imx95_devtype_data },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, imx_ddr_pmu_dt_ids);
@@ -118,21 +174,40 @@ static const struct attribute_group ddr_perf_cpumask_attr_group = {
.attrs = ddr_perf_cpumask_attrs,
};
+struct imx9_pmu_events_attr {
+ struct device_attribute attr;
+ u64 id;
+ const struct imx_ddr_devtype_data *devtype_data;
+};
+
static ssize_t ddr_pmu_event_show(struct device *dev,
struct device_attribute *attr, char *page)
{
- struct perf_pmu_events_attr *pmu_attr;
+ struct imx9_pmu_events_attr *pmu_attr;
- pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
+ pmu_attr = container_of(attr, struct imx9_pmu_events_attr, attr);
return sysfs_emit(page, "event=0x%02llx\n", pmu_attr->id);
}
-#define IMX9_DDR_PMU_EVENT_ATTR(_name, _id) \
- (&((struct perf_pmu_events_attr[]) { \
+#define COUNTER_OFFSET_IN_EVENT 8
+#define ID(counter, id) ((counter << COUNTER_OFFSET_IN_EVENT) | id)
+
+#define DDR_PMU_EVENT_ATTR_COMM(_name, _id, _data) \
+ (&((struct imx9_pmu_events_attr[]) { \
{ .attr = __ATTR(_name, 0444, ddr_pmu_event_show, NULL),\
- .id = _id, } \
+ .id = _id, \
+ .devtype_data = _data, } \
})[0].attr.attr)
+#define IMX9_DDR_PMU_EVENT_ATTR(_name, _id) \
+ DDR_PMU_EVENT_ATTR_COMM(_name, _id, NULL)
+
+#define IMX93_DDR_PMU_EVENT_ATTR(_name, _id) \
+ DDR_PMU_EVENT_ATTR_COMM(_name, _id, &imx93_devtype_data)
+
+#define IMX95_DDR_PMU_EVENT_ATTR(_name, _id) \
+ DDR_PMU_EVENT_ATTR_COMM(_name, _id, &imx95_devtype_data)
+
static struct attribute *ddr_perf_events_attrs[] = {
/* counter0 cycles event */
IMX9_DDR_PMU_EVENT_ATTR(cycles, 0),
@@ -159,90 +234,115 @@ static struct attribute *ddr_perf_events_attrs[] = {
IMX9_DDR_PMU_EVENT_ATTR(ddrc_pm_29, 63),
/* counter1 specific events */
- IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_riq_0, 64),
- IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_riq_1, 65),
- IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_riq_2, 66),
- IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_riq_3, 67),
- IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_riq_4, 68),
- IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_riq_5, 69),
- IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_riq_6, 70),
- IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_riq_7, 71),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_riq_0, ID(1, 64)),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_riq_1, ID(1, 65)),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_riq_2, ID(1, 66)),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_riq_3, ID(1, 67)),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_riq_4, ID(1, 68)),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_riq_5, ID(1, 69)),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_riq_6, ID(1, 70)),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_riq_7, ID(1, 71)),
/* counter2 specific events */
- IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_wiq_0, 64),
- IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_wiq_1, 65),
- IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_wiq_2, 66),
- IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_wiq_3, 67),
- IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_wiq_4, 68),
- IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_wiq_5, 69),
- IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_wiq_6, 70),
- IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_wiq_7, 71),
- IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_empty, 72),
- IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pm_rd_trans_filt, 73),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_wiq_0, ID(2, 64)),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_wiq_1, ID(2, 65)),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_wiq_2, ID(2, 66)),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_wiq_3, ID(2, 67)),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_wiq_4, ID(2, 68)),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_wiq_5, ID(2, 69)),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_wiq_6, ID(2, 70)),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_ld_wiq_7, ID(2, 71)),
+ IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_empty, ID(2, 72)),
+	IMX93_DDR_PMU_EVENT_ATTR(eddrtq_pm_rd_trans_filt, ID(2, 73)),	/* imx93 specific */
+	IMX95_DDR_PMU_EVENT_ATTR(eddrtq_pm_wr_beat_filt, ID(2, 73)),	/* imx95 specific */
/* counter3 specific events */
- IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_collision_0, 64),
- IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_collision_1, 65),
- IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_collision_2, 66),
- IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_collision_3, 67),
- IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_collision_4, 68),
- IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_collision_5, 69),
- IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_collision_6, 70),
- IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_collision_7, 71),
- IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_full, 72),
- IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pm_wr_trans_filt, 73),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_collision_0, ID(3, 64)),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_collision_1, ID(3, 65)),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_collision_2, ID(3, 66)),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_collision_3, ID(3, 67)),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_collision_4, ID(3, 68)),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_collision_5, ID(3, 69)),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_collision_6, ID(3, 70)),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_collision_7, ID(3, 71)),
+ IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_full, ID(3, 72)),
+	IMX93_DDR_PMU_EVENT_ATTR(eddrtq_pm_wr_trans_filt, ID(3, 73)),	/* imx93 specific */
+	IMX95_DDR_PMU_EVENT_ATTR(eddrtq_pm_rd_beat_filt2, ID(3, 73)),	/* imx95 specific */
/* counter4 specific events */
- IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_open_0, 64),
- IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_open_1, 65),
- IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_open_2, 66),
- IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_open_3, 67),
- IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_open_4, 68),
- IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_open_5, 69),
- IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_open_6, 70),
- IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_open_7, 71),
- IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_ld_rdq2_rmw, 72),
- IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pm_rd_beat_filt, 73),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_open_0, ID(4, 64)),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_open_1, ID(4, 65)),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_open_2, ID(4, 66)),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_open_3, ID(4, 67)),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_open_4, ID(4, 68)),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_open_5, ID(4, 69)),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_open_6, ID(4, 70)),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_row_open_7, ID(4, 71)),
+ IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_ld_rdq2_rmw, ID(4, 72)),
+	IMX93_DDR_PMU_EVENT_ATTR(eddrtq_pm_rd_beat_filt, ID(4, 73)),	/* imx93 specific */
+	IMX95_DDR_PMU_EVENT_ATTR(eddrtq_pm_rd_beat_filt1, ID(4, 73)),	/* imx95 specific */
/* counter5 specific events */
- IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_start_0, 64),
- IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_start_1, 65),
- IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_start_2, 66),
- IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_start_3, 67),
- IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_start_4, 68),
- IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_start_5, 69),
- IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_start_6, 70),
- IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_start_7, 71),
- IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_ld_rdq1, 72),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_start_0, ID(5, 64)),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_start_1, ID(5, 65)),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_start_2, ID(5, 66)),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_start_3, ID(5, 67)),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_start_4, ID(5, 68)),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_start_5, ID(5, 69)),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_start_6, ID(5, 70)),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_start_7, ID(5, 71)),
+ IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_ld_rdq1, ID(5, 72)),
+	IMX95_DDR_PMU_EVENT_ATTR(eddrtq_pm_rd_beat_filt0, ID(5, 73)),	/* imx95 specific */
/* counter6 specific events */
- IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_end_0, 64),
- IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_ld_rdq2, 72),
+ IMX9_DDR_PMU_EVENT_ATTR(ddrc_qx_valid_end_0, ID(6, 64)),
+ IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_ld_rdq2, ID(6, 72)),
/* counter7 specific events */
- IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_1_2_full, 64),
- IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_ld_wrq0, 65),
+ IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_1_2_full, ID(7, 64)),
+ IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_ld_wrq0, ID(7, 65)),
/* counter8 specific events */
- IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_bias_switched, 64),
- IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_1_4_full, 65),
+ IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_bias_switched, ID(8, 64)),
+ IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_1_4_full, ID(8, 65)),
/* counter9 specific events */
- IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_ld_wrq1, 65),
- IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_3_4_full, 66),
+ IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_ld_wrq1, ID(9, 65)),
+ IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_3_4_full, ID(9, 66)),
/* counter10 specific events */
- IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_misc_mrk, 65),
- IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_ld_rdq0, 66),
+ IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_misc_mrk, ID(10, 65)),
+ IMX9_DDR_PMU_EVENT_ATTR(eddrtq_pmon_ld_rdq0, ID(10, 66)),
NULL,
};
+static umode_t
+ddr_perf_events_attrs_is_visible(struct kobject *kobj,
+ struct attribute *attr, int unused)
+{
+ struct pmu *pmu = dev_get_drvdata(kobj_to_dev(kobj));
+ struct ddr_pmu *ddr_pmu = to_ddr_pmu(pmu);
+ struct imx9_pmu_events_attr *eattr;
+
+ eattr = container_of(attr, typeof(*eattr), attr.attr);
+
+ if (!eattr->devtype_data)
+ return attr->mode;
+
+ if (eattr->devtype_data != ddr_pmu->devtype_data &&
+ eattr->devtype_data->filter_ver != ddr_pmu->devtype_data->filter_ver)
+ return 0;
+
+ return attr->mode;
+}
+
static const struct attribute_group ddr_perf_events_attr_group = {
.name = "events",
.attrs = ddr_perf_events_attrs,
+ .is_visible = ddr_perf_events_attrs_is_visible,
};
-PMU_FORMAT_ATTR(event, "config:0-7");
+PMU_FORMAT_ATTR(event, "config:0-7,16-23");
PMU_FORMAT_ATTR(counter, "config:8-15");
PMU_FORMAT_ATTR(axi_id, "config1:0-17");
PMU_FORMAT_ATTR(axi_mask, "config2:0-17");
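
The widened format splits perf_event_attr.config into an event field (bits 0-7, with bits 16-23 reserved for wider event codes) and a counter field (bits 8-15); ID(counter, id) above packs both halves, and the scheduling path decodes them again with FIELD_GET(). A minimal user-space sketch of that packing follows; the *_ILLUS macros are illustrative assumptions, since the driver's own CONFIG_EVENT_MASK/CONFIG_COUNTER_MASK definitions are not part of this hunk.

/* Sketch of the config packing performed by ID(counter, id); masks assumed. */
#include <stdio.h>

#define COUNTER_SHIFT_ILLUS	8
#define EVENT_MASK_ILLUS	0x00ffu		/* config[7:0]  */
#define COUNTER_MASK_ILLUS	0xff00u		/* config[15:8] */

static unsigned int pack_id(unsigned int counter, unsigned int id)
{
	return (counter << COUNTER_SHIFT_ILLUS) | id;	/* mirrors ID(counter, id) */
}

int main(void)
{
	/* eddrtq_pm_rd_trans_filt on counter 2 is ID(2, 73) in the table above */
	unsigned int cfg = pack_id(2, 73);

	printf("config=0x%04x event=%u counter=%u\n", cfg,
	       cfg & EVENT_MASK_ILLUS,
	       (cfg & COUNTER_MASK_ILLUS) >> COUNTER_SHIFT_ILLUS);
	return 0;	/* prints: config=0x0249 event=73 counter=2 */
}
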
@@ -339,8 +439,10 @@ static void ddr_perf_counter_local_config(struct ddr_pmu *pmu, int config,
int counter, bool enable)
{
u32 ctrl_a;
+ int event;
ctrl_a = readl_relaxed(pmu->base + PMLCA(counter));
+ event = FIELD_GET(CONFIG_EVENT_MASK, config);
if (enable) {
ctrl_a |= PMLCA_FC;
@@ -352,7 +454,7 @@ static void ddr_perf_counter_local_config(struct ddr_pmu *pmu, int config,
ctrl_a &= ~PMLCA_FC;
ctrl_a |= PMLCA_CE;
ctrl_a &= ~FIELD_PREP(PMLCA_EVENT, 0x7F);
- ctrl_a |= FIELD_PREP(PMLCA_EVENT, (config & 0x000000FF));
+ ctrl_a |= FIELD_PREP(PMLCA_EVENT, event);
writel(ctrl_a, pmu->base + PMLCA(counter));
} else {
/* Freeze counter. */
@@ -361,39 +463,81 @@ static void ddr_perf_counter_local_config(struct ddr_pmu *pmu, int config,
}
}
-static void ddr_perf_monitor_config(struct ddr_pmu *pmu, int cfg, int cfg1, int cfg2)
+static void imx93_ddr_perf_monitor_config(struct ddr_pmu *pmu, int event,
+ int counter, int axi_id, int axi_mask)
{
u32 pmcfg1, pmcfg2;
- int event, counter;
-
- event = cfg & 0x000000FF;
- counter = (cfg & 0x0000FF00) >> 8;
+ static const u32 mask[] = {
+ MX93_PMCFG1_RD_TRANS_FILT_EN,
+ MX93_PMCFG1_WR_TRANS_FILT_EN,
+ MX93_PMCFG1_RD_BT_FILT_EN
+ };
pmcfg1 = readl_relaxed(pmu->base + PMCFG1);
- if (counter == 2 && event == 73)
- pmcfg1 |= PMCFG1_RD_TRANS_FILT_EN;
- else if (counter == 2 && event != 73)
- pmcfg1 &= ~PMCFG1_RD_TRANS_FILT_EN;
+ if (counter >= 2 && counter <= 4)
+ pmcfg1 = event == 73 ? pmcfg1 | mask[counter - 2] :
+ pmcfg1 & ~mask[counter - 2];
- if (counter == 3 && event == 73)
- pmcfg1 |= PMCFG1_WR_TRANS_FILT_EN;
- else if (counter == 3 && event != 73)
- pmcfg1 &= ~PMCFG1_WR_TRANS_FILT_EN;
+ pmcfg1 &= ~FIELD_PREP(MX93_PMCFG1_ID_MASK, 0x3FFFF);
+ pmcfg1 |= FIELD_PREP(MX93_PMCFG1_ID_MASK, axi_mask);
+ writel_relaxed(pmcfg1, pmu->base + PMCFG1);
- if (counter == 4 && event == 73)
- pmcfg1 |= PMCFG1_RD_BT_FILT_EN;
- else if (counter == 4 && event != 73)
- pmcfg1 &= ~PMCFG1_RD_BT_FILT_EN;
+ pmcfg2 = readl_relaxed(pmu->base + PMCFG2);
+ pmcfg2 &= ~FIELD_PREP(MX93_PMCFG2_ID, 0x3FFFF);
+ pmcfg2 |= FIELD_PREP(MX93_PMCFG2_ID, axi_id);
+ writel_relaxed(pmcfg2, pmu->base + PMCFG2);
+}
- pmcfg1 &= ~FIELD_PREP(PMCFG1_ID_MASK, 0x3FFFF);
- pmcfg1 |= FIELD_PREP(PMCFG1_ID_MASK, cfg2);
- writel(pmcfg1, pmu->base + PMCFG1);
+static void imx95_ddr_perf_monitor_config(struct ddr_pmu *pmu, int event,
+ int counter, int axi_id, int axi_mask)
+{
+ u32 pmcfg1, pmcfg, offset = 0;
- pmcfg2 = readl_relaxed(pmu->base + PMCFG2);
- pmcfg2 &= ~FIELD_PREP(PMCFG2_ID, 0x3FFFF);
- pmcfg2 |= FIELD_PREP(PMCFG2_ID, cfg1);
- writel(pmcfg2, pmu->base + PMCFG2);
+ pmcfg1 = readl_relaxed(pmu->base + PMCFG1);
+
+ if (event == 73) {
+ switch (counter) {
+ case 2:
+ pmcfg1 |= MX95_PMCFG1_WR_BEAT_FILT_EN;
+ offset = PMCFG3;
+ break;
+ case 3:
+ pmcfg1 |= MX95_PMCFG1_RD_BEAT_FILT_EN;
+ offset = PMCFG4;
+ break;
+ case 4:
+ pmcfg1 |= MX95_PMCFG1_RD_BEAT_FILT_EN;
+ offset = PMCFG5;
+ break;
+ case 5:
+ pmcfg1 |= MX95_PMCFG1_RD_BEAT_FILT_EN;
+ offset = PMCFG6;
+ break;
+ }
+ } else {
+ switch (counter) {
+ case 2:
+ pmcfg1 &= ~MX95_PMCFG1_WR_BEAT_FILT_EN;
+ break;
+ case 3:
+ case 4:
+ case 5:
+ pmcfg1 &= ~MX95_PMCFG1_RD_BEAT_FILT_EN;
+ break;
+ }
+ }
+
+ writel_relaxed(pmcfg1, pmu->base + PMCFG1);
+
+ if (offset) {
+ pmcfg = readl_relaxed(pmu->base + offset);
+ pmcfg &= ~(FIELD_PREP(MX95_PMCFG_ID_MASK, 0x3FF) |
+ FIELD_PREP(MX95_PMCFG_ID, 0x3FF));
+ pmcfg |= (FIELD_PREP(MX95_PMCFG_ID_MASK, axi_mask) |
+ FIELD_PREP(MX95_PMCFG_ID, axi_id));
+ writel_relaxed(pmcfg, pmu->base + offset);
+ }
}
static void ddr_perf_event_update(struct perf_event *event)
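
imx95_ddr_perf_monitor_config() spreads the AXI filter over per-counter configuration registers: with event 73 selected, counter 2 enables the write-beat filter through PMCFG3, while counters 3, 4 and 5 enable the read-beat filter through PMCFG4, PMCFG5 and PMCFG6. The table-driven sketch below restates that mapping; the register offsets are hypothetical placeholders, as the real PMCFG3..PMCFG6 definitions are outside this hunk.

/* Counter -> filter-config register mapping on i.MX95; offsets invented. */
#include <stdio.h>

#define PMCFG3_ILLUS	0x624u	/* hypothetical offsets; the real PMCFG3..6 */
#define PMCFG4_ILLUS	0x628u	/* definitions live elsewhere in the driver */
#define PMCFG5_ILLUS	0x62cu
#define PMCFG6_ILLUS	0x630u

static const struct {
	int counter;
	unsigned int cfg_offset;
	const char *filter;
} imx95_filter_map[] = {
	{ 2, PMCFG3_ILLUS, "write beat"  },
	{ 3, PMCFG4_ILLUS, "read beat 2" },
	{ 4, PMCFG5_ILLUS, "read beat 1" },
	{ 5, PMCFG6_ILLUS, "read beat 0" },
};

int main(void)
{
	for (unsigned int i = 0; i < sizeof(imx95_filter_map) / sizeof(imx95_filter_map[0]); i++)
		printf("counter %d + event 73 -> %s filter via reg 0x%03x\n",
		       imx95_filter_map[i].counter, imx95_filter_map[i].filter,
		       imx95_filter_map[i].cfg_offset);
	return 0;
}
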
@@ -460,6 +604,28 @@ static void ddr_perf_event_start(struct perf_event *event, int flags)
hwc->state = 0;
}
+static int ddr_perf_alloc_counter(struct ddr_pmu *pmu, int event, int counter)
+{
+ int i;
+
+ if (event == CYCLES_EVENT_ID) {
+		// The cycles counter is dedicated to the cycles event.
+ if (pmu->events[CYCLES_COUNTER] == NULL)
+ return CYCLES_COUNTER;
+ } else if (counter != 0) {
+		// A counter-specific event must use its own counter.
+ if (pmu->events[counter] == NULL)
+ return counter;
+ } else {
+		// Auto-allocate a counter for a reference event.
+ for (i = 1; i < NUM_COUNTERS; i++)
+ if (pmu->events[i] == NULL)
+ return i;
+ }
+
+ return -ENOENT;
+}
+
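
ddr_perf_alloc_counter() applies a three-way policy: the cycles event always takes the dedicated cycles counter, an event whose config names a counter takes exactly that counter, and a reference event is auto-allocated from whatever is free. A stand-alone sketch of the same selection logic follows; NUM_COUNTERS and the CYCLES_* values are assumptions, since their definitions are not visible in this hunk, and nothing is marked busy here (the driver records the event in pmu->events[] afterwards).

/* Stand-alone sketch of the counter allocation policy; constants assumed. */
#include <stdio.h>

#define NUM_COUNTERS_ILLUS	11
#define CYCLES_COUNTER_ILLUS	0
#define CYCLES_EVENT_ID_ILLUS	0

static const void *events[NUM_COUNTERS_ILLUS];	/* NULL == counter is free */

static int alloc_counter(int event, int counter)
{
	if (event == CYCLES_EVENT_ID_ILLUS)		/* dedicated cycles counter */
		return events[CYCLES_COUNTER_ILLUS] ? -1 : CYCLES_COUNTER_ILLUS;

	if (counter != 0)				/* counter-specific event */
		return events[counter] ? -1 : counter;

	for (int i = 1; i < NUM_COUNTERS_ILLUS; i++)	/* reference event: first free */
		if (!events[i])
			return i;
	return -1;
}

int main(void)
{
	printf("cycles -> %d, counter-specific(3) -> %d, reference -> %d\n",
	       alloc_counter(0, 0), alloc_counter(64, 3), alloc_counter(65, 0));
	return 0;	/* prints: cycles -> 0, counter-specific(3) -> 3, reference -> 1 */
}
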
static int ddr_perf_event_add(struct perf_event *event, int flags)
{
struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
@@ -467,21 +633,33 @@ static int ddr_perf_event_add(struct perf_event *event, int flags)
int cfg = event->attr.config;
int cfg1 = event->attr.config1;
int cfg2 = event->attr.config2;
- int counter;
+ int event_id, counter;
- counter = (cfg & 0x0000FF00) >> 8;
+ event_id = FIELD_GET(CONFIG_EVENT_MASK, cfg);
+ counter = FIELD_GET(CONFIG_COUNTER_MASK, cfg);
+
+ counter = ddr_perf_alloc_counter(pmu, event_id, counter);
+ if (counter < 0) {
+ dev_dbg(pmu->dev, "There are not enough counters\n");
+ return -EOPNOTSUPP;
+ }
pmu->events[counter] = event;
pmu->active_events++;
hwc->idx = counter;
hwc->state |= PERF_HES_STOPPED;
+ if (axi_filter_v1(pmu))
+ /* read trans, write trans, read beat */
+ imx93_ddr_perf_monitor_config(pmu, event_id, counter, cfg1, cfg2);
+
+ if (axi_filter_v2(pmu))
+ /* write beat, read beat2, read beat1, read beat */
+ imx95_ddr_perf_monitor_config(pmu, event_id, counter, cfg1, cfg2);
+
if (flags & PERF_EF_START)
ddr_perf_event_start(event, flags);
- /* read trans, write trans, read beat */
- ddr_perf_monitor_config(pmu, cfg, cfg1, cfg2);
-
return 0;
}
@@ -501,9 +679,11 @@ static void ddr_perf_event_del(struct perf_event *event, int flags)
{
struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
+ int counter = hwc->idx;
ddr_perf_event_stop(event, PERF_EF_UPDATE);
+ pmu->events[counter] = NULL;
pmu->active_events--;
hwc->idx = -1;
}
@@ -617,7 +797,7 @@ static int ddr_perf_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pmu);
- pmu->id = ida_simple_get(&ddr_ida, 0, 0, GFP_KERNEL);
+ pmu->id = ida_alloc(&ddr_ida, GFP_KERNEL);
name = devm_kasprintf(&pdev->dev, GFP_KERNEL, DDR_PERF_DEV_NAME "%d", pmu->id);
if (!name) {
ret = -ENOMEM;
@@ -674,12 +854,12 @@ cpuhp_instance_err:
cpuhp_remove_multi_state(pmu->cpuhp_state);
cpuhp_state_err:
format_string_err:
- ida_simple_remove(&ddr_ida, pmu->id);
+ ida_free(&ddr_ida, pmu->id);
dev_warn(&pdev->dev, "i.MX9 DDR Perf PMU failed (%d), disabled\n", ret);
return ret;
}
-static int ddr_perf_remove(struct platform_device *pdev)
+static void ddr_perf_remove(struct platform_device *pdev)
{
struct ddr_pmu *pmu = platform_get_drvdata(pdev);
@@ -688,9 +868,7 @@ static int ddr_perf_remove(struct platform_device *pdev)
perf_pmu_unregister(&pmu->pmu);
- ida_simple_remove(&ddr_ida, pmu->id);
-
- return 0;
+ ida_free(&ddr_ida, pmu->id);
}
static struct platform_driver imx_ddr_pmu_driver = {
diff --git a/drivers/perf/fujitsu_uncore_pmu.c b/drivers/perf/fujitsu_uncore_pmu.c
new file mode 100644
index 000000000000..c3c6f56474ad
--- /dev/null
+++ b/drivers/perf/fujitsu_uncore_pmu.c
@@ -0,0 +1,613 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Driver for the Uncore PMUs in Fujitsu chips.
+ *
+ * See Documentation/admin-guide/perf/fujitsu_uncore_pmu.rst for more details.
+ *
+ * Copyright (c) 2025 Fujitsu. All rights reserved.
+ */
+
+#include <linux/acpi.h>
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/perf_event.h>
+#include <linux/platform_device.h>
+
+/* Number of counters on each PMU */
+#define MAC_NUM_COUNTERS 8
+#define PCI_NUM_COUNTERS 8
+/* Mask for the event type field within perf_event_attr.config and EVTYPE reg */
+#define UNCORE_EVTYPE_MASK 0xFF
+
+/* Perfmon registers */
+#define PM_EVCNTR(__cntr) (0x000 + (__cntr) * 8)
+#define PM_CNTCTL(__cntr) (0x100 + (__cntr) * 8)
+#define PM_CNTCTL_RESET 0
+#define PM_EVTYPE(__cntr) (0x200 + (__cntr) * 8)
+#define PM_EVTYPE_EVSEL(__val) FIELD_GET(UNCORE_EVTYPE_MASK, __val)
+#define PM_CR 0x400
+#define PM_CR_RESET BIT(1)
+#define PM_CR_ENABLE BIT(0)
+#define PM_CNTENSET 0x410
+#define PM_CNTENSET_IDX(__cntr) BIT(__cntr)
+#define PM_CNTENCLR 0x418
+#define PM_CNTENCLR_IDX(__cntr) BIT(__cntr)
+#define PM_CNTENCLR_RESET 0xFF
+#define PM_INTENSET 0x420
+#define PM_INTENSET_IDX(__cntr) BIT(__cntr)
+#define PM_INTENCLR 0x428
+#define PM_INTENCLR_IDX(__cntr) BIT(__cntr)
+#define PM_INTENCLR_RESET 0xFF
+#define PM_OVSR 0x440
+#define PM_OVSR_OVSRCLR_RESET 0xFF
+
+enum fujitsu_uncore_pmu {
+ FUJITSU_UNCORE_PMU_MAC = 1,
+ FUJITSU_UNCORE_PMU_PCI = 2,
+};
+
+struct uncore_pmu {
+ int num_counters;
+ struct pmu pmu;
+ struct hlist_node node;
+ void __iomem *regs;
+ struct perf_event **events;
+ unsigned long *used_mask;
+ int cpu;
+ int irq;
+ struct device *dev;
+};
+
+#define to_uncore_pmu(p) (container_of(p, struct uncore_pmu, pmu))
+
+static int uncore_pmu_cpuhp_state;
+
+static void fujitsu_uncore_counter_start(struct perf_event *event)
+{
+ struct uncore_pmu *uncorepmu = to_uncore_pmu(event->pmu);
+ int idx = event->hw.idx;
+
+	/* Initialize the hardware counter and reset prev_count */
+ local64_set(&event->hw.prev_count, 0);
+ writeq_relaxed(0, uncorepmu->regs + PM_EVCNTR(idx));
+
+ /* Set the event type */
+ writeq_relaxed(PM_EVTYPE_EVSEL(event->attr.config), uncorepmu->regs + PM_EVTYPE(idx));
+
+ /* Enable interrupt generation by this counter */
+ writeq_relaxed(PM_INTENSET_IDX(idx), uncorepmu->regs + PM_INTENSET);
+
+ /* Finally, enable the counter */
+ writeq_relaxed(PM_CNTCTL_RESET, uncorepmu->regs + PM_CNTCTL(idx));
+ writeq_relaxed(PM_CNTENSET_IDX(idx), uncorepmu->regs + PM_CNTENSET);
+}
+
+static void fujitsu_uncore_counter_stop(struct perf_event *event)
+{
+ struct uncore_pmu *uncorepmu = to_uncore_pmu(event->pmu);
+ int idx = event->hw.idx;
+
+ /* Disable the counter */
+ writeq_relaxed(PM_CNTENCLR_IDX(idx), uncorepmu->regs + PM_CNTENCLR);
+
+ /* Disable interrupt generation by this counter */
+ writeq_relaxed(PM_INTENCLR_IDX(idx), uncorepmu->regs + PM_INTENCLR);
+}
+
+static void fujitsu_uncore_counter_update(struct perf_event *event)
+{
+ struct uncore_pmu *uncorepmu = to_uncore_pmu(event->pmu);
+ int idx = event->hw.idx;
+ u64 prev, new;
+
+ do {
+ prev = local64_read(&event->hw.prev_count);
+ new = readq_relaxed(uncorepmu->regs + PM_EVCNTR(idx));
+ } while (local64_cmpxchg(&event->hw.prev_count, prev, new) != prev);
+
+ local64_add(new - prev, &event->count);
+}
+
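
fujitsu_uncore_counter_update() uses the familiar lock-free accumulation idiom: re-read the hardware counter until prev_count can be swapped in one atomic step, then add the delta, so an update racing with the overflow interrupt never accounts the same delta twice. A rough user-space analogue with C11 atomics is sketched below; the kernel's local64_* primitives are per-CPU and cheaper, and the fake counter read merely stands in for readq_relaxed() on PM_EVCNTR.

/* User-space analogue of the prev/new cmpxchg accumulation loop. */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t prev_count;
static _Atomic uint64_t count;

static uint64_t read_hw_counter(void)
{
	static uint64_t fake;		/* stands in for the PM_EVCNTR read */
	return fake += 100;
}

static void counter_update(void)
{
	uint64_t prev, new;

	do {
		prev = atomic_load(&prev_count);
		new = read_hw_counter();
	} while (!atomic_compare_exchange_weak(&prev_count, &prev, new));

	atomic_fetch_add(&count, new - prev);
}

int main(void)
{
	counter_update();
	counter_update();
	printf("accumulated %llu\n", (unsigned long long)atomic_load(&count));
	return 0;	/* prints: accumulated 200 */
}
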
+static inline void fujitsu_uncore_init(struct uncore_pmu *uncorepmu)
+{
+ int i;
+
+ writeq_relaxed(PM_CR_RESET, uncorepmu->regs + PM_CR);
+
+ writeq_relaxed(PM_CNTENCLR_RESET, uncorepmu->regs + PM_CNTENCLR);
+ writeq_relaxed(PM_INTENCLR_RESET, uncorepmu->regs + PM_INTENCLR);
+ writeq_relaxed(PM_OVSR_OVSRCLR_RESET, uncorepmu->regs + PM_OVSR);
+
+ for (i = 0; i < uncorepmu->num_counters; ++i) {
+ writeq_relaxed(PM_CNTCTL_RESET, uncorepmu->regs + PM_CNTCTL(i));
+ writeq_relaxed(PM_EVTYPE_EVSEL(0), uncorepmu->regs + PM_EVTYPE(i));
+ }
+ writeq_relaxed(PM_CR_ENABLE, uncorepmu->regs + PM_CR);
+}
+
+static irqreturn_t fujitsu_uncore_handle_irq(int irq_num, void *data)
+{
+ struct uncore_pmu *uncorepmu = data;
+ /* Read the overflow status register */
+ long status = readq_relaxed(uncorepmu->regs + PM_OVSR);
+ int idx;
+
+ if (status == 0)
+ return IRQ_NONE;
+
+ /* Clear the bits we read on the overflow status register */
+ writeq_relaxed(status, uncorepmu->regs + PM_OVSR);
+
+ for_each_set_bit(idx, &status, uncorepmu->num_counters) {
+ struct perf_event *event;
+
+ event = uncorepmu->events[idx];
+ if (!event)
+ continue;
+
+ fujitsu_uncore_counter_update(event);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void fujitsu_uncore_pmu_enable(struct pmu *pmu)
+{
+ writeq_relaxed(PM_CR_ENABLE, to_uncore_pmu(pmu)->regs + PM_CR);
+}
+
+static void fujitsu_uncore_pmu_disable(struct pmu *pmu)
+{
+ writeq_relaxed(0, to_uncore_pmu(pmu)->regs + PM_CR);
+}
+
+static bool fujitsu_uncore_validate_event_group(struct perf_event *event)
+{
+ struct uncore_pmu *uncorepmu = to_uncore_pmu(event->pmu);
+ struct perf_event *leader = event->group_leader;
+ struct perf_event *sibling;
+ int counters = 1;
+
+ if (leader == event)
+ return true;
+
+ if (leader->pmu == event->pmu)
+ counters++;
+
+ for_each_sibling_event(sibling, leader) {
+ if (sibling->pmu == event->pmu)
+ counters++;
+ }
+
+ /*
+ * If the group requires more counters than the HW has, it
+ * cannot ever be scheduled.
+ */
+ return counters <= uncorepmu->num_counters;
+}
+
+static int fujitsu_uncore_event_init(struct perf_event *event)
+{
+ struct uncore_pmu *uncorepmu = to_uncore_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+
+ /* Is the event for this PMU? */
+ if (event->attr.type != event->pmu->type)
+ return -ENOENT;
+
+ /*
+ * Sampling not supported since these events are not
+ * core-attributable.
+ */
+ if (is_sampling_event(event))
+ return -EINVAL;
+
+ /*
+	 * Task mode is not available: the counters are socket-wide, not
+	 * attributable to any CPU, and therefore cannot be attributed per-task.
+ */
+ if (event->cpu < 0)
+ return -EINVAL;
+
+ /* Validate the group */
+ if (!fujitsu_uncore_validate_event_group(event))
+ return -EINVAL;
+
+ hwc->idx = -1;
+
+ event->cpu = uncorepmu->cpu;
+
+ return 0;
+}
+
+static void fujitsu_uncore_event_start(struct perf_event *event, int flags)
+{
+ struct hw_perf_event *hwc = &event->hw;
+
+ hwc->state = 0;
+ fujitsu_uncore_counter_start(event);
+}
+
+static void fujitsu_uncore_event_stop(struct perf_event *event, int flags)
+{
+ struct hw_perf_event *hwc = &event->hw;
+
+ if (hwc->state & PERF_HES_STOPPED)
+ return;
+
+ fujitsu_uncore_counter_stop(event);
+ if (flags & PERF_EF_UPDATE)
+ fujitsu_uncore_counter_update(event);
+ hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
+}
+
+static int fujitsu_uncore_event_add(struct perf_event *event, int flags)
+{
+ struct uncore_pmu *uncorepmu = to_uncore_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ int idx;
+
+ /* Try to allocate a counter. */
+ idx = bitmap_find_free_region(uncorepmu->used_mask, uncorepmu->num_counters, 0);
+ if (idx < 0)
+ /* The counters are all in use. */
+ return -EAGAIN;
+
+ hwc->idx = idx;
+ hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
+ uncorepmu->events[idx] = event;
+
+ if (flags & PERF_EF_START)
+ fujitsu_uncore_event_start(event, 0);
+
+ /* Propagate changes to the userspace mapping. */
+ perf_event_update_userpage(event);
+
+ return 0;
+}
+
+static void fujitsu_uncore_event_del(struct perf_event *event, int flags)
+{
+ struct uncore_pmu *uncorepmu = to_uncore_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+
+ /* Stop and clean up */
+ fujitsu_uncore_event_stop(event, flags | PERF_EF_UPDATE);
+ uncorepmu->events[hwc->idx] = NULL;
+ bitmap_release_region(uncorepmu->used_mask, hwc->idx, 0);
+
+ /* Propagate changes to the userspace mapping. */
+ perf_event_update_userpage(event);
+}
+
+static void fujitsu_uncore_event_read(struct perf_event *event)
+{
+ fujitsu_uncore_counter_update(event);
+}
+
+#define UNCORE_PMU_FORMAT_ATTR(_name, _config) \
+ (&((struct dev_ext_attribute[]) { \
+ { .attr = __ATTR(_name, 0444, device_show_string, NULL), \
+ .var = (void *)_config, } \
+ })[0].attr.attr)
+
+static struct attribute *fujitsu_uncore_pmu_formats[] = {
+ UNCORE_PMU_FORMAT_ATTR(event, "config:0-7"),
+ NULL
+};
+
+static const struct attribute_group fujitsu_uncore_pmu_format_group = {
+ .name = "format",
+ .attrs = fujitsu_uncore_pmu_formats,
+};
+
+static ssize_t fujitsu_uncore_pmu_event_show(struct device *dev,
+ struct device_attribute *attr, char *page)
+{
+ struct perf_pmu_events_attr *pmu_attr;
+
+ pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
+ return sysfs_emit(page, "event=0x%02llx\n", pmu_attr->id);
+}
+
+#define MAC_EVENT_ATTR(_name, _id) \
+ PMU_EVENT_ATTR_ID(_name, fujitsu_uncore_pmu_event_show, _id)
+
+static struct attribute *fujitsu_uncore_mac_pmu_events[] = {
+ MAC_EVENT_ATTR(cycles, 0x00),
+ MAC_EVENT_ATTR(read-count, 0x10),
+ MAC_EVENT_ATTR(read-count-request, 0x11),
+ MAC_EVENT_ATTR(read-count-return, 0x12),
+ MAC_EVENT_ATTR(read-count-request-pftgt, 0x13),
+ MAC_EVENT_ATTR(read-count-request-normal, 0x14),
+ MAC_EVENT_ATTR(read-count-return-pftgt-hit, 0x15),
+ MAC_EVENT_ATTR(read-count-return-pftgt-miss, 0x16),
+ MAC_EVENT_ATTR(read-wait, 0x17),
+ MAC_EVENT_ATTR(write-count, 0x20),
+ MAC_EVENT_ATTR(write-count-write, 0x21),
+ MAC_EVENT_ATTR(write-count-pwrite, 0x22),
+ MAC_EVENT_ATTR(memory-read-count, 0x40),
+ MAC_EVENT_ATTR(memory-write-count, 0x50),
+ MAC_EVENT_ATTR(memory-pwrite-count, 0x60),
+ MAC_EVENT_ATTR(ea-mac, 0x80),
+ MAC_EVENT_ATTR(ea-memory, 0x90),
+ MAC_EVENT_ATTR(ea-memory-mac-write, 0x92),
+ MAC_EVENT_ATTR(ea-ha, 0xa0),
+ NULL
+};
+
+#define PCI_EVENT_ATTR(_name, _id) \
+ PMU_EVENT_ATTR_ID(_name, fujitsu_uncore_pmu_event_show, _id)
+
+static struct attribute *fujitsu_uncore_pci_pmu_events[] = {
+ PCI_EVENT_ATTR(pci-port0-cycles, 0x00),
+ PCI_EVENT_ATTR(pci-port0-read-count, 0x10),
+ PCI_EVENT_ATTR(pci-port0-read-count-bus, 0x14),
+ PCI_EVENT_ATTR(pci-port0-write-count, 0x20),
+ PCI_EVENT_ATTR(pci-port0-write-count-bus, 0x24),
+ PCI_EVENT_ATTR(pci-port1-cycles, 0x40),
+ PCI_EVENT_ATTR(pci-port1-read-count, 0x50),
+ PCI_EVENT_ATTR(pci-port1-read-count-bus, 0x54),
+ PCI_EVENT_ATTR(pci-port1-write-count, 0x60),
+ PCI_EVENT_ATTR(pci-port1-write-count-bus, 0x64),
+ PCI_EVENT_ATTR(ea-pci, 0x80),
+ NULL
+};
+
+static const struct attribute_group fujitsu_uncore_mac_pmu_events_group = {
+ .name = "events",
+ .attrs = fujitsu_uncore_mac_pmu_events,
+};
+
+static const struct attribute_group fujitsu_uncore_pci_pmu_events_group = {
+ .name = "events",
+ .attrs = fujitsu_uncore_pci_pmu_events,
+};
+
+static ssize_t cpumask_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct uncore_pmu *uncorepmu = to_uncore_pmu(dev_get_drvdata(dev));
+
+ return cpumap_print_to_pagebuf(true, buf, cpumask_of(uncorepmu->cpu));
+}
+static DEVICE_ATTR_RO(cpumask);
+
+static struct attribute *fujitsu_uncore_pmu_cpumask_attrs[] = {
+ &dev_attr_cpumask.attr,
+ NULL
+};
+
+static const struct attribute_group fujitsu_uncore_pmu_cpumask_attr_group = {
+ .attrs = fujitsu_uncore_pmu_cpumask_attrs,
+};
+
+static const struct attribute_group *fujitsu_uncore_mac_pmu_attr_grps[] = {
+ &fujitsu_uncore_pmu_format_group,
+ &fujitsu_uncore_mac_pmu_events_group,
+ &fujitsu_uncore_pmu_cpumask_attr_group,
+ NULL
+};
+
+static const struct attribute_group *fujitsu_uncore_pci_pmu_attr_grps[] = {
+ &fujitsu_uncore_pmu_format_group,
+ &fujitsu_uncore_pci_pmu_events_group,
+ &fujitsu_uncore_pmu_cpumask_attr_group,
+ NULL
+};
+
+static void fujitsu_uncore_pmu_migrate(struct uncore_pmu *uncorepmu, unsigned int cpu)
+{
+ perf_pmu_migrate_context(&uncorepmu->pmu, uncorepmu->cpu, cpu);
+ irq_set_affinity(uncorepmu->irq, cpumask_of(cpu));
+ uncorepmu->cpu = cpu;
+}
+
+static int fujitsu_uncore_pmu_online_cpu(unsigned int cpu, struct hlist_node *cpuhp_node)
+{
+ struct uncore_pmu *uncorepmu;
+ int node;
+
+ uncorepmu = hlist_entry_safe(cpuhp_node, struct uncore_pmu, node);
+ node = dev_to_node(uncorepmu->dev);
+ if (cpu_to_node(uncorepmu->cpu) != node && cpu_to_node(cpu) == node)
+ fujitsu_uncore_pmu_migrate(uncorepmu, cpu);
+
+ return 0;
+}
+
+static int fujitsu_uncore_pmu_offline_cpu(unsigned int cpu, struct hlist_node *cpuhp_node)
+{
+ struct uncore_pmu *uncorepmu;
+ unsigned int target;
+ int node;
+
+ uncorepmu = hlist_entry_safe(cpuhp_node, struct uncore_pmu, node);
+ if (cpu != uncorepmu->cpu)
+ return 0;
+
+ node = dev_to_node(uncorepmu->dev);
+ target = cpumask_any_and_but(cpumask_of_node(node), cpu_online_mask, cpu);
+ if (target >= nr_cpu_ids)
+ target = cpumask_any_but(cpu_online_mask, cpu);
+
+ if (target < nr_cpu_ids)
+ fujitsu_uncore_pmu_migrate(uncorepmu, target);
+
+ return 0;
+}
+
+static int fujitsu_uncore_pmu_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ unsigned long device_type = (unsigned long)device_get_match_data(dev);
+ const struct attribute_group **attr_groups;
+ struct uncore_pmu *uncorepmu;
+ struct resource *memrc;
+ size_t alloc_size;
+ char *name;
+ int ret;
+ int irq;
+ u64 uid;
+
+ ret = acpi_dev_uid_to_integer(ACPI_COMPANION(dev), &uid);
+ if (ret)
+ return dev_err_probe(dev, ret, "unable to read ACPI uid\n");
+
+ uncorepmu = devm_kzalloc(dev, sizeof(*uncorepmu), GFP_KERNEL);
+ if (!uncorepmu)
+ return -ENOMEM;
+ uncorepmu->dev = dev;
+ uncorepmu->cpu = cpumask_local_spread(0, dev_to_node(dev));
+ platform_set_drvdata(pdev, uncorepmu);
+
+ switch (device_type) {
+ case FUJITSU_UNCORE_PMU_MAC:
+ uncorepmu->num_counters = MAC_NUM_COUNTERS;
+ attr_groups = fujitsu_uncore_mac_pmu_attr_grps;
+ name = devm_kasprintf(dev, GFP_KERNEL, "mac_iod%llu_mac%llu_ch%llu",
+ (uid >> 8) & 0xF, (uid >> 4) & 0xF, uid & 0xF);
+ break;
+ case FUJITSU_UNCORE_PMU_PCI:
+ uncorepmu->num_counters = PCI_NUM_COUNTERS;
+ attr_groups = fujitsu_uncore_pci_pmu_attr_grps;
+ name = devm_kasprintf(dev, GFP_KERNEL, "pci_iod%llu_pci%llu",
+ (uid >> 4) & 0xF, uid & 0xF);
+ break;
+ default:
+ return dev_err_probe(dev, -EINVAL, "illegal device type: %lu\n", device_type);
+ }
+ if (!name)
+ return -ENOMEM;
+
+ uncorepmu->pmu = (struct pmu) {
+ .parent = dev,
+ .task_ctx_nr = perf_invalid_context,
+
+ .attr_groups = attr_groups,
+
+ .pmu_enable = fujitsu_uncore_pmu_enable,
+ .pmu_disable = fujitsu_uncore_pmu_disable,
+ .event_init = fujitsu_uncore_event_init,
+ .add = fujitsu_uncore_event_add,
+ .del = fujitsu_uncore_event_del,
+ .start = fujitsu_uncore_event_start,
+ .stop = fujitsu_uncore_event_stop,
+ .read = fujitsu_uncore_event_read,
+
+ .capabilities = PERF_PMU_CAP_NO_EXCLUDE | PERF_PMU_CAP_NO_INTERRUPT,
+ };
+
+ alloc_size = sizeof(uncorepmu->events[0]) * uncorepmu->num_counters;
+ uncorepmu->events = devm_kzalloc(dev, alloc_size, GFP_KERNEL);
+ if (!uncorepmu->events)
+ return -ENOMEM;
+
+ alloc_size = sizeof(uncorepmu->used_mask[0]) * BITS_TO_LONGS(uncorepmu->num_counters);
+ uncorepmu->used_mask = devm_kzalloc(dev, alloc_size, GFP_KERNEL);
+ if (!uncorepmu->used_mask)
+ return -ENOMEM;
+
+ uncorepmu->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &memrc);
+ if (IS_ERR(uncorepmu->regs))
+ return PTR_ERR(uncorepmu->regs);
+
+ fujitsu_uncore_init(uncorepmu);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_irq(dev, irq, fujitsu_uncore_handle_irq,
+ IRQF_NOBALANCING | IRQF_NO_THREAD,
+ name, uncorepmu);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to request IRQ:%d\n", irq);
+
+ ret = irq_set_affinity(irq, cpumask_of(uncorepmu->cpu));
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to set irq affinity:%d\n", irq);
+
+ uncorepmu->irq = irq;
+
+ /* Add this instance to the list used by the offline callback */
+ ret = cpuhp_state_add_instance(uncore_pmu_cpuhp_state, &uncorepmu->node);
+ if (ret)
+		return dev_err_probe(dev, ret, "Error registering hotplug\n");
+
+ ret = perf_pmu_register(&uncorepmu->pmu, name, -1);
+ if (ret < 0) {
+ cpuhp_state_remove_instance_nocalls(uncore_pmu_cpuhp_state, &uncorepmu->node);
+ return dev_err_probe(dev, ret, "Failed to register %s PMU\n", name);
+ }
+
+ dev_dbg(dev, "Registered %s, type: %d\n", name, uncorepmu->pmu.type);
+
+ return 0;
+}
+
+static void fujitsu_uncore_pmu_remove(struct platform_device *pdev)
+{
+ struct uncore_pmu *uncorepmu = platform_get_drvdata(pdev);
+
+ writeq_relaxed(0, uncorepmu->regs + PM_CR);
+
+ perf_pmu_unregister(&uncorepmu->pmu);
+ cpuhp_state_remove_instance_nocalls(uncore_pmu_cpuhp_state, &uncorepmu->node);
+}
+
+static const struct acpi_device_id fujitsu_uncore_pmu_acpi_match[] = {
+ { "FUJI200C", FUJITSU_UNCORE_PMU_MAC },
+ { "FUJI200D", FUJITSU_UNCORE_PMU_PCI },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, fujitsu_uncore_pmu_acpi_match);
+
+static struct platform_driver fujitsu_uncore_pmu_driver = {
+ .driver = {
+ .name = "fujitsu-uncore-pmu",
+ .acpi_match_table = fujitsu_uncore_pmu_acpi_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = fujitsu_uncore_pmu_probe,
+ .remove = fujitsu_uncore_pmu_remove,
+};
+
+static int __init fujitsu_uncore_pmu_init(void)
+{
+ int ret;
+
+ /* Install a hook to update the reader CPU in case it goes offline */
+ ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
+ "perf/fujitsu/uncore:online",
+ fujitsu_uncore_pmu_online_cpu,
+ fujitsu_uncore_pmu_offline_cpu);
+ if (ret < 0)
+ return ret;
+
+ uncore_pmu_cpuhp_state = ret;
+
+ ret = platform_driver_register(&fujitsu_uncore_pmu_driver);
+ if (ret)
+ cpuhp_remove_multi_state(uncore_pmu_cpuhp_state);
+
+ return ret;
+}
+
+static void __exit fujitsu_uncore_pmu_exit(void)
+{
+ platform_driver_unregister(&fujitsu_uncore_pmu_driver);
+ cpuhp_remove_multi_state(uncore_pmu_cpuhp_state);
+}
+
+module_init(fujitsu_uncore_pmu_init);
+module_exit(fujitsu_uncore_pmu_exit);
+
+MODULE_AUTHOR("Koichi Okuno <fj2767dz@fujitsu.com>");
+MODULE_DESCRIPTION("Fujitsu Uncore PMU driver");
+MODULE_LICENSE("GPL");
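
The probe path derives the PMU name from the ACPI _UID: a MAC instance encodes I/O die, MAC and channel in the low three nibbles (mac_iod<N>_mac<N>_ch<N>), while a PCI instance encodes I/O die and PCI unit in the low two (pci_iod<N>_pci<N>). The sketch below replays that decoding on a made-up _UID value.

/* Decode the uncore PMU name components from an ACPI _UID; value invented. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t uid = 0x123;	/* hypothetical _UID */

	/* MAC flavour: iod in bits [11:8], mac in [7:4], channel in [3:0] */
	printf("mac_iod%llu_mac%llu_ch%llu\n",
	       (unsigned long long)((uid >> 8) & 0xF),
	       (unsigned long long)((uid >> 4) & 0xF),
	       (unsigned long long)(uid & 0xF));

	/* PCI flavour: iod in bits [7:4], pci in [3:0] */
	printf("pci_iod%llu_pci%llu\n",
	       (unsigned long long)((uid >> 4) & 0xF),
	       (unsigned long long)(uid & 0xF));
	return 0;	/* prints: mac_iod1_mac2_ch3 and pci_iod2_pci3 */
}
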
diff --git a/drivers/perf/hisilicon/Makefile b/drivers/perf/hisilicon/Makefile
index 48dcc8381ea7..186be3d02238 100644
--- a/drivers/perf/hisilicon/Makefile
+++ b/drivers/perf/hisilicon/Makefile
@@ -1,7 +1,8 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_HISI_PMU) += hisi_uncore_pmu.o hisi_uncore_l3c_pmu.o \
hisi_uncore_hha_pmu.o hisi_uncore_ddrc_pmu.o hisi_uncore_sllc_pmu.o \
- hisi_uncore_pa_pmu.o hisi_uncore_cpa_pmu.o hisi_uncore_uc_pmu.o
+ hisi_uncore_pa_pmu.o hisi_uncore_cpa_pmu.o hisi_uncore_uc_pmu.o \
+ hisi_uncore_noc_pmu.o hisi_uncore_mn_pmu.o
obj-$(CONFIG_HISI_PCIE_PMU) += hisi_pcie_pmu.o
obj-$(CONFIG_HNS3_PMU) += hns3_pmu.o
diff --git a/drivers/perf/hisilicon/hisi_pcie_pmu.c b/drivers/perf/hisilicon/hisi_pcie_pmu.c
index 5a00adb2de8c..c5394d007b61 100644
--- a/drivers/perf/hisilicon/hisi_pcie_pmu.c
+++ b/drivers/perf/hisilicon/hisi_pcie_pmu.c
@@ -99,16 +99,6 @@ HISI_PCIE_PMU_FILTER_ATTR(len_mode, config1, 11, 10);
HISI_PCIE_PMU_FILTER_ATTR(port, config2, 15, 0);
HISI_PCIE_PMU_FILTER_ATTR(bdf, config2, 31, 16);
-static ssize_t hisi_pcie_format_sysfs_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct dev_ext_attribute *eattr;
-
- eattr = container_of(attr, struct dev_ext_attribute, attr);
-
- return sysfs_emit(buf, "%s\n", (char *)eattr->var);
-}
-
static ssize_t hisi_pcie_event_sysfs_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -120,8 +110,7 @@ static ssize_t hisi_pcie_event_sysfs_show(struct device *dev, struct device_attr
#define HISI_PCIE_PMU_FORMAT_ATTR(_name, _format) \
(&((struct dev_ext_attribute[]){ \
- { .attr = __ATTR(_name, 0444, hisi_pcie_format_sysfs_show, \
- NULL), \
+ { .attr = __ATTR(_name, 0444, device_show_string, NULL), \
.var = (void *)_format } \
})[0].attr.attr)
@@ -152,6 +141,22 @@ static ssize_t bus_show(struct device *dev, struct device_attribute *attr, char
}
static DEVICE_ATTR_RO(bus);
+static ssize_t bdf_min_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(dev_get_drvdata(dev));
+
+ return sysfs_emit(buf, "%#04x\n", pcie_pmu->bdf_min);
+}
+static DEVICE_ATTR_RO(bdf_min);
+
+static ssize_t bdf_max_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(dev_get_drvdata(dev));
+
+ return sysfs_emit(buf, "%#04x\n", pcie_pmu->bdf_max);
+}
+static DEVICE_ATTR_RO(bdf_max);
+
static struct hisi_pcie_reg_pair
hisi_pcie_parse_reg_value(struct hisi_pcie_pmu *pcie_pmu, u32 reg_off)
{
@@ -216,12 +221,10 @@ static void hisi_pcie_pmu_writeq(struct hisi_pcie_pmu *pcie_pmu, u32 reg_offset,
writeq_relaxed(val, pcie_pmu->base + offset);
}
-static void hisi_pcie_pmu_config_filter(struct perf_event *event)
+static u64 hisi_pcie_pmu_get_event_ctrl_val(struct perf_event *event)
{
- struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(event->pmu);
- struct hw_perf_event *hwc = &event->hw;
u64 port, trig_len, thr_len, len_mode;
- u64 reg = HISI_PCIE_INIT_SET;
+ u64 reg = 0;
/* Config HISI_PCIE_EVENT_CTRL according to event. */
reg |= FIELD_PREP(HISI_PCIE_EVENT_M, hisi_pcie_get_real_event(event));
@@ -256,10 +259,19 @@ static void hisi_pcie_pmu_config_filter(struct perf_event *event)
else
reg |= FIELD_PREP(HISI_PCIE_LEN_M, HISI_PCIE_LEN_M_DEFAULT);
+ return reg;
+}
+
+static void hisi_pcie_pmu_config_event_ctrl(struct perf_event *event)
+{
+ struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ u64 reg = hisi_pcie_pmu_get_event_ctrl_val(event);
+
hisi_pcie_pmu_writeq(pcie_pmu, HISI_PCIE_EVENT_CTRL, hwc->idx, reg);
}
-static void hisi_pcie_pmu_clear_filter(struct perf_event *event)
+static void hisi_pcie_pmu_clear_event_ctrl(struct perf_event *event)
{
struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
@@ -299,18 +311,24 @@ static bool hisi_pcie_pmu_valid_filter(struct perf_event *event,
if (hisi_pcie_get_trig_len(event) > HISI_PCIE_TRIG_MAX_VAL)
return false;
- if (requester_id) {
- if (!hisi_pcie_pmu_valid_requester_id(pcie_pmu, requester_id))
- return false;
- }
+ /* Need to explicitly set filter of "port" or "bdf" */
+ if (!hisi_pcie_get_port(event) &&
+ !hisi_pcie_pmu_valid_requester_id(pcie_pmu, requester_id))
+ return false;
return true;
}
+/*
+ * Check whether two events share the same config. "Same config" means not
+ * only the same event code but also the same filter settings for both
+ * events.
+ */
static bool hisi_pcie_pmu_cmp_event(struct perf_event *target,
struct perf_event *event)
{
- return hisi_pcie_get_real_event(target) == hisi_pcie_get_real_event(event);
+ return hisi_pcie_pmu_get_event_ctrl_val(target) ==
+ hisi_pcie_pmu_get_event_ctrl_val(event);
}
static bool hisi_pcie_pmu_validate_event_group(struct perf_event *event)
@@ -337,15 +355,27 @@ static bool hisi_pcie_pmu_validate_event_group(struct perf_event *event)
return false;
for (num = 0; num < counters; num++) {
+ /*
+ * If we find a related event, then it's a valid group
+ * since we don't need to allocate a new counter for it.
+ */
if (hisi_pcie_pmu_cmp_event(event_group[num], sibling))
break;
}
+ /*
+		 * Otherwise it's a new event; if there's no available counter,
+ * fail the check since we cannot schedule all the events in
+ * the group simultaneously.
+ */
+ if (num == HISI_PCIE_MAX_COUNTERS)
+ return false;
+
if (num == counters)
event_group[counters++] = sibling;
}
- return counters <= HISI_PCIE_MAX_COUNTERS;
+ return true;
}
static int hisi_pcie_pmu_event_init(struct perf_event *event)
@@ -353,16 +383,15 @@ static int hisi_pcie_pmu_event_init(struct perf_event *event)
struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
- event->cpu = pcie_pmu->on_cpu;
+ /* Check the type first before going on, otherwise it's not our event */
+ if (event->attr.type != event->pmu->type)
+ return -ENOENT;
if (EXT_COUNTER_IS_USED(hisi_pcie_get_event(event)))
hwc->event_base = HISI_PCIE_EXT_CNT;
else
hwc->event_base = HISI_PCIE_CNT;
- if (event->attr.type != event->pmu->type)
- return -ENOENT;
-
/* Sampling is not supported. */
if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
return -EOPNOTSUPP;
@@ -373,6 +402,8 @@ static int hisi_pcie_pmu_event_init(struct perf_event *event)
if (!hisi_pcie_pmu_validate_event_group(event))
return -EINVAL;
+ event->cpu = pcie_pmu->on_cpu;
+
return 0;
}
@@ -384,40 +415,32 @@ static u64 hisi_pcie_pmu_read_counter(struct perf_event *event)
return hisi_pcie_pmu_readq(pcie_pmu, event->hw.event_base, idx);
}
-static int hisi_pcie_pmu_find_related_event(struct hisi_pcie_pmu *pcie_pmu,
- struct perf_event *event)
+/*
 * Check all working events; if a related event is found, return its counter
 * so it can be shared, otherwise return the first idle counter (which needs
 * to be reset).
+ */
+static int hisi_pcie_pmu_get_event_idx(struct hisi_pcie_pmu *pcie_pmu,
+ struct perf_event *event)
{
+ int first_idle = -EAGAIN;
struct perf_event *sibling;
int idx;
for (idx = 0; idx < HISI_PCIE_MAX_COUNTERS; idx++) {
sibling = pcie_pmu->hw_events[idx];
- if (!sibling)
- continue;
-
- if (!hisi_pcie_pmu_cmp_event(sibling, event))
+ if (!sibling) {
+ if (first_idle == -EAGAIN)
+ first_idle = idx;
continue;
+ }
/* Related events must be used in group */
- if (sibling->group_leader == event->group_leader)
- return idx;
- else
- return -EINVAL;
- }
-
- return idx;
-}
-
-static int hisi_pcie_pmu_get_event_idx(struct hisi_pcie_pmu *pcie_pmu)
-{
- int idx;
-
- for (idx = 0; idx < HISI_PCIE_MAX_COUNTERS; idx++) {
- if (!pcie_pmu->hw_events[idx])
+ if (hisi_pcie_pmu_cmp_event(sibling, event) &&
+ sibling->group_leader == event->group_leader)
return idx;
}
- return -EINVAL;
+ return first_idle;
}
static void hisi_pcie_pmu_event_update(struct perf_event *event)
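
hisi_pcie_pmu_get_event_idx() folds the old two-pass lookup into a single scan: it remembers the first idle slot and returns early only for an already-programmed counter whose full event-control value matches and whose owner shares the group leader. The sketch below mirrors that selection with simplified stand-in types; the match criteria and the -EAGAIN fallback are reduced to plain integers for illustration.

/* Single-pass counter selection: share with a matching group event, else
 * fall back to the first idle slot; types and values are stand-ins.
 */
#include <stdio.h>

#define NR_COUNTERS 4

struct evt { int ctrl_val; int group; };

/* slots[i] == NULL means counter i is idle */
static int get_event_idx(const struct evt *slots[NR_COUNTERS], const struct evt *ev)
{
	int first_idle = -1;	/* -1 mirrors the driver's -EAGAIN */

	for (int idx = 0; idx < NR_COUNTERS; idx++) {
		const struct evt *sibling = slots[idx];

		if (!sibling) {
			if (first_idle < 0)
				first_idle = idx;
			continue;
		}
		/* share only with an identical event from the same group */
		if (sibling->ctrl_val == ev->ctrl_val && sibling->group == ev->group)
			return idx;
	}
	return first_idle;
}

int main(void)
{
	struct evt a = { .ctrl_val = 0x10, .group = 1 };
	struct evt b = { .ctrl_val = 0x10, .group = 1 };	/* same config, same group */
	const struct evt *slots[NR_COUNTERS] = { NULL, &a, NULL, NULL };

	printf("b -> counter %d (shared)\n", get_event_idx(slots, &b));
	return 0;	/* prints: b -> counter 1 (shared) */
}
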
@@ -445,10 +468,24 @@ static void hisi_pcie_pmu_set_period(struct perf_event *event)
struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
+ u64 orig_cnt, cnt;
+
+ orig_cnt = hisi_pcie_pmu_read_counter(event);
local64_set(&hwc->prev_count, HISI_PCIE_INIT_VAL);
hisi_pcie_pmu_writeq(pcie_pmu, HISI_PCIE_CNT, idx, HISI_PCIE_INIT_VAL);
hisi_pcie_pmu_writeq(pcie_pmu, HISI_PCIE_EXT_CNT, idx, HISI_PCIE_INIT_VAL);
+
+ /*
+	 * The counter may be unwritable if the target event is unsupported.
+	 * Check this by comparing the counts before and after setting the
+	 * period. If the count stays unchanged, update hwc->prev_count to
+	 * the read-back value; otherwise the final count seen by the user
+	 * may be totally wrong.
+ */
+ cnt = hisi_pcie_pmu_read_counter(event);
+ if (orig_cnt == cnt)
+ local64_set(&hwc->prev_count, cnt);
}
static void hisi_pcie_pmu_enable_counter(struct hisi_pcie_pmu *pcie_pmu, struct hw_perf_event *hwc)
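
The read-back added to hisi_pcie_pmu_set_period() guards against counters that silently ignore writes when the event is unsupported: if the count did not move after writing the initial value, prev_count is set to the observed count so the eventual delta stays zero rather than being offset by a bogus baseline. A worked example of that arithmetic follows, with invented numbers and an assumed HISI_PCIE_INIT_VAL.

/* Why prev_count must track the read-back value on a read-only counter. */
#include <stdint.h>
#include <stdio.h>

#define INIT_VAL 0x1ULL	/* stands in for HISI_PCIE_INIT_VAL; value assumed */

int main(void)
{
	uint64_t hw = 12345;		/* counter ignores the INIT_VAL write */
	uint64_t prev_naive = INIT_VAL;	/* old behaviour */
	uint64_t prev_fixed = hw;	/* new behaviour: keep the unchanged read-back */

	/* Later update: the counter still reads 12345 (event unsupported). */
	printf("naive delta = %llu, fixed delta = %llu\n",
	       (unsigned long long)(hw - prev_naive),
	       (unsigned long long)(hw - prev_fixed));
	return 0;	/* naive delta = 12344 (bogus), fixed delta = 0 */
}
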
@@ -504,7 +541,7 @@ static void hisi_pcie_pmu_start(struct perf_event *event, int flags)
WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
hwc->state = 0;
- hisi_pcie_pmu_config_filter(event);
+ hisi_pcie_pmu_config_event_ctrl(event);
hisi_pcie_pmu_enable_counter(pcie_pmu, hwc);
hisi_pcie_pmu_enable_int(pcie_pmu, hwc);
hisi_pcie_pmu_set_period(event);
@@ -525,7 +562,7 @@ static void hisi_pcie_pmu_stop(struct perf_event *event, int flags)
hisi_pcie_pmu_event_update(event);
hisi_pcie_pmu_disable_int(pcie_pmu, hwc);
hisi_pcie_pmu_disable_counter(pcie_pmu, hwc);
- hisi_pcie_pmu_clear_filter(event);
+ hisi_pcie_pmu_clear_event_ctrl(event);
WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
hwc->state |= PERF_HES_STOPPED;
@@ -543,27 +580,18 @@ static int hisi_pcie_pmu_add(struct perf_event *event, int flags)
hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
- /* Check all working events to find a related event. */
- idx = hisi_pcie_pmu_find_related_event(pcie_pmu, event);
- if (idx < 0)
- return idx;
-
- /* Current event shares an enabled counter with the related event */
- if (idx < HISI_PCIE_MAX_COUNTERS) {
- hwc->idx = idx;
- goto start_count;
- }
-
- idx = hisi_pcie_pmu_get_event_idx(pcie_pmu);
+ idx = hisi_pcie_pmu_get_event_idx(pcie_pmu, event);
if (idx < 0)
return idx;
hwc->idx = idx;
- pcie_pmu->hw_events[idx] = event;
- /* Reset Counter to avoid previous statistic interference. */
- hisi_pcie_pmu_reset_counter(pcie_pmu, idx);
-start_count:
+	/* No enabled counter with a related event was found; reset this one */
+ if (!pcie_pmu->hw_events[idx]) {
+ hisi_pcie_pmu_reset_counter(pcie_pmu, idx);
+ pcie_pmu->hw_events[idx] = event;
+ }
+
if (flags & PERF_EF_START)
hisi_pcie_pmu_start(event, PERF_EF_RELOAD);
@@ -676,7 +704,6 @@ static int hisi_pcie_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
struct hisi_pcie_pmu *pcie_pmu = hlist_entry_safe(node, struct hisi_pcie_pmu, node);
unsigned int target;
- cpumask_t mask;
int numa_node;
/* Nothing to do if this CPU doesn't own the PMU */
@@ -687,10 +714,10 @@ static int hisi_pcie_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
/* Choose a local CPU from all online cpus. */
numa_node = dev_to_node(&pcie_pmu->pdev->dev);
- if (cpumask_and(&mask, cpumask_of_node(numa_node), cpu_online_mask) &&
- cpumask_andnot(&mask, &mask, cpumask_of(cpu)))
- target = cpumask_any(&mask);
- else
+
+ target = cpumask_any_and_but(cpumask_of_node(numa_node),
+ cpu_online_mask, cpu);
+ if (target >= nr_cpu_ids)
target = cpumask_any_but(cpu_online_mask, cpu);
if (target >= nr_cpu_ids) {
@@ -713,10 +740,18 @@ static struct attribute *hisi_pcie_pmu_events_attr[] = {
HISI_PCIE_PMU_EVENT_ATTR(rx_mrd_cnt, 0x10210),
HISI_PCIE_PMU_EVENT_ATTR(tx_mrd_latency, 0x0011),
HISI_PCIE_PMU_EVENT_ATTR(tx_mrd_cnt, 0x10011),
+ HISI_PCIE_PMU_EVENT_ATTR(rx_mwr_flux, 0x0104),
+ HISI_PCIE_PMU_EVENT_ATTR(rx_mwr_time, 0x10104),
HISI_PCIE_PMU_EVENT_ATTR(rx_mrd_flux, 0x0804),
HISI_PCIE_PMU_EVENT_ATTR(rx_mrd_time, 0x10804),
+ HISI_PCIE_PMU_EVENT_ATTR(rx_cpl_flux, 0x2004),
+ HISI_PCIE_PMU_EVENT_ATTR(rx_cpl_time, 0x12004),
+ HISI_PCIE_PMU_EVENT_ATTR(tx_mwr_flux, 0x0105),
+ HISI_PCIE_PMU_EVENT_ATTR(tx_mwr_time, 0x10105),
HISI_PCIE_PMU_EVENT_ATTR(tx_mrd_flux, 0x0405),
HISI_PCIE_PMU_EVENT_ATTR(tx_mrd_time, 0x10405),
+ HISI_PCIE_PMU_EVENT_ATTR(tx_cpl_flux, 0x1005),
+ HISI_PCIE_PMU_EVENT_ATTR(tx_cpl_time, 0x11005),
NULL
};
@@ -744,6 +779,8 @@ static const struct attribute_group hisi_pcie_pmu_format_group = {
static struct attribute *hisi_pcie_pmu_bus_attrs[] = {
&dev_attr_bus.attr,
+ &dev_attr_bdf_max.attr,
+ &dev_attr_bdf_min.attr,
NULL
};
@@ -802,6 +839,7 @@ static int hisi_pcie_alloc_pmu(struct pci_dev *pdev, struct hisi_pcie_pmu *pcie_
pcie_pmu->pmu = (struct pmu) {
.name = name,
.module = THIS_MODULE,
+ .parent = &pdev->dev,
.event_init = hisi_pcie_pmu_event_init,
.pmu_enable = hisi_pcie_pmu_enable,
.pmu_disable = hisi_pcie_pmu_disable,
diff --git a/drivers/perf/hisilicon/hisi_uncore_cpa_pmu.c b/drivers/perf/hisilicon/hisi_uncore_cpa_pmu.c
index 40f1bc9f9b91..b879b81adfdd 100644
--- a/drivers/perf/hisilicon/hisi_uncore_cpa_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_cpa_pmu.c
@@ -180,20 +180,18 @@ MODULE_DEVICE_TABLE(acpi, hisi_cpa_pmu_acpi_match);
static int hisi_cpa_pmu_init_data(struct platform_device *pdev,
struct hisi_pmu *cpa_pmu)
{
- if (device_property_read_u32(&pdev->dev, "hisilicon,scl-id",
- &cpa_pmu->sicl_id)) {
+ hisi_uncore_pmu_init_topology(cpa_pmu, &pdev->dev);
+
+ if (cpa_pmu->topo.sicl_id < 0) {
dev_err(&pdev->dev, "Can not read sicl-id\n");
return -EINVAL;
}
- if (device_property_read_u32(&pdev->dev, "hisilicon,idx-id",
- &cpa_pmu->index_id)) {
+ if (cpa_pmu->topo.index_id < 0) {
dev_err(&pdev->dev, "Cannot read idx-id\n");
return -EINVAL;
}
- cpa_pmu->ccl_id = -1;
- cpa_pmu->sccl_id = -1;
cpa_pmu->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(cpa_pmu->base))
return PTR_ERR(cpa_pmu->base);
@@ -227,34 +225,11 @@ static const struct attribute_group hisi_cpa_pmu_events_group = {
.attrs = hisi_cpa_pmu_events_attr,
};
-static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL);
-
-static struct attribute *hisi_cpa_pmu_cpumask_attrs[] = {
- &dev_attr_cpumask.attr,
- NULL
-};
-
-static const struct attribute_group hisi_cpa_pmu_cpumask_attr_group = {
- .attrs = hisi_cpa_pmu_cpumask_attrs,
-};
-
-static struct device_attribute hisi_cpa_pmu_identifier_attr =
- __ATTR(identifier, 0444, hisi_uncore_pmu_identifier_attr_show, NULL);
-
-static struct attribute *hisi_cpa_pmu_identifier_attrs[] = {
- &hisi_cpa_pmu_identifier_attr.attr,
- NULL
-};
-
-static const struct attribute_group hisi_cpa_pmu_identifier_group = {
- .attrs = hisi_cpa_pmu_identifier_attrs,
-};
-
static const struct attribute_group *hisi_cpa_pmu_attr_groups[] = {
&hisi_cpa_pmu_format_group,
&hisi_cpa_pmu_events_group,
- &hisi_cpa_pmu_cpumask_attr_group,
- &hisi_cpa_pmu_identifier_group,
+ &hisi_pmu_cpumask_attr_group,
+ &hisi_pmu_identifier_group,
NULL
};
@@ -311,8 +286,8 @@ static int hisi_cpa_pmu_probe(struct platform_device *pdev)
if (ret)
return ret;
- name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sicl%d_cpa%u",
- cpa_pmu->sicl_id, cpa_pmu->index_id);
+ name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sicl%d_cpa%d",
+ cpa_pmu->topo.sicl_id, cpa_pmu->topo.index_id);
if (!name)
return -ENOMEM;
@@ -341,7 +316,7 @@ static int hisi_cpa_pmu_probe(struct platform_device *pdev)
return ret;
}
-static int hisi_cpa_pmu_remove(struct platform_device *pdev)
+static void hisi_cpa_pmu_remove(struct platform_device *pdev)
{
struct hisi_pmu *cpa_pmu = platform_get_drvdata(pdev);
@@ -349,7 +324,6 @@ static int hisi_cpa_pmu_remove(struct platform_device *pdev)
cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_CPA_ONLINE,
&cpa_pmu->node);
hisi_cpa_pmu_enable_pm(cpa_pmu);
- return 0;
}
static struct platform_driver hisi_cpa_pmu_driver = {
@@ -390,6 +364,7 @@ static void __exit hisi_cpa_pmu_module_exit(void)
}
module_exit(hisi_cpa_pmu_module_exit);
+MODULE_IMPORT_NS("HISI_PMU");
MODULE_DESCRIPTION("HiSilicon SoC CPA PMU driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Qi Liu <liuqi115@huawei.com>");
diff --git a/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c b/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
index ffb039d05d07..21c494881ca0 100644
--- a/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
@@ -43,12 +43,21 @@
#define DDRC_V2_EVENT_TYPE 0xe74
#define DDRC_V2_PERF_CTRL 0xeA0
+/* DDRC interrupt registers definition in v3 */
+#define DDRC_V3_INT_MASK 0x534
+#define DDRC_V3_INT_STATUS 0x538
+#define DDRC_V3_INT_CLEAR 0x53C
+
/* DDRC has 8-counters */
#define DDRC_NR_COUNTERS 0x8
#define DDRC_V1_PERF_CTRL_EN 0x2
#define DDRC_V2_PERF_CTRL_EN 0x1
#define DDRC_V1_NR_EVENTS 0x7
-#define DDRC_V2_NR_EVENTS 0x90
+#define DDRC_V2_NR_EVENTS 0xFF
+
+#define DDRC_EVENT_CNTn(base, n) ((base) + (n) * 8)
+#define DDRC_EVENT_TYPEn(base, n) ((base) + (n) * 4)
+#define DDRC_UNIMPLEMENTED_REG GENMASK(31, 0)
/*
* For PMU v1, there are eight-events and every event has been mapped
@@ -63,47 +72,37 @@ static const u32 ddrc_reg_off[] = {
DDRC_PRE_CMD, DDRC_ACT_CMD, DDRC_RNK_CHG, DDRC_RW_CHG
};
-/*
- * Select the counter register offset using the counter index.
- * In PMU v1, there are no programmable counter, the count
- * is read form the statistics counter register itself.
- */
-static u32 hisi_ddrc_pmu_v1_get_counter_offset(int cntr_idx)
-{
- return ddrc_reg_off[cntr_idx];
-}
-
-static u32 hisi_ddrc_pmu_v2_get_counter_offset(int cntr_idx)
-{
- return DDRC_V2_EVENT_CNT + cntr_idx * 8;
-}
+struct hisi_ddrc_pmu_regs {
+ u32 event_cnt;
+ u32 event_ctrl;
+ u32 event_type;
+ u32 perf_ctrl;
+ u32 perf_ctrl_en;
+ u32 int_mask;
+ u32 int_clear;
+ u32 int_status;
+};
-static u64 hisi_ddrc_pmu_v1_read_counter(struct hisi_pmu *ddrc_pmu,
+static u64 hisi_ddrc_pmu_read_counter(struct hisi_pmu *ddrc_pmu,
struct hw_perf_event *hwc)
{
- return readl(ddrc_pmu->base +
- hisi_ddrc_pmu_v1_get_counter_offset(hwc->idx));
-}
+ struct hisi_ddrc_pmu_regs *regs = ddrc_pmu->dev_info->private;
-static void hisi_ddrc_pmu_v1_write_counter(struct hisi_pmu *ddrc_pmu,
- struct hw_perf_event *hwc, u64 val)
-{
- writel((u32)val,
- ddrc_pmu->base + hisi_ddrc_pmu_v1_get_counter_offset(hwc->idx));
-}
+ if (regs->event_cnt == DDRC_UNIMPLEMENTED_REG)
+ return readl(ddrc_pmu->base + ddrc_reg_off[hwc->idx]);
-static u64 hisi_ddrc_pmu_v2_read_counter(struct hisi_pmu *ddrc_pmu,
- struct hw_perf_event *hwc)
-{
- return readq(ddrc_pmu->base +
- hisi_ddrc_pmu_v2_get_counter_offset(hwc->idx));
+ return readq(ddrc_pmu->base + DDRC_EVENT_CNTn(regs->event_cnt, hwc->idx));
}
-static void hisi_ddrc_pmu_v2_write_counter(struct hisi_pmu *ddrc_pmu,
- struct hw_perf_event *hwc, u64 val)
+static void hisi_ddrc_pmu_write_counter(struct hisi_pmu *ddrc_pmu,
+ struct hw_perf_event *hwc, u64 val)
{
- writeq(val,
- ddrc_pmu->base + hisi_ddrc_pmu_v2_get_counter_offset(hwc->idx));
+ struct hisi_ddrc_pmu_regs *regs = ddrc_pmu->dev_info->private;
+
+ if (regs->event_cnt == DDRC_UNIMPLEMENTED_REG)
+ writel((u32)val, ddrc_pmu->base + ddrc_reg_off[hwc->idx]);
+ else
+ writeq(val, ddrc_pmu->base + DDRC_EVENT_CNTn(regs->event_cnt, hwc->idx));
}
/*
@@ -111,57 +110,15 @@ static void hisi_ddrc_pmu_v2_write_counter(struct hisi_pmu *ddrc_pmu,
* so there is no need to write event type, while it is programmable counter in
* PMU v2.
*/
-static void hisi_ddrc_pmu_write_evtype(struct hisi_pmu *hha_pmu, int idx,
+static void hisi_ddrc_pmu_write_evtype(struct hisi_pmu *ddrc_pmu, int idx,
u32 type)
{
- u32 offset;
+ struct hisi_ddrc_pmu_regs *regs = ddrc_pmu->dev_info->private;
- if (hha_pmu->identifier >= HISI_PMU_V2) {
- offset = DDRC_V2_EVENT_TYPE + 4 * idx;
- writel(type, hha_pmu->base + offset);
- }
-}
+ if (regs->event_type == DDRC_UNIMPLEMENTED_REG)
+ return;
-static void hisi_ddrc_pmu_v1_start_counters(struct hisi_pmu *ddrc_pmu)
-{
- u32 val;
-
- /* Set perf_enable in DDRC_PERF_CTRL to start event counting */
- val = readl(ddrc_pmu->base + DDRC_PERF_CTRL);
- val |= DDRC_V1_PERF_CTRL_EN;
- writel(val, ddrc_pmu->base + DDRC_PERF_CTRL);
-}
-
-static void hisi_ddrc_pmu_v1_stop_counters(struct hisi_pmu *ddrc_pmu)
-{
- u32 val;
-
- /* Clear perf_enable in DDRC_PERF_CTRL to stop event counting */
- val = readl(ddrc_pmu->base + DDRC_PERF_CTRL);
- val &= ~DDRC_V1_PERF_CTRL_EN;
- writel(val, ddrc_pmu->base + DDRC_PERF_CTRL);
-}
-
-static void hisi_ddrc_pmu_v1_enable_counter(struct hisi_pmu *ddrc_pmu,
- struct hw_perf_event *hwc)
-{
- u32 val;
-
- /* Set counter index(event code) in DDRC_EVENT_CTRL register */
- val = readl(ddrc_pmu->base + DDRC_EVENT_CTRL);
- val |= (1 << GET_DDRC_EVENTID(hwc));
- writel(val, ddrc_pmu->base + DDRC_EVENT_CTRL);
-}
-
-static void hisi_ddrc_pmu_v1_disable_counter(struct hisi_pmu *ddrc_pmu,
- struct hw_perf_event *hwc)
-{
- u32 val;
-
- /* Clear counter index(event code) in DDRC_EVENT_CTRL register */
- val = readl(ddrc_pmu->base + DDRC_EVENT_CTRL);
- val &= ~(1 << GET_DDRC_EVENTID(hwc));
- writel(val, ddrc_pmu->base + DDRC_EVENT_CTRL);
+ writel(type, ddrc_pmu->base + DDRC_EVENT_TYPEn(regs->event_type, idx));
}
static int hisi_ddrc_pmu_v1_get_event_idx(struct perf_event *event)
@@ -180,140 +137,119 @@ static int hisi_ddrc_pmu_v1_get_event_idx(struct perf_event *event)
return idx;
}
-static int hisi_ddrc_pmu_v2_get_event_idx(struct perf_event *event)
+static int hisi_ddrc_pmu_get_event_idx(struct perf_event *event)
{
+ struct hisi_pmu *ddrc_pmu = to_hisi_pmu(event->pmu);
+ struct hisi_ddrc_pmu_regs *regs = ddrc_pmu->dev_info->private;
+
+ if (regs->event_type == DDRC_UNIMPLEMENTED_REG)
+ return hisi_ddrc_pmu_v1_get_event_idx(event);
+
return hisi_uncore_pmu_get_event_idx(event);
}
-static void hisi_ddrc_pmu_v2_start_counters(struct hisi_pmu *ddrc_pmu)
+static void hisi_ddrc_pmu_start_counters(struct hisi_pmu *ddrc_pmu)
{
+ struct hisi_ddrc_pmu_regs *regs = ddrc_pmu->dev_info->private;
u32 val;
- val = readl(ddrc_pmu->base + DDRC_V2_PERF_CTRL);
- val |= DDRC_V2_PERF_CTRL_EN;
- writel(val, ddrc_pmu->base + DDRC_V2_PERF_CTRL);
+ val = readl(ddrc_pmu->base + regs->perf_ctrl);
+ val |= regs->perf_ctrl_en;
+ writel(val, ddrc_pmu->base + regs->perf_ctrl);
}
-static void hisi_ddrc_pmu_v2_stop_counters(struct hisi_pmu *ddrc_pmu)
+static void hisi_ddrc_pmu_stop_counters(struct hisi_pmu *ddrc_pmu)
{
+ struct hisi_ddrc_pmu_regs *regs = ddrc_pmu->dev_info->private;
u32 val;
- val = readl(ddrc_pmu->base + DDRC_V2_PERF_CTRL);
- val &= ~DDRC_V2_PERF_CTRL_EN;
- writel(val, ddrc_pmu->base + DDRC_V2_PERF_CTRL);
+ val = readl(ddrc_pmu->base + regs->perf_ctrl);
+ val &= ~regs->perf_ctrl_en;
+ writel(val, ddrc_pmu->base + regs->perf_ctrl);
}
-static void hisi_ddrc_pmu_v2_enable_counter(struct hisi_pmu *ddrc_pmu,
+static void hisi_ddrc_pmu_enable_counter(struct hisi_pmu *ddrc_pmu,
struct hw_perf_event *hwc)
{
+ struct hisi_ddrc_pmu_regs *regs = ddrc_pmu->dev_info->private;
u32 val;
- val = readl(ddrc_pmu->base + DDRC_V2_EVENT_CTRL);
- val |= 1 << hwc->idx;
- writel(val, ddrc_pmu->base + DDRC_V2_EVENT_CTRL);
+ val = readl(ddrc_pmu->base + regs->event_ctrl);
+ val |= BIT_ULL(hwc->idx);
+ writel(val, ddrc_pmu->base + regs->event_ctrl);
}
-static void hisi_ddrc_pmu_v2_disable_counter(struct hisi_pmu *ddrc_pmu,
+static void hisi_ddrc_pmu_disable_counter(struct hisi_pmu *ddrc_pmu,
struct hw_perf_event *hwc)
{
+ struct hisi_ddrc_pmu_regs *regs = ddrc_pmu->dev_info->private;
u32 val;
- val = readl(ddrc_pmu->base + DDRC_V2_EVENT_CTRL);
- val &= ~(1 << hwc->idx);
- writel(val, ddrc_pmu->base + DDRC_V2_EVENT_CTRL);
-}
-
-static void hisi_ddrc_pmu_v1_enable_counter_int(struct hisi_pmu *ddrc_pmu,
- struct hw_perf_event *hwc)
-{
- u32 val;
-
- /* Write 0 to enable interrupt */
- val = readl(ddrc_pmu->base + DDRC_INT_MASK);
- val &= ~(1 << hwc->idx);
- writel(val, ddrc_pmu->base + DDRC_INT_MASK);
-}
-
-static void hisi_ddrc_pmu_v1_disable_counter_int(struct hisi_pmu *ddrc_pmu,
- struct hw_perf_event *hwc)
-{
- u32 val;
-
- /* Write 1 to mask interrupt */
- val = readl(ddrc_pmu->base + DDRC_INT_MASK);
- val |= 1 << hwc->idx;
- writel(val, ddrc_pmu->base + DDRC_INT_MASK);
+ val = readl(ddrc_pmu->base + regs->event_ctrl);
+ val &= ~BIT_ULL(hwc->idx);
+ writel(val, ddrc_pmu->base + regs->event_ctrl);
}
-static void hisi_ddrc_pmu_v2_enable_counter_int(struct hisi_pmu *ddrc_pmu,
- struct hw_perf_event *hwc)
+static void hisi_ddrc_pmu_enable_counter_int(struct hisi_pmu *ddrc_pmu,
+ struct hw_perf_event *hwc)
{
+ struct hisi_ddrc_pmu_regs *regs = ddrc_pmu->dev_info->private;
u32 val;
- val = readl(ddrc_pmu->base + DDRC_V2_INT_MASK);
- val &= ~(1 << hwc->idx);
- writel(val, ddrc_pmu->base + DDRC_V2_INT_MASK);
+ val = readl(ddrc_pmu->base + regs->int_mask);
+ val &= ~BIT_ULL(hwc->idx);
+ writel(val, ddrc_pmu->base + regs->int_mask);
}
-static void hisi_ddrc_pmu_v2_disable_counter_int(struct hisi_pmu *ddrc_pmu,
- struct hw_perf_event *hwc)
+static void hisi_ddrc_pmu_disable_counter_int(struct hisi_pmu *ddrc_pmu,
+ struct hw_perf_event *hwc)
{
+ struct hisi_ddrc_pmu_regs *regs = ddrc_pmu->dev_info->private;
u32 val;
- val = readl(ddrc_pmu->base + DDRC_V2_INT_MASK);
- val |= 1 << hwc->idx;
- writel(val, ddrc_pmu->base + DDRC_V2_INT_MASK);
+ val = readl(ddrc_pmu->base + regs->int_mask);
+ val |= BIT_ULL(hwc->idx);
+ writel(val, ddrc_pmu->base + regs->int_mask);
}
-static u32 hisi_ddrc_pmu_v1_get_int_status(struct hisi_pmu *ddrc_pmu)
+static u32 hisi_ddrc_pmu_get_int_status(struct hisi_pmu *ddrc_pmu)
{
- return readl(ddrc_pmu->base + DDRC_INT_STATUS);
-}
+ struct hisi_ddrc_pmu_regs *regs = ddrc_pmu->dev_info->private;
-static void hisi_ddrc_pmu_v1_clear_int_status(struct hisi_pmu *ddrc_pmu,
- int idx)
-{
- writel(1 << idx, ddrc_pmu->base + DDRC_INT_CLEAR);
+ return readl(ddrc_pmu->base + regs->int_status);
}
-static u32 hisi_ddrc_pmu_v2_get_int_status(struct hisi_pmu *ddrc_pmu)
+static void hisi_ddrc_pmu_clear_int_status(struct hisi_pmu *ddrc_pmu,
+ int idx)
{
- return readl(ddrc_pmu->base + DDRC_V2_INT_STATUS);
-}
+ struct hisi_ddrc_pmu_regs *regs = ddrc_pmu->dev_info->private;
-static void hisi_ddrc_pmu_v2_clear_int_status(struct hisi_pmu *ddrc_pmu,
- int idx)
-{
- writel(1 << idx, ddrc_pmu->base + DDRC_V2_INT_CLEAR);
+ writel(1 << idx, ddrc_pmu->base + regs->int_clear);
}
-static const struct acpi_device_id hisi_ddrc_pmu_acpi_match[] = {
- { "HISI0233", },
- { "HISI0234", },
- {}
-};
-MODULE_DEVICE_TABLE(acpi, hisi_ddrc_pmu_acpi_match);
-
static int hisi_ddrc_pmu_init_data(struct platform_device *pdev,
struct hisi_pmu *ddrc_pmu)
{
+ hisi_uncore_pmu_init_topology(ddrc_pmu, &pdev->dev);
+
/*
* Use the SCCL_ID and DDRC channel ID to identify the
* DDRC PMU, while SCCL_ID is in MPIDR[aff2].
*/
if (device_property_read_u32(&pdev->dev, "hisilicon,ch-id",
- &ddrc_pmu->index_id)) {
+ &ddrc_pmu->topo.index_id)) {
dev_err(&pdev->dev, "Can not read ddrc channel-id!\n");
return -EINVAL;
}
- if (device_property_read_u32(&pdev->dev, "hisilicon,scl-id",
- &ddrc_pmu->sccl_id)) {
+ if (ddrc_pmu->topo.sccl_id < 0) {
dev_err(&pdev->dev, "Can not read ddrc sccl-id!\n");
return -EINVAL;
}
- /* DDRC PMUs only share the same SCCL */
- ddrc_pmu->ccl_id = -1;
+
+ ddrc_pmu->dev_info = device_get_match_data(&pdev->dev);
+ if (!ddrc_pmu->dev_info)
+ return -ENODEV;
ddrc_pmu->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ddrc_pmu->base)) {
@@ -323,8 +259,7 @@ static int hisi_ddrc_pmu_init_data(struct platform_device *pdev,
ddrc_pmu->identifier = readl(ddrc_pmu->base + DDRC_VERSION);
if (ddrc_pmu->identifier >= HISI_PMU_V2) {
- if (device_property_read_u32(&pdev->dev, "hisilicon,sub-id",
- &ddrc_pmu->sub_id)) {
+ if (ddrc_pmu->topo.sub_id < 0) {
dev_err(&pdev->dev, "Can not read sub-id!\n");
return -EINVAL;
}
@@ -382,73 +317,35 @@ static const struct attribute_group hisi_ddrc_pmu_v2_events_group = {
.attrs = hisi_ddrc_pmu_v2_events_attr,
};
-static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL);
-
-static struct attribute *hisi_ddrc_pmu_cpumask_attrs[] = {
- &dev_attr_cpumask.attr,
- NULL,
-};
-
-static const struct attribute_group hisi_ddrc_pmu_cpumask_attr_group = {
- .attrs = hisi_ddrc_pmu_cpumask_attrs,
-};
-
-static struct device_attribute hisi_ddrc_pmu_identifier_attr =
- __ATTR(identifier, 0444, hisi_uncore_pmu_identifier_attr_show, NULL);
-
-static struct attribute *hisi_ddrc_pmu_identifier_attrs[] = {
- &hisi_ddrc_pmu_identifier_attr.attr,
- NULL
-};
-
-static const struct attribute_group hisi_ddrc_pmu_identifier_group = {
- .attrs = hisi_ddrc_pmu_identifier_attrs,
-};
-
static const struct attribute_group *hisi_ddrc_pmu_v1_attr_groups[] = {
&hisi_ddrc_pmu_v1_format_group,
&hisi_ddrc_pmu_v1_events_group,
- &hisi_ddrc_pmu_cpumask_attr_group,
- &hisi_ddrc_pmu_identifier_group,
+ &hisi_pmu_cpumask_attr_group,
+ &hisi_pmu_identifier_group,
NULL,
};
static const struct attribute_group *hisi_ddrc_pmu_v2_attr_groups[] = {
&hisi_ddrc_pmu_v2_format_group,
&hisi_ddrc_pmu_v2_events_group,
- &hisi_ddrc_pmu_cpumask_attr_group,
- &hisi_ddrc_pmu_identifier_group,
+ &hisi_pmu_cpumask_attr_group,
+ &hisi_pmu_identifier_group,
NULL
};
-static const struct hisi_uncore_ops hisi_uncore_ddrc_v1_ops = {
+static const struct hisi_uncore_ops hisi_uncore_ddrc_ops = {
.write_evtype = hisi_ddrc_pmu_write_evtype,
- .get_event_idx = hisi_ddrc_pmu_v1_get_event_idx,
- .start_counters = hisi_ddrc_pmu_v1_start_counters,
- .stop_counters = hisi_ddrc_pmu_v1_stop_counters,
- .enable_counter = hisi_ddrc_pmu_v1_enable_counter,
- .disable_counter = hisi_ddrc_pmu_v1_disable_counter,
- .enable_counter_int = hisi_ddrc_pmu_v1_enable_counter_int,
- .disable_counter_int = hisi_ddrc_pmu_v1_disable_counter_int,
- .write_counter = hisi_ddrc_pmu_v1_write_counter,
- .read_counter = hisi_ddrc_pmu_v1_read_counter,
- .get_int_status = hisi_ddrc_pmu_v1_get_int_status,
- .clear_int_status = hisi_ddrc_pmu_v1_clear_int_status,
-};
-
-static const struct hisi_uncore_ops hisi_uncore_ddrc_v2_ops = {
- .write_evtype = hisi_ddrc_pmu_write_evtype,
- .get_event_idx = hisi_ddrc_pmu_v2_get_event_idx,
- .start_counters = hisi_ddrc_pmu_v2_start_counters,
- .stop_counters = hisi_ddrc_pmu_v2_stop_counters,
- .enable_counter = hisi_ddrc_pmu_v2_enable_counter,
- .disable_counter = hisi_ddrc_pmu_v2_disable_counter,
- .enable_counter_int = hisi_ddrc_pmu_v2_enable_counter_int,
- .disable_counter_int = hisi_ddrc_pmu_v2_disable_counter_int,
- .write_counter = hisi_ddrc_pmu_v2_write_counter,
- .read_counter = hisi_ddrc_pmu_v2_read_counter,
- .get_int_status = hisi_ddrc_pmu_v2_get_int_status,
- .clear_int_status = hisi_ddrc_pmu_v2_clear_int_status,
+ .get_event_idx = hisi_ddrc_pmu_get_event_idx,
+ .start_counters = hisi_ddrc_pmu_start_counters,
+ .stop_counters = hisi_ddrc_pmu_stop_counters,
+ .enable_counter = hisi_ddrc_pmu_enable_counter,
+ .disable_counter = hisi_ddrc_pmu_disable_counter,
+ .enable_counter_int = hisi_ddrc_pmu_enable_counter_int,
+ .disable_counter_int = hisi_ddrc_pmu_disable_counter_int,
+ .write_counter = hisi_ddrc_pmu_write_counter,
+ .read_counter = hisi_ddrc_pmu_read_counter,
+ .get_int_status = hisi_ddrc_pmu_get_int_status,
+ .clear_int_status = hisi_ddrc_pmu_clear_int_status,
};
static int hisi_ddrc_pmu_dev_probe(struct platform_device *pdev,
@@ -464,18 +361,10 @@ static int hisi_ddrc_pmu_dev_probe(struct platform_device *pdev,
if (ret)
return ret;
- if (ddrc_pmu->identifier >= HISI_PMU_V2) {
- ddrc_pmu->counter_bits = 48;
- ddrc_pmu->check_event = DDRC_V2_NR_EVENTS;
- ddrc_pmu->pmu_events.attr_groups = hisi_ddrc_pmu_v2_attr_groups;
- ddrc_pmu->ops = &hisi_uncore_ddrc_v2_ops;
- } else {
- ddrc_pmu->counter_bits = 32;
- ddrc_pmu->check_event = DDRC_V1_NR_EVENTS;
- ddrc_pmu->pmu_events.attr_groups = hisi_ddrc_pmu_v1_attr_groups;
- ddrc_pmu->ops = &hisi_uncore_ddrc_v1_ops;
- }
-
+ ddrc_pmu->pmu_events.attr_groups = ddrc_pmu->dev_info->attr_groups;
+ ddrc_pmu->counter_bits = ddrc_pmu->dev_info->counter_bits;
+ ddrc_pmu->check_event = ddrc_pmu->dev_info->check_event;
+ ddrc_pmu->ops = &hisi_uncore_ddrc_ops;
ddrc_pmu->num_counters = DDRC_NR_COUNTERS;
ddrc_pmu->dev = &pdev->dev;
ddrc_pmu->on_cpu = -1;
@@ -501,13 +390,13 @@ static int hisi_ddrc_pmu_probe(struct platform_device *pdev)
if (ddrc_pmu->identifier >= HISI_PMU_V2)
name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
- "hisi_sccl%u_ddrc%u_%u",
- ddrc_pmu->sccl_id, ddrc_pmu->index_id,
- ddrc_pmu->sub_id);
+ "hisi_sccl%d_ddrc%d_%d",
+ ddrc_pmu->topo.sccl_id, ddrc_pmu->topo.index_id,
+ ddrc_pmu->topo.sub_id);
else
name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
- "hisi_sccl%u_ddrc%u", ddrc_pmu->sccl_id,
- ddrc_pmu->index_id);
+ "hisi_sccl%d_ddrc%d", ddrc_pmu->topo.sccl_id,
+ ddrc_pmu->topo.index_id);
if (!name)
return -ENOMEM;
@@ -531,16 +420,77 @@ static int hisi_ddrc_pmu_probe(struct platform_device *pdev)
return ret;
}
-static int hisi_ddrc_pmu_remove(struct platform_device *pdev)
+static void hisi_ddrc_pmu_remove(struct platform_device *pdev)
{
struct hisi_pmu *ddrc_pmu = platform_get_drvdata(pdev);
perf_pmu_unregister(&ddrc_pmu->pmu);
cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE,
&ddrc_pmu->node);
- return 0;
}
+static struct hisi_ddrc_pmu_regs hisi_ddrc_v1_pmu_regs = {
+ .event_cnt = DDRC_UNIMPLEMENTED_REG,
+ .event_ctrl = DDRC_EVENT_CTRL,
+ .event_type = DDRC_UNIMPLEMENTED_REG,
+ .perf_ctrl = DDRC_PERF_CTRL,
+ .perf_ctrl_en = DDRC_V1_PERF_CTRL_EN,
+ .int_mask = DDRC_INT_MASK,
+ .int_clear = DDRC_INT_CLEAR,
+ .int_status = DDRC_INT_STATUS,
+};
+
+static const struct hisi_pmu_dev_info hisi_ddrc_v1 = {
+ .counter_bits = 32,
+ .check_event = DDRC_V1_NR_EVENTS,
+ .attr_groups = hisi_ddrc_pmu_v1_attr_groups,
+ .private = &hisi_ddrc_v1_pmu_regs,
+};
+
+static struct hisi_ddrc_pmu_regs hisi_ddrc_v2_pmu_regs = {
+ .event_cnt = DDRC_V2_EVENT_CNT,
+ .event_ctrl = DDRC_V2_EVENT_CTRL,
+ .event_type = DDRC_V2_EVENT_TYPE,
+ .perf_ctrl = DDRC_V2_PERF_CTRL,
+ .perf_ctrl_en = DDRC_V2_PERF_CTRL_EN,
+ .int_mask = DDRC_V2_INT_MASK,
+ .int_clear = DDRC_V2_INT_CLEAR,
+ .int_status = DDRC_V2_INT_STATUS,
+};
+
+static const struct hisi_pmu_dev_info hisi_ddrc_v2 = {
+ .counter_bits = 48,
+ .check_event = DDRC_V2_NR_EVENTS,
+ .attr_groups = hisi_ddrc_pmu_v2_attr_groups,
+ .private = &hisi_ddrc_v2_pmu_regs,
+};
+
+static struct hisi_ddrc_pmu_regs hisi_ddrc_v3_pmu_regs = {
+ .event_cnt = DDRC_V2_EVENT_CNT,
+ .event_ctrl = DDRC_V2_EVENT_CTRL,
+ .event_type = DDRC_V2_EVENT_TYPE,
+ .perf_ctrl = DDRC_V2_PERF_CTRL,
+ .perf_ctrl_en = DDRC_V2_PERF_CTRL_EN,
+ .int_mask = DDRC_V3_INT_MASK,
+ .int_clear = DDRC_V3_INT_CLEAR,
+ .int_status = DDRC_V3_INT_STATUS,
+};
+
+static const struct hisi_pmu_dev_info hisi_ddrc_v3 = {
+ .counter_bits = 48,
+ .check_event = DDRC_V2_NR_EVENTS,
+ .attr_groups = hisi_ddrc_pmu_v2_attr_groups,
+ .private = &hisi_ddrc_v3_pmu_regs,
+};
+
+static const struct acpi_device_id hisi_ddrc_pmu_acpi_match[] = {
+ { "HISI0233", (kernel_ulong_t)&hisi_ddrc_v1 },
+ { "HISI0234", (kernel_ulong_t)&hisi_ddrc_v2 },
+ { "HISI0235", (kernel_ulong_t)&hisi_ddrc_v3 },
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, hisi_ddrc_pmu_acpi_match);
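+/*
+ * A sketch of how the match data above is consumed, following the probe path
+ * shown earlier: device_get_match_data() returns the hisi_pmu_dev_info for
+ * the matched _HID and its ->private register layout drives the single set
+ * of shared ops. For a "HISI0233" (v1) part, ->private points at
+ * hisi_ddrc_v1_pmu_regs, whose event_type is DDRC_UNIMPLEMENTED_REG, so
+ * hisi_ddrc_pmu_get_event_idx() falls back to the v1 allocator, while
+ * "HISI0234"/"HISI0235" parts take the common hisi_uncore_pmu path.
+ */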
+
static struct platform_driver hisi_ddrc_pmu_driver = {
.driver = {
.name = "hisi_ddrc_pmu",
@@ -576,10 +526,10 @@ static void __exit hisi_ddrc_pmu_module_exit(void)
{
platform_driver_unregister(&hisi_ddrc_pmu_driver);
cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE);
-
}
module_exit(hisi_ddrc_pmu_module_exit);
+MODULE_IMPORT_NS("HISI_PMU");
MODULE_DESCRIPTION("HiSilicon SoC DDRC uncore PMU driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Shaokun Zhang <zhangshaokun@hisilicon.com>");
diff --git a/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c b/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c
index 15caf99e1eef..97cfaa586a87 100644
--- a/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c
@@ -47,9 +47,9 @@
#define HHA_SRCID_CMD GENMASK(16, 6)
#define HHA_SRCID_MSK GENMASK(30, 20)
#define HHA_DATSRC_SKT_EN BIT(23)
-#define HHA_EVTYPE_NONE 0xff
+#define HHA_EVTYPE_MASK GENMASK(7, 0)
#define HHA_V1_NR_EVENT 0x65
-#define HHA_V2_NR_EVENT 0xCE
+#define HHA_V2_NR_EVENT 0xFF
HISI_PMU_EVENT_ATTR_EXTRACTOR(srcid_cmd, config1, 10, 0);
HISI_PMU_EVENT_ATTR_EXTRACTOR(srcid_msk, config1, 21, 11);
@@ -197,7 +197,7 @@ static void hisi_hha_pmu_write_evtype(struct hisi_pmu *hha_pmu, int idx,
/* Write event code to HHA_EVENT_TYPEx register */
val = readl(hha_pmu->base + reg);
- val &= ~(HHA_EVTYPE_NONE << shift);
+ val &= ~(HHA_EVTYPE_MASK << shift);
val |= (type << shift);
writel(val, hha_pmu->base + reg);
}
@@ -295,12 +295,13 @@ static int hisi_hha_pmu_init_data(struct platform_device *pdev,
unsigned long long id;
acpi_status status;
+ hisi_uncore_pmu_init_topology(hha_pmu, &pdev->dev);
+
/*
* Use SCCL_ID and UID to identify the HHA PMU, while
* SCCL_ID is in MPIDR[aff2].
*/
- if (device_property_read_u32(&pdev->dev, "hisilicon,scl-id",
- &hha_pmu->sccl_id)) {
+ if (hha_pmu->topo.sccl_id < 0) {
dev_err(&pdev->dev, "Can not read hha sccl-id!\n");
return -EINVAL;
}
@@ -309,8 +310,7 @@ static int hisi_hha_pmu_init_data(struct platform_device *pdev,
* Early versions of BIOS support _UID by mistake, so we support
* both "hisilicon, idx-id" as preference, if available.
*/
- if (device_property_read_u32(&pdev->dev, "hisilicon,idx-id",
- &hha_pmu->index_id)) {
+ if (hha_pmu->topo.index_id < 0) {
status = acpi_evaluate_integer(ACPI_HANDLE(&pdev->dev),
"_UID", NULL, &id);
if (ACPI_FAILURE(status)) {
@@ -318,10 +318,8 @@ static int hisi_hha_pmu_init_data(struct platform_device *pdev,
return -EINVAL;
}
- hha_pmu->index_id = id;
+ hha_pmu->topo.index_id = id;
}
- /* HHA PMUs only share the same SCCL */
- hha_pmu->ccl_id = -1;
hha_pmu->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(hha_pmu->base)) {
@@ -407,42 +405,19 @@ static const struct attribute_group hisi_hha_pmu_v2_events_group = {
.attrs = hisi_hha_pmu_v2_events_attr,
};
-static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL);
-
-static struct attribute *hisi_hha_pmu_cpumask_attrs[] = {
- &dev_attr_cpumask.attr,
- NULL,
-};
-
-static const struct attribute_group hisi_hha_pmu_cpumask_attr_group = {
- .attrs = hisi_hha_pmu_cpumask_attrs,
-};
-
-static struct device_attribute hisi_hha_pmu_identifier_attr =
- __ATTR(identifier, 0444, hisi_uncore_pmu_identifier_attr_show, NULL);
-
-static struct attribute *hisi_hha_pmu_identifier_attrs[] = {
- &hisi_hha_pmu_identifier_attr.attr,
- NULL
-};
-
-static const struct attribute_group hisi_hha_pmu_identifier_group = {
- .attrs = hisi_hha_pmu_identifier_attrs,
-};
-
static const struct attribute_group *hisi_hha_pmu_v1_attr_groups[] = {
&hisi_hha_pmu_v1_format_group,
&hisi_hha_pmu_v1_events_group,
- &hisi_hha_pmu_cpumask_attr_group,
- &hisi_hha_pmu_identifier_group,
+ &hisi_pmu_cpumask_attr_group,
+ &hisi_pmu_identifier_group,
NULL,
};
static const struct attribute_group *hisi_hha_pmu_v2_attr_groups[] = {
&hisi_hha_pmu_v2_format_group,
&hisi_hha_pmu_v2_events_group,
- &hisi_hha_pmu_cpumask_attr_group,
- &hisi_hha_pmu_identifier_group,
+ &hisi_pmu_cpumask_attr_group,
+ &hisi_pmu_identifier_group,
NULL
};
@@ -510,8 +485,8 @@ static int hisi_hha_pmu_probe(struct platform_device *pdev)
if (ret)
return ret;
- name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%u_hha%u",
- hha_pmu->sccl_id, hha_pmu->index_id);
+ name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%d_hha%d",
+ hha_pmu->topo.sccl_id, hha_pmu->topo.index_id);
if (!name)
return -ENOMEM;
@@ -534,14 +509,13 @@ static int hisi_hha_pmu_probe(struct platform_device *pdev)
return ret;
}
-static int hisi_hha_pmu_remove(struct platform_device *pdev)
+static void hisi_hha_pmu_remove(struct platform_device *pdev)
{
struct hisi_pmu *hha_pmu = platform_get_drvdata(pdev);
perf_pmu_unregister(&hha_pmu->pmu);
cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE,
&hha_pmu->node);
- return 0;
}
static struct platform_driver hisi_hha_pmu_driver = {
@@ -582,6 +556,7 @@ static void __exit hisi_hha_pmu_module_exit(void)
}
module_exit(hisi_hha_pmu_module_exit);
+MODULE_IMPORT_NS("HISI_PMU");
MODULE_DESCRIPTION("HiSilicon SoC HHA uncore PMU driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Shaokun Zhang <zhangshaokun@hisilicon.com>");
diff --git a/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c b/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
index 794dbcd19b7a..f963e4f9e552 100644
--- a/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
@@ -39,6 +39,7 @@
/* L3C has 8-counters */
#define L3C_NR_COUNTERS 0x8
+#define L3C_MAX_EXT 2
#define L3C_PERF_CTRL_EN 0x10000
#define L3C_TRACETAG_EN BIT(31)
@@ -55,59 +56,172 @@
#define L3C_V1_NR_EVENTS 0x59
#define L3C_V2_NR_EVENTS 0xFF
-HISI_PMU_EVENT_ATTR_EXTRACTOR(tt_core, config1, 7, 0);
+HISI_PMU_EVENT_ATTR_EXTRACTOR(ext, config, 17, 16);
+/*
+ * Retain config1:0-7 for backward compatibility, in case existing users
+ * hardcode config1:0-7 directly instead of parsing the sysfs format attribute.
+ */
+HISI_PMU_EVENT_ATTR_EXTRACTOR(tt_core_deprecated, config1, 7, 0);
HISI_PMU_EVENT_ATTR_EXTRACTOR(tt_req, config1, 10, 8);
HISI_PMU_EVENT_ATTR_EXTRACTOR(datasrc_cfg, config1, 15, 11);
HISI_PMU_EVENT_ATTR_EXTRACTOR(datasrc_skt, config1, 16, 16);
+HISI_PMU_EVENT_ATTR_EXTRACTOR(tt_core, config2, 15, 0);
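+/*
+ * Usage sketch for the two fields above (instance name and values are
+ * examples only):
+ *
+ *	perf stat -e hisi_sccl1_l3c0/event=0x0,tt_core=0x3/
+ *
+ * takes the new config2 field, while a user who hardcoded the raw bits, e.g.
+ *
+ *	perf stat -e hisi_sccl1_l3c0/event=0x0,config1=0x3/
+ *
+ * is still honoured through hisi_l3c_pmu_get_tt_core() below.
+ */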
-static void hisi_l3c_pmu_config_req_tracetag(struct perf_event *event)
+struct hisi_l3c_pmu {
+ struct hisi_pmu l3c_pmu;
+
+ /* MMIO and IRQ resources for extension events */
+ void __iomem *ext_base[L3C_MAX_EXT];
+ int ext_irq[L3C_MAX_EXT];
+ int ext_num;
+};
+
+#define to_hisi_l3c_pmu(_l3c_pmu) \
+ container_of(_l3c_pmu, struct hisi_l3c_pmu, l3c_pmu)
+
+/*
+ * The hardware counter idx used in counter enable/disable,
+ * interrupt enable/disable and status check, etc.
+ */
+#define L3C_HW_IDX(_cntr_idx) ((_cntr_idx) % L3C_NR_COUNTERS)
+
+/* Range of ext counters in used mask. */
+#define L3C_CNTR_EXT_L(_ext) (((_ext) + 1) * L3C_NR_COUNTERS)
+#define L3C_CNTR_EXT_H(_ext) (((_ext) + 2) * L3C_NR_COUNTERS)
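+/*
+ * A worked example with L3C_NR_COUNTERS == 8: software idx 0-7 map to the
+ * normal block, idx 8-15 to ext region 0 and idx 16-23 to ext region 1,
+ * while L3C_HW_IDX() folds each of them back to hardware counter 0-7
+ * within its own block, e.g. L3C_HW_IDX(17) == 1.
+ */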
+
+struct hisi_l3c_pmu_ext {
+ bool support_ext;
+};
+
+static bool support_ext(struct hisi_l3c_pmu *pmu)
+{
+ struct hisi_l3c_pmu_ext *l3c_pmu_ext = pmu->l3c_pmu.dev_info->private;
+
+ return l3c_pmu_ext->support_ext;
+}
+
+/*
+ * tt_core was extended to cover all the CPUs sharing the L3 and was moved
+ * from config1:0-7 to config2:0-15. Try it first and fall back to
+ * tt_core_deprecated if the user is still using the deprecated field.
+ */
+static u32 hisi_l3c_pmu_get_tt_core(struct perf_event *event)
+{
+ u32 core = hisi_get_tt_core(event);
+
+ if (core)
+ return core;
+
+ return hisi_get_tt_core_deprecated(event);
+}
+
+static int hisi_l3c_pmu_get_event_idx(struct perf_event *event)
{
struct hisi_pmu *l3c_pmu = to_hisi_pmu(event->pmu);
+ struct hisi_l3c_pmu *hisi_l3c_pmu = to_hisi_l3c_pmu(l3c_pmu);
+ unsigned long *used_mask = l3c_pmu->pmu_events.used_mask;
+ int ext = hisi_get_ext(event);
+ int idx;
+
+ /*
+ * For an L3C PMU that supports extension events, we can monitor at most
+ * 2 * num_counters to 3 * num_counters events, depending on the number
+ * of ext regions supported by the hardware. Thus use bits
+ * [0, num_counters - 1] for normal events and bits
+ * [ext * num_counters, (ext + 1) * num_counters - 1] for extension
+ * events. The idx allocation remains unchanged for normal events, and
+ * the idx can also be used to tell whether an event is an extension
+ * event or not.
+ *
+ * Since normal events and extension events live in different address
+ * spaces, save the base address in event->hw.event_base.
+ */
+ if (ext && !support_ext(hisi_l3c_pmu))
+ return -EOPNOTSUPP;
+
+ if (ext)
+ event->hw.event_base = (unsigned long)hisi_l3c_pmu->ext_base[ext - 1];
+ else
+ event->hw.event_base = (unsigned long)l3c_pmu->base;
+
+ ext -= 1;
+ idx = find_next_zero_bit(used_mask, L3C_CNTR_EXT_H(ext), L3C_CNTR_EXT_L(ext));
+
+ if (idx >= L3C_CNTR_EXT_H(ext))
+ return -EAGAIN;
+
+ set_bit(idx, used_mask);
+
+ return idx;
+}
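+/*
+ * Allocation sketch for the function above: after the "ext -= 1" adjustment
+ * a normal event (ext == 0) searches used_mask bits [0, 8), an ext=1 event
+ * searches bits [8, 16) and an ext=2 event bits [16, 24). With two ext
+ * regions the PMU therefore exposes 3 * L3C_NR_COUNTERS == 24 schedulable
+ * counters in total.
+ */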
+
+static u32 hisi_l3c_pmu_event_readl(struct hw_perf_event *hwc, u32 reg)
+{
+ return readl((void __iomem *)hwc->event_base + reg);
+}
+
+static void hisi_l3c_pmu_event_writel(struct hw_perf_event *hwc, u32 reg, u32 val)
+{
+ writel(val, (void __iomem *)hwc->event_base + reg);
+}
+
+static u64 hisi_l3c_pmu_event_readq(struct hw_perf_event *hwc, u32 reg)
+{
+ return readq((void __iomem *)hwc->event_base + reg);
+}
+
+static void hisi_l3c_pmu_event_writeq(struct hw_perf_event *hwc, u32 reg, u64 val)
+{
+ writeq(val, (void __iomem *)hwc->event_base + reg);
+}
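+/*
+ * The helpers above mirror readl()/writel()/readq()/writeq() but resolve the
+ * MMIO base from hwc->event_base, which hisi_l3c_pmu_get_event_idx() points
+ * at either l3c_pmu->base or one of ext_base[]. This lets the filter and
+ * counter routines below use the same register offsets for both blocks.
+ */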
+
+static void hisi_l3c_pmu_config_req_tracetag(struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
u32 tt_req = hisi_get_tt_req(event);
if (tt_req) {
u32 val;
/* Set request-type for tracetag */
- val = readl(l3c_pmu->base + L3C_TRACETAG_CTRL);
+ val = hisi_l3c_pmu_event_readl(hwc, L3C_TRACETAG_CTRL);
val |= tt_req << L3C_TRACETAG_REQ_SHIFT;
val |= L3C_TRACETAG_REQ_EN;
- writel(val, l3c_pmu->base + L3C_TRACETAG_CTRL);
+ hisi_l3c_pmu_event_writel(hwc, L3C_TRACETAG_CTRL, val);
/* Enable request-tracetag statistics */
- val = readl(l3c_pmu->base + L3C_PERF_CTRL);
+ val = hisi_l3c_pmu_event_readl(hwc, L3C_PERF_CTRL);
val |= L3C_TRACETAG_EN;
- writel(val, l3c_pmu->base + L3C_PERF_CTRL);
+ hisi_l3c_pmu_event_writel(hwc, L3C_PERF_CTRL, val);
}
}
static void hisi_l3c_pmu_clear_req_tracetag(struct perf_event *event)
{
- struct hisi_pmu *l3c_pmu = to_hisi_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
u32 tt_req = hisi_get_tt_req(event);
if (tt_req) {
u32 val;
/* Clear request-type */
- val = readl(l3c_pmu->base + L3C_TRACETAG_CTRL);
+ val = hisi_l3c_pmu_event_readl(hwc, L3C_TRACETAG_CTRL);
val &= ~(tt_req << L3C_TRACETAG_REQ_SHIFT);
val &= ~L3C_TRACETAG_REQ_EN;
- writel(val, l3c_pmu->base + L3C_TRACETAG_CTRL);
+ hisi_l3c_pmu_event_writel(hwc, L3C_TRACETAG_CTRL, val);
/* Disable request-tracetag statistics */
- val = readl(l3c_pmu->base + L3C_PERF_CTRL);
+ val = hisi_l3c_pmu_event_readl(hwc, L3C_PERF_CTRL);
val &= ~L3C_TRACETAG_EN;
- writel(val, l3c_pmu->base + L3C_PERF_CTRL);
+ hisi_l3c_pmu_event_writel(hwc, L3C_PERF_CTRL, val);
}
}
static void hisi_l3c_pmu_write_ds(struct perf_event *event, u32 ds_cfg)
{
- struct hisi_pmu *l3c_pmu = to_hisi_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
u32 reg, reg_idx, shift, val;
- int idx = hwc->idx;
+ int idx = L3C_HW_IDX(hwc->idx);
/*
* Select the appropriate datasource register(L3C_DATSRC_TYPE0/1).
@@ -120,15 +234,15 @@ static void hisi_l3c_pmu_write_ds(struct perf_event *event, u32 ds_cfg)
reg_idx = idx % 4;
shift = 8 * reg_idx;
- val = readl(l3c_pmu->base + reg);
+ val = hisi_l3c_pmu_event_readl(hwc, reg);
val &= ~(L3C_DATSRC_MASK << shift);
val |= ds_cfg << shift;
- writel(val, l3c_pmu->base + reg);
+ hisi_l3c_pmu_event_writel(hwc, reg, val);
}
static void hisi_l3c_pmu_config_ds(struct perf_event *event)
{
- struct hisi_pmu *l3c_pmu = to_hisi_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
u32 ds_cfg = hisi_get_datasrc_cfg(event);
u32 ds_skt = hisi_get_datasrc_skt(event);
@@ -138,15 +252,15 @@ static void hisi_l3c_pmu_config_ds(struct perf_event *event)
if (ds_skt) {
u32 val;
- val = readl(l3c_pmu->base + L3C_DATSRC_CTRL);
+ val = hisi_l3c_pmu_event_readl(hwc, L3C_DATSRC_CTRL);
val |= L3C_DATSRC_SKT_EN;
- writel(val, l3c_pmu->base + L3C_DATSRC_CTRL);
+ hisi_l3c_pmu_event_writel(hwc, L3C_DATSRC_CTRL, val);
}
}
static void hisi_l3c_pmu_clear_ds(struct perf_event *event)
{
- struct hisi_pmu *l3c_pmu = to_hisi_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
u32 ds_cfg = hisi_get_datasrc_cfg(event);
u32 ds_skt = hisi_get_datasrc_skt(event);
@@ -156,57 +270,63 @@ static void hisi_l3c_pmu_clear_ds(struct perf_event *event)
if (ds_skt) {
u32 val;
- val = readl(l3c_pmu->base + L3C_DATSRC_CTRL);
+ val = hisi_l3c_pmu_event_readl(hwc, L3C_DATSRC_CTRL);
val &= ~L3C_DATSRC_SKT_EN;
- writel(val, l3c_pmu->base + L3C_DATSRC_CTRL);
+ hisi_l3c_pmu_event_writel(hwc, L3C_DATSRC_CTRL, val);
}
}
static void hisi_l3c_pmu_config_core_tracetag(struct perf_event *event)
{
- struct hisi_pmu *l3c_pmu = to_hisi_pmu(event->pmu);
- u32 core = hisi_get_tt_core(event);
+ struct hw_perf_event *hwc = &event->hw;
+ u32 core = hisi_l3c_pmu_get_tt_core(event);
if (core) {
u32 val;
/* Config and enable core information */
- writel(core, l3c_pmu->base + L3C_CORE_CTRL);
- val = readl(l3c_pmu->base + L3C_PERF_CTRL);
+ hisi_l3c_pmu_event_writel(hwc, L3C_CORE_CTRL, core);
+ val = hisi_l3c_pmu_event_readl(hwc, L3C_PERF_CTRL);
val |= L3C_CORE_EN;
- writel(val, l3c_pmu->base + L3C_PERF_CTRL);
+ hisi_l3c_pmu_event_writel(hwc, L3C_PERF_CTRL, val);
/* Enable core-tracetag statistics */
- val = readl(l3c_pmu->base + L3C_TRACETAG_CTRL);
+ val = hisi_l3c_pmu_event_readl(hwc, L3C_TRACETAG_CTRL);
val |= L3C_TRACETAG_CORE_EN;
- writel(val, l3c_pmu->base + L3C_TRACETAG_CTRL);
+ hisi_l3c_pmu_event_writel(hwc, L3C_TRACETAG_CTRL, val);
}
}
static void hisi_l3c_pmu_clear_core_tracetag(struct perf_event *event)
{
- struct hisi_pmu *l3c_pmu = to_hisi_pmu(event->pmu);
- u32 core = hisi_get_tt_core(event);
+ struct hw_perf_event *hwc = &event->hw;
+ u32 core = hisi_l3c_pmu_get_tt_core(event);
if (core) {
u32 val;
/* Clear core information */
- writel(L3C_COER_NONE, l3c_pmu->base + L3C_CORE_CTRL);
- val = readl(l3c_pmu->base + L3C_PERF_CTRL);
+ hisi_l3c_pmu_event_writel(hwc, L3C_CORE_CTRL, L3C_COER_NONE);
+ val = hisi_l3c_pmu_event_readl(hwc, L3C_PERF_CTRL);
val &= ~L3C_CORE_EN;
- writel(val, l3c_pmu->base + L3C_PERF_CTRL);
+ hisi_l3c_pmu_event_writel(hwc, L3C_PERF_CTRL, val);
/* Disable core-tracetag statistics */
- val = readl(l3c_pmu->base + L3C_TRACETAG_CTRL);
+ val = hisi_l3c_pmu_event_readl(hwc, L3C_TRACETAG_CTRL);
val &= ~L3C_TRACETAG_CORE_EN;
- writel(val, l3c_pmu->base + L3C_TRACETAG_CTRL);
+ hisi_l3c_pmu_event_writel(hwc, L3C_TRACETAG_CTRL, val);
}
}
+static bool hisi_l3c_pmu_have_filter(struct perf_event *event)
+{
+ return hisi_get_tt_req(event) || hisi_l3c_pmu_get_tt_core(event) ||
+ hisi_get_datasrc_cfg(event) || hisi_get_datasrc_skt(event);
+}
+
static void hisi_l3c_pmu_enable_filter(struct perf_event *event)
{
- if (event->attr.config1 != 0x0) {
+ if (hisi_l3c_pmu_have_filter(event)) {
hisi_l3c_pmu_config_req_tracetag(event);
hisi_l3c_pmu_config_core_tracetag(event);
hisi_l3c_pmu_config_ds(event);
@@ -215,38 +335,56 @@ static void hisi_l3c_pmu_enable_filter(struct perf_event *event)
static void hisi_l3c_pmu_disable_filter(struct perf_event *event)
{
- if (event->attr.config1 != 0x0) {
+ if (hisi_l3c_pmu_have_filter(event)) {
hisi_l3c_pmu_clear_ds(event);
hisi_l3c_pmu_clear_core_tracetag(event);
hisi_l3c_pmu_clear_req_tracetag(event);
}
}
+static int hisi_l3c_pmu_check_filter(struct perf_event *event)
+{
+ struct hisi_pmu *l3c_pmu = to_hisi_pmu(event->pmu);
+ struct hisi_l3c_pmu *hisi_l3c_pmu = to_hisi_l3c_pmu(l3c_pmu);
+ int ext = hisi_get_ext(event);
+
+ if (ext < 0 || ext > hisi_l3c_pmu->ext_num)
+ return -EINVAL;
+
+ if (hisi_get_tt_core(event) && hisi_get_tt_core_deprecated(event))
+ return -EINVAL;
+
+ return 0;
+}
+
/*
* Select the counter register offset using the counter index
*/
static u32 hisi_l3c_pmu_get_counter_offset(int cntr_idx)
{
- return (L3C_CNTR0_LOWER + (cntr_idx * 8));
+ return L3C_CNTR0_LOWER + L3C_HW_IDX(cntr_idx) * 8;
}
static u64 hisi_l3c_pmu_read_counter(struct hisi_pmu *l3c_pmu,
struct hw_perf_event *hwc)
{
- return readq(l3c_pmu->base + hisi_l3c_pmu_get_counter_offset(hwc->idx));
+ return hisi_l3c_pmu_event_readq(hwc, hisi_l3c_pmu_get_counter_offset(hwc->idx));
}
static void hisi_l3c_pmu_write_counter(struct hisi_pmu *l3c_pmu,
struct hw_perf_event *hwc, u64 val)
{
- writeq(val, l3c_pmu->base + hisi_l3c_pmu_get_counter_offset(hwc->idx));
+ hisi_l3c_pmu_event_writeq(hwc, hisi_l3c_pmu_get_counter_offset(hwc->idx), val);
}
static void hisi_l3c_pmu_write_evtype(struct hisi_pmu *l3c_pmu, int idx,
u32 type)
{
+ struct hw_perf_event *hwc = &l3c_pmu->pmu_events.hw_events[idx]->hw;
u32 reg, reg_idx, shift, val;
+ idx = L3C_HW_IDX(idx);
+
/*
* Select the appropriate event select register(L3C_EVENT_TYPE0/1).
* There are 2 event select registers for the 8 hardware counters.
@@ -259,36 +397,72 @@ static void hisi_l3c_pmu_write_evtype(struct hisi_pmu *l3c_pmu, int idx,
shift = 8 * reg_idx;
/* Write event code to L3C_EVENT_TYPEx Register */
- val = readl(l3c_pmu->base + reg);
+ val = hisi_l3c_pmu_event_readl(hwc, reg);
val &= ~(L3C_EVTYPE_NONE << shift);
- val |= (type << shift);
- writel(val, l3c_pmu->base + reg);
+ val |= type << shift;
+ hisi_l3c_pmu_event_writel(hwc, reg, val);
}
static void hisi_l3c_pmu_start_counters(struct hisi_pmu *l3c_pmu)
{
+ struct hisi_l3c_pmu *hisi_l3c_pmu = to_hisi_l3c_pmu(l3c_pmu);
+ unsigned long *used_mask = l3c_pmu->pmu_events.used_mask;
+ unsigned long used_cntr = find_first_bit(used_mask, l3c_pmu->num_counters);
u32 val;
+ int i;
/*
- * Set perf_enable bit in L3C_PERF_CTRL register to start counting
- * for all enabled counters.
+ * Check if any counter belongs to the normal range (instead of ext
+ * range). If so, enable it.
*/
- val = readl(l3c_pmu->base + L3C_PERF_CTRL);
- val |= L3C_PERF_CTRL_EN;
- writel(val, l3c_pmu->base + L3C_PERF_CTRL);
+ if (used_cntr < L3C_NR_COUNTERS) {
+ val = readl(l3c_pmu->base + L3C_PERF_CTRL);
+ val |= L3C_PERF_CTRL_EN;
+ writel(val, l3c_pmu->base + L3C_PERF_CTRL);
+ }
+
+ /* Then enable counting on any ext ranges that have counters in use. */
+ for (i = 0; i < hisi_l3c_pmu->ext_num; i++) {
+ /* Find a used counter in this ext range; skip the range if there is none. */
+ used_cntr = find_next_bit(used_mask, L3C_CNTR_EXT_H(i), L3C_CNTR_EXT_L(i));
+ if (used_cntr >= L3C_CNTR_EXT_H(i))
+ continue;
+
+ val = readl(hisi_l3c_pmu->ext_base[i] + L3C_PERF_CTRL);
+ val |= L3C_PERF_CTRL_EN;
+ writel(val, hisi_l3c_pmu->ext_base[i] + L3C_PERF_CTRL);
+ }
}
static void hisi_l3c_pmu_stop_counters(struct hisi_pmu *l3c_pmu)
{
+ struct hisi_l3c_pmu *hisi_l3c_pmu = to_hisi_l3c_pmu(l3c_pmu);
+ unsigned long *used_mask = l3c_pmu->pmu_events.used_mask;
+ unsigned long used_cntr = find_first_bit(used_mask, l3c_pmu->num_counters);
u32 val;
+ int i;
/*
- * Clear perf_enable bit in L3C_PERF_CTRL register to stop counting
- * for all enabled counters.
+ * Check if any counter belongs to the normal range (instead of ext
+ * range). If so, stop it.
*/
- val = readl(l3c_pmu->base + L3C_PERF_CTRL);
- val &= ~(L3C_PERF_CTRL_EN);
- writel(val, l3c_pmu->base + L3C_PERF_CTRL);
+ if (used_cntr < L3C_NR_COUNTERS) {
+ val = readl(l3c_pmu->base + L3C_PERF_CTRL);
+ val &= ~L3C_PERF_CTRL_EN;
+ writel(val, l3c_pmu->base + L3C_PERF_CTRL);
+ }
+
+ /* Then stop counting on any ext ranges that have counters in use. */
+ for (i = 0; i < hisi_l3c_pmu->ext_num; i++) {
+ /* Find a used counter in this ext range; skip the range if there is none. */
+ used_cntr = find_next_bit(used_mask, L3C_CNTR_EXT_H(i), L3C_CNTR_EXT_L(i));
+ if (used_cntr >= L3C_CNTR_EXT_H(i))
+ continue;
+
+ val = readl(hisi_l3c_pmu->ext_base[i] + L3C_PERF_CTRL);
+ val &= ~L3C_PERF_CTRL_EN;
+ writel(val, hisi_l3c_pmu->ext_base[i] + L3C_PERF_CTRL);
+ }
}
static void hisi_l3c_pmu_enable_counter(struct hisi_pmu *l3c_pmu,
@@ -297,9 +471,9 @@ static void hisi_l3c_pmu_enable_counter(struct hisi_pmu *l3c_pmu,
u32 val;
/* Enable counter index in L3C_EVENT_CTRL register */
- val = readl(l3c_pmu->base + L3C_EVENT_CTRL);
- val |= (1 << hwc->idx);
- writel(val, l3c_pmu->base + L3C_EVENT_CTRL);
+ val = hisi_l3c_pmu_event_readl(hwc, L3C_EVENT_CTRL);
+ val |= 1 << L3C_HW_IDX(hwc->idx);
+ hisi_l3c_pmu_event_writel(hwc, L3C_EVENT_CTRL, val);
}
static void hisi_l3c_pmu_disable_counter(struct hisi_pmu *l3c_pmu,
@@ -308,9 +482,9 @@ static void hisi_l3c_pmu_disable_counter(struct hisi_pmu *l3c_pmu,
u32 val;
/* Clear counter index in L3C_EVENT_CTRL register */
- val = readl(l3c_pmu->base + L3C_EVENT_CTRL);
- val &= ~(1 << hwc->idx);
- writel(val, l3c_pmu->base + L3C_EVENT_CTRL);
+ val = hisi_l3c_pmu_event_readl(hwc, L3C_EVENT_CTRL);
+ val &= ~(1 << L3C_HW_IDX(hwc->idx));
+ hisi_l3c_pmu_event_writel(hwc, L3C_EVENT_CTRL, val);
}
static void hisi_l3c_pmu_enable_counter_int(struct hisi_pmu *l3c_pmu,
@@ -318,10 +492,10 @@ static void hisi_l3c_pmu_enable_counter_int(struct hisi_pmu *l3c_pmu,
{
u32 val;
- val = readl(l3c_pmu->base + L3C_INT_MASK);
+ val = hisi_l3c_pmu_event_readl(hwc, L3C_INT_MASK);
/* Write 0 to enable interrupt */
- val &= ~(1 << hwc->idx);
- writel(val, l3c_pmu->base + L3C_INT_MASK);
+ val &= ~(1 << L3C_HW_IDX(hwc->idx));
+ hisi_l3c_pmu_event_writel(hwc, L3C_INT_MASK, val);
}
static void hisi_l3c_pmu_disable_counter_int(struct hisi_pmu *l3c_pmu,
@@ -329,48 +503,61 @@ static void hisi_l3c_pmu_disable_counter_int(struct hisi_pmu *l3c_pmu,
{
u32 val;
- val = readl(l3c_pmu->base + L3C_INT_MASK);
+ val = hisi_l3c_pmu_event_readl(hwc, L3C_INT_MASK);
/* Write 1 to mask interrupt */
- val |= (1 << hwc->idx);
- writel(val, l3c_pmu->base + L3C_INT_MASK);
+ val |= 1 << L3C_HW_IDX(hwc->idx);
+ hisi_l3c_pmu_event_writel(hwc, L3C_INT_MASK, val);
}
static u32 hisi_l3c_pmu_get_int_status(struct hisi_pmu *l3c_pmu)
{
- return readl(l3c_pmu->base + L3C_INT_STATUS);
+ struct hisi_l3c_pmu *hisi_l3c_pmu = to_hisi_l3c_pmu(l3c_pmu);
+ u32 ext_int, status, status_ext = 0;
+ int i;
+
+ status = readl(l3c_pmu->base + L3C_INT_STATUS);
+
+ if (!support_ext(hisi_l3c_pmu))
+ return status;
+
+ for (i = 0; i < hisi_l3c_pmu->ext_num; i++) {
+ ext_int = readl(hisi_l3c_pmu->ext_base[i] + L3C_INT_STATUS);
+ status_ext |= ext_int << (L3C_NR_COUNTERS * i);
+ }
+
+ return status | (status_ext << L3C_NR_COUNTERS);
}
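+/*
+ * Resulting layout of the value returned above, assuming two ext regions:
+ * bits [7:0] carry the normal block's status, bits [15:8] ext region 0 and
+ * bits [23:16] ext region 1, matching the counter index ranges used by
+ * hisi_l3c_pmu_get_event_idx().
+ */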
static void hisi_l3c_pmu_clear_int_status(struct hisi_pmu *l3c_pmu, int idx)
{
- writel(1 << idx, l3c_pmu->base + L3C_INT_CLEAR);
-}
+ struct hw_perf_event *hwc = &l3c_pmu->pmu_events.hw_events[idx]->hw;
-static const struct acpi_device_id hisi_l3c_pmu_acpi_match[] = {
- { "HISI0213", },
- { "HISI0214", },
- {}
-};
-MODULE_DEVICE_TABLE(acpi, hisi_l3c_pmu_acpi_match);
+ hisi_l3c_pmu_event_writel(hwc, L3C_INT_CLEAR, 1 << L3C_HW_IDX(idx));
+}
static int hisi_l3c_pmu_init_data(struct platform_device *pdev,
struct hisi_pmu *l3c_pmu)
{
+ hisi_uncore_pmu_init_topology(l3c_pmu, &pdev->dev);
+
/*
* Use the SCCL_ID and CCL_ID to identify the L3C PMU, while
* SCCL_ID is in MPIDR[aff2] and CCL_ID is in MPIDR[aff1].
*/
- if (device_property_read_u32(&pdev->dev, "hisilicon,scl-id",
- &l3c_pmu->sccl_id)) {
+ if (l3c_pmu->topo.sccl_id < 0) {
dev_err(&pdev->dev, "Can not read l3c sccl-id!\n");
return -EINVAL;
}
- if (device_property_read_u32(&pdev->dev, "hisilicon,ccl-id",
- &l3c_pmu->ccl_id)) {
+ if (l3c_pmu->topo.ccl_id < 0) {
dev_err(&pdev->dev, "Can not read l3c ccl-id!\n");
return -EINVAL;
}
+ l3c_pmu->dev_info = device_get_match_data(&pdev->dev);
+ if (!l3c_pmu->dev_info)
+ return -ENODEV;
+
l3c_pmu->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(l3c_pmu->base)) {
dev_err(&pdev->dev, "ioremap failed for l3c_pmu resource\n");
@@ -382,6 +569,50 @@ static int hisi_l3c_pmu_init_data(struct platform_device *pdev,
return 0;
}
+static int hisi_l3c_pmu_init_ext(struct hisi_pmu *l3c_pmu, struct platform_device *pdev)
+{
+ struct hisi_l3c_pmu *hisi_l3c_pmu = to_hisi_l3c_pmu(l3c_pmu);
+ int ret, irq, ext_num, i;
+ char *irqname;
+
+ /* A HiSilicon L3C PMU that supports ext should have more than one IRQ resource. */
+ ext_num = platform_irq_count(pdev);
+ if (ext_num < L3C_MAX_EXT)
+ return -ENODEV;
+
+ /*
+ * The number of ext regions supported equals the number of IRQs minus 1,
+ * since one of the IRQs belongs to the normal part of the PMU.
+ */
+ hisi_l3c_pmu->ext_num = ext_num - 1;
+
+ for (i = 0; i < hisi_l3c_pmu->ext_num; i++) {
+ hisi_l3c_pmu->ext_base[i] = devm_platform_ioremap_resource(pdev, i + 1);
+ if (IS_ERR(hisi_l3c_pmu->ext_base[i]))
+ return PTR_ERR(hisi_l3c_pmu->ext_base[i]);
+
+ irq = platform_get_irq(pdev, i + 1);
+ if (irq < 0)
+ return irq;
+
+ irqname = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s ext%d",
+ dev_name(&pdev->dev), i + 1);
+ if (!irqname)
+ return -ENOMEM;
+
+ ret = devm_request_irq(&pdev->dev, irq, hisi_uncore_pmu_isr,
+ IRQF_NOBALANCING | IRQF_NO_THREAD,
+ irqname, l3c_pmu);
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret,
+ "Fail to request EXT IRQ: %d.\n", irq);
+
+ hisi_l3c_pmu->ext_irq[i] = irq;
+ }
+
+ return 0;
+}
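+/*
+ * Resource layout assumed by the function above: IRQ/MMIO index 0 belongs
+ * to the normal block and indexes 1..ext_num to the ext blocks, so a
+ * platform device carrying three IRQs and three MMIO regions ends up with
+ * ext_num == 2.
+ */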
+
static struct attribute *hisi_l3c_pmu_v1_format_attr[] = {
HISI_PMU_FORMAT_ATTR(event, "config:0-7"),
NULL,
@@ -394,10 +625,11 @@ static const struct attribute_group hisi_l3c_pmu_v1_format_group = {
static struct attribute *hisi_l3c_pmu_v2_format_attr[] = {
HISI_PMU_FORMAT_ATTR(event, "config:0-7"),
- HISI_PMU_FORMAT_ATTR(tt_core, "config1:0-7"),
+ HISI_PMU_FORMAT_ATTR(tt_core_deprecated, "config1:0-7"),
HISI_PMU_FORMAT_ATTR(tt_req, "config1:8-10"),
HISI_PMU_FORMAT_ATTR(datasrc_cfg, "config1:11-15"),
HISI_PMU_FORMAT_ATTR(datasrc_skt, "config1:16"),
+ HISI_PMU_FORMAT_ATTR(tt_core, "config2:0-15"),
NULL
};
@@ -406,6 +638,20 @@ static const struct attribute_group hisi_l3c_pmu_v2_format_group = {
.attrs = hisi_l3c_pmu_v2_format_attr,
};
+static struct attribute *hisi_l3c_pmu_v3_format_attr[] = {
+ HISI_PMU_FORMAT_ATTR(event, "config:0-7"),
+ HISI_PMU_FORMAT_ATTR(ext, "config:16-17"),
+ HISI_PMU_FORMAT_ATTR(tt_core_deprecated, "config1:0-7"),
+ HISI_PMU_FORMAT_ATTR(tt_req, "config1:8-10"),
+ HISI_PMU_FORMAT_ATTR(tt_core, "config2:0-15"),
+ NULL
+};
+
+static const struct attribute_group hisi_l3c_pmu_v3_format_group = {
+ .name = "format",
+ .attrs = hisi_l3c_pmu_v3_format_attr,
+};
+
static struct attribute *hisi_l3c_pmu_v1_events_attr[] = {
HISI_PMU_EVENT_ATTR(rd_cpipe, 0x00),
HISI_PMU_EVENT_ATTR(wr_cpipe, 0x01),
@@ -441,48 +687,82 @@ static const struct attribute_group hisi_l3c_pmu_v2_events_group = {
.attrs = hisi_l3c_pmu_v2_events_attr,
};
-static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL);
-
-static struct attribute *hisi_l3c_pmu_cpumask_attrs[] = {
- &dev_attr_cpumask.attr,
- NULL,
-};
-
-static const struct attribute_group hisi_l3c_pmu_cpumask_attr_group = {
- .attrs = hisi_l3c_pmu_cpumask_attrs,
-};
-
-static struct device_attribute hisi_l3c_pmu_identifier_attr =
- __ATTR(identifier, 0444, hisi_uncore_pmu_identifier_attr_show, NULL);
-
-static struct attribute *hisi_l3c_pmu_identifier_attrs[] = {
- &hisi_l3c_pmu_identifier_attr.attr,
+static struct attribute *hisi_l3c_pmu_v3_events_attr[] = {
+ HISI_PMU_EVENT_ATTR(rd_spipe, 0x18),
+ HISI_PMU_EVENT_ATTR(rd_hit_spipe, 0x19),
+ HISI_PMU_EVENT_ATTR(wr_spipe, 0x1a),
+ HISI_PMU_EVENT_ATTR(wr_hit_spipe, 0x1b),
+ HISI_PMU_EVENT_ATTR(io_rd_spipe, 0x1c),
+ HISI_PMU_EVENT_ATTR(io_rd_hit_spipe, 0x1d),
+ HISI_PMU_EVENT_ATTR(io_wr_spipe, 0x1e),
+ HISI_PMU_EVENT_ATTR(io_wr_hit_spipe, 0x1f),
+ HISI_PMU_EVENT_ATTR(cycles, 0x7f),
+ HISI_PMU_EVENT_ATTR(l3c_ref, 0xbc),
+ HISI_PMU_EVENT_ATTR(l3c2ring, 0xbd),
NULL
};
-static const struct attribute_group hisi_l3c_pmu_identifier_group = {
- .attrs = hisi_l3c_pmu_identifier_attrs,
+static const struct attribute_group hisi_l3c_pmu_v3_events_group = {
+ .name = "events",
+ .attrs = hisi_l3c_pmu_v3_events_attr,
};
static const struct attribute_group *hisi_l3c_pmu_v1_attr_groups[] = {
&hisi_l3c_pmu_v1_format_group,
&hisi_l3c_pmu_v1_events_group,
- &hisi_l3c_pmu_cpumask_attr_group,
- &hisi_l3c_pmu_identifier_group,
+ &hisi_pmu_cpumask_attr_group,
+ &hisi_pmu_identifier_group,
NULL,
};
static const struct attribute_group *hisi_l3c_pmu_v2_attr_groups[] = {
&hisi_l3c_pmu_v2_format_group,
&hisi_l3c_pmu_v2_events_group,
- &hisi_l3c_pmu_cpumask_attr_group,
- &hisi_l3c_pmu_identifier_group,
+ &hisi_pmu_cpumask_attr_group,
+ &hisi_pmu_identifier_group,
+ NULL
+};
+
+static const struct attribute_group *hisi_l3c_pmu_v3_attr_groups[] = {
+ &hisi_l3c_pmu_v3_format_group,
+ &hisi_l3c_pmu_v3_events_group,
+ &hisi_pmu_cpumask_attr_group,
+ &hisi_pmu_identifier_group,
NULL
};
+static struct hisi_l3c_pmu_ext hisi_l3c_pmu_support_ext = {
+ .support_ext = true,
+};
+
+static struct hisi_l3c_pmu_ext hisi_l3c_pmu_not_support_ext = {
+ .support_ext = false,
+};
+
+static const struct hisi_pmu_dev_info hisi_l3c_pmu_v1 = {
+ .attr_groups = hisi_l3c_pmu_v1_attr_groups,
+ .counter_bits = 48,
+ .check_event = L3C_V1_NR_EVENTS,
+ .private = &hisi_l3c_pmu_not_support_ext,
+};
+
+static const struct hisi_pmu_dev_info hisi_l3c_pmu_v2 = {
+ .attr_groups = hisi_l3c_pmu_v2_attr_groups,
+ .counter_bits = 64,
+ .check_event = L3C_V2_NR_EVENTS,
+ .private = &hisi_l3c_pmu_not_support_ext,
+};
+
+static const struct hisi_pmu_dev_info hisi_l3c_pmu_v3 = {
+ .attr_groups = hisi_l3c_pmu_v3_attr_groups,
+ .counter_bits = 64,
+ .check_event = L3C_V2_NR_EVENTS,
+ .private = &hisi_l3c_pmu_support_ext,
+};
+
static const struct hisi_uncore_ops hisi_uncore_l3c_ops = {
.write_evtype = hisi_l3c_pmu_write_evtype,
- .get_event_idx = hisi_uncore_pmu_get_event_idx,
+ .get_event_idx = hisi_l3c_pmu_get_event_idx,
.start_counters = hisi_l3c_pmu_start_counters,
.stop_counters = hisi_l3c_pmu_stop_counters,
.enable_counter = hisi_l3c_pmu_enable_counter,
@@ -495,11 +775,14 @@ static const struct hisi_uncore_ops hisi_uncore_l3c_ops = {
.clear_int_status = hisi_l3c_pmu_clear_int_status,
.enable_filter = hisi_l3c_pmu_enable_filter,
.disable_filter = hisi_l3c_pmu_disable_filter,
+ .check_filter = hisi_l3c_pmu_check_filter,
};
static int hisi_l3c_pmu_dev_probe(struct platform_device *pdev,
struct hisi_pmu *l3c_pmu)
{
+ struct hisi_l3c_pmu *hisi_l3c_pmu = to_hisi_l3c_pmu(l3c_pmu);
+ struct hisi_l3c_pmu_ext *l3c_pmu_dev_ext;
int ret;
ret = hisi_l3c_pmu_init_data(pdev, l3c_pmu);
@@ -510,42 +793,55 @@ static int hisi_l3c_pmu_dev_probe(struct platform_device *pdev,
if (ret)
return ret;
- if (l3c_pmu->identifier >= HISI_PMU_V2) {
- l3c_pmu->counter_bits = 64;
- l3c_pmu->check_event = L3C_V2_NR_EVENTS;
- l3c_pmu->pmu_events.attr_groups = hisi_l3c_pmu_v2_attr_groups;
- } else {
- l3c_pmu->counter_bits = 48;
- l3c_pmu->check_event = L3C_V1_NR_EVENTS;
- l3c_pmu->pmu_events.attr_groups = hisi_l3c_pmu_v1_attr_groups;
- }
-
+ l3c_pmu->pmu_events.attr_groups = l3c_pmu->dev_info->attr_groups;
+ l3c_pmu->counter_bits = l3c_pmu->dev_info->counter_bits;
+ l3c_pmu->check_event = l3c_pmu->dev_info->check_event;
l3c_pmu->num_counters = L3C_NR_COUNTERS;
l3c_pmu->ops = &hisi_uncore_l3c_ops;
l3c_pmu->dev = &pdev->dev;
l3c_pmu->on_cpu = -1;
+ l3c_pmu_dev_ext = l3c_pmu->dev_info->private;
+ if (l3c_pmu_dev_ext->support_ext) {
+ ret = hisi_l3c_pmu_init_ext(l3c_pmu, pdev);
+ if (ret)
+ return ret;
+ /*
+ * The extension events have their own counters, with the same
+ * number of counters as the normal events. So at most
+ * num_counters * (1 + ext_num) events can be monitored.
+ */
+ l3c_pmu->num_counters += hisi_l3c_pmu->ext_num * L3C_NR_COUNTERS;
+ }
+
return 0;
}
static int hisi_l3c_pmu_probe(struct platform_device *pdev)
{
+ struct hisi_l3c_pmu *hisi_l3c_pmu;
struct hisi_pmu *l3c_pmu;
char *name;
int ret;
- l3c_pmu = devm_kzalloc(&pdev->dev, sizeof(*l3c_pmu), GFP_KERNEL);
- if (!l3c_pmu)
+ hisi_l3c_pmu = devm_kzalloc(&pdev->dev, sizeof(*hisi_l3c_pmu), GFP_KERNEL);
+ if (!hisi_l3c_pmu)
return -ENOMEM;
+ l3c_pmu = &hisi_l3c_pmu->l3c_pmu;
platform_set_drvdata(pdev, l3c_pmu);
ret = hisi_l3c_pmu_dev_probe(pdev, l3c_pmu);
if (ret)
return ret;
- name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%u_l3c%u",
- l3c_pmu->sccl_id, l3c_pmu->ccl_id);
+ if (l3c_pmu->topo.sub_id >= 0)
+ name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%d_l3c%d_%d",
+ l3c_pmu->topo.sccl_id, l3c_pmu->topo.ccl_id,
+ l3c_pmu->topo.sub_id);
+ else
+ name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%d_l3c%d",
+ l3c_pmu->topo.sccl_id, l3c_pmu->topo.ccl_id);
if (!name)
return -ENOMEM;
@@ -568,16 +864,23 @@ static int hisi_l3c_pmu_probe(struct platform_device *pdev)
return ret;
}
-static int hisi_l3c_pmu_remove(struct platform_device *pdev)
+static void hisi_l3c_pmu_remove(struct platform_device *pdev)
{
struct hisi_pmu *l3c_pmu = platform_get_drvdata(pdev);
perf_pmu_unregister(&l3c_pmu->pmu);
cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_L3_ONLINE,
&l3c_pmu->node);
- return 0;
}
+static const struct acpi_device_id hisi_l3c_pmu_acpi_match[] = {
+ { "HISI0213", (kernel_ulong_t)&hisi_l3c_pmu_v1 },
+ { "HISI0214", (kernel_ulong_t)&hisi_l3c_pmu_v2 },
+ { "HISI0215", (kernel_ulong_t)&hisi_l3c_pmu_v3 },
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, hisi_l3c_pmu_acpi_match);
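+/*
+ * Usage sketch for the v3 ("HISI0215") entry above; the instance name is an
+ * example only and depends on the SoC topology:
+ *
+ *	perf stat -e hisi_sccl1_l3c0_0/rd_spipe/
+ *	perf stat -e hisi_sccl1_l3c0_0/rd_spipe,ext=1/
+ *
+ * where ext selects the first extension block via config:16-17 as declared
+ * in hisi_l3c_pmu_v3_format_attr.
+ */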
+
static struct platform_driver hisi_l3c_pmu_driver = {
.driver = {
.name = "hisi_l3c_pmu",
@@ -588,14 +891,60 @@ static struct platform_driver hisi_l3c_pmu_driver = {
.remove = hisi_l3c_pmu_remove,
};
+static int hisi_l3c_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
+{
+ struct hisi_pmu *l3c_pmu = hlist_entry_safe(node, struct hisi_pmu, node);
+ struct hisi_l3c_pmu *hisi_l3c_pmu = to_hisi_l3c_pmu(l3c_pmu);
+ int ret, i;
+
+ ret = hisi_uncore_pmu_online_cpu(cpu, node);
+ if (ret)
+ return ret;
+
+ /* Skip ext IRQ migration for an L3C PMU that doesn't support ext. */
+ if (!support_ext(hisi_l3c_pmu))
+ return 0;
+
+ for (i = 0; i < hisi_l3c_pmu->ext_num; i++)
+ WARN_ON(irq_set_affinity(hisi_l3c_pmu->ext_irq[i],
+ cpumask_of(l3c_pmu->on_cpu)));
+
+ return 0;
+}
+
+static int hisi_l3c_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
+{
+ struct hisi_pmu *l3c_pmu = hlist_entry_safe(node, struct hisi_pmu, node);
+ struct hisi_l3c_pmu *hisi_l3c_pmu = to_hisi_l3c_pmu(l3c_pmu);
+ int ret, i;
+
+ ret = hisi_uncore_pmu_offline_cpu(cpu, node);
+ if (ret)
+ return ret;
+
+ /* If no available CPU was found, skip the IRQ migration. */
+ if (l3c_pmu->on_cpu < 0)
+ return 0;
+
+ /* Skip ext IRQ migration for an L3C PMU that doesn't support ext. */
+ if (!support_ext(hisi_l3c_pmu))
+ return 0;
+
+ for (i = 0; i < hisi_l3c_pmu->ext_num; i++)
+ WARN_ON(irq_set_affinity(hisi_l3c_pmu->ext_irq[i],
+ cpumask_of(l3c_pmu->on_cpu)));
+
+ return 0;
+}
+
static int __init hisi_l3c_pmu_module_init(void)
{
int ret;
ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_HISI_L3_ONLINE,
"AP_PERF_ARM_HISI_L3_ONLINE",
- hisi_uncore_pmu_online_cpu,
- hisi_uncore_pmu_offline_cpu);
+ hisi_l3c_pmu_online_cpu,
+ hisi_l3c_pmu_offline_cpu);
if (ret) {
pr_err("L3C PMU: Error setup hotplug, ret = %d\n", ret);
return ret;
@@ -616,6 +965,7 @@ static void __exit hisi_l3c_pmu_module_exit(void)
}
module_exit(hisi_l3c_pmu_module_exit);
+MODULE_IMPORT_NS("HISI_PMU");
MODULE_DESCRIPTION("HiSilicon SoC L3C uncore PMU driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Anurup M <anurup.m@huawei.com>");
diff --git a/drivers/perf/hisilicon/hisi_uncore_mn_pmu.c b/drivers/perf/hisilicon/hisi_uncore_mn_pmu.c
new file mode 100644
index 000000000000..4df4eebe243e
--- /dev/null
+++ b/drivers/perf/hisilicon/hisi_uncore_mn_pmu.c
@@ -0,0 +1,411 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * HiSilicon SoC MN uncore Hardware event counters support
+ *
+ * Copyright (c) 2025 HiSilicon Technologies Co., Ltd.
+ */
+#include <linux/cpuhotplug.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/irq.h>
+#include <linux/list.h>
+#include <linux/mod_devicetable.h>
+#include <linux/property.h>
+
+#include "hisi_uncore_pmu.h"
+
+/* Dynamic CPU hotplug state used by MN PMU */
+static enum cpuhp_state hisi_mn_pmu_online;
+
+/* MN register definition */
+#define HISI_MN_DYNAMIC_CTRL_REG 0x400
+#define HISI_MN_DYNAMIC_CTRL_EN BIT(0)
+#define HISI_MN_PERF_CTRL_REG 0x408
+#define HISI_MN_PERF_CTRL_EN BIT(6)
+#define HISI_MN_INT_MASK_REG 0x800
+#define HISI_MN_INT_STATUS_REG 0x808
+#define HISI_MN_INT_CLEAR_REG 0x80C
+#define HISI_MN_EVENT_CTRL_REG 0x1C00
+#define HISI_MN_VERSION_REG 0x1C04
+#define HISI_MN_EVTYPE0_REG 0x1d00
+#define HISI_MN_EVTYPE_MASK GENMASK(7, 0)
+#define HISI_MN_CNTR0_REG 0x1e00
+#define HISI_MN_EVTYPE_REGn(evtype0, n) ((evtype0) + (n) * 4)
+#define HISI_MN_CNTR_REGn(cntr0, n) ((cntr0) + (n) * 8)
+
+#define HISI_MN_NR_COUNTERS 4
+#define HISI_MN_TIMEOUT_US 500U
+
+struct hisi_mn_pmu_regs {
+ u32 version;
+ u32 dyn_ctrl;
+ u32 perf_ctrl;
+ u32 int_mask;
+ u32 int_clear;
+ u32 int_status;
+ u32 event_ctrl;
+ u32 event_type0;
+ u32 event_cntr0;
+};
+
+/*
+ * Each event request takes a certain amount of time to complete. If we
+ * are counting a latency-related event, we need to wait for all requests
+ * to complete. Otherwise, the counter value will be slightly too large.
+ */
+static void hisi_mn_pmu_counter_flush(struct hisi_pmu *mn_pmu)
+{
+ struct hisi_mn_pmu_regs *reg_info = mn_pmu->dev_info->private;
+ int ret;
+ u32 val;
+
+ val = readl(mn_pmu->base + reg_info->dyn_ctrl);
+ val |= HISI_MN_DYNAMIC_CTRL_EN;
+ writel(val, mn_pmu->base + reg_info->dyn_ctrl);
+
+ ret = readl_poll_timeout_atomic(mn_pmu->base + reg_info->dyn_ctrl,
+ val, !(val & HISI_MN_DYNAMIC_CTRL_EN),
+ 1, HISI_MN_TIMEOUT_US);
+ if (ret)
+ dev_warn(mn_pmu->dev, "Counter flush timeout\n");
+}
+
+static u64 hisi_mn_pmu_read_counter(struct hisi_pmu *mn_pmu,
+ struct hw_perf_event *hwc)
+{
+ struct hisi_mn_pmu_regs *reg_info = mn_pmu->dev_info->private;
+
+ return readq(mn_pmu->base + HISI_MN_CNTR_REGn(reg_info->event_cntr0, hwc->idx));
+}
+
+static void hisi_mn_pmu_write_counter(struct hisi_pmu *mn_pmu,
+ struct hw_perf_event *hwc, u64 val)
+{
+ struct hisi_mn_pmu_regs *reg_info = mn_pmu->dev_info->private;
+
+ writeq(val, mn_pmu->base + HISI_MN_CNTR_REGn(reg_info->event_cntr0, hwc->idx));
+}
+
+static void hisi_mn_pmu_write_evtype(struct hisi_pmu *mn_pmu, int idx, u32 type)
+{
+ struct hisi_mn_pmu_regs *reg_info = mn_pmu->dev_info->private;
+ u32 val;
+
+ /*
+ * Select the appropriate event select register.
+ * There are 2 32-bit event select registers for the
+ * 8 hardware counters, and each event code is 8 bits wide.
+ */
+ val = readl(mn_pmu->base + HISI_MN_EVTYPE_REGn(reg_info->event_type0, idx / 4));
+ val &= ~(HISI_MN_EVTYPE_MASK << HISI_PMU_EVTYPE_SHIFT(idx));
+ val |= (type << HISI_PMU_EVTYPE_SHIFT(idx));
+ writel(val, mn_pmu->base + HISI_MN_EVTYPE_REGn(reg_info->event_type0, idx / 4));
+}
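+/*
+ * For example, assuming HISI_PMU_EVTYPE_SHIFT(idx) expands to 8 * (idx % 4)
+ * as in hisi_uncore_pmu.h: idx 2 selects HISI_MN_EVTYPE0_REG with its 8-bit
+ * field at bits [23:16], while idx 5 would select the next 32-bit event
+ * select register, HISI_MN_EVTYPE_REGn(event_type0, 1).
+ */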
+
+static void hisi_mn_pmu_start_counters(struct hisi_pmu *mn_pmu)
+{
+ struct hisi_mn_pmu_regs *reg_info = mn_pmu->dev_info->private;
+ u32 val;
+
+ val = readl(mn_pmu->base + reg_info->perf_ctrl);
+ val |= HISI_MN_PERF_CTRL_EN;
+ writel(val, mn_pmu->base + reg_info->perf_ctrl);
+}
+
+static void hisi_mn_pmu_stop_counters(struct hisi_pmu *mn_pmu)
+{
+ struct hisi_mn_pmu_regs *reg_info = mn_pmu->dev_info->private;
+ u32 val;
+
+ val = readl(mn_pmu->base + reg_info->perf_ctrl);
+ val &= ~HISI_MN_PERF_CTRL_EN;
+ writel(val, mn_pmu->base + reg_info->perf_ctrl);
+
+ hisi_mn_pmu_counter_flush(mn_pmu);
+}
+
+static void hisi_mn_pmu_enable_counter(struct hisi_pmu *mn_pmu,
+ struct hw_perf_event *hwc)
+{
+ struct hisi_mn_pmu_regs *reg_info = mn_pmu->dev_info->private;
+ u32 val;
+
+ val = readl(mn_pmu->base + reg_info->event_ctrl);
+ val |= BIT(hwc->idx);
+ writel(val, mn_pmu->base + reg_info->event_ctrl);
+}
+
+static void hisi_mn_pmu_disable_counter(struct hisi_pmu *mn_pmu,
+ struct hw_perf_event *hwc)
+{
+ struct hisi_mn_pmu_regs *reg_info = mn_pmu->dev_info->private;
+ u32 val;
+
+ val = readl(mn_pmu->base + reg_info->event_ctrl);
+ val &= ~BIT(hwc->idx);
+ writel(val, mn_pmu->base + reg_info->event_ctrl);
+}
+
+static void hisi_mn_pmu_enable_counter_int(struct hisi_pmu *mn_pmu,
+ struct hw_perf_event *hwc)
+{
+ struct hisi_mn_pmu_regs *reg_info = mn_pmu->dev_info->private;
+ u32 val;
+
+ val = readl(mn_pmu->base + reg_info->int_mask);
+ val &= ~BIT(hwc->idx);
+ writel(val, mn_pmu->base + reg_info->int_mask);
+}
+
+static void hisi_mn_pmu_disable_counter_int(struct hisi_pmu *mn_pmu,
+ struct hw_perf_event *hwc)
+{
+ struct hisi_mn_pmu_regs *reg_info = mn_pmu->dev_info->private;
+ u32 val;
+
+ val = readl(mn_pmu->base + reg_info->int_mask);
+ val |= BIT(hwc->idx);
+ writel(val, mn_pmu->base + reg_info->int_mask);
+}
+
+static u32 hisi_mn_pmu_get_int_status(struct hisi_pmu *mn_pmu)
+{
+ struct hisi_mn_pmu_regs *reg_info = mn_pmu->dev_info->private;
+
+ return readl(mn_pmu->base + reg_info->int_status);
+}
+
+static void hisi_mn_pmu_clear_int_status(struct hisi_pmu *mn_pmu, int idx)
+{
+ struct hisi_mn_pmu_regs *reg_info = mn_pmu->dev_info->private;
+
+ writel(BIT(idx), mn_pmu->base + reg_info->int_clear);
+}
+
+static struct attribute *hisi_mn_pmu_format_attr[] = {
+ HISI_PMU_FORMAT_ATTR(event, "config:0-7"),
+ NULL
+};
+
+static const struct attribute_group hisi_mn_pmu_format_group = {
+ .name = "format",
+ .attrs = hisi_mn_pmu_format_attr,
+};
+
+static struct attribute *hisi_mn_pmu_events_attr[] = {
+ HISI_PMU_EVENT_ATTR(req_eobarrier_num, 0x00),
+ HISI_PMU_EVENT_ATTR(req_ecbarrier_num, 0x01),
+ HISI_PMU_EVENT_ATTR(req_dvmop_num, 0x02),
+ HISI_PMU_EVENT_ATTR(req_dvmsync_num, 0x03),
+ HISI_PMU_EVENT_ATTR(req_retry_num, 0x04),
+ HISI_PMU_EVENT_ATTR(req_writenosnp_num, 0x05),
+ HISI_PMU_EVENT_ATTR(req_readnosnp_num, 0x06),
+ HISI_PMU_EVENT_ATTR(snp_dvm_num, 0x07),
+ HISI_PMU_EVENT_ATTR(snp_dvmsync_num, 0x08),
+ HISI_PMU_EVENT_ATTR(l3t_req_dvm_num, 0x09),
+ HISI_PMU_EVENT_ATTR(l3t_req_dvmsync_num, 0x0A),
+ HISI_PMU_EVENT_ATTR(mn_req_dvm_num, 0x0B),
+ HISI_PMU_EVENT_ATTR(mn_req_dvmsync_num, 0x0C),
+ HISI_PMU_EVENT_ATTR(pa_req_dvm_num, 0x0D),
+ HISI_PMU_EVENT_ATTR(pa_req_dvmsync_num, 0x0E),
+ HISI_PMU_EVENT_ATTR(snp_dvm_latency, 0x80),
+ HISI_PMU_EVENT_ATTR(snp_dvmsync_latency, 0x81),
+ HISI_PMU_EVENT_ATTR(l3t_req_dvm_latency, 0x82),
+ HISI_PMU_EVENT_ATTR(l3t_req_dvmsync_latency, 0x83),
+ HISI_PMU_EVENT_ATTR(mn_req_dvm_latency, 0x84),
+ HISI_PMU_EVENT_ATTR(mn_req_dvmsync_latency, 0x85),
+ HISI_PMU_EVENT_ATTR(pa_req_dvm_latency, 0x86),
+ HISI_PMU_EVENT_ATTR(pa_req_dvmsync_latency, 0x87),
+ NULL
+};
+
+static const struct attribute_group hisi_mn_pmu_events_group = {
+ .name = "events",
+ .attrs = hisi_mn_pmu_events_attr,
+};
+
+static const struct attribute_group *hisi_mn_pmu_attr_groups[] = {
+ &hisi_mn_pmu_format_group,
+ &hisi_mn_pmu_events_group,
+ &hisi_pmu_cpumask_attr_group,
+ &hisi_pmu_identifier_group,
+ NULL
+};
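+/*
+ * Usage sketch (the instance name is an example and depends on the SCL
+ * layout reported by firmware):
+ *
+ *	perf stat -e hisi_scl3_mn0/req_dvmop_num/ -e hisi_scl3_mn0/snp_dvm_latency/
+ *
+ * Latency events such as snp_dvm_latency are flushed in
+ * hisi_mn_pmu_stop_counters() so that in-flight requests are accounted for
+ * before the counters are read.
+ */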
+
+static const struct hisi_uncore_ops hisi_uncore_mn_ops = {
+ .write_evtype = hisi_mn_pmu_write_evtype,
+ .get_event_idx = hisi_uncore_pmu_get_event_idx,
+ .start_counters = hisi_mn_pmu_start_counters,
+ .stop_counters = hisi_mn_pmu_stop_counters,
+ .enable_counter = hisi_mn_pmu_enable_counter,
+ .disable_counter = hisi_mn_pmu_disable_counter,
+ .enable_counter_int = hisi_mn_pmu_enable_counter_int,
+ .disable_counter_int = hisi_mn_pmu_disable_counter_int,
+ .write_counter = hisi_mn_pmu_write_counter,
+ .read_counter = hisi_mn_pmu_read_counter,
+ .get_int_status = hisi_mn_pmu_get_int_status,
+ .clear_int_status = hisi_mn_pmu_clear_int_status,
+};
+
+static int hisi_mn_pmu_dev_init(struct platform_device *pdev,
+ struct hisi_pmu *mn_pmu)
+{
+ struct hisi_mn_pmu_regs *reg_info;
+ int ret;
+
+ hisi_uncore_pmu_init_topology(mn_pmu, &pdev->dev);
+
+ if (mn_pmu->topo.scl_id < 0)
+ return dev_err_probe(&pdev->dev, -EINVAL,
+ "Failed to read MN scl id\n");
+
+ if (mn_pmu->topo.index_id < 0)
+ return dev_err_probe(&pdev->dev, -EINVAL,
+ "Failed to read MN index id\n");
+
+ mn_pmu->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(mn_pmu->base))
+ return dev_err_probe(&pdev->dev, PTR_ERR(mn_pmu->base),
+ "Failed to ioremap resource\n");
+
+ ret = hisi_uncore_pmu_init_irq(mn_pmu, pdev);
+ if (ret)
+ return ret;
+
+ mn_pmu->dev_info = device_get_match_data(&pdev->dev);
+ if (!mn_pmu->dev_info)
+ return -ENODEV;
+
+ mn_pmu->pmu_events.attr_groups = mn_pmu->dev_info->attr_groups;
+ mn_pmu->counter_bits = mn_pmu->dev_info->counter_bits;
+ mn_pmu->check_event = mn_pmu->dev_info->check_event;
+ mn_pmu->num_counters = HISI_MN_NR_COUNTERS;
+ mn_pmu->ops = &hisi_uncore_mn_ops;
+ mn_pmu->dev = &pdev->dev;
+ mn_pmu->on_cpu = -1;
+
+ reg_info = mn_pmu->dev_info->private;
+ mn_pmu->identifier = readl(mn_pmu->base + reg_info->version);
+
+ return 0;
+}
+
+static void hisi_mn_pmu_remove_cpuhp(void *hotplug_node)
+{
+ cpuhp_state_remove_instance_nocalls(hisi_mn_pmu_online, hotplug_node);
+}
+
+static void hisi_mn_pmu_unregister(void *pmu)
+{
+ perf_pmu_unregister(pmu);
+}
+
+static int hisi_mn_pmu_probe(struct platform_device *pdev)
+{
+ struct hisi_pmu *mn_pmu;
+ char *name;
+ int ret;
+
+ mn_pmu = devm_kzalloc(&pdev->dev, sizeof(*mn_pmu), GFP_KERNEL);
+ if (!mn_pmu)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, mn_pmu);
+
+ ret = hisi_mn_pmu_dev_init(pdev, mn_pmu);
+ if (ret)
+ return ret;
+
+ name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_scl%d_mn%d",
+ mn_pmu->topo.scl_id, mn_pmu->topo.index_id);
+ if (!name)
+ return -ENOMEM;
+
+ ret = cpuhp_state_add_instance(hisi_mn_pmu_online, &mn_pmu->node);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "Failed to register cpu hotplug\n");
+
+ ret = devm_add_action_or_reset(&pdev->dev, hisi_mn_pmu_remove_cpuhp, &mn_pmu->node);
+ if (ret)
+ return ret;
+
+ hisi_pmu_init(mn_pmu, THIS_MODULE);
+
+ ret = perf_pmu_register(&mn_pmu->pmu, name, -1);
+ if (ret)
+ return dev_err_probe(mn_pmu->dev, ret, "Failed to register MN PMU\n");
+
+ return devm_add_action_or_reset(&pdev->dev, hisi_mn_pmu_unregister, &mn_pmu->pmu);
+}
+
+static struct hisi_mn_pmu_regs hisi_mn_v1_pmu_regs = {
+ .version = HISI_MN_VERSION_REG,
+ .dyn_ctrl = HISI_MN_DYNAMIC_CTRL_REG,
+ .perf_ctrl = HISI_MN_PERF_CTRL_REG,
+ .int_mask = HISI_MN_INT_MASK_REG,
+ .int_clear = HISI_MN_INT_CLEAR_REG,
+ .int_status = HISI_MN_INT_STATUS_REG,
+ .event_ctrl = HISI_MN_EVENT_CTRL_REG,
+ .event_type0 = HISI_MN_EVTYPE0_REG,
+ .event_cntr0 = HISI_MN_CNTR0_REG,
+};
+
+static const struct hisi_pmu_dev_info hisi_mn_v1 = {
+ .attr_groups = hisi_mn_pmu_attr_groups,
+ .counter_bits = 48,
+ .check_event = HISI_MN_EVTYPE_MASK,
+ .private = &hisi_mn_v1_pmu_regs,
+};
+
+static const struct acpi_device_id hisi_mn_pmu_acpi_match[] = {
+ { "HISI0222", (kernel_ulong_t) &hisi_mn_v1 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, hisi_mn_pmu_acpi_match);
+
+static struct platform_driver hisi_mn_pmu_driver = {
+ .driver = {
+ .name = "hisi_mn_pmu",
+ .acpi_match_table = hisi_mn_pmu_acpi_match,
+ /*
+ * We have not worked out a safe bind/unbind process.
+ * Forcefully unbinding during sampling will lead to a
+ * kernel panic, so this is not supported yet.
+ */
+ .suppress_bind_attrs = true,
+ },
+ .probe = hisi_mn_pmu_probe,
+};
+
+static int __init hisi_mn_pmu_module_init(void)
+{
+ int ret;
+
+ ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "perf/hisi/mn:online",
+ hisi_uncore_pmu_online_cpu,
+ hisi_uncore_pmu_offline_cpu);
+ if (ret < 0) {
+ pr_err("hisi_mn_pmu: Failed to setup MN PMU hotplug: %d\n", ret);
+ return ret;
+ }
+ hisi_mn_pmu_online = ret;
+
+ ret = platform_driver_register(&hisi_mn_pmu_driver);
+ if (ret)
+ cpuhp_remove_multi_state(hisi_mn_pmu_online);
+
+ return ret;
+}
+module_init(hisi_mn_pmu_module_init);
+
+static void __exit hisi_mn_pmu_module_exit(void)
+{
+ platform_driver_unregister(&hisi_mn_pmu_driver);
+ cpuhp_remove_multi_state(hisi_mn_pmu_online);
+}
+module_exit(hisi_mn_pmu_module_exit);
+
+MODULE_IMPORT_NS("HISI_PMU");
+MODULE_DESCRIPTION("HiSilicon SoC MN uncore PMU driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Junhao He <hejunhao3@huawei.com>");
diff --git a/drivers/perf/hisilicon/hisi_uncore_noc_pmu.c b/drivers/perf/hisilicon/hisi_uncore_noc_pmu.c
new file mode 100644
index 000000000000..de3b9cc7aada
--- /dev/null
+++ b/drivers/perf/hisilicon/hisi_uncore_noc_pmu.c
@@ -0,0 +1,443 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for HiSilicon Uncore NoC (Network on Chip) PMU device
+ *
+ * Copyright (c) 2025 HiSilicon Technologies Co., Ltd.
+ * Author: Yicong Yang <yangyicong@hisilicon.com>
+ */
+#include <linux/bitops.h>
+#include <linux/cpuhotplug.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/sysfs.h>
+
+#include "hisi_uncore_pmu.h"
+
+#define NOC_PMU_VERSION 0x1e00
+#define NOC_PMU_GLOBAL_CTRL 0x1e04
+#define NOC_PMU_GLOBAL_CTRL_PMU_EN BIT(0)
+#define NOC_PMU_GLOBAL_CTRL_TT_EN BIT(1)
+#define NOC_PMU_CNT_INFO 0x1e08
+#define NOC_PMU_CNT_INFO_OVERFLOW(n) BIT(n)
+#define NOC_PMU_EVENT_CTRL0 0x1e20
+#define NOC_PMU_EVENT_CTRL_TYPE GENMASK(4, 0)
+/*
+ * Note that a channel setting of 0x0 resets the counter value, so don't
+ * program it before the counter has been read out.
+ */
+#define NOC_PMU_EVENT_CTRL_CHANNEL GENMASK(10, 8)
+#define NOC_PMU_EVENT_CTRL_EN BIT(11)
+#define NOC_PMU_EVENT_COUNTER0 0x1e80
+
+#define NOC_PMU_NR_COUNTERS 4
+#define NOC_PMU_CH_DEFAULT 0x7
+
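+/* 32-bit event control registers have a stride of 4, 64-bit counters a stride of 8 */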
+#define NOC_PMU_EVENT_CTRLn(ctrl0, n) ((ctrl0) + 4 * (n))
+#define NOC_PMU_EVENT_CNTRn(cntr0, n) ((cntr0) + 8 * (n))
+
+HISI_PMU_EVENT_ATTR_EXTRACTOR(ch, config1, 2, 0);
+HISI_PMU_EVENT_ATTR_EXTRACTOR(tt_en, config1, 3, 3);
+
+/* Dynamic CPU hotplug state used by this PMU driver */
+static enum cpuhp_state hisi_noc_pmu_cpuhp_state;
+
+struct hisi_noc_pmu_regs {
+ u32 version;
+ u32 pmu_ctrl;
+ u32 event_ctrl0;
+ u32 event_cntr0;
+ u32 overflow_status;
+};
+
+/*
+ * Tracetag filtering is global rather than per event, so all scheduled
+ * events must use the same setting. Return true if the incoming event
+ * matches the tracetag configuration of the currently scheduled events.
+ */
+static bool hisi_noc_pmu_check_global_filter(struct perf_event *curr,
+ struct perf_event *new)
+{
+ return hisi_get_tt_en(curr) == hisi_get_tt_en(new);
+}
+
+static void hisi_noc_pmu_write_evtype(struct hisi_pmu *noc_pmu, int idx, u32 type)
+{
+ struct hisi_noc_pmu_regs *reg_info = noc_pmu->dev_info->private;
+ u32 reg;
+
+ reg = readl(noc_pmu->base + NOC_PMU_EVENT_CTRLn(reg_info->event_ctrl0, idx));
+ reg &= ~NOC_PMU_EVENT_CTRL_TYPE;
+ reg |= FIELD_PREP(NOC_PMU_EVENT_CTRL_TYPE, type);
+ writel(reg, noc_pmu->base + NOC_PMU_EVENT_CTRLn(reg_info->event_ctrl0, idx));
+}
+
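+/*
+ * Counter allocation: return -EAGAIN if the event's tracetag setting
+ * conflicts with the events already scheduled, otherwise fall back to the
+ * common HiSilicon uncore counter allocation.
+ */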
+static int hisi_noc_pmu_get_event_idx(struct perf_event *event)
+{
+ struct hisi_pmu *noc_pmu = to_hisi_pmu(event->pmu);
+ struct hisi_pmu_hwevents *pmu_events = &noc_pmu->pmu_events;
+ int cur_idx;
+
+ cur_idx = find_first_bit(pmu_events->used_mask, noc_pmu->num_counters);
+ if (cur_idx != noc_pmu->num_counters &&
+ !hisi_noc_pmu_check_global_filter(pmu_events->hw_events[cur_idx], event))
+ return -EAGAIN;
+
+ return hisi_uncore_pmu_get_event_idx(event);
+}
+
+static u64 hisi_noc_pmu_read_counter(struct hisi_pmu *noc_pmu,
+ struct hw_perf_event *hwc)
+{
+ struct hisi_noc_pmu_regs *reg_info = noc_pmu->dev_info->private;
+
+ return readq(noc_pmu->base + NOC_PMU_EVENT_CNTRn(reg_info->event_cntr0, hwc->idx));
+}
+
+static void hisi_noc_pmu_write_counter(struct hisi_pmu *noc_pmu,
+ struct hw_perf_event *hwc, u64 val)
+{
+ struct hisi_noc_pmu_regs *reg_info = noc_pmu->dev_info->private;
+
+ writeq(val, noc_pmu->base + NOC_PMU_EVENT_CNTRn(reg_info->event_cntr0, hwc->idx));
+}
+
+static void hisi_noc_pmu_enable_counter(struct hisi_pmu *noc_pmu,
+ struct hw_perf_event *hwc)
+{
+ struct hisi_noc_pmu_regs *reg_info = noc_pmu->dev_info->private;
+ u32 reg;
+
+ reg = readl(noc_pmu->base + NOC_PMU_EVENT_CTRLn(reg_info->event_ctrl0, hwc->idx));
+ reg |= NOC_PMU_EVENT_CTRL_EN;
+ writel(reg, noc_pmu->base + NOC_PMU_EVENT_CTRLn(reg_info->event_ctrl0, hwc->idx));
+}
+
+static void hisi_noc_pmu_disable_counter(struct hisi_pmu *noc_pmu,
+ struct hw_perf_event *hwc)
+{
+ struct hisi_noc_pmu_regs *reg_info = noc_pmu->dev_info->private;
+ u32 reg;
+
+ reg = readl(noc_pmu->base + NOC_PMU_EVENT_CTRLn(reg_info->event_ctrl0, hwc->idx));
+ reg &= ~NOC_PMU_EVENT_CTRL_EN;
+ writel(reg, noc_pmu->base + NOC_PMU_EVENT_CTRLn(reg_info->event_ctrl0, hwc->idx));
+}
+
+static void hisi_noc_pmu_enable_counter_int(struct hisi_pmu *noc_pmu,
+ struct hw_perf_event *hwc)
+{
+	/* Interrupts are not supported, so these are stubs. */
+}
+
+static void hisi_noc_pmu_disable_counter_int(struct hisi_pmu *noc_pmu,
+ struct hw_perf_event *hwc)
+{
+}
+
+static void hisi_noc_pmu_start_counters(struct hisi_pmu *noc_pmu)
+{
+ struct hisi_noc_pmu_regs *reg_info = noc_pmu->dev_info->private;
+ u32 reg;
+
+ reg = readl(noc_pmu->base + reg_info->pmu_ctrl);
+ reg |= NOC_PMU_GLOBAL_CTRL_PMU_EN;
+ writel(reg, noc_pmu->base + reg_info->pmu_ctrl);
+}
+
+static void hisi_noc_pmu_stop_counters(struct hisi_pmu *noc_pmu)
+{
+ struct hisi_noc_pmu_regs *reg_info = noc_pmu->dev_info->private;
+ u32 reg;
+
+ reg = readl(noc_pmu->base + reg_info->pmu_ctrl);
+ reg &= ~NOC_PMU_GLOBAL_CTRL_PMU_EN;
+ writel(reg, noc_pmu->base + reg_info->pmu_ctrl);
+}
+
+static u32 hisi_noc_pmu_get_int_status(struct hisi_pmu *noc_pmu)
+{
+ struct hisi_noc_pmu_regs *reg_info = noc_pmu->dev_info->private;
+
+ return readl(noc_pmu->base + reg_info->overflow_status);
+}
+
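+/* Clear the overflow flag of the given counter with a read-modify-write */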
+static void hisi_noc_pmu_clear_int_status(struct hisi_pmu *noc_pmu, int idx)
+{
+ struct hisi_noc_pmu_regs *reg_info = noc_pmu->dev_info->private;
+ u32 reg;
+
+ reg = readl(noc_pmu->base + reg_info->overflow_status);
+ reg &= ~NOC_PMU_CNT_INFO_OVERFLOW(idx);
+ writel(reg, noc_pmu->base + reg_info->overflow_status);
+}
+
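+/*
+ * Program the per-counter channel filter (falling back to the default
+ * channel mask) and, if requested, turn on the global tracetag filter.
+ */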
+static void hisi_noc_pmu_enable_filter(struct perf_event *event)
+{
+ struct hisi_pmu *noc_pmu = to_hisi_pmu(event->pmu);
+ struct hisi_noc_pmu_regs *reg_info = noc_pmu->dev_info->private;
+ struct hw_perf_event *hwc = &event->hw;
+ u32 tt_en = hisi_get_tt_en(event);
+ u32 ch = hisi_get_ch(event);
+ u32 reg;
+
+ if (!ch)
+ ch = NOC_PMU_CH_DEFAULT;
+
+ reg = readl(noc_pmu->base + NOC_PMU_EVENT_CTRLn(reg_info->event_ctrl0, hwc->idx));
+ reg &= ~NOC_PMU_EVENT_CTRL_CHANNEL;
+ reg |= FIELD_PREP(NOC_PMU_EVENT_CTRL_CHANNEL, ch);
+ writel(reg, noc_pmu->base + NOC_PMU_EVENT_CTRLn(reg_info->event_ctrl0, hwc->idx));
+
+ /*
+	 * Since the tracetag filter applies to all the counters, don't touch it
+	 * unless the user specifies it explicitly.
+ */
+ if (tt_en) {
+ reg = readl(noc_pmu->base + reg_info->pmu_ctrl);
+ reg |= NOC_PMU_GLOBAL_CTRL_TT_EN;
+ writel(reg, noc_pmu->base + reg_info->pmu_ctrl);
+ }
+}
+
+static void hisi_noc_pmu_disable_filter(struct perf_event *event)
+{
+ struct hisi_pmu *noc_pmu = to_hisi_pmu(event->pmu);
+ struct hisi_noc_pmu_regs *reg_info = noc_pmu->dev_info->private;
+ u32 tt_en = hisi_get_tt_en(event);
+ u32 reg;
+
+ /*
+	 * If other counters are still in use, don't touch the global tracetag
+	 * configuration.
+ */
+ if (bitmap_weight(noc_pmu->pmu_events.used_mask, noc_pmu->num_counters) > 1)
+ return;
+
+ if (tt_en) {
+ reg = readl(noc_pmu->base + reg_info->pmu_ctrl);
+ reg &= ~NOC_PMU_GLOBAL_CTRL_TT_EN;
+ writel(reg, noc_pmu->base + reg_info->pmu_ctrl);
+ }
+}
+
+static const struct hisi_uncore_ops hisi_uncore_noc_ops = {
+ .write_evtype = hisi_noc_pmu_write_evtype,
+ .get_event_idx = hisi_noc_pmu_get_event_idx,
+ .read_counter = hisi_noc_pmu_read_counter,
+ .write_counter = hisi_noc_pmu_write_counter,
+ .enable_counter = hisi_noc_pmu_enable_counter,
+ .disable_counter = hisi_noc_pmu_disable_counter,
+ .enable_counter_int = hisi_noc_pmu_enable_counter_int,
+ .disable_counter_int = hisi_noc_pmu_disable_counter_int,
+ .start_counters = hisi_noc_pmu_start_counters,
+ .stop_counters = hisi_noc_pmu_stop_counters,
+ .get_int_status = hisi_noc_pmu_get_int_status,
+ .clear_int_status = hisi_noc_pmu_clear_int_status,
+ .enable_filter = hisi_noc_pmu_enable_filter,
+ .disable_filter = hisi_noc_pmu_disable_filter,
+};
+
+static struct attribute *hisi_noc_pmu_format_attrs[] = {
+ HISI_PMU_FORMAT_ATTR(event, "config:0-7"),
+ HISI_PMU_FORMAT_ATTR(ch, "config1:0-2"),
+ HISI_PMU_FORMAT_ATTR(tt_en, "config1:3"),
+ NULL
+};
+
+static const struct attribute_group hisi_noc_pmu_format_group = {
+ .name = "format",
+ .attrs = hisi_noc_pmu_format_attrs,
+};
+
+static struct attribute *hisi_noc_pmu_events_attrs[] = {
+ HISI_PMU_EVENT_ATTR(cycles, 0x0e),
+ /* Flux on/off the ring */
+ HISI_PMU_EVENT_ATTR(ingress_flow_sum, 0x1a),
+ HISI_PMU_EVENT_ATTR(egress_flow_sum, 0x17),
+ /* Buffer full duration on/off the ring */
+ HISI_PMU_EVENT_ATTR(ingress_buf_full, 0x19),
+ HISI_PMU_EVENT_ATTR(egress_buf_full, 0x12),
+ /* Failure packets count on/off the ring */
+ HISI_PMU_EVENT_ATTR(cw_ingress_fail, 0x01),
+ HISI_PMU_EVENT_ATTR(cc_ingress_fail, 0x09),
+ HISI_PMU_EVENT_ATTR(cw_egress_fail, 0x03),
+ HISI_PMU_EVENT_ATTR(cc_egress_fail, 0x0b),
+ /* Flux of the ring */
+ HISI_PMU_EVENT_ATTR(cw_main_flow_sum, 0x05),
+ HISI_PMU_EVENT_ATTR(cc_main_flow_sum, 0x0d),
+ NULL
+};
+
+static const struct attribute_group hisi_noc_pmu_events_group = {
+ .name = "events",
+ .attrs = hisi_noc_pmu_events_attrs,
+};
+
+static const struct attribute_group *hisi_noc_pmu_attr_groups[] = {
+ &hisi_noc_pmu_format_group,
+ &hisi_noc_pmu_events_group,
+ &hisi_pmu_cpumask_attr_group,
+ &hisi_pmu_identifier_group,
+ NULL
+};
+
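+/*
+ * Gather the firmware-described topology, map the MMIO resource and fill
+ * in the matched device info before the PMU is registered.
+ */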
+static int hisi_noc_pmu_dev_init(struct platform_device *pdev, struct hisi_pmu *noc_pmu)
+{
+ struct hisi_noc_pmu_regs *reg_info;
+
+ hisi_uncore_pmu_init_topology(noc_pmu, &pdev->dev);
+
+ if (noc_pmu->topo.scl_id < 0)
+ return dev_err_probe(&pdev->dev, -EINVAL, "failed to get scl-id\n");
+
+ if (noc_pmu->topo.index_id < 0)
+ return dev_err_probe(&pdev->dev, -EINVAL, "failed to get idx-id\n");
+
+ if (noc_pmu->topo.sub_id < 0)
+ return dev_err_probe(&pdev->dev, -EINVAL, "failed to get sub-id\n");
+
+ noc_pmu->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(noc_pmu->base))
+ return dev_err_probe(&pdev->dev, PTR_ERR(noc_pmu->base),
+				     "Failed to remap I/O memory\n");
+
+ noc_pmu->dev_info = device_get_match_data(&pdev->dev);
+ if (!noc_pmu->dev_info)
+ return -ENODEV;
+
+ noc_pmu->pmu_events.attr_groups = noc_pmu->dev_info->attr_groups;
+ noc_pmu->counter_bits = noc_pmu->dev_info->counter_bits;
+ noc_pmu->check_event = noc_pmu->dev_info->check_event;
+ noc_pmu->num_counters = NOC_PMU_NR_COUNTERS;
+ noc_pmu->ops = &hisi_uncore_noc_ops;
+ noc_pmu->dev = &pdev->dev;
+ noc_pmu->on_cpu = -1;
+
+ reg_info = noc_pmu->dev_info->private;
+ noc_pmu->identifier = readl(noc_pmu->base + reg_info->version);
+
+ return 0;
+}
+
+static void hisi_noc_pmu_remove_cpuhp_instance(void *hotplug_node)
+{
+ cpuhp_state_remove_instance_nocalls(hisi_noc_pmu_cpuhp_state, hotplug_node);
+}
+
+static void hisi_noc_pmu_unregister_pmu(void *pmu)
+{
+ perf_pmu_unregister(pmu);
+}
+
+static int hisi_noc_pmu_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct hisi_pmu *noc_pmu;
+ char *name;
+ int ret;
+
+ noc_pmu = devm_kzalloc(dev, sizeof(*noc_pmu), GFP_KERNEL);
+ if (!noc_pmu)
+ return -ENOMEM;
+
+ /*
+	 * The HiSilicon uncore PMU framework retrieves the common hisi_pmu
+	 * device from the device's drvdata.
+ */
+ platform_set_drvdata(pdev, noc_pmu);
+
+ ret = hisi_noc_pmu_dev_init(pdev, noc_pmu);
+ if (ret)
+ return ret;
+
+ ret = cpuhp_state_add_instance(hisi_noc_pmu_cpuhp_state, &noc_pmu->node);
+ if (ret)
+		return dev_err_probe(dev, ret, "Failed to register cpuhp instance\n");
+
+ ret = devm_add_action_or_reset(dev, hisi_noc_pmu_remove_cpuhp_instance,
+ &noc_pmu->node);
+ if (ret)
+ return ret;
+
+ hisi_pmu_init(noc_pmu, THIS_MODULE);
+
+ name = devm_kasprintf(dev, GFP_KERNEL, "hisi_scl%d_noc%d_%d",
+ noc_pmu->topo.scl_id, noc_pmu->topo.index_id,
+ noc_pmu->topo.sub_id);
+ if (!name)
+ return -ENOMEM;
+
+ ret = perf_pmu_register(&noc_pmu->pmu, name, -1);
+ if (ret)
+		return dev_err_probe(dev, ret, "Failed to register PMU\n");
+
+ return devm_add_action_or_reset(dev, hisi_noc_pmu_unregister_pmu,
+ &noc_pmu->pmu);
+}
+
+static struct hisi_noc_pmu_regs hisi_noc_v1_pmu_regs = {
+ .version = NOC_PMU_VERSION,
+ .pmu_ctrl = NOC_PMU_GLOBAL_CTRL,
+ .event_ctrl0 = NOC_PMU_EVENT_CTRL0,
+ .event_cntr0 = NOC_PMU_EVENT_COUNTER0,
+ .overflow_status = NOC_PMU_CNT_INFO,
+};
+
+static const struct hisi_pmu_dev_info hisi_noc_v1 = {
+ .attr_groups = hisi_noc_pmu_attr_groups,
+ .counter_bits = 64,
+ .check_event = NOC_PMU_EVENT_CTRL_TYPE,
+ .private = &hisi_noc_v1_pmu_regs,
+};
+
+static const struct acpi_device_id hisi_noc_pmu_ids[] = {
+ { "HISI04E0", (kernel_ulong_t) &hisi_noc_v1 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, hisi_noc_pmu_ids);
+
+static struct platform_driver hisi_noc_pmu_driver = {
+ .driver = {
+ .name = "hisi_noc_pmu",
+ .acpi_match_table = hisi_noc_pmu_ids,
+ .suppress_bind_attrs = true,
+ },
+ .probe = hisi_noc_pmu_probe,
+};
+
+static int __init hisi_noc_pmu_module_init(void)
+{
+ int ret;
+
+ ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "perf/hisi/noc:online",
+ hisi_uncore_pmu_online_cpu,
+ hisi_uncore_pmu_offline_cpu);
+ if (ret < 0) {
+		pr_err("hisi_noc_pmu: Failed to set up cpuhp callbacks, ret = %d\n", ret);
+ return ret;
+ }
+ hisi_noc_pmu_cpuhp_state = ret;
+
+ ret = platform_driver_register(&hisi_noc_pmu_driver);
+ if (ret)
+ cpuhp_remove_multi_state(hisi_noc_pmu_cpuhp_state);
+
+ return ret;
+}
+module_init(hisi_noc_pmu_module_init);
+
+static void __exit hisi_noc_pmu_module_exit(void)
+{
+ platform_driver_unregister(&hisi_noc_pmu_driver);
+ cpuhp_remove_multi_state(hisi_noc_pmu_cpuhp_state);
+}
+module_exit(hisi_noc_pmu_module_exit);
+
+MODULE_IMPORT_NS("HISI_PMU");
+MODULE_DESCRIPTION("HiSilicon SoC Uncore NoC PMU driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Yicong Yang <yangyicong@hisilicon.com>");
diff --git a/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c b/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c
index d941e746b424..80108c63cb60 100644
--- a/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c
@@ -269,25 +269,22 @@ static void hisi_pa_pmu_clear_int_status(struct hisi_pmu *pa_pmu, int idx)
static int hisi_pa_pmu_init_data(struct platform_device *pdev,
struct hisi_pmu *pa_pmu)
{
+ hisi_uncore_pmu_init_topology(pa_pmu, &pdev->dev);
+
/*
* As PA PMU is in a SICL, use the SICL_ID and the index ID
* to identify the PA PMU.
*/
- if (device_property_read_u32(&pdev->dev, "hisilicon,scl-id",
- &pa_pmu->sicl_id)) {
+ if (pa_pmu->topo.sicl_id < 0) {
dev_err(&pdev->dev, "Cannot read sicl-id!\n");
return -EINVAL;
}
- if (device_property_read_u32(&pdev->dev, "hisilicon,idx-id",
- &pa_pmu->index_id)) {
+ if (pa_pmu->topo.index_id < 0) {
dev_err(&pdev->dev, "Cannot read idx-id!\n");
return -EINVAL;
}
- pa_pmu->ccl_id = -1;
- pa_pmu->sccl_id = -1;
-
pa_pmu->dev_info = device_get_match_data(&pdev->dev);
if (!pa_pmu->dev_info)
return -ENODEV;
@@ -356,29 +353,6 @@ static const struct attribute_group hisi_h60pa_pmu_events_group = {
.attrs = hisi_h60pa_pmu_events_attr,
};
-static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL);
-
-static struct attribute *hisi_pa_pmu_cpumask_attrs[] = {
- &dev_attr_cpumask.attr,
- NULL
-};
-
-static const struct attribute_group hisi_pa_pmu_cpumask_attr_group = {
- .attrs = hisi_pa_pmu_cpumask_attrs,
-};
-
-static struct device_attribute hisi_pa_pmu_identifier_attr =
- __ATTR(identifier, 0444, hisi_uncore_pmu_identifier_attr_show, NULL);
-
-static struct attribute *hisi_pa_pmu_identifier_attrs[] = {
- &hisi_pa_pmu_identifier_attr.attr,
- NULL
-};
-
-static const struct attribute_group hisi_pa_pmu_identifier_group = {
- .attrs = hisi_pa_pmu_identifier_attrs,
-};
-
static struct hisi_pa_pmu_int_regs hisi_pa_pmu_regs = {
.mask_offset = PA_INT_MASK,
.clear_offset = PA_INT_CLEAR,
@@ -388,8 +362,8 @@ static struct hisi_pa_pmu_int_regs hisi_pa_pmu_regs = {
static const struct attribute_group *hisi_pa_pmu_v2_attr_groups[] = {
&hisi_pa_pmu_v2_format_group,
&hisi_pa_pmu_v2_events_group,
- &hisi_pa_pmu_cpumask_attr_group,
- &hisi_pa_pmu_identifier_group,
+ &hisi_pmu_cpumask_attr_group,
+ &hisi_pmu_identifier_group,
NULL
};
@@ -402,8 +376,8 @@ static const struct hisi_pmu_dev_info hisi_h32pa_v2 = {
static const struct attribute_group *hisi_pa_pmu_v3_attr_groups[] = {
&hisi_pa_pmu_v2_format_group,
&hisi_pa_pmu_v3_events_group,
- &hisi_pa_pmu_cpumask_attr_group,
- &hisi_pa_pmu_identifier_group,
+ &hisi_pmu_cpumask_attr_group,
+ &hisi_pmu_identifier_group,
NULL
};
@@ -422,8 +396,8 @@ static struct hisi_pa_pmu_int_regs hisi_h60pa_pmu_regs = {
static const struct attribute_group *hisi_h60pa_pmu_attr_groups[] = {
&hisi_pa_pmu_v2_format_group,
&hisi_h60pa_pmu_events_group,
- &hisi_pa_pmu_cpumask_attr_group,
- &hisi_pa_pmu_identifier_group,
+ &hisi_pmu_cpumask_attr_group,
+ &hisi_pmu_identifier_group,
NULL
};
@@ -466,7 +440,7 @@ static int hisi_pa_pmu_dev_probe(struct platform_device *pdev,
pa_pmu->pmu_events.attr_groups = pa_pmu->dev_info->attr_groups;
pa_pmu->num_counters = PA_NR_COUNTERS;
pa_pmu->ops = &hisi_uncore_pa_ops;
- pa_pmu->check_event = 0xB0;
+ pa_pmu->check_event = PA_EVTYPE_MASK;
pa_pmu->counter_bits = 64;
pa_pmu->dev = &pdev->dev;
pa_pmu->on_cpu = -1;
@@ -488,9 +462,9 @@ static int hisi_pa_pmu_probe(struct platform_device *pdev)
if (ret)
return ret;
- name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sicl%d_%s%u",
- pa_pmu->sicl_id, pa_pmu->dev_info->name,
- pa_pmu->index_id);
+ name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sicl%d_%s%d",
+ pa_pmu->topo.sicl_id, pa_pmu->dev_info->name,
+ pa_pmu->topo.index_id);
if (!name)
return -ENOMEM;
@@ -505,8 +479,8 @@ static int hisi_pa_pmu_probe(struct platform_device *pdev)
ret = perf_pmu_register(&pa_pmu->pmu, name, -1);
if (ret) {
dev_err(pa_pmu->dev, "PMU register failed, ret = %d\n", ret);
- cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HISI_PA_ONLINE,
- &pa_pmu->node);
+ cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_PA_ONLINE,
+ &pa_pmu->node);
return ret;
}
@@ -514,14 +488,13 @@ static int hisi_pa_pmu_probe(struct platform_device *pdev)
return ret;
}
-static int hisi_pa_pmu_remove(struct platform_device *pdev)
+static void hisi_pa_pmu_remove(struct platform_device *pdev)
{
struct hisi_pmu *pa_pmu = platform_get_drvdata(pdev);
perf_pmu_unregister(&pa_pmu->pmu);
cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_PA_ONLINE,
&pa_pmu->node);
- return 0;
}
static const struct acpi_device_id hisi_pa_pmu_acpi_match[] = {
@@ -570,6 +543,7 @@ static void __exit hisi_pa_pmu_module_exit(void)
}
module_exit(hisi_pa_pmu_module_exit);
+MODULE_IMPORT_NS("HISI_PMU");
MODULE_DESCRIPTION("HiSilicon Protocol Adapter uncore PMU driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Shaokun Zhang <zhangshaokun@hisilicon.com>");
diff --git a/drivers/perf/hisilicon/hisi_uncore_pmu.c b/drivers/perf/hisilicon/hisi_uncore_pmu.c
index 04031450d5fe..de71dcf11653 100644
--- a/drivers/perf/hisilicon/hisi_uncore_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_pmu.c
@@ -14,6 +14,7 @@
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
+#include <linux/property.h>
#include <asm/cputype.h>
#include <asm/local64.h>
@@ -23,20 +24,6 @@
#define HISI_MAX_PERIOD(nr) (GENMASK_ULL((nr) - 1, 0))
/*
- * PMU format attributes
- */
-ssize_t hisi_format_sysfs_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct dev_ext_attribute *eattr;
-
- eattr = container_of(attr, struct dev_ext_attribute, attr);
-
- return sysfs_emit(buf, "%s\n", (char *)eattr->var);
-}
-EXPORT_SYMBOL_GPL(hisi_format_sysfs_show);
-
-/*
* PMU event attributes
*/
ssize_t hisi_event_sysfs_show(struct device *dev,
@@ -48,7 +35,7 @@ ssize_t hisi_event_sysfs_show(struct device *dev,
return sysfs_emit(page, "config=0x%lx\n", (unsigned long)eattr->var);
}
-EXPORT_SYMBOL_GPL(hisi_event_sysfs_show);
+EXPORT_SYMBOL_NS_GPL(hisi_event_sysfs_show, "HISI_PMU");
/*
* sysfs cpumask attributes. For uncore PMU, we only have a single CPU to show
@@ -60,7 +47,52 @@ ssize_t hisi_cpumask_sysfs_show(struct device *dev,
return sysfs_emit(buf, "%d\n", hisi_pmu->on_cpu);
}
-EXPORT_SYMBOL_GPL(hisi_cpumask_sysfs_show);
+EXPORT_SYMBOL_NS_GPL(hisi_cpumask_sysfs_show, "HISI_PMU");
+
+static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL);
+
+static ssize_t hisi_associated_cpus_sysfs_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hisi_pmu *hisi_pmu = to_hisi_pmu(dev_get_drvdata(dev));
+
+ return cpumap_print_to_pagebuf(true, buf, &hisi_pmu->associated_cpus);
+}
+static DEVICE_ATTR(associated_cpus, 0444, hisi_associated_cpus_sysfs_show, NULL);
+
+static struct attribute *hisi_pmu_cpumask_attrs[] = {
+ &dev_attr_cpumask.attr,
+ &dev_attr_associated_cpus.attr,
+ NULL
+};
+
+const struct attribute_group hisi_pmu_cpumask_attr_group = {
+ .attrs = hisi_pmu_cpumask_attrs,
+};
+EXPORT_SYMBOL_NS_GPL(hisi_pmu_cpumask_attr_group, "HISI_PMU");
+
+ssize_t hisi_uncore_pmu_identifier_attr_show(struct device *dev,
+ struct device_attribute *attr,
+ char *page)
+{
+ struct hisi_pmu *hisi_pmu = to_hisi_pmu(dev_get_drvdata(dev));
+
+ return sysfs_emit(page, "0x%08x\n", hisi_pmu->identifier);
+}
+EXPORT_SYMBOL_NS_GPL(hisi_uncore_pmu_identifier_attr_show, "HISI_PMU");
+
+static struct device_attribute hisi_pmu_identifier_attr =
+ __ATTR(identifier, 0444, hisi_uncore_pmu_identifier_attr_show, NULL);
+
+static struct attribute *hisi_pmu_identifier_attrs[] = {
+ &hisi_pmu_identifier_attr.attr,
+ NULL
+};
+
+const struct attribute_group hisi_pmu_identifier_group = {
+ .attrs = hisi_pmu_identifier_attrs,
+};
+EXPORT_SYMBOL_NS_GPL(hisi_pmu_identifier_group, "HISI_PMU");
static bool hisi_validate_event_group(struct perf_event *event)
{
@@ -110,24 +142,14 @@ int hisi_uncore_pmu_get_event_idx(struct perf_event *event)
return idx;
}
-EXPORT_SYMBOL_GPL(hisi_uncore_pmu_get_event_idx);
-
-ssize_t hisi_uncore_pmu_identifier_attr_show(struct device *dev,
- struct device_attribute *attr,
- char *page)
-{
- struct hisi_pmu *hisi_pmu = to_hisi_pmu(dev_get_drvdata(dev));
-
- return sysfs_emit(page, "0x%08x\n", hisi_pmu->identifier);
-}
-EXPORT_SYMBOL_GPL(hisi_uncore_pmu_identifier_attr_show);
+EXPORT_SYMBOL_NS_GPL(hisi_uncore_pmu_get_event_idx, "HISI_PMU");
static void hisi_uncore_pmu_clear_event_idx(struct hisi_pmu *hisi_pmu, int idx)
{
clear_bit(idx, hisi_pmu->pmu_events.used_mask);
}
-static irqreturn_t hisi_uncore_pmu_isr(int irq, void *data)
+irqreturn_t hisi_uncore_pmu_isr(int irq, void *data)
{
struct hisi_pmu *hisi_pmu = data;
struct perf_event *event;
@@ -156,6 +178,7 @@ static irqreturn_t hisi_uncore_pmu_isr(int irq, void *data)
return IRQ_HANDLED;
}
+EXPORT_SYMBOL_NS_GPL(hisi_uncore_pmu_isr, "HISI_PMU");
int hisi_uncore_pmu_init_irq(struct hisi_pmu *hisi_pmu,
struct platform_device *pdev)
@@ -179,7 +202,7 @@ int hisi_uncore_pmu_init_irq(struct hisi_pmu *hisi_pmu,
return 0;
}
-EXPORT_SYMBOL_GPL(hisi_uncore_pmu_init_irq);
+EXPORT_SYMBOL_NS_GPL(hisi_uncore_pmu_init_irq, "HISI_PMU");
int hisi_uncore_pmu_event_init(struct perf_event *event)
{
@@ -212,7 +235,7 @@ int hisi_uncore_pmu_event_init(struct perf_event *event)
return -EINVAL;
hisi_pmu = to_hisi_pmu(event->pmu);
- if (event->attr.config > hisi_pmu->check_event)
+ if ((event->attr.config & HISI_EVENTID_MASK) > hisi_pmu->check_event)
return -EINVAL;
if (hisi_pmu->on_cpu == -1)
@@ -233,7 +256,7 @@ int hisi_uncore_pmu_event_init(struct perf_event *event)
return 0;
}
-EXPORT_SYMBOL_GPL(hisi_uncore_pmu_event_init);
+EXPORT_SYMBOL_NS_GPL(hisi_uncore_pmu_event_init, "HISI_PMU");
/*
* Set the counter to count the event that we're interested in,
@@ -287,7 +310,7 @@ void hisi_uncore_pmu_set_event_period(struct perf_event *event)
/* Write start value to the hardware event counter */
hisi_pmu->ops->write_counter(hisi_pmu, hwc, val);
}
-EXPORT_SYMBOL_GPL(hisi_uncore_pmu_set_event_period);
+EXPORT_SYMBOL_NS_GPL(hisi_uncore_pmu_set_event_period, "HISI_PMU");
void hisi_uncore_pmu_event_update(struct perf_event *event)
{
@@ -308,7 +331,7 @@ void hisi_uncore_pmu_event_update(struct perf_event *event)
HISI_MAX_PERIOD(hisi_pmu->counter_bits);
local64_add(delta, &event->count);
}
-EXPORT_SYMBOL_GPL(hisi_uncore_pmu_event_update);
+EXPORT_SYMBOL_NS_GPL(hisi_uncore_pmu_event_update, "HISI_PMU");
void hisi_uncore_pmu_start(struct perf_event *event, int flags)
{
@@ -331,7 +354,7 @@ void hisi_uncore_pmu_start(struct perf_event *event, int flags)
hisi_uncore_pmu_enable_event(event);
perf_event_update_userpage(event);
}
-EXPORT_SYMBOL_GPL(hisi_uncore_pmu_start);
+EXPORT_SYMBOL_NS_GPL(hisi_uncore_pmu_start, "HISI_PMU");
void hisi_uncore_pmu_stop(struct perf_event *event, int flags)
{
@@ -348,7 +371,7 @@ void hisi_uncore_pmu_stop(struct perf_event *event, int flags)
hisi_uncore_pmu_event_update(event);
hwc->state |= PERF_HES_UPTODATE;
}
-EXPORT_SYMBOL_GPL(hisi_uncore_pmu_stop);
+EXPORT_SYMBOL_NS_GPL(hisi_uncore_pmu_stop, "HISI_PMU");
int hisi_uncore_pmu_add(struct perf_event *event, int flags)
{
@@ -371,7 +394,7 @@ int hisi_uncore_pmu_add(struct perf_event *event, int flags)
return 0;
}
-EXPORT_SYMBOL_GPL(hisi_uncore_pmu_add);
+EXPORT_SYMBOL_NS_GPL(hisi_uncore_pmu_add, "HISI_PMU");
void hisi_uncore_pmu_del(struct perf_event *event, int flags)
{
@@ -383,14 +406,14 @@ void hisi_uncore_pmu_del(struct perf_event *event, int flags)
perf_event_update_userpage(event);
hisi_pmu->pmu_events.hw_events[hwc->idx] = NULL;
}
-EXPORT_SYMBOL_GPL(hisi_uncore_pmu_del);
+EXPORT_SYMBOL_NS_GPL(hisi_uncore_pmu_del, "HISI_PMU");
void hisi_uncore_pmu_read(struct perf_event *event)
{
/* Read hardware counter and update the perf counter statistics */
hisi_uncore_pmu_event_update(event);
}
-EXPORT_SYMBOL_GPL(hisi_uncore_pmu_read);
+EXPORT_SYMBOL_NS_GPL(hisi_uncore_pmu_read, "HISI_PMU");
void hisi_uncore_pmu_enable(struct pmu *pmu)
{
@@ -403,7 +426,7 @@ void hisi_uncore_pmu_enable(struct pmu *pmu)
hisi_pmu->ops->start_counters(hisi_pmu);
}
-EXPORT_SYMBOL_GPL(hisi_uncore_pmu_enable);
+EXPORT_SYMBOL_NS_GPL(hisi_uncore_pmu_enable, "HISI_PMU");
void hisi_uncore_pmu_disable(struct pmu *pmu)
{
@@ -411,7 +434,7 @@ void hisi_uncore_pmu_disable(struct pmu *pmu)
hisi_pmu->ops->stop_counters(hisi_pmu);
}
-EXPORT_SYMBOL_GPL(hisi_uncore_pmu_disable);
+EXPORT_SYMBOL_NS_GPL(hisi_uncore_pmu_disable, "HISI_PMU");
/*
@@ -458,22 +481,19 @@ static void hisi_read_sccl_and_ccl_id(int *scclp, int *cclp)
*/
static bool hisi_pmu_cpu_is_associated_pmu(struct hisi_pmu *hisi_pmu)
{
+ struct hisi_pmu_topology *topo = &hisi_pmu->topo;
int sccl_id, ccl_id;
- /* If SCCL_ID is -1, the PMU is in a SICL and has no CPU affinity */
- if (hisi_pmu->sccl_id == -1)
- return true;
-
- if (hisi_pmu->ccl_id == -1) {
+ if (topo->ccl_id == -1) {
/* If CCL_ID is -1, the PMU only shares the same SCCL */
hisi_read_sccl_and_ccl_id(&sccl_id, NULL);
- return sccl_id == hisi_pmu->sccl_id;
+ return sccl_id == topo->sccl_id;
}
hisi_read_sccl_and_ccl_id(&sccl_id, &ccl_id);
- return sccl_id == hisi_pmu->sccl_id && ccl_id == hisi_pmu->ccl_id;
+ return sccl_id == topo->sccl_id && ccl_id == topo->ccl_id;
}
int hisi_uncore_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
@@ -481,35 +501,46 @@ int hisi_uncore_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
struct hisi_pmu *hisi_pmu = hlist_entry_safe(node, struct hisi_pmu,
node);
- if (!hisi_pmu_cpu_is_associated_pmu(hisi_pmu))
+ /*
+	 * If the CPU is not associated with the PMU, initialize hisi_pmu->on_cpu
+	 * based on NUMA locality if it hasn't been initialized yet. For PMUs
+	 * that do have associated CPUs, it will be updated later.
+ */
+ if (!hisi_pmu_cpu_is_associated_pmu(hisi_pmu)) {
+ if (hisi_pmu->on_cpu != -1)
+ return 0;
+
+ hisi_pmu->on_cpu = cpumask_local_spread(0, dev_to_node(hisi_pmu->dev));
+ if (hisi_pmu->irq > 0)
+ WARN_ON(irq_set_affinity(hisi_pmu->irq,
+ cpumask_of(hisi_pmu->on_cpu)));
return 0;
+ }
cpumask_set_cpu(cpu, &hisi_pmu->associated_cpus);
- /* If another CPU is already managing this PMU, simply return. */
- if (hisi_pmu->on_cpu != -1)
+ /* If another associated CPU is already managing this PMU, simply return. */
+ if (hisi_pmu->on_cpu != -1 &&
+ cpumask_test_cpu(hisi_pmu->on_cpu, &hisi_pmu->associated_cpus))
return 0;
/* Use this CPU in cpumask for event counting */
hisi_pmu->on_cpu = cpu;
/* Overflow interrupt also should use the same CPU */
- WARN_ON(irq_set_affinity(hisi_pmu->irq, cpumask_of(cpu)));
+ if (hisi_pmu->irq > 0)
+ WARN_ON(irq_set_affinity(hisi_pmu->irq, cpumask_of(cpu)));
return 0;
}
-EXPORT_SYMBOL_GPL(hisi_uncore_pmu_online_cpu);
+EXPORT_SYMBOL_NS_GPL(hisi_uncore_pmu_online_cpu, "HISI_PMU");
int hisi_uncore_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
struct hisi_pmu *hisi_pmu = hlist_entry_safe(node, struct hisi_pmu,
node);
- cpumask_t pmu_online_cpus;
unsigned int target;
- if (!cpumask_test_and_clear_cpu(cpu, &hisi_pmu->associated_cpus))
- return 0;
-
/* Nothing to do if this CPU doesn't own the PMU */
if (hisi_pmu->on_cpu != cpu)
return 0;
@@ -517,27 +548,65 @@ int hisi_uncore_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
/* Give up ownership of the PMU */
hisi_pmu->on_cpu = -1;
- /* Choose a new CPU to migrate ownership of the PMU to */
- cpumask_and(&pmu_online_cpus, &hisi_pmu->associated_cpus,
- cpu_online_mask);
- target = cpumask_any_but(&pmu_online_cpus, cpu);
+ /*
+	 * Migrate ownership of the PMU to a new CPU chosen from the PMU's online
+	 * associated CPUs if possible; if no associated CPU is online, migrate
+	 * to any other online CPU.
+ */
+ target = cpumask_any_and_but(&hisi_pmu->associated_cpus,
+ cpu_online_mask, cpu);
+ if (target >= nr_cpu_ids)
+ target = cpumask_any_but(cpu_online_mask, cpu);
+
if (target >= nr_cpu_ids)
return 0;
perf_pmu_migrate_context(&hisi_pmu->pmu, cpu, target);
/* Use this CPU for event counting */
hisi_pmu->on_cpu = target;
- WARN_ON(irq_set_affinity(hisi_pmu->irq, cpumask_of(target)));
+
+ if (hisi_pmu->irq > 0)
+ WARN_ON(irq_set_affinity(hisi_pmu->irq, cpumask_of(target)));
return 0;
}
-EXPORT_SYMBOL_GPL(hisi_uncore_pmu_offline_cpu);
+EXPORT_SYMBOL_NS_GPL(hisi_uncore_pmu_offline_cpu, "HISI_PMU");
+
+/*
+ * Retrieve the topology information from the firmware for the hisi_pmu device.
+ * A topology ID will be -1 if we cannot initialize it, either because the
+ * PMU is not located at that level of the topology or because the firmware
+ * needs to be fixed.
+ */
+void hisi_uncore_pmu_init_topology(struct hisi_pmu *hisi_pmu, struct device *dev)
+{
+ struct hisi_pmu_topology *topo = &hisi_pmu->topo;
+
+ topo->sccl_id = -1;
+ topo->ccl_id = -1;
+ topo->index_id = -1;
+ topo->sub_id = -1;
+
+ if (device_property_read_u32(dev, "hisilicon,scl-id", &topo->sccl_id))
+ dev_dbg(dev, "no scl-id present\n");
+
+ if (device_property_read_u32(dev, "hisilicon,ccl-id", &topo->ccl_id))
+ dev_dbg(dev, "no ccl-id present\n");
+
+ if (device_property_read_u32(dev, "hisilicon,idx-id", &topo->index_id))
+ dev_dbg(dev, "no idx-id present\n");
+
+ if (device_property_read_u32(dev, "hisilicon,sub-id", &topo->sub_id))
+ dev_dbg(dev, "no sub-id present\n");
+}
+EXPORT_SYMBOL_NS_GPL(hisi_uncore_pmu_init_topology, "HISI_PMU");
void hisi_pmu_init(struct hisi_pmu *hisi_pmu, struct module *module)
{
struct pmu *pmu = &hisi_pmu->pmu;
pmu->module = module;
+ pmu->parent = hisi_pmu->dev;
pmu->task_ctx_nr = perf_invalid_context;
pmu->event_init = hisi_uncore_pmu_event_init;
pmu->pmu_enable = hisi_uncore_pmu_enable;
@@ -550,6 +619,7 @@ void hisi_pmu_init(struct hisi_pmu *hisi_pmu, struct module *module)
pmu->attr_groups = hisi_pmu->pmu_events.attr_groups;
pmu->capabilities = PERF_PMU_CAP_NO_EXCLUDE;
}
-EXPORT_SYMBOL_GPL(hisi_pmu_init);
+EXPORT_SYMBOL_NS_GPL(hisi_pmu_init, "HISI_PMU");
+MODULE_DESCRIPTION("HiSilicon SoC uncore Performance Monitor driver framework");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/perf/hisilicon/hisi_uncore_pmu.h b/drivers/perf/hisilicon/hisi_uncore_pmu.h
index 92402aa69d70..3ffe6acda653 100644
--- a/drivers/perf/hisilicon/hisi_uncore_pmu.h
+++ b/drivers/perf/hisilicon/hisi_uncore_pmu.h
@@ -24,7 +24,7 @@
#define pr_fmt(fmt) "hisi_pmu: " fmt
#define HISI_PMU_V2 0x30
-#define HISI_MAX_COUNTERS 0x10
+#define HISI_MAX_COUNTERS 0x18
#define to_hisi_pmu(p) (container_of(p, struct hisi_pmu, pmu))
#define HISI_PMU_ATTR(_name, _func, _config) \
@@ -33,7 +33,7 @@
})[0].attr.attr)
#define HISI_PMU_FORMAT_ATTR(_name, _config) \
- HISI_PMU_ATTR(_name, hisi_format_sysfs_show, (void *)_config)
+ HISI_PMU_ATTR(_name, device_show_string, _config)
#define HISI_PMU_EVENT_ATTR(_name, _config) \
HISI_PMU_ATTR(_name, hisi_event_sysfs_show, (unsigned long)_config)
@@ -43,7 +43,8 @@
return FIELD_GET(GENMASK_ULL(hi, lo), event->attr.config); \
}
-#define HISI_GET_EVENTID(ev) (ev->hw.config_base & 0xff)
+#define HISI_EVENTID_MASK GENMASK(7, 0)
+#define HISI_GET_EVENTID(ev) ((ev)->hw.config_base & HISI_EVENTID_MASK)
#define HISI_PMU_EVTYPE_BITS 8
#define HISI_PMU_EVTYPE_SHIFT(idx) ((idx) % 4 * HISI_PMU_EVTYPE_BITS)
@@ -72,6 +73,8 @@ struct hisi_uncore_ops {
struct hisi_pmu_dev_info {
const char *name;
const struct attribute_group **attr_groups;
+ u32 counter_bits;
+ u32 check_event;
void *private;
};
@@ -81,27 +84,55 @@ struct hisi_pmu_hwevents {
const struct attribute_group **attr_groups;
};
+/**
+ * struct hisi_pmu_topology - Describe the topology hierarchy on which the PMU
+ * is located.
+ * @sccl_id: ID of the SCCL on which the PMU is located.
+ * @sicl_id: ID of the SICL on which the PMU is located.
+ * @scl_id: ID used by the core which is unaware of the SCCL/SICL distinction.
+ * @ccl_id: ID of the CCL (CPU cluster) on which the PMU is located.
+ * @index_id: ID of the PMU module if there are several PMUs at a
+ * particular location in the topology.
+ * @sub_id: submodule ID of the PMU. For example, this is used for DDRC PMU v2
+ * since each DDRC has more than one DMC.
+ *
+ * An ID will be -1 if the PMU is not located at that level of the topology.
+ */
+struct hisi_pmu_topology {
+ /*
+	 * SCCL (Super CPU Cluster) and SICL (Super I/O Cluster) are parallel
+	 * levels, so a PMU cannot be located on both an SCCL and a SICL. If the
+	 * SCCL/SICL distinction is not relevant, use scl_id instead.
+ */
+ union {
+ int sccl_id;
+ int sicl_id;
+ int scl_id;
+ };
+ int ccl_id;
+ int index_id;
+ int sub_id;
+};
+
/* Generic pmu struct for different pmu types */
struct hisi_pmu {
struct pmu pmu;
const struct hisi_uncore_ops *ops;
const struct hisi_pmu_dev_info *dev_info;
struct hisi_pmu_hwevents pmu_events;
- /* associated_cpus: All CPUs associated with the PMU */
+ struct hisi_pmu_topology topo;
+ /*
+	 * CPUs associated with the PMU, which are preferred for counting. May be
+	 * empty if the PMU has no association (e.g. a PMU on a SICL), in which
+	 * case any online CPU will be used.
+ */
cpumask_t associated_cpus;
/* CPU used for counting */
int on_cpu;
int irq;
struct device *dev;
struct hlist_node node;
- int sccl_id;
- int sicl_id;
- int ccl_id;
void __iomem *base;
- /* the ID of the PMU modules */
- u32 index_id;
- /* For DDRC PMU v2: each DDRC has more than one DMC */
- u32 sub_id;
int num_counters;
int counter_bits;
/* check event code range */
@@ -109,6 +140,10 @@ struct hisi_pmu {
u32 identifier;
};
+/* Generic implementation of cpumask/identifier group */
+extern const struct attribute_group hisi_pmu_cpumask_attr_group;
+extern const struct attribute_group hisi_pmu_identifier_group;
+
int hisi_uncore_pmu_get_event_idx(struct perf_event *event);
void hisi_uncore_pmu_read(struct perf_event *event);
int hisi_uncore_pmu_add(struct perf_event *event, int flags);
@@ -122,8 +157,6 @@ void hisi_uncore_pmu_enable(struct pmu *pmu);
void hisi_uncore_pmu_disable(struct pmu *pmu);
ssize_t hisi_event_sysfs_show(struct device *dev,
struct device_attribute *attr, char *buf);
-ssize_t hisi_format_sysfs_show(struct device *dev,
- struct device_attribute *attr, char *buf);
ssize_t hisi_cpumask_sysfs_show(struct device *dev,
struct device_attribute *attr, char *buf);
int hisi_uncore_pmu_online_cpu(unsigned int cpu, struct hlist_node *node);
@@ -132,8 +165,10 @@ int hisi_uncore_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node);
ssize_t hisi_uncore_pmu_identifier_attr_show(struct device *dev,
struct device_attribute *attr,
char *page);
+irqreturn_t hisi_uncore_pmu_isr(int irq, void *data);
int hisi_uncore_pmu_init_irq(struct hisi_pmu *hisi_pmu,
struct platform_device *pdev);
+void hisi_uncore_pmu_init_topology(struct hisi_pmu *hisi_pmu, struct device *dev);
void hisi_pmu_init(struct hisi_pmu *hisi_pmu, struct module *module);
#endif /* __HISI_UNCORE_PMU_H__ */
diff --git a/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c b/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c
index 6fe534a665ed..cd32d606df05 100644
--- a/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c
@@ -28,6 +28,18 @@
#define SLLC_VERSION 0x1cf0
#define SLLC_EVENT_CNT0_L 0x1d00
+/* SLLC registers definition in v3 */
+#define SLLC_V3_INT_MASK 0x6834
+#define SLLC_V3_INT_STATUS 0x6838
+#define SLLC_V3_INT_CLEAR 0x683c
+#define SLLC_V3_VERSION 0x6c00
+#define SLLC_V3_PERF_CTRL 0x6d00
+#define SLLC_V3_SRCID_CTRL 0x6d04
+#define SLLC_V3_TGTID_CTRL 0x6d08
+#define SLLC_V3_EVENT_CTRL 0x6d14
+#define SLLC_V3_EVENT_TYPE0 0x6d18
+#define SLLC_V3_EVENT_CNT0_L 0x6e00
+
#define SLLC_EVTYPE_MASK 0xff
#define SLLC_PERF_CTRL_EN BIT(0)
#define SLLC_FILT_EN BIT(1)
@@ -40,7 +52,14 @@
#define SLLC_TGTID_MAX_SHIFT 12
#define SLLC_SRCID_CMD_SHIFT 1
#define SLLC_SRCID_MSK_SHIFT 12
-#define SLLC_NR_EVENTS 0x80
+
+#define SLLC_V3_TGTID_MIN_SHIFT 1
+#define SLLC_V3_TGTID_MAX_SHIFT 10
+#define SLLC_V3_SRCID_CMD_SHIFT 1
+#define SLLC_V3_SRCID_MSK_SHIFT 10
+
+#define SLLC_NR_EVENTS 0xff
+#define SLLC_EVENT_CNTn(cnt0, n) ((cnt0) + (n) * 8)
HISI_PMU_EVENT_ATTR_EXTRACTOR(tgtid_min, config1, 10, 0);
HISI_PMU_EVENT_ATTR_EXTRACTOR(tgtid_max, config1, 21, 11);
@@ -48,6 +67,23 @@ HISI_PMU_EVENT_ATTR_EXTRACTOR(srcid_cmd, config1, 32, 22);
HISI_PMU_EVENT_ATTR_EXTRACTOR(srcid_msk, config1, 43, 33);
HISI_PMU_EVENT_ATTR_EXTRACTOR(tracetag_en, config1, 44, 44);
+struct hisi_sllc_pmu_regs {
+ u32 int_mask;
+ u32 int_clear;
+ u32 int_status;
+ u32 perf_ctrl;
+ u32 srcid_ctrl;
+ u32 srcid_cmd_shift;
+ u32 srcid_mask_shift;
+ u32 tgtid_ctrl;
+ u32 tgtid_min_shift;
+ u32 tgtid_max_shift;
+ u32 event_ctrl;
+ u32 event_type0;
+ u32 version;
+ u32 event_cnt0;
+};
+
static bool tgtid_is_valid(u32 max, u32 min)
{
return max > 0 && max >= min;
@@ -56,96 +92,104 @@ static bool tgtid_is_valid(u32 max, u32 min)
static void hisi_sllc_pmu_enable_tracetag(struct perf_event *event)
{
struct hisi_pmu *sllc_pmu = to_hisi_pmu(event->pmu);
+ struct hisi_sllc_pmu_regs *regs = sllc_pmu->dev_info->private;
u32 tt_en = hisi_get_tracetag_en(event);
if (tt_en) {
u32 val;
- val = readl(sllc_pmu->base + SLLC_PERF_CTRL);
+ val = readl(sllc_pmu->base + regs->perf_ctrl);
val |= SLLC_TRACETAG_EN | SLLC_FILT_EN;
- writel(val, sllc_pmu->base + SLLC_PERF_CTRL);
+ writel(val, sllc_pmu->base + regs->perf_ctrl);
}
}
static void hisi_sllc_pmu_disable_tracetag(struct perf_event *event)
{
struct hisi_pmu *sllc_pmu = to_hisi_pmu(event->pmu);
+ struct hisi_sllc_pmu_regs *regs = sllc_pmu->dev_info->private;
u32 tt_en = hisi_get_tracetag_en(event);
if (tt_en) {
u32 val;
- val = readl(sllc_pmu->base + SLLC_PERF_CTRL);
+ val = readl(sllc_pmu->base + regs->perf_ctrl);
val &= ~(SLLC_TRACETAG_EN | SLLC_FILT_EN);
- writel(val, sllc_pmu->base + SLLC_PERF_CTRL);
+ writel(val, sllc_pmu->base + regs->perf_ctrl);
}
}
static void hisi_sllc_pmu_config_tgtid(struct perf_event *event)
{
struct hisi_pmu *sllc_pmu = to_hisi_pmu(event->pmu);
+ struct hisi_sllc_pmu_regs *regs = sllc_pmu->dev_info->private;
u32 min = hisi_get_tgtid_min(event);
u32 max = hisi_get_tgtid_max(event);
if (tgtid_is_valid(max, min)) {
- u32 val = (max << SLLC_TGTID_MAX_SHIFT) | (min << SLLC_TGTID_MIN_SHIFT);
+ u32 val = (max << regs->tgtid_max_shift) |
+ (min << regs->tgtid_min_shift);
- writel(val, sllc_pmu->base + SLLC_TGTID_CTRL);
+ writel(val, sllc_pmu->base + regs->tgtid_ctrl);
/* Enable the tgtid */
- val = readl(sllc_pmu->base + SLLC_PERF_CTRL);
+ val = readl(sllc_pmu->base + regs->perf_ctrl);
val |= SLLC_TGTID_EN | SLLC_FILT_EN;
- writel(val, sllc_pmu->base + SLLC_PERF_CTRL);
+ writel(val, sllc_pmu->base + regs->perf_ctrl);
}
}
static void hisi_sllc_pmu_clear_tgtid(struct perf_event *event)
{
struct hisi_pmu *sllc_pmu = to_hisi_pmu(event->pmu);
+ struct hisi_sllc_pmu_regs *regs = sllc_pmu->dev_info->private;
u32 min = hisi_get_tgtid_min(event);
u32 max = hisi_get_tgtid_max(event);
if (tgtid_is_valid(max, min)) {
u32 val;
- writel(SLLC_TGTID_NONE, sllc_pmu->base + SLLC_TGTID_CTRL);
+ writel(SLLC_TGTID_NONE, sllc_pmu->base + regs->tgtid_ctrl);
/* Disable the tgtid */
- val = readl(sllc_pmu->base + SLLC_PERF_CTRL);
+ val = readl(sllc_pmu->base + regs->perf_ctrl);
val &= ~(SLLC_TGTID_EN | SLLC_FILT_EN);
- writel(val, sllc_pmu->base + SLLC_PERF_CTRL);
+ writel(val, sllc_pmu->base + regs->perf_ctrl);
}
}
static void hisi_sllc_pmu_config_srcid(struct perf_event *event)
{
struct hisi_pmu *sllc_pmu = to_hisi_pmu(event->pmu);
+ struct hisi_sllc_pmu_regs *regs = sllc_pmu->dev_info->private;
u32 cmd = hisi_get_srcid_cmd(event);
if (cmd) {
u32 val, msk;
msk = hisi_get_srcid_msk(event);
- val = (cmd << SLLC_SRCID_CMD_SHIFT) | (msk << SLLC_SRCID_MSK_SHIFT);
- writel(val, sllc_pmu->base + SLLC_SRCID_CTRL);
+ val = (cmd << regs->srcid_cmd_shift) |
+ (msk << regs->srcid_mask_shift);
+ writel(val, sllc_pmu->base + regs->srcid_ctrl);
/* Enable the srcid */
- val = readl(sllc_pmu->base + SLLC_PERF_CTRL);
+ val = readl(sllc_pmu->base + regs->perf_ctrl);
val |= SLLC_SRCID_EN | SLLC_FILT_EN;
- writel(val, sllc_pmu->base + SLLC_PERF_CTRL);
+ writel(val, sllc_pmu->base + regs->perf_ctrl);
}
}
static void hisi_sllc_pmu_clear_srcid(struct perf_event *event)
{
struct hisi_pmu *sllc_pmu = to_hisi_pmu(event->pmu);
+ struct hisi_sllc_pmu_regs *regs = sllc_pmu->dev_info->private;
u32 cmd = hisi_get_srcid_cmd(event);
if (cmd) {
u32 val;
- writel(SLLC_SRCID_NONE, sllc_pmu->base + SLLC_SRCID_CTRL);
+ writel(SLLC_SRCID_NONE, sllc_pmu->base + regs->srcid_ctrl);
/* Disable the srcid */
- val = readl(sllc_pmu->base + SLLC_PERF_CTRL);
+ val = readl(sllc_pmu->base + regs->perf_ctrl);
val &= ~(SLLC_SRCID_EN | SLLC_FILT_EN);
- writel(val, sllc_pmu->base + SLLC_PERF_CTRL);
+ writel(val, sllc_pmu->base + regs->perf_ctrl);
}
}
@@ -167,29 +211,27 @@ static void hisi_sllc_pmu_clear_filter(struct perf_event *event)
}
}
-static u32 hisi_sllc_pmu_get_counter_offset(int idx)
-{
- return (SLLC_EVENT_CNT0_L + idx * 8);
-}
-
static u64 hisi_sllc_pmu_read_counter(struct hisi_pmu *sllc_pmu,
struct hw_perf_event *hwc)
{
- return readq(sllc_pmu->base +
- hisi_sllc_pmu_get_counter_offset(hwc->idx));
+ struct hisi_sllc_pmu_regs *regs = sllc_pmu->dev_info->private;
+
+ return readq(sllc_pmu->base + SLLC_EVENT_CNTn(regs->event_cnt0, hwc->idx));
}
static void hisi_sllc_pmu_write_counter(struct hisi_pmu *sllc_pmu,
struct hw_perf_event *hwc, u64 val)
{
- writeq(val, sllc_pmu->base +
- hisi_sllc_pmu_get_counter_offset(hwc->idx));
+ struct hisi_sllc_pmu_regs *regs = sllc_pmu->dev_info->private;
+
+ writeq(val, sllc_pmu->base + SLLC_EVENT_CNTn(regs->event_cnt0, hwc->idx));
}
static void hisi_sllc_pmu_write_evtype(struct hisi_pmu *sllc_pmu, int idx,
u32 type)
{
- u32 reg, reg_idx, shift, val;
+ struct hisi_sllc_pmu_regs *regs = sllc_pmu->dev_info->private;
+ u32 reg, val;
/*
* Select the appropriate event select register(SLLC_EVENT_TYPE0/1).
@@ -198,114 +240,117 @@ static void hisi_sllc_pmu_write_evtype(struct hisi_pmu *sllc_pmu, int idx,
* SLLC_EVENT_TYPE0 is chosen. For the latter 4 hardware counters,
* SLLC_EVENT_TYPE1 is chosen.
*/
- reg = SLLC_EVENT_TYPE0 + (idx / 4) * 4;
- reg_idx = idx % 4;
- shift = 8 * reg_idx;
+ reg = regs->event_type0 + (idx / 4) * 4;
/* Write event code to SLLC_EVENT_TYPEx Register */
val = readl(sllc_pmu->base + reg);
- val &= ~(SLLC_EVTYPE_MASK << shift);
- val |= (type << shift);
+ val &= ~(SLLC_EVTYPE_MASK << HISI_PMU_EVTYPE_SHIFT(idx));
+ val |= (type << HISI_PMU_EVTYPE_SHIFT(idx));
writel(val, sllc_pmu->base + reg);
}
static void hisi_sllc_pmu_start_counters(struct hisi_pmu *sllc_pmu)
{
+ struct hisi_sllc_pmu_regs *regs = sllc_pmu->dev_info->private;
u32 val;
- val = readl(sllc_pmu->base + SLLC_PERF_CTRL);
+ val = readl(sllc_pmu->base + regs->perf_ctrl);
val |= SLLC_PERF_CTRL_EN;
- writel(val, sllc_pmu->base + SLLC_PERF_CTRL);
+ writel(val, sllc_pmu->base + regs->perf_ctrl);
}
static void hisi_sllc_pmu_stop_counters(struct hisi_pmu *sllc_pmu)
{
+ struct hisi_sllc_pmu_regs *regs = sllc_pmu->dev_info->private;
u32 val;
- val = readl(sllc_pmu->base + SLLC_PERF_CTRL);
+ val = readl(sllc_pmu->base + regs->perf_ctrl);
val &= ~(SLLC_PERF_CTRL_EN);
- writel(val, sllc_pmu->base + SLLC_PERF_CTRL);
+ writel(val, sllc_pmu->base + regs->perf_ctrl);
}
static void hisi_sllc_pmu_enable_counter(struct hisi_pmu *sllc_pmu,
struct hw_perf_event *hwc)
{
+ struct hisi_sllc_pmu_regs *regs = sllc_pmu->dev_info->private;
u32 val;
- val = readl(sllc_pmu->base + SLLC_EVENT_CTRL);
- val |= 1 << hwc->idx;
- writel(val, sllc_pmu->base + SLLC_EVENT_CTRL);
+ val = readl(sllc_pmu->base + regs->event_ctrl);
+ val |= BIT_ULL(hwc->idx);
+ writel(val, sllc_pmu->base + regs->event_ctrl);
}
static void hisi_sllc_pmu_disable_counter(struct hisi_pmu *sllc_pmu,
struct hw_perf_event *hwc)
{
+ struct hisi_sllc_pmu_regs *regs = sllc_pmu->dev_info->private;
u32 val;
- val = readl(sllc_pmu->base + SLLC_EVENT_CTRL);
- val &= ~(1 << hwc->idx);
- writel(val, sllc_pmu->base + SLLC_EVENT_CTRL);
+ val = readl(sllc_pmu->base + regs->event_ctrl);
+ val &= ~BIT_ULL(hwc->idx);
+ writel(val, sllc_pmu->base + regs->event_ctrl);
}
static void hisi_sllc_pmu_enable_counter_int(struct hisi_pmu *sllc_pmu,
struct hw_perf_event *hwc)
{
+ struct hisi_sllc_pmu_regs *regs = sllc_pmu->dev_info->private;
u32 val;
- val = readl(sllc_pmu->base + SLLC_INT_MASK);
- /* Write 0 to enable interrupt */
- val &= ~(1 << hwc->idx);
- writel(val, sllc_pmu->base + SLLC_INT_MASK);
+ val = readl(sllc_pmu->base + regs->int_mask);
+ val &= ~BIT_ULL(hwc->idx);
+ writel(val, sllc_pmu->base + regs->int_mask);
}
static void hisi_sllc_pmu_disable_counter_int(struct hisi_pmu *sllc_pmu,
struct hw_perf_event *hwc)
{
+ struct hisi_sllc_pmu_regs *regs = sllc_pmu->dev_info->private;
u32 val;
- val = readl(sllc_pmu->base + SLLC_INT_MASK);
- /* Write 1 to mask interrupt */
- val |= 1 << hwc->idx;
- writel(val, sllc_pmu->base + SLLC_INT_MASK);
+ val = readl(sllc_pmu->base + regs->int_mask);
+ val |= BIT_ULL(hwc->idx);
+ writel(val, sllc_pmu->base + regs->int_mask);
}
static u32 hisi_sllc_pmu_get_int_status(struct hisi_pmu *sllc_pmu)
{
- return readl(sllc_pmu->base + SLLC_INT_STATUS);
+ struct hisi_sllc_pmu_regs *regs = sllc_pmu->dev_info->private;
+
+ return readl(sllc_pmu->base + regs->int_status);
}
static void hisi_sllc_pmu_clear_int_status(struct hisi_pmu *sllc_pmu, int idx)
{
- writel(1 << idx, sllc_pmu->base + SLLC_INT_CLEAR);
-}
+ struct hisi_sllc_pmu_regs *regs = sllc_pmu->dev_info->private;
-static const struct acpi_device_id hisi_sllc_pmu_acpi_match[] = {
- { "HISI0263", },
- {}
-};
-MODULE_DEVICE_TABLE(acpi, hisi_sllc_pmu_acpi_match);
+ writel(BIT_ULL(idx), sllc_pmu->base + regs->int_clear);
+}
static int hisi_sllc_pmu_init_data(struct platform_device *pdev,
struct hisi_pmu *sllc_pmu)
{
+ struct hisi_sllc_pmu_regs *regs;
+
+ hisi_uncore_pmu_init_topology(sllc_pmu, &pdev->dev);
+
/*
* Use the SCCL_ID and the index ID to identify the SLLC PMU,
* while SCCL_ID is from MPIDR_EL1 by CPU.
*/
- if (device_property_read_u32(&pdev->dev, "hisilicon,scl-id",
- &sllc_pmu->sccl_id)) {
+ if (sllc_pmu->topo.sccl_id < 0) {
dev_err(&pdev->dev, "Cannot read sccl-id!\n");
return -EINVAL;
}
- if (device_property_read_u32(&pdev->dev, "hisilicon,idx-id",
- &sllc_pmu->index_id)) {
+ if (sllc_pmu->topo.index_id < 0) {
dev_err(&pdev->dev, "Cannot read idx-id!\n");
return -EINVAL;
}
- /* SLLC PMUs only share the same SCCL */
- sllc_pmu->ccl_id = -1;
+ sllc_pmu->dev_info = device_get_match_data(&pdev->dev);
+ if (!sllc_pmu->dev_info)
+ return -ENODEV;
sllc_pmu->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(sllc_pmu->base)) {
@@ -313,7 +358,8 @@ static int hisi_sllc_pmu_init_data(struct platform_device *pdev,
return PTR_ERR(sllc_pmu->base);
}
- sllc_pmu->identifier = readl(sllc_pmu->base + SLLC_VERSION);
+ regs = sllc_pmu->dev_info->private;
+ sllc_pmu->identifier = readl(sllc_pmu->base + regs->version);
return 0;
}
@@ -347,35 +393,54 @@ static const struct attribute_group hisi_sllc_pmu_v2_events_group = {
.attrs = hisi_sllc_pmu_v2_events_attr,
};
-static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL);
-
-static struct attribute *hisi_sllc_pmu_cpumask_attrs[] = {
- &dev_attr_cpumask.attr,
+static const struct attribute_group *hisi_sllc_pmu_v2_attr_groups[] = {
+ &hisi_sllc_pmu_v2_format_group,
+ &hisi_sllc_pmu_v2_events_group,
+ &hisi_pmu_cpumask_attr_group,
+ &hisi_pmu_identifier_group,
NULL
};
-static const struct attribute_group hisi_sllc_pmu_cpumask_attr_group = {
- .attrs = hisi_sllc_pmu_cpumask_attrs,
+static struct hisi_sllc_pmu_regs hisi_sllc_v2_pmu_regs = {
+ .int_mask = SLLC_INT_MASK,
+ .int_clear = SLLC_INT_CLEAR,
+ .int_status = SLLC_INT_STATUS,
+ .perf_ctrl = SLLC_PERF_CTRL,
+ .srcid_ctrl = SLLC_SRCID_CTRL,
+ .srcid_cmd_shift = SLLC_SRCID_CMD_SHIFT,
+ .srcid_mask_shift = SLLC_SRCID_MSK_SHIFT,
+ .tgtid_ctrl = SLLC_TGTID_CTRL,
+ .tgtid_min_shift = SLLC_TGTID_MIN_SHIFT,
+ .tgtid_max_shift = SLLC_TGTID_MAX_SHIFT,
+ .event_ctrl = SLLC_EVENT_CTRL,
+ .event_type0 = SLLC_EVENT_TYPE0,
+ .version = SLLC_VERSION,
+ .event_cnt0 = SLLC_EVENT_CNT0_L,
};
-static struct device_attribute hisi_sllc_pmu_identifier_attr =
- __ATTR(identifier, 0444, hisi_uncore_pmu_identifier_attr_show, NULL);
-
-static struct attribute *hisi_sllc_pmu_identifier_attrs[] = {
- &hisi_sllc_pmu_identifier_attr.attr,
- NULL
+static const struct hisi_pmu_dev_info hisi_sllc_v2 = {
+ .private = &hisi_sllc_v2_pmu_regs,
};
-static const struct attribute_group hisi_sllc_pmu_identifier_group = {
- .attrs = hisi_sllc_pmu_identifier_attrs,
+static struct hisi_sllc_pmu_regs hisi_sllc_v3_pmu_regs = {
+ .int_mask = SLLC_V3_INT_MASK,
+ .int_clear = SLLC_V3_INT_CLEAR,
+ .int_status = SLLC_V3_INT_STATUS,
+ .perf_ctrl = SLLC_V3_PERF_CTRL,
+ .srcid_ctrl = SLLC_V3_SRCID_CTRL,
+ .srcid_cmd_shift = SLLC_V3_SRCID_CMD_SHIFT,
+ .srcid_mask_shift = SLLC_V3_SRCID_MSK_SHIFT,
+ .tgtid_ctrl = SLLC_V3_TGTID_CTRL,
+ .tgtid_min_shift = SLLC_V3_TGTID_MIN_SHIFT,
+ .tgtid_max_shift = SLLC_V3_TGTID_MAX_SHIFT,
+ .event_ctrl = SLLC_V3_EVENT_CTRL,
+ .event_type0 = SLLC_V3_EVENT_TYPE0,
+ .version = SLLC_V3_VERSION,
+ .event_cnt0 = SLLC_V3_EVENT_CNT0_L,
};
-static const struct attribute_group *hisi_sllc_pmu_v2_attr_groups[] = {
- &hisi_sllc_pmu_v2_format_group,
- &hisi_sllc_pmu_v2_events_group,
- &hisi_sllc_pmu_cpumask_attr_group,
- &hisi_sllc_pmu_identifier_group,
- NULL
+static const struct hisi_pmu_dev_info hisi_sllc_v3 = {
+ .private = &hisi_sllc_v3_pmu_regs,
};
static const struct hisi_uncore_ops hisi_uncore_sllc_ops = {
@@ -433,8 +498,8 @@ static int hisi_sllc_pmu_probe(struct platform_device *pdev)
if (ret)
return ret;
- name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%u_sllc%u",
- sllc_pmu->sccl_id, sllc_pmu->index_id);
+ name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%d_sllc%d",
+ sllc_pmu->topo.sccl_id, sllc_pmu->topo.index_id);
if (!name)
return -ENOMEM;
@@ -450,8 +515,8 @@ static int hisi_sllc_pmu_probe(struct platform_device *pdev)
ret = perf_pmu_register(&sllc_pmu->pmu, name, -1);
if (ret) {
dev_err(sllc_pmu->dev, "PMU register failed, ret = %d\n", ret);
- cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HISI_SLLC_ONLINE,
- &sllc_pmu->node);
+ cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_SLLC_ONLINE,
+ &sllc_pmu->node);
return ret;
}
@@ -460,16 +525,22 @@ static int hisi_sllc_pmu_probe(struct platform_device *pdev)
return ret;
}
-static int hisi_sllc_pmu_remove(struct platform_device *pdev)
+static void hisi_sllc_pmu_remove(struct platform_device *pdev)
{
struct hisi_pmu *sllc_pmu = platform_get_drvdata(pdev);
perf_pmu_unregister(&sllc_pmu->pmu);
cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_SLLC_ONLINE,
&sllc_pmu->node);
- return 0;
}
+static const struct acpi_device_id hisi_sllc_pmu_acpi_match[] = {
+ { "HISI0263", (kernel_ulong_t)&hisi_sllc_v2 },
+ { "HISI0264", (kernel_ulong_t)&hisi_sllc_v3 },
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, hisi_sllc_pmu_acpi_match);
+
static struct platform_driver hisi_sllc_pmu_driver = {
.driver = {
.name = "hisi_sllc_pmu",
@@ -508,6 +579,7 @@ static void __exit hisi_sllc_pmu_module_exit(void)
}
module_exit(hisi_sllc_pmu_module_exit);
+MODULE_IMPORT_NS("HISI_PMU");
MODULE_DESCRIPTION("HiSilicon SLLC uncore PMU driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Shaokun Zhang <zhangshaokun@hisilicon.com>");
diff --git a/drivers/perf/hisilicon/hisi_uncore_uc_pmu.c b/drivers/perf/hisilicon/hisi_uncore_uc_pmu.c
index 63da05e5831c..03cb9b564b99 100644
--- a/drivers/perf/hisilicon/hisi_uncore_uc_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_uc_pmu.c
@@ -11,7 +11,6 @@
#include <linux/irq.h>
#include <linux/list.h>
#include <linux/mod_devicetable.h>
-#include <linux/property.h>
#include "hisi_uncore_pmu.h"
@@ -287,12 +286,52 @@ static u64 hisi_uc_pmu_read_counter(struct hisi_pmu *uc_pmu,
return readq(uc_pmu->base + HISI_UC_CNTR_REGn(hwc->idx));
}
-static void hisi_uc_pmu_write_counter(struct hisi_pmu *uc_pmu,
+static bool hisi_uc_pmu_get_glb_en_state(struct hisi_pmu *uc_pmu)
+{
+ u32 val;
+
+ val = readl(uc_pmu->base + HISI_UC_EVENT_CTRL_REG);
+ return !!FIELD_GET(HISI_UC_EVENT_GLB_EN, val);
+}
+
+static void hisi_uc_pmu_write_counter_normal(struct hisi_pmu *uc_pmu,
struct hw_perf_event *hwc, u64 val)
{
writeq(val, uc_pmu->base + HISI_UC_CNTR_REGn(hwc->idx));
}
+static void hisi_uc_pmu_write_counter_quirk_v2(struct hisi_pmu *uc_pmu,
+ struct hw_perf_event *hwc, u64 val)
+{
+ hisi_uc_pmu_start_counters(uc_pmu);
+ hisi_uc_pmu_write_counter_normal(uc_pmu, hwc, val);
+ hisi_uc_pmu_stop_counters(uc_pmu);
+}
+
+static void hisi_uc_pmu_write_counter(struct hisi_pmu *uc_pmu,
+ struct hw_perf_event *hwc, u64 val)
+{
+ bool enable = hisi_uc_pmu_get_glb_en_state(uc_pmu);
+ bool erratum = uc_pmu->identifier == HISI_PMU_V2;
+
+ /*
+	 * HiSilicon UC PMU v2 suffers from erratum 162700402: the PMU counter
+	 * cannot be written because its clock is gated in power-saving mode,
+	 * which leads to wrong or inaccurate counts. The clock can be turned on
+	 * via the PMU global enable control. Both the IRQ handler and pmu_start()
+	 * call this function to set the period. In IRQ context the PMU is already
+	 * enabled, so the counter can be written directly. Otherwise the PMU is
+	 * disabled, so enable it to turn on the counter clock, write the period,
+	 * and then restore the PMU enable state; the counter holds its value
+	 * without a clock.
+ */
+ if (enable || !erratum)
+ hisi_uc_pmu_write_counter_normal(uc_pmu, hwc, val);
+ else
+ hisi_uc_pmu_write_counter_quirk_v2(uc_pmu, hwc, val);
+}
+
static void hisi_uc_pmu_enable_counter_int(struct hisi_pmu *uc_pmu,
struct hw_perf_event *hwc)
{
@@ -326,25 +365,24 @@ static void hisi_uc_pmu_clear_int_status(struct hisi_pmu *uc_pmu, int idx)
static int hisi_uc_pmu_init_data(struct platform_device *pdev,
struct hisi_pmu *uc_pmu)
{
+ hisi_uncore_pmu_init_topology(uc_pmu, &pdev->dev);
+
/*
* Use SCCL (Super CPU Cluster) ID and CCL (CPU Cluster) ID to
* identify the topology information of UC PMU devices in the chip.
* They have some CCLs per SCCL and then 4 UC PMU per CCL.
*/
- if (device_property_read_u32(&pdev->dev, "hisilicon,scl-id",
- &uc_pmu->sccl_id)) {
+ if (uc_pmu->topo.sccl_id < 0) {
dev_err(&pdev->dev, "Can not read uc sccl-id!\n");
return -EINVAL;
}
- if (device_property_read_u32(&pdev->dev, "hisilicon,ccl-id",
- &uc_pmu->ccl_id)) {
+ if (uc_pmu->topo.ccl_id < 0) {
dev_err(&pdev->dev, "Can not read uc ccl-id!\n");
return -EINVAL;
}
- if (device_property_read_u32(&pdev->dev, "hisilicon,sub-id",
- &uc_pmu->sub_id)) {
+ if (uc_pmu->topo.sub_id < 0) {
dev_err(&pdev->dev, "Can not read sub-id!\n");
return -EINVAL;
}
@@ -383,8 +421,8 @@ static struct attribute *hisi_uc_pmu_events_attr[] = {
HISI_PMU_EVENT_ATTR(cpu_rd, 0x10),
HISI_PMU_EVENT_ATTR(cpu_rd64, 0x17),
HISI_PMU_EVENT_ATTR(cpu_rs64, 0x19),
- HISI_PMU_EVENT_ATTR(cpu_mru, 0x1a),
- HISI_PMU_EVENT_ATTR(cycles, 0x9c),
+ HISI_PMU_EVENT_ATTR(cpu_mru, 0x1c),
+ HISI_PMU_EVENT_ATTR(cycles, 0x95),
HISI_PMU_EVENT_ATTR(spipe_hit, 0xb3),
HISI_PMU_EVENT_ATTR(hpipe_hit, 0xdb),
HISI_PMU_EVENT_ATTR(cring_rxdat_cnt, 0xfa),
@@ -399,34 +437,11 @@ static const struct attribute_group hisi_uc_pmu_events_group = {
.attrs = hisi_uc_pmu_events_attr,
};
-static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL);
-
-static struct attribute *hisi_uc_pmu_cpumask_attrs[] = {
- &dev_attr_cpumask.attr,
- NULL,
-};
-
-static const struct attribute_group hisi_uc_pmu_cpumask_attr_group = {
- .attrs = hisi_uc_pmu_cpumask_attrs,
-};
-
-static struct device_attribute hisi_uc_pmu_identifier_attr =
- __ATTR(identifier, 0444, hisi_uncore_pmu_identifier_attr_show, NULL);
-
-static struct attribute *hisi_uc_pmu_identifier_attrs[] = {
- &hisi_uc_pmu_identifier_attr.attr,
- NULL
-};
-
-static const struct attribute_group hisi_uc_pmu_identifier_group = {
- .attrs = hisi_uc_pmu_identifier_attrs,
-};
-
static const struct attribute_group *hisi_uc_pmu_attr_groups[] = {
&hisi_uc_pmu_format_group,
&hisi_uc_pmu_events_group,
- &hisi_uc_pmu_cpumask_attr_group,
- &hisi_uc_pmu_identifier_group,
+ &hisi_pmu_cpumask_attr_group,
+ &hisi_pmu_identifier_group,
NULL
};
@@ -498,8 +513,9 @@ static int hisi_uc_pmu_probe(struct platform_device *pdev)
if (ret)
return ret;
- name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%d_uc%d_%u",
- uc_pmu->sccl_id, uc_pmu->ccl_id, uc_pmu->sub_id);
+ name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%d_uc%d_%d",
+ uc_pmu->topo.sccl_id, uc_pmu->topo.ccl_id,
+ uc_pmu->topo.sub_id);
if (!name)
return -ENOMEM;
@@ -573,6 +589,7 @@ static void __exit hisi_uc_pmu_module_exit(void)
}
module_exit(hisi_uc_pmu_module_exit);
+MODULE_IMPORT_NS("HISI_PMU");
MODULE_DESCRIPTION("HiSilicon SoC UC uncore PMU driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Junhao He <hejunhao3@huawei.com>");
diff --git a/drivers/perf/hisilicon/hns3_pmu.c b/drivers/perf/hisilicon/hns3_pmu.c
index e0457d84af6b..c157f3572cae 100644
--- a/drivers/perf/hisilicon/hns3_pmu.c
+++ b/drivers/perf/hisilicon/hns3_pmu.c
@@ -363,16 +363,6 @@ HNS3_PMU_FILTER_ATTR(global, config1, 52, 52);
HNS3_PMU_EVT_PPS_##_name##_TIME, \
HNS3_PMU_FILTER_INTR_##_name})
-static ssize_t hns3_pmu_format_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct dev_ext_attribute *eattr;
-
- eattr = container_of(attr, struct dev_ext_attribute, attr);
-
- return sysfs_emit(buf, "%s\n", (char *)eattr->var);
-}
-
static ssize_t hns3_pmu_event_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -421,7 +411,7 @@ static ssize_t hns3_pmu_filter_mode_show(struct device *dev,
})[0].attr.attr)
#define HNS3_PMU_FORMAT_ATTR(_name, _format) \
- HNS3_PMU_ATTR(_name, hns3_pmu_format_show, (void *)_format)
+ HNS3_PMU_ATTR(_name, device_show_string, _format)
#define HNS3_PMU_EVENT_ATTR(_name, _event) \
HNS3_PMU_ATTR(_name, hns3_pmu_event_show, (void *)_event)
#define HNS3_PMU_FLT_MODE_ATTR(_name, _event) \
@@ -1085,15 +1075,27 @@ static bool hns3_pmu_validate_event_group(struct perf_event *event)
return false;
for (num = 0; num < counters; num++) {
+ /*
+ * If we find a matching event already in the group, this sibling
+ * shares its counter and does not consume a new one.
+ */
if (hns3_pmu_cmp_event(event_group[num], sibling))
break;
}
+ /*
+ * Otherwise this is a new event; if no counter is left for it,
+ * fail the check, since not all events in the group can be
+ * scheduled simultaneously.
+ */
+ if (num == HNS3_PMU_MAX_HW_EVENTS)
+ return false;
+
if (num == counters)
event_group[counters++] = sibling;
}
- return counters <= HNS3_PMU_MAX_HW_EVENTS;
+ return true;
}
static u32 hns3_pmu_get_filter_condition(struct perf_event *event)
@@ -1419,6 +1421,7 @@ static int hns3_pmu_alloc_pmu(struct pci_dev *pdev, struct hns3_pmu *hns3_pmu)
hns3_pmu->pmu = (struct pmu) {
.name = name,
.module = THIS_MODULE,
+ .parent = &pdev->dev,
.event_init = hns3_pmu_event_init,
.pmu_enable = hns3_pmu_enable,
.pmu_disable = hns3_pmu_disable,
@@ -1515,7 +1518,7 @@ static int hns3_pmu_irq_register(struct pci_dev *pdev,
return ret;
}
- ret = devm_add_action(&pdev->dev, hns3_pmu_free_irq, pdev);
+ ret = devm_add_action_or_reset(&pdev->dev, hns3_pmu_free_irq, pdev);
if (ret) {
pci_err(pdev, "failed to add free irq action, ret = %d.\n", ret);
return ret;
@@ -1556,8 +1559,8 @@ static int hns3_pmu_init_pmu(struct pci_dev *pdev, struct hns3_pmu *hns3_pmu)
ret = perf_pmu_register(&hns3_pmu->pmu, hns3_pmu->pmu.name, -1);
if (ret) {
pci_err(pdev, "failed to register perf PMU, ret = %d.\n", ret);
- cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE,
- &hns3_pmu->node);
+ cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE,
+ &hns3_pmu->node);
}
return ret;
@@ -1568,8 +1571,8 @@ static void hns3_pmu_uninit_pmu(struct pci_dev *pdev)
struct hns3_pmu *hns3_pmu = pci_get_drvdata(pdev);
perf_pmu_unregister(&hns3_pmu->pmu);
- cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE,
- &hns3_pmu->node);
+ cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE,
+ &hns3_pmu->node);
}
static int hns3_pmu_init_dev(struct pci_dev *pdev)
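As an illustrative sketch (not part of the patch), the reworked hns3_pmu_validate_event_group() now rejects a group as soon as one more distinct hardware event would exceed the counter budget, instead of tallying first and checking afterwards. The standalone restatement below uses hypothetical names, with MAX_HW_EVENTS standing in for HNS3_PMU_MAX_HW_EVENTS.

#include <stdbool.h>
#include <stddef.h>

#define MAX_HW_EVENTS 8		/* assumption: stands in for HNS3_PMU_MAX_HW_EVENTS */

/* Return true if the group of event ids can be scheduled at once:
 * events sharing an id share a counter; each new id needs a free
 * counter, and we fail as soon as that is no longer possible. */
static bool group_fits(const unsigned int *ids, size_t n)
{
	unsigned int used[MAX_HW_EVENTS];
	size_t nused = 0, i, j;

	for (i = 0; i < n; i++) {
		for (j = 0; j < nused; j++)
			if (used[j] == ids[i])
				break;		/* reuses an existing counter */
		if (j < nused)
			continue;
		if (nused == MAX_HW_EVENTS)
			return false;		/* needs one counter too many */
		used[nused++] = ids[i];
	}
	return true;
}

int main(void)
{
	unsigned int same[3] = { 5, 5, 5 };	/* one counter is enough */

	return group_fits(same, 3) ? 0 : 1;
}
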
diff --git a/drivers/perf/marvell_cn10k_ddr_pmu.c b/drivers/perf/marvell_cn10k_ddr_pmu.c
index 524ba82bfce2..72ac17efd846 100644
--- a/drivers/perf/marvell_cn10k_ddr_pmu.c
+++ b/drivers/perf/marvell_cn10k_ddr_pmu.c
@@ -1,7 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
-/* Marvell CN10K DRAM Subsystem (DSS) Performance Monitor Driver
+/*
+ * Marvell CN10K DRAM Subsystem (DSS) Performance Monitor Driver
*
- * Copyright (C) 2021 Marvell.
+ * Copyright (C) 2021-2024 Marvell.
*/
#include <linux/init.h>
@@ -14,24 +15,29 @@
#include <linux/platform_device.h>
/* Performance Counters Operating Mode Control Registers */
-#define DDRC_PERF_CNT_OP_MODE_CTRL 0x8020
-#define OP_MODE_CTRL_VAL_MANNUAL 0x1
+#define CN10K_DDRC_PERF_CNT_OP_MODE_CTRL 0x8020
+#define ODY_DDRC_PERF_CNT_OP_MODE_CTRL 0x20020
+#define OP_MODE_CTRL_VAL_MANUAL 0x1
/* Performance Counters Start Operation Control Registers */
-#define DDRC_PERF_CNT_START_OP_CTRL 0x8028
+#define CN10K_DDRC_PERF_CNT_START_OP_CTRL 0x8028
+#define ODY_DDRC_PERF_CNT_START_OP_CTRL 0x200A0
#define START_OP_CTRL_VAL_START 0x1ULL
#define START_OP_CTRL_VAL_ACTIVE 0x2
/* Performance Counters End Operation Control Registers */
-#define DDRC_PERF_CNT_END_OP_CTRL 0x8030
+#define CN10K_DDRC_PERF_CNT_END_OP_CTRL 0x8030
+#define ODY_DDRC_PERF_CNT_END_OP_CTRL 0x200E0
#define END_OP_CTRL_VAL_END 0x1ULL
/* Performance Counters End Status Registers */
-#define DDRC_PERF_CNT_END_STATUS 0x8038
+#define CN10K_DDRC_PERF_CNT_END_STATUS 0x8038
+#define ODY_DDRC_PERF_CNT_END_STATUS 0x20120
#define END_STATUS_VAL_END_TIMER_MODE_END 0x1
/* Performance Counters Configuration Registers */
-#define DDRC_PERF_CFG_BASE 0x8040
+#define CN10K_DDRC_PERF_CFG_BASE 0x8040
+#define ODY_DDRC_PERF_CFG_BASE 0x20160
/* 8 Generic event counter + 2 fixed event counters */
#define DDRC_PERF_NUM_GEN_COUNTERS 8
@@ -42,18 +48,28 @@
DDRC_PERF_NUM_FIX_COUNTERS)
/* Generic event counter registers */
-#define DDRC_PERF_CFG(n) (DDRC_PERF_CFG_BASE + 8 * (n))
+#define DDRC_PERF_CFG(base, n) ((base) + 8 * (n))
#define EVENT_ENABLE BIT_ULL(63)
/* Two dedicated event counters for DDR reads and writes */
#define EVENT_DDR_READS 101
#define EVENT_DDR_WRITES 100
+#define DDRC_PERF_REG(base, n) ((base) + 8 * (n))
/*
* programmable events IDs in programmable event counters.
* DO NOT change these event-id numbers, they are used to
* program event bitmap in h/w.
*/
+#define EVENT_DFI_CMD_IS_RETRY 61
+#define EVENT_RD_UC_ECC_ERROR 60
+#define EVENT_RD_CRC_ERROR 59
+#define EVENT_CAPAR_ERROR 58
+#define EVENT_WR_CRC_ERROR 57
+#define EVENT_DFI_PARITY_POISON 56
+#define EVENT_RETRY_FIFO_FULL 46
+#define EVENT_DFI_CYCLES 45
+
#define EVENT_OP_IS_ZQLATCH 55
#define EVENT_OP_IS_ZQSTART 54
#define EVENT_OP_IS_TCR_MRR 53
@@ -102,28 +118,37 @@
#define EVENT_HIF_RD_OR_WR 1
/* Event counter value registers */
-#define DDRC_PERF_CNT_VALUE_BASE 0x8080
-#define DDRC_PERF_CNT_VALUE(n) (DDRC_PERF_CNT_VALUE_BASE + 8 * (n))
+#define CN10K_DDRC_PERF_CNT_VALUE_BASE 0x8080
+#define ODY_DDRC_PERF_CNT_VALUE_BASE 0x201C0
/* Fixed event counter enable/disable register */
-#define DDRC_PERF_CNT_FREERUN_EN 0x80C0
+#define CN10K_DDRC_PERF_CNT_FREERUN_EN 0x80C0
#define DDRC_PERF_FREERUN_WRITE_EN 0x1
#define DDRC_PERF_FREERUN_READ_EN 0x2
/* Fixed event counter control register */
-#define DDRC_PERF_CNT_FREERUN_CTRL 0x80C8
+#define CN10K_DDRC_PERF_CNT_FREERUN_CTRL 0x80C8
+#define ODY_DDRC_PERF_CNT_FREERUN_CTRL 0x20240
#define DDRC_FREERUN_WRITE_CNT_CLR 0x1
#define DDRC_FREERUN_READ_CNT_CLR 0x2
-/* Fixed event counter value register */
-#define DDRC_PERF_CNT_VALUE_WR_OP 0x80D0
-#define DDRC_PERF_CNT_VALUE_RD_OP 0x80D8
+/* Fixed event counter clear register, defined only for Odyssey */
+#define ODY_DDRC_PERF_CNT_FREERUN_CLR 0x20248
+
#define DDRC_PERF_CNT_VALUE_OVERFLOW BIT_ULL(48)
#define DDRC_PERF_CNT_MAX_VALUE GENMASK_ULL(48, 0)
+/* Fixed event counter value register */
+#define CN10K_DDRC_PERF_CNT_VALUE_WR_OP 0x80D0
+#define CN10K_DDRC_PERF_CNT_VALUE_RD_OP 0x80D8
+#define ODY_DDRC_PERF_CNT_VALUE_WR_OP 0x20250
+#define ODY_DDRC_PERF_CNT_VALUE_RD_OP 0x20258
+
struct cn10k_ddr_pmu {
struct pmu pmu;
void __iomem *base;
+ const struct ddr_pmu_platform_data *p_data;
+ const struct ddr_pmu_ops *ops;
unsigned int cpu;
struct device *dev;
int active_events;
@@ -132,8 +157,36 @@ struct cn10k_ddr_pmu {
struct hlist_node node;
};
+struct ddr_pmu_ops {
+ void (*enable_read_freerun_counter)(struct cn10k_ddr_pmu *pmu,
+ bool enable);
+ void (*enable_write_freerun_counter)(struct cn10k_ddr_pmu *pmu,
+ bool enable);
+ void (*clear_read_freerun_counter)(struct cn10k_ddr_pmu *pmu);
+ void (*clear_write_freerun_counter)(struct cn10k_ddr_pmu *pmu);
+ void (*pmu_overflow_handler)(struct cn10k_ddr_pmu *pmu, int evt_idx);
+};
+
#define to_cn10k_ddr_pmu(p) container_of(p, struct cn10k_ddr_pmu, pmu)
+struct ddr_pmu_platform_data {
+ u64 counter_overflow_val;
+ u64 counter_max_val;
+ u64 cnt_base;
+ u64 cfg_base;
+ u64 cnt_op_mode_ctrl;
+ u64 cnt_start_op_ctrl;
+ u64 cnt_end_op_ctrl;
+ u64 cnt_end_status;
+ u64 cnt_freerun_en;
+ u64 cnt_freerun_ctrl;
+ u64 cnt_freerun_clr;
+ u64 cnt_value_wr_op;
+ u64 cnt_value_rd_op;
+ bool is_cn10k;
+ bool is_ody;
+};
+
static ssize_t cn10k_ddr_pmu_event_show(struct device *dev,
struct device_attribute *attr,
char *page)
@@ -209,6 +262,85 @@ static struct attribute *cn10k_ddr_perf_events_attrs[] = {
NULL
};
+static struct attribute *odyssey_ddr_perf_events_attrs[] = {
+ /* Programmable */
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_hif_rd_or_wr_access, EVENT_HIF_RD_OR_WR),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_hif_wr_access, EVENT_HIF_WR),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_hif_rd_access, EVENT_HIF_RD),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_hif_rmw_access, EVENT_HIF_RMW),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_hif_pri_rdaccess, EVENT_HIF_HI_PRI_RD),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_rd_bypass_access, EVENT_READ_BYPASS),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_act_bypass_access, EVENT_ACT_BYPASS),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_dfi_wr_data_access,
+ EVENT_DFI_WR_DATA_CYCLES),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_dfi_rd_data_access,
+ EVENT_DFI_RD_DATA_CYCLES),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_hpri_sched_rd_crit_access,
+ EVENT_HPR_XACT_WHEN_CRITICAL),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_lpri_sched_rd_crit_access,
+ EVENT_LPR_XACT_WHEN_CRITICAL),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_wr_trxn_crit_access,
+ EVENT_WR_XACT_WHEN_CRITICAL),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_cam_active_access, EVENT_OP_IS_ACTIVATE),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_cam_rd_or_wr_access,
+ EVENT_OP_IS_RD_OR_WR),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_cam_rd_active_access,
+ EVENT_OP_IS_RD_ACTIVATE),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_cam_read, EVENT_OP_IS_RD),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_cam_write, EVENT_OP_IS_WR),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_cam_mwr, EVENT_OP_IS_MWR),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_precharge, EVENT_OP_IS_PRECHARGE),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_precharge_for_rdwr,
+ EVENT_PRECHARGE_FOR_RDWR),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_precharge_for_other,
+ EVENT_PRECHARGE_FOR_OTHER),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_rdwr_transitions, EVENT_RDWR_TRANSITIONS),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_write_combine, EVENT_WRITE_COMBINE),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_war_hazard, EVENT_WAR_HAZARD),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_raw_hazard, EVENT_RAW_HAZARD),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_waw_hazard, EVENT_WAW_HAZARD),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_enter_selfref, EVENT_OP_IS_ENTER_SELFREF),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_enter_powerdown,
+ EVENT_OP_IS_ENTER_POWERDOWN),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_enter_mpsm, EVENT_OP_IS_ENTER_MPSM),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_refresh, EVENT_OP_IS_REFRESH),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_crit_ref, EVENT_OP_IS_CRIT_REF),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_spec_ref, EVENT_OP_IS_SPEC_REF),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_load_mode, EVENT_OP_IS_LOAD_MODE),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_zqcl, EVENT_OP_IS_ZQCL),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_cam_wr_access, EVENT_OP_IS_ZQCS),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_dfi_cycles, EVENT_DFI_CYCLES),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_retry_fifo_full,
+ EVENT_RETRY_FIFO_FULL),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_bsm_alloc, EVENT_BSM_ALLOC),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_bsm_starvation, EVENT_BSM_STARVATION),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_win_limit_reached_rd,
+ EVENT_VISIBLE_WIN_LIMIT_REACHED_RD),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_win_limit_reached_wr,
+ EVENT_VISIBLE_WIN_LIMIT_REACHED_WR),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_dqsosc_mpc, EVENT_OP_IS_DQSOSC_MPC),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_dqsosc_mrr, EVENT_OP_IS_DQSOSC_MRR),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_tcr_mrr, EVENT_OP_IS_TCR_MRR),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_zqstart, EVENT_OP_IS_ZQSTART),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_zqlatch, EVENT_OP_IS_ZQLATCH),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_dfi_parity_poison,
+ EVENT_DFI_PARITY_POISON),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_wr_crc_error, EVENT_WR_CRC_ERROR),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_capar_error, EVENT_CAPAR_ERROR),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_rd_crc_error, EVENT_RD_CRC_ERROR),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_rd_uc_ecc_error, EVENT_RD_UC_ECC_ERROR),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_dfi_cmd_is_retry, EVENT_DFI_CMD_IS_RETRY),
+ /* Free run event counters */
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_ddr_reads, EVENT_DDR_READS),
+ CN10K_DDR_PMU_EVENT_ATTR(ddr_ddr_writes, EVENT_DDR_WRITES),
+ NULL
+};
+
+static struct attribute_group odyssey_ddr_perf_events_attr_group = {
+ .name = "events",
+ .attrs = odyssey_ddr_perf_events_attrs,
+};
+
static struct attribute_group cn10k_ddr_perf_events_attr_group = {
.name = "events",
.attrs = cn10k_ddr_perf_events_attrs,
@@ -254,6 +386,13 @@ static const struct attribute_group *cn10k_attr_groups[] = {
NULL,
};
+static const struct attribute_group *odyssey_attr_groups[] = {
+ &odyssey_ddr_perf_events_attr_group,
+ &cn10k_ddr_perf_format_attr_group,
+ &cn10k_ddr_perf_cpumask_attr_group,
+ NULL
+};
+
/* Default poll timeout is 100 sec, which is very sufficient for
* 48 bit counter incremented max at 5.6 GT/s, which may take many
* hours to overflow.
@@ -266,9 +405,18 @@ static ktime_t cn10k_ddr_pmu_timer_period(void)
return ms_to_ktime((u64)cn10k_ddr_pmu_poll_period_sec * USEC_PER_SEC);
}
-static int ddr_perf_get_event_bitmap(int eventid, u64 *event_bitmap)
+static int ddr_perf_get_event_bitmap(int eventid, u64 *event_bitmap,
+ struct cn10k_ddr_pmu *ddr_pmu)
{
+ int err = 0;
+
switch (eventid) {
+ case EVENT_DFI_PARITY_POISON ... EVENT_DFI_CMD_IS_RETRY:
+ if (!ddr_pmu->p_data->is_ody) {
+ err = -EINVAL;
+ break;
+ }
+ fallthrough;
case EVENT_HIF_RD_OR_WR ... EVENT_WAW_HAZARD:
case EVENT_OP_IS_REFRESH ... EVENT_OP_IS_ZQLATCH:
*event_bitmap = (1ULL << (eventid - 1));
@@ -279,11 +427,12 @@ static int ddr_perf_get_event_bitmap(int eventid, u64 *event_bitmap)
*event_bitmap = (0xFULL << (eventid - 1));
break;
default:
- pr_err("%s Invalid eventid %d\n", __func__, eventid);
- return -EINVAL;
+ err = -EINVAL;
}
- return 0;
+ if (err)
+ pr_err("%s Invalid eventid %d\n", __func__, eventid);
+ return err;
}
static int cn10k_ddr_perf_alloc_counter(struct cn10k_ddr_pmu *pmu,
@@ -351,9 +500,33 @@ static int cn10k_ddr_perf_event_init(struct perf_event *event)
return 0;
}
+static void cn10k_ddr_perf_counter_start(struct cn10k_ddr_pmu *ddr_pmu,
+ int counter)
+{
+ const struct ddr_pmu_platform_data *p_data = ddr_pmu->p_data;
+ u64 ctrl_reg = p_data->cnt_start_op_ctrl;
+
+ writeq_relaxed(START_OP_CTRL_VAL_START, ddr_pmu->base +
+ DDRC_PERF_REG(ctrl_reg, counter));
+}
+
+static void cn10k_ddr_perf_counter_stop(struct cn10k_ddr_pmu *ddr_pmu,
+ int counter)
+{
+ const struct ddr_pmu_platform_data *p_data = ddr_pmu->p_data;
+ u64 ctrl_reg = p_data->cnt_end_op_ctrl;
+
+ writeq_relaxed(END_OP_CTRL_VAL_END, ddr_pmu->base +
+ DDRC_PERF_REG(ctrl_reg, counter));
+}
+
static void cn10k_ddr_perf_counter_enable(struct cn10k_ddr_pmu *pmu,
int counter, bool enable)
{
+ const struct ddr_pmu_platform_data *p_data = pmu->p_data;
+ u64 ctrl_reg = pmu->p_data->cnt_op_mode_ctrl;
+ const struct ddr_pmu_ops *ops = pmu->ops;
+ bool is_ody = pmu->p_data->is_ody;
u32 reg;
u64 val;
@@ -363,7 +536,7 @@ static void cn10k_ddr_perf_counter_enable(struct cn10k_ddr_pmu *pmu,
}
if (counter < DDRC_PERF_NUM_GEN_COUNTERS) {
- reg = DDRC_PERF_CFG(counter);
+ reg = DDRC_PERF_CFG(p_data->cfg_base, counter);
val = readq_relaxed(pmu->base + reg);
if (enable)
@@ -372,40 +545,52 @@ static void cn10k_ddr_perf_counter_enable(struct cn10k_ddr_pmu *pmu,
val &= ~EVENT_ENABLE;
writeq_relaxed(val, pmu->base + reg);
- } else {
- val = readq_relaxed(pmu->base + DDRC_PERF_CNT_FREERUN_EN);
- if (enable) {
- if (counter == DDRC_PERF_READ_COUNTER_IDX)
- val |= DDRC_PERF_FREERUN_READ_EN;
- else
- val |= DDRC_PERF_FREERUN_WRITE_EN;
- } else {
- if (counter == DDRC_PERF_READ_COUNTER_IDX)
- val &= ~DDRC_PERF_FREERUN_READ_EN;
- else
- val &= ~DDRC_PERF_FREERUN_WRITE_EN;
+
+ if (is_ody) {
+ if (enable) {
+ /*
+ * Setup the PMU counter to work in
+ * manual mode
+ */
+ reg = DDRC_PERF_REG(ctrl_reg, counter);
+ writeq_relaxed(OP_MODE_CTRL_VAL_MANUAL,
+ pmu->base + reg);
+
+ cn10k_ddr_perf_counter_start(pmu, counter);
+ } else {
+ cn10k_ddr_perf_counter_stop(pmu, counter);
+ }
}
- writeq_relaxed(val, pmu->base + DDRC_PERF_CNT_FREERUN_EN);
+ } else {
+ if (counter == DDRC_PERF_READ_COUNTER_IDX)
+ ops->enable_read_freerun_counter(pmu, enable);
+ else
+ ops->enable_write_freerun_counter(pmu, enable);
}
}
static u64 cn10k_ddr_perf_read_counter(struct cn10k_ddr_pmu *pmu, int counter)
{
+ const struct ddr_pmu_platform_data *p_data = pmu->p_data;
u64 val;
if (counter == DDRC_PERF_READ_COUNTER_IDX)
- return readq_relaxed(pmu->base + DDRC_PERF_CNT_VALUE_RD_OP);
+ return readq_relaxed(pmu->base +
+ p_data->cnt_value_rd_op);
if (counter == DDRC_PERF_WRITE_COUNTER_IDX)
- return readq_relaxed(pmu->base + DDRC_PERF_CNT_VALUE_WR_OP);
+ return readq_relaxed(pmu->base +
+ p_data->cnt_value_wr_op);
- val = readq_relaxed(pmu->base + DDRC_PERF_CNT_VALUE(counter));
+ val = readq_relaxed(pmu->base +
+ DDRC_PERF_REG(p_data->cnt_base, counter));
return val;
}
static void cn10k_ddr_perf_event_update(struct perf_event *event)
{
struct cn10k_ddr_pmu *pmu = to_cn10k_ddr_pmu(event->pmu);
+ const struct ddr_pmu_platform_data *p_data = pmu->p_data;
struct hw_perf_event *hwc = &event->hw;
u64 prev_count, new_count, mask;
@@ -414,7 +599,7 @@ static void cn10k_ddr_perf_event_update(struct perf_event *event)
new_count = cn10k_ddr_perf_read_counter(pmu, hwc->idx);
} while (local64_xchg(&hwc->prev_count, new_count) != prev_count);
- mask = DDRC_PERF_CNT_MAX_VALUE;
+ mask = p_data->counter_max_val;
local64_add((new_count - prev_count) & mask, &event->count);
}
@@ -435,6 +620,8 @@ static void cn10k_ddr_perf_event_start(struct perf_event *event, int flags)
static int cn10k_ddr_perf_event_add(struct perf_event *event, int flags)
{
struct cn10k_ddr_pmu *pmu = to_cn10k_ddr_pmu(event->pmu);
+ const struct ddr_pmu_platform_data *p_data = pmu->p_data;
+ const struct ddr_pmu_ops *ops = pmu->ops;
struct hw_perf_event *hwc = &event->hw;
u8 config = event->attr.config;
int counter, ret;
@@ -454,8 +641,8 @@ static int cn10k_ddr_perf_event_add(struct perf_event *event, int flags)
if (counter < DDRC_PERF_NUM_GEN_COUNTERS) {
/* Generic counters, configure event id */
- reg_offset = DDRC_PERF_CFG(counter);
- ret = ddr_perf_get_event_bitmap(config, &val);
+ reg_offset = DDRC_PERF_CFG(p_data->cfg_base, counter);
+ ret = ddr_perf_get_event_bitmap(config, &val, pmu);
if (ret)
return ret;
@@ -463,11 +650,9 @@ static int cn10k_ddr_perf_event_add(struct perf_event *event, int flags)
} else {
/* fixed event counter, clear counter value */
if (counter == DDRC_PERF_READ_COUNTER_IDX)
- val = DDRC_FREERUN_READ_CNT_CLR;
+ ops->clear_read_freerun_counter(pmu);
else
- val = DDRC_FREERUN_WRITE_CNT_CLR;
-
- writeq_relaxed(val, pmu->base + DDRC_PERF_CNT_FREERUN_CTRL);
+ ops->clear_write_freerun_counter(pmu);
}
hwc->state |= PERF_HES_STOPPED;
@@ -512,17 +697,19 @@ static void cn10k_ddr_perf_event_del(struct perf_event *event, int flags)
static void cn10k_ddr_perf_pmu_enable(struct pmu *pmu)
{
struct cn10k_ddr_pmu *ddr_pmu = to_cn10k_ddr_pmu(pmu);
+ const struct ddr_pmu_platform_data *p_data = ddr_pmu->p_data;
writeq_relaxed(START_OP_CTRL_VAL_START, ddr_pmu->base +
- DDRC_PERF_CNT_START_OP_CTRL);
+ p_data->cnt_start_op_ctrl);
}
static void cn10k_ddr_perf_pmu_disable(struct pmu *pmu)
{
struct cn10k_ddr_pmu *ddr_pmu = to_cn10k_ddr_pmu(pmu);
+ const struct ddr_pmu_platform_data *p_data = ddr_pmu->p_data;
writeq_relaxed(END_OP_CTRL_VAL_END, ddr_pmu->base +
- DDRC_PERF_CNT_END_OP_CTRL);
+ p_data->cnt_end_op_ctrl);
}
static void cn10k_ddr_perf_event_update_all(struct cn10k_ddr_pmu *pmu)
@@ -547,8 +734,123 @@ static void cn10k_ddr_perf_event_update_all(struct cn10k_ddr_pmu *pmu)
}
}
+static void ddr_pmu_enable_read_freerun(struct cn10k_ddr_pmu *pmu, bool enable)
+{
+ const struct ddr_pmu_platform_data *p_data = pmu->p_data;
+ u64 val;
+
+ val = readq_relaxed(pmu->base + p_data->cnt_freerun_en);
+ if (enable)
+ val |= DDRC_PERF_FREERUN_READ_EN;
+ else
+ val &= ~DDRC_PERF_FREERUN_READ_EN;
+
+ writeq_relaxed(val, pmu->base + p_data->cnt_freerun_en);
+}
+
+static void ddr_pmu_enable_write_freerun(struct cn10k_ddr_pmu *pmu, bool enable)
+{
+ const struct ddr_pmu_platform_data *p_data = pmu->p_data;
+ u64 val;
+
+ val = readq_relaxed(pmu->base + p_data->cnt_freerun_en);
+ if (enable)
+ val |= DDRC_PERF_FREERUN_WRITE_EN;
+ else
+ val &= ~DDRC_PERF_FREERUN_WRITE_EN;
+
+ writeq_relaxed(val, pmu->base + p_data->cnt_freerun_en);
+}
+
+static void ddr_pmu_read_clear_freerun(struct cn10k_ddr_pmu *pmu)
+{
+ const struct ddr_pmu_platform_data *p_data = pmu->p_data;
+ u64 val;
+
+ val = DDRC_FREERUN_READ_CNT_CLR;
+ writeq_relaxed(val, pmu->base + p_data->cnt_freerun_ctrl);
+}
+
+static void ddr_pmu_write_clear_freerun(struct cn10k_ddr_pmu *pmu)
+{
+ const struct ddr_pmu_platform_data *p_data = pmu->p_data;
+ u64 val;
+
+ val = DDRC_FREERUN_WRITE_CNT_CLR;
+ writeq_relaxed(val, pmu->base + p_data->cnt_freerun_ctrl);
+}
+
+static void ddr_pmu_overflow_hander(struct cn10k_ddr_pmu *pmu, int evt_idx)
+{
+ cn10k_ddr_perf_event_update_all(pmu);
+ cn10k_ddr_perf_pmu_disable(&pmu->pmu);
+ cn10k_ddr_perf_pmu_enable(&pmu->pmu);
+}
+
+static void ddr_pmu_ody_enable_read_freerun(struct cn10k_ddr_pmu *pmu,
+ bool enable)
+{
+ const struct ddr_pmu_platform_data *p_data = pmu->p_data;
+ u64 val;
+
+ val = readq_relaxed(pmu->base + p_data->cnt_freerun_ctrl);
+ if (enable)
+ val |= DDRC_PERF_FREERUN_READ_EN;
+ else
+ val &= ~DDRC_PERF_FREERUN_READ_EN;
+
+ writeq_relaxed(val, pmu->base + p_data->cnt_freerun_ctrl);
+}
+
+static void ddr_pmu_ody_enable_write_freerun(struct cn10k_ddr_pmu *pmu,
+ bool enable)
+{
+ const struct ddr_pmu_platform_data *p_data = pmu->p_data;
+ u64 val;
+
+ val = readq_relaxed(pmu->base + p_data->cnt_freerun_ctrl);
+ if (enable)
+ val |= DDRC_PERF_FREERUN_WRITE_EN;
+ else
+ val &= ~DDRC_PERF_FREERUN_WRITE_EN;
+
+ writeq_relaxed(val, pmu->base + p_data->cnt_freerun_ctrl);
+}
+
+static void ddr_pmu_ody_read_clear_freerun(struct cn10k_ddr_pmu *pmu)
+{
+ const struct ddr_pmu_platform_data *p_data = pmu->p_data;
+ u64 val;
+
+ val = DDRC_FREERUN_READ_CNT_CLR;
+ writeq_relaxed(val, pmu->base + p_data->cnt_freerun_clr);
+}
+
+static void ddr_pmu_ody_write_clear_freerun(struct cn10k_ddr_pmu *pmu)
+{
+ const struct ddr_pmu_platform_data *p_data = pmu->p_data;
+ u64 val;
+
+ val = DDRC_FREERUN_WRITE_CNT_CLR;
+ writeq_relaxed(val, pmu->base + p_data->cnt_freerun_clr);
+}
+
+static void ddr_pmu_ody_overflow_hander(struct cn10k_ddr_pmu *pmu, int evt_idx)
+{
+ /*
+ * When a counter reaches its maximum value it freezes there. Update
+ * the affected event, then stop and restart its counter so that it
+ * resumes counting from zero.
+ */
+ cn10k_ddr_perf_event_update(pmu->events[evt_idx]);
+ cn10k_ddr_perf_counter_stop(pmu, evt_idx);
+ cn10k_ddr_perf_counter_start(pmu, evt_idx);
+}
+
static irqreturn_t cn10k_ddr_pmu_overflow_handler(struct cn10k_ddr_pmu *pmu)
{
+ const struct ddr_pmu_platform_data *p_data = pmu->p_data;
+ const struct ddr_pmu_ops *ops = pmu->ops;
struct perf_event *event;
struct hw_perf_event *hwc;
u64 prev_count, new_count;
@@ -586,11 +888,9 @@ static irqreturn_t cn10k_ddr_pmu_overflow_handler(struct cn10k_ddr_pmu *pmu)
continue;
value = cn10k_ddr_perf_read_counter(pmu, i);
- if (value == DDRC_PERF_CNT_MAX_VALUE) {
+ if (value == p_data->counter_max_val) {
pr_info("Counter-(%d) reached max value\n", i);
- cn10k_ddr_perf_event_update_all(pmu);
- cn10k_ddr_perf_pmu_disable(&pmu->pmu);
- cn10k_ddr_perf_pmu_enable(&pmu->pmu);
+ ops->pmu_overflow_handler(pmu, i);
}
}
@@ -629,11 +929,68 @@ static int cn10k_ddr_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
return 0;
}
+static const struct ddr_pmu_ops ddr_pmu_ops = {
+ .enable_read_freerun_counter = ddr_pmu_enable_read_freerun,
+ .enable_write_freerun_counter = ddr_pmu_enable_write_freerun,
+ .clear_read_freerun_counter = ddr_pmu_read_clear_freerun,
+ .clear_write_freerun_counter = ddr_pmu_write_clear_freerun,
+ .pmu_overflow_handler = ddr_pmu_overflow_hander,
+};
+
+#if defined(CONFIG_ACPI) || defined(CONFIG_OF)
+static const struct ddr_pmu_platform_data cn10k_ddr_pmu_pdata = {
+ .counter_overflow_val = BIT_ULL(48),
+ .counter_max_val = GENMASK_ULL(48, 0),
+ .cnt_base = CN10K_DDRC_PERF_CNT_VALUE_BASE,
+ .cfg_base = CN10K_DDRC_PERF_CFG_BASE,
+ .cnt_op_mode_ctrl = CN10K_DDRC_PERF_CNT_OP_MODE_CTRL,
+ .cnt_start_op_ctrl = CN10K_DDRC_PERF_CNT_START_OP_CTRL,
+ .cnt_end_op_ctrl = CN10K_DDRC_PERF_CNT_END_OP_CTRL,
+ .cnt_end_status = CN10K_DDRC_PERF_CNT_END_STATUS,
+ .cnt_freerun_en = CN10K_DDRC_PERF_CNT_FREERUN_EN,
+ .cnt_freerun_ctrl = CN10K_DDRC_PERF_CNT_FREERUN_CTRL,
+ .cnt_freerun_clr = 0,
+ .cnt_value_wr_op = CN10K_DDRC_PERF_CNT_VALUE_WR_OP,
+ .cnt_value_rd_op = CN10K_DDRC_PERF_CNT_VALUE_RD_OP,
+ .is_cn10k = TRUE,
+};
+#endif
+
+static const struct ddr_pmu_ops ddr_pmu_ody_ops = {
+ .enable_read_freerun_counter = ddr_pmu_ody_enable_read_freerun,
+ .enable_write_freerun_counter = ddr_pmu_ody_enable_write_freerun,
+ .clear_read_freerun_counter = ddr_pmu_ody_read_clear_freerun,
+ .clear_write_freerun_counter = ddr_pmu_ody_write_clear_freerun,
+ .pmu_overflow_handler = ddr_pmu_ody_overflow_hander,
+};
+
+#ifdef CONFIG_ACPI
+static const struct ddr_pmu_platform_data odyssey_ddr_pmu_pdata = {
+ .counter_overflow_val = 0,
+ .counter_max_val = GENMASK_ULL(63, 0),
+ .cnt_base = ODY_DDRC_PERF_CNT_VALUE_BASE,
+ .cfg_base = ODY_DDRC_PERF_CFG_BASE,
+ .cnt_op_mode_ctrl = ODY_DDRC_PERF_CNT_OP_MODE_CTRL,
+ .cnt_start_op_ctrl = ODY_DDRC_PERF_CNT_START_OP_CTRL,
+ .cnt_end_op_ctrl = ODY_DDRC_PERF_CNT_END_OP_CTRL,
+ .cnt_end_status = ODY_DDRC_PERF_CNT_END_STATUS,
+ .cnt_freerun_en = 0,
+ .cnt_freerun_ctrl = ODY_DDRC_PERF_CNT_FREERUN_CTRL,
+ .cnt_freerun_clr = ODY_DDRC_PERF_CNT_FREERUN_CLR,
+ .cnt_value_wr_op = ODY_DDRC_PERF_CNT_VALUE_WR_OP,
+ .cnt_value_rd_op = ODY_DDRC_PERF_CNT_VALUE_RD_OP,
+ .is_ody = TRUE,
+};
+#endif
+
static int cn10k_ddr_perf_probe(struct platform_device *pdev)
{
+ const struct ddr_pmu_platform_data *dev_data;
struct cn10k_ddr_pmu *ddr_pmu;
struct resource *res;
void __iomem *base;
+ bool is_cn10k;
+ bool is_ody;
char *name;
int ret;
@@ -644,30 +1001,60 @@ static int cn10k_ddr_perf_probe(struct platform_device *pdev)
ddr_pmu->dev = &pdev->dev;
platform_set_drvdata(pdev, ddr_pmu);
+ dev_data = device_get_match_data(&pdev->dev);
+ if (!dev_data) {
+ dev_err(&pdev->dev, "Error: No device match data found\n");
+ return -ENODEV;
+ }
+
base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(base))
return PTR_ERR(base);
ddr_pmu->base = base;
- /* Setup the PMU counter to work in manual mode */
- writeq_relaxed(OP_MODE_CTRL_VAL_MANNUAL, ddr_pmu->base +
- DDRC_PERF_CNT_OP_MODE_CTRL);
-
- ddr_pmu->pmu = (struct pmu) {
- .module = THIS_MODULE,
- .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
- .task_ctx_nr = perf_invalid_context,
- .attr_groups = cn10k_attr_groups,
- .event_init = cn10k_ddr_perf_event_init,
- .add = cn10k_ddr_perf_event_add,
- .del = cn10k_ddr_perf_event_del,
- .start = cn10k_ddr_perf_event_start,
- .stop = cn10k_ddr_perf_event_stop,
- .read = cn10k_ddr_perf_event_update,
- .pmu_enable = cn10k_ddr_perf_pmu_enable,
- .pmu_disable = cn10k_ddr_perf_pmu_disable,
- };
+ ddr_pmu->p_data = dev_data;
+ is_cn10k = ddr_pmu->p_data->is_cn10k;
+ is_ody = ddr_pmu->p_data->is_ody;
+
+ if (is_cn10k) {
+ ddr_pmu->ops = &ddr_pmu_ops;
+ /* Setup the PMU counter to work in manual mode */
+ writeq_relaxed(OP_MODE_CTRL_VAL_MANUAL, ddr_pmu->base +
+ ddr_pmu->p_data->cnt_op_mode_ctrl);
+
+ ddr_pmu->pmu = (struct pmu) {
+ .module = THIS_MODULE,
+ .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
+ .task_ctx_nr = perf_invalid_context,
+ .attr_groups = cn10k_attr_groups,
+ .event_init = cn10k_ddr_perf_event_init,
+ .add = cn10k_ddr_perf_event_add,
+ .del = cn10k_ddr_perf_event_del,
+ .start = cn10k_ddr_perf_event_start,
+ .stop = cn10k_ddr_perf_event_stop,
+ .read = cn10k_ddr_perf_event_update,
+ .pmu_enable = cn10k_ddr_perf_pmu_enable,
+ .pmu_disable = cn10k_ddr_perf_pmu_disable,
+ };
+ }
+
+ if (is_ody) {
+ ddr_pmu->ops = &ddr_pmu_ody_ops;
+
+ ddr_pmu->pmu = (struct pmu) {
+ .module = THIS_MODULE,
+ .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
+ .task_ctx_nr = perf_invalid_context,
+ .attr_groups = odyssey_attr_groups,
+ .event_init = cn10k_ddr_perf_event_init,
+ .add = cn10k_ddr_perf_event_add,
+ .del = cn10k_ddr_perf_event_del,
+ .start = cn10k_ddr_perf_event_start,
+ .stop = cn10k_ddr_perf_event_stop,
+ .read = cn10k_ddr_perf_event_update,
+ };
+ }
/* Choose this cpu to collect perf data */
ddr_pmu->cpu = raw_smp_processor_id();
@@ -677,8 +1064,8 @@ static int cn10k_ddr_perf_probe(struct platform_device *pdev)
if (!name)
return -ENOMEM;
- hrtimer_init(&ddr_pmu->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- ddr_pmu->hrtimer.function = cn10k_ddr_pmu_timer_handler;
+ hrtimer_setup(&ddr_pmu->hrtimer, cn10k_ddr_pmu_timer_handler, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
cpuhp_state_add_instance_nocalls(
CPUHP_AP_PERF_ARM_MARVELL_CN10K_DDR_ONLINE,
@@ -688,7 +1075,7 @@ static int cn10k_ddr_perf_probe(struct platform_device *pdev)
if (ret)
goto error;
- pr_info("CN10K DDR PMU Driver for ddrc@%llx\n", res->start);
+ pr_info("DDR PMU Driver for ddrc@%llx\n", res->start);
return 0;
error:
cpuhp_state_remove_instance_nocalls(
@@ -697,7 +1084,7 @@ error:
return ret;
}
-static int cn10k_ddr_perf_remove(struct platform_device *pdev)
+static void cn10k_ddr_perf_remove(struct platform_device *pdev)
{
struct cn10k_ddr_pmu *ddr_pmu = platform_get_drvdata(pdev);
@@ -706,12 +1093,11 @@ static int cn10k_ddr_perf_remove(struct platform_device *pdev)
&ddr_pmu->node);
perf_pmu_unregister(&ddr_pmu->pmu);
- return 0;
}
#ifdef CONFIG_OF
static const struct of_device_id cn10k_ddr_pmu_of_match[] = {
- { .compatible = "marvell,cn10k-ddr-pmu", },
+ { .compatible = "marvell,cn10k-ddr-pmu", .data = &cn10k_ddr_pmu_pdata },
{ },
};
MODULE_DEVICE_TABLE(of, cn10k_ddr_pmu_of_match);
@@ -719,7 +1105,8 @@ MODULE_DEVICE_TABLE(of, cn10k_ddr_pmu_of_match);
#ifdef CONFIG_ACPI
static const struct acpi_device_id cn10k_ddr_pmu_acpi_match[] = {
- {"MRVL000A", 0},
+ {"MRVL000A", (kernel_ulong_t)&cn10k_ddr_pmu_pdata },
+ {"MRVL000C", (kernel_ulong_t)&odyssey_ddr_pmu_pdata},
{},
};
MODULE_DEVICE_TABLE(acpi, cn10k_ddr_pmu_acpi_match);
@@ -764,4 +1151,5 @@ module_init(cn10k_ddr_pmu_init);
module_exit(cn10k_ddr_pmu_exit);
MODULE_AUTHOR("Bharat Bhushan <bbhushan2@marvell.com>");
+MODULE_DESCRIPTION("Marvell CN10K DRAM Subsystem (DSS) Performance Monitor Driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/perf/marvell_cn10k_tad_pmu.c b/drivers/perf/marvell_cn10k_tad_pmu.c
index fec8e82edb95..51ccb0befa05 100644
--- a/drivers/perf/marvell_cn10k_tad_pmu.c
+++ b/drivers/perf/marvell_cn10k_tad_pmu.c
@@ -37,6 +37,15 @@ struct tad_pmu {
DECLARE_BITMAP(counters_map, TAD_MAX_COUNTERS);
};
+enum mrvl_tad_pmu_version {
+ TAD_PMU_V1 = 1,
+ TAD_PMU_V2,
+};
+
+struct tad_pmu_data {
+ int id;
+};
+
static int tad_pmu_cpuhp_state;
static void tad_pmu_event_counter_read(struct perf_event *event)
@@ -214,6 +223,24 @@ static const struct attribute_group tad_pmu_events_attr_group = {
.attrs = tad_pmu_event_attrs,
};
+static struct attribute *ody_tad_pmu_event_attrs[] = {
+ TAD_PMU_EVENT_ATTR(tad_req_msh_in_exlmn, 0x3),
+ TAD_PMU_EVENT_ATTR(tad_alloc_dtg, 0x1a),
+ TAD_PMU_EVENT_ATTR(tad_alloc_ltg, 0x1b),
+ TAD_PMU_EVENT_ATTR(tad_alloc_any, 0x1c),
+ TAD_PMU_EVENT_ATTR(tad_hit_dtg, 0x1d),
+ TAD_PMU_EVENT_ATTR(tad_hit_ltg, 0x1e),
+ TAD_PMU_EVENT_ATTR(tad_hit_any, 0x1f),
+ TAD_PMU_EVENT_ATTR(tad_tag_rd, 0x20),
+ TAD_PMU_EVENT_ATTR(tad_tot_cycle, 0xFF),
+ NULL
+};
+
+static const struct attribute_group ody_tad_pmu_events_attr_group = {
+ .name = "events",
+ .attrs = ody_tad_pmu_event_attrs,
+};
+
PMU_FORMAT_ATTR(event, "config:0-7");
static struct attribute *tad_pmu_format_attrs[] = {
@@ -252,8 +279,16 @@ static const struct attribute_group *tad_pmu_attr_groups[] = {
NULL
};
+static const struct attribute_group *ody_tad_pmu_attr_groups[] = {
+ &ody_tad_pmu_events_attr_group,
+ &tad_pmu_format_attr_group,
+ &tad_pmu_cpumask_attr_group,
+ NULL
+};
+
static int tad_pmu_probe(struct platform_device *pdev)
{
+ const struct tad_pmu_data *dev_data;
struct device *dev = &pdev->dev;
struct tad_region *regions;
struct tad_pmu *tad_pmu;
@@ -261,6 +296,7 @@ static int tad_pmu_probe(struct platform_device *pdev)
u32 tad_pmu_page_size;
u32 tad_page_size;
u32 tad_cnt;
+ int version;
int i, ret;
char *name;
@@ -270,6 +306,13 @@ static int tad_pmu_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, tad_pmu);
+ dev_data = device_get_match_data(&pdev->dev);
+ if (!dev_data) {
+ dev_err(&pdev->dev, "Error: No device match data found\n");
+ return -ENODEV;
+ }
+ version = dev_data->id;
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
dev_err(&pdev->dev, "Mem resource not found\n");
@@ -319,7 +362,6 @@ static int tad_pmu_probe(struct platform_device *pdev)
tad_pmu->pmu = (struct pmu) {
.module = THIS_MODULE,
- .attr_groups = tad_pmu_attr_groups,
.capabilities = PERF_PMU_CAP_NO_EXCLUDE |
PERF_PMU_CAP_NO_INTERRUPT,
.task_ctx_nr = perf_invalid_context,
@@ -332,6 +374,11 @@ static int tad_pmu_probe(struct platform_device *pdev)
.read = tad_pmu_event_counter_read,
};
+ if (version == TAD_PMU_V1)
+ tad_pmu->pmu.attr_groups = tad_pmu_attr_groups;
+ else
+ tad_pmu->pmu.attr_groups = ody_tad_pmu_attr_groups;
+
tad_pmu->cpu = raw_smp_processor_id();
/* Register pmu instance for cpu hotplug */
@@ -351,27 +398,38 @@ static int tad_pmu_probe(struct platform_device *pdev)
return ret;
}
-static int tad_pmu_remove(struct platform_device *pdev)
+static void tad_pmu_remove(struct platform_device *pdev)
{
struct tad_pmu *pmu = platform_get_drvdata(pdev);
cpuhp_state_remove_instance_nocalls(tad_pmu_cpuhp_state,
&pmu->node);
perf_pmu_unregister(&pmu->pmu);
-
- return 0;
}
+#if defined(CONFIG_OF) || defined(CONFIG_ACPI)
+static const struct tad_pmu_data tad_pmu_data = {
+ .id = TAD_PMU_V1,
+};
+#endif
+
+#ifdef CONFIG_ACPI
+static const struct tad_pmu_data tad_pmu_v2_data = {
+ .id = TAD_PMU_V2,
+};
+#endif
+
#ifdef CONFIG_OF
static const struct of_device_id tad_pmu_of_match[] = {
- { .compatible = "marvell,cn10k-tad-pmu", },
+ { .compatible = "marvell,cn10k-tad-pmu", .data = &tad_pmu_data },
{},
};
#endif
#ifdef CONFIG_ACPI
static const struct acpi_device_id tad_pmu_acpi_match[] = {
- {"MRVL000B", 0},
+ {"MRVL000B", (kernel_ulong_t)&tad_pmu_data},
+ {"MRVL000D", (kernel_ulong_t)&tad_pmu_v2_data},
{},
};
MODULE_DEVICE_TABLE(acpi, tad_pmu_acpi_match);
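As an illustrative sketch (not part of the patch), the TAD PMU changes key everything off the match data carried by the OF/ACPI entries: MRVL000B selects the V1 (CN10K) event list, MRVL000D the V2 (Odyssey) one, and probe() only reads device_get_match_data() once. The lookup below restates that mapping in plain userspace C.

#include <stddef.h>
#include <stdio.h>

enum tad_pmu_version { TAD_PMU_V1 = 1, TAD_PMU_V2 };

struct match_entry {
	const char *id;
	enum tad_pmu_version version;
};

static const struct match_entry table[] = {
	{ "MRVL000B", TAD_PMU_V1 },	/* CN10K: original events group */
	{ "MRVL000D", TAD_PMU_V2 },	/* Odyssey: ody_tad_pmu events group */
};

int main(void)
{
	for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		printf("%s -> V%d\n", table[i].id, table[i].version);
	return 0;
}
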
diff --git a/drivers/perf/marvell_pem_pmu.c b/drivers/perf/marvell_pem_pmu.c
new file mode 100644
index 000000000000..29fbcd1848e4
--- /dev/null
+++ b/drivers/perf/marvell_pem_pmu.c
@@ -0,0 +1,425 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Marvell PEM (PCIe RC) Performance Monitor Driver
+ *
+ * Copyright (C) 2024 Marvell.
+ */
+
+#include <linux/acpi.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/perf_event.h>
+#include <linux/platform_device.h>
+
+/*
+ * Each of these events maps to a free-running 64-bit counter that
+ * has no event control but can be reset.
+ */
+enum pem_events {
+ IB_TLP_NPR,
+ IB_TLP_PR,
+ IB_TLP_CPL,
+ IB_TLP_DWORDS_NPR,
+ IB_TLP_DWORDS_PR,
+ IB_TLP_DWORDS_CPL,
+ IB_INFLIGHT,
+ IB_READS,
+ IB_REQ_NO_RO_NCB,
+ IB_REQ_NO_RO_EBUS,
+ OB_TLP_NPR,
+ OB_TLP_PR,
+ OB_TLP_CPL,
+ OB_TLP_DWORDS_NPR,
+ OB_TLP_DWORDS_PR,
+ OB_TLP_DWORDS_CPL,
+ OB_INFLIGHT,
+ OB_READS,
+ OB_MERGES_NPR,
+ OB_MERGES_PR,
+ OB_MERGES_CPL,
+ ATS_TRANS,
+ ATS_TRANS_LATENCY,
+ ATS_PRI,
+ ATS_PRI_LATENCY,
+ ATS_INV,
+ ATS_INV_LATENCY,
+ PEM_EVENTIDS_MAX
+};
+
+static u64 eventid_to_offset_table[] = {
+ [IB_TLP_NPR] = 0x0,
+ [IB_TLP_PR] = 0x8,
+ [IB_TLP_CPL] = 0x10,
+ [IB_TLP_DWORDS_NPR] = 0x100,
+ [IB_TLP_DWORDS_PR] = 0x108,
+ [IB_TLP_DWORDS_CPL] = 0x110,
+ [IB_INFLIGHT] = 0x200,
+ [IB_READS] = 0x300,
+ [IB_REQ_NO_RO_NCB] = 0x400,
+ [IB_REQ_NO_RO_EBUS] = 0x408,
+ [OB_TLP_NPR] = 0x500,
+ [OB_TLP_PR] = 0x508,
+ [OB_TLP_CPL] = 0x510,
+ [OB_TLP_DWORDS_NPR] = 0x600,
+ [OB_TLP_DWORDS_PR] = 0x608,
+ [OB_TLP_DWORDS_CPL] = 0x610,
+ [OB_INFLIGHT] = 0x700,
+ [OB_READS] = 0x800,
+ [OB_MERGES_NPR] = 0x900,
+ [OB_MERGES_PR] = 0x908,
+ [OB_MERGES_CPL] = 0x910,
+ [ATS_TRANS] = 0x2D18,
+ [ATS_TRANS_LATENCY] = 0x2D20,
+ [ATS_PRI] = 0x2D28,
+ [ATS_PRI_LATENCY] = 0x2D30,
+ [ATS_INV] = 0x2D38,
+ [ATS_INV_LATENCY] = 0x2D40,
+};
+
+struct pem_pmu {
+ struct pmu pmu;
+ void __iomem *base;
+ unsigned int cpu;
+ struct device *dev;
+ struct hlist_node node;
+};
+
+#define to_pem_pmu(p) container_of(p, struct pem_pmu, pmu)
+
+static int eventid_to_offset(int eventid)
+{
+ return eventid_to_offset_table[eventid];
+}
+
+/* Events */
+static ssize_t pem_pmu_event_show(struct device *dev,
+ struct device_attribute *attr,
+ char *page)
+{
+ struct perf_pmu_events_attr *pmu_attr;
+
+ pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
+ return sysfs_emit(page, "event=0x%02llx\n", pmu_attr->id);
+}
+
+#define PEM_EVENT_ATTR(_name, _id) \
+ (&((struct perf_pmu_events_attr[]) { \
+ { .attr = __ATTR(_name, 0444, pem_pmu_event_show, NULL), \
+ .id = _id, } \
+ })[0].attr.attr)
+
+static struct attribute *pem_perf_events_attrs[] = {
+ PEM_EVENT_ATTR(ib_tlp_npr, IB_TLP_NPR),
+ PEM_EVENT_ATTR(ib_tlp_pr, IB_TLP_PR),
+ PEM_EVENT_ATTR(ib_tlp_cpl_partid, IB_TLP_CPL),
+ PEM_EVENT_ATTR(ib_tlp_dwords_npr, IB_TLP_DWORDS_NPR),
+ PEM_EVENT_ATTR(ib_tlp_dwords_pr, IB_TLP_DWORDS_PR),
+ PEM_EVENT_ATTR(ib_tlp_dwords_cpl_partid, IB_TLP_DWORDS_CPL),
+ PEM_EVENT_ATTR(ib_inflight, IB_INFLIGHT),
+ PEM_EVENT_ATTR(ib_reads, IB_READS),
+ PEM_EVENT_ATTR(ib_req_no_ro_ncb, IB_REQ_NO_RO_NCB),
+ PEM_EVENT_ATTR(ib_req_no_ro_ebus, IB_REQ_NO_RO_EBUS),
+ PEM_EVENT_ATTR(ob_tlp_npr_partid, OB_TLP_NPR),
+ PEM_EVENT_ATTR(ob_tlp_pr_partid, OB_TLP_PR),
+ PEM_EVENT_ATTR(ob_tlp_cpl_partid, OB_TLP_CPL),
+ PEM_EVENT_ATTR(ob_tlp_dwords_npr_partid, OB_TLP_DWORDS_NPR),
+ PEM_EVENT_ATTR(ob_tlp_dwords_pr_partid, OB_TLP_DWORDS_PR),
+ PEM_EVENT_ATTR(ob_tlp_dwords_cpl_partid, OB_TLP_DWORDS_CPL),
+ PEM_EVENT_ATTR(ob_inflight_partid, OB_INFLIGHT),
+ PEM_EVENT_ATTR(ob_reads_partid, OB_READS),
+ PEM_EVENT_ATTR(ob_merges_npr_partid, OB_MERGES_NPR),
+ PEM_EVENT_ATTR(ob_merges_pr_partid, OB_MERGES_PR),
+ PEM_EVENT_ATTR(ob_merges_cpl_partid, OB_MERGES_CPL),
+ PEM_EVENT_ATTR(ats_trans, ATS_TRANS),
+ PEM_EVENT_ATTR(ats_trans_latency, ATS_TRANS_LATENCY),
+ PEM_EVENT_ATTR(ats_pri, ATS_PRI),
+ PEM_EVENT_ATTR(ats_pri_latency, ATS_PRI_LATENCY),
+ PEM_EVENT_ATTR(ats_inv, ATS_INV),
+ PEM_EVENT_ATTR(ats_inv_latency, ATS_INV_LATENCY),
+ NULL
+};
+
+static struct attribute_group pem_perf_events_attr_group = {
+ .name = "events",
+ .attrs = pem_perf_events_attrs,
+};
+
+PMU_FORMAT_ATTR(event, "config:0-5");
+
+static struct attribute *pem_perf_format_attrs[] = {
+ &format_attr_event.attr,
+ NULL
+};
+
+static struct attribute_group pem_perf_format_attr_group = {
+ .name = "format",
+ .attrs = pem_perf_format_attrs,
+};
+
+/* cpumask */
+static ssize_t pem_perf_cpumask_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct pem_pmu *pmu = dev_get_drvdata(dev);
+
+ return cpumap_print_to_pagebuf(true, buf, cpumask_of(pmu->cpu));
+}
+
+static struct device_attribute pem_perf_cpumask_attr =
+ __ATTR(cpumask, 0444, pem_perf_cpumask_show, NULL);
+
+static struct attribute *pem_perf_cpumask_attrs[] = {
+ &pem_perf_cpumask_attr.attr,
+ NULL
+};
+
+static struct attribute_group pem_perf_cpumask_attr_group = {
+ .attrs = pem_perf_cpumask_attrs,
+};
+
+static const struct attribute_group *pem_perf_attr_groups[] = {
+ &pem_perf_events_attr_group,
+ &pem_perf_cpumask_attr_group,
+ &pem_perf_format_attr_group,
+ NULL
+};
+
+static int pem_perf_event_init(struct perf_event *event)
+{
+ struct pem_pmu *pmu = to_pem_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ struct perf_event *sibling;
+
+ if (event->attr.type != event->pmu->type)
+ return -ENOENT;
+
+ if (event->attr.config >= PEM_EVENTIDS_MAX)
+ return -EINVAL;
+
+ if (is_sampling_event(event) ||
+ event->attach_state & PERF_ATTACH_TASK) {
+ return -EOPNOTSUPP;
+ }
+
+ if (event->cpu < 0)
+ return -EOPNOTSUPP;
+
+ /* We must NOT create groups containing mixed PMUs */
+ if (event->group_leader->pmu != event->pmu &&
+ !is_software_event(event->group_leader))
+ return -EINVAL;
+
+ for_each_sibling_event(sibling, event->group_leader) {
+ if (sibling->pmu != event->pmu &&
+ !is_software_event(sibling))
+ return -EINVAL;
+ }
+ /*
+ * Bind the event to one CPU; the same event cannot be observed on
+ * multiple CPUs at the same time.
+ */
+ event->cpu = pmu->cpu;
+ hwc->idx = -1;
+ return 0;
+}
+
+static u64 pem_perf_read_counter(struct pem_pmu *pmu,
+ struct perf_event *event, int eventid)
+{
+ return readq_relaxed(pmu->base + eventid_to_offset(eventid));
+}
+
+static void pem_perf_event_update(struct perf_event *event)
+{
+ struct pem_pmu *pmu = to_pem_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ u64 prev_count, new_count;
+
+ do {
+ prev_count = local64_read(&hwc->prev_count);
+ new_count = pem_perf_read_counter(pmu, event, hwc->idx);
+ } while (local64_xchg(&hwc->prev_count, new_count) != prev_count);
+
+ local64_add((new_count - prev_count), &event->count);
+}
+
+static void pem_perf_event_start(struct perf_event *event, int flags)
+{
+ struct pem_pmu *pmu = to_pem_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ int eventid = hwc->idx;
+
+ /*
+ * All counters are free-running, each permanently tied to a fixed
+ * event in hardware.
+ */
+ local64_set(&hwc->prev_count,
+ pem_perf_read_counter(pmu, event, eventid));
+
+ hwc->state = 0;
+}
+
+static int pem_perf_event_add(struct perf_event *event, int flags)
+{
+ struct hw_perf_event *hwc = &event->hw;
+
+ hwc->idx = event->attr.config;
+ if (WARN_ON_ONCE(hwc->idx >= PEM_EVENTIDS_MAX))
+ return -EINVAL;
+ hwc->state |= PERF_HES_STOPPED;
+
+ if (flags & PERF_EF_START)
+ pem_perf_event_start(event, flags);
+
+ return 0;
+}
+
+static void pem_perf_event_stop(struct perf_event *event, int flags)
+{
+ struct hw_perf_event *hwc = &event->hw;
+
+ if (flags & PERF_EF_UPDATE)
+ pem_perf_event_update(event);
+
+ hwc->state |= PERF_HES_STOPPED;
+}
+
+static void pem_perf_event_del(struct perf_event *event, int flags)
+{
+ struct hw_perf_event *hwc = &event->hw;
+
+ pem_perf_event_stop(event, PERF_EF_UPDATE);
+ hwc->idx = -1;
+}
+
+static int pem_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
+{
+ struct pem_pmu *pmu = hlist_entry_safe(node, struct pem_pmu, node);
+ unsigned int target;
+
+ if (cpu != pmu->cpu)
+ return 0;
+
+ target = cpumask_any_but(cpu_online_mask, cpu);
+ if (target >= nr_cpu_ids)
+ return 0;
+
+ perf_pmu_migrate_context(&pmu->pmu, cpu, target);
+ pmu->cpu = target;
+ return 0;
+}
+
+static int pem_perf_probe(struct platform_device *pdev)
+{
+ struct pem_pmu *pem_pmu;
+ struct resource *res;
+ void __iomem *base;
+ char *name;
+ int ret;
+
+ pem_pmu = devm_kzalloc(&pdev->dev, sizeof(*pem_pmu), GFP_KERNEL);
+ if (!pem_pmu)
+ return -ENOMEM;
+
+ pem_pmu->dev = &pdev->dev;
+ platform_set_drvdata(pdev, pem_pmu);
+
+ base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ pem_pmu->base = base;
+
+ pem_pmu->pmu = (struct pmu) {
+ .module = THIS_MODULE,
+ .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
+ .task_ctx_nr = perf_invalid_context,
+ .attr_groups = pem_perf_attr_groups,
+ .event_init = pem_perf_event_init,
+ .add = pem_perf_event_add,
+ .del = pem_perf_event_del,
+ .start = pem_perf_event_start,
+ .stop = pem_perf_event_stop,
+ .read = pem_perf_event_update,
+ };
+
+ /* Choose this cpu to collect perf data */
+ pem_pmu->cpu = raw_smp_processor_id();
+
+ name = devm_kasprintf(pem_pmu->dev, GFP_KERNEL, "mrvl_pcie_rc_pmu_%llx",
+ res->start);
+ if (!name)
+ return -ENOMEM;
+
+ cpuhp_state_add_instance_nocalls(CPUHP_AP_PERF_ARM_MRVL_PEM_ONLINE,
+ &pem_pmu->node);
+
+ ret = perf_pmu_register(&pem_pmu->pmu, name, -1);
+ if (ret)
+ goto error;
+
+ return 0;
+error:
+ cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_MRVL_PEM_ONLINE,
+ &pem_pmu->node);
+ return ret;
+}
+
+static void pem_perf_remove(struct platform_device *pdev)
+{
+ struct pem_pmu *pem_pmu = platform_get_drvdata(pdev);
+
+ cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_MRVL_PEM_ONLINE,
+ &pem_pmu->node);
+
+ perf_pmu_unregister(&pem_pmu->pmu);
+}
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id pem_pmu_acpi_match[] = {
+ {"MRVL000E", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, pem_pmu_acpi_match);
+#endif
+
+static struct platform_driver pem_pmu_driver = {
+ .driver = {
+ .name = "pem-pmu",
+ .acpi_match_table = ACPI_PTR(pem_pmu_acpi_match),
+ .suppress_bind_attrs = true,
+ },
+ .probe = pem_perf_probe,
+ .remove = pem_perf_remove,
+};
+
+static int __init pem_pmu_init(void)
+{
+ int ret;
+
+ ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_MRVL_PEM_ONLINE,
+ "perf/marvell/pem:online", NULL,
+ pem_pmu_offline_cpu);
+ if (ret)
+ return ret;
+
+ ret = platform_driver_register(&pem_pmu_driver);
+ if (ret)
+ cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_MRVL_PEM_ONLINE);
+ return ret;
+}
+
+static void __exit pem_pmu_exit(void)
+{
+ platform_driver_unregister(&pem_pmu_driver);
+ cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_MRVL_PEM_ONLINE);
+}
+
+module_init(pem_pmu_init);
+module_exit(pem_pmu_exit);
+
+MODULE_DESCRIPTION("Marvell PEM Perf driver");
+MODULE_AUTHOR("Gowthami Thiagarajan <gthiagarajan@marvell.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/perf/qcom_l2_pmu.c b/drivers/perf/qcom_l2_pmu.c
index 3f9a98c17a89..ea8c85729937 100644
--- a/drivers/perf/qcom_l2_pmu.c
+++ b/drivers/perf/qcom_l2_pmu.c
@@ -801,9 +801,8 @@ static int l2cache_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
static int l2cache_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
- struct cluster_pmu *cluster;
struct l2cache_pmu *l2cache_pmu;
- cpumask_t cluster_online_cpus;
+ struct cluster_pmu *cluster;
unsigned int target;
l2cache_pmu = hlist_entry_safe(node, struct l2cache_pmu, node);
@@ -820,9 +819,8 @@ static int l2cache_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
cluster->on_cpu = -1;
/* Any other CPU for this cluster which is still online */
- cpumask_and(&cluster_online_cpus, &cluster->cluster_cpus,
- cpu_online_mask);
- target = cpumask_any_but(&cluster_online_cpus, cpu);
+ target = cpumask_any_and_but(&cluster->cluster_cpus,
+ cpu_online_mask, cpu);
if (target >= nr_cpu_ids) {
disable_irq(cluster->irq);
return 0;
@@ -904,6 +902,7 @@ static int l2_cache_pmu_probe(struct platform_device *pdev)
l2cache_pmu->pmu = (struct pmu) {
/* suffix is instance id for future use with multiple sockets */
.name = "l2cache_0",
+ .parent = &pdev->dev,
.task_ctx_nr = perf_invalid_context,
.pmu_enable = l2_cache_pmu_enable,
.pmu_disable = l2_cache_pmu_disable,
@@ -965,7 +964,7 @@ out_unregister:
return err;
}
-static int l2_cache_pmu_remove(struct platform_device *pdev)
+static void l2_cache_pmu_remove(struct platform_device *pdev)
{
struct l2cache_pmu *l2cache_pmu =
to_l2cache_pmu(platform_get_drvdata(pdev));
@@ -973,7 +972,6 @@ static int l2_cache_pmu_remove(struct platform_device *pdev)
perf_pmu_unregister(&l2cache_pmu->pmu);
cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE,
&l2cache_pmu->node);
- return 0;
}
static struct platform_driver l2_cache_pmu_driver = {
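As an illustrative sketch (not part of the patch), the qcom_l2 hotplug path now uses cpumask_any_and_but() to pick a migration target in one step: any online CPU in the cluster other than the one going down, with no temporary mask. The equivalent computation with plain 64-bit masks (assumes at most 64 CPUs):

#include <stdint.h>
#include <stdio.h>

/* Return the first CPU set in (cluster & online) other than 'but',
 * or nr_cpus if no such CPU exists - the caller then disables the IRQ. */
static unsigned int any_and_but(uint64_t cluster, uint64_t online,
				unsigned int but, unsigned int nr_cpus)
{
	uint64_t candidates = (cluster & online) & ~(1ULL << but);
	unsigned int cpu;

	for (cpu = 0; cpu < nr_cpus; cpu++)
		if (candidates & (1ULL << cpu))
			return cpu;
	return nr_cpus;
}

int main(void)
{
	/* Cluster {2,3}, CPUs 0-3 online, CPU 2 going offline -> pick 3. */
	printf("target = %u\n", any_and_but(0xCULL, 0xFULL, 2, 4));
	return 0;
}
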
diff --git a/drivers/perf/qcom_l3_pmu.c b/drivers/perf/qcom_l3_pmu.c
index 2887edb4eb0b..66e6cabd6fff 100644
--- a/drivers/perf/qcom_l3_pmu.c
+++ b/drivers/perf/qcom_l3_pmu.c
@@ -609,18 +609,9 @@ static void qcom_l3_cache__event_read(struct perf_event *event)
/* formats */
-static ssize_t l3cache_pmu_format_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct dev_ext_attribute *eattr;
-
- eattr = container_of(attr, struct dev_ext_attribute, attr);
- return sysfs_emit(buf, "%s\n", (char *) eattr->var);
-}
-
#define L3CACHE_PMU_FORMAT_ATTR(_name, _config) \
(&((struct dev_ext_attribute[]) { \
- { .attr = __ATTR(_name, 0444, l3cache_pmu_format_show, NULL), \
+ { .attr = __ATTR(_name, 0444, device_show_string, NULL), \
.var = (void *) _config, } \
})[0].attr.attr)
@@ -742,12 +733,13 @@ static int qcom_l3_cache_pmu_probe(struct platform_device *pdev)
l3pmu = devm_kzalloc(&pdev->dev, sizeof(*l3pmu), GFP_KERNEL);
name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "l3cache_%s_%s",
- acpi_dev_parent(acpi_dev)->pnp.unique_id,
- acpi_dev->pnp.unique_id);
+ acpi_device_uid(acpi_dev_parent(acpi_dev)),
+ acpi_device_uid(acpi_dev));
if (!l3pmu || !name)
return -ENOMEM;
l3pmu->pmu = (struct pmu) {
+ .parent = &pdev->dev,
.task_ctx_nr = perf_invalid_context,
.pmu_enable = qcom_l3_cache__pmu_enable,
diff --git a/drivers/perf/riscv_pmu.c b/drivers/perf/riscv_pmu.c
index 56897d4d4fd3..7644147d50b4 100644
--- a/drivers/perf/riscv_pmu.c
+++ b/drivers/perf/riscv_pmu.c
@@ -14,9 +14,80 @@
#include <linux/perf/riscv_pmu.h>
#include <linux/printk.h>
#include <linux/smp.h>
+#include <linux/sched_clock.h>
#include <asm/sbi.h>
+static bool riscv_perf_user_access(struct perf_event *event)
+{
+ return ((event->attr.type == PERF_TYPE_HARDWARE) ||
+ (event->attr.type == PERF_TYPE_HW_CACHE) ||
+ (event->attr.type == PERF_TYPE_RAW)) &&
+ !!(event->hw.flags & PERF_EVENT_FLAG_USER_READ_CNT) &&
+ (event->hw.idx != -1);
+}
+
+void arch_perf_update_userpage(struct perf_event *event,
+ struct perf_event_mmap_page *userpg, u64 now)
+{
+ struct clock_read_data *rd;
+ unsigned int seq;
+ u64 ns;
+
+ userpg->cap_user_time = 0;
+ userpg->cap_user_time_zero = 0;
+ userpg->cap_user_time_short = 0;
+ userpg->cap_user_rdpmc = riscv_perf_user_access(event);
+
+ /*
+ * The counters are 64-bit, but the priv spec doesn't mandate that all
+ * the bits be implemented, so the counter width can vary between CPU
+ * vendors.
+ */
+ if (userpg->cap_user_rdpmc)
+ userpg->pmc_width = to_riscv_pmu(event->pmu)->ctr_get_width(event->hw.idx) + 1;
+
+ do {
+ rd = sched_clock_read_begin(&seq);
+
+ userpg->time_mult = rd->mult;
+ userpg->time_shift = rd->shift;
+ userpg->time_zero = rd->epoch_ns;
+ userpg->time_cycles = rd->epoch_cyc;
+ userpg->time_mask = rd->sched_clock_mask;
+
+ /*
+ * Subtract the cycle base, such that software that
+ * doesn't know about cap_user_time_short still 'works'
+ * assuming no wraps.
+ */
+ ns = mul_u64_u32_shr(rd->epoch_cyc, rd->mult, rd->shift);
+ userpg->time_zero -= ns;
+
+ } while (sched_clock_read_retry(seq));
+
+ userpg->time_offset = userpg->time_zero - now;
+
+ /*
+ * time_shift is not expected to be greater than 31 because the
+ * originally published conversion algorithm shifts a 32-bit value
+ * (now specified as a 64-bit value) - see the perf_event_mmap_page
+ * documentation in perf_event.h.
+ */
+ if (userpg->time_shift == 32) {
+ userpg->time_shift = 31;
+ userpg->time_mult >>= 1;
+ }
+
+ /*
+ * Internal timekeeping for enabled/running/stopped times
+ * is always computed with the sched_clock.
+ */
+ userpg->cap_user_time = 1;
+ userpg->cap_user_time_zero = 1;
+ userpg->cap_user_time_short = 1;
+}
+
static unsigned long csr_read_num(int csr_num)
{
#define switchcase_csr_read(__csr_num, __val) {\
@@ -77,19 +148,11 @@ u64 riscv_pmu_ctr_get_width_mask(struct perf_event *event)
struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
- if (!rvpmu->ctr_get_width)
- /**
- * If the pmu driver doesn't support counter width, set it to default
- * maximum allowed by the specification.
- */
- cwidth = 63;
- else {
- if (hwc->idx == -1)
- /* Handle init case where idx is not initialized yet */
- cwidth = rvpmu->ctr_get_width(0);
- else
- cwidth = rvpmu->ctr_get_width(hwc->idx);
- }
+ if (hwc->idx == -1)
+ /* Handle init case where idx is not initialized yet */
+ cwidth = rvpmu->ctr_get_width(0);
+ else
+ cwidth = rvpmu->ctr_get_width(hwc->idx);
return GENMASK_ULL(cwidth, 0);
}
@@ -102,7 +165,7 @@ u64 riscv_pmu_event_update(struct perf_event *event)
unsigned long cmask;
u64 oldval, delta;
- if (!rvpmu->ctr_read)
+ if (!rvpmu->ctr_read || (hwc->state & PERF_HES_UPTODATE))
return 0;
cmask = riscv_pmu_ctr_get_width_mask(event);
@@ -126,8 +189,6 @@ void riscv_pmu_stop(struct perf_event *event, int flags)
struct hw_perf_event *hwc = &event->hw;
struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
- WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
-
if (!(hwc->state & PERF_HES_STOPPED)) {
if (rvpmu->ctr_stop) {
rvpmu->ctr_stop(event, 0);
@@ -171,6 +232,8 @@ int riscv_pmu_event_set_period(struct perf_event *event)
local64_set(&hwc->prev_count, (u64)-left);
+ perf_event_update_userpage(event);
+
return overflow;
}
@@ -246,6 +309,10 @@ static int riscv_pmu_event_init(struct perf_event *event)
u64 event_config = 0;
uint64_t cmask;
+ /* driver does not support branch stack sampling */
+ if (has_branch_stack(event))
+ return -EOPNOTSUPP;
+
hwc->flags = 0;
mapped_event = rvpmu->event_map(event, &event_config);
if (mapped_event < 0) {
@@ -264,6 +331,9 @@ static int riscv_pmu_event_init(struct perf_event *event)
hwc->idx = -1;
hwc->event_base = mapped_event;
+ if (rvpmu->event_init)
+ rvpmu->event_init(event);
+
if (!is_sampling_event(event)) {
/*
* For non-sampling runs, limit the sample_period to half
@@ -280,6 +350,39 @@ static int riscv_pmu_event_init(struct perf_event *event)
return 0;
}
+static int riscv_pmu_event_idx(struct perf_event *event)
+{
+ struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
+
+ if (!(event->hw.flags & PERF_EVENT_FLAG_USER_READ_CNT))
+ return 0;
+
+ if (rvpmu->csr_index)
+ return rvpmu->csr_index(event) + 1;
+
+ return 0;
+}
+
+static void riscv_pmu_event_mapped(struct perf_event *event, struct mm_struct *mm)
+{
+ struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
+
+ if (rvpmu->event_mapped) {
+ rvpmu->event_mapped(event, mm);
+ perf_event_update_userpage(event);
+ }
+}
+
+static void riscv_pmu_event_unmapped(struct perf_event *event, struct mm_struct *mm)
+{
+ struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
+
+ if (rvpmu->event_unmapped) {
+ rvpmu->event_unmapped(event, mm);
+ perf_event_update_userpage(event);
+ }
+}
+
struct riscv_pmu *riscv_pmu_alloc(void)
{
struct riscv_pmu *pmu;
@@ -301,9 +404,13 @@ struct riscv_pmu *riscv_pmu_alloc(void)
cpuc->n_events = 0;
for (i = 0; i < RISCV_MAX_COUNTERS; i++)
cpuc->events[i] = NULL;
+ cpuc->snapshot_addr = NULL;
}
pmu->pmu = (struct pmu) {
.event_init = riscv_pmu_event_init,
+ .event_mapped = riscv_pmu_event_mapped,
+ .event_unmapped = riscv_pmu_event_unmapped,
+ .event_idx = riscv_pmu_event_idx,
.add = riscv_pmu_add,
.del = riscv_pmu_del,
.start = riscv_pmu_start,
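As an illustrative sketch (not part of the patch), the new arch_perf_update_userpage() publishes time_mult, time_shift and time_zero so that a user of the mmap'd event page can convert raw cycle readings to nanoseconds without a syscall, following the recipe documented for perf_event_mmap_page:

#include <stdint.h>

/* Convert a raw cycle value to nanoseconds using the fields exported
 * in perf_event_mmap_page (split into quotient/remainder to avoid
 * overflowing the 64-bit multiply). */
static uint64_t cycles_to_ns(uint64_t cyc, uint32_t time_mult,
			     uint16_t time_shift, uint64_t time_zero)
{
	uint64_t quot = cyc >> time_shift;
	uint64_t rem  = cyc & (((uint64_t)1 << time_shift) - 1);

	return time_zero + quot * time_mult +
	       ((rem * time_mult) >> time_shift);
}

int main(void)
{
	/* Example: mult=4, shift=2 means 1 cycle == 1 ns (4 / 2^2). */
	return cycles_to_ns(1000, 4, 2, 0) == 1000 ? 0 : 1;
}
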
diff --git a/drivers/perf/riscv_pmu_legacy.c b/drivers/perf/riscv_pmu_legacy.c
index ca9e20bfc7ac..93c8e0fdb589 100644
--- a/drivers/perf/riscv_pmu_legacy.c
+++ b/drivers/perf/riscv_pmu_legacy.c
@@ -13,7 +13,7 @@
#include <linux/platform_device.h>
#define RISCV_PMU_LEGACY_CYCLE 0
-#define RISCV_PMU_LEGACY_INSTRET 1
+#define RISCV_PMU_LEGACY_INSTRET 2
static bool pmu_init_done;
@@ -22,13 +22,13 @@ static int pmu_legacy_ctr_get_idx(struct perf_event *event)
struct perf_event_attr *attr = &event->attr;
if (event->attr.type != PERF_TYPE_HARDWARE)
- return -EOPNOTSUPP;
+ return -ENOENT;
if (attr->config == PERF_COUNT_HW_CPU_CYCLES)
return RISCV_PMU_LEGACY_CYCLE;
else if (attr->config == PERF_COUNT_HW_INSTRUCTIONS)
return RISCV_PMU_LEGACY_INSTRET;
else
- return -EOPNOTSUPP;
+ return -ENOENT;
}
/* For legacy config & counter index are same */
@@ -37,6 +37,12 @@ static int pmu_legacy_event_map(struct perf_event *event, u64 *config)
return pmu_legacy_ctr_get_idx(event);
}
+/* cycle & instret are always 64 bit, one bit less according to SBI spec */
+static int pmu_legacy_ctr_get_width(int idx)
+{
+ return 63;
+}
+
static u64 pmu_legacy_read_ctr(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
@@ -71,6 +77,29 @@ static void pmu_legacy_ctr_start(struct perf_event *event, u64 ival)
local64_set(&hwc->prev_count, initial_val);
}
+static uint8_t pmu_legacy_csr_index(struct perf_event *event)
+{
+ return event->hw.idx;
+}
+
+static void pmu_legacy_event_mapped(struct perf_event *event, struct mm_struct *mm)
+{
+ if (event->attr.config != PERF_COUNT_HW_CPU_CYCLES &&
+ event->attr.config != PERF_COUNT_HW_INSTRUCTIONS)
+ return;
+
+ event->hw.flags |= PERF_EVENT_FLAG_USER_READ_CNT;
+}
+
+static void pmu_legacy_event_unmapped(struct perf_event *event, struct mm_struct *mm)
+{
+ if (event->attr.config != PERF_COUNT_HW_CPU_CYCLES &&
+ event->attr.config != PERF_COUNT_HW_INSTRUCTIONS)
+ return;
+
+ event->hw.flags &= ~PERF_EVENT_FLAG_USER_READ_CNT;
+}
+
/*
* This is just a simple implementation to allow legacy implementations
* compatible with new RISC-V PMU driver framework.
@@ -88,9 +117,14 @@ static void pmu_legacy_init(struct riscv_pmu *pmu)
pmu->ctr_stop = NULL;
pmu->event_map = pmu_legacy_event_map;
pmu->ctr_get_idx = pmu_legacy_ctr_get_idx;
- pmu->ctr_get_width = NULL;
+ pmu->ctr_get_width = pmu_legacy_ctr_get_width;
pmu->ctr_clear_idx = NULL;
pmu->ctr_read = pmu_legacy_read_ctr;
+ pmu->event_mapped = pmu_legacy_event_mapped;
+ pmu->event_unmapped = pmu_legacy_event_unmapped;
+ pmu->csr_index = pmu_legacy_csr_index;
+ pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
+ pmu->pmu.capabilities |= PERF_PMU_CAP_NO_EXCLUDE;
perf_pmu_register(&pmu->pmu, "cpu", PERF_TYPE_RAW);
}
@@ -102,6 +136,7 @@ static int pmu_legacy_device_probe(struct platform_device *pdev)
pmu = riscv_pmu_alloc();
if (!pmu)
return -ENOMEM;
+ pmu->pmu.parent = &pdev->dev;
pmu_legacy_init(pmu);
return 0;
diff --git a/drivers/perf/riscv_pmu_sbi.c b/drivers/perf/riscv_pmu_sbi.c
index 4163ff517471..7dd282da67ce 100644
--- a/drivers/perf/riscv_pmu_sbi.c
+++ b/drivers/perf/riscv_pmu_sbi.c
@@ -19,13 +19,54 @@
#include <linux/of.h>
#include <linux/cpu_pm.h>
#include <linux/sched/clock.h>
+#include <linux/soc/andes/irq.h>
+#include <linux/workqueue.h>
#include <asm/errata_list.h>
#include <asm/sbi.h>
-#include <asm/hwcap.h>
-
-PMU_FORMAT_ATTR(event, "config:0-47");
-PMU_FORMAT_ATTR(firmware, "config:63");
+#include <asm/cpufeature.h>
+#include <asm/vendor_extensions.h>
+#include <asm/vendor_extensions/andes.h>
+
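+/*
+ * Read the counter-overflow CSR. The alternatives framework patches in the
+ * T-Head or Andes vendor CSR at boot when the corresponding erratum or
+ * vendor extension is present; otherwise the standard scountovf CSR is used.
+ */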
+#define ALT_SBI_PMU_OVERFLOW(__ovl) \
+asm volatile(ALTERNATIVE_2( \
+ "csrr %0, " __stringify(CSR_SCOUNTOVF), \
+ "csrr %0, " __stringify(THEAD_C9XX_CSR_SCOUNTEROF), \
+ THEAD_VENDOR_ID, ERRATA_THEAD_PMU, \
+ CONFIG_ERRATA_THEAD_PMU, \
+ "csrr %0, " __stringify(ANDES_CSR_SCOUNTEROF), \
+ ANDES_VENDOR_ID, \
+ RISCV_ISA_VENDOR_EXT_XANDESPMU + RISCV_VENDOR_EXT_ALTERNATIVES_BASE, \
+ CONFIG_ANDES_CUSTOM_PMU) \
+ : "=r" (__ovl) : \
+ : "memory")
+
+#define ALT_SBI_PMU_OVF_CLEAR_PENDING(__irq_mask) \
+asm volatile(ALTERNATIVE( \
+ "csrc " __stringify(CSR_IP) ", %0\n\t", \
+ "csrc " __stringify(ANDES_CSR_SLIP) ", %0\n\t", \
+ ANDES_VENDOR_ID, \
+ RISCV_ISA_VENDOR_EXT_XANDESPMU + RISCV_VENDOR_EXT_ALTERNATIVES_BASE, \
+ CONFIG_ANDES_CUSTOM_PMU) \
+ : : "r"(__irq_mask) \
+ : "memory")
+
+#define SYSCTL_NO_USER_ACCESS 0
+#define SYSCTL_USER_ACCESS 1
+#define SYSCTL_LEGACY 2
+
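+/*
+ * Per-event hw flags recording which of the above sysctl modes was in effect
+ * when the event was initialized (see pmu_sbi_event_init()).
+ */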
+#define PERF_EVENT_FLAG_NO_USER_ACCESS BIT(SYSCTL_NO_USER_ACCESS)
+#define PERF_EVENT_FLAG_USER_ACCESS BIT(SYSCTL_USER_ACCESS)
+#define PERF_EVENT_FLAG_LEGACY BIT(SYSCTL_LEGACY)
+
+PMU_FORMAT_ATTR(event, "config:0-55");
+PMU_FORMAT_ATTR(firmware, "config:62-63");
+
+static bool sbi_v2_available;
+static bool sbi_v3_available;
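+/*
+ * Enabled at probe time once the SBI implementation accepts a snapshot
+ * shared-memory region; checked in the counter start/stop/read and
+ * overflow-handling paths.
+ */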
+static DEFINE_STATIC_KEY_FALSE(sbi_pmu_snapshot_available);
+#define sbi_pmu_snapshot_available() \
+ static_branch_unlikely(&sbi_pmu_snapshot_available)
static struct attribute *riscv_arch_formats_attr[] = {
&format_attr_event.attr,
@@ -43,6 +84,9 @@ static const struct attribute_group *riscv_pmu_attr_groups[] = {
NULL,
};
+/* Allow user mode access by default */
+static int sysctl_perf_user_access __read_mostly = SYSCTL_USER_ACCESS;
+
/*
* RISC-V doesn't have heterogeneous harts yet. This needs to be part of
* per_cpu in case of harts with different pmu counters
@@ -50,11 +94,13 @@ static const struct attribute_group *riscv_pmu_attr_groups[] = {
static union sbi_pmu_ctr_info *pmu_ctr_list;
static bool riscv_pmu_use_irq;
static unsigned int riscv_pmu_irq_num;
+static unsigned int riscv_pmu_irq_mask;
static unsigned int riscv_pmu_irq;
/* Cache the available counters in a bitmask */
static unsigned long cmask;
+static int pmu_event_find_cache(u64 config);
struct sbi_pmu_event_data {
union {
union {
@@ -75,7 +121,7 @@ struct sbi_pmu_event_data {
};
};
-static const struct sbi_pmu_event_data pmu_hw_event_map[] = {
+static struct sbi_pmu_event_data pmu_hw_event_map[] = {
[PERF_COUNT_HW_CPU_CYCLES] = {.hw_gen_event = {
SBI_PMU_HW_CPU_CYCLES,
SBI_PMU_EVENT_TYPE_HW, 0}},
@@ -109,7 +155,7 @@ static const struct sbi_pmu_event_data pmu_hw_event_map[] = {
};
#define C(x) PERF_COUNT_HW_CACHE_##x
-static const struct sbi_pmu_event_data pmu_cache_event_map[PERF_COUNT_HW_CACHE_MAX]
+static struct sbi_pmu_event_data pmu_cache_event_map[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
@@ -254,6 +300,103 @@ static const struct sbi_pmu_event_data pmu_cache_event_map[PERF_COUNT_HW_CACHE_M
},
};
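+/*
+ * Query the SBI firmware (EVENT_GET_INFO, available from SBI v3.0) for all
+ * standard hardware and cache events in one call and mark the unsupported
+ * ones with -ENOENT so they are rejected when events are mapped.
+ */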
+static int pmu_sbi_check_event_info(void)
+{
+ int num_events = ARRAY_SIZE(pmu_hw_event_map) + PERF_COUNT_HW_CACHE_MAX *
+ PERF_COUNT_HW_CACHE_OP_MAX * PERF_COUNT_HW_CACHE_RESULT_MAX;
+ struct riscv_pmu_event_info *event_info_shmem;
+ phys_addr_t base_addr;
+ int i, j, k, result = 0, count = 0;
+ struct sbiret ret;
+
+ event_info_shmem = kcalloc(num_events, sizeof(*event_info_shmem), GFP_KERNEL);
+ if (!event_info_shmem)
+ return -ENOMEM;
+
+ for (i = 0; i < ARRAY_SIZE(pmu_hw_event_map); i++)
+ event_info_shmem[count++].event_idx = pmu_hw_event_map[i].event_idx;
+
+ for (i = 0; i < ARRAY_SIZE(pmu_cache_event_map); i++) {
+ for (j = 0; j < ARRAY_SIZE(pmu_cache_event_map[i]); j++) {
+ for (k = 0; k < ARRAY_SIZE(pmu_cache_event_map[i][j]); k++)
+ event_info_shmem[count++].event_idx =
+ pmu_cache_event_map[i][j][k].event_idx;
+ }
+ }
+
+ base_addr = __pa(event_info_shmem);
+ if (IS_ENABLED(CONFIG_32BIT))
+ ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_EVENT_GET_INFO, lower_32_bits(base_addr),
+ upper_32_bits(base_addr), count, 0, 0, 0);
+ else
+ ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_EVENT_GET_INFO, base_addr, 0,
+ count, 0, 0, 0);
+ if (ret.error) {
+ result = -EOPNOTSUPP;
+ goto free_mem;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(pmu_hw_event_map); i++) {
+ if (!(event_info_shmem[i].output & RISCV_PMU_EVENT_INFO_OUTPUT_MASK))
+ pmu_hw_event_map[i].event_idx = -ENOENT;
+ }
+
+ count = ARRAY_SIZE(pmu_hw_event_map);
+
+ for (i = 0; i < ARRAY_SIZE(pmu_cache_event_map); i++) {
+ for (j = 0; j < ARRAY_SIZE(pmu_cache_event_map[i]); j++) {
+ for (k = 0; k < ARRAY_SIZE(pmu_cache_event_map[i][j]); k++) {
+ if (!(event_info_shmem[count].output &
+ RISCV_PMU_EVENT_INFO_OUTPUT_MASK))
+ pmu_cache_event_map[i][j][k].event_idx = -ENOENT;
+ count++;
+ }
+ }
+ }
+
+free_mem:
+ kfree(event_info_shmem);
+
+ return result;
+}
+
+static void pmu_sbi_check_event(struct sbi_pmu_event_data *edata)
+{
+ struct sbiret ret;
+
+ ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_CFG_MATCH,
+ 0, cmask, 0, edata->event_idx, 0, 0);
+ if (!ret.error) {
+ sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP,
+ ret.value, 0x1, SBI_PMU_STOP_FLAG_RESET, 0, 0, 0);
+ } else if (ret.error == SBI_ERR_NOT_SUPPORTED) {
+ /* This event cannot be monitored by any counter */
+ edata->event_idx = -ENOENT;
+ }
+}
+
+static void pmu_sbi_check_std_events(struct work_struct *work)
+{
+ int ret;
+
+ if (sbi_v3_available) {
+ ret = pmu_sbi_check_event_info();
+ if (ret)
+ pr_err("pmu_sbi_check_event_info failed with error %d\n", ret);
+ return;
+ }
+
+ for (int i = 0; i < ARRAY_SIZE(pmu_hw_event_map); i++)
+ pmu_sbi_check_event(&pmu_hw_event_map[i]);
+
+ for (int i = 0; i < ARRAY_SIZE(pmu_cache_event_map); i++)
+ for (int j = 0; j < ARRAY_SIZE(pmu_cache_event_map[i]); j++)
+ for (int k = 0; k < ARRAY_SIZE(pmu_cache_event_map[i][j]); k++)
+ pmu_sbi_check_event(&pmu_cache_event_map[i][j][k]);
+}
+
+static DECLARE_WORK(check_std_events_work, pmu_sbi_check_std_events);
+
static int pmu_sbi_ctr_get_width(int idx)
{
return pmu_ctr_list[idx].width;
@@ -267,8 +410,73 @@ static bool pmu_sbi_ctr_is_fw(int cidx)
if (!info)
return false;
- return (info->type == SBI_PMU_CTR_TYPE_FW) ? true : false;
+ return info->type == SBI_PMU_CTR_TYPE_FW;
+}
+
+int riscv_pmu_get_event_info(u32 type, u64 config, u64 *econfig)
+{
+ int ret = -ENOENT;
+
+ switch (type) {
+ case PERF_TYPE_HARDWARE:
+ if (config >= PERF_COUNT_HW_MAX)
+ return -EINVAL;
+ ret = pmu_hw_event_map[config].event_idx;
+ break;
+ case PERF_TYPE_HW_CACHE:
+ ret = pmu_event_find_cache(config);
+ break;
+ case PERF_TYPE_RAW:
+ /*
+ * As per SBI v0.3 specification,
+ * -- the upper 16 bits must be unused for a hardware raw event.
+ * As per SBI v2.0 specification,
+ * -- the upper 8 bits must be unused for a hardware raw event.
+ * Bits 63:62 are used to distinguish between raw events:
+ * 00 - Hardware raw event
+ * 10 - SBI firmware events
+ * 11 - RISC-V platform-specific firmware event
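+ *
+ * For example, a config of 0x8000000000000010 (bits 63:62 == 10) selects
+ * SBI firmware event 0x10, while a plain config of 0x1 (bits 63:62 == 00)
+ * is treated as a hardware raw event.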
+ */
+ switch (config >> 62) {
+ case 0:
+ if (sbi_v3_available) {
+ /* Return an error if any of bits [56-63] is set, as that is not allowed by the spec */
+ if (!(config & ~RISCV_PMU_RAW_EVENT_V2_MASK)) {
+ if (econfig)
+ *econfig = config & RISCV_PMU_RAW_EVENT_V2_MASK;
+ ret = RISCV_PMU_RAW_EVENT_V2_IDX;
+ }
+ /* Return an error if any of bits [48-63] is set, as that is not allowed by the spec */
+ } else if (!(config & ~RISCV_PMU_RAW_EVENT_MASK)) {
+ if (econfig)
+ *econfig = config & RISCV_PMU_RAW_EVENT_MASK;
+ ret = RISCV_PMU_RAW_EVENT_IDX;
+ }
+ break;
+ case 2:
+ ret = (config & 0xFFFF) | (SBI_PMU_EVENT_TYPE_FW << 16);
+ break;
+ case 3:
+ /*
+ * For RISC-V platform-specific firmware events
+ * Event code - 0xFFFF
+ * Event data - raw event encoding
+ */
+ ret = SBI_PMU_EVENT_TYPE_FW << 16 | RISCV_PLAT_FW_EVENT;
+ if (econfig)
+ *econfig = config & RISCV_PMU_PLAT_FW_EVENT_MASK;
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return ret;
}
+EXPORT_SYMBOL_GPL(riscv_pmu_get_event_info);
/*
* Returns the counter width of a programmable counter and number of hardware
@@ -301,6 +509,11 @@ int riscv_pmu_get_hpm_info(u32 *hw_ctr_width, u32 *num_hw_ctr)
}
EXPORT_SYMBOL_GPL(riscv_pmu_get_hpm_info);
+static uint8_t pmu_sbi_csr_index(struct perf_event *event)
+{
+ return pmu_ctr_list[event->hw.idx].csr - CSR_CYCLE;
+}
+
static unsigned long pmu_sbi_get_filter_flags(struct perf_event *event)
{
unsigned long cflags = 0;
@@ -329,18 +542,34 @@ static int pmu_sbi_ctr_get_idx(struct perf_event *event)
struct cpu_hw_events *cpuc = this_cpu_ptr(rvpmu->hw_events);
struct sbiret ret;
int idx;
- uint64_t cbase = 0;
+ uint64_t cbase = 0, cmask = rvpmu->cmask;
unsigned long cflags = 0;
cflags = pmu_sbi_get_filter_flags(event);
+
+ /*
+ * In legacy mode, we have to force the fixed counters for those events,
+ * but not in user access mode, where we want to use the other counters
+ * that support sampling/filtering.
+ */
+ if ((hwc->flags & PERF_EVENT_FLAG_LEGACY) && (event->attr.type == PERF_TYPE_HARDWARE)) {
+ if (event->attr.config == PERF_COUNT_HW_CPU_CYCLES) {
+ cflags |= SBI_PMU_CFG_FLAG_SKIP_MATCH;
+ cmask = 1;
+ } else if (event->attr.config == PERF_COUNT_HW_INSTRUCTIONS) {
+ cflags |= SBI_PMU_CFG_FLAG_SKIP_MATCH;
+ cmask = BIT(CSR_INSTRET - CSR_CYCLE);
+ }
+ }
+
/* retrieve the available counter index */
#if defined(CONFIG_32BIT)
ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_CFG_MATCH, cbase,
- rvpmu->cmask, cflags, hwc->event_base, hwc->config,
+ cmask, cflags, hwc->event_base, hwc->config,
hwc->config >> 32);
#else
ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_CFG_MATCH, cbase,
- rvpmu->cmask, cflags, hwc->event_base, hwc->config, 0);
+ cmask, cflags, hwc->event_base, hwc->config, 0);
#endif
if (ret.error) {
pr_debug("Not able to find a counter for event %lx config %llx\n",
@@ -414,41 +643,97 @@ static int pmu_sbi_event_map(struct perf_event *event, u64 *econfig)
{
u32 type = event->attr.type;
u64 config = event->attr.config;
- int bSoftware;
- u64 raw_config_val;
- int ret;
- switch (type) {
- case PERF_TYPE_HARDWARE:
- if (config >= PERF_COUNT_HW_MAX)
- return -EINVAL;
- ret = pmu_hw_event_map[event->attr.config].event_idx;
- break;
- case PERF_TYPE_HW_CACHE:
- ret = pmu_event_find_cache(config);
- break;
- case PERF_TYPE_RAW:
- /*
- * As per SBI specification, the upper 16 bits must be unused for
- * a raw event. Use the MSB (63b) to distinguish between hardware
- * raw event and firmware events.
- */
- bSoftware = config >> 63;
- raw_config_val = config & RISCV_PMU_RAW_EVENT_MASK;
- if (bSoftware) {
- ret = (raw_config_val & 0xFFFF) |
- (SBI_PMU_EVENT_TYPE_FW << 16);
- } else {
- ret = RISCV_PMU_RAW_EVENT_IDX;
- *econfig = raw_config_val;
+ /*
+ * Ensure we are finished checking standard hardware events for
+ * validity before allowing userspace to configure any events.
+ */
+ flush_work(&check_std_events_work);
+
+ return riscv_pmu_get_event_info(type, config, econfig);
+}
+
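+/*
+ * SBI PMU snapshot support: each CPU gets a page of shared memory that the
+ * SBI implementation fills with counter values and an overflow mask when
+ * counters are stopped with the TAKE_SNAPSHOT flag, so the kernel can read
+ * counters without extra SBI calls.
+ */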
+static void pmu_sbi_snapshot_free(struct riscv_pmu *pmu)
+{
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ struct cpu_hw_events *cpu_hw_evt = per_cpu_ptr(pmu->hw_events, cpu);
+
+ if (!cpu_hw_evt->snapshot_addr)
+ continue;
+
+ free_page((unsigned long)cpu_hw_evt->snapshot_addr);
+ cpu_hw_evt->snapshot_addr = NULL;
+ cpu_hw_evt->snapshot_addr_phys = 0;
+ }
+}
+
+static int pmu_sbi_snapshot_alloc(struct riscv_pmu *pmu)
+{
+ int cpu;
+ struct page *snapshot_page;
+
+ for_each_possible_cpu(cpu) {
+ struct cpu_hw_events *cpu_hw_evt = per_cpu_ptr(pmu->hw_events, cpu);
+
+ snapshot_page = alloc_page(GFP_ATOMIC | __GFP_ZERO);
+ if (!snapshot_page) {
+ pmu_sbi_snapshot_free(pmu);
+ return -ENOMEM;
}
- break;
- default:
- ret = -EINVAL;
- break;
+ cpu_hw_evt->snapshot_addr = page_to_virt(snapshot_page);
+ cpu_hw_evt->snapshot_addr_phys = page_to_phys(snapshot_page);
}
- return ret;
+ return 0;
+}
+
+static int pmu_sbi_snapshot_disable(void)
+{
+ struct sbiret ret;
+
+ ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_SNAPSHOT_SET_SHMEM, SBI_SHMEM_DISABLE,
+ SBI_SHMEM_DISABLE, 0, 0, 0, 0);
+ if (ret.error) {
+ pr_warn("failed to disable snapshot shared memory\n");
+ return sbi_err_map_linux_errno(ret.error);
+ }
+
+ return 0;
+}
+
+static int pmu_sbi_snapshot_setup(struct riscv_pmu *pmu, int cpu)
+{
+ struct cpu_hw_events *cpu_hw_evt;
+ struct sbiret ret = {0};
+
+ cpu_hw_evt = per_cpu_ptr(pmu->hw_events, cpu);
+ if (!cpu_hw_evt->snapshot_addr_phys)
+ return -EINVAL;
+
+ if (cpu_hw_evt->snapshot_set_done)
+ return 0;
+
+ if (IS_ENABLED(CONFIG_32BIT))
+ ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_SNAPSHOT_SET_SHMEM,
+ cpu_hw_evt->snapshot_addr_phys,
+ (u64)(cpu_hw_evt->snapshot_addr_phys) >> 32, 0, 0, 0, 0);
+ else
+ ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_SNAPSHOT_SET_SHMEM,
+ cpu_hw_evt->snapshot_addr_phys, 0, 0, 0, 0, 0);
+
+ /* Free up the snapshot area memory and fall back to SBI PMU calls without snapshot */
+ if (ret.error) {
+ if (ret.error != SBI_ERR_NOT_SUPPORTED)
+ pr_warn("pmu snapshot setup failed with error %ld\n", ret.error);
+ return sbi_err_map_linux_errno(ret.error);
+ }
+
+ memset(cpu_hw_evt->snapshot_cval_shcopy, 0, sizeof(u64) * RISCV_MAX_COUNTERS);
+ cpu_hw_evt->snapshot_set_done = true;
+
+ return 0;
}
static u64 pmu_sbi_ctr_read(struct perf_event *event)
@@ -456,30 +741,68 @@ static u64 pmu_sbi_ctr_read(struct perf_event *event)
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
struct sbiret ret;
- union sbi_pmu_ctr_info info;
u64 val = 0;
+ struct riscv_pmu *pmu = to_riscv_pmu(event->pmu);
+ struct cpu_hw_events *cpu_hw_evt = this_cpu_ptr(pmu->hw_events);
+ struct riscv_pmu_snapshot_data *sdata = cpu_hw_evt->snapshot_addr;
+ union sbi_pmu_ctr_info info = pmu_ctr_list[idx];
+
+ /* Read the value from the shared memory directly only if the counter is stopped */
+ if (sbi_pmu_snapshot_available() && (hwc->state & PERF_HES_STOPPED)) {
+ val = sdata->ctr_values[idx];
+ return val;
+ }
if (pmu_sbi_is_fw_event(event)) {
ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_FW_READ,
hwc->idx, 0, 0, 0, 0, 0);
- if (!ret.error)
- val = ret.value;
+ if (ret.error)
+ return 0;
+
+ val = ret.value;
+ if (IS_ENABLED(CONFIG_32BIT) && sbi_v2_available && info.width >= 32) {
+ ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_FW_READ_HI,
+ hwc->idx, 0, 0, 0, 0, 0);
+ if (!ret.error)
+ val |= ((u64)ret.value << 32);
+ else
+ WARN_ONCE(1, "Unable to read upper 32 bits of firmware counter error: %ld\n",
+ ret.error);
+ }
} else {
- info = pmu_ctr_list[idx];
val = riscv_pmu_ctr_read_csr(info.csr);
if (IS_ENABLED(CONFIG_32BIT))
- val = ((u64)riscv_pmu_ctr_read_csr(info.csr + 0x80)) << 31 | val;
+ val |= ((u64)riscv_pmu_ctr_read_csr(info.csr + 0x80)) << 32;
}
return val;
}
+static void pmu_sbi_set_scounteren(void *arg)
+{
+ struct perf_event *event = (struct perf_event *)arg;
+
+ if (event->hw.idx != -1)
+ csr_write(CSR_SCOUNTEREN,
+ csr_read(CSR_SCOUNTEREN) | BIT(pmu_sbi_csr_index(event)));
+}
+
+static void pmu_sbi_reset_scounteren(void *arg)
+{
+ struct perf_event *event = (struct perf_event *)arg;
+
+ if (event->hw.idx != -1)
+ csr_write(CSR_SCOUNTEREN,
+ csr_read(CSR_SCOUNTEREN) & ~BIT(pmu_sbi_csr_index(event)));
+}
+
static void pmu_sbi_ctr_start(struct perf_event *event, u64 ival)
{
struct sbiret ret;
struct hw_perf_event *hwc = &event->hw;
unsigned long flag = SBI_PMU_START_FLAG_SET_INIT_VALUE;
+ /* There is no benefit in setting the SNAPSHOT flag for a single counter */
#if defined(CONFIG_32BIT)
ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, hwc->idx,
1, flag, ival, ival >> 32, 0);
@@ -490,18 +813,46 @@ static void pmu_sbi_ctr_start(struct perf_event *event, u64 ival)
if (ret.error && (ret.error != SBI_ERR_ALREADY_STARTED))
pr_err("Starting counter idx %d failed with error %d\n",
hwc->idx, sbi_err_map_linux_errno(ret.error));
+
+ if ((hwc->flags & PERF_EVENT_FLAG_USER_ACCESS) &&
+ (hwc->flags & PERF_EVENT_FLAG_USER_READ_CNT))
+ pmu_sbi_set_scounteren((void *)event);
}
static void pmu_sbi_ctr_stop(struct perf_event *event, unsigned long flag)
{
struct sbiret ret;
struct hw_perf_event *hwc = &event->hw;
+ struct riscv_pmu *pmu = to_riscv_pmu(event->pmu);
+ struct cpu_hw_events *cpu_hw_evt = this_cpu_ptr(pmu->hw_events);
+ struct riscv_pmu_snapshot_data *sdata = cpu_hw_evt->snapshot_addr;
+
+ if ((hwc->flags & PERF_EVENT_FLAG_USER_ACCESS) &&
+ (hwc->flags & PERF_EVENT_FLAG_USER_READ_CNT))
+ pmu_sbi_reset_scounteren((void *)event);
+
+ if (sbi_pmu_snapshot_available())
+ flag |= SBI_PMU_STOP_FLAG_TAKE_SNAPSHOT;
ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP, hwc->idx, 1, flag, 0, 0, 0);
- if (ret.error && (ret.error != SBI_ERR_ALREADY_STOPPED) &&
- flag != SBI_PMU_STOP_FLAG_RESET)
+ if (!ret.error && sbi_pmu_snapshot_available()) {
+ /*
+ * The counter snapshot is based on the index base specified by hwc->idx.
+ * Since the counter mask passed above is 0x1, the SBI implementation
+ * updates the actual counter value at index 0 of the shared memory. To
+ * keep the values accurate, move that value to the slot matching hwc->idx.
+ * If hwc->idx is already zero, the value is in the right place and no
+ * adjustment is needed.
+ */
+ if (hwc->idx > 0) {
+ sdata->ctr_values[hwc->idx] = sdata->ctr_values[0];
+ sdata->ctr_values[0] = 0;
+ }
+ } else if (ret.error && (ret.error != SBI_ERR_ALREADY_STOPPED) &&
+ flag != SBI_PMU_STOP_FLAG_RESET) {
pr_err("Stopping counter idx %d failed with error %d\n",
hwc->idx, sbi_err_map_linux_errno(ret.error));
+ }
}
static int pmu_sbi_find_num_ctrs(void)
@@ -553,16 +904,45 @@ static inline void pmu_sbi_stop_all(struct riscv_pmu *pmu)
* which may include counters that are not enabled yet.
*/
sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP,
- 0, pmu->cmask, 0, 0, 0, 0);
+ 0, pmu->cmask, SBI_PMU_STOP_FLAG_RESET, 0, 0, 0);
}
static inline void pmu_sbi_stop_hw_ctrs(struct riscv_pmu *pmu)
{
struct cpu_hw_events *cpu_hw_evt = this_cpu_ptr(pmu->hw_events);
+ struct riscv_pmu_snapshot_data *sdata = cpu_hw_evt->snapshot_addr;
+ unsigned long flag = 0;
+ int i, idx;
+ struct sbiret ret;
+ u64 temp_ctr_overflow_mask = 0;
+
+ if (sbi_pmu_snapshot_available())
+ flag = SBI_PMU_STOP_FLAG_TAKE_SNAPSHOT;
+
+ /* Reset the shadow copy to avoid saving/restoring stale values from a previous overflow */
+ memset(cpu_hw_evt->snapshot_cval_shcopy, 0, sizeof(u64) * RISCV_MAX_COUNTERS);
+
+ for (i = 0; i < BITS_TO_LONGS(RISCV_MAX_COUNTERS); i++) {
+ /* No need to check the error here as we can't do anything about it */
+ ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP, i * BITS_PER_LONG,
+ cpu_hw_evt->used_hw_ctrs[i], flag, 0, 0, 0);
+ if (!ret.error && sbi_pmu_snapshot_available()) {
+ /* Save the counter values to avoid clobbering */
+ for_each_set_bit(idx, &cpu_hw_evt->used_hw_ctrs[i], BITS_PER_LONG)
+ cpu_hw_evt->snapshot_cval_shcopy[i * BITS_PER_LONG + idx] =
+ sdata->ctr_values[idx];
+ /* Save the overflow mask to avoid clobbering */
+ temp_ctr_overflow_mask |= sdata->ctr_overflow_mask << (i * BITS_PER_LONG);
+ }
+ }
- /* No need to check the error here as we can't do anything about the error */
- sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP, 0,
- cpu_hw_evt->used_hw_ctrs[0], 0, 0, 0, 0);
+ /* Restore the counter values to the shared memory for used hw counters */
+ if (sbi_pmu_snapshot_available()) {
+ for_each_set_bit(idx, cpu_hw_evt->used_hw_ctrs, RISCV_MAX_COUNTERS)
+ sdata->ctr_values[idx] = cpu_hw_evt->snapshot_cval_shcopy[idx];
+ if (temp_ctr_overflow_mask)
+ sdata->ctr_overflow_mask = temp_ctr_overflow_mask;
+ }
}
/*
@@ -571,11 +951,10 @@ static inline void pmu_sbi_stop_hw_ctrs(struct riscv_pmu *pmu)
* while the overflowed counters need to be started with updated initialization
* value.
*/
-static inline void pmu_sbi_start_overflow_mask(struct riscv_pmu *pmu,
- unsigned long ctr_ovf_mask)
+static inline void pmu_sbi_start_ovf_ctrs_sbi(struct cpu_hw_events *cpu_hw_evt,
+ u64 ctr_ovf_mask)
{
- int idx = 0;
- struct cpu_hw_events *cpu_hw_evt = this_cpu_ptr(pmu->hw_events);
+ int idx = 0, i;
struct perf_event *event;
unsigned long flag = SBI_PMU_START_FLAG_SET_INIT_VALUE;
unsigned long ctr_start_mask = 0;
@@ -583,11 +962,14 @@ static inline void pmu_sbi_start_overflow_mask(struct riscv_pmu *pmu,
struct hw_perf_event *hwc;
u64 init_val = 0;
- ctr_start_mask = cpu_hw_evt->used_hw_ctrs[0] & ~ctr_ovf_mask;
-
- /* Start all the counters that did not overflow in a single shot */
- sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, 0, ctr_start_mask,
- 0, 0, 0, 0);
+ for (i = 0; i < BITS_TO_LONGS(RISCV_MAX_COUNTERS); i++) {
+ ctr_start_mask = cpu_hw_evt->used_hw_ctrs[i] & ~ctr_ovf_mask;
+ /* Start all the counters that did not overflow in a single shot */
+ if (ctr_start_mask) {
+ sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, i * BITS_PER_LONG,
+ ctr_start_mask, 0, 0, 0, 0);
+ }
+ }
/* Reinitialize and start all the counter that overflowed */
while (ctr_ovf_mask) {
@@ -610,6 +992,52 @@ static inline void pmu_sbi_start_overflow_mask(struct riscv_pmu *pmu,
}
}
+static inline void pmu_sbi_start_ovf_ctrs_snapshot(struct cpu_hw_events *cpu_hw_evt,
+ u64 ctr_ovf_mask)
+{
+ int i, idx = 0;
+ struct perf_event *event;
+ unsigned long flag = SBI_PMU_START_FLAG_INIT_SNAPSHOT;
+ u64 max_period, init_val = 0;
+ struct hw_perf_event *hwc;
+ struct riscv_pmu_snapshot_data *sdata = cpu_hw_evt->snapshot_addr;
+
+ for_each_set_bit(idx, cpu_hw_evt->used_hw_ctrs, RISCV_MAX_COUNTERS) {
+ if (ctr_ovf_mask & BIT(idx)) {
+ event = cpu_hw_evt->events[idx];
+ hwc = &event->hw;
+ max_period = riscv_pmu_ctr_get_width_mask(event);
+ init_val = local64_read(&hwc->prev_count) & max_period;
+ cpu_hw_evt->snapshot_cval_shcopy[idx] = init_val;
+ }
+ /*
+ * We do not need to update the non-overflowed counters; their previous
+ * values should already be in place.
+ */
+ }
+
+ for (i = 0; i < BITS_TO_LONGS(RISCV_MAX_COUNTERS); i++) {
+ /* Restore the counter values to relative indices for used hw counters */
+ for_each_set_bit(idx, &cpu_hw_evt->used_hw_ctrs[i], BITS_PER_LONG)
+ sdata->ctr_values[idx] =
+ cpu_hw_evt->snapshot_cval_shcopy[idx + i * BITS_PER_LONG];
+ /* Start all the counters in a single shot */
+ sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, idx * BITS_PER_LONG,
+ cpu_hw_evt->used_hw_ctrs[i], flag, 0, 0, 0);
+ }
+}
+
+static void pmu_sbi_start_overflow_mask(struct riscv_pmu *pmu,
+ u64 ctr_ovf_mask)
+{
+ struct cpu_hw_events *cpu_hw_evt = this_cpu_ptr(pmu->hw_events);
+
+ if (sbi_pmu_snapshot_available())
+ pmu_sbi_start_ovf_ctrs_snapshot(cpu_hw_evt, ctr_ovf_mask);
+ else
+ pmu_sbi_start_ovf_ctrs_sbi(cpu_hw_evt, ctr_ovf_mask);
+}
+
static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev)
{
struct perf_sample_data data;
@@ -619,19 +1047,25 @@ static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev)
int lidx, hidx, fidx;
struct riscv_pmu *pmu;
struct perf_event *event;
- unsigned long overflow;
- unsigned long overflowed_ctrs = 0;
+ u64 overflow;
+ u64 overflowed_ctrs = 0;
struct cpu_hw_events *cpu_hw_evt = dev;
u64 start_clock = sched_clock();
+ struct riscv_pmu_snapshot_data *sdata = cpu_hw_evt->snapshot_addr;
if (WARN_ON_ONCE(!cpu_hw_evt))
return IRQ_NONE;
/* Firmware counters don't support overflow yet */
fidx = find_first_bit(cpu_hw_evt->used_hw_ctrs, RISCV_MAX_COUNTERS);
+ if (fidx == RISCV_MAX_COUNTERS) {
+ csr_clear(CSR_SIP, BIT(riscv_pmu_irq_num));
+ return IRQ_NONE;
+ }
+
event = cpu_hw_evt->events[fidx];
if (!event) {
- csr_clear(CSR_SIP, BIT(riscv_pmu_irq_num));
+ ALT_SBI_PMU_OVF_CLEAR_PENDING(riscv_pmu_irq_mask);
return IRQ_NONE;
}
@@ -639,13 +1073,16 @@ static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev)
pmu_sbi_stop_hw_ctrs(pmu);
/* Overflow status register should only be read after the counters are stopped */
- ALT_SBI_PMU_OVERFLOW(overflow);
+ if (sbi_pmu_snapshot_available())
+ overflow = sdata->ctr_overflow_mask;
+ else
+ ALT_SBI_PMU_OVERFLOW(overflow);
/*
* Overflow interrupt pending bit should only be cleared after stopping
* all the counters to avoid any race condition.
*/
- csr_clear(CSR_SIP, BIT(riscv_pmu_irq_num));
+ ALT_SBI_PMU_OVF_CLEAR_PENDING(riscv_pmu_irq_mask);
/* No overflow bit is set */
if (!overflow)
@@ -665,19 +1102,27 @@ static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev)
if (!info || info->type != SBI_PMU_CTR_TYPE_HW)
continue;
- /* compute hardware counter index */
- hidx = info->csr - CSR_CYCLE;
- /* check if the corresponding bit is set in sscountovf */
- if (!(overflow & (1 << hidx)))
+ if (sbi_pmu_snapshot_available())
+ /* SBI implementation already updated the logical indices */
+ hidx = lidx;
+ else
+ /* compute hardware counter index */
+ hidx = info->csr - CSR_CYCLE;
+
+ /* check if the corresponding bit is set in scountovf or overflow mask in shmem */
+ if (!(overflow & BIT(hidx)))
continue;
/*
* Keep a track of overflowed counters so that they can be started
* with updated initial value.
*/
- overflowed_ctrs |= 1 << lidx;
+ overflowed_ctrs |= BIT(lidx);
hw_evt = &event->hw;
+ /* Update the event states here so that we know the state while reading */
+ hw_evt->state |= PERF_HES_STOPPED;
riscv_pmu_event_update(event);
+ hw_evt->state |= PERF_HES_UPTODATE;
perf_sample_data_init(&data, 0, hw_evt->last_period);
if (riscv_pmu_event_set_period(event)) {
/*
@@ -690,6 +1135,8 @@ static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev)
*/
perf_event_overflow(event, &data, regs);
}
+ /* Reset the state as we are going to start the counter after the loop */
+ hw_evt->state = 0;
}
pmu_sbi_start_overflow_mask(pmu, overflowed_ctrs);
@@ -704,21 +1151,26 @@ static int pmu_sbi_starting_cpu(unsigned int cpu, struct hlist_node *node)
struct cpu_hw_events *cpu_hw_evt = this_cpu_ptr(pmu->hw_events);
/*
- * Enable the access for CYCLE, TIME, and INSTRET CSRs from userspace,
- * as is necessary to maintain uABI compatibility.
+ * We keep enabling userspace access to CYCLE, TIME and INSTRET via the
+ * legacy option but that will be removed in the future.
*/
- csr_write(CSR_SCOUNTEREN, 0x7);
+ if (sysctl_perf_user_access == SYSCTL_LEGACY)
+ csr_write(CSR_SCOUNTEREN, 0x7);
+ else
+ csr_write(CSR_SCOUNTEREN, 0x2);
/* Stop all the counters so that they can be enabled from perf */
pmu_sbi_stop_all(pmu);
if (riscv_pmu_use_irq) {
cpu_hw_evt->irq = riscv_pmu_irq;
- csr_clear(CSR_IP, BIT(riscv_pmu_irq_num));
- csr_set(CSR_IE, BIT(riscv_pmu_irq_num));
+ ALT_SBI_PMU_OVF_CLEAR_PENDING(riscv_pmu_irq_mask);
enable_percpu_irq(riscv_pmu_irq, IRQ_TYPE_NONE);
}
+ if (sbi_pmu_snapshot_available())
+ return pmu_sbi_snapshot_setup(pmu, cpu);
+
return 0;
}
@@ -726,12 +1178,14 @@ static int pmu_sbi_dying_cpu(unsigned int cpu, struct hlist_node *node)
{
if (riscv_pmu_use_irq) {
disable_percpu_irq(riscv_pmu_irq);
- csr_clear(CSR_IE, BIT(riscv_pmu_irq_num));
}
/* Disable all counters access for user mode now */
csr_write(CSR_SCOUNTEREN, 0x0);
+ if (sbi_pmu_snapshot_available())
+ return pmu_sbi_snapshot_disable();
+
return 0;
}
@@ -750,8 +1204,15 @@ static int pmu_sbi_setup_irqs(struct riscv_pmu *pmu, struct platform_device *pde
riscv_cached_mimpid(0) == 0) {
riscv_pmu_irq_num = THEAD_C9XX_RV_IRQ_PMU;
riscv_pmu_use_irq = true;
+ } else if (riscv_has_vendor_extension_unlikely(ANDES_VENDOR_ID,
+ RISCV_ISA_VENDOR_EXT_XANDESPMU) &&
+ IS_ENABLED(CONFIG_ANDES_CUSTOM_PMU)) {
+ riscv_pmu_irq_num = ANDES_SLI_CAUSE_BASE + ANDES_RV_IRQ_PMOVI;
+ riscv_pmu_use_irq = true;
}
+ riscv_pmu_irq_mask = BIT(riscv_pmu_irq_num % BITS_PER_LONG);
+
if (!riscv_pmu_use_irq)
return -EOPNOTSUPP;
@@ -834,10 +1295,130 @@ static inline void riscv_pm_pmu_unregister(struct riscv_pmu *pmu) { }
static void riscv_pmu_destroy(struct riscv_pmu *pmu)
{
+ if (sbi_v2_available) {
+ if (sbi_pmu_snapshot_available()) {
+ pmu_sbi_snapshot_disable();
+ pmu_sbi_snapshot_free(pmu);
+ }
+ }
riscv_pm_pmu_unregister(pmu);
cpuhp_state_remove_instance(CPUHP_AP_PERF_RISCV_STARTING, &pmu->node);
}
+static void pmu_sbi_event_init(struct perf_event *event)
+{
+ /*
+ * The permissions are set at event_init so that we do not depend
+ * on the sysctl value, which can change at any time.
+ */
+ if (sysctl_perf_user_access == SYSCTL_NO_USER_ACCESS)
+ event->hw.flags |= PERF_EVENT_FLAG_NO_USER_ACCESS;
+ else if (sysctl_perf_user_access == SYSCTL_USER_ACCESS)
+ event->hw.flags |= PERF_EVENT_FLAG_USER_ACCESS;
+ else
+ event->hw.flags |= PERF_EVENT_FLAG_LEGACY;
+}
+
+static void pmu_sbi_event_mapped(struct perf_event *event, struct mm_struct *mm)
+{
+ if (event->hw.flags & PERF_EVENT_FLAG_NO_USER_ACCESS)
+ return;
+
+ if (event->hw.flags & PERF_EVENT_FLAG_LEGACY) {
+ if (event->attr.config != PERF_COUNT_HW_CPU_CYCLES &&
+ event->attr.config != PERF_COUNT_HW_INSTRUCTIONS) {
+ return;
+ }
+ }
+
+ /*
+ * The user mmapped the event to directly access it: this is where
+ * we determine based on sysctl_perf_user_access if we grant userspace
+ * the direct access to this event. That means that within the same
+ * task, some events may be directly accessible and some others may not,
+ * if the user changes the value of sysctl_perf_user_access in the
+ * meantime.
+ */
+
+ event->hw.flags |= PERF_EVENT_FLAG_USER_READ_CNT;
+
+ /*
+ * We must enable userspace access *before* advertising in the user page
+ * that it is possible to do so to avoid any race.
+ * And we must notify all cpus here because threads that currently run
+ * on other cpus will try to directly access the counter too without
+ * calling pmu_sbi_ctr_start.
+ */
+ if (event->hw.flags & PERF_EVENT_FLAG_USER_ACCESS)
+ on_each_cpu_mask(mm_cpumask(mm),
+ pmu_sbi_set_scounteren, (void *)event, 1);
+}
+
+static void pmu_sbi_event_unmapped(struct perf_event *event, struct mm_struct *mm)
+{
+ if (event->hw.flags & PERF_EVENT_FLAG_NO_USER_ACCESS)
+ return;
+
+ if (event->hw.flags & PERF_EVENT_FLAG_LEGACY) {
+ if (event->attr.config != PERF_COUNT_HW_CPU_CYCLES &&
+ event->attr.config != PERF_COUNT_HW_INSTRUCTIONS) {
+ return;
+ }
+ }
+
+ /*
+ * Here we can directly remove user access since the user does not have
+ * access to the user page anymore, so we avoid the racy window where the
+ * user could have read cap_user_rdpmc as true right before we disable
+ * it.
+ */
+ event->hw.flags &= ~PERF_EVENT_FLAG_USER_READ_CNT;
+
+ if (event->hw.flags & PERF_EVENT_FLAG_USER_ACCESS)
+ on_each_cpu_mask(mm_cpumask(mm),
+ pmu_sbi_reset_scounteren, (void *)event, 1);
+}
+
+static void riscv_pmu_update_counter_access(void *info)
+{
+ if (sysctl_perf_user_access == SYSCTL_LEGACY)
+ csr_write(CSR_SCOUNTEREN, 0x7);
+ else
+ csr_write(CSR_SCOUNTEREN, 0x2);
+}
+
+static int riscv_pmu_proc_user_access_handler(const struct ctl_table *table,
+ int write, void *buffer,
+ size_t *lenp, loff_t *ppos)
+{
+ int prev = sysctl_perf_user_access;
+ int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+
+ /*
+ * Test against the previous value since we clear SCOUNTEREN when
+ * sysctl_perf_user_access is set to SYSCTL_USER_ACCESS, but we should
+ * not do that if that was already the case.
+ */
+ if (ret || !write || prev == sysctl_perf_user_access)
+ return ret;
+
+ on_each_cpu(riscv_pmu_update_counter_access, NULL, 1);
+
+ return 0;
+}
+
+static const struct ctl_table sbi_pmu_sysctl_table[] = {
+ {
+ .procname = "perf_user_access",
+ .data = &sysctl_perf_user_access,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = riscv_pmu_proc_user_access_handler,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_TWO,
+ },
+};
+
static int pmu_sbi_device_probe(struct platform_device *pdev)
{
struct riscv_pmu *pmu = NULL;
@@ -873,6 +1454,7 @@ static int pmu_sbi_device_probe(struct platform_device *pdev)
}
pmu->pmu.attr_groups = riscv_pmu_attr_groups;
+ pmu->pmu.parent = &pdev->dev;
pmu->cmask = cmask;
pmu->ctr_start = pmu_sbi_ctr_start;
pmu->ctr_stop = pmu_sbi_ctr_stop;
@@ -881,10 +1463,10 @@ static int pmu_sbi_device_probe(struct platform_device *pdev)
pmu->ctr_get_width = pmu_sbi_ctr_get_width;
pmu->ctr_clear_idx = pmu_sbi_ctr_clear_idx;
pmu->ctr_read = pmu_sbi_ctr_read;
-
- ret = cpuhp_state_add_instance(CPUHP_AP_PERF_RISCV_STARTING, &pmu->node);
- if (ret)
- return ret;
+ pmu->event_init = pmu_sbi_event_init;
+ pmu->event_mapped = pmu_sbi_event_mapped;
+ pmu->event_unmapped = pmu_sbi_event_unmapped;
+ pmu->csr_index = pmu_sbi_csr_index;
ret = riscv_pm_pmu_register(pmu);
if (ret)
@@ -894,6 +1476,42 @@ static int pmu_sbi_device_probe(struct platform_device *pdev)
if (ret)
goto out_unregister;
+ /* SBI PMU snapshot is only available in SBI v2.0 and later */
+ if (sbi_v2_available) {
+ int cpu;
+
+ ret = pmu_sbi_snapshot_alloc(pmu);
+ if (ret)
+ goto out_unregister;
+
+ cpu = get_cpu();
+ ret = pmu_sbi_snapshot_setup(pmu, cpu);
+ put_cpu();
+
+ if (ret) {
+ /* Snapshot is an optional feature. Continue if not available */
+ pmu_sbi_snapshot_free(pmu);
+ } else {
+ pr_info("SBI PMU snapshot detected\n");
+ /*
+ * We enable it once here for the boot cpu. If snapshot shmem setup
+ * fails during the cpu hotplug process, that cpu will fail to start,
+ * as we cannot handle heterogeneous PMUs with different snapshot
+ * capabilities.
+ */
+ static_branch_enable(&sbi_pmu_snapshot_available);
+ }
+ }
+
+ register_sysctl("kernel", sbi_pmu_sysctl_table);
+
+ ret = cpuhp_state_add_instance(CPUHP_AP_PERF_RISCV_STARTING, &pmu->node);
+ if (ret)
+ goto out_unregister;
+
+ /* Asynchronously check which standard events are available */
+ schedule_work(&check_std_events_work);
+
return 0;
out_unregister:
@@ -907,7 +1525,7 @@ out_free:
static struct platform_driver pmu_sbi_driver = {
.probe = pmu_sbi_device_probe,
.driver = {
- .name = RISCV_PMU_PDEV_NAME,
+ .name = RISCV_PMU_SBI_PDEV_NAME,
},
};
@@ -921,6 +1539,12 @@ static int __init pmu_sbi_devinit(void)
return 0;
}
+ if (sbi_spec_version >= sbi_mk_version(2, 0))
+ sbi_v2_available = true;
+
+ if (sbi_spec_version >= sbi_mk_version(3, 0))
+ sbi_v3_available = true;
+
ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_RISCV_STARTING,
"perf/riscv/pmu:starting",
pmu_sbi_starting_cpu, pmu_sbi_dying_cpu);
@@ -934,7 +1558,7 @@ static int __init pmu_sbi_devinit(void)
if (ret)
return ret;
- pdev = platform_device_register_simple(RISCV_PMU_PDEV_NAME, -1, NULL, 0);
+ pdev = platform_device_register_simple(RISCV_PMU_SBI_PDEV_NAME, -1, NULL, 0);
if (IS_ERR(pdev)) {
platform_driver_unregister(&pmu_sbi_driver);
return PTR_ERR(pdev);
diff --git a/drivers/perf/starfive_starlink_pmu.c b/drivers/perf/starfive_starlink_pmu.c
new file mode 100644
index 000000000000..5e5a672b4229
--- /dev/null
+++ b/drivers/perf/starfive_starlink_pmu.c
@@ -0,0 +1,642 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * StarFive's StarLink PMU driver
+ *
+ * Copyright (C) 2023 StarFive Technology Co., Ltd.
+ *
+ * Author: Ji Sheng Teoh <jisheng.teoh@starfivetech.com>
+ *
+ */
+
+#define STARLINK_PMU_PDEV_NAME "starfive_starlink_pmu"
+#define pr_fmt(fmt) STARLINK_PMU_PDEV_NAME ": " fmt
+
+#include <linux/bitmap.h>
+#include <linux/cpu_pm.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/perf_event.h>
+#include <linux/platform_device.h>
+#include <linux/sysfs.h>
+
+#define STARLINK_PMU_MAX_COUNTERS 64
+#define STARLINK_PMU_NUM_COUNTERS 16
+#define STARLINK_PMU_IDX_CYCLE_COUNTER 63
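+
+/*
+ * Counters 0-15 are general-purpose event counters; the cycle counter uses
+ * the dedicated index 63, which also matches bit 63 of the overflow and
+ * interrupt enable registers.
+ */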
+
+#define STARLINK_PMU_EVENT_SELECT 0x060
+#define STARLINK_PMU_EVENT_COUNTER 0x160
+#define STARLINK_PMU_COUNTER_MASK GENMASK_ULL(63, 0)
+#define STARLINK_PMU_CYCLE_COUNTER 0x058
+
+#define STARLINK_PMU_CONTROL 0x040
+#define STARLINK_PMU_GLOBAL_ENABLE BIT_ULL(0)
+
+#define STARLINK_PMU_INTERRUPT_ENABLE 0x050
+#define STARLINK_PMU_COUNTER_OVERFLOW_STATUS 0x048
+#define STARLINK_PMU_CYCLE_OVERFLOW_MASK BIT_ULL(63)
+
+#define STARLINK_CYCLES 0x058
+#define CACHE_READ_REQUEST 0x04000701
+#define CACHE_WRITE_REQUEST 0x03000001
+#define CACHE_RELEASE_REQUEST 0x0003e001
+#define CACHE_READ_HIT 0x00901202
+#define CACHE_READ_MISS 0x04008002
+#define CACHE_WRITE_HIT 0x006c0002
+#define CACHE_WRITE_MISS 0x03000002
+#define CACHE_WRITEBACK 0x00000403
+
+#define to_starlink_pmu(p) (container_of(p, struct starlink_pmu, pmu))
+
+#define STARLINK_FORMAT_ATTR(_name, _config) \
+ (&((struct dev_ext_attribute[]) { \
+ { .attr = __ATTR(_name, 0444, starlink_pmu_sysfs_format_show, NULL), \
+ .var = (void *)_config, } \
+ })[0].attr.attr)
+
+#define STARLINK_EVENT_ATTR(_name, _id) \
+ PMU_EVENT_ATTR_ID(_name, starlink_pmu_sysfs_event_show, _id)
+
+static int starlink_pmu_cpuhp_state;
+
+struct starlink_hw_events {
+ struct perf_event *events[STARLINK_PMU_MAX_COUNTERS];
+ DECLARE_BITMAP(used_mask, STARLINK_PMU_MAX_COUNTERS);
+};
+
+struct starlink_pmu {
+ struct pmu pmu;
+ struct starlink_hw_events __percpu *hw_events;
+ struct hlist_node node;
+ struct notifier_block starlink_pmu_pm_nb;
+ void __iomem *pmu_base;
+ cpumask_t cpumask;
+ int irq;
+};
+
+static ssize_t
+starlink_pmu_sysfs_format_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct dev_ext_attribute *eattr = container_of(attr,
+ struct dev_ext_attribute, attr);
+
+ return sysfs_emit(buf, "%s\n", (char *)eattr->var);
+}
+
+static struct attribute *starlink_pmu_format_attrs[] = {
+ STARLINK_FORMAT_ATTR(event, "config:0-31"),
+ NULL
+};
+
+static const struct attribute_group starlink_pmu_format_attr_group = {
+ .name = "format",
+ .attrs = starlink_pmu_format_attrs,
+};
+
+static ssize_t
+starlink_pmu_sysfs_event_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct perf_pmu_events_attr *eattr = container_of(attr,
+ struct perf_pmu_events_attr, attr);
+
+ return sysfs_emit(buf, "event=0x%02llx\n", eattr->id);
+}
+
+static struct attribute *starlink_pmu_event_attrs[] = {
+ STARLINK_EVENT_ATTR(cycles, STARLINK_CYCLES),
+ STARLINK_EVENT_ATTR(read_request, CACHE_READ_REQUEST),
+ STARLINK_EVENT_ATTR(write_request, CACHE_WRITE_REQUEST),
+ STARLINK_EVENT_ATTR(release_request, CACHE_RELEASE_REQUEST),
+ STARLINK_EVENT_ATTR(read_hit, CACHE_READ_HIT),
+ STARLINK_EVENT_ATTR(read_miss, CACHE_READ_MISS),
+ STARLINK_EVENT_ATTR(write_hit, CACHE_WRITE_HIT),
+ STARLINK_EVENT_ATTR(write_miss, CACHE_WRITE_MISS),
+ STARLINK_EVENT_ATTR(writeback, CACHE_WRITEBACK),
+ NULL
+};
+
+static const struct attribute_group starlink_pmu_events_attr_group = {
+ .name = "events",
+ .attrs = starlink_pmu_event_attrs,
+};
+
+static ssize_t
+cpumask_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct starlink_pmu *starlink_pmu = to_starlink_pmu(dev_get_drvdata(dev));
+
+ return cpumap_print_to_pagebuf(true, buf, &starlink_pmu->cpumask);
+}
+
+static DEVICE_ATTR_RO(cpumask);
+
+static struct attribute *starlink_pmu_cpumask_attrs[] = {
+ &dev_attr_cpumask.attr,
+ NULL
+};
+
+static const struct attribute_group starlink_pmu_cpumask_attr_group = {
+ .attrs = starlink_pmu_cpumask_attrs,
+};
+
+static const struct attribute_group *starlink_pmu_attr_groups[] = {
+ &starlink_pmu_format_attr_group,
+ &starlink_pmu_events_attr_group,
+ &starlink_pmu_cpumask_attr_group,
+ NULL
+};
+
+static void starlink_pmu_set_event_period(struct perf_event *event)
+{
+ struct starlink_pmu *starlink_pmu = to_starlink_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = event->hw.idx;
+
+ /*
+ * Program the counter to half of its max count to handle
+ * cases of extreme interrupt latency.
+ */
+ u64 val = STARLINK_PMU_COUNTER_MASK >> 1;
+
+ local64_set(&hwc->prev_count, val);
+ if (hwc->config == STARLINK_CYCLES)
+ writeq(val, starlink_pmu->pmu_base + STARLINK_PMU_CYCLE_COUNTER);
+ else
+ writeq(val, starlink_pmu->pmu_base + STARLINK_PMU_EVENT_COUNTER +
+ idx * sizeof(u64));
+}
+
+static void starlink_pmu_counter_start(struct perf_event *event,
+ struct starlink_pmu *starlink_pmu)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = event->hw.idx;
+ u64 val;
+
+ /*
+ * Enable the counter overflow interrupt [63:0],
+ * which is mapped as follows:
+ *
+ * event counter 0 - Bit [0]
+ * event counter 1 - Bit [1]
+ * ...
+ * cycle counter - Bit [63]
+ */
+ val = readq(starlink_pmu->pmu_base + STARLINK_PMU_INTERRUPT_ENABLE);
+
+ if (hwc->config == STARLINK_CYCLES) {
+ /*
+ * Cycle count has its dedicated register, and it starts
+ * counting as soon as STARLINK_PMU_GLOBAL_ENABLE is set.
+ */
+ val |= STARLINK_PMU_CYCLE_OVERFLOW_MASK;
+ } else {
+ writeq(event->hw.config, starlink_pmu->pmu_base +
+ STARLINK_PMU_EVENT_SELECT + idx * sizeof(u64));
+
+ val |= BIT_ULL(idx);
+ }
+
+ writeq(val, starlink_pmu->pmu_base + STARLINK_PMU_INTERRUPT_ENABLE);
+
+ writeq(STARLINK_PMU_GLOBAL_ENABLE, starlink_pmu->pmu_base +
+ STARLINK_PMU_CONTROL);
+}
+
+static void starlink_pmu_counter_stop(struct perf_event *event,
+ struct starlink_pmu *starlink_pmu)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = event->hw.idx;
+ u64 val;
+
+ val = readq(starlink_pmu->pmu_base + STARLINK_PMU_CONTROL);
+ val &= ~STARLINK_PMU_GLOBAL_ENABLE;
+ writeq(val, starlink_pmu->pmu_base + STARLINK_PMU_CONTROL);
+
+ val = readq(starlink_pmu->pmu_base + STARLINK_PMU_INTERRUPT_ENABLE);
+ if (hwc->config == STARLINK_CYCLES)
+ val &= ~STARLINK_PMU_CYCLE_OVERFLOW_MASK;
+ else
+ val &= ~BIT_ULL(idx);
+
+ writeq(val, starlink_pmu->pmu_base + STARLINK_PMU_INTERRUPT_ENABLE);
+}
+
+static void starlink_pmu_update(struct perf_event *event)
+{
+ struct starlink_pmu *starlink_pmu = to_starlink_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = hwc->idx;
+ u64 prev_raw_count, new_raw_count;
+ u64 oldval;
+ u64 delta;
+
+ do {
+ prev_raw_count = local64_read(&hwc->prev_count);
+ if (hwc->config == STARLINK_CYCLES)
+ new_raw_count = readq(starlink_pmu->pmu_base +
+ STARLINK_PMU_CYCLE_COUNTER);
+ else
+ new_raw_count = readq(starlink_pmu->pmu_base +
+ STARLINK_PMU_EVENT_COUNTER +
+ idx * sizeof(u64));
+ oldval = local64_cmpxchg(&hwc->prev_count, prev_raw_count,
+ new_raw_count);
+ } while (oldval != prev_raw_count);
+
+ delta = (new_raw_count - prev_raw_count) & STARLINK_PMU_COUNTER_MASK;
+ local64_add(delta, &event->count);
+}
+
+static void starlink_pmu_start(struct perf_event *event, int flags)
+{
+ struct starlink_pmu *starlink_pmu = to_starlink_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+
+ if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
+ return;
+
+ if (flags & PERF_EF_RELOAD)
+ WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
+
+ hwc->state = 0;
+
+ starlink_pmu_set_event_period(event);
+ starlink_pmu_counter_start(event, starlink_pmu);
+
+ perf_event_update_userpage(event);
+}
+
+static void starlink_pmu_stop(struct perf_event *event, int flags)
+{
+ struct starlink_pmu *starlink_pmu = to_starlink_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+
+ if (hwc->state & PERF_HES_STOPPED)
+ return;
+
+ starlink_pmu_counter_stop(event, starlink_pmu);
+ starlink_pmu_update(event);
+ hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
+}
+
+static int starlink_pmu_add(struct perf_event *event, int flags)
+{
+ struct starlink_pmu *starlink_pmu = to_starlink_pmu(event->pmu);
+ struct starlink_hw_events *hw_events =
+ this_cpu_ptr(starlink_pmu->hw_events);
+ struct hw_perf_event *hwc = &event->hw;
+ unsigned long *used_mask = hw_events->used_mask;
+ u32 n_events = STARLINK_PMU_NUM_COUNTERS;
+ int idx;
+
+ /*
+ * The cycle counter has a dedicated register to hold its value.
+ * Events other than cycle count have to be enabled through the
+ * event select register and are assigned an independent counter
+ * as they appear.
+ */
+
+ if (hwc->config == STARLINK_CYCLES) {
+ idx = STARLINK_PMU_IDX_CYCLE_COUNTER;
+ } else {
+ idx = find_first_zero_bit(used_mask, n_events);
+ /* All counters are in use */
+ if (idx < 0)
+ return idx;
+
+ set_bit(idx, used_mask);
+ }
+
+ hwc->idx = idx;
+ hw_events->events[idx] = event;
+ hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
+
+ if (flags & PERF_EF_START)
+ starlink_pmu_start(event, PERF_EF_RELOAD);
+
+ perf_event_update_userpage(event);
+
+ return 0;
+}
+
+static void starlink_pmu_del(struct perf_event *event, int flags)
+{
+ struct starlink_pmu *starlink_pmu = to_starlink_pmu(event->pmu);
+ struct starlink_hw_events *hw_events =
+ this_cpu_ptr(starlink_pmu->hw_events);
+ struct hw_perf_event *hwc = &event->hw;
+
+ starlink_pmu_stop(event, PERF_EF_UPDATE);
+ hw_events->events[hwc->idx] = NULL;
+ clear_bit(hwc->idx, hw_events->used_mask);
+
+ perf_event_update_userpage(event);
+}
+
+static bool starlink_pmu_validate_event_group(struct perf_event *event)
+{
+ struct perf_event *leader = event->group_leader;
+ struct perf_event *sibling;
+ int counter = 1;
+
+ /*
+ * Ensure hardware events in the group are on the same PMU;
+ * software events are acceptable.
+ */
+ if (event->group_leader->pmu != event->pmu &&
+ !is_software_event(event->group_leader))
+ return false;
+
+ for_each_sibling_event(sibling, leader) {
+ if (sibling->pmu != event->pmu && !is_software_event(sibling))
+ return false;
+
+ counter++;
+ }
+
+ return counter <= STARLINK_PMU_NUM_COUNTERS;
+}
+
+static int starlink_pmu_event_init(struct perf_event *event)
+{
+ struct starlink_pmu *starlink_pmu = to_starlink_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+
+ /*
+ * Sampling is not supported, as counters are shared
+ * by all CPUs.
+ */
+ if (hwc->sample_period)
+ return -EOPNOTSUPP;
+
+ /*
+ * Per-task counting and attaching to a task are not supported,
+ * as uncore events are not specific to any CPU.
+ */
+ if (event->cpu < 0 || event->attach_state & PERF_ATTACH_TASK)
+ return -EOPNOTSUPP;
+
+ if (!starlink_pmu_validate_event_group(event))
+ return -EINVAL;
+
+ hwc->idx = -1;
+ hwc->config = event->attr.config;
+ event->cpu = cpumask_first(&starlink_pmu->cpumask);
+
+ return 0;
+}
+
+static irqreturn_t starlink_pmu_handle_irq(int irq_num, void *data)
+{
+ struct starlink_pmu *starlink_pmu = data;
+ struct starlink_hw_events *hw_events =
+ this_cpu_ptr(starlink_pmu->hw_events);
+ bool handled = false;
+ int idx;
+ u64 overflow_status;
+
+ for (idx = 0; idx < STARLINK_PMU_MAX_COUNTERS; idx++) {
+ struct perf_event *event = hw_events->events[idx];
+
+ if (!event)
+ continue;
+
+ overflow_status = readq(starlink_pmu->pmu_base +
+ STARLINK_PMU_COUNTER_OVERFLOW_STATUS);
+ if (!(overflow_status & BIT_ULL(idx)))
+ continue;
+
+ writeq(BIT_ULL(idx), starlink_pmu->pmu_base +
+ STARLINK_PMU_COUNTER_OVERFLOW_STATUS);
+
+ starlink_pmu_update(event);
+ starlink_pmu_set_event_period(event);
+ handled = true;
+ }
+ return IRQ_RETVAL(handled);
+}
+
+static int starlink_setup_irqs(struct starlink_pmu *starlink_pmu,
+ struct platform_device *pdev)
+{
+ int ret, irq;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return -EINVAL;
+
+ ret = devm_request_irq(&pdev->dev, irq, starlink_pmu_handle_irq,
+ 0, STARLINK_PMU_PDEV_NAME, starlink_pmu);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "Failed to request IRQ\n");
+
+ starlink_pmu->irq = irq;
+
+ return 0;
+}
+
+static int starlink_pmu_pm_notify(struct notifier_block *b,
+ unsigned long cmd, void *v)
+{
+ struct starlink_pmu *starlink_pmu = container_of(b, struct starlink_pmu,
+ starlink_pmu_pm_nb);
+ struct starlink_hw_events *hw_events =
+ this_cpu_ptr(starlink_pmu->hw_events);
+ int enabled = bitmap_weight(hw_events->used_mask,
+ STARLINK_PMU_MAX_COUNTERS);
+ struct perf_event *event;
+ int idx;
+
+ if (!enabled)
+ return NOTIFY_OK;
+
+ for (idx = 0; idx < STARLINK_PMU_MAX_COUNTERS; idx++) {
+ event = hw_events->events[idx];
+ if (!event)
+ continue;
+
+ switch (cmd) {
+ case CPU_PM_ENTER:
+ /* Stop and update the counter */
+ starlink_pmu_stop(event, PERF_EF_UPDATE);
+ break;
+ case CPU_PM_EXIT:
+ case CPU_PM_ENTER_FAILED:
+ /* Restore and enable the counter */
+ starlink_pmu_start(event, PERF_EF_RELOAD);
+ break;
+ default:
+ break;
+ }
+ }
+
+ return NOTIFY_OK;
+}
+
+static int starlink_pmu_pm_register(struct starlink_pmu *starlink_pmu)
+{
+ if (!IS_ENABLED(CONFIG_CPU_PM))
+ return 0;
+
+ starlink_pmu->starlink_pmu_pm_nb.notifier_call = starlink_pmu_pm_notify;
+ return cpu_pm_register_notifier(&starlink_pmu->starlink_pmu_pm_nb);
+}
+
+static void starlink_pmu_pm_unregister(struct starlink_pmu *starlink_pmu)
+{
+ if (!IS_ENABLED(CONFIG_CPU_PM))
+ return;
+
+ cpu_pm_unregister_notifier(&starlink_pmu->starlink_pmu_pm_nb);
+}
+
+static void starlink_pmu_destroy(struct starlink_pmu *starlink_pmu)
+{
+ starlink_pmu_pm_unregister(starlink_pmu);
+ cpuhp_state_remove_instance(starlink_pmu_cpuhp_state,
+ &starlink_pmu->node);
+}
+
+static int starlink_pmu_probe(struct platform_device *pdev)
+{
+ struct starlink_pmu *starlink_pmu;
+ struct starlink_hw_events *hw_events;
+ struct resource *res;
+ int cpuid, i, ret;
+
+ starlink_pmu = devm_kzalloc(&pdev->dev, sizeof(*starlink_pmu), GFP_KERNEL);
+ if (!starlink_pmu)
+ return -ENOMEM;
+
+ starlink_pmu->pmu_base =
+ devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(starlink_pmu->pmu_base))
+ return PTR_ERR(starlink_pmu->pmu_base);
+
+ starlink_pmu->hw_events = alloc_percpu_gfp(struct starlink_hw_events,
+ GFP_KERNEL);
+ if (!starlink_pmu->hw_events) {
+ dev_err(&pdev->dev, "Failed to allocate per-cpu PMU data\n");
+ return -ENOMEM;
+ }
+
+ for_each_possible_cpu(cpuid) {
+ hw_events = per_cpu_ptr(starlink_pmu->hw_events, cpuid);
+ for (i = 0; i < STARLINK_PMU_MAX_COUNTERS; i++)
+ hw_events->events[i] = NULL;
+ }
+
+ ret = starlink_setup_irqs(starlink_pmu, pdev);
+ if (ret)
+ return ret;
+
+ ret = cpuhp_state_add_instance(starlink_pmu_cpuhp_state,
+ &starlink_pmu->node);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register hotplug\n");
+ return ret;
+ }
+
+ ret = starlink_pmu_pm_register(starlink_pmu);
+ if (ret) {
+ cpuhp_state_remove_instance(starlink_pmu_cpuhp_state,
+ &starlink_pmu->node);
+ return ret;
+ }
+
+ starlink_pmu->pmu = (struct pmu) {
+ .task_ctx_nr = perf_invalid_context,
+ .event_init = starlink_pmu_event_init,
+ .add = starlink_pmu_add,
+ .del = starlink_pmu_del,
+ .start = starlink_pmu_start,
+ .stop = starlink_pmu_stop,
+ .read = starlink_pmu_update,
+ .attr_groups = starlink_pmu_attr_groups,
+ };
+
+ ret = perf_pmu_register(&starlink_pmu->pmu, STARLINK_PMU_PDEV_NAME, -1);
+ if (ret)
+ starlink_pmu_destroy(starlink_pmu);
+
+ return ret;
+}
+
+static const struct of_device_id starlink_pmu_of_match[] = {
+ { .compatible = "starfive,jh8100-starlink-pmu" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, starlink_pmu_of_match);
+
+static struct platform_driver starlink_pmu_driver = {
+ .driver = {
+ .name = STARLINK_PMU_PDEV_NAME,
+ .of_match_table = starlink_pmu_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = starlink_pmu_probe,
+};
+
+static int
+starlink_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
+{
+ struct starlink_pmu *starlink_pmu = hlist_entry_safe(node,
+ struct starlink_pmu,
+ node);
+
+ if (cpumask_empty(&starlink_pmu->cpumask))
+ cpumask_set_cpu(cpu, &starlink_pmu->cpumask);
+
+ WARN_ON(irq_set_affinity(starlink_pmu->irq, cpumask_of(cpu)));
+
+ return 0;
+}
+
+static int
+starlink_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
+{
+ struct starlink_pmu *starlink_pmu = hlist_entry_safe(node,
+ struct starlink_pmu,
+ node);
+ unsigned int target;
+
+ if (!cpumask_test_and_clear_cpu(cpu, &starlink_pmu->cpumask))
+ return 0;
+
+ target = cpumask_any_but(cpu_online_mask, cpu);
+ if (target >= nr_cpu_ids)
+ return 0;
+
+ perf_pmu_migrate_context(&starlink_pmu->pmu, cpu, target);
+
+ cpumask_set_cpu(target, &starlink_pmu->cpumask);
+ WARN_ON(irq_set_affinity(starlink_pmu->irq, cpumask_of(target)));
+
+ return 0;
+}
+
+static int __init starlink_pmu_init(void)
+{
+ int ret;
+
+ ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
+ "soc/starfive/starlink_pmu:online",
+ starlink_pmu_online_cpu,
+ starlink_pmu_offline_cpu);
+ if (ret < 0)
+ return ret;
+
+ starlink_pmu_cpuhp_state = ret;
+
+ return platform_driver_register(&starlink_pmu_driver);
+}
+
+device_initcall(starlink_pmu_init);
diff --git a/drivers/perf/thunderx2_pmu.c b/drivers/perf/thunderx2_pmu.c
index 1edb9c03704f..6ed4707bd6bb 100644
--- a/drivers/perf/thunderx2_pmu.c
+++ b/drivers/perf/thunderx2_pmu.c
@@ -504,24 +504,19 @@ static void tx2_uncore_event_update(struct perf_event *event)
static enum tx2_uncore_type get_tx2_pmu_type(struct acpi_device *adev)
{
- int i = 0;
- struct acpi_tx2_pmu_device {
- __u8 id[ACPI_ID_LEN];
- enum tx2_uncore_type type;
- } devices[] = {
+ struct acpi_device_id devices[] = {
{"CAV901D", PMU_TYPE_L3C},
{"CAV901F", PMU_TYPE_DMC},
{"CAV901E", PMU_TYPE_CCPI2},
- {"", PMU_TYPE_INVALID}
+ {}
};
+ const struct acpi_device_id *id;
- while (devices[i].type != PMU_TYPE_INVALID) {
- if (!strcmp(acpi_device_hid(adev), devices[i].id))
- break;
- i++;
- }
+ id = acpi_match_acpi_device(devices, adev);
+ if (!id)
+ return PMU_TYPE_INVALID;
- return devices[i].type;
+ return (enum tx2_uncore_type)id->driver_data;
}
static bool tx2_uncore_validate_event(struct pmu *pmu,
@@ -729,6 +724,7 @@ static int tx2_uncore_pmu_register(
/* Perf event registration */
tx2_pmu->pmu = (struct pmu) {
.module = THIS_MODULE,
+ .parent = tx2_pmu->dev,
.attr_groups = tx2_pmu->attr_groups,
.task_ctx_nr = perf_invalid_context,
.event_init = tx2_uncore_event_init,
@@ -756,9 +752,8 @@ static int tx2_uncore_pmu_add_dev(struct tx2_uncore_pmu *tx2_pmu)
tx2_pmu->cpu = cpu;
if (tx2_pmu->hrtimer_callback) {
- hrtimer_init(&tx2_pmu->hrtimer,
- CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- tx2_pmu->hrtimer.function = tx2_pmu->hrtimer_callback;
+ hrtimer_setup(&tx2_pmu->hrtimer, tx2_pmu->hrtimer_callback, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
}
ret = tx2_uncore_pmu_register(tx2_pmu);
@@ -932,9 +927,8 @@ static int tx2_uncore_pmu_online_cpu(unsigned int cpu,
static int tx2_uncore_pmu_offline_cpu(unsigned int cpu,
struct hlist_node *hpnode)
{
- int new_cpu;
struct tx2_uncore_pmu *tx2_pmu;
- struct cpumask cpu_online_mask_temp;
+ unsigned int new_cpu;
tx2_pmu = hlist_entry_safe(hpnode,
struct tx2_uncore_pmu, hpnode);
@@ -945,11 +939,8 @@ static int tx2_uncore_pmu_offline_cpu(unsigned int cpu,
if (tx2_pmu->hrtimer_callback)
hrtimer_cancel(&tx2_pmu->hrtimer);
- cpumask_copy(&cpu_online_mask_temp, cpu_online_mask);
- cpumask_clear_cpu(cpu, &cpu_online_mask_temp);
- new_cpu = cpumask_any_and(
- cpumask_of_node(tx2_pmu->node),
- &cpu_online_mask_temp);
+ new_cpu = cpumask_any_and_but(cpumask_of_node(tx2_pmu->node),
+ cpu_online_mask, cpu);
tx2_pmu->cpu = new_cpu;
if (new_cpu >= nr_cpu_ids)
@@ -993,7 +984,7 @@ static int tx2_uncore_probe(struct platform_device *pdev)
return 0;
}
-static int tx2_uncore_remove(struct platform_device *pdev)
+static void tx2_uncore_remove(struct platform_device *pdev)
{
struct tx2_uncore_pmu *tx2_pmu, *temp;
struct device *dev = &pdev->dev;
@@ -1009,7 +1000,6 @@ static int tx2_uncore_remove(struct platform_device *pdev)
}
}
}
- return 0;
}
static struct platform_driver tx2_uncore_driver = {
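The offline path above no longer copies cpu_online_mask onto the stack; cpumask_any_and_but() selects a surviving CPU on the same node directly, excluding the CPU going down. A hedged sketch of that migration pattern, with hypothetical names:

#include <linux/cpumask.h>
#include <linux/perf_event.h>
#include <linux/topology.h>

/* Hypothetical offline handler: if @cpu owned the uncore PMU, pick
 * another online CPU on the same NUMA node (excluding @cpu) and hand
 * the perf context over to it. */
static int example_pmu_offline_cpu(unsigned int cpu, struct pmu *pmu,
				   int nid, unsigned int *owner_cpu)
{
	unsigned int new_cpu;

	if (*owner_cpu != cpu)
		return 0;

	new_cpu = cpumask_any_and_but(cpumask_of_node(nid),
				      cpu_online_mask, cpu);
	*owner_cpu = new_cpu;
	if (new_cpu >= nr_cpu_ids)
		return 0;

	perf_pmu_migrate_context(pmu, cpu, new_cpu);

	return 0;
}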
diff --git a/drivers/perf/xgene_pmu.c b/drivers/perf/xgene_pmu.c
index 9972bfc11a5c..33b5497bdc06 100644
--- a/drivers/perf/xgene_pmu.c
+++ b/drivers/perf/xgene_pmu.c
@@ -16,11 +16,9 @@
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of_address.h>
-#include <linux/of_fdt.h>
-#include <linux/of_irq.h>
-#include <linux/of_platform.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/slab.h>
@@ -164,18 +162,9 @@ enum xgene_pmu_dev_type {
/*
* sysfs format attributes
*/
-static ssize_t xgene_pmu_format_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct dev_ext_attribute *eattr;
-
- eattr = container_of(attr, struct dev_ext_attribute, attr);
- return sysfs_emit(buf, "%s\n", (char *) eattr->var);
-}
-
#define XGENE_PMU_FORMAT_ATTR(_name, _config) \
(&((struct dev_ext_attribute[]) { \
- { .attr = __ATTR(_name, S_IRUGO, xgene_pmu_format_show, NULL), \
+ { .attr = __ATTR(_name, S_IRUGO, device_show_string, NULL), \
.var = (void *) _config, } \
})[0].attr.attr)
@@ -1104,6 +1093,7 @@ static int xgene_init_perf(struct xgene_pmu_dev *pmu_dev, char *name)
/* Perf driver registration */
pmu_dev->pmu = (struct pmu) {
+ .parent = pmu_dev->parent->dev,
.attr_groups = pmu_dev->attr_groups,
.task_ctx_nr = perf_invalid_context,
.pmu_enable = xgene_perf_pmu_enable,
@@ -1731,6 +1721,12 @@ static const struct xgene_pmu_data xgene_pmu_v2_data = {
.id = PCP_PMU_V2,
};
+#ifdef CONFIG_ACPI
+static const struct xgene_pmu_data xgene_pmu_v3_data = {
+ .id = PCP_PMU_V3,
+};
+#endif
+
static const struct xgene_pmu_ops xgene_pmu_ops = {
.mask_int = xgene_pmu_mask_int,
.unmask_int = xgene_pmu_unmask_int,
@@ -1773,9 +1769,9 @@ static const struct of_device_id xgene_pmu_of_match[] = {
MODULE_DEVICE_TABLE(of, xgene_pmu_of_match);
#ifdef CONFIG_ACPI
static const struct acpi_device_id xgene_pmu_acpi_match[] = {
- {"APMC0D5B", PCP_PMU_V1},
- {"APMC0D5C", PCP_PMU_V2},
- {"APMC0D83", PCP_PMU_V3},
+ {"APMC0D5B", (kernel_ulong_t)&xgene_pmu_data},
+ {"APMC0D5C", (kernel_ulong_t)&xgene_pmu_v2_data},
+ {"APMC0D83", (kernel_ulong_t)&xgene_pmu_v3_data},
{},
};
MODULE_DEVICE_TABLE(acpi, xgene_pmu_acpi_match);
@@ -1831,7 +1827,6 @@ static int xgene_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
static int xgene_pmu_probe(struct platform_device *pdev)
{
const struct xgene_pmu_data *dev_data;
- const struct of_device_id *of_id;
struct xgene_pmu *xgene_pmu;
int irq, rc;
int version;
@@ -1850,24 +1845,10 @@ static int xgene_pmu_probe(struct platform_device *pdev)
xgene_pmu->dev = &pdev->dev;
platform_set_drvdata(pdev, xgene_pmu);
- version = -EINVAL;
- of_id = of_match_device(xgene_pmu_of_match, &pdev->dev);
- if (of_id) {
- dev_data = (const struct xgene_pmu_data *) of_id->data;
- version = dev_data->id;
- }
-
-#ifdef CONFIG_ACPI
- if (ACPI_COMPANION(&pdev->dev)) {
- const struct acpi_device_id *acpi_id;
-
- acpi_id = acpi_match_device(xgene_pmu_acpi_match, &pdev->dev);
- if (acpi_id)
- version = (int) acpi_id->driver_data;
- }
-#endif
- if (version < 0)
+ dev_data = device_get_match_data(&pdev->dev);
+ if (!dev_data)
return -ENODEV;
+ version = dev_data->id;
if (version == PCP_PMU_V3)
xgene_pmu->ops = &xgene_pmu_v3_ops;
@@ -1948,7 +1929,7 @@ xgene_pmu_dev_cleanup(struct xgene_pmu *xgene_pmu, struct list_head *pmus)
}
}
-static int xgene_pmu_remove(struct platform_device *pdev)
+static void xgene_pmu_remove(struct platform_device *pdev)
{
struct xgene_pmu *xgene_pmu = dev_get_drvdata(&pdev->dev);
@@ -1958,8 +1939,6 @@ static int xgene_pmu_remove(struct platform_device *pdev)
xgene_pmu_dev_cleanup(xgene_pmu, &xgene_pmu->mcpmus);
cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_APM_XGENE_ONLINE,
&xgene_pmu->node);
-
- return 0;
}
static struct platform_driver xgene_pmu_driver = {
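The probe conversion above works because both the OF and ACPI match tables now carry pointers to the same struct xgene_pmu_data type, so one firmware-agnostic lookup replaces the open-coded of_match_device()/acpi_match_device() pair. A minimal sketch of that lookup, with hypothetical names:

#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/property.h>

struct example_match_data {
	int id;
};

/* Hypothetical probe fragment: device_get_match_data() returns the
 * .data / .driver_data pointer from whichever table (OF or ACPI)
 * matched this device, or NULL when no match data is available. */
static int example_probe(struct platform_device *pdev)
{
	const struct example_match_data *data;

	data = device_get_match_data(&pdev->dev);
	if (!data)
		return -ENODEV;

	dev_info(&pdev->dev, "PMU variant %d\n", data->id);

	return 0;
}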