Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/arm64/iort.c                          6
-rw-r--r--  drivers/acpi/pptt.c                               53
-rw-r--r--  drivers/base/Kconfig                               2
-rw-r--r--  drivers/base/arch_topology.c                     298
-rw-r--r--  drivers/char/Kconfig                               9
-rw-r--r--  drivers/char/random.c                             14
-rw-r--r--  drivers/of/fdt.c                                  14
-rw-r--r--  drivers/perf/arm_smmuv3_pmu.c                     65
-rw-r--r--  drivers/perf/fsl_imx8_ddr_perf.c                  75
-rw-r--r--  drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c      4
-rw-r--r--  drivers/perf/hisilicon/hisi_uncore_hha_pmu.c       4
-rw-r--r--  drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c       4
-rw-r--r--  drivers/perf/qcom_l2_pmu.c                         6
-rw-r--r--  drivers/perf/xgene_pmu.c                           4
14 files changed, 515 insertions, 43 deletions
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index 8569b79e8b58..5a7551d060f2 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -1256,12 +1256,12 @@ static int __init arm_smmu_v3_set_proximity(struct device *dev,
smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
if (smmu->flags & ACPI_IORT_SMMU_V3_PXM_VALID) {
- int node = acpi_map_pxm_to_node(smmu->pxm);
+ int dev_node = acpi_map_pxm_to_node(smmu->pxm);
- if (node != NUMA_NO_NODE && !node_online(node))
+ if (dev_node != NUMA_NO_NODE && !node_online(dev_node))
return -EINVAL;
- set_dev_node(dev, node);
+ set_dev_node(dev, dev_node);
pr_info("SMMU-v3[%llx] Mapped to Proximity domain %d\n",
smmu->base_address,
smmu->pxm);
diff --git a/drivers/acpi/pptt.c b/drivers/acpi/pptt.c
index 1e7ac0bd0d3a..f31544d3656e 100644
--- a/drivers/acpi/pptt.c
+++ b/drivers/acpi/pptt.c
@@ -541,6 +541,44 @@ static int find_acpi_cpu_topology_tag(unsigned int cpu, int level, int flag)
}
/**
+ * check_acpi_cpu_flag() - Determine if CPU node has a flag set
+ * @cpu: Kernel logical CPU number
+ * @rev: The minimum PPTT revision defining the flag
+ * @flag: The flag itself
+ *
+ * Check the node representing a CPU for a given flag.
+ *
+ * Return: -ENOENT if the PPTT doesn't exist, the CPU cannot be found, or
+ * the table revision isn't new enough.
+ * 1, any passed flag set
+ * 0, flag unset
+ */
+static int check_acpi_cpu_flag(unsigned int cpu, int rev, u32 flag)
+{
+ struct acpi_table_header *table;
+ acpi_status status;
+ u32 acpi_cpu_id = get_acpi_id_for_cpu(cpu);
+ struct acpi_pptt_processor *cpu_node = NULL;
+ int ret = -ENOENT;
+
+ status = acpi_get_table(ACPI_SIG_PPTT, 0, &table);
+ if (ACPI_FAILURE(status)) {
+ acpi_pptt_warn_missing();
+ return ret;
+ }
+
+ if (table->revision >= rev)
+ cpu_node = acpi_find_processor_node(table, acpi_cpu_id);
+
+ if (cpu_node)
+ ret = (cpu_node->flags & flag) != 0;
+
+ acpi_put_table(table);
+
+ return ret;
+}
+
+/**
* acpi_find_last_cache_level() - Determines the number of cache levels for a PE
* @cpu: Kernel logical CPU number
*
@@ -605,6 +643,20 @@ int cache_setup_acpi(unsigned int cpu)
}
/**
+ * acpi_pptt_cpu_is_thread() - Determine if CPU is a thread
+ * @cpu: Kernel logical CPU number
+ *
+ * Return: 1, a thread
+ * 0, not a thread
+ * -ENOENT, if the PPTT doesn't exist, the CPU cannot be found, or
+ * the table revision isn't new enough.
+ */
+int acpi_pptt_cpu_is_thread(unsigned int cpu)
+{
+ return check_acpi_cpu_flag(cpu, 2, ACPI_PPTT_ACPI_PROCESSOR_IS_THREAD);
+}
+
+/**
* find_acpi_cpu_topology() - Determine a unique topology value for a given CPU
* @cpu: Kernel logical CPU number
* @level: The topological level for which we would like a unique ID
@@ -664,7 +716,6 @@ int find_acpi_cpu_cache_topology(unsigned int cpu, int level)
return ret;
}
-
/**
* find_acpi_cpu_topology_package() - Determine a unique CPU package value
* @cpu: Kernel logical CPU number
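
The rev-2 thread flag gives architecture code a way to tell SMT threads from cores when building the topology. A minimal sketch of a consumer, modelled on the arm64 parse_acpi_topology() this series wires up (the loop shape and field handling here are illustrative, not the exact arm64 code):

	int __init parse_acpi_topology(void)
	{
		int cpu, topology_id;

		for_each_possible_cpu(cpu) {
			topology_id = find_acpi_cpu_topology(cpu, 0);
			if (topology_id < 0)
				return topology_id;

			if (acpi_pptt_cpu_is_thread(cpu) > 0) {
				/* leaf node is an SMT thread */
				cpu_topology[cpu].thread_id = topology_id;
				cpu_topology[cpu].core_id =
					find_acpi_cpu_topology(cpu, 1);
			} else {
				cpu_topology[cpu].thread_id = -1;
				cpu_topology[cpu].core_id = topology_id;
			}
			cpu_topology[cpu].package_id =
				find_acpi_cpu_topology_package(cpu);
		}

		return 0;
	}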
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index dc404492381d..28b92e3cc570 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -202,7 +202,7 @@ config GENERIC_ARCH_TOPOLOGY
help
Enable support for architectures' common topology code: e.g., parsing
CPU capacity information from DT, usage of such information for
- appropriate scaling, sysfs interface for changing capacity values at
+ appropriate scaling, sysfs interface for reading capacity values at
runtime.
endmenu
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index 63c1e76739f1..b54d241a2ff5 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -15,6 +15,11 @@
#include <linux/string.h>
#include <linux/sched/topology.h>
#include <linux/cpuset.h>
+#include <linux/cpumask.h>
+#include <linux/init.h>
+#include <linux/percpu.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
DEFINE_PER_CPU(unsigned long, freq_scale) = SCHED_CAPACITY_SCALE;
@@ -241,3 +246,296 @@ static void parsing_done_workfn(struct work_struct *work)
#else
core_initcall(free_raw_capacity);
#endif
+
+#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
+static int __init get_cpu_for_node(struct device_node *node)
+{
+ struct device_node *cpu_node;
+ int cpu;
+
+ cpu_node = of_parse_phandle(node, "cpu", 0);
+ if (!cpu_node)
+ return -1;
+
+ cpu = of_cpu_node_to_id(cpu_node);
+ if (cpu >= 0)
+ topology_parse_cpu_capacity(cpu_node, cpu);
+ else
+ pr_crit("Unable to find CPU node for %pOF\n", cpu_node);
+
+ of_node_put(cpu_node);
+ return cpu;
+}
+
+static int __init parse_core(struct device_node *core, int package_id,
+ int core_id)
+{
+ char name[10];
+ bool leaf = true;
+ int i = 0;
+ int cpu;
+ struct device_node *t;
+
+ do {
+ snprintf(name, sizeof(name), "thread%d", i);
+ t = of_get_child_by_name(core, name);
+ if (t) {
+ leaf = false;
+ cpu = get_cpu_for_node(t);
+ if (cpu >= 0) {
+ cpu_topology[cpu].package_id = package_id;
+ cpu_topology[cpu].core_id = core_id;
+ cpu_topology[cpu].thread_id = i;
+ } else {
+ pr_err("%pOF: Can't get CPU for thread\n",
+ t);
+ of_node_put(t);
+ return -EINVAL;
+ }
+ of_node_put(t);
+ }
+ i++;
+ } while (t);
+
+ cpu = get_cpu_for_node(core);
+ if (cpu >= 0) {
+ if (!leaf) {
+ pr_err("%pOF: Core has both threads and CPU\n",
+ core);
+ return -EINVAL;
+ }
+
+ cpu_topology[cpu].package_id = package_id;
+ cpu_topology[cpu].core_id = core_id;
+ } else if (leaf) {
+ pr_err("%pOF: Can't get CPU for leaf core\n", core);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int __init parse_cluster(struct device_node *cluster, int depth)
+{
+ char name[10];
+ bool leaf = true;
+ bool has_cores = false;
+ struct device_node *c;
+ static int package_id __initdata;
+ int core_id = 0;
+ int i, ret;
+
+ /*
+ * First check for child clusters; we currently ignore any
+ * information about the nesting of clusters and present the
+ * scheduler with a flat list of them.
+ */
+ i = 0;
+ do {
+ snprintf(name, sizeof(name), "cluster%d", i);
+ c = of_get_child_by_name(cluster, name);
+ if (c) {
+ leaf = false;
+ ret = parse_cluster(c, depth + 1);
+ of_node_put(c);
+ if (ret != 0)
+ return ret;
+ }
+ i++;
+ } while (c);
+
+ /* Now check for cores */
+ i = 0;
+ do {
+ snprintf(name, sizeof(name), "core%d", i);
+ c = of_get_child_by_name(cluster, name);
+ if (c) {
+ has_cores = true;
+
+ if (depth == 0) {
+ pr_err("%pOF: cpu-map children should be clusters\n",
+ c);
+ of_node_put(c);
+ return -EINVAL;
+ }
+
+ if (leaf) {
+ ret = parse_core(c, package_id, core_id++);
+ } else {
+ pr_err("%pOF: Non-leaf cluster with core %s\n",
+ cluster, name);
+ ret = -EINVAL;
+ }
+
+ of_node_put(c);
+ if (ret != 0)
+ return ret;
+ }
+ i++;
+ } while (c);
+
+ if (leaf && !has_cores)
+ pr_warn("%pOF: empty cluster\n", cluster);
+
+ if (leaf)
+ package_id++;
+
+ return 0;
+}
+
+static int __init parse_dt_topology(void)
+{
+ struct device_node *cn, *map;
+ int ret = 0;
+ int cpu;
+
+ cn = of_find_node_by_path("/cpus");
+ if (!cn) {
+ pr_err("No CPU information found in DT\n");
+ return 0;
+ }
+
+ /*
+ * When topology is provided, cpu-map is essentially a root
+ * cluster with restricted subnodes.
+ */
+ map = of_get_child_by_name(cn, "cpu-map");
+ if (!map)
+ goto out;
+
+ ret = parse_cluster(map, 0);
+ if (ret != 0)
+ goto out_map;
+
+ topology_normalize_cpu_scale();
+
+ /*
+ * Check that all cores are in the topology; the SMP code will
+ * only mark cores described in the DT as possible.
+ */
+ for_each_possible_cpu(cpu)
+ if (cpu_topology[cpu].package_id == -1)
+ ret = -EINVAL;
+
+out_map:
+ of_node_put(map);
+out:
+ of_node_put(cn);
+ return ret;
+}
+#endif
+
+/*
+ * cpu topology table
+ */
+struct cpu_topology cpu_topology[NR_CPUS];
+EXPORT_SYMBOL_GPL(cpu_topology);
+
+const struct cpumask *cpu_coregroup_mask(int cpu)
+{
+ const cpumask_t *core_mask = cpumask_of_node(cpu_to_node(cpu));
+
+ /* Find the smaller of NUMA, core or LLC siblings */
+ if (cpumask_subset(&cpu_topology[cpu].core_sibling, core_mask)) {
+ /* not NUMA in package, let's use the package siblings */
+ core_mask = &cpu_topology[cpu].core_sibling;
+ }
+ if (cpu_topology[cpu].llc_id != -1) {
+ if (cpumask_subset(&cpu_topology[cpu].llc_sibling, core_mask))
+ core_mask = &cpu_topology[cpu].llc_sibling;
+ }
+
+ return core_mask;
+}
+
+void update_siblings_masks(unsigned int cpuid)
+{
+ struct cpu_topology *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
+ int cpu;
+
+ /* update core and thread sibling masks */
+ for_each_online_cpu(cpu) {
+ cpu_topo = &cpu_topology[cpu];
+
+ if (cpuid_topo->llc_id == cpu_topo->llc_id) {
+ cpumask_set_cpu(cpu, &cpuid_topo->llc_sibling);
+ cpumask_set_cpu(cpuid, &cpu_topo->llc_sibling);
+ }
+
+ if (cpuid_topo->package_id != cpu_topo->package_id)
+ continue;
+
+ cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
+ cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);
+
+ if (cpuid_topo->core_id != cpu_topo->core_id)
+ continue;
+
+ cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling);
+ cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
+ }
+}
+
+static void clear_cpu_topology(int cpu)
+{
+ struct cpu_topology *cpu_topo = &cpu_topology[cpu];
+
+ cpumask_clear(&cpu_topo->llc_sibling);
+ cpumask_set_cpu(cpu, &cpu_topo->llc_sibling);
+
+ cpumask_clear(&cpu_topo->core_sibling);
+ cpumask_set_cpu(cpu, &cpu_topo->core_sibling);
+ cpumask_clear(&cpu_topo->thread_sibling);
+ cpumask_set_cpu(cpu, &cpu_topo->thread_sibling);
+}
+
+void __init reset_cpu_topology(void)
+{
+ unsigned int cpu;
+
+ for_each_possible_cpu(cpu) {
+ struct cpu_topology *cpu_topo = &cpu_topology[cpu];
+
+ cpu_topo->thread_id = -1;
+ cpu_topo->core_id = -1;
+ cpu_topo->package_id = -1;
+ cpu_topo->llc_id = -1;
+
+ clear_cpu_topology(cpu);
+ }
+}
+
+void remove_cpu_topology(unsigned int cpu)
+{
+ int sibling;
+
+ for_each_cpu(sibling, topology_core_cpumask(cpu))
+ cpumask_clear_cpu(cpu, topology_core_cpumask(sibling));
+ for_each_cpu(sibling, topology_sibling_cpumask(cpu))
+ cpumask_clear_cpu(cpu, topology_sibling_cpumask(sibling));
+ for_each_cpu(sibling, topology_llc_cpumask(cpu))
+ cpumask_clear_cpu(cpu, topology_llc_cpumask(sibling));
+
+ clear_cpu_topology(cpu);
+}
+
+__weak int __init parse_acpi_topology(void)
+{
+ return 0;
+}
+
+#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
+void __init init_cpu_topology(void)
+{
+ reset_cpu_topology();
+
+ /*
+ * Discard anything that was parsed if we hit an error so we
+ * don't use partial information.
+ */
+ if (parse_acpi_topology())
+ reset_cpu_topology();
+ else if (of_have_populated_dt() && parse_dt_topology())
+ reset_cpu_topology();
+}
+#endif
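
parse_cluster() and parse_core() above walk the cpu-map node of the devicetree; the clusterN/coreN/threadN names they generate with snprintf() follow the documented cpu-map binding. A minimal sketch of the layout they expect (phandles illustrative):

	cpus {
		cpu-map {
			cluster0 {
				core0 {
					thread0 { cpu = <&CPU0>; };
					thread1 { cpu = <&CPU1>; };
				};
				core1 {
					thread0 { cpu = <&CPU2>; };
					thread1 { cpu = <&CPU3>; };
				};
			};
		};
	};

Nested clusterN children are accepted but, per the comment in parse_cluster(), the nesting is flattened: package_id is bumped once per leaf cluster, so the scheduler sees a flat list of packages.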
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 3e866885a405..2794f4b3f62d 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -573,3 +573,12 @@ config RANDOM_TRUST_CPU
has not installed a hidden back door to compromise the CPU's
random number generation facilities. This can also be configured
at boot with "random.trust_cpu=on/off".
+
+config RANDOM_TRUST_BOOTLOADER
+ bool "Trust the bootloader to initialize Linux's CRNG"
+ help
+ Some bootloaders can provide entropy to increase the kernel's initial
+ device randomness. Say Y here to assume the entropy provided by the
+ bootloader is trustworthy so it will be added to the kernel's entropy
+ pool. Otherwise, say N here so it will be regarded as device input
+ that only mixes the entropy pool.
\ No newline at end of file
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 5d5ea4ce1442..566922df4b7b 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -2445,3 +2445,17 @@ void add_hwgenerator_randomness(const char *buffer, size_t count,
credit_entropy_bits(poolp, entropy);
}
EXPORT_SYMBOL_GPL(add_hwgenerator_randomness);
+
+/*
+ * Handle a random seed passed in by the bootloader.
+ * If the seed is trusted, it is credited as entropy, as if it came from a
+ * hardware RNG; otherwise it is only mixed in as device data.
+ * The decision is controlled by CONFIG_RANDOM_TRUST_BOOTLOADER.
+ */
+void add_bootloader_randomness(const void *buf, unsigned int size)
+{
+ if (IS_ENABLED(CONFIG_RANDOM_TRUST_BOOTLOADER))
+ add_hwgenerator_randomness(buf, size, size * 8);
+ else
+ add_device_randomness(buf, size);
+}
+EXPORT_SYMBOL_GPL(add_bootloader_randomness);
\ No newline at end of file
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 9cdf14b9aaab..223d617ecfe1 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -24,6 +24,7 @@
#include <linux/debugfs.h>
#include <linux/serial_core.h>
#include <linux/sysfs.h>
+#include <linux/random.h>
#include <asm/setup.h> /* for COMMAND_LINE_SIZE */
#include <asm/page.h>
@@ -1044,6 +1045,7 @@ int __init early_init_dt_scan_chosen(unsigned long node, const char *uname,
{
int l;
const char *p;
+ const void *rng_seed;
pr_debug("search \"chosen\", depth: %d, uname: %s\n", depth, uname);
@@ -1078,6 +1080,18 @@ int __init early_init_dt_scan_chosen(unsigned long node, const char *uname,
pr_debug("Command line is: %s\n", (char*)data);
+ rng_seed = of_get_flat_dt_prop(node, "rng-seed", &l);
+ if (rng_seed && l > 0) {
+ add_bootloader_randomness(rng_seed, l);
+
+ /* try to clear the seed so it won't be found later */
+ fdt_nop_property(initial_boot_params, node, "rng-seed");
+
+ /* update CRC check value */
+ of_fdt_crc32 = crc32_be(~0, initial_boot_params,
+ fdt_totalsize(initial_boot_params));
+ }
+
/* break now */
return 1;
}
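
The property consumed above is the /chosen rng-seed binding; a bootloader hands the kernel seed bytes roughly like this (byte values illustrative):

	chosen {
		rng-seed = [f5 a8 33 0e 91 2c 5d 84];
	};

Since the property is NOP'd out of the FDT after use and of_fdt_crc32 is recomputed, the seed should not be recoverable later through /sys/firmware/fdt or the unflattened tree.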
diff --git a/drivers/perf/arm_smmuv3_pmu.c b/drivers/perf/arm_smmuv3_pmu.c
index da71c741cb46..abcf54f7d19c 100644
--- a/drivers/perf/arm_smmuv3_pmu.c
+++ b/drivers/perf/arm_smmuv3_pmu.c
@@ -113,8 +113,6 @@ struct smmu_pmu {
u64 counter_mask;
u32 options;
bool global_filter;
- u32 global_filter_span;
- u32 global_filter_sid;
};
#define to_smmu_pmu(p) (container_of(p, struct smmu_pmu, pmu))
@@ -260,6 +258,19 @@ static void smmu_pmu_set_event_filter(struct perf_event *event,
smmu_pmu_set_smr(smmu_pmu, idx, sid);
}
+static bool smmu_pmu_check_global_filter(struct perf_event *curr,
+ struct perf_event *new)
+{
+ if (get_filter_enable(new) != get_filter_enable(curr))
+ return false;
+
+ if (!get_filter_enable(new))
+ return true;
+
+ return get_filter_span(new) == get_filter_span(curr) &&
+ get_filter_stream_id(new) == get_filter_stream_id(curr);
+}
+
static int smmu_pmu_apply_event_filter(struct smmu_pmu *smmu_pmu,
struct perf_event *event, int idx)
{
@@ -279,17 +290,14 @@ static int smmu_pmu_apply_event_filter(struct smmu_pmu *smmu_pmu,
}
/* Requested settings same as current global settings */
- if (span == smmu_pmu->global_filter_span &&
- sid == smmu_pmu->global_filter_sid)
+ idx = find_first_bit(smmu_pmu->used_counters, num_ctrs);
+ if (idx == num_ctrs ||
+ smmu_pmu_check_global_filter(smmu_pmu->events[idx], event)) {
+ smmu_pmu_set_event_filter(event, 0, span, sid);
return 0;
+ }
- if (!bitmap_empty(smmu_pmu->used_counters, num_ctrs))
- return -EAGAIN;
-
- smmu_pmu_set_event_filter(event, 0, span, sid);
- smmu_pmu->global_filter_span = span;
- smmu_pmu->global_filter_sid = sid;
- return 0;
+ return -EAGAIN;
}
static int smmu_pmu_get_event_idx(struct smmu_pmu *smmu_pmu,
@@ -312,6 +320,19 @@ static int smmu_pmu_get_event_idx(struct smmu_pmu *smmu_pmu,
return idx;
}
+static bool smmu_pmu_events_compatible(struct perf_event *curr,
+ struct perf_event *new)
+{
+ if (new->pmu != curr->pmu)
+ return false;
+
+ if (to_smmu_pmu(new->pmu)->global_filter &&
+ !smmu_pmu_check_global_filter(curr, new))
+ return false;
+
+ return true;
+}
+
/*
* Implementation of abstract pmu functionality required by
* the core perf events code.
@@ -323,6 +344,7 @@ static int smmu_pmu_event_init(struct perf_event *event)
struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
struct device *dev = smmu_pmu->dev;
struct perf_event *sibling;
+ int group_num_events = 1;
u16 event_id;
if (event->attr.type != event->pmu->type)
@@ -347,18 +369,23 @@ static int smmu_pmu_event_init(struct perf_event *event)
}
/* Don't allow groups with mixed PMUs, except for s/w events */
- if (event->group_leader->pmu != event->pmu &&
- !is_software_event(event->group_leader)) {
- dev_dbg(dev, "Can't create mixed PMU group\n");
- return -EINVAL;
+ if (!is_software_event(event->group_leader)) {
+ if (!smmu_pmu_events_compatible(event->group_leader, event))
+ return -EINVAL;
+
+ if (++group_num_events > smmu_pmu->num_counters)
+ return -EINVAL;
}
for_each_sibling_event(sibling, event->group_leader) {
- if (sibling->pmu != event->pmu &&
- !is_software_event(sibling)) {
- dev_dbg(dev, "Can't create mixed PMU group\n");
+ if (is_software_event(sibling))
+ continue;
+
+ if (!smmu_pmu_events_compatible(sibling, event))
+ return -EINVAL;
+
+ if (++group_num_events > smmu_pmu->num_counters)
return -EINVAL;
- }
}
hwc->idx = -1;
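
With the global-filter rework above, a second filtered event is no longer refused with -EAGAIN merely because a counter is busy; it only has to match the filter of the events already scheduled. The filter_enable/filter_span/filter_stream_id fields come from the driver's config1 format attributes, so a filtered session might look like this (PMCG instance name and stream ID illustrative):

	perf stat -e smmuv3_pmcg_ff88840/transaction,filter_enable=1,\
		  filter_span=1,filter_stream_id=0x42/ -a sleep 1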
diff --git a/drivers/perf/fsl_imx8_ddr_perf.c b/drivers/perf/fsl_imx8_ddr_perf.c
index 63fe21600072..ce7345745b42 100644
--- a/drivers/perf/fsl_imx8_ddr_perf.c
+++ b/drivers/perf/fsl_imx8_ddr_perf.c
@@ -35,6 +35,8 @@
#define EVENT_CYCLES_COUNTER 0
#define NUM_COUNTERS 4
+#define AXI_MASKING_REVERT 0xffff0000 /* AXI_MASKING(MSB 16bits) + AXI_ID(LSB 16bits) */
+
#define to_ddr_pmu(p) container_of(p, struct ddr_pmu, pmu)
#define DDR_PERF_DEV_NAME "imx8_ddr"
@@ -42,11 +44,25 @@
static DEFINE_IDA(ddr_ida);
+/* DDR Perf hardware feature */
+#define DDR_CAP_AXI_ID_FILTER 0x1 /* support AXI ID filter */
+
+struct fsl_ddr_devtype_data {
+ unsigned int quirks; /* quirks needed for different DDR Perf core */
+};
+
+static const struct fsl_ddr_devtype_data imx8_devtype_data;
+
+static const struct fsl_ddr_devtype_data imx8m_devtype_data = {
+ .quirks = DDR_CAP_AXI_ID_FILTER,
+};
+
static const struct of_device_id imx_ddr_pmu_dt_ids[] = {
- { .compatible = "fsl,imx8-ddr-pmu",},
- { .compatible = "fsl,imx8m-ddr-pmu",},
+ { .compatible = "fsl,imx8-ddr-pmu", .data = &imx8_devtype_data},
+ { .compatible = "fsl,imx8m-ddr-pmu", .data = &imx8m_devtype_data},
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, imx_ddr_pmu_dt_ids);
struct ddr_pmu {
struct pmu pmu;
@@ -57,6 +73,7 @@ struct ddr_pmu {
struct perf_event *events[NUM_COUNTERS];
int active_events;
enum cpuhp_state cpuhp_state;
+ const struct fsl_ddr_devtype_data *devtype_data;
int irq;
int id;
};
@@ -128,6 +145,8 @@ static struct attribute *ddr_perf_events_attrs[] = {
IMX8_DDR_PMU_EVENT_ATTR(refresh, 0x37),
IMX8_DDR_PMU_EVENT_ATTR(write, 0x38),
IMX8_DDR_PMU_EVENT_ATTR(raw-hazard, 0x39),
+ IMX8_DDR_PMU_EVENT_ATTR(axid-read, 0x41),
+ IMX8_DDR_PMU_EVENT_ATTR(axid-write, 0x42),
NULL,
};
@@ -137,9 +156,13 @@ static struct attribute_group ddr_perf_events_attr_group = {
};
PMU_FORMAT_ATTR(event, "config:0-7");
+PMU_FORMAT_ATTR(axi_id, "config1:0-15");
+PMU_FORMAT_ATTR(axi_mask, "config1:16-31");
static struct attribute *ddr_perf_format_attrs[] = {
&format_attr_event.attr,
+ &format_attr_axi_id.attr,
+ &format_attr_axi_mask.attr,
NULL,
};
@@ -189,6 +212,26 @@ static u32 ddr_perf_read_counter(struct ddr_pmu *pmu, int counter)
return readl_relaxed(pmu->base + COUNTER_READ + counter * 4);
}
+static bool ddr_perf_is_filtered(struct perf_event *event)
+{
+ return event->attr.config == 0x41 || event->attr.config == 0x42;
+}
+
+static u32 ddr_perf_filter_val(struct perf_event *event)
+{
+ return event->attr.config1;
+}
+
+static bool ddr_perf_filters_compatible(struct perf_event *a,
+ struct perf_event *b)
+{
+ if (!ddr_perf_is_filtered(a))
+ return true;
+ if (!ddr_perf_is_filtered(b))
+ return true;
+ return ddr_perf_filter_val(a) == ddr_perf_filter_val(b);
+}
+
static int ddr_perf_event_init(struct perf_event *event)
{
struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
@@ -215,6 +258,15 @@ static int ddr_perf_event_init(struct perf_event *event)
!is_software_event(event->group_leader))
return -EINVAL;
+ if (pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER) {
+ if (!ddr_perf_filters_compatible(event, event->group_leader))
+ return -EINVAL;
+ for_each_sibling_event(sibling, event->group_leader) {
+ if (!ddr_perf_filters_compatible(event, sibling))
+ return -EINVAL;
+ }
+ }
+
for_each_sibling_event(sibling, event->group_leader) {
if (sibling->pmu != event->pmu &&
!is_software_event(sibling))
@@ -287,6 +339,23 @@ static int ddr_perf_event_add(struct perf_event *event, int flags)
struct hw_perf_event *hwc = &event->hw;
int counter;
int cfg = event->attr.config;
+ int cfg1 = event->attr.config1;
+
+ if (pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER) {
+ int i;
+
+ for (i = 1; i < NUM_COUNTERS; i++) {
+ if (pmu->events[i] &&
+ !ddr_perf_filters_compatible(event, pmu->events[i]))
+ return -EINVAL;
+ }
+
+ if (ddr_perf_is_filtered(event)) {
+ /* revert the axi id masking (axi_mask) value */
+ cfg1 ^= AXI_MASKING_REVERT;
+ writel(cfg1, pmu->base + COUNTER_DPCR1);
+ }
+ }
counter = ddr_perf_alloc_counter(pmu, cfg);
if (counter < 0) {
@@ -472,6 +541,8 @@ static int ddr_perf_probe(struct platform_device *pdev)
if (!name)
return -ENOMEM;
+ pmu->devtype_data = of_device_get_match_data(&pdev->dev);
+
pmu->cpu = raw_smp_processor_id();
ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
DDR_CPUHP_CB_NAME,
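
Given the axid-read/axid-write events and the config1 layout declared above (axi_id in bits 0-15, axi_mask in bits 16-31), an AXI-ID-filtered count on an i.MX8M might be requested as follows (instance name, mask and ID values illustrative; note the driver flips the axi_mask bits with AXI_MASKING_REVERT before programming COUNTER_DPCR1):

	perf stat -a -e imx8_ddr0/axid-read,axi_mask=0xffff,axi_id=0x12/ sleep 1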
diff --git a/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c b/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
index 6ad0823bcf23..e42d4464c2cf 100644
--- a/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
@@ -217,10 +217,8 @@ static int hisi_ddrc_pmu_init_irq(struct hisi_pmu *ddrc_pmu,
/* Read and init IRQ */
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "DDRC PMU get irq fail; irq:%d\n", irq);
+ if (irq < 0)
return irq;
- }
ret = devm_request_irq(&pdev->dev, irq, hisi_ddrc_pmu_isr,
IRQF_NOBALANCING | IRQF_NO_THREAD,
diff --git a/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c b/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c
index 4f2917f3e25e..f28063873e11 100644
--- a/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c
@@ -207,10 +207,8 @@ static int hisi_hha_pmu_init_irq(struct hisi_pmu *hha_pmu,
/* Read and init IRQ */
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "HHA PMU get irq fail; irq:%d\n", irq);
+ if (irq < 0)
return irq;
- }
ret = devm_request_irq(&pdev->dev, irq, hisi_hha_pmu_isr,
IRQF_NOBALANCING | IRQF_NO_THREAD,
diff --git a/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c b/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
index 9153e093f9df..078b8dc57250 100644
--- a/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
@@ -206,10 +206,8 @@ static int hisi_l3c_pmu_init_irq(struct hisi_pmu *l3c_pmu,
/* Read and init IRQ */
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "L3C PMU get irq fail; irq:%d\n", irq);
+ if (irq < 0)
return irq;
- }
ret = devm_request_irq(&pdev->dev, irq, hisi_l3c_pmu_isr,
IRQF_NOBALANCING | IRQF_NO_THREAD,
diff --git a/drivers/perf/qcom_l2_pmu.c b/drivers/perf/qcom_l2_pmu.c
index d06182fe14b8..21d6991dbe0b 100644
--- a/drivers/perf/qcom_l2_pmu.c
+++ b/drivers/perf/qcom_l2_pmu.c
@@ -909,12 +909,8 @@ static int l2_cache_pmu_probe_cluster(struct device *dev, void *data)
cluster->cluster_id = fw_cluster_id;
irq = platform_get_irq(sdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev,
- "Failed to get valid irq for cluster %ld\n",
- fw_cluster_id);
+ if (irq < 0)
return irq;
- }
irq_set_status_flags(irq, IRQ_NOAUTOEN);
cluster->irq = irq;
diff --git a/drivers/perf/xgene_pmu.c b/drivers/perf/xgene_pmu.c
index 3259e2ebeb39..7e328d6385c3 100644
--- a/drivers/perf/xgene_pmu.c
+++ b/drivers/perf/xgene_pmu.c
@@ -1901,10 +1901,8 @@ static int xgene_pmu_probe(struct platform_device *pdev)
}
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "No IRQ resource\n");
+ if (irq < 0)
return -EINVAL;
- }
rc = devm_request_irq(&pdev->dev, irq, xgene_pmu_isr,
IRQF_NOBALANCING | IRQF_NO_THREAD,
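
The dev_err() deletions in the HiSilicon, Qualcomm and X-Gene hunks above all share one rationale: platform_get_irq() itself now prints an error when the lookup fails, so callers can shrink to the bare idiom:

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;	/* platform_get_irq() already logged the failure */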