Diffstat (limited to 'arch/x86/events/amd')
-rw-r--r--   arch/x86/events/amd/brs.c       15
-rw-r--r--   arch/x86/events/amd/core.c      16
-rw-r--r--   arch/x86/events/amd/ibs.c      315
-rw-r--r--   arch/x86/events/amd/iommu.c      4
-rw-r--r--   arch/x86/events/amd/lbr.c       24
-rw-r--r--   arch/x86/events/amd/power.c     11
-rw-r--r--   arch/x86/events/amd/uncore.c   117
7 files changed, 412 insertions, 90 deletions
diff --git a/arch/x86/events/amd/brs.c b/arch/x86/events/amd/brs.c
index 780acd3dff22..06f35a6b58a5 100644
--- a/arch/x86/events/amd/brs.c
+++ b/arch/x86/events/amd/brs.c
@@ -44,12 +44,12 @@ static inline unsigned int brs_to(int idx)
static __always_inline void set_debug_extn_cfg(u64 val)
{
/* bits[4:3] must always be set to 11b */
- __wrmsr(MSR_AMD_DBG_EXTN_CFG, val | 3ULL << 3, val >> 32);
+ native_wrmsrq(MSR_AMD_DBG_EXTN_CFG, val | 3ULL << 3);
}
static __always_inline u64 get_debug_extn_cfg(void)
{
- return __rdmsr(MSR_AMD_DBG_EXTN_CFG);
+ return native_rdmsrq(MSR_AMD_DBG_EXTN_CFG);
}
static bool __init amd_brs_detect(void)
@@ -187,7 +187,7 @@ void amd_brs_reset(void)
/*
* Mark first entry as poisoned
*/
- wrmsrl(brs_to(0), BRS_POISON);
+ wrmsrq(brs_to(0), BRS_POISON);
}
int __init amd_brs_init(void)
@@ -325,7 +325,7 @@ void amd_brs_drain(void)
u32 brs_idx = tos - i;
u64 from, to;
- rdmsrl(brs_to(brs_idx), to);
+ rdmsrq(brs_to(brs_idx), to);
/* Entry does not belong to us (as marked by kernel) */
if (to == BRS_POISON)
@@ -341,7 +341,7 @@ void amd_brs_drain(void)
if (!amd_brs_match_plm(event, to))
continue;
- rdmsrl(brs_from(brs_idx), from);
+ rdmsrq(brs_from(brs_idx), from);
perf_clear_branch_entry_bitfields(br+nr);
@@ -371,7 +371,7 @@ static void amd_brs_poison_buffer(void)
idx = amd_brs_get_tos(&cfg);
/* Poison target of entry */
- wrmsrl(brs_to(idx), BRS_POISON);
+ wrmsrq(brs_to(idx), BRS_POISON);
}
/*
@@ -381,7 +381,8 @@ static void amd_brs_poison_buffer(void)
* On ctxswin, sched_in = true, called after the PMU has started
* On ctxswout, sched_in = false, called before the PMU is stopped
*/
-void amd_pmu_brs_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in)
+void amd_pmu_brs_sched_task(struct perf_event_pmu_context *pmu_ctx,
+ struct task_struct *task, bool sched_in)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
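The brs.c hunks are part of the tree-wide MSR accessor rename: the old __wrmsr() took the value as two 32-bit halves, while native_wrmsrq() takes a single 64-bit value matching the register width. A minimal before/after sketch, using the helpers exactly as they appear in this diff (the wrapper function itself is illustrative only):

    /* old style: value manually split into low/high halves */
    static inline void set_cfg_old(u64 val)
    {
            __wrmsr(MSR_AMD_DBG_EXTN_CFG, (u32)(val | 3ULL << 3), (u32)(val >> 32));
    }

    /* new style: one u64, no manual splitting */
    static inline void set_cfg_new(u64 val)
    {
            native_wrmsrq(MSR_AMD_DBG_EXTN_CFG, val | 3ULL << 3);
    }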
diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c
index 30d6ceb4c8ad..b20661b8621d 100644
--- a/arch/x86/events/amd/core.c
+++ b/arch/x86/events/amd/core.c
@@ -9,6 +9,7 @@
#include <linux/jiffies.h>
#include <asm/apicdef.h>
#include <asm/apic.h>
+#include <asm/msr.h>
#include <asm/nmi.h>
#include "../perf_event.h"
@@ -563,13 +564,13 @@ static void amd_pmu_cpu_reset(int cpu)
return;
/* Clear enable bits i.e. PerfCntrGlobalCtl.PerfCntrEn */
- wrmsrl(MSR_AMD64_PERF_CNTR_GLOBAL_CTL, 0);
+ wrmsrq(MSR_AMD64_PERF_CNTR_GLOBAL_CTL, 0);
/*
* Clear freeze and overflow bits i.e. PerfCntrGlobalStatus.LbrFreeze
* and PerfCntrGlobalStatus.PerfCntrOvfl
*/
- wrmsrl(MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR,
+ wrmsrq(MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR,
GLOBAL_STATUS_LBRS_FROZEN | amd_pmu_global_cntr_mask);
}
@@ -651,7 +652,7 @@ static void amd_pmu_cpu_dead(int cpu)
static __always_inline void amd_pmu_set_global_ctl(u64 ctl)
{
- wrmsrl(MSR_AMD64_PERF_CNTR_GLOBAL_CTL, ctl);
+ wrmsrq(MSR_AMD64_PERF_CNTR_GLOBAL_CTL, ctl);
}
static inline u64 amd_pmu_get_global_status(void)
@@ -659,7 +660,7 @@ static inline u64 amd_pmu_get_global_status(void)
u64 status;
/* PerfCntrGlobalStatus is read-only */
- rdmsrl(MSR_AMD64_PERF_CNTR_GLOBAL_STATUS, status);
+ rdmsrq(MSR_AMD64_PERF_CNTR_GLOBAL_STATUS, status);
return status;
}
@@ -672,14 +673,14 @@ static inline void amd_pmu_ack_global_status(u64 status)
* clears the same bit in PerfCntrGlobalStatus
*/
- wrmsrl(MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR, status);
+ wrmsrq(MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR, status);
}
static bool amd_pmu_test_overflow_topbit(int idx)
{
u64 counter;
- rdmsrl(x86_pmu_event_addr(idx), counter);
+ rdmsrq(x86_pmu_event_addr(idx), counter);
return !(counter & BIT_ULL(x86_pmu.cntval_bits - 1));
}
@@ -1003,8 +1004,7 @@ static int amd_pmu_v2_handle_irq(struct pt_regs *regs)
perf_sample_save_brstack(&data, event, &cpuc->lbr_stack, NULL);
- if (perf_event_overflow(event, &data, regs))
- x86_pmu_stop(event, 0);
+ perf_event_overflow(event, &data, regs);
}
/*
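The amd_pmu_test_overflow_topbit() read above relies on counters being armed with the two's complement of the remaining sample period: with 48-bit counters, bit 47 stays set while the counter is still counting up toward zero and clears once it wraps, so a clear top bit indicates an overflow. A small illustrative check under that assumption:

    /* cntval_bits is 48 on these PMUs; the numbers below are illustrative only */
    static inline bool counter_overflowed(u64 counter, int cntval_bits)
    {
            return !(counter & BIT_ULL(cntval_bits - 1));
    }

    /* armed with -0x10000 the counter reads 0xFFFFFFFF0000 (bit 47 set, no
     * overflow); after 0x10000 increments it wraps past zero and bit 47 clears */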
diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c
index e7a8b8758e08..112f43b23ebf 100644
--- a/arch/x86/events/amd/ibs.c
+++ b/arch/x86/events/amd/ibs.c
@@ -15,6 +15,7 @@
#include <linux/sched/clock.h>
#include <asm/apic.h>
+#include <asm/msr.h>
#include "../perf_event.h"
@@ -26,10 +27,7 @@ static u32 ibs_caps;
#include <linux/hardirq.h>
#include <asm/nmi.h>
-#include <asm/amd-ibs.h>
-
-#define IBS_FETCH_CONFIG_MASK (IBS_FETCH_RAND_EN | IBS_FETCH_MAX_CNT)
-#define IBS_OP_CONFIG_MASK IBS_OP_MAX_CNT
+#include <asm/amd/ibs.h>
/* attr.config2 */
#define IBS_SW_FILTER_MASK 1
@@ -89,6 +87,7 @@ struct perf_ibs {
u64 cnt_mask;
u64 enable_mask;
u64 valid_mask;
+ u16 min_period;
u64 max_period;
unsigned long offset_mask[1];
int offset_max;
@@ -270,11 +269,19 @@ static int validate_group(struct perf_event *event)
return 0;
}
+static bool perf_ibs_ldlat_event(struct perf_ibs *perf_ibs,
+ struct perf_event *event)
+{
+ return perf_ibs == &perf_ibs_op &&
+ (ibs_caps & IBS_CAPS_OPLDLAT) &&
+ (event->attr.config1 & 0xFFF);
+}
+
static int perf_ibs_init(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
struct perf_ibs *perf_ibs;
- u64 max_cnt, config;
+ u64 config;
int ret;
perf_ibs = get_ibs_pmu(event->attr.type);
@@ -310,25 +317,47 @@ static int perf_ibs_init(struct perf_event *event)
if (config & perf_ibs->cnt_mask)
/* raw max_cnt may not be set */
return -EINVAL;
- if (!event->attr.sample_freq && hwc->sample_period & 0x0f)
- /*
- * lower 4 bits can not be set in ibs max cnt,
- * but allowing it in case we adjust the
- * sample period to set a frequency.
- */
- return -EINVAL;
- hwc->sample_period &= ~0x0FULL;
- if (!hwc->sample_period)
- hwc->sample_period = 0x10;
+
+ if (event->attr.freq) {
+ hwc->sample_period = perf_ibs->min_period;
+ } else {
+ /* Silently mask off lower nibble. IBS hw mandates it. */
+ hwc->sample_period &= ~0x0FULL;
+ if (hwc->sample_period < perf_ibs->min_period)
+ return -EINVAL;
+ }
} else {
- max_cnt = config & perf_ibs->cnt_mask;
+ u64 period = 0;
+
+ if (event->attr.freq)
+ return -EINVAL;
+
+ if (perf_ibs == &perf_ibs_op) {
+ period = (config & IBS_OP_MAX_CNT) << 4;
+ if (ibs_caps & IBS_CAPS_OPCNTEXT)
+ period |= config & IBS_OP_MAX_CNT_EXT_MASK;
+ } else {
+ period = (config & IBS_FETCH_MAX_CNT) << 4;
+ }
+
config &= ~perf_ibs->cnt_mask;
- event->attr.sample_period = max_cnt << 4;
- hwc->sample_period = event->attr.sample_period;
+ event->attr.sample_period = period;
+ hwc->sample_period = period;
+
+ if (hwc->sample_period < perf_ibs->min_period)
+ return -EINVAL;
}
- if (!hwc->sample_period)
- return -EINVAL;
+ if (perf_ibs_ldlat_event(perf_ibs, event)) {
+ u64 ldlat = event->attr.config1 & 0xFFF;
+
+ if (ldlat < 128 || ldlat > 2048)
+ return -EINVAL;
+ ldlat >>= 7;
+
+ config |= (ldlat - 1) << 59;
+ config |= IBS_OP_L3MISSONLY | IBS_OP_LDLAT_EN;
+ }
/*
* If we modify hwc->sample_period, we also need to update
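For load-latency events, perf_ibs_init() folds the attr.config1 threshold into the IBS op control word. A worked example of the encoding above, assuming the field layout implied by the "(ldlat - 1) << 59" shift (values shown are illustrative):

    u64 ldlat = 512;                     /* attr.config1 & 0xFFF, must be in [128, 2048] */
    u64 config = 0;

    ldlat >>= 7;                         /* 512 -> 4: threshold in units of 128 cycles */
    config |= (ldlat - 1) << 59;         /* 3 lands in the 4-bit field at bits 62:59 */
    config |= IBS_OP_L3MISSONLY | IBS_OP_LDLAT_EN;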
@@ -349,7 +378,8 @@ static int perf_ibs_set_period(struct perf_ibs *perf_ibs,
int overflow;
/* ignore lower 4 bits in min count: */
- overflow = perf_event_set_period(hwc, 1<<4, perf_ibs->max_period, period);
+ overflow = perf_event_set_period(hwc, perf_ibs->min_period,
+ perf_ibs->max_period, period);
local64_set(&hwc->prev_count, 0);
return overflow;
@@ -395,7 +425,7 @@ perf_ibs_event_update(struct perf_ibs *perf_ibs, struct perf_event *event,
* prev count manually on overflow.
*/
while (!perf_event_try_update(event, count, 64)) {
- rdmsrl(event->hw.config_base, *config);
+ rdmsrq(event->hw.config_base, *config);
count = perf_ibs->get_count(*config);
}
}
@@ -406,9 +436,9 @@ static inline void perf_ibs_enable_event(struct perf_ibs *perf_ibs,
u64 tmp = hwc->config | config;
if (perf_ibs->fetch_count_reset_broken)
- wrmsrl(hwc->config_base, tmp & ~perf_ibs->enable_mask);
+ wrmsrq(hwc->config_base, tmp & ~perf_ibs->enable_mask);
- wrmsrl(hwc->config_base, tmp | perf_ibs->enable_mask);
+ wrmsrq(hwc->config_base, tmp | perf_ibs->enable_mask);
}
/*
@@ -423,9 +453,9 @@ static inline void perf_ibs_disable_event(struct perf_ibs *perf_ibs,
{
config &= ~perf_ibs->cnt_mask;
if (boot_cpu_data.x86 == 0x10)
- wrmsrl(hwc->config_base, config);
+ wrmsrq(hwc->config_base, config);
config &= ~perf_ibs->enable_mask;
- wrmsrl(hwc->config_base, config);
+ wrmsrq(hwc->config_base, config);
}
/*
@@ -447,6 +477,9 @@ static void perf_ibs_start(struct perf_event *event, int flags)
WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
hwc->state = 0;
+ if (event->attr.freq && hwc->sample_period < perf_ibs->min_period)
+ hwc->sample_period = perf_ibs->min_period;
+
perf_ibs_set_period(perf_ibs, hwc, &period);
if (perf_ibs == &perf_ibs_op && (ibs_caps & IBS_CAPS_OPCNTEXT)) {
config |= period & IBS_OP_MAX_CNT_EXT_MASK;
@@ -481,7 +514,7 @@ static void perf_ibs_stop(struct perf_event *event, int flags)
if (!stopping && (hwc->state & PERF_HES_UPTODATE))
return;
- rdmsrl(hwc->config_base, config);
+ rdmsrq(hwc->config_base, config);
if (stopping) {
/*
@@ -554,6 +587,28 @@ static void perf_ibs_del(struct perf_event *event, int flags)
static void perf_ibs_read(struct perf_event *event) { }
+static int perf_ibs_check_period(struct perf_event *event, u64 value)
+{
+ struct perf_ibs *perf_ibs;
+ u64 low_nibble;
+
+ if (event->attr.freq)
+ return 0;
+
+ perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
+ low_nibble = value & 0xFULL;
+
+ /*
+ * Note the inconsistency with perf_ibs_init(), which accepts a sample
+ * period with the lower nibble bits set and silently masks them off,
+ * whereas this callback rejects such a period with an error.
+ */
+ if (low_nibble || value < perf_ibs->min_period)
+ return -EINVAL;
+
+ return 0;
+}
+
/*
* We need to initialize with empty group if all attributes in the
* group are dynamic.
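perf_ibs_check_period() enforces the same hardware constraints at period-change time. A user-space restatement of its rules for the op PMU, whose min_period is 0x90 per this diff (purely illustrative):

    #include <assert.h>
    #include <stdint.h>

    /* Mirrors the checks above for a non-freq event. */
    static int check_period(uint64_t value, uint64_t min_period)
    {
            if (value & 0xFULL)             /* lower nibble must be clear */
                    return -1;
            if (value < min_period)         /* below the hardware minimum */
                    return -1;
            return 0;
    }

    int main(void)
    {
            assert(check_period(0x8f, 0x90) < 0);   /* low nibble set */
            assert(check_period(0x80, 0x90) < 0);   /* below minimum */
            assert(check_period(0x90, 0x90) == 0);  /* smallest valid op period */
            return 0;
    }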
@@ -572,7 +627,10 @@ PMU_FORMAT_ATTR(cnt_ctl, "config:19");
PMU_FORMAT_ATTR(swfilt, "config2:0");
PMU_EVENT_ATTR_STRING(l3missonly, fetch_l3missonly, "config:59");
PMU_EVENT_ATTR_STRING(l3missonly, op_l3missonly, "config:16");
+PMU_EVENT_ATTR_STRING(ldlat, ibs_op_ldlat_format, "config1:0-11");
PMU_EVENT_ATTR_STRING(zen4_ibs_extensions, zen4_ibs_extensions, "1");
+PMU_EVENT_ATTR_STRING(ldlat, ibs_op_ldlat_cap, "1");
+PMU_EVENT_ATTR_STRING(dtlb_pgsize, ibs_op_dtlb_pgsize_cap, "1");
static umode_t
zen4_ibs_extensions_is_visible(struct kobject *kobj, struct attribute *attr, int i)
@@ -580,6 +638,18 @@ zen4_ibs_extensions_is_visible(struct kobject *kobj, struct attribute *attr, int
return ibs_caps & IBS_CAPS_ZEN4 ? attr->mode : 0;
}
+static umode_t
+ibs_op_ldlat_is_visible(struct kobject *kobj, struct attribute *attr, int i)
+{
+ return ibs_caps & IBS_CAPS_OPLDLAT ? attr->mode : 0;
+}
+
+static umode_t
+ibs_op_dtlb_pgsize_is_visible(struct kobject *kobj, struct attribute *attr, int i)
+{
+ return ibs_caps & IBS_CAPS_OPDTLBPGSIZE ? attr->mode : 0;
+}
+
static struct attribute *fetch_attrs[] = {
&format_attr_rand_en.attr,
&format_attr_swfilt.attr,
@@ -596,6 +666,16 @@ static struct attribute *zen4_ibs_extensions_attrs[] = {
NULL,
};
+static struct attribute *ibs_op_ldlat_cap_attrs[] = {
+ &ibs_op_ldlat_cap.attr.attr,
+ NULL,
+};
+
+static struct attribute *ibs_op_dtlb_pgsize_cap_attrs[] = {
+ &ibs_op_dtlb_pgsize_cap.attr.attr,
+ NULL,
+};
+
static struct attribute_group group_fetch_formats = {
.name = "format",
.attrs = fetch_attrs,
@@ -613,6 +693,18 @@ static struct attribute_group group_zen4_ibs_extensions = {
.is_visible = zen4_ibs_extensions_is_visible,
};
+static struct attribute_group group_ibs_op_ldlat_cap = {
+ .name = "caps",
+ .attrs = ibs_op_ldlat_cap_attrs,
+ .is_visible = ibs_op_ldlat_is_visible,
+};
+
+static struct attribute_group group_ibs_op_dtlb_pgsize_cap = {
+ .name = "caps",
+ .attrs = ibs_op_dtlb_pgsize_cap_attrs,
+ .is_visible = ibs_op_dtlb_pgsize_is_visible,
+};
+
static const struct attribute_group *fetch_attr_groups[] = {
&group_fetch_formats,
&empty_caps_group,
@@ -651,6 +743,11 @@ static struct attribute_group group_op_formats = {
.attrs = op_attrs,
};
+static struct attribute *ibs_op_ldlat_format_attrs[] = {
+ &ibs_op_ldlat_format.attr.attr,
+ NULL,
+};
+
static struct attribute_group group_cnt_ctl = {
.name = "format",
.attrs = cnt_ctl_attrs,
@@ -669,10 +766,19 @@ static const struct attribute_group *op_attr_groups[] = {
NULL,
};
+static struct attribute_group group_ibs_op_ldlat_format = {
+ .name = "format",
+ .attrs = ibs_op_ldlat_format_attrs,
+ .is_visible = ibs_op_ldlat_is_visible,
+};
+
static const struct attribute_group *op_attr_update[] = {
&group_cnt_ctl,
&group_op_l3missonly,
&group_zen4_ibs_extensions,
+ &group_ibs_op_ldlat_cap,
+ &group_ibs_op_ldlat_format,
+ &group_ibs_op_dtlb_pgsize_cap,
NULL,
};
@@ -686,12 +792,14 @@ static struct perf_ibs perf_ibs_fetch = {
.start = perf_ibs_start,
.stop = perf_ibs_stop,
.read = perf_ibs_read,
+ .check_period = perf_ibs_check_period,
},
.msr = MSR_AMD64_IBSFETCHCTL,
- .config_mask = IBS_FETCH_CONFIG_MASK,
+ .config_mask = IBS_FETCH_MAX_CNT | IBS_FETCH_RAND_EN,
.cnt_mask = IBS_FETCH_MAX_CNT,
.enable_mask = IBS_FETCH_ENABLE,
.valid_mask = IBS_FETCH_VAL,
+ .min_period = 0x10,
.max_period = IBS_FETCH_MAX_CNT << 4,
.offset_mask = { MSR_AMD64_IBSFETCH_REG_MASK },
.offset_max = MSR_AMD64_IBSFETCH_REG_COUNT,
@@ -709,13 +817,15 @@ static struct perf_ibs perf_ibs_op = {
.start = perf_ibs_start,
.stop = perf_ibs_stop,
.read = perf_ibs_read,
+ .check_period = perf_ibs_check_period,
},
.msr = MSR_AMD64_IBSOPCTL,
- .config_mask = IBS_OP_CONFIG_MASK,
+ .config_mask = IBS_OP_MAX_CNT,
.cnt_mask = IBS_OP_MAX_CNT | IBS_OP_CUR_CNT |
IBS_OP_CUR_CNT_RAND,
.enable_mask = IBS_OP_ENABLE,
.valid_mask = IBS_OP_VAL,
+ .min_period = 0x90,
.max_period = IBS_OP_MAX_CNT << 4,
.offset_mask = { MSR_AMD64_IBSOP_REG_MASK },
.offset_max = MSR_AMD64_IBSOP_REG_COUNT,
@@ -917,6 +1027,10 @@ static void perf_ibs_get_tlb_lvl(union ibs_op_data3 *op_data3,
if (!op_data3->dc_lin_addr_valid)
return;
+ if ((ibs_caps & IBS_CAPS_OPDTLBPGSIZE) &&
+ !op_data3->dc_phy_addr_valid)
+ return;
+
if (!op_data3->dc_l1tlb_miss) {
data_src->mem_dtlb = PERF_MEM_TLB_L1 | PERF_MEM_TLB_HIT;
return;
@@ -941,6 +1055,8 @@ static void perf_ibs_get_mem_lock(union ibs_op_data3 *op_data3,
data_src->mem_lock = PERF_MEM_LOCK_LOCKED;
}
+/* Be careful. Works only for contiguous MSRs. */
+#define ibs_fetch_msr_idx(msr) (msr - MSR_AMD64_IBSFETCHCTL)
#define ibs_op_msr_idx(msr) (msr - MSR_AMD64_IBSOPCTL)
static void perf_ibs_get_data_src(struct perf_ibs_data *ibs_data,
@@ -1021,21 +1137,92 @@ static void perf_ibs_parse_ld_st_data(__u64 sample_type,
}
}
-static int perf_ibs_get_offset_max(struct perf_ibs *perf_ibs, u64 sample_type,
+static bool perf_ibs_is_mem_sample_type(struct perf_ibs *perf_ibs,
+ struct perf_event *event)
+{
+ u64 sample_type = event->attr.sample_type;
+
+ return perf_ibs == &perf_ibs_op &&
+ sample_type & (PERF_SAMPLE_DATA_SRC |
+ PERF_SAMPLE_WEIGHT_TYPE |
+ PERF_SAMPLE_ADDR |
+ PERF_SAMPLE_PHYS_ADDR);
+}
+
+static int perf_ibs_get_offset_max(struct perf_ibs *perf_ibs,
+ struct perf_event *event,
int check_rip)
{
- if (sample_type & PERF_SAMPLE_RAW ||
- (perf_ibs == &perf_ibs_op &&
- (sample_type & PERF_SAMPLE_DATA_SRC ||
- sample_type & PERF_SAMPLE_WEIGHT_TYPE ||
- sample_type & PERF_SAMPLE_ADDR ||
- sample_type & PERF_SAMPLE_PHYS_ADDR)))
+ if (event->attr.sample_type & PERF_SAMPLE_RAW ||
+ perf_ibs_is_mem_sample_type(perf_ibs, event) ||
+ perf_ibs_ldlat_event(perf_ibs, event))
return perf_ibs->offset_max;
else if (check_rip)
return 3;
return 1;
}
+static bool perf_ibs_is_kernel_data_addr(struct perf_event *event,
+ struct perf_ibs_data *ibs_data)
+{
+ u64 sample_type_mask = PERF_SAMPLE_ADDR | PERF_SAMPLE_RAW;
+ union ibs_op_data3 op_data3;
+ u64 dc_lin_addr;
+
+ op_data3.val = ibs_data->regs[ibs_op_msr_idx(MSR_AMD64_IBSOPDATA3)];
+ dc_lin_addr = ibs_data->regs[ibs_op_msr_idx(MSR_AMD64_IBSDCLINAD)];
+
+ return unlikely((event->attr.sample_type & sample_type_mask) &&
+ op_data3.dc_lin_addr_valid && kernel_ip(dc_lin_addr));
+}
+
+static bool perf_ibs_is_kernel_br_target(struct perf_event *event,
+ struct perf_ibs_data *ibs_data,
+ int br_target_idx)
+{
+ union ibs_op_data op_data;
+ u64 br_target;
+
+ op_data.val = ibs_data->regs[ibs_op_msr_idx(MSR_AMD64_IBSOPDATA)];
+ br_target = ibs_data->regs[br_target_idx];
+
+ return unlikely((event->attr.sample_type & PERF_SAMPLE_RAW) &&
+ op_data.op_brn_ret && kernel_ip(br_target));
+}
+
+static bool perf_ibs_swfilt_discard(struct perf_ibs *perf_ibs, struct perf_event *event,
+ struct pt_regs *regs, struct perf_ibs_data *ibs_data,
+ int br_target_idx)
+{
+ if (perf_exclude_event(event, regs))
+ return true;
+
+ if (perf_ibs != &perf_ibs_op || !event->attr.exclude_kernel)
+ return false;
+
+ if (perf_ibs_is_kernel_data_addr(event, ibs_data))
+ return true;
+
+ if (br_target_idx != -1 &&
+ perf_ibs_is_kernel_br_target(event, ibs_data, br_target_idx))
+ return true;
+
+ return false;
+}
+
+static void perf_ibs_phyaddr_clear(struct perf_ibs *perf_ibs,
+ struct perf_ibs_data *ibs_data)
+{
+ if (perf_ibs == &perf_ibs_op) {
+ ibs_data->regs[ibs_op_msr_idx(MSR_AMD64_IBSOPDATA3)] &= ~(1ULL << 18);
+ ibs_data->regs[ibs_op_msr_idx(MSR_AMD64_IBSDCPHYSAD)] = 0;
+ return;
+ }
+
+ ibs_data->regs[ibs_fetch_msr_idx(MSR_AMD64_IBSFETCHCTL)] &= ~(1ULL << 52);
+ ibs_data->regs[ibs_fetch_msr_idx(MSR_AMD64_IBSFETCHPHYSAD)] = 0;
+}
+
static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs)
{
struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
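The swfilt path above discards op samples whose data address or branch target is a kernel address whenever the event excludes the kernel, since IBS hardware cannot filter by privilege level. A hypothetical user-space attr set-up that would exercise it (the PMU type must be read from sysfs at runtime; the helper and its arguments are illustrative):

    #include <linux/perf_event.h>
    #include <stdint.h>
    #include <string.h>

    static void setup_ibs_op_swfilt(struct perf_event_attr *attr, uint32_t ibs_op_type)
    {
            memset(attr, 0, sizeof(*attr));
            attr->size = sizeof(*attr);
            attr->type = ibs_op_type;       /* from /sys/bus/event_source/devices/ibs_op/type */
            attr->config2 = 1;              /* swfilt, format "config2:0" above */
            attr->exclude_kernel = 1;       /* enables the kernel-address discard path */
            attr->sample_period = 0x90;     /* minimum op period from this diff */
            attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_ADDR;
    }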
@@ -1048,6 +1235,7 @@ static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs)
int offset, size, check_rip, offset_max, throttle = 0;
unsigned int msr;
u64 *buf, *config, period, new_config = 0;
+ int br_target_idx = -1;
if (!test_bit(IBS_STARTED, pcpu->state)) {
fail:
@@ -1069,7 +1257,7 @@ fail:
hwc = &event->hw;
msr = hwc->config_base;
buf = ibs_data.regs;
- rdmsrl(msr, *buf);
+ rdmsrq(msr, *buf);
if (!(*buf++ & perf_ibs->valid_mask))
goto fail;
@@ -1084,15 +1272,31 @@ fail:
offset = 1;
check_rip = (perf_ibs == &perf_ibs_op && (ibs_caps & IBS_CAPS_RIPINVALIDCHK));
- offset_max = perf_ibs_get_offset_max(perf_ibs, event->attr.sample_type, check_rip);
+ offset_max = perf_ibs_get_offset_max(perf_ibs, event, check_rip);
do {
- rdmsrl(msr + offset, *buf++);
+ rdmsrq(msr + offset, *buf++);
size++;
offset = find_next_bit(perf_ibs->offset_mask,
perf_ibs->offset_max,
offset + 1);
} while (offset < offset_max);
+
+ if (perf_ibs_ldlat_event(perf_ibs, event)) {
+ union ibs_op_data3 op_data3;
+
+ op_data3.val = ibs_data.regs[ibs_op_msr_idx(MSR_AMD64_IBSOPDATA3)];
+ /*
+ * perf_ibs_init() rejects events whose load latency threshold lies
+ * outside the [128, 2048] range, so by the time a sample reaches the
+ * interrupt handler the threshold is known to be valid.
+ */
+ if (!op_data3.ld_op || !op_data3.dc_miss ||
+ op_data3.dc_miss_lat <= (event->attr.config1 & 0xFFF))
+ goto out;
+ }
+
/*
* Read IbsBrTarget, IbsOpData4, and IbsExtdCtl separately
* depending on their availability.
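At interrupt time a load-latency sample is kept only when IbsOpData3 reports a load that missed the data cache with a latency above the configured threshold; a compact restatement of the check above:

    /* keep the sample? threshold = event->attr.config1 & 0xFFF */
    static bool ldlat_sample_ok(union ibs_op_data3 op_data3, u64 threshold)
    {
            return op_data3.ld_op && op_data3.dc_miss &&
                   op_data3.dc_miss_lat > threshold;
    }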
@@ -1101,16 +1305,17 @@ fail:
if (event->attr.sample_type & PERF_SAMPLE_RAW) {
if (perf_ibs == &perf_ibs_op) {
if (ibs_caps & IBS_CAPS_BRNTRGT) {
- rdmsrl(MSR_AMD64_IBSBRTARGET, *buf++);
+ rdmsrq(MSR_AMD64_IBSBRTARGET, *buf++);
+ br_target_idx = size;
size++;
}
if (ibs_caps & IBS_CAPS_OPDATA4) {
- rdmsrl(MSR_AMD64_IBSOPDATA4, *buf++);
+ rdmsrq(MSR_AMD64_IBSOPDATA4, *buf++);
size++;
}
}
if (perf_ibs == &perf_ibs_fetch && (ibs_caps & IBS_CAPS_FETCHCTLEXTD)) {
- rdmsrl(MSR_AMD64_ICIBSEXTDCTL, *buf++);
+ rdmsrq(MSR_AMD64_ICIBSEXTDCTL, *buf++);
size++;
}
}
@@ -1129,10 +1334,19 @@ fail:
}
if ((event->attr.config2 & IBS_SW_FILTER_MASK) &&
- perf_exclude_event(event, &regs)) {
+ perf_ibs_swfilt_discard(perf_ibs, event, &regs, &ibs_data, br_target_idx)) {
throttle = perf_event_account_interrupt(event);
goto out;
}
+ /*
+ * Prevent leaking physical addresses to unprivileged users. Skip
+ * PERF_SAMPLE_PHYS_ADDR check since generic code prevents it for
+ * unprivileged users.
+ */
+ if ((event->attr.sample_type & PERF_SAMPLE_RAW) &&
+ perf_allow_kernel()) {
+ perf_ibs_phyaddr_clear(perf_ibs, &ibs_data);
+ }
if (event->attr.sample_type & PERF_SAMPLE_RAW) {
raw = (struct perf_raw_record){
@@ -1155,10 +1369,12 @@ fail:
perf_sample_save_callchain(&data, event, iregs);
throttle = perf_event_overflow(event, &data, &regs);
+
+ if (event->attr.freq && hwc->sample_period < perf_ibs->min_period)
+ hwc->sample_period = perf_ibs->min_period;
+
out:
- if (throttle) {
- perf_ibs_stop(event, 0);
- } else {
+ if (!throttle) {
if (perf_ibs == &perf_ibs_op) {
if (ibs_caps & IBS_CAPS_OPCNTEXT) {
new_config = period & IBS_OP_MAX_CNT_EXT_MASK;
@@ -1244,7 +1460,8 @@ static __init int perf_ibs_op_init(void)
if (ibs_caps & IBS_CAPS_OPCNTEXT) {
perf_ibs_op.max_period |= IBS_OP_MAX_CNT_EXT_MASK;
perf_ibs_op.config_mask |= IBS_OP_MAX_CNT_EXT_MASK;
- perf_ibs_op.cnt_mask |= IBS_OP_MAX_CNT_EXT_MASK;
+ perf_ibs_op.cnt_mask |= (IBS_OP_MAX_CNT_EXT_MASK |
+ IBS_OP_CUR_CNT_EXT_MASK);
}
if (ibs_caps & IBS_CAPS_ZEN4)
@@ -1347,7 +1564,7 @@ static inline int ibs_eilvt_valid(void)
preempt_disable();
- rdmsrl(MSR_AMD64_IBSCTL, val);
+ rdmsrq(MSR_AMD64_IBSCTL, val);
offset = val & IBSCTL_LVT_OFFSET_MASK;
if (!(val & IBSCTL_LVT_OFFSET_VALID)) {
@@ -1462,7 +1679,7 @@ static inline int get_ibs_lvt_offset(void)
{
u64 val;
- rdmsrl(MSR_AMD64_IBSCTL, val);
+ rdmsrq(MSR_AMD64_IBSCTL, val);
if (!(val & IBSCTL_LVT_OFFSET_VALID))
return -EINVAL;
diff --git a/arch/x86/events/amd/iommu.c b/arch/x86/events/amd/iommu.c
index b15f7b950d2e..a721da9987dd 100644
--- a/arch/x86/events/amd/iommu.c
+++ b/arch/x86/events/amd/iommu.c
@@ -16,6 +16,8 @@
#include <linux/slab.h>
#include <linux/amd-iommu.h>
+#include <asm/msr.h>
+
#include "../perf_event.h"
#include "iommu.h"
@@ -30,7 +32,7 @@
#define GET_DOMID_MASK(x) (((x)->conf1 >> 16) & 0xFFFFULL)
#define GET_PASID_MASK(x) (((x)->conf1 >> 32) & 0xFFFFFULL)
-#define IOMMU_NAME_SIZE 16
+#define IOMMU_NAME_SIZE 24
struct perf_amd_iommu {
struct list_head list;
diff --git a/arch/x86/events/amd/lbr.c b/arch/x86/events/amd/lbr.c
index 19c7b76e21bc..d24da377df77 100644
--- a/arch/x86/events/amd/lbr.c
+++ b/arch/x86/events/amd/lbr.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/perf_event.h>
+#include <asm/msr.h>
#include <asm/perf_event.h>
#include "../perf_event.h"
@@ -61,19 +62,19 @@ struct branch_entry {
static __always_inline void amd_pmu_lbr_set_from(unsigned int idx, u64 val)
{
- wrmsrl(MSR_AMD_SAMP_BR_FROM + idx * 2, val);
+ wrmsrq(MSR_AMD_SAMP_BR_FROM + idx * 2, val);
}
static __always_inline void amd_pmu_lbr_set_to(unsigned int idx, u64 val)
{
- wrmsrl(MSR_AMD_SAMP_BR_FROM + idx * 2 + 1, val);
+ wrmsrq(MSR_AMD_SAMP_BR_FROM + idx * 2 + 1, val);
}
static __always_inline u64 amd_pmu_lbr_get_from(unsigned int idx)
{
u64 val;
- rdmsrl(MSR_AMD_SAMP_BR_FROM + idx * 2, val);
+ rdmsrq(MSR_AMD_SAMP_BR_FROM + idx * 2, val);
return val;
}
@@ -82,7 +83,7 @@ static __always_inline u64 amd_pmu_lbr_get_to(unsigned int idx)
{
u64 val;
- rdmsrl(MSR_AMD_SAMP_BR_FROM + idx * 2 + 1, val);
+ rdmsrq(MSR_AMD_SAMP_BR_FROM + idx * 2 + 1, val);
return val;
}
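As the accessors above show, the LBR v2 from/to records live in one interleaved MSR range rather than two separate arrays. The index-to-MSR mapping implied by these helpers (illustrative restatement only):

    static inline u32 lbr_from_msr(unsigned int idx)
    {
            return MSR_AMD_SAMP_BR_FROM + idx * 2;
    }

    static inline u32 lbr_to_msr(unsigned int idx)
    {
            return MSR_AMD_SAMP_BR_FROM + idx * 2 + 1;
    }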
@@ -333,7 +334,7 @@ void amd_pmu_lbr_reset(void)
cpuc->last_task_ctx = NULL;
cpuc->last_log_id = 0;
- wrmsrl(MSR_AMD64_LBR_SELECT, 0);
+ wrmsrq(MSR_AMD64_LBR_SELECT, 0);
}
void amd_pmu_lbr_add(struct perf_event *event)
@@ -371,7 +372,8 @@ void amd_pmu_lbr_del(struct perf_event *event)
perf_sched_cb_dec(event->pmu);
}
-void amd_pmu_lbr_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in)
+void amd_pmu_lbr_sched_task(struct perf_event_pmu_context *pmu_ctx,
+ struct task_struct *task, bool sched_in)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
@@ -395,16 +397,16 @@ void amd_pmu_lbr_enable_all(void)
/* Set hardware branch filter */
if (cpuc->lbr_select) {
lbr_select = cpuc->lbr_sel->config & LBR_SELECT_MASK;
- wrmsrl(MSR_AMD64_LBR_SELECT, lbr_select);
+ wrmsrq(MSR_AMD64_LBR_SELECT, lbr_select);
}
if (cpu_feature_enabled(X86_FEATURE_AMD_LBR_PMC_FREEZE)) {
- rdmsrl(MSR_IA32_DEBUGCTLMSR, dbg_ctl);
- wrmsrl(MSR_IA32_DEBUGCTLMSR, dbg_ctl | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
+ rdmsrq(MSR_IA32_DEBUGCTLMSR, dbg_ctl);
+ wrmsrq(MSR_IA32_DEBUGCTLMSR, dbg_ctl | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
}
- rdmsrl(MSR_AMD_DBG_EXTN_CFG, dbg_extn_cfg);
- wrmsrl(MSR_AMD_DBG_EXTN_CFG, dbg_extn_cfg | DBG_EXTN_CFG_LBRV2EN);
+ rdmsrq(MSR_AMD_DBG_EXTN_CFG, dbg_extn_cfg);
+ wrmsrq(MSR_AMD_DBG_EXTN_CFG, dbg_extn_cfg | DBG_EXTN_CFG_LBRV2EN);
}
void amd_pmu_lbr_disable_all(void)
diff --git a/arch/x86/events/amd/power.c b/arch/x86/events/amd/power.c
index 37d5b380516e..dad42790cf7d 100644
--- a/arch/x86/events/amd/power.c
+++ b/arch/x86/events/amd/power.c
@@ -11,6 +11,7 @@
#include <linux/slab.h>
#include <linux/perf_event.h>
#include <asm/cpu_device_id.h>
+#include <asm/msr.h>
#include "../perf_event.h"
/* Event code: LSB 8 bits, passed in attr->config any other bit is reserved. */
@@ -48,8 +49,8 @@ static void event_update(struct perf_event *event)
prev_pwr_acc = hwc->pwr_acc;
prev_ptsc = hwc->ptsc;
- rdmsrl(MSR_F15H_CU_PWR_ACCUMULATOR, new_pwr_acc);
- rdmsrl(MSR_F15H_PTSC, new_ptsc);
+ rdmsrq(MSR_F15H_CU_PWR_ACCUMULATOR, new_pwr_acc);
+ rdmsrq(MSR_F15H_PTSC, new_ptsc);
/*
* Calculate the CU power consumption over a time period, the unit of
@@ -75,8 +76,8 @@ static void __pmu_event_start(struct perf_event *event)
event->hw.state = 0;
- rdmsrl(MSR_F15H_PTSC, event->hw.ptsc);
- rdmsrl(MSR_F15H_CU_PWR_ACCUMULATOR, event->hw.pwr_acc);
+ rdmsrq(MSR_F15H_PTSC, event->hw.ptsc);
+ rdmsrq(MSR_F15H_CU_PWR_ACCUMULATOR, event->hw.pwr_acc);
}
static void pmu_event_start(struct perf_event *event, int mode)
@@ -272,7 +273,7 @@ static int __init amd_power_pmu_init(void)
cpu_pwr_sample_ratio = cpuid_ecx(0x80000007);
- if (rdmsrl_safe(MSR_F15H_CU_MAX_PWR_ACCUMULATOR, &max_cu_acc_power)) {
+ if (rdmsrq_safe(MSR_F15H_CU_MAX_PWR_ACCUMULATOR, &max_cu_acc_power)) {
pr_err("Failed to read max compute unit power accumulator MSR\n");
return -ENODEV;
}
diff --git a/arch/x86/events/amd/uncore.c b/arch/x86/events/amd/uncore.c
index 49c26ce2b115..e8b6af199c73 100644
--- a/arch/x86/events/amd/uncore.c
+++ b/arch/x86/events/amd/uncore.c
@@ -21,6 +21,7 @@
#define NUM_COUNTERS_NB 4
#define NUM_COUNTERS_L2 4
#define NUM_COUNTERS_L3 6
+#define NUM_COUNTERS_MAX 64
#define RDPMC_BASE_NB 6
#define RDPMC_BASE_LLC 10
@@ -38,7 +39,10 @@ struct amd_uncore_ctx {
int refcnt;
int cpu;
struct perf_event **events;
- struct hlist_node node;
+ unsigned long active_mask[BITS_TO_LONGS(NUM_COUNTERS_MAX)];
+ int nr_active;
+ struct hrtimer hrtimer;
+ u64 hrtimer_duration;
};
struct amd_uncore_pmu {
@@ -83,11 +87,51 @@ struct amd_uncore {
static struct amd_uncore uncores[UNCORE_TYPE_MAX];
+/* Interval for hrtimer, defaults to 60000 milliseconds */
+static unsigned int update_interval = 60 * MSEC_PER_SEC;
+module_param(update_interval, uint, 0444);
+
static struct amd_uncore_pmu *event_to_amd_uncore_pmu(struct perf_event *event)
{
return container_of(event->pmu, struct amd_uncore_pmu, pmu);
}
+static enum hrtimer_restart amd_uncore_hrtimer(struct hrtimer *hrtimer)
+{
+ struct amd_uncore_ctx *ctx;
+ struct perf_event *event;
+ int bit;
+
+ ctx = container_of(hrtimer, struct amd_uncore_ctx, hrtimer);
+
+ if (!ctx->nr_active || ctx->cpu != smp_processor_id())
+ return HRTIMER_NORESTART;
+
+ for_each_set_bit(bit, ctx->active_mask, NUM_COUNTERS_MAX) {
+ event = ctx->events[bit];
+ event->pmu->read(event);
+ }
+
+ hrtimer_forward_now(hrtimer, ns_to_ktime(ctx->hrtimer_duration));
+ return HRTIMER_RESTART;
+}
+
+static void amd_uncore_start_hrtimer(struct amd_uncore_ctx *ctx)
+{
+ hrtimer_start(&ctx->hrtimer, ns_to_ktime(ctx->hrtimer_duration),
+ HRTIMER_MODE_REL_PINNED_HARD);
+}
+
+static void amd_uncore_cancel_hrtimer(struct amd_uncore_ctx *ctx)
+{
+ hrtimer_cancel(&ctx->hrtimer);
+}
+
+static void amd_uncore_init_hrtimer(struct amd_uncore_ctx *ctx)
+{
+ hrtimer_setup(&ctx->hrtimer, amd_uncore_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
+}
+
static void amd_uncore_read(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
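These uncore PMUs run without overflow interrupts (PERF_PMU_CAP_NO_INTERRUPT), so the new hrtimer periodically reads every active counter to keep prev_count fresh between user reads. How the pieces added across the hunks in this file fit together, shown as a sketch of the call ordering only:

    ctx->hrtimer_duration = (u64)update_interval * NSEC_PER_MSEC;  /* 60 s default */
    amd_uncore_init_hrtimer(ctx);           /* amd_uncore_ctx_init() */

    if (!ctx->nr_active++)                  /* first counter started on this context */
            amd_uncore_start_hrtimer(ctx);
    ...
    if (!--ctx->nr_active)                  /* last counter stopped */
            amd_uncore_cancel_hrtimer(ctx);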
@@ -106,9 +150,9 @@ static void amd_uncore_read(struct perf_event *event)
* read counts directly from the corresponding PERF_CTR.
*/
if (hwc->event_base_rdpmc < 0)
- rdmsrl(hwc->event_base, new);
+ rdmsrq(hwc->event_base, new);
else
- rdpmcl(hwc->event_base_rdpmc, new);
+ new = rdpmc(hwc->event_base_rdpmc);
local64_set(&hwc->prev_count, new);
delta = (new << COUNTER_SHIFT) - (prev << COUNTER_SHIFT);
@@ -118,27 +162,40 @@ static void amd_uncore_read(struct perf_event *event)
static void amd_uncore_start(struct perf_event *event, int flags)
{
+ struct amd_uncore_pmu *pmu = event_to_amd_uncore_pmu(event);
+ struct amd_uncore_ctx *ctx = *per_cpu_ptr(pmu->ctx, event->cpu);
struct hw_perf_event *hwc = &event->hw;
+ if (!ctx->nr_active++)
+ amd_uncore_start_hrtimer(ctx);
+
if (flags & PERF_EF_RELOAD)
- wrmsrl(hwc->event_base, (u64)local64_read(&hwc->prev_count));
+ wrmsrq(hwc->event_base, (u64)local64_read(&hwc->prev_count));
hwc->state = 0;
- wrmsrl(hwc->config_base, (hwc->config | ARCH_PERFMON_EVENTSEL_ENABLE));
+ __set_bit(hwc->idx, ctx->active_mask);
+ wrmsrq(hwc->config_base, (hwc->config | ARCH_PERFMON_EVENTSEL_ENABLE));
perf_event_update_userpage(event);
}
static void amd_uncore_stop(struct perf_event *event, int flags)
{
+ struct amd_uncore_pmu *pmu = event_to_amd_uncore_pmu(event);
+ struct amd_uncore_ctx *ctx = *per_cpu_ptr(pmu->ctx, event->cpu);
struct hw_perf_event *hwc = &event->hw;
- wrmsrl(hwc->config_base, hwc->config);
+ wrmsrq(hwc->config_base, hwc->config);
hwc->state |= PERF_HES_STOPPED;
if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
event->pmu->read(event);
hwc->state |= PERF_HES_UPTODATE;
}
+
+ if (!--ctx->nr_active)
+ amd_uncore_cancel_hrtimer(ctx);
+
+ __clear_bit(hwc->idx, ctx->active_mask);
}
static int amd_uncore_add(struct perf_event *event, int flags)
@@ -491,6 +548,9 @@ static int amd_uncore_ctx_init(struct amd_uncore *uncore, unsigned int cpu)
goto fail;
}
+ amd_uncore_init_hrtimer(curr);
+ curr->hrtimer_duration = (u64)update_interval * NSEC_PER_MSEC;
+
cpumask_set_cpu(cpu, &pmu->active_mask);
}
@@ -880,16 +940,55 @@ static int amd_uncore_umc_event_init(struct perf_event *event)
static void amd_uncore_umc_start(struct perf_event *event, int flags)
{
+ struct amd_uncore_pmu *pmu = event_to_amd_uncore_pmu(event);
+ struct amd_uncore_ctx *ctx = *per_cpu_ptr(pmu->ctx, event->cpu);
struct hw_perf_event *hwc = &event->hw;
+ if (!ctx->nr_active++)
+ amd_uncore_start_hrtimer(ctx);
+
if (flags & PERF_EF_RELOAD)
- wrmsrl(hwc->event_base, (u64)local64_read(&hwc->prev_count));
+ wrmsrq(hwc->event_base, (u64)local64_read(&hwc->prev_count));
hwc->state = 0;
- wrmsrl(hwc->config_base, (hwc->config | AMD64_PERFMON_V2_ENABLE_UMC));
+ __set_bit(hwc->idx, ctx->active_mask);
+ wrmsrq(hwc->config_base, (hwc->config | AMD64_PERFMON_V2_ENABLE_UMC));
perf_event_update_userpage(event);
}
+static void amd_uncore_umc_read(struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ u64 prev, new, shift;
+ s64 delta;
+
+ shift = COUNTER_SHIFT + 1;
+ prev = local64_read(&hwc->prev_count);
+
+ /*
+ * UMC counters do not have RDPMC assignments. Read counts directly
+ * from the corresponding PERF_CTR.
+ */
+	rdmsrq(hwc->event_base, new);
+
+ /*
+ * Unlike the other uncore counters, UMC counters saturate and set the
+ * Overflow bit (bit 48) on overflow. Since they do not roll over,
+ * proactively reset the corresponding PERF_CTR when bit 47 is set so
+ * that the counter never gets a chance to saturate.
+ */
+ if (new & BIT_ULL(63 - COUNTER_SHIFT)) {
+		wrmsrq(hwc->event_base, 0);
+ local64_set(&hwc->prev_count, 0);
+ } else {
+ local64_set(&hwc->prev_count, new);
+ }
+
+ delta = (new << shift) - (prev << shift);
+ delta >>= shift;
+ local64_add(delta, &event->count);
+}
+
static
void amd_uncore_umc_ctx_scan(struct amd_uncore *uncore, unsigned int cpu)
{
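amd_uncore_umc_read() widens the delta math by one extra bit: COUNTER_SHIFT is 16 in this file (the overflow comment pins the counter width at bit 47), so shifting by 17 discards the status bits above bit 46 and lets the subtraction sign-extend correctly. A small user-space restatement, purely illustrative:

    #include <assert.h>
    #include <stdint.h>

    static int64_t umc_delta(uint64_t prev, uint64_t cur)
    {
            const unsigned int shift = 16 + 1;      /* COUNTER_SHIFT + 1 */
            int64_t delta = (int64_t)(cur << shift) - (int64_t)(prev << shift);

            return delta >> shift;                  /* arithmetic shift restores the sign */
    }

    int main(void)
    {
            assert(umc_delta(100, 250) == 150);
            assert(umc_delta(0, 42) == 42);         /* e.g. right after a proactive reset */
            return 0;
    }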
@@ -968,7 +1067,7 @@ int amd_uncore_umc_ctx_init(struct amd_uncore *uncore, unsigned int cpu)
.del = amd_uncore_del,
.start = amd_uncore_umc_start,
.stop = amd_uncore_stop,
- .read = amd_uncore_read,
+ .read = amd_uncore_umc_read,
.capabilities = PERF_PMU_CAP_NO_EXCLUDE | PERF_PMU_CAP_NO_INTERRUPT,
.module = THIS_MODULE,
};