author		Ingo Molnar <mingo@kernel.org>	2017-06-08 10:12:12 +0200
committer	Ingo Molnar <mingo@kernel.org>	2017-06-08 10:12:12 +0200
commit		a5506c46a4d249a422b0eca537026fc7a1ac78b5 (patch)
tree		6a463b0e6ac0ca16eb195a4f49a22afb1403337f /kernel/events
parent		36cc2b9222b5106de34085c4dd8635ac67ef5cba (diff)
parent		cc1582c231ea041fbc68861dfaf957eaf902b829 (diff)
Merge branch 'perf/urgent' into perf/core, to pick up fixes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/events')
-rw-r--r--	kernel/events/core.c	21
1 file changed, 21 insertions(+), 0 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index f8c27d3ef3a1..3de0b98c4414 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -7316,6 +7316,21 @@ int perf_event_account_interrupt(struct perf_event *event)
 	return __perf_event_account_interrupt(event, 1);
 }
 
+static bool sample_is_allowed(struct perf_event *event, struct pt_regs *regs)
+{
+	/*
+	 * Due to interrupt latency (AKA "skid"), we may enter the
+	 * kernel before taking an overflow, even if the PMU is only
+	 * counting user events.
+	 * To avoid leaking information to userspace, we must always
+	 * reject kernel samples when exclude_kernel is set.
+	 */
+	if (event->attr.exclude_kernel && !user_mode(regs))
+		return false;
+
+	return true;
+}
+
 /*
  * Generic event overflow handling, sampling.
  */
@@ -7337,6 +7352,12 @@ static int __perf_event_overflow(struct perf_event *event,
 	ret = __perf_event_account_interrupt(event, throttle);
 
 	/*
+	 * For security, drop the skid kernel samples if necessary.
+	 */
+	if (!sample_is_allowed(event, regs))
+		return ret;
+
+	/*
 	 * XXX event_limit might not quite work as expected on inherited
 	 * events
 	 */
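
The new check only fires for events opened with exclude_kernel set. As a point of reference (not part of this commit), the sketch below shows how a user-space profiler would request such a user-only counter through perf_event_open(2); the perf_event_open() wrapper is a local helper because glibc provides no wrapper for this syscall, and the sample period and event choice are arbitrary illustrations.

/*
 * Minimal user-space sketch: open a user-only cycles counter whose
 * "skid" samples the patched kernel code will now drop.
 */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	/* No glibc wrapper exists; invoke the raw syscall. */
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.sample_period = 100000;	/* arbitrary period, for illustration only */
	attr.sample_type = PERF_SAMPLE_IP;
	attr.exclude_kernel = 1;	/* the bit that sample_is_allowed() honours */
	attr.exclude_hv = 1;

	fd = perf_event_open(&attr, 0 /* current thread */, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}
	/* ... mmap the ring buffer and consume PERF_RECORD_SAMPLE records ... */
	close(fd);
	return 0;
}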