author    Linus Torvalds <torvalds@linux-foundation.org>  2017-04-02 09:25:10 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2017-04-02 09:25:10 -0700
commit    128c434a70996a3738c7ca4aa2ee91942e4c48f0 (patch)
tree      65646fafea62ef939e49ef57a31edec4c5b049fb /arch/x86/events/core.c
parent    0a89b5eb818aee2efe8b092bb303aec56486a3d3 (diff)
parent    658b299580da2830a69eea44a8b6da1c2579a32e (diff)
Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler fixes from Thomas Gleixner:
 "This update provides:

  - make the scheduler clock switch to unstable mode smooth so the
    timestamps stay at microseconds granularity instead of switching
    to tick granularity.

  - unbreak perf test tsc by taking into account the new offset which
    was added in order to provide better sched clock continuity.

  - switching sched clock to unstable mode runs all clock related
    computations which affect the sched clock output itself from a work
    queue. In case of preemption sched clock uses half updated data and
    provides wrong timestamps. Keep the math in the protected context
    and delegate only the static key switch to workqueue context.

  - remove a duplicate header include"

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/headers: Remove duplicate #include <linux/sched/debug.h> line
  sched/clock: Fix broken stable to unstable transfer
  sched/clock, x86/perf: Fix "perf test tsc"
  sched/clock: Fix clear_sched_clock_stable() preempt wobbly
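The "perf test tsc" fix is the one touching this file: the userpage exports a
mult/shift/offset triple that turns TSC cycles into sched_clock() nanoseconds.
A simplified sketch of that mapping for orientation (illustration only, not
the kernel code; the real implementation uses mul_u64_u32_shr() because
cyc * mul can overflow 64 bits):

#include <stdint.h>

/* Illustration only: the cyc2ns mapping behind the exported fields.
 * The kernel avoids the 64-bit overflow of cyc * mul by using
 * mul_u64_u32_shr(); this sketch ignores that for clarity. */
static inline uint64_t cyc2ns_sketch(uint64_t cyc, uint32_t mul,
				     uint32_t shift, uint64_t offset)
{
	return ((cyc * mul) >> shift) + offset;
}

With the fix below, the offset handed to userspace becomes
data->cyc2ns_offset + __sched_clock_offset, so mmap'd timestamps stay
continuous with what sched_clock() itself reports after the
stable-to-unstable clock switch.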
Diffstat (limited to 'arch/x86/events/core.c')
-rw-r--r--  arch/x86/events/core.c | 9 +++++++---
1 file changed, 6 insertions(+), 3 deletions(-)
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 2aa1ad194db2..580b60f5ac83 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -2256,6 +2256,7 @@ void arch_perf_update_userpage(struct perf_event *event,
 			     struct perf_event_mmap_page *userpg, u64 now)
 {
 	struct cyc2ns_data *data;
+	u64 offset;
 
 	userpg->cap_user_time = 0;
 	userpg->cap_user_time_zero = 0;
@@ -2263,11 +2264,13 @@ void arch_perf_update_userpage(struct perf_event *event,
 		!!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED);
 	userpg->pmc_width = x86_pmu.cntval_bits;
 
-	if (!sched_clock_stable())
+	if (!using_native_sched_clock() || !sched_clock_stable())
 		return;
 
 	data = cyc2ns_read_begin();
 
+	offset = data->cyc2ns_offset + __sched_clock_offset;
+
 	/*
 	 * Internal timekeeping for enabled/running/stopped times
 	 * is always in the local_clock domain.
@@ -2275,7 +2278,7 @@ void arch_perf_update_userpage(struct perf_event *event,
 	userpg->cap_user_time = 1;
 	userpg->time_mult = data->cyc2ns_mul;
 	userpg->time_shift = data->cyc2ns_shift;
-	userpg->time_offset = data->cyc2ns_offset - now;
+	userpg->time_offset = offset - now;
 
 	/*
 	 * cap_user_time_zero doesn't make sense when we're using a different
@@ -2283,7 +2286,7 @@ void arch_perf_update_userpage(struct perf_event *event,
 	 */
 	if (!event->attr.use_clockid) {
 		userpg->cap_user_time_zero = 1;
-		userpg->time_zero = data->cyc2ns_offset;
+		userpg->time_zero = offset;
 	}
 
 	cyc2ns_read_end(data);
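For context on the consumer side: a profiler mmaps the event's
perf_event_mmap_page and, when cap_user_time_zero is set, converts raw TSC
readings to perf time with time_zero/time_mult/time_shift. A minimal
userspace sketch follows, using the overflow-safe split multiply described
in the perf mmap ABI; the seqcount retry loop on pc->lock that a real
reader needs is omitted here:

#include <stdint.h>
#include <linux/perf_event.h>

/* Sketch: raw TSC -> perf/sched_clock() nanoseconds via the fields
 * that arch_perf_update_userpage() publishes. Splitting the multiply
 * into quotient and remainder keeps cyc * time_mult from overflowing
 * 64 bits. A real reader must check pc->cap_user_time_zero and retry
 * if pc->lock changes while it reads the fields. */
static uint64_t tsc_to_ns(const volatile struct perf_event_mmap_page *pc,
			  uint64_t cyc)
{
	uint64_t quot = cyc >> pc->time_shift;
	uint64_t rem  = cyc & (((uint64_t)1 << pc->time_shift) - 1);

	return pc->time_zero + quot * pc->time_mult +
	       ((rem * pc->time_mult) >> pc->time_shift);
}

Because the patch makes time_zero carry the combined
cyc2ns_offset + __sched_clock_offset, conversions like this agree with
local_clock() again, which is what "perf test tsc" verifies.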