 arch/powerpc/kernel/perf_counter.c |  9 ++++++---
 arch/x86/kernel/cpu/perf_counter.c | 15 ++++++++++++---
 include/linux/perf_counter.h       |  6 ++++--
 kernel/perf_counter.c              |  9 ++++++---
 4 files changed, 28 insertions(+), 11 deletions(-)
diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c
index 5e0bf399c433..4990ce2e5f08 100644
--- a/arch/powerpc/kernel/perf_counter.c
+++ b/arch/powerpc/kernel/perf_counter.c
@@ -767,6 +767,7 @@ static void power_pmu_unthrottle(struct perf_counter *counter)
 	perf_disable();
 	power_pmu_read(counter);
 	left = counter->hw.sample_period;
+	counter->hw.last_period = left;
 	val = 0;
 	if (left < 0x80000000L)
 		val = 0x80000000L - left;
@@ -937,7 +938,8 @@ const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
 
 	counter->hw.config = events[n];
 	counter->hw.counter_base = cflags[n];
-	atomic64_set(&counter->hw.period_left, counter->hw.sample_period);
+	counter->hw.last_period = counter->hw.sample_period;
+	atomic64_set(&counter->hw.period_left, counter->hw.last_period);
 
 	/*
 	 * See if we need to reserve the PMU.
@@ -1002,8 +1004,9 @@ static void record_and_restart(struct perf_counter *counter, long val,
 	 */
 	if (record) {
 		struct perf_sample_data data = {
-			.regs = regs,
-			.addr = 0,
+			.regs	= regs,
+			.addr	= 0,
+			.period	= counter->hw.last_period,
 		};
 
 		if (counter->attr.sample_type & PERF_SAMPLE_ADDR) {
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index 82a23d487f92..57ae1bec81be 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -698,6 +698,7 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
 
 	if (!hwc->sample_period) {
 		hwc->sample_period = x86_pmu.max_period;
+		hwc->last_period = hwc->sample_period;
 		atomic64_set(&hwc->period_left, hwc->sample_period);
 	}
 
@@ -880,12 +881,14 @@ x86_perf_counter_set_period(struct perf_counter *counter,
 	if (unlikely(left <= -period)) {
 		left = period;
 		atomic64_set(&hwc->period_left, left);
+		hwc->last_period = period;
 		ret = 1;
 	}
 
 	if (unlikely(left <= 0)) {
 		left += period;
 		atomic64_set(&hwc->period_left, left);
+		hwc->last_period = period;
 		ret = 1;
 	}
 	/*
@@ -1257,9 +1260,12 @@ static int amd_pmu_handle_irq(struct pt_regs *regs)
 		if (val & (1ULL << (x86_pmu.counter_bits - 1)))
 			continue;
 
-		/* counter overflow */
-		handled = 1;
-		inc_irq_stat(apic_perf_irqs);
+		/*
+		 * counter overflow
+		 */
+		handled		= 1;
+		data.period	= counter->hw.last_period;
+
 		if (!x86_perf_counter_set_period(counter, hwc, idx))
 			continue;
 
@@ -1267,6 +1273,9 @@ static int amd_pmu_handle_irq(struct pt_regs *regs)
 			amd_pmu_disable_counter(hwc, idx);
 	}
 
+	if (handled)
+		inc_irq_stat(apic_perf_irqs);
+
 	return handled;
 }
 
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index d8c0eb480f9a..5b966472b458 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -366,6 +366,7 @@ struct hw_perf_counter {
 	};
 	atomic64_t			prev_count;
 	u64				sample_period;
+	u64				last_period;
 	atomic64_t			period_left;
 	u64				interrupts;
 
@@ -606,8 +607,9 @@ extern int hw_perf_group_sched_in(struct perf_counter *group_leader,
 extern void perf_counter_update_userpage(struct perf_counter *counter);
 
 struct perf_sample_data {
-	struct pt_regs	*regs;
-	u64		addr;
+	struct pt_regs		*regs;
+	u64			addr;
+	u64			period;
 };
 
 extern int perf_counter_overflow(struct perf_counter *counter, int nmi,
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 4fe85e804f43..8b89b40bd0f0 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -2495,7 +2495,7 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
 		perf_output_put(&handle, cpu_entry);
 
 	if (sample_type & PERF_SAMPLE_PERIOD)
-		perf_output_put(&handle, counter->hw.sample_period);
+		perf_output_put(&handle, data->period);
 
 	/*
 	 * XXX PERF_SAMPLE_GROUP vs inherited counters seems difficult.
@@ -3040,11 +3040,13 @@ static void perf_swcounter_set_period(struct perf_counter *counter)
 	if (unlikely(left <= -period)) {
 		left = period;
 		atomic64_set(&hwc->period_left, left);
+		hwc->last_period = period;
 	}
 
 	if (unlikely(left <= 0)) {
 		left += period;
 		atomic64_add(period, &hwc->period_left);
+		hwc->last_period = period;
 	}
 
 	atomic64_set(&hwc->prev_count, -left);
@@ -3086,8 +3088,9 @@ static void perf_swcounter_overflow(struct perf_counter *counter,
 				    int nmi, struct pt_regs *regs, u64 addr)
 {
 	struct perf_sample_data data = {
-		.regs	= regs,
-		.addr	= addr,
+		.regs	= regs,
+		.addr	= addr,
+		.period	= counter->hw.last_period,
 	};
 
 	perf_swcounter_update(counter);
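
The bookkeeping this patch introduces can be sketched outside the kernel. Below is a minimal stand-alone model; the names hw_counter, counter_set_period and counter_overflow are invented for illustration and only mirror hw_perf_counter, the *_set_period helpers and the overflow paths touched above, with the kernel's atomic64_t fields and locking simplified to plain integers. The point it demonstrates: the reload path records in last_period the period it actually programmed, and the overflow path copies that value into the sample data, so a sample's PERF_SAMPLE_PERIOD field describes the period that was in effect when the counter overflowed, even if sample_period has been re-tuned since.

	/*
	 * Stand-alone sketch of the last_period bookkeeping (not kernel code).
	 */
	#include <stdio.h>
	#include <stdint.h>

	struct hw_counter {
		uint64_t sample_period;	/* currently requested period (may be re-tuned) */
		uint64_t last_period;	/* period actually programmed at the last reload */
		int64_t  period_left;	/* events remaining until the next overflow */
	};

	struct sample_data {
		uint64_t addr;
		uint64_t period;	/* what PERF_SAMPLE_PERIOD should report */
	};

	/* Re-arm the counter and remember the period that was really used. */
	static void counter_set_period(struct hw_counter *hwc)
	{
		uint64_t period = hwc->sample_period;

		if (hwc->period_left <= 0) {
			hwc->period_left += period;
			hwc->last_period = period;
		}
	}

	/* Overflow path: the sample reports the period that produced it. */
	static void counter_overflow(struct hw_counter *hwc, uint64_t addr)
	{
		struct sample_data data = {
			.addr	= addr,
			.period	= hwc->last_period,
		};

		printf("sample at %#lx, period %lu\n",
		       (unsigned long)data.addr, (unsigned long)data.period);

		counter_set_period(hwc);	/* re-arm with the (possibly new) period */
	}

	int main(void)
	{
		struct hw_counter hwc = { .sample_period = 1000 };

		counter_set_period(&hwc);	/* initial arm: last_period = 1000 */

		hwc.period_left = 0;		/* pretend the counter overflowed */
		counter_overflow(&hwc, 0x1234);	/* reports period 1000 */

		hwc.sample_period = 4000;	/* period re-tuned between overflows */
		hwc.period_left = 0;
		counter_overflow(&hwc, 0x5678);	/* still reports 1000: the period this sample ran with */

		return 0;
	}

In the patch itself the same split is visible in kernel/perf_counter.c: perf_counter_output previously wrote counter->hw.sample_period directly, and now writes data->period, which the per-arch interrupt handlers and the software-counter path fill from hw.last_period.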
