Diffstat (limited to 'arch/parisc/kernel/time.c')
-rw-r--r--  arch/parisc/kernel/time.c  51
1 file changed, 19 insertions, 32 deletions
diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c
index 04508158815c..9714fbd7c42d 100644
--- a/arch/parisc/kernel/time.c
+++ b/arch/parisc/kernel/time.c
@@ -40,6 +40,8 @@
#include <linux/timex.h>
+int time_keeper_id __read_mostly; /* CPU used for timekeeping. */
+
static unsigned long clocktick __ro_after_init; /* timer cycles per tick */
/*
@@ -70,8 +72,6 @@ irqreturn_t __irq_entry timer_interrupt(int irq, void *dev_id)
/* gcc can optimize for "read-only" case with a local clocktick */
unsigned long cpt = clocktick;
- profile_tick(CPU_PROFILING);
-
/* Initialize next_tick to the old expected tick time. */
next_tick = cpuinfo->it_value;
@@ -86,10 +86,9 @@ irqreturn_t __irq_entry timer_interrupt(int irq, void *dev_id)
cpuinfo->it_value = next_tick;
/* Go do system house keeping. */
- if (cpu == 0)
- xtime_update(ticks_elapsed);
-
- update_process_times(user_mode(get_irq_regs()));
+ if (IS_ENABLED(CONFIG_SMP) && (cpu != time_keeper_id))
+ ticks_elapsed = 0;
+ legacy_timer_tick(ticks_elapsed);
/* Skip clockticks on purpose if we know we would miss those.
* The new CR16 must be "later" than current CR16 otherwise
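Note: the two hunks above work together. timer_interrupt() now funnels everything through legacy_timer_tick() (kernel/time/tick-legacy.c), which roughly speaking only advances jiffies/wall time when handed a non-zero tick count and then runs the per-CPU process accounting and profiling hooks, which is why the explicit profile_tick() call could be dropped and why CPUs other than time_keeper_id simply pass 0. A simplified, stubbed user-space model of that flow (names mirror the kernel helpers, but this is not the kernel source):

#include <stdio.h>

static unsigned long jiffies;           /* global tick count              */
static int time_keeper_id;              /* CPU allowed to advance it (0)  */

/* stand-ins for the real kernel helpers */
static void xtime_update(unsigned long t) { jiffies += t; }
static void update_process_times(void)    { /* per-CPU accounting */ }
static void profile_tick(void)            { /* per-CPU profiling  */ }

/* rough shape of what legacy_timer_tick() does with its argument */
static void legacy_timer_tick(unsigned long ticks)
{
	if (ticks)                       /* skipped for the zero count     */
		xtime_update(ticks);     /* that non-timekeepers pass      */
	update_process_times();          /* still runs on every CPU        */
	profile_tick();                  /* ditto - hence the removed call */
}

static void timer_interrupt(int cpu, unsigned long ticks_elapsed)
{
	if (cpu != time_keeper_id)       /* mirrors the patch's SMP guard  */
		ticks_elapsed = 0;
	legacy_timer_tick(ticks_elapsed);
}

int main(void)
{
	timer_interrupt(0, 1);               /* timekeeper: jiffies advances */
	timer_interrupt(1, 1);               /* other CPU: local work only   */
	printf("jiffies = %lu\n", jiffies);  /* prints 1 */
	return 0;
}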
@@ -153,7 +152,7 @@ static struct clocksource clocksource_cr16 = {
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
-void __init start_cpu_itimer(void)
+void start_cpu_itimer(void)
{
unsigned int cpu = smp_processor_id();
unsigned long next_tick = mfctl(16) + clocktick;
@@ -180,9 +179,16 @@ static int rtc_generic_get_time(struct device *dev, struct rtc_time *tm)
static int rtc_generic_set_time(struct device *dev, struct rtc_time *tm)
{
time64_t secs = rtc_tm_to_time64(tm);
-
- if (pdc_tod_set(secs, 0) < 0)
+ int ret;
+
+ /* hppa has a Y2K38 problem: pdc_tod_set() takes a u32 value! */
+ ret = pdc_tod_set(secs, 0);
+ if (ret != 0) {
+ pr_warn("pdc_tod_set(%lld) returned error %d\n", secs, ret);
+ if (ret == PDC_INVALID_ARG)
+ return -EINVAL;
return -EOPNOTSUPP;
+ }
return 0;
}
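Note: on the RTC path the firmware result is now reported instead of being folded into a bare -EOPNOTSUPP, and PDC_INVALID_ARG is mapped to -EINVAL. The Y2K38 warning in the comment is about parameter width: per the comment, pdc_tod_set() effectively accepts a u32, so a 64-bit epoch value gets narrowed at the firmware boundary and out-of-range dates wrap. A plain C illustration of that narrowing (just the cast, not the PDC interface itself):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int64_t secs = 0x100000000LL + 12345; /* an epoch value needing more than 32 bits */
	uint32_t fw_arg = (uint32_t)secs;     /* what a u32 firmware parameter would see  */

	printf("wanted %lld, firmware gets %u\n",
	       (long long)secs, fw_arg);      /* prints 12345: the value wrapped */
	return 0;
}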
@@ -245,33 +251,14 @@ void __init time_init(void)
static int __init init_cr16_clocksource(void)
{
/*
- * The cr16 interval timers are not syncronized across CPUs on
- * different sockets, so mark them unstable and lower rating on
- * multi-socket SMP systems.
+ * The cr16 interval timers are not synchronized across CPUs.
*/
if (num_online_cpus() > 1 && !running_on_qemu) {
- int cpu;
- unsigned long cpu0_loc;
- cpu0_loc = per_cpu(cpu_data, 0).cpu_loc;
-
- for_each_online_cpu(cpu) {
- if (cpu == 0)
- continue;
- if ((cpu0_loc != 0) &&
- (cpu0_loc == per_cpu(cpu_data, cpu).cpu_loc))
- continue;
-
- clocksource_cr16.name = "cr16_unstable";
- clocksource_cr16.flags = CLOCK_SOURCE_UNSTABLE;
- clocksource_cr16.rating = 0;
- break;
- }
+ clocksource_cr16.name = "cr16_unstable";
+ clocksource_cr16.flags = CLOCK_SOURCE_UNSTABLE;
+ clocksource_cr16.rating = 0;
}
- /* XXX: We may want to mark sched_clock stable here if cr16 clocks are
- * in sync:
- * (clocksource_cr16.flags == CLOCK_SOURCE_IS_CONTINUOUS) */
-
/* register at clocksource framework */
clocksource_register_hz(&clocksource_cr16,
100 * PAGE0->mem_10msec);
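Note: with the per-socket probing gone, any SMP machine not running under QEMU now gets cr16 marked unstable with rating 0, since the per-CPU CR16 counters are not synchronized. The registration rate comes from firmware: PAGE0->mem_10msec holds the number of CR16 ticks in 10 ms, so 100 * mem_10msec is the counter frequency in Hz. A small standalone illustration of that arithmetic (the mem_10msec value below is made up):

#include <stdio.h>

int main(void)
{
	unsigned long mem_10msec = 2500000;        /* hypothetical: CR16 ticks per 10 ms   */
	unsigned long cr16_hz = 100 * mem_10msec;  /* the clocksource_register_hz() rate   */

	printf("cr16 would be registered at %lu Hz (%lu MHz)\n",
	       cr16_hz, cr16_hz / 1000000);        /* 250000000 Hz, i.e. 250 MHz */
	return 0;
}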