path: root/drivers/clocksource
Diffstat (limited to 'drivers/clocksource')
-rw-r--r--  drivers/clocksource/Kconfig | 22
-rw-r--r--  drivers/clocksource/Makefile | 2
-rw-r--r--  drivers/clocksource/acpi_pm.c | 32
-rw-r--r--  drivers/clocksource/arm_arch_timer.c | 25
-rw-r--r--  drivers/clocksource/arm_global_timer.c | 38
-rw-r--r--  drivers/clocksource/asm9260_timer.c | 1
-rw-r--r--  drivers/clocksource/dw_apb_timer.c | 39
-rw-r--r--  drivers/clocksource/exynos_mct.c | 1
-rw-r--r--  drivers/clocksource/hyperv_timer.c | 58
-rw-r--r--  drivers/clocksource/i8253.c | 49
-rw-r--r--  drivers/clocksource/ingenic-ost.c | 7
-rw-r--r--  drivers/clocksource/jcore-pit.c | 22
-rw-r--r--  drivers/clocksource/mips-gic-timer.c | 59
-rw-r--r--  drivers/clocksource/renesas-ostm.c | 3
-rw-r--r--  drivers/clocksource/samsung_pwm_timer.c | 4
-rw-r--r--  drivers/clocksource/sh_cmt.c | 13
-rw-r--r--  drivers/clocksource/timer-armada-370-xp.c | 1
-rw-r--r--  drivers/clocksource/timer-cadence-ttc.c | 6
-rw-r--r--  drivers/clocksource/timer-clint.c | 4
-rw-r--r--  drivers/clocksource/timer-gxp.c | 2
-rw-r--r--  drivers/clocksource/timer-imx-gpt.c | 3
-rw-r--r--  drivers/clocksource/timer-imx-sysctr.c | 117
-rw-r--r--  drivers/clocksource/timer-imx-tpm.c | 16
-rw-r--r--  drivers/clocksource/timer-of.c | 17
-rw-r--r--  drivers/clocksource/timer-of.h | 1
-rw-r--r--  drivers/clocksource/timer-qcom.c | 8
-rw-r--r--  drivers/clocksource/timer-ralink.c | 150
-rw-r--r--  drivers/clocksource/timer-riscv.c | 5
-rw-r--r--  drivers/clocksource/timer-rtl-otto.c | 291
-rw-r--r--  drivers/clocksource/timer-stm32.c | 4
-rw-r--r--  drivers/clocksource/timer-sun5i.c | 2
-rw-r--r--  drivers/clocksource/timer-tegra.c | 1
-rw-r--r--  drivers/clocksource/timer-tegra186.c | 2
-rw-r--r--  drivers/clocksource/timer-ti-32k.c | 2
-rw-r--r--  drivers/clocksource/timer-ti-dm-systimer.c | 8
-rw-r--r--  drivers/clocksource/timer-ti-dm.c | 11
36 files changed, 819 insertions, 207 deletions
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 34faa0320ece..487c85259967 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -134,6 +134,16 @@ config RDA_TIMER
help
Enables the support for the RDA Micro timer driver.
+config REALTEK_OTTO_TIMER
+ bool "Clocksource/timer for the Realtek Otto platform" if COMPILE_TEST
+ select TIMER_OF
+ help
+ This driver adds support for the timers found in the Realtek RTL83xx
+ and RTL93xx SoC series. This includes chips such as RTL8380, RTL8381
+ and RTL8382, as well as chips from the RTL839x series, such as RTL8390,
+ RTL8391, RTL8392, RTL8393 and RTL8396, and chips of the RTL930x series,
+ such as RTL9301, RTL9302 or RTL9303.
+
config SUN4I_TIMER
bool "Sun4i timer driver" if COMPILE_TEST
depends on HAS_IOMEM
@@ -390,7 +400,8 @@ config ARM_GT_INITIAL_PRESCALER_VAL
This affects CPU_FREQ max delta from the initial frequency.
config ARM_TIMER_SP804
- bool "Support for Dual Timer SP804 module" if COMPILE_TEST
+ bool "Support for Dual Timer SP804 module"
+ depends on ARM || ARM64 || COMPILE_TEST
depends on GENERIC_SCHED_CLOCK && HAVE_CLK
select CLKSRC_MMIO
select TIMER_OF if OF
@@ -743,4 +754,13 @@ config EP93XX_TIMER
Enables support for the Cirrus Logic timer block
EP93XX.
+config RALINK_TIMER
+ bool "Ralink System Tick Counter"
+ depends on SOC_RT305X || SOC_MT7620 || COMPILE_TEST
+ select CLKSRC_MMIO
+ select TIMER_OF
+ help
+ Enables support for the system tick counter present on
+ the Ralink RT3352 and MT7620 SoCs.
+
endmenu
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index 4bb856e4df55..43ef16a4efa6 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -59,6 +59,7 @@ obj-$(CONFIG_MILBEAUT_TIMER) += timer-milbeaut.o
obj-$(CONFIG_SPRD_TIMER) += timer-sprd.o
obj-$(CONFIG_NPCM7XX_TIMER) += timer-npcm7xx.o
obj-$(CONFIG_RDA_TIMER) += timer-rda.o
+obj-$(CONFIG_REALTEK_OTTO_TIMER) += timer-rtl-otto.o
obj-$(CONFIG_ARC_TIMERS) += arc_timer.o
obj-$(CONFIG_ARM_ARCH_TIMER) += arm_arch_timer.o
@@ -90,3 +91,4 @@ obj-$(CONFIG_GOLDFISH_TIMER) += timer-goldfish.o
obj-$(CONFIG_GXP_TIMER) += timer-gxp.o
obj-$(CONFIG_CLKSRC_LOONGSON1_PWM) += timer-loongson1-pwm.o
obj-$(CONFIG_EP93XX_TIMER) += timer-ep93xx.o
+obj-$(CONFIG_RALINK_TIMER) += timer-ralink.o
diff --git a/drivers/clocksource/acpi_pm.c b/drivers/clocksource/acpi_pm.c
index 82338773602c..b4330a01a566 100644
--- a/drivers/clocksource/acpi_pm.c
+++ b/drivers/clocksource/acpi_pm.c
@@ -25,6 +25,10 @@
#include <asm/io.h>
#include <asm/time.h>
+static void *suspend_resume_cb_data;
+
+static void (*suspend_resume_callback)(void *data, bool suspend);
+
/*
* The I/O port the PMTMR resides at.
* The location is detected during setup_arch(),
@@ -58,6 +62,32 @@ u32 acpi_pm_read_verified(void)
return v2;
}
+void acpi_pmtmr_register_suspend_resume_callback(void (*cb)(void *data, bool suspend), void *data)
+{
+ suspend_resume_callback = cb;
+ suspend_resume_cb_data = data;
+}
+EXPORT_SYMBOL_GPL(acpi_pmtmr_register_suspend_resume_callback);
+
+void acpi_pmtmr_unregister_suspend_resume_callback(void)
+{
+ suspend_resume_callback = NULL;
+ suspend_resume_cb_data = NULL;
+}
+EXPORT_SYMBOL_GPL(acpi_pmtmr_unregister_suspend_resume_callback);
+
+static void acpi_pm_suspend(struct clocksource *cs)
+{
+ if (suspend_resume_callback)
+ suspend_resume_callback(suspend_resume_cb_data, true);
+}
+
+static void acpi_pm_resume(struct clocksource *cs)
+{
+ if (suspend_resume_callback)
+ suspend_resume_callback(suspend_resume_cb_data, false);
+}
+
static u64 acpi_pm_read(struct clocksource *cs)
{
return (u64)read_pmtmr();
@@ -69,6 +99,8 @@ static struct clocksource clocksource_acpi_pm = {
.read = acpi_pm_read,
.mask = (u64)ACPI_PM_MASK,
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
+ .suspend = acpi_pm_suspend,
+ .resume = acpi_pm_resume,
};
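As an illustration (not part of the patch above), a consumer could hook the ACPI PM timer suspend/resume transitions through the new registration API. The consumer_* names below are hypothetical, and the prototypes are assumed to be exposed via <linux/acpi_pmtmr.h>:

#include <linux/acpi_pmtmr.h>

struct consumer_state {
	bool pmtmr_suspended;
};

static struct consumer_state state;

/* Called from the clocksource ->suspend/->resume hooks added above. */
static void consumer_pmtmr_notify(void *data, bool suspend)
{
	struct consumer_state *s = data;

	s->pmtmr_suspended = suspend;
}

static int __init consumer_init(void)
{
	acpi_pmtmr_register_suspend_resume_callback(consumer_pmtmr_notify, &state);
	return 0;
}

static void __exit consumer_exit(void)
{
	acpi_pmtmr_unregister_suspend_resume_callback();
}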
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index e054de92de91..808f259781fd 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -331,7 +331,7 @@ static u64 notrace hisi_161010101_read_cntvct_el0(void)
return __hisi_161010101_read_reg(cntvct_el0);
}
-static struct ate_acpi_oem_info hisi_161010101_oem_info[] = {
+static const struct ate_acpi_oem_info hisi_161010101_oem_info[] = {
/*
* Note that trailing spaces are required to properly match
* the OEM table information.
@@ -1179,8 +1179,6 @@ static void arch_timer_stop(struct clock_event_device *clk)
disable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi]);
if (arch_timer_has_nonsecure_ppi())
disable_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI]);
-
- clk->set_state_shutdown(clk);
}
static int arch_timer_dying_cpu(unsigned int cpu)
@@ -1430,7 +1428,7 @@ static int __init arch_timer_of_init(struct device_node *np)
arch_timers_present |= ARCH_TIMER_TYPE_CP15;
- has_names = of_property_read_bool(np, "interrupt-names");
+ has_names = of_property_present(np, "interrupt-names");
for (i = ARCH_TIMER_PHYS_SECURE_PPI; i < ARCH_TIMER_MAX_TIMER_PPI; i++) {
if (has_names)
@@ -1556,7 +1554,7 @@ static int __init
arch_timer_mem_frame_register(struct arch_timer_mem_frame *frame)
{
void __iomem *base;
- int ret, irq = 0;
+ int ret, irq;
if (arch_timer_mem_use_virtual)
irq = frame->virt_irq;
@@ -1594,7 +1592,6 @@ static int __init arch_timer_mem_of_init(struct device_node *np)
{
struct arch_timer_mem *timer_mem;
struct arch_timer_mem_frame *frame;
- struct device_node *frame_node;
struct resource res;
int ret = -EINVAL;
u32 rate;
@@ -1608,33 +1605,29 @@ static int __init arch_timer_mem_of_init(struct device_node *np)
timer_mem->cntctlbase = res.start;
timer_mem->size = resource_size(&res);
- for_each_available_child_of_node(np, frame_node) {
+ for_each_available_child_of_node_scoped(np, frame_node) {
u32 n;
struct arch_timer_mem_frame *frame;
if (of_property_read_u32(frame_node, "frame-number", &n)) {
pr_err(FW_BUG "Missing frame-number.\n");
- of_node_put(frame_node);
goto out;
}
if (n >= ARCH_TIMER_MEM_MAX_FRAMES) {
pr_err(FW_BUG "Wrong frame-number, only 0-%u are permitted.\n",
ARCH_TIMER_MEM_MAX_FRAMES - 1);
- of_node_put(frame_node);
goto out;
}
frame = &timer_mem->frame[n];
if (frame->valid) {
pr_err(FW_BUG "Duplicated frame-number.\n");
- of_node_put(frame_node);
goto out;
}
- if (of_address_to_resource(frame_node, 0, &res)) {
- of_node_put(frame_node);
+ if (of_address_to_resource(frame_node, 0, &res))
goto out;
- }
+
frame->cntbase = res.start;
frame->size = resource_size(&res);
@@ -1807,7 +1800,7 @@ TIMER_ACPI_DECLARE(arch_timer, ACPI_SIG_GTDT, arch_timer_acpi_init);
#endif
int kvm_arch_ptp_get_crosststamp(u64 *cycle, struct timespec64 *ts,
- struct clocksource **cs)
+ enum clocksource_ids *cs_id)
{
struct arm_smccc_res hvc_res;
u32 ptp_counter;
@@ -1831,8 +1824,8 @@ int kvm_arch_ptp_get_crosststamp(u64 *cycle, struct timespec64 *ts,
*ts = ktime_to_timespec64(ktime);
if (cycle)
*cycle = (u64)hvc_res.a2 << 32 | hvc_res.a3;
- if (cs)
- *cs = &clocksource_counter;
+ if (cs_id)
+ *cs_id = CSID_ARM_ARCH_COUNTER;
return 0;
}
diff --git a/drivers/clocksource/arm_global_timer.c b/drivers/clocksource/arm_global_timer.c
index 44a61dc6f932..2d86bbc2764a 100644
--- a/drivers/clocksource/arm_global_timer.c
+++ b/drivers/clocksource/arm_global_timer.c
@@ -9,6 +9,7 @@
#include <linux/init.h>
#include <linux/interrupt.h>
+#include <linux/bitfield.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/cpu.h>
@@ -31,10 +32,7 @@
#define GT_CONTROL_COMP_ENABLE BIT(1) /* banked */
#define GT_CONTROL_IRQ_ENABLE BIT(2) /* banked */
#define GT_CONTROL_AUTO_INC BIT(3) /* banked */
-#define GT_CONTROL_PRESCALER_SHIFT 8
-#define GT_CONTROL_PRESCALER_MAX 0xF
-#define GT_CONTROL_PRESCALER_MASK (GT_CONTROL_PRESCALER_MAX << \
- GT_CONTROL_PRESCALER_SHIFT)
+#define GT_CONTROL_PRESCALER_MASK GENMASK(15, 8)
#define GT_INT_STATUS 0x0c
#define GT_INT_STATUS_EVENT_FLAG BIT(0)
@@ -52,7 +50,8 @@
*/
static void __iomem *gt_base;
static struct notifier_block gt_clk_rate_change_nb;
-static u32 gt_psv_new, gt_psv_bck, gt_target_rate;
+static u32 gt_psv_new, gt_psv_bck;
+static unsigned long gt_target_rate;
static int gt_ppi;
static struct clock_event_device __percpu *gt_evt;
@@ -88,7 +87,7 @@ static u64 gt_counter_read(void)
return _gt_counter_read();
}
-/**
+/*
* To ensure that updates to comparator value register do not set the
* Interrupt Status Register proceed as follows:
* 1. Clear the Comp Enable bit in the Timer Control Register.
@@ -196,7 +195,6 @@ static int gt_dying_cpu(unsigned int cpu)
{
struct clock_event_device *clk = this_cpu_ptr(gt_evt);
- gt_clockevent_shutdown(clk);
disable_percpu_irq(clk->irq);
return 0;
}
@@ -247,7 +245,7 @@ static void gt_write_presc(u32 psv)
reg = readl(gt_base + GT_CONTROL);
reg &= ~GT_CONTROL_PRESCALER_MASK;
- reg |= psv << GT_CONTROL_PRESCALER_SHIFT;
+ reg |= FIELD_PREP(GT_CONTROL_PRESCALER_MASK, psv);
writel(reg, gt_base + GT_CONTROL);
}
@@ -256,8 +254,7 @@ static u32 gt_read_presc(void)
u32 reg;
reg = readl(gt_base + GT_CONTROL);
- reg &= GT_CONTROL_PRESCALER_MASK;
- return reg >> GT_CONTROL_PRESCALER_SHIFT;
+ return FIELD_GET(GT_CONTROL_PRESCALER_MASK, reg);
}
static void __init gt_delay_timer_init(void)
@@ -272,9 +269,9 @@ static int __init gt_clocksource_init(void)
writel(0, gt_base + GT_COUNTER0);
writel(0, gt_base + GT_COUNTER1);
/* set prescaler and enable timer on all the cores */
- writel(((CONFIG_ARM_GT_INITIAL_PRESCALER_VAL - 1) <<
- GT_CONTROL_PRESCALER_SHIFT)
- | GT_CONTROL_TIMER_ENABLE, gt_base + GT_CONTROL);
+ writel(FIELD_PREP(GT_CONTROL_PRESCALER_MASK,
+ CONFIG_ARM_GT_INITIAL_PRESCALER_VAL - 1) |
+ GT_CONTROL_TIMER_ENABLE, gt_base + GT_CONTROL);
#ifdef CONFIG_CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK
sched_clock_register(gt_sched_clock_read, 64, gt_target_rate);
@@ -290,18 +287,17 @@ static int gt_clk_rate_change_cb(struct notifier_block *nb,
switch (event) {
case PRE_RATE_CHANGE:
{
- int psv;
-
- psv = DIV_ROUND_CLOSEST(ndata->new_rate,
- gt_target_rate);
+ unsigned long psv;
- if (abs(gt_target_rate - (ndata->new_rate / psv)) > MAX_F_ERR)
+ psv = DIV_ROUND_CLOSEST(ndata->new_rate, gt_target_rate);
+ if (!psv ||
+ abs(gt_target_rate - (ndata->new_rate / psv)) > MAX_F_ERR)
return NOTIFY_BAD;
psv--;
/* prescaler within legal range? */
- if (psv < 0 || psv > GT_CONTROL_PRESCALER_MAX)
+ if (!FIELD_FIT(GT_CONTROL_PRESCALER_MASK, psv))
return NOTIFY_BAD;
/*
@@ -346,7 +342,7 @@ static int __init global_timer_of_register(struct device_node *np)
{
struct clk *gt_clk;
static unsigned long gt_clk_rate;
- int err = 0;
+ int err;
/*
* In A9 r2p0 the comparators for each processor with the global timer
@@ -411,7 +407,7 @@ static int __init global_timer_of_register(struct device_node *np)
err = gt_clocksource_init();
if (err)
goto out_irq;
-
+
err = cpuhp_setup_state(CPUHP_AP_ARM_GLOBAL_TIMER_STARTING,
"clockevents/arm/global_timer:starting",
gt_starting_cpu, gt_dying_cpu);
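For reference, the <linux/bitfield.h> helpers that the conversion above switches to operate on the mask directly; with GT_CONTROL_PRESCALER_MASK = GENMASK(15, 8), for example:

	FIELD_PREP(GENMASK(15, 8), 0x3)   == 0x3 << 8 == 0x300
	FIELD_GET(GENMASK(15, 8), 0x300)  == 0x3
	FIELD_FIT(GENMASK(15, 8), 0x1ff)  == false  /* 0x1ff needs 9 bits, the field holds 8 */

which is why the separate GT_CONTROL_PRESCALER_SHIFT/MAX definitions and the open-coded range check are no longer needed.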
diff --git a/drivers/clocksource/asm9260_timer.c b/drivers/clocksource/asm9260_timer.c
index 5b39d3701fa3..8f97ab0b01ec 100644
--- a/drivers/clocksource/asm9260_timer.c
+++ b/drivers/clocksource/asm9260_timer.c
@@ -210,6 +210,7 @@ static int __init asm9260_timer_init(struct device_node *np)
DRIVER_NAME, &event_dev);
if (ret) {
pr_err("Failed to setup irq!\n");
+ clk_disable_unprepare(clk);
return ret;
}
diff --git a/drivers/clocksource/dw_apb_timer.c b/drivers/clocksource/dw_apb_timer.c
index f5f24a95ee82..3a55ae5fe225 100644
--- a/drivers/clocksource/dw_apb_timer.c
+++ b/drivers/clocksource/dw_apb_timer.c
@@ -68,25 +68,6 @@ static inline void apbt_writel_relaxed(struct dw_apb_timer *timer, u32 val,
writel_relaxed(val, timer->base + offs);
}
-static void apbt_disable_int(struct dw_apb_timer *timer)
-{
- u32 ctrl = apbt_readl(timer, APBTMR_N_CONTROL);
-
- ctrl |= APBTMR_CONTROL_INT;
- apbt_writel(timer, ctrl, APBTMR_N_CONTROL);
-}
-
-/**
- * dw_apb_clockevent_pause() - stop the clock_event_device from running
- *
- * @dw_ced: The APB clock to stop generating events.
- */
-void dw_apb_clockevent_pause(struct dw_apb_clock_event_device *dw_ced)
-{
- disable_irq(dw_ced->timer.irq);
- apbt_disable_int(&dw_ced->timer);
-}
-
static void apbt_eoi(struct dw_apb_timer *timer)
{
apbt_readl_relaxed(timer, APBTMR_N_EOI);
@@ -285,26 +266,6 @@ dw_apb_clockevent_init(int cpu, const char *name, unsigned rating,
}
/**
- * dw_apb_clockevent_resume() - resume a clock that has been paused.
- *
- * @dw_ced: The APB clock to resume.
- */
-void dw_apb_clockevent_resume(struct dw_apb_clock_event_device *dw_ced)
-{
- enable_irq(dw_ced->timer.irq);
-}
-
-/**
- * dw_apb_clockevent_stop() - stop the clock_event_device and release the IRQ.
- *
- * @dw_ced: The APB clock to stop generating the events.
- */
-void dw_apb_clockevent_stop(struct dw_apb_clock_event_device *dw_ced)
-{
- free_irq(dw_ced->timer.irq, &dw_ced->ced);
-}
-
-/**
* dw_apb_clockevent_register() - register the clock with the generic layer
*
* @dw_ced: The APB clock to register as a clock_event_device.
diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
index ef8cb1b71be4..e6a02e351d77 100644
--- a/drivers/clocksource/exynos_mct.c
+++ b/drivers/clocksource/exynos_mct.c
@@ -496,7 +496,6 @@ static int exynos4_mct_dying_cpu(unsigned int cpu)
per_cpu_ptr(&percpu_mct_tick, cpu);
struct clock_event_device *evt = &mevt->evt;
- evt->set_state_shutdown(evt);
if (mct_int_type == MCT_INT_SPI) {
if (evt->irq != -1)
disable_irq_nosync(evt->irq);
diff --git a/drivers/clocksource/hyperv_timer.c b/drivers/clocksource/hyperv_timer.c
index 8ff7cd4e20bb..f00019b078a7 100644
--- a/drivers/clocksource/hyperv_timer.c
+++ b/drivers/clocksource/hyperv_timer.c
@@ -23,11 +23,12 @@
#include <linux/acpi.h>
#include <linux/hyperv.h>
#include <clocksource/hyperv_timer.h>
-#include <asm/hyperv-tlfs.h>
+#include <hyperv/hvhdk.h>
#include <asm/mshyperv.h>
static struct clock_event_device __percpu *hv_clock_event;
-static u64 hv_sched_clock_offset __ro_after_init;
+/* Note: offset can hold negative values after hibernation. */
+static u64 hv_sched_clock_offset __read_mostly;
/*
* If false, we're using the old mechanism for stimer0 interrupts
@@ -81,14 +82,14 @@ static int hv_ce_set_next_event(unsigned long delta,
current_tick = hv_read_reference_counter();
current_tick += delta;
- hv_set_register(HV_REGISTER_STIMER0_COUNT, current_tick);
+ hv_set_msr(HV_MSR_STIMER0_COUNT, current_tick);
return 0;
}
static int hv_ce_shutdown(struct clock_event_device *evt)
{
- hv_set_register(HV_REGISTER_STIMER0_COUNT, 0);
- hv_set_register(HV_REGISTER_STIMER0_CONFIG, 0);
+ hv_set_msr(HV_MSR_STIMER0_COUNT, 0);
+ hv_set_msr(HV_MSR_STIMER0_CONFIG, 0);
if (direct_mode_enabled && stimer0_irq >= 0)
disable_percpu_irq(stimer0_irq);
@@ -119,7 +120,7 @@ static int hv_ce_set_oneshot(struct clock_event_device *evt)
timer_cfg.direct_mode = 0;
timer_cfg.sintx = stimer0_message_sint;
}
- hv_set_register(HV_REGISTER_STIMER0_CONFIG, timer_cfg.as_uint64);
+ hv_set_msr(HV_MSR_STIMER0_CONFIG, timer_cfg.as_uint64);
return 0;
}
@@ -137,7 +138,21 @@ static int hv_stimer_init(unsigned int cpu)
ce->name = "Hyper-V clockevent";
ce->features = CLOCK_EVT_FEAT_ONESHOT;
ce->cpumask = cpumask_of(cpu);
- ce->rating = 1000;
+
+ /*
+ * Lower the rating of the Hyper-V timer in a TDX VM without paravisor,
+ * so the local APIC timer (lapic_clockevent) is the default timer in
+ * such a VM. The Hyper-V timer is not preferred in such a VM because
+ * it depends on the slow VM Reference Counter MSR (the Hyper-V TSC
+ * page is not enabled in such a VM because the VM uses Invariant TSC
+ * as a better clocksource and it's challenging to mark the Hyper-V
+ * TSC page shared in very early boot).
+ */
+ if (!ms_hyperv.paravisor_present && hv_isolation_type_tdx())
+ ce->rating = 90;
+ else
+ ce->rating = 1000;
+
ce->set_state_shutdown = hv_ce_shutdown;
ce->set_state_oneshot = hv_ce_set_oneshot;
ce->set_next_event = hv_ce_set_next_event;
@@ -372,11 +387,11 @@ static __always_inline u64 read_hv_clock_msr(void)
* is set to 0 when the partition is created and is incremented in 100
* nanosecond units.
*
- * Use hv_raw_get_register() because this function is used from
- * noinstr. Notable; while HV_REGISTER_TIME_REF_COUNT is a synthetic
+ * Use hv_raw_get_msr() because this function is used from
+ * noinstr. Notably, while HV_MSR_TIME_REF_COUNT is a synthetic
* register it doesn't need the GHCB path.
*/
- return hv_raw_get_register(HV_REGISTER_TIME_REF_COUNT);
+ return hv_raw_get_msr(HV_MSR_TIME_REF_COUNT);
}
/*
@@ -439,9 +454,9 @@ static void suspend_hv_clock_tsc(struct clocksource *arg)
union hv_reference_tsc_msr tsc_msr;
/* Disable the TSC page */
- tsc_msr.as_uint64 = hv_get_register(HV_REGISTER_REFERENCE_TSC);
+ tsc_msr.as_uint64 = hv_get_msr(HV_MSR_REFERENCE_TSC);
tsc_msr.enable = 0;
- hv_set_register(HV_REGISTER_REFERENCE_TSC, tsc_msr.as_uint64);
+ hv_set_msr(HV_MSR_REFERENCE_TSC, tsc_msr.as_uint64);
}
@@ -450,10 +465,21 @@ static void resume_hv_clock_tsc(struct clocksource *arg)
union hv_reference_tsc_msr tsc_msr;
/* Re-enable the TSC page */
- tsc_msr.as_uint64 = hv_get_register(HV_REGISTER_REFERENCE_TSC);
+ tsc_msr.as_uint64 = hv_get_msr(HV_MSR_REFERENCE_TSC);
tsc_msr.enable = 1;
tsc_msr.pfn = tsc_pfn;
- hv_set_register(HV_REGISTER_REFERENCE_TSC, tsc_msr.as_uint64);
+ hv_set_msr(HV_MSR_REFERENCE_TSC, tsc_msr.as_uint64);
+}
+
+/*
+ * Called during resume from hibernation, from overridden
+ * x86_platform.restore_sched_clock_state routine. This is to adjust offsets
+ * used to calculate time for hv tsc page based sched_clock, to account for
+ * time spent before hibernation.
+ */
+void hv_adj_sched_clock_offset(u64 offset)
+{
+ hv_sched_clock_offset -= offset;
}
#ifdef HAVE_VDSO_CLOCKMODE_HVCLOCK
@@ -555,14 +581,14 @@ static void __init hv_init_tsc_clocksource(void)
* thus TSC clocksource will work even without the real TSC page
* mapped.
*/
- tsc_msr.as_uint64 = hv_get_register(HV_REGISTER_REFERENCE_TSC);
+ tsc_msr.as_uint64 = hv_get_msr(HV_MSR_REFERENCE_TSC);
if (hv_root_partition)
tsc_pfn = tsc_msr.pfn;
else
tsc_pfn = HVPFN_DOWN(virt_to_phys(tsc_page));
tsc_msr.enable = 1;
tsc_msr.pfn = tsc_pfn;
- hv_set_register(HV_REGISTER_REFERENCE_TSC, tsc_msr.as_uint64);
+ hv_set_msr(HV_MSR_REFERENCE_TSC, tsc_msr.as_uint64);
clocksource_register_hz(&hyperv_cs_tsc, NSEC_PER_SEC/100);
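A sketch (not part of the patch) of how arch code might use the new hv_adj_sched_clock_offset() helper around hibernation; the example_* hook names and the saved counter variable are hypothetical, while hv_adj_sched_clock_offset() and hv_read_reference_counter() come from this driver:

static u64 ref_count_at_suspend;

static void example_save_sched_clock_state(void)
{
	ref_count_at_suspend = hv_read_reference_counter();
}

static void example_restore_sched_clock_state(void)
{
	/*
	 * The helper subtracts its argument from hv_sched_clock_offset, so
	 * passing (ref_at_suspend - ref_now) yields
	 * offset_new = offset_old - (ref_at_suspend - ref_now),
	 * keeping sched_clock() continuous across the resume even though the
	 * reference counter has restarted.
	 */
	hv_adj_sched_clock_offset(ref_count_at_suspend - hv_read_reference_counter());
}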
diff --git a/drivers/clocksource/i8253.c b/drivers/clocksource/i8253.c
index d4350bb10b83..39f7c2d736d1 100644
--- a/drivers/clocksource/i8253.c
+++ b/drivers/clocksource/i8253.c
@@ -20,13 +20,6 @@
DEFINE_RAW_SPINLOCK(i8253_lock);
EXPORT_SYMBOL(i8253_lock);
-/*
- * Handle PIT quirk in pit_shutdown() where zeroing the counter register
- * restarts the PIT, negating the shutdown. On platforms with the quirk,
- * platform specific code can set this to false.
- */
-bool i8253_clear_counter_on_shutdown __ro_after_init = true;
-
#ifdef CONFIG_CLKSRC_I8253
/*
* Since the PIT overflows every tick, its not very useful
@@ -108,21 +101,47 @@ int __init clocksource_i8253_init(void)
#endif
#ifdef CONFIG_CLKEVT_I8253
-static int pit_shutdown(struct clock_event_device *evt)
+void clockevent_i8253_disable(void)
{
- if (!clockevent_state_oneshot(evt) && !clockevent_state_periodic(evt))
- return 0;
-
raw_spin_lock(&i8253_lock);
+ /*
+ * Writing the MODE register should stop the counter, according to
+ * the datasheet. This appears to work on real hardware (well, on
+ * modern Intel and AMD boxes; I didn't dig the Pegasos out of the
+ * shed).
+ *
+ * However, some virtual implementations differ, and the MODE change
+ * doesn't have any effect until either the counter is written (KVM
+ * in-kernel PIT) or the next interrupt (QEMU). And in those cases,
+ * it may not stop the *count*, only the interrupts. Although in
+ * the virt case, that probably doesn't matter, as the value of the
+ * counter will only be calculated on demand if the guest reads it;
+ * it's the interrupts which cause steal time.
+ *
+ * Hyper-V apparently has a bug where even in mode 0, the IRQ keeps
+ * firing repeatedly if the counter is running. But it *does* do the
+ * right thing when the MODE register is written.
+ *
+ * So: write the MODE and then load the counter, which ensures that
+ * the IRQ is stopped on those buggy virt implementations. And then
+ * write the MODE again, which is the right way to stop it.
+ */
outb_p(0x30, PIT_MODE);
+ outb_p(0, PIT_CH0);
+ outb_p(0, PIT_CH0);
- if (i8253_clear_counter_on_shutdown) {
- outb_p(0, PIT_CH0);
- outb_p(0, PIT_CH0);
- }
+ outb_p(0x30, PIT_MODE);
raw_spin_unlock(&i8253_lock);
+}
+
+static int pit_shutdown(struct clock_event_device *evt)
+{
+ if (!clockevent_state_oneshot(evt) && !clockevent_state_periodic(evt))
+ return 0;
+
+ clockevent_i8253_disable();
return 0;
}
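With the i8253_clear_counter_on_shutdown flag removed, code outside this driver that needs to quiesce PIT channel 0 can call the newly exported helper instead of poking the ports itself. A minimal sketch (not part of the patch), assuming the prototype is made visible through <linux/i8253.h> alongside i8253_lock and that CONFIG_CLKEVT_I8253 is enabled:

#include <linux/i8253.h>

static void example_quiesce_pit(void)
{
	/* Stops the PIT interrupt (and, where the hardware honours it, the count). */
	clockevent_i8253_disable();
}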
diff --git a/drivers/clocksource/ingenic-ost.c b/drivers/clocksource/ingenic-ost.c
index 9f7c280a1336..e0ec33307c84 100644
--- a/drivers/clocksource/ingenic-ost.c
+++ b/drivers/clocksource/ingenic-ost.c
@@ -93,14 +93,10 @@ static int __init ingenic_ost_probe(struct platform_device *pdev)
return PTR_ERR(map);
}
- ost->clk = devm_clk_get(dev, "ost");
+ ost->clk = devm_clk_get_enabled(dev, "ost");
if (IS_ERR(ost->clk))
return PTR_ERR(ost->clk);
- err = clk_prepare_enable(ost->clk);
- if (err)
- return err;
-
/* Clear counter high/low registers */
if (soc_info->is64bit)
regmap_write(map, TCU_REG_OST_CNTL, 0);
@@ -129,7 +125,6 @@ static int __init ingenic_ost_probe(struct platform_device *pdev)
err = clocksource_register_hz(cs, rate);
if (err) {
dev_err(dev, "clocksource registration failed");
- clk_disable_unprepare(ost->clk);
return err;
}
diff --git a/drivers/clocksource/jcore-pit.c b/drivers/clocksource/jcore-pit.c
index a4a991101fa3..82815428f8f9 100644
--- a/drivers/clocksource/jcore-pit.c
+++ b/drivers/clocksource/jcore-pit.c
@@ -114,13 +114,25 @@ static int jcore_pit_local_init(unsigned cpu)
pit->periodic_delta = DIV_ROUND_CLOSEST(NSEC_PER_SEC, HZ * buspd);
clockevents_config_and_register(&pit->ced, freq, 1, ULONG_MAX);
+ enable_percpu_irq(pit->ced.irq, IRQ_TYPE_NONE);
+
+ return 0;
+}
+
+static int jcore_pit_local_teardown(unsigned cpu)
+{
+ struct jcore_pit *pit = this_cpu_ptr(jcore_pit_percpu);
+
+ pr_info("Local J-Core PIT teardown on cpu %u\n", cpu);
+
+ disable_percpu_irq(pit->ced.irq);
return 0;
}
static irqreturn_t jcore_timer_interrupt(int irq, void *dev_id)
{
- struct jcore_pit *pit = this_cpu_ptr(dev_id);
+ struct jcore_pit *pit = dev_id;
if (clockevent_state_oneshot(&pit->ced))
jcore_pit_disable(pit);
@@ -168,9 +180,9 @@ static int __init jcore_pit_init(struct device_node *node)
return -ENOMEM;
}
- err = request_irq(pit_irq, jcore_timer_interrupt,
- IRQF_TIMER | IRQF_PERCPU,
- "jcore_pit", jcore_pit_percpu);
+ irq_set_percpu_devid(pit_irq);
+ err = request_percpu_irq(pit_irq, jcore_timer_interrupt,
+ "jcore_pit", jcore_pit_percpu);
if (err) {
pr_err("pit irq request failed: %d\n", err);
free_percpu(jcore_pit_percpu);
@@ -238,7 +250,7 @@ static int __init jcore_pit_init(struct device_node *node)
cpuhp_setup_state(CPUHP_AP_JCORE_TIMER_STARTING,
"clockevents/jcore:starting",
- jcore_pit_local_init, NULL);
+ jcore_pit_local_init, jcore_pit_local_teardown);
return 0;
}
diff --git a/drivers/clocksource/mips-gic-timer.c b/drivers/clocksource/mips-gic-timer.c
index b3ae38f36720..7907b740497a 100644
--- a/drivers/clocksource/mips-gic-timer.c
+++ b/drivers/clocksource/mips-gic-timer.c
@@ -19,6 +19,7 @@
static DEFINE_PER_CPU(struct clock_event_device, gic_clockevent_device);
static int gic_timer_irq;
static unsigned int gic_frequency;
+static unsigned int gic_count_width;
static bool __read_mostly gic_clock_unstable;
static void gic_clocksource_unstable(char *reason);
@@ -165,6 +166,37 @@ static u64 gic_hpt_read(struct clocksource *cs)
return gic_read_count();
}
+static u64 gic_hpt_read_multicluster(struct clocksource *cs)
+{
+ unsigned int hi, hi2, lo;
+ u64 count;
+
+ mips_cm_lock_other(0, 0, 0, CM_GCR_Cx_OTHER_BLOCK_GLOBAL);
+
+ if (mips_cm_is64) {
+ count = read_gic_redir_counter();
+ goto out;
+ }
+
+ hi = read_gic_redir_counter_32h();
+ while (true) {
+ lo = read_gic_redir_counter_32l();
+
+ /* If hi didn't change then lo didn't wrap & we're done */
+ hi2 = read_gic_redir_counter_32h();
+ if (hi2 == hi)
+ break;
+
+ /* Otherwise, repeat with the latest hi value */
+ hi = hi2;
+ }
+
+ count = (((u64)hi) << 32) + lo;
+out:
+ mips_cm_unlock_other();
+ return count;
+}
+
static struct clocksource gic_clocksource = {
.name = "GIC",
.read = gic_hpt_read,
@@ -186,18 +218,26 @@ static void gic_clocksource_unstable(char *reason)
static int __init __gic_clocksource_init(void)
{
- unsigned int count_width;
int ret;
/* Set clocksource mask. */
- count_width = read_gic_config() & GIC_CONFIG_COUNTBITS;
- count_width >>= __ffs(GIC_CONFIG_COUNTBITS);
- count_width *= 4;
- count_width += 32;
- gic_clocksource.mask = CLOCKSOURCE_MASK(count_width);
+ gic_count_width = read_gic_config() & GIC_CONFIG_COUNTBITS;
+ gic_count_width >>= __ffs(GIC_CONFIG_COUNTBITS);
+ gic_count_width *= 4;
+ gic_count_width += 32;
+ gic_clocksource.mask = CLOCKSOURCE_MASK(gic_count_width);
/* Calculate a somewhat reasonable rating value. */
- gic_clocksource.rating = 200 + gic_frequency / 10000000;
+ if (mips_cm_revision() >= CM_REV_CM3 || !IS_ENABLED(CONFIG_CPU_FREQ))
+ gic_clocksource.rating = 300; /* Good when frequency is stable */
+ else
+ gic_clocksource.rating = 200;
+ gic_clocksource.rating += clamp(gic_frequency / 10000000, 0, 99);
+
+ if (mips_cps_multicluster_cpus()) {
+ gic_clocksource.read = &gic_hpt_read_multicluster;
+ gic_clocksource.vdso_clock_mode = VDSO_CLOCKMODE_NONE;
+ }
ret = clocksource_register_hz(&gic_clocksource, gic_frequency);
if (ret < 0)
@@ -257,10 +297,11 @@ static int __init gic_clocksource_of_init(struct device_node *node)
* stable CPU frequency or on the platforms with CM3 and CPU frequency
* change performed by the CPC core clocks divider.
*/
- if (mips_cm_revision() >= CM_REV_CM3 || !IS_ENABLED(CONFIG_CPU_FREQ)) {
+ if ((mips_cm_revision() >= CM_REV_CM3 || !IS_ENABLED(CONFIG_CPU_FREQ)) &&
+ !mips_cps_multicluster_cpus()) {
sched_clock_register(mips_cm_is64 ?
gic_read_count_64 : gic_read_count_2x32,
- 64, gic_frequency);
+ gic_count_width, gic_frequency);
}
return 0;
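For clarity on the width arithmetic kept above: the GIC_CONFIG_COUNTBITS field encodes the counter width as 32 + 4 * field, so a field value of 4, for example, means a 48-bit counter and a mask of CLOCKSOURCE_MASK(48). On single-cluster systems sched_clock_register() is now passed this computed gic_count_width rather than the previous hard-coded 64.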
diff --git a/drivers/clocksource/renesas-ostm.c b/drivers/clocksource/renesas-ostm.c
index 8da972dc1713..3fcbd02b2483 100644
--- a/drivers/clocksource/renesas-ostm.c
+++ b/drivers/clocksource/renesas-ostm.c
@@ -210,6 +210,7 @@ static int __init ostm_init(struct device_node *np)
pr_info("%pOF: used for clock events\n", np);
}
+ of_node_set_flag(np, OF_POPULATED);
return 0;
err_cleanup:
@@ -224,7 +225,7 @@ err_free:
TIMER_OF_DECLARE(ostm, "renesas,ostm", ostm_init);
-#ifdef CONFIG_ARCH_RZG2L
+#if defined(CONFIG_ARCH_RZG2L) || defined(CONFIG_ARCH_R9A09G057)
static int __init ostm_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
diff --git a/drivers/clocksource/samsung_pwm_timer.c b/drivers/clocksource/samsung_pwm_timer.c
index 6e46781bc9ac..b9561e3f196c 100644
--- a/drivers/clocksource/samsung_pwm_timer.c
+++ b/drivers/clocksource/samsung_pwm_timer.c
@@ -418,8 +418,6 @@ void __init samsung_pwm_clocksource_init(void __iomem *base,
static int __init samsung_pwm_alloc(struct device_node *np,
const struct samsung_pwm_variant *variant)
{
- struct property *prop;
- const __be32 *cur;
u32 val;
int i, ret;
@@ -427,7 +425,7 @@ static int __init samsung_pwm_alloc(struct device_node *np,
for (i = 0; i < SAMSUNG_PWM_NUM; ++i)
pwm.irq[i] = irq_of_parse_and_map(np, i);
- of_property_for_each_u32(np, "samsung,pwm-outputs", prop, cur, val) {
+ of_property_for_each_u32(np, "samsung,pwm-outputs", val) {
if (val >= SAMSUNG_PWM_NUM) {
pr_warn("%s: invalid channel index in samsung,pwm-outputs property\n", __func__);
continue;
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index 26919556ef5f..b72b36e0abed 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -528,6 +528,7 @@ static void sh_cmt_set_next(struct sh_cmt_channel *ch, unsigned long delta)
static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id)
{
struct sh_cmt_channel *ch = dev_id;
+ unsigned long flags;
/* clear flags */
sh_cmt_write_cmcsr(ch, sh_cmt_read_cmcsr(ch) &
@@ -558,6 +559,8 @@ static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id)
ch->flags &= ~FLAG_SKIPEVENT;
+ raw_spin_lock_irqsave(&ch->lock, flags);
+
if (ch->flags & FLAG_REPROGRAM) {
ch->flags &= ~FLAG_REPROGRAM;
sh_cmt_clock_event_program_verify(ch, 1);
@@ -570,6 +573,8 @@ static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id)
ch->flags &= ~FLAG_IRQCONTEXT;
+ raw_spin_unlock_irqrestore(&ch->lock, flags);
+
return IRQ_HANDLED;
}
@@ -780,12 +785,18 @@ static int sh_cmt_clock_event_next(unsigned long delta,
struct clock_event_device *ced)
{
struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);
+ unsigned long flags;
BUG_ON(!clockevent_state_oneshot(ced));
+
+ raw_spin_lock_irqsave(&ch->lock, flags);
+
if (likely(ch->flags & FLAG_IRQCONTEXT))
ch->next_match_value = delta - 1;
else
- sh_cmt_set_next(ch, delta - 1);
+ __sh_cmt_set_next(ch, delta - 1);
+
+ raw_spin_unlock_irqrestore(&ch->lock, flags);
return 0;
}
diff --git a/drivers/clocksource/timer-armada-370-xp.c b/drivers/clocksource/timer-armada-370-xp.c
index 6ec565d6939a..54284c1c0651 100644
--- a/drivers/clocksource/timer-armada-370-xp.c
+++ b/drivers/clocksource/timer-armada-370-xp.c
@@ -201,7 +201,6 @@ static int armada_370_xp_timer_dying_cpu(unsigned int cpu)
{
struct clock_event_device *evt = per_cpu_ptr(armada_370_xp_evt, cpu);
- evt->set_state_shutdown(evt);
disable_percpu_irq(evt->irq);
return 0;
}
diff --git a/drivers/clocksource/timer-cadence-ttc.c b/drivers/clocksource/timer-cadence-ttc.c
index ca7a06489c40..b8a1cf59b9d6 100644
--- a/drivers/clocksource/timer-cadence-ttc.c
+++ b/drivers/clocksource/timer-cadence-ttc.c
@@ -435,7 +435,7 @@ static int __init ttc_setup_clockevent(struct clk *clk,
&ttcce->ttc.clk_rate_change_nb);
if (err) {
pr_warn("Unable to register clock notifier.\n");
- goto out_kfree;
+ goto out_clk_unprepare;
}
ttcce->ttc.freq = clk_get_rate(ttcce->ttc.clk);
@@ -465,13 +465,15 @@ static int __init ttc_setup_clockevent(struct clk *clk,
err = request_irq(irq, ttc_clock_event_interrupt,
IRQF_TIMER, ttcce->ce.name, ttcce);
if (err)
- goto out_kfree;
+ goto out_clk_unprepare;
clockevents_config_and_register(&ttcce->ce,
ttcce->ttc.freq / PRESCALE, 1, 0xfffe);
return 0;
+out_clk_unprepare:
+ clk_disable_unprepare(ttcce->ttc.clk);
out_kfree:
kfree(ttcce);
return err;
diff --git a/drivers/clocksource/timer-clint.c b/drivers/clocksource/timer-clint.c
index 9a55e733ae99..0bdd9d7ec545 100644
--- a/drivers/clocksource/timer-clint.c
+++ b/drivers/clocksource/timer-clint.c
@@ -131,7 +131,7 @@ static int clint_timer_starting_cpu(unsigned int cpu)
struct clock_event_device *ce = per_cpu_ptr(&clint_clock_event, cpu);
ce->cpumask = cpumask_of(cpu);
- clockevents_config_and_register(ce, clint_timer_freq, 100, 0x7fffffff);
+ clockevents_config_and_register(ce, clint_timer_freq, 100, ULONG_MAX);
enable_percpu_irq(clint_timer_irq,
irq_get_trigger_type(clint_timer_irq));
@@ -251,7 +251,7 @@ static int __init clint_timer_init_dt(struct device_node *np)
}
irq_set_chained_handler(clint_ipi_irq, clint_ipi_interrupt);
- riscv_ipi_set_virq_range(rc, BITS_PER_BYTE, true);
+ riscv_ipi_set_virq_range(rc, BITS_PER_BYTE);
clint_clear_ipi();
#endif
diff --git a/drivers/clocksource/timer-gxp.c b/drivers/clocksource/timer-gxp.c
index 57aa2e2cce53..48a73c101eb8 100644
--- a/drivers/clocksource/timer-gxp.c
+++ b/drivers/clocksource/timer-gxp.c
@@ -85,7 +85,7 @@ static int __init gxp_timer_init(struct device_node *node)
clk = of_clk_get(node, 0);
if (IS_ERR(clk)) {
- ret = (int)PTR_ERR(clk);
+ ret = PTR_ERR(clk);
pr_err("%pOFn clock not found: %d\n", node, ret);
goto err_free;
}
diff --git a/drivers/clocksource/timer-imx-gpt.c b/drivers/clocksource/timer-imx-gpt.c
index 6a878d227a13..489e69169ed4 100644
--- a/drivers/clocksource/timer-imx-gpt.c
+++ b/drivers/clocksource/timer-imx-gpt.c
@@ -258,9 +258,8 @@ static irqreturn_t mxc_timer_interrupt(int irq, void *dev_id)
{
struct clock_event_device *ced = dev_id;
struct imx_timer *imxtm = to_imx_timer(ced);
- uint32_t tstat;
- tstat = readl_relaxed(imxtm->base + imxtm->gpt->reg_tstat);
+ readl_relaxed(imxtm->base + imxtm->gpt->reg_tstat);
imxtm->gpt->gpt_irq_acknowledge(imxtm);
diff --git a/drivers/clocksource/timer-imx-sysctr.c b/drivers/clocksource/timer-imx-sysctr.c
index 5a7a951c4efc..44525813be1e 100644
--- a/drivers/clocksource/timer-imx-sysctr.c
+++ b/drivers/clocksource/timer-imx-sysctr.c
@@ -4,48 +4,62 @@
#include <linux/interrupt.h>
#include <linux/clockchips.h>
+#include <linux/slab.h>
#include "timer-of.h"
#define CMP_OFFSET 0x10000
+#define RD_OFFSET 0x20000
#define CNTCV_LO 0x8
#define CNTCV_HI 0xc
#define CMPCV_LO (CMP_OFFSET + 0x20)
#define CMPCV_HI (CMP_OFFSET + 0x24)
#define CMPCR (CMP_OFFSET + 0x2c)
+#define CNTCV_LO_IMX95 (RD_OFFSET + 0x8)
+#define CNTCV_HI_IMX95 (RD_OFFSET + 0xc)
#define SYS_CTR_EN 0x1
#define SYS_CTR_IRQ_MASK 0x2
#define SYS_CTR_CLK_DIV 0x3
-static void __iomem *sys_ctr_base __ro_after_init;
-static u32 cmpcr __ro_after_init;
+struct sysctr_private {
+ u32 cmpcr;
+ u32 lo_off;
+ u32 hi_off;
+};
-static void sysctr_timer_enable(bool enable)
+static void sysctr_timer_enable(struct clock_event_device *evt, bool enable)
{
- writel(enable ? cmpcr | SYS_CTR_EN : cmpcr, sys_ctr_base + CMPCR);
+ struct timer_of *to = to_timer_of(evt);
+ struct sysctr_private *priv = to->private_data;
+ void __iomem *base = timer_of_base(to);
+
+ writel(enable ? priv->cmpcr | SYS_CTR_EN : priv->cmpcr, base + CMPCR);
}
-static void sysctr_irq_acknowledge(void)
+static void sysctr_irq_acknowledge(struct clock_event_device *evt)
{
/*
* clear the enable bit(EN =0) will clear
* the status bit(ISTAT = 0), then the interrupt
* signal will be negated(acknowledged).
*/
- sysctr_timer_enable(false);
+ sysctr_timer_enable(evt, false);
}
-static inline u64 sysctr_read_counter(void)
+static inline u64 sysctr_read_counter(struct clock_event_device *evt)
{
+ struct timer_of *to = to_timer_of(evt);
+ struct sysctr_private *priv = to->private_data;
+ void __iomem *base = timer_of_base(to);
u32 cnt_hi, tmp_hi, cnt_lo;
do {
- cnt_hi = readl_relaxed(sys_ctr_base + CNTCV_HI);
- cnt_lo = readl_relaxed(sys_ctr_base + CNTCV_LO);
- tmp_hi = readl_relaxed(sys_ctr_base + CNTCV_HI);
+ cnt_hi = readl_relaxed(base + priv->hi_off);
+ cnt_lo = readl_relaxed(base + priv->lo_off);
+ tmp_hi = readl_relaxed(base + priv->hi_off);
} while (tmp_hi != cnt_hi);
return ((u64) cnt_hi << 32) | cnt_lo;
@@ -54,22 +68,24 @@ static inline u64 sysctr_read_counter(void)
static int sysctr_set_next_event(unsigned long delta,
struct clock_event_device *evt)
{
+ struct timer_of *to = to_timer_of(evt);
+ void __iomem *base = timer_of_base(to);
u32 cmp_hi, cmp_lo;
u64 next;
- sysctr_timer_enable(false);
+ sysctr_timer_enable(evt, false);
- next = sysctr_read_counter();
+ next = sysctr_read_counter(evt);
next += delta;
cmp_hi = (next >> 32) & 0x00fffff;
cmp_lo = next & 0xffffffff;
- writel_relaxed(cmp_hi, sys_ctr_base + CMPCV_HI);
- writel_relaxed(cmp_lo, sys_ctr_base + CMPCV_LO);
+ writel_relaxed(cmp_hi, base + CMPCV_HI);
+ writel_relaxed(cmp_lo, base + CMPCV_LO);
- sysctr_timer_enable(true);
+ sysctr_timer_enable(evt, true);
return 0;
}
@@ -81,7 +97,7 @@ static int sysctr_set_state_oneshot(struct clock_event_device *evt)
static int sysctr_set_state_shutdown(struct clock_event_device *evt)
{
- sysctr_timer_enable(false);
+ sysctr_timer_enable(evt, false);
return 0;
}
@@ -90,7 +106,7 @@ static irqreturn_t sysctr_timer_interrupt(int irq, void *dev_id)
{
struct clock_event_device *evt = dev_id;
- sysctr_irq_acknowledge();
+ sysctr_irq_acknowledge(evt);
evt->event_handler(evt);
@@ -117,34 +133,75 @@ static struct timer_of to_sysctr = {
},
};
-static void __init sysctr_clockevent_init(void)
+static int __init __sysctr_timer_init(struct device_node *np)
{
+ struct sysctr_private *priv;
+ void __iomem *base;
+ int ret;
+
+ priv = kzalloc(sizeof(struct sysctr_private), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ ret = timer_of_init(np, &to_sysctr);
+ if (ret) {
+ kfree(priv);
+ return ret;
+ }
+
+ if (!of_property_read_bool(np, "nxp,no-divider")) {
+ /* system counter clock is divided by 3 internally */
+ to_sysctr.of_clk.rate /= SYS_CTR_CLK_DIV;
+ }
+
to_sysctr.clkevt.cpumask = cpu_possible_mask;
+ to_sysctr.private_data = priv;
+
+ base = timer_of_base(&to_sysctr);
+ priv->cmpcr = readl(base + CMPCR) & ~SYS_CTR_EN;
+
+ return 0;
+}
+
+static int __init sysctr_timer_init(struct device_node *np)
+{
+ struct sysctr_private *priv;
+ int ret;
+
+ ret = __sysctr_timer_init(np);
+ if (ret)
+ return ret;
+
+ priv = to_sysctr.private_data;
+ priv->lo_off = CNTCV_LO;
+ priv->hi_off = CNTCV_HI;
clockevents_config_and_register(&to_sysctr.clkevt,
timer_of_rate(&to_sysctr),
0xff, 0x7fffffff);
+
+ return 0;
}
-static int __init sysctr_timer_init(struct device_node *np)
+static int __init sysctr_timer_imx95_init(struct device_node *np)
{
- int ret = 0;
+ struct sysctr_private *priv;
+ int ret;
- ret = timer_of_init(np, &to_sysctr);
+ ret = __sysctr_timer_init(np);
if (ret)
return ret;
- if (!of_property_read_bool(np, "nxp,no-divider")) {
- /* system counter clock is divided by 3 internally */
- to_sysctr.of_clk.rate /= SYS_CTR_CLK_DIV;
- }
-
- sys_ctr_base = timer_of_base(&to_sysctr);
- cmpcr = readl(sys_ctr_base + CMPCR);
- cmpcr &= ~SYS_CTR_EN;
+ priv = to_sysctr.private_data;
+ priv->lo_off = CNTCV_LO_IMX95;
+ priv->hi_off = CNTCV_HI_IMX95;
- sysctr_clockevent_init();
+ clockevents_config_and_register(&to_sysctr.clkevt,
+ timer_of_rate(&to_sysctr),
+ 0xff, 0x7fffffff);
return 0;
}
+
TIMER_OF_DECLARE(sysctr_timer, "nxp,sysctr-timer", sysctr_timer_init);
+TIMER_OF_DECLARE(sysctr_timer_imx95, "nxp,imx95-sysctr-timer", sysctr_timer_imx95_init);
diff --git a/drivers/clocksource/timer-imx-tpm.c b/drivers/clocksource/timer-imx-tpm.c
index bd64a8a8427f..92c025b70eb6 100644
--- a/drivers/clocksource/timer-imx-tpm.c
+++ b/drivers/clocksource/timer-imx-tpm.c
@@ -83,20 +83,28 @@ static u64 notrace tpm_read_sched_clock(void)
static int tpm_set_next_event(unsigned long delta,
struct clock_event_device *evt)
{
- unsigned long next, now;
+ unsigned long next, prev, now;
- next = tpm_read_counter();
- next += delta;
+ prev = tpm_read_counter();
+ next = prev + delta;
writel(next, timer_base + TPM_C0V);
now = tpm_read_counter();
/*
+ * Need to wait for CNT to increase by at least 1 cycle to make sure
+ * the C0V value has been updated in the hardware.
+ */
+ if ((next & 0xffffffff) != readl(timer_base + TPM_C0V))
+ while (now == tpm_read_counter())
+ ;
+
+ /*
* NOTE: We observed that, with a very small probability, bus fabric
* contention between the GPU and the A7 may result in a few cycles of
* delay in writing the CNT registers, which may cause the min_delta
* event to be missed, so we need an ETIME check here in case that happens.
*/
- return (int)(next - now) <= 0 ? -ETIME : 0;
+ return (now - prev) >= delta ? -ETIME : 0;
}
static int tpm_set_state_oneshot(struct clock_event_device *evt)
diff --git a/drivers/clocksource/timer-of.c b/drivers/clocksource/timer-of.c
index c3f54d9912be..420202bf76e4 100644
--- a/drivers/clocksource/timer-of.c
+++ b/drivers/clocksource/timer-of.c
@@ -25,10 +25,7 @@ static __init void timer_of_irq_exit(struct of_timer_irq *of_irq)
struct clock_event_device *clkevt = &to->clkevt;
- if (of_irq->percpu)
- free_percpu_irq(of_irq->irq, clkevt);
- else
- free_irq(of_irq->irq, clkevt);
+ free_irq(of_irq->irq, clkevt);
}
/**
@@ -42,9 +39,6 @@ static __init void timer_of_irq_exit(struct of_timer_irq *of_irq)
* - Get interrupt number by name
* - Get interrupt number by index
*
- * When the interrupt is per CPU, 'request_percpu_irq()' is called,
- * otherwise 'request_irq()' is used.
- *
* Returns 0 on success, < 0 otherwise
*/
static __init int timer_of_irq_init(struct device_node *np,
@@ -69,12 +63,9 @@ static __init int timer_of_irq_init(struct device_node *np,
return -EINVAL;
}
- ret = of_irq->percpu ?
- request_percpu_irq(of_irq->irq, of_irq->handler,
- np->full_name, clkevt) :
- request_irq(of_irq->irq, of_irq->handler,
- of_irq->flags ? of_irq->flags : IRQF_TIMER,
- np->full_name, clkevt);
+ ret = request_irq(of_irq->irq, of_irq->handler,
+ of_irq->flags ? of_irq->flags : IRQF_TIMER,
+ np->full_name, clkevt);
if (ret) {
pr_err("Failed to request irq %d for %pOF\n", of_irq->irq, np);
return ret;
diff --git a/drivers/clocksource/timer-of.h b/drivers/clocksource/timer-of.h
index a5478f3e8589..01a2c6b7db06 100644
--- a/drivers/clocksource/timer-of.h
+++ b/drivers/clocksource/timer-of.h
@@ -11,7 +11,6 @@
struct of_timer_irq {
int irq;
int index;
- int percpu;
const char *name;
unsigned long flags;
irq_handler_t handler;
diff --git a/drivers/clocksource/timer-qcom.c b/drivers/clocksource/timer-qcom.c
index b4afe3a67583..ddb1debe6a6b 100644
--- a/drivers/clocksource/timer-qcom.c
+++ b/drivers/clocksource/timer-qcom.c
@@ -130,7 +130,6 @@ static int msm_local_timer_dying_cpu(unsigned int cpu)
{
struct clock_event_device *evt = per_cpu_ptr(msm_evt, cpu);
- evt->set_state_shutdown(evt);
disable_percpu_irq(evt->irq);
return 0;
}
@@ -233,6 +232,7 @@ static int __init msm_dt_timer_init(struct device_node *np)
}
if (of_property_read_u32(np, "clock-frequency", &freq)) {
+ iounmap(cpu0_base);
pr_err("Unknown frequency\n");
return -EINVAL;
}
@@ -243,7 +243,11 @@ static int __init msm_dt_timer_init(struct device_node *np)
freq /= 4;
writel_relaxed(DGT_CLK_CTL_DIV_4, source_base + DGT_CLK_CTL);
- return msm_timer_init(freq, 32, irq, !!percpu_offset);
+ ret = msm_timer_init(freq, 32, irq, !!percpu_offset);
+ if (ret)
+ iounmap(cpu0_base);
+
+ return ret;
}
TIMER_OF_DECLARE(kpss_timer, "qcom,kpss-timer", msm_dt_timer_init);
TIMER_OF_DECLARE(scss_timer, "qcom,scss-timer", msm_dt_timer_init);
diff --git a/drivers/clocksource/timer-ralink.c b/drivers/clocksource/timer-ralink.c
new file mode 100644
index 000000000000..6ecdb4228f76
--- /dev/null
+++ b/drivers/clocksource/timer-ralink.c
@@ -0,0 +1,150 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for the Ralink System Tick Counter present on RT3352 and MT7620 SoCs.
+ *
+ * Copyright (C) 2013 by John Crispin <john@phrozen.org>
+ */
+
+#include <linux/clockchips.h>
+#include <linux/clocksource.h>
+#include <linux/interrupt.h>
+#include <linux/reset.h>
+#include <linux/init.h>
+#include <linux/time.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+
+#define SYSTICK_FREQ (50 * 1000)
+
+#define SYSTICK_CONFIG 0x00
+#define SYSTICK_COMPARE 0x04
+#define SYSTICK_COUNT 0x08
+
+/* route systick irq to mips irq 7 instead of the r4k-timer */
+#define CFG_EXT_STK_EN 0x2
+/* enable the counter */
+#define CFG_CNT_EN 0x1
+
+struct systick_device {
+ void __iomem *membase;
+ struct clock_event_device dev;
+ int irq_requested;
+ int freq_scale;
+};
+
+static int systick_set_oneshot(struct clock_event_device *evt);
+static int systick_shutdown(struct clock_event_device *evt);
+
+static int systick_next_event(unsigned long delta,
+ struct clock_event_device *evt)
+{
+ struct systick_device *sdev;
+ u32 count;
+
+ sdev = container_of(evt, struct systick_device, dev);
+ count = ioread32(sdev->membase + SYSTICK_COUNT);
+ count = (count + delta) % SYSTICK_FREQ;
+ iowrite32(count, sdev->membase + SYSTICK_COMPARE);
+
+ return 0;
+}
+
+static void systick_event_handler(struct clock_event_device *dev)
+{
+ /* nothing to do here */
+}
+
+static irqreturn_t systick_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *dev = (struct clock_event_device *)dev_id;
+
+ dev->event_handler(dev);
+
+ return IRQ_HANDLED;
+}
+
+static struct systick_device systick = {
+ .dev = {
+ /*
+ * cevt-r4k uses 300, make sure systick
+ * gets used if available
+ */
+ .rating = 310,
+ .features = CLOCK_EVT_FEAT_ONESHOT,
+ .set_next_event = systick_next_event,
+ .set_state_shutdown = systick_shutdown,
+ .set_state_oneshot = systick_set_oneshot,
+ .event_handler = systick_event_handler,
+ },
+};
+
+static int systick_shutdown(struct clock_event_device *evt)
+{
+ struct systick_device *sdev;
+
+ sdev = container_of(evt, struct systick_device, dev);
+
+ if (sdev->irq_requested)
+ free_irq(systick.dev.irq, &systick.dev);
+ sdev->irq_requested = 0;
+ iowrite32(0, systick.membase + SYSTICK_CONFIG);
+
+ return 0;
+}
+
+static int systick_set_oneshot(struct clock_event_device *evt)
+{
+ const char *name = systick.dev.name;
+ struct systick_device *sdev;
+ int irq = systick.dev.irq;
+
+ sdev = container_of(evt, struct systick_device, dev);
+
+ if (!sdev->irq_requested) {
+ if (request_irq(irq, systick_interrupt,
+ IRQF_PERCPU | IRQF_TIMER, name, &systick.dev))
+ pr_err("Failed to request irq %d (%s)\n", irq, name);
+ }
+ sdev->irq_requested = 1;
+ iowrite32(CFG_EXT_STK_EN | CFG_CNT_EN,
+ systick.membase + SYSTICK_CONFIG);
+
+ return 0;
+}
+
+static int __init ralink_systick_init(struct device_node *np)
+{
+ int ret;
+
+ systick.membase = of_iomap(np, 0);
+ if (!systick.membase)
+ return -ENXIO;
+
+ systick.dev.name = np->name;
+ clockevents_calc_mult_shift(&systick.dev, SYSTICK_FREQ, 60);
+ systick.dev.max_delta_ns = clockevent_delta2ns(0x7fff, &systick.dev);
+ systick.dev.max_delta_ticks = 0x7fff;
+ systick.dev.min_delta_ns = clockevent_delta2ns(0x3, &systick.dev);
+ systick.dev.min_delta_ticks = 0x3;
+ systick.dev.irq = irq_of_parse_and_map(np, 0);
+ if (!systick.dev.irq) {
+ pr_err("%pOFn: request_irq failed", np);
+ return -EINVAL;
+ }
+
+ ret = clocksource_mmio_init(systick.membase + SYSTICK_COUNT, np->name,
+ SYSTICK_FREQ, 301, 16,
+ clocksource_mmio_readl_up);
+ if (ret)
+ return ret;
+
+ clockevents_register_device(&systick.dev);
+
+ pr_info("%pOFn: running - mult: %d, shift: %d\n",
+ np, systick.dev.mult, systick.dev.shift);
+
+ return 0;
+}
+
+TIMER_OF_DECLARE(systick, "ralink,cevt-systick", ralink_systick_init);
diff --git a/drivers/clocksource/timer-riscv.c b/drivers/clocksource/timer-riscv.c
index e66dcbd66566..48ce50c5f5e6 100644
--- a/drivers/clocksource/timer-riscv.c
+++ b/drivers/clocksource/timer-riscv.c
@@ -108,13 +108,16 @@ static int riscv_timer_starting_cpu(unsigned int cpu)
{
struct clock_event_device *ce = per_cpu_ptr(&riscv_clock_event, cpu);
+ /* Clear timer interrupt */
+ riscv_clock_event_stop();
+
ce->cpumask = cpumask_of(cpu);
ce->irq = riscv_clock_event_irq;
if (riscv_timer_cannot_wake_cpu)
ce->features |= CLOCK_EVT_FEAT_C3STOP;
if (static_branch_likely(&riscv_sstc_available))
ce->rating = 450;
- clockevents_config_and_register(ce, riscv_timebase, 100, 0x7fffffff);
+ clockevents_config_and_register(ce, riscv_timebase, 100, ULONG_MAX);
enable_percpu_irq(riscv_clock_event_irq,
irq_get_trigger_type(riscv_clock_event_irq));
diff --git a/drivers/clocksource/timer-rtl-otto.c b/drivers/clocksource/timer-rtl-otto.c
new file mode 100644
index 000000000000..8a3068b36e75
--- /dev/null
+++ b/drivers/clocksource/timer-rtl-otto.c
@@ -0,0 +1,291 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/clk.h>
+#include <linux/clockchips.h>
+#include <linux/cpu.h>
+#include <linux/cpuhotplug.h>
+#include <linux/cpumask.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/jiffies.h>
+#include <linux/printk.h>
+#include <linux/sched_clock.h>
+#include "timer-of.h"
+
+#define RTTM_DATA 0x0
+#define RTTM_CNT 0x4
+#define RTTM_CTRL 0x8
+#define RTTM_INT 0xc
+
+#define RTTM_CTRL_ENABLE BIT(28)
+#define RTTM_INT_PENDING BIT(16)
+#define RTTM_INT_ENABLE BIT(20)
+
+/*
+ * The Otto platform provides multiple 28 bit timers/counters with the following
+ * operating logic. If enabled the timer counts up. Per timer one can set a
+ * maximum counter value as an end marker. If end marker is reached the timer
+ * fires an interrupt. If the timer "overflows" by reaching the end marker or
+ * by adding 1 to 0x0fffffff the counter is reset to 0. When this happens and
+ * the timer is in operating mode COUNTER it stops. In mode TIMER it will
+ * continue to count up.
+ */
+#define RTTM_CTRL_COUNTER 0
+#define RTTM_CTRL_TIMER BIT(24)
+
+#define RTTM_BIT_COUNT 28
+#define RTTM_MIN_DELTA 8
+#define RTTM_MAX_DELTA CLOCKSOURCE_MASK(28)
+
+/*
+ * Timers are derived from the LXB clock frequency. Usually this is a fixed
+ * multiple of the 25 MHz oscillator. The 930X SOC is an exception to that.
+ * Its LXB clock has only dividers and uses the switch PLL of 2.45 GHz as its
+ * base. The only meaningful frequencies we can achieve from that are 175.000
+ * MHz and 153.125 MHz. The greatest common divisor of all of these possible
+ * speeds is 3125000. Pin the timers to this 3.125 MHz reference frequency.
+ */
+#define RTTM_TICKS_PER_SEC 3125000
+
+struct rttm_cs {
+ struct timer_of to;
+ struct clocksource cs;
+};
+
+/* Simple internal register functions */
+static inline void rttm_set_counter(void __iomem *base, unsigned int counter)
+{
+ iowrite32(counter, base + RTTM_CNT);
+}
+
+static inline unsigned int rttm_get_counter(void __iomem *base)
+{
+ return ioread32(base + RTTM_CNT);
+}
+
+static inline void rttm_set_period(void __iomem *base, unsigned int period)
+{
+ iowrite32(period, base + RTTM_DATA);
+}
+
+static inline void rttm_disable_timer(void __iomem *base)
+{
+ iowrite32(0, base + RTTM_CTRL);
+}
+
+static inline void rttm_enable_timer(void __iomem *base, u32 mode, u32 divisor)
+{
+ iowrite32(RTTM_CTRL_ENABLE | mode | divisor, base + RTTM_CTRL);
+}
+
+static inline void rttm_ack_irq(void __iomem *base)
+{
+ iowrite32(ioread32(base + RTTM_INT) | RTTM_INT_PENDING, base + RTTM_INT);
+}
+
+static inline void rttm_enable_irq(void __iomem *base)
+{
+ iowrite32(RTTM_INT_ENABLE, base + RTTM_INT);
+}
+
+static inline void rttm_disable_irq(void __iomem *base)
+{
+ iowrite32(0, base + RTTM_INT);
+}
+
+/* Aggregated control functions for kernel clock framework */
+#define RTTM_DEBUG(base) \
+ pr_debug("------------- %d %p\n", \
+ smp_processor_id(), base)
+
+static irqreturn_t rttm_timer_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *clkevt = dev_id;
+ struct timer_of *to = to_timer_of(clkevt);
+
+ rttm_ack_irq(to->of_base.base);
+ RTTM_DEBUG(to->of_base.base);
+ clkevt->event_handler(clkevt);
+
+ return IRQ_HANDLED;
+}
+
+static void rttm_stop_timer(void __iomem *base)
+{
+ rttm_disable_timer(base);
+ rttm_ack_irq(base);
+}
+
+static void rttm_start_timer(struct timer_of *to, u32 mode)
+{
+ rttm_set_counter(to->of_base.base, 0);
+ rttm_enable_timer(to->of_base.base, mode, to->of_clk.rate / RTTM_TICKS_PER_SEC);
+}
+
+static int rttm_next_event(unsigned long delta, struct clock_event_device *clkevt)
+{
+ struct timer_of *to = to_timer_of(clkevt);
+
+ RTTM_DEBUG(to->of_base.base);
+ rttm_stop_timer(to->of_base.base);
+ rttm_set_period(to->of_base.base, delta);
+ rttm_start_timer(to, RTTM_CTRL_COUNTER);
+
+ return 0;
+}
+
+static int rttm_state_oneshot(struct clock_event_device *clkevt)
+{
+ struct timer_of *to = to_timer_of(clkevt);
+
+ RTTM_DEBUG(to->of_base.base);
+ rttm_stop_timer(to->of_base.base);
+ rttm_set_period(to->of_base.base, RTTM_TICKS_PER_SEC / HZ);
+ rttm_start_timer(to, RTTM_CTRL_COUNTER);
+
+ return 0;
+}
+
+static int rttm_state_periodic(struct clock_event_device *clkevt)
+{
+ struct timer_of *to = to_timer_of(clkevt);
+
+ RTTM_DEBUG(to->of_base.base);
+ rttm_stop_timer(to->of_base.base);
+ rttm_set_period(to->of_base.base, RTTM_TICKS_PER_SEC / HZ);
+ rttm_start_timer(to, RTTM_CTRL_TIMER);
+
+ return 0;
+}
+
+static int rttm_state_shutdown(struct clock_event_device *clkevt)
+{
+ struct timer_of *to = to_timer_of(clkevt);
+
+ RTTM_DEBUG(to->of_base.base);
+ rttm_stop_timer(to->of_base.base);
+
+ return 0;
+}
+
+static void rttm_setup_timer(void __iomem *base)
+{
+ RTTM_DEBUG(base);
+ rttm_stop_timer(base);
+ rttm_set_period(base, 0);
+}
+
+static u64 rttm_read_clocksource(struct clocksource *cs)
+{
+ struct rttm_cs *rcs = container_of(cs, struct rttm_cs, cs);
+
+ return rttm_get_counter(rcs->to.of_base.base);
+}
+
+/* Module initialization part. */
+static DEFINE_PER_CPU(struct timer_of, rttm_to) = {
+ .flags = TIMER_OF_BASE | TIMER_OF_CLOCK | TIMER_OF_IRQ,
+ .of_irq = {
+ .flags = IRQF_PERCPU | IRQF_TIMER,
+ .handler = rttm_timer_interrupt,
+ },
+ .clkevt = {
+ .rating = 400,
+ .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
+ .set_state_periodic = rttm_state_periodic,
+ .set_state_shutdown = rttm_state_shutdown,
+ .set_state_oneshot = rttm_state_oneshot,
+ .set_next_event = rttm_next_event
+ },
+};
+
+static int rttm_enable_clocksource(struct clocksource *cs)
+{
+ struct rttm_cs *rcs = container_of(cs, struct rttm_cs, cs);
+
+ rttm_disable_irq(rcs->to.of_base.base);
+ rttm_setup_timer(rcs->to.of_base.base);
+ rttm_enable_timer(rcs->to.of_base.base, RTTM_CTRL_TIMER,
+ rcs->to.of_clk.rate / RTTM_TICKS_PER_SEC);
+
+ return 0;
+}
+
+struct rttm_cs rttm_cs = {
+ .to = {
+ .flags = TIMER_OF_BASE | TIMER_OF_CLOCK,
+ },
+ .cs = {
+ .name = "realtek_otto_timer",
+ .rating = 400,
+ .mask = CLOCKSOURCE_MASK(RTTM_BIT_COUNT),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+ .read = rttm_read_clocksource,
+ }
+};
+
+static u64 notrace rttm_read_clock(void)
+{
+ return rttm_get_counter(rttm_cs.to.of_base.base);
+}
+
+static int rttm_cpu_starting(unsigned int cpu)
+{
+ struct timer_of *to = per_cpu_ptr(&rttm_to, cpu);
+
+ RTTM_DEBUG(to->of_base.base);
+ to->clkevt.cpumask = cpumask_of(cpu);
+ irq_force_affinity(to->of_irq.irq, to->clkevt.cpumask);
+ clockevents_config_and_register(&to->clkevt, RTTM_TICKS_PER_SEC,
+ RTTM_MIN_DELTA, RTTM_MAX_DELTA);
+ rttm_enable_irq(to->of_base.base);
+
+ return 0;
+}
+
+static int __init rttm_probe(struct device_node *np)
+{
+ unsigned int cpu, cpu_rollback;
+ struct timer_of *to;
+ unsigned int clkidx = num_possible_cpus();
+
+ /* Use the first n timers as per CPU clock event generators */
+ for_each_possible_cpu(cpu) {
+ to = per_cpu_ptr(&rttm_to, cpu);
+ to->of_irq.index = to->of_base.index = cpu;
+ if (timer_of_init(np, to)) {
+ pr_err("setup of timer %d failed\n", cpu);
+ goto rollback;
+ }
+ rttm_setup_timer(to->of_base.base);
+ }
+
+ /* Activate the n'th + 1 timer as a stable CPU clocksource. */
+ to = &rttm_cs.to;
+ to->of_base.index = clkidx;
+ timer_of_init(np, to);
+ if (rttm_cs.to.of_base.base && rttm_cs.to.of_clk.rate) {
+ rttm_enable_clocksource(&rttm_cs.cs);
+ clocksource_register_hz(&rttm_cs.cs, RTTM_TICKS_PER_SEC);
+ sched_clock_register(rttm_read_clock, RTTM_BIT_COUNT, RTTM_TICKS_PER_SEC);
+ } else
+ pr_err(" setup of timer %d as clocksource failed", clkidx);
+
+ return cpuhp_setup_state(CPUHP_AP_REALTEK_TIMER_STARTING,
+ "timer/realtek:online",
+ rttm_cpu_starting, NULL);
+rollback:
+ pr_err("timer registration failed\n");
+ for_each_possible_cpu(cpu_rollback) {
+ if (cpu_rollback == cpu)
+ break;
+ to = per_cpu_ptr(&rttm_to, cpu_rollback);
+ timer_of_cleanup(to);
+ }
+
+ return -EINVAL;
+}
+
+TIMER_OF_DECLARE(otto_timer, "realtek,otto-timer", rttm_probe);
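The RTTM_TICKS_PER_SEC comment in the new driver above quotes 3.125 MHz as the greatest common divisor of the achievable rates. A standalone userspace check of that arithmetic (the 175 MHz and 153.125 MHz figures come from the comment; the other SoCs' rates are treated as multiples of the 25 MHz oscillator):

#include <assert.h>

static unsigned long gcd_ul(unsigned long a, unsigned long b)
{
	while (b) {
		unsigned long t = a % b;

		a = b;
		b = t;
	}
	return a;
}

int main(void)
{
	/* 2.45 GHz / 14 = 175 MHz, 2.45 GHz / 16 = 153.125 MHz */
	unsigned long g = gcd_ul(175000000UL, 153125000UL);	/* 21875000 */

	g = gcd_ul(g, 25000000UL);	/* 3125000 */
	assert(g == 3125000UL);		/* matches RTTM_TICKS_PER_SEC */
	return 0;
}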
diff --git a/drivers/clocksource/timer-stm32.c b/drivers/clocksource/timer-stm32.c
index c9a753f96ba1..0a4ea3288bfb 100644
--- a/drivers/clocksource/timer-stm32.c
+++ b/drivers/clocksource/timer-stm32.c
@@ -73,7 +73,7 @@ static void stm32_timer_of_bits_set(struct timer_of *to, int bits)
* Accessor helper to get the number of bits in the timer-of private
* structure.
*
- * Returns an integer corresponding to the number of bits.
+ * Returns: an integer corresponding to the number of bits.
*/
static int stm32_timer_of_bits_get(struct timer_of *to)
{
@@ -177,7 +177,7 @@ static irqreturn_t stm32_clock_event_handler(int irq, void *dev_id)
}
/**
- * stm32_timer_width - Sort out the timer width (32/16)
+ * stm32_timer_set_width - Sort out the timer width (32/16)
* @to: a pointer to a timer-of structure
*
* Write the 32-bit max value and read/return the result. If the timer
diff --git a/drivers/clocksource/timer-sun5i.c b/drivers/clocksource/timer-sun5i.c
index 0d229a9058da..6b48a9006444 100644
--- a/drivers/clocksource/timer-sun5i.c
+++ b/drivers/clocksource/timer-sun5i.c
@@ -318,7 +318,7 @@ MODULE_DEVICE_TABLE(of, sun5i_timer_of_match);
static struct platform_driver sun5i_timer_driver = {
.probe = sun5i_timer_probe,
- .remove_new = sun5i_timer_remove,
+ .remove = sun5i_timer_remove,
.driver = {
.name = "sun5i-timer",
.of_match_table = sun5i_timer_of_match,
diff --git a/drivers/clocksource/timer-tegra.c b/drivers/clocksource/timer-tegra.c
index e9635c25eef4..35b6ce9deffa 100644
--- a/drivers/clocksource/timer-tegra.c
+++ b/drivers/clocksource/timer-tegra.c
@@ -158,7 +158,6 @@ static int tegra_timer_stop(unsigned int cpu)
{
struct timer_of *to = per_cpu_ptr(&tegra_to, cpu);
- to->clkevt.set_state_shutdown(&to->clkevt);
disable_irq_nosync(to->clkevt.irq);
return 0;
diff --git a/drivers/clocksource/timer-tegra186.c b/drivers/clocksource/timer-tegra186.c
index 304537dadf2c..5d4cf5237a11 100644
--- a/drivers/clocksource/timer-tegra186.c
+++ b/drivers/clocksource/timer-tegra186.c
@@ -502,7 +502,7 @@ static struct platform_driver tegra186_wdt_driver = {
.of_match_table = tegra186_timer_of_match,
},
.probe = tegra186_timer_probe,
- .remove_new = tegra186_timer_remove,
+ .remove = tegra186_timer_remove,
};
module_platform_driver(tegra186_wdt_driver);
diff --git a/drivers/clocksource/timer-ti-32k.c b/drivers/clocksource/timer-ti-32k.c
index 59b0be482f32..a86529a70737 100644
--- a/drivers/clocksource/timer-ti-32k.c
+++ b/drivers/clocksource/timer-ti-32k.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* timer-ti-32k.c - OMAP2 32k Timer Support
*
* Copyright (C) 2009 Nokia Corporation
diff --git a/drivers/clocksource/timer-ti-dm-systimer.c b/drivers/clocksource/timer-ti-dm-systimer.c
index c2dcd8d68e45..985a6d08512b 100644
--- a/drivers/clocksource/timer-ti-dm-systimer.c
+++ b/drivers/clocksource/timer-ti-dm-systimer.c
@@ -202,10 +202,10 @@ static bool __init dmtimer_is_preferred(struct device_node *np)
/* Secure gptimer12 is always clocked with a fixed source */
if (!of_property_read_bool(np, "ti,timer-secure")) {
- if (!of_property_read_bool(np, "assigned-clocks"))
+ if (!of_property_present(np, "assigned-clocks"))
return false;
- if (!of_property_read_bool(np, "assigned-clock-parents"))
+ if (!of_property_present(np, "assigned-clock-parents"))
return false;
}
@@ -686,9 +686,9 @@ subsys_initcall(dmtimer_percpu_timer_startup);
static int __init dmtimer_percpu_quirk_init(struct device_node *np, u32 pa)
{
- struct device_node *arm_timer;
+ struct device_node *arm_timer __free(device_node) =
+ of_find_compatible_node(NULL, NULL, "arm,armv7-timer");
- arm_timer = of_find_compatible_node(NULL, NULL, "arm,armv7-timer");
if (of_device_is_available(arm_timer)) {
pr_warn_once("ARM architected timer wrap issue i940 detected\n");
return 0;
diff --git a/drivers/clocksource/timer-ti-dm.c b/drivers/clocksource/timer-ti-dm.c
index 56acf2617262..e9e32df6b566 100644
--- a/drivers/clocksource/timer-ti-dm.c
+++ b/drivers/clocksource/timer-ti-dm.c
@@ -129,7 +129,6 @@ struct dmtimer {
void __iomem *func_base; /* function register base */
atomic_t enabled;
- unsigned long rate;
unsigned reserved:1;
unsigned posted:1;
unsigned omap1:1;
@@ -1105,8 +1104,12 @@ static int omap_dm_timer_probe(struct platform_device *pdev)
return -ENOMEM;
timer->irq = platform_get_irq(pdev, 0);
- if (timer->irq < 0)
- return timer->irq;
+ if (timer->irq < 0) {
+ if (of_property_read_bool(dev->of_node, "ti,timer-pwm"))
+ dev_info(dev, "Did not find timer interrupt, timer usable in PWM mode only\n");
+ else
+ return timer->irq;
+ }
timer->io_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(timer->io_base))
@@ -1292,7 +1295,7 @@ MODULE_DEVICE_TABLE(of, omap_timer_match);
static struct platform_driver omap_dm_timer_driver = {
.probe = omap_dm_timer_probe,
- .remove_new = omap_dm_timer_remove,
+ .remove = omap_dm_timer_remove,
.driver = {
.name = "omap_timer",
.of_match_table = omap_timer_match,