Diffstat (limited to 'drivers/clocksource/sh_cmt.c')
-rw-r--r--  drivers/clocksource/sh_cmt.c  293
1 file changed, 187 insertions(+), 106 deletions(-)
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index 55d3e03f2cd4..791b298c995b 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -13,11 +13,11 @@
 #include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
+#include <linux/iopoll.h>
 #include <linux/ioport.h>
 #include <linux/irq.h>
 #include <linux/module.h>
 #include <linux/of.h>
-#include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/pm_domain.h>
 #include <linux/pm_runtime.h>
@@ -25,6 +25,10 @@
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 
+#ifdef CONFIG_SUPERH
+#include <asm/platform_early.h>
+#endif
+
 struct sh_cmt_device;
 
 /*
@@ -112,6 +116,7 @@ struct sh_cmt_device {
 	void __iomem *mapbase;
 	struct clk *clk;
 	unsigned long rate;
+	unsigned int reg_delay;
 
 	raw_spinlock_t lock; /* Protect the shared start/stop register */
 
@@ -231,6 +236,8 @@ static const struct sh_cmt_info sh_cmt_info[] = {
 #define CMCNT 1 /* channel register */
 #define CMCOR 2 /* channel register */
 
+#define CMCLKE	0x1000	/* CLK Enable Register (R-Car Gen2) */
+
 static inline u32 sh_cmt_read_cmstr(struct sh_cmt_channel *ch)
 {
 	if (ch->iostart)
@@ -241,10 +248,17 @@ static inline u32 sh_cmt_read_cmstr(struct sh_cmt_channel *ch)
 
 static inline void sh_cmt_write_cmstr(struct sh_cmt_channel *ch, u32 value)
 {
-	if (ch->iostart)
-		ch->cmt->info->write_control(ch->iostart, 0, value);
-	else
-		ch->cmt->info->write_control(ch->cmt->mapbase, 0, value);
+	u32 old_value = sh_cmt_read_cmstr(ch);
+
+	if (value != old_value) {
+		if (ch->iostart) {
+			ch->cmt->info->write_control(ch->iostart, 0, value);
+			udelay(ch->cmt->reg_delay);
+		} else {
+			ch->cmt->info->write_control(ch->cmt->mapbase, 0, value);
+			udelay(ch->cmt->reg_delay);
+		}
+	}
 }
 
 static inline u32 sh_cmt_read_cmcsr(struct sh_cmt_channel *ch)
@@ -254,7 +268,12 @@ static inline u32 sh_cmt_read_cmcsr(struct sh_cmt_channel *ch)
 
 static inline void sh_cmt_write_cmcsr(struct sh_cmt_channel *ch, u32 value)
 {
-	ch->cmt->info->write_control(ch->ioctrl, CMCSR, value);
+	u32 old_value = sh_cmt_read_cmcsr(ch);
+
+	if (value != old_value) {
+		ch->cmt->info->write_control(ch->ioctrl, CMCSR, value);
+		udelay(ch->cmt->reg_delay);
+	}
 }
 
 static inline u32 sh_cmt_read_cmcnt(struct sh_cmt_channel *ch)
@@ -262,14 +281,33 @@
 	return ch->cmt->info->read_count(ch->ioctrl, CMCNT);
 }
 
-static inline void sh_cmt_write_cmcnt(struct sh_cmt_channel *ch, u32 value)
+static inline int sh_cmt_write_cmcnt(struct sh_cmt_channel *ch, u32 value)
 {
+	/* Tests showed that we need to wait 3 clocks here */
+	unsigned int cmcnt_delay = DIV_ROUND_UP(3 * ch->cmt->reg_delay, 2);
+	u32 reg;
+
+	if (ch->cmt->info->model > SH_CMT_16BIT) {
+		int ret = read_poll_timeout_atomic(sh_cmt_read_cmcsr, reg,
+						   !(reg & SH_CMT32_CMCSR_WRFLG),
+						   1, cmcnt_delay, false, ch);
+		if (ret < 0)
+			return ret;
+	}
+
 	ch->cmt->info->write_count(ch->ioctrl, CMCNT, value);
+	udelay(cmcnt_delay);
+	return 0;
 }
 
 static inline void sh_cmt_write_cmcor(struct sh_cmt_channel *ch, u32 value)
 {
-	ch->cmt->info->write_count(ch->ioctrl, CMCOR, value);
+	u32 old_value = ch->cmt->info->read_count(ch->ioctrl, CMCOR);
+
+	if (value != old_value) {
+		ch->cmt->info->write_count(ch->ioctrl, CMCOR, value);
+		udelay(ch->cmt->reg_delay);
+	}
 }
 
 static u32 sh_cmt_get_counter(struct sh_cmt_channel *ch, u32 *has_wrapped)
@@ -313,19 +351,10 @@ static void sh_cmt_start_stop_ch(struct sh_cmt_channel *ch, int start)
 
 static int sh_cmt_enable(struct sh_cmt_channel *ch)
 {
-	int k, ret;
+	int ret;
 
-	pm_runtime_get_sync(&ch->cmt->pdev->dev);
 	dev_pm_syscore_device(&ch->cmt->pdev->dev, true);
 
-	/* enable clock */
-	ret = clk_enable(ch->cmt->clk);
-	if (ret) {
-		dev_err(&ch->cmt->pdev->dev, "ch%u: cannot enable clock\n",
-			ch->index);
-		goto err0;
-	}
-
 	/* make sure channel is disabled */
 	sh_cmt_start_stop_ch(ch, 0);
 
@@ -334,48 +363,25 @@ static int sh_cmt_enable(struct sh_cmt_channel *ch)
 		sh_cmt_write_cmcsr(ch, SH_CMT16_CMCSR_CMIE |
 				   SH_CMT16_CMCSR_CKS512);
 	} else {
-		sh_cmt_write_cmcsr(ch, SH_CMT32_CMCSR_CMM |
-				   SH_CMT32_CMCSR_CMTOUT_IE |
+		u32 cmtout = ch->cmt->info->model <= SH_CMT_48BIT ?
+			      SH_CMT32_CMCSR_CMTOUT_IE : 0;
+		sh_cmt_write_cmcsr(ch, cmtout | SH_CMT32_CMCSR_CMM |
 				   SH_CMT32_CMCSR_CMR_IRQ |
 				   SH_CMT32_CMCSR_CKS_RCLK8);
 	}
 
 	sh_cmt_write_cmcor(ch, 0xffffffff);
-	sh_cmt_write_cmcnt(ch, 0);
-
-	/*
-	 * According to the sh73a0 user's manual, as CMCNT can be operated
-	 * only by the RCLK (Pseudo 32 KHz), there's one restriction on
-	 * modifying CMCNT register; two RCLK cycles are necessary before
-	 * this register is either read or any modification of the value
-	 * it holds is reflected in the LSI's actual operation.
-	 *
-	 * While at it, we're supposed to clear out the CMCNT as of this
-	 * moment, so make sure it's processed properly here.  This will
-	 * take RCLKx2 at maximum.
-	 */
-	for (k = 0; k < 100; k++) {
-		if (!sh_cmt_read_cmcnt(ch))
-			break;
-		udelay(1);
-	}
+	ret = sh_cmt_write_cmcnt(ch, 0);
 
-	if (sh_cmt_read_cmcnt(ch)) {
+	if (ret || sh_cmt_read_cmcnt(ch)) {
 		dev_err(&ch->cmt->pdev->dev, "ch%u: cannot clear CMCNT\n",
 			ch->index);
-		ret = -ETIMEDOUT;
-		goto err1;
+		return -ETIMEDOUT;
 	}
 
 	/* enable channel */
 	sh_cmt_start_stop_ch(ch, 1);
 	return 0;
-
- err1:
-	/* stop clock */
-	clk_disable(ch->cmt->clk);
-
- err0:
-	return ret;
 }
 
 static void sh_cmt_disable(struct sh_cmt_channel *ch)
@@ -386,11 +392,7 @@ static void sh_cmt_disable(struct sh_cmt_channel *ch)
 	/* disable interrupts in CMT block */
 	sh_cmt_write_cmcsr(ch, 0);
 
-	/* stop clock */
-	clk_disable(ch->cmt->clk);
-
 	dev_pm_syscore_device(&ch->cmt->pdev->dev, false);
-	pm_runtime_put(&ch->cmt->pdev->dev);
 }
 
 /* private flags */
@@ -508,6 +510,7 @@ static void sh_cmt_set_next(struct sh_cmt_channel *ch, unsigned long delta)
 static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id)
 {
 	struct sh_cmt_channel *ch = dev_id;
+	unsigned long flags;
 
 	/* clear flags */
 	sh_cmt_write_cmcsr(ch, sh_cmt_read_cmcsr(ch) &
@@ -538,6 +541,8 @@
 
 	ch->flags &= ~FLAG_SKIPEVENT;
 
+	raw_spin_lock_irqsave(&ch->lock, flags);
+
 	if (ch->flags & FLAG_REPROGRAM) {
 		ch->flags &= ~FLAG_REPROGRAM;
 		sh_cmt_clock_event_program_verify(ch, 1);
@@ -550,10 +555,12 @@
 
 	ch->flags &= ~FLAG_IRQCONTEXT;
 
+	raw_spin_unlock_irqrestore(&ch->lock, flags);
+
 	return IRQ_HANDLED;
 }
 
-static int sh_cmt_start(struct sh_cmt_channel *ch, unsigned long flag)
+static int sh_cmt_start_clocksource(struct sh_cmt_channel *ch)
 {
 	int ret = 0;
 	unsigned long flags;
@@ -565,18 +572,56 @@ static int sh_cmt_start(struct sh_cmt_channel *ch, unsigned long flag)
 	if (ret)
 		goto out;
-	ch->flags |= flag;
+
+	ch->flags |= FLAG_CLOCKSOURCE;
 
 	/* setup timeout if no clockevent */
-	if ((flag == FLAG_CLOCKSOURCE) && (!(ch->flags & FLAG_CLOCKEVENT)))
+	if (ch->cmt->num_channels == 1 && !(ch->flags & FLAG_CLOCKEVENT))
 		__sh_cmt_set_next(ch, ch->max_match_value);
+out:
+	raw_spin_unlock_irqrestore(&ch->lock, flags);
+
+	return ret;
+}
+
+static void sh_cmt_stop_clocksource(struct sh_cmt_channel *ch)
+{
+	unsigned long flags;
+	unsigned long f;
+
+	raw_spin_lock_irqsave(&ch->lock, flags);
+
+	f = ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE);
+
+	ch->flags &= ~FLAG_CLOCKSOURCE;
+
+	if (f && !(ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE)))
+		sh_cmt_disable(ch);
+
+	raw_spin_unlock_irqrestore(&ch->lock, flags);
+}
+
+static int sh_cmt_start_clockevent(struct sh_cmt_channel *ch)
+{
+	int ret = 0;
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&ch->lock, flags);
+
+	if (!(ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE)))
+		ret = sh_cmt_enable(ch);
+
+	if (ret)
+		goto out;
+
+	ch->flags |= FLAG_CLOCKEVENT;
 
  out:
 	raw_spin_unlock_irqrestore(&ch->lock, flags);
 
 	return ret;
 }
 
-static void sh_cmt_stop(struct sh_cmt_channel *ch, unsigned long flag)
+static void sh_cmt_stop_clockevent(struct sh_cmt_channel *ch)
 {
 	unsigned long flags;
 	unsigned long f;
@@ -584,13 +629,14 @@ static void sh_cmt_stop(struct sh_cmt_channel *ch, unsigned long flag)
 	raw_spin_lock_irqsave(&ch->lock, flags);
 
 	f = ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE);
-	ch->flags &= ~flag;
+
+	ch->flags &= ~FLAG_CLOCKEVENT;
 
 	if (f && !(ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE)))
 		sh_cmt_disable(ch);
 
 	/* adjust the timeout to maximum if only clocksource left */
-	if ((flag == FLAG_CLOCKEVENT) && (ch->flags & FLAG_CLOCKSOURCE))
+	if (ch->flags & FLAG_CLOCKSOURCE)
 		__sh_cmt_set_next(ch, ch->max_match_value);
 
 	raw_spin_unlock_irqrestore(&ch->lock, flags);
@@ -604,20 +650,25 @@ static struct sh_cmt_channel *cs_to_sh_cmt(struct clocksource *cs)
 
 static u64 sh_cmt_clocksource_read(struct clocksource *cs)
 {
 	struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);
-	unsigned long flags;
 	u32 has_wrapped;
-	u64 value;
-	u32 raw;
 
-	raw_spin_lock_irqsave(&ch->lock, flags);
-	value = ch->total_cycles;
-	raw = sh_cmt_get_counter(ch, &has_wrapped);
+	if (ch->cmt->num_channels == 1) {
+		unsigned long flags;
+		u64 value;
+		u32 raw;
 
-	if (unlikely(has_wrapped))
-		raw += ch->match_value + 1;
-	raw_spin_unlock_irqrestore(&ch->lock, flags);
+		raw_spin_lock_irqsave(&ch->lock, flags);
+		value = ch->total_cycles;
+		raw = sh_cmt_get_counter(ch, &has_wrapped);
 
-	return value + raw;
+		if (unlikely(has_wrapped))
+			raw += ch->match_value + 1;
+		raw_spin_unlock_irqrestore(&ch->lock, flags);
+
+		return value + raw;
+	}
+
+	return sh_cmt_get_counter(ch, &has_wrapped);
 }
 
 static int sh_cmt_clocksource_enable(struct clocksource *cs)
@@ -629,7 +680,7 @@ static int sh_cmt_clocksource_enable(struct clocksource *cs)
 
 	ch->total_cycles = 0;
 
-	ret = sh_cmt_start(ch, FLAG_CLOCKSOURCE);
+	ret = sh_cmt_start_clocksource(ch);
 	if (!ret)
 		ch->cs_enabled = true;
 
@@ -642,7 +693,7 @@ static void sh_cmt_clocksource_disable(struct clocksource *cs)
 
 	WARN_ON(!ch->cs_enabled);
 
-	sh_cmt_stop(ch, FLAG_CLOCKSOURCE);
+	sh_cmt_stop_clocksource(ch);
 	ch->cs_enabled = false;
 }
 
@@ -653,8 +704,8 @@ static void sh_cmt_clocksource_suspend(struct clocksource *cs)
 {
 	struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);
 
 	if (!ch->cs_enabled)
 		return;
 
-	sh_cmt_stop(ch, FLAG_CLOCKSOURCE);
-	pm_genpd_syscore_poweroff(&ch->cmt->pdev->dev);
+	sh_cmt_stop_clocksource(ch);
+	dev_pm_genpd_suspend(&ch->cmt->pdev->dev);
 }
 
 static void sh_cmt_clocksource_resume(struct clocksource *cs)
@@ -664,8 +715,8 @@ static void sh_cmt_clocksource_resume(struct clocksource *cs)
 {
 	struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);
 
 	if (!ch->cs_enabled)
 		return;
 
-	pm_genpd_syscore_poweron(&ch->cmt->pdev->dev);
-	sh_cmt_start(ch, FLAG_CLOCKSOURCE);
+	dev_pm_genpd_resume(&ch->cmt->pdev->dev);
+	sh_cmt_start_clocksource(ch);
 }
 
 static int sh_cmt_register_clocksource(struct sh_cmt_channel *ch,
@@ -680,7 +731,7 @@ static int sh_cmt_register_clocksource(struct sh_cmt_channel *ch,
 	cs->disable = sh_cmt_clocksource_disable;
 	cs->suspend = sh_cmt_clocksource_suspend;
 	cs->resume = sh_cmt_clocksource_resume;
-	cs->mask = CLOCKSOURCE_MASK(sizeof(u64) * 8);
+	cs->mask = CLOCKSOURCE_MASK(ch->cmt->info->width);
 	cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
 
 	dev_info(&ch->cmt->pdev->dev, "ch%u: used as clock source\n",
@@ -697,7 +748,7 @@ static struct sh_cmt_channel *ced_to_sh_cmt(struct clock_event_device *ced)
 
 static void sh_cmt_clock_event_start(struct sh_cmt_channel *ch, int periodic)
 {
-	sh_cmt_start(ch, FLAG_CLOCKEVENT);
+	sh_cmt_start_clockevent(ch);
 
 	if (periodic)
 		sh_cmt_set_next(ch, ((ch->cmt->rate + HZ/2) / HZ) - 1);
@@ -709,7 +760,7 @@ static int sh_cmt_clock_event_shutdown(struct clock_event_device *ced)
 {
 	struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);
 
-	sh_cmt_stop(ch, FLAG_CLOCKEVENT);
+	sh_cmt_stop_clockevent(ch);
 	return 0;
 }
 
@@ -720,7 +771,7 @@ static int sh_cmt_clock_event_set_state(struct clock_event_device *ced,
 
 	/* deal with old setting first */
 	if (clockevent_state_oneshot(ced) || clockevent_state_periodic(ced))
-		sh_cmt_stop(ch, FLAG_CLOCKEVENT);
+		sh_cmt_stop_clockevent(ch);
 
 	dev_info(&ch->cmt->pdev->dev, "ch%u: used for %s clock events\n",
 		 ch->index, periodic ? "periodic" : "oneshot");
@@ -742,12 +793,18 @@ static int sh_cmt_clock_event_next(unsigned long delta,
 				   struct clock_event_device *ced)
 {
 	struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);
+	unsigned long flags;
 
 	BUG_ON(!clockevent_state_oneshot(ced));
+
+	raw_spin_lock_irqsave(&ch->lock, flags);
+
 	if (likely(ch->flags & FLAG_IRQCONTEXT))
 		ch->next_match_value = delta - 1;
 	else
-		sh_cmt_set_next(ch, delta - 1);
+		__sh_cmt_set_next(ch, delta - 1);
+
+	raw_spin_unlock_irqrestore(&ch->lock, flags);
 
 	return 0;
 }
@@ -756,7 +813,7 @@ static void sh_cmt_clock_event_suspend(struct clock_event_device *ced)
 {
 	struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);
 
-	pm_genpd_syscore_poweroff(&ch->cmt->pdev->dev);
+	dev_pm_genpd_suspend(&ch->cmt->pdev->dev);
 	clk_unprepare(ch->cmt->clk);
 }
 
@@ -765,7 +822,7 @@ static void sh_cmt_clock_event_resume(struct clock_event_device *ced)
 	struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);
 
 	clk_prepare(ch->cmt->clk);
-	pm_genpd_syscore_poweron(&ch->cmt->pdev->dev);
+	dev_pm_genpd_resume(&ch->cmt->pdev->dev);
 }
 
 static int sh_cmt_register_clockevent(struct sh_cmt_channel *ch,
@@ -776,11 +833,8 @@ static int sh_cmt_register_clockevent(struct sh_cmt_channel *ch,
 	int ret;
 
 	irq = platform_get_irq(ch->cmt->pdev, ch->index);
-	if (irq < 0) {
-		dev_err(&ch->cmt->pdev->dev, "ch%u: failed to get irq\n",
-			ch->index);
+	if (irq < 0)
 		return irq;
-	}
 
 	ret = request_irq(irq, sh_cmt_interrupt,
 			  IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
@@ -842,6 +896,7 @@ static int sh_cmt_setup_channel(struct sh_cmt_channel *ch, unsigned int index,
 				unsigned int hwidx, bool clockevent,
 				bool clocksource, struct sh_cmt_device *cmt)
 {
+	u32 value;
 	int ret;
 
 	/* Skip unused channels. */
@@ -871,6 +926,11 @@ static int sh_cmt_setup_channel(struct sh_cmt_channel *ch, unsigned int index,
 		ch->iostart = cmt->mapbase + ch->hwidx * 0x100;
 		ch->ioctrl = ch->iostart + 0x10;
 		ch->timer_bit = 0;
+
+		/* Enable the clock supply to the channel */
+		value = ioread32(cmt->mapbase + CMCLKE);
+		value |= BIT(hwidx);
+		iowrite32(value, cmt->mapbase + CMCLKE);
 		break;
 	}
 
@@ -904,7 +964,7 @@ static int sh_cmt_map_memory(struct sh_cmt_device *cmt)
 		return -ENXIO;
 	}
 
-	cmt->mapbase = ioremap_nocache(mem->start, resource_size(mem));
+	cmt->mapbase = ioremap(mem->start, resource_size(mem));
 	if (cmt->mapbase == NULL) {
 		dev_err(&cmt->pdev->dev, "failed to remap I/O memory\n");
 		return -ENXIO;
@@ -921,13 +981,25 @@ static const struct platform_device_id sh_cmt_id_table[] = {
 MODULE_DEVICE_TABLE(platform, sh_cmt_id_table);
 
 static const struct of_device_id sh_cmt_of_table[] __maybe_unused = {
-	{ .compatible = "renesas,cmt-48", .data = &sh_cmt_info[SH_CMT_48BIT] },
+	{
+		/* deprecated, preserved for backward compatibility */
+		.compatible = "renesas,cmt-48",
+		.data = &sh_cmt_info[SH_CMT_48BIT]
+	},
 	{
 		/* deprecated, preserved for backward compatibility */
 		.compatible = "renesas,cmt-48-gen2",
 		.data = &sh_cmt_info[SH_CMT0_RCAR_GEN2]
 	},
 	{
+		.compatible = "renesas,r8a7740-cmt1",
+		.data = &sh_cmt_info[SH_CMT_48BIT]
+	},
+	{
+		.compatible = "renesas,sh73a0-cmt1",
+		.data = &sh_cmt_info[SH_CMT_48BIT]
+	},
+	{
 		.compatible = "renesas,rcar-gen2-cmt0",
 		.data = &sh_cmt_info[SH_CMT0_RCAR_GEN2]
 	},
@@ -943,14 +1015,22 @@ static const struct of_device_id sh_cmt_of_table[] __maybe_unused = {
 		.compatible = "renesas,rcar-gen3-cmt1",
 		.data = &sh_cmt_info[SH_CMT1_RCAR_GEN2]
 	},
+	{
+		.compatible = "renesas,rcar-gen4-cmt0",
+		.data = &sh_cmt_info[SH_CMT0_RCAR_GEN2]
+	},
+	{
+		.compatible = "renesas,rcar-gen4-cmt1",
+		.data = &sh_cmt_info[SH_CMT1_RCAR_GEN2]
+	},
 	{ }
 };
 MODULE_DEVICE_TABLE(of, sh_cmt_of_table);
 
 static int sh_cmt_setup(struct sh_cmt_device *cmt, struct platform_device *pdev)
 {
-	unsigned int mask;
-	unsigned int i;
+	unsigned int mask, i;
+	unsigned long rate;
 	int ret;
 
 	cmt->pdev = pdev;
@@ -986,17 +1066,21 @@ static int sh_cmt_setup(struct sh_cmt_device *cmt, struct platform_device *pdev)
 	if (ret < 0)
 		goto err_clk_unprepare;
 
-	if (cmt->info->width == 16)
-		cmt->rate = clk_get_rate(cmt->clk) / 512;
-	else
-		cmt->rate = clk_get_rate(cmt->clk) / 8;
+	rate = clk_get_rate(cmt->clk);
+	if (!rate) {
+		ret = -EINVAL;
+		goto err_clk_disable;
+	}
 
-	clk_disable(cmt->clk);
+	/* We shall wait 2 input clks after register writes */
+	if (cmt->info->model >= SH_CMT_48BIT)
+		cmt->reg_delay = DIV_ROUND_UP(2UL * USEC_PER_SEC, rate);
+	cmt->rate = rate / (cmt->info->width == 16 ? 512 : 8);
 
 	/* Map the memory resource(s). */
 	ret = sh_cmt_map_memory(cmt);
 	if (ret < 0)
-		goto err_clk_unprepare;
+		goto err_clk_disable;
 
 	/* Allocate and setup the channels. */
 	cmt->num_channels = hweight8(cmt->hw_channels);
@@ -1031,6 +1115,8 @@ static int sh_cmt_setup(struct sh_cmt_device *cmt, struct platform_device *pdev)
 err_unmap:
 	kfree(cmt->channels);
 	iounmap(cmt->mapbase);
+err_clk_disable:
	clk_disable(cmt->clk);
 err_clk_unprepare:
 	clk_unprepare(cmt->clk);
 err_clk_put:
@@ -1043,7 +1129,7 @@ static int sh_cmt_probe(struct platform_device *pdev)
 	struct sh_cmt_device *cmt = platform_get_drvdata(pdev);
 	int ret;
 
-	if (!is_early_platform_device(pdev)) {
+	if (!is_sh_early_platform_device(pdev)) {
 		pm_runtime_set_active(&pdev->dev);
 		pm_runtime_enable(&pdev->dev);
 	}
@@ -1063,29 +1149,22 @@ static int sh_cmt_probe(struct platform_device *pdev)
 		pm_runtime_idle(&pdev->dev);
 		return ret;
 	}
-	if (is_early_platform_device(pdev))
+	if (is_sh_early_platform_device(pdev))
 		return 0;
 
 out:
 	if (cmt->has_clockevent || cmt->has_clocksource)
 		pm_runtime_irq_safe(&pdev->dev);
-	else
-		pm_runtime_idle(&pdev->dev);
 
 	return 0;
 }
 
-static int sh_cmt_remove(struct platform_device *pdev)
-{
-	return -EBUSY; /* cannot unregister clockevent and clocksource */
-}
-
 static struct platform_driver sh_cmt_device_driver = {
 	.probe		= sh_cmt_probe,
-	.remove		= sh_cmt_remove,
 	.driver		= {
 		.name	= "sh_cmt",
 		.of_match_table = of_match_ptr(sh_cmt_of_table),
+		.suppress_bind_attrs = true,
 	},
 	.id_table	= sh_cmt_id_table,
 };
@@ -1100,10 +1179,12 @@ static void __exit sh_cmt_exit(void)
 	platform_driver_unregister(&sh_cmt_device_driver);
 }
 
-early_platform_init("earlytimer", &sh_cmt_device_driver);
+#ifdef CONFIG_SUPERH
+sh_early_platform_init("earlytimer", &sh_cmt_device_driver);
+#endif
+
 subsys_initcall(sh_cmt_init);
 module_exit(sh_cmt_exit);
 
 MODULE_AUTHOR("Magnus Damm");
 MODULE_DESCRIPTION("SuperH CMT Timer Driver");
-MODULE_LICENSE("GPL v2");
