path: root/arch/hexagon/kernel/time.c
blob: feffe527ac92939311fe61ee1d247572752ce41d
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Time related functions for Hexagon architecture
 *
 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
 */

#include <linux/init.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/interrupt.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/ioport.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/module.h>

#include <asm/timer-regs.h>
#include <asm/hexagon_vm.h>

/*
 * For the clocksource we need:
 *	pcycle frequency (600MHz)
 * For the loops_per_jiffy we need:
 *	thread/cpu frequency (100MHz)
 * And for the timer, we need:
 *	sleep clock rate
 */
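/*  Set elsewhere during early boot; this file only consumes the values.  */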

cycles_t	pcycle_freq_mhz;
cycles_t	thread_freq_mhz;
cycles_t	sleep_clk_freq;

static struct resource rtos_timer_resources[] = {
	{
		.start	= RTOS_TIMER_REGS_ADDR,
		.end	= RTOS_TIMER_REGS_ADDR+PAGE_SIZE-1,
		.flags	= IORESOURCE_MEM,
	},
};

static struct platform_device rtos_timer_device = {
	.name		= "rtos_timer",
	.id		= -1,
	.num_resources	= ARRAY_SIZE(rtos_timer_resources),
	.resource	= rtos_timer_resources,
};

/*  A lot of this stuff should move into a platform-specific section.  */
struct adsp_hw_timer_struct {
	u32 match;   /*  Match value  */
	u32 count;
	u32 enable;  /*  [1] - CLR_ON_MATCH_EN, [0] - EN  */
	u32 clear;   /*  one-shot register that clears the count  */
};

/*  Look for "TCX0" for related constants.  */
static struct adsp_hw_timer_struct __iomem *rtos_timer;

static u64 timer_get_cycles(struct clocksource *cs)
{
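	/*  The VM pcycle counter is a free-running 64-bit cycle count.  */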
	return (u64) __vmgettime();
}

static struct clocksource hexagon_clocksource = {
	.name		= "pcycles",
	.rating		= 250,
	.read		= timer_get_cycles,
	.mask		= CLOCKSOURCE_MASK(64),
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};

static int set_next_event(unsigned long delta, struct clock_event_device *evt)
{
	/*  Assuming the timer will be disabled when we enter here.  */

	iowrite32(1, &rtos_timer->clear);
	iowrite32(0, &rtos_timer->clear);
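	/*  Writing 1 then 0 pulses the one-shot clear, zeroing the count.  */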

	iowrite32(delta, &rtos_timer->match);
	iowrite32(1 << TIMER_ENABLE, &rtos_timer->enable);
	return 0;
}

#ifdef CONFIG_SMP
/*  Broadcast mechanism  */
static void broadcast(const struct cpumask *mask)
{
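	/*  Each CPU in the mask handles IPI_TIMER by calling ipi_timer().  */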
	send_ipi(mask, IPI_TIMER);
}
#endif

/* XXX Implement set_state_shutdown() */
static struct clock_event_device hexagon_clockevent_dev = {
	.name		= "clockevent",
	.features	= CLOCK_EVT_FEAT_ONESHOT,
	.rating		= 400,
	.irq		= RTOS_TIMER_INT,
	.set_next_event = set_next_event,
#ifdef CONFIG_SMP
	.broadcast	= broadcast,
#endif
};

#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct clock_event_device, clock_events);

void setup_percpu_clockdev(void)
{
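	/*
	 * Register a per-cpu dummy clockevent so that broadcast ticks
	 * delivered via IPI_TIMER have a device to fire on this CPU.
	 */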
	int cpu = smp_processor_id();
	struct clock_event_device *ce_dev = &hexagon_clockevent_dev;
	struct clock_event_device *dummy_clock_dev =
		&per_cpu(clock_events, cpu);

	memcpy(dummy_clock_dev, ce_dev, sizeof(*dummy_clock_dev));
	INIT_LIST_HEAD(&dummy_clock_dev->list);

	dummy_clock_dev->features = CLOCK_EVT_FEAT_DUMMY;
	dummy_clock_dev->cpumask = cpumask_of(cpu);

	clockevents_register_device(dummy_clock_dev);
}

/*  Called from smp.c for each CPU's timer ipi call  */
void ipi_timer(void)
{
	int cpu = smp_processor_id();
	struct clock_event_device *ce_dev = &per_cpu(clock_events, cpu);

	ce_dev->event_handler(ce_dev);
}
#endif /* CONFIG_SMP */

static irqreturn_t timer_interrupt(int irq, void *devid)
{
	struct clock_event_device *ce_dev = &hexagon_clockevent_dev;

	iowrite32(0, &rtos_timer->enable);
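	/*  One-shot: the timer stays off until set_next_event() re-arms it.  */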
	ce_dev->event_handler(ce_dev);

	return IRQ_HANDLED;
}

/*
 * time_init_deferred - called by start_kernel to set up timer/clock source
 *
 * Install the IRQ handler for the clock, setup timers.
 * This is done late, as that way, we can use ioremap().
 *
 * This runs just before the delay loop is calibrated, since the
 * timer set up here is used for that calibration.
 */
void __init time_init_deferred(void)
{
	struct resource *resource = NULL;
	struct clock_event_device *ce_dev = &hexagon_clockevent_dev;
	unsigned long flag = IRQF_TIMER | IRQF_TRIGGER_RISING;

	ce_dev->cpumask = cpu_all_mask;

	if (!resource)
		resource = rtos_timer_device.resource;

	/*  ioremap here means this has to run later, after paging init  */
	rtos_timer = ioremap(resource->start, resource_size(resource));

	if (!rtos_timer)
		release_mem_region(resource->start, resource_size(resource));

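	/*  The pcycle counter runs at pcycle_freq_mhz MHz; register it in kHz.  */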
	clocksource_register_khz(&hexagon_clocksource, pcycle_freq_mhz * 1000);

	/*  Note: the sim generic RTOS clock is apparently really 18750Hz  */

	/*
	 * Last arg is some guaranteed seconds for which the conversion will
	 * work without overflow.
	 */
	clockevents_calc_mult_shift(ce_dev, sleep_clk_freq, 4);

	ce_dev->max_delta_ns = clockevent_delta2ns(0x7fffffff, ce_dev);
	ce_dev->max_delta_ticks = 0x7fffffff;
	ce_dev->min_delta_ns = clockevent_delta2ns(0xf, ce_dev);
	ce_dev->min_delta_ticks = 0xf;

#ifdef CONFIG_SMP
	setup_percpu_clockdev();
#endif

	clockevents_register_device(ce_dev);
	if (request_irq(ce_dev->irq, timer_interrupt, flag, "rtos_timer", NULL))
		pr_err("Failed to register rtos_timer interrupt\n");
}

void __init time_init(void)
{
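	/*  Defer the real setup until ioremap() is usable; see above.  */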
	late_time_init = time_init_deferred;
}

void __delay(unsigned long cycles)
{
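	/*  'cycles' is measured in pcycles, the units of __vmgettime().  */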
	unsigned long long start = __vmgettime();

	while ((__vmgettime() - start) < cycles)
		cpu_relax();
}
EXPORT_SYMBOL(__delay);

/*
 * This could become parametric or perhaps even computed at run-time,
 * but for now we take the observed simulator jitter.
 */
static long long fudgefactor = 350;  /* Maybe lower if kernel optimized. */

void __udelay(unsigned long usecs)
{
	unsigned long long start = __vmgettime();
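	/*  One microsecond is pcycle_freq_mhz pcycles; trim the measured overhead.  */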
	unsigned long long finish = (pcycle_freq_mhz * usecs) - fudgefactor;

	while ((__vmgettime() - start) < finish)
		cpu_relax();	/*  spin-wait hint to the core  */
}
EXPORT_SYMBOL(__udelay);