path: root/arch/blackfin/kernel/time-ts.c
blob: cb7a01d4f00924c47da598b7ea977241dae1e3e6
/*
 * Based on arm clockevents implementation and old bfin time tick.
 *
 * Copyright 2008-2009 Analog Devices Inc.
 *                2008 GeoTechnologies
 *                     Vitja Makarov
 *
 * Licensed under the GPL-2
 */

#include <linux/module.h>
#include <linux/profile.h>
#include <linux/interrupt.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/irq.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/cpufreq.h>

#include <asm/blackfin.h>
#include <asm/time.h>
#include <asm/gptimers.h>
#include <asm/nmi.h>

/* Accelerators for sched_clock()
 * convert from cycles(64bits) => nanoseconds (64bits)
 *  basic equation:
 *		ns = cycles / (freq / ns_per_sec)
 *		ns = cycles * (ns_per_sec / freq)
 *		ns = cycles * (10^9 / (cpu_khz * 10^3))
 *		ns = cycles * (10^6 / cpu_khz)
 *
 *	Then we use scaling math (suggested by george@mvista.com) to get:
 *		ns = cycles * (10^6 * SC / cpu_khz) / SC
 *		ns = cycles * cyc2ns_scale / SC
 *
 *	And since SC is a constant power of two, we can convert the div
 *  into a shift.
 *
 *  We can use khz divisor instead of mhz to keep a better precision, since
 *  cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits.
 *  (mathieu.desnoyers@polymtl.ca)
 *
 *			-johnstul@us.ibm.com "math is hard, lets go shopping!"
 */

#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */
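/*
 * Worked example of the scaling above (assuming a hypothetical 600 MHz
 * CCLK, i.e. cpu_khz = 600000):
 *	cyc2ns_scale = (10^6 << CYC2NS_SCALE_FACTOR) / cpu_khz
 *	             = (10^6 * 1024) / 600000 ~= 1706
 *	ns = (cycles * 1706) >> 10 ~= cycles * 1.666
 * which matches the expected ~1.667 ns per cycle at 600 MHz.
 */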

#if defined(CONFIG_CYCLES_CLOCKSOURCE)

static notrace cycle_t bfin_read_cycles(struct clocksource *cs)
{
#ifdef CONFIG_CPU_FREQ
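	/*
	 * With cpufreq, CCLK (and hence the rate of the CYCLES counter) can
	 * change at run time; the __bfin_cycles_off offset and the
	 * __bfin_cycles_mod shift are there to compensate for those
	 * transitions so the value returned here stays monotonic.
	 */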
	return __bfin_cycles_off + (get_cycles() << __bfin_cycles_mod);
#else
	return get_cycles();
#endif
}

static struct clocksource bfin_cs_cycles = {
	.name		= "bfin_cs_cycles",
	.rating		= 400,
	.read		= bfin_read_cycles,
	.mask		= CLOCKSOURCE_MASK(64),
	.shift		= CYC2NS_SCALE_FACTOR,
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};

static inline unsigned long long bfin_cs_cycles_sched_clock(void)
{
	return clocksource_cyc2ns(bfin_read_cycles(&bfin_cs_cycles),
		bfin_cs_cycles.mult, bfin_cs_cycles.shift);
}

static int __init bfin_cs_cycles_init(void)
{
	bfin_cs_cycles.mult = \
		clocksource_hz2mult(get_cclk(), bfin_cs_cycles.shift);

	if (clocksource_register(&bfin_cs_cycles))
		panic("failed to register clocksource");

	return 0;
}
#else
# define bfin_cs_cycles_init()
#endif

#ifdef CONFIG_GPTMR0_CLOCKSOURCE

void __init setup_gptimer0(void)
{
	disable_gptimers(TIMER0bit);

	set_gptimer_config(TIMER0_id, \
		TIMER_OUT_DIS | TIMER_PERIOD_CNT | TIMER_MODE_PWM);
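	/*
	 * -1 and -2 wrap to the maximum 32-bit period and pulse width, so the
	 * timer free-runs over its full counter range and can serve as a
	 * continuous clocksource.
	 */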
	set_gptimer_period(TIMER0_id, -1);
	set_gptimer_pwidth(TIMER0_id, -2);
	SSYNC();
	enable_gptimers(TIMER0bit);
}

static cycle_t bfin_read_gptimer0(struct clocksource *cs)
{
	return bfin_read_TIMER0_COUNTER();
}

static struct clocksource bfin_cs_gptimer0 = {
	.name		= "bfin_cs_gptimer0",
	.rating		= 350,
	.read		= bfin_read_gptimer0,
	.mask		= CLOCKSOURCE_MASK(32),
	.shift		= CYC2NS_SCALE_FACTOR,
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};

static inline unsigned long long bfin_cs_gptimer0_sched_clock(void)
{
	return clocksource_cyc2ns(bfin_read_TIMER0_COUNTER(),
		bfin_cs_gptimer0.mult, bfin_cs_gptimer0.shift);
}

static int __init bfin_cs_gptimer0_init(void)
{
	setup_gptimer0();

	bfin_cs_gptimer0.mult = \
		clocksource_hz2mult(get_sclk(), bfin_cs_gptimer0.shift);

	if (clocksource_register(&bfin_cs_gptimer0))
		panic("failed to register clocksource");

	return 0;
}
#else
# define bfin_cs_gptimer0_init()
#endif

#if defined(CONFIG_GPTMR0_CLOCKSOURCE) || defined(CONFIG_CYCLES_CLOCKSOURCE)
/* prefer to use cycles since it has higher rating */
notrace unsigned long long sched_clock(void)
{
#if defined(CONFIG_CYCLES_CLOCKSOURCE)
	return bfin_cs_cycles_sched_clock();
#else
	return bfin_cs_gptimer0_sched_clock();
#endif
}
#endif

#if defined(CONFIG_TICKSOURCE_GPTMR0)
static int bfin_gptmr0_set_next_event(unsigned long cycles,
                                     struct clock_event_device *evt)
{
	disable_gptimers(TIMER0bit);

	/* it starts counting three SCLK cycles after the TIMENx bit is set */
	set_gptimer_pwidth(TIMER0_id, cycles - 3);
	enable_gptimers(TIMER0bit);
	return 0;
}

static void bfin_gptmr0_set_mode(enum clock_event_mode mode,
				struct clock_event_device *evt)
{
	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC: {
		set_gptimer_config(TIMER0_id, \
			TIMER_OUT_DIS | TIMER_IRQ_ENA | \
			TIMER_PERIOD_CNT | TIMER_MODE_PWM);
		set_gptimer_period(TIMER0_id, get_sclk() / HZ);
		set_gptimer_pwidth(TIMER0_id, get_sclk() / HZ - 1);
		enable_gptimers(TIMER0bit);
		break;
	}
	case CLOCK_EVT_MODE_ONESHOT:
		disable_gptimers(TIMER0bit);
		set_gptimer_config(TIMER0_id, \
			TIMER_OUT_DIS | TIMER_IRQ_ENA | TIMER_MODE_PWM);
		set_gptimer_period(TIMER0_id, 0);
		break;
	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
		disable_gptimers(TIMER0bit);
		break;
	case CLOCK_EVT_MODE_RESUME:
		break;
	}
}

static void bfin_gptmr0_ack(void)
{
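	/* the status bits are write-1-to-clear: setting TIMIL0 acknowledges
	 * the timer 0 interrupt latch */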
	set_gptimer_status(TIMER_GROUP1, TIMER_STATUS_TIMIL0);
}

static void __init bfin_gptmr0_init(void)
{
	disable_gptimers(TIMER0bit);
}

#ifdef CONFIG_CORE_TIMER_IRQ_L1
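/* place the tick handler in on-chip L1 instruction SRAM for lower latency */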
__attribute__((l1_text))
#endif
irqreturn_t bfin_gptmr0_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;
	smp_mb();
	evt->event_handler(evt);
	bfin_gptmr0_ack();
	return IRQ_HANDLED;
}

static struct irqaction gptmr0_irq = {
	.name		= "Blackfin GPTimer0",
	.flags		= IRQF_DISABLED | IRQF_TIMER | \
			  IRQF_IRQPOLL | IRQF_PERCPU,
	.handler	= bfin_gptmr0_interrupt,
};

static struct clock_event_device clockevent_gptmr0 = {
	.name		= "bfin_gptimer0",
	.rating		= 300,
	.irq		= IRQ_TIMER0,
	.shift		= 32,
	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
	.set_next_event = bfin_gptmr0_set_next_event,
	.set_mode	= bfin_gptmr0_set_mode,
};

static void __init bfin_gptmr0_clockevent_init(struct clock_event_device *evt)
{
	unsigned long clock_tick;

	clock_tick = get_sclk();
	evt->mult = div_sc(clock_tick, NSEC_PER_SEC, evt->shift);
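	/* a delta of -1 wraps to 0xffffffff, the largest count the 32-bit
	 * timer can be programmed with */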
	evt->max_delta_ns = clockevent_delta2ns(-1, evt);
	evt->min_delta_ns = clockevent_delta2ns(100, evt);

	evt->cpumask = cpumask_of(0);

	clockevents_register_device(evt);
}
#endif /* CONFIG_TICKSOURCE_GPTMR0 */

#if defined(CONFIG_TICKSOURCE_CORETMR)
/* per-cpu local core timer */
static DEFINE_PER_CPU(struct clock_event_device, coretmr_events);

static int bfin_coretmr_set_next_event(unsigned long cycles,
				struct clock_event_device *evt)
{
	bfin_write_TCNTL(TMPWR);
	CSYNC();
	bfin_write_TCOUNT(cycles);
	CSYNC();
	bfin_write_TCNTL(TMPWR | TMREN);
	return 0;
}

static void bfin_coretmr_set_mode(enum clock_event_mode mode,
				struct clock_event_device *evt)
{
	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC: {
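		/*
		 * With TSCALE = TIME_SCALE - 1 the core timer ticks at
		 * CCLK / TIME_SCALE, so auto-reloading from
		 * CCLK / (HZ * TIME_SCALE) - 1 yields one interrupt every
		 * 1/HZ seconds.
		 */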
		unsigned long tcount = ((get_cclk() / (HZ * TIME_SCALE)) - 1);
		bfin_write_TCNTL(TMPWR);
		CSYNC();
		bfin_write_TSCALE(TIME_SCALE - 1);
		bfin_write_TPERIOD(tcount);
		bfin_write_TCOUNT(tcount);
		CSYNC();
		bfin_write_TCNTL(TMPWR | TMREN | TAUTORLD);
		break;
	}
	case CLOCK_EVT_MODE_ONESHOT:
		bfin_write_TCNTL(TMPWR);
		CSYNC();
		bfin_write_TSCALE(TIME_SCALE - 1);
		bfin_write_TPERIOD(0);
		bfin_write_TCOUNT(0);
		break;
	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
		bfin_write_TCNTL(0);
		CSYNC();
		break;
	case CLOCK_EVT_MODE_RESUME:
		break;
	}
}

void bfin_coretmr_init(void)
{
	/* power up the timer, but don't enable it just yet */
	bfin_write_TCNTL(TMPWR);
	CSYNC();

	/* the TSCALE prescaler counter. */
	bfin_write_TSCALE(TIME_SCALE - 1);
	bfin_write_TPERIOD(0);
	bfin_write_TCOUNT(0);

	CSYNC();
}

#ifdef CONFIG_CORE_TIMER_IRQ_L1
__attribute__((l1_text))
#endif
irqreturn_t bfin_coretmr_interrupt(int irq, void *dev_id)
{
	int cpu = smp_processor_id();
	struct clock_event_device *evt = &per_cpu(coretmr_events, cpu);

	smp_mb();
	evt->event_handler(evt);

	touch_nmi_watchdog();

	return IRQ_HANDLED;
}

static struct irqaction coretmr_irq = {
	.name		= "Blackfin CoreTimer",
	.flags		= IRQF_DISABLED | IRQF_TIMER | \
			  IRQF_IRQPOLL | IRQF_PERCPU,
	.handler	= bfin_coretmr_interrupt,
};

void bfin_coretmr_clockevent_init(void)
{
	unsigned long clock_tick;
	unsigned int cpu = smp_processor_id();
	struct clock_event_device *evt = &per_cpu(coretmr_events, cpu);

	evt->name = "bfin_core_timer";
	evt->rating = 350;
	evt->irq = -1;
	evt->shift = 32;
	evt->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
	evt->set_next_event = bfin_coretmr_set_next_event;
	evt->set_mode = bfin_coretmr_set_mode;

	clock_tick = get_cclk() / TIME_SCALE;
	evt->mult = div_sc(clock_tick, NSEC_PER_SEC, evt->shift);
	evt->max_delta_ns = clockevent_delta2ns(-1, evt);
	evt->min_delta_ns = clockevent_delta2ns(100, evt);

	evt->cpumask = cpumask_of(cpu);

	clockevents_register_device(evt);
}
#endif /* CONFIG_TICKSOURCE_CORETMR */


void __init time_init(void)
{
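	/* 365 * 37 + 9 days: 37 years since 1970 plus the 9 intervening leap
	 * days (1972..2004) */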
	time_t secs_since_1970 = (365 * 37 + 9) * 24 * 60 * 60;	/* 1 Jan 2007 */

#ifdef CONFIG_RTC_DRV_BFIN
	/* [#2663] hack to filter junk RTC values that would cause
	 * userspace to have to deal with time values greater than
	 * 2^31 seconds (which uClibc cannot cope with yet)
	 */
	if ((bfin_read_RTC_STAT() & 0xC0000000) == 0xC0000000) {
		printk(KERN_NOTICE "bfin-rtc: invalid date; resetting\n");
		bfin_write_RTC_STAT(0);
	}
#endif

	/* Initialize xtime. From now on, xtime is updated with timer interrupts */
	xtime.tv_sec = secs_since_1970;
	xtime.tv_nsec = 0;
	set_normalized_timespec(&wall_to_monotonic, -xtime.tv_sec, -xtime.tv_nsec);

	bfin_cs_cycles_init();
	bfin_cs_gptimer0_init();

#if defined(CONFIG_TICKSOURCE_CORETMR)
	bfin_coretmr_init();
	setup_irq(IRQ_CORETMR, &coretmr_irq);
	bfin_coretmr_clockevent_init();
#endif

#if defined(CONFIG_TICKSOURCE_GPTMR0)
	bfin_gptmr0_init();
	setup_irq(IRQ_TIMER0, &gptmr0_irq);
	gptmr0_irq.dev_id = &clockevent_gptmr0;
	bfin_gptmr0_clockevent_init(&clockevent_gptmr0);
#endif

#if !defined(CONFIG_TICKSOURCE_CORETMR) && !defined(CONFIG_TICKSOURCE_GPTMR0)
# error at least one clock event device is required
#endif
}