]> asedeno.scripts.mit.edu Git - linux.git/blob - drivers/clocksource/arm_arch_timer.c
clocksource/drivers/arm_arch_timer: Introduce generic errata handling infrastructure
[linux.git] / drivers / clocksource / arm_arch_timer.c
1 /*
2  *  linux/drivers/clocksource/arm_arch_timer.c
3  *
4  *  Copyright (C) 2011 ARM Ltd.
5  *  All Rights Reserved
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License version 2 as
9  * published by the Free Software Foundation.
10  */
11
12 #define pr_fmt(fmt)     "arm_arch_timer: " fmt
13
14 #include <linux/init.h>
15 #include <linux/kernel.h>
16 #include <linux/device.h>
17 #include <linux/smp.h>
18 #include <linux/cpu.h>
19 #include <linux/cpu_pm.h>
20 #include <linux/clockchips.h>
21 #include <linux/clocksource.h>
22 #include <linux/interrupt.h>
23 #include <linux/of_irq.h>
24 #include <linux/of_address.h>
25 #include <linux/io.h>
26 #include <linux/slab.h>
27 #include <linux/sched_clock.h>
28 #include <linux/acpi.h>
29
30 #include <asm/arch_timer.h>
31 #include <asm/virt.h>
32
33 #include <clocksource/arm_arch_timer.h>
34
/* CNTCTLBase registers: frame ID and per-frame access control */
#define CNTTIDR		0x08
#define CNTTIDR_VIRT(n)	(BIT(1) << ((n) * 4))

#define CNTACR(n)	(0x40 + ((n) * 4))
#define CNTACR_RPCT	BIT(0)
#define CNTACR_RVCT	BIT(1)
#define CNTACR_RFRQ	BIT(2)
#define CNTACR_RVOFF	BIT(3)
#define CNTACR_RWVT	BIT(4)
#define CNTACR_RWPT	BIT(5)

/* CNTBase (per-frame) register offsets */
#define CNTVCT_LO	0x08
#define CNTVCT_HI	0x0c
#define CNTFRQ		0x10
#define CNTP_TVAL	0x28
#define CNTP_CTL	0x2c
#define CNTV_TVAL	0x38
#define CNTV_CTL	0x3c

/* Which timer flavours have been probed so far */
#define ARCH_CP15_TIMER	BIT(0)
#define ARCH_MEM_TIMER	BIT(1)
static unsigned arch_timers_present __initdata;

/* MMIO frame used by arch_counter_get_cntvct_mem() */
static void __iomem *arch_counter_base;

/* A memory-mapped timer frame: MMIO base plus its clockevent */
struct arch_timer {
	void __iomem *base;
	struct clock_event_device evt;
};

#define to_arch_timer(e) container_of(e, struct arch_timer, evt)

/* Probed counter frequency in Hz; 0 until detected */
static u32 arch_timer_rate;

/* Indices into arch_timer_ppi[] for each possible per-cpu interrupt */
enum ppi_nr {
	PHYS_SECURE_PPI,
	PHYS_NONSECURE_PPI,
	VIRT_PPI,
	HYP_PPI,
	MAX_TIMER_PPI
};

static int arch_timer_ppi[MAX_TIMER_PPI];

static struct clock_event_device __percpu *arch_timer_evt;

/* Which PPI (and hence which timer, virt or phys) the cp15 path uses */
static enum ppi_nr arch_timer_uses_ppi = VIRT_PPI;
static bool arch_timer_c3stop;
static bool arch_timer_mem_use_virtual;
static bool arch_counter_suspend_stop;

/* Event stream defaults to the Kconfig choice; overridable at boot */
static bool evtstrm_enable = IS_ENABLED(CONFIG_ARM_ARCH_TIMER_EVTSTREAM);
87
/*
 * Parse the "clocksource.arm_arch_timer.evtstrm" early parameter to
 * override the Kconfig default for event stream generation.
 */
static int __init early_evtstrm_cfg(char *buf)
{
	return strtobool(buf, &evtstrm_enable);
}
early_param("clocksource.arm_arch_timer.evtstrm", early_evtstrm_cfg);
93
94 /*
95  * Architected system timer support.
96  */
97
#ifdef CONFIG_FSL_ERRATUM_A008585
/*
 * Freescale erratum A008585 workaround: a register read may return a
 * stale value, so read back-to-back until two reads agree.
 *
 * The number of retries is an arbitrary value well beyond the highest number
 * of iterations the loop has been observed to take.
 */
#define __fsl_a008585_read_reg(reg) ({			\
	u64 _old, _new;					\
	int _retries = 200;				\
							\
	do {						\
		_old = read_sysreg(reg);		\
		_new = read_sysreg(reg);		\
		_retries--;				\
	} while (unlikely(_old != _new) && _retries);	\
							\
	WARN_ON_ONCE(!_retries);			\
	_new;						\
})

/* Workaround-safe TVAL/CNTVCT accessors plugged into ool_workarounds[] */
static u32 notrace fsl_a008585_read_cntp_tval_el0(void)
{
	return __fsl_a008585_read_reg(cntp_tval_el0);
}

static u32 notrace fsl_a008585_read_cntv_tval_el0(void)
{
	return __fsl_a008585_read_reg(cntv_tval_el0);
}

static u64 notrace fsl_a008585_read_cntvct_el0(void)
{
	return __fsl_a008585_read_reg(cntvct_el0);
}
#endif
132
#ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND
/* Active erratum workaround, if any; selected at DT probe time */
const struct arch_timer_erratum_workaround *timer_unstable_counter_workaround = NULL;
EXPORT_SYMBOL_GPL(timer_unstable_counter_workaround);

/* Flipped on when an out-of-line counter accessor must be used */
DEFINE_STATIC_KEY_FALSE(arch_timer_read_ool_enabled);
EXPORT_SYMBOL_GPL(arch_timer_read_ool_enabled);

/* Known errata, matched against DT properties by .id */
static const struct arch_timer_erratum_workaround ool_workarounds[] = {
#ifdef CONFIG_FSL_ERRATUM_A008585
	{
		.id = "fsl,erratum-a008585",
		.read_cntp_tval_el0 = fsl_a008585_read_cntp_tval_el0,
		.read_cntv_tval_el0 = fsl_a008585_read_cntv_tval_el0,
		.read_cntvct_el0 = fsl_a008585_read_cntvct_el0,
	},
#endif
};
#endif /* CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND */
151
/*
 * Write a timer register through the access method selected by @access:
 * MMIO physical frame, MMIO virtual frame, or the cp15 sysregs.
 * Only CTRL and TVAL are handled for the MMIO paths; other values of
 * @reg fall through the switch as no-ops.
 */
static __always_inline
void arch_timer_reg_write(int access, enum arch_timer_reg reg, u32 val,
			  struct clock_event_device *clk)
{
	if (access == ARCH_TIMER_MEM_PHYS_ACCESS) {
		struct arch_timer *timer = to_arch_timer(clk);
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			writel_relaxed(val, timer->base + CNTP_CTL);
			break;
		case ARCH_TIMER_REG_TVAL:
			writel_relaxed(val, timer->base + CNTP_TVAL);
			break;
		}
	} else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
		struct arch_timer *timer = to_arch_timer(clk);
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			writel_relaxed(val, timer->base + CNTV_CTL);
			break;
		case ARCH_TIMER_REG_TVAL:
			writel_relaxed(val, timer->base + CNTV_TVAL);
			break;
		}
	} else {
		arch_timer_reg_write_cp15(access, reg, val);
	}
}
180
/*
 * Read a timer register through the access method selected by @access
 * (MMIO physical/virtual frame or cp15). NOTE(review): for an MMIO
 * access with an unhandled @reg, val is returned uninitialized -- the
 * callers in this file only pass CTRL/TVAL.
 */
static __always_inline
u32 arch_timer_reg_read(int access, enum arch_timer_reg reg,
			struct clock_event_device *clk)
{
	u32 val;

	if (access == ARCH_TIMER_MEM_PHYS_ACCESS) {
		struct arch_timer *timer = to_arch_timer(clk);
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			val = readl_relaxed(timer->base + CNTP_CTL);
			break;
		case ARCH_TIMER_REG_TVAL:
			val = readl_relaxed(timer->base + CNTP_TVAL);
			break;
		}
	} else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
		struct arch_timer *timer = to_arch_timer(clk);
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			val = readl_relaxed(timer->base + CNTV_CTL);
			break;
		case ARCH_TIMER_REG_TVAL:
			val = readl_relaxed(timer->base + CNTV_TVAL);
			break;
		}
	} else {
		val = arch_timer_reg_read_cp15(access, reg);
	}

	return val;
}
213
214 static __always_inline irqreturn_t timer_handler(const int access,
215                                         struct clock_event_device *evt)
216 {
217         unsigned long ctrl;
218
219         ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, evt);
220         if (ctrl & ARCH_TIMER_CTRL_IT_STAT) {
221                 ctrl |= ARCH_TIMER_CTRL_IT_MASK;
222                 arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, evt);
223                 evt->event_handler(evt);
224                 return IRQ_HANDLED;
225         }
226
227         return IRQ_NONE;
228 }
229
/* irq handlers: one thin wrapper per access method around timer_handler() */
static irqreturn_t arch_timer_handler_virt(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	return timer_handler(ARCH_TIMER_VIRT_ACCESS, evt);
}

static irqreturn_t arch_timer_handler_phys(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	return timer_handler(ARCH_TIMER_PHYS_ACCESS, evt);
}

static irqreturn_t arch_timer_handler_phys_mem(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	return timer_handler(ARCH_TIMER_MEM_PHYS_ACCESS, evt);
}

static irqreturn_t arch_timer_handler_virt_mem(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	return timer_handler(ARCH_TIMER_MEM_VIRT_ACCESS, evt);
}
257
/* Disable the timer by clearing only the enable bit in CTRL */
static __always_inline int timer_shutdown(const int access,
					  struct clock_event_device *clk)
{
	unsigned long ctrl;

	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
	ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);

	return 0;
}

/* clockevents set_state_shutdown callbacks, one per access method */
static int arch_timer_shutdown_virt(struct clock_event_device *clk)
{
	return timer_shutdown(ARCH_TIMER_VIRT_ACCESS, clk);
}

static int arch_timer_shutdown_phys(struct clock_event_device *clk)
{
	return timer_shutdown(ARCH_TIMER_PHYS_ACCESS, clk);
}

static int arch_timer_shutdown_virt_mem(struct clock_event_device *clk)
{
	return timer_shutdown(ARCH_TIMER_MEM_VIRT_ACCESS, clk);
}

static int arch_timer_shutdown_phys_mem(struct clock_event_device *clk)
{
	return timer_shutdown(ARCH_TIMER_MEM_PHYS_ACCESS, clk);
}
289
/*
 * Arm a one-shot expiry @evt ticks from now: program the downcounter
 * (TVAL) first, then enable and unmask via CTRL in a single write.
 */
static __always_inline void set_next_event(const int access, unsigned long evt,
					   struct clock_event_device *clk)
{
	unsigned long ctrl;
	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
	ctrl |= ARCH_TIMER_CTRL_ENABLE;
	ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
	arch_timer_reg_write(access, ARCH_TIMER_REG_TVAL, evt, clk);
	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
}
300
#ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND
/*
 * Erratum variant of set_next_event: program the absolute comparator
 * (CVAL) from a workaround-safe counter read rather than writing the
 * relative TVAL register — presumably because TVAL is unreliable on
 * the affected parts (cf. the A008585 TVAL read workarounds above).
 */
static __always_inline void erratum_set_next_event_generic(const int access,
		unsigned long evt, struct clock_event_device *clk)
{
	unsigned long ctrl;
	u64 cval = evt + arch_counter_get_cntvct();

	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
	ctrl |= ARCH_TIMER_CTRL_ENABLE;
	ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;

	if (access == ARCH_TIMER_PHYS_ACCESS)
		write_sysreg(cval, cntp_cval_el0);
	else if (access == ARCH_TIMER_VIRT_ACCESS)
		write_sysreg(cval, cntv_cval_el0);

	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
}

static int erratum_set_next_event_virt(unsigned long evt,
					   struct clock_event_device *clk)
{
	erratum_set_next_event_generic(ARCH_TIMER_VIRT_ACCESS, evt, clk);
	return 0;
}

static int erratum_set_next_event_phys(unsigned long evt,
					   struct clock_event_device *clk)
{
	erratum_set_next_event_generic(ARCH_TIMER_PHYS_ACCESS, evt, clk);
	return 0;
}
#endif /* CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND */
334
/* clockevents set_next_event callbacks, one per access method */
static int arch_timer_set_next_event_virt(unsigned long evt,
					  struct clock_event_device *clk)
{
	set_next_event(ARCH_TIMER_VIRT_ACCESS, evt, clk);
	return 0;
}

static int arch_timer_set_next_event_phys(unsigned long evt,
					  struct clock_event_device *clk)
{
	set_next_event(ARCH_TIMER_PHYS_ACCESS, evt, clk);
	return 0;
}

static int arch_timer_set_next_event_virt_mem(unsigned long evt,
					      struct clock_event_device *clk)
{
	set_next_event(ARCH_TIMER_MEM_VIRT_ACCESS, evt, clk);
	return 0;
}

static int arch_timer_set_next_event_phys_mem(unsigned long evt,
					      struct clock_event_device *clk)
{
	set_next_event(ARCH_TIMER_MEM_PHYS_ACCESS, evt, clk);
	return 0;
}
362
/*
 * If an out-of-line erratum workaround is active, override the cp15
 * clockevent's set_next_event with the CVAL-based erratum variant.
 * No-op when no workaround is enabled or the config is off.
 */
static void erratum_workaround_set_sne(struct clock_event_device *clk)
{
#ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND
	if (!static_branch_unlikely(&arch_timer_read_ool_enabled))
		return;

	if (arch_timer_uses_ppi == VIRT_PPI)
		clk->set_next_event = erratum_set_next_event_virt;
	else
		clk->set_next_event = erratum_set_next_event_phys;
#endif
}
375
/*
 * Fill in a clockevent for either the per-cpu cp15 timer or a
 * memory-mapped frame, pick the callbacks matching the access method,
 * shut the timer down to a known state, and register it.
 */
static void __arch_timer_setup(unsigned type,
			       struct clock_event_device *clk)
{
	clk->features = CLOCK_EVT_FEAT_ONESHOT;

	if (type == ARCH_CP15_TIMER) {
		/* C3STOP: timer may stop in deep idle unless "always-on" */
		if (arch_timer_c3stop)
			clk->features |= CLOCK_EVT_FEAT_C3STOP;
		clk->name = "arch_sys_timer";
		clk->rating = 450;
		clk->cpumask = cpumask_of(smp_processor_id());
		clk->irq = arch_timer_ppi[arch_timer_uses_ppi];
		switch (arch_timer_uses_ppi) {
		case VIRT_PPI:
			clk->set_state_shutdown = arch_timer_shutdown_virt;
			clk->set_state_oneshot_stopped = arch_timer_shutdown_virt;
			clk->set_next_event = arch_timer_set_next_event_virt;
			break;
		case PHYS_SECURE_PPI:
		case PHYS_NONSECURE_PPI:
		case HYP_PPI:
			clk->set_state_shutdown = arch_timer_shutdown_phys;
			clk->set_state_oneshot_stopped = arch_timer_shutdown_phys;
			clk->set_next_event = arch_timer_set_next_event_phys;
			break;
		default:
			BUG();
		}

		/* May override set_next_event if an erratum is active */
		erratum_workaround_set_sne(clk);
	} else {
		clk->features |= CLOCK_EVT_FEAT_DYNIRQ;
		clk->name = "arch_mem_timer";
		clk->rating = 400;
		clk->cpumask = cpu_all_mask;
		if (arch_timer_mem_use_virtual) {
			clk->set_state_shutdown = arch_timer_shutdown_virt_mem;
			clk->set_state_oneshot_stopped = arch_timer_shutdown_virt_mem;
			clk->set_next_event =
				arch_timer_set_next_event_virt_mem;
		} else {
			clk->set_state_shutdown = arch_timer_shutdown_phys_mem;
			clk->set_state_oneshot_stopped = arch_timer_shutdown_phys_mem;
			clk->set_next_event =
				arch_timer_set_next_event_phys_mem;
		}
	}

	/* Start from a disabled timer before registering */
	clk->set_state_shutdown(clk);

	clockevents_config_and_register(clk, arch_timer_rate, 0xf, 0x7fffffff);
}
428
/*
 * Enable the virtual event stream with the given divider (written to
 * the CNTKCTL trigger field) and advertise the capability via hwcap.
 */
static void arch_timer_evtstrm_enable(int divider)
{
	u32 cntkctl = arch_timer_get_cntkctl();

	cntkctl &= ~ARCH_TIMER_EVT_TRIGGER_MASK;
	/* Set the divider and enable virtual event stream */
	cntkctl |= (divider << ARCH_TIMER_EVT_TRIGGER_SHIFT)
			| ARCH_TIMER_VIRT_EVT_EN;
	arch_timer_set_cntkctl(cntkctl);
	elf_hwcap |= HWCAP_EVTSTRM;
#ifdef CONFIG_COMPAT
	compat_elf_hwcap |= COMPAT_HWCAP_EVTSTRM;
#endif
}
443
444 static void arch_timer_configure_evtstream(void)
445 {
446         int evt_stream_div, pos;
447
448         /* Find the closest power of two to the divisor */
449         evt_stream_div = arch_timer_rate / ARCH_TIMER_EVT_STREAM_FREQ;
450         pos = fls(evt_stream_div);
451         if (pos > 1 && !(evt_stream_div & (1 << (pos - 2))))
452                 pos--;
453         /* enable event stream */
454         arch_timer_evtstrm_enable(min(pos, 15));
455 }
456
/*
 * Configure CNTKCTL for EL0: forbid direct access to the timers, the
 * physical counter and the event stream, but allow reads of the
 * virtual counter (used by the vdso fastpath, see
 * arch_counter_register()).
 */
static void arch_counter_set_user_access(void)
{
	u32 cntkctl = arch_timer_get_cntkctl();

	/* Disable user access to the timers and the physical counter */
	/* Also disable virtual event stream */
	cntkctl &= ~(ARCH_TIMER_USR_PT_ACCESS_EN
			| ARCH_TIMER_USR_VT_ACCESS_EN
			| ARCH_TIMER_VIRT_EVT_EN
			| ARCH_TIMER_USR_PCT_ACCESS_EN);

	/* Enable user access to the virtual counter */
	cntkctl |= ARCH_TIMER_USR_VCT_ACCESS_EN;

	arch_timer_set_cntkctl(cntkctl);
}
473
474 static bool arch_timer_has_nonsecure_ppi(void)
475 {
476         return (arch_timer_uses_ppi == PHYS_SECURE_PPI &&
477                 arch_timer_ppi[PHYS_NONSECURE_PPI]);
478 }
479
480 static u32 check_ppi_trigger(int irq)
481 {
482         u32 flags = irq_get_trigger_type(irq);
483
484         if (flags != IRQF_TRIGGER_HIGH && flags != IRQF_TRIGGER_LOW) {
485                 pr_warn("WARNING: Invalid trigger for IRQ%d, assuming level low\n", irq);
486                 pr_warn("WARNING: Please fix your firmware\n");
487                 flags = IRQF_TRIGGER_LOW;
488         }
489
490         return flags;
491 }
492
/*
 * CPU-hotplug "starting" callback: set up this CPU's clockevent,
 * enable its PPI(s), restrict user access to the counter/timers and,
 * if requested, turn on the event stream.
 */
static int arch_timer_starting_cpu(unsigned int cpu)
{
	struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt);
	u32 flags;

	__arch_timer_setup(ARCH_CP15_TIMER, clk);

	flags = check_ppi_trigger(arch_timer_ppi[arch_timer_uses_ppi]);
	enable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], flags);

	if (arch_timer_has_nonsecure_ppi()) {
		flags = check_ppi_trigger(arch_timer_ppi[PHYS_NONSECURE_PPI]);
		enable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], flags);
	}

	arch_counter_set_user_access();
	if (evtstrm_enable)
		arch_timer_configure_evtstream();

	return 0;
}
514
/*
 * Determine arch_timer_rate once: from the DT "clock-frequency"
 * property when booting via DT, otherwise from the CNTFRQ register
 * (MMIO frame if @cntbase is given, cp15 otherwise).
 */
static void
arch_timer_detect_rate(void __iomem *cntbase, struct device_node *np)
{
	/* Who has more than one independent system counter? */
	if (arch_timer_rate)
		return;

	/*
	 * Try to determine the frequency from the device tree or CNTFRQ,
	 * if ACPI is enabled, get the frequency from CNTFRQ ONLY.
	 */
	if (!acpi_disabled ||
	    of_property_read_u32(np, "clock-frequency", &arch_timer_rate)) {
		if (cntbase)
			arch_timer_rate = readl_relaxed(cntbase + CNTFRQ);
		else
			arch_timer_rate = arch_timer_get_cntfrq();
	}

	/* Check the timer frequency. */
	if (arch_timer_rate == 0)
		pr_warn("Architected timer frequency not available\n");
}
538
/* Log which timer flavours were probed, their rate and access modes */
static void arch_timer_banner(unsigned type)
{
	pr_info("Architected %s%s%s timer(s) running at %lu.%02luMHz (%s%s%s).\n",
		     type & ARCH_CP15_TIMER ? "cp15" : "",
		     type == (ARCH_CP15_TIMER | ARCH_MEM_TIMER) ?  " and " : "",
		     type & ARCH_MEM_TIMER ? "mmio" : "",
		     (unsigned long)arch_timer_rate / 1000000,
		     (unsigned long)(arch_timer_rate / 10000) % 100,
		     type & ARCH_CP15_TIMER ?
		     (arch_timer_uses_ppi == VIRT_PPI) ? "virt" : "phys" :
			"",
		     type == (ARCH_CP15_TIMER | ARCH_MEM_TIMER) ?  "/" : "",
		     type & ARCH_MEM_TIMER ?
			arch_timer_mem_use_virtual ? "virt" : "phys" :
			"");
}
555
/* Public accessor for the probed timer frequency in Hz (0 if unknown) */
u32 arch_timer_get_rate(void)
{
	return arch_timer_rate;
}
560
/*
 * Read the 64-bit MMIO virtual counter as two 32-bit halves. The
 * hi/lo/hi read sequence detects a carry from LO into HI between the
 * reads; retry until the two HI reads agree, then combine.
 */
static u64 arch_counter_get_cntvct_mem(void)
{
	u32 vct_lo, vct_hi, tmp_hi;

	do {
		vct_hi = readl_relaxed(arch_counter_base + CNTVCT_HI);
		vct_lo = readl_relaxed(arch_counter_base + CNTVCT_LO);
		tmp_hi = readl_relaxed(arch_counter_base + CNTVCT_HI);
	} while (vct_hi != tmp_hi);

	return ((u64) vct_hi << 32) | vct_lo;
}
573
/*
 * Default to cp15 based access because arm64 uses this function for
 * sched_clock() before DT is probed and the cp15 method is guaranteed
 * to exist on arm64. arm doesn't use this before DT is probed so even
 * if we don't have the cp15 accessors we won't have a problem.
 *
 * The final accessor is chosen in arch_counter_register().
 */
u64 (*arch_timer_read_counter)(void) = arch_counter_get_cntvct;
581
/* clocksource read callback: defer to the selected counter accessor */
static u64 arch_counter_read(struct clocksource *cs)
{
	return arch_timer_read_counter();
}

/* cyclecounter read callback: same accessor, different signature */
static u64 arch_counter_read_cc(const struct cyclecounter *cc)
{
	return arch_timer_read_counter();
}
591
/*
 * Timekeeping clocksource. The 56-bit mask matches the assumption in
 * arch_counter_register() ("56 bits minimum" counter width).
 */
static struct clocksource clocksource_counter = {
	.name	= "arch_sys_counter",
	.rating	= 400,
	.read	= arch_counter_read,
	.mask	= CLOCKSOURCE_MASK(56),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

/* Cyclecounter feeding the KVM timecounter (see arch_counter_register()) */
static struct cyclecounter cyclecounter = {
	.read	= arch_counter_read_cc,
	.mask	= CLOCKSOURCE_MASK(56),
};
604
static struct arch_timer_kvm_info arch_timer_kvm_info;

/* Hand KVM the timecounter and virtual IRQ collected during init */
struct arch_timer_kvm_info *arch_timer_get_kvm_info(void)
{
	return &arch_timer_kvm_info;
}
611
/*
 * Pick the final counter accessor for the probed timer type, then
 * register it as clocksource, KVM timecounter and sched_clock.
 */
static void __init arch_counter_register(unsigned type)
{
	u64 start_count;

	/* Register the CP15 based counter if we have one */
	if (type & ARCH_CP15_TIMER) {
		/* arm64 always reads CNTVCT; 32-bit only when on VIRT_PPI */
		if (IS_ENABLED(CONFIG_ARM64) || arch_timer_uses_ppi == VIRT_PPI)
			arch_timer_read_counter = arch_counter_get_cntvct;
		else
			arch_timer_read_counter = arch_counter_get_cntpct;

		clocksource_counter.archdata.vdso_direct = true;

#ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND
		/*
		 * Don't use the vdso fastpath if errata require using
		 * the out-of-line counter accessor.
		 */
		if (static_branch_unlikely(&arch_timer_read_ool_enabled))
			clocksource_counter.archdata.vdso_direct = false;
#endif
	} else {
		arch_timer_read_counter = arch_counter_get_cntvct_mem;
	}

	if (!arch_counter_suspend_stop)
		clocksource_counter.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;
	start_count = arch_timer_read_counter();
	clocksource_register_hz(&clocksource_counter, arch_timer_rate);
	cyclecounter.mult = clocksource_counter.mult;
	cyclecounter.shift = clocksource_counter.shift;
	timecounter_init(&arch_timer_kvm_info.timecounter,
			 &cyclecounter, start_count);

	/* 56 bits minimum, so we assume worst case rollover */
	sched_clock_register(arch_timer_read_counter, 56, arch_timer_rate);
}
649
/* Per-CPU teardown: disable this CPU's PPI(s) and shut the timer down */
static void arch_timer_stop(struct clock_event_device *clk)
{
	pr_debug("arch_timer_teardown disable IRQ%d cpu #%d\n",
		 clk->irq, smp_processor_id());

	disable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi]);
	if (arch_timer_has_nonsecure_ppi())
		disable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI]);

	clk->set_state_shutdown(clk);
}
661
/* CPU-hotplug "dying" callback: tear down this CPU's timer */
static int arch_timer_dying_cpu(unsigned int cpu)
{
	struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt);

	arch_timer_stop(clk);
	return 0;
}
669
#ifdef CONFIG_CPU_PM
/*
 * Save CNTKCTL on CPU_PM_ENTER and restore it on exit/failed-entry,
 * since the register contents may be lost across core power-down.
 * NOTE(review): saved_cntkctl is a single global although CNTKCTL is
 * per-CPU; this assumes every CPU programs the identical value (as
 * arch_counter_set_user_access() does) — confirm.
 */
static unsigned int saved_cntkctl;
static int arch_timer_cpu_pm_notify(struct notifier_block *self,
				    unsigned long action, void *hcpu)
{
	if (action == CPU_PM_ENTER)
		saved_cntkctl = arch_timer_get_cntkctl();
	else if (action == CPU_PM_ENTER_FAILED || action == CPU_PM_EXIT)
		arch_timer_set_cntkctl(saved_cntkctl);
	return NOTIFY_OK;
}

static struct notifier_block arch_timer_cpu_pm_notifier = {
	.notifier_call = arch_timer_cpu_pm_notify,
};

static int __init arch_timer_cpu_pm_init(void)
{
	return cpu_pm_register_notifier(&arch_timer_cpu_pm_notifier);
}

static void __init arch_timer_cpu_pm_deinit(void)
{
	WARN_ON(cpu_pm_unregister_notifier(&arch_timer_cpu_pm_notifier));
}

#else
/* Stubs when CONFIG_CPU_PM is disabled */
static int __init arch_timer_cpu_pm_init(void)
{
	return 0;
}

static void __init arch_timer_cpu_pm_deinit(void)
{
}
#endif
706
707 static int __init arch_timer_register(void)
708 {
709         int err;
710         int ppi;
711
712         arch_timer_evt = alloc_percpu(struct clock_event_device);
713         if (!arch_timer_evt) {
714                 err = -ENOMEM;
715                 goto out;
716         }
717
718         ppi = arch_timer_ppi[arch_timer_uses_ppi];
719         switch (arch_timer_uses_ppi) {
720         case VIRT_PPI:
721                 err = request_percpu_irq(ppi, arch_timer_handler_virt,
722                                          "arch_timer", arch_timer_evt);
723                 break;
724         case PHYS_SECURE_PPI:
725         case PHYS_NONSECURE_PPI:
726                 err = request_percpu_irq(ppi, arch_timer_handler_phys,
727                                          "arch_timer", arch_timer_evt);
728                 if (!err && arch_timer_ppi[PHYS_NONSECURE_PPI]) {
729                         ppi = arch_timer_ppi[PHYS_NONSECURE_PPI];
730                         err = request_percpu_irq(ppi, arch_timer_handler_phys,
731                                                  "arch_timer", arch_timer_evt);
732                         if (err)
733                                 free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI],
734                                                 arch_timer_evt);
735                 }
736                 break;
737         case HYP_PPI:
738                 err = request_percpu_irq(ppi, arch_timer_handler_phys,
739                                          "arch_timer", arch_timer_evt);
740                 break;
741         default:
742                 BUG();
743         }
744
745         if (err) {
746                 pr_err("arch_timer: can't register interrupt %d (%d)\n",
747                        ppi, err);
748                 goto out_free;
749         }
750
751         err = arch_timer_cpu_pm_init();
752         if (err)
753                 goto out_unreg_notify;
754
755
756         /* Register and immediately configure the timer on the boot CPU */
757         err = cpuhp_setup_state(CPUHP_AP_ARM_ARCH_TIMER_STARTING,
758                                 "clockevents/arm/arch_timer:starting",
759                                 arch_timer_starting_cpu, arch_timer_dying_cpu);
760         if (err)
761                 goto out_unreg_cpupm;
762         return 0;
763
764 out_unreg_cpupm:
765         arch_timer_cpu_pm_deinit();
766
767 out_unreg_notify:
768         free_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], arch_timer_evt);
769         if (arch_timer_has_nonsecure_ppi())
770                 free_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI],
771                                 arch_timer_evt);
772
773 out_free:
774         free_percpu(arch_timer_evt);
775 out:
776         return err;
777 }
778
779 static int __init arch_timer_mem_register(void __iomem *base, unsigned int irq)
780 {
781         int ret;
782         irq_handler_t func;
783         struct arch_timer *t;
784
785         t = kzalloc(sizeof(*t), GFP_KERNEL);
786         if (!t)
787                 return -ENOMEM;
788
789         t->base = base;
790         t->evt.irq = irq;
791         __arch_timer_setup(ARCH_MEM_TIMER, &t->evt);
792
793         if (arch_timer_mem_use_virtual)
794                 func = arch_timer_handler_virt_mem;
795         else
796                 func = arch_timer_handler_phys_mem;
797
798         ret = request_irq(irq, func, IRQF_TIMER, "arch_mem_timer", &t->evt);
799         if (ret) {
800                 pr_err("arch_timer: Failed to request mem timer irq\n");
801                 kfree(t);
802         }
803
804         return ret;
805 }
806
/* DT compatibles for the cp15 (system register) timer */
static const struct of_device_id arch_timer_of_match[] __initconst = {
	{ .compatible	= "arm,armv7-timer",	},
	{ .compatible	= "arm,armv8-timer",	},
	{},
};

/* DT compatible for the memory-mapped timer frames */
static const struct of_device_id arch_timer_mem_of_match[] __initconst = {
	{ .compatible	= "arm,armv7-timer-mem", },
	{},
};
817
818 static bool __init
819 arch_timer_needs_probing(int type, const struct of_device_id *matches)
820 {
821         struct device_node *dn;
822         bool needs_probing = false;
823
824         dn = of_find_matching_node(NULL, matches);
825         if (dn && of_device_is_available(dn) && !(arch_timers_present & type))
826                 needs_probing = true;
827         of_node_put(dn);
828
829         return needs_probing;
830 }
831
/*
 * Common tail of probing: if both timer flavours exist in the DT,
 * defer until both have probed; then print the banner, register the
 * counter and run the arch-specific init.
 */
static int __init arch_timer_common_init(void)
{
	unsigned mask = ARCH_CP15_TIMER | ARCH_MEM_TIMER;

	/* Wait until both nodes are probed if we have two timers */
	if ((arch_timers_present & mask) != mask) {
		if (arch_timer_needs_probing(ARCH_MEM_TIMER, arch_timer_mem_of_match))
			return 0;
		if (arch_timer_needs_probing(ARCH_CP15_TIMER, arch_timer_of_match))
			return 0;
	}

	arch_timer_banner(arch_timers_present);
	arch_counter_register(arch_timers_present);
	return arch_timer_arch_init();
}
848
849 static int __init arch_timer_init(void)
850 {
851         int ret;
852         /*
853          * If HYP mode is available, we know that the physical timer
854          * has been configured to be accessible from PL1. Use it, so
855          * that a guest can use the virtual timer instead.
856          *
857          * If no interrupt provided for virtual timer, we'll have to
858          * stick to the physical timer. It'd better be accessible...
859          *
860          * On ARMv8.1 with VH extensions, the kernel runs in HYP. VHE
861          * accesses to CNTP_*_EL1 registers are silently redirected to
862          * their CNTHP_*_EL2 counterparts, and use a different PPI
863          * number.
864          */
865         if (is_hyp_mode_available() || !arch_timer_ppi[VIRT_PPI]) {
866                 bool has_ppi;
867
868                 if (is_kernel_in_hyp_mode()) {
869                         arch_timer_uses_ppi = HYP_PPI;
870                         has_ppi = !!arch_timer_ppi[HYP_PPI];
871                 } else {
872                         arch_timer_uses_ppi = PHYS_SECURE_PPI;
873                         has_ppi = (!!arch_timer_ppi[PHYS_SECURE_PPI] ||
874                                    !!arch_timer_ppi[PHYS_NONSECURE_PPI]);
875                 }
876
877                 if (!has_ppi) {
878                         pr_warn("arch_timer: No interrupt available, giving up\n");
879                         return -EINVAL;
880                 }
881         }
882
883         ret = arch_timer_register();
884         if (ret)
885                 return ret;
886
887         ret = arch_timer_common_init();
888         if (ret)
889                 return ret;
890
891         arch_timer_kvm_info.virtual_irq = arch_timer_ppi[VIRT_PPI];
892         
893         return 0;
894 }
895
896 static int __init arch_timer_of_init(struct device_node *np)
897 {
898         int i;
899
900         if (arch_timers_present & ARCH_CP15_TIMER) {
901                 pr_warn("arch_timer: multiple nodes in dt, skipping\n");
902                 return 0;
903         }
904
905         arch_timers_present |= ARCH_CP15_TIMER;
906         for (i = PHYS_SECURE_PPI; i < MAX_TIMER_PPI; i++)
907                 arch_timer_ppi[i] = irq_of_parse_and_map(np, i);
908
909         arch_timer_detect_rate(NULL, np);
910
911         arch_timer_c3stop = !of_property_read_bool(np, "always-on");
912
913 #ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND
914         for (i = 0; i < ARRAY_SIZE(ool_workarounds); i++) {
915                 if (of_property_read_bool(np, ool_workarounds[i].id)) {
916                         timer_unstable_counter_workaround = &ool_workarounds[i];
917                         static_branch_enable(&arch_timer_read_ool_enabled);
918                         pr_info("arch_timer: Enabling workaround for %s\n",
919                                 timer_unstable_counter_workaround->id);
920                         break;
921                 }
922         }
923 #endif
924
925         /*
926          * If we cannot rely on firmware initializing the timer registers then
927          * we should use the physical timers instead.
928          */
929         if (IS_ENABLED(CONFIG_ARM) &&
930             of_property_read_bool(np, "arm,cpu-registers-not-fw-configured"))
931                 arch_timer_uses_ppi = PHYS_SECURE_PPI;
932
933         /* On some systems, the counter stops ticking when in suspend. */
934         arch_counter_suspend_stop = of_property_read_bool(np,
935                                                          "arm,no-tick-in-suspend");
936
937         return arch_timer_init();
938 }
939 CLOCKSOURCE_OF_DECLARE(armv7_arch_timer, "arm,armv7-timer", arch_timer_of_init);
940 CLOCKSOURCE_OF_DECLARE(armv8_arch_timer, "arm,armv8-timer", arch_timer_of_init);
941
942 static int __init arch_timer_mem_init(struct device_node *np)
943 {
944         struct device_node *frame, *best_frame = NULL;
945         void __iomem *cntctlbase, *base;
946         unsigned int irq, ret = -EINVAL;
947         u32 cnttidr;
948
949         arch_timers_present |= ARCH_MEM_TIMER;
950         cntctlbase = of_iomap(np, 0);
951         if (!cntctlbase) {
952                 pr_err("arch_timer: Can't find CNTCTLBase\n");
953                 return -ENXIO;
954         }
955
956         cnttidr = readl_relaxed(cntctlbase + CNTTIDR);
957
958         /*
959          * Try to find a virtual capable frame. Otherwise fall back to a
960          * physical capable frame.
961          */
962         for_each_available_child_of_node(np, frame) {
963                 int n;
964                 u32 cntacr;
965
966                 if (of_property_read_u32(frame, "frame-number", &n)) {
967                         pr_err("arch_timer: Missing frame-number\n");
968                         of_node_put(frame);
969                         goto out;
970                 }
971
972                 /* Try enabling everything, and see what sticks */
973                 cntacr = CNTACR_RFRQ | CNTACR_RWPT | CNTACR_RPCT |
974                          CNTACR_RWVT | CNTACR_RVOFF | CNTACR_RVCT;
975                 writel_relaxed(cntacr, cntctlbase + CNTACR(n));
976                 cntacr = readl_relaxed(cntctlbase + CNTACR(n));
977
978                 if ((cnttidr & CNTTIDR_VIRT(n)) &&
979                     !(~cntacr & (CNTACR_RWVT | CNTACR_RVCT))) {
980                         of_node_put(best_frame);
981                         best_frame = frame;
982                         arch_timer_mem_use_virtual = true;
983                         break;
984                 }
985
986                 if (~cntacr & (CNTACR_RWPT | CNTACR_RPCT))
987                         continue;
988
989                 of_node_put(best_frame);
990                 best_frame = of_node_get(frame);
991         }
992
993         ret= -ENXIO;
994         base = arch_counter_base = of_io_request_and_map(best_frame, 0,
995                                                          "arch_mem_timer");
996         if (IS_ERR(base)) {
997                 pr_err("arch_timer: Can't map frame's registers\n");
998                 goto out;
999         }
1000
1001         if (arch_timer_mem_use_virtual)
1002                 irq = irq_of_parse_and_map(best_frame, 1);
1003         else
1004                 irq = irq_of_parse_and_map(best_frame, 0);
1005
1006         ret = -EINVAL;
1007         if (!irq) {
1008                 pr_err("arch_timer: Frame missing %s irq",
1009                        arch_timer_mem_use_virtual ? "virt" : "phys");
1010                 goto out;
1011         }
1012
1013         arch_timer_detect_rate(base, np);
1014         ret = arch_timer_mem_register(base, irq);
1015         if (ret)
1016                 goto out;
1017
1018         return arch_timer_common_init();
1019 out:
1020         iounmap(cntctlbase);
1021         of_node_put(best_frame);
1022         return ret;
1023 }
1024 CLOCKSOURCE_OF_DECLARE(armv7_arch_timer_mem, "arm,armv7-timer-mem",
1025                        arch_timer_mem_init);
1026
1027 #ifdef CONFIG_ACPI
1028 static int __init map_generic_timer_interrupt(u32 interrupt, u32 flags)
1029 {
1030         int trigger, polarity;
1031
1032         if (!interrupt)
1033                 return 0;
1034
1035         trigger = (flags & ACPI_GTDT_INTERRUPT_MODE) ? ACPI_EDGE_SENSITIVE
1036                         : ACPI_LEVEL_SENSITIVE;
1037
1038         polarity = (flags & ACPI_GTDT_INTERRUPT_POLARITY) ? ACPI_ACTIVE_LOW
1039                         : ACPI_ACTIVE_HIGH;
1040
1041         return acpi_register_gsi(NULL, interrupt, trigger, polarity);
1042 }
1043
1044 /* Initialize per-processor generic timer */
1045 static int __init arch_timer_acpi_init(struct acpi_table_header *table)
1046 {
1047         struct acpi_table_gtdt *gtdt;
1048
1049         if (arch_timers_present & ARCH_CP15_TIMER) {
1050                 pr_warn("arch_timer: already initialized, skipping\n");
1051                 return -EINVAL;
1052         }
1053
1054         gtdt = container_of(table, struct acpi_table_gtdt, header);
1055
1056         arch_timers_present |= ARCH_CP15_TIMER;
1057
1058         arch_timer_ppi[PHYS_SECURE_PPI] =
1059                 map_generic_timer_interrupt(gtdt->secure_el1_interrupt,
1060                 gtdt->secure_el1_flags);
1061
1062         arch_timer_ppi[PHYS_NONSECURE_PPI] =
1063                 map_generic_timer_interrupt(gtdt->non_secure_el1_interrupt,
1064                 gtdt->non_secure_el1_flags);
1065
1066         arch_timer_ppi[VIRT_PPI] =
1067                 map_generic_timer_interrupt(gtdt->virtual_timer_interrupt,
1068                 gtdt->virtual_timer_flags);
1069
1070         arch_timer_ppi[HYP_PPI] =
1071                 map_generic_timer_interrupt(gtdt->non_secure_el2_interrupt,
1072                 gtdt->non_secure_el2_flags);
1073
1074         /* Get the frequency from CNTFRQ */
1075         arch_timer_detect_rate(NULL, NULL);
1076
1077         /* Always-on capability */
1078         arch_timer_c3stop = !(gtdt->non_secure_el1_flags & ACPI_GTDT_ALWAYS_ON);
1079
1080         arch_timer_init();
1081         return 0;
1082 }
1083 CLOCKSOURCE_ACPI_DECLARE(arch_timer, ACPI_SIG_GTDT, arch_timer_acpi_init);
1084 #endif