/*
 * Xtensa SMP support functions.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2008 - 2013 Tensilica Inc.
 *
 * Chris Zankel <chris@zankel.net>
 * Joe Taylor <joe@tensilica.com>
 * Pete Delaney <piet@tensilica.com>
 */
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/irq.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/sched/mm.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task_stack.h>
#include <linux/reboot.h>
#include <linux/seq_file.h>
#include <linux/smp.h>
#include <linux/thread_info.h>
#include <asm/cacheflush.h>
#include <asm/kdebug.h>
#include <asm/mmu_context.h>
#include <asm/mxregs.h>
#include <asm/platform.h>
#include <asm/tlbflush.h>
#include <asm/traps.h>
#ifdef CONFIG_SMP
# if XCHAL_HAVE_S32C1I == 0
#  error "The S32C1I option is required for SMP."
# endif
#endif
static void system_invalidate_dcache_range(unsigned long start,
		unsigned long size);
static void system_flush_invalidate_dcache_range(unsigned long start,
		unsigned long size);
/* IPI (Inter-Processor Interrupt) */
static irqreturn_t ipi_interrupt(int irq, void *dev_id);

static struct irqaction ipi_irqaction = {
	.handler = ipi_interrupt,
	.flags = IRQF_PERCPU,
	.name = "ipi",
};

void ipi_init(void)
{
	unsigned irq = irq_create_mapping(NULL, IPI_IRQ);

	setup_irq(irq, &ipi_irqaction);
}
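/*
 * ipi_init() runs once on the boot CPU: irq_create_mapping() resolves
 * the hardware IPI line (IPI_IRQ) through the default irq domain into
 * a Linux irq number, and the per-CPU handler above is attached to it.
 */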
static inline unsigned int get_core_count(void)
{
	/* Bits 18..21 of SYSCFGID contain the core count minus 1. */
	unsigned int syscfgid = get_er(SYSCFGID);
	return ((syscfgid >> 18) & 0xf) + 1;
}
static inline int get_core_id(void)
{
	/* Bits 0..13 of SYSCFGID contain the core id. */
	unsigned int core_id = get_er(SYSCFGID);
	return core_id & 0x3fff;
}
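/*
 * Both helpers decode the MX SYSCFGID external register, read with
 * get_er(): the low bits identify the calling core and bits 18..21
 * hold the cluster's core count minus one, so one register read
 * answers both "who am I" and "how many cores are there".
 */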
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned i;

	for (i = 0; i < max_cpus; ++i)
		set_cpu_present(i, true);
}
void __init smp_init_cpus(void)
{
	unsigned i;
	unsigned int ncpus = get_core_count();
	unsigned int core_id = get_core_id();

	pr_info("%s: Core Count = %d\n", __func__, ncpus);
	pr_info("%s: Core Id = %d\n", __func__, core_id);

	for (i = 0; i < ncpus; ++i)
		set_cpu_possible(i, true);
}
void __init smp_prepare_boot_cpu(void)
{
	unsigned int cpu = smp_processor_id();

	BUG_ON(cpu != 0);
	cpu_asid_cache(cpu) = ASID_USER_FIRST;
}
void __init smp_cpus_done(unsigned int max_cpus)
{
}
static int boot_secondary_processors = 1; /* Set with xt-gdb via .xt-gdb */
static DECLARE_COMPLETION(cpu_running);
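/*
 * cpu_running is the boot handshake: __cpu_up() waits on it (with a
 * timeout) and the new CPU completes it at the end of
 * secondary_start_kernel() after marking itself online.
 */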
void secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu = smp_processor_id();

	init_mmu();

#ifdef CONFIG_DEBUG_KERNEL
	if (boot_secondary_processors == 0) {
		pr_debug("%s: boot_secondary_processors:%d; Hanging cpu:%d\n",
			 __func__, boot_secondary_processors, cpu);
		for (;;)
			__asm__ __volatile__ ("waiti " __stringify(LOCKLEVEL));
	}

	pr_debug("%s: boot_secondary_processors:%d; Booting cpu:%d\n",
		 __func__, boot_secondary_processors, cpu);
#endif
	/* Init EXCSAVE1 */
	secondary_trap_init();

	/* All kernel threads share the same mm context. */
	mmget(mm);
	mmgrab(mm);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));
	enter_lazy_tlb(mm, current);

	preempt_disable();
	trace_hardirqs_off();
	calibrate_delay();
	notify_cpu_starting(cpu);

	secondary_init_irq();
	local_timer_setup(cpu);
	set_cpu_online(cpu, true);
	local_irq_enable();
	complete(&cpu_running);

	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}
static void mx_cpu_start(void *p)
{
	unsigned cpu = (unsigned)p;
	unsigned long run_stall_mask = get_er(MPSCORE);

	set_er(run_stall_mask & ~(1u << cpu), MPSCORE);
	pr_debug("%s: cpu: %d, run_stall_mask: %lx ---> %lx\n",
		 __func__, cpu, run_stall_mask, get_er(MPSCORE));
}
static void mx_cpu_stop(void *p)
{
	unsigned cpu = (unsigned)p;
	unsigned long run_stall_mask = get_er(MPSCORE);

	set_er(run_stall_mask | (1u << cpu), MPSCORE);
	pr_debug("%s: cpu: %d, run_stall_mask: %lx ---> %lx\n",
		 __func__, cpu, run_stall_mask, get_er(MPSCORE));
}
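/*
 * MPSCORE is the MX run-stall control register: while a core's bit is
 * set, that core is stalled; clearing the bit lets it run. Both
 * helpers are run on CPU 0 via smp_call_function_single(), since a
 * core being started is still stalled and cannot do this itself.
 */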
#ifdef CONFIG_HOTPLUG_CPU
unsigned long cpu_start_id __cacheline_aligned;
#endif
unsigned long cpu_start_ccount;
static int boot_secondary(unsigned int cpu, struct task_struct *ts)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(1000);
	unsigned long ccount;
	int i;

#ifdef CONFIG_HOTPLUG_CPU
	cpu_start_id = cpu;
	system_flush_invalidate_dcache_range(
			(unsigned long)&cpu_start_id, sizeof(cpu_start_id));
#endif
	smp_call_function_single(0, mx_cpu_start, (void *)cpu, 1);

	for (i = 0; i < 2; ++i) {
		do
			ccount = get_ccount();
		while (!ccount);
		cpu_start_ccount = ccount;
		while (time_before(jiffies, timeout)) {
			mb();
			if (!cpu_start_ccount)
				break;
		}
		if (cpu_start_ccount) {
			smp_call_function_single(0, mx_cpu_stop,
					(void *)cpu, 1);
			cpu_start_ccount = 0;
			return -EIO;
		}
	}
	return 0;
}
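/*
 * Boot handshake: the boot CPU publishes a non-zero ccount in
 * cpu_start_ccount and unstalls the secondary, whose reset path
 * clears the variable once it is actually executing. The exchange is
 * done twice; if the secondary never clears the value before the
 * timeout, it is stalled again and -EIO is returned.
 */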
int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	int ret = 0;

	if (cpu_asid_cache(cpu) == 0)
		cpu_asid_cache(cpu) = ASID_USER_FIRST;

	start_info.stack = (unsigned long)task_pt_regs(idle);
	wmb();

	pr_debug("%s: Calling wakeup_secondary(cpu:%d, idle:%p, sp: %08lx)\n",
		 __func__, cpu, idle, start_info.stack);

	ret = boot_secondary(cpu, idle);
	if (ret == 0) {
		wait_for_completion_timeout(&cpu_running,
				msecs_to_jiffies(1000));
		if (!cpu_online(cpu))
			ret = -EIO;
	}

	if (ret)
		pr_err("CPU %u failed to boot\n", cpu);

	return ret;
}
#ifdef CONFIG_HOTPLUG_CPU

/*
 * __cpu_disable runs on the processor to be shutdown.
 */
int __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	/*
	 * Take this CPU offline. Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	set_cpu_online(cpu, false);

	/*
	 * OK - migrate IRQs away from this CPU
	 */
	migrate_irqs();

	/*
	 * Flush user cache and TLB mappings, and then remove this CPU
	 * from the vm mask set of all processes.
	 */
	local_flush_cache_all();
	local_flush_tlb_all();
	invalidate_page_directory();

	clear_tasks_mm_cpumask(cpu);

	return 0;
}
static void platform_cpu_kill(unsigned int cpu)
{
	smp_call_function_single(0, mx_cpu_stop, (void *)cpu, true);
}
/*
 * Called on the thread which is asking for a CPU to be shutdown -
 * waits until shutdown has completed, or it is timed out.
 */
void __cpu_die(unsigned int cpu)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(1000);
	while (time_before(jiffies, timeout)) {
		system_invalidate_dcache_range((unsigned long)&cpu_start_id,
					       sizeof(cpu_start_id));
		if (cpu_start_id == -cpu) {
			platform_cpu_kill(cpu);
			return;
		}
	}
	pr_err("CPU%u: unable to kill\n", cpu);
}
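/*
 * The dying CPU reports that it has reached its low-level restart
 * code by storing the negated CPU id in cpu_start_id; the dcache
 * invalidate above ensures we re-read that line from memory rather
 * than from a stale cached copy.
 */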
void arch_cpu_idle_dead(void)
{
	cpu_die();
}
/*
 * Called from the idle thread for the CPU which has been shutdown.
 *
 * Note that we disable IRQs here, but do not re-enable them
 * before returning to the caller. This is also the behaviour
 * of the other hotplug-cpu capable cores, so presumably coming
 * out of idle fixes this.
 */
void __ref cpu_die(void)
{
	idle_task_exit();
	local_irq_disable();
	__asm__ __volatile__(
			"	movi	a2, cpu_restart\n"
			"	jx	a2\n");
}

#endif /* CONFIG_HOTPLUG_CPU */
enum ipi_msg_type {
	IPI_RESCHEDULE = 0,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
	IPI_MAX
};

static const struct {
	const char *short_text;
	const char *long_text;
} ipi_text[] = {
	{ .short_text = "RES", .long_text = "Rescheduling interrupts" },
	{ .short_text = "CAL", .long_text = "Function call interrupts" },
	{ .short_text = "DIE", .long_text = "CPU shutdown interrupts" },
};

struct ipi_data {
	unsigned long ipi_count[IPI_MAX];
};

static DEFINE_PER_CPU(struct ipi_data, ipi_data);
static void send_ipi_message(const struct cpumask *callmask,
		enum ipi_msg_type msg_id)
{
	int index;
	unsigned long mask = 0;

	for_each_cpu(index, callmask)
		if (index != smp_processor_id())
			mask |= 1 << index;

	set_er(mask, MIPISET(msg_id));
}
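/*
 * MIPISET(msg_id) is the MX "IPI set" register for one message class:
 * writing a bit mask of CPUs raises that IPI on every core whose bit
 * is set. The sender excludes itself above, as a self-IPI is never
 * required here.
 */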
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	send_ipi_message(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
	send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC);
}

void smp_send_reschedule(int cpu)
{
	send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
}

void smp_send_stop(void)
{
	struct cpumask targets;

	cpumask_copy(&targets, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &targets);
	send_ipi_message(&targets, IPI_CPU_STOP);
}
static void ipi_cpu_stop(unsigned int cpu)
{
	set_cpu_online(cpu, false);
	machine_halt();
}
irqreturn_t ipi_interrupt(int irq, void *dev_id)
{
	unsigned int cpu = smp_processor_id();
	struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
	unsigned int msg;
	unsigned i;

	msg = get_er(MIPICAUSE(cpu));
	for (i = 0; i < IPI_MAX; i++)
		if (msg & (1 << i)) {
			set_er(1 << i, MIPICAUSE(cpu));
			++ipi->ipi_count[i];
		}

	if (msg & (1 << IPI_RESCHEDULE))
		scheduler_ipi();
	if (msg & (1 << IPI_CALL_FUNC))
		generic_smp_call_function_interrupt();
	if (msg & (1 << IPI_CPU_STOP))
		ipi_cpu_stop(cpu);

	return IRQ_HANDLED;
}
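/*
 * MIPICAUSE(cpu) latches the pending IPI message bits for this core;
 * writing a bit back clears it, so every pending message is
 * acknowledged in the loop above before its handler runs.
 */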
void show_ipi_list(struct seq_file *p, int prec)
{
	unsigned int cpu;
	unsigned i;

	for (i = 0; i < IPI_MAX; ++i) {
		seq_printf(p, "%*s:", prec, ipi_text[i].short_text);
		for_each_online_cpu(cpu)
			seq_printf(p, " %10lu",
				   per_cpu(ipi_data, cpu).ipi_count[i]);
		seq_printf(p, "   %s\n", ipi_text[i].long_text);
	}
}
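/*
 * show_ipi_list() supplies the arch-specific rows of /proc/interrupts.
 * The output looks roughly like this (counts are illustrative only):
 *
 *	RES:	12345	 6789	Rescheduling interrupts
 *	CAL:	  234	  567	Function call interrupts
 *	DIE:	    0	    0	CPU shutdown interrupts
 */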
int setup_profiling_timer(unsigned int multiplier)
{
	pr_debug("setup_profiling_timer %d\n", multiplier);
	return 0;
}
/* TLB flush functions */

struct flush_data {
	struct vm_area_struct *vma;
	unsigned long addr1;
	unsigned long addr2;
};
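/*
 * flush_data packs the arguments of a flush request into a single
 * pointer for on_each_cpu(). Depending on the caller, addr1/addr2
 * carry an address range, an (address, pfn) pair, or a (start, size)
 * pair.
 */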
static void ipi_flush_tlb_all(void *arg)
{
	local_flush_tlb_all();
}

void flush_tlb_all(void)
{
	on_each_cpu(ipi_flush_tlb_all, NULL, 1);
}
static void ipi_flush_tlb_mm(void *arg)
{
	local_flush_tlb_mm(arg);
}

void flush_tlb_mm(struct mm_struct *mm)
{
	on_each_cpu(ipi_flush_tlb_mm, mm, 1);
}
static void ipi_flush_tlb_page(void *arg)
{
	struct flush_data *fd = arg;
	local_flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	struct flush_data fd = {
		.vma = vma,
		.addr1 = addr,
	};
	on_each_cpu(ipi_flush_tlb_page, &fd, 1);
}
static void ipi_flush_tlb_range(void *arg)
{
	struct flush_data *fd = arg;
	local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct vm_area_struct *vma,
		     unsigned long start, unsigned long end)
{
	struct flush_data fd = {
		.vma = vma,
		.addr1 = start,
		.addr2 = end,
	};
	on_each_cpu(ipi_flush_tlb_range, &fd, 1);
}
static void ipi_flush_tlb_kernel_range(void *arg)
{
	struct flush_data *fd = arg;
	local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct flush_data fd = {
		.addr1 = start,
		.addr2 = end,
	};
	on_each_cpu(ipi_flush_tlb_kernel_range, &fd, 1);
}
/* Cache flush functions */

static void ipi_flush_cache_all(void *arg)
{
	local_flush_cache_all();
}

void flush_cache_all(void)
{
	on_each_cpu(ipi_flush_cache_all, NULL, 1);
}
static void ipi_flush_cache_page(void *arg)
{
	struct flush_data *fd = arg;
	local_flush_cache_page(fd->vma, fd->addr1, fd->addr2);
}

void flush_cache_page(struct vm_area_struct *vma,
		      unsigned long address, unsigned long pfn)
{
	struct flush_data fd = {
		.vma = vma,
		.addr1 = address,
		.addr2 = pfn,
	};
	on_each_cpu(ipi_flush_cache_page, &fd, 1);
}
static void ipi_flush_cache_range(void *arg)
{
	struct flush_data *fd = arg;
	local_flush_cache_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_cache_range(struct vm_area_struct *vma,
		       unsigned long start, unsigned long end)
{
	struct flush_data fd = {
		.vma = vma,
		.addr1 = start,
		.addr2 = end,
	};
	on_each_cpu(ipi_flush_cache_range, &fd, 1);
}
static void ipi_flush_icache_range(void *arg)
{
	struct flush_data *fd = arg;
	local_flush_icache_range(fd->addr1, fd->addr2);
}

void flush_icache_range(unsigned long start, unsigned long end)
{
	struct flush_data fd = {
		.addr1 = start,
		.addr2 = end,
	};
	on_each_cpu(ipi_flush_icache_range, &fd, 1);
}
EXPORT_SYMBOL(flush_icache_range);
/* ------------------------------------------------------------------------- */
static void ipi_invalidate_dcache_range(void *arg)
{
	struct flush_data *fd = arg;
	__invalidate_dcache_range(fd->addr1, fd->addr2);
}

static void system_invalidate_dcache_range(unsigned long start,
		unsigned long size)
{
	struct flush_data fd = {
		.addr1 = start,
		.addr2 = size,
	};
	on_each_cpu(ipi_invalidate_dcache_range, &fd, 1);
}
static void ipi_flush_invalidate_dcache_range(void *arg)
{
	struct flush_data *fd = arg;
	__flush_invalidate_dcache_range(fd->addr1, fd->addr2);
}

static void system_flush_invalidate_dcache_range(unsigned long start,
		unsigned long size)
{
	struct flush_data fd = {
		.addr1 = start,
		.addr2 = size,
	};
	on_each_cpu(ipi_flush_invalidate_dcache_range, &fd, 1);
}
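/*
 * The system_*_dcache_range() helpers run the dcache maintenance on
 * every online CPU. boot_secondary() and __cpu_die() use them to push
 * out or discard the cache line holding cpu_start_id, which a
 * starting or dying core accesses before it takes part in normal
 * cache coherence.
 */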