// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/kernel/setup.c
 *
 *  Copyright (C) 1995-2001 Russell King
 */
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/of_platform.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/of_fdt.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/sort.h>
#include <linux/psci.h>

#include <asm/unified.h>
#include <asm/cp15.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/efi.h>
#include <asm/elf.h>
#include <asm/early_ioremap.h>
#include <asm/fixmap.h>
#include <asm/procinfo.h>
#include <asm/psci.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
#include <asm/mach-types.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/tlbflush.h>
#include <asm/xen/hypervisor.h>

#include <asm/prom.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
#include <asm/system_info.h>
#include <asm/system_misc.h>
#include <asm/traps.h>
#include <asm/unwind.h>
#include <asm/memblock.h>
#include <asm/virt.h>

#include "atags.h"

#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
char fpe_type[8];

static int __init fpe_setup(char *line)
{
	memcpy(fpe_type, line, 8);
	return 1;
}

__setup("fpe=", fpe_setup);
#endif

extern void init_default_cache_policy(unsigned long);
extern void paging_init(const struct machine_desc *desc);
extern void early_mm_init(const struct machine_desc *);
extern void adjust_lowmem_bounds(void);
extern enum reboot_mode reboot_mode;
extern void setup_dma_zone(const struct machine_desc *desc);

unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
unsigned int __machine_arch_type __read_mostly;
EXPORT_SYMBOL(__machine_arch_type);
unsigned int cacheid __read_mostly;
EXPORT_SYMBOL(cacheid);

unsigned int __atags_pointer __initdata;

unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

const char *system_serial;
EXPORT_SYMBOL(system_serial);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

unsigned int elf_hwcap __read_mostly;
EXPORT_SYMBOL(elf_hwcap);

unsigned int elf_hwcap2 __read_mostly;
EXPORT_SYMBOL(elf_hwcap2);

#ifdef MULTI_CPU
struct processor processor __ro_after_init;
#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
struct processor *cpu_vtable[NR_CPUS] = {
	[0] = &processor,
};
#endif
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb __ro_after_init;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user __ro_after_init;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache __ro_after_init;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache __ro_after_init;
EXPORT_SYMBOL(outer_cache);
#endif

/*
 * Cached cpu_architecture() result for use by assembler code.
 * C code should use the cpu_architecture() function instead of accessing this
 * variable directly.
 */
int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;

struct stack {
	u32 irq[3];
	u32 abt[3];
	u32 und[3];
	u32 fiq[3];
} ____cacheline_aligned;

#ifndef CONFIG_CPU_V7M
static struct stack stacks[NR_CPUS];
#endif

char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

static const char *cpu_name;
static const char *machine_name;
static char __initdata cmd_line[COMMAND_LINE_SIZE];
const struct machine_desc *machine_desc __initdata;
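
/*
 * Compile-time endianness probe: the union overlays a four-byte string
 * on an unsigned long, so the low-order byte read by ENDIANNESS below
 * is 'l' on a little-endian kernel and 'b' on a big-endian one. That
 * character is appended to the machine and ELF platform strings.
 */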
static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{
		.name = "Video RAM",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel code",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_SYSTEM_RAM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_SYSTEM_RAM
	}
};

#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

static struct resource io_res[] = {
	{
		.name = "reserved",
		.start = 0x3bc,
		.end = 0x3be,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x378,
		.end = 0x37f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x278,
		.end = 0x27f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	}
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]

static const char *proc_arch[] = {
	"undefined/unknown",
	"3",
	"4",
	"4T",
	"5",
	"5T",
	"5TE",
	"5TEJ",
	"6TEJ",
	"7",
	"7M",
	"?(12)",
	"?(13)",
	"?(14)",
	"?(15)",
	"?(16)",
	"?(17)",
};

#ifdef CONFIG_CPU_V7M
static int __get_cpu_architecture(void)
{
	return CPU_ARCH_ARMv7M;
}
#else
static int __get_cpu_architecture(void)
{
	int cpu_arch;

	if ((read_cpuid_id() & 0x0008f000) == 0) {
		cpu_arch = CPU_ARCH_UNKNOWN;
	} else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
		cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
		cpu_arch = (read_cpuid_id() >> 16) & 7;
		if (cpu_arch)
			cpu_arch += CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
		/* Revised CPUID format. Read the Memory Model Feature
		 * Register 0 and check for VMSAv7 or PMSAv7 */
		unsigned int mmfr0 = read_cpuid_ext(CPUID_EXT_MMFR0);
		if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
		    (mmfr0 & 0x000000f0) >= 0x00000030)
			cpu_arch = CPU_ARCH_ARMv7;
		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
			 (mmfr0 & 0x000000f0) == 0x00000020)
			cpu_arch = CPU_ARCH_ARMv6;
		else
			cpu_arch = CPU_ARCH_UNKNOWN;
	} else
		cpu_arch = CPU_ARCH_UNKNOWN;

	return cpu_arch;
}
#endif

int __pure cpu_architecture(void)
{
	BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);

	return __cpu_architecture;
}

static int cpu_has_aliasing_icache(unsigned int arch)
{
	int aliasing_icache;
	unsigned int id_reg, num_sets, line_size;

	/* PIPT caches never alias. */
	if (icache_is_pipt())
		return 0;

	/* arch specifies the register format */
	switch (arch) {
	case CPU_ARCH_ARMv7:
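		/*
		 * Select the L1 I-cache in CSSELR, then decode CCSIDR:
		 * a VIPT I-cache can only alias when one way (line size
		 * times number of sets) spans more than a page, since
		 * the index then uses bits above the page offset.
		 */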
		set_csselr(CSSELR_ICACHE | CSSELR_L1);
		isb();
		id_reg = read_ccsidr();
		line_size = 4 << ((id_reg & 0x7) + 2);
		num_sets = ((id_reg >> 13) & 0x7fff) + 1;
		aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
		break;
	case CPU_ARCH_ARMv6:
		aliasing_icache = read_cpuid_cachetype() & (1 << 11);
		break;
	default:
		/* I-cache aliases will be handled by D-cache aliasing code */
		aliasing_icache = 0;
	}

	return aliasing_icache;
}

static void __init cacheid_init(void)
{
	unsigned int arch = cpu_architecture();

	if (arch >= CPU_ARCH_ARMv6) {
		unsigned int cachetype = read_cpuid_cachetype();

		if ((arch == CPU_ARCH_ARMv7M) && !(cachetype & 0xf000f)) {
			cacheid = 0;
		} else if ((cachetype & (7 << 29)) == 4 << 29) {
			/* ARMv7 register format */
			arch = CPU_ARCH_ARMv7;
			cacheid = CACHEID_VIPT_NONALIASING;
			switch (cachetype & (3 << 14)) {
			case (1 << 14):
				cacheid |= CACHEID_ASID_TAGGED;
				break;
			case (3 << 14):
				cacheid |= CACHEID_PIPT;
				break;
			}
		} else {
			arch = CPU_ARCH_ARMv6;
			if (cachetype & (1 << 23))
				cacheid = CACHEID_VIPT_ALIASING;
			else
				cacheid = CACHEID_VIPT_NONALIASING;
		}
		if (cpu_has_aliasing_icache(arch))
			cacheid |= CACHEID_VIPT_I_ALIASING;
	} else {
		cacheid = CACHEID_VIVT;
	}

	pr_info("CPU: %s data cache, %s instruction cache\n",
		cache_is_vivt() ? "VIVT" :
		cache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
		cache_is_vivt() ? "VIVT" :
		icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
		icache_is_vipt_aliasing() ? "VIPT aliasing" :
		icache_is_pipt() ? "PIPT" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}

/*
 * These functions re-use the assembly code in head.S, which
 * already provide the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);

void __init early_print(const char *str, ...)
{
	extern void printascii(const char *);
	char buf[256];
	va_list ap;

	va_start(ap, str);
	vsnprintf(buf, sizeof(buf), str, ap);
	va_end(ap);

#ifdef CONFIG_DEBUG_LL
	printascii(buf);
#endif
	printk("%s", buf);
}

#ifdef CONFIG_ARM_PATCH_IDIV

static inline u32 __attribute_const__ sdiv_instruction(void)
{
	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
		/* "sdiv r0, r0, r1" */
		u32 insn = __opcode_thumb32_compose(0xfb90, 0xf0f1);
		return __opcode_to_mem_thumb32(insn);
	}

	/* "sdiv r0, r0, r1" */
	return __opcode_to_mem_arm(0xe710f110);
}

static inline u32 __attribute_const__ udiv_instruction(void)
{
	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
		/* "udiv r0, r0, r1" */
		u32 insn = __opcode_thumb32_compose(0xfbb0, 0xf0f1);
		return __opcode_to_mem_thumb32(insn);
	}

	/* "udiv r0, r0, r1" */
	return __opcode_to_mem_arm(0xe730f110);
}

static inline u32 __attribute_const__ bx_lr_instruction(void)
{
	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
		/* "bx lr; nop" */
		u32 insn = __opcode_thumb32_compose(0x4770, 0x46c0);
		return __opcode_to_mem_thumb32(insn);
	}

	/* "bx lr" */
	return __opcode_to_mem_arm(0xe12fff1e);
}

static void __init patch_aeabi_idiv(void)
{
	extern void __aeabi_uidiv(void);
	extern void __aeabi_idiv(void);
	uintptr_t fn_addr;
	unsigned int mask;

	mask = IS_ENABLED(CONFIG_THUMB2_KERNEL) ? HWCAP_IDIVT : HWCAP_IDIVA;
	if (!(elf_hwcap & mask))
		return;

	pr_info("CPU: div instructions available: patching division code\n");

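	/*
	 * Overwrite the first two instructions of each software division
	 * helper with a hardware divide and a return. The "& ~1" strips
	 * the Thumb bit from the function address, and the empty asm
	 * hides the pointer's origin from the compiler so it cannot
	 * assume anything about the memory being written.
	 */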
	fn_addr = ((uintptr_t)&__aeabi_uidiv) & ~1;
	asm ("" : "+g" (fn_addr));
	((u32 *)fn_addr)[0] = udiv_instruction();
	((u32 *)fn_addr)[1] = bx_lr_instruction();
	flush_icache_range(fn_addr, fn_addr + 8);

	fn_addr = ((uintptr_t)&__aeabi_idiv) & ~1;
	asm ("" : "+g" (fn_addr));
	((u32 *)fn_addr)[0] = sdiv_instruction();
	((u32 *)fn_addr)[1] = bx_lr_instruction();
	flush_icache_range(fn_addr, fn_addr + 8);
}

#else
static inline void patch_aeabi_idiv(void) { }
#endif

static void __init cpuid_init_hwcaps(void)
{
	int block;
	u32 isar5;

	if (cpu_architecture() < CPU_ARCH_ARMv7)
		return;

	block = cpuid_feature_extract(CPUID_EXT_ISAR0, 24);
	if (block >= 2)
		elf_hwcap |= HWCAP_IDIVA;
	if (block >= 1)
		elf_hwcap |= HWCAP_IDIVT;

	/* LPAE implies atomic ldrd/strd instructions */
	block = cpuid_feature_extract(CPUID_EXT_MMFR0, 0);
	if (block >= 5)
		elf_hwcap |= HWCAP_LPAE;

	/* check for supported v8 Crypto instructions */
	isar5 = read_cpuid_ext(CPUID_EXT_ISAR5);

	block = cpuid_feature_extract_field(isar5, 4);
	if (block >= 2)
		elf_hwcap2 |= HWCAP2_PMULL;
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_AES;

	block = cpuid_feature_extract_field(isar5, 8);
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_SHA1;

	block = cpuid_feature_extract_field(isar5, 12);
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_SHA2;

	block = cpuid_feature_extract_field(isar5, 16);
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_CRC32;
}

static void __init elf_hwcap_fixup(void)
{
	unsigned id = read_cpuid_id();

	/*
	 * HWCAP_TLS is available only on 1136 r1p0 and later,
	 * see also kuser_get_tls_init.
	 */
	if (read_cpuid_part() == ARM_CPU_PART_ARM1136 &&
	    ((id >> 20) & 3) == 0) {
		elf_hwcap &= ~HWCAP_TLS;
		return;
	}

	/* Verify if CPUID scheme is implemented */
	if ((id & 0x000f0000) != 0x000f0000)
		return;

	/*
	 * If the CPU supports LDREX/STREX and LDREXB/STREXB,
	 * avoid advertising SWP; it may not be atomic with
	 * multiprocessing cores.
	 */
	if (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) > 1 ||
	    (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) == 1 &&
	     cpuid_feature_extract(CPUID_EXT_ISAR4, 20) >= 3))
		elf_hwcap &= ~HWCAP_SWP;
}

/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks.
 */
void notrace cpu_init(void)
{
#ifndef CONFIG_CPU_V7M
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	if (cpu >= NR_CPUS) {
		pr_crit("CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}

	/*
	 * This only works on resume and secondary cores. For booting on the
	 * boot cpu, smp_prepare_boot_cpu is called after percpu area setup.
	 */
	set_my_cpu_offset(per_cpu_offset(cpu));

	cpu_proc_init();

	/*
	 * Define the placement constraint for the inline asm directive below.
	 * In Thumb-2, msr with an immediate value is not allowed.
	 */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC	"r"
#else
#define PLC	"I"
#endif
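
	/*
	 * The inline asm below switches the CPU through each exception
	 * mode (IRQ, ABT, UND, FIQ) with interrupts masked, points that
	 * mode's banked stack pointer at the matching field of this
	 * CPU's struct stack, and finally drops back to SVC mode.
	 */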
	/*
	 * setup stacks for re-entrant exception handlers
	 */
	__asm__ (
	"msr	cpsr_c, %1\n\t"
	"add	r14, %0, %2\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %3\n\t"
	"add	r14, %0, %4\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %5\n\t"
	"add	r14, %0, %6\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %7\n\t"
	"add	r14, %0, %8\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %9"
	    :
	    : "r" (stk),
	      PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | FIQ_MODE),
	      "I" (offsetof(struct stack, fiq[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
	    : "r14");
#endif
}
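
/*
 * GNU C range initializer: every logical CPU slot starts out as
 * MPIDR_INVALID until smp_setup_processor_id() below establishes the
 * real mapping between logical CPU numbers and physical MPIDR values.
 */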
u32 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID };

void __init smp_setup_processor_id(void)
{
	int i;
	u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0;
	u32 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
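
	/*
	 * The boot CPU must become logical CPU 0, so hand it slot 0 and
	 * give whichever CPU would have used that slot the boot CPU's
	 * old index instead.
	 */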
	cpu_logical_map(0) = cpu;
	for (i = 1; i < nr_cpu_ids; ++i)
		cpu_logical_map(i) = i == cpu ? 0 : i;

	/*
	 * clear __my_cpu_offset on boot CPU to avoid hang caused by
	 * using percpu variable early, for example, lockdep will
	 * access percpu variable inside lock_release
	 */
	set_my_cpu_offset(0);

	pr_info("Booting Linux on physical CPU 0x%x\n", mpidr);
}

struct mpidr_hash mpidr_hash;
#ifdef CONFIG_SMP
/**
 * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
 *			  level in order to build a linear index from an
 *			  MPIDR value. Resulting algorithm is a collision
 *			  free hash carried out through shifting and ORing
 */
static void __init smp_build_mpidr_hash(void)
{
	u32 i, affinity;
	u32 fs[3], bits[3], ls, mask = 0;
	/*
	 * Pre-scan the list of MPIDRS and filter out bits that do
	 * not contribute to affinity levels, ie they never toggle.
	 */
	for_each_possible_cpu(i)
		mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
	pr_debug("mask of set bits 0x%x\n", mask);
	/*
	 * Find and stash the last and first bit set at all affinity levels to
	 * check how many bits are required to represent them.
	 */
	for (i = 0; i < 3; i++) {
		affinity = MPIDR_AFFINITY_LEVEL(mask, i);
		/*
		 * Find the MSB bit and LSB bits position
		 * to determine how many bits are required
		 * to express the affinity level.
		 */
		ls = fls(affinity);
		fs[i] = affinity ? ffs(affinity) - 1 : 0;
		bits[i] = ls - fs[i];
	}
	/*
	 * An index can be created from the MPIDR by isolating the
	 * significant bits at each affinity level and by shifting
	 * them in order to compress the 24 bits values space to a
	 * compressed set of values. This is equivalent to hashing
	 * the MPIDR through shifting and ORing. It is a collision free
	 * hash though not minimal since some levels might contain a number
	 * of CPUs that is not an exact power of 2 and their bit
	 * representation might contain holes, eg MPIDR[7:0] = {0x2, 0x80}.
	 */
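	/*
	 * Worked example: if only MPIDR bit 0 toggles at Aff0 and only
	 * bit 8 toggles at Aff1, then fs = {0, 0} and bits = {1, 1}, so
	 * shift_aff[1] = MPIDR_LEVEL_BITS + 0 - 1 = 7: Aff1's single
	 * useful bit (absolute bit 8) is shifted right by 7 to sit
	 * directly above the Aff0 bit, giving a dense 2-bit index.
	 */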
	mpidr_hash.shift_aff[0] = fs[0];
	mpidr_hash.shift_aff[1] = MPIDR_LEVEL_BITS + fs[1] - bits[0];
	mpidr_hash.shift_aff[2] = 2*MPIDR_LEVEL_BITS + fs[2] -
						(bits[1] + bits[0]);
	mpidr_hash.mask = mask;
	mpidr_hash.bits = bits[2] + bits[1] + bits[0];
	pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] mask[0x%x] bits[%u]\n",
				mpidr_hash.shift_aff[0],
				mpidr_hash.shift_aff[1],
				mpidr_hash.shift_aff[2],
				mpidr_hash.mask,
				mpidr_hash.bits);
	/*
	 * 4x is an arbitrary value used to warn on a hash table much bigger
	 * than expected on most systems.
	 */
	if (mpidr_hash_size() > 4 * num_possible_cpus())
		pr_warn("Large number of MPIDR hash buckets detected\n");
	sync_cache_w(&mpidr_hash);
}
#endif

/*
 * locate processor in the list of supported processor types.  The linker
 * builds this table for us from the entries in arch/arm/mm/proc-*.S
 */
struct proc_info_list *lookup_processor(u32 midr)
{
	struct proc_info_list *list = lookup_processor_type(midr);

	if (!list) {
		pr_err("CPU%u: configuration botched (ID %08x), CPU halted\n",
		       smp_processor_id(), midr);
		while (1)
		/* can't use cpu_relax() here as it may require MMU setup */;
	}

	return list;
}

static void __init setup_processor(void)
{
	unsigned int midr = read_cpuid_id();
	struct proc_info_list *list = lookup_processor(midr);

	cpu_name = list->cpu_name;
	__cpu_architecture = __get_cpu_architecture();

	init_proc_vtable(list->proc);
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	pr_info("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
		list->cpu_name, midr, midr & 15,
		proc_arch[cpu_architecture()], get_cr());

	snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
		 list->arch_name, ENDIANNESS);
	snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
		 list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;

	cpuid_init_hwcaps();
	patch_aeabi_idiv();

#ifndef CONFIG_ARM_THUMB
	elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
#endif
#ifdef CONFIG_MMU
	init_default_cache_policy(list->__cpu_mm_mmu_flags);
#endif
	erratum_a15_798181_init();

	elf_hwcap_fixup();

	cacheid_init();
	cpu_init();
}

void __init dump_machine_table(void)
{
	const struct machine_desc *p;

	early_print("Available machine support:\n\nID (hex)\tNAME\n");
	for_each_machine_desc(p)
		early_print("%08x\t%s\n", p->nr, p->name);

	early_print("\nPlease check your kernel config and/or bootloader.\n");

	while (true)
		/* can't use cpu_relax() here as it may require MMU setup */;
}

int __init arm_add_memory(u64 start, u64 size)
{
	u64 aligned_start;

	/*
	 * Ensure that start/size are aligned to a page boundary.
	 * Size is rounded down, start is rounded up.
	 */
	aligned_start = PAGE_ALIGN(start);
	if (aligned_start > start + size)
		size = 0;
	else
		size -= aligned_start - start;

#ifndef CONFIG_PHYS_ADDR_T_64BIT
	if (aligned_start > ULONG_MAX) {
		pr_crit("Ignoring memory at 0x%08llx outside 32-bit physical address space\n",
			start);
		return -EINVAL;
	}

	if (aligned_start + size > ULONG_MAX) {
		pr_crit("Truncating memory at 0x%08llx to fit in 32-bit physical address space\n",
			(long long)aligned_start);
		/*
		 * To ensure bank->start + bank->size is representable in
		 * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
		 * This means we lose a page after masking.
		 */
		size = ULONG_MAX - aligned_start;
	}
#endif

	if (aligned_start < PHYS_OFFSET) {
		if (aligned_start + size <= PHYS_OFFSET) {
			pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
				aligned_start, aligned_start + size);
			return -EINVAL;
		}

		pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
			aligned_start, (u64)PHYS_OFFSET);

		size -= PHYS_OFFSET - aligned_start;
		aligned_start = PHYS_OFFSET;
	}

	start = aligned_start;
	size = size & ~(phys_addr_t)(PAGE_SIZE - 1);

	/*
	 * Check whether this memory region has non-zero size or
	 * invalid node number.
	 */
	if (size == 0)
		return -EINVAL;

	memblock_add(start, size);
	return 0;
}

/*
 * Pick out the memory size.  We look for mem=size@start,
 * where start and size are "size[KkMm]"
 */

static int __init early_mem(char *p)
{
	static int usermem __initdata = 0;
	u64 size;
	u64 start;
	char *endp;

	/*
	 * If the user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	if (usermem == 0) {
		usermem = 1;
		memblock_remove(memblock_start_of_DRAM(),
			memblock_end_of_DRAM() - memblock_start_of_DRAM());
	}

	start = PHYS_OFFSET;
	size  = memparse(p, &endp);
	if (*endp == '@')
		start = memparse(endp + 1, NULL);

	arm_add_memory(start, size);

	return 0;
}
early_param("mem", early_mem);
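
/*
 * Example (values illustrative, not from any particular board): booting
 * with "mem=64M@0x60000000" removes the firmware-reported memory map and
 * registers a single 64MiB bank at physical address 0x60000000; a bare
 * "mem=64M" keeps the default PHYS_OFFSET as the bank start.
 */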

static void __init request_standard_resources(const struct machine_desc *mdesc)
{
	struct memblock_region *region;
	struct resource *res;

	kernel_code.start   = virt_to_phys(_text);
	kernel_code.end     = virt_to_phys(__init_begin - 1);
	kernel_data.start   = virt_to_phys(_sdata);
	kernel_data.end     = virt_to_phys(_end - 1);

	for_each_memblock(memory, region) {
		phys_addr_t start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		phys_addr_t end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
		unsigned long boot_alias_start;

		/*
		 * Some systems have a special memory alias which is only
		 * used for booting.  We need to advertise this region to
		 * kexec-tools so they know where bootable RAM is located.
		 */
		boot_alias_start = phys_to_idmap(start);
		if (arm_has_idmap_alias() && boot_alias_start != IDMAP_INVALID_ADDR) {
			res = memblock_alloc(sizeof(*res), SMP_CACHE_BYTES);
			if (!res)
				panic("%s: Failed to allocate %zu bytes\n",
				      __func__, sizeof(*res));
			res->name = "System RAM (boot alias)";
			res->start = boot_alias_start;
			res->end = phys_to_idmap(end);
			res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
			request_resource(&iomem_resource, res);
		}

		res = memblock_alloc(sizeof(*res), SMP_CACHE_BYTES);
		if (!res)
			panic("%s: Failed to allocate %zu bytes\n", __func__,
			      sizeof(*res));
		res->name  = "System RAM";
		res->start = start;
		res->end = end;
		res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end   = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines don't have the possibility of ever
	 * possessing lp0, lp1 or lp2
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}

#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE) || \
    defined(CONFIG_EFI)
struct screen_info screen_info = {
	.orig_video_lines	= 30,
	.orig_video_cols	= 80,
	.orig_video_mode	= 0,
	.orig_video_ega_bx	= 0,
	.orig_video_isVGA	= 1,
	.orig_video_points	= 8
};
#endif

static int __init customize_machine(void)
{
	/*
	 * customizes platform devices, or adds new ones
	 * On DT based machines, we fall back to populating the
	 * machine from the device tree, if no callback is provided,
	 * otherwise we would always need an init_machine callback.
	 */
	if (machine_desc->init_machine)
		machine_desc->init_machine();

	return 0;
}
arch_initcall(customize_machine);

static int __init init_machine_late(void)
{
	struct device_node *root;
	int ret;

	if (machine_desc->init_late)
		machine_desc->init_late();

	root = of_find_node_by_path("/");
	if (root) {
		ret = of_property_read_string(root, "serial-number",
					      &system_serial);
		if (ret)
			system_serial = NULL;
	}

	if (!system_serial)
		system_serial = kasprintf(GFP_KERNEL, "%08x%08x",
					  system_serial_high,
					  system_serial_low);

	return 0;
}
late_initcall(init_machine_late);

#ifdef CONFIG_KEXEC
/*
 * The crash region must be aligned to 128MB to avoid
 * zImage relocating below the reserved region.
 */
#define CRASH_ALIGN	(128 << 20)

static inline unsigned long long get_total_mem(void)
{
	unsigned long long total;

	total = max_low_pfn - min_low_pfn;
	return total << PAGE_SHIFT;
}
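
/*
 * Example (illustrative): booting with "crashkernel=64M" searches for a
 * 128MB-aligned 64MiB window below the lowmem/idmap limits, whereas
 * "crashkernel=64M@0x60000000" must be satisfiable exactly at
 * 0x60000000 or the reservation is abandoned.
 */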

/**
 * reserve_crashkernel() - reserves memory area for crash kernel
 *
 * This function reserves memory area given in "crashkernel=" kernel command
 * line parameter. The memory reserved is used by a dump capture kernel when
 * primary kernel is crashing.
 */
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_size, crash_base;
	unsigned long long total_mem;
	int ret;

	total_mem = get_total_mem();
	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base);
	if (ret)
		return;

	if (crash_base <= 0) {
		unsigned long long crash_max = idmap_to_phys((u32)~0);
		unsigned long long lowmem_max = __pa(high_memory - 1) + 1;
		if (crash_max > lowmem_max)
			crash_max = lowmem_max;
		crash_base = memblock_find_in_range(CRASH_ALIGN, crash_max,
						    crash_size, CRASH_ALIGN);
		if (!crash_base) {
			pr_err("crashkernel reservation failed - No suitable area found.\n");
			return;
		}
	} else {
		unsigned long long start;

		start = memblock_find_in_range(crash_base,
					       crash_base + crash_size,
					       crash_size, SECTION_SIZE);
		if (start != crash_base) {
			pr_err("crashkernel reservation failed - memory is in use.\n");
			return;
		}
	}

	ret = memblock_reserve(crash_base, crash_size);
	if (ret < 0) {
		pr_warn("crashkernel reservation failed - memory is in use (0x%lx)\n",
			(unsigned long)crash_base);
		return;
	}

	pr_info("Reserving %ldMB of memory at %ldMB for crashkernel (System RAM: %ldMB)\n",
		(unsigned long)(crash_size >> 20),
		(unsigned long)(crash_base >> 20),
		(unsigned long)(total_mem >> 20));

	/* The crashk resource must always be located in normal mem */
	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
	insert_resource(&iomem_resource, &crashk_res);

	if (arm_has_idmap_alias()) {
		/*
		 * If we have a special RAM alias for use at boot, we
		 * need to advertise to kexec tools where the alias is.
		 */
		static struct resource crashk_boot_res = {
			.name = "Crash kernel (boot alias)",
			.flags = IORESOURCE_BUSY | IORESOURCE_MEM,
		};

		crashk_boot_res.start = phys_to_idmap(crash_base);
		crashk_boot_res.end = crashk_boot_res.start + crash_size - 1;
		insert_resource(&iomem_resource, &crashk_boot_res);
	}
}
#else
static inline void reserve_crashkernel(void) {}
#endif /* CONFIG_KEXEC */

void __init hyp_mode_check(void)
{
#ifdef CONFIG_ARM_VIRT_EXT
	sync_boot_mode();

	if (is_hyp_mode_available()) {
		pr_info("CPU: All CPU(s) started in HYP mode.\n");
		pr_info("CPU: Virtualization extensions available.\n");
	} else if (is_hyp_mode_mismatched()) {
		pr_warn("CPU: WARNING: CPU(s) started in wrong/inconsistent modes (primary CPU mode 0x%x)\n",
			__boot_cpu_mode & MODE_MASK);
		pr_warn("CPU: This may indicate a broken bootloader or firmware.\n");
	} else
		pr_info("CPU: All CPU(s) started in SVC mode.\n");
#endif
}

void __init setup_arch(char **cmdline_p)
{
	const struct machine_desc *mdesc;

	setup_processor();
	mdesc = setup_machine_fdt(__atags_pointer);
	if (!mdesc)
		mdesc = setup_machine_tags(__atags_pointer, __machine_arch_type);
	if (!mdesc) {
		early_print("\nError: invalid dtb and unrecognized/unsupported machine ID\n");
		early_print("  r1=0x%08x, r2=0x%08x\n", __machine_arch_type,
			    __atags_pointer);
		if (__atags_pointer)
			early_print("  r2[]=%*ph\n", 16,
				    phys_to_virt(__atags_pointer));
		dump_machine_table();
	}

	machine_desc = mdesc;
	machine_name = mdesc->name;
	dump_stack_set_arch_desc("%s", mdesc->name);

	if (mdesc->reboot_mode != REBOOT_HARD)
		reboot_mode = mdesc->reboot_mode;

	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code   = (unsigned long) _etext;
	init_mm.end_data   = (unsigned long) _edata;
	init_mm.brk	   = (unsigned long) _end;

	/* populate cmd_line too for later use, preserving boot_command_line */
	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = cmd_line;

	early_fixmap_init();
	early_ioremap_init();

	parse_early_param();

#ifdef CONFIG_MMU
	early_mm_init(mdesc);
#endif
	setup_dma_zone(mdesc);
	xen_early_init();
	efi_init();
	/*
	 * Make sure the calculation for lowmem/highmem is set appropriately
	 * before reserving/allocating any memory
	 */
	adjust_lowmem_bounds();
	arm_memblock_init(mdesc);
	/* Memory may have been removed so recalculate the bounds. */
	adjust_lowmem_bounds();

	early_ioremap_reset();

	paging_init(mdesc);
	request_standard_resources(mdesc);

	if (mdesc->restart)
		arm_pm_restart = mdesc->restart;

	unflatten_device_tree();

	arm_dt_init_cpu_maps();
	psci_dt_init();
#ifdef CONFIG_SMP
	if (is_smp()) {
		if (!mdesc->smp_init || !mdesc->smp_init()) {
			if (psci_smp_available())
				smp_set_ops(&psci_smp_ops);
			else if (mdesc->smp)
				smp_set_ops(mdesc->smp);
		}
		smp_init_cpus();
		smp_build_mpidr_hash();
	}
#endif

	if (!is_smp())
		hyp_mode_check();

	reserve_crashkernel();

#ifdef CONFIG_GENERIC_IRQ_MULTI_HANDLER
	handle_arch_irq = mdesc->handle_irq;
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#endif
#endif

	if (mdesc->init_early)
		mdesc->init_early();
}

static int __init topology_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
		cpuinfo->cpu.hotpluggable = platform_can_hotplug_cpu(cpu);
		register_cpu(&cpuinfo->cpu, cpu);
	}

	return 0;
}
subsys_initcall(topology_init);

#ifdef CONFIG_HAVE_PROC_CPU
static int __init proc_cpu_init(void)
{
	struct proc_dir_entry *res;

	res = proc_mkdir("cpu", NULL);
	if (!res)
		return -ENOMEM;
	return 0;
}
fs_initcall(proc_cpu_init);
#endif

static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	"tls",
	"vfpv4",
	"idiva",
	"idivt",
	"vfpd32",
	"lpae",
	"evtstrm",
	NULL
};

static const char *hwcap2_str[] = {
	"aes",
	"pmull",
	"sha1",
	"sha2",
	"crc32",
	NULL
};

static int c_show(struct seq_file *m, void *v)
{
	int i, j;
	u32 cpuid;

	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor".  Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		cpuid = is_smp() ? per_cpu(cpu_data, i).cpuid : read_cpuid_id();
		seq_printf(m, "model name\t: %s rev %d (%s)\n",
			   cpu_name, cpuid & 15, elf_platform);
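
		/*
		 * loops_per_jiffy counts delay-loop iterations per jiffy;
		 * each iteration is two instructions, so lpj * HZ / 500000
		 * gives the integer part of BogoMIPS and the second term
		 * recovers the two decimal places.
		 */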
#if defined(CONFIG_SMP)
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
#else
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   loops_per_jiffy / (500000/HZ),
			   (loops_per_jiffy / (5000/HZ)) % 100);
#endif
		/* dump out the processor features */
		seq_puts(m, "Features\t: ");

		for (j = 0; hwcap_str[j]; j++)
			if (elf_hwcap & (1 << j))
				seq_printf(m, "%s ", hwcap_str[j]);

		for (j = 0; hwcap2_str[j]; j++)
			if (elf_hwcap2 & (1 << j))
				seq_printf(m, "%s ", hwcap2_str[j]);

		seq_printf(m, "\nCPU implementer\t: 0x%02x\n", cpuid >> 24);
		seq_printf(m, "CPU architecture: %s\n",
			   proc_arch[cpu_architecture()]);

		if ((cpuid & 0x0008f000) == 0x00000000) {
			/* pre-ARM7 */
			seq_printf(m, "CPU part\t: %07x\n", cpuid >> 4);
		} else {
			if ((cpuid & 0x0008f000) == 0x00007000) {
				/* ARM7 */
				seq_printf(m, "CPU variant\t: 0x%02x\n",
					   (cpuid >> 16) & 127);
			} else {
				/* post-ARM7 */
				seq_printf(m, "CPU variant\t: 0x%x\n",
					   (cpuid >> 20) & 15);
			}
			seq_printf(m, "CPU part\t: 0x%03x\n",
				   (cpuid >> 4) & 0xfff);
		}
		seq_printf(m, "CPU revision\t: %d\n\n", cpuid & 15);
	}

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %s\n", system_serial);

	return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};