arch/nds32/kernel/setup.c
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2005-2017 Andes Technology Corporation

#include <linux/cpu.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/memblock.h>
#include <linux/console.h>
#include <linux/screen_info.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/of_fdt.h>
#include <linux/of_platform.h>
#include <asm/setup.h>
#include <asm/sections.h>
#include <asm/proc-fns.h>
#include <asm/cache_info.h>
#include <asm/elf.h>
#include <nds32_intrinsic.h>

#define HWCAP_MFUSR_PC          0x000001
#define HWCAP_EXT               0x000002
#define HWCAP_EXT2              0x000004
#define HWCAP_FPU               0x000008
#define HWCAP_AUDIO             0x000010
#define HWCAP_BASE16            0x000020
#define HWCAP_STRING            0x000040
#define HWCAP_REDUCED_REGS      0x000080
#define HWCAP_VIDEO             0x000100
#define HWCAP_ENCRYPT           0x000200
#define HWCAP_EDM               0x000400
#define HWCAP_LMDMA             0x000800
#define HWCAP_PFM               0x001000
#define HWCAP_HSMP              0x002000
#define HWCAP_TRACE             0x004000
#define HWCAP_DIV               0x008000
#define HWCAP_MAC               0x010000
#define HWCAP_L2C               0x020000
#define HWCAP_FPU_DP            0x040000
#define HWCAP_V2                0x080000
#define HWCAP_DX_REGS           0x100000

unsigned long cpu_id, cpu_rev, cpu_cfgid;
char cpu_series;
char *endianness = NULL;

unsigned int __atags_pointer __initdata;
unsigned int elf_hwcap;
EXPORT_SYMBOL(elf_hwcap);

/*
 * The following string table must be kept in sync with the HWCAP_xx
 * bitmask, which is defined in <asm/procinfo.h>.
 */
static const char *hwcap_str[] = {
        "mfusr_pc",
        "perf1",
        "perf2",
        "fpu",
        "audio",
        "16b",
        "string",
        "reduced_regs",
        "video",
        "encrypt",
        "edm",
        "lmdma",
        "pfm",
        "hsmp",
        "trace",
        "div",
        "mac",
        "l2c",
        "fpu_dp",
        "v2",
        "dx_regs",
        NULL,
};

#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
#define WRITE_METHOD "write through"
#else
#define WRITE_METHOD "write back"
#endif

struct cache_info L1_cache_info[2];
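/* Fill in L1_cache_info[] and print this CPU's feature string and L1 cache geometry. */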
static void __init dump_cpu_info(int cpu)
{
        int i, p = 0;
        char str[sizeof(hwcap_str) + 16];

        for (i = 0; hwcap_str[i]; i++) {
                if (elf_hwcap & (1 << i)) {
                        sprintf(str + p, "%s ", hwcap_str[i]);
                        p += strlen(hwcap_str[i]) + 1;
                }
        }

        pr_info("CPU%d Features: %s\n", cpu, str);

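        /* L1 geometry: total size in KB = ways * line_size (bytes) * sets / 1024. */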
        L1_cache_info[ICACHE].ways = CACHE_WAY(ICACHE);
        L1_cache_info[ICACHE].line_size = CACHE_LINE_SIZE(ICACHE);
        L1_cache_info[ICACHE].sets = CACHE_SET(ICACHE);
        L1_cache_info[ICACHE].size =
            L1_cache_info[ICACHE].ways * L1_cache_info[ICACHE].line_size *
            L1_cache_info[ICACHE].sets / 1024;
        pr_info("L1I:%dKB/%dS/%dW/%dB\n", L1_cache_info[ICACHE].size,
                L1_cache_info[ICACHE].sets, L1_cache_info[ICACHE].ways,
                L1_cache_info[ICACHE].line_size);
        L1_cache_info[DCACHE].ways = CACHE_WAY(DCACHE);
        L1_cache_info[DCACHE].line_size = CACHE_LINE_SIZE(DCACHE);
        L1_cache_info[DCACHE].sets = CACHE_SET(DCACHE);
        L1_cache_info[DCACHE].size =
            L1_cache_info[DCACHE].ways * L1_cache_info[DCACHE].line_size *
            L1_cache_info[DCACHE].sets / 1024;
        pr_info("L1D:%dKB/%dS/%dW/%dB\n", L1_cache_info[DCACHE].size,
                L1_cache_info[DCACHE].sets, L1_cache_info[DCACHE].ways,
                L1_cache_info[DCACHE].line_size);
        pr_info("L1 D-Cache is %s\n", WRITE_METHOD);
        if (L1_cache_info[DCACHE].line_size != L1_CACHE_BYTES)
                pr_crit
                    ("The cache line size(%d) of this processor is not the same as L1_CACHE_BYTES(%d).\n",
                     L1_cache_info[DCACHE].line_size, L1_CACHE_BYTES);
#ifdef CONFIG_CPU_CACHE_ALIASING
        {
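                /*
                 * aliasing_num is the way size in pages
                 * (size * 1024 / ways / PAGE_SIZE); aliasing_mask selects
                 * the cache-index bits above PAGE_SHIFT.
                 */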
                int aliasing_num;
                aliasing_num =
                    L1_cache_info[ICACHE].size * 1024 / PAGE_SIZE /
                    L1_cache_info[ICACHE].ways;
                L1_cache_info[ICACHE].aliasing_num = aliasing_num;
                L1_cache_info[ICACHE].aliasing_mask =
                    (aliasing_num - 1) << PAGE_SHIFT;
                aliasing_num =
                    L1_cache_info[DCACHE].size * 1024 / PAGE_SIZE /
                    L1_cache_info[DCACHE].ways;
                L1_cache_info[DCACHE].aliasing_num = aliasing_num;
                L1_cache_info[DCACHE].aliasing_mask =
                    (aliasing_num - 1) << PAGE_SHIFT;
        }
#endif
}

static void __init setup_cpuinfo(void)
{
        unsigned long tmp = 0, cpu_name;

        cpu_dcache_inval_all();
        cpu_icache_inval_all();
        __nds32__isb();

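        /*
         * Decode CPU_VER.CPUID: the high nibble gives the series letter
         * ('A' + value - 10, or 'N' when zero), the low nibble the
         * numeric id within that series.
         */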
        cpu_id = (__nds32__mfsr(NDS32_SR_CPU_VER) & CPU_VER_mskCPUID) >> CPU_VER_offCPUID;
        cpu_name = ((cpu_id) & 0xf0) >> 4;
        cpu_series = cpu_name ? cpu_name - 10 + 'A' : 'N';
        cpu_id = cpu_id & 0xf;
        cpu_rev = (__nds32__mfsr(NDS32_SR_CPU_VER) & CPU_VER_mskREV) >> CPU_VER_offREV;
        cpu_cfgid = (__nds32__mfsr(NDS32_SR_CPU_VER) & CPU_VER_mskCFGID) >> CPU_VER_offCFGID;

        pr_info("CPU:%c%ld, CPU_VER 0x%08x(id %lu, rev %lu, cfg %lu)\n",
                cpu_series, cpu_id, __nds32__mfsr(NDS32_SR_CPU_VER), cpu_id, cpu_rev, cpu_cfgid);

        elf_hwcap |= HWCAP_MFUSR_PC;

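        /*
         * MSC_CFG.BASEV == 0 is the original baseline: DIV and MAC are
         * optional and probed individually (MAC is also assumed for
         * cpu_id 12 with rev < 4).  A newer baseline implies the V2 ISA,
         * which always provides DIV and MAC.
         */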
        if (((__nds32__mfsr(NDS32_SR_MSC_CFG) & MSC_CFG_mskBASEV) >> MSC_CFG_offBASEV) == 0) {
                if (__nds32__mfsr(NDS32_SR_MSC_CFG) & MSC_CFG_mskDIV)
                        elf_hwcap |= HWCAP_DIV;

                if ((__nds32__mfsr(NDS32_SR_MSC_CFG) & MSC_CFG_mskMAC)
                    || (cpu_id == 12 && cpu_rev < 4))
                        elf_hwcap |= HWCAP_MAC;
        } else {
                elf_hwcap |= HWCAP_V2;
                elf_hwcap |= HWCAP_DIV;
                elf_hwcap |= HWCAP_MAC;
        }

        if (cpu_cfgid & 0x0001)
                elf_hwcap |= HWCAP_EXT;

        if (cpu_cfgid & 0x0002)
                elf_hwcap |= HWCAP_BASE16;

        if (cpu_cfgid & 0x0004)
                elf_hwcap |= HWCAP_EXT2;

        if (cpu_cfgid & 0x0008)
                elf_hwcap |= HWCAP_FPU;

        if (cpu_cfgid & 0x0010)
                elf_hwcap |= HWCAP_STRING;

        if (__nds32__mfsr(NDS32_SR_MMU_CFG) & MMU_CFG_mskDE)
                endianness = "MSB";
        else
                endianness = "LSB";

        if (__nds32__mfsr(NDS32_SR_MSC_CFG) & MSC_CFG_mskEDM)
                elf_hwcap |= HWCAP_EDM;

        if (__nds32__mfsr(NDS32_SR_MSC_CFG) & MSC_CFG_mskLMDMA)
                elf_hwcap |= HWCAP_LMDMA;

        if (__nds32__mfsr(NDS32_SR_MSC_CFG) & MSC_CFG_mskPFM)
                elf_hwcap |= HWCAP_PFM;

        if (__nds32__mfsr(NDS32_SR_MSC_CFG) & MSC_CFG_mskHSMP)
                elf_hwcap |= HWCAP_HSMP;

        if (__nds32__mfsr(NDS32_SR_MSC_CFG) & MSC_CFG_mskTRACE)
                elf_hwcap |= HWCAP_TRACE;

        if (__nds32__mfsr(NDS32_SR_MSC_CFG) & MSC_CFG_mskAUDIO)
                elf_hwcap |= HWCAP_AUDIO;

        if (__nds32__mfsr(NDS32_SR_MSC_CFG) & MSC_CFG_mskL2C)
                elf_hwcap |= HWCAP_L2C;

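        /* Enable the I-cache and D-cache unless they are disabled in Kconfig. */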
        tmp = __nds32__mfsr(NDS32_SR_CACHE_CTL);
        if (!IS_ENABLED(CONFIG_CPU_DCACHE_DISABLE))
                tmp |= CACHE_CTL_mskDC_EN;

        if (!IS_ENABLED(CONFIG_CPU_ICACHE_DISABLE))
                tmp |= CACHE_CTL_mskIC_EN;
        __nds32__mtsr_isb(tmp, NDS32_SR_CACHE_CTL);

        dump_cpu_info(smp_processor_id());
}

static void __init setup_memory(void)
{
        unsigned long ram_start_pfn;
        unsigned long free_ram_start_pfn;
        phys_addr_t memory_start, memory_end;
        struct memblock_region *region;

        memory_end = memory_start = 0;

        /* Find the main memory region that contains the kernel */
        for_each_memblock(memory, region) {
                memory_start = region->base;
                memory_end = region->base + region->size;
                pr_info("%s: Memory: 0x%x-0x%x\n", __func__,
                        memory_start, memory_end);
        }

        if (!memory_end)
                panic("No memory!");

        ram_start_pfn = PFN_UP(memblock_start_of_DRAM());
        /* free_ram_start_pfn is the first page after the kernel image */
        free_ram_start_pfn = PFN_UP(__pa(&_end));
        max_pfn = PFN_DOWN(memblock_end_of_DRAM());
        /* this may lower max_pfn when highmem is not enabled */
        if (max_pfn - ram_start_pfn <= MAXMEM_PFN)
                max_low_pfn = max_pfn;
        else {
                max_low_pfn = MAXMEM_PFN + ram_start_pfn;
                if (!IS_ENABLED(CONFIG_HIGHMEM))
                        max_pfn = MAXMEM_PFN + ram_start_pfn;
        }
        /* high_memory is related to VMALLOC */
        high_memory = (void *)__va(max_low_pfn * PAGE_SIZE);
        min_low_pfn = free_ram_start_pfn;

        /*
         * initialize the boot-time allocator (with low memory only).
         *
         * This makes the memory from the end of the kernel to the end of
         * RAM usable.
         */
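        /* Reserve [ram_start_pfn, free_ram_start_pfn): the start of RAM up to the end of the kernel image. */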
        memblock_set_bottom_up(true);
        memblock_reserve(PFN_PHYS(ram_start_pfn), PFN_PHYS(free_ram_start_pfn - ram_start_pfn));

        early_init_fdt_reserve_self();
        early_init_fdt_scan_reserved_mem();

        memblock_dump_all();
}

void __init setup_arch(char **cmdline_p)
{
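        /*
         * Parse the flattened device tree: use the blob at __atags_pointer
         * if the boot loader passed one in, otherwise fall back to the
         * built-in __dtb_start blob.
         */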
        early_init_devtree(__atags_pointer ?
                phys_to_virt(__atags_pointer) : __dtb_start);

        setup_cpuinfo();

        init_mm.start_code = (unsigned long)&_stext;
        init_mm.end_code = (unsigned long)&_etext;
        init_mm.end_data = (unsigned long)&_edata;
        init_mm.brk = (unsigned long)&_end;

        /* setup bootmem allocator */
        setup_memory();

        /* paging_init() sets up the MMU and marks all pages as reserved */
        paging_init();

        /* invalidate all TLB entries because new mappings have been created */
        __nds32__tlbop_flua();

        /* use the generic early parameter parser */
        parse_early_param();

        unflatten_and_copy_device_tree();

        if (IS_ENABLED(CONFIG_VT)) {
                if (IS_ENABLED(CONFIG_DUMMY_CONSOLE))
                        conswitchp = &dummy_con;
        }

        *cmdline_p = boot_command_line;
        early_trap_init();
}

static int c_show(struct seq_file *m, void *v)
{
        int i;

        seq_printf(m, "Processor\t: %c%ld (id %lu, rev %lu, cfg %lu)\n",
                   cpu_series, cpu_id, cpu_id, cpu_rev, cpu_cfgid);

        seq_printf(m, "L1I\t\t: %luKB/%luS/%luW/%luB\n",
                   CACHE_SET(ICACHE) * CACHE_WAY(ICACHE) *
                   CACHE_LINE_SIZE(ICACHE) / 1024, CACHE_SET(ICACHE),
                   CACHE_WAY(ICACHE), CACHE_LINE_SIZE(ICACHE));

        seq_printf(m, "L1D\t\t: %luKB/%luS/%luW/%luB\n",
                   CACHE_SET(DCACHE) * CACHE_WAY(DCACHE) *
                   CACHE_LINE_SIZE(DCACHE) / 1024, CACHE_SET(DCACHE),
                   CACHE_WAY(DCACHE), CACHE_LINE_SIZE(DCACHE));

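        /* BogoMIPS = loops_per_jiffy * HZ / 500000, printed with two decimal places. */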
        seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
                   loops_per_jiffy / (500000 / HZ),
                   (loops_per_jiffy / (5000 / HZ)) % 100);

        /* dump out the processor features */
        seq_puts(m, "Features\t: ");

        for (i = 0; hwcap_str[i]; i++)
                if (elf_hwcap & (1 << i))
                        seq_printf(m, "%s ", hwcap_str[i]);

        seq_puts(m, "\n\n");

        return 0;
}

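/*
 * /proc/cpuinfo is a single record: c_start returns one token for
 * position 0 and c_next ends the iteration.
 */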
static void *c_start(struct seq_file *m, loff_t *pos)
{
        return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
        ++*pos;
        return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

struct seq_operations cpuinfo_op = {
        .start = c_start,
        .next = c_next,
        .stop = c_stop,
        .show = c_show
};