/*
 * arch/arm64/kernel/topology.c
 *
 * Copyright (C) 2011,2013,2014 Linaro Limited.
 *
 * Based on the arm32 version written by Vincent Guittot in turn based on
 * arch/sh/kernel/topology.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/acpi.h>
#include <linux/arch_topology.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/node.h>
#include <linux/nodemask.h>
#include <linux/of.h>
#include <linux/sched.h>
#include <linux/sched/topology.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/string.h>

#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/topology.h>

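/*
 * get_cpu_for_node() resolves the "cpu" phandle of a cpu-map leaf node
 * (a core%d or thread%d node) to a logical CPU id, parsing the optional
 * capacity-dmips-mhz property along the way. A negative return means
 * either that the phandle is missing or that the referenced node could
 * not be matched to a possible CPU.
 */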
static int __init get_cpu_for_node(struct device_node *node)
{
        struct device_node *cpu_node;
        int cpu;

        cpu_node = of_parse_phandle(node, "cpu", 0);
        if (!cpu_node)
                return -1;

        cpu = of_cpu_node_to_id(cpu_node);
        if (cpu >= 0)
                topology_parse_cpu_capacity(cpu_node, cpu);
        else
                pr_crit("Unable to find CPU node for %pOF\n", cpu_node);

        of_node_put(cpu_node);
        return cpu;
}

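/*
 * A core node may take either of two shapes: an SMT core containing
 * thread%d children (each carrying its own "cpu" phandle), or a leaf
 * core whose node carries the "cpu" phandle directly. A node that
 * mixes both is rejected as malformed.
 */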
static int __init parse_core(struct device_node *core, int package_id,
                             int core_id)
{
        char name[20];	/* fits "thread%d" for any sane thread index */
        bool leaf = true;
        int i = 0;
        int cpu;
        struct device_node *t;

        do {
                snprintf(name, sizeof(name), "thread%d", i);
                t = of_get_child_by_name(core, name);
                if (t) {
                        leaf = false;
                        cpu = get_cpu_for_node(t);
                        if (cpu >= 0) {
                                cpu_topology[cpu].package_id = package_id;
                                cpu_topology[cpu].core_id = core_id;
                                cpu_topology[cpu].thread_id = i;
                        } else {
                                pr_err("%pOF: Can't get CPU for thread\n",
                                       t);
                                of_node_put(t);
                                return -EINVAL;
                        }
                        of_node_put(t);
                }
                i++;
        } while (t);

        cpu = get_cpu_for_node(core);
        if (cpu >= 0) {
                if (!leaf) {
                        pr_err("%pOF: Core has both threads and CPU\n",
                               core);
                        return -EINVAL;
                }

                cpu_topology[cpu].package_id = package_id;
                cpu_topology[cpu].core_id = core_id;
        } else if (leaf) {
                pr_err("%pOF: Can't get CPU for leaf core\n", core);
                return -EINVAL;
        }

        return 0;
}

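/*
 * For illustration, a cpu-map fragment of the shape this parser accepts
 * (see the DT topology binding under
 * Documentation/devicetree/bindings/arm; the &CPUn phandles here are
 * placeholders):
 *
 *	cpu-map {
 *		cluster0 {
 *			core0 {
 *				thread0 { cpu = <&CPU0>; };
 *				thread1 { cpu = <&CPU1>; };
 *			};
 *			core1 { ... };
 *		};
 *		cluster1 { ... };
 *	};
 */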
static int __init parse_cluster(struct device_node *cluster, int depth)
{
        char name[20];	/* fits "cluster%d"/"core%d" for any sane index */
        bool leaf = true;
        bool has_cores = false;
        struct device_node *c;
        static int package_id __initdata;
        int core_id = 0;
        int i, ret;

        /*
         * First check for child clusters; we currently ignore any
         * information about the nesting of clusters and present the
         * scheduler with a flat list of them.
         */
        i = 0;
        do {
                snprintf(name, sizeof(name), "cluster%d", i);
                c = of_get_child_by_name(cluster, name);
                if (c) {
                        leaf = false;
                        ret = parse_cluster(c, depth + 1);
                        of_node_put(c);
                        if (ret != 0)
                                return ret;
                }
                i++;
        } while (c);

        /* Now check for cores */
        i = 0;
        do {
                snprintf(name, sizeof(name), "core%d", i);
                c = of_get_child_by_name(cluster, name);
                if (c) {
                        has_cores = true;

                        if (depth == 0) {
                                pr_err("%pOF: cpu-map children should be clusters\n",
                                       c);
                                of_node_put(c);
                                return -EINVAL;
                        }

                        if (leaf) {
                                ret = parse_core(c, package_id, core_id++);
                        } else {
                                pr_err("%pOF: Non-leaf cluster with core %s\n",
                                       cluster, name);
                                ret = -EINVAL;
                        }

                        of_node_put(c);
                        if (ret != 0)
                                return ret;
                }
                i++;
        } while (c);

        if (leaf && !has_cores)
                pr_warn("%pOF: empty cluster\n", cluster);

        if (leaf)
                package_id++;

        return 0;
}

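/*
 * parse_dt_topology() walks /cpus/cpu-map if one is present. On success
 * the capacities gathered while parsing are normalized, and the table
 * is checked so that every possible CPU was described; any hole fails
 * the whole parse so the caller can discard the partial result.
 */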
static int __init parse_dt_topology(void)
{
        struct device_node *cn, *map;
        int ret = 0;
        int cpu;

        cn = of_find_node_by_path("/cpus");
        if (!cn) {
                pr_err("No CPU information found in DT\n");
                return 0;
        }

        /*
         * When topology is provided cpu-map is essentially a root
         * cluster with restricted subnodes.
         */
        map = of_get_child_by_name(cn, "cpu-map");
        if (!map)
                goto out;

        ret = parse_cluster(map, 0);
        if (ret != 0)
                goto out_map;

        topology_normalize_cpu_scale();

        /*
         * Check that all cores are in the topology; the SMP code will
         * only mark cores described in the DT as possible.
         */
        for_each_possible_cpu(cpu)
                if (cpu_topology[cpu].package_id == -1)
                        ret = -EINVAL;

out_map:
        of_node_put(map);
out:
        of_node_put(cn);
        return ret;
}

/*
 * cpu topology table
 */
struct cpu_topology cpu_topology[NR_CPUS];
EXPORT_SYMBOL_GPL(cpu_topology);

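/*
 * core_sibling spans every CPU in the same package; this is the mask
 * the scheduler pulls in through cpu_coregroup_mask() when it builds
 * the MC sched-domain level (see default_topology in
 * kernel/sched/topology.c).
 */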
const struct cpumask *cpu_coregroup_mask(int cpu)
{
        return &cpu_topology[cpu].core_sibling;
}

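/*
 * Called via store_cpu_topology() as each CPU comes up: fold the entry
 * for @cpuid into the core_sibling mask of every CPU in the same
 * package and the thread_sibling mask of every CPU in the same core.
 * The update is symmetric, so already-online CPUs learn about the
 * newcomer as well.
 */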
static void update_siblings_masks(unsigned int cpuid)
{
        struct cpu_topology *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
        int cpu;

        /* update core and thread sibling masks */
        for_each_possible_cpu(cpu) {
                cpu_topo = &cpu_topology[cpu];

                if (cpuid_topo->package_id != cpu_topo->package_id)
                        continue;

                cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
                if (cpu != cpuid)
                        cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);

                if (cpuid_topo->core_id != cpu_topo->core_id)
                        continue;

                cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling);
                if (cpu != cpuid)
                        cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
        }
}

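/*
 * For reference, MPIDR_EL1 packs its affinity fields as Aff0 in bits
 * [7:0], Aff1 in [15:8], Aff2 in [23:16] and Aff3 in [39:32], with bit
 * 24 (MT) flagging a multithreaded implementation and bit 30 (U) a
 * uniprocessor. When MT is set, Aff0 numbers hardware threads, so the
 * core and package fields below shift up one affinity level.
 */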
void store_cpu_topology(unsigned int cpuid)
{
        struct cpu_topology *cpuid_topo = &cpu_topology[cpuid];
        u64 mpidr;

        if (cpuid_topo->package_id != -1)
                goto topology_populated;

        mpidr = read_cpuid_mpidr();

        /* Uniprocessor systems can rely on default topology values */
        if (mpidr & MPIDR_UP_BITMASK)
                return;

        /* Create cpu topology mapping based on MPIDR. */
        if (mpidr & MPIDR_MT_BITMASK) {
                /* Multiprocessor system : Multi-threads per core */
                cpuid_topo->thread_id  = MPIDR_AFFINITY_LEVEL(mpidr, 0);
                cpuid_topo->core_id    = MPIDR_AFFINITY_LEVEL(mpidr, 1);
                cpuid_topo->package_id = MPIDR_AFFINITY_LEVEL(mpidr, 2) |
                                         MPIDR_AFFINITY_LEVEL(mpidr, 3) << 8;
        } else {
                /* Multiprocessor system : Single-thread per core */
                cpuid_topo->thread_id  = -1;
                cpuid_topo->core_id    = MPIDR_AFFINITY_LEVEL(mpidr, 0);
                cpuid_topo->package_id = MPIDR_AFFINITY_LEVEL(mpidr, 1) |
                                         MPIDR_AFFINITY_LEVEL(mpidr, 2) << 8 |
                                         MPIDR_AFFINITY_LEVEL(mpidr, 3) << 16;
        }

        pr_debug("CPU%u: cluster %d core %d thread %d mpidr %#016llx\n",
                 cpuid, cpuid_topo->package_id, cpuid_topo->core_id,
                 cpuid_topo->thread_id, mpidr);

topology_populated:
        update_siblings_masks(cpuid);
}

static void __init reset_cpu_topology(void)
{
        unsigned int cpu;

        for_each_possible_cpu(cpu) {
                struct cpu_topology *cpu_topo = &cpu_topology[cpu];

                cpu_topo->thread_id = -1;
                cpu_topo->core_id = 0;
                cpu_topo->package_id = -1;

                cpumask_clear(&cpu_topo->core_sibling);
                cpumask_set_cpu(cpu, &cpu_topo->core_sibling);
                cpumask_clear(&cpu_topo->thread_sibling);
                cpumask_set_cpu(cpu, &cpu_topo->thread_sibling);
        }
}

#ifdef CONFIG_ACPI
/*
 * Propagate the topology information from the ACPI PPTT processor
 * hierarchy nodes to the cpu_topology array.
 */
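/*
 * find_acpi_cpu_topology(cpu, level) hands back an id that is unique
 * per topology node at the given level, counting up from the CPU's own
 * processor node at level 0, so equal ids at a level mean shared
 * hardware; find_acpi_cpu_topology_package() does the same for the
 * enclosing physical package.
 */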
static int __init parse_acpi_topology(void)
{
        bool is_threaded;
        int cpu, topology_id;

        is_threaded = read_cpuid_mpidr() & MPIDR_MT_BITMASK;

        for_each_possible_cpu(cpu) {
                topology_id = find_acpi_cpu_topology(cpu, 0);
                if (topology_id < 0)
                        return topology_id;

                if (is_threaded) {
                        cpu_topology[cpu].thread_id = topology_id;
                        topology_id = find_acpi_cpu_topology(cpu, 1);
                        cpu_topology[cpu].core_id   = topology_id;
                } else {
                        cpu_topology[cpu].thread_id  = -1;
                        cpu_topology[cpu].core_id    = topology_id;
                }
                topology_id = find_acpi_cpu_topology_package(cpu);
                cpu_topology[cpu].package_id = topology_id;
        }

        return 0;
}

#else
static inline int __init parse_acpi_topology(void)
{
        return -EINVAL;
}
#endif

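/*
 * Runs once during early SMP bring-up. Firmware-described topology
 * (ACPI PPTT first, then a DT cpu-map) is preferred; if neither parses
 * cleanly the table stays reset and store_cpu_topology() later derives
 * a layout from each CPU's MPIDR instead.
 */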
void __init init_cpu_topology(void)
{
        reset_cpu_topology();

        /*
         * Discard anything that was parsed if we hit an error so we
         * don't use partial information.
         */
        if (!acpi_disabled && parse_acpi_topology())
                reset_cpu_topology();
        else if (of_have_populated_dt() && parse_dt_topology())
                reset_cpu_topology();
}