s390/topology: use cpu_topology array instead of per cpu variable
author Heiko Carstens <heiko.carstens@de.ibm.com>
Fri, 2 Dec 2016 09:38:37 +0000 (10:38 +0100)
committer Martin Schwidefsky <schwidefsky@de.ibm.com>
Wed, 7 Dec 2016 06:23:16 +0000 (07:23 +0100)
CPU topology information like the CPU-to-node mapping must already be
set up in setup_arch. Topology information is currently made available
via a per-cpu variable; this, however, will not work once the
initialization is moved to setup_arch, since the generic percpu setup
is done much later.

Therefore convert back to a cpu_topology array.
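
As a rough illustration (not part of the commit): a statically allocated
array can be written from setup_arch(), whereas a per-cpu variable only
becomes usable once the generic percpu areas exist. The helper below,
early_topology_set_node(), is hypothetical and assumes the declarations
from arch/s390/include/asm/topology.h as changed in this patch.

  #include <linux/init.h>
  #include <asm/topology.h>

  static void __init early_topology_set_node(int cpu, int node)
  {
          /*
           * Plain array indexing: the array lives in the kernel image,
           * so this is safe in setup_arch(), before percpu setup.
           */
          cpu_topology[cpu].node_id = node;

          /*
           * The old per-cpu form,
           *
           *      per_cpu(cpu_topology, cpu).node_id = node;
           *
           * must not run that early, because per_cpu() depends on the
           * offsets established later by the generic percpu setup
           * (setup_per_cpu_areas()).
           */
  }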

Reviewed-by: Michael Holzheu <holzheu@linux.vnet.ibm.com>
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
arch/s390/include/asm/topology.h
arch/s390/kernel/topology.c
arch/s390/numa/mode_emu.c

index f15f5571ca2b5e6bf64283e6588ac1560cc7238a..bc6f45421c98e0a59bdb78da1788c4f01c04cbb6 100644 (file)
@@ -22,18 +22,17 @@ struct cpu_topology_s390 {
        cpumask_t drawer_mask;
 };
 
-DECLARE_PER_CPU(struct cpu_topology_s390, cpu_topology);
-
-#define topology_physical_package_id(cpu) (per_cpu(cpu_topology, cpu).socket_id)
-#define topology_thread_id(cpu)                  (per_cpu(cpu_topology, cpu).thread_id)
-#define topology_sibling_cpumask(cpu) \
-               (&per_cpu(cpu_topology, cpu).thread_mask)
-#define topology_core_id(cpu)            (per_cpu(cpu_topology, cpu).core_id)
-#define topology_core_cpumask(cpu)       (&per_cpu(cpu_topology, cpu).core_mask)
-#define topology_book_id(cpu)            (per_cpu(cpu_topology, cpu).book_id)
-#define topology_book_cpumask(cpu)       (&per_cpu(cpu_topology, cpu).book_mask)
-#define topology_drawer_id(cpu)                  (per_cpu(cpu_topology, cpu).drawer_id)
-#define topology_drawer_cpumask(cpu)     (&per_cpu(cpu_topology, cpu).drawer_mask)
+extern struct cpu_topology_s390 cpu_topology[NR_CPUS];
+
+#define topology_physical_package_id(cpu) (cpu_topology[cpu].socket_id)
+#define topology_thread_id(cpu)                  (cpu_topology[cpu].thread_id)
+#define topology_sibling_cpumask(cpu)    (&cpu_topology[cpu].thread_mask)
+#define topology_core_id(cpu)            (cpu_topology[cpu].core_id)
+#define topology_core_cpumask(cpu)       (&cpu_topology[cpu].core_mask)
+#define topology_book_id(cpu)            (cpu_topology[cpu].book_id)
+#define topology_book_cpumask(cpu)       (&cpu_topology[cpu].book_mask)
+#define topology_drawer_id(cpu)                  (cpu_topology[cpu].drawer_id)
+#define topology_drawer_cpumask(cpu)     (&cpu_topology[cpu].drawer_mask)
 
 #define mc_capable() 1
 
@@ -65,7 +64,7 @@ static inline void topology_expect_change(void) { }
 #define cpu_to_node cpu_to_node
 static inline int cpu_to_node(int cpu)
 {
-       return per_cpu(cpu_topology, cpu).node_id;
+       return cpu_topology[cpu].node_id;
 }
 
 /* Returns a pointer to the cpumask of CPUs on node 'node'. */
index 8705ee66c0874fb73fe6cdf3c751a2d717fbca90..7169d112c91a4697905f480590bcda9b0cdaec59 100644 (file)
@@ -41,15 +41,15 @@ static bool topology_enabled = true;
 static DECLARE_WORK(topology_work, topology_work_fn);
 
 /*
- * Socket/Book linked lists and per_cpu(cpu_topology) updates are
+ * Socket/Book linked lists and cpu_topology updates are
  * protected by "sched_domains_mutex".
  */
 static struct mask_info socket_info;
 static struct mask_info book_info;
 static struct mask_info drawer_info;
 
-DEFINE_PER_CPU(struct cpu_topology_s390, cpu_topology);
-EXPORT_PER_CPU_SYMBOL_GPL(cpu_topology);
+struct cpu_topology_s390 cpu_topology[NR_CPUS];
+EXPORT_SYMBOL_GPL(cpu_topology);
 
 static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
 {
@@ -97,7 +97,7 @@ static void add_cpus_to_mask(struct topology_core *tl_core,
                if (lcpu < 0)
                        continue;
                for (i = 0; i <= smp_cpu_mtid; i++) {
-                       topo = &per_cpu(cpu_topology, lcpu + i);
+                       topo = &cpu_topology[lcpu + i];
                        topo->drawer_id = drawer->id;
                        topo->book_id = book->id;
                        topo->socket_id = socket->id;
@@ -220,7 +220,7 @@ static void update_cpu_masks(void)
        int cpu;
 
        for_each_possible_cpu(cpu) {
-               topo = &per_cpu(cpu_topology, cpu);
+               topo = &cpu_topology[cpu];
                topo->thread_mask = cpu_thread_map(cpu);
                topo->core_mask = cpu_group_map(&socket_info, cpu);
                topo->book_mask = cpu_group_map(&book_info, cpu);
@@ -394,23 +394,23 @@ int topology_cpu_init(struct cpu *cpu)
 
 static const struct cpumask *cpu_thread_mask(int cpu)
 {
-       return &per_cpu(cpu_topology, cpu).thread_mask;
+       return &cpu_topology[cpu].thread_mask;
 }
 
 
 const struct cpumask *cpu_coregroup_mask(int cpu)
 {
-       return &per_cpu(cpu_topology, cpu).core_mask;
+       return &cpu_topology[cpu].core_mask;
 }
 
 static const struct cpumask *cpu_book_mask(int cpu)
 {
-       return &per_cpu(cpu_topology, cpu).book_mask;
+       return &cpu_topology[cpu].book_mask;
 }
 
 static const struct cpumask *cpu_drawer_mask(int cpu)
 {
-       return &per_cpu(cpu_topology, cpu).drawer_mask;
+       return &cpu_topology[cpu].drawer_mask;
 }
 
 static int __init early_parse_topology(char *p)
index b83109328fec32c919d07788c82729941ee350a4..2ed27e8eb4d41673969c4cc6152f08394a5cd3f6 100644 (file)
@@ -355,7 +355,7 @@ static struct toptree *toptree_from_topology(void)
        phys = toptree_new(TOPTREE_ID_PHYS, 1);
 
        for_each_online_cpu(cpu) {
-               top = &per_cpu(cpu_topology, cpu);
+               top = &cpu_topology[cpu];
                node = toptree_get_child(phys, 0);
                drawer = toptree_get_child(node, top->drawer_id);
                book = toptree_get_child(drawer, top->book_id);
@@ -378,7 +378,7 @@ static void topology_add_core(struct toptree *core)
        int cpu;
 
        for_each_cpu(cpu, &core->mask) {
-               top = &per_cpu(cpu_topology, cpu);
+               top = &cpu_topology[cpu];
                cpumask_copy(&top->thread_mask, &core->mask);
                cpumask_copy(&top->core_mask, &core_mc(core)->mask);
                cpumask_copy(&top->book_mask, &core_book(core)->mask);