asedeno.scripts.mit.edu Git - linux.git/commitdiff
arm64: topology: enable ACPI/PPTT based CPU topology
authorJeremy Linton <jeremy.linton@arm.com>
Fri, 11 May 2018 23:58:05 +0000 (18:58 -0500)
committerCatalin Marinas <catalin.marinas@arm.com>
Thu, 17 May 2018 16:28:09 +0000 (17:28 +0100)
Propagate the topology information from the PPTT tree to the
cpu_topology array. We can get the thread id and core_id by assuming
certain levels of the PPTT tree correspond to those concepts.
The package_id is flagged in the tree and can be found by calling
find_acpi_cpu_topology_package() which terminates
its search when it finds an ACPI node flagged as the physical
package. If the tree doesn't contain enough levels to represent
all of the requested levels then the root node will be returned
for all subsequent levels.

Tested-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Tested-by: Vijaya Kumar K <vkilari@codeaurora.org>
Tested-by: Xiongfeng Wang <wangxiongfeng2@huawei.com>
Tested-by: Tomasz Nowicki <Tomasz.Nowicki@cavium.com>
Acked-by: Sudeep Holla <sudeep.holla@arm.com>
Acked-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Acked-by: Morten Rasmussen <morten.rasmussen@arm.com>
Signed-off-by: Jeremy Linton <jeremy.linton@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
arch/arm64/kernel/topology.c

index dc18b1e531943e35e0905412ff3ac71ce956334f..047d98e685020b3704e10d21424afb9710f655d6 100644 (file)
@@ -11,6 +11,7 @@
  * for more details.
  */
 
+#include <linux/acpi.h>
 #include <linux/arch_topology.h>
 #include <linux/cpu.h>
 #include <linux/cpumask.h>
@@ -22,6 +23,7 @@
 #include <linux/sched.h>
 #include <linux/sched/topology.h>
 #include <linux/slab.h>
+#include <linux/smp.h>
 #include <linux/string.h>
 
 #include <asm/cpu.h>
@@ -296,6 +298,45 @@ static void __init reset_cpu_topology(void)
        }
 }
 
+#ifdef CONFIG_ACPI
+/*
+ * Propagate the topology information of the processor_topology_node tree to the
+ * cpu_topology array.
+ *
+ * Returns 0 on success, or a negative value if a CPU's PPTT topology
+ * node cannot be found (the caller discards partial results).
+ */
+static int __init parse_acpi_topology(void)
+{
+       bool is_threaded;
+       int cpu, topology_id;
+
+       /* MPIDR MT bit set means the lowest topology level is a thread. */
+       is_threaded = read_cpuid_mpidr() & MPIDR_MT_BITMASK;
+
+       for_each_possible_cpu(cpu) {
+               /* Topology id of this CPU's leaf (level 0) PPTT node. */
+               topology_id = find_acpi_cpu_topology(cpu, 0);
+               if (topology_id < 0)
+                       return topology_id;
+
+               if (is_threaded) {
+                       /* Leaf is the thread; level 1 above it is the core. */
+                       cpu_topology[cpu].thread_id = topology_id;
+                       topology_id = find_acpi_cpu_topology(cpu, 1);
+                       cpu_topology[cpu].core_id   = topology_id;
+               } else {
+                       /* No SMT: the leaf node is the core itself. */
+                       cpu_topology[cpu].thread_id  = -1;
+                       cpu_topology[cpu].core_id    = topology_id;
+               }
+               /* Node flagged as the physical package in the PPTT. */
+               topology_id = find_acpi_cpu_topology_package(cpu);
+               cpu_topology[cpu].package_id = topology_id;
+       }
+
+       return 0;
+}
+
+#else
+/* Without ACPI, report failure so the caller falls back to DT parsing. */
+static inline int __init parse_acpi_topology(void)
+{
+       return -EINVAL;
+}
+#endif
+
 void __init init_cpu_topology(void)
 {
        reset_cpu_topology();
@@ -304,6 +345,8 @@ void __init init_cpu_topology(void)
         * Discard anything that was parsed if we hit an error so we
         * don't use partial information.
         */
-       if (of_have_populated_dt() && parse_dt_topology())
+       if (!acpi_disabled && parse_acpi_topology())
+               reset_cpu_topology();
+       else if (of_have_populated_dt() && parse_dt_topology())
                reset_cpu_topology();
 }