asedeno.scripts.mit.edu Git - linux.git/commitdiff
x86/apic: Use non-atomic operations when possible
author: Nadav Amit <namit@vmware.com>
Thu, 13 Jun 2019 06:48:13 +0000 (23:48 -0700)
committer: Thomas Gleixner <tglx@linutronix.de>
Sun, 23 Jun 2019 12:07:23 +0000 (14:07 +0200)
Using __clear_bit() and __cpumask_clear_cpu() is more efficient than using
their atomic counterparts.

Use them when atomicity is not needed, such as when manipulating bitmasks
that are on the stack.

Signed-off-by: Nadav Amit <namit@vmware.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Link: https://lkml.kernel.org/r/20190613064813.8102-10-namit@vmware.com
arch/x86/kernel/apic/apic_flat_64.c
arch/x86/kernel/apic/x2apic_cluster.c
arch/x86/kernel/smp.c

index 0005c284a5c5ce64a615a1c07855e04809724503..65072858f5531f33826aa4056f34f35f2185383a 100644 (file)
@@ -78,7 +78,7 @@ flat_send_IPI_mask_allbutself(const struct cpumask *cpumask, int vector)
        int cpu = smp_processor_id();
 
        if (cpu < BITS_PER_LONG)
-               clear_bit(cpu, &mask);
+               __clear_bit(cpu, &mask);
 
        _flat_send_IPI_mask(mask, vector);
 }
@@ -92,7 +92,7 @@ static void flat_send_IPI_allbutself(int vector)
                        unsigned long mask = cpumask_bits(cpu_online_mask)[0];
 
                        if (cpu < BITS_PER_LONG)
-                               clear_bit(cpu, &mask);
+                               __clear_bit(cpu, &mask);
 
                        _flat_send_IPI_mask(mask, vector);
                }
index 7685444a106bb29a3994a5d85066e60b2b4c0d09..609e499387a1aad695feebdcfed43d830ae65607 100644 (file)
@@ -50,7 +50,7 @@ __x2apic_send_IPI_mask(const struct cpumask *mask, int vector, int apic_dest)
        cpumask_copy(tmpmsk, mask);
        /* If IPI should not be sent to self, clear current CPU */
        if (apic_dest != APIC_DEST_ALLINC)
-               cpumask_clear_cpu(smp_processor_id(), tmpmsk);
+               __cpumask_clear_cpu(smp_processor_id(), tmpmsk);
 
        /* Collapse cpus in a cluster so a single IPI per cluster is sent */
        for_each_cpu(cpu, tmpmsk) {
index 04adc8d60aed82178caf3a099d66b497a6c11bcf..acddd988602d38c52816c17a96c5301c3102e52d 100644 (file)
@@ -146,7 +146,7 @@ void native_send_call_func_ipi(const struct cpumask *mask)
        }
 
        cpumask_copy(allbutself, cpu_online_mask);
-       cpumask_clear_cpu(smp_processor_id(), allbutself);
+       __cpumask_clear_cpu(smp_processor_id(), allbutself);
 
        if (cpumask_equal(mask, allbutself) &&
            cpumask_equal(cpu_online_mask, cpu_callout_mask))