Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
author    Linus Torvalds <torvalds@linux-foundation.org>  Sat, 19 Apr 2014 17:41:43 +0000 (10:41 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>  Sat, 19 Apr 2014 17:41:43 +0000 (10:41 -0700)
Pull x86 fix from Ingo Molnar:
 "This fixes the preemption-count imbalance crash reported by Owen
  Kibel"

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/mce: Fix CMCI preemption bugs
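
For context on the crash: get_cpu_var() bumps the preempt count and must always be paired with put_cpu_var(); leaving the pair unbalanced is exactly the preemption-count imbalance mentioned above. Below is a minimal, illustrative sketch (not the patch itself) of the broken pattern next to the one-line replacement the diff switches to. The helper function names are made up for illustration; only the per-CPU flag mirrors the mce_polled_error variable touched in the diff.

#include <linux/percpu.h>
#include <linux/bitops.h>

static DEFINE_PER_CPU(unsigned long, mce_polled_error);

/* Broken pattern: get_cpu_var() disables preemption and is never undone. */
static void mark_polled_error_buggy(void)
{
        unsigned long *v;

        v = &get_cpu_var(mce_polled_error);     /* preempt_count++ */
        set_bit(0, v);
        /* Missing put_cpu_var(mce_polled_error): the preempt count never
         * drops back, the CPU is left "atomic", and later scheduling
         * triggers the reported crash. */
}

/* Fixed pattern: a single preemption-safe per-CPU store, nothing to unbalance. */
static void mark_polled_error_fixed(void)
{
        this_cpu_write(mce_polled_error, 1);
}

On x86, this_cpu_write() compiles down to a single segment-prefixed store to the local CPU's copy, so there is no preempt-count bookkeeping to get wrong.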

arch/x86/kernel/cpu/mcheck/mce.c
arch/x86/kernel/cpu/mcheck/mce_intel.c

diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index eeee23ff75ef8ddb28a002d26b5bee98b21e03ca..68317c80de7f65723c3f909762c6204629ef562d 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -598,7 +598,6 @@ void machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
 {
        struct mce m;
        int i;
-       unsigned long *v;
 
        this_cpu_inc(mce_poll_count);
 
@@ -618,8 +617,7 @@ void machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
                if (!(m.status & MCI_STATUS_VAL))
                        continue;
 
-               v = &get_cpu_var(mce_polled_error);
-               set_bit(0, v);
+               this_cpu_write(mce_polled_error, 1);
                /*
                 * Uncorrected or signalled events are handled by the exception
                 * handler when it is enabled, so don't process those here.
diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel.c b/arch/x86/kernel/cpu/mcheck/mce_intel.c
index 3bdb95ae8c430fa8bacc76a9f644c4abce8ec35e..9a316b21df8bd4df741a2507513f5879712a2dee 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_intel.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_intel.c
@@ -42,7 +42,7 @@ static DEFINE_PER_CPU(mce_banks_t, mce_banks_owned);
  * cmci_discover_lock protects against parallel discovery attempts
  * which could race against each other.
  */
-static DEFINE_RAW_SPINLOCK(cmci_discover_lock);
+static DEFINE_SPINLOCK(cmci_discover_lock);
 
 #define CMCI_THRESHOLD         1
 #define CMCI_POLL_INTERVAL     (30 * HZ)
@@ -144,14 +144,14 @@ static void cmci_storm_disable_banks(void)
        int bank;
        u64 val;
 
-       raw_spin_lock_irqsave(&cmci_discover_lock, flags);
+       spin_lock_irqsave(&cmci_discover_lock, flags);
        owned = __get_cpu_var(mce_banks_owned);
        for_each_set_bit(bank, owned, MAX_NR_BANKS) {
                rdmsrl(MSR_IA32_MCx_CTL2(bank), val);
                val &= ~MCI_CTL2_CMCI_EN;
                wrmsrl(MSR_IA32_MCx_CTL2(bank), val);
        }
-       raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
+       spin_unlock_irqrestore(&cmci_discover_lock, flags);
 }
 
 static bool cmci_storm_detect(void)
@@ -211,7 +211,7 @@ static void cmci_discover(int banks)
        int i;
        int bios_wrong_thresh = 0;
 
-       raw_spin_lock_irqsave(&cmci_discover_lock, flags);
+       spin_lock_irqsave(&cmci_discover_lock, flags);
        for (i = 0; i < banks; i++) {
                u64 val;
                int bios_zero_thresh = 0;
@@ -266,7 +266,7 @@ static void cmci_discover(int banks)
                        WARN_ON(!test_bit(i, __get_cpu_var(mce_poll_banks)));
                }
        }
-       raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
+       spin_unlock_irqrestore(&cmci_discover_lock, flags);
        if (mca_cfg.bios_cmci_threshold && bios_wrong_thresh) {
                pr_info_once(
                        "bios_cmci_threshold: Some banks do not have valid thresholds set\n");
@@ -316,10 +316,10 @@ void cmci_clear(void)
 
        if (!cmci_supported(&banks))
                return;
-       raw_spin_lock_irqsave(&cmci_discover_lock, flags);
+       spin_lock_irqsave(&cmci_discover_lock, flags);
        for (i = 0; i < banks; i++)
                __cmci_disable_bank(i);
-       raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
+       spin_unlock_irqrestore(&cmci_discover_lock, flags);
 }
 
 static void cmci_rediscover_work_func(void *arg)
@@ -360,9 +360,9 @@ void cmci_disable_bank(int bank)
        if (!cmci_supported(&banks))
                return;
 
-       raw_spin_lock_irqsave(&cmci_discover_lock, flags);
+       spin_lock_irqsave(&cmci_discover_lock, flags);
        __cmci_disable_bank(bank);
-       raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
+       spin_unlock_irqrestore(&cmci_discover_lock, flags);
 }
 
 static void intel_init_cmci(void)
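
A closing note on the locking side of the patch: acquiring a spinlock disables preemption, and the _irqsave variant additionally masks local interrupts, so per-CPU state touched inside the critical section can neither migrate to another CPU nor race with the CMCI interrupt path on the same CPU. A minimal sketch of that pattern, with hypothetical names standing in for cmci_discover_lock and the per-CPU bank state:

#include <linux/spinlock.h>
#include <linux/percpu.h>

static DEFINE_SPINLOCK(example_lock);                   /* stand-in for cmci_discover_lock */
static DEFINE_PER_CPU(unsigned long, example_state);    /* stand-in for per-CPU bank state */

static void update_state_locked(void)
{
        unsigned long flags;

        /* Taking the lock disables preemption; _irqsave also masks local
         * interrupts, so the per-CPU write below is safe against both
         * migration and the CMCI interrupt handler on this CPU. */
        spin_lock_irqsave(&example_lock, flags);
        __this_cpu_write(example_state, 1);
        spin_unlock_irqrestore(&example_lock, flags);
}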