perf, x86: Calculate perfctr msr addresses in helper functions
author    Robert Richter <robert.richter@amd.com>
          Wed, 2 Feb 2011 16:40:57 +0000 (17:40 +0100)
committer Ingo Molnar <mingo@elte.hu>
          Wed, 16 Feb 2011 12:30:50 +0000 (13:30 +0100)
This patch adds helper functions to calculate perfctr MSR addresses.
We need these for the later addition of AMD family 15h CPU support,
which requires changing the algorithm that generates the perfctr MSR
addresses.
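
For context, a minimal sketch of where this indirection is headed (not
the actual follow-up patch): on family 15h the core PMU MSRs interleave
(PERF_CTL0 at 0xc0010200, PERF_CTR0 at 0xc0010201, PERF_CTL1 at
0xc0010202, and so on), so the per-counter address stride becomes two
instead of one. The msrs_interleaved flag below is hypothetical, purely
for illustration; the real follow-up keys off CPU features:

    static inline unsigned int x86_pmu_config_addr(int index)
    {
            /*
             * Hypothetical flag: on family 15h, eventsel would point
             * at PERF_CTL0 and consecutive counters sit 2 MSRs apart.
             */
            if (x86_pmu.msrs_interleaved)
                    return x86_pmu.eventsel + (index << 1);
            return x86_pmu.eventsel + index;
    }

With all call sites going through the helpers, only this one function
(and its x86_pmu_event_addr() counterpart) has to learn the new layout.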

Signed-off-by: Robert Richter <robert.richter@amd.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1296664860-10886-3-git-send-email-robert.richter@amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
arch/x86/kernel/cpu/perf_event.c
arch/x86/kernel/cpu/perf_event_intel.c

index 70d6d8fc241196a528d74039e323354face5b0ce..ee40c1ad0ebcd0cc37388fc4724b01e075f16f65 100644 (file)
@@ -321,6 +321,16 @@ x86_perf_event_update(struct perf_event *event)
        return new_raw_count;
 }
 
+static inline unsigned int x86_pmu_config_addr(int index)
+{
+       return x86_pmu.eventsel + index;
+}
+
+static inline unsigned int x86_pmu_event_addr(int index)
+{
+       return x86_pmu.perfctr + index;
+}
+
 static atomic_t active_events;
 static DEFINE_MUTEX(pmc_reserve_mutex);
 
@@ -331,12 +341,12 @@ static bool reserve_pmc_hardware(void)
        int i;
 
        for (i = 0; i < x86_pmu.num_counters; i++) {
-               if (!reserve_perfctr_nmi(x86_pmu.perfctr + i))
+               if (!reserve_perfctr_nmi(x86_pmu_event_addr(i)))
                        goto perfctr_fail;
        }
 
        for (i = 0; i < x86_pmu.num_counters; i++) {
-               if (!reserve_evntsel_nmi(x86_pmu.eventsel + i))
+               if (!reserve_evntsel_nmi(x86_pmu_config_addr(i)))
                        goto eventsel_fail;
        }
 
@@ -344,13 +354,13 @@ static bool reserve_pmc_hardware(void)
 
 eventsel_fail:
        for (i--; i >= 0; i--)
-               release_evntsel_nmi(x86_pmu.eventsel + i);
+               release_evntsel_nmi(x86_pmu_config_addr(i));
 
        i = x86_pmu.num_counters;
 
 perfctr_fail:
        for (i--; i >= 0; i--)
-               release_perfctr_nmi(x86_pmu.perfctr + i);
+               release_perfctr_nmi(x86_pmu_event_addr(i));
 
        return false;
 }
@@ -360,8 +370,8 @@ static void release_pmc_hardware(void)
        int i;
 
        for (i = 0; i < x86_pmu.num_counters; i++) {
-               release_perfctr_nmi(x86_pmu.perfctr + i);
-               release_evntsel_nmi(x86_pmu.eventsel + i);
+               release_perfctr_nmi(x86_pmu_event_addr(i));
+               release_evntsel_nmi(x86_pmu_config_addr(i));
        }
 }
 
@@ -382,7 +392,7 @@ static bool check_hw_exists(void)
         * complain and bail.
         */
        for (i = 0; i < x86_pmu.num_counters; i++) {
-               reg = x86_pmu.eventsel + i;
+               reg = x86_pmu_config_addr(i);
                ret = rdmsrl_safe(reg, &val);
                if (ret)
                        goto msr_fail;
@@ -407,8 +417,8 @@ static bool check_hw_exists(void)
         * that don't trap on the MSR access and always return 0s.
         */
        val = 0xabcdUL;
-       ret = checking_wrmsrl(x86_pmu.perfctr, val);
-       ret |= rdmsrl_safe(x86_pmu.perfctr, &val_new);
+       ret = checking_wrmsrl(x86_pmu_event_addr(0), val);
+       ret |= rdmsrl_safe(x86_pmu_event_addr(0), &val_new);
        if (ret || val != val_new)
                goto msr_fail;
 
@@ -617,11 +627,11 @@ static void x86_pmu_disable_all(void)
 
                if (!test_bit(idx, cpuc->active_mask))
                        continue;
-               rdmsrl(x86_pmu.eventsel + idx, val);
+               rdmsrl(x86_pmu_config_addr(idx), val);
                if (!(val & ARCH_PERFMON_EVENTSEL_ENABLE))
                        continue;
                val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
-               wrmsrl(x86_pmu.eventsel + idx, val);
+               wrmsrl(x86_pmu_config_addr(idx), val);
        }
 }
 
@@ -1110,8 +1120,8 @@ void perf_event_print_debug(void)
        pr_info("CPU#%d: active:     %016llx\n", cpu, *(u64 *)cpuc->active_mask);
 
        for (idx = 0; idx < x86_pmu.num_counters; idx++) {
-               rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
-               rdmsrl(x86_pmu.perfctr  + idx, pmc_count);
+               rdmsrl(x86_pmu_config_addr(idx), pmc_ctrl);
+               rdmsrl(x86_pmu_event_addr(idx), pmc_count);
 
                prev_left = per_cpu(pmc_prev_left[idx], cpu);
 
index 008835c1d79ca2c5fb83b2c2ca2ddfeab657cca1..084b38362db77284b30949647c5aba2cce8fb26f 100644 (file)
@@ -691,8 +691,8 @@ static void intel_pmu_reset(void)
        printk("clearing PMU state on CPU#%d\n", smp_processor_id());
 
        for (idx = 0; idx < x86_pmu.num_counters; idx++) {
-               checking_wrmsrl(x86_pmu.eventsel + idx, 0ull);
-               checking_wrmsrl(x86_pmu.perfctr  + idx, 0ull);
+               checking_wrmsrl(x86_pmu_config_addr(idx), 0ull);
+               checking_wrmsrl(x86_pmu_event_addr(idx),  0ull);
        }
        for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++)
                checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);