x86/paravirt: Optimize native pv_lock_ops.vcpu_is_preempted()
diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
index 2f204dd552a40c45812df4dd28e2394a0d764a25..6d4bf812af45d9ed144ee33faf9f906ed9e107c6 100644
--- a/arch/x86/kernel/paravirt-spinlocks.c
+++ b/arch/x86/kernel/paravirt-spinlocks.c
@@ -12,7 +12,6 @@ __visible void __native_queued_spin_unlock(struct qspinlock *lock)
 {
        native_queued_spin_unlock(lock);
 }
-
 PV_CALLEE_SAVE_REGS_THUNK(__native_queued_spin_unlock);
 
 bool pv_is_native_spin_unlock(void)
@@ -21,9 +20,16 @@ bool pv_is_native_spin_unlock(void)
                __raw_callee_save___native_queued_spin_unlock;
 }
 
-static bool native_vcpu_is_preempted(int cpu)
+__visible bool __native_vcpu_is_preempted(int cpu)
+{
+       return false;
+}
+PV_CALLEE_SAVE_REGS_THUNK(__native_vcpu_is_preempted);
+
+bool pv_is_native_vcpu_is_preempted(void)
 {
-       return 0;
+       return pv_lock_ops.vcpu_is_preempted.func ==
+               __raw_callee_save___native_vcpu_is_preempted;
 }
 
 struct pv_lock_ops pv_lock_ops = {
@@ -32,7 +38,7 @@ struct pv_lock_ops pv_lock_ops = {
        .queued_spin_unlock = PV_CALLEE_SAVE(__native_queued_spin_unlock),
        .wait = paravirt_nop,
        .kick = paravirt_nop,
-       .vcpu_is_preempted = native_vcpu_is_preempted,
+       .vcpu_is_preempted = PV_CALLEE_SAVE(__native_vcpu_is_preempted),
 #endif /* SMP */
 };
 EXPORT_SYMBOL(pv_lock_ops);
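
For context: the hunks above turn the native vcpu_is_preempted implementation into a callee-save paravirt op (PV_CALLEE_SAVE_REGS_THUNK generates a thunk that preserves the caller-clobbered registers, so a call site only has to treat the return register as clobbered) and add pv_is_native_vcpu_is_preempted(), mirroring the existing pv_is_native_spin_unlock() helper. The sketch below is an illustration of how such a helper is typically consumed by the x86 paravirt patching code, not part of this diff; the exact DEF_NATIVE body and its placement in native_patch() (arch/x86/kernel/paravirt_patch_64.c in this era) are assumptions.

/*
 * Illustrative sketch only, not part of this blobdiff. On bare metal
 * vcpu_is_preempted() is always false, so the indirect call can be
 * patched into code that just zeroes the return register.
 */
DEF_NATIVE(pv_lock_ops, vcpu_is_preempted, "xor %%rax, %%rax");

	/* ... inside native_patch()'s switch over the patch type: */
	case PARAVIRT_PATCH(pv_lock_ops.vcpu_is_preempted):
		if (pv_is_native_vcpu_is_preempted()) {
			/* symbols emitted by DEF_NATIVE() above */
			start = start_pv_lock_ops_vcpu_is_preempted;
			end   = end_pv_lock_ops_vcpu_is_preempted;
			goto patch_site;
		}
		break;

Keeping the native implementation behind PV_CALLEE_SAVE()/PV_CALLEE_SAVE_REGS_THUNK is what makes both cases cheap: hypervisor guests still get a real (register-light) function call, while native kernels can detect the default implementation via pv_is_native_vcpu_is_preempted() and patch the call out entirely.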