asedeno.scripts.mit.edu Git - linux.git/blobdiff - arch/x86/kernel/paravirt_patch_64.c
x86/paravirt: Optimize native pv_lock_ops.vcpu_is_preempted()
[linux.git] / arch / x86 / kernel / paravirt_patch_64.c
index bb3840cedb4f00c0479b8e97844816a83091ed31..e61dd9791f4fd400dd61ebfd1195e29a0088963a 100644 (file)
@@ -21,6 +21,7 @@ DEF_NATIVE(, mov64, "mov %rdi, %rax");
 
 #if defined(CONFIG_PARAVIRT_SPINLOCKS)
 DEF_NATIVE(pv_lock_ops, queued_spin_unlock, "movb $0, (%rdi)");
+DEF_NATIVE(pv_lock_ops, vcpu_is_preempted, "xor %rax, %rax");
 #endif
 
 unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len)
@@ -36,6 +37,7 @@ unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len)
 }
 
 extern bool pv_is_native_spin_unlock(void);
+extern bool pv_is_native_vcpu_is_preempted(void);
 
 unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
                      unsigned long addr, unsigned len)
@@ -68,6 +70,12 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
                                end   = end_pv_lock_ops_queued_spin_unlock;
                                goto patch_site;
                        }
+               case PARAVIRT_PATCH(pv_lock_ops.vcpu_is_preempted):
+                       if (pv_is_native_vcpu_is_preempted()) {
+                               start = start_pv_lock_ops_vcpu_is_preempted;
+                               end   = end_pv_lock_ops_vcpu_is_preempted;
+                               goto patch_site;
+                       }
 #endif
 
        default: