/* arch/x86/kernel/paravirt_patch_64.c */
#include <asm/paravirt.h>
#include <asm/asm-offsets.h>
#include <linux/stringify.h>

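/*
 * Each DEF_NATIVE(ops, name, code) emits the native instruction
 * sequence "code" into the image, bracketed by a pair of global
 * labels, and declares matching char arrays so C code can take the
 * sequence's start and end addresses.  Roughly (see the real macro in
 * asm/paravirt_types.h):
 *
 *	extern const char start_##ops##_##name[], end_##ops##_##name[];
 *	asm("start_" #ops "_" #name ":\n\t"
 *	    code
 *	    "\nend_" #ops "_" #name ":");
 *
 * native_patch() below copies the bytes between those labels straight
 * over the corresponding paravirt call site.
 */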
DEF_NATIVE(pv_irq_ops, irq_disable, "cli");
DEF_NATIVE(pv_irq_ops, irq_enable, "sti");
DEF_NATIVE(pv_irq_ops, restore_fl, "pushq %rdi; popfq");
DEF_NATIVE(pv_irq_ops, save_fl, "pushfq; popq %rax");
DEF_NATIVE(pv_mmu_ops, read_cr2, "movq %cr2, %rax");
DEF_NATIVE(pv_mmu_ops, read_cr3, "movq %cr3, %rax");
DEF_NATIVE(pv_mmu_ops, write_cr3, "movq %rdi, %cr3");
DEF_NATIVE(pv_mmu_ops, flush_tlb_single, "invlpg (%rdi)");
DEF_NATIVE(pv_cpu_ops, clts, "clts");
DEF_NATIVE(pv_cpu_ops, wbinvd, "wbinvd");

DEF_NATIVE(pv_cpu_ops, usergs_sysret64, "swapgs; sysretq");
DEF_NATIVE(pv_cpu_ops, swapgs, "swapgs");

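/*
 * Identity functions: a pvop that just passes its first argument
 * through patches down to a single move from %edi/%rdi (first
 * argument) to %eax/%rax (return value).
 */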
DEF_NATIVE(, mov32, "mov %edi, %eax");
DEF_NATIVE(, mov64, "mov %rdi, %rax");

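/*
 * On bare metal, queued_spin_unlock() is a single byte store of zero
 * to the lock word, and vcpu_is_preempted() always returns false; a
 * physical CPU is never preempted by a hypervisor, so zeroing %rax is
 * the entire function body.
 */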
#if defined(CONFIG_PARAVIRT_SPINLOCKS)
DEF_NATIVE(pv_lock_ops, queued_spin_unlock, "movb $0, (%rdi)");
DEF_NATIVE(pv_lock_ops, vcpu_is_preempted, "xor %rax, %rax");
#endif

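/*
 * Patch an identity pvop call site with the single mov defined above;
 * paravirt_patch_insns() only copies it if it fits within the site's
 * len bytes.
 */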
unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len)
{
	return paravirt_patch_insns(insnbuf, len,
				    start__mov32, end__mov32);
}

unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len)
{
	return paravirt_patch_insns(insnbuf, len,
				    start__mov64, end__mov64);
}

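/*
 * Defined in arch/x86/kernel/paravirt-spinlocks.c: report whether the
 * lock ops still point at the native implementations, i.e. whether no
 * hypervisor has hooked them.
 */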
extern bool pv_is_native_spin_unlock(void);
extern bool pv_is_native_vcpu_is_preempted(void);

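/*
 * native_patch() is the pv_init_ops.patch hook used on bare metal.
 * apply_paravirt() invokes it at boot for every call site recorded in
 * the .parainstructions section; the ops handled below are replaced
 * with the inline native instructions defined above, and everything
 * else falls back to paravirt_patch_default().
 */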
unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
		      unsigned long addr, unsigned len)
{
	const unsigned char *start, *end;
	unsigned ret;

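/*
 * PATCH_SITE(ops, x) expands to a case label for the op's slot in
 * struct paravirt_patch_template and loads the start/end markers of
 * its native replacement sequence.
 */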
#define PATCH_SITE(ops, x)					\
		case PARAVIRT_PATCH(ops.x):			\
			start = start_##ops##_##x;		\
			end = end_##ops##_##x;			\
			goto patch_site
	switch (type) {
		PATCH_SITE(pv_irq_ops, restore_fl);
		PATCH_SITE(pv_irq_ops, save_fl);
		PATCH_SITE(pv_irq_ops, irq_enable);
		PATCH_SITE(pv_irq_ops, irq_disable);
		PATCH_SITE(pv_cpu_ops, usergs_sysret64);
		PATCH_SITE(pv_cpu_ops, swapgs);
		PATCH_SITE(pv_mmu_ops, read_cr2);
		PATCH_SITE(pv_mmu_ops, read_cr3);
		PATCH_SITE(pv_mmu_ops, write_cr3);
		PATCH_SITE(pv_cpu_ops, clts);
		PATCH_SITE(pv_mmu_ops, flush_tlb_single);
		PATCH_SITE(pv_cpu_ops, wbinvd);
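		/*
		 * The lock ops are patched only while they still point
		 * at the native code; if a hypervisor has hooked one,
		 * fall back to default patching instead of falling
		 * through into the next case.
		 */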
#if defined(CONFIG_PARAVIRT_SPINLOCKS)
		case PARAVIRT_PATCH(pv_lock_ops.queued_spin_unlock):
			if (pv_is_native_spin_unlock()) {
				start = start_pv_lock_ops_queued_spin_unlock;
				end   = end_pv_lock_ops_queued_spin_unlock;
				goto patch_site;
			}
			goto patch_default;
		case PARAVIRT_PATCH(pv_lock_ops.vcpu_is_preempted):
			if (pv_is_native_vcpu_is_preempted()) {
				start = start_pv_lock_ops_vcpu_is_preempted;
				end   = end_pv_lock_ops_vcpu_is_preempted;
				goto patch_site;
			}
			goto patch_default;
#endif

	default:
patch_default: __maybe_unused
		ret = paravirt_patch_default(type, clobbers, ibuf, addr, len);
		break;

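	/*
	 * Copy the native sequence bracketed by start/end over the call
	 * site; the returned length tells apply_paravirt() how many
	 * bytes were emitted so the remainder can be padded with nops.
	 */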
patch_site:
		ret = paravirt_patch_insns(ibuf, len, start, end);
		break;
	}
#undef PATCH_SITE
	return ret;
}
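
/*
 * A minimal sketch of how this is wired up on bare metal (see
 * arch/x86/kernel/paravirt.c and alternative.c for the real code):
 *
 *	struct pv_init_ops pv_init_ops = {
 *		.patch = native_patch,
 *	};
 *
 * apply_paravirt() then walks the .parainstructions table at boot and
 * calls pv_init_ops.patch() for each recorded site, padding whatever
 * the patch does not fill with nops.
 */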